diff --git a/Documentation/dev/develop.md b/Documentation/dev/develop.md index 70143952..6e955cba 100644 --- a/Documentation/dev/develop.md +++ b/Documentation/dev/develop.md @@ -23,7 +23,7 @@ Alternately, build a Docker image `coreos/bootcfg:latest`. sudo ./build-docker -## Check Version +## Version ./bin/bootcfg -version sudo rkt --insecure-options=image run bootcfg.aci -- -version @@ -41,4 +41,19 @@ Run the ACI with rkt on `metal0`. Alternately, run the Docker image on `docker0`. - sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/bootcfg:Z -v $PWD/examples/groups/etcd-docker:/var/lib/bootcfg/groups:Z coreos/bootcfg:latest -address=0.0.0.0:8080 -log-level=debug \ No newline at end of file + sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/bootcfg:Z -v $PWD/examples/groups/etcd-docker:/var/lib/bootcfg/groups:Z coreos/bootcfg:latest -address=0.0.0.0:8080 -log-level=debug + +## Dependencies + +Project dependencies are committed to the `vendor` directory, so Go 1.6+ users can clone to their `GOPATH` and build or test immediately. Go 1.5 users should set `GO15VENDOREXPERIMENT=1`. + +Project developers should use [glide](https://github.com/Masterminds/glide) to manage committed dependencies under `vendor`. Configure `glide.yaml` as desired. Use `glide update` to download and update dependencies listed in `glide.yaml` into `/vendor` (do **not** use glide `get`). + + glide update --update-vendored --strip-vendor --strip-vcs + +Recursive dependencies are also vendored. A `glide.lock` will be created to represent the exact versions of each dependency. + +With an empty `vendor` directory, you can install the `glide.lock` dependencies. 
+ + rm -rf vendor/ + glide install --strip-vendor --strip-vcs diff --git a/glide.lock b/glide.lock new file mode 100644 index 00000000..d561e65e --- /dev/null +++ b/glide.lock @@ -0,0 +1,95 @@ +hash: 8f33fd1c87e2136cdff69364e0668a595a129b8ffd686851336c841bf7e4f705 +updated: 2016-05-12T14:04:55.653773498-07:00 +imports: +- name: github.com/alecthomas/units + version: 2efee857e7cfd4f3d0138cc3cbb1b4966962b93a +- name: github.com/camlistore/camlistore + version: 9106ce829629773474c689b34aacd7d3aaa99426 + subpackages: + - pkg/errorutil +- name: github.com/coreos/coreos-cloudinit + version: b3f805dee6a4aa5ed298a1f370284df470eecf43 + subpackages: + - config +- name: github.com/coreos/go-semver + version: 294930c1e79c64e7dbe360054274fdad492c8cf5 + subpackages: + - semver +- name: github.com/coreos/go-systemd + version: 7b2428fec40033549c68f54e26e89e7ca9a9ce31 + subpackages: + - journal +- name: github.com/coreos/ignition + version: 44c274ab414294a8e34b3a940e0ec1afe6b6c610 + subpackages: + - config + - config/types + - config/v1 + - config/v1/types +- name: github.com/coreos/pkg + version: 66fe44ad037ccb80329115cb4db0dbe8e9beb03a + subpackages: + - capnslog + - flagutil +- name: github.com/coreos/yaml + version: 6b16a5714269b2f70720a45406b1babd947a17ef +- name: github.com/davecgh/go-spew + version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + subpackages: + - spew +- name: github.com/golang/protobuf + version: f0a097ddac24fb00e07d2ac17f8671423f3ea47c + subpackages: + - proto +- name: github.com/inconshreveable/mousetrap + version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +- name: github.com/pmezard/go-difflib + version: 792786c7400a136282c1664665ae0a8db921c6c2 + subpackages: + - difflib +- name: github.com/spf13/cobra + version: 65a708cee0a4424f4e353d031ce440643e312f92 +- name: github.com/spf13/pflag + version: 7f60f83a2c81bc3c3c0d5297f61ddfa68da9d3b7 +- name: github.com/stretchr/testify + version: 1f4a1643a57e798696635ea4c126e9127adb7d3c + subpackages: + - assert +- name: 
github.com/vincent-petithory/dataurl + version: 9a301d65acbb728fcc3ace14f45f511a4cfeea9c +- name: go4.org + version: 03efcb870d84809319ea509714dd6d19a1498483 + subpackages: + - errorutil +- name: golang.org/x/crypto + version: 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3 + subpackages: + - cast5 + - openpgp + - openpgp/armor + - openpgp/errors + - openpgp/packet + - openpgp/s2k + - openpgp/elgamal +- name: golang.org/x/net + version: fb93926129b8ec0056f2f458b1f519654814edf0 + subpackages: + - context + - http2 + - internal/timeseries + - trace + - http2/hpack +- name: google.golang.org/grpc + version: 8eeecf2291de9d171d0b1392a27ff3975679f4f5 + subpackages: + - codes + - credentials + - grpclog + - internal + - metadata + - naming + - transport + - peer +- name: gopkg.in/yaml.v2 + version: f7716cbe52baa25d2e9b0d0da546fcf909fc16b4 +devImports: [] diff --git a/glide.yaml b/glide.yaml new file mode 100644 index 00000000..f49bcd34 --- /dev/null +++ b/glide.yaml @@ -0,0 +1,77 @@ +package: github.com/coreos/coreos-baremetal +import: +- package: github.com/alecthomas/units + version: 2efee857e7cfd4f3d0138cc3cbb1b4966962b93a +- package: github.com/camlistore/camlistore + version: 9106ce829629773474c689b34aacd7d3aaa99426 +- package: github.com/coreos/coreos-cloudinit + version: b3f805dee6a4aa5ed298a1f370284df470eecf43 + subpackages: + - Godeps/_workspace/src/github.com/coreos/yaml + - config +- package: github.com/coreos/go-semver + version: 294930c1e79c64e7dbe360054274fdad492c8cf5 + subpackages: + - semver +- package: github.com/coreos/go-systemd + version: 7b2428fec40033549c68f54e26e89e7ca9a9ce31 + subpackages: + - journal +- package: github.com/coreos/ignition + version: 44c274ab414294a8e34b3a940e0ec1afe6b6c610 + subpackages: + - config + - config/types + - config/v1 + - config/v1/types +- package: github.com/coreos/pkg + version: 66fe44ad037ccb80329115cb4db0dbe8e9beb03a + subpackages: + - capnslog + - flagutil +- package: github.com/davecgh/go-spew + version: 
5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + subpackages: + - spew +- package: github.com/golang/protobuf + version: f0a097ddac24fb00e07d2ac17f8671423f3ea47c + subpackages: + - proto +- package: github.com/pmezard/go-difflib + version: 792786c7400a136282c1664665ae0a8db921c6c2 + subpackages: + - difflib +- package: github.com/spf13/cobra + version: 65a708cee0a4424f4e353d031ce440643e312f92 +- package: github.com/spf13/pflag + version: 7f60f83a2c81bc3c3c0d5297f61ddfa68da9d3b7 +- package: github.com/stretchr/testify + version: 1f4a1643a57e798696635ea4c126e9127adb7d3c + subpackages: + - assert +- package: github.com/vincent-petithory/dataurl + version: 9a301d65acbb728fcc3ace14f45f511a4cfeea9c +- package: go4.org + version: 03efcb870d84809319ea509714dd6d19a1498483 + subpackages: + - errorutil +- package: golang.org/x/crypto + version: 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3 + subpackages: + - cast5 + - openpgp +- package: golang.org/x/net + version: fb93926129b8ec0056f2f458b1f519654814edf0 + subpackages: + - context + - http2 + - internal/timeseries + - trace +- package: google.golang.org/grpc + version: 8eeecf2291de9d171d0b1392a27ff3975679f4f5 + subpackages: + - codes +- package: gopkg.in/yaml.v2 + version: f7716cbe52baa25d2e9b0d0da546fcf909fc16b4 +- package: github.com/coreos/yaml + version: 6b16a5714269b2f70720a45406b1babd947a17ef diff --git a/vendor/github.com/camlistore/camlistore/.gitignore b/vendor/github.com/camlistore/camlistore/.gitignore new file mode 100644 index 00000000..57a50967 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/.gitignore @@ -0,0 +1,34 @@ +*~ +*.o +*.pyc +\#*\# +.\#* +.*.swp +logs +_obj +[68].out +_test +_gotest* +_testmain* +_go_.[568] +_cgo* +clients/go/camgsinit/camgsinit +clients/go/camwebdav/camwebdav +.goroot +appengine-sdk +build/root +.DS_Store +bin/cam* +bin/devcam +bin/*_* +bin/hello +bin/publisher +tmp +server/camlistored/newui/all.js +server/camlistored/newui/all.js.map +server/camlistored/newui/zembed_all.js.go 
+server/appengine/source_root/ +config/tls.* +misc/docker/djpeg-static/djpeg +misc/docker/camlistored/camlistored* +misc/docker/camlistored/djpeg diff --git a/vendor/github.com/camlistore/camlistore/.hackfests/2010-12-01.txt b/vendor/github.com/camlistore/camlistore/.hackfests/2010-12-01.txt new file mode 100644 index 00000000..8f8b6b62 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/.hackfests/2010-12-01.txt @@ -0,0 +1,23 @@ +Brett and I eating Burritos from Little Chihuahua at my house. + +Plan is for Brett to work on Clip-it-Good (the Chrome Extension)'s +camli support (currently non-existent), and get it to: + + -- select an image + -- upload the image blob + -- create the permanode blob + -- create (and sign, with the signing server) the "become" claim, + pointing the permanode at the image + -- create (a signed) "tag" blob, tagging an image e.g. "funny" + +I will work on docs & signing server tests & signing verification +endpoint. + + +-------------- + +Done: + + * Brad: docs re-organized + * Brad: camlistore.{com,org,net,info,us} domains purchased + diff --git a/vendor/github.com/camlistore/camlistore/.hackfests/2012-11-03.txt b/vendor/github.com/camlistore/camlistore/.hackfests/2012-11-03.txt new file mode 100644 index 00000000..41b65d92 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/.hackfests/2012-11-03.txt @@ -0,0 +1,4 @@ +Saturday & Sunday in Paris with Mathieu, meeting for the first time, +hacking on EXIF rotation, thumbnail indexing, Postgres support, and +then Monday at Google Paris, working on different parts of the UI +permanode thumbnail page, and genfileembed problems. diff --git a/vendor/github.com/camlistore/camlistore/.hackfests/2012-12-23.txt b/vendor/github.com/camlistore/camlistore/.hackfests/2012-12-23.txt new file mode 100644 index 00000000..0abf5809 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/.hackfests/2012-12-23.txt @@ -0,0 +1 @@ +Closure newui hacking with bslatkin. 
diff --git a/vendor/github.com/camlistore/camlistore/.hackfests/2013-01-20.txt b/vendor/github.com/camlistore/camlistore/.hackfests/2013-01-20.txt new file mode 100644 index 00000000..3af57bed --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/.hackfests/2013-01-20.txt @@ -0,0 +1,5 @@ +At Brett's place, with Brett Slatkin, Lindsey Simon, Ryan Barrett. + +Goal: More closure UI stuff. + +Ryan getting up-to-speed and maybe working on Activity Streams import. diff --git a/vendor/github.com/camlistore/camlistore/.hackfests/2013-12-27.txt b/vendor/github.com/camlistore/camlistore/.hackfests/2013-12-27.txt new file mode 100644 index 00000000..23ca80e2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/.hackfests/2013-12-27.txt @@ -0,0 +1,8 @@ +Aaron Boodman, react js permanode UI +Andy Smith, discussing data model, updating HACKING, etc +Brad Fitzpatrick, misc +Brett Slatkin, web screenshotting client app, claim creation API/ACLs +Daisy Stanton, her own thing +Dan Erat, music player app +Emil Eklund, getting up to speed, photo stuff +Nick O'Neill, iOS diff --git a/vendor/github.com/camlistore/camlistore/.header b/vendor/github.com/camlistore/camlistore/.header new file mode 100644 index 00000000..aeaff6c0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/.header @@ -0,0 +1,20 @@ +/* +Copyright 2015 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package x + +import ( +) diff --git a/vendor/github.com/camlistore/camlistore/AUTHORS b/vendor/github.com/camlistore/camlistore/AUTHORS new file mode 100644 index 00000000..983d7b05 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/AUTHORS @@ -0,0 +1,65 @@ +# This is the official list of Camlistore authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +Aaron Bieber +Alessandro Arzilli gh=aarzilli +Amir Mohammad Saied +Amit Levy +Andy Smith +Anthony Martin +Antonin Amand +Antti Rasinen +Armen Baghumian +Bret Comnes +Brian Marete +Caine Tighe +Dan Kortschak +Daniel Coonce +Daniel Dermott Bryan +Daniel Pupius +Dean Landolt +Dustin Sallings +Edward Sheffler III +Emil Hessman +Eric Drechsel +Fabian Wickborn +Gina White +Google Inc. +Govert Versluis +Hernan Grecco +Iain Peet +Jakub Brzeski +Jani Monoses +Jingguo Yao +Josh Bleecher Snyder +Josh Huckabee +Joshua Gay +Jrabbit +Julien Danjou +Kamil Kisiel +Kristopher Cost +Lindsey Simon +Mario Russo +Mateus Braga +Mathieu Lonjaret +Matthieu Rakotojaona Rainimangavelo +Matt Jibson +Maxime Lavigne +Michael Vincent Zuffoletti +Nick O'Neill +Nolan Darilek +Philio +Piotr Staszewski +Ranveer +Ritesh Sinha +Rob Young +Robert Obryk +Robert Hencke +Salman Aljammaz +Sarath Lakshman +Steve Phillips +Steven L. Speek +Tamás Gulácsi +Timo Truyts +Ulf Holm Nielsen diff --git a/vendor/github.com/camlistore/camlistore/BUILDING b/vendor/github.com/camlistore/camlistore/BUILDING new file mode 100644 index 00000000..2ce7ab2e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/BUILDING @@ -0,0 +1,12 @@ +To build Camlistore: + +1) Install Go 1.5 or later. + +2) cd to the root of the Camlistore source (where this file is) + +3) Run: + + $ go run make.go + +4) The compiled binaries should now be in the "bin" subdirectory: + camlistored (the server), camget, camput, and camtool. 
diff --git a/vendor/github.com/camlistore/camlistore/CONTRIBUTORS b/vendor/github.com/camlistore/camlistore/CONTRIBUTORS new file mode 100644 index 00000000..6e6c3093 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/CONTRIBUTORS @@ -0,0 +1,93 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# http://code.google.com/legal/individual-cla-v1.0.html (electronic submission) +# http://code.google.com/legal/corporate-cla-v1.0.html (requires FAX) +# +# Note that the CLA isn't a copyright _assignment_ but rather a +# copyright _license_. You retain the copyright on your +# contributions. + +Aaron Bieber +Aaron Boodman +Aaron Racine +Adam Langley +Alessandro Arzilli gh=aarzilli +Ali Afshar +Amir Mohammad Saied +Amit Levy +Andrew Gerrand +Andy Smith +Anthony Martin +Antonin Amand +Antti Rasinen +Armen Baghumian +Bill Thiede +Brad Fitzpatrick +Bret Comnes +Brett Slatkin +Brian Marete +Burcu Dogan +Caine Tighe +Dan Kortschak +Daniel Coonce +Daniel Dermott Bryan +Daniel Erat +Daniel Pupius +Dean Landolt +Dustin Sallings +Edward Sheffler III +Emil Hessman +Eric Drechsel +Evan Martin +Fabian Wickborn +Gina White +Govert Versluis +Han-Wen Nienhuys +Hernan Grecco +Iain Peet +Jakub Brzeski +Jani Monoses +Jingguo Yao +Johan Euphrosine +Josh Bleecher Snyder +Josh Huckabee +Joshua Gay +Jrabbit +Julien Danjou +Kamil Kisiel +Kristopher Cost +Lindsey Simon +Marc-Antoine Ruel +Mario Russo +Mateus Braga +Mathieu Lonjaret +Matthieu Rakotojaona Rainimangavelo +Matt Jibson +Maxime Lavigne +Michael Vincent Zuffoletti +Nick O'Neill +Nico Weber +Nigel Tao +Nolan Darilek +Pawel Szczur +Philio +Piotr Staszewski +Ranveer +Ritesh Sinha +Rob Young +Robert Hencke +Robert Kroeger +Robert Obryk +Ryan Barrett +Salman Aljammaz +Sarath Lakshman +Steve Phillips +Steven L. 
Speek +Tamás Gulácsi +Timo Truyts +Tony Chang +Tony Scelfo +Ulf Holm Nielsen diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/camlistore/camlistore/COPYING similarity index 100% rename from vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/LICENSE rename to vendor/github.com/camlistore/camlistore/COPYING diff --git a/vendor/github.com/camlistore/camlistore/Dockerfile b/vendor/github.com/camlistore/camlistore/Dockerfile new file mode 100644 index 00000000..5892fe04 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/Dockerfile @@ -0,0 +1,56 @@ +# Build everything at least. This is a work in progress. +# +# Useful for testing things before a release. +# +# Will also be used for running the camlistore.org website and public +# read-only blobserver. + +FROM ubuntu:12.04 + +MAINTAINER camlistore + +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update && apt-get upgrade -y +RUN apt-get install -y curl make git + +RUN curl -o /tmp/go.tar.gz https://storage.googleapis.com/golang/go1.3.1.linux-amd64.tar.gz +RUN tar -C /usr/local -zxvf /tmp/go.tar.gz +RUN rm /tmp/go.tar.gz +RUN /usr/local/go/bin/go version + +ENV GOROOT /usr/local/go +ENV PATH $GOROOT/bin:/gopath/bin:$PATH + +RUN mkdir -p /gopath/src +ADD pkg /gopath/src/camlistore.org/pkg +ADD cmd /gopath/src/camlistore.org/cmd +ADD website /gopath/src/camlistore.org/website +ADD third_party /gopath/src/camlistore.org/third_party +ADD server /gopath/src/camlistore.org/server +ADD dev /gopath/src/camlistore.org/dev +ADD depcheck /gopath/src/camlistore.org/depcheck + +RUN adduser --disabled-password --quiet --gecos Camli camli +RUN mkdir -p /gopath/bin +RUN chown camli.camli /gopath/bin +RUN mkdir -p /gopath/pkg +RUN chown camli.camli /gopath/pkg +USER camli + +ENV GOPATH /gopath + +RUN go install --tags=purego \ + camlistore.org/server/camlistored \ + camlistore.org/cmd/camput \ + camlistore.org/cmd/camget \ + 
camlistore.org/cmd/camtool \ + camlistore.org/website \ + camlistore.org/dev/devcam + +ENV USER camli +ENV HOME /home/camli +WORKDIR /home/camli + +EXPOSE 80 443 3179 8080 + +CMD /bin/bash diff --git a/vendor/github.com/camlistore/camlistore/HACKING b/vendor/github.com/camlistore/camlistore/HACKING new file mode 100644 index 00000000..ff998e9e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/HACKING @@ -0,0 +1,110 @@ +Camlistore contributors regularly use Linux and OS X, and both are +100% supported. + +Developing on Windows is sometimes broken, but should work. Let us +know if we broke something, or we accidentally depend on some +Unix-specific build tool somewhere. + +See http://camlistore.org/docs/contributing for information on how to +contribute to the project and submit patches. Notably, we use Gerrit +for code review. Our Gerrit instance is at https://camlistore.org/r/ + +See architecture docs: https://camlistore.org/docs/ + +You can view docs for Camlistore packages with local godoc, or +godoc.org. + +It's recommended you use git to fetch the source code, rather than +hack from a Camlistore release's zip file: + +$ git clone https://camlistore.googlesource.com/camlistore + +(We use github for distribution but its code review system is so poor, +we don't use its Pull Request mechanism. The Gerrit git server & code +review system is the main repo. See +http://camlistore.org/docs/contributing for how to use them. We might +support github for pull requests in the future, once it's properly +integrated with external code review tools. We had a meeting with Github +to discuss the ways in which their code review tools are poor.) 
+ +On Debian/Ubuntu, some deps to get started: + +$ sudo apt-get install libsqlite3-dev sqlite3 pkg-config git + +During development, rather than use the main binaries ("camput", +"camget", "camtool", "cammount", etc) directly, we instead use a +wrapper (devcam) that automatically configure the environment to use +the test server & test environment. + +To build devcam: + +$ go run make.go + +And devcam will be in /bin/devcam. You'll probably want to +symlink it into your $PATH. + +Alternatively, if your Camlistore root is checked out at +$GOPATH/src/camlistore.org (optional, but natural for Go users), you +can just: + +$ export GO15VENDOREXPERIMENT=1 # required for all Camlistore builds +$ go install ./dev/devcam + +The subcommands of devcam start the server or run camput/camget/etc: + +$ devcam server # main server +$ devcam appengine # App Engine version of the server +$ devcam put # camput +$ devcam get # camget +$ devcam tool # camtool +$ devcam mount # cammount + +Once the dev server is running, + + - Upload a file: + devcam put file ~/camlistore/COPYING + - Create a permanode: + devcam put permanode + - Use the UI: http://localhost:3179/ui/ + +Before submitting a patch, you should check that all the tests pass with: + +$ devcam test + +You can use your usual git workflow to commit your changes, but for each +change to be reviewed you should merge your commits into one before submitting +to gerrit for review. + +You should also try to write a meaningful commit message, which at least states +in the first sentence what part or package of camlistore this commit is affecting. +The following text should state what problem the change is addressing, and how. +Finally, you should refer to the github issue(s) the commit is addressing, if any, +and with the appropriate keyword if the commit is fixing the issue. (See +https://help.github.com/articles/closing-issues-via-commit-messages/). 
+ +For example: + +" +pkg/search: add "file" predicate to search by file name + +File names were already indexed but there was no way to query the index for a file +by its name. The "file" predicate can now be used in search expressions (e.g. in the +search box of the web user interface) to achieve that. + +Fixes #10987 +" + +If your commit is adding or updating a vendored third party, you must indicate +in your commit message the version (e.g. git commit hash) of said third party. + +You can optionally use our pre-commit hook so that your code gets gofmt'ed +before being submitted (which should be done anyway). + +$ cd .git/hooks +$ ln -s ../../misc/pre-commit.githook pre-commit + +Finally, submit your code to gerrit with: + +$ devcam review + +Please update this file as appropriate. diff --git a/vendor/github.com/camlistore/camlistore/Makefile b/vendor/github.com/camlistore/camlistore/Makefile new file mode 100644 index 00000000..20106875 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/Makefile @@ -0,0 +1,36 @@ +# The normal way to build Camlistore is just "go run make.go", which +# works everywhere, even on systems without Make. The rest of this +# Makefile is mostly historical and should hopefully disappear over +# time. +all: + go run make.go + +# On OS X with "brew install sqlite3", you need PKG_CONFIG_PATH=/usr/local/Cellar/sqlite/3.7.17/lib/pkgconfig/ +full: + go install --ldflags="-X camlistore.org/pkg/buildinfo.GitInfo "`./misc/gitversion` `pkg-config --libs sqlite3 1>/dev/null 2>/dev/null && echo "--tags=with_sqlite"` ./pkg/... ./server/... ./cmd/... ./third_party/... ./dev/... + + +# Workaround Go bug where the $GOPATH/pkg cache doesn't know about tag changes. +# Useful when you accidentally run "make" and then "make presubmit" doesn't work. +# See https://code.google.com/p/go/issues/detail?id=4443 +forcefull: + go install -a --tags=with_sqlite ./pkg/... ./server/camlistored ./cmd/... ./dev/... 
+ +oldpresubmit: fmt + SKIP_DEP_TESTS=1 go test `pkg-config --libs sqlite3 1>/dev/null 2>/dev/null && echo "--tags=with_sqlite"` -short ./pkg/... ./server/camlistored/... ./server/appengine ./cmd/... ./dev/... && echo PASS + +presubmit: fmt + go run dev/devcam/*.go test -short + +embeds: + go install ./pkg/fileembed/genfileembed/ && genfileembed ./server/camlistored/ui && genfileembed ./pkg/server + +UIDIR = server/camlistored/ui + +NEWUIDIR = server/camlistored/newui + +clean: + rm -f $(NEWUIDIR)/all.js $(NEWUIDIR)/all.js.map + +fmt: + go fmt camlistore.org/cmd... camlistore.org/dev... camlistore.org/misc... camlistore.org/pkg... camlistore.org/server... diff --git a/vendor/github.com/camlistore/camlistore/README b/vendor/github.com/camlistore/camlistore/README new file mode 100644 index 00000000..a4b4d47f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/README @@ -0,0 +1,23 @@ +Camlistore is your personal storage system for life. + +It's a way to store, sync, share, model and back up content. + +It stands for "Content-Addressable Multi-Layer Indexed Storage", for +lack of a better name. For more, see: + + http://camlistore.org/ + http://camlistore.org/docs/ + +Other useful files: + + BUILDING how to compile it ("go run make.go") + HACKING how to do development and contribute + +Mailing lists: + + http://camlistore.org/lists + +Bugs and contributing: + + https://github.com/camlistore/camlistore/issues + http://camlistore.org/docs/contributing diff --git a/vendor/github.com/camlistore/camlistore/TESTS b/vendor/github.com/camlistore/camlistore/TESTS new file mode 100644 index 00000000..053a437b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/TESTS @@ -0,0 +1,34 @@ +Tests needed + +-- integration test of reindexing + race detector + +-- support for running race detector on all tests. when in race mode, also run + integration test children in race mode. 
+ +-- test that server/camlistored still builds & starts even when sqlite isn't + available (TODO: hide it from the test by running make.go in a child + process with a faked-out PKG_CONFIG environment or something, to make + cmd/go unable to find it even if it's installed) + +-- search & corpus use of EnumeratePermanodesLastModified + +-- pkg/client --- test FetchVia against a server returning compressed content. + (fix in 3fa6d69405f036308931dd36e5070b2b19dbeadf without a new test) + +-cmd/camput/ + -verify that stat caching works. verify that -filenodes does create the permanode even if the file was already uploaded (and cached) in a previous run. + +-- blobserver/{remote,shard} have no tests. should be easier now that + test.Fetcher is a full blobserver? see encrypt, replica, and cond's + nascent tests for examples. + +-- app engine integration tests (before we make a release, for sure, + but probably in presubmit) + +-- cross-compiling to freebsd and windows etc still works. + +-- pkg/auth -- not enough tests. see regression at + https://camlistore-review.googlesource.com/#/c/556/1 + +-- blobserver.WaitForBlob, and integration tests for the http handlers + for long-polling on Enumerate and Stat diff --git a/vendor/github.com/camlistore/camlistore/TODO b/vendor/github.com/camlistore/camlistore/TODO new file mode 100644 index 00000000..3eeebf10 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/TODO @@ -0,0 +1,253 @@ +There are two TODO lists. This file (good for airplanes) and the online bug tracker: + + https://github.com/camlistore/camlistore/issues + +Offline list: + +-- fix the presubmit's gofmt to be happy about emacs: + + go fmt camlistore.org/cmd... camlistore.org/dev... camlistore.org/misc... camlistore.org/pkg... camlistore.org/server... + stat pkg/blobserver/.#multistream_test.go: no such file or directory + exit status 2 + make: *** [fmt] Error 1 + + +-- add HTTP handler for blobstreamer. stream a tar file? where to put + continuation token? 
special file after each tar entry? special file + at the end? HTTP Trailers? (but nobody supports them) + +-- reindexing: + * add streaming interface to localdisk? maybe, even though not ideal, but + really: migrate my personal instance from localdisk to blobpacked + + maybe diskpacked for loose blobs? start by migrating to blobpacked and + measuring size of loose. + * add blobserver.EnumerateAllUnsorted (which could use StreamBlobs + if available, else use EnumerateAll, else maybe even use a new + interface method that goes forever and can't resume at a point, + but can be canceled, and localdisk could implement that at least) + * add buffered sorted.KeyValue implementation: a memory one (of + configurable max size) in front of a real disk one. add a Flush method + to it. also Flush when memory gets big enough. + In progress: pkg/sorted/buffer + +-- stop using the "cond" blob router storage type in genconfig, as + well as the /bs-and-index/ "replica" storage type, and just let the + index register its own AddReceiveHook like the sync handler + (pkg/server/sync.go). But whereas the sync handler only synchronously + _enqueues_ the blob to replicate, the indexer should synchronously + do the ReceiveBlob (ooo-reindex) on it too before returning. + But the sync handler, despite technically only synchronously-enqueueing + and being therefore async, is still very fast. It's likely the + sync handler will therefore send a ReceiveBlob to the indexer + at the ~same time the indexer is already indexing it. So the indexer + should have some dup/merge suppression, and not do double work. + singleflight should work. The loser should still consume the + source io.Reader body and reply with the same error value. + +-- ditch the importer.Interrupt type and pass along a context.Context + instead, which has its Done channel for cancelation. + +-- be able to put a search (expr or JSON) into camlistore as a blob, + and search on it. 
and then name it with a permanode, and then + use a expr search like "named:someset" which looks up someset's + current camliContent, fetches it, and then expands into that blob's + search.expr or search.Constraint. + +-- S3-only mode doesn't work with a local disk index (kvfile) because + there's no directory for us to put the kv in. + +-- fault injection many more places with pkg/fault. maybe even in all + handlers automatically somehow? + +-- sync handler's shard validation doesn't retry on error. + only reports the errors now. + +-- export blobserver.checkHashReader and document it with + the blob.Fetcher docs. + +-- "filestogether" handler, putting related blobs (e.g. files) + next to each other in bigger blobs / separate files, and recording + offsets of small blobs into bigger ones + +-- diskpacked doesn't seem to sync its index quickly enough. + A new blob receieved + process exit + read in a new process + doesn't find that blob. kv bug? Seems to need an explicit Close. + This feels broken. Add tests & debug. + +-- websocket upload protocol. different write & read on same socket, + as opposed to HTTP, to have multiple chunks in flight. + +-- extension to blobserver upload protocol to minimize fsyncs: maybe a + client can say "no rush" on a bunch of data blobs first (which + still don't get acked back over websocket until they've been + fsynced), and then when the client uploads the schema/vivivy blob, + that websocket message won't have the "no rush" flag, calling the + optional blobserver.Storage method to fsync (in the case of + diskpacked/localdisk) and getting all the "uploaded" messages back + for the data chunks that were written-but-not-synced. + +-- measure FUSE operations, latency, round-trips, performance. + see next item: + +-- ... we probaby need a "describe all chunks in file" HTTP handler. + then FUSE (when it sees sequential access) can say "what's the + list of all chunks in this file?" and then fetch them all at once. 
+ see next item: + +-- ... HTTP handler to get multiple blobs at once. multi-download + in multipart/mime body. we have this for stat and upload, but + not download. + +-- ... if we do blob fetching over websocket too, then we can support + cancellation of blob requests. Then we can combine the previous + two items: FUSE client can ask the server, over websockets, for a + list of all chunks, and to also start streaming them all. assume a + high-latency (but acceptable bandwidth) link. the chunks are + already in flight, but some might be redundant. once the client figures + out some might be redundant, it can issue "stop send" messages over + that websocket connection to prevent dups. this should work on + both "files" and "bytes" types. + +-- cacher: configurable policy on max cache size. clean oldest + things (consider mtime+atime) to get back under max cache size. + maybe prefer keeping small things (metadata blobs) too, + and only delete large data chunks. + +-- UI: video, at least thumbnailing (use external program, + like VLC or whatever nautilus uses?) + +-- rename server.ImageHandler to ThumbnailRequest or something? It's + not really a Handler in the normal sense. It's not built once and + called repeatedly; it's built for every ServeHTTP request. + +-- unexport more stuff from pkg/server. Cache, etc. + +-- look into garbage from openpgp signing + +-- make leveldb memdb's iterator struct only 8 bytes, pointing to a recycled + object, and just nil out that pointer at EOF. + +-- bring in the google glog package to third_party and use it in + places that want selective logging (e.g. pkg/index/receive.go) + +-- (Mostly done) verify all ReceiveBlob calls and see which should be + blobserver.Receive instead, or ReceiveNoHash. git grep -E + "\.ReceiveBlob\(" And maybe ReceiveNoHash should go away and be + replaced with a "ReceiveString" method which combines the + blobref-from-string and ReceiveNoHash at once. + +-- union storage target. 
sharder can be thought of as a specialization + of union. sharder already unions, but has a hard-coded policy + of where to put new blobs. union could be a library (used by sharder) + with a pluggable policy on that. + +-- support for running cammount under camlistored. especially for OS X, + where the lifetime of the background daemon will be the same as the + user's login session. + +-- website: remove the "Installation" heading for /cmd/*, since + they're misleading and people should use "go run make.go" in the + general case. + +-- website: add godoc for /server/camlistored (also without a "go get" + line) + +-- tests for all cmd/* stuff, perhaps as part of some integration + tests. + +-- move most of camput into a library, not a package main. + +-- server cron support: full syncs, camput file backups, integrity + checks. + +-- status in top right of UI: sync, crons. (in-progress, un-acked + problems) + +-- finish metadata compaction on the encryption blobserver.Storage wrapper. + +-- get security review on encryption wrapper. (agl?) + +-- peer-to-peer server and blobserver target to store encrypted blobs + on stranger's hard drives. server will be open source so groups of + friends/family can run their own for small circles, or some company + could run a huge instance. spray encrypted backup chunks across + friends' machines, and have central server(s) present challenges to + the replicas to have them verify what they have and how big, and + also occasionally say what the SHA-1("challenge" + blob-data) is. + +-- sharing: make camget work with permanode sets too, not just + "directory" and "file" things. + +-- sharing: when hitting e.g. http://myserver/share/sha1-xxxxx, if + a web browser and not a smart client (Accept header? User-Agent?) + then redirect or render a cutesy gallery or file browser instead, + still with machine-readable data for slurping. 
+ +-- rethink the directory schema so it can a) represent directories + with millions of files (without making a >1MB or >16MB schema blob), + probably forming a tree, similar to files. but rather than rolling checksum, + just split lexically when nodes get too big. + +-- delete mostly-obsolete camsigd. see big TODO in camsigd.go. + +-- we used to be able to live-edit js/css files in server/camlistored/ui when + running under the App Engine dev_appserver.py. That's now broken with my + latest efforts to revive it. The place to start looking is: + server/camlistored/ui/fileembed_appengine.go + +-- should a "share" claim be not a claim but its own permanode, so it + can be rescinded? right now you can't really unshare a "haveref" + claim. or rather, TODO: verify we support "delete" claims to + delete any claim, and verify the share system and indexer all + support it. I think the indexer might, but not the share system. + Also TODO: "camput delete" or "rescind" subcommand. + Also TODO: document share claims in doc/schema/ and on website. + +-- make the -transitive flag for "camput share -transitive" be a tri-state: + unset, true, false, and unset should then mean default to true for "file" + and "directory" schema blobs, and "false" for other things. + +-- index: static directory recursive sizes: search: ask to see biggest directories? + +-- index: index dates in filenames ("yyyy-mm-dd-Foo-Trip", "yyyy-mm blah", etc). + +-- get webdav server working again, for mounting on Windows. This worked before Go 1 + but bitrot when we moved pkg/fs to use the rsc/fuse. + +-- work on runsit more, so I can start using this more often. runsit should + be able to reload itself, and also watch for binaries changing and restart + when binaries change. (or symlinks to binaries) + +-- BUG: osutil paths.go on OS X: should use Library everywhere instead of mix of + Library and ~/.camlistore? + +OLD: + +-- add CORS support? 
Access-Control-Allow-Origin: * + w/ OPTIONS + http://hacks.mozilla.org/2009/07/cross-site-xmlhttprequest-with-cors/ + +-- brackup integration, perhaps sans GPG? (requires Perl client?) + +-- blobserver: clean up channel-closing consistency in blobserver interface + (most close, one doesn't. all should probably close) + +Android: + +[ ] Fix wake locks in UploadThread. need to hold CPU + WiFi whenever + something's enqueued at all and we're running. Move out of the Thread + that's uploading itself. +[ ] GPG signing of blobs (brad) + http://code.google.com/p/android-privacy-guard/ + http://www.thialfihar.org/projects/apg/ + (supports signing in code, but not an Intent?) + http://code.google.com/p/android-privacy-guard/wiki/UsingApgForDevelopment + ... mailed the author. + +Client libraries: + +[X] Go +[X] JavaScript +[/] Python (Brett); but see https://github.com/tsileo/camlipy +[ ] Perl +[ ] Ruby +[ ] PHP diff --git a/vendor/github.com/camlistore/camlistore/app/hello/main.go b/vendor/github.com/camlistore/camlistore/app/hello/main.go new file mode 100644 index 00000000..f7ede0ea --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/app/hello/main.go @@ -0,0 +1,97 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// The hello application serves as an example on how to make stand-alone +// server applications, interacting with a Camlistore server. 
+package main + +import ( + "flag" + "fmt" + "log" + "net/http" + "os" + "runtime" + + "camlistore.org/pkg/app" + "camlistore.org/pkg/buildinfo" + "camlistore.org/pkg/webserver" +) + +var ( + flagVersion = flag.Bool("version", false, "show version") +) + +// config is used to unmarshal the application configuration JSON +// that we get from Camlistore when we request it at $CAMLI_APP_CONFIG_URL. +type config struct { + Word string `json:"word,omitempty"` // Argument printed after "Hello " in the helloHandler response. +} + +func appConfig() *config { + configURL := os.Getenv("CAMLI_APP_CONFIG_URL") + if configURL == "" { + log.Fatalf("Hello application needs a CAMLI_APP_CONFIG_URL env var") + } + cl, err := app.Client() + if err != nil { + log.Fatalf("could not get a client to fetch extra config: %v", err) + } + conf := &config{} + if err := cl.GetJSON(configURL, conf); err != nil { + log.Fatalf("could not get app config at %v: %v", configURL, err) + } + return conf +} + +type helloHandler struct { + who string // who to say hello to. +} + +func (h *helloHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("Content-Type", "text/plain; charset=utf-8") + rw.WriteHeader(200) + fmt.Fprintf(rw, "Hello %s\n", h.who) +} + +func main() { + flag.Parse() + + if *flagVersion { + fmt.Fprintf(os.Stderr, "hello version: %s\nGo version: %s (%s/%s)\n", + buildinfo.Version(), runtime.Version(), runtime.GOOS, runtime.GOARCH) + return + } + + log.Printf("Starting hello version %s; Go %s (%s/%s)", buildinfo.Version(), runtime.Version(), + runtime.GOOS, runtime.GOARCH) + + listenAddr, err := app.ListenAddress() + if err != nil { + log.Fatalf("Listen address: %v", err) + } + conf := appConfig() + ws := webserver.New() + ws.Handle("/", &helloHandler{who: conf.Word}) + // TODO(mpl): handle status requests too. Camlistore will send an auth + // token in the extra config that should be used as the "password" for + // subsequent status requests. 
+ if err := ws.Listen(listenAddr); err != nil { + log.Fatalf("Listen: %v", err) + } + + ws.Serve() +} diff --git a/vendor/github.com/camlistore/camlistore/app/publisher/fileembed.go b/vendor/github.com/camlistore/camlistore/app/publisher/fileembed.go new file mode 100644 index 00000000..3c8694a9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/app/publisher/fileembed.go @@ -0,0 +1,32 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +The publisher application serves and renders items published by Camlistore. +That is, items that are children, through a (direct or not) camliPath relation, +of a camliRoot node (a permanode with a camliRoot attribute set). + +#fileembed pattern .+\.(js|css|html|png|svg)$ +*/ +package main + +import ( + "camlistore.org/pkg/fileembed" +) + +// TODO(mpl): appengine case + +var Files = &fileembed.Files{} diff --git a/vendor/github.com/camlistore/camlistore/app/publisher/gallery.html b/vendor/github.com/camlistore/camlistore/app/publisher/gallery.html new file mode 100644 index 00000000..90a54baa --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/app/publisher/gallery.html @@ -0,0 +1,53 @@ + + +{{if $header := call .Header}} + + {{$header.Title}} + {{range $css := $header.CSSFiles}} + + {{end}} + + + +

{{$header.Title}}

+ {{if $file := call .File}} +
File: {{$file.FileName}}, {{$file.Size}} bytes, type {{$file.MIMEType}}
+ {{if $file.IsImage}} + + {{end}} + + {{if $nav := call $file.Nav}} +
+ {{if $prev := $nav.PrevPath}}[prev] {{end}} + {{if $up := $nav.ParentPath}}[up] {{end}} + {{if $next := $nav.NextPath}}[next] {{end}} +
+ {{end}} + {{else}} + {{if $membersData := call .Members}} + + + + {{end}} + {{end}} +{{end}} + + diff --git a/vendor/github.com/camlistore/camlistore/app/publisher/main.go b/vendor/github.com/camlistore/camlistore/app/publisher/main.go new file mode 100644 index 00000000..cbbb4a82 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/app/publisher/main.go @@ -0,0 +1,1008 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "html" + "html/template" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + + "camlistore.org/pkg/app" + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/blobserver/localdisk" + "camlistore.org/pkg/buildinfo" + "camlistore.org/pkg/constants" + "camlistore.org/pkg/fileembed" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/publish" + "camlistore.org/pkg/search" + "camlistore.org/pkg/server" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/syncutil" + "camlistore.org/pkg/types/camtypes" + "camlistore.org/pkg/webserver" + + _ "camlistore.org/pkg/sorted/kvfile" +) + +var ( + flagVersion = flag.Bool("version", false, "show version") +) + +var ( + logger = log.New(os.Stderr, "PUBLISHER: ", log.LstdFlags) + logf = logger.Printf +) + +// config is used to unmarshal the application configuration JSON +// that we get from 
Camlistore when we request it at $CAMLI_APP_CONFIG_URL. +type config struct { + HTTPSCert string `json:"httpsCert,omitempty"` // Path to the HTTPS certificate file. + HTTPSKey string `json:"httpsKey,omitempty"` // Path to the HTTPS key file. + RootName string `json:"camliRoot"` // Publish root name (i.e. value of the camliRoot attribute on the root permanode). + MaxResizeBytes int64 `json:"maxResizeBytes,omitempty"` // See constants.DefaultMaxResizeMem + SourceRoot string `json:"sourceRoot,omitempty"` // Path to the app's resources dir, such as html and css files. + GoTemplate string `json:"goTemplate"` // Go html template to render the publication. + CacheRoot string `json:"cacheRoot,omitempty"` // Root path for the caching blobserver. No caching if empty. +} + +func appConfig() *config { + configURL := os.Getenv("CAMLI_APP_CONFIG_URL") + if configURL == "" { + logger.Fatalf("Publisher application needs a CAMLI_APP_CONFIG_URL env var") + } + cl, err := app.Client() + if err != nil { + logger.Fatalf("could not get a client to fetch extra config: %v", err) + } + conf := &config{} + if err := cl.GetJSON(configURL, conf); err != nil { + logger.Fatalf("could not get app config at %v: %v", configURL, err) + } + return conf +} + +func main() { + flag.Parse() + + if *flagVersion { + fmt.Fprintf(os.Stderr, "publisher version: %s\nGo version: %s (%s/%s)\n", + buildinfo.Version(), runtime.Version(), runtime.GOOS, runtime.GOARCH) + return + } + + logf("Starting publisher version %s; Go %s (%s/%s)", buildinfo.Version(), runtime.Version(), + runtime.GOOS, runtime.GOARCH) + + listenAddr, err := app.ListenAddress() + if err != nil { + logger.Fatalf("Listen address: %v", err) + } + conf := appConfig() + ph := newPublishHandler(conf) + if err := ph.initRootNode(); err != nil { + logf("%v", err) + } + ws := webserver.New() + ws.Logger = logger + ws.Handle("/", ph) + if conf.HTTPSCert != "" && conf.HTTPSKey != "" { + ws.SetTLS(conf.HTTPSCert, conf.HTTPSKey) + } + if err := 
ws.Listen(listenAddr); err != nil { + logger.Fatalf("Listen: %v", err) + } + ws.Serve() +} + +func newPublishHandler(conf *config) *publishHandler { + cl, err := app.Client() + if err != nil { + logger.Fatalf("could not get a client for the publish handler %v", err) + } + if conf.RootName == "" { + logger.Fatal("camliRoot not found in the app configuration") + } + maxResizeBytes := conf.MaxResizeBytes + if maxResizeBytes == 0 { + maxResizeBytes = constants.DefaultMaxResizeMem + } + var CSSFiles []string + if conf.SourceRoot != "" { + appRoot := filepath.Join(conf.SourceRoot, "app", "publisher") + Files = &fileembed.Files{ + DirFallback: appRoot, + } + // TODO(mpl): Can I readdir by listing with "/" on Files, even with DirFallBack? + // Apparently not, but retry later. + dir, err := os.Open(appRoot) + if err != nil { + logger.Fatal(err) + } + defer dir.Close() + names, err := dir.Readdirnames(-1) + if err != nil { + logger.Fatal(err) + } + for _, v := range names { + if strings.HasSuffix(v, ".css") { + CSSFiles = append(CSSFiles, v) + } + } + } else { + Files.Listable = true + dir, err := Files.Open("/") + if err != nil { + logger.Fatal(err) + } + defer dir.Close() + fis, err := dir.Readdir(-1) + if err != nil { + logger.Fatal(err) + } + for _, v := range fis { + name := v.Name() + if strings.HasSuffix(name, ".css") { + CSSFiles = append(CSSFiles, name) + } + } + } + // TODO(mpl): add all htmls found in Files to the template if none specified? 
+ if conf.GoTemplate == "" { + logger.Fatal("a go template is required in the app configuration") + } + goTemplate, err := goTemplate(Files, conf.GoTemplate) + if err != nil { + logger.Fatal(err) + } + serverURL := os.Getenv("CAMLI_API_HOST") + if serverURL == "" { + logger.Fatal("CAMLI_API_HOST var not set") + } + var cache blobserver.Storage + var thumbMeta *server.ThumbMeta + if conf.CacheRoot != "" { + cache, err = localdisk.New(conf.CacheRoot) + if err != nil { + logger.Fatalf("Could not create localdisk cache: %v", err) + } + thumbsCacheDir := filepath.Join(os.TempDir(), "camli-publisher-cache") + if err := os.MkdirAll(thumbsCacheDir, 0700); err != nil { + logger.Fatalf("Could not create cache dir %s for %v publisher: %v", thumbsCacheDir, conf.RootName, err) + } + kv, err := sorted.NewKeyValue(map[string]interface{}{ + "type": "kv", + "file": filepath.Join(thumbsCacheDir, conf.RootName+"-thumbnails.kv"), + }) + if err != nil { + logger.Fatalf("Could not create kv for %v's thumbs cache: %v", conf.RootName, err) + } + thumbMeta = server.NewThumbMeta(kv) + } + + return &publishHandler{ + rootName: conf.RootName, + cl: cl, + resizeSem: syncutil.NewSem(maxResizeBytes), + staticFiles: Files, + goTemplate: goTemplate, + CSSFiles: CSSFiles, + describedCache: make(map[string]*search.DescribedBlob), + cache: cache, + thumbMeta: thumbMeta, + } +} + +func goTemplate(files *fileembed.Files, templateFile string) (*template.Template, error) { + f, err := files.Open(templateFile) + if err != nil { + return nil, fmt.Errorf("Could not open template %v: %v", templateFile, err) + } + defer f.Close() + templateBytes, err := ioutil.ReadAll(f) + if err != nil { + return nil, fmt.Errorf("Could not read template %v: %v", templateFile, err) + } + return template.Must(template.New("subject").Parse(string(templateBytes))), nil +} + +// We're using this interface in a publishHandler, instead of directly +// a *client.Client, so we can use a fake client in tests. 
+type client interface { + search.QueryDescriber + GetJSON(url string, data interface{}) error + Post(url string, bodyType string, body io.Reader) error + blob.Fetcher +} + +type publishHandler struct { + rootName string // Publish root name (i.e. value of the camliRoot attribute on the root permanode). + + rootNodeMu sync.Mutex + rootNode blob.Ref // Root permanode, origin of all camliPaths for this publish handler. + + cl client // Used for searching, and remote storage. + + staticFiles *fileembed.Files // For static resources. + goTemplate *template.Template // For publishing/rendering. + CSSFiles []string + resizeSem *syncutil.Sem // Limit peak RAM used by concurrent image thumbnail calls. + + describedCacheMu sync.RWMutex + describedCache map[string]*search.DescribedBlob // So that each item in a gallery does not actually require a describe round-trip. + + cache blobserver.Storage // For caching images and files, or nil. + thumbMeta *server.ThumbMeta // For keeping track of cached images, or nil. +} + +func (ph *publishHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ph.rootNodeMu.Lock() + if !ph.rootNode.Valid() { + // we want to retry doing this every time because the rootNode could have been created + // (by e.g. the owner) since last time. 
+ err := ph.initRootNode() + if err != nil { + httputil.ServeError(w, r, fmt.Errorf("No publish root node: %v", err)) + ph.rootNodeMu.Unlock() + return + } + } + ph.rootNodeMu.Unlock() + + preq, err := ph.NewRequest(w, r) + if err != nil { + httputil.ServeError(w, r, fmt.Errorf("Could not create publish request: %v", err)) + return + } + preq.serveHTTP() +} + +func (ph *publishHandler) initRootNode() error { + var getRootNode = func() (blob.Ref, error) { + result, err := ph.camliRootQuery() + if err != nil { + return blob.Ref{}, fmt.Errorf("could not find permanode for root %q of publish handler: %v", ph.rootName, err) + } + if len(result.Blobs) == 0 || !result.Blobs[0].Blob.Valid() { + return blob.Ref{}, fmt.Errorf("could not find permanode for root %q of publish handler: %v", ph.rootName, os.ErrNotExist) + } + return result.Blobs[0].Blob, nil + } + node, err := getRootNode() + if err != nil { + return err + } + ph.rootNode = node + return nil +} + +func (ph *publishHandler) camliRootQuery() (*search.SearchResult, error) { + // TODO(mpl): I've voluntarily omitted the owner because it's not clear to + // me that we actually care about that. Same for signer in lookupPathTarget. 
+ return ph.cl.Query(&search.SearchQuery{ + Limit: 1, + Constraint: &search.Constraint{ + Permanode: &search.PermanodeConstraint{ + Attr: "camliRoot", + Value: ph.rootName, + }, + }, + }) +} + +func (ph *publishHandler) lookupPathTarget(root blob.Ref, suffix string) (blob.Ref, error) { + if suffix == "" { + return root, nil + } + // TODO: verify it's optimized: http://camlistore.org/issue/405 + result, err := ph.cl.Query(&search.SearchQuery{ + Limit: 1, + Constraint: &search.Constraint{ + Permanode: &search.PermanodeConstraint{ + SkipHidden: true, + Relation: &search.RelationConstraint{ + Relation: "parent", + EdgeType: "camliPath:" + suffix, + Any: &search.Constraint{ + BlobRefPrefix: root.String(), + }, + }, + }, + }, + }) + if err != nil { + return blob.Ref{}, err + } + if len(result.Blobs) == 0 || !result.Blobs[0].Blob.Valid() { + return blob.Ref{}, os.ErrNotExist + } + return result.Blobs[0].Blob, nil +} + +// Given a blobref and a few hex characters of the digest of the next hop, return the complete +// blobref of the prefix, if that's a valid next hop. +func (ph *publishHandler) resolvePrefixHop(parent blob.Ref, prefix string) (child blob.Ref, err error) { + // TODO: this is a linear scan right now. this should be + // optimized to use a new database table of members so this is + // a quick lookup. in the meantime it should be in memcached + // at least. 
+ if len(prefix) < 8 { + return blob.Ref{}, fmt.Errorf("Member prefix %q too small", prefix) + } + des, err := ph.describe(parent) + if err != nil { + return blob.Ref{}, fmt.Errorf("Failed to describe member %q in parent %q", prefix, parent) + } + if des.Permanode != nil { + cr, ok := des.ContentRef() + if ok && strings.HasPrefix(cr.Digest(), prefix) { + return cr, nil + } + for _, member := range des.Members() { + if strings.HasPrefix(member.BlobRef.Digest(), prefix) { + return member.BlobRef, nil + } + } + crdes, err := ph.describe(cr) + if err != nil { + return blob.Ref{}, fmt.Errorf("Failed to describe content %q of parent %q", cr, parent) + } + if crdes.Dir != nil { + return ph.resolvePrefixHop(cr, prefix) + } + } else if des.Dir != nil { + for _, child := range des.DirChildren { + if strings.HasPrefix(child.Digest(), prefix) { + return child, nil + } + } + } + return blob.Ref{}, fmt.Errorf("Member prefix %q not found in %q", prefix, parent) +} + +func (ph *publishHandler) describe(br blob.Ref) (*search.DescribedBlob, error) { + ph.describedCacheMu.RLock() + if des, ok := ph.describedCache[br.String()]; ok { + ph.describedCacheMu.RUnlock() + return des, nil + } + ph.describedCacheMu.RUnlock() + res, err := ph.cl.Describe(&search.DescribeRequest{ + BlobRef: br, + Depth: 1, + }) + if err != nil { + return nil, fmt.Errorf("Could not describe %v: %v", br, err) + } + return res.Meta[br.String()], nil +} + +func (ph *publishHandler) deepDescribe(br blob.Ref) (*search.DescribeResponse, error) { + res, err := ph.cl.Query(&search.SearchQuery{ + Constraint: &search.Constraint{ + BlobRefPrefix: br.String(), + CamliType: "permanode", + }, + Describe: &search.DescribeRequest{ + Depth: 1, + Rules: []*search.DescribeRule{ + { + Attrs: []string{"camliContent", "camliContentImage", "camliMember", "camliPath:*"}, + }, + }, + }, + Limit: -1, + }) + if err != nil { + return nil, fmt.Errorf("Could not deep describe %v: %v", br, err) + } + if res == nil || res.Describe == nil { + 
return nil, fmt.Errorf("no describe result for %v", br) + } + return res.Describe, nil +} + +// publishRequest is the state around a single HTTP request to the +// publish handler +type publishRequest struct { + ph *publishHandler + rw http.ResponseWriter + req *http.Request + base, suffix, subres string + rootpn blob.Ref + subject blob.Ref + inSubjectChain map[string]bool // blobref -> true + subjectBasePath string +} + +func (ph *publishHandler) NewRequest(rw http.ResponseWriter, req *http.Request) (*publishRequest, error) { + // splits a path request into its suffix and subresource parts. + // e.g. /blog/foo/camli/res/file/xxx -> ("foo", "file/xxx") + suffix, res := httputil.PathSuffix(req), "" + if strings.HasPrefix(suffix, "-/") { + suffix, res = "", suffix[2:] + } else if s := strings.SplitN(suffix, "/-/", 2); len(s) == 2 { + suffix, res = s[0], s[1] + } + + return &publishRequest{ + ph: ph, + rw: rw, + req: req, + suffix: suffix, + base: httputil.PathBase(req), + subres: res, + rootpn: ph.rootNode, + inSubjectChain: make(map[string]bool), + subjectBasePath: "", + }, nil +} + +func (pr *publishRequest) serveHTTP() { + if !pr.rootpn.Valid() { + pr.rw.WriteHeader(404) + return + } + + if pr.Debug() { + pr.rw.Header().Set("Content-Type", "text/html") + pr.pf("I am publish handler at base %q, serving root %q (permanode=%s), suffix %q, subreq %q
", + pr.base, pr.ph.rootName, pr.rootpn, html.EscapeString(pr.suffix), html.EscapeString(pr.subres)) + } + + if err := pr.findSubject(); err != nil { + if err == os.ErrNotExist { + pr.rw.WriteHeader(404) + return + } + logf("Error looking up %s/%q: %v", pr.rootpn, pr.suffix, err) + pr.rw.WriteHeader(500) + return + } + + if pr.Debug() { + pr.pf("

Subject: %s

", pr.subject, pr.subject) + return + } + + switch pr.subresourceType() { + case "": + pr.serveSubjectTemplate() + case "b": + // TODO: download a raw blob + case "f": // file download + pr.serveSubresFileDownload() + case "i": // image, scaled + pr.serveSubresImage() + case "s": // static + pr.req.URL.Path = pr.subres[len("/=s"):] + if len(pr.req.URL.Path) <= 1 { + http.Error(pr.rw, "Illegal URL.", http.StatusNotFound) + return + } + file := pr.req.URL.Path[1:] + server.ServeStaticFile(pr.rw, pr.req, pr.ph.staticFiles, file) + case "z": + pr.serveZip() + default: + pr.rw.WriteHeader(400) + pr.pf("

Invalid or unsupported resource request.

") + } +} + +func (pr *publishRequest) Debug() bool { + return pr.req.FormValue("debug") == "1" +} + +var memberRE = regexp.MustCompile(`^/?h([0-9a-f]+)`) + +func (pr *publishRequest) findSubject() error { + if strings.HasPrefix(pr.suffix, "=s/") { + pr.subres = "/" + pr.suffix + return nil + } + + subject, err := pr.ph.lookupPathTarget(pr.rootpn, pr.suffix) + if err != nil { + return err + } + if strings.HasPrefix(pr.subres, "=z/") { + // this happens when we are at the root of the published path, + // e.g /base/suffix/-/=z/foo.zip + // so we need to reset subres as fullpath so that it is detected + // properly when switching on pr.subresourceType() + pr.subres = "/" + pr.subres + // since we return early, we set the subject because that is + // what is going to be used as a root node by the zip handler. + pr.subject = subject + return nil + } + + pr.inSubjectChain[subject.String()] = true + pr.subjectBasePath = pr.base + pr.suffix + + // Chase /h hops in suffix. + for { + m := memberRE.FindStringSubmatch(pr.subres) + if m == nil { + break + } + match, memberPrefix := m[0], m[1] + + if err != nil { + return fmt.Errorf("Error looking up potential member %q in describe of subject %q: %v", + memberPrefix, subject, err) + } + + subject, err = pr.ph.resolvePrefixHop(subject, memberPrefix) + if err != nil { + return err + } + pr.inSubjectChain[subject.String()] = true + pr.subres = pr.subres[len(match):] + pr.subjectBasePath = addPathComponent(pr.subjectBasePath, match) + } + + pr.subject = subject + return nil +} + +func (pr *publishRequest) subresourceType() string { + if len(pr.subres) >= 3 && strings.HasPrefix(pr.subres, "/=") { + return pr.subres[2:3] + } + return "" +} + +func (pr *publishRequest) pf(format string, args ...interface{}) { + fmt.Fprintf(pr.rw, format, args...) 
+} + +func addPathComponent(base, addition string) string { + if !strings.HasPrefix(addition, "/") { + addition = "/" + addition + } + if strings.Contains(base, "/-/") { + return base + addition + } + return base + "/-" + addition +} + +const ( + resSeparator = "/-" + digestPrefix = "h" + digestLen = 10 +) + +// var hopRE = regexp.MustCompile(fmt.Sprintf("^/%s([0-9a-f]{%d})", digestPrefix, digestLen)) + +func getFileInfo(item blob.Ref, peers map[string]*search.DescribedBlob) (path []blob.Ref, fi *camtypes.FileInfo, ok bool) { + described := peers[item.String()] + if described == nil || + described.Permanode == nil || + described.Permanode.Attr == nil { + return + } + contentRef := described.Permanode.Attr.Get("camliContent") + if contentRef == "" { + return + } + if cdes := peers[contentRef]; cdes != nil && cdes.File != nil { + return []blob.Ref{described.BlobRef, cdes.BlobRef}, cdes.File, true + } + return +} + +// serveSubjectTemplate creates the funcs to generate the PageHeader, PageFile, +// and pageMembers that can be used by the subject template, and serves the template. 
+func (pr *publishRequest) serveSubjectTemplate() { + res, err := pr.ph.deepDescribe(pr.subject) + if err != nil { + httputil.ServeError(pr.rw, pr.req, err) + return + } + pr.ph.cacheDescribed(res.Meta) + + subdes := res.Meta[pr.subject.String()] + if subdes.CamliType == "file" { + pr.serveFileDownload(subdes) + return + } + + headerFunc := func() *publish.PageHeader { + return pr.subjectHeader(res.Meta) + } + fileFunc := func() *publish.PageFile { + file, err := pr.subjectFile(res.Meta) + if err != nil { + logf("%v", err) + return nil + } + return file + } + membersFunc := func() *publish.PageMembers { + members, err := pr.subjectMembers(res.Meta) + if err != nil { + logf("%v", err) + return nil + } + return members + } + page := &publish.SubjectPage{ + Header: headerFunc, + File: fileFunc, + Members: membersFunc, + } + + err = pr.ph.goTemplate.Execute(pr.rw, page) + if err != nil { + logf("Error serving subject template: %v", err) + http.Error(pr.rw, "Error serving template", http.StatusInternalServerError) + return + } +} + +const cacheSize = 1000 + +func (ph *publishHandler) cacheDescribed(described map[string]*search.DescribedBlob) { + ph.describedCacheMu.Lock() + defer ph.describedCacheMu.Unlock() + if len(ph.describedCache) > cacheSize { + ph.describedCache = described + return + } + for k, v := range described { + ph.describedCache[k] = v + } +} + +func (pr *publishRequest) serveFileDownload(des *search.DescribedBlob) { + fileref, fileinfo, ok := pr.fileSchemaRefFromBlob(des) + if !ok { + logf("Didn't get file schema from described blob %q", des.BlobRef) + return + } + mime := "" + if fileinfo != nil && fileinfo.IsImage() { + mime = fileinfo.MIMEType + } + dh := &server.DownloadHandler{ + Fetcher: pr.ph.cl, + Cache: pr.ph.cache, + ForceMIME: mime, + } + dh.ServeHTTP(pr.rw, pr.req, fileref) +} + +// Given a described blob, optionally follows a camliContent and +// returns the file's schema blobref and its fileinfo (if found). 
+func (pr *publishRequest) fileSchemaRefFromBlob(des *search.DescribedBlob) (fileref blob.Ref, fileinfo *camtypes.FileInfo, ok bool) { + if des == nil { + http.NotFound(pr.rw, pr.req) + return + } + if des.Permanode != nil { + // TODO: get "forceMime" attr out of the permanode? or + // fileName content-disposition? + if cref := des.Permanode.Attr.Get("camliContent"); cref != "" { + cbr, ok2 := blob.Parse(cref) + if !ok2 { + http.Error(pr.rw, "bogus camliContent", 500) + return + } + des = des.PeerBlob(cbr) + if des == nil { + http.Error(pr.rw, "camliContent not a peer in describe", 500) + return + } + } + } + if des.CamliType == "file" { + return des.BlobRef, des.File, true + } + http.Error(pr.rw, "failed to find fileSchemaRefFromBlob", 404) + return +} + +// subjectHeader returns the PageHeader corresponding to the described subject. +func (pr *publishRequest) subjectHeader(described map[string]*search.DescribedBlob) *publish.PageHeader { + subdes := described[pr.subject.String()] + header := &publish.PageHeader{ + Title: html.EscapeString(getTitle(subdes.BlobRef, described)), + CSSFiles: pr.cssFiles(), + Meta: func() string { + jsonRes, _ := json.MarshalIndent(described, "", " ") + return string(jsonRes) + }(), + Subject: pr.subject.String(), + } + return header +} + +func (pr *publishRequest) cssFiles() []string { + files := []string{} + for _, filename := range pr.ph.CSSFiles { + files = append(files, pr.staticPath(filename)) + } + return files +} + +func (pr *publishRequest) staticPath(fileName string) string { + return pr.base + "=s/" + fileName +} + +func getTitle(item blob.Ref, peers map[string]*search.DescribedBlob) string { + described := peers[item.String()] + if described == nil { + return "" + } + if described.Permanode != nil { + if t := described.Permanode.Attr.Get("title"); t != "" { + return t + } + if contentRef := described.Permanode.Attr.Get("camliContent"); contentRef != "" { + if cdes := peers[contentRef]; cdes != nil { + return 
getTitle(cdes.BlobRef, peers) + } + } + } + if described.File != nil { + return described.File.FileName + } + if described.Dir != nil { + return described.Dir.FileName + } + return "" +} + +// subjectFile returns the relevant PageFile if the described subject is a file permanode. +func (pr *publishRequest) subjectFile(described map[string]*search.DescribedBlob) (*publish.PageFile, error) { + subdes := described[pr.subject.String()] + contentRef, ok := subdes.ContentRef() + if !ok { + return nil, nil + } + fileDes, err := pr.ph.describe(contentRef) + if err != nil { + return nil, err + } + if fileDes.File == nil { + // most likely a dir + return nil, nil + } + + path := []blob.Ref{pr.subject, contentRef} + downloadURL := pr.SubresFileURL(path, fileDes.File.FileName) + thumbnailURL := "" + if fileDes.File.IsImage() { + thumbnailURL = pr.SubresThumbnailURL(path, fileDes.File.FileName, 600) + } + fileName := html.EscapeString(fileDes.File.FileName) + return &publish.PageFile{ + FileName: fileName, + Size: fileDes.File.Size, + MIMEType: fileDes.File.MIMEType, + IsImage: fileDes.File.IsImage(), + DownloadURL: downloadURL, + ThumbnailURL: thumbnailURL, + DomID: contentRef.DomID(), + Nav: func() *publish.Nav { + return nil + }, + }, nil +} + +func (pr *publishRequest) SubresFileURL(path []blob.Ref, fileName string) string { + return pr.SubresThumbnailURL(path, fileName, -1) +} + +func (pr *publishRequest) SubresThumbnailURL(path []blob.Ref, fileName string, maxDimen int) string { + var buf bytes.Buffer + resType := "i" + if maxDimen == -1 { + resType = "f" + } + fmt.Fprintf(&buf, "%s", pr.subjectBasePath) + if !strings.Contains(pr.subjectBasePath, "/-/") { + buf.Write([]byte("/-")) + } + for _, br := range path { + if pr.inSubjectChain[br.String()] { + continue + } + fmt.Fprintf(&buf, "/h%s", br.DigestPrefix(10)) + } + fmt.Fprintf(&buf, "/=%s", resType) + fmt.Fprintf(&buf, "/%s", url.QueryEscape(fileName)) + if maxDimen != -1 { + fmt.Fprintf(&buf, "?mw=%d&mh=%d", maxDimen, 
maxDimen) + } + return buf.String() +} + +// subjectMembers returns the relevant PageMembers if the described subject is a permanode with members. +func (pr *publishRequest) subjectMembers(resMap map[string]*search.DescribedBlob) (*publish.PageMembers, error) { + subdes := resMap[pr.subject.String()] + res, err := pr.ph.describeMembers(pr.subject) + if err != nil { + return nil, err + } + members := []*search.DescribedBlob{} + for _, v := range res.Blobs { + members = append(members, res.Describe.Meta[v.Blob.String()]) + } + if len(members) == 0 { + return nil, nil + } + + zipName := "" + if title := getTitle(subdes.BlobRef, resMap); title == "" { + zipName = "download.zip" + } else { + zipName = title + ".zip" + } + subjectPath := pr.subjectBasePath + if !strings.Contains(subjectPath, "/-/") { + subjectPath += "/-" + } + + return &publish.PageMembers{ + SubjectPath: subjectPath, + ZipName: zipName, + Members: members, + Description: func(member *search.DescribedBlob) string { + des := member.Description() + if des != "" { + des = " - " + des + } + return des + }, + Title: func(member *search.DescribedBlob) string { + memberTitle := getTitle(member.BlobRef, resMap) + if memberTitle == "" { + memberTitle = member.BlobRef.DigestPrefix(10) + } + return html.EscapeString(memberTitle) + }, + Path: func(member *search.DescribedBlob) string { + return pr.memberPath(member.BlobRef) + }, + DomID: func(member *search.DescribedBlob) string { + return member.DomID() + }, + FileInfo: func(member *search.DescribedBlob) *publish.MemberFileInfo { + if path, fileInfo, ok := getFileInfo(member.BlobRef, resMap); ok { + info := &publish.MemberFileInfo{ + FileName: fileInfo.FileName, + FileDomID: path[len(path)-1].DomID(), + FilePath: html.EscapeString(pr.SubresFileURL(path, fileInfo.FileName)), + } + if fileInfo.IsImage() { + info.FileThumbnailURL = pr.SubresThumbnailURL(path, fileInfo.FileName, 200) + } + return info + } + return nil + }, + }, nil +} + +func (ph *publishHandler) 
describeMembers(br blob.Ref) (*search.SearchResult, error) { + res, err := ph.cl.Query(&search.SearchQuery{ + Constraint: &search.Constraint{ + Permanode: &search.PermanodeConstraint{ + Relation: &search.RelationConstraint{ + Relation: "parent", + Any: &search.Constraint{ + BlobRefPrefix: br.String(), + }, + }, + }, + CamliType: "permanode", + }, + Describe: &search.DescribeRequest{ + Depth: 1, + Rules: []*search.DescribeRule{ + { + Attrs: []string{"camliContent", "camliContentImage"}, + }, + }, + }, + Limit: -1, + }) + if err != nil { + return nil, fmt.Errorf("Could not describe members of %v: %v", br, err) + } + return res, nil +} + +func (pr *publishRequest) memberPath(member blob.Ref) string { + return addPathComponent(pr.subjectBasePath, "/h"+member.DigestPrefix(10)) +} + +func (pr *publishRequest) serveSubresFileDownload() { + des, err := pr.ph.describe(pr.subject) + if err != nil { + logf("error describing subject %q: %v", pr.subject, err) + return + } + pr.serveFileDownload(des) +} + +func (pr *publishRequest) serveSubresImage() { + params := pr.req.URL.Query() + mw, _ := strconv.Atoi(params.Get("mw")) + mh, _ := strconv.Atoi(params.Get("mh")) + des, err := pr.ph.describe(pr.subject) + if err != nil { + logf("error describing subject %q: %v", pr.subject, err) + return + } + pr.serveScaledImage(des, mw, mh, params.Get("square") == "1") +} + +func (pr *publishRequest) serveScaledImage(des *search.DescribedBlob, maxWidth, maxHeight int, square bool) { + fileref, _, ok := pr.fileSchemaRefFromBlob(des) + if !ok { + logf("scaled image fail; failed to get file schema from des %q", des.BlobRef) + return + } + ih := &server.ImageHandler{ + Fetcher: pr.ph.cl, + Cache: pr.ph.cache, + MaxWidth: maxWidth, + MaxHeight: maxHeight, + Square: square, + ThumbMeta: pr.ph.thumbMeta, + ResizeSem: pr.ph.resizeSem, + } + ih.ServeHTTP(pr.rw, pr.req, fileref) +} + +// serveZip streams a zip archive of all the files "under" +// pr.subject. 
That is, all the files pointed by file permanodes, +// which are directly members of pr.subject or recursively down +// directory permanodes and permanodes members. +func (pr *publishRequest) serveZip() { + filename := "" + if len(pr.subres) > len("/=z/") { + filename = pr.subres[4:] + } + zh := &zipHandler{ + fetcher: pr.ph.cl, + cl: pr.ph.cl, + root: pr.subject, + filename: filename, + } + zh.ServeHTTP(pr.rw, pr.req) +} diff --git a/vendor/github.com/camlistore/camlistore/app/publisher/pics.css b/vendor/github.com/camlistore/camlistore/app/publisher/pics.css new file mode 100644 index 00000000..be091a75 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/app/publisher/pics.css @@ -0,0 +1,112 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* Something arbitrary for testing. 
*/ +body { + font: 13px/1.3 normal Verdana, Geneva, sans-serif; + background: #000; + color: #aaa; + margin: 0; + padding: 30px; +} +a { + color: #aaa; +} +a:hover { + color: #bbb; +} +h1 { + border: 3px dashed #aaa; + padding: 1em; +} +ul { + list-style: none; + background: #262626; + + margin: 0; + padding: 20px; + border-radius: 10px; +} +li { + display: inline-block; + vertical-align: top; + padding: 1em; + margin: 1em; + text-align: right; +} +li:hover { + background: #000; + border-radius: 10px; + border-bottom-left-radius: 0; + border-bottom-right-radius: 0; +} +li:hover img { + xmax-height: none; + xmax-width: none; +} +li a { + font-weight: bold; +} +li img { + border: 0; + border-radius: 6px; + display: block; + max-height: 200px; + max-width: 200px; + margin-bottom: 1em; +} +li .camlifile { + display: none; +} +li a span { + padding: 2px; + border: 1px solid transparent; +} +li input { + text-align: right; +} + +a.title-edit, +a.title-edit:hover { + font-size: 70%; + color: #f00; + margin-right: .5em; + font-weight: normal; +} + +a.hidden { + display: none; +} + +a.visible { + display: inline; +} + +input.hidden { + display: none; +} + +input.visible { + display: inline-block; +} + +span.hidden { + display: none; +} + +span.visible { + display: inline-block; +} diff --git a/vendor/github.com/camlistore/camlistore/app/publisher/publish_test.go b/vendor/github.com/camlistore/camlistore/app/publisher/publish_test.go new file mode 100644 index 00000000..62f0ff91 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/app/publisher/publish_test.go @@ -0,0 +1,234 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + camliClient "camlistore.org/pkg/client" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/index" + "camlistore.org/pkg/index/indextest" + "camlistore.org/pkg/search" +) + +type publishURLTest struct { + path string // input + subject, subres string // expected +} + +var publishURLTests []publishURLTest + +func setupContent(rootName string) *indextest.IndexDeps { + idx := index.NewMemoryIndex() + idxd := indextest.NewIndexDeps(idx) + + picNode := idxd.NewPlannedPermanode("picpn-1234") // sha1-f5e90fcc50a79caa8b22a4aa63ba92e436cab9ec + galRef := idxd.NewPlannedPermanode("gal-1234") // sha1-2bdf2053922c3dfa70b01a4827168fce1c1df691 + rootRef := idxd.NewPlannedPermanode("root-abcd") // sha1-dbb3e5f28c7e01536d43ce194f3dd7b921b8460d + camp0 := idxd.NewPlannedPermanode("picpn-9876543210") // sha1-2d473e07ca760231dd82edeef4019d5b7d0ccb42 + camp1 := idxd.NewPlannedPermanode("picpn-9876543211") // sha1-961b700536d5151fc1f3920955cc92767572a064 + camp0f, _ := idxd.UploadFile("picfile-f00ff00f00a5.jpg", "picfile-f00ff00f00a5", time.Time{}) // sha1-01dbcb193fc789033fb2d08ed22abe7105b48640 + camp1f, _ := idxd.UploadFile("picfile-f00ff00f00b6.jpg", "picfile-f00ff00f00b6", time.Time{}) // sha1-1213ec17a42cc51bdeb95ff91ac1b5fc5157740f + + idxd.SetAttribute(rootRef, "camliRoot", rootName) + idxd.SetAttribute(rootRef, "camliPath:singlepic", picNode.String()) + idxd.SetAttribute(picNode, "title", "picnode without a pic?") + idxd.SetAttribute(rootRef, "camliPath:camping", 
galRef.String()) + idxd.AddAttribute(galRef, "camliMember", camp0.String()) + idxd.AddAttribute(galRef, "camliMember", camp1.String()) + idxd.SetAttribute(camp0, "camliContent", camp0f.String()) + idxd.SetAttribute(camp1, "camliContent", camp1f.String()) + + publishURLTests = []publishURLTest{ + // URL to a single picture permanode (returning its HTML wrapper page) + { + path: "/pics/singlepic", + subject: picNode.String(), + }, + + // URL to a gallery permanode (returning its HTML wrapper page) + { + path: "/pics/camping", + subject: galRef.String(), + }, + + // URL to a picture permanode within a gallery (following one hop, returning HTML) + { + path: "/pics/camping/-/h2d473e07ca", + subject: camp0.String(), + }, + + // URL to a gallery -> picture permanode -> its file + // (following two hops, returning HTML) + { + path: "/pics/camping/-/h2d473e07ca/h01dbcb193f", + subject: camp0f.String(), + }, + + // URL to a gallery -> picture permanode -> its file + // (following two hops, returning the file download) + { + path: "/pics/camping/-/h2d473e07ca/h01dbcb193f/=f/marshmallow.jpg", + subject: camp0f.String(), + subres: "/=f/marshmallow.jpg", + }, + + // URL to a gallery -> picture permanode -> its file + // (following two hops, returning the file, scaled as an image) + { + path: "/pics/camping/-/h961b700536/h1213ec17a4/=i/marshmallow.jpg?mw=200&mh=200", + subject: camp1f.String(), + subres: "/=i/marshmallow.jpg", + }, + + // Path to a static file in the root. + // TODO: ditch these and use content-addressable javascript + css, having + // the server digest them on start, or rather part of fileembed. This is + // a short-term hack to unblock Lindsey. 
+ { + path: "/pics/=s/pics.js", + subject: "", + subres: "/=s/pics.js", + }, + } + + return idxd +} + +type fakeClient struct { + *camliClient.Client // for blob.Fetcher + sh *search.Handler +} + +func (fc *fakeClient) Search(req *search.SearchQuery) (*search.SearchResult, error) { + return fc.sh.Query(req) +} + +func (fc *fakeClient) Describe(req *search.DescribeRequest) (*search.DescribeResponse, error) { + return fc.sh.Describe(req) +} + +func (fc *fakeClient) GetJSON(url string, data interface{}) error { + // no need to implement + return nil +} + +func (fc *fakeClient) Post(url string, bodyType string, body io.Reader) error { + // no need to implement + return nil +} + +func TestPublishURLs(t *testing.T) { + rootName := "foo" + idxd := setupContent(rootName) + sh := search.NewHandler(idxd.Index, idxd.SignerBlobRef) + corpus, err := idxd.Index.KeepInMemory() + if err != nil { + t.Fatalf("error slurping index to memory: %v", err) + } + sh.SetCorpus(corpus) + cl := camliClient.New("http://whatever.fake") + fcl := &fakeClient{cl, sh} + ph := &publishHandler{ + rootName: rootName, + cl: fcl, + } + if err := ph.initRootNode(); err != nil { + t.Fatalf("initRootNode: %v", err) + } + + for ti, tt := range publishURLTests { + rw := httptest.NewRecorder() + if !strings.HasPrefix(tt.path, "/pics/") { + panic("expected /pics/ prefix on " + tt.path) + } + req, _ := http.NewRequest("GET", "http://foo.com"+tt.path, nil) + + pfxh := &httputil.PrefixHandler{ + Prefix: "/pics/", + Handler: http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) { + pr, err := ph.NewRequest(rw, req) + if err != nil { + t.Fatalf("test #%d, NewRequest: %v", ti, err) + } + + err = pr.findSubject() + if tt.subject != "" { + if err != nil { + t.Errorf("test #%d, findSubject: %v", ti, err) + return + } + if pr.subject.String() != tt.subject { + t.Errorf("test #%d, got subject %q, want %q", ti, pr.subject, tt.subject) + } + } + if pr.subres != tt.subres { + t.Errorf("test #%d, got subres %q, 
want %q", ti, pr.subres, tt.subres) + } + }), + } + pfxh.ServeHTTP(rw, req) + } +} + +func TestPublishMembers(t *testing.T) { + rootName := "foo" + idxd := setupContent(rootName) + + sh := search.NewHandler(idxd.Index, idxd.SignerBlobRef) + corpus, err := idxd.Index.KeepInMemory() + if err != nil { + t.Fatalf("error slurping index to memory: %v", err) + } + sh.SetCorpus(corpus) + cl := camliClient.New("http://whatever.fake") + fcl := &fakeClient{cl, sh} + ph := &publishHandler{ + rootName: rootName, + cl: fcl, + } + + rw := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://foo.com/pics", nil) + + pfxh := &httputil.PrefixHandler{ + Prefix: "/pics/", + Handler: http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) { + pr, err := ph.NewRequest(rw, req) + if err != nil { + t.Fatalf("NewRequest: %v", err) + } + + res, err := pr.ph.deepDescribe(pr.subject) + if err != nil { + t.Fatalf("deepDescribe: %v", err) + } + + members, err := pr.subjectMembers(res.Meta) + if len(members.Members) != 2 { + t.Errorf("Expected two members in publish root (one camlipath, one camlimember), got %d", len(members.Members)) + } + }), + } + pfxh.ServeHTTP(rw, req) +} diff --git a/vendor/github.com/camlistore/camlistore/app/publisher/zip.go b/vendor/github.com/camlistore/camlistore/app/publisher/zip.go new file mode 100644 index 00000000..2202e9e9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/app/publisher/zip.go @@ -0,0 +1,308 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "archive/zip" + "crypto/sha1" + "fmt" + "io" + "log" + "mime" + "net/http" + "path" + "sort" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/search" + "camlistore.org/pkg/types/camtypes" +) + +type zipHandler struct { + fetcher blob.Fetcher + cl client // Used for search and describe requests. + // root is the "parent" permanode of everything to zip. + // Either a directory permanode, or a permanode with members. + root blob.Ref + // Optional name to use in the response header + filename string +} + +// blobFile contains all the information we need about +// a file blob to add the corresponding file to a zip. +type blobFile struct { + blobRef blob.Ref + // path is the full path of the file from the root of the zip. + // slashes are always forward slashes, per the zip spec. + path string +} + +type sortedFiles []*blobFile + +func (s sortedFiles) Less(i, j int) bool { return s[i].path < s[j].path } +func (s sortedFiles) Len() int { return len(s) } +func (s sortedFiles) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (zh *zipHandler) describeMembers(br blob.Ref) (*search.DescribeResponse, error) { + res, err := zh.cl.Query(&search.SearchQuery{ + Constraint: &search.Constraint{ + BlobRefPrefix: br.String(), + CamliType: "permanode", + }, + Describe: &search.DescribeRequest{ + Depth: 1, + Rules: []*search.DescribeRule{ + { + Attrs: []string{"camliContent", "camliContentImage", "camliMember"}, + }, + }, + }, + Limit: -1, + }) + if err != nil { + return nil, fmt.Errorf("Could not describe %v: %v", br, err) + } + if res == nil || res.Describe == nil { + return nil, fmt.Errorf("no describe result for %v", br) + } + return res.Describe, nil +} + +// blobList returns the list of file blobs "under" dirBlob. 
+// It traverses permanode directories and permanode with members (collections). +func (zh *zipHandler) blobList(dirPath string, dirBlob blob.Ref) ([]*blobFile, error) { + // dr := zh.search.NewDescribeRequest() + // dr.Describe(dirBlob, 3) + // res, err := dr.Result() + // if err != nil { + // return nil, fmt.Errorf("Could not describe %v: %v", dirBlob, err) + // } + res, err := zh.describeMembers(dirBlob) + if err != nil { + return nil, err + } + + described := res.Meta[dirBlob.String()] + members := described.Members() + dirBlobPath, _, isDir := described.PermanodeDir() + if len(members) == 0 && !isDir { + return nil, nil + } + var list []*blobFile + if isDir { + dirRoot := dirBlobPath[1] + children, err := zh.blobsFromDir("/", dirRoot) + if err != nil { + return nil, fmt.Errorf("Could not get list of blobs from %v: %v", dirRoot, err) + } + list = append(list, children...) + return list, nil + } + for _, member := range members { + if fileBlobPath, fileInfo, ok := getFileInfo(member.BlobRef, res.Meta); ok { + // file + list = append(list, + &blobFile{fileBlobPath[1], path.Join(dirPath, fileInfo.FileName)}) + continue + } + if dirBlobPath, dirInfo, ok := getDirInfo(member.BlobRef, res.Meta); ok { + // directory + newZipRoot := dirBlobPath[1] + children, err := zh.blobsFromDir( + path.Join(dirPath, dirInfo.FileName), newZipRoot) + if err != nil { + return nil, fmt.Errorf("Could not get list of blobs from %v: %v", newZipRoot, err) + } + list = append(list, children...) + // TODO(mpl): we assume a directory permanode does not also have members. + // I know there is nothing preventing it, but does it make any sense? + continue + } + // it might have members, so recurse + // If it does have members, we must consider it as a pseudo dir, + // so we can build a fullpath for each of its members. + // As a dir name, we're using its title if it has one, its (shortened) + // blobref otherwise. 
+ pseudoDirName := member.Title() + if pseudoDirName == "" { + pseudoDirName = member.BlobRef.DigestPrefix(10) + } + fullpath := path.Join(dirPath, pseudoDirName) + moreMembers, err := zh.blobList(fullpath, member.BlobRef) + if err != nil { + return nil, fmt.Errorf("Could not get list of blobs from %v: %v", member.BlobRef, err) + } + list = append(list, moreMembers...) + } + return list, nil +} + +// blobsFromDir returns the list of file blobs in directory dirBlob. +// It only traverses permanode directories. +func (zh *zipHandler) blobsFromDir(dirPath string, dirBlob blob.Ref) ([]*blobFile, error) { + var list []*blobFile + dr, err := schema.NewDirReader(zh.fetcher, dirBlob) + if err != nil { + return nil, fmt.Errorf("Could not read dir blob %v: %v", dirBlob, err) + } + ent, err := dr.Readdir(-1) + if err != nil { + return nil, fmt.Errorf("Could not read dir entries: %v", err) + } + for _, v := range ent { + fullpath := path.Join(dirPath, v.FileName()) + switch v.CamliType() { + case "file": + list = append(list, &blobFile{v.BlobRef(), fullpath}) + case "directory": + children, err := zh.blobsFromDir(fullpath, v.BlobRef()) + if err != nil { + return nil, fmt.Errorf("Could not get list of blobs from %v: %v", v.BlobRef(), err) + } + list = append(list, children...) + } + } + return list, nil +} + +// renameDuplicates goes through bf to check for duplicate filepaths. +// It renames duplicate filepaths and returns a new slice, sorted by +// file path. 
+func renameDuplicates(bf []*blobFile) sortedFiles { + noDup := make(map[string]blob.Ref) + // use a map to detect duplicates and rename them + for _, file := range bf { + if _, ok := noDup[file.path]; ok { + // path already exists, so rename + suffix := 0 + var newname string + for { + suffix++ + ext := path.Ext(file.path) + newname = fmt.Sprintf("%s(%d)%s", + file.path[:len(file.path)-len(ext)], suffix, ext) + if _, ok := noDup[newname]; !ok { + break + } + } + noDup[newname] = file.blobRef + } else { + noDup[file.path] = file.blobRef + } + } + + // reinsert in a slice and sort it + var sorted sortedFiles + for p, b := range noDup { + sorted = append(sorted, &blobFile{path: p, blobRef: b}) + } + sort.Sort(sorted) + return sorted +} + +// ServeHTTP streams a zip archive of all the files "under" +// zh.root. That is, all the files pointed by file permanodes, +// which are directly members of zh.root or recursively down +// directory permanodes and permanodes members. +// To build the fullpath of a file in a collection, it uses +// the collection title if present, its blobRef otherwise, as +// a directory name. +func (zh *zipHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // TODO: use http.ServeContent, so Range requests work and downloads can be resumed. + // Will require calculating the zip length once first (ideally as cheaply as possible, + // with dummy counting writer and dummy all-zero-byte-files of a fixed size), + // and then making a dummy ReadSeeker for ServeContent that can seek to the end, + // and then seek back to the beginning, but then seeks forward make it remember + // to skip that many bytes from the archive/zip writer when answering Reads. 
+ if !httputil.IsGet(req) { + http.Error(rw, "Invalid method", http.StatusMethodNotAllowed) + return + } + bf, err := zh.blobList("/", zh.root) + if err != nil { + log.Printf("Could not serve zip for %v: %v", zh.root, err) + http.Error(rw, "Server error", http.StatusInternalServerError) + return + } + blobFiles := renameDuplicates(bf) + + // TODO(mpl): streaming directly won't work on appengine if the size goes + // over 32 MB. Deal with that. + h := rw.Header() + h.Set("Content-Type", "application/zip") + filename := zh.filename + if filename == "" { + filename = "download.zip" + } + h.Set("Content-Disposition", mime.FormatMediaType("attachment", map[string]string{"filename": filename})) + zw := zip.NewWriter(rw) + etag := sha1.New() + for _, file := range blobFiles { + etag.Write([]byte(file.blobRef.String())) + } + h.Set("Etag", fmt.Sprintf(`"%x"`, etag.Sum(nil))) + + for _, file := range blobFiles { + fr, err := schema.NewFileReader(zh.fetcher, file.blobRef) + if err != nil { + log.Printf("Can not add %v in zip, not a file: %v", file.blobRef, err) + http.Error(rw, "Server error", http.StatusInternalServerError) + return + } + f, err := zw.CreateHeader( + &zip.FileHeader{ + Name: file.path, + Method: zip.Store, + }) + if err != nil { + log.Printf("Could not create %q in zip: %v", file.path, err) + http.Error(rw, "Server error", http.StatusInternalServerError) + return + } + _, err = io.Copy(f, fr) + fr.Close() + if err != nil { + log.Printf("Could not zip %q: %v", file.path, err) + return + } + } + err = zw.Close() + if err != nil { + log.Printf("Could not close zipwriter: %v", err) + return + } +} + +// TODO(mpl): refactor with getFileInfo +func getDirInfo(item blob.Ref, peers map[string]*search.DescribedBlob) (path []blob.Ref, di *camtypes.FileInfo, ok bool) { + described := peers[item.String()] + if described == nil || + described.Permanode == nil || + described.Permanode.Attr == nil { + return + } + contentRef := described.Permanode.Attr.Get("camliContent") 
+ if contentRef == "" { + return + } + if cdes := peers[contentRef]; cdes != nil && cdes.Dir != nil { + return []blob.Ref{described.BlobRef, cdes.BlobRef}, cdes.Dir, true + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/clients/android/.classpath b/vendor/github.com/camlistore/camlistore/clients/android/.classpath new file mode 100644 index 00000000..d57ec025 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/.classpath @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/clients/android/.gitignore b/vendor/github.com/camlistore/camlistore/clients/android/.gitignore new file mode 100644 index 00000000..48dba237 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/.gitignore @@ -0,0 +1,8 @@ +build +gen +bin +local.properties +test/local.properties +test/build +test/gen +test/bin diff --git a/vendor/github.com/camlistore/camlistore/clients/android/.project b/vendor/github.com/camlistore/camlistore/clients/android/.project new file mode 100644 index 00000000..71acf2ff --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/.project @@ -0,0 +1,33 @@ + + + camlistore + + + + + + com.android.ide.eclipse.adt.ResourceManagerBuilder + + + + + com.android.ide.eclipse.adt.PreCompilerBuilder + + + + + org.eclipse.jdt.core.javabuilder + + + + + com.android.ide.eclipse.adt.ApkBuilder + + + + + + com.android.ide.eclipse.adt.AndroidNature + org.eclipse.jdt.core.javanature + + diff --git a/vendor/github.com/camlistore/camlistore/clients/android/.settings/org.eclipse.jdt.core.prefs b/vendor/github.com/camlistore/camlistore/clients/android/.settings/org.eclipse.jdt.core.prefs new file mode 100644 index 00000000..24758068 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/.settings/org.eclipse.jdt.core.prefs @@ -0,0 +1,301 @@ +eclipse.preferences.version=1 +org.eclipse.jdt.core.codeComplete.argumentPrefixes= 
+org.eclipse.jdt.core.codeComplete.argumentSuffixes= +org.eclipse.jdt.core.codeComplete.fieldPrefixes= +org.eclipse.jdt.core.codeComplete.fieldSuffixes= +org.eclipse.jdt.core.codeComplete.localPrefixes= +org.eclipse.jdt.core.codeComplete.localSuffixes= +org.eclipse.jdt.core.codeComplete.staticFieldPrefixes= +org.eclipse.jdt.core.codeComplete.staticFieldSuffixes= +org.eclipse.jdt.core.codeComplete.staticFinalFieldPrefixes= +org.eclipse.jdt.core.codeComplete.staticFinalFieldSuffixes= +org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled +org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6 +org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve +org.eclipse.jdt.core.compiler.compliance=1.6 +org.eclipse.jdt.core.compiler.debug.lineNumber=generate +org.eclipse.jdt.core.compiler.debug.localVariable=generate +org.eclipse.jdt.core.compiler.debug.sourceFile=generate +org.eclipse.jdt.core.compiler.problem.assertIdentifier=error +org.eclipse.jdt.core.compiler.problem.enumIdentifier=error +org.eclipse.jdt.core.compiler.source=1.6 +org.eclipse.jdt.core.formatter.align_type_members_on_columns=false +org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression=16 +org.eclipse.jdt.core.formatter.alignment_for_arguments_in_annotation=0 +org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant=16 +org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call=16 +org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation=16 +org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression=16 +org.eclipse.jdt.core.formatter.alignment_for_assignment=0 +org.eclipse.jdt.core.formatter.alignment_for_binary_expression=16 +org.eclipse.jdt.core.formatter.alignment_for_compact_if=16 +org.eclipse.jdt.core.formatter.alignment_for_conditional_expression=80 +org.eclipse.jdt.core.formatter.alignment_for_enum_constants=0 
+org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer=16 +org.eclipse.jdt.core.formatter.alignment_for_method_declaration=0 +org.eclipse.jdt.core.formatter.alignment_for_multiple_fields=16 +org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration=16 +org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration=16 +org.eclipse.jdt.core.formatter.alignment_for_resources_in_try=80 +org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation=16 +org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration=16 +org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration=16 +org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration=16 +org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration=16 +org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration=16 +org.eclipse.jdt.core.formatter.alignment_for_union_type_in_multicatch=16 +org.eclipse.jdt.core.formatter.blank_lines_after_imports=1 +org.eclipse.jdt.core.formatter.blank_lines_after_package=1 +org.eclipse.jdt.core.formatter.blank_lines_before_field=0 +org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration=0 +org.eclipse.jdt.core.formatter.blank_lines_before_imports=1 +org.eclipse.jdt.core.formatter.blank_lines_before_member_type=1 +org.eclipse.jdt.core.formatter.blank_lines_before_method=1 +org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk=1 +org.eclipse.jdt.core.formatter.blank_lines_before_package=0 +org.eclipse.jdt.core.formatter.blank_lines_between_import_groups=1 +org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations=1 +org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_array_initializer=end_of_line 
+org.eclipse.jdt.core.formatter.brace_position_for_block=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_block_in_case=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_enum_constant=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_method_declaration=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_switch=end_of_line +org.eclipse.jdt.core.formatter.brace_position_for_type_declaration=end_of_line +org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment=false +org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment=false +org.eclipse.jdt.core.formatter.comment.format_block_comments=true +org.eclipse.jdt.core.formatter.comment.format_header=false +org.eclipse.jdt.core.formatter.comment.format_html=true +org.eclipse.jdt.core.formatter.comment.format_javadoc_comments=true +org.eclipse.jdt.core.formatter.comment.format_line_comments=false +org.eclipse.jdt.core.formatter.comment.format_source_code=true +org.eclipse.jdt.core.formatter.comment.indent_parameter_description=true +org.eclipse.jdt.core.formatter.comment.indent_root_tags=true +org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags=insert +org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter=insert +org.eclipse.jdt.core.formatter.comment.line_length=500 +org.eclipse.jdt.core.formatter.comment.new_lines_at_block_boundaries=true +org.eclipse.jdt.core.formatter.comment.new_lines_at_javadoc_boundaries=true +org.eclipse.jdt.core.formatter.comment.preserve_white_space_between_code_and_line_comments=false +org.eclipse.jdt.core.formatter.compact_else_if=true +org.eclipse.jdt.core.formatter.continuation_indentation=2 +org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer=2 
+org.eclipse.jdt.core.formatter.disabling_tag=@formatter\:off +org.eclipse.jdt.core.formatter.enabling_tag=@formatter\:on +org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line=false +org.eclipse.jdt.core.formatter.format_line_comment_starting_on_first_column=true +org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header=true +org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header=true +org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header=true +org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header=true +org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases=true +org.eclipse.jdt.core.formatter.indent_empty_lines=false +org.eclipse.jdt.core.formatter.indent_statements_compare_to_block=true +org.eclipse.jdt.core.formatter.indent_statements_compare_to_body=true +org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases=true +org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch=false +org.eclipse.jdt.core.formatter.indentation.size=4 +org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_field=insert +org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable=insert +org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_method=insert +org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_package=insert +org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_type=insert +org.eclipse.jdt.core.formatter.insert_new_line_after_label=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement=do not insert 
+org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement=do not insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration=insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration=insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block=insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant=insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration=insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body=insert +org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration=insert +org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter=insert +org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator=insert +org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_binary_operator=insert +org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments=insert +org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters=insert +org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block=insert +org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast=insert +org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert=insert +org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case=insert +org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional=insert 
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for=insert +org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments=insert +org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters=insert +org.eclipse.jdt.core.formatter.insert_space_after_ellipsis=insert 
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer=insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_try=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while=do not insert 
+org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional=insert +org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard=do not insert +org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for=insert +org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_try_resources=insert +org.eclipse.jdt.core.formatter.insert_space_after_unary_operator=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter=insert +org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator=insert +org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration=insert +org.eclipse.jdt.core.formatter.insert_space_before_binary_operator=insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer=insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration=do not insert 
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_try=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert=insert +org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional=insert +org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for=insert +org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws=do not insert 
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_ellipsis=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration=insert 
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation=do not insert 
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_try=insert +org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while=insert +org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return=insert +org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw=insert +org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional=insert +org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_semicolon=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_try_resources=do not insert +org.eclipse.jdt.core.formatter.insert_space_before_unary_operator=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant=do not insert 
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration=do not insert +org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation=do not insert +org.eclipse.jdt.core.formatter.join_lines_in_comments=true +org.eclipse.jdt.core.formatter.join_wrapped_lines=false +org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line=false +org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line=false +org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line=false +org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line=false +org.eclipse.jdt.core.formatter.lineSplit=200 +org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column=false +org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column=false +org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body=0 +org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve=1 +org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line=true +org.eclipse.jdt.core.formatter.tabulation.char=space +org.eclipse.jdt.core.formatter.tabulation.size=4 +org.eclipse.jdt.core.formatter.use_on_off_tags=false +org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations=false +org.eclipse.jdt.core.formatter.wrap_before_binary_operator=true +org.eclipse.jdt.core.formatter.wrap_before_or_operator_multicatch=true +org.eclipse.jdt.core.formatter.wrap_outer_expressions_when_nested=true diff --git a/vendor/github.com/camlistore/camlistore/clients/android/.settings/org.eclipse.jdt.ui.prefs b/vendor/github.com/camlistore/camlistore/clients/android/.settings/org.eclipse.jdt.ui.prefs new file mode 100644 index 00000000..d8c2527c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/.settings/org.eclipse.jdt.ui.prefs @@ -0,0 +1,60 @@ +eclipse.preferences.version=1 +editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true +formatter_profile=_Camlistore Policy 
+formatter_settings_version=12 +org.eclipse.jdt.ui.exception.name=e +org.eclipse.jdt.ui.gettersetter.use.is=true +org.eclipse.jdt.ui.keywordthis=false +org.eclipse.jdt.ui.overrideannotation=true +sp_cleanup.add_default_serial_version_id=true +sp_cleanup.add_generated_serial_version_id=false +sp_cleanup.add_missing_annotations=true +sp_cleanup.add_missing_deprecated_annotations=true +sp_cleanup.add_missing_methods=false +sp_cleanup.add_missing_nls_tags=false +sp_cleanup.add_missing_override_annotations=true +sp_cleanup.add_missing_override_annotations_interface_methods=true +sp_cleanup.add_serial_version_id=false +sp_cleanup.always_use_blocks=true +sp_cleanup.always_use_parentheses_in_expressions=false +sp_cleanup.always_use_this_for_non_static_field_access=false +sp_cleanup.always_use_this_for_non_static_method_access=false +sp_cleanup.convert_to_enhanced_for_loop=false +sp_cleanup.correct_indentation=true +sp_cleanup.format_source_code=true +sp_cleanup.format_source_code_changes_only=false +sp_cleanup.make_local_variable_final=false +sp_cleanup.make_parameters_final=false +sp_cleanup.make_private_fields_final=true +sp_cleanup.make_type_abstract_if_missing_method=false +sp_cleanup.make_variable_declarations_final=true +sp_cleanup.never_use_blocks=false +sp_cleanup.never_use_parentheses_in_expressions=true +sp_cleanup.on_save_use_additional_actions=true +sp_cleanup.organize_imports=true +sp_cleanup.qualify_static_field_accesses_with_declaring_class=false +sp_cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true +sp_cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true +sp_cleanup.qualify_static_member_accesses_with_declaring_class=false +sp_cleanup.qualify_static_method_accesses_with_declaring_class=false +sp_cleanup.remove_private_constructors=true +sp_cleanup.remove_trailing_whitespaces=true +sp_cleanup.remove_trailing_whitespaces_all=true +sp_cleanup.remove_trailing_whitespaces_ignore_empty=false 
+sp_cleanup.remove_unnecessary_casts=true +sp_cleanup.remove_unnecessary_nls_tags=false +sp_cleanup.remove_unused_imports=true +sp_cleanup.remove_unused_local_variables=false +sp_cleanup.remove_unused_private_fields=true +sp_cleanup.remove_unused_private_members=false +sp_cleanup.remove_unused_private_methods=true +sp_cleanup.remove_unused_private_types=true +sp_cleanup.sort_members=false +sp_cleanup.sort_members_all=false +sp_cleanup.use_blocks=false +sp_cleanup.use_blocks_only_for_return_and_throw=false +sp_cleanup.use_parentheses_in_expressions=false +sp_cleanup.use_this_for_non_static_field_access=false +sp_cleanup.use_this_for_non_static_field_access_only_if_necessary=true +sp_cleanup.use_this_for_non_static_method_access=false +sp_cleanup.use_this_for_non_static_method_access_only_if_necessary=true diff --git a/vendor/github.com/camlistore/camlistore/clients/android/AndroidManifest.xml b/vendor/github.com/camlistore/camlistore/clients/android/AndroidManifest.xml new file mode 100644 index 00000000..e9bf0cdc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/AndroidManifest.xml @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/clients/android/Makefile b/vendor/github.com/camlistore/camlistore/clients/android/Makefile new file mode 100644 index 00000000..5cab0c3f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/Makefile @@ -0,0 +1,17 @@ +all: + ./check-environment.pl + ant debug + +# Dummy target to make build.pl happy +install: + ./check-environment.pl + ant debug + +env: + docker build -t camlistore/android devenv + +dockerdebug: + docker run -v $(GOPATH)/src/camlistore.org:/src/camlistore.org camlistore/android /src/camlistore.org/clients/android/build-in-docker.pl debug + +dockerrelease: + docker run -i -t -v $(GOPATH)/src/camlistore.org:/src/camlistore.org 
-v $(HOME)/keys/android-camlistore:/keys camlistore/android /src/camlistore.org/clients/android/build-in-docker.pl release diff --git a/vendor/github.com/camlistore/camlistore/clients/android/build-in-docker.pl b/vendor/github.com/camlistore/camlistore/clients/android/build-in-docker.pl new file mode 100755 index 00000000..820dc201 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/build-in-docker.pl @@ -0,0 +1,64 @@ +#!/usr/bin/perl + +use strict; +use File::Path qw(make_path); + +die "This script is meant to be run within the camlistore/android Docker contain. Run 'make env' to build it.\n" + unless $ENV{IN_DOCKER}; + +my $mode = shift || "debug"; + +my $ANDROID = "/src/camlistore.org/clients/android"; +my $ASSETS = "$ANDROID/assets"; +my $GENDIR = "$ANDROID/gen/org/camlistore"; + +umask 0; +make_path($GENDIR, { mode => 0755 }) unless -d $GENDIR; + +$ENV{GOROOT} = "/usr/local/go"; +$ENV{GOBIN} = $GENDIR; +$ENV{GOPATH} = "/"; +$ENV{GOARCH} = "arm"; +print "Building ARM camlistore.org/cmd/camput\n"; +system("/usr/local/go/bin/go", "install", "camlistore.org/cmd/camput") + and die "Failed to build camput"; + +system("cp", "-p", "$GENDIR/linux_arm/camput", "$ASSETS/camput.arm") + and die "cp failure"; +# TODO: build an x86 version too? if/when those Android devices matter. + +{ + open(my $vfh, ">$ASSETS/camput-version.txt") or die "open camput-version error: $!"; + # TODO(bradfitz): make these values automatic, and don't make the + # "Version" menu say "camput version" when it runs. Also maybe + # keep a history of these somewhere more convenient. 
+ print $vfh "app 0.6.1 camput ccacf764 go 70499e5fbe5b"; +} + +chdir $ASSETS or die "can't cd to assets dir"; + +my $digest = `openssl sha1 camput.arm`; +chomp $digest; +print "ARM camput is $digest\n"; +die "No digest" unless $digest; +write_file("$GENDIR/ChildProcessConfig.java", "package org.camlistore; public final class ChildProcessConfig { // $digest\n}"); + +print "Running ant $mode\n"; +chdir $ANDROID or die "can't cd to android dir"; +exec "ant", + "-Dsdk.dir=/usr/local/android-sdk-linux", + "-Dkey.store=/keys/android-camlistore.keystore", + "-Dkey.alias=camkey", + $mode; + +sub write_file { + my ($file, $contents) = @_; + if (open(my $fh, $file)) { + my $cur = do { local $/; <$fh> }; + return if $cur eq $contents; + } + open(my $fh, ">$file") or die "Failed to open $file: $!"; + print $fh $contents; + close($fh) or die "Close: $!"; + print "Wrote $file\n"; +} diff --git a/vendor/github.com/camlistore/camlistore/clients/android/build.properties b/vendor/github.com/camlistore/camlistore/clients/android/build.properties new file mode 100644 index 00000000..4ccf4d1c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/build.properties @@ -0,0 +1,2 @@ +out.dir=build +gen.dir=build/gen diff --git a/vendor/github.com/camlistore/camlistore/clients/android/build.xml b/vendor/github.com/camlistore/camlistore/clients/android/build.xml new file mode 100644 index 00000000..d1d6357f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/build.xml @@ -0,0 +1,72 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/clients/android/check-environment.pl b/vendor/github.com/camlistore/camlistore/clients/android/check-environment.pl new file mode 100755 index 00000000..daf1610e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/check-environment.pl @@ -0,0 +1,14 @@ +#!/usr/bin/perl + +use strict; +use FindBin qw($Bin); + +my $props = "$Bin/local.properties"; 
+unless (-e $props) { + die "\n". + "**************************************************************\n". + "Can't build the Camlistore Android client; SDK not configured.\n". + "You need to create your $props file.\n". + "See local.properties.TEMPLATE for instructions.\n". + "**************************************************************\n\n"; +} diff --git a/vendor/github.com/camlistore/camlistore/clients/android/default.properties b/vendor/github.com/camlistore/camlistore/clients/android/default.properties new file mode 100644 index 00000000..b6f30167 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/default.properties @@ -0,0 +1,12 @@ +# This file is automatically generated by Android Tools. +# Do not modify this file -- YOUR CHANGES WILL BE ERASED! +# +# This file must be checked in Version Control Systems. +# +# To customize properties used by the Ant build system use, +# "build.properties", and override values to adapt the script to your +# project structure. + +# Project target. +target=android-17 + diff --git a/vendor/github.com/camlistore/camlistore/clients/android/devenv/Dockerfile b/vendor/github.com/camlistore/camlistore/clients/android/devenv/Dockerfile new file mode 100644 index 00000000..39acc747 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/devenv/Dockerfile @@ -0,0 +1,38 @@ +# Build environment in which to build the Camlistore Android app. +# +# This extends the Dockerfile from https://index.docker.io/u/wasabeef/android/ + +FROM wasabeef/android +MAINTAINER bradfitz + +# Found these from: android list sdk -u -e +RUN android list sdk -u -e | grep build-tools- | perl -npe 's/.+"(.+)"/$1/' > /tmp/build-tools-version +RUN perl -e 'die "No Android build tools version found." 
unless -s "/tmp/build-tools-version"' +RUN echo y | android update sdk -u -t $(cat /tmp/build-tools-version) +RUN echo y | android update sdk -u -t android-17 + +# Don't need mercurial yet, since we're just using the archive URL to fetch Go. +# But it's possible we may want to switch to using hg, in which case: +# RUN yum -y install mercurial + +# Update the GOVERS to depend on a new version of Go. +# +# The 073fc578434b version is Go 1.3.1 (2014-02-21), +# to satisfy the dependency for Go 1.3 in the Docker build of +# camput. +ENV GOVERS 073fc578434b + +RUN cd /usr/local && curl -O http://go.googlecode.com/archive/$GOVERS.zip +RUN cd /usr/local && unzip -q $GOVERS.zip +RUN cd /usr/local && mv go-$GOVERS go +RUN chmod 0755 /usr/local/go/src/make.bash +RUN echo $GOVERS > /usr/local/go/VERSION +RUN GOROOT=/usr/local/go GOARCH=arm bash -c "cd /usr/local/go/src && ./make.bash" + + +ENV ANDROID_HOME /usr/local/android-sdk-linux +ENV ANT_HOME /usr/local/apache-ant-1.9.2 +ENV PATH $PATH:$ANDROID_HOME/tools +ENV PATH $PATH:$ANDROID_HOME/platform-tools +ENV PATH $PATH:$ANT_HOME/bin +ENV IN_DOCKER 1 diff --git a/vendor/github.com/camlistore/camlistore/clients/android/local.properties.TEMPLATE b/vendor/github.com/camlistore/camlistore/clients/android/local.properties.TEMPLATE new file mode 100644 index 00000000..32c51f69 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/local.properties.TEMPLATE @@ -0,0 +1,3 @@ +# Copy this file to one named "local.properties" and update this path to +# wherever your Android SDK is located: +sdk.dir=/home/bradfitz/sdk/android diff --git a/vendor/github.com/camlistore/camlistore/clients/android/project.properties b/vendor/github.com/camlistore/camlistore/clients/android/project.properties new file mode 100644 index 00000000..a3ee5ab6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/project.properties @@ -0,0 +1,14 @@ +# This file is automatically generated by Android Tools. 
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED! +# +# This file must be checked in Version Control Systems. +# +# To customize properties used by the Ant build system edit +# "ant.properties", and override values to adapt the script to your +# project structure. +# +# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home): +#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt + +# Project target. +target=android-17 diff --git a/vendor/github.com/camlistore/camlistore/clients/android/res/drawable-hdpi/icon.png b/vendor/github.com/camlistore/camlistore/clients/android/res/drawable-hdpi/icon.png new file mode 100644 index 00000000..978e3fab Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/android/res/drawable-hdpi/icon.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/android/res/drawable-mdpi/icon.png b/vendor/github.com/camlistore/camlistore/clients/android/res/drawable-mdpi/icon.png new file mode 100644 index 00000000..14f58169 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/android/res/drawable-mdpi/icon.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/android/res/drawable-xhdpi/icon.png b/vendor/github.com/camlistore/camlistore/clients/android/res/drawable-xhdpi/icon.png new file mode 100644 index 00000000..50d451ec Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/android/res/drawable-xhdpi/icon.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/android/res/drawable/icon_file.png b/vendor/github.com/camlistore/camlistore/clients/android/res/drawable/icon_file.png new file mode 100644 index 00000000..fecbbc1a Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/android/res/drawable/icon_file.png differ diff --git 
a/vendor/github.com/camlistore/camlistore/clients/android/res/drawable/icon_folder.png b/vendor/github.com/camlistore/camlistore/clients/android/res/drawable/icon_folder.png new file mode 100644 index 00000000..3634827e Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/android/res/drawable/icon_folder.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/android/res/layout/main.xml b/vendor/github.com/camlistore/camlistore/clients/android/res/layout/main.xml new file mode 100644 index 00000000..6d3debd9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/android/res/layout/main.xml @@ -0,0 +1,77 @@ + + + + + + + + ",h).unbind("click").click(function(){h.click.apply(b.element[0],arguments)}).appendTo(g);c.fn.button&&e.button()});f.appendTo(b.uiDialog)}},_makeDraggable:function(){function a(e){return{position:e.position, +offset:e.offset}}var b=this,d=b.options,f=c(document),g;b.uiDialog.draggable({cancel:".ui-dialog-content, .ui-dialog-titlebar-close",handle:".ui-dialog-titlebar",containment:"document",start:function(e,h){g=d.height==="auto"?"auto":c(this).height();c(this).height(c(this).height()).addClass("ui-dialog-dragging");b._trigger("dragStart",e,a(h))},drag:function(e,h){b._trigger("drag",e,a(h))},stop:function(e,h){d.position=[h.position.left-f.scrollLeft(),h.position.top-f.scrollTop()];c(this).removeClass("ui-dialog-dragging").height(g); +b._trigger("dragStop",e,a(h));c.ui.dialog.overlay.resize()}})},_makeResizable:function(a){function b(e){return{originalPosition:e.originalPosition,originalSize:e.originalSize,position:e.position,size:e.size}}a=a===j?this.options.resizable:a;var d=this,f=d.options,g=d.uiDialog.css("position");a=typeof a==="string"?a:"n,e,s,w,se,sw,ne,nw";d.uiDialog.resizable({cancel:".ui-dialog-content",containment:"document",alsoResize:d.element,maxWidth:f.maxWidth,maxHeight:f.maxHeight,minWidth:f.minWidth,minHeight:d._minHeight(), 
+handles:a,start:function(e,h){c(this).addClass("ui-dialog-resizing");d._trigger("resizeStart",e,b(h))},resize:function(e,h){d._trigger("resize",e,b(h))},stop:function(e,h){c(this).removeClass("ui-dialog-resizing");f.height=c(this).height();f.width=c(this).width();d._trigger("resizeStop",e,b(h));c.ui.dialog.overlay.resize()}}).css("position",g).find(".ui-resizable-se").addClass("ui-icon ui-icon-grip-diagonal-se")},_minHeight:function(){var a=this.options;return a.height==="auto"?a.minHeight:Math.min(a.minHeight, +a.height)},_position:function(a){var b=[],d=[0,0],f;if(a){if(typeof a==="string"||typeof a==="object"&&"0"in a){b=a.split?a.split(" "):[a[0],a[1]];if(b.length===1)b[1]=b[0];c.each(["left","top"],function(g,e){if(+b[g]===b[g]){d[g]=b[g];b[g]=e}});a={my:b.join(" "),at:b.join(" "),offset:d.join(" ")}}a=c.extend({},c.ui.dialog.prototype.options.position,a)}else a=c.ui.dialog.prototype.options.position;(f=this.uiDialog.is(":visible"))||this.uiDialog.show();this.uiDialog.css({top:0,left:0}).position(a); +f||this.uiDialog.hide()},_setOption:function(a,b){var d=this,f=d.uiDialog,g=f.is(":data(resizable)"),e=false;switch(a){case "beforeclose":a="beforeClose";break;case "buttons":d._createButtons(b);e=true;break;case "closeText":d.uiDialogTitlebarCloseText.text(""+b);break;case "dialogClass":f.removeClass(d.options.dialogClass).addClass("ui-dialog ui-widget ui-widget-content ui-corner-all "+b);break;case "disabled":b?f.addClass("ui-dialog-disabled"):f.removeClass("ui-dialog-disabled");break;case "draggable":b? 
+d._makeDraggable():f.draggable("destroy");break;case "height":e=true;break;case "maxHeight":g&&f.resizable("option","maxHeight",b);e=true;break;case "maxWidth":g&&f.resizable("option","maxWidth",b);e=true;break;case "minHeight":g&&f.resizable("option","minHeight",b);e=true;break;case "minWidth":g&&f.resizable("option","minWidth",b);e=true;break;case "position":d._position(b);break;case "resizable":g&&!b&&f.resizable("destroy");g&&typeof b==="string"&&f.resizable("option","handles",b);!g&&b!==false&& +d._makeResizable(b);break;case "title":c(".ui-dialog-title",d.uiDialogTitlebar).html(""+(b||" "));break;case "width":e=true;break}c.Widget.prototype._setOption.apply(d,arguments);e&&d._size()},_size:function(){var a=this.options,b;this.element.css({width:"auto",minHeight:0,height:0});if(a.minWidth>a.width)a.width=a.minWidth;b=this.uiDialog.css({height:"auto",width:a.width}).height();this.element.css(a.height==="auto"?{minHeight:Math.max(a.minHeight-b,0),height:c.support.minHeight?"auto":Math.max(a.minHeight- +b,0)}:{minHeight:0,height:Math.max(a.height-b,0)}).show();this.uiDialog.is(":data(resizable)")&&this.uiDialog.resizable("option","minHeight",this._minHeight())}});c.extend(c.ui.dialog,{version:"1.8.5",uuid:0,maxZ:0,getTitleId:function(a){a=a.attr("id");if(!a){this.uuid+=1;a=this.uuid}return"ui-dialog-title-"+a},overlay:function(a){this.$el=c.ui.dialog.overlay.create(a)}});c.extend(c.ui.dialog.overlay,{instances:[],oldInstances:[],maxZ:0,events:c.map("focus,mousedown,mouseup,keydown,keypress,click".split(","), +function(a){return a+".dialog-overlay"}).join(" "),create:function(a){if(this.instances.length===0){setTimeout(function(){c.ui.dialog.overlay.instances.length&&c(document).bind(c.ui.dialog.overlay.events,function(d){if(c(d.target).zIndex()").addClass("ui-widget-overlay")).appendTo(document.body).css({width:this.width(),height:this.height()});c.fn.bgiframe&&b.bgiframe();this.instances.push(b);return 
b},destroy:function(a){this.oldInstances.push(this.instances.splice(c.inArray(a,this.instances),1)[0]);this.instances.length===0&&c([document,window]).unbind(".dialog-overlay");a.remove();var b=0;c.each(this.instances,function(){b=Math.max(b,this.css("z-index"))});this.maxZ=b},height:function(){var a, +b;if(c.browser.msie&&c.browser.version<7){a=Math.max(document.documentElement.scrollHeight,document.body.scrollHeight);b=Math.max(document.documentElement.offsetHeight,document.body.offsetHeight);return a");if(!b.values)b.values=[this._valueMin(),this._valueMin()];if(b.values.length&&b.values.length!==2)b.values=[b.values[0],b.values[0]]}else this.range=d("
");this.range.appendTo(this.element).addClass("ui-slider-range");if(b.range==="min"||b.range==="max")this.range.addClass("ui-slider-range-"+b.range);this.range.addClass("ui-widget-header")}d(".ui-slider-handle",this.element).length===0&&d("").appendTo(this.element).addClass("ui-slider-handle"); +if(b.values&&b.values.length)for(;d(".ui-slider-handle",this.element).length").appendTo(this.element).addClass("ui-slider-handle");this.handles=d(".ui-slider-handle",this.element).addClass("ui-state-default ui-corner-all");this.handle=this.handles.eq(0);this.handles.add(this.range).filter("a").click(function(c){c.preventDefault()}).hover(function(){b.disabled||d(this).addClass("ui-state-hover")},function(){d(this).removeClass("ui-state-hover")}).focus(function(){if(b.disabled)d(this).blur(); +else{d(".ui-slider .ui-state-focus").removeClass("ui-state-focus");d(this).addClass("ui-state-focus")}}).blur(function(){d(this).removeClass("ui-state-focus")});this.handles.each(function(c){d(this).data("index.ui-slider-handle",c)});this.handles.keydown(function(c){var e=true,f=d(this).data("index.ui-slider-handle"),h,g,i;if(!a.options.disabled){switch(c.keyCode){case d.ui.keyCode.HOME:case d.ui.keyCode.END:case d.ui.keyCode.PAGE_UP:case d.ui.keyCode.PAGE_DOWN:case d.ui.keyCode.UP:case d.ui.keyCode.RIGHT:case d.ui.keyCode.DOWN:case d.ui.keyCode.LEFT:e= +false;if(!a._keySliding){a._keySliding=true;d(this).addClass("ui-state-active");h=a._start(c,f);if(h===false)return}break}i=a.options.step;h=a.options.values&&a.options.values.length?(g=a.values(f)):(g=a.value());switch(c.keyCode){case d.ui.keyCode.HOME:g=a._valueMin();break;case d.ui.keyCode.END:g=a._valueMax();break;case d.ui.keyCode.PAGE_UP:g=a._trimAlignValue(h+(a._valueMax()-a._valueMin())/5);break;case d.ui.keyCode.PAGE_DOWN:g=a._trimAlignValue(h-(a._valueMax()-a._valueMin())/5);break;case d.ui.keyCode.UP:case d.ui.keyCode.RIGHT:if(h=== +a._valueMax())return;g=a._trimAlignValue(h+i);break;case d.ui.keyCode.DOWN:case 
d.ui.keyCode.LEFT:if(h===a._valueMin())return;g=a._trimAlignValue(h-i);break}a._slide(c,f,g);return e}}).keyup(function(c){var e=d(this).data("index.ui-slider-handle");if(a._keySliding){a._keySliding=false;a._stop(c,e);a._change(c,e);d(this).removeClass("ui-state-active")}});this._refreshValue();this._animateOff=false},destroy:function(){this.handles.remove();this.range.remove();this.element.removeClass("ui-slider ui-slider-horizontal ui-slider-vertical ui-slider-disabled ui-widget ui-widget-content ui-corner-all").removeData("slider").unbind(".slider"); +this._mouseDestroy();return this},_mouseCapture:function(a){var b=this.options,c,e,f,h,g;if(b.disabled)return false;this.elementSize={width:this.element.outerWidth(),height:this.element.outerHeight()};this.elementOffset=this.element.offset();c=this._normValueFromMouse({x:a.pageX,y:a.pageY});e=this._valueMax()-this._valueMin()+1;h=this;this.handles.each(function(i){var j=Math.abs(c-h.values(i));if(e>j){e=j;f=d(this);g=i}});if(b.range===true&&this.values(1)===b.min){g+=1;f=d(this.handles[g])}if(this._start(a, +g)===false)return false;this._mouseSliding=true;h._handleIndex=g;f.addClass("ui-state-active").focus();b=f.offset();this._clickOffset=!d(a.target).parents().andSelf().is(".ui-slider-handle")?{left:0,top:0}:{left:a.pageX-b.left-f.width()/2,top:a.pageY-b.top-f.height()/2-(parseInt(f.css("borderTopWidth"),10)||0)-(parseInt(f.css("borderBottomWidth"),10)||0)+(parseInt(f.css("marginTop"),10)||0)};this._slide(a,g,c);return this._animateOff=true},_mouseStart:function(){return true},_mouseDrag:function(a){var b= +this._normValueFromMouse({x:a.pageX,y:a.pageY});this._slide(a,this._handleIndex,b);return false},_mouseStop:function(a){this.handles.removeClass("ui-state-active");this._mouseSliding=false;this._stop(a,this._handleIndex);this._change(a,this._handleIndex);this._clickOffset=this._handleIndex=null;return 
this._animateOff=false},_detectOrientation:function(){this.orientation=this.options.orientation==="vertical"?"vertical":"horizontal"},_normValueFromMouse:function(a){var b;if(this.orientation==="horizontal"){b= +this.elementSize.width;a=a.x-this.elementOffset.left-(this._clickOffset?this._clickOffset.left:0)}else{b=this.elementSize.height;a=a.y-this.elementOffset.top-(this._clickOffset?this._clickOffset.top:0)}b=a/b;if(b>1)b=1;if(b<0)b=0;if(this.orientation==="vertical")b=1-b;a=this._valueMax()-this._valueMin();return this._trimAlignValue(this._valueMin()+b*a)},_start:function(a,b){var c={handle:this.handles[b],value:this.value()};if(this.options.values&&this.options.values.length){c.value=this.values(b); +c.values=this.values()}return this._trigger("start",a,c)},_slide:function(a,b,c){var e;if(this.options.values&&this.options.values.length){e=this.values(b?0:1);if(this.options.values.length===2&&this.options.range===true&&(b===0&&c>e||b===1&&c1){this.options.values[a]=this._trimAlignValue(b);this._refreshValue();this._change(null,a)}if(arguments.length)if(d.isArray(arguments[0])){c=this.options.values;e=arguments[0];for(f=0;fthis._valueMax())return this._valueMax();var b=this.options.step>0?this.options.step:1,c=a%b;a=a-c;if(Math.abs(c)*2>=b)a+=c>0?b:-b;return parseFloat(a.toFixed(5))},_valueMin:function(){return this.options.min},_valueMax:function(){return this.options.max},_refreshValue:function(){var a= +this.options.range,b=this.options,c=this,e=!this._animateOff?b.animate:false,f,h={},g,i,j,l;if(this.options.values&&this.options.values.length)this.handles.each(function(k){f=(c.values(k)-c._valueMin())/(c._valueMax()-c._valueMin())*100;h[c.orientation==="horizontal"?"left":"bottom"]=f+"%";d(this).stop(1,1)[e?"animate":"css"](h,b.animate);if(c.options.range===true)if(c.orientation==="horizontal"){if(k===0)c.range.stop(1,1)[e?"animate":"css"]({left:f+"%"},b.animate);if(k===1)c.range[e?"animate":"css"]({width:f- 
+g+"%"},{queue:false,duration:b.animate})}else{if(k===0)c.range.stop(1,1)[e?"animate":"css"]({bottom:f+"%"},b.animate);if(k===1)c.range[e?"animate":"css"]({height:f-g+"%"},{queue:false,duration:b.animate})}g=f});else{i=this.value();j=this._valueMin();l=this._valueMax();f=l!==j?(i-j)/(l-j)*100:0;h[c.orientation==="horizontal"?"left":"bottom"]=f+"%";this.handle.stop(1,1)[e?"animate":"css"](h,b.animate);if(a==="min"&&this.orientation==="horizontal")this.range.stop(1,1)[e?"animate":"css"]({width:f+"%"}, +b.animate);if(a==="max"&&this.orientation==="horizontal")this.range[e?"animate":"css"]({width:100-f+"%"},{queue:false,duration:b.animate});if(a==="min"&&this.orientation==="vertical")this.range.stop(1,1)[e?"animate":"css"]({height:f+"%"},b.animate);if(a==="max"&&this.orientation==="vertical")this.range[e?"animate":"css"]({height:100-f+"%"},{queue:false,duration:b.animate})}}});d.extend(d.ui.slider,{version:"1.8.5"})})(jQuery); +;/* + * jQuery UI Tabs 1.8.5 + * + * Copyright 2010, AUTHORS.txt (http://jqueryui.com/about) + * Dual licensed under the MIT or GPL Version 2 licenses. + * http://jquery.org/license + * + * http://docs.jquery.com/UI/Tabs + * + * Depends: + * jquery.ui.core.js + * jquery.ui.widget.js + */ +(function(d,p){function u(){return++v}function w(){return++x}var v=0,x=0;d.widget("ui.tabs",{options:{add:null,ajaxOptions:null,cache:false,cookie:null,collapsible:false,disable:null,disabled:[],enable:null,event:"click",fx:null,idPrefix:"ui-tabs-",load:null,panelTemplate:"
",remove:null,select:null,show:null,spinner:"Loading…",tabTemplate:"
  • #{label}
  • "},_create:function(){this._tabify(true)},_setOption:function(a,e){if(a=="selected")this.options.collapsible&& +e==this.options.selected||this.select(e);else{this.options[a]=e;this._tabify()}},_tabId:function(a){return a.title&&a.title.replace(/\s/g,"_").replace(/[^\w\u00c0-\uFFFF-]/g,"")||this.options.idPrefix+u()},_sanitizeSelector:function(a){return a.replace(/:/g,"\\:")},_cookie:function(){var a=this.cookie||(this.cookie=this.options.cookie.name||"ui-tabs-"+w());return d.cookie.apply(null,[a].concat(d.makeArray(arguments)))},_ui:function(a,e){return{tab:a,panel:e,index:this.anchors.index(a)}},_cleanup:function(){this.lis.filter(".ui-state-processing").removeClass("ui-state-processing").find("span:data(label.tabs)").each(function(){var a= +d(this);a.html(a.data("label.tabs")).removeData("label.tabs")})},_tabify:function(a){function e(g,f){g.css("display","");!d.support.opacity&&f.opacity&&g[0].style.removeAttribute("filter")}var b=this,c=this.options,h=/^#.+/;this.list=this.element.find("ol,ul").eq(0);this.lis=d(" > li:has(a[href])",this.list);this.anchors=this.lis.map(function(){return d("a",this)[0]});this.panels=d([]);this.anchors.each(function(g,f){var i=d(f).attr("href"),l=i.split("#")[0],q;if(l&&(l===location.toString().split("#")[0]|| +(q=d("base")[0])&&l===q.href)){i=f.hash;f.href=i}if(h.test(i))b.panels=b.panels.add(b._sanitizeSelector(i));else if(i&&i!=="#"){d.data(f,"href.tabs",i);d.data(f,"load.tabs",i.replace(/#.*$/,""));i=b._tabId(f);f.href="#"+i;f=d("#"+i);if(!f.length){f=d(c.panelTemplate).attr("id",i).addClass("ui-tabs-panel ui-widget-content ui-corner-bottom").insertAfter(b.panels[g-1]||b.list);f.data("destroy.tabs",true)}b.panels=b.panels.add(f)}else c.disabled.push(g)});if(a){this.element.addClass("ui-tabs ui-widget ui-widget-content ui-corner-all"); +this.list.addClass("ui-tabs-nav ui-helper-reset ui-helper-clearfix ui-widget-header ui-corner-all");this.lis.addClass("ui-state-default ui-corner-top");this.panels.addClass("ui-tabs-panel 
ui-widget-content ui-corner-bottom");if(c.selected===p){location.hash&&this.anchors.each(function(g,f){if(f.hash==location.hash){c.selected=g;return false}});if(typeof c.selected!=="number"&&c.cookie)c.selected=parseInt(b._cookie(),10);if(typeof c.selected!=="number"&&this.lis.filter(".ui-tabs-selected").length)c.selected= +this.lis.index(this.lis.filter(".ui-tabs-selected"));c.selected=c.selected||(this.lis.length?0:-1)}else if(c.selected===null)c.selected=-1;c.selected=c.selected>=0&&this.anchors[c.selected]||c.selected<0?c.selected:0;c.disabled=d.unique(c.disabled.concat(d.map(this.lis.filter(".ui-state-disabled"),function(g){return b.lis.index(g)}))).sort();d.inArray(c.selected,c.disabled)!=-1&&c.disabled.splice(d.inArray(c.selected,c.disabled),1);this.panels.addClass("ui-tabs-hide");this.lis.removeClass("ui-tabs-selected ui-state-active"); +if(c.selected>=0&&this.anchors.length){this.panels.eq(c.selected).removeClass("ui-tabs-hide");this.lis.eq(c.selected).addClass("ui-tabs-selected ui-state-active");b.element.queue("tabs",function(){b._trigger("show",null,b._ui(b.anchors[c.selected],b.panels[c.selected]))});this.load(c.selected)}d(window).bind("unload",function(){b.lis.add(b.anchors).unbind(".tabs");b.lis=b.anchors=b.panels=null})}else c.selected=this.lis.index(this.lis.filter(".ui-tabs-selected"));this.element[c.collapsible?"addClass": +"removeClass"]("ui-tabs-collapsible");c.cookie&&this._cookie(c.selected,c.cookie);a=0;for(var j;j=this.lis[a];a++)d(j)[d.inArray(a,c.disabled)!=-1&&!d(j).hasClass("ui-tabs-selected")?"addClass":"removeClass"]("ui-state-disabled");c.cache===false&&this.anchors.removeData("cache.tabs");this.lis.add(this.anchors).unbind(".tabs");if(c.event!=="mouseover"){var k=function(g,f){f.is(":not(.ui-state-disabled)")&&f.addClass("ui-state-"+g)},n=function(g,f){f.removeClass("ui-state-"+g)};this.lis.bind("mouseover.tabs", 
+function(){k("hover",d(this))});this.lis.bind("mouseout.tabs",function(){n("hover",d(this))});this.anchors.bind("focus.tabs",function(){k("focus",d(this).closest("li"))});this.anchors.bind("blur.tabs",function(){n("focus",d(this).closest("li"))})}var m,o;if(c.fx)if(d.isArray(c.fx)){m=c.fx[0];o=c.fx[1]}else m=o=c.fx;var r=o?function(g,f){d(g).closest("li").addClass("ui-tabs-selected ui-state-active");f.hide().removeClass("ui-tabs-hide").animate(o,o.duration||"normal",function(){e(f,o);b._trigger("show", +null,b._ui(g,f[0]))})}:function(g,f){d(g).closest("li").addClass("ui-tabs-selected ui-state-active");f.removeClass("ui-tabs-hide");b._trigger("show",null,b._ui(g,f[0]))},s=m?function(g,f){f.animate(m,m.duration||"normal",function(){b.lis.removeClass("ui-tabs-selected ui-state-active");f.addClass("ui-tabs-hide");e(f,m);b.element.dequeue("tabs")})}:function(g,f){b.lis.removeClass("ui-tabs-selected ui-state-active");f.addClass("ui-tabs-hide");b.element.dequeue("tabs")};this.anchors.bind(c.event+".tabs", +function(){var g=this,f=d(g).closest("li"),i=b.panels.filter(":not(.ui-tabs-hide)"),l=d(b._sanitizeSelector(g.hash));if(f.hasClass("ui-tabs-selected")&&!c.collapsible||f.hasClass("ui-state-disabled")||f.hasClass("ui-state-processing")||b.panels.filter(":animated").length||b._trigger("select",null,b._ui(this,l[0]))===false){this.blur();return false}c.selected=b.anchors.index(this);b.abort();if(c.collapsible)if(f.hasClass("ui-tabs-selected")){c.selected=-1;c.cookie&&b._cookie(c.selected,c.cookie);b.element.queue("tabs", +function(){s(g,i)}).dequeue("tabs");this.blur();return false}else if(!i.length){c.cookie&&b._cookie(c.selected,c.cookie);b.element.queue("tabs",function(){r(g,l)});b.load(b.anchors.index(this));this.blur();return false}c.cookie&&b._cookie(c.selected,c.cookie);if(l.length){i.length&&b.element.queue("tabs",function(){s(g,i)});b.element.queue("tabs",function(){r(g,l)});b.load(b.anchors.index(this))}else throw"jQuery UI Tabs: Mismatching fragment 
identifier.";d.browser.msie&&this.blur()});this.anchors.bind("click.tabs", +function(){return false})},_getIndex:function(a){if(typeof a=="string")a=this.anchors.index(this.anchors.filter("[href$="+a+"]"));return a},destroy:function(){var a=this.options;this.abort();this.element.unbind(".tabs").removeClass("ui-tabs ui-widget ui-widget-content ui-corner-all ui-tabs-collapsible").removeData("tabs");this.list.removeClass("ui-tabs-nav ui-helper-reset ui-helper-clearfix ui-widget-header ui-corner-all");this.anchors.each(function(){var e=d.data(this,"href.tabs");if(e)this.href= +e;var b=d(this).unbind(".tabs");d.each(["href","load","cache"],function(c,h){b.removeData(h+".tabs")})});this.lis.unbind(".tabs").add(this.panels).each(function(){d.data(this,"destroy.tabs")?d(this).remove():d(this).removeClass("ui-state-default ui-corner-top ui-tabs-selected ui-state-active ui-state-hover ui-state-focus ui-state-disabled ui-tabs-panel ui-widget-content ui-corner-bottom ui-tabs-hide")});a.cookie&&this._cookie(null,a.cookie);return this},add:function(a,e,b){if(b===p)b=this.anchors.length; +var c=this,h=this.options;e=d(h.tabTemplate.replace(/#\{href\}/g,a).replace(/#\{label\}/g,e));a=!a.indexOf("#")?a.replace("#",""):this._tabId(d("a",e)[0]);e.addClass("ui-state-default ui-corner-top").data("destroy.tabs",true);var j=d("#"+a);j.length||(j=d(h.panelTemplate).attr("id",a).data("destroy.tabs",true));j.addClass("ui-tabs-panel ui-widget-content ui-corner-bottom ui-tabs-hide");if(b>=this.lis.length){e.appendTo(this.list);j.appendTo(this.list[0].parentNode)}else{e.insertBefore(this.lis[b]); +j.insertBefore(this.panels[b])}h.disabled=d.map(h.disabled,function(k){return k>=b?++k:k});this._tabify();if(this.anchors.length==1){h.selected=0;e.addClass("ui-tabs-selected 
ui-state-active");j.removeClass("ui-tabs-hide");this.element.queue("tabs",function(){c._trigger("show",null,c._ui(c.anchors[0],c.panels[0]))});this.load(0)}this._trigger("add",null,this._ui(this.anchors[b],this.panels[b]));return this},remove:function(a){a=this._getIndex(a);var e=this.options,b=this.lis.eq(a).remove(),c=this.panels.eq(a).remove(); +if(b.hasClass("ui-tabs-selected")&&this.anchors.length>1)this.select(a+(a+1=a?--h:h});this._tabify();this._trigger("remove",null,this._ui(b.find("a")[0],c[0]));return this},enable:function(a){a=this._getIndex(a);var e=this.options;if(d.inArray(a,e.disabled)!=-1){this.lis.eq(a).removeClass("ui-state-disabled");e.disabled=d.grep(e.disabled,function(b){return b!=a});this._trigger("enable",null, +this._ui(this.anchors[a],this.panels[a]));return this}},disable:function(a){a=this._getIndex(a);var e=this.options;if(a!=e.selected){this.lis.eq(a).addClass("ui-state-disabled");e.disabled.push(a);e.disabled.sort();this._trigger("disable",null,this._ui(this.anchors[a],this.panels[a]))}return this},select:function(a){a=this._getIndex(a);if(a==-1)if(this.options.collapsible&&this.options.selected!=-1)a=this.options.selected;else return this;this.anchors.eq(a).trigger(this.options.event+".tabs");return this}, +load:function(a){a=this._getIndex(a);var e=this,b=this.options,c=this.anchors.eq(a)[0],h=d.data(c,"load.tabs");this.abort();if(!h||this.element.queue("tabs").length!==0&&d.data(c,"cache.tabs"))this.element.dequeue("tabs");else{this.lis.eq(a).addClass("ui-state-processing");if(b.spinner){var j=d("span",c);j.data("label.tabs",j.html()).html(b.spinner)}this.xhr=d.ajax(d.extend({},b.ajaxOptions,{url:h,success:function(k,n){d(e._sanitizeSelector(c.hash)).html(k);e._cleanup();b.cache&&d.data(c,"cache.tabs", 
+true);e._trigger("load",null,e._ui(e.anchors[a],e.panels[a]));try{b.ajaxOptions.success(k,n)}catch(m){}},error:function(k,n){e._cleanup();e._trigger("load",null,e._ui(e.anchors[a],e.panels[a]));try{b.ajaxOptions.error(k,n,a,c)}catch(m){}}}));e.element.dequeue("tabs");return this}},abort:function(){this.element.queue([]);this.panels.stop(false,true);this.element.queue("tabs",this.element.queue("tabs").splice(-2,2));if(this.xhr){this.xhr.abort();delete this.xhr}this._cleanup();return this},url:function(a, +e){this.anchors.eq(a).removeData("cache.tabs").data("load.tabs",e);return this},length:function(){return this.anchors.length}});d.extend(d.ui.tabs,{version:"1.8.5"});d.extend(d.ui.tabs.prototype,{rotation:null,rotate:function(a,e){var b=this,c=this.options,h=b._rotate||(b._rotate=function(j){clearTimeout(b.rotation);b.rotation=setTimeout(function(){var k=c.selected;b.select(++k')}function E(a,b){d.extend(a, +b);for(var c in b)if(b[c]==null||b[c]==G)a[c]=b[c];return a}d.extend(d.ui,{datepicker:{version:"1.8.5"}});var y=(new Date).getTime();d.extend(L.prototype,{markerClassName:"hasDatepicker",log:function(){this.debug&&console.log.apply("",arguments)},_widgetDatepicker:function(){return this.dpDiv},setDefaults:function(a){E(this._defaults,a||{});return this},_attachDatepicker:function(a,b){var c=null;for(var e in this._defaults){var f=a.getAttribute("date:"+e);if(f){c=c||{};try{c[e]=eval(f)}catch(h){c[e]= +f}}}e=a.nodeName.toLowerCase();f=e=="div"||e=="span";if(!a.id){this.uuid+=1;a.id="dp"+this.uuid}var i=this._newInst(d(a),f);i.settings=d.extend({},b||{},c||{});if(e=="input")this._connectDatepicker(a,i);else f&&this._inlineDatepicker(a,i)},_newInst:function(a,b){return{id:a[0].id.replace(/([^A-Za-z0-9_])/g,"\\\\$1"),input:a,selectedDay:0,selectedMonth:0,selectedYear:0,drawMonth:0,drawYear:0,inline:b,dpDiv:!b?this.dpDiv:d('
    ')}}, +_connectDatepicker:function(a,b){var c=d(a);b.append=d([]);b.trigger=d([]);if(!c.hasClass(this.markerClassName)){this._attachments(c,b);c.addClass(this.markerClassName).keydown(this._doKeyDown).keypress(this._doKeyPress).keyup(this._doKeyUp).bind("setData.datepicker",function(e,f,h){b.settings[f]=h}).bind("getData.datepicker",function(e,f){return this._get(b,f)});this._autoSize(b);d.data(a,"datepicker",b)}},_attachments:function(a,b){var c=this._get(b,"appendText"),e=this._get(b,"isRTL");b.append&& +b.append.remove();if(c){b.append=d(''+c+"");a[e?"before":"after"](b.append)}a.unbind("focus",this._showDatepicker);b.trigger&&b.trigger.remove();c=this._get(b,"showOn");if(c=="focus"||c=="both")a.focus(this._showDatepicker);if(c=="button"||c=="both"){c=this._get(b,"buttonText");var f=this._get(b,"buttonImage");b.trigger=d(this._get(b,"buttonImageOnly")?d("").addClass(this._triggerClass).attr({src:f,alt:c,title:c}):d('').addClass(this._triggerClass).html(f== +""?c:d("").attr({src:f,alt:c,title:c})));a[e?"before":"after"](b.trigger);b.trigger.click(function(){d.datepicker._datepickerShowing&&d.datepicker._lastInput==a[0]?d.datepicker._hideDatepicker():d.datepicker._showDatepicker(a[0]);return false})}},_autoSize:function(a){if(this._get(a,"autoSize")&&!a.inline){var b=new Date(2009,11,20),c=this._get(a,"dateFormat");if(c.match(/[DM]/)){var e=function(f){for(var h=0,i=0,g=0;gh){h=f[g].length;i=g}return i};b.setMonth(e(this._get(a, +c.match(/MM/)?"monthNames":"monthNamesShort")));b.setDate(e(this._get(a,c.match(/DD/)?"dayNames":"dayNamesShort"))+20-b.getDay())}a.input.attr("size",this._formatDate(a,b).length)}},_inlineDatepicker:function(a,b){var c=d(a);if(!c.hasClass(this.markerClassName)){c.addClass(this.markerClassName).append(b.dpDiv).bind("setData.datepicker",function(e,f,h){b.settings[f]=h}).bind("getData.datepicker",function(e,f){return this._get(b,f)});d.data(a,"datepicker",b);this._setDate(b,this._getDefaultDate(b), 
+true);this._updateDatepicker(b);this._updateAlternate(b)}},_dialogDatepicker:function(a,b,c,e,f){a=this._dialogInst;if(!a){this.uuid+=1;this._dialogInput=d('');this._dialogInput.keydown(this._doKeyDown);d("body").append(this._dialogInput);a=this._dialogInst=this._newInst(this._dialogInput,false);a.settings={};d.data(this._dialogInput[0],"datepicker",a)}E(a.settings,e||{});b=b&&b.constructor== +Date?this._formatDate(a,b):b;this._dialogInput.val(b);this._pos=f?f.length?f:[f.pageX,f.pageY]:null;if(!this._pos)this._pos=[document.documentElement.clientWidth/2-100+(document.documentElement.scrollLeft||document.body.scrollLeft),document.documentElement.clientHeight/2-150+(document.documentElement.scrollTop||document.body.scrollTop)];this._dialogInput.css("left",this._pos[0]+20+"px").css("top",this._pos[1]+"px");a.settings.onSelect=c;this._inDialog=true;this.dpDiv.addClass(this._dialogClass);this._showDatepicker(this._dialogInput[0]); +d.blockUI&&d.blockUI(this.dpDiv);d.data(this._dialogInput[0],"datepicker",a);return this},_destroyDatepicker:function(a){var b=d(a),c=d.data(a,"datepicker");if(b.hasClass(this.markerClassName)){var e=a.nodeName.toLowerCase();d.removeData(a,"datepicker");if(e=="input"){c.append.remove();c.trigger.remove();b.removeClass(this.markerClassName).unbind("focus",this._showDatepicker).unbind("keydown",this._doKeyDown).unbind("keypress",this._doKeyPress).unbind("keyup",this._doKeyUp)}else if(e=="div"||e=="span")b.removeClass(this.markerClassName).empty()}}, +_enableDatepicker:function(a){var b=d(a),c=d.data(a,"datepicker");if(b.hasClass(this.markerClassName)){var e=a.nodeName.toLowerCase();if(e=="input"){a.disabled=false;c.trigger.filter("button").each(function(){this.disabled=false}).end().filter("img").css({opacity:"1.0",cursor:""})}else if(e=="div"||e=="span")b.children("."+this._inlineClass).children().removeClass("ui-state-disabled");this._disabledInputs=d.map(this._disabledInputs,function(f){return 
f==a?null:f})}},_disableDatepicker:function(a){var b= +d(a),c=d.data(a,"datepicker");if(b.hasClass(this.markerClassName)){var e=a.nodeName.toLowerCase();if(e=="input"){a.disabled=true;c.trigger.filter("button").each(function(){this.disabled=true}).end().filter("img").css({opacity:"0.5",cursor:"default"})}else if(e=="div"||e=="span")b.children("."+this._inlineClass).children().addClass("ui-state-disabled");this._disabledInputs=d.map(this._disabledInputs,function(f){return f==a?null:f});this._disabledInputs[this._disabledInputs.length]=a}},_isDisabledDatepicker:function(a){if(!a)return false; +for(var b=0;b-1}},_doKeyUp:function(a){a=d.datepicker._getInst(a.target);if(a.input.val()!=a.lastVal)try{if(d.datepicker.parseDate(d.datepicker._get(a,"dateFormat"),a.input?a.input.val():null,d.datepicker._getFormatConfig(a))){d.datepicker._setDateFromField(a);d.datepicker._updateAlternate(a);d.datepicker._updateDatepicker(a)}}catch(b){d.datepicker.log(b)}return true},_showDatepicker:function(a){a=a.target|| +a;if(a.nodeName.toLowerCase()!="input")a=d("input",a.parentNode)[0];if(!(d.datepicker._isDisabledDatepicker(a)||d.datepicker._lastInput==a)){var b=d.datepicker._getInst(a);d.datepicker._curInst&&d.datepicker._curInst!=b&&d.datepicker._curInst.dpDiv.stop(true,true);var c=d.datepicker._get(b,"beforeShow");E(b.settings,c?c.apply(a,[a,b]):{});b.lastVal=null;d.datepicker._lastInput=a;d.datepicker._setDateFromField(b);if(d.datepicker._inDialog)a.value="";if(!d.datepicker._pos){d.datepicker._pos=d.datepicker._findPos(a); +d.datepicker._pos[1]+=a.offsetHeight}var 
e=false;d(a).parents().each(function(){e|=d(this).css("position")=="fixed";return!e});if(e&&d.browser.opera){d.datepicker._pos[0]-=document.documentElement.scrollLeft;d.datepicker._pos[1]-=document.documentElement.scrollTop}c={left:d.datepicker._pos[0],top:d.datepicker._pos[1]};d.datepicker._pos=null;b.dpDiv.css({position:"absolute",display:"block",top:"-1000px"});d.datepicker._updateDatepicker(b);c=d.datepicker._checkOffset(b,c,e);b.dpDiv.css({position:d.datepicker._inDialog&& +d.blockUI?"static":e?"fixed":"absolute",display:"none",left:c.left+"px",top:c.top+"px"});if(!b.inline){c=d.datepicker._get(b,"showAnim");var f=d.datepicker._get(b,"duration"),h=function(){d.datepicker._datepickerShowing=true;var i=d.datepicker._getBorders(b.dpDiv);b.dpDiv.find("iframe.ui-datepicker-cover").css({left:-i[0],top:-i[1],width:b.dpDiv.outerWidth(),height:b.dpDiv.outerHeight()})};b.dpDiv.zIndex(d(a).zIndex()+1);d.effects&&d.effects[c]?b.dpDiv.show(c,d.datepicker._get(b,"showOptions"),f, +h):b.dpDiv[c||"show"](c?f:null,h);if(!c||!f)h();b.input.is(":visible")&&!b.input.is(":disabled")&&b.input.focus();d.datepicker._curInst=b}}},_updateDatepicker:function(a){var b=this,c=d.datepicker._getBorders(a.dpDiv);a.dpDiv.empty().append(this._generateHTML(a)).find("iframe.ui-datepicker-cover").css({left:-c[0],top:-c[1],width:a.dpDiv.outerWidth(),height:a.dpDiv.outerHeight()}).end().find("button, .ui-datepicker-prev, .ui-datepicker-next, .ui-datepicker-calendar td a").bind("mouseout",function(){d(this).removeClass("ui-state-hover"); 
+this.className.indexOf("ui-datepicker-prev")!=-1&&d(this).removeClass("ui-datepicker-prev-hover");this.className.indexOf("ui-datepicker-next")!=-1&&d(this).removeClass("ui-datepicker-next-hover")}).bind("mouseover",function(){if(!b._isDisabledDatepicker(a.inline?a.dpDiv.parent()[0]:a.input[0])){d(this).parents(".ui-datepicker-calendar").find("a").removeClass("ui-state-hover");d(this).addClass("ui-state-hover");this.className.indexOf("ui-datepicker-prev")!=-1&&d(this).addClass("ui-datepicker-prev-hover"); +this.className.indexOf("ui-datepicker-next")!=-1&&d(this).addClass("ui-datepicker-next-hover")}}).end().find("."+this._dayOverClass+" a").trigger("mouseover").end();c=this._getNumberOfMonths(a);var e=c[1];e>1?a.dpDiv.addClass("ui-datepicker-multi-"+e).css("width",17*e+"em"):a.dpDiv.removeClass("ui-datepicker-multi-2 ui-datepicker-multi-3 ui-datepicker-multi-4").width("");a.dpDiv[(c[0]!=1||c[1]!=1?"add":"remove")+"Class"]("ui-datepicker-multi");a.dpDiv[(this._get(a,"isRTL")?"add":"remove")+"Class"]("ui-datepicker-rtl"); +a==d.datepicker._curInst&&d.datepicker._datepickerShowing&&a.input&&a.input.is(":visible")&&!a.input.is(":disabled")&&a.input.focus()},_getBorders:function(a){var b=function(c){return{thin:1,medium:2,thick:3}[c]||c};return[parseFloat(b(a.css("border-left-width"))),parseFloat(b(a.css("border-top-width")))]},_checkOffset:function(a,b,c){var e=a.dpDiv.outerWidth(),f=a.dpDiv.outerHeight(),h=a.input?a.input.outerWidth():0,i=a.input?a.input.outerHeight():0,g=document.documentElement.clientWidth+d(document).scrollLeft(), +k=document.documentElement.clientHeight+d(document).scrollTop();b.left-=this._get(a,"isRTL")?e-h:0;b.left-=c&&b.left==a.input.offset().left?d(document).scrollLeft():0;b.top-=c&&b.top==a.input.offset().top+i?d(document).scrollTop():0;b.left-=Math.min(b.left,b.left+e>g&&g>e?Math.abs(b.left+e-g):0);b.top-=Math.min(b.top,b.top+f>k&&k>f?Math.abs(f+i):0);return b},_findPos:function(a){for(var 
b=this._get(this._getInst(a),"isRTL");a&&(a.type=="hidden"||a.nodeType!=1);)a=a[b?"previousSibling":"nextSibling"]; +a=d(a).offset();return[a.left,a.top]},_hideDatepicker:function(a){var b=this._curInst;if(!(!b||a&&b!=d.data(a,"datepicker")))if(this._datepickerShowing){a=this._get(b,"showAnim");var c=this._get(b,"duration"),e=function(){d.datepicker._tidyDialog(b);this._curInst=null};d.effects&&d.effects[a]?b.dpDiv.hide(a,d.datepicker._get(b,"showOptions"),c,e):b.dpDiv[a=="slideDown"?"slideUp":a=="fadeIn"?"fadeOut":"hide"](a?c:null,e);a||e();if(a=this._get(b,"onClose"))a.apply(b.input?b.input[0]:null,[b.input?b.input.val(): +"",b]);this._datepickerShowing=false;this._lastInput=null;if(this._inDialog){this._dialogInput.css({position:"absolute",left:"0",top:"-100px"});if(d.blockUI){d.unblockUI();d("body").append(this.dpDiv)}}this._inDialog=false}},_tidyDialog:function(a){a.dpDiv.removeClass(this._dialogClass).unbind(".ui-datepicker-calendar")},_checkExternalClick:function(a){if(d.datepicker._curInst){a=d(a.target);a[0].id!=d.datepicker._mainDivId&&a.parents("#"+d.datepicker._mainDivId).length==0&&!a.hasClass(d.datepicker.markerClassName)&& +!a.hasClass(d.datepicker._triggerClass)&&d.datepicker._datepickerShowing&&!(d.datepicker._inDialog&&d.blockUI)&&d.datepicker._hideDatepicker()}},_adjustDate:function(a,b,c){a=d(a);var e=this._getInst(a[0]);if(!this._isDisabledDatepicker(a[0])){this._adjustInstDate(e,b+(c=="M"?this._get(e,"showCurrentAtPos"):0),c);this._updateDatepicker(e)}},_gotoToday:function(a){a=d(a);var b=this._getInst(a[0]);if(this._get(b,"gotoCurrent")&&b.currentDay){b.selectedDay=b.currentDay;b.drawMonth=b.selectedMonth=b.currentMonth; +b.drawYear=b.selectedYear=b.currentYear}else{var c=new Date;b.selectedDay=c.getDate();b.drawMonth=b.selectedMonth=c.getMonth();b.drawYear=b.selectedYear=c.getFullYear()}this._notifyChange(b);this._adjustDate(a)},_selectMonthYear:function(a,b,c){a=d(a);var 
e=this._getInst(a[0]);e._selectingMonthYear=false;e["selected"+(c=="M"?"Month":"Year")]=e["draw"+(c=="M"?"Month":"Year")]=parseInt(b.options[b.selectedIndex].value,10);this._notifyChange(e);this._adjustDate(a)},_clickMonthYear:function(a){var b= +this._getInst(d(a)[0]);b.input&&b._selectingMonthYear&&setTimeout(function(){b.input.focus()},0);b._selectingMonthYear=!b._selectingMonthYear},_selectDay:function(a,b,c,e){var f=d(a);if(!(d(e).hasClass(this._unselectableClass)||this._isDisabledDatepicker(f[0]))){f=this._getInst(f[0]);f.selectedDay=f.currentDay=d("a",e).html();f.selectedMonth=f.currentMonth=b;f.selectedYear=f.currentYear=c;this._selectDate(a,this._formatDate(f,f.currentDay,f.currentMonth,f.currentYear))}},_clearDate:function(a){a= +d(a);this._getInst(a[0]);this._selectDate(a,"")},_selectDate:function(a,b){a=this._getInst(d(a)[0]);b=b!=null?b:this._formatDate(a);a.input&&a.input.val(b);this._updateAlternate(a);var c=this._get(a,"onSelect");if(c)c.apply(a.input?a.input[0]:null,[b,a]);else a.input&&a.input.trigger("change");if(a.inline)this._updateDatepicker(a);else{this._hideDatepicker();this._lastInput=a.input[0];typeof a.input[0]!="object"&&a.input.focus();this._lastInput=null}},_updateAlternate:function(a){var b=this._get(a, +"altField");if(b){var c=this._get(a,"altFormat")||this._get(a,"dateFormat"),e=this._getDate(a),f=this.formatDate(c,e,this._getFormatConfig(a));d(b).each(function(){d(this).val(f)})}},noWeekends:function(a){a=a.getDay();return[a>0&&a<6,""]},iso8601Week:function(a){a=new Date(a.getTime());a.setDate(a.getDate()+4-(a.getDay()||7));var b=a.getTime();a.setMonth(0);a.setDate(1);return Math.floor(Math.round((b-a)/864E5)/7)+1},parseDate:function(a,b,c){if(a==null||b==null)throw"Invalid arguments";b=typeof b== +"object"?b.toString():b+"";if(b=="")return null;for(var 
e=(c?c.shortYearCutoff:null)||this._defaults.shortYearCutoff,f=(c?c.dayNamesShort:null)||this._defaults.dayNamesShort,h=(c?c.dayNames:null)||this._defaults.dayNames,i=(c?c.monthNamesShort:null)||this._defaults.monthNamesShort,g=(c?c.monthNames:null)||this._defaults.monthNames,k=c=-1,l=-1,u=-1,j=false,o=function(p){(p=z+1 +-1){k=1;l=u;do{e=this._getDaysInMonth(c,k-1);if(l<=e)break;k++;l-=e}while(1)}v=this._daylightSavingAdjust(new Date(c,k-1,l));if(v.getFullYear()!=c||v.getMonth()+1!=k||v.getDate()!=l)throw"Invalid date";return v},ATOM:"yy-mm-dd",COOKIE:"D, dd M yy",ISO_8601:"yy-mm-dd",RFC_822:"D, d M y",RFC_850:"DD, dd-M-y",RFC_1036:"D, d M y",RFC_1123:"D, d M yy",RFC_2822:"D, d M yy",RSS:"D, d M y",TICKS:"!",TIMESTAMP:"@",W3C:"yy-mm-dd",_ticksTo1970:(718685+Math.floor(492.5)-Math.floor(19.7)+Math.floor(4.925))*24* +60*60*1E7,formatDate:function(a,b,c){if(!b)return"";var e=(c?c.dayNamesShort:null)||this._defaults.dayNamesShort,f=(c?c.dayNames:null)||this._defaults.dayNames,h=(c?c.monthNamesShort:null)||this._defaults.monthNamesShort;c=(c?c.monthNames:null)||this._defaults.monthNames;var i=function(o){(o=j+112?a.getHours()+2:0);return a},_setDate:function(a,b,c){var e=!b,f=a.selectedMonth,h=a.selectedYear;b=this._restrictMinMax(a,this._determineDate(a,b,new Date));a.selectedDay=a.currentDay=b.getDate();a.drawMonth=a.selectedMonth=a.currentMonth=b.getMonth();a.drawYear=a.selectedYear=a.currentYear=b.getFullYear();if((f!=a.selectedMonth||h!=a.selectedYear)&&!c)this._notifyChange(a);this._adjustInstDate(a);if(a.input)a.input.val(e? 
+"":this._formatDate(a))},_getDate:function(a){return!a.currentYear||a.input&&a.input.val()==""?null:this._daylightSavingAdjust(new Date(a.currentYear,a.currentMonth,a.currentDay))},_generateHTML:function(a){var b=new Date;b=this._daylightSavingAdjust(new Date(b.getFullYear(),b.getMonth(),b.getDate()));var c=this._get(a,"isRTL"),e=this._get(a,"showButtonPanel"),f=this._get(a,"hideIfNoPrevNext"),h=this._get(a,"navigationAsDateFormat"),i=this._getNumberOfMonths(a),g=this._get(a,"showCurrentAtPos"),k= +this._get(a,"stepMonths"),l=i[0]!=1||i[1]!=1,u=this._daylightSavingAdjust(!a.currentDay?new Date(9999,9,9):new Date(a.currentYear,a.currentMonth,a.currentDay)),j=this._getMinMaxDate(a,"min"),o=this._getMinMaxDate(a,"max");g=a.drawMonth-g;var m=a.drawYear;if(g<0){g+=12;m--}if(o){var n=this._daylightSavingAdjust(new Date(o.getFullYear(),o.getMonth()-i[0]*i[1]+1,o.getDate()));for(n=j&&nn;){g--;if(g<0){g=11;m--}}}a.drawMonth=g;a.drawYear=m;n=this._get(a, +"prevText");n=!h?n:this.formatDate(n,this._daylightSavingAdjust(new Date(m,g-k,1)),this._getFormatConfig(a));n=this._canAdjustMonth(a,-1,m,g)?''+n+"":f?"":''+ +n+"";var r=this._get(a,"nextText");r=!h?r:this.formatDate(r,this._daylightSavingAdjust(new Date(m,g+k,1)),this._getFormatConfig(a));f=this._canAdjustMonth(a,+1,m,g)?''+r+"":f?"":''+r+"";k=this._get(a,"currentText");r=this._get(a,"gotoCurrent")&&a.currentDay?u:b;k=!h?k:this.formatDate(k,r,this._getFormatConfig(a));h=!a.inline?'":"";e=e?'
    '+(c?h:"")+(this._isInRange(a,r)?'":"")+(c?"":h)+"
    ":"";h=parseInt(this._get(a,"firstDay"),10);h=isNaN(h)?0:h;k=this._get(a,"showWeek");r=this._get(a,"dayNames");this._get(a,"dayNamesShort");var s=this._get(a,"dayNamesMin"),z=this._get(a,"monthNames"),v=this._get(a,"monthNamesShort"),p=this._get(a,"beforeShowDay"),w=this._get(a,"showOtherMonths"),H=this._get(a,"selectOtherMonths");this._get(a,"calculateWeek");for(var M=this._getDefaultDate(a),I="",C=0;C1)switch(D){case 0:x+=" ui-datepicker-group-first";t=" ui-corner-"+(c?"right":"left");break;case i[1]-1:x+=" ui-datepicker-group-last";t=" ui-corner-"+(c?"left":"right");break;default:x+=" ui-datepicker-group-middle";t="";break}x+='">'}x+='
    '+(/all|left/.test(t)&&C==0?c? +f:n:"")+(/all|right/.test(t)&&C==0?c?n:f:"")+this._generateMonthYearHeader(a,g,m,j,o,C>0||D>0,z,v)+'
    ';var A=k?'":"";for(t=0;t<7;t++){var q=(t+h)%7;A+="=5?' class="ui-datepicker-week-end"':"")+'>'+s[q]+""}x+=A+"";A=this._getDaysInMonth(m,g);if(m==a.selectedYear&&g==a.selectedMonth)a.selectedDay=Math.min(a.selectedDay, +A);t=(this._getFirstDayOfMonth(m,g)-h+7)%7;A=l?6:Math.ceil((t+A)/7);q=this._daylightSavingAdjust(new Date(m,g,1-t));for(var O=0;O";var P=!k?"":'";for(t=0;t<7;t++){var F=p?p.apply(a.input?a.input[0]:null,[q]):[true,""],B=q.getMonth()!=g,K=B&&!H||!F[0]||j&&qo;P+='";q.setDate(q.getDate()+1);q=this._daylightSavingAdjust(q)}x+=P+""}g++;if(g>11){g=0;m++}x+="
    '+this._get(a,"weekHeader")+"
    '+this._get(a,"calculateWeek")(q)+""+(B&&!w?" ":K?''+q.getDate()+ +"":''+q.getDate()+"")+"
    "+(l?""+(i[0]>0&&D==i[1]-1?'
    ':""):"");N+=x}I+=N}I+=e+(d.browser.msie&&parseInt(d.browser.version,10)<7&&!a.inline?'': +"");a._keyEvent=false;return I},_generateMonthYearHeader:function(a,b,c,e,f,h,i,g){var k=this._get(a,"changeMonth"),l=this._get(a,"changeYear"),u=this._get(a,"showMonthAfterYear"),j='
    ',o="";if(h||!k)o+=''+i[b]+"";else{i=e&&e.getFullYear()==c;var m=f&&f.getFullYear()==c;o+='"}u||(j+=o+(h||!(k&&l)?" ":""));if(h||!l)j+=''+c+"";else{g=this._get(a,"yearRange").split(":");var r=(new Date).getFullYear();i=function(s){s=s.match(/c[+-].*/)?c+parseInt(s.substring(1),10):s.match(/[+-].*/)?r+parseInt(s,10):parseInt(s,10);return isNaN(s)?r:s};b=i(g[0]);g=Math.max(b, +i(g[1]||""));b=e?Math.max(b,e.getFullYear()):b;g=f?Math.min(g,f.getFullYear()):g;for(j+='"}j+=this._get(a,"yearSuffix");if(u)j+=(h||!(k&&l)?" ":"")+o;j+="
    ";return j},_adjustInstDate:function(a,b,c){var e= +a.drawYear+(c=="Y"?b:0),f=a.drawMonth+(c=="M"?b:0);b=Math.min(a.selectedDay,this._getDaysInMonth(e,f))+(c=="D"?b:0);e=this._restrictMinMax(a,this._daylightSavingAdjust(new Date(e,f,b)));a.selectedDay=e.getDate();a.drawMonth=a.selectedMonth=e.getMonth();a.drawYear=a.selectedYear=e.getFullYear();if(c=="M"||c=="Y")this._notifyChange(a)},_restrictMinMax:function(a,b){var c=this._getMinMaxDate(a,"min");a=this._getMinMaxDate(a,"max");b=c&&ba?a:b},_notifyChange:function(a){var b=this._get(a, +"onChangeMonthYear");if(b)b.apply(a.input?a.input[0]:null,[a.selectedYear,a.selectedMonth+1,a])},_getNumberOfMonths:function(a){a=this._get(a,"numberOfMonths");return a==null?[1,1]:typeof a=="number"?[1,a]:a},_getMinMaxDate:function(a,b){return this._determineDate(a,this._get(a,b+"Date"),null)},_getDaysInMonth:function(a,b){return 32-(new Date(a,b,32)).getDate()},_getFirstDayOfMonth:function(a,b){return(new Date(a,b,1)).getDay()},_canAdjustMonth:function(a,b,c,e){var f=this._getNumberOfMonths(a); +c=this._daylightSavingAdjust(new Date(c,e+(b<0?b:f[0]*f[1]),1));b<0&&c.setDate(this._getDaysInMonth(c.getFullYear(),c.getMonth()));return this._isInRange(a,c)},_isInRange:function(a,b){var c=this._getMinMaxDate(a,"min");a=this._getMinMaxDate(a,"max");return(!c||b.getTime()>=c.getTime())&&(!a||b.getTime()<=a.getTime())},_getFormatConfig:function(a){var b=this._get(a,"shortYearCutoff");b=typeof b!="string"?b:(new Date).getFullYear()%100+parseInt(b,10);return{shortYearCutoff:b,dayNamesShort:this._get(a, +"dayNamesShort"),dayNames:this._get(a,"dayNames"),monthNamesShort:this._get(a,"monthNamesShort"),monthNames:this._get(a,"monthNames")}},_formatDate:function(a,b,c,e){if(!b){a.currentDay=a.selectedDay;a.currentMonth=a.selectedMonth;a.currentYear=a.selectedYear}b=b?typeof b=="object"?b:this._daylightSavingAdjust(new Date(e,c,b)):this._daylightSavingAdjust(new Date(a.currentYear,a.currentMonth,a.currentDay));return 
this.formatDate(this._get(a,"dateFormat"),b,this._getFormatConfig(a))}});d.fn.datepicker= +function(a){if(!d.datepicker.initialized){d(document).mousedown(d.datepicker._checkExternalClick).find("body").append(d.datepicker.dpDiv);d.datepicker.initialized=true}var b=Array.prototype.slice.call(arguments,1);if(typeof a=="string"&&(a=="isDisabled"||a=="getDate"||a=="widget"))return d.datepicker["_"+a+"Datepicker"].apply(d.datepicker,[this[0]].concat(b));if(a=="option"&&arguments.length==2&&typeof arguments[1]=="string")return d.datepicker["_"+a+"Datepicker"].apply(d.datepicker,[this[0]].concat(b)); +return this.each(function(){typeof a=="string"?d.datepicker["_"+a+"Datepicker"].apply(d.datepicker,[this].concat(b)):d.datepicker._attachDatepicker(this,a)})};d.datepicker=new L;d.datepicker.initialized=false;d.datepicker.uuid=(new Date).getTime();d.datepicker.version="1.8.5";window["DP_jQuery_"+y]=d})(jQuery); +;/* + * jQuery UI Progressbar 1.8.5 + * + * Copyright 2010, AUTHORS.txt (http://jqueryui.com/about) + * Dual licensed under the MIT or GPL Version 2 licenses. + * http://jquery.org/license + * + * http://docs.jquery.com/UI/Progressbar + * + * Depends: + * jquery.ui.core.js + * jquery.ui.widget.js + */ +(function(b,c){b.widget("ui.progressbar",{options:{value:0},min:0,max:100,_create:function(){this.element.addClass("ui-progressbar ui-widget ui-widget-content ui-corner-all").attr({role:"progressbar","aria-valuemin":this.min,"aria-valuemax":this.max,"aria-valuenow":this._value()});this.valueDiv=b("
    ").appendTo(this.element);this._refreshValue()},destroy:function(){this.element.removeClass("ui-progressbar ui-widget ui-widget-content ui-corner-all").removeAttr("role").removeAttr("aria-valuemin").removeAttr("aria-valuemax").removeAttr("aria-valuenow"); +this.valueDiv.remove();b.Widget.prototype.destroy.apply(this,arguments)},value:function(a){if(a===c)return this._value();this._setOption("value",a);return this},_setOption:function(a,d){if(a==="value"){this.options.value=d;this._refreshValue();this._trigger("change")}b.Widget.prototype._setOption.apply(this,arguments)},_value:function(){var a=this.options.value;if(typeof a!=="number")a=0;return Math.min(this.max,Math.max(this.min,a))},_refreshValue:function(){var a=this.value();this.valueDiv.toggleClass("ui-corner-right", +a===this.max).width(a+"%");this.element.attr("aria-valuenow",a)}});b.extend(b.ui.progressbar,{version:"1.8.5"})})(jQuery); +;/* + * jQuery UI Effects 1.8.5 + * + * Copyright 2010, AUTHORS.txt (http://jqueryui.com/about) + * Dual licensed under the MIT or GPL Version 2 licenses. 
+ * http://jquery.org/license + * + * http://docs.jquery.com/UI/Effects/ + */ +jQuery.effects||function(f,j){function l(c){var a;if(c&&c.constructor==Array&&c.length==3)return c;if(a=/rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(c))return[parseInt(a[1],10),parseInt(a[2],10),parseInt(a[3],10)];if(a=/rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(c))return[parseFloat(a[1])*2.55,parseFloat(a[2])*2.55,parseFloat(a[3])*2.55];if(a=/#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(c))return[parseInt(a[1], +16),parseInt(a[2],16),parseInt(a[3],16)];if(a=/#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(c))return[parseInt(a[1]+a[1],16),parseInt(a[2]+a[2],16),parseInt(a[3]+a[3],16)];if(/rgba\(0, 0, 0, 0\)/.exec(c))return m.transparent;return m[f.trim(c).toLowerCase()]}function r(c,a){var b;do{b=f.curCSS(c,a);if(b!=""&&b!="transparent"||f.nodeName(c,"body"))break;a="backgroundColor"}while(c=c.parentNode);return l(b)}function n(){var c=document.defaultView?document.defaultView.getComputedStyle(this,null):this.currentStyle, +a={},b,d;if(c&&c.length&&c[0]&&c[c[0]])for(var e=c.length;e--;){b=c[e];if(typeof c[b]=="string"){d=b.replace(/\-(\w)/g,function(g,h){return h.toUpperCase()});a[d]=c[b]}}else for(b in c)if(typeof c[b]==="string")a[b]=c[b];return a}function o(c){var a,b;for(a in c){b=c[a];if(b==null||f.isFunction(b)||a in s||/scrollbar/.test(a)||!/color/i.test(a)&&isNaN(parseFloat(b)))delete c[a]}return c}function t(c,a){var b={_:0},d;for(d in a)if(c[d]!=a[d])b[d]=a[d];return b}function k(c,a,b,d){if(typeof c=="object"){d= +a;b=null;a=c;c=a.effect}if(f.isFunction(a)){d=a;b=null;a={}}if(typeof a=="number"||f.fx.speeds[a]){d=b;b=a;a={}}if(f.isFunction(b)){d=b;b=null}a=a||{};b=b||a.duration;b=f.fx.off?0:typeof 
b=="number"?b:f.fx.speeds[b]||f.fx.speeds._default;d=d||a.complete;return[c,a,b,d]}f.effects={};f.each(["backgroundColor","borderBottomColor","borderLeftColor","borderRightColor","borderTopColor","color","outlineColor"],function(c,a){f.fx.step[a]=function(b){if(!b.colorInit){b.start=r(b.elem,a);b.end=l(b.end);b.colorInit= +true}b.elem.style[a]="rgb("+Math.max(Math.min(parseInt(b.pos*(b.end[0]-b.start[0])+b.start[0],10),255),0)+","+Math.max(Math.min(parseInt(b.pos*(b.end[1]-b.start[1])+b.start[1],10),255),0)+","+Math.max(Math.min(parseInt(b.pos*(b.end[2]-b.start[2])+b.start[2],10),255),0)+")"}});var m={aqua:[0,255,255],azure:[240,255,255],beige:[245,245,220],black:[0,0,0],blue:[0,0,255],brown:[165,42,42],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgrey:[169,169,169],darkgreen:[0,100,0],darkkhaki:[189, +183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkviolet:[148,0,211],fuchsia:[255,0,255],gold:[255,215,0],green:[0,128,0],indigo:[75,0,130],khaki:[240,230,140],lightblue:[173,216,230],lightcyan:[224,255,255],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightyellow:[255,255,224],lime:[0,255,0],magenta:[255,0,255],maroon:[128,0,0],navy:[0,0,128],olive:[128,128,0],orange:[255, +165,0],pink:[255,192,203],purple:[128,0,128],violet:[128,0,128],red:[255,0,0],silver:[192,192,192],white:[255,255,255],yellow:[255,255,0],transparent:[255,255,255]},p=["add","remove","toggle"],s={border:1,borderBottom:1,borderColor:1,borderLeft:1,borderRight:1,borderTop:1,borderWidth:1,margin:1,padding:1};f.effects.animateClass=function(c,a,b,d){if(f.isFunction(b)){d=b;b=null}return this.each(function(){var e=f(this),g=e.attr("style")||" ",h=o(n.call(this)),q,u=e.attr("className");f.each(p,function(v, 
+i){c[i]&&e[i+"Class"](c[i])});q=o(n.call(this));e.attr("className",u);e.animate(t(h,q),a,b,function(){f.each(p,function(v,i){c[i]&&e[i+"Class"](c[i])});if(typeof e.attr("style")=="object"){e.attr("style").cssText="";e.attr("style").cssText=g}else e.attr("style",g);d&&d.apply(this,arguments)})})};f.fn.extend({_addClass:f.fn.addClass,addClass:function(c,a,b,d){return a?f.effects.animateClass.apply(this,[{add:c},a,b,d]):this._addClass(c)},_removeClass:f.fn.removeClass,removeClass:function(c,a,b,d){return a? +f.effects.animateClass.apply(this,[{remove:c},a,b,d]):this._removeClass(c)},_toggleClass:f.fn.toggleClass,toggleClass:function(c,a,b,d,e){return typeof a=="boolean"||a===j?b?f.effects.animateClass.apply(this,[a?{add:c}:{remove:c},b,d,e]):this._toggleClass(c,a):f.effects.animateClass.apply(this,[{toggle:c},a,b,d])},switchClass:function(c,a,b,d,e){return f.effects.animateClass.apply(this,[{add:a,remove:c},b,d,e])}});f.extend(f.effects,{version:"1.8.5",save:function(c,a){for(var b=0;b").addClass("ui-effects-wrapper").css({fontSize:"100%",background:"transparent",border:"none",margin:0,padding:0});c.wrap(b);b=c.parent();if(c.css("position")=="static"){b.css({position:"relative"});c.css({position:"relative"})}else{f.extend(a,{position:c.css("position"),zIndex:c.css("z-index")});f.each(["top","left","bottom","right"],function(d,e){a[e]=c.css(e);if(isNaN(parseInt(a[e],10)))a[e]="auto"}); +c.css({position:"relative",top:0,left:0})}return b.css(a).show()},removeWrapper:function(c){if(c.parent().is(".ui-effects-wrapper"))return c.parent().replaceWith(c);return c},setTransition:function(c,a,b,d){d=d||{};f.each(a,function(e,g){unit=c.cssUnit(g);if(unit[0]>0)d[g]=unit[0]*b+unit[1]});return d}});f.fn.extend({effect:function(c){var a=k.apply(this,arguments);a={options:a[1],duration:a[2],callback:a[3]};var b=f.effects[c];return b&&!f.fx.off?b.call(this,a):this},_show:f.fn.show,show:function(c){if(!c|| +typeof c=="number"||f.fx.speeds[c]||!f.effects[c])return 
this._show.apply(this,arguments);else{var a=k.apply(this,arguments);a[1].mode="show";return this.effect.apply(this,a)}},_hide:f.fn.hide,hide:function(c){if(!c||typeof c=="number"||f.fx.speeds[c]||!f.effects[c])return this._hide.apply(this,arguments);else{var a=k.apply(this,arguments);a[1].mode="hide";return this.effect.apply(this,a)}},__toggle:f.fn.toggle,toggle:function(c){if(!c||typeof c=="number"||f.fx.speeds[c]||!f.effects[c]||typeof c== +"boolean"||f.isFunction(c))return this.__toggle.apply(this,arguments);else{var a=k.apply(this,arguments);a[1].mode="toggle";return this.effect.apply(this,a)}},cssUnit:function(c){var a=this.css(c),b=[];f.each(["em","px","%","pt"],function(d,e){if(a.indexOf(e)>0)b=[parseFloat(a),e]});return b}});f.easing.jswing=f.easing.swing;f.extend(f.easing,{def:"easeOutQuad",swing:function(c,a,b,d,e){return f.easing[f.easing.def](c,a,b,d,e)},easeInQuad:function(c,a,b,d,e){return d*(a/=e)*a+b},easeOutQuad:function(c, +a,b,d,e){return-d*(a/=e)*(a-2)+b},easeInOutQuad:function(c,a,b,d,e){if((a/=e/2)<1)return d/2*a*a+b;return-d/2*(--a*(a-2)-1)+b},easeInCubic:function(c,a,b,d,e){return d*(a/=e)*a*a+b},easeOutCubic:function(c,a,b,d,e){return d*((a=a/e-1)*a*a+1)+b},easeInOutCubic:function(c,a,b,d,e){if((a/=e/2)<1)return d/2*a*a*a+b;return d/2*((a-=2)*a*a+2)+b},easeInQuart:function(c,a,b,d,e){return d*(a/=e)*a*a*a+b},easeOutQuart:function(c,a,b,d,e){return-d*((a=a/e-1)*a*a*a-1)+b},easeInOutQuart:function(c,a,b,d,e){if((a/= +e/2)<1)return d/2*a*a*a*a+b;return-d/2*((a-=2)*a*a*a-2)+b},easeInQuint:function(c,a,b,d,e){return d*(a/=e)*a*a*a*a+b},easeOutQuint:function(c,a,b,d,e){return d*((a=a/e-1)*a*a*a*a+1)+b},easeInOutQuint:function(c,a,b,d,e){if((a/=e/2)<1)return d/2*a*a*a*a*a+b;return d/2*((a-=2)*a*a*a*a+2)+b},easeInSine:function(c,a,b,d,e){return-d*Math.cos(a/e*(Math.PI/2))+d+b},easeOutSine:function(c,a,b,d,e){return d*Math.sin(a/e*(Math.PI/2))+b},easeInOutSine:function(c,a,b,d,e){return-d/2*(Math.cos(Math.PI*a/e)-1)+ 
+b},easeInExpo:function(c,a,b,d,e){return a==0?b:d*Math.pow(2,10*(a/e-1))+b},easeOutExpo:function(c,a,b,d,e){return a==e?b+d:d*(-Math.pow(2,-10*a/e)+1)+b},easeInOutExpo:function(c,a,b,d,e){if(a==0)return b;if(a==e)return b+d;if((a/=e/2)<1)return d/2*Math.pow(2,10*(a-1))+b;return d/2*(-Math.pow(2,-10*--a)+2)+b},easeInCirc:function(c,a,b,d,e){return-d*(Math.sqrt(1-(a/=e)*a)-1)+b},easeOutCirc:function(c,a,b,d,e){return d*Math.sqrt(1-(a=a/e-1)*a)+b},easeInOutCirc:function(c,a,b,d,e){if((a/=e/2)<1)return-d/ +2*(Math.sqrt(1-a*a)-1)+b;return d/2*(Math.sqrt(1-(a-=2)*a)+1)+b},easeInElastic:function(c,a,b,d,e){c=1.70158;var g=0,h=d;if(a==0)return b;if((a/=e)==1)return b+d;g||(g=e*0.3);if(h").css({position:"absolute",visibility:"visible",left:-f*(h/d),top:-e*(i/c)}).parent().addClass("ui-effects-explode").css({position:"absolute",overflow:"hidden",width:h/d,height:i/c,left:g.left+f*(h/d)+(a.options.mode=="show"?(f-Math.floor(d/2))*(h/d):0),top:g.top+e*(i/c)+(a.options.mode=="show"?(e-Math.floor(c/2))*(i/c):0),opacity:a.options.mode=="show"?0:1}).animate({left:g.left+f*(h/d)+(a.options.mode=="show"?0:(f-Math.floor(d/2))*(h/d)),top:g.top+ +e*(i/c)+(a.options.mode=="show"?0:(e-Math.floor(c/2))*(i/c)),opacity:a.options.mode=="show"?1:0},a.duration||500);setTimeout(function(){a.options.mode=="show"?b.css({visibility:"visible"}):b.css({visibility:"visible"}).hide();a.callback&&a.callback.apply(b[0]);b.dequeue();j("div.ui-effects-explode").remove()},a.duration||500)})}})(jQuery); +;/* + * jQuery UI Effects Fade 1.8.5 + * + * Copyright 2010, AUTHORS.txt (http://jqueryui.com/about) + * Dual licensed under the MIT or GPL Version 2 licenses. 
+ * http://jquery.org/license + * + * http://docs.jquery.com/UI/Effects/Fade + * + * Depends: + * jquery.effects.core.js + */ +(function(b){b.effects.fade=function(a){return this.queue(function(){var c=b(this),d=b.effects.setMode(c,a.options.mode||"hide");c.animate({opacity:d},{queue:false,duration:a.duration,easing:a.options.easing,complete:function(){a.callback&&a.callback.apply(this,arguments);c.dequeue()}})})}})(jQuery); +;/* + * jQuery UI Effects Fold 1.8.5 + * + * Copyright 2010, AUTHORS.txt (http://jqueryui.com/about) + * Dual licensed under the MIT or GPL Version 2 licenses. + * http://jquery.org/license + * + * http://docs.jquery.com/UI/Effects/Fold + * + * Depends: + * jquery.effects.core.js + */ +(function(c){c.effects.fold=function(a){return this.queue(function(){var b=c(this),j=["position","top","left"],d=c.effects.setMode(b,a.options.mode||"hide"),g=a.options.size||15,h=!!a.options.horizFirst,k=a.duration?a.duration/2:c.fx.speeds._default/2;c.effects.save(b,j);b.show();var e=c.effects.createWrapper(b).css({overflow:"hidden"}),f=d=="show"!=h,l=f?["width","height"]:["height","width"];f=f?[e.width(),e.height()]:[e.height(),e.width()];var i=/([0-9]+)%/.exec(g);if(i)g=parseInt(i[1],10)/100* +f[d=="hide"?0:1];if(d=="show")e.css(h?{height:0,width:g}:{height:g,width:0});h={};i={};h[l[0]]=d=="show"?f[0]:g;i[l[1]]=d=="show"?f[1]:0;e.animate(h,k,a.options.easing).animate(i,k,a.options.easing,function(){d=="hide"&&b.hide();c.effects.restore(b,j);c.effects.removeWrapper(b);a.callback&&a.callback.apply(b[0],arguments);b.dequeue()})})}})(jQuery); +;/* + * jQuery UI Effects Highlight 1.8.5 + * + * Copyright 2010, AUTHORS.txt (http://jqueryui.com/about) + * Dual licensed under the MIT or GPL Version 2 licenses. 
+ * http://jquery.org/license + * + * http://docs.jquery.com/UI/Effects/Highlight + * + * Depends: + * jquery.effects.core.js + */ +(function(b){b.effects.highlight=function(c){return this.queue(function(){var a=b(this),e=["backgroundImage","backgroundColor","opacity"],d=b.effects.setMode(a,c.options.mode||"show"),f={backgroundColor:a.css("backgroundColor")};if(d=="hide")f.opacity=0;b.effects.save(a,e);a.show().css({backgroundImage:"none",backgroundColor:c.options.color||"#ffff99"}).animate(f,{queue:false,duration:c.duration,easing:c.options.easing,complete:function(){d=="hide"&&a.hide();b.effects.restore(a,e);d=="show"&&!b.support.opacity&& +this.style.removeAttribute("filter");c.callback&&c.callback.apply(this,arguments);a.dequeue()}})})}})(jQuery); +;/* + * jQuery UI Effects Pulsate 1.8.5 + * + * Copyright 2010, AUTHORS.txt (http://jqueryui.com/about) + * Dual licensed under the MIT or GPL Version 2 licenses. + * http://jquery.org/license + * + * http://docs.jquery.com/UI/Effects/Pulsate + * + * Depends: + * jquery.effects.core.js + */ +(function(d){d.effects.pulsate=function(a){return this.queue(function(){var b=d(this),c=d.effects.setMode(b,a.options.mode||"show");times=(a.options.times||5)*2-1;duration=a.duration?a.duration/2:d.fx.speeds._default/2;isVisible=b.is(":visible");animateTo=0;if(!isVisible){b.css("opacity",0).show();animateTo=1}if(c=="hide"&&isVisible||c=="show"&&!isVisible)times--;for(c=0;c').appendTo(document.body).addClass(a.options.className).css({top:d.top,left:d.left,height:b.innerHeight(),width:b.innerWidth(),position:"absolute"}).animate(c,a.duration,a.options.easing,function(){f.remove();a.callback&&a.callback.apply(b[0],arguments); +b.dequeue()})})}})(jQuery); +; \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/clients/chrome/clip-it-good/json2.js b/vendor/github.com/camlistore/camlistore/clients/chrome/clip-it-good/json2.js new file mode 100644 index 00000000..39d8f370 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/clients/chrome/clip-it-good/json2.js @@ -0,0 +1,481 @@ +/* + http://www.JSON.org/json2.js + 2009-09-29 + + Public Domain. + + NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + + See http://www.JSON.org/js.html + + + This code should be minified before deployment. + See http://javascript.crockford.com/jsmin.html + + USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO + NOT CONTROL. + + + This file creates a global JSON object containing two methods: stringify + and parse. + + JSON.stringify(value, replacer, space) + value any JavaScript value, usually an object or array. + + replacer an optional parameter that determines how object + values are stringified for objects. It can be a + function or an array of strings. + + space an optional parameter that specifies the indentation + of nested structures. If it is omitted, the text will + be packed without extra whitespace. If it is a number, + it will specify the number of spaces to indent at each + level. If it is a string (such as '\t' or ' '), + it contains the characters used to indent at each level. + + This method produces a JSON text from a JavaScript value. + + When an object value is found, if the object contains a toJSON + method, its toJSON method will be called and the result will be + stringified. A toJSON method does not serialize: it returns the + value represented by the name/value pair that should be serialized, + or undefined if nothing should be serialized. The toJSON method + will be passed the key associated with the value, and this will be + bound to the value + + For example, this would serialize Dates as ISO strings. + + Date.prototype.toJSON = function (key) { + function f(n) { + // Format integers to have at least two digits. + return n < 10 ? 
'0' + n : n; + } + + return this.getUTCFullYear() + '-' + + f(this.getUTCMonth() + 1) + '-' + + f(this.getUTCDate()) + 'T' + + f(this.getUTCHours()) + ':' + + f(this.getUTCMinutes()) + ':' + + f(this.getUTCSeconds()) + 'Z'; + }; + + You can provide an optional replacer method. It will be passed the + key and value of each member, with this bound to the containing + object. The value that is returned from your method will be + serialized. If your method returns undefined, then the member will + be excluded from the serialization. + + If the replacer parameter is an array of strings, then it will be + used to select the members to be serialized. It filters the results + such that only members with keys listed in the replacer array are + stringified. + + Values that do not have JSON representations, such as undefined or + functions, will not be serialized. Such values in objects will be + dropped; in arrays they will be replaced with null. You can use + a replacer function to replace those with JSON values. + JSON.stringify(undefined) returns undefined. + + The optional space parameter produces a stringification of the + value that is filled with line breaks and indentation to make it + easier to read. + + If the space parameter is a non-empty string, then that string will + be used for indentation. If the space parameter is a number, then + the indentation will be that many spaces. + + Example: + + text = JSON.stringify(['e', {pluribus: 'unum'}]); + // text is '["e",{"pluribus":"unum"}]' + + + text = JSON.stringify(['e', {pluribus: 'unum'}], null, '\t'); + // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]' + + text = JSON.stringify([new Date()], function (key, value) { + return this[key] instanceof Date ? + 'Date(' + this[key] + ')' : value; + }); + // text is '["Date(---current time---)"]' + + + JSON.parse(text, reviver) + This method parses a JSON text to produce an object or array. + It can throw a SyntaxError exception. 
+ + The optional reviver parameter is a function that can filter and + transform the results. It receives each of the keys and values, + and its return value is used instead of the original value. + If it returns what it received, then the structure is not modified. + If it returns undefined then the member is deleted. + + Example: + + // Parse the text. Values that look like ISO date strings will + // be converted to Date objects. + + myData = JSON.parse(text, function (key, value) { + var a; + if (typeof value === 'string') { + a = +/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value); + if (a) { + return new Date(Date.UTC(+a[1], +a[2] - 1, +a[3], +a[4], + +a[5], +a[6])); + } + } + return value; + }); + + myData = JSON.parse('["Date(09/09/2001)"]', function (key, value) { + var d; + if (typeof value === 'string' && + value.slice(0, 5) === 'Date(' && + value.slice(-1) === ')') { + d = new Date(value.slice(5, -1)); + if (d) { + return d; + } + } + return value; + }); + + + This is a reference implementation. You are free to copy, modify, or + redistribute. +*/ + +/*jslint evil: true, strict: false */ + +/*members "", "\b", "\t", "\n", "\f", "\r", "\"", JSON, "\\", apply, + call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours, + getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join, + lastIndex, length, parse, prototype, push, replace, slice, stringify, + test, toJSON, toString, valueOf +*/ + + +// Create a JSON object only if one does not already exist. We create the +// methods in a closure to avoid creating global variables. + +if (!this.JSON) { + this.JSON = {}; +} + +(function () { + + function f(n) { + // Format integers to have at least two digits. + return n < 10 ? '0' + n : n; + } + + if (typeof Date.prototype.toJSON !== 'function') { + + Date.prototype.toJSON = function (key) { + + return isFinite(this.valueOf()) ? 
+ this.getUTCFullYear() + '-' + + f(this.getUTCMonth() + 1) + '-' + + f(this.getUTCDate()) + 'T' + + f(this.getUTCHours()) + ':' + + f(this.getUTCMinutes()) + ':' + + f(this.getUTCSeconds()) + 'Z' : null; + }; + + String.prototype.toJSON = + Number.prototype.toJSON = + Boolean.prototype.toJSON = function (key) { + return this.valueOf(); + }; + } + + var cx = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g, + escapable = /[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g, + gap, + indent, + meta = { // table of character substitutions + '\b': '\\b', + '\t': '\\t', + '\n': '\\n', + '\f': '\\f', + '\r': '\\r', + '"' : '\\"', + '\\': '\\\\' + }, + rep; + + + function quote(string) { + +// If the string contains no control characters, no quote characters, and no +// backslash characters, then we can safely slap some quotes around it. +// Otherwise we must also replace the offending characters with safe escape +// sequences. + + escapable.lastIndex = 0; + return escapable.test(string) ? + '"' + string.replace(escapable, function (a) { + var c = meta[a]; + return typeof c === 'string' ? c : + '\\u' + ('0000' + a.charCodeAt(0).toString(16)).slice(-4); + }) + '"' : + '"' + string + '"'; + } + + + function str(key, holder) { + +// Produce a string from holder[key]. + + var i, // The loop counter. + k, // The member key. + v, // The member value. + length, + mind = gap, + partial, + value = holder[key]; + +// If the value has a toJSON method, call it to obtain a replacement value. + + if (value && typeof value === 'object' && + typeof value.toJSON === 'function') { + value = value.toJSON(key); + } + +// If we were called with a replacer function, then call the replacer to +// obtain a replacement value. + + if (typeof rep === 'function') { + value = rep.call(holder, key, value); + } + +// What happens next depends on the value's type. 
+ + switch (typeof value) { + case 'string': + return quote(value); + + case 'number': + +// JSON numbers must be finite. Encode non-finite numbers as null. + + return isFinite(value) ? String(value) : 'null'; + + case 'boolean': + case 'null': + +// If the value is a boolean or null, convert it to a string. Note: +// typeof null does not produce 'null'. The case is included here in +// the remote chance that this gets fixed someday. + + return String(value); + +// If the type is 'object', we might be dealing with an object or an array or +// null. + + case 'object': + +// Due to a specification blunder in ECMAScript, typeof null is 'object', +// so watch out for that case. + + if (!value) { + return 'null'; + } + +// Make an array to hold the partial results of stringifying this object value. + + gap += indent; + partial = []; + +// Is the value an array? + + if (Object.prototype.toString.apply(value) === '[object Array]') { + +// The value is an array. Stringify every element. Use null as a placeholder +// for non-JSON values. + + length = value.length; + for (i = 0; i < length; i += 1) { + partial[i] = str(i, value) || 'null'; + } + +// Join all of the elements together, separated with commas, and wrap them in +// brackets. + + v = partial.length === 0 ? '[]' : + gap ? '[\n' + gap + + partial.join(',\n' + gap) + '\n' + + mind + ']' : + '[' + partial.join(',') + ']'; + gap = mind; + return v; + } + +// If the replacer is an array, use it to select the members to be stringified. + + if (rep && typeof rep === 'object') { + length = rep.length; + for (i = 0; i < length; i += 1) { + k = rep[i]; + if (typeof k === 'string') { + v = str(k, value); + if (v) { + partial.push(quote(k) + (gap ? ': ' : ':') + v); + } + } + } + } else { + +// Otherwise, iterate through all of the keys in the object. + + for (k in value) { + if (Object.hasOwnProperty.call(value, k)) { + v = str(k, value); + if (v) { + partial.push(quote(k) + (gap ? 
': ' : ':') + v); + } + } + } + } + +// Join all of the member texts together, separated with commas, +// and wrap them in braces. + + v = partial.length === 0 ? '{}' : + gap ? '{\n' + gap + partial.join(',\n' + gap) + '\n' + + mind + '}' : '{' + partial.join(',') + '}'; + gap = mind; + return v; + } + } + +// If the JSON object does not yet have a stringify method, give it one. + + if (typeof JSON.stringify !== 'function') { + JSON.stringify = function (value, replacer, space) { + +// The stringify method takes a value and an optional replacer, and an optional +// space parameter, and returns a JSON text. The replacer can be a function +// that can replace values, or an array of strings that will select the keys. +// A default replacer method can be provided. Use of the space parameter can +// produce text that is more easily readable. + + var i; + gap = ''; + indent = ''; + +// If the space parameter is a number, make an indent string containing that +// many spaces. + + if (typeof space === 'number') { + for (i = 0; i < space; i += 1) { + indent += ' '; + } + +// If the space parameter is a string, it will be used as the indent string. + + } else if (typeof space === 'string') { + indent = space; + } + +// If there is a replacer, it must be a function or an array. +// Otherwise, throw an error. + + rep = replacer; + if (replacer && typeof replacer !== 'function' && + (typeof replacer !== 'object' || + typeof replacer.length !== 'number')) { + throw new Error('JSON.stringify'); + } + +// Make a fake root object containing our value under the key of ''. +// Return the result of stringifying the value. + + return str('', {'': value}); + }; + } + + +// If the JSON object does not yet have a parse method, give it one. + + if (typeof JSON.parse !== 'function') { + JSON.parse = function (text, reviver) { + +// The parse method takes a text and an optional reviver function, and returns +// a JavaScript value if the text is a valid JSON text. 
+ + var j; + + function walk(holder, key) { + +// The walk method is used to recursively walk the resulting structure so +// that modifications can be made. + + var k, v, value = holder[key]; + if (value && typeof value === 'object') { + for (k in value) { + if (Object.hasOwnProperty.call(value, k)) { + v = walk(value, k); + if (v !== undefined) { + value[k] = v; + } else { + delete value[k]; + } + } + } + } + return reviver.call(holder, key, value); + } + + +// Parsing happens in four stages. In the first stage, we replace certain +// Unicode characters with escape sequences. JavaScript handles many characters +// incorrectly, either silently deleting them, or treating them as line endings. + + cx.lastIndex = 0; + if (cx.test(text)) { + text = text.replace(cx, function (a) { + return '\\u' + + ('0000' + a.charCodeAt(0).toString(16)).slice(-4); + }); + } + +// In the second stage, we run the text against regular expressions that look +// for non-JSON patterns. We are especially concerned with '()' and 'new' +// because they can cause invocation, and '=' because it can cause mutation. +// But just to be safe, we want to reject all unexpected forms. + +// We split the second stage into 4 regexp operations in order to work around +// crippling inefficiencies in IE's and Safari's regexp engines. First we +// replace the JSON backslash pairs with '@' (a non-JSON character). Second, we +// replace all simple value tokens with ']' characters. Third, we delete all +// open brackets that follow a colon or comma or that begin the text. Finally, +// we look to see that the remaining characters are only whitespace or ']' or +// ',' or ':' or '{' or '}'. If that is so, then the text is safe for eval. + + if (/^[\],:{}\s]*$/. +test(text.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g, '@'). +replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, ']'). 
+replace(/(?:^|:|,)(?:\s*\[)+/g, ''))) { + +// In the third stage we use the eval function to compile the text into a +// JavaScript structure. The '{' operator is subject to a syntactic ambiguity +// in JavaScript: it can begin a block or an object literal. We wrap the text +// in parens to eliminate the ambiguity. + + j = eval('(' + text + ')'); + +// In the optional fourth stage, we recursively walk the new structure, passing +// each name/value pair to a reviver function for possible transformation. + + return typeof reviver === 'function' ? + walk({'': j}, '') : j; + } + +// If the text is not JSON parseable, then a SyntaxError is thrown. + + throw new SyntaxError('JSON.parse'); + }; + } +}()); diff --git a/vendor/github.com/camlistore/camlistore/clients/chrome/clip-it-good/manifest.json b/vendor/github.com/camlistore/camlistore/clients/chrome/clip-it-good/manifest.json new file mode 100644 index 00000000..4e2830ca --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/chrome/clip-it-good/manifest.json @@ -0,0 +1,25 @@ +{ + "name": "Clip It Good", + "version": "0.1", + "description": "Save webpage clippings and images to your Picasa Web Albums.", + "background_page": "background.html", + "options_page": "options.html", + "permissions": [ + "tabs", + "http://*/*", + "https://*/*", + "https://www.google.com/accounts/OAuthGetRequestToken", + "https://www.google.com/accounts/OAuthAuthorizeToken", + "https://www.google.com/accounts/OAuthGetAccessToken", + "https://picasaweb.google.com/data/*", + "contextMenus" + ], + "page_action": { + "default_icon": "icon19.png" + }, + "minimum_chrome_version": "9.0", + "icons": { + "48": "icon48.png", + "128": "icon128.png" + } +} diff --git a/vendor/github.com/camlistore/camlistore/clients/chrome/clip-it-good/options.html b/vendor/github.com/camlistore/camlistore/clients/chrome/clip-it-good/options.html new file mode 100644 index 00000000..ce4e00ee --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/clients/chrome/clip-it-good/options.html @@ -0,0 +1,314 @@ + + + + Clip It Good: Configure options + + + + + + + + + + + + + +

    Clip It Good: Configure options

    + +
    + Loading... +
    + +
    + + +
    + +

    Security information

    +
    +Connecting an album to Clip It Good will require giving this extension permission to access your albums even when you are not logged into your account. At any time you may revoke access to this extension by using the authorized access control panel for each photo hosting provider: Google Accounts +
    + +

    About

    +
    + +

    + Brett Slatkin, ©2010 +
    + Email +

    +

    + Extension and source licensed under the + Apache License, + Version 2.0. Uses jQuery UI (MIT/GPL), + json2 parser (public domain), + Fred Palmer's Base64 + (BSD compat), and Jeff + Mott's SHA1 (BSD compat). +

    +
    + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/clients/curl/example.sh b/vendor/github.com/camlistore/camlistore/clients/curl/example.sh new file mode 100755 index 00000000..50ed2c52 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/curl/example.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Example client accesses to blob server using curl. + +# Configuration variables here: +BSHOST=localhost:3179/bs +BSUSER=user +BSPASS=foo + +# Shorter name for curl auth param: +AUTH=$BSUSER:$BSPASS + +# Stat -- 200 response +curl -u $AUTH -d camliversion=1 http://$BSHOST/camli/stat + +# Upload -- 200 response +curl -u $AUTH -v -L \ + -F sha1-126249fd8c18cbb5312a5705746a2af87fba9538=@./test_data.txt \ + # + +# Put with bad blob_ref parameter -- 400 response +curl -v -L \ + -F sha1-22a7fdd575f4c3e7caa3a55cc83db8b8a6714f0f=@./test_data.txt \ + # + +# Get present -- the blob +curl -u $AUTH -v http://$BSHOST/camli/sha1-126249fd8c18cbb5312a5705746a2af87fba9538 + +# Get missing -- 404 +curl -u $AUTH -v http://$BSHOST/camli/sha1-22a7fdd575f4c3e7caa3a55cc83db8b8a6714f0f + +# Check present -- 200 with only headers +curl -u $AUTH -I http://$BSHOST/camli/sha1-126249fd8c18cbb5312a5705746a2af87fba9538 + +# Check missing -- 404 with empty list response +curl -I http://$BSHOST/camli/sha1-22a7fdd575f4c3e7caa3a55cc83db8b8a6714f0f + +# List -- 200 with list of blobs (just one) +curl -v -u $AUTH http://$BSHOST/camli/enumerate-blobs?limit=1 + +# List offset -- 200 with list of no blobs +curl -v -u $AUTH http://$BSHOST/camli/enumerate-blobs?after=sha1-126249fd8c18cbb5312a5705746a2af87fba9538 diff --git a/vendor/github.com/camlistore/camlistore/clients/curl/test_data.txt b/vendor/github.com/camlistore/camlistore/clients/curl/test_data.txt new file mode 100644 index 00000000..a26826a5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/curl/test_data.txt @@ -0,0 +1 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Quisque at tortor in tellus accumsan euismod. Quisque scelerisque velit vel nisi ornare lacinia. Vivamus viverra eleifend congue. Maecenas dolor magna, rhoncus vitae fermentum id, convallis id. diff --git a/vendor/github.com/camlistore/camlistore/clients/curl/upload-file.pl b/vendor/github.com/camlistore/camlistore/clients/curl/upload-file.pl new file mode 100755 index 00000000..bee85afd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/curl/upload-file.pl @@ -0,0 +1,21 @@ +#!/usr/bin/perl +# +# Lame upload script for development testing only. Doesn't do the +# stat step and hard-codes the Go server's upload path (not +# conformant to spec). + +use strict; +my $file = shift or die + "Usage: upload-file.pl: "; +-r $file or die "$file isn't readable."; +-f $file or die "$file isn't a file."; + +die "bogus filename" if $file =~ /[ <>&\!]/; + +my $sha1 = `sha1sum $file`; +chomp $sha1; +$sha1 =~ s/\s.+//; + +system("curl", "-u", "foo:foo", "-F", "sha1-$sha1=\@$file", + "http://127.0.0.1:3179/bs/camli/upload") and die "upload failed."; +print "Uploaded http://127.0.0.1:3179/bs/camli/sha1-$sha1\n"; diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/.gitignore b/vendor/github.com/camlistore/camlistore/clients/ios-objc/.gitignore new file mode 100644 index 00000000..6a25330b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/.gitignore @@ -0,0 +1 @@ +Pods \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/Podfile b/vendor/github.com/camlistore/camlistore/clients/ios-objc/Podfile new file mode 100644 index 00000000..c3e4a8fe --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/Podfile @@ -0,0 +1,5 @@ +platform :ios, '7.0' + +pod 'HockeySDK', '~> 3.5.0' +pod 'SSKeychain', '~> 1.2.1' +pod 'BugshotKit', :podspec => 'https://raw.github.com/marcoarment/BugshotKit/e4031a8e5a863939f9c91f0d86352cba07d82d79/BugshotKit.podspec' \ No newline at end of 
file diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/Podfile.lock b/vendor/github.com/camlistore/camlistore/clients/ios-objc/Podfile.lock new file mode 100644 index 00000000..d12a2f27 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/Podfile.lock @@ -0,0 +1,20 @@ +PODS: + - BugshotKit (0.1.0) + - HockeySDK (3.5.2) + - SSKeychain (1.2.1) + +DEPENDENCIES: + - BugshotKit (from `https://raw.github.com/marcoarment/BugshotKit/e4031a8e5a863939f9c91f0d86352cba07d82d79/BugshotKit.podspec`) + - HockeySDK (~> 3.5.0) + - SSKeychain (~> 1.2.1) + +EXTERNAL SOURCES: + BugshotKit: + :podspec: https://raw.github.com/marcoarment/BugshotKit/e4031a8e5a863939f9c91f0d86352cba07d82d79/BugshotKit.podspec + +SPEC CHECKSUMS: + BugshotKit: c94c3c580d179b034791a26a8e1196e72c3c6312 + HockeySDK: 203d3af93c2a229bfb528ff085201670ff65e1cf + SSKeychain: d18926838c2e7cd342e2a49e9f869858e49f035a + +COCOAPODS: 0.29.0 diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/Readme.md b/vendor/github.com/camlistore/camlistore/clients/ios-objc/Readme.md new file mode 100644 index 00000000..6bb95a52 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/Readme.md @@ -0,0 +1,5 @@ +== SETUP + +Use the podfile to setup the (ignored) dependencies, `pod install` should be all you need, then open the .xcworkspace file in xcode. + +We use clang-format in the form of the ClangFormat-Xcode plugin (https://github.com/travisjeffery/ClangFormat-Xcode) for style consistency. Please set your formatting tool to use the WebKit style (http://www.webkit.org/coding/coding-style.html). 
\ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup.xcodeproj/project.pbxproj b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup.xcodeproj/project.pbxproj new file mode 100644 index 00000000..abee3320 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup.xcodeproj/project.pbxproj @@ -0,0 +1,617 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + 2051975F9A3B42668D11045C /* libPods.a in Frameworks */ = {isa = PBXBuildFile; fileRef = C6265C93000C47B5BFE9BB61 /* libPods.a */; }; + D075B282184944330054FED3 /* LACamliUtil.m in Sources */ = {isa = PBXBuildFile; fileRef = D075B281184944330054FED3 /* LACamliUtil.m */; }; + D075B28518494DB20054FED3 /* LACamliUploadOperation.m in Sources */ = {isa = PBXBuildFile; fileRef = D075B28418494DB20054FED3 /* LACamliUploadOperation.m */; }; + D078FB0918726D1300F2ABF7 /* CoreText.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D078FB0818726D1300F2ABF7 /* CoreText.framework */; }; + D078FB0B18726D1C00F2ABF7 /* QuartzCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D078FB0A18726D1C00F2ABF7 /* QuartzCore.framework */; }; + D078FB0D18726D2100F2ABF7 /* Security.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D078FB0C18726D2100F2ABF7 /* Security.framework */; }; + D078FB0F18726D2900F2ABF7 /* SystemConfiguration.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D078FB0E18726D2900F2ABF7 /* SystemConfiguration.framework */; }; + D08F9118187B417E006F6B8D /* UploadStatusCell.m in Sources */ = {isa = PBXBuildFile; fileRef = D08F9117187B417E006F6B8D /* UploadStatusCell.m */; }; + D08F911B187B4189006F6B8D /* UploadTaskCell.m in Sources */ = {isa = PBXBuildFile; fileRef = D08F911A187B4189006F6B8D /* UploadTaskCell.m */; }; + D095AE131814AF10008163F2 /* Foundation.framework in Frameworks */ = {isa = 
PBXBuildFile; fileRef = D095AE121814AF10008163F2 /* Foundation.framework */; }; + D095AE151814AF10008163F2 /* CoreGraphics.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D095AE141814AF10008163F2 /* CoreGraphics.framework */; }; + D095AE171814AF10008163F2 /* UIKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D095AE161814AF10008163F2 /* UIKit.framework */; }; + D095AE1D1814AF10008163F2 /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = D095AE1B1814AF10008163F2 /* InfoPlist.strings */; }; + D095AE1F1814AF10008163F2 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = D095AE1E1814AF10008163F2 /* main.m */; }; + D095AE231814AF10008163F2 /* LAAppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = D095AE221814AF10008163F2 /* LAAppDelegate.m */; }; + D095AE261814AF10008163F2 /* Main_iPhone.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = D095AE241814AF10008163F2 /* Main_iPhone.storyboard */; }; + D095AE2C1814AF10008163F2 /* LAViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = D095AE2B1814AF10008163F2 /* LAViewController.m */; }; + D095AE2E1814AF10008163F2 /* Images.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = D095AE2D1814AF10008163F2 /* Images.xcassets */; }; + D095AE351814AF10008163F2 /* XCTest.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D095AE341814AF10008163F2 /* XCTest.framework */; }; + D095AE361814AF10008163F2 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D095AE121814AF10008163F2 /* Foundation.framework */; }; + D095AE371814AF10008163F2 /* UIKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D095AE161814AF10008163F2 /* UIKit.framework */; }; + D095AE3F1814AF10008163F2 /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = D095AE3D1814AF10008163F2 /* InfoPlist.strings */; }; + D095AE411814AF10008163F2 /* photobackupTests.m in Sources */ = {isa = PBXBuildFile; fileRef = D095AE401814AF10008163F2 /* 
photobackupTests.m */; }; + D095AE501814B1B9008163F2 /* LACamliFile.m in Sources */ = {isa = PBXBuildFile; fileRef = D095AE4C1814B1B9008163F2 /* LACamliFile.m */; }; + D095AE511814B1B9008163F2 /* LACamliClient.m in Sources */ = {isa = PBXBuildFile; fileRef = D095AE4E1814B1B9008163F2 /* LACamliClient.m */; }; + D0D45EA1185FE2BE00EBC0A2 /* SettingsViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = D0D45EA0185FE2BE00EBC0A2 /* SettingsViewController.m */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + D095AE381814AF10008163F2 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = D095AE071814AF10008163F2 /* Project object */; + proxyType = 1; + remoteGlobalIDString = D095AE0E1814AF10008163F2; + remoteInfo = photobackup; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + 208E1D70286D49129C896012 /* Pods.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = Pods.xcconfig; path = Pods/Pods.xcconfig; sourceTree = ""; }; + C6265C93000C47B5BFE9BB61 /* libPods.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libPods.a; sourceTree = BUILT_PRODUCTS_DIR; }; + D075B280184944330054FED3 /* LACamliUtil.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LACamliUtil.h; sourceTree = ""; }; + D075B281184944330054FED3 /* LACamliUtil.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = LACamliUtil.m; sourceTree = ""; }; + D075B28318494DB20054FED3 /* LACamliUploadOperation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LACamliUploadOperation.h; sourceTree = ""; }; + D075B28418494DB20054FED3 /* LACamliUploadOperation.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = LACamliUploadOperation.m; sourceTree = ""; }; + 
D078FB0818726D1300F2ABF7 /* CoreText.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreText.framework; path = System/Library/Frameworks/CoreText.framework; sourceTree = SDKROOT; }; + D078FB0A18726D1C00F2ABF7 /* QuartzCore.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = QuartzCore.framework; path = System/Library/Frameworks/QuartzCore.framework; sourceTree = SDKROOT; }; + D078FB0C18726D2100F2ABF7 /* Security.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Security.framework; path = System/Library/Frameworks/Security.framework; sourceTree = SDKROOT; }; + D078FB0E18726D2900F2ABF7 /* SystemConfiguration.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = SystemConfiguration.framework; path = System/Library/Frameworks/SystemConfiguration.framework; sourceTree = SDKROOT; }; + D08F9116187B417E006F6B8D /* UploadStatusCell.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UploadStatusCell.h; sourceTree = ""; }; + D08F9117187B417E006F6B8D /* UploadStatusCell.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = UploadStatusCell.m; sourceTree = ""; }; + D08F9119187B4189006F6B8D /* UploadTaskCell.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UploadTaskCell.h; sourceTree = ""; }; + D08F911A187B4189006F6B8D /* UploadTaskCell.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = UploadTaskCell.m; sourceTree = ""; }; + D095AE0F1814AF10008163F2 /* photobackup.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = photobackup.app; sourceTree = BUILT_PRODUCTS_DIR; }; + D095AE121814AF10008163F2 /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = 
System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; }; + D095AE141814AF10008163F2 /* CoreGraphics.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreGraphics.framework; path = System/Library/Frameworks/CoreGraphics.framework; sourceTree = SDKROOT; }; + D095AE161814AF10008163F2 /* UIKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = UIKit.framework; path = System/Library/Frameworks/UIKit.framework; sourceTree = SDKROOT; }; + D095AE1A1814AF10008163F2 /* photobackup-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "photobackup-Info.plist"; sourceTree = ""; }; + D095AE1C1814AF10008163F2 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + D095AE1E1814AF10008163F2 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; }; + D095AE201814AF10008163F2 /* photobackup-Prefix.pch */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "photobackup-Prefix.pch"; sourceTree = ""; }; + D095AE211814AF10008163F2 /* LAAppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = LAAppDelegate.h; sourceTree = ""; }; + D095AE221814AF10008163F2 /* LAAppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = LAAppDelegate.m; sourceTree = ""; }; + D095AE251814AF10008163F2 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main_iPhone.storyboard; sourceTree = ""; }; + D095AE2A1814AF10008163F2 /* LAViewController.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = LAViewController.h; sourceTree = ""; }; + D095AE2B1814AF10008163F2 /* LAViewController.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = LAViewController.m; sourceTree = ""; }; + 
D095AE2D1814AF10008163F2 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + D095AE331814AF10008163F2 /* photobackupTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = photobackupTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + D095AE341814AF10008163F2 /* XCTest.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = XCTest.framework; path = Library/Frameworks/XCTest.framework; sourceTree = DEVELOPER_DIR; }; + D095AE3C1814AF10008163F2 /* photobackupTests-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "photobackupTests-Info.plist"; sourceTree = ""; }; + D095AE3E1814AF10008163F2 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + D095AE401814AF10008163F2 /* photobackupTests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = photobackupTests.m; sourceTree = ""; }; + D095AE4B1814B1B9008163F2 /* LACamliFile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LACamliFile.h; sourceTree = ""; }; + D095AE4C1814B1B9008163F2 /* LACamliFile.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = LACamliFile.m; sourceTree = ""; }; + D095AE4D1814B1B9008163F2 /* LACamliClient.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LACamliClient.h; sourceTree = ""; }; + D095AE4E1814B1B9008163F2 /* LACamliClient.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = LACamliClient.m; sourceTree = ""; }; + D0D45E9F185FE2BE00EBC0A2 /* SettingsViewController.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SettingsViewController.h; sourceTree = ""; }; + 
D0D45EA0185FE2BE00EBC0A2 /* SettingsViewController.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SettingsViewController.m; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + D095AE0C1814AF10008163F2 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + D078FB0F18726D2900F2ABF7 /* SystemConfiguration.framework in Frameworks */, + D078FB0D18726D2100F2ABF7 /* Security.framework in Frameworks */, + D078FB0B18726D1C00F2ABF7 /* QuartzCore.framework in Frameworks */, + D078FB0918726D1300F2ABF7 /* CoreText.framework in Frameworks */, + D095AE151814AF10008163F2 /* CoreGraphics.framework in Frameworks */, + D095AE171814AF10008163F2 /* UIKit.framework in Frameworks */, + D095AE131814AF10008163F2 /* Foundation.framework in Frameworks */, + 2051975F9A3B42668D11045C /* libPods.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + D095AE301814AF10008163F2 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + D095AE351814AF10008163F2 /* XCTest.framework in Frameworks */, + D095AE371814AF10008163F2 /* UIKit.framework in Frameworks */, + D095AE361814AF10008163F2 /* Foundation.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + D08F911C187B4190006F6B8D /* Main UI */ = { + isa = PBXGroup; + children = ( + D095AE241814AF10008163F2 /* Main_iPhone.storyboard */, + D095AE2A1814AF10008163F2 /* LAViewController.h */, + D095AE2B1814AF10008163F2 /* LAViewController.m */, + D08F9116187B417E006F6B8D /* UploadStatusCell.h */, + D08F9117187B417E006F6B8D /* UploadStatusCell.m */, + D08F9119187B4189006F6B8D /* UploadTaskCell.h */, + D08F911A187B4189006F6B8D /* UploadTaskCell.m */, + ); + name = "Main UI"; + sourceTree = ""; + }; + D095AE061814AF10008163F2 = { + isa = PBXGroup; + 
children = ( + D095AE181814AF10008163F2 /* photobackup */, + D095AE3A1814AF10008163F2 /* photobackupTests */, + D095AE111814AF10008163F2 /* Frameworks */, + D095AE101814AF10008163F2 /* Products */, + 208E1D70286D49129C896012 /* Pods.xcconfig */, + ); + sourceTree = ""; + }; + D095AE101814AF10008163F2 /* Products */ = { + isa = PBXGroup; + children = ( + D095AE0F1814AF10008163F2 /* photobackup.app */, + D095AE331814AF10008163F2 /* photobackupTests.xctest */, + ); + name = Products; + sourceTree = ""; + }; + D095AE111814AF10008163F2 /* Frameworks */ = { + isa = PBXGroup; + children = ( + D078FB0E18726D2900F2ABF7 /* SystemConfiguration.framework */, + D078FB0C18726D2100F2ABF7 /* Security.framework */, + D078FB0A18726D1C00F2ABF7 /* QuartzCore.framework */, + D078FB0818726D1300F2ABF7 /* CoreText.framework */, + D095AE121814AF10008163F2 /* Foundation.framework */, + D095AE141814AF10008163F2 /* CoreGraphics.framework */, + D095AE161814AF10008163F2 /* UIKit.framework */, + D095AE341814AF10008163F2 /* XCTest.framework */, + C6265C93000C47B5BFE9BB61 /* libPods.a */, + ); + name = Frameworks; + sourceTree = ""; + }; + D095AE181814AF10008163F2 /* photobackup */ = { + isa = PBXGroup; + children = ( + D095AE4A1814B1B9008163F2 /* LACamliClient */, + D095AE211814AF10008163F2 /* LAAppDelegate.h */, + D095AE221814AF10008163F2 /* LAAppDelegate.m */, + D08F911C187B4190006F6B8D /* Main UI */, + D0D45E9F185FE2BE00EBC0A2 /* SettingsViewController.h */, + D0D45EA0185FE2BE00EBC0A2 /* SettingsViewController.m */, + D095AE2D1814AF10008163F2 /* Images.xcassets */, + D095AE191814AF10008163F2 /* Supporting Files */, + ); + path = photobackup; + sourceTree = ""; + }; + D095AE191814AF10008163F2 /* Supporting Files */ = { + isa = PBXGroup; + children = ( + D095AE1A1814AF10008163F2 /* photobackup-Info.plist */, + D095AE1B1814AF10008163F2 /* InfoPlist.strings */, + D095AE1E1814AF10008163F2 /* main.m */, + D095AE201814AF10008163F2 /* photobackup-Prefix.pch */, + ); + name = "Supporting Files"; + 
sourceTree = ""; + }; + D095AE3A1814AF10008163F2 /* photobackupTests */ = { + isa = PBXGroup; + children = ( + D095AE401814AF10008163F2 /* photobackupTests.m */, + D095AE3B1814AF10008163F2 /* Supporting Files */, + ); + path = photobackupTests; + sourceTree = ""; + }; + D095AE3B1814AF10008163F2 /* Supporting Files */ = { + isa = PBXGroup; + children = ( + D095AE3C1814AF10008163F2 /* photobackupTests-Info.plist */, + D095AE3D1814AF10008163F2 /* InfoPlist.strings */, + ); + name = "Supporting Files"; + sourceTree = ""; + }; + D095AE4A1814B1B9008163F2 /* LACamliClient */ = { + isa = PBXGroup; + children = ( + D095AE4B1814B1B9008163F2 /* LACamliFile.h */, + D095AE4C1814B1B9008163F2 /* LACamliFile.m */, + D095AE4D1814B1B9008163F2 /* LACamliClient.h */, + D095AE4E1814B1B9008163F2 /* LACamliClient.m */, + D075B280184944330054FED3 /* LACamliUtil.h */, + D075B281184944330054FED3 /* LACamliUtil.m */, + D075B28318494DB20054FED3 /* LACamliUploadOperation.h */, + D075B28418494DB20054FED3 /* LACamliUploadOperation.m */, + ); + path = LACamliClient; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + D095AE0E1814AF10008163F2 /* photobackup */ = { + isa = PBXNativeTarget; + buildConfigurationList = D095AE441814AF10008163F2 /* Build configuration list for PBXNativeTarget "photobackup" */; + buildPhases = ( + 85BE1708753D47A8B10D430C /* Check Pods Manifest.lock */, + D095AE0B1814AF10008163F2 /* Sources */, + D095AE0C1814AF10008163F2 /* Frameworks */, + D095AE0D1814AF10008163F2 /* Resources */, + A368DE813E1349B0810D9274 /* Copy Pods Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = photobackup; + productName = photobackup; + productReference = D095AE0F1814AF10008163F2 /* photobackup.app */; + productType = "com.apple.product-type.application"; + }; + D095AE321814AF10008163F2 /* photobackupTests */ = { + isa = PBXNativeTarget; + buildConfigurationList = D095AE471814AF10008163F2 /* Build configuration list for 
PBXNativeTarget "photobackupTests" */; + buildPhases = ( + D095AE2F1814AF10008163F2 /* Sources */, + D095AE301814AF10008163F2 /* Frameworks */, + D095AE311814AF10008163F2 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + D095AE391814AF10008163F2 /* PBXTargetDependency */, + ); + name = photobackupTests; + productName = photobackupTests; + productReference = D095AE331814AF10008163F2 /* photobackupTests.xctest */; + productType = "com.apple.product-type.bundle.unit-test"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + D095AE071814AF10008163F2 /* Project object */ = { + isa = PBXProject; + attributes = { + CLASSPREFIX = LA; + LastUpgradeCheck = 0500; + ORGANIZATIONNAME = "Nick O'Neill"; + TargetAttributes = { + D095AE0E1814AF10008163F2 = { + DevelopmentTeam = H6S4PTUWAA; + SystemCapabilities = { + com.apple.BackgroundModes = { + enabled = 1; + }; + }; + }; + D095AE321814AF10008163F2 = { + TestTargetID = D095AE0E1814AF10008163F2; + }; + }; + }; + buildConfigurationList = D095AE0A1814AF10008163F2 /* Build configuration list for PBXProject "photobackup" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = D095AE061814AF10008163F2; + productRefGroup = D095AE101814AF10008163F2 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + D095AE0E1814AF10008163F2 /* photobackup */, + D095AE321814AF10008163F2 /* photobackupTests */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + D095AE0D1814AF10008163F2 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + D095AE2E1814AF10008163F2 /* Images.xcassets in Resources */, + D095AE261814AF10008163F2 /* Main_iPhone.storyboard in Resources */, + D095AE1D1814AF10008163F2 /* InfoPlist.strings in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + D095AE311814AF10008163F2 /* 
Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + D095AE3F1814AF10008163F2 /* InfoPlist.strings in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXShellScriptBuildPhase section */ + 85BE1708753D47A8B10D430C /* Check Pods Manifest.lock */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Check Pods Manifest.lock"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n"; + showEnvVarsInLog = 0; + }; + A368DE813E1349B0810D9274 /* Copy Pods Resources */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Copy Pods Resources"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${SRCROOT}/Pods/Pods-resources.sh\"\n"; + showEnvVarsInLog = 0; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + D095AE0B1814AF10008163F2 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + D095AE1F1814AF10008163F2 /* main.m in Sources */, + D095AE511814B1B9008163F2 /* LACamliClient.m in Sources */, + D08F911B187B4189006F6B8D /* UploadTaskCell.m in Sources */, + D095AE501814B1B9008163F2 /* LACamliFile.m in Sources */, + D075B282184944330054FED3 /* LACamliUtil.m in Sources */, + D095AE231814AF10008163F2 /* LAAppDelegate.m in Sources */, + D0D45EA1185FE2BE00EBC0A2 /* SettingsViewController.m in Sources */, + D095AE2C1814AF10008163F2 /* LAViewController.m in Sources */, + D075B28518494DB20054FED3 /* 
LACamliUploadOperation.m in Sources */, + D08F9118187B417E006F6B8D /* UploadStatusCell.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + D095AE2F1814AF10008163F2 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + D095AE411814AF10008163F2 /* photobackupTests.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + D095AE391814AF10008163F2 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = D095AE0E1814AF10008163F2 /* photobackup */; + targetProxy = D095AE381814AF10008163F2 /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin PBXVariantGroup section */ + D095AE1B1814AF10008163F2 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + D095AE1C1814AF10008163F2 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + D095AE241814AF10008163F2 /* Main_iPhone.storyboard */ = { + isa = PBXVariantGroup; + children = ( + D095AE251814AF10008163F2 /* Base */, + ); + name = Main_iPhone.storyboard; + sourceTree = ""; + }; + D095AE3D1814AF10008163F2 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + D095AE3E1814AF10008163F2 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + D095AE421814AF10008163F2 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ARCHS = "$(ARCHS_STANDARD_INCLUDING_64_BIT)"; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = 
YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 7.0; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Debug; + }; + D095AE431814AF10008163F2 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ARCHS = "$(ARCHS_STANDARD_INCLUDING_64_BIT)"; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = YES; + ENABLE_NS_ASSERTIONS = NO; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 7.0; + SDKROOT = iphoneos; + TARGETED_DEVICE_FAMILY = "1,2"; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + D095AE451814AF10008163F2 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 208E1D70286D49129C896012 /* 
Pods.xcconfig */; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_LAUNCHIMAGE_NAME = LaunchImage; + CODE_SIGN_IDENTITY = "iPhone Developer"; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + FRAMEWORK_SEARCH_PATHS = "$(inherited)"; + GCC_PRECOMPILE_PREFIX_HEADER = YES; + GCC_PREFIX_HEADER = "photobackup/photobackup-Prefix.pch"; + INFOPLIST_FILE = "photobackup/photobackup-Info.plist"; + IPHONEOS_DEPLOYMENT_TARGET = 7.0; + PRODUCT_NAME = "$(TARGET_NAME)"; + PROVISIONING_PROFILE = ""; + TARGETED_DEVICE_FAMILY = 1; + WRAPPER_EXTENSION = app; + }; + name = Debug; + }; + D095AE461814AF10008163F2 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 208E1D70286D49129C896012 /* Pods.xcconfig */; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_LAUNCHIMAGE_NAME = LaunchImage; + CODE_SIGN_IDENTITY = "iPhone Distribution: Launch Apps LLC (H6S4PTUWAA)"; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Distribution: Launch Apps LLC (H6S4PTUWAA)"; + FRAMEWORK_SEARCH_PATHS = "$(inherited)"; + GCC_PRECOMPILE_PREFIX_HEADER = YES; + GCC_PREFIX_HEADER = "photobackup/photobackup-Prefix.pch"; + INFOPLIST_FILE = "photobackup/photobackup-Info.plist"; + IPHONEOS_DEPLOYMENT_TARGET = 7.0; + PRODUCT_NAME = "$(TARGET_NAME)"; + PROVISIONING_PROFILE = ""; + TARGETED_DEVICE_FAMILY = 1; + WRAPPER_EXTENSION = app; + }; + name = Release; + }; + D095AE481814AF10008163F2 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD_INCLUDING_64_BIT)"; + BUNDLE_LOADER = "$(BUILT_PRODUCTS_DIR)/photobackup.app/photobackup"; + FRAMEWORK_SEARCH_PATHS = ( + "$(SDKROOT)/Developer/Library/Frameworks", + "$(inherited)", + "$(DEVELOPER_FRAMEWORKS_DIR)", + ); + GCC_PRECOMPILE_PREFIX_HEADER = YES; + GCC_PREFIX_HEADER = "photobackup/photobackup-Prefix.pch"; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + INFOPLIST_FILE = 
"photobackupTests/photobackupTests-Info.plist"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_HOST = "$(BUNDLE_LOADER)"; + WRAPPER_EXTENSION = xctest; + }; + name = Debug; + }; + D095AE491814AF10008163F2 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD_INCLUDING_64_BIT)"; + BUNDLE_LOADER = "$(BUILT_PRODUCTS_DIR)/photobackup.app/photobackup"; + FRAMEWORK_SEARCH_PATHS = ( + "$(SDKROOT)/Developer/Library/Frameworks", + "$(inherited)", + "$(DEVELOPER_FRAMEWORKS_DIR)", + ); + GCC_PRECOMPILE_PREFIX_HEADER = YES; + GCC_PREFIX_HEADER = "photobackup/photobackup-Prefix.pch"; + INFOPLIST_FILE = "photobackupTests/photobackupTests-Info.plist"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_HOST = "$(BUNDLE_LOADER)"; + WRAPPER_EXTENSION = xctest; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + D095AE0A1814AF10008163F2 /* Build configuration list for PBXProject "photobackup" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + D095AE421814AF10008163F2 /* Debug */, + D095AE431814AF10008163F2 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + D095AE441814AF10008163F2 /* Build configuration list for PBXNativeTarget "photobackup" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + D095AE451814AF10008163F2 /* Debug */, + D095AE461814AF10008163F2 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + D095AE471814AF10008163F2 /* Build configuration list for PBXNativeTarget "photobackupTests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + D095AE481814AF10008163F2 /* Debug */, + D095AE491814AF10008163F2 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = D095AE071814AF10008163F2 /* Project object */; +} diff --git 
a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup.xcworkspace/contents.xcworkspacedata b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup.xcworkspace/contents.xcworkspacedata new file mode 100644 index 00000000..13577cec --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup.xcworkspace/contents.xcworkspacedata @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup.xcworkspace/xcshareddata/photobackup.xccheckout b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup.xcworkspace/xcshareddata/photobackup.xccheckout new file mode 100644 index 00000000..ecee56d0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup.xcworkspace/xcshareddata/photobackup.xccheckout @@ -0,0 +1,41 @@ + + + + + IDESourceControlProjectFavoriteDictionaryKey + + IDESourceControlProjectIdentifier + 79DD2B70-7262-4160-804F-F637A84D4765 + IDESourceControlProjectName + photobackup + IDESourceControlProjectOriginsDictionary + + 1AD6A32F-218B-40FF-A64E-FD2FF680305E + https://camlistore.googlesource.com/camlistore + + IDESourceControlProjectPath + clients/ios-objc/photobackup.xcworkspace + IDESourceControlProjectRelativeInstallPathDictionary + + 1AD6A32F-218B-40FF-A64E-FD2FF680305E + ../../.. 
+ + IDESourceControlProjectURL + https://camlistore.googlesource.com/camlistore + IDESourceControlProjectVersion + 110 + IDESourceControlProjectWCCIdentifier + 1AD6A32F-218B-40FF-A64E-FD2FF680305E + IDESourceControlProjectWCConfigurations + + + IDESourceControlRepositoryExtensionIdentifierKey + public.vcs.git + IDESourceControlWCCIdentifierKey + 1AD6A32F-218B-40FF-A64E-FD2FF680305E + IDESourceControlWCCName + camlistore + + + + diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Base.lproj/Main_iPad.storyboard b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Base.lproj/Main_iPad.storyboard new file mode 100644 index 00000000..7e2236e4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Base.lproj/Main_iPad.storyboard @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Lorem ipsum dolor sit er elit lamet, consectetaur cillium adipisicing pecu, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. Nam liber te conscient to factor tum poen legum odioque civiuda. 
+ + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Base.lproj/Main_iPhone.storyboard b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Base.lproj/Main_iPhone.storyboard new file mode 100644 index 00000000..c097bde4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Base.lproj/Main_iPhone.storyboard @@ -0,0 +1,193 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/AppIcon.appiconset/AppIcon29x29@2x.png b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/AppIcon.appiconset/AppIcon29x29@2x.png new file mode 100644 index 00000000..ad75e95c Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/AppIcon.appiconset/AppIcon29x29@2x.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/AppIcon.appiconset/AppIcon40x40@2x.png b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/AppIcon.appiconset/AppIcon40x40@2x.png new file mode 100644 index 00000000..54163755 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/AppIcon.appiconset/AppIcon40x40@2x.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/AppIcon.appiconset/AppIcon60x60@2x.png 
b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/AppIcon.appiconset/AppIcon60x60@2x.png new file mode 100644 index 00000000..0f782faa Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/AppIcon.appiconset/AppIcon60x60@2x.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/AppIcon.appiconset/Contents.json b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 00000000..bcb9688b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,56 @@ +{ + "images" : [ + { + "size" : "29x29", + "idiom" : "iphone", + "filename" : "AppIcon29x29@2x.png", + "scale" : "2x" + }, + { + "size" : "40x40", + "idiom" : "iphone", + "filename" : "AppIcon40x40@2x.png", + "scale" : "2x" + }, + { + "size" : "60x60", + "idiom" : "iphone", + "filename" : "AppIcon60x60@2x.png", + "scale" : "2x" + }, + { + "idiom" : "ipad", + "size" : "29x29", + "scale" : "1x" + }, + { + "idiom" : "ipad", + "size" : "29x29", + "scale" : "2x" + }, + { + "idiom" : "ipad", + "size" : "40x40", + "scale" : "1x" + }, + { + "idiom" : "ipad", + "size" : "40x40", + "scale" : "2x" + }, + { + "idiom" : "ipad", + "size" : "76x76", + "scale" : "1x" + }, + { + "idiom" : "ipad", + "size" : "76x76", + "scale" : "2x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/LaunchImage.launchimage/Contents.json b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/LaunchImage.launchimage/Contents.json new file mode 100644 index 00000000..2f5550f1 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/LaunchImage.launchimage/Contents.json @@ -0,0 +1,53 @@ +{ + "images" : [ + { + "orientation" : "portrait", + "idiom" : "iphone", + "extent" : "full-screen", + "minimum-system-version" : "7.0", + "filename" : "startup-small.png", + "scale" : "2x" + }, + { + "extent" : "full-screen", + "idiom" : "iphone", + "subtype" : "retina4", + "filename" : "startup-r4.png", + "minimum-system-version" : "7.0", + "orientation" : "portrait", + "scale" : "2x" + }, + { + "orientation" : "portrait", + "idiom" : "ipad", + "extent" : "full-screen", + "minimum-system-version" : "7.0", + "scale" : "1x" + }, + { + "orientation" : "landscape", + "idiom" : "ipad", + "extent" : "full-screen", + "minimum-system-version" : "7.0", + "scale" : "1x" + }, + { + "orientation" : "portrait", + "idiom" : "ipad", + "extent" : "full-screen", + "minimum-system-version" : "7.0", + "scale" : "2x" + }, + { + "orientation" : "landscape", + "idiom" : "ipad", + "extent" : "full-screen", + "minimum-system-version" : "7.0", + "scale" : "2x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/LaunchImage.launchimage/startup-r4.png b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/LaunchImage.launchimage/startup-r4.png new file mode 100644 index 00000000..10dc6430 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/LaunchImage.launchimage/startup-r4.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/LaunchImage.launchimage/startup-small.png b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/LaunchImage.launchimage/startup-small.png new file mode 100644 index 00000000..20f538b4 Binary files /dev/null 
and b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/Images.xcassets/LaunchImage.launchimage/startup-small.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LAAppDelegate.h b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LAAppDelegate.h new file mode 100644 index 00000000..54c3ee08 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LAAppDelegate.h @@ -0,0 +1,31 @@ +// +// LAAppDelegate.h +// photobackup +// +// Created by Nick O'Neill on 10/20/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. +// + +#import +#import +#import "LACamliClient.h" +#import + +@class ALAssetsLibrary; + +static NSString* const CamliUsernameKey = @"org.camlistore.username"; +static NSString* const CamliServerKey = @"org.camlistore.serverurl"; +static NSString* const CamliCredentialsKey = @"org.camlistore.credentials"; + +@interface LAAppDelegate : UIResponder + +@property(strong, nonatomic) UIWindow* window; +@property CLLocationManager* locationManager; + +@property LACamliClient* client; +@property ALAssetsLibrary* library; + +- (void)loadCredentials; +- (void)checkForUploads; + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LAAppDelegate.m b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LAAppDelegate.m new file mode 100644 index 00000000..4dfa7e4c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LAAppDelegate.m @@ -0,0 +1,156 @@ +// +// LAAppDelegate.m +// photobackup +// +// Created by Nick O'Neill on 10/20/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. 
+// + +#import "LAAppDelegate.h" +#import "LACamliUtil.h" +#import "LACamliFile.h" +#import "LAViewController.h" +#import +#import +#import + +@implementation LAAppDelegate + +- (BOOL)application:(UIApplication*)application didFinishLaunchingWithOptions:(NSDictionary*)launchOptions +{ + [[BITHockeyManager sharedHockeyManager] configureWithIdentifier:@"de94cf9f0f0ad2ea0b19b2ad18ebe11f" + delegate:self]; + [[BITHockeyManager sharedHockeyManager] startManager]; + [[BITHockeyManager sharedHockeyManager].updateManager setDelegate:self]; + [[BITHockeyManager sharedHockeyManager].updateManager checkForUpdate]; + + [BugshotKit enableWithNumberOfTouches:1 + performingGestures:BSKInvocationGestureNone + feedbackEmailAddress:@"nick.oneill@gmail.com"]; + + self.locationManager = [[CLLocationManager alloc] init]; + self.locationManager.delegate = self; + [self.locationManager startMonitoringSignificantLocationChanges]; + + [self loadCredentials]; + + self.library = [[ALAssetsLibrary alloc] init]; + + return YES; +} + +- (void)locationManager:(CLLocationManager*)manager didUpdateLocations:(NSArray*)locations +{ + [self checkForUploads]; +} + +- (void)loadCredentials +{ + NSURL* serverURL = [NSURL URLWithString:[[NSUserDefaults standardUserDefaults] stringForKey:CamliServerKey]]; + NSString* username = [[NSUserDefaults standardUserDefaults] stringForKey:CamliUsernameKey]; + + NSString* password = nil; + if (username) { + password = [LACamliUtil passwordForUsername:username]; + } + + if (serverURL && username && password) { + [LACamliUtil statusText:@[ + @"found credentials" + ]]; + [LACamliUtil logText:@[ + @"found credentials" + ]]; + self.client = [[LACamliClient alloc] initWithServer:serverURL + username:username + andPassword:password]; + + // TODO there must be a better way to get the current instance of this + LAViewController* mainView = (LAViewController*)[(UINavigationController*)self.window.rootViewController topViewController]; + [self.client setDelegate:mainView]; + } 
else { + [LACamliUtil statusText:@[ + @"credentials or server not found" + ]]; + } + + [self checkForUploads]; +} + +- (void)checkForUploads +{ + if (self.client && [self.client readyToUpload]) { + NSInteger __block filesToUpload = 0; + + [LACamliUtil statusText:@[ + @"looking for new files..." + ]]; + + // checking all assets can take some time + dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{ + [self.library enumerateGroupsWithTypes:ALAssetsGroupSavedPhotos usingBlock:^(ALAssetsGroup *group, BOOL *stop) { + + [group enumerateAssetsUsingBlock:^(ALAsset *result, NSUInteger index, BOOL *stop) { + if (result && [result valueForProperty:ALAssetPropertyType] != ALAssetTypeVideo) { // enumerate returns null after the last item + + NSString *filename = [[result defaultRepresentation] filename]; + + @synchronized(self.client){ + if (![self.client fileAlreadyUploaded:filename]) { + filesToUpload++; + + [LACamliUtil logText:@[[NSString stringWithFormat:@"found %ld files",(long)filesToUpload]]]; + + __block LACamliClient *weakClient = self.client; + + LACamliFile *file = [[LACamliFile alloc] initWithAsset:result]; + [self.client addFile:file withCompletion:^{ + [UIApplication sharedApplication].applicationIconBadgeNumber = [weakClient.uploadQueue operationCount]; + }]; + } + } + } + }]; + + if (filesToUpload == 0) { + [LACamliUtil statusText:@[@"no new files to upload"]]; + } + + [UIApplication sharedApplication].applicationIconBadgeNumber = filesToUpload; + + } failureBlock:^(NSError *error) { + [LACamliUtil errorText:@[@"failed enumerate: ",[error description]]]; + }]; + }); + } +} + +- (void)applicationWillResignActive:(UIApplication*)application +{ + // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state. 
+ // Use this method to pause ongoing tasks, disable timers, and throttle down OpenGL ES frame rates. Games should use this method to pause the game. +} + +- (void)applicationDidEnterBackground:(UIApplication*)application +{ + // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later. + // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits. +} + +- (void)applicationWillEnterForeground:(UIApplication*)application +{ + // Called as part of the transition from the background to the inactive state; here you can undo many of the changes made on entering the background. +} + +- (void)applicationDidBecomeActive:(UIApplication*)application +{ + // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface. + [self checkForUploads]; +} + +- (void)applicationWillTerminate:(UIApplication*)application +{ + // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:. +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliClient.h b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliClient.h new file mode 100644 index 00000000..56b2b66c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliClient.h @@ -0,0 +1,50 @@ +// +// LACamliClient.h +// +// Created by Nick O'Neill on 1/10/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. 
+// + +#import + +@class LACamliFile, LACamliUploadOperation; + +@protocol LACamliStatusDelegate + +@optional +- (void)finishedDiscovery:(NSDictionary*)config; +- (void)addedUploadOperation:(LACamliUploadOperation*)op; +- (void)finishedUploadOperation:(LACamliUploadOperation*)op; +- (void)uploadProgress:(float)pct forOperation:(LACamliUploadOperation*)op; +@end + +@interface LACamliClient : NSObject + +@property NSURLSessionConfiguration* sessionConfig; +@property id delegate; + +@property NSURL* serverURL; +@property NSString* username; +@property NSString* password; + +@property NSString* blobRootComponent; +@property NSOperationQueue* uploadQueue; +@property NSUInteger totalUploads; + +@property NSMutableArray* uploadedFileNames; +@property UIBackgroundTaskIdentifier backgroundID; + +@property BOOL isAuthorized; +@property BOOL authorizing; + +- (id)initWithServer:(NSURL*)server username:(NSString*)username andPassword:(NSString*)password; +- (BOOL)readyToUpload; +- (void)discoveryWithUsername:(NSString*)user andPassword:(NSString*)pass; + +- (BOOL)fileAlreadyUploaded:(NSString*)filename; +- (void)addFile:(LACamliFile*)file withCompletion:(void (^)())completion; + +- (NSURL*)statURL; +- (NSURL*)uploadURL; + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliClient.m b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliClient.m new file mode 100644 index 00000000..a69ab3a0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliClient.m @@ -0,0 +1,339 @@ +// +// LACamliClient.m +// +// Created by Nick O'Neill on 1/10/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. 
+// + +#import "LACamliClient.h" +#import "LACamliUploadOperation.h" +#import "LACamliFile.h" +#import "LACamliUtil.h" + +@implementation LACamliClient + +NSString* const CamliStorageGenerationKey = @"org.camlistore.storagetoken"; + +- (id)initWithServer:(NSURL*)server + username:(NSString*)username + andPassword:(NSString*)password +{ + NSParameterAssert(server); + NSParameterAssert(username); + NSParameterAssert(password); + + if (self = [super init]) { + _serverURL = server; + _username = username; + _password = password; + + if ([[NSFileManager defaultManager] + fileExistsAtPath:[self uploadedFilenamesArchivePath]]) { + self.uploadedFileNames = [NSMutableArray + arrayWithContentsOfFile:[self uploadedFilenamesArchivePath]]; + } + + if (!self.uploadedFileNames) { + self.uploadedFileNames = [NSMutableArray array]; + } + + [LACamliUtil logText:@[ + @"uploads in cache: ", + [NSString stringWithFormat:@"%lu", (unsigned long) + [self.uploadedFileNames count]] + ]]; + + self.uploadQueue = [[NSOperationQueue alloc] init]; + self.uploadQueue.maxConcurrentOperationCount = 1; + self.totalUploads = 0; + + self.isAuthorized = false; + self.authorizing = false; + + self.sessionConfig = [NSURLSessionConfiguration defaultSessionConfiguration]; + self.sessionConfig.HTTPAdditionalHeaders = @{ + @"Authorization" : + [NSString stringWithFormat:@"Basic %@", [self encodedAuth]] + }; + } + + return self; +} + +#pragma mark - ready state + +- (BOOL)readyToUpload +{ + // can't upload if we don't have credentials + if (!self.username || !self.password || !self.serverURL) { + [LACamliUtil logText:@[ + @"not ready: no u/p/s" + ]]; + return NO; + } + + // don't want to start a new upload if we're already going + if ([self.uploadQueue operationCount] > 0) { + [LACamliUtil logText:@[ + @"not ready: already uploading" + ]]; + return NO; + } + + [LACamliUtil logText:@[ + @"starting upload" + ]]; + return YES; +} + +#pragma mark - discovery + +// discovery is done on demand when we have a new 
file to upload +- (void)discoveryWithUsername:(NSString*)user andPassword:(NSString*)pass +{ + [LACamliUtil statusText:@[ + @"discovering..." + ]]; + self.authorizing = YES; + + NSURLSessionConfiguration* discoverConfig = + [NSURLSessionConfiguration defaultSessionConfiguration]; + discoverConfig.HTTPAdditionalHeaders = @{ + @"Accept" : @"text/x-camli-configuration", + @"Authorization" : + [NSString stringWithFormat:@"Basic %@", [self encodedAuth]] + }; + NSURLSession* discoverSession = + [NSURLSession sessionWithConfiguration:discoverConfig + delegate:self + delegateQueue:nil]; + + NSURLSessionDataTask *data = [discoverSession dataTaskWithURL:self.serverURL completionHandler:^(NSData *data, NSURLResponse *response, NSError *error) + { + + if (error) { + if ([error code] == NSURLErrorNotConnectedToInternet || [error code] == NSURLErrorNetworkConnectionLost) { + LALog(@"connection lost or unavailable"); + [LACamliUtil statusText:@[ + @"internet connection appears offline" + ]]; + } else if ([error code] == NSURLErrorCannotConnectToHost || [error code] == NSURLErrorCannotFindHost) { + LALog(@"can't connect to server"); + [LACamliUtil statusText:@[ + @"can't connect to server" + ]]; + + } else { + LALog(@"error discovery: %@", error); + [LACamliUtil errorText:@[ + @"discovery error: ", + [error description] + ]]; + } + + } else { + NSHTTPURLResponse* res = (NSHTTPURLResponse*)response; + + if (res.statusCode != 200) { + NSString* serverSaid = [[NSString alloc] + initWithData:data + encoding:NSUTF8StringEncoding]; + + [LACamliUtil + errorText:@[ + @"error discovery: ", + serverSaid + ]]; + [LACamliUtil + logText:@[ + [NSString stringWithFormat: + @"server said: %@", + serverSaid] + ]]; + + if ([self.delegate respondsToSelector:@selector(finishedDiscovery:)]) { + [self.delegate finishedDiscovery:@{ + @"error" : serverSaid + }]; + } + } else { + NSError* err; + NSDictionary* config = [NSJSONSerialization JSONObjectWithData:data + options:0 + error:&err]; + if (!err) { + 
self.blobRootComponent = config[@"blobRoot"]; + self.isAuthorized = YES; + [self.uploadQueue setSuspended:NO]; + + // files may have already been rejected for being previously uploaded when + // dicovery returns, this doesn't kick off a new check for files. The next + // file check will catch anything that was missed by timing + + // if the storage generation changes, zero the saved array + if (![[self storageToken] isEqualToString:config[@"storageGeneration"]]) { + self.uploadedFileNames = [NSMutableArray array]; + [self saveStorageToken:config[@"storageGeneration"]]; + } + + [LACamliUtil + logText: + @[ + [NSString stringWithFormat:@"Welcome to %@'s camlistore", + config[@"ownerName"]] + ]]; + + [LACamliUtil statusText:@[ + @"discovery OK" + ]]; + + if ([self.delegate respondsToSelector:@selector(finishedDiscovery:)]) { + [self.delegate finishedDiscovery:config]; + } + } else { + [LACamliUtil + errorText:@[ + @"bad json from discovery", + [err description] + ]]; + [LACamliUtil + logText:@[ + @"json from discovery: ", + [err description] + ]]; + + if ([self.delegate respondsToSelector:@selector(finishedDiscovery:)]) { + [self.delegate finishedDiscovery:@{ + @"error" : [err description] + }]; + } + } + } + } + }]; + + [data resume]; +} + +#pragma mark - upload methods + +- (BOOL)fileAlreadyUploaded:(NSString*)filename +{ + NSParameterAssert(filename); + + if ([self.uploadedFileNames containsObject:filename]) { + return YES; + } + + return NO; +} + +// starts uploading immediately +- (void)addFile:(LACamliFile*)file withCompletion:(void (^)())completion +{ + NSParameterAssert(file); + + self.totalUploads++; + + if (![self isAuthorized]) { + [self.uploadQueue setSuspended:YES]; + + if (!self.authorizing) { + [self discoveryWithUsername:self.username + andPassword:self.password]; + } + } + + LACamliUploadOperation* op = + [[LACamliUploadOperation alloc] initWithFile:file + andClient:self]; + + __block LACamliUploadOperation* weakOp = op; + op.completionBlock = ^{ + 
LALog(@"finished op %@", file.blobRef); + if ([self.delegate respondsToSelector:@selector(finishedUploadOperation:)]) { + [self.delegate performSelector:@selector(finishedUploadOperation:) + onThread:[NSThread mainThread] + withObject:weakOp + waitUntilDone:NO]; + } + + if (weakOp.failedTransfer) { + LALog(@"failed transfer"); + } else { + [self.uploadedFileNames addObject:file.name]; + [self.uploadedFileNames writeToFile:[self uploadedFilenamesArchivePath] + atomically:YES]; + } + + if (![self.uploadQueue operationCount]) { + self.totalUploads = 0; + [LACamliUtil statusText:@[@"done uploading"]]; + } + + if (completion) { + completion(); + } + }; + + if ([self.delegate respondsToSelector:@selector(addedUploadOperation:)]) { + [self.delegate performSelector:@selector(addedUploadOperation:) + onThread:[NSThread mainThread] + withObject:op + waitUntilDone:NO]; + } + + [self.uploadQueue addOperation:op]; +} + +#pragma mark - utility + +- (NSString*)storageToken +{ + NSUserDefaults* defaults = [NSUserDefaults standardUserDefaults]; + if ([defaults objectForKey:CamliStorageGenerationKey]) { + return [defaults objectForKey:CamliStorageGenerationKey]; + } + + return nil; +} + +- (void)saveStorageToken:(NSString*)token +{ + NSUserDefaults* defaults = [NSUserDefaults standardUserDefaults]; + [defaults setObject:token + forKey:CamliStorageGenerationKey]; + [defaults synchronize]; +} + +- (NSURL*)blobRoot +{ + return [self.serverURL URLByAppendingPathComponent:self.blobRootComponent]; +} + +- (NSURL*)statURL +{ + return [[self blobRoot] URLByAppendingPathComponent:@"camli/stat"]; +} + +- (NSURL*)uploadURL +{ + return [[self blobRoot] URLByAppendingPathComponent:@"camli/upload"]; +} + +- (NSString*)encodedAuth +{ + NSString* auth = [NSString stringWithFormat:@"%@:%@", self.username, self.password]; + + return [LACamliUtil base64EncodedStringFromString:auth]; +} + +- (NSString*)uploadedFilenamesArchivePath +{ + NSString* documents = NSSearchPathForDirectoriesInDomains( + 
NSDocumentDirectory, NSUserDomainMask, YES)[0]; + + return [documents stringByAppendingPathComponent:@"uploadedFilenames.plist"]; +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliFile.h b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliFile.h new file mode 100644 index 00000000..e2842ced --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliFile.h @@ -0,0 +1,29 @@ +// +// LACamliFile.h +// +// Created by Nick O'Neill on 1/13/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. +// + +#import + +@class ALAsset; + +@interface LACamliFile : NSObject + +@property ALAsset* asset; +@property NSMutableArray* allBlobs; +@property NSMutableArray* uploadMarks; +@property NSArray* allBlobRefs; + +@property NSString* blobRef; + +- (id)initWithAsset:(ALAsset*)asset; +- (NSArray*)blobsToUpload; + +- (long long)size; +- (NSString *)name; +- (NSDate*)creation; +- (UIImage*)thumbnail; + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliFile.m b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliFile.m new file mode 100644 index 00000000..ab2bb06a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliFile.m @@ -0,0 +1,163 @@ +// +// LACamliFile.m +// +// Created by Nick O'Neill on 1/13/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. 
+// + +#import "LACamliFile.h" +#import "LACamliUtil.h" +#import + +@implementation LACamliFile + +@synthesize allBlobs = _allBlobs; +@synthesize allBlobRefs = _allBlobRefs; + +static NSUInteger const ChunkSize = 64000; + +- (id)initWithAsset:(ALAsset*)asset +{ + if (self = [super init]) { + _asset = asset; + + self.blobRef = [LACamliUtil blobRef:[self fileData]]; + + float chunkCount = (float)[self size] / (float)ChunkSize; + + _uploadMarks = [NSMutableArray array]; + for (int i = 0; i < chunkCount; i++) { + [_uploadMarks addObject:@YES]; + } + } + + return self; +} + +- (id)initWithPath:(NSString*)path +{ + // TODO, can init from random path to file + + if (self = [super init]) { + // [self setBlobRef:[LACamliClient blobRef:data]]; + // [self setFileData:data]; + + // set time, size and other properties here? + } + + return self; +} + +#pragma mark - convenience + +- (NSData*)fileData +{ + ALAssetRepresentation* rep = [_asset defaultRepresentation]; + Byte* buf = (Byte*)malloc((int)rep.size); + NSUInteger bufferLength = [rep getBytes:buf + fromOffset:0.0 + length:(int)rep.size + error:nil]; + + return [NSData dataWithBytesNoCopy:buf + length:bufferLength + freeWhenDone:YES]; +} + +- (long long)size +{ + return [_asset defaultRepresentation].size; +} + +- (NSString *)name +{ + return [_asset defaultRepresentation].filename; +} + +- (NSDate*)creation +{ + return [_asset valueForProperty:ALAssetPropertyDate]; +} + +- (UIImage*)thumbnail +{ + return [UIImage imageWithCGImage:[_asset thumbnail]]; +} + +- (NSArray*)blobsToUpload +{ + NSMutableArray* blobs = [NSMutableArray array]; + + int i = 0; + for (NSData* blob in _allBlobs) { + if ([[_uploadMarks objectAtIndex:i] boolValue]) { + [blobs addObject:blob]; + } + i++; + } + + return blobs; +} + +#pragma mark - delayed creation methods + +- (void)setAllBlobs:(NSMutableArray*)allBlobs +{ + _allBlobs = allBlobs; +} + +- (NSMutableArray*)allBlobs +{ + if (!_allBlobs) { + [self makeBlobsAndRefs]; + } + + // not a huge fan 
of how this doesn't obviously assign to _allBlobs + return _allBlobs; +} + +- (void)setAllBlobRefs:(NSArray*)allBlobRefs +{ + _allBlobRefs = allBlobRefs; +} + +- (NSArray*)allBlobRefs +{ + if (!_allBlobRefs) { + [self makeBlobsAndRefs]; + } + + // not a huge fan of how this doesn't obviously assign to _allBlobRefs + return _allBlobRefs; +} + +- (void)makeBlobsAndRefs +{ + LALog(@"making blob refs"); + + NSMutableArray* chunks = [NSMutableArray array]; + NSMutableArray* blobRefs = [NSMutableArray array]; + + float chunkCount = (float)[self size] / (float)ChunkSize; + + NSData* fileData = [self fileData]; + + for (int i = 0; i < chunkCount; i++) { + + // ChunkSize size chunks, unless the last one is less + NSData* chunkData; + if (ChunkSize * (i + 1) <= [self size]) { + chunkData = [fileData subdataWithRange:NSMakeRange(ChunkSize * i, ChunkSize)]; + } else { + chunkData = [fileData subdataWithRange:NSMakeRange(ChunkSize * i, (int)[self size] - (ChunkSize * i))]; + } + + [chunks addObject:chunkData]; + [blobRefs addObject:[LACamliUtil blobRef:chunkData]]; + } + + _allBlobs = chunks; + _allBlobRefs = blobRefs; +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliUploadOperation.h b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliUploadOperation.h new file mode 100644 index 00000000..256955fb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliUploadOperation.h @@ -0,0 +1,29 @@ +// +// LACamliUploadOperation.h +// photobackup +// +// Created by Nick O'Neill on 11/29/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. 
+// + +#import + +@class LACamliFile, LACamliClient; + +@interface LACamliUploadOperation : NSOperation + +@property LACamliClient* client; +@property LACamliFile* file; +@property NSURLSession* session; +@property UIBackgroundTaskIdentifier taskID; + +@property(readonly) BOOL failedTransfer; +@property(readonly) BOOL isExecuting; +@property(readonly) BOOL isFinished; + +- (id)initWithFile:(LACamliFile*)file andClient:(LACamliClient*)client; +- (BOOL)isConcurrent; + +- (NSString*)name; + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliUploadOperation.m b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliUploadOperation.m new file mode 100644 index 00000000..7035e82a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliUploadOperation.m @@ -0,0 +1,337 @@ +// +// LACamliUploadOperation.m +// photobackup +// +// Created by Nick O'Neill on 11/29/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. 
+// + +#import "LACamliUploadOperation.h" +#import "LACamliFile.h" +#import "LACamliClient.h" +#import "LACamliUtil.h" + +static NSUInteger const camliVersion = 1; +static NSString* const multipartBoundary = @"Qe43VdbVVaGtkkMd"; + +@implementation LACamliUploadOperation + +- (id)initWithFile:(LACamliFile*)file andClient:(LACamliClient*)client +{ + NSParameterAssert(file); + NSParameterAssert(client); + + if (self = [super init]) { + _file = file; + _client = client; + _isExecuting = NO; + _isFinished = NO; + _failedTransfer = NO; + _session = [NSURLSession sessionWithConfiguration:_client.sessionConfig + delegate:self + delegateQueue:nil]; + } + + return self; +} + +- (BOOL)isConcurrent +{ + return YES; +} + +#pragma mark - convenience + +- (NSString*)name +{ + return _file.blobRef; +} + +#pragma mark - operation flow + +// request stats for each chunk, making sure the server doesn't already have the chunk +- (void)start +{ + [LACamliUtil statusText:@[ + @"performing stat..." + ]]; + + _taskID = [[UIApplication sharedApplication] beginBackgroundTaskWithName:@"uploadtask" + expirationHandler:^{ + LALog(@"upload task expired"); + }]; + + if (_client.backgroundID) { + [[UIApplication sharedApplication] endBackgroundTask:_client.backgroundID]; + } + + [self willChangeValueForKey:@"isExecuting"]; + _isExecuting = YES; + [self didChangeValueForKey:@"isExecuting"]; + + NSMutableDictionary* params = [NSMutableDictionary dictionary]; + [params setObject:[NSNumber numberWithInt:camliVersion] + forKey:@"camliversion"]; + + int i = 1; + for (NSString* blobRef in _file.allBlobRefs) { + [params setObject:blobRef + forKey:[NSString stringWithFormat:@"blob%d", i]]; + i++; + } + + NSString* formValues = @""; + for (NSString* key in params) { + formValues = [formValues stringByAppendingString:[NSString stringWithFormat:@"%@=%@&", key, params[key]]]; + } + + LALog(@"uploading to %@", [_client statURL]); + NSMutableURLRequest* req = [NSMutableURLRequest requestWithURL:[_client 
statURL]]; + [req setHTTPMethod:@"POST"]; + [req setHTTPBody:[formValues dataUsingEncoding:NSUTF8StringEncoding]]; + + NSURLSessionDataTask *statTask = [_session dataTaskWithRequest:req completionHandler:^(NSData *data, NSURLResponse *response, NSError *error) + { + + if (!error) { + // LALog(@"data: %@",[[NSString alloc] initWithData:data encoding:NSUTF8StringEncoding]); + + // we can remove any chunks that the server claims it already has + NSError* err; + NSMutableDictionary* resObj = [NSJSONSerialization JSONObjectWithData:data + options:0 + error:&err]; + if (err) { + LALog(@"error getting json: %@", err); + } + + if (resObj[@"stat"] != [NSNull null]) { + for (NSDictionary* stat in resObj[@"stat"]) { + for (NSString* blobRef in _file.allBlobRefs) { + if ([stat[@"blobRef"] isEqualToString:blobRef]) { + [_file.uploadMarks replaceObjectAtIndex:[_file.allBlobRefs indexOfObject:blobRef] + withObject:@NO]; + } + } + } + } + + BOOL allUploaded = YES; + for (NSNumber* upload in _file.uploadMarks) { + if ([upload boolValue]) { + allUploaded = NO; + } + } + + // TODO: there's a posibility all chunks have been uploaded but no permanode exists + if (allUploaded) { + LALog(@"everything's been uploaded already for this file"); + [LACamliUtil logText:@[ + @"everything already uploaded for ", + _file.blobRef + ]]; + [self finished]; + return; + } + + [self uploadChunks]; + } else { + if ([error code] == NSURLErrorNotConnectedToInternet || [error code] == NSURLErrorNetworkConnectionLost) { + LALog(@"connection lost or unavailable"); + [LACamliUtil statusText:@[ + @"internet connection appears offline" + ]]; + } else { + LALog(@"failed stat: %@", error); + [LACamliUtil errorText:@[ + @"failed to stat: ", + [error description] + ]]; + [LACamliUtil logText:@[ + [NSString stringWithFormat:@"failed to stat: %@", error] + ]]; + } + + _failedTransfer = YES; + [self finished]; + } + }]; + + [statTask resume]; +} + +- (void)uploadChunks +{ + [LACamliUtil statusText:@[ + @"uploading..." 
+ ]]; + + NSMutableURLRequest* uploadReq = [NSMutableURLRequest requestWithURL:[_client uploadURL]]; + [uploadReq setHTTPMethod:@"POST"]; + [uploadReq setValue:[NSString stringWithFormat:@"multipart/form-data; boundary=%@", multipartBoundary] + forHTTPHeaderField:@"Content-Type"]; + + NSMutableData* uploadData = [self multipartDataForChunks]; + + NSURLSessionUploadTask *upload = [_session uploadTaskWithRequest:uploadReq fromData:uploadData completionHandler:^(NSData *data, NSURLResponse *response, NSError *error) + { + + // LALog(@"upload response: %@",[[NSString alloc]initWithData:data encoding:NSUTF8StringEncoding]); + + if (error) { + if ([error code] == NSURLErrorNotConnectedToInternet || [error code] == NSURLErrorNetworkConnectionLost) { + LALog(@"connection lost or unavailable"); + [LACamliUtil statusText:@[ + @"internet connection appears offline" + ]]; + } else { + LALog(@"upload error: %@", error); + [LACamliUtil errorText:@[ + @"error uploading: ", + error + ]]; + } + _failedTransfer = YES; + [self finished]; + } else { + [self vivifyChunks]; + } + }]; + + [upload resume]; +} + +// ask the server to vivify the blobrefs into a file +- (void)vivifyChunks +{ + [LACamliUtil statusText:@[ + @"vivify" + ]]; + + NSMutableURLRequest* req = [NSMutableURLRequest requestWithURL:[_client uploadURL]]; + [req setHTTPMethod:@"POST"]; + [req setValue:[NSString stringWithFormat:@"multipart/form-data; boundary=%@", multipartBoundary] + forHTTPHeaderField:@"Content-Type"]; + [req addValue:@"1" + forHTTPHeaderField:@"X-Camlistore-Vivify"]; + + NSMutableData* vivifyData = [self multipartVivifyDataForChunks]; + + NSURLSessionUploadTask *vivify = [_session uploadTaskWithRequest:req fromData:vivifyData completionHandler:^(NSData *data, NSURLResponse *response, NSError *error) + { + if (error) { + LALog(@"error vivifying: %@", error); + [LACamliUtil errorText:@[ + @"error vivify: ", + [error description] + ]]; + _failedTransfer = YES; + } + + [self finished]; + }]; + + [vivify 
resume]; +} + +- (void)finished +{ + [LACamliUtil statusText:@[ + @"cleaning up..." + ]]; + + _client.backgroundID = [[UIApplication sharedApplication] beginBackgroundTaskWithName:@"queuesync" + expirationHandler:^{ + LALog(@"queue sync task expired"); + }]; + + [[UIApplication sharedApplication] endBackgroundTask:_taskID]; + + LALog(@"finished op %@", _file.blobRef); + + // There's an extra retain on this operation that I cannot find, + // this mitigates the issue so the leak is tiny + _file.allBlobs = nil; + + [self willChangeValueForKey:@"isExecuting"]; + [self willChangeValueForKey:@"isFinished"]; + + _isExecuting = NO; + _isFinished = YES; + + [self didChangeValueForKey:@"isExecuting"]; + [self didChangeValueForKey:@"isFinished"]; +} + +#pragma mark - nsurlsession delegate + +- (void)URLSession:(NSURLSession*)session task:(NSURLSessionTask*)task didSendBodyData:(int64_t)bytesSent totalBytesSent:(int64_t)totalBytesSent totalBytesExpectedToSend:(int64_t)totalBytesExpectedToSend +{ + if ([_client.delegate respondsToSelector:@selector(uploadProgress: + forOperation:)]) { + float progress = (float)totalBytesSent / (float)totalBytesExpectedToSend; + + dispatch_async(dispatch_get_main_queue(), ^{ + [_client.delegate uploadProgress:progress forOperation:self]; + }); + } +} + +#pragma mark - multipart bits + +- (NSMutableData*)multipartDataForChunks +{ + NSMutableData* data = [NSMutableData data]; + + for (NSData* chunk in [_file blobsToUpload]) { + [data appendData:[[NSString stringWithFormat:@"--%@\r\n", multipartBoundary] dataUsingEncoding:NSUTF8StringEncoding]]; + // server ignores this filename and mimetype, it doesn't matter what it is + [data appendData:[[NSString stringWithFormat:@"Content-Disposition: form-data; name=\"%@\"; filename=\"image.jpg\"\r\n", [LACamliUtil blobRef:chunk]] dataUsingEncoding:NSUTF8StringEncoding]]; + [data appendData:[@"Content-Type: image/jpeg\r\n\r\n" dataUsingEncoding:NSUTF8StringEncoding]]; + [data appendData:chunk]; + [data 
appendData:[[NSString stringWithFormat:@"\r\n"] dataUsingEncoding:NSUTF8StringEncoding]]; + } + + [data appendData:[[NSString stringWithFormat:@"--%@--\r\n", multipartBoundary] dataUsingEncoding:NSUTF8StringEncoding]]; + + return data; +} + +- (NSMutableData*)multipartVivifyDataForChunks +{ + NSMutableData* data = [NSMutableData data]; + + NSMutableDictionary* schemaBlob = [@{ + @"camliVersion" : @1, + @"camliType" : @"file", + @"unixMTime" : [LACamliUtil rfc3339StringFromDate:_file.creation], + @"fileName" : _file.name + } mutableCopy]; + + NSMutableArray* parts = [NSMutableArray array]; + int i = 0; + for (NSString* blobRef in _file.allBlobRefs) { + [parts addObject:@{ + @"blobRef" : blobRef, @"size" : [NSNumber numberWithInteger:[[_file.allBlobs objectAtIndex:i] length]] + }]; + i++; + } + [schemaBlob setObject:parts + forKey:@"parts"]; + + NSData* schemaData = [NSJSONSerialization dataWithJSONObject:schemaBlob + options:NSJSONWritingPrettyPrinted + error:nil]; + + [data appendData:[[NSString stringWithFormat:@"--%@\r\n", multipartBoundary] dataUsingEncoding:NSUTF8StringEncoding]]; + [data appendData:[[NSString stringWithFormat:@"Content-Disposition: form-data; name=\"%@\"; filename=\"json\"\r\n", [LACamliUtil blobRef:schemaData]] dataUsingEncoding:NSUTF8StringEncoding]]; + [data appendData:[@"Content-Type: application/json\r\n\r\n" dataUsingEncoding:NSUTF8StringEncoding]]; + [data appendData:schemaData]; + [data appendData:[[NSString stringWithFormat:@"\r\n"] dataUsingEncoding:NSUTF8StringEncoding]]; + + [data appendData:[[NSString stringWithFormat:@"--%@--\r\n", multipartBoundary] dataUsingEncoding:NSUTF8StringEncoding]]; + + return data; +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliUtil.h b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliUtil.h new file mode 100644 index 00000000..52f84005 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliUtil.h @@ -0,0 +1,23 @@ +// +// LACamliUtil.h +// photobackup +// +// Created by Nick O'Neill on 11/29/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. +// + +#import + +@interface LACamliUtil : NSObject + ++ (NSString*)base64EncodedStringFromString:(NSString*)string; ++ (NSString*)passwordForUsername:(NSString*)username; ++ (BOOL)savePassword:(NSString*)password forUsername:(NSString*)username; ++ (NSString*)blobRef:(NSData*)data; ++ (NSString*)rfc3339StringFromDate:(NSDate*)date; + ++ (void)logText:(NSArray*)logs; ++ (void)statusText:(NSArray*)statuses; ++ (void)errorText:(NSArray*)errors; + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliUtil.m b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliUtil.m new file mode 100644 index 00000000..6eaceaf6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LACamliClient/LACamliUtil.m @@ -0,0 +1,174 @@ +// +// LACamliUtil.m +// photobackup +// +// Created by Nick O'Neill on 11/29/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. 
+// + +#import "LACamliUtil.h" +#import "LAAppDelegate.h" +#import +#import + +@implementation LACamliUtil + +static NSString* const serviceName = @"org.camlistore.credentials"; + +// h/t AFNetworking ++ (NSString*)base64EncodedStringFromString:(NSString*)string +{ + NSData* data = [NSData dataWithBytes:[string UTF8String] + length:[string lengthOfBytesUsingEncoding:NSUTF8StringEncoding]]; + NSUInteger length = [data length]; + NSMutableData* mutableData = [NSMutableData dataWithLength:((length + 2) / 3) * 4]; + + uint8_t* input = (uint8_t*)[data bytes]; + uint8_t* output = (uint8_t*)[mutableData mutableBytes]; + + for (NSUInteger i = 0; i < length; i += 3) { + NSUInteger value = 0; + for (NSUInteger j = i; j < (i + 3); j++) { + value <<= 8; + if (j < length) { + value |= (0xFF & input[j]); + } + } + + static uint8_t const kAFBase64EncodingTable[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + + NSUInteger idx = (i / 3) * 4; + output[idx + 0] = kAFBase64EncodingTable[(value >> 18) & 0x3F]; + output[idx + 1] = kAFBase64EncodingTable[(value >> 12) & 0x3F]; + output[idx + 2] = (i + 1) < length ? kAFBase64EncodingTable[(value >> 6) & 0x3F] : '='; + output[idx + 3] = (i + 2) < length ? 
kAFBase64EncodingTable[(value >> 0) & 0x3F] : '='; + } + + return [[NSString alloc] initWithData:mutableData + encoding:NSASCIIStringEncoding]; +} + +#pragma mark - keychain stuff + ++ (NSString*)passwordForUsername:(NSString*)username +{ + NSError* error; + NSString* password = [SSKeychain passwordForService:CamliCredentialsKey + account:username + error:&error]; + + if (!password || error) { + [LACamliUtil errorText:@[ + @"error getting password: ", + [error description] + ]]; + return nil; + } + + return password; +} + ++ (BOOL)savePassword:(NSString*)password forUsername:(NSString*)username +{ + NSError* error; + BOOL setPassword = [SSKeychain setPassword:password + forService:CamliCredentialsKey + account:username + error:&error]; + + if (!setPassword || error) { + [LACamliUtil errorText:@[ + @"error setting password: ", + [error description] + ]]; + + return NO; + } + + return YES; +} + +#pragma mark - hashes + ++ (NSString*)blobRef:(NSData*)data +{ + uint8_t digest[CC_SHA1_DIGEST_LENGTH]; + + CC_SHA1(data.bytes, data.length, digest); + + NSMutableString* output = [NSMutableString stringWithCapacity:(CC_SHA1_DIGEST_LENGTH * 2) + 5]; + [output appendString:@"sha1-"]; + + for (int i = 0; i < CC_SHA1_DIGEST_LENGTH; i++) { + [output appendFormat:@"%02x", digest[i]]; + } + + return output; +} + +#pragma mark - dates + ++ (NSString*)rfc3339StringFromDate:(NSDate*)date +{ + NSDateFormatter* rfc3339DateFormatter = [[NSDateFormatter alloc] init]; + + NSLocale* enUSPOSIXLocale = [[NSLocale alloc] initWithLocaleIdentifier:@"en_US_POSIX"]; + + [rfc3339DateFormatter setLocale:enUSPOSIXLocale]; + [rfc3339DateFormatter setDateFormat:@"yyyy'-'MM'-'dd'T'HH':'mm':'ss'Z'"]; + [rfc3339DateFormatter setTimeZone:[NSTimeZone timeZoneForSecondsFromGMT:0]]; + + return [rfc3339DateFormatter stringFromDate:date]; +} + +#pragma mark - yucky logging hack + ++ (void)logText:(NSArray*)logs +{ + NSMutableString* logString = [NSMutableString string]; + + for (NSString* log in logs) { + 
[logString appendString:log]; + } + + LALog(@"LOG: %@", logString); + + [[NSNotificationCenter defaultCenter] postNotificationName:@"logtext" + object:@{ + @"text" : logString + }]; +} + ++ (void)statusText:(NSArray*)statuses +{ + NSMutableString* statusString = [NSMutableString string]; + + for (NSString* status in statuses) { + [statusString appendString:status]; + } + + LALog(@"STATUS: %@", statusString); + + [[NSNotificationCenter defaultCenter] postNotificationName:@"statusText" + object:@{ + @"text" : statusString + }]; +} + ++ (void)errorText:(NSArray*)errors +{ + NSMutableString* errorString = [NSMutableString string]; + + for (NSString* error in errors) { + [errorString appendString:error]; + } + + LALog(@"ERROR: %@", errorString); + + [[NSNotificationCenter defaultCenter] postNotificationName:@"errorText" + object:@{ + @"text" : errorString + }]; +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LAViewController.h b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LAViewController.h new file mode 100644 index 00000000..e077a8c5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LAViewController.h @@ -0,0 +1,22 @@ +// +// LAViewController.h +// photobackup +// +// Created by Nick O'Neill on 10/20/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. 
+// + +#import +#import "LACamliClient.h" + +@class ProgressViewController; + +@interface LAViewController : UIViewController + +@property IBOutlet UITableView* table; +@property NSMutableArray* operations; +@property ProgressViewController* progress; + +- (void)dismissSettings; + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LAViewController.m b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LAViewController.m new file mode 100644 index 00000000..b9c2eec3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/LAViewController.m @@ -0,0 +1,207 @@ +// +// LAViewController.m +// photobackup +// +// Created by Nick O'Neill on 10/20/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. +// + +#import "LAViewController.h" +#import "LACamliClient.h" +#import "LAAppDelegate.h" +#import "LACamliUtil.h" +#import "SettingsViewController.h" +#import "LACamliUploadOperation.h" +#import "UploadStatusCell.h" +#import "UploadTaskCell.h" +#import "LACamliFile.h" +#import + +@implementation LAViewController + +- (void)viewDidLoad +{ + [super viewDidLoad]; + + _operations = [NSMutableArray array]; + + self.navigationItem.title = @"camlistore"; + + UIBarButtonItem* reportItem = [[UIBarButtonItem alloc] initWithBarButtonSystemItem:UIBarButtonSystemItemAction + target:self + action:@selector(reportBug)]; + + [self.navigationItem setLeftBarButtonItem:reportItem]; + + UIBarButtonItem* settingsItem = [[UIBarButtonItem alloc] initWithBarButtonSystemItem:UIBarButtonSystemItemEdit + target:self + action:@selector(showSettings)]; + + [self.navigationItem setRightBarButtonItem:settingsItem]; + + [[NSNotificationCenter defaultCenter] addObserverForName:@"statusText" object:nil queue:nil usingBlock:^(NSNotification *note) + { + UploadStatusCell* cell = (UploadStatusCell*)[_table cellForRowAtIndexPath:[NSIndexPath indexPathForRow:0 + inSection:0]]; + + 
dispatch_async(dispatch_get_main_queue(), ^{ + cell.status.text = note.object[@"text"]; + }); + }]; + + [[NSNotificationCenter defaultCenter] addObserverForName:@"errorText" object:nil queue:nil usingBlock:^(NSNotification *note) + { + UploadStatusCell* cell = (UploadStatusCell*)[_table cellForRowAtIndexPath:[NSIndexPath indexPathForRow:0 + inSection:0]]; + + dispatch_async(dispatch_get_main_queue(), ^{ + cell.error.text = note.object[@"text"]; + }); + }]; +} + +- (void)viewDidAppear:(BOOL)animated +{ + NSURL* serverURL = [NSURL URLWithString:[[NSUserDefaults standardUserDefaults] stringForKey:CamliServerKey]]; + NSString* username = [[NSUserDefaults standardUserDefaults] stringForKey:CamliUsernameKey]; + + NSString* password = nil; + if (username) { + password = [LACamliUtil passwordForUsername:username]; + } + + if (!serverURL || !username || !password) { + [self showSettings]; + } +} + +- (void)reportBug +{ + [BugshotKit show]; +} + +- (void)showSettings +{ + SettingsViewController* settings = [self.storyboard instantiateViewControllerWithIdentifier:@"settings"]; + [settings setParent:self]; + + [self presentViewController:settings + animated:YES + completion:nil]; +} + +- (void)dismissSettings +{ + [self dismissViewControllerAnimated:YES + completion:nil]; + + [(LAAppDelegate*)[[UIApplication sharedApplication] delegate] loadCredentials]; +} + +#pragma mark - client delegate methods + +- (void)addedUploadOperation:(LACamliUploadOperation*)op +{ + @synchronized(_operations) + { + NSIndexPath* path = [NSIndexPath indexPathForRow:[_operations count] + inSection:1]; + + [_operations addObject:op]; + [_table insertRowsAtIndexPaths:@[ + path + ] + withRowAnimation:UITableViewRowAnimationAutomatic]; + } +} + +- (void)finishedUploadOperation:(LACamliUploadOperation*)op +{ + NSIndexPath* path = [NSIndexPath indexPathForRow:[_operations indexOfObject:op] + inSection:1]; + + @synchronized(_operations) + { + [_operations removeObject:op]; + [_table 
deleteRowsAtIndexPaths:@[ + path + ] + withRowAnimation:UITableViewRowAnimationAutomatic]; + } +} + +- (void)uploadProgress:(float)pct forOperation:(LACamliUploadOperation*)op +{ + NSIndexPath* path = [NSIndexPath indexPathForRow:[_operations indexOfObject:op] + inSection:1]; + UploadTaskCell* cell = (UploadTaskCell*)[_table cellForRowAtIndexPath:path]; + + cell.progress.progress = pct; +} + +#pragma mark - table view methods + +- (UITableViewCell*)tableView:(UITableView*)tableView cellForRowAtIndexPath:(NSIndexPath*)indexPath +{ + if (indexPath.section == 0) { + UploadStatusCell* cell = [tableView dequeueReusableCellWithIdentifier:@"statusCell" + forIndexPath:indexPath]; + + return cell; + } else { + UploadTaskCell* cell = [tableView dequeueReusableCellWithIdentifier:@"taskCell" + forIndexPath:indexPath]; + + cell.progress.progress = 0.0; + + LACamliUploadOperation* op = [_operations objectAtIndex:indexPath.row]; + [cell.displayText setText:[NSString stringWithFormat:@"%@", [op name]]]; + [cell.preview setImage:[op.file thumbnail]]; + + return cell; + } + + return nil; +} + +- (NSString*)tableView:(UITableView*)tableView titleForHeaderInSection:(NSInteger)section +{ + NSString* title = @""; + + if (section == 0) { + title = @"status"; + } else { + title = @"uploads"; + } + + return title; +} + +- (NSInteger)numberOfSectionsInTableView:(UITableView*)tableView +{ + return 2; +} + +- (NSInteger)tableView:(UITableView*)tableView numberOfRowsInSection:(NSInteger)section +{ + if (section == 0) { + return 1; + } else { + return [_operations count]; + } +} + +#pragma mark - other + +- (void)didReceiveMemoryWarning +{ + [super didReceiveMemoryWarning]; + // Dispose of any resources that can be recreated. 
+} + +- (void)dealloc +{ + [[NSNotificationCenter defaultCenter] removeObserver:self]; +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/SettingsViewController.h b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/SettingsViewController.h new file mode 100644 index 00000000..01d4e133 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/SettingsViewController.h @@ -0,0 +1,23 @@ +// +// SettingsViewController.h +// photobackup +// +// Created by Nick O'Neill on 12/16/13. +// Copyright (c) 2013 Nick O'Neill. All rights reserved. +// + +#import + +@class LAViewController; + +@interface SettingsViewController : UIViewController + +@property(weak) LAViewController* parent; +@property IBOutlet UILabel* errors; +@property IBOutlet UITextField* server; +@property IBOutlet UITextField* username; +@property IBOutlet UITextField* password; + +- (IBAction)validate; + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/SettingsViewController.m b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/SettingsViewController.m new file mode 100644 index 00000000..783bcadd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/SettingsViewController.m @@ -0,0 +1,122 @@ +// +// SettingsViewController.m +// photobackup +// +// Created by Nick O'Neill on 12/16/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. 
+// + +#import "SettingsViewController.h" +#import "LAViewController.h" +#import "LACamliUtil.h" +#import "LAAppDelegate.h" + +@interface SettingsViewController () + +@end + +@implementation SettingsViewController + +- (id)initWithNibName:(NSString*)nibNameOrNil bundle:(NSBundle*)nibBundleOrNil +{ + self = [super initWithNibName:nibNameOrNil + bundle:nibBundleOrNil]; + if (self) { + // Custom initialization + } + return self; +} + +- (void)viewDidLoad +{ + [super viewDidLoad]; + + NSString* serverUrl = [[NSUserDefaults standardUserDefaults] stringForKey:CamliServerKey]; + if (serverUrl) { + self.server.text = serverUrl; + } + + NSString* username = [[NSUserDefaults standardUserDefaults] stringForKey:CamliUsernameKey]; + if (username) { + self.username.text = username; + + NSString* password = [LACamliUtil passwordForUsername:username]; + if (password) { + self.password.text = password; + } + } +} + +#pragma mark - uitextfield delegate + +- (BOOL)textFieldShouldReturn:(UITextField*)textField +{ + LALog(@"text field return %@", textField); + + [self.server resignFirstResponder]; + [self.username resignFirstResponder]; + [self.password resignFirstResponder]; + + if (textField == self.server) { + [self.username becomeFirstResponder]; + } else if (textField == self.username) { + [self.password becomeFirstResponder]; + } + + return YES; +} + +#pragma mark - done + +- (IBAction)validate +{ + self.errors.text = @""; + + BOOL hasErrors = NO; + + NSURL* serverUrl = [NSURL URLWithString:self.server.text]; + + if (!serverUrl || !serverUrl.scheme || !serverUrl.host) { + hasErrors = YES; + self.errors.text = @"bad url :("; + } + + if (!self.username.text || [self.username.text isEqualToString:@""]) { + hasErrors = YES; + self.errors.text = [self.errors.text stringByAppendingString:@"type a username :("]; + } + + if (!self.password.text || [self.password.text isEqualToString:@""]) { + hasErrors = YES; + self.errors.text = [self.errors.text stringByAppendingString:@"type a 
password :("]; + } + + if (!hasErrors) { + [self saveValues]; + } +} + +- (void)saveValues +{ + [LACamliUtil savePassword:self.password.text forUsername:self.username.text]; + + [[NSUserDefaults standardUserDefaults] setObject:self.username.text + forKey:CamliUsernameKey]; + [[NSUserDefaults standardUserDefaults] setObject:self.server.text + forKey:CamliServerKey]; + [[NSUserDefaults standardUserDefaults] synchronize]; + + [LACamliUtil errorText:@[ + @"" + ]]; + + [self.parent dismissSettings]; +} + +- (void)didReceiveMemoryWarning +{ + [super didReceiveMemoryWarning]; + // Dispose of any resources that can be recreated. +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/UploadStatusCell.h b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/UploadStatusCell.h new file mode 100644 index 00000000..2813ed17 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/UploadStatusCell.h @@ -0,0 +1,16 @@ +// +// UploadStatusCell.h +// photobackup +// +// Created by Nick O'Neill on 1/6/14. +// Copyright (c) 2014 Nick O'Neill. All rights reserved. +// + +#import + +@interface UploadStatusCell : UITableViewCell + +@property IBOutlet UILabel* status; +@property IBOutlet UILabel* error; + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/UploadStatusCell.m b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/UploadStatusCell.m new file mode 100644 index 00000000..d5f43168 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/UploadStatusCell.m @@ -0,0 +1,31 @@ +// +// UploadStatusCell.m +// photobackup +// +// Created by Nick O'Neill on 1/6/14. +// Copyright (c) 2014 Nick O'Neill. All rights reserved. 
+// + +#import "UploadStatusCell.h" + +@implementation UploadStatusCell + +- (id)initWithStyle:(UITableViewCellStyle)style reuseIdentifier:(NSString*)reuseIdentifier +{ + self = [super initWithStyle:style + reuseIdentifier:reuseIdentifier]; + if (self) { + // Initialization code + } + return self; +} + +- (void)setSelected:(BOOL)selected animated:(BOOL)animated +{ + [super setSelected:selected + animated:animated]; + + // Configure the view for the selected state +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/UploadTaskCell.h b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/UploadTaskCell.h new file mode 100644 index 00000000..abeb6807 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/UploadTaskCell.h @@ -0,0 +1,17 @@ +// +// UploadTaskCell.h +// photobackup +// +// Created by Nick O'Neill on 1/6/14. +// Copyright (c) 2014 Nick O'Neill. All rights reserved. +// + +#import + +@interface UploadTaskCell : UITableViewCell + +@property IBOutlet UILabel* displayText; +@property IBOutlet UIImageView* preview; +@property IBOutlet UIProgressView* progress; + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/UploadTaskCell.m b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/UploadTaskCell.m new file mode 100644 index 00000000..fc5ad31e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/UploadTaskCell.m @@ -0,0 +1,31 @@ +// +// UploadTaskCell.m +// photobackup +// +// Created by Nick O'Neill on 1/6/14. +// Copyright (c) 2014 Nick O'Neill. All rights reserved. 
+// + +#import "UploadTaskCell.h" + +@implementation UploadTaskCell + +- (id)initWithStyle:(UITableViewCellStyle)style reuseIdentifier:(NSString*)reuseIdentifier +{ + self = [super initWithStyle:style + reuseIdentifier:reuseIdentifier]; + if (self) { + // Initialization code + } + return self; +} + +- (void)setSelected:(BOOL)selected animated:(BOOL)animated +{ + [super setSelected:selected + animated:animated]; + + // Configure the view for the selected state +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/en.lproj/InfoPlist.strings b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/en.lproj/InfoPlist.strings new file mode 100644 index 00000000..477b28ff --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/en.lproj/InfoPlist.strings @@ -0,0 +1,2 @@ +/* Localized versions of Info.plist keys */ + diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/main.m b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/main.m new file mode 100644 index 00000000..5a06d486 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/main.m @@ -0,0 +1,18 @@ +// +// main.m +// photobackup +// +// Created by Nick O'Neill on 10/20/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. 
+// + +#import + +#import "LAAppDelegate.h" + +int main(int argc, char * argv[]) +{ + @autoreleasepool { + return UIApplicationMain(argc, argv, nil, NSStringFromClass([LAAppDelegate class])); + } +} diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/photobackup-Info.plist b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/photobackup-Info.plist new file mode 100644 index 00000000..812e94fa --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/photobackup-Info.plist @@ -0,0 +1,49 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleDisplayName + Camlistore + CFBundleExecutable + ${EXECUTABLE_NAME} + CFBundleIdentifier + org.camlistore.${PRODUCT_NAME:rfc1034identifier} + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + ${PRODUCT_NAME} + CFBundlePackageType + APPL + CFBundleShortVersionString + 1.0 + CFBundleSignature + ???? + CFBundleVersion + 20140224 + LSRequiresIPhoneOS + + UIBackgroundModes + + location + + UIMainStoryboardFile + Main_iPhone + UIRequiredDeviceCapabilities + + armv7 + + UISupportedInterfaceOrientations + + UIInterfaceOrientationPortrait + + UISupportedInterfaceOrientations~ipad + + UIInterfaceOrientationPortrait + UIInterfaceOrientationPortraitUpsideDown + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + + diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/photobackup-Prefix.pch b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/photobackup-Prefix.pch new file mode 100644 index 00000000..4a09e3b1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackup/photobackup-Prefix.pch @@ -0,0 +1,23 @@ +// +// Prefix header +// +// The contents of this file are implicitly included at the beginning of every source file. +// + +#import + +#ifndef __IPHONE_5_0 +#warning "This project uses features only available in iOS SDK 5.0 and later." 
+#endif + +#ifdef DEBUG +# define LALog(fmt, ...) NSLog((@"%s [Line %d] " fmt), __PRETTY_FUNCTION__, __LINE__, ##__VA_ARGS__) +#else +# define LALog(...) +#endif + +#ifdef __OBJC__ + #import + #import + #import "LACamliUtil.h" +#endif diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackupTests/en.lproj/InfoPlist.strings b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackupTests/en.lproj/InfoPlist.strings new file mode 100644 index 00000000..477b28ff --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackupTests/en.lproj/InfoPlist.strings @@ -0,0 +1,2 @@ +/* Localized versions of Info.plist keys */ + diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackupTests/photobackupTests-Info.plist b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackupTests/photobackupTests-Info.plist new file mode 100644 index 00000000..35e2b971 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackupTests/photobackupTests-Info.plist @@ -0,0 +1,22 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + ${EXECUTABLE_NAME} + CFBundleIdentifier + net.launchapps.${PRODUCT_NAME:rfc1034identifier} + CFBundleInfoDictionaryVersion + 6.0 + CFBundlePackageType + BNDL + CFBundleShortVersionString + 1.0 + CFBundleSignature + ???? + CFBundleVersion + 1 + + diff --git a/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackupTests/photobackupTests.m b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackupTests/photobackupTests.m new file mode 100644 index 00000000..e8947f21 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/ios-objc/photobackupTests/photobackupTests.m @@ -0,0 +1,34 @@ +// +// photobackupTests.m +// photobackupTests +// +// Created by Nick O'Neill on 10/20/13. +// Copyright (c) 2013 The Camlistore Authors. All rights reserved. 
+// + +#import + +@interface photobackupTests : XCTestCase + +@end + +@implementation photobackupTests + +- (void)setUp +{ + [super setUp]; + // Put setup code here. This method is called before the invocation of each test method in the class. +} + +- (void)tearDown +{ + // Put teardown code here. This method is called after the invocation of each test method in the class. + [super tearDown]; +} + +- (void)testExample +{ + XCTFail(@"No implementation for \"%s\"", __PRETTY_FUNCTION__); +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/js/README b/vendor/github.com/camlistore/camlistore/clients/js/README new file mode 100644 index 00000000..b4792a81 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/js/README @@ -0,0 +1,6 @@ +This is a sketch of a client written in JavaScript. + +The idea is twofold: +1) any working server can drop in this directory to get a blobstore + browser UI, while simultaneously testing their API implementation +2) provide an easy library to plug into node.js if needed diff --git a/vendor/github.com/camlistore/camlistore/clients/js/camel.jpg b/vendor/github.com/camlistore/camlistore/clients/js/camel.jpg new file mode 100644 index 00000000..34a46f40 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/js/camel.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/clients/js/client.js b/vendor/github.com/camlistore/camlistore/clients/js/client.js new file mode 100644 index 00000000..aff5b065 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/js/client.js @@ -0,0 +1,38 @@ +var Camli = { + +BlobStore: function() { +} + +}; + +Camli.BlobStore.prototype.blobURL = function(ref) { + return '/camli/' + ref; +}; + +Camli.BlobStore.prototype.xhr = function(url, cb) { + var xhr = new XMLHttpRequest(); + xhr.onreadystatechange = function() { + if (xhr.readyState == 4) { + if (xhr.status == 200) { + cb(xhr.responseText); + } + } + // XXX handle error + }; + 
xhr.open('GET', url, true); + xhr.send(null); +}; + +Camli.BlobStore.prototype.xhrJSON = function(url, cb) { + this.xhr('/camli/enumerate-blobs', function(data) { + cb(JSON.parse(data)); + }); +}; + +Camli.BlobStore.prototype.enumerate = function(cb) { + this.xhrJSON('/camli/enumerate-blobs', cb); +}; + +Camli.BlobStore.prototype.getBlob = function(ref, cb) { + this.xhr(this.blobURL(ref), cb); +}; diff --git a/vendor/github.com/camlistore/camlistore/clients/js/index.html b/vendor/github.com/camlistore/camlistore/clients/js/index.html new file mode 100644 index 00000000..7315a986 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/js/index.html @@ -0,0 +1,61 @@ + + + + + + + + + + + + + +
    refsize
    + + + + + diff --git a/vendor/github.com/camlistore/camlistore/clients/js/style.css b/vendor/github.com/camlistore/camlistore/clients/js/style.css new file mode 100644 index 00000000..1031c002 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/js/style.css @@ -0,0 +1,27 @@ +body { + font-family: sans-serif; + font-size: 0.8em; +} +#logo { + background: url(camel.jpg); + width: 320px; + height: 302px; + color: white; + font-size: 400%; + text-align: center; + vertical-align: bottom; + text-shadow: 0 0 10px black; +} +#bloblist { + border: solid 1px gray; +} +#bloblist th { + background: #eee; + padding: 2px 4px; +} +#bloblist td { + padding-right: 1ex; +} +.blobref { + font-family: WebKitWorkaround, monospace; +} diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/.gitignore b/vendor/github.com/camlistore/camlistore/clients/osx/.gitignore new file mode 100644 index 00000000..ed29a66e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/.gitignore @@ -0,0 +1,4 @@ +*.pbxuser +*.xccheckout +build/ +xcuserdata diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/BUILDING b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/BUILDING new file mode 100644 index 00000000..815eea67 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/BUILDING @@ -0,0 +1,8 @@ +1. Install Go. +2. Install xcode. +3. From top-level directory: + $ go run make.go + (this will create bin/camlistored, bin/cammount, and bin/camput) +4. 
From this directory: + $ xcodebuild -target Camlistore.dmg + (this will create build/Release/Camlistore.app and Camlistore.dmg) diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore.xcodeproj/project.pbxproj b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore.xcodeproj/project.pbxproj new file mode 100644 index 00000000..6d34abe7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore.xcodeproj/project.pbxproj @@ -0,0 +1,591 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + DA31F9381866744D002E3F33 /* TimeTravelWindowController.m in Sources */ = {isa = PBXBuildFile; fileRef = DA31F9361866744D002E3F33 /* TimeTravelWindowController.m */; }; + DA31F9391866744D002E3F33 /* TimeTravelWindowController.xib in Resources */ = {isa = PBXBuildFile; fileRef = DA31F9371866744D002E3F33 /* TimeTravelWindowController.xib */; }; + DA71BCB718642A3A000A102C /* Camlicon.icns in Resources */ = {isa = PBXBuildFile; fileRef = DA71BCB618642A3A000A102C /* Camlicon.icns */; }; + DAABA574186435DA000D62B6 /* camlistored in Resources */ = {isa = PBXBuildFile; fileRef = DAABA572186435DA000D62B6 /* camlistored */; }; + DAABA575186435DA000D62B6 /* cammount in Resources */ = {isa = PBXBuildFile; fileRef = DAABA573186435DA000D62B6 /* cammount */; }; + DAABA57718643710000D62B6 /* Credits.html in Resources */ = {isa = PBXBuildFile; fileRef = DAABA57618643710000D62B6 /* Credits.html */; }; + DAD59F8C1877CC250018193C /* camput in Resources */ = {isa = PBXBuildFile; fileRef = DAD59F8B1877CC250018193C /* camput */; }; + DAF109491863EDAF00F6A3F9 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = DAF109481863EDAF00F6A3F9 /* Cocoa.framework */; }; + DAF109531863EDAF00F6A3F9 /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = DAF109511863EDAF00F6A3F9 /* InfoPlist.strings */; }; + 
DAF109551863EDAF00F6A3F9 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = DAF109541863EDAF00F6A3F9 /* main.m */; }; + DAF1095C1863EDAF00F6A3F9 /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = DAF1095B1863EDAF00F6A3F9 /* AppDelegate.m */; }; + DAF109611863EDAF00F6A3F9 /* Images.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = DAF109601863EDAF00F6A3F9 /* Images.xcassets */; }; + DAF109681863EDAF00F6A3F9 /* XCTest.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = DAF109671863EDAF00F6A3F9 /* XCTest.framework */; }; + DAF109691863EDAF00F6A3F9 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = DAF109481863EDAF00F6A3F9 /* Cocoa.framework */; }; + DAF109711863EDAF00F6A3F9 /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = DAF1096F1863EDAF00F6A3F9 /* InfoPlist.strings */; }; + DAF109731863EDAF00F6A3F9 /* CamlistoreTests.m in Sources */ = {isa = PBXBuildFile; fileRef = DAF109721863EDAF00F6A3F9 /* CamlistoreTests.m */; }; + DAF1097E1863EEFB00F6A3F9 /* LoginItemManager.m in Sources */ = {isa = PBXBuildFile; fileRef = DAF1097D1863EEFB00F6A3F9 /* LoginItemManager.m */; }; + DAF109831863F1F900F6A3F9 /* menuicon-selected.png in Resources */ = {isa = PBXBuildFile; fileRef = DAF1097F1863F1F900F6A3F9 /* menuicon-selected.png */; }; + DAF109841863F1F900F6A3F9 /* menuicon-selected@2x.png in Resources */ = {isa = PBXBuildFile; fileRef = DAF109801863F1F900F6A3F9 /* menuicon-selected@2x.png */; }; + DAF109851863F1F900F6A3F9 /* menuicon.png in Resources */ = {isa = PBXBuildFile; fileRef = DAF109811863F1F900F6A3F9 /* menuicon.png */; }; + DAF109861863F1F900F6A3F9 /* menuicon@2x.png in Resources */ = {isa = PBXBuildFile; fileRef = DAF109821863F1F900F6A3F9 /* menuicon@2x.png */; }; + DAF1098A1863FDD600F6A3F9 /* MainMenu.xib in Resources */ = {isa = PBXBuildFile; fileRef = DAF1095D1863EDAF00F6A3F9 /* MainMenu.xib */; }; + DAF63B9D1864D0AC0000EAC9 /* FUSEManager.m in Sources */ = {isa = PBXBuildFile; fileRef = 
DAF63B9C1864D0AC0000EAC9 /* FUSEManager.m */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + DAC21F0C188B359300EEA8BB /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = DAF1093D1863EDAF00F6A3F9 /* Project object */; + proxyType = 1; + remoteGlobalIDString = DAF109441863EDAF00F6A3F9; + remoteInfo = Camlistore; + }; + DAF1096A1863EDAF00F6A3F9 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = DAF1093D1863EDAF00F6A3F9 /* Project object */; + proxyType = 1; + remoteGlobalIDString = DAF109441863EDAF00F6A3F9; + remoteInfo = Camlistore; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + DA31F9351866744D002E3F33 /* TimeTravelWindowController.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TimeTravelWindowController.h; sourceTree = ""; }; + DA31F9361866744D002E3F33 /* TimeTravelWindowController.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TimeTravelWindowController.m; sourceTree = ""; }; + DA31F9371866744D002E3F33 /* TimeTravelWindowController.xib */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = file.xib; path = TimeTravelWindowController.xib; sourceTree = ""; }; + DA71BCB618642A3A000A102C /* Camlicon.icns */ = {isa = PBXFileReference; lastKnownFileType = image.icns; path = Camlicon.icns; sourceTree = ""; }; + DAABA572186435DA000D62B6 /* camlistored */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.executable"; name = camlistored; path = ../../../bin/camlistored; sourceTree = ""; }; + DAABA573186435DA000D62B6 /* cammount */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.executable"; name = cammount; path = ../../../bin/cammount; sourceTree = ""; }; + DAABA57618643710000D62B6 /* Credits.html */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.html; path = Credits.html; 
sourceTree = ""; }; + DAD59F8B1877CC250018193C /* camput */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.executable"; name = camput; path = ../../../bin/camput; sourceTree = ""; }; + DAF109451863EDAF00F6A3F9 /* Camlistore.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = Camlistore.app; sourceTree = BUILT_PRODUCTS_DIR; }; + DAF109481863EDAF00F6A3F9 /* Cocoa.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Cocoa.framework; path = System/Library/Frameworks/Cocoa.framework; sourceTree = SDKROOT; }; + DAF1094B1863EDAF00F6A3F9 /* AppKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AppKit.framework; path = System/Library/Frameworks/AppKit.framework; sourceTree = SDKROOT; }; + DAF1094C1863EDAF00F6A3F9 /* CoreData.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreData.framework; path = System/Library/Frameworks/CoreData.framework; sourceTree = SDKROOT; }; + DAF1094D1863EDAF00F6A3F9 /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; }; + DAF109501863EDAF00F6A3F9 /* Camlistore-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "Camlistore-Info.plist"; sourceTree = ""; }; + DAF109521863EDAF00F6A3F9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + DAF109541863EDAF00F6A3F9 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; }; + DAF109561863EDAF00F6A3F9 /* Camlistore-Prefix.pch */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "Camlistore-Prefix.pch"; sourceTree = ""; }; + DAF1095A1863EDAF00F6A3F9 /* AppDelegate.h */ = {isa = 
PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + DAF1095B1863EDAF00F6A3F9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + DAF1095E1863EDAF00F6A3F9 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.xib; name = Base; path = Base.lproj/MainMenu.xib; sourceTree = ""; }; + DAF109601863EDAF00F6A3F9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + DAF109661863EDAF00F6A3F9 /* CamlistoreTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = CamlistoreTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + DAF109671863EDAF00F6A3F9 /* XCTest.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = XCTest.framework; path = Library/Frameworks/XCTest.framework; sourceTree = DEVELOPER_DIR; }; + DAF1096E1863EDAF00F6A3F9 /* CamlistoreTests-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "CamlistoreTests-Info.plist"; sourceTree = ""; }; + DAF109701863EDAF00F6A3F9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + DAF109721863EDAF00F6A3F9 /* CamlistoreTests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = CamlistoreTests.m; sourceTree = ""; }; + DAF1097C1863EEFB00F6A3F9 /* LoginItemManager.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LoginItemManager.h; sourceTree = ""; }; + DAF1097D1863EEFB00F6A3F9 /* LoginItemManager.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = LoginItemManager.m; sourceTree = ""; }; + DAF1097F1863F1F900F6A3F9 /* menuicon-selected.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = 
"menuicon-selected.png"; sourceTree = ""; }; + DAF109801863F1F900F6A3F9 /* menuicon-selected@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "menuicon-selected@2x.png"; sourceTree = ""; }; + DAF109811863F1F900F6A3F9 /* menuicon.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = menuicon.png; sourceTree = ""; }; + DAF109821863F1F900F6A3F9 /* menuicon@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "menuicon@2x.png"; sourceTree = ""; }; + DAF63B9B1864D0AC0000EAC9 /* FUSEManager.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FUSEManager.h; sourceTree = ""; }; + DAF63B9C1864D0AC0000EAC9 /* FUSEManager.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = FUSEManager.m; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + DAF109421863EDAF00F6A3F9 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + DAF109491863EDAF00F6A3F9 /* Cocoa.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + DAF109631863EDAF00F6A3F9 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + DAF109691863EDAF00F6A3F9 /* Cocoa.framework in Frameworks */, + DAF109681863EDAF00F6A3F9 /* XCTest.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + DAF1093C1863EDAF00F6A3F9 = { + isa = PBXGroup; + children = ( + DAD59F8B1877CC250018193C /* camput */, + DAABA572186435DA000D62B6 /* camlistored */, + DAABA573186435DA000D62B6 /* cammount */, + DAF1094E1863EDAF00F6A3F9 /* Camlistore */, + DAF1096C1863EDAF00F6A3F9 /* CamlistoreTests */, + DAF109471863EDAF00F6A3F9 /* Frameworks */, + DAF109461863EDAF00F6A3F9 /* Products */, + ); + sourceTree = ""; + }; + DAF109461863EDAF00F6A3F9 /* 
Products */ = { + isa = PBXGroup; + children = ( + DAF109451863EDAF00F6A3F9 /* Camlistore.app */, + DAF109661863EDAF00F6A3F9 /* CamlistoreTests.xctest */, + ); + name = Products; + sourceTree = ""; + }; + DAF109471863EDAF00F6A3F9 /* Frameworks */ = { + isa = PBXGroup; + children = ( + DAF109481863EDAF00F6A3F9 /* Cocoa.framework */, + DAF109671863EDAF00F6A3F9 /* XCTest.framework */, + DAF1094A1863EDAF00F6A3F9 /* Other Frameworks */, + ); + name = Frameworks; + sourceTree = ""; + }; + DAF1094A1863EDAF00F6A3F9 /* Other Frameworks */ = { + isa = PBXGroup; + children = ( + DAF1094B1863EDAF00F6A3F9 /* AppKit.framework */, + DAF1094C1863EDAF00F6A3F9 /* CoreData.framework */, + DAF1094D1863EDAF00F6A3F9 /* Foundation.framework */, + ); + name = "Other Frameworks"; + sourceTree = ""; + }; + DAF1094E1863EDAF00F6A3F9 /* Camlistore */ = { + isa = PBXGroup; + children = ( + DAF1095A1863EDAF00F6A3F9 /* AppDelegate.h */, + DAF1095B1863EDAF00F6A3F9 /* AppDelegate.m */, + DAF1097C1863EEFB00F6A3F9 /* LoginItemManager.h */, + DAF1097D1863EEFB00F6A3F9 /* LoginItemManager.m */, + DAF1095D1863EDAF00F6A3F9 /* MainMenu.xib */, + DAF109601863EDAF00F6A3F9 /* Images.xcassets */, + DAF1094F1863EDAF00F6A3F9 /* Supporting Files */, + DAF63B9B1864D0AC0000EAC9 /* FUSEManager.h */, + DAF63B9C1864D0AC0000EAC9 /* FUSEManager.m */, + DA31F9351866744D002E3F33 /* TimeTravelWindowController.h */, + DA31F9361866744D002E3F33 /* TimeTravelWindowController.m */, + DA31F9371866744D002E3F33 /* TimeTravelWindowController.xib */, + ); + path = Camlistore; + sourceTree = ""; + }; + DAF1094F1863EDAF00F6A3F9 /* Supporting Files */ = { + isa = PBXGroup; + children = ( + DAABA57618643710000D62B6 /* Credits.html */, + DA71BCB618642A3A000A102C /* Camlicon.icns */, + DAF1097F1863F1F900F6A3F9 /* menuicon-selected.png */, + DAF109801863F1F900F6A3F9 /* menuicon-selected@2x.png */, + DAF109811863F1F900F6A3F9 /* menuicon.png */, + DAF109821863F1F900F6A3F9 /* menuicon@2x.png */, + DAF109501863EDAF00F6A3F9 /* 
Camlistore-Info.plist */, + DAF109511863EDAF00F6A3F9 /* InfoPlist.strings */, + DAF109541863EDAF00F6A3F9 /* main.m */, + DAF109561863EDAF00F6A3F9 /* Camlistore-Prefix.pch */, + ); + name = "Supporting Files"; + sourceTree = ""; + }; + DAF1096C1863EDAF00F6A3F9 /* CamlistoreTests */ = { + isa = PBXGroup; + children = ( + DAF109721863EDAF00F6A3F9 /* CamlistoreTests.m */, + DAF1096D1863EDAF00F6A3F9 /* Supporting Files */, + ); + path = CamlistoreTests; + sourceTree = ""; + }; + DAF1096D1863EDAF00F6A3F9 /* Supporting Files */ = { + isa = PBXGroup; + children = ( + DAF1096E1863EDAF00F6A3F9 /* CamlistoreTests-Info.plist */, + DAF1096F1863EDAF00F6A3F9 /* InfoPlist.strings */, + ); + name = "Supporting Files"; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXLegacyTarget section */ + DAC21F08188B358700EEA8BB /* Camlistore.dmg */ = { + isa = PBXLegacyTarget; + buildArgumentsString = "Camlistore/make-dmg.sh"; + buildConfigurationList = DAC21F0B188B358700EEA8BB /* Build configuration list for PBXLegacyTarget "Camlistore.dmg" */; + buildPhases = ( + ); + buildToolPath = /bin/sh; + buildWorkingDirectory = ""; + dependencies = ( + DAC21F0D188B359300EEA8BB /* PBXTargetDependency */, + ); + name = Camlistore.dmg; + passBuildSettingsInEnvironment = 1; + productName = CamlistoreDMG; + }; +/* End PBXLegacyTarget section */ + +/* Begin PBXNativeTarget section */ + DAF109441863EDAF00F6A3F9 /* Camlistore */ = { + isa = PBXNativeTarget; + buildConfigurationList = DAF109761863EDAF00F6A3F9 /* Build configuration list for PBXNativeTarget "Camlistore" */; + buildPhases = ( + DAF109411863EDAF00F6A3F9 /* Sources */, + DAF109421863EDAF00F6A3F9 /* Frameworks */, + DAF109431863EDAF00F6A3F9 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = Camlistore; + productName = Camlistore; + productReference = DAF109451863EDAF00F6A3F9 /* Camlistore.app */; + productType = "com.apple.product-type.application"; + }; + DAF109651863EDAF00F6A3F9 /* CamlistoreTests */ = 
{ + isa = PBXNativeTarget; + buildConfigurationList = DAF109791863EDAF00F6A3F9 /* Build configuration list for PBXNativeTarget "CamlistoreTests" */; + buildPhases = ( + DAF109621863EDAF00F6A3F9 /* Sources */, + DAF109631863EDAF00F6A3F9 /* Frameworks */, + DAF109641863EDAF00F6A3F9 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + DAF1096B1863EDAF00F6A3F9 /* PBXTargetDependency */, + ); + name = CamlistoreTests; + productName = CamlistoreTests; + productReference = DAF109661863EDAF00F6A3F9 /* CamlistoreTests.xctest */; + productType = "com.apple.product-type.bundle.unit-test"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + DAF1093D1863EDAF00F6A3F9 /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 0500; + ORGANIZATIONNAME = Camlistore; + TargetAttributes = { + DAF109651863EDAF00F6A3F9 = { + TestTargetID = DAF109441863EDAF00F6A3F9; + }; + }; + }; + buildConfigurationList = DAF109401863EDAF00F6A3F9 /* Build configuration list for PBXProject "Camlistore" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = DAF1093C1863EDAF00F6A3F9; + productRefGroup = DAF109461863EDAF00F6A3F9 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + DAF109441863EDAF00F6A3F9 /* Camlistore */, + DAF109651863EDAF00F6A3F9 /* CamlistoreTests */, + DAC21F08188B358700EEA8BB /* Camlistore.dmg */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + DAF109431863EDAF00F6A3F9 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + DA31F9391866744D002E3F33 /* TimeTravelWindowController.xib in Resources */, + DAABA574186435DA000D62B6 /* camlistored in Resources */, + DAABA575186435DA000D62B6 /* cammount in Resources */, + DAD59F8C1877CC250018193C /* camput in Resources */, + DAF109531863EDAF00F6A3F9 /* InfoPlist.strings in Resources 
*/, + DAF109841863F1F900F6A3F9 /* menuicon-selected@2x.png in Resources */, + DAF109861863F1F900F6A3F9 /* menuicon@2x.png in Resources */, + DA71BCB718642A3A000A102C /* Camlicon.icns in Resources */, + DAABA57718643710000D62B6 /* Credits.html in Resources */, + DAF109611863EDAF00F6A3F9 /* Images.xcassets in Resources */, + DAF109851863F1F900F6A3F9 /* menuicon.png in Resources */, + DAF109831863F1F900F6A3F9 /* menuicon-selected.png in Resources */, + DAF1098A1863FDD600F6A3F9 /* MainMenu.xib in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + DAF109641863EDAF00F6A3F9 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + DAF109711863EDAF00F6A3F9 /* InfoPlist.strings in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + DAF109411863EDAF00F6A3F9 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + DA31F9381866744D002E3F33 /* TimeTravelWindowController.m in Sources */, + DAF1097E1863EEFB00F6A3F9 /* LoginItemManager.m in Sources */, + DAF1095C1863EDAF00F6A3F9 /* AppDelegate.m in Sources */, + DAF63B9D1864D0AC0000EAC9 /* FUSEManager.m in Sources */, + DAF109551863EDAF00F6A3F9 /* main.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + DAF109621863EDAF00F6A3F9 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + DAF109731863EDAF00F6A3F9 /* CamlistoreTests.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + DAC21F0D188B359300EEA8BB /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = DAF109441863EDAF00F6A3F9 /* Camlistore */; + targetProxy = DAC21F0C188B359300EEA8BB /* PBXContainerItemProxy */; + }; + DAF1096B1863EDAF00F6A3F9 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 
DAF109441863EDAF00F6A3F9 /* Camlistore */; + targetProxy = DAF1096A1863EDAF00F6A3F9 /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin PBXVariantGroup section */ + DAF109511863EDAF00F6A3F9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + DAF109521863EDAF00F6A3F9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + DAF1095D1863EDAF00F6A3F9 /* MainMenu.xib */ = { + isa = PBXVariantGroup; + children = ( + DAF1095E1863EDAF00F6A3F9 /* Base */, + ); + name = MainMenu.xib; + sourceTree = ""; + }; + DAF1096F1863EDAF00F6A3F9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + DAF109701863EDAF00F6A3F9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + DAC21F09188B358700EEA8BB /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + DEBUGGING_SYMBOLS = YES; + GCC_GENERATE_DEBUGGING_SYMBOLS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + OTHER_CFLAGS = ""; + OTHER_LDFLAGS = ""; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + DAC21F0A188B358700EEA8BB /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + OTHER_CFLAGS = ""; + OTHER_LDFLAGS = ""; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + DAF109741863EDAF00F6A3F9 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + GCC_C_LANGUAGE_STANDARD = 
gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + MACOSX_DEPLOYMENT_TARGET = 10.9; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = macosx; + }; + name = Debug; + }; + DAF109751863EDAF00F6A3F9 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + MACOSX_DEPLOYMENT_TARGET = 10.9; + SDKROOT = macosx; + }; + name = Release; + }; + DAF109771863EDAF00F6A3F9 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + COMBINE_HIDPI_IMAGES = YES; + GCC_PRECOMPILE_PREFIX_HEADER = YES; + GCC_PREFIX_HEADER = "Camlistore/Camlistore-Prefix.pch"; + INFOPLIST_FILE = "Camlistore/Camlistore-Info.plist"; + PRODUCT_NAME = "$(TARGET_NAME)"; + WRAPPER_EXTENSION = app; + }; + name = Debug; + }; + 
DAF109781863EDAF00F6A3F9 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + COMBINE_HIDPI_IMAGES = YES; + GCC_PRECOMPILE_PREFIX_HEADER = YES; + GCC_PREFIX_HEADER = "Camlistore/Camlistore-Prefix.pch"; + INFOPLIST_FILE = "Camlistore/Camlistore-Info.plist"; + PRODUCT_NAME = "$(TARGET_NAME)"; + WRAPPER_EXTENSION = app; + }; + name = Release; + }; + DAF1097A1863EDAF00F6A3F9 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + BUNDLE_LOADER = "$(BUILT_PRODUCTS_DIR)/Camlistore.app/Contents/MacOS/Camlistore"; + COMBINE_HIDPI_IMAGES = YES; + FRAMEWORK_SEARCH_PATHS = ( + "$(DEVELOPER_FRAMEWORKS_DIR)", + "$(inherited)", + ); + GCC_PRECOMPILE_PREFIX_HEADER = YES; + GCC_PREFIX_HEADER = "Camlistore/Camlistore-Prefix.pch"; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + INFOPLIST_FILE = "CamlistoreTests/CamlistoreTests-Info.plist"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_HOST = "$(BUNDLE_LOADER)"; + WRAPPER_EXTENSION = xctest; + }; + name = Debug; + }; + DAF1097B1863EDAF00F6A3F9 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + BUNDLE_LOADER = "$(BUILT_PRODUCTS_DIR)/Camlistore.app/Contents/MacOS/Camlistore"; + COMBINE_HIDPI_IMAGES = YES; + FRAMEWORK_SEARCH_PATHS = ( + "$(DEVELOPER_FRAMEWORKS_DIR)", + "$(inherited)", + ); + GCC_PRECOMPILE_PREFIX_HEADER = YES; + GCC_PREFIX_HEADER = "Camlistore/Camlistore-Prefix.pch"; + INFOPLIST_FILE = "CamlistoreTests/CamlistoreTests-Info.plist"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_HOST = "$(BUNDLE_LOADER)"; + WRAPPER_EXTENSION = xctest; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + DAC21F0B188B358700EEA8BB /* Build configuration list for PBXLegacyTarget "Camlistore.dmg" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DAC21F09188B358700EEA8BB /* Debug */, + DAC21F0A188B358700EEA8BB /* Release */, + ); + defaultConfigurationIsVisible 
= 0; + }; + DAF109401863EDAF00F6A3F9 /* Build configuration list for PBXProject "Camlistore" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DAF109741863EDAF00F6A3F9 /* Debug */, + DAF109751863EDAF00F6A3F9 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + DAF109761863EDAF00F6A3F9 /* Build configuration list for PBXNativeTarget "Camlistore" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DAF109771863EDAF00F6A3F9 /* Debug */, + DAF109781863EDAF00F6A3F9 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + DAF109791863EDAF00F6A3F9 /* Build configuration list for PBXNativeTarget "CamlistoreTests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DAF1097A1863EDAF00F6A3F9 /* Debug */, + DAF1097B1863EDAF00F6A3F9 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = DAF1093D1863EDAF00F6A3F9 /* Project object */; +} diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 00000000..ef6ea098 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ + + + + + diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/AppDelegate.h b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/AppDelegate.h new file mode 100644 index 00000000..3cdc4b48 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/AppDelegate.h @@ -0,0 +1,78 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache 
License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#import + +#import "LoginItemManager.h" +#import "TimeTravelWindowController.h" +#import "FUSEManager.h" + +#define MIN_LIFETIME 10 + +@interface AppDelegate : NSObject { + NSStatusItem *statusBar; + IBOutlet NSMenu *statusMenu; + + IBOutlet NSMenuItem *launchBrowserItem; + IBOutlet NSMenuItem *launchAtStartupItem; + IBOutlet LoginItemManager *loginItems; + IBOutlet FUSEManager *fuseManager; + IBOutlet NSMenuItem *fuseMountItem; + + NSTask *task; + NSPipe *in, *out; + + BOOL hasSeenStart; + time_t startTime; + + BOOL terminatingApp; + int shutdownWaitEvents; + NSTimer *taskKiller; + + NSString *logPath; + FILE *logFile; + + TimeTravelWindowController *timeTraveler; +} + +- (IBAction)browse:(id)sender; + +- (void)launchServer; +- (void)stop; +- (void)openUI; +- (void)taskTerminated:(NSNotification *)note; +- (void)cleanup; + +- (void)updateAddItemButtonState; + +- (IBAction)setLaunchPref:(id)sender; +- (IBAction)changeLoginItems:(id)sender; + +- (IBAction)showAboutPanel:(id)sender; +- (IBAction)showLogs:(id)sender; +- (IBAction)showTechSupport:(id)sender; + +- (void)applicationWillTerminate:(NSNotification *)notification; +- (IBAction)toggleMount:(id)sender; + +- (void) fuseMounted; +- (void) fuseDismounted; + +- (IBAction)openFinder:(id)sender; +- (IBAction)openFinderAsOf:(id)sender; + + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/AppDelegate.m 
b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/AppDelegate.m new file mode 100644 index 00000000..cc8027c2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/AppDelegate.m @@ -0,0 +1,382 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#import "AppDelegate.h" +#import "TimeTravelWindowController.h" + +#define FORCEKILL_INTERVAL 15.0 // How long to wait for the server task to exit, on quit + +@implementation AppDelegate + +- (IBAction)showAboutPanel:(id)sender +{ + [NSApp activateIgnoringOtherApps:YES]; + [[NSApplication sharedApplication] orderFrontStandardAboutPanel:sender]; +} + +- (void)logMessage:(NSString*)msg +{ + const char *str = [msg cStringUsingEncoding:NSUTF8StringEncoding]; + if (str) { + fwrite(str, strlen(str), 1, logFile); + } +} + +- (void)flushLog +{ + fflush(logFile); +} + +- (NSString *)logFilePath:(NSString*)logName +{ + NSArray *URLs = [[NSFileManager defaultManager] URLsForDirectory:NSLibraryDirectory + inDomains:NSUserDomainMask]; + NSURL *logsURL = [[URLs lastObject] URLByAppendingPathComponent:@"Logs"]; + NSString *logDir = [logsURL path]; + return [logDir stringByAppendingPathComponent:logName]; +} + +- (void)awakeFromNib +{ + hasSeenStart = NO; + + logPath = [self logFilePath:@"Camlistored.log"]; + const char *logPathC = [logPath cStringUsingEncoding:NSUTF8StringEncoding]; + + NSString *oldLogFileString = [self 
logFilePath:@"Camlistored.log.old"]; + const char *oldLogPath = [oldLogFileString cStringUsingEncoding:NSUTF8StringEncoding]; + rename(logPathC, oldLogPath); // This will fail the first time. + + // Now our logs go to a private file. + logFile = fopen(logPathC, "w"); + + [NSTimer scheduledTimerWithTimeInterval:1.0 + target:self selector:@selector(flushLog) + userInfo:nil + repeats:YES]; + + [[NSUserDefaults standardUserDefaults] + registerDefaults: [NSDictionary dictionaryWithObjectsAndKeys: + [NSNumber numberWithBool:YES], @"browseAtStart", + nil, nil]]; + NSUserDefaults *defaults = [NSUserDefaults standardUserDefaults]; + + statusBar=[[NSStatusBar systemStatusBar] statusItemWithLength: 26.0]; + [statusBar setAlternateImage: [NSImage imageNamed:@"menuicon-selected"]]; + [statusBar setImage: [NSImage imageNamed:@"menuicon"]]; + [statusBar setMenu: statusMenu]; + [statusBar setEnabled:YES]; + [statusBar setHighlightMode:YES]; + + // Fix up the masks for all the alt items. + for (int i = 0; i < [statusMenu numberOfItems]; ++i) { + NSMenuItem *itm = [statusMenu itemAtIndex:i]; + if ([itm isAlternate]) { + [itm setKeyEquivalentModifierMask:NSAlternateKeyMask]; + } + } + + [launchBrowserItem setState:([defaults boolForKey:@"browseAtStart"] ? 
NSOnState : NSOffState)]; + [self updateAddItemButtonState]; + + [self launchServer]; +} + +- (void)stop +{ + NSFileHandle *writer; + writer = [in fileHandleForWriting]; + [writer closeFile]; + [task terminate]; +} + +- (void)launchServer +{ + in = [[NSPipe alloc] init]; + out = [[NSPipe alloc] init]; + task = [[NSTask alloc] init]; + + startTime = time(NULL); + + NSMutableString *launchPath = [NSMutableString string]; + [launchPath appendString:[[NSBundle mainBundle] resourcePath]]; + [task setCurrentDirectoryPath:launchPath]; + + [launchPath appendString:@"/camlistored"]; + + NSDictionary *env = [NSDictionary dictionaryWithObjectsAndKeys: + NSHomeDirectory(), @"HOME", + NSUserName(), @"USER", + nil, nil]; + [task setEnvironment:env]; + + [self logMessage:[NSString stringWithFormat:@"Launching '%@'\n", launchPath]]; + [task setLaunchPath:launchPath]; + [task setArguments:[NSArray arrayWithObjects:@"-openbrowser=false", nil]]; + [task setStandardInput:in]; + [task setStandardOutput:out]; + [task setStandardError:out]; + + NSFileHandle *fh = [out fileHandleForReading]; + NSNotificationCenter *nc; + nc = [NSNotificationCenter defaultCenter]; + + [nc addObserver:self + selector:@selector(dataReady:) + name:NSFileHandleReadCompletionNotification + object:fh]; + + [nc addObserver:self + selector:@selector(taskTerminated:) + name:NSTaskDidTerminateNotification + object:task]; + + [task launch]; + [fh readInBackgroundAndNotify]; + NSLog(@"Launched server task -- pid = %d", task.processIdentifier); +} + +- (void) shutdownEvent { + shutdownWaitEvents--; + NSLog(@"Received a shutdown event. %d to go", shutdownWaitEvents); + if (shutdownWaitEvents == 0) { + NSLog(@"Received last shutdown event. 
bye"); + [NSApp replyToApplicationShouldTerminate:NSTerminateNow]; + } +} + +- (NSApplicationTerminateReply)applicationShouldTerminate:(NSApplication *)sender { + NSLog(@"Asking if we should terminate..."); + BOOL isRunning = [task isRunning]; + if (isRunning) { + terminatingApp = YES; + [self stopTask]; + shutdownWaitEvents = 1; + if ([fuseManager isMounted]) { + [fuseManager dismount]; + shutdownWaitEvents++; + } + return NSTerminateLater; + } + return NSTerminateNow; +} + +- (void)applicationWillTerminate:(NSNotification *)notification +{ + NSLog(@"Terminating."); +} + +- (void)stopTask +{ + if (taskKiller) { + return; // Already shutting down. + } + NSLog(@"Telling server task to stop..."); + NSFileHandle *writer; + writer = [in fileHandleForWriting]; + [task terminate]; + [writer closeFile]; + taskKiller = [NSTimer scheduledTimerWithTimeInterval:FORCEKILL_INTERVAL + target:self + selector:@selector(killTask) + userInfo:nil + repeats:NO]; +} + +- (void)killTask +{ + NSLog(@"Force terminating task"); + [task terminate]; +} + +- (void)taskTerminated:(NSNotification *)note +{ + int status = [[note object] terminationStatus]; + NSLog(@"Task terminated with status %d", status); + [self cleanup]; + [self logMessage: [NSString stringWithFormat:@"Terminated with status %d\n", + status]]; + + if (terminatingApp) { + // I was just waiting for the task to exit before quitting + [self shutdownEvent]; + } else { + time_t now = time(NULL); + if (now - startTime < MIN_LIFETIME) { + NSInteger b = NSRunAlertPanel(@"Problem Running Camlistore", + @"camlistored doesn't seem to be operating properly. " + @"Check Console logs for more details.", @"Retry", @"Quit", nil); + if (b == NSAlertAlternateReturn) { + [NSApp terminate:self]; + } + } + + // Relaunch the server task... 
+ [NSTimer scheduledTimerWithTimeInterval:1.0 + target:self selector:@selector(launchServer) + userInfo:nil + repeats:NO]; + } +} + +- (void)cleanup +{ + [taskKiller invalidate]; + taskKiller = nil; + + task = nil; + + in = nil; + out = nil; + + [[NSNotificationCenter defaultCenter] removeObserver:self]; +} + +- (void)openUI +{ + NSDictionary *info = [[NSBundle mainBundle] infoDictionary]; + NSString *homePage = [info objectForKey:@"HomePage"]; + NSURL *url=[NSURL URLWithString:homePage]; + [[NSWorkspace sharedWorkspace] openURL:url]; +} + +- (IBAction)browse:(id)sender +{ + [self openUI]; +} + +- (void)appendData:(NSData *)d +{ + NSString *s = [[NSString alloc] initWithData: d + encoding: NSUTF8StringEncoding]; + if (!hasSeenStart) { + if ([s rangeOfString:@"Available on http"].location != NSNotFound) { + NSUserDefaults *defaults = [NSUserDefaults standardUserDefaults]; + if ([defaults boolForKey:@"browseAtStart"]) { + [self openUI]; + } + hasSeenStart = YES; + } + } + + [self logMessage:s]; +} + +- (void)dataReady:(NSNotification *)n +{ + NSData *d; + d = [[n userInfo] valueForKey:NSFileHandleNotificationDataItem]; + if ([d length]) { + [self appendData:d]; + } + if (task) { + [[out fileHandleForReading] readInBackgroundAndNotify]; + } +} + +- (IBAction)setLaunchPref:(id)sender { + NSCellStateValue stateVal = [sender state]; + stateVal = (stateVal == NSOnState) ? NSOffState : NSOnState; + + NSLog(@"Setting launch pref to %s", stateVal == NSOnState ? "on" : "off"); + + [[NSUserDefaults standardUserDefaults] + setBool:(stateVal == NSOnState) + forKey:@"browseAtStart"]; + + [launchBrowserItem setState:([[NSUserDefaults standardUserDefaults] + boolForKey:@"browseAtStart"] ? NSOnState : NSOffState)]; + + [[NSUserDefaults standardUserDefaults] synchronize]; +} + +- (void) updateAddItemButtonState +{ + [launchAtStartupItem setState:[loginItems inLoginItems] ? 
NSOnState : NSOffState]; +} + +- (IBAction)changeLoginItems:(id)sender +{ + if([sender state] == NSOffState) { + [loginItems addToLoginItems:self]; + } else { + [loginItems removeLoginItem:self]; + } + [self updateAddItemButtonState]; +} + + +- (IBAction)showTechSupport:(id)sender +{ + NSDictionary *info = [[NSBundle mainBundle] infoDictionary]; + NSString *homePage = [info objectForKey:@"SupportPage"]; + NSURL *url=[NSURL URLWithString:homePage]; + [[NSWorkspace sharedWorkspace] openURL:url]; + +} + +- (IBAction)showLogs:(id)sender +{ + if (![[NSWorkspace sharedWorkspace] openFile:logPath]) { + NSRunAlertPanel(@"Cannot Find Logfile", + @"I've been looking for logs in all the wrong places.", nil, nil, nil); + return; + } +} + +- (IBAction)toggleMount:(id)sender { + NSLog(@"Toggling mount"); + if ([fuseManager isMounted]) { + [fuseManager dismount]; + } else { + [fuseManager mount]; + } +} + +- (void) fuseDismounted { + NSLog(@"FUSE dismounted"); + if (terminatingApp) { + [self shutdownEvent]; + } +} + +- (void) fuseMounted { + NSLog(@"FUSE mounted"); +} + +- (IBAction)openFinder:(id)sender +{ + if (![[NSWorkspace sharedWorkspace] openFile:[fuseManager mountPath]]) { + NSRunAlertPanel(@"Cannot Open Finder Window", + @"Can't find mount path or something.", nil, nil, nil); + return; + } +} + +- (IBAction)openFinderAsOf:(id)sender +{ + [NSApp activateIgnoringOtherApps:YES]; + + if (timeTraveler == nil) { + timeTraveler = [[TimeTravelWindowController alloc] + initWithWindowNibName:@"TimeTravelWindowController"]; + [timeTraveler setMountPath:[fuseManager mountPath]]; + } + [timeTraveler showWindow:self]; +} + + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Base.lproj/MainMenu.xib b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Base.lproj/MainMenu.xib new file mode 100644 index 00000000..6189cfc4 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Base.lproj/MainMenu.xib @@ -0,0 +1,106 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Camlicon.icns b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Camlicon.icns new file mode 100644 index 00000000..32017dcd Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Camlicon.icns differ diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Camlistore-Info.plist b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Camlistore-Info.plist new file mode 100644 index 00000000..fcd57eaf --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Camlistore-Info.plist @@ -0,0 +1,42 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + ${EXECUTABLE_NAME} + CFBundleIconFile + Camlicon.icns + CFBundleIdentifier + org.camlistore.${PRODUCT_NAME:rfc1034identifier} + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + ${PRODUCT_NAME} + CFBundlePackageType + APPL + CFBundleShortVersionString + 1.0 + CFBundleSignature + ???? + CFBundleVersion + 1 + LSApplicationCategoryType + public.app-category.utilities + LSMinimumSystemVersion + ${MACOSX_DEPLOYMENT_TARGET} + NSHumanReadableCopyright + Copyright © 2013 Camlistore. All rights reserved. 
+ NSMainNibFile + MainMenu + SupportPage + http://groups.google.com/group/camlistore + NSPrincipalClass + NSApplication + LSUIElement + + HomePage + http://localhost:3179/ + + diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Camlistore-Prefix.pch b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Camlistore-Prefix.pch new file mode 100644 index 00000000..35d76409 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Camlistore-Prefix.pch @@ -0,0 +1,9 @@ +// +// Prefix header +// +// The contents of this file are implicitly included at the beginning of every source file. +// + +#ifdef __OBJC__ + #import +#endif diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Credits.html b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Credits.html new file mode 100644 index 00000000..b64159e3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Credits.html @@ -0,0 +1,26 @@ + + + Credits + + + + +

    Camlistore is your personal storage system for life.

    + +

    It's a way to store, sync, share, model and back up content.

    + +

    It stands for "Content-Addressable Multi-Layer Indexed Storage", for + lack of a better name. For more, see:

    + + + +
    + +

    + File a bug +

    + + diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/FUSEManager.h b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/FUSEManager.h new file mode 100644 index 00000000..65ffed30 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/FUSEManager.h @@ -0,0 +1,47 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#import + +#define MIN_FUSE_LIFETIME 10 + +@protocol FUSEManagerDelegate +- (void) fuseMounted; +- (void) fuseDismounted; +@end + +@interface FUSEManager : NSObject { +@private + BOOL shouldBeMounted; + BOOL mounted; + NSString *mountPoint; + + time_t startTime; + NSTask *task; + NSPipe *in, *out; + + IBOutlet id delegate; + IBOutlet NSMenuItem *mountMenu; +} + +- (NSString *)mountPath; +- (BOOL) isMounted; +- (void) mount; +- (void) dismount; + + + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/FUSEManager.m b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/FUSEManager.m new file mode 100644 index 00000000..7489236f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/FUSEManager.m @@ -0,0 +1,234 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#import + +#import "FUSEManager.h" + +@implementation FUSEManager + +- (BOOL) isMounted +{ + return mounted; +} + +- (void)justMounted +{ + mounted = YES; + [delegate fuseMounted]; + [mountMenu setState:NSOnState]; +} + +- (void)justUnmounted +{ + mounted = NO; + [delegate fuseDismounted]; + [mountMenu setState:NSOffState]; +} + +- (NSString*) mountPath +{ + NSArray* paths = NSSearchPathForDirectoriesInDomains(NSDesktopDirectory, NSUserDomainMask, YES ); + return [NSString stringWithFormat: @"%@/camlistore", [paths objectAtIndex:0]]; +} + +- (void) mount +{ + shouldBeMounted = YES; + + in = [[NSPipe alloc] init]; + out = [[NSPipe alloc] init]; + task = [[NSTask alloc] init]; + + startTime = time(NULL); + + NSMutableString *launchPath = [NSMutableString string]; + [launchPath appendString:[[NSBundle mainBundle] resourcePath]]; + [task setCurrentDirectoryPath:launchPath]; + + [launchPath appendString:@"/cammount"]; + + NSString *mountDir = [self mountPath]; + [[NSFileManager defaultManager] createDirectoryAtPath:mountDir + withIntermediateDirectories:YES + attributes:nil + error:nil]; + + NSDictionary *env = [NSDictionary dictionaryWithObjectsAndKeys: + NSHomeDirectory(), @"HOME", + NSUserName(), @"USER", + @"/bin:/usr/bin:/sbin:/usr/sbin", @"PATH", + nil, nil]; + [task setEnvironment:env]; + + NSLog(@"Launching '%@'\n", launchPath); + [task setLaunchPath:launchPath]; + [task setArguments:[NSArray arrayWithObjects:@"-open", [self mountPath], nil]]; + [task setStandardInput:in]; + [task setStandardOutput:out]; + [task setStandardError:out]; + + NSFileHandle *fh = [out 
fileHandleForReading]; + NSNotificationCenter *nc; + nc = [NSNotificationCenter defaultCenter]; + + [nc addObserver:self + selector:@selector(dataReady:) + name:NSFileHandleReadCompletionNotification + object:fh]; + + [nc addObserver:self + selector:@selector(taskTerminated:) + name:NSTaskDidTerminateNotification + object:task]; + + [task launch]; + [fh readInBackgroundAndNotify]; + NSLog(@"Launched server task -- pid = %d", task.processIdentifier); + + [self justMounted]; +} + +- (void)dataReady:(NSNotification *)n +{ + NSData *d; + d = [[n userInfo] valueForKey:NSFileHandleNotificationDataItem]; + if ([d length]) { + NSString *s = [[NSString alloc] initWithData: d + encoding: NSUTF8StringEncoding]; + NSLog(@"%@", s); + } + if (task) { + [[out fileHandleForReading] readInBackgroundAndNotify]; + } +} + +- (void)cleanup +{ + task = nil; + + in = nil; + out = nil; + + [[NSNotificationCenter defaultCenter] removeObserver:self]; +} + +- (BOOL)hasFUSE +{ + return [[NSFileManager defaultManager] fileExistsAtPath:@"/Library/Filesystems/osxfusefs.fs"]; +} + +- (BOOL)hasClientConfig +{ + NSString *confFile = [NSString stringWithFormat:@"%@/.config/camlistore/client-config.json", NSHomeDirectory()]; + return [[NSFileManager defaultManager] fileExistsAtPath:confFile]; +} + +- (void)createClientConfig +{ + NSTask *put = [[NSTask alloc] init]; + + NSMutableString *launchPath = [NSMutableString string]; + [launchPath appendString:[[NSBundle mainBundle] resourcePath]]; + [put setCurrentDirectoryPath:launchPath]; + [launchPath appendString:@"/camput"]; + NSDictionary *env = [NSDictionary dictionaryWithObjectsAndKeys: + NSHomeDirectory(), @"HOME", + NSUserName(), @"USER", + @"/bin:/usr/bin:/sbin:/usr/sbin", @"PATH", + nil, nil]; + [put setEnvironment:env]; + [put setLaunchPath:launchPath]; + [put setArguments:[NSArray arrayWithObjects:@"init", nil]]; + [put launch]; + [put waitUntilExit]; +} + +// If YES is returned, try to remount, otherwise stop +- 
(BOOL)resolveMountProblemAndRemount +{ + time_t now = time(NULL); + if (now - startTime < MIN_FUSE_LIFETIME) { + // See if we can guide the user to a solution + if (![self hasFUSE]) { + NSRunAlertPanel(@"Problem Mounting Camlistore FUSE", + @"You don't seem to have osxfuse installed. " + @"Please go here, install, and try again:\n\n" + @"http://osxfuse.github.io/", @"OK", nil, nil); + return NO; + } else if (![self hasClientConfig]) { + NSInteger b = NSRunAlertPanel(@"Problem Mounting Camlistore FUSE", + @"You don't have a camlistore client config. " + @"Would you like me to make you one?", + @"Make Client Config", @"Don't Mount", nil); + if (b == NSAlertDefaultReturn) { + [self createClientConfig]; + } else { + return NO; + } + } else { + NSInteger b = NSRunAlertPanel(@"Problem Mounting Camlistore FUSE", + @"I'm having trouble mounting the FUSE filesystem. " + @"Check Console logs for more details.", + @"Retry", @"Don't Mount", nil); + return b == NSAlertDefaultReturn; + } + + } + return YES; +} + +- (void)taskTerminated:(NSNotification *)note +{ + int status = [[note object] terminationStatus]; + NSLog(@"Task terminated with status %d", status); + [self cleanup]; + [self justUnmounted]; + NSLog(@"Terminated with status %d\n", status); + + if (shouldBeMounted) { + // Relaunch the server task... 
+ if ([self resolveMountProblemAndRemount]) { + NSLog(@"Remounting"); + [NSTimer scheduledTimerWithTimeInterval:1.0 + target:self selector:@selector(mount) + userInfo:nil + repeats:NO]; + } else { + NSLog(@"Should no longer be mounted"); + shouldBeMounted = NO; + } + } + if (!shouldBeMounted) { + [[NSWorkspace sharedWorkspace] performFileOperation:NSWorkspaceRecycleOperation + source:[[self mountPath] stringByDeletingLastPathComponent] + destination:@"" + files:[NSArray arrayWithObject:[[self mountPath] lastPathComponent]] + tag:nil]; + } +} + +- (void) dismount +{ + NSLog(@"Unmounting"); + shouldBeMounted = NO; + NSFileHandle *writer; + writer = [in fileHandleForWriting]; + [writer writeData:[@"q\n" dataUsingEncoding:NSASCIIStringEncoding]]; + [writer closeFile]; +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Images.xcassets/AppIcon.appiconset/Contents.json b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Images.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 00000000..2db2b1c7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/Images.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,58 @@ +{ + "images" : [ + { + "idiom" : "mac", + "size" : "16x16", + "scale" : "1x" + }, + { + "idiom" : "mac", + "size" : "16x16", + "scale" : "2x" + }, + { + "idiom" : "mac", + "size" : "32x32", + "scale" : "1x" + }, + { + "idiom" : "mac", + "size" : "32x32", + "scale" : "2x" + }, + { + "idiom" : "mac", + "size" : "128x128", + "scale" : "1x" + }, + { + "idiom" : "mac", + "size" : "128x128", + "scale" : "2x" + }, + { + "idiom" : "mac", + "size" : "256x256", + "scale" : "1x" + }, + { + "idiom" : "mac", + "size" : "256x256", + "scale" : "2x" + }, + { + "idiom" : "mac", + "size" : "512x512", + "scale" : "1x" + }, + { + "idiom" : "mac", + "size" : "512x512", + "scale" : "2x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No 
newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/LoginItemManager.h b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/LoginItemManager.h new file mode 100644 index 00000000..3a60293f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/LoginItemManager.h @@ -0,0 +1,31 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#import + + +@interface LoginItemManager : NSObject { +@private + +} + +- (BOOL)loginItemExistsWithLoginItemReference:(LSSharedFileListRef)theLoginItemsRefs forPath:(CFURLRef)thePath; + +- (BOOL)inLoginItems; +- (void)removeLoginItem:(id)sender; +- (void)addToLoginItems:(id)sender; + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/LoginItemManager.m b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/LoginItemManager.m new file mode 100644 index 00000000..0b62fd69 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/LoginItemManager.m @@ -0,0 +1,89 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#import "LoginItemManager.h" + + +@implementation LoginItemManager + +- (id)init +{ + self = [super init]; + if (self) { + // Initialization code here. + } + + return self; +} + +- (BOOL)loginItemExistsWithLoginItemReference:(LSSharedFileListRef)theLoginItemsRefs forPath:(CFURLRef)thePath { + BOOL exists = NO; + + return exists; +} + +- (BOOL) inLoginItems { + BOOL exists = NO; + UInt32 seedValue; + + LSSharedFileListRef theLoginItemsRefs = LSSharedFileListCreate(NULL, kLSSharedFileListSessionLoginItems, NULL); + CFURLRef thePath = (CFURLRef)CFBridgingRetain([[NSBundle mainBundle] bundlePath]); + + // We're going to grab the contents of the shared file list (LSSharedFileListItemRef objects) + // and pop it in an array so we can iterate through it to find our item. 
+ NSArray *loginItemsArray = (NSArray *)CFBridgingRelease(LSSharedFileListCopySnapshot(theLoginItemsRefs, &seedValue)); + for (id item in loginItemsArray) { + LSSharedFileListItemRef itemRef = (LSSharedFileListItemRef)CFBridgingRetain(item); + if (LSSharedFileListItemResolve(itemRef, 0, (CFURLRef*) &thePath, NULL) == noErr) { + if ([[(NSURL *)CFBridgingRelease(thePath) path] hasPrefix:[[NSBundle mainBundle] bundlePath]]) + exists = YES; + } + } + return exists; +} + +- (void) removeLoginItem:(id)sender { + UInt32 seedValue; + + LSSharedFileListRef theLoginItemsRefs = LSSharedFileListCreate(NULL, kLSSharedFileListSessionLoginItems, NULL); + CFURLRef thePath = (CFURLRef)CFBridgingRetain([[NSBundle mainBundle] bundlePath]); + + // We're going to grab the contents of the shared file list (LSSharedFileListItemRef objects) + // and pop it in an array so we can iterate through it to find our item. + NSArray *loginItemsArray = (NSArray *)CFBridgingRelease(LSSharedFileListCopySnapshot(theLoginItemsRefs, &seedValue)); + for (id item in loginItemsArray) { + LSSharedFileListItemRef itemRef = (LSSharedFileListItemRef)CFBridgingRetain(item); + if (LSSharedFileListItemResolve(itemRef, 0, (CFURLRef*) &thePath, NULL) == noErr) { + if ([[(NSURL *)CFBridgingRelease(thePath) path] hasPrefix:[[NSBundle mainBundle] bundlePath]]) { + LSSharedFileListItemRemove(theLoginItemsRefs, itemRef); + } + } + } +} + +- (void)addToLoginItems:(id)sender { + [self removeLoginItem: self]; + + LSSharedFileListRef theLoginItemsRefs = LSSharedFileListCreate(NULL, kLSSharedFileListSessionLoginItems, NULL); + + // CFURLRef to the insertable item. + CFURLRef url = (CFURLRef)CFBridgingRetain([NSURL fileURLWithPath:[[NSBundle mainBundle] bundlePath]]); + + // Actual insertion of an item. 
+ LSSharedFileListInsertItemURL(theLoginItemsRefs, kLSSharedFileListItemLast, NULL, NULL, url, NULL, NULL); +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/TimeTravelWindowController.h b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/TimeTravelWindowController.h new file mode 100644 index 00000000..31bf800c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/TimeTravelWindowController.h @@ -0,0 +1,28 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#import + +@interface TimeTravelWindowController : NSWindowController { + IBOutlet NSDate *when; + NSString *mountPath; +} + +- (void)setMountPath:(NSString*)to; + +- (IBAction)openFinder:(id)sender; + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/TimeTravelWindowController.m b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/TimeTravelWindowController.m new file mode 100644 index 00000000..204a5dc8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/TimeTravelWindowController.m @@ -0,0 +1,61 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#import "TimeTravelWindowController.h" + +@implementation TimeTravelWindowController + +- (void)setMountPath:(NSString*)to +{ + mountPath = to; +} + +- (id)initWithWindow:(NSWindow *)window +{ + self = [super initWithWindow:window]; + if (self) { + // Initialization code here. + } + return self; +} + +- (void)windowDidLoad +{ + [super windowDidLoad]; +} + +- (void)loadWindow { + when = [NSDate date]; + [super loadWindow]; +} + +- (IBAction)openFinder:(id)sender +{ + NSDateFormatter *formatter = [[NSDateFormatter alloc] init]; + [formatter setTimeZone:[NSTimeZone timeZoneForSecondsFromGMT:0]]; + [formatter setDateFormat:@"yyyy-MM-dd'T'HH:mm:ss'Z'"]; + [[self window] orderOut:self]; + + if (![[NSWorkspace sharedWorkspace] openFile:[NSString stringWithFormat:@"%@/at/%@", + mountPath,[formatter stringFromDate:when]]]) { + NSRunAlertPanel(@"Cannot Open Finder Window", + [NSString stringWithFormat:@"Can't open path for %@.", [formatter stringFromDate:when]], + nil, nil, nil); + return; + } +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/TimeTravelWindowController.xib b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/TimeTravelWindowController.xib new file mode 100644 index 00000000..df6b78cf --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/TimeTravelWindowController.xib @@ -0,0 +1,122 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +VFppZgAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAAAAAAC5AAAABAAAABCepkign7sVkKCGKqChmveQ 
+y4kaoNIj9HDSYSYQ1v50INiArZDa/tGg28CQENzes6DdqayQ3r6VoN+JjpDgnneg4WlwkOJ+WaDjSVKQ +5F47oOUpNJDmR1gg5xJREOgnOiDo8jMQ6gccIOrSFRDr5v4g7LH3EO3G4CDukdkQ76/8oPBxuxDxj96g +8n/BkPNvwKD0X6OQ9U+ioPY/hZD3L4Sg+CiiEPkPZqD6CIQQ+viDIPvoZhD82GUg/chIEP64RyD/qCoQ +AJgpIAGIDBACeAsgA3EokARhJ6AFUQqQBkEJoAcw7JAHjUOgCRDOkAmtvyAK8LCQC+CvoAzZzRANwJGg +DrmvEA+priAQmZEQEYmQIBJ5cxATaXIgFFlVEBVJVCAWOTcQFyk2IBgiU5AZCRggGgI1kBryNKAb4heQ +HNIWoB3B+ZAesfigH6HbkCB2KyAhgb2QIlYNICNq2hAkNe8gJUq8ECYV0SAnKp4QJ/7toCkKgBAp3s+g +KupiECu+saAs036QLZ6ToC6zYJAvfnWgMJNCkDFnkiAycySQM0d0IDRTBpA1J1YgNjLokDcHOCA4HAUQ +OOcaIDn75xA6xvwgO9vJEDywGKA9u6sQPo/6oD+bjRBAb9ygQYSpkEJPvqBDZIuQRC+goEVEbZBF89Mg +Ry2KEEfTtSBJDWwQSbOXIErtThBLnLOgTNZqkE18laBOtkyQT1x3oFCWLpBRPFmgUnYQkFMcO6BUVfKQ +VPwdoFY11JBW5TogWB7xEFjFHCBZ/tMQWqT+IFvetRBchOAgXb6XEF5kwiBfnnkQYE3eoGGHlZBiLcCg +Y2d3kGQNoqBlR1mQZe2EoGcnO5BnzWagaQcdkGmtSKBq5v+Qa5ZlIGzQHBBtdkcgbq/+EG9WKSBwj+AQ +cTYLIHJvwhBzFe0gdE+kEHT/CaB2OMCQdt7roHgYopB4vs2gefiEkHqer6B72GaQfH6RoH24SJB+XnOg +f5gqkAABAAECAwEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEA +AQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEA +AQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEA +AQABAAEAAQAB//+dkAEA//+PgAAE//+dkAEI//+dkAEMUERUAFBTVABQV1QAUFBUAAAAAAEAAAABA + + + + + + + + + + + + + + + + + + + + + +VFppZgAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAAAAAAC5AAAABAAAABCepkign7sVkKCGKqChmveQ +y4kaoNIj9HDSYSYQ1v50INiArZDa/tGg28CQENzes6DdqayQ3r6VoN+JjpDgnneg4WlwkOJ+WaDjSVKQ +5F47oOUpNJDmR1gg5xJREOgnOiDo8jMQ6gccIOrSFRDr5v4g7LH3EO3G4CDukdkQ76/8oPBxuxDxj96g +8n/BkPNvwKD0X6OQ9U+ioPY/hZD3L4Sg+CiiEPkPZqD6CIQQ+viDIPvoZhD82GUg/chIEP64RyD/qCoQ +AJgpIAGIDBACeAsgA3EokARhJ6AFUQqQBkEJoAcw7JAHjUOgCRDOkAmtvyAK8LCQC+CvoAzZzRANwJGg +DrmvEA+priAQmZEQEYmQIBJ5cxATaXIgFFlVEBVJVCAWOTcQFyk2IBgiU5AZCRggGgI1kBryNKAb4heQ +HNIWoB3B+ZAesfigH6HbkCB2KyAhgb2QIlYNICNq2hAkNe8gJUq8ECYV0SAnKp4QJ/7toCkKgBAp3s+g 
+KupiECu+saAs036QLZ6ToC6zYJAvfnWgMJNCkDFnkiAycySQM0d0IDRTBpA1J1YgNjLokDcHOCA4HAUQ +OOcaIDn75xA6xvwgO9vJEDywGKA9u6sQPo/6oD+bjRBAb9ygQYSpkEJPvqBDZIuQRC+goEVEbZBF89Mg +Ry2KEEfTtSBJDWwQSbOXIErtThBLnLOgTNZqkE18laBOtkyQT1x3oFCWLpBRPFmgUnYQkFMcO6BUVfKQ +VPwdoFY11JBW5TogWB7xEFjFHCBZ/tMQWqT+IFvetRBchOAgXb6XEF5kwiBfnnkQYE3eoGGHlZBiLcCg +Y2d3kGQNoqBlR1mQZe2EoGcnO5BnzWagaQcdkGmtSKBq5v+Qa5ZlIGzQHBBtdkcgbq/+EG9WKSBwj+AQ +cTYLIHJvwhBzFe0gdE+kEHT/CaB2OMCQdt7roHgYopB4vs2gefiEkHqer6B72GaQfH6RoH24SJB+XnOg +f5gqkAABAAECAwEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEA +AQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEA +AQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEA +AQABAAEAAQAB//+dkAEA//+PgAAE//+dkAEI//+dkAEMUERUAFBTVABQV1QAUFBUAAAAAAEAAAABA + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/en.lproj/InfoPlist.strings b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/en.lproj/InfoPlist.strings new file mode 100644 index 00000000..b92732c7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/en.lproj/InfoPlist.strings @@ -0,0 +1 @@ +/* Localized versions of Info.plist keys */ diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/main.m b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/main.m new file mode 100644 index 00000000..28545437 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/main.m @@ -0,0 +1,22 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#import + +int main(int argc, const char * argv[]) +{ + return NSApplicationMain(argc, argv); +} diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/make-dmg.sh b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/make-dmg.sh new file mode 100644 index 00000000..9048e3cc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/make-dmg.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +# make-dmg.sh +# Created by Dustin Sallings on 2014/1/18. +# Copyright (c) 2014 Camlistore. All rights reserved. + +set -ex + +dir="$TARGET_TEMP_DIR/disk" +dmg="$BUILT_PRODUCTS_DIR/$PROJECT_NAME.dmg" + +rm -rf "$dir" +mkdir -p "$dir" +cp -R "$BUILT_PRODUCTS_DIR/$PROJECT_NAME.app" "$dir" +cp -R "$PROJECT_DIR/../../../README" "$dir/README.txt" +cp -R "$PROJECT_DIR/../../../COPYING" "$dir/LICENSE.txt" +ln -s "/Applications" "$dir/Applications" +rm -f "$dmg" +hdiutil create -srcfolder "$dir" -volname "$PROJECT_NAME" "$dmg" +rm -rf "$dir" diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/menuicon-selected.png b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/menuicon-selected.png new file mode 100644 index 00000000..ee3afbb4 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/menuicon-selected.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/menuicon-selected@2x.png 
b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/menuicon-selected@2x.png new file mode 100644 index 00000000..642096d2 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/menuicon-selected@2x.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/menuicon.png b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/menuicon.png new file mode 100644 index 00000000..cf5d6ea6 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/menuicon.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/menuicon@2x.png b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/menuicon@2x.png new file mode 100644 index 00000000..545a4a7f Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/Camlistore/menuicon@2x.png differ diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/CamlistoreTests/CamlistoreTests-Info.plist b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/CamlistoreTests/CamlistoreTests-Info.plist new file mode 100644 index 00000000..a76147ab --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/CamlistoreTests/CamlistoreTests-Info.plist @@ -0,0 +1,22 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + ${EXECUTABLE_NAME} + CFBundleIdentifier + org.camlistore.${PRODUCT_NAME:rfc1034identifier} + CFBundleInfoDictionaryVersion + 6.0 + CFBundlePackageType + BNDL + CFBundleShortVersionString + 1.0 + CFBundleSignature + ???? 
+ CFBundleVersion + 1 + + diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/CamlistoreTests/CamlistoreTests.m b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/CamlistoreTests/CamlistoreTests.m new file mode 100644 index 00000000..d439db03 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/CamlistoreTests/CamlistoreTests.m @@ -0,0 +1,34 @@ +// +// CamlistoreTests.m +// CamlistoreTests +// +// Created by Dustin Sallings on 12/19/13. +// Copyright (c) 2013 Camlistore. All rights reserved. +// + +#import + +@interface CamlistoreTests : XCTestCase + +@end + +@implementation CamlistoreTests + +- (void)setUp +{ + [super setUp]; + // Put setup code here. This method is called before the invocation of each test method in the class. +} + +- (void)tearDown +{ + // Put teardown code here. This method is called after the invocation of each test method in the class. + [super tearDown]; +} + +- (void)testExample +{ + XCTFail(@"No implementation for \"%s\"", __PRETTY_FUNCTION__); +} + +@end diff --git a/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/CamlistoreTests/en.lproj/InfoPlist.strings b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/CamlistoreTests/en.lproj/InfoPlist.strings new file mode 100644 index 00000000..b92732c7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/osx/Camlistore/CamlistoreTests/en.lproj/InfoPlist.strings @@ -0,0 +1 @@ +/* Localized versions of Info.plist keys */ diff --git a/vendor/github.com/camlistore/camlistore/clients/python/camliclient.py b/vendor/github.com/camlistore/camlistore/clients/python/camliclient.py new file mode 100755 index 00000000..1cf15523 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/clients/python/camliclient.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python +# +# Camlistore uploader client for Python. +# +# Copyright 2010 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Command-line example client for Camlistore.""" + +__author__ = 'Brett Slatkin (bslatkin@gmail.com)' + +import logging +import optparse +import os +import re +import sys + +try: + import camli.op +except ImportError: + sys.path.insert(0, '../../lib/python') + import camli.op + + +def upload_files(op, path_list): + """Uploads a list of files. + + Args: + op: The CamliOp to use. + path_list: The list of file paths to upload. + + Returns: + Exit code. + """ + real_path_set = set([os.path.abspath(path) for path in path_list]) + all_blob_files = [open(path, 'rb') for path in real_path_set] + logging.debug('Uploading blob paths: %r', real_path_set) + op.put_blobs(all_blob_files) + return 0 + + +def upload_dir(op, root_path, recursive=True, ignore_patterns=[r'^\..*']): + """Uploads a directory of files recursively. + + Args: + op: The CamliOp to use. + root_path: The path of the directory to upload. + recursively: If the whole directory and its children should be uploaded. + ignore_patterns: Set of ignore regex expressions. + + Returns: + Exit code. 
+ """ + def should_ignore(dirname): + for pattern in ignore_patterns: + if re.match(pattern, dirname): + return True + return False + + def error(e): + raise e + + all_blob_paths = [] + for dirpath, dirnames, filenames in os.walk(root_path, onerror=error): + allowed_dirnames = [] + for name in dirnames: + if not should_ignore(name): + allowed_dirnames.append(name) + for i in xrange(len(dirnames)): + dirnames.pop(0) + if recursive: + dirnames.extend(allowed_dirnames) + + all_blob_paths.extend(os.path.join(dirpath, name) for name in filenames) + + logging.debug('Uploading dir=%r', root_path) + upload_files(op, all_blob_paths) + return 0 + + +def download_files(op, blobref_list, target_dir): + """Downloads blobs to a target directory. + + Args: + op: The CamliOp to use. + blobref_list: The list of blobrefs to download. + target_dir: The directory to save the downloaded blobrefs in. + + Returns: + Exit code. 1 if there were any missing blobrefs. + """ + all_blobs = set(blobref_list) + found_blobs = set() + + def start_out(blobref): + blob_path = os.path.join(target_dir, blobref) + return open(blob_path, 'wb') + + def end_out(blobref, blob_file): + found_blobs.add(blobref) + blob_file.close() + + op.get_blobs(blobref_list, start_out=start_out, end_out=end_out) + missing_blobs = all_blobs - found_blobs + if missing_blobs: + print >>sys.stderr, 'Missing blobrefs: %s' % ', '.join(missing_blobs) + return 1 + else: + return 0 + + +def main(argv): + usage = \ +"""usage: %prog [options] [command] + +Commands: + put ... [filepathN] + \t\t\tupload a set of specific files + putdir + \t\t\tput all blobs present in a directory recursively + get ... [blobrefN] + \t\t\tget and save blobs to a directory, named as their blobrefs; + \t\t\t(!) 
files already present will be overwritten""" + parser = optparse.OptionParser(usage=usage) + parser.add_option('-a', '--auth', dest='auth', + default='', + help='username:pasword for HTTP basic authentication') + parser.add_option('-s', '--server', dest='server', + default='localhost:3179', + help='hostname:port to connect to') + parser.add_option('-d', '--debug', dest='debug', + action='store_true', + help='print debug logging') + parser.add_option('-i', '--ignore_patterns', dest="ignore_patterns", + default="", + help='regexp patterns to ignore') + + def error_and_exit(message): + print >>sys.stderr, message, '\n' + parser.print_help() + sys.exit(2) + + opts, args = parser.parse_args(argv[1:]) + if not args: + parser.print_help() + sys.exit(2) + + if opts.debug: + logging.getLogger().setLevel(logging.DEBUG) + + op = camli.op.CamliOp(opts.server, auth=opts.auth, basepath="/bs") + command = args[0].lower() + + if command == 'putdir': + if len(args) < 2: + error_and_exit('Must supply at least a directory to put') + return upload_dir(op, args[1], opts.ignore_patterns) + elif command == 'put': + if len(args) < 2: + error_and_exit('Must supply one or more file paths to upload') + return upload_files(op, args[1:]) + elif command == 'get': + if len(args) < 3: + error_and_exit('Must supply one or more blobrefs to download ' + 'and a directory to save them to') + return download_files(op, args[1:-1], args[-1]) + else: + error_and_exit('Unknown command: %s' % command) + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/vendor/github.com/camlistore/camlistore/cmd/camdeploy/camdeploy.go b/vendor/github.com/camlistore/camlistore/cmd/camdeploy/camdeploy.go new file mode 100644 index 00000000..b1636a44 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camdeploy/camdeploy.go @@ -0,0 +1,27 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance 
with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// The camdeploy program deploys Camlistore on cloud computing platforms such as Google +// Compute Engine or Amazon EC2. +package main + +import ( + "camlistore.org/pkg/cmdmain" +) + +func main() { + cmdmain.Main() +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camdeploy/gce.go b/vendor/github.com/camlistore/camlistore/cmd/camdeploy/gce.go new file mode 100644 index 00000000..690220b0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camdeploy/gce.go @@ -0,0 +1,155 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "bufio" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/context" + "camlistore.org/pkg/deploy/gce" + "camlistore.org/pkg/oauthutil" + + "golang.org/x/oauth2" +) + +type gceCmd struct { + project string + zone string + machine string + instName string + hostname string + certFile string + keyFile string + sshPub string + verbose bool +} + +func init() { + cmdmain.RegisterCommand("gce", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(gceCmd) + flags.StringVar(&cmd.project, "project", "", "Name of Project.") + flags.StringVar(&cmd.zone, "zone", gce.Zone, "GCE zone.") + flags.StringVar(&cmd.machine, "machine", gce.Machine, "e.g. n1-standard-1, f1-micro, g1-small") + flags.StringVar(&cmd.instName, "instance_name", gce.InstanceName, "Name of VM instance.") + flags.StringVar(&cmd.hostname, "hostname", "", "Hostname for the instance and self-signed certificates. Must be given if generating self-signed certs.") + flags.StringVar(&cmd.certFile, "cert", "", "Certificate file for TLS. A self-signed one will be generated if this flag is omitted.") + flags.StringVar(&cmd.keyFile, "key", "", "Key file for the TLS certificate. Must be given with --cert") + flags.StringVar(&cmd.sshPub, "ssh_public_key", "", "SSH public key file to authorize. Can modify later in Google's web UI anyway.") + flags.BoolVar(&cmd.verbose, "verbose", false, "Be verbose.") + return cmd + }) +} + +const ( + clientIdDat = "client-id.dat" + clientSecretDat = "client-secret.dat" + helpEnableAuth = `Enable authentication: in your project console, navigate to "APIs and auth", "Credentials", click on "Create new Client ID", and pick "Installed application", with type "Other". Copy the CLIENT ID to ` + clientIdDat + `, and the CLIENT SECRET to ` + clientSecretDat +) + +func (c *gceCmd) Describe() string { + return "Deploy Camlistore on Google Compute Engine." 
+} + +func (c *gceCmd) Usage() { + fmt.Fprintf(os.Stderr, "Usage:\n\n %s\n %s\n\n", + "camdeploy gce --project= --hostname= [options]", + "camdeploy gce --project= --cert= --key= [options]") + flag.PrintDefaults() + fmt.Fprintln(os.Stderr, "\nTo get started:\n") + printHelp() +} + +func printHelp() { + for _, v := range []string{gce.HelpCreateProject, helpEnableAuth, gce.HelpEnableAPIs} { + fmt.Fprintf(os.Stderr, "%v\n", v) + } +} + +func (c *gceCmd) RunCommand(args []string) error { + if c.verbose { + gce.Verbose = true + } + if c.project == "" { + return cmdmain.UsageError("Missing --project flag.") + } + if (c.certFile == "") != (c.keyFile == "") { + return cmdmain.UsageError("--cert and --key must both be given together.") + } + if c.certFile == "" && c.hostname == "" { + return cmdmain.UsageError("Either --hostname, or --cert & --key must provided.") + } + config := gce.NewOAuthConfig(readFile(clientIdDat), readFile(clientSecretDat)) + config.RedirectURL = "urn:ietf:wg:oauth:2.0:oob" + + instConf := &gce.InstanceConf{ + Name: c.instName, + Project: c.project, + Machine: c.machine, + Zone: c.zone, + CertFile: c.certFile, + KeyFile: c.keyFile, + Hostname: c.hostname, + } + if c.sshPub != "" { + instConf.SSHPub = strings.TrimSpace(readFile(c.sshPub)) + } + + depl := &gce.Deployer{ + Client: oauth2.NewClient(oauth2.NoContext, oauth2.ReuseTokenSource(nil, &oauthutil.TokenSource{ + Config: config, + CacheFile: c.project + "-token.json", + AuthCode: func() string { + fmt.Println("Get auth code from:") + fmt.Printf("%v\n", config.AuthCodeURL("my-state", oauth2.AccessTypeOffline, oauth2.ApprovalForce)) + fmt.Println("Enter auth code:") + sc := bufio.NewScanner(os.Stdin) + sc.Scan() + return strings.TrimSpace(sc.Text()) + }, + })), + Conf: instConf, + } + inst, err := depl.Create(context.TODO()) + if err != nil { + return err + } + + log.Printf("Instance is up at %s", inst.NetworkInterfaces[0].AccessConfigs[0].NatIP) + return nil +} + +func readFile(v string) string { + 
slurp, err := ioutil.ReadFile(v) + if err != nil { + if os.IsNotExist(err) { + msg := fmt.Sprintf("%v does not exist.", v) + if v == clientIdDat || v == clientSecretDat { + msg = fmt.Sprintf("%v\n%s", msg, helpEnableAuth) + } + log.Fatal(msg) + } + log.Fatalf("Error reading %s: %v", v, err) + } + return strings.TrimSpace(string(slurp)) +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camget/.gitignore b/vendor/github.com/camlistore/camlistore/cmd/camget/.gitignore new file mode 100644 index 00000000..c464bd61 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camget/.gitignore @@ -0,0 +1,2 @@ +*.[568] +camget diff --git a/vendor/github.com/camlistore/camlistore/cmd/camget/camget.go b/vendor/github.com/camlistore/camlistore/cmd/camget/camget.go new file mode 100644 index 00000000..36c33ce2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camget/camget.go @@ -0,0 +1,418 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io" + "log" + "net/http" + "os" + "path/filepath" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/buildinfo" + "camlistore.org/pkg/cacher" + "camlistore.org/pkg/client" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/index" + "camlistore.org/pkg/legal/legalprint" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/types" +) + +var ( + flagVersion = flag.Bool("version", false, "show version") + flagVerbose = flag.Bool("verbose", false, "be verbose") + flagHTTP = flag.Bool("verbose_http", false, "show HTTP request summaries") + flagCheck = flag.Bool("check", false, "just check for the existence of listed blobs; returning 0 if all are present") + flagOutput = flag.String("o", "-", "Output file/directory to create. Use -f to overwrite.") + flagGraph = flag.Bool("graph", false, "Output a graphviz directed graph .dot file of the provided root schema blob, to be rendered with 'dot -Tsvg -o graph.svg graph.dot'") + flagContents = flag.Bool("contents", false, "If true and the target blobref is a 'bytes' or 'file' schema blob, the contents of that file are output instead.") + flagShared = flag.String("shared", "", "If non-empty, the URL of a \"share\" blob. The URL will be used as the root of future fetches. Only \"haveref\" shares are currently supported.") + flagTrustedCert = flag.String("cert", "", "If non-empty, the fingerprint (20 digits lowercase prefix of the SHA256 of the complete certificate) of the TLS certificate we trust for the share URL. 
Requires --shared.") + flagInsecureTLS = flag.Bool("insecure", false, "If set, when using TLS, the server's certificates verification is disabled, and they are not checked against the trustedCerts in the client configuration either.") + flagSkipIrregular = flag.Bool("skip_irregular", false, "If true, symlinks, device files, and other special file types are skipped.") +) + +func main() { + client.AddFlags() + flag.Parse() + + if *flagVersion { + fmt.Fprintf(os.Stderr, "camget version: %s\n", buildinfo.Version()) + return + } + + if legalprint.MaybePrint(os.Stderr) { + return + } + + if *flagGraph && flag.NArg() != 1 { + log.Fatalf("The --graph option requires exactly one parameter.") + } + + var cl *client.Client + var items []blob.Ref + + if *flagShared != "" { + if client.ExplicitServer() != "" { + log.Fatal("Can't use --shared with an explicit blobserver; blobserver is implicit from the --shared URL.") + } + if flag.NArg() != 0 { + log.Fatal("No arguments permitted when using --shared") + } + cl1, target, err := client.NewFromShareRoot(*flagShared, + client.OptionInsecure(*flagInsecureTLS), + client.OptionTrustedCert(*flagTrustedCert)) + if err != nil { + log.Fatal(err) + } + cl = cl1 + items = append(items, target) + } else { + if *flagTrustedCert != "" { + log.Fatal("Can't use --cert without --shared.") + } + cl = client.NewOrFail() + for n := 0; n < flag.NArg(); n++ { + arg := flag.Arg(n) + br, ok := blob.Parse(arg) + if !ok { + log.Fatalf("Failed to parse argument %q as a blobref.", arg) + } + items = append(items, br) + } + } + + cl.InsecureTLS = *flagInsecureTLS + tr := cl.TransportForConfig(&client.TransportConfig{ + Verbose: *flagHTTP, + }) + httpStats, _ := tr.(*httputil.StatsTransport) + cl.SetHTTPClient(&http.Client{Transport: tr}) + + diskCacheFetcher, err := cacher.NewDiskCache(cl) + if err != nil { + log.Fatalf("Error setting up local disk cache: %v", err) + } + defer diskCacheFetcher.Clean() + if *flagVerbose { + log.Printf("Using temp blob cache 
directory %s", diskCacheFetcher.Root) + } + + for _, br := range items { + if *flagGraph { + printGraph(diskCacheFetcher, br) + return + } + if *flagCheck { + // TODO: do HEAD requests checking if the blobs exists. + log.Fatal("not implemented") + return + } + if *flagOutput == "-" { + var rc io.ReadCloser + var err error + if *flagContents { + rc, err = schema.NewFileReader(diskCacheFetcher, br) + if err == nil { + rc.(*schema.FileReader).LoadAllChunks() + } + } else { + rc, err = fetch(diskCacheFetcher, br) + } + if err != nil { + log.Fatal(err) + } + defer rc.Close() + if _, err := io.Copy(os.Stdout, rc); err != nil { + log.Fatalf("Failed reading %q: %v", br, err) + } + } else { + if err := smartFetch(diskCacheFetcher, *flagOutput, br); err != nil { + log.Fatal(err) + } + } + } + + if *flagVerbose { + log.Printf("HTTP requests: %d\n", httpStats.Requests()) + } +} + +func fetch(src blob.Fetcher, br blob.Ref) (r io.ReadCloser, err error) { + if *flagVerbose { + log.Printf("Fetching %s", br.String()) + } + r, _, err = src.Fetch(br) + if err != nil { + return nil, fmt.Errorf("Failed to fetch %s: %s", br, err) + } + return r, err +} + +// A little less than the sniffer will take, so we don't truncate. +const sniffSize = 900 * 1024 + +// smartFetch the things that blobs point to, not just blobs. 
+func smartFetch(src blob.Fetcher, targ string, br blob.Ref) error { + rc, err := fetch(src, br) + if err != nil { + return err + } + rcc := types.NewOnceCloser(rc) + defer rcc.Close() + + sniffer := index.NewBlobSniffer(br) + _, err = io.CopyN(sniffer, rc, sniffSize) + if err != nil && err != io.EOF { + return err + } + + sniffer.Parse() + b, ok := sniffer.SchemaBlob() + + if !ok { + if *flagVerbose { + log.Printf("Fetching opaque data %v into %q", br, targ) + } + + // opaque data - put it in a file + f, err := os.Create(targ) + if err != nil { + return fmt.Errorf("opaque: %v", err) + } + defer f.Close() + body, _ := sniffer.Body() + r := io.MultiReader(bytes.NewReader(body), rc) + _, err = io.Copy(f, r) + return err + } + rcc.Close() + + switch b.Type() { + case "directory": + dir := filepath.Join(targ, b.FileName()) + if *flagVerbose { + log.Printf("Fetching directory %v into %s", br, dir) + } + if err := os.MkdirAll(dir, b.FileMode()); err != nil { + return err + } + if err := setFileMeta(dir, b); err != nil { + log.Print(err) + } + entries, ok := b.DirectoryEntries() + if !ok { + return fmt.Errorf("bad entries blobref in dir %v", b.BlobRef()) + } + return smartFetch(src, dir, entries) + case "static-set": + if *flagVerbose { + log.Printf("Fetching directory entries %v into %s", br, targ) + } + + // directory entries + const numWorkers = 10 + type work struct { + br blob.Ref + errc chan<- error + } + members := b.StaticSetMembers() + workc := make(chan work, len(members)) + defer close(workc) + for i := 0; i < numWorkers; i++ { + go func() { + for wi := range workc { + wi.errc <- smartFetch(src, targ, wi.br) + } + }() + } + var errcs []<-chan error + for _, mref := range members { + errc := make(chan error, 1) + errcs = append(errcs, errc) + workc <- work{mref, errc} + } + for _, errc := range errcs { + if err := <-errc; err != nil { + return err + } + } + return nil + case "file": + fr, err := schema.NewFileReader(src, br) + if err != nil { + return 
fmt.Errorf("NewFileReader: %v", err) + } + fr.LoadAllChunks() + defer fr.Close() + + name := filepath.Join(targ, b.FileName()) + + if fi, err := os.Stat(name); err == nil && fi.Size() == fr.Size() { + if *flagVerbose { + log.Printf("Skipping %s; already exists.", name) + } + return nil + } + + if *flagVerbose { + log.Printf("Writing %s to %s ...", br, name) + } + + f, err := os.Create(name) + if err != nil { + return fmt.Errorf("file type: %v", err) + } + defer f.Close() + if _, err := io.Copy(f, fr); err != nil { + return fmt.Errorf("Copying %s to %s: %v", br, name, err) + } + if err := setFileMeta(name, b); err != nil { + log.Print(err) + } + return nil + case "symlink": + if *flagSkipIrregular { + return nil + } + sf, ok := b.AsStaticFile() + if !ok { + return errors.New("blob is not a static file") + } + sl, ok := sf.AsStaticSymlink() + if !ok { + return errors.New("blob is not a symlink") + } + name := filepath.Join(targ, sl.FileName()) + if _, err := os.Lstat(name); err == nil { + if *flagVerbose { + log.Printf("Skipping creating symbolic link %s: A file with that name exists", name) + } + return nil + } + target := sl.SymlinkTargetString() + if target == "" { + return errors.New("symlink without target") + } + + // On Windows, os.Symlink isn't yet implemented as of Go 1.3. + // See https://code.google.com/p/go/issues/detail?id=5750 + err := os.Symlink(target, name) + // We won't call setFileMeta for a symlink because: + // the permissions of a symlink do not matter and Go's + // os.Chtimes always dereferences (does not act on the + // symlink but its target). 
+ return err + case "fifo": + if *flagSkipIrregular { + return nil + } + name := filepath.Join(targ, b.FileName()) + + sf, ok := b.AsStaticFile() + if !ok { + return errors.New("blob is not a static file") + } + _, ok = sf.AsStaticFIFO() + if !ok { + return errors.New("blob is not a static FIFO") + } + + if _, err := os.Lstat(name); err == nil { + log.Printf("Skipping FIFO %s: A file with that name already exists", name) + return nil + } + + err = osutil.Mkfifo(name, 0600) + if err == osutil.ErrNotSupported { + log.Printf("Skipping FIFO %s: Unsupported filetype", name) + return nil + } + if err != nil { + return fmt.Errorf("%s: osutil.Mkfifo(): %v", name, err) + } + + if err := setFileMeta(name, b); err != nil { + log.Print(err) + } + + return nil + + case "socket": + if *flagSkipIrregular { + return nil + } + name := filepath.Join(targ, b.FileName()) + + sf, ok := b.AsStaticFile() + if !ok { + return errors.New("blob is not a static file") + } + _, ok = sf.AsStaticSocket() + if !ok { + return errors.New("blob is not a static socket") + } + + if _, err := os.Lstat(name); err == nil { + log.Printf("Skipping socket %s: A file with that name already exists", name) + return nil + } + + err = osutil.Mksocket(name) + if err == osutil.ErrNotSupported { + log.Printf("Skipping socket %s: Unsupported filetype", name) + return nil + } + if err != nil { + return fmt.Errorf("%s: %v", name, err) + } + + if err := setFileMeta(name, b); err != nil { + log.Print(err) + } + + return nil + + default: + return errors.New("unknown blob type: " + b.Type()) + } + panic("unreachable") +} + +func setFileMeta(name string, blob *schema.Blob) error { + err1 := os.Chmod(name, blob.FileMode()) + var err2 error + if mt := blob.ModTime(); !mt.IsZero() { + err2 = os.Chtimes(name, mt, mt) + } + // TODO: we previously did os.Chown here, but it's rarely wanted, + // then the schema.Blob refactor broke it, so it's gone. + // Add it back later once we care? 
+ for _, err := range []error{err1, err2} { + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camget/doc.go b/vendor/github.com/camlistore/camlistore/cmd/camget/doc.go new file mode 100644 index 00000000..9f516644 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camget/doc.go @@ -0,0 +1,39 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +The camget tool fetches blobs, files, and directories. + + +Examples + +Writes to stdout by default: + + camget // dump raw blob + camget -contents // dump file contents + +Like curl, lets you set output file/directory with -o: + + camget -o + (if exists and is directory, must be a directory; + use -f to overwrite any files) + + camget -o + +Camget isn't very fleshed out. In general, using 'cammount' to just +mount a tree is an easier way to get files back. +*/ +package main diff --git a/vendor/github.com/camlistore/camlistore/cmd/camget/graph.go b/vendor/github.com/camlistore/camlistore/cmd/camget/graph.go new file mode 100644 index 00000000..4db9f59f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camget/graph.go @@ -0,0 +1,156 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "io" + "log" + "strings" + "sync" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/index" + "camlistore.org/pkg/schema" +) + +func check(err error) { + if err != nil { + log.Fatal(err) + } +} + +type node struct { + br blob.Ref + g *graph + + size int64 + blob *schema.Blob + edges []blob.Ref +} + +func (n *node) dotName() string { + return strings.Replace(n.br.String(), "-", "_", -1) +} + +func (n *node) dotLabel() string { + name := n.displayName() + if n.blob == nil { + return fmt.Sprintf("%s\n%d bytes", name, n.size) + } + return name + "\n" + n.blob.Type() +} + +func (n *node) color() string { + if n.br == n.g.root { + return "#a0ffa0" + } + if n.blob == nil { + return "#aaaaaa" + } + return "#a0a0ff" +} + +func (n *node) displayName() string { + s := n.br.String() + s = s[strings.Index(s, "-")+1:] + return s[:7] +} + +func (n *node) load() { + defer n.g.wg.Done() + rc, err := fetch(n.g.src, n.br) + check(err) + defer rc.Close() + sniff := index.NewBlobSniffer(n.br) + n.size, err = io.Copy(sniff, rc) + check(err) + sniff.Parse() + blob, ok := sniff.SchemaBlob() + if !ok { + return + } + n.blob = blob + for _, part := range blob.ByteParts() { + n.addEdge(part.BlobRef) + n.addEdge(part.BytesRef) + } +} + +func (n *node) addEdge(dst blob.Ref) { + if !dst.Valid() { + return + } + n.g.startLoadNode(dst) + n.edges = append(n.edges, dst) +} + +type graph struct { + src blob.Fetcher + root blob.Ref + + mu sync.Mutex // guards n + n map[string]*node + + wg sync.WaitGroup +} + +func (g *graph) startLoadNode(br blob.Ref) { + 
g.mu.Lock() + defer g.mu.Unlock() + key := br.String() + if _, ok := g.n[key]; ok { + return + } + n := &node{ + g: g, + br: br, + } + g.n[key] = n + g.wg.Add(1) + go n.load() +} + +func printGraph(src blob.Fetcher, root blob.Ref) { + g := &graph{ + src: src, + root: root, + n: make(map[string]*node), + } + g.startLoadNode(root) + g.wg.Wait() + fmt.Println("digraph G {") + fmt.Println(" node [fontsize=10,fontname=Arial]") + fmt.Println(" edge [fontsize=10,fontname=Arial]") + + for _, n := range g.n { + fmt.Printf("\n %s [label=%q,style=filled,fillcolor=%q]\n", n.dotName(), n.dotLabel(), n.color()) + for i, e := range n.edges { + // TODO: create an edge type. + // Also, this edgeLabel is specific to file parts. Other schema + // types might not even have a concept of ordering. This is hack. + edgeLabel := fmt.Sprintf("%d", i) + if i == 0 { + edgeLabel = "first" + } else if i == len(n.edges)-1 { + edgeLabel = "last" + } + fmt.Printf(" %s -> %s [label=%q]\n", n.dotName(), g.n[e.String()].dotName(), edgeLabel) + } + } + fmt.Printf("}\n") +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/cammount/.gitignore b/vendor/github.com/camlistore/camlistore/cmd/cammount/.gitignore new file mode 100644 index 00000000..dc711e52 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/cammount/.gitignore @@ -0,0 +1,2 @@ +cammount +_go_.[568] diff --git a/vendor/github.com/camlistore/camlistore/cmd/cammount/cammount.go b/vendor/github.com/camlistore/camlistore/cmd/cammount/cammount.go new file mode 100644 index 00000000..e02604a1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/cammount/cammount.go @@ -0,0 +1,251 @@ +// +build linux darwin + +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "os/exec" + "os/signal" + "path/filepath" + "runtime" + "strings" + "syscall" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/cacher" + "camlistore.org/pkg/client" + "camlistore.org/pkg/fs" + "camlistore.org/pkg/legal/legalprint" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/search" + "camlistore.org/third_party/bazil.org/fuse" + fusefs "camlistore.org/third_party/bazil.org/fuse/fs" +) + +var ( + debug = flag.Bool("debug", false, "print debugging messages.") + xterm = flag.Bool("xterm", false, "Run an xterm in the mounted directory. Shut down when xterm ends.") + term = flag.Bool("term", false, "Open a terminal window. Doesn't shut down when exited. Mostly for demos.") + open = flag.Bool("open", false, "Open a GUI window") +) + +func usage() { + fmt.Fprint(os.Stderr, "usage: cammount [opts] [ [||]]\n") + flag.PrintDefaults() + os.Exit(2) +} + +func main() { + var conn *fuse.Conn + + // Scans the arg list and sets up flags + client.AddFlags() + flag.Usage = usage + flag.Parse() + + if legalprint.MaybePrint(os.Stderr) { + return + } + + narg := flag.NArg() + if narg > 2 { + usage() + } + + var mountPoint string + var err error + if narg > 0 { + mountPoint = flag.Arg(0) + } else { + mountPoint, err = ioutil.TempDir("", "cammount") + if err != nil { + log.Fatal(err) + } + log.Printf("No mount point given. Using: %s", mountPoint) + defer os.Remove(mountPoint) + } + + errorf := func(msg string, args ...interface{}) { + fmt.Fprintf(os.Stderr, msg, args...) 
+ fmt.Fprint(os.Stderr, "\n") + usage() + } + + var ( + cl *client.Client + root blob.Ref // nil if only one arg + camfs *fs.CamliFileSystem + ) + if narg == 2 { + rootArg := flag.Arg(1) + // not trying very hard since NewFromShareRoot will do it better with a regex + if strings.HasPrefix(rootArg, "http://") || + strings.HasPrefix(rootArg, "https://") { + if client.ExplicitServer() != "" { + errorf("Can't use an explicit blobserver with a share URL; the blobserver is implicit from the share URL.") + } + var err error + cl, root, err = client.NewFromShareRoot(rootArg) + if err != nil { + log.Fatal(err) + } + } else { + cl = client.NewOrFail() // automatic from flags + cl.SetHTTPClient(&http.Client{Transport: cl.TransportForConfig(nil)}) + + var ok bool + root, ok = blob.Parse(rootArg) + + if !ok { + // not a blobref, check for root name instead + req := &search.WithAttrRequest{N: 1, Attr: "camliRoot", Value: rootArg} + wres, err := cl.GetPermanodesWithAttr(req) + + if err != nil { + log.Fatal("could not query search") + } + + if wres.WithAttr != nil { + root = wres.WithAttr[0].Permanode + } else { + log.Fatalf("root specified is not a blobref or name of a root: %q\n", rootArg) + } + } + } + } else { + cl = client.NewOrFail() // automatic from flags + cl.SetHTTPClient(&http.Client{Transport: cl.TransportForConfig(nil)}) + } + + diskCacheFetcher, err := cacher.NewDiskCache(cl) + if err != nil { + log.Fatalf("Error setting up local disk cache: %v", err) + } + defer diskCacheFetcher.Clean() + if root.Valid() { + var err error + camfs, err = fs.NewRootedCamliFileSystem(cl, diskCacheFetcher, root) + if err != nil { + log.Fatalf("Error creating root with %v: %v", root, err) + } + } else { + camfs = fs.NewDefaultCamliFileSystem(cl, diskCacheFetcher) + } + + if *debug { + fuse.Debug = func(msg interface{}) { log.Print(msg) } + // TODO: set fs's logger + } + + // This doesn't appear to work on OS X: + sigc := make(chan os.Signal, 1) + + conn, err = fuse.Mount(mountPoint, 
fuse.VolumeName(filepath.Base(mountPoint))) + if err != nil { + if err.Error() == "cannot find load_fusefs" && runtime.GOOS == "darwin" { + log.Fatal("FUSE not available; install from http://osxfuse.github.io/") + } + log.Fatalf("Mount: %v", err) + } + + xtermDone := make(chan bool, 1) + if *xterm { + cmd := exec.Command("xterm") + cmd.Dir = mountPoint + if err := cmd.Start(); err != nil { + log.Printf("Error starting xterm: %v", err) + } else { + go func() { + cmd.Wait() + xtermDone <- true + }() + defer cmd.Process.Kill() + } + } + if *open { + if runtime.GOOS == "darwin" { + go exec.Command("open", mountPoint).Run() + } + } + if *term { + if runtime.GOOS == "darwin" { + if osutil.DirExists("/Applications/iTerm.app/") { + go exec.Command("open", "-a", "iTerm", mountPoint).Run() + } else { + log.Printf("TODO: iTerm not installed. Figure out how to open with Terminal.app instead.") + } + } + } + + signal.Notify(sigc, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT) + + doneServe := make(chan error, 1) + go func() { + doneServe <- fusefs.Serve(conn, camfs) + }() + + quitKey := make(chan bool, 1) + go awaitQuitKey(quitKey) + + select { + case err := <-doneServe: + log.Printf("conn.Serve returned %v", err) + + // check if the mount process has an error to report + <-conn.Ready + if err := conn.MountError; err != nil { + log.Printf("conn.MountError: %v", err) + } + case sig := <-sigc: + log.Printf("Signal %s received, shutting down.", sig) + case <-quitKey: + log.Printf("Quit key pressed. 
Shutting down.") + case <-xtermDone: + log.Printf("xterm done") + } + + time.AfterFunc(2*time.Second, func() { + os.Exit(1) + }) + log.Printf("Unmounting...") + err = fs.Unmount(mountPoint) + log.Printf("Unmount = %v", err) + + log.Printf("cammount FUSE process ending.") +} + +func awaitQuitKey(done chan<- bool) { + var buf [1]byte + for { + _, err := os.Stdin.Read(buf[:]) + if err != nil { + return + } + if buf[0] == 'q' { + done <- true + return + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/cammount/cammount_other.go b/vendor/github.com/camlistore/camlistore/cmd/cammount/cammount_other.go new file mode 100644 index 00000000..4db2d389 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/cammount/cammount_other.go @@ -0,0 +1,27 @@ +// +build !linux,!darwin + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package main + +import ( + "log" + "runtime" +) + +func main() { + log.Fatalln("cammount not implemented on", runtime.GOOS) +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/cammount/doc.go b/vendor/github.com/camlistore/camlistore/cmd/cammount/doc.go new file mode 100644 index 00000000..e30e7ea9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/cammount/doc.go @@ -0,0 +1,30 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +The cammount tool mounts a root directory blob onto the given mountpoint. The blobref can be given directly or through a share blob URL. If no root blobref is given, an automatic root is created instead. + + +Usage: + + cammount [opts] [|] + -debug=false: print debugging messages. + -server="": Camlistore server prefix. + If blank, the default from the "server" field of ~/.camlistore/config is used. + Acceptable forms: https://you.example.com, example.com:1345 (https assumed), or + http://you.example.com/alt-root +*/ +package main diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/.gitignore b/vendor/github.com/camlistore/camlistore/cmd/camput/.gitignore new file mode 100644 index 00000000..3189f004 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/.gitignore @@ -0,0 +1,5 @@ +*.8 +*.6 +camput + + diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/androidx.go b/vendor/github.com/camlistore/camlistore/cmd/camput/androidx.go new file mode 100644 index 00000000..8b08aa51 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/androidx.go @@ -0,0 +1,42 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Hacks for running camput as a child process on Android. + +package main + +import ( + "camlistore.org/pkg/client/android" +) + +type allStats struct { + total, skipped, uploaded stats +} + +var lastStatBroadcast allStats + +func printAndroidCamputStatus(t *TreeUpload) { + bcast := allStats{t.total, t.skipped, t.uploaded} + if bcast == lastStatBroadcast { + return + } + lastStatBroadcast = bcast + + android.Printf("STATS nfile=%d nbyte=%d skfile=%d skbyte=%d upfile=%d upbyte=%d\n", + t.total.files, t.total.bytes, + t.skipped.files, t.skipped.bytes, + t.uploaded.files, t.uploaded.bytes) +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/attr.go b/vendor/github.com/camlistore/camlistore/cmd/camput/attr.go new file mode 100644 index 00000000..859617af --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/attr.go @@ -0,0 +1,103 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "flag" + "fmt" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/schema" +) + +type attrCmd struct { + add bool + del bool + up *Uploader +} + +func init() { + cmdmain.RegisterCommand("attr", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(attrCmd) + flags.BoolVar(&cmd.add, "add", false, `Adds attribute (e.g. "tag")`) + flags.BoolVar(&cmd.del, "del", false, "Deletes named attribute [value]") + return cmd + }) +} + +func (c *attrCmd) Describe() string { + return "Add, set, or delete a permanode's attribute." +} + +func (c *attrCmd) Usage() { + cmdmain.Errorf("Usage: camput [globalopts] attr [attroption] ") +} + +func (c *attrCmd) Examples() []string { + return []string{ + " Set attribute", + "--add Adds attribute (e.g. \"tag\")", + "--del [] Deletes named attribute", + } +} + +func (c *attrCmd) RunCommand(args []string) error { + if err := c.checkArgs(args); err != nil { + return err + } + permanode, attr := args[0], args[1] + value := "" + if len(args) > 2 { + value = args[2] + } + + pn, ok := blob.Parse(permanode) + if !ok { + return fmt.Errorf("Error parsing blobref %q", permanode) + } + claimFunc := func() func(blob.Ref, string, string) *schema.Builder { + switch { + case c.add: + return schema.NewAddAttributeClaim + case c.del: + return schema.NewDelAttributeClaim + default: + return schema.NewSetAttributeClaim + } + }() + bb := claimFunc(pn, attr, value) + put, err := getUploader().UploadAndSignBlob(bb) + handleResult(bb.Type(), put, err) + return nil +} + +func (c *attrCmd) checkArgs(args []string) error { + if c.del { + if c.add { + return cmdmain.UsageError("Add and del options are exclusive") + } + if len(args) < 2 { + return cmdmain.UsageError("Attr -del takes at least 2 args: []") + } + return nil + } + if len(args) != 3 { + return cmdmain.UsageError("Attr takes 3 args: ") + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/blobs.go 
b/vendor/github.com/camlistore/camlistore/cmd/camput/blobs.go new file mode 100644 index 00000000..b7242b5c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/blobs.go @@ -0,0 +1,157 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bytes" + "crypto/sha1" + "errors" + "flag" + "fmt" + "io" + "os" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/client" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/constants" +) + +type blobCmd struct{} + +func init() { + cmdmain.RegisterCommand("blob", func(flags *flag.FlagSet) cmdmain.CommandRunner { + return new(blobCmd) + }) +} + +func (c *blobCmd) Describe() string { + return "Upload raw blob(s)." 
+} + +func (c *blobCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, "Usage: camput [globalopts] blob \n camput [globalopts] blob -\n") +} + +func (c *blobCmd) Examples() []string { + return []string{ + " (raw, without any metadata)", + "- (read from stdin)", + } +} + +func (c *blobCmd) RunCommand(args []string) error { + if len(args) == 0 { + return errors.New("No files given.") + } + + up := getUploader() + for _, arg := range args { + var ( + handle *client.UploadHandle + err error + ) + if arg == "-" { + handle, err = stdinBlobHandle() + } else { + handle, err = fileBlobHandle(up, arg) + } + if err != nil { + return err + } + put, err := up.Upload(handle) + handleResult("blob", put, err) + continue + } + return nil +} + +func stdinBlobHandle() (uh *client.UploadHandle, err error) { + var buf bytes.Buffer + size, err := io.CopyN(&buf, cmdmain.Stdin, constants.MaxBlobSize+1) + if err == io.EOF { + err = nil + } + if err != nil { + return + } + if size > constants.MaxBlobSize { + err = fmt.Errorf("blob size cannot be bigger than %d", constants.MaxBlobSize) + } + file := buf.Bytes() + h := blob.NewHash() + size, err = io.Copy(h, bytes.NewReader(file)) + if err != nil { + return + } + return &client.UploadHandle{ + BlobRef: blob.RefFromHash(h), + Size: uint32(size), + Contents: io.LimitReader(bytes.NewReader(file), size), + }, nil +} + +func fileBlobHandle(up *Uploader, path string) (uh *client.UploadHandle, err error) { + fi, err := up.stat(path) + if err != nil { + return + } + if fi.Mode()&os.ModeType != 0 { + return nil, fmt.Errorf("%q is not a regular file", path) + } + file, err := up.open(path) + if err != nil { + return + } + ref, size, err := blobDetails(file) + if err != nil { + return nil, err + } + return &client.UploadHandle{ + BlobRef: ref, + Size: size, + Contents: io.LimitReader(file, int64(size)), + }, nil +} + +func blobDetails(contents io.ReadSeeker) (bref blob.Ref, size uint32, err error) { + s1 := sha1.New() + if _, err = contents.Seek(0, 0); err != 
nil { + return + } + defer func() { + if _, seekErr := contents.Seek(0, 0); seekErr != nil { + if err == nil { + err = seekErr + } else { + err = fmt.Errorf("%s, cannot seek back: %v", err, seekErr) + } + } + }() + sz, err := io.CopyN(s1, contents, constants.MaxBlobSize+1) + if err == nil || err == io.EOF { + bref, err = blob.RefFromHash(s1), nil + } else { + err = fmt.Errorf("error reading contents: %v", err) + return + } + if sz > constants.MaxBlobSize { + err = fmt.Errorf("blob size cannot be bigger than %d", constants.MaxBlobSize) + } + size = uint32(sz) + return +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/cache.go b/vendor/github.com/camlistore/camlistore/cmd/camput/cache.go new file mode 100644 index 00000000..fc446dca --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/cache.go @@ -0,0 +1,50 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "os" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/client" +) + +// A HaveCache tracks whether a remove blobserver has a blob or not. +type HaveCache interface { + StatBlobCache(br blob.Ref) (size uint32, ok bool) + NoteBlobExists(br blob.Ref, size uint32) + Close() error +} + +// UploadCache is the "stat cache" for regular files. 
Given a current +// working directory, possibly relative filename, stat info, and +// whether that file was uploaded with a permanode (-filenodes), +// returns what the ultimate put result (the top-level "file" schema +// blob) for that regular file was. +type UploadCache interface { + // CachedPutResult looks in the cache for the put result for the file + // that was uploaded. If withPermanode, it is only a hit if a planned + // permanode for the file was created and uploaded too, and vice-versa. + // The returned PutResult is always for the "file" schema blob. + CachedPutResult(pwd, filename string, fi os.FileInfo, withPermanode bool) (*client.PutResult, error) + // AddCachedPutResult stores in the cache the put result for the file that + // was uploaded. If withPermanode, it means a planned permanode was created + // for this file when it was uploaded (with -filenodes), and the cache entry + // will reflect that. + AddCachedPutResult(pwd, filename string, fi os.FileInfo, pr *client.PutResult, withPermanode bool) + Close() error +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/camput.go b/vendor/github.com/camlistore/camlistore/cmd/camput/camput.go new file mode 100644 index 00000000..fd80a8c2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/camput.go @@ -0,0 +1,187 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "flag" + "fmt" + "log" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "sync" + + "camlistore.org/pkg/blobserver/dir" + "camlistore.org/pkg/client" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/syncutil" +) + +const buffered = 16 // arbitrary + +var ( + flagProxyLocal = false + flagHTTP = flag.Bool("verbose_http", false, "show HTTP request summaries") + flagHaveCache = true + flagBlobDir = flag.String("blobdir", "", "If non-empty, the local directory to put blobs, instead of sending them over the network. If the string \"discard\", no blobs are written or sent over the network anywhere.") +) + +var ( + uploaderOnce sync.Once + uploader *Uploader // initialized by getUploader +) + +var debugFlagOnce sync.Once + +func registerDebugFlags() { + flag.BoolVar(&flagProxyLocal, "proxy_local", false, "If true, the HTTP_PROXY environment is also used for localhost requests. This can be helpful during debugging.") + flag.BoolVar(&flagHaveCache, "havecache", true, "Use the 'have cache', a cache keeping track of what blobs the remote server should already have from previous uploads.") +} + +func init() { + if debug, _ := strconv.ParseBool(os.Getenv("CAMLI_DEBUG")); debug { + debugFlagOnce.Do(registerDebugFlags) + } + cmdmain.ExtraFlagRegistration = client.AddFlags + cmdmain.PreExit = func() { + if up := uploader; up != nil { + up.Close() + stats := up.Stats() + if *cmdmain.FlagVerbose { + log.Printf("Client stats: %s", stats.String()) + if up.transport != nil { + log.Printf(" #HTTP reqs: %d", up.transport.Requests()) + } + } + } + + // So multiple cmd/camput TestFoo funcs run, each with + // an fresh (and not previously closed) Uploader: + uploader = nil + uploaderOnce = sync.Once{} + } +} + +func getUploader() *Uploader { + uploaderOnce.Do(initUploader) + return uploader +} + +func initUploader() { + up := newUploader() + if flagHaveCache && *flagBlobDir == "" { + gen, err := 
up.StorageGeneration() + if err != nil { + log.Printf("WARNING: not using local server inventory cache; failed to retrieve server's storage generation: %v", err) + } else { + up.haveCache = NewKvHaveCache(gen) + up.Client.SetHaveCache(up.haveCache) + } + } + uploader = up +} + +func handleResult(what string, pr *client.PutResult, err error) error { + if err != nil { + cmdmain.Errorf("Error putting %s: %s\n", what, err) + cmdmain.ExitWithFailure = true + return err + } + fmt.Fprintln(cmdmain.Stdout, pr.BlobRef.String()) + return nil +} + +func getenvEitherCase(k string) string { + if v := os.Getenv(strings.ToUpper(k)); v != "" { + return v + } + return os.Getenv(strings.ToLower(k)) +} + +// proxyFromEnvironment is similar to http.ProxyFromEnvironment but it skips +// $NO_PROXY blacklist so it proxies every requests, including localhost +// requests. +func proxyFromEnvironment(req *http.Request) (*url.URL, error) { + proxy := getenvEitherCase("HTTP_PROXY") + if proxy == "" { + return nil, nil + } + proxyURL, err := url.Parse(proxy) + if err != nil || proxyURL.Scheme == "" { + if u, err := url.Parse("http://" + proxy); err == nil { + proxyURL = u + err = nil + } + } + if err != nil { + return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) + } + return proxyURL, nil +} + +func newUploader() *Uploader { + var cc *client.Client + var httpStats *httputil.StatsTransport + if d := *flagBlobDir; d != "" { + ss, err := dir.New(d) + if err != nil && d == "discard" { + ss = discardStorage{} + err = nil + } + if err != nil { + log.Fatalf("Error using dir %s as storage: %v", d, err) + } + cc = client.NewStorageClient(ss) + } else { + cc = client.NewOrFail() + proxy := http.ProxyFromEnvironment + if flagProxyLocal { + proxy = proxyFromEnvironment + } + tr := cc.TransportForConfig( + &client.TransportConfig{ + Proxy: proxy, + Verbose: *flagHTTP, + }) + httpStats, _ = tr.(*httputil.StatsTransport) + cc.SetHTTPClient(&http.Client{Transport: tr}) + } + if 
*cmdmain.FlagVerbose { + cc.SetLogger(log.New(cmdmain.Stderr, "", log.LstdFlags)) + } else { + cc.SetLogger(nil) + } + + pwd, err := os.Getwd() + if err != nil { + log.Fatalf("os.Getwd: %v", err) + } + + return &Uploader{ + Client: cc, + transport: httpStats, + pwd: pwd, + fdGate: syncutil.NewGate(100), // gate things that waste fds, assuming a low system limit + } +} + +func main() { + cmdmain.Main() +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/camput_test.go b/vendor/github.com/camlistore/camlistore/cmd/camput/camput_test.go new file mode 100644 index 00000000..2af08960 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/camput_test.go @@ -0,0 +1,247 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "camlistore.org/pkg/cmdmain" +) + +// env is the environment that a camput test runs within. +type env struct { + // stdin is the standard input, or /dev/null if nil + stdin io.Reader + + // Timeout optionally specifies the timeout on the command. + Timeout time.Duration + + // TODO(bradfitz): vfs files. 
+} + +func (e *env) timeout() time.Duration { + if e.Timeout != 0 { + return e.Timeout + } + return 15 * time.Second + +} +func (e *env) Run(args ...string) (out, err []byte, exitCode int) { + outbuf := new(bytes.Buffer) + errbuf := new(bytes.Buffer) + os.Args = append(os.Args[:1], args...) + cmdmain.Stdout, cmdmain.Stderr = outbuf, errbuf + if e.stdin == nil { + cmdmain.Stdin = strings.NewReader("") + } else { + cmdmain.Stdin = e.stdin + } + exitc := make(chan int, 1) + cmdmain.Exit = func(code int) { + exitc <- code + runtime.Goexit() + } + go func() { + cmdmain.Main() + cmdmain.Exit(0) + }() + select { + case exitCode = <-exitc: + case <-time.After(e.timeout()): + panic("timeout running command") + } + out = outbuf.Bytes() + err = errbuf.Bytes() + return +} + +// TestUsageOnNoargs tests that we output a usage message when given no args, and return +// with a non-zero exit status. +func TestUsageOnNoargs(t *testing.T) { + var e env + out, err, code := e.Run() + if code != 1 { + t.Errorf("exit code = %d; want 1", code) + } + if len(out) != 0 { + t.Errorf("wanted nothing on stdout; got:\n%s", out) + } + if !bytes.Contains(err, []byte("Usage: camput")) { + t.Errorf("stderr doesn't contain usage. Got:\n%s", err) + } +} + +// TestCommandUsage tests that we output a command-specific usage message and return +// with a non-zero exit status. +func TestCommandUsage(t *testing.T) { + var e env + out, err, code := e.Run("attr") + if code != 1 { + t.Errorf("exit code = %d; want 1", code) + } + if len(out) != 0 { + t.Errorf("wanted nothing on stdout; got:\n%s", out) + } + sub := "Attr takes 3 args: " + if !bytes.Contains(err, []byte(sub)) { + t.Errorf("stderr doesn't contain substring %q. Got:\n%s", sub, err) + } +} + +func TestUploadingChangingDirectory(t *testing.T) { + // TODO(bradfitz): + // $ mkdir /tmp/somedir + // $ cp dev-camput /tmp/somedir + // $ ./dev-camput -file /tmp/somedir/ 2>&1 | tee /tmp/somedir/log + // ... verify it doesn't hang. 
+ t.Logf("TODO") +} + +func testWithTempDir(t *testing.T, fn func(tempDir string)) { + tempDir, err := ioutil.TempDir("", "") + if err != nil { + t.Errorf("error creating temp dir: %v", err) + return + } + defer os.RemoveAll(tempDir) + + confDir := filepath.Join(tempDir, "conf") + mustMkdir(t, confDir, 0700) + defer os.Setenv("CAMLI_CONFIG_DIR", os.Getenv("CAMLI_CONFIG_DIR")) + os.Setenv("CAMLI_CONFIG_DIR", confDir) + if err := ioutil.WriteFile(filepath.Join(confDir, "client-config.json"), []byte("{}"), 0644); err != nil { + t.Fatal(err) + } + + debugFlagOnce.Do(registerDebugFlags) + + fn(tempDir) +} + +// Tests that uploads of deep directory trees don't deadlock. +// See commit ee4550bff453526ebae460da1ad59f6e7f3efe77 for backstory +func TestUploadDirectories(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + testWithTempDir(t, func(tempDir string) { + uploadRoot := filepath.Join(tempDir, "to_upload") // read from here + mustMkdir(t, uploadRoot, 0700) + + blobDestDir := filepath.Join(tempDir, "blob_dest") // write to here + mustMkdir(t, blobDestDir, 0700) + + // There are 10 stat cache workers. Simulate a slow lookup in + // the file-based ones (similar to reality), so the + // directory-based nodes make it to the upload worker first + // (where it would currently/previously deadlock waiting on + // children that are starved out) See + // ee4550bff453526ebae460da1ad59f6e7f3efe77. 
+ testHookStatCache = func(el interface{}, ok bool) { + if !ok { + return + } + if ok && strings.HasSuffix(el.(*node).fullPath, ".txt") { + time.Sleep(50 * time.Millisecond) + } + } + defer func() { testHookStatCache = nil }() + + dirIter := uploadRoot + for i := 0; i < 2; i++ { + dirPath := filepath.Join(dirIter, "dir") + mustMkdir(t, dirPath, 0700) + for _, baseFile := range []string{"file.txt", "FILE.txt"} { + filePath := filepath.Join(dirPath, baseFile) + if err := ioutil.WriteFile(filePath, []byte("some file contents "+filePath), 0600); err != nil { + t.Fatalf("error writing to %s: %v", filePath, err) + } + t.Logf("Wrote file %s", filePath) + } + dirIter = dirPath + } + + // Now set statCacheWorkers greater than uploadWorkers, so the + // sleep above can re-arrange the order that files get + // uploaded in, so the directory comes before the file. This + // was the old deadlock. + defer setAndRestore(&uploadWorkers, 1)() + defer setAndRestore(&dirUploadWorkers, 1)() + defer setAndRestore(&statCacheWorkers, 5)() + + e := &env{ + Timeout: 5 * time.Second, + } + stdout, stderr, exit := e.Run( + "--blobdir="+blobDestDir, + "--havecache=false", + "--verbose=false", // useful to set true for debugging + "file", + uploadRoot) + if exit != 0 { + t.Fatalf("Exit status %d: stdout=[%s], stderr=[%s]", exit, stdout, stderr) + } + }) +} + +func TestCamputBlob(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + testWithTempDir(t, func(tempDir string) { + blobDestDir := filepath.Join(tempDir, "blob_dest") // write to here + mustMkdir(t, blobDestDir, 0700) + + e := &env{ + Timeout: 5 * time.Second, + stdin: strings.NewReader("foo"), + } + stdout, stderr, exit := e.Run( + "--blobdir="+blobDestDir, + "--havecache=false", + "--verbose=false", // useful to set true for debugging + "blob", "-") + if exit != 0 { + t.Fatalf("Exit status %d: stdout=[%s], stderr=[%s]", exit, stdout, stderr) + } + if got, want := string(stdout), 
"sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33\n"; got != want { + t.Errorf("Stdout = %q; want %q", got, want) + } + }) +} + +func mustMkdir(t *testing.T, fn string, mode int) { + if err := os.Mkdir(fn, 0700); err != nil { + t.Errorf("error creating dir %s: %v", fn, err) + } +} + +func setAndRestore(dst *int, v int) func() { + old := *dst + *dst = v + return func() { *dst = old } +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/delete.go b/vendor/github.com/camlistore/camlistore/cmd/camput/delete.go new file mode 100644 index 00000000..4efa0b7f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/delete.go @@ -0,0 +1,70 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/schema" +) + +type deleteCmd struct { + up *Uploader +} + +func init() { + cmdmain.RegisterCommand("delete", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(deleteCmd) + return cmd + }) +} + +func (c *deleteCmd) Describe() string { + return "Create and upload a delete claim." 
+} + +func (c *deleteCmd) Usage() { + cmdmain.Errorf("Usage: camput [globalopts] delete [blobref2]...") +} + +func (c *deleteCmd) RunCommand(args []string) error { + if len(args) < 1 { + return cmdmain.UsageError("Need at least one blob to delete.") + } + if err := delete(args); err != nil { + return err + } + return nil +} + +func delete(args []string) error { + for _, arg := range args { + br, ok := blob.Parse(arg) + if !ok { + return fmt.Errorf("Error parsing blobref %q", arg) + } + bb := schema.NewDeleteClaim(br) + put, err := getUploader().UploadAndSignBlob(bb) + if err := handleResult(bb.Type(), put, err); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/discard.go b/vendor/github.com/camlistore/camlistore/cmd/camput/discard.go new file mode 100644 index 00000000..5bee98cf --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/discard.go @@ -0,0 +1,48 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "io" + "io/ioutil" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/context" +) + +type discardStorage struct { + blobserver.NoImplStorage +} + +func (discardStorage) ReceiveBlob(br blob.Ref, r io.Reader) (sb blob.SizedRef, err error) { + n, err := io.Copy(ioutil.Discard, r) + return blob.SizedRef{br, uint32(n)}, err +} + +func (discardStorage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error { + return nil +} + +func (discardStorage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error { + defer close(dest) + return nil +} + +func (discardStorage) RemoveBlobs(blobs []blob.Ref) error { + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/doc.go b/vendor/github.com/camlistore/camlistore/cmd/camput/doc.go new file mode 100644 index 00000000..991c0a5f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/doc.go @@ -0,0 +1,74 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +The camput tool mainly pushes blobs, files, and directories. It can also perform various related tasks, such as setting tags, creating permanodes, and creating share blobs. + + +Usage: + + camput [globalopts] [commandopts] [commandargs] + +Modes: + + delete: Create and upload a delete claim. + attr: Add, set, or delete a permanode's attribute. + file: Upload file(s). 
+ init: Initialize the camput configuration file. With no option, it tries to use the GPG key found in the default identity secret ring. + permanode: Create and upload a permanode. + rawobj: Upload a custom JSON schema blob. + share: Grant access to a resource by making a "share" blob. + blob: Upload raw blob(s). + +Examples: + + camput file [opts] (raw, without any metadata) + camput blob - (read from stdin) + + camput permanode (create a new permanode) + camput permanode -name="Some Name" -tag=foo,bar (with attributes added) + + camput init + camput init --gpgkey=XXXXX + + camput share [opts] + + camput rawobj (debug command) + + camput attr Set attribute + camput attr --add Adds attribute (e.g. "tag") + camput attr --del [] Deletes named attribute [value + +For mode-specific help: + + camput -help + +Global options: + -help=false: print usage + -secret-keyring="~/.gnupg/secring.gpg": GnuPG secret keyring file to use. + -server="": Camlistore server prefix. If blank, the default from the "server" field of + ~/.camlistore/config is used. + Acceptable forms: https://you.example.com, example.com:1345 (https assumed), + or http://you.example.com/alt-root + -verbose=false: extra debug logging + -verbose_http=false: show HTTP request summaries + -version=false: show version +*/ +package main diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/files.go b/vendor/github.com/camlistore/camlistore/cmd/camput/files.go new file mode 100644 index 00000000..5290136d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/files.go @@ -0,0 +1,1198 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bufio" + "crypto/sha1" + "errors" + "flag" + "fmt" + "hash" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "camlistore.org/internal/chanworker" + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + statspkg "camlistore.org/pkg/blobserver/stats" + "camlistore.org/pkg/client" + "camlistore.org/pkg/client/android" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/schema" +) + +type fileCmd struct { + title string + tag string + + makePermanode bool // make new, unique permanode of the root (dir or file) + filePermanodes bool // make planned permanodes for each file (based on their digest) + vivify bool + exifTime bool // use metadata (such as in EXIF) to find the creation time of the file + capCtime bool // use mtime as creation time of the file, if it would be bigger than modification time + diskUsage bool // show "du" disk usage only (dry run mode), don't actually upload + argsFromInput bool // Android mode: filenames piped into stdin, one at a time. + deleteAfterUpload bool // with fileNodes, deletes the input file once uploaded + contentsOnly bool // do not store any of the file's attributes, only its contents. + + statcache bool + + // Go into in-memory stats mode only; doesn't actually upload. + memstats bool + histo string // optional histogram output filename +} + +var flagUseSQLiteChildCache bool // Use sqlite for the statcache and havecache. 
+ +var ( + uploadWorkers = 5 // concurrent upload workers (negative means unbounded: memory hog) + dirUploadWorkers = 3 // concurrent directory uploading workers + statCacheWorkers = 5 // concurrent statcache workers +) + +func init() { + cmdmain.RegisterCommand("file", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(fileCmd) + flags.BoolVar(&cmd.makePermanode, "permanode", false, "Create an associate a new permanode for the uploaded file or directory.") + flags.BoolVar(&cmd.filePermanodes, "filenodes", false, "Create (if necessary) content-based permanodes for each uploaded file.") + flags.BoolVar(&cmd.deleteAfterUpload, "delete_after_upload", false, "If using -filenodes, deletes files once they're uploaded, or if they've already been uploaded.") + flags.BoolVar(&cmd.vivify, "vivify", false, + "If true, ask the server to create and sign permanode(s) associated with each uploaded"+ + " file. This permits the server to have your signing key. Used mostly with untrusted"+ + " or at-risk clients, such as phones.") + flags.BoolVar(&cmd.exifTime, "exiftime", false, "Try to use metadata (such as EXIF) to get a stable creation time. If found, used as the replacement for the modtime. Mainly useful with vivify or filenodes.") + flags.StringVar(&cmd.title, "title", "", "Optional title attribute to set on permanode when using -permanode.") + flags.StringVar(&cmd.tag, "tag", "", "Optional tag(s) to set on permanode when using -permanode or -filenodes. Single value or comma separated.") + + flags.BoolVar(&cmd.diskUsage, "du", false, "Dry run mode: only show disk usage information, without upload or statting dest. Used for testing skipDirs configs, mostly.") + + if debug, _ := strconv.ParseBool(os.Getenv("CAMLI_DEBUG")); debug { + flags.BoolVar(&cmd.statcache, "statcache", true, "(debug flag) Use the stat cache, assuming unchanged files already uploaded in the past are still there. 
Fast, but potentially dangerous.") + flags.BoolVar(&cmd.memstats, "debug-memstats", false, "(debug flag) Enter debug in-memory mode; collecting stats only. Doesn't upload anything.") + flags.StringVar(&cmd.histo, "debug-histogram-file", "", "(debug flag) Optional file to create and write the blob size for each file uploaded. For use with GNU R and hist(read.table(\"filename\")$V1). Requires debug-memstats.") + flags.BoolVar(&cmd.capCtime, "capctime", false, "(debug flag) For file blobs use file modification time as creation time if it would be bigger (newer) than modification time. For stable filenode creation (you can forge mtime, but can't forge ctime).") + flags.BoolVar(&flagUseSQLiteChildCache, "sqlitecache", false, "(debug flag) Use sqlite for the statcache and havecache instead of a flat cache.") + flags.BoolVar(&cmd.contentsOnly, "contents_only", false, "(debug flag) Do not store any of the file's attributes. We write only the file's contents (the blobRefs for its parts) to the created file schema.") + } else { + cmd.statcache = true + } + if android.IsChild() { + flags.BoolVar(&cmd.argsFromInput, "stdinargs", false, "If true, filenames to upload are sent one-per-line on stdin. EOF means to quit the process with exit status 0.") + // limit number of goroutines to limit memory + uploadWorkers = 2 + dirUploadWorkers = 2 + statCacheWorkers = 2 + } + flagCacheLog = flags.Bool("logcache", false, "log caching details") + + return cmd + }) +} + +func (c *fileCmd) Describe() string { + return "Upload file(s)." 
+} + +func (c *fileCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, "Usage: camput [globalopts] file [fileopts] \n") +} + +func (c *fileCmd) Examples() []string { + return []string{ + "[opts] 0 { + return errors.New("args not supported with -argsfrominput") + } + tu := up.NewRootlessTreeUpload() + tu.Start() + br := bufio.NewReader(os.Stdin) + for { + path, err := br.ReadString('\n') + if path = strings.TrimSpace(path); path != "" { + tu.Enqueue(path) + } + if err == io.EOF { + android.PreExit() + os.Exit(0) + } + if err != nil { + log.Fatal(err) + } + } + } + + if len(args) == 0 { + return cmdmain.UsageError("No files or directories given.") + } + if up.statCache != nil { + defer up.statCache.Close() + } + for _, filename := range args { + fi, err := os.Stat(filename) + if err != nil { + return err + } + // Skip ignored files or base directories. Failing to skip the + // latter results in a panic. + if up.Client.IsIgnoredFile(filename) { + log.Printf("Client configured to ignore %s; skipping.", filename) + continue + } + if fi.IsDir() { + if up.fileOpts.wantVivify() { + vlog.Printf("Directories not supported in vivify mode; skipping %v\n", filename) + continue + } + t := up.NewTreeUpload(filename) + t.Start() + lastPut, err = t.Wait() + } else { + lastPut, err = up.UploadFile(filename) + if err == nil && c.deleteAfterUpload { + if err := os.Remove(filename); err != nil { + log.Printf("Error deleting %v: %v", filename, err) + } else { + log.Printf("Deleted %v", filename) + } + } + } + if handleResult("file", lastPut, err) != nil { + return err + } + } + + if permaNode != nil && lastPut != nil { + put, err := up.UploadAndSignBlob(schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", lastPut.BlobRef.String())) + if handleResult("claim-permanode-content", put, err) != nil { + return err + } + if c.title != "" { + put, err := up.UploadAndSignBlob(schema.NewSetAttributeClaim(permaNode.BlobRef, "title", c.title)) + handleResult("claim-permanode-title", put, err) + } 
+		if c.tag != "" {
+			tags := strings.Split(c.tag, ",")
+			for _, tag := range tags {
+				m := schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag)
+				put, err := up.UploadAndSignBlob(m)
+				handleResult("claim-permanode-tag", put, err)
+			}
+		}
+		handleResult("permanode", permaNode, nil)
+	}
+	return nil
+}
+
+// initCaches sets up the local stat cache on up, unless caching was
+// disabled (-statcache=false) or a local blob directory is in use.
+// A failure to learn the server's storage generation disables caching
+// with a warning rather than aborting.
+func (c *fileCmd) initCaches(up *Uploader) {
+	if !c.statcache || *flagBlobDir != "" {
+		return
+	}
+	gen, err := up.StorageGeneration()
+	if err != nil {
+		log.Printf("WARNING: not using local caches; failed to retrieve server's storage generation: %v", err)
+		return
+	}
+	if c.statcache {
+		up.statCache = NewKvStatCache(gen)
+	}
+}
+
+// DumpStats creates the destFile and writes a line per received blob,
+// with its blob size. It also prints a summary line to stdout with the
+// number of blobs and their cumulative size in bytes.
+func DumpStats(sr *statspkg.Receiver, destFile string) {
+	sr.Lock()
+	defer sr.Unlock()
+
+	f, err := os.Create(destFile)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	var sum int64
+	for _, size := range sr.Have {
+		fmt.Fprintf(f, "%d\n", size)
+		// Accumulate the total so the summary below reports the real
+		// byte count instead of always printing zero.
+		sum += int64(size)
+	}
+	fmt.Printf("In-memory blob stats: %d blobs, %d bytes\n", len(sr.Have), sum)
+
+	err = f.Close()
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+// stats tallies a count of files and their cumulative size in bytes.
+type stats struct {
+	files, bytes int64
+}
+
+// incr counts n as one more file and, for non-directories, adds its size.
+func (s *stats) incr(n *node) {
+	s.files++
+	if !n.fi.IsDir() {
+		s.bytes += n.fi.Size()
+	}
+}
+
+func (up *Uploader) lstat(path string) (os.FileInfo, error) {
+	// TODO(bradfitz): use VFS
+	return os.Lstat(path)
+}
+
+// stat stats path via the optional VFS if one is configured, else the OS.
+func (up *Uploader) stat(path string) (os.FileInfo, error) {
+	if up.fs == nil {
+		return os.Stat(path)
+	}
+	f, err := up.fs.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return f.Stat()
+}
+
+// open opens path via the optional VFS if one is configured, else the OS.
+func (up *Uploader) open(path string) (http.File, error) {
+	if up.fs == nil {
+		return os.Open(path)
+	}
+	return up.fs.Open(path)
+}
+
+// directoryStaticSet builds the static-set schema blob listing the
+// already-uploaded blobrefs of n's children, blocking on each child's
+// PutResult.
+func (n *node) directoryStaticSet() (*schema.StaticSet, error) {
+	ss := new(schema.StaticSet)
+	for _, c := range n.children {
+		pr, err := c.PutResult()
+		if err != nil {
+			return nil, fmt.Errorf("Error populating directory static set for child %q: %v", c.fullPath, err)
+		}
+		ss.Add(pr.BlobRef)
+	}
+	return ss, nil
+}
+
+// uploadNode uploads the schema blob for n (and, for directories, its
+// static set). Regular files are delegated to uploadNodeRegularFile.
+func (up *Uploader) uploadNode(n *node) (*client.PutResult, error) {
+	fi := n.fi
+	mode := fi.Mode()
+	if mode&os.ModeType == 0 {
+		return up.uploadNodeRegularFile(n)
+	}
+	bb := schema.NewCommonFileMap(n.fullPath, fi)
+	// Note: a case after "default" is legal Go; cases are still tested
+	// in order and "default" only applies when no case matches.
+	switch {
+	case mode&os.ModeSymlink != 0:
+		// TODO(bradfitz): use VFS here; not os.Readlink
+		target, err := os.Readlink(n.fullPath)
+		if err != nil {
+			return nil, err
+		}
+		bb.SetSymlinkTarget(target)
+	case mode&os.ModeDevice != 0:
+		// including mode & os.ModeCharDevice
+		// NOTE(review): devices fall through and are recorded with
+		// type "socket" — confirm this is intended upstream.
+		fallthrough
+	case mode&os.ModeSocket != 0:
+		bb.SetType("socket")
+	case mode&os.ModeNamedPipe != 0: // fifo
+		bb.SetType("fifo")
+	default:
+		return nil, fmt.Errorf("camput.files: unsupported file type %v for file %v", mode, n.fullPath)
+	case fi.IsDir():
+		ss, err := n.directoryStaticSet()
+		if err != nil {
+			return nil, err
+		}
+		sspr, err := up.UploadBlob(ss)
+		if err != nil {
+			return nil, err
+		}
+		bb.PopulateDirectoryMap(sspr.BlobRef)
+	}
+
+	mappr, err := up.UploadBlob(bb)
+	if err == nil {
+		if !mappr.Skipped {
+			vlog.Printf("Uploaded %q, %s for %s", bb.Type(), mappr.BlobRef, n.fullPath)
+		}
+	} else {
+		vlog.Printf("Error uploading map for %s (%s, %s): %v", n.fullPath, bb.Type(), bb.Blob().BlobRef(), err)
+	}
+	return mappr, err
+
+}
+
+// statReceiver returns the StatReceiver used for checking for and uploading blobs.
+//
+// The optional provided node is only used for conditionally printing out status info to stdout.
+func (up *Uploader) statReceiver(n *node) blobserver.StatReceiver {
+	statReceiver := up.altStatReceiver
+	if statReceiver == nil {
+		// TODO(mpl): simplify the altStatReceiver situation as well,
+		// see TODO in cmd/camput/uploader.go
+		statReceiver = up.Client
+	}
+	if android.IsChild() && n != nil && n.fi.Mode()&os.ModeType == 0 {
+		// Running as a child of the Android client and n is a regular
+		// file: wrap the receiver so per-path status is reported.
+		return android.StatusReceiver{Sr: statReceiver, Path: n.fullPath}
+	}
+	return statReceiver
+}
+
+// noStatReceiver wraps r so every Stat reports "not present",
+// forcing a Receive for each blob. See the noStatReceiver type below.
+func (up *Uploader) noStatReceiver(r blobserver.BlobReceiver) blobserver.StatReceiver {
+	return noStatReceiver{r}
+}
+
+// A noStatReceiver relays Receive calls to the embedded
+// BlobReceiver and treats all Stat calls like the blob doesn't exist.
+//
+// This is used by the client once it's already asked the server that
+// it doesn't have the whole file in some chunk layout already, so we
+// know we're just writing new stuff. For resuming in the middle of
+// larger uploads, it turns out that the pkg/client.Client.Upload
+// already checks the have cache anyway, so going right to mid-chunk
+// receives is fine.
+//
+// TODO(bradfitz): this probably all needs an audit/rationalization/tests
+// to make sure all the players are agreeing on the responsibilities.
+// And maybe the Android stats are wrong, too. (see pkg/client/android's
+// StatReceiver)
+type noStatReceiver struct {
+	blobserver.BlobReceiver
+}
+
+// StatBlobs sends nothing to dest: it reports that none of the
+// requested blobs are already present.
+func (noStatReceiver) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
+	return nil
+}
+
+var atomicDigestOps int64 // number of files digested
+
+// wholeFileDigest returns the sha1 digest of the regular file's absolute
+// path given in fullPath.
+func (up *Uploader) wholeFileDigest(fullPath string) (blob.Ref, error) {
+	// TODO(bradfitz): cache this.
+ file, err := up.open(fullPath) + if err != nil { + return blob.Ref{}, err + } + defer file.Close() + td := &trackDigestReader{r: file} + _, err = io.Copy(ioutil.Discard, td) + atomic.AddInt64(&atomicDigestOps, 1) + if err != nil { + return blob.Ref{}, err + } + return blob.MustParse(td.Sum()), nil +} + +var noDupSearch, _ = strconv.ParseBool(os.Getenv("CAMLI_NO_FILE_DUP_SEARCH")) + +// fileMapFromDuplicate queries the server's search interface for an +// existing file with an entire contents of sum (a blobref string). +// If the server has it, it's validated, and then fileMap (which must +// already be partially populated) has its "parts" field populated, +// and then fileMap is uploaded (if necessary) and a PutResult with +// its blobref is returned. If there's any problem, or a dup doesn't +// exist, ok is false. +// If required, Vivify is also done here. +func (up *Uploader) fileMapFromDuplicate(bs blobserver.StatReceiver, fileMap *schema.Builder, sum string) (pr *client.PutResult, ok bool) { + if noDupSearch { + return + } + _, err := up.Client.SearchRoot() + if err != nil { + return + } + dupFileRef, err := up.Client.SearchExistingFileSchema(blob.MustParse(sum)) + if err != nil { + log.Printf("Warning: error searching for already-uploaded copy of %s: %v", sum, err) + return nil, false + } + if !dupFileRef.Valid() { + return nil, false + } + if *cmdmain.FlagVerbose { + log.Printf("Found dup of contents %s in file schema %s", sum, dupFileRef) + } + dupMap, err := up.Client.FetchSchemaBlob(dupFileRef) + if err != nil { + log.Printf("Warning: error fetching %v: %v", dupFileRef, err) + return nil, false + } + + fileMap.PopulateParts(dupMap.PartsSize(), dupMap.ByteParts()) + + json, err := fileMap.JSON() + if err != nil { + return nil, false + } + uh := client.NewUploadHandleFromString(json) + if up.fileOpts.wantVivify() { + uh.Vivify = true + } + if !uh.Vivify && uh.BlobRef == dupFileRef { + // Unchanged (same filename, modtime, JSON serialization, etc) + return 
&client.PutResult{BlobRef: dupFileRef, Size: uint32(len(json)), Skipped: true}, true + } + pr, err = up.Upload(uh) + if err != nil { + log.Printf("Warning: error uploading file map after finding server dup of %v: %v", sum, err) + return nil, false + } + return pr, true +} + +func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) { + var filebb *schema.Builder + if up.fileOpts.contentsOnly { + filebb = schema.NewFileMap("") + } else { + filebb = schema.NewCommonFileMap(n.fullPath, n.fi) + } + filebb.SetType("file") + + up.fdGate.Start() + defer up.fdGate.Done() + + file, err := up.open(n.fullPath) + if err != nil { + return nil, err + } + defer file.Close() + if !up.fileOpts.contentsOnly { + if up.fileOpts.exifTime { + ra, ok := file.(io.ReaderAt) + if !ok { + return nil, errors.New("Error asserting local file to io.ReaderAt") + } + modtime, err := schema.FileTime(ra) + if err != nil { + log.Printf("warning: getting time from EXIF failed for %v: %v", n.fullPath, err) + } else { + filebb.SetModTime(modtime) + } + } + if up.fileOpts.capCtime { + filebb.CapCreationTime() + } + } + + var ( + size = n.fi.Size() + fileContents io.Reader = io.LimitReader(file, size) + br blob.Ref // of file schemaref + sum string // sha1 hashsum of the file to upload + pr *client.PutResult // of the final "file" schema blob + ) + + const dupCheckThreshold = 256 << 10 + if size > dupCheckThreshold { + sumRef, err := up.wholeFileDigest(n.fullPath) + if err == nil { + sum = sumRef.String() + ok := false + pr, ok = up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum) + if ok { + br = pr.BlobRef + android.NoteFileUploaded(n.fullPath, !pr.Skipped) + if up.fileOpts.wantVivify() { + // we can return early in that case, because the other options + // are disallowed in the vivify case. + return pr, nil + } + } + } + } + + if up.fileOpts.wantVivify() { + // If vivify wasn't already done in fileMapFromDuplicate. 
+ err := schema.WriteFileChunks(up.noStatReceiver(up.statReceiver(n)), filebb, fileContents) + if err != nil { + return nil, err + } + json, err := filebb.JSON() + if err != nil { + return nil, err + } + br = blob.SHA1FromString(json) + h := &client.UploadHandle{ + BlobRef: br, + Size: uint32(len(json)), + Contents: strings.NewReader(json), + Vivify: true, + } + pr, err = up.Upload(h) + if err != nil { + return nil, err + } + android.NoteFileUploaded(n.fullPath, true) + return pr, nil + } + + if !br.Valid() { + // br still zero means fileMapFromDuplicate did not find the file on the server, + // and the file has not just been uploaded subsequently to a vivify request. + // So we do the full file + file schema upload here. + if sum == "" && up.fileOpts.wantFilePermanode() { + fileContents = &trackDigestReader{r: fileContents} + } + br, err = schema.WriteFileMap(up.noStatReceiver(up.statReceiver(n)), filebb, fileContents) + if err != nil { + return nil, err + } + } + + // The work for those planned permanodes (and the claims) is redone + // everytime we get here (i.e past the stat cache). However, they're + // caught by the have cache, so they won't be reuploaded for nothing + // at least. + if up.fileOpts.wantFilePermanode() { + if td, ok := fileContents.(*trackDigestReader); ok { + sum = td.Sum() + } + // claimTime is both the time of the "claimDate" in the + // JSON claim, as well as the date in the OpenPGP + // header. + // TODO(bradfitz): this is a little clumsy to do by hand. + // There should probably be a method on *Uploader to do this + // from an unsigned schema map. Maybe ditch the schema.Claimer + // type and just have the Uploader override the claimDate. 
+ claimTime, ok := filebb.ModTime() + if !ok { + return nil, fmt.Errorf("couldn't get modtime for file %v", n.fullPath) + } + err = up.uploadFilePermanode(sum, br, claimTime) + if err != nil { + return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err) + } + } + + // TODO(bradfitz): faking a PutResult here to return + // is kinda gross. should instead make a + // blobserver.Storage wrapper type (wrapping + // statReceiver) that can track some of this? or make + // schemaWriteFileMap return it? + json, _ := filebb.JSON() + pr = &client.PutResult{BlobRef: br, Size: uint32(len(json)), Skipped: false} + return pr, nil +} + +// uploadFilePermanode creates and uploads the planned permanode (with sum as a +// fixed key) associated with the file blobref fileRef. +// It also sets the optional tags for this permanode. +func (up *Uploader) uploadFilePermanode(sum string, fileRef blob.Ref, claimTime time.Time) error { + // Use a fixed time value for signing; not using modtime + // so two identical files don't have different modtimes? + // TODO(bradfitz): consider this more? 
+ permaNodeSigTime := time.Unix(0, 0) + permaNode, err := up.UploadPlannedPermanode(sum, permaNodeSigTime) + if err != nil { + return fmt.Errorf("Error uploading planned permanode: %v", err) + } + handleResult("node-permanode", permaNode, nil) + + contentAttr := schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", fileRef.String()) + contentAttr.SetClaimDate(claimTime) + signer, err := up.Signer() + if err != nil { + return err + } + signed, err := contentAttr.SignAt(signer, claimTime) + if err != nil { + return fmt.Errorf("Failed to sign content claim: %v", err) + } + put, err := up.uploadString(signed) + if err != nil { + return fmt.Errorf("Error uploading permanode's attribute: %v", err) + } + + handleResult("node-permanode-contentattr", put, nil) + if tags := up.fileOpts.tags(); len(tags) > 0 { + errch := make(chan error) + for _, tag := range tags { + go func(tag string) { + m := schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag) + m.SetClaimDate(claimTime) + signed, err := m.SignAt(signer, claimTime) + if err != nil { + errch <- fmt.Errorf("Failed to sign tag claim: %v", err) + return + } + put, err := up.uploadString(signed) + if err != nil { + errch <- fmt.Errorf("Error uploading permanode's tag attribute %v: %v", tag, err) + return + } + handleResult("node-permanode-tag", put, nil) + errch <- nil + }(tag) + } + + for _ = range tags { + if e := <-errch; e != nil && err == nil { + err = e + } + } + if err != nil { + return err + } + } + return nil +} + +func (up *Uploader) UploadFile(filename string) (*client.PutResult, error) { + fullPath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + fi, err := up.lstat(fullPath) + if err != nil { + return nil, err + } + + if fi.IsDir() { + panic("must use UploadTree now for directories") + } + n := &node{ + fullPath: fullPath, + fi: fi, + } + + withPermanode := up.fileOpts.wantFilePermanode() + if up.statCache != nil && !up.fileOpts.wantVivify() { + // Note: ignoring cache hits 
if wantVivify, otherwise + // a non-vivify put followed by a vivify one wouldn't + // end up doing the vivify. + if cachedRes, err := up.statCache.CachedPutResult( + up.pwd, n.fullPath, n.fi, withPermanode); err == nil { + return cachedRes, nil + } + } + + pr, err := up.uploadNode(n) + if err == nil && up.statCache != nil { + up.statCache.AddCachedPutResult( + up.pwd, n.fullPath, n.fi, pr, withPermanode) + } + + return pr, err +} + +// NewTreeUpload returns a TreeUpload. It doesn't begin uploading any files until a +// call to Start +func (up *Uploader) NewTreeUpload(dir string) *TreeUpload { + tu := up.NewRootlessTreeUpload() + tu.rootless = false + tu.base = dir + return tu +} + +func (up *Uploader) NewRootlessTreeUpload() *TreeUpload { + return &TreeUpload{ + rootless: true, + base: "", + up: up, + donec: make(chan bool, 1), + errc: make(chan error, 1), + stattedc: make(chan *node, buffered), + } +} + +func (t *TreeUpload) Start() { + go t.run() +} + +type node struct { + tu *TreeUpload // nil if not doing a tree upload + fullPath string + fi os.FileInfo + children []*node + + // cond (and its &mu Lock) guard err and res. + cond sync.Cond // with L being &mu + mu sync.Mutex + err error + res *client.PutResult + + sumBytes int64 // cached value, if non-zero. also guarded by mu. 
+}
+
+// String implements fmt.Stringer for debugging output; safe on a nil node.
+func (n *node) String() string {
+	if n == nil {
+		return "<nil *node>"
+	}
+	return fmt.Sprintf("[node %s, isDir=%v, nchild=%d]", n.fullPath, n.fi.IsDir(), len(n.children))
+}
+
+// SetPutResult records the upload result (or error) for n and wakes any
+// goroutines blocked in PutResult. It must be called exactly once per node;
+// calling it twice, or with (nil, nil), panics.
+func (n *node) SetPutResult(res *client.PutResult, err error) {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+	if res == nil && err == nil {
+		panic("SetPutResult called with (nil, nil)")
+	}
+	if n.res != nil || n.err != nil {
+		panic("SetPutResult called twice on node " + n.fullPath)
+	}
+	n.res, n.err = res, err
+	// Broadcast rather than Signal: if more than one goroutine is ever
+	// blocked in PutResult, Signal would wake only one of them and the
+	// rest would block forever, since the condition is set exactly once.
+	// With a single waiter the two are equivalent.
+	n.cond.Broadcast()
+}
+
+// PutResult blocks until SetPutResult has been called for n, then returns
+// the recorded result and error.
+func (n *node) PutResult() (*client.PutResult, error) {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+	for n.err == nil && n.res == nil {
+		n.cond.Wait()
+	}
+	return n.res, n.err
+}
+
+// SumBytes returns the total size of the regular files in the tree rooted
+// at n. The value is computed once and cached in n.sumBytes (guarded by mu).
+func (n *node) SumBytes() (v int64) {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+	if n.sumBytes != 0 {
+		return n.sumBytes
+	}
+	for _, c := range n.children {
+		v += c.SumBytes()
+	}
+	if n.fi.Mode()&os.ModeType == 0 {
+		v += n.fi.Size()
+	}
+	n.sumBytes = v
+	return
+}
+
+/*
+A TreeUpload holds the state of an ongoing recursive directory tree
+upload. Call Wait to get the final result.
+
+Uploading a directory tree involves several concurrent processes, each
+which may involve multiple goroutines:
+
+1) one process stats all files and walks all directories as fast as possible
+   to calculate how much total work there will be. this goroutine also
+   filters out directories to be skipped. (caches, temp files, skipDirs, etc)
+
+2) one process works though the files that were discovered and checks
+   the statcache to see what actually needs to be uploaded.
+   The statcache is
+   full path => {last os.FileInfo signature, put result from last time}
+   and is used to avoid re-reading/digesting the file even locally,
+   trusting that it's already on the server.
+
+3) one process uploads files & metadata. This process checks the "havecache"
+   to see which blobs are already on the server. For awhile the local havecache
+   (if configured) and the remote blobserver "stat" RPC are raced to determine
+   if the local havecache is even faster. If not, it's not consulted. But if the
+   latency of remote stats is high enough, checking locally is preferred.
+*/
+type TreeUpload struct {
+	// If DiskUsageMode is set true before Start, only
+	// per-directory disk usage stats are output, like the "du"
+	// command.
+	DiskUsageMode bool
+
+	// Immutable:
+	rootless bool   // if true, "base" will be empty.
+	base     string // base directory
+	up       *Uploader
+	stattedc chan *node // from stat-the-world goroutine to run()
+
+	donec chan bool // closed when run() finishes
+	err   error
+	errc  chan error // with 1 buffer item
+
+	// Owned by run goroutine:
+	total    stats // total bytes on disk
+	skipped  stats // not even tried to upload (trusting stat cache)
+	uploaded stats // uploaded (even if server said it already had it and bytes weren't sent)
+
+	finalPutRes *client.PutResult // set after run() returns
+}
+
+// Enqueue starts uploading path (a file, directory, etc).
+func (t *TreeUpload) Enqueue(path string) { + t.statPath(path, nil) +} + +// fi is optional (will be statted if nil) +func (t *TreeUpload) statPath(fullPath string, fi os.FileInfo) (nod *node, err error) { + defer func() { + if err == nil && nod != nil { + t.stattedc <- nod + } + }() + if t.up.Client.IsIgnoredFile(fullPath) { + return nil, nil + } + if fi == nil { + fi, err = t.up.lstat(fullPath) + if err != nil { + return nil, err + } + } + n := &node{ + tu: t, + fullPath: fullPath, + fi: fi, + } + n.cond.L = &n.mu + + if !fi.IsDir() { + return n, nil + } + f, err := t.up.open(fullPath) + if err != nil { + return nil, err + } + fis, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Sort(byTypeAndName(fis)) + for _, fi := range fis { + depn, err := t.statPath(filepath.Join(fullPath, filepath.Base(fi.Name())), fi) + if err != nil { + return nil, err + } + if depn != nil { + n.children = append(n.children, depn) + } + } + return n, nil +} + +// testHookStatCache, if non-nil, runs first in the checkStatCache worker. +var testHookStatCache func(el interface{}, ok bool) + +func (t *TreeUpload) run() { + defer close(t.donec) + + // Kick off scanning all files, eventually learning the root + // node (which references all its children). + var root *node // nil until received and set in loop below. 
+ rootc := make(chan *node, 1) + if !t.rootless { + go func() { + n, err := t.statPath(t.base, nil) + if err != nil { + log.Fatalf("Error scanning files under %s: %v", t.base, err) + } + close(t.stattedc) + rootc <- n + }() + } + + var lastStat, lastUpload string + dumpStats := func() { + if android.IsChild() { + printAndroidCamputStatus(t) + return + } + statStatus := "" + if root == nil { + statStatus = fmt.Sprintf("last stat: %s", lastStat) + } + blobStats := t.up.Stats() + log.Printf("FILES: Total: %+v Skipped: %+v Uploaded: %+v %s BLOBS: %s Digested: %d last upload: %s", + t.total, t.skipped, t.uploaded, + statStatus, + blobStats.String(), + atomic.LoadInt64(&atomicDigestOps), + lastUpload) + } + + // Channels for stats & progress bars. These are never closed: + uploadedc := make(chan *node) // at least tried to upload; server might have had blob + skippedc := make(chan *node) // didn't even hit blobserver; trusted our stat cache + + uploadsdonec := make(chan bool) + var upload chan<- interface{} + withPermanode := t.up.fileOpts.wantFilePermanode() + if t.DiskUsageMode { + upload = chanworker.NewWorker(1, func(el interface{}, ok bool) { + if !ok { + uploadsdonec <- true + return + } + n := el.(*node) + if n.fi.IsDir() { + fmt.Printf("%d\t%s\n", n.SumBytes()>>10, n.fullPath) + } + }) + } else { + dirUpload := chanworker.NewWorker(dirUploadWorkers, func(el interface{}, ok bool) { + if !ok { + log.Printf("done uploading directories - done with all uploads.") + uploadsdonec <- true + return + } + n := el.(*node) + put, err := t.up.uploadNode(n) + if err != nil { + log.Fatalf("Error uploading %s: %v", n.fullPath, err) + } + n.SetPutResult(put, nil) + uploadedc <- n + }) + + upload = chanworker.NewWorker(uploadWorkers, func(el interface{}, ok bool) { + if !ok { + log.Printf("done with all uploads.") + close(dirUpload) + return + } + n := el.(*node) + if n.fi.IsDir() { + dirUpload <- n + return + } + put, err := t.up.uploadNode(n) + if err != nil { + 
log.Fatalf("Error uploading %s: %v", n.fullPath, err) + } + n.SetPutResult(put, nil) + if c := t.up.statCache; c != nil { + c.AddCachedPutResult( + t.up.pwd, n.fullPath, n.fi, put, withPermanode) + } + uploadedc <- n + }) + } + + checkStatCache := chanworker.NewWorker(statCacheWorkers, func(el interface{}, ok bool) { + if hook := testHookStatCache; hook != nil { + hook(el, ok) + } + if !ok { + if t.up.statCache != nil { + log.Printf("done checking stat cache") + } + close(upload) + return + } + n := el.(*node) + if t.DiskUsageMode || t.up.statCache == nil { + upload <- n + return + } + if !n.fi.IsDir() { + cachedRes, err := t.up.statCache.CachedPutResult( + t.up.pwd, n.fullPath, n.fi, withPermanode) + if err == nil { + n.SetPutResult(cachedRes, nil) + cachelog.Printf("Cache HIT on %q -> %v", n.fullPath, cachedRes) + android.NoteFileUploaded(n.fullPath, false) + skippedc <- n + return + } + } + upload <- n + }) + + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + + stattedc := t.stattedc +Loop: + for { + select { + case <-uploadsdonec: + break Loop + case n := <-rootc: + root = n + case n := <-uploadedc: + t.uploaded.incr(n) + lastUpload = n.fullPath + case n := <-skippedc: + t.skipped.incr(n) + case n, ok := <-stattedc: + if !ok { + log.Printf("done statting:") + dumpStats() + close(checkStatCache) + stattedc = nil + continue + } + lastStat = n.fullPath + t.total.incr(n) + checkStatCache <- n + case <-ticker.C: + dumpStats() + } + } + + log.Printf("tree upload finished. 
final stats:") + dumpStats() + + if root == nil { + panic("unexpected nil root node") + } + var err error + log.Printf("Waiting on root node %q", root.fullPath) + t.finalPutRes, err = root.PutResult() + log.Printf("Waited on root node %q: %v", root.fullPath, t.finalPutRes) + if err != nil { + t.err = err + } +} + +func (t *TreeUpload) Wait() (*client.PutResult, error) { + <-t.donec + // If an error is waiting and we don't otherwise have one, use it: + if t.err == nil { + select { + case t.err = <-t.errc: + default: + } + } + if t.err == nil && t.finalPutRes == nil { + panic("Nothing ever set t.finalPutRes, but no error set") + } + return t.finalPutRes, t.err +} + +type byTypeAndName []os.FileInfo + +func (s byTypeAndName) Len() int { return len(s) } +func (s byTypeAndName) Less(i, j int) bool { + // files go before directories + if s[i].IsDir() { + if !s[j].IsDir() { + return false + } + } else if s[j].IsDir() { + return true + } + return s[i].Name() < s[j].Name() +} +func (s byTypeAndName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// trackDigestReader is an io.Reader wrapper which records the digest of what it reads. +type trackDigestReader struct { + r io.Reader + h hash.Hash +} + +func (t *trackDigestReader) Read(p []byte) (n int, err error) { + if t.h == nil { + t.h = sha1.New() + } + n, err = t.r.Read(p) + t.h.Write(p[:n]) + return +} + +func (t *trackDigestReader) Sum() string { + return fmt.Sprintf("sha1-%x", t.h.Sum(nil)) +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/init.go b/vendor/github.com/camlistore/camlistore/cmd/camput/init.go new file mode 100644 index 00000000..1460a946 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/init.go @@ -0,0 +1,259 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "path/filepath" + "strings" + + "camlistore.org/pkg/auth" + "camlistore.org/pkg/blob" + "camlistore.org/pkg/client" + "camlistore.org/pkg/client/android" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/jsonsign" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/types/clientconfig" +) + +type initCmd struct { + newKey bool // whether to create a new GPG ring and key. + noconfig bool // whether to generate a client config file. + keyId string // GPG key ID to use. + secretRing string // GPG secret ring file to use. + userPass string // username and password to use when asking a server for the config. + insecureTLS bool // TLS certificate verification disabled +} + +func init() { + cmdmain.RegisterCommand("init", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(initCmd) + flags.BoolVar(&cmd.newKey, "newkey", false, + "Automatically generate a new identity in a new secret ring at the default location (~/.config/camlistore/identity-secring.gpg on linux).") + flags.StringVar(&cmd.keyId, "gpgkey", "", "GPG key ID to use for signing (overrides $GPGKEY environment)") + flags.BoolVar(&cmd.noconfig, "noconfig", false, "Stop after creating the public key blob, and do not try and create a config file.") + flags.StringVar(&cmd.userPass, "userpass", "", "username:password to use when asking a server for a client configuration. 
Requires --server global option.") + flags.BoolVar(&cmd.insecureTLS, "insecure", false, "If set, when getting configuration from a server (with --server and --userpass) over TLS, the server's certificate verification is disabled. Needed when the server is using a self-signed certificate.") + return cmd + }) +} + +func (c *initCmd) Describe() string { + return "Initialize the camput configuration file. With no option, it tries to use the GPG key found in the default identity secret ring." +} + +func (c *initCmd) Usage() { + usage := "Usage: camput [--server host] init [opts]\n\nExamples:\n" + for _, v := range c.usageExamples() { + usage += v + "\n" + } + fmt.Fprintf(cmdmain.Stderr, usage) +} + +func (c *initCmd) usageExamples() []string { + var examples []string + for _, v := range c.Examples() { + examples = append(examples, "camput init "+v) + } + return append(examples, + "camput --server=https://localhost:3179 init --userpass=foo:bar --insecure=true") +} + +func (c *initCmd) Examples() []string { + // TODO(mpl): I can't add the correct -userpass example to that list, because + // it requires the global --server flag, which has to be passed before the + // "init" subcommand. We should have a way to override that. + // Or I could just add a -server flag to the init subcommand, but it sounds + // like a lame hack. + return []string{ + "", + "--gpgkey=XXXXX", + "--newkey #Creates a new identity", + } +} + +// initSecretRing sets c.secretRing. It tries, in this order, the --secret-keyring flag, +// the CAMLI_SECRET_RING env var, then defaults to the operating system dependent location +// otherwise. +// It returns an error if the file does not exist. 
+func (c *initCmd) initSecretRing() error { + if secretRing, ok := osutil.ExplicitSecretRingFile(); ok { + c.secretRing = secretRing + } else { + if android.OnAndroid() { + panic("on android, so CAMLI_SECRET_RING should have been defined, or --secret-keyring used.") + } + c.secretRing = osutil.SecretRingFile() + } + if _, err := os.Stat(c.secretRing); err != nil { + hint := "\nA GPG key is required, please use 'camput init --newkey'.\n\nOr if you know what you're doing, you can set the global camput flag --secret-keyring, or the CAMLI_SECRET_RING env var, to use your own GPG ring. And --gpgkey= or GPGKEY to select which key ID to use." + return fmt.Errorf("Could not use secret ring file %v: %v.\n%v", c.secretRing, err, hint) + } + return nil +} + +// initKeyId sets c.keyId. It checks, in this order, the --gpgkey flag, the GPGKEY env var, +// and in the default identity secret ring. +func (c *initCmd) initKeyId() error { + if k := c.keyId; k != "" { + return nil + } + if k := os.Getenv("GPGKEY"); k != "" { + c.keyId = k + return nil + } + + k, err := jsonsign.KeyIdFromRing(c.secretRing) + if err != nil { + hint := "You can set --gpgkey= or the GPGKEY env var to select which key ID to use.\n" + return fmt.Errorf("No suitable gpg key was found in %v: %v.\n%v", c.secretRing, err, hint) + } + c.keyId = k + log.Printf("Re-using identity with keyId %q found in file %s", c.keyId, c.secretRing) + return nil +} + +func (c *initCmd) getPublicKeyArmored() ([]byte, error) { + entity, err := jsonsign.EntityFromSecring(c.keyId, c.secretRing) + if err != nil { + return nil, fmt.Errorf("Could not find keyId %v in ring %v: %v", c.keyId, c.secretRing, err) + } + pubArmor, err := jsonsign.ArmoredPublicKey(entity) + if err != nil { + return nil, fmt.Errorf("failed to export armored public key ID %q from %v: %v", c.keyId, c.secretRing, err) + } + return []byte(pubArmor), nil +} + +func (c *initCmd) clientConfigFromServer() (*clientconfig.Config, error) { + if c.noconfig { + 
log.Print("--userpass and --noconfig are mutually exclusive") + return nil, cmdmain.ErrUsage + } + server := client.ExplicitServer() + if server == "" { + log.Print("--userpass requires --server") + return nil, cmdmain.ErrUsage + } + fields := strings.Split(c.userPass, ":") + if len(fields) != 2 { + log.Printf("wrong userpass; wanted username:password, got %q", c.userPass) + return nil, cmdmain.ErrUsage + } + + cl := client.NewFromParams(server, auth.NewBasicAuth(fields[0], fields[1])) + cl.InsecureTLS = c.insecureTLS + cl.SetHTTPClient(&http.Client{Transport: cl.TransportForConfig(nil)}) + var cc clientconfig.Config + + helpRoot, err := cl.HelpRoot() + if err != nil { + return nil, err + } + + if err := cl.GetJSON(helpRoot+"?clientConfig=true", &cc); err != nil { + return nil, err + } + return &cc, nil +} + +func (c *initCmd) writeConfig(cc *clientconfig.Config) error { + configFilePath := osutil.UserClientConfigPath() + if _, err := os.Stat(configFilePath); err == nil { + return fmt.Errorf("Config file %q already exists; quitting without touching it.", configFilePath) + } + if err := os.MkdirAll(filepath.Dir(configFilePath), 0700); err != nil { + return err + } + + jsonBytes, err := json.MarshalIndent(cc, "", " ") + if err != nil { + log.Fatalf("JSON serialization error: %v", err) + } + if err := ioutil.WriteFile(configFilePath, jsonBytes, 0600); err != nil { + return fmt.Errorf("could not write client config file %v: %v", configFilePath, err) + } + log.Printf("Wrote %q; modify as necessary.", configFilePath) + return nil + +} + +func (c *initCmd) RunCommand(args []string) error { + if len(args) > 0 { + return cmdmain.ErrUsage + } + + if c.newKey && c.keyId != "" { + log.Fatal("--newkey and --gpgkey are mutually exclusive") + } + + if c.userPass != "" { + cc, err := c.clientConfigFromServer() + if err != nil { + return err + } + return c.writeConfig(cc) + } + + var err error + if c.newKey { + c.secretRing = osutil.DefaultSecretRingFile() + c.keyId, err = 
jsonsign.GenerateNewSecRing(c.secretRing) + if err != nil { + return err + } + } else { + if err := c.initSecretRing(); err != nil { + return err + } + if err := c.initKeyId(); err != nil { + return err + } + } + + pubArmor, err := c.getPublicKeyArmored() + if err != nil { + return err + } + + bref := blob.SHA1FromString(string(pubArmor)) + + log.Printf("Your Camlistore identity (your GPG public key's blobref) is: %s", bref.String()) + + if c.noconfig { + return nil + } + + return c.writeConfig(&clientconfig.Config{ + Servers: map[string]*clientconfig.Server{ + "localhost": { + Server: "http://localhost:3179", + IsDefault: true, + Auth: "localhost", + }, + }, + Identity: c.keyId, + IgnoredFiles: []string{".DS_Store"}, + }) +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/kvcache.go b/vendor/github.com/camlistore/camlistore/cmd/camput/kvcache.go new file mode 100644 index 00000000..15a1b1d1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/kvcache.go @@ -0,0 +1,398 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "log" + "net/url" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/client" + "camlistore.org/pkg/kvutil" + "camlistore.org/pkg/osutil" + "camlistore.org/third_party/github.com/cznic/kv" +) + +var errCacheMiss = errors.New("not in cache") + +// KvHaveCache is a HaveCache on top of a single +// mutable database file on disk using github.com/cznic/kv. +// It stores the blobref in binary as the key, and +// the blobsize in binary as the value. +// Access to the cache is restricted to one process +// at a time with a lock file. Close should be called +// to remove the lock. +type KvHaveCache struct { + filename string + db *kv.DB +} + +func NewKvHaveCache(gen string) *KvHaveCache { + cleanCacheDir() + fullPath := filepath.Join(osutil.CacheDir(), "camput.havecache."+escapeGen(gen)+".kv") + db, err := kvutil.Open(fullPath, nil) + if err != nil { + log.Fatalf("Could not create/open new have cache at %v, %v", fullPath, err) + } + return &KvHaveCache{ + filename: fullPath, + db: db, + } +} + +// Close should be called to commit all the writes +// to the db and to unlock the file. 
+func (c *KvHaveCache) Close() error { + return c.db.Close() +} + +func (c *KvHaveCache) StatBlobCache(br blob.Ref) (size uint32, ok bool) { + if !br.Valid() { + return + } + binBr, _ := br.MarshalBinary() + binVal, err := c.db.Get(nil, binBr) + if err != nil { + log.Fatalf("Could not query have cache %v for %v: %v", c.filename, br, err) + } + if binVal == nil { + cachelog.Printf("have cache MISS on %v", br) + return + } + val, err := strconv.ParseUint(string(binVal), 10, 32) + if err != nil { + log.Fatalf("Could not decode have cache binary value for %v: %v", br, err) + } + if val < 0 { + log.Fatalf("Error decoding have cache binary value for %v: size=%d", br, val) + } + cachelog.Printf("have cache HIT on %v", br) + return uint32(val), true +} + +func (c *KvHaveCache) NoteBlobExists(br blob.Ref, size uint32) { + if !br.Valid() { + return + } + if size < 0 { + log.Fatalf("Got a negative blob size to note in have cache for %v", br) + } + binBr, _ := br.MarshalBinary() + binVal := []byte(strconv.Itoa(int(size))) + cachelog.Printf("Adding to have cache %v: %q", br, binVal) + _, _, err := c.db.Put(nil, binBr, + func(binBr, old []byte) ([]byte, bool, error) { + // We do not overwrite dups + if old != nil { + return nil, false, nil + } + return binVal, true, nil + }) + if err != nil { + log.Fatalf("Could not write %v in have cache: %v", br, err) + } +} + +// KvStatCache is an UploadCache on top of a single +// mutable database file on disk using github.com/cznic/kv. +// It stores a binary combination of an os.FileInfo fingerprint and +// a client.Putresult as the key, and the blobsize in binary as +// the value. +// Access to the cache is restricted to one process +// at a time with a lock file. Close should be called +// to remove the lock. 
+type KvStatCache struct { + filename string + db *kv.DB +} + +func NewKvStatCache(gen string) *KvStatCache { + fullPath := filepath.Join(osutil.CacheDir(), "camput.statcache."+escapeGen(gen)+".kv") + db, err := kvutil.Open(fullPath, nil) + if err != nil { + log.Fatalf("Could not create/open new stat cache at %v, %v", fullPath, err) + } + return &KvStatCache{ + filename: fullPath, + db: db, + } +} + +// Close should be called to commit all the writes +// to the db and to unlock the file. +func (c *KvStatCache) Close() error { + return c.db.Close() +} + +func (c *KvStatCache) CachedPutResult(pwd, filename string, fi os.FileInfo, withPermanode bool) (*client.PutResult, error) { + fullPath := fullpath(pwd, filename) + cacheKey := &statCacheKey{ + Filepath: fullPath, + Permanode: withPermanode, + } + binKey, err := cacheKey.marshalBinary() + binVal, err := c.db.Get(nil, binKey) + if err != nil { + log.Fatalf("Could not query stat cache %v for %q: %v", binKey, fullPath, err) + } + if binVal == nil { + cachelog.Printf("stat cache MISS on %q", binKey) + return nil, errCacheMiss + } + val := &statCacheValue{} + if err = val.unmarshalBinary(binVal); err != nil { + return nil, fmt.Errorf("Bogus stat cached value for %q: %v", binKey, err) + } + fp := fileInfoToFingerprint(fi) + if val.Fingerprint != fp { + cachelog.Printf("cache MISS on %q: stats not equal:\n%#v\n%#v", binKey, val.Fingerprint, fp) + return nil, errCacheMiss + } + cachelog.Printf("stat cache HIT on %q", binKey) + return &val.Result, nil +} + +func (c *KvStatCache) AddCachedPutResult(pwd, filename string, fi os.FileInfo, pr *client.PutResult, withPermanode bool) { + fullPath := fullpath(pwd, filename) + cacheKey := &statCacheKey{ + Filepath: fullPath, + Permanode: withPermanode, + } + val := &statCacheValue{fileInfoToFingerprint(fi), *pr} + + binKey, err := cacheKey.marshalBinary() + if err != nil { + log.Fatalf("Could not add %q to stat cache: %v", binKey, err) + } + binVal, err := val.marshalBinary() + if err 
!= nil { + log.Fatalf("Could not add %q to stat cache: %v", binKey, err) + } + cachelog.Printf("Adding to stat cache %q: %q", binKey, binVal) + _, _, err = c.db.Put(nil, binKey, + func(binKey, old []byte) ([]byte, bool, error) { + // We do not overwrite dups + if old != nil { + return nil, false, nil + } + return binVal, true, nil + }) + if err != nil { + log.Fatalf("Could not add %q to stat cache: %v", binKey, err) + } +} + +type statCacheKey struct { + Filepath string + Permanode bool // whether -filenodes is being used. +} + +// marshalBinary returns a more compact binary +// representation of the contents of sk. +func (sk *statCacheKey) marshalBinary() ([]byte, error) { + if sk == nil { + return nil, errors.New("Can not marshal from a nil stat cache key") + } + data := make([]byte, 0, len(sk.Filepath)+3) + data = append(data, 1) // version number + data = append(data, sk.Filepath...) + data = append(data, '|') + if sk.Permanode { + data = append(data, 1) + } + return data, nil +} + +type statFingerprint string + +type statCacheValue struct { + Fingerprint statFingerprint + Result client.PutResult +} + +// marshalBinary returns a more compact binary +// representation of the contents of scv. 
+func (scv *statCacheValue) marshalBinary() ([]byte, error) { + if scv == nil { + return nil, errors.New("Can not marshal from a nil stat cache value") + } + binBr, _ := scv.Result.BlobRef.MarshalBinary() + // Blob size fits on 4 bytes when binary encoded + data := make([]byte, 0, len(scv.Fingerprint)+1+4+1+len(binBr)) + buf := bytes.NewBuffer(data) + _, err := buf.WriteString(string(scv.Fingerprint)) + if err != nil { + return nil, fmt.Errorf("Could not write fingerprint %v: %v", scv.Fingerprint, err) + } + err = buf.WriteByte('|') + if err != nil { + return nil, fmt.Errorf("Could not write '|': %v", err) + } + err = binary.Write(buf, binary.BigEndian, int32(scv.Result.Size)) + if err != nil { + return nil, fmt.Errorf("Could not write blob size %d: %v", scv.Result.Size, err) + } + err = buf.WriteByte('|') + if err != nil { + return nil, fmt.Errorf("Could not write '|': %v", err) + } + _, err = buf.Write(binBr) + if err != nil { + return nil, fmt.Errorf("Could not write binary blobref %q: %v", binBr, err) + } + return buf.Bytes(), nil +} + +var pipe = []byte("|") + +func (scv *statCacheValue) unmarshalBinary(data []byte) error { + if scv == nil { + return errors.New("Can't unmarshalBinary into a nil stat cache value") + } + if scv.Fingerprint != "" { + return errors.New("Can't unmarshalBinary into a non empty stat cache value") + } + + parts := bytes.SplitN(data, pipe, 3) + if len(parts) != 3 { + return fmt.Errorf("Bogus stat cache value; was expecting fingerprint|blobSize|blobRef, got %q", data) + } + fingerprint := string(parts[0]) + buf := bytes.NewReader(parts[1]) + var size int32 + err := binary.Read(buf, binary.BigEndian, &size) + if err != nil { + return fmt.Errorf("Could not decode blob size from stat cache value part %q: %v", parts[1], err) + } + br := new(blob.Ref) + if err := br.UnmarshalBinary(parts[2]); err != nil { + return fmt.Errorf("Could not unmarshalBinary for %q: %v", parts[2], err) + } + + scv.Fingerprint = statFingerprint(fingerprint) + 
scv.Result = client.PutResult{ + BlobRef: *br, + Size: uint32(size), + Skipped: true, + } + return nil +} + +func fullpath(pwd, filename string) string { + var fullPath string + if filepath.IsAbs(filename) { + fullPath = filepath.Clean(filename) + } else { + fullPath = filepath.Join(pwd, filename) + } + return fullPath +} + +func escapeGen(gen string) string { + // Good enough: + return url.QueryEscape(gen) +} + +var cleanSysStat func(v interface{}) interface{} + +func fileInfoToFingerprint(fi os.FileInfo) statFingerprint { + // We calculate the CRC32 of the underlying system stat structure to get + // ctime, owner, group, etc. This is overkill (e.g. we don't care about + // the inode or device number probably), but works. + sysHash := uint32(0) + if sys := fi.Sys(); sys != nil { + if clean := cleanSysStat; clean != nil { + // TODO: don't clean bad fields, but provide a + // portable way to extract all good fields. + // This is a Linux+Mac-specific hack for now. + sys = clean(sys) + } + c32 := crc32.NewIEEE() + fmt.Fprintf(c32, "%#v", sys) + sysHash = c32.Sum32() + } + return statFingerprint(fmt.Sprintf("%dB/%dMOD/sys-%d", fi.Size(), fi.ModTime().UnixNano(), sysHash)) +} + +// Delete stranded lock files and all but the oldest 5 +// havecache/statcache files, unless they're newer than 30 days. 
+func cleanCacheDir() { + dir := osutil.CacheDir() + f, err := os.Open(dir) + if err != nil { + return + } + defer f.Close() + fis, err := f.Readdir(-1) + if err != nil { + return + } + var haveCache, statCache []os.FileInfo + seen := make(map[string]bool) + for _, fi := range fis { + seen[fi.Name()] = true + } + + for name := range seen { + if strings.HasSuffix(name, ".lock") && !seen[strings.TrimSuffix(name, ".lock")] { + os.Remove(filepath.Join(dir, name)) + } + } + + for _, fi := range fis { + if strings.HasSuffix(fi.Name(), ".lock") { + continue + } + if strings.HasPrefix(fi.Name(), "camput.havecache.") { + haveCache = append(haveCache, fi) + continue + } + if strings.HasPrefix(fi.Name(), "camput.statcache.") { + statCache = append(statCache, fi) + continue + } + } + for _, list := range [][]os.FileInfo{haveCache, statCache} { + if len(list) <= 5 { + continue + } + sort.Sort(byModtime(list)) + list = list[:len(list)-5] + for _, fi := range list { + if fi.ModTime().Before(time.Now().Add(-30 * 24 * time.Hour)) { + os.Remove(filepath.Join(dir, fi.Name())) + } + } + } +} + +type byModtime []os.FileInfo + +func (s byModtime) Len() int { return len(s) } +func (s byModtime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byModtime) Less(i, j int) bool { return s[i].ModTime().Before(s[j].ModTime()) } diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/logging.go b/vendor/github.com/camlistore/camlistore/cmd/camput/logging.go new file mode 100644 index 00000000..fa194736 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/logging.go @@ -0,0 +1,42 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "log" + + "camlistore.org/pkg/cmdmain" +) + +type Logger interface { + Printf(format string, args ...interface{}) +} + +type flagLogger struct { + flagPtr **bool +} + +var flagCacheLog *bool + +var vlog = &flagLogger{&cmdmain.FlagVerbose} +var cachelog = &flagLogger{&flagCacheLog} + +func (fl *flagLogger) Printf(format string, args ...interface{}) { + if fl.flagPtr != nil && **fl.flagPtr { + log.Printf(format, args...) + } +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/permanode.go b/vendor/github.com/camlistore/camlistore/cmd/camput/permanode.go new file mode 100644 index 00000000..0be79e68 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/permanode.go @@ -0,0 +1,106 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "errors" + "flag" + "fmt" + "strings" + "time" + + "camlistore.org/pkg/client" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/schema" +) + +type permanodeCmd struct { + title string + tag string + key string // else random + sigTime string +} + +func init() { + cmdmain.RegisterCommand("permanode", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(permanodeCmd) + flags.StringVar(&cmd.title, "title", "", "Optional 'title' attribute to set on new permanode") + flags.StringVar(&cmd.tag, "tag", "", "Optional tag(s) to set on new permanode; comma separated.") + flags.StringVar(&cmd.key, "key", "", "Optional key to create deterministic ('planned') permanodes. Must also use --sigtime.") + flags.StringVar(&cmd.sigTime, "sigtime", "", "Optional time to put in the OpenPGP signature packet instead of the current time. Required when producing a deterministic permanode (with --key). In format YYYY-MM-DD HH:MM:SS") + return cmd + }) +} + +func (c *permanodeCmd) Describe() string { + return "Create and upload a permanode." +} + +func (c *permanodeCmd) Usage() { + cmdmain.Errorf("Usage: camput [globalopts] permanode [permanodeopts]\n") +} + +func (c *permanodeCmd) Examples() []string { + return []string{ + " (create a new permanode)", + `-title="Some Title" -tag=foo,bar (with attributes added)`, + } +} + +func (c *permanodeCmd) RunCommand(args []string) error { + if len(args) > 0 { + return errors.New("Permanode command doesn't take any additional arguments") + } + + var ( + permaNode *client.PutResult + err error + up = getUploader() + ) + if (c.key != "") != (c.sigTime != "") { + return errors.New("Both --key and --sigtime must be used to produce deterministic permanodes.") + } + if c.key == "" { + // Normal case, with a random permanode. 
+ permaNode, err = up.UploadNewPermanode() + } else { + const format = "2006-01-02 15:04:05" + sigTime, err := time.Parse(format, c.sigTime) + if err != nil { + return fmt.Errorf("Error parsing time %q; expecting time of form %q", c.sigTime, format) + } + permaNode, err = up.UploadPlannedPermanode(c.key, sigTime) + } + if handleResult("permanode", permaNode, err) != nil { + return err + } + + if c.title != "" { + put, err := up.UploadAndSignBlob(schema.NewSetAttributeClaim(permaNode.BlobRef, "title", c.title)) + handleResult("claim-permanode-title", put, err) + } + if c.tag != "" { + tags := strings.Split(c.tag, ",") + m := schema.NewSetAttributeClaim(permaNode.BlobRef, "tag", tags[0]) + for _, tag := range tags { + m = schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag) + put, err := up.UploadAndSignBlob(m) + handleResult("claim-permanode-tag", put, err) + } + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/rawobj.go b/vendor/github.com/camlistore/camlistore/cmd/camput/rawobj.go new file mode 100644 index 00000000..559d3315 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/rawobj.go @@ -0,0 +1,82 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "errors" + "flag" + "strings" + + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/schema" +) + +type rawCmd struct { + vals string // pipe separated key=value "camliVersion=1|camliType=foo", etc + signed bool +} + +func init() { + cmdmain.RegisterCommand("rawobj", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(rawCmd) + flags.StringVar(&cmd.vals, "vals", "", "Pipe-separated key=value properties") + flags.BoolVar(&cmd.signed, "signed", true, "whether to sign the JSON object") + return cmd + }) +} + +func (c *rawCmd) Describe() string { + return "Upload a custom JSON schema blob." +} + +func (c *rawCmd) Usage() { + cmdmain.Errorf("Usage: camput [globalopts] rawobj [rawopts]\n") +} + +func (c *rawCmd) Examples() []string { + return []string{"(debug command)"} +} + +func (c *rawCmd) RunCommand(args []string) error { + if len(args) > 0 { + return errors.New("Raw Object command doesn't take any additional arguments") + } + + if c.vals == "" { + return errors.New("No values") + } + + bb := schema.NewBuilder() + for _, kv := range strings.Split(c.vals, "|") { + kv := strings.SplitN(kv, "=", 2) + bb.SetRawStringField(kv[0], kv[1]) + } + + up := getUploader() + if c.signed { + put, err := up.UploadAndSignBlob(bb) + handleResult("raw-object-signed", put, err) + return err + } + cj, err := bb.JSON() + if err != nil { + return err + } + put, err := up.uploadString(cj) + handleResult("raw-object-unsigned", put, err) + return err +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/remove.go b/vendor/github.com/camlistore/camlistore/cmd/camput/remove.go new file mode 100644 index 00000000..f5db0d03 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/remove.go @@ -0,0 +1,56 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/cmdmain" +) + +type removeCmd struct{} + +func init() { + cmdmain.RegisterCommand("remove", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(removeCmd) + return cmd + }) +} + +func (c *removeCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, `Usage: camput remove + +This command is for debugging only. You're not expected to use it in practice. +`) +} + +func (c *removeCmd) RunCommand(args []string) error { + if len(args) == 0 { + return cmdmain.ErrUsage + } + refs := make([]blob.Ref, 0, len(args)) + for _, s := range args { + br, ok := blob.Parse(s) + if !ok { + return fmt.Errorf("Invalid blobref %q", s) + } + refs = append(refs, br) + } + return getUploader().RemoveBlobs(refs) +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/share.go b/vendor/github.com/camlistore/camlistore/cmd/camput/share.go new file mode 100644 index 00000000..7cf362d4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/share.go @@ -0,0 +1,93 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/search" +) + +type shareCmd struct { + search string + transitive bool + duration time.Duration // zero means forever +} + +func init() { + cmdmain.RegisterCommand("share", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(shareCmd) + flags.StringVar(&cmd.search, "search", "", "share a search result, rather than a single blob. Should be the JSON representation of a search.SearchQuery (see https://camlistore.org/pkg/search/#SearchQuery for details). Exclusive with, and overrides the parameter.") + flags.BoolVar(&cmd.transitive, "transitive", false, "share everything reachable from the given blobref") + flags.DurationVar(&cmd.duration, "duration", 0, "how long the share claim is valid for. The default of 0 means forever. 
For valid formats, see http://golang.org/pkg/time/#ParseDuration") + return cmd + }) +} + +func (c *shareCmd) Describe() string { + return `Grant access to a resource or search by making a "share" blob.` +} + +func (c *shareCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, `Usage: camput share [opts] [] +`) +} + +func (c *shareCmd) Examples() []string { + return []string{ + "-transitive sha1-83896fcb182db73b653181652129d739280766b5", + `-search='{"expression":"tag:blogphotos is:image","limit":42}'`, + } +} + +func (c *shareCmd) RunCommand(args []string) error { + unsigned := schema.NewShareRef(schema.ShareHaveRef, c.transitive) + + if c.search != "" { + if len(args) != 0 { + return cmdmain.UsageError("when using the -search flag, share takes zero arguments") + } + var q search.SearchQuery + if err := json.Unmarshal([]byte(c.search), &q); err != nil { + return cmdmain.UsageError(fmt.Sprintf("invalid search: %s", err)) + } + unsigned.SetShareSearch(&q) + } else { + if len(args) != 1 { + return cmdmain.UsageError("share takes at most one argument") + } + target, ok := blob.Parse(args[0]) + if !ok { + return cmdmain.UsageError("invalid blobref") + } + unsigned.SetShareTarget(target) + } + + if c.duration != 0 { + unsigned.SetShareExpiration(time.Now().Add(c.duration)) + } + + pr, err := getUploader().UploadAndSignBlob(unsigned) + handleResult("share", pr, err) + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/stat_darwin.go b/vendor/github.com/camlistore/camlistore/cmd/camput/stat_darwin.go new file mode 100644 index 00000000..688b8f64 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/stat_darwin.go @@ -0,0 +1,18 @@ +//+build darwin + +package main + +import ( + "syscall" +) + +func init() { + cleanSysStat = func(si interface{}) interface{} { + st, ok := si.(*syscall.Stat_t) + if !ok { + return si + } + st.Atimespec = syscall.Timespec{} + return st + } +} diff --git 
a/vendor/github.com/camlistore/camlistore/cmd/camput/stat_linux.go b/vendor/github.com/camlistore/camlistore/cmd/camput/stat_linux.go new file mode 100644 index 00000000..318a1eba --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/stat_linux.go @@ -0,0 +1,22 @@ +//+build linux + +// TODO: move this to somewhere generic in osutil; use it for all +// posix-y operation systems? Or rather, don't clean bad fields, but +// provide a portable way to extract all good fields. + +package main + +import ( + "syscall" +) + +func init() { + cleanSysStat = func(si interface{}) interface{} { + st, ok := si.(*syscall.Stat_t) + if !ok { + return si + } + st.Atim = syscall.Timespec{} + return st + } +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camput/uploader.go b/vendor/github.com/camlistore/camlistore/cmd/camput/uploader.go new file mode 100644 index 00000000..030f479b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camput/uploader.go @@ -0,0 +1,95 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "net/http" + "strings" + + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/client" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/syncutil" +) + +// TODO(mpl): move Uploader to pkg/client, or maybe its own pkg, and clean up files.go + +type Uploader struct { + *client.Client + + // fdGate guards gates the creation of file descriptors. 
+ fdGate *syncutil.Gate + + fileOpts *fileOptions // per-file options; may be nil + + // for debugging; normally nil, but overrides Client if set + // TODO(bradfitz): clean this up? embed a StatReceiver instead + // of a Client? + altStatReceiver blobserver.StatReceiver + + transport *httputil.StatsTransport // for HTTP statistics + pwd string + statCache UploadCache + haveCache HaveCache + + fs http.FileSystem // virtual filesystem to read from; nil means OS filesystem. +} + +// possible options when uploading a file +type fileOptions struct { + permanode bool // create a content-based permanode for each uploaded file + // tag is an optional tag or comma-delimited tags to apply to + // the above permanode. + tag string + // perform for the client the actions needing gpg signing when uploading a file. + vivify bool + exifTime bool // use the time in exif metadata as the modtime if possible. + capCtime bool // use mtime as ctime if ctime > mtime + contentsOnly bool // do not store any of the file's attributes, only its contents. 
+} + +func (o *fileOptions) tags() []string { + if o == nil || o.tag == "" { + return nil + } + return strings.Split(o.tag, ",") +} + +func (o *fileOptions) wantFilePermanode() bool { + return o != nil && o.permanode +} + +func (o *fileOptions) wantVivify() bool { + return o != nil && o.vivify +} + +func (o *fileOptions) wantCapCtime() bool { + return o != nil && o.capCtime +} + +func (up *Uploader) uploadString(s string) (*client.PutResult, error) { + return up.Upload(client.NewUploadHandleFromString(s)) +} + +func (up *Uploader) Close() error { + var grp syncutil.Group + if up.haveCache != nil { + grp.Go(up.haveCache.Close) + } + grp.Go(up.Client.Close) + return grp.Err() +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/.gitignore b/vendor/github.com/camlistore/camlistore/cmd/camtool/.gitignore new file mode 100644 index 00000000..bb08a8d5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/.gitignore @@ -0,0 +1 @@ +camtool diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/camtool.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/camtool.go new file mode 100644 index 00000000..148dec8a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/camtool.go @@ -0,0 +1,53 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "log" + "net/http" + + "camlistore.org/pkg/client" + "camlistore.org/pkg/cmdmain" +) + +func main() { + cmdmain.Main() +} + +const serverFlagHelp = "Format is is either a URL prefix (with optional path), a host[:port], a config file server alias, or blank to use the Camlistore client config's default server." + +// newClient returns a Camlistore client for the server. +// The server may be: +// * blank, to use the default in the config file +// * an alias, to use that named alias in the config file +// * host:port +// * https?://host[:port][/path] +func newClient(server string) *client.Client { + var cl *client.Client + if server == "" { + cl = client.NewOrFail() + } else { + cl = client.New(server) + if err := cl.SetupAuth(); err != nil { + log.Fatalf("Could not setup auth for connecting to %v: %v", server, err) + } + } + cl.SetHTTPClient(&http.Client{ + Transport: cl.TransportForConfig(nil), + }) + return cl +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/claims.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/claims.go new file mode 100644 index 00000000..09fa4ea6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/claims.go @@ -0,0 +1,79 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "os" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/search" +) + +type claimsCmd struct { + server string + attr string +} + +func init() { + cmdmain.RegisterCommand("claims", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(claimsCmd) + flags.StringVar(&cmd.server, "server", "", "Server to fetch claims from. "+serverFlagHelp) + flags.StringVar(&cmd.attr, "attr", "", "Filter claims about a specific attribute. If empty, all claims are returned.") + return cmd + }) +} + +func (c *claimsCmd) Describe() string { + return "Ask the search system to list the claims that modify a permanode." +} + +func (c *claimsCmd) Usage() { + fmt.Fprintf(os.Stderr, "Usage: camtool [globalopts] claims [--depth=n] [--attr=s] permanodeBlobRef\n") +} + +func (c *claimsCmd) Examples() []string { + return []string{} +} + +func (c *claimsCmd) RunCommand(args []string) error { + if len(args) != 1 { + return cmdmain.UsageError("requires 1 blobref") + } + br, ok := blob.Parse(args[0]) + if !ok { + return cmdmain.UsageError("invalid blobref") + } + cl := newClient(c.server) + res, err := cl.GetClaims(&search.ClaimsRequest{ + Permanode: br, + AttrFilter: c.attr, + }) + if err != nil { + return err + } + resj, err := json.MarshalIndent(res, "", " ") + if err != nil { + return err + } + resj = append(resj, '\n') + _, err = os.Stdout.Write(resj) + return err +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/dbinit.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/dbinit.go new file mode 100644 index 00000000..61ca30ca --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/dbinit.go @@ -0,0 +1,300 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"database/sql"
+	"errors"
+	"flag"
+	"fmt"
+	"os"
+	"strings"
+
+	"camlistore.org/pkg/cmdmain"
+	"camlistore.org/pkg/sorted/mongo"
+	"camlistore.org/pkg/sorted/mysql"
+	"camlistore.org/pkg/sorted/postgres"
+	"camlistore.org/pkg/sorted/sqlite"
+
+	_ "camlistore.org/third_party/github.com/go-sql-driver/mysql"
+	_ "camlistore.org/third_party/github.com/lib/pq"
+	"camlistore.org/third_party/labix.org/v2/mgo"
+)
+
+type dbinitCmd struct { // flag values for the "dbinit" subcommand
+	user     string
+	password string
+	host     string
+	dbName   string
+	dbType   string
+	sslMode  string // Postgres SSL mode configuration
+
+	wipe bool
+	keep bool
+	wal  bool // Write-Ahead Logging for SQLite
+}
+
+func init() {
+	cmdmain.RegisterCommand("dbinit", func(flags *flag.FlagSet) cmdmain.CommandRunner {
+		cmd := new(dbinitCmd)
+		flags.StringVar(&cmd.user, "user", "root", "Admin user.")
+		flags.StringVar(&cmd.password, "password", "", "Admin password.")
+		flags.StringVar(&cmd.host, "host", "localhost", "host[:port]")
+		flags.StringVar(&cmd.dbName, "dbname", "", "Database to wipe or create. For sqlite, this is the db filename.")
+		flags.StringVar(&cmd.dbType, "dbtype", "mysql", "Which RDMS to use; possible values: mysql, postgres, sqlite, mongo.")
+		flags.StringVar(&cmd.sslMode, "sslmode", "require", "Configure SSL mode for postgres. Possible values: require, verify-full, disable.")
+
+		flags.BoolVar(&cmd.wipe, "wipe", false, "Wipe the database and re-create it?")
+		flags.BoolVar(&cmd.keep, "ignoreexists", false, "Do nothing if database already exists.")
+		// Defaults to true, because it fixes http://camlistore.org/issues/114
+		flags.BoolVar(&cmd.wal, "wal", true, "Enable Write-Ahead Logging with SQLite, for better concurrency. Requires SQLite >= 3.7.0.")
+
+		return cmd
+	})
+}
+
+func (c *dbinitCmd) Describe() string {
+	return "Set up the database for the indexer."
+}
+
+func (c *dbinitCmd) Usage() {
+	fmt.Fprintf(os.Stderr, "Usage: camtool [globalopts] dbinit [dbinitopts] \n")
+}
+
+func (c *dbinitCmd) Examples() []string {
+	return []string{
+		"-user root -password root -host localhost -dbname camliprod -wipe",
+	}
+}
+
+func (c *dbinitCmd) RunCommand(args []string) error { // validate flags, then create/wipe the index database
+	if c.dbName == "" {
+		return cmdmain.UsageError("--dbname flag required")
+	}
+
+	if c.dbType != "mysql" && c.dbType != "postgres" && c.dbType != "mongo" {
+		if c.dbType == "sqlite" {
+			if !WithSQLite {
+				return ErrNoSQLite
+			}
+			c.wal = c.wal && sqlite.IsWALCapable() // silently downgrade -wal if the linked SQLite can't do it
+			if !c.wal {
+				fmt.Print("WARNING: An SQLite indexer without Write Ahead Logging will most likely fail. See http://camlistore.org/issues/114\n")
+			}
+		} else {
+			return cmdmain.UsageError(fmt.Sprintf("--dbtype flag: got %v, want %v", c.dbType, `"mysql" or "postgres" or "sqlite", or "mongo"`))
+		}
+	}
+
+	var rootdb *sql.DB // admin connection; stays nil for sqlite and mongo
+	var err error
+	switch c.dbType {
+	case "postgres":
+		conninfo := fmt.Sprintf("user=%s dbname=%s host=%s password=%s sslmode=%s", c.user, "postgres", c.host, c.password, c.sslMode)
+		rootdb, err = sql.Open("postgres", conninfo)
+	case "mysql":
+		rootdb, err = sql.Open("mysql", c.user+":"+c.password+"@/mysql")
+	}
+	if err != nil {
+		exitf("Error connecting to the root %s database: %v", c.dbType, err)
+	}
+
+	dbname := c.dbName
+	exists := c.dbExists(rootdb) // nil rootdb is safe: the sqlite and mongo cases never use it
+	if exists {
+		if c.keep {
+			return nil
+		}
+		if !c.wipe {
+			return cmdmain.UsageError(fmt.Sprintf("Database %q already exists, but --wipe not given. Stopping.", dbname))
+		}
+		if c.dbType == "mongo" {
+			return c.wipeMongo()
+		}
+		if c.dbType != "sqlite" { // sqlite wipe happens below via os.Create truncation
+			do(rootdb, "DROP DATABASE "+dbname)
+		}
+	}
+	switch c.dbType {
+	case "sqlite":
+		_, err := os.Create(dbname) // creates, or truncates when wiping an existing file
+		if err != nil {
+			exitf("Error creating file %v for sqlite db: %v", dbname, err)
+		}
+	case "mongo":
+		return nil // mongo creates databases/collections lazily; nothing to do
+	case "postgres":
+		// because we want string comparison to work as on MySQL and SQLite.
+		// in particular we want: 'foo|bar' < 'foo}' (which is not the case with an utf8 collation apparently).
+		do(rootdb, "CREATE DATABASE "+dbname+" LC_COLLATE = 'C' TEMPLATE = template0")
+	default:
+		do(rootdb, "CREATE DATABASE "+dbname)
+	}
+
+	var db *sql.DB // per-database connection for schema creation
+	switch c.dbType {
+	case "postgres":
+		conninfo := fmt.Sprintf("user=%s dbname=%s host=%s password=%s sslmode=%s", c.user, dbname, c.host, c.password, c.sslMode)
+		db, err = sql.Open("postgres", conninfo)
+	case "sqlite":
+		db, err = sql.Open("sqlite3", dbname)
+	default:
+		db, err = sql.Open("mysql", c.user+":"+c.password+"@/"+dbname)
+	}
+	if err != nil {
+		return fmt.Errorf("Connecting to the %s %s database: %v", dbname, c.dbType, err)
+	}
+
+	switch c.dbType { // create tables and record the schema version
+	case "postgres":
+		for _, tableSql := range postgres.SQLCreateTables() {
+			do(db, tableSql)
+		}
+		for _, statement := range postgres.SQLDefineReplace() {
+			do(db, statement)
+		}
+		doQuery(db, fmt.Sprintf(`SELECT replaceintometa('version', '%d')`, postgres.SchemaVersion()))
+	case "mysql":
+		if err := mysql.CreateDB(db, dbname); err != nil {
+			exitf("%v", err)
+		}
+		for _, tableSQL := range mysql.SQLCreateTables() {
+			do(db, tableSQL)
+		}
+		do(db, fmt.Sprintf(`REPLACE INTO meta VALUES ('version', '%d')`, mysql.SchemaVersion()))
+	case "sqlite":
+		for _, tableSql := range sqlite.SQLCreateTables() {
+			do(db, tableSql)
+		}
+		if c.wal {
+			do(db, sqlite.EnableWAL())
+		}
+		do(db, fmt.Sprintf(`REPLACE INTO meta VALUES ('version', '%d')`, sqlite.SchemaVersion()))
+	}
+	return nil
+}
+
+func do(db *sql.DB, sql string) { // exec a statement, exiting the program on error
+	_, err := db.Exec(sql)
+	if err != nil {
+		exitf("Error %q running SQL: %q", err, sql)
+	}
+}
+
+func doQuery(db *sql.DB, sql string) { // run a query for its side effects only, exiting on error
+	r, err := db.Query(sql)
+	if err == nil {
+		r.Close()
+		return
+	}
+	exitf("Error %q running SQL: %q", err, sql)
+}
+
+func (c *dbinitCmd) dbExists(db *sql.DB) bool { // reports whether the target database/file already exists
+	query := "SHOW DATABASES" // default; overridden per dbType below
+	switch c.dbType {
+	case "postgres":
+		query = "SELECT datname FROM pg_database"
+	case "mysql":
+		query = "SHOW DATABASES"
+	case "sqlite":
+		// There is no point in using sql.Open because it apparently does
+		// not return an error when the file does not exist.
+		fi, err := os.Stat(c.dbName)
+		return err == nil && fi.Size() > 0
+	case "mongo":
+		session, err := c.mongoSession()
+		if err != nil {
+			exitf("%v", err)
+		}
+		defer session.Close()
+		n, err := session.DB(c.dbName).C(mongo.CollectionName).Find(nil).Limit(1).Count()
+		if err != nil {
+			exitf("%v", err)
+		}
+		return n != 0
+	}
+	rows, err := db.Query(query)
+	check(err)
+	defer rows.Close()
+	for rows.Next() {
+		var db string // shadows the *sql.DB parameter; holds one listed database name
+		check(rows.Scan(&db))
+		if db == c.dbName {
+			return true
+		}
+	}
+	return false
+}
+
+func check(err error) { // exit the program on any non-nil SQL error
+	if err == nil {
+		return
+	}
+	exitf("SQL error: %v", err)
+}
+
+func exitf(format string, args ...interface{}) { // printf-style fatal error helper
+	if !strings.HasSuffix(format, "\n") {
+		format = format + "\n"
+	}
+	cmdmain.Errorf(format, args...)
+	cmdmain.Exit(1)
+}
+
+var WithSQLite = false // set by a build-tagged file elsewhere when SQLite support is compiled in — TODO confirm
+
+var ErrNoSQLite = errors.New("the command was not built with SQLite support. See https://code.google.com/p/camlistore/wiki/SQLite" + compileHint())
+
+func compileHint() string { // suggest the distro package needed for SQLite support on apt-based systems
+	if _, err := os.Stat("/etc/apt"); err == nil {
+		return " (Required: apt-get install libsqlite3-dev)"
+	}
+	return ""
+}
+
+// mongoSession returns an *mgo.Session or nil if c.dbtype is
+// not "mongo" or if there was an error.
+func (c *dbinitCmd) mongoSession() (*mgo.Session, error) {
+	if c.dbType != "mongo" {
+		return nil, nil
+	}
+	url := ""
+	if c.user == "" || c.password == "" {
+		url = c.host // no credentials: dial the host directly
+	} else {
+		url = c.user + ":" + c.password + "@" + c.host + "/" + c.dbName
+	}
+	return mgo.Dial(url)
+}
+
+// wipeMongo erases all documents from the mongo collection
+// if c.dbType is "mongo".
+func (c *dbinitCmd) wipeMongo() error {
+	if c.dbType != "mongo" {
+		return nil
+	}
+	session, err := c.mongoSession()
+	if err != nil {
+		return err
+	}
+	defer session.Close()
+	if _, err := session.DB(c.dbName).C(mongo.CollectionName).RemoveAll(nil); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/debug.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/debug.go
new file mode 100644
index 00000000..583fc419
--- /dev/null
+++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/debug.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2011 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"strings"
+
+	"camlistore.org/pkg/cmdmain"
+)
+
+var debugSubModes = map[string]*debugSubMode{ // dispatch table: sub-mode name -> handler
+	"splits": &debugSubMode{
+		doc: "Show splits of provided file.",
+		fun: showSplits,
+	},
+	"mime": &debugSubMode{
+		doc: "Show MIME type of provided file.",
+		fun: showMIME,
+	},
+	"exif": &debugSubMode{
+		doc: "Show EXIF dump of provided file.",
+		fun: showEXIF,
+	},
+}
+
+type debugSubMode struct { // one "debug" sub-mode: its help text and handler
+	doc string
+	fun func(string)
+}
+
+type debugCmd struct{}
+
+func init() {
+	cmdmain.RegisterCommand("debug", func(flags *flag.FlagSet) cmdmain.CommandRunner {
+		return new(debugCmd)
+	})
+}
+
+func (c *debugCmd) Describe() string {
+	return "Show misc meta-info from the given file."
+} + +func (c *debugCmd) Usage() { + var subModes, docs string + for k, v := range debugSubModes { + subModes += k + "|" + docs += fmt.Sprintf(" %s: %s\n", k, v.doc) + } + subModes = strings.TrimRight(subModes, "|") + fmt.Fprintf(os.Stderr, + "Usage: camtool [globalopts] debug %s file\n%s", + subModes, docs) +} + +func (c *debugCmd) RunCommand(args []string) error { + if args == nil || len(args) != 2 { + return cmdmain.UsageError("Incorrect number of arguments.") + } + subMode, ok := debugSubModes[args[0]] + if !ok { + return cmdmain.UsageError(fmt.Sprintf("Invalid submode: %v", args[0])) + } + subMode.fun(args[1]) + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/describe.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/describe.go new file mode 100644 index 00000000..0cd389e3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/describe.go @@ -0,0 +1,88 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "os" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/search" + "camlistore.org/pkg/types" +) + +type desCmd struct { + server string + depth int +} + +func init() { + cmdmain.RegisterCommand("describe", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(desCmd) + flags.StringVar(&cmd.server, "server", "", "Server to query. 
"+serverFlagHelp) + flags.IntVar(&cmd.depth, "depth", 1, "Depth to follow in describe request") + return cmd + }) +} + +func (c *desCmd) Describe() string { + return "Ask the search system to describe one or more blobs." +} + +func (c *desCmd) Usage() { + fmt.Fprintf(os.Stderr, "Usage: camtool [globalopts] describe [--depth=n] blobref [blobref, blobref...]\n") +} + +func (c *desCmd) Examples() []string { + return []string{} +} + +func (c *desCmd) RunCommand(args []string) error { + if len(args) == 0 { + return cmdmain.UsageError("requires blobref") + } + var blobs []blob.Ref + for _, arg := range args { + br, ok := blob.Parse(arg) + if !ok { + return cmdmain.UsageError(fmt.Sprintf("invalid blobref %q", arg)) + } + blobs = append(blobs, br) + } + var at time.Time // TODO: implement. from "2 days ago" "-2d", "-2h", "2013-02-05", etc + + cl := newClient(c.server) + res, err := cl.Describe(&search.DescribeRequest{ + BlobRefs: blobs, + Depth: c.depth, + At: types.Time3339(at), + }) + if err != nil { + return err + } + resj, err := json.MarshalIndent(res, "", " ") + if err != nil { + return err + } + resj = append(resj, '\n') + _, err = os.Stdout.Write(resj) + return err +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/disco.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/disco.go new file mode 100644 index 00000000..0dead988 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/disco.go @@ -0,0 +1,63 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "io" + "os" + + "camlistore.org/pkg/cmdmain" +) + +type discoCmd struct { + server string +} + +func init() { + cmdmain.RegisterCommand("discovery", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(discoCmd) + flags.StringVar(&cmd.server, "server", "", "Server to do discovery against. "+serverFlagHelp) + return cmd + }) +} + +func (c *discoCmd) Describe() string { + return "Perform configuration discovery against a server." +} + +func (c *discoCmd) Usage() { + fmt.Fprintf(os.Stderr, "Usage: camtool [globalopts] discovery") +} + +func (c *discoCmd) Examples() []string { + return []string{} +} + +func (c *discoCmd) RunCommand(args []string) error { + if len(args) > 0 { + return cmdmain.UsageError("doesn't take args") + } + cl := newClient(c.server) + disco, err := cl.DiscoveryDoc() + if err != nil { + return err + } + _, err = io.Copy(os.Stdout, disco) + return err +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/doc.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/doc.go new file mode 100644 index 00000000..c1183dcf --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/doc.go @@ -0,0 +1,57 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +The camtool binary is a collection of commands to help with the use of +a camlistore server. Notably, it can initialize a database for the +indexer, and it can sync blobs between blobservers. + +Usage: + + camtool [globalopts] [commandopts] [commandargs] + +Modes: + + env: Return Camlistore environment information + googinit: Init Google Drive or Google Cloud Storage. + list: List blobs on a server. + claims: Ask the search system to list the claims that modify a permanode. + dumpconfig: Dump the low-level server config from its simple config. + describe: Ask the search system to describe one or more blobs. + discovery: Perform configuration discovery against a server. + reindex-diskpacked: Rebuild the index of the diskpacked blob store + index: Synchronize blobs for all discovered blobs storage - indexer pairs. + sync: Synchronize blobs from a source to a destination. + dbinit: Set up the database for the indexer. + debug: Show misc meta-info from the given file. + +Examples: + + camtool sync --all + camtool sync --src http://localhost:3179/bs/ --dest http://localhost:3179/index-mem/ + + camtool dbinit -user root -password root -host localhost -dbname camliprod -wipe + +For mode-specific help: + + camtool -help + +Global options: + -help=false: print usage + -verbose=false: extra debug logging + -version=false: show version +*/ +package main diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/dp_idx_rebuild.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/dp_idx_rebuild.go new file mode 100644 index 00000000..45edbf5b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/dp_idx_rebuild.go @@ -0,0 +1,128 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "errors" + "flag" + "fmt" + "log" + "os" + + "camlistore.org/pkg/blobserver/diskpacked" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/serverinit" +) + +type reindexdpCmd struct { + overwrite, verbose bool +} + +func init() { + cmdmain.RegisterCommand("reindex-diskpacked", + func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(reindexdpCmd) + flags.BoolVar(&cmd.overwrite, "overwrite", false, + "Whether to overwrite the existing index. If false, only check.") + return cmd + }) +} + +func (c *reindexdpCmd) Describe() string { + return "Rebuild the index of the diskpacked blob store" +} + +func (c *reindexdpCmd) Usage() { + fmt.Fprintln(os.Stderr, "Usage: camtool [globalopts] reindex-diskpacked [reindex-opts]") + fmt.Fprintln(os.Stderr, " camtool reindex-diskpacked [--overwrite] # dir from server config") + fmt.Fprintln(os.Stderr, " camtool reindex-diskpacked [--overwrite] /path/to/directory") +} + +func (c *reindexdpCmd) RunCommand(args []string) error { + var path string + var indexConf jsonconfig.Obj + + switch len(args) { + case 0: + case 1: + path = args[0] + default: + return errors.New("More than 1 argument not allowed") + } + cfg, err := serverinit.LoadFile(osutil.UserServerConfigPath()) + if err != nil { + return err + } + prefixes, ok := cfg.Obj["prefixes"].(map[string]interface{}) + if !ok { + return fmt.Errorf("No 'prefixes' object in low-level (or converted) config file %s", osutil.UserServerConfigPath()) + } + paths, confs := []string{}, 
[]jsonconfig.Obj{} + for prefix, vei := range prefixes { + pmap, ok := vei.(map[string]interface{}) + if !ok { + log.Printf("prefix %q value is a %T, not an object", prefix, vei) + continue + } + pconf := jsonconfig.Obj(pmap) + handlerType := pconf.RequiredString("handler") + handlerArgs := pconf.OptionalObject("handlerArgs") + // no pconf.Validate, as this is a recover tool + if handlerType != "storage-diskpacked" { + continue + } + log.Printf("handlerArgs of %q: %v", prefix, handlerArgs) + if handlerArgs == nil { + log.Printf("no handlerArgs for %q", prefix) + continue + } + aconf := jsonconfig.Obj(handlerArgs) + apath := aconf.RequiredString("path") + // no aconv.Validate, as this is a recover tool + if apath == "" { + log.Printf("path is missing for %q", prefix) + continue + } + if path != "" && path != apath { + continue + } + paths = append(paths, apath) + confs = append(confs, aconf) + } + if len(paths) == 0 { + return fmt.Errorf("Server config file %s doesn't specify a disk-packed storage handler.", + osutil.UserServerConfigPath()) + } + if len(paths) > 1 { + return fmt.Errorf("Ambiguity. Server config file %s d specify more than 1 disk-packed storage handler. Please specify one of: %v", osutil.UserServerConfigPath(), paths) + } + path = paths[0] + if path == "" { + return errors.New("no path is given/found") + } + // If no index is specified, the default will be used (as on the regular path). + if mi := confs[0]["metaIndex"]; mi != nil { + if mi, ok := mi.(map[string]interface{}); ok { + indexConf = jsonconfig.Obj(mi) + } + } + log.Printf("indexConf: %v", indexConf) + + return diskpacked.Reindex(path, c.overwrite, indexConf) +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/dumpconfig.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/dumpconfig.go new file mode 100644 index 00000000..c82caada --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/dumpconfig.go @@ -0,0 +1,67 @@ +/* +Copyright 2013 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "encoding/json" + "errors" + "flag" + "os" + + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/osutil" + _ "camlistore.org/pkg/osutil/gce" + "camlistore.org/pkg/serverinit" +) + +type dumpconfigCmd struct{} + +func init() { + cmdmain.RegisterCommand("dumpconfig", func(flags *flag.FlagSet) cmdmain.CommandRunner { + return new(dumpconfigCmd) + }) +} + +func (c *dumpconfigCmd) Describe() string { + return "Dump the low-level server config from its simple config." 
+} + +func (c *dumpconfigCmd) Usage() { +} + +func (c *dumpconfigCmd) RunCommand(args []string) error { + var file string + switch { + case len(args) == 0: + file = osutil.UserServerConfigPath() + case len(args) == 1: + file = args[0] + default: + return errors.New("More than 1 argument not allowed") + } + cfg, err := serverinit.LoadFile(file) + if err != nil { + return err + } + cfg.Obj["handlerConfig"] = true + ll, err := json.MarshalIndent(cfg.Obj, "", " ") + if err != nil { + return err + } + _, err = os.Stdout.Write(ll) + return err +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/env.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/env.go new file mode 100644 index 00000000..dc5e015e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/env.go @@ -0,0 +1,77 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "flag" + "fmt" + "os" + "path/filepath" + + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/osutil" +) + +var envMap = map[string]func() string{ + "configdir": osutil.CamliConfigDir, + "clientconfig": osutil.UserClientConfigPath, + "serverconfig": osutil.UserServerConfigPath, + "camsrcroot": srcRoot, +} + +type envCmd struct{} + +func init() { + cmdmain.RegisterCommand("env", func(flags *flag.FlagSet) cmdmain.CommandRunner { + return new(envCmd) + }) +} + +func (c *envCmd) Describe() string { + return "Return Camlistore environment information" +} + +func (c *envCmd) Usage() { + fmt.Fprintf(os.Stderr, "camtool env [key]\n") +} + +func (c *envCmd) RunCommand(args []string) error { + if len(args) == 0 { + for k, fn := range envMap { + fmt.Printf("%s: %s\n", k, fn()) + } + return nil + } + if len(args) > 1 { + return cmdmain.UsageError("only 0 or 1 arguments allowed") + } + fn := envMap[args[0]] + if fn == nil { + return fmt.Errorf("unknown environment key %q", args[0]) + } + fmt.Println(fn()) + return nil +} + +func srcRoot() string { + for _, dir := range filepath.SplitList(os.Getenv("GOPATH")) { + if d := filepath.Join(dir, "src", "camlistore.org"); osutil.DirExists(d) { + return d + } + } + return "" +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/exif.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/exif.go new file mode 100644 index 00000000..01d14e43 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/exif.go @@ -0,0 +1,49 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "log" + "os" + + "camlistore.org/third_party/github.com/rwcarlsen/goexif/exif" +) + +func showEXIF(file string) { + f, err := os.Open(file) + if err != nil { + panic(err.Error()) + } + defer f.Close() + ex, err := exif.Decode(f) + if err != nil { + if exif.IsCriticalError(err) { + log.Fatalf("exif.Decode, critical error: %v", err) + } + log.Printf("exif.Decode, warning: %v", err) + } + fmt.Printf("%v\n", ex) + if exif.IsExifError(err) { + // the error happened while decoding the EXIF sub-IFD, so as DateTime is + // part of it, we have to assume (until there's a better "decode effort" + // strategy in goexif) that it's not usable. + return + } + ct, err := ex.DateTime() + fmt.Printf("exif.DateTime = %v, %v\n", ct, err) +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/googinit.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/googinit.go new file mode 100644 index 00000000..76128863 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/googinit.go @@ -0,0 +1,131 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bufio" + "encoding/json" + "flag" + "fmt" + "strings" + + "camlistore.org/pkg/blobserver/google/drive" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/constants/google" + "camlistore.org/pkg/googlestorage" + "camlistore.org/pkg/oauthutil" + + "golang.org/x/oauth2" +) + +type googinitCmd struct { + storageType string +} + +func init() { + cmdmain.RegisterCommand("googinit", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(googinitCmd) + flags.StringVar(&cmd.storageType, "type", "", "Storage type: drive or cloud") + return cmd + }) +} + +func (c *googinitCmd) Describe() string { + return "Init Google Drive or Google Cloud Storage." +} + +func (c *googinitCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, "Usage: camtool [globalopts] googinit [commandopts] \n") +} + +func (c *googinitCmd) RunCommand(args []string) error { + var ( + err error + clientId string + clientSecret string + oauthConfig *oauth2.Config + ) + + if c.storageType != "drive" && c.storageType != "cloud" { + return cmdmain.UsageError("Invalid storage type: must be drive for Google Drive or cloud for Google Cloud Storage.") + } + + clientId, clientSecret = getClientInfo() + + switch c.storageType { + case "drive": + oauthConfig = &oauth2.Config{ + Scopes: []string{drive.Scope}, + Endpoint: google.Endpoint, + ClientID: clientId, + ClientSecret: clientSecret, + RedirectURL: oauthutil.TitleBarRedirectURL, + } + case "cloud": + oauthConfig = &oauth2.Config{ + Scopes: []string{googlestorage.Scope}, + Endpoint: google.Endpoint, + ClientID: clientId, + ClientSecret: clientSecret, + RedirectURL: oauthutil.TitleBarRedirectURL, + } + } + + token, err := oauth2.ReuseTokenSource(nil, &oauthutil.TokenSource{ + Config: oauthConfig, + AuthCode: func() string { + fmt.Fprintf(cmdmain.Stdout, "Get auth code from:\n\n") + fmt.Fprintf(cmdmain.Stdout, "%v\n\n", 
oauthConfig.AuthCodeURL("", oauth2.AccessTypeOffline, oauth2.ApprovalForce)) + return prompt("Enter auth code:") + }, + }).Token() + if err != nil { + return fmt.Errorf("could not acquire token: %v", err) + } + + fmt.Fprintf(cmdmain.Stdout, "\nYour Google auth object:\n\n") + enc := json.NewEncoder(cmdmain.Stdout) + authObj := map[string]string{ + "client_id": clientId, + "client_secret": clientSecret, + "refresh_token": token.RefreshToken, + } + enc.Encode(authObj) + fmt.Fprint(cmdmain.Stdout, "\n") + return nil +} + +// Prompt the user for an input line. Return the given input. +func prompt(promptText string) string { + fmt.Fprint(cmdmain.Stdout, promptText) + sc := bufio.NewScanner(cmdmain.Stdin) + sc.Scan() + return strings.TrimSpace(sc.Text()) +} + +// Prompt for client id / secret +func getClientInfo() (string, string) { + fmt.Fprintf(cmdmain.Stdout, "Please provide the client id and client secret \n") + fmt.Fprintf(cmdmain.Stdout, "(You can find these at http://code.google.com/apis/console > your project > API Access)\n") + var ( + clientId string + clientSecret string + ) + clientId = prompt("Client ID:") + clientSecret = prompt("Client Secret:") + return clientId, clientSecret +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/index.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/index.go new file mode 100644 index 00000000..256eaee1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/index.go @@ -0,0 +1,86 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "os" + "strconv" + + "camlistore.org/pkg/client" + "camlistore.org/pkg/cmdmain" +) + +type indexCmd struct { + verbose bool + wipe bool + insecureTLS bool +} + +func init() { + cmdmain.RegisterCommand("index", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(indexCmd) + flags.BoolVar(&cmd.verbose, "verbose", false, "Be verbose.") + flags.BoolVar(&cmd.wipe, "wipe", false, "Erase and recreate all discovered indexes. NOOP for now.") + if debug, _ := strconv.ParseBool(os.Getenv("CAMLI_DEBUG")); debug { + flags.BoolVar(&cmd.insecureTLS, "insecure", false, "If set, when using TLS, the server's certificates verification is disabled, and they are not checked against the trustedCerts in the client configuration either.") + } + return cmd + }) +} + +func (c *indexCmd) Describe() string { + return "Synchronize blobs for all discovered blobs storage - indexer pairs." +} + +func (c *indexCmd) Usage() { + fmt.Fprintf(os.Stderr, "Usage: camtool [globalopts] index [indexopts] \n") +} + +func (c *indexCmd) RunCommand(args []string) error { + dc := c.discoClient() + syncHandlers, err := dc.SyncHandlers() + if err != nil { + return fmt.Errorf("sync handlers discovery failed: %v", err) + } + + for _, sh := range syncHandlers { + if sh.ToIndex { + if err := c.sync(sh.From, sh.To); err != nil { + return fmt.Errorf("Error while indexing from %v to %v: %v", sh.From, sh.To, err) + } + } + } + return nil +} + +func (c *indexCmd) sync(from, to string) error { + return (&syncCmd{ + src: from, + dest: to, + verbose: c.verbose, + wipe: c.wipe, + }).RunCommand(nil) +} + +// discoClient returns a client initialized with a server +// based from the configuration file. The returned client +// can then be used to discover the blobRoot and syncHandlers. 
+func (c *indexCmd) discoClient() *client.Client { + return newClient("") +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/list.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/list.go new file mode 100644 index 00000000..b159a1d9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/list.go @@ -0,0 +1,166 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bufio" + "flag" + "fmt" + "log" + "os" + "strings" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/client" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/search" +) + +type listCmd struct { + *syncCmd + + describe bool // whether to describe each blob. + cl *client.Client // client used for the describe requests. +} + +func init() { + cmdmain.RegisterCommand("list", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := &listCmd{ + syncCmd: &syncCmd{ + dest: "stdout", + }, + describe: false, + } + flags.StringVar(&cmd.syncCmd.src, "src", "", "Source blobserver is either a URL prefix (with optional path), a host[:port], a path (starting with /, ./, or ../), or blank to use the Camlistore client config's default host.") + flags.BoolVar(&cmd.verbose, "verbose", false, "Be verbose.") + flags.BoolVar(&cmd.describe, "describe", false, "Use describe requests to get each blob's type. Requires a source server with a search endpoint. Mostly used for demos. 
Requires many extra round-trips to the server currently.") + return cmd + }) +} + +const describeBatchSize = 50 + +func (c *listCmd) Describe() string { + return "List blobs on a server." +} + +func (c *listCmd) Usage() { + fmt.Fprintf(os.Stderr, "Usage: camtool [globalopts] list [listopts] \n") +} + +func (c *listCmd) Examples() []string { + return nil +} + +func (c *listCmd) RunCommand(args []string) error { + if !c.describe { + return c.syncCmd.RunCommand(args) + } + + stdout := cmdmain.Stdout + defer func() { cmdmain.Stdout = stdout }() + pr, pw, err := os.Pipe() + if err != nil { + return fmt.Errorf("Could not create pipe to read from stdout: %v", err) + } + defer pr.Close() + cmdmain.Stdout = pw + + if err := c.setClient(); err != nil { + return err + } + + scanner := bufio.NewScanner(pr) + go func() { + err := c.syncCmd.RunCommand(args) + if err != nil { + log.Printf("Error when enumerating source with sync: %v", err) + } + pw.Close() + }() + + blobRefs := make([]blob.Ref, 0, describeBatchSize) + describe := func() error { + if len(blobRefs) == 0 { + return nil + } + // TODO(mpl): setting depth to 1, not 0, because otherwise r.depth() in pkg/search/handler.go defaults to 4. Can't remember why we disallowed 0 right now, and I do not want to change that in pkg/search/handler.go and risk breaking things. + described, err := c.cl.Describe(&search.DescribeRequest{ + BlobRefs: blobRefs, + Depth: 1, + }) + if err != nil { + return fmt.Errorf("Error when describing blobs %v: %v", blobRefs, err) + } + for _, v := range blobRefs { + blob, ok := described.Meta[v.String()] + if !ok { + // This can happen if the index is out of sync with the storage we enum from. 
+ fmt.Fprintf(stdout, "%v \n", v) + continue + } + detailed := detail(blob) + if detailed != "" { + detailed = fmt.Sprintf("\t%v", detailed) + } + fmt.Fprintf(stdout, "%v %v%v\n", v, blob.Size, detailed) + } + blobRefs = make([]blob.Ref, 0, describeBatchSize) + return nil + } + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) != 2 { + return fmt.Errorf("Bogus output from sync: got %q, wanted \"blobref size\"", scanner.Text()) + } + blobRefs = append(blobRefs, blob.MustParse(fields[0])) + if len(blobRefs) == describeBatchSize { + if err := describe(); err != nil { + return err + } + } + } + if err := describe(); err != nil { + return err + } + if err := scanner.Err(); err != nil { + return fmt.Errorf("Error reading on pipe from stdout: %v", err) + } + return nil +} + +// setClient configures a client for c, for the describe requests. +func (c *listCmd) setClient() error { + ss, err := c.syncCmd.storageFromParam("src", c.syncCmd.src) + if err != nil { + fmt.Errorf("Could not set client for describe requests: %v", err) + } + var ok bool + c.cl, ok = ss.(*client.Client) + if !ok { + return fmt.Errorf("storageFromParam returned a %T, was expecting a *client.Client", ss) + } + return nil +} + +func detail(blob *search.DescribedBlob) string { + // TODO(mpl): attrType, value for claim. but I don't think they're accessible just with a describe req. + if blob.CamliType == "file" { + return fmt.Sprintf("%v (%v size=%v)", blob.CamliType, blob.File.FileName, blob.File.Size) + } + return blob.CamliType +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/makestatic.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/makestatic.go new file mode 100644 index 00000000..75d11ac0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/makestatic.go @@ -0,0 +1,131 @@ +/* +Copyright 2014 The Camlistore Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "os" + "strings" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/search" +) + +type makeStaticCmd struct { + server string +} + +func init() { + cmdmain.RegisterCommand("makestatic", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(makeStaticCmd) + flags.StringVar(&cmd.server, "server", "", "Server to search. "+serverFlagHelp) + return cmd + }) +} + +func (c *makeStaticCmd) Describe() string { + return "Creates a static directory from a permanode set" +} + +func (c *makeStaticCmd) Usage() { + fmt.Fprintf(os.Stderr, "Usage: camtool [globalopts] makestatic [permanode]\n") +} + +func (c *makeStaticCmd) Examples() []string { + return []string{} +} + +func (c *makeStaticCmd) RunCommand(args []string) error { + if len(args) != 1 { + return cmdmain.UsageError("requires a permanode") + } + pn, ok := blob.Parse(args[0]) + if !ok { + return cmdmain.UsageError("invalid permanode argument") + } + + cl := newClient(c.server) + res, err := cl.Describe(&search.DescribeRequest{ + BlobRefs: []blob.Ref{pn}, + Rules: []*search.DescribeRule{ + { + IfResultRoot: true, + Attrs: []string{"camliMember"}, + Rules: []*search.DescribeRule{ + {Attrs: []string{"camliContent"}}, + }, + }, + }, + }) + if err != nil { + return err + } + + camliType := func(ref string) string { + m := res.Meta[ref] + if m == nil { + return "" + } + return 
m.CamliType + } + + var ss schema.StaticSet + pnDes, ok := res.Meta[pn.String()] + if !ok { + return fmt.Errorf("permanode %v not described", pn) + } + if pnDes.Permanode == nil { + return fmt.Errorf("blob %v is not a permanode", pn) + } + members := pnDes.Permanode.Attr["camliMember"] + if len(members) == 0 { + return fmt.Errorf("permanode %v has no camliMember attributes", pn) + } + for _, fileRefStr := range members { + if camliType(fileRefStr) != "permanode" { + continue + } + contentRef := res.Meta[fileRefStr].Permanode.Attr.Get("camliContent") + if contentRef == "" { + continue + } + if camliType(contentRef) == "file" { + ss.Add(blob.MustParse(contentRef)) + } + } + + b := ss.Blob() + _, err = cl.UploadBlob(b) + if err != nil { + return err + } + title := pnDes.Title() + title = strings.Replace(title, string(os.PathSeparator), "", -1) + if title == "" { + title = pn.String() + } + dir := schema.NewDirMap(title).PopulateDirectoryMap(b.BlobRef()) + dirBlob := dir.Blob() + _, err = cl.UploadBlob(dirBlob) + if err == nil { + fmt.Println(dirBlob.BlobRef().String()) + } + return err +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/mime.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/mime.go new file mode 100644 index 00000000..3e6ea2f0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/mime.go @@ -0,0 +1,34 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "fmt" + "log" + "os" + + "camlistore.org/pkg/magic" +) + +func showMIME(file string) { + f, err := os.Open(file) + if err != nil { + log.Fatal(err) + } + mime, _ := magic.MIMETypeFromReader(f) + fmt.Println(mime) +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/packblobs.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/packblobs.go new file mode 100644 index 00000000..eccc663f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/packblobs.go @@ -0,0 +1,107 @@ +/* +Copyright 2015 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "log" + "os" + + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/search" +) + +type packBlobsCmd struct { + server string +} + +func init() { + cmdmain.RegisterCommand("packblobs", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(packBlobsCmd) + flags.StringVar(&cmd.server, "server", "", "Server to search. 
"+serverFlagHelp) + return cmd + }) +} + +func (c *packBlobsCmd) Describe() string { + return "Pack related blobs together (migration tool)" +} + +func (c *packBlobsCmd) Usage() { + fmt.Fprintf(os.Stderr, "Usage: camtool [globalopts] packblobs\n") +} + +func (c *packBlobsCmd) Examples() []string { + return []string{} +} + +func (c *packBlobsCmd) RunCommand(args []string) error { + if len(args) != 0 { + return cmdmain.UsageError("doesn't take arguments") + } + req := &search.SearchQuery{ + Limit: -1, + Sort: search.BlobRefAsc, + Constraint: &search.Constraint{ + File: &search.FileConstraint{ + FileSize: &search.IntConstraint{ + Min: 512 << 10, + }, + }, + }, + } + cl := newClient(c.server) + looseClient := cl.NewPathClient("/bs-loose/") + + res, err := cl.Query(req) + if err != nil { + return err + } + total := len(res.Blobs) + n := 0 + var buf bytes.Buffer + for _, sr := range res.Blobs { + n++ + fileRef := sr.Blob + rc, _, err := looseClient.Fetch(fileRef) + if err == os.ErrNotExist { + fmt.Printf("%d/%d: %v already done\n", n, total, fileRef) + continue + } + if err != nil { + log.Printf("error fetching %v: %v\n", fileRef, err) + continue + } + buf.Reset() + _, err = io.Copy(&buf, rc) + rc.Close() + if err != nil { + log.Printf("error reading %v: %v\n", fileRef, err) + continue + } + _, err = cl.ReceiveBlob(fileRef, &buf) + if err != nil { + log.Printf("error write %v: %v\n", fileRef, err) + continue + } + fmt.Printf("%d/%d: %v\n", n, total, fileRef) + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/search.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/search.go new file mode 100644 index 00000000..9e4596dc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/search.go @@ -0,0 +1,116 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "os" + "strings" + + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/search" + "camlistore.org/pkg/strutil" +) + +type searchCmd struct { + server string + limit int + describe bool + rawQuery bool +} + +func init() { + cmdmain.RegisterCommand("search", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(searchCmd) + flags.StringVar(&cmd.server, "server", "", "Server to search. "+serverFlagHelp) + flags.IntVar(&cmd.limit, "limit", 0, "Limit number of results. 0 is default. Negative means no limit.") + flags.BoolVar(&cmd.describe, "describe", false, "Describe results as well.") + flags.BoolVar(&cmd.rawQuery, "rawquery", false, "If true, the provided JSON is a SearchQuery, and not a Constraint. 
In this case, the -limit flag if non-zero is applied after parsing the JSON.") + return cmd + }) +} + +func (c *searchCmd) Describe() string { + return "Execute a search query" +} + +func (c *searchCmd) Usage() { + fmt.Fprintf(os.Stderr, "Usage: camtool [globalopts] search \n") +} + +func (c *searchCmd) Examples() []string { + return []string{ + `"loc:paris is:portrait" # expression`, + `'{"blobrefPrefix":"sha1-f00d"}' # SearchConstraint JSON`, + `- # piped from stdin`, + } +} + +func (c *searchCmd) RunCommand(args []string) error { + if len(args) != 1 { + return cmdmain.UsageError("requires search expression or Constraint JSON") + } + q := args[0] + if q == "-" { + slurp, err := ioutil.ReadAll(cmdmain.Stdin) + if err != nil { + return err + } + q = string(slurp) + } + q = strings.TrimSpace(q) + + req := &search.SearchQuery{ + Limit: c.limit, + } + if c.rawQuery { + req.Limit = 0 // clear it if they provided it + if err := json.NewDecoder(strings.NewReader(q)).Decode(&req); err != nil { + return err + } + if c.limit != 0 { + req.Limit = c.limit + } + } else if strutil.IsPlausibleJSON(q) { + cs := new(search.Constraint) + if err := json.NewDecoder(strings.NewReader(q)).Decode(&cs); err != nil { + return err + } + req.Constraint = cs + } else { + req.Expression = q + } + if c.describe { + req.Describe = &search.DescribeRequest{} + } + + cl := newClient(c.server) + res, err := cl.Query(req) + if err != nil { + return err + } + resj, err := json.MarshalIndent(res, "", " ") + if err != nil { + return err + } + resj = append(resj, '\n') + _, err = os.Stdout.Write(resj) + return err +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/searchdoc.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/searchdoc.go new file mode 100644 index 00000000..acb22c45 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/searchdoc.go @@ -0,0 +1,83 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the 
"License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "os" + "strings" + "text/tabwriter" + + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/search" +) + +type searchDocCmd struct{} + +func init() { + cmdmain.RegisterCommand("searchdoc", func(flags *flag.FlagSet) cmdmain.CommandRunner { + return new(searchDocCmd) + }) +} + +func (c *searchDocCmd) Describe() string { + return "Provide help on the predicates for search expressions" +} + +func (c *searchDocCmd) Usage() { + cmdmain.Errorf("camtool searchdoc") +} + +func (c *searchDocCmd) RunCommand(args []string) error { + if len(args) > 0 { + return cmdmain.UsageError("No arguments allowed") + } + + formattedSearchHelp() + return nil +} + +func formattedSearchHelp() { + s := search.SearchHelp() + type help struct{ Name, Description string } + h := []help{} + err := json.Unmarshal([]byte(s), &h) + if err != nil { + cmdmain.Errorf("%v", err) + os.Exit(1) + } + + w := new(tabwriter.Writer) + w.Init(cmdmain.Stdout, 0, 8, 0, '\t', 0) + fmt.Fprintln(w, "Predicates for search expressions") + fmt.Fprintln(w) + fmt.Fprintln(w, "Predicate\tDescription") + fmt.Fprintln(w) + for _, predicate := range h { + desc := strings.Split(predicate.Description, "\n") + for i, d := range desc { + if i == 0 { + fmt.Fprintf(w, "%s\t%s\n", predicate.Name, d) + } else { + fmt.Fprintf(w, "\t%s\n", d) + } + } + } + w.Flush() +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/splits.go 
b/vendor/github.com/camlistore/camlistore/cmd/camtool/splits.go new file mode 100644 index 00000000..515bf6bc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/splits.go @@ -0,0 +1,96 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bufio" + "fmt" + "io" + "log" + "os" + "strings" + + "camlistore.org/pkg/rollsum" +) + +type span struct { + from, to int64 + bits int + children []span +} + +func showSplits(file string) { + f, err := os.Open(file) + if err != nil { + panic(err.Error()) + } + bufr := bufio.NewReader(f) + + spans := []span{} + rs := rollsum.New() + n := int64(0) + last := n + + for { + c, err := bufr.ReadByte() + if err != nil { + if err == io.EOF { + if n != last { + spans = append(spans, span{from: last, to: n}) + } + break + } + panic(err.Error()) + } + n++ + rs.Roll(c) + if rs.OnSplit() { + bits := rs.Bits() + sliceFrom := len(spans) + for sliceFrom > 0 && spans[sliceFrom-1].bits < bits { + sliceFrom-- + } + nCopy := len(spans) - sliceFrom + var children []span + if nCopy > 0 { + children = make([]span, nCopy) + nCopied := copy(children, spans[sliceFrom:]) + if nCopied != nCopy { + panic("n wrong") + } + spans = spans[:sliceFrom] + } + spans = append(spans, span{from: last, to: n, bits: bits, children: children}) + + log.Printf("split at %d (after %d), bits=%d", n, n-last, bits) + last = n + } + } + + var dumpSpans func(s []span, indent int) + dumpSpans = func(s []span, indent int) { 
+ in := strings.Repeat(" ", indent) + for _, sp := range s { + fmt.Printf("%sfrom=%d, to=%d (len %d) bits=%d\n", in, sp.from, sp.to, sp.to-sp.from, sp.bits) + if len(sp.children) > 0 { + dumpSpans(sp.children, indent+4) + } + } + } + dumpSpans(spans, 0) + fmt.Printf("\n\nNOTE NOTE NOTE: the camdebug tool hasn't been updated to use the splitting policy from pkg/schema/filewriter.go.") +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/sqlite_cond.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/sqlite_cond.go new file mode 100644 index 00000000..5339361a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/sqlite_cond.go @@ -0,0 +1,27 @@ +// +build with_sqlite + +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + _ "camlistore.org/third_party/github.com/mattn/go-sqlite3" +) + +func init() { + WithSQLite = true +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/sync.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/sync.go new file mode 100644 index 00000000..bcc0386d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/sync.go @@ -0,0 +1,456 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "log" + "net/http" + "os" + "strconv" + "strings" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/blobserver/localdisk" + "camlistore.org/pkg/client" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/context" +) + +type syncCmd struct { + src string + dest string + third string + srcKeyID string // GPG public key ID of the source server, if supported. + destKeyID string // GPG public key ID of the destination server, if supported. + + loop bool + verbose bool + all bool + removeSrc bool + wipe bool + insecureTLS bool + oneIsDisk bool // Whether one of src or dest is a local disk. + + logger *log.Logger +} + +func init() { + cmdmain.RegisterCommand("sync", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(syncCmd) + flags.StringVar(&cmd.src, "src", "", "Source blobserver. "+serverFlagHelp) + flags.StringVar(&cmd.dest, "dest", "", "Destination blobserver (same format as src), or 'stdout' to just enumerate the --src blobs to stdout.") + flags.StringVar(&cmd.third, "thirdleg", "", "Copy blobs present in source but missing from destination to this 'third leg' blob store, instead of the destination. (same format as src)") + + flags.BoolVar(&cmd.loop, "loop", false, "Create an associate a new permanode for the uploaded file or directory.") + flags.BoolVar(&cmd.verbose, "verbose", false, "Be verbose.") + flags.BoolVar(&cmd.wipe, "wipe", false, "If dest is an index, drop it and repopulate it from scratch. 
NOOP for now.") + flags.BoolVar(&cmd.all, "all", false, "Discover all sync destinations configured on the source server and run them.") + flags.BoolVar(&cmd.removeSrc, "removesrc", false, "Remove each blob from the source after syncing to the destination; for queue processing.") + // TODO(mpl): maybe move this flag up to the client pkg as an AddFlag, as it can be used by all commands. + if debug, _ := strconv.ParseBool(os.Getenv("CAMLI_DEBUG")); debug { + flags.BoolVar(&cmd.insecureTLS, "insecure", false, "If set, when using TLS, the server's certificates verification is disabled, and they are not checked against the trustedCerts in the client configuration either.") + } + + return cmd + }) +} + +func (c *syncCmd) Describe() string { + return "Synchronize blobs from a source to a destination." +} + +func (c *syncCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, "Usage: camtool [globalopts] sync [syncopts] \n") +} + +func (c *syncCmd) Examples() []string { + return []string{ + "--all", + "--src http://localhost:3179/bs/ --dest http://localhost:3179/index-mem/", + } +} + +func (c *syncCmd) RunCommand(args []string) error { + if c.loop && !c.removeSrc { + return cmdmain.UsageError("Can't use --loop without --removesrc") + } + if c.verbose { + c.logger = log.New(cmdmain.Stderr, "", 0) // else nil + } + if c.all { + err := c.syncAll() + if err != nil { + return fmt.Errorf("sync all failed: %v", err) + } + return nil + } + + ss, err := c.storageFromParam("src", c.src) + if err != nil { + return err + } + ds, err := c.storageFromParam("dest", c.dest) + if err != nil { + return err + } + ts, err := c.storageFromParam("thirdleg", c.third) + if err != nil { + return err + } + + differentKeyIDs := fmt.Sprintf("WARNING: the source server GPG key ID (%v) and the destination's (%v) differ. 
All blobs will be synced, but because the indexer at the other side is indexing claims by a different user, you may not see what you expect in that server's web UI, etc.", c.srcKeyID, c.destKeyID) + + if c.dest != "stdout" && !c.oneIsDisk && c.srcKeyID != c.destKeyID { // both blank is ok. + // Warn at the top (and hope the user sees it and can abort if it was a mistake): + fmt.Fprintln(cmdmain.Stderr, differentKeyIDs) + // Warn also at the end (in case the user missed the first one) + defer fmt.Fprintln(cmdmain.Stderr, differentKeyIDs) + } + + passNum := 0 + for { + passNum++ + stats, err := c.doPass(ss, ds, ts) + if c.verbose { + log.Printf("sync stats - pass: %d, blobs: %d, bytes %d\n", passNum, stats.BlobsCopied, stats.BytesCopied) + } + if err != nil { + return fmt.Errorf("sync failed: %v", err) + } + if !c.loop { + break + } + } + return nil +} + +// A storageType is one of "src", "dest", or "thirdleg". These match the flag names. +type storageType string + +const ( + storageSource storageType = "src" + storageDest storageType = "dest" + storageThird storageType = "thirdleg" +) + +// which is one of "src", "dest", or "thirdleg" +func (c *syncCmd) storageFromParam(which storageType, val string) (blobserver.Storage, error) { + var httpClient *http.Client + + if val == "" { + switch which { + case storageThird: + return nil, nil + case storageSource: + discl := c.discoClient() + discl.SetLogger(c.logger) + src, err := discl.BlobRoot() + if err != nil { + return nil, fmt.Errorf("Failed to discover source server's blob path: %v", err) + } + val = src + httpClient = discl.HTTPClient() + } + if val == "" { + return nil, cmdmain.UsageError("No --" + string(which) + " flag value specified") + } + } + if which == storageDest && val == "stdout" { + return nil, nil + } + if looksLikePath(val) { + disk, err := localdisk.New(val) + if err != nil { + return nil, fmt.Errorf("Interpreted --%v=%q as a local disk path, but got error: %v", which, val, err) + } + c.oneIsDisk = 
true + return disk, nil + } + cl := client.New(val) + cl.InsecureTLS = c.insecureTLS + if httpClient == nil { + httpClient = &http.Client{ + Transport: cl.TransportForConfig(nil), + } + } + cl.SetHTTPClient(httpClient) + if err := cl.SetupAuth(); err != nil { + return nil, fmt.Errorf("could not setup auth for connecting to %v: %v", val, err) + } + cl.SetLogger(c.logger) + serverKeyID, err := cl.ServerKeyID() + if err != nil && err != client.ErrNoSigning { + fmt.Fprintf(cmdmain.Stderr, "Failed to discover keyId for server %v: %v", val, err) + } else { + if which == storageSource { + c.srcKeyID = serverKeyID + } else if which == storageDest { + c.destKeyID = serverKeyID + } + } + return cl, nil +} + +func looksLikePath(v string) bool { + prefix := func(s string) bool { return strings.HasPrefix(v, s) } + return prefix("./") || prefix("/") || prefix("../") +} + +type SyncStats struct { + BlobsCopied int + BytesCopied int64 + ErrorCount int +} + +func (c *syncCmd) syncAll() error { + if c.loop { + return cmdmain.UsageError("--all can't be used with --loop") + } + if c.third != "" { + return cmdmain.UsageError("--all can't be used with --thirdleg") + } + if c.dest != "" { + return cmdmain.UsageError("--all can't be used with --dest") + } + + dc := c.discoClient() + dc.SetLogger(c.logger) + syncHandlers, err := dc.SyncHandlers() + if err != nil { + return fmt.Errorf("sync handlers discovery failed: %v", err) + } + if c.verbose { + log.Printf("To be synced:\n") + for _, sh := range syncHandlers { + log.Printf("%v -> %v", sh.From, sh.To) + } + } + for _, sh := range syncHandlers { + from := client.New(sh.From) + from.SetLogger(c.logger) + from.InsecureTLS = c.insecureTLS + from.SetHTTPClient(&http.Client{ + Transport: from.TransportForConfig(nil), + }) + if err := from.SetupAuth(); err != nil { + return fmt.Errorf("could not setup auth for connecting to %v: %v", sh.From, err) + } + to := client.New(sh.To) + to.SetLogger(c.logger) + to.InsecureTLS = c.insecureTLS + 
to.SetHTTPClient(&http.Client{ + Transport: to.TransportForConfig(nil), + }) + if err := to.SetupAuth(); err != nil { + return fmt.Errorf("could not setup auth for connecting to %v: %v", sh.To, err) + } + if c.verbose { + log.Printf("Now syncing: %v -> %v", sh.From, sh.To) + } + stats, err := c.doPass(from, to, nil) + if c.verbose { + log.Printf("sync stats, blobs: %d, bytes %d\n", stats.BlobsCopied, stats.BytesCopied) + } + if err != nil { + return err + } + } + return nil +} + +// discoClient returns a client initialized with a server +// based from --src or from the configuration file if --src +// is blank. The returned client can then be used to discover +// the blobRoot and syncHandlers. +func (c *syncCmd) discoClient() *client.Client { + cl := newClient(c.src) + cl.SetLogger(c.logger) + cl.InsecureTLS = c.insecureTLS + return cl +} + +func enumerateAllBlobs(ctx *context.Context, s blobserver.Storage, destc chan<- blob.SizedRef) error { + // Use *client.Client's support for enumerating all blobs if + // possible, since it could probably do a better job knowing + // HTTP boundaries and such. + if c, ok := s.(*client.Client); ok { + return c.SimpleEnumerateBlobs(ctx, destc) + } + + defer close(destc) + return blobserver.EnumerateAll(ctx, s, func(sb blob.SizedRef) error { + select { + case destc <- sb: + case <-ctx.Done(): + return context.ErrCanceled + } + return nil + }) +} + +// src: non-nil source +// dest: non-nil destination +// thirdLeg: optional third-leg client. if not nil, anything on src +// but not on dest will instead be copied to thirdLeg, instead of +// directly to dest. 
(sneakernet mode, copying to a portable drive +// and transporting thirdLeg to dest) +func (c *syncCmd) doPass(src, dest, thirdLeg blobserver.Storage) (stats SyncStats, retErr error) { + srcBlobs := make(chan blob.SizedRef, 100) + destBlobs := make(chan blob.SizedRef, 100) + srcErr := make(chan error, 1) + destErr := make(chan error, 1) + + ctx := context.TODO() + enumCtx := ctx.New() // used for all (2 or 3) enumerates + defer enumCtx.Cancel() + enumerate := func(errc chan<- error, sto blobserver.Storage, blobc chan<- blob.SizedRef) { + err := enumerateAllBlobs(enumCtx, sto, blobc) + if err != nil { + enumCtx.Cancel() + } + errc <- err + } + + go enumerate(srcErr, src, srcBlobs) + checkSourceError := func() { + if err := <-srcErr; err != nil && err != context.ErrCanceled { + retErr = fmt.Errorf("Enumerate error from source: %v", err) + } + } + + if c.dest == "stdout" { + for sb := range srcBlobs { + fmt.Fprintf(cmdmain.Stdout, "%s %d\n", sb.Ref, sb.Size) + } + checkSourceError() + return + } + + if c.wipe { + // TODO(mpl): dest is a client. make it send a "wipe" request? + // upon reception its server then wipes itself if it is a wiper. + log.Print("Index wiping not yet supported.") + } + + go enumerate(destErr, dest, destBlobs) + checkDestError := func() { + if err := <-destErr; err != nil && err != context.ErrCanceled { + retErr = fmt.Errorf("Enumerate error from destination: %v", err) + } + } + + destNotHaveBlobs := make(chan blob.SizedRef) + + readSrcBlobs := srcBlobs + if c.verbose { + readSrcBlobs = loggingBlobRefChannel(srcBlobs) + } + + mismatches := []blob.Ref{} + onMismatch := func(br blob.Ref) { + // TODO(bradfitz): check both sides and repair, carefully. For now, fail. 
+ log.Printf("WARNING: blobref %v has differing sizes on source and dest", br) + stats.ErrorCount++ + mismatches = append(mismatches, br) + } + + go blobserver.ListMissingDestinationBlobs(destNotHaveBlobs, onMismatch, readSrcBlobs, destBlobs) + + // Handle three-legged mode if tc is provided. + checkThirdError := func() {} // default nop + syncBlobs := destNotHaveBlobs + firstHopDest := dest + if thirdLeg != nil { + thirdBlobs := make(chan blob.SizedRef, 100) + thirdErr := make(chan error, 1) + go enumerate(thirdErr, thirdLeg, thirdBlobs) + checkThirdError = func() { + if err := <-thirdErr; err != nil && err != context.ErrCanceled { + retErr = fmt.Errorf("Enumerate error from third leg: %v", err) + } + } + thirdNeedBlobs := make(chan blob.SizedRef) + go blobserver.ListMissingDestinationBlobs(thirdNeedBlobs, onMismatch, destNotHaveBlobs, thirdBlobs) + syncBlobs = thirdNeedBlobs + firstHopDest = thirdLeg + } + + for sb := range syncBlobs { + fmt.Fprintf(cmdmain.Stdout, "Destination needs blob: %s\n", sb) + + blobReader, size, err := src.Fetch(sb.Ref) + if err != nil { + stats.ErrorCount++ + log.Printf("Error fetching %s: %v", sb.Ref, err) + continue + } + if size != sb.Size { + stats.ErrorCount++ + log.Printf("Source blobserver's enumerate size of %d for blob %s doesn't match its Get size of %d", + sb.Size, sb.Ref, size) + continue + } + + if _, err := blobserver.Receive(firstHopDest, sb.Ref, blobReader); err != nil { + stats.ErrorCount++ + log.Printf("Upload of %s to destination blobserver failed: %v", sb.Ref, err) + continue + } + stats.BlobsCopied++ + stats.BytesCopied += int64(size) + + if c.removeSrc { + if err = src.RemoveBlobs([]blob.Ref{sb.Ref}); err != nil { + stats.ErrorCount++ + log.Printf("Failed to delete %s from source: %v", sb.Ref, err) + } + } + } + + checkSourceError() + checkDestError() + checkThirdError() + if retErr == nil && stats.ErrorCount > 0 { + retErr = fmt.Errorf("%d errors during sync", stats.ErrorCount) + } + return stats, retErr +} + 
+func loggingBlobRefChannel(ch <-chan blob.SizedRef) chan blob.SizedRef { + ch2 := make(chan blob.SizedRef) + go func() { + defer close(ch2) + var last time.Time + var nblob, nbyte int64 + for v := range ch { + ch2 <- v + nblob++ + nbyte += int64(v.Size) + now := time.Now() + if last.IsZero() || now.After(last.Add(1*time.Second)) { + last = now + log.Printf("At source blob %v (%d blobs, %d bytes)", v.Ref, nblob, nbyte) + } + } + log.Printf("Total blobs: %d, %d bytes", nblob, nbyte) + }() + return ch2 +} diff --git a/vendor/github.com/camlistore/camlistore/cmd/camtool/sync_test.go b/vendor/github.com/camlistore/camlistore/cmd/camtool/sync_test.go new file mode 100644 index 00000000..d8e7148a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/cmd/camtool/sync_test.go @@ -0,0 +1,42 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "testing" +) + +func TestLooksLikePath(t *testing.T) { + tests := []struct { + v string + want bool + }{ + {"foo.com", false}, + {"127.0.0.1:234", false}, + {"foo", false}, + + {"/foo", true}, + {"./foo", true}, + {"../foo", true}, + } + for _, tt := range tests { + got := looksLikePath(tt.v) + if got != tt.want { + t.Errorf("looksLikePath(%q) = %v; want %v", tt.v, got, tt.want) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/config/dev-blobserver-config.json b/vendor/github.com/camlistore/camlistore/config/dev-blobserver-config.json new file mode 100644 index 00000000..0b6de0a6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/config/dev-blobserver-config.json @@ -0,0 +1,18 @@ +{ "_for-emacs": "-*- mode: js2;-*-", + "handlerConfig": true, + "baseURL": ["_env", "http://localhost:${CAMLI_PORT}"], + "password": ["_env", "${CAMLI_PASSWORD}"], + + "TLSCertFile": ["_env", "${CAMLI_TLS_CRT_FILE}", ""], + "TLSKeyFile": ["_env", "${CAMLI_TLS_KEY_FILE}", ""], + + "prefixes": { + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT}"] + } + } + } +} + diff --git a/vendor/github.com/camlistore/camlistore/config/dev-client-dir-demo/client-config.json b/vendor/github.com/camlistore/camlistore/config/dev-client-dir-demo/client-config.json new file mode 100644 index 00000000..8ee1dd3c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/config/dev-client-dir-demo/client-config.json @@ -0,0 +1,12 @@ +{ + "servers": { + "devcam": { + "server": "http://localhost:3179/", + "auth": "devauth:camli3179", + "default": true + } + }, + "ignoredFiles": [".DS_Store"], + "identity": "26F5ABDA", + "identitySecretRing": ["_env", "${HOME}/src/camlistore.org/pkg/jsonsign/testdata/test-secring.gpg"] +} diff --git a/vendor/github.com/camlistore/camlistore/config/dev-client-dir/client-config.json b/vendor/github.com/camlistore/camlistore/config/dev-client-dir/client-config.json new file 
mode 100644 index 00000000..41ce982e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/config/dev-client-dir/client-config.json @@ -0,0 +1,12 @@ +{ + "servers": { + "devcam": { + "server": ["_env", "${CAMLI_SERVER}", "http://localhost:3179/"], + "auth": ["_env", "${CAMLI_AUTH}"], + "default": true + } + }, + "ignoredFiles": [".DS_Store"], + "identity": ["_env", "${CAMLI_KEYID}"], + "identitySecretRing": ["_env", "${CAMLI_SECRET_RING}"] +} diff --git a/vendor/github.com/camlistore/camlistore/config/dev-indexer-config.json b/vendor/github.com/camlistore/camlistore/config/dev-indexer-config.json new file mode 100644 index 00000000..df902766 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/config/dev-indexer-config.json @@ -0,0 +1,18 @@ +{ "_for-emacs": "-*- mode: js2;-*-", + "handlerConfig": true, + "baseURL": ["_env", "http://localhost:${CAMLI_PORT}"], + "password": ["_env", "${CAMLI_PASSWORD}"], + "prefixes": { + "/indexer/": { + "handler": "storage-mysqlindexer", + "handlerArgs": { + "database": "devcamlistore", + "user": "root", + "password": "root", + "host": "127.0.0.1" + } + } + } +} + + diff --git a/vendor/github.com/camlistore/camlistore/config/dev-server-config.json b/vendor/github.com/camlistore/camlistore/config/dev-server-config.json new file mode 100644 index 00000000..9f0ddd22 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/config/dev-server-config.json @@ -0,0 +1,362 @@ +{ "_for-emacs": "-*- mode: js2;-*-", + "handlerConfig": true, + "baseURL": ["_env", "${CAMLI_BASEURL}"], + "auth": ["_env", "${CAMLI_AUTH}"], + "https": ["_env", "${CAMLI_TLS}", false], + "httpsCert": "config/tls.crt", + "httpsKey": "config/tls.key", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "ownerName": ["_env", "${USER}-dev"], + "jsonSignRoot": "/sighelper/", + "blobRoot": "/bs-recv/", + "helpRoot": "/help/", + "statusRoot": "/status/", + "searchRoot": "/my-search/", + "stealth": false + } + }, + + "/hello/": { + "handler": 
"app", + "enabled": ["_env", "${CAMLI_HELLO_ENABLED}"], + "handlerArgs": { + "program": "hello", + "appConfig": { + "word": "world" + } + } + }, + + "/pics/": { + "handler": "app", + "enabled": ["_env", "${CAMLI_PUBLISH_ENABLED}"], + "handlerArgs": { + "program": "publisher", + "appConfig": { + "camliRoot": "dev-pics-root", + "sourceRoot": ["_env", "${CAMLI_DEV_CAMLI_ROOT}", ""], + "cacheRoot": ["_env", "${CAMLI_ROOT_CACHE}"], + "goTemplate": "gallery.html" + } + } + }, + + "/stub-test-disable/": { + "handler": "publish", + "enabled": false, + "handlerArgs": { + } + }, + + "/ui/": { + "handler": "ui", + "handlerArgs": { + "sourceRoot": ["_env", "${CAMLI_DEV_CAMLI_ROOT}", ""], + "cache": "/cache/", + "scaledImage": { + "type": "kv", + "file": ["_env", "${CAMLI_ROOT_CACHE}/thumbnails.kv", ""] + } + } + }, + + "/status/": { + "handler": "status" + }, + + "/help/": { + "handler": "help" + }, + + "/sync-index/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "to": ["_env", "${CAMLI_INDEXER_PATH}"], + "queue": { "type": "memory" }, + "fullSyncOnStart": ["_env", "${CAMLI_FULL_INDEX_SYNC_ON_START}"], + "blockingFullSyncOnStart": ["_env", "${CAMLI_FULL_INDEX_SYNC_ON_START}"] + } + }, + + "/sync-r1/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "to": "/r1/", + "queue": { "type": "memory" } + } + }, + + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "secretRing": ["_env", "${CAMLI_SECRET_RING}"], + "keyId": ["_env", "${CAMLI_KEYID}"], + "publicKeyDest": "/bs/" + } + }, + + "/bs-recv/": { + "handler": "storage-replica", + "handlerArgs": { + "minWritesForSuccess": 2, + "backends": ["/bs/", ["_env", "${CAMLI_INDEXER_PATH}"]], + "readBackends": ["/bs/"] + } + }, + + "/cond-unused/": { + "handler": "storage-cond", + "handlerArgs": { + "write": { + "if": "isSchema", + "then": "/bs-recv/", + "else": "/bs/" + }, + "read": "/bs/" + } + }, + + "/bs/": { + "handler": "storage-blobpacked", + "handlerArgs": { + "smallBlobs": "/bs-loose/", + 
"largeBlobs": "/bs-packed/", + "metaIndex": { + "type": "kv", + "file": ["_env", "${CAMLI_ROOT}/packindex.kv"] + } + } + }, + + "/bs-loose/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT}/loose"] + } + }, + + "/bs-packed/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT}/packed"] + } + }, + + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT_CACHE}"] + } + }, + + "/sharder/": { + "handler": "storage-shard", + "handlerArgs": { + "backends": ["/s1/", "/s2/"] + } + }, + + "/s1/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT_SHARD1}"] + } + }, + + "/s2/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT_SHARD2}"] + } + }, + + "/repl/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": ["/r1/", "/r2/", "/r3/"], + "minWritesForSuccess": 2 + } + }, + + "/r1/": { + "handler": "storage-diskpacked", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT_REPLICA1}"] + } + }, + + "/r2/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT_REPLICA2}"] + } + }, + + "/r3/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT_REPLICA3}"] + } + }, + + "/enc/": { + "handler": "storage-encrypt", + "handlerArgs": { + "I_AGREE": "that encryption support hasn't been peer-reviewed, isn't finished, and its format might change.", + "meta": "/encmeta/", + "blobs": "/encblob/", + "metaIndex": { "type": "memory" }, + "key": "000102030405060708090a0b0c0d0e0f" + } + }, + + "/encmeta/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT_ENCMETA}"] + } + }, + + "/encblob/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT_ENCBLOB}"] + } + }, + + "/index-memory/": { + "enabled": ["_env", "${CAMLI_MEMINDEX_ENABLED}"], + 
"handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "type": "memory" + } + } + }, + + "/index-leveldb/": { + "enabled": ["_env", "${CAMLI_LEVELDB_ENABLED}"], + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "type": "leveldb", + "file": ["_env", "${CAMLI_DBNAME}", ""] + } + } + }, + + "/index-kv/": { + "enabled": ["_env", "${CAMLI_KVINDEX_ENABLED}"], + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "type": "kv", + "file": ["_env", "${CAMLI_DBNAME}", ""] + } + } + }, + + "/index-mongo/": { + "enabled": ["_env", "${CAMLI_MONGO_ENABLED}", true], + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "type": "mongo", + "host": "localhost", + "database": ["_env", "${CAMLI_DBNAME}"] + } + } + }, + + "/index-mysql/": { + "enabled": ["_env", "${CAMLI_MYSQL_ENABLED}", true], + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "type": "mysql", + "database": ["_env", "${CAMLI_DBNAME}"], + "user": "root", + "password": "root", + "host": "127.0.0.1" + } + } + }, + + "/index-postgres/": { + "enabled": ["_env", "${CAMLI_POSTGRES_ENABLED}", true], + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "type": "postgres", + "database": ["_env", "${CAMLI_DBNAME}"], + "user": "postgres", + "password": "postgres", + "host": "127.0.0.1" + } + } + }, + + "/index-sqlite/": { + "enabled": ["_env", "${CAMLI_SQLITE_ENABLED}", true], + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "type": "sqlite", + "file": ["_env", "${CAMLI_DBNAME}"] + } + } + }, + + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": ["_env", "${CAMLI_INDEXER_PATH}"], + "owner": ["_env", "${CAMLI_PUBKEY_BLOBREF}"], + "slurpToMemory": true, + "devBlockStartupOn": "/sync-index/" + } + }, + + "/importer/": { + "handler": "importer", + 
"handlerArgs": { + "dummy": { + "clientID": "dummyID", + "clientSecret": "foobar" + }, + "flickr": { + "clientSecret": ["_env", "${CAMLI_FLICKR_API_KEY}", ""] + }, + "foursquare": { + "clientSecret": ["_env", "${CAMLI_FOURSQUARE_API_KEY}", ""] + }, + "picasa": { + "clientSecret": ["_env", "${CAMLI_PICASA_API_KEY}", ""] + }, + "twitter": { + "clientSecret": ["_env", "${CAMLI_TWITTER_API_KEY}", ""] + } + } + }, + + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + } + } + +} diff --git a/vendor/github.com/camlistore/camlistore/depcheck/depcheck.go b/vendor/github.com/camlistore/camlistore/depcheck/depcheck.go new file mode 100644 index 00000000..d7f9a77a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/depcheck/depcheck.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package depcheck does nothing except for fail to build when +// the system's version of Go is too old. +package depcheck diff --git a/vendor/github.com/camlistore/camlistore/depcheck/min_go_version.go b/vendor/github.com/camlistore/camlistore/depcheck/min_go_version.go new file mode 100644 index 00000000..5c6d4f69 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/depcheck/min_go_version.go @@ -0,0 +1,28 @@ +// +build !go1.3 + +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package depcheck + +import ` + +**************************************************************************** + + Camlistore requires Go 1.3 or later. + +**************************************************************************** +` diff --git a/vendor/github.com/camlistore/camlistore/dev/camfix.pl b/vendor/github.com/camlistore/camlistore/dev/camfix.pl new file mode 100755 index 00000000..f7fe3ff0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/camfix.pl @@ -0,0 +1,23 @@ +#!/usr/bin/perl + +my $file = shift; +die "$file doesn't exist" unless -e $file; + +open(my $fh, $file) or die "failed: $!\n"; +my $c = do { local $/; <$fh> }; +close($fh); + +my $changes = 0; + +$changes = 1 if $c =~ s!^(\s+)\"camli/(.+)\"!$1\"camlistore.org/pkg/$2\"!mg; +$changes = 1 if $c =~ s!^(\s+)\"camlistore/(.+)\"!$1\"camlistore.org/$2\"!mg; +$changes = 1 if $c =~ s!^(\s+_ )\"camlistore/(.+)\"!$1\"camlistore.org/$2\"!mg; +$changes = 1 if $c =~ s!/pkg/pkg/!/pkg/!g; +$changes = 1 if $c =~ s!camlistore.org/pkg/third_party/!camlistore.org/third_party/!g; + +exit 0 unless $changes; + +open(my $fh, ">$file") or die; +print $fh $c; +close($fh); +print STDERR "rewrote $file\n"; diff --git a/vendor/github.com/camlistore/camlistore/dev/config-dir-local/client-config.json b/vendor/github.com/camlistore/camlistore/dev/config-dir-local/client-config.json new file mode 100644 index 00000000..4e3a147e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/config-dir-local/client-config.json @@ -0,0 +1,12 @@ +{ + "servers": { + "dev": { + "server": "http://localhost:3179/", + 
"auth": "devauth:camli3179", + "default": true + } + }, + "ignoredFiles": [".DS_Store"], + "identity": "26F5ABDA", + "identitySecretRing": ["_env", "${CAMLI_CONFENV_SECRET_RING}"] +} diff --git a/vendor/github.com/camlistore/camlistore/dev/demo.sh b/vendor/github.com/camlistore/camlistore/dev/demo.sh new file mode 100644 index 00000000..37179f5d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/demo.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +# Some hacks to make make demoing Camlistore less distracting but +# still permit using the dev-* scripts (which are normally slow and +# noisy) + +#go run make.go +#go install camlistore.org/dev/devcam +#export CAMLI_QUIET=1 +#export CAMLI_FAST_DEV=1 + +# Or just: +# (This way is buggy in that the server selection doesn't let you also +# pick an identity) +# export CAMLI_DEFAULT_SERVER=dev + +# Better: +export CAMLI_CONFIG_DIR=$HOME/src/camlistore.org/config/dev-client-dir-demo diff --git a/vendor/github.com/camlistore/camlistore/dev/dev-db b/vendor/github.com/camlistore/camlistore/dev/dev-db new file mode 100755 index 00000000..5598ffe7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/dev-db @@ -0,0 +1,3 @@ +#!/bin/sh + +exec mysql -uroot -proot devcamli$USER diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/appengine.go b/vendor/github.com/camlistore/camlistore/dev/devcam/appengine.go new file mode 100644 index 00000000..0d0b66d7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/appengine.go @@ -0,0 +1,120 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file adds the "appengine" subcommand to devcam, to run the +// development appengine camlistore with dev_appserver.py. + +package main + +import ( + "flag" + "fmt" + "os" + "path/filepath" + "strconv" + + "camlistore.org/pkg/cmdmain" +) + +type gaeCmd struct { + // start of flag vars + all bool + port string + sdk string + wipe bool + // end of flag vars +} + +func init() { + cmdmain.RegisterCommand("appengine", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(gaeCmd) + flags.BoolVar(&cmd.all, "all", false, "Listen on all interfaces.") + flags.StringVar(&cmd.port, "port", "3179", "Port to listen on.") + flags.StringVar(&cmd.sdk, "sdk", "", "The path to the App Engine Go SDK (or a symlink to it).") + flags.BoolVar(&cmd.wipe, "wipe", false, "Wipe the blobs on disk and the indexer.") + return cmd + }) +} + +func (c *gaeCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, "Usage: devcam [globalopts] appengine [cmdopts] [other_dev_appserver_opts] \n") +} + +func (c *gaeCmd) Describe() string { + return "run the App Engine camlistored in dev mode." 
+} + +func (c *gaeCmd) RunCommand(args []string) error { + err := c.checkFlags(args) + if err != nil { + return cmdmain.UsageError(fmt.Sprint(err)) + } + applicationDir := filepath.Join("server", "appengine") + if _, err := os.Stat(applicationDir); err != nil { + return fmt.Errorf("Appengine application dir not found at %s", applicationDir) + } + if err = c.checkSDK(); err != nil { + return err + } + if err = c.mirrorSourceRoot(applicationDir); err != nil { + return err + } + + devAppServerBin := filepath.Join(c.sdk, "dev_appserver.py") + cmdArgs := []string{ + "--skip_sdk_update_check", + fmt.Sprintf("--port=%s", c.port), + } + if c.all { + cmdArgs = append(cmdArgs, "--host", "0.0.0.0") + } + if c.wipe { + cmdArgs = append(cmdArgs, "--clear_datastore") + } + cmdArgs = append(cmdArgs, args...) + cmdArgs = append(cmdArgs, applicationDir) + return runExec(devAppServerBin, cmdArgs, NewCopyEnv()) +} + +func (c *gaeCmd) checkFlags(args []string) error { + if _, err := strconv.ParseInt(c.port, 0, 0); err != nil { + return fmt.Errorf("Invalid -port value: %q", c.port) + } + return nil +} + +func (c *gaeCmd) checkSDK() error { + defaultSDK := "appengine-sdk" + if c.sdk == "" { + c.sdk = defaultSDK + } + if _, err := os.Stat(c.sdk); err != nil { + return fmt.Errorf("App Engine SDK not found. 
Please specify it with --sdk or:\n$ ln -s /path/to/appengine-go-sdk %s\n\n", defaultSDK) + } + return nil +} + +func (c *gaeCmd) mirrorSourceRoot(gaeAppDir string) error { + uiDirs := []string{"server/camlistored/ui", "third_party/closure/lib/closure", "pkg/server"} + for _, dir := range uiDirs { + oriPath := filepath.Join(camliSrcRoot, filepath.FromSlash(dir)) + dstPath := filepath.Join(gaeAppDir, "source_root", filepath.FromSlash(dir)) + if err := cpDir(oriPath, dstPath, []string{".go"}); err != nil { + return fmt.Errorf("Error while mirroring %s to %s: %v", oriPath, dstPath, err) + } + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/camget.go b/vendor/github.com/camlistore/camlistore/dev/devcam/camget.go new file mode 100644 index 00000000..f1cc33a3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/camget.go @@ -0,0 +1,118 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file adds the "get" subcommand to devcam, to run camget against the dev server. 
+ +package main + +import ( + "flag" + "fmt" + "path/filepath" + "regexp" + "strconv" + "strings" + + "camlistore.org/pkg/cmdmain" +) + +type getCmd struct { + // start of flag vars + altkey bool + path string + port string + tls bool + // end of flag vars + + env *Env +} + +func init() { + cmdmain.RegisterCommand("get", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := &getCmd{ + env: NewCopyEnv(), + } + flags.BoolVar(&cmd.altkey, "altkey", false, "Use different gpg key and password from the server's.") + flags.StringVar(&cmd.path, "path", "/bs", "Optional URL prefix path.") + flags.StringVar(&cmd.port, "port", "3179", "Port camlistore is listening on.") + flags.BoolVar(&cmd.tls, "tls", false, "Use TLS.") + return cmd + }) +} + +func (c *getCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, "Usage: devcam get [get_opts] -- camget_args\n") +} + +func (c *getCmd) Examples() []string { + return []string{ + "", + "-- --shared http://localhost:3169/share/", + } +} + +func (c *getCmd) Describe() string { + return "run camget in dev mode." +} + +func (c *getCmd) RunCommand(args []string) error { + err := c.checkFlags(args) + if err != nil { + return cmdmain.UsageError(fmt.Sprint(err)) + } + if !*noBuild { + if err := build(filepath.Join("cmd", "camget")); err != nil { + return fmt.Errorf("Could not build camget: %v", err) + } + } + c.env.SetCamdevVars(c.altkey) + // wipeCacheDir needs to be called after SetCamdevVars, because that is + // where CAMLI_CACHE_DIR is defined. + if *wipeCache { + c.env.wipeCacheDir() + } + + cmdBin := filepath.Join("bin", "camget") + cmdArgs := []string{ + "-verbose=" + strconv.FormatBool(*cmdmain.FlagVerbose || !quiet), + } + if !isSharedMode(args) { + blobserver := "http://localhost:" + c.port + c.path + if c.tls { + blobserver = strings.Replace(blobserver, "http://", "https://", 1) + } + cmdArgs = append(cmdArgs, "-server="+blobserver) + } + cmdArgs = append(cmdArgs, args...) 
+ return runExec(cmdBin, cmdArgs, c.env) +} + +func (c *getCmd) checkFlags(args []string) error { + if _, err := strconv.ParseInt(c.port, 0, 0); err != nil { + return fmt.Errorf("Invalid -port value: %q", c.port) + } + return nil +} + +func isSharedMode(args []string) bool { + sharedRgx := regexp.MustCompile("--?shared") + for _, v := range args { + if sharedRgx.MatchString(v) { + return true + } + } + return false +} diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/cammount.go b/vendor/github.com/camlistore/camlistore/dev/devcam/cammount.go new file mode 100644 index 00000000..4e049e8e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/cammount.go @@ -0,0 +1,127 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file adds the "mount" subcommand to devcam, to run cammount against the dev server. 
+ +package main + +import ( + "flag" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + + "camlistore.org/pkg/cmdmain" +) + +type mountCmd struct { + // start of flag vars + altkey bool + path string + port string + tls bool + debug bool + // end of flag vars + + env *Env +} + +const mountpoint = "/tmp/cammount-dir" + +func init() { + cmdmain.RegisterCommand("mount", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := &mountCmd{ + env: NewCopyEnv(), + } + flags.BoolVar(&cmd.altkey, "altkey", false, "Use different gpg key and password from the server's.") + flags.BoolVar(&cmd.tls, "tls", false, "Use TLS.") + flags.StringVar(&cmd.path, "path", "/", "Optional URL prefix path.") + flags.StringVar(&cmd.port, "port", "3179", "Port camlistore is listening on.") + flags.BoolVar(&cmd.debug, "debug", false, "print debugging messages.") + return cmd + }) +} + +func (c *mountCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, "Usage: devcam mount [mount_opts] [|]\n") +} + +func (c *mountCmd) Examples() []string { + return []string{ + "", + "http://localhost:3169/share/", + } +} + +func (c *mountCmd) Describe() string { + return "run cammount in dev mode." +} + +func tryUnmount(dir string) error { + if runtime.GOOS == "darwin" { + return exec.Command("diskutil", "umount", "force", dir).Run() + } + return exec.Command("fusermount", "-u", dir).Run() +} + +func (c *mountCmd) RunCommand(args []string) error { + err := c.checkFlags(args) + if err != nil { + return cmdmain.UsageError(fmt.Sprint(err)) + } + if !*noBuild { + if err := build(filepath.Join("cmd", "cammount")); err != nil { + return fmt.Errorf("Could not build cammount: %v", err) + } + } + c.env.SetCamdevVars(c.altkey) + // wipeCacheDir needs to be called after SetCamdevVars, because that is + // where CAMLI_CACHE_DIR is defined. 
+ if *wipeCache { + c.env.wipeCacheDir() + } + + tryUnmount(mountpoint) + if err := os.Mkdir(mountpoint, 0700); err != nil && !os.IsExist(err) { + return fmt.Errorf("Could not make mount point: %v", err) + } + + blobserver := "http://localhost:" + c.port + c.path + if c.tls { + blobserver = strings.Replace(blobserver, "http://", "https://", 1) + } + + cmdBin := filepath.Join("bin", "cammount") + cmdArgs := []string{ + "-debug=" + strconv.FormatBool(c.debug), + "-server=" + blobserver, + } + cmdArgs = append(cmdArgs, args...) + cmdArgs = append(cmdArgs, mountpoint) + fmt.Printf("Cammount running with mountpoint %v. Press 'q' or ctrl-c to shut down.\n", mountpoint) + return runExec(cmdBin, cmdArgs, c.env) +} + +func (c *mountCmd) checkFlags(args []string) error { + if _, err := strconv.ParseInt(c.port, 0, 0); err != nil { + return fmt.Errorf("Invalid -port value: %q", c.port) + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/camput.go b/vendor/github.com/camlistore/camlistore/dev/devcam/camput.go new file mode 100644 index 00000000..dda1dfdc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/camput.go @@ -0,0 +1,105 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file adds the "put" subcommand to devcam, to run camput against the dev server. 
+ +package main + +import ( + "flag" + "fmt" + "path/filepath" + "strconv" + "strings" + + "camlistore.org/pkg/cmdmain" +) + +type putCmd struct { + // start of flag vars + altkey bool + path string + port string + tls bool + // end of flag vars + + env *Env +} + +func init() { + cmdmain.RegisterCommand("put", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := &putCmd{ + env: NewCopyEnv(), + } + flags.BoolVar(&cmd.altkey, "altkey", false, "Use different gpg key and password from the server's.") + flags.BoolVar(&cmd.tls, "tls", false, "Use TLS.") + flags.StringVar(&cmd.path, "path", "/", "Optional URL prefix path.") + flags.StringVar(&cmd.port, "port", "3179", "Port camlistore is listening on.") + return cmd + }) +} + +func (c *putCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, "Usage: devcam put [put_opts] camput_args\n") +} + +func (c *putCmd) Examples() []string { + return []string{ + "file --filenodes /mnt/camera/DCIM", + } +} + +func (c *putCmd) Describe() string { + return "run camput in dev mode." +} + +func (c *putCmd) RunCommand(args []string) error { + err := c.checkFlags(args) + if err != nil { + return cmdmain.UsageError(fmt.Sprint(err)) + } + if !*noBuild { + if err := build(filepath.Join("cmd", "camput")); err != nil { + return fmt.Errorf("Could not build camput: %v", err) + } + } + c.env.SetCamdevVars(c.altkey) + // wipeCacheDir needs to be called after SetCamdevVars, because that is + // where CAMLI_CACHE_DIR is defined. + if *wipeCache { + c.env.wipeCacheDir() + } + + blobserver := "http://localhost:" + c.port + c.path + if c.tls { + blobserver = strings.Replace(blobserver, "http://", "https://", 1) + } + + cmdBin := filepath.Join("bin", "camput") + cmdArgs := []string{ + "-verbose=" + strconv.FormatBool(*cmdmain.FlagVerbose || !quiet), + "-server=" + blobserver, + } + cmdArgs = append(cmdArgs, args...) 
+ return runExec(cmdBin, cmdArgs, c.env) +} + +func (c *putCmd) checkFlags(args []string) error { + if _, err := strconv.ParseInt(c.port, 0, 0); err != nil { + return fmt.Errorf("Invalid -port value: %q", c.port) + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/camtool.go b/vendor/github.com/camlistore/camlistore/dev/devcam/camtool.go new file mode 100644 index 00000000..4eeb5ed9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/camtool.go @@ -0,0 +1,77 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file adds the "tool" subcommand to devcam, to run camtool against +// the dev server. + +package main + +import ( + "flag" + "fmt" + "path/filepath" + + "camlistore.org/pkg/cmdmain" +) + +type toolCmd struct { + // start of flag vars + altkey bool + // end of flag vars + + env *Env +} + +func init() { + cmdmain.RegisterCommand("tool", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := &toolCmd{ + env: NewCopyEnv(), + } + flags.BoolVar(&cmd.altkey, "altkey", false, "Use different gpg key and password from the server's.") + return cmd + }) +} + +func (c *toolCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, "Usage: devcam tool [globalopts] [commandopts] [commandargs]\n") +} + +func (c *toolCmd) Examples() []string { + return []string{ + "sync --all", + } +} + +func (c *toolCmd) Describe() string { + return "run camtool in dev mode." 
+} + +func (c *toolCmd) RunCommand(args []string) error { + if !*noBuild { + if err := build(filepath.Join("cmd", "camtool")); err != nil { + return fmt.Errorf("Could not build camtool: %v", err) + } + } + c.env.SetCamdevVars(c.altkey) + // wipeCacheDir needs to be called after SetCamdevVars, because that is + // where CAMLI_CACHE_DIR is defined. + if *wipeCache { + c.env.wipeCacheDir() + } + + cmdBin := filepath.Join("bin", "camtool") + return runExec(cmdBin, args, c.env) +} diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/devcam.go b/vendor/github.com/camlistore/camlistore/dev/devcam/devcam.go new file mode 100644 index 00000000..7cbd2a99 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/devcam.go @@ -0,0 +1,284 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + "os/exec" + "os/signal" + pathpkg "path" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "time" + + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/osutil" +) + +var ( + noBuild = flag.Bool("nobuild", false, "do not rebuild anything") + race = flag.Bool("race", false, "build with race detector") + quiet, _ = strconv.ParseBool(os.Getenv("CAMLI_QUIET")) + wipeCache = flag.Bool("wipecache", false, "wipe the cache directory. Server cache with devcam server, client cache otherwise.") + // Whether to build the subcommand with sqlite support. 
This only + // concerns the server subcommand, which sets it to serverCmd.sqlite. + withSqlite bool +) + +// The path to the Camlistore source tree. Any devcam command +// should be run from there. +var camliSrcRoot string + +// sysExec is set to syscall.Exec on platforms that support it. +var sysExec func(argv0 string, argv []string, envv []string) (err error) + +// runExec execs bin. If the platform doesn't support exec, it runs it and waits +// for it to finish. +func runExec(bin string, args []string, env *Env) error { + if sysExec != nil { + sysExec(bin, append([]string{filepath.Base(bin)}, args...), env.Flat()) + } + + cmd := exec.Command(bin, args...) + cmd.Env = env.Flat() + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Start(); err != nil { + return fmt.Errorf("Could not run %v: %v", bin, err) + } + go handleSignals(cmd.Process) + return cmd.Wait() +} + +// cpDir copies the contents of src dir into dst dir. +// filter is a list of file suffixes to skip. ex: ".go" +func cpDir(src, dst string, filter []string) error { + return filepath.Walk(src, func(fullpath string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + for _, suffix := range filter { + if strings.HasSuffix(fi.Name(), suffix) { + return nil + } + } + suffix, err := filepath.Rel(src, fullpath) + if err != nil { + return fmt.Errorf("Failed to find Rel(%q, %q): %v", src, fullpath, err) + } + if fi.IsDir() { + return nil + } + return cpFile(fullpath, filepath.Join(dst, suffix)) + }) +} + +func cpFile(src, dst string) error { + sfi, err := os.Stat(src) + if err != nil { + return err + } + if !sfi.Mode().IsRegular() { + return fmt.Errorf("cpFile can't deal with non-regular file %s", src) + } + + dstDir := filepath.Dir(dst) + if err := os.MkdirAll(dstDir, 0755); err != nil { + return err + } + + df, err := os.Create(dst) + if err != nil { + return err + } + sf, err := os.Open(src) + if err != nil { + return err + } + defer sf.Close() + + n, err := io.Copy(df, sf) 
+ if err == nil && n != sfi.Size() { + err = fmt.Errorf("copied wrong size for %s -> %s: copied %d; want %d", src, dst, n, sfi.Size()) + } + cerr := df.Close() + if err == nil { + err = cerr + } + return err +} + +func handleSignals(camliProc *os.Process) { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT) + for { + sig := <-c + sysSig, ok := sig.(syscall.Signal) + if !ok { + log.Fatal("Not a unix signal") + } + switch sysSig { + case syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT: + log.Printf("Received %v signal, terminating.", sig) + err := camliProc.Kill() + if err != nil { + log.Fatalf("Failed to kill child: %v ", err) + } + default: + log.Fatal("Received another signal, should not happen.") + } + } +} + +func checkCamliSrcRoot() { + args := flag.Args() + // TODO(mpl): we should probably get rid of that limitation someday. + if len(args) > 0 && (args[0] == "review" || + args[0] == "hook" || + args[0] == "fixv") { + // exception for devcam review, which does its own check. + return + } + if _, err := os.Stat("make.go"); err != nil { + if !os.IsNotExist(err) { + log.Fatalf("Could not stat make.go: %v", err) + } + log.Fatal("./make.go not found; devcam needs to be run from the Camlistore source tree root.") + } + cwd, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + camliSrcRoot = cwd +} + +func repoRoot() (string, error) { + dir, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("could not get current directory: %v", err) + } + rootlen := 1 + if runtime.GOOS == "windows" { + rootlen += len(filepath.VolumeName(dir)) + } + for { + if _, err := os.Stat(filepath.Join(dir, ".git")); err == nil { + return dir, nil + } + if len(dir) == rootlen && dir[rootlen-1] == filepath.Separator { + return "", fmt.Errorf(".git not found. 
Rerun from within the Camlistore source tree.") + } + dir = filepath.Dir(dir) + } +} + +func selfModTime() (time.Time, error) { + var modTime time.Time + devcamBin, err := osutil.SelfPath() + if err != nil { + return modTime, err + } + fi, err := os.Stat(devcamBin) + if err != nil { + return modTime, err + } + return fi.ModTime(), nil +} + +func checkModtime() error { + binModtime, err := selfModTime() + if err != nil { + return fmt.Errorf("could not get ModTime of current devcam executable: %v", err) + } + + devcamDir := filepath.Join(camliSrcRoot, "dev", "devcam") + d, err := os.Open(devcamDir) + if err != nil { + return fmt.Errorf("could not read devcam source dir %v: %v", devcamDir, err) + } + defer d.Close() + fis, err := d.Readdir(-1) + if err != nil { + return fmt.Errorf("could not read devcam source dir %v: %v", devcamDir, err) + } + for _, fi := range fis { + if fi.ModTime().After(binModtime) { + log.Printf("**************************************************************") + log.Printf("WARNING: your devcam binary is outdated, you should rebuild it") + log.Printf("**************************************************************") + return nil + } + } + return nil +} + +// Build builds the camlistore command at the given path from the source tree root. +func build(path string) error { + if v, _ := strconv.ParseBool(os.Getenv("CAMLI_FAST_DEV")); v { + // Demo mode. See dev/demo.sh. 
+ return nil + } + _, cmdName := filepath.Split(path) + target := pathpkg.Join("camlistore.org", filepath.ToSlash(path)) + binPath := filepath.Join("bin", cmdName) + var modtime int64 + fi, err := os.Stat(binPath) + if err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("Could not stat %v: %v", binPath, err) + } + } else { + modtime = fi.ModTime().Unix() + } + args := []string{ + "run", "make.go", + "--quiet", + "--race=" + strconv.FormatBool(*race), + "--embed_static=false", + "--sqlite=" + strconv.FormatBool(withSqlite), + fmt.Sprintf("--if_mods_since=%d", modtime), + "--targets=" + target, + } + cmd := exec.Command("go", args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("Error building %v: %v", target, err) + } + return nil +} + +func main() { + cmdmain.CheckCwd = checkCamliSrcRoot + cmdmain.CheckModtime = func() error { + if err := checkModtime(); err != nil { + log.Printf("Skipping freshness check: %v", err) + } + return nil + } + + // TODO(mpl): usage error is not really correct for devcam. + // See if I can reimplement it while still using cmdmain.Main(). + cmdmain.Main() +} diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/doc.go b/vendor/github.com/camlistore/camlistore/dev/devcam/doc.go new file mode 100644 index 00000000..5c9bcc7e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/doc.go @@ -0,0 +1,47 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +The devcam tool is a collection of wrappers around the camlistore programs +(camistored, camput, camtool...) which take care of setup and configuration, +so they can be used by developers to ease hacking on camlistore. + +Usage: + + devcam [modeopts] -- [commandargs] + +Modes: + + appengine: run the App Engine camlistored in dev mode. + get: run camget in dev mode. + put: run camput in dev mode. + server: run the stand-alone camlistored in dev mode. + +Examples: + + devcam get + devcam get -- --shared http://localhost:3169/share/ + + devcam put file --filenodes /mnt/camera/DCIM + + devcam server -wipe -mysql -fullclosure + +For mode-specific help: + + devcam -help + +*/ +package main diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/env.go b/vendor/github.com/camlistore/camlistore/dev/devcam/env.go new file mode 100644 index 00000000..6d2bfdac --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/env.go @@ -0,0 +1,168 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "errors" + "flag" + "log" + "os" + "path/filepath" + "strings" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/jsonsign" + "camlistore.org/pkg/osutil" +) + +const ( + // default secret ring used in tests and in devcam commands + defaultSecring = "pkg/jsonsign/testdata/test-secring.gpg" + // public ID of the GPG key in defaultSecring + defaultIdentity = "26F5ABDA" +) + +var ( + flagSecretRing = flag.String("secretring", "", "the secret ring file to run with") + flagIdentity = flag.String("identity", "", "the key id of the identity to run with") +) + +type Env struct { + m map[string]string + order []string +} + +func (e *Env) Set(k, v string) { + _, dup := e.m[k] + e.m[k] = v + if !dup { + e.order = append(e.order, k) + } +} + +func (e *Env) Del(k string) { + delete(e.m, k) +} + +// NoGo removes GOPATH and GOBIN. +func (e *Env) NoGo() { + e.Del("GOPATH") + e.Del("GOBIN") +} + +func (e *Env) Flat() []string { + vv := make([]string, 0, len(e.order)) + for _, k := range e.order { + if v, ok := e.m[k]; ok { + vv = append(vv, k+"="+v) + } + } + return vv +} + +func NewEnv() *Env { + return &Env{make(map[string]string), nil} +} + +func NewCopyEnv() *Env { + env := NewEnv() + for _, kv := range os.Environ() { + eq := strings.Index(kv, "=") + if eq > 0 { + env.Set(kv[:eq], kv[eq+1:]) + } + } + return env +} + +func (e *Env) SetCamdevVars(altkey bool) { + setCamdevVarsFor(e, altkey) +} + +func setCamdevVars() { + setCamdevVarsFor(nil, false) +} + +func rootInTmpDir() (string, error) { + user := osutil.Username() + if user == "" { + return "", errors.New("Could not get username from environment") + } + return filepath.Join(os.TempDir(), "camliroot-"+user), nil +} + +func setCamdevVarsFor(e *Env, altkey bool) { + var setenv func(string, string) error + if e != nil { + setenv = func(k, v string) error { e.Set(k, v); return nil } + } else { + setenv = os.Setenv + } + + setenv("CAMLI_AUTH", "userpass:camlistore:pass3179") + // env values for 
clients. server will overwrite them anyway in its setEnvVars. + root, err := rootInTmpDir() + if err != nil { + log.Fatal(err) + } + setenv("CAMLI_CACHE_DIR", filepath.Join(root, "client", "cache")) + setenv("CAMLI_CONFIG_DIR", filepath.Join("config", "dev-client-dir")) + + secring := defaultSecring + identity := defaultIdentity + + if altkey { + secring = filepath.FromSlash("pkg/jsonsign/testdata/password-foo-secring.gpg") + identity = "C7C3E176" + println("**\n** Note: password is \"foo\"\n**\n") + } else { + if *flagSecretRing != "" { + secring = *flagSecretRing + } + if *flagIdentity != "" { + identity = *flagIdentity + } + } + + entity, err := jsonsign.EntityFromSecring(identity, secring) + if err != nil { + panic(err) + } + armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity) + if err != nil { + panic(err) + } + pubKeyRef := blob.SHA1FromString(armoredPublicKey) + + setenv("CAMLI_SECRET_RING", secring) + setenv("CAMLI_KEYID", identity) + setenv("CAMLI_PUBKEY_BLOBREF", pubKeyRef.String()) + setenv("CAMLI_KV_VERIFY", "true") +} + +func (e *Env) wipeCacheDir() { + cacheDir, _ := e.m["CAMLI_CACHE_DIR"] + if cacheDir == "" { + log.Fatal("Could not wipe cache dir, CAMLI_CACHE_DIR not defined") + } + if err := os.RemoveAll(cacheDir); err != nil { + log.Fatalf("Could not remove cache dir %v: %v", cacheDir, err) + } + if err := os.MkdirAll(cacheDir, 0700); err != nil { + log.Fatalf("Could not recreate cache dir %v: %v", cacheDir, err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/exec.go b/vendor/github.com/camlistore/camlistore/dev/devcam/exec.go new file mode 100644 index 00000000..f62ee153 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/exec.go @@ -0,0 +1,27 @@ +// +build !windows + +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "syscall" +) + +func init() { + sysExec = syscall.Exec +} diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/hook.go b/vendor/github.com/camlistore/camlistore/dev/devcam/hook.go new file mode 100644 index 00000000..4da9a7a7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/hook.go @@ -0,0 +1,305 @@ +/* +Copyright 2015 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file adds the "hook" subcommand to devcam, to install and run git hooks. +package main + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + + "camlistore.org/pkg/cmdmain" +) + +var hookPath = ".git/hooks/" +var hookFiles = []string{ + "pre-commit", +} + +func (c *hookCmd) installHook() error { + root, err := repoRoot() + if err != nil { + return err + } + for _, hookFile := range hookFiles { + filename := filepath.Join(root, hookPath+hookFile) + hookContent := fmt.Sprintf(hookScript, hookFile) + // If hook file exists, assume it is okay. 
+ _, err := os.Stat(filename) + if err == nil { + if c.verbose { + data, err := ioutil.ReadFile(filename) + if err != nil { + c.verbosef("reading hook: %v", err) + } else if string(data) != hookContent { + c.verbosef("unexpected hook content in %s", filename) + } + } + continue + } + + if !os.IsNotExist(err) { + return fmt.Errorf("checking hook: %v", err) + } + c.verbosef("installing %s hook", hookFile) + if err := ioutil.WriteFile(filename, []byte(hookContent), 0700); err != nil { + return fmt.Errorf("writing hook: %v", err) + } + } + return nil +} + +var hookScript = `#!/bin/sh +exec devcam hook %s "$@" +` + +type hookCmd struct { + verbose bool +} + +func init() { + cmdmain.RegisterCommand("hook", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := &hookCmd{} + flags.BoolVar(&cmd.verbose, "verbose", false, "Be verbose.") + // TODO(mpl): "-w" flag to run gofmt -w and devcam fixv -w. for now just print instruction. + return cmd + }) +} + +func (c *hookCmd) Usage() { + printf("Usage: devcam [globalopts] hook [[hook-name] [args...]]\n") +} + +func (c *hookCmd) Examples() []string { + return []string{ + "# install the hooks (if needed)", + "pre-commit # install the hooks (if needed), then run the pre-commit hook", + } +} + +func (c *hookCmd) Describe() string { + return "Install git hooks for Camlistore, and if given, run the hook given as argument. Currently available hooks are: " + strings.TrimSuffix(strings.Join(hookFiles, ", "), ",") + "." 
+} + +func (c *hookCmd) RunCommand(args []string) error { + if err := c.installHook(); err != nil { + return err + } + if len(args) == 0 { + return nil + } + switch args[0] { + case "pre-commit": + if err := c.hookPreCommit(args[1:]); err != nil { + if !(len(args) > 1 && args[1] == "test") { + printf("You can override these checks with 'git commit --no-verify'\n") + } + cmdmain.ExitWithFailure = true + return err + } + } + return nil +} + +// hookPreCommit does the following checks, in order: +// gofmt, and trailing space. +// If appropriate, any one of these checks prints the action +// required from the user, and the following checks are not +// performed. +func (c *hookCmd) hookPreCommit(args []string) (err error) { + if err = c.hookGofmt(); err != nil { + return err + } + return c.hookTrailingSpace() +} + +// hookGofmt runs a gofmt check on the local files matching the files in the +// git staging area. +// An error is returned if something went wrong or if some of the files need +// gofmting. In the latter case, the instruction is printed. 
+func (c *hookCmd) hookGofmt() error { + if os.Getenv("GIT_GOFMT_HOOK") == "off" { + printf("gofmt disabled by $GIT_GOFMT_HOOK=off\n") + return nil + } + + files, err := c.runGofmt() + if err != nil { + printf("gofmt hook reported errors:\n\t%v\n", strings.Replace(strings.TrimSpace(err.Error()), "\n", "\n\t", -1)) + return errors.New("gofmt errors") + } + if len(files) == 0 { + return nil + } + printf("You need to format with gofmt:\n\tgofmt -w %s\n", + strings.Join(files, " ")) + return errors.New("gofmt required") +} + +func (c *hookCmd) hookTrailingSpace() error { + out, _ := cmdOutputDirErr(".", "git", "diff-index", "--check", "--diff-filter=ACM", "--cached", "HEAD", "--") + if out != "" { + printf("\n%s", out) + printf("Trailing whitespace detected, you need to clean it up manually.\n") + return errors.New("trailing whitespace.") + } + return nil +} + +// runGofmt runs the external gofmt command over the local version of staged files. +// It returns the files that need gofmting. +func (c *hookCmd) runGofmt() (files []string, err error) { + repo, err := repoRoot() + if err != nil { + return nil, err + } + if !strings.HasSuffix(repo, string(filepath.Separator)) { + repo += string(filepath.Separator) + } + + out, err := cmdOutputDirErr(".", "git", "diff-index", "--name-only", "--diff-filter=ACM", "--cached", "HEAD", "--") + if err != nil { + return nil, err + } + indexFiles := addRoot(repo, filter(gofmtRequired, nonBlankLines(out))) + if len(indexFiles) == 0 { + return + } + + args := []string{"-l"} + // TODO(mpl): it would be nice to TrimPrefix the pwd from each file to get a shorter output. + // However, since git sets the pwd to GIT_DIR before running the pre-commit hook, we lost + // the actual pwd from when we ran `git commit`, so no dice so far. + for _, file := range indexFiles { + args = append(args, file) + } + + if c.verbose { + fmt.Fprintln(cmdmain.Stderr, commandString("gofmt", args)) + } + cmd := exec.Command("gofmt", args...) 
+ var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err = cmd.Run() + + if err != nil { + // Error but no stderr: usually can't find gofmt. + if stderr.Len() == 0 { + return nil, fmt.Errorf("invoking gofmt: %v", err) + } + return nil, fmt.Errorf("%s: %v", stderr.String(), err) + } + + // Build file list. + files = lines(stdout.String()) + sort.Strings(files) + return files, nil +} + +func printf(format string, args ...interface{}) { + cmdmain.Errorf(format, args...) +} + +func addRoot(root string, list []string) []string { + var out []string + for _, x := range list { + out = append(out, filepath.Join(root, x)) + } + return out +} + +// nonBlankLines returns the non-blank lines in text. +func nonBlankLines(text string) []string { + var out []string + for _, s := range lines(text) { + if strings.TrimSpace(s) != "" { + out = append(out, s) + } + } + return out +} + +// filter returns the elements in list satisfying f. +func filter(f func(string) bool, list []string) []string { + var out []string + for _, x := range list { + if f(x) { + out = append(out, x) + } + } + return out +} + +// gofmtRequired reports whether the specified file should be checked +// for gofmt'dness by the pre-commit hook. +// The file name is relative to the repo root. +func gofmtRequired(file string) bool { + if !strings.HasSuffix(file, ".go") { + return false + } + if !strings.HasPrefix(file, "test/") { + return true + } + return strings.HasPrefix(file, "test/bench/") || file == "test/run.go" +} + +func commandString(command string, args []string) string { + return strings.Join(append([]string{command}, args...), " ") +} + +func lines(text string) []string { + out := strings.Split(text, "\n") + // Split will include a "" after the last line. Remove it. 
+ if n := len(out) - 1; n >= 0 && out[n] == "" { + out = out[:n] + } + return out +} + +func (c *hookCmd) verbosef(format string, args ...interface{}) { + if c.verbose { + fmt.Fprintf(cmdmain.Stdout, format, args...) + } +} + +// cmdOutputDirErr runs the command line in dir, returning its output +// and any error results. +// +// NOTE: cmdOutputDirErr must be used only to run commands that read state, +// not for commands that make changes. Commands that make changes +// should be run using runDirErr so that the -v and -n flags apply to them. +func cmdOutputDirErr(dir, command string, args ...string) (string, error) { + // NOTE: We only show these non-state-modifying commands with -v -v. + // Otherwise things like 'git sync -v' show all our internal "find out about + // the git repo" commands, which is confusing if you are just trying to find + // out what git sync means. + + cmd := exec.Command(command, args...) + if dir != "." { + cmd.Dir = dir + } + b, err := cmd.CombinedOutput() + return string(b), err +} diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/review.go b/vendor/github.com/camlistore/camlistore/dev/devcam/review.go new file mode 100644 index 00000000..5fd0b4fe --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/review.go @@ -0,0 +1,126 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file adds the "review" subcommand to devcam, to send changes for peer review. 
+ +package main + +import ( + "bufio" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + + "camlistore.org/pkg/cmdmain" +) + +var ( + defaultHook = filepath.FromSlash("misc/commit-msg.githook") + hookFile = filepath.FromSlash(".git/hooks/commit-msg") +) + +type reviewCmd struct{} + +func init() { + cmdmain.RegisterCommand("review", func(flags *flag.FlagSet) cmdmain.CommandRunner { + return new(reviewCmd) + }) +} + +func (c *reviewCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, "Usage: devcam review\n") +} + +func (c *reviewCmd) Describe() string { + return "Submit your git commits for review." +} + +func (c *reviewCmd) RunCommand(args []string) error { + if len(args) > 0 { + return cmdmain.UsageError("too many arguments.") + } + goToCamliRoot() + c.checkHook() + gitPush() + return nil +} + +func goToCamliRoot() { + prevDir, err := os.Getwd() + if err != nil { + log.Fatalf("could not get current directory: %v", err) + } + for { + if _, err := os.Stat(defaultHook); err == nil { + return + } + if err := os.Chdir(".."); err != nil { + log.Fatalf("Could not chdir: %v", err) + } + currentDir, err := os.Getwd() + if err != nil { + log.Fatalf("Could not get current directory: %v", err) + } + if currentDir == prevDir { + log.Fatal("Camlistore tree root not found. 
Run from within the Camlistore tree please.") + } + prevDir = currentDir + } +} + +func (c *reviewCmd) checkHook() { + _, err := os.Stat(hookFile) + if err == nil { + return + } + if !os.IsNotExist(err) { + log.Fatal(err) + } + fmt.Fprintf(cmdmain.Stdout, "Presubmit hook to add Change-Id to commit messages is missing.\nNow automatically creating it at %v from %v\n\n", hookFile, defaultHook) + data, err := ioutil.ReadFile(defaultHook) + if err != nil { + log.Fatal(err) + } + if err := ioutil.WriteFile(hookFile, data, 0700); err != nil { + log.Fatal(err) + } + fmt.Fprintf(cmdmain.Stdout, "Amending last commit to add Change-Id.\nPlease re-save description without making changes.\n\n") + fmt.Fprintf(cmdmain.Stdout, "Press Enter to continue.\n") + if _, _, err := bufio.NewReader(cmdmain.Stdin).ReadLine(); err != nil { + log.Fatal(err) + } + + cmd := exec.Command("git", []string{"commit", "--amend"}...) + cmd.Stdout = cmdmain.Stdout + cmd.Stderr = cmdmain.Stderr + if err := cmd.Run(); err != nil { + log.Fatal(err) + } +} + +func gitPush() { + cmd := exec.Command("git", + []string{"push", "https://camlistore.googlesource.com/camlistore", "HEAD:refs/for/master"}...) + cmd.Stdout = cmdmain.Stdout + cmd.Stderr = cmdmain.Stderr + if err := cmd.Run(); err != nil { + log.Fatalf("Could not git push: %v", err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/server.go b/vendor/github.com/camlistore/camlistore/dev/devcam/server.go new file mode 100644 index 00000000..d72462c8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/server.go @@ -0,0 +1,553 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file adds the "server" subcommand to devcam, to run camlistored. + +package main + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "camlistore.org/pkg/client" + "camlistore.org/pkg/cmdmain" + "camlistore.org/pkg/importer" + _ "camlistore.org/pkg/importer/allimporters" + "camlistore.org/pkg/netutil" + "camlistore.org/pkg/osutil" +) + +type serverCmd struct { + // start of flag vars + all bool + hostname string + port string + tls bool + wipe bool + things bool + debug bool + + mongo bool + mysql bool + postgres bool + sqlite bool + kvfile bool + memory bool + + slow bool + throttle int + latency int + + fullIndexSync bool + + fullClosure bool + mini bool + publish bool // whether to build and start the publisher app(s) + hello bool // whether to build and start the hello demo app + + openBrowser bool + flickrAPIKey string + foursquareAPIKey string + picasaAPIKey string + twitterAPIKey string + extraArgs string // passed to camlistored + // end of flag vars + + listen string // address + port to listen on + root string // the temp dir where blobs are stored + env *Env +} + +func init() { + cmdmain.RegisterCommand("server", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := &serverCmd{ + env: NewCopyEnv(), + } + flags.BoolVar(&cmd.all, "all", false, "Listen on all interfaces.") + flags.StringVar(&cmd.hostname, "hostname", "", "Hostname to advertise, defaults to the hostname reported by the kernel.") + flags.StringVar(&cmd.port, "port", "3179", 
"Port to listen on.") + flags.BoolVar(&cmd.tls, "tls", false, "Use TLS.") + flags.BoolVar(&cmd.wipe, "wipe", false, "Wipe the blobs on disk and the indexer.") + flags.BoolVar(&cmd.things, "makethings", false, "Create various test data on startup (twitter imports for now). Requires wipe. Conflicts with mini.") + flags.BoolVar(&cmd.debug, "debug", false, "Enable http debugging.") + flags.BoolVar(&cmd.publish, "publish", true, "Enable publisher app(s)") + flags.BoolVar(&cmd.hello, "hello", false, "Enable hello (demo) app") + flags.BoolVar(&cmd.mini, "mini", false, "Enable minimal mode, where all optional features are disabled. (Currently just publishing)") + + flags.BoolVar(&cmd.mongo, "mongo", false, "Use mongodb as the index storage. Excludes -mysql, -postgres, -sqlite, -memory, -kvfile.") + flags.BoolVar(&cmd.mysql, "mysql", false, "Use mysql as the index storage. Excludes -mongo, -postgres, -sqlite, -memory, -kvfile.") + flags.BoolVar(&cmd.postgres, "postgres", false, "Use postgres as the index storage. Excludes -mongo, -mysql, -sqlite, -memory, -kvfile.") + flags.BoolVar(&cmd.sqlite, "sqlite", false, "Use sqlite as the index storage. Excludes -mongo, -mysql, -postgres, -memory, -kvfile.") + flags.BoolVar(&cmd.kvfile, "kvfile", false, "Use cznic/kv as the index storage. Excludes -mongo, -mysql, -postgres, -memory, -sqlite.") + flags.BoolVar(&cmd.memory, "memory", false, "Use a memory-only index storage. 
Excludes -mongo, -mysql, -postgres, -sqlite, -kvfile.") + + flags.BoolVar(&cmd.slow, "slow", false, "Add artificial latency.") + flags.IntVar(&cmd.throttle, "throttle", 150, "If -slow, this is the rate in kBps, to which we should throttle.") + flags.IntVar(&cmd.latency, "latency", 90, "If -slow, this is the added latency, in ms.") + + flags.BoolVar(&cmd.fullIndexSync, "fullindexsync", false, "Perform full sync to indexer on startup.") + + flags.BoolVar(&cmd.fullClosure, "fullclosure", false, "Use the ondisk closure library.") + + flags.BoolVar(&cmd.openBrowser, "openbrowser", false, "Open the start page on startup.") + flags.StringVar(&cmd.flickrAPIKey, "flickrapikey", "", "The key and secret to use with the Flickr importer. Formatted as ':'.") + flags.StringVar(&cmd.foursquareAPIKey, "foursquareapikey", "", "The key and secret to use with the Foursquare importer. Formatted as ':'.") + flags.StringVar(&cmd.picasaAPIKey, "picasakey", "", "The username and password to use with the Picasa importer. Formatted as ':'.") + flags.StringVar(&cmd.twitterAPIKey, "twitterapikey", "", "The key and secret to use with the Twitter importer. Formatted as ':'.") + flags.StringVar(&cmd.root, "root", "", "A directory to store data in. Defaults to a location in the OS temp directory.") + flags.StringVar(&cmd.extraArgs, "extraargs", "", + "List of comma separated options that will be passed to camlistored") + return cmd + }) +} + +func (c *serverCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, "Usage: devcam [globalopts] server [serveropts]\n") +} + +func (c *serverCmd) Examples() []string { + return []string{ + "-wipe -mysql -fullclosure", + } +} + +func (c *serverCmd) Describe() string { + return "run the stand-alone camlistored in dev mode." 
+} + +func (c *serverCmd) checkFlags(args []string) error { + if len(args) != 0 { + c.Usage() + } + if c.mini { + if c.things { + return cmdmain.UsageError("--mini and --makethings are mutually exclusive.") + } + c.publish = false + c.hello = false + } + if c.things && !c.wipe { + return cmdmain.UsageError("--makethings requires --wipe.") + } + nindex := 0 + for _, v := range []bool{c.mongo, c.mysql, c.postgres, c.sqlite, c.memory, c.kvfile} { + if v { + nindex++ + } + } + if nindex > 1 { + return fmt.Errorf("Only one index option allowed") + } + + if _, err := strconv.ParseInt(c.port, 0, 0); err != nil { + return fmt.Errorf("Invalid -port value: %q", c.port) + } + return nil +} + +func (c *serverCmd) setRoot() error { + if c.root == "" { + if root, err := rootInTmpDir(); err != nil { + return err + } else { + c.root = filepath.Join(root, "port"+c.port) + } + } + log.Printf("Temp dir root is %v", c.root) + if c.wipe { + log.Printf("Wiping %v", c.root) + if err := os.RemoveAll(c.root); err != nil { + return fmt.Errorf("Could not wipe %v: %v", c.root, err) + } + } + return nil +} + +func (c *serverCmd) makeSuffixdir(fullpath string) { + if err := os.MkdirAll(fullpath, 0755); err != nil { + log.Fatalf("Could not create %v: %v", fullpath, err) + } +} + +func (c *serverCmd) setEnvVars() error { + c.env.SetCamdevVars(false) + setenv := func(k, v string) { + c.env.Set(k, v) + } + if c.slow { + setenv("DEV_THROTTLE_KBPS", fmt.Sprintf("%d", c.throttle)) + setenv("DEV_THROTTLE_LATENCY_MS", fmt.Sprintf("%d", c.latency)) + } + if c.debug { + setenv("CAMLI_HTTP_DEBUG", "1") + } + user := osutil.Username() + if user == "" { + return errors.New("Could not get username from environment") + } + setenv("CAMLI_FULL_INDEX_SYNC_ON_START", "false") + if c.fullIndexSync { + setenv("CAMLI_FULL_INDEX_SYNC_ON_START", "true") + } + setenv("CAMLI_DBNAME", "devcamli"+user) + setenv("CAMLI_MYSQL_ENABLED", "false") + setenv("CAMLI_MONGO_ENABLED", "false") + setenv("CAMLI_POSTGRES_ENABLED", 
"false") + setenv("CAMLI_SQLITE_ENABLED", "false") + setenv("CAMLI_KVINDEX_ENABLED", "false") + setenv("CAMLI_MEMINDEX_ENABLED", "false") + setenv("CAMLI_LEVELDB_ENABLED", "false") + + setenv("CAMLI_PUBLISH_ENABLED", strconv.FormatBool(c.publish)) + setenv("CAMLI_HELLO_ENABLED", strconv.FormatBool(c.hello)) + switch { + case c.memory: + setenv("CAMLI_MEMINDEX_ENABLED", "true") + setenv("CAMLI_INDEXER_PATH", "/index-memory/") + case c.mongo: + setenv("CAMLI_MONGO_ENABLED", "true") + setenv("CAMLI_INDEXER_PATH", "/index-mongo/") + case c.postgres: + setenv("CAMLI_POSTGRES_ENABLED", "true") + setenv("CAMLI_INDEXER_PATH", "/index-postgres/") + case c.mysql: + setenv("CAMLI_MYSQL_ENABLED", "true") + setenv("CAMLI_INDEXER_PATH", "/index-mysql/") + case c.kvfile: + setenv("CAMLI_KVINDEX_ENABLED", "true") + setenv("CAMLI_INDEXER_PATH", "/index-kv/") + if c.root == "" { + panic("no root set") + } + setenv("CAMLI_DBNAME", filepath.Join(c.root, "kvindex.db")) + case c.sqlite: + setenv("CAMLI_SQLITE_ENABLED", "true") + setenv("CAMLI_INDEXER_PATH", "/index-sqlite/") + if c.root == "" { + panic("no root set") + } + setenv("CAMLI_DBNAME", filepath.Join(c.root, "sqliteindex.db")) + default: + setenv("CAMLI_LEVELDB_ENABLED", "true") + setenv("CAMLI_INDEXER_PATH", "/index-leveldb/") + if c.root == "" { + panic("no root set") + } + setenv("CAMLI_DBNAME", filepath.Join(c.root, "leveldbindex.db")) + } + + base := "http://localhost:" + c.port + c.listen = "127.0.0.1:" + c.port + if c.all { + c.listen = "0.0.0.0:" + c.port + if c.hostname == "" { + hostname, err := os.Hostname() + if err != nil { + return fmt.Errorf("Could not get system hostname: %v", err) + } + base = "http://" + hostname + ":" + c.port + } else { + base = "http://" + c.hostname + ":" + c.port + } + } + setenv("CAMLI_TLS", "false") + if c.tls { + base = strings.Replace(base, "http://", "https://", 1) + setenv("CAMLI_TLS", "true") + } + setenv("CAMLI_BASEURL", base) + + setenv("CAMLI_DEV_CAMLI_ROOT", camliSrcRoot) + 
setenv("CAMLI_AUTH", "devauth:pass3179") + fullSuffix := func(name string) string { + return filepath.Join(c.root, name) + } + suffixes := map[string]string{ + "CAMLI_ROOT": fullSuffix("bs"), + "CAMLI_ROOT_SHARD1": fullSuffix("s1"), + "CAMLI_ROOT_SHARD2": fullSuffix("s2"), + "CAMLI_ROOT_REPLICA1": fullSuffix("r1"), + "CAMLI_ROOT_REPLICA2": fullSuffix("r2"), + "CAMLI_ROOT_REPLICA3": fullSuffix("r3"), + "CAMLI_ROOT_CACHE": fullSuffix("cache"), + "CAMLI_ROOT_ENCMETA": fullSuffix("encmeta"), + "CAMLI_ROOT_ENCBLOB": fullSuffix("encblob"), + } + for k, v := range suffixes { + c.makeSuffixdir(v) + setenv(k, v) + } + c.makeSuffixdir(filepath.Join(fullSuffix("bs"), "packed")) + c.makeSuffixdir(filepath.Join(fullSuffix("bs"), "loose")) + setenv("CAMLI_PORT", c.port) + if c.flickrAPIKey != "" { + setenv("CAMLI_FLICKR_ENABLED", "true") + setenv("CAMLI_FLICKR_API_KEY", c.flickrAPIKey) + } + if c.foursquareAPIKey != "" { + setenv("CAMLI_FOURSQUARE_ENABLED", "true") + setenv("CAMLI_FOURSQUARE_API_KEY", c.foursquareAPIKey) + } + if c.picasaAPIKey != "" { + setenv("CAMLI_PICASA_ENABLED", "true") + setenv("CAMLI_PICASA_API_KEY", c.picasaAPIKey) + } + if c.twitterAPIKey != "" { + setenv("CAMLI_TWITTER_ENABLED", "true") + setenv("CAMLI_TWITTER_API_KEY", c.twitterAPIKey) + } + setenv("CAMLI_CONFIG_DIR", "config") + setenv("CAMLI_CACHE_DIR", filepath.Join(c.root, "cache")) + setenv("CAMLI_APP_BINDIR", "bin") + return nil +} + +func (c *serverCmd) setupIndexer() error { + args := []string{"dbinit"} + switch { + case c.postgres: + args = append(args, + "-dbtype=postgres", + "-user=postgres", + "-password=postgres", + "-host=localhost", + "-dbname="+c.env.m["CAMLI_DBNAME"]) + case c.mysql: + args = append(args, + "-user=root", + "-password=root", + "-host=localhost", + "-dbname="+c.env.m["CAMLI_DBNAME"]) + case c.sqlite: + args = append(args, + "-dbtype=sqlite", + "-dbname="+c.env.m["CAMLI_DBNAME"]) + case c.mongo: + args = append(args, + "-dbtype=mongo", + "-host=localhost", + 
"-dbname="+c.env.m["CAMLI_DBNAME"]) + default: + return nil + } + if c.wipe { + args = append(args, "-wipe") + } else { + args = append(args, "-ignoreexists") + } + binPath := filepath.Join("bin", "camtool") + cmd := exec.Command(binPath, args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("Could not run camtool dbinit: %v", err) + } + return nil +} + +func (c *serverCmd) syncTemplateBlobs() error { + if c.wipe { + templateDir := "dev-server-template" + if _, err := os.Stat(templateDir); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + blobsDir := filepath.Join(c.root, "sha1") + if err := cpDir(templateDir, blobsDir, nil); err != nil { + return fmt.Errorf("Could not cp template blobs: %v", err) + } + } + return nil +} + +func (c *serverCmd) setFullClosure() error { + if c.fullClosure { + oldsvn := filepath.Join(c.root, filepath.FromSlash("tmp/closure-lib/.svn")) + if err := os.RemoveAll(oldsvn); err != nil { + return fmt.Errorf("Could not remove svn checkout of closure-lib %v: %v", + oldsvn, err) + } + log.Println("Updating closure library...") + args := []string{"run", "third_party/closure/updatelibrary.go", "-verbose"} + cmd := exec.Command("go", args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("Could not run updatelibrary.go: %v", err) + } + c.env.Set("CAMLI_DEV_CLOSURE_DIR", "third_party/closure/lib/closure") + } + return nil +} + +func (c *serverCmd) makeThings() error { + const importerPrefix = "/importer/" + // check that "/importer/" prefix is in config, just in case it ever changes. 
+ configFile := filepath.Join(camliSrcRoot, "config", "dev-server-config.json") + config, err := ioutil.ReadFile(configFile) + if err != nil { + return fmt.Errorf("could not read config file %v: %v", configFile, err) + } + if !bytes.Contains(config, []byte(importerPrefix)) { + return fmt.Errorf("%s prefix not found in dev config. Did it change?", importerPrefix) + } + + if err := netutil.AwaitReachable("localhost:"+c.port, time.Minute); err != nil { + return err + } + + osutil.AddSecretRingFlag() + setCamdevVars() + + baseURL := c.env.m["CAMLI_BASEURL"] + if baseURL == "" { + return errors.New("CAMLI_BASEURL is not set") + } + + cl := client.New(baseURL) + signer, err := cl.Signer() + if err != nil { + return err + } + ClientId := make(map[string]string) + ClientSecret := make(map[string]string) + for name := range importer.All() { + ClientId[name] = "fakeStaticClientId" + ClientSecret[name] = "fakeStaticClientSecret" + } + hc := importer.HostConfig{ + BaseURL: baseURL, + Prefix: importerPrefix, + Target: cl, + BlobSource: cl, + Signer: signer, + Search: cl, + ClientId: ClientId, + ClientSecret: ClientSecret, + } + + for name, imp := range importer.All() { + mk, ok := imp.(importer.TestDataMaker) + if !ok { + continue + } + + tr := mk.MakeTestData() + + hc.HTTPClient = &http.Client{Transport: tr} + host, err := importer.NewHost(hc) + if err != nil { + return fmt.Errorf("could not obtain Host: %v", err) + } + + rc, err := importer.CreateAccount(host, name) + if err != nil { + return err + } + + if err := mk.SetTestAccount(rc.AccountNode()); err != nil { + return fmt.Errorf("could not set fake account node for importer %v: %v", name, err) + } + + if err := imp.Run(rc); err != nil { + return err + } + } + return nil +} + +func (c *serverCmd) RunCommand(args []string) error { + err := c.checkFlags(args) + if err != nil { + return cmdmain.UsageError(fmt.Sprint(err)) + } + if !*noBuild { + withSqlite = c.sqlite + targets := []string{ + filepath.Join("server", 
"camlistored"), + filepath.Join("cmd", "camtool"), + } + if c.hello { + targets = append(targets, filepath.Join("app", "hello")) + } + if c.publish { + targets = append(targets, filepath.Join("app", "publisher")) + } + for _, name := range targets { + err := build(name) + if err != nil { + return fmt.Errorf("Could not build %v: %v", name, err) + } + } + } + if err := c.setRoot(); err != nil { + return fmt.Errorf("Could not setup the camli root: %v", err) + } + if err := c.setEnvVars(); err != nil { + return fmt.Errorf("Could not setup the env vars: %v", err) + } + // wipeCacheDir needs to be called after setEnvVars, because that is where + // CAMLI_CACHE_DIR is defined. + if *wipeCache { + c.env.wipeCacheDir() + } + if err := c.setupIndexer(); err != nil { + return fmt.Errorf("Could not setup the indexer: %v", err) + } + if err := c.syncTemplateBlobs(); err != nil { + return fmt.Errorf("Could not copy the template blobs: %v", err) + } + if err := c.setFullClosure(); err != nil { + return fmt.Errorf("Could not setup the closure lib: %v", err) + } + + log.Printf("Starting dev server on %v/ui/ with password \"pass3179\"\n", + c.env.m["CAMLI_BASEURL"]) + + camliBin := filepath.Join("bin", "camlistored") + cmdArgs := []string{ + "-configfile=" + filepath.Join(camliSrcRoot, "config", "dev-server-config.json"), + "-listen=" + c.listen, + "-openbrowser=" + strconv.FormatBool(c.openBrowser), + } + if c.extraArgs != "" { + cmdArgs = append(cmdArgs, strings.Split(c.extraArgs, ",")...) + } + if c.things { + // force camlistored to be run as a child process instead of with + // syscall.Exec, so c.makeThings() is able to run. 
+ sysExec = nil + go func() { + if err := c.makeThings(); err != nil { + log.Fatalf("%v", err) + } + }() + } + return runExec(camliBin, cmdArgs, c.env) +} diff --git a/vendor/github.com/camlistore/camlistore/dev/devcam/test.go b/vendor/github.com/camlistore/camlistore/dev/devcam/test.go new file mode 100644 index 00000000..013b487c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/devcam/test.go @@ -0,0 +1,169 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file adds the "test" subcommand to devcam, to run the full test suite. 
+ +package main + +import ( + "flag" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "camlistore.org/pkg/cmdmain" +) + +type testCmd struct { + // start of flag vars + verbose bool + precommit bool + short bool + run string + // end of flag vars + + // buildGoPath becomes our child "go" processes' GOPATH environment variable + buildGoPath string +} + +func init() { + cmdmain.RegisterCommand("test", func(flags *flag.FlagSet) cmdmain.CommandRunner { + cmd := new(testCmd) + flags.BoolVar(&cmd.short, "short", false, "Use '-short' with go test.") + flags.BoolVar(&cmd.precommit, "precommit", true, "Run the pre-commit githook as part of tests.") + flags.BoolVar(&cmd.verbose, "v", false, "Use '-v' (for verbose) with go test.") + flags.StringVar(&cmd.run, "run", "", "Use '-run' with go test.") + return cmd + }) +} + +func (c *testCmd) Usage() { + fmt.Fprintf(cmdmain.Stderr, "Usage: devcam test [test_opts] [targets]\n") +} + +func (c *testCmd) Describe() string { + return "run the full test suite, or the tests in the specified target packages." +} + +func (c *testCmd) RunCommand(args []string) error { + if c.precommit { + if err := c.runPrecommitHook(); err != nil { + return err + } + } + if err := c.syncSrc(); err != nil { + return err + } + buildSrcDir := filepath.Join(c.buildGoPath, "src", "camlistore.org") + if err := os.Chdir(buildSrcDir); err != nil { + return err + } + if err := c.buildSelf(); err != nil { + return err + } + if err := c.runTests(args); err != nil { + return err + } + println("PASS") + return nil +} + +func (c *testCmd) env() *Env { + if c.buildGoPath == "" { + panic("called too early") + } + env := NewCopyEnv() + env.NoGo() + env.Set("GOPATH", c.buildGoPath) + env.Set("CAMLI_MAKE_USEGOPATH", "true") + env.Set("GO15VENDOREXPERIMENT", "1") + return env +} + +func (c *testCmd) syncSrc() error { + args := []string{"run", "make.go", "--onlysync"} + cmd := exec.Command("go", args...) 
+ cmd.Stderr = os.Stderr + out, err := cmd.Output() + if err != nil { + return fmt.Errorf("Error populating tmp src tree: %v", err) + } + c.buildGoPath = strings.TrimSpace(string(out)) + return nil +} + +func (c *testCmd) buildSelf() error { + args := []string{ + "install", + filepath.FromSlash("./dev/devcam"), + } + cmd := exec.Command("go", args...) + binDir, err := filepath.Abs("bin") + if err != nil { + return fmt.Errorf("Error setting GOBIN: %v", err) + } + env := c.env() + env.Set("GOBIN", binDir) + cmd.Env = env.Flat() + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("Error building devcam: %v", err) + } + return nil +} + +func (c *testCmd) runTests(args []string) error { + targs := []string{"test"} + if !strings.HasSuffix(c.buildGoPath, "-nosqlite") { + targs = append(targs, "--tags=with_sqlite fake_android") + } else { + targs = append(targs, "--tags=fake_android") + } + if c.short { + targs = append(targs, "-short") + } + if c.verbose { + targs = append(targs, "-v") + } + if c.run != "" { + targs = append(targs, "-run="+c.run) + } + if len(args) > 0 { + targs = append(targs, args...) + } else { + targs = append(targs, []string{ + "./pkg/...", + "./server/camlistored", + "./server/appengine", + "./cmd/...", + }...) 
+ } + env := c.env() + env.Set("SKIP_DEP_TESTS", "1") + return runExec("go", targs, env) +} + +func (c *testCmd) runPrecommitHook() error { + out, err := exec.Command(filepath.FromSlash("./bin/devcam"), "hook", "pre-commit", "test").CombinedOutput() + if err != nil { + fmt.Println(string(out)) + } + return err + +} diff --git a/vendor/github.com/camlistore/camlistore/dev/envvardoc/envvardoc.go b/vendor/github.com/camlistore/camlistore/dev/envvardoc/envvardoc.go new file mode 100644 index 00000000..1ccbaecc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/envvardoc/envvardoc.go @@ -0,0 +1,189 @@ +// Program envvardoc will verify all referenced environment variables in go +// source are properly documented. +package main + +import ( + "bufio" + "flag" + "fmt" + "io" + "log" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + "text/tabwriter" +) + +var ( + srcDirs = flag.String("srcDirs", "cmd,dev,pkg,server", + "comma separated source directories") + doc = flag.String("doc", "doc/environment-vars.txt", + "file containing environment variable documentation") + all = flag.Bool("all", false, "show all environment vars found") + prefixes = flag.String("prefixes", "CAM,DEV,AWS", + "comma-separated list of env var prefixes we care about. 
Empty implies all") + + docVar = regexp.MustCompile(`^(\w+) \(.+?\):$`) + literalEnvVar = regexp.MustCompile(`os.Getenv\("(\w+)"\)`) + variableEnvVar = regexp.MustCompile(`os.Getenv\((\w+)\)`) +) + +type pos struct { + line int + path string +} + +func (p pos) String() string { + return fmt.Sprintf("%s:%d", p.path, p.line) +} + +type varMap map[string][]pos + +func sortedKeys(m varMap) []string { + keys := make([]string, 0, len(m)) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +type envCollector struct { + literals varMap + variables varMap + documented map[string]struct{} +} + +func newEncCollector() *envCollector { + return &envCollector{ + literals: varMap{}, + variables: varMap{}, + documented: map[string]struct{}{}, + } +} + +func (ec *envCollector) findEnvVars(path string, r io.Reader) error { + scanner := bufio.NewScanner(r) + line := 1 + for scanner.Scan() { + l := scanner.Text() + m := literalEnvVar.FindStringSubmatch(l) + if len(m) == 2 { + p := pos{line: line, path: path} + ec.literals[m[1]] = append(ec.literals[m[1]], p) + } + + m = variableEnvVar.FindStringSubmatch(l) + if len(m) == 2 { + p := pos{line: line, path: path} + ec.variables[m[1]] = append(ec.variables[m[1]], p) + } + line++ + } + return scanner.Err() +} + +func (ec *envCollector) findDocVars(r io.Reader) error { + scanner := bufio.NewScanner(r) + for scanner.Scan() { + l := scanner.Text() + m := docVar.FindStringSubmatch(l) + if len(m) == 2 { + ec.documented[m[1]] = struct{}{} + } + } + return scanner.Err() +} + +func (ec *envCollector) walk(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() || !strings.HasSuffix(path, ".go") { + return nil + } + + r, err := os.Open(path) + if err != nil { + return err + } + defer r.Close() + return ec.findEnvVars(path, r) +} + +func printMap(header string, m varMap) { + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 8, 1, ' ', 0) + fmt.Fprintln(w, 
header) + for _, k := range sortedKeys(m) { + for _, pos := range m[k] { + fmt.Fprintf(w, "%s\t%s\n", k, pos) + } + } + w.Flush() +} + +func (ec *envCollector) printAll() { + fmt.Println("All environment variables") + printMap("Literal\tLocation", ec.literals) + fmt.Println() + printMap("Variable\tLocation", ec.variables) +} + +func (ec *envCollector) printUndocumented(prefixes []string) bool { + missing := varMap{} + for k, v := range ec.literals { + if _, ok := ec.documented[k]; !ok { + keep := false + for _, p := range prefixes { + if strings.HasPrefix(k, p) { + keep = true + break + } + } + if keep || len(prefixes) == 0 { + missing[k] = v + } + } + } + + if len(missing) != 0 { + printMap("Undocumented\tLocation", missing) + } else { + fmt.Println("All environment variables are documented") + } + return len(missing) != 0 +} + +func main() { + flag.Parse() + ec := newEncCollector() + + r, err := os.Open(*doc) + if err != nil { + log.Fatal(err) + } + defer r.Close() + err = ec.findDocVars(r) + if err != nil { + log.Fatal(err) + } + + for _, dn := range strings.Split(*srcDirs, ",") { + err := filepath.Walk(dn, ec.walk) + if err != nil { + log.Fatal(err) + } + } + + if *all { + ec.printAll() + } else { + if ec.printUndocumented(strings.Split(*prefixes, ",")) { + os.Exit(1) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/dev/local.sh b/vendor/github.com/camlistore/camlistore/dev/local.sh new file mode 100644 index 00000000..c302d9c6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/local.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +export CAMLI_CONFENV_SECRET_RING=$(camtool env camsrcroot)/pkg/jsonsign/testdata/test-secring.gpg +export CAMLI_CONFIG_DIR=$(camtool env camsrcroot)/dev/config-dir-local + +# Redundant, but: +export CAMLI_DEFAULT_SERVER=dev diff --git a/vendor/github.com/camlistore/camlistore/dev/make-release b/vendor/github.com/camlistore/camlistore/dev/make-release new file mode 100755 index 00000000..4aa3cc34 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/dev/make-release @@ -0,0 +1,45 @@ +#!/usr/bin/perl + +use strict; +use Getopt::Long; +my $opt_force; +GetOptions("force" => \$opt_force) or die "Usage: make-release [-f] "; + +my $version = shift or die "Usage: make-release "; + +die "Not being run from root of Camlistore" unless -e ".git" && -e "pkg/blob/ref.go"; + +my $cur_branch = `git rev-parse --abbrev-ref HEAD`; +chomp $cur_branch; +die "Not on master" unless $cur_branch eq "master"; + +my $new_branch = "releases/$version"; + +if ($opt_force) { + system("git", "tag", "-d", $version); + system("git", "branch", "-D", $new_branch); +} + +system("git", "checkout", "-b", $new_branch) and die "Failed to create branch $new_branch from master. Does it already exist?"; + +open(my $fh, ">VERSION") or die; +print $fh "$version\n"; +close($fh); + +system("git", "add", "VERSION") and die; +system("git", "commit", "-m", "Add VERSION file on the $new_branch branch.") and die "Failed to commit"; +system("git", "tag", $version) and die "Failed to tag"; + +my $commit = do { open(my $f, ".git/refs/tags/$version") or die; local $/; <$f> }; +chomp $commit; + +system("git", "checkout", "master") and die; +open(my $fh, ">>misc/release-history-tags"); +print $fh "$commit\t$version\n"; +close($fh); + +print "Created branch $new_branch from master, cleaned it and wrote VERSION file, & tagged $version.\n"; +print "\n"; +print "Push with:\n"; +print "\$ git push github refs/tags/$version:refs/tags/$version\n"; +print "\$ git push github $new_branch:$new_branch\n"; diff --git a/vendor/github.com/camlistore/camlistore/dev/push b/vendor/github.com/camlistore/camlistore/dev/push new file mode 100755 index 00000000..45f00a32 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/push @@ -0,0 +1,7 @@ +#!/bin/bash + +set -e + +git push origin master +git push github master +curl http://camlistore.org/mailnow diff --git a/vendor/github.com/camlistore/camlistore/dev/update_closure_compiler.go 
b/vendor/github.com/camlistore/camlistore/dev/update_closure_compiler.go new file mode 100644 index 00000000..847a3230 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/dev/update_closure_compiler.go @@ -0,0 +1,139 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// update_closure_compiler downloads a new version +// of the closure compiler if the one in tmp/closure-compiler +// doesn't exist or is older than the requested version. +package main + +import ( + "archive/zip" + "io" + "log" + "net/http" + "os" + "os/exec" + "path/filepath" + "regexp" + + "camlistore.org/pkg/osutil" +) + +const ( + compilerDirURL = "http://closure-compiler.googlecode.com/files/" + compilerVersion = "20121212" +) + +var rgxVersion = regexp.MustCompile(`.*Version: (.*) \(revision.*`) + +func main() { + + // check JRE presence + _, err := exec.LookPath("java") + if err != nil { + log.Fatal("Didn't find 'java' in $PATH. 
The Java Runtime Environment is needed to run the closure compiler.\n") + } + + camliRootPath, err := osutil.GoPackagePath("camlistore.org") + if err != nil { + log.Fatal("Package camlistore.org not found in $GOPATH (or $GOPATH not defined).") + } + destDir := filepath.Join(camliRootPath, "tmp", "closure-compiler") + // check if compiler already exists + jarFile := filepath.Join(destDir, "compiler.jar") + _, err = os.Stat(jarFile) + if err == nil { + // if compiler exists, check version + cmd := exec.Command("java", "-jar", jarFile, "--version", "--help", "2>&1") + output, _ := cmd.CombinedOutput() + m := rgxVersion.FindStringSubmatch(string(output)) + if m == nil { + log.Fatalf("Could not find compiler version in %q", output) + } + if m[1] == compilerVersion { + log.Printf("compiler already at version %v , nothing to do.", compilerVersion) + os.Exit(0) + } + if err := os.Remove(jarFile); err != nil { + log.Fatalf("Could not remove %v: %v", jarFile, err) + } + } else { + if !os.IsNotExist(err) { + log.Fatalf("Could not stat %v: %v", jarFile, err) + } + } + + // otherwise, download compiler + log.Printf("Getting closure compiler version %s.\n", compilerVersion) + if err := os.MkdirAll(destDir, 0755); err != nil { + log.Fatal(err) + } + if err := os.Chdir(destDir); err != nil { + log.Fatal(err) + } + zipFilename := "compiler-" + compilerVersion + ".zip" + compilerURL := compilerDirURL + zipFilename + resp, err := http.Get(compilerURL) + if err != nil { + log.Fatal(err) + } + defer resp.Body.Close() + f, err := os.Create(zipFilename) + if err != nil { + log.Fatal(err) + } + if _, err := io.Copy(f, resp.Body); err != nil { + log.Fatal(err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } + + r, err := zip.OpenReader(zipFilename) + if err != nil { + log.Fatal(err) + } + for x, f := range r.File { + if f.FileHeader.Name != "compiler.jar" { + if x == len(r.File)-1 { + log.Fatal("compiler.jar was not found in the zip archive") + } + continue + } + rc, err := 
f.Open() + if err != nil { + log.Fatal(err) + } + g, err := os.Create(jarFile) + if err != nil { + log.Fatal(err) + } + defer g.Close() + if _, err = io.Copy(g, rc); err != nil { + log.Fatal(err) + } + rc.Close() + break + } + + if err := r.Close(); err != nil { + log.Fatal(err) + } + if err := os.Remove(zipFilename); err != nil { + log.Fatal(err) + } + log.Printf("Success. Installed at %v", jarFile) +} diff --git a/vendor/github.com/camlistore/camlistore/doc/app-environment.txt b/vendor/github.com/camlistore/camlistore/doc/app-environment.txt new file mode 100644 index 00000000..e5d5b97f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/app-environment.txt @@ -0,0 +1,31 @@ +Camlistore applications run with the following environment variables set: + +CAMLI_API_HOST (string): + URL prefix of the Camlistore server which the app should use to make API calls. + It always ends in a trailing slash. Examples: + https://foo.org:3178/pub/ + https://foo.org/pub/ + http://192.168.0.1/ + http://192.168.0.1:1234/ + +CAMLI_APP_BACKEND_URL (string): + URL of the application's process, always ending in a trailing slash. That path + represents the top-most path that requests will hit. The path usually matches + the path as visible in the outside world when camlistored is proxying an app, + but that is not guaranteed. Examples: + https://foo.org:3178/pub/ + https://foo.org/pub/ + http://192.168.0.1/ + http://192.168.0.1:1234/ + +CAMLI_APP_CONFIG_URL (string): + URL containing JSON configuration for the app. The app should once, upon + startup, fetch this URL (using CAMLI_AUTH) to retrieve its configuration data. + The response JSON is the contents of the app's "appConfig" part of the config + file. + +CAMLI_AUTH (string): + Username and password (username:password) that the app should use to + authenticate over HTTP basic auth with the Camlistore server. Basic auth is + unencrypted, hence it should only be used with HTTPS or in a secure (local + loopback) environment. 
diff --git a/vendor/github.com/camlistore/camlistore/doc/blog-notes.txt b/vendor/github.com/camlistore/camlistore/doc/blog-notes.txt new file mode 100644 index 00000000..b8c397e0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/blog-notes.txt @@ -0,0 +1,40 @@ +Thoughts on storing a blog in Camlistore and serving it from the +publish handler. + +* a blog is a permanode + +* a blog post is a permanode + +* the post's permanode is a member of the blog's permanode + +* views of the blog we'd like: + + 1) reverse chronological (typical blog view) + + - needs efficient reverse time index on membership. + + - membership is currently "add-attribute" claims on parent + permanode, implying that a large/old blog with thousands + of posts will involve resolving the attributes of + the blog's permanode all the time. we need to either make + that efficient (caching it as a function of last mutation + claim to that permanode?) or find a different model + for memberships. I'm inclined to say keep the model + and make it fast. + + 2) forward chronological by date posted. (year, month, day view) + + - denormalization question. the date of the blog post should + be an attribute of the post's permanode (defaulting to the + date of the first/last claim mutation on it), but for efficient + indexing we'll need to either mirror this into the blog + permanode's attributes, or have another attribute on the + blog post that we can prefix scan that includes as the prefix + the blog's permanode. the latter is probably ideal so + blog posts can be cross-posted to multiple blogs, and keeps + the number of attributes on the blog permanode lower. + + e.g. 
blog post can have (add-)attributes: + + "inparent" => "| + diff --git a/vendor/github.com/camlistore/camlistore/doc/environment-vars.txt b/vendor/github.com/camlistore/camlistore/doc/environment-vars.txt new file mode 100644 index 00000000..29db5255 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/environment-vars.txt @@ -0,0 +1,215 @@ +The standard library's strconv.ParseBool() is used to parse boolean environment +variables. It accepts 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, +False. Any other value is an implicit false. + +For integer values, strconv.Atoi() is used which means only base 10 numbers are +valid. + +AWS_ACCESS_KEY_ID (string): +AWS_ACCESS_KEY_SECRET (string): + See http://docs.aws.amazon.com/fws/1.1/GettingStartedGuide/index.html?AWSCredentials.html + Used in s3 tests. If not set some tests are skipped. If set, queries will be + sent to Amazon's S3 service. + +CAMLI_APP_BINDIR (string): + Path to the directory where Camlistore first looks for the server applications + executables, when starting them. It looks in PATH otherwise. + +CAMLI_AUTH (string): + See http://camlistore.org/docs/server-config + Used as a fallback in pkg/client.Client (except on android) when + configuration files lack an 'auth' entry. If a client is using the -server + commandline to specify the camlistore instance to talk to, this env var + takes precedence over that specified in the configuration files. + +CAMLI_BASEURL (string): + URL set in devcam to act as a baseURL in the devcam launched camlistored. + +CAMLI_CACHE_DIR (string): + Path used by pkg/osutil to override operating system specific cache + directory. + +CAMLI_CONFIG_DIR (string): + Path used by pkg/osutil to override operating system specific configuration + directory. + +CAMLI_DBNAME (string): + Backend specific data source name (DSN). + Set in devcam to pass database configuration for the indexer to the devcam + launched camlistored. 
+ +CAMLI_DEBUG (bool): + Used by camlistored and camput to enable additional commandline options. + Used in pkg/schema to enable additional logging. + +CAMLI_DEBUG_CONFIG (bool): + Causes pkg/serverconfig to dump low-level configuration derived from + high-level configuration on load. + +CAMLI_DEBUG_X (string): + String containing magic substring(s) to enable debugging in code. + +CAMLI_DEBUG_UPLOADS (bool): + Used by pkg/client to enable additional logging. + +CAMLI_DEFAULT_SERVER (string): + The server alias to use by default. The string is the server's alias key + in the client-config.json "servers" object. If set, the CAMLI_DEFAULT_SERVER + takes precedence over the "default" bool in client-config.json. + +CAMLI_DEV_CAMLI_ROOT (string): + If set, the base directory of Camlistore when in dev mode. + Used by pkg/server for finding static assets (js, css, html). + Used as a signal by pkg/index/* and pkg/server to output more helpful error + message when run under devcam. + +CAMLI_DEV_CLOSURE_DIR (string): + Path override for pkg/server. If specified, this path will be used to serve + the closure handler. + +CAMLI_DISABLE_IMPORTERS (bool): + If true, importers are disabled (at least automatic background + importing, e.g. at start-up). Mostly for debugging. + +CAMLI_FAST_DEV (bool): + Used by dev/demo.sh for giving presentations with devcam server/put/etc + for faster pre-built builds, without calling make.go. + +CAMLI_FORCE_OSARCH (bool): + Used by make.go to force building an unrecommended OS/ARCH pair. + +CAMLI_GCE_*: + Variables prefixed with CAMLI_GCE_ concern the Google Compute Engine deploy handler in + pkg/deploy/gce, which is only used by camweb to launch Camlistore on Google Compute + Engine. They do not affect Camlistore's behaviour. + +CAMLI_GCE_CLIENTID (string): + See CAMLI_GCE_* first. This string is used by gce.DeployHandler as the application's + OAuth Client ID. If blank, camweb does not enable the Google Compute Engine launcher. 
+ +CAMLI_GCE_CLIENTSECRET (string): + See CAMLI_GCE_* first. Used by gce.DeployHandler as the application's OAuth Client + Secret. If blank, gce.NewDeployHandler returns an error, and camweb fails to start if + the Google Compute Engine launcher was enabled. + +CAMLI_GCE_DATA (string): + See CAMLI_GCE_* first. Path to the directory where gce.DeployHandler stores the + instances configuration and state. If blank, the "camli-gce-data" default is used + instead. + +CAMLI_GCE_PROJECT (string): + See CAMLI_GCE_* first. ID of the Google Project that provides the above client ID and + secret. It is used when we query for the list of all the existing zones, since such a + query requires a project ID. If blank, a hard-coded list of zones is used instead. + +CAMLI_GCE_SERVICE_ACCOUNT (string): + See CAMLI_GCE_* first. Path to a Google service account JSON file. This account should + have at least compute.readonly permissions on the Google Project with ID CAMLI_GCE_PROJECT. + It is used to authenticate when querying for the list of all the existing zones. If blank, + a hard-coded list of zones is used instead. + +CAMLI_GCE_XSRFKEY (string): + See CAMLI_GCE_* first. Used by gce.DeployHandler as the XSRF protection key. If blank, + gce.NewDeployHandler generates a new random key instead. + +CAMLI_HTTP_DEBUG (bool): + Enable per-request logging in pkg/webserver. + +CAMLI_HTTP_EXPVAR (bool): + Enable json export of expvars at /debug/vars + +CAMLI_HTTP_PPROF (bool): + Enable standard library's pprof handler at /debug/pprof/ + +CAMLI_IGNORED_FILES (string): + Override client configuration option 'ignoredFiles'. + Comma-separated list of files to be ignored by pkg/client when uploading. + +CAMLI_INCLUDE_PATH (string): + Path to search for files. + Referenced in pkg/osutil and used indirectly by pkg/jsonconfig.ConfigParser + to search for files mentioned in configurations. This is used as a last + resort after first checking the current directory and the camlistore config + directory. 
It should be in the OS path form, i.e. unix-like systems would be + /path/1:/path/two:/some/other/path, and Windows would be C:\path\one;D:\path\2 + +CAMLI_KEYID (string): + Optional GPG identity to use, taking precedence over config files. + Used by devcam commands, in config/dev-server-config.json, and + config/dev-client-dir/client-config.json as the public ID of the GPG + key to use for signing. + +CAMLI_KV_VERIFY (bool): + Enable all the VerifyDb* options in cznic/kv, to e.g. track down + corruptions. + +CAMLI_KVINDEX_ENABLED (bool): + Use cznic/kv as the indexer. Variable used only by devcam server. + +CAMLI_LEVELDB_ENABLED (bool): + Use syndtr/goleveldb as the indexer. Variable used only by devcam server. + +CAMLI_MEMINDEX_ENABLED (bool): + Use a memory-only indexer. Supported only by devcam server. + +CAMLI_MONGO_WIPE (bool): + Wipe out mongo based index on startup. + +CAMLI_MAKE_USEGOPATH (bool): + When running make.go, overrides the -use_gopath flag. + +CAMLI_NO_FILE_DUP_SEARCH (bool): + This will cause the search-for-exists-before-upload step to be skipped when + camput is uploading files. + +CAMLI_PPROF_START (string): + Filename base to write a ".cpu" and ".mem" profile out + to during server start-up. Used to profile index corpus scanning, + mostly. + +CAMLI_QUIET (bool): + Used by devcam to enable -verbose flag for camput/camget. + +CAMLI_SECRET_RING (string): + Path to the GPG secret keyring, which is otherwise set by identitySecretRing + in the server config, and secretRing in the client config. + +CAMLI_DISABLE_CLIENT_CONFIG_FILE (bool): + If set, the pkg/client code will never use the on-disk config file. + +CAMLI_TRACK_FS_STATS (bool): + Enable operation counts for fuse filesystem. + +CAMLI_TRUSTED_CERT (string): + Override client configuration option 'trustedCerts'. + Comma-separated list of paths to trusted certificate fingerprints. + +CAMPUT_ANDROID_OUTPUT (bool): + Enable pkg/client status messages to print to stdout. Used in android client. 
+ +CAMLI_DEBUG_IMAGES (bool): + Enable extra debugging in pkg/images when decoding images. Used by indexers. + +CAMLI_DISABLE_DJPEG (bool): + Disable use of djpeg(1) to down-sample JPEG images by a factor of 2, 4 or 8. + Only has an effect when djpeg is found in the PATH. + +CAMLI_DISABLE_THUMB_CACHE (bool): + If true, no thumbnail caching is done, and URLs even have cache + buster components, to force browsers to reload a lot. + +CAMLI_VAR_DIR (string): + Path used by pkg/osutil to override operating system specific application + storage directory. Generally unused. + +CAMLI_S3_FAIL_PERCENT (int): + Number from 0-100 of what percentage of the time to fail receiving blobs + for the S3 handler. + +DEV_THROTTLE_KBPS (integer): +DEV_THROTTLE_LATENCY_MS (integer): + Rate limit and/or inject latency in pkg/webserver responses. A value of 0 + disables traffic-shaping. + +RUN_BROKEN_TESTS (bool): + Run known-broken tests. diff --git a/vendor/github.com/camlistore/camlistore/doc/example-blobs/README.txt b/vendor/github.com/camlistore/camlistore/doc/example-blobs/README.txt new file mode 100644 index 00000000..2d8d1e18 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/example-blobs/README.txt @@ -0,0 +1,2 @@ +A random collection of blobs for reference in mailing +lists and other docs. 
diff --git a/vendor/github.com/camlistore/camlistore/doc/example-blobs/sha1-648357ea2ee9dd7031d0ff786840e6deac8b7a6a.dat b/vendor/github.com/camlistore/camlistore/doc/example-blobs/sha1-648357ea2ee9dd7031d0ff786840e6deac8b7a6a.dat new file mode 100644 index 00000000..6526ba82 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/example-blobs/sha1-648357ea2ee9dd7031d0ff786840e6deac8b7a6a.dat @@ -0,0 +1,5 @@ +{"camliVersion": 1, + "camliSigner": "sha1-c4da9d771661563a27704b91b67989e7ea1e50b8", + "camliType": "permanode", + "random": "}6{T2o+u<[!aE.&babHX" +,"camliSig":"iQEcBAABAgAGBQJNScvDAAoJEGjzeDN/6vt8h+sIAILavU5sLhdtb5zsdLQJE5uVzWVNyImtUS+xlH8qi+LFefGVqlMpuij0mzuNBybRKKYkJeYXD/bQ4hZb/6XgzQWHhcXpDqLJrLRVK6jjflaOv23bDZm8J/1Q0pMe291qfON+iX2KD7F1f6yCY60uUMTwaF/0MfJxITH9sPdD0AhNxJUNPSkviUnPa9YLr8S3NmARsReV1EorvYQmZI2y2cJdfWgQ4LAghJhWIE1LwexHcOuiDLg2QLZiGx6sqhBCKpIfAzigSXs/ehIBNC9uSjkiWa4aRIWk6MGxkg1z4yzERIjJ38+1vJh1866DMeo5fxwH1K2o53gvBm+xq1UJXcg==iqHS"} diff --git a/vendor/github.com/camlistore/camlistore/doc/example-blobs/sha1-648357ea2ee9dd7031d0ff786840e6deac8b7a6a.txt b/vendor/github.com/camlistore/camlistore/doc/example-blobs/sha1-648357ea2ee9dd7031d0ff786840e6deac8b7a6a.txt new file mode 100644 index 00000000..163ff545 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/example-blobs/sha1-648357ea2ee9dd7031d0ff786840e6deac8b7a6a.txt @@ -0,0 +1 @@ +A permanode, signed by sha1-c4da9d771661563a27704b91b67989e7ea1e50b8. 
diff --git a/vendor/github.com/camlistore/camlistore/doc/example-blobs/sha1-c4da9d771661563a27704b91b67989e7ea1e50b8.dat b/vendor/github.com/camlistore/camlistore/doc/example-blobs/sha1-c4da9d771661563a27704b91b67989e7ea1e50b8.dat new file mode 100644 index 00000000..de8bc48d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/example-blobs/sha1-c4da9d771661563a27704b91b67989e7ea1e50b8.dat @@ -0,0 +1,30 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.10 (GNU/Linux) + +mQENBE0zz0ABCACxjNdfvUMA1iHFKIMaHQadNrpQVlH+/+3bgJO7dPeKv3eg/TC1 +OdmN3PdZg0u0KxfAhs+Q7SFPD53YWFvd333WJwiGpDdJ1QA538BQnxHGGGxYGCIQ +Rr1rgB4wm/tWh326/uXWpA7RAty2Mm64UkgqULuZVrMxNjXm3e+q9QNNnBp1dWpo +k1/j8y9Z+a1zoxFK5zw3i15hV8zmpMsoLLRBBkvT2qYasThGUM65tDkkoCCW+Qom +NoRiAx4bDEI1y5pP+CUmJTr9Zo+N20IXC5tDTRxRLOYprxxDiw3Mg0ML6PSMitcM +u5LMztm+hJ2FCg/n0ZkKXXFTEGr6lflaIHUxABEBAAG0IUJyYWQgRml0enBhdHJp +Y2sgPGJyYWRAZGFuZ2EuY29tPokBOAQTAQIAIgUCTTPPQAIbAwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQaPN4M3/q+3w6hgf/bHBFllZgaicklI6iaGedDh44 +rvwSzpOQ3v2VyfqG6CrdgGa6JpqMTntvLygxaFQBkLwQYDX8jgxqkm7fOGYXqoK1 +UPAkORNAt8gyqYA/Uo3jYI8VvJ5NK/BvYgiGCFwceuXLus24+PuKnp2PMpEn7fe1 +Yz57vOO3khsvA+nPOoWYhK++Xv/TG5qVStV8eON0WXDbyoUcVFyUEFCjE0ipCUBe +rEFxotgtiJ+OscY4XtbXnOa5ztqW0xEAnSSlomauJWmLZn5uaJceaObB1tf+r8af +WkjSU+YLjqZ/HdUmg/6Kv/Xhk3fn5v2tDLjkbmwBhhT46g/zrvWVk1rnMPZGaLkB +DQRNM89AAQgA1U4HDH2s/Fy2f7WSB6pqQVHcGqVLjj3KtnDV2zBxa5ps6NiMaZav +LdPcj1DcaytvmGMN+VeWy4nR2hYMNej645ZIVuuPKnLFe5ir9gKEYcM6nE+BSGUp +bkIhLsitXmcc2fXJfP7aZOl61EsQJyihrNb7yWT8ViX+txC8/59ftdBB+eB8G+Dh +I0KVRfZlMgHA1Et8o9FsBULrw2zFuqdRYBQsidnaOnG61UN1/R1iij7vAQEGxJaH +jghL6OYthi6dFoYlKuBc8439UonKsiV80wmH6fZdJxM//WfpNY5N3c2fvRmR/PNR +UXxOZ8ySArBphimH5jMlUJ5bJAQYLj4KcQARAQABiQEfBBgBAgAJBQJNM89AAhsM +AAoJEGjzeDN/6vt8hX8H/1Veh1wFJW2qo4hkTOD9JTLirHiDGK11L+Xy/PR8mu3p +dgdFv1kSUe4i3Qqrf606kF8TkjFVbWd8zvmANgLfwAPJD5xWJMNWT2dvpKxzSvjj +DTyeRM0wcUMCvgiWrQdbyoGbh0UZu7QAt7OihHlUbFH9EU3IpimayzRTDYLhLyWI 
+0bgFaH6BdbmpL4flAs31AI6WHfBZmAO34hAEdMxBDEPIQpSqRZn3GmgAknIw5bUk +emjIVDVO1402eZTLRGDi/1Aw9ey/GJUPeSxm8JqHRRLhpuTrMEtGe7xv2oDskhaG +u+wLedM+EXyuni2oGRcAQPBBNFyOoABzBUhW+bK13oo= +=361b +-----END PGP PUBLIC KEY BLOCK----- diff --git a/vendor/github.com/camlistore/camlistore/doc/example-blobs/sha1-c4da9d771661563a27704b91b67989e7ea1e50b8.txt b/vendor/github.com/camlistore/camlistore/doc/example-blobs/sha1-c4da9d771661563a27704b91b67989e7ea1e50b8.txt new file mode 100644 index 00000000..185b70c9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/example-blobs/sha1-c4da9d771661563a27704b91b67989e7ea1e50b8.txt @@ -0,0 +1,2 @@ +Brad's public key, used to sign the permanode in +sha1-648357ea2ee9dd7031d0ff786840e6deac8b7a6a.dat diff --git a/vendor/github.com/camlistore/camlistore/doc/json-signing/example/public-key.txt b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/public-key.txt new file mode 100644 index 00000000..813ebf8f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/public-key.txt @@ -0,0 +1,30 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.11 (GNU/Linux) + +mQENBEzgoVsBCAC/56aEJ9BNIGV9FVP+WzenTAkg12k86YqlwJVAB/VwdMlyXxvi +bCT1RVRfnYxscs14LLfcMWF3zMucw16mLlJCBSLvbZ0jn4h+/8vK5WuAdjw2YzLs +WtBcjWn3lV6tb4RJz5gtD/o1w8VWxwAnAVIWZntKAWmkcChCRgdUeWso76+plxE5 +aRYBJqdT1mctGqNEISd/WYPMgwnWXQsVi3x4z1dYu2tD9uO1dkAff12z1kyZQIBQ +rexKYRRRh9IKAayD4kgS0wdlULjBU98aeEaMz1ckuB46DX3lAYqmmTEL/Rl9cOI0 +Enpn/oOOfYFa5h0AFndZd1blMvruXfdAobjVABEBAAG0JUNhbWxpIFRlc3RlciA8 +Y2FtbGktdGVzdEBleGFtcGxlLmNvbT6JATgEEwECACIFAkzgoVsCGwMGCwkIBwMC +BhUIAgkKCwQWAgMBAh4BAheAAAoJECkxpnwm9avaHE0IAJ/pMZgiURl3kefrFMAV +7ei0XDfTekZOwDRcZWTVQ/A97phpzO8t78qLYbFeHuq3myNhrlVO9Gyp+2V904rN +dudoHLhpegf5TNeHGmAGHBxcooMPMp0JyIDnUBxtCNGxgWfbKpEDRsQAjkCc7sR0 +H+OegzlEf6JZGzEhV5ohOioTsC1DmJNoQsRz5Kes7sLoAzpQCbCv4yv+1o+mnzgW +9qPJXKxcScc0t2YTvcvpJ7LV8no1OP6vpYqB1A9Pzze6XFBlcXOUKbRKk0fEIV/u 
+pU3ph1fF7wlyRgA4A3iPwDC4BgVmHYkz9nYPn+7IcT/dDig5SWU+n7WZgGeyv75y +0Ue5AQ0ETOChWwEIALuHxKI+oSH+eeMSXhxcSUXnhp4cUeyvOV7oNPYcmsDclF0Y +7y8NrSPiEZod9vSTEDMq7hd3BG+feCBqjgR4qtmoXguJhWcnJqDBk5iAMuuAph9O +CC8QLACMJPhoxQ0UtDPKlpG4X8kLK1woHd716ulPl2KLjTgd6K4kCGj+CV5Ekn6u +IJj+3IPbYDOwk1l06ksimwQAY4dA1CXOTviH1bVqR6CzuzVPg4hcryWDva1rEO5c +LcOR8Wk/thANFLSNjqX8UgtGXhFZRWxKetFDQiX5f2BKoqTVYvD3pqt+zzyLNFAz +xhMc3cyFfqM8yQdzdEey/DIWtMoDqZCSVMJ63N8AEQEAAYkBHwQYAQIACQUCTOCh +WwIbDAAKCRApMaZ8JvWr2mHACACkco+fAfRK+gmprF2m8E0Bp1frwFH0g4RJVHXQ +BUDbg7OZbWumzD4Br28si6XDVMP6fLOeyD0EHYb6LhAHDkBLqx6e3kKG1mQ8fMIV +O4YMQfskYH2FJqlCtgMnM8N3oslPBTpZedNPSUq7HJh2pKr9GIDi1V+Hgc/qEigE +dj9f2zSSaKZdC4eL73GvlQOh+4XqgaMnMiKfI+/2WlRaJs1KOgKmIp5yHt0qY0ef +y+40BY/z9pMjyUvr/Wwp8KXArw0NAwzp8NUl5fNxRg9XWQWLn6hW8ydR20X3t2ym +iNSWzNQiTT6k7fumOABCoSZsow/AJxQSxqKOJBjgpKjIKCgY +=ru0J +-----END PGP PUBLIC KEY BLOCK----- diff --git a/vendor/github.com/camlistore/camlistore/doc/json-signing/example/signing-after.camli b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/signing-after.camli new file mode 100644 index 00000000..49244c28 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/signing-after.camli @@ -0,0 +1,4 @@ +{"camliVersion": 1, + "camliSigner": "sha1-8616ebc5143efe038528c2ab8fa6582353805a7a", + "foo": "bar" +,"camliSig":"iQEcBAABAgAGBQJO3/DNAAoJECkxpnwm9avaf6EH/3HVJC+6ybOJDTJIInQBum9YFzC1I8b6xNLN0yFdDtypZUotvW9pvU2pVpbfNSmcW/OL02eR2kgL55dHxbUjbN9CvXlvSb2QAy8IQMdA3721pMR41rNNn08w5bbAWgW/suiyN5z0pIKn3vPEHbguGeNQBStgOSq1WkgCozNBxPA7V5mcUx2rUOsWHYSmEY8foPdeDYcrw2pvxPN8kXk6zBrZilrtaY+Yx5zPLkq8trhHPgCdf4chL+Y2kmxXMKYjU+bkmJaNycUURdncZakTEv9YfbBp04kbHIaN6DttEoXuU96nTyuCFhIftmV+GPbvGpl3e2yhmae5hUUt1g0o8FE==aSCK"} diff --git a/vendor/github.com/camlistore/camlistore/doc/json-signing/example/signing-before-J.camli b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/signing-before-J.camli new file mode 100644 index 00000000..0da13522 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/signing-before-J.camli @@ -0,0 +1,4 @@ +{"camliVersion": 1, + "camliSigner": "sha1-8616ebc5143efe038528c2ab8fa6582353805a7a", + "foo": "bar" +} diff --git a/vendor/github.com/camlistore/camlistore/doc/json-signing/example/signing-before.camli b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/signing-before.camli new file mode 100644 index 00000000..426b2e94 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/signing-before.camli @@ -0,0 +1,3 @@ +{"camliVersion": 1, + "camliSigner": "sha1-8616ebc5143efe038528c2ab8fa6582353805a7a", + "foo": "bar" diff --git a/vendor/github.com/camlistore/camlistore/doc/json-signing/example/signing-before.camli.detachsig b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/signing-before.camli.detachsig new file mode 100644 index 00000000..125626e7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/signing-before.camli.detachsig @@ -0,0 +1,11 @@ +-----BEGIN PGP SIGNATURE----- +Version: GnuPG v1.4.11 (GNU/Linux) + +iQEcBAABAgAGBQJO3/DNAAoJECkxpnwm9avaf6EH/3HVJC+6ybOJDTJIInQBum9Y +FzC1I8b6xNLN0yFdDtypZUotvW9pvU2pVpbfNSmcW/OL02eR2kgL55dHxbUjbN9C +vXlvSb2QAy8IQMdA3721pMR41rNNn08w5bbAWgW/suiyN5z0pIKn3vPEHbguGeNQ +BStgOSq1WkgCozNBxPA7V5mcUx2rUOsWHYSmEY8foPdeDYcrw2pvxPN8kXk6zBrZ +ilrtaY+Yx5zPLkq8trhHPgCdf4chL+Y2kmxXMKYjU+bkmJaNycUURdncZakTEv9Y +fbBp04kbHIaN6DttEoXuU96nTyuCFhIftmV+GPbvGpl3e2yhmae5hUUt1g0o8FE= +=aSCK +-----END PGP SIGNATURE----- diff --git a/vendor/github.com/camlistore/camlistore/doc/json-signing/example/some-notes.txt b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/some-notes.txt new file mode 100644 index 00000000..098acb4b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/some-notes.txt @@ -0,0 +1,8 @@ +This is the example notes file. 
+ +TODO: +[X] find good unique name for this project +[ ] write docs about it +[ ] implement + + diff --git a/vendor/github.com/camlistore/camlistore/doc/json-signing/example/some-notes.txt.camli b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/some-notes.txt.camli new file mode 100644 index 00000000..a275c8da --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/some-notes.txt.camli @@ -0,0 +1,9 @@ +{"camliVersion":"1", + "camliType": "file:1", + "name": "some-notes.txt", + "contents": "sha1-8ba9e53cbc83c1be3835b94a3690c3b03de0b522", + "size": 122, + "modtime": "2010-06-10T18:02Z", + "ctime": "2008-04-12T04:12:17.194Z", + "permissions": "0644" +} diff --git a/vendor/github.com/camlistore/camlistore/doc/json-signing/example/test-keyring.gpg b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/test-keyring.gpg new file mode 100644 index 00000000..3d20ba68 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/test-keyring.gpg differ diff --git a/vendor/github.com/camlistore/camlistore/doc/json-signing/example/test-secring.gpg b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/test-secring.gpg new file mode 100644 index 00000000..bca3ad03 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/doc/json-signing/example/test-secring.gpg differ diff --git a/vendor/github.com/camlistore/camlistore/doc/json-signing/json-signing.txt b/vendor/github.com/camlistore/camlistore/doc/json-signing/json-signing.txt new file mode 100644 index 00000000..88b1bb4f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/json-signing/json-signing.txt @@ -0,0 +1,166 @@ +JSON claim objects need to be signed. If I want to distribute a Camli +blob object publicly, declaring that I "favorite" or "star" a named +entity, it should be verifiable. 
+ +The properties we want in the JSON file, ideally, include: + +GOAL #1) it's still a valid JSON file in its entirety. + +This means no non-JSON compliant header or footer. + +This implies that the data structure to be signed and the signature +metadata be separate, in an outer JSON wrapper. + +This has been discussed and implemented in various ways. For example, +in jchris's canonical-json project, + + http://github.com/jchris/canonical-json + +... the "signed-content" and the "signature" are parallel objects under the +same outer JSON object. + +The problem then becomes that the verifier, after parsing the JSON +blob, needs to re-serialize the JSON "signed-content" object, +byte-for-byte as in the original, in order to verify the signature. + +In jchris' strategy, the canonicalization is implemented by +referencing JavaScript code that serializes it. This has the +advantage that the serialization could change over time, but the +disadvantage that you have to embed a Rhino, V8, SpiderMonkey, or +similar into your parser, which is somewhat heavy. Considering that +canonical JSON serialization is something that should be relatively +static and could be defined once, I'm not sure that the flexibility is +worth the cost. + +Overall, though, the jchris approach's structure of the JSON file is +good. + +Notably, it satisfies one of my other goals: + +GOAL #2) The document still be human-readable. + +For instance, the laptop.org project is proposing this Canonical JSON +format: + + http://wiki.laptop.org/go/Canonical_JSON + +.. unfortunately, all whitespace is stripped. It's not a deal +breaker, but lacks human readableness. + +You might say, "Bring your own serialization! Wrap the signed-content +in a string!" + +But then you're back to the readable problem, because JSON strings +can't have embedded newline literals. 
+ +Further, the laptop.org proposal requires the use of a new JSON +serialization library and parser for each language which wants to +produce camli documents. This isn't a huge deal, but considering that +JSON libraries already exist and people are oddly passionate about +their favorites and inertia's not to be ignored, I state the next +goal: + +GOAL #3) Don't require a new JSON library for parsing/serialization. + +With the above goals in mind, Camli uses the following scheme to sign +and verify JSON documents: + +SIGNING +======= + +-- Start with a JSON object (not an array) to be encoded and signed. + We'll call this data structure 'O'. While this signing technique + could be used for applications other than Camlistore, this document + is specifically about Camlistore, which requires that the JSON + object 'O' contain the following two key/value pairs: + "camliVersion": "1" + "camliSigner": "hashalg-xxxxxxxxxxx" (blobref of ASCII-armored public key) + +-- To find your camliSigner value, you could use GPG like: + + $ gpg --no-default-keyring --keyring=example/test-keyring.gpg --secret-keyring=example/test-secring.gpg \ + --export --armor 26F5ABDA > example/public-key.txt + + $ sha1sum example/public-key.txt + 8616ebc5143efe038528c2ab8fa6582353805a7a + + ... so the blobref value for camliSigner is "sha1-8616ebc5143efe038528c2ab8fa6582353805a7a". + Clients will use this value in the future to find the public key to verify + signatures. + +-- Serialize in-memory JSON object 'O' with whatever JSON + serialization library you have available. internal or trailing + whitespace doesn't matter. We'll call the JSON serialization of + 'O' (defined in earlier step) 'J' + (e.g. doc/example/signing-before-J.camli) + +-- Now remove any trailing whitespace and exactly and only one '}' + character from the end of string 'J'. We'll call this truncated, + trimmed string 'T'. + (e.g. 
doc/example/signing-before.camli) + +-- Create an ASCII-armored detached signature of this document, + e.g.: + + gpg --detach-sign --local-user=54F8A914 --armor \ + -o signing-before.camli.detachsig signing-before.camli + + (The output file is in doc/example/signing-before.camli.detachsig) + +-- Take just the base64 part of that ASCII detached signature + into a single line, and call that 'S'. + +-- Append the following to 'T' above: + + ,"camliSig":""}\n + + ... where is the single-line ASCII base64 detached signature. + Note that there are exactly 13 bytes before and exactly + 3 bytes after . Those must match exactly. + +-- The resulting string is 'C', the camli-signed JSON document. + + (The output file is in doc/example/signing-after.camli) + +In review: + +O == the object to be signed +J == any valid JSON serialization of O +T == J, with 0+ trailing whitespace removed, and then 1 '}' character + removed +S == ascii-armored detached signature of T +C == CONCAT(T, ',"camliSig":"', S, '"}', '\n') + +(strictly, the trailing newline and the exact JSON serialization of +the camlisig element doesn't matter, but it'd be advised to follow +this recommendation for compatibility with other verification code) + +VERIFYING +========= + +-- start with a byte array representing the JSON to be verified. + call this 'BA' ("bytes all") + +-- given the byte array, find the last index in 'BA' of the 13 byte + substring: + ,"camliSig":" + + Let's call the bytes before that 'BP' ("bytes payload") and the bytes + starting at that substring 'BS' ("bytes signature") + +-- define 'BPJ' ("bytes payload JSON") as 'BP' + the single byte '}'. + +-- parse 'BPJ', verifying that it's valid JSON object (dictionary). + verify that the object has a 'camliSigner' key with a string key + that's a valid blobref (e.g. "sha1-xxxxxxx") note the camliSigner. + +-- replace the first byte of 'BS' (the ',') with an open brace ('{') + and parse it as JSON. 
verify that it's a valid JSON object with + exactly one key: "camliSig" + +-- using 'camliSigner', a camli blobref, find the blob (cached, via + camli/web lookup, etc) that represents a GPG public key. + +-- use GnuPG or equivalent libraries to verify that the ASCII-armored + GPG signature in "camliSig" signs the bytes in 'BP' using the + GPG public key found via the 'camliSigner' blobref diff --git a/vendor/github.com/camlistore/camlistore/doc/overview.txt b/vendor/github.com/camlistore/camlistore/doc/overview.txt new file mode 100644 index 00000000..0b6915a1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/overview.txt @@ -0,0 +1,161 @@ +============================================================================ +Camlistore: Content-Addressable Multi-Layer, Indexed Store +============================================================================ + +This file contains old design notes. They're correct in spirit, but shouldn't +be considered authoritative. + +See http://camlistore.org/docs/ + + +-=-=-=-=-=-=-=-=-=-=-=-=-=- +Design goals: +-=-=-=-=-=-=-=-=-=-=-=-=-=- + +* Content storage & indexing & backup system +* No master node +* Anything can sync any which way, in any directed graph (cycles or not) + (phone -> personal server <-> home machine <-> amazon <-> google, etc) +* No sync state or races on arguments of latest versions +* Future-proof +* Very obvious/intuitive schema (easy to recover in the future, even + if all docs/notes about Camlistore are lost, or the recoverer in + five decades after I die doesn't even know that Camlistore was being + used....) should be easy for future digital archaeologists to grok. 
+ +-=-=-=-=-=-=-=-=-=-=-=-=-=- +Design assumptions: +-=-=-=-=-=-=-=-=-=-=-=-=-=- + +* disk is cheap and getting cheaper +* bandwidth is high and getting faster +* plentiful CPU & compression will fix size & redundancy of metadata + +-=-=-=-=-=-=-=-=-=-=-=-=-=- +Layer 1: +-=-=-=-=-=-=-=-=-=-=-=-=-=- + +* content-addressable blobs only + - no notion of "files", filenames, dates, streams, encryption, + permissions, metadata. +* immutable +* only operations: + - store(digest, bytes) + - check(digest) => bool (have it or not) + - get(digest) => bytes + - list([start_digest]) => [(digest[, size]), ...]+ +* amenable to implementation on ordinary filesystems (e.g. ext3, vfat, + ntfs) or on Amazon S3, BigTable, AppEngine Datastore, Azure, Hadoop + HDFS, etc. + +-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=- +Schema of files/objects in Layer 1: +-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=- + +* Let's start by describing the storage of files that aren't self-describing, + e.g. "some-notes.txt" (as opposed to a jpg file from a camera that might + likely contain EXIF data, addressed later...). This file, for reference, + is in doc/json-signing/example/some-notes.txt + +* The bytes of file "some-notes.txt" are stored as-is in one blob, + addressed as "sha1-8ba9e53cbc83c1be3835b94a3690c3b03de0b522". + (note the explicit naming of the hash function as part of the name, + for upgradability later, and so all parties involved know how to + verify it...) + +* The filename, stat(2) metadata (modtime, ctime, permissions, etc) now + also need to be stored. The key design point here is that file + metadata is ALSO just a blob, content-addressed. The blob is a JSON + file (for human readability, compactness). 
XML and Protocol Buffers + were both also considered, but the former is too redundant, bloaty, + tree-ish (overkill) and out of vogue, while Protocol Buffers don't + stand up to the human readable future digital archaeologist test, + and they're also not self-describing with the proto schema declared + in-line. + + This file would thus be represented by a JSON file, as seen in + docs/json-signing/example/some-notes.txt.camli, and addressed as + "sha1-7e7960756b39cd7da614e7edbcf1fa7d696eb660", its sha1sum. This identifier + can be used in directory listings, etc. Note that camli files do not have any + magical filename, as they're not typically stored with their filename. (they + are in the doc/json-signing/examples/ directory just to separate them out, but + that's a rare case.) Instead, a camli JSON object is known as such if the + bytes of the file begin exactly with the bytes: + + {"camliVersion" + + ... which lets upper layers know what it is, and how to index it. + + See the doc/schema/ directory for details on Camli JSON objects and their + schema. + +* Note that camli files can represent: + + -- files + -- directories + -- trees/snapshots (git-style) + -- tags on other objects + -- stars/ratings on other objects + -- deletion claims/requests (since everything is immutable, you can + only request a deletion, and wait/hope for GC later...) + -- signed statements/claims on other objects + (think decentralized commenting/starring on the web, + verifying claims with webfinger lookups to find + public keys to verify signatures) + -- references to encrypted/split files + -- etc... (extensible over time) + +-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=- +Syncing +-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=- + +-- nodes can push/pull between storage layers without thought. No + chance of overwriting stuff. + +-- the assumption is that users control and trust and secure all their + storage nodes: e.g. 
your phone, your home server, your internet + server, your Amazon S3 node, your App Engine appid / datastore + instance, etc. + +-- users configure which nodes push/pull to which other nodes, forming + their own sync topology. For instance, your phone may not need a + full copy of all content you've ever saved/produced... its primary + goal in life is probably to quickly push out any unique content it + produces (e.g. photos) to another machine for backup. And maybe + cache other recently-accessed content locally, but not worry about + it being destroyed when you drop and break your phone. + +-- no encryption is assumed at the Camli storage layer, though you may + run a Camli storage node on an encrypted filesystem or blockdevice. + +-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=- +Indexing Layer +-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=- + +* scans/mapreduces over all blobs, provides higher-level APIs to list + objects, list directories, see snapshots of trees at points in time, + traverse graphs of objects (reverse indexing e.g. tags/stars/claims + object<->object) + +* ... TODO: document + +-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=- +Mid layer +-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=- + +* It'll often be the case that a client (e.g. your phone) knows about + a file (e.g. a photo) and has its metadata, but doesn't have its raw + JPEG blob bytes, which might be several MB, and slow to transfer + over a wireless connection. Camli storage nodes may also declare + their support for helper APIs for when the client knows/assumes the + type of a given blob. + + In addition to the operations in layer 1 above, you could also assume + most Camli storage nodes would support any API such as: + + getThumbnail(blobName, [ ... sizeParams .. ]) -> JPEG thumbnail + + .. which would make mobile content browsers lives easier. 
+ + +TODO: finish documenting diff --git a/vendor/github.com/camlistore/camlistore/doc/protocol/blob-enumerate-protocol.txt b/vendor/github.com/camlistore/camlistore/doc/protocol/blob-enumerate-protocol.txt new file mode 100644 index 00000000..3ed84723 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/protocol/blob-enumerate-protocol.txt @@ -0,0 +1,74 @@ +The /camli/enumerate-blobs endpoint enumerates all blobs that the +server knows about. + +They're returned in sorted order, sorted by (digest_type, +digest_value). That is, md5-acbd18db4cc2f85cedef654fccc4a4d8 sorts +before sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 because "m" sorts +before "s", even though "0" sorts before "a". + +GET /camli/enumerate-blobs?after=&limit= HTTP/1.1 +Host: example.com + +URL GET parameters: + + after optional If provided, only blobs GREATER THAN this + value are returned. + + Can't be used in combination with 'maxwaitsec' + + limit optional Limit the number of returned blobrefs. The + server may have its own lower limit, however, + so be sure to pay attention to the presence + of a "continueAfter" key in the JSON response. + + maxwaitsec optional The client may send this, an integer max + number of seconds the client is willing to + wait for the arrival of blobs. If the + server supports long-polling (an optional + feature), then the server will return + immediately if any blobs are available, else + it will wait for this number of seconds. + It is an error to send this option with a non- + zero value along with the 'after' option. + The server's reply must include + "canLongPoll" set to true if the server + supports this feature. Even if the server + supports long polling, the server may cap + 'maxwaitsec' and wait for less time than + requested by the client. + + Can't be used in combination with 'after'. 
+ + +Response: + +HTTP/1.1 200 OK +Content-Type: text/javascript + +{ + "blobs": [ + {"blobRef": "md5-acbd18db4cc2f85cedef654fccc4a4d8", + "size": 3}, + {"blobRef": "sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33", + "size": 3}, + ], + "continueAfter": "sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33", + "canLongPoll": true, +} + +Response keys: + + blobs required Array of {"blobRef": BLOBREF, "size": INT_bytes} + will be an empty list if no blobs are present. + + continueAfter optional If present, the result is truncated and there + are (likely) more blobs after the provided blobref, + which should be passed to the next request's + "after" request parameter. It's possible but rare + that the final page of actual results has + continueAfter set, but the subsequent page is + empty. (if numBlobs % limit == 0) + + canLongPoll optional Set to true (type boolean) if the server supports + long polling. If not true, the server ignores + the client's "maxwaitsec" parameter. diff --git a/vendor/github.com/camlistore/camlistore/doc/protocol/blob-get-protocol.txt b/vendor/github.com/camlistore/camlistore/doc/protocol/blob-get-protocol.txt new file mode 100644 index 00000000..c75b01f9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/protocol/blob-get-protocol.txt @@ -0,0 +1,46 @@ +The /camli/ endpoint returns a blob the server knows about. + +A request with the GET verb will return 200 and the blob contents if +present, 404 if not. A request with the HEAD verb will return 200 and +the blob meta data (i.e., content-length), or 404 if the blob is not +present. + +The response must include an explicit Content-Length, even with HTTP/1.1. +(The one piece of metadata a blobserver keeps on a blob is its length, + which is used in both enumerate-blobs bodies and responses to blob GETs.) 
+ +Get the blob: + +GET /camli/sha1-126249fd8c18cbb5312a5705746a2af87fba9538 HTTP/1.1 +Host: example.com + +Response: + +HTTP/1.1 200 OK +Content-Type: application/octet-stream +Content-Length: + + + + +Existence check: + +HEAD /camli/sha1-126249fd8c18cbb5312a5705746a2af87fba9538 HTTP/1.1 +Host: example.com + +Response: + +HTTP/1.1 200 OK +Content-Type: application/octet-stream +Content-Length: + + +Does not exist: + +GET /camli/sha1-126249fd8c18cbb5312a5705746a2af87fba9538 HTTP/1.1 +Host: example.com + +Response: + +HTTP/1.1 404 Not Found + diff --git a/vendor/github.com/camlistore/camlistore/doc/protocol/blob-stat-protocol.txt b/vendor/github.com/camlistore/camlistore/doc/protocol/blob-stat-protocol.txt new file mode 100644 index 00000000..ad3dc76b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/protocol/blob-stat-protocol.txt @@ -0,0 +1,83 @@ +This document describes the "batch stat" API end-point, for checking +the size/existence of multiple blobs when the client and/or server do +not support SPDY or HTTP/2.0. See blob-upload-protocol.txt for more +background. + +Notably: the HTTP method may be GET or POST. GET is more correct but +be aware that HTTP servers and proxies start to suck around the 2k and +4k URL lengths. If you're stat'ing more than ~37 blobs, using POST +would be safest. + +The HTTP request path is $blobRoot/camli/stat. See +blob-upload-protocol.txt and discovery.txt for background. + +In either case, the request form values: (either in the URL for GET or +application/x-www-form-urlencoded body for POST) + + camliversion required Version of camlistore and/or stat protocol; + reserved for future use. Must be "1" for now. + + blob optional/ Must start at 1 and go up, no gaps allowed, not + repeated zero-padded, etc. Value is a blobref, e.g + "sha1-9b03f7aca1ac60d40b5e570c34f79a3e07c918e8" + There's no defined limit on how many you include here, + but servers may return a 400 Bad Request if you ask + for too many. 
All servers should support <= 1000 + though. + + maxwaitsec optional The client may send this, an integer max number + of seconds the client is willing to wait + for the arrival of blobs. If the server + supports long-polling (an optional + feature), then the server will return + immediately if all the requested blobs + are available, or wait up until this + amount of time for the blobs to become + available. The server's reply must + include "canLongPoll" set to true if the + server supports this feature. Even if + the server supports long polling, the + server may cap 'maxwaitsec' and wait for + less time than requested by the client. + +Examples: + +GET /some-blob-root/camli/stat?camliversion=1&blob1=sha1-9b03f7aca1ac60d40b5e570c34f79a3e07c918e8 HTTP/1.1 +Host: example.com + + -or- + +POST /some-blob-root/camli/stat HTTP/1.1 +Content-Type: application/x-www-form-urlencoded +Host: example.com + +camliversion=1& +blob1=sha1-9b03f7aca1ac60d40b5e570c34f79a3e07c918e8& +blob2=sha1-abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd& +blob3=sha1-deadbeefdeadbeefdeadbeefdeadbeefdeadbeef + +-------------------------------------------------- +Response: +-------------------------------------------------- + +HTTP/1.1 200 OK +Content-Length: ... +Content-Type: text/javascript + +{ + "stat": [ + {"blobRef": "sha1-abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd", + "size": 12312} + ], + "canLongPoll": true +} + +Response keys: + + stat required Array of {"blobRef": BLOBREF, "size": INT_bytes} + for blobs that the system already has. Empty + list if no blobs are already present. + + canLongPoll optional Set to true (type boolean) if the server supports + long polling. If not true, the server ignores + the client's "maxwaitsec" parameter. 
diff --git a/vendor/github.com/camlistore/camlistore/doc/protocol/blob-upload-protocol.txt b/vendor/github.com/camlistore/camlistore/doc/protocol/blob-upload-protocol.txt new file mode 100644 index 00000000..8af1b65b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/protocol/blob-upload-protocol.txt @@ -0,0 +1,122 @@ +Uploading a single blob is done in two parts: + +1) Optional: see if the server already has it with a HEAD request to + its blob URL, or do a multi-stat request with a single blob. If the + server has it, you're done. Blobs are content-addressable and have + no metadata or version, so if the server has it, it has the right + version. + +2) PUT that blob to its blob URL (or do a multipart batch put of a + single blob) + + +When uploading multiple blobs (the common case), the fastest option +depends on whether or not you're using a modern HTTP transport +(e.g. SPDY). If your client and server don't support SPDY, you want +to use the batch stat and batch upload endpoints, which hopefully can +die when the future finishes arriving. + +If you have SPDY, uploading 100 blobs is just like uploading 100 +single blobs, but all at once. Send all your 100 HEAD requests at +once, wait 1 RTT for all 100 replies, and then send then the <= 100 +PUT requests with the blobs that the server didn't have. + +If you DON'T have SPDY on both sides, you want to use the batch stat +and batch upload endpoints, described below. + +============================================================================ +Preupload request: +============================================================================ + +(see blob-stat-protocol.txt) + +============================================================================ +Batch upload request: +============================================================================ + +Things to note about the request: + + * You do a POST to $BLOB_ROOT/camli/upload where $BLOB_ROOT is the + blobserver's root path. 
You can get the path from + performing "discovery" on the server and getting the + "blobRoot" value. See discovery.txt. + + * You MUST provide a "name" parameter in each multipart part's + Content-Disposition value. The part's name matters and is the + blobref ("digest-hexhexhexhex") of your blob. The bytes MUST + match the blobref and the server MUST reject it if they don't + match. + + * You (currently) MUST provide a Content-Type for each multipart + part. It doesn't matter what it is (it's thrown away), but it's + necessary to satisfy various HTTP libraries. Easiest is to just + set it to "application/octet-stream". Server implementations SHOULD + fail if clients forget it, to encourage clients to remember + it for compatibility with all blob servers. + + * You (currently) MUST provide a "filename" parameter in each + multipart's Content-Disposition value, unique per blob, but it + will also be thrown away and exists purely to satisfy various + HTTP libraries (mostly App Engine). It's recommended to either + set this to an increasing number (e.g. "blob1", "blob2") or just + repeat the blobref value here. + + * The total size of a batch upload HTTP request, including headers + and body (including MIME bits) should not exceed 32 MB. (A + single blob can be at most 16 MB, but will in practice be much + smaller: claims will be at most ~1 KB, and file chunks are + typically at most 64 KB or 256 KB) + +Some of these requirements may be relaxed in the future. 
+ +Example: + +POST /$BLOB_SERVER_PATH_ROOT/camli/upload HTTP/1.1 +Host: upload-server.example.com +Content-Type: multipart/form-data; boundary=randomboundaryXYZ + +--randomboundaryXYZ +Content-Disposition: form-data; name="sha1-9b03f7aca1ac60d40b5e570c34f79a3e07c918e8"; filename="blob1" +Content-Type: application/octet-stream + +(binary or text blob data) +--randomboundaryXYZ +Content-Disposition: form-data; name="sha1-deadbeefdeadbeefdeadbeefdeadbeefdeadbeef"; filename="blob2" +Content-Type: application/octet-stream + +(binary or text blob data) +--randomboundaryXYZ-- + +----------------------------------------------------- +Response (status may be a 200 or a 303 to this data) +----------------------------------------------------- + +HTTP/1.1 200 OK +Content-Type: text/plain + +{ + "received": [ + {"blobRef": "sha1-9b03f7aca1ac60d40b5e570c34f79a3e07c918e8", + "size": 12312}, + {"blobRef": "sha1-deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", + "size": 29384933} + ] +} + +Response keys: + + received required Array of {"blobRef": BLOBREF, "size": INT_bytes} + for blobs that were successfully saved. Empty + list in the case nothing was received. + errorText optional String error message for protocol errors + not relating to a particular blob. + Mostly for debugging clients. + +If connection drops during a POST to an upload URL, you should re-do a +stat request to verify which objects were received by the server +and which were not. Also, the URL you received from stat before +might no longer work, so stat is required to a get a valid upload +URL. 
+ +For information on resuming truncated uploads, read blob-upload-resume.txt + diff --git a/vendor/github.com/camlistore/camlistore/doc/protocol/blob-upload-resume.txt b/vendor/github.com/camlistore/camlistore/doc/protocol/blob-upload-resume.txt new file mode 100644 index 00000000..034fcf03 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/protocol/blob-upload-resume.txt @@ -0,0 +1,36 @@ +Optional upload protocol extension: + +Blobs can be large, devices (e.g. mobile phones) can have slow +uploads, or both. Thus, it's nice to have an upload resume mechanism. + +In a stat response, a server can return a JSON key +"alreadyHavePartially" with similar format to the spec-required "stat" +array. Instead of just "blobRef" and "size", though, there's a +continuation key and blobref of the part that server already has: + +... + "alreadyHavePartially": [ + {"blobRef": "sha1-abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd", + "size": 12312, + "partBlobRef": "sha1-beefbeefbeefbeefbeefbeefbeefbeefbeefbeef" + "resumeKey": "resume-sha1-abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd-12312-server-chosen", + } + ], +... + +If the client also supports this optional extension and parses the +"alreadyHavePartially" section, the client may resume their upload by: + + 1) verifying that digest of the client blob from byte 0 (incl) to + "size" (exclusive) matches the server's provided "partBlobRef". + (the server must use the same digest function). if it doesn't, + skip, and/or proceed to any other "alreadyHavePartially" + blobref with the same "blobRef" value. (the server may have + multiple partial uploads in different states, and perhaps one + is corrupt for various HTTP client failure reasons...) + + 2) do an upload like normal, but the name of the + multipart/form-data body part should be whatever the server + provided in the mandatory "resumeKey" value. skip the first + "size" bytes in your upload. 
+ diff --git a/vendor/github.com/camlistore/camlistore/doc/protocol/discovery.txt b/vendor/github.com/camlistore/camlistore/doc/protocol/discovery.txt new file mode 100644 index 00000000..446d0d81 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/protocol/discovery.txt @@ -0,0 +1,62 @@ +Discovery is the process of asking the server for its configuration. + +You send the discovery HTTP request to the URL the user has +configured. If the user hasn't specified a path, use "/". + +Then make a GET request to that URL with either Accept header set to +"text/x-camli-configuration" or the the URL query parameter +"camli.mode" set to "config": + + GET /some/user/?camli.mode=config HTTP/1.1 + Host: camlihost.example.com + +Or: + + GET / HTTP1.1 + Host: 127.0.0.1 + Accept: text/x-camli-configuration + +The response is a JSON document: + +{ + "blobHashFuncs": [ + "sha1" + ], + "blobRoot": "/bs-and-maybe-also-index/", + "directoryHelper": "/ui/tree/", + "downloadHelper": "/ui/download/", + "helpRoot": "/help/", + "importerRoot": "/importer/", + "jsonSignRoot": "/sighelper/", + "ownerName": "The User Name", + "publishRoots": {}, + "searchRoot": "/my-search/", + "signing": { + "publicKey": "/sighelper/camli/sha1-f72d9090b61b70ee6501cceacc9d81a0801d32f6", + "publicKeyBlobRef": "sha1-f72d9090b61b70ee6501cceacc9d81a0801d32f6", + "publicKeyId": "94DE83C46401800C", + "signHandler": "/sighelper/camli/sig/sign", + "verifyHandler": "/sighelper/camli/sig/verify" + }, + "statusRoot": "/status/", + "storageGeneration": "231ceff7a04a77cdf881b0422ea733334eee3b8f", + "storageInitTime": "2012-11-30T03:34:47Z", + "syncHandlers": [ + { + "from": "/bs/", + "to": "/index-mysql/", + "toIndex": true + }, + { + "from": "/bs/", + "to": "/sto-s3/", + "toIndex": false + } + ], + "thumbVersion": "2", + "uiRoot": "/ui/", + "uploadHelper": "/ui/?camli.mode=uploadhelper", + "wsAuthToken": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +} + +TODO: document these more diff --git 
a/vendor/github.com/camlistore/camlistore/doc/publishing/README b/vendor/github.com/camlistore/camlistore/doc/publishing/README new file mode 100644 index 00000000..444e3ae2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/publishing/README @@ -0,0 +1,16 @@ +Camlistore delegates publishing to the publisher server application, which uses Go html templates (http://golang.org/pkg/text/template/) to publish pages. + +Resources for publishing, such as go templates, javascript and css files should be placed in the application source directory - app/publisher/ - so they can be served directly when using the dev server or automatically embedded in production. + +You should then specify the Go template to be used through the configuration file. The CSS files are automatically all available to the app. For example, there already is a go template (gallery.html), and css file (pics.css) that work together to provide publishing for image galleries. The dev server config (config/dev-server-config.json) already uses them. Here is how one would configure publishing for an image gallery in the server config ($HOME/.config/camlistore/server-config.json): + +"publish": { + "/pics/": { + "camliRoot": "mypics", + "cacheRoot": "/home/joe/var/camlistore/blobs/cache", + "goTemplate": "gallery.html" + } +} + +If you want to provide your own (Go) template, see http://camlistore.org/pkg/publish for the data structures and functions available to the template. + diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/blob-magic.txt b/vendor/github.com/camlistore/camlistore/doc/schema/blob-magic.txt new file mode 100644 index 00000000..10571c80 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/blob-magic.txt @@ -0,0 +1,26 @@ +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +Camli Blob Magic +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + +[Note: not totally happy with this yet...] 
+ +Ideal Camli JSON blobs should begin with the following 15 bytes: + + {"camliVersion" + +However, it's acknowledged that some JSON serialization libraries will +format things differently, so additional whitespace should be +tolerated. + +An ideal camli serializer will strive for the above header, though, by +doing something like: + + -- removing the "camliVersion" from the object, noting its value + (and requiring it to be present) + + -- serializing the JSON with an existing JSON serialization library, + + -- removing the serialized JSON's leading "{" character and prepending + the 15 byte header above, as well as the colon and saved version + and comma (which can have whitespace as desired) + diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/bytes.txt b/vendor/github.com/camlistore/camlistore/doc/schema/bytes.txt new file mode 100644 index 00000000..41afbc9c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/bytes.txt @@ -0,0 +1,38 @@ +Description of a series of bytes. + +A "bytes" is a metadata (JSON) blob to describe blobs. It's a recursive +definition that's able to describe a hash tree, describing very large +blobs (or "files"). + +A "bytes" blob can be used on its own, but is also used by things like +a "file" schema blob. + + +{"camliVersion": 1, + "camliType": "bytes", + + // Required. Array of contiguous regions of bytes. Zero or more elements. + // + // Each element must have: + // "size": the number of bytes that this element contributes to array of bytes. + // Required, and must be greater than zero. + // + // At most one of: + // "blobRef": where to get the raw bytes from. if this and "bytesRef" + // are missing, the bytes are all zero (e.g. a sparse file hole) + // "bytesRef": alternative to blobRef, where to get the range's bytes + // from, but pointing recursively at a "bytes" schema blob + // describing the range, recursively. large files are made of + // these in a hash tree. 
it is an error if both "bytesRef" + // and "blobRef" are specified. + // + // Optional: + // "offset": the number of bytes into blobRef or bytesRef to skip to + // get the necessary bytes for the range. usually zero (unspecified) + "parts": [ + {"blobRef": "digalg-blobref", "size": 1024}, + {"bytesRef": "digalg-blobref", "size": 5000000, "offset": 492 }, + {"size": 1000000}, + {"blobRef": "digalg-blobref", "size": 10}, + ] +} diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/claims/TODO b/vendor/github.com/camlistore/camlistore/doc/schema/claims/TODO new file mode 100644 index 00000000..85ce4932 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/claims/TODO @@ -0,0 +1,90 @@ +TODO: +----- +Clean this up and/or break into separate files. + +{"camliVersion": 1, + "camliType": "claim", + "camliSigner": "....", + "claimDate": "2010-07-10T17:20:03.9212Z", // redundant with data in ascii armored "camliSig", + // but required. more legible. takes precedence over + // any date inferred from camliSig + "permaNode": "dig-xxxxxxx", // what is being modified + "claimType": "set-attribute", + "attribute": "camliContent", + "value": "dig-yyyyyyy", + "camliSig": .........} + +claimTypes: +----------- +"add-attribute" (adds a value to a multi-valued attribute (e.g. "tag")) +"set-attribute" (set a single-valued attribute. equivalent to "del-attribute" of "attribute" and then add-attribute) +"del-attribute" (deletes all values of "attribute", if no "value" given, or just the provided "value" if multi-valued) + +"multi".. atomically do multiple add/set/del from above on potentially different permanodes. 
looks like: + + {"camliVersion": 1, + "camliType": "claim", + "claimType": "multi", + "claimDate": "2013-02-24T17:20:03.9212Z", + "claims": [ + {"claimType": "set-attribute", + "permanode": "dig-xxxxxx", + "attribute": "foo", + "value": "fooValue"}, + {"claimType": "add-attribute", + "permanode": "dig-yyyyy", + "attribute": "tag", + "value": "funny"} + ], + "camliSig": .........} + +Attribute names: +---------------- +camliContent: a permanode "becoming" something. value is pointer to what it is now. + + +Old notes from July 2010 doc: +----------------------------- +Claim types: +permanode-become: + -- implies either: + 1) switching from typeless/lifeless virgin pnode into something (dynamic set, filesystem tree, etc) + 2) changing versions of that base metadata (new filesystem snapshot) + -- ‘permaNode’ is the thing that is changing + -- ‘contents’ is the current node that represents what permaNode changes to +set-membership: add a blobref to a dynamic set + -- "permaNode" is blobref of the dynamic set +delete-claim: delete another claim (target is claim to delete) + -- "contents" is the claim blobref you’re deleting +{set,add}-attribute: + -- attach a piece of metadata to something. + -- use set-attribute for single-valued attributes only: highest dated claim wins (of trusted person) e.g. "title", "description" + -- use add-attribute for multi-valued things. e.g. "tag" + +Tagging something: +{"claimType": "add-attribute", // + "attribute": "tag", // utf-8, should have list of valid attributes names, preferrably not made up by us (open social spec?) 
+ "value": "funny", // value that doesn’t have lasting value + "valueRef": "sha1-blobref", // hefty reference to a lasting value + + "claimer?": "sha1-of-the-dude-who’s-signing", + "claimDate": "2010-07-10T17:20:03.9212Z", + "claimType", "permanode-become", + "permaNode": "sha1-pnode", +} + +filesystem root claim: +{ + "camliVersion": 1, + "camliType": "claim", + + // Stuff for camliType "claim": + "claimDate": "2010-07-10T17:20:03.9212Z", // redundant with data in ascii armored "camliSig". TODO: resolve + "claimType", "permanode-become", + + // Stuff for "permanode-become": + "permaNode": "sha1-pnode", + "contents": "sha1-fs-node" + +,"camliSigner": "digalg-blobref-to-ascii-armor-public-key-of-signer", +"camliSig": "......"} diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/claims/attributes.txt b/vendor/github.com/camlistore/camlistore/doc/schema/claims/attributes.txt new file mode 100644 index 00000000..89d5ea4b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/claims/attributes.txt @@ -0,0 +1,80 @@ +Permanode Attributes + +While a permanode can have any arbitrary attributes and values (and +each value can be single-valued or multi-valued), the following are +the conventional attributes and values used by the various tools, +search, FUSE, and web UI. + +"tag": (multi-valued) + + a set of zero or more keywords (or phrases) indexed completely, for + searching by tag. No HTML. + +"title": (single-valued) + + A name given to the permanode. No HTML. + +"description": (single-valued) + + An account of the permanode. It may include but is not limited to: + an abstract, a table of contents, or a free-text account of the + resource. No HTML. As of 2013-12-28, not very defined yet. 
+ +"camliContent": (single-valued) + + when a permanode is a file, the camliContent is set to the fileref + (the blobref of the "file" schema blob) + +"camliContentImage": (single-valued) + + when a permanode has camliContent set, but the camliContent is of + a non-image, the camliContentImage points to a "file" schema's + blobref of an image representation of the content. For instance, + this might be the cover art of an MP3 file or a thumbnail of a + spreadsheet. + +"camliMember": (multi-valued) + + when the permanode represents a set (unordered, unkeyed), the + parent permanode (the container set) has a camliMember set to the + permanode of each child element. + +"camliPath:$dirent_name" (single-valued) + + when the permanode represents an associative container, each keyed + child permanode blobref is pointed to by the "camliPath:$key" + attribute on the parent. This is used by the FUSE client, and + respected in the UI (browser, publishing code), etc. + +"camliNodeType" (single-valued) + + when the application needs to note the type of a permanode before + any other attributes (like those above) are added which would otherwise + imply its type, the camliNodeType lets applications be specific. + This should only be used if another attribute can't imply it. + Currently only used by FUSE to indicate the difference between a new + file and a new directory permanode. Known values include: + + * "directory". this permanode will have "camliPath:$key" + attributes later. + * "file". this permanode will have a "camliContent" later. for + now, it should be treated as if it's an empty, 0-byte file. + +"camliDefVis" (single-valued) + + Can be "hide". Experimental. Affects default visibility in web UI. + +"xattr:$attr_name" (single-valued) + + when a permanode represents a file or directory visible to FUSE, + "xattr:$x" is used to store the value for extended attribute "x". + Extended attribute data may contain any arbitrary bytes, so values + are base64 encoded. 
+ +"camliRoot" (single-valued) + + TODO: doc + +"camliImportRoot" (single-valued) + + TODO: doc diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/claims/delete.txt b/vendor/github.com/camlistore/camlistore/doc/schema/claims/delete.txt new file mode 100644 index 00000000..6d44e99f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/claims/delete.txt @@ -0,0 +1,13 @@ +A claim can delete a permanode or another claim. +(Un)Deletions are not considered as modifications, so the claimDate of a delete claim +is never considered as a modtime in the context of time constrained searches. +----- + +{"camliVersion": 1, + "camliType": "claim", + "camliSigner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "claimDate": "2010-07-10T17:20:03.9212Z", + "claimType": "delete", + "target": "sha1-ab6dacb972eeee72df2a846aab7d751b5856a1a0", // the permanode or claim being deleted. + +} diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/claims/share.txt b/vendor/github.com/camlistore/camlistore/doc/schema/claims/share.txt new file mode 100644 index 00000000..99e41ee5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/claims/share.txt @@ -0,0 +1,31 @@ +A share claim makes blob(s) available to others. (that is, parties who are not +the owner of the Camlistore instance). +----- + +{"camliVersion": 1, + + // Type of authentication required to access the share. Currently only haveref + // is supported, which means that anyone with the claim blobref can access. + "authType": "haveref", + + "camliType": "claim", + "camliSigner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "claimDate": "2014-09-04T20:04:09.193945801Z", + "claimType": "share", + + // The blob or search to share. Exactly one of these must be present. It is an + // error to set neither or both. + "target": "sha1-543fbdfdbcb1297af8a4dc7d299c0cb90e2bea0f", + "search": , + + // If true, anything recursively reachable from target or search is also + // shared. 
Edges that are guaranteed to be followed for purposes of + // reachability are: + // - blobRef and bytesRef values of camliType="blob|file" + // - members of camliType="static-set" + // Currently reachability is implemented more loosely, but clients should not + // depend on that. + "transitive": false, + + +} diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/files/directory.txt b/vendor/github.com/camlistore/camlistore/doc/schema/files/directory.txt new file mode 100644 index 00000000..917bb7d7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/files/directory.txt @@ -0,0 +1,13 @@ +Directory schema + +{"camliVersion": 1, + "camliType": "directory", + + // + // INCLUDE ALL REQUIRED & ANY OPTIONAL FIELDS FROM file-common.txt + // + + // Required: + "entries": "digalg-blobref-to-static-set", +} + diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/files/fifo.txt b/vendor/github.com/camlistore/camlistore/doc/schema/files/fifo.txt new file mode 100644 index 00000000..160f4a84 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/files/fifo.txt @@ -0,0 +1,9 @@ +fifo schema + +{"camliVersion": 1, + "camliType": "fifo", + + // + // INCLUDE ALL REQUIRED & ANY OPTIONAL FIELDS FROM file-common.txt + // +} diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/files/file-common.txt b/vendor/github.com/camlistore/camlistore/doc/schema/files/file-common.txt new file mode 100644 index 00000000..7475ce07 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/files/file-common.txt @@ -0,0 +1,24 @@ +Fields common to files, directories, symlinks, FIFOs and sockets + +{"camliVersion": 1, + "camliType": "...", // one of "file", "directory", "symlink", "fifo", "socket" + + // At most one of these may be set. 
(zero may be present only for large files' subranges, + // represented as a tree of file schemas) But exactly one of these is required for + // top-level files, directories, symlinks, FIFOs, sockets, e.t.c. + "fileName": "if-it-is-utf8.txt", // only for utf-8 + "fileNameBytes": [65, 234, 234, 192, 23, 123], // if unknown charset (not recommended) + + // Optional: + "unixPermission": "0755", // no octal in JSON, so octal as string + "unixOwnerId": 1000, + "unixOwner": "bradfitz", + "unixGroupId": 500, + "unixGroup": "camliteam", + "unixXattrs": [....], // TBD + "unixMtime": "2010-07-10T17:14:51.5678Z", // UTC-- ISO 8601, as many significant digits as known + "unixCtime": "2010-07-10T17:20:03.9212Z", // UTC-- ISO 8601, best-effort to match unix meaning + + // Not recommended to include, but if you must: (atime is a bit silly) + "unixAtime": "2010-07-10T17:14:22.1234Z", // UTC-- ISO 8601 +} diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/files/file.txt b/vendor/github.com/camlistore/camlistore/doc/schema/files/file.txt new file mode 100644 index 00000000..c1389ba5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/files/file.txt @@ -0,0 +1,15 @@ +File schema + +{"camliVersion": 1, + "camliType": "file", + + // #include "file-common.txt" # metadata about the file + // #include "../bytes.txt" # describes the bytes of the file + + // Optional, if linkcount > 1, for representing hardlinks properly. + "inodeRef": "digalg-blobref", // to "inode" blobref, when the link count > 1 +} + +// TODO: Mac/NTFS-style resource forks? perhaps just a "streams" +// array of recursive file objects? + diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/files/inode.txt b/vendor/github.com/camlistore/camlistore/doc/schema/files/inode.txt new file mode 100644 index 00000000..26274cde --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/files/inode.txt @@ -0,0 +1,15 @@ +Inode schema. 
+ +{"camliVersion": 1, + "camliType": "inode", + "inodeId": 12345, // st_ino + "deviceId": 53, // st_dev + "numLinks": 3, // st_nlink +} + +This is optional and probably rarely used, but lets two+ files be +represented as hardlinks with each other. If both files point to the +same inode object, they're hardlinks of each other. + +Note that unlike "directory", "file", and "schema", this does not +inherit fields from the "file-common" schema. diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/files/socket.txt b/vendor/github.com/camlistore/camlistore/doc/schema/files/socket.txt new file mode 100644 index 00000000..b13d3568 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/files/socket.txt @@ -0,0 +1,9 @@ +socket schema + +{"camliVersion": 1, + "camliType": "socket", + + // + // INCLUDE ALL REQUIRED & ANY OPTIONAL FIELDS FROM file-common.txt + // +} diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/files/symlink.txt b/vendor/github.com/camlistore/camlistore/doc/schema/files/symlink.txt new file mode 100644 index 00000000..3969f739 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/files/symlink.txt @@ -0,0 +1,18 @@ +Symlink schema + +{"camliVersion": 1, + "camliType": "symlink", + + // + // INCLUDE ALL REQUIRED & ANY OPTIONAL FIELDS FROM file-common.txt + // + + // Exactly one of: + + // If UTF-8: + "symlinkTarget": "../foo/blah", + + // If unknown charset & have raw 8-bit filenames and can't convert + // to UTF-8. The array is a mix of UTF-8 and/or non-UTF-8 bytes (0-255). + "symlinkTargetBytes": ["../foo/Am", 233, "lie.jpg"], // e.g. 
Amélie in ISO-8859-1 when charset unknown +} diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/objects/keep.txt b/vendor/github.com/camlistore/camlistore/doc/schema/objects/keep.txt new file mode 100644 index 00000000..65558538 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/objects/keep.txt @@ -0,0 +1,16 @@ +A signed "keep" edge for GC/indexing purposes. Expresses a user's +intent to keep an object. + +This is not the only way to keep an object alive for the purposes of +GC. Permanodes (signed by a user) are also part of that user's roots, +and anything they reference (including blobs via "become" claims on +those permanodes) + +This is just the most explicit way when you're not modeling the data +with permanodes. + +{"camliVersion": 1, + "camliType": "keep", + "target": "digalg-blobref-of-thing-to-keep", +} + diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/objects/permanode.txt b/vendor/github.com/camlistore/camlistore/doc/schema/objects/permanode.txt new file mode 100644 index 00000000..0bcfd863 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/objects/permanode.txt @@ -0,0 +1,15 @@ +The idea of a permanode is that it's the anchor from which you build +mutable objects. To serve as a reliable (consistently nameable) +object it must have no mutable state itself. + +{"camliVersion": 1, + "camliType": "permanode", + + // Required. Any random string, to force the sha1 of this + // node to be unique. Note that the date in the ASCII-armored + // GPG JSON signature will already help it be unique, so this + // doesn't need to be a great random. 
+ "random": "615e05c68c8411df81a2001b639d041f" + +} + diff --git a/vendor/github.com/camlistore/camlistore/doc/schema/objects/static-set.txt b/vendor/github.com/camlistore/camlistore/doc/schema/objects/static-set.txt new file mode 100644 index 00000000..b6a06de0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/schema/objects/static-set.txt @@ -0,0 +1,23 @@ +Static set schema + +{"camliVersion": 1, + "camliType": "static-set", + + // Required. + // May be ordered to unordered, depending on context/needs. If unordered, + // it's recommended but not required to sort the blobrefs. + "members": [ + "digalg-blobref-item1", // maybe a file? + "digalg-blobref-item2", // maybe a directory? + "digalg-blobref-item3", // maybe a symlink? + "digalg-blobref-item4", // maybe a permanode? + "digalg-blobref-item5", // ... don't know until you fetch it + "digalg-blobref-item6", // ... and what's valid depends on context + "digalg-blobref-item7", // ... a permanode in a directory would + "digalg-blobref-item8" // ... be invalid, for instance. + ] +} + +Note: dynamic sets are structured differently, using a permanode and + membership claim nodes. The above is just for presenting a snapshot + of members. diff --git a/vendor/github.com/camlistore/camlistore/doc/search-ui.txt b/vendor/github.com/camlistore/camlistore/doc/search-ui.txt new file mode 100644 index 00000000..7a0308b6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/search-ui.txt @@ -0,0 +1,51 @@ +The User Interface's "Search" box accepts predicates of the form "[-]operator:value[:value]". +These predicates may be separated by 'and' or 'or' keywords, or spaces which mean the same +as 'and'. Expressions like this may be grouped with parenthesis. Grouped expressions are +evaluated first. Grouped expressions may be negated. +An 'and' besides an 'or' is evaluated first. 
This means for example that + +tag:foo or is:pano tag:bar + +will return all images having tag foo together with the panorama images having tag bar. + +Negation of a predicate is achieved by prepending a minus sign: -is:landscape will match +with pictures of not landscape ratio. + +For example + + -(after:"2010-01-01" before:"2010-03-02T12:33:44") or loc:"Amsterdam" + +will return all images having "modtime" outside the specified period, joined with +all images taken in Amsterdam. + +When you need to match a value containing a space, you need to use double quotes around +the value only. For example: tag:"Three word tagname" and not "tag:Three word tagname". +If your value contains double quotes you can use backslash escaping. +For example: attr:bar:"He said: \"Hi\"" + +Usable operators: + after: date format is RFC3339, but can be shortened as required. + before: i.e. 2011-01-01 is Jan 1 of year 2011 and "2011" means the same. + attr: match on attribute. Use attr:foo:bar to match nodes having their foo + attribute set to bar. + format: file's format (or MIME-type) such as jpg, pdf, tiff. + has:location image has a location (GPSLatitude and GPSLongitude can be + retrieved from the image's EXIF tags). + loc: uses the EXIF GPS fields to match images having a location near + the specified location. Locations are resolved using + maps.googleapis.com. For example: loc:"new york, new york" + is:image object is an image + is:landscape the image has a landscape aspect + is:pano the image's aspect ratio is over 2 - panorama picture. + is:portrait the image has a portrait aspect. + height: use height:min-max to match images having a height of at least min + and at most max. Use height:min- to specify only an underbound and + height:-max to specify only an upperbound. + Exact matches should use height:480 + tag: match on a tag + width: use width:min-max to match images having a width of at least min + and at most max. 
Use width:min- to specify only an underbound and + width:-max to specify only an upperbound. + Exact matches should use width:640 + childrenof: Find child permanodes of a parent permanode (or prefix + of a parent permanode): childrenof:sha1-527cf12 diff --git a/vendor/github.com/camlistore/camlistore/doc/terminology.txt b/vendor/github.com/camlistore/camlistore/doc/terminology.txt new file mode 100644 index 00000000..e5a0051c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/doc/terminology.txt @@ -0,0 +1,4 @@ +See: + http://camlistore.org/docs/terms +or + website/content/docs/terms diff --git a/vendor/github.com/camlistore/camlistore/internal/chanworker/chanworker.go b/vendor/github.com/camlistore/camlistore/internal/chanworker/chanworker.go new file mode 100644 index 00000000..c0da8cc5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/internal/chanworker/chanworker.go @@ -0,0 +1,120 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chanworker + +import ( + "container/list" + "sync" +) + +type chanWorker struct { + c chan interface{} + + donec chan bool + workc chan interface{} + fn func(n interface{}, ok bool) + buf *list.List +} + +// TODO: make it configurable if need be. Although so far in camput it wasn't. +const buffered = 16 + +// NewWorker starts nWorkers goroutines running fn on incoming +// items sent on the returned channel. fn may block; writes to the +// channel will buffer. 
+// If nWorkers is negative, a new goroutine running fn is called for each +// item sent on the returned channel. +// When the returned channel is closed, fn is called with (nil, false) +// after all other calls to fn have completed. +// If nWorkers is zero, NewWorker will panic. +func NewWorker(nWorkers int, fn func(el interface{}, ok bool)) chan<- interface{} { + if nWorkers == 0 { + panic("NewChanWorker: invalid value of 0 for nWorkers") + } + retc := make(chan interface{}, buffered) + if nWorkers < 0 { + // Unbounded number of workers. + go func() { + var wg sync.WaitGroup + for w := range retc { + wg.Add(1) + go func(w interface{}) { + fn(w, true) + wg.Done() + }(w) + } + wg.Wait() + fn(nil, false) + }() + return retc + } + w := &chanWorker{ + c: retc, + workc: make(chan interface{}, buffered), + donec: make(chan bool), // when workers finish + fn: fn, + buf: list.New(), + } + go w.pump() + for i := 0; i < nWorkers; i++ { + go w.work() + } + go func() { + for i := 0; i < nWorkers; i++ { + <-w.donec + } + fn(nil, false) // final sentinel + }() + return retc +} + +func (w *chanWorker) pump() { + inc := w.c + for inc != nil || w.buf.Len() > 0 { + outc := w.workc + var frontNode interface{} + if e := w.buf.Front(); e != nil { + frontNode = e.Value + } else { + outc = nil + } + select { + case outc <- frontNode: + w.buf.Remove(w.buf.Front()) + case el, ok := <-inc: + if !ok { + inc = nil + continue + } + w.buf.PushBack(el) + } + } + close(w.workc) +} + +func (w *chanWorker) work() { + for { + select { + case n, ok := <-w.workc: + if !ok { + w.donec <- true + return + } + w.fn(n, true) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/lib/python/camli/__init__.py b/vendor/github.com/camlistore/camlistore/lib/python/camli/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/camlistore/camlistore/lib/python/camli/op.py b/vendor/github.com/camlistore/camlistore/lib/python/camli/op.py new file mode 100755 index 
00000000..74026b88 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/camli/op.py @@ -0,0 +1,390 @@ +#!/usr/bin/env python +# +# Camlistore uploader client for Python. +# +# Copyright 2010 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Client library for Camlistore.""" + +__author__ = 'Brett Slatkin (bslatkin@gmail.com)' + +import base64 +import cStringIO +import hashlib +import httplib +import logging +import mimetools +import urllib +import urlparse + +import simplejson + +__all__ = ['Error', 'ServerError', 'PayloadError', 'BUFFER_SIZE', 'CamliOp'] + + +BUFFER_SIZE = 512 * 1024 + + +class Error(Exception): + """Base class for exceptions in this module.""" + + +class ServerError(Error): + """An unexpected error was returned by the server.""" + + +class PayloadError(ServerError): + """Something about a data payload was bad.""" + + +def buffered_sha1(data, buffer_size=BUFFER_SIZE): + """Calculates the sha1 hash of some data. + + Args: + data: A string of data to write or an open file-like object. File-like + objects will be seeked back to their original position before this + function returns. + buffer_size: How much data to munge at a time. + + Returns: + Hex sha1 string. 
+ """ + compute = hashlib.sha1() + if isinstance(data, basestring): + compute.update(data) + else: + start = data.tell() + while True: + line = data.read(buffer_size) + if line == '': + break + compute.update(line) + data.seek(start) + return compute.hexdigest() + + +class CamliOp(object): + """Camlistore client class that is single threaded, using one socket.""" + + def __init__(self, + server_address, + buffer_size=BUFFER_SIZE, + create_connection=httplib.HTTPConnection, + auth=None, + basepath=""): + """Initializer. + + Args: + server_address: hostname:port for the server. + buffer_size: Byte size to use for in-memory buffering for various + client-related operations. + create_connection: Use for testing. + auth: Optional. 'username:password' to use for HTTP basic auth. + basepath: Optional path suffix. e.g. if the server is at + "localhost:3179/bs", the basepath should be "/bs". + """ + self.server_address = server_address + self.buffer_size = buffer_size + self._create_connection = create_connection + self._connection = None + self._authorization = '' + self.basepath = "" + if auth: + if len(auth.split(':')) != 2: + # Default to dummy username; current server doesn't care + # TODO(jrabbit): care when necessary + auth = "username:" + auth #If username not given use the implicit default, 'username' + self._authorization = ('Basic ' + base64.encodestring(auth).strip()) + if basepath: + if '/' not in basepath: + raise NameError("basepath must be in form '/bs'") + if basepath[-1] == '/': + basepath = basepath[:-1] + self.basepath = basepath + + def _setup_connection(self): + """Sets up the HTTP connection.""" + self.connection = self._create_connection(self.server_address) + + def put_blobs(self, blobs): + """Puts a set of blobs. + + Args: + blobs: List of (data, blobref) tuples; list of open files; or list of + blob data strings. + + Returns: + The set of blobs that were actually uploaded. If all blobs are already + present this set will be empty. 
+ + Raises: + ServerError if the server response is bad. + PayloadError if the server response is not in the right format. + OSError or IOError if reading any blobs breaks. + """ + if isinstance(blobs, dict): + raise TypeError('Must pass iterable of tuples, open files, or strings.') + + blobref_dict = {} + for item in blobs: + if isinstance(item, tuple): + blob, blobref = item + else: + blob, blobref = item, None + if blobref is None: + blobref = 'sha1-' + buffered_sha1(blob, buffer_size=self.buffer_size) + blobref_dict[blobref] = blob + + preupload = {'camliversion': '1'} + for index, blobref in enumerate(blobref_dict.keys()): + preupload['blob%d' % (index+1)] = blobref + + # TODO: What is the max number of blobs that can be specified in a + # preupload request? The server probably has some reasonable limit and + # after that we need to do batching in smaller groups. + + self._setup_connection() + if self.basepath: + fullpath = self.basepath + '/camli/stat' + else: + fullpath = '/camli/stat' + self.connection.request( + 'POST', fullpath, urllib.urlencode(preupload), + {'Content-Type': 'application/x-www-form-urlencoded', + 'Authorization': self._authorization}) + response = self.connection.getresponse() + logging.debug('Preupload HTTP response: %d %s', + response.status, response.reason) + if response.status != 200: + raise ServerError('Bad preupload response status: %d %s' % + (response.status, response.reason)) + + data = response.read() + try: + response_dict = simplejson.loads(data) + except simplejson.decoder.JSONDecodeError: + raise PayloadError('Server returned bad preupload response: %r' % data) + + logging.debug('Parsed preupload response: %r', response_dict) + if 'stat' not in response_dict: + raise PayloadError( + 'Could not find "stat" in preupload response: %r' % + response_dict) + if 'uploadUrl' not in response_dict: + raise PayloadError( + 'Could not find "uploadUrl" in preupload response: %r' % + response_dict) + + already_have_blobrefs = set() + 
for blobref_json in response_dict['stat']: + if 'blobRef' not in blobref_json: + raise PayloadError( + 'Cannot find "blobRef" in preupload response: %r', + response_dict) + already_have_blobrefs.add(blobref_json['blobRef']) + logging.debug('Already have blobs: %r', already_have_blobrefs) + + missing_blobrefs = set(blobref_dict.iterkeys()) + missing_blobrefs.difference_update(already_have_blobrefs) + if not missing_blobrefs: + logging.debug('All blobs already present.') + return + + # TODO(bslatkin): Figure out the 'Content-Length' header value by looking + # at the size of the files by seeking; required for multipart POST. + out = cStringIO.StringIO() + boundary = mimetools.choose_boundary() + boundary_start = '--' + boundary + + blob_number = 0 + for blobref in blobref_dict.iterkeys(): + if blobref in already_have_blobrefs: + logging.debug('Already have blobref=%s', blobref) + continue + blob = blobref_dict[blobref] + blob_number += 1 + + out.write(boundary_start) + out.write('\r\nContent-Type: application/octet-stream\r\n') + out.write('Content-Disposition: form-data; name="%s"; ' + 'filename="%d"\r\n\r\n' % (blobref, blob_number)) + if isinstance(blob, basestring): + out.write(blob) + else: + while True: + buf = blob.read(self.buffer_size) + if buf == '': + break + out.write(buf) + out.write('\r\n') + out.write(boundary_start) + out.write('--\r\n') + request_body = out.getvalue() + + pieces = list(urlparse.urlparse(response_dict['uploadUrl'])) + # TODO: Support upload servers on another base URL. 
+ pieces[0], pieces[1] = '', '' + relative_url = urlparse.urlunparse(pieces) + self.connection.request( + 'POST', relative_url, request_body, + {'Content-Type': 'multipart/form-data; boundary="%s"' % boundary, + 'Content-Length': str(len(request_body)), + 'Authorization': self._authorization}) + + response = self.connection.getresponse() + logging.debug('Upload response: %d %s', response.status, response.reason) + if response.status not in (200, 301, 302, 303): + raise ServerError('Bad upload response status: %d %s' % + (response.status, response.reason)) + + while response.status in (301, 302, 303): + # TODO(bslatkin): Support connections to servers on different addresses + # after redirects. For now just send another request to the same server. + location = response.getheader('Location') + pieces = list(urlparse.urlparse(location)) + pieces[0], pieces[1] = '', '' + new_relative_url = urlparse.urlunparse(pieces) + logging.debug('Redirect %s -> %s', relative_url, new_relative_url) + relative_url = new_relative_url + self.connection.request('GET', relative_url, headers={ + 'Authorization': self._authorization}) + response = self.connection.getresponse() + + if response.status != 200: + raise ServerError('Bad upload response status: %d %s' % + (response.status, response.reason)) + + data = response.read() + try: + response_dict = simplejson.loads(data) + except simplejson.decoder.JSONDecodeError: + raise PayloadError('Server returned bad upload response: %r' % data) + + if 'received' not in response_dict: + raise PayloadError('Could not find "received" in upload response: %r' % + response_dict) + + received_blobrefs = set() + for blobref_json in response_dict['received']: + if 'blobRef' not in blobref_json: + raise PayloadError( + 'Cannot find "blobRef" in upload response: %r', + response_dict) + received_blobrefs.add(blobref_json['blobRef']) + logging.debug('Received blobs: %r', received_blobrefs) + + missing_blobrefs.difference_update(received_blobrefs) + if 
missing_blobrefs: + # TODO: Try to upload the missing ones. + raise ServerError('Some blobs not uploaded: %r', missing_blobrefs) + + logging.debug('Upload of %d blobs successful.', len(blobref_dict)) + return received_blobrefs + + def get_blobs(self, + blobref_list, + start_out=None, + end_out=None, + check_sha1=True): + """Gets a set of blobs. + + Args: + blobref_list: A single blobref as a string or an iterable of strings that + are blobrefs. + start_out: Optional. A function taking the blobref's key, returns a + file-like object to which the blob should be written. Called before + the blob has started any writing. + end_out: Optional along with start_out. A function that takes the + blobref and open file-like object that does proper cleanup and closing + of the file. Called when all of the file's contents have been written. + check_sha1: Double-check that the file's contents match the blobref. + + Returns: + If start_out is not supplied, then all blobs will be kept in memory. If + blobref_list is a single blobref, then the return value will be a string + with the blob data or None if the blob was not present. If blobref_list + was iterable, the return value will be a dictionary mapping blobref to + blob data for each blob that was found. + + If start_out is supplied, the return value will be None. Callers can + check for missing blobs by comparing their own input of the blobref_list + argument to the blobrefs that are passed to start_out. + + Raises: + ServerError if the server response is invalid for whatever reason. + OSError or IOError if writing to any files breaks. + """ + multiple = not isinstance(blobref_list, basestring) + result = {} + if start_out is None: + def start_out(blobref): + buffer = cStringIO.StringIO() + return buffer + + def end_out(blobref, file_like): + result[blobref] = file_like.getvalue() + else: + result = None # Rely on user-supplied start_out for reporting blobrefs. 
+ if end_out is None: + def end_out(blobref, file_like): + file_like.close() + + self._setup_connection() + + # Note, we could use a 'preupload' here as a quick, bulk existence check, + # but that may not always work depending on the access the user has. + # It's possible the user has read-only access, and thus can only do + # GET or HEAD on objects. + + for blobref in blobref_list: + logging.debug('Getting blobref=%s', blobref) + if self.basepath: + fullpath = self.basepath + '/camli/' + else: + fullpath = '/camli/' + self.connection.request('GET', fullpath + blobref, + headers={'Authorization': self._authorization}) + response = self.connection.getresponse() + if response.status == 404: + logging.debug('Server does not have blobref=%s', blobref) + continue + elif response.status != 200: + raise ServerError('Bad response status: %d %s' % + (response.status, response.reason)) + + if check_sha1: + compute_hash = hashlib.sha1() + + out_file = start_out(blobref) + while True: + buf = response.read(self.buffer_size) + if buf == '': + end_out(blobref, out_file) + break + + if check_sha1: + compute_hash.update(buf) + + out_file.write(buf) + + if check_sha1: + found = 'sha1-' + compute_hash.hexdigest() + if found != blobref: + raise ValueError('sha1 hash of blobref does not match; ' + 'found %s, expected %s' % (found, blobref)) + + if result and not multiple: + return result.values()[0] + return result diff --git a/vendor/github.com/camlistore/camlistore/lib/python/camli/schema.py b/vendor/github.com/camlistore/camlistore/lib/python/camli/schema.py new file mode 100644 index 00000000..8ee204d4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/camli/schema.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python +# +# Camlistore uploader client for Python. +# +# Copyright 2011 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Schema blob library for Camlistore.""" + +__author__ = 'Brett Slatkin (bslatkin@gmail.com)' + +import datetime +import re +import simplejson + +__all__ = [ + 'Error', 'DecodeError', 'SchemaBlob', 'FileCommon', 'File', + 'Directory', 'Symlink', 'decode'] + + +class Error(Exception): + """Base class for exceptions in this module.""" + +class DecodeError(Error): + """Could not decode the supplied schema blob.""" + + +# Maps 'camliType' to SchemaBlob sub-classes. +_TYPE_TO_CLASS = {} + + +def _camel_to_python(name): + """Converts camelcase to Python case.""" + return re.sub(r'([a-z]+)([A-Z])', r'\1_\2', name).lower() + + +class _SchemaMeta(type): + """Meta-class for schema blobs.""" + + def __init__(cls, name, bases, dict): + required_fields = set() + optional_fields = set() + json_to_python = {} + python_to_json = {} + serializers = {} + + def map_name(field): + if field.islower(): + return field + python_name = _camel_to_python(field) + json_to_python[field] = python_name + python_to_json[python_name] = field + return python_name + + for klz in bases + (cls,): + if hasattr(klz, '_json_to_python'): + json_to_python.update(klz._json_to_python) + if hasattr(klz, '_python_to_json'): + python_to_json.update(klz._python_to_json) + + if hasattr(klz, 'required_fields'): + for field in klz.required_fields: + field = map_name(field) + assert field not in required_fields, (klz, field) + assert field not in optional_fields, (klz, field) + required_fields.add(field) + + if hasattr(klz, 'optional_fields'): + for field in klz.optional_fields: + field = 
map_name(field) + assert field not in required_fields, (klz, field) + assert field not in optional_fields, (klz, field) + optional_fields.add(field) + + if hasattr(klz, '_serializers'): + for field, value in klz._serializers.iteritems(): + field = map_name(field) + assert (field in required_fields or + field in optional_fields), (klz, field) + if not isinstance(value, _FieldSerializer): + serializers[field] = value(field) + else: + serializers[field] = value + + setattr(cls, 'required_fields', frozenset(required_fields)) + setattr(cls, 'optional_fields', frozenset(optional_fields)) + setattr(cls, '_serializers', serializers) + setattr(cls, '_json_to_python', json_to_python) + setattr(cls, '_python_to_json', python_to_json) + if hasattr(cls, 'type'): + _TYPE_TO_CLASS[cls.type] = cls + + +class SchemaBlob(object): + """Base-class for schema blobs. + + Each sub-class should have these fields: + type: Required value of 'camliType'. + required_fields: Set of required field names. + optional_fields: Set of optional field names. + _serializers: Dictionary mapping field names to the _FieldSerializer + sub-class to use for serializing/deserializing the field's value. + """ + + __metaclass__ = _SchemaMeta + + required_fields = frozenset([ + 'camliVersion', + 'camliType', + ]) + optional_fields = frozenset([ + 'camliSigner', + 'camliSig', + ]) + _serializers = {} + + def __init__(self, blobref): + """Initializer. + + Args: + blobref: The blobref of the schema blob. + """ + self.blobref = blobref + self.unexpected_fields = {} + + @property + def all_fields(self): + """Returns the set of all potential fields for this blob.""" + all_fields = set() + all_fields.update(self.required_fields) + all_fields.update(self.optional_fields) + all_fields.update(self.unexpected_fields) + return all_fields + + def decode(self, blob_bytes, parsed=None): + """Decodes a schema blob's bytes and unmarshals its fields. + + Args: + blob_bytes: String with the bytes of the blob. 
+ parsed: If not None, an already parsed version of the blob bytes. When + set, the blob_bytes argument is ignored. + + Raises: + DecodeError if the blob_bytes are bad or the parsed blob is missing + required fields. + """ + for field in self.all_fields: + if hasattr(self, field): + delattr(self, field) + + if parsed is None: + try: + parsed = simplejson.loads(blob_bytes) + except simplejson.JSONDecodeError, e: + raise DecodeError('Could not parse JSON. %s: %s' % (e.__class__, e)) + + for json_name, value in parsed.iteritems(): + name = self._json_to_python.get(json_name, json_name) + if not (name in self.required_fields or name in self.optional_fields): + self.unexpected_fields[name] = value + continue + serializer = self._serializers.get(name) + if serializer: + value = serializer.from_json(value) + setattr(self, name, value) + + for name in self.required_fields: + if not hasattr(self, name): + raise DecodeError('Missing required field: %s' % name) + + def encode(self): + """Encodes a schema blob's bytes and marshals its fields. + + Returns: + A UTF-8-encoding plain string containing the encoded blob bytes. + """ + out = {} + for python_name in self.all_fields: + if not hasattr(self, python_name): + continue + value = getattr(self, python_name) + serializer = self._serializers.get(python_name) + if serializer: + value = serializer.to_json(value) + json_name = self._python_to_json.get(python_name, python_name) + out[json_name] = value + return simplejson.dumps(out) + +################################################################################ +# Serializers for converting JSON fields to/from Python values + +class _FieldSerializer(object): + """Serializes a named field's value to and from JSON.""" + + def __init__(self, name): + """Initializer. + + Args: + name: The name of the field. + """ + self.name = name + + def from_json(self, value): + """Converts the JSON format of the field to the Python type. + + Args: + value: The JSON value. 
+ + Returns: + The Python value. + """ + raise NotImplemented('Must implement from_json') + + def to_json(self, value): + """Converts the Python field value to the JSON format of the field. + + Args: + value: The Python value. + + Returns: + The JSON formatted-value. + """ + raise NotImplemented('Must implement to_json') + + +class _DateTimeSerializer(_FieldSerializer): + """Formats ISO 8601 strings to/from datetime.datetime instances.""" + + def from_json(self, value): + if '.' in value: + iso, micros = value.split('.') + micros = int((micros[:-1] + ('0' * 6))[:6]) + else: + iso, micros = value[:-1], 0 + + when = datetime.datetime.strptime(iso, '%Y-%m-%dT%H:%M:%S') + return when + datetime.timedelta(microseconds=micros) + + def to_json(self, value): + return value.isoformat() + 'Z' + +################################################################################ +# Concrete Schema Blobs + +class FileCommon(SchemaBlob): + """Common base-class for all unix-y files.""" + + required_fields = frozenset([]) + optional_fields = frozenset([ + 'fileName', + 'fileNameBytes', + 'unixPermission', + 'unixOwnerId', + 'unixGroupId', + 'unixGroup', + 'unixXattrs', + 'unixMtime', + 'unixCtime', + 'unixAtime', + ]) + _serializers = { + 'unixMtime': _DateTimeSerializer, + 'unixCtime': _DateTimeSerializer, + 'unixAtime': _DateTimeSerializer, + } + + +class File(FileCommon): + """A file.""" + + type = 'file' + required_fields = frozenset([ + 'size', + 'contentParts', + ]) + optional_fields = frozenset([ + 'inodeRef', + ]) + _serializers = {} + + +class Directory(FileCommon): + """A directory.""" + + type = 'directory' + required_fields = frozenset([ + 'entries', + ]) + optional_fields = frozenset([]) + _serializers = {} + + +class Symlink(FileCommon): + """A symlink.""" + + type = 'symlink' + required_fields = frozenset([]) + optional_fields = frozenset([ + 'symlinkTarget', + 'symlinkTargetBytes', + ]) + _serializers = {} + + 
+################################################################################ +# Helper methods + +def decode(blobref, blob_bytes): + """Decode any schema blob, validating all required fields for its type.""" + try: + parsed = simplejson.loads(blob_bytes) + except simplejson.JSONDecodeError, e: + raise DecodeError('Could not parse JSON. %s: %s' % (e.__class__, e)) + + if 'camliType' not in parsed: + raise DecodeError('Could not find "camliType" field.') + + camli_type = parsed['camliType'] + blob_class = _TYPE_TO_CLASS.get(camli_type) + if blob_class is None: + raise DecodeError( + 'Could not find SchemaBlob sub-class for camliType=%r' % camli_type) + + schema_blob = blob_class(blobref) + schema_blob.decode(None, parsed=parsed) + return schema_blob diff --git a/vendor/github.com/camlistore/camlistore/lib/python/camli/schema_test.py b/vendor/github.com/camlistore/camlistore/lib/python/camli/schema_test.py new file mode 100755 index 00000000..d38e4ea4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/camli/schema_test.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# +# Camlistore uploader client for Python. +# +# Copyright 2011 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +"""Schema blob library for Camlistore.""" + +__author__ = 'Brett Slatkin (bslatkin@gmail.com)' + +import datetime +import os +import sys +import unittest + +sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) + +import camli.schema +import simplejson + + +class SchemaTest(unittest.TestCase): + """End-to-end tests for Schema blobs.""" + + def testFile(self): + schema_blob = camli.schema.decode('asdf-myblobref', """{ + "camliVersion": 1, + "camliType": "file", + "size": 0, + "contentParts": [], + "unixMtime": "2010-07-10T17:14:51.5678Z", + "unixCtime": "2010-07-10T17:20:03Z" + }""") + self.assertTrue(isinstance(schema_blob, camli.schema.File)) + self.assertTrue(isinstance(schema_blob, camli.schema.FileCommon)) + self.assertTrue(isinstance(schema_blob, camli.schema.SchemaBlob)) + expected = { + 'unexpected_fields': {}, + 'unix_mtime': datetime.datetime(2010, 7, 10, 17, 14, 51, 567800), + 'content_parts': [], + 'blobref': 'asdf-myblobref', + 'unix_ctime': datetime.datetime(2010, 7, 10, 17, 20, 3), + 'camli_version': 1, + 'camli_type': u'file', + 'size': 0 + } + self.assertEquals(expected, schema_blob.__dict__) + result = schema_blob.encode() + result_parsed = simplejson.loads(result) + expected = { + 'camliType': 'file', + 'camliVersion': 1, + 'unixMtime': '2010-07-10T17:14:51.567800Z', + 'unixCtime': '2010-07-10T17:20:03Z', + 'contentParts': [], + 'size': 0, + } + self.assertEquals(expected, result_parsed) + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/__init__.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/__init__.py new file mode 100644 index 00000000..4f327c18 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/__init__.py @@ -0,0 +1,8 @@ +import sys +pyver = sys.version_info[0:2] +if pyver <= (2, 4): + from fuse24 import * +elif pyver >= (3, 0): + from fuse3 import * +else: + from fuse import * diff --git 
a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/context.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/context.py new file mode 100755 index 00000000..2609aa05 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/context.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python + +from errno import ENOENT +from stat import S_IFDIR, S_IFREG +from sys import argv, exit +from time import time + +from fuse import FUSE, FuseOSError, Operations, LoggingMixIn, fuse_get_context + + +class Context(LoggingMixIn, Operations): + """Example filesystem to demonstrate fuse_get_context()""" + + def getattr(self, path, fh=None): + uid, gid, pid = fuse_get_context() + if path == '/': + st = dict(st_mode=(S_IFDIR | 0755), st_nlink=2) + elif path == '/uid': + size = len('%s\n' % uid) + st = dict(st_mode=(S_IFREG | 0444), st_size=size) + elif path == '/gid': + size = len('%s\n' % gid) + st = dict(st_mode=(S_IFREG | 0444), st_size=size) + elif path == '/pid': + size = len('%s\n' % pid) + st = dict(st_mode=(S_IFREG | 0444), st_size=size) + else: + raise FuseOSError(ENOENT) + st['st_ctime'] = st['st_mtime'] = st['st_atime'] = time() + return st + + def read(self, path, size, offset, fh): + uid, gid, pid = fuse_get_context() + if path == '/uid': + return '%s\n' % uid + elif path == '/gid': + return '%s\n' % gid + elif path == '/pid': + return '%s\n' % pid + return '' + + def readdir(self, path, fh): + return ['.', '..', 'uid', 'gid', 'pid'] + + # Disable unused operations: + access = None + flush = None + getxattr = None + listxattr = None + open = None + opendir = None + release = None + releasedir = None + statfs = None + + +if __name__ == "__main__": + if len(argv) != 2: + print 'usage: %s ' % argv[0] + exit(1) + fuse = FUSE(Context(), argv[1], foreground=True) \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/fuse.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/fuse.py new file mode 
100644 index 00000000..b68e737a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/fuse.py @@ -0,0 +1,650 @@ +# Copyright (c) 2008 Giorgos Verigakis +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from __future__ import division + +from ctypes import * +from ctypes.util import find_library +from errno import * +from functools import partial +from os import strerror +from platform import machine, system +from stat import S_IFDIR +from traceback import print_exc + + +class c_timespec(Structure): + _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)] + +class c_utimbuf(Structure): + _fields_ = [('actime', c_timespec), ('modtime', c_timespec)] + +class c_stat(Structure): + pass # Platform dependent + +_system = system() +if _system in ('Darwin', 'FreeBSD'): + _libiconv = CDLL(find_library("iconv"), RTLD_GLOBAL) # libfuse dependency + ENOTSUP = 45 + c_dev_t = c_int32 + c_fsblkcnt_t = c_ulong + c_fsfilcnt_t = c_ulong + c_gid_t = c_uint32 + c_mode_t = c_uint16 + c_off_t = c_int64 + c_pid_t = c_int32 + c_uid_t = c_uint32 + setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), + c_size_t, c_int, c_uint32) + getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), + c_size_t, c_uint32) + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('st_ino', c_uint32), + 
('st_mode', c_mode_t), + ('st_nlink', c_uint16), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('st_rdev', c_dev_t), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec), + ('st_size', c_off_t), + ('st_blocks', c_int64), + ('st_blksize', c_int32)] +elif _system == 'Linux': + ENOTSUP = 95 + c_dev_t = c_ulonglong + c_fsblkcnt_t = c_ulonglong + c_fsfilcnt_t = c_ulonglong + c_gid_t = c_uint + c_mode_t = c_uint + c_off_t = c_longlong + c_pid_t = c_int + c_uid_t = c_uint + setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int) + getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t) + + _machine = machine() + if _machine == 'x86_64': + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('st_ino', c_ulong), + ('st_nlink', c_ulong), + ('st_mode', c_mode_t), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('__pad0', c_int), + ('st_rdev', c_dev_t), + ('st_size', c_off_t), + ('st_blksize', c_long), + ('st_blocks', c_long), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec)] + elif _machine == 'ppc': + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('st_ino', c_ulonglong), + ('st_mode', c_mode_t), + ('st_nlink', c_uint), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('st_rdev', c_dev_t), + ('__pad2', c_ushort), + ('st_size', c_off_t), + ('st_blksize', c_long), + ('st_blocks', c_longlong), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec)] + else: + # i686, use as fallback for everything else + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('__pad1', c_ushort), + ('__st_ino', c_ulong), + ('st_mode', c_mode_t), + ('st_nlink', c_uint), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('st_rdev', c_dev_t), + ('__pad2', c_ushort), + ('st_size', c_off_t), + ('st_blksize', c_long), + ('st_blocks', c_longlong), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec), + 
('st_ino', c_ulonglong)] +else: + raise NotImplementedError('%s is not supported.' % _system) + + +class c_statvfs(Structure): + _fields_ = [ + ('f_bsize', c_ulong), + ('f_frsize', c_ulong), + ('f_blocks', c_fsblkcnt_t), + ('f_bfree', c_fsblkcnt_t), + ('f_bavail', c_fsblkcnt_t), + ('f_files', c_fsfilcnt_t), + ('f_ffree', c_fsfilcnt_t), + ('f_favail', c_fsfilcnt_t)] + +if _system == 'FreeBSD': + c_fsblkcnt_t = c_uint64 + c_fsfilcnt_t = c_uint64 + setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int) + getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t) + class c_statvfs(Structure): + _fields_ = [ + ('f_bavail', c_fsblkcnt_t), + ('f_bfree', c_fsblkcnt_t), + ('f_blocks', c_fsblkcnt_t), + ('f_favail', c_fsfilcnt_t), + ('f_ffree', c_fsfilcnt_t), + ('f_files', c_fsfilcnt_t), + ('f_bsize', c_ulong), + ('f_flag', c_ulong), + ('f_frsize', c_ulong)] + +class fuse_file_info(Structure): + _fields_ = [ + ('flags', c_int), + ('fh_old', c_ulong), + ('writepage', c_int), + ('direct_io', c_uint, 1), + ('keep_cache', c_uint, 1), + ('flush', c_uint, 1), + ('padding', c_uint, 29), + ('fh', c_uint64), + ('lock_owner', c_uint64)] + +class fuse_context(Structure): + _fields_ = [ + ('fuse', c_voidp), + ('uid', c_uid_t), + ('gid', c_gid_t), + ('pid', c_pid_t), + ('private_data', c_voidp)] + +class fuse_operations(Structure): + _fields_ = [ + ('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))), + ('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)), + ('getdir', c_voidp), # Deprecated, use readdir + ('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)), + ('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)), + ('unlink', CFUNCTYPE(c_int, c_char_p)), + ('rmdir', CFUNCTYPE(c_int, c_char_p)), + ('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)), + ('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)), + ('link', CFUNCTYPE(c_int, c_char_p, c_char_p)), + ('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)), + ('chown', 
CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)), + ('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)), + ('utime', c_voidp), # Deprecated, use utimens + ('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t, + POINTER(fuse_file_info))), + ('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t, + POINTER(fuse_file_info))), + ('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))), + ('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))), + ('setxattr', setxattr_t), + ('getxattr', getxattr_t), + ('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)), + ('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)), + ('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp, CFUNCTYPE(c_int, c_voidp, + c_char_p, POINTER(c_stat), c_off_t), c_off_t, POINTER(fuse_file_info))), + ('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))), + ('init', CFUNCTYPE(c_voidp, c_voidp)), + ('destroy', CFUNCTYPE(c_voidp, c_voidp)), + ('access', CFUNCTYPE(c_int, c_char_p, c_int)), + ('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(fuse_file_info))), + ('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(fuse_file_info))), + ('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat), + POINTER(fuse_file_info))), + ('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)), + ('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))), + ('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong)))] + + +def time_of_timespec(ts): + return ts.tv_sec + ts.tv_nsec / 10 ** 9 + +def set_st_attrs(st, attrs): + for key, val in attrs.items(): + if key in ('st_atime', 
'st_mtime', 'st_ctime'): + timespec = getattr(st, key + 'spec') + timespec.tv_sec = int(val) + timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9) + elif hasattr(st, key): + setattr(st, key, val) + + +_libfuse_path = find_library('fuse') +if not _libfuse_path: + raise EnvironmentError('Unable to find libfuse') +_libfuse = CDLL(_libfuse_path) +_libfuse.fuse_get_context.restype = POINTER(fuse_context) + + +def fuse_get_context(): + """Returns a (uid, gid, pid) tuple""" + ctxp = _libfuse.fuse_get_context() + ctx = ctxp.contents + return ctx.uid, ctx.gid, ctx.pid + + +class FuseOSError(OSError): + def __init__(self, errno): + super(FuseOSError, self).__init__(errno, strerror(errno)) + + +class FUSE(object): + """This class is the lower level interface and should not be subclassed + under normal use. Its methods are called by fuse. + Assumes API version 2.6 or later.""" + + def __init__(self, operations, mountpoint, raw_fi=False, **kwargs): + """Setting raw_fi to True will cause FUSE to pass the fuse_file_info + class as is to Operations, instead of just the fh field. 
+ This gives you access to direct_io, keep_cache, etc.""" + + self.operations = operations + self.raw_fi = raw_fi + args = ['fuse'] + if kwargs.pop('foreground', False): + args.append('-f') + if kwargs.pop('debug', False): + args.append('-d') + if kwargs.pop('nothreads', False): + args.append('-s') + kwargs.setdefault('fsname', operations.__class__.__name__) + args.append('-o') + args.append(','.join(key if val == True else '%s=%s' % (key, val) + for key, val in kwargs.items())) + args.append(mountpoint) + argv = (c_char_p * len(args))(*args) + + fuse_ops = fuse_operations() + for name, prototype in fuse_operations._fields_: + if prototype != c_voidp and getattr(operations, name, None): + op = partial(self._wrapper_, getattr(self, name)) + setattr(fuse_ops, name, prototype(op)) + err = _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops), + sizeof(fuse_ops), None) + del self.operations # Invoke the destructor + if err: + raise RuntimeError(err) + + def _wrapper_(self, func, *args, **kwargs): + """Decorator for the methods that follow""" + try: + return func(*args, **kwargs) or 0 + except OSError, e: + return -(e.errno or EFAULT) + except: + print_exc() + return -EFAULT + + def getattr(self, path, buf): + return self.fgetattr(path, buf, None) + + def readlink(self, path, buf, bufsize): + ret = self.operations('readlink', path) + data = create_string_buffer(ret[:bufsize - 1]) + memmove(buf, data, len(data)) + return 0 + + def mknod(self, path, mode, dev): + return self.operations('mknod', path, mode, dev) + + def mkdir(self, path, mode): + return self.operations('mkdir', path, mode) + + def unlink(self, path): + return self.operations('unlink', path) + + def rmdir(self, path): + return self.operations('rmdir', path) + + def symlink(self, source, target): + return self.operations('symlink', target, source) + + def rename(self, old, new): + return self.operations('rename', old, new) + + def link(self, source, target): + return self.operations('link', target, 
source) + + def chmod(self, path, mode): + return self.operations('chmod', path, mode) + + def chown(self, path, uid, gid): + # Check if any of the arguments is a -1 that has overflowed + if c_uid_t(uid + 1).value == 0: + uid = -1 + if c_gid_t(gid + 1).value == 0: + gid = -1 + return self.operations('chown', path, uid, gid) + + def truncate(self, path, length): + return self.operations('truncate', path, length) + + def open(self, path, fip): + fi = fip.contents + if self.raw_fi: + return self.operations('open', path, fi) + else: + fi.fh = self.operations('open', path, fi.flags) + return 0 + + def read(self, path, buf, size, offset, fip): + fh = fip.contents if self.raw_fi else fip.contents.fh + ret = self.operations('read', path, size, offset, fh) + if not ret: + return 0 + data = create_string_buffer(ret[:size], size) + memmove(buf, data, size) + return size + + def write(self, path, buf, size, offset, fip): + data = string_at(buf, size) + fh = fip.contents if self.raw_fi else fip.contents.fh + return self.operations('write', path, data, offset, fh) + + def statfs(self, path, buf): + stv = buf.contents + attrs = self.operations('statfs', path) + for key, val in attrs.items(): + if hasattr(stv, key): + setattr(stv, key, val) + return 0 + + def flush(self, path, fip): + fh = fip.contents if self.raw_fi else fip.contents.fh + return self.operations('flush', path, fh) + + def release(self, path, fip): + fh = fip.contents if self.raw_fi else fip.contents.fh + return self.operations('release', path, fh) + + def fsync(self, path, datasync, fip): + fh = fip.contents if self.raw_fi else fip.contents.fh + return self.operations('fsync', path, datasync, fh) + + def setxattr(self, path, name, value, size, options, *args): + data = string_at(value, size) + return self.operations('setxattr', path, name, data, options, *args) + + def getxattr(self, path, name, value, size, *args): + ret = self.operations('getxattr', path, name, *args) + retsize = len(ret) + buf = 
create_string_buffer(ret, retsize) # Does not add trailing 0 + if bool(value): + if retsize > size: + return -ERANGE + memmove(value, buf, retsize) + return retsize + + def listxattr(self, path, namebuf, size): + ret = self.operations('listxattr', path) + buf = create_string_buffer('\x00'.join(ret)) if ret else '' + bufsize = len(buf) + if bool(namebuf): + if bufsize > size: + return -ERANGE + memmove(namebuf, buf, bufsize) + return bufsize + + def removexattr(self, path, name): + return self.operations('removexattr', path, name) + + def opendir(self, path, fip): + # Ignore raw_fi + fip.contents.fh = self.operations('opendir', path) + return 0 + + def readdir(self, path, buf, filler, offset, fip): + # Ignore raw_fi + for item in self.operations('readdir', path, fip.contents.fh): + if isinstance(item, str): + name, st, offset = item, None, 0 + else: + name, attrs, offset = item + if attrs: + st = c_stat() + set_st_attrs(st, attrs) + else: + st = None + if filler(buf, name, st, offset) != 0: + break + return 0 + + def releasedir(self, path, fip): + # Ignore raw_fi + return self.operations('releasedir', path, fip.contents.fh) + + def fsyncdir(self, path, datasync, fip): + # Ignore raw_fi + return self.operations('fsyncdir', path, datasync, fip.contents.fh) + + def init(self, conn): + return self.operations('init', '/') + + def destroy(self, private_data): + return self.operations('destroy', '/') + + def access(self, path, amode): + return self.operations('access', path, amode) + + def create(self, path, mode, fip): + fi = fip.contents + if self.raw_fi: + return self.operations('create', path, mode, fi) + else: + fi.fh = self.operations('create', path, mode) + return 0 + + def ftruncate(self, path, length, fip): + fh = fip.contents if self.raw_fi else fip.contents.fh + return self.operations('truncate', path, length, fh) + + def fgetattr(self, path, buf, fip): + memset(buf, 0, sizeof(c_stat)) + st = buf.contents + fh = fip and (fip.contents if self.raw_fi else 
fip.contents.fh) + attrs = self.operations('getattr', path, fh) + set_st_attrs(st, attrs) + return 0 + + def lock(self, path, fip, cmd, lock): + fh = fip.contents if self.raw_fi else fip.contents.fh + return self.operations('lock', path, fh, cmd, lock) + + def utimens(self, path, buf): + if buf: + atime = time_of_timespec(buf.contents.actime) + mtime = time_of_timespec(buf.contents.modtime) + times = (atime, mtime) + else: + times = None + return self.operations('utimens', path, times) + + def bmap(self, path, blocksize, idx): + return self.operations('bmap', path, blocksize, idx) + + +class Operations(object): + """This class should be subclassed and passed as an argument to FUSE on + initialization. All operations should raise a FuseOSError exception + on error. + + When in doubt of what an operation should do, check the FUSE header + file or the corresponding system call man page.""" + + def __call__(self, op, *args): + if not hasattr(self, op): + raise FuseOSError(EFAULT) + return getattr(self, op)(*args) + + def access(self, path, amode): + return 0 + + bmap = None + + def chmod(self, path, mode): + raise FuseOSError(EROFS) + + def chown(self, path, uid, gid): + raise FuseOSError(EROFS) + + def create(self, path, mode, fi=None): + """When raw_fi is False (default case), fi is None and create should + return a numerical file handle. + When raw_fi is True the file handle should be set directly by create + and return 0.""" + raise FuseOSError(EROFS) + + def destroy(self, path): + """Called on filesystem destruction. Path is always /""" + pass + + def flush(self, path, fh): + return 0 + + def fsync(self, path, datasync, fh): + return 0 + + def fsyncdir(self, path, datasync, fh): + return 0 + + def getattr(self, path, fh=None): + """Returns a dictionary with keys identical to the stat C structure + of stat(2). + st_atime, st_mtime and st_ctime should be floats. + NOTE: There is an incombatibility between Linux and Mac OS X concerning + st_nlink of directories. 
Mac OS X counts all files inside the directory, + while Linux counts only the subdirectories.""" + + if path != '/': + raise FuseOSError(ENOENT) + return dict(st_mode=(S_IFDIR | 0755), st_nlink=2) + + def getxattr(self, path, name, position=0): + raise FuseOSError(ENOTSUP) + + def init(self, path): + """Called on filesystem initialization. Path is always / + Use it instead of __init__ if you start threads on initialization.""" + pass + + def link(self, target, source): + raise FuseOSError(EROFS) + + def listxattr(self, path): + return [] + + lock = None + + def mkdir(self, path, mode): + raise FuseOSError(EROFS) + + def mknod(self, path, mode, dev): + raise FuseOSError(EROFS) + + def open(self, path, flags): + """When raw_fi is False (default case), open should return a numerical + file handle. + When raw_fi is True the signature of open becomes: + open(self, path, fi) + and the file handle should be set directly.""" + return 0 + + def opendir(self, path): + """Returns a numerical file handle.""" + return 0 + + def read(self, path, size, offset, fh): + """Returns a string containing the data requested.""" + raise FuseOSError(EIO) + + def readdir(self, path, fh): + """Can return either a list of names, or a list of (name, attrs, offset) + tuples. attrs is a dict as in getattr.""" + return ['.', '..'] + + def readlink(self, path): + raise FuseOSError(ENOENT) + + def release(self, path, fh): + return 0 + + def releasedir(self, path, fh): + return 0 + + def removexattr(self, path, name): + raise FuseOSError(ENOTSUP) + + def rename(self, old, new): + raise FuseOSError(EROFS) + + def rmdir(self, path): + raise FuseOSError(EROFS) + + def setxattr(self, path, name, value, options, position=0): + raise FuseOSError(ENOTSUP) + + def statfs(self, path): + """Returns a dictionary with keys identical to the statvfs C structure + of statvfs(3). 
+ On Mac OS X f_bsize and f_frsize must be a power of 2 (minimum 512).""" + return {} + + def symlink(self, target, source): + raise FuseOSError(EROFS) + + def truncate(self, path, length, fh=None): + raise FuseOSError(EROFS) + + def unlink(self, path): + raise FuseOSError(EROFS) + + def utimens(self, path, times=None): + """Times is a (atime, mtime) tuple. If None use current time.""" + return 0 + + def write(self, path, data, offset, fh): + raise FuseOSError(EROFS) + + +class LoggingMixIn: + def __call__(self, op, path, *args): + print '->', op, path, repr(args) + ret = '[Unhandled Exception]' + try: + ret = getattr(self, op)(path, *args) + return ret + except OSError, e: + ret = str(e) + raise + finally: + print '<-', op, repr(ret) diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/fuse24.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/fuse24.py new file mode 100644 index 00000000..8c20679a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/fuse24.py @@ -0,0 +1,669 @@ +# Copyright (c) 2008 Giorgos Verigakis +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from __future__ import division + +from ctypes import * +from ctypes.util import find_library +from errno import * +from platform import machine, system +from stat import S_IFDIR +from traceback import print_exc + + +class c_timespec(Structure): + _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)] + +class c_utimbuf(Structure): + _fields_ = [('actime', c_timespec), ('modtime', c_timespec)] + +class c_stat(Structure): + pass # Platform dependent + +_system = system() +if _system in ('Darwin', 'FreeBSD'): + _libiconv = CDLL(find_library("iconv"), RTLD_GLOBAL) # libfuse dependency + ENOTSUP = 45 + c_dev_t = c_int32 + c_fsblkcnt_t = c_ulong + c_fsfilcnt_t = c_ulong + c_gid_t = c_uint32 + c_mode_t = c_uint16 + c_off_t = c_int64 + c_pid_t = c_int32 + c_uid_t = c_uint32 + setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), + c_size_t, c_int, c_uint32) + getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), + c_size_t, c_uint32) + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('st_ino', c_uint32), + ('st_mode', c_mode_t), + ('st_nlink', c_uint16), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('st_rdev', c_dev_t), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec), + ('st_size', c_off_t), + ('st_blocks', c_int64), + ('st_blksize', c_int32)] +elif _system == 'Linux': + ENOTSUP = 95 + c_dev_t = c_ulonglong + c_fsblkcnt_t = c_ulonglong + c_fsfilcnt_t = c_ulonglong + c_gid_t = c_uint + c_mode_t = c_uint + c_off_t = c_longlong + c_pid_t = c_int + c_uid_t = c_uint + setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int) + getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t) + + _machine = machine() + if _machine == 'x86_64': + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('st_ino', c_ulong), + ('st_nlink', c_ulong), + ('st_mode', c_mode_t), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('__pad0', c_int), + ('st_rdev', c_dev_t), + ('st_size', 
c_off_t), + ('st_blksize', c_long), + ('st_blocks', c_long), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec)] + elif _machine == 'ppc': + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('st_ino', c_ulonglong), + ('st_mode', c_mode_t), + ('st_nlink', c_uint), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('st_rdev', c_dev_t), + ('__pad2', c_ushort), + ('st_size', c_off_t), + ('st_blksize', c_long), + ('st_blocks', c_longlong), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec)] + else: + # i686, use as fallback for everything else + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('__pad1', c_ushort), + ('__st_ino', c_ulong), + ('st_mode', c_mode_t), + ('st_nlink', c_uint), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('st_rdev', c_dev_t), + ('__pad2', c_ushort), + ('st_size', c_off_t), + ('st_blksize', c_long), + ('st_blocks', c_longlong), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec), + ('st_ino', c_ulonglong)] +else: + raise NotImplementedError('%s is not supported.' 
% _system) + + +class c_statvfs(Structure): + _fields_ = [ + ('f_bsize', c_ulong), + ('f_frsize', c_ulong), + ('f_blocks', c_fsblkcnt_t), + ('f_bfree', c_fsblkcnt_t), + ('f_bavail', c_fsblkcnt_t), + ('f_files', c_fsfilcnt_t), + ('f_ffree', c_fsfilcnt_t), + ('f_favail', c_fsfilcnt_t)] + +if _system == 'FreeBSD': + c_fsblkcnt_t = c_uint64 + c_fsfilcnt_t = c_uint64 + setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int) + getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t) + class c_statvfs(Structure): + _fields_ = [ + ('f_bavail', c_fsblkcnt_t), + ('f_bfree', c_fsblkcnt_t), + ('f_blocks', c_fsblkcnt_t), + ('f_favail', c_fsfilcnt_t), + ('f_ffree', c_fsfilcnt_t), + ('f_files', c_fsfilcnt_t), + ('f_bsize', c_ulong), + ('f_flag', c_ulong), + ('f_frsize', c_ulong)] + +class fuse_file_info(Structure): + _fields_ = [ + ('flags', c_int), + ('fh_old', c_ulong), + ('writepage', c_int), + ('direct_io', c_uint, 1), + ('keep_cache', c_uint, 1), + ('flush', c_uint, 1), + ('padding', c_uint, 29), + ('fh', c_uint64), + ('lock_owner', c_uint64)] + +class fuse_context(Structure): + _fields_ = [ + ('fuse', c_voidp), + ('uid', c_uid_t), + ('gid', c_gid_t), + ('pid', c_pid_t), + ('private_data', c_voidp)] + +class fuse_operations(Structure): + _fields_ = [ + ('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))), + ('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)), + ('getdir', c_voidp), # Deprecated, use readdir + ('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)), + ('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)), + ('unlink', CFUNCTYPE(c_int, c_char_p)), + ('rmdir', CFUNCTYPE(c_int, c_char_p)), + ('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)), + ('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)), + ('link', CFUNCTYPE(c_int, c_char_p, c_char_p)), + ('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)), + ('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)), + ('truncate', CFUNCTYPE(c_int, c_char_p, 
c_off_t)), + ('utime', c_voidp), # Deprecated, use utimens + ('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t, + POINTER(fuse_file_info))), + ('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t, + POINTER(fuse_file_info))), + ('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))), + ('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))), + ('setxattr', setxattr_t), + ('getxattr', getxattr_t), + ('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)), + ('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)), + ('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp, CFUNCTYPE(c_int, c_voidp, + c_char_p, POINTER(c_stat), c_off_t), c_off_t, POINTER(fuse_file_info))), + ('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))), + ('init', CFUNCTYPE(c_voidp, c_voidp)), + ('destroy', CFUNCTYPE(c_voidp, c_voidp)), + ('access', CFUNCTYPE(c_int, c_char_p, c_int)), + ('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(fuse_file_info))), + ('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(fuse_file_info))), + ('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat), + POINTER(fuse_file_info))), + ('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)), + ('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))), + ('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong)))] + + +def time_of_timespec(ts): + return ts.tv_sec + ts.tv_nsec / 10 ** 9 + +def set_st_attrs(st, attrs): + for key, val in attrs.items(): + if key in ('st_atime', 'st_mtime', 'st_ctime'): + timespec = getattr(st, key + 'spec') + timespec.tv_sec = 
int(val) + timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9) + elif hasattr(st, key): + setattr(st, key, val) + + +_libfuse_path = find_library('fuse') +if not _libfuse_path: + raise EnvironmentError('Unable to find libfuse') +_libfuse = CDLL(_libfuse_path) +_libfuse.fuse_get_context.restype = POINTER(fuse_context) + + +def fuse_get_context(): + """Returns a (uid, gid, pid) tuple""" + ctxp = _libfuse.fuse_get_context() + ctx = ctxp.contents + return ctx.uid, ctx.gid, ctx.pid + + +class FUSE(object): + """This class is the lower level interface and should not be subclassed + under normal use. Its methods are called by fuse. + Assumes API version 2.6 or later.""" + + def __init__(self, operations, mountpoint, raw_fi=False, **kwargs): + """Setting raw_fi to True will cause FUSE to pass the fuse_file_info + class as is to Operations, instead of just the fh field. + This gives you access to direct_io, keep_cache, etc.""" + + self.operations = operations + self.raw_fi = raw_fi + args = ['fuse'] + if kwargs.pop('foreground', False): + args.append('-f') + if kwargs.pop('debug', False): + args.append('-d') + if kwargs.pop('nothreads', False): + args.append('-s') + kwargs.setdefault('fsname', operations.__class__.__name__) + args.append('-o') + args.append(','.join(val is True and key or '%s=%s' % (key, val) + for key, val in kwargs.items())) + args.append(mountpoint) + argv = (c_char_p * len(args))(*args) + + fuse_ops = fuse_operations() + for name, prototype in fuse_operations._fields_: + if prototype != c_voidp and getattr(operations, name, None): + op = self._create_wrapper_(getattr(self, name)) + setattr(fuse_ops, name, prototype(op)) + _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops), + sizeof(fuse_ops), None) + del self.operations # Invoke the destructor + + @staticmethod + def _create_wrapper_(func): + def _wrapper_(*args, **kwargs): + """Decorator for the methods that follow""" + try: + return func(*args, **kwargs) or 0 + except OSError, e: + 
return -(e.errno or EFAULT) + except: + print_exc() + return -EFAULT + return _wrapper_ + + def getattr(self, path, buf): + return self.fgetattr(path, buf, None) + + def readlink(self, path, buf, bufsize): + ret = self.operations('readlink', path) + data = create_string_buffer(ret[:bufsize - 1]) + memmove(buf, data, len(data)) + return 0 + + def mknod(self, path, mode, dev): + return self.operations('mknod', path, mode, dev) + + def mkdir(self, path, mode): + return self.operations('mkdir', path, mode) + + def unlink(self, path): + return self.operations('unlink', path) + + def rmdir(self, path): + return self.operations('rmdir', path) + + def symlink(self, source, target): + return self.operations('symlink', target, source) + + def rename(self, old, new): + return self.operations('rename', old, new) + + def link(self, source, target): + return self.operations('link', target, source) + + def chmod(self, path, mode): + return self.operations('chmod', path, mode) + + def chown(self, path, uid, gid): + return self.operations('chown', path, uid, gid) + + def truncate(self, path, length): + return self.operations('truncate', path, length) + + def open(self, path, fip): + fi = fip.contents + if self.raw_fi: + return self.operations('open', path, fi) + else: + fi.fh = self.operations('open', path, fi.flags) + return 0 + + def read(self, path, buf, size, offset, fip): + if self.raw_fi: + fh = fip.contents + else: + fh = fip.contents.fh + ret = self.operations('read', path, size, offset, fh) + if not ret: + return 0 + data = create_string_buffer(ret[:size], size) + memmove(buf, data, size) + return size + + def write(self, path, buf, size, offset, fip): + data = string_at(buf, size) + if self.raw_fi: + fh = fip.contents + else: + fh = fip.contents.fh + return self.operations('write', path, data, offset, fh) + + def statfs(self, path, buf): + stv = buf.contents + attrs = self.operations('statfs', path) + for key, val in attrs.items(): + if hasattr(stv, key): + setattr(stv, 
key, val) + return 0 + + def flush(self, path, fip): + if self.raw_fi: + fh = fip.contents + else: + fh = fip.contents.fh + return self.operations('flush', path, fh) + + def release(self, path, fip): + if self.raw_fi: + fh = fip.contents + else: + fh = fip.contents.fh + return self.operations('release', path, fh) + + def fsync(self, path, datasync, fip): + if self.raw_fi: + fh = fip.contents + else: + fh = fip.contents.fh + return self.operations('fsync', path, datasync, fh) + + def setxattr(self, path, name, value, size, options, *args): + data = string_at(value, size) + return self.operations('setxattr', path, name, data, options, *args) + + def getxattr(self, path, name, value, size, *args): + ret = self.operations('getxattr', path, name, *args) + retsize = len(ret) + buf = create_string_buffer(ret, retsize) # Does not add trailing 0 + if bool(value): + if retsize > size: + return -ERANGE + memmove(value, buf, retsize) + return retsize + + def listxattr(self, path, namebuf, size): + ret = self.operations('listxattr', path) + if ret: + buf = create_string_buffer('\x00'.join(ret)) + else: + buf = '' + bufsize = len(buf) + if bool(namebuf): + if bufsize > size: + return -ERANGE + memmove(namebuf, buf, bufsize) + return bufsize + + def removexattr(self, path, name): + return self.operations('removexattr', path, name) + + def opendir(self, path, fip): + # Ignore raw_fi + fip.contents.fh = self.operations('opendir', path) + return 0 + + def readdir(self, path, buf, filler, offset, fip): + # Ignore raw_fi + for item in self.operations('readdir', path, fip.contents.fh): + if isinstance(item, str): + name, st, offset = item, None, 0 + else: + name, attrs, offset = item + if attrs: + st = c_stat() + set_st_attrs(st, attrs) + else: + st = None + if filler(buf, name, st, offset) != 0: + break + return 0 + + def releasedir(self, path, fip): + # Ignore raw_fi + return self.operations('releasedir', path, fip.contents.fh) + + def fsyncdir(self, path, datasync, fip): + # Ignore 
raw_fi + return self.operations('fsyncdir', path, datasync, fip.contents.fh) + + def init(self, conn): + return self.operations('init', '/') + + def destroy(self, private_data): + return self.operations('destroy', '/') + + def access(self, path, amode): + return self.operations('access', path, amode) + + def create(self, path, mode, fip): + fi = fip.contents + if self.raw_fi: + return self.operations('create', path, mode, fi) + else: + fi.fh = self.operations('create', path, mode) + return 0 + + def ftruncate(self, path, length, fip): + if self.raw_fi: + fh = fip.contents + else: + fh = fip.contents.fh + return self.operations('truncate', path, length, fh) + + def fgetattr(self, path, buf, fip): + memset(buf, 0, sizeof(c_stat)) + st = buf.contents + if not fip: + fh = fip + elif self.raw_fi: + fh = fip.contents + else: + fh = fip.contents.fh + attrs = self.operations('getattr', path, fh) + set_st_attrs(st, attrs) + return 0 + + def lock(self, path, fip, cmd, lock): + if self.raw_fi: + fh = fip.contents + else: + fh = fip.contents.fh + return self.operations('lock', path, fh, cmd, lock) + + def utimens(self, path, buf): + if buf: + atime = time_of_timespec(buf.contents.actime) + mtime = time_of_timespec(buf.contents.modtime) + times = (atime, mtime) + else: + times = None + return self.operations('utimens', path, times) + + def bmap(self, path, blocksize, idx): + return self.operations('bmap', path, blocksize, idx) + + +class Operations(object): + """This class should be subclassed and passed as an argument to FUSE on + initialization. All operations should raise an OSError exception on + error. 
+ + When in doubt of what an operation should do, check the FUSE header + file or the corresponding system call man page.""" + + def __call__(self, op, *args): + if not hasattr(self, op): + raise OSError(EFAULT, '') + return getattr(self, op)(*args) + + def access(self, path, amode): + return 0 + + bmap = None + + def chmod(self, path, mode): + raise OSError(EROFS, '') + + def chown(self, path, uid, gid): + raise OSError(EROFS, '') + + def create(self, path, mode, fi=None): + """When raw_fi is False (default case), fi is None and create should + return a numerical file handle. + When raw_fi is True the file handle should be set directly by create + and return 0.""" + raise OSError(EROFS, '') + + def destroy(self, path): + """Called on filesystem destruction. Path is always /""" + pass + + def flush(self, path, fh): + return 0 + + def fsync(self, path, datasync, fh): + return 0 + + def fsyncdir(self, path, datasync, fh): + return 0 + + def getattr(self, path, fh=None): + """Returns a dictionary with keys identical to the stat C structure + of stat(2). + st_atime, st_mtime and st_ctime should be floats. + NOTE: There is an incombatibility between Linux and Mac OS X concerning + st_nlink of directories. Mac OS X counts all files inside the directory, + while Linux counts only the subdirectories.""" + + if path != '/': + raise OSError(ENOENT, '') + return dict(st_mode=(S_IFDIR | 0755), st_nlink=2) + + def getxattr(self, path, name, position=0): + raise OSError(ENOTSUP, '') + + def init(self, path): + """Called on filesystem initialization. 
Path is always / + Use it instead of __init__ if you start threads on initialization.""" + pass + + def link(self, target, source): + raise OSError(EROFS, '') + + def listxattr(self, path): + return [] + + lock = None + + def mkdir(self, path, mode): + raise OSError(EROFS, '') + + def mknod(self, path, mode, dev): + raise OSError(EROFS, '') + + def open(self, path, flags): + """When raw_fi is False (default case), open should return a numerical + file handle. + When raw_fi is True the signature of open becomes: + open(self, path, fi) + and the file handle should be set directly.""" + return 0 + + def opendir(self, path): + """Returns a numerical file handle.""" + return 0 + + def read(self, path, size, offset, fh): + """Returns a string containing the data requested.""" + raise OSError(ENOENT, '') + + def readdir(self, path, fh): + """Can return either a list of names, or a list of (name, attrs, offset) + tuples. attrs is a dict as in getattr.""" + return ['.', '..'] + + def readlink(self, path): + raise OSError(ENOENT, '') + + def release(self, path, fh): + return 0 + + def releasedir(self, path, fh): + return 0 + + def removexattr(self, path, name): + raise OSError(ENOTSUP, '') + + def rename(self, old, new): + raise OSError(EROFS, '') + + def rmdir(self, path): + raise OSError(EROFS, '') + + def setxattr(self, path, name, value, options, position=0): + raise OSError(ENOTSUP, '') + + def statfs(self, path): + """Returns a dictionary with keys identical to the statvfs C structure + of statvfs(3). + On Mac OS X f_bsize and f_frsize must be a power of 2 (minimum 512).""" + return {} + + def symlink(self, target, source): + raise OSError(EROFS, '') + + def truncate(self, path, length, fh=None): + raise OSError(EROFS, '') + + def unlink(self, path): + raise OSError(EROFS, '') + + def utimens(self, path, times=None): + """Times is a (atime, mtime) tuple. 
If None use current time.""" + return 0 + + def write(self, path, data, offset, fh): + raise OSError(EROFS, '') + + +class LoggingMixIn: + def __call__(self, op, path, *args): + print '->', op, path, repr(args) + ret = '[Unknown Error]' + try: + try: + ret = getattr(self, op)(path, *args) + return ret + except OSError, e: + ret = str(e) + raise + finally: + print '<-', op, repr(ret) diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/fuse3.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/fuse3.py new file mode 100644 index 00000000..8717b47a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/fuse3.py @@ -0,0 +1,637 @@ +# Copyright (c) 2008 Giorgos Verigakis +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from ctypes import * +from ctypes.util import find_library +from errno import * +from functools import partial +from platform import machine, system +from stat import S_IFDIR +from traceback import print_exc + +import logging + + +class c_timespec(Structure): + _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)] + +class c_utimbuf(Structure): + _fields_ = [('actime', c_timespec), ('modtime', c_timespec)] + +class c_stat(Structure): + pass # Platform dependent + +_system = system() +if _system in ('Darwin', 'FreeBSD'): + _libiconv = CDLL(find_library("iconv"), RTLD_GLOBAL) # libfuse dependency + ENOTSUP = 45 + c_dev_t = c_int32 + c_fsblkcnt_t = c_ulong + c_fsfilcnt_t = c_ulong + c_gid_t = c_uint32 + c_mode_t = c_uint16 + c_off_t = c_int64 + c_pid_t = c_int32 + c_uid_t = c_uint32 + setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), + c_size_t, c_int, c_uint32) + getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), + c_size_t, c_uint32) + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('st_ino', c_uint32), + ('st_mode', c_mode_t), + ('st_nlink', c_uint16), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('st_rdev', c_dev_t), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec), + ('st_size', c_off_t), + ('st_blocks', c_int64), + ('st_blksize', c_int32)] +elif _system == 'Linux': + ENOTSUP = 95 + c_dev_t = c_ulonglong + c_fsblkcnt_t = c_ulonglong + c_fsfilcnt_t = c_ulonglong + c_gid_t = c_uint + c_mode_t = c_uint + c_off_t = c_longlong + c_pid_t = c_int + c_uid_t = c_uint + setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int) + getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t) + + _machine = machine() + if _machine == 'x86_64': + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('st_ino', c_ulong), + ('st_nlink', c_ulong), + ('st_mode', c_mode_t), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('__pad0', c_int), + ('st_rdev', c_dev_t), + 
('st_size', c_off_t), + ('st_blksize', c_long), + ('st_blocks', c_long), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec)] + elif _machine == 'ppc': + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('st_ino', c_ulonglong), + ('st_mode', c_mode_t), + ('st_nlink', c_uint), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('st_rdev', c_dev_t), + ('__pad2', c_ushort), + ('st_size', c_off_t), + ('st_blksize', c_long), + ('st_blocks', c_longlong), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec)] + else: + # i686, use as fallback for everything else + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('__pad1', c_ushort), + ('__st_ino', c_ulong), + ('st_mode', c_mode_t), + ('st_nlink', c_uint), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('st_rdev', c_dev_t), + ('__pad2', c_ushort), + ('st_size', c_off_t), + ('st_blksize', c_long), + ('st_blocks', c_longlong), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec), + ('st_ino', c_ulonglong)] +else: + raise NotImplementedError('%s is not supported.' 
% _system) + + +class c_statvfs(Structure): + _fields_ = [ + ('f_bsize', c_ulong), + ('f_frsize', c_ulong), + ('f_blocks', c_fsblkcnt_t), + ('f_bfree', c_fsblkcnt_t), + ('f_bavail', c_fsblkcnt_t), + ('f_files', c_fsfilcnt_t), + ('f_ffree', c_fsfilcnt_t), + ('f_favail', c_fsfilcnt_t)] + +if _system == 'FreeBSD': + c_fsblkcnt_t = c_uint64 + c_fsfilcnt_t = c_uint64 + setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int) + getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t) + class c_statvfs(Structure): + _fields_ = [ + ('f_bavail', c_fsblkcnt_t), + ('f_bfree', c_fsblkcnt_t), + ('f_blocks', c_fsblkcnt_t), + ('f_favail', c_fsfilcnt_t), + ('f_ffree', c_fsfilcnt_t), + ('f_files', c_fsfilcnt_t), + ('f_bsize', c_ulong), + ('f_flag', c_ulong), + ('f_frsize', c_ulong)] + +class fuse_file_info(Structure): + _fields_ = [ + ('flags', c_int), + ('fh_old', c_ulong), + ('writepage', c_int), + ('direct_io', c_uint, 1), + ('keep_cache', c_uint, 1), + ('flush', c_uint, 1), + ('padding', c_uint, 29), + ('fh', c_uint64), + ('lock_owner', c_uint64)] + +class fuse_context(Structure): + _fields_ = [ + ('fuse', c_voidp), + ('uid', c_uid_t), + ('gid', c_gid_t), + ('pid', c_pid_t), + ('private_data', c_voidp)] + +class fuse_operations(Structure): + _fields_ = [ + ('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))), + ('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)), + ('getdir', c_voidp), # Deprecated, use readdir + ('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)), + ('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)), + ('unlink', CFUNCTYPE(c_int, c_char_p)), + ('rmdir', CFUNCTYPE(c_int, c_char_p)), + ('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)), + ('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)), + ('link', CFUNCTYPE(c_int, c_char_p, c_char_p)), + ('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)), + ('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)), + ('truncate', CFUNCTYPE(c_int, c_char_p, 
c_off_t)), + ('utime', c_voidp), # Deprecated, use utimens + ('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t, + POINTER(fuse_file_info))), + ('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t, + POINTER(fuse_file_info))), + ('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))), + ('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))), + ('setxattr', setxattr_t), + ('getxattr', getxattr_t), + ('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)), + ('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)), + ('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp, CFUNCTYPE(c_int, c_voidp, + c_char_p, POINTER(c_stat), c_off_t), c_off_t, POINTER(fuse_file_info))), + ('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), + ('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))), + ('init', CFUNCTYPE(c_voidp, c_voidp)), + ('destroy', CFUNCTYPE(c_voidp, c_voidp)), + ('access', CFUNCTYPE(c_int, c_char_p, c_int)), + ('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(fuse_file_info))), + ('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(fuse_file_info))), + ('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat), + POINTER(fuse_file_info))), + ('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)), + ('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))), + ('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong)))] + + +def time_of_timespec(ts): + return ts.tv_sec + ts.tv_nsec / 10 ** 9 + +def set_st_attrs(st, attrs): + for key, val in attrs.items(): + if key in ('st_atime', 'st_mtime', 'st_ctime'): + timespec = getattr(st, key + 'spec') + timespec.tv_sec = 
int(val) + timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9) + elif hasattr(st, key): + setattr(st, key, val) + + +_libfuse_path = find_library('fuse') +if not _libfuse_path: + raise EnvironmentError('Unable to find libfuse') +_libfuse = CDLL(_libfuse_path) +_libfuse.fuse_get_context.restype = POINTER(fuse_context) + + +def fuse_get_context(): + """Returns a (uid, gid, pid) tuple""" + ctxp = _libfuse.fuse_get_context() + ctx = ctxp.contents + return ctx.uid, ctx.gid, ctx.pid + + +class FUSE(object): + """This class is the lower level interface and should not be subclassed + under normal use. Its methods are called by fuse. + Assumes API version 2.6 or later.""" + + def __init__(self, operations, mountpoint, raw_fi=False, **kwargs): + """Setting raw_fi to True will cause FUSE to pass the fuse_file_info + class as is to Operations, instead of just the fh field. + This gives you access to direct_io, keep_cache, etc.""" + + self.operations = operations + self.raw_fi = raw_fi + args = ['fuse'] + if kwargs.pop('foreground', False): + args.append('-f') + if kwargs.pop('debug', False): + args.append('-d') + if kwargs.pop('nothreads', False): + args.append('-s') + kwargs.setdefault('fsname', operations.__class__.__name__) + args.append('-o') + args.append(','.join(key if val == True else '%s=%s' % (key, val) + for key, val in kwargs.items())) + args.append(mountpoint) + argv = (c_char_p * len(args))(*args) + + fuse_ops = fuse_operations() + for name, prototype in fuse_operations._fields_: + if prototype != c_voidp and getattr(operations, name, None): + op = partial(self._wrapper_, getattr(self, name)) + setattr(fuse_ops, name, prototype(op)) + _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops), + sizeof(fuse_ops), None) + del self.operations # Invoke the destructor + + def _wrapper_(self, func, *args, **kwargs): + """Decorator for the methods that follow""" + try: + return func(*args, **kwargs) or 0 + except OSError as e: + return -(e.errno or EFAULT) + 
except: + print_exc() + return -EFAULT + + def getattr(self, path, buf): + return self.fgetattr(path, buf, None) + + def readlink(self, path, buf, bufsize): + ret = self.operations('readlink', path).encode('utf-8') + data = create_string_buffer(ret[:bufsize - 1]) + memmove(buf, data, len(data)) + return 0 + + def mknod(self, path, mode, dev): + return self.operations('mknod', path, mode, dev) + + def mkdir(self, path, mode): + return self.operations('mkdir', path, mode) + + def unlink(self, path): + return self.operations('unlink', path) + + def rmdir(self, path): + return self.operations('rmdir', path) + + def symlink(self, source, target): + return self.operations('symlink', target, source) + + def rename(self, old, new): + return self.operations('rename', old, new) + + def link(self, source, target): + return self.operations('link', target, source) + + def chmod(self, path, mode): + return self.operations('chmod', path, mode) + + def chown(self, path, uid, gid): + return self.operations('chown', path, uid, gid) + + def truncate(self, path, length): + return self.operations('truncate', path, length) + + def open(self, path, fip): + fi = fip.contents + if self.raw_fi: + return self.operations('open', path, fi) + else: + fi.fh = self.operations('open', path, fi.flags) + return 0 + + def read(self, path, buf, size, offset, fip): + fh = fip.contents if self.raw_fi else fip.contents.fh + ret = self.operations('read', path, size, offset, fh) + if not ret: + return 0 + data = create_string_buffer(ret[:size], size) + memmove(buf, data, size) + return size + + def write(self, path, buf, size, offset, fip): + data = string_at(buf, size) + fh = fip.contents if self.raw_fi else fip.contents.fh + return self.operations('write', path, data, offset, fh) + + def statfs(self, path, buf): + stv = buf.contents + attrs = self.operations('statfs', path) + for key, val in attrs.items(): + if hasattr(stv, key): + setattr(stv, key, val) + return 0 + + def flush(self, path, fip): + fh = 
fip.contents if self.raw_fi else fip.contents.fh + return self.operations('flush', path, fh) + + def release(self, path, fip): + fh = fip.contents if self.raw_fi else fip.contents.fh + return self.operations('release', path, fh) + + def fsync(self, path, datasync, fip): + fh = fip.contents if self.raw_fi else fip.contents.fh + return self.operations('fsync', path, datasync, fh) + + def setxattr(self, path, name, value, size, options, *args): + data = string_at(value, size) + return self.operations('setxattr', path, name, data, options, *args) + + def getxattr(self, path, name, value, size, *args): + ret = self.operations('getxattr', path, name, *args) + retsize = len(ret) + buf = create_string_buffer(ret, retsize) # Does not add trailing 0 + if bool(value): + if retsize > size: + return -ERANGE + memmove(value, buf, retsize) + return retsize + + def listxattr(self, path, namebuf, size): + ret = self.operations('listxattr', path) + buf = create_string_buffer('\x00'.join(ret)) if ret else '' + bufsize = len(buf) + if bool(namebuf): + if bufsize > size: + return -ERANGE + memmove(namebuf, buf, bufsize) + return bufsize + + def removexattr(self, path, name): + return self.operations('removexattr', path, name) + + def opendir(self, path, fip): + # Ignore raw_fi + fip.contents.fh = self.operations('opendir', path) + return 0 + + def readdir(self, path, buf, filler, offset, fip): + # Ignore raw_fi + for item in self.operations('readdir', path, fip.contents.fh): + if isinstance(item, str): + name, st, offset = item, None, 0 + else: + name, attrs, offset = item + if attrs: + st = c_stat() + set_st_attrs(st, attrs) + else: + st = None + if filler(buf, name.encode('utf-8'), st, offset) != 0: + break + return 0 + + def releasedir(self, path, fip): + # Ignore raw_fi + return self.operations('releasedir', path, fip.contents.fh) + + def fsyncdir(self, path, datasync, fip): + # Ignore raw_fi + return self.operations('fsyncdir', path, datasync, fip.contents.fh) + + def init(self, 
conn): + return self.operations('init', '/') + + def destroy(self, private_data): + return self.operations('destroy', '/') + + def access(self, path, amode): + return self.operations('access', path, amode) + + def create(self, path, mode, fip): + fi = fip.contents + if self.raw_fi: + return self.operations('create', path, mode, fi) + else: + fi.fh = self.operations('create', path, mode) + return 0 + + def ftruncate(self, path, length, fip): + fh = fip.contents if self.raw_fi else fip.contents.fh + return self.operations('truncate', path, length, fh) + + def fgetattr(self, path, buf, fip): + memset(buf, 0, sizeof(c_stat)) + st = buf.contents + fh = fip and (fip.contents if self.raw_fi else fip.contents.fh) + attrs = self.operations('getattr', path, fh) + set_st_attrs(st, attrs) + return 0 + + def lock(self, path, fip, cmd, lock): + fh = fip.contents if self.raw_fi else fip.contents.fh + return self.operations('lock', path, fh, cmd, lock) + + def utimens(self, path, buf): + if buf: + atime = time_of_timespec(buf.contents.actime) + mtime = time_of_timespec(buf.contents.modtime) + times = (atime, mtime) + else: + times = None + return self.operations('utimens', path, times) + + def bmap(self, path, blocksize, idx): + return self.operations('bmap', path, blocksize, idx) + + +class Operations(object): + """This class should be subclassed and passed as an argument to FUSE on + initialization. All operations should raise an OSError exception on + error. 
+ + When in doubt of what an operation should do, check the FUSE header + file or the corresponding system call man page.""" + + def __call__(self, op, *args): + if not hasattr(self, op): + raise OSError(EFAULT, '') + return getattr(self, op)(*args) + + def access(self, path, amode): + return 0 + + bmap = None + + def chmod(self, path, mode): + raise OSError(EROFS, '') + + def chown(self, path, uid, gid): + raise OSError(EROFS, '') + + def create(self, path, mode, fi=None): + """When raw_fi is False (default case), fi is None and create should + return a numerical file handle. + When raw_fi is True the file handle should be set directly by create + and return 0.""" + raise OSError(EROFS, '') + + def destroy(self, path): + """Called on filesystem destruction. Path is always /""" + pass + + def flush(self, path, fh): + return 0 + + def fsync(self, path, datasync, fh): + return 0 + + def fsyncdir(self, path, datasync, fh): + return 0 + + def getattr(self, path, fh=None): + """Returns a dictionary with keys identical to the stat C structure + of stat(2). + st_atime, st_mtime and st_ctime should be floats. + NOTE: There is an incombatibility between Linux and Mac OS X concerning + st_nlink of directories. Mac OS X counts all files inside the directory, + while Linux counts only the subdirectories.""" + + if path != '/': + raise OSError(ENOENT, '') + return dict(st_mode=(S_IFDIR | 0o755), st_nlink=2) + + def getxattr(self, path, name, position=0): + raise OSError(ENOTSUP, '') + + def init(self, path): + """Called on filesystem initialization. 
Path is always / + Use it instead of __init__ if you start threads on initialization.""" + pass + + def link(self, target, source): + raise OSError(EROFS, '') + + def listxattr(self, path): + return [] + + lock = None + + def mkdir(self, path, mode): + raise OSError(EROFS, '') + + def mknod(self, path, mode, dev): + raise OSError(EROFS, '') + + def open(self, path, flags): + """When raw_fi is False (default case), open should return a numerical + file handle. + When raw_fi is True the signature of open becomes: + open(self, path, fi) + and the file handle should be set directly.""" + return 0 + + def opendir(self, path): + """Returns a numerical file handle.""" + return 0 + + def read(self, path, size, offset, fh): + """Returns a string containing the data requested.""" + raise OSError(ENOENT, '') + + def readdir(self, path, fh): + """Can return either a list of names, or a list of (name, attrs, offset) + tuples. attrs is a dict as in getattr.""" + return ['.', '..'] + + def readlink(self, path): + raise OSError(ENOENT, '') + + def release(self, path, fh): + return 0 + + def releasedir(self, path, fh): + return 0 + + def removexattr(self, path, name): + raise OSError(ENOTSUP, '') + + def rename(self, old, new): + raise OSError(EROFS, '') + + def rmdir(self, path): + raise OSError(EROFS, '') + + def setxattr(self, path, name, value, options, position=0): + raise OSError(ENOTSUP, '') + + def statfs(self, path): + """Returns a dictionary with keys identical to the statvfs C structure + of statvfs(3). + On Mac OS X f_bsize and f_frsize must be a power of 2 (minimum 512).""" + return {} + + def symlink(self, target, source): + raise OSError(EROFS, '') + + def truncate(self, path, length, fh=None): + raise OSError(EROFS, '') + + def unlink(self, path): + raise OSError(EROFS, '') + + def utimens(self, path, times=None): + """Times is a (atime, mtime) tuple. 
If None use current time.""" + return 0 + + def write(self, path, data, offset, fh): + raise OSError(EROFS, '') + + +class LoggingMixIn: + def __call__(self, op, path, *args): + logging.debug('-> %s %s %s', op, path, repr(args)) + ret = '[Unknown Error]' + try: + ret = getattr(self, op)(path, *args) + return ret + except OSError as e: + ret = str(e) + raise + finally: + logging.debug('<- %s %s', op, repr(ret)) diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/fusell.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/fusell.py new file mode 100644 index 00000000..d0bc25ea --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/fusell.py @@ -0,0 +1,619 @@ +# Copyright (c) 2010 Giorgos Verigakis +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from __future__ import division + +from ctypes import * +from ctypes.util import find_library +from errno import * +from functools import partial, wraps +from inspect import getmembers, ismethod +from platform import machine, system +from stat import S_IFDIR, S_IFREG + + +_system = system() +_machine = machine() + +class LibFUSE(CDLL): + def __init__(self): + if _system == 'Darwin': + self.libiconv = CDLL(find_library('iconv'), RTLD_GLOBAL) + super(LibFUSE, self).__init__(find_library('fuse')) + + self.fuse_mount.argtypes = (c_char_p, POINTER(fuse_args)) + self.fuse_mount.restype = c_void_p + self.fuse_lowlevel_new.argtypes = (POINTER(fuse_args), POINTER(fuse_lowlevel_ops), + c_size_t, c_void_p) + self.fuse_lowlevel_new.restype = c_void_p + self.fuse_set_signal_handlers.argtypes = (c_void_p,) + self.fuse_session_add_chan.argtypes = (c_void_p, c_void_p) + self.fuse_session_loop.argtypes = (c_void_p,) + self.fuse_remove_signal_handlers.argtypes = (c_void_p,) + self.fuse_session_remove_chan.argtypes = (c_void_p,) + self.fuse_session_destroy.argtypes = (c_void_p,) + self.fuse_unmount.argtypes = (c_char_p, c_void_p) + + self.fuse_req_ctx.restype = POINTER(fuse_ctx) + self.fuse_req_ctx.argtypes = (fuse_req_t,) + + self.fuse_reply_err.argtypes = (fuse_req_t, c_int) + self.fuse_reply_attr.argtypes = (fuse_req_t, c_void_p, c_double) + self.fuse_reply_entry.argtypes = (fuse_req_t, c_void_p) + self.fuse_reply_open.argtypes = (fuse_req_t, c_void_p) + self.fuse_reply_buf.argtypes = (fuse_req_t, c_char_p, c_size_t) + self.fuse_reply_write.argtypes = (fuse_req_t, c_size_t) + + self.fuse_add_direntry.argtypes = (c_void_p, c_char_p, c_size_t, c_char_p, + c_stat_p, c_off_t) + +class fuse_args(Structure): + _fields_ = [('argc', c_int), ('argv', POINTER(c_char_p)), ('allocated', c_int)] + +class c_timespec(Structure): + _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)] + +class c_stat(Structure): + pass # Platform dependent + +if _system == 'Darwin': + ENOTSUP = 45 + c_dev_t = 
c_int32 + c_fsblkcnt_t = c_ulong + c_fsfilcnt_t = c_ulong + c_gid_t = c_uint32 + c_mode_t = c_uint16 + c_off_t = c_int64 + c_pid_t = c_int32 + c_uid_t = c_uint32 + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('st_ino', c_uint32), + ('st_mode', c_mode_t), + ('st_nlink', c_uint16), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('st_rdev', c_dev_t), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec), + ('st_size', c_off_t), + ('st_blocks', c_int64), + ('st_blksize', c_int32)] +elif _system == 'Linux': + ENOTSUP = 95 + c_dev_t = c_ulonglong + c_fsblkcnt_t = c_ulonglong + c_fsfilcnt_t = c_ulonglong + c_gid_t = c_uint + c_mode_t = c_uint + c_off_t = c_longlong + c_pid_t = c_int + c_uid_t = c_uint + + if _machine == 'x86_64': + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('st_ino', c_ulong), + ('st_nlink', c_ulong), + ('st_mode', c_mode_t), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('__pad0', c_int), + ('st_rdev', c_dev_t), + ('st_size', c_off_t), + ('st_blksize', c_long), + ('st_blocks', c_long), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec)] + elif _machine == 'ppc': + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('st_ino', c_ulonglong), + ('st_mode', c_mode_t), + ('st_nlink', c_uint), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('st_rdev', c_dev_t), + ('__pad2', c_ushort), + ('st_size', c_off_t), + ('st_blksize', c_long), + ('st_blocks', c_longlong), + ('st_atimespec', c_timespec), + ('st_mtimespec', c_timespec), + ('st_ctimespec', c_timespec)] + else: + # i686, use as fallback for everything else + c_stat._fields_ = [ + ('st_dev', c_dev_t), + ('__pad1', c_ushort), + ('__st_ino', c_ulong), + ('st_mode', c_mode_t), + ('st_nlink', c_uint), + ('st_uid', c_uid_t), + ('st_gid', c_gid_t), + ('st_rdev', c_dev_t), + ('__pad2', c_ushort), + ('st_size', c_off_t), + ('st_blksize', c_long), + ('st_blocks', c_longlong), + ('st_atimespec', c_timespec), + ('st_mtimespec', 
c_timespec), + ('st_ctimespec', c_timespec), + ('st_ino', c_ulonglong)] +else: + raise NotImplementedError('%s is not supported.' % _system) + +class c_statvfs(Structure): + _fields_ = [ + ('f_bsize', c_ulong), + ('f_frsize', c_ulong), + ('f_blocks', c_fsblkcnt_t), + ('f_bfree', c_fsblkcnt_t), + ('f_bavail', c_fsblkcnt_t), + ('f_files', c_fsfilcnt_t), + ('f_ffree', c_fsfilcnt_t), + ('f_favail', c_fsfilcnt_t)] + +class fuse_file_info(Structure): + _fields_ = [ + ('flags', c_int), + ('fh_old', c_ulong), + ('writepage', c_int), + ('direct_io', c_uint, 1), + ('keep_cache', c_uint, 1), + ('flush', c_uint, 1), + ('padding', c_uint, 29), + ('fh', c_uint64), + ('lock_owner', c_uint64)] + +class fuse_ctx(Structure): + _fields_ = [('uid', c_uid_t), ('gid', c_gid_t), ('pid', c_pid_t)] + +fuse_ino_t = c_ulong +fuse_req_t = c_void_p +c_stat_p = POINTER(c_stat) +fuse_file_info_p = POINTER(fuse_file_info) + +FUSE_SET_ATTR = ('st_mode', 'st_uid', 'st_gid', 'st_size', 'st_atime', 'st_mtime') + +class fuse_entry_param(Structure): + _fields_ = [ + ('ino', fuse_ino_t), + ('generation', c_ulong), + ('attr', c_stat), + ('attr_timeout', c_double), + ('entry_timeout', c_double)] + +class fuse_lowlevel_ops(Structure): + _fields_ = [ + ('init', CFUNCTYPE(None, c_void_p, c_void_p)), + ('destroy', CFUNCTYPE(None, c_void_p)), + ('lookup', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_char_p)), + ('forget', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_ulong)), + ('getattr', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, fuse_file_info_p)), + ('setattr', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_stat_p, c_int, fuse_file_info_p)), + ('readlink', CFUNCTYPE(None, fuse_req_t, fuse_ino_t)), + ('mknod', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_char_p, c_mode_t, c_dev_t)), + ('mkdir', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_char_p, c_mode_t)), + ('unlink', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_char_p)), + ('rmdir', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_char_p)), + ('symlink', CFUNCTYPE(None, fuse_req_t, 
c_char_p, fuse_ino_t, c_char_p)), + ('rename', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_char_p, fuse_ino_t, c_char_p)), + ('link', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, fuse_ino_t, c_char_p)), + ('open', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, fuse_file_info_p)), + ('read', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_size_t, c_off_t, fuse_file_info_p)), + ('write', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_char_p, c_size_t, c_off_t, + fuse_file_info_p)), + ('flush', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, fuse_file_info_p)), + ('release', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, fuse_file_info_p)), + ('fsync', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_int, fuse_file_info_p)), + ('opendir', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, fuse_file_info_p)), + ('readdir', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_size_t, c_off_t, fuse_file_info_p)), + ('releasedir', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, fuse_file_info_p)), + ('fsyncdir', CFUNCTYPE(None, fuse_req_t, fuse_ino_t, c_int, fuse_file_info_p))] + + +def struct_to_dict(p): + try: + x = p.contents + return dict((key, getattr(x, key)) for key, type in x._fields_) + except ValueError: + return {} + +def stat_to_dict(p): + try: + d = {} + x = p.contents + for key, type in x._fields_: + if key in ('st_atimespec', 'st_mtimespec', 'st_ctimespec'): + ts = getattr(x, key) + key = key[:-4] # Lose the "spec" + d[key] = ts.tv_sec + ts.tv_nsec / 10 ** 9 + else: + d[key] = getattr(x, key) + return d + except ValueError: + return {} + +def dict_to_stat(d): + for key in ('st_atime', 'st_mtime', 'st_ctime'): + if key in d: + val = d[key] + sec = int(val) + nsec = int((val - sec) * 10 ** 9) + d[key + 'spec'] = c_timespec(sec, nsec) + return c_stat(**d) + +def setattr_mask_to_list(mask): + return [FUSE_SET_ATTR[i] for i in range(len(FUSE_SET_ATTR)) if mask & (1 << i)] + +class FUSELL(object): + def __init__(self, mountpoint): + self.libfuse = LibFUSE() + + fuse_ops = fuse_lowlevel_ops() + + for name, prototype in 
fuse_lowlevel_ops._fields_: + method = getattr(self, 'fuse_' + name, None) or getattr(self, name, None) + if method: + setattr(fuse_ops, name, prototype(method)) + + args = ['fuse'] + argv = fuse_args(len(args), (c_char_p * len(args))(*args), 0) + + # TODO: handle initialization errors + + chan = self.libfuse.fuse_mount(mountpoint, argv) + assert chan + + session = self.libfuse.fuse_lowlevel_new(argv, byref(fuse_ops), sizeof(fuse_ops), None) + assert session + + err = self.libfuse.fuse_set_signal_handlers(session) + assert err == 0 + + self.libfuse.fuse_session_add_chan(session, chan) + + err = self.libfuse.fuse_session_loop(session) + assert err == 0 + + err = self.libfuse.fuse_remove_signal_handlers(session) + assert err == 0 + + self.libfuse.fuse_session_remove_chan(chan) + self.libfuse.fuse_session_destroy(session) + self.libfuse.fuse_unmount(mountpoint, chan) + + def reply_err(self, req, err): + return self.libfuse.fuse_reply_err(req, err) + + def reply_none(self, req): + self.libfuse.fuse_reply_none(req) + + def reply_entry(self, req, entry): + entry['attr'] = c_stat(**entry['attr']) + e = fuse_entry_param(**entry) + self.libfuse.fuse_reply_entry(req, byref(e)) + + def reply_create(self, req, *args): + pass # XXX + + def reply_attr(self, req, attr, attr_timeout): + st = dict_to_stat(attr) + return self.libfuse.fuse_reply_attr(req, byref(st), c_double(attr_timeout)) + + def reply_readlink(self, req, *args): + pass # XXX + + def reply_open(self, req, d): + fi = fuse_file_info(**d) + return self.libfuse.fuse_reply_open(req, byref(fi)) + + def reply_write(self, req, count): + return self.libfuse.fuse_reply_write(req, count) + + def reply_buf(self, req, buf): + return self.libfuse.fuse_reply_buf(req, buf, len(buf)) + + def reply_readdir(self, req, size, off, entries): + bufsize = 0 + sized_entries = [] + for name, attr in entries: + entsize = self.libfuse.fuse_add_direntry(req, None, 0, name, None, 0) + sized_entries.append((name, attr, entsize)) + bufsize += 
entsize + + next = 0 + buf = create_string_buffer(bufsize) + for name, attr, entsize in sized_entries: + entbuf = cast(addressof(buf) + next, c_char_p) + st = c_stat(**attr) + next += entsize + self.libfuse.fuse_add_direntry(req, entbuf, entsize, name, byref(st), next) + + if off < bufsize: + buf = cast(addressof(buf) + off, c_char_p) if off else buf + return self.libfuse.fuse_reply_buf(req, buf, min(bufsize - off, size)) + else: + return self.libfuse.fuse_reply_buf(req, None, 0) + + + # If you override the following methods you should reply directly + # with the self.libfuse.fuse_reply_* methods. + + def fuse_getattr(self, req, ino, fi): + self.getattr(req, ino, struct_to_dict(fi)) + + def fuse_setattr(self, req, ino, attr, to_set, fi): + attr_dict = stat_to_dict(attr) + to_set_list = setattr_mask_to_list(to_set) + fi_dict = struct_to_dict(fi) + self.setattr(req, ino, attr_dict, to_set_list, fi_dict) + + def fuse_open(self, req, ino, fi): + self.open(req, ino, struct_to_dict(fi)) + + def fuse_read(self, req, ino, size, off, fi): + self.read(req, ino, size, off, fi) + + def fuse_write(self, req, ino, buf, size, off, fi): + buf_str = string_at(buf, size) + fi_dict = struct_to_dict(fi) + self.write(req, ino, buf_str, off, fi_dict) + + def fuse_flush(self, req, ino, fi): + self.flush(req, ino, struct_to_dict(fi)) + + def fuse_release(self, req, ino, fi): + self.release(req, ino, struct_to_dict(fi)) + + def fuse_fsync(self, req, ino, datasync, fi): + self.fsyncdir(req, ino, datasync, struct_to_dict(fi)) + + def fuse_opendir(self, req, ino, fi): + self.opendir(req, ino, struct_to_dict(fi)) + + def fuse_readdir(self, req, ino, size, off, fi): + self.readdir(req, ino, size, off, struct_to_dict(fi)) + + def fuse_releasedir(self, req, ino, fi): + self.releasedir(req, ino, struct_to_dict(fi)) + + def fuse_fsyncdir(self, req, ino, datasync, fi): + self.fsyncdir(req, ino, datasync, struct_to_dict(fi)) + + + # Utility methods + + def req_ctx(self, req): + ctx = 
self.libfuse.fuse_req_ctx(req) + return struct_to_dict(ctx) + + + # Methods to be overridden in subclasses. + # Reply with the self.reply_* methods. + + def init(self, userdata, conn): + """Initialize filesystem + + There's no reply to this method + """ + pass + + def destroy(self, userdata): + """Clean up filesystem + + There's no reply to this method + """ + pass + + def lookup(self, req, parent, name): + """Look up a directory entry by name and get its attributes. + + Valid replies: + reply_entry + reply_err + """ + self.reply_err(req, ENOENT) + + def forget(self, req, ino, nlookup): + """Forget about an inode + + Valid replies: + reply_none + """ + self.reply_none(req) + + def getattr(self, req, ino, fi): + """Get file attributes + + Valid replies: + reply_attr + reply_err + """ + if ino == 1: + attr = {'st_ino': 1, 'st_mode': S_IFDIR | 0755, 'st_nlink': 2} + self.reply_attr(req, attr, 1.0) + else: + self.reply_err(req, ENOENT) + + def setattr(self, req, ino, attr, to_set, fi): + """Set file attributes + + Valid replies: + reply_attr + reply_err + """ + self.reply_err(req, EROFS) + + def readlink(self, req, ino): + """Read symbolic link + + Valid replies: + reply_readlink + reply_err + """ + self.reply_err(req, ENOENT) + + def mknod(self, req, parent, name, mode, rdev): + """Create file node + + Valid replies: + reply_entry + reply_err + """ + self.reply_err(req, EROFS) + + def mkdir(self, req, parent, name, mode): + """Create a directory + + Valid replies: + reply_entry + reply_err + """ + self.reply_err(req, EROFS) + + def unlink(self, req, parent, name): + """Remove a file + + Valid replies: + reply_err + """ + self.reply_err(req, EROFS) + + def rmdir(self, req, parent, name): + """Remove a directory + + Valid replies: + reply_err + """ + self.reply_err(req, EROFS) + + def symlink(self, req, link, parent, name): + """Create a symbolic link + + Valid replies: + reply_entry + reply_err + """ + self.reply_err(req, EROFS) + + def rename(self, req, parent, name, 
newparent, newname): + """Rename a file + + Valid replies: + reply_err + """ + self.reply_err(req, EROFS) + + def link(self, req, ino, newparent, newname): + """Create a hard link + + Valid replies: + reply_entry + reply_err + """ + self.reply_err(req, EROFS) + + def open(self, req, ino, fi): + """Open a file + + Valid replies: + reply_open + reply_err + """ + self.reply_open(req, fi) + + def read(self, req, ino, size, off, fi): + """Read data + + Valid replies: + reply_buf + reply_err + """ + self.reply_err(req, EIO) + + def write(self, req, ino, buf, off, fi): + """Write data + + Valid replies: + reply_write + reply_err + """ + self.reply_err(req, EROFS) + + def flush(self, req, ino, fi): + """Flush method + + Valid replies: + reply_err + """ + self.reply_err(req, 0) + + def release(self, req, ino, fi): + """Release an open file + + Valid replies: + reply_err + """ + self.reply_err(req, 0) + + def fsync(self, req, ino, datasync, fi): + """Synchronize file contents + + Valid replies: + reply_err + """ + self.reply_err(req, 0) + + def opendir(self, req, ino, fi): + """Open a directory + + Valid replies: + reply_open + reply_err + """ + self.reply_open(req, fi) + + def readdir(self, req, ino, size, off, fi): + """Read directory + + Valid replies: + reply_readdir + reply_err + """ + if ino == 1: + attr = {'st_ino': 1, 'st_mode': S_IFDIR} + entries = [('.', attr), ('..', attr)] + self.reply_readdir(req, size, off, entries) + else: + self.reply_err(req, ENOENT) + + def releasedir(self, req, ino, fi): + """Release an open directory + + Valid replies: + reply_err + """ + self.reply_err(req, 0) + + def fsyncdir(self, req, ino, datasync, fi): + """Synchronize directory contents + + Valid replies: + reply_err + """ + self.reply_err(req, 0) \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/loopback.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/loopback.py new file mode 100755 index 00000000..5ce16ed1 --- 
/dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/loopback.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python + +from __future__ import with_statement + +from errno import EACCES +from os.path import realpath +from sys import argv, exit +from threading import Lock + +import os + +from fuse import FUSE, FuseOSError, Operations, LoggingMixIn + + +class Loopback(LoggingMixIn, Operations): + def __init__(self, root): + self.root = realpath(root) + self.rwlock = Lock() + + def __call__(self, op, path, *args): + return super(Loopback, self).__call__(op, self.root + path, *args) + + def access(self, path, mode): + if not os.access(path, mode): + raise FuseOSError(EACCES) + + chmod = os.chmod + chown = os.chown + + def create(self, path, mode): + return os.open(path, os.O_WRONLY | os.O_CREAT, mode) + + def flush(self, path, fh): + return os.fsync(fh) + + def fsync(self, path, datasync, fh): + return os.fsync(fh) + + def getattr(self, path, fh=None): + st = os.lstat(path) + return dict((key, getattr(st, key)) for key in ('st_atime', 'st_ctime', + 'st_gid', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid')) + + getxattr = None + + def link(self, target, source): + return os.link(source, target) + + listxattr = None + mkdir = os.mkdir + mknod = os.mknod + open = os.open + + def read(self, path, size, offset, fh): + with self.rwlock: + os.lseek(fh, offset, 0) + return os.read(fh, size) + + def readdir(self, path, fh): + return ['.', '..'] + os.listdir(path) + + readlink = os.readlink + + def release(self, path, fh): + return os.close(fh) + + def rename(self, old, new): + return os.rename(old, self.root + new) + + rmdir = os.rmdir + + def statfs(self, path): + stv = os.statvfs(path) + return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree', + 'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag', + 'f_frsize', 'f_namemax')) + + def symlink(self, target, source): + return os.symlink(source, target) + + def truncate(self, path, 
length, fh=None): + with open(path, 'r+') as f: + f.truncate(length) + + unlink = os.unlink + utimens = os.utime + + def write(self, path, data, offset, fh): + with self.rwlock: + os.lseek(fh, offset, 0) + return os.write(fh, data) + + +if __name__ == "__main__": + if len(argv) != 3: + print 'usage: %s ' % argv[0] + exit(1) + fuse = FUSE(Loopback(argv[1]), argv[2], foreground=True) \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/.project b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/.project new file mode 100644 index 00000000..929b4087 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/.project @@ -0,0 +1,17 @@ + + + llfuse + + + + + + org.python.pydev.PyDevBuilder + + + + + + org.python.pydev.pythonNature + + diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/.pydevproject b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/.pydevproject new file mode 100644 index 00000000..e7f99ef6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/.pydevproject @@ -0,0 +1,10 @@ + + + + +python 2.6 +Default + +/llfuse + + diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/README.txt b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/README.txt new file mode 100644 index 00000000..03f4df0f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/README.txt @@ -0,0 +1,22 @@ +Note that the low-level API needs to generate the Python interface +to the local FUSE library before it can be used. For that, +you have to have both the FUSE headers and the GCC-XML +(http://www.gccxml.org) compiler installed. 
+ +The interface is generated by running + +# python setup.py build_ctypes + +this will create the file llfuse/ctypes_api.py + +Please keep in mind that it's probably not wise to ship this file +with your application, because it has been generated for your +system only. + + +Note that the fuse_daemonize() function is deliberately not exported +by this module. If you want to daemonize a Python process, you have to +do so from within Python or you will get into trouble. See + - http://bugs.python.org/issue7931 + - http://www.python.org/dev/peps/pep-3143/ + \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/ctypeslib.zip b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/ctypeslib.zip new file mode 100644 index 00000000..f125d984 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/ctypeslib.zip differ diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/fuse_ctypes.h b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/fuse_ctypes.h new file mode 100644 index 00000000..2793bdf6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/fuse_ctypes.h @@ -0,0 +1,10 @@ +/* Necessary to prevent gccxml from complaining about + * an undefined type */ +#define __builtin_va_arg_pack_len int + + +#define FUSE_USE_VERSION 28 +#include +#include +#include + diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/llfuse/__init__.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/llfuse/__init__.py new file mode 100644 index 00000000..589e2462 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/llfuse/__init__.py @@ -0,0 +1,24 @@ +''' +$Id: __init__.py 47 2010-01-29 17:11:23Z nikratio $ + +Copyright (c) 2010, Nikolaus Rath +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + * Neither the name of the main author nor the names of other contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+''' + +from __future__ import division, print_function, absolute_import + +__all__ = [ 'ctypes_api', 'interface', 'operations' ] + +# Wildcard imports desired +#pylint: disable-msg=W0401 +from llfuse.operations import * +from llfuse.interface import * + diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/llfuse/interface.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/llfuse/interface.py new file mode 100644 index 00000000..07dc84aa --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/llfuse/interface.py @@ -0,0 +1,897 @@ +''' +$Id: interface.py 54 2010-02-22 02:33:10Z nikratio $ + +Copyright (c) 2010, Nikolaus Rath +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + * Neither the name of the main author nor the names of other contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +This module defines the interface between the FUSE C and Python API. The actual file system +is implemented as an `Operations` instance whose methods will +be called by the fuse library. + +Note that all "string-like" quantities (e.g. file names, extended attribute names & values) are +represented as bytes, since POSIX doesn't require any of them to be valid unicode strings. + + +Exception Handling +------------------ + +Since Python exceptions cannot be forwarded to the FUSE kernel module, +the FUSE Python API catches all exceptions that are generated during +request processing. + +If the exception is of type `FUSEError`, the appropriate errno is returned +to the kernel module and the exception is discarded. + +For any other exceptions, a warning is logged and a generic error signaled +to the kernel module. Then the `handle_exc` method of the `Operations` +instance is called, so that the file system itself has a chance to react +to the problem (e.g. by marking the file system as needing a check). + +The return value and any raised exceptions of `handle_exc` are ignored. + +''' + +# Since we are using ctype Structures, we often have to +# access attributes that are not defined in __init__ +# (since they are defined in _fields_ instead) +#pylint: disable-msg=W0212 + +# We need globals +#pylint: disable-msg=W0603 + +from __future__ import division, print_function, absolute_import + +# Using .. as libfuse makes PyDev really unhappy. +from . 
import ctypes_api +libfuse = ctypes_api + +from ctypes import c_char_p, sizeof, create_string_buffer, addressof, string_at, POINTER, c_char, cast +from functools import partial +import errno +import logging +import sys + + +__all__ = [ 'FUSEError', 'ENOATTR', 'ENOTSUP', 'init', 'main', 'close', + 'fuse_version' ] + + +# These should really be defined in the errno module, but +# unfortunately they are missing +ENOATTR = libfuse.ENOATTR +ENOTSUP = libfuse.ENOTSUP + +log = logging.getLogger("fuse") + +# Init globals +operations = None +fuse_ops = None +mountpoint = None +session = None +channel = None + +class DiscardedRequest(Exception): + '''Request was interrupted and reply discarded. + + ''' + + pass + +class ReplyError(Exception): + '''Unable to send reply to fuse kernel module. + + ''' + + pass + +class FUSEError(Exception): + '''Wrapped errno value to be returned to the fuse kernel module + + This exception can store only an errno. Request handlers should raise + to return a specific errno to the fuse kernel module. + ''' + + __slots__ = [ 'errno' ] + + def __init__(self, errno_): + super(FUSEError, self).__init__() + self.errno = errno_ + + def __str__(self): + # errno may not have strings for all error codes + return errno.errorcode.get(self.errno, str(self.errno)) + + + +def check_reply_result(result, func, *args): + '''Check result of a call to a fuse_reply_* foreign function + + If `result` is 0, it is assumed that the call succeeded and the function does nothing. + + If result is `-errno.ENOENT`, this means that the request has been discarded and `DiscardedRequest` + is raised. + + In all other cases, `ReplyError` is raised. + + (We do not try to call `fuse_reply_err` or any other reply method as well, because the first reply + function may have already invalidated the `req` object and it seems better to (possibly) let the + request pend than to crash the server application.) 
+ ''' + + if result == 0: + return None + + elif result == -errno.ENOENT: + raise DiscardedRequest() + + elif result > 0: + raise ReplyError('Foreign function %s returned unexpected value %d' + % (func.name, result)) + elif result < 0: + raise ReplyError('Foreign function %s returned error %s' + % (func.name, errno.errorcode.get(-result, str(-result)))) + + +# +# Set return checker for common ctypes calls +# +reply_functions = [ 'fuse_reply_err', 'fuse_reply_entry', + 'fuse_reply_create', 'fuse_reply_readlink', 'fuse_reply_open', + 'fuse_reply_write', 'fuse_reply_attr', 'fuse_reply_buf', + 'fuse_reply_iov', 'fuse_reply_statfs', 'fuse_reply_xattr', + 'fuse_reply_lock' ] +for fname in reply_functions: + getattr(libfuse, fname).errcheck = check_reply_result + + # Name isn't stored by ctypes + getattr(libfuse, fname).name = fname + + +def dict_to_entry(attr): + '''Convert dict to fuse_entry_param''' + + entry = libfuse.fuse_entry_param() + + entry.ino = attr['st_ino'] + entry.generation = attr.pop('generation') + entry.entry_timeout = attr.pop('entry_timeout') + entry.attr_timeout = attr.pop('attr_timeout') + + entry.attr = dict_to_stat(attr) + + return entry + +def dict_to_stat(attr): + '''Convert dict to struct stat''' + + stat = libfuse.stat() + + # Determine correct way to store times + if hasattr(stat, 'st_atim'): # Linux + get_timespec_key = lambda key: key[:-1] + elif hasattr(stat, 'st_atimespec'): # FreeBSD + get_timespec_key = lambda key: key + 'spec' + else: + get_timespec_key = False + + # Raises exception if there are any unknown keys + for (key, val) in attr.iteritems(): + if val is None: # do not set undefined items + continue + if get_timespec_key and key in ('st_atime', 'st_mtime', 'st_ctime'): + key = get_timespec_key(key) + spec = libfuse.timespec() + spec.tv_sec = int(val) + spec.tv_nsec = int((val - int(val)) * 10 ** 9) + val = spec + setattr(stat, key, val) + + return stat + + +def stat_to_dict(stat): + '''Convert ``struct stat`` to dict''' + + 
attr = dict() + for (name, dummy) in libfuse.stat._fields_: + if name.startswith('__'): + continue + + if name in ('st_atim', 'st_mtim', 'st_ctim'): + key = name + 'e' + attr[key] = getattr(stat, name).tv_sec + getattr(stat, name).tv_nsec / 10 ** 9 + elif name in ('st_atimespec', 'st_mtimespec', 'st_ctimespec'): + key = name[:-4] + attr[key] = getattr(stat, name).tv_sec + getattr(stat, name).tv_nsec / 10 ** 9 + else: + attr[name] = getattr(stat, name) + + return attr + + +def op_wrapper(func, req, *args): + '''Catch all exceptions and call fuse_reply_err instead''' + + try: + func(req, *args) + except FUSEError as e: + log.debug('op_wrapper caught FUSEError, calling fuse_reply_err(%s)', + errno.errorcode.get(e.errno, str(e.errno))) + try: + libfuse.fuse_reply_err(req, e.errno) + except DiscardedRequest: + pass + except Exception as exc: + log.exception('FUSE handler raised exception.') + + # Report error to filesystem + if hasattr(operations, 'handle_exc'): + try: + operations.handle_exc(exc) + except: + pass + + # Send error reply, unless the error occured when replying + if not isinstance(exc, ReplyError): + log.debug('Calling fuse_reply_err(EIO)') + libfuse.fuse_reply_err(req, errno.EIO) + +def fuse_version(): + '''Return version of loaded fuse library''' + + return libfuse.fuse_version() + + +def init(operations_, mountpoint_, args): + '''Initialize and mount FUSE file system + + `operations_` has to be an instance of the `Operations` class (or another + class defining the same methods). + + `args` has to be a list of strings. Valid options are listed in struct fuse_opt fuse_mount_opts[] + (mount.c:68) and struct fuse_opt fuse_ll_opts[] (fuse_lowlevel_c:1526). 
+ ''' + + log.debug('Initializing llfuse') + + global operations + global fuse_ops + global mountpoint + global session + global channel + + # Give operations instance a chance to check and change + # the FUSE options + operations_.check_args(args) + + mountpoint = mountpoint_ + operations = operations_ + fuse_ops = libfuse.fuse_lowlevel_ops() + fuse_args = make_fuse_args(args) + + # Init fuse_ops + module = globals() + for (name, prototype) in libfuse.fuse_lowlevel_ops._fields_: + if hasattr(operations, name): + method = partial(op_wrapper, module['fuse_' + name]) + setattr(fuse_ops, name, prototype(method)) + + log.debug('Calling fuse_mount') + channel = libfuse.fuse_mount(mountpoint, fuse_args) + if not channel: + raise RuntimeError('fuse_mount failed') + try: + log.debug('Calling fuse_lowlevel_new') + session = libfuse.fuse_lowlevel_new(fuse_args, fuse_ops, sizeof(fuse_ops), None) + if not session: + raise RuntimeError("fuse_lowlevel_new() failed") + try: + log.debug('Calling fuse_set_signal_handlers') + if libfuse.fuse_set_signal_handlers(session) == -1: + raise RuntimeError("fuse_set_signal_handlers() failed") + try: + log.debug('Calling fuse_session_add_chan') + libfuse.fuse_session_add_chan(session, channel) + session = session + channel = channel + return + + except: + log.debug('Calling fuse_remove_signal_handlers') + libfuse.fuse_remove_signal_handlers(session) + raise + + except: + log.debug('Calling fuse_session_destroy') + libfuse.fuse_session_destroy(session) + raise + except: + log.debug('Calling fuse_unmount') + libfuse.fuse_unmount(mountpoint, channel) + raise + +def make_fuse_args(args): + '''Create fuse_args Structure for given mount options''' + + args1 = [ sys.argv[0] ] + for opt in args: + args1.append(b'-o') + args1.append(opt) + + # Init fuse_args struct + fuse_args = libfuse.fuse_args() + fuse_args.allocated = 0 + fuse_args.argc = len(args1) + fuse_args.argv = (POINTER(c_char) * len(args1))(*[cast(c_char_p(x), POINTER(c_char)) + for x in 
args1]) + return fuse_args + +def main(single=False): + '''Run FUSE main loop''' + + if not session: + raise RuntimeError('Need to call init() before main()') + + if single: + log.debug('Calling fuse_session_loop') + if libfuse.fuse_session_loop(session) != 0: + raise RuntimeError("fuse_session_loop() failed") + else: + log.debug('Calling fuse_session_loop_mt') + if libfuse.fuse_session_loop_mt(session) != 0: + raise RuntimeError("fuse_session_loop_mt() failed") + +def close(): + '''Unmount file system and clean up''' + + global operations + global fuse_ops + global mountpoint + global session + global channel + + log.debug('Calling fuse_session_remove_chan') + libfuse.fuse_session_remove_chan(channel) + log.debug('Calling fuse_remove_signal_handlers') + libfuse.fuse_remove_signal_handlers(session) + log.debug('Calling fuse_session_destroy') + libfuse.fuse_session_destroy(session) + log.debug('Calling fuse_unmount') + libfuse.fuse_unmount(mountpoint, channel) + + operations = None + fuse_ops = None + mountpoint = None + session = None + channel = None + + +def fuse_lookup(req, parent_inode, name): + '''Look up a directory entry by name and get its attributes''' + + log.debug('Handling lookup(%d, %s)', parent_inode, string_at(name)) + + attr = operations.lookup(parent_inode, string_at(name)) + entry = dict_to_entry(attr) + + log.debug('Calling fuse_reply_entry') + try: + libfuse.fuse_reply_entry(req, entry) + except DiscardedRequest: + pass + +def fuse_init(userdata_p, conn_info_p): + '''Initialize Operations''' + operations.init() + +def fuse_destroy(userdata_p): + '''Cleanup Operations''' + operations.destroy() + +def fuse_getattr(req, ino, _unused): + '''Get attributes for `ino`''' + + log.debug('Handling getattr(%d)', ino) + + attr = operations.getattr(ino) + + attr_timeout = attr.pop('attr_timeout') + stat = dict_to_stat(attr) + + log.debug('Calling fuse_reply_attr') + try: + libfuse.fuse_reply_attr(req, stat, attr_timeout) + except DiscardedRequest: + pass + 
+def fuse_access(req, ino, mask): + '''Check if calling user has `mask` rights for `ino`''' + + log.debug('Handling access(%d, %o)', ino, mask) + + # Get UID + ctx = libfuse.fuse_req_ctx(req).contents + + # Define a function that returns a list of the GIDs + def get_gids(): + # Get GID list if FUSE supports it + # Weird syntax to prevent PyDev from complaining + getgroups = getattr(libfuse, "fuse_req_getgroups") + gid_t = getattr(libfuse, 'gid_t') + no = 10 + buf = (gid_t * no)(range(no)) + ret = getgroups(req, no, buf) + if ret > no: + no = ret + buf = (gid_t * no)(range(no)) + ret = getgroups(req, no, buf) + + return [ buf[i].value for i in range(ret) ] + + ret = operations.access(ino, mask, ctx, get_gids) + + log.debug('Calling fuse_reply_err') + try: + if ret: + libfuse.fuse_reply_err(req, 0) + else: + libfuse.fuse_reply_err(req, errno.EPERM) + except DiscardedRequest: + pass + + +def fuse_create(req, ino_parent, name, mode, fi): + '''Create and open a file''' + + log.debug('Handling create(%d, %s, %o)', ino_parent, string_at(name), mode) + (fh, attr) = operations.create(ino_parent, string_at(name), mode, + libfuse.fuse_req_ctx(req).contents) + fi.contents.fh = fh + fi.contents.keep_cache = 1 + entry = dict_to_entry(attr) + + log.debug('Calling fuse_reply_create') + try: + libfuse.fuse_reply_create(req, entry, fi) + except DiscardedRequest: + operations.release(fh) + + +def fuse_flush(req, ino, fi): + '''Handle close() system call + + May be called multiple times for the same open file. + ''' + + log.debug('Handling flush(%d)', fi.contents.fh) + operations.flush(fi.contents.fh) + log.debug('Calling fuse_reply_err(0)') + try: + libfuse.fuse_reply_err(req, 0) + except DiscardedRequest: + pass + + +def fuse_fsync(req, ino, datasync, fi): + '''Flush buffers for `ino` + + If the datasync parameter is non-zero, then only the user data + is flushed (and not the meta data). 
+ ''' + + log.debug('Handling fsync(%d, %s)', fi.contents.fh, datasync != 0) + operations.fsync(fi.contents.fh, datasync != 0) + log.debug('Calling fuse_reply_err(0)') + try: + libfuse.fuse_reply_err(req, 0) + except DiscardedRequest: + pass + + +def fuse_fsyncdir(req, ino, datasync, fi): + '''Synchronize directory contents + + If the datasync parameter is non-zero, then only the directory contents + are flushed (and not the meta data about the directory itself). + ''' + + log.debug('Handling fsyncdir(%d, %s)', fi.contents.fh, datasync != 0) + operations.fsyncdir(fi.contents.fh, datasync != 0) + log.debug('Calling fuse_reply_err(0)') + try: + libfuse.fuse_reply_err(req, 0) + except DiscardedRequest: + pass + + +def fuse_getxattr(req, ino, name, size): + '''Get an extended attribute. + ''' + + log.debug('Handling getxattr(%d, %r, %d)', ino, string_at(name), size) + val = operations.getxattr(ino, string_at(name)) + if not isinstance(val, bytes): + raise TypeError("getxattr return value must be of type bytes") + + try: + if size == 0: + log.debug('Calling fuse_reply_xattr') + libfuse.fuse_reply_xattr(req, len(val)) + elif size >= len(val): + log.debug('Calling fuse_reply_buf') + libfuse.fuse_reply_buf(req, val, len(val)) + else: + raise FUSEError(errno.ERANGE) + except DiscardedRequest: + pass + + +def fuse_link(req, ino, new_parent_ino, new_name): + '''Create a hard link''' + + log.debug('Handling fuse_link(%d, %d, %s)', ino, new_parent_ino, string_at(new_name)) + attr = operations.link(ino, new_parent_ino, string_at(new_name)) + entry = dict_to_entry(attr) + + log.debug('Calling fuse_reply_entry') + try: + libfuse.fuse_reply_entry(req, entry) + except DiscardedRequest: + pass + +def fuse_listxattr(req, inode, size): + '''List extended attributes for `inode`''' + + log.debug('Handling listxattr(%d)', inode) + names = operations.listxattr(inode) + + if not all([ isinstance(name, bytes) for name in names]): + raise TypeError("listxattr return value must be list of 
bytes") + + # Size of the \0 separated buffer + act_size = (len(names) - 1) + sum([ len(name) for name in names ]) + + if size == 0: + try: + log.debug('Calling fuse_reply_xattr') + libfuse.fuse_reply_xattr(req, len(names)) + except DiscardedRequest: + pass + + elif act_size > size: + raise FUSEError(errno.ERANGE) + + else: + try: + log.debug('Calling fuse_reply_buf') + libfuse.fuse_reply_buf(req, b'\0'.join(names), act_size) + except DiscardedRequest: + pass + + +def fuse_mkdir(req, inode_parent, name, mode): + '''Create directory''' + + log.debug('Handling mkdir(%d, %s, %o)', inode_parent, string_at(name), mode) + attr = operations.mkdir(inode_parent, string_at(name), mode, + libfuse.fuse_req_ctx(req).contents) + entry = dict_to_entry(attr) + + log.debug('Calling fuse_reply_entry') + try: + libfuse.fuse_reply_entry(req, entry) + except DiscardedRequest: + pass + +def fuse_mknod(req, inode_parent, name, mode, rdev): + '''Create (possibly special) file''' + + log.debug('Handling mknod(%d, %s, %o, %d)', inode_parent, string_at(name), + mode, rdev) + attr = operations.mknod(inode_parent, string_at(name), mode, rdev, + libfuse.fuse_req_ctx(req).contents) + entry = dict_to_entry(attr) + + log.debug('Calling fuse_reply_entry') + try: + libfuse.fuse_reply_entry(req, entry) + except DiscardedRequest: + pass + +def fuse_open(req, inode, fi): + '''Open a file''' + log.debug('Handling open(%d, %d)', inode, fi.contents.flags) + fi.contents.fh = operations.open(inode, fi.contents.flags) + fi.contents.keep_cache = 1 + + log.debug('Calling fuse_reply_open') + try: + libfuse.fuse_reply_open(req, fi) + except DiscardedRequest: + operations.release(inode, fi.contents.fh) + +def fuse_opendir(req, inode, fi): + '''Open a directory''' + + log.debug('Handling opendir(%d)', inode) + fi.contents.fh = operations.opendir(inode) + + log.debug('Calling fuse_reply_open') + try: + libfuse.fuse_reply_open(req, fi) + except DiscardedRequest: + operations.releasedir(fi.contents.fh) + + +def 
fuse_read(req, ino, size, off, fi): + '''Read data from file''' + + log.debug('Handling read(ino=%d, off=%d, size=%d)', fi.contents.fh, off, size) + data = operations.read(fi.contents.fh, off, size) + + if not isinstance(data, bytes): + raise TypeError("read() must return bytes") + + if len(data) > size: + raise ValueError('read() must not return more than `size` bytes') + + log.debug('Calling fuse_reply_buf') + try: + libfuse.fuse_reply_buf(req, data, len(data)) + except DiscardedRequest: + pass + + +def fuse_readlink(req, inode): + '''Read target of symbolic link''' + + log.debug('Handling readlink(%d)', inode) + target = operations.readlink(inode) + log.debug('Calling fuse_reply_readlink') + try: + libfuse.fuse_reply_readlink(req, target) + except DiscardedRequest: + pass + + +def fuse_readdir(req, ino, bufsize, off, fi): + '''Read directory entries''' + + log.debug('Handling readdir(%d, %d, %d, %d)', ino, bufsize, off, fi.contents.fh) + + # Collect as much entries as we can return + entries = list() + size = 0 + for (name, attr) in operations.readdir(fi.contents.fh, off): + if not isinstance(name, bytes): + raise TypeError("readdir() must return entry names as bytes") + + stat = dict_to_stat(attr) + + entry_size = libfuse.fuse_add_direntry(req, None, 0, name, stat, 0) + if size + entry_size > bufsize: + break + + entries.append((name, stat)) + size += entry_size + + log.debug('Gathered %d entries, total size %d', len(entries), size) + + # If there are no entries left, return empty buffer + if not entries: + try: + log.debug('Calling fuse_reply_buf') + libfuse.fuse_reply_buf(req, None, 0) + except DiscardedRequest: + pass + return + + # Create and fill buffer + log.debug('Adding entries to buffer') + buf = create_string_buffer(size) + next_ = off + addr_off = 0 + for (name, stat) in entries: + next_ += 1 + addr_off += libfuse.fuse_add_direntry(req, cast(addressof(buf) + addr_off, POINTER(c_char)), + bufsize, name, stat, next_) + + # Return buffer + 
log.debug('Calling fuse_reply_buf') + try: + libfuse.fuse_reply_buf(req, buf, size) + except DiscardedRequest: + pass + + +def fuse_release(req, inode, fi): + '''Release open file''' + + log.debug('Handling release(%d)', fi.contents.fh) + operations.release(fi.contents.fh) + log.debug('Calling fuse_reply_err(0)') + try: + libfuse.fuse_reply_err(req, 0) + except DiscardedRequest: + pass + +def fuse_releasedir(req, inode, fi): + '''Release open directory''' + + log.debug('Handling releasedir(%d)', fi.contents.fh) + operations.releasedir(fi.contents.fh) + log.debug('Calling fuse_reply_err(0)') + try: + libfuse.fuse_reply_err(req, 0) + except DiscardedRequest: + pass + +def fuse_removexattr(req, inode, name): + '''Remove extended attribute''' + + log.debug('Handling removexattr(%d, %s)', inode, string_at(name)) + operations.removexattr(inode, string_at(name)) + log.debug('Calling fuse_reply_err(0)') + try: + libfuse.fuse_reply_err(req, 0) + except DiscardedRequest: + pass + +def fuse_rename(req, parent_inode_old, name_old, parent_inode_new, name_new): + '''Rename a directory entry''' + + log.debug('Handling rename(%d, %r, %d, %r)', parent_inode_old, string_at(name_old), + parent_inode_new, string_at(name_new)) + operations.rename(parent_inode_old, string_at(name_old), parent_inode_new, + string_at(name_new)) + log.debug('Calling fuse_reply_err(0)') + try: + libfuse.fuse_reply_err(req, 0) + except DiscardedRequest: + pass + +def fuse_rmdir(req, inode_parent, name): + '''Remove a directory''' + + log.debug('Handling rmdir(%d, %r)', inode_parent, string_at(name)) + operations.rmdir(inode_parent, string_at(name)) + log.debug('Calling fuse_reply_err(0)') + try: + libfuse.fuse_reply_err(req, 0) + except DiscardedRequest: + pass + +def fuse_setattr(req, inode, stat, to_set, fi): + '''Change directory entry attributes''' + + log.debug('Handling fuse_setattr(%d)', inode) + + # Note: We can't check if we know all possible flags, + # because the part of to_set that is not 
"covered" + # by flags seems to be undefined rather than zero. + + attr_all = stat_to_dict(stat.contents) + attr = dict() + + if (to_set & libfuse.FUSE_SET_ATTR_MTIME) != 0: + attr['st_mtime'] = attr_all['st_mtime'] + + if (to_set & libfuse.FUSE_SET_ATTR_ATIME) != 0: + attr['st_atime'] = attr_all['st_atime'] + + if (to_set & libfuse.FUSE_SET_ATTR_MODE) != 0: + attr['st_mode'] = attr_all['st_mode'] + + if (to_set & libfuse.FUSE_SET_ATTR_UID) != 0: + attr['st_uid'] = attr_all['st_uid'] + + if (to_set & libfuse.FUSE_SET_ATTR_GID) != 0: + attr['st_gid'] = attr_all['st_gid'] + + if (to_set & libfuse.FUSE_SET_ATTR_SIZE) != 0: + attr['st_size'] = attr_all['st_size'] + + attr = operations.setattr(inode, attr) + + attr_timeout = attr.pop('attr_timeout') + stat = dict_to_stat(attr) + + log.debug('Calling fuse_reply_attr') + try: + libfuse.fuse_reply_attr(req, stat, attr_timeout) + except DiscardedRequest: + pass + +def fuse_setxattr(req, inode, name, val, size, flags): + '''Set an extended attribute''' + + log.debug('Handling setxattr(%d, %r, %r, %d)', inode, string_at(name), + string_at(val, size), flags) + + # Make sure we know all the flags + if (flags & ~(libfuse.XATTR_CREATE | libfuse.XATTR_REPLACE)) != 0: + raise ValueError('unknown flag') + + if (flags & libfuse.XATTR_CREATE) != 0: + try: + operations.getxattr(inode, string_at(name)) + except FUSEError as e: + if e.errno == ENOATTR: + pass + raise + else: + raise FUSEError(errno.EEXIST) + elif (flags & libfuse.XATTR_REPLACE) != 0: + # Exception can be passed on if the attribute does not exist + operations.getxattr(inode, string_at(name)) + + operations.setxattr(inode, string_at(name), string_at(val, size)) + + log.debug('Calling fuse_reply_err(0)') + try: + libfuse.fuse_reply_err(req, 0) + except DiscardedRequest: + pass + +def fuse_statfs(req, inode): + '''Return filesystem statistics''' + + log.debug('Handling statfs(%d)', inode) + attr = operations.statfs() + statfs = libfuse.statvfs() + + for (key, val) in 
attr.iteritems(): + setattr(statfs, key, val) + + log.debug('Calling fuse_reply_statfs') + try: + libfuse.fuse_reply_statfs(req, statfs) + except DiscardedRequest: + pass + +def fuse_symlink(req, target, parent_inode, name): + '''Create a symbolic link''' + + log.debug('Handling symlink(%d, %r, %r)', parent_inode, string_at(name), string_at(target)) + attr = operations.symlink(parent_inode, string_at(name), string_at(target), + libfuse.fuse_req_ctx(req).contents) + entry = dict_to_entry(attr) + + log.debug('Calling fuse_reply_entry') + try: + libfuse.fuse_reply_entry(req, entry) + except DiscardedRequest: + pass + + +def fuse_unlink(req, parent_inode, name): + '''Delete a file''' + + log.debug('Handling unlink(%d, %r)', parent_inode, string_at(name)) + operations.unlink(parent_inode, string_at(name)) + log.debug('Calling fuse_reply_err(0)') + try: + libfuse.fuse_reply_err(req, 0) + except DiscardedRequest: + pass + +def fuse_write(req, inode, buf, size, off, fi): + '''Write into an open file handle''' + + log.debug('Handling write(fh=%d, off=%d, size=%d)', fi.contents.fh, off, size) + written = operations.write(fi.contents.fh, off, string_at(buf, size)) + + log.debug('Calling fuse_reply_write') + try: + libfuse.fuse_reply_write(req, written) + except DiscardedRequest: + pass diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/llfuse/operations.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/llfuse/operations.py new file mode 100644 index 00000000..4510016b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/llfuse/operations.py @@ -0,0 +1,348 @@ +''' +$Id: operations.py 47 2010-01-29 17:11:23Z nikratio $ + +Copyright (c) 2010, Nikolaus Rath +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + * Neither the name of the main author nor the names of other contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +''' + +from __future__ import division, print_function, absolute_import + +from .interface import FUSEError +import errno + +class Operations(object): + ''' + This is a dummy class that just documents the possible methods that + a file system may declare. + ''' + + # This is a dummy class, so all the methods could of course + # be functions + #pylint: disable-msg=R0201 + + def handle_exc(self, exc): + '''Handle exceptions that occured during request processing. + + This method returns nothing and does not raise any exceptions itself. 
+ ''' + + pass + + def init(self): + '''Initialize operations + + This function has to be called before any request has been received, + but after the mountpoint has been set up and the process has + daemonized. + ''' + + pass + + def destroy(self): + '''Clean up operations. + + This method has to be called after the last request has been + received, when the file system is about to be unmounted. + ''' + + pass + + def check_args(self, fuse_args): + '''Review FUSE arguments + + This method checks if the FUSE options `fuse_args` are compatible + with the way that the file system operations are implemented. + It raises an exception if incompatible options are encountered and + silently adds required options if they are missing. + ''' + + pass + + def readdir(self, fh, off): + '''Read directory entries + + This method returns an iterator over the contents of directory `fh`, + starting at entry `off`. The iterator yields tuples of the form + ``(name, attr)``, where ``attr` is a dict with keys corresponding to + the elements of ``struct stat``. + + Iteration may be stopped as soon as enough elements have been + retrieved and does not have to be continued until `StopIteration` + is raised. + ''' + + raise FUSEError(errno.ENOSYS) + + + def read(self, fh, off, size): + '''Read `size` bytes from `fh` at position `off` + + Unless the file has been opened in direct_io mode or EOF is reached, + this function returns exactly `size` bytes. + ''' + + raise FUSEError(errno.ENOSYS) + + def link(self, inode, new_parent_inode, new_name): + '''Create a hard link. + + Returns a dict with the attributes of the newly created directory + entry. The keys are the same as for `lookup`. + ''' + + raise FUSEError(errno.ENOSYS) + + def open(self, inode, flags): + '''Open a file. + + Returns an (integer) file handle. 
`flags` is a bitwise or of the open flags + described in open(2) and defined in the `os` module (with the exception of + ``O_CREAT``, ``O_EXCL``, ``O_NOCTTY`` and ``O_TRUNC``) + ''' + + raise FUSEError(errno.ENOSYS) + + def opendir(self, inode): + '''Open a directory. + + Returns an (integer) file handle. + ''' + + raise FUSEError(errno.ENOSYS) + + + def mkdir(self, parent_inode, name, mode, ctx): + '''Create a directory + + `ctx` must be a context object that contains pid, uid and + primary gid of the requesting process. + + Returns a dict with the attributes of the newly created directory + entry. The keys are the same as for `lookup`. + ''' + + raise FUSEError(errno.ENOSYS) + + def mknod(self, parent_inode, name, mode, rdev, ctx): + '''Create (possibly special) file + + `ctx` must be a context object that contains pid, uid and + primary gid of the requesting process. + + Returns a dict with the attributes of the newly created directory + entry. The keys are the same as for `lookup`. + ''' + + raise FUSEError(errno.ENOSYS) + + + def lookup(self, parent_inode, name): + '''Look up a directory entry by name and get its attributes. + + Returns a dict with keys corresponding to the elements in + ``struct stat`` and the following additional keys: + + :generation: The inode generation number + :attr_timeout: Validity timeout (in seconds) for the attributes + :entry_timeout: Validity timeout (in seconds) for the name + + Note also that the ``st_Xtime`` entries support floating point numbers + to allow for nano second resolution. + + The returned dict can be modified at will by the caller without + influencing the internal state of the file system. + + If the entry does not exist, raises `FUSEError(errno.ENOENT)`. 
+ ''' + + raise FUSEError(errno.ENOSYS) + + def listxattr(self, inode): + '''Get list of extended attribute names''' + + raise FUSEError(errno.ENOSYS) + + def getattr(self, inode): + '''Get attributes for `inode` + + Returns a dict with keys corresponding to the elements in + ``struct stat`` and the following additional keys: + + :attr_timeout: Validity timeout (in seconds) for the attributes + + The returned dict can be modified at will by the caller without + influencing the internal state of the file system. + + Note that the ``st_Xtime`` entries support floating point numbers + to allow for nano second resolution. + ''' + + raise FUSEError(errno.ENOSYS) + + def getxattr(self, inode, name): + '''Return extended attribute value + + If the attribute does not exist, raises `FUSEError(ENOATTR)` + ''' + + raise FUSEError(errno.ENOSYS) + + def access(self, inode, mode, ctx, get_sup_gids): + '''Check if requesting process has `mode` rights on `inode`. + + Returns a boolean value. `get_sup_gids` must be a function that + returns a list of the supplementary group ids of the requester. + + `ctx` must be a context object that contains pid, uid and + primary gid of the requesting process. + ''' + + raise FUSEError(errno.ENOSYS) + + def create(self, inode_parent, name, mode, ctx): + '''Create a file and open it + + `ctx` must be a context object that contains pid, uid and + primary gid of the requesting process. + + Returns a tuple of the form ``(fh, attr)``. `fh` is + integer file handle that is used to identify the open file and + `attr` is a dict similar to the one returned by `lookup`. + ''' + + raise FUSEError(errno.ENOSYS) + + def flush(self, fh): + '''Handle close() syscall. + + May be called multiple times for the same open file (e.g. if the file handle + has been duplicated). + + If the filesystem supports file locking operations, all locks belonging + to the file handle's owner are cleared. 
+ ''' + + raise FUSEError(errno.ENOSYS) + + def fsync(self, fh, datasync): + '''Flush buffers for file `fh` + + If `datasync` is true, only the user data is flushed (and no meta data). + ''' + + raise FUSEError(errno.ENOSYS) + + + def fsyncdir(self, fh, datasync): + '''Flush buffers for directory `fh` + + If the `datasync` is true, then only the directory contents + are flushed (and not the meta data about the directory itself). + ''' + + raise FUSEError(errno.ENOSYS) + + def readlink(self, inode): + '''Return target of symbolic link''' + + raise FUSEError(errno.ENOSYS) + + def release(self, fh): + '''Release open file + + This method must be called exactly once for each `open` call. + ''' + + raise FUSEError(errno.ENOSYS) + + def releasedir(self, fh): + '''Release open directory + + This method must be called exactly once for each `opendir` call. + ''' + + raise FUSEError(errno.ENOSYS) + + def removexattr(self, inode, name): + '''Remove extended attribute + + If the attribute does not exist, raises FUSEError(ENOATTR) + ''' + + raise FUSEError(errno.ENOSYS) + + def rename(self, inode_parent_old, name_old, inode_parent_new, name_new): + '''Rename a directory entry''' + + raise FUSEError(errno.ENOSYS) + + def rmdir(self, inode_parent, name): + '''Remove a directory''' + + raise FUSEError(errno.ENOSYS) + + def setattr(self, inode, attr): + '''Change directory entry attributes + + `attr` must be a dict with keys corresponding to the attributes of + ``struct stat``. `attr` may also include a new value for ``st_size`` which + means that the file should be truncated or extended. + + Returns a dict with the new attributs of the directory entry, + similar to the one returned by `getattr()` + ''' + + raise FUSEError(errno.ENOSYS) + + def setxattr(self, inode, name, value): + '''Set an extended attribute. + + The attribute may or may not exist already. 
+ ''' + + raise FUSEError(errno.ENOSYS) + + def statfs(self): + '''Get file system statistics + + Returns a `dict` with keys corresponding to the attributes of + ``struct statfs``. + ''' + + raise FUSEError(errno.ENOSYS) + + def symlink(self, inode_parent, name, target, ctx): + '''Create a symbolic link + + `ctx` must be a context object that contains pid, uid and + primary gid of the requesting process. + + Returns a dict with the attributes of the newly created directory + entry. The keys are the same as for `lookup`. + ''' + + raise FUSEError(errno.ENOSYS) + + def unlink(self, parent_inode, name): + '''Remove a (possibly special) file''' + + raise FUSEError(errno.ENOSYS) + + def write(self, fh, off, data): + '''Write data into an open file + + Returns the number of bytes written. + Unless the file was opened in ``direct_io`` mode, this is always equal to + `len(data)`. + ''' + + raise FUSEError(errno.ENOSYS) + diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/llfuse_example.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/llfuse_example.py new file mode 100755 index 00000000..8b4d7041 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/llfuse_example.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python +''' +$Id: llfuse_example.py 46 2010-01-29 17:10:10Z nikratio $ + +Copyright (c) 2010, Nikolaus Rath +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
+ * Neither the name of the main author nor the names of other contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +''' + +from __future__ import division, print_function, absolute_import + +import llfuse +import errno +import stat +import sys + +class Operations(llfuse.Operations): + '''A very simple example filesystem''' + + def __init__(self): + super(Operations, self).__init__() + self.entries = [ + # name, attr + (b'.', { 'st_ino': 1, + 'st_mode': stat.S_IFDIR | 0755, + 'st_nlink': 2}), + (b'..', { 'st_ino': 1, + 'st_mode': stat.S_IFDIR | 0755, + 'st_nlink': 2}), + (b'file1', { 'st_ino': 2, 'st_nlink': 1, + 'st_mode': stat.S_IFREG | 0644 }), + (b'file2', { 'st_ino': 3, 'st_nlink': 1, + 'st_mode': stat.S_IFREG | 0644 }) ] + + self.contents = { # Inode: Contents + 2: b'Hello, World\n', + 3: b'Some more file contents\n' + } + + self.by_inode = dict() + self.by_name = dict() + + for entry in self.entries: + (name, attr) = entry + if attr['st_ino'] in self.contents: + attr['st_size'] = len(self.contents[attr['st_ino']]) + + + self.by_inode[attr['st_ino']] = attr + self.by_name[name] = attr + + + + def lookup(self, parent_inode, name): + try: + attr 
= self.by_name[name].copy() + except KeyError: + raise llfuse.FUSEError(errno.ENOENT) + + attr['attr_timeout'] = 1 + attr['entry_timeout'] = 1 + attr['generation'] = 1 + + return attr + + + def getattr(self, inode): + attr = self.by_inode[inode].copy() + attr['attr_timeout'] = 1 + return attr + + def readdir(self, fh, off): + for entry in self.entries: + if off > 0: + off -= 1 + continue + + yield entry + + + def read(self, fh, off, size): + return self.contents[fh][off:off+size] + + def open(self, inode, flags): + if inode in self.contents: + return inode + else: + raise RuntimeError('Attempted to open() a directory') + + def opendir(self, inode): + return inode + + def access(self, inode, mode, ctx, get_sup_gids): + return True + + + +if __name__ == '__main__': + + if len(sys.argv) != 2: + raise SystemExit('Usage: %s ' % sys.argv[0]) + + mountpoint = sys.argv[1] + operations = Operations() + + llfuse.init(operations, mountpoint, [ b"nonempty", b'fsname=llfuses_xmp' ]) + llfuse.main() + \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/setup.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/setup.py new file mode 100755 index 00000000..88a5018f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/low-level/setup.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python +''' +$Id: setup.py 53 2010-02-22 01:48:45Z nikratio $ + +Copyright (c) 2010, Nikolaus Rath +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
+ * Neither the name of the main author nor the names of other contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +''' + +from __future__ import division, print_function + +from distutils.core import setup, Command +import distutils.command.build +import sys +import os +import tempfile +import subprocess +import re +import logging +import ctypes.util + +# These are the definitions that we need +fuse_export_regex = ['^FUSE_SET_.*', '^XATTR_.*', 'fuse_reply_.*' ] +fuse_export_symbols = ['fuse_mount', 'fuse_lowlevel_new', 'fuse_add_direntry', + 'fuse_set_signal_handlers', 'fuse_session_add_chan', + 'fuse_session_loop_mt', 'fuse_session_remove_chan', + 'fuse_remove_signal_handlers', 'fuse_session_destroy', + 'fuse_unmount', 'fuse_req_ctx', 'fuse_lowlevel_ops', + 'fuse_session_loop', 'ENOATTR', 'ENOTSUP', + 'fuse_version' ] + +class build_ctypes(Command): + + description = "Build ctypes interfaces" + user_options = [] + boolean_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + + def run(self): + '''Create ctypes API to local FUSE headers''' + + # Import ctypeslib + basedir = 
os.path.abspath(os.path.dirname(sys.argv[0])) + sys.path.insert(0, os.path.join(basedir, 'ctypeslib.zip')) + from ctypeslib import h2xml, xml2py + from ctypeslib.codegen import codegenerator as ctypeslib + + print('Creating ctypes API from local fuse headers...') + + cflags = self.get_cflags() + print('Using cflags: %s' % ' '.join(cflags)) + + fuse_path = 'fuse' + if not ctypes.util.find_library(fuse_path): + print('Could not find fuse library', file=sys.stderr) + sys.exit(1) + + + # Create temporary XML file + tmp_fh = tempfile.NamedTemporaryFile() + tmp_name = tmp_fh.name + + print('Calling h2xml...') + argv = [ 'h2xml.py', '-o', tmp_name, '-c', '-q', '-I', basedir, 'fuse_ctypes.h' ] + argv += cflags + ctypeslib.ASSUME_STRINGS = False + ctypeslib.CDLL_SET_ERRNO = False + ctypeslib.PREFIX = ('# Code autogenerated by ctypeslib. Any changes will be lost!\n\n' + '#pylint: disable-all\n' + '#@PydevCodeAnalysisIgnore\n\n') + h2xml.main(argv) + + print('Calling xml2py...') + api_file = os.path.join(basedir, 'llfuse', 'ctypes_api.py') + argv = [ 'xml2py.py', tmp_name, '-o', api_file, '-l', fuse_path ] + for el in fuse_export_regex: + argv.append('-r') + argv.append(el) + for el in fuse_export_symbols: + argv.append('-s') + argv.append(el) + xml2py.main(argv) + + # Delete temporary XML file + tmp_fh.close() + + print('Code generation complete.') + + def get_cflags(self): + '''Get cflags required to compile with fuse library''' + + proc = subprocess.Popen(['pkg-config', 'fuse', '--cflags'], stdout=subprocess.PIPE) + cflags = proc.stdout.readline().rstrip() + proc.stdout.close() + if proc.wait() != 0: + sys.stderr.write('Failed to execute pkg-config. 
Exit code: %d.\n' + % proc.returncode) + sys.stderr.write('Check that the FUSE development package been installed properly.\n') + sys.exit(1) + return cflags.split() + + +# Add as subcommand of build +distutils.command.build.build.sub_commands.insert(0, ('build_ctypes', None)) + + +setup(name='llfuse_example', + version='1.0', + author='Nikolaus Rath', + author_email='Nikolaus@rath.org', + url='http://code.google.com/p/fusepy/', + packages=[ 'llfuse' ], + provides=['llfuse'], + cmdclass={ 'build_ctypes': build_ctypes} + ) diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/memory.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/memory.py new file mode 100755 index 00000000..246b305d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/memory.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python + +from collections import defaultdict +from errno import ENOENT +from stat import S_IFDIR, S_IFLNK, S_IFREG +from sys import argv, exit +from time import time + +from fuse import FUSE, FuseOSError, Operations, LoggingMixIn + + +class Memory(LoggingMixIn, Operations): + """Example memory filesystem. 
Supports only one level of files.""" + + def __init__(self): + self.files = {} + self.data = defaultdict(str) + self.fd = 0 + now = time() + self.files['/'] = dict(st_mode=(S_IFDIR | 0755), st_ctime=now, + st_mtime=now, st_atime=now, st_nlink=2) + + def chmod(self, path, mode): + self.files[path]['st_mode'] &= 0770000 + self.files[path]['st_mode'] |= mode + return 0 + + def chown(self, path, uid, gid): + self.files[path]['st_uid'] = uid + self.files[path]['st_gid'] = gid + + def create(self, path, mode): + self.files[path] = dict(st_mode=(S_IFREG | mode), st_nlink=1, + st_size=0, st_ctime=time(), st_mtime=time(), st_atime=time()) + self.fd += 1 + return self.fd + + def getattr(self, path, fh=None): + if path not in self.files: + raise FuseOSError(ENOENT) + st = self.files[path] + return st + + def getxattr(self, path, name, position=0): + attrs = self.files[path].get('attrs', {}) + try: + return attrs[name] + except KeyError: + return '' # Should return ENOATTR + + def listxattr(self, path): + attrs = self.files[path].get('attrs', {}) + return attrs.keys() + + def mkdir(self, path, mode): + self.files[path] = dict(st_mode=(S_IFDIR | mode), st_nlink=2, + st_size=0, st_ctime=time(), st_mtime=time(), st_atime=time()) + self.files['/']['st_nlink'] += 1 + + def open(self, path, flags): + self.fd += 1 + return self.fd + + def read(self, path, size, offset, fh): + return self.data[path][offset:offset + size] + + def readdir(self, path, fh): + return ['.', '..'] + [x[1:] for x in self.files if x != '/'] + + def readlink(self, path): + return self.data[path] + + def removexattr(self, path, name): + attrs = self.files[path].get('attrs', {}) + try: + del attrs[name] + except KeyError: + pass # Should return ENOATTR + + def rename(self, old, new): + self.files[new] = self.files.pop(old) + + def rmdir(self, path): + self.files.pop(path) + self.files['/']['st_nlink'] -= 1 + + def setxattr(self, path, name, value, options, position=0): + # Ignore options + attrs = 
self.files[path].setdefault('attrs', {}) + attrs[name] = value + + def statfs(self, path): + return dict(f_bsize=512, f_blocks=4096, f_bavail=2048) + + def symlink(self, target, source): + self.files[target] = dict(st_mode=(S_IFLNK | 0777), st_nlink=1, + st_size=len(source)) + self.data[target] = source + + def truncate(self, path, length, fh=None): + self.data[path] = self.data[path][:length] + self.files[path]['st_size'] = length + + def unlink(self, path): + self.files.pop(path) + + def utimens(self, path, times=None): + now = time() + atime, mtime = times if times else (now, now) + self.files[path]['st_atime'] = atime + self.files[path]['st_mtime'] = mtime + + def write(self, path, data, offset, fh): + self.data[path] = self.data[path][:offset] + data + self.files[path]['st_size'] = len(self.data[path]) + return len(data) + + +if __name__ == "__main__": + if len(argv) != 2: + print 'usage: %s ' % argv[0] + exit(1) + fuse = FUSE(Memory(), argv[1], foreground=True) \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/memory3.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/memory3.py new file mode 100755 index 00000000..e5cbad72 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/memory3.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python + +from fuse3 import FUSE, Operations, LoggingMixIn + +from collections import defaultdict +from errno import ENOENT +from stat import S_IFDIR, S_IFLNK, S_IFREG +from sys import argv, exit +from time import time + +import logging + + +class Memory(LoggingMixIn, Operations): + """Example memory filesystem. 
Supports only one level of files.""" + + def __init__(self): + self.files = {} + self.data = defaultdict(bytearray) + self.fd = 0 + now = time() + self.files['/'] = dict(st_mode=(S_IFDIR | 0o755), st_ctime=now, + st_mtime=now, st_atime=now, st_nlink=2) + + def chmod(self, path, mode): + self.files[path]['st_mode'] &= 0o770000 + self.files[path]['st_mode'] |= mode + return 0 + + def chown(self, path, uid, gid): + self.files[path]['st_uid'] = uid + self.files[path]['st_gid'] = gid + + def create(self, path, mode): + self.files[path] = dict(st_mode=(S_IFREG | mode), st_nlink=1, + st_size=0, st_ctime=time(), st_mtime=time(), st_atime=time()) + self.fd += 1 + return self.fd + + def getattr(self, path, fh=None): + if path not in self.files: + raise OSError(ENOENT, '') + st = self.files[path] + return st + + def getxattr(self, path, name, position=0): + attrs = self.files[path].get('attrs', {}) + try: + return attrs[name] + except KeyError: + return '' # Should return ENOATTR + + def listxattr(self, path): + attrs = self.files[path].get('attrs', {}) + return attrs.keys() + + def mkdir(self, path, mode): + self.files[path] = dict(st_mode=(S_IFDIR | mode), st_nlink=2, + st_size=0, st_ctime=time(), st_mtime=time(), st_atime=time()) + self.files['/']['st_nlink'] += 1 + + def open(self, path, flags): + self.fd += 1 + return self.fd + + def read(self, path, size, offset, fh): + return bytes(self.data[path][offset:offset + size]) + + def readdir(self, path, fh): + return ['.', '..'] + [x[1:] for x in self.files if x != '/'] + + def readlink(self, path): + return self.data[path].decode('utf-8') + + def removexattr(self, path, name): + attrs = self.files[path].get('attrs', {}) + try: + del attrs[name] + except KeyError: + pass # Should return ENOATTR + + def rename(self, old, new): + self.files[new] = self.files.pop(old) + + def rmdir(self, path): + self.files.pop(path) + self.files['/']['st_nlink'] -= 1 + + def setxattr(self, path, name, value, options, position=0): + # Ignore 
options + attrs = self.files[path].setdefault('attrs', {}) + attrs[name] = value + + def statfs(self, path): + return dict(f_bsize=512, f_blocks=4096, f_bavail=2048) + + def symlink(self, target, source): + source = source.encode('utf-8') + self.files[target] = dict(st_mode=(S_IFLNK | 0o777), st_nlink=1, + st_size=len(source)) + self.data[target] = bytearray(source) + + def truncate(self, path, length, fh=None): + del self.data[path][length:] + self.files[path]['st_size'] = length + + def unlink(self, path): + self.files.pop(path) + + def utimens(self, path, times=None): + now = time() + atime, mtime = times if times else (now, now) + self.files[path]['st_atime'] = atime + self.files[path]['st_mtime'] = mtime + + def write(self, path, data, offset, fh): + del self.data[path][offset:] + self.data[path].extend(data) + self.files[path]['st_size'] = len(self.data[path]) + return len(data) + + +if __name__ == "__main__": + if len(argv) != 2: + print('usage: %s ' % argv[0]) + exit(1) + logging.getLogger().setLevel(logging.DEBUG) + fuse = FUSE(Memory(), argv[1], foreground=True) \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/memoryll.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/memoryll.py new file mode 100755 index 00000000..307a1af0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/memoryll.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python + +from collections import defaultdict +from errno import ENOENT, EROFS +from stat import S_IFMT, S_IMODE, S_IFDIR, S_IFREG +from sys import argv, exit +from time import time + +from fusell import FUSELL + + +class Memory(FUSELL): + def create_ino(self): + self.ino += 1 + return self.ino + + def init(self, userdata, conn): + self.ino = 1 + self.attr = defaultdict(dict) + self.data = defaultdict(str) + self.parent = {} + self.children = defaultdict(dict) + + self.attr[1] = {'st_ino': 1, 'st_mode': S_IFDIR | 0777, 'st_nlink': 2} + self.parent[1] 
= 1 + + forget = None + + def getattr(self, req, ino, fi): + print 'getattr:', ino + attr = self.attr[ino] + if attr: + self.reply_attr(req, attr, 1.0) + else: + self.reply_err(req, ENOENT) + + def lookup(self, req, parent, name): + print 'lookup:', parent, name + children = self.children[parent] + ino = children.get(name, 0) + attr = self.attr[ino] + + if attr: + entry = {'ino': ino, 'attr': attr, 'atttr_timeout': 1.0, 'entry_timeout': 1.0} + self.reply_entry(req, entry) + else: + self.reply_err(req, ENOENT) + + def mkdir(self, req, parent, name, mode): + print 'mkdir:', parent, name + ino = self.create_ino() + ctx = self.req_ctx(req) + now = time() + attr = { + 'st_ino': ino, + 'st_mode': S_IFDIR | mode, + 'st_nlink': 2, + 'st_uid': ctx['uid'], + 'st_gid': ctx['gid'], + 'st_atime': now, + 'st_mtime': now, + 'st_ctime': now} + + self.attr[ino] = attr + self.attr[parent]['st_nlink'] += 1 + self.parent[ino] = parent + self.children[parent][name] = ino + + entry = {'ino': ino, 'attr': attr, 'atttr_timeout': 1.0, 'entry_timeout': 1.0} + self.reply_entry(req, entry) + + def mknod(self, req, parent, name, mode, rdev): + print 'mknod:', parent, name + ino = self.create_ino() + ctx = self.req_ctx(req) + now = time() + attr = { + 'st_ino': ino, + 'st_mode': mode, + 'st_nlink': 1, + 'st_uid': ctx['uid'], + 'st_gid': ctx['gid'], + 'st_rdev': rdev, + 'st_atime': now, + 'st_mtime': now, + 'st_ctime': now} + + self.attr[ino] = attr + self.attr[parent]['st_nlink'] += 1 + self.children[parent][name] = ino + + entry = {'ino': ino, 'attr': attr, 'atttr_timeout': 1.0, 'entry_timeout': 1.0} + self.reply_entry(req, entry) + + def open(self, req, ino, fi): + print 'open:', ino + self.reply_open(req, fi) + + def read(self, req, ino, size, off, fi): + print 'read:', ino, size, off + buf = self.data[ino][off:(off + size)] + self.reply_buf(req, buf) + + def readdir(self, req, ino, size, off, fi): + print 'readdir:', ino + parent = self.parent[ino] + entries = [('.', {'st_ino': ino, 
'st_mode': S_IFDIR}), + ('..', {'st_ino': parent, 'st_mode': S_IFDIR})] + for name, child in self.children[ino].items(): + entries.append((name, self.attr[child])) + self.reply_readdir(req, size, off, entries) + + def rename(self, req, parent, name, newparent, newname): + print 'rename:', parent, name, newparent, newname + ino = self.children[parent].pop(name) + self.children[newparent][newname] = ino + self.parent[ino] = newparent + self.reply_err(req, 0) + + def setattr(self, req, ino, attr, to_set, fi): + print 'setattr:', ino, to_set + a = self.attr[ino] + for key in to_set: + if key == 'st_mode': + # Keep the old file type bit fields + a['st_mode'] = S_IFMT(a['st_mode']) | S_IMODE(attr['st_mode']) + else: + a[key] = attr[key] + self.attr[ino] = a + self.reply_attr(req, a, 1.0) + + def write(self, req, ino, buf, off, fi): + print 'write:', ino, off, len(buf) + self.data[ino] = self.data[ino][:off] + buf + self.attr[ino]['st_size'] = len(self.data[ino]) + self.reply_write(req, len(buf)) + +if __name__ == '__main__': + if len(argv) != 2: + print 'usage: %s ' % argv[0] + exit(1) + fuse = Memory(argv[1]) diff --git a/vendor/github.com/camlistore/camlistore/lib/python/fusepy/sftp.py b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/sftp.py new file mode 100755 index 00000000..019fb29d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/fusepy/sftp.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python + +from sys import argv, exit +from time import time + +from paramiko import SSHClient + +from fuse import FUSE, Operations + + +class SFTP(Operations): + """A simple SFTP filesystem. Requires paramiko: + http://www.lag.net/paramiko/ + + You need to be able to login to remote host without entering a password. 
+ """ + def __init__(self, host, path='.'): + self.client = SSHClient() + self.client.load_system_host_keys() + self.client.connect(host) + self.sftp = self.client.open_sftp() + self.root = path + + def __del__(self): + self.sftp.close() + self.client.close() + + def __call__(self, op, path, *args): + print '->', op, path, args[0] if args else '' + ret = '[Unhandled Exception]' + try: + ret = getattr(self, op)(self.root + path, *args) + return ret + except OSError, e: + ret = str(e) + raise + except IOError, e: + ret = str(e) + raise OSError(*e.args) + finally: + print '<-', op + + def chmod(self, path, mode): + return self.sftp.chmod(path, mode) + + def chown(self, path, uid, gid): + return self.sftp.chown(path, uid, gid) + + def create(self, path, mode): + f = self.sftp.open(path, 'w') + f.chmod(mode) + f.close() + return 0 + + def getattr(self, path, fh=None): + st = self.sftp.lstat(path) + return dict((key, getattr(st, key)) for key in ('st_atime', 'st_gid', + 'st_mode', 'st_mtime', 'st_size', 'st_uid')) + + def mkdir(self, path, mode): + return self.sftp.mkdir(path, mode) + + def read(self, path, size, offset, fh): + f = self.sftp.open(path) + f.seek(offset, 0) + buf = f.read(size) + f.close() + return buf + + def readdir(self, path, fh): + return ['.', '..'] + [name.encode('utf-8') for name in self.sftp.listdir(path)] + + def readlink(self, path): + return self.sftp.readlink(path) + + def rename(self, old, new): + return self.sftp.rename(old, self.root + new) + + def rmdir(self, path): + return self.sftp.rmdir(path) + + def symlink(self, target, source): + return self.sftp.symlink(source, target) + + def truncate(self, path, length, fh=None): + return self.sftp.truncate(path, length) + + def unlink(self, path): + return self.sftp.unlink(path) + + def utimens(self, path, times=None): + return self.sftp.utime(path, times) + + def write(self, path, data, offset, fh): + f = self.sftp.open(path, 'r+') + f.seek(offset, 0) + f.write(data) + f.close() + return 
len(data) + + +if __name__ == "__main__": + if len(argv) != 3: + print 'usage: %s ' % argv[0] + exit(1) + fuse = FUSE(SFTP(argv[1]), argv[2], foreground=True, nothreads=True) \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/lib/python/setup.py b/vendor/github.com/camlistore/camlistore/lib/python/setup.py new file mode 100644 index 00000000..cefac2d8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/setup.py @@ -0,0 +1,16 @@ +from setuptools import setup +setup( + name='camlistore-client', + version='1.0.3dev', + author='Brett Slatkin', + author_email='bslatkin@gmail.com', + maintainer='Jack Laxson', + maintainer_email='jackjrabbit+camli@gmail.com', + description="Client library for Camlistore.", + url='http://camlistore.org', + license='Apache v2', + long_description='A convience library for python developers wishing to explore camlistore.', + packages=['camli'], + install_requires=['simplejson'], + classifiers=['Environment :: Console', 'Topic :: Internet :: WWW/HTTP'] +) \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/lib/python/simplejson/__init__.py b/vendor/github.com/camlistore/camlistore/lib/python/simplejson/__init__.py new file mode 100644 index 00000000..dcfd5413 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/simplejson/__init__.py @@ -0,0 +1,437 @@ +r"""JSON (JavaScript Object Notation) is a subset of +JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data +interchange format. + +:mod:`simplejson` exposes an API familiar to users of the standard library +:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained +version of the :mod:`json` library contained in Python 2.6, but maintains +compatibility with Python 2.4 and Python 2.5 and (currently) has +significant performance advantages, even without using the optional C +extension for speedups. 
+ +Encoding basic Python object hierarchies:: + + >>> import simplejson as json + >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) + '["foo", {"bar": ["baz", null, 1.0, 2]}]' + >>> print json.dumps("\"foo\bar") + "\"foo\bar" + >>> print json.dumps(u'\u1234') + "\u1234" + >>> print json.dumps('\\') + "\\" + >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True) + {"a": 0, "b": 0, "c": 0} + >>> from StringIO import StringIO + >>> io = StringIO() + >>> json.dump(['streaming API'], io) + >>> io.getvalue() + '["streaming API"]' + +Compact encoding:: + + >>> import simplejson as json + >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':')) + '[1,2,3,{"4":5,"6":7}]' + +Pretty printing:: + + >>> import simplejson as json + >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ') + >>> print '\n'.join([l.rstrip() for l in s.splitlines()]) + { + "4": 5, + "6": 7 + } + +Decoding JSON:: + + >>> import simplejson as json + >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}] + >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj + True + >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar' + True + >>> from StringIO import StringIO + >>> io = StringIO('["streaming API"]') + >>> json.load(io)[0] == 'streaming API' + True + +Specializing JSON object decoding:: + + >>> import simplejson as json + >>> def as_complex(dct): + ... if '__complex__' in dct: + ... return complex(dct['real'], dct['imag']) + ... return dct + ... + >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', + ... object_hook=as_complex) + (1+2j) + >>> from decimal import Decimal + >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1') + True + +Specializing JSON object encoding:: + + >>> import simplejson as json + >>> def encode_complex(obj): + ... if isinstance(obj, complex): + ... return [obj.real, obj.imag] + ... raise TypeError(repr(o) + " is not JSON serializable") + ... 
+ >>> json.dumps(2 + 1j, default=encode_complex) + '[2.0, 1.0]' + >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j) + '[2.0, 1.0]' + >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j)) + '[2.0, 1.0]' + + +Using simplejson.tool from the shell to validate and pretty-print:: + + $ echo '{"json":"obj"}' | python -m simplejson.tool + { + "json": "obj" + } + $ echo '{ 1.2:3.4}' | python -m simplejson.tool + Expecting property name: line 1 column 2 (char 2) +""" +__version__ = '2.1.1' +__all__ = [ + 'dump', 'dumps', 'load', 'loads', + 'JSONDecoder', 'JSONDecodeError', 'JSONEncoder', + 'OrderedDict', +] + +__author__ = 'Bob Ippolito ' + +from decimal import Decimal + +from decoder import JSONDecoder, JSONDecodeError +from encoder import JSONEncoder +def _import_OrderedDict(): + import collections + try: + return collections.OrderedDict + except AttributeError: + import ordered_dict + return ordered_dict.OrderedDict +OrderedDict = _import_OrderedDict() + +def _import_c_make_encoder(): + try: + from simplejson._speedups import make_encoder + return make_encoder + except ImportError: + return None + +_default_encoder = JSONEncoder( + skipkeys=False, + ensure_ascii=True, + check_circular=True, + allow_nan=True, + indent=None, + separators=None, + encoding='utf-8', + default=None, + use_decimal=False, +) + +def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, + allow_nan=True, cls=None, indent=None, separators=None, + encoding='utf-8', default=None, use_decimal=False, **kw): + """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a + ``.write()``-supporting file-like object). + + If ``skipkeys`` is true then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. 
+ + If ``ensure_ascii`` is false, then the some chunks written to ``fp`` + may be ``unicode`` instances, subject to normal Python ``str`` to + ``unicode`` coercion rules. Unless ``fp.write()`` explicitly + understands ``unicode`` (as in ``codecs.getwriter()``) this is likely + to cause an error. + + If ``check_circular`` is false, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). + + If ``allow_nan`` is false, then it will be a ``ValueError`` to + serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) + in strict compliance of the JSON specification, instead of using the + JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). + + If *indent* is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. ``None`` (the default) selects the most compact + representation without any newlines. For backwards compatibility with + versions of simplejson earlier than 2.1.0, an integer is also accepted + and is converted to a string with that many spaces. + + If ``separators`` is an ``(item_separator, dict_separator)`` tuple + then it will be used instead of the default ``(', ', ': ')`` separators. + ``(',', ':')`` is the most compact JSON representation. + + ``encoding`` is the character encoding for str instances, default is UTF-8. + + ``default(obj)`` is a function that should return a serializable version + of obj or raise TypeError. The default simply raises TypeError. + + If *use_decimal* is true (default: ``False``) then decimal.Decimal + will be natively serialized to JSON with full precision. + + To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. 
+ + """ + # cached encoder + if (not skipkeys and ensure_ascii and + check_circular and allow_nan and + cls is None and indent is None and separators is None and + encoding == 'utf-8' and default is None and not kw): + iterable = _default_encoder.iterencode(obj) + else: + if cls is None: + cls = JSONEncoder + iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, allow_nan=allow_nan, indent=indent, + separators=separators, encoding=encoding, + default=default, use_decimal=use_decimal, **kw).iterencode(obj) + # could accelerate with writelines in some versions of Python, at + # a debuggability cost + for chunk in iterable: + fp.write(chunk) + + +def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, + allow_nan=True, cls=None, indent=None, separators=None, + encoding='utf-8', default=None, use_decimal=False, **kw): + """Serialize ``obj`` to a JSON formatted ``str``. + + If ``skipkeys`` is false then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. + + If ``ensure_ascii`` is false, then the return value will be a + ``unicode`` instance subject to normal Python ``str`` to ``unicode`` + coercion rules instead of being escaped to an ASCII ``str``. + + If ``check_circular`` is false, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). + + If ``allow_nan`` is false, then it will be a ``ValueError`` to + serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in + strict compliance of the JSON specification, instead of using the + JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). + + If ``indent`` is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. 
``None`` (the default) selects the most compact + representation without any newlines. For backwards compatibility with + versions of simplejson earlier than 2.1.0, an integer is also accepted + and is converted to a string with that many spaces. + + If ``separators`` is an ``(item_separator, dict_separator)`` tuple + then it will be used instead of the default ``(', ', ': ')`` separators. + ``(',', ':')`` is the most compact JSON representation. + + ``encoding`` is the character encoding for str instances, default is UTF-8. + + ``default(obj)`` is a function that should return a serializable version + of obj or raise TypeError. The default simply raises TypeError. + + If *use_decimal* is true (default: ``False``) then decimal.Decimal + will be natively serialized to JSON with full precision. + + To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. + + """ + # cached encoder + if (not skipkeys and ensure_ascii and + check_circular and allow_nan and + cls is None and indent is None and separators is None and + encoding == 'utf-8' and default is None and not use_decimal + and not kw): + return _default_encoder.encode(obj) + if cls is None: + cls = JSONEncoder + return cls( + skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, allow_nan=allow_nan, indent=indent, + separators=separators, encoding=encoding, default=default, + use_decimal=use_decimal, **kw).encode(obj) + + +_default_decoder = JSONDecoder(encoding=None, object_hook=None, + object_pairs_hook=None) + + +def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, object_pairs_hook=None, + use_decimal=False, **kw): + """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing + a JSON document) to a Python object. 
+ + *encoding* determines the encoding used to interpret any + :class:`str` objects decoded by this instance (``'utf-8'`` by + default). It has no effect when decoding :class:`unicode` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as :class:`unicode`. + + *object_hook*, if specified, will be called with the result of every + JSON object decoded and its return value will be used in place of the + given :class:`dict`. This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + + *object_pairs_hook* is an optional function that will be called with + the result of any object literal decode with an ordered list of pairs. + The return value of *object_pairs_hook* will be used instead of the + :class:`dict`. This feature can be used to implement custom decoders + that rely on the order that the key and value pairs are decoded (for + example, :func:`collections.OrderedDict` will remember the order of + insertion). If *object_hook* is also defined, the *object_pairs_hook* + takes priority. + + *parse_float*, if specified, will be called with the string of every + JSON float to be decoded. By default, this is equivalent to + ``float(num_str)``. This can be used to use another datatype or parser + for JSON floats (e.g. :class:`decimal.Decimal`). + + *parse_int*, if specified, will be called with the string of every + JSON int to be decoded. By default, this is equivalent to + ``int(num_str)``. This can be used to use another datatype or parser + for JSON integers (e.g. :class:`float`). + + *parse_constant*, if specified, will be called with one of the + following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This + can be used to raise an exception if invalid JSON numbers are + encountered. + + If *use_decimal* is true (default: ``False``) then it implies + parse_float=decimal.Decimal for parity with ``dump``. 
+ + To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` + kwarg. + + """ + return loads(fp.read(), + encoding=encoding, cls=cls, object_hook=object_hook, + parse_float=parse_float, parse_int=parse_int, + parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, + use_decimal=use_decimal, **kw) + + +def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, object_pairs_hook=None, + use_decimal=False, **kw): + """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON + document) to a Python object. + + *encoding* determines the encoding used to interpret any + :class:`str` objects decoded by this instance (``'utf-8'`` by + default). It has no effect when decoding :class:`unicode` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as :class:`unicode`. + + *object_hook*, if specified, will be called with the result of every + JSON object decoded and its return value will be used in place of the + given :class:`dict`. This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + + *object_pairs_hook* is an optional function that will be called with + the result of any object literal decode with an ordered list of pairs. + The return value of *object_pairs_hook* will be used instead of the + :class:`dict`. This feature can be used to implement custom decoders + that rely on the order that the key and value pairs are decoded (for + example, :func:`collections.OrderedDict` will remember the order of + insertion). If *object_hook* is also defined, the *object_pairs_hook* + takes priority. + + *parse_float*, if specified, will be called with the string of every + JSON float to be decoded. By default, this is equivalent to + ``float(num_str)``. This can be used to use another datatype or parser + for JSON floats (e.g. :class:`decimal.Decimal`). 
+ + *parse_int*, if specified, will be called with the string of every + JSON int to be decoded. By default, this is equivalent to + ``int(num_str)``. This can be used to use another datatype or parser + for JSON integers (e.g. :class:`float`). + + *parse_constant*, if specified, will be called with one of the + following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This + can be used to raise an exception if invalid JSON numbers are + encountered. + + If *use_decimal* is true (default: ``False``) then it implies + parse_float=decimal.Decimal for parity with ``dump``. + + To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` + kwarg. + + """ + if (cls is None and encoding is None and object_hook is None and + parse_int is None and parse_float is None and + parse_constant is None and object_pairs_hook is None + and not use_decimal and not kw): + return _default_decoder.decode(s) + if cls is None: + cls = JSONDecoder + if object_hook is not None: + kw['object_hook'] = object_hook + if object_pairs_hook is not None: + kw['object_pairs_hook'] = object_pairs_hook + if parse_float is not None: + kw['parse_float'] = parse_float + if parse_int is not None: + kw['parse_int'] = parse_int + if parse_constant is not None: + kw['parse_constant'] = parse_constant + if use_decimal: + if parse_float is not None: + raise TypeError("use_decimal=True implies parse_float=Decimal") + kw['parse_float'] = Decimal + return cls(encoding=encoding, **kw).decode(s) + + +def _toggle_speedups(enabled): + import simplejson.decoder as dec + import simplejson.encoder as enc + import simplejson.scanner as scan + c_make_encoder = _import_c_make_encoder() + if enabled: + dec.scanstring = dec.c_scanstring or dec.py_scanstring + enc.c_make_encoder = c_make_encoder + enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or + enc.py_encode_basestring_ascii) + scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner + else: + dec.scanstring = dec.py_scanstring + 
enc.c_make_encoder = None + enc.encode_basestring_ascii = enc.py_encode_basestring_ascii + scan.make_scanner = scan.py_make_scanner + dec.make_scanner = scan.make_scanner + global _default_decoder + _default_decoder = JSONDecoder( + encoding=None, + object_hook=None, + object_pairs_hook=None, + ) + global _default_encoder + _default_encoder = JSONEncoder( + skipkeys=False, + ensure_ascii=True, + check_circular=True, + allow_nan=True, + indent=None, + separators=None, + encoding='utf-8', + default=None, + ) diff --git a/vendor/github.com/camlistore/camlistore/lib/python/simplejson/decoder.py b/vendor/github.com/camlistore/camlistore/lib/python/simplejson/decoder.py new file mode 100644 index 00000000..4cf4015f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/simplejson/decoder.py @@ -0,0 +1,421 @@ +"""Implementation of JSONDecoder +""" +import re +import sys +import struct + +from simplejson.scanner import make_scanner +def _import_c_scanstring(): + try: + from simplejson._speedups import scanstring + return scanstring + except ImportError: + return None +c_scanstring = _import_c_scanstring() + +__all__ = ['JSONDecoder'] + +FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL + +def _floatconstants(): + _BYTES = '7FF80000000000007FF0000000000000'.decode('hex') + # The struct module in Python 2.4 would get frexp() out of range here + # when an endian is specified in the format string. 
Fixed in Python 2.5+ + if sys.byteorder != 'big': + _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1] + nan, inf = struct.unpack('dd', _BYTES) + return nan, inf, -inf + +NaN, PosInf, NegInf = _floatconstants() + + +class JSONDecodeError(ValueError): + """Subclass of ValueError with the following additional properties: + + msg: The unformatted error message + doc: The JSON document being parsed + pos: The start index of doc where parsing failed + end: The end index of doc where parsing failed (may be None) + lineno: The line corresponding to pos + colno: The column corresponding to pos + endlineno: The line corresponding to end (may be None) + endcolno: The column corresponding to end (may be None) + + """ + def __init__(self, msg, doc, pos, end=None): + ValueError.__init__(self, errmsg(msg, doc, pos, end=end)) + self.msg = msg + self.doc = doc + self.pos = pos + self.end = end + self.lineno, self.colno = linecol(doc, pos) + if end is not None: + self.endlineno, self.endcolno = linecol(doc, pos) + else: + self.endlineno, self.endcolno = None, None + + +def linecol(doc, pos): + lineno = doc.count('\n', 0, pos) + 1 + if lineno == 1: + colno = pos + else: + colno = pos - doc.rindex('\n', 0, pos) + return lineno, colno + + +def errmsg(msg, doc, pos, end=None): + # Note that this function is called from _speedups + lineno, colno = linecol(doc, pos) + if end is None: + #fmt = '{0}: line {1} column {2} (char {3})' + #return fmt.format(msg, lineno, colno, pos) + fmt = '%s: line %d column %d (char %d)' + return fmt % (msg, lineno, colno, pos) + endlineno, endcolno = linecol(doc, end) + #fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})' + #return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end) + fmt = '%s: line %d column %d - line %d column %d (char %d - %d)' + return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end) + + +_CONSTANTS = { + '-Infinity': NegInf, + 'Infinity': PosInf, + 'NaN': NaN, +} + +STRINGCHUNK = 
re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS) +BACKSLASH = { + '"': u'"', '\\': u'\\', '/': u'/', + 'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t', +} + +DEFAULT_ENCODING = "utf-8" + +def py_scanstring(s, end, encoding=None, strict=True, + _b=BACKSLASH, _m=STRINGCHUNK.match): + """Scan the string s for a JSON string. End is the index of the + character in s after the quote that started the JSON string. + Unescapes all valid JSON string escape sequences and raises ValueError + on attempt to decode an invalid string. If strict is False then literal + control characters are allowed in the string. + + Returns a tuple of the decoded string and the index of the character in s + after the end quote.""" + if encoding is None: + encoding = DEFAULT_ENCODING + chunks = [] + _append = chunks.append + begin = end - 1 + while 1: + chunk = _m(s, end) + if chunk is None: + raise JSONDecodeError( + "Unterminated string starting at", s, begin) + end = chunk.end() + content, terminator = chunk.groups() + # Content is contains zero or more unescaped string characters + if content: + if not isinstance(content, unicode): + content = unicode(content, encoding) + _append(content) + # Terminator is the end of string, a literal control character, + # or a backslash denoting that an escape sequence follows + if terminator == '"': + break + elif terminator != '\\': + if strict: + msg = "Invalid control character %r at" % (terminator,) + #msg = "Invalid control character {0!r} at".format(terminator) + raise JSONDecodeError(msg, s, end) + else: + _append(terminator) + continue + try: + esc = s[end] + except IndexError: + raise JSONDecodeError( + "Unterminated string starting at", s, begin) + # If not a unicode escape sequence, must be in the lookup table + if esc != 'u': + try: + char = _b[esc] + except KeyError: + msg = "Invalid \\escape: " + repr(esc) + raise JSONDecodeError(msg, s, end) + end += 1 + else: + # Unicode escape sequence + esc = s[end + 1:end + 5] + next_end = end + 5 + 
if len(esc) != 4: + msg = "Invalid \\uXXXX escape" + raise JSONDecodeError(msg, s, end) + uni = int(esc, 16) + # Check for surrogate pair on UCS-4 systems + if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535: + msg = "Invalid \\uXXXX\\uXXXX surrogate pair" + if not s[end + 5:end + 7] == '\\u': + raise JSONDecodeError(msg, s, end) + esc2 = s[end + 7:end + 11] + if len(esc2) != 4: + raise JSONDecodeError(msg, s, end) + uni2 = int(esc2, 16) + uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00)) + next_end += 6 + char = unichr(uni) + end = next_end + # Append the unescaped character + _append(char) + return u''.join(chunks), end + + +# Use speedup if available +scanstring = c_scanstring or py_scanstring + +WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS) +WHITESPACE_STR = ' \t\n\r' + +def JSONObject((s, end), encoding, strict, scan_once, object_hook, + object_pairs_hook, memo=None, + _w=WHITESPACE.match, _ws=WHITESPACE_STR): + # Backwards compatibility + if memo is None: + memo = {} + memo_get = memo.setdefault + pairs = [] + # Use a slice to prevent IndexError from being raised, the following + # check will raise a more specific ValueError if the string is empty + nextchar = s[end:end + 1] + # Normally we expect nextchar == '"' + if nextchar != '"': + if nextchar in _ws: + end = _w(s, end).end() + nextchar = s[end:end + 1] + # Trivial empty object + if nextchar == '}': + if object_pairs_hook is not None: + result = object_pairs_hook(pairs) + return result, end + pairs = {} + if object_hook is not None: + pairs = object_hook(pairs) + return pairs, end + 1 + elif nextchar != '"': + raise JSONDecodeError("Expecting property name", s, end) + end += 1 + while True: + key, end = scanstring(s, end, encoding, strict) + key = memo_get(key, key) + + # To skip some function call overhead we optimize the fast paths where + # the JSON key separator is ": " or just ":". 
+ if s[end:end + 1] != ':': + end = _w(s, end).end() + if s[end:end + 1] != ':': + raise JSONDecodeError("Expecting : delimiter", s, end) + + end += 1 + + try: + if s[end] in _ws: + end += 1 + if s[end] in _ws: + end = _w(s, end + 1).end() + except IndexError: + pass + + try: + value, end = scan_once(s, end) + except StopIteration: + raise JSONDecodeError("Expecting object", s, end) + pairs.append((key, value)) + + try: + nextchar = s[end] + if nextchar in _ws: + end = _w(s, end + 1).end() + nextchar = s[end] + except IndexError: + nextchar = '' + end += 1 + + if nextchar == '}': + break + elif nextchar != ',': + raise JSONDecodeError("Expecting , delimiter", s, end - 1) + + try: + nextchar = s[end] + if nextchar in _ws: + end += 1 + nextchar = s[end] + if nextchar in _ws: + end = _w(s, end + 1).end() + nextchar = s[end] + except IndexError: + nextchar = '' + + end += 1 + if nextchar != '"': + raise JSONDecodeError("Expecting property name", s, end - 1) + + if object_pairs_hook is not None: + result = object_pairs_hook(pairs) + return result, end + pairs = dict(pairs) + if object_hook is not None: + pairs = object_hook(pairs) + return pairs, end + +def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR): + values = [] + nextchar = s[end:end + 1] + if nextchar in _ws: + end = _w(s, end + 1).end() + nextchar = s[end:end + 1] + # Look-ahead for trivial empty array + if nextchar == ']': + return values, end + 1 + _append = values.append + while True: + try: + value, end = scan_once(s, end) + except StopIteration: + raise JSONDecodeError("Expecting object", s, end) + _append(value) + nextchar = s[end:end + 1] + if nextchar in _ws: + end = _w(s, end + 1).end() + nextchar = s[end:end + 1] + end += 1 + if nextchar == ']': + break + elif nextchar != ',': + raise JSONDecodeError("Expecting , delimiter", s, end) + + try: + if s[end] in _ws: + end += 1 + if s[end] in _ws: + end = _w(s, end + 1).end() + except IndexError: + pass + + return values, end + 
+class JSONDecoder(object): + """Simple JSON decoder + + Performs the following translations in decoding by default: + + +---------------+-------------------+ + | JSON | Python | + +===============+===================+ + | object | dict | + +---------------+-------------------+ + | array | list | + +---------------+-------------------+ + | string | unicode | + +---------------+-------------------+ + | number (int) | int, long | + +---------------+-------------------+ + | number (real) | float | + +---------------+-------------------+ + | true | True | + +---------------+-------------------+ + | false | False | + +---------------+-------------------+ + | null | None | + +---------------+-------------------+ + + It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as + their corresponding ``float`` values, which is outside the JSON spec. + + """ + + def __init__(self, encoding=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, strict=True, + object_pairs_hook=None): + """ + *encoding* determines the encoding used to interpret any + :class:`str` objects decoded by this instance (``'utf-8'`` by + default). It has no effect when decoding :class:`unicode` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as :class:`unicode`. + + *object_hook*, if specified, will be called with the result of every + JSON object decoded and its return value will be used in place of the + given :class:`dict`. This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + + *object_pairs_hook* is an optional function that will be called with + the result of any object literal decode with an ordered list of pairs. + The return value of *object_pairs_hook* will be used instead of the + :class:`dict`. 
This feature can be used to implement custom decoders + that rely on the order that the key and value pairs are decoded (for + example, :func:`collections.OrderedDict` will remember the order of + insertion). If *object_hook* is also defined, the *object_pairs_hook* + takes priority. + + *parse_float*, if specified, will be called with the string of every + JSON float to be decoded. By default, this is equivalent to + ``float(num_str)``. This can be used to use another datatype or parser + for JSON floats (e.g. :class:`decimal.Decimal`). + + *parse_int*, if specified, will be called with the string of every + JSON int to be decoded. By default, this is equivalent to + ``int(num_str)``. This can be used to use another datatype or parser + for JSON integers (e.g. :class:`float`). + + *parse_constant*, if specified, will be called with one of the + following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This + can be used to raise an exception if invalid JSON numbers are + encountered. + + *strict* controls the parser's behavior when it encounters an + invalid control character in a string. The default setting of + ``True`` means that unescaped control characters are parse errors, if + ``False`` then control characters will be allowed in strings. 
+ + """ + self.encoding = encoding + self.object_hook = object_hook + self.object_pairs_hook = object_pairs_hook + self.parse_float = parse_float or float + self.parse_int = parse_int or int + self.parse_constant = parse_constant or _CONSTANTS.__getitem__ + self.strict = strict + self.parse_object = JSONObject + self.parse_array = JSONArray + self.parse_string = scanstring + self.memo = {} + self.scan_once = make_scanner(self) + + def decode(self, s, _w=WHITESPACE.match): + """Return the Python representation of ``s`` (a ``str`` or ``unicode`` + instance containing a JSON document) + + """ + obj, end = self.raw_decode(s, idx=_w(s, 0).end()) + end = _w(s, end).end() + if end != len(s): + raise JSONDecodeError("Extra data", s, end, len(s)) + return obj + + def raw_decode(self, s, idx=0): + """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` + beginning with a JSON document) and return a 2-tuple of the Python + representation and the index in ``s`` where the document ended. + + This can be used to decode a JSON document from a string that may + have extraneous data at the end. 
+ + """ + try: + obj, end = self.scan_once(s, idx) + except StopIteration: + raise JSONDecodeError("No JSON object could be decoded", s, idx) + return obj, end diff --git a/vendor/github.com/camlistore/camlistore/lib/python/simplejson/encoder.py b/vendor/github.com/camlistore/camlistore/lib/python/simplejson/encoder.py new file mode 100644 index 00000000..cab84565 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/lib/python/simplejson/encoder.py @@ -0,0 +1,501 @@ +"""Implementation of JSONEncoder +""" +import re +from decimal import Decimal + +def _import_speedups(): + try: + from simplejson import _speedups + return _speedups.encode_basestring_ascii, _speedups.make_encoder + except ImportError: + return None, None +c_encode_basestring_ascii, c_make_encoder = _import_speedups() + +from simplejson.decoder import PosInf + +ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') +ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') +HAS_UTF8 = re.compile(r'[\x80-\xff]') +ESCAPE_DCT = { + '\\': '\\\\', + '"': '\\"', + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +for i in range(0x20): + #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) + ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) + +FLOAT_REPR = repr + +def encode_basestring(s): + """Return a JSON representation of a Python string + + """ + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + return ESCAPE_DCT[match.group(0)] + return u'"' + ESCAPE.sub(replace, s) + u'"' + + +def py_encode_basestring_ascii(s): + """Return an ASCII-only JSON representation of a Python string + + """ + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + s = match.group(0) + try: + return ESCAPE_DCT[s] + except KeyError: + n = ord(s) + if n < 0x10000: + #return '\\u{0:04x}'.format(n) + return '\\u%04x' % (n,) + else: + # surrogate pair + n -= 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + s2 = 
0xdc00 | (n & 0x3ff) + #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) + return '\\u%04x\\u%04x' % (s1, s2) + return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' + + +encode_basestring_ascii = ( + c_encode_basestring_ascii or py_encode_basestring_ascii) + +class JSONEncoder(object): + """Extensible JSON encoder for Python data structures. + + Supports the following objects and types by default: + + +-------------------+---------------+ + | Python | JSON | + +===================+===============+ + | dict | object | + +-------------------+---------------+ + | list, tuple | array | + +-------------------+---------------+ + | str, unicode | string | + +-------------------+---------------+ + | int, long, float | number | + +-------------------+---------------+ + | True | true | + +-------------------+---------------+ + | False | false | + +-------------------+---------------+ + | None | null | + +-------------------+---------------+ + + To extend this to recognize other objects, subclass and implement a + ``.default()`` method with another method that returns a serializable + object for ``o`` if possible, otherwise it should call the superclass + implementation (to raise ``TypeError``). + + """ + item_separator = ', ' + key_separator = ': ' + def __init__(self, skipkeys=False, ensure_ascii=True, + check_circular=True, allow_nan=True, sort_keys=False, + indent=None, separators=None, encoding='utf-8', default=None, + use_decimal=False): + """Constructor for JSONEncoder, with sensible defaults. + + If skipkeys is false, then it is a TypeError to attempt + encoding of keys that are not str, int, long, float or None. If + skipkeys is True, such items are simply skipped. + + If ensure_ascii is true, the output is guaranteed to be str + objects with all incoming unicode characters escaped. If + ensure_ascii is false, the output will be unicode object. 
+ + If check_circular is true, then lists, dicts, and custom encoded + objects will be checked for circular references during encoding to + prevent an infinite recursion (which would cause an OverflowError). + Otherwise, no such check takes place. + + If allow_nan is true, then NaN, Infinity, and -Infinity will be + encoded as such. This behavior is not JSON specification compliant, + but is consistent with most JavaScript based encoders and decoders. + Otherwise, it will be a ValueError to encode such floats. + + If sort_keys is true, then the output of dictionaries will be + sorted by key; this is useful for regression tests to ensure + that JSON serializations can be compared on a day-to-day basis. + + If indent is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. ``None`` (the default) selects the most compact + representation without any newlines. For backwards compatibility with + versions of simplejson earlier than 2.1.0, an integer is also accepted + and is converted to a string with that many spaces. + + If specified, separators should be a (item_separator, key_separator) + tuple. The default is (', ', ': '). To get the most compact JSON + representation you should specify (',', ':') to eliminate whitespace. + + If specified, default is a function that gets called for objects + that can't otherwise be serialized. It should return a JSON encodable + version of the object or raise a ``TypeError``. + + If encoding is not None, then all input strings will be + transformed into unicode using that encoding prior to JSON-encoding. + The default is UTF-8. + + If use_decimal is true (not the default), ``decimal.Decimal`` will + be supported directly by the encoder. For the inverse, decode JSON + with ``parse_float=decimal.Decimal``. 
+ + """ + + self.skipkeys = skipkeys + self.ensure_ascii = ensure_ascii + self.check_circular = check_circular + self.allow_nan = allow_nan + self.sort_keys = sort_keys + self.use_decimal = use_decimal + if isinstance(indent, (int, long)): + indent = ' ' * indent + self.indent = indent + if separators is not None: + self.item_separator, self.key_separator = separators + if default is not None: + self.default = default + self.encoding = encoding + + def default(self, o): + """Implement this method in a subclass such that it returns + a serializable object for ``o``, or calls the base implementation + (to raise a ``TypeError``). + + For example, to support arbitrary iterators, you could + implement default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + return JSONEncoder.default(self, o) + + """ + raise TypeError(repr(o) + " is not JSON serializable") + + def encode(self, o): + """Return a JSON string representation of a Python data structure. + + >>> from simplejson import JSONEncoder + >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) + '{"foo": ["bar", "baz"]}' + + """ + # This is for extremely simple cases and benchmarks. + if isinstance(o, basestring): + if isinstance(o, str): + _encoding = self.encoding + if (_encoding is not None + and not (_encoding == 'utf-8')): + o = o.decode(_encoding) + if self.ensure_ascii: + return encode_basestring_ascii(o) + else: + return encode_basestring(o) + # This doesn't pass the iterator directly to ''.join() because the + # exceptions aren't as detailed. The list call should be roughly + # equivalent to the PySequence_Fast that ''.join() would do. 
+ chunks = self.iterencode(o, _one_shot=True) + if not isinstance(chunks, (list, tuple)): + chunks = list(chunks) + if self.ensure_ascii: + return ''.join(chunks) + else: + return u''.join(chunks) + + def iterencode(self, o, _one_shot=False): + """Encode the given object and yield each string + representation as available. + + For example:: + + for chunk in JSONEncoder().iterencode(bigobject): + mysocket.write(chunk) + + """ + if self.check_circular: + markers = {} + else: + markers = None + if self.ensure_ascii: + _encoder = encode_basestring_ascii + else: + _encoder = encode_basestring + if self.encoding != 'utf-8': + def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): + if isinstance(o, str): + o = o.decode(_encoding) + return _orig_encoder(o) + + def floatstr(o, allow_nan=self.allow_nan, + _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf): + # Check for specials. Note that this type of test is processor + # and/or platform-specific, so do tests which don't depend on + # the internals. + + if o != o: + text = 'NaN' + elif o == _inf: + text = 'Infinity' + elif o == _neginf: + text = '-Infinity' + else: + return _repr(o) + + if not allow_nan: + raise ValueError( + "Out of range float values are not JSON compliant: " + + repr(o)) + + return text + + + key_memo = {} + if (_one_shot and c_make_encoder is not None + and not self.indent and not self.sort_keys): + _iterencode = c_make_encoder( + markers, self.default, _encoder, self.indent, + self.key_separator, self.item_separator, self.sort_keys, + self.skipkeys, self.allow_nan, key_memo, self.use_decimal) + else: + _iterencode = _make_iterencode( + markers, self.default, _encoder, self.indent, floatstr, + self.key_separator, self.item_separator, self.sort_keys, + self.skipkeys, _one_shot, self.use_decimal) + try: + return _iterencode(o, 0) + finally: + key_memo.clear() + + +class JSONEncoderForHTML(JSONEncoder): + """An encoder that produces JSON safe to embed in HTML. 
+ + To embed JSON content in, say, a script tag on a web page, the + characters &, < and > should be escaped. They cannot be escaped + with the usual entities (e.g. &) because they are not expanded + within + {{end}} + {{end}} + + {{define "messages"}} +
    +

    Camlistore on Google Cloud

    + + {{if .InstanceIP}} +

    Success. Your Camlistore instance should be up at https://{{.InstanceIP}}. It can take a couple of minutes to be ready.

    +

    Please save the information on this page in case you need to come back for the instruction.

    + +

    First connection

    +

    + A self-signed HTTPS certificate was automatically generated with "{{.Conf.Hostname}}" as the common name.
    + You will need to add an exception for it in your browser when you get a security warning the first time you connect. At which point you should check that the certificate fingerprint matches one of: + + + +
    SHA-1{{.CertFingerprintSHA1}}
    SHA-256{{.CertFingerprintSHA256}}
    +

    + +

    Further configuration

    +

    + Manage your instance at {{.ProjectConsoleURL}}. +

    + +

    + To change your login and password, go to the camlistore-server instance page. Set camlistore-username and/or camlistore-password in the custom metadata section. Then restart Camlistore. +

    + +

    + If you want to use your own HTTPS certificate and key, go to the storage browser. Delete "` + certFilename + `", "` + keyFilename + `", and replace them by uploading your own files (with the same names). Then restart Camlistore. +

    + +

    + To manage/add SSH keys, go to the camlistore-server instance page. Scroll down to the SSH Keys section. +

    + {{end}} + {{if .Err}} +

    {{.Err}}

    + {{range $hint := .Hints}} +

    {{$hint}}

    + {{end}} + {{end}} + {{end}} + +{{define "withform"}} + +{{template "header" .}} + + {{if .InstanceKey}} +
    + {{end}} + {{template "banner" .}} + {{template "toplinks" .}} + {{template "progress" .}} + {{template "messages" .}} +
    + + +

    Deploy Camlistore on Google Cloud

    + +

    +This tool helps you create your own private Camlistore instance running on Google's cloud. Be sure to understand Google Compute Engine's pricing before proceeding. To delete your instance and stop paying Google for the virtual machine, visit the Google Cloud console. +

    + + + + + + + + +
    Project ID
    +
      +
    • Select a Google Project in which to create the VM. If it doesn't already exist, create it first before using this Camlistore creation tool.
    • +
    • Requirements:
    • +
        +
      • Enable billing. (Billing & settings)
      • +
      • APIs and auth > APIs > Google Cloud Storage
      • +
      • APIs and auth > APIs > Google Cloud Storage JSON API
      • +
      • APIs and auth > APIs > Google Compute Engine
      • +
      • APIs and auth > APIs > Google Cloud Logging API
      • +
      +
    +
    New password
    New password for your Camlistore server.
    Zone + + + {{range $k, $v := .ZoneValues}} + + {{end}} + +
    Machine type + + + {{range $k, $v := .MachineValues}} + + {{end}} + +

    (it will ask for permissions)
    +
    +
    + {{template "footer" .}} + {{if .InstanceKey}} +
    + {{end}} + + +{{end}} + +{{define "noform"}} + +{{template "header" .}} + + {{if .InstanceKey}} +
    + {{end}} + {{template "banner" .}} + {{template "toplinks" .}} + {{template "progress" .}} + {{template "messages" .}} + {{template "footer" .}} + {{if .InstanceKey}} +
    + {{end}} + + +{{end}} +` diff --git a/vendor/github.com/camlistore/camlistore/pkg/deploy/gce/notes.txt b/vendor/github.com/camlistore/camlistore/pkg/deploy/gce/notes.txt new file mode 100644 index 00000000..f62b16e0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/deploy/gce/notes.txt @@ -0,0 +1,31 @@ +non-core dev: +gcutil --service_version="v1" --project="camanaged" addinstance "camlistore" --zone="us-central1-b" --machine_type="n1-standard-1" --network="default" --external_ip_address="107.178.214.163" --metadata="cam-key-1:cam-value-1" --metadata="cam-key-2:cam-value-2" --metadata="sshKeys:bradfitz:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw6Dk3iskKylP2zginCOAzIunMA38vGL9b/i18UG/Iuq+jKczZXB/1dlcZGSOs3+LtGh/C341TXTioydxTw+ux1AbmUk4c6L404skl85XFOys/GLxA4sHxBSb5we0Q57yohSgeZNlQd+Scmu5v7WC0N7I3hOK0lJgtxRNyC2nncGC0UOm+IGPTWcqPJERTauH/OhoAddWQehf1ugxTJYFU9atl3Op/mDXfyGBSLweWAQ84fhVKRZnl4i9Yhk1b357Q8cVKH6UQUADVamo7CQOsenzx99UL0thFRTSbuKALyf9e+SPwJrtIxZaX+skVSR+CzooRbypIamLbNXhfbxNz bradfitz@Bradleys-MacBook-Air.local" --service_account_scopes="https://www.googleapis.com/auth/userinfo.email,https://www.googleapis.com/auth/compute.readonly,https://www.googleapis.com/auth/devstorage.full_control,https://www.googleapis.com/auth/sqlservice,https://www.googleapis.com/auth/sqlservice.admin,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/datastore,https://www.googleapis.com/auth/userinfo.email,https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/devstorage.full_control,https://www.googleapis.com/auth/taskqueue,https://www.googleapis.com/auth/bigquery,https://www.googleapis.com/auth/sqlservice,https://www.googleapis.com/auth/datastore" --tags="tag,tag2,http-server,https-server" --persistent_boot_disk="true" --auto_delete_boot_disk="false" --image=projects/debian-cloud/global/images/backports-debian-7-wheezy-v20140718 + +$ curl -H "Metadata-Flavor:Google" 
http://metadata/computeMetadata/v1/instance/service-accounts/default/scopes +https://www.googleapis.com/auth/bigquery +https://www.googleapis.com/auth/cloud-platform +https://www.googleapis.com/auth/compute +https://www.googleapis.com/auth/compute.readonly +https://www.googleapis.com/auth/datastore +https://www.googleapis.com/auth/devstorage.full_control +https://www.googleapis.com/auth/sqlservice +https://www.googleapis.com/auth/sqlservice.admin +https://www.googleapis.com/auth/taskqueue +https://www.googleapis.com/auth/userinfo.email + +gcutil --project=camanaged addinstance \ + --image=projects/coreos-cloud/global/images/coreos-alpha-394-0-0-v20140801 \ + --persistent_boot_disk \ + --zone=us-central1-a --machine_type=n1-standard-1 \ + --external_ip_address=107.178.208.16 \ + --auto_delete_boot_disk \ + --tags=http-server,https-server \ + --metadata_from_file=user-data:cloud-config.yaml core1 + +TODO: +- allow config from /gcs/bucket/key; add pkg for os.Stat/os.Open wrappers checking + prefix +- use that package for: + "httpsCert": "/home/bradfitz/keys/camlihouse/ssl.crt", + "httpsKey": "/home/bradfitz/keys/camlihouse/ssl.key", + "identitySecretRing": "/home/bradfitz/.config/camlistore/identity-secring.gpg", diff --git a/vendor/github.com/camlistore/camlistore/pkg/env/env.go b/vendor/github.com/camlistore/camlistore/pkg/env/env.go new file mode 100644 index 00000000..79befcaf --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/env/env.go @@ -0,0 +1,69 @@ +/* +Copyright 2015 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package env detects what sort of environment Camlistore is running in. +package env + +import ( + "os" + "strconv" + "sync" + + "google.golang.org/cloud/compute/metadata" +) + +// IsDebug reports whether this is a debug environment. +func IsDebug() bool { + return isDebug +} + +// DebugUploads reports whether this is a debug environment for uploads. +func DebugUploads() bool { + return isDebugUploads +} + +// IsDev reports whether this is a development server environment (devcam server). +func IsDev() bool { + return isDev +} + +// OsGCE reports whether this process is running in a Google Compute +// Engine (GCE) environment. This only returns true if the +// "camlistore-config-dir" instance metadata value is defined. +// Instances running in custom configs on GCE will be unaffected. +func OnGCE() bool { + gceOnce.Do(detectGCE) + return isGCE +} + +var ( + gceOnce sync.Once + isGCE bool +) + +func detectGCE() { + if !metadata.OnGCE() { + return + } + v, _ := metadata.InstanceAttributeValue("camlistore-config-dir") + isGCE = v != "" +} + +var ( + isDev = os.Getenv("CAMLI_DEV_CAMLI_ROOT") != "" + isDebug, _ = strconv.ParseBool(os.Getenv("CAMLI_DEBUG")) + isDebugUploads, _ = strconv.ParseBool(os.Getenv("CAMLI_DEBUG_UPLOADS")) +) diff --git a/vendor/github.com/camlistore/camlistore/pkg/fault/fault.go b/vendor/github.com/camlistore/camlistore/pkg/fault/fault.go new file mode 100644 index 00000000..e3ac452f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fault/fault.go @@ -0,0 +1,59 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fault handles fault injection for testing. +package fault + +import ( + "errors" + "math/rand" + "os" + "strconv" + "strings" +) + +var fakeErr = errors.New("fake injected error for testing") + +// An Injector reports whether fake errors should be returned. +type Injector struct { + failPercent int +} + +// NewInjector returns a new fault injector with the given name. The +// environment variable "FAULT_" + capital(name) + "_FAIL_PERCENT" +// controls the percentage of requests that fail. If undefined or +// zero, no requests fail. +func NewInjector(name string) *Injector { + var failPercent, _ = strconv.Atoi(os.Getenv("FAULT_" + strings.ToUpper(name) + "_FAIL_PERCENT")) + return &Injector{ + failPercent: failPercent, + } +} + +// ShouldFail reports whether a fake error should be returned. +func (in *Injector) ShouldFail() bool { + return in.failPercent > 0 && in.failPercent > rand.Intn(100) +} + +// FailErr checks ShouldFail and, if true, assigns a fake error to err +// and returns true. +func (in *Injector) FailErr(err *error) bool { + if !in.ShouldFail() { + return false + } + *err = fakeErr + return true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fileembed/fileembed.go b/vendor/github.com/camlistore/camlistore/pkg/fileembed/fileembed.go new file mode 100644 index 00000000..c3e5d324 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fileembed/fileembed.go @@ -0,0 +1,331 @@ +/* +Copyright 2011 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fileembed provides access to static data resources (images, +// HTML, css, etc) embedded into the binary with genfileembed. +// +// Most of the package contains internal details used by genfileembed. +// Normal applications will simply make a global Files variable. +package fileembed + +import ( + "compress/zlib" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "path/filepath" + "strings" + "sync" + "time" +) + +// Files contains all the embedded resources. +type Files struct { + // Optional environment variable key to override + OverrideEnv string + + // Optional fallback directory to check, if not in memory. + DirFallback string + + // SlurpToMemory controls whether on first access the file is + // slurped into memory. It's intended for use with DirFallback. + SlurpToMemory bool + + // Listable controls whether requests for the http file "/" return + // a directory of available files. Must be set to true for + // http.FileServer to correctly handle requests for index.html. + Listable bool + + lk sync.Mutex + file map[string]*staticFile +} + +type staticFile struct { + name string + contents []byte + modtime time.Time +} + +type Opener interface { + Open() (io.Reader, error) +} + +type String string + +func (s String) Open() (io.Reader, error) { + return strings.NewReader(string(s)), nil +} + +// ZlibCompressed is used to store a compressed file. 
+type ZlibCompressed string + +func (zb ZlibCompressed) Open() (io.Reader, error) { + rz, err := zlib.NewReader(strings.NewReader(string(zb))) + if err != nil { + return nil, fmt.Errorf("Could not open ZlibCompressed: %v", err) + } + return rz, nil +} + +// ZlibCompressedBase64 is used to store a compressed file. +// Unlike ZlibCompressed, the string is base64 encoded, +// in standard base64 encoding. +type ZlibCompressedBase64 string + +func (zb ZlibCompressedBase64) Open() (io.Reader, error) { + rz, err := zlib.NewReader(base64.NewDecoder(base64.StdEncoding, strings.NewReader(string(zb)))) + if err != nil { + return nil, fmt.Errorf("Could not open ZlibCompressedBase64: %v", err) + } + return rz, nil +} + +// Multi concatenates multiple Openers into one, like io.MultiReader. +func Multi(openers ...Opener) Opener { + return multi(openers) +} + +type multi []Opener + +func (m multi) Open() (io.Reader, error) { + rs := make([]io.Reader, 0, len(m)) + for _, o := range m { + r, err := o.Open() + if err != nil { + return nil, err + } + rs = append(rs, r) + } + return io.MultiReader(rs...), nil +} + +// Add adds a file to the file set. 
+func (f *Files) Add(filename string, size int64, modtime time.Time, o Opener) { + f.lk.Lock() + defer f.lk.Unlock() + + r, err := o.Open() + if err != nil { + log.Printf("Could not add file %v: %v", filename, err) + return + } + contents, err := ioutil.ReadAll(r) + if err != nil { + log.Printf("Could not read contents of file %v: %v", filename, err) + return + } + + f.add(filename, &staticFile{ + name: filename, + contents: contents, + modtime: modtime, + }) +} + +// f.lk must be locked +func (f *Files) add(filename string, sf *staticFile) { + if f.file == nil { + f.file = make(map[string]*staticFile) + } + f.file[filename] = sf +} + +var _ http.FileSystem = (*Files)(nil) + +func (f *Files) Open(filename string) (hf http.File, err error) { + // don't bother locking f.lk here, because Listable will normally be set on initialization + if filename == "/" && f.Listable { + return openDir(f) + } + filename = strings.TrimLeft(filename, "/") + if e := f.OverrideEnv; e != "" && os.Getenv(e) != "" { + diskPath := filepath.Join(os.Getenv(e), filename) + return os.Open(diskPath) + } + f.lk.Lock() + defer f.lk.Unlock() + sf, ok := f.file[filename] + if !ok { + return f.openFallback(filename) + } + return &fileHandle{sf: sf}, nil +} + +// f.lk is held +func (f *Files) openFallback(filename string) (http.File, error) { + if f.DirFallback == "" { + return nil, os.ErrNotExist + } + of, err := os.Open(filepath.Join(f.DirFallback, filename)) + switch { + case err != nil: + return nil, err + case f.SlurpToMemory: + defer of.Close() + bs, err := ioutil.ReadAll(of) + if err != nil { + return nil, err + } + fi, err := of.Stat() + + sf := &staticFile{ + name: filename, + contents: bs, + modtime: fi.ModTime(), + } + f.add(filename, sf) + return &fileHandle{sf: sf}, nil + } + return of, nil +} + +type fileHandle struct { + sf *staticFile + off int64 + closed bool +} + +var _ http.File = (*fileHandle)(nil) + +func (f *fileHandle) Close() error { + if f.closed { + return os.ErrInvalid + } + 
f.closed = true + return nil +} + +func (f *fileHandle) Read(p []byte) (n int, err error) { + if f.off >= int64(len(f.sf.contents)) { + return 0, io.EOF + } + n = copy(p, f.sf.contents[f.off:]) + f.off += int64(n) + return +} + +func (f *fileHandle) Readdir(int) ([]os.FileInfo, error) { + return nil, errors.New("not directory") +} + +func (f *fileHandle) Seek(offset int64, whence int) (int64, error) { + switch whence { + case os.SEEK_SET: + f.off = offset + case os.SEEK_CUR: + f.off += offset + case os.SEEK_END: + f.off = f.sf.Size() + offset + default: + return 0, os.ErrInvalid + } + if f.off < 0 { + f.off = 0 + } + return f.off, nil +} + +func (f *fileHandle) Stat() (os.FileInfo, error) { + return f.sf, nil +} + +var _ os.FileInfo = (*staticFile)(nil) + +func (f *staticFile) Name() string { return f.name } +func (f *staticFile) Size() int64 { return int64(len(f.contents)) } +func (f *staticFile) Mode() os.FileMode { return 0444 } +func (f *staticFile) ModTime() time.Time { return f.modtime } +func (f *staticFile) IsDir() bool { return false } +func (f *staticFile) Sys() interface{} { return nil } + +func openDir(f *Files) (hf http.File, err error) { + f.lk.Lock() + defer f.lk.Unlock() + + allFiles := make([]os.FileInfo, 0, len(f.file)) + var dirModtime time.Time + + for filename, sfile := range f.file { + if strings.Contains(filename, "/") { + continue // skip child directories; we only support readdir on the rootdir for now + } + allFiles = append(allFiles, sfile) + // a directory's modtime is the maximum contained modtime + if sfile.modtime.After(dirModtime) { + dirModtime = sfile.modtime + } + } + + return &dirHandle{ + sd: &staticDir{name: "/", modtime: dirModtime}, + files: allFiles, + }, nil +} + +type dirHandle struct { + sd *staticDir + files []os.FileInfo + off int +} + +func (d *dirHandle) Readdir(n int) ([]os.FileInfo, error) { + if n <= 0 { + return d.files, nil + } + if d.off >= len(d.files) { + return []os.FileInfo{}, io.EOF + } + + if d.off+n > 
len(d.files) { + n = len(d.files) - d.off + } + matches := d.files[d.off : d.off+n] + d.off += n + + var err error + if d.off > len(d.files) { + err = io.EOF + } + + return matches, err +} + +func (d *dirHandle) Close() error { return nil } +func (d *dirHandle) Read(p []byte) (int, error) { return 0, errors.New("not file") } +func (d *dirHandle) Seek(int64, int) (int64, error) { return 0, os.ErrInvalid } +func (d *dirHandle) Stat() (os.FileInfo, error) { return d.sd, nil } + +type staticDir struct { + name string + modtime time.Time +} + +func (d *staticDir) Name() string { return d.name } +func (d *staticDir) Size() int64 { return 0 } +func (d *staticDir) Mode() os.FileMode { return 0444 | os.ModeDir } +func (d *staticDir) ModTime() time.Time { return d.modtime } +func (d *staticDir) IsDir() bool { return true } +func (d *staticDir) Sys() interface{} { return nil } + +// JoinStrings joins returns the concatentation of ss. +func JoinStrings(ss ...string) string { + return strings.Join(ss, "") +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fileembed/genfileembed/genfileembed.go b/vendor/github.com/camlistore/camlistore/pkg/fileembed/genfileembed/genfileembed.go new file mode 100644 index 00000000..a19fb3d0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fileembed/genfileembed/genfileembed.go @@ -0,0 +1,372 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// The genfileembed command embeds resources into Go files, to eliminate run-time +// dependencies on files on the filesystem. +package main + +import ( + "bytes" + "compress/zlib" + "crypto/sha1" + "encoding/base64" + "flag" + "fmt" + "go/parser" + "go/printer" + "go/token" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "camlistore.org/pkg/rollsum" +) + +var ( + processAll = flag.Bool("all", false, "process all files (if false, only process modified files)") + + fileEmbedPkgPath = flag.String("fileembed-package", "camlistore.org/pkg/fileembed", "the Go package name for fileembed. If you have vendored fileembed (e.g. with goven), you can use this flag to ensure that generated code imports the vendored package.") + + chunkThreshold = flag.Int64("chunk-threshold", 0, "If non-zero, the maximum size of a file before it's cut up into content-addressable chunks with a rolling checksum") + chunkPackage = flag.String("chunk-package", "", "Package to hold chunks") + + destFilesStderr = flag.Bool("output-files-stderr", false, "Write the absolute path of all output files to stderr prefixed with OUTPUT:") + + patternFilename = flag.String("pattern-file", "fileembed.go", "Filepath relative to from which to read the #fileembed pattern") + + buildTags = flag.String("build-tags", "", "Add these tags as +build constraints to the resulting zembed_*.go files") +) + +const ( + maxUncompressed = 50 << 10 // 50KB + // Threshold ratio for compression. + // Files which don't compress at least as well are kept uncompressed. + zRatio = 0.5 +) + +func usage() { + fmt.Fprintf(os.Stderr, "usage: genfileembed [flags] []\n") + flag.PrintDefaults() + os.Exit(2) +} + +func main() { + flag.Usage = usage + flag.Parse() + + absPath, err := os.Getwd() // absolute path of output directory + if err != nil { + log.Fatal(err) + } + dir := "." 
+ switch flag.NArg() { + case 0: + case 1: + dir = flag.Arg(0) + if err := os.Chdir(dir); err != nil { + log.Fatalf("chdir(%q) = %v", dir, err) + } + if filepath.IsAbs(dir) { + absPath = dir + } else { + absPath = filepath.Join(absPath, dir) + } + default: + flag.Usage() + } + + pkgName, filePattern, fileEmbedModTime, err := parseFileEmbed() + if err != nil { + log.Fatalf("Error parsing %s/%s: %v", dir, *patternFilename, err) + } + + for _, fileName := range matchingFiles(filePattern) { + fi, err := os.Stat(fileName) + if err != nil { + log.Fatal(err) + } + + embedName := "zembed_" + strings.Replace(fileName, string(filepath.Separator), "_", -1) + ".go" + if *destFilesStderr { + fmt.Fprintf(os.Stderr, "OUTPUT:%s\n", filepath.Join(absPath, embedName)) + } + zfi, zerr := os.Stat(embedName) + genFile := func() bool { + if *processAll || zerr != nil { + return true + } + if zfi.ModTime().Before(fi.ModTime()) { + return true + } + if zfi.ModTime().Before(fileEmbedModTime) { + return true + } + return false + } + if !genFile() { + continue + } + log.Printf("Updating %s (package %s)", embedName, pkgName) + + bs, err := ioutil.ReadFile(fileName) + if err != nil { + log.Fatal(err) + } + + zb, fileSize := compressFile(bytes.NewReader(bs)) + ratio := float64(len(zb)) / float64(fileSize) + byteStreamType := "" + var qb []byte // quoted string, or Go expression evaluating to a string + var imports string + if *chunkThreshold > 0 && int64(len(bs)) > *chunkThreshold { + byteStreamType = "fileembed.Multi" + qb = chunksOf(bs) + if *chunkPackage == "" { + log.Fatalf("Must provide a --chunk-package value with --chunk-threshold") + } + imports = fmt.Sprintf("import chunkpkg \"%s\"\n", *chunkPackage) + } else if fileSize < maxUncompressed || ratio > zRatio { + byteStreamType = "fileembed.String" + qb = quote(bs) + } else { + byteStreamType = "fileembed.ZlibCompressedBase64" + qb = quote([]byte(base64.StdEncoding.EncodeToString(zb))) + } + + var b bytes.Buffer + fmt.Fprintf(&b, "// THIS 
FILE IS AUTO-GENERATED FROM %s\n", fileName) + fmt.Fprintf(&b, "// DO NOT EDIT.\n") + if *buildTags != "" { + fmt.Fprintf(&b, "// +build %s\n", *buildTags) + } + fmt.Fprintf(&b, "\n") + fmt.Fprintf(&b, "package %s\n\n", pkgName) + fmt.Fprintf(&b, "import \"time\"\n\n") + fmt.Fprintf(&b, "import \""+*fileEmbedPkgPath+"\"\n\n") + b.WriteString(imports) + fmt.Fprintf(&b, "func init() {\n\tFiles.Add(%q, %d, time.Unix(0, %d), %s(%s));\n}\n", + fileName, fileSize, fi.ModTime().UnixNano(), byteStreamType, qb) + + // gofmt it + fset := token.NewFileSet() + ast, err := parser.ParseFile(fset, "", b.Bytes(), parser.ParseComments) + if err != nil { + log.Fatal(err) + } + + var clean bytes.Buffer + config := &printer.Config{ + Mode: printer.TabIndent | printer.UseSpaces, + Tabwidth: 8, + } + err = config.Fprint(&clean, fset, ast) + if err != nil { + log.Fatal(err) + } + + if err := writeFileIfDifferent(embedName, clean.Bytes()); err != nil { + log.Fatal(err) + } + } +} + +func writeFileIfDifferent(filename string, contents []byte) error { + fi, err := os.Stat(filename) + if err == nil && fi.Size() == int64(len(contents)) && contentsEqual(filename, contents) { + os.Chtimes(filename, time.Now(), time.Now()) + return nil + } + return ioutil.WriteFile(filename, contents, 0644) +} + +func contentsEqual(filename string, contents []byte) bool { + got, err := ioutil.ReadFile(filename) + if err != nil { + return false + } + return bytes.Equal(got, contents) +} + +func compressFile(r io.Reader) ([]byte, int64) { + var zb bytes.Buffer + w := zlib.NewWriter(&zb) + n, err := io.Copy(w, r) + if err != nil { + log.Fatal(err) + } + w.Close() + return zb.Bytes(), n +} + +func quote(bs []byte) []byte { + var qb bytes.Buffer + qb.WriteString(`fileembed.JoinStrings("`) + run := 0 + concatCount := 0 + for _, b := range bs { + if b == '\n' { + qb.WriteString(`\n`) + } + if b == '\n' || run > 80 { + // Prevent too many strings from being concatenated together. 
+ // See https://code.google.com/p/go/issues/detail?id=8240 + concatCount++ + if concatCount < 50 { + qb.WriteString("\" +\n\t\"") + } else { + concatCount = 0 + qb.WriteString("\",\n\t\"") + } + run = 0 + } + if b == '\n' { + continue + } + run++ + if b == '\\' { + qb.WriteString(`\\`) + continue + } + if b == '"' { + qb.WriteString(`\"`) + continue + } + if (b >= 32 && b <= 126) || b == '\t' { + qb.WriteByte(b) + continue + } + fmt.Fprintf(&qb, "\\x%02x", b) + } + qb.WriteString(`")`) + return qb.Bytes() +} + +// matchingFiles finds all files matching a regex that should be embedded. This +// skips files prefixed with "zembed_", since those are an implementation +// detail of the embedding process itself. +func matchingFiles(p *regexp.Regexp) []string { + var f []string + err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + n := filepath.Base(path) + if !fi.IsDir() && !strings.HasPrefix(n, "zembed_") && p.MatchString(n) { + f = append(f, path) + } + return nil + }) + if err != nil { + log.Fatalf("Error walking directory tree: %s", err) + return nil + } + return f +} + +func parseFileEmbed() (pkgName string, filePattern *regexp.Regexp, modTime time.Time, err error) { + fe, err := os.Open(*patternFilename) + if err != nil { + return + } + defer fe.Close() + + fi, err := fe.Stat() + if err != nil { + return + } + modTime = fi.ModTime() + + fs := token.NewFileSet() + astf, err := parser.ParseFile(fs, *patternFilename, fe, parser.PackageClauseOnly|parser.ParseComments) + if err != nil { + return + } + pkgName = astf.Name.Name + + if astf.Doc == nil { + err = fmt.Errorf("no package comment before the %q line", "package "+pkgName) + return + } + + pkgComment := astf.Doc.Text() + findPattern := regexp.MustCompile(`(?m)^#fileembed\s+pattern\s+(\S+)\s*$`) + m := findPattern.FindStringSubmatch(pkgComment) + if m == nil { + err = fmt.Errorf("package comment lacks line of form: #fileembed pattern ") + return + } + 
pattern := m[1] + filePattern, err = regexp.Compile(pattern) + if err != nil { + err = fmt.Errorf("bad regexp %q: %v", pattern, err) + return + } + return +} + +// chunksOf takes a (presumably large) file's uncompressed input, +// rolling-checksum splits it into ~514 byte chunks, compresses each, +// base64s each, and writes chunk files out, with each file just +// defining an exported fileembed.Opener variable named C where +// xxxx is the first 8 lowercase hex digits of the SHA-1 of the chunk +// value pre-compression. The return value is a Go expression +// referencing each of those chunks concatenated together. +func chunksOf(in []byte) (stringExpression []byte) { + var multiParts [][]byte + rs := rollsum.New() + const nBits = 9 // ~512 byte chunks + last := 0 + for i, b := range in { + rs.Roll(b) + if rs.OnSplitWithBits(nBits) || i == len(in)-1 { + raw := in[last : i+1] // inclusive + last = i + 1 + s1 := sha1.New() + s1.Write(raw) + sha1hex := fmt.Sprintf("%x", s1.Sum(nil))[:8] + writeChunkFile(sha1hex, raw) + multiParts = append(multiParts, []byte(fmt.Sprintf("chunkpkg.C%s", sha1hex))) + } + } + return bytes.Join(multiParts, []byte(",\n\t")) +} + +func writeChunkFile(hex string, raw []byte) { + path := os.Getenv("GOPATH") + if path == "" { + log.Fatalf("No GOPATH set") + } + path = filepath.SplitList(path)[0] + file := filepath.Join(path, "src", filepath.FromSlash(*chunkPackage), "chunk_"+hex+".go") + zb, _ := compressFile(bytes.NewReader(raw)) + var buf bytes.Buffer + buf.WriteString("// THIS FILE IS AUTO-GENERATED. 
SEE README.\n\n") + buf.WriteString("package chunkpkg\n") + buf.WriteString("import \"" + *fileEmbedPkgPath + "\"\n\n") + fmt.Fprintf(&buf, "var C%s fileembed.Opener\n\nfunc init() { C%s = fileembed.ZlibCompressedBase64(%s)\n }\n", + hex, + hex, + quote([]byte(base64.StdEncoding.EncodeToString(zb)))) + err := writeFileIfDifferent(file, buf.Bytes()) + if err != nil { + log.Fatalf("Error writing chunk %s to %v: %v", hex, file, err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/at.go b/vendor/github.com/camlistore/camlistore/pkg/fs/at.go new file mode 100644 index 00000000..3ae3ff0f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/at.go @@ -0,0 +1,113 @@ +// +build linux darwin + +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fs + +import ( + "log" + "os" + + "camlistore.org/third_party/bazil.org/fuse" + fusefs "camlistore.org/third_party/bazil.org/fuse/fs" +) + +type atDir struct { + noXattr + fs *CamliFileSystem +} + +func (n *atDir) Attr() fuse.Attr { + return fuse.Attr{ + Mode: os.ModeDir | 0500, + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + } +} + +func (n *atDir) ReadDir(intr fusefs.Intr) ([]fuse.Dirent, fuse.Error) { + return []fuse.Dirent{ + {Name: "README.txt"}, + }, nil +} + +const atReadme = `You are now in the "at" filesystem, where you can look into the past. + +Locations in the top-level of this directory are dynamically created +as you request them. 
A dynamic directory is designated by a +timestamp. Once you enter a directory, you'll have a read-only view +of all of the roots that existed as of the specified time. + +Example: + +If you had a root called "importantstuff" and a file in it called +"todo.txt", you can look at the contents of that file as it existed +back before Christmas like this (from the location you mounted +camlistore): + + cat at/2013-12-24/importantstuff/todo.txt + +If you cd into "at/2013-12-24/importantstuff" you can also see all the +files that you deleted since (but none that were created after). + +Timestamps are specified in UTC unless otherwise specified, and may be +in any of the following forms: + +With Nanosecond Granularity + +* 2012-08-28T21:24:35.37465188Z - RFC3339 (this is the canonical format) +* 1346189075374651880 - nanoseconds since 1970-1-1 + +With Millisecond Granularity + +* 1346189075374 - milliseconds since 1970-1-1, common in java + +With Second Granularity + +* 1346189075 - seconds since 1970-1-1, common in unix +* 2012-08-28T21:24:35Z - RFC3339 +* 2012-08-28T21:24:35-08:00 - RFC3339 with numeric timezone +* Tue, 28 Aug 2012 21:24:35 +0000 - RFC1123 + numeric timezone +* Tue, 28 Aug 2012 21:24:35 UTC RFC1123 +* Tue Aug 28 21:24:35 UTC 2012 - Unix date +* Tue Aug 28 21:24:35 2012 - ansi C timestamp +* Tue Aug 28 21:24:35 +0000 2012 - ruby datestamp + +With More Coarse Granularities + +* 2012-08-28T21:24 (This will be considered the same as 2012-08-28T21:24:00Z) +* 2012-08-28T21 (This will be considered the same as 2012-08-28T21:00:00Z) +* 2012-08-28 (This will be considered the same as 2012-08-28T00:00:00Z) +* 2012-08 (This will be considered the same as 2012-08-01T00:00:00Z) +* 2012 (This will be considered the same as 2012-01-01T00:00:00Z) +` + +func (n *atDir) Lookup(name string, intr fusefs.Intr) (fusefs.Node, fuse.Error) { + log.Printf("fs.atDir: Lookup(%q)", name) + + if name == "README.txt" { + return staticFileNode(atReadme), nil + } + + asOf, err := 
parseTime(name) + if err != nil { + log.Printf("Can't parse time: %v", err) + return nil, fuse.ENOENT + } + + return &rootsDir{fs: n.fs, at: asOf}, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/debug.go b/vendor/github.com/camlistore/camlistore/pkg/fs/debug.go new file mode 100644 index 00000000..d3877437 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/debug.go @@ -0,0 +1,139 @@ +// +build linux darwin + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fs + +import ( + "bytes" + "fmt" + "os" + "strconv" + + "camlistore.org/pkg/types" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" +) + +// If TrackStats is true, statistics are kept on operations. +var TrackStats bool + +func init() { + TrackStats, _ = strconv.ParseBool(os.Getenv("CAMLI_TRACK_FS_STATS")) +} + +var ( + mutFileOpen = newStat("mutfile-open") + mutFileOpenError = newStat("mutfile-open-error") + mutFileOpenRO = newStat("mutfile-open-ro") + mutFileOpenRW = newStat("mutfile-open-rw") + roFileOpen = newStat("rofile-open") + roFileOpenError = newStat("rofile-open-error") +) + +var statByName = map[string]*stat{} + +func newStat(name string) *stat { + if statByName[name] != nil { + panic("duplicate registraton of " + name) + } + s := &stat{name: name} + statByName[name] = s + return s +} + +// A stat is a wrapper around an atomic int64, as is a fuse.Node +// exporting that data as a decimal. 
+type stat struct { + n types.AtomicInt64 + name string +} + +func (s *stat) Incr() { + if TrackStats { + s.n.Add(1) + } +} + +func (s *stat) content() []byte { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%d", s.n.Get()) + buf.WriteByte('\n') + return buf.Bytes() +} + +func (s *stat) Attr() fuse.Attr { + return fuse.Attr{ + Mode: 0400, + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + Size: uint64(len(s.content())), + Mtime: serverStart, + Ctime: serverStart, + Crtime: serverStart, + } +} + +func (s *stat) Open(req *fuse.OpenRequest, res *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) { + // Set DirectIO to keep this file from being cached in OS X's kernel. + res.Flags |= fuse.OpenDirectIO + return s, nil +} + +func (s *stat) Read(req *fuse.ReadRequest, res *fuse.ReadResponse, intr fs.Intr) fuse.Error { + c := s.content() + if req.Offset > int64(len(c)) { + return nil + } + c = c[req.Offset:] + size := req.Size + if size > len(c) { + size = len(c) + } + res.Data = make([]byte, size) + copy(res.Data, c) + return nil +} + +// A statsDir FUSE directory node is returned by root.go, by opening +// ".camli_fs_stats" in the root directory. 
+type statsDir struct{} + +func (statsDir) Attr() fuse.Attr { + return fuse.Attr{ + Mode: os.ModeDir | 0700, + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + } +} + +func (statsDir) ReadDir(intr fs.Intr) (ents []fuse.Dirent, err fuse.Error) { + for k := range statByName { + ents = append(ents, fuse.Dirent{Name: k}) + } + return +} + +func (statsDir) Lookup(req *fuse.LookupRequest, res *fuse.LookupResponse, intr fs.Intr) (fs.Node, fuse.Error) { + name := req.Name + s, ok := statByName[name] + if !ok { + return nil, fuse.ENOENT + } + return s, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/fs.go b/vendor/github.com/camlistore/camlistore/pkg/fs/fs.go new file mode 100644 index 00000000..82453b88 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/fs.go @@ -0,0 +1,424 @@ +// +build linux darwin + +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fs implements a FUSE filesystem for Camlistore and is +// used by the cammount binary. 
+package fs + +import ( + "fmt" + "io" + "log" + "os" + "sync" + "syscall" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/client" + "camlistore.org/pkg/lru" + "camlistore.org/pkg/schema" + + "camlistore.org/third_party/bazil.org/fuse" + fusefs "camlistore.org/third_party/bazil.org/fuse/fs" +) + +var serverStart = time.Now() + +var errNotDir = fuse.Errno(syscall.ENOTDIR) + +type CamliFileSystem struct { + fetcher blob.Fetcher + client *client.Client // or nil, if not doing search queries + root fusefs.Node + + // IgnoreOwners, if true, collapses all file ownership to the + // uid/gid running the fuse filesystem, and sets all the + // permissions to 0600/0700. + IgnoreOwners bool + + blobToSchema *lru.Cache // ~map[blobstring]*schema.Blob + nameToBlob *lru.Cache // ~map[string]blob.Ref + nameToAttr *lru.Cache // ~map[string]*fuse.Attr +} + +var _ fusefs.FS = (*CamliFileSystem)(nil) + +func newCamliFileSystem(fetcher blob.Fetcher) *CamliFileSystem { + return &CamliFileSystem{ + fetcher: fetcher, + blobToSchema: lru.New(1024), // arbitrary; TODO: tunable/smarter? + nameToBlob: lru.New(1024), // arbitrary: TODO: tunable/smarter? + nameToAttr: lru.New(1024), // arbitrary: TODO: tunable/smarter? + } +} + +// NewDefaultCamliFileSystem returns a filesystem with a generic base, from which +// users can navigate by blobref, tag, date, etc. +func NewDefaultCamliFileSystem(client *client.Client, fetcher blob.Fetcher) *CamliFileSystem { + if client == nil || fetcher == nil { + panic("nil argument") + } + fs := newCamliFileSystem(fetcher) + fs.root = &root{fs: fs} // root.go + fs.client = client + return fs +} + +// NewRootedCamliFileSystem returns a CamliFileSystem with a node based on a blobref +// as its base. 
+func NewRootedCamliFileSystem(cli *client.Client, fetcher blob.Fetcher, root blob.Ref) (*CamliFileSystem, error) { + fs := newCamliFileSystem(fetcher) + fs.client = cli + + n, err := fs.newNodeFromBlobRef(root) + + if err != nil { + return nil, err + } + + fs.root = n + + return fs, nil +} + +// node implements fuse.Node with a read-only Camli "file" or +// "directory" blob. +type node struct { + noXattr + fs *CamliFileSystem + blobref blob.Ref + + pnodeModTime time.Time // optionally set by recent.go; modtime of permanode + + dmu sync.Mutex // guards dirents. acquire before mu. + dirents []fuse.Dirent // nil until populated once + + mu sync.Mutex // guards rest + attr fuse.Attr + meta *schema.Blob + lookMap map[string]blob.Ref +} + +func (n *node) Attr() (attr fuse.Attr) { + _, err := n.schema() + if err != nil { + // Hm, can't return it. Just log it I guess. + log.Printf("error fetching schema superset for %v: %v", n.blobref, err) + } + return n.attr +} + +func (n *node) addLookupEntry(name string, ref blob.Ref) { + n.mu.Lock() + defer n.mu.Unlock() + if n.lookMap == nil { + n.lookMap = make(map[string]blob.Ref) + } + n.lookMap[name] = ref +} + +func (n *node) Lookup(name string, intr fusefs.Intr) (fusefs.Node, fuse.Error) { + if name == ".quitquitquit" { + // TODO: only in dev mode + log.Fatalf("Shutting down due to .quitquitquit lookup.") + } + + // If we haven't done Readdir yet (dirents isn't set), then force a Readdir + // call to populate lookMap. + n.dmu.Lock() + loaded := n.dirents != nil + n.dmu.Unlock() + if !loaded { + n.ReadDir(nil) + } + + n.mu.Lock() + defer n.mu.Unlock() + ref, ok := n.lookMap[name] + if !ok { + return nil, fuse.ENOENT + } + return &node{fs: n.fs, blobref: ref}, nil +} + +func (n *node) schema() (*schema.Blob, error) { + // TODO: use singleflight library here instead of a lock? 
+ n.mu.Lock() + defer n.mu.Unlock() + if n.meta != nil { + return n.meta, nil + } + blob, err := n.fs.fetchSchemaMeta(n.blobref) + if err == nil { + n.meta = blob + n.populateAttr() + } + return blob, err +} + +func isWriteFlags(flags fuse.OpenFlags) bool { + // TODO read/writeness are not flags, use O_ACCMODE + return flags&fuse.OpenFlags(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE) != 0 +} + +func (n *node) Open(req *fuse.OpenRequest, res *fuse.OpenResponse, intr fusefs.Intr) (fusefs.Handle, fuse.Error) { + log.Printf("CAMLI Open on %v: %#v", n.blobref, req) + if isWriteFlags(req.Flags) { + return nil, fuse.EPERM + } + ss, err := n.schema() + if err != nil { + log.Printf("open of %v: %v", n.blobref, err) + return nil, fuse.EIO + } + if ss.Type() == "directory" { + return n, nil + } + fr, err := ss.NewFileReader(n.fs.fetcher) + if err != nil { + // Will only happen if ss.Type != "file" or "bytes" + log.Printf("NewFileReader(%s) = %v", n.blobref, err) + return nil, fuse.EIO + } + return &nodeReader{n: n, fr: fr}, nil +} + +type nodeReader struct { + n *node + fr *schema.FileReader +} + +func (nr *nodeReader) Read(req *fuse.ReadRequest, res *fuse.ReadResponse, intr fusefs.Intr) fuse.Error { + log.Printf("CAMLI nodeReader READ on %v: %#v", nr.n.blobref, req) + if req.Offset >= nr.fr.Size() { + return nil + } + size := req.Size + if int64(size)+req.Offset >= nr.fr.Size() { + size -= int((int64(size) + req.Offset) - nr.fr.Size()) + } + buf := make([]byte, size) + n, err := nr.fr.ReadAt(buf, req.Offset) + if err == io.EOF { + err = nil + } + if err != nil { + log.Printf("camli read on %v at %d: %v", nr.n.blobref, req.Offset, err) + return fuse.EIO + } + res.Data = buf[:n] + return nil +} + +func (nr *nodeReader) Release(req *fuse.ReleaseRequest, intr fusefs.Intr) fuse.Error { + log.Printf("CAMLI nodeReader RELEASE on %v", nr.n.blobref) + nr.fr.Close() + return nil +} + +func (n *node) ReadDir(intr fusefs.Intr) ([]fuse.Dirent, fuse.Error) { + log.Printf("CAMLI ReadDir 
on %v", n.blobref) + n.dmu.Lock() + defer n.dmu.Unlock() + if n.dirents != nil { + return n.dirents, nil + } + + ss, err := n.schema() + if err != nil { + log.Printf("camli.ReadDir error on %v: %v", n.blobref, err) + return nil, fuse.EIO + } + dr, err := schema.NewDirReader(n.fs.fetcher, ss.BlobRef()) + if err != nil { + log.Printf("camli.ReadDir error on %v: %v", n.blobref, err) + return nil, fuse.EIO + } + schemaEnts, err := dr.Readdir(-1) + if err != nil { + log.Printf("camli.ReadDir error on %v: %v", n.blobref, err) + return nil, fuse.EIO + } + n.dirents = make([]fuse.Dirent, 0) + for _, sent := range schemaEnts { + if name := sent.FileName(); name != "" { + n.addLookupEntry(name, sent.BlobRef()) + n.dirents = append(n.dirents, fuse.Dirent{Name: name}) + } + } + return n.dirents, nil +} + +// populateAttr should only be called once n.ss is known to be set and +// non-nil +func (n *node) populateAttr() error { + meta := n.meta + + n.attr.Mode = meta.FileMode() + + if n.fs.IgnoreOwners { + n.attr.Uid = uint32(os.Getuid()) + n.attr.Gid = uint32(os.Getgid()) + executeBit := n.attr.Mode & 0100 + n.attr.Mode = (n.attr.Mode ^ n.attr.Mode.Perm()) | 0400 | executeBit + } else { + n.attr.Uid = uint32(meta.MapUid()) + n.attr.Gid = uint32(meta.MapGid()) + } + + // TODO: inode? + + if mt := meta.ModTime(); !mt.IsZero() { + n.attr.Mtime = mt + } else { + n.attr.Mtime = n.pnodeModTime + } + + switch meta.Type() { + case "file": + n.attr.Size = uint64(meta.PartsSize()) + n.attr.Blocks = 0 // TODO: set? + n.attr.Mode |= 0400 + case "directory": + n.attr.Mode |= 0500 + case "symlink": + n.attr.Mode |= 0400 + default: + log.Printf("unknown attr ss.Type %q in populateAttr", meta.Type()) + } + return nil +} + +func (fs *CamliFileSystem) Root() (fusefs.Node, fuse.Error) { + return fs.root, nil +} + +func (fs *CamliFileSystem) Statfs(req *fuse.StatfsRequest, res *fuse.StatfsResponse, intr fusefs.Intr) fuse.Error { + // Make some stuff up, just to see if it makes "lsof" happy. 
+ res.Blocks = 1 << 35 + res.Bfree = 1 << 34 + res.Bavail = 1 << 34 + res.Files = 1 << 29 + res.Ffree = 1 << 28 + res.Namelen = 2048 + res.Bsize = 1024 + return nil +} + +// Errors returned are: +// os.ErrNotExist -- blob not found +// os.ErrInvalid -- not JSON or a camli schema blob +func (fs *CamliFileSystem) fetchSchemaMeta(br blob.Ref) (*schema.Blob, error) { + blobStr := br.String() + if blob, ok := fs.blobToSchema.Get(blobStr); ok { + return blob.(*schema.Blob), nil + } + + rc, _, err := fs.fetcher.Fetch(br) + if err != nil { + return nil, err + } + defer rc.Close() + blob, err := schema.BlobFromReader(br, rc) + if err != nil { + log.Printf("Error parsing %s as schema blob: %v", br, err) + return nil, os.ErrInvalid + } + if blob.Type() == "" { + log.Printf("blob %s is JSON but lacks camliType", br) + return nil, os.ErrInvalid + } + fs.blobToSchema.Add(blobStr, blob) + return blob, nil +} + +// consolated logic for determining a node to mount based on an arbitrary blobref +func (fs *CamliFileSystem) newNodeFromBlobRef(root blob.Ref) (fusefs.Node, error) { + blob, err := fs.fetchSchemaMeta(root) + if err != nil { + return nil, err + } + + switch blob.Type() { + case "directory": + n := &node{fs: fs, blobref: root, meta: blob} + n.populateAttr() + return n, nil + + case "permanode": + // other mutDirs listed in the default fileystem have names and are displayed + return &mutDir{fs: fs, permanode: root, name: "-"}, nil + } + + return nil, fmt.Errorf("Blobref must be of a directory or permanode got a %v", blob.Type()) +} + +type notImplementDirNode struct{ noXattr } + +func (notImplementDirNode) Attr() fuse.Attr { + return fuse.Attr{ + Mode: os.ModeDir | 0000, + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + } +} + +type staticFileNode string + +func (s staticFileNode) Attr() fuse.Attr { + return fuse.Attr{ + Mode: 0400, + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + Size: uint64(len(s)), + Mtime: serverStart, + Ctime: serverStart, + Crtime: 
serverStart, + } +} + +func (s staticFileNode) Read(req *fuse.ReadRequest, res *fuse.ReadResponse, intr fusefs.Intr) fuse.Error { + if req.Offset > int64(len(s)) { + return nil + } + s = s[req.Offset:] + size := req.Size + if size > len(s) { + size = len(s) + } + res.Data = make([]byte, size) + copy(res.Data, s) + return nil +} + +func (n staticFileNode) Getxattr(*fuse.GetxattrRequest, *fuse.GetxattrResponse, fusefs.Intr) fuse.Error { + return fuse.ENODATA +} + +func (n staticFileNode) Listxattr(*fuse.ListxattrRequest, *fuse.ListxattrResponse, fusefs.Intr) fuse.Error { + return nil +} + +func (n staticFileNode) Setxattr(*fuse.SetxattrRequest, fusefs.Intr) fuse.Error { + return fuse.EPERM +} + +func (n staticFileNode) Removexattr(*fuse.RemovexattrRequest, fusefs.Intr) fuse.Error { + return fuse.EPERM +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/fs_test.go b/vendor/github.com/camlistore/camlistore/pkg/fs/fs_test.go new file mode 100644 index 00000000..cb9450fd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/fs_test.go @@ -0,0 +1,710 @@ +// +build linux darwin + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fs + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "testing" + "time" + + "camlistore.org/pkg/test" + "camlistore.org/third_party/bazil.org/fuse/syscallx" +) + +var ( + errmu sync.Mutex + lasterr error +) + +func condSkip(t *testing.T) { + errmu.Lock() + defer errmu.Unlock() + if lasterr != nil { + t.Skipf("Skipping test; some other test already failed.") + } + if !(runtime.GOOS == "darwin" || runtime.GOOS == "linux") { + t.Skipf("Skipping test on OS %q", runtime.GOOS) + } + if runtime.GOOS == "darwin" { + _, err := os.Stat("/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs") + if os.IsNotExist(err) { + test.DependencyErrorOrSkip(t) + } else if err != nil { + t.Fatal(err) + } + } +} + +func brokenTest(t *testing.T) { + if v, _ := strconv.ParseBool(os.Getenv("RUN_BROKEN_TESTS")); !v { + t.Skipf("Skipping broken tests without RUN_BROKEN_TESTS=1") + } +} + +type mountEnv struct { + t *testing.T + mountPoint string + process *os.Process +} + +func (e *mountEnv) Stat(s *stat) int64 { + file := filepath.Join(e.mountPoint, ".camli_fs_stats", s.name) + slurp, err := ioutil.ReadFile(file) + if err != nil { + e.t.Fatal(err) + } + slurp = bytes.TrimSpace(slurp) + v, err := strconv.ParseInt(string(slurp), 10, 64) + if err != nil { + e.t.Fatalf("unexpected value %q in file %s", slurp, file) + } + return v +} + +func testName() string { + skip := 0 + for { + pc, _, _, ok := runtime.Caller(skip) + skip++ + if !ok { + panic("Failed to find test name") + } + name := strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "camlistore.org/pkg/fs.") + if strings.HasPrefix(name, "Test") { + return name + } + } +} + +func inEmptyMutDir(t *testing.T, fn func(env *mountEnv, dir string)) { + cammountTest(t, func(env *mountEnv) { + dir := filepath.Join(env.mountPoint, "roots", testName()) + if err := os.Mkdir(dir, 0755); err != nil { + t.Fatalf("Failed to make 
roots/r dir: %v", err) + } + fi, err := os.Stat(dir) + if err != nil || !fi.IsDir() { + t.Fatalf("Stat of %s dir = %v, %v; want a directory", dir, fi, err) + } + fn(env, dir) + }) +} + +func cammountTest(t *testing.T, fn func(env *mountEnv)) { + dupLog := io.MultiWriter(os.Stderr, testLog{t}) + log.SetOutput(dupLog) + defer log.SetOutput(os.Stderr) + + w := test.GetWorld(t) + mountPoint, err := ioutil.TempDir("", "fs-test-mount") + if err != nil { + t.Fatal(err) + } + defer func() { + if err := os.RemoveAll(mountPoint); err != nil { + t.Fatal(err) + } + }() + verbose := "false" + var stderrDest io.Writer = ioutil.Discard + if v, _ := strconv.ParseBool(os.Getenv("VERBOSE_FUSE")); v { + verbose = "true" + stderrDest = testLog{t} + } + if v, _ := strconv.ParseBool(os.Getenv("VERBOSE_FUSE_STDERR")); v { + stderrDest = io.MultiWriter(stderrDest, os.Stderr) + } + + mount := w.Cmd("cammount", "--debug="+verbose, mountPoint) + mount.Stderr = stderrDest + mount.Env = append(mount.Env, "CAMLI_TRACK_FS_STATS=1") + + stdin, err := mount.StdinPipe() + if err != nil { + t.Fatal(err) + } + if err := w.Ping(); err != nil { + t.Fatal(err) + } + if err := mount.Start(); err != nil { + t.Fatal(err) + } + waitc := make(chan error, 1) + go func() { waitc <- mount.Wait() }() + defer func() { + log.Printf("Sending quit") + stdin.Write([]byte("q\n")) + select { + case <-time.After(5 * time.Second): + log.Printf("timeout waiting for cammount to finish") + mount.Process.Kill() + Unmount(mountPoint) + case err := <-waitc: + log.Printf("cammount exited: %v", err) + } + if !test.WaitFor(not(dirToBeFUSE(mountPoint)), 5*time.Second, 1*time.Second) { + // It didn't unmount. Try again. 
+ Unmount(mountPoint) + } + }() + + if !test.WaitFor(dirToBeFUSE(mountPoint), 5*time.Second, 100*time.Millisecond) { + t.Fatalf("error waiting for %s to be mounted", mountPoint) + } + fn(&mountEnv{ + t: t, + mountPoint: mountPoint, + process: mount.Process, + }) + +} + +func TestRoot(t *testing.T) { + condSkip(t) + cammountTest(t, func(env *mountEnv) { + f, err := os.Open(env.mountPoint) + if err != nil { + t.Fatal(err) + } + defer f.Close() + names, err := f.Readdirnames(-1) + if err != nil { + t.Fatal(err) + } + sort.Strings(names) + want := []string{"WELCOME.txt", "at", "date", "recent", "roots", "sha1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "tag"} + if !reflect.DeepEqual(names, want) { + t.Errorf("root directory = %q; want %q", names, want) + } + }) +} + +type testLog struct { + t *testing.T +} + +func (tl testLog) Write(p []byte) (n int, err error) { + tl.t.Log(strings.TrimSpace(string(p))) + return len(p), nil +} + +func TestMutable(t *testing.T) { + condSkip(t) + inEmptyMutDir(t, func(env *mountEnv, rootDir string) { + filename := filepath.Join(rootDir, "x") + f, err := os.Create(filename) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := f.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + fi, err := os.Stat(filename) + if err != nil { + t.Errorf("Stat error: %v", err) + } else if !fi.Mode().IsRegular() || fi.Size() != 0 { + t.Errorf("Stat of roots/r/x = %v size %d; want a %d byte regular file", fi.Mode(), fi.Size(), 0) + } + + for _, str := range []string{"foo, ", "bar\n", "another line.\n"} { + f, err = os.OpenFile(filename, os.O_WRONLY|os.O_APPEND, 0644) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + if _, err := f.Write([]byte(str)); err != nil { + t.Logf("Error with append: %v", err) + t.Fatalf("Error appending %q to %s: %v", str, filename, err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + ro0 := env.Stat(mutFileOpenRO) + slurp, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + 
if env.Stat(mutFileOpenRO)-ro0 != 1 { + t.Error("Read didn't trigger read-only path optimization.") + } + + const want = "foo, bar\nanother line.\n" + fi, err = os.Stat(filename) + if err != nil { + t.Errorf("Stat error: %v", err) + } else if !fi.Mode().IsRegular() || fi.Size() != int64(len(want)) { + t.Errorf("Stat of roots/r/x = %v size %d; want a %d byte regular file", fi.Mode(), fi.Size(), len(want)) + } + if got := string(slurp); got != want { + t.Fatalf("contents = %q; want %q", got, want) + } + + // Delete it. + if err := os.Remove(filename); err != nil { + t.Fatal(err) + } + + // Gone? + if _, err := os.Stat(filename); !os.IsNotExist(err) { + t.Fatalf("expected file to be gone; got stat err = %v instead", err) + } + }) +} + +func TestDifferentWriteTypes(t *testing.T) { + condSkip(t) + inEmptyMutDir(t, func(env *mountEnv, rootDir string) { + filename := filepath.Join(rootDir, "big") + + writes := []struct { + name string + flag int + write []byte // if non-nil, Write is called + writeAt []byte // if non-nil, WriteAt is used + writePos int64 // writeAt position + want string // shortenString of remaining file + }{ + { + name: "write 8k of a", + flag: os.O_RDWR | os.O_CREATE | os.O_TRUNC, + write: bytes.Repeat([]byte("a"), 8<<10), + want: "a{8192}", + }, + { + name: "writeAt HI at offset 10", + flag: os.O_RDWR, + writeAt: []byte("HI"), + writePos: 10, + want: "a{10}HIa{8180}", + }, + { + name: "append single C", + flag: os.O_WRONLY | os.O_APPEND, + write: []byte("C"), + want: "a{10}HIa{8180}C", + }, + { + name: "append 8k of b", + flag: os.O_WRONLY | os.O_APPEND, + write: bytes.Repeat([]byte("b"), 8<<10), + want: "a{10}HIa{8180}Cb{8192}", + }, + } + + for _, wr := range writes { + f, err := os.OpenFile(filename, wr.flag, 0644) + if err != nil { + t.Fatalf("%s: OpenFile: %v", wr.name, err) + } + if wr.write != nil { + if n, err := f.Write(wr.write); err != nil || n != len(wr.write) { + t.Fatalf("%s: Write = (%v, %v); want (%d, nil)", wr.name, n, err, 
len(wr.write)) + } + } + if wr.writeAt != nil { + if n, err := f.WriteAt(wr.writeAt, wr.writePos); err != nil || n != len(wr.writeAt) { + t.Fatalf("%s: WriteAt = (%v, %v); want (%d, nil)", wr.name, n, err, len(wr.writeAt)) + } + } + if err := f.Close(); err != nil { + t.Fatalf("%s: Close: %v", wr.name, err) + } + + slurp, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatalf("%s: Slurp: %v", wr.name, err) + } + if got := shortenString(string(slurp)); got != wr.want { + t.Fatalf("%s: afterwards, file = %q; want %q", wr.name, got, wr.want) + } + + } + + // Delete it. + if err := os.Remove(filename); err != nil { + t.Fatal(err) + } + }) +} + +func statStr(name string) string { + fi, err := os.Stat(name) + if os.IsNotExist(err) { + return "ENOENT" + } + if err != nil { + return "err=" + err.Error() + } + return fmt.Sprintf("file %v, size %d", fi.Mode(), fi.Size()) +} + +func TestRename(t *testing.T) { + condSkip(t) + inEmptyMutDir(t, func(env *mountEnv, rootDir string) { + name1 := filepath.Join(rootDir, "1") + name2 := filepath.Join(rootDir, "2") + subdir := filepath.Join(rootDir, "dir") + name3 := filepath.Join(subdir, "3") + + contents := []byte("Some file contents") + const gone = "ENOENT" + const reg = "file -rw-------, size 18" + + if err := ioutil.WriteFile(name1, contents, 0644); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(subdir, 0755); err != nil { + t.Fatal(err) + } + + if got, want := statStr(name1), reg; got != want { + t.Errorf("name1 = %q; want %q", got, want) + } + if err := os.Rename(name1, name2); err != nil { + t.Fatal(err) + } + if got, want := statStr(name1), gone; got != want { + t.Errorf("name1 = %q; want %q", got, want) + } + if got, want := statStr(name2), reg; got != want { + t.Errorf("name2 = %q; want %q", got, want) + } + + // Moving to a different directory. 
+ if err := os.Rename(name2, name3); err != nil { + t.Fatal(err) + } + if got, want := statStr(name2), gone; got != want { + t.Errorf("name2 = %q; want %q", got, want) + } + if got, want := statStr(name3), reg; got != want { + t.Errorf("name3 = %q; want %q", got, want) + } + }) +} + +func parseXattrList(from []byte) map[string]bool { + attrNames := bytes.Split(from, []byte{0}) + m := map[string]bool{} + for _, nm := range attrNames { + if len(nm) == 0 { + continue + } + m[string(nm)] = true + } + return m +} + +func TestXattr(t *testing.T) { + condSkip(t) + inEmptyMutDir(t, func(env *mountEnv, rootDir string) { + name1 := filepath.Join(rootDir, "1") + attr1 := "attr1" + attr2 := "attr2" + + contents := []byte("Some file contents") + + if err := ioutil.WriteFile(name1, contents, 0644); err != nil { + t.Fatal(err) + } + + buf := make([]byte, 8192) + // list empty + n, err := syscallx.Listxattr(name1, buf) + if err != nil { + t.Errorf("Error in initial listxattr: %v", err) + } + if n != 0 { + t.Errorf("Expected zero-length xattr list, got %q", buf[:n]) + } + + // get missing + n, err = syscallx.Getxattr(name1, attr1, buf) + if err == nil { + t.Errorf("Expected error getting non-existent xattr, got %q", buf[:n]) + } + + // Set (two different attributes) + err = syscallx.Setxattr(name1, attr1, []byte("hello1"), 0) + if err != nil { + t.Fatalf("Error setting xattr: %v", err) + } + err = syscallx.Setxattr(name1, attr2, []byte("hello2"), 0) + if err != nil { + t.Fatalf("Error setting xattr: %v", err) + } + // Alternate value for first attribute + err = syscallx.Setxattr(name1, attr1, []byte("hello1a"), 0) + if err != nil { + t.Fatalf("Error setting xattr: %v", err) + } + + // list attrs + n, err = syscallx.Listxattr(name1, buf) + if err != nil { + t.Errorf("Error in initial listxattr: %v", err) + } + m := parseXattrList(buf[:n]) + if !(len(m) == 2 && m[attr1] && m[attr2]) { + t.Errorf("Missing an attribute: %q", buf[:n]) + } + + // Remove attr + err = 
syscallx.Removexattr(name1, attr2) + if err != nil { + t.Errorf("Failed to remove attr: %v", err) + } + + // List attrs + n, err = syscallx.Listxattr(name1, buf) + if err != nil { + t.Errorf("Error in initial listxattr: %v", err) + } + m = parseXattrList(buf[:n]) + if !(len(m) == 1 && m[attr1]) { + t.Errorf("Missing an attribute: %q", buf[:n]) + } + + // Get remaining attr + n, err = syscallx.Getxattr(name1, attr1, buf) + if err != nil { + t.Errorf("Error getting attr1: %v", err) + } + if string(buf[:n]) != "hello1a" { + t.Logf("Expected hello1a, got %q", buf[:n]) + } + }) +} + +func TestSymlink(t *testing.T) { + condSkip(t) + // Do it all once, unmount, re-mount and then check again. + // TODO(bradfitz): do this same pattern (unmount and remount) in the other tests. + var suffix string + var link string + const target = "../../some-target" // arbitrary string. some-target is fake. + check := func() { + fi, err := os.Lstat(link) + if err != nil { + t.Fatalf("Stat: %v", err) + } + if fi.Mode()&os.ModeSymlink == 0 { + t.Errorf("Mode = %v; want Symlink bit set", fi.Mode()) + } + got, err := os.Readlink(link) + if err != nil { + t.Fatalf("Readlink: %v", err) + } + if got != target { + t.Errorf("ReadLink = %q; want %q", got, target) + } + } + inEmptyMutDir(t, func(env *mountEnv, rootDir string) { + // Save for second test: + link = filepath.Join(rootDir, "some-link") + suffix = strings.TrimPrefix(link, env.mountPoint) + + if err := os.Symlink(target, link); err != nil { + t.Fatalf("Symlink: %v", err) + } + t.Logf("Checking in first process...") + check() + }) + cammountTest(t, func(env *mountEnv) { + t.Logf("Checking in second process...") + link = env.mountPoint + suffix + check() + }) +} + +func TestFinderCopy(t *testing.T) { + if runtime.GOOS != "darwin" { + t.Skipf("Skipping Darwin-specific test.") + } + condSkip(t) + inEmptyMutDir(t, func(env *mountEnv, destDir string) { + f, err := ioutil.TempFile("", "finder-copy-file") + if err != nil { + t.Fatal(err) + } + 
defer os.Remove(f.Name()) + want := []byte("Some data for Finder to copy.") + if _, err := f.Write(want); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + cmd := exec.Command("osascript") + script := fmt.Sprintf(` +tell application "Finder" + copy file POSIX file %q to folder POSIX file %q +end tell +`, f.Name(), destDir) + cmd.Stdin = strings.NewReader(script) + + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("Error running AppleScript: %v, %s", err, out) + } else { + t.Logf("AppleScript said: %q", out) + } + + destFile := filepath.Join(destDir, filepath.Base(f.Name())) + fi, err := os.Stat(destFile) + if err != nil { + t.Errorf("Stat = %v, %v", fi, err) + } + if fi.Size() != int64(len(want)) { + t.Errorf("Dest stat size = %d; want %d", fi.Size(), len(want)) + } + slurp, err := ioutil.ReadFile(destFile) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if !bytes.Equal(slurp, want) { + t.Errorf("Dest file = %q; want %q", slurp, want) + } + }) +} + +func TestTextEdit(t *testing.T) { + if testing.Short() { + t.Skipf("Skipping in short mode") + } + if runtime.GOOS != "darwin" { + t.Skipf("Skipping Darwin-specific test.") + } + condSkip(t) + inEmptyMutDir(t, func(env *mountEnv, testDir string) { + var ( + testFile = filepath.Join(testDir, "some-text-file.txt") + content1 = []byte("Some text content.") + content2 = []byte("Some replacement content.") + ) + if err := ioutil.WriteFile(testFile, content1, 0644); err != nil { + t.Fatal(err) + } + + cmd := exec.Command("osascript") + script := fmt.Sprintf(` +tell application "TextEdit" + activate + open POSIX file %q + tell front document + set paragraph 1 to %q as text + save + close + end tell +end tell +`, testFile, content2) + cmd.Stdin = strings.NewReader(script) + + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("Error running AppleScript: %v, %s", err, out) + } else { + t.Logf("AppleScript said: %q", out) + } + + fi, err := os.Stat(testFile) 
+ if err != nil { + t.Errorf("Stat = %v, %v", fi, err) + } else if fi.Size() != int64(len(content2)) { + t.Errorf("Stat size = %d; want %d", fi.Size(), len(content2)) + } + slurp, err := ioutil.ReadFile(testFile) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if !bytes.Equal(slurp, content2) { + t.Errorf("File = %q; want %q", slurp, content2) + } + }) +} + +func not(cond func() bool) func() bool { + return func() bool { + return !cond() + } +} + +func dirToBeFUSE(dir string) func() bool { + return func() bool { + out, err := exec.Command("df", dir).CombinedOutput() + if err != nil { + return false + } + if runtime.GOOS == "darwin" { + if strings.Contains(string(out), "mount_osxfusefs@") { + return true + } + return false + } + if runtime.GOOS == "linux" { + return strings.Contains(string(out), "/dev/fuse") && + strings.Contains(string(out), dir) + } + return false + } +} + +// shortenString reduces any run of 5 or more identical bytes to "x{17}". +// "hello" => "hello" +// "fooooooooooooooooo" => "fo{17}" +func shortenString(v string) string { + var buf bytes.Buffer + var last byte + var run int + flush := func() { + switch { + case run == 0: + case run < 5: + for i := 0; i < run; i++ { + buf.WriteByte(last) + } + default: + buf.WriteByte(last) + fmt.Fprintf(&buf, "{%d}", run) + } + run = 0 + } + for i := 0; i < len(v); i++ { + b := v[i] + if b != last { + flush() + } + last = b + run++ + } + flush() + return buf.String() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/mut.go b/vendor/github.com/camlistore/camlistore/pkg/fs/mut.go new file mode 100644 index 00000000..97197f0b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/mut.go @@ -0,0 +1,898 @@ +// +build linux darwin + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fs + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/readerutil" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/search" + "camlistore.org/pkg/syncutil" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" +) + +// How often to refresh directory nodes by reading from the blobstore. +const populateInterval = 30 * time.Second + +// How long an item that was created locally will be present +// regardless of its presence in the indexing server. +const deletionRefreshWindow = time.Minute + +type nodeType int + +const ( + fileType nodeType = iota + dirType + symlinkType +) + +// mutDir is a mutable directory. +// Its br is the permanode with camliPath:entname attributes. +type mutDir struct { + fs *CamliFileSystem + permanode blob.Ref + parent *mutDir // or nil, if the root within its roots.go root. 
+ name string // ent name (base name within parent) + + localCreateTime time.Time // time this node was created locally (iff it was) + + mu sync.Mutex + lastPop time.Time + children map[string]mutFileOrDir + xattrs map[string][]byte + deleted bool +} + +func (m *mutDir) String() string { + return fmt.Sprintf("&mutDir{%p name=%q perm:%v}", m, m.fullPath(), m.permanode) +} + +// for debugging +func (n *mutDir) fullPath() string { + if n == nil { + return "" + } + return filepath.Join(n.parent.fullPath(), n.name) +} + +func (n *mutDir) Attr() fuse.Attr { + return fuse.Attr{ + Inode: n.permanode.Sum64(), + Mode: os.ModeDir | 0700, + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + } +} + +func (n *mutDir) Access(req *fuse.AccessRequest, intr fs.Intr) fuse.Error { + n.mu.Lock() + defer n.mu.Unlock() + if n.deleted { + return fuse.ENOENT + } + return nil +} + +func (n *mutFile) Access(req *fuse.AccessRequest, intr fs.Intr) fuse.Error { + n.mu.Lock() + defer n.mu.Unlock() + if n.deleted { + return fuse.ENOENT + } + return nil +} + +// populate hits the blobstore to populate map of child nodes. +func (n *mutDir) populate() error { + n.mu.Lock() + defer n.mu.Unlock() + + // Only re-populate if we haven't done so recently. 
+ now := time.Now() + if n.lastPop.Add(populateInterval).After(now) { + return nil + } + n.lastPop = now + + res, err := n.fs.client.Describe(&search.DescribeRequest{ + BlobRef: n.permanode, + Depth: 3, + }) + if err != nil { + log.Println("mutDir.paths:", err) + return nil + } + db := res.Meta[n.permanode.String()] + if db == nil { + return errors.New("dir blobref not described") + } + + // Find all child permanodes and stick them in n.children + if n.children == nil { + n.children = make(map[string]mutFileOrDir) + } + currentChildren := map[string]bool{} + for k, v := range db.Permanode.Attr { + const p = "camliPath:" + if !strings.HasPrefix(k, p) || len(v) < 1 { + continue + } + name := k[len(p):] + childRef := v[0] + child := res.Meta[childRef] + if child == nil { + log.Printf("child not described: %v", childRef) + continue + } + if child.Permanode == nil { + log.Printf("invalid child, not a permanode: %v", childRef) + continue + } + if target := child.Permanode.Attr.Get("camliSymlinkTarget"); target != "" { + // This is a symlink. + n.maybeAddChild(name, child.Permanode, &mutFile{ + fs: n.fs, + permanode: blob.ParseOrZero(childRef), + parent: n, + name: name, + symLink: true, + target: target, + }) + } else if isDir(child.Permanode) { + // This is a directory. + n.maybeAddChild(name, child.Permanode, &mutDir{ + fs: n.fs, + permanode: blob.ParseOrZero(childRef), + parent: n, + name: name, + }) + } else if contentRef := child.Permanode.Attr.Get("camliContent"); contentRef != "" { + // This is a file. 
+ content := res.Meta[contentRef] + if content == nil { + log.Printf("child content not described: %v", childRef) + continue + } + if content.CamliType != "file" { + log.Printf("child not a file: %v", childRef) + continue + } + if content.File == nil { + log.Printf("camlitype \"file\" child %v has no described File member", childRef) + continue + } + n.maybeAddChild(name, child.Permanode, &mutFile{ + fs: n.fs, + permanode: blob.ParseOrZero(childRef), + parent: n, + name: name, + content: blob.ParseOrZero(contentRef), + size: content.File.Size, + }) + } else { + // unhandled type... + continue + } + currentChildren[name] = true + } + // Remove unreferenced children + for name, oldchild := range n.children { + if _, ok := currentChildren[name]; !ok { + if oldchild.eligibleToDelete() { + delete(n.children, name) + } + } + } + return nil +} + +// maybeAddChild adds a child directory to this mutable directory +// unless it already has one with this name and permanode. +func (m *mutDir) maybeAddChild(name string, permanode *search.DescribedPermanode, + child mutFileOrDir) { + if current, ok := m.children[name]; !ok || + current.permanodeString() != child.permanodeString() { + + child.xattr().load(permanode) + m.children[name] = child + } +} + +func isDir(d *search.DescribedPermanode) bool { + // Explicit + if d.Attr.Get("camliNodeType") == "directory" { + return true + } + // Implied + for k := range d.Attr { + if strings.HasPrefix(k, "camliPath:") { + return true + } + } + return false +} + +func (n *mutDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { + if err := n.populate(); err != nil { + log.Println("populate:", err) + return nil, fuse.EIO + } + n.mu.Lock() + defer n.mu.Unlock() + var ents []fuse.Dirent + for name, childNode := range n.children { + var ino uint64 + switch v := childNode.(type) { + case *mutDir: + ino = v.permanode.Sum64() + case *mutFile: + ino = v.permanode.Sum64() + default: + log.Printf("mutDir.ReadDir: unknown child type %T", childNode) 
+ } + + // TODO: figure out what Dirent.Type means. + // fuse.go says "Type uint32 // ?" + dirent := fuse.Dirent{ + Name: name, + Inode: ino, + } + log.Printf("mutDir(%q) appending inode %x, %+v", n.fullPath(), dirent.Inode, dirent) + ents = append(ents, dirent) + } + return ents, nil +} + +func (n *mutDir) Lookup(name string, intr fs.Intr) (ret fs.Node, err fuse.Error) { + defer func() { + log.Printf("mutDir(%q).Lookup(%q) = %v, %v", n.fullPath(), name, ret, err) + }() + if err := n.populate(); err != nil { + log.Println("populate:", err) + return nil, fuse.EIO + } + n.mu.Lock() + defer n.mu.Unlock() + if n2 := n.children[name]; n2 != nil { + return n2, nil + } + return nil, fuse.ENOENT +} + +// Create of regular file. (not a dir) +// +// Flags are always 514: O_CREAT is 0x200 | O_RDWR is 0x2. +// From fuse_vnops.c: +// /* XXX: We /always/ creat() like this. Wish we were on Linux. */ +// foi->flags = O_CREAT | O_RDWR; +// +// 2013/07/21 05:26:35 <- &{Create [ID=0x3 Node=0x8 Uid=61652 Gid=5000 Pid=13115] "x" fl=514 mode=-rw-r--r-- fuse.Intr} +// 2013/07/21 05:26:36 -> 0x3 Create {LookupResponse:{Node:23 Generation:0 EntryValid:1m0s AttrValid:1m0s Attr:{Inode:15976986887557313215 Size:0 Blocks:0 Atime:2013-07-21 05:23:51.537251251 +1200 NZST Mtime:2013-07-21 05:23:51.537251251 +1200 NZST Ctime:2013-07-21 05:23:51.537251251 +1200 NZST Crtime:2013-07-21 05:23:51.537251251 +1200 NZST Mode:-rw------- Nlink:1 Uid:61652 Gid:5000 Rdev:0 Flags:0}} OpenResponse:{Handle:1 Flags:0}} +func (n *mutDir) Create(req *fuse.CreateRequest, res *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) { + child, err := n.creat(req.Name, fileType) + if err != nil { + log.Printf("mutDir.Create(%q): %v", req.Name, err) + return nil, nil, fuse.EIO + } + + // Create and return a file handle. 
+ h, ferr := child.(*mutFile).newHandle(nil) + if ferr != nil { + return nil, nil, ferr + } + + return child, h, nil +} + +func (n *mutDir) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) { + child, err := n.creat(req.Name, dirType) + if err != nil { + log.Printf("mutDir.Mkdir(%q): %v", req.Name, err) + return nil, fuse.EIO + } + return child, nil +} + +// &fuse.SymlinkRequest{Header:fuse.Header{Conn:(*fuse.Conn)(0xc210047180), ID:0x4, Node:0x8, Uid:0xf0d4, Gid:0x1388, Pid:0x7e88}, NewName:"some-link", Target:"../../some-target"} +func (n *mutDir) Symlink(req *fuse.SymlinkRequest, intr fs.Intr) (fs.Node, fuse.Error) { + node, err := n.creat(req.NewName, symlinkType) + if err != nil { + log.Printf("mutDir.Symlink(%q): %v", req.NewName, err) + return nil, fuse.EIO + } + mf := node.(*mutFile) + mf.symLink = true + mf.target = req.Target + + claim := schema.NewSetAttributeClaim(mf.permanode, "camliSymlinkTarget", req.Target) + _, err = n.fs.client.UploadAndSignBlob(claim) + if err != nil { + log.Printf("mutDir.Symlink(%q) upload error: %v", req.NewName, err) + return nil, fuse.EIO + } + + return node, nil +} + +func (n *mutDir) creat(name string, typ nodeType) (fs.Node, error) { + // Create a Permanode for the file/directory. + pr, err := n.fs.client.UploadNewPermanode() + if err != nil { + return nil, err + } + + var grp syncutil.Group + grp.Go(func() (err error) { + // Add a camliPath:name attribute to the directory permanode. + claim := schema.NewSetAttributeClaim(n.permanode, "camliPath:"+name, pr.BlobRef.String()) + _, err = n.fs.client.UploadAndSignBlob(claim) + return + }) + + // Hide OS X Finder .DS_Store junk. This is distinct from + // extended attributes. 
+ if name == ".DS_Store" { + grp.Go(func() (err error) { + claim := schema.NewSetAttributeClaim(pr.BlobRef, "camliDefVis", "hide") + _, err = n.fs.client.UploadAndSignBlob(claim) + return + }) + } + + if typ == dirType { + grp.Go(func() (err error) { + // Set a directory type on the permanode + claim := schema.NewSetAttributeClaim(pr.BlobRef, "camliNodeType", "directory") + _, err = n.fs.client.UploadAndSignBlob(claim) + return + }) + grp.Go(func() (err error) { + // Set the permanode title to the directory name + claim := schema.NewSetAttributeClaim(pr.BlobRef, "title", name) + _, err = n.fs.client.UploadAndSignBlob(claim) + return + }) + } + if err := grp.Err(); err != nil { + return nil, err + } + + // Add a child node to this node. + var child mutFileOrDir + switch typ { + case dirType: + child = &mutDir{ + fs: n.fs, + permanode: pr.BlobRef, + parent: n, + name: name, + xattrs: map[string][]byte{}, + localCreateTime: time.Now(), + } + case fileType, symlinkType: + child = &mutFile{ + fs: n.fs, + permanode: pr.BlobRef, + parent: n, + name: name, + xattrs: map[string][]byte{}, + localCreateTime: time.Now(), + } + default: + panic("bogus creat type") + } + n.mu.Lock() + if n.children == nil { + n.children = make(map[string]mutFileOrDir) + } + n.children[name] = child + n.mu.Unlock() + + log.Printf("Created %v in %p", child, n) + + return child, nil +} + +func (n *mutDir) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error { + // Remove the camliPath:name attribute from the directory permanode. + claim := schema.NewDelAttributeClaim(n.permanode, "camliPath:"+req.Name, "") + _, err := n.fs.client.UploadAndSignBlob(claim) + if err != nil { + log.Println("mutDir.Remove:", err) + return fuse.EIO + } + // Remove child from map. 
+ n.mu.Lock() + if n.children != nil { + if removed, ok := n.children[req.Name]; ok { + removed.invalidate() + delete(n.children, req.Name) + log.Printf("Removed %v from %p", removed, n) + } + } + n.mu.Unlock() + return nil +} + +// &RenameRequest{Header:fuse.Header{Conn:(*fuse.Conn)(0xc210048180), ID:0x2, Node:0x8, Uid:0xf0d4, Gid:0x1388, Pid:0x5edb}, NewDir:0x8, OldName:"1", NewName:"2"} +func (n *mutDir) Rename(req *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fuse.Error { + n2, ok := newDir.(*mutDir) + if !ok { + log.Printf("*mutDir newDir node isn't a *mutDir; is a %T; can't handle. returning EIO.", newDir) + return fuse.EIO + } + + var wg syncutil.Group + wg.Go(n.populate) + wg.Go(n2.populate) + if err := wg.Err(); err != nil { + log.Printf("*mutDir.Rename src dir populate = %v", err) + return fuse.EIO + } + + n.mu.Lock() + target, ok := n.children[req.OldName] + n.mu.Unlock() + if !ok { + log.Printf("*mutDir.Rename src name %q isn't known", req.OldName) + return fuse.ENOENT + } + + now := time.Now() + + // Add a camliPath:name attribute to the dest permanode before unlinking it from + // the source. + claim := schema.NewSetAttributeClaim(n2.permanode, "camliPath:"+req.NewName, target.permanodeString()) + claim.SetClaimDate(now) + _, err := n.fs.client.UploadAndSignBlob(claim) + if err != nil { + log.Printf("Upload rename link error: %v", err) + return fuse.EIO + } + + var grp syncutil.Group + // Unlink the dest permanode from the source. + grp.Go(func() (err error) { + delClaim := schema.NewDelAttributeClaim(n.permanode, "camliPath:"+req.OldName, "") + delClaim.SetClaimDate(now) + _, err = n.fs.client.UploadAndSignBlob(delClaim) + return + }) + // If target is a directory then update its title. 
+ if dir, ok := target.(*mutDir); ok { + grp.Go(func() (err error) { + claim := schema.NewSetAttributeClaim(dir.permanode, "title", req.NewName) + _, err = n.fs.client.UploadAndSignBlob(claim) + return + }) + } + if err := grp.Err(); err != nil { + log.Printf("Upload rename unlink/title error: %v", err) + return fuse.EIO + } + + // TODO(bradfitz): this locking would be racy, if the kernel + // doesn't do it properly. (It should) Let's just trust the + // kernel for now. Later we can verify and remove this + // comment. + n.mu.Lock() + if n.children[req.OldName] != target { + panic("Race.") + } + delete(n.children, req.OldName) + n.mu.Unlock() + n2.mu.Lock() + n2.children[req.NewName] = target + n2.mu.Unlock() + + return nil +} + +// mutFile is a mutable file, or symlink. +type mutFile struct { + fs *CamliFileSystem + permanode blob.Ref + parent *mutDir + name string // ent name (base name within parent) + + localCreateTime time.Time // time this node was created locally (iff it was) + + mu sync.Mutex // protects all following fields + symLink bool // if true, is a symlink + target string // if a symlink + content blob.Ref // if a regular file + size int64 + mtime, atime time.Time // if zero, use serverStart + xattrs map[string][]byte + deleted bool +} + +func (m *mutFile) String() string { + return fmt.Sprintf("&mutFile{%p name=%q perm:%v}", m, m.fullPath(), m.permanode) +} + +// for debugging +func (n *mutFile) fullPath() string { + if n == nil { + return "" + } + return filepath.Join(n.parent.fullPath(), n.name) +} + +func (n *mutFile) xattr() *xattr { + return &xattr{"mutFile", n.fs, n.permanode, &n.mu, &n.xattrs} +} + +func (n *mutDir) xattr() *xattr { + return &xattr{"mutDir", n.fs, n.permanode, &n.mu, &n.xattrs} +} + +func (n *mutDir) Removexattr(req *fuse.RemovexattrRequest, intr fs.Intr) fuse.Error { + return n.xattr().remove(req) +} + +func (n *mutDir) Setxattr(req *fuse.SetxattrRequest, intr fs.Intr) fuse.Error { + return n.xattr().set(req) +} + +func (n 
*mutDir) Getxattr(req *fuse.GetxattrRequest, res *fuse.GetxattrResponse, intr fs.Intr) fuse.Error { + return n.xattr().get(req, res) +} + +func (n *mutDir) Listxattr(req *fuse.ListxattrRequest, res *fuse.ListxattrResponse, intr fs.Intr) fuse.Error { + return n.xattr().list(req, res) +} + +func (n *mutFile) Getxattr(req *fuse.GetxattrRequest, res *fuse.GetxattrResponse, intr fs.Intr) fuse.Error { + return n.xattr().get(req, res) +} + +func (n *mutFile) Listxattr(req *fuse.ListxattrRequest, res *fuse.ListxattrResponse, intr fs.Intr) fuse.Error { + return n.xattr().list(req, res) +} + +func (n *mutFile) Removexattr(req *fuse.RemovexattrRequest, intr fs.Intr) fuse.Error { + return n.xattr().remove(req) +} + +func (n *mutFile) Setxattr(req *fuse.SetxattrRequest, intr fs.Intr) fuse.Error { + return n.xattr().set(req) +} + +func (n *mutFile) Attr() fuse.Attr { + // TODO: don't grab n.mu three+ times in here. + var mode os.FileMode = 0600 // writable + + n.mu.Lock() + size := n.size + var blocks uint64 + if size > 0 { + blocks = uint64(size)/512 + 1 + } + inode := n.permanode.Sum64() + if n.symLink { + mode |= os.ModeSymlink + } + n.mu.Unlock() + + return fuse.Attr{ + Inode: inode, + Mode: mode, + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + Size: uint64(size), + Blocks: blocks, + Mtime: n.modTime(), + Atime: n.accessTime(), + Ctime: serverStart, + Crtime: serverStart, + } +} + +func (n *mutFile) accessTime() time.Time { + n.mu.Lock() + if !n.atime.IsZero() { + defer n.mu.Unlock() + return n.atime + } + n.mu.Unlock() + return n.modTime() +} + +func (n *mutFile) modTime() time.Time { + n.mu.Lock() + defer n.mu.Unlock() + if !n.mtime.IsZero() { + return n.mtime + } + return serverStart +} + +func (n *mutFile) setContent(br blob.Ref, size int64) error { + n.mu.Lock() + defer n.mu.Unlock() + n.content = br + n.size = size + claim := schema.NewSetAttributeClaim(n.permanode, "camliContent", br.String()) + _, err := n.fs.client.UploadAndSignBlob(claim) + return err +} 
+ +func (n *mutFile) setSizeAtLeast(size int64) { + n.mu.Lock() + defer n.mu.Unlock() + log.Printf("mutFile.setSizeAtLeast(%d). old size = %d", size, n.size) + if size > n.size { + n.size = size + } +} + +// Empirically: +// open for read: req.Flags == 0 +// open for append: req.Flags == 1 +// open for write: req.Flags == 1 +// open for read/write (+<) == 2 (bitmask? of?) +// +// open flags are O_WRONLY (1), O_RDONLY (0), or O_RDWR (2). and also +// bitmaks of O_SYMLINK (0x200000) maybe. (from +// fuse_filehandle_xlate_to_oflags in macosx/kext/fuse_file.h) +func (n *mutFile) Open(req *fuse.OpenRequest, res *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) { + mutFileOpen.Incr() + + log.Printf("mutFile.Open: %v: content: %v dir=%v flags=%v", n.permanode, n.content, req.Dir, req.Flags) + r, err := schema.NewFileReader(n.fs.fetcher, n.content) + if err != nil { + mutFileOpenError.Incr() + log.Printf("mutFile.Open: %v", err) + return nil, fuse.EIO + } + + // Read-only. + if !isWriteFlags(req.Flags) { + mutFileOpenRO.Incr() + log.Printf("mutFile.Open returning read-only file") + n := &node{ + fs: n.fs, + blobref: n.content, + } + return &nodeReader{n: n, fr: r}, nil + } + + mutFileOpenRW.Incr() + log.Printf("mutFile.Open returning read-write filehandle") + + defer r.Close() + return n.newHandle(r) +} + +func (n *mutFile) Fsync(r *fuse.FsyncRequest, intr fs.Intr) fuse.Error { + // TODO(adg): in the fuse package, plumb through fsync to mutFileHandle + // in the same way we did Truncate. 
+ log.Printf("mutFile.Fsync: TODO") + return nil +} + +func (n *mutFile) Readlink(req *fuse.ReadlinkRequest, intr fs.Intr) (string, fuse.Error) { + n.mu.Lock() + defer n.mu.Unlock() + if !n.symLink { + log.Printf("mutFile.Readlink on node that's not a symlink?") + return "", fuse.EIO + } + return n.target, nil +} + +func (n *mutFile) Setattr(req *fuse.SetattrRequest, res *fuse.SetattrResponse, intr fs.Intr) fuse.Error { + log.Printf("mutFile.Setattr on %q: %#v", n.fullPath(), req) + // 2013/07/17 19:43:41 mutFile.Setattr on "foo": &fuse.SetattrRequest{Header:fuse.Header{Conn:(*fuse.Conn)(0xc210047180), ID:0x3, Node:0x3d, Uid:0xf0d4, Gid:0x1388, Pid:0x75e8}, Valid:0x30, Handle:0x0, Size:0x0, Atime:time.Time{sec:63509651021, nsec:0x4aec6b8, loc:(*time.Location)(0x47f7600)}, Mtime:time.Time{sec:63509651021, nsec:0x4aec6b8, loc:(*time.Location)(0x47f7600)}, Mode:0x4000000, Uid:0x0, Gid:0x0, Bkuptime:time.Time{sec:62135596800, nsec:0x0, loc:(*time.Location)(0x47f7600)}, Chgtime:time.Time{sec:62135596800, nsec:0x0, loc:(*time.Location)(0x47f7600)}, Crtime:time.Time{sec:0, nsec:0x0, loc:(*time.Location)(nil)}, Flags:0x0} + + n.mu.Lock() + if req.Valid&fuse.SetattrMtime != 0 { + n.mtime = req.Mtime + } + if req.Valid&fuse.SetattrAtime != 0 { + n.atime = req.Atime + } + if req.Valid&fuse.SetattrSize != 0 { + // TODO(bradfitz): truncate? + n.size = int64(req.Size) + } + n.mu.Unlock() + + res.AttrValid = 1 * time.Minute + res.Attr = n.Attr() + return nil +} + +func (n *mutFile) newHandle(body io.Reader) (fs.Handle, fuse.Error) { + tmp, err := ioutil.TempFile("", "camli-") + if err == nil && body != nil { + _, err = io.Copy(tmp, body) + } + if err != nil { + log.Printf("mutFile.newHandle: %v", err) + if tmp != nil { + tmp.Close() + os.Remove(tmp.Name()) + } + return nil, fuse.EIO + } + return &mutFileHandle{f: n, tmp: tmp}, nil +} + +// mutFileHandle represents an open mutable file. 
+// It stores the file contents in a temporary file, and +// delegates reads and writes directly to the temporary file. +// When the handle is released, it writes the contents of the +// temporary file to the blobstore, and instructs the parent +// mutFile to update the file permanode. +type mutFileHandle struct { + f *mutFile + tmp *os.File +} + +func (h *mutFileHandle) Read(req *fuse.ReadRequest, res *fuse.ReadResponse, intr fs.Intr) fuse.Error { + if h.tmp == nil { + log.Printf("Read called on camli mutFileHandle without a tempfile set") + return fuse.EIO + } + + buf := make([]byte, req.Size) + n, err := h.tmp.ReadAt(buf, req.Offset) + if err == io.EOF { + err = nil + } + if err != nil { + log.Printf("mutFileHandle.Read: %v", err) + return fuse.EIO + } + res.Data = buf[:n] + return nil +} + +func (h *mutFileHandle) Write(req *fuse.WriteRequest, res *fuse.WriteResponse, intr fs.Intr) fuse.Error { + if h.tmp == nil { + log.Printf("Write called on camli mutFileHandle without a tempfile set") + return fuse.EIO + } + + n, err := h.tmp.WriteAt(req.Data, req.Offset) + log.Printf("mutFileHandle.Write(%q, %d bytes at %d, flags %v) = %d, %v", + h.f.fullPath(), len(req.Data), req.Offset, req.Flags, n, err) + if err != nil { + log.Println("mutFileHandle.Write:", err) + return fuse.EIO + } + res.Size = n + h.f.setSizeAtLeast(req.Offset + int64(n)) + return nil +} + +// Flush is called to let the file system clean up any data buffers +// and to pass any errors in the process of closing a file to the user +// application. +// +// Flush *may* be called more than once in the case where a file is +// opened more than once, but it's not possible to detect from the +// call itself whether this is a final flush. +// +// This is generally the last opportunity to finalize data and the +// return value sets the return value of the Close that led to the +// calling of Flush. +// +// Note that this is distinct from Fsync -- which is a user-requested +// flush (fsync, etc...) 
+func (h *mutFileHandle) Flush(*fuse.FlushRequest, fs.Intr) fuse.Error { + if h.tmp == nil { + log.Printf("Flush called on camli mutFileHandle without a tempfile set") + return fuse.EIO + } + _, err := h.tmp.Seek(0, 0) + if err != nil { + log.Println("mutFileHandle.Flush:", err) + return fuse.EIO + } + var n int64 + br, err := schema.WriteFileFromReader(h.f.fs.client, h.f.name, readerutil.CountingReader{Reader: h.tmp, N: &n}) + if err != nil { + log.Println("mutFileHandle.Flush:", err) + return fuse.EIO + } + err = h.f.setContent(br, n) + if err != nil { + log.Printf("mutFileHandle.Flush: %v", err) + return fuse.EIO + } + + return nil +} + +// Release is called when a file handle is no longer needed. This is +// called asynchronously after the last handle to a file is closed. +func (h *mutFileHandle) Release(req *fuse.ReleaseRequest, intr fs.Intr) fuse.Error { + h.tmp.Close() + os.Remove(h.tmp.Name()) + h.tmp = nil + + return nil +} + +func (h *mutFileHandle) Truncate(size uint64, intr fs.Intr) fuse.Error { + if h.tmp == nil { + log.Printf("Truncate called on camli mutFileHandle without a tempfile set") + return fuse.EIO + } + + log.Printf("mutFileHandle.Truncate(%q) to size %d", h.f.fullPath(), size) + if err := h.tmp.Truncate(int64(size)); err != nil { + log.Println("mutFileHandle.Truncate:", err) + return fuse.EIO + } + return nil +} + +// mutFileOrDir is a *mutFile or *mutDir +type mutFileOrDir interface { + fs.Node + invalidate() + permanodeString() string + xattr() *xattr + eligibleToDelete() bool +} + +func (n *mutFile) permanodeString() string { + return n.permanode.String() +} + +func (n *mutDir) permanodeString() string { + return n.permanode.String() +} + +func (n *mutFile) invalidate() { + n.mu.Lock() + n.deleted = true + n.mu.Unlock() +} + +func (n *mutDir) invalidate() { + n.mu.Lock() + n.deleted = true + n.mu.Unlock() +} + +func (n *mutFile) eligibleToDelete() bool { + return n.localCreateTime.Before(time.Now().Add(-deletionRefreshWindow)) +} + +func 
(n *mutDir) eligibleToDelete() bool { + return n.localCreateTime.Before(time.Now().Add(-deletionRefreshWindow)) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/mut_test.go b/vendor/github.com/camlistore/camlistore/pkg/fs/mut_test.go new file mode 100644 index 00000000..b20b5370 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/mut_test.go @@ -0,0 +1,49 @@ +// +build linux darwin + +/* +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fs + +import ( + "testing" + "time" +) + +func TestDeleteEligibility(t *testing.T) { + tests := []struct { + name string + ts time.Time + exp bool + }{ + {"zero", time.Time{}, true}, + {"now", time.Now(), false}, + {"future", time.Now().Add(time.Hour), false}, + {"recent", time.Now().Add(-(deletionRefreshWindow / 2)), false}, + {"past", time.Now().Add(-(deletionRefreshWindow * 2)), true}, + } + + for _, test := range tests { + d := &mutDir{localCreateTime: test.ts} + if d.eligibleToDelete() != test.exp { + t.Errorf("Expected %v %T/%v", test.exp, d, test.name) + } + f := &mutFile{localCreateTime: test.ts} + if f.eligibleToDelete() != test.exp { + t.Errorf("Expected %v for %T/%v", test.exp, f, test.name) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/recent.go b/vendor/github.com/camlistore/camlistore/pkg/fs/recent.go new file mode 100644 index 00000000..ba9149d7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/recent.go @@ -0,0 +1,156 @@ +// 
+build linux darwin + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fs + +import ( + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/search" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" +) + +// recentDir implements fuse.Node and is a directory of recent +// permanodes' files, for permanodes with a camliContent pointing to a +// "file". +type recentDir struct { + noXattr + fs *CamliFileSystem + + mu sync.Mutex + ents map[string]*search.DescribedBlob // filename to blob meta + modTime map[string]time.Time // filename to permanode modtime + lastReaddir time.Time + lastNames []string +} + +func (n *recentDir) Attr() fuse.Attr { + return fuse.Attr{ + Mode: os.ModeDir | 0500, + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + } +} + +const recentSearchInterval = 10 * time.Second + +func (n *recentDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { + var ents []fuse.Dirent + + n.mu.Lock() + defer n.mu.Unlock() + if n.lastReaddir.After(time.Now().Add(-recentSearchInterval)) { + log.Printf("fs.recent: ReadDir from cache") + for _, name := range n.lastNames { + ents = append(ents, fuse.Dirent{Name: name}) + } + return ents, nil + } + + log.Printf("fs.recent: ReadDir, doing search") + + n.ents = make(map[string]*search.DescribedBlob) + n.modTime = make(map[string]time.Time) + + req := &search.RecentRequest{N: 100} + res, 
err := n.fs.client.GetRecentPermanodes(req) + if err != nil { + log.Printf("fs.recent: GetRecentPermanodes error in ReadDir: %v", err) + return nil, fuse.EIO + } + + n.lastNames = nil + for _, ri := range res.Recent { + modTime := ri.ModTime.Time() + meta := res.Meta.Get(ri.BlobRef) + if meta == nil || meta.Permanode == nil { + continue + } + cc, ok := blob.Parse(meta.Permanode.Attr.Get("camliContent")) + if !ok { + continue + } + ccMeta := res.Meta.Get(cc) + if ccMeta == nil { + continue + } + var name string + switch { + case ccMeta.File != nil: + name = ccMeta.File.FileName + if mt := ccMeta.File.Time; !mt.IsZero() { + modTime = mt.Time() + } + case ccMeta.Dir != nil: + name = ccMeta.Dir.FileName + default: + continue + } + if name == "" || n.ents[name] != nil { + ext := filepath.Ext(name) + if ext == "" && ccMeta.File != nil && strings.HasSuffix(ccMeta.File.MIMEType, "image/jpeg") { + ext = ".jpg" + } + name = strings.TrimPrefix(ccMeta.BlobRef.String(), "sha1-")[:10] + ext + if n.ents[name] != nil { + continue + } + } + n.ents[name] = ccMeta + n.modTime[name] = modTime + log.Printf("fs.recent: name %q = %v (at %v -> %v)", name, ccMeta.BlobRef, ri.ModTime.Time(), modTime) + n.lastNames = append(n.lastNames, name) + ents = append(ents, fuse.Dirent{ + Name: name, + }) + } + log.Printf("fs.recent returning %d entries", len(ents)) + n.lastReaddir = time.Now() + return ents, nil +} + +func (n *recentDir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + n.mu.Lock() + defer n.mu.Unlock() + if n.ents == nil { + // Odd case: a Lookup before a Readdir. Force a readdir to + // seed our map. Mostly hit just during development. 
+ n.mu.Unlock() // release, since ReadDir will acquire + n.ReadDir(intr) + n.mu.Lock() + } + db := n.ents[name] + log.Printf("fs.recent: Lookup(%q) = %v", name, db) + if db == nil { + return nil, fuse.ENOENT + } + nod := &node{ + fs: n.fs, + blobref: db.BlobRef, + pnodeModTime: n.modTime[name], + } + return nod, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/ro.go b/vendor/github.com/camlistore/camlistore/pkg/fs/ro.go new file mode 100644 index 00000000..4fcdb28e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/ro.go @@ -0,0 +1,383 @@ +// +build linux darwin + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fs + +import ( + "errors" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/search" + "camlistore.org/pkg/types" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" +) + +// roDir is a read-only directory. +// Its permanode is the permanode with camliPath:entname attributes. +type roDir struct { + fs *CamliFileSystem + permanode blob.Ref + parent *roDir // or nil, if the root within its roots.go root. 
+ name string // ent name (base name within parent) + at time.Time + + mu sync.Mutex + children map[string]roFileOrDir + xattrs map[string][]byte +} + +func newRODir(fs *CamliFileSystem, permanode blob.Ref, name string, at time.Time) *roDir { + return &roDir{ + fs: fs, + permanode: permanode, + name: name, + at: at, + } +} + +// for debugging +func (n *roDir) fullPath() string { + if n == nil { + return "" + } + return filepath.Join(n.parent.fullPath(), n.name) +} + +func (n *roDir) Attr() fuse.Attr { + return fuse.Attr{ + Inode: n.permanode.Sum64(), + Mode: os.ModeDir | 0500, + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + } +} + +// populate hits the blobstore to populate map of child nodes. +func (n *roDir) populate() error { + n.mu.Lock() + defer n.mu.Unlock() + + // Things never change here, so if we've ever populated, we're + // populated. + if n.children != nil { + return nil + } + + log.Printf("roDir.populate(%q) - Sending request At %v", n.fullPath(), n.at) + + res, err := n.fs.client.Describe(&search.DescribeRequest{ + BlobRef: n.permanode, + Depth: 3, + At: types.Time3339(n.at), + }) + if err != nil { + log.Println("roDir.paths:", err) + return nil + } + db := res.Meta[n.permanode.String()] + if db == nil { + return errors.New("dir blobref not described") + } + + // Find all child permanodes and stick them in n.children + n.children = make(map[string]roFileOrDir) + for k, v := range db.Permanode.Attr { + const p = "camliPath:" + if !strings.HasPrefix(k, p) || len(v) < 1 { + continue + } + name := k[len(p):] + childRef := v[0] + child := res.Meta[childRef] + if child == nil { + log.Printf("child not described: %v", childRef) + continue + } + if target := child.Permanode.Attr.Get("camliSymlinkTarget"); target != "" { + // This is a symlink. 
+ n.children[name] = &roFile{ + fs: n.fs, + permanode: blob.ParseOrZero(childRef), + parent: n, + name: name, + symLink: true, + target: target, + } + } else if isDir(child.Permanode) { + // This is a directory. + n.children[name] = &roDir{ + fs: n.fs, + permanode: blob.ParseOrZero(childRef), + parent: n, + name: name, + } + } else if contentRef := child.Permanode.Attr.Get("camliContent"); contentRef != "" { + // This is a file. + content := res.Meta[contentRef] + if content == nil { + log.Printf("child content not described: %v", childRef) + continue + } + if content.CamliType != "file" { + log.Printf("child not a file: %v", childRef) + continue + } + n.children[name] = &roFile{ + fs: n.fs, + permanode: blob.ParseOrZero(childRef), + parent: n, + name: name, + content: blob.ParseOrZero(contentRef), + size: content.File.Size, + } + } else { + // unknown type + continue + } + n.children[name].xattr().load(child.Permanode) + } + return nil +} + +func (n *roDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { + if err := n.populate(); err != nil { + log.Println("populate:", err) + return nil, fuse.EIO + } + n.mu.Lock() + defer n.mu.Unlock() + var ents []fuse.Dirent + for name, childNode := range n.children { + var ino uint64 + switch v := childNode.(type) { + case *roDir: + ino = v.permanode.Sum64() + case *roFile: + ino = v.permanode.Sum64() + default: + log.Printf("roDir.ReadDir: unknown child type %T", childNode) + } + + // TODO: figure out what Dirent.Type means. + // fuse.go says "Type uint32 // ?" 
+ dirent := fuse.Dirent{ + Name: name, + Inode: ino, + } + log.Printf("roDir(%q) appending inode %x, %+v", n.fullPath(), dirent.Inode, dirent) + ents = append(ents, dirent) + } + return ents, nil +} + +func (n *roDir) Lookup(name string, intr fs.Intr) (ret fs.Node, err fuse.Error) { + defer func() { + log.Printf("roDir(%q).Lookup(%q) = %#v, %v", n.fullPath(), name, ret, err) + }() + if err := n.populate(); err != nil { + log.Println("populate:", err) + return nil, fuse.EIO + } + n.mu.Lock() + defer n.mu.Unlock() + if n2 := n.children[name]; n2 != nil { + return n2, nil + } + return nil, fuse.ENOENT +} + +// roFile is a read-only file, or symlink. +type roFile struct { + fs *CamliFileSystem + permanode blob.Ref + parent *roDir + name string // ent name (base name within parent) + + mu sync.Mutex // protects all following fields + symLink bool // if true, is a symlink + target string // if a symlink + content blob.Ref // if a regular file + size int64 + mtime, atime time.Time // if zero, use serverStart + xattrs map[string][]byte +} + +func (n *roDir) Getxattr(req *fuse.GetxattrRequest, res *fuse.GetxattrResponse, intr fs.Intr) fuse.Error { + return n.xattr().get(req, res) +} + +func (n *roDir) Listxattr(req *fuse.ListxattrRequest, res *fuse.ListxattrResponse, intr fs.Intr) fuse.Error { + return n.xattr().list(req, res) +} + +func (n *roFile) Getxattr(req *fuse.GetxattrRequest, res *fuse.GetxattrResponse, intr fs.Intr) fuse.Error { + return n.xattr().get(req, res) +} + +func (n *roFile) Listxattr(req *fuse.ListxattrRequest, res *fuse.ListxattrResponse, intr fs.Intr) fuse.Error { + return n.xattr().list(req, res) +} + +func (n *roFile) Removexattr(req *fuse.RemovexattrRequest, intr fs.Intr) fuse.Error { + return fuse.EPERM +} + +func (n *roFile) Setxattr(req *fuse.SetxattrRequest, intr fs.Intr) fuse.Error { + return fuse.EPERM +} + +// for debugging +func (n *roFile) fullPath() string { + if n == nil { + return "" + } + return filepath.Join(n.parent.fullPath(), 
n.name) +} + +func (n *roFile) Attr() fuse.Attr { + // TODO: don't grab n.mu three+ times in here. + var mode os.FileMode = 0400 // read-only + + n.mu.Lock() + size := n.size + var blocks uint64 + if size > 0 { + blocks = uint64(size)/512 + 1 + } + inode := n.permanode.Sum64() + if n.symLink { + mode |= os.ModeSymlink + } + n.mu.Unlock() + + return fuse.Attr{ + Inode: inode, + Mode: mode, + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + Size: uint64(size), + Blocks: blocks, + Mtime: n.modTime(), + Atime: n.accessTime(), + Ctime: serverStart, + Crtime: serverStart, + } +} + +func (n *roFile) accessTime() time.Time { + n.mu.Lock() + if !n.atime.IsZero() { + defer n.mu.Unlock() + return n.atime + } + n.mu.Unlock() + return n.modTime() +} + +func (n *roFile) modTime() time.Time { + n.mu.Lock() + defer n.mu.Unlock() + if !n.mtime.IsZero() { + return n.mtime + } + return serverStart +} + +// Empirically: +// open for read: req.Flags == 0 +// open for append: req.Flags == 1 +// open for write: req.Flags == 1 +// open for read/write (+<) == 2 (bitmask? of?) +// +// open flags are O_WRONLY (1), O_RDONLY (0), or O_RDWR (2). and also +// bitmaks of O_SYMLINK (0x200000) maybe. (from +// fuse_filehandle_xlate_to_oflags in macosx/kext/fuse_file.h) +func (n *roFile) Open(req *fuse.OpenRequest, res *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) { + roFileOpen.Incr() + + if isWriteFlags(req.Flags) { + return nil, fuse.EPERM + } + + log.Printf("roFile.Open: %v: content: %v dir=%v flags=%v", n.permanode, n.content, req.Dir, req.Flags) + r, err := schema.NewFileReader(n.fs.fetcher, n.content) + if err != nil { + roFileOpenError.Incr() + log.Printf("roFile.Open: %v", err) + return nil, fuse.EIO + } + + // Turn off the OpenDirectIO bit (on by default in rsc fuse server.go), + // else append operations don't work for some reason. + res.Flags &= ^fuse.OpenDirectIO + + // Read-only. 
+ nod := &node{ + fs: n.fs, + blobref: n.content, + } + return &nodeReader{n: nod, fr: r}, nil +} + +func (n *roFile) Fsync(r *fuse.FsyncRequest, intr fs.Intr) fuse.Error { + // noop + return nil +} + +func (n *roFile) Readlink(req *fuse.ReadlinkRequest, intr fs.Intr) (string, fuse.Error) { + log.Printf("roFile.Readlink(%q)", n.fullPath()) + n.mu.Lock() + defer n.mu.Unlock() + if !n.symLink { + log.Printf("roFile.Readlink on node that's not a symlink?") + return "", fuse.EIO + } + return n.target, nil +} + +// roFileOrDir is a *roFile or *roDir +type roFileOrDir interface { + fs.Node + permanodeString() string + xattr() *xattr +} + +func (n *roFile) permanodeString() string { + return n.permanode.String() +} + +func (n *roDir) permanodeString() string { + return n.permanode.String() +} + +func (n *roFile) xattr() *xattr { + return &xattr{"roFile", n.fs, n.permanode, &n.mu, &n.xattrs} +} + +func (n *roDir) xattr() *xattr { + return &xattr{"roDir", n.fs, n.permanode, &n.mu, &n.xattrs} +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/root.go b/vendor/github.com/camlistore/camlistore/pkg/fs/root.go new file mode 100644 index 00000000..42b891fb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/root.go @@ -0,0 +1,121 @@ +// +build linux darwin + +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fs + +import ( + "log" + "os" + "sync" + + "camlistore.org/pkg/blob" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" +) + +// root implements fuse.Node and is the typical root of a +// CamliFilesystem with a little hello message and the ability to +// search and browse static snapshots, etc. +type root struct { + noXattr + fs *CamliFileSystem + + mu sync.Mutex // guards recent + recent *recentDir + roots *rootsDir + atDir *atDir +} + +func (n *root) Attr() fuse.Attr { + return fuse.Attr{ + Mode: os.ModeDir | 0700, + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + } +} + +func (n *root) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { + return []fuse.Dirent{ + {Name: "WELCOME.txt"}, + {Name: "tag"}, + {Name: "date"}, + {Name: "recent"}, + {Name: "roots"}, + {Name: "at"}, + {Name: "sha1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}, + }, nil +} + +func (n *root) getRecentDir() *recentDir { + n.mu.Lock() + defer n.mu.Unlock() + if n.recent == nil { + n.recent = &recentDir{fs: n.fs} + } + return n.recent +} + +func (n *root) getRootsDir() *rootsDir { + n.mu.Lock() + defer n.mu.Unlock() + if n.roots == nil { + n.roots = &rootsDir{fs: n.fs} + } + return n.roots +} + +func (n *root) getAtDir() *atDir { + n.mu.Lock() + defer n.mu.Unlock() + if n.atDir == nil { + n.atDir = &atDir{fs: n.fs} + } + return n.atDir +} + +func (n *root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + switch name { + case ".quitquitquit": + log.Fatalf("Shutting down due to root .quitquitquit lookup.") + case "WELCOME.txt": + return staticFileNode("Welcome to CamlistoreFS.\n\nFor now you can only cd into a sha1-xxxx directory, if you know the blobref of a directory or a file.\n"), nil + case "recent": + return n.getRecentDir(), nil + case "tag", "date": + return notImplementDirNode{}, nil + case "at": + return n.getAtDir(), nil + case "roots": + return n.getRootsDir(), nil + case 
"sha1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx": + return notImplementDirNode{}, nil + case ".camli_fs_stats": + return statsDir{}, nil + case "mach_kernel", ".hidden", "._.": + // Just quiet some log noise on OS X. + return nil, fuse.ENOENT + } + + if br, ok := blob.Parse(name); ok { + log.Printf("Root lookup of blobref. %q => %v", name, br) + return &node{fs: n.fs, blobref: br}, nil + } + log.Printf("Bogus root lookup of %q", name) + return nil, fuse.ENOENT +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/roots.go b/vendor/github.com/camlistore/camlistore/pkg/fs/roots.go new file mode 100644 index 00000000..e5c428c7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/roots.go @@ -0,0 +1,337 @@ +// +build linux darwin + +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fs + +import ( + "log" + "os" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/search" + "camlistore.org/pkg/syncutil" + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" +) + +const refreshTime = 1 * time.Minute + +type rootsDir struct { + noXattr + fs *CamliFileSystem + at time.Time + + mu sync.Mutex // guards following + lastQuery time.Time + m map[string]blob.Ref // ent name => permanode + children map[string]fs.Node // ent name => child node +} + +func (n *rootsDir) isRO() bool { + return !n.at.IsZero() +} + +func (n *rootsDir) dirMode() os.FileMode { + if n.isRO() { + return 0500 + } + return 0700 +} + +func (n *rootsDir) Attr() fuse.Attr { + return fuse.Attr{ + Mode: os.ModeDir | n.dirMode(), + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + } +} + +func (n *rootsDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { + n.mu.Lock() + defer n.mu.Unlock() + if err := n.condRefresh(); err != nil { + return nil, fuse.EIO + } + var ents []fuse.Dirent + for name := range n.m { + ents = append(ents, fuse.Dirent{Name: name}) + } + log.Printf("rootsDir.ReadDir() -> %v", ents) + return ents, nil +} + +func (n *rootsDir) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error { + if n.isRO() { + return fuse.EPERM + } + n.mu.Lock() + defer n.mu.Unlock() + + if err := n.condRefresh(); err != nil { + return err + } + br := n.m[req.Name] + if !br.Valid() { + return fuse.ENOENT + } + + claim := schema.NewDelAttributeClaim(br, "camliRoot", "") + _, err := n.fs.client.UploadAndSignBlob(claim) + if err != nil { + log.Println("rootsDir.Remove:", err) + return fuse.EIO + } + + delete(n.m, req.Name) + delete(n.children, req.Name) + + return nil +} + +func (n *rootsDir) Rename(req *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fuse.Error { + log.Printf("rootsDir.Rename %q -> %q", req.OldName, req.NewName) + if n.isRO() { + return fuse.EPERM + } + + 
n.mu.Lock() + target, exists := n.m[req.OldName] + _, collision := n.m[req.NewName] + n.mu.Unlock() + if !exists { + log.Printf("*rootsDir.Rename src name %q isn't known", req.OldName) + return fuse.ENOENT + } + if collision { + log.Printf("*rootsDir.Rename dest %q already exists", req.NewName) + return fuse.EIO + } + + // Don't allow renames if the root contains content. Rename + // is mostly implemented to make GUIs that create directories + // before asking for the directory name. + res, err := n.fs.client.Describe(&search.DescribeRequest{BlobRef: target}) + if err != nil { + log.Println("rootsDir.Rename:", err) + return fuse.EIO + } + db := res.Meta[target.String()] + if db == nil { + log.Printf("Failed to pull meta for target: %v", target) + return fuse.EIO + } + + for k := range db.Permanode.Attr { + const p = "camliPath:" + if strings.HasPrefix(k, p) { + log.Printf("Found file in %q: %q, disallowing rename", req.OldName, k[len(p):]) + return fuse.EIO + } + } + + claim := schema.NewSetAttributeClaim(target, "camliRoot", req.NewName) + _, err = n.fs.client.UploadAndSignBlob(claim) + if err != nil { + log.Printf("Upload rename link error: %v", err) + return fuse.EIO + } + + // Comment transplanted from mutDir.Rename + // TODO(bradfitz): this locking would be racy, if the kernel + // doesn't do it properly. (It should) Let's just trust the + // kernel for now. Later we can verify and remove this + // comment. 
+ n.mu.Lock() + if n.m[req.OldName] != target { + panic("Race.") + } + delete(n.m, req.OldName) + delete(n.children, req.OldName) + delete(n.children, req.NewName) + n.m[req.NewName] = target + n.mu.Unlock() + + return nil +} + +func (n *rootsDir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + log.Printf("fs.roots: Lookup(%q)", name) + n.mu.Lock() + defer n.mu.Unlock() + if err := n.condRefresh(); err != nil { + return nil, err + } + br := n.m[name] + if !br.Valid() { + return nil, fuse.ENOENT + } + + nod, ok := n.children[name] + if ok { + return nod, nil + } + + if n.isRO() { + nod = newRODir(n.fs, br, name, n.at) + } else { + nod = &mutDir{ + fs: n.fs, + permanode: br, + name: name, + xattrs: map[string][]byte{}, + } + } + n.children[name] = nod + + return nod, nil +} + +// requires n.mu is held +func (n *rootsDir) condRefresh() fuse.Error { + if n.lastQuery.After(time.Now().Add(-refreshTime)) { + return nil + } + log.Printf("fs.roots: querying") + + var rootRes, impRes *search.WithAttrResponse + var grp syncutil.Group + grp.Go(func() (err error) { + rootRes, err = n.fs.client.GetPermanodesWithAttr(&search.WithAttrRequest{N: 100, Attr: "camliRoot"}) + return + }) + grp.Go(func() (err error) { + impRes, err = n.fs.client.GetPermanodesWithAttr(&search.WithAttrRequest{N: 100, Attr: "camliImportRoot"}) + return + }) + if err := grp.Err(); err != nil { + log.Printf("fs.recent: GetRecentPermanodes error in ReadDir: %v", err) + return fuse.EIO + } + + n.m = make(map[string]blob.Ref) + if n.children == nil { + n.children = make(map[string]fs.Node) + } + + dr := &search.DescribeRequest{ + Depth: 1, + } + for _, wi := range rootRes.WithAttr { + dr.BlobRefs = append(dr.BlobRefs, wi.Permanode) + } + for _, wi := range impRes.WithAttr { + dr.BlobRefs = append(dr.BlobRefs, wi.Permanode) + } + if len(dr.BlobRefs) == 0 { + return nil + } + + dres, err := n.fs.client.Describe(dr) + if err != nil { + log.Printf("Describe failure: %v", err) + return fuse.EIO + } + + 
// Roots + currentRoots := map[string]bool{} + for _, wi := range rootRes.WithAttr { + pn := wi.Permanode + db := dres.Meta[pn.String()] + if db != nil && db.Permanode != nil { + name := db.Permanode.Attr.Get("camliRoot") + if name != "" { + currentRoots[name] = true + n.m[name] = pn + } + } + } + + // Remove any children objects we have mapped that are no + // longer relevant. + for name := range n.children { + if !currentRoots[name] { + delete(n.children, name) + } + } + + // Importers (mapped as roots for now) + for _, wi := range impRes.WithAttr { + pn := wi.Permanode + db := dres.Meta[pn.String()] + if db != nil && db.Permanode != nil { + name := db.Permanode.Attr.Get("camliImportRoot") + if name != "" { + name = strings.Replace(name, ":", "-", -1) + name = strings.Replace(name, "/", "-", -1) + n.m["importer-"+name] = pn + } + } + } + + n.lastQuery = time.Now() + return nil +} + +func (n *rootsDir) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) { + if n.isRO() { + return nil, fuse.EPERM + } + + name := req.Name + + // Create a Permanode for the root. + pr, err := n.fs.client.UploadNewPermanode() + if err != nil { + log.Printf("rootsDir.Create(%q): %v", name, err) + return nil, fuse.EIO + } + + var grp syncutil.Group + // Add a camliRoot attribute to the root permanode. + grp.Go(func() (err error) { + claim := schema.NewSetAttributeClaim(pr.BlobRef, "camliRoot", name) + _, err = n.fs.client.UploadAndSignBlob(claim) + return + }) + // Set the title of the root permanode to the root name. 
+ grp.Go(func() (err error) { + claim := schema.NewSetAttributeClaim(pr.BlobRef, "title", name) + _, err = n.fs.client.UploadAndSignBlob(claim) + return + }) + if err := grp.Err(); err != nil { + log.Printf("rootsDir.Create(%q): %v", name, err) + return nil, fuse.EIO + } + + nod := &mutDir{ + fs: n.fs, + permanode: pr.BlobRef, + name: name, + xattrs: map[string][]byte{}, + } + n.mu.Lock() + n.m[name] = pr.BlobRef + n.mu.Unlock() + + return nod, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/time.go b/vendor/github.com/camlistore/camlistore/pkg/fs/time.go new file mode 100644 index 00000000..94512413 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/time.go @@ -0,0 +1,151 @@ +// +build linux darwin + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fs + +import ( + "errors" + "fmt" + "math" + "strconv" + "time" +) + +var timeFormats = []string{ + time.RFC3339Nano, + time.RFC3339, + time.RFC1123Z, + time.RFC1123, + time.UnixDate, + time.ANSIC, + time.RubyDate, + "2006-01-02T15:04", + "2006-01-02T15", + "2006-01-02", + "2006-01", + "2006", +} + +var errUnparseableTimestamp = errors.New("unparsable timestamp") + +var powTable = []int{ + 10e8, + 10e7, + 10e6, + 10e5, + 10e4, + 10e3, + 10e2, + 10e1, + 10, + 1, +} + +// Hand crafted this parser since it's a really common path. 
+func parseCanonicalTime(in string) (time.Time, error) { + if len(in) < 20 || in[len(in)-1] != 'Z' { + return time.Time{}, errUnparseableTimestamp + } + + if !(in[4] == '-' && in[7] == '-' && in[10] == 'T' && + in[13] == ':' && in[16] == ':' && (in[19] == '.' || in[19] == 'Z')) { + return time.Time{}, fmt.Errorf("positionally incorrect: %v", in) + } + + // 2012-08-28T21:24:35.37465188Z + // 4 7 10 13 16 19 + // ----------------------------- + // 0-4 5 8 11 14 17 20 + + year, err := strconv.Atoi(in[0:4]) + if err != nil { + return time.Time{}, fmt.Errorf("error parsing year: %v", err) + } + + month, err := strconv.Atoi(in[5:7]) + if err != nil { + return time.Time{}, fmt.Errorf("error parsing month: %v", err) + } + + day, err := strconv.Atoi(in[8:10]) + if err != nil { + return time.Time{}, fmt.Errorf("error parsing day: %v", err) + } + + hour, err := strconv.Atoi(in[11:13]) + if err != nil { + return time.Time{}, fmt.Errorf("error parsing hour: %v", err) + } + + minute, err := strconv.Atoi(in[14:16]) + if err != nil { + return time.Time{}, fmt.Errorf("error parsing minute: %v", err) + } + + second, err := strconv.Atoi(in[17:19]) + if err != nil { + return time.Time{}, fmt.Errorf("error parsing second: %v", err) + } + + var nsecstr string + if in[19] != 'Z' { + nsecstr = in[20 : len(in)-1] + } + var nsec int + + if nsecstr != "" { + nsec, err = strconv.Atoi(nsecstr) + if err != nil { + return time.Time{}, fmt.Errorf("error parsing nanoseconds: %v", err) + } + } + + nsec *= powTable[len(nsecstr)] + + return time.Date(year, time.Month(month), day, + hour, minute, second, nsec, time.UTC), nil +} + +func parseTime(in string) (time.Time, error) { + // First, try a few numerics + n, err := strconv.ParseInt(in, 10, 64) + if err == nil { + switch { + case n > int64(math.MaxInt32)*1000: + // nanosecond timestamps + return time.Unix(n/1e9, n%1e9), nil + case n > int64(math.MaxInt32): + // millisecond timestamps + return time.Unix(n/1000, (n%1000)*1e6), nil + case n > 10000: + 
// second timestamps + return time.Unix(n, 0), nil + } + } + rv, err := parseCanonicalTime(in) + if err == nil { + return rv, nil + } + for _, f := range timeFormats { + parsed, err := time.Parse(f, in) + if err == nil { + return parsed, nil + } + } + return time.Time{}, errUnparseableTimestamp +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/time_test.go b/vendor/github.com/camlistore/camlistore/pkg/fs/time_test.go new file mode 100644 index 00000000..56fb9fd1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/time_test.go @@ -0,0 +1,165 @@ +// +build linux darwin + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fs + +import ( + "log" + "testing" + "time" +) + +const exampleTimeString = "2012-08-28T21:24:35.37465188Z" +const milliAccuracy = "2012-08-28T21:24:35.374Z" +const secondAccuracy = "2012-08-28T21:24:35Z" + +var exampleTime time.Time + +func init() { + var err error + exampleTime, err = time.Parse(time.RFC3339, exampleTimeString) + if err != nil { + panic(err) + } + if exampleTimeString != exampleTime.UTC().Format(time.RFC3339Nano) { + log.Panicf("Expected %v, got %v", exampleTimeString, + exampleTime.UTC().Format(time.RFC3339Nano)) + } +} + +func TestTimeParsing(t *testing.T) { + tests := []struct { + input string + exp string + }{ + {"1346189075374651880", exampleTimeString}, + {"1346189075374", milliAccuracy}, + {"1346189075", secondAccuracy}, + {"2012-08-28T21:24:35.37465188Z", exampleTimeString}, + {secondAccuracy, secondAccuracy}, + {"Tue, 28 Aug 2012 21:24:35 +0000", secondAccuracy}, + {"Tue, 28 Aug 2012 21:24:35 UTC", secondAccuracy}, + {"Tue Aug 28 21:24:35 UTC 2012", secondAccuracy}, + {"Tue Aug 28 21:24:35 2012", secondAccuracy}, + {"Tue Aug 28 21:24:35 +0000 2012", secondAccuracy}, + {"2012-08-28T21:24", "2012-08-28T21:24:00Z"}, + {"2012-08-28T21", "2012-08-28T21:00:00Z"}, + {"2012-08-28", "2012-08-28T00:00:00Z"}, + {"2012-08", "2012-08-01T00:00:00Z"}, + {"2012", "2012-01-01T00:00:00Z"}, + } + + for _, x := range tests { + tm, err := parseTime(x.input) + if err != nil { + t.Errorf("Error on %v - %v", x.input, err) + t.Fail() + } + got := tm.UTC().Format(time.RFC3339Nano) + if x.exp != got { + t.Errorf("Expected %v for %v, got %v", x.exp, x.input, got) + t.Fail() + } + } +} + +func TestCanonicalParser(t *testing.T) { + tests := []struct { + input string + exp string + }{ + {"2012-08-28T21:24:35.374651883Z", ""}, + {"2012-08-28T21:24:35.37465188Z", ""}, + {"2012-08-28T21:24:35.3746518Z", ""}, + {"2012-08-28T21:24:35.374651Z", ""}, + {"2012-08-28T21:24:35.37465Z", ""}, + {"2012-08-28T21:24:35.3746Z", ""}, + {"2012-08-28T21:24:35.374Z", ""}, 
+ {"2012-08-28T21:24:35.37Z", ""}, + {"2012-08-28T21:24:35.3Z", ""}, + {"2012-08-28T21:24:35.0Z", "2012-08-28T21:24:35Z"}, + {"2012-08-28T21:24:35.Z", "2012-08-28T21:24:35Z"}, + {"2012-08-28T21:24:35Z", ""}, + } + + for _, x := range tests { + tm, err := parseCanonicalTime(x.input) + if err != nil { + t.Errorf("Error on %v - %v", x.input, err) + t.Fail() + } + got := tm.UTC().Format(time.RFC3339Nano) + exp := x.exp + if exp == "" { + exp = x.input + } + if exp != got { + t.Errorf("Expected %v for %v, got %v", x.exp, x.input, got) + t.Fail() + } + } +} + +func benchTimeParsing(b *testing.B, input string) { + for i := 0; i < b.N; i++ { + _, err := parseTime(input) + if err != nil { + b.Fatalf("Error on %v - %v", input, err) + } + } +} + +func BenchmarkParseTimeCanonicalDirect(b *testing.B) { + input := "2012-08-28T21:24:35.37465188Z" + for i := 0; i < b.N; i++ { + _, err := parseCanonicalTime(input) + if err != nil { + b.Fatalf("Error on %v - %v", input, err) + } + } +} + +func BenchmarkParseTimeCanonicalStdlib(b *testing.B) { + input := "2012-08-28T21:24:35.37465188Z" + for i := 0; i < b.N; i++ { + _, err := time.Parse(time.RFC3339, input) + if err != nil { + b.Fatalf("Error on %v - %v", input, err) + } + } +} + +func BenchmarkParseTimeCanonical(b *testing.B) { + benchTimeParsing(b, "2012-08-28T21:24:35.37465188Z") +} + +func BenchmarkParseTimeMisc(b *testing.B) { + benchTimeParsing(b, "Tue, 28 Aug 2012 21:24:35 +0000") +} + +func BenchmarkParseTimeIntNano(b *testing.B) { + benchTimeParsing(b, "1346189075374651880") +} + +func BenchmarkParseTimeIntMillis(b *testing.B) { + benchTimeParsing(b, "1346189075374") +} + +func BenchmarkParseTimeIntSecs(b *testing.B) { + benchTimeParsing(b, "1346189075") +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/util.go b/vendor/github.com/camlistore/camlistore/pkg/fs/util.go new file mode 100644 index 00000000..c158aa50 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/util.go @@ -0,0 +1,53 @@ +/* 
+Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fs + +import ( + "errors" + "os/exec" + "runtime" + "time" +) + +// Unmount attempts to unmount the provided FUSE mount point, forcibly +// if necessary. +func Unmount(point string) error { + var cmd *exec.Cmd + switch runtime.GOOS { + case "darwin": + cmd = exec.Command("/usr/sbin/diskutil", "umount", "force", point) + case "linux": + cmd = exec.Command("fusermount", "-u", point) + default: + return errors.New("unmount: unimplemented") + } + + errc := make(chan error, 1) + go func() { + if err := exec.Command("umount", point).Run(); err == nil { + errc <- err + } + // retry to unmount with the fallback cmd + errc <- cmd.Run() + }() + select { + case <-time.After(1 * time.Second): + return errors.New("umount timeout") + case err := <-errc: + return err + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/xattr.go b/vendor/github.com/camlistore/camlistore/pkg/fs/xattr.go new file mode 100644 index 00000000..b8c29f1a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/xattr.go @@ -0,0 +1,162 @@ +// +build linux darwin + +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fs + +import ( + "encoding/base64" + "log" + "strings" + "sync" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/search" + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" +) + +// xattrPrefix is the permanode attribute prefix used for record +// extended attributes. +const xattrPrefix = "xattr:" + +// xattr provides common support for extended attributes for various +// file and directory implementations (fuse.Node) within the FUSE services. +type xattr struct { + typeName string // for logging + fs *CamliFileSystem + permanode blob.Ref + + // mu guards xattrs. Both mu and the xattrs map are provided by the + // fuse.Node when the struct is created. + mu *sync.Mutex + + // This is a pointer to the particular fuse.Node's location of its + // xattr map so that it can be initialized commonly when the fuse.Node + // calls xattr.load(*search.DescribedPermanode) + xattrs *map[string][]byte +} + +// load is invoked after the creation of a fuse.Node that may contain extended +// attributes. This creates the node's xattr map as well as fills it with any +// extended attributes found in the permanode's claims. 
+func (x *xattr) load(p *search.DescribedPermanode) { + x.mu.Lock() + defer x.mu.Unlock() + + *x.xattrs = map[string][]byte{} + for k, v := range p.Attr { + if strings.HasPrefix(k, xattrPrefix) { + name := k[len(xattrPrefix):] + val, err := base64.StdEncoding.DecodeString(v[0]) + if err != nil { + log.Printf("Base64 decoding error on attribute %v: %v", name, err) + continue + } + (*x.xattrs)[name] = val + } + } +} + +func (x *xattr) set(req *fuse.SetxattrRequest) fuse.Error { + log.Printf("%s.setxattr(%q) -> %q", x.typeName, req.Name, req.Xattr) + + claim := schema.NewSetAttributeClaim(x.permanode, xattrPrefix+req.Name, + base64.StdEncoding.EncodeToString(req.Xattr)) + _, err := x.fs.client.UploadAndSignBlob(claim) + if err != nil { + log.Printf("Error setting xattr: %v", err) + return fuse.EIO + } + + val := make([]byte, len(req.Xattr)) + copy(val, req.Xattr) + x.mu.Lock() + (*x.xattrs)[req.Name] = val + x.mu.Unlock() + + return nil +} + +func (x *xattr) remove(req *fuse.RemovexattrRequest) fuse.Error { + log.Printf("%s.Removexattr(%q)", x.typeName, req.Name) + + claim := schema.NewDelAttributeClaim(x.permanode, xattrPrefix+req.Name, "") + _, err := x.fs.client.UploadAndSignBlob(claim) + + if err != nil { + log.Printf("Error removing xattr: %v", err) + return fuse.EIO + } + + x.mu.Lock() + delete(*x.xattrs, req.Name) + x.mu.Unlock() + + return nil +} + +func (x *xattr) get(req *fuse.GetxattrRequest, res *fuse.GetxattrResponse) fuse.Error { + x.mu.Lock() + defer x.mu.Unlock() + + val, found := (*x.xattrs)[req.Name] + + if !found { + return fuse.ENODATA + } + + res.Xattr = val + + return nil +} + +func (x *xattr) list(req *fuse.ListxattrRequest, res *fuse.ListxattrResponse) fuse.Error { + x.mu.Lock() + defer x.mu.Unlock() + + for k := range *x.xattrs { + res.Xattr = append(res.Xattr, k...) + res.Xattr = append(res.Xattr, '\x00') + } + return nil +} + +// noXattr provides default xattr methods for fuse nodes. 
The fuse +// package itself defaults to ENOSYS which causes some systems (read: +// MacOSX) to assume that no extended attribute support is available +// anywhere in the filesystem. This different set of defaults just +// returns no values for read requests and permission denied for write +// requests. +type noXattr struct{} + +func (n noXattr) Getxattr(*fuse.GetxattrRequest, *fuse.GetxattrResponse, fs.Intr) fuse.Error { + return fuse.ENODATA +} + +func (n noXattr) Listxattr(*fuse.ListxattrRequest, *fuse.ListxattrResponse, fs.Intr) fuse.Error { + return nil +} + +func (n noXattr) Setxattr(*fuse.SetxattrRequest, fs.Intr) fuse.Error { + return fuse.EPERM +} + +func (n noXattr) Removexattr(*fuse.RemovexattrRequest, fs.Intr) fuse.Error { + return fuse.EPERM +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/fs/z_test.go b/vendor/github.com/camlistore/camlistore/pkg/fs/z_test.go new file mode 100644 index 00000000..bca132e4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/fs/z_test.go @@ -0,0 +1,34 @@ +// +build linux darwin + +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fs + +import ( + "testing" + + "camlistore.org/pkg/test" +) + +// Make sure that the camlistored process started +// by the World gets terminated when all the tests +// are done. +// This works only as long as TestZLastTest is the +// last test to run in the package. 
+func TestZLastTest(t *testing.T) { + test.GetWorldMaybe(t).Stop() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/gc/gc.go b/vendor/github.com/camlistore/camlistore/pkg/gc/gc.go new file mode 100644 index 00000000..06a1298a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/gc/gc.go @@ -0,0 +1,198 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package gc defines a generic garbage collector. +package gc + +import ( + "errors" + "fmt" + + "camlistore.org/pkg/context" + "camlistore.org/pkg/syncutil" +) + +const buffered = 32 // arbitrary + +// Item is something that exists that may or may not survive a GC collection. +type Item interface{} + +// A Collector performs a garbage collection. +type Collector struct { + // World specifies a World that should be stopped before a + // collection and started again after. + World World + + Marker Marker + Roots Enumerator + Sweeper Enumerator + ItemEnumerator ItemEnumerator + Deleter Deleter +} + +type Marker interface { + // Mark marks that an item should exist. + // It must be safe for calls from concurrent goroutines. + Mark(Item) error + + // IsMarked returns whether the item is marked. + // It must be safe for calls from concurrent goroutines. + IsMarked(Item) (bool, error) +} + +// World defines the thing that should be stopped before GC and started after. 
+type World interface { + Stop() error + Start() error +} + +type Deleter interface { + // Delete deletes an item that was deemed unreachable via + // the garbage collector. + // It must be safe for calls from concurrent goroutines. + Delete(Item) error +} + +// Enumerator enumerates items. +type Enumerator interface { + // Enumerate enumerates items (which items depends on usage) + // and sends them to the provided channel. Regardless of return + // value, the channel should be closed. + // + // If the provided context is closed, Enumerate should return + // with an error (typically context.ErrCanceled) + Enumerate(*context.Context, chan<- Item) error +} + +// ItemEnumerator enumerates all the edges out from an item. +type ItemEnumerator interface { + // EnumerateItme is like Enuerator's Enumerate, but specific + // to the provided item. + EnumerateItem(*context.Context, Item, chan<- Item) error +} + +// ctx will be canceled on failure +func (c *Collector) markItem(ctx *context.Context, it Item, isRoot bool) error { + if !isRoot { + marked, err := c.Marker.IsMarked(it) + if err != nil { + return err + } + if marked { + return nil + } + } + if err := c.Marker.Mark(it); err != nil { + return err + } + + ch := make(chan Item, buffered) + var grp syncutil.Group + grp.Go(func() error { + return c.ItemEnumerator.EnumerateItem(ctx, it, ch) + }) + grp.Go(func() error { + for it := range ch { + if err := c.markItem(ctx, it, false); err != nil { + return err + } + } + return nil + }) + if err := grp.Err(); err != nil { + ctx.Cancel() + return err + } + return nil +} + +// Collect performs a garbage collection. 
+func (c *Collector) Collect(ctx *context.Context) (err error) { + if c.World == nil { + return errors.New("no World") + } + if c.Marker == nil { + return errors.New("no Marker") + } + if c.Roots == nil { + return errors.New("no Roots") + } + if c.Sweeper == nil { + return errors.New("no Sweeper") + } + if c.ItemEnumerator == nil { + return errors.New("no ItemEnumerator") + } + if c.Deleter == nil { + return errors.New("no Deleter") + } + if err := c.World.Stop(); err != nil { + return err + } + defer func() { + startErr := c.World.Start() + if err == nil { + err = startErr + } + }() + + // Mark. + roots := make(chan Item, buffered) + markCtx := ctx.New() + var marker syncutil.Group + marker.Go(func() error { + defer markCtx.Cancel() + for it := range roots { + if err := c.markItem(markCtx, it, true); err != nil { + return err + } + } + return nil + }) + marker.Go(func() error { + return c.Roots.Enumerate(markCtx, roots) + }) + if err := marker.Err(); err != nil { + return fmt.Errorf("Mark failure: %v", err) + } + + // Sweep. 
+ all := make(chan Item, buffered) + sweepCtx := ctx.New() + var sweeper syncutil.Group + sweeper.Go(func() error { + return c.Sweeper.Enumerate(sweepCtx, all) + }) + sweeper.Go(func() error { + defer sweepCtx.Done() + for it := range all { + ok, err := c.Marker.IsMarked(it) + if err != nil { + return err + } + if !ok { + if err := c.Deleter.Delete(it); err != nil { + return err + } + } + } + return nil + }) + if err := sweeper.Err(); err != nil { + return fmt.Errorf("Sweep failure: %v", err) + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/gc/gc_test.go b/vendor/github.com/camlistore/camlistore/pkg/gc/gc_test.go new file mode 100644 index 00000000..82edef3e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/gc/gc_test.go @@ -0,0 +1,183 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gc + +import ( + "reflect" + "sort" + "testing" + + "camlistore.org/pkg/context" +) + +func sl(v ...string) []string { + if len(v) == 0 { + return nil + } + return v +} + +var collectTests = []struct { + name string + world []string + roots []string + graph map[string][]string + wantWorld []string +}{ + { + name: "delete everything", + world: sl("a", "b", "c"), + wantWorld: sl(), + }, + + { + name: "keep everything", + world: sl("a", "b", "c"), + roots: sl("a", "b", "c"), + wantWorld: sl("a", "b", "c"), + }, + + { + name: "keep all via chain", + world: sl("a", "b", "c", "d", "e"), + roots: sl("a"), + graph: map[string][]string{ + "a": sl("b"), + "b": sl("c"), + "c": sl("d"), + "d": sl("e"), + }, + wantWorld: sl("a", "b", "c", "d", "e"), + }, + + { + name: "keep all via fan", + world: sl("a", "b", "c", "d", "e"), + roots: sl("a"), + graph: map[string][]string{ + "a": sl("b", "c", "d", "e"), + }, + wantWorld: sl("a", "b", "c", "d", "e"), + }, + + { + name: "c dies, two roots", + world: sl("a", "b", "c", "d", "e"), + roots: sl("a", "d"), + graph: map[string][]string{ + "a": sl("b"), + "d": sl("e"), + }, + wantWorld: sl("a", "b", "d", "e"), + }, +} + +type worldSet map[string]bool + +func newWorldSet(start []string) worldSet { + s := make(worldSet) + for _, v := range start { + s[v] = true + } + return s +} + +func (s worldSet) Delete(it Item) error { + delete(s, it.(string)) + return nil +} + +func (s worldSet) items() []string { + if len(s) == 0 { + return nil + } + ret := make([]string, 0, len(s)) + for it := range s { + ret = append(ret, it) + } + sort.Strings(ret) + return ret +} + +func TestCollector(t *testing.T) { + for _, tt := range collectTests { + if tt.name == "" { + panic("no name in test") + } + w := newWorldSet(tt.world) + c := &Collector{ + World: testWorld{}, + Marker: testMarker(map[Item]bool{}), + Roots: testEnum(tt.roots), + Sweeper: testEnum(tt.world), + ItemEnumerator: testItemEnum(tt.graph), + Deleter: w, + } + if err := 
c.Collect(context.New()); err != nil { + t.Errorf("%s: Collect = %v", tt.name, err) + } + got := w.items() + if !reflect.DeepEqual(tt.wantWorld, got) { + t.Errorf("%s: world = %q; want %q", tt.name, got, tt.wantWorld) + } + } +} + +type testEnum []string + +func (s testEnum) Enumerate(ctx *context.Context, dest chan<- Item) error { + defer close(dest) + for _, v := range s { + select { + case dest <- v: + case <-ctx.Done(): + return context.ErrCanceled + } + } + return nil +} + +type testItemEnum map[string][]string + +func (m testItemEnum) EnumerateItem(ctx *context.Context, it Item, dest chan<- Item) error { + defer close(dest) + for _, v := range m[it.(string)] { + select { + case dest <- v: + case <-ctx.Done(): + return context.ErrCanceled + } + } + return nil +} + +type testMarker map[Item]bool + +func (m testMarker) Mark(it Item) error { + m[it] = true + return nil +} + +func (m testMarker) IsMarked(it Item) (v bool, err error) { + v = m[it] + return +} + +type testWorld struct{} + +func (testWorld) Start() error { return nil } +func (testWorld) Stop() error { return nil } diff --git a/vendor/github.com/camlistore/camlistore/pkg/geocode/geocode.go b/vendor/github.com/camlistore/camlistore/pkg/geocode/geocode.go new file mode 100644 index 00000000..15254566 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/geocode/geocode.go @@ -0,0 +1,116 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package geocode handles mapping user-entered locations into lat/long polygons. +package geocode + +import ( + "encoding/json" + "io" + "log" + "net/url" + "sync" + + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/singleflight" +) + +type LatLong struct { + Lat float64 `json:"lat"` + Long float64 `json:"lng"` +} + +type Rect struct { + NorthEast LatLong `json:"northeast"` + SouthWest LatLong `json:"southwest"` +} + +var ( + mu sync.RWMutex + cache = map[string][]Rect{} + + sf singleflight.Group +) + +// Lookup returns rectangles for the given address. Currently the only +// implementation is the Google geocoding service. +func Lookup(ctx *context.Context, address string) ([]Rect, error) { + mu.RLock() + rects, ok := cache[address] + mu.RUnlock() + if ok { + return rects, nil + } + + rectsi, err := sf.Do(address, func() (interface{}, error) { + // TODO: static data files from OpenStreetMap, Wikipedia, etc? + urlStr := "https://maps.googleapis.com/maps/api/geocode/json?address=" + url.QueryEscape(address) + "&sensor=false" + res, err := ctx.HTTPClient().Get(urlStr) + if err != nil { + return nil, err + } + defer httputil.CloseBody(res.Body) + rects, err := decodeGoogleResponse(res.Body) + log.Printf("Google geocode lookup (%q) = %#v, %v", address, rects, err) + if err == nil { + mu.Lock() + cache[address] = rects + mu.Unlock() + } + return rects, err + }) + if err != nil { + return nil, err + } + return rectsi.([]Rect), nil +} + +type googleResTop struct { + Results []*googleResult `json:"results"` +} + +type googleResult struct { + Geometry *googleGeometry `json:"geometry"` +} + +type googleGeometry struct { + Bounds *Rect `json:"bounds"` + Viewport *Rect `json:"viewport"` +} + +func decodeGoogleResponse(r io.Reader) (rects []Rect, err error) { + var resTop googleResTop + if err := json.NewDecoder(r).Decode(&resTop); err != nil { + return nil, err + } + for _, res := range resTop.Results { + if res.Geometry != nil && 
res.Geometry.Bounds != nil { + r := res.Geometry.Bounds + if r.NorthEast.Lat == 90 && r.NorthEast.Long == 180 && + r.SouthWest.Lat == -90 && r.SouthWest.Long == -180 { + // Google sometimes returns a "whole world" rect for large addresses (like "USA") + // so instead use the viewport in that case. + if res.Geometry.Viewport != nil { + rects = append(rects, *res.Geometry.Viewport) + } + } else { + rects = append(rects, *r) + } + } + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/geocode/geocode_test.go b/vendor/github.com/camlistore/camlistore/pkg/geocode/geocode_test.go new file mode 100644 index 00000000..660636b9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/geocode/geocode_test.go @@ -0,0 +1,238 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package geocode + +import ( + "reflect" + "strconv" + "strings" + "testing" +) + +func TestDecodeGoogleResponse(t *testing.T) { + tests := []struct { + name string + res string + want []Rect + }{ + { + name: "moscow", + res: googleMoscow, + want: []Rect{ + Rect{ + NorthEast: LatLong{pf("56.009657"), pf("37.945661")}, + SouthWest: LatLong{pf("55.48992699999999"), pf("37.319329")}, + }, + Rect{ + NorthEast: LatLong{pf("46.758882"), pf("-116.962068")}, + SouthWest: LatLong{pf("46.710912"), pf("-117.039698")}, + }, + }, + }, + { + name: "usa", + res: googleUSA, + want: []Rect{ + Rect{ + NorthEast: LatLong{pf("49.38"), pf("-66.94")}, + SouthWest: LatLong{pf("25.82"), pf("-124.39")}, + }, + }, + }, + } + for _, tt := range tests { + rects, err := decodeGoogleResponse(strings.NewReader(tt.res)) + if err != nil { + t.Errorf("Decoding %s: %v", tt.name, err) + continue + } + if !reflect.DeepEqual(rects, tt.want) { + t.Errorf("Test %s: wrong rects\n Got %#v\nWant %#v", tt.name, rects, tt.want) + } + } +} + +// parseFloat64 +func pf(s string) float64 { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + panic(err) + } + return v +} + +var googleMoscow = ` +{ + "results" : [ + { + "address_components" : [ + { + "long_name" : "Moscow", + "short_name" : "Moscow", + "types" : [ "locality", "political" ] + }, + { + "long_name" : "gorod Moskva", + "short_name" : "g. 
Moskva", + "types" : [ "administrative_area_level_2", "political" ] + }, + { + "long_name" : "Moscow", + "short_name" : "Moscow", + "types" : [ "administrative_area_level_1", "political" ] + }, + { + "long_name" : "Russia", + "short_name" : "RU", + "types" : [ "country", "political" ] + } + ], + "formatted_address" : "Moscow, Russia", + "geometry" : { + "bounds" : { + "northeast" : { + "lat" : 56.009657, + "lng" : 37.945661 + }, + "southwest" : { + "lat" : 55.48992699999999, + "lng" : 37.319329 + } + }, + "location" : { + "lat" : 55.755826, + "lng" : 37.6173 + }, + "location_type" : "APPROXIMATE", + "viewport" : { + "northeast" : { + "lat" : 56.009657, + "lng" : 37.945661 + }, + "southwest" : { + "lat" : 55.48992699999999, + "lng" : 37.319329 + } + } + }, + "types" : [ "locality", "political" ] + }, + { + "address_components" : [ + { + "long_name" : "Moscow", + "short_name" : "Moscow", + "types" : [ "locality", "political" ] + }, + { + "long_name" : "Latah", + "short_name" : "Latah", + "types" : [ "administrative_area_level_2", "political" ] + }, + { + "long_name" : "Idaho", + "short_name" : "ID", + "types" : [ "administrative_area_level_1", "political" ] + }, + { + "long_name" : "United States", + "short_name" : "US", + "types" : [ "country", "political" ] + } + ], + "formatted_address" : "Moscow, ID, USA", + "geometry" : { + "bounds" : { + "northeast" : { + "lat" : 46.758882, + "lng" : -116.962068 + }, + "southwest" : { + "lat" : 46.710912, + "lng" : -117.039698 + } + }, + "location" : { + "lat" : 46.73238749999999, + "lng" : -117.0001651 + }, + "location_type" : "APPROXIMATE", + "viewport" : { + "northeast" : { + "lat" : 46.758882, + "lng" : -116.962068 + }, + "southwest" : { + "lat" : 46.710912, + "lng" : -117.039698 + } + } + }, + "types" : [ "locality", "political" ] + } + ], + "status" : "OK" +} +` + +// Response for "usa". +// Note the geometry bounds covering the whole world. In this case, use the viewport instead. 
+var googleUSA = ` +{ + "results" : [ + { + "address_components" : [ + { + "long_name" : "United States", + "short_name" : "US", + "types" : [ "country", "political" ] + } + ], + "formatted_address" : "United States", + "geometry" : { + "bounds" : { + "northeast" : { + "lat" : 90, + "lng" : 180 + }, + "southwest" : { + "lat" : -90, + "lng" : -180 + } + }, + "location" : { + "lat" : 37.09024, + "lng" : -95.712891 + }, + "location_type" : "APPROXIMATE", + "viewport" : { + "northeast" : { + "lat" : 49.38, + "lng" : -66.94 + }, + "southwest" : { + "lat" : 25.82, + "lng" : -124.39 + } + } + }, + "types" : [ "country", "political" ] + } + ], + "status" : "OK" +} +` diff --git a/vendor/github.com/camlistore/camlistore/pkg/googlestorage/README b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/README new file mode 100644 index 00000000..b1390059 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/README @@ -0,0 +1,41 @@ +Implements the Storage interface for Google Storage. +A GoogleStorage instance stores blobs in a single Google Storage bucket, with +each blob keyed by its blobref. + +Server configuration +===================== + +High-level server config is formatted like: + + "googlecloudstorage": "clientId:clientSecret:refreshToken:bucketName" + + +Testing +======== + +googlestorage_test.go contains integration tests which run against Google Storage. +In order to run these tests properly, you will need to: + +1. Set up google storage. See: + http://code.google.com/apis/storage/docs/signup.html + +2. Upload the contents of the testdata dir to a google storage bucket. Note + that all these files begin with 'test-': such files will be ignored when + the bucket is used as blobserver storage. + +3. Create the config file '~/.config/camlistore/gstestconfig.json'. 
The + file should look something like this: + + { + "gsconf": { + "auth": { + "client_id": "your client id", + "client_secret": "your client secret", + "refresh_token": "a refresh token" + }, + "bucket": "bucketName" + } + } + + + You can use 'camtool googinit' to help obtain the auth config object. diff --git a/vendor/github.com/camlistore/camlistore/pkg/googlestorage/googlestorage.go b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/googlestorage.go new file mode 100644 index 00000000..c629755d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/googlestorage.go @@ -0,0 +1,327 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package googlestorage is simple Google Cloud Storage client. +// +// It does not include any Camlistore-specific logic. +package googlestorage + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "unicode/utf8" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/httputil" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + api "google.golang.org/api/storage/v1" + "google.golang.org/cloud/compute/metadata" +) + +const ( + gsAccessURL = "https://storage.googleapis.com" + // Scope is the OAuth2 scope used for Google Cloud Storage. + Scope = "https://www.googleapis.com/auth/devstorage.read_write" +) + +// A Client provides access to Google Cloud Storage. 
+type Client struct { + client *http.Client + service *api.Service +} + +// An Object holds the name of an object (its bucket and key) within +// Google Cloud Storage. +type Object struct { + Bucket string + Key string +} + +func (o *Object) valid() error { + if o == nil { + return errors.New("invalid nil Object") + } + if o.Bucket == "" { + return errors.New("missing required Bucket field in Object") + } + if o.Key == "" { + return errors.New("missing required Key field in Object") + } + return nil +} + +// A SizedObject holds the bucket, key, and size of an object. +type SizedObject struct { + Object + Size int64 +} + +// NewServiceClient returns a Client for use when running on Google +// Compute Engine. This client can access buckets owned by the same +// project ID as the VM. +func NewServiceClient() (*Client, error) { + if !metadata.OnGCE() { + return nil, errors.New("not running on Google Compute Engine") + } + scopes, _ := metadata.Scopes("default") + haveScope := func(scope string) bool { + for _, x := range scopes { + if x == scope { + return true + } + } + return false + } + if !haveScope("https://www.googleapis.com/auth/devstorage.full_control") && + !haveScope("https://www.googleapis.com/auth/devstorage.read_write") { + return nil, errors.New("when this Google Compute Engine VM instance was created, it wasn't granted access to Cloud Storage") + } + client := oauth2.NewClient(context.Background(), google.ComputeTokenSource("")) + service, _ := api.New(client) + return &Client{client: client, service: service}, nil +} + +func NewClient(oauthClient *http.Client) *Client { + service, _ := api.New(oauthClient) + return &Client{ + client: oauthClient, + service: service, + } +} + +func (o *Object) String() string { + if o == nil { + return "" + } + return fmt.Sprintf("%v/%v", o.Bucket, o.Key) +} + +func (so SizedObject) String() string { + return fmt.Sprintf("%v/%v (%vB)", so.Bucket, so.Key, so.Size) +} + +// Makes a simple body-less google storage request 
+func (gsa *Client) simpleRequest(method, url_ string) (resp *http.Response, err error) { + // Construct the request + req, err := http.NewRequest(method, url_, nil) + if err != nil { + return + } + req.Header.Set("x-goog-api-version", "2") + + return gsa.client.Do(req) +} + +// GetObject fetches a Google Cloud Storage object. +// The caller must close rc. +func (c *Client) GetObject(obj *Object) (rc io.ReadCloser, size int64, err error) { + if err = obj.valid(); err != nil { + return + } + resp, err := c.simpleRequest("GET", gsAccessURL+"/"+obj.Bucket+"/"+obj.Key) + if err != nil { + return nil, 0, fmt.Errorf("GS GET request failed: %v\n", err) + } + if resp.StatusCode == http.StatusNotFound { + resp.Body.Close() + return nil, 0, os.ErrNotExist + } + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + return nil, 0, fmt.Errorf("GS GET request failed status: %v\n", resp.Status) + } + + return resp.Body, resp.ContentLength, nil +} + +// GetPartialObject fetches part of a Google Cloud Storage object. +// If length is negative, the rest of the object is returned. +// The caller must close rc. 
+func (c *Client) GetPartialObject(obj Object, offset, length int64) (rc io.ReadCloser, err error) { + if offset < 0 || length < 0 { + return nil, blob.ErrNegativeSubFetch + } + if err = obj.valid(); err != nil { + return + } + + req, err := http.NewRequest("GET", gsAccessURL+"/"+obj.Bucket+"/"+obj.Key, nil) + if err != nil { + return + } + req.Header.Set("x-goog-api-version", "2") + if length >= 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + } else { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } + + resp, err := c.client.Do(req) + if err != nil { + return nil, fmt.Errorf("GS GET request failed: %v\n", err) + } + if resp.StatusCode == http.StatusNotFound { + resp.Body.Close() + return nil, os.ErrNotExist + } + if !(resp.StatusCode == http.StatusPartialContent || (offset == 0 && resp.StatusCode == http.StatusOK)) { + resp.Body.Close() + if resp.StatusCode == http.StatusRequestedRangeNotSatisfiable { + return nil, blob.ErrOutOfRangeOffsetSubFetch + } + return nil, fmt.Errorf("GS GET request failed status: %v\n", resp.Status) + } + + return resp.Body, nil +} + +// StatObject checks for the size & existence of a Google Cloud Storage object. +// Non-existence of a file is not an error. +func (gsa *Client) StatObject(obj *Object) (size int64, exists bool, err error) { + if err = obj.valid(); err != nil { + return + } + res, err := gsa.simpleRequest("HEAD", gsAccessURL+"/"+obj.Bucket+"/"+obj.Key) + if err != nil { + return + } + res.Body.Close() // per contract but unnecessary for most RoundTrippers + + switch res.StatusCode { + case http.StatusNotFound: + return 0, false, nil + case http.StatusOK: + if size, err = strconv.ParseInt(res.Header["Content-Length"][0], 10, 64); err != nil { + return + } + return size, true, nil + default: + return 0, false, fmt.Errorf("Bad head response code: %v", res.Status) + } +} + +// PutObject uploads a Google Cloud Storage object. 
+// shouldRetry will be true if the put failed due to authorization, but +// credentials have been refreshed and another attempt is likely to succeed. +// In this case, content will have been consumed. +func (gsa *Client) PutObject(obj *Object, content io.Reader) error { + if err := obj.valid(); err != nil { + return err + } + const maxSlurp = 2 << 20 + var buf bytes.Buffer + n, err := io.CopyN(&buf, content, maxSlurp) + if err != nil && err != io.EOF { + return err + } + contentType := http.DetectContentType(buf.Bytes()) + if contentType == "application/octet-stream" && n < maxSlurp && utf8.Valid(buf.Bytes()) { + contentType = "text/plain; charset=utf-8" + } + + objURL := gsAccessURL + "/" + obj.Bucket + "/" + obj.Key + var req *http.Request + if req, err = http.NewRequest("PUT", objURL, ioutil.NopCloser(io.MultiReader(&buf, content))); err != nil { + return err + } + req.Header.Set("x-goog-api-version", "2") + req.Header.Set("Content-Type", contentType) + + var resp *http.Response + if resp, err = gsa.client.Do(req); err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("Bad put response code: %v", resp.Status) + } + return nil +} + +// DeleteObject removes an object. +func (gsa *Client) DeleteObject(obj *Object) error { + if err := obj.valid(); err != nil { + return err + } + resp, err := gsa.simpleRequest("DELETE", gsAccessURL+"/"+obj.Bucket+"/"+obj.Key) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { + return fmt.Errorf("Error deleting %v: bad delete response code: %v", obj, resp.Status) + } + return nil +} + +// EnumerateObjects lists the objects in a bucket. +// If after is non-empty, listing will begin with lexically greater object names. +// If limit is non-zero, the length of the list will be limited to that number. 
+func (gsa *Client) EnumerateObjects(bucket, after string, limit int) ([]SizedObject, error) { + // Build url, with query params + var params []string + if after != "" { + params = append(params, "marker="+url.QueryEscape(after)) + } + if limit > 0 { + params = append(params, fmt.Sprintf("max-keys=%v", limit)) + } + query := "" + if len(params) > 0 { + query = "?" + strings.Join(params, "&") + } + + resp, err := gsa.simpleRequest("GET", gsAccessURL+"/"+bucket+"/"+query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("Bad enumerate response code: %v", resp.Status) + } + + var xres struct { + Contents []SizedObject + } + defer httputil.CloseBody(resp.Body) + if err = xml.NewDecoder(resp.Body).Decode(&xres); err != nil { + return nil, err + } + + // Fill in the Bucket on all the SizedObjects + for _, o := range xres.Contents { + o.Bucket = bucket + } + + return xres.Contents, nil +} + +// BucketInfo returns information about a bucket. +func (c *Client) BucketInfo(bucket string) (*api.Bucket, error) { + return c.service.Buckets.Get(bucket).Do() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/googlestorage/googlestorage_test.go b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/googlestorage_test.go new file mode 100644 index 00000000..3ec2206b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/googlestorage_test.go @@ -0,0 +1,245 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// FYI These tests are integration tests that need to run against google +// storage. See the README for more details on necessary setup + +package googlestorage + +import ( + "bytes" + "flag" + "fmt" + "io/ioutil" + "testing" + "time" + + "camlistore.org/pkg/constants/google" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/oauthutil" + + "golang.org/x/oauth2" +) + +const testObjectContent = "Google Storage Test\n" + +type BufferCloser struct { + *bytes.Buffer +} + +func (b *BufferCloser) Close() error { + b.Reset() + return nil +} + +var gsConfigPath = flag.String("gs_config_path", "", "Path to Google Storage configuration JSON file, or empty to skip the test.") + +// Reads google storage config and creates a Client. Exits on error. +func doConfig(t *testing.T) (gsa *Client, bucket string) { + if *gsConfigPath == "" { + t.Skip("Skipping manual test. Set flag --gs_config_path to test Google Storage.") + } + + cf, err := jsonconfig.ReadFile(*gsConfigPath) + if err != nil { + t.Fatalf("Failed to read config: %v", err) + } + + var config jsonconfig.Obj + config = cf.RequiredObject("gsconf") + if err := cf.Validate(); err != nil { + t.Fatalf("Invalid config: %v", err) + } + + auth := config.RequiredObject("auth") + bucket = config.RequiredString("bucket") + if err := config.Validate(); err != nil { + t.Fatalf("Invalid config: %v", err) + } + + gsa = NewClient(oauth2.NewClient(oauth2.NoContext, oauthutil.NewRefreshTokenSource(&oauth2.Config{ + Scopes: []string{Scope}, + Endpoint: google.Endpoint, + ClientID: auth.RequiredString("client_id"), + ClientSecret: auth.RequiredString("client_secret"), + RedirectURL: oauthutil.TitleBarRedirectURL, + }, auth.RequiredString("refresh_token")))) + + if err := auth.Validate(); err != nil { + t.Fatalf("Invalid config: %v", err) + } + return +} + +func TestGetPartialObject(t *testing.T) { + gs, bucket := doConfig(t) + + 
body, err := gs.GetPartialObject(Object{bucket, "test-get"}, 5, 10) + if err != nil { + t.Fatalf("Fetch failed: %v\n", err) + } + defer body.Close() + + contents, err := ioutil.ReadAll(body) + if err != nil { + t.Fatalf("Failed to get object contents: %v", err) + } + if len(contents) != 10 { + t.Fatalf("wrong contents size: got %d, want %d", len(contents), 10) + } + + if string(contents) != testObjectContent[5:15] { + t.Fatalf("Object has incorrect content.\nExpected: '%v'\nFound: '%v'\n", testObjectContent, string(contents)) + } +} + +func TestGetObject(t *testing.T) { + gs, bucket := doConfig(t) + + body, size, err := gs.GetObject(&Object{bucket, "test-get"}) + if err != nil { + t.Fatalf("Fetch failed: %v\n", err) + } + defer body.Close() + + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatalf("Failed to get object contents: %v", err) + } + if len(content) != int(size) { + t.Fatalf("wrong contents size: got %d, want %d", len(content), size) + } + + if string(content) != testObjectContent { + t.Fatalf("Object has incorrect content.\nExpected: '%v'\nFound: '%v'\n", testObjectContent, string(content)) + } +} + +func TestStatObject(t *testing.T) { + gs, bucket := doConfig(t) + + // Stat a nonexistant file + size, exists, err := gs.StatObject(&Object{bucket, "test-shouldntexist"}) + if err != nil { + t.Fatalf("Stat failed: %v\n", err) + } else { + if exists { + t.Errorf("Test object exists!") + } + if size != 0 { + t.Errorf("Expected size to be 0, found %v\n", size) + } + } + + // Try statting an object which does exist + size, exists, err = gs.StatObject(&Object{bucket, "test-stat"}) + if err != nil { + t.Fatalf("Stat failed: %v\n", err) + } else { + if !exists { + t.Errorf("Test object doesn't exist!") + } + if size != int64(len(testObjectContent)) { + t.Errorf("Test object size is wrong: \nexpected: %v\nfound: %v\n", + len(testObjectContent), size) + } + } +} + +func TestPutObject(t *testing.T) { + gs, bucket := doConfig(t) + + now := time.Now() + 
testKey := fmt.Sprintf("test-put-%v.%v.%v-%v.%v.%v", + now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute(), now.Second()) + + err := gs.PutObject(&Object{bucket, testKey}, + &BufferCloser{bytes.NewBufferString(testObjectContent)}) + if err != nil { + t.Fatalf("Failed to put object: %v", err) + } + + // Just stat to check that it actually uploaded, don't bother reading back + size, exists, err := gs.StatObject(&Object{bucket, testKey}) + if !exists { + t.Errorf("Test object doesn't exist!") + } + if size != int64(len(testObjectContent)) { + t.Errorf("Test object size is wrong: \nexpected: %v\nfound: %v\n", + len(testObjectContent), size) + } +} + +func TestDeleteObject(t *testing.T) { + gs, bucket := doConfig(t) + + // Try deleting a nonexitent file + err := gs.DeleteObject(&Object{bucket, "test-shouldntexist"}) + if err == nil { + t.Errorf("Tried to delete nonexistent object, succeeded.") + } + + // Create a file, try to delete it + now := time.Now() + testKey := fmt.Sprintf("test-delete-%v.%v.%v-%v.%v.%v", + now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute(), now.Second()) + err = gs.PutObject(&Object{bucket, testKey}, + &BufferCloser{bytes.NewBufferString("Delete Me")}) + if err != nil { + t.Fatalf("Failed to put file to delete.") + } + err = gs.DeleteObject(&Object{bucket, testKey}) + if err != nil { + t.Errorf("Failed to delete object: %v", err) + } +} + +func TestEnumerateBucket(t *testing.T) { + gs, bucket := doConfig(t) + + // Enumerate ALL the things! 
+ objs, err := gs.EnumerateObjects(bucket, "", 0) + if err != nil { + t.Errorf("Enumeration failed: %v\n", err) + } else if len(objs) < 7 { + // Minimum number of blobs, equal to the number of files in testdata + t.Errorf("Expected at least 7 files, found %v", len(objs)) + } + + // Test a limited enum + objs, err = gs.EnumerateObjects(bucket, "", 5) + if err != nil { + t.Errorf("Enumeration failed: %v\n", err) + } else if len(objs) != 5 { + t.Errorf( + "Limited enum returned wrong number of blobs.\nExpected: %v\nFound: %v", + 5, len(objs)) + } + + // Test fetching a limited set from a known start point + objs, err = gs.EnumerateObjects(bucket, "test-enum", 4) + if err != nil { + t.Errorf("Enumeration failed: %v\n", err) + } else { + for i := 0; i < 4; i += 1 { + if objs[i].Key != fmt.Sprintf("test-enum-%v", i+1) { + t.Errorf( + "Enum from start point returned wrong key:\nExpected: test-enum-%v\nFound: %v", + i+1, objs[i].Key) + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum new file mode 100644 index 00000000..f2227372 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum @@ -0,0 +1 @@ +Google Storage Test diff --git a/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum-1 b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum-1 new file mode 100644 index 00000000..d00491fd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum-1 @@ -0,0 +1 @@ +1 diff --git a/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum-2 b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum-2 new file mode 100644 index 00000000..0cfbf088 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum-2 @@ -0,0 +1 @@ +2 diff --git 
a/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum-3 b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum-3 new file mode 100644 index 00000000..00750edc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum-3 @@ -0,0 +1 @@ +3 diff --git a/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum-4 b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum-4 new file mode 100644 index 00000000..b8626c4c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-enum-4 @@ -0,0 +1 @@ +4 diff --git a/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-get b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-get new file mode 100644 index 00000000..f2227372 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-get @@ -0,0 +1 @@ +Google Storage Test diff --git a/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-stat b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-stat new file mode 100644 index 00000000..f2227372 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/googlestorage/testdata/test-stat @@ -0,0 +1 @@ +Google Storage Test diff --git a/vendor/github.com/camlistore/camlistore/pkg/hashutil/hashutil.go b/vendor/github.com/camlistore/camlistore/pkg/hashutil/hashutil.go new file mode 100644 index 00000000..56847352 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/hashutil/hashutil.go @@ -0,0 +1,40 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package hashutil contains misc hashing functions lacking homes elsewhere. +package hashutil + +import ( + "crypto/sha1" + "crypto/sha256" + "fmt" +) + +// SHA256Prefix computes the SHA-256 digest of data and returns +// its first twenty lowercase hex digits. +func SHA256Prefix(data []byte) string { + h := sha256.New() + h.Write(data) + return fmt.Sprintf("%x", h.Sum(nil))[:20] +} + +// SHA1Prefix computes the SHA-1 digest of data and returns +// its first twenty lowercase hex digits. +func SHA1Prefix(data []byte) string { + h := sha1.New() + h.Write(data) + return fmt.Sprintf("%x", h.Sum(nil))[:20] +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/httputil/auth.go b/vendor/github.com/camlistore/camlistore/pkg/httputil/auth.go new file mode 100644 index 00000000..c9f84988 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/httputil/auth.go @@ -0,0 +1,101 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package httputil + +import ( + "encoding/base64" + "fmt" + "log" + "net/http" + "os" + "regexp" + "runtime" + "strings" + + "camlistore.org/pkg/netutil" +) + +var kBasicAuthPattern = regexp.MustCompile(`^Basic ([a-zA-Z0-9\+/=]+)`) + +// IsLocalhost reports whether the requesting connection is from this machine +// and has the same owner as this process. +func IsLocalhost(req *http.Request) bool { + uid := os.Getuid() + from, err := netutil.HostPortToIP(req.RemoteAddr, nil) + if err != nil { + return false + } + to, err := netutil.HostPortToIP(req.Host, from) + if err != nil { + return false + } + + // If our OS doesn't support uid. + // TODO(bradfitz): netutil on OS X uses "lsof" to figure out + // ownership of tcp connections, but when fuse is mounted and a + // request is outstanding (for instance, a fuse request that's + // making a request to camlistored and landing in this code + // path), lsof then blocks forever waiting on a lock held by the + // VFS, leading to a deadlock. Instead, on darwin, just trust + // any localhost connection here, which is kinda lame, but + // whatever. Macs aren't very multi-user anyway. + if uid == -1 || runtime.GOOS == "darwin" { + return from.IP.IsLoopback() && to.IP.IsLoopback() + } + if uid == 0 { + log.Printf("camlistored running as root. Don't do that.") + return false + } + if uid > 0 { + connUid, err := netutil.AddrPairUserid(from, to) + if err == nil { + if uid == connUid { + return true + } + log.Printf("auth: local connection uid %d doesn't match server uid %d", connUid, uid) + } + } + return false +} + +// BasicAuth parses the Authorization header on req +// If absent or invalid, an error is returned. 
+func BasicAuth(req *http.Request) (username, password string, err error) { + auth := req.Header.Get("Authorization") + if auth == "" { + err = fmt.Errorf("Missing \"Authorization\" in header") + return + } + matches := kBasicAuthPattern.FindStringSubmatch(auth) + if len(matches) != 2 { + err = fmt.Errorf("Bogus Authorization header") + return + } + encoded := matches[1] + enc := base64.StdEncoding + decBuf := make([]byte, enc.DecodedLen(len(encoded))) + n, err := enc.Decode(decBuf, []byte(encoded)) + if err != nil { + return + } + pieces := strings.SplitN(string(decBuf[0:n]), ":", 2) + if len(pieces) != 2 { + err = fmt.Errorf("didn't get two pieces") + return + } + return pieces[0], pieces[1], nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/httputil/auth_test.go b/vendor/github.com/camlistore/camlistore/pkg/httputil/auth_test.go new file mode 100644 index 00000000..fba2371f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/httputil/auth_test.go @@ -0,0 +1,182 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package httputil + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "testing" +) + +func testServer(t *testing.T, l net.Listener) *httptest.Server { + ts := &httptest.Server{ + Listener: l, + Config: &http.Server{ + Handler: http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if IsLocalhost(r) { + fmt.Fprintf(rw, "authorized") + return + } + fmt.Fprintf(rw, "unauthorized") + }), + }, + } + ts.Start() + + return ts +} + +func TestLocalhostAuthIPv6(t *testing.T) { + l, err := net.Listen("tcp", "[::1]:0") + if err != nil { + t.Skip("skipping IPv6 test; can't listen on [::1]:0") + } + _, port, err := net.SplitHostPort(l.Addr().String()) + if err != nil { + t.Fatal(err) + } + + // See if IPv6 works on this machine first. It seems the above + // Listen can pass on Linux but fail here in the dial. + c, err := net.Dial("tcp6", l.Addr().String()) + if err != nil { + t.Skipf("skipping IPv6 test; dial back to %s failed with %v", l.Addr(), err) + } + c.Close() + + ts := testServer(t, l) + defer ts.Close() + + // Use an explicit transport to force IPv6 (http.Get resolves localhost in IPv4 otherwise) + trans := &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + c, err := net.Dial("tcp6", addr) + return c, err + }, + } + + testLoginRequest(t, &http.Client{Transport: trans}, "http://[::1]:"+port) + + // See if we can get an IPv6 from resolving localhost + localips, err := net.LookupIP("localhost") + if err != nil { + t.Skipf("skipping IPv6 test; resolving localhost failed with %v", err) + } + if hasIPv6(localips) { + testLoginRequest(t, &http.Client{Transport: trans}, "http://localhost:"+port) + } else { + t.Logf("incomplete IPv6 test; resolving localhost didn't return any IPv6 addresses") + } +} + +func hasIPv6(ips []net.IP) bool { + for _, ip := range ips { + if ip.To4() == nil { + return true + } + } + return false +} + +func TestLocalhostAuthIPv4(t *testing.T) { + l, err := net.Listen("tcp", 
"127.0.0.1:0") + if err != nil { + t.Skip("skipping IPv4 test; can't listen on 127.0.0.1:0") + } + _, port, err := net.SplitHostPort(l.Addr().String()) + if err != nil { + t.Fatal(err) + } + + ts := testServer(t, l) + defer ts.Close() + + testLoginRequest(t, &http.Client{}, "http://127.0.0.1:"+port) + testLoginRequest(t, &http.Client{}, "http://localhost:"+port) +} + +func testLoginRequest(t *testing.T, client *http.Client, URL string) { + res, err := client.Get(URL) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + const exp = "authorized" + if string(body) != exp { + t.Errorf("got %q (instead of %v)", string(body), exp) + } +} + +func TestBasicAuth(t *testing.T) { + for _, d := range []struct { + header string + u, pw string + valid bool + }{ + // Empty is invalid. + {}, + { + // Missing password. + header: "Basic QWxhZGRpbg==", + }, + { + // Malformed base64 encoding. + header: "Basic foo", + }, + { + // Malformed header, no 'Basic ' prefix. 
+ header: "QWxhZGRpbg==", + }, + { + header: "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==", + u: "Aladdin", + pw: "open sesame", + valid: true, + }, + } { + req, err := http.NewRequest("GET", "/", nil) + if err != nil { + t.Fatal(err) + } + if d.header != "" { + req.Header.Set("Authorization", d.header) + } + + u, pw, err := BasicAuth(req) + t.Log(d.header, err) + if d.valid && err != nil { + t.Error("Want success parse of auth header, got", err) + } + if !d.valid && err == nil { + t.Error("Want error parsing", d.header) + } + + if d.u != u { + t.Errorf("Want user %q, got %q", d.u, u) + } + + if d.pw != pw { + t.Errorf("Want password %q, got %q", d.pw, pw) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/httputil/certs.go b/vendor/github.com/camlistore/camlistore/pkg/httputil/certs.go new file mode 100644 index 00000000..28fdd3d5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/httputil/certs.go @@ -0,0 +1,5383 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package httputil + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "log" + "math/big" + "net/http" + "runtime" + "sync" + "time" + + "camlistore.org/pkg/hashutil" + "camlistore.org/pkg/legal" + "camlistore.org/pkg/wkfs" +) + +var ( + poolOnce sync.Once + pool *x509.CertPool +) + +var ( + sysRootsOnce sync.Once + sysRootsGood bool +) + +// GenSelfTLS generates a self-signed certificate and key for hostname. +func GenSelfTLS(hostname string) (certPEM, keyPEM []byte, err error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certPEM, keyPEM, fmt.Errorf("failed to generate private key: %s", err) + } + + now := time.Now() + + if hostname == "" { + hostname = "localhost" + } + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + log.Fatalf("failed to generate serial number: %s", err) + } + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: hostname, + Organization: []string{hostname}, + }, + NotBefore: now.Add(-5 * time.Minute).UTC(), + NotAfter: now.AddDate(1, 0, 0).UTC(), + SubjectKeyId: []byte{1, 2, 3, 4}, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + IsCA: true, + BasicConstraintsValid: true, + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return certPEM, keyPEM, fmt.Errorf("failed to create certificate: %s", err) + } + var buf bytes.Buffer + if err := pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { + return certPEM, keyPEM, fmt.Errorf("error writing self-signed HTTPS cert: %v", err) + } + certPEM = []byte(string(buf.Bytes())) + + buf.Reset() + if err := pem.Encode(&buf, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: 
x509.MarshalPKCS1PrivateKey(priv)}); err != nil { + return certPEM, keyPEM, fmt.Errorf("error writing self-signed HTTPS private key: %v", err) + } + keyPEM = buf.Bytes() + return certPEM, keyPEM, nil +} + +// CertFingerprint returns the SHA-256 prefix of the x509 certificate encoded in certPEM. +func CertFingerprint(certPEM []byte) (string, error) { + p, _ := pem.Decode(certPEM) + if p == nil { + return "", errors.New("no valid PEM data found") + } + cert, err := x509.ParseCertificate(p.Bytes) + if err != nil { + return "", fmt.Errorf("failed to parse certificate: %v", err) + } + return hashutil.SHA256Prefix(cert.Raw), nil +} + +// CertFingerprints returns a map of hash prefixes of the x509 certificate encoded in +// certPEM. The hashes are keyed by name ("SHA-1", and "SHA-256"). +func CertFingerprints(certPEM []byte) (map[string]string, error) { + p, _ := pem.Decode(certPEM) + if p == nil { + return nil, errors.New("no valid PEM data found") + } + cert, err := x509.ParseCertificate(p.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %v", err) + } + return map[string]string{ + "SHA-1": hashutil.SHA1Prefix(cert.Raw), + "SHA-256": hashutil.SHA256Prefix(cert.Raw), + }, nil +} + +// GenSelfTLSFiles generates a self-signed certificate and key for hostname, +// and writes them to the given paths. If it succeeds it also returns +// the SHA256 prefix of the new cert. 
+func GenSelfTLSFiles(hostname, certPath, keyPath string) (fingerprint string, err error) { + cert, key, err := GenSelfTLS(hostname) + if err != nil { + return "", err + } + sig, err := CertFingerprint(cert) + if err != nil { + return "", fmt.Errorf("could not get SHA-256 fingerprint of certificate: %v", err) + } + if err := wkfs.WriteFile(certPath, cert, 0666); err != nil { + return "", fmt.Errorf("failed to write self-signed TLS cert: %v", err) + } + if err := wkfs.WriteFile(keyPath, key, 0600); err != nil { + return "", fmt.Errorf("failed to write self-signed TLS key: %v", err) + } + return sig, nil +} + +// InstallCerts adds Mozilla's Certificate Authority root set to +// http.DefaultTransport's configuration if the current operating +// system's root CAs are not available. (for instance, if running inside +// a Docker container without a filesystem) +func InstallCerts() { + if !SystemCARootsAvailable() { + if tr, ok := http.DefaultTransport.(*http.Transport); ok { + tlsConf := tr.TLSClientConfig + if tlsConf == nil { + tlsConf = &tls.Config{} + tr.TLSClientConfig = tlsConf + } + if tlsConf.RootCAs == nil { + tlsConf.RootCAs = RootCAPool() + } + } + } +} + +// RootCAPool returns the Mozilla Root Certificate Authority pool, +// as statically compiled into the binary. +func RootCAPool() *x509.CertPool { + poolOnce.Do(buildPool) + return pool +} + +func buildPool() { + pool = x509.NewCertPool() + pool.AppendCertsFromPEM([]byte(certData)) +} + +// SystemCARootsAvailable reports whether the operating system's root +// CA files are available. +func SystemCARootsAvailable() bool { + sysRootsOnce.Do(checkSystemRoots) + return sysRootsGood +} + +func checkSystemRoots() { + if runtime.GOOS == "windows" { + // Windows is special somehow, and won't be running as + // a static Docker binary anywhere (which is what this + // whole file is about), so just say it's fine and we + // won't use the static CA set below. 
+ sysRootsGood = true + return + } + + // Verify a dummy cert just to test whether the system roots + // are available. This depends on knowing that the x509 + // package returns this type of error first, before checking + // the certificate's validity. + _, err := new(x509.Certificate).Verify(x509.VerifyOptions{}) + _, isSysRootError := err.(x509.SystemRootsError) + sysRootsGood = !isSysRootError +} + +func init() { + legal.RegisterLicense(` +For Mozilla Root CA set, +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +`) +} + +// Generated from https://github.com/agl/extract-nss-root-certs +var certData = ` +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +# Issuer: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc. +# Subject: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc. 
+# Label: "GTE CyberTrust Global Root" +# Serial: 421 +# MD5 Fingerprint: ca:3d:d3:68:f1:03:5c:d0:32:fa:b8:2b:59:e8:5a:db +# SHA1 Fingerprint: 97:81:79:50:d8:1c:96:70:cc:34:d8:09:cf:79:44:31:36:7e:f4:74 +# SHA256 Fingerprint: a5:31:25:18:8d:21:10:aa:96:4b:02:c7:b7:c6:da:32:03:17:08:94:e5:fb:71:ff:fb:66:67:d5:e6:81:0a:36 +-----BEGIN CERTIFICATE----- +MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYD +VQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNv +bHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJv +b3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEzMjM1OTAwWjB1MQswCQYDVQQGEwJV +UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU +cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds +b2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrH +iM3dFw4usJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTS +r41tiGeA5u2ylc9yMcqlHHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X4 +04Wqk2kmhXBIgD8SFcd5tB8FLztimQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAG3r +GwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMWM4ETCJ57NE7fQMh017l9 +3PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OFNMQkpw0P +lZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/ +-----END CERTIFICATE----- + +# Issuer: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division +# Subject: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division +# Label: "Thawte Server CA" +# Serial: 1 +# MD5 Fingerprint: c5:70:c4:a2:ed:53:78:0c:c8:10:53:81:64:cb:d0:1d +# SHA1 Fingerprint: 23:e5:94:94:51:95:f2:41:48:03:b4:d5:64:d2:a3:a3:f5:d8:8b:8c +# SHA256 Fingerprint: b4:41:0b:73:e2:e6:ea:ca:47:fb:c4:2f:8f:a4:01:8a:f4:38:1d:c5:4c:fa:a8:44:50:46:1e:ed:09:45:4d:e9 +-----BEGIN CERTIFICATE----- +MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkEx +FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD +VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEm 
+MCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wHhcNOTYwODAx +MDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3 +dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNl +cyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3 +DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQAD +gY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl/Kj0R1HahbUgdJSGHg91 +yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg71CcEJRCX +L+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGj +EzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG +7oWDTSEwjsrZqG9JGubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6e +QNuozDJ0uW8NxuOzRAvZim+aKZuZGCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZ +qdq5snUb9kLy78fyGPmJvKP/iiMucEc= +-----END CERTIFICATE----- + +# Issuer: CN=Thawte Premium Server CA O=Thawte Consulting cc OU=Certification Services Division +# Subject: CN=Thawte Premium Server CA O=Thawte Consulting cc OU=Certification Services Division +# Label: "Thawte Premium Server CA" +# Serial: 1 +# MD5 Fingerprint: 06:9f:69:79:16:66:90:02:1b:8c:8c:a2:c3:07:6f:3a +# SHA1 Fingerprint: 62:7f:8d:78:27:65:63:99:d2:7d:7f:90:44:c9:fe:b3:f3:3e:fa:9a +# SHA256 Fingerprint: ab:70:36:36:5c:71:54:aa:29:c2:c2:9f:5d:41:91:16:3b:16:2a:22:25:01:13:57:d5:6d:07:ff:a7:bc:1f:72 +-----BEGIN CERTIFICATE----- +MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkEx +FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD +VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UEAxMYVGhhd3RlIFByZW1pdW0gU2Vy +dmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZlckB0aGF3dGUuY29t +MB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYTAlpB +MRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsG +A1UEChMUVGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRp +b24gU2VydmljZXMgRGl2aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNl 
+cnZlciBDQTEoMCYGCSqGSIb3DQEJARYZcHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNv +bTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2aovXwlue2oFBYo847kkE +VdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIhUdib0GfQ +ug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMR +uHM/qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG +9w0BAQQFAAOBgQAmSCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUI +hfzJATj/Tb7yFkJD57taRvvBxhEf8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JM +pAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7tUCemDaYj+bvLpgcUQg== +-----END CERTIFICATE----- + +# Issuer: O=Equifax OU=Equifax Secure Certificate Authority +# Subject: O=Equifax OU=Equifax Secure Certificate Authority +# Label: "Equifax Secure CA" +# Serial: 903804111 +# MD5 Fingerprint: 67:cb:9d:c0:13:24:8a:82:9b:b2:17:1e:d1:1b:ec:d4 +# SHA1 Fingerprint: d2:32:09:ad:23:d3:14:23:21:74:e4:0d:7f:9d:62:13:97:86:63:3a +# SHA256 Fingerprint: 08:29:7a:40:47:db:a2:36:80:c7:31:db:6e:31:76:53:ca:78:48:e1:be:bd:3a:0b:01:79:a7:07:f9:2c:f1:78 +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV +UzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy +dGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1 +MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx +dWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B +AQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f +BeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A +cJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC +AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ +MA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm +aWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw +ODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj +IBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF +MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA 
+A4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y +7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh +1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4 +-----END CERTIFICATE----- + +# Issuer: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority - G2/(c) 1998 VeriSign, Inc. - For authorized use only/VeriSign Trust Network +# Subject: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority - G2/(c) 1998 VeriSign, Inc. - For authorized use only/VeriSign Trust Network +# Label: "Verisign Class 3 Public Primary Certification Authority - G2" +# Serial: 167285380242319648451154478808036881606 +# MD5 Fingerprint: a2:33:9b:4c:74:78:73:d4:6c:e7:c1:f3:8d:cb:5c:e9 +# SHA1 Fingerprint: 85:37:1c:a6:e5:50:14:3d:ce:28:03:47:1b:de:3a:09:e8:f8:77:0f +# SHA256 Fingerprint: 83:ce:3c:12:29:68:8a:59:3d:48:5f:81:97:3c:0f:91:95:43:1e:da:37:cc:5e:36:43:0e:79:c7:a8:88:63:8b +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xh +c3MgMyBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcy +MTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3Jp +emVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMB4X +DTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVTMRcw +FQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMg +UHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEo +YykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5 +MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEB +AQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCOFoUgRm1HP9SFIIThbbP4 +pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71lSk8UOg0 +13gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwID +AQABMA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSk +U01UbSuvDV1Ai2TT1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7i 
+F6YM40AIOw7n60RzKprxaZLvcRTDOaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpY +oJ2daZH9 +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Label: "GlobalSign Root CA" +# Serial: 4835703278459707669005204 +# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a +# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c +# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99 +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT +aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Label: 
"GlobalSign Root CA - R2" +# Serial: 4835703278459682885658125 +# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30 +# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe +# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 +MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL +v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 +eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq +tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd +C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa +zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB +mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH +V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n +bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG +3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs +J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO +291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS +ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd +AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. 
OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Label: "Verisign Class 3 Public Primary Certification Authority - G3" +# Serial: 206684696279472310254277870180966723415 +# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09 +# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6 +# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44 +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b +N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t +KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu +kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm +CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ +Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu +imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te +2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe +DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p +F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt +TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +# Issuer: 
CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Label: "Verisign Class 4 Public Primary Certification Authority - G3" +# Serial: 314531972711909413743075096039378935511 +# MD5 Fingerprint: db:c8:f2:27:2e:b1:ea:6a:29:23:5d:fe:56:3e:33:df +# SHA1 Fingerprint: c8:ec:8c:87:92:69:cb:4b:ab:39:e9:8d:7e:57:67:f3:14:95:73:9d +# SHA256 Fingerprint: e3:89:36:0d:0f:db:ae:b3:d2:50:58:4b:47:30:31:4e:22:2f:39:c1:56:a0:20:14:4e:8d:96:05:61:79:15:06 +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3LpRFpxlmr8Y+1 +GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaStBO3IFsJ ++mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0Gbd +U6LM8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLm +NxdLMEYH5IBtptiWLugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XY +ufTsgsbSPZUd5cBPhMnZo0QoBmrXRazwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ +ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAj/ola09b5KROJ1WrIhVZPMq1 +CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXttmhwwjIDLk5Mq 
+g6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c +2NU8Qh0XwRJdRTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/ +bLvSHgCwIe34QWKCudiyxLtGUPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Label: "Entrust.net Premium 2048 Secure Server CA" +# Serial: 946069240 +# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90 +# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31 +# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77 +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH 
+4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub +j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Label: "Baltimore CyberTrust Root" +# Serial: 33554617 +# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4 +# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74 +# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx 
+jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 +Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz +ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +# Issuer: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc. +# Subject: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc. +# Label: "Equifax Secure Global eBusiness CA" +# Serial: 1 +# MD5 Fingerprint: 8f:5d:77:06:27:c4:98:3c:5b:93:78:e7:d7:7d:9b:cc +# SHA1 Fingerprint: 7e:78:4a:10:1c:82:65:cc:2d:e1:f1:6d:47:b4:40:ca:d9:0a:19:45 +# SHA256 Fingerprint: 5f:0b:62:ea:b5:e3:53:ea:65:21:65:16:58:fb:b6:53:59:f4:43:28:0a:4a:fb:d1:04:d7:7d:10:f9:f0:4c:07 +-----BEGIN CERTIFICATE----- +MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEc +MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBT +ZWN1cmUgR2xvYmFsIGVCdXNpbmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIw +MDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0VxdWlmYXggU2Vj +dXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEdsb2JhbCBlQnVzaW5l +c3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRVPEnC +UdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc +58O/gGzNqfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/ +o5brhTMhHD4ePmBudpxnhcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAH +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUvqigdHJQa0S3ySPY+6j/s1dr +aGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hsMA0GCSqGSIb3DQEBBAUA +A4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okENI7SS+RkA +Z70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv +8qIYNMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV +-----END CERTIFICATE----- + +# Issuer: CN=Equifax Secure eBusiness CA-1 O=Equifax Secure Inc. +# Subject: CN=Equifax Secure eBusiness CA-1 O=Equifax Secure Inc. 
+# Label: "Equifax Secure eBusiness CA 1" +# Serial: 4 +# MD5 Fingerprint: 64:9c:ef:2e:44:fc:c6:8f:52:07:d0:51:73:8f:cb:3d +# SHA1 Fingerprint: da:40:18:8b:91:89:a3:ed:ee:ae:da:97:fe:2f:9d:f5:b7:d1:8a:41 +# SHA256 Fingerprint: cf:56:ff:46:a4:a1:86:10:9d:d9:65:84:b5:ee:b5:8a:51:0c:42:75:b0:e5:f9:4f:40:bb:ae:86:5e:19:f6:73 +-----BEGIN CERTIFICATE----- +MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEc +MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBT +ZWN1cmUgZUJ1c2luZXNzIENBLTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQw +MDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5j +LjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENBLTEwgZ8wDQYJ +KoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ1MRo +RvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBu +WqDZQu4aIZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKw +Env+j6YDAgMBAAGjZjBkMBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTAD +AQH/MB8GA1UdIwQYMBaAFEp4MlIR21kWNl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRK +eDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQFAAOBgQB1W6ibAxHm6VZM +zfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5lSE/9dR+ +WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN +/Bf+KpYrtWKmpj29f5JZzVoqgrI3eQ== +-----END CERTIFICATE----- + +# Issuer: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network +# Subject: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network +# Label: "AddTrust Low-Value Services Root" +# Serial: 1 +# MD5 Fingerprint: 1e:42:95:02:33:92:6b:b9:5f:c0:7f:da:d6:b2:4b:fc +# SHA1 Fingerprint: cc:ab:0e:a0:4c:23:01:d6:69:7b:dd:37:9f:cd:12:eb:24:e3:94:9d +# SHA256 Fingerprint: 8c:72:09:27:9a:c0:4e:27:5e:16:d0:7f:d3:b7:75:e8:01:54:b5:96:80:46:e3:1f:52:dd:25:76:63:24:e9:a7 +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3 +b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMw 
+MTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYD +VQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ul +CDtbKRY654eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6n +tGO0/7Gcrjyvd7ZWxbWroulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyl +dI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1Zmne3yzxbrww2ywkEtvrNTVokMsAsJch +PXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJuiGMx1I4S+6+JNM3GOGvDC ++Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8wHQYDVR0O +BBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBl +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFk +ZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxtZBsfzQ3duQH6lmM0MkhHma6X +7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0PhiVYrqW9yTkkz +43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJl +pz/+0WatC7xrmYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOA +WiFeIc9TVPC6b4nbqKqVz4vjccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network +# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network +# Label: "AddTrust External Root" +# Serial: 1 +# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f +# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68 +# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2 +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs +IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290 
+MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h +bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v +dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt +H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9 +uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX +mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX +a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN +E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0 +WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD +VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0 +Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU +cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx +IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN +AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH +YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC +Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX +c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a +mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +# Issuer: CN=AddTrust Public CA Root O=AddTrust AB OU=AddTrust TTP Network +# Subject: CN=AddTrust Public CA Root O=AddTrust AB OU=AddTrust TTP Network +# Label: "AddTrust Public Services Root" +# Serial: 1 +# MD5 Fingerprint: c1:62:3e:23:c5:82:73:9c:03:59:4b:2b:e9:77:49:7f +# SHA1 Fingerprint: 2a:b6:28:48:5e:78:fb:f3:ad:9e:79:10:dd:6b:df:99:72:2c:96:e5 +# SHA256 Fingerprint: 07:91:ca:07:49:b2:07:82:aa:d3:c7:d7:bd:0c:df:c9:48:58:35:84:3e:b2:d7:99:60:09:ce:43:ab:6c:69:27 +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3 
+b3JrMSAwHgYDVQQDExdBZGRUcnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAx +MDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtB +ZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIDAeBgNV +BAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV +6tsfSlbunyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nX +GCwwfQ56HmIexkvA/X1id9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnP +dzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSGAa2Il+tmzV7R/9x98oTaunet3IAIx6eH +1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAwHM+A+WD+eeSI8t0A65RF +62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0GA1UdDgQW +BBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDEL +MAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRU +cnVzdCBUVFAgTmV0d29yazEgMB4GA1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJv +b3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4JNojVhaTdt02KLmuG7jD8WS6 +IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL+YPoRNWyQSW/ +iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh +4SINhwBk/ox9Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQm +XiLsks3/QppEIW1cxeMiHV9HEufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +# Issuer: CN=AddTrust Qualified CA Root O=AddTrust AB OU=AddTrust TTP Network +# Subject: CN=AddTrust Qualified CA Root O=AddTrust AB OU=AddTrust TTP Network +# Label: "AddTrust Qualified Certificates Root" +# Serial: 1 +# MD5 Fingerprint: 27:ec:39:47:cd:da:5a:af:e2:9a:01:65:21:a9:4c:bb +# SHA1 Fingerprint: 4d:23:78:ec:91:95:39:b5:00:7f:75:8f:03:3b:21:1e:c5:4d:8b:cf +# SHA256 Fingerprint: 80:95:21:08:05:db:4b:bc:35:5e:44:28:d8:fd:6e:c2:cd:e3:ab:5f:b9:7a:99:42:98:8e:b8:f4:dc:d0:60:16 +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3 
+b3JrMSMwIQYDVQQDExpBZGRUcnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1 +MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcxCzAJBgNVBAYTAlNFMRQwEgYDVQQK +EwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIzAh +BgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwq +xBb/4Oxx64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G +87B4pfYOQnrjfxvM0PC3KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i +2O+tCBGaKZnhqkRFmhJePp1tUvznoD1oL/BLcHwTOK28FSXx1s6rosAx1i+f4P8U +WfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GRwVY18BTcZTYJbqukB8c1 +0cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HUMIHRMB0G +A1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6Fr +pGkwZzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQL +ExRBZGRUcnVzdCBUVFAgTmV0d29yazEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlm +aWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBABmrder4i2VhlRO6aQTv +hsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxGGuoYQ992zPlm +hpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3 +P6CxB9bpT9zeRXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9Y +iQBCYz95OdBEsIJuQRno3eDBiFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5no +xqE= +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. 
+# Label: "Entrust Root Certification Authority" +# Serial: 1164660820 +# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 +# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 +# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END 
CERTIFICATE----- + +# Issuer: O=RSA Security Inc OU=RSA Security 2048 V3 +# Subject: O=RSA Security Inc OU=RSA Security 2048 V3 +# Label: "RSA Security 2048 v3" +# Serial: 13297492616345471454730593562152402946 +# MD5 Fingerprint: 77:0d:19:b1:21:fd:00:42:9c:3e:0c:a5:dd:0b:02:8e +# SHA1 Fingerprint: 25:01:90:19:cf:fb:d9:99:1c:b7:68:25:74:8d:94:5f:30:93:95:42 +# SHA256 Fingerprint: af:8b:67:62:a1:e5:28:22:81:61:a9:5d:5c:55:9e:e2:66:27:8f:75:d7:9e:83:01:89:a5:03:50:6a:bd:6b:4c +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6 +MRkwFwYDVQQKExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJp +dHkgMjA0OCBWMzAeFw0wMTAyMjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAX +BgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAbBgNVBAsTFFJTQSBTZWN1cml0eSAy +MDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt49VcdKA3Xtp +eafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7Jylg +/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGl +wSMiuLgbWhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnh +AMFRD0xS+ARaqn1y07iHKrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2 +PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP+Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpu +AWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4EFgQUB8NR +MKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYc +HnmYv/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/ +Zb5gEydxiKRz44Rj0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+ +f00/FGj1EVDVwfSQpQgdMWD/YIwjVAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVO +rSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395nzIlQnQFgCi/vcEkllgVsRch +6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kApKnXwiJPZ9d3 +7CAFYd4= +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc. +# Subject: CN=GeoTrust Global CA O=GeoTrust Inc. 
+# Label: "GeoTrust Global CA" +# Serial: 144470 +# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5 +# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12 +# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i +YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg +R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9 +9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq +fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv +iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU +1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+ +bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW +MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA +ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l +uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn +Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS +tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF +PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un +hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV +5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw== +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Global CA 2 O=GeoTrust Inc. +# Subject: CN=GeoTrust Global CA 2 O=GeoTrust Inc. 
+# Label: "GeoTrust Global CA 2" +# Serial: 1 +# MD5 Fingerprint: 0e:40:a7:6c:de:03:5d:8f:d1:0f:e4:d1:8d:f9:6c:a9 +# SHA1 Fingerprint: a9:e9:78:08:14:37:58:88:f2:05:19:b0:6d:2b:0d:2b:60:16:90:7d +# SHA256 Fingerprint: ca:2d:82:a0:86:77:07:2f:8a:b6:76:4f:f0:35:67:6c:fe:3e:5e:32:5e:01:21:72:df:3f:92:09:6d:b7:9b:85 +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFs +IENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3Qg +R2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDvPE1A +PRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/NTL8 +Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hL +TytCOb1kLUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL +5mkWRxHCJ1kDs6ZgwiFAVvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7 +S4wMcoKK+xfNAGw6EzywhIdLFnopsk/bHdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe +2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNHK266ZUap +EBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6td +EPx7srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv +/NgdRN3ggX+d6YvhZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywN +A0ZF66D0f0hExghAzN4bcLUprbqLOzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0 +abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkCx1YAzUm5s2x7UwQa4qjJqhIF +I8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqFH4z1Ir+rzoPz +4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc. +# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc. 
+# Label: "GeoTrust Universal CA" +# Serial: 1 +# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48 +# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79 +# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12 +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy +c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0 +IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV +VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8 +cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT +QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh +F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v +c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w +mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd +VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX +teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ +f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe +Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+ +nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB +/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY +MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG +9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX +IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn +ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z +uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN +Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja +QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW 
+koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9 +ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt +DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm +bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. +# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. +# Label: "GeoTrust Universal CA 2" +# Serial: 1 +# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7 +# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79 +# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy +c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD +VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1 +c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81 +WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG +FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq +XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL +se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb +KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd +IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73 +y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt +hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc +QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4 +Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV +HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ +KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z 
+dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ +L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr +Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo +ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY +T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz +GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m +1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV +OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH +6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX +QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +# Issuer: CN=America Online Root Certification Authority 1 O=America Online Inc. +# Subject: CN=America Online Root Certification Authority 1 O=America Online Inc. +# Label: "America Online Root Certification Authority 1" +# Serial: 1 +# MD5 Fingerprint: 14:f1:08:ad:9d:fa:64:e2:89:e7:1c:cf:a8:ad:7d:5e +# SHA1 Fingerprint: 39:21:c1:15:c1:5d:0e:ca:5c:cb:5b:c4:f0:7d:21:d8:05:0b:56:6a +# SHA256 Fingerprint: 77:40:73:12:c6:3a:15:3d:5b:c0:0b:4e:51:75:9c:df:da:c2:37:dc:2a:33:b6:79:46:e9:8e:9b:fa:68:0a:e3 +-----BEGIN CERTIFICATE----- +MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEc +MBoGA1UEChMTQW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBP +bmxpbmUgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2 +MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0Ft +ZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2EgT25saW5lIFJvb3Qg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lk +hsmj76CGv2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym +1BW32J/X3HGrfpq/m44zDyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsW +OqMFf6Dch9Wc/HKpoH145LcxVR5lu9RhsCFg7RAycsWSJR74kEoYeEfffjA3PlAb +2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP8c9GsEsPPt2IYriMqQko 
+O3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAU +AK3Zo/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +BQUAA4IBAQB8itEfGDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkF +Zu90821fnZmv9ov761KyBZiibyrFVL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAb +LjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft3OJvx8Fi8eNy1gTIdGcL+oir +oQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43gKd8hdIaC2y+C +MMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds +sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7 +-----END CERTIFICATE----- + +# Issuer: CN=America Online Root Certification Authority 2 O=America Online Inc. +# Subject: CN=America Online Root Certification Authority 2 O=America Online Inc. +# Label: "America Online Root Certification Authority 2" +# Serial: 1 +# MD5 Fingerprint: d6:ed:3c:ca:e2:66:0f:af:10:43:0d:77:9b:04:09:bf +# SHA1 Fingerprint: 85:b5:ff:67:9b:0c:79:96:1f:c8:6e:44:22:00:46:13:db:17:92:84 +# SHA256 Fingerprint: 7d:3b:46:5a:60:14:e5:26:c0:af:fc:ee:21:27:d2:31:17:27:ad:81:1c:26:84:2d:00:6a:f3:73:06:cc:80:bd +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEc +MBoGA1UEChMTQW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBP +bmxpbmUgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2 +MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0Ft +ZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2EgT25saW5lIFJvb3Qg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC +206B89enfHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFci +KtZHgVdEglZTvYYUAQv8f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2 +JxhP7JsowtS013wMPgwr38oE18aO6lhOqKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9 +BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JNRvCAOVIyD+OEsnpD8l7e +Xz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0gBe4lL8B +PeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67 
+Xnfn6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEq +Z8A9W6Wa6897GqidFEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZ +o2C7HK2JNDJiuEMhBnIMoVxtRsX6Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3 ++L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnjB453cMor9H124HhnAgMBAAGj +YzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3OpaaEg5+31IqEj +FNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmn +xPBUlgtk87FYT15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2 +LHo1YGwRgJfMqZJS5ivmae2p+DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzccc +obGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXgJXUjhx5c3LqdsKyzadsXg8n33gy8 +CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//ZoyzH1kUQ7rVyZ2OuMe +IjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgOZtMA +DjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2F +AjgQ5ANh1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUX +Om/9riW99XJZZLF0KjhfGEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPb +AZO1XB4Y3WRayhgoPmMEEf0cjQAPuDffZ4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQl +Zvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuPcX/9XhmgD0uRuMRUvAaw +RY8mkaKO/qk= +-----END CERTIFICATE----- + +# Issuer: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association +# Subject: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association +# Label: "Visa eCommerce Root" +# Serial: 25952180776285836048024890241505565794 +# MD5 Fingerprint: fc:11:b8:d8:08:93:30:00:6d:23:f9:7e:eb:52:1e:02 +# SHA1 Fingerprint: 70:17:9b:86:8c:00:a4:fa:60:91:52:22:3f:9f:3e:32:bd:e0:05:62 +# SHA256 Fingerprint: 69:fa:c9:bd:55:fb:0a:c7:8d:53:bb:ee:5c:f1:d5:97:98:9f:d0:aa:ab:20:a2:51:51:bd:f1:73:3e:e7:d1:22 +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr +MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl +cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw 
+CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h +dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l +cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h +2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E +lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV +ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq +299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t +vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL +dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF +AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR +zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3 +LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd +7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw +++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +# Issuer: CN=Certum CA O=Unizeto Sp. z o.o. +# Subject: CN=Certum CA O=Unizeto Sp. z o.o. 
+# Label: "Certum Root CA" +# Serial: 65568 +# MD5 Fingerprint: 2c:8f:9f:66:1d:18:90:b1:47:26:9d:8e:86:82:8c:a9 +# SHA1 Fingerprint: 62:52:dc:40:f7:11:43:a2:2f:de:9e:f7:34:8e:06:42:51:b1:81:18 +# SHA256 Fingerprint: d8:e0:fe:bc:1d:b2:e3:8d:00:94:0f:37:d2:7d:41:34:4d:99:3e:73:4b:99:d5:65:6d:97:78:d4:d8:14:36:24 +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBM +MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD +QTAeFw0wMjA2MTExMDQ2MzlaFw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBM +MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD +QTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6xwS7TT3zNJc4YPk/E +jG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdLkKWo +ePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GI +ULdtlkIJ89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapu +Ob7kky/ZR6By6/qmW6/KUz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUg +AKpoC6EahQGcxEZjgoi2IrHu/qpGWX7PNSzVttpd90gzFFS269lvzs2I1qsb2pY7 +HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEA +uI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+GXYkHAQa +TOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTg +xSvgGrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1q +CjqTE5s7FCMTY5w/0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5x +O/fIR/RpbxXyEV6DHpx8Uq79AtoSqFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs +6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +# Issuer: CN=AAA Certificate Services O=Comodo CA Limited +# Subject: CN=AAA Certificate Services O=Comodo CA Limited +# Label: "Comodo AAA Services root" +# Serial: 1 +# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0 +# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49 +# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4 +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb 
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 +YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +# Issuer: CN=Secure Certificate Services O=Comodo CA Limited +# Subject: CN=Secure Certificate Services O=Comodo CA Limited +# Label: "Comodo Secure Services root" +# Serial: 1 +# MD5 Fingerprint: d3:d9:bd:ae:9f:ac:67:24:b3:c8:1b:52:e1:b9:a9:bd +# SHA1 Fingerprint: 4a:65:d5:f4:1d:ef:39:b8:b8:90:4a:4a:d3:64:81:33:cf:c7:a1:d1 +# SHA256 Fingerprint: bd:81:ce:3b:4f:65:91:d1:1a:67:b5:fc:7a:47:fd:ef:25:52:1b:f9:aa:4e:18:b9:e3:df:2e:34:a7:80:3b:e8 +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEb 
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRp +ZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVow +fjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAiBgNV +BAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPM +cm3ye5drswfxdySRXyWP9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3S +HpR7LZQdqnXXs5jLrLxkU0C8j6ysNstcrbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996 +CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rCoznl2yY4rYsK7hljxxwk +3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3Vp6ea5EQz +6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNV +HQ4EFgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1Ud +EwEB/wQFMAMBAf8wgYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2Rv +Y2EuY29tL1NlY3VyZUNlcnRpZmljYXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRw +Oi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmww +DQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm4J4oqF7Tt/Q0 +5qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtI +gKvcnDe4IRRLDXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJ +aD61JlfutuC23bkpgHl9j6PwpCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDl +izeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1HRR3B7Hzs/Sk= +-----END CERTIFICATE----- + +# Issuer: CN=Trusted Certificate Services O=Comodo CA Limited +# Subject: CN=Trusted Certificate Services O=Comodo CA Limited +# Label: "Comodo Trusted Services root" +# Serial: 1 +# MD5 Fingerprint: 91:1b:3f:6e:cd:9e:ab:ee:07:fe:1f:71:d2:b3:61:27 +# SHA1 Fingerprint: e1:9f:e3:0e:8b:84:60:9e:80:9b:17:0d:72:a8:c5:ba:6e:14:09:bd +# SHA256 Fingerprint: 3f:06:e5:56:81:d4:96:f5:be:16:9e:b5:38:9f:9f:2b:8f:f6:1e:17:08:df:68:81:72:48:49:cd:5d:27:cb:69 +-----BEGIN CERTIFICATE----- 
+MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0 +aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEwMDAwMDBaFw0yODEyMzEyMzU5NTla +MH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO +BgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUwIwYD +VQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWW +fnJSoBVC21ndZHoa0Lh73TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMt +TGo87IvDktJTdyR0nAducPy9C1t2ul/y/9c3S0pgePfw+spwtOpZqqPOSC+pw7IL +fhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6juljatEPmsbS9Is6FARW +1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsSivnkBbA7 +kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0G +A1UdDgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21v +ZG9jYS5jb20vVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRo +dHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMu +Y3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8NtwuleGFTQQuS9/ +HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxIS +jBc/lDb+XbDABHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+ +xqFx7D+gIIxmOom0jtTYsU0lR+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/Atyjcn +dBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority +# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority +# Label: "QuoVadis Root CA" +# Serial: 985026699 +# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24 +# SHA1 Fingerprint: de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9 +# SHA256 Fingerprint: 
a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73 +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz +MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw +IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR +dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp +li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D +rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ +WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug +F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU +xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC +Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv +dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw +ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl +IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh +c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy +ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI +KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T +KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq +y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p +dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD +VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL +MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk +fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8 +7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R 
+cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y +mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW +xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK +SnQ2+Q== +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2" +# Serial: 1289 +# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b +# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7 +# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86 +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud +EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT 
+ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3" +# Serial: 1478 +# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf +# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85 +# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35 +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe 
+btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s +zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1 +# Subject: O=SECOM Trust.net OU=Security Communication RootCA1 +# Label: "Security Communication Root CA" +# Serial: 0 +# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a +# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7 +# SHA256 Fingerprint: 
e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY +MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t +dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5 +WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD +VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8 +9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ +DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9 +Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N +QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ +xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G +A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG +kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr +Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5 +Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU +JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot +RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== +-----END CERTIFICATE----- + +# Issuer: CN=Sonera Class2 CA O=Sonera +# Subject: CN=Sonera Class2 CA O=Sonera +# Label: "Sonera Class 2 Root CA" +# Serial: 29 +# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb +# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27 +# SHA256 Fingerprint: 79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27 +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP +MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx +MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV 
+BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o +Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt +5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s +3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej +vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu +8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw +DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG +MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil +zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/ +3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD +FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6 +Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 +ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden Root CA O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden Root CA O=Staat der Nederlanden +# Label: "Staat der Nederlanden Root CA" +# Serial: 10000010 +# MD5 Fingerprint: 60:84:7c:5a:ce:db:0c:d4:cb:a7:e9:fe:02:c6:a9:c0 +# SHA1 Fingerprint: 10:1d:fa:3f:d5:0b:cb:bb:9b:b5:60:0c:19:55:a4:1a:f4:73:3a:04 +# SHA256 Fingerprint: d4:1d:82:9e:8c:16:59:82:2a:f9:3f:ce:62:bf:fc:de:26:4f:c8:4e:8b:95:0c:5f:f2:75:d0:52:35:46:95:a3 +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJO +TDEeMBwGA1UEChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEy +MTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4wHAYDVQQKExVTdGFhdCBkZXIgTmVk +ZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxhbmRlbiBSb290IENB +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFtvszn +ExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw71 +9tV2U02PjLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MO 
+hXeiD+EwR+4A5zN9RGcaC1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+U +tFE5A3+y3qcym7RHjm+0Sq7lr7HcsBthvJly3uSJt3omXdozSVtSnA71iq3DuD3o +BmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn622r+I/q85Ej0ZytqERAh +SQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRVHSAAMDww +OgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMv +cm9vdC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA +7Jbg0zTBLL9s+DANBgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k +/rvuFbQvBgwp8qiSpGEN/KtcCFtREytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzm +eafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbwMVcoEoJz6TMvplW0C5GUR5z6 +u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3ynGQI0DvDKcWy +7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +# Issuer: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com +# Subject: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com +# Label: "UTN DATACorp SGC Root CA" +# Serial: 91374294542884689855167577680241077609 +# MD5 Fingerprint: b3:a5:3e:77:21:6d:ac:4a:c0:c9:fb:d5:41:3d:ca:06 +# SHA1 Fingerprint: 58:11:9f:0e:12:82:87:ea:50:fd:d9:87:45:6f:4f:78:dc:fa:d6:d4 +# SHA256 Fingerprint: 85:fb:2f:91:dd:12:27:5a:01:45:b6:36:53:4f:84:02:4a:d6:8b:69:b8:ee:88:68:4f:f7:11:37:58:05:b3:48 +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB +kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw +IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD +VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu +dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 
+E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ +D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK +4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq +lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW +bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB +o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT +MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js +LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr +BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB +AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj +j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH +KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv +2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 +mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +# Issuer: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com +# Subject: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com +# Label: "UTN USERFirst Hardware Root CA" +# Serial: 91374294542884704022267039221184531197 +# MD5 Fingerprint: 4c:56:41:e5:0d:bb:2b:e8:ca:a3:ed:18:08:ad:43:39 +# SHA1 Fingerprint: 04:83:ed:33:99:ac:36:08:05:87:22:ed:bc:5e:46:00:e3:be:f9:d7 +# SHA256 Fingerprint: 6e:a5:47:41:d0:04:66:7e:ed:1b:48:16:63:4a:a3:a7:9e:6e:4b:96:95:0f:82:79:da:fc:8d:9b:d8:81:21:37 +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe 
+MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a +MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy +oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy +bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t +3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== +-----END CERTIFICATE----- + +# Issuer: CN=Chambers of Commerce Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org +# Subject: CN=Chambers of Commerce Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org +# Label: "Camerfirma Chambers of Commerce Root" +# Serial: 0 +# MD5 Fingerprint: b0:01:ee:14:d9:af:29:18:94:76:8e:f1:69:33:2a:84 +# SHA1 Fingerprint: 6e:3a:55:a4:19:0c:19:5c:93:84:3c:c0:db:72:2e:31:30:61:f0:b1 +# SHA256 Fingerprint: 0c:25:8a:12:a5:67:4a:ef:25:f2:8b:a7:dc:fa:ec:ee:a3:48:e5:41:e6:f5:cc:4e:e6:3b:71:b3:61:60:6a:c3 +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEn +MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL +ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMg 
+b2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAxNjEzNDNaFw0zNzA5MzAxNjEzNDRa +MH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZpcm1hIFNBIENJRiBB +ODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3JnMSIw +IAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0B +AQEFAAOCAQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtb +unXF/KGIJPov7coISjlUxFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0d +BmpAPrMMhe5cG3nCYsS4No41XQEMIwRHNaqbYE6gZj3LJgqcQKH0XZi/caulAGgq +7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jWDA+wWFjbw2Y3npuRVDM3 +0pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFVd9oKDMyX +roDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIG +A1UdEwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5j +aGFtYmVyc2lnbi5vcmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p +26EpW1eLTXYGduHRooowDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIA +BzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hhbWJlcnNpZ24ub3JnMCcGA1Ud +EgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYDVR0gBFEwTzBN +BgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEB +AAxBl8IahsAifJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZd +p0AJPaxJRUXcLo0waLIJuvvDL8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi +1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wNUPf6s+xCX6ndbcj0dc97wXImsQEc +XCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/nADydb47kMgkdTXg0 +eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1erfu +tGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +# Issuer: CN=Global Chambersign Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org +# Subject: CN=Global Chambersign Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org +# Label: "Camerfirma Global Chambersign Root" +# Serial: 0 +# MD5 Fingerprint: c5:e6:7b:bf:06:d0:4f:43:ed:c4:7a:65:8a:fb:6b:19 +# SHA1 Fingerprint: 33:9b:6b:14:50:24:9b:55:7a:01:87:72:84:d9:e0:2f:c3:d2:d8:e9 +# SHA256 Fingerprint: 
ef:3c:b4:17:fc:8e:bf:6f:97:87:6c:9e:4e:ce:39:de:1e:a5:fe:64:91:41:d1:02:8b:7d:11:c0:b2:29:8c:ed +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEn +MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL +ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENo +YW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYxNDE4WhcNMzcwOTMwMTYxNDE4WjB9 +MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgy +NzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4G +A1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUA +A4IBDQAwggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0 +Mi+ITaFgCPS3CU6gSS9J1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/s +QJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8Oby4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpV +eAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl6DJWk0aJqCWKZQbua795 +B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c8lCrEqWh +z0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0T +AQH/BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1i +ZXJzaWduLm9yZy9jaGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4w +TcbOX60Qq+UDpfqpFDAOBgNVHQ8BAf8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAH +MCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBjaGFtYmVyc2lnbi5vcmcwKgYD +VR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9yZzBbBgNVHSAE +VDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0B +AQUFAAOCAQEAPDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUM +bKGKfKX0j//U2K0X1S0E0T9YgOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXi +ryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJPJ7oKXqJ1/6v/2j1pReQvayZzKWG +VwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4IBHNfTIzSJRUTN3c +ecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREest2d/ +AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- + +# Issuer: CN=NetLock Kozjegyzoi (Class A) Tanusitvanykiado O=NetLock Halozatbiztonsagi Kft. 
OU=Tanusitvanykiadok +# Subject: CN=NetLock Kozjegyzoi (Class A) Tanusitvanykiado O=NetLock Halozatbiztonsagi Kft. OU=Tanusitvanykiadok +# Label: "NetLock Notary (Class A) Root" +# Serial: 259 +# MD5 Fingerprint: 86:38:6d:5e:49:63:6c:85:5c:db:6d:dc:94:b7:d0:f7 +# SHA1 Fingerprint: ac:ed:5f:65:53:fd:25:ce:01:5f:1f:7a:48:3b:6a:74:9f:61:78:c6 +# SHA256 Fingerprint: 7f:12:cd:5f:7e:5e:29:0e:c7:d8:51:79:d5:b7:2c:20:a5:be:75:08:ff:db:5b:f8:1a:b9:68:4a:7f:c9:f6:67 +-----BEGIN CERTIFICATE----- +MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhV +MRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMe +TmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0 +dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFzcyBB +KSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oXDTE5MDIxOTIzMTQ0 +N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQHEwhC +dWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQu +MRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBL +b3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSMD7tM9DceqQWC2ObhbHDqeLVu0ThEDaiD +zl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZz+qMkjvN9wfcZnSX9EUi +3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC/tmwqcm8 +WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LY +Oph7tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2Esi +NCubMvJIH5+hCoR64sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCC +ApswDgYDVR0PAQH/BAQDAgAGMBIGA1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4 +QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1GSUdZRUxFTSEgRXplbiB0 +YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pvbGdhbHRhdGFz +aSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu +IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtm +ZWxlbG9zc2VnLWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMg +ZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVs 
+amFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJhc2EgbWVndGFsYWxoYXRv +IGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBhIGh0dHBzOi8vd3d3 +Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVub3J6 +ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1 +YW5jZSBhbmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3Qg +dG8gdGhlIE5ldExvY2sgQ1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRs +b2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBjcHNAbmV0bG9jay5uZXQuMA0G +CSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5ayZrU3/b39/zcT0mwBQO +xmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjPytoUMaFP +0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQ +QeJBCWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxk +f1qbFFgBJ34TUMdrKuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK +8CtmdWOMovsEPoMOmzbwGOQmIMOM8CgHrTwXZoi1/baI +-----END CERTIFICATE----- + +# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Label: "XRamp Global CA Root" +# Serial: 107108908803651509692980124233745014957 +# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1 +# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6 +# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2 +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB +gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk +MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY +UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx +NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 +dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy +dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 
+38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP +KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q +DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 +qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa +JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi +PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P +BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs +jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 +eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR +vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa +IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy +i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ +O+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Subject: O=The Go Daddy Group, Inc. 
OU=Go Daddy Class 2 Certification Authority +# Label: "Go Daddy Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67 +# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4 +# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4 +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- + +# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority +# Subject: O=Starfield Technologies, Inc. 
OU=Starfield Class 2 Certification Authority +# Label: "Starfield Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24 +# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a +# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58 +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. 
OU=Secure Digital Certificate Signing +# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing +# Label: "StartCom Certification Authority" +# Serial: 1 +# MD5 Fingerprint: 22:4d:8f:8a:fc:f7:35:c2:bb:57:34:90:7b:8b:22:16 +# SHA1 Fingerprint: 3e:2b:f7:f2:03:1b:96:f3:8c:e6:c4:d8:a8:5d:3e:2d:58:47:6a:0f +# SHA256 Fingerprint: c7:66:a9:be:f2:d4:07:1c:86:3a:31:aa:49:20:e8:13:b2:d1:98:60:8c:b7:b7:cf:e2:11:43:b8:36:df:09:ea +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg +Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9 +MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi +U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh +cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk +pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf +OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C +Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT +Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi +HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM +Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w ++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ +Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 +Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B +26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID +AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j +ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js +LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM 
+BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy +dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh +cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh +YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg +dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp +bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ +YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT +TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ +9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8 +jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW +FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz +ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1 +ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L +EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu +L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC +O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V +um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh +NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14= +-----END CERTIFICATE----- + +# Issuer: O=Government Root Certification Authority +# Subject: O=Government Root Certification Authority +# Label: "Taiwan GRCA" +# Serial: 42023070807708724159991140556527066870 +# MD5 Fingerprint: 37:85:44:53:32:45:1f:20:f0:f3:95:e1:25:c4:43:4e +# SHA1 Fingerprint: f4:8b:11:bf:de:ab:be:94:54:20:71:e6:41:de:6b:be:88:2b:40:b9 +# SHA256 Fingerprint: 76:00:29:5e:ef:e8:5b:9e:1f:d6:24:db:76:06:2a:aa:ae:59:81:8a:54:d2:77:4c:d4:c0:b2:c0:11:31:e1:b3 +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/ +MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj 
+YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow +PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR +IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q +gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy +yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts +F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2 +jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx +ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC +VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK +YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH +EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN +Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud +DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE +MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK +UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf +qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK +ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE +JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7 +hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1 +EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm +nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX +udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz +ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe +LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl +pYYsfPQS +-----END CERTIFICATE----- + +# Issuer: CN=Swisscom Root CA 1 O=Swisscom OU=Digital Certificate Services +# Subject: CN=Swisscom Root CA 1 O=Swisscom OU=Digital Certificate Services +# Label: "Swisscom Root 
CA 1" +# Serial: 122348795730808398873664200247279986742 +# MD5 Fingerprint: f8:38:7c:77:88:df:2c:16:68:2e:c2:e2:52:4b:b8:f9 +# SHA1 Fingerprint: 5f:3a:fc:0a:8b:64:f6:86:67:34:74:df:7e:a9:a2:fe:f9:fa:7a:51 +# SHA256 Fingerprint: 21:db:20:12:36:60:bb:2e:d4:18:20:5d:a1:1e:e7:a8:5a:65:e2:bc:6e:55:b5:af:7e:78:99:c8:a2:66:d9:2e +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBk +MQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0 +YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3Qg +Q0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4MTgyMjA2MjBaMGQxCzAJBgNVBAYT +AmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZp +Y2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9 +m2BtRsiMMW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdih +FvkcxC7mlSpnzNApbjyFNDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/ +TilftKaNXXsLmREDA/7n29uj/x2lzZAeAR81sH8A25Bvxn570e56eqeqDFdvpG3F +EzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkCb6dJtDZd0KTeByy2dbco +kdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn7uHbHaBu +HYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNF +vJbNcA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo +19AOeCMgkckkKmUpWyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjC +L3UcPX7ape8eYIVpQtPM+GP+HkM5haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJW +bjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNYMUJDLXT5xp6mig/p/r+D5kNX +JLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0hBBYw +FDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzc +K6FptWfUjNP9MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzf +ky9NfEBWMXrrpA9gzXrzvsMnjgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7Ik +Vh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQMbFamIp1TpBcahQq4FJHgmDmHtqB +sfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4HVtA4oJVwIHaM190e 
+3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtlvrsR +ls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ip +mXeascClOS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HH +b6D0jqTsNFFbjCYDcKF31QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksf +rK/7DZBaZmBwXarNeNQk7shBoJMBkpxqnvy5JMWzFYJ+vq6VK+uxwNrjAWALXmms +hFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCyx/yP2FS1k2Kdzs9Z+z0Y +zirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMWNY6E0F/6 +MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun 
+pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr 
+hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z 
+eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- + +# Issuer: CN=Class 2 Primary CA O=Certplus +# Subject: CN=Class 2 Primary CA O=Certplus +# Label: "Certplus Class 2 Primary CA" +# Serial: 177770208045934040241468760488327595043 +# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b +# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb +# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw +PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz +cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9 +MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz +IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ +ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR +VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL +kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd +EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas +H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0 +HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud +DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4 +QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu +Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/ +AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8 +yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR +FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA +ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB 
+kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co. +# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co. +# Label: "DST Root CA X3" +# Serial: 91299735575339953335919266965803778155 +# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5 +# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13 +# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39 +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD +Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O +rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq +OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b +xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw +7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD +aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG +SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 +ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr +AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz +R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 +JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo +Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +# Issuer: CN=DST ACES CA X6 O=Digital Signature Trust OU=DST ACES +# Subject: CN=DST ACES CA X6 O=Digital Signature Trust OU=DST ACES +# Label: "DST ACES CA X6" +# Serial: 
17771143917277623872238992636097467865 +# MD5 Fingerprint: 21:d8:4c:82:2b:99:09:33:a2:eb:14:24:8d:8e:5f:e8 +# SHA1 Fingerprint: 40:54:da:6f:1c:3f:40:74:ac:ed:0f:ec:cd:db:79:d1:53:fb:90:1d +# SHA256 Fingerprint: 76:7c:95:5a:76:41:2c:89:af:68:8e:90:a1:c7:0f:55:6c:fd:6b:60:25:db:ea:10:41:6d:7e:b6:83:1f:8c:40 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBb +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3Qx +ETAPBgNVBAsTCERTVCBBQ0VTMRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0w +MzExMjAyMTE5NThaFw0xNzExMjAyMTE5NThaMFsxCzAJBgNVBAYTAlVTMSAwHgYD +VQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UECxMIRFNUIEFDRVMx +FzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPu +ktKe1jzIDZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7 +gLFViYsx+tC3dr5BPTCapCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZH +fAjIgrrep4c9oW24MFbCswKBXy314powGCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4a +ahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPyMjwmR/onJALJfh1biEIT +ajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rk +c3QuY29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjto +dHRwOi8vd3d3LnRydXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMt +aW5kZXguaHRtbDAdBgNVHQ4EFgQUCXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZI +hvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V25FYrnJmQ6AgwbN99Pe7lv7Uk +QIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6tFr8hlxCBPeP/ +h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpR +rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2 +9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=(c) 2005 TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. 
+# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=(c) 2005 TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. +# Label: "TURKTRUST Certificate Services Provider Root 1" +# Serial: 1 +# MD5 Fingerprint: f1:6a:22:18:c9:cd:df:ce:82:1d:1d:b7:78:5c:a9:a5 +# SHA1 Fingerprint: 79:98:a3:08:e1:4d:65:85:e6:c2:1e:15:3a:71:9f:ba:5a:d3:4a:d9 +# SHA256 Fingerprint: 44:04:e3:3b:5e:14:0d:cf:99:80:51:fd:fc:80:28:c7:c8:16:15:c5:ee:73:7b:11:1b:58:82:33:a9:b5:35:a0 +-----BEGIN CERTIFICATE----- +MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOc +UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx +c8SxMQswCQYDVQQGDAJUUjEPMA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykg +MjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8 +dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMxMDI3MTdaFw0xNTAz +MjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsgU2Vy +dGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYD +VQQHDAZBTktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kg +xLBsZXRpxZ9pbSB2ZSBCaWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEu +xZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7 +XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GXyGl8hMW0kWxsE2qkVa2k +heiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8iSi9BB35J +YbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5C +urKZ8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1 +JuTm5Rh8i27fbMx4W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51 +b0dewQIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV +9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46sWrv7/hg0Uw2ZkUd82YCdAR7 +kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxEq8Sn5RTOPEFh +fEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy +B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdA +aLX/7KfS0zgYnNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKS +RGQDJereW26fyfJOrN3H +-----END CERTIFICATE----- + +# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet 
Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Kasım 2005 +# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Kasım 2005 +# Label: "TURKTRUST Certificate Services Provider Root 2" +# Serial: 1 +# MD5 Fingerprint: 37:a5:6e:d4:b1:25:84:97:b7:fd:56:15:7a:f9:a2:00 +# SHA1 Fingerprint: b4:35:d4:e1:11:9d:1c:66:90:a7:49:eb:b3:94:bd:63:7b:a7:82:b7 +# SHA256 Fingerprint: c4:70:cf:54:7e:23:02:b9:77:fb:29:dd:71:a8:9a:7b:6c:1f:60:77:7b:03:29:f5:60:17:f3:28:bf:4f:6b:e6 +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOc +UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx +c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xS +S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg +SGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcNMDUxMTA3MTAwNzU3 +WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVrdHJv +bmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJU +UjEPMA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSw +bGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWe +LiAoYykgS2FzxLFtIDIwMDUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqeLCDe2JAOCtFp0if7qnef +J1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKIx+XlZEdh +R3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJ +Qv2gQrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGX +JHpsmxcPbe9TmJEr5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1p +zpwACPI2/z7woQ8arBT9pmAPAgMBAAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58S +Fq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8GA1UdEwEB/wQFMAMBAf8wDQYJ +KoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/nttRbj2hWyfIvwq +ECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 +Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFz +gw2lGh1uEpJ+hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotH 
+uFEJjOp9zYhys2AzsfAKRO8P9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LS +y3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5UrbnBEI= +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Label: "SwissSign Gold CA - G2" +# Serial: 13492815561806991280 +# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93 +# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61 +# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95 +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF +MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd +MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO 
+UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC +bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv +GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 +92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Label: "SwissSign Silver CA - G2" +# Serial: 5700383053117599563 +# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13 +# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb +# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5 +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu +IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow +RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY +U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv +Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br +YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF +nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH +6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt +eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/ +c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ +MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH 
+HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf +jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6 +5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB +rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c +wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB +AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp +WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9 +xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ +2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ +IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8 +aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X +em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR +dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/ +OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+ +hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy +tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. +# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. 
+# Label: "GeoTrust Primary Certification Authority" +# Serial: 32798226551256963324313806436981982369 +# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf +# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96 +# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY +MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo +R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx +MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9 +AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA +ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0 +7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W +kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI +mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ +KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1 +6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl +4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K +oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj +UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU +AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only +# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. 
- For authorized use only +# Label: "thawte Primary Root CA" +# Serial: 69529181992039203566298953787712940909 +# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12 +# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81 +# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB +qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV +BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw +NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j +LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG +A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs +W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta +3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk +6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 +Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J +NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP +r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU +DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz +YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 +/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ +LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 +jVaMaA== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. 
OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only +# Label: "VeriSign Class 3 Public Primary Certification Authority - G5" +# Serial: 33037644167568058970164719475676101450 +# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c +# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5 +# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW +ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1 +nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex +t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz +SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG +BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+ +rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/ +NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH +BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv 
+MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE +p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y +5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK +WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ +4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N +hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +# Issuer: CN=SecureTrust CA O=SecureTrust Corporation +# Subject: CN=SecureTrust CA O=SecureTrust Corporation +# Label: "SecureTrust CA" +# Serial: 17199774589125277788362757014266862032 +# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1 +# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11 +# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73 +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO +0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT +BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg +JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 +6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ 
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +# Issuer: CN=Secure Global CA O=SecureTrust Corporation +# Subject: CN=Secure Global CA O=SecureTrust Corporation +# Label: "Secure Global CA" +# Serial: 9751836167731051554232119481456978597 +# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de +# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b +# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69 +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY 
+iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO Certification Authority O=COMODO CA Limited +# Label: "COMODO Certification Authority" +# Serial: 104350513648249232941998508985834464573 +# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75 +# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b +# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66 +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ 
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- + +# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Label: "Network Solutions Certificate Authority" +# Serial: 116697915152937497490437556386812487904 +# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e +# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce +# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi +MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV +UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO +ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz +c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP +OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl +mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF +BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 +qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw +gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu +bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp +dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 +6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ +h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH 
+/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN +pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +# Issuer: CN=WellsSecure Public Root Certificate Authority O=Wells Fargo WellsSecure OU=Wells Fargo Bank NA +# Subject: CN=WellsSecure Public Root Certificate Authority O=Wells Fargo WellsSecure OU=Wells Fargo Bank NA +# Label: "WellsSecure Public Root Certificate Authority" +# Serial: 1 +# MD5 Fingerprint: 15:ac:a5:c2:92:2d:79:bc:e8:7f:cb:67:ed:02:cf:36 +# SHA1 Fingerprint: e7:b4:f6:9d:61:ec:90:69:db:7e:90:a7:40:1a:3c:f4:7d:4f:e8:ee +# SHA256 Fingerprint: a7:12:72:ae:aa:a3:cf:e8:72:7f:7f:b3:9f:0f:b3:d1:e5:42:6e:90:60:b0:6e:e6:f1:3e:9a:3c:58:33:cd:43 +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMx +IDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxs +cyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9v +dCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDcxMjEzMTcwNzU0WhcNMjIxMjE0 +MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdl +bGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQD +DC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+r +WxxTkqxtnt3CxC5FlAM1iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjU +Dk/41itMpBb570OYj7OeUt9tkTmPOL13i0Nj67eT/DBMHAGTthP796EfvyXhdDcs +HqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8bJVhHlfXBIEyg1J55oNj +z7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiBK0HmOFaf +SZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/Slwxl +AgMBAAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqG +KGh0dHA6Ly9jcmwucGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0P +AQH/BAQDAgHGMB0GA1UdDgQWBBQmlRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0j +BIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGBi6SBiDCBhTELMAkGA1UEBhMC +VVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNX 
+ZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEB +ALkVsUSRzCPIK0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd +/ZDJPHV3V3p9+N701NX3leZ0bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pB +A4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSljqHyita04pO2t/caaH/+Xc/77szWn +k4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+esE2fDbbFwRnzVlhE9 +iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJtylv +2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Label: "COMODO ECC Certification Authority" +# Serial: 41578283867086692638256921589707938090 +# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 +# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 +# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +# Issuer: CN=IGC/A 
O=PM/SGDN OU=DCSSI +# Subject: CN=IGC/A O=PM/SGDN OU=DCSSI +# Label: "IGC/A" +# Serial: 245102874772 +# MD5 Fingerprint: 0c:7f:dd:6a:f4:2a:b9:c8:9b:bd:20:7e:a9:db:5c:37 +# SHA1 Fingerprint: 60:d6:89:74:b5:c2:65:9e:8a:0f:c1:88:7c:88:d2:46:69:1b:18:2c +# SHA256 Fingerprint: b9:be:a7:86:0a:96:2e:a3:61:1d:ab:97:ab:6d:a3:e2:1c:10:68:b9:7d:55:57:5e:d0:e1:12:79:c1:1c:89:32 +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYT +AkZSMQ8wDQYDVQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQ +TS9TR0ROMQ4wDAYDVQQLEwVEQ1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG +9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMB4XDTAyMTIxMzE0MjkyM1oXDTIw +MTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQIEwZGcmFuY2UxDjAM +BgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NTSTEO +MAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2 +LmZyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaI +s9z4iPf930Pfeo2aSVz2TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2 +xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCWSo7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4 +u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYyHF2fYPepraX/z9E0+X1b +F8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNdfrGoRpAx +Vs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGd +PDPQtQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNV +HSAEDjAMMAoGCCqBegF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAx +NjAfBgNVHSMEGDAWgBSjBS8YYFDCiQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUF +AAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RKq89toB9RlPhJy3Q2FLwV3duJ +L92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3QMZsyK10XZZOY +YLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg +Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2a +NjSaTFR+FwNIlQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R +0982gaEbeC9xs/FZTEYYKKuF0mBWWg== +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication EV RootCA1 +# Subject: O=SECOM Trust Systems CO.,LTD. 
OU=Security Communication EV RootCA1 +# Label: "Security Communication EV RootCA1" +# Serial: 0 +# MD5 Fingerprint: 22:2d:a6:01:ea:7c:0a:f7:f0:6c:56:43:3f:77:76:d3 +# SHA1 Fingerprint: fe:b8:c4:32:dc:f9:76:9a:ce:ae:3d:d8:90:8f:fd:28:86:65:64:7d +# SHA256 Fingerprint: a2:2d:ba:68:1e:97:37:6e:2d:39:7d:72:8a:ae:3a:9b:62:96:b9:fd:ba:60:bc:2e:11:f6:47:f2:c6:75:fb:37 +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMh +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIz +MloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09N +IFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNlY3VyaXR5IENvbW11 +bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSE +RMqm4miO/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gO +zXppFodEtZDkBp2uoQSXWHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5 +bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4zZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDF +MxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4bepJz11sS6/vmsJWXMY1 +VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK9U2vP9eC +OKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HW +tWS3irO4G8za+6xmiEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZ +q51ihPZRwSzJIxXYKLerJRO1RuGGAv8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDb +EJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnWmHyojf6GPgcWkuF75x3sM3Z+ +Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEWT1MKZPlO9L9O +VL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GA CA" +# Serial: 86718877871133159090080555911823548314 +# MD5 Fingerprint: 
bc:6c:51:33:a7:e9:d3:66:63:54:15:72:1b:21:92:93 +# SHA1 Fingerprint: 59:22:a1:e1:5a:ea:16:35:21:f8:98:39:6a:46:46:b0:44:1b:0f:a9 +# SHA256 Fingerprint: 41:c9:23:86:6a:b4:ca:d6:b7:ad:57:80:81:58:2e:02:07:97:a6:cb:df:4f:ff:78:ce:83:96:b3:89:37:d7:f5 +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB +ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly +aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl +ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w +NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G +A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD +VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX +SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR +VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2 +w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF +mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg +4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9 +4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw +EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx +SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2 +ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8 +vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi +Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ +/L7fCg0= +-----END CERTIFICATE----- + +# Issuer: CN=Microsec e-Szigno Root CA O=Microsec Ltd. OU=e-Szigno CA +# Subject: CN=Microsec e-Szigno Root CA O=Microsec Ltd. 
OU=e-Szigno CA +# Label: "Microsec e-Szigno Root CA" +# Serial: 272122594155480254301341951808045322001 +# MD5 Fingerprint: f0:96:b6:2f:c5:10:d5:67:8e:83:25:32:e8:5e:2e:e5 +# SHA1 Fingerprint: 23:88:c9:d3:71:cc:9e:96:3d:ff:7d:3c:a7:ce:fc:d6:25:ec:19:0d +# SHA256 Fingerprint: 32:7a:3d:76:1a:ba:de:a0:34:eb:99:84:06:27:5c:b1:a4:77:6e:fd:ae:2f:df:6d:01:68:ea:1c:4f:55:67:d0 +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAw +cjELMAkGA1UEBhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNy +b3NlYyBMdGQuMRQwEgYDVQQLEwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9z +ZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0MDYxMjI4NDRaFw0xNzA0MDYxMjI4 +NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVzdDEWMBQGA1UEChMN +TWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMTGU1p +Y3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2u +uO/TEdyB5s87lozWbxXGd36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+ +LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/NoqdNAoI/gqyFxuEPkEeZlApxcpMqyabA +vjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjcQR/Ji3HWVBTji1R4P770 +Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJPqW+jqpx +62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcB +AQRbMFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3Aw +LQYIKwYBBQUHMAKGIWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAP +BgNVHRMBAf8EBTADAQH/MIIBcwYDVR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIB +AQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3LmUtc3ppZ25vLmh1L1NaU1ov +MIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0AdAB2AOEAbgB5 +ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABT +AHoAbwBsAGcA4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABh +ACAAcwB6AGUAcgBpAG4AdAAgAGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABo +AHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMAegBpAGcAbgBvAC4AaAB1AC8AUwBa +AFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6Ly93d3cuZS1zemln 
+bm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NOPU1p +Y3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxP +PU1pY3Jvc2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZv +Y2F0aW9uTGlzdDtiaW5hcnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuB +EGluZm9AZS1zemlnbm8uaHWkdzB1MSMwIQYDVQQDDBpNaWNyb3NlYyBlLVN6aWdu +w7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhTWjEWMBQGA1UEChMNTWlj +cm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhVMIGsBgNV +HSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJI +VTERMA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDAS +BgNVBAsTC2UtU3ppZ25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBS +b290IENBghEAzLjnv04pGv2i3GalHCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS +8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMTnGZjWS7KXHAM/IO8VbH0jgds +ZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FEaGAHQzAxQmHl +7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a +86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfR +hUZLphK3dehKyVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/ +MPMMNz7UwiiAc7EBt51alhQBS6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +# Issuer: CN=Certigna O=Dhimyotis +# Subject: CN=Certigna O=Dhimyotis +# Label: "Certigna" +# Serial: 18364802974209362175 +# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff +# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97 +# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 +QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny 
+gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +# Issuer: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA +# Subject: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA +# Label: "TC TrustCenter Class 2 CA II" +# Serial: 941389028203453866782103406992443 +# MD5 Fingerprint: ce:78:33:5c:59:78:01:6e:18:ea:b9:36:a0:b9:2e:23 +# SHA1 Fingerprint: ae:50:83:ed:7c:f4:5c:bc:8f:61:c6:21:fe:68:5d:79:42:21:15:6e +# SHA256 Fingerprint: e6:b8:f8:76:64:85:f8:07:ae:7f:8d:ac:16:70:46:1f:07:c0:a1:3e:ef:3a:1f:f7:17:53:8d:7a:ba:d3:91:b4 +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjEL +MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV +BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0 +Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYwMTEyMTQzODQzWhcNMjUxMjMxMjI1 +OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i +SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UEAxMc +VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD 
+ggEPADCCAQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jf +tMjWQ+nEdVl//OEd+DFwIxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKg +uNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2J +XjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQXa7pIXSSTYtZgo+U4+lK +8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7uSNQZu+99 +5OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3 +kUrL84J6E1wIqzCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy +dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6 +Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz +JTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290 +Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iS +GNn3Bzn1LL4GdXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprt +ZjluS5TmVfwLG4t3wVMTZonZKNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8 +au0WOB9/WIFaGusyiC2y8zl3gK9etmF1KdsjTYjKUCjLhdLTEKJZbtOTVAB6okaV +hgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kPJOzHdiEoZa5X6AeI +dUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfkvQ== +-----END CERTIFICATE----- + +# Issuer: CN=TC TrustCenter Class 3 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 3 CA +# Subject: CN=TC TrustCenter Class 3 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 3 CA +# Label: "TC TrustCenter Class 3 CA II" +# Serial: 1506523511417715638772220530020799 +# MD5 Fingerprint: 56:5f:aa:80:61:12:17:f6:67:21:e6:2b:6d:61:56:8e +# SHA1 Fingerprint: 80:25:ef:f4:6e:70:c8:d4:72:24:65:84:fe:40:3b:8a:8d:6a:db:f5 +# SHA256 Fingerprint: 8d:a0:84:fc:f9:9c:e0:77:22:f8:9b:32:05:93:98:06:fa:5c:b8:11:e1:c8:13:f6:a1:08:c7:d3:36:b3:40:8e +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjEL +MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV +BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0 
+Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYwMTEyMTQ0MTU3WhcNMjUxMjMxMjI1 +OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i +SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UEAxMc +VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJW +Ht4bNwcwIi9v8Qbxq63WyKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+Q +Vl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo6SI7dYnWRBpl8huXJh0obazovVkdKyT2 +1oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZuV3bOx4a+9P/FRQI2Alq +ukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk2ZyqBwi1 +Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NX +XAek0CSnwPIA1DCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy +dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6 +Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz +JTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290 +Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlN +irTzwppVMXzEO2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8 +TtXqluJucsG7Kv5sbviRmEb8yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6 +g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9IJqDnxrcOfHFcqMRA/07QlIp2+gB +95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal092Y+tTmBvTwtiBj +S+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc5A== +-----END CERTIFICATE----- + +# Issuer: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA +# Subject: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA +# Label: "TC TrustCenter Universal CA I" +# Serial: 601024842042189035295619584734726 +# MD5 Fingerprint: 45:e1:a5:72:c5:a9:36:64:40:9e:f5:e4:58:84:67:8c +# SHA1 Fingerprint: 6b:2f:34:ad:89:58:be:62:fd:b0:6b:5c:ce:bb:9d:d9:4f:4e:39:f3 +# SHA256 Fingerprint: 
eb:f3:c0:2a:87:89:b1:fb:7d:51:19:95:d6:63:b7:29:06:d9:13:ce:0d:5e:10:56:8a:8a:77:e2:58:61:67:e7 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTEL +MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV +BAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1 +c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcNMDYwMzIyMTU1NDI4WhcNMjUxMjMx +MjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIg +R21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYwJAYD +VQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSR +JJZ4Hgmgm5qVSkr1YnwCqMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3T +fCZdzHd55yx4Oagmcw6iXSVphU9VDprvxrlE4Vc93x9UIuVvZaozhDrzznq+VZeu +jRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtwag+1m7Z3W0hZneTvWq3z +wZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9OgdwZu5GQ +fezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYD +VR0jBBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0G +CSqGSIb3DQEBBQUAA4IBAQAo0uCG1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X1 +7caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/CyvwbZ71q+s2IhtNerNXxTPqYn +8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3ghUJGooWMNjs +ydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT +ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/ +2TYcuiUaUj0a7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY +-----END CERTIFICATE----- + +# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center +# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center +# Label: "Deutsche Telekom Root CA 2" +# Serial: 38 +# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08 +# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf +# SHA256 Fingerprint: 
b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3 +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc +MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj +IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB +IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE +RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl +U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290 +IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU +ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC +QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr +rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S +NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc +QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH +txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP +BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC +AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp +tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa +IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl +6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+ +xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +# Issuer: CN=ComSign Secured CA O=ComSign +# Subject: CN=ComSign Secured CA O=ComSign +# Label: "ComSign Secured CA" +# Serial: 264725503855295744117309814499492384489 +# MD5 Fingerprint: 40:01:25:06:8d:21:43:6a:0e:43:00:9c:e7:43:f3:d5 +# SHA1 Fingerprint: f9:cd:0e:2c:da:76:24:c1:8f:bd:f0:f0:ab:b6:45:b8:f7:fe:d5:7a +# SHA256 Fingerprint: 50:79:41:c7:44:60:a0:b4:70:86:22:0d:4e:99:32:57:2a:b5:d1:b5:bb:cb:89:80:ab:1c:b1:76:51:a8:44:d2 +-----BEGIN CERTIFICATE----- +MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAw 
+PDEbMBkGA1UEAxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWdu +MQswCQYDVQQGEwJJTDAeFw0wNDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwx +GzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBDQTEQMA4GA1UEChMHQ29tU2lnbjEL +MAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGtWhf +HZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs49oh +gHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sW +v+bznkqH7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ue +Mv5WJDmyVIRD9YTC2LxBkMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr +9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d19guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt +6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUwAwEB/zBEBgNVHR8EPTA7 +MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29tU2lnblNl +Y3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58 +ADsAj8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkq +hkiG9w0BAQUFAAOCAQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7p +iL1DRYHjZiM/EoZNGeQFsOY3wo3aBijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtC +dsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtpFhpFfTMDZflScZAmlaxMDPWL +kz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP51qJThRv4zdL +hfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- + +# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc +# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc +# Label: "Cybertrust Global Root" +# Serial: 4835703278459682877484360 +# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1 +# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6 +# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3 +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG +A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh +bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE 
+ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS +b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5 +7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS +J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y +HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP +t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz +FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY +XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ +MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw +hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js +MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA +A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj +Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx +XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o +omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc +A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Subject: O=Chunghwa Telecom Co., Ltd. 
OU=ePKI Root Certification Authority +# Label: "ePKI Root Certification Authority" +# Serial: 28956088682735189655030529057352760477 +# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3 +# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0 +# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5 +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh +ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ 
+gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z +Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE +W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D +hNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +# Issuer: CN=TÜBİTAK UEKAE Kök Sertifika Hizmet Sağlayıcısı - Sürüm 3 O=Türkiye Bilimsel ve Teknolojik Araştırma Kurumu - TÜBİTAK OU=Ulusal Elektronik ve Kriptoloji Araştırma Enstitüsü - UEKAE/Kamu Sertifikasyon Merkezi +# Subject: CN=TÜBİTAK UEKAE Kök Sertifika Hizmet Sağlayıcısı - Sürüm 3 O=Türkiye Bilimsel ve Teknolojik Araştırma Kurumu - TÜBİTAK OU=Ulusal Elektronik ve Kriptoloji Araştırma Enstitüsü - UEKAE/Kamu Sertifikasyon Merkezi +# Label: "T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3" +# Serial: 17 +# MD5 Fingerprint: ed:41:f5:8c:50:c5:2b:9c:73:e6:ee:6c:eb:c2:a8:26 +# SHA1 Fingerprint: 1b:4b:39:61:26:27:6b:64:91:a2:68:6d:d7:02:43:21:2d:1f:1d:96 +# SHA256 Fingerprint: e4:c7:34:30:d7:a5:b5:09:25:df:43:37:0a:0d:21:6e:9a:79:b9:d6:db:83:73:a0:c6:9e:b1:cc:31:c7:c5:2a +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRS +MRgwFgYDVQQHDA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJp +bGltc2VsIHZlIFRla25vbG9qaWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSw +VEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ryb25payB2ZSBLcmlwdG9sb2ppIEFy +YcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNVBAsMGkthbXUgU2Vy +dGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUgS8O2 +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAe +Fw0wNzA4MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIx 
+GDAWBgNVBAcMD0dlYnplIC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmls +aW1zZWwgdmUgVGVrbm9sb2ppayBBcmHFn3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBU +QUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZlIEtyaXB0b2xvamkgQXJh +xZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2FtdSBTZXJ0 +aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7Zr +IFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4h +gb46ezzb8R1Sf1n68yJMlaCQvEhOEav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yK +O7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1xnnRFDDtG1hba+818qEhTsXO +fJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR6Oqeyjh1jmKw +lZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQID +AQABo0IwQDAdBgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmP +NOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4N5EY3ATIZJkrGG2AA1nJrvhY0D7t +wyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLTy9LQQfMmNkqblWwM +7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYhLBOh +gLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5n +oN+J1q2MdqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUs +yZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 2 CA 1 O=Buypass AS-983163327 +# Subject: CN=Buypass Class 2 CA 1 O=Buypass AS-983163327 +# Label: "Buypass Class 2 CA 1" +# Serial: 1 +# MD5 Fingerprint: b8:08:9a:f0:03:cc:1b:0d:c8:6c:0b:76:a1:75:64:23 +# SHA1 Fingerprint: a0:a1:ab:90:c9:fc:84:7b:3b:12:61:e8:97:7d:5f:d3:22:61:d3:cc +# SHA256 Fingerprint: 0f:4e:9c:dd:26:4b:02:55:50:d1:70:80:63:40:21:4f:e9:44:34:c9:b0:2f:69:7e:c7:10:fc:5f:ea:fb:5e:38 +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3Mg +Q2xhc3MgMiBDQSAxMB4XDTA2MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzEL 
+MAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MR0wGwYD +VQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7McXA0 +ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLX +l18xoS830r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVB +HfCuuCkslFJgNJQ72uA40Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B +5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/RuFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3 +WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0PAQH/BAQD +AgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLP +gcIV1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+ +DKhQ7SLHrQVMdvvt7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKu +BctN518fV4bVIJwo+28TOPX2EZL2fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHs +h7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5wwDX3OaJdZtB7WZ+oRxKaJyOk +LY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 3 CA 1 O=Buypass AS-983163327 +# Subject: CN=Buypass Class 3 CA 1 O=Buypass AS-983163327 +# Label: "Buypass Class 3 CA 1" +# Serial: 2 +# MD5 Fingerprint: df:3c:73:59:81:e7:39:50:81:04:4c:34:a2:cb:b3:7b +# SHA1 Fingerprint: 61:57:3a:11:df:0e:d8:7e:d5:92:65:22:ea:d0:56:d7:44:b3:23:71 +# SHA256 Fingerprint: b7:b1:2b:17:1f:82:1d:aa:99:0c:d0:fe:50:87:b1:28:44:8b:a8:e5:18:4f:84:c5:1e:02:b5:c8:fb:96:2b:24 +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3Mg +Q2xhc3MgMyBDQSAxMB4XDTA1MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzEL +MAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MR0wGwYD +VQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKxifZg +isRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//z +NIqeKNc0n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI 
++MkcVyzwPX6UvCWThOiaAJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2R +hzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+ +mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0PAQH/BAQD +AgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFP +Bdy7pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27s +EzNxZy5p+qksP2bAEllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2 +mSlf56oBzKwzqBwKu5HEA6BvtjT5htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yC +e/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQjel/wroQk5PMr+4okoyeYZdow +dXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 +-----END CERTIFICATE----- + +# Issuer: CN=EBG Elektronik Sertifika Hizmet Sağlayıcısı O=EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. +# Subject: CN=EBG Elektronik Sertifika Hizmet Sağlayıcısı O=EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. +# Label: "EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1" +# Serial: 5525761995591021570 +# MD5 Fingerprint: 2c:20:26:9d:cb:1a:4a:00:85:b5:b7:5a:ae:c2:01:37 +# SHA1 Fingerprint: 8c:96:ba:eb:dd:2b:07:07:48:ee:30:32:66:a0:f3:98:6e:7c:ae:58 +# SHA256 Fingerprint: 35:ae:5b:dd:d8:f7:ae:63:5c:ff:ba:56:82:a8:f0:0b:95:f4:84:62:c7:10:8e:e9:a0:e5:29:2b:07:4a:af:b2 +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNV +BAMML0VCRyBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx +c8SxMTcwNQYDVQQKDC5FQkcgQmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXpt +ZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAeFw0wNjA4MTcwMDIxMDlaFw0xNjA4 +MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25payBTZXJ0aWZpa2Eg +SGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2ltIFRl +a25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h +4fuXd7hxlugTlkaDT7byX3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAk +tiHq6yOU/im/+4mRDGSaBUorzAzu8T2bgmmkTPiab+ci2hC6X5L8GCcKqKpE+i4s 
+tPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfreYteIAbTdgtsApWjluTL +dlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZTqNGFav4 +c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8Um +TDGyY5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z ++kI2sSXFCjEmN1ZnuqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0O +Lna9XvNRiYuoP1Vzv9s6xiQFlpJIqkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMW +OeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vmExH8nYQKE3vwO9D8owrXieqW +fo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0Nokb+Clsi7n2 +l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgw +FoAU587GT/wWZ5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+ +8ygjdsZs93/mQJ7ANtyVDR2tFcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI +6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgmzJNSroIBk5DKd8pNSe/iWtkqvTDO +TLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64kXPBfrAowzIpAoHME +wfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqTbCmY +Iai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJn +xk1Gj7sURT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4Q +DgZxGhBM/nV+/x5XOULK1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9q +Kd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11t +hie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQY9iJSrSq3RZj9W6+YKH4 +7ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9AahH3eU7 +QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +# Issuer: O=certSIGN OU=certSIGN ROOT CA +# Subject: O=certSIGN OU=certSIGN ROOT CA +# Label: "certSIGN ROOT CA" +# Serial: 35210227249154 +# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17 +# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b +# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT 
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD +QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP +MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do +0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ +UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d +RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ +OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv +JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C +AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O +BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ +LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY +MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ +44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I +Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw +i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN +9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +# Issuer: CN=CNNIC ROOT O=CNNIC +# Subject: CN=CNNIC ROOT O=CNNIC +# Label: "CNNIC ROOT" +# Serial: 1228079105 +# MD5 Fingerprint: 21:bc:82:ab:49:c4:13:3b:4b:b2:2b:5c:6b:90:9c:19 +# SHA1 Fingerprint: 8b:af:4c:9b:1d:f0:2a:92:f7:da:12:8e:b9:1b:ac:f4:98:60:4b:6f +# SHA256 Fingerprint: e2:83:93:77:3d:a8:45:a6:79:f2:08:0c:c7:fb:44:a3:b7:a1:c3:79:2c:b7:eb:77:29:fd:cb:6a:8d:99:ae:a7 +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJD +TjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2 +MDcwOTE0WhcNMjcwNDE2MDcwOTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMF +Q05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzDo+/hn7E7SIX1mlwh +IhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tizVHa6 +dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZO 
+V/kbZKKTVrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrC +GHn2emU1z5DrvTOTn1OrczvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gN +v7Sg2Ca+I19zN38m5pIEo3/PIKe38zrKy5nLAgMBAAGjczBxMBEGCWCGSAGG+EIB +AQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscCwQ7vptU7ETAPBgNVHRMB +Af8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991SlgrHAsEO +76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnK +OOK5Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvH +ugDnuL8BV8F3RTIMO/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7Hgvi +yJA/qIYM/PmLXoXLT1tLYhFHxUV8BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fL +buXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2G8kS1sHNzYDzAgE8yGnLRUhj +2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5mmxE= +-----END CERTIFICATE----- + +# Issuer: O=Japanese Government OU=ApplicationCA +# Subject: O=Japanese Government OU=ApplicationCA +# Label: "ApplicationCA - Japanese Government" +# Serial: 49 +# MD5 Fingerprint: 7e:23:4e:5b:a7:a5:b4:25:e9:00:07:74:11:62:ae:d6 +# SHA1 Fingerprint: 7f:8a:b0:cf:d0:51:87:6a:66:f3:36:0f:47:c8:8d:8c:d3:35:fc:74 +# SHA256 Fingerprint: 2d:47:43:7d:e1:79:51:21:5a:12:f3:c5:8e:51:c7:29:a5:80:26:ef:1f:cc:0a:5f:b3:d9:dc:01:2f:60:0d:19 +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEc +MBoGA1UEChMTSmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRp +b25DQTAeFw0wNzEyMTIxNTAwMDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYT +AkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zlcm5tZW50MRYwFAYDVQQLEw1BcHBs +aWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp23gdE6H +j6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4fl+K +f5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55 +IrmTwcrNwVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cw +FO5cjFW6WY2H/CPek9AEjP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDiht +QWEjdnjDuGWk81quzMKq2edY3rZ+nYVunyoKb58DKTCXKB28t89UKU5RMfkntigm +/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRUWssmP3HMlEYNllPqa0jQ +k/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNVBAYTAkpQ 
+MRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOC +seODvOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADlqRHZ3ODrso2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJ +hyzjVOGjprIIC8CFqMjSnHH2HZ9g/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+ +eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYDio+nEhEMy/0/ecGc/WLuo89U +DNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmWdupwX3kSa+Sj +B1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only +# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only +# Label: "GeoTrust Primary Certification Authority - G3" +# Serial: 28809105769928564313984085209975885599 +# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05 +# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd +# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4 +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB +mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT +MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ +BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0 +BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz ++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm +hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn +5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W 
+JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL +DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC +huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw +HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB +AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB +zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN +kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH +SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G +spki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only +# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only +# Label: "thawte Primary Root CA - G2" +# Serial: 71758320672825410020661621085256472406 +# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f +# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12 +# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57 +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp +IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi +BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw +MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig +YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v +dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/ +BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6 +papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K 
+DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3 +KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox +XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only +# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only +# Label: "thawte Primary Root CA - G3" +# Serial: 127614157056681299805556476275995414779 +# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31 +# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2 +# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB +rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV +BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa +Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl +LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u +MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm +gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8 +YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf +b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9 +9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S +zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk +OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV +HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA 
+2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW +oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c +KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM +m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu +MdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only +# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only +# Label: "GeoTrust Primary Certification Authority - G2" +# Serial: 80682863203381065782177908751794619243 +# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a +# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0 +# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66 +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL +MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj +KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2 +MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw +NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV +BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH +MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL +So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal +tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG +CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT +qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz +rD6ogRLQy7rQkgu2npaqBA+K 
+-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only +# Label: "VeriSign Universal Root Certification Authority" +# Serial: 85209574734084581917763752644031726877 +# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19 +# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54 +# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB +vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W +ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX +MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0 +IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y +IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh +bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF +9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH +H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H +LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN +/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT +rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw +WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs 
+exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4 +sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+ +seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz +4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+ +BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR +lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3 +7M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only +# Label: "VeriSign Class 3 Public Primary Certification Authority - G4" +# Serial: 63143484348153506665311985501458640051 +# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41 +# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a +# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79 +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG +A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp +U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg +SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln +biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm 
+GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve +fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ +aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj +aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW +kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC +4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga +FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- + +# Issuer: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services) +# Subject: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services) +# Label: "NetLock Arany (Class Gold) Főtanúsítvány" +# Serial: 80544274841616 +# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88 +# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91 +# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98 +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG +EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 +MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR +dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB +pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM +b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm +aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz +IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT +lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz +AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 
+VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG +ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 +BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG +AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M +U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh +bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C ++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F +uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 +XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden +# Label: "Staat der Nederlanden Root CA - G2" +# Serial: 10000012 +# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a +# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16 +# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX +DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291 +qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp +uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU +Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE +pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp +5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M +UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN 
+GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy +5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv +6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK +eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6 +B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/ +BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov +L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG +SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS +CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen +5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897 +IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK +gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL ++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL +vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm +bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk +N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC +Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z +ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ== +-----END CERTIFICATE----- + +# Issuer: CN=CA Disig O=Disig a.s. +# Subject: CN=CA Disig O=Disig a.s. 
+# Label: "CA Disig" +# Serial: 1 +# MD5 Fingerprint: 3f:45:96:39:e2:50:87:f7:bb:fe:98:0c:3c:20:98:e6 +# SHA1 Fingerprint: 2a:c8:d5:8b:57:ce:bf:2f:49:af:f2:fc:76:8f:51:14:62:90:7a:41 +# SHA256 Fingerprint: 92:bf:51:19:ab:ec:ca:d0:b1:33:2d:c4:e1:d0:5f:ba:75:b5:67:90:44:ee:0c:a2:6e:93:1f:74:4f:2f:33:cf +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzET +MBEGA1UEBxMKQnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UE +AxMIQ0EgRGlzaWcwHhcNMDYwMzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQsw +CQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcg +YS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgmGErE +Nx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnX +mjxUizkDPw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYD +XcDtab86wYqg6I7ZuUUohwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhW +S8+2rT+MitcE5eN4TPWGqvWP+j1scaMtymfraHtuM6kMgiioTGohQBUgDCZbg8Kp +FhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8wgfwwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0PAQH/BAQD +AgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cu +ZGlzaWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5z +ay9jYS9jcmwvY2FfZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2sv +Y2EvY3JsL2NhX2Rpc2lnLmNybDAaBgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEw +DQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59tWDYcPQuBDRIrRhCA/ec8J9B6 +yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3mkkp7M5+cTxq +EEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeB +EicTXxChds6KezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFN +PGO+I++MzVpQuGhU+QqZMxEA4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +# Issuer: CN=Juur-SK O=AS Sertifitseerimiskeskus +# Subject: CN=Juur-SK O=AS Sertifitseerimiskeskus +# Label: "Juur-SK" +# Serial: 999181308 +# MD5 Fingerprint: 
aa:8e:5d:d9:f8:db:0a:58:b7:8d:26:87:6c:82:35:55 +# SHA1 Fingerprint: 40:9d:4b:d9:17:b5:5c:27:b6:9b:64:cb:98:22:44:0d:cd:09:b8:89 +# SHA256 Fingerprint: ec:c3:e9:c3:40:75:03:be:e0:91:aa:95:2f:41:34:8f:f8:8b:aa:86:3b:22:64:be:fa:c8:07:90:15:74:e9:39 +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcN +AQkBFglwa2lAc2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZp +dHNlZXJpbWlza2Vza3VzMRAwDgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMw +MVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMQsw +CQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEQ +MA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOB +SvZiF3tfTQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkz +ABpTpyHhOEvWgxutr2TC+Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvH +LCu3GFH+4Hv2qEivbDtPL+/40UceJlfwUR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMP +PbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDaTpxt4brNj3pssAki14sL +2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQFMAMBAf8w +ggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwIC +MIHDHoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDk +AGwAagBhAHMAdABhAHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0 +AHMAZQBlAHIAaQBtAGkAcwBrAGUAcwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABz +AGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABrAGkAbgBuAGkAdABhAG0AaQBz +AGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nwcy8wKwYDVR0f +BCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcY +P2/v6X2+MA4GA1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOi +CfP+JmeaUOTDBS8rNXiRTHyoERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+g +kcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyLabVAyJRld/JXIWY7zoVAtjNjGr95 +HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678IIbsSt4beDI3poHS +na9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkhMp6q +qIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0Z 
+TbvGRNs2yyqcjg== +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Label: "Hongkong Post Root CA 1" +# Serial: 1000 +# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca +# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58 +# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2 +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx +FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg +Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG +A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr +b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ +jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn +PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh +ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9 +nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h +q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED +MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC +mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3 +7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB +oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs +EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO +fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi +AmvZWg== +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. 
+# Label: "SecureSign RootCA11" +# Serial: 1 +# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26 +# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3 +# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12 +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr +MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG +A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0 +MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp +Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD +QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz +i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8 +h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV +MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9 +UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni +8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC +h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB +AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm +KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ +X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr +QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5 +pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN +QSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +# Issuer: CN=ACEDICOM Root O=EDICOM OU=PKI +# Subject: CN=ACEDICOM Root O=EDICOM OU=PKI +# Label: "ACEDICOM Root" +# Serial: 7029493972724711941 +# MD5 Fingerprint: 42:81:a0:e2:1c:e3:55:10:de:55:89:42:65:96:22:e6 +# SHA1 Fingerprint: e0:b4:32:2e:b2:f6:a5:68:b6:54:53:84:48:18:4a:50:36:87:43:84 +# SHA256 Fingerprint: 03:95:0f:b4:9a:53:1f:3e:19:91:94:23:98:df:a9:e0:ea:32:d7:ba:1c:dd:9b:c8:5d:b5:7e:d9:40:0b:43:4a 
+-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UE +AwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00x +CzAJBgNVBAYTAkVTMB4XDTA4MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEW +MBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZF +RElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC +AgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHkWLn7 +09gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7 +XBZXehuDYAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5P +Grjm6gSSrj0RuVFCPYewMYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAK +t0SdE3QrwqXrIhWYENiLxQSfHY9g5QYbm8+5eaA9oiM/Qj9r+hwDezCNzmzAv+Yb +X79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbkHQl/Sog4P75n/TSW9R28 +MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTTxKJxqvQU +fecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI +2Sf23EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyH +K9caUPgn6C9D4zq92Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEae +ZAwUswdbxcJzbPEHXEUkFDWug/FqTYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAP +BgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz4SsrSbbXc6GqlPUB53NlTKxQ +MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU9QHnc2VMrFAw +RAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWIm +fQwng4/F9tqgaHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3 +gvoFNTPhNahXwOf9jU8/kzJPeGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKe +I6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1PwkzQSulgUV1qzOMPPKC8W64iLgpq0i +5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1ThCojz2GuHURwCRi +ipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oIKiMn +MCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZ +o5NjEFIqnxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6 +zqylfDJKZ0DcMDQj3dcEI2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacN +GHk0vFQYXlPKNFHtRQrmjseCNj6nOGOpMCwXEGCSn1WHElkQwg9naRHMTh5+Spqt 
+r0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3otkYNbn5XOmeUwssfnHdK +Z05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Label: "Microsec e-Szigno Root CA 2009" +# Serial: 14014712776195784473 +# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1 +# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e +# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78 +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc +cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c 
+2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +# Issuer: CN=e-Guven Kok Elektronik Sertifika Hizmet Saglayicisi O=Elektronik Bilgi Guvenligi A.S. +# Subject: CN=e-Guven Kok Elektronik Sertifika Hizmet Saglayicisi O=Elektronik Bilgi Guvenligi A.S. +# Label: "E-Guven Kok Elektronik Sertifika Hizmet Saglayicisi" +# Serial: 91184789765598910059173000485363494069 +# MD5 Fingerprint: 3d:41:29:cb:1e:aa:11:74:cd:5d:b0:62:af:b0:43:5b +# SHA1 Fingerprint: dd:e1:d2:a9:01:80:2e:1d:87:5e:84:b3:80:7e:4b:b1:fd:99:41:34 +# SHA256 Fingerprint: e6:09:07:84:65:a4:19:78:0c:b6:ac:4c:1c:0b:fb:46:53:d9:d9:cc:6e:b3:94:6e:b7:f3:d6:99:97:ba:d5:98 +-----BEGIN CERTIFICATE----- +MIIDtjCCAp6gAwIBAgIQRJmNPMADJ72cdpW56tustTANBgkqhkiG9w0BAQUFADB1 +MQswCQYDVQQGEwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxp +Z2kgQS5TLjE8MDoGA1UEAxMzZS1HdXZlbiBLb2sgRWxla3Ryb25payBTZXJ0aWZp +a2EgSGl6bWV0IFNhZ2xheWljaXNpMB4XDTA3MDEwNDExMzI0OFoXDTE3MDEwNDEx +MzI0OFowdTELMAkGA1UEBhMCVFIxKDAmBgNVBAoTH0VsZWt0cm9uaWsgQmlsZ2kg +R3V2ZW5saWdpIEEuUy4xPDA6BgNVBAMTM2UtR3V2ZW4gS29rIEVsZWt0cm9uaWsg +U2VydGlmaWthIEhpem1ldCBTYWdsYXlpY2lzaTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMMSIJ6wXgBljU5Gu4Bc6SwGl9XzcslwuedLZYDBS75+PNdU +MZTe1RK6UxYC6lhj71vY8+0qGqpxSKPcEC1fX+tcS5yWCEIlKBHMilpiAVDV6wlT +L/jDj/6z/P2douNffb7tC+Bg62nsM+3YjfsSSYMAyYuXjDtzKjKzEve5TfL0TW3H +5tYmNwjy2f1rXKPlSFxYvEK+A1qBuhw1DADT9SN+cTAIJjjcJRFHLfO6IxClv7wC +90Nex/6wN1CZew+TzuZDLMN+DfIcQ2Zgy2ExR4ejT669VmxMvLz4Bcpk9Ok0oSy1 +c+HCPujIyTQlCFzz7abHlJ+tiEMl1+E5YP6sOVkCAwEAAaNCMEAwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ/uRLOU1fqRTy7ZVZoE +VtstxNulMA0GCSqGSIb3DQEBBQUAA4IBAQB/X7lTW2M9dTLn+sR0GstG30ZpHFLP +qk/CaOv/gKlR6D1id4k9CnU58W5dF4dvaAXBlGzZXd/aslnLpRCKysw5zZ/rTt5S +/wzw9JKp8mxTq5vSR6AfdPebmvEvFZ96ZDAYBzwqD2fK/A+JYZ1lpTzlvBNbCNvj +/+27BrtqBrF6T2XGgv0enIu1De5Iu7i9qgi0+6N8y5/NkHZchpZ4Vwpm+Vganf2X 
+KWDeEaaQHBkc7gGWIjQ0LpH5t8Qn0Xvmv/uARFoW5evg1Ao4vOSR49XrXMGs3xtq +fJ7lddK2l4fbzIcrQzqECK+rPNv3PGYxhrCdU3nt+CPeQuMtgvEP5fqX +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + +# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: CN=Autoridad de Certificacion 
Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 6047274297262753887 +# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3 +# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa +# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy +MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD +VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv +ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl +AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF +661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9 
+am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1 +ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481 +PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS +3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k +SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF +3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM +ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g +StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz +Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB +jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +# Issuer: CN=Izenpe.com O=IZENPE S.A. +# Subject: CN=Izenpe.com O=IZENPE S.A. +# Label: "Izenpe.com" +# Serial: 917563065490389241595536686991402621 +# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73 +# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19 +# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX +uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L 
+hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +# Issuer: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A. +# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A. 
+# Label: "Chambers of Commerce Root - 2008" +# Serial: 11806822484801597146 +# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7 +# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c +# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0 +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz +IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz +MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj +dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw +EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp +MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9 +28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq +VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q +DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR +5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL +ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a +Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl +UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s ++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5 +Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx +hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV +HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1 ++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN +YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t 
+L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy +ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt +IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV +HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w +DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW +PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF +5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1 +glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH +FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2 +pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD +xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG +tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq +jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De +fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ +d0jQ +-----END CERTIFICATE----- + +# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A. +# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A. 
+# Label: "Global Chambersign Root - 2008" +# Serial: 14541511773111788494 +# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3 +# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c +# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx +MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy +cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG +A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl +BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed +KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7 +G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2 +zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4 +ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG +HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2 +Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V +yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e +beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r +6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog +zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW +BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr +ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp +ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk 
+cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt +YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC +CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow +KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI +hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ +UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz +X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x +fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz +a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd +Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd +SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O +AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso +M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge +v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Services Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 +# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f +# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 
82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- 
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf 
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ +u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust +# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust +# Label: "AffirmTrust Premium ECC" +# Serial: 8401224907861490260 +# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d +# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb +# SHA256 Fingerprint: 
bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. 
OU=Certum Certification Authority +# Label: "Certum Trusted Network CA" +# Serial: 279744 +# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78 +# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e +# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x +AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +# Issuer: CN=Certinomis - Autorité Racine O=Certinomis OU=0002 433998903 +# Subject: CN=Certinomis - Autorité Racine O=Certinomis OU=0002 433998903 +# Label: "Certinomis - Autorité Racine" +# Serial: 1 +# MD5 Fingerprint: 
7f:30:78:8c:03:e3:ca:c9:0a:e2:c9:ea:1e:aa:55:1a +# SHA1 Fingerprint: 2e:14:da:ec:28:f0:fa:1e:8e:38:9a:4e:ab:eb:26:c0:0a:d3:83:c3 +# SHA256 Fingerprint: fc:bf:e2:88:62:06:f7:2b:27:59:3c:8b:07:02:97:e1:2d:76:9e:d1:0e:d7:93:07:05:a8:09:8e:ff:c1:4d:17 +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjET +MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAk +BgNVBAMMHUNlcnRpbm9taXMgLSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4 +Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNl +cnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYwJAYDVQQDDB1DZXJ0 +aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jY +F1AMnmHawE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N +8y4oH3DfVS9O7cdxbwlyLu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWe +rP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K +/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92NjMD2AR5vpTESOH2VwnHu +7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9qc1pkIuVC +28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6 +lSTClrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1E +nn1So2+WLhl+HPNbxxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB +0iSVL1N6aaLwD4ZFjliCK0wi1F6g530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql09 +5gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna4NH4+ej9Uji29YnfAgMBAAGj +WzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBQN +jLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9s +ov3/4gbIOZ/xWqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZM +OH8oMDX/nyNTt7buFHAAQCvaR6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q +619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40nJ+U8/aGH88bc62UeYdocMMzpXDn +2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1BCxMjidPJC+iKunqj +o3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjvJL1v +nxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG 
+5ERQL1TEqkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWq +pdEdnV1j6CTmNhTih60bWfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZb +dsLLO7XSAPCjDuGtbkD326C00EauFddEwk01+dIL8hf2rGbVJLJP0RyZwG71fet0 +BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +# Issuer: CN=Root CA Generalitat Valenciana O=Generalitat Valenciana OU=PKIGVA +# Subject: CN=Root CA Generalitat Valenciana O=Generalitat Valenciana OU=PKIGVA +# Label: "Root CA Generalitat Valenciana" +# Serial: 994436456 +# MD5 Fingerprint: 2c:8c:17:5e:b1:54:ab:93:17:b5:36:5a:db:d1:c6:f2 +# SHA1 Fingerprint: a0:73:e5:c5:bd:43:61:0d:86:4c:21:13:0a:85:58:57:cc:9c:ea:46 +# SHA256 Fingerprint: 8c:4e:df:d0:43:48:f3:22:96:9e:7e:29:a4:cd:4d:ca:00:46:55:06:1c:16:e1:b0:76:42:2e:f3:42:ad:63:0e +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJF +UzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJ +R1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcN +MDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3WjBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScw +JQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+ +WmmmO3I2F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKj +SgbwJ/BXufjpTjJ3Cj9BZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGl +u6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQD0EbtFpKd71ng+CT516nDOeB0/RSrFOy +A8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXteJajCq+TA81yc477OMUxk +Hl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMBAAGjggM7 +MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBr +aS5ndmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIIC +IwYKKwYBBAG/VQIBADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8A +cgBpAGQAYQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIA +YQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIAYQBsAGkAdABhAHQAIABWAGEA 
+bABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQByAGEAYwBpAPMA +bgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMA +aQBvAG4AYQBtAGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQA +ZQAgAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEA +YwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBuAHQAcgBhACAAZQBuACAAbABhACAA +ZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAAOgAvAC8AdwB3AHcA +LgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0dHA6 +Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+y +eAT8MIGVBgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQsw +CQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0G +A1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVu +Y2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRhTvW1yEICKrNcda3Fbcrn +lD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdzCkj+IHLt +b8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg +9J63NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XF +ducTZnV+ZfsBn5OHiJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmC +IoaZM3Fa6hlXPZHNqcCjbgcTpsnt+GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +# Issuer: CN=A-Trust-nQual-03 O=A-Trust Ges. f. Sicherheitssysteme im elektr. Datenverkehr GmbH OU=A-Trust-nQual-03 +# Subject: CN=A-Trust-nQual-03 O=A-Trust Ges. f. Sicherheitssysteme im elektr. 
Datenverkehr GmbH OU=A-Trust-nQual-03 +# Label: "A-Trust-nQual-03" +# Serial: 93214 +# MD5 Fingerprint: 49:63:ae:27:f4:d5:95:3d:d8:db:24:86:b8:9c:07:53 +# SHA1 Fingerprint: d3:c0:63:f2:19:ed:07:3e:34:ad:5d:75:0b:32:76:29:ff:d5:9a:f2 +# SHA256 Fingerprint: 79:3c:bf:45:59:b9:fd:e3:8a:b2:2d:f1:68:69:f6:98:81:ae:14:c4:b0:13:9a:c7:88:a7:8a:1a:fc:ca:02:fb +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJB +VDFIMEYGA1UECgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBp +bSBlbGVrdHIuIERhdGVudmVya2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5R +dWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5RdWFsLTAzMB4XDTA1MDgxNzIyMDAw +MFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgwRgYDVQQKDD9BLVRy +dXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0ZW52 +ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMM +EEEtVHJ1c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCtPWFuA/OQO8BBC4SAzewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUj +lUC5B3ilJfYKvUWG6Nm9wASOhURh73+nyfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZ +znF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPESU7l0+m0iKsMrmKS1GWH +2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4iHQF63n1 +k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs +2e3Vcuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYD +VR0OBAoECERqlWdVeRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC +AQEAVdRU0VlIXLOThaq/Yy/kgM40ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fG +KOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmrsQd7TZjTXLDR8KdCoLXEjq/+ +8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZdJXDRZslo+S4R +FGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmE +DNuxUCAKGkq6ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA +# Label: "TWCA Root Certification Authority" +# Serial: 1 +# MD5 Fingerprint: 
aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79 +# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48 +# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44 +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO +LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D +lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2 +# Subject: O=SECOM Trust Systems CO.,LTD. 
OU=Security Communication RootCA2 +# Label: "Security Communication RootCA2" +# Serial: 0 +# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43 +# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74 +# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj +YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2011" +# Serial: 0 +# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9 +# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d +# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71 +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix +RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p +YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw +NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK +EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl +cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz +dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ +fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns +bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD +75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP +FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV +HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp +5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu +b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA +A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p +6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7 +dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys +Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI +l7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Subject: CN=Actalis 
Authentication Root CA O=Actalis S.p.A./03358520967 +# Label: "Actalis Authentication Root CA" +# Serial: 6271844772424770508 +# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6 +# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac +# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66 +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE +BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w +MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 +jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 +jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V 
+SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +# Issuer: O=Trustis Limited OU=Trustis FPS Root CA +# Subject: O=Trustis Limited OU=Trustis FPS Root CA +# Label: "Trustis FPS Root CA" +# Serial: 36053640375399034304724988975563710553 +# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d +# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04 +# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF +MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL +ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx +MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc +MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+ +AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH +iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj +vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA +0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB +OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/ +BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E +FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01 +GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW 
+zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4 +1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE +f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F +jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN +ZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing +# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing +# Label: "StartCom Certification Authority" +# Serial: 45 +# MD5 Fingerprint: c9:3b:0d:84:41:fc:a4:76:79:23:08:57:de:10:19:16 +# SHA1 Fingerprint: a3:f1:33:3f:e2:42:bf:cf:c5:d1:4e:8f:39:42:98:40:68:10:d1:a0 +# SHA256 Fingerprint: e1:78:90:ee:09:a3:fb:f4:f4:8b:9c:41:4a:17:d6:37:b7:a5:06:47:e9:bc:75:23:22:72:7f:cc:17:42:a9:11 +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg +Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9 +MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi +U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh +cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk +pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf +OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C +Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT +Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi +HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM +Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w ++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ +Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 +Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B 
+26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID +AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul +F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC +ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w +ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk +aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0 +YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg +c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93 +d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG +CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF +wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS +Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst +0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc +pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl +CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF +P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK +1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm +KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ +8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm +fyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +# Issuer: CN=StartCom Certification Authority G2 O=StartCom Ltd. +# Subject: CN=StartCom Certification Authority G2 O=StartCom Ltd. 
+# Label: "StartCom Certification Authority G2" +# Serial: 59 +# MD5 Fingerprint: 78:4b:fb:9e:64:82:0a:d3:b8:4c:62:f3:64:f2:90:64 +# SHA1 Fingerprint: 31:f1:fd:68:22:63:20:ee:c6:3b:3f:9d:ea:4a:3e:53:7c:7c:39:17 +# SHA256 Fingerprint: c7:ba:65:67:de:93:a7:98:ae:1f:aa:79:1e:71:2d:37:8f:ae:1f:93:c4:39:7f:ea:44:1b:b7:cb:e6:fd:59:95 +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1 +OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG +A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ +JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD +vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo +D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/ +Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW +RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK +HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN +nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM +0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i +UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9 +Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg +TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL +BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX +UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl +6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK +9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ +HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI 
+wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY +XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l +IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo +hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr +so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 2 Root CA" +# Serial: 2 +# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29 +# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99 +# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48 +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr +6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz 
+uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 3 Root CA" +# Serial: 2 +# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec +# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57 +# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X 
+KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv +033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 3" +# Serial: 1 +# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef +# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1 +# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl 
+YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ +1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- + +# Issuer: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus +# Subject: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus +# Label: "EE Certification Centre Root CA" +# Serial: 112324828676200291871926431888494945866 +# MD5 Fingerprint: 43:5e:88:d4:7d:1a:4a:7e:fd:84:2e:52:eb:01:d4:6f +# SHA1 Fingerprint: c9:a8:b9:e7:55:80:5e:58:e3:53:77:a7:25:eb:af:c3:7b:27:cc:d7 +# SHA256 Fingerprint: 3e:84:ba:43:42:90:85:16:e7:75:73:c0:99:2f:09:79:ca:08:4e:46:85:68:1f:f1:95:cc:ba:8a:22:9b:8a:76 +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1 +MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1 +czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG +CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy 
+MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl +ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS +b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy +euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO +bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw +WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d +MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE +1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/ +zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB +BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF +BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV +v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG +E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW +iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v +GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= +-----END CERTIFICATE----- + +# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Aralık 2007 +# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. 
(c) Aralık 2007 +# Label: "TURKTRUST Certificate Services Provider Root 2007" +# Serial: 1 +# MD5 Fingerprint: 2b:70:20:56:86:82:a0:18:c8:07:53:12:28:70:21:72 +# SHA1 Fingerprint: f1:7f:6f:b6:31:dc:99:e3:a3:c8:7f:fe:1c:f1:81:10:88:d9:60:33 +# SHA256 Fingerprint: 97:8c:d9:66:f2:fa:a0:7b:a7:aa:95:00:d9:c0:2e:9d:77:f2:cd:ad:a6:ad:6b:a7:4a:f4:b9:1c:66:59:3c:50 +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOc +UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx +c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xS +S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg +SGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4XDTA3MTIyNTE4Mzcx +OVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxla3Ry +b25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMC +VFIxDzANBgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDE +sGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7F +ni4gKGMpIEFyYWzEsWsgMjAwNzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9NYvDdE3ePYakqtdTyuTFY +KTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQvKUmi8wUG ++7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveG +HtyaKhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6P +IzdezKKqdfcYbwnTrqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M +733WB2+Y8a+xwXrXgTW4qhe04MsCAwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHk +Yb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/sPx+EnWVUXKgW +AkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I +aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5 +mxRZNTZPz/OOXl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsa +XRik7r4EW5nVcV9VZWRi1aKbBFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZ +qxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAKpoRq0Tl9 +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST 
Root Class 3 CA 2 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 2009" +# Serial: 623603 +# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f +# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0 +# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST 
Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 EV 2009" +# Serial: 623604 +# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6 +# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83 +# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81 +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV +BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw +ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF +CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +# Issuer: CN=Autoridad de Certificacion Raiz del Estado 
Venezolano O=Sistema Nacional de Certificacion Electronica OU=Superintendencia de Servicios de Certificacion Electronica +# Subject: CN=PSCProcert O=Sistema Nacional de Certificacion Electronica OU=Proveedor de Certificados PROCERT +# Label: "PSCProcert" +# Serial: 11 +# MD5 Fingerprint: e6:24:e9:12:01:ae:0c:de:8e:85:c4:ce:a3:12:dd:ec +# SHA1 Fingerprint: 70:c1:8d:74:b4:28:81:0a:e4:fd:a5:75:d7:01:9f:99:b0:3d:50:74 +# SHA256 Fingerprint: 3c:fc:3c:14:d1:f6:84:ff:17:e3:8c:43:ca:44:0c:00:b9:67:ec:93:3e:8b:fe:06:4c:a1:d7:2c:90:f2:ad:b0 +-----BEGIN CERTIFICATE----- +MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1 +dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9s +YW5vMQswCQYDVQQGEwJWRTEQMA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlz +dHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0 +aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBlcmludGVuZGVuY2lh +IGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUwIwYJ +KoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEw +MFoXDTIwMTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHBy +b2NlcnQubmV0LnZlMQ8wDQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGEx +KjAoBgNVBAsTIVByb3ZlZWRvciBkZSBDZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQG +A1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9u +aWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo9 +7BVCwfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74 +BCXfgI8Qhd19L3uA3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38G +ieU89RLAu9MLmV+QfI4tL3czkkohRqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9 +JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmOEO8GqQKJ/+MMbpfg353bIdD0 +PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG20qCZyFSTXai2 +0b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH +0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/ +6mnbVSKVUyqUtd+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1m 
+v6JpIzi4mWCZDlZTOpx+FIywBm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7 +K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvpr2uKGcfLFFb14dq12fy/czja+eev +bqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/AgEBMDcGA1UdEgQw +MC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0w +MB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFD +gBStuyIdxuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0 +b3JpZGFkIGRlIENlcnRpZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xh +bm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQHEwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0 +cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5hY2lvbmFsIGRlIENlcnRp +ZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5kZW5jaWEg +ZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkq +hkiG9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQD +AgEGME0GA1UdEQRGMESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0w +MDAwMDKgGwYFYIZeAgKgEgwQUklGLUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEag +RKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9sY3IvQ0VSVElGSUNBRE8t +UkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNyYWl6LnN1c2Nl +cnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v +Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsG +AQUFBwIBFh5odHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcN +AQELBQADggIBACtZ6yKZu4SqT96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS +1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmNg7+mvTV+LFwxNG9s2/NkAZiqlCxB +3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4quxtxj7mkoP3Yldmv +Wb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1n8Gh +HVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHm +pHmJWhSnFFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXz +sOfIt+FTvZLm8wyWuevo5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bE +qCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq3TNWOByyrYDT13K9mmyZY+gAu0F2Bbdb +mRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5poLWccret9W6aAjtmcz9 +opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3YeMLEYC/H +YvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km +-----END CERTIFICATE----- 
+ +# Issuer: CN=China Internet Network Information Center EV Certificates Root O=China Internet Network Information Center +# Subject: CN=China Internet Network Information Center EV Certificates Root O=China Internet Network Information Center +# Label: "China Internet Network Information Center EV Certificates Root" +# Serial: 1218379777 +# MD5 Fingerprint: 55:5d:63:00:97:bd:6a:97:f5:67:ab:4b:fb:6e:63:15 +# SHA1 Fingerprint: 4f:99:aa:93:fb:2b:d1:37:26:a1:99:4a:ce:7f:f0:05:f2:93:5d:1e +# SHA256 Fingerprint: 1c:01:c6:f4:db:b2:fe:fc:22:55:8b:2b:ca:32:56:3f:49:84:4a:cf:c3:2b:7b:e4:b0:ff:59:9f:9e:8c:7a:f7 +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgIESJ8AATANBgkqhkiG9w0BAQUFADCBijELMAkGA1UEBhMC +Q04xMjAwBgNVBAoMKUNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24g +Q2VudGVyMUcwRQYDVQQDDD5DaGluYSBJbnRlcm5ldCBOZXR3b3JrIEluZm9ybWF0 +aW9uIENlbnRlciBFViBDZXJ0aWZpY2F0ZXMgUm9vdDAeFw0xMDA4MzEwNzExMjVa +Fw0zMDA4MzEwNzExMjVaMIGKMQswCQYDVQQGEwJDTjEyMDAGA1UECgwpQ2hpbmEg +SW50ZXJuZXQgTmV0d29yayBJbmZvcm1hdGlvbiBDZW50ZXIxRzBFBgNVBAMMPkNo +aW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyIEVWIENlcnRp +ZmljYXRlcyBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm35z +7r07eKpkQ0H1UN+U8i6yjUqORlTSIRLIOTJCBumD1Z9S7eVnAztUwYyZmczpwA// +DdmEEbK40ctb3B75aDFk4Zv6dOtouSCV98YPjUesWgbdYavi7NifFy2cyjw1l1Vx +zUOFsUcW9SxTgHbP0wBkvUCZ3czY28Sf1hNfQYOL+Q2HklY0bBoQCxfVWhyXWIQ8 +hBouXJE0bhlffxdpxWXvayHG1VA6v2G5BY3vbzQ6sm8UY78WO5upKv23KzhmBsUs +4qpnHkWnjQRmQvaPK++IIGmPMowUc9orhpFjIpryp9vOiYurXccUwVswah+xt54u +gQEC7c+WXmPbqOY4twIDAQABo2MwYTAfBgNVHSMEGDAWgBR8cks5x8DbYqVPm6oY +NJKiyoOCWTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4E +FgQUfHJLOcfA22KlT5uqGDSSosqDglkwDQYJKoZIhvcNAQEFBQADggEBACrDx0M3 +j92tpLIM7twUbY8opJhJywyA6vPtI2Z1fcXTIWd50XPFtQO3WKwMVC/GVhMPMdoG +52U7HW8228gd+f2ABsqjPWYWqJ1MFn3AlUa1UeTiH9fqBk1jjZaM7+czV0I664zB +echNdn3e9rG3geCg+aF4RhcaVpjwTj2rHO3sOdwHSPdj/gauwqRcalsyiMXHM4Ws +ZkJHwlgkmeHlPuV1LI5D1l08eB6olYIpUNHRFrrvwb562bTYzB5MRuF3sTGrvSrI 
+zo9uoV1/A3U05K2JRVRevq4opbs/eHnrc7MKDf2+yfdWrPa37S+bISnHOLaVxATy +wy39FCqQmbkHzJ8= +-----END CERTIFICATE----- + +# Issuer: CN=Swisscom Root CA 2 O=Swisscom OU=Digital Certificate Services +# Subject: CN=Swisscom Root CA 2 O=Swisscom OU=Digital Certificate Services +# Label: "Swisscom Root CA 2" +# Serial: 40698052477090394928831521023204026294 +# MD5 Fingerprint: 5b:04:69:ec:a5:83:94:63:18:a7:86:d0:e4:f2:6e:19 +# SHA1 Fingerprint: 77:47:4f:c6:30:e4:0f:4c:47:64:3f:84:ba:b8:c6:95:4a:8a:41:ec +# SHA256 Fingerprint: f0:9b:12:2c:71:14:f4:a0:9b:d4:ea:4f:4a:99:d5:58:b4:6e:4c:25:cd:81:14:0d:29:c0:56:13:91:4c:38:41 +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBk +MQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0 +YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3Qg +Q0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2MjUwNzM4MTRaMGQxCzAJBgNVBAYT +AmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZp +Y2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvEr +jw0DzpPMLgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r +0rk0X2s682Q2zsKwzxNoysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f +2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJwDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVP +ACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpHWrumnf2U5NGKpV+GY3aF +y6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1aSgJA/MTA +tukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL +6yxSNLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0 +uPoTXGiTOmekl9AbmbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrAL +acywlKinh/LTSlDcX3KwFnUey7QYYpqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velh +k6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3qPyZ7iVNTA6z00yPhOgpD/0Q +VAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0hBBYw +FDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O +BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqh 
+b97iEoHF8TwuMA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4R +fbgZPnm3qKhyN2abGu2sEzsOv2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv +/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ82YqZh6NM4OKb3xuqFp1mrjX2lhI +REeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLzo9v/tdhZsnPdTSpx +srpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcsa0vv +aGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciAT +woCqISxxOQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99n +Bjx8Oto0QuFmtEYE3saWmA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5W +t6NlUe07qxS/TFED6F+KBZvuim6c779o+sjaC+NCydAXFJy3SuCvkychVSa1ZC+N +8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TCrvJcwhbtkj6EPnNgiLx2 +9CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX5OfNeOI5 +wSsSnqaeG8XmDtkx2Q== +-----END CERTIFICATE----- + +# Issuer: CN=Swisscom Root EV CA 2 O=Swisscom OU=Digital Certificate Services +# Subject: CN=Swisscom Root EV CA 2 O=Swisscom OU=Digital Certificate Services +# Label: "Swisscom Root EV CA 2" +# Serial: 322973295377129385374608406479535262296 +# MD5 Fingerprint: 7b:30:34:9f:dd:0a:4b:6b:35:ca:31:51:28:5d:ae:ec +# SHA1 Fingerprint: e7:a1:90:29:d3:d5:52:dc:0d:0f:c6:92:d3:ea:88:0d:15:2e:1a:6b +# SHA256 Fingerprint: d9:5f:ea:3c:a4:ee:dc:e7:4c:d7:6e:75:fc:6d:1f:f6:2c:44:1f:0f:a8:bc:77:f0:34:b1:9e:5d:b2:58:01:5d +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAw +ZzELMAkGA1UEBhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdp +dGFsIENlcnRpZmljYXRlIFNlcnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290 +IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcNMzEwNjI1MDg0NTA4WjBnMQswCQYD +VQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2Vy +dGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYgQ0Eg +MjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7Bx +UglgRCgzo3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD +1ycfMQ4jFrclyxy0uYAyXhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPH +oCE2G3pXKSinLr9xJZDzRINpUKTk4RtiGZQJo/PDvO/0vezbE53PnUgJUmfANykR 
+HvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8LiqG12W0OfvrSdsyaGOx9/ +5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaHZa0zKcQv +idm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHL +OdAGalNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaC +NYGu+HuB5ur+rPQam3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f +46Fq9mDU5zXNysRojddxyNMkM3OxbPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCB +UWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDixzgHcgplwLa7JSnaFp6LNYth +7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/BAQDAgGGMB0G +A1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED +MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWB +bj2ITY1x0kbBbkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6x +XCX5145v9Ydkn+0UjrgEjihLj6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98T +PLr+flaYC/NUn81ETm484T4VvwYmneTwkLbUwp4wLh/vx3rEUMfqe9pQy3omywC0 +Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7XwgiG/W9mR4U9s70 +WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH59yL +Gn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm +7JFe3VE/23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4S +nr8PyQUQ3nqjsTzyP6WqJ3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VN +vBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyAHmBR3NdUIR7KYndP+tiPsys6DXhyyWhB +WkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/giuMod89a2GQ+fYWVq6nTI +fI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuWl8PVP3wb +I+2ksx0WckNLIOFZfsLorSa/ovc= +-----END CERTIFICATE----- + +# Issuer: CN=CA Disig Root R1 O=Disig a.s. +# Subject: CN=CA Disig Root R1 O=Disig a.s. 
+# Label: "CA Disig Root R1" +# Serial: 14052245610670616104 +# MD5 Fingerprint: be:ec:11:93:9a:f5:69:21:bc:d7:c1:c0:67:89:cc:2a +# SHA1 Fingerprint: 8e:1c:74:f8:a6:20:b9:e5:8a:f4:61:fa:ec:2b:47:56:51:1a:52:c6 +# SHA256 Fingerprint: f9:6f:23:f4:c3:e7:9c:07:7a:46:98:8d:5a:f5:90:06:76:a0:f0:39:cb:64:5d:d1:75:49:b2:16:c8:24:40:ce +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQy +MDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjEw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy3QRk +D2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/o +OI7bm+V8u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3A +fQ+lekLZWnDZv6fXARz2m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJe +IgpFy4QxTaz+29FHuvlglzmxZcfe+5nkCiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8n +oc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTaYVKvJrT1cU/J19IG32PK +/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6vpmumwKj +rckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD +3AjLLhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE +7cderVC6xkGbrPAXZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkC +yC2fg69naQanMVXVz0tv/wQFx1isXxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLd +qvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ04IwDQYJKoZI +hvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaA +SfX8MPWbTx9BLxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXo +HqJPYNcHKfyyo6SdbhWSVhlMCrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpB +emOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5GfbVSUZP/3oNn6z4eGBrxEWi1CXYBmC +AMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85YmLLW1AL14FABZyb 
+7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKSds+x +DzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvk +F7mGnjixlAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqF +a3qdnom2piiZk4hA9z7NUaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsT +Q6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJa7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- + +# Issuer: CN=CA Disig Root R2 O=Disig a.s. +# Subject: CN=CA Disig Root R2 O=Disig a.s. +# Label: "CA Disig Root R2" +# Serial: 10572350602393338211 +# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03 +# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71 +# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03 +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I +x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI 
+hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os +zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Label: "ACCVRAIZ1" +# Serial: 6828503384748696800 +# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02 +# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17 +# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13 +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE +AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw +CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ +BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND +VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb +qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY +HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo +G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA +lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr +IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/ +0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH +k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47 
+4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO +m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa +cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl +uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI +KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls +ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG +AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT +VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG +CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA +cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA +QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA +7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA +cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA +QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA +czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu +aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt +aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud +DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF +BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp +D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU +JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m +AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD +vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms +tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH +7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA +h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF +d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H +pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +# 
Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA Global Root CA" +# Serial: 3262 +# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96 +# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65 +# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT +VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh 
+15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- + +# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Label: "TeliaSonera Root CA v1" +# Serial: 199041966741090107964904287217786801558 +# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c +# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37 +# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89 +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 +pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG 
+9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Certification Authority O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. OU=E-Tugra Sertifikasyon Merkezi +# Subject: CN=E-Tugra Certification Authority O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. OU=E-Tugra Sertifikasyon Merkezi +# Label: "E-Tugra Certification Authority" +# Serial: 7667447206703254355 +# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49 +# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39 +# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC +aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV +BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1 +Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz +MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+ +BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp +em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY +B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH +D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF +Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo +q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D +k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH +fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut +dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM +ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8 +zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX +U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6 +Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5 +XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF +Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR +HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY +GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c +77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3 ++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK +vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6 +FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl +yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P +AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD +y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d +NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 2" +# Serial: 1 +# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a +# 
SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9 +# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52 +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot 2011 O=Atos +# Subject: CN=Atos TrustedRoot 2011 O=Atos +# Label: "Atos TrustedRoot 2011" +# Serial: 6643877497813316402 +# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56 +# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21 +# SHA256 Fingerprint: 
f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ +4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 1 G3" +# Serial: 687049649626669250736271037606554624078720034195 +# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab +# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67 +# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL 
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV +wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 
2 G3" +# Serial: 390156079458959257446133169266079962026824725800 +# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06 +# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36 +# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV 
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3 G3" +# Serial: 268090761170461462463995952157327242137089239581 +# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7 +# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d +# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q +A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB 
+BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD +ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp 
+EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY 
+JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc 
OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 
+d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Issuer: CN=Certification Authority of WoSign O=WoSign CA Limited +# Subject: CN=Certification Authority of WoSign O=WoSign CA Limited 
+# Label: "WoSign" +# Serial: 125491772294754854453622855443212256657 +# MD5 Fingerprint: a1:f2:f9:b5:d2:c8:7a:74:b8:f3:05:f1:d7:e1:84:8d +# SHA1 Fingerprint: b9:42:94:bf:91:ea:8f:b6:4b:e6:10:97:c7:fb:00:13:59:b6:76:cb +# SHA256 Fingerprint: 4b:22:d5:a6:ae:c9:9f:3c:db:79:aa:5e:c0:68:38:47:9c:d5:ec:ba:71:64:f7:f2:2d:c1:d6:5f:63:d8:57:08 +-----BEGIN CERTIFICATE----- +MIIFdjCCA16gAwIBAgIQXmjWEXGUY1BWAGjzPsnFkTANBgkqhkiG9w0BAQUFADBV +MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxKjAoBgNV +BAMTIUNlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbjAeFw0wOTA4MDgw +MTAwMDFaFw0zOTA4MDgwMTAwMDFaMFUxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFX +b1NpZ24gQ0EgTGltaXRlZDEqMCgGA1UEAxMhQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgb2YgV29TaWduMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAvcqN +rLiRFVaXe2tcesLea9mhsMMQI/qnobLMMfo+2aYpbxY94Gv4uEBf2zmoAHqLoE1U +fcIiePyOCbiohdfMlZdLdNiefvAA5A6JrkkoRBoQmTIPJYhTpA2zDxIIFgsDcScc +f+Hb0v1naMQFXQoOXXDX2JegvFNBmpGN9J42Znp+VsGQX+axaCA2pIwkLCxHC1l2 +ZjC1vt7tj/id07sBMOby8w7gLJKA84X5KIq0VC6a7fd2/BVoFutKbOsuEo/Uz/4M +x1wdC34FMr5esAkqQtXJTpCzWQ27en7N1QhatH/YHGkR+ScPewavVIMYe+HdVHpR +aG53/Ma/UkpmRqGyZxq7o093oL5d//xWC0Nyd5DKnvnyOfUNqfTq1+ezEC8wQjch +zDBwyYaYD8xYTYO7feUapTeNtqwylwA6Y3EkHp43xP901DfA4v6IRmAR3Qg/UDar +uHqklWJqbrDKaiFaafPz+x1wOZXzp26mgYmhiMU7ccqjUu6Du/2gd/Tkb+dC221K +mYo0SLwX3OSACCK28jHAPwQ+658geda4BmRkAjHXqc1S+4RFaQkAKtxVi8QGRkvA +Sh0JWzko/amrzgD5LkhLJuYwTKVYyrREgk/nkR4zw7CT/xH8gdLKH3Ep3XZPkiWv +HYG3Dy+MwwbMLyejSuQOmbp8HkUff6oZRZb9/D0CAwEAAaNCMEAwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOFmzw7R8bNLtwYgFP6H +EtX2/vs+MA0GCSqGSIb3DQEBBQUAA4ICAQCoy3JAsnbBfnv8rWTjMnvMPLZdRtP1 +LOJwXcgu2AZ9mNELIaCJWSQBnfmvCX0KI4I01fx8cpm5o9dU9OpScA7F9dY74ToJ +MuYhOZO9sxXqT2r09Ys/L3yNWC7F4TmgPsc9SnOeQHrAK2GpZ8nzJLmzbVUsWh2e +JXLOC62qx1ViC777Y7NhRCOjy+EaDveaBk3e1CNOIZZbOVtXHS9dCF4Jef98l7VN +g64N1uajeeAz0JmWAjCnPv/So0M/BVoG6kQC2nz4SNAzqfkHx5Xh9T71XXG68pWp +dIhhWeO/yloTunK0jF02h+mmxTwTv97QRCbut+wucPrXnbes5cVAWubXbHssw1ab 
+R80LzvobtCHXt2a49CUwi1wNuepnsvRtrtWhnk/Yn+knArAdBtaP4/tIEp9/EaEQ +PkxROpaw0RPxx9gmrjrKkcRpnd8BKWRRb2jaFOwIQZeQjdCygPLPwj2/kWjFgGce +xGATVdVhmVd8upUPYUk6ynW8yQqTP2cOEvIo4jEbwFcW3wh8GcF+Dx+FHgo2fFt+ +J7x6v+Db9NpSvd4MVHAxkUOVyLzwPt0JfjBkUO1/AaQzZ01oT74V77D2AhGiGxMl +OtzCWfHjXEa7ZywCRuoeSKbmW9m1vFGikpbbqsY3Iqb+zCB0oy2pLmvLwIIRIbWT +ee5Ehr7XHuQe+w== +-----END CERTIFICATE----- + +# Issuer: CN=CA 沃通根证书 O=WoSign CA Limited +# Subject: CN=CA 沃通根证书 O=WoSign CA Limited +# Label: "WoSign China" +# Serial: 106921963437422998931660691310149453965 +# MD5 Fingerprint: 78:83:5b:52:16:76:c4:24:3b:83:78:e8:ac:da:9a:93 +# SHA1 Fingerprint: 16:32:47:8d:89:f9:21:3a:92:00:85:63:f5:a4:a7:d3:12:40:8a:d6 +# SHA256 Fingerprint: d6:f0:34:bd:94:aa:23:3f:02:97:ec:a4:24:5b:28:39:73:e4:47:aa:59:0f:31:0c:77:f4:8f:df:83:11:22:54 +-----BEGIN CERTIFICATE----- +MIIFWDCCA0CgAwIBAgIQUHBrzdgT/BtOOzNy0hFIjTANBgkqhkiG9w0BAQsFADBG +MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNV +BAMMEkNBIOayg+mAmuagueivgeS5pjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgw +MTAwMDFaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRl +ZDEbMBkGA1UEAwwSQ0Eg5rKD6YCa5qC56K+B5LmmMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA0EkhHiX8h8EqwqzbdoYGTufQdDTc7WU1/FDWiD+k8H/r +D195L4mx/bxjWDeTmzj4t1up+thxx7S8gJeNbEvxUNUqKaqoGXqW5pWOdO2XCld1 +9AXbbQs5uQF/qvbW2mzmBeCkTVL829B0txGMe41P/4eDrv8FAxNXUDf+jJZSEExf +v5RxadmWPgxDT74wwJ85dE8GRV2j1lY5aAfMh09Qd5Nx2UQIsYo06Yms25tO4dnk +UkWMLhQfkWsZHWgpLFbE4h4TV2TwYeO5Ed+w4VegG63XX9Gv2ystP9Bojg/qnw+L +NVgbExz03jWhCl3W6t8Sb8D7aQdGctyB9gQjF+BNdeFyb7Ao65vh4YOhn0pdr8yb ++gIgthhid5E7o9Vlrdx8kHccREGkSovrlXLp9glk3Kgtn3R46MGiCWOc76DbT52V +qyBPt7D3h1ymoOQ3OMdc4zUPLK2jgKLsLl3Az+2LBcLmc272idX10kaO6m1jGx6K +yX2m+Jzr5dVjhU1zZmkR/sgO9MHHZklTfuQZa/HpelmjbX7FF+Ynxu8b22/8DU0G +AbQOXDBGVWCvOGU6yke6rCzMRh+yRpY/8+0mBe53oWprfi1tWFxK1I5nuPHa1UaK +J/kR8slC/k7e3x9cxKSGhxYzoacXGKUN5AXlK8IrC6KVkLn9YDxOiT7nnO4fuwEC +AwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O 
+BBYEFOBNv9ybQV0T6GTwp+kVpOGBwboxMA0GCSqGSIb3DQEBCwUAA4ICAQBqinA4 +WbbaixjIvirTthnVZil6Xc1bL3McJk6jfW+rtylNpumlEYOnOXOvEESS5iVdT2H6 +yAa+Tkvv/vMx/sZ8cApBWNromUuWyXi8mHwCKe0JgOYKOoICKuLJL8hWGSbueBwj +/feTZU7n85iYr83d2Z5AiDEoOqsuC7CsDCT6eiaY8xJhEPRdF/d+4niXVOKM6Cm6 +jBAyvd0zaziGfjk9DgNyp115j0WKWa5bIW4xRtVZjc8VX90xJc/bYNaBRHIpAlf2 +ltTW/+op2znFuCyKGo3Oy+dCMYYFaA6eFN0AkLppRQjbbpCBhqcqBT/mhDn4t/lX +X0ykeVoQDF7Va/81XwVRHmyjdanPUIPTfPRm94KNPQx96N97qA4bLJyuQHCH2u2n +FoJavjVsIE4iYdm8UXrNemHcSxH5/mc0zy4EZmFcV5cjjPOGG0jfKq+nwf/Yjj4D +u9gqsPoUJbJRa4ZDhS4HIxaAjUz7tGM7zMN07RujHv41D198HRaG9Q7DlfEvr10l +O1Hm13ZBONFLAzkopR6RctR9q5czxNM+4Gm2KHmgCY0c0f9BckgG/Jou5yD5m6Le +ie2uPAmvylezkolwQOQvT8Jwg0DXJCxr5wkf09XHwQj02w47HAcLQxGEIYbpgNR1 +2KvxAmLBsX5VYc8T1yaw15zLKYs4SgsOkI26oQ== +-----END CERTIFICATE----- + +` diff --git a/vendor/github.com/camlistore/camlistore/pkg/httputil/certs_test.go b/vendor/github.com/camlistore/camlistore/pkg/httputil/certs_test.go new file mode 100644 index 00000000..40836577 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/httputil/certs_test.go @@ -0,0 +1,23 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package httputil + +import "testing" + +func TestSystemCARootsAvailable(t *testing.T) { + t.Logf("Roots available = %v", SystemCARootsAvailable()) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/httputil/faketransport.go b/vendor/github.com/camlistore/camlistore/pkg/httputil/faketransport.go new file mode 100644 index 00000000..fac64eb5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/httputil/faketransport.go @@ -0,0 +1,110 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package httputil + +import ( + "bufio" + "fmt" + "net/http" + "os" + "regexp" + "strings" + + "camlistore.org/pkg/types" +) + +// NewFakeTransport takes a map of URL to function generating a response +// and returns an http.RoundTripper that does HTTP requests out of that. +func NewFakeTransport(urls map[string]func() *http.Response) http.RoundTripper { + return fakeTransport(urls) +} + +type fakeTransport map[string]func() *http.Response + +func (m fakeTransport) RoundTrip(req *http.Request) (res *http.Response, err error) { + urls := req.URL.String() + fn, ok := m[urls] + if !ok { + return nil, fmt.Errorf("Unexpected FakeTransport URL requested: %s", urls) + } + return fn(), nil +} + +// Matcher describes a regular expression and the function that will +// be used if that regular expression is matched. 
+type Matcher struct { + URLRegex string // will be compiled and matched against URLs + Fn func() *http.Response // function that will be run if URLRegex matches +} + +// NewRegexpFakeTransport takes a slice of Matchers and returns an +// http.RoundTripper that will apply the function associated with the +// first UrlRegex that matches. +func NewRegexpFakeTransport(allMatchers []*Matcher) (http.RoundTripper, error) { + var result regexpFakeTransport = []*regexPair{} + for _, matcher := range allMatchers { + r, err := regexp.Compile(matcher.URLRegex) + if err != nil { + return nil, err + } + pair := regexPair{r, matcher.Fn} + result = append(result, &pair) + } + return result, nil +} + +type regexPair struct { + r *regexp.Regexp + fn func() *http.Response +} + +type regexpFakeTransport []*regexPair + +func (rft regexpFakeTransport) RoundTrip(req *http.Request) (*http.Response, error) { + s := req.URL.String() + for _, p := range rft { + if p.r.MatchString(s) { + return p.fn(), nil + } + } + return nil, fmt.Errorf("Unexpected RegexpFakeTransport URL requested: %s", s) +} + +// FileResponder returns an HTTP response generator that returns the +// contents of the named file. +func FileResponder(filename string) func() *http.Response { + return func() *http.Response { + f, err := os.Open(filename) + if err != nil { + return &http.Response{StatusCode: 404, Status: "404 Not Found", Body: types.EmptyBody} + } + return &http.Response{StatusCode: 200, Status: "200 OK", Body: f} + } +} + +// StaticResponder returns an HTTP response generator that parses res +// for an entire HTTP response, including headers and body. 
+func StaticResponder(res string) func() *http.Response { + _, err := http.ReadResponse(bufio.NewReader(strings.NewReader(res)), nil) + if err != nil { + panic("Invalid response given to StaticResponder: " + err.Error()) + } + return func() *http.Response { + res, _ := http.ReadResponse(bufio.NewReader(strings.NewReader(res)), nil) + return res + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/httputil/httputil.go b/vendor/github.com/camlistore/camlistore/pkg/httputil/httputil.go new file mode 100644 index 00000000..6702d13f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/httputil/httputil.go @@ -0,0 +1,337 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package httputil contains a bunch of HTTP utility code, some generic, +// and some Camlistore-specific. +package httputil + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" + "net" + "net/http" + "net/url" + "path" + "strconv" + "strings" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/env" +) + +// IsGet reports whether r.Method is a GET or HEAD request. 
+// IsGet reports whether r.Method is a GET or HEAD request.
+func IsGet(r *http.Request) bool {
+	return r.Method == "GET" || r.Method == "HEAD"
+}
+
+// ErrorRouting replies with a 500 for paths that should have been
+// routed elsewhere; hitting it indicates a handler-wiring bug.
+func ErrorRouting(rw http.ResponseWriter, req *http.Request) {
+	http.Error(rw, "Handlers wired up wrong; this path shouldn't be hit", 500)
+	log.Printf("Internal routing error on %q", req.URL.Path)
+}
+
+// BadRequestError replies with a 400 and logs the formatted message.
+func BadRequestError(rw http.ResponseWriter, errorMessage string, args ...interface{}) {
+	rw.WriteHeader(http.StatusBadRequest)
+	log.Printf("Bad request: %s", fmt.Sprintf(errorMessage, args...))
+	fmt.Fprintf(rw, "<h1>Bad Request</h1>")
+}
+
+// ForbiddenError replies with a 403 and logs the formatted message.
+func ForbiddenError(rw http.ResponseWriter, errorMessage string, args ...interface{}) {
+	rw.WriteHeader(http.StatusForbidden)
+	log.Printf("Forbidden: %s", fmt.Sprintf(errorMessage, args...))
+	fmt.Fprintf(rw, "<h1>Forbidden</h1>")
+}
+
+// RequestEntityTooLargeError replies with a 413.
+func RequestEntityTooLargeError(rw http.ResponseWriter) {
+	rw.WriteHeader(http.StatusRequestEntityTooLarge)
+	fmt.Fprintf(rw, "<h1>Request entity is too large</h1>")
+}
+
+// ServeError replies with a 500. The error detail is only echoed to
+// localhost or dev-mode clients; others get a generic message.
+func ServeError(rw http.ResponseWriter, req *http.Request, err error) {
+	rw.WriteHeader(http.StatusInternalServerError)
+	if IsLocalhost(req) || env.IsDev() {
+		fmt.Fprintf(rw, "Server error: %s\n", err)
+		return
+	}
+	fmt.Fprintf(rw, "An internal error occured, sorry.")
+}
+
+// ReturnJSON writes data as an indented JSON response with status 200.
+func ReturnJSON(rw http.ResponseWriter, data interface{}) {
+	ReturnJSONCode(rw, 200, data)
+}
+
+// ReturnJSONCode writes data as an indented JSON response with the
+// given HTTP status code, followed by a trailing newline.
+func ReturnJSONCode(rw http.ResponseWriter, code int, data interface{}) {
+	js, err := json.MarshalIndent(data, "", "  ")
+	if err != nil {
+		BadRequestError(rw, fmt.Sprintf("JSON serialization error: %v", err))
+		return
+	}
+	rw.Header().Set("Content-Type", "text/javascript")
+	rw.Header().Set("Content-Length", strconv.Itoa(len(js)+1))
+	rw.WriteHeader(code)
+	rw.Write(js)
+	rw.Write([]byte("\n"))
+}
+
+// PrefixHandler wraps another Handler and verifies that all requests'
+// Path begin with Prefix. If they don't, a 500 error is returned.
+// If they do, the headers PathBaseHeader and PathSuffixHeader are set
+// on the request before proxying to Handler.
+// PathBaseHeader is just the value of Prefix.
+// PathSuffixHeader is the part of the path that follows Prefix.
+type PrefixHandler struct {
+	Prefix  string
+	Handler http.Handler
+}
+
+const (
+	PathBaseHeader   = "X-Prefixhandler-Pathbase"
+	PathSuffixHeader = "X-Prefixhandler-Pathsuffix"
+)
+
+func (p *PrefixHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+	if !strings.HasPrefix(req.URL.Path, p.Prefix) {
+		http.Error(rw, "Inconfigured PrefixHandler", 500)
+		return
+	}
+	req.Header.Set(PathBaseHeader, p.Prefix)
+	req.Header.Set(PathSuffixHeader, strings.TrimPrefix(req.URL.Path, p.Prefix))
+	p.Handler.ServeHTTP(rw, req)
+}
+
+// PathBase returns a Request's base path, if it went via a PrefixHandler.
+func PathBase(req *http.Request) string { return req.Header.Get(PathBaseHeader) }
+
+// PathSuffix returns a Request's suffix path, if it went via a PrefixHandler.
+func PathSuffix(req *http.Request) string { return req.Header.Get(PathSuffixHeader) } + +// BaseURL returns the base URL (scheme + host and optional port + +// blobserver prefix) that should be used for requests (and responses) +// subsequent to req. The returned URL does not end in a trailing slash. +// The scheme and host:port are taken from urlStr if present, +// or derived from req otherwise. +// The prefix part comes from urlStr. +func BaseURL(urlStr string, req *http.Request) (string, error) { + var baseURL string + defaultURL, err := url.Parse(urlStr) + if err != nil { + return baseURL, err + } + prefix := path.Clean(defaultURL.Path) + scheme := "http" + if req.TLS != nil { + scheme = "https" + } + host := req.Host + if defaultURL.Host != "" { + host = defaultURL.Host + } + if defaultURL.Scheme != "" { + scheme = defaultURL.Scheme + } + baseURL = scheme + "://" + host + prefix + return baseURL, nil +} + +// RequestTargetPort returns the port targetted by the client +// in req. If not present, it returns 80, or 443 if TLS is used. +func RequestTargetPort(req *http.Request) int { + _, portStr, err := net.SplitHostPort(req.Host) + if err == nil && portStr != "" { + port, err := strconv.ParseInt(portStr, 0, 64) + if err == nil { + return int(port) + } + } + if req.TLS != nil { + return 443 + } + return 80 +} + +// Recover is meant to be used at the top of handlers with "defer" +// to catch errors from MustGet, etc: +// +// func handler(rw http.ResponseWriter, req *http.Request) { +// defer httputil.Recover(rw, req) +// id := req.MustGet("id") +// .... +// +// Recover will send the proper HTTP error type and message (e.g. +// a 400 Bad Request for MustGet) +func Recover(rw http.ResponseWriter, req *http.Request) { + RecoverJSON(rw, req) // TODO: for now. alternate format? +} + +// RecoverJSON is like Recover but returns with a JSON response. 
+func RecoverJSON(rw http.ResponseWriter, req *http.Request) { + e := recover() + if e == nil { + return + } + ServeJSONError(rw, e) +} + +type httpCoder interface { + HTTPCode() int +} + +// An InvalidMethodError is returned when an HTTP handler is invoked +// with an unsupported method. +type InvalidMethodError struct{} + +func (InvalidMethodError) Error() string { return "invalid method" } +func (InvalidMethodError) HTTPCode() int { return http.StatusMethodNotAllowed } + +// A MissingParameterError represents a missing HTTP parameter. +// The underlying string is the missing parameter name. +type MissingParameterError string + +func (p MissingParameterError) Error() string { return fmt.Sprintf("Missing parameter %q", string(p)) } +func (MissingParameterError) HTTPCode() int { return http.StatusBadRequest } + +// An InvalidParameterError represents an invalid HTTP parameter. +// The underlying string is the invalid parameter name, not value. +type InvalidParameterError string + +func (p InvalidParameterError) Error() string { return fmt.Sprintf("Invalid parameter %q", string(p)) } +func (InvalidParameterError) HTTPCode() int { return http.StatusBadRequest } + +// A ServerError is a generic 500 error. +type ServerError string + +func (e ServerError) Error() string { return string(e) } +func (ServerError) HTTPCode() int { return http.StatusInternalServerError } + +// MustGet returns a non-empty GET (or HEAD) parameter param and panics +// with a special error as caught by a deferred httputil.Recover. +func MustGet(req *http.Request, param string) string { + if !IsGet(req) { + panic(InvalidMethodError{}) + } + v := req.FormValue(param) + if v == "" { + panic(MissingParameterError(param)) + } + return v +} + +// MustGetBlobRef returns a non-nil BlobRef from req, as given by param. +// If it doesn't, it panics with a value understood by Recover or RecoverJSON. 
+func MustGetBlobRef(req *http.Request, param string) blob.Ref { + br, ok := blob.Parse(MustGet(req, param)) + if !ok { + panic(InvalidParameterError(param)) + } + return br +} + +// OptionalInt returns the integer in req given by param, or 0 if not present. +// If the form value is not an integer, it panics with a a value understood by Recover or RecoverJSON. +func OptionalInt(req *http.Request, param string) int { + v := req.FormValue(param) + if v == "" { + return 0 + } + i, err := strconv.Atoi(v) + if err != nil { + panic(InvalidParameterError(param)) + } + return i +} + +// ServeJSONError sends a JSON error response to rw for the provided +// error value. +func ServeJSONError(rw http.ResponseWriter, err interface{}) { + code := 500 + if i, ok := err.(httpCoder); ok { + code = i.HTTPCode() + } + msg := fmt.Sprint(err) + log.Printf("Sending error %v to client for: %v", code, msg) + ReturnJSONCode(rw, code, map[string]interface{}{ + "error": msg, + "errorType": http.StatusText(code), + }) +} + +// TODO: use a sync.Pool if/when Go 1.3 includes it and Camlistore depends on that. +var freeBuf = make(chan *bytes.Buffer, 2) + +func getBuf() *bytes.Buffer { + select { + case b := <-freeBuf: + b.Reset() + return b + default: + return new(bytes.Buffer) + } +} + +func putBuf(b *bytes.Buffer) { + select { + case freeBuf <- b: + default: + } +} + +// DecodeJSON decodes the JSON in res.Body into dest and then closes +// res.Body. +// It defensively caps the JSON at 8 MB for now. +func DecodeJSON(res *http.Response, dest interface{}) error { + defer CloseBody(res.Body) + buf := getBuf() + defer putBuf(buf) + if err := json.NewDecoder(io.TeeReader(io.LimitReader(res.Body, 8<<20), buf)).Decode(dest); err != nil { + return fmt.Errorf("httputil.DecodeJSON: %v, on input: %s", err, buf.Bytes()) + } + return nil +} + +// CloseBody should be used to close an http.Response.Body. 
+// +// It does a final little Read to maybe see EOF (to trigger connection +// re-use) before calling Close. +func CloseBody(rc io.ReadCloser) { + // Go 1.2 pseudo-bug: the NewDecoder(res.Body).Decode never + // sees an EOF, so we have to do this 0-byte copy here to + // force the http Transport to see its own EOF and recycle the + // connection. In Go 1.1 at least, the Close would cause it to + // read to EOF and recycle the connection, but in Go 1.2, a + // Close before EOF kills the underlying TCP connection. + // + // Will hopefully be fixed in Go 1.3, at least for bodies with + // Content-Length. Or maybe Go 1.3's Close itself would look + // to see if we're at EOF even if it hasn't been Read. + + // TODO: use a bytepool package somewhere for this byte? + // Justification for 3 byte reads: two for up to "\r\n" after + // a JSON/XML document, and then 1 to see EOF if we haven't yet. + buf := make([]byte, 1) + for i := 0; i < 3; i++ { + _, err := rc.Read(buf) + if err != nil { + break + } + } + rc.Close() +} + +func IsWebsocketUpgrade(req *http.Request) bool { + return req.Method == "GET" && req.Header.Get("Upgrade") == "websocket" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/httputil/httputil_test.go b/vendor/github.com/camlistore/camlistore/pkg/httputil/httputil_test.go new file mode 100644 index 00000000..da865c37 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/httputil/httputil_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package httputil + +import ( + "io" + "net/http" + "net/http/httptest" + "strconv" + "testing" +) + +func TestCloseBody(t *testing.T) { + const msg = "{\"foo\":\"bar\"}\r\n" + addrSeen := make(map[string]int) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + addrSeen[r.RemoteAddr]++ + w.Header().Set("Content-Length", strconv.Itoa(len(msg))) + w.WriteHeader(200) + w.Write([]byte(msg)) + })) + defer ts.Close() + + buf := make([]byte, len(msg)) + + for _, trim := range []int{0, 2} { + for i := 0; i < 3; i++ { + res, err := http.Get(ts.URL) + if err != nil { + t.Errorf("Get: %v", err) + continue + } + want := len(buf) - trim + n, err := res.Body.Read(buf[:want]) + CloseBody(res.Body) + if n != want { + t.Errorf("Read = %v; want %v", n, want) + } + if err != nil && err != io.EOF { + t.Errorf("Read = %v", err) + } + } + } + if len(addrSeen) != 1 { + t.Errorf("server saw %d distinct client addresses; want 1", len(addrSeen)) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/httputil/transport.go b/vendor/github.com/camlistore/camlistore/pkg/httputil/transport.go new file mode 100644 index 00000000..6fac04f0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/httputil/transport.go @@ -0,0 +1,97 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package httputil + +import ( + "io" + "log" + "net/http" + "sync" + "time" +) + +// StatsTransport wraps another RoundTripper (or uses the default one) and +// counts the number of HTTP requests performed. +type StatsTransport struct { + mu sync.Mutex + reqs int + + // Transport optionally specifies the transport to use. + // If nil, http.DefaultTransport is used. + Transport http.RoundTripper + + // If VerboseLog is true, HTTP request summaries are logged. + VerboseLog bool +} + +func (t *StatsTransport) Requests() int { + t.mu.Lock() + defer t.mu.Unlock() + return t.reqs +} + +func (t *StatsTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + t.mu.Lock() + t.reqs++ + n := t.reqs + t.mu.Unlock() + + rt := t.Transport + if rt == nil { + rt = http.DefaultTransport + } + var t0 time.Time + if t.VerboseLog { + t0 = time.Now() + log.Printf("(%d) %s %s ...", n, req.Method, req.URL) + } + resp, err = rt.RoundTrip(req) + if t.VerboseLog { + t1 := time.Now() + td := t1.Sub(t1) + if err == nil { + log.Printf("(%d) %s %s = status %d (in %v)", n, req.Method, req.URL, resp.StatusCode, td) + resp.Body = &logBody{body: resp.Body, n: n, t0: t0, t1: t1} + } else { + log.Printf("(%d) %s %s = error: %v (in %v)", n, req.Method, req.URL, err, td) + } + } + return +} + +type logBody struct { + body io.ReadCloser + n int + t0, t1 time.Time + readOnce sync.Once + closeOnce sync.Once +} + +func (b *logBody) Read(p []byte) (n int, err error) { + b.readOnce.Do(func() { + log.Printf("(%d) Read body", b.n) + }) + return b.body.Read(p) +} + +func (b *logBody) Close() error { + b.closeOnce.Do(func() { + t := time.Now() + log.Printf("(%d) Close body (%v tot, %v post-header)", b.n, t.Sub(b.t0), t.Sub(b.t1)) + }) + return b.body.Close() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/bench_test.go b/vendor/github.com/camlistore/camlistore/pkg/images/bench_test.go new file mode 100644 index 00000000..b0faedc0 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/pkg/images/bench_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2013 The Camlistore AUTHORS + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package images + +import ( + "image" + "testing" +) + +func benchRescale(b *testing.B, w, h, thumbW, thumbH int) { + // Most JPEGs are YCbCr, so bench with that. + im := image.NewYCbCr(image.Rect(0, 0, w, h), image.YCbCrSubsampleRatio422) + o := &DecodeOpts{MaxWidth: thumbW, MaxHeight: thumbH} + sw, sh, needRescale := o.rescaleDimensions(im.Bounds(), false) + if !needRescale { + b.Fatal("opts.rescaleDimensions failed to indicate image needs rescale") + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = rescale(im, sw, sh) + } +} + +func BenchmarkRescale1000To50(b *testing.B) { + orig, thumb := 1000, 50 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale1000To100(b *testing.B) { + orig, thumb := 1000, 100 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale1000To200(b *testing.B) { + orig, thumb := 1000, 200 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale1000To400(b *testing.B) { + orig, thumb := 1000, 400 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale1000To800(b *testing.B) { + orig, thumb := 1000, 800 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale2000To50(b *testing.B) { + orig, thumb := 2000, 50 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale2000To100(b *testing.B) { + 
orig, thumb := 2000, 100 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale2000To200(b *testing.B) { + orig, thumb := 2000, 200 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale2000To400(b *testing.B) { + orig, thumb := 2000, 400 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale2000To800(b *testing.B) { + orig, thumb := 2000, 800 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale4000To50(b *testing.B) { + orig, thumb := 4000, 50 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale4000To100(b *testing.B) { + orig, thumb := 4000, 100 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale4000To200(b *testing.B) { + orig, thumb := 4000, 200 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale4000To400(b *testing.B) { + orig, thumb := 4000, 400 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale4000To800(b *testing.B) { + orig, thumb := 4000, 800 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale8000To50(b *testing.B) { + orig, thumb := 8000, 50 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale8000To100(b *testing.B) { + orig, thumb := 8000, 100 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale8000To200(b *testing.B) { + orig, thumb := 8000, 200 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale8000To400(b *testing.B) { + orig, thumb := 8000, 400 + benchRescale(b, orig, orig, thumb, thumb) +} + +func BenchmarkRescale8000To800(b *testing.B) { + orig, thumb := 8000, 800 + benchRescale(b, orig, orig, thumb, thumb) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/benchfastjpeg_test.go b/vendor/github.com/camlistore/camlistore/pkg/images/benchfastjpeg_test.go new file mode 100644 index 00000000..0138cb1b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/images/benchfastjpeg_test.go @@ -0,0 +1,138 @@ 
+/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package images + +import ( + "bytes" + "image" + "io" + "io/ioutil" + "testing" + + "camlistore.org/pkg/images/fastjpeg" + "camlistore.org/pkg/images/resize" + "camlistore.org/pkg/types" + "camlistore.org/third_party/go/pkg/image/jpeg" +) + +// The decode routines being benchmarked in this file will use these bytes for +// their in-memory io.Readers. +var jpegBytes []byte + +func init() { + // Create image with non-uniform color to make decoding more realistic. + // Solid color jpeg images decode faster than non-uniform images. 
+ b := new(bytes.Buffer) + w, h := 4000, 4000 + im := image.NewNRGBA(image.Rect(0, 0, w, h)) + for i := range im.Pix { + switch { + case i%4 == 3: + im.Pix[i] = 255 + default: + im.Pix[i] = uint8(i) + } + } + if err := jpeg.Encode(b, im, nil); err != nil { + panic(err) + } + jpegBytes = b.Bytes() +} + +type decodeFunc func(r io.Reader) (image.Image, string, error) + +func BenchmarkStdlib(b *testing.B) { + common(b, image.Decode) +} + +func decodeDownsample(factor int) decodeFunc { + return func(r io.Reader) (image.Image, string, error) { + im, err := fastjpeg.DecodeDownsample(r, factor) + return im, "jpeg", err + } +} + +func BenchmarkDjpeg1(b *testing.B) { + if !fastjpeg.Available() { + b.Skip("Skipping benchmark, djpeg unavailable.") + } + common(b, decodeDownsample(1)) +} + +func BenchmarkDjpeg2(b *testing.B) { + if !fastjpeg.Available() { + b.Skip("Skipping benchmark, djpeg unavailable.") + } + common(b, decodeDownsample(2)) +} + +func BenchmarkDjpeg4(b *testing.B) { + if !fastjpeg.Available() { + b.Skip("Skipping benchmark, djpeg unavailable.") + } + common(b, decodeDownsample(4)) +} + +func BenchmarkDjpeg8(b *testing.B) { + if !fastjpeg.Available() { + b.Skip("Skipping benchmark, djpeg unavailable.") + } + common(b, decodeDownsample(8)) +} + +func testRun(b types.TB, decode decodeFunc) { + if !fastjpeg.Available() { + b.Skip("Skipping benchmark, djpeg unavailable.") + } + im, _, err := decode(bytes.NewReader(jpegBytes)) + if err != nil { + b.Fatal(err) + } + rect := im.Bounds() + w, h := 128, 128 + im = resize.Resize(im, rect, w, h) + err = jpeg.Encode(ioutil.Discard, im, nil) + if err != nil { + b.Fatal(err) + } +} + +func common(b *testing.B, decode decodeFunc) { + for i := 0; i < b.N; i++ { + testRun(b, decode) + } +} + +func TestStdlib(t *testing.T) { + testRun(t, decodeDownsample(1)) +} + +func TestDjpeg1(t *testing.T) { + testRun(t, decodeDownsample(1)) +} + +func TestDjpeg2(t *testing.T) { + testRun(t, decodeDownsample(2)) +} + +func TestDjpeg4(t 
*testing.T) { + testRun(t, decodeDownsample(4)) +} + +func TestDjpeg8(t *testing.T) { + testRun(t, decodeDownsample(8)) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/fastjpeg/fastjpeg.go b/vendor/github.com/camlistore/camlistore/pkg/images/fastjpeg/fastjpeg.go new file mode 100644 index 00000000..42550a7d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/images/fastjpeg/fastjpeg.go @@ -0,0 +1,230 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fastjpeg uses djpeg(1), from the Independent JPEG Group's +// (www.ijg.org) jpeg package, to quickly down-sample images on load. It can +// sample images by a factor of 1, 2, 4 or 8. +// This reduces the amount of data that must be decompressed into memory when +// the full resolution image isn't required, i.e. in the case of generating +// thumbnails. +package fastjpeg + +import ( + "bytes" + "errors" + "expvar" + "fmt" + "image" + "image/color" + "io" + "log" + "os" + "os/exec" + "strconv" + "sync" + + "camlistore.org/pkg/buildinfo" + "camlistore.org/pkg/types" + _ "camlistore.org/third_party/go/pkg/image/jpeg" +) + +var ( + ErrDjpegNotFound = errors.New("fastjpeg: djpeg not found in path") +) + +// DjpegFailedError wraps errors returned when calling djpeg and handling its +// response. Used for type asserting and retrying with other jpeg decoders, +// i.e. the standard library's jpeg.Decode. 
+type DjpegFailedError struct { + Err error +} + +func (dfe DjpegFailedError) Error() string { + return dfe.Err.Error() +} + +// TODO(wathiede): do we need to conditionally add ".exe" on Windows? I have +// no access to test on Windows. +const djpegBin = "djpeg" + +var ( + checkAvailability sync.Once + available bool +) + +var ( + djpegSuccessVar = expvar.NewInt("fastjpeg-djpeg-success") + djpegFailureVar = expvar.NewInt("fastjpeg-djpeg-failure") + // Bytes read from djpeg subprocess + djpegBytesReadVar = expvar.NewInt("fastjpeg-djpeg-bytes-read") + // Bytes written to djpeg subprocess + djpegBytesWrittenVar = expvar.NewInt("fastjpeg-djpeg-bytes-written") +) + +func Available() bool { + checkAvailability.Do(func() { + if ok, _ := strconv.ParseBool(os.Getenv("CAMLI_DISABLE_DJPEG")); ok { + log.Println("CAMLI_DISABLE_DJPEG set in environment. Disabling fastjpeg.") + return + } + + if p, err := exec.LookPath(djpegBin); p != "" && err == nil { + available = true + log.Printf("fastjpeg enabled with %s.", p) + } + if !available { + log.Printf("%s not found in PATH, disabling fastjpeg.", djpegBin) + } + }) + + return available +} + +func init() { + buildinfo.RegisterDjpegStatusFunc(djpegStatus) +} + +func djpegStatus() string { + // TODO: more info: its path, whether it works, its version, etc. 
+ if Available() { + return "djpeg available" + } + return "djpeg optimizaton unavailable" +} + +func readPNM(buf *bytes.Buffer) (image.Image, error) { + var imgType, w, h int + nTokens, err := fmt.Fscanf(buf, "P%d\n%d %d\n255\n", &imgType, &w, &h) + if err != nil { + return nil, err + } + if nTokens != 3 { + hdr := buf.Bytes() + if len(hdr) > 100 { + hdr = hdr[:100] + } + return nil, fmt.Errorf("fastjpeg: Invalid PNM header: %q", hdr) + } + + switch imgType { + case 5: // Gray + src := buf.Bytes() + if len(src) != w*h { + return nil, fmt.Errorf("fastjpeg: grayscale source buffer not sized w*h") + } + im := &image.Gray{ + Pix: src, + Stride: w, + Rect: image.Rect(0, 0, w, h), + } + return im, nil + case 6: // RGB + src := buf.Bytes() + if len(src) != w*h*3 { + return nil, fmt.Errorf("fastjpeg: RGB source buffer not sized w*h*3") + } + im := image.NewRGBA(image.Rect(0, 0, w, h)) + dst := im.Pix + for i := 0; i < len(src)/3; i++ { + dst[4*i+0] = src[3*i+0] // R + dst[4*i+1] = src[3*i+1] // G + dst[4*i+2] = src[3*i+2] // B + dst[4*i+3] = 255 // Alpha + } + return im, nil + default: + return nil, fmt.Errorf("fastjpeg: Unsupported PNM type P%d", imgType) + } +} + +// Factor returns the sample factor DecodeSample should use to generate a +// sampled image greater than or equal to sw x sh pixels given a source image +// of w x h pixels. +func Factor(w, h, sw, sh int) int { + switch { + case w>>3 >= sw && h>>3 >= sh: + return 8 + case w>>2 >= sw && h>>2 >= sh: + return 4 + case w>>1 >= sw && h>>1 >= sh: + return 2 + } + return 1 +} + +// DecodeDownsample decodes JPEG data in r, down-sampling it by factor. +// If djpeg is not found, err is ErrDjpegNotFound and r is not read from. +// If the execution of djpeg, or decoding the resulting PNM fails, error will +// be of type DjpegFailedError. 
+func DecodeDownsample(r io.Reader, factor int) (image.Image, error) { + if !Available() { + return nil, ErrDjpegNotFound + } + switch factor { + case 1, 2, 4, 8: + default: + return nil, fmt.Errorf("fastjpeg: unsupported sample factor %d", factor) + } + + buf := new(bytes.Buffer) + tr := io.TeeReader(r, buf) + ic, format, err := image.DecodeConfig(tr) + if err != nil { + return nil, err + } + if format != "jpeg" { + return nil, fmt.Errorf("fastjpeg: Unsupported format %q", format) + } + var bpp int + switch ic.ColorModel { + case color.YCbCrModel: + bpp = 4 // JPEG will decode to RGB, and we'll expand inplace to RGBA. + case color.GrayModel: + bpp = 1 + default: + return nil, fmt.Errorf("fastjpeg: Unsupported thumnbnail color model %T", ic.ColorModel) + } + args := []string{djpegBin, "-scale", fmt.Sprintf("1/%d", factor)} + cmd := exec.Command(args[0], args[1:]...) + cmd.Stdin = types.NewStatsReader(djpegBytesWrittenVar, io.MultiReader(buf, r)) + + // Allocate space for the RGBA / Gray pixel data plus some extra for PNM + // header info. Explicitly allocate all the memory upfront to prevent + // many smaller allocations. + pixSize := ic.Width*ic.Height*bpp/factor/factor + 128 + w := bytes.NewBuffer(make([]byte, 0, pixSize)) + cmd.Stdout = w + + stderrW := new(bytes.Buffer) + cmd.Stderr = stderrW + if err := cmd.Run(); err != nil { + // cmd.ProcessState == nil happens if /lib/*/ld-x.yz.so is missing, which gives you the ever useful: + // "fork/exec /usr/bin/djpeg: no such file or directory" error message. + // So of course it only happens on broken systems and this check is probably overkill. + if cmd.ProcessState == nil || !cmd.ProcessState.Success() { + djpegFailureVar.Add(1) + return nil, DjpegFailedError{Err: fmt.Errorf("%v: %s", err, stderrW)} + } + // false alarm, so proceed. 
See http://camlistore.org/issue/550 + } + djpegSuccessVar.Add(1) + djpegBytesReadVar.Add(int64(w.Len())) + m, err := readPNM(w) + if err != nil { + return m, DjpegFailedError{Err: err} + } + return m, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/fastjpeg/fastjpeg_test.go b/vendor/github.com/camlistore/camlistore/pkg/images/fastjpeg/fastjpeg_test.go new file mode 100644 index 00000000..6a1deadf --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/images/fastjpeg/fastjpeg_test.go @@ -0,0 +1,214 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fastjpeg + +import ( + "bytes" + "image" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "strconv" + "sync" + "testing" + + "camlistore.org/third_party/go/pkg/image/jpeg" +) + +const ( + width = 3840 + height = 1280 +) + +// testImage hold an image and the encoded jpeg bytes for the image. +type testImage struct { + im image.Image + buf []byte +} + +// makeTestImages generates an RGBA and a grayscale image and returns +// testImages containing the JPEG encoded form as bytes and the expected color +// model of the image when decoded. 
+func makeTestImages() ([]testImage, error) { + var ims []testImage + w := bytes.NewBuffer(nil) + im1 := image.NewRGBA(image.Rect(0, 0, width, height)) + for i := range im1.Pix { + switch { + case i%4 == 3: + im1.Pix[i] = 255 + default: + im1.Pix[i] = uint8(i) + } + } + if err := jpeg.Encode(w, im1, nil); err != nil { + return nil, err + } + ims = append(ims, testImage{im: im1, buf: w.Bytes()}) + + w = bytes.NewBuffer(nil) + im2 := image.NewGray(image.Rect(0, 0, width, height)) + for i := range im2.Pix { + im2.Pix[i] = uint8(i) + } + if err := jpeg.Encode(w, im2, nil); err != nil { + return nil, err + } + ims = append(ims, testImage{im: im2, buf: w.Bytes()}) + return ims, nil + +} + +func TestDecodeDownsample(t *testing.T) { + checkAvailability = sync.Once{} + if !Available() { + t.Skip("djpeg isn't available.") + } + + tis, err := makeTestImages() + if err != nil { + t.Fatal(err) + } + + if _, err := DecodeDownsample(bytes.NewReader(tis[0].buf), 0); err == nil { + t.Errorf("Expect error for invalid sample factor 0") + } + for i, ti := range tis { + for factor := 1; factor <= 8; factor *= 2 { + im, err := DecodeDownsample(bytes.NewReader(ti.buf), factor) + if err != nil { + t.Errorf("%d: Sample factor %d failed: %v", i, factor, err) + continue + } + wantW := width / factor + wantH := height / factor + b := im.Bounds() + gotW := b.Dx() + gotH := b.Dy() + + if wantW != gotW || wantH != gotH || reflect.TypeOf(im) != reflect.TypeOf(ti.im) { + t.Errorf("%d: Sample factor %d want image %dx%d %T got %dx%d %T", i, factor, wantW, wantH, ti.im, gotW, gotH, im) + } + } + } +} + +// TestUnavailable verifies the behavior of Available and DecodeDownsample +// when djpeg is not available. +// It sets the environment variable CAMLI_DISABLE_DJPEG and spawns +// a subprocess to simulate unavailability. 
+func TestUnavailable(t *testing.T) { + checkAvailability = sync.Once{} + defer os.Setenv("CAMLI_DISABLE_DJPEG", "0") + if ok, _ := strconv.ParseBool(os.Getenv("CAMLI_DISABLE_DJPEG")); !ok { + os.Setenv("CAMLI_DISABLE_DJPEG", "1") + out, err := exec.Command(os.Args[0], "-test.v", + "-test.run=TestUnavailable$").CombinedOutput() + if err != nil { + t.Fatalf("%v: %s", err, out) + } + return + } + + if Available() { + t.Fatal("djpeg shouldn't be available when run with CAMLI_DISABLE_DJPEG set.") + } + + tis, err := makeTestImages() + if err != nil { + t.Fatal(err) + } + if _, err := DecodeDownsample(bytes.NewReader(tis[0].buf), 2); err != ErrDjpegNotFound { + t.Errorf("Wanted ErrDjpegNotFound, got %v", err) + } +} + +func TestFailed(t *testing.T) { + switch runtime.GOOS { + case "darwin", "freebsd", "linux": + default: + t.Skip("test only runs on UNIX") + } + checkAvailability = sync.Once{} + if !Available() { + t.Skip("djpeg isn't available.") + } + + oldPath := os.Getenv("PATH") + defer os.Setenv("PATH", oldPath) + // Use djpeg that exits after calling false. 
+ newPath, err := filepath.Abs("testdata") + if err != nil { + t.Fatal(err) + } + os.Setenv("PATH", newPath) + t.Log("PATH", os.Getenv("PATH")) + t.Log(exec.LookPath("djpeg")) + + tis, err := makeTestImages() + if err != nil { + t.Fatal(err) + } + _, err = DecodeDownsample(bytes.NewReader(tis[0].buf), 2) + if _, ok := err.(DjpegFailedError); !ok { + t.Errorf("Got err type %T want ErrDjpegFailed: %v", err, err) + } +} + +func TestFactor(t *testing.T) { + checkAvailability = sync.Once{} + if !Available() { + t.Skip("djpeg isn't available.") + } + + const ( + width = 3840 + height = 1280 + ) + testCases := []struct { + w, h int + want int + }{ + {width + 1, height, 1}, + {width, height + 1, 1}, + {width, height, 1}, + {width - 1, height, 1}, + {width, height - 1, 1}, + {width/2 + 1, height / 2, 1}, + {width / 2, height/2 + 1, 1}, + + {width / 2, height / 2, 2}, + {width/2 - 1, height / 2, 2}, + {width / 2, height/2 - 1, 2}, + + {width / 8, height/8 + 1, 4}, + {width/8 + 1, height / 8, 4}, + + {width / 8, height / 8, 8}, + {width / 8, height/8 - 1, 8}, + {width/8 - 1, height / 8, 8}, + {width/8 - 1, height/8 - 1, 8}, + } + for _, tc := range testCases { + if got := Factor(width, height, tc.w, tc.h); got != tc.want { + t.Errorf("%dx%d -> %dx%d got %d want %d", width, height, + tc.w, tc.h, got, tc.want) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/fastjpeg/testdata/djpeg b/vendor/github.com/camlistore/camlistore/pkg/images/fastjpeg/testdata/djpeg new file mode 100755 index 00000000..11a0e977 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/images/fastjpeg/testdata/djpeg @@ -0,0 +1,2 @@ +#!/bin/sh +/usr/bin/false diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/images.go b/vendor/github.com/camlistore/camlistore/pkg/images/images.go new file mode 100644 index 00000000..551f2297 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/images/images.go @@ -0,0 +1,564 @@ +/* +Copyright 2012 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package images + +import ( + "bytes" + "fmt" + "image" + "image/draw" + "image/jpeg" + "io" + "log" + "os" + "strconv" + "time" + + _ "image/gif" + _ "image/png" + + "camlistore.org/pkg/images/fastjpeg" + "camlistore.org/pkg/images/resize" + "camlistore.org/third_party/github.com/nf/cr2" + "camlistore.org/third_party/github.com/rwcarlsen/goexif/exif" + + // TODO(mpl, wathiede): add test(s) to check we can decode both tiff and cr2, + // so we don't mess up the import order again. + // See https://camlistore-review.googlesource.com/5196 comments. + + // tiff package must be imported after any image packages that decode + // tiff-like formats, i.e. CR2 or DNG + _ "camlistore.org/third_party/golang.org/x/image/tiff" +) + +var disableThumbCache, _ = strconv.ParseBool(os.Getenv("CAMLI_DISABLE_THUMB_CACHE")) + +// thumbnailVersion should be incremented whenever we want to +// invalidate the cache of previous thumbnails on the server's +// cache and in browsers. +const thumbnailVersion = "2" + +// ThumbnailVersion returns a string safe for URL query components +// which is a generation number. Whenever the thumbnailing code is +// updated, so will this string. It should be placed in some URL +// component (typically "tv"). 
+func ThumbnailVersion() string { + if disableThumbCache { + return fmt.Sprintf("nocache%d", time.Now().UnixNano()) + } + return thumbnailVersion +} + +// Exif Orientation Tag values +// http://sylvana.net/jpegcrop/exif_orientation.html +const ( + topLeftSide = 1 + topRightSide = 2 + bottomRightSide = 3 + bottomLeftSide = 4 + leftSideTop = 5 + rightSideTop = 6 + rightSideBottom = 7 + leftSideBottom = 8 +) + +// The FlipDirection type is used by the Flip option in DecodeOpts +// to indicate in which direction to flip an image. +type FlipDirection int + +// FlipVertical and FlipHorizontal are two possible FlipDirections +// values to indicate in which direction an image will be flipped. +const ( + FlipVertical FlipDirection = 1 << iota + FlipHorizontal +) + +type DecodeOpts struct { + // Rotate specifies how to rotate the image. + // If nil, the image is rotated automatically based on EXIF metadata. + // If an int, Rotate is the number of degrees to rotate + // counter clockwise and must be one of 0, 90, -90, 180, or + // -180. + Rotate interface{} + + // Flip specifies how to flip the image. + // If nil, the image is flipped automatically based on EXIF metadata. + // Otherwise, Flip is a FlipDirection bitfield indicating how to flip. + Flip interface{} + + // MaxWidgth and MaxHeight optionally specify bounds on the + // image's size. Rescaling is done before flipping or rotating. + // Proportions are conserved, so the smallest of the two is used + // as the decisive one if needed. + MaxWidth, MaxHeight int + + // ScaleWidth and ScaleHeight optionally specify how to rescale the + // image's dimensions. Rescaling is done before flipping or rotating. + // Proportions are conserved, so the smallest of the two is used + // as the decisive one if needed. + // They overrule MaxWidth and MaxHeight. 
+ ScaleWidth, ScaleHeight float32 + + // TODO: consider alternate options if scaled ratio doesn't + // match original ratio: + // Crop bool + // Stretch bool +} + +// Config is like the standard library's image.Config as used by DecodeConfig. +type Config struct { + Width, Height int + Format string + Modified bool // true if Decode actually rotated or flipped the image. +} + +func (c *Config) setBounds(im image.Image) { + if im != nil { + c.Width = im.Bounds().Dx() + c.Height = im.Bounds().Dy() + } +} + +func rotate(im image.Image, angle int) image.Image { + var rotated *image.NRGBA + // trigonometric (i.e counter clock-wise) + switch angle { + case 90: + newH, newW := im.Bounds().Dx(), im.Bounds().Dy() + rotated = image.NewNRGBA(image.Rect(0, 0, newW, newH)) + for y := 0; y < newH; y++ { + for x := 0; x < newW; x++ { + rotated.Set(x, y, im.At(newH-1-y, x)) + } + } + case -90: + newH, newW := im.Bounds().Dx(), im.Bounds().Dy() + rotated = image.NewNRGBA(image.Rect(0, 0, newW, newH)) + for y := 0; y < newH; y++ { + for x := 0; x < newW; x++ { + rotated.Set(x, y, im.At(y, newW-1-x)) + } + } + case 180, -180: + newW, newH := im.Bounds().Dx(), im.Bounds().Dy() + rotated = image.NewNRGBA(image.Rect(0, 0, newW, newH)) + for y := 0; y < newH; y++ { + for x := 0; x < newW; x++ { + rotated.Set(x, y, im.At(newW-1-x, newH-1-y)) + } + } + default: + return im + } + return rotated +} + +// flip returns a flipped version of the image im, according to +// the direction(s) in dir. +// It may flip the input im in place and return it, or it may allocate a +// new NRGBA (if im is an *image.YCbCr). 
+func flip(im image.Image, dir FlipDirection) image.Image { + if dir == 0 { + return im + } + ycbcr := false + var nrgba image.Image + dx, dy := im.Bounds().Dx(), im.Bounds().Dy() + di, ok := im.(draw.Image) + if !ok { + if _, ok := im.(*image.YCbCr); !ok { + log.Printf("failed to flip image: input does not satisfy draw.Image") + return im + } + // because YCbCr does not implement Set, we replace it with a new NRGBA + ycbcr = true + nrgba = image.NewNRGBA(image.Rect(0, 0, dx, dy)) + di, ok = nrgba.(draw.Image) + if !ok { + log.Print("failed to flip image: could not cast an NRGBA to a draw.Image") + return im + } + } + if dir&FlipHorizontal != 0 { + for y := 0; y < dy; y++ { + for x := 0; x < dx/2; x++ { + old := im.At(x, y) + di.Set(x, y, im.At(dx-1-x, y)) + di.Set(dx-1-x, y, old) + } + } + } + if dir&FlipVertical != 0 { + for y := 0; y < dy/2; y++ { + for x := 0; x < dx; x++ { + old := im.At(x, y) + di.Set(x, y, im.At(x, dy-1-y)) + di.Set(x, dy-1-y, old) + } + } + } + if ycbcr { + return nrgba + } + return im +} + +// ScaledDimensions returns the newWidth and newHeight obtained +// when an image of dimensions w x h has to be rescaled under +// mw x mh, while conserving the proportions. +// It returns 1,1 if any of the parameter is 0. +func ScaledDimensions(w, h, mw, mh int) (newWidth int, newHeight int) { + if w == 0 || h == 0 || mw == 0 || mh == 0 { + imageDebug("ScaledDimensions was given as 0; returning 1x1 as dimensions.") + return 1, 1 + } + newWidth, newHeight = mw, mh + if float32(h)/float32(mh) > float32(w)/float32(mw) { + newWidth = w * mh / h + } else { + newHeight = h * mw / w + } + return +} + +// rescaleDimensions computes the width & height in the pre-rotated +// orientation needed to meet the post-rotation constraints of opts. +// The image bound by b represents the pre-rotated dimensions of the image. +// needRescale is true if the image requires a resize. 
+func (opts *DecodeOpts) rescaleDimensions(b image.Rectangle, swapDimensions bool) (width, height int, needRescale bool) { + w, h := b.Dx(), b.Dy() + mw, mh := opts.MaxWidth, opts.MaxHeight + mwf, mhf := opts.ScaleWidth, opts.ScaleHeight + if mw == 0 && mh == 0 && mwf == 0 && mhf == 0 { + return w, h, false + } + + // Floating point compares probably only allow this to work if the values + // were specified as the literal 1 or 1.0, computed values will likely be + // off. If Scale{Width,Height} end up being 1.0-epsilon we'll rescale + // when it probably wouldn't even be noticeable but that's okay. + if opts.ScaleWidth == 1.0 && opts.ScaleHeight == 1.0 { + return w, h, false + } + + if swapDimensions { + w, h = h, w + } + + // ScaleWidth and ScaleHeight overrule MaxWidth and MaxHeight + if mwf > 0.0 && mwf <= 1 { + mw = int(mwf * float32(w)) + } + if mhf > 0.0 && mhf <= 1 { + mh = int(mhf * float32(h)) + } + + neww, newh := ScaledDimensions(w, h, mw, mh) + if neww > w || newh > h { + // Don't scale up. + return w, h, false + } + + needRescale = neww != w || newh != h + if swapDimensions { + return newh, neww, needRescale + } + return neww, newh, needRescale +} + +// rescale resizes im in-place to the dimensions sw x sh, overwriting the +// existing pixel data. It is up to the caller to ensure sw & sh maintain the +// aspect ratio of im. +func rescale(im image.Image, sw, sh int) image.Image { + b := im.Bounds() + w, h := b.Dx(), b.Dy() + if sw == w && sh == h { + return im + } + + // If it's gigantic, it's more efficient to downsample first + // and then resize; resizing will smooth out the roughness. + // (trusting the moustachio guys on that one). + if w > sw*2 && h > sh*2 { + im = resize.ResampleInplace(im, b, sw*2, sh*2) + return resize.HalveInplace(im) + } + return resize.Resize(im, b, sw, sh) +} + +// forcedRotate checks if the values in opts explicitly set a rotation. 
+func (opts *DecodeOpts) forcedRotate() bool { + return opts != nil && opts.Rotate != nil +} + +// forcedRotate checks if the values in opts explicitly set a flip. +func (opts *DecodeOpts) forcedFlip() bool { + return opts != nil && opts.Flip != nil +} + +// useEXIF checks if the values in opts imply EXIF data should be used for +// orientation. +func (opts *DecodeOpts) useEXIF() bool { + return !(opts.forcedRotate() || opts.forcedFlip()) +} + +// forcedOrientation returns the rotation and flip values stored in opts. The +// values are asserted to their proper type, and err is non-nil if an invalid +// value is found. This function ignores the orientation stored in EXIF. +// If auto-correction of the image's orientation is desired, it is the +// caller's responsibility to check via useEXIF first. +func (opts *DecodeOpts) forcedOrientation() (angle int, flipMode FlipDirection, err error) { + var ( + ok bool + ) + if opts.forcedRotate() { + if angle, ok = opts.Rotate.(int); !ok { + return 0, 0, fmt.Errorf("Rotate should be an int, not a %T", opts.Rotate) + } + } + if opts.forcedFlip() { + if flipMode, ok = opts.Flip.(FlipDirection); !ok { + return 0, 0, fmt.Errorf("Flip should be a FlipDirection, not a %T", opts.Flip) + } + } + return angle, flipMode, nil +} + +var debug, _ = strconv.ParseBool(os.Getenv("CAMLI_DEBUG_IMAGES")) + +func imageDebug(msg string) { + if debug { + log.Print(msg) + } +} + +// DecodeConfig returns the image Config similarly to +// the standard library's image.DecodeConfig with the +// addition that it also checks for an EXIF orientation, +// and sets the Width and Height as they would visibly +// be after correcting for that orientation. 
+func DecodeConfig(r io.Reader) (Config, error) { + var c Config + var buf bytes.Buffer + tr := io.TeeReader(io.LimitReader(r, 2<<20), &buf) + swapDimensions := false + + ex, err := exif.Decode(tr) + // trigger a retry when there isn't enough data for reading exif data from a tiff file + if exif.IsShortReadTagValueError(err) { + return c, io.ErrUnexpectedEOF + } + if err != nil { + imageDebug(fmt.Sprintf("No valid EXIF, error: %v.", err)) + } else { + tag, err := ex.Get(exif.Orientation) + if err != nil { + imageDebug(`No "Orientation" tag in EXIF.`) + } else { + orient, err := tag.Int(0) + if err == nil { + switch orient { + // those are the orientations that require + // a rotation of ±90 + case leftSideTop, rightSideTop, rightSideBottom, leftSideBottom: + swapDimensions = true + } + } else { + imageDebug(fmt.Sprintf("EXIF Error: %v", err)) + } + } + } + conf, format, err := image.DecodeConfig(io.MultiReader(&buf, r)) + if err != nil { + imageDebug(fmt.Sprintf("Image Decoding failed: %v", err)) + return c, err + } + c.Format = format + if swapDimensions { + c.Width, c.Height = conf.Height, conf.Width + } else { + c.Width, c.Height = conf.Width, conf.Height + } + return c, err +} + +// decoder reads an image from r and modifies the image as defined by opts. +// swapDimensions indicates the decoded image will be rotated after being +// returned, and when interpreting opts, the post-rotation dimensions should +// be considered. +// The decoded image is returned in im. The registered name of the decoder +// used is returned in format. If the image was not successfully decoded, err +// will be non-nil. If the decoded image was made smaller, needRescale will +// be true. +func decode(r io.Reader, opts *DecodeOpts, swapDimensions bool) (im image.Image, format string, err error, needRescale bool) { + if opts == nil { + // Fall-back to normal decode. 
+ im, format, err = image.Decode(r) + return im, format, err, false + } + + var buf bytes.Buffer + tr := io.TeeReader(r, &buf) + ic, format, err := image.DecodeConfig(tr) + if err != nil { + return nil, "", err, false + } + + mr := io.MultiReader(&buf, r) + b := image.Rect(0, 0, ic.Width, ic.Height) + sw, sh, needRescale := opts.rescaleDimensions(b, swapDimensions) + if !needRescale { + im, format, err = image.Decode(mr) + return im, format, err, false + } + + imageDebug(fmt.Sprintf("Resizing from %dx%d -> %dx%d", ic.Width, ic.Height, sw, sh)) + if format == "cr2" { + // Replace mr with an io.Reader to the JPEG thumbnail embedded in a + // CR2 image. + if mr, err = cr2.NewReader(mr); err != nil { + return nil, "", err, false + } + format = "jpeg" + } + + if format == "jpeg" && fastjpeg.Available() { + factor := fastjpeg.Factor(ic.Width, ic.Height, sw, sh) + if factor > 1 { + var buf bytes.Buffer + tr := io.TeeReader(mr, &buf) + im, err = fastjpeg.DecodeDownsample(tr, factor) + switch err.(type) { + case fastjpeg.DjpegFailedError: + log.Printf("Retrying with jpeg.Decode, because djpeg failed with: %v", err) + im, err = jpeg.Decode(io.MultiReader(&buf, mr)) + case nil: + // fallthrough to rescale() below. + default: + return nil, format, err, false + } + return rescale(im, sw, sh), format, err, true + } + } + + // Fall-back to normal decode. + im, format, err = image.Decode(mr) + if err != nil { + return nil, "", err, false + } + return rescale(im, sw, sh), format, err, needRescale +} + +// exifOrientation parses the EXIF data in r and returns the stored +// orientation as the angle and flip necessary to transform the image. 
+func exifOrientation(r io.Reader) (int, FlipDirection) { + var ( + angle int + flipMode FlipDirection + ) + ex, err := exif.Decode(r) + if err != nil { + imageDebug("No valid EXIF; will not rotate or flip.") + return 0, 0 + } + tag, err := ex.Get(exif.Orientation) + if err != nil { + imageDebug(`No "Orientation" tag in EXIF; will not rotate or flip.`) + return 0, 0 + } + orient, err := tag.Int(0) + if err != nil { + imageDebug(fmt.Sprintf("EXIF error: %v", err)) + return 0, 0 + } + switch orient { + case topLeftSide: + // do nothing + case topRightSide: + flipMode = 2 + case bottomRightSide: + angle = 180 + case bottomLeftSide: + angle = 180 + flipMode = 2 + case leftSideTop: + angle = -90 + flipMode = 2 + case rightSideTop: + angle = -90 + case rightSideBottom: + angle = 90 + flipMode = 2 + case leftSideBottom: + angle = 90 + } + return angle, flipMode +} + +// Decode decodes an image from r using the provided decoding options. +// The Config returned is similar to the one from the image package, +// with the addition of the Modified field which indicates if the +// image was actually flipped, rotated, or scaled. +// If opts is nil, the defaults are used. +func Decode(r io.Reader, opts *DecodeOpts) (image.Image, Config, error) { + var ( + angle int + buf bytes.Buffer + c Config + flipMode FlipDirection + ) + + tr := io.TeeReader(io.LimitReader(r, 2<<20), &buf) + if opts.useEXIF() { + angle, flipMode = exifOrientation(tr) + } else { + var err error + angle, flipMode, err = opts.forcedOrientation() + if err != nil { + return nil, c, err + } + } + + // Orientation changing rotations should have their dimensions swapped + // when scaling. 
+ var swapDimensions bool + switch angle { + case 90, -90: + swapDimensions = true + } + + mr := io.MultiReader(&buf, r) + im, format, err, rescaled := decode(mr, opts, swapDimensions) + if err != nil { + return nil, c, err + } + c.Modified = rescaled + + if angle != 0 { + im = rotate(im, angle) + c.Modified = true + } + + if flipMode != 0 { + im = flip(im, flipMode) + c.Modified = true + } + + c.Format = format + c.setBounds(im) + return im, c, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/images_test.go b/vendor/github.com/camlistore/camlistore/pkg/images/images_test.go new file mode 100644 index 00000000..0f51d063 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/images/images_test.go @@ -0,0 +1,374 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package images + +import ( + "bytes" + "image" + "os" + "path/filepath" + "sort" + "strings" + "testing" + "time" + + "camlistore.org/third_party/github.com/rwcarlsen/goexif/exif" + "camlistore.org/third_party/go/pkg/image/jpeg" +) + +const datadir = "testdata" + +func equals(im1, im2 image.Image) bool { + if !im1.Bounds().Eq(im2.Bounds()) { + return false + } + for y := 0; y < im1.Bounds().Dy(); y++ { + for x := 0; x < im1.Bounds().Dx(); x++ { + r1, g1, b1, a1 := im1.At(x, y).RGBA() + r2, g2, b2, a2 := im2.At(x, y).RGBA() + if r1 != r2 || g1 != g2 || b1 != b2 || a1 != a2 { + return false + } + } + } + return true +} + +func straightFImage(t *testing.T) image.Image { + g, err := os.Open(filepath.Join(datadir, "f1.jpg")) + if err != nil { + t.Fatal(err) + } + defer g.Close() + straightF, err := jpeg.Decode(g) + if err != nil { + t.Fatal(err) + } + return straightF +} + +func smallStraightFImage(t *testing.T) image.Image { + g, err := os.Open(filepath.Join(datadir, "f1-s.jpg")) + if err != nil { + t.Fatal(err) + } + defer g.Close() + straightF, err := jpeg.Decode(g) + if err != nil { + t.Fatal(err) + } + return straightF +} + +func sampleNames(t *testing.T) []string { + dir, err := os.Open(datadir) + if err != nil { + t.Fatal(err) + } + defer dir.Close() + samples, err := dir.Readdirnames(-1) + if err != nil { + t.Fatal(err) + } + sort.Strings(samples) + return samples +} + +// TestEXIFCorrection tests that the input files with EXIF metadata +// are correctly automatically rotated/flipped when decoded. 
+func TestEXIFCorrection(t *testing.T) { + samples := sampleNames(t) + straightF := straightFImage(t) + for _, v := range samples { + if !strings.Contains(v, "exif") || strings.HasSuffix(v, "-s.jpg") { + continue + } + name := filepath.Join(datadir, v) + t.Logf("correcting %s with EXIF Orientation", name) + f, err := os.Open(name) + if err != nil { + t.Fatal(err) + } + defer f.Close() + im, _, err := Decode(f, nil) + if err != nil { + t.Fatal(err) + } + if !equals(im, straightF) { + t.Fatalf("%v not properly corrected with exif", name) + } + } +} + +// TestForcedCorrection tests that manually specifying the +// rotation/flipping to be applied when decoding works as +// expected. +func TestForcedCorrection(t *testing.T) { + samples := sampleNames(t) + straightF := straightFImage(t) + for _, v := range samples { + if strings.HasSuffix(v, "-s.jpg") { + continue + } + name := filepath.Join(datadir, v) + t.Logf("forced correction of %s", name) + f, err := os.Open(name) + if err != nil { + t.Fatal(err) + } + defer f.Close() + num := name[10] + angle, flipMode := 0, 0 + switch num { + case '1': + // nothing to do + case '2': + flipMode = 2 + case '3': + angle = 180 + case '4': + angle = 180 + flipMode = 2 + case '5': + angle = -90 + flipMode = 2 + case '6': + angle = -90 + case '7': + angle = 90 + flipMode = 2 + case '8': + angle = 90 + } + im, _, err := Decode(f, &DecodeOpts{Rotate: angle, Flip: FlipDirection(flipMode)}) + if err != nil { + t.Fatal(err) + } + if !equals(im, straightF) { + t.Fatalf("%v not properly corrected", name) + } + } +} + +// TestRescale verifies that rescaling an image, without +// any rotation/flipping, produces the expected image. 
+func TestRescale(t *testing.T) { + name := filepath.Join(datadir, "f1.jpg") + t.Logf("rescaling %s with half-width and half-height", name) + f, err := os.Open(name) + if err != nil { + t.Fatal(err) + } + defer f.Close() + rescaledIm, _, err := Decode(f, &DecodeOpts{ScaleWidth: 0.5, ScaleHeight: 0.5}) + if err != nil { + t.Fatal(err) + } + + smallIm := smallStraightFImage(t) + + gotB, wantB := rescaledIm.Bounds(), smallIm.Bounds() + if !gotB.Eq(wantB) { + t.Errorf("(scale) %v bounds not equal, got %v want %v", name, gotB, wantB) + } + if !equals(rescaledIm, smallIm) { + t.Errorf("(scale) %v pixels not equal", name) + } + + _, err = f.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + + rescaledIm, _, err = Decode(f, &DecodeOpts{MaxWidth: 2000, MaxHeight: 40}) + if err != nil { + t.Fatal(err) + } + gotB = rescaledIm.Bounds() + if !gotB.Eq(wantB) { + t.Errorf("(max) %v bounds not equal, got %v want %v", name, gotB, wantB) + } + if !equals(rescaledIm, smallIm) { + t.Errorf("(max) %v pixels not equal", name) + } +} + +// TestRescaleEXIF verifies that rescaling an image, followed +// by the automatic EXIF correction (rotation/flipping), +// produces the expected image. All the possible correction +// modes are tested. 
+func TestRescaleEXIF(t *testing.T) { + smallStraightF := smallStraightFImage(t) + samples := sampleNames(t) + for _, v := range samples { + if !strings.Contains(v, "exif") { + continue + } + name := filepath.Join(datadir, v) + t.Logf("rescaling %s with half-width and half-height", name) + f, err := os.Open(name) + if err != nil { + t.Fatal(err) + } + defer f.Close() + rescaledIm, _, err := Decode(f, &DecodeOpts{ScaleWidth: 0.5, ScaleHeight: 0.5}) + if err != nil { + t.Fatal(err) + } + + gotB, wantB := rescaledIm.Bounds(), smallStraightF.Bounds() + if !gotB.Eq(wantB) { + t.Errorf("(scale) %v bounds not equal, got %v want %v", name, gotB, wantB) + } + if !equals(rescaledIm, smallStraightF) { + t.Errorf("(scale) %v pixels not equal", name) + } + + _, err = f.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + rescaledIm, _, err = Decode(f, &DecodeOpts{MaxWidth: 2000, MaxHeight: 40}) + if err != nil { + t.Fatal(err) + } + + gotB = rescaledIm.Bounds() + if !gotB.Eq(wantB) { + t.Errorf("(max) %v bounds not equal, got %v want %v", name, gotB, wantB) + } + if !equals(rescaledIm, smallStraightF) { + t.Errorf("(max) %v pixels not equal", name) + } + } +} + +// TestUpscale verifies we don't resize up. 
+func TestUpscale(t *testing.T) { + b := new(bytes.Buffer) + w, h := 64, 48 + if err := jpeg.Encode(b, image.NewNRGBA(image.Rect(0, 0, w, h)), nil); err != nil { + t.Fatal(err) + } + sizes := []struct { + mw, mh int + wantW, wantH int + }{ + {wantW: w, wantH: h}, + {mw: w, mh: h, wantW: w, wantH: h}, + {mw: w, mh: 2 * h, wantW: w, wantH: h}, + {mw: 2 * w, mh: w, wantW: w, wantH: h}, + {mw: 2 * w, mh: 2 * h, wantW: w, wantH: h}, + {mw: w / 2, mh: h / 2, wantW: w / 2, wantH: h / 2}, + {mw: w / 2, mh: 2 * h, wantW: w / 2, wantH: h / 2}, + {mw: 2 * w, mh: h / 2, wantW: w / 2, wantH: h / 2}, + } + for i, size := range sizes { + var opts DecodeOpts + switch { + case size.mw != 0 && size.mh != 0: + opts = DecodeOpts{MaxWidth: size.mw, MaxHeight: size.mh} + case size.mw != 0: + opts = DecodeOpts{MaxWidth: size.mw} + case size.mh != 0: + opts = DecodeOpts{MaxHeight: size.mh} + } + im, _, err := Decode(bytes.NewReader(b.Bytes()), &opts) + if err != nil { + t.Error(i, err) + } + gotW := im.Bounds().Dx() + gotH := im.Bounds().Dy() + if gotW != size.wantW || gotH != size.wantH { + t.Errorf("%d got %dx%d want %dx%d", i, gotW, gotH, size.wantW, size.wantH) + } + } +} + +// TODO(mpl): move this test to the goexif lib if/when we contribute +// back the DateTime stuff to upstream. +func TestDateTime(t *testing.T) { + f, err := os.Open(filepath.Join(datadir, "f1-exif.jpg")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + ex, err := exif.Decode(f) + if err != nil { + t.Fatal(err) + } + got, err := ex.DateTime() + if err != nil { + t.Fatal(err) + } + exifTimeLayout := "2006:01:02 15:04:05" + want, err := time.ParseInLocation(exifTimeLayout, "2012:11:04 05:42:02", time.Local) + if err != nil { + t.Fatal(err) + } + if got != want { + t.Fatalf("Creation times differ; got %v, want: %v\n", got, want) + } +} + +var issue513tests = []image.Rectangle{ + // These test image bounds give a fastjpeg.Factor() result of 1 since + // they give dim/max == 1, but require rescaling. 
+ image.Rect(0, 0, 500, 500), // The file, bug.jpeg, in issue 315 is a black 500x500. + image.Rect(0, 0, 1, 257), + image.Rect(0, 0, 1, 511), + image.Rect(0, 0, 2001, 1), + image.Rect(0, 0, 3999, 1), + + // These test image bounds give either a fastjpeg.Factor() > 1 or + // do not require rescaling. + image.Rect(0, 0, 1, 256), + image.Rect(0, 0, 1, 512), + image.Rect(0, 0, 2000, 1), + image.Rect(0, 0, 4000, 1), +} + +// Test that decode does not hand off a nil image when using +// fastjpeg, and fastjpeg.Factor() == 1. +// See https://camlistore.org/issue/513 +func TestIssue513(t *testing.T) { + opts := &DecodeOpts{MaxWidth: 2000, MaxHeight: 256} + for _, rect := range issue513tests { + buf := &bytes.Buffer{} + err := jpeg.Encode(buf, image.NewRGBA(rect), nil) + if err != nil { + t.Fatalf("Failed to encode test image: %v", err) + } + func() { + defer func() { + if r := recover(); r != nil { + t.Errorf("Unexpected panic for image size %dx%d: %v", rect.Dx(), rect.Dy(), r) + } + }() + _, format, err, needsRescale := decode(buf, opts, false) + if err != nil { + t.Errorf("Unexpected error for image size %dx%d: %v", rect.Dx(), rect.Dy(), err) + } + if format != "jpeg" { + t.Errorf("Unexpected format for image size %dx%d: got %q want %q", rect.Dx(), rect.Dy(), format, "jpeg") + } + if needsRescale != (rect.Dx() > opts.MaxWidth || rect.Dy() > opts.MaxHeight) { + t.Errorf("Unexpected rescale for image size %dx%d: needsRescale = %t", rect.Dx(), rect.Dy(), needsRescale) + } + }() + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/resize/bench_test.go b/vendor/github.com/camlistore/camlistore/pkg/images/resize/bench_test.go new file mode 100644 index 00000000..875290f1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/images/resize/bench_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resize + +import ( + "image" + "testing" +) + +func resize(m image.Image) { + s := m.Bounds().Size().Div(2) + Resize(m, m.Bounds(), s.X, s.Y) +} + +func halve(m image.Image) { + HalveInplace(m) +} + +func BenchmarkResizeRGBA(b *testing.B) { + m := image.NewRGBA(orig) + b.ResetTimer() + for i := 0; i < b.N; i++ { + resize(m) + } +} + +func BenchmarkHalveRGBA(b *testing.B) { + m := image.NewRGBA(orig) + b.ResetTimer() + for i := 0; i < b.N; i++ { + halve(m) + } +} + +func BenchmarkResizeYCrCb(b *testing.B) { + m := image.NewYCbCr(orig, image.YCbCrSubsampleRatio422) + b.ResetTimer() + for i := 0; i < b.N; i++ { + resize(m) + } +} + +func BenchmarkHalveYCrCb(b *testing.B) { + m := image.NewYCbCr(orig, image.YCbCrSubsampleRatio422) + b.ResetTimer() + for i := 0; i < b.N; i++ { + halve(m) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/resize/resize.go b/vendor/github.com/camlistore/camlistore/pkg/images/resize/resize.go new file mode 100644 index 00000000..d9240408 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/images/resize/resize.go @@ -0,0 +1,320 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package resize resizes images. +package resize + +import ( + "image" + "image/color" + "image/draw" + + xdraw "golang.org/x/image/draw" +) + +// Resize returns a scaled copy of the image slice r of m. +// The returned image has width w and height h. 
+func Resize(m image.Image, r image.Rectangle, w, h int) image.Image { + if w < 0 || h < 0 { + return nil + } + if w == 0 || h == 0 || r.Dx() <= 0 || r.Dy() <= 0 { + return image.NewRGBA64(image.Rect(0, 0, w, h)) + } + switch m := m.(type) { + case *image.RGBA: + return resizeRGBA(m, r, w, h) + case *image.YCbCr: + if m, ok := resizeYCbCr(m, r, w, h); ok { + return m + } + } + ww, hh := uint64(w), uint64(h) + dx, dy := uint64(r.Dx()), uint64(r.Dy()) + // The scaling algorithm is to nearest-neighbor magnify the dx * dy source + // to a (ww*dx) * (hh*dy) intermediate image and then minify the intermediate + // image back down to a ww * hh destination with a simple box filter. + // The intermediate image is implied, we do not physically allocate a slice + // of length ww*dx*hh*dy. + // For example, consider a 4*3 source image. Label its pixels from a-l: + // abcd + // efgh + // ijkl + // To resize this to a 3*2 destination image, the intermediate is 12*6. + // Whitespace has been added to delineate the destination pixels: + // aaab bbcc cddd + // aaab bbcc cddd + // eeef ffgg ghhh + // + // eeef ffgg ghhh + // iiij jjkk klll + // iiij jjkk klll + // Thus, the 'b' source pixel contributes one third of its value to the + // (0, 0) destination pixel and two thirds to (1, 0). + // The implementation is a two-step process. First, the source pixels are + // iterated over and each source pixel's contribution to 1 or more + // destination pixels are summed. Second, the sums are divided by a scaling + // factor to yield the destination pixels. + // TODO: By interleaving the two steps, instead of doing all of + // step 1 first and all of step 2 second, we could allocate a smaller sum + // slice of length 4*w*2 instead of 4*w*h, although the resultant code + // would become more complicated. + n, sum := dx*dy, make([]uint64, 4*w*h) + for y := r.Min.Y; y < r.Max.Y; y++ { + for x := r.Min.X; x < r.Max.X; x++ { + // Get the source pixel. 
+ r32, g32, b32, a32 := m.At(x, y).RGBA() + r64 := uint64(r32) + g64 := uint64(g32) + b64 := uint64(b32) + a64 := uint64(a32) + // Spread the source pixel over 1 or more destination rows. + py := uint64(y-r.Min.Y) * hh + for remy := hh; remy > 0; { + qy := dy - (py % dy) + if qy > remy { + qy = remy + } + // Spread the source pixel over 1 or more destination columns. + px := uint64(x-r.Min.X) * ww + index := 4 * ((py/dy)*ww + (px / dx)) + for remx := ww; remx > 0; { + qx := dx - (px % dx) + if qx > remx { + qx = remx + } + sum[index+0] += r64 * qx * qy + sum[index+1] += g64 * qx * qy + sum[index+2] += b64 * qx * qy + sum[index+3] += a64 * qx * qy + index += 4 + px += qx + remx -= qx + } + py += qy + remy -= qy + } + } + } + return average(sum, w, h, n*0x0101) +} + +// average convert the sums to averages and returns the result. +func average(sum []uint64, w, h int, n uint64) image.Image { + ret := image.NewRGBA(image.Rect(0, 0, w, h)) + for y := 0; y < h; y++ { + for x := 0; x < w; x++ { + i := y*ret.Stride + x*4 + j := 4 * (y*w + x) + ret.Pix[i+0] = uint8(sum[j+0] / n) + ret.Pix[i+1] = uint8(sum[j+1] / n) + ret.Pix[i+2] = uint8(sum[j+2] / n) + ret.Pix[i+3] = uint8(sum[j+3] / n) + } + } + return ret +} + +// resizeYCbCr returns a scaled copy of the YCbCr image slice r of m. +// The returned image has width w and height h. +func resizeYCbCr(m *image.YCbCr, r image.Rectangle, w, h int) (image.Image, bool) { + dst := image.NewRGBA(image.Rect(0, 0, w, h)) + xdraw.ApproxBiLinear.Scale(dst, dst.Bounds(), m, m.Bounds(), xdraw.Src, nil) + return dst, true +} + +// resizeRGBA returns a scaled copy of the RGBA image slice r of m. +// The returned image has width w and height h. +func resizeRGBA(m *image.RGBA, r image.Rectangle, w, h int) image.Image { + ww, hh := uint64(w), uint64(h) + dx, dy := uint64(r.Dx()), uint64(r.Dy()) + // See comment in Resize. 
+ n, sum := dx*dy, make([]uint64, 4*w*h) + for y := r.Min.Y; y < r.Max.Y; y++ { + pix := m.Pix[(y-m.Rect.Min.Y)*m.Stride:] + for x := r.Min.X; x < r.Max.X; x++ { + // Get the source pixel. + p := pix[(x-m.Rect.Min.X)*4:] + r64 := uint64(p[0]) + g64 := uint64(p[1]) + b64 := uint64(p[2]) + a64 := uint64(p[3]) + // Spread the source pixel over 1 or more destination rows. + py := uint64(y-r.Min.Y) * hh + for remy := hh; remy > 0; { + qy := dy - (py % dy) + if qy > remy { + qy = remy + } + // Spread the source pixel over 1 or more destination columns. + px := uint64(x-r.Min.X) * ww + index := 4 * ((py/dy)*ww + (px / dx)) + for remx := ww; remx > 0; { + qx := dx - (px % dx) + if qx > remx { + qx = remx + } + qxy := qx * qy + sum[index+0] += r64 * qxy + sum[index+1] += g64 * qxy + sum[index+2] += b64 * qxy + sum[index+3] += a64 * qxy + index += 4 + px += qx + remx -= qx + } + py += qy + remy -= qy + } + } + } + return average(sum, w, h, n) +} + +// HalveInplace downsamples the image by 50% using averaging interpolation. +func HalveInplace(m image.Image) image.Image { + b := m.Bounds() + switch m := m.(type) { + case *image.YCbCr: + for y := b.Min.Y; y < b.Max.Y/2; y++ { + for x := b.Min.X; x < b.Max.X/2; x++ { + y00 := uint32(m.Y[m.YOffset(2*x, 2*y)]) + y10 := uint32(m.Y[m.YOffset(2*x+1, 2*y)]) + y01 := uint32(m.Y[m.YOffset(2*x, 2*y+1)]) + y11 := uint32(m.Y[m.YOffset(2*x+1, 2*y+1)]) + // Add before divide with uint32 or we get errors in the least + // significant bits. 
+ m.Y[m.YOffset(x, y)] = uint8((y00 + y10 + y01 + y11) >> 2) + + cb00 := uint32(m.Cb[m.COffset(2*x, 2*y)]) + cb10 := uint32(m.Cb[m.COffset(2*x+1, 2*y)]) + cb01 := uint32(m.Cb[m.COffset(2*x, 2*y+1)]) + cb11 := uint32(m.Cb[m.COffset(2*x+1, 2*y+1)]) + m.Cb[m.COffset(x, y)] = uint8((cb00 + cb10 + cb01 + cb11) >> 2) + + cr00 := uint32(m.Cr[m.COffset(2*x, 2*y)]) + cr10 := uint32(m.Cr[m.COffset(2*x+1, 2*y)]) + cr01 := uint32(m.Cr[m.COffset(2*x, 2*y+1)]) + cr11 := uint32(m.Cr[m.COffset(2*x+1, 2*y+1)]) + m.Cr[m.COffset(x, y)] = uint8((cr00 + cr10 + cr01 + cr11) >> 2) + } + } + b.Max = b.Min.Add(b.Size().Div(2)) + return subImage(m, b) + case draw.Image: + for y := b.Min.Y; y < b.Max.Y/2; y++ { + for x := b.Min.X; x < b.Max.X/2; x++ { + r00, g00, b00, a00 := m.At(2*x, 2*y).RGBA() + r10, g10, b10, a10 := m.At(2*x+1, 2*y).RGBA() + r01, g01, b01, a01 := m.At(2*x, 2*y+1).RGBA() + r11, g11, b11, a11 := m.At(2*x+1, 2*y+1).RGBA() + + // Add before divide with uint32 or we get errors in the least + // significant bits. + r := (r00 + r10 + r01 + r11) >> 2 + g := (g00 + g10 + g01 + g11) >> 2 + b := (b00 + b10 + b01 + b11) >> 2 + a := (a00 + a10 + a01 + a11) >> 2 + + m.Set(x, y, color.RGBA{ + R: uint8(r >> 8), + G: uint8(g >> 8), + B: uint8(b >> 8), + A: uint8(a >> 8), + }) + } + } + b.Max = b.Min.Add(b.Size().Div(2)) + return subImage(m, b) + default: + // TODO(wathiede): fallback to generic Resample somehow? + panic("Unhandled image type") + } +} + +// ResampleInplace will resample m inplace, overwritting existing pixel data, +// and return a subimage of m sized to w and h. +func ResampleInplace(m image.Image, r image.Rectangle, w, h int) image.Image { + // We don't support scaling up. 
+ if r.Dx() < w || r.Dy() < h { + return m + } + + switch m := m.(type) { + case *image.YCbCr: + xStep := float64(r.Dx()) / float64(w) + yStep := float64(r.Dy()) / float64(h) + for y := r.Min.Y; y < r.Min.Y+h; y++ { + for x := r.Min.X; x < r.Min.X+w; x++ { + xSrc := int(float64(x) * xStep) + ySrc := int(float64(y) * yStep) + cSrc := m.COffset(xSrc, ySrc) + cDst := m.COffset(x, y) + m.Y[m.YOffset(x, y)] = m.Y[m.YOffset(xSrc, ySrc)] + m.Cb[cDst] = m.Cb[cSrc] + m.Cr[cDst] = m.Cr[cSrc] + } + } + case draw.Image: + xStep := float64(r.Dx()) / float64(w) + yStep := float64(r.Dy()) / float64(h) + for y := r.Min.Y; y < r.Min.Y+h; y++ { + for x := r.Min.X; x < r.Min.X+w; x++ { + xSrc := int(float64(x) * xStep) + ySrc := int(float64(y) * yStep) + r, g, b, a := m.At(xSrc, ySrc).RGBA() + m.Set(x, y, color.RGBA{ + R: uint8(r >> 8), + G: uint8(g >> 8), + B: uint8(b >> 8), + A: uint8(a >> 8), + }) + } + } + default: + // TODO fallback to generic Resample somehow? + panic("Unhandled image type") + } + r.Max.X = r.Min.X + w + r.Max.Y = r.Min.Y + h + return subImage(m, r) +} + +func subImage(m image.Image, r image.Rectangle) image.Image { + type subImager interface { + SubImage(image.Rectangle) image.Image + } + if si, ok := m.(subImager); ok { + return si.SubImage(r) + } + panic("Image type doesn't support SubImage") +} + +// Resample returns a resampled copy of the image slice r of m. +// The returned image has width w and height h. 
+func Resample(m image.Image, r image.Rectangle, w, h int) image.Image { + if w < 0 || h < 0 { + return nil + } + if w == 0 || h == 0 || r.Dx() <= 0 || r.Dy() <= 0 { + return image.NewRGBA64(image.Rect(0, 0, w, h)) + } + img := image.NewRGBA(image.Rect(0, 0, w, h)) + xStep := float64(r.Dx()) / float64(w) + yStep := float64(r.Dy()) / float64(h) + for y := 0; y < h; y++ { + for x := 0; x < w; x++ { + xSrc := int(float64(r.Min.X) + float64(x)*xStep) + ySrc := int(float64(r.Min.Y) + float64(y)*yStep) + r, g, b, a := m.At(xSrc, ySrc).RGBA() + img.SetRGBA(x, y, color.RGBA{ + R: uint8(r >> 8), + G: uint8(g >> 8), + B: uint8(b >> 8), + A: uint8(a >> 8), + }) + } + } + return img +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/resize/resize_test.go b/vendor/github.com/camlistore/camlistore/pkg/images/resize/resize_test.go new file mode 100644 index 00000000..1d4358ad --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/images/resize/resize_test.go @@ -0,0 +1,366 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resize + +import ( + "flag" + "fmt" + "image" + "image/color" + "image/draw" + "image/png" + "io" + "math" + "os" + "path/filepath" + "strings" + "testing" +) + +const ( + // psnrThreshold is the threshold over which images must match to consider + // HalveInplace equivalent to Resize. It is in terms of dB and 60-80 is + // good for RGB. 
+ psnrThreshold = 50.0 + + // TODO(wathiede, mpl): figure out why we got an increase from ~3% to ~16% for + // YCbCr images in Go 1.5. That is, for halving vs resizing. + maxPixelDiffPercentage = 10 +) + +var ( + output = flag.String("output", "", "If non-empty, the directory to save comparison images.") + + orig = image.Rect(0, 0, 1024, 1024) + thumb = image.Rect(0, 0, 64, 64) +) + +var somePalette = []color.Color{ + color.RGBA{0x00, 0x00, 0x00, 0xff}, + color.RGBA{0x00, 0x00, 0x44, 0xff}, + color.RGBA{0x00, 0x00, 0x88, 0xff}, + color.RGBA{0x00, 0x00, 0xcc, 0xff}, + color.RGBA{0x00, 0x44, 0x00, 0xff}, + color.RGBA{0x00, 0x44, 0x44, 0xff}, + color.RGBA{0x00, 0x44, 0x88, 0xff}, + color.RGBA{0x00, 0x44, 0xcc, 0xff}, +} + +func makeImages(r image.Rectangle) []image.Image { + return []image.Image{ + image.NewGray(r), + image.NewGray16(r), + image.NewNRGBA(r), + image.NewNRGBA64(r), + image.NewPaletted(r, somePalette), + image.NewRGBA(r), + image.NewRGBA64(r), + image.NewYCbCr(r, image.YCbCrSubsampleRatio444), + image.NewYCbCr(r, image.YCbCrSubsampleRatio422), + image.NewYCbCr(r, image.YCbCrSubsampleRatio420), + image.NewYCbCr(r, image.YCbCrSubsampleRatio440), + image.NewYCbCr(r, image.YCbCrSubsampleRatio410), + image.NewYCbCr(r, image.YCbCrSubsampleRatio411), + } +} + +func TestResize(t *testing.T) { + for i, im := range makeImages(orig) { + m := Resize(im, orig, thumb.Dx(), thumb.Dy()) + got, want := m.Bounds(), thumb + if !got.Eq(want) { + t.Error(i, "Want bounds", want, "got", got) + } + } +} + +func TestResampleInplace(t *testing.T) { + for i, im := range makeImages(orig) { + m := ResampleInplace(im, orig, thumb.Dx(), thumb.Dy()) + got, want := m.Bounds(), thumb + if !got.Eq(want) { + t.Error(i, "Want bounds", want, "got", got) + } + } +} + +func TestResample(t *testing.T) { + for i, im := range makeImages(orig) { + m := Resample(im, orig, thumb.Dx(), thumb.Dy()) + got, want := m.Bounds(), thumb + if !got.Eq(want) { + t.Error(i, "Want bounds", want, "got", got) + } 
+ } + + for _, d := range []struct { + wantFn string + r image.Rectangle + w, h int + }{ + { + // Generated with imagemagick: + // $ convert -crop 128x128+320+160 -resize 64x64 -filter point \ + // testdata/test.png testdata/test-resample-128x128-64x64.png + wantFn: "test-resample-128x128-64x64.png", + r: image.Rect(320, 160, 320+128, 160+128), + w: 64, + h: 64, + }, + { + // Generated with imagemagick: + // $ convert -resize 128x128 -filter point testdata/test.png \ + // testdata/test-resample-768x576-128x96.png + wantFn: "test-resample-768x576-128x96.png", + r: image.Rect(0, 0, 768, 576), + w: 128, + h: 96, + }, + } { + m := image.NewRGBA(testIm.Bounds()) + fillTestImage(m) + r, err := os.Open(filepath.Join("testdata", d.wantFn)) + if err != nil { + t.Fatal(err) + } + defer r.Close() + want, err := png.Decode(r) + if err != nil { + t.Fatal(err) + } + got := Resample(m, d.r, d.w, d.h) + res := compareImages(got, want) + t.Logf("PSNR %.4f", res.psnr) + s := got.Bounds().Size() + tot := s.X * s.Y + per := float32(100*res.diffCnt) / float32(tot) + t.Logf("Resample not the same %d pixels different %.2f%%", res.diffCnt, per) + if *output != "" { + err = savePng(t, want, fmt.Sprintf("Resample.%s->%dx%d.want.png", + d.r, d.w, d.h)) + if err != nil { + t.Fatal(err) + } + err = savePng(t, got, fmt.Sprintf("Resample.%s->%dx%d.got.png", + d.r, d.w, d.h)) + if err != nil { + t.Fatal(err) + } + err = savePng(t, res.diffIm, + fmt.Sprintf("Resample.%s->%dx%d.diff.png", d.r, d.w, d.h)) + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestHalveInplace(t *testing.T) { + for i, im := range makeImages(orig) { + m := HalveInplace(im) + b := im.Bounds() + got, want := m.Bounds(), image.Rectangle{ + Min: b.Min, + Max: b.Min.Add(b.Max.Div(2)), + } + if !got.Eq(want) { + t.Error(i, "Want bounds", want, "got", got) + } + } +} + +type results struct { + diffCnt int + psnr float64 + diffIm *image.Gray +} + +func compareImages(m1, m2 image.Image) results { + b := m1.Bounds() + s := 
b.Size() + res := results{} + mse := uint32(0) + for y := b.Min.Y; y < b.Max.Y; y++ { + for x := b.Min.X; x < b.Max.X; x++ { + r1, g1, b1, a1 := m1.At(x, y).RGBA() + r2, g2, b2, a2 := m2.At(x, y).RGBA() + + mse += ((r1-r2)*(r1-r2) + (g1-g2)*(g1-g2) + (b1-b2)*(b1-b2)) / 3 + if r1 != r2 || g1 != g2 || b1 != b2 || a1 != a2 { + if res.diffIm == nil { + res.diffIm = image.NewGray(m1.Bounds()) + } + res.diffCnt++ + res.diffIm.Set(x, y, color.White) + } + } + } + mse = mse / uint32(s.X*s.Y) + res.psnr = 20*math.Log10(1<<16) - 10*math.Log10(float64(mse)) + return res +} + +var testIm image.Image + +func init() { + r, err := os.Open(filepath.Join("testdata", "test.png")) + if err != nil { + panic(err) + } + defer r.Close() + testIm, err = png.Decode(r) +} + +func fillTestImage(im image.Image) { + b := im.Bounds() + if !b.Eq(testIm.Bounds()) { + panic("Requested target image dimensions not equal reference image.") + } + src := testIm + if dst, ok := im.(*image.YCbCr); ok { + b := testIm.Bounds() + for y := b.Min.Y; y < b.Max.Y; y++ { + for x := b.Min.X; x < b.Max.X; x++ { + r, g, b, _ := src.At(x, y).RGBA() + yp, cb, cr := color.RGBToYCbCr(uint8(r), uint8(g), uint8(b)) + + dst.Y[dst.YOffset(x, y)] = yp + off := dst.COffset(x, y) + dst.Cb[off] = cb + dst.Cr[off] = cr + } + } + return + } + draw.Draw(im.(draw.Image), b, testIm, b.Min, draw.Src) +} + +func savePng(t *testing.T, m image.Image, fn string) error { + fn = filepath.Join(*output, fn) + t.Log("Saving", fn) + f, err := os.Create(fn) + if err != nil { + return err + } + defer f.Close() + + return png.Encode(f, m) +} + +func getFilename(im image.Image, method string) string { + imgType := fmt.Sprintf("%T", im) + imgType = imgType[strings.Index(imgType, ".")+1:] + if m, ok := im.(*image.YCbCr); ok { + imgType += "." 
+ m.SubsampleRatio.String() + } + return fmt.Sprintf("%s.%s.png", imgType, method) +} + +func TestCompareResizeToHalveInplace(t *testing.T) { + if testing.Short() { + t.Skip("Skipping TestCompareNewResizeToHalveInplace in short mode.") + } + testCompareResizeMethods(t, "resize", "halveInPlace") +} + +var resizeMethods = map[string]func(image.Image) image.Image{ + "resize": func(im image.Image) image.Image { + s := im.Bounds().Size() + return Resize(im, im.Bounds(), s.X/2, s.Y/2) + }, + "halveInPlace": func(im image.Image) image.Image { + return HalveInplace(im) + }, +} + +func testCompareResizeMethods(t *testing.T, method1, method2 string) { + images1, images2 := []image.Image{}, []image.Image{} + var imTypes []string + for _, im := range makeImages(testIm.Bounds()) { + // keeping track of the types for the final output + imTypes = append(imTypes, fmt.Sprintf("%T", im)) + fillTestImage(im) + images1 = append(images1, resizeMethods[method1](im)) + } + for _, im := range makeImages(testIm.Bounds()) { + fillTestImage(im) + images2 = append(images2, resizeMethods[method2](im)) + } + var ( + f io.WriteCloser + err error + ) + if *output != "" { + os.Mkdir(*output, os.FileMode(0777)) + f, err = os.Create(filepath.Join(*output, "index.html")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + fmt.Fprintf(f, ` + + + + Image comparison for `+method1+` vs `+method2+` + + + +`) + } + for i, im1 := range images1 { + im2 := images2[i] + res := compareImages(im1, im2) + if *output != "" { + fmt.Fprintf(f, "") + fn := getFilename(im1, "halve") + err := savePng(t, im1, fn) + if err != nil { + t.Fatal(err) + } + fmt.Fprintf(f, `

    %s`, fn, fn) + + fn = getFilename(im1, "resize") + err = savePng(t, im2, fn) + if err != nil { + t.Fatal(err) + } + fmt.Fprintf(f, `

    %s`, fn, fn) + + if res.diffIm != nil { + fn = getFilename(im1, "diff") + err = savePng(t, res.diffIm, fn) + if err != nil { + t.Fatal(err) + } + fmt.Fprintf(f, `

    %s`, fn, fn) + } + fmt.Fprintln(f) + } + + if res.psnr < psnrThreshold { + t.Errorf("%v PSNR too low %.4f", imTypes[i], res.psnr) + } else { + t.Logf("%v PSNR %.4f", imTypes[i], res.psnr) + } + s := im1.Bounds().Size() + tot := s.X * s.Y + if per := float32(100*res.diffCnt) / float32(tot); per > maxPixelDiffPercentage { + t.Errorf("%v not the same %d pixels different %.2f%%", imTypes[i], res.diffCnt, per) + } + } + +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/resize/testdata/test-resample-128x128-64x64.png b/vendor/github.com/camlistore/camlistore/pkg/images/resize/testdata/test-resample-128x128-64x64.png new file mode 100644 index 00000000..7663ba5f Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/resize/testdata/test-resample-128x128-64x64.png differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/resize/testdata/test-resample-768x576-128x96.png b/vendor/github.com/camlistore/camlistore/pkg/images/resize/testdata/test-resample-768x576-128x96.png new file mode 100644 index 00000000..db07dba5 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/resize/testdata/test-resample-768x576-128x96.png differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/resize/testdata/test.png b/vendor/github.com/camlistore/camlistore/pkg/images/resize/testdata/test.png new file mode 100644 index 00000000..bd7c8e62 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/resize/testdata/test.png differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f1-exif.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f1-exif.jpg new file mode 100644 index 00000000..ff003e39 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f1-exif.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f1-s.jpg 
b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f1-s.jpg new file mode 100644 index 00000000..1fdacd65 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f1-s.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f1.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f1.jpg new file mode 100644 index 00000000..e5ecc084 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f1.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f2-exif.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f2-exif.jpg new file mode 100644 index 00000000..7e0f170e Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f2-exif.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f2.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f2.jpg new file mode 100644 index 00000000..bc3e1dba Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f2.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f3-exif.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f3-exif.jpg new file mode 100644 index 00000000..3ed7b16a Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f3-exif.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f3.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f3.jpg new file mode 100644 index 00000000..bca977c6 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f3.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f4-exif.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f4-exif.jpg new file mode 100644 index 00000000..0e081f91 Binary files /dev/null and 
b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f4-exif.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f4.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f4.jpg new file mode 100644 index 00000000..395385b8 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f4.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f5-exif.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f5-exif.jpg new file mode 100644 index 00000000..e8d87547 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f5-exif.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f5.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f5.jpg new file mode 100644 index 00000000..4b985b19 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f5.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f6-exif.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f6-exif.jpg new file mode 100644 index 00000000..4e2c8641 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f6-exif.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f6.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f6.jpg new file mode 100644 index 00000000..175f4023 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f6.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f7-exif.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f7-exif.jpg new file mode 100644 index 00000000..b5dddea4 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f7-exif.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f7.jpg 
b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f7.jpg new file mode 100644 index 00000000..b543073d Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f7.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f8-exif.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f8-exif.jpg new file mode 100644 index 00000000..fb050fc6 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f8-exif.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f8.jpg b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f8.jpg new file mode 100644 index 00000000..6b1bd29f Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/images/testdata/f8.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/README b/vendor/github.com/camlistore/camlistore/pkg/importer/README new file mode 100644 index 00000000..e681bf26 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/README @@ -0,0 +1,10 @@ +Permanode type: + +camliNodeType: "importer" +importerType: "twitter" +authClientID: "xxx" // e.g. api token +authClientSecret: "sdkojfsldfjlsdkf" + +camliNodeType: "importerAccount" +importerType: "twitter" +twitterAccount: "bradfitz" diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/allimporters/importers.go b/vendor/github.com/camlistore/camlistore/pkg/importer/allimporters/importers.go new file mode 100644 index 00000000..5bab9dbb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/allimporters/importers.go @@ -0,0 +1,28 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package allimporters registers all the importer implementations. +package allimporters + +import ( + _ "camlistore.org/pkg/importer/dummy" + _ "camlistore.org/pkg/importer/feed" + _ "camlistore.org/pkg/importer/flickr" + _ "camlistore.org/pkg/importer/foursquare" + _ "camlistore.org/pkg/importer/picasa" + _ "camlistore.org/pkg/importer/pinboard" + _ "camlistore.org/pkg/importer/twitter" +) diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/attrs.go b/vendor/github.com/camlistore/camlistore/pkg/importer/attrs.go new file mode 100644 index 00000000..c3761552 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/attrs.go @@ -0,0 +1,74 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package importer + +// TODO(mpl): use these on all the importers. +const ( + // Account or user identity. + + // AcctAttrUserID is the account's internal representation, and often an account number. + // It is usually required as an argument in API calls to the site we import from. + // Not found on schema.org. 
+ // Example: "3179713". + AcctAttrUserID = "userID" + // AcctAttrUserName is the public identifier of the account. Commonly referred to as + // "username", or "screen name", or "account name". Often a one word string. + // Not found on schema.org. + // Example: "johnSmith" from Twitter's "@johnSmith". + AcctAttrUserName = "userName" + + // AcctAttrCompletedVersion records the importer-specific + // "version number" that last ran to completion, doing a full + // importer. When importers are updated with new behavior, + // they update their version number and that triggers a full + // import, rather than incremental imports. + AcctAttrCompletedVersion = "completedVersion" + + // AcctAttrName is a longer or alternate public representation of the account's name. + // It is often the full name of the person's account (family name and given name), thus + // sometimes redundant with the combination of acctAttrFamilyName and acctAttrGivenName. + // Found at http://schema.org/Person. + // Example: "John Smith". + AcctAttrName = "name" + // http://schema.org/givenName + // Example: "John". + AcctAttrGivenName = "givenName" + // http://schema.org/familyName + // Example: "Smith". + AcctAttrFamilyName = "familyName" + + // Generic item, object. + + // ItemAttrID is the generic identifier of an item when nothing suitable and more specific + // was found on http://schema.org. Usually a number. + AttrID = "ID" + // http://schema.org/name + AttrName = "name" + // Free-flowing text definition of a location or place, such + // as a city name, or a full postal address. + AttrLocationText = "locationText" + // AttrURL is the item's original or origin URL. + AttrURL = "url" + + // AttrStartDate is http://schema.org/startDate: The start + // date and time of the event or item (in ISO 8601 date + // format) + AttrStartDate = "startDate" + + // Image, photo. 
+ +) diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/dummy/dummy.go b/vendor/github.com/camlistore/camlistore/pkg/importer/dummy/dummy.go new file mode 100644 index 00000000..d7ac2fc6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/dummy/dummy.go @@ -0,0 +1,198 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package dummy is an example importer for development purposes. +package dummy + +import ( + "fmt" + "log" + "math/rand" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/env" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/schema" +) + +func init() { + if !env.IsDev() { + // For this particular example importer, we only + // register it if we're in "devcam server" mode. + // Normally you'd avoid this check. + return + } + + // This Register call must happen during init. + // + // Register only registers an importer site type and not a + // specific account on a site. + importer.Register("dummy", &imp{}) +} + +// imp is the dummy importer, as a demo of how to write an importer. +// +// It must implement the importer.Importer interface in order for +// it to be registered (in the init above). +type imp struct { + // The struct or underlying type implementing an importer + // holds state that is global, and not per-account, so it + // should not be used to cache account-specific + // resources. 
Some importers (e.g. Foursquare) use this space + // to cache mappings from site-specific global resource URLs + // (e.g. category icons) to the fileref once it's been copied + // into Camlistore. + + mu sync.Mutex // mu guards cache + categoryRef map[string]blob.Ref // URL -> file schema ref +} + +func (*imp) SupportsIncremental() bool { + // SupportsIncremental signals to the importer host that this + // importer has been optimized to be run regularly (e.g. every 5 + // minutes or half hour). If it returns false, the user must + // manually start imports. + return false +} + +func (*imp) NeedsAPIKey() bool { + // This tells the importer framework that we our importer will + // be calling the {RunContext,SetupContext}.Credentials method + // to get the OAuth client ID & client secret, which may be + // either configured on the importer permanode, or statically + // in the server's config file. + return true +} + +const ( + acctAttrToken = "my_token" + acctAttrUsername = "username" + acctAttrRunNumber = "run_number" // some state +) + +func (*imp) IsAccountReady(acct *importer.Object) (ready bool, err error) { + // This method tells the importer framework whether this account + // permanode (accessed via the importer.Object) is ready to start + // an import. Here you would typically check whether you have the + // right metadata/tokens on the account. + return acct.Attr(acctAttrToken) != "" && acct.Attr(acctAttrUsername) != "", nil +} + +func (*imp) SummarizeAccount(acct *importer.Object) string { + // This method is run by the importer framework if the account is + // ready (see IsAccountReady) and summarizes the account in + // the list of accounts on the importer page. 
+ return acct.Attr(acctAttrUsername) +} + +func (*imp) ServeSetup(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) error { + // ServeSetup gets called at the beginning of adding a new account + // to an importer, or when an account is being re-logged into to + // refresh its access token. + // You typically start the OAuth redirect flow here. + // The importer.OAuth2.RedirectURL and importer.OAuth2.RedirectState helpers can be used for OAuth2. + http.Redirect(w, r, ctx.CallbackURL(), http.StatusFound) + return nil +} + +// Statically declare that our importer supports the optional +// importer.ImporterSetupHTMLer interface. +// +// We do this in case importer.ImporterSetupHTMLer changes, or if we +// typo the method name below. It turns this into a compile-time +// error. In general you should do this in Go whenever you implement +// optional interfaces. +var _ importer.ImporterSetupHTMLer = (*imp)(nil) + +func (im *imp) AccountSetupHTML(host *importer.Host) string { + return "

    Hello from the dummy importer!

    I am example HTML. This importer is a demo of how to write an importer.

    " +} + +func (im *imp) ServeCallback(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) { + // ServeCallback is called after ServeSetup, at the end of an + // OAuth redirect flow. + + code := r.FormValue("code") // e.g. get the OAuth code out of the redirect + if code == "" { + code = "some_dummy_code" + } + name := ctx.AccountNode.Attr(acctAttrUsername) + if name == "" { + names := []string{ + "alfred", "alice", "bob", "bethany", + "cooper", "claire", "doug", "darla", + "ed", "eve", "frank", "francine", + } + name = names[rand.Intn(len(names))] + } + if err := ctx.AccountNode.SetAttrs( + "title", fmt.Sprintf("dummy account: %s", name), + acctAttrUsername, name, + acctAttrToken, code, + ); err != nil { + httputil.ServeError(w, r, fmt.Errorf("Error setting attributes: %v", err)) + return + } + http.Redirect(w, r, ctx.AccountURL(), http.StatusFound) +} + +func (im *imp) Run(ctx *importer.RunContext) (err error) { + log.Printf("Running dummy importer.") + defer func() { + log.Printf("Dummy importer returned: %v", err) + }() + root := ctx.RootNode() + fileRef, err := schema.WriteFileFromReader(ctx.Host.Target(), "foo.txt", strings.NewReader("Some file.\n")) + if err != nil { + return err + } + obj, err := root.ChildPathObject("foo.txt") + if err != nil { + return err + } + if err = obj.SetAttr("camliContent", fileRef.String()); err != nil { + return err + } + n, _ := strconv.Atoi(ctx.AccountNode().Attr(acctAttrRunNumber)) + n++ + ctx.AccountNode().SetAttr(acctAttrRunNumber, fmt.Sprint(n)) + // Update the title each time, just to show it working. 
You + // wouldn't actually do this: + return root.SetAttr("title", fmt.Sprintf("dummy: %s import #%d", ctx.AccountNode().Attr(acctAttrUsername), n)) +} + +func (im *imp) ServeHTTP(w http.ResponseWriter, r *http.Request) { + httputil.BadRequestError(w, "Unexpected path: %s", r.URL.Path) +} + +func (im *imp) CallbackRequestAccount(r *http.Request) (blob.Ref, error) { + // We do not actually use OAuth, but this method works for us anyway. + // Even if your importer implementation does not use OAuth, you can + // probably just embed importer.OAuth1 in your implementation type. + // If OAuth2, embedding importer.OAuth2 should work. + return importer.OAuth1{}.CallbackRequestAccount(r) +} + +func (im *imp) CallbackURLParameters(acctRef blob.Ref) url.Values { + // See comment in CallbackRequestAccount. + return importer.OAuth1{}.CallbackURLParameters(acctRef) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/feed/atom/atom.go b/vendor/github.com/camlistore/camlistore/pkg/importer/feed/atom/atom.go new file mode 100644 index 00000000..73c3ab05 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/feed/atom/atom.go @@ -0,0 +1,61 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Adapted from encoding/xml/read_test.go. + +// Package atom defines XML data structures for an Atom feed. 
+package atom + +import ( + "encoding/xml" + "time" +) + +type Feed struct { + XMLName xml.Name `xml:"feed"` + Title string `xml:"title"` + ID string `xml:"id"` + Link []Link `xml:"link"` + Updated TimeStr `xml:"updated"` + Author *Person `xml:"author"` + Entry []*Entry `xml:"entry"` + XMLBase string `xml:"base,attr"` +} + +type Entry struct { + Title *Text `xml:"title"` + ID string `xml:"id"` + Link []Link `xml:"link"` + Published TimeStr `xml:"published"` + Updated TimeStr `xml:"updated"` + Author *Person `xml:"author"` + Summary *Text `xml:"summary"` + Content *Text `xml:"content"` + XMLBase string `xml:"base,attr"` +} + +type Link struct { + Rel string `xml:"rel,attr"` + Href string `xml:"href,attr"` + Type string `xml:"type,attr"` +} + +type Person struct { + Name string `xml:"name"` + URI string `xml:"uri"` + Email string `xml:"email"` + InnerXML string `xml:",innerxml"` +} + +type Text struct { + Type string `xml:"type,attr"` + Body string `xml:",chardata"` + InnerXML string `xml:",innerxml"` +} + +type TimeStr string + +func Time(t time.Time) TimeStr { + return TimeStr(t.Format("2006-01-02T15:04:05-07:00")) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/feed/feed.go b/vendor/github.com/camlistore/camlistore/pkg/importer/feed/feed.go new file mode 100644 index 00000000..fc337d61 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/feed/feed.go @@ -0,0 +1,282 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package feed implements an importer for RSS, Atom, and RDF feeds. +package feed + +import ( + "bytes" + "fmt" + "html/template" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "strings" + "sync" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/schema" + "camlistore.org/third_party/code.google.com/p/go.net/html" + "camlistore.org/third_party/code.google.com/p/go.net/html/atom" +) + +const ( + // Permanode attributes on account node: + acctAttrFeedURL = "feedURL" +) + +func init() { + importer.Register("feed", &imp{ + urlFileRef: make(map[string]blob.Ref), + }) +} + +type imp struct { + mu sync.Mutex // guards following + urlFileRef map[string]blob.Ref // url to file schema blob + + importer.OAuth1 // for CallbackRequestAccount and CallbackURLParameters +} + +func (im *imp) NeedsAPIKey() bool { return false } + +func (im *imp) SupportsIncremental() bool { return true } + +func (im *imp) IsAccountReady(acctNode *importer.Object) (ok bool, err error) { + if acctNode.Attr(acctAttrFeedURL) != "" { + return true, nil + } + return false, nil +} + +func (im *imp) SummarizeAccount(acct *importer.Object) string { + ok, err := im.IsAccountReady(acct) + if err != nil { + return "Not configured; error = " + err.Error() + } + if !ok { + return "Not configured" + } + return fmt.Sprintf("feed %s", acct.Attr(acctAttrFeedURL)) +} + +// A run is our state for a given run of the importer. 
+type run struct { + *importer.RunContext + im *imp +} + +func (im *imp) Run(ctx *importer.RunContext) error { + r := &run{ + RunContext: ctx, + im: im, + } + + if err := r.importFeed(); err != nil { + return err + } + return nil +} + +func (r *run) importFeed() error { + accountNode := r.RunContext.AccountNode() + feedURL, err := url.Parse(accountNode.Attr(acctAttrFeedURL)) + if err != nil { + return err + } + body, err := doGet(r.Context, feedURL.String()) + if err != nil { + return err + } + if auto, err := autoDiscover(body); err == nil { + if autoURL, err := url.Parse(auto); err == nil { + if autoURL.Scheme == "" { + autoURL.Scheme = feedURL.Scheme + } + if autoURL.Host == "" { + autoURL.Host = feedURL.Host + } + body, err = doGet(r.Context, autoURL.String()) + if err != nil { + return err + } + } + } + feed, err := parseFeed(body, feedURL.String()) + if err != nil { + return err + } + itemsNode := r.RootNode() + if accountNode.Attr("title") == "" { + accountNode.SetAttr("title", fmt.Sprintf("%s Feed", feed.Title)) + } + if itemsNode.Attr("title") == "" { + itemsNode.SetAttr("title", fmt.Sprintf("%s Items", feed.Title)) + } + for _, item := range feed.Items { + if err := r.importItem(itemsNode, item); err != nil { + log.Printf("Feed importer: error importing item %s %v", item.ID, err) + continue + } + } + return nil +} + +func (r *run) importItem(parent *importer.Object, item *item) error { + itemNode, err := parent.ChildPathObject(item.ID) + if err != nil { + return err + } + fileRef, err := schema.WriteFileFromReader(r.Host.Target(), "", bytes.NewBufferString(item.Content)) + if err != nil { + return err + } + if err := itemNode.SetAttrs( + "feedItemId", item.ID, + "camliNodeType", "feed:item", + "title", item.Title, + "link", item.Link, + "author", item.Author, + "camliContent", fileRef.String(), + "feedMediaContentURL", item.MediaContent, + ); err != nil { + return err + } + return nil +} + +// autodiscover takes an HTML document and returns the 
autodiscovered feed +// URL. Returns an error if there is no such URL. +func autoDiscover(body []byte) (feedURL string, err error) { + r := bytes.NewReader(body) + z := html.NewTokenizer(r) + for { + if z.Next() == html.ErrorToken { + break + } + t := z.Token() + switch t.DataAtom { + case atom.Link: + if t.Type == html.StartTagToken || t.Type == html.SelfClosingTagToken { + attrs := make(map[string]string) + for _, a := range t.Attr { + attrs[a.Key] = a.Val + } + if attrs["rel"] == "alternate" && attrs["href"] != "" && + (attrs["type"] == "application/rss+xml" || attrs["type"] == "application/atom+xml") { + return attrs["href"], nil + } + } + } + } + return "", fmt.Errorf("No feed link found") +} + +func doGet(ctx *context.Context, url string) ([]byte, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + res, err := ctx.HTTPClient().Do(req) + if err != nil { + log.Printf("Error fetching %s: %v", url, err) + return nil, err + } + defer httputil.CloseBody(res.Body) + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("Get request on %s failed with: %s", url, res.Status) + } + return ioutil.ReadAll(io.LimitReader(res.Body, 8<<20)) +} + +// urlFileRef slurps urlstr from the net, writes to a file and returns its +// fileref or "" on error +func (r *run) urlFileRef(urlstr string) string { + if urlstr == "" { + return "" + } + im := r.im + im.mu.Lock() + if br, ok := im.urlFileRef[urlstr]; ok { + im.mu.Unlock() + return br.String() + } + im.mu.Unlock() + + res, err := r.HTTPClient().Get(urlstr) + if err != nil { + log.Printf("couldn't get file: %v", err) + return "" + } + defer res.Body.Close() + + filename := urlstr[strings.LastIndex(urlstr, "/")+1:] + fileRef, err := schema.WriteFileFromReader(r.Host.Target(), filename, res.Body) + if err != nil { + log.Printf("couldn't write file: %v", err) + return "" + } + + im.mu.Lock() + defer im.mu.Unlock() + im.urlFileRef[urlstr] = fileRef + return fileRef.String() +} + 
+func (im *imp) ServeSetup(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) error { + return tmpl.ExecuteTemplate(w, "serveSetup", ctx) +} + +var tmpl = template.Must(template.New("root").Parse(` +{{define "serveSetup"}} +

    Configuring Feed

    +
    + + + + +
    Feed URL
    +
    +{{end}} +`)) + +func (im *imp) ServeCallback(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) { + u := r.FormValue("feedURL") + if u == "" { + http.Error(w, "Expected a feed URL", 400) + return + } + feed, err := url.Parse(u) + if err != nil { + httputil.ServeError(w, r, err) + return + } + if feed.Scheme == "" { + feed.Scheme = "http" + } + if err := ctx.AccountNode.SetAttrs( + acctAttrFeedURL, feed.String(), + ); err != nil { + httputil.ServeError(w, r, fmt.Errorf("Error setting attribute: %v", err)) + return + } + http.Redirect(w, r, ctx.AccountURL(), http.StatusFound) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/feed/parse.go b/vendor/github.com/camlistore/camlistore/pkg/importer/feed/parse.go new file mode 100644 index 00000000..6c6a0e15 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/feed/parse.go @@ -0,0 +1,508 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package feed + +import ( + "bytes" + "encoding/xml" + "fmt" + "html" + "log" + "net/url" + "strings" + "time" + + "camlistore.org/pkg/importer/feed/atom" + "camlistore.org/pkg/importer/feed/rdf" + "camlistore.org/pkg/importer/feed/rss" + "camlistore.org/third_party/code.google.com/p/go-charset/charset" + _ "camlistore.org/third_party/code.google.com/p/go-charset/data" +) + +type feed struct { + Title string + Updated time.Time + Link string + Items []*item +} + +type item struct { + ID string + Title string + Link string + Created time.Time + Published time.Time + Updated time.Time + Author string + Content string + MediaContent string +} + +func parseFeed(body []byte, feedURL string) (*feed, error) { + var f *feed + var atomerr, rsserr, rdferr error + f, atomerr = parseAtom(body) + if f == nil { + f, rsserr = parseRSS(body) + } + if f == nil { + f, rdferr = parseRDF(body) + } + if f == nil { + log.Printf("atom parse error: %s", atomerr.Error()) + log.Printf("xml parse error: %s", rsserr.Error()) + log.Printf("rdf parse error: %s", rdferr.Error()) + return nil, fmt.Errorf("Could not parse feed data") + } + return f, nil +} + +func parseAtom(body []byte) (*feed, error) { + var f feed + var a atom.Feed + d := xml.NewDecoder(bytes.NewReader(body)) + d.CharsetReader = charset.NewReader + if err := d.Decode(&a); err != nil { + return nil, err + } + f.Title = a.Title + if t, err := parseDate(string(a.Updated)); err == nil { + f.Updated = t + } + fb, err := url.Parse(a.XMLBase) + if err != nil { + fb, _ = url.Parse("") + } + if len(a.Link) > 0 { + f.Link = findBestAtomLink(a.Link) + if l, err := fb.Parse(f.Link); err == nil { + f.Link = l.String() + } + } + + for _, i := range a.Entry { + eb, err := fb.Parse(i.XMLBase) + if err != nil { + eb = fb + } + st := item{ + ID: i.ID, + Title: atomTitle(i.Title), + } + if t, err := parseDate(string(i.Updated)); err == nil { + st.Updated = t + } + if t, err := parseDate(string(i.Published)); err == nil { + st.Published = t + 
} + if len(i.Link) > 0 { + st.Link = findBestAtomLink(i.Link) + if l, err := eb.Parse(st.Link); err == nil { + st.Link = l.String() + } + } + if i.Author != nil { + st.Author = i.Author.Name + } + if i.Content != nil { + if len(strings.TrimSpace(i.Content.Body)) != 0 { + st.Content = i.Content.Body + } else if len(i.Content.InnerXML) != 0 { + st.Content = i.Content.InnerXML + } + } else if i.Summary != nil { + st.Content = i.Summary.Body + } + f.Items = append(f.Items, &st) + } + return &f, nil +} + +func parseRSS(body []byte) (*feed, error) { + var f feed + var r rss.RSS + d := xml.NewDecoder(bytes.NewReader(body)) + d.CharsetReader = charset.NewReader + d.DefaultSpace = "DefaultSpace" + if err := d.Decode(&r); err != nil { + return nil, err + } + f.Title = r.Title + if t, err := parseDate(r.LastBuildDate, r.PubDate); err == nil { + f.Updated = t + } + f.Link = r.BaseLink() + + for _, i := range r.Items { + st := item{ + Link: i.Link, + Author: i.Author, + } + if i.Content != "" { + st.Content = i.Content + } else if i.Description != "" { + st.Content = i.Description + } + if i.Title != "" { + st.Title = i.Title + } else if i.Description != "" { + st.Title = i.Description + } + if st.Content == st.Title { + st.Title = "" + } + st.Title = textTitle(st.Title) + if i.Guid != nil { + st.ID = i.Guid.Guid + } + if i.Enclosure != nil && strings.HasPrefix(i.Enclosure.Type, "audio/") { + st.MediaContent = i.Enclosure.Url + } else if i.Media != nil && strings.HasPrefix(i.Media.Type, "audio/") { + st.MediaContent = i.Media.URL + } + if t, err := parseDate(i.PubDate, i.Date, i.Published); err == nil { + st.Published = t + st.Updated = t + } + f.Items = append(f.Items, &st) + } + + return &f, nil +} + +func parseRDF(body []byte) (*feed, error) { + var f feed + var rd rdf.RDF + d := xml.NewDecoder(bytes.NewReader(body)) + d.CharsetReader = charset.NewReader + if err := d.Decode(&rd); err != nil { + return nil, err + } + if rd.Channel != nil { + f.Title = rd.Channel.Title + 
f.Link = rd.Channel.Link + if t, err := parseDate(rd.Channel.Date); err == nil { + f.Updated = t + } + } + + for _, i := range rd.Item { + st := item{ + ID: i.About, + Title: textTitle(i.Title), + Link: i.Link, + Author: i.Creator, + } + if len(i.Description) > 0 { + st.Content = html.UnescapeString(i.Description) + } else if len(i.Content) > 0 { + st.Content = html.UnescapeString(i.Content) + } + if t, err := parseDate(i.Date); err == nil { + st.Published = t + st.Updated = t + } + f.Items = append(f.Items, &st) + } + + return &f, nil +} + +func textTitle(t string) string { + return html.UnescapeString(t) +} + +func atomTitle(t *atom.Text) string { + if t == nil { + return "" + } + if t.Type == "html" { + // see: https://github.com/mjibson/goread/blob/59aec794f3ef87b36c1bac029438c33a6aa6d8d3/utils.go#L533 + //return html.UnescapeString(sanitizer.StripTags(t.Body)) + } + return textTitle(t.Body) +} + +func findBestAtomLink(links []atom.Link) string { + getScore := func(l atom.Link) int { + switch { + case l.Rel == "hub": + return 0 + case l.Rel == "alternate" && l.Type == "text/html": + return 5 + case l.Type == "text/html": + return 4 + case l.Rel == "self": + return 2 + case l.Rel == "": + return 3 + default: + return 1 + } + } + + var bestlink string + bestscore := -1 + for _, l := range links { + score := getScore(l) + if score > bestscore { + bestlink = l.Href + bestscore = score + } + } + + return bestlink +} + +func parseFix(f *feed, feedURL string) (*feed, error) { + f.Link = strings.TrimSpace(f.Link) + f.Title = html.UnescapeString(strings.TrimSpace(f.Title)) + + if u, err := url.Parse(feedURL); err == nil { + if ul, err := u.Parse(f.Link); err == nil { + f.Link = ul.String() + } + } + base, err := url.Parse(f.Link) + if err != nil { + log.Printf("unable to parse link: %v", f.Link) + } + + var nss []*item + now := time.Now() + for _, s := range f.Items { + s.Created = now + s.Link = strings.TrimSpace(s.Link) + if s.ID == "" { + if s.Link != "" { + s.ID = 
s.Link + } else if s.Title != "" { + s.ID = s.Title + } else { + log.Printf("item has no id: %v", s) + continue + } + } + // if a story doesn't have a link, see if its id is a URL + if s.Link == "" { + if u, err := url.Parse(s.ID); err == nil { + s.Link = u.String() + } + } + if base != nil && s.Link != "" { + link, err := base.Parse(s.Link) + if err == nil { + s.Link = link.String() + } else { + log.Printf("unable to resolve link: %v", s.Link) + } + } + nss = append(nss, s) + } + f.Items = nss + + return f, nil +} + +var dateFormats = []string{ + "01-02-2006", + "01/02/2006", + "01/02/2006 - 15:04", + "01/02/2006 15:04:05 MST", + "01/02/2006 3:04 PM", + "02-01-2006", + "02/01/2006", + "02.01.2006 -0700", + "02/01/2006 - 15:04", + "02.01.2006 15:04", + "02/01/2006 15:04:05", + "02.01.2006 15:04:05", + "02-01-2006 15:04:05 MST", + "02/01/2006 15:04 MST", + "02 Jan 2006", + "02 Jan 2006 15:04:05", + "02 Jan 2006 15:04:05 -0700", + "02 Jan 2006 15:04:05 MST", + "02 Jan 2006 15:04:05 UT", + "02 Jan 2006 15:04 MST", + "02 Monday, Jan 2006 15:04", + "06-1-2 15:04", + "06/1/2 15:04", + "1/2/2006", + "1/2/2006 15:04:05 MST", + "1/2/2006 3:04:05 PM", + "1/2/2006 3:04:05 PM MST", + "15:04 02.01.2006 -0700", + "2006-01-02", + "2006/01/02", + "2006-01-02 00:00:00.0 15:04:05.0 -0700", + "2006-01-02 15:04", + "2006-01-02 15:04:05 -0700", + "2006-01-02 15:04:05-07:00", + "2006-01-02 15:04:05-0700", + "2006-01-02 15:04:05 MST", + "2006-01-02 15:04:05Z", + "2006-01-02 at 15:04:05", + "2006-01-02T15:04:05", + "2006-01-02T15:04:05:00", + "2006-01-02T15:04:05 -0700", + "2006-01-02T15:04:05-07:00", + "2006-01-02T15:04:05-0700", + "2006-01-02T15:04:05:-0700", + "2006-01-02T15:04:05-07:00:00", + "2006-01-02T15:04:05Z", + "2006-01-02T15:04-07:00", + "2006-01-02T15:04Z", + "2006-1-02T15:04:05Z", + "2006-1-2", + "2006-1-2 15:04:05", + "2006-1-2T15:04:05Z", + "2006 January 02", + "2-1-2006", + "2/1/2006", + "2.1.2006 15:04:05", + "2 Jan 2006", + "2 Jan 2006 15:04:05 -0700", + "2 Jan 2006 
15:04:05 MST", + "2 Jan 2006 15:04:05 Z", + "2 January 2006", + "2 January 2006 15:04:05 -0700", + "2 January 2006 15:04:05 MST", + "6-1-2 15:04", + "6/1/2 15:04", + "Jan 02, 2006", + "Jan 02 2006 03:04:05PM", + "Jan 2, 2006", + "Jan 2, 2006 15:04:05 MST", + "Jan 2, 2006 3:04:05 PM", + "Jan 2, 2006 3:04:05 PM MST", + "January 02, 2006", + "January 02, 2006 03:04 PM", + "January 02, 2006 15:04", + "January 02, 2006 15:04:05 MST", + "January 2, 2006", + "January 2, 2006 03:04 PM", + "January 2, 2006 15:04:05", + "January 2, 2006 15:04:05 MST", + "January 2, 2006, 3:04 p.m.", + "January 2, 2006 3:04 PM", + "Mon, 02 Jan 06 15:04:05 MST", + "Mon, 02 Jan 2006", + "Mon, 02 Jan 2006 15:04:05", + "Mon, 02 Jan 2006 15:04:05 00", + "Mon, 02 Jan 2006 15:04:05 -07", + "Mon 02 Jan 2006 15:04:05 -0700", + "Mon, 02 Jan 2006 15:04:05 --0700", + "Mon, 02 Jan 2006 15:04:05 -07:00", + "Mon, 02 Jan 2006 15:04:05 -0700", + "Mon,02 Jan 2006 15:04:05 -0700", + "Mon, 02 Jan 2006 15:04:05 GMT-0700", + "Mon , 02 Jan 2006 15:04:05 MST", + "Mon, 02 Jan 2006 15:04:05 MST", + "Mon, 02 Jan 2006 15:04:05MST", + "Mon, 02 Jan 2006, 15:04:05 MST", + "Mon, 02 Jan 2006 15:04:05 MST -0700", + "Mon, 02 Jan 2006 15:04:05 MST-07:00", + "Mon, 02 Jan 2006 15:04:05 UT", + "Mon, 02 Jan 2006 15:04:05 Z", + "Mon, 02 Jan 2006 15:04 -0700", + "Mon, 02 Jan 2006 15:04 MST", + "Mon,02 Jan 2006 15:04 MST", + "Mon, 02 Jan 2006 15 -0700", + "Mon, 02 Jan 2006 3:04:05 PM MST", + "Mon, 02 January 2006", + "Mon,02 January 2006 14:04:05 MST", + "Mon, 2006-01-02 15:04", + "Mon, 2 Jan 06 15:04:05 -0700", + "Mon, 2 Jan 06 15:04:05 MST", + "Mon, 2 Jan 15:04:05 MST", + "Mon, 2 Jan 2006", + "Mon,2 Jan 2006", + "Mon, 2 Jan 2006 15:04", + "Mon, 2 Jan 2006 15:04:05", + "Mon, 2 Jan 2006 15:04:05 -0700", + "Mon, 2 Jan 2006 15:04:05-0700", + "Mon, 2 Jan 2006 15:04:05 -0700 MST", + "mon,2 Jan 2006 15:04:05 MST", + "Mon 2 Jan 2006 15:04:05 MST", + "Mon, 2 Jan 2006 15:04:05 MST", + "Mon, 2 Jan 2006 15:04:05MST", + "Mon, 2 Jan 2006 15:04:05 
UT", + "Mon, 2 Jan 2006 15:04 -0700", + "Mon, 2 Jan 2006, 15:04 -0700", + "Mon, 2 Jan 2006 15:04 MST", + "Mon, 2, Jan 2006 15:4", + "Mon, 2 Jan 2006 15:4:5 -0700 GMT", + "Mon, 2 Jan 2006 15:4:5 MST", + "Mon, 2 Jan 2006 3:04:05 PM -0700", + "Mon, 2 January 2006", + "Mon, 2 January 2006 15:04:05 -0700", + "Mon, 2 January 2006 15:04:05 MST", + "Mon, 2 January 2006, 15:04:05 MST", + "Mon, 2 January 2006, 15:04 -0700", + "Mon, 2 January 2006 15:04 MST", + "Monday, 02 January 2006 15:04:05", + "Monday, 02 January 2006 15:04:05 -0700", + "Monday, 02 January 2006 15:04:05 MST", + "Monday, 2 Jan 2006 15:04:05 -0700", + "Monday, 2 Jan 2006 15:04:05 MST", + "Monday, 2 January 2006 15:04:05 -0700", + "Monday, 2 January 2006 15:04:05 MST", + "Monday, January 02, 2006", + "Monday, January 2, 2006", + "Monday, January 2, 2006 03:04 PM", + "Monday, January 2, 2006 15:04:05 MST", + "Mon Jan 02 2006 15:04:05 -0700", + "Mon, Jan 02,2006 15:04:05 MST", + "Mon Jan 02, 2006 3:04 pm", + "Mon Jan 2 15:04:05 2006 MST", + "Mon Jan 2 15:04 2006", + "Mon, Jan 2 2006 15:04:05 -0700", + "Mon, Jan 2 2006 15:04:05 -700", + "Mon, Jan 2, 2006 15:04:05 MST", + "Mon, Jan 2 2006 15:04 MST", + "Mon, Jan 2, 2006 15:04 MST", + "Mon, January 02, 2006 15:04:05 MST", + "Mon, January 02, 2006, 15:04:05 MST", + "Mon, January 2 2006 15:04:05 -0700", + "Updated January 2, 2006", + time.ANSIC, + time.RFC1123, + time.RFC1123Z, + time.RFC3339, + time.RFC822, + time.RFC822Z, + time.RFC850, + time.RubyDate, + time.UnixDate, +} + +func parseDate(ds ...string) (t time.Time, err error) { + for _, d := range ds { + d = strings.TrimSpace(d) + if d == "" { + continue + } + for _, f := range dateFormats { + if t, err = time.Parse(f, d); err == nil { + return + } + } + } + err = fmt.Errorf("could not parse dates: %v", strings.Join(ds, ", ")) + return +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/feed/rdf/rdf.go b/vendor/github.com/camlistore/camlistore/pkg/importer/feed/rdf/rdf.go new file mode 100644 
index 00000000..f931c419 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/feed/rdf/rdf.go @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2013 Matt Jibson + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +// Package rdf defines XML data structures for an RDF feed. +package rdf + +import ( + "encoding/xml" +) + +type RDF struct { + XMLName xml.Name `xml:"RDF"` + Channel *Channel `xml:"channel"` + Item []*Item `xml:"item"` +} + +type Channel struct { + Title string `xml:"title"` + Description string `xml:"description"` + Link string `xml:"link"` + Date string `xml:"date"` +} + +type Item struct { + About string `xml:"about,attr"` + Format string `xml:"format"` + Date string `xml:"date"` + Source string `xml:"source"` + Creator string `xml:"creator"` + Title string `xml:"title"` + Link string `xml:"link"` + Description string `xml:"description"` + Content string `xml:"encoded"` +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/feed/rss/rss.go b/vendor/github.com/camlistore/camlistore/pkg/importer/feed/rss/rss.go new file mode 100644 index 00000000..2598c042 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/feed/rss/rss.go @@ -0,0 +1,69 @@ +// Copyright 2012 Evan Farrer. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rss defines XML data structures for an RSS feed. +package rss + +type RSS struct { + XMLName string `xml:"rss"` + Title string `xml:"channel>title"` + Link []Link `xml:"channel>link"` + Description string `xml:"channel>description"` + PubDate string `xml:"channel>pubDate,omitempty"` + LastBuildDate string `xml:"channel>lastBuildDate,omitempty"` + Items []*Item `xml:"channel>item"` +} + +func (r *RSS) BaseLink() string { + for _, l := range r.Link { + if l.Rel == "" && l.Type == "" && l.Href == "" && l.Chardata != "" { + return l.Chardata + } + } + return "" +} + +type Link struct { + Rel string `xml:"rel,attr"` + Href string `xml:"href,attr"` + Type string `xml:"type,attr"` + Chardata string `xml:",chardata"` +} + +type Item struct { + Title string `xml:"title,omitempty"` + Link string `xml:"link,omitempty"` + Description string `xml:"description,omitempty"` + Author string `xml:"author,omitempty"` + Enclosure *Enclosure `xml:"enclosure"` + Guid *Guid `xml:"guid"` + PubDate string `xml:"pubDate,omitempty"` + Source *Source `xml:"source"` + Content string `xml:"encoded,omitempty"` + Date string `xml:"date,omitempty"` + Published string `xml:"published,omitempty"` + Media *MediaContent `xml:"content"` +} + +type MediaContent struct { + XMLBase string `xml:"http://search.yahoo.com/mrss/ content"` + URL string `xml:"url,attr"` + Type string `xml:"type,attr"` +} + +type Source struct { + Source string `xml:",chardata"` + Url string `xml:"url,attr"` +} + +type Guid struct { + Guid string `xml:",chardata"` + IsPermaLink bool `xml:"isPermaLink,attr,omitempty"` +} + +type Enclosure struct { + Url string `xml:"url,attr"` + Length string `xml:"length,attr,omitempty"` + Type string `xml:"type,attr"` +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/flickr/README b/vendor/github.com/camlistore/camlistore/pkg/importer/flickr/README new file 
mode 100644 index 00000000..395d73cd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/flickr/README @@ -0,0 +1,20 @@ +Flickr Importer +=============== + +This is an incomplete Camlistore importer for Flickr. So far it can import the +first 100 photos from a photostream and also their set metadata. + +To use: + +1) Fill out http://www.flickr.com/services/apps/create/noncommercial/ to get a + Flickr API key and secret. +2) Start the devcam server with flickrapikey flag: + $ devcam server -flickrapikey=: +3) Navigate to http:///importer-flickr/login +4) Watch import progress on the command line + + +TODO: + +https://github.com/camlistore/camlistore/issues?q=is%3Aopen+flickr+ + diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/flickr/flickr.go b/vendor/github.com/camlistore/camlistore/pkg/importer/flickr/flickr.go new file mode 100644 index 00000000..07094b6e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/flickr/flickr.go @@ -0,0 +1,586 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package flickr implements an importer for flickr.com accounts. 
+package flickr + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "log" + "net/http" + "net/url" + "strconv" + "time" + + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/schema/nodeattr" + "camlistore.org/third_party/github.com/garyburd/go-oauth/oauth" +) + +const ( + apiURL = "https://api.flickr.com/services/rest/" + temporaryCredentialRequestURL = "https://www.flickr.com/services/oauth/request_token" + resourceOwnerAuthorizationURL = "https://www.flickr.com/services/oauth/authorize" + tokenRequestURL = "https://www.flickr.com/services/oauth/access_token" + + photosetsAPIPath = "flickr.photosets.getList" + photosetAPIPath = "flickr.photosets.getPhotos" + photosAPIPath = "flickr.people.getPhotos" + + attrFlickrId = "flickrId" +) + +var oAuthURIs = importer.OAuthURIs{ + TemporaryCredentialRequestURI: temporaryCredentialRequestURL, + ResourceOwnerAuthorizationURI: resourceOwnerAuthorizationURL, + TokenRequestURI: tokenRequestURL, +} + +func init() { + importer.Register("flickr", imp{}) +} + +var _ importer.ImporterSetupHTMLer = imp{} + +type imp struct { + importer.OAuth1 // for CallbackRequestAccount and CallbackURLParameters +} + +func (imp) NeedsAPIKey() bool { return true } + +func (imp) SupportsIncremental() bool { return false } + +func (imp) IsAccountReady(acctNode *importer.Object) (ok bool, err error) { + return acctNode.Attr(importer.AcctAttrUserName) != "" && acctNode.Attr(importer.AcctAttrAccessToken) != "", nil +} + +func (im imp) SummarizeAccount(acct *importer.Object) string { + ok, err := im.IsAccountReady(acct) + if err != nil || !ok { + return "" + } + return acct.Attr(importer.AcctAttrUserName) +} + +func (imp) AccountSetupHTML(host *importer.Host) string { + base := host.ImporterBaseURL() + "flickr" + return fmt.Sprintf(` +

    Configuring Flickr

    +

    Visit http://www.flickr.com/services/apps/create/noncommercial/, fill out whatever's needed, and click on SUBMIT.

    +

    From your newly created app's main page, go to "Edit the authentication flow", use the following settings:

    +
      +
    • App Type: Web Application
    • +
    • Callback URL: %s
    • +
    +

    and SAVE CHANGES

    +

    Then go to "View the API Key for this app", and copy the "Key" and "Secret" into the "Client ID" and "Client Secret" boxes above.

    +`, base+"/callback") +} + +// A run is our state for a given run of the importer. +type run struct { + userID string + *importer.RunContext + oauthClient *oauth.Client // No need to guard, used read-only. + accessCreds *oauth.Credentials // No need to guard, used read-only. + + // primaryPhoto maps an album id to the id of its primary photo. + // If some concurrency is added to some of the importing routines, + // it will need some guarding. + primaryPhoto map[string]string +} + +func (imp) Run(ctx *importer.RunContext) error { + clientID, secret, err := ctx.Credentials() + if err != nil { + return fmt.Errorf("no API credentials: %v", err) + } + accountNode := ctx.AccountNode() + accessToken := accountNode.Attr(importer.AcctAttrAccessToken) + accessSecret := accountNode.Attr(importer.AcctAttrAccessTokenSecret) + if accessToken == "" || accessSecret == "" { + return errors.New("access credentials not found") + } + userID := ctx.AccountNode().Attr(importer.AcctAttrUserID) + if userID == "" { + return errors.New("UserID hasn't been set by account setup.") + } + r := &run{ + userID: userID, + RunContext: ctx, + oauthClient: &oauth.Client{ + TemporaryCredentialRequestURI: temporaryCredentialRequestURL, + ResourceOwnerAuthorizationURI: resourceOwnerAuthorizationURL, + TokenRequestURI: tokenRequestURL, + Credentials: oauth.Credentials{ + Token: clientID, + Secret: secret, + }, + }, + accessCreds: &oauth.Credentials{ + Token: accessToken, + Secret: accessSecret, + }, + primaryPhoto: make(map[string]string), + } + + if err := r.importPhotosets(); err != nil { + return err + } + if err := r.importPhotos(); err != nil { + return err + } + return nil +} + +type photosetList struct { + Page jsonInt + Pages jsonInt + PerPage jsonInt + Photoset []*photosetInfo +} + +type photosetInfo struct { + Id string `json:"id"` + PrimaryPhotoId string `json:"primary"` + Title contentString + Description contentString +} + +type photosetItems struct { + Id string `json:"id"` + Page 
jsonInt + Pages jsonInt + Photo []struct { + Id string + OriginalFormat string + } +} + +func (r *run) importPhotosets() error { + resp := struct { + Photosets photosetList + }{} + if err := r.flickrAPIRequest(&resp, + photosetsAPIPath, "user_id", r.userID); err != nil { + return err + } + + setsNode, err := r.getTopLevelNode("sets", "Sets") + if err != nil { + return err + } + log.Printf("Importing %d sets", len(resp.Photosets.Photoset)) + + for _, item := range resp.Photosets.Photoset { + if r.Context.IsCanceled() { + log.Printf("Flickr importer: interrupted") + return context.ErrCanceled + } + for page := 1; page >= 1; { + page, err = r.importPhotoset(setsNode, item, page) + if err != nil { + log.Printf("Flickr importer: error importing photoset %s: %s", item.Id, err) + continue + } + } + } + return nil +} + +func (r *run) importPhotoset(parent *importer.Object, photoset *photosetInfo, page int) (int, error) { + photosetNode, err := parent.ChildPathObject(photoset.Id) + if err != nil { + return 0, err + } + + if err := photosetNode.SetAttrs( + attrFlickrId, photoset.Id, + nodeattr.Title, photoset.Title.Content, + nodeattr.Description, photoset.Description.Content); err != nil { + return 0, err + } + // keep track of primary photo so we can set the fileRef of the photo as CamliContentImage + // on photosetNode when we eventually know that fileRef. 
+ r.primaryPhoto[photoset.Id] = photoset.PrimaryPhotoId + + resp := struct { + Photoset photosetItems + }{} + if err := r.flickrAPIRequest(&resp, photosetAPIPath, "user_id", r.userID, + "page", fmt.Sprintf("%d", page), "photoset_id", photoset.Id, "extras", "original_format"); err != nil { + return 0, err + } + + log.Printf("Importing page %d from photoset %s", page, photoset.Id) + + photosNode, err := r.getPhotosNode() + if err != nil { + return 0, err + } + + for _, item := range resp.Photoset.Photo { + filename := fmt.Sprintf("%s.%s", item.Id, item.OriginalFormat) + photoNode, err := photosNode.ChildPathObject(filename) + if err != nil { + log.Printf("Flickr importer: error finding photo node %s for addition to photoset %s: %s", + item.Id, photoset.Id, err) + continue + } + if err := photosetNode.SetAttr("camliPath:"+filename, photoNode.PermanodeRef().String()); err != nil { + log.Printf("Flickr importer: error adding photo %s to photoset %s: %s", + item.Id, photoset.Id, err) + } + } + + if resp.Photoset.Page < resp.Photoset.Pages { + return page + 1, nil + } else { + return 0, nil + } +} + +type photosSearch struct { + Photos struct { + Page jsonInt + Pages jsonInt + Perpage jsonInt + Total jsonInt + Photo []*photosSearchItem + } + + Stat string +} + +type photosSearchItem struct { + Id string `json:"id"` + Title string + IsPublic jsonInt + IsFriend jsonInt + IsFamily jsonInt + Description contentString + DateUpload string // Unix timestamp, in GMT. + DateTaken string // formatted as "2006-01-02 15:04:05", so no timezone info. + OriginalFormat string + LastUpdate string // Unix timestamp. + Latitude jsonFloat + Longitude jsonFloat + Tags string + MachineTags string `json:"machine_tags"` + Views string + Media string + URL string `json:"url_o"` +} + +type contentString struct { + Content string `json:"_content"` +} + +// jsonInt is for unmarshaling quoted and unquoted integers ("0" and 0), too. 
+type jsonInt int + +func (jf jsonInt) MarshalJSON() ([]byte, error) { + return json.Marshal(int(jf)) +} +func (jf *jsonInt) UnmarshalJSON(p []byte) error { + return json.Unmarshal(bytes.Trim(p, `"`), (*int)(jf)) +} + +// jsonFloat is for unmarshaling quoted and unquoted numbers ("0" and 0), too. +type jsonFloat float32 + +func (jf jsonFloat) MarshalJSON() ([]byte, error) { + return json.Marshal(float32(jf)) +} +func (jf *jsonFloat) UnmarshalJSON(p []byte) error { + if len(p) == 1 && p[0] == '0' { // shortcut + *jf = 0 + return nil + } + return json.Unmarshal(bytes.Trim(p, `"`), (*float32)(jf)) +} + +func (r *run) importPhotos() error { + for page := 1; page >= 1; { + var err error + page, err = r.importPhotosPage(page) + if err != nil { + return err + } + } + return nil +} + +func (r *run) importPhotosPage(page int) (int, error) { + resp := photosSearch{} + if err := r.flickrAPIRequest(&resp, photosAPIPath, "user_id", r.userID, "page", fmt.Sprintf("%d", page), + "extras", "description,date_upload,date_taken,original_format,last_update,geo,tags,machine_tags,views,media,url_o"); err != nil { + return 0, err + } + + photosNode, err := r.getPhotosNode() + if err != nil { + return 0, err + } + log.Printf("Importing %d photos on page %d of %d", len(resp.Photos.Photo), page, resp.Photos.Pages) + + for _, item := range resp.Photos.Photo { + if err := r.importPhoto(photosNode, item); err != nil { + log.Printf("Flickr importer: error importing %s: %s", item.Id, err) + continue + } + } + + if resp.Photos.Pages > resp.Photos.Page { + return page + 1, nil + } else { + return 0, nil + } +} + +// TODO(aa): +// * Parallelize: http://golang.org/doc/effective_go.html#concurrency +// * Do more than one "page" worth of results +// * Report progress and errors back through host interface +// * All the rest of the metadata (see photoMeta) +// * Conflicts: For all metadata changes, prefer any non-imported claims +// * Test! 
+func (r *run) importPhoto(parent *importer.Object, photo *photosSearchItem) error { + filename := fmt.Sprintf("%s.%s", photo.Id, photo.OriginalFormat) + photoNode, err := parent.ChildPathObject(filename) + if err != nil { + return err + } + + // https://www.flickr.com/services/api/misc.dates.html + dateTaken, err := time.ParseInLocation("2006-01-02 15:04:05", photo.DateTaken, schema.UnknownLocation) + if err != nil { + // default to the published date otherwise + log.Printf("Flickr importer: problem with date taken of photo %v, defaulting to published date instead.", photo.Id) + seconds, err := strconv.ParseInt(photo.DateUpload, 10, 64) + if err != nil { + return fmt.Errorf("could not parse date upload time %q for image %v: %v", photo.DateUpload, photo.Id, err) + } + dateTaken = time.Unix(seconds, 0) + } + + attrs := []string{ + attrFlickrId, photo.Id, + nodeattr.DateCreated, schema.RFC3339FromTime(dateTaken), + nodeattr.Description, photo.Description.Content, + } + if schema.IsInterestingTitle(photo.Title) { + attrs = append(attrs, nodeattr.Title, photo.Title) + } + // Import all the metadata. SetAttrs() is a no-op if the value hasn't changed, so there's no cost to doing these on every run. + // And this way if we add more things to import, they will get picked up. + if err := photoNode.SetAttrs(attrs...); err != nil { + return err + } + + // Import the photo itself. Since it is expensive to fetch the image, we store its lastupdate and only refetch if it might have changed. 
+ // lastupdate is a Unix timestamp according to https://www.flickr.com/services/api/flickr.photos.getInfo.html + seconds, err := strconv.ParseInt(photo.LastUpdate, 10, 64) + if err != nil { + return fmt.Errorf("could not parse lastupdate time for image %v: %v", photo.Id, err) + } + lastUpdate := time.Unix(seconds, 0) + if lastUpdateString := photoNode.Attr(nodeattr.DateModified); lastUpdateString != "" { + oldLastUpdate, err := time.Parse(time.RFC3339, lastUpdateString) + if err != nil { + return fmt.Errorf("could not parse last stored update time for image %v: %v", photo.Id, err) + } + if lastUpdate.Equal(oldLastUpdate) { + if err := r.updatePrimaryPhoto(photoNode); err != nil { + return err + } + return nil + } + } + form := url.Values{} + form.Set("user_id", r.userID) + res, err := r.fetch(photo.URL, form) + if err != nil { + log.Printf("Flickr importer: Could not fetch %s: %s", photo.URL, err) + return err + } + defer res.Body.Close() + + fileRef, err := schema.WriteFileFromReader(r.Host.Target(), filename, res.Body) + if err != nil { + return err + } + if err := photoNode.SetAttr(nodeattr.CamliContent, fileRef.String()); err != nil { + return err + } + if err := r.updatePrimaryPhoto(photoNode); err != nil { + return err + } + // Write lastupdate last, so that if any of the preceding fails, we will try again next time. + if err := photoNode.SetAttr(nodeattr.DateModified, schema.RFC3339FromTime(lastUpdate)); err != nil { + return err + } + + return nil +} + +// updatePrimaryPhoto uses the camliContent of photoNode to set the +// camliContentImage of any album for which photoNode is the primary photo. 
+func (r *run) updatePrimaryPhoto(photoNode *importer.Object) error { + photoId := photoNode.Attr(attrFlickrId) + for album, photo := range r.primaryPhoto { + if photoId != photo { + continue + } + setsNode, err := r.getTopLevelNode("sets", "Sets") + if err != nil { + return fmt.Errorf("could not set %v as primary photo of %v, no root sets: %v", photoId, album, err) + } + setNode, err := setsNode.ChildPathObject(album) + if err != nil { + return fmt.Errorf("could not set %v as primary photo of %v, no album: %v", photoId, album, err) + } + fileRef := photoNode.Attr(nodeattr.CamliContent) + if fileRef == "" { + return fmt.Errorf("could not set %v as primary photo of %v: fileRef of photo is unknown", photoId, album) + } + if err := setNode.SetAttr(nodeattr.CamliContentImage, fileRef); err != nil { + return fmt.Errorf("could not set %v as primary photo of %v: %v", photoId, album, err) + } + delete(r.primaryPhoto, album) + } + return nil +} + +func (r *run) getPhotosNode() (*importer.Object, error) { + return r.getTopLevelNode("photos", "Photos") +} + +func (r *run) getTopLevelNode(path string, title string) (*importer.Object, error) { + photos, err := r.RootNode().ChildPathObject(path) + if err != nil { + return nil, err + } + + if err := photos.SetAttr(nodeattr.Title, title); err != nil { + return nil, err + } + return photos, nil +} + +func (r *run) flickrAPIRequest(result interface{}, method string, keyval ...string) error { + keyval = append([]string{"method", method, "format", "json", "nojsoncallback", "1"}, keyval...) + return importer.OAuthContext{ + r.Context, + r.oauthClient, + r.accessCreds}.PopulateJSONFromURL(result, apiURL, keyval...) +} + +func (r *run) fetch(url string, form url.Values) (*http.Response, error) { + return importer.OAuthContext{ + r.Context, + r.oauthClient, + r.accessCreds}.Get(url, form) +} + +// TODO(mpl): same in twitter. refactor. Except for the additional perms in AuthorizationURL call. 
+func (imp) ServeSetup(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) error { + oauthClient, err := ctx.NewOAuthClient(oAuthURIs) + if err != nil { + err = fmt.Errorf("error getting OAuth client: %v", err) + httputil.ServeError(w, r, err) + return err + } + tempCred, err := oauthClient.RequestTemporaryCredentials(ctx.HTTPClient(), ctx.CallbackURL(), nil) + if err != nil { + err = fmt.Errorf("Error getting temp cred: %v", err) + httputil.ServeError(w, r, err) + return err + } + if err := ctx.AccountNode.SetAttrs( + importer.AcctAttrTempToken, tempCred.Token, + importer.AcctAttrTempSecret, tempCred.Secret, + ); err != nil { + err = fmt.Errorf("Error saving temp creds: %v", err) + httputil.ServeError(w, r, err) + return err + } + + authURL := oauthClient.AuthorizationURL(tempCred, url.Values{"perms": {"read"}}) + http.Redirect(w, r, authURL, http.StatusFound) + return nil +} + +func (imp) ServeCallback(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) { + tempToken := ctx.AccountNode.Attr(importer.AcctAttrTempToken) + tempSecret := ctx.AccountNode.Attr(importer.AcctAttrTempSecret) + if tempToken == "" || tempSecret == "" { + log.Printf("flicker: no temp creds in callback") + httputil.BadRequestError(w, "no temp creds in callback") + return + } + if tempToken != r.FormValue("oauth_token") { + log.Printf("unexpected oauth_token: got %v, want %v", r.FormValue("oauth_token"), tempToken) + httputil.BadRequestError(w, "unexpected oauth_token") + return + } + oauthClient, err := ctx.NewOAuthClient(oAuthURIs) + if err != nil { + err = fmt.Errorf("error getting OAuth client: %v", err) + httputil.ServeError(w, r, err) + return + } + tokenCred, vals, err := oauthClient.RequestToken( + ctx.Context.HTTPClient(), + &oauth.Credentials{ + Token: tempToken, + Secret: tempSecret, + }, + r.FormValue("oauth_verifier"), + ) + if err != nil { + httputil.ServeError(w, r, fmt.Errorf("Error getting request token: %v ", err)) + return + } + userID := 
vals.Get("user_nsid") + if userID == "" { + httputil.ServeError(w, r, fmt.Errorf("Couldn't get user id: %v", err)) + return + } + username := vals.Get("username") + if username == "" { + httputil.ServeError(w, r, fmt.Errorf("Couldn't get user name: %v", err)) + return + } + + // TODO(mpl): get a few more bits of info (first name, last name etc) like I did for twitter, if possible. + if err := ctx.AccountNode.SetAttrs( + importer.AcctAttrAccessToken, tokenCred.Token, + importer.AcctAttrAccessTokenSecret, tokenCred.Secret, + importer.AcctAttrUserID, userID, + importer.AcctAttrUserName, username, + ); err != nil { + httputil.ServeError(w, r, fmt.Errorf("Error setting basic account attributes: %v", err)) + return + } + http.Redirect(w, r, ctx.AccountURL(), http.StatusFound) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/flickr/flickr_test.go b/vendor/github.com/camlistore/camlistore/pkg/importer/flickr/flickr_test.go new file mode 100644 index 00000000..605845b9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/flickr/flickr_test.go @@ -0,0 +1,254 @@ +/* +Copyright 2015 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flickr + +import ( + "encoding/json" + "testing" +) + +func TestParseJSONFloat(t *testing.T) { + var ps photosSearch + if err := json.Unmarshal([]byte(photosSearchData1), &ps); err != nil { + t.Errorf("unmarshal error: %v", err) + } + for i, want := range []jsonFloat{-37.899166, -37.899166, 0} { + if ps.Photos.Photo[i].Latitude != want { + t.Errorf("%d. want latitude=%f got %f", i+1, want, ps.Photos.Photo[i].Latitude) + } + } +} +func TestParseJSONInt(t *testing.T) { + var ps photosSearch + if err := json.Unmarshal([]byte(photosSearchData2), &ps); err != nil { + t.Errorf("unmarshal error: %v", err) + } + for i, want := range []jsonInt{1, 1} { + if ps.Photos.Photo[i].IsPublic != want { + t.Errorf("%d. want ispublic=%d, got %d", i+1, want, ps.Photos.Photo[i].IsPublic) + } + } +} + +const ( + photosSearchData2 = `{ + "photos": { + "page": 63, + "pages": 63, + "perpage": 100, + "total": 6236, + "photo": [ + { + "id": "0000084", + "owner": "00002737@N00", + "secret": "0000f03945", + "server": "3", + "farm": 1, + "title": "Machinery in Brickmakers' park", + "ispublic": 1, + "isfriend": 0, + "isfamily": 0, + "description": { + "_content": "" + }, + "dateupload": "1106791534", + "lastupdate": "1251702196", + "datetaken": "2005-01-26 07:47:33", + "datetakengranularity": "0", + "datetakenunknown": 0, + "views": "145", + "tags": "australia melbourne victoria machinery oakleigh pc3166 auspctagged 3166 geo:country=australia geo:zip=3166 brickmakerspark", + "machine_tags": "geo:zip=3166 geo:country=australia", + "originalsecret": "0000f03945", + "originalformat": "jpg", + "latitude": "-37.895717", + "longitude": "145.099933", + "accuracy": "15", + "context": 0, + "place_id": "0000fRpQU7qUoVfD", + "woeid": "0000751", + "geo_is_family": 0, + "geo_is_friend": 0, + "geo_is_contact": 0, + "geo_is_public": 1, + "media": "photo", + "media_status": "ready", + "url_o": "https://farm1.staticflickr.com/3/3850184_87b1f03945_o.jpg", + "height_o": "640", + "width_o": "480" + }, 
+ { + "id": "3850183", + "owner": "47322737@N00", + "secret": "492b7f19de", + "server": "3", + "farm": 1, + "title": "Machinery in Brickmakers' park", + "ispublic": "1", + "isfriend": 0, + "isfamily": 0, + "description": { + "_content": "" + }, + "dateupload": "1106791534", + "lastupdate": "1251702196", + "datetaken": "2005-01-26 07:50:58", + "datetakengranularity": "0", + "datetakenunknown": 0, + "views": "204", + "tags": "australia melbourne victoria machinery oakleigh pc3166 auspctagged 3166 geo:country=australia geo:zip=3166 brickmakerspark", + "machine_tags": "geo:zip=3166 geo:country=australia", + "originalsecret": "492b7f19de", + "originalformat": "jpg", + "latitude": "-37.895717", + "longitude": "145.099933", + "accuracy": "15", + "context": 0, + "place_id": "SrpyfRpQU7qUoVfD", + "woeid": "1104751", + "geo_is_family": 0, + "geo_is_friend": 0, + "geo_is_contact": 0, + "geo_is_public": 1, + "media": "photo", + "media_status": "ready", + "url_o": "https://farm1.staticflickr.com/3/3850183_492b7f19de_o.jpg", + "height_o": "480", + "width_o": "640" + } + ] + }, + "stat": "ok" +}` + + photosSearchData1 = `{ + "photos": { + "page": 1, + "pages": 63, + "perpage": 100, + "total": "6226", + "photo": [ + { + "id": "00007283018", + "owner": "00002737@N00", + "secret": "00000fa7ec", + "server": "331", + "farm": 1, + "title": "The mysterious masked man waits for his #milkshake", + "ispublic": 1, + "isfriend": 0, + "isfamily": 0, + "description": { + "_content": "" + }, + "dateupload": "1435974606", + "lastupdate": "1435974611", + "datetaken": "2015-07-04 11:50:06", + "datetakengranularity": 0, + "datetakenunknown": "1", + "views": "0", + "tags": "square squareformat juno iphoneography instagramapp uploaded:by=instagram", + "machine_tags": "uploaded:by=instagram", + "originalsecret": "0000958ab8", + "originalformat": "jpg", + "latitude": "-37.899166", + "longitude": "145.090277", + "accuracy": "16", + "context": 0, + "place_id": "0000fRpQU7qUoVfD", + "woeid": "0000751", + 
"geo_is_family": 0, + "geo_is_friend": 0, + "geo_is_contact": 0, + "geo_is_public": 1, + "media": "photo", + "media_status": "ready", + "url_o": "https://farm1.staticflickr.com/331/00007283018_0000958ab8_o.jpg", + "height_o": "1080", + "width_o": "1080" + }, + { + "id": "00001743956", + "owner": "00002737@N00", + "secret": "aa00088ef7", + "server": "380", + "farm": 1, + "title": "A #LEGO #maze", + "ispublic": 1, + "isfriend": 0, + "isfamily": 0, + "description": { + "_content": "" + }, + "dateupload": "1435481921", + "lastupdate": "1435481924", + "datetaken": "2015-06-28 18:58:41", + "datetakengranularity": 0, + "datetakenunknown": "1", + "views": "33", + "tags": "square squareformat lark iphoneography instagramapp uploaded:by=instagram", + "machine_tags": "uploaded:by=instagram", + "originalsecret": "000df6239a", + "originalformat": "jpg", + "latitude": -37.899166, + "longitude": "0", + "accuracy": 0, + "context": 0, + "media": "photo", + "media_status": "ready", + "url_o": "https://farm1.staticflickr.com/380/00001743956_0000f6239a_o.jpg", + "height_o": "640", + "width_o": "640" + }, + { + "id": "00001743956", + "owner": "00002737@N00", + "secret": "aa00088ef7", + "server": "380", + "farm": 1, + "title": "A #LEGO #maze", + "ispublic": 1, + "isfriend": 0, + "isfamily": 0, + "description": { + "_content": "" + }, + "dateupload": "1435481921", + "lastupdate": "1435481924", + "datetaken": "2015-06-28 18:58:41", + "datetakengranularity": 0, + "datetakenunknown": "1", + "views": "33", + "tags": "square squareformat lark iphoneography instagramapp uploaded:by=instagram", + "machine_tags": "uploaded:by=instagram", + "originalsecret": "000df6239a", + "originalformat": "jpg", + "latitude": 0, + "longitude": 0, + "accuracy": 0, + "context": 0, + "media": "photo", + "media_status": "ready", + "url_o": "https://farm1.staticflickr.com/380/00001743956_0000f6239a_o.jpg", + "height_o": "640", + "width_o": "640" + } + ] + }, + "stat": "ok" +}` +) diff --git 
a/vendor/github.com/camlistore/camlistore/pkg/importer/flickr/testdata.go b/vendor/github.com/camlistore/camlistore/pkg/importer/flickr/testdata.go new file mode 100644 index 00000000..715e7dac --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/flickr/testdata.go @@ -0,0 +1,288 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flickr + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/osutil" +) + +var _ importer.TestDataMaker = imp{} + +func (im imp) SetTestAccount(acctNode *importer.Object) error { + return acctNode.SetAttrs( + importer.AcctAttrAccessToken, "fakeAccessToken", + importer.AcctAttrAccessTokenSecret, "fakeAccessSecret", + importer.AcctAttrUserID, "fakeUserId", + importer.AcctAttrName, "fakeName", + importer.AcctAttrUserName, "fakeScreenName", + ) +} + +func (im imp) MakeTestData() http.RoundTripper { + const ( + nPhotosets = 5 // Arbitrary number of sets. + perPage = 3 // number of photos per page (both when getting sets and when getting photos). + fakeUserId = "fakeUserId" + ) + // Photoset N has N photos, so we've got 15 ( = 5 + 4 + 3 + 2 + 1) photos in total. 
+ var nPhotos int + for i := 1; i <= nPhotosets; i++ { + nPhotos += i + } + nPhotosPages := nPhotos / perPage + if nPhotos%perPage != 0 { + nPhotosPages++ + } + + okHeader := `HTTP/1.1 200 OK +Content-Type: application/json; charset=UTF-8 + +` + + // TODO(mpl): this scheme does not take into account that we could have the same photo + // in different albums. These two photos will end up with a different photoId. + buildPhotoIds := func(nsets, perPage int) []string { + var ids []string + for i := 1; i <= nsets; i++ { + photosetId := blob.RefFromString(fmt.Sprintf("Photoset %d", i)).DigestPrefix(10) + page := 1 + // Photoset N has N photos. + indexOnPage := 1 + for j := 1; j <= i; j++ { + photoId := blob.RefFromString(fmt.Sprintf("Photo %d on page %d of photoset %s", indexOnPage, page, photosetId)).DigestPrefix(10) + ids = append(ids, photoId) + indexOnPage++ + if indexOnPage > perPage { + page++ + indexOnPage = 1 + } + } + } + return ids + } + photoIds := buildPhotoIds(nPhotosets, perPage) + + responses := make(map[string]func() *http.Response) + // Initial photo sets list + photosetsURL := fmt.Sprintf("%s?format=json&method=%s&nojsoncallback=1&user_id=%s", apiURL, photosetsAPIPath, fakeUserId) + response := fmt.Sprintf("%s%s", okHeader, fakePhotosetsList(nPhotosets)) + responses[photosetsURL] = httputil.StaticResponder(response) + + // All the photoset calls. One call for each page of each photoset. + // Each page as perPage photos, or maybe less if end of the photoset. 
+ { + pageStart := 0 + albumEnd, pageEnd, albumNum, pages, page := 1, 1, 1, 1, 1 + photosetId := blob.RefFromString(fmt.Sprintf("Photoset %d", albumNum)).DigestPrefix(10) + photosURL := fmt.Sprintf("%s?extras=original_format&format=json&method=%s&nojsoncallback=1&page=%d&photoset_id=%s&user_id=%s", + apiURL, photosetAPIPath, page, photosetId, fakeUserId) + response := fmt.Sprintf("%s%s", okHeader, fakePhotoset(photosetId, page, pages, photoIds[pageStart:pageEnd])) + responses[photosURL] = httputil.StaticResponder(response) + for k, _ := range photoIds { + if k < pageEnd { + continue + } + page++ + pageStart = k + pageEnd = k + perPage + if page > pages { + albumNum++ + page = 1 + pages = albumNum / perPage + if albumNum%perPage != 0 { + pages++ + } + albumEnd = pageStart + albumNum + photosetId = blob.RefFromString(fmt.Sprintf("Photoset %d", albumNum)).DigestPrefix(10) + } + if pageEnd > albumEnd { + pageEnd = albumEnd + } + photosURL := fmt.Sprintf("%s?extras=original_format&format=json&method=%s&nojsoncallback=1&page=%d&photoset_id=%s&user_id=%s", + apiURL, photosetAPIPath, page, photosetId, fakeUserId) + response := fmt.Sprintf("%s%s", okHeader, fakePhotoset(photosetId, page, pages, photoIds[pageStart:pageEnd])) + responses[photosURL] = httputil.StaticResponder(response) + } + } + + // All the photo page calls (to get the photos info). + // Each page has perPage photos, until end of photos. + for i := 1; i <= nPhotosPages; i++ { + photosURL := fmt.Sprintf("%s?extras=", apiURL) + + url.QueryEscape("description,date_upload,date_taken,original_format,last_update,geo,tags,machine_tags,views,media,url_o") + + fmt.Sprintf("&format=json&method=%s&nojsoncallback=1&page=%d&user_id=%s", photosAPIPath, i, fakeUserId) + response := fmt.Sprintf("%s%s", okHeader, fakePhotosPage(i, nPhotosPages, perPage, photoIds)) + responses[photosURL] = httputil.StaticResponder(response) + } + + // Actual photo(s) URL. 
+ pudgyPic := fakePicture() + for _, v := range photoIds { + photoURL := fmt.Sprintf("https://farm3.staticflickr.com/2897/14198397111_%s_o.jpg?user_id=%s", v, fakeUserId) + responses[photoURL] = httputil.FileResponder(pudgyPic) + } + + return httputil.NewFakeTransport(responses) +} + +func fakePhotosetsList(sets int) string { + var photosets []*photosetInfo + for i := 1; i <= sets; i++ { + title := fmt.Sprintf("Photoset %d", i) + photosetId := blob.RefFromString(title).DigestPrefix(10) + primaryPhotoId := blob.RefFromString(fmt.Sprintf("Photo 1 on page 1 of photoset %s", photosetId)).DigestPrefix(10) + item := &photosetInfo{ + Id: photosetId, + PrimaryPhotoId: primaryPhotoId, + Title: contentString{Content: title}, + Description: contentString{Content: "fakePhotosetDescription"}, + } + photosets = append(photosets, item) + } + + setslist := struct { + Photosets photosetList + }{ + Photosets: photosetList{ + Photoset: photosets, + }, + } + + list, err := json.MarshalIndent(&setslist, "", " ") + if err != nil { + log.Fatalf("%v", err) + } + return string(list) +} + +func fakePhotoset(photosetId string, page, pages int, photoIds []string) string { + var photos []struct { + Id string + OriginalFormat string + } + for _, v := range photoIds { + item := struct { + Id string + OriginalFormat string + }{ + Id: v, + OriginalFormat: "jpg", + } + photos = append(photos, item) + } + + photoslist := struct { + Photoset photosetItems + }{ + Photoset: photosetItems{ + Id: photosetId, + Page: jsonInt(page), + Pages: jsonInt(pages), + Photo: photos, + }, + } + + list, err := json.MarshalIndent(&photoslist, "", " ") + if err != nil { + log.Fatalf("%v", err) + } + return string(list) + +} + +func fakePhotosPage(page, pages, perPage int, photoIds []string) string { + var photos []*photosSearchItem + currentPage := 1 + indexOnPage := 1 + day := time.Hour * 24 + year := day * 365 + const dateCreatedFormat = "2006-01-02 15:04:05" + + for k, v := range photoIds { + if indexOnPage > 
perPage { + currentPage++ + indexOnPage = 1 + } + if currentPage < page { + indexOnPage++ + continue + } + created := time.Now().Add(-time.Duration(k) * year) + published := created.Add(day) + updated := published.Add(day) + item := &photosSearchItem{ + Id: v, + Title: fmt.Sprintf("Photo %d", k+1), + Description: contentString{Content: "fakePhotoDescription"}, + DateUpload: fmt.Sprintf("%d", published.Unix()), + DateTaken: created.Format(dateCreatedFormat), + LastUpdate: fmt.Sprintf("%d", updated.Unix()), + URL: fmt.Sprintf("https://farm3.staticflickr.com/2897/14198397111_%s_o.jpg", v), + OriginalFormat: "jpg", + } + photos = append(photos, item) + if len(photos) >= perPage { + break + } + indexOnPage++ + } + + photosPage := &photosSearch{ + Photos: struct { + Page jsonInt + Pages jsonInt + Perpage jsonInt + Total jsonInt + Photo []*photosSearchItem + }{ + Page: jsonInt(page), + Pages: jsonInt(pages), + Perpage: jsonInt(perPage), + Photo: photos, + }, + } + + list, err := json.MarshalIndent(photosPage, "", " ") + if err != nil { + log.Fatalf("%v", err) + } + return string(list) + +} + +func fakePicture() string { + camliDir, err := osutil.GoPackagePath("camlistore.org") + if err == os.ErrNotExist { + log.Fatal("Directory \"camlistore.org\" not found under GOPATH/src; are you not running with devcam?") + } + if err != nil { + log.Fatalf("Error searching for \"camlistore.org\" under GOPATH: %v", err) + } + return filepath.Join(camliDir, filepath.FromSlash("third_party/glitch/npc_piggy__x1_walk_png_1354829432.png")) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/README b/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/README new file mode 100644 index 00000000..73ff3092 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/README @@ -0,0 +1,19 @@ +Foursquare Importer +=================== + +This is an incomplete Camlistore importer for Foursquare. 
+ +To use: + +1) Visit https://foursquare.com/developers/apps and "Create a new app" + to get a Foursquare Client ID and secret. +2) Start the devcam server with foursquareapikey flag: + $ devcam server -foursquareapikey=<clientID>:<secret> +3) Navigate to http://<host>/importer-foursquare/login +4) Watch import progress on the command line + + +TODO: + +https://github.com/camlistore/camlistore/issues?q=is%3Aopen+is%3Aissue+foursquare + diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/api.go b/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/api.go new file mode 100644 index 00000000..e3048174 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/api.go @@ -0,0 +1,108 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Types for Foursquare's JSON API. + +package foursquare + +type user struct { + Id string + FirstName string + LastName string +} + +type userInfo struct { + Response struct { + User user + } +} + +type checkinsList struct { + Response struct { + Checkins struct { + Items []*checkinItem + } + } +} + +type checkinItem struct { + Id string + CreatedAt int64 // unix time in seconds from 4sq + TimeZoneOffset int // offset in minutes. positive is east. + Shout string // "Message from check-in, if present and visible to the acting user." 
+ Venue venueItem +} + +type venueItem struct { + Id string // eg 42474900f964a52087201fe3 from 4sq + Name string + Location *venueLocationItem + Categories []*venueCategory +} + +type photosList struct { + Response struct { + Photos struct { + Items []*photoItem + } + } +} + +type photoItem struct { + Id string + Prefix string + Suffix string + Width int + Height int +} + +func (vi *venueItem) primaryCategory() *venueCategory { + for _, c := range vi.Categories { + if c.Primary { + return c + } + } + return nil +} + +func (vi *venueItem) icon() string { + c := vi.primaryCategory() + if c == nil || c.Icon == nil || c.Icon.Prefix == "" { + return "" + } + return c.Icon.Prefix + "bg_88" + c.Icon.Suffix +} + +type venueLocationItem struct { + Address string + City string + PostalCode string + State string + Country string // 4sq provides "US" + Lat float64 + Lng float64 +} + +type venueCategory struct { + Primary bool + Name string + Icon *categoryIcon +} + +type categoryIcon struct { + Prefix string + Suffix string +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/foursquare.go b/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/foursquare.go new file mode 100644 index 00000000..e8f2ed75 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/foursquare.go @@ -0,0 +1,520 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package foursquare implements an importer for foursquare.com accounts. +package foursquare + +import ( + "fmt" + "log" + "net/http" + "net/url" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/schema/nodeattr" + "camlistore.org/third_party/code.google.com/p/goauth2/oauth" +) + +const ( + apiURL = "https://api.foursquare.com/v2/" + authURL = "https://foursquare.com/oauth2/authenticate" + tokenURL = "https://foursquare.com/oauth2/access_token" + + apiVersion = "20140225" + checkinsAPIPath = "users/self/checkins" + + // runCompleteVersion is a cache-busting version number of the + // importer code. It should be incremented whenever the + // behavior of this importer is updated enough to warrant a + // complete run. Otherwise, if the importer runs to + // completion, this version number is recorded on the account + // permanode and subsequent importers can stop early. 
+ runCompleteVersion = "1" + + // Permanode attributes on account node: + acctAttrUserId = "foursquareUserId" + acctAttrUserFirst = "foursquareFirstName" + acctAttrUserLast = "foursquareLastName" + acctAttrAccessToken = "oauthAccessToken" + + checkinsRequestLimit = 100 // max number of checkins we will ask for in a checkins list request + photosRequestLimit = 5 + + attrFoursquareId = "foursquareId" + attrFoursquareVenuePermanode = "foursquareVenuePermanode" + attrFoursquareCategoryName = "foursquareCategoryName" +) + +func init() { + importer.Register("foursquare", &imp{ + imageFileRef: make(map[string]blob.Ref), + }) +} + +var _ importer.ImporterSetupHTMLer = (*imp)(nil) + +type imp struct { + mu sync.Mutex // guards following + imageFileRef map[string]blob.Ref // url to file schema blob + + importer.OAuth2 // for CallbackRequestAccount and CallbackURLParameters +} + +func (im *imp) NeedsAPIKey() bool { return true } +func (im *imp) SupportsIncremental() bool { return true } + +func (im *imp) IsAccountReady(acctNode *importer.Object) (ok bool, err error) { + if acctNode.Attr(acctAttrUserId) != "" && acctNode.Attr(acctAttrAccessToken) != "" { + return true, nil + } + return false, nil +} + +func (im *imp) SummarizeAccount(acct *importer.Object) string { + ok, err := im.IsAccountReady(acct) + if err != nil { + return "Not configured; error = " + err.Error() + } + if !ok { + return "Not configured" + } + if acct.Attr(acctAttrUserFirst) == "" && acct.Attr(acctAttrUserLast) == "" { + return fmt.Sprintf("userid %s", acct.Attr(acctAttrUserId)) + } + return fmt.Sprintf("userid %s (%s %s)", acct.Attr(acctAttrUserId), + acct.Attr(acctAttrUserFirst), acct.Attr(acctAttrUserLast)) +} + +func (im *imp) AccountSetupHTML(host *importer.Host) string { + base := host.ImporterBaseURL() + "foursquare" + return fmt.Sprintf(` +

    Configuring Foursquare

    +

    Visit https://foursquare.com/developers/apps and click "Create a new app".

    +

    Use the following settings:

    +
      +
    • Download / welcome page url: %s
    • +
    • Your privacy policy url: %s
    • +
    • Redirect URI(s): %s
    • +
    +

    Click "SAVE CHANGES". Copy the "Client ID" and "Client Secret" into the boxes above.

    +`, base, base+"/privacy", base+"/callback") +} + +// A run is our state for a given run of the importer. +type run struct { + *importer.RunContext + im *imp + incremental bool // whether we've completed a run in the past + + mu sync.Mutex // guards anyErr + anyErr bool +} + +func (r *run) token() string { + return r.RunContext.AccountNode().Attr(acctAttrAccessToken) +} + +func (im *imp) Run(ctx *importer.RunContext) error { + r := &run{ + RunContext: ctx, + im: im, + incremental: ctx.AccountNode().Attr(importer.AcctAttrCompletedVersion) == runCompleteVersion, + } + + if err := r.importCheckins(); err != nil { + return err + } + + r.mu.Lock() + anyErr := r.anyErr + r.mu.Unlock() + + if !anyErr { + if err := r.AccountNode().SetAttrs(importer.AcctAttrCompletedVersion, runCompleteVersion); err != nil { + return err + } + } + + return nil +} + +func (r *run) errorf(format string, args ...interface{}) { + log.Printf(format, args...) + r.mu.Lock() + defer r.mu.Unlock() + r.anyErr = true +} + +// urlFileRef slurps urlstr from the net, writes to a file and returns its +// fileref or "" on error +func (r *run) urlFileRef(urlstr, filename string) string { + im := r.im + im.mu.Lock() + if br, ok := im.imageFileRef[urlstr]; ok { + im.mu.Unlock() + return br.String() + } + im.mu.Unlock() + + res, err := r.HTTPClient().Get(urlstr) + if err != nil { + log.Printf("couldn't get image: %v", err) + return "" + } + defer res.Body.Close() + + fileRef, err := schema.WriteFileFromReader(r.Host.Target(), filename, res.Body) + if err != nil { + r.errorf("couldn't write file: %v", err) + return "" + } + + im.mu.Lock() + defer im.mu.Unlock() + im.imageFileRef[urlstr] = fileRef + return fileRef.String() +} + +type byCreatedAt []*checkinItem + +func (s byCreatedAt) Less(i, j int) bool { return s[i].CreatedAt < s[j].CreatedAt } +func (s byCreatedAt) Len() int { return len(s) } +func (s byCreatedAt) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (r *run) importCheckins() error { + limit 
:= checkinsRequestLimit + offset := 0 + continueRequests := true + + for continueRequests { + resp := checkinsList{} + if err := r.im.doAPI(r.Context, r.token(), &resp, checkinsAPIPath, "limit", strconv.Itoa(limit), "offset", strconv.Itoa(offset)); err != nil { + return err + } + + itemcount := len(resp.Response.Checkins.Items) + log.Printf("foursquare: importing %d checkins (offset %d)", itemcount, offset) + if itemcount < limit { + continueRequests = false + } else { + offset += itemcount + } + + checkinsNode, err := r.getTopLevelNode("checkins", "Checkins") + if err != nil { + return err + } + + placesNode, err := r.getTopLevelNode("places", "Places") + if err != nil { + return err + } + + sort.Sort(byCreatedAt(resp.Response.Checkins.Items)) + sawOldItem := false + for _, checkin := range resp.Response.Checkins.Items { + placeNode, err := r.importPlace(placesNode, &checkin.Venue) + if err != nil { + r.errorf("Foursquare importer: error importing place %s %v", checkin.Venue.Id, err) + continue + } + + _, dup, err := r.importCheckin(checkinsNode, checkin, placeNode.PermanodeRef()) + if err != nil { + r.errorf("Foursquare importer: error importing checkin %s %v", checkin.Id, err) + continue + } + + if dup { + sawOldItem = true + } + + err = r.importPhotos(placeNode, dup) + if err != nil { + r.errorf("Foursquare importer: error importing photos for checkin %s %v", checkin.Id, err) + continue + } + } + if sawOldItem && r.incremental { + break + } + } + + return nil +} + +func (r *run) importPhotos(placeNode *importer.Object, checkinWasDup bool) error { + photosNode, err := placeNode.ChildPathObject("photos") + if err != nil { + return err + } + + if err := photosNode.SetAttrs( + nodeattr.Title, "Photos of "+placeNode.Attr("title"), + nodeattr.DefaultVisibility, "hide"); err != nil { + return err + } + + nHave := 0 + photosNode.ForeachAttr(func(key, value string) { + if strings.HasPrefix(key, "camliPath:") { + nHave++ + } + }) + nWant := photosRequestLimit + if 
checkinWasDup { + nWant = 1 + } + if nHave >= nWant { + return nil + } + + resp := photosList{} + if err := r.im.doAPI(r.Context, r.token(), &resp, + "venues/"+placeNode.Attr(attrFoursquareId)+"/photos", + "limit", strconv.Itoa(nWant)); err != nil { + return err + } + + var need []*photoItem + for _, photo := range resp.Response.Photos.Items { + attr := "camliPath:" + photo.Id + filepath.Ext(photo.Suffix) + if photosNode.Attr(attr) == "" { + need = append(need, photo) + } + } + + if len(need) > 0 { + venueTitle := placeNode.Attr(nodeattr.Title) + log.Printf("foursquare: importing %d photos for venue %s", len(need), venueTitle) + for _, photo := range need { + attr := "camliPath:" + photo.Id + filepath.Ext(photo.Suffix) + if photosNode.Attr(attr) != "" { + continue + } + url := photo.Prefix + "original" + photo.Suffix + log.Printf("foursquare: importing photo for venue %s: %s", venueTitle, url) + ref := r.urlFileRef(url, "") + if ref == "" { + r.errorf("Error slurping photo: %s", url) + continue + } + if err := photosNode.SetAttr(attr, ref); err != nil { + r.errorf("Error adding venue photo: %#v", err) + } + } + } + + return nil +} + +func (r *run) importCheckin(parent *importer.Object, checkin *checkinItem, placeRef blob.Ref) (checkinNode *importer.Object, dup bool, err error) { + checkinNode, err = parent.ChildPathObject(checkin.Id) + if err != nil { + return + } + + title := fmt.Sprintf("Checkin at %s", checkin.Venue.Name) + dup = checkinNode.Attr(nodeattr.StartDate) != "" + if err := checkinNode.SetAttrs( + attrFoursquareId, checkin.Id, + attrFoursquareVenuePermanode, placeRef.String(), + nodeattr.Type, "foursquare.com:checkin", + nodeattr.StartDate, schema.RFC3339FromTime(time.Unix(checkin.CreatedAt, 0)), + nodeattr.Title, title); err != nil { + return nil, false, err + } + return checkinNode, dup, nil +} + +func (r *run) importPlace(parent *importer.Object, place *venueItem) (*importer.Object, error) { + placeNode, err := parent.ChildPathObject(place.Id) + if 
err != nil { + return nil, err + } + + catName := "" + if cat := place.primaryCategory(); cat != nil { + catName = cat.Name + } + + icon := place.icon() + if err := placeNode.SetAttrs( + attrFoursquareId, place.Id, + nodeattr.Type, "foursquare.com:venue", + nodeattr.CamliContentImage, r.urlFileRef(icon, path.Base(icon)), + attrFoursquareCategoryName, catName, + nodeattr.Title, place.Name, + nodeattr.StreetAddress, place.Location.Address, + nodeattr.AddressLocality, place.Location.City, + nodeattr.PostalCode, place.Location.PostalCode, + nodeattr.AddressRegion, place.Location.State, + nodeattr.AddressCountry, place.Location.Country, + nodeattr.Latitude, fmt.Sprint(place.Location.Lat), + nodeattr.Longitude, fmt.Sprint(place.Location.Lng)); err != nil { + return nil, err + } + + return placeNode, nil +} + +func (r *run) getTopLevelNode(path string, title string) (*importer.Object, error) { + childObject, err := r.RootNode().ChildPathObject(path) + if err != nil { + return nil, err + } + + if err := childObject.SetAttr(nodeattr.Title, title); err != nil { + return nil, err + } + return childObject, nil +} + +func (im *imp) getUserInfo(ctx *context.Context, accessToken string) (user, error) { + var ui userInfo + if err := im.doAPI(ctx, accessToken, &ui, "users/self"); err != nil { + return user{}, err + } + if ui.Response.User.Id == "" { + return user{}, fmt.Errorf("No userid returned") + } + return ui.Response.User, nil +} + +func (im *imp) doAPI(ctx *context.Context, accessToken string, result interface{}, apiPath string, keyval ...string) error { + if len(keyval)%2 == 1 { + panic("Incorrect number of keyval arguments") + } + + form := url.Values{} + form.Set("v", apiVersion) // 4sq requires this to version their API + form.Set("oauth_token", accessToken) + for i := 0; i < len(keyval); i += 2 { + form.Set(keyval[i], keyval[i+1]) + } + + fullURL := apiURL + apiPath + res, err := doGet(ctx, fullURL, form) + if err != nil { + return err + } + err = 
httputil.DecodeJSON(res, result) + if err != nil { + log.Printf("Error parsing response for %s: %v", fullURL, err) + } + return err +} + +func doGet(ctx *context.Context, url string, form url.Values) (*http.Response, error) { + requestURL := url + "?" + form.Encode() + req, err := http.NewRequest("GET", requestURL, nil) + if err != nil { + return nil, err + } + res, err := ctx.HTTPClient().Do(req) + if err != nil { + log.Printf("Error fetching %s: %v", url, err) + return nil, err + } + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("Get request on %s failed with: %s", requestURL, res.Status) + } + return res, nil +} + +// auth returns a new oauth.Config +func auth(ctx *importer.SetupContext) (*oauth.Config, error) { + clientId, secret, err := ctx.Credentials() + if err != nil { + return nil, err + } + return &oauth.Config{ + ClientId: clientId, + ClientSecret: secret, + AuthURL: authURL, + TokenURL: tokenURL, + RedirectURL: ctx.CallbackURL(), + }, nil +} + +func (im *imp) ServeSetup(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) error { + oauthConfig, err := auth(ctx) + if err != nil { + return err + } + oauthConfig.RedirectURL = im.RedirectURL(im, ctx) + state, err := im.RedirectState(im, ctx) + if err != nil { + return err + } + http.Redirect(w, r, oauthConfig.AuthCodeURL(state), http.StatusFound) + return nil +} + +func (im *imp) ServeCallback(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) { + oauthConfig, err := auth(ctx) + if err != nil { + httputil.ServeError(w, r, fmt.Errorf("Error getting oauth config: %v", err)) + return + } + + if r.Method != "GET" { + http.Error(w, "Expected a GET", 400) + return + } + code := r.FormValue("code") + if code == "" { + http.Error(w, "Expected a code", 400) + return + } + transport := &oauth.Transport{Config: oauthConfig} + token, err := transport.Exchange(code) + log.Printf("Token = %#v, error %v", token, err) + if err != nil { + log.Printf("Token Exchange error: %v", 
err) + http.Error(w, "token exchange error", 500) + return + } + + u, err := im.getUserInfo(ctx.Context, token.AccessToken) + if err != nil { + log.Printf("Couldn't get username: %v", err) + http.Error(w, "can't get username", 500) + return + } + if err := ctx.AccountNode.SetAttrs( + acctAttrUserId, u.Id, + acctAttrUserFirst, u.FirstName, + acctAttrUserLast, u.LastName, + acctAttrAccessToken, token.AccessToken, + ); err != nil { + httputil.ServeError(w, r, fmt.Errorf("Error setting attribute: %v", err)) + return + } + http.Redirect(w, r, ctx.AccountURL(), http.StatusFound) + +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/foursquare_test.go b/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/foursquare_test.go new file mode 100644 index 00000000..9a55ced8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/foursquare_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package foursquare + +import ( + "net/http" + "testing" + + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" +) + +func TestGetUserId(t *testing.T) { + im := &imp{} + ctx := context.New(context.WithHTTPClient(&http.Client{ + Transport: httputil.NewFakeTransport(map[string]func() *http.Response{ + "https://api.foursquare.com/v2/users/self?oauth_token=footoken&v=20140225": httputil.FileResponder("testdata/users-me-res.json"), + }), + })) + defer ctx.Cancel() + inf, err := im.getUserInfo(ctx, "footoken") + if err != nil { + t.Fatal(err) + } + want := user{ + Id: "13674", + FirstName: "Brad", + LastName: "Fitzpatrick", + } + if inf != want { + t.Errorf("user info = %+v; want %+v", inf, want) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/testdata.go b/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/testdata.go new file mode 100644 index 00000000..5477ddbc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/testdata.go @@ -0,0 +1,269 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package foursquare + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "os" + "path/filepath" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/osutil" +) + +var _ importer.TestDataMaker = (*imp)(nil) + +func (im *imp) SetTestAccount(acctNode *importer.Object) error { + // TODO(mpl): refactor with twitter + return acctNode.SetAttrs( + importer.AcctAttrAccessToken, "fakeAccessToken", + importer.AcctAttrAccessTokenSecret, "fakeAccessSecret", + importer.AcctAttrUserID, "fakeUserID", + importer.AcctAttrName, "fakeName", + importer.AcctAttrUserName, "fakeScreenName", + ) +} + +func (im *imp) MakeTestData() http.RoundTripper { + + const nCheckins = 150 // Arbitrary number of checkins generated. + + // if you add another venue, make sure the venueCounter reset + // in fakeCheckinsList allows for that case to happen. + // We could use global vars instead, but don't want to pollute the + // fousquare pkg namespace. + towns := map[int]*venueLocationItem{ + 0: { + Address: "Baker street", + City: "Dublin", + PostalCode: "0", + State: "none", + Country: "Ireland", + Lat: 53.4053427, + Lng: -8.3320801, + }, + 1: { + Address: "Fish&Ships street", + City: "London", + PostalCode: "1", + State: "none", + Country: "England", + Lat: 55.3617609, + Lng: -3.4433238, + }, + 2: { + Address: "Haggis street", + City: "Glasgow", + PostalCode: "2", + State: "none", + Country: "Scotland", + Lat: 57.7394571, + Lng: -4.686997, + }, + 3: { + Address: "rue du croissant", + City: "Grenoble", + PostalCode: "38000", + State: "none", + Country: "France", + Lat: 45.1841655, + Lng: 5.7155424, + }, + 4: { + Address: "burrito street", + City: "San Francisco", + PostalCode: "94114", + State: "CA", + Country: "US", + Lat: 37.7593625, + Lng: -122.4266995, + }, + } + + // We need to compute the venueIds in advance, because the venue id is used as a parameter + // in some of the requests we need to register. 
+ var venueIds []string + for _, v := range towns { + venueIds = append(venueIds, blob.RefFromString(v.City).DigestPrefix(10)) + } + + checkinsURL := apiURL + checkinsAPIPath + checkinsListCached := make(map[int]string) + okHeader := `HTTP/1.1 200 OK +Content-Type: application/json; charset=UTF-8 + +` + + responses := make(map[string]func() *http.Response) + + // register all the checkins calls; offset varies. + for i := 0; i < nCheckins; i += checkinsRequestLimit { + url := fmt.Sprintf("%s?limit=%d&oauth_token=fakeAccessToken&offset=%d&v=%s", + checkinsURL, checkinsRequestLimit, i, apiVersion) + response := okHeader + fakeCheckinsList(i, nCheckins, towns, checkinsListCached) + responses[url] = httputil.StaticResponder(response) + } + + // register all the venue photos calls (venueId varies) + photosURL := apiURL + "venues" + photosResponse := okHeader + fakePhotosList() + for _, id := range venueIds { + url := fmt.Sprintf("%s/%s/photos?limit=%d&oauth_token=fakeAccessToken&v=%s", + photosURL, id, photosRequestLimit, apiVersion) + responses[url] = httputil.StaticResponder(photosResponse) + } + + // register the photoitem calls + pudgyPic := fakePhoto() + photoURL := "https://camlistore.org/pic/pudgy.png" + originalPhotoURL := "https://camlistore.org/original/pic/pudgy.png" + iconURL := "https://camlistore.org/bg_88/pic/pudgy.png" + responses[photoURL] = httputil.FileResponder(pudgyPic) + responses[originalPhotoURL] = httputil.FileResponder(pudgyPic) + responses[iconURL] = httputil.FileResponder(pudgyPic) + + return httputil.NewFakeTransport(responses) +} + +// fakeCheckinsList returns a JSON checkins list of checkinsRequestLimit checkin +// items, starting at offset. It stops before checkinsRequestLimit if maxCheckin is +// reached. It uses towns to populate the venues. The returned list is saved in +// cached. 
+func fakeCheckinsList(offset, maxCheckin int, towns map[int]*venueLocationItem, cached map[int]string) string { + if cl, ok := cached[offset]; ok { + return cl + } + max := offset + checkinsRequestLimit + if max > maxCheckin { + max = maxCheckin + } + var items []*checkinItem + tzCounter := 0 + venueCounter := 0 + for i := offset; i < max; i++ { + shout := fmt.Sprintf("fakeShout %d", i) + item := &checkinItem{ + Id: blob.RefFromString(shout).DigestPrefix(10), + Shout: shout, + CreatedAt: time.Now().Unix(), + TimeZoneOffset: tzCounter * 60, + Venue: fakeVenue(venueCounter, towns), + } + items = append(items, item) + tzCounter++ + venueCounter++ + if tzCounter == 24 { + tzCounter = 0 + } + if venueCounter == 5 { + venueCounter = 0 + } + } + + response := struct { + Checkins struct { + Items []*checkinItem + } + }{ + Checkins: struct { + Items []*checkinItem + }{ + Items: items, + }, + } + list, err := json.MarshalIndent(checkinsList{Response: response}, "", " ") + if err != nil { + log.Fatalf("%v", err) + } + cached[offset] = string(list) + return cached[offset] +} + +func fakeVenue(counter int, towns map[int]*venueLocationItem) venueItem { + prefix := "https://camlistore.org/" + suffix := "/pic/pudgy.png" + // TODO: add more. 
+ categories := []*venueCategory{ + { + Primary: true, + Name: "town", + Icon: &categoryIcon{ + Prefix: prefix, + Suffix: suffix, + }, + }, + } + + return venueItem{ + Id: blob.RefFromString(towns[counter].City).DigestPrefix(10), + Name: towns[counter].City, + Location: towns[counter], + Categories: categories, + } +} + +func fakePhotosList() string { + items := []*photoItem{ + fakePhotoItem(), + } + response := struct { + Photos struct { + Items []*photoItem + } + }{ + Photos: struct { + Items []*photoItem + }{ + Items: items, + }, + } + list, err := json.MarshalIndent(photosList{Response: response}, "", " ") + if err != nil { + log.Fatalf("%v", err) + } + return string(list) +} + +func fakePhotoItem() *photoItem { + prefix := "https://camlistore.org/" + suffix := "/pic/pudgy.png" + return &photoItem{ + Id: blob.RefFromString(prefix + suffix).DigestPrefix(10), + Prefix: prefix, + Suffix: suffix, + Width: 704, + Height: 186, + } +} + +// TODO(mpl): refactor with twitter +func fakePhoto() string { + camliDir, err := osutil.GoPackagePath("camlistore.org") + if err == os.ErrNotExist { + log.Fatal("Directory \"camlistore.org\" not found under GOPATH/src; are you not running with devcam?") + } + if err != nil { + log.Fatalf("Error searching for \"camlistore.org\" under GOPATH: %v", err) + } + return filepath.Join(camliDir, filepath.FromSlash("third_party/glitch/npc_piggy__x1_walk_png_1354829432.png")) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/testdata/users-me-res.json b/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/testdata/users-me-res.json new file mode 100644 index 00000000..44a51312 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/foursquare/testdata/users-me-res.json @@ -0,0 +1,791 @@ +{ + "meta": { + "code": 200 + }, + "notifications": [ + { + "type": "notificationTray", + "item": { + "unreadCount": 4 + } + } + ], + "response": { + "user": { + "id": "13674", + "firstName": "Brad", + 
"lastName": "Fitzpatrick", + "gender": "male", + "relationship": "self", + "photo": { + "prefix": "https:\/\/irs0.4sqi.net\/img\/user\/", + "suffix": "\/CKG5FOF2WMCMPD3E.jpg" + }, + "friends": { + "count": 174, + "groups": [ + { + "type": "friends", + "name": "Mutual friends", + "count": 0, + "items": [] + }, + { + "type": "others", + "name": "Other friends", + "count": 174, + "items": [ + { + "id": "83878", + "firstName": "Randal", + "lastName": "Schwartz", + "gender": "male", + "relationship": "friend", + "photo": { + "prefix": "https:\/\/irs2.4sqi.net\/img\/user\/", + "suffix": "\/41NCM3VIMA30PNZ3.jpg" + }, + "tips": { + "count": 28 + }, + "lists": { + "groups": [ + { + "type": "created", + "count": 4, + "items": [] + } + ] + }, + "homeCity": "Beaverton, OR", + "bio": "Yeah, \u2022that\u2022 Randal Schwartz. I'm also a low-carb high-fat (ketogenic) consumer, so if you have location advice around that, let me know!", + "contact": { + "phone": "", + "email": "merlyn.foursquare@stonehenge.com", + "twitter": "merlyn", + "facebook": "504874371" + } + }, + { + "id": "38677952", + "firstName": "Nori", + "lastName": "Heikkinen", + "gender": "female", + "relationship": "friend", + "photo": { + "prefix": "https:\/\/irs0.4sqi.net\/img\/user\/", + "suffix": "\/VDOVTY0YUYUJ10QK.jpg" + }, + "tips": { + "count": 4 + }, + "lists": { + "groups": [ + { + "type": "created", + "count": 5, + "items": [] + } + ] + }, + "homeCity": "Baltimore, MD", + "bio": "", + "contact": { + "email": "nori.heikkinen@gmail.com", + "twitter": "n0r1", + "facebook": "677985398" + } + }, + { + "id": "64376555", + "firstName": "Miguel", + "lastName": "de Icaza", + "gender": "male", + "relationship": "friend", + "photo": { + "prefix": "https:\/\/irs0.4sqi.net\/img\/user\/", + "suffix": "\/LP510B3INEKRTNI2.jpg" + }, + "tips": { + "count": 2 + }, + "lists": { + "groups": [ + { + "type": "created", + "count": 1, + "items": [] + } + ] + }, + "homeCity": "Boston, MA", + "bio": "", + "contact": { + "email": 
"miguel.de.icaza@gmail.com", + "facebook": "532065026" + } + }, + { + "id": "50291", + "firstName": "Nat", + "lastName": "Friedman", + "gender": "male", + "relationship": "friend", + "photo": { + "prefix": "https:\/\/irs3.4sqi.net\/img\/user\/", + "suffix": "\/50291_1259165803611.jpg" + }, + "tips": { + "count": 12 + }, + "lists": { + "groups": [ + { + "type": "created", + "count": 2, + "items": [] + } + ] + }, + "homeCity": "San Francisco", + "bio": "", + "contact": { + "phone": "", + "email": "nat@nat.org", + "twitter": "natfriedman", + "facebook": "547946582" + } + }, + { + "id": "618", + "firstName": "Chris", + "lastName": "Messina", + "gender": "male", + "relationship": "friend", + "photo": { + "prefix": "https:\/\/irs0.4sqi.net\/img\/user\/", + "suffix": "\/-31MXPPWS4MBOET0B.jpg" + }, + "tips": { + "count": 389 + }, + "lists": { + "groups": [ + { + "type": "created", + "count": 17, + "items": [] + } + ] + }, + "homeCity": "San Francisco, CA", + "bio": "Bachelor of Arts.\r\n#godfather (http:\/\/nyti.ms\/lJ6Kdj)\r\nI am not the actor.", + "contact": { + "phone": "", + "email": "chris.messina@gmail.com", + "twitter": "chrismessina", + "facebook": "502411873" + }, + "superuser": 2 + }, + { + "id": "21279572", + "firstName": "Barry", + "lastName": "Abrahamson", + "gender": "male", + "relationship": "friend", + "photo": { + "prefix": "https:\/\/irs2.4sqi.net\/img\/user\/", + "suffix": "\/MAV25J2AQ3IORLLM.jpg" + }, + "tips": { + "count": 2 + }, + "lists": { + "groups": [ + { + "type": "created", + "count": 2, + "items": [] + } + ] + }, + "homeCity": "Houston, Texas", + "bio": "", + "contact": { + "email": "barry@yourang.org", + "twitter": "bazza", + "facebook": "732341470" + } + }, + { + "id": "76974963", + "firstName": "Tiffany", + "lastName": "Precissi", + "gender": "female", + "relationship": "friend", + "photo": { + "prefix": "https:\/\/irs2.4sqi.net\/img\/user\/", + "suffix": "\/blank_girl.png" + }, + "tips": { + "count": 0 + }, + "lists": { + "groups": [ + { + 
"type": "created", + "count": 1, + "items": [] + } + ] + }, + "homeCity": "Stockton, CA", + "bio": "", + "contact": { + "email": "tifflynn@gmail.com", + "twitter": "xo_tiff4ny", + "facebook": "665546095" + } + }, + { + "id": "4478390", + "firstName": "Julie", + "lastName": "Parent", + "gender": "female", + "relationship": "friend", + "photo": { + "prefix": "https:\/\/irs2.4sqi.net\/img\/user\/", + "suffix": "\/blank_girl.png" + }, + "tips": { + "count": 0 + }, + "lists": { + "groups": [ + { + "type": "created", + "count": 1, + "items": [] + } + ] + }, + "homeCity": "San Francisco, CA", + "bio": "", + "contact": { + "email": "jparent@gmail.com", + "twitter": "jewree", + "facebook": "1513396" + } + }, + { + "id": "68959715", + "firstName": "Ekaterina", + "lastName": "Ustinova", + "gender": "female", + "relationship": "friend", + "photo": { + "prefix": "https:\/\/irs2.4sqi.net\/img\/user\/", + "suffix": "\/blank_girl.png" + }, + "tips": { + "count": 0 + }, + "lists": { + "groups": [ + { + "type": "created", + "count": 1, + "items": [] + } + ] + }, + "homeCity": "New York, NY", + "bio": "", + "contact": { + "email": "ustinoid@gmail.com", + "facebook": "100000011060984" + } + }, + { + "id": "67841916", + "firstName": "Katherine", + "lastName": "Deyo", + "gender": "female", + "relationship": "friend", + "photo": { + "prefix": "https:\/\/irs2.4sqi.net\/img\/user\/", + "suffix": "\/blank_girl.png" + }, + "tips": { + "count": 0 + }, + "lists": { + "groups": [ + { + "type": "created", + "count": 1, + "items": [] + } + ] + }, + "homeCity": "San Francisco, CA", + "bio": "I wanna be myself", + "contact": { + "email": "katherine_w_deyo@gmail.com" + } + } + ] + } + ] + }, + "tips": { + "count": 18 + }, + "homeCity": "San Francisco, CA", + "bio": "", + "contact": { + "phone": "5035551212", + "email": "brad@danga.com", + "twitter": "bradfitz", + "facebook": "500033387" + }, + "superuser": 1, + "checkinPings": "off", + "pings": false, + "type": "user", + "badges": { + "count": 65, + 
"items": [ + { + "id": "518577cd498ebaa83dc8f7e0", + "badgeId": "4ebb078f7bebd6a83f1176bd", + "name": "Hot Tamale", + "unlockMessage": "You unlocked the Hot Tamale badge!", + "description": "Rice, beans, cheese, cilantro \u2013 why eat anything else when you can get all the important food groups wrapped into one delicious pound of foil? Now pass those nachos, will ya? It\u2019s time to guac and roll.\n\nThat's 45 different Mexican restaurants! Your taste buds must be scorched. Don't worry, some tequila shots should probably fix that. Congrats on Level 10 Hot Tamale status!", + "level": 10, + "badgeText": "Rice, beans, cheese, cilantro \u2013 why eat anything else when you can get all the important food groups wrapped into one delicious pound of foil? Now pass those nachos, will ya? It\u2019s time to guac and roll.", + "levelText": "That's 45 different Mexican restaurants! Your taste buds must be scorched. Don't worry, some tequila shots should probably fix that. Congrats on Level 10 Hot Tamale status!", + "categorySummary": "Mexican Restaurants", + "image": { + "prefix": "https:\/\/playfoursquare.s3.amazonaws.com\/badge\/", + "sizes": [ + 57, + 114, + 200, + 300, + 400 + ], + "name": "\/L2RRMCA2PGOBSFRN_10.png" + }, + "unlocks": [ + { + "checkins": [ + { + "id": "518577cc498ebaa83dc8f148", + "createdAt": 1367701452, + "type": "checkin", + "shout": "Van exchange point for The Relay. 
Not actually going to church.", + "timeZoneOffset": -420, + "venue": { + "id": "4bdc6861c79cc9285e6586e9", + "name": "Crosswalk Community Church", + "contact": {}, + "location": { + "lat": 38.30073598016023, + "lng": -122.30450377008302, + "postalCode": "94558", + "cc": "US", + "country": "United States" + }, + "categories": [ + { + "id": "4bf58dd8d48988d1c1941735", + "name": "Mexican Restaurant", + "pluralName": "Mexican Restaurants", + "shortName": "Mexican", + "icon": { + "prefix": "https:\/\/ss1.4sqi.net\/img\/categories_v2\/food\/mexican_", + "suffix": ".png" + }, + "primary": true + } + ], + "verified": false, + "stats": { + "checkinsCount": 248, + "usersCount": 90, + "tipCount": 4 + } + }, + "photos": { + "count": 0, + "items": [] + }, + "posts": { + "count": 0, + "textCount": 0 + }, + "comments": { + "count": 1 + }, + "source": { + "name": "foursquare for Android", + "url": "https:\/\/foursquare.com\/download\/#\/android" + } + } + ] + } + ] + } + ] + }, + "mayorships": { + "count": 4, + "items": [] + }, + "checkins": { + "count": 3272, + "items": [ + { + "id": "53396b10498e2c3aed309903", + "createdAt": 1396271888, + "type": "checkin", + "shout": "SFO-PDX", + "timeZoneOffset": -420, + "venue": { + "id": "4a7601b6f964a520efe11fe3", + "name": "Alaska Airlines Board Room", + "contact": { + "twitter": "alaskaair" + }, + "location": { + "address": "Terminal 1", + "crossStreet": "at SFO Airport", + "lat": 37.61343253150299, + "lng": -122.3850667476654, + "postalCode": "94128", + "cc": "US", + "city": "San Francisco", + "state": "CA", + "country": "United States" + }, + "categories": [ + { + "id": "4eb1bc533b7b2c5b1d4306cb", + "name": "Airport Lounge", + "pluralName": "Airport Lounges", + "shortName": "Lounge", + "icon": { + "prefix": "https:\/\/ss1.4sqi.net\/img\/categories_v2\/travel\/airport_lounge_", + "suffix": ".png" + }, + "primary": true + } + ], + "verified": false, + "stats": { + "checkinsCount": 1318, + "usersCount": 822, + "tipCount": 22 + }, + "url": 
"http:\/\/alaskaair.com", + "likes": { + "count": 6, + "groups": [ + { + "type": "others", + "count": 6, + "items": [ + { + "id": "6446336", + "firstName": "Aaron", + "lastName": "C.", + "gender": "male", + "photo": { + "prefix": "https:\/\/irs3.4sqi.net\/img\/user\/", + "suffix": "\/BVDOLAQG4BYFXHV3.jpg" + } + }, + { + "id": "15728179", + "firstName": "Christopher", + "lastName": "P.", + "gender": "male", + "photo": { + "prefix": "https:\/\/irs3.4sqi.net\/img\/user\/", + "suffix": "\/Q5AUNW2UZDUDG10E.jpg" + } + }, + { + "id": "7709153", + "firstName": "Farhad", + "lastName": "M.", + "gender": "male", + "photo": { + "prefix": "https:\/\/irs1.4sqi.net\/img\/user\/", + "suffix": "\/VZBNN1XYBEVWEAIO.jpg" + } + }, + { + "id": "181603", + "firstName": "Jeffrey-Ryan", + "lastName": "B.", + "gender": "male", + "photo": { + "prefix": "https:\/\/irs2.4sqi.net\/img\/user\/", + "suffix": "\/FP5Q5W1FHULIZOCV.jpg" + } + } + ] + } + ], + "summary": "Aaron Chaffee, Christopher Potter, Farhad M & 3 others" + }, + "like": false, + "beenHere": { + "count": 1, + "marked": true + } + }, + "likes": { + "count": 1, + "groups": [ + { + "type": "friends", + "count": 1, + "items": [ + { + "id": "431392", + "firstName": "Owen", + "lastName": "Thomas", + "gender": "male", + "relationship": "friend", + "photo": { + "prefix": "https:\/\/irs0.4sqi.net\/img\/user\/", + "suffix": "\/GEDBNXFSYUXRUFLD.gif" + } + } + ] + } + ], + "summary": "Owen Thomas" + }, + "like": false, + "photos": { + "count": 0, + "items": [] + }, + "posts": { + "count": 0, + "textCount": 0 + }, + "comments": { + "count": 0 + }, + "source": { + "name": "foursquare for Android", + "url": "https:\/\/foursquare.com\/download\/#\/android" + } + } + ] + }, + "following": { + "count": 1, + "groups": [ + { + "type": "following", + "name": "Mutual following", + "count": 0, + "items": [] + }, + { + "type": "others", + "name": "Other following", + "count": 1, + "items": [ + { + "id": "13276", + "firstName": "Loic", + "lastName": "L.", 
+ "gender": "male", + "relationship": "followingThem", + "photo": { + "prefix": "https:\/\/irs0.4sqi.net\/img\/user\/", + "suffix": "\/MHWAEBRGHQOMJE22.jpg" + }, + "type": "celebrity", + "followers": { + "count": 8132, + "groups": [] + }, + "tips": { + "count": 16 + }, + "lists": { + "groups": [ + { + "type": "created", + "count": 1, + "items": [] + } + ] + }, + "homeCity": "San Francisco", + "bio": "LeWeb and Seesmic founder, love creating things", + "contact": { + "twitter": "loic", + "facebook": "1417669498" + } + } + ] + } + ] + }, + "requests": { + "count": 280 + }, + "lists": { + "count": 1, + "groups": [ + { + "type": "created", + "count": 1, + "items": [ + { + "id": "13674\/todos", + "name": "My to-do list", + "description": "", + "user": { + "id": "13674", + "firstName": "Brad", + "lastName": "Fitzpatrick", + "gender": "male", + "relationship": "self", + "photo": { + "prefix": "https:\/\/irs0.4sqi.net\/img\/user\/", + "suffix": "\/CKG5FOF2WMCMPD3E.jpg" + } + }, + "editable": false, + "public": false, + "collaborative": false, + "url": "\/bradfitz\/list\/todos", + "canonicalUrl": "https:\/\/foursquare.com\/bradfitz\/list\/todos", + "followers": { + "count": 0 + }, + "listItems": { + "count": 3 + } + } + ] + }, + { + "type": "followed", + "count": 0, + "items": [] + } + ] + }, + "photos": { + "count": 11, + "items": [ + { + "id": "530278c311d26be8ab5da961", + "createdAt": 1392670915, + "source": { + "name": "foursquare for Android", + "url": "https:\/\/foursquare.com\/download\/#\/android" + }, + "prefix": "https:\/\/irs1.4sqi.net\/img\/general\/", + "suffix": "\/13674_4TxZ1OeQuFwOlprqcI1lWGZN4Or2f4Oal1rGup6ZPS4.jpg", + "width": 960, + "height": 720, + "visibility": "public", + "venue": { + "id": "40870b00f964a520aaf21ee3", + "name": "The Liberties", + "contact": { + "phone": "4152826789", + "formattedPhone": "(415) 282-6789", + "twitter": "thelibertiesbar" + }, + "location": { + "address": "998 Guerrero St", + "crossStreet": "at 22nd St", + "lat": 
37.75523648445705, + "lng": -122.4232582553582, + "postalCode": "94110", + "cc": "US", + "city": "San Francisco", + "state": "CA", + "country": "United States" + }, + "categories": [ + { + "id": "4bf58dd8d48988d116941735", + "name": "Bar", + "pluralName": "Bars", + "shortName": "Bar", + "icon": { + "prefix": "https:\/\/ss1.4sqi.net\/img\/categories_v2\/nightlife\/bar_", + "suffix": ".png" + }, + "primary": true + } + ], + "verified": true, + "stats": { + "checkinsCount": 4526, + "usersCount": 2147, + "tipCount": 35 + }, + "url": "http:\/\/www.theliberties.com", + "likes": { + "count": 21, + "groups": [ + { + "type": "others", + "count": 20, + "items": [] + } + ], + "summary": "You and 20 others" + }, + "like": true, + "menu": { + "type": "Menu", + "label": "Menu", + "anchor": "View Menu", + "url": "https:\/\/foursquare.com\/v\/the-liberties\/40870b00f964a520aaf21ee3\/menu", + "mobileUrl": "https:\/\/foursquare.com\/v\/40870b00f964a520aaf21ee3\/device_menu" + }, + "beenHere": { + "count": 24, + "marked": true + }, + "venuePage": { + "id": "46953806" + }, + "storeId": "" + }, + "checkin": { + "id": "5302789511d2c9d07c49130e", + "createdAt": 1392670869, + "type": "checkin", + "timeZoneOffset": -480 + } + } + ] + }, + "scores": { + "recent": 116, + "max": 272, + "checkinsCount": 27 + }, + "createdAt": 1242876758, + "referralId": "u-13674" + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/html.go b/vendor/github.com/camlistore/camlistore/pkg/importer/html.go new file mode 100644 index 00000000..0802238e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/html.go @@ -0,0 +1,211 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package importer + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + "strings" + "time" + + "camlistore.org/pkg/blob" +) + +func (h *Host) execTemplate(w http.ResponseWriter, r *http.Request, data interface{}) { + tmplName := strings.TrimPrefix(fmt.Sprintf("%T", data), "importer.") + var buf bytes.Buffer + err := h.tmpl.ExecuteTemplate(&buf, tmplName, data) + if err != nil { + http.Error(w, fmt.Sprintf("Error executing template %q: %v", tmplName, err), 500) + return + } + w.Write(buf.Bytes()) +} + +type importersRootPage struct { + Title string + Body importersRootBody +} + +type importersRootBody struct { + Host *Host + Importers []*importer +} + +type importerPage struct { + Title string + Body importerBody +} + +type importerBody struct { + Host *Host + Importer *importer + SetupHelp template.HTML +} + +type acctPage struct { + Title string + Body acctBody +} + +type acctBody struct { + Acct *importerAcct + AcctType string + Running bool + LastStatus string + StartedAgo time.Duration // or zero if !Running + LastAgo time.Duration // non-zero if previous run && !Running + LastError string +} + +var tmpl = template.Must(template.New("root").Funcs(map[string]interface{}{ + "bloblink": func(br blob.Ref) string { + panic("should be overridden; this one won't be called") + }, +}).Parse(` +{{define "pageTop"}} + + + {{.Title}} + + +

    {{.Title}}

    +{{end}} + +{{define "pageBottom"}} + + +{{end}} + + +{{define "importersRootPage"}} + {{template "pageTop" .}} + {{template "importersRootBody" .Body}} + {{template "pageBottom"}} +{{end}} + +{{define "importersRootBody"}} +
      + {{$base := .Host.ImporterBaseURL}} + {{range .Importers}} +
    • {{.Name}}
    • + {{end}} +
    +{{end}} + + +{{define "importerPage"}} + {{template "pageTop" .}} + {{template "importerBody" .Body}} + {{template "pageBottom"}} +{{end}} + +{{define "importerBody"}} +

    [<< Back]

    +
      +
    • Importer configuration permanode: {{.Importer.Node.PermanodeRef | bloblink}}
    • +
    • Status: {{.Importer.Status}}
    • +
    + +{{if .Importer.ShowClientAuthEditForm}} +

    Client ID & Client Secret

    +
    + + + + + +
    Client ID
    Client Secret
    +
    +{{end}} + +{{.SetupHelp}} + + +

    Accounts

    + +{{if .Importer.CanAddNewAccount}} +
    + + +
    +{{end}} + +{{end}} + +{{define "acctPage"}} + {{template "pageTop" .}} + {{template "acctBody" .Body}} + {{template "pageBottom"}} +{{end}} + +{{define "acctBody"}} +

    [<< Back]

    +
      +
    • Account type: {{.AcctType}}
    • +
    • Account metadata permanode: {{.Acct.AccountObject.PermanodeRef | bloblink}}
    • +
    • Import root permanode: {{if .Acct.RootObject}}{{.Acct.RootObject.PermanodeRef | bloblink}}{{else}}(none){{end}}
    • +
    • Configured: {{.Acct.IsAccountReady}}
    • +
    • Summary: {{.Acct.AccountLinkSummary}}
    • +
    • Import interval: {{if .Acct.RefreshInterval}}{{.Acct.RefreshInterval}}{{else}}(manual){{end}}
    • +
    • Running: {{.Running}}
    • + {{if .Running}} +
    • Started: {{.StartedAgo}} ago
    • +
    • Last status: {{.LastStatus}}
    • + {{else}} + {{if .LastAgo}} +
    • Previous run: {{.LastAgo}} ago{{if .LastError}}: {{.LastError}}{{else}} (success){{end}}
    • + {{end}} + {{end}} +
    + +{{if .Acct.IsAccountReady}} +
    + {{if .Running}} + + + {{else}} + + + {{end}} +
    +{{end}} + +
    + + +
    + +
    + + +
    + +
    + + +
    + +{{end}} + +`)) diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/importer.go b/vendor/github.com/camlistore/camlistore/pkg/importer/importer.go new file mode 100644 index 00000000..ad9875e5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/importer.go @@ -0,0 +1,1335 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package importer imports content from third-party websites. +package importer + +import ( + "errors" + "fmt" + "html/template" + "log" + "net/http" + "net/url" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/jsonsign/signhandler" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/search" + "camlistore.org/pkg/server" + "camlistore.org/pkg/syncutil" + "camlistore.org/pkg/types/camtypes" +) + +const ( + attrNodeType = "camliNodeType" + nodeTypeImporter = "importer" + nodeTypeImporterAccount = "importerAccount" + + attrImporterType = "importerType" // => "twitter", "foursquare", etc + attrClientID = "authClientID" + attrClientSecret = "authClientSecret" + attrImportRoot = "importRoot" + attrImportAuto = "importAuto" // => time.Duration value ("30m") or "" for off +) + +// An Importer imports from a third-party site. +type Importer interface { + // Run runs a full or incremental import. 
+ // + // The importer should continually or periodically monitor the + // context's Done channel to exit early if requested. The + // return value should be context.ErrCanceled if the importer + // exits for that reason. + Run(*RunContext) error + + // NeedsAPIKey reports whether this importer requires an API key + // (OAuth2 client_id & client_secret, or equivalent). + // If the API only requires a username & password, or a flow to get + // an auth token per-account without an overall API key, importers + // can return false here. + NeedsAPIKey() bool + + // SupportsIncremental reports whether this importer has been optimized + // to run efficiently in regular incremental runs. (e.g. every 5 minutes + // or half hour). Eventually all importers might support this and we'll + // make it required, in which case we might delete this option. + // For now, some importers (e.g. Flickr) don't yet support this. + SupportsIncremental() bool + + // IsAccountReady reports whether the provided account node + // is configured. + IsAccountReady(acctNode *Object) (ok bool, err error) + SummarizeAccount(acctNode *Object) string + + ServeSetup(w http.ResponseWriter, r *http.Request, ctx *SetupContext) error + ServeCallback(w http.ResponseWriter, r *http.Request, ctx *SetupContext) + + // CallbackRequestAccount extracts the blobref of the importer account from + // the callback URL parameters of r. For example, it will be encoded as: + // For Twitter (OAuth1), in its own URL parameter: "acct=sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4" + // For Picasa: (OAuth2), in the OAuth2 "state" parameter: "state=acct:sha1-97911b1a5887eb5862d1c81666ba839fc1363ea1" + CallbackRequestAccount(r *http.Request) (acctRef blob.Ref, err error) + + // CallbackURLParameters uses the input importer account blobRef to build + // and return the URL parameters, that will be appended to the callback URL. 
+ CallbackURLParameters(acctRef blob.Ref) url.Values +} + +// TestDataMaker is an optional interface that may be implemented by Importers to +// generate test data locally. The returned Roundtripper will be used as the +// transport of the HTTPClient, in the RunContext that will be passed to Run +// during tests and devcam server --makethings. +// (See http://camlistore.org/issue/417). +type TestDataMaker interface { + MakeTestData() http.RoundTripper + // SetTestAccount allows an importer to set some needed attributes on the importer + // account node before a run is started. + SetTestAccount(acctNode *Object) error +} + +// ImporterSetupHTMLer is an optional interface that may be implemented by +// Importers to return some HTML to be included on the importer setup page. +type ImporterSetupHTMLer interface { + AccountSetupHTML(*Host) string +} + +var importers = make(map[string]Importer) + +// All returns the map of importer implementation name to implementation. This +// map should not be mutated. +func All() map[string]Importer { + return importers +} + +// Register registers a site-specific importer. It should only be called from init, +// and not from concurrent goroutines. +func Register(name string, im Importer) { + if _, dup := importers[name]; dup { + panic("Dup registration of importer " + name) + } + importers[name] = im +} + +func init() { + // Register the meta "importer" handler, which handles all other handlers. + blobserver.RegisterHandlerConstructor("importer", newFromConfig) +} + +// HostConfig holds the parameters to set up a Host. 
+type HostConfig struct { + BaseURL string + Prefix string // URL prefix for the importer handler + Target blobserver.StatReceiver // storage for the imported object blobs + BlobSource blob.Fetcher // for additional resources, such as twitter zip file + Signer *schema.Signer + Search search.QueryDescriber + ClientId map[string]string // optionally maps importer impl name to a clientId credential + ClientSecret map[string]string // optionally maps importer impl name to a clientSecret credential + + // HTTPClient optionally specifies how to fetch external network + // resources. The Host will use http.DefaultClient otherwise. + HTTPClient *http.Client + // TODO: add more if/when needed +} + +func NewHost(hc HostConfig) (*Host, error) { + h := &Host{ + baseURL: hc.BaseURL, + importerBase: hc.BaseURL + hc.Prefix, + imp: make(map[string]*importer), + } + var err error + h.tmpl, err = tmpl.Clone() + if err != nil { + return nil, err + } + h.tmpl = h.tmpl.Funcs(map[string]interface{}{ + "bloblink": func(br blob.Ref) template.HTML { + if h.uiPrefix == "" { + return template.HTML(br.String()) + } + return template.HTML(fmt.Sprintf("%s", h.uiPrefix, br, br)) + }, + }) + for k, impl := range importers { + h.importers = append(h.importers, k) + clientId, clientSecret := hc.ClientId[k], hc.ClientSecret[k] + if clientSecret != "" && clientId == "" { + return nil, fmt.Errorf("Invalid static configuration for importer %q: clientSecret specified without clientId", k) + } + imp := &importer{ + host: h, + name: k, + impl: impl, + clientID: clientId, + clientSecret: clientSecret, + } + h.imp[k] = imp + } + + sort.Strings(h.importers) + + h.target = hc.Target + h.blobSource = hc.BlobSource + h.signer = hc.Signer + h.search = hc.Search + h.client = hc.HTTPClient + + return h, nil +} + +func newFromConfig(ld blobserver.Loader, cfg jsonconfig.Obj) (http.Handler, error) { + hc := HostConfig{ + BaseURL: ld.BaseURL(), + Prefix: ld.MyPrefix(), + } + ClientId := make(map[string]string) + 
ClientSecret := make(map[string]string) + for k, _ := range importers { + var clientId, clientSecret string + if impConf := cfg.OptionalObject(k); impConf != nil { + clientId = impConf.OptionalString("clientID", "") + clientSecret = impConf.OptionalString("clientSecret", "") + // Special case: allow clientSecret to be of form "clientId:clientSecret" + // if the clientId is empty. + if clientId == "" && strings.Contains(clientSecret, ":") { + if f := strings.SplitN(clientSecret, ":", 2); len(f) == 2 { + clientId, clientSecret = f[0], f[1] + } + } + if err := impConf.Validate(); err != nil { + return nil, fmt.Errorf("Invalid static configuration for importer %q: %v", k, err) + } + ClientId[k] = clientId + ClientSecret[k] = clientSecret + } + } + if err := cfg.Validate(); err != nil { + return nil, err + } + hc.ClientId = ClientId + hc.ClientSecret = ClientSecret + host, err := NewHost(hc) + if err != nil { + return nil, err + } + host.didInit.Add(1) + return host, nil +} + +var _ blobserver.HandlerIniter = (*Host)(nil) + +type SetupContext struct { + *context.Context + Host *Host + AccountNode *Object + + ia *importerAcct +} + +func (sc *SetupContext) Credentials() (clientID, clientSecret string, err error) { + return sc.ia.im.credentials() +} + +func (sc *SetupContext) CallbackURL() string { + params := sc.ia.im.impl.CallbackURLParameters(sc.AccountNode.PermanodeRef()).Encode() + if params != "" { + params = "?" + params + } + return sc.Host.ImporterBaseURL() + sc.ia.im.name + "/callback" + params +} + +// AccountURL returns the URL to an account of an importer +// (http://host/importer/TYPE/sha1-sd8fsd7f8sdf7). +func (sc *SetupContext) AccountURL() string { + return sc.Host.ImporterBaseURL() + sc.ia.im.name + "/" + sc.AccountNode.PermanodeRef().String() +} + +// RunContext is the context provided for a given Run of an importer, importing +// a certain account on a certain importer. 
+type RunContext struct { + *context.Context + Host *Host + + ia *importerAcct + + mu sync.Mutex // guards following + lastProgress *ProgressMessage +} + +// CreateAccount creates a new importer account for the Host h, and the importer +// implementation named impl. It returns a RunContext setup with that account. +func CreateAccount(h *Host, impl string) (*RunContext, error) { + imp, ok := h.imp[impl] + if !ok { + return nil, fmt.Errorf("host does not have a %v importer", impl) + } + ia, err := imp.newAccount() + if err != nil { + return nil, fmt.Errorf("could not create new account for importer %v: %v", impl, err) + } + return &RunContext{ + // TODO: context plumbing + Context: context.New(context.WithHTTPClient(ia.im.host.HTTPClient())), + Host: ia.im.host, + ia: ia, + }, nil +} + +// Credentials returns the credentials for the importer. This is +// typically the OAuth1, OAuth2, or equivalent client ID (api token) +// and client secret (api secret). +func (rc *RunContext) Credentials() (clientID, clientSecret string, err error) { + return rc.ia.im.credentials() +} + +// AccountNode returns the permanode storing account information for this permanode. +// It will contain the attributes: +// * camliNodeType = "importerAccount" +// * importerType = "registered-type" +// +// You must not change the camliNodeType or importerType. +// +// You should use this permanode to store state about where your +// importer left off, if it can efficiently resume later (without +// missing anything). +func (rc *RunContext) AccountNode() *Object { return rc.ia.acct } + +// RootNode returns the initially-empty permanode storing the root +// of this account's data. You can change anything at will. This will +// typically be modeled as a dynamic directory (with camliPath:xxxx +// attributes), where each path element is either a file, object, or +// another dynamic directory. 
+func (rc *RunContext) RootNode() *Object { return rc.ia.root } + +// Host is the HTTP handler and state for managing all the importers +// linked into the binary, even if they're not configured. +type Host struct { + tmpl *template.Template + importers []string // sorted; e.g. dummy flickr foursquare picasa twitter + imp map[string]*importer + baseURL string + importerBase string + target blobserver.StatReceiver + blobSource blob.Fetcher // e.g. twitter reading zip file + search search.QueryDescriber + signer *schema.Signer + uiPrefix string // or empty if no UI handler + + // didInit is incremented by newFromConfig and marked done + // after InitHandler. Any method on Host that requires Init + // then calls didInit.Wait to guard against initialization + // races where serverinit calls InitHandler in a random + // order on start-up and different handlers access the + // not-yet-initialized Host (notably from a goroutine) + didInit sync.WaitGroup + + // HTTPClient optionally specifies how to fetch external network + // resources. Defaults to http.DefaultClient. + client *http.Client + transport http.RoundTripper +} + +// accountStatus is the JSON representation of the status of a configured importer account. +type accountStatus struct { + Name string `json:"name"` // display name + Type string `json:"type"` + Href string `json:"href"` + + StartedUnixSec int64 `json:"startedUnixSec"` // zero if not running + LastFinishedUnixSec int64 `json:"finishedUnixSec"` // zero if no previous run + LastError string `json:"lastRunError"` // empty if last run was success +} + +// AccountsStatus returns the currently configured accounts and their status for +// inclusion in the status.json document, as rendered by the web UI. 
+func (h *Host) AccountsStatus() (interface{}, []camtypes.StatusError) { + h.didInit.Wait() + var s []accountStatus + var errs []camtypes.StatusError + for _, impName := range h.importers { + imp := h.imp[impName] + accts, _ := imp.Accounts() + for _, ia := range accts { + as := accountStatus{ + Type: impName, + Href: ia.AccountURL(), + Name: ia.AccountLinkSummary(), + } + ia.mu.Lock() + if ia.current != nil { + as.StartedUnixSec = ia.lastRunStart.Unix() + } + if !ia.lastRunDone.IsZero() { + as.LastFinishedUnixSec = ia.lastRunDone.Unix() + } + if ia.lastRunErr != nil { + as.LastError = ia.lastRunErr.Error() + errs = append(errs, camtypes.StatusError{ + Error: ia.lastRunErr.Error(), + URL: ia.AccountURL(), + }) + } + ia.mu.Unlock() + s = append(s, as) + } + } + return s, errs +} + +func (h *Host) InitHandler(hl blobserver.FindHandlerByTyper) error { + if prefix, _, err := hl.FindHandlerByType("ui"); err == nil { + h.uiPrefix = prefix + } + + _, handler, err := hl.FindHandlerByType("root") + if err != nil || handler == nil { + return errors.New("importer requires a 'root' handler") + } + rh := handler.(*server.RootHandler) + searchHandler, ok := rh.SearchHandler() + if !ok { + return errors.New("importer requires a 'root' handler with 'searchRoot' defined.") + } + h.search = searchHandler + if rh.Storage == nil { + return errors.New("importer requires a 'root' handler with 'blobRoot' defined.") + } + h.target = rh.Storage + h.blobSource = rh.Storage + + _, handler, _ = hl.FindHandlerByType("jsonsign") + if sigh, ok := handler.(*signhandler.Handler); ok { + h.signer = sigh.Signer() + } + if h.signer == nil { + return errors.New("importer requires a 'jsonsign' handler") + } + h.didInit.Done() + go h.startPeriodicImporters() + return nil +} + +// ServeHTTP serves: +// http://host/importer/ +// http://host/importer/twitter/ +// http://host/importer/twitter/callback +// http://host/importer/twitter/sha1-abcabcabcabcabc (single account) +func (h *Host) ServeHTTP(w 
http.ResponseWriter, r *http.Request) { + suffix := httputil.PathSuffix(r) + seg := strings.Split(suffix, "/") + if suffix == "" || len(seg) == 0 { + h.serveImportersRoot(w, r) + return + } + impName := seg[0] + + imp, ok := h.imp[impName] + if !ok { + http.NotFound(w, r) + return + } + + if len(seg) == 1 || seg[1] == "" { + h.serveImporter(w, r, imp) + return + } + if seg[1] == "callback" { + h.serveImporterAcctCallback(w, r, imp) + return + } + acctRef, ok := blob.Parse(seg[1]) + if !ok { + http.NotFound(w, r) + return + } + h.serveImporterAccount(w, r, imp, acctRef) +} + +// Serves list of importers at http://host/importer/ +func (h *Host) serveImportersRoot(w http.ResponseWriter, r *http.Request) { + body := importersRootBody{ + Host: h, + Importers: make([]*importer, 0, len(h.imp)), + } + for _, v := range h.importers { + body.Importers = append(body.Importers, h.imp[v]) + } + h.execTemplate(w, r, importersRootPage{ + Title: "Importers", + Body: body, + }) +} + +// Serves list of accounts at http://host/importer/twitter +func (h *Host) serveImporter(w http.ResponseWriter, r *http.Request, imp *importer) { + if r.Method == "POST" { + h.serveImporterPost(w, r, imp) + return + } + + var setup string + node, _ := imp.Node() + if setuper, ok := imp.impl.(ImporterSetupHTMLer); ok && node != nil { + setup = setuper.AccountSetupHTML(h) + } + + h.execTemplate(w, r, importerPage{ + Title: "Importer - " + imp.Name(), + Body: importerBody{ + Host: h, + Importer: imp, + SetupHelp: template.HTML(setup), + }, + }) +} + +// Serves oauth callback at http://host/importer/TYPE/callback +func (h *Host) serveImporterAcctCallback(w http.ResponseWriter, r *http.Request, imp *importer) { + if r.Method != "GET" { + http.Error(w, "invalid method", 400) + return + } + acctRef, err := imp.impl.CallbackRequestAccount(r) + if err != nil { + httputil.ServeError(w, r, err) + return + } + if !acctRef.Valid() { + httputil.ServeError(w, r, errors.New("No valid blobref returned from 
CallbackRequestAccount(r)")) + return + } + ia, err := imp.account(acctRef) + if err != nil { + http.Error(w, "invalid 'acct' param: "+err.Error(), 400) + return + } + imp.impl.ServeCallback(w, r, &SetupContext{ + Context: context.TODO(), + Host: h, + AccountNode: ia.acct, + ia: ia, + }) +} + +func (h *Host) serveImporterPost(w http.ResponseWriter, r *http.Request, imp *importer) { + switch r.FormValue("mode") { + default: + http.Error(w, "Unknown mode.", 400) + case "newacct": + ia, err := imp.newAccount() + if err != nil { + http.Error(w, err.Error(), 500) + return + } + ia.setup(w, r) + return + case "saveclientidsecret": + n, err := imp.Node() + if err != nil { + http.Error(w, "Error getting node: "+err.Error(), 500) + return + } + if err := n.SetAttrs( + attrClientID, r.FormValue("clientID"), + attrClientSecret, r.FormValue("clientSecret"), + ); err != nil { + http.Error(w, "Error saving node: "+err.Error(), 500) + return + } + http.Redirect(w, r, h.ImporterBaseURL()+imp.name, http.StatusFound) + } +} + +// Serves details of accounts at http://host/importer/twitter/sha1-23098429382934 +func (h *Host) serveImporterAccount(w http.ResponseWriter, r *http.Request, imp *importer, acctRef blob.Ref) { + ia, err := imp.account(acctRef) + if err != nil { + http.Error(w, "Unknown or invalid importer account "+acctRef.String()+": "+err.Error(), 400) + return + } + ia.ServeHTTP(w, r) +} + +func (h *Host) startPeriodicImporters() { + res, err := h.search.Query(&search.SearchQuery{ + Expression: "attr:camliNodeType:importerAccount", + Describe: &search.DescribeRequest{ + Depth: 1, + }, + }) + if err != nil { + log.Printf("periodic importer search fail: %v", err) + return + } + if res.Describe == nil { + log.Printf("No describe response in search result") + return + } + for _, resBlob := range res.Blobs { + blob := resBlob.Blob + desBlob, ok := res.Describe.Meta[blob.String()] + if !ok || desBlob.Permanode == nil { + continue + } + attrs := desBlob.Permanode.Attr + if 
attrs.Get(attrNodeType) != nodeTypeImporterAccount { + panic("Search result returned non-importerAccount") + } + impType := attrs.Get("importerType") + imp, ok := h.imp[impType] + if !ok { + continue + } + ia, err := imp.account(blob) + if err != nil { + log.Printf("Can't load importer account %v for regular importing: %v", blob, err) + continue + } + go ia.maybeStart() + } +} + +var disableImporters, _ = strconv.ParseBool(os.Getenv("CAMLI_DISABLE_IMPORTERS")) + +func (ia *importerAcct) maybeStart() { + if disableImporters { + log.Printf("Importers disabled, per environment.") + return + } + acctObj, err := ia.im.host.ObjectFromRef(ia.acct.PermanodeRef()) + if err != nil { + log.Printf("Error maybe starting %v: %v", ia.acct.PermanodeRef(), err) + return + } + duration, err := time.ParseDuration(acctObj.Attr(attrImportAuto)) + if duration == 0 || err != nil { + return + } + ia.mu.Lock() + defer ia.mu.Unlock() + if ia.current != nil { + return + } + if ia.lastRunDone.After(time.Now().Add(-duration)) { + sleepFor := ia.lastRunDone.Add(duration).Sub(time.Now()) + log.Printf("%v ran recently enough. Sleeping for %v.", ia, sleepFor) + time.AfterFunc(sleepFor, ia.maybeStart) + return + } + + log.Printf("Starting regular periodic import for %v", ia) + go ia.start() +} + +// BaseURL returns the root of the whole server, without trailing +// slash. +func (h *Host) BaseURL() string { + return h.baseURL +} + +// ImporterBaseURL returns the URL base of the importer handler, +// including trailing slash. +func (h *Host) ImporterBaseURL() string { + return h.importerBase +} + +func (h *Host) Target() blobserver.StatReceiver { + return h.target +} + +func (h *Host) BlobSource() blob.Fetcher { + return h.blobSource +} + +func (h *Host) Searcher() search.QueryDescriber { return h.search } + +// importer is an importer for a certain site, but not a specific account on that site. +type importer struct { + host *Host + name string // importer name e.g. 
"twitter" + impl Importer + + // If statically configured in config file, else + // they come from the importer node's attributes. + clientID string + clientSecret string + + nodemu sync.Mutex // guards nodeCache + nodeCache *Object // or nil if unset + + acctmu sync.Mutex + acct map[blob.Ref]*importerAcct // key: account permanode +} + +func (im *importer) Name() string { return im.name } + +func (im *importer) StaticConfig() bool { return im.clientSecret != "" } + +// URL returns the importer's URL without trailing slash. +func (im *importer) URL() string { return im.host.ImporterBaseURL() + im.name } + +func (im *importer) ShowClientAuthEditForm() bool { + if im.StaticConfig() { + // Don't expose the server's statically-configured client secret + // to the user. (e.g. a hosted multi-user configuration) + return false + } + return im.impl.NeedsAPIKey() +} + +func (im *importer) CanAddNewAccount() bool { + if !im.impl.NeedsAPIKey() { + return true + } + id, sec, err := im.credentials() + return id != "" && sec != "" && err == nil +} + +func (im *importer) ClientID() (v string, err error) { + v, _, err = im.credentials() + return +} + +func (im *importer) ClientSecret() (v string, err error) { + _, v, err = im.credentials() + return +} + +func (im *importer) Status() (status string, err error) { + if !im.impl.NeedsAPIKey() { + return "no configuration required", nil + } + if im.StaticConfig() { + return "API key configured on server", nil + } + n, err := im.Node() + if err != nil { + return + } + if n.Attr(attrClientID) != "" && n.Attr(attrClientSecret) != "" { + return "API key configured on node", nil + } + return "API key (client ID & Secret) not configured", nil +} + +func (im *importer) credentials() (clientID, clientSecret string, err error) { + if im.StaticConfig() { + return im.clientID, im.clientSecret, nil + } + n, err := im.Node() + if err != nil { + return + } + return n.Attr(attrClientID), n.Attr(attrClientSecret), nil +} + +func (im *importer) 
deleteAccount(acctRef blob.Ref) { + im.acctmu.Lock() + delete(im.acct, acctRef) + im.acctmu.Unlock() +} + +func (im *importer) account(nodeRef blob.Ref) (*importerAcct, error) { + im.acctmu.Lock() + ia, ok := im.acct[nodeRef] + im.acctmu.Unlock() + if ok { + return ia, nil + } + + acct, err := im.host.ObjectFromRef(nodeRef) + if err != nil { + return nil, err + } + if acct.Attr(attrNodeType) != nodeTypeImporterAccount { + return nil, errors.New("account has wrong node type") + } + if acct.Attr(attrImporterType) != im.name { + return nil, errors.New("account has wrong importer type") + } + var root *Object + if v := acct.Attr(attrImportRoot); v != "" { + rootRef, ok := blob.Parse(v) + if !ok { + return nil, errors.New("invalid import root attribute") + } + root, err = im.host.ObjectFromRef(rootRef) + if err != nil { + return nil, err + } + } else { + root, err = im.host.NewObject() + if err != nil { + return nil, err + } + if err := acct.SetAttr(attrImportRoot, root.PermanodeRef().String()); err != nil { + return nil, err + } + } + ia = &importerAcct{ + im: im, + acct: acct, + root: root, + } + im.acctmu.Lock() + defer im.acctmu.Unlock() + im.addAccountLocked(ia) + return ia, nil +} + +func (im *importer) newAccount() (*importerAcct, error) { + acct, err := im.host.NewObject() + if err != nil { + return nil, err + } + root, err := im.host.NewObject() + if err != nil { + return nil, err + } + if err := acct.SetAttrs( + "title", fmt.Sprintf("%s account", im.name), + attrNodeType, nodeTypeImporterAccount, + attrImporterType, im.name, + attrImportRoot, root.PermanodeRef().String(), + ); err != nil { + return nil, err + } + + ia := &importerAcct{ + im: im, + acct: acct, + root: root, + } + im.acctmu.Lock() + defer im.acctmu.Unlock() + im.addAccountLocked(ia) + return ia, nil +} + +func (im *importer) addAccountLocked(ia *importerAcct) { + if im.acct == nil { + im.acct = make(map[blob.Ref]*importerAcct) + } + im.acct[ia.acct.PermanodeRef()] = ia +} + +func (im *importer) 
Accounts() ([]*importerAcct, error) { + var accts []*importerAcct + + // TODO: cache this search. invalidate when new accounts are made. + res, err := im.host.search.Query(&search.SearchQuery{ + Expression: fmt.Sprintf("attr:%s:%s attr:%s:%s", + attrNodeType, nodeTypeImporterAccount, + attrImporterType, im.name, + ), + }) + if err != nil { + return nil, err + } + for _, res := range res.Blobs { + ia, err := im.account(res.Blob) + if err != nil { + return nil, err + } + accts = append(accts, ia) + } + return accts, nil +} + +// node returns the importer node. (not specific to a certain account +// on that importer site) +// +// It is a permanode with: +// camliNodeType: "importer" +// importerType: "twitter" +// And optionally: +// authClientID: "xxx" // e.g. api token +// authClientSecret: "sdkojfsldfjlsdkf" +func (im *importer) Node() (*Object, error) { + im.nodemu.Lock() + defer im.nodemu.Unlock() + if im.nodeCache != nil { + return im.nodeCache, nil + } + + expr := fmt.Sprintf("attr:%s:%s attr:%s:%s", + attrNodeType, nodeTypeImporter, + attrImporterType, im.name, + ) + res, err := im.host.search.Query(&search.SearchQuery{ + Limit: 10, // only expect 1 + Expression: expr, + }) + if err != nil { + return nil, err + } + if len(res.Blobs) > 1 { + return nil, fmt.Errorf("Ambiguous; too many permanodes matched query %q: %v", expr, res.Blobs) + } + if len(res.Blobs) == 1 { + return im.host.ObjectFromRef(res.Blobs[0].Blob) + } + o, err := im.host.NewObject() + if err != nil { + return nil, err + } + if err := o.SetAttrs( + "title", fmt.Sprintf("%s importer", im.name), + attrNodeType, nodeTypeImporter, + attrImporterType, im.name, + ); err != nil { + return nil, err + } + + im.nodeCache = o + return o, nil +} + +// importerAcct is a long-lived type representing account +type importerAcct struct { + im *importer + acct *Object + root *Object + + mu sync.Mutex + current *RunContext // or nil if not running + stopped bool // stop requested (context canceled) + lastRunErr 
error + lastRunStart time.Time + lastRunDone time.Time +} + +func (ia *importerAcct) String() string { + return fmt.Sprintf("%v importer account, %v", ia.im.name, ia.acct.PermanodeRef()) +} + +func (ia *importerAcct) delete() error { + if err := ia.acct.SetAttrs( + attrNodeType, nodeTypeImporterAccount+"-deleted", + ); err != nil { + return err + } + ia.im.deleteAccount(ia.acct.PermanodeRef()) + return nil +} + +func (ia *importerAcct) toggleAuto() error { + old := ia.acct.Attr(attrImportAuto) + if old == "" && !ia.im.impl.SupportsIncremental() { + return fmt.Errorf("Importer %q doesn't support automatic mode.", ia.im.name) + } + var new string + if old == "" { + new = "30m" // TODO: configurable? + } + return ia.acct.SetAttrs(attrImportAuto, new) +} + +func (ia *importerAcct) IsAccountReady() (bool, error) { + return ia.im.impl.IsAccountReady(ia.acct) +} + +func (ia *importerAcct) AccountObject() *Object { return ia.acct } +func (ia *importerAcct) RootObject() *Object { return ia.root } + +func (ia *importerAcct) AccountURL() string { + return ia.im.URL() + "/" + ia.acct.PermanodeRef().String() +} + +func (ia *importerAcct) AccountLinkText() string { + return ia.acct.PermanodeRef().String() +} + +func (ia *importerAcct) AccountLinkSummary() string { + return ia.im.impl.SummarizeAccount(ia.acct) +} + +func (ia *importerAcct) RefreshInterval() time.Duration { + ds := ia.acct.Attr(attrImportAuto) + if ds == "" { + return 0 + } + d, _ := time.ParseDuration(ds) + return d +} + +func (ia *importerAcct) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method == "POST" { + ia.serveHTTPPost(w, r) + return + } + ia.mu.Lock() + defer ia.mu.Unlock() + body := acctBody{ + Acct: ia, + AcctType: fmt.Sprintf("%T", ia.im.impl), + } + if run := ia.current; run != nil { + body.Running = true + body.StartedAgo = time.Since(ia.lastRunStart) + run.mu.Lock() + body.LastStatus = fmt.Sprintf("%+v", run.lastProgress) + run.mu.Unlock() + } else if !ia.lastRunDone.IsZero() { + 
body.LastAgo = time.Since(ia.lastRunDone) + if ia.lastRunErr != nil { + body.LastError = ia.lastRunErr.Error() + } + } + title := fmt.Sprintf("%s account: ", ia.im.name) + if summary := ia.im.impl.SummarizeAccount(ia.acct); summary != "" { + title += summary + } else { + title += ia.acct.PermanodeRef().String() + } + ia.im.host.execTemplate(w, r, acctPage{ + Title: title, + Body: body, + }) +} + +func (ia *importerAcct) serveHTTPPost(w http.ResponseWriter, r *http.Request) { + // TODO: XSRF token + + switch r.FormValue("mode") { + case "": + // Nothing. + case "start": + ia.start() + case "stop": + ia.stop() + case "login": + ia.setup(w, r) + return + case "toggleauto": + if err := ia.toggleAuto(); err != nil { + http.Error(w, err.Error(), 500) + return + } + case "delete": + ia.stop() // can't hurt + if err := ia.delete(); err != nil { + http.Error(w, err.Error(), 500) + return + } + http.Redirect(w, r, ia.im.URL(), http.StatusFound) + return + default: + http.Error(w, "Unknown mode", 400) + return + } + http.Redirect(w, r, ia.AccountURL(), http.StatusFound) +} + +func (ia *importerAcct) setup(w http.ResponseWriter, r *http.Request) { + if err := ia.im.impl.ServeSetup(w, r, &SetupContext{ + Context: context.TODO(), + Host: ia.im.host, + AccountNode: ia.acct, + ia: ia, + }); err != nil { + log.Printf("%v", err) + } +} + +func (ia *importerAcct) start() { + ia.mu.Lock() + defer ia.mu.Unlock() + if ia.current != nil { + return + } + rc := &RunContext{ + // TODO: context plumbing + Context: context.New(context.WithHTTPClient(ia.im.host.HTTPClient())), + Host: ia.im.host, + ia: ia, + } + ia.current = rc + ia.stopped = false + ia.lastRunStart = time.Now() + go func() { + log.Printf("Starting %v: %s", ia, ia.AccountLinkSummary()) + err := ia.im.impl.Run(rc) + if err != nil { + log.Printf("%v error: %v", ia, err) + } else { + log.Printf("%v finished.", ia) + } + ia.mu.Lock() + defer ia.mu.Unlock() + ia.current = nil + ia.stopped = false + ia.lastRunDone = time.Now() + 
ia.lastRunErr = err + go ia.maybeStart() + }() +} + +func (ia *importerAcct) stop() { + ia.mu.Lock() + defer ia.mu.Unlock() + if ia.current == nil || ia.stopped { + return + } + ia.current.Context.Cancel() + ia.stopped = true +} + +// HTTPClient returns the HTTP client to use. +func (h *Host) HTTPClient() *http.Client { + if h.client == nil { + return http.DefaultClient + } + return h.client +} + +// HTTPTransport returns the HTTP transport to use. +func (h *Host) HTTPTransport() http.RoundTripper { + if h.transport == nil { + return http.DefaultTransport + } + return h.transport +} + +type ProgressMessage struct { + ItemsDone, ItemsTotal int + BytesDone, BytesTotal int64 +} + +func (h *Host) upload(bb *schema.Builder) (br blob.Ref, err error) { + signed, err := bb.Sign(h.signer) + if err != nil { + return + } + sb, err := blobserver.ReceiveString(h.target, signed) + if err != nil { + return + } + return sb.Ref, nil +} + +// NewObject creates a new permanode and returns its Object wrapper. +func (h *Host) NewObject() (*Object, error) { + pn, err := h.upload(schema.NewUnsignedPermanode()) + if err != nil { + return nil, err + } + // No need to do a describe query against it: we know it's + // empty (has no claims against it yet). + return &Object{h: h, pn: pn}, nil +} + +// An Object is wrapper around a permanode that the importer uses +// to synchronize. +type Object struct { + h *Host + pn blob.Ref // permanode ref + + mu sync.RWMutex + attr map[string][]string +} + +// PermanodeRef returns the permanode that this object wraps. +func (o *Object) PermanodeRef() blob.Ref { + return o.pn +} + +// Attr returns the object's attribute value for the provided attr, +// or the empty string if unset. To distinguish between unset, +// an empty string, or multiple attribute values, use Attrs. 
+func (o *Object) Attr(attr string) string { + o.mu.RLock() + defer o.mu.RUnlock() + if v := o.attr[attr]; len(v) > 0 { + return v[0] + } + return "" +} + +// Attrs returns the attribute values for the provided attr. +func (o *Object) Attrs(attr string) []string { + o.mu.RLock() + defer o.mu.RUnlock() + return o.attr[attr] +} + +// ForeachAttr runs fn for each of the object's attributes & values. +// There might be multiple values for the same attribute. +// The internal lock is held while running, so no mutations should be +// made or it will deadlock. +func (o *Object) ForeachAttr(fn func(key, value string)) { + o.mu.RLock() + defer o.mu.RUnlock() + for k, vv := range o.attr { + for _, v := range vv { + fn(k, v) + } + } +} + +// SetAttr sets the attribute key to value. +func (o *Object) SetAttr(key, value string) error { + if o.Attr(key) == value { + return nil + } + _, err := o.h.upload(schema.NewSetAttributeClaim(o.pn, key, value)) + if err != nil { + return err + } + o.mu.Lock() + defer o.mu.Unlock() + if o.attr == nil { + o.attr = make(map[string][]string) + } + o.attr[key] = []string{value} + return nil +} + +// SetAttrs sets multiple attributes. The provided keyval should be an +// even number of alternating key/value pairs to set. +func (o *Object) SetAttrs(keyval ...string) error { + _, err := o.SetAttrs2(keyval...) + return err +} + +// SetAttrs2 sets multiple attributes and returns whether there were +// any changes. The provided keyval should be an even number of +// alternating key/value pairs to set. +func (o *Object) SetAttrs2(keyval ...string) (changes bool, err error) { + if len(keyval)%2 == 1 { + panic("importer.SetAttrs: odd argument count") + } + + g := syncutil.Group{} + for i := 0; i < len(keyval); i += 2 { + key, val := keyval[i], keyval[i+1] + if val != o.Attr(key) { + changes = true + g.Go(func() error { + return o.SetAttr(key, val) + }) + } + } + return changes, g.Err() +} + +// SetAttrValues sets multi-valued attribute. 
+func (o *Object) SetAttrValues(key string, attrs []string) error { + exists := asSet(o.Attrs(key)) + actual := asSet(attrs) + o.mu.Lock() + defer o.mu.Unlock() + // add new values + for v := range actual { + if exists[v] { + delete(exists, v) + continue + } + _, err := o.h.upload(schema.NewAddAttributeClaim(o.pn, key, v)) + if err != nil { + return err + } + } + // delete unneeded values + for v := range exists { + _, err := o.h.upload(schema.NewDelAttributeClaim(o.pn, key, v)) + if err != nil { + return err + } + } + if o.attr == nil { + o.attr = make(map[string][]string) + } + o.attr[key] = attrs + return nil +} + +func asSet(elts []string) map[string]bool { + if len(elts) == 0 { + return nil + } + set := make(map[string]bool, len(elts)) + for _, elt := range elts { + set[elt] = true + } + return set +} + +// ChildPathObject returns (creating if necessary) the child object +// from the permanode o, given by the "camliPath:xxxx" attribute, +// where xxx is the provided path. +func (o *Object) ChildPathObject(path string) (*Object, error) { + return o.ChildPathObjectOrFunc(path, o.h.NewObject) +} + +// ChildPathObjectOrFunc returns the child object from the permanode o, +// given by the "camliPath:xxxx" attribute, where xxx is the provided +// path. If the path doesn't exist, the provided func should return an +// appropriate object. If the func fails, the return error is +// returned directly without any attempt to make a permanode. 
+func (o *Object) ChildPathObjectOrFunc(path string, fn func() (*Object, error)) (*Object, error) { + attrName := "camliPath:" + path + if v := o.Attr(attrName); v != "" { + br, ok := blob.Parse(v) + if !ok { + return nil, fmt.Errorf("invalid blobref %q already stored at camliPath %q", br, path) + } + return o.h.ObjectFromRef(br) + } + newObj, err := fn() + if err != nil { + return nil, err + } + if err := o.SetAttr(attrName, newObj.PermanodeRef().String()); err != nil { + return nil, err + } + return newObj, nil +} + +// ObjectFromRef returns the object given by the named permanode +func (h *Host) ObjectFromRef(permanodeRef blob.Ref) (*Object, error) { + res, err := h.search.Describe(&search.DescribeRequest{ + BlobRef: permanodeRef, + Depth: 1, + }) + if err != nil { + return nil, err + } + db, ok := res.Meta[permanodeRef.String()] + if !ok { + return nil, fmt.Errorf("permanode %v wasn't in Describe response", permanodeRef) + } + if db.Permanode == nil { + return nil, fmt.Errorf("permanode %v had no DescribedPermanode in Describe response", permanodeRef) + } + return &Object{ + h: h, + pn: permanodeRef, + attr: map[string][]string(db.Permanode.Attr), + }, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/importer_test.go b/vendor/github.com/camlistore/camlistore/pkg/importer/importer_test.go new file mode 100644 index 00000000..b80ce8f4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/importer_test.go @@ -0,0 +1,66 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package importer + +import ( + "testing" + + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/test" +) + +func init() { + Register("dummy1", TODOImporter) + Register("dummy2", TODOImporter) +} + +func TestStaticConfig(t *testing.T) { + ld := test.NewLoader() + h, err := newFromConfig(ld, jsonconfig.Obj{ + "dummy1": map[string]interface{}{ + "clientID": "id1", + "clientSecret": "secret1", + }, + "dummy2": map[string]interface{}{ + "clientSecret": "id2:secret2", + }, + }) + if err != nil { + t.Fatal(err) + } + host := h.(*Host) + if g, w := host.imp["dummy1"].clientID, "id1"; g != w { + t.Errorf("dummy1 id = %q; want %q", g, w) + } + if g, w := host.imp["dummy1"].clientSecret, "secret1"; g != w { + t.Errorf("dummy1 secret = %q; want %q", g, w) + } + if g, w := host.imp["dummy2"].clientID, "id2"; g != w { + t.Errorf("dummy2 id = %q; want %q", g, w) + } + if g, w := host.imp["dummy2"].clientSecret, "secret2"; g != w { + t.Errorf("dummy2 secret = %q; want %q", g, w) + } + + if _, err := newFromConfig(ld, jsonconfig.Obj{"dummy1": map[string]interface{}{"bogus": ""}}); err == nil { + t.Errorf("expected error from unknown key") + } + + if _, err := newFromConfig(ld, jsonconfig.Obj{"dummy1": map[string]interface{}{"clientSecret": "x"}}); err == nil { + t.Errorf("expected error from secret without id") + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/noop.go b/vendor/github.com/camlistore/camlistore/pkg/importer/noop.go new file mode 100644 index 00000000..bffc04cc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/noop.go @@ -0,0 +1,57 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package importer + +import ( + "errors" + "fmt" + "net/http" +) + +var TODOImporter Importer = todoImp{} + +type todoImp struct { + OAuth1 // for CallbackRequestAccount and CallbackURLParameters +} + +func (todoImp) NeedsAPIKey() bool { return false } + +func (todoImp) SupportsIncremental() bool { return false } + +func (todoImp) Run(*RunContext) error { + return errors.New("fake error from todo importer") +} + +func (todoImp) IsAccountReady(acctNode *Object) (ok bool, err error) { + return +} + +func (todoImp) SummarizeAccount(acctNode *Object) string { return "" } + +func (todoImp) ServeSetup(w http.ResponseWriter, r *http.Request, ctx *SetupContext) error { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprintf(w, "The Setup page for the TODO importer.\nnode = %v\ncallback = %s\naccount URL = %s\n", + ctx.AccountNode, + ctx.CallbackURL(), + "ctx.AccountURL()") + return nil +} + +func (todoImp) ServeCallback(w http.ResponseWriter, r *http.Request, ctx *SetupContext) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprintf(w, "The callback page for the TODO importer.\n") +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/oauth.go b/vendor/github.com/camlistore/camlistore/pkg/importer/oauth.go new file mode 100644 index 00000000..1353cd62 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/oauth.go @@ -0,0 +1,223 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the 
License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package importer + +import ( + "errors" + "fmt" + "log" + "net/http" + "net/url" + "strings" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" + "camlistore.org/third_party/github.com/garyburd/go-oauth/oauth" +) + +const ( + AcctAttrTempToken = "oauthTempToken" + AcctAttrTempSecret = "oauthTempSecret" + AcctAttrAccessToken = "oauthAccessToken" + AcctAttrAccessTokenSecret = "oauthAccessTokenSecret" +) + +// OAuth1 provides methods that the importer implementations can use to +// help with OAuth authentication. +type OAuth1 struct{} + +func (OAuth1) CallbackRequestAccount(r *http.Request) (blob.Ref, error) { + acctRef, ok := blob.Parse(r.FormValue("acct")) + if !ok { + return blob.Ref{}, errors.New("missing 'acct=' blobref param") + } + return acctRef, nil +} + +func (OAuth1) CallbackURLParameters(acctRef blob.Ref) url.Values { + v := url.Values{} + v.Add("acct", acctRef.String()) + return v +} + +// OAuth2 provides methods that the importer implementations can use to +// help with OAuth2 authentication. 
+type OAuth2 struct{} + +func (OAuth2) CallbackRequestAccount(r *http.Request) (blob.Ref, error) { + state := r.FormValue("state") + if state == "" { + return blob.Ref{}, errors.New("missing 'state' parameter") + } + if !strings.HasPrefix(state, "acct:") { + return blob.Ref{}, errors.New("wrong 'state' parameter value, missing 'acct:' prefix.") + } + acctRef, ok := blob.Parse(strings.TrimPrefix(state, "acct:")) + if !ok { + return blob.Ref{}, errors.New("invalid account blobref in 'state' parameter") + } + return acctRef, nil +} + +func (OAuth2) CallbackURLParameters(acctRef blob.Ref) url.Values { + v := url.Values{} + v.Set("state", "acct:"+acctRef.String()) + return v +} + +// RedirectURL returns the redirect URI that imp should set in an oauth.Config +// for the authorization phase of OAuth2 authentication. +func (OAuth2) RedirectURL(imp Importer, ctx *SetupContext) string { + // We strip our callback URL of its query component, because the Redirect URI + // we send during authorization has to match exactly the registered redirect + // URI(s). This query component should be stored in the "state" parameter instead. + // See http://tools.ietf.org/html/rfc6749#section-3.1.2.2 + fullCallback := ctx.CallbackURL() + queryPart := imp.CallbackURLParameters(ctx.AccountNode.PermanodeRef()) + if len(queryPart) == 0 { + log.Printf("WARNING: callback URL %q has no query component", fullCallback) + } + u, _ := url.Parse(fullCallback) + v := u.Query() + // remove query params in CallbackURLParameters + for k := range queryPart { + v.Del(k) + } + u.RawQuery = v.Encode() + return u.String() +} + +// RedirectState returns the "state" query parameter that should be used for the authorization +// phase of OAuth2 authentication. This parameter contains the query component of the redirection +// URI. 
See http://tools.ietf.org/html/rfc6749#section-3.1.2.2 +func (OAuth2) RedirectState(imp Importer, ctx *SetupContext) (state string, err error) { + m := imp.CallbackURLParameters(ctx.AccountNode.PermanodeRef()) + state = m.Get("state") + if state == "" { + return "", errors.New("\"state\" not found in callback parameters") + } + return state, nil +} + +// IsAccountReady returns whether the account has been properly configured +// - whether the user ID and access token has been stored in the given account node. +func (OAuth2) IsAccountReady(acctNode *Object) (ok bool, err error) { + if acctNode.Attr(AcctAttrUserID) != "" && + acctNode.Attr(AcctAttrAccessToken) != "" { + return true, nil + } + return false, nil +} + +// NeedsAPIKey returns whether the importer needs an API key - returns constant true. +func (OAuth2) NeedsAPIKey() bool { return true } + +// SummarizeAccount returns a summary for the account if it is configured, +// or an error string otherwise. +func (im OAuth2) SummarizeAccount(acct *Object) string { + ok, err := im.IsAccountReady(acct) + if err != nil { + return "" + } + if !ok { + return "" + } + if acct.Attr(AcctAttrGivenName) == "" && + acct.Attr(AcctAttrFamilyName) == "" { + return fmt.Sprintf("userid %s", acct.Attr(AcctAttrUserID)) + } + return fmt.Sprintf("userid %s (%s %s)", + acct.Attr(AcctAttrUserID), + acct.Attr(AcctAttrGivenName), + acct.Attr(AcctAttrFamilyName)) +} + +// OAuthContext wraps the OAuth1 state needed to perform API calls. +// +// It is used as a value type. +type OAuthContext struct { + Ctx *context.Context + Client *oauth.Client + Creds *oauth.Credentials +} + +// Get fetches through octx the resource defined by url and the values in form. +func (octx OAuthContext) Get(url string, form url.Values) (*http.Response, error) { + if octx.Creds == nil { + return nil, errors.New("No OAuth credentials. 
Not logged in?") + } + if octx.Client == nil { + return nil, errors.New("No OAuth client.") + } + res, err := octx.Client.Get(octx.Ctx.HTTPClient(), octx.Creds, url, form) + if err != nil { + return nil, fmt.Errorf("Error fetching %s: %v", url, err) + } + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("Get request on %s failed with: %s", url, res.Status) + } + return res, nil +} + +// PopulateJSONFromURL makes a GET call at apiURL, using keyval as parameters of +// the associated form. The JSON response is decoded into result. +func (ctx OAuthContext) PopulateJSONFromURL(result interface{}, apiURL string, keyval ...string) error { + if len(keyval)%2 == 1 { + return errors.New("Incorrect number of keyval arguments. must be even.") + } + + form := url.Values{} + for i := 0; i < len(keyval); i += 2 { + form.Set(keyval[i], keyval[i+1]) + } + + hres, err := ctx.Get(apiURL, form) + if err != nil { + return err + } + err = httputil.DecodeJSON(hres, result) + if err != nil { + return fmt.Errorf("could not parse response for %s: %v", apiURL, err) + } + return err +} + +// OAuthURIs holds the URIs needed to initialize an OAuth 1 client. +type OAuthURIs struct { + TemporaryCredentialRequestURI string + ResourceOwnerAuthorizationURI string + TokenRequestURI string +} + +// NewOAuthClient returns an oauth Client configured with uris and the +// credentials obtained from ctx. 
+func (ctx *SetupContext) NewOAuthClient(uris OAuthURIs) (*oauth.Client, error) { + clientId, secret, err := ctx.Credentials() + if err != nil { + return nil, err + } + return &oauth.Client{ + TemporaryCredentialRequestURI: uris.TemporaryCredentialRequestURI, + ResourceOwnerAuthorizationURI: uris.ResourceOwnerAuthorizationURI, + TokenRequestURI: uris.TokenRequestURI, + Credentials: oauth.Credentials{ + Token: clientId, + Secret: secret, + }, + }, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/README b/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/README new file mode 100644 index 00000000..7c6a67d8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/README @@ -0,0 +1,43 @@ + +Picasa Importer +=============== + +This is a working Camlistore importer for Picasa. So far it can import +all photos but not their metadata. + +To use: + +1) Retrieve an api credential from a project of yours from + https://console.developers.google.com/ + Select/create a project, then under APIs & auth / Credentials, create a new + web application client id. + +2a) Start the devcam server with picasakey flag: + $ devcam server -verbose -picasakey='Client ID:Client secret' + +2b) Place the Client ID and the Client secret in your (low-level) server-config.json: + + "/importer-picasa/": { + "handler": "importer-picasa", + "handlerArgs": { + "apiKey": "Client ID:Client secret" + } + }, + + and start your camlistore server. + +3) Navigate to http://<server>/importer-picasa/start and authorize the app + to manage your Photos. + +4) Watch import progress on the command line (start devcam with -verbose flag). + + +TODO +---- + + * The used OAuth2 scope is for managing (read & modify) photos, but this + needs only read rights. Is a stricter scope available? + * The album's author name is not used yet, and the album's short name is needed. + * Picasa Web dumps a lot of metadata on us. Which would be usable? 
+ +See https://camlistore.org/issue/391 diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/oa2_importers.go b/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/oa2_importers.go new file mode 100644 index 00000000..169db93c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/oa2_importers.go @@ -0,0 +1,218 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package picasa + +import ( + "fmt" + "log" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + + "camlistore.org/third_party/code.google.com/p/goauth2/oauth" +) + +const ( + // acctAttrOAuthToken stores `access + " " + refresh + " " + expiry` + // See encodeToken and decodeToken. + acctAttrOAuthToken = "oauthToken" +) + +// extendedOAuth2 provides implementation for some common importer methods regarding authentication. +// +// The oauthConfig is used in the authentications - think Scope and AuthURL. +// +// The getUserInfo function (if provided) should return the +// user ID, first name and last name of the user. +type extendedOAuth2 struct { + importer.OAuth2 + oauthConfig oauth.Config + getUserInfo func(ctx *context.Context) (*userInfo, error) +} + +// newExtendedOAuth2 returns a default implementation of +// some common methods for OAuth2-based importers. 
+func newExtendedOAuth2(oauthConfig oauth.Config, + getUserInfo func(ctx *context.Context) (*userInfo, error), +) extendedOAuth2 { + return extendedOAuth2{oauthConfig: oauthConfig, getUserInfo: getUserInfo} +} + +func (extendedOAuth2) IsAccountReady(acctNode *importer.Object) (ok bool, err error) { + if acctNode.Attr(importer.AcctAttrUserID) != "" && acctNode.Attr(acctAttrOAuthToken) != "" { + return true, nil + } + return false, nil +} + +func (im extendedOAuth2) SummarizeAccount(acct *importer.Object) string { + ok, err := im.IsAccountReady(acct) + if err != nil || !ok { + return "" + } + if acct.Attr(importer.AcctAttrGivenName) == "" && acct.Attr(importer.AcctAttrFamilyName) == "" { + return fmt.Sprintf("userid %s", acct.Attr(importer.AcctAttrUserID)) + } + return fmt.Sprintf("userid %s (%s %s)", + acct.Attr(importer.AcctAttrUserID), + acct.Attr(importer.AcctAttrGivenName), + acct.Attr(importer.AcctAttrFamilyName)) +} + +func (im extendedOAuth2) ServeSetup(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) error { + oauthConfig, err := im.auth(ctx) + if err == nil { + // we will get back this with the token, so use it for preserving account info + state := "acct:" + ctx.AccountNode.PermanodeRef().String() + http.Redirect(w, r, oauthConfig.AuthCodeURL(state), 302) + } + return err +} + +// CallbackURLParameters returns the needed callback parameters - empty for Google Picasa. +func (im extendedOAuth2) CallbackURLParameters(acctRef blob.Ref) url.Values { + return url.Values{} +} + +// notOAuthTransport returns c's Transport, or its underlying transport if c.Transport +// is an OAuth Transport. 
+func notOAuthTransport(c *http.Client) (tr http.RoundTripper) { + tr = c.Transport + if otr, ok := tr.(*oauth.Transport); ok { + tr = otr.Transport + } + return +} + +func (im extendedOAuth2) ServeCallback(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) { + if im.getUserInfo == nil { + panic("No getUserInfo is provided, don't use the default ServeCallback!") + } + + oauthConfig, err := im.auth(ctx) + if err != nil { + httputil.ServeError(w, r, fmt.Errorf("Error getting oauth config: %v", err)) + return + } + + if r.Method != "GET" { + http.Error(w, "Expected a GET", 400) + return + } + code := r.FormValue("code") + if code == "" { + http.Error(w, "Expected a code", 400) + return + } + + // picago calls take an *http.Client, so we need to provide one which already + // has a transport set up correctly wrt to authentication. In particular, it + // needs to have the access token that is obtained during Exchange. + transport := &oauth.Transport{ + Config: oauthConfig, + Transport: notOAuthTransport(ctx.HTTPClient()), + } + token, err := transport.Exchange(code) + log.Printf("Token = %#v, error %v", token, err) + if err != nil { + log.Printf("Token Exchange error: %v", err) + httputil.ServeError(w, r, fmt.Errorf("token exchange error: %v", err)) + return + } + + picagoCtx := ctx.Context.New(context.WithHTTPClient(transport.Client())) + defer picagoCtx.Cancel() + + userInfo, err := im.getUserInfo(picagoCtx) + if err != nil { + log.Printf("Couldn't get username: %v", err) + httputil.ServeError(w, r, fmt.Errorf("can't get username: %v", err)) + return + } + + if err := ctx.AccountNode.SetAttrs( + importer.AcctAttrUserID, userInfo.ID, + importer.AcctAttrGivenName, userInfo.FirstName, + importer.AcctAttrFamilyName, userInfo.LastName, + acctAttrOAuthToken, encodeToken(token), + ); err != nil { + httputil.ServeError(w, r, fmt.Errorf("Error setting attribute: %v", err)) + return + } + http.Redirect(w, r, ctx.AccountURL(), http.StatusFound) +} + +// 
encodeToken encodes the oauth.Token as +// AccessToken + " " + RefreshToken + " " + Expiry.Unix() +func encodeToken(token *oauth.Token) string { + if token == nil { + return "" + } + var seconds int64 + if !token.Expiry.IsZero() { + seconds = token.Expiry.Unix() + } + return token.AccessToken + " " + token.RefreshToken + " " + strconv.FormatInt(seconds, 10) +} + +// decodeToken parses an access token, refresh token, and optional +// expiry unix timestamp separated by spaces into an oauth.Token. +// It returns as much as it can. +func decodeToken(encoded string) oauth.Token { + var t oauth.Token + f := strings.Fields(encoded) + if len(f) > 0 { + t.AccessToken = f[0] + } + if len(f) > 1 { + t.RefreshToken = f[1] + } + if len(f) > 2 && f[2] != "0" { + sec, err := strconv.ParseInt(f[2], 10, 64) + if err == nil { + t.Expiry = time.Unix(sec, 0) + } + } + return t +} + +func (im extendedOAuth2) auth(ctx *importer.SetupContext) (*oauth.Config, error) { + clientId, secret, err := ctx.Credentials() + if err != nil { + return nil, err + } + conf := im.oauthConfig + conf.ClientId, conf.ClientSecret, conf.RedirectURL = clientId, secret, ctx.CallbackURL() + return &conf, nil +} + +// userInfo contains basic information about the identity of the imported +// account owner. Its use is discouraged as it might be refactored soon. +// Importer implementations should rather make their own dedicated type for +// now. +type userInfo struct { + ID string + FirstName string + LastName string +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/picasa.go b/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/picasa.go new file mode 100644 index 00000000..dbe2369e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/picasa.go @@ -0,0 +1,468 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package picasa implements an importer for picasa.com accounts. +package picasa + +// TODO: removing camliPath from gallery permanode when pic deleted from gallery + +import ( + "errors" + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "sync" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/schema/nodeattr" + "camlistore.org/pkg/search" + "camlistore.org/pkg/syncutil" + + "camlistore.org/third_party/code.google.com/p/goauth2/oauth" + "camlistore.org/third_party/github.com/tgulacsi/picago" +) + +const ( + apiURL = "https://api.picasa.com/v2/" + authURL = "https://accounts.google.com/o/oauth2/auth" + tokenURL = "https://accounts.google.com/o/oauth2/token" + scopeURL = "https://picasaweb.google.com/data/" + + // runCompleteVersion is a cache-busting version number of the + // importer code. It should be incremented whenever the + // behavior of this importer is updated enough to warrant a + // complete run. Otherwise, if the importer runs to + // completion, this version number is recorded on the account + // permanode and subsequent importers can stop early. + runCompleteVersion = "4" + + // attrPicasaId is used for both picasa photo IDs and gallery IDs. 
+ attrPicasaId = "picasaId" +) + +var _ importer.ImporterSetupHTMLer = imp{} + +type imp struct { + extendedOAuth2 +} + +func (imp) SupportsIncremental() bool { return true } + +var baseOAuthConfig = oauth.Config{ + AuthURL: authURL, + TokenURL: tokenURL, + Scope: scopeURL, + + // AccessType needs to be "offline", as the user is not here all the time; + // ApprovalPrompt needs to be "force" to be able to get a RefreshToken + // everytime, even for Re-logins, too. + // + // Source: https://developers.google.com/youtube/v3/guides/authentication#server-side-apps + AccessType: "offline", + ApprovalPrompt: "force", +} + +func init() { + importer.Register("picasa", imp{ + newExtendedOAuth2( + baseOAuthConfig, + func(ctx *context.Context) (*userInfo, error) { + u, err := picago.GetUser(ctx.HTTPClient(), "default") + if err != nil { + return nil, err + } + firstName, lastName := u.Name, "" + i := strings.LastIndex(u.Name, " ") + if i >= 0 { + firstName, lastName = u.Name[:i], u.Name[i+1:] + } + return &userInfo{ + ID: u.ID, + FirstName: firstName, + LastName: lastName, + }, nil + }), + }) +} + +func (imp) AccountSetupHTML(host *importer.Host) string { + // Picasa doesn't allow a path in the origin. Remove it. + origin := host.ImporterBaseURL() + if u, err := url.Parse(origin); err == nil { + u.Path = "" + origin = u.String() + } + + callback := host.ImporterBaseURL() + "picasa/callback" + return fmt.Sprintf(` +

    Configuring Picasa

    +

    Visit https://console.developers.google.com/ +and click "Create Project".

    +

    Then under "APIs & Auth" in the left sidebar, click on "Credentials", then click the button "Create new Client ID".

    +

    Use the following settings:

    +
      +
    • Web application
    • +
    • Authorized JavaScript origins: %s
    • +
    • Authorized Redirect URI: %s
    • +
    +

    Click "Create Client ID". Copy the "Client ID" and "Client Secret" into the boxes above.

    +`, origin, callback) +} + +// A run is our state for a given run of the importer. +type run struct { + *importer.RunContext + incremental bool // whether we've completed a run in the past + photoGate *syncutil.Gate + + mu sync.Mutex // guards anyErr + anyErr bool +} + +func (r *run) errorf(format string, args ...interface{}) { + log.Printf(format, args...) + r.mu.Lock() + defer r.mu.Unlock() + r.anyErr = true +} + +var forceFullImport, _ = strconv.ParseBool(os.Getenv("CAMLI_PICASA_FULL_IMPORT")) + +func (imp) Run(ctx *importer.RunContext) error { + clientId, secret, err := ctx.Credentials() + if err != nil { + return err + } + acctNode := ctx.AccountNode() + ocfg := baseOAuthConfig + ocfg.ClientId, ocfg.ClientSecret = clientId, secret + token := decodeToken(acctNode.Attr(acctAttrOAuthToken)) + transport := &oauth.Transport{ + Config: &ocfg, + Token: &token, + Transport: notOAuthTransport(ctx.HTTPClient()), + } + ctx.Context = ctx.Context.New(context.WithHTTPClient(transport.Client())) + + root := ctx.RootNode() + if root.Attr(nodeattr.Title) == "" { + if err := root.SetAttr(nodeattr.Title, + fmt.Sprintf("%s %s - Google/Picasa Photos", + acctNode.Attr(importer.AcctAttrGivenName), + acctNode.Attr(importer.AcctAttrFamilyName))); err != nil { + return err + } + } + + r := &run{ + RunContext: ctx, + incremental: !forceFullImport && acctNode.Attr(importer.AcctAttrCompletedVersion) == runCompleteVersion, + photoGate: syncutil.NewGate(3), + } + if err := r.importAlbums(); err != nil { + return err + } + + r.mu.Lock() + anyErr := r.anyErr + r.mu.Unlock() + if !anyErr { + if err := acctNode.SetAttrs(importer.AcctAttrCompletedVersion, runCompleteVersion); err != nil { + return err + } + } + + return nil +} + +func (r *run) importAlbums() error { + albums, err := picago.GetAlbums(r.HTTPClient(), "default") + if err != nil { + return fmt.Errorf("importAlbums: error listing albums: %v", err) + } + albumsNode, err := r.getTopLevelNode("albums", "Albums") + for _, album := 
range albums { + if r.Context.IsCanceled() { + return context.ErrCanceled + } + if err := r.importAlbum(albumsNode, album); err != nil { + return fmt.Errorf("picasa importer: error importing album %s: %v", album, err) + } + } + return nil +} + +func (r *run) importAlbum(albumsNode *importer.Object, album picago.Album) (ret error) { + if album.ID == "" { + return errors.New("album has no ID") + } + albumNode, err := albumsNode.ChildPathObject(album.ID) + if err != nil { + return fmt.Errorf("importAlbum: error listing album: %v", err) + } + + dateMod := schema.RFC3339FromTime(album.Updated) + + // Data reference: https://developers.google.com/picasa-web/docs/2.0/reference + // TODO(tgulacsi): add more album info + changes, err := albumNode.SetAttrs2( + attrPicasaId, album.ID, + nodeattr.Type, "picasaweb.google.com:album", + nodeattr.Title, album.Title, + nodeattr.DatePublished, schema.RFC3339FromTime(album.Published), + nodeattr.LocationText, album.Location, + nodeattr.Description, album.Description, + nodeattr.URL, album.URL, + ) + if err != nil { + return fmt.Errorf("error setting album attributes: %v", err) + } + if !changes && r.incremental && albumNode.Attr(nodeattr.DateModified) == dateMod { + return nil + } + defer func() { + // Don't update DateModified on the album node until + // we've successfully imported all the photos. + if ret == nil { + ret = albumNode.SetAttr(nodeattr.DateModified, dateMod) + } + }() + + log.Printf("Importing album %v: %v/%v (published %v, updated %v)", album.ID, album.Name, album.Title, album.Published, album.Updated) + + // TODO(bradfitz): GetPhotos does multiple HTTP requests to + // return a slice of all photos. My "InstantUpload/Auto + // Backup" album has 6678 photos (and growing) and this + // currently takes like 40 seconds. Fix. 
+ photos, err := picago.GetPhotos(r.HTTPClient(), "default", album.ID) + if err != nil { + return err + } + + log.Printf("Importing %d photos from album %q (%s)", len(photos), albumNode.Attr(nodeattr.Title), + albumNode.PermanodeRef()) + + var grp syncutil.Group + for i := range photos { + if r.Context.IsCanceled() { + return context.ErrCanceled + } + photo := photos[i] + r.photoGate.Start() + grp.Go(func() error { + defer r.photoGate.Done() + return r.updatePhotoInAlbum(albumNode, photo) + }) + } + return grp.Err() +} + +func (r *run) updatePhotoInAlbum(albumNode *importer.Object, photo picago.Photo) (ret error) { + if photo.ID == "" { + return errors.New("photo has no ID") + } + + getMediaBytes := func() (io.ReadCloser, error) { + log.Printf("Importing media from %v", photo.URL) + resp, err := r.HTTPClient().Get(photo.URL) + if err != nil { + return nil, fmt.Errorf("importing photo %s: %v", photo.ID, err) + } + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + return nil, fmt.Errorf("importing photo %s: status code = %d", photo.ID, resp.StatusCode) + } + return resp.Body, nil + } + + var fileRefStr string + idFilename := photo.ID + "-" + photo.Filename + photoNode, err := albumNode.ChildPathObjectOrFunc(idFilename, func() (*importer.Object, error) { + h := blob.NewHash() + rc, err := getMediaBytes() + if err != nil { + return nil, err + } + fileRef, err := schema.WriteFileFromReader(r.Host.Target(), photo.Filename, io.TeeReader(rc, h)) + if err != nil { + return nil, err + } + fileRefStr = fileRef.String() + wholeRef := blob.RefFromHash(h) + if pn, err := findExistingPermanode(r.Host.Searcher(), wholeRef); err == nil { + return r.Host.ObjectFromRef(pn) + } + return r.Host.NewObject() + }) + if err != nil { + return err + } + + const attrMediaURL = "picasaMediaURL" + if fileRefStr == "" { + fileRefStr = photoNode.Attr(nodeattr.CamliContent) + // Only re-download the source photo if its URL has changed. 
+ // Empirically this seems to work: cropping a photo in the + // photos.google.com UI causes its URL to change. And it makes + // sense, looking at the ugliness of the URLs with all their + // encoded/signed state. + if !mediaURLsEqual(photoNode.Attr(attrMediaURL), photo.URL) { + rc, err := getMediaBytes() + if err != nil { + return err + } + fileRef, err := schema.WriteFileFromReader(r.Host.Target(), photo.Filename, rc) + rc.Close() + if err != nil { + return err + } + fileRefStr = fileRef.String() + } + } + + title := strings.TrimSpace(photo.Description) + if strings.Contains(title, "\n") { + title = title[:strings.Index(title, "\n")] + } + if title == "" && schema.IsInterestingTitle(photo.Filename) { + title = photo.Filename + } + + // TODO(tgulacsi): add more attrs (comments ?) + // for names, see http://schema.org/ImageObject and http://schema.org/CreativeWork + attrs := []string{ + nodeattr.CamliContent, fileRefStr, + attrPicasaId, photo.ID, + nodeattr.Title, title, + nodeattr.Description, photo.Description, + nodeattr.LocationText, photo.Location, + nodeattr.DateModified, schema.RFC3339FromTime(photo.Updated), + nodeattr.DatePublished, schema.RFC3339FromTime(photo.Published), + nodeattr.URL, photo.PageURL, + } + if photo.Latitude != 0 || photo.Longitude != 0 { + attrs = append(attrs, + nodeattr.Latitude, fmt.Sprintf("%f", photo.Latitude), + nodeattr.Longitude, fmt.Sprintf("%f", photo.Longitude), + ) + } + if err := photoNode.SetAttrs(attrs...); err != nil { + return err + } + if err := photoNode.SetAttrValues("tag", photo.Keywords); err != nil { + return err + } + if photo.Position > 0 { + if err := albumNode.SetAttr( + nodeattr.CamliPathOrderColon+strconv.Itoa(photo.Position-1), + photoNode.PermanodeRef().String()); err != nil { + return err + } + } + + // Do this last, after we're sure the "camliContent" attribute + // has been saved successfully, because this is the one that + // causes us to do it again in the future or not. 
+ if err := photoNode.SetAttrs(attrMediaURL, photo.URL); err != nil { + return err + } + return nil +} + +func (r *run) getTopLevelNode(path string, title string) (*importer.Object, error) { + childObject, err := r.RootNode().ChildPathObject(path) + if err != nil { + return nil, err + } + + if err := childObject.SetAttr(nodeattr.Title, title); err != nil { + return nil, err + } + return childObject, nil +} + +var sensitiveAttrs = []string{ + nodeattr.Type, + attrPicasaId, + nodeattr.Title, + nodeattr.DateModified, + nodeattr.DatePublished, + nodeattr.Latitude, + nodeattr.Longitude, + nodeattr.Description, +} + +// findExistingPermanode finds an existing permanode that has a +// camliContent pointing to a file with the provided wholeRef and +// doesn't have any conflicting attributes that would prevent the +// picasa importer from re-using that permanode for its own use. +func findExistingPermanode(qs search.QueryDescriber, wholeRef blob.Ref) (pn blob.Ref, err error) { + res, err := qs.Query(&search.SearchQuery{ + Constraint: &search.Constraint{ + Permanode: &search.PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &search.Constraint{ + File: &search.FileConstraint{ + WholeRef: wholeRef, + }, + }, + }, + }, + Describe: &search.DescribeRequest{ + Depth: 1, + }, + }) + if err != nil { + return + } + if res.Describe == nil { + return pn, os.ErrNotExist + } +Res: + for _, resBlob := range res.Blobs { + br := resBlob.Blob + desBlob, ok := res.Describe.Meta[br.String()] + if !ok || desBlob.Permanode == nil { + continue + } + attrs := desBlob.Permanode.Attr + for _, attr := range sensitiveAttrs { + if attrs.Get(attr) != "" { + continue Res + } + } + return br, nil + } + return pn, os.ErrNotExist +} + +func mediaURLsEqual(a, b string) bool { + const sub = ".googleusercontent.com/" + ai := strings.Index(a, sub) + bi := strings.Index(b, sub) + if ai >= 0 && bi >= 0 { + return a[ai:] == b[bi:] + } + return a == b +} diff --git 
a/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/picasa_test.go b/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/picasa_test.go new file mode 100644 index 00000000..50cc6f00 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/picasa_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package picasa + +import ( + "net/http" + "testing" + + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" + + "camlistore.org/third_party/github.com/tgulacsi/picago" +) + +func TestGetUserId(t *testing.T) { + userID := "11047045264" + responder := httputil.FileResponder("testdata/users-me-res.xml") + ctx := context.New(context.WithHTTPClient(&http.Client{ + Transport: httputil.NewFakeTransport(map[string]func() *http.Response{ + "https://picasaweb.google.com/data/feed/api/user/default/contacts?kind=user": responder, + "https://picasaweb.google.com/data/feed/api/user/" + userID + "/contacts?kind=user": responder, + }), + })) + defer ctx.Cancel() + inf, err := picago.GetUser(ctx.HTTPClient(), "default") + if err != nil { + t.Fatal(err) + } + want := picago.User{ + ID: userID, + URI: "https://picasaweb.google.com/" + userID, + Name: "Tamás Gulácsi", + Thumbnail: "https://lh4.googleusercontent.com/-qqove344/AAAAAAAAAAI/AAAAAAABcbg/TXl3f2K9dzI/s64-c/11047045264.jpg", + } + if inf != want { + t.Errorf("user info = %+v; want %+v", inf, want) + } +} + +func TestMediaURLsEqual(t 
*testing.T) { + if !mediaURLsEqual("https://lh1.googleusercontent.com/foo.jpg", "https://lh100.googleusercontent.com/foo.jpg") { + t.Fatal("want equal") + } + if mediaURLsEqual("https://foo.com/foo.jpg", "https://bar.com/foo.jpg") { + t.Fatal("want not equal") + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/testdata.go b/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/testdata.go new file mode 100644 index 00000000..7839ce5d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/testdata.go @@ -0,0 +1,239 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package picasa + +import ( + "encoding/xml" + "fmt" + "log" + "net/http" + "os" + "path/filepath" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/osutil" + + "camlistore.org/third_party/github.com/tgulacsi/picago" +) + +var _ importer.TestDataMaker = (*imp)(nil) + +func (im *imp) SetTestAccount(acctNode *importer.Object) error { + // TODO(mpl): refactor with twitter + return acctNode.SetAttrs( + importer.AcctAttrAccessToken, "fakeAccessToken", + importer.AcctAttrAccessTokenSecret, "fakeAccessSecret", + importer.AcctAttrUserID, "fakeUserId", + importer.AcctAttrName, "fakeName", + importer.AcctAttrUserName, "fakeScreenName", + ) +} + +func (im *imp) MakeTestData() http.RoundTripper { + + const ( + apiURL = "https://picasaweb.google.com/data/feed/api" + nAlbums = 10 // Arbitrary number of albums generated. + nEntries = 3 // number of albums or photos returned in the feed at each call. + defaultUserId = "default" + ) + + albumsListCached := make(map[int]string) + okHeader := `HTTP/1.1 200 OK +Content-Type: application/json; charset=UTF-8 + +` + + responses := make(map[string]func() *http.Response) + + // register the get albums list calls + for i := 1; i < nAlbums+1; i += nEntries { + url := fmt.Sprintf("%s/user/%s?start-index=%d", apiURL, defaultUserId, i) + response := okHeader + fakeAlbumsList(i, nAlbums, nEntries, albumsListCached) + responses[url] = httputil.StaticResponder(response) + } + + // register the get album calls + for i := 1; i < nAlbums+1; i++ { + albumId := blob.RefFromString(fmt.Sprintf("Album %d", i)).DigestPrefix(10) + for j := 1; j < i+1; j += nEntries { + url := fmt.Sprintf("%s/user/%s/albumid/%s?imgmax=d&start-index=%d", apiURL, defaultUserId, albumId, j) + // Using i as nTotal argument means album N will have N photos in it. 
+ response := okHeader + fakePhotosList(j, i, nEntries) + responses[url] = httputil.StaticResponder(response) + } + } + + // register the photo download calls + pudgyPic := fakePhoto() + photoURL1 := "https://camlistore.org/pic/pudgy1.png" + photoURL2 := "https://camlistore.org/pic/pudgy2.png" + responses[photoURL1] = httputil.FileResponder(pudgyPic) + responses[photoURL2] = httputil.FileResponder(pudgyPic) + + return httputil.NewFakeTransport(responses) +} + +// fakeAlbumsList returns an xml feed of albums. The feed starts at index, and +// ends at index + nEntries (exclusive), or at nTotal (inclusive), whichever is the +// lowest. +func fakeAlbumsList(index, nTotal, nEntries int, cached map[int]string) string { + if cl, ok := cached[index]; ok { + return cl + } + + max := index + nEntries + if max > nTotal+1 { + max = nTotal + 1 + } + var entries []picago.Entry + for i := index; i < max; i++ { + entries = append(entries, fakeAlbum(i)) + } + atom := &picago.Atom{ + TotalResults: nTotal, + Entries: entries, + } + + feed, err := xml.MarshalIndent(atom, "", " ") + if err != nil { + log.Fatalf("%v", err) + } + cached[index] = string(feed) + return cached[index] +} + +func fakeAlbum(counter int) picago.Entry { + author := picago.Author{ + Name: "fakeAuthorName", + } + media := &picago.Media{ + Description: "fakeAlbumDescription", + Keywords: "fakeKeyword1,fakeKeyword2", + } + title := fmt.Sprintf("Album %d", counter) + year := time.Hour * 24 * 365 + month := year / 12 + return picago.Entry{ + ID: blob.RefFromString(title).DigestPrefix(10), + Published: time.Now().Add(-time.Duration(counter) * year), + Updated: time.Now().Add(-time.Duration(counter) * month), + Name: "fakeAlbumName", + Title: title, + Summary: "fakeAlbumSummary", + Location: "fakeAlbumLocation", + Author: author, + Media: media, + } +} + +// fakePhotosList returns an xml feed of an album's photos. 
The feed starts at +// index, and ends at index + nEntries (exclusive), or at nTotal (inclusive), +// whichever is the lowest. +func fakePhotosList(index, nTotal, nEntries int) string { + max := index + nEntries + if max > nTotal+1 { + max = nTotal + 1 + } + var entries []picago.Entry + for i := index; i < max; i++ { + entries = append(entries, fakePhotoEntry(i, nTotal)) + } + atom := &picago.Atom{ + NumPhotos: nTotal, + Entries: entries, + } + + feed, err := xml.MarshalIndent(atom, "", " ") + if err != nil { + log.Fatalf("%v", err) + } + return string(feed) +} + +func fakePhotoEntry(photoNbr int, albumNbr int) picago.Entry { + var content picago.EntryContent + if photoNbr%2 == 0 { + content = picago.EntryContent{ + URL: "https://camlistore.org/pic/pudgy1.png", + Type: "image/png", + } + } + var point string + if photoNbr%3 == 0 { + point = "37.7447124 -122.4341914" + } else { + point = "45.1822842 5.7141854" + } + mediaContent := picago.MediaContent{ + URL: "https://camlistore.org/pic/pudgy2.png", + Type: "image/png", + } + media := &picago.Media{ + Title: "fakePhotoTitle", + Description: "fakePhotoDescription", + Keywords: "fakeKeyword1,fakeKeyword2", + Content: []picago.MediaContent{mediaContent}, + } + // to be consistent, all the pics times should be anterior to their respective albums times. whatever. 
+ day := time.Hour * 24 + year := day * 365 + created := time.Now().Add(-time.Duration(photoNbr) * year) + published := created.Add(day) + updated := published.Add(day) + + exif := &picago.Exif{ + FStop: 7.7, + Make: "whatisthis?", // not obvious to me, needs doc in picago + Model: "potato", + Exposure: 7.7, + Flash: false, + FocalLength: 7.7, + ISO: 100, + Timestamp: created.Unix(), + UID: "whatisthis?", // not obvious to me, needs doc in picago + } + + title := fmt.Sprintf("Photo %d of album %d", photoNbr, albumNbr) + return picago.Entry{ + ID: blob.RefFromString(title).DigestPrefix(10), + Exif: exif, + Summary: "fakePhotoSummary", + Title: title, + Location: "fakePhotoLocation", + Published: published, + Updated: updated, + Media: media, + Point: point, + Content: content, + } +} + +// TODO(mpl): refactor with twitter +func fakePhoto() string { + camliDir, err := osutil.GoPackagePath("camlistore.org") + if err == os.ErrNotExist { + log.Fatal("Directory \"camlistore.org\" not found under GOPATH/src; are you not running with devcam?") + } + if err != nil { + log.Fatalf("Error searching for \"camlistore.org\" under GOPATH: %v", err) + } + return filepath.Join(camliDir, filepath.FromSlash("third_party/glitch/npc_piggy__x1_walk_png_1354829432.png")) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/testdata/users-me-res.xml b/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/testdata/users-me-res.xml new file mode 100644 index 00000000..c313dc69 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/picasa/testdata/users-me-res.xml @@ -0,0 +1,69 @@ + + + https://picasaweb.google.com/data/feed/api/user/default/contacts + 2014-04-14T20:44:46.102Z + + 11047045264 + + https://lh4.googleusercontent.com/-qqMg344/AAAAAAAAAAI/AAAAAAABcbg/TXl3f2K9dzI/s64-c/11047045264.jpg + + + + + + Tamás Gulácsi + https://picasaweb.google.com/11047045264 + + Picasaweb + 2 + 1 + 500 + 110415264 + Tamás + 
https://lh4.googleusercontent.com/-qqove344/AAAAAAAAAAI/AAAAAAABcbg/TXl3f2K9dzI/s64-c/11047045264.jpg + 38654705664 + 17032238989 + 2000 + + https://picasaweb.google.com/data/entry/api/user/110415264/contacts/106948621299403 + 2013-12-17T18:40:10.000Z + 2014-04-14T20:44:46.102Z + + 106948621299403 + + + + + + + Petra + https://picasaweb.google.com/106948621299403 + + 106948621299403 + Petra + https://lh5.googleusercontent.com/-CiCHgcc/AAAAAAAAAAI/AAAAAAAAAAA/moXXlYbkPsk/s64-c/106948621299403.jpg + true + true + + + https://picasaweb.google.com/data/entry/api/user/11047045264/contacts/1163008697 + 1970-01-01T00:00:00.000Z + 2014-04-14T20:44:46.102Z + + 1163008697 + + + + + + + Viktória + https://picasaweb.google.com/1163008697 + + 1163008697 + Viktória + https://lh3.googleusercontent.com/-HmzwFI/AAAAAAAAAAI/AAAAAAAAAAA/g7DJ3IovKMY/s64-c/1163008697.jpg + true + true + + diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/pinboard/pinboard.go b/vendor/github.com/camlistore/camlistore/pkg/importer/pinboard/pinboard.go new file mode 100644 index 00000000..a88d9516 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/pinboard/pinboard.go @@ -0,0 +1,346 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package pinboard imports pinboard.in posts. + +This package uses the v1 api documented here: https://pinboard.in/api. + +Note that the api document seems to use 'post' and 'bookmark' +interchangeably. 
We use 'post' everywhere in this code. + +Posts in pinboard are mutable; they can be edited or deleted. + +We handle edited posts by always reimporting everything and rewriting +any nodes. Perhaps this would become more efficient if we would first +compare the meta tag from pinboard to the meta tag we have stored to +only write the node if there are changes. + +We don't handle deleted posts. One possible approach for this would +be to import everything under a new permanode, then once it is +successful, swap the new permanode and the posts node (note: I don't +think I really understand the data model here, so this is sort of +gibberish). + +I have exchanged email with Maciej Ceglowski of pinboard, who may in +the future provide an api that lets us query what has changed. We +might want to switch to that when available to make the import process +more light-weight. +*/ +package pinboard + +import ( + "encoding/json" + "fmt" + "html/template" + "io/ioutil" + "log" + "net/http" + "strings" + "time" + + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/schema/nodeattr" + "camlistore.org/pkg/syncutil" +) + +func init() { + importer.Register("pinboard", imp{}) +} + +const ( + fetchUrl = "https://api.pinboard.in/v1/posts/all?auth_token=%s&format=json&results=%d&todt=%s" + + timeFormat = "2006-01-02T15:04:05Z" + + // pauseInterval is the time we wait between fetching batches (for + // a particualar user). This time is pretty long, but is what the + // api documentation suggests. + pauseInterval = 5 * time.Minute + + // batchLimit is the maximum number of posts we will fetch in one batch. + batchLimit = 10000 + + attrAuthToken = "authToken" + + // StatusTooManyRequests is the http status code returned by + // pinboard servers if we have made too many requests for a + // particular user. 
If we receive this status code, we should + // double the amount of time we wait before trying agian. + StatusTooManyRequests = 429 +) + +// We expect :. Sometimes pinboard calls this an +// auth token and sometimes they call it an api token. +func extractUsername(authToken string) string { + split := strings.SplitN(authToken, ":", 2) + if len(split) == 2 { + return split[0] + } else { + return "" + } +} + +type imp struct { + importer.OAuth1 // for CallbackRequestAccount and CallbackURLParameters +} + +func (imp) SupportsIncremental() bool { return false } + +func (imp) NeedsAPIKey() bool { return false } + +func (imp) IsAccountReady(acct *importer.Object) (ready bool, err error) { + ready = acct.Attr(attrAuthToken) != "" + return ready, nil +} + +func (im imp) SummarizeAccount(acct *importer.Object) string { + ok, err := im.IsAccountReady(acct) + if err != nil { + return "Not configured; error = " + err.Error() + } + if !ok { + return "Not configured" + } + return fmt.Sprintf("Pinboard account for %s", extractUsername(acct.Attr(attrAuthToken))) +} + +func (imp) ServeSetup(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) error { + return tmpl.ExecuteTemplate(w, "serveSetup", ctx) +} + +var tmpl = template.Must(template.New("root").Parse(` +{{define "serveSetup"}} +

    Configuring Pinboard Account

    +
    + + + + +
    API token (You can find it here)
    +
    +{{end}} +`)) + +func (im imp) ServeCallback(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) { + t := r.FormValue("apiToken") + if t == "" { + http.Error(w, "Expected an API Token", 400) + return + } + if extractUsername(t) == "" { + errText := fmt.Sprintf("Unable to parse %q as an api token. We expect :", t) + http.Error(w, errText, 400) + } + if err := ctx.AccountNode.SetAttrs( + attrAuthToken, t, + ); err != nil { + httputil.ServeError(w, r, fmt.Errorf("Error setting attribute: %v", err)) + return + } + http.Redirect(w, r, ctx.AccountURL(), http.StatusFound) +} + +func (im imp) Run(ctx *importer.RunContext) (err error) { + log.Printf("pinboard: Running importer.") + r := &run{ + RunContext: ctx, + im: im, + postGate: syncutil.NewGate(3), + nextCursor: time.Now().Format(timeFormat), + nextAfter: time.Now(), + lastPause: pauseInterval, + } + _, err = r.importPosts() + log.Printf("pinboard: Importer returned %v.", err) + return +} + +func (im imp) ServeHTTP(w http.ResponseWriter, r *http.Request) { + httputil.BadRequestError(w, "Unexpected path: %s", r.URL.Path) +} + +type run struct { + *importer.RunContext + im imp + postGate *syncutil.Gate + + // Return only bookmarks created before this time (exclusive bound) + nextCursor string + + // We should not fetch the next batch until this time (exclusive bound) + nextAfter time.Time + + // This gets set to pauseInterval at the beginning of each run and + // after each successful fetch. Every time we get a 429 back from + // pinboard, it gets doubled. It will be used to calculate the + // next time we fetch from pinboard. 
+ lastPause time.Duration +} + +func (r *run) getPostsNode() (*importer.Object, error) { + username := extractUsername(r.AccountNode().Attr(attrAuthToken)) + root := r.RootNode() + rootTitle := fmt.Sprintf("%s's Pinboard Account", username) + log.Printf("pinboard: root title = %q; want %q.", root.Attr(nodeattr.Title), rootTitle) + if err := root.SetAttr(nodeattr.Title, rootTitle); err != nil { + return nil, err + } + obj, err := root.ChildPathObject("posts") + if err != nil { + return nil, err + } + title := fmt.Sprintf("%s's Posts", username) + return obj, obj.SetAttr(nodeattr.Title, title) +} + +func (r *run) importPosts() (*importer.Object, error) { + authToken := r.AccountNode().Attr(attrAuthToken) + parent, err := r.getPostsNode() + if err != nil { + return nil, err + } + + keepTrying := true + for keepTrying { + keepTrying, err = r.importBatch(authToken, parent) + if err != nil { + return nil, err + } + } + + return parent, nil +} + +// Used to parse json +type apiPost struct { + Href string + Description string + Extended string + Meta string + Hash string + Time string + Shared string + ToRead string + Tags string +} + +func (r *run) importBatch(authToken string, parent *importer.Object) (keepTrying bool, err error) { + sleepDuration := r.nextAfter.Sub(time.Now()) + // block until we either get canceled or until it is time to run + select { + case <-r.Done(): + log.Printf("pinboard: Importer interrupted.") + return false, context.ErrCanceled + case <-time.After(sleepDuration): + // just proceed + } + start := time.Now() + + u := fmt.Sprintf(fetchUrl, authToken, batchLimit, r.nextCursor) + resp, err := r.HTTPClient().Get(u) + if err != nil { + return false, err + } + defer resp.Body.Close() + switch { + case resp.StatusCode == StatusTooManyRequests: + r.lastPause = r.lastPause * 2 + r.nextAfter = time.Now().Add(r.lastPause) + return true, nil + case resp.StatusCode != http.StatusOK: + return false, fmt.Errorf("Unexpected status code %v fetching %v", 
resp.StatusCode, u) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false, err + } + + var postBatch []apiPost + if err = json.Unmarshal(body, &postBatch); err != nil { + return false, err + } + + if err != nil { + return false, err + } + + postCount := len(postBatch) + if postCount == 0 { + // we are done! + return false, nil + } + + log.Printf("pinboard: Importing %d posts...", postCount) + var grp syncutil.Group + for _, post := range postBatch { + if r.Context.IsCanceled() { + log.Printf("pinboard: Importer interrupted") + return false, context.ErrCanceled + } + + post := post + r.postGate.Start() + grp.Go(func() error { + defer r.postGate.Done() + return r.importPost(&post, parent) + }) + } + + log.Printf("pinboard: Imported batch of %d posts in %s.", postCount, time.Now().Sub(start)) + + r.nextCursor = postBatch[postCount-1].Time + r.lastPause = pauseInterval + r.nextAfter = time.Now().Add(pauseInterval) + tryAgain := postCount == batchLimit + return tryAgain, grp.Err() +} + +func (r *run) importPost(post *apiPost, parent *importer.Object) error { + postNode, err := parent.ChildPathObject(post.Hash) + if err != nil { + return err + } + + t, err := time.Parse(timeFormat, post.Time) + if err != nil { + return err + } + + attrs := []string{ + "pinboard.in:hash", post.Hash, + nodeattr.Type, "pinboard.in:post", + nodeattr.DateCreated, schema.RFC3339FromTime(t), + nodeattr.Title, post.Description, + nodeattr.URL, post.Href, + "pinboard.in:extended", post.Extended, + "pinboard.in:meta", post.Meta, + "pinboard.in:shared", post.Shared, + "pinboard.in:toread", post.ToRead, + } + if err = postNode.SetAttrs(attrs...); err != nil { + return err + } + if err = postNode.SetAttrValues("tag", strings.Split(post.Tags, " ")); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/pinboard/pinboard_test.go b/vendor/github.com/camlistore/camlistore/pkg/importer/pinboard/pinboard_test.go new file 
mode 100644 index 00000000..5eb696aa --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/pinboard/pinboard_test.go @@ -0,0 +1,196 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pinboard + +import ( + "fmt" + "net/http" + "os" + "strings" + "testing" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/client" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/schema/nodeattr" + "camlistore.org/pkg/test" +) + +func verifyUsername(t *testing.T, apiToken string, expected string) { + extracted := extractUsername(apiToken) + if extracted != expected { + t.Errorf("Testing %q: user name is %q when we want %q", apiToken, extracted, expected) + } +} + +func TestExtractUsername(t *testing.T) { + verifyUsername(t, "gina:foo", "gina") + verifyUsername(t, "", "") +} + +func findChildRefs(parent *importer.Object) ([]blob.Ref, error) { + childRefs := []blob.Ref{} + var err error + parent.ForeachAttr(func(key, value string) { + if strings.HasPrefix(key, "camliPath:") { + if br, ok := blob.Parse(value); ok { + childRefs = append(childRefs, br) + return + } + if err == nil { + err = fmt.Errorf("invalid blobRef for %s attribute of %v: %q", key, parent, value) + } + } + }) + return childRefs, err +} + +func getRequiredChildPathObj(parent *importer.Object, path string) (*importer.Object, error) { + return parent.ChildPathObjectOrFunc(path, func() (*importer.Object, 
error) { + return nil, fmt.Errorf("Unable to locate child path %s of node %v", path, parent.PermanodeRef()) + }) +} + +func setupClient(w *test.World) (*client.Client, error) { + // Do the silly env vars dance to avoid the "non-hermetic use of host config panic". + if err := os.Setenv("CAMLI_KEYID", w.ClientIdentity()); err != nil { + return nil, err + } + if err := os.Setenv("CAMLI_SECRET_RING", w.SecretRingFile()); err != nil { + return nil, err + } + osutil.AddSecretRingFlag() + cl := client.New(w.ServerBaseURL()) + // This permanode is not needed in itself, but that takes care of uploading + // behind the scenes the public key to the blob server. A bit gross, but + // it's just for a test anyway. + if _, err := cl.UploadNewPermanode(); err != nil { + return nil, err + } + return cl, nil +} + +// Verify that a batch import of 3 posts works +func TestIntegrationRun(t *testing.T) { + const importerPrefix = "/importer/" + const authToken = "gina:foo" + const attrKey = "key" + const attrValue = "value" + + w := test.GetWorld(t) + baseURL := w.ServerBaseURL() + + // TODO(mpl): add a utility in integration package to provide a client that + // just works with World. 
+ cl, err := setupClient(w) + if err != nil { + t.Fatal(err) + } + signer, err := cl.Signer() + if err != nil { + t.Fatal(err) + } + clientId := map[string]string{ + "pinboard": "fakeStaticClientId", + } + clientSecret := map[string]string{ + "pinboard": "fakeStaticClientSecret", + } + + responder := httputil.FileResponder("testdata/batchresponse.json") + transport, err := httputil.NewRegexpFakeTransport([]*httputil.Matcher{ + &httputil.Matcher{`^https\://api\.pinboard\.in/v1/posts/all\?auth_token=gina:foo&format=json&results=10000&todt=\d\d\d\d.*`, responder}, + }) + if err != nil { + t.Fatal(err) + } + httpClient := &http.Client{ + Transport: transport, + } + + hc := importer.HostConfig{ + BaseURL: baseURL, + Prefix: importerPrefix, + Target: cl, + BlobSource: cl, + Signer: signer, + Search: cl, + ClientId: clientId, + ClientSecret: clientSecret, + HTTPClient: httpClient, + } + + host, err := importer.NewHost(hc) + if err != nil { + t.Fatal(err) + } + rc, err := importer.CreateAccount(host, "pinboard") + if err != nil { + t.Fatal(err) + } + err = rc.AccountNode().SetAttrs(attrAuthToken, authToken) + if err != nil { + t.Fatal(err) + } + + testee := imp{} + if err := testee.Run(rc); err != nil { + t.Fatal(err) + } + + postsNode, err := getRequiredChildPathObj(rc.RootNode(), "posts") + if err != nil { + t.Fatal(err) + } + + childRefs, err := findChildRefs(postsNode) + if err != nil { + t.Fatal(err) + } + + expectedPosts := map[string]string{ + `https://wiki.archlinux.org/index.php/xorg#Display_size_and_DPI`: "Xorg - ArchWiki", + `http://www.harihareswara.net/sumana/2014/08/17/0`: "One Way Confidence Will Look", + `http://www.wikiart.org/en/marcus-larson/fishing-near-the-fjord-by-moonlight-1862`: "Fishing Near The Fjord By Moonlight - Marcus Larson - WikiArt.org", + } + + if len(childRefs) != len(expectedPosts) { + t.Fatalf("After import, found %d child refs, want %d: %v", len(childRefs), len(expectedPosts), childRefs) + } + + for _, ref := range childRefs { + 
childNode, err := host.ObjectFromRef(ref) + if err != nil { + t.Fatal(err) + } + foundURL := childNode.Attr(nodeattr.URL) + expectedTitle, ok := expectedPosts[foundURL] + if !ok { + t.Fatalf("Found unexpected child node %v with url %q", childNode, foundURL) + } + foundTitle := childNode.Attr(nodeattr.Title) + if foundTitle != expectedTitle { + t.Fatalf("Found unexpected child node %v with title %q when we want %q", childNode, foundTitle, expectedTitle) + } + delete(expectedPosts, foundURL) + } + if len(expectedPosts) != 0 { + t.Fatalf("The following entries were expected but not found: %#v", expectedPosts) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/pinboard/testdata/batchresponse.json b/vendor/github.com/camlistore/camlistore/pkg/importer/pinboard/testdata/batchresponse.json new file mode 100644 index 00000000..0cef87f2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/pinboard/testdata/batchresponse.json @@ -0,0 +1,35 @@ +[ + { + "href": "https:\/\/wiki.archlinux.org\/index.php\/xorg#Display_size_and_DPI", + "description": "Xorg - ArchWiki", + "extended": "", + "meta": "595781d94cd21c7fba5c67738313f6ff", + "hash": "98ec0d964f002c48877e256e8c737be5", + "time": "2014-08-28T16:46:31Z", + "shared": "yes", + "toread": "yes", + "tags": "" + }, + { + "href": "http:\/\/www.harihareswara.net\/sumana\/2014\/08\/17\/0", + "description": "One Way Confidence Will Look", + "extended": "", + "meta": "728598533614784d426eaa49ee8aa0cf", + "hash": "0ad4cd6bd5b5c318694bcb759a4d24e1", + "time": "2014-08-18T17:22:10Z", + "shared": "yes", + "toread": "yes", + "tags": "feminism" + }, + { + "href": "http:\/\/www.wikiart.org\/en\/marcus-larson\/fishing-near-the-fjord-by-moonlight-1862", + "description": "Fishing Near The Fjord By Moonlight - Marcus Larson - WikiArt.org", + "extended": "", + "meta": "b2db4fe15bf13db76f76db9e4b1fbe98", + "hash": "1cf0c0bf2f4873f209e3ae71aef60e07", + "time": "2014-07-10T17:19:38Z", + "shared": "no", + 
"toread": "no", + "tags": "art" + } +] diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/README b/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/README new file mode 100644 index 00000000..08d0a525 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/README @@ -0,0 +1,6 @@ +Twitter Importer +=================== + +This is a Camlistore importer for Twitter. + +Go to http[s]://your_camlistore_server/importer/twitter diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/testdata.go b/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/testdata.go new file mode 100644 index 00000000..075d6c98 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/testdata.go @@ -0,0 +1,267 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package twitter + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "os" + "path/filepath" + "strconv" + "time" + + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/osutil" +) + +var _ importer.TestDataMaker = (*imp)(nil) + +func (im *imp) SetTestAccount(acctNode *importer.Object) error { + return acctNode.SetAttrs( + importer.AcctAttrAccessToken, "fakeAccessToken", + importer.AcctAttrAccessTokenSecret, "fakeAccessSecret", + importer.AcctAttrUserID, "fakeUserID", + importer.AcctAttrName, "fakeName", + importer.AcctAttrUserName, "fakeScreenName", + ) +} + +func (im *imp) MakeTestData() http.RoundTripper { + const ( + fakeMaxId = int64(486450108201201664) // Most recent tweet. + nTweets = 300 // Arbitrary number of tweets generated. + ) + fakeMinId := fakeMaxId - nTweets // Oldest tweet in our timeline. + + timeLineURL := apiURL + userTimeLineAPIPath + timeLineCached := make(map[int64]string) + okHeader := `HTTP/1.1 200 OK +Content-Type: application/json; charset=UTF-8 + +` + timeLineResponse := okHeader + fakeTimeLine(fakeMaxId, fakeMinId, timeLineCached) + + fakePic := fakePicture() + responses := map[string]func() *http.Response{ + timeLineURL: httputil.StaticResponder(timeLineResponse), + fmt.Sprintf("%s?count=%d&user_id=fakeUserID", timeLineURL, tweetRequestLimit): httputil.StaticResponder(timeLineResponse), + "https://twitpic.com/show/large/bar": httputil.FileResponder(fakePic), + "https://i.imgur.com/foo.gif": httputil.FileResponder(fakePic), + } + + // register all the user_timeline calls (max_id varies) that should occur, + responses[fmt.Sprintf("%s?count=%d&max_id=%d&user_id=fakeUserID", timeLineURL, tweetRequestLimit, fakeMaxId-nTweets+1)] = httputil.StaticResponder(okHeader + fakeTimeLine(fakeMaxId-nTweets+1, fakeMinId, timeLineCached)) + if nTweets > tweetRequestLimit { + // that is, once every tweetRequestLimit-1, going down from fakeMaxId. 
+ for i := fakeMaxId; i > fakeMinId; i -= tweetRequestLimit - 1 { + responses[fmt.Sprintf("%s?count=%d&max_id=%d&user_id=fakeUserID", timeLineURL, tweetRequestLimit, i)] = httputil.StaticResponder(okHeader + fakeTimeLine(i, fakeMinId, timeLineCached)) + } + } + + // register all the possible combinations of media twimg + for _, scheme := range []string{"http://", "https://"} { + for _, picsize := range []string{"thumb", "small", "medium", "large"} { + responses[fmt.Sprintf("%spbs.twimg.com/media/foo.jpg:%s", scheme, picsize)] = httputil.FileResponder(fakePic) + responses[fmt.Sprintf("%spbs.twimg.com/media/bar.png:%s", scheme, picsize)] = httputil.FileResponder(fakePic) + } + } + + return httputil.NewFakeTransport(responses) +} + +// fakeTimeLine returns a JSON user timeline of tweetRequestLimit tweets, starting +// with maxId as the most recent tweet id. It stops before tweetRequestLimit if +// minId is reached. The returned timeline is saved in cached. +func fakeTimeLine(maxId, minId int64, cached map[int64]string) string { + if tl, ok := cached[maxId]; ok { + return tl + } + min := maxId - int64(tweetRequestLimit) + if min <= minId { + min = minId + } + var tweets []*apiTweetItem + entitiesCounter := 0 + geoCounter := 0 + for i := maxId; i > min; i-- { + tweet := &apiTweetItem{ + Id: strconv.FormatInt(i, 10), + TextStr: fmt.Sprintf("fakeText %d", i), + CreatedAtStr: time.Now().Format(time.RubyDate), + Entities: fakeEntities(entitiesCounter), + } + geo, coords := fakeGeo(geoCounter) + tweet.Geo = geo + tweet.Coordinates = coords + tweets = append(tweets, tweet) + entitiesCounter++ + geoCounter++ + if entitiesCounter == 10 { + entitiesCounter = 0 + } + if geoCounter == 5 { + geoCounter = 0 + } + } + userTimeLine, err := json.MarshalIndent(tweets, "", " ") + if err != nil { + log.Fatalf("%v", err) + } + cached[maxId] = string(userTimeLine) + return cached[maxId] +} + +func fakeGeo(counter int) (*geo, *coords) { + sf := []float64{37.7447124, -122.4341914} + gre := 
[]float64{45.1822842, 5.7141854} + switch counter { + case 0: + return nil, nil + case 1: + return &geo{sf}, nil + case 2: + return nil, &coords{[]float64{gre[1], gre[0]}} + case 3: + return &geo{gre}, &coords{[]float64{sf[1], sf[0]}} + default: + return nil, nil + } +} + +func fakeEntities(counter int) entities { + sizes := func() map[string]mediaSize { + return map[string]mediaSize{ + "medium": {W: 591, H: 332, Resize: "fit"}, + "large": {W: 591, H: 332, Resize: "fit"}, + "small": {W: 338, H: 190, Resize: "fit"}, + "thumb": {W: 150, H: 150, Resize: "crop"}, + } + } + mediaTwimg1 := func() *media { + return &media{ + Id: "1", + IdNum: 1, + MediaURL: `http://pbs.twimg.com/media/foo.jpg`, + MediaURLHTTPS: `https://pbs.twimg.com/media/foo.jpg`, + Sizes: sizes(), + } + } + mediaTwimg2 := func() *media { + return &media{ + Id: "2", + IdNum: 2, + MediaURL: `http://pbs.twimg.com/media/bar.png`, + MediaURLHTTPS: `https://pbs.twimg.com/media/bar.png`, + Sizes: sizes(), + } + } + notPicURL := func() *urlEntity { + return &urlEntity{ + URL: `http://t.co/whatever`, + ExpandedURL: `http://camlistore.org`, + DisplayURL: `camlistore.org`, + } + } + imgurURL := func() *urlEntity { + return &urlEntity{ + URL: `http://t.co/whatever2`, + ExpandedURL: `http://imgur.com/foo`, + DisplayURL: `imgur.com/foo`, + } + } + twitpicURL := func() *urlEntity { + return &urlEntity{ + URL: `http://t.co/whatever3`, + ExpandedURL: `http://twitpic.com/bar`, + DisplayURL: `twitpic.com/bar`, + } + } + + // if you add another case, make sure the entities counter reset + // in fakeTimeLine allows for that case to happen. + // We could use global vars instead, but don't want to pollute the + // twitter pkg namespace. 
+ switch counter { + case 0: + return entities{} + case 1: + return entities{ + Media: []*media{ + mediaTwimg1(), + mediaTwimg2(), + }, + } + case 2: + return entities{ + URLs: []*urlEntity{ + notPicURL(), + }, + } + case 3: + return entities{ + URLs: []*urlEntity{ + notPicURL(), + imgurURL(), + }, + } + case 4: + return entities{ + URLs: []*urlEntity{ + twitpicURL(), + imgurURL(), + }, + } + case 5: + return entities{ + Media: []*media{ + mediaTwimg2(), + mediaTwimg1(), + }, + URLs: []*urlEntity{ + notPicURL(), + twitpicURL(), + }, + } + case 6: + return entities{ + Media: []*media{ + mediaTwimg1(), + mediaTwimg2(), + }, + URLs: []*urlEntity{ + imgurURL(), + twitpicURL(), + }, + } + default: + return entities{} + } +} + +func fakePicture() string { + camliDir, err := osutil.GoPackagePath("camlistore.org") + if err == os.ErrNotExist { + log.Fatal("Directory \"camlistore.org\" not found under GOPATH/src; are you not running with devcam?") + } + if err != nil { + log.Fatalf("Error searching for \"camlistore.org\" under GOPATH: %v", err) + } + return filepath.Join(camliDir, filepath.FromSlash("third_party/glitch/npc_piggy__x1_walk_png_1354829432.png")) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/testdata/verify_credentials-res.json b/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/testdata/verify_credentials-res.json new file mode 100755 index 00000000..8d06b457 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/testdata/verify_credentials-res.json @@ -0,0 +1,9 @@ +{ + "id":2325935334, + "id_str":"2325935334", + "name":"Mathieu Lonjaret", + "screen_name":"lejatorn", + "location":"", + "description":"potato, clever label, trendy word", + "url":"https:\/\/t.co\/TF5K7idMNj" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/twitter.go b/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/twitter.go new file mode 100644 index 00000000..372e3578 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/twitter.go @@ -0,0 +1,886 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package twitter implements a twitter.com importer. +package twitter + +import ( + "archive/zip" + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "path" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/schema/nodeattr" + "camlistore.org/pkg/syncutil" + "camlistore.org/third_party/github.com/garyburd/go-oauth/oauth" +) + +const ( + apiURL = "https://api.twitter.com/1.1/" + temporaryCredentialRequestURL = "https://api.twitter.com/oauth/request_token" + resourceOwnerAuthorizationURL = "https://api.twitter.com/oauth/authorize" + tokenRequestURL = "https://api.twitter.com/oauth/access_token" + userInfoAPIPath = "account/verify_credentials.json" + userTimeLineAPIPath = "statuses/user_timeline.json" + + // runCompleteVersion is a cache-busting version number of the + // importer code. It should be incremented whenever the + // behavior of this importer is updated enough to warrant a + // complete run. Otherwise, if the importer runs to + // completion, this version number is recorded on the account + // permanode and subsequent importers can stop early. 
+ runCompleteVersion = "4" + + // acctAttrTweetZip specifies an optional attribte for the account permanode. + // If set, it should be of a "file" schema blob referencing the tweets.zip + // file that Twitter makes available for the full archive download. + // The Twitter API doesn't go back forever in time, so if you started using + // the Camlistore importer too late, you need to "camput file tweets.zip" + // once downloading it from Twitter, and then: + // $ camput attr twitterArchiveZipFileRef + // ... and re-do an import. + acctAttrTweetZip = "twitterArchiveZipFileRef" + + // acctAttrZipDoneVersion is updated at the end of a successful zip import and + // is used to determine whether the zip file needs to be re-imported in a future run. + acctAttrZipDoneVersion = "twitterZipDoneVersion" // == ":" + + // Per-tweet note of how we imported it: either "zip" or "api" + attrImportMethod = "twitterImportMethod" + + tweetRequestLimit = 200 // max number of tweets we can get in a user_timeline request + tweetsAtOnce = 20 // how many tweets to import at once +) + +var oAuthURIs = importer.OAuthURIs{ + TemporaryCredentialRequestURI: temporaryCredentialRequestURL, + ResourceOwnerAuthorizationURI: resourceOwnerAuthorizationURL, + TokenRequestURI: tokenRequestURL, +} + +func init() { + importer.Register("twitter", &imp{}) +} + +var _ importer.ImporterSetupHTMLer = (*imp)(nil) + +type imp struct { + importer.OAuth1 // for CallbackRequestAccount and CallbackURLParameters +} + +func (im *imp) NeedsAPIKey() bool { return true } +func (im *imp) SupportsIncremental() bool { return true } + +func (im *imp) IsAccountReady(acctNode *importer.Object) (ok bool, err error) { + if acctNode.Attr(importer.AcctAttrUserID) != "" && acctNode.Attr(importer.AcctAttrAccessToken) != "" { + return true, nil + } + return false, nil +} + +func (im *imp) SummarizeAccount(acct *importer.Object) string { + ok, err := im.IsAccountReady(acct) + if err != nil { + return "Not configured; error = " + 
err.Error() + } + if !ok { + return "Not configured" + } + s := fmt.Sprintf("@%s (%s), twitter id %s", + acct.Attr(importer.AcctAttrUserName), + acct.Attr(importer.AcctAttrName), + acct.Attr(importer.AcctAttrUserID), + ) + if acct.Attr(acctAttrTweetZip) != "" { + s += " + zip file" + } + return s +} + +func (im *imp) AccountSetupHTML(host *importer.Host) string { + base := host.ImporterBaseURL() + "twitter" + return fmt.Sprintf(` +

    Configuring Twitter

    +

    Visit https://apps.twitter.com/ and click "Create New App".

    +

    Use the following settings:

    +
      +
    • Name: Does not matter. (camlistore-importer).
    • +
    • Description: Does not matter. (imports twitter data into camlistore).
    • +
    • Website: %s
    • +
    • Callback URL: %s
    • +
    +

    Click "Create your Twitter application". You should be redirected to the Application Management page of your newly created application. +
    Go to the API Keys tab. Copy the "API key" and "API secret" into the "Client ID" and "Client Secret" boxes above.

    +`, base, base+"/callback") +} + +// A run is our state for a given run of the importer. +type run struct { + *importer.RunContext + im *imp + incremental bool // whether we've completed a run in the past + + oauthClient *oauth.Client // No need to guard, used read-only. + accessCreds *oauth.Credentials // No need to guard, used read-only. + + mu sync.Mutex // guards anyErr + anyErr bool +} + +var forceFullImport, _ = strconv.ParseBool(os.Getenv("CAMLI_TWITTER_FULL_IMPORT")) + +func (im *imp) Run(ctx *importer.RunContext) error { + clientId, secret, err := ctx.Credentials() + if err != nil { + return fmt.Errorf("no API credentials: %v", err) + } + acctNode := ctx.AccountNode() + accessToken := acctNode.Attr(importer.AcctAttrAccessToken) + accessSecret := acctNode.Attr(importer.AcctAttrAccessTokenSecret) + if accessToken == "" || accessSecret == "" { + return errors.New("access credentials not found") + } + r := &run{ + RunContext: ctx, + im: im, + incremental: !forceFullImport && acctNode.Attr(importer.AcctAttrCompletedVersion) == runCompleteVersion, + + oauthClient: &oauth.Client{ + TemporaryCredentialRequestURI: temporaryCredentialRequestURL, + ResourceOwnerAuthorizationURI: resourceOwnerAuthorizationURL, + TokenRequestURI: tokenRequestURL, + Credentials: oauth.Credentials{ + Token: clientId, + Secret: secret, + }, + }, + accessCreds: &oauth.Credentials{ + Token: accessToken, + Secret: accessSecret, + }, + } + + userID := acctNode.Attr(importer.AcctAttrUserID) + if userID == "" { + return errors.New("UserID hasn't been set by account setup.") + } + + skipAPITweets, _ := strconv.ParseBool(os.Getenv("CAMLI_TWITTER_SKIP_API_IMPORT")) + if !skipAPITweets { + if err := r.importTweets(userID); err != nil { + return err + } + } + + zipRef := acctNode.Attr(acctAttrTweetZip) + zipDoneVal := zipRef + ":" + runCompleteVersion + if zipRef != "" && !(r.incremental && acctNode.Attr(acctAttrZipDoneVersion) == zipDoneVal) { + zipbr, ok := blob.Parse(zipRef) + if !ok { + 
return fmt.Errorf("invalid zip file blobref %q", zipRef) + } + fr, err := schema.NewFileReader(r.Host.BlobSource(), zipbr) + if err != nil { + return fmt.Errorf("error opening zip %v: %v", zipbr, err) + } + defer fr.Close() + zr, err := zip.NewReader(fr, fr.Size()) + if err != nil { + return fmt.Errorf("Error opening twitter zip file %v: %v", zipRef, err) + } + if err := r.importTweetsFromZip(userID, zr); err != nil { + return err + } + if err := acctNode.SetAttrs(acctAttrZipDoneVersion, zipDoneVal); err != nil { + return err + } + } + + r.mu.Lock() + anyErr := r.anyErr + r.mu.Unlock() + + if !anyErr { + if err := acctNode.SetAttrs(importer.AcctAttrCompletedVersion, runCompleteVersion); err != nil { + return err + } + } + + return nil +} + +func (r *run) errorf(format string, args ...interface{}) { + log.Printf(format, args...) + r.mu.Lock() + defer r.mu.Unlock() + r.anyErr = true +} + +func (r *run) doAPI(result interface{}, apiPath string, keyval ...string) error { + return importer.OAuthContext{ + r.Context, + r.oauthClient, + r.accessCreds}.PopulateJSONFromURL(result, apiURL+apiPath, keyval...) +} + +func (r *run) importTweets(userID string) error { + maxId := "" + continueRequests := true + + tweetsNode, err := r.getTopLevelNode("tweets") + if err != nil { + return err + } + + numTweets := 0 + sawTweet := map[string]bool{} + + // If attrs is changed, so should the expected responses accordingly for the + // RoundTripper of MakeTestData (testdata.go). + attrs := []string{ + "user_id", userID, + "count", strconv.Itoa(tweetRequestLimit), + } + for continueRequests { + if r.Context.IsCanceled() { + r.errorf("Twitter importer: interrupted") + return context.ErrCanceled + } + + var resp []*apiTweetItem + var err error + if maxId == "" { + log.Printf("Fetching tweets for userid %s", userID) + err = r.doAPI(&resp, userTimeLineAPIPath, attrs...) 
+ } else { + log.Printf("Fetching tweets for userid %s with max ID %s", userID, maxId) + err = r.doAPI(&resp, userTimeLineAPIPath, + append(attrs, "max_id", maxId)...) + } + if err != nil { + return err + } + + var ( + newThisBatch = 0 + allDupMu sync.Mutex + allDups = true + gate = syncutil.NewGate(tweetsAtOnce) + grp syncutil.Group + ) + for i := range resp { + tweet := resp[i] + + // Dup-suppression. + if sawTweet[tweet.Id] { + continue + } + sawTweet[tweet.Id] = true + newThisBatch++ + maxId = tweet.Id + + gate.Start() + grp.Go(func() error { + defer gate.Done() + dup, err := r.importTweet(tweetsNode, tweet, true) + if !dup { + allDupMu.Lock() + allDups = false + allDupMu.Unlock() + } + if err != nil { + r.errorf("Twitter importer: error importing tweet %s %v", tweet.Id, err) + } + return err + }) + } + if err := grp.Err(); err != nil { + return err + } + numTweets += newThisBatch + log.Printf("Imported %d tweets this batch; %d total.", newThisBatch, numTweets) + if r.incremental && allDups { + log.Printf("twitter incremental import found end batch") + break + } + continueRequests = newThisBatch > 0 + } + log.Printf("Successfully did full run of importing %d tweets", numTweets) + return nil +} + +func tweetsFromZipFile(zf *zip.File) (tweets []*zipTweetItem, err error) { + rc, err := zf.Open() + if err != nil { + return nil, err + } + slurp, err := ioutil.ReadAll(rc) + rc.Close() + if err != nil { + return nil, err + } + i := bytes.IndexByte(slurp, '[') + if i < 0 { + return nil, errors.New("No '[' found in zip file") + } + slurp = slurp[i:] + if err := json.Unmarshal(slurp, &tweets); err != nil { + return nil, fmt.Errorf("JSON error: %v", err) + } + return +} + +func (r *run) importTweetsFromZip(userID string, zr *zip.Reader) error { + log.Printf("Processing zip file with %d files", len(zr.File)) + + tweetsNode, err := r.getTopLevelNode("tweets") + if err != nil { + return err + } + + var ( + gate = syncutil.NewGate(tweetsAtOnce) + grp syncutil.Group + ) + 
total := 0 + for _, zf := range zr.File { + if !(strings.HasPrefix(zf.Name, "data/js/tweets/2") && strings.HasSuffix(zf.Name, ".js")) { + continue + } + tweets, err := tweetsFromZipFile(zf) + if err != nil { + return fmt.Errorf("error reading tweets from %s: %v", zf.Name, err) + } + + for i := range tweets { + total++ + tweet := tweets[i] + gate.Start() + grp.Go(func() error { + defer gate.Done() + _, err := r.importTweet(tweetsNode, tweet, false) + return err + }) + } + } + err = grp.Err() + log.Printf("zip import of tweets: %d total, err = %v", total, err) + return err +} + +func timeParseFirstFormat(timeStr string, format ...string) (t time.Time, err error) { + if len(format) == 0 { + panic("need more than 1 format") + } + for _, f := range format { + t, err = time.Parse(f, timeStr) + if err == nil { + break + } + } + return +} + +// viaAPI is true if it came via the REST API, or false if it came via a zip file. +func (r *run) importTweet(parent *importer.Object, tweet tweetItem, viaAPI bool) (dup bool, err error) { + if r.Context.IsCanceled() { + r.errorf("Twitter importer: interrupted") + return false, context.ErrCanceled + } + id := tweet.ID() + tweetNode, err := parent.ChildPathObject(id) + if err != nil { + return false, err + } + + // Because the zip format and the API format differ a bit, and + // might diverge more in the future, never use the zip content + // to overwrite data fetched via the API. If we add new + // support for different fields in the future, we might want + // to revisit this decision. Be wary of flip/flopping data if + // modifying this, though. + if tweetNode.Attr(attrImportMethod) == "api" && !viaAPI { + return true, nil + } + + // e.g. 
"2014-06-12 19:11:51 +0000" + createdTime, err := timeParseFirstFormat(tweet.CreatedAt(), time.RubyDate, "2006-01-02 15:04:05 -0700") + if err != nil { + return false, fmt.Errorf("could not parse time %q: %v", tweet.CreatedAt(), err) + } + + url := fmt.Sprintf("https://twitter.com/%s/status/%v", + r.AccountNode().Attr(importer.AcctAttrUserName), + id) + + attrs := []string{ + "twitterId", id, + nodeattr.Type, "twitter.com:tweet", + nodeattr.StartDate, schema.RFC3339FromTime(createdTime), + nodeattr.Content, tweet.Text(), + nodeattr.URL, url, + } + if lat, long, ok := tweet.LatLong(); ok { + attrs = append(attrs, + nodeattr.Latitude, fmt.Sprint(lat), + nodeattr.Longitude, fmt.Sprint(long), + ) + } + if viaAPI { + attrs = append(attrs, attrImportMethod, "api") + } else { + attrs = append(attrs, attrImportMethod, "zip") + } + + for i, m := range tweet.Media() { + filename := m.BaseFilename() + if tweetNode.Attr("camliPath:"+filename) != "" && (i > 0 || tweetNode.Attr("camliContentImage") != "") { + // Don't re-import media we've already fetched. 
+ continue + } + tried, gotMedia := 0, false + for _, mediaURL := range m.URLs() { + tried++ + res, err := r.HTTPClient().Get(mediaURL) + if err != nil { + return false, fmt.Errorf("Error fetching %s for tweet %s : %v", mediaURL, url, err) + } + if res.StatusCode == http.StatusNotFound { + continue + } + if res.StatusCode != 200 { + return false, fmt.Errorf("HTTP status %d fetching %s for tweet %s", res.StatusCode, mediaURL, url) + } + if !viaAPI { + log.Printf("For zip tweet %s, reading %v", url, mediaURL) + } + fileRef, err := schema.WriteFileFromReader(r.Host.Target(), filename, res.Body) + res.Body.Close() + if err != nil { + return false, fmt.Errorf("Error fetching media %s for tweet %s: %v", mediaURL, url, err) + } + attrs = append(attrs, "camliPath:"+filename, fileRef.String()) + if i == 0 { + attrs = append(attrs, "camliContentImage", fileRef.String()) + } + log.Printf("Slurped %s as %s for tweet %s (%v)", mediaURL, fileRef.String(), url, tweetNode.PermanodeRef()) + gotMedia = true + break + } + if !gotMedia && tried > 0 { + return false, fmt.Errorf("All media URLs 404s for tweet %s", url) + } + } + + changes, err := tweetNode.SetAttrs2(attrs...) + if err == nil && changes { + log.Printf("Imported tweet %s", url) + } + return !changes, err +} + +// The path be one of "tweets". +// In the future: "lists", "direct_messages", etc. 
+func (r *run) getTopLevelNode(path string) (*importer.Object, error) { + acctNode := r.AccountNode() + + root := r.RootNode() + rootTitle := fmt.Sprintf("%s's Twitter Data", acctNode.Attr(importer.AcctAttrUserName)) + log.Printf("root title = %q; want %q", root.Attr(nodeattr.Title), rootTitle) + if err := root.SetAttr(nodeattr.Title, rootTitle); err != nil { + return nil, err + } + + obj, err := root.ChildPathObject(path) + if err != nil { + return nil, err + } + var title string + switch path { + case "tweets": + title = fmt.Sprintf("%s's Tweets", acctNode.Attr(importer.AcctAttrUserName)) + } + return obj, obj.SetAttr(nodeattr.Title, title) +} + +type userInfo struct { + ID string `json:"id_str"` + ScreenName string `json:"screen_name"` + Name string `json:"name,omitempty"` +} + +func getUserInfo(ctx importer.OAuthContext) (userInfo, error) { + var ui userInfo + if err := ctx.PopulateJSONFromURL(&ui, apiURL+userInfoAPIPath); err != nil { + return ui, err + } + if ui.ID == "" { + return ui, fmt.Errorf("No userid returned") + } + return ui, nil +} + +func (im *imp) ServeSetup(w http.ResponseWriter, r *http.Request, ctx *importer.SetupContext) error { + oauthClient, err := ctx.NewOAuthClient(oAuthURIs) + if err != nil { + err = fmt.Errorf("error getting OAuth client: %v", err) + httputil.ServeError(w, r, err) + return err + } + tempCred, err := oauthClient.RequestTemporaryCredentials(ctx.HTTPClient(), ctx.CallbackURL(), nil) + if err != nil { + err = fmt.Errorf("Error getting temp cred: %v", err) + httputil.ServeError(w, r, err) + return err + } + if err := ctx.AccountNode.SetAttrs( + importer.AcctAttrTempToken, tempCred.Token, + importer.AcctAttrTempSecret, tempCred.Secret, + ); err != nil { + err = fmt.Errorf("Error saving temp creds: %v", err) + httputil.ServeError(w, r, err) + return err + } + + authURL := oauthClient.AuthorizationURL(tempCred, nil) + http.Redirect(w, r, authURL, 302) + return nil +} + +func (im *imp) ServeCallback(w http.ResponseWriter, r 
*http.Request, ctx *importer.SetupContext) { + tempToken := ctx.AccountNode.Attr(importer.AcctAttrTempToken) + tempSecret := ctx.AccountNode.Attr(importer.AcctAttrTempSecret) + if tempToken == "" || tempSecret == "" { + log.Printf("twitter: no temp creds in callback") + httputil.BadRequestError(w, "no temp creds in callback") + return + } + if tempToken != r.FormValue("oauth_token") { + log.Printf("unexpected oauth_token: got %v, want %v", r.FormValue("oauth_token"), tempToken) + httputil.BadRequestError(w, "unexpected oauth_token") + return + } + oauthClient, err := ctx.NewOAuthClient(oAuthURIs) + if err != nil { + err = fmt.Errorf("error getting OAuth client: %v", err) + httputil.ServeError(w, r, err) + return + } + tokenCred, vals, err := oauthClient.RequestToken( + ctx.Context.HTTPClient(), + &oauth.Credentials{ + Token: tempToken, + Secret: tempSecret, + }, + r.FormValue("oauth_verifier"), + ) + if err != nil { + httputil.ServeError(w, r, fmt.Errorf("Error getting request token: %v ", err)) + return + } + userid := vals.Get("user_id") + if userid == "" { + httputil.ServeError(w, r, fmt.Errorf("Couldn't get user id: %v", err)) + return + } + if err := ctx.AccountNode.SetAttrs( + importer.AcctAttrAccessToken, tokenCred.Token, + importer.AcctAttrAccessTokenSecret, tokenCred.Secret, + ); err != nil { + httputil.ServeError(w, r, fmt.Errorf("Error setting token attributes: %v", err)) + return + } + + u, err := getUserInfo(importer.OAuthContext{ctx.Context, oauthClient, tokenCred}) + if err != nil { + httputil.ServeError(w, r, fmt.Errorf("Couldn't get user info: %v", err)) + return + } + if err := ctx.AccountNode.SetAttrs( + importer.AcctAttrUserID, u.ID, + importer.AcctAttrName, u.Name, + importer.AcctAttrUserName, u.ScreenName, + nodeattr.Title, fmt.Sprintf("%s's Twitter Account", u.ScreenName), + ); err != nil { + httputil.ServeError(w, r, fmt.Errorf("Error setting attribute: %v", err)) + return + } + http.Redirect(w, r, ctx.AccountURL(), http.StatusFound) +} + 
+type tweetItem interface { + ID() string + LatLong() (lat, long float64, ok bool) + CreatedAt() string + Text() string + Media() []tweetMedia +} + +type tweetMedia interface { + URLs() []string // use first non-404 one + BaseFilename() string +} + +type apiTweetItem struct { + Id string `json:"id_str"` + TextStr string `json:"text"` + CreatedAtStr string `json:"created_at"` + Entities entities `json:"entities"` + + // One or both might be present: + Geo *geo `json:"geo"` // lat, long + Coordinates *coords `json:"coordinates"` // geojson: long, lat +} + +// zipTweetItem is like apiTweetItem, but twitter is annoying and the schema for the JSON inside zip files is slightly different. +type zipTweetItem struct { + Id string `json:"id_str"` + TextStr string `json:"text"` + CreatedAtStr string `json:"created_at"` + + // One or both might be present: + Geo *geo `json:"geo"` // lat, long + Coordinates *coords `json:"coordinates"` // geojson: long, lat + Entities zipEntities `json:"entities"` +} + +func (t *apiTweetItem) ID() string { + if t.Id == "" { + panic("empty id") + } + return t.Id +} + +func (t *zipTweetItem) ID() string { + if t.Id == "" { + panic("empty id") + } + return t.Id +} + +func (t *apiTweetItem) CreatedAt() string { return t.CreatedAtStr } +func (t *zipTweetItem) CreatedAt() string { return t.CreatedAtStr } + +func (t *apiTweetItem) Text() string { return t.TextStr } +func (t *zipTweetItem) Text() string { return t.TextStr } + +func (t *apiTweetItem) LatLong() (lat, long float64, ok bool) { + return latLong(t.Geo, t.Coordinates) +} + +func (t *zipTweetItem) LatLong() (lat, long float64, ok bool) { + return latLong(t.Geo, t.Coordinates) +} + +func latLong(g *geo, c *coords) (lat, long float64, ok bool) { + if g != nil && len(g.Coordinates) == 2 { + co := g.Coordinates + if co[0] != 0 && co[1] != 0 { + return co[0], co[1], true + } + } + if c != nil && len(c.Coordinates) == 2 { + co := c.Coordinates + if co[0] != 0 && co[1] != 0 { + return co[1], co[0], 
true + } + } + return +} + +func (t *zipTweetItem) Media() (ret []tweetMedia) { + for _, m := range t.Entities.Media { + ret = append(ret, m) + } + ret = append(ret, getImagesFromURLs(t.Entities.URLs)...) + return +} + +func (t *apiTweetItem) Media() (ret []tweetMedia) { + for _, m := range t.Entities.Media { + ret = append(ret, m) + } + ret = append(ret, getImagesFromURLs(t.Entities.URLs)...) + return +} + +type geo struct { + Coordinates []float64 `json:"coordinates"` // lat,long +} + +type coords struct { + Coordinates []float64 `json:"coordinates"` // long,lat +} + +type entities struct { + Media []*media `json:"media"` + URLs []*urlEntity `json:"urls"` +} + +type zipEntities struct { + Media []*zipMedia `json:"media"` + URLs []*urlEntity `json:"urls"` +} + +// e.g. { +// "indices" : [ 105, 125 ], +// "url" : "http:\/\/t.co\/gbGO8Qep", +// "expanded_url" : "http:\/\/twitpic.com\/6mdqac", +// "display_url" : "twitpic.com\/6mdqac" +// } +type urlEntity struct { + URL string `json:"url"` + ExpandedURL string `json:"expanded_url"` + DisplayURL string `json:"display_url"` +} + +var ( + twitpicRx = regexp.MustCompile(`\btwitpic\.com/(\w\w\w+)`) + imgurRx = regexp.MustCompile(`\bimgur\.com/(\w\w\w+)`) +) + +func getImagesFromURLs(urls []*urlEntity) (ret []tweetMedia) { + // TODO: extract these regexps from tweet text too. Happens in + // a few cases I've seen in my history. + for _, u := range urls { + if strings.HasPrefix(u.DisplayURL, "twitpic.com") { + ret = append(ret, twitpicImage(strings.TrimPrefix(u.DisplayURL, "twitpic.com/"))) + continue + } + if m := imgurRx.FindStringSubmatch(u.DisplayURL); m != nil { + ret = append(ret, imgurImage(m[1])) + continue + } + } + return +} + +// The Media entity from the Rest API. See also: zipMedia. 
+type media struct { + Id string `json:"id_str"` + IdNum int64 `json:"id"` + MediaURL string `json:"media_url"` + MediaURLHTTPS string `json:"media_url_https"` + Sizes map[string]mediaSize `json:"sizes"` + Type_ string `json:"type"` +} + +// The Media entity from the zip file JSON. Similar but different to +// media. Thanks, Twitter. +type zipMedia struct { + Id string `json:"id_str"` + IdNum int64 `json:"id"` + MediaURL string `json:"media_url"` + MediaURLHTTPS string `json:"media_url_https"` + Sizes []mediaSize `json:"sizes"` // without a key! useless. +} + +func (m *media) URLs() []string { + u := m.baseURL() + if u == "" { + return nil + } + return []string{u + m.largestMediaSuffix(), u} +} + +func (m *zipMedia) URLs() []string { + // We don't get any suffix names, so just try some common + // ones. The first non-404 will be used: + u := m.baseURL() + if u == "" { + return nil + } + return []string{ + u + ":large", + u, + } +} + +func (m *media) baseURL() string { + if v := m.MediaURLHTTPS; v != "" { + return v + } + return m.MediaURL +} + +func (m *zipMedia) baseURL() string { + if v := m.MediaURLHTTPS; v != "" { + return v + } + return m.MediaURL +} + +func (m *media) BaseFilename() string { + return path.Base(m.baseURL()) +} + +func (m *zipMedia) BaseFilename() string { + return path.Base(m.baseURL()) +} + +func (m *media) largestMediaSuffix() string { + bestPixels := 0 + bestSuffix := "" + for k, sz := range m.Sizes { + if px := sz.W * sz.H; px > bestPixels { + bestPixels = px + bestSuffix = ":" + k + } + } + return bestSuffix +} + +type mediaSize struct { + W int `json:"w"` + H int `json:"h"` + Resize string `json:"resize"` +} + +// An image from twitpic. 
+type twitpicImage string + +func (im twitpicImage) BaseFilename() string { return string(im) } + +func (im twitpicImage) URLs() []string { + return []string{"https://twitpic.com/show/large/" + string(im)} +} + +// An image from imgur +type imgurImage string + +func (im imgurImage) BaseFilename() string { return string(im) } + +func (im imgurImage) URLs() []string { + // Imgur ignores the suffix if it's .gif, .png, or .jpg. So just pick .gif. + // The actual content will be returned. + return []string{"https://i.imgur.com/" + string(im) + ".gif"} +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/twitter_test.go b/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/twitter_test.go new file mode 100644 index 00000000..e17a1069 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/importer/twitter/twitter_test.go @@ -0,0 +1,50 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package twitter + +import ( + "net/http" + "path/filepath" + "testing" + + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + + "camlistore.org/third_party/github.com/garyburd/go-oauth/oauth" +) + +func TestGetUserID(t *testing.T) { + ctx := context.New(context.WithHTTPClient(&http.Client{ + Transport: httputil.NewFakeTransport(map[string]func() *http.Response{ + apiURL + userInfoAPIPath: httputil.FileResponder(filepath.FromSlash("testdata/verify_credentials-res.json")), + }), + })) + defer ctx.Cancel() + inf, err := getUserInfo(importer.OAuthContext{ctx, &oauth.Client{}, &oauth.Credentials{}}) + if err != nil { + t.Fatal(err) + } + want := userInfo{ + ID: "2325935334", + ScreenName: "lejatorn", + Name: "Mathieu Lonjaret", + } + if inf != want { + t.Errorf("user info = %+v; want %+v", inf, want) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/corpus.go b/vendor/github.com/camlistore/camlistore/pkg/index/corpus.go new file mode 100644 index 00000000..51acafa2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/corpus.go @@ -0,0 +1,1262 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package index + +import ( + "bytes" + "errors" + "fmt" + "log" + "os" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/schema/nodeattr" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/strutil" + "camlistore.org/pkg/syncutil" + "camlistore.org/pkg/types/camtypes" +) + +// Corpus is an in-memory summary of all of a user's blobs' metadata. +type Corpus struct { + mu sync.RWMutex + //mu syncutil.RWMutexTracker // when debugging + + // building is true at start while scanning all rows in the + // index. While building, certain invariants (like things + // being sorted) can be temporarily violated and fixed at the + // end of scan. + building bool + + // gen is incremented on every blob received. + // It's used as a query cache invalidator. + gen int64 + + strs map[string]string // interned strings + brOfStr map[string]blob.Ref // blob.Parse fast path + brInterns int64 // blob.Ref -> blob.Ref, via br method + + blobs map[blob.Ref]*camtypes.BlobMeta + sumBlobBytes int64 + + // camlBlobs maps from camliType ("file") to blobref to the meta. + // The value is the same one in blobs. + camBlobs map[string]map[blob.Ref]*camtypes.BlobMeta + + // TODO: add GoLLRB to third_party; keep sorted BlobMeta + keyId map[blob.Ref]string + files map[blob.Ref]camtypes.FileInfo + permanodes map[blob.Ref]*PermanodeMeta + imageInfo map[blob.Ref]camtypes.ImageInfo // keyed by fileref (not wholeref) + fileWholeRef map[blob.Ref]blob.Ref // fileref -> its wholeref (TODO: multi-valued?) + gps map[blob.Ref]latLong // wholeRef -> GPS coordinates + + // edge tracks "forward" edges. e.g. from a directory's static-set to + // its members. Permanodes' camliMembers aren't tracked, since they + // can be obtained from permanodes.Claims. + // TODO: implement + edge map[blob.Ref][]edge + + // edgeBack tracks "backward" edges. e.g. 
from a file back to + // any directories it's part of. + // The map is from target (e.g. file) => owner (static-set). + // This only tracks static data structures, not permanodes. + // TODO: implement + edgeBack map[blob.Ref]map[blob.Ref]bool + + // claimBack allows hopping backwards from a Claim's Value + // when the Value is a blobref. It allows, for example, + // finding the parents of camliMember claims. If a permanode + // parent set A has a camliMembers B and C, it allows finding + // A from either B and C. + // The slice is not sorted. + claimBack map[blob.Ref][]*camtypes.Claim + + // TOOD: use deletedCache instead? + deletedBy map[blob.Ref]blob.Ref // key is deleted by value + // deletes tracks deletions of claims and permanodes. The key is + // the blobref of a claim or permanode. The values, sorted newest first, + // contain the blobref of the claim responsible for the deletion, as well + // as the date when that deletion happened. + deletes map[blob.Ref][]deletion + + mediaTags map[blob.Ref]map[string]string // wholeref -> "album" -> "foo" + + permanodesByTime *lazySortedPermanodes // cache of permanodes sorted by creation time. + permanodesByModtime *lazySortedPermanodes // cache of permanodes sorted by modtime. + + // scratch string slice + ss []string +} + +type latLong struct { + lat, long float64 +} + +// RLock locks the Corpus for reads. It must be used for any "Locked" methods. +func (c *Corpus) RLock() { c.mu.RLock() } + +// RUnlock unlocks the Corpus for reads. +func (c *Corpus) RUnlock() { c.mu.RUnlock() } + +// IsDeleted reports whether the provided blobref (of a permanode or claim) should be considered deleted. +func (c *Corpus) IsDeleted(br blob.Ref) bool { + c.RLock() + defer c.RUnlock() + return c.IsDeletedLocked(br) +} + +// IsDeletedLocked is the version of IsDeleted that assumes the Corpus is already locked with RLock. 
+func (c *Corpus) IsDeletedLocked(br blob.Ref) bool { + for _, v := range c.deletes[br] { + if !c.IsDeletedLocked(v.deleter) { + return true + } + } + return false +} + +type edge struct { + edgeType string + peer blob.Ref +} + +type PermanodeMeta struct { + // TODO: OwnerKeyId string + Claims []*camtypes.Claim // sorted by camtypes.ClaimsByDate +} + +func newCorpus() *Corpus { + c := &Corpus{ + blobs: make(map[blob.Ref]*camtypes.BlobMeta), + camBlobs: make(map[string]map[blob.Ref]*camtypes.BlobMeta), + files: make(map[blob.Ref]camtypes.FileInfo), + permanodes: make(map[blob.Ref]*PermanodeMeta), + imageInfo: make(map[blob.Ref]camtypes.ImageInfo), + deletedBy: make(map[blob.Ref]blob.Ref), + keyId: make(map[blob.Ref]string), + brOfStr: make(map[string]blob.Ref), + fileWholeRef: make(map[blob.Ref]blob.Ref), + gps: make(map[blob.Ref]latLong), + mediaTags: make(map[blob.Ref]map[string]string), + deletes: make(map[blob.Ref][]deletion), + claimBack: make(map[blob.Ref][]*camtypes.Claim), + } + c.permanodesByModtime = &lazySortedPermanodes{ + c: c, + pnTime: c.PermanodeModtimeLocked, + } + c.permanodesByTime = &lazySortedPermanodes{ + c: c, + pnTime: c.PermanodeAnyTimeLocked, + } + return c +} + +func NewCorpusFromStorage(s sorted.KeyValue) (*Corpus, error) { + if s == nil { + return nil, errors.New("storage is nil") + } + c := newCorpus() + return c, c.scanFromStorage(s) +} + +func (x *Index) KeepInMemory() (*Corpus, error) { + var err error + x.corpus, err = NewCorpusFromStorage(x.s) + return x.corpus, err +} + +// PreventStorageAccessForTesting causes any access to the index's underlying +// Storage interface to panic. 
+func (x *Index) PreventStorageAccessForTesting() { + x.s = crashStorage{} +} + +type crashStorage struct { + sorted.KeyValue +} + +func (crashStorage) Get(key string) (string, error) { + panic(fmt.Sprintf("unexpected KeyValue.Get(%q) called", key)) +} + +func (crashStorage) Find(start, end string) sorted.Iterator { + panic(fmt.Sprintf("unexpected KeyValue.Find(%q, %q) called", start, end)) +} + +// *********** Updating the corpus + +var corpusMergeFunc = map[string]func(c *Corpus, k, v []byte) error{ + "have": nil, // redundant with "meta" + "recpn": nil, // unneeded. + "meta": (*Corpus).mergeMetaRow, + "signerkeyid": (*Corpus).mergeSignerKeyIdRow, + "claim": (*Corpus).mergeClaimRow, + "fileinfo": (*Corpus).mergeFileInfoRow, + "filetimes": (*Corpus).mergeFileTimesRow, + "imagesize": (*Corpus).mergeImageSizeRow, + "wholetofile": (*Corpus).mergeWholeToFileRow, + "exifgps": (*Corpus).mergeEXIFGPSRow, + "exiftag": nil, // not using any for now + "signerattrvalue": nil, // ignoring for now + "mediatag": (*Corpus).mergeMediaTag, +} + +func memstats() *runtime.MemStats { + ms := new(runtime.MemStats) + runtime.GC() + runtime.ReadMemStats(ms) + return ms +} + +var logCorpusStats = true // set to false in tests + +var slurpPrefixes = []string{ + "meta:", // must be first + "signerkeyid:", + "claim|", + "fileinfo|", + "filetimes|", + "imagesize|", + "wholetofile|", + "exifgps|", + "mediatag|", +} + +// Key types (without trailing punctuation) that we slurp to memory at start. +var slurpedKeyType = make(map[string]bool) + +func init() { + for _, prefix := range slurpPrefixes { + slurpedKeyType[typeOfKey(prefix)] = true + } +} + +func (c *Corpus) scanFromStorage(s sorted.KeyValue) error { + c.building = true + + var ms0 *runtime.MemStats + if logCorpusStats { + ms0 = memstats() + log.Printf("Slurping corpus to memory from index...") + log.Printf("Slurping corpus to memory from index... 
(1/%d: meta rows)", len(slurpPrefixes)) + } + + // We do the "meta" rows first, before the prefixes below, because it + // populates the blobs map (used for blobref interning) and the camBlobs + // map (used for hinting the size of other maps) + if err := c.scanPrefix(s, "meta:"); err != nil { + return err + } + c.files = make(map[blob.Ref]camtypes.FileInfo, len(c.camBlobs["file"])) + c.permanodes = make(map[blob.Ref]*PermanodeMeta, len(c.camBlobs["permanode"])) + cpu0 := osutil.CPUUsage() + + var grp syncutil.Group + for i, prefix := range slurpPrefixes[1:] { + if logCorpusStats { + log.Printf("Slurping corpus to memory from index... (%d/%d: prefix %q)", i+2, len(slurpPrefixes), + prefix[:len(prefix)-1]) + } + prefix := prefix + grp.Go(func() error { return c.scanPrefix(s, prefix) }) + } + if err := grp.Err(); err != nil { + return err + } + + // Post-load optimizations and restoration of invariants. + for _, pm := range c.permanodes { + // Restore invariants violated during building: + sort.Sort(camtypes.ClaimPtrsByDate(pm.Claims)) + + // And intern some stuff. + for _, cl := range pm.Claims { + cl.BlobRef = c.br(cl.BlobRef) + cl.Signer = c.br(cl.Signer) + cl.Permanode = c.br(cl.Permanode) + cl.Target = c.br(cl.Target) + } + + } + c.brOfStr = nil // drop this now. 
+ c.building = false + // log.V(1).Printf("interned blob.Ref = %d", c.brInterns) + + if err := c.initDeletes(s); err != nil { + return fmt.Errorf("Could not populate the corpus deletes: %v", err) + } + + if logCorpusStats { + cpu := osutil.CPUUsage() - cpu0 + ms1 := memstats() + memUsed := ms1.Alloc - ms0.Alloc + if ms1.Alloc < ms0.Alloc { + memUsed = 0 + } + log.Printf("Corpus stats: %.3f MiB mem: %d blobs (%.3f GiB) (%d schema (%d permanode, %d file (%d image), ...)", + float64(memUsed)/(1<<20), + len(c.blobs), + float64(c.sumBlobBytes)/(1<<30), + c.numSchemaBlobsLocked(), + len(c.permanodes), + len(c.files), + len(c.imageInfo)) + log.Printf("Corpus scanning CPU usage: %v", cpu) + } + + return nil +} + +// initDeletes populates the corpus deletes from the delete entries in s. +func (c *Corpus) initDeletes(s sorted.KeyValue) (err error) { + it := queryPrefix(s, keyDeleted) + defer closeIterator(it, &err) + for it.Next() { + cl, ok := kvDeleted(it.Key()) + if !ok { + return fmt.Errorf("Bogus keyDeleted entry key: want |\"deleted\"||||, got %q", it.Key()) + } + targetDeletions := append(c.deletes[cl.Target], + deletion{ + deleter: cl.BlobRef, + when: cl.Date, + }) + sort.Sort(sort.Reverse(byDeletionDate(targetDeletions))) + c.deletes[cl.Target] = targetDeletions + } + return err +} + +func (c *Corpus) numSchemaBlobsLocked() (n int64) { + for _, m := range c.camBlobs { + n += int64(len(m)) + } + return +} + +func (c *Corpus) scanPrefix(s sorted.KeyValue, prefix string) (err error) { + typeKey := typeOfKey(prefix) + fn, ok := corpusMergeFunc[typeKey] + if !ok { + panic("No registered merge func for prefix " + prefix) + } + + n, t0 := 0, time.Now() + it := queryPrefixString(s, prefix) + defer closeIterator(it, &err) + for it.Next() { + n++ + if n == 1 { + // Let the query be sent off and responses start flowing in before + // we take the lock. And if no rows: no lock. 
+ c.mu.Lock() + defer c.mu.Unlock() + } + if err := fn(c, it.KeyBytes(), it.ValueBytes()); err != nil { + return err + } + } + if logCorpusStats { + d := time.Since(t0) + log.Printf("Scanned prefix %q: %d rows, %v", prefix[:len(prefix)-1], n, d) + } + return nil +} + +func (c *Corpus) addBlob(br blob.Ref, mm *mutationMap) error { + c.mu.Lock() + defer c.mu.Unlock() + if _, dup := c.blobs[br]; dup { + return nil + } + c.gen++ + for k, v := range mm.kv { + kt := typeOfKey(k) + if !slurpedKeyType[kt] { + continue + } + if err := corpusMergeFunc[kt](c, []byte(k), []byte(v)); err != nil { + return err + } + } + for _, cl := range mm.deletes { + if err := c.updateDeletes(cl); err != nil { + return fmt.Errorf("Could not update the deletes cache after deletion from %v: %v", cl, err) + } + } + return nil +} + +// updateDeletes updates the corpus deletes with the delete claim deleteClaim. +// deleteClaim is trusted to be a valid delete Claim. +func (c *Corpus) updateDeletes(deleteClaim schema.Claim) error { + target := c.br(deleteClaim.Target()) + deleter := deleteClaim.Blob() + when, err := deleter.ClaimDate() + if err != nil { + return fmt.Errorf("Could not get date of delete claim %v: %v", deleteClaim, err) + } + del := deletion{ + deleter: c.br(deleter.BlobRef()), + when: when, + } + for _, v := range c.deletes[target] { + if v == del { + return nil + } + } + targetDeletions := append(c.deletes[target], del) + sort.Sort(sort.Reverse(byDeletionDate(targetDeletions))) + c.deletes[target] = targetDeletions + return nil +} + +func (c *Corpus) mergeMetaRow(k, v []byte) error { + bm, ok := kvBlobMeta_bytes(k, v) + if !ok { + return fmt.Errorf("bogus meta row: %q -> %q", k, v) + } + return c.mergeBlobMeta(bm) +} + +func (c *Corpus) mergeBlobMeta(bm camtypes.BlobMeta) error { + if _, dup := c.blobs[bm.Ref]; dup { + panic("dup blob seen") + } + bm.CamliType = c.str(bm.CamliType) + + c.blobs[bm.Ref] = &bm + c.sumBlobBytes += int64(bm.Size) + if bm.CamliType != "" { + m, ok := 
c.camBlobs[bm.CamliType] + if !ok { + m = make(map[blob.Ref]*camtypes.BlobMeta) + c.camBlobs[bm.CamliType] = m + } + m[bm.Ref] = &bm + } + return nil +} + +func (c *Corpus) mergeSignerKeyIdRow(k, v []byte) error { + br, ok := blob.ParseBytes(k[len("signerkeyid:"):]) + if !ok { + return fmt.Errorf("bogus signerid row: %q -> %q", k, v) + } + c.keyId[br] = string(v) + return nil +} + +func (c *Corpus) mergeClaimRow(k, v []byte) error { + // TODO: update kvClaim to take []byte instead of string + cl, ok := kvClaim(string(k), string(v), c.blobParse) + if !ok || !cl.Permanode.Valid() { + return fmt.Errorf("bogus claim row: %q -> %q", k, v) + } + cl.Type = c.str(cl.Type) + cl.Attr = c.str(cl.Attr) + cl.Value = c.str(cl.Value) // less likely to intern, but some (tags) do + + pn := c.br(cl.Permanode) + pm, ok := c.permanodes[pn] + if !ok { + pm = new(PermanodeMeta) + c.permanodes[pn] = pm + } + pm.Claims = append(pm.Claims, &cl) + if !c.building { + // Unless we're still starting up (at which we sort at + // the end instead), keep this sorted. + sort.Sort(camtypes.ClaimPtrsByDate(pm.Claims)) + } + + if vbr, ok := blob.Parse(cl.Value); ok { + c.claimBack[vbr] = append(c.claimBack[vbr], &cl) + } + return nil +} + +func (c *Corpus) mergeFileInfoRow(k, v []byte) error { + // fileinfo|sha1-579f7f246bd420d486ddeb0dadbb256cfaf8bf6b" "5|some-stuff.txt|" + pipe := bytes.IndexByte(k, '|') + if pipe < 0 { + return fmt.Errorf("unexpected fileinfo key %q", k) + } + br, ok := blob.ParseBytes(k[pipe+1:]) + if !ok { + return fmt.Errorf("unexpected fileinfo blobref in key %q", k) + } + + // TODO: could at least use strutil.ParseUintBytes to not stringify and retain + // the length bytes of v. 
+ c.ss = strutil.AppendSplitN(c.ss[:0], string(v), "|", 4) + if len(c.ss) != 3 && len(c.ss) != 4 { + return fmt.Errorf("unexpected fileinfo value %q", v) + } + size, err := strconv.ParseInt(c.ss[0], 10, 64) + if err != nil { + return fmt.Errorf("unexpected fileinfo value %q", v) + } + var wholeRef blob.Ref + if len(c.ss) == 4 && c.ss[3] != "" { // checking for "" because of special files such as symlinks. + var ok bool + wholeRef, ok = blob.Parse(urld(c.ss[3])) + if !ok { + return fmt.Errorf("invalid wholeRef blobref in value %q for fileinfo key %q", v, k) + } + } + c.mutateFileInfo(br, func(fi *camtypes.FileInfo) { + fi.Size = size + fi.FileName = c.str(urld(c.ss[1])) + fi.MIMEType = c.str(urld(c.ss[2])) + fi.WholeRef = wholeRef + }) + return nil +} + +func (c *Corpus) mergeFileTimesRow(k, v []byte) error { + if len(v) == 0 { + return nil + } + // "filetimes|sha1-579f7f246bd420d486ddeb0dadbb256cfaf8bf6b" "1970-01-01T00%3A02%3A03Z" + pipe := bytes.IndexByte(k, '|') + if pipe < 0 { + return fmt.Errorf("unexpected fileinfo key %q", k) + } + br, ok := blob.ParseBytes(k[pipe+1:]) + if !ok { + return fmt.Errorf("unexpected filetimes blobref in key %q", k) + } + c.ss = strutil.AppendSplitN(c.ss[:0], urld(string(v)), ",", -1) + times := c.ss + c.mutateFileInfo(br, func(fi *camtypes.FileInfo) { + updateFileInfoTimes(fi, times) + }) + return nil +} + +func (c *Corpus) mutateFileInfo(br blob.Ref, fn func(*camtypes.FileInfo)) { + br = c.br(br) + fi := c.files[br] // use zero value if not present + fn(&fi) + c.files[br] = fi +} + +func (c *Corpus) mergeImageSizeRow(k, v []byte) error { + br, okk := blob.ParseBytes(k[len("imagesize|"):]) + ii, okv := kvImageInfo(v) + if !okk || !okv { + return fmt.Errorf("bogus row %q = %q", k, v) + } + br = c.br(br) + c.imageInfo[br] = ii + return nil +} + +// "wholetofile|sha1-17b53c7c3e664d3613dfdce50ef1f2a09e8f04b5|sha1-fb88f3eab3acfcf3cfc8cd77ae4366f6f975d227" -> "1" +func (c *Corpus) mergeWholeToFileRow(k, v []byte) error { + pair := 
k[len("wholetofile|"):] + pipe := bytes.IndexByte(pair, '|') + if pipe < 0 { + return fmt.Errorf("bogus row %q = %q", k, v) + } + wholeRef, ok1 := blob.ParseBytes(pair[:pipe]) + fileRef, ok2 := blob.ParseBytes(pair[pipe+1:]) + if !ok1 || !ok2 { + return fmt.Errorf("bogus row %q = %q", k, v) + } + c.fileWholeRef[fileRef] = wholeRef + return nil +} + +// "mediatag|sha1-2b219be9d9691b4f8090e7ee2690098097f59566|album" = "Some+Album+Name" +func (c *Corpus) mergeMediaTag(k, v []byte) error { + f := strings.Split(string(k), "|") + if len(f) != 3 { + return fmt.Errorf("unexpected key %q", k) + } + wholeRef, ok := blob.Parse(f[1]) + if !ok { + return fmt.Errorf("failed to parse wholeref from key %q", k) + } + tm, ok := c.mediaTags[wholeRef] + if !ok { + tm = make(map[string]string) + c.mediaTags[wholeRef] = tm + } + tm[c.str(f[2])] = c.str(urld(string(v))) + return nil +} + +// "exifgps|sha1-17b53c7c3e664d3613dfdce50ef1f2a09e8f04b5" -> "-122.39897155555556|37.61952208333334" +func (c *Corpus) mergeEXIFGPSRow(k, v []byte) error { + wholeRef, ok := blob.ParseBytes(k[len("exifgps|"):]) + pipe := bytes.IndexByte(v, '|') + if pipe < 0 || !ok { + return fmt.Errorf("bogus row %q = %q", k, v) + } + lat, err := strconv.ParseFloat(string(v[:pipe]), 64) + long, err1 := strconv.ParseFloat(string(v[pipe+1:]), 64) + if err != nil || err1 != nil { + return fmt.Errorf("bogus row %q = %q", k, v) + } + c.gps[wholeRef] = latLong{lat, long} + return nil +} + +// This enables the blob.Parse fast path cache, which reduces CPU (via +// reduced GC from new garbage), but increases memory usage, even +// though it shouldn't. The GC should fully discard the brOfStr map +// (which we nil out at the end of parsing), but the Go GC doesn't +// seem to clear it all. +// TODO: investigate / file bugs. 
+const useBlobParseCache = false + +func (c *Corpus) blobParse(v string) (br blob.Ref, ok bool) { + if useBlobParseCache { + br, ok = c.brOfStr[v] + if ok { + return + } + } + return blob.Parse(v) +} + +// str returns s, interned. +func (c *Corpus) str(s string) string { + if s == "" { + return "" + } + if s, ok := c.strs[s]; ok { + return s + } + if c.strs == nil { + c.strs = make(map[string]string) + } + c.strs[s] = s + return s +} + +// br returns br, interned. +func (c *Corpus) br(br blob.Ref) blob.Ref { + if bm, ok := c.blobs[br]; ok { + c.brInterns++ + return bm.Ref + } + return br +} + +// *********** Reading from the corpus + +// EnumerateCamliBlobsLocked sends just camlistore meta blobs to ch. +// +// The Corpus must already be locked with RLock. +// +// If camType is empty, all camlistore blobs are sent, otherwise it specifies +// the camliType to send. +// ch is closed at the end. The err will either be nil or context.ErrCanceled. +func (c *Corpus) EnumerateCamliBlobsLocked(ctx *context.Context, camType string, ch chan<- camtypes.BlobMeta) error { + defer close(ch) + for t, m := range c.camBlobs { + if camType != "" && camType != t { + continue + } + for _, bm := range m { + select { + case ch <- *bm: + case <-ctx.Done(): + return context.ErrCanceled + } + } + } + return nil +} + +// EnumerateBlobMetaLocked sends all known blobs to ch, or until the context is canceled. +// +// The Corpus must already be locked with RLock. +func (c *Corpus) EnumerateBlobMetaLocked(ctx *context.Context, ch chan<- camtypes.BlobMeta) error { + defer close(ch) + for _, bm := range c.blobs { + select { + case ch <- *bm: + case <-ctx.Done(): + return context.ErrCanceled + } + } + return nil +} + +// pnAndTime is a value type wrapping a permanode blobref and its modtime. +// It's used by EnumeratePermanodesLastModified and EnumeratePermanodesCreated. 
+type pnAndTime struct { + pn blob.Ref + t time.Time +} + +type byPermanodeTime []pnAndTime + +func (s byPermanodeTime) Len() int { return len(s) } +func (s byPermanodeTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byPermanodeTime) Less(i, j int) bool { + if s[i].t.Equal(s[j].t) { + return s[i].pn.Less(s[j].pn) + } + return s[i].t.Before(s[j].t) +} + +type lazySortedPermanodes struct { + c *Corpus + pnTime func(blob.Ref) (time.Time, bool) // returns permanode's time (if any) to sort on + + mu sync.Mutex // guards sortedCache and ofGen + sortedCache []pnAndTime // nil if invalidated + sortedCacheReversed []pnAndTime // nil if invalidated + ofGen int64 // the Corpus.gen from which sortedCache was built +} + +func reversedCopy(original []pnAndTime) []pnAndTime { + l := len(original) + reversed := make([]pnAndTime, l) + for k, v := range original { + reversed[l-1-k] = v + } + return reversed +} + +// The Corpus must already be locked with RLock. +func (lsp *lazySortedPermanodes) sorted(reverse bool) []pnAndTime { + lsp.mu.Lock() + defer lsp.mu.Unlock() + if lsp.ofGen == lsp.c.gen { + // corpus hasn't changed -> caches are still valid, if they exist. 
+ if reverse { + if lsp.sortedCacheReversed != nil { + return lsp.sortedCacheReversed + } + if lsp.sortedCache != nil { + // using sortedCache to quickly build sortedCacheReversed + lsp.sortedCacheReversed = reversedCopy(lsp.sortedCache) + return lsp.sortedCacheReversed + } + } + if !reverse { + if lsp.sortedCache != nil { + return lsp.sortedCache + } + if lsp.sortedCacheReversed != nil { + // using sortedCacheReversed to quickly build sortedCache + lsp.sortedCache = reversedCopy(lsp.sortedCacheReversed) + return lsp.sortedCache + } + } + } + // invalidate the caches + lsp.sortedCache = nil + lsp.sortedCacheReversed = nil + pns := make([]pnAndTime, 0, len(lsp.c.permanodes)) + for pn := range lsp.c.permanodes { + if lsp.c.IsDeletedLocked(pn) { + continue + } + if pt, ok := lsp.pnTime(pn); ok { + pns = append(pns, pnAndTime{pn, pt}) + } + } + // and rebuild one of them + if reverse { + sort.Sort(sort.Reverse(byPermanodeTime(pns))) + lsp.sortedCacheReversed = pns + } else { + sort.Sort(byPermanodeTime(pns)) + lsp.sortedCache = pns + } + lsp.ofGen = lsp.c.gen + return pns +} + +// corpus must be (read) locked. +func (c *Corpus) sendPermanodes(ctx *context.Context, ch chan<- camtypes.BlobMeta, pns []pnAndTime) error { + for _, cand := range pns { + bm := c.blobs[cand.pn] + if bm == nil { + continue + } + select { + case ch <- *bm: + continue + case <-ctx.Done(): + return context.ErrCanceled + } + } + return nil +} + +// EnumeratePermanodesLastModified sends all permanodes, sorted by most recently modified first, to ch, +// or until ctx is done. +// +// The Corpus must already be locked with RLock. +func (c *Corpus) EnumeratePermanodesLastModifiedLocked(ctx *context.Context, ch chan<- camtypes.BlobMeta) error { + defer close(ch) + + return c.sendPermanodes(ctx, ch, c.permanodesByModtime.sorted(true)) +} + +// EnumeratePermanodesCreatedLocked sends all permanodes to ch, or until ctx is done. 
+// They are sorted using the contents creation date if any, the permanode modtime +// otherwise, and in the order specified by newestFirst. +// +// The Corpus must already be locked with RLock. +func (c *Corpus) EnumeratePermanodesCreatedLocked(ctx *context.Context, ch chan<- camtypes.BlobMeta, newestFirst bool) error { + defer close(ch) + + return c.sendPermanodes(ctx, ch, c.permanodesByTime.sorted(newestFirst)) +} + +func (c *Corpus) GetBlobMeta(br blob.Ref) (camtypes.BlobMeta, error) { + c.mu.RLock() + defer c.mu.RUnlock() + return c.GetBlobMetaLocked(br) +} + +func (c *Corpus) GetBlobMetaLocked(br blob.Ref) (camtypes.BlobMeta, error) { + bm, ok := c.blobs[br] + if !ok { + return camtypes.BlobMeta{}, os.ErrNotExist + } + return *bm, nil +} + +func (c *Corpus) KeyId(signer blob.Ref) (string, error) { + c.mu.RLock() + defer c.mu.RUnlock() + if v, ok := c.keyId[signer]; ok { + return v, nil + } + return "", sorted.ErrNotFound +} + +var ( + errUnsupportedNodeType = errors.New("unsupported nodeType") + errNoNodeAttr = errors.New("attribute not found") +) + +func (c *Corpus) pnTimeAttrLocked(pn blob.Ref, attr string) (t time.Time, ok bool) { + if v := c.PermanodeAttrValueLocked(pn, attr, time.Time{}, blob.Ref{}); v != "" { + if t, err := time.Parse(time.RFC3339, v); err == nil { + return t, true + } + } + return +} + +// PermanodeTimeLocked returns the time of the content in permanode. +func (c *Corpus) PermanodeTimeLocked(pn blob.Ref) (t time.Time, ok bool) { + // TODO(bradfitz): keep this time property cached on the permanode / files + // TODO(bradfitz): finish implmenting all these + + // Priorities: + // -- Permanode explicit "camliTime" property + // -- EXIF GPS time + // -- Exif camera time - this one is actually already in the FileInfo, + // because we use schema.FileTime (which returns the EXIF time, if available) + // to index the time when receiving a file. 
+ // -- File time + // -- File modtime + // -- camliContent claim set time + + if t, ok = c.pnTimeAttrLocked(pn, nodeattr.StartDate); ok { + return + } + if t, ok = c.pnTimeAttrLocked(pn, nodeattr.DateCreated); ok { + return + } + var fi camtypes.FileInfo + ccRef, ccTime, ok := c.pnCamliContentLocked(pn) + if ok { + fi, _ = c.files[ccRef] + } + if fi.Time != nil { + return time.Time(*fi.Time), true + } + + if t, ok = c.pnTimeAttrLocked(pn, nodeattr.DatePublished); ok { + return + } + if t, ok = c.pnTimeAttrLocked(pn, nodeattr.DateModified); ok { + return + } + if fi.ModTime != nil { + return time.Time(*fi.ModTime), true + } + if ok { + return ccTime, true + } + return time.Time{}, false +} + +// PermanodeAnyTimeLocked returns the time that best qualifies the permanode. +// It tries content-specific times first, the permanode modtime otherwise. +func (c *Corpus) PermanodeAnyTimeLocked(pn blob.Ref) (t time.Time, ok bool) { + if t, ok := c.PermanodeTimeLocked(pn); ok { + return t, ok + } + return c.PermanodeModtimeLocked(pn) +} + +func (c *Corpus) pnCamliContentLocked(pn blob.Ref) (cc blob.Ref, t time.Time, ok bool) { + // TODO(bradfitz): keep this property cached + pm, ok := c.permanodes[pn] + if !ok { + return + } + for _, cl := range pm.Claims { + if cl.Attr != "camliContent" { + continue + } + // TODO: pass down the 'PermanodeConstraint.At' parameter, and then do: if cl.Date.After(at) { continue } + switch cl.Type { + case string(schema.DelAttributeClaim): + cc = blob.Ref{} + t = time.Time{} + case string(schema.SetAttributeClaim): + cc = blob.ParseOrZero(cl.Value) + t = cl.Date + } + } + return cc, t, cc.Valid() + +} + +// PermanodeModtime returns the latest modification time of the given +// permanode. +// +// The ok value is true only if the permanode is known and has any +// non-deleted claims. A deleted claim is ignored and neither its +// claim date nor the date of the delete claim affect the modtime of +// the permanode. 
+func (c *Corpus) PermanodeModtime(pn blob.Ref) (t time.Time, ok bool) { + // TODO: figure out behavior wrt mutations by different people + c.mu.RLock() + defer c.mu.RUnlock() + return c.PermanodeModtimeLocked(pn) +} + +// PermanodeModtimeLocked is like PermanodeModtime but for when the Corpus is +// already locked via RLock. +func (c *Corpus) PermanodeModtimeLocked(pn blob.Ref) (t time.Time, ok bool) { + pm, ok := c.permanodes[pn] + if !ok { + return + } + + // Note: We intentionally don't try to derive any information + // (except the owner, elsewhere) from the permanode blob + // itself. Even though the permanode blob sometimes has the + // GPG signature time, we intentionally ignore it. + for _, cl := range pm.Claims { + if c.IsDeletedLocked(cl.BlobRef) { + continue + } + if cl.Date.After(t) { + t = cl.Date + } + } + return t, !t.IsZero() +} + +// AppendPermanodeAttrValues appends to dst all the values for the attribute +// attr set on permaNode. +// signerFilter is optional. +// dst must start with length 0 (laziness, mostly) +func (c *Corpus) AppendPermanodeAttrValues(dst []string, + permaNode blob.Ref, + attr string, + at time.Time, + signerFilter blob.Ref) []string { + c.mu.RLock() + defer c.mu.RUnlock() + return c.AppendPermanodeAttrValuesLocked(dst, permaNode, attr, at, signerFilter) +} + +// PermanodeAttrValueLocked returns a single-valued attribute or "". 
+func (c *Corpus) PermanodeAttrValueLocked(permaNode blob.Ref, + attr string, + at time.Time, + signerFilter blob.Ref) string { + pm, ok := c.permanodes[permaNode] + if !ok { + return "" + } + if at.IsZero() { + at = time.Now() + } + var v string + for _, cl := range pm.Claims { + if cl.Attr != attr || cl.Date.After(at) { + continue + } + if signerFilter.Valid() && signerFilter != cl.Signer { + continue + } + switch cl.Type { + case string(schema.DelAttributeClaim): + if cl.Value == "" { + v = "" + } else if v == cl.Value { + v = "" + } + case string(schema.SetAttributeClaim): + v = cl.Value + case string(schema.AddAttributeClaim): + if v == "" { + v = cl.Value + } + } + } + return v +} + +func (c *Corpus) AppendPermanodeAttrValuesLocked(dst []string, + permaNode blob.Ref, + attr string, + at time.Time, + signerFilter blob.Ref) []string { + if len(dst) > 0 { + panic("len(dst) must be 0") + } + pm, ok := c.permanodes[permaNode] + if !ok { + return dst + } + if at.IsZero() { + at = time.Now() + } + for _, cl := range pm.Claims { + if cl.Attr != attr || cl.Date.After(at) { + continue + } + if signerFilter.Valid() && signerFilter != cl.Signer { + continue + } + switch cl.Type { + case string(schema.DelAttributeClaim): + if cl.Value == "" { + dst = dst[:0] // delete all + } else { + for i := 0; i < len(dst); i++ { + v := dst[i] + if v == cl.Value { + copy(dst[i:], dst[i+1:]) + dst = dst[:len(dst)-1] + i-- + } + } + } + case string(schema.SetAttributeClaim): + dst = append(dst[:0], cl.Value) + case string(schema.AddAttributeClaim): + dst = append(dst, cl.Value) + } + } + return dst +} + +func (c *Corpus) AppendClaims(dst []camtypes.Claim, permaNode blob.Ref, + signerFilter blob.Ref, + attrFilter string) ([]camtypes.Claim, error) { + c.mu.RLock() + defer c.mu.RUnlock() + pm, ok := c.permanodes[permaNode] + if !ok { + return nil, nil + } + for _, cl := range pm.Claims { + if c.IsDeletedLocked(cl.BlobRef) { + continue + } + if signerFilter.Valid() && cl.Signer != 
signerFilter { + continue + } + if attrFilter != "" && cl.Attr != attrFilter { + continue + } + dst = append(dst, *cl) + } + return dst, nil +} + +func (c *Corpus) GetFileInfo(fileRef blob.Ref) (fi camtypes.FileInfo, err error) { + c.mu.RLock() + defer c.mu.RUnlock() + return c.GetFileInfoLocked(fileRef) +} + +func (c *Corpus) GetFileInfoLocked(fileRef blob.Ref) (fi camtypes.FileInfo, err error) { + fi, ok := c.files[fileRef] + if !ok { + err = os.ErrNotExist + } + return +} + +func (c *Corpus) GetImageInfo(fileRef blob.Ref) (ii camtypes.ImageInfo, err error) { + c.mu.RLock() + defer c.mu.RUnlock() + return c.GetImageInfoLocked(fileRef) +} + +func (c *Corpus) GetImageInfoLocked(fileRef blob.Ref) (ii camtypes.ImageInfo, err error) { + ii, ok := c.imageInfo[fileRef] + if !ok { + err = os.ErrNotExist + } + return +} + +func (c *Corpus) GetMediaTags(fileRef blob.Ref) (map[string]string, error) { + c.mu.RLock() + defer c.mu.RUnlock() + return c.GetMediaTagsLocked(fileRef) +} + +func (c *Corpus) GetMediaTagsLocked(fileRef blob.Ref) (map[string]string, error) { + wholeRef, ok := c.fileWholeRef[fileRef] + if !ok { + return nil, os.ErrNotExist + } + tags, ok := c.mediaTags[wholeRef] + if !ok { + return nil, os.ErrNotExist + } + return tags, nil +} + +func (c *Corpus) GetWholeRefLocked(fileRef blob.Ref) (wholeRef blob.Ref, ok bool) { + wholeRef, ok = c.fileWholeRef[fileRef] + return +} + +func (c *Corpus) FileLatLongLocked(fileRef blob.Ref) (lat, long float64, ok bool) { + wholeRef, ok := c.fileWholeRef[fileRef] + if !ok { + return + } + ll, ok := c.gps[wholeRef] + if !ok { + return + } + return ll.lat, ll.long, true +} + +// zero value of at means current +func (c *Corpus) PermanodeLatLongLocked(pn blob.Ref, at time.Time) (lat, long float64, ok bool) { + nodeType := c.PermanodeAttrValueLocked(pn, "camliNodeType", at, blob.Ref{}) + if nodeType == "" { + return + } + // TODO: make these pluggable, e.g. registered from an importer or something? 
+ // How will that work when they're out-of-process? + if nodeType == "foursquare.com:checkin" { + venuePn, hasVenue := blob.Parse(c.PermanodeAttrValueLocked(pn, "foursquareVenuePermanode", at, blob.Ref{})) + if !hasVenue { + return + } + return c.PermanodeLatLongLocked(venuePn, at) + } + if nodeType == "foursquare.com:venue" || nodeType == "twitter.com:tweet" { + var err error + lat, err = strconv.ParseFloat(c.PermanodeAttrValueLocked(pn, "latitude", at, blob.Ref{}), 64) + if err != nil { + return + } + long, err = strconv.ParseFloat(c.PermanodeAttrValueLocked(pn, "longitude", at, blob.Ref{}), 64) + if err != nil { + return + } + return lat, long, true + } + return +} + +// ForeachClaimBackLocked calls fn for each claim with a value referencing br. +// If at is zero, all claims are yielded. +// If at is non-zero, claims after that point are skipped. +// If fn returns false, iteration ends. +// Iteration is in an undefined order. +func (c *Corpus) ForeachClaimBackLocked(value blob.Ref, at time.Time, fn func(*camtypes.Claim) bool) { + for _, cl := range c.claimBack[value] { + if !at.IsZero() && cl.Date.After(at) { + continue + } + if !fn(cl) { + return + } + } +} + +// PermanodeHasAttrValueLocked reports whether the permanode pn at +// time at (zero means now) has the given attribute with the given +// value. If the attribute is multi-valued, any may match. 
+func (c *Corpus) PermanodeHasAttrValueLocked(pn blob.Ref, at time.Time, attr, val string) bool { + pm, ok := c.permanodes[pn] + if !ok { + return false + } + if at.IsZero() { + at = time.Now() + } + ret := false + for _, cl := range pm.Claims { + if cl.Attr != attr { + continue + } + if cl.Date.After(at) { + break + } + switch cl.Type { + case string(schema.DelAttributeClaim): + if cl.Value == "" || cl.Value == val { + ret = false + } + case string(schema.SetAttributeClaim): + ret = (cl.Value == val) + case string(schema.AddAttributeClaim): + if cl.Value == val { + return true + } + } + } + return ret +} + +// SetVerboseCorpusLogging controls corpus setup verbosity. It's on by default +// but used to disable verbose logging in tests. +func SetVerboseCorpusLogging(v bool) { + logCorpusStats = v +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/corpus_bench_test.go b/vendor/github.com/camlistore/camlistore/pkg/index/corpus_bench_test.go new file mode 100644 index 00000000..835a79d1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/corpus_bench_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2013 The Camlistore AUTHORS + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package index_test + +import ( + "fmt" + "sync" + "testing" + "time" + + "camlistore.org/pkg/index" + "camlistore.org/pkg/index/indextest" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/test" +) + +var ( + buildKvOnce sync.Once + kvForBenchmark sorted.KeyValue +) + +func BenchmarkCorpusFromStorage(b *testing.B) { + defer test.TLog(b)() + buildKvOnce.Do(func() { + kvForBenchmark = sorted.NewMemoryKeyValue() + idx, err := index.New(kvForBenchmark) + if err != nil { + b.Fatal(err) + } + id := indextest.NewIndexDeps(idx) + id.Fataler = b + for i := 0; i < 10; i++ { + fileRef, _ := id.UploadFile("file.txt", fmt.Sprintf("some file %d", i), time.Unix(1382073153, 0)) + pn := id.NewPlannedPermanode(fmt.Sprint(i)) + id.SetAttribute(pn, "camliContent", fileRef.String()) + } + }) + defer index.SetVerboseCorpusLogging(true) + index.SetVerboseCorpusLogging(false) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := index.NewCorpusFromStorage(kvForBenchmark) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/corpus_test.go b/vendor/github.com/camlistore/camlistore/pkg/index/corpus_test.go new file mode 100644 index 00000000..8227a0af --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/corpus_test.go @@ -0,0 +1,485 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package index_test + +import ( + "fmt" + "reflect" + "testing" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/index" + "camlistore.org/pkg/index/indextest" + "camlistore.org/pkg/types" + "camlistore.org/pkg/types/camtypes" +) + +func TestCorpusAppendPermanodeAttrValues(t *testing.T) { + c := index.ExpNewCorpus() + pn := blob.MustParse("abc-123") + tm := time.Unix(99, 0) + claim := func(verb, attr, val string) *camtypes.Claim { + tm = tm.Add(time.Second) + return &camtypes.Claim{ + Type: verb + "-attribute", + Attr: attr, + Value: val, + Date: tm, + } + } + s := func(s ...string) []string { return s } + + c.SetClaims(pn, &index.PermanodeMeta{ + Claims: []*camtypes.Claim{ + claim("set", "foo", "foov"), // time 100 + + claim("add", "tag", "a"), // time 101 + claim("add", "tag", "b"), // time 102 + claim("del", "tag", ""), + claim("add", "tag", "c"), + claim("add", "tag", "d"), + claim("add", "tag", "e"), + claim("del", "tag", "d"), + + claim("add", "DelAll", "a"), + claim("add", "DelAll", "b"), + claim("add", "DelAll", "c"), + claim("del", "DelAll", ""), + + claim("add", "DelOne", "a"), + claim("add", "DelOne", "b"), + claim("add", "DelOne", "c"), + claim("add", "DelOne", "d"), + claim("del", "DelOne", "d"), + claim("del", "DelOne", "a"), + + claim("add", "SetAfterAdd", "a"), + claim("add", "SetAfterAdd", "b"), + claim("set", "SetAfterAdd", "setv"), + }, + }) + + tests := []struct { + attr string + want []string + t time.Time + }{ + {attr: "not-exist", want: s()}, + {attr: "DelAll", want: s()}, + {attr: "DelOne", want: s("b", "c")}, + {attr: "foo", want: s("foov")}, + {attr: "tag", want: s("c", "e")}, + {attr: "tag", want: s("a", "b"), t: time.Unix(102, 0)}, + {attr: "SetAfterAdd", want: s("setv")}, + } + for i, tt := range tests { + got := c.AppendPermanodeAttrValues(nil, pn, tt.attr, tt.t, blob.Ref{}) + if len(got) == 0 && len(tt.want) == 0 { + continue + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. 
attr %q = %q; want %q", + i, tt.attr, got, tt.want) + } + } + +} + +func TestKVClaimAllocs(t *testing.T) { + n := testing.AllocsPerRun(20, func() { + index.ExpKvClaim("claim|sha1-b380b3080f9c71faa5c1d82bbd4d583a473bc77d|2931A67C26F5ABDA|2011-11-28T01:32:37.000123456Z|sha1-b3d93daee62e40d36237ff444022f42d7d0e43f2", + "set-attribute|tag|foo1|sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007", + blob.Parse) + }) + t.Logf("%v allocations", n) +} + +func TestKVClaim(t *testing.T) { + tests := []struct { + k, v string + ok bool + want camtypes.Claim + }{ + { + k: "claim|sha1-b380b3080f9c71faa5c1d82bbd4d583a473bc77d|2931A67C26F5ABDA|2011-11-28T01:32:37.000123456Z|sha1-b3d93daee62e40d36237ff444022f42d7d0e43f2", + v: "set-attribute|tag|foo1|sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007", + ok: true, + want: camtypes.Claim{ + BlobRef: blob.MustParse("sha1-b3d93daee62e40d36237ff444022f42d7d0e43f2"), + Signer: blob.MustParse("sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007"), + Permanode: blob.MustParse("sha1-b380b3080f9c71faa5c1d82bbd4d583a473bc77d"), + Type: "set-attribute", + Attr: "tag", + Value: "foo1", + Date: time.Time(types.ParseTime3339OrZero("2011-11-28T01:32:37.000123456Z")), + }, + }, + } + for _, tt := range tests { + got, ok := index.ExpKvClaim(tt.k, tt.v, blob.Parse) + if ok != tt.ok { + t.Errorf("kvClaim(%q, %q) = ok %v; want %v", tt.k, tt.v, ok, tt.ok) + continue + } + if got != tt.want { + t.Errorf("kvClaim(%q, %q) = %+v; want %+v", tt.k, tt.v, got, tt.want) + continue + } + } +} + +func TestDeletePermanode_Modtime(t *testing.T) { + testDeletePermanodes(t, + func(c *index.Corpus, ctx *context.Context, ch chan<- camtypes.BlobMeta) error { + return c.EnumeratePermanodesLastModifiedLocked(ctx, ch) + }, + ) +} + +func TestDeletePermanode_CreateTime(t *testing.T) { + testDeletePermanodes(t, + func(c *index.Corpus, ctx *context.Context, ch chan<- camtypes.BlobMeta) error { + return c.EnumeratePermanodesCreatedLocked(ctx, ch, true) + }, + ) +} + +func 
testDeletePermanodes(t *testing.T, + enumFunc func(*index.Corpus, *context.Context, chan<- camtypes.BlobMeta) error) { + idx := index.NewMemoryIndex() + idxd := indextest.NewIndexDeps(idx) + + foopn := idxd.NewPlannedPermanode("foo") + idxd.SetAttribute(foopn, "tag", "foo") + barpn := idxd.NewPlannedPermanode("bar") + idxd.SetAttribute(barpn, "tag", "bar") + bazpn := idxd.NewPlannedPermanode("baz") + idxd.SetAttribute(bazpn, "tag", "baz") + idxd.Delete(barpn) + c, err := idxd.Index.KeepInMemory() + if err != nil { + t.Fatalf("error slurping index to memory: %v", err) + } + + // check that we initially only find permanodes foo and baz, + // because bar is already marked as deleted. + want := []blob.Ref{foopn, bazpn} + ch := make(chan camtypes.BlobMeta, 10) + var got []camtypes.BlobMeta + errc := make(chan error, 1) + c.RLock() + go func() { errc <- enumFunc(c, context.TODO(), ch) }() + for blobMeta := range ch { + got = append(got, blobMeta) + } + err = <-errc + c.RUnlock() + if err != nil { + t.Fatalf("Could not enumerate permanodes: %v", err) + } + if len(got) != len(want) { + t.Fatalf("Saw %d permanodes in corpus; want %d", len(got), len(want)) + } + for _, bm := range got { + found := false + for _, perm := range want { + if bm.Ref == perm { + found = true + break + } + } + if !found { + t.Fatalf("permanode %v was not found in corpus", bm.Ref) + } + } + + // now add a delete claim for permanode baz, and check that we're only left with foo permanode + delbaz := idxd.Delete(bazpn) + want = []blob.Ref{foopn} + got = got[:0] + ch = make(chan camtypes.BlobMeta, 10) + c.RLock() + go func() { errc <- enumFunc(c, context.TODO(), ch) }() + for blobMeta := range ch { + got = append(got, blobMeta) + } + err = <-errc + c.RUnlock() + if err != nil { + t.Fatalf("Could not enumerate permanodes: %v", err) + } + if len(got) != len(want) { + t.Fatalf("Saw %d permanodes in corpus; want %d", len(got), len(want)) + } + if got[0].Ref != foopn { + t.Fatalf("Wrong permanode found in 
corpus. Wanted %v, got %v", foopn, got[0].Ref) + } + + // baz undeletion. delete delbaz. + idxd.Delete(delbaz) + want = []blob.Ref{foopn, bazpn} + got = got[:0] + ch = make(chan camtypes.BlobMeta, 10) + c.RLock() + go func() { errc <- enumFunc(c, context.TODO(), ch) }() + for blobMeta := range ch { + got = append(got, blobMeta) + } + err = <-errc + c.RUnlock() + if err != nil { + t.Fatalf("Could not enumerate permanodes: %v", err) + } + if len(got) != len(want) { + t.Fatalf("Saw %d permanodes in corpus; want %d", len(got), len(want)) + } + for _, bm := range got { + found := false + for _, perm := range want { + if bm.Ref == perm { + found = true + break + } + } + if !found { + t.Fatalf("permanode %v was not found in corpus", bm.Ref) + } + } +} + +func TestEnumerateOrder_Modtime(t *testing.T) { + testEnumerateOrder(t, + func(c *index.Corpus, ctx *context.Context, ch chan<- camtypes.BlobMeta) error { + return c.EnumeratePermanodesLastModifiedLocked(ctx, ch) + }, + modtimeOrder, + ) +} + +func TestEnumerateOrder_CreateTime(t *testing.T) { + testEnumerateOrder(t, + func(c *index.Corpus, ctx *context.Context, ch chan<- camtypes.BlobMeta) error { + return c.EnumeratePermanodesCreatedLocked(ctx, ch, true) + }, + createOrder, + ) +} + +const ( + modtimeOrder = iota + createOrder +) + +func testEnumerateOrder(t *testing.T, + enumFunc func(*index.Corpus, *context.Context, chan<- camtypes.BlobMeta) error, + order int) { + idx := index.NewMemoryIndex() + idxd := indextest.NewIndexDeps(idx) + + // permanode with no contents + foopn := idxd.NewPlannedPermanode("foo") + idxd.SetAttribute(foopn, "tag", "foo") + // permanode with file contents + // we set the time of the contents 1 second older than the modtime of foopn + fooModTime := idxd.LastTime() + fileTime := fooModTime.Add(-1 * time.Second) + fileRef, _ := idxd.UploadFile("foo.html", "I am an html file.", fileTime) + barpn := idxd.NewPlannedPermanode("bar") + idxd.SetAttribute(barpn, "camliContent", fileRef.String()) + + c, 
err := idxd.Index.KeepInMemory() + if err != nil { + t.Fatalf("error slurping index to memory: %v", err) + } + + // check that we get a different order whether with enumerate according to + // contents time, or to permanode modtime. + var want []blob.Ref + if order == modtimeOrder { + // modtime. + want = []blob.Ref{barpn, foopn} + } else { + // creation time. + want = []blob.Ref{foopn, barpn} + } + ch := make(chan camtypes.BlobMeta, 10) + var got []camtypes.BlobMeta + errc := make(chan error, 1) + c.RLock() + go func() { errc <- enumFunc(c, context.TODO(), ch) }() + for blobMeta := range ch { + got = append(got, blobMeta) + } + err = <-errc + c.RUnlock() + if err != nil { + t.Fatalf("Could not enumerate permanodes: %v", err) + } + if len(got) != len(want) { + t.Fatalf("Saw %d permanodes in corpus; want %d", len(got), len(want)) + } + for k, v := range got { + if v.Ref != want[k] { + t.Fatalf("Wrong result from enumeration. Got %v, wanted %v.", v.Ref, want[k]) + } + } +} + +// should be run with -race +func TestCacheSortedPermanodes_ModtimeRace(t *testing.T) { + testCacheSortedPermanodesRace(t, + func(c *index.Corpus, ctx *context.Context, ch chan<- camtypes.BlobMeta) error { + return c.EnumeratePermanodesLastModifiedLocked(ctx, ch) + }, + ) +} + +// should be run with -race +func TestCacheSortedPermanodes_CreateTimeRace(t *testing.T) { + testCacheSortedPermanodesRace(t, + func(c *index.Corpus, ctx *context.Context, ch chan<- camtypes.BlobMeta) error { + return c.EnumeratePermanodesCreatedLocked(ctx, ch, true) + }, + ) +} + +func testCacheSortedPermanodesRace(t *testing.T, + enumFunc func(*index.Corpus, *context.Context, chan<- camtypes.BlobMeta) error) { + idx := index.NewMemoryIndex() + idxd := indextest.NewIndexDeps(idx) + idxd.Fataler = t + c, err := idxd.Index.KeepInMemory() + if err != nil { + t.Fatalf("error slurping index to memory: %v", err) + } + donec := make(chan struct{}) + go func() { + for i := 0; i < 100; i++ { + nth := fmt.Sprintf("%d", i) + pn := 
idxd.NewPlannedPermanode(nth) + idxd.SetAttribute(pn, "tag", nth) + } + donec <- struct{}{} + }() + go func() { + for i := 0; i < 10; i++ { + ch := make(chan camtypes.BlobMeta, 10) + errc := make(chan error, 1) + c.RLock() + go func() { errc <- enumFunc(c, context.TODO(), ch) }() + for _ = range ch { + } + err := <-errc + c.RUnlock() + if err != nil { + t.Fatalf("Could not enumerate permanodes: %v", err) + } + } + donec <- struct{}{} + }() + <-donec + <-donec +} + +func TestLazySortedPermanodes(t *testing.T) { + idx := index.NewMemoryIndex() + idxd := indextest.NewIndexDeps(idx) + idxd.Fataler = t + c, err := idxd.Index.KeepInMemory() + if err != nil { + t.Fatalf("error slurping index to memory: %v", err) + } + + lsp := c.Exp_LSPByTime(false) + if len(lsp) != 0 { + t.Fatal("LazySortedPermanodes cache should be empty on startup") + } + + pn := idxd.NewPlannedPermanode("one") + idxd.SetAttribute(pn, "tag", "one") + + enum := func(reverse bool) { + ch := make(chan camtypes.BlobMeta, 10) + errc := make(chan error, 1) + c.RLock() + go func() { errc <- c.EnumeratePermanodesCreatedLocked(context.TODO(), ch, reverse) }() + for _ = range ch { + } + err := <-errc + c.RUnlock() + if err != nil { + t.Fatalf("Could not enumerate permanodes: %v", err) + } + } + enum(false) + lsp = c.Exp_LSPByTime(false) + if len(lsp) != 1 { + t.Fatalf("LazySortedPermanodes after 1st enum: got %v items, wanted 1", len(lsp)) + } + lsp = c.Exp_LSPByTime(true) + if len(lsp) != 0 { + t.Fatalf("LazySortedPermanodes reversed after 1st enum: got %v items, wanted 0", len(lsp)) + } + + enum(true) + lsp = c.Exp_LSPByTime(false) + if len(lsp) != 1 { + t.Fatalf("LazySortedPermanodes after 2nd enum: got %v items, wanted 1", len(lsp)) + } + lsp = c.Exp_LSPByTime(true) + if len(lsp) != 1 { + t.Fatalf("LazySortedPermanodes reversed after 2nd enum: got %v items, wanted 1", len(lsp)) + } + + pn = idxd.NewPlannedPermanode("two") + idxd.SetAttribute(pn, "tag", "two") + + enum(true) + lsp = c.Exp_LSPByTime(false) + 
if len(lsp) != 0 { + t.Fatalf("LazySortedPermanodes after 2nd permanode: got %v items, wanted 0 because of cache invalidation", len(lsp)) + } + lsp = c.Exp_LSPByTime(true) + if len(lsp) != 2 { + t.Fatalf("LazySortedPermanodes reversed after 2nd permanode: got %v items, wanted 2", len(lsp)) + } + + pn = idxd.NewPlannedPermanode("three") + idxd.SetAttribute(pn, "tag", "three") + + enum(false) + lsp = c.Exp_LSPByTime(true) + if len(lsp) != 0 { + t.Fatalf("LazySortedPermanodes reversed after 3rd permanode: got %v items, wanted 0 because of cache invalidation", len(lsp)) + } + lsp = c.Exp_LSPByTime(false) + if len(lsp) != 3 { + t.Fatalf("LazySortedPermanodes after 3rd permanode: got %v items, wanted 3", len(lsp)) + } + + enum(true) + lsp = c.Exp_LSPByTime(false) + if len(lsp) != 3 { + t.Fatalf("LazySortedPermanodes after 5th enum: got %v items, wanted 3", len(lsp)) + } + lsp = c.Exp_LSPByTime(true) + if len(lsp) != 3 { + t.Fatalf("LazySortedPermanodes reversed after 5th enum: got %v items, wanted 3", len(lsp)) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/doc.go b/vendor/github.com/camlistore/camlistore/pkg/index/doc.go new file mode 100644 index 00000000..bd61bcbe --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/doc.go @@ -0,0 +1,46 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package index provides a generic indexing system on top of the abstract Storage interface. 
+ +The following keys & values are populated by receiving blobs and queried +for search operations: + + * Recent Permanodes + "recpn|||" -> "" + where reverse-modtime flips each digit to '9'- and prepends "rt" (for reverse time) + "2011-11-27T01:23:45Z" = "rt7988-88-72T98:76:54Z" + + * signer blobref of ascii public key -> gpg key id + "signerkeyid:sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007" = "2931A67C26F5ABDA" + + * PermanodeOfSignerAttrValue: + "signerattrvalue|||||" -> "" + e.g. + "signerattrvalue|2931A67C26F5ABDA|camliRoot|rootval|"+ + "rt7988-88-71T98:67:60.999876543Z|sha1-bf115940641f1aae2e007edcf36b3b18c17256d9" = + "sha1-7a14cce982aa73ab519e63050f82e2a2adfcf039" + + * Other: + "meta:" -> "|" + "have:" -> "" (used for enumeration, which doesn't need mime type) + + * For GetOwnerClaims(permanode, signer): + "claim||||" -> "||" + +*/ +package index diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/enumstat.go b/vendor/github.com/camlistore/camlistore/pkg/index/enumstat.go new file mode 100644 index 00000000..969161f9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/enumstat.go @@ -0,0 +1,96 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package index + +import ( + "fmt" + "strconv" + "strings" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/sorted" +) + +func (ix *Index) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) (err error) { + defer close(dest) + it := ix.s.Find("have:"+after, "have~") + defer func() { + closeErr := it.Close() + if err == nil { + err = closeErr + } + }() + + afterKey := "have:" + after + n := int(0) + for n < limit && it.Next() { + k := it.Key() + if k <= afterKey { + continue + } + if !strings.HasPrefix(k, "have:") { + break + } + n++ + br, ok := blob.Parse(k[len("have:"):]) + if !ok { + continue + } + size, err := parseHaveVal(it.Value()) + if err == nil { + select { + case dest <- blob.SizedRef{br, uint32(size)}: + case <-ctx.Done(): + return context.ErrCanceled + } + } + } + return nil +} + +func (ix *Index) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error { + for _, br := range blobs { + key := "have:" + br.String() + v, err := ix.s.Get(key) + if err == sorted.ErrNotFound { + continue + } + if err != nil { + return fmt.Errorf("error looking up key %q: %v", key, err) + } + size, err := parseHaveVal(v) + if err != nil { + return fmt.Errorf("invalid size for key %q = %q", key, v) + } + dest <- blob.SizedRef{br, uint32(size)} + } + return nil +} + +// parseHaveVal takes the value part of an "have" index row and returns +// the blob size found in that value. 
Examples: +// parseHaveVal("324|indexed") == 324 +// parseHaveVal("654") == 654 +func parseHaveVal(val string) (size uint64, err error) { + pipei := strings.Index(val, "|") + if pipei >= 0 { + // filter out the "indexed" suffix + val = val[:pipei] + } + return strconv.ParseUint(val, 10, 32) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/export_test.go b/vendor/github.com/camlistore/camlistore/pkg/index/export_test.go new file mode 100644 index 00000000..d2148d20 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/export_test.go @@ -0,0 +1,121 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package index + +import ( + "testing" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/types/camtypes" +) + +func ExpReverseTimeString(s string) string { + return reverseTimeString(s) +} + +func ExpUnreverseTimeString(s string) string { + return unreverseTimeString(s) +} + +func ExpNewCorpus() *Corpus { + return newCorpus() +} + +func (c *Corpus) Exp_mergeFileInfoRow(k, v string) error { + return c.mergeFileInfoRow([]byte(k), []byte(v)) +} + +func (c *Corpus) Exp_files(br blob.Ref) camtypes.FileInfo { + return c.files[br] +} + +func ExpKvClaim(k, v string, blobParse func(string) (blob.Ref, bool)) (c camtypes.Claim, ok bool) { + return kvClaim(k, v, blobParse) +} + +func (c *Corpus) SetClaims(pn blob.Ref, claims *PermanodeMeta) { + c.permanodes[pn] = claims +} + +func (x *Index) NeededMapsForTest() (needs, neededBy map[blob.Ref][]blob.Ref, ready map[blob.Ref]bool) { + return x.needs, x.neededBy, x.readyReindex +} + +func Exp_missingKey(have, missing blob.Ref) string { + return keyMissing.Key(have, missing) +} + +func Exp_schemaVersion() int { return requiredSchemaVersion } + +func (x *Index) Exp_noteBlobIndexed(br blob.Ref) { + x.noteBlobIndexed(br) +} + +func (x *Index) Exp_AwaitReindexing(t *testing.T) { + deadline := time.Now().Add(5 * time.Second) + for time.Now().Before(deadline) { + x.mu.Lock() + n := len(x.readyReindex) + x.mu.Unlock() + if n == 0 { + return + } + time.Sleep(50 * time.Millisecond) + } + t.Fatal("timeout waiting for readyReindex to drain") +} + +type ExpPnAndTime pnAndTime + +// Exp_LSPByTime returns the sorted cache lazySortedPermanodes for +// permanodesByTime (or the reverse sorted one). 
+func (c *Corpus) Exp_LSPByTime(reverse bool) []ExpPnAndTime { + if c.permanodesByTime == nil { + return nil + } + var pn []ExpPnAndTime + if reverse { + if c.permanodesByTime.sortedCacheReversed != nil { + for _, v := range c.permanodesByTime.sortedCacheReversed { + pn = append(pn, ExpPnAndTime(v)) + } + return pn + } + } else { + if c.permanodesByTime.sortedCache != nil { + for _, v := range c.permanodesByTime.sortedCache { + pn = append(pn, ExpPnAndTime(v)) + } + return pn + } + } + return nil +} + +func (x *Index) Exp_BlobSource() blobserver.FetcherEnumerator { + x.mu.Lock() + defer x.mu.Unlock() + return x.blobSource +} + +func (x *Index) Exp_FixMissingWholeRef(fetcher blob.Fetcher) (err error) { + return x.fixMissingWholeRef(fetcher) +} + +var Exp_ErrMissingWholeRef = errMissingWholeRef diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/index.go b/vendor/github.com/camlistore/camlistore/pkg/index/index.go new file mode 100644 index 00000000..420ef5ed --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/index.go @@ -0,0 +1,1516 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package index + +import ( + "bytes" + "errors" + "fmt" + "io" + "log" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/context" + "camlistore.org/pkg/env" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/strutil" + "camlistore.org/pkg/types" + "camlistore.org/pkg/types/camtypes" +) + +func init() { + blobserver.RegisterStorageConstructor("index", newFromConfig) +} + +type Index struct { + *blobserver.NoImplStorage + + s sorted.KeyValue + + KeyFetcher blob.Fetcher // for verifying claims + + // TODO(mpl): do not init and use deletes when we have a corpus. Since corpus has its own deletes now, they are redundant. + + // deletes is a cache to keep track of the deletion status (deleted vs undeleted) + // of the blobs in the index. It makes for faster reads than the otherwise + // recursive calls on the index. + deletes *deletionCache + + corpus *Corpus // or nil, if not being kept in memory + + mu sync.RWMutex // guards following + // needs maps from a blob to the missing blobs it needs to + // finish indexing. + needs map[blob.Ref][]blob.Ref + // neededBy is the inverse of needs. The keys are missing blobs + // and the value(s) are blobs waiting to be reindexed. + neededBy map[blob.Ref][]blob.Ref + readyReindex map[blob.Ref]bool // set of things ready to be re-indexed + oooRunning bool // whether outOfOrderIndexerLoop is running. + // blobSource is used for fetching blobs when indexing files and other + // blobs types that reference other objects. + // The only write access to blobSource should be its initialization (transition + // from nil to non-nil), once, and protected by mu. 
+ blobSource blobserver.FetcherEnumerator + + tickleOoo chan bool // tickle out-of-order reindex loop, whenever readyReindex is added to +} + +var ( + _ blobserver.Storage = (*Index)(nil) + _ Interface = (*Index)(nil) +) + +var aboutToReindex = false + +// SetImpendingReindex notes that the user ran the camlistored binary with the --reindex flag. +// Because the index is about to be wiped, schema version checks should be suppressed. +func SetImpendingReindex() { + // TODO: remove this function, once we refactor how indexes are created. + // They'll probably not all have their own storage constructor registered. + aboutToReindex = true +} + +// MustNew is wraps New and fails with a Fatal error on t if New +// returns an error. +func MustNew(t types.TB, s sorted.KeyValue) *Index { + ix, err := New(s) + if err != nil { + t.Fatalf("Error creating index: %v", err) + } + return ix +} + +// InitBlobSource sets the index's blob source and starts the background +// out-of-order indexing loop. It panics if the blobSource is already set. +// If the index's key fetcher is nil, it is also set to the blobSource +// argument. +func (x *Index) InitBlobSource(blobSource blobserver.FetcherEnumerator) { + x.mu.Lock() + defer x.mu.Unlock() + if x.blobSource != nil { + panic("blobSource of Index already set") + } + x.blobSource = blobSource + if x.oooRunning { + panic("outOfOrderIndexerLoop should never have previously started without a blobSource") + } + if x.KeyFetcher == nil { + x.KeyFetcher = blobSource + } + if disableOoo, _ := strconv.ParseBool(os.Getenv("CAMLI_TESTREINDEX_DISABLE_OOO")); disableOoo { + // For Reindex test in pkg/index/indextest/tests.go + return + } + go x.outOfOrderIndexerLoop() +} + +// New returns a new index using the provided key/value storage implementation. 
+func New(s sorted.KeyValue) (*Index, error) { + idx := &Index{ + s: s, + needs: make(map[blob.Ref][]blob.Ref), + neededBy: make(map[blob.Ref][]blob.Ref), + readyReindex: make(map[blob.Ref]bool), + tickleOoo: make(chan bool, 1), + } + if aboutToReindex { + idx.deletes = newDeletionCache() + return idx, nil + } + + schemaVersion := idx.schemaVersion() + switch { + case schemaVersion == 0 && idx.isEmpty(): + // New index. + err := idx.s.Set(keySchemaVersion.name, fmt.Sprint(requiredSchemaVersion)) + if err != nil { + return nil, fmt.Errorf("Could not write index schema version %q: %v", requiredSchemaVersion, err) + } + case schemaVersion != requiredSchemaVersion: + tip := "" + if env.IsDev() { + // Good signal that we're using the devcam server, so help out + // the user with a more useful tip: + tip = `(For the dev server, run "devcam server --wipe" to wipe both your blobs and index)` + } else { + if is4To5SchemaBump(schemaVersion) { + return idx, errMissingWholeRef + } + tip = "Run 'camlistored --reindex' (it might take awhile, but shows status). Alternative: 'camtool dbinit' (or just delete the file for a file based index), and then 'camtool sync --all'" + } + return nil, fmt.Errorf("index schema version is %d; required one is %d. You need to reindex. %s", + schemaVersion, requiredSchemaVersion, tip) + } + if err := idx.initDeletesCache(); err != nil { + return nil, fmt.Errorf("Could not initialize index's deletes cache: %v", err) + } + if err := idx.initNeededMaps(); err != nil { + return nil, fmt.Errorf("Could not initialize index's missing blob maps: %v", err) + } + return idx, nil +} + +func is4To5SchemaBump(schemaVersion int) bool { + return schemaVersion == 4 && requiredSchemaVersion == 5 +} + +var errMissingWholeRef = errors.New("missing wholeRef field in fileInfo rows") + +// fixMissingWholeRef appends the wholeRef to all the keyFileInfo rows values. It should +// only be called to upgrade a version 4 index schema to version 5. 
+func (x *Index) fixMissingWholeRef(fetcher blob.Fetcher) (err error) { + // We did that check from the caller, but double-check again to prevent from misuse + // of that function. + if x.schemaVersion() != 4 || requiredSchemaVersion != 5 { + panic("fixMissingWholeRef should only be used when upgrading from v4 to v5 of the index schema") + } + log.Println("index: fixing the missing wholeRef in the fileInfo rows...") + defer func() { + if err != nil { + log.Printf("index: fixing the fileInfo rows failed: %v", err) + return + } + log.Print("index: successfully fixed wholeRef in FileInfo rows.") + }() + + // first build a reverted keyWholeToFileRef map, so we can get the wholeRef from the fileRef easily. + fileRefToWholeRef := make(map[blob.Ref]blob.Ref) + it := x.queryPrefix(keyWholeToFileRef) + var keyA [3]string + for it.Next() { + keyPart := strutil.AppendSplitN(keyA[:0], it.Key(), "|", 3) + if len(keyPart) != 3 { + return fmt.Errorf("bogus keyWholeToFileRef key: got %q, wanted \"wholetofile|wholeRef|fileRef\"", it.Key()) + } + wholeRef, ok1 := blob.Parse(keyPart[1]) + fileRef, ok2 := blob.Parse(keyPart[2]) + if !ok1 || !ok2 { + return fmt.Errorf("bogus part in keyWholeToFileRef key: %q", it.Key()) + } + fileRefToWholeRef[fileRef] = wholeRef + } + if err := it.Close(); err != nil { + return err + } + + // We record the mutations and set them all after the iteration because of the sqlite locking: + // since BeginBatch takes a lock, and Find too, we would deadlock at queryPrefix if we + // started a batch mutation before. + mutations := make(map[string]string) + keyPrefix := keyFileInfo.name + "|" + it = x.queryPrefix(keyFileInfo) + defer it.Close() + var valA [3]string + for it.Next() { + br, ok := blob.ParseBytes(it.KeyBytes()[len(keyPrefix):]) + if !ok { + return fmt.Errorf("invalid blobRef %q", it.KeyBytes()[len(keyPrefix):]) + } + wholeRef, ok := fileRefToWholeRef[br] + if !ok { + log.Printf("WARNING: wholeRef for %v not found in index. 
You should probably rebuild the whole index.", br) + continue + } + valPart := strutil.AppendSplitN(valA[:0], it.Value(), "|", 3) + // The old format we're fixing should be: size|filename|mimetype + if len(valPart) != 3 { + return fmt.Errorf("bogus keyFileInfo value: got %q, wanted \"size|filename|mimetype\"", it.Value()) + } + size_s, filename, mimetype := valPart[0], valPart[1], urld(valPart[2]) + if strings.Contains(mimetype, "|") { + // I think this can only happen for people migrating from a commit at least as recent as + // 8229c1985079681a652cb65551b4e80a10d135aa, when wholeRef was introduced to keyFileInfo + // but there was no migration code yet. + // For the "production" migrations between 0.8 and 0.9, the index should not have any wholeRef + // in the keyFileInfo entries. So if something goes wrong and is somehow linked to that happening, + // I'd like to know about it, hence the logging. + log.Printf("%v: %v already has a wholeRef, not fixing it", it.Key(), it.Value()) + continue + } + size, err := strconv.Atoi(size_s) + if err != nil { + return fmt.Errorf("bogus size in keyFileInfo value %v: %v", it.Value(), err) + } + mutations[keyFileInfo.Key(br)] = keyFileInfo.Val(size, filename, mimetype, wholeRef) + } + if err := it.Close(); err != nil { + return err + } + bm := x.s.BeginBatch() + for k, v := range mutations { + bm.Set(k, v) + } + bm.Set(keySchemaVersion.name, "5") + if err := x.s.CommitBatch(bm); err != nil { + return err + } + return nil +} + +func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { + blobPrefix := config.RequiredString("blobSource") + kvConfig := config.RequiredObject("storage") + if err := config.Validate(); err != nil { + return nil, err + } + kv, err := sorted.NewKeyValue(kvConfig) + if err != nil { + return nil, err + } + sto, err := ld.GetStorage(blobPrefix) + if err != nil { + return nil, err + } + + ix, err := New(kv) + // TODO(mpl): next time we need to do another fix, make a new 
error + // type that lets us apply the needed fix depending on its value or + // something. For now just one value/fix. + if err == errMissingWholeRef { + // TODO: maybe we don't want to do that automatically. Brad says + // we have to think about the case on GCE/CoreOS in particular. + if err := ix.fixMissingWholeRef(sto); err != nil { + ix.Close() + return nil, fmt.Errorf("could not fix missing wholeRef entries: %v", err) + } + ix, err = New(kv) + } + if err != nil { + return nil, err + } + ix.InitBlobSource(sto) + + return ix, err +} + +func (x *Index) String() string { + return fmt.Sprintf("Camlistore index, using key/value implementation %T", x.s) +} + +func (x *Index) isEmpty() bool { + iter := x.s.Find("", "") + hasRows := iter.Next() + if err := iter.Close(); err != nil { + panic(err) + } + return !hasRows +} + +// reindexMaxProcs is the number of concurrent goroutines that will be used for reindexing. +var reindexMaxProcs = struct { + sync.RWMutex + v int +}{v: 4} + +// SetReindexMaxProcs sets the maximum number of concurrent goroutines that are +// used during reindexing. +func SetReindexMaxProcs(n int) { + reindexMaxProcs.Lock() + defer reindexMaxProcs.Unlock() + reindexMaxProcs.v = n +} + +// ReindexMaxProcs returns the maximum number of concurrent goroutines that are +// used during reindexing. +func ReindexMaxProcs() int { + reindexMaxProcs.RLock() + defer reindexMaxProcs.RUnlock() + return reindexMaxProcs.v +} + +func (x *Index) Reindex() error { + reindexMaxProcs.RLock() + defer reindexMaxProcs.RUnlock() + ctx := context.TODO() + + wiper, ok := x.s.(sorted.Wiper) + if !ok { + return fmt.Errorf("index's storage type %T doesn't support sorted.Wiper", x.s) + } + log.Printf("Wiping index storage type %T ...", x.s) + if err := wiper.Wipe(); err != nil { + return fmt.Errorf("error wiping index's sorted key/value type %T: %v", x.s, err) + } + log.Printf("Index wiped. 
Rebuilding...") + + reindexStart, _ := blob.Parse(os.Getenv("CAMLI_REINDEX_START")) + + err := x.s.Set(keySchemaVersion.name, fmt.Sprintf("%d", requiredSchemaVersion)) + if err != nil { + return err + } + + var nerrmu sync.Mutex + nerr := 0 + + blobc := make(chan blob.Ref, 32) + + enumCtx := ctx.New() + enumErr := make(chan error, 1) + go func() { + defer close(blobc) + donec := enumCtx.Done() + var lastTick time.Time + enumErr <- blobserver.EnumerateAll(enumCtx, x.blobSource, func(sb blob.SizedRef) error { + now := time.Now() + if lastTick.Before(now.Add(-1 * time.Second)) { + log.Printf("Reindexing at %v", sb.Ref) + lastTick = now + } + if reindexStart.Valid() && sb.Ref.Less(reindexStart) { + return nil + } + select { + case <-donec: + return context.ErrCanceled + case blobc <- sb.Ref: + return nil + } + }) + }() + var wg sync.WaitGroup + for i := 0; i < reindexMaxProcs.v; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for br := range blobc { + if err := x.indexBlob(br); err != nil { + log.Printf("Error reindexing %v: %v", br, err) + nerrmu.Lock() + nerr++ + nerrmu.Unlock() + // TODO: flag (or default?) to stop the EnumerateAll above once + // there's any error with reindexing? 
+ } + } + }() + } + if err := <-enumErr; err != nil { + return err + } + + wg.Wait() + + x.mu.Lock() + readyCount := len(x.readyReindex) + x.mu.Unlock() + if readyCount > 0 { + return fmt.Errorf("%d blobs were ready to reindex in out-of-order queue, but not yet ran", readyCount) + } + + log.Printf("Index rebuild complete.") + nerrmu.Lock() // no need to unlock + if nerr != 0 { + return fmt.Errorf("%d blobs failed to re-index", nerr) + } + if err := x.initDeletesCache(); err != nil { + return err + } + return nil +} + +func queryPrefixString(s sorted.KeyValue, prefix string) sorted.Iterator { + if prefix == "" { + return s.Find("", "") + } + lastByte := prefix[len(prefix)-1] + if lastByte == 0xff { + panic("unsupported query prefix ending in 0xff") + } + end := prefix[:len(prefix)-1] + string(lastByte+1) + return s.Find(prefix, end) +} + +func (x *Index) queryPrefixString(prefix string) sorted.Iterator { + return queryPrefixString(x.s, prefix) +} + +func queryPrefix(s sorted.KeyValue, key *keyType, args ...interface{}) sorted.Iterator { + return queryPrefixString(s, key.Prefix(args...)) +} + +func (x *Index) queryPrefix(key *keyType, args ...interface{}) sorted.Iterator { + return x.queryPrefixString(key.Prefix(args...)) +} + +func closeIterator(it sorted.Iterator, perr *error) { + err := it.Close() + if err != nil && *perr == nil { + *perr = err + } +} + +// schemaVersion returns the version of schema as it is found +// in the currently used index. If not found, it returns 0. 
+func (x *Index) schemaVersion() int { + schemaVersionStr, err := x.s.Get(keySchemaVersion.name) + if err != nil { + if err == sorted.ErrNotFound { + return 0 + } + panic(fmt.Sprintf("Could not get index schema version: %v", err)) + } + schemaVersion, err := strconv.Atoi(schemaVersionStr) + if err != nil { + panic(fmt.Sprintf("Bogus index schema version: %q", schemaVersionStr)) + } + return schemaVersion +} + +type deletion struct { + deleter blob.Ref + when time.Time +} + +type byDeletionDate []deletion + +func (d byDeletionDate) Len() int { return len(d) } +func (d byDeletionDate) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d byDeletionDate) Less(i, j int) bool { return d[i].when.Before(d[j].when) } + +type deletionCache struct { + sync.RWMutex + m map[blob.Ref][]deletion +} + +func newDeletionCache() *deletionCache { + return &deletionCache{ + m: make(map[blob.Ref][]deletion), + } +} + +// initDeletesCache creates and populates the deletion status cache used by the index +// for faster calls to IsDeleted and DeletedAt. It is called by New. 
+func (x *Index) initDeletesCache() (err error) { + x.deletes = newDeletionCache() + it := x.queryPrefix(keyDeleted) + defer closeIterator(it, &err) + for it.Next() { + cl, ok := kvDeleted(it.Key()) + if !ok { + return fmt.Errorf("Bogus keyDeleted entry key: want |\"deleted\"||||, got %q", it.Key()) + } + targetDeletions := append(x.deletes.m[cl.Target], + deletion{ + deleter: cl.BlobRef, + when: cl.Date, + }) + sort.Sort(sort.Reverse(byDeletionDate(targetDeletions))) + x.deletes.m[cl.Target] = targetDeletions + } + return err +} + +func kvDeleted(k string) (c camtypes.Claim, ok bool) { + // TODO(bradfitz): garbage + keyPart := strings.Split(k, "|") + if len(keyPart) != 4 { + return + } + if keyPart[0] != "deleted" { + return + } + target, ok := blob.Parse(keyPart[1]) + if !ok { + return + } + claimRef, ok := blob.Parse(keyPart[3]) + if !ok { + return + } + date, err := time.Parse(time.RFC3339, unreverseTimeString(keyPart[2])) + if err != nil { + return + } + return camtypes.Claim{ + BlobRef: claimRef, + Target: target, + Date: date, + Type: string(schema.DeleteClaim), + }, true +} + +// IsDeleted reports whether the provided blobref (of a permanode or +// claim) should be considered deleted. +func (x *Index) IsDeleted(br blob.Ref) bool { + if x.deletes == nil { + // We still allow the slow path, in case someone creates + // their own Index without a deletes cache. + return x.isDeletedNoCache(br) + } + x.deletes.RLock() + defer x.deletes.RUnlock() + return x.isDeleted(br) +} + +// The caller must hold x.deletes.mu for read. +func (x *Index) isDeleted(br blob.Ref) bool { + deletes, ok := x.deletes.m[br] + if !ok { + return false + } + for _, v := range deletes { + if !x.isDeleted(v.deleter) { + return true + } + } + return false +} + +// Used when the Index has no deletes cache (x.deletes is nil). 
+func (x *Index) isDeletedNoCache(br blob.Ref) bool {
+	var err error
+	it := x.queryPrefix(keyDeleted, br)
+	for it.Next() {
+		cl, ok := kvDeleted(it.Key())
+		if !ok {
+			panic(fmt.Sprintf("Bogus keyDeleted entry key: want |\"deleted\"||||, got %q", it.Key()))
+		}
+		if !x.isDeletedNoCache(cl.BlobRef) {
+			closeIterator(it, &err)
+			if err != nil {
+				// TODO: Do better?
+				panic(fmt.Sprintf("Could not close iterator on keyDeleted: %v", err))
+			}
+			return true
+		}
+	}
+	closeIterator(it, &err)
+	if err != nil {
+		// TODO: Do better?
+		panic(fmt.Sprintf("Could not close iterator on keyDeleted: %v", err))
+	}
+	return false
+}
+
+// GetRecentPermanodes sends results to dest filtered by owner, limit, and
+// before. A zero value for before will default to the current time. The
+// results will have duplicates suppressed, with most recent permanode
+// returned.
+// Note, permanodes more recent than before will still be fetched from the
+// index then skipped. This means runtime scales linearly with the number of
+// nodes more recent than before.
+func (x *Index) GetRecentPermanodes(dest chan<- camtypes.RecentPermanode, owner blob.Ref, limit int, before time.Time) (err error) {
+	defer close(dest)
+
+	keyId, err := x.KeyId(owner)
+	if err == sorted.ErrNotFound {
+		log.Printf("No recent permanodes because keyId for owner %v not found", owner)
+		return nil
+	}
+	if err != nil {
+		log.Printf("Error fetching keyId for owner %v: %v", owner, err)
+		return err
+	}
+
+	sent := 0
+	var seenPermanode dupSkipper
+
+	if before.IsZero() {
+		before = time.Now()
+	}
+	// TODO(bradfitz): handle before efficiently. don't use queryPrefix.
+	it := x.queryPrefix(keyRecentPermanode, keyId)
+	defer closeIterator(it, &err)
+	for it.Next() {
+		permaStr := it.Value()
+		parts := strings.SplitN(it.Key(), "|", 4)
+		if len(parts) != 4 {
+			continue
+		}
+		mTime, _ := time.Parse(time.RFC3339, unreverseTimeString(parts[2]))
+		permaRef, ok := blob.Parse(permaStr)
+		if !ok {
+			continue
+		}
+		if x.IsDeleted(permaRef) {
+			continue
+		}
+		if seenPermanode.Dup(permaStr) {
+			continue
+		}
+		// Skip entries with an mTime greater than or equal to before.
+		if !mTime.Before(before) {
+			continue
+		}
+		dest <- camtypes.RecentPermanode{
+			Permanode:   permaRef,
+			Signer:      owner, // TODO(bradfitz): kinda. usually. for now.
+			LastModTime: mTime,
+		}
+		sent++
+		if sent == limit {
+			break
+		}
+	}
+	return nil
+}
+
+func (x *Index) AppendClaims(dst []camtypes.Claim, permaNode blob.Ref,
+	signerFilter blob.Ref,
+	attrFilter string) ([]camtypes.Claim, error) {
+	if x.corpus != nil {
+		return x.corpus.AppendClaims(dst, permaNode, signerFilter, attrFilter)
+	}
+	var (
+		keyId string
+		err   error
+		it    sorted.Iterator
+	)
+	if signerFilter.Valid() {
+		keyId, err = x.KeyId(signerFilter)
+		if err == sorted.ErrNotFound {
+			return nil, nil
+		}
+		if err != nil {
+			return nil, err
+		}
+		it = x.queryPrefix(keyPermanodeClaim, permaNode, keyId)
+	} else {
+		it = x.queryPrefix(keyPermanodeClaim, permaNode)
+	}
+	defer closeIterator(it, &err)
+
+	// In the common case, an attribute filter is just a plain
+	// token ("camliContent") unescaped. If so, fast path that
+	// check to skip the row before we even split it.
+ var mustHave string + if attrFilter != "" && urle(attrFilter) == attrFilter { + mustHave = attrFilter + } + + for it.Next() { + val := it.Value() + if mustHave != "" && !strings.Contains(val, mustHave) { + continue + } + cl, ok := kvClaim(it.Key(), val, blob.Parse) + if !ok { + continue + } + if x.IsDeleted(cl.BlobRef) { + continue + } + if attrFilter != "" && cl.Attr != attrFilter { + continue + } + if signerFilter.Valid() && cl.Signer != signerFilter { + continue + } + dst = append(dst, cl) + } + return dst, nil +} + +func kvClaim(k, v string, blobParse func(string) (blob.Ref, bool)) (c camtypes.Claim, ok bool) { + const nKeyPart = 5 + const nValPart = 4 + var keya [nKeyPart]string + var vala [nValPart]string + keyPart := strutil.AppendSplitN(keya[:0], k, "|", -1) + valPart := strutil.AppendSplitN(vala[:0], v, "|", -1) + if len(keyPart) < nKeyPart || len(valPart) < nValPart { + return + } + signerRef, ok := blobParse(valPart[3]) + if !ok { + return + } + permaNode, ok := blobParse(keyPart[1]) + if !ok { + return + } + claimRef, ok := blobParse(keyPart[4]) + if !ok { + return + } + date, err := time.Parse(time.RFC3339, keyPart[3]) + if err != nil { + return + } + return camtypes.Claim{ + BlobRef: claimRef, + Signer: signerRef, + Permanode: permaNode, + Date: date, + Type: urld(valPart[0]), + Attr: urld(valPart[1]), + Value: urld(valPart[2]), + }, true +} + +func (x *Index) GetBlobMeta(br blob.Ref) (camtypes.BlobMeta, error) { + if x.corpus != nil { + return x.corpus.GetBlobMeta(br) + } + key := "meta:" + br.String() + meta, err := x.s.Get(key) + if err == sorted.ErrNotFound { + err = os.ErrNotExist + } + if err != nil { + return camtypes.BlobMeta{}, err + } + pos := strings.Index(meta, "|") + if pos < 0 { + panic(fmt.Sprintf("Bogus index row for key %q: got value %q", key, meta)) + } + size, err := strconv.ParseUint(meta[:pos], 10, 32) + if err != nil { + return camtypes.BlobMeta{}, err + } + mime := meta[pos+1:] + return camtypes.BlobMeta{ + Ref: br, + Size: 
uint32(size), + CamliType: camliTypeFromMIME(mime), + }, nil +} + +func (x *Index) KeyId(signer blob.Ref) (string, error) { + if x.corpus != nil { + return x.corpus.KeyId(signer) + } + return x.s.Get("signerkeyid:" + signer.String()) +} + +func (x *Index) PermanodeOfSignerAttrValue(signer blob.Ref, attr, val string) (permaNode blob.Ref, err error) { + keyId, err := x.KeyId(signer) + if err == sorted.ErrNotFound { + return blob.Ref{}, os.ErrNotExist + } + if err != nil { + return blob.Ref{}, err + } + it := x.queryPrefix(keySignerAttrValue, keyId, attr, val) + defer closeIterator(it, &err) + for it.Next() { + permaRef, ok := blob.Parse(it.Value()) + if ok && !x.IsDeleted(permaRef) { + return permaRef, nil + } + } + return blob.Ref{}, os.ErrNotExist +} + +// This is just like PermanodeOfSignerAttrValue except we return multiple and dup-suppress. +// If request.Query is "", it is not used in the prefix search. +func (x *Index) SearchPermanodesWithAttr(dest chan<- blob.Ref, request *camtypes.PermanodeByAttrRequest) (err error) { + defer close(dest) + if request.FuzzyMatch { + // TODO(bradfitz): remove this for now? figure out how to handle it generically? 
+ return errors.New("TODO: SearchPermanodesWithAttr: generic indexer doesn't support FuzzyMatch on PermanodeByAttrRequest") + } + if request.Attribute == "" { + return errors.New("index: missing Attribute in SearchPermanodesWithAttr") + } + + keyId, err := x.KeyId(request.Signer) + if err == sorted.ErrNotFound { + return nil + } + if err != nil { + return err + } + seen := make(map[string]bool) + var it sorted.Iterator + if request.Query == "" { + it = x.queryPrefix(keySignerAttrValue, keyId, request.Attribute) + } else { + it = x.queryPrefix(keySignerAttrValue, keyId, request.Attribute, request.Query) + } + defer closeIterator(it, &err) + for it.Next() { + cl, ok := kvSignerAttrValue(it.Key(), it.Value()) + if !ok { + continue + } + if x.IsDeleted(cl.BlobRef) { + continue + } + if x.IsDeleted(cl.Permanode) { + continue + } + pnstr := cl.Permanode.String() + if seen[pnstr] { + continue + } + seen[pnstr] = true + + dest <- cl.Permanode + if len(seen) == request.MaxResults { + break + } + } + return nil +} + +func kvSignerAttrValue(k, v string) (c camtypes.Claim, ok bool) { + // TODO(bradfitz): garbage + keyPart := strings.Split(k, "|") + valPart := strings.Split(v, "|") + if len(keyPart) != 6 || len(valPart) != 1 { + // TODO(mpl): use glog + log.Printf("bogus keySignerAttrValue index entry: %q = %q", k, v) + return + } + if keyPart[0] != "signerattrvalue" { + return + } + date, err := time.Parse(time.RFC3339, unreverseTimeString(keyPart[4])) + if err != nil { + log.Printf("bogus time in keySignerAttrValue index entry: %q", keyPart[4]) + return + } + claimRef, ok := blob.Parse(keyPart[5]) + if !ok { + log.Printf("bogus claim in keySignerAttrValue index entry: %q", keyPart[5]) + return + } + permaNode, ok := blob.Parse(valPart[0]) + if !ok { + log.Printf("bogus permanode in keySignerAttrValue index entry: %q", valPart[0]) + return + } + return camtypes.Claim{ + BlobRef: claimRef, + Permanode: permaNode, + Date: date, + Attr: urld(keyPart[2]), + Value: 
urld(keyPart[3]), + }, true +} + +func (x *Index) PathsOfSignerTarget(signer, target blob.Ref) (paths []*camtypes.Path, err error) { + paths = []*camtypes.Path{} + keyId, err := x.KeyId(signer) + if err != nil { + if err == sorted.ErrNotFound { + err = nil + } + return + } + + mostRecent := make(map[string]*camtypes.Path) + maxClaimDates := make(map[string]time.Time) + + it := x.queryPrefix(keyPathBackward, keyId, target) + defer closeIterator(it, &err) + for it.Next() { + p, ok, active := kvPathBackward(it.Key(), it.Value()) + if !ok { + continue + } + if x.IsDeleted(p.Claim) { + continue + } + if x.IsDeleted(p.Base) { + continue + } + + key := p.Base.String() + "/" + p.Suffix + if p.ClaimDate.After(maxClaimDates[key]) { + maxClaimDates[key] = p.ClaimDate + if active { + mostRecent[key] = &p + } else { + delete(mostRecent, key) + } + } + } + for _, v := range mostRecent { + paths = append(paths, v) + } + return paths, nil +} + +func kvPathBackward(k, v string) (p camtypes.Path, ok bool, active bool) { + // TODO(bradfitz): garbage + keyPart := strings.Split(k, "|") + valPart := strings.Split(v, "|") + if len(keyPart) != 4 || len(valPart) != 4 { + // TODO(mpl): use glog + log.Printf("bogus keyPathBackward index entry: %q = %q", k, v) + return + } + if keyPart[0] != "signertargetpath" { + return + } + target, ok := blob.Parse(keyPart[2]) + if !ok { + log.Printf("bogus target in keyPathBackward index entry: %q", keyPart[2]) + return + } + claim, ok := blob.Parse(keyPart[3]) + if !ok { + log.Printf("bogus claim in keyPathBackward index entry: %q", keyPart[3]) + return + } + date, err := time.Parse(time.RFC3339, valPart[0]) + if err != nil { + log.Printf("bogus date in keyPathBackward index entry: %q", valPart[0]) + return + } + base, ok := blob.Parse(valPart[1]) + if !ok { + log.Printf("bogus base in keyPathBackward index entry: %q", valPart[1]) + return + } + if valPart[2] == "Y" { + active = true + } + return camtypes.Path{ + Claim: claim, + Base: base, + Target: 
target, + ClaimDate: date, + Suffix: urld(valPart[3]), + }, true, active +} + +func (x *Index) PathsLookup(signer, base blob.Ref, suffix string) (paths []*camtypes.Path, err error) { + paths = []*camtypes.Path{} + keyId, err := x.KeyId(signer) + if err != nil { + if err == sorted.ErrNotFound { + err = nil + } + return + } + + it := x.queryPrefix(keyPathForward, keyId, base, suffix) + defer closeIterator(it, &err) + for it.Next() { + p, ok, active := kvPathForward(it.Key(), it.Value()) + if !ok { + continue + } + if x.IsDeleted(p.Claim) { + continue + } + if x.IsDeleted(p.Target) { + continue + } + + // TODO(bradfitz): investigate what's up with deleted + // forward path claims here. Needs docs with the + // interface too, and tests. + _ = active + + paths = append(paths, &p) + } + return +} + +func kvPathForward(k, v string) (p camtypes.Path, ok bool, active bool) { + // TODO(bradfitz): garbage + keyPart := strings.Split(k, "|") + valPart := strings.Split(v, "|") + if len(keyPart) != 6 || len(valPart) != 2 { + // TODO(mpl): use glog + log.Printf("bogus keyPathForward index entry: %q = %q", k, v) + return + } + if keyPart[0] != "path" { + return + } + base, ok := blob.Parse(keyPart[2]) + if !ok { + log.Printf("bogus base in keyPathForward index entry: %q", keyPart[2]) + return + } + date, err := time.Parse(time.RFC3339, unreverseTimeString(keyPart[4])) + if err != nil { + log.Printf("bogus date in keyPathForward index entry: %q", keyPart[4]) + return + } + claim, ok := blob.Parse(keyPart[5]) + if !ok { + log.Printf("bogus claim in keyPathForward index entry: %q", keyPart[5]) + return + } + if valPart[0] == "Y" { + active = true + } + target, ok := blob.Parse(valPart[1]) + if !ok { + log.Printf("bogus target in keyPathForward index entry: %q", valPart[1]) + return + } + return camtypes.Path{ + Claim: claim, + Base: base, + Target: target, + ClaimDate: date, + Suffix: urld(keyPart[3]), + }, true, active +} + +func (x *Index) PathLookup(signer, base blob.Ref, suffix 
string, at time.Time) (*camtypes.Path, error) { + paths, err := x.PathsLookup(signer, base, suffix) + if err != nil { + return nil, err + } + var ( + newest = int64(0) + atSeconds = int64(0) + best *camtypes.Path + ) + + if !at.IsZero() { + atSeconds = at.Unix() + } + + for _, path := range paths { + t := path.ClaimDate + secs := t.Unix() + if atSeconds != 0 && secs > atSeconds { + // Too new + continue + } + if newest > secs { + // Too old + continue + } + // Just right + newest, best = secs, path + } + if best == nil { + return nil, os.ErrNotExist + } + return best, nil +} + +func (x *Index) ExistingFileSchemas(wholeRef blob.Ref) (schemaRefs []blob.Ref, err error) { + it := x.queryPrefix(keyWholeToFileRef, wholeRef) + defer closeIterator(it, &err) + for it.Next() { + keyPart := strings.Split(it.Key(), "|")[1:] + if len(keyPart) < 2 { + continue + } + ref, ok := blob.Parse(keyPart[1]) + if ok { + schemaRefs = append(schemaRefs, ref) + } + } + return schemaRefs, nil +} + +func (x *Index) loadKey(key string, val *string, err *error, wg *sync.WaitGroup) { + defer wg.Done() + *val, *err = x.s.Get(key) +} + +func (x *Index) GetFileInfo(fileRef blob.Ref) (camtypes.FileInfo, error) { + if x.corpus != nil { + return x.corpus.GetFileInfo(fileRef) + } + ikey := "fileinfo|" + fileRef.String() + tkey := "filetimes|" + fileRef.String() + // TODO: switch this to use syncutil.Group + wg := new(sync.WaitGroup) + wg.Add(2) + var iv, tv string // info value, time value + var ierr, terr error + go x.loadKey(ikey, &iv, &ierr, wg) + go x.loadKey(tkey, &tv, &terr, wg) + wg.Wait() + + if ierr == sorted.ErrNotFound { + return camtypes.FileInfo{}, os.ErrNotExist + } + if ierr != nil { + return camtypes.FileInfo{}, ierr + } + valPart := strings.Split(iv, "|") + if len(valPart) < 3 { + log.Printf("index: bogus key %q = %q", ikey, iv) + return camtypes.FileInfo{}, os.ErrNotExist + } + var wholeRef blob.Ref + if len(valPart) >= 4 { + wholeRef, _ = blob.Parse(valPart[3]) + } + size, err := 
strconv.ParseInt(valPart[0], 10, 64) + if err != nil { + log.Printf("index: bogus integer at position 0 in key %q = %q", ikey, iv) + return camtypes.FileInfo{}, os.ErrNotExist + } + fileName := urld(valPart[1]) + fi := camtypes.FileInfo{ + Size: size, + FileName: fileName, + MIMEType: urld(valPart[2]), + WholeRef: wholeRef, + } + + if tv != "" { + times := strings.Split(urld(tv), ",") + updateFileInfoTimes(&fi, times) + } + + return fi, nil +} + +func updateFileInfoTimes(fi *camtypes.FileInfo, times []string) { + if len(times) == 0 { + return + } + fi.Time = types.ParseTime3339OrNil(times[0]) + if len(times) == 2 { + fi.ModTime = types.ParseTime3339OrNil(times[1]) + } +} + +// v is "width|height" +func kvImageInfo(v []byte) (ii camtypes.ImageInfo, ok bool) { + pipei := bytes.IndexByte(v, '|') + if pipei < 0 { + return + } + w, err := strutil.ParseUintBytes(v[:pipei], 10, 16) + if err != nil { + return + } + h, err := strutil.ParseUintBytes(v[pipei+1:], 10, 16) + if err != nil { + return + } + ii.Width = uint16(w) + ii.Height = uint16(h) + return ii, true +} + +func (x *Index) GetImageInfo(fileRef blob.Ref) (camtypes.ImageInfo, error) { + if x.corpus != nil { + return x.corpus.GetImageInfo(fileRef) + } + // it might be that the key does not exist because image.DecodeConfig failed earlier + // (because of unsupported JPEG features like progressive mode). 
+ key := keyImageSize.Key(fileRef.String()) + v, err := x.s.Get(key) + if err == sorted.ErrNotFound { + err = os.ErrNotExist + } + if err != nil { + return camtypes.ImageInfo{}, err + } + ii, ok := kvImageInfo([]byte(v)) + if !ok { + return camtypes.ImageInfo{}, fmt.Errorf("index: bogus key %q = %q", key, v) + } + return ii, nil +} + +func (x *Index) GetMediaTags(fileRef blob.Ref) (tags map[string]string, err error) { + if x.corpus != nil { + return x.corpus.GetMediaTags(fileRef) + } + fi, err := x.GetFileInfo(fileRef) + if err != nil { + return nil, err + } + it := x.queryPrefix(keyMediaTag, fi.WholeRef.String()) + defer closeIterator(it, &err) + for it.Next() { + tags[it.Key()] = it.Value() + } + return tags, nil +} + +func (x *Index) EdgesTo(ref blob.Ref, opts *camtypes.EdgesToOpts) (edges []*camtypes.Edge, err error) { + it := x.queryPrefix(keyEdgeBackward, ref) + defer closeIterator(it, &err) + permanodeParents := make(map[string]*camtypes.Edge) + for it.Next() { + edge, ok := kvEdgeBackward(it.Key(), it.Value()) + if !ok { + continue + } + if x.IsDeleted(edge.From) { + continue + } + if x.IsDeleted(edge.BlobRef) { + continue + } + edge.To = ref + if edge.FromType == "permanode" { + permanodeParents[edge.From.String()] = edge + } else { + edges = append(edges, edge) + } + } + for _, e := range permanodeParents { + edges = append(edges, e) + } + return edges, nil +} + +func kvEdgeBackward(k, v string) (edge *camtypes.Edge, ok bool) { + // TODO(bradfitz): garbage + keyPart := strings.Split(k, "|") + valPart := strings.Split(v, "|") + if len(keyPart) != 4 || len(valPart) != 2 { + // TODO(mpl): use glog + log.Printf("bogus keyEdgeBackward index entry: %q = %q", k, v) + return + } + if keyPart[0] != "edgeback" { + return + } + parentRef, ok := blob.Parse(keyPart[2]) + if !ok { + log.Printf("bogus parent in keyEdgeBackward index entry: %q", keyPart[2]) + return + } + blobRef, ok := blob.Parse(keyPart[3]) + if !ok { + log.Printf("bogus blobref in keyEdgeBackward 
index entry: %q", keyPart[3]) + return + } + return &camtypes.Edge{ + From: parentRef, + FromType: valPart[0], + FromTitle: valPart[1], + BlobRef: blobRef, + }, true +} + +// GetDirMembers sends on dest the children of the static directory dir. +func (x *Index) GetDirMembers(dir blob.Ref, dest chan<- blob.Ref, limit int) (err error) { + defer close(dest) + + sent := 0 + it := x.queryPrefix(keyStaticDirChild, dir.String()) + defer closeIterator(it, &err) + for it.Next() { + keyPart := strings.Split(it.Key(), "|") + if len(keyPart) != 3 { + return fmt.Errorf("index: bogus key keyStaticDirChild = %q", it.Key()) + } + + child, ok := blob.Parse(keyPart[2]) + if !ok { + continue + } + dest <- child + sent++ + if sent == limit { + break + } + } + return nil +} + +func kvBlobMeta(k, v string) (bm camtypes.BlobMeta, ok bool) { + refStr := k[len("meta:"):] + br, ok := blob.Parse(refStr) + if !ok { + return + } + pipe := strings.Index(v, "|") + if pipe < 0 { + return + } + size, err := strconv.ParseUint(v[:pipe], 10, 32) + if err != nil { + return + } + return camtypes.BlobMeta{ + Ref: br, + Size: uint32(size), + CamliType: camliTypeFromMIME(v[pipe+1:]), + }, true +} + +func kvBlobMeta_bytes(k, v []byte) (bm camtypes.BlobMeta, ok bool) { + ref := k[len("meta:"):] + br, ok := blob.ParseBytes(ref) + if !ok { + return + } + pipe := bytes.IndexByte(v, '|') + if pipe < 0 { + return + } + size, err := strutil.ParseUintBytes(v[:pipe], 10, 32) + if err != nil { + return + } + return camtypes.BlobMeta{ + Ref: br, + Size: uint32(size), + CamliType: camliTypeFromMIME_bytes(v[pipe+1:]), + }, true +} + +func enumerateBlobMeta(s sorted.KeyValue, cb func(camtypes.BlobMeta) error) (err error) { + it := queryPrefixString(s, "meta:") + defer closeIterator(it, &err) + for it.Next() { + bm, ok := kvBlobMeta(it.Key(), it.Value()) + if !ok { + continue + } + if err := cb(bm); err != nil { + return err + } + } + return nil +} + +func enumerateSignerKeyId(s sorted.KeyValue, cb func(blob.Ref, 
string)) (err error) { + const pfx = "signerkeyid:" + it := queryPrefixString(s, pfx) + defer closeIterator(it, &err) + for it.Next() { + if br, ok := blob.Parse(strings.TrimPrefix(it.Key(), pfx)); ok { + cb(br, it.Value()) + } + } + return +} + +// EnumerateBlobMeta sends all metadata about all known blobs to ch and then closes ch. +func (x *Index) EnumerateBlobMeta(ctx *context.Context, ch chan<- camtypes.BlobMeta) (err error) { + if x.corpus != nil { + x.corpus.RLock() + defer x.corpus.RUnlock() + return x.corpus.EnumerateBlobMetaLocked(ctx, ch) + } + defer close(ch) + return enumerateBlobMeta(x.s, func(bm camtypes.BlobMeta) error { + select { + case ch <- bm: + case <-ctx.Done(): + return context.ErrCanceled + } + return nil + }) +} + +// Storage returns the index's underlying Storage implementation. +func (x *Index) Storage() sorted.KeyValue { return x.s } + +// Close closes the underlying sorted.KeyValue, if the storage has a Close method. +// The return value is the return value of the underlying Close, or +// nil otherwise. +func (x *Index) Close() error { + if cl, ok := x.s.(io.Closer); ok { + return cl.Close() + } + close(x.tickleOoo) + return nil +} + +// initNeededMaps initializes x.needs and x.neededBy on start-up. 
+func (x *Index) initNeededMaps() (err error) { + x.deletes = newDeletionCache() + it := x.queryPrefix(keyMissing) + defer closeIterator(it, &err) + for it.Next() { + key := it.KeyBytes() + pair := key[len("missing|"):] + pipe := bytes.IndexByte(pair, '|') + if pipe < 0 { + return fmt.Errorf("Bogus missing key %q", key) + } + have, ok1 := blob.ParseBytes(pair[:pipe]) + missing, ok2 := blob.ParseBytes(pair[pipe+1:]) + if !ok1 || !ok2 { + return fmt.Errorf("Bogus missing key %q", key) + } + x.noteNeededMemory(have, missing) + } + return +} + +func (x *Index) noteNeeded(have, missing blob.Ref) error { + if err := x.s.Set(keyMissing.Key(have, missing), "1"); err != nil { + return err + } + x.noteNeededMemory(have, missing) + return nil +} + +func (x *Index) noteNeededMemory(have, missing blob.Ref) { + x.mu.Lock() + x.needs[have] = append(x.needs[have], missing) + x.neededBy[missing] = append(x.neededBy[missing], have) + x.mu.Unlock() +} + +const camliTypeMIMEPrefix = "application/json; camliType=" + +var camliTypeMIMEPrefixBytes = []byte(camliTypeMIMEPrefix) + +// "application/json; camliType=file" => "file" +// "image/gif" => "" +func camliTypeFromMIME(mime string) string { + if v := strings.TrimPrefix(mime, camliTypeMIMEPrefix); v != mime { + return v + } + return "" +} + +func camliTypeFromMIME_bytes(mime []byte) string { + if v := bytes.TrimPrefix(mime, camliTypeMIMEPrefixBytes); len(v) != len(mime) { + return strutil.StringFromBytes(v) + } + return "" +} + +// TODO(bradfitz): rename this? This is really about signer-attr-value +// (PermanodeOfSignerAttrValue), and not about indexed attributes in general. +func IsIndexedAttribute(attr string) bool { + switch attr { + case "camliRoot", "camliImportRoot", "tag", "title": + return true + } + return false +} + +// IsBlobReferenceAttribute returns whether attr is an attribute whose +// value is a blob reference (e.g. 
camliMember) and thus something the +// indexers should keep inverted indexes on for parent/child-type +// relationships. +func IsBlobReferenceAttribute(attr string) bool { + switch attr { + case "camliMember": + return true + } + return false +} + +func IsFulltextAttribute(attr string) bool { + switch attr { + case "tag", "title": + return true + } + return false +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/index_test.go b/vendor/github.com/camlistore/camlistore/pkg/index/index_test.go new file mode 100644 index 00000000..8bca1929 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/index_test.go @@ -0,0 +1,500 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package index_test + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/index" + "camlistore.org/pkg/index/indextest" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/test" + "camlistore.org/pkg/types/camtypes" +) + +func TestReverseTimeString(t *testing.T) { + in := "2011-11-27T01:23:45Z" + got := index.ExpReverseTimeString(in) + want := "rt7988-88-72T98:76:54Z" + if got != want { + t.Fatalf("reverseTimeString = %q, want %q", got, want) + } + back := index.ExpUnreverseTimeString(got) + if back != in { + t.Fatalf("unreverseTimeString = %q, want %q", back, in) + } +} + +func TestIndex_Memory(t *testing.T) { + indextest.Index(t, index.NewMemoryIndex) +} + +func TestPathsOfSignerTarget_Memory(t *testing.T) { + indextest.PathsOfSignerTarget(t, index.NewMemoryIndex) +} + +func TestFiles_Memory(t *testing.T) { + indextest.Files(t, index.NewMemoryIndex) +} + +func TestEdgesTo_Memory(t *testing.T) { + indextest.EdgesTo(t, index.NewMemoryIndex) +} + +func TestDelete_Memory(t *testing.T) { + indextest.Delete(t, index.NewMemoryIndex) +} + +var ( + // those test files are not specific to an indexer implementation + // hence we do not want to check them. + notAnIndexer = []string{ + "corpus_bench_test.go", + "corpus_test.go", + "export_test.go", + "index_test.go", + "keys_test.go", + } + // A map is used in hasAllRequiredTests to note which required + // tests have been found in a package, by setting the corresponding + // booleans to true. Those are the keys for this map. + requiredTests = []string{"TestIndex_", "TestPathsOfSignerTarget_", "TestFiles_", "TestEdgesTo_"} +) + +// This function checks that all the functions using the tests +// defined in indextest, namely: +// TestIndex_, TestPathOfSignerTarget_, TestFiles_ +// do exist in the provided test file. 
+func hasAllRequiredTests(name string, t *testing.T) error { + tests := make(map[string]bool) + for _, v := range requiredTests { + tests[v] = false + } + + if !strings.HasSuffix(name, "_test.go") || skipFromList(name) { + return nil + } + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, name, nil, 0) + if err != nil { + t.Fatalf("%v: %v", name, err) + } + ast.Inspect(f, func(n ast.Node) bool { + switch x := n.(type) { + case *ast.FuncDecl: + name := x.Name.Name + for k, _ := range tests { + if strings.HasPrefix(name, k) { + tests[k] = true + } + } + } + return true + }) + + for k, v := range tests { + if !v { + return fmt.Errorf("%v not implemented in %v", k, name) + } + } + return nil +} + +// For each test file dedicated to an indexer implementation, this checks that +// all the required tests are present in its test suite. +func TestIndexerTestsCompleteness(t *testing.T) { + cwd, err := os.Open(".") + if err != nil { + t.Fatal(err) + } + defer cwd.Close() + files, err := cwd.Readdir(-1) + if err != nil { + t.Fatal(err) + } + + for _, file := range files { + name := file.Name() + if file.IsDir() || strings.HasPrefix(name, ".") { + continue + } + if err := hasAllRequiredTests(name, t); err != nil { + t.Error(err) + } + } + // special case for sqlite as it is the only one left in its own package + if err := hasAllRequiredTests(filepath.FromSlash("sqlite/sqlite_test.go"), t); err != nil { + t.Error(err) + } +} + +func skipFromList(name string) bool { + for _, v := range notAnIndexer { + if name == v { + return true + } + } + return false +} + +func testMergeFileInfoRow(t *testing.T, wholeRef string) { + c := index.ExpNewCorpus() + value := "100|something%2egif|image%2Fgif" + want := camtypes.FileInfo{ + Size: 100, + MIMEType: "image/gif", + FileName: "something.gif", + } + if wholeRef != "" { + value += "|" + wholeRef + want.WholeRef = blob.MustParse(wholeRef) + } + c.Exp_mergeFileInfoRow("fileinfo|sha1-579f7f246bd420d486ddeb0dadbb256cfaf8bf6b", value) 
+ fi := c.Exp_files(blob.MustParse("sha1-579f7f246bd420d486ddeb0dadbb256cfaf8bf6b")) + if !reflect.DeepEqual(want, fi) { + t.Errorf("Got %+v; want %+v", fi, want) + } +} + +// When requiredSchemaVersion was at 4, i.e. wholeRef hadn't been introduced into fileInfo +func TestMergeFileInfoRow4(t *testing.T) { + testMergeFileInfoRow(t, "") +} + +func TestMergeFileInfoRow(t *testing.T) { + testMergeFileInfoRow(t, "sha1-142b504945338158e0149d4ed25a41a522a28e88") +} + +var ( + chunk1 = &test.Blob{Contents: "foo"} + chunk2 = &test.Blob{Contents: "bar"} + chunk3 = &test.Blob{Contents: "baz"} + + chunk1ref = chunk1.BlobRef() + chunk2ref = chunk2.BlobRef() + chunk3ref = chunk3.BlobRef() + + fileBlob = &test.Blob{fmt.Sprintf(`{"camliVersion": 1, +"camliType": "file", +"fileName": "stuff.txt", +"parts": [ + {"blobRef": "%s", "size": 3}, + {"blobRef": "%s", "size": 3}, + {"blobRef": "%s", "size": 3} +]}`, chunk1ref, chunk2ref, chunk3ref)} + fileBlobRef = fileBlob.BlobRef() +) + +func TestInitNeededMaps(t *testing.T) { + s := sorted.NewMemoryKeyValue() + + // Start unknowning that the data chunks are all gone: + s.Set("schemaversion", fmt.Sprint(index.Exp_schemaVersion())) + s.Set(index.Exp_missingKey(fileBlobRef, chunk1ref), "1") + s.Set(index.Exp_missingKey(fileBlobRef, chunk2ref), "1") + s.Set(index.Exp_missingKey(fileBlobRef, chunk3ref), "1") + ix, err := index.New(s) + if err != nil { + t.Fatal(err) + } + { + needs, neededBy, _ := ix.NeededMapsForTest() + needsWant := map[blob.Ref][]blob.Ref{ + fileBlobRef: []blob.Ref{chunk1ref, chunk2ref, chunk3ref}, + } + neededByWant := map[blob.Ref][]blob.Ref{ + chunk1ref: []blob.Ref{fileBlobRef}, + chunk2ref: []blob.Ref{fileBlobRef}, + chunk3ref: []blob.Ref{fileBlobRef}, + } + if !reflect.DeepEqual(needs, needsWant) { + t.Errorf("needs = %v; want %v", needs, needsWant) + } + if !reflect.DeepEqual(neededBy, neededByWant) { + t.Errorf("neededBy = %v; want %v", neededBy, neededByWant) + } + } + + ix.Exp_noteBlobIndexed(chunk2ref) + + { + 
needs, neededBy, ready := ix.NeededMapsForTest() + needsWant := map[blob.Ref][]blob.Ref{ + fileBlobRef: []blob.Ref{chunk1ref, chunk3ref}, + } + neededByWant := map[blob.Ref][]blob.Ref{ + chunk1ref: []blob.Ref{fileBlobRef}, + chunk3ref: []blob.Ref{fileBlobRef}, + } + if !reflect.DeepEqual(needs, needsWant) { + t.Errorf("needs = %v; want %v", needs, needsWant) + } + if !reflect.DeepEqual(neededBy, neededByWant) { + t.Errorf("neededBy = %v; want %v", neededBy, neededByWant) + } + if len(ready) != 0 { + t.Errorf("ready = %v; want nothing", ready) + } + } + + ix.Exp_noteBlobIndexed(chunk1ref) + + { + needs, neededBy, ready := ix.NeededMapsForTest() + needsWant := map[blob.Ref][]blob.Ref{ + fileBlobRef: []blob.Ref{chunk3ref}, + } + neededByWant := map[blob.Ref][]blob.Ref{ + chunk3ref: []blob.Ref{fileBlobRef}, + } + if !reflect.DeepEqual(needs, needsWant) { + t.Errorf("needs = %v; want %v", needs, needsWant) + } + if !reflect.DeepEqual(neededBy, neededByWant) { + t.Errorf("neededBy = %v; want %v", neededBy, neededByWant) + } + if len(ready) != 0 { + t.Errorf("ready = %v; want nothing", ready) + } + } + + ix.Exp_noteBlobIndexed(chunk3ref) + + { + needs, neededBy, ready := ix.NeededMapsForTest() + needsWant := map[blob.Ref][]blob.Ref{} + neededByWant := map[blob.Ref][]blob.Ref{} + if !reflect.DeepEqual(needs, needsWant) { + t.Errorf("needs = %v; want %v", needs, needsWant) + } + if !reflect.DeepEqual(neededBy, neededByWant) { + t.Errorf("neededBy = %v; want %v", neededBy, neededByWant) + } + if !ready[fileBlobRef] { + t.Error("fileBlobRef not ready") + } + } + dumpSorted(t, s) +} + +func dumpSorted(t *testing.T, s sorted.KeyValue) { + foreachSorted(t, s, func(k, v string) { + t.Logf("index %q = %q", k, v) + }) +} + +func foreachSorted(t *testing.T, s sorted.KeyValue, fn func(string, string)) { + it := s.Find("", "") + for it.Next() { + fn(it.Key(), it.Value()) + } + if err := it.Close(); err != nil { + t.Fatal(err) + } +} + +func TestOutOfOrderIndexing(t *testing.T) { + tf 
:= new(test.Fetcher) + s := sorted.NewMemoryKeyValue() + + ix, err := index.New(s) + if err != nil { + t.Fatal(err) + } + ix.InitBlobSource(tf) + + t.Logf("file ref = %v", fileBlobRef) + t.Logf("missing data chunks = %v, %v, %v", chunk1ref, chunk2ref, chunk3ref) + + add := func(b *test.Blob) { + tf.AddBlob(b) + if _, err := ix.ReceiveBlob(b.BlobRef(), b.Reader()); err != nil { + t.Fatalf("ReceiveBlob(%v): %v", b.BlobRef(), err) + } + } + + add(fileBlob) + + { + key := fmt.Sprintf("missing|%s|%s", fileBlobRef, chunk1ref) + if got, err := s.Get(key); got == "" || err != nil { + t.Errorf("key %q missing (err: %v); want 1", key, err) + } + } + + add(chunk1) + add(chunk2) + + ix.Exp_AwaitReindexing(t) + + { + key := fmt.Sprintf("missing|%s|%s", fileBlobRef, chunk3ref) + if got, err := s.Get(key); got == "" || err != nil { + t.Errorf("key %q missing (err: %v); want 1", key, err) + } + } + + add(chunk3) + + ix.Exp_AwaitReindexing(t) + + foreachSorted(t, s, func(k, v string) { + if strings.HasPrefix(k, "missing|") { + t.Errorf("Shouldn't have missing key: %q", k) + } + }) +} + +func TestIndexingClaimMissingPubkey(t *testing.T) { + s := sorted.NewMemoryKeyValue() + idx, err := index.New(s) + if err != nil { + t.Fatal(err) + } + + id := indextest.NewIndexDeps(idx) + id.Fataler = t + + goodKeyFetcher := id.Index.KeyFetcher + emptyFetcher := new(test.Fetcher) + + pn := id.NewPermanode() + + // Prevent the index from being able to find the public key: + idx.KeyFetcher = emptyFetcher + + // This previous failed to upload, since the signer's public key was + // unavailable. 
+ claimRef := id.SetAttribute(pn, "tag", "foo") + + t.Logf(" Claim is %v", claimRef) + t.Logf("Signer is %v", id.SignerBlobRef) + + // Verify that populateClaim noted the missing public key blob: + { + key := fmt.Sprintf("missing|%s|%s", claimRef, id.SignerBlobRef) + if got, err := s.Get(key); got == "" || err != nil { + t.Errorf("key %q missing (err: %v); want 1", key, err) + } + } + + // Now make it available again: + idx.KeyFetcher = idx.Exp_BlobSource() + + if err := copyBlob(id.SignerBlobRef, idx.Exp_BlobSource().(*test.Fetcher), goodKeyFetcher); err != nil { + t.Errorf("Error copying public key to BlobSource: %v", err) + } + if err := copyBlob(id.SignerBlobRef, idx, goodKeyFetcher); err != nil { + t.Errorf("Error uploading public key to indexer: %v", err) + } + + idx.Exp_AwaitReindexing(t) + + // Verify that populateClaim noted the missing public key blob: + { + key := fmt.Sprintf("missing|%s|%s", claimRef, id.SignerBlobRef) + if got, err := s.Get(key); got != "" || err == nil { + t.Errorf("row %q still exists", key) + } + } +} + +func copyBlob(br blob.Ref, dst blobserver.BlobReceiver, src blob.Fetcher) error { + rc, _, err := src.Fetch(br) + if err != nil { + return err + } + defer rc.Close() + _, err = dst.ReceiveBlob(br, rc) + return err +} + +// tests that we add the missing wholeRef entries in FileInfo rows when going from +// a version 4 to a version 5 index. 
+func TestFixMissingWholeref(t *testing.T) { + tf := new(test.Fetcher) + s := sorted.NewMemoryKeyValue() + + ix, err := index.New(s) + if err != nil { + t.Fatal(err) + } + ix.InitBlobSource(tf) + + // populate with a file + add := func(b *test.Blob) { + tf.AddBlob(b) + if _, err := ix.ReceiveBlob(b.BlobRef(), b.Reader()); err != nil { + t.Fatalf("ReceiveBlob(%v): %v", b.BlobRef(), err) + } + } + add(chunk1) + add(chunk2) + add(chunk3) + add(fileBlob) + + // revert the row to the old form, by stripping the wholeRef suffix + key := "fileinfo|" + fileBlobRef.String() + val5, err := s.Get(key) + if err != nil { + t.Fatalf("could not get %v: %v", key, err) + } + parts := strings.SplitN(val5, "|", 4) + val4 := strings.Join(parts[:3], "|") + if err := s.Set(key, val4); err != nil { + t.Fatalf("could not set (%v, %v): %v", key, val4, err) + } + + // revert index version at 4 to trigger the fix + if err := s.Set("schemaversion", "4"); err != nil { + t.Fatal(err) + } + + // init broken index + ix, err = index.New(s) + if err != index.Exp_ErrMissingWholeRef { + t.Fatalf("wrong error upon index initialization: got %v, wanted %v", err, index.Exp_ErrMissingWholeRef) + } + // and fix it + if err := ix.Exp_FixMissingWholeRef(tf); err != nil { + t.Fatal(err) + } + + // init fixed index + ix, err = index.New(s) + if err != nil { + t.Fatal(err) + } + // and check that the value is now actually fixed + fi, err := ix.GetFileInfo(fileBlobRef) + if err != nil { + t.Fatal(err) + } + if fi.WholeRef.String() != parts[3] { + t.Fatalf("index fileInfo wholeref was not fixed: got %q, wanted %v", fi.WholeRef, parts[3]) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/indextest/testdata/0s.mp3 b/vendor/github.com/camlistore/camlistore/pkg/index/indextest/testdata/0s.mp3 new file mode 100644 index 00000000..7e4a59de Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/index/indextest/testdata/0s.mp3 differ diff --git 
a/vendor/github.com/camlistore/camlistore/pkg/index/indextest/testdata/dude-exif.jpg b/vendor/github.com/camlistore/camlistore/pkg/index/indextest/testdata/dude-exif.jpg new file mode 100644 index 00000000..1fcc5482 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/index/indextest/testdata/dude-exif.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/indextest/testdata/dude.jpg b/vendor/github.com/camlistore/camlistore/pkg/index/indextest/testdata/dude.jpg new file mode 100644 index 00000000..447710ec Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/index/indextest/testdata/dude.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/indextest/tests.go b/vendor/github.com/camlistore/camlistore/pkg/index/indextest/tests.go new file mode 100644 index 00000000..def0d0f9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/indextest/tests.go @@ -0,0 +1,1378 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package indextest contains the unit tests for the indexer so they +// can be re-used for each specific implementation of the index +// Storage interface. 
+package indextest + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/index" + "camlistore.org/pkg/jsonsign" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/test" + "camlistore.org/pkg/types/camtypes" +) + +// An IndexDeps is a helper for populating and querying an Index for tests. +type IndexDeps struct { + Index *index.Index + + BlobSource *test.Fetcher + + // Following three needed for signing: + PublicKeyFetcher *test.Fetcher + EntityFetcher jsonsign.EntityFetcher // fetching decrypted openpgp entities + SignerBlobRef blob.Ref + + now time.Time // fake clock, nanos since epoch + + Fataler // optional means of failing. +} + +type Fataler interface { + Fatalf(format string, args ...interface{}) +} + +type logFataler struct{} + +func (logFataler) Fatalf(format string, args ...interface{}) { + log.Fatalf(format, args...) 
+} + +func (id *IndexDeps) Get(key string) string { + v, _ := id.Index.Storage().Get(key) + return v +} + +func (id *IndexDeps) Set(key, value string) error { + return id.Index.Storage().Set(key, value) +} + +func (id *IndexDeps) DumpIndex(t *testing.T) { + t.Logf("Begin index dump:") + it := id.Index.Storage().Find("", "") + for it.Next() { + t.Logf(" %q = %q", it.Key(), it.Value()) + } + if err := it.Close(); err != nil { + t.Fatalf("iterator close = %v", err) + } + t.Logf("End index dump.") +} + +func (id *IndexDeps) uploadAndSign(m *schema.Builder) blob.Ref { + m.SetSigner(id.SignerBlobRef) + unsigned, err := m.JSON() + if err != nil { + id.Fatalf("uploadAndSignMap: " + err.Error()) + } + sr := &jsonsign.SignRequest{ + UnsignedJSON: unsigned, + Fetcher: id.PublicKeyFetcher, + EntityFetcher: id.EntityFetcher, + SignatureTime: id.now, + } + signed, err := sr.Sign() + if err != nil { + id.Fatalf("problem signing: " + err.Error()) + } + tb := &test.Blob{Contents: signed} + _, err = id.BlobSource.ReceiveBlob(tb.BlobRef(), tb.Reader()) + if err != nil { + id.Fatalf("public uploading signed blob to blob source, pre-indexing: %v, %v", tb.BlobRef(), err) + } + _, err = id.Index.ReceiveBlob(tb.BlobRef(), tb.Reader()) + if err != nil { + id.Fatalf("problem indexing blob: %v\nblob was:\n%s", err, signed) + } + return tb.BlobRef() +} + +// NewPermanode creates (& signs) a new permanode and adds it +// to the index, returning its blobref. +func (id *IndexDeps) NewPermanode() blob.Ref { + unsigned := schema.NewUnsignedPermanode() + return id.uploadAndSign(unsigned) +} + +// NewPermanode creates (& signs) a new planned permanode and adds it +// to the index, returning its blobref. 
+func (id *IndexDeps) NewPlannedPermanode(key string) blob.Ref { + unsigned := schema.NewPlannedPermanode(key) + return id.uploadAndSign(unsigned) +} + +func (id *IndexDeps) advanceTime() time.Time { + id.now = id.now.Add(1 * time.Second) + return id.now +} + +// LastTime returns the time of the most recent mutation (claim). +func (id *IndexDeps) LastTime() time.Time { + return id.now +} + +func (id *IndexDeps) SetAttribute(permaNode blob.Ref, attr, value string) blob.Ref { + m := schema.NewSetAttributeClaim(permaNode, attr, value) + m.SetClaimDate(id.advanceTime()) + return id.uploadAndSign(m) +} + +func (id *IndexDeps) SetAttribute_NoTimeMove(permaNode blob.Ref, attr, value string) blob.Ref { + m := schema.NewSetAttributeClaim(permaNode, attr, value) + m.SetClaimDate(id.LastTime()) + return id.uploadAndSign(m) +} + +func (id *IndexDeps) AddAttribute(permaNode blob.Ref, attr, value string) blob.Ref { + m := schema.NewAddAttributeClaim(permaNode, attr, value) + m.SetClaimDate(id.advanceTime()) + return id.uploadAndSign(m) +} + +func (id *IndexDeps) DelAttribute(permaNode blob.Ref, attr, value string) blob.Ref { + m := schema.NewDelAttributeClaim(permaNode, attr, value) + m.SetClaimDate(id.advanceTime()) + return id.uploadAndSign(m) +} + +func (id *IndexDeps) Delete(target blob.Ref) blob.Ref { + m := schema.NewDeleteClaim(target) + m.SetClaimDate(id.advanceTime()) + return id.uploadAndSign(m) +} + +var noTime = time.Time{} + +func (id *IndexDeps) UploadString(v string) blob.Ref { + cb := &test.Blob{Contents: v} + id.BlobSource.AddBlob(cb) + br := cb.BlobRef() + _, err := id.Index.ReceiveBlob(br, cb.Reader()) + if err != nil { + id.Fatalf("UploadString: %v", err) + } + return br +} + +// If modTime is zero, it's not used. 
+func (id *IndexDeps) UploadFile(fileName string, contents string, modTime time.Time) (fileRef, wholeRef blob.Ref) { + wholeRef = id.UploadString(contents) + + m := schema.NewFileMap(fileName) + m.PopulateParts(int64(len(contents)), []schema.BytesPart{ + schema.BytesPart{ + Size: uint64(len(contents)), + BlobRef: wholeRef, + }}) + if !modTime.IsZero() { + m.SetModTime(modTime) + } + fjson, err := m.JSON() + if err != nil { + id.Fatalf("UploadFile.JSON: %v", err) + } + fb := &test.Blob{Contents: fjson} + id.BlobSource.AddBlob(fb) + fileRef = fb.BlobRef() + _, err = id.Index.ReceiveBlob(fileRef, fb.Reader()) + if err != nil { + panic(err) + } + return +} + +// If modTime is zero, it's not used. +func (id *IndexDeps) UploadDir(dirName string, children []blob.Ref, modTime time.Time) blob.Ref { + // static-set entries blob + ss := new(schema.StaticSet) + for _, child := range children { + ss.Add(child) + } + ssjson := ss.Blob().JSON() + ssb := &test.Blob{Contents: ssjson} + id.BlobSource.AddBlob(ssb) + _, err := id.Index.ReceiveBlob(ssb.BlobRef(), ssb.Reader()) + if err != nil { + id.Fatalf("UploadDir.ReceiveBlob: %v", err) + } + + // directory blob + bb := schema.NewDirMap(dirName) + bb.PopulateDirectoryMap(ssb.BlobRef()) + if !modTime.IsZero() { + bb.SetModTime(modTime) + } + dirjson, err := bb.JSON() + if err != nil { + id.Fatalf("UploadDir.JSON: %v", err) + } + dirb := &test.Blob{Contents: dirjson} + id.BlobSource.AddBlob(dirb) + _, err = id.Index.ReceiveBlob(dirb.BlobRef(), dirb.Reader()) + if err != nil { + id.Fatalf("UploadDir.ReceiveBlob: %v", err) + } + return dirb.BlobRef() +} + +// NewIndexDeps returns an IndexDeps helper for populating and working +// with the provided index for tests. 
+func NewIndexDeps(index *index.Index) *IndexDeps { + camliRootPath, err := osutil.GoPackagePath("camlistore.org") + if err != nil { + log.Fatal("Package camlistore.org no found in $GOPATH or $GOPATH not defined") + } + secretRingFile := filepath.Join(camliRootPath, "pkg", "jsonsign", "testdata", "test-secring.gpg") + pubKey := &test.Blob{Contents: `-----BEGIN PGP PUBLIC KEY BLOCK----- + +xsBNBEzgoVsBCAC/56aEJ9BNIGV9FVP+WzenTAkg12k86YqlwJVAB/VwdMlyXxvi +bCT1RVRfnYxscs14LLfcMWF3zMucw16mLlJCBSLvbZ0jn4h+/8vK5WuAdjw2YzLs +WtBcjWn3lV6tb4RJz5gtD/o1w8VWxwAnAVIWZntKAWmkcChCRgdUeWso76+plxE5 +aRYBJqdT1mctGqNEISd/WYPMgwnWXQsVi3x4z1dYu2tD9uO1dkAff12z1kyZQIBQ +rexKYRRRh9IKAayD4kgS0wdlULjBU98aeEaMz1ckuB46DX3lAYqmmTEL/Rl9cOI0 +Enpn/oOOfYFa5h0AFndZd1blMvruXfdAobjVABEBAAE= +=28/7 +-----END PGP PUBLIC KEY BLOCK-----`} + + id := &IndexDeps{ + Index: index, + BlobSource: new(test.Fetcher), + PublicKeyFetcher: new(test.Fetcher), + EntityFetcher: &jsonsign.CachingEntityFetcher{ + Fetcher: &jsonsign.FileEntityFetcher{File: secretRingFile}, + }, + SignerBlobRef: pubKey.BlobRef(), + now: test.ClockOrigin, + Fataler: logFataler{}, + } + // Add dev client test key public key, keyid 26F5ABDA, + // blobref sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007 + if g, w := id.SignerBlobRef.String(), "sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007"; g != w { + id.Fatalf("unexpected signer blobref; got signer = %q; want %q", g, w) + } + id.PublicKeyFetcher.AddBlob(pubKey) + id.Index.KeyFetcher = id.PublicKeyFetcher + id.Index.InitBlobSource(id.BlobSource) + return id +} + +func Index(t *testing.T, initIdx func() *index.Index) { + oldLocal := time.Local + time.Local = time.UTC + defer func() { time.Local = oldLocal }() + + id := NewIndexDeps(initIdx()) + id.Fataler = t + defer id.DumpIndex(t) + pn := id.NewPermanode() + t.Logf("uploaded permanode %q", pn) + br1 := id.SetAttribute(pn, "tag", "foo1") + br1Time := id.LastTime() + t.Logf("set attribute %q", br1) + br2 := id.SetAttribute(pn, "tag", "foo2") + 
br2Time := id.LastTime() + t.Logf("set attribute %q", br2) + rootClaim := id.SetAttribute(pn, "camliRoot", "rootval") + rootClaimTime := id.LastTime() + t.Logf("set attribute %q", rootClaim) + + pnChild := id.NewPermanode() + br3 := id.SetAttribute(pnChild, "tag", "bar") + br3Time := id.LastTime() + t.Logf("set attribute %q", br3) + memberRef := id.AddAttribute(pn, "camliMember", pnChild.String()) + t.Logf("add-attribute claim %q points to member permanode %q", memberRef, pnChild) + memberRefTime := id.LastTime() + + // TODO(bradfitz): add EXIF tests here, once that stuff is ready. + if false { + camliRootPath, err := osutil.GoPackagePath("camlistore.org") + if err != nil { + t.Fatal("Package camlistore.org no found in $GOPATH or $GOPATH not defined") + } + for i := 1; i <= 8; i++ { + fileBase := fmt.Sprintf("f%d-exif.jpg", i) + fileName := filepath.Join(camliRootPath, "pkg", "images", "testdata", fileBase) + contents, err := ioutil.ReadFile(fileName) + if err != nil { + t.Fatal(err) + } + id.UploadFile(fileBase, string(contents), noTime) + } + } + + // Upload some files. + var jpegFileRef, exifFileRef, mediaFileRef, mediaWholeRef blob.Ref + { + camliRootPath, err := osutil.GoPackagePath("camlistore.org") + if err != nil { + t.Fatal("Package camlistore.org no found in $GOPATH or $GOPATH not defined") + } + uploadFile := func(file string, modTime time.Time) (fileRef, wholeRef blob.Ref) { + fileName := filepath.Join(camliRootPath, "pkg", "index", "indextest", "testdata", file) + contents, err := ioutil.ReadFile(fileName) + if err != nil { + t.Fatal(err) + } + fileRef, wholeRef = id.UploadFile(file, string(contents), modTime) + return + } + jpegFileRef, _ = uploadFile("dude.jpg", noTime) + exifFileRef, _ = uploadFile("dude-exif.jpg", time.Unix(1361248796, 0)) + mediaFileRef, mediaWholeRef = uploadFile("0s.mp3", noTime) + } + + // Upload the dir containing the previous files. 
+ imagesDirRef := id.UploadDir( + "testdata", + []blob.Ref{jpegFileRef, exifFileRef, mediaFileRef}, + time.Now(), + ) + + lastPermanodeMutation := id.LastTime() + + key := "signerkeyid:sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007" + if g, e := id.Get(key), "2931A67C26F5ABDA"; g != e { + t.Fatalf("%q = %q, want %q", key, g, e) + } + + key = "imagesize|" + jpegFileRef.String() + if g, e := id.Get(key), "50|100"; g != e { + t.Errorf("JPEG dude.jpg key %q = %q; want %q", key, g, e) + } + key = "filetimes|" + jpegFileRef.String() + if g, e := id.Get(key), ""; g != e { + t.Errorf("JPEG dude.jpg key %q = %q; want %q", key, g, e) + } + + key = "filetimes|" + exifFileRef.String() + if g, e := id.Get(key), "2013-02-18T01%3A11%3A20Z%2C2013-02-19T04%3A39%3A56Z"; g != e { + t.Errorf("EXIF dude-exif.jpg key %q = %q; want %q", key, g, e) + } + + key = "have:" + pn.String() + pnSizeStr := strings.TrimSuffix(id.Get(key), "|indexed") + if pnSizeStr == "" { + t.Fatalf("missing key %q", key) + } + + key = "meta:" + pn.String() + if g, e := id.Get(key), pnSizeStr+"|application/json; camliType=permanode"; g != e { + t.Errorf("key %q = %q, want %q", key, g, e) + } + + key = "recpn|2931A67C26F5ABDA|rt7988-88-71T98:67:62.999876543Z|" + br1.String() + if g, e := id.Get(key), pn.String(); g != e { + t.Fatalf("%q = %q, want %q (permanode)", key, g, e) + } + + key = "recpn|2931A67C26F5ABDA|rt7988-88-71T98:67:61.999876543Z|" + br2.String() + if g, e := id.Get(key), pn.String(); g != e { + t.Fatalf("%q = %q, want %q (permanode)", key, g, e) + } + + key = fmt.Sprintf("edgeback|%s|%s|%s", pnChild, pn, memberRef) + if g, e := id.Get(key), "permanode|"; g != e { + t.Fatalf("edgeback row %q = %q, want %q", key, g, e) + } + + mediaTests := []struct { + prop, exp string + }{ + {"title", "Zero Seconds"}, + {"artist", "Test Artist"}, + {"album", "Test Album"}, + {"genre", "(20)Alternative"}, + {"musicbrainzalbumid", "00000000-0000-0000-0000-000000000000"}, + {"year", "1992"}, + {"track", "1"}, + 
{"disc", "2"}, + {"mediaref", "sha1-fefac74a1d5928316d7131747107c8a61b71ffe4"}, + {"durationms", "26"}, + } + for _, tt := range mediaTests { + key = fmt.Sprintf("mediatag|%s|%s", mediaWholeRef.String(), tt.prop) + if g, _ := url.QueryUnescape(id.Get(key)); g != tt.exp { + t.Errorf("0s.mp3 key %q = %q; want %q", key, g, tt.exp) + } + } + + // PermanodeOfSignerAttrValue + { + gotPN, err := id.Index.PermanodeOfSignerAttrValue(id.SignerBlobRef, "camliRoot", "rootval") + if err != nil { + t.Fatalf("id.Index.PermanodeOfSignerAttrValue = %v", err) + } + if gotPN.String() != pn.String() { + t.Errorf("id.Index.PermanodeOfSignerAttrValue = %q, want %q", gotPN, pn) + } + _, err = id.Index.PermanodeOfSignerAttrValue(id.SignerBlobRef, "camliRoot", "MISSING") + if err == nil { + t.Errorf("expected an error from PermanodeOfSignerAttrValue on missing value") + } + } + + // SearchPermanodesWithAttr - match attr type "tag" and value "foo1" + { + ch := make(chan blob.Ref, 10) + req := &camtypes.PermanodeByAttrRequest{ + Signer: id.SignerBlobRef, + Attribute: "tag", + Query: "foo1", + } + err := id.Index.SearchPermanodesWithAttr(ch, req) + if err != nil { + t.Fatalf("SearchPermanodesWithAttr = %v", err) + } + var got []blob.Ref + for r := range ch { + got = append(got, r) + } + want := []blob.Ref{pn} + if len(got) < 1 || got[0].String() != want[0].String() { + t.Errorf("id.Index.SearchPermanodesWithAttr gives %q, want %q", got, want) + } + } + + // SearchPermanodesWithAttr - match all with attr type "tag" + { + ch := make(chan blob.Ref, 10) + req := &camtypes.PermanodeByAttrRequest{ + Signer: id.SignerBlobRef, + Attribute: "tag", + } + err := id.Index.SearchPermanodesWithAttr(ch, req) + if err != nil { + t.Fatalf("SearchPermanodesWithAttr = %v", err) + } + var got []blob.Ref + for r := range ch { + got = append(got, r) + } + want := []blob.Ref{pn, pnChild} + if len(got) != len(want) { + t.Errorf("SearchPermanodesWithAttr results differ.\n got: %q\nwant: %q", + got, want) + } + for _, 
w := range want { + found := false + for _, g := range got { + if g.String() == w.String() { + found = true + break + } + } + if !found { + t.Errorf("SearchPermanodesWithAttr: %v was not found.\n", w) + } + } + } + + // Delete value "pony" of type "title" (which does not actually exist) for pn + br4 := id.DelAttribute(pn, "title", "pony") + br4Time := id.LastTime() + // and verify it is not found when searching by attr + { + ch := make(chan blob.Ref, 10) + req := &camtypes.PermanodeByAttrRequest{ + Signer: id.SignerBlobRef, + Attribute: "title", + Query: "pony", + } + err := id.Index.SearchPermanodesWithAttr(ch, req) + if err != nil { + t.Fatalf("SearchPermanodesWithAttr = %v", err) + } + var got []blob.Ref + for r := range ch { + got = append(got, r) + } + want := []blob.Ref{} + if len(got) != len(want) { + t.Errorf("SearchPermanodesWithAttr results differ.\n got: %q\nwant: %q", + got, want) + } + } + + // GetRecentPermanodes + { + verify := func(prefix string, want []camtypes.RecentPermanode, before time.Time) { + ch := make(chan camtypes.RecentPermanode, 10) // expect 2 results, but maybe more if buggy. 
+ err := id.Index.GetRecentPermanodes(ch, id.SignerBlobRef, 50, before) + if err != nil { + t.Fatalf("[%s] GetRecentPermanodes = %v", prefix, err) + } + got := []camtypes.RecentPermanode{} + for r := range ch { + got = append(got, r) + } + if len(got) != len(want) { + t.Errorf("[%s] GetRecentPermanode results differ.\n got: %v\nwant: %v", + prefix, searchResults(got), searchResults(want)) + } + for _, w := range want { + found := false + for _, g := range got { + if g.Equal(w) { + found = true + break + } + } + if !found { + t.Errorf("[%s] GetRecentPermanode: %v was not found.\n got: %v\nwant: %v", + prefix, w, searchResults(got), searchResults(want)) + } + } + } + + want := []camtypes.RecentPermanode{ + { + Permanode: pn, + Signer: id.SignerBlobRef, + LastModTime: br4Time, + }, + { + Permanode: pnChild, + Signer: id.SignerBlobRef, + LastModTime: br3Time, + }, + } + + before := time.Time{} + verify("Zero before", want, before) + + before = lastPermanodeMutation + t.Log("lastPermanodeMutation", lastPermanodeMutation, + lastPermanodeMutation.Unix()) + verify("Non-zero before", want[1:], before) + } + // GetDirMembers + { + ch := make(chan blob.Ref, 10) // expect 2 results + err := id.Index.GetDirMembers(imagesDirRef, ch, 50) + if err != nil { + t.Fatalf("GetDirMembers = %v", err) + } + got := []blob.Ref{} + for r := range ch { + got = append(got, r) + } + want := []blob.Ref{jpegFileRef, exifFileRef, mediaFileRef} + if len(got) != len(want) { + t.Errorf("GetDirMembers results differ.\n got: %v\nwant: %v", + got, want) + } + for _, w := range want { + found := false + for _, g := range got { + if w == g { + found = true + break + } + } + if !found { + t.Errorf("GetDirMembers: %v was not found.", w) + } + } + } + + // GetBlobMeta + { + meta, err := id.Index.GetBlobMeta(pn) + if err != nil { + t.Errorf("GetBlobMeta(%q) = %v", pn, err) + } else { + if e := "permanode"; meta.CamliType != e { + t.Errorf("GetBlobMeta(%q) mime = %q, want %q", pn, meta.CamliType, e) + } + if 
meta.Size == 0 { + t.Errorf("GetBlobMeta(%q) size is zero", pn) + } + } + _, err = id.Index.GetBlobMeta(blob.ParseOrZero("abc-123")) + if err != os.ErrNotExist { + t.Errorf("GetBlobMeta(dummy blobref) = %v; want os.ErrNotExist", err) + } + } + + // AppendClaims + { + claims, err := id.Index.AppendClaims(nil, pn, id.SignerBlobRef, "") + if err != nil { + t.Errorf("AppendClaims = %v", err) + } else { + want := []camtypes.Claim{ + { + BlobRef: br1, + Permanode: pn, + Signer: id.SignerBlobRef, + Date: br1Time.UTC(), + Type: "set-attribute", + Attr: "tag", + Value: "foo1", + }, + { + BlobRef: br2, + Permanode: pn, + Signer: id.SignerBlobRef, + Date: br2Time.UTC(), + Type: "set-attribute", + Attr: "tag", + Value: "foo2", + }, + { + BlobRef: rootClaim, + Permanode: pn, + Signer: id.SignerBlobRef, + Date: rootClaimTime.UTC(), + Type: "set-attribute", + Attr: "camliRoot", + Value: "rootval", + }, + { + BlobRef: memberRef, + Permanode: pn, + Signer: id.SignerBlobRef, + Date: memberRefTime.UTC(), + Type: "add-attribute", + Attr: "camliMember", + Value: pnChild.String(), + }, + { + BlobRef: br4, + Permanode: pn, + Signer: id.SignerBlobRef, + Date: br4Time.UTC(), + Type: "del-attribute", + Attr: "title", + Value: "pony", + }, + } + if !reflect.DeepEqual(claims, want) { + t.Errorf("AppendClaims results differ.\n got: %v\nwant: %v", + claims, want) + } + } + } +} + +func PathsOfSignerTarget(t *testing.T, initIdx func() *index.Index) { + id := NewIndexDeps(initIdx()) + id.Fataler = t + defer id.DumpIndex(t) + signer := id.SignerBlobRef + pn := id.NewPermanode() + t.Logf("uploaded permanode %q", pn) + + claim1 := id.SetAttribute(pn, "camliPath:somedir", "targ-123") + claim1Time := id.LastTime().UTC() + claim2 := id.SetAttribute(pn, "camliPath:with|pipe", "targ-124") + claim2Time := id.LastTime().UTC() + t.Logf("made path claims %q and %q", claim1, claim2) + + type test struct { + blobref string + want int + } + tests := []test{ + {"targ-123", 1}, + {"targ-124", 1}, + {"targ-125", 
0}, + } + for _, tt := range tests { + paths, err := id.Index.PathsOfSignerTarget(signer, blob.ParseOrZero(tt.blobref)) + if err != nil { + t.Fatalf("PathsOfSignerTarget(%q): %v", tt.blobref, err) + } + if len(paths) != tt.want { + t.Fatalf("PathsOfSignerTarget(%q) got %d results; want %d", + tt.blobref, len(paths), tt.want) + } + if tt.blobref == "targ-123" { + p := paths[0] + want := fmt.Sprintf( + "Path{Claim: %s, %v; Base: %s + Suffix \"somedir\" => Target targ-123}", + claim1, claim1Time, pn) + if g := p.String(); g != want { + t.Errorf("claim wrong.\n got: %s\nwant: %s", g, want) + } + } + } + tests = []test{ + {"somedir", 1}, + {"with|pipe", 1}, + {"void", 0}, + } + for _, tt := range tests { + paths, err := id.Index.PathsLookup(id.SignerBlobRef, pn, tt.blobref) + if err != nil { + t.Fatalf("PathsLookup(%q): %v", tt.blobref, err) + } + if len(paths) != tt.want { + t.Fatalf("PathsLookup(%q) got %d results; want %d", + tt.blobref, len(paths), tt.want) + } + if tt.blobref == "with|pipe" { + p := paths[0] + want := fmt.Sprintf( + "Path{Claim: %s, %s; Base: %s + Suffix \"with|pipe\" => Target targ-124}", + claim2, claim2Time, pn) + if g := p.String(); g != want { + t.Errorf("claim wrong.\n got: %s\nwant: %s", g, want) + } + } + } + + // now test deletions + // Delete an existing value + claim3 := id.Delete(claim2) + t.Logf("claim %q deletes path claim %q", claim3, claim2) + tests = []test{ + {"targ-123", 1}, + {"targ-124", 0}, + {"targ-125", 0}, + } + for _, tt := range tests { + signer := id.SignerBlobRef + paths, err := id.Index.PathsOfSignerTarget(signer, blob.ParseOrZero(tt.blobref)) + if err != nil { + t.Fatalf("PathsOfSignerTarget(%q): %v", tt.blobref, err) + } + if len(paths) != tt.want { + t.Fatalf("PathsOfSignerTarget(%q) got %d results; want %d", + tt.blobref, len(paths), tt.want) + } + } + tests = []test{ + {"somedir", 1}, + {"with|pipe", 0}, + {"void", 0}, + } + for _, tt := range tests { + paths, err := id.Index.PathsLookup(id.SignerBlobRef, pn, 
tt.blobref) + if err != nil { + t.Fatalf("PathsLookup(%q): %v", tt.blobref, err) + } + if len(paths) != tt.want { + t.Fatalf("PathsLookup(%q) got %d results; want %d", + tt.blobref, len(paths), tt.want) + } + } + + // recreate second path, and test if the previous deletion of it + // is indeed ignored. + claim4 := id.Delete(claim3) + t.Logf("delete claim %q deletes claim %q, which should undelete %q", claim4, claim3, claim2) + tests = []test{ + {"targ-123", 1}, + {"targ-124", 1}, + {"targ-125", 0}, + } + for _, tt := range tests { + signer := id.SignerBlobRef + paths, err := id.Index.PathsOfSignerTarget(signer, blob.ParseOrZero(tt.blobref)) + if err != nil { + t.Fatalf("PathsOfSignerTarget(%q): %v", tt.blobref, err) + } + if len(paths) != tt.want { + t.Fatalf("PathsOfSignerTarget(%q) got %d results; want %d", + tt.blobref, len(paths), tt.want) + } + // and check the modtime too + if tt.blobref == "targ-124" { + p := paths[0] + want := fmt.Sprintf( + "Path{Claim: %s, %v; Base: %s + Suffix \"with|pipe\" => Target targ-124}", + claim2, claim2Time, pn) + if g := p.String(); g != want { + t.Errorf("claim wrong.\n got: %s\nwant: %s", g, want) + } + } + } + tests = []test{ + {"somedir", 1}, + {"with|pipe", 1}, + {"void", 0}, + } + for _, tt := range tests { + paths, err := id.Index.PathsLookup(id.SignerBlobRef, pn, tt.blobref) + if err != nil { + t.Fatalf("PathsLookup(%q): %v", tt.blobref, err) + } + if len(paths) != tt.want { + t.Fatalf("PathsLookup(%q) got %d results; want %d", + tt.blobref, len(paths), tt.want) + } + // and check that modtime is now claim4Time + if tt.blobref == "with|pipe" { + p := paths[0] + want := fmt.Sprintf( + "Path{Claim: %s, %s; Base: %s + Suffix \"with|pipe\" => Target targ-124}", + claim2, claim2Time, pn) + if g := p.String(); g != want { + t.Errorf("claim wrong.\n got: %s\nwant: %s", g, want) + } + } + } +} + +func Files(t *testing.T, initIdx func() *index.Index) { + id := NewIndexDeps(initIdx()) + id.Fataler = t + fileTime := 
time.Unix(1361250375, 0) + fileRef, wholeRef := id.UploadFile("foo.html", "I am an html file.", fileTime) + t.Logf("uploaded fileref %q, wholeRef %q", fileRef, wholeRef) + id.DumpIndex(t) + + // ExistingFileSchemas + { + key := fmt.Sprintf("wholetofile|%s|%s", wholeRef, fileRef) + if g, e := id.Get(key), "1"; g != e { + t.Fatalf("%q = %q, want %q", key, g, e) + } + + refs, err := id.Index.ExistingFileSchemas(wholeRef) + if err != nil { + t.Fatalf("ExistingFileSchemas = %v", err) + } + want := []blob.Ref{fileRef} + if !reflect.DeepEqual(refs, want) { + t.Errorf("ExistingFileSchemas got = %#v, want %#v", refs, want) + } + } + + // FileInfo + { + key := fmt.Sprintf("fileinfo|%s", fileRef) + if g, e := id.Get(key), "31|foo.html|text%2Fhtml|sha1-153cb1b63a8f120a0e3e14ff34c64f169df9430f"; g != e { + t.Fatalf("%q = %q, want %q", key, g, e) + } + + fi, err := id.Index.GetFileInfo(fileRef) + if err != nil { + t.Fatalf("GetFileInfo = %v", err) + } + if got, want := fi.Size, int64(31); got != want { + t.Errorf("Size = %d, want %d", got, want) + } + if got, want := fi.FileName, "foo.html"; got != want { + t.Errorf("FileName = %q, want %q", got, want) + } + if got, want := fi.MIMEType, "text/html"; got != want { + t.Errorf("MIMEType = %q, want %q", got, want) + } + if got, want := fi.Time, fileTime; !got.Time().Equal(want) { + t.Errorf("Time = %v; want %v", got, want) + } + if got, want := fi.WholeRef, blob.MustParse("sha1-153cb1b63a8f120a0e3e14ff34c64f169df9430f"); got != want { + t.Errorf("WholeRef = %v; want %v", got, want) + } + } +} + +func EdgesTo(t *testing.T, initIdx func() *index.Index) { + idx := initIdx() + id := NewIndexDeps(idx) + id.Fataler = t + defer id.DumpIndex(t) + + // pn1 ---member---> pn2 + pn1 := id.NewPermanode() + pn2 := id.NewPermanode() + claim1 := id.AddAttribute(pn1, "camliMember", pn2.String()) + + t.Logf("edge %s --> %s", pn1, pn2) + + // Look for pn1 + { + edges, err := idx.EdgesTo(pn2, nil) + if err != nil { + t.Fatal(err) + } + if len(edges) != 
1 { + t.Fatalf("num edges = %d; want 1", len(edges)) + } + wantEdge := &camtypes.Edge{ + From: pn1, + To: pn2, + FromType: "permanode", + } + if got, want := edges[0].String(), wantEdge.String(); got != want { + t.Errorf("Wrong edge.\n GOT: %v\nWANT: %v", got, want) + } + } + + // Delete claim -> break edge relationship. + del1 := id.Delete(claim1) + t.Logf("del claim %q deletes claim %q, breaks link between p1 and p2", del1, claim1) + // test that we can't find anymore pn1 from pn2 + { + edges, err := idx.EdgesTo(pn2, nil) + if err != nil { + t.Fatal(err) + } + if len(edges) != 0 { + t.Fatalf("num edges = %d; want 0", len(edges)) + } + } + + // Undelete, should restore the link. + del2 := id.Delete(del1) + t.Logf("del claim %q deletes del claim %q, restores link between p1 and p2", del2, del1) + { + edges, err := idx.EdgesTo(pn2, nil) + if err != nil { + t.Fatal(err) + } + if len(edges) != 1 { + t.Fatalf("num edges = %d; want 1", len(edges)) + } + wantEdge := &camtypes.Edge{ + From: pn1, + To: pn2, + FromType: "permanode", + } + if got, want := edges[0].String(), wantEdge.String(); got != want { + t.Errorf("Wrong edge.\n GOT: %v\nWANT: %v", got, want) + } + } +} + +func Delete(t *testing.T, initIdx func() *index.Index) { + idx := initIdx() + id := NewIndexDeps(idx) + id.Fataler = t + defer id.DumpIndex(t) + pn1 := id.NewPermanode() + t.Logf("uploaded permanode %q", pn1) + cl1 := id.SetAttribute(pn1, "tag", "foo1") + cl1Time := id.LastTime() + t.Logf("set attribute %q", cl1) + + // delete pn1 + delpn1 := id.Delete(pn1) + t.Logf("del claim %q deletes %q", delpn1, pn1) + deleted := idx.IsDeleted(pn1) + if !deleted { + t.Fatal("pn1 should be deleted") + } + + // and try to find it with SearchPermanodesWithAttr (which should not work) + { + ch := make(chan blob.Ref, 10) + req := &camtypes.PermanodeByAttrRequest{ + Signer: id.SignerBlobRef, + Attribute: "tag", + Query: "foo1"} + err := id.Index.SearchPermanodesWithAttr(ch, req) + if err != nil { + 
t.Fatalf("SearchPermanodesWithAttr = %v", err) + } + var got []blob.Ref + for r := range ch { + got = append(got, r) + } + want := []blob.Ref{} + if len(got) != len(want) { + t.Errorf("id.Index.SearchPermanodesWithAttr gives %q, want %q", got, want) + } + } + + // delete pn1 again with another claim + delpn1bis := id.Delete(pn1) + t.Logf("del claim %q deletes %q a second time", delpn1bis, pn1) + deleted = idx.IsDeleted(pn1) + if !deleted { + t.Fatal("pn1 should be deleted") + } + + // verify that deleting delpn1 is not enough to make pn1 undeleted + del2 := id.Delete(delpn1) + t.Logf("delete claim %q deletes %q, which should not yet revive %q", del2, delpn1, pn1) + deleted = idx.IsDeleted(pn1) + if !deleted { + t.Fatal("pn1 should not yet be undeleted") + } + // we should not yet be able to find it again with SearchPermanodesWithAttr + { + ch := make(chan blob.Ref, 10) + req := &camtypes.PermanodeByAttrRequest{ + Signer: id.SignerBlobRef, + Attribute: "tag", + Query: "foo1"} + err := id.Index.SearchPermanodesWithAttr(ch, req) + if err != nil { + t.Fatalf("SearchPermanodesWithAttr = %v", err) + } + var got []blob.Ref + for r := range ch { + got = append(got, r) + } + want := []blob.Ref{} + if len(got) != len(want) { + t.Errorf("id.Index.SearchPermanodesWithAttr gives %q, want %q", got, want) + } + } + + // delete delpn1bis as well -> should undelete pn1 + del2bis := id.Delete(delpn1bis) + t.Logf("delete claim %q deletes %q, which should revive %q", del2bis, delpn1bis, pn1) + deleted = idx.IsDeleted(pn1) + if deleted { + t.Fatal("pn1 should be undeleted") + } + // we should now be able to find it again with SearchPermanodesWithAttr + { + ch := make(chan blob.Ref, 10) + req := &camtypes.PermanodeByAttrRequest{ + Signer: id.SignerBlobRef, + Attribute: "tag", + Query: "foo1"} + err := id.Index.SearchPermanodesWithAttr(ch, req) + if err != nil { + t.Fatalf("SearchPermanodesWithAttr = %v", err) + } + var got []blob.Ref + for r := range ch { + got = append(got, r) + } + 
want := []blob.Ref{pn1} + if len(got) < 1 || got[0].String() != want[0].String() { + t.Errorf("id.Index.SearchPermanodesWithAttr gives %q, want %q", got, want) + } + } + + // Delete cl1 + del3 := id.Delete(cl1) + t.Logf("del claim %q deletes claim %q", del3, cl1) + deleted = idx.IsDeleted(cl1) + if !deleted { + t.Fatal("cl1 should be deleted") + } + // we should not find anything with SearchPermanodesWithAttr + { + ch := make(chan blob.Ref, 10) + req := &camtypes.PermanodeByAttrRequest{ + Signer: id.SignerBlobRef, + Attribute: "tag", + Query: "foo1"} + err := id.Index.SearchPermanodesWithAttr(ch, req) + if err != nil { + t.Fatalf("SearchPermanodesWithAttr = %v", err) + } + var got []blob.Ref + for r := range ch { + got = append(got, r) + } + want := []blob.Ref{} + if len(got) != len(want) { + t.Errorf("id.Index.SearchPermanodesWithAttr gives %q, want %q", got, want) + } + } + // and now check that AppendClaims finds nothing for pn + { + claims, err := id.Index.AppendClaims(nil, pn1, id.SignerBlobRef, "") + if err != nil { + t.Errorf("AppendClaims = %v", err) + } else { + want := []camtypes.Claim{} + if len(claims) != len(want) { + t.Errorf("id.Index.AppendClaims gives %q, want %q", claims, want) + } + } + } + + // undelete cl1 + del4 := id.Delete(del3) + t.Logf("del claim %q deletes del claim %q, which should undelete %q", del4, del3, cl1) + // We should now be able to find it again with both methods + { + ch := make(chan blob.Ref, 10) + req := &camtypes.PermanodeByAttrRequest{ + Signer: id.SignerBlobRef, + Attribute: "tag", + Query: "foo1"} + err := id.Index.SearchPermanodesWithAttr(ch, req) + if err != nil { + t.Fatalf("SearchPermanodesWithAttr = %v", err) + } + var got []blob.Ref + for r := range ch { + got = append(got, r) + } + want := []blob.Ref{pn1} + if len(got) < 1 || got[0].String() != want[0].String() { + t.Errorf("id.Index.SearchPermanodesWithAttr gives %q, want %q", got, want) + } + } + // and check that AppendClaims finds cl1, with the right modtime 
too + { + claims, err := id.Index.AppendClaims(nil, pn1, id.SignerBlobRef, "") + if err != nil { + t.Errorf("AppendClaims = %v", err) + } else { + want := []camtypes.Claim{ + camtypes.Claim{ + BlobRef: cl1, + Permanode: pn1, + Signer: id.SignerBlobRef, + Date: cl1Time.UTC(), + Type: "set-attribute", + Attr: "tag", + Value: "foo1", + }, + } + if !reflect.DeepEqual(claims, want) { + t.Errorf("GetOwnerClaims results differ.\n got: %v\nwant: %v", + claims, want) + } + } + } +} + +type searchResults []camtypes.RecentPermanode + +func (s searchResults) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "[%d search results: ", len(s)) + for _, r := range s { + fmt.Fprintf(&buf, "{BlobRef: %s, Signer: %s, LastModTime: %d}", + r.Permanode, r.Signer, r.LastModTime.Unix()) + } + buf.WriteString("]") + return buf.String() +} + +func Reindex(t *testing.T, initIdx func() *index.Index) { + defaultReindexMaxProcs := index.ReindexMaxProcs() + // if not startOoo, the outOfOrderIndexerLoop will not be started, + // which should demonstrate that: + // since delpn1 will be enumerated before pn1, and indexing of delpn1 + // requires pn1, reindexing will fail. + reindex := func(t *testing.T, initIdx func() *index.Index, startOoo bool) { + if startOoo { + index.SetReindexMaxProcs(defaultReindexMaxProcs) + os.Setenv("CAMLI_TESTREINDEX_DISABLE_OOO", "false") + } else { + // We set the concurrency to 1, otherwise we could get "lucky" as the + // 2nd goroutine could index pn1 before the 1st goroutine notices it + // is missing as a dependency of delpn1 (which is the point of our test). 
+ index.SetReindexMaxProcs(1) + os.Setenv("CAMLI_TESTREINDEX_DISABLE_OOO", "true") + } + idx := initIdx() + id := NewIndexDeps(idx) + id.Fataler = t + + pn1 := id.NewPlannedPermanode("foo1") // sha1-f06e30253644014922f955733a641cbc64d43d73 + t.Logf("uploaded permanode %q", pn1) + + // delete pn1 + delpn1 := id.Delete(pn1) // sha1-1d4c60cb3ce967edfb3194afd36124ce3f87ece0 + t.Logf("del claim %q deletes %q", delpn1, pn1) + deleted := idx.IsDeleted(pn1) + if !deleted { + t.Fatal("pn1 should be deleted") + } + + err := id.Index.Reindex() + if !startOoo && err == nil { + t.Fatal("Reindexing without outOfOrderIndexerLoop should have failed") + } + if startOoo && err != nil { + t.Fatal(err) + } + } + + reindex(t, initIdx, false) + reindex(t, initIdx, true) +} + +type enumArgs struct { + ctx *context.Context + dest chan blob.SizedRef + after string + limit int +} + +func checkEnumerate(idx *index.Index, want []blob.SizedRef, args *enumArgs) error { + if args == nil { + args = &enumArgs{} + } + if args.ctx == nil { + args.ctx = context.New() + } + if args.dest == nil { + args.dest = make(chan blob.SizedRef) + } + if args.limit == 0 { + args.limit = 5000 + } + errCh := make(chan error) + go func() { + errCh <- idx.EnumerateBlobs(args.ctx, args.dest, args.after, args.limit) + }() + for k, sbr := range want { + got, ok := <-args.dest + if !ok { + return fmt.Errorf("could not enumerate blob %d", k) + } + if got != sbr { + return fmt.Errorf("enumeration %d: got %v, wanted %v", k, got, sbr) + } + } + _, ok := <-args.dest + if ok { + return errors.New("chan was not closed after enumeration") + } + return <-errCh +} + +func checkStat(idx *index.Index, want []blob.SizedRef) error { + dest := make(chan blob.SizedRef) + defer close(dest) + errCh := make(chan error) + input := make([]blob.Ref, len(want)) + for _, sbr := range want { + input = append(input, sbr.Ref) + } + go func() { + errCh <- idx.StatBlobs(dest, input) + }() + for k, sbr := range want { + got, ok := <-dest + if !ok { + 
return fmt.Errorf("could not get stat number %d", k) + } + if got != sbr { + return fmt.Errorf("stat %d: got %v, wanted %v", k, got, sbr) + } + } + return <-errCh +} + +func EnumStat(t *testing.T, initIdx func() *index.Index) { + idx := initIdx() + id := NewIndexDeps(idx) + id.Fataler = t + + type step func() error + + // so we can refer to the added permanodes without using hardcoded blobRefs + added := make(map[string]blob.Ref) + + stepAdd := func(contents string) step { // add the blob + return func() error { + pn := id.NewPlannedPermanode(contents) + t.Logf("uploaded permanode %q", pn) + added[contents] = pn + return nil + } + } + + stepEnumCheck := func(want []blob.SizedRef, args *enumArgs) step { // check the blob + return func() error { + if err := checkEnumerate(idx, want, args); err != nil { + return err + } + return nil + } + } + + missingBlob := blob.MustParse("sha1-0000000000000000000000000000000000000000") + stepDelete := func(toDelete blob.Ref) step { + return func() error { + del := id.Delete(missingBlob) + t.Logf("added del claim %v to delete %v", del, toDelete) + return nil + } + } + + stepStatCheck := func(want []blob.SizedRef) step { + return func() error { + if err := checkStat(idx, want); err != nil { + return err + } + return nil + } + } + + for _, v := range []string{ + "foo", + "barr", + "bazzz", + } { + stepAdd(v)() + } + foo := blob.SizedRef{ // sha1-95d7290eb38520b257ef88d32f5b8d6be4fa9203 + Ref: blob.MustParse(added["foo"].String()), + Size: 534, + } + bar := blob.SizedRef{ // sha1-88c232875c2d6cfedfe91a2b06ea5c236e0389f4 + Ref: blob.MustParse(added["barr"].String()), + Size: 535, + } + baz := blob.SizedRef{ // sha1-718177762f7aba80a8b156bdd2b5a775b15a3132 + Ref: blob.MustParse(added["bazzz"].String()), + Size: 536, + } + delMissing := blob.SizedRef{ // sha1-a0b4db6c57851e5c63bfa81f5bdfd1eb9e32624e + Ref: blob.MustParse("sha1-a0b4db6c57851e5c63bfa81f5bdfd1eb9e32624e"), + Size: 649, + } + + if err := stepEnumCheck([]blob.SizedRef{baz, 
bar, foo}, nil)(); err != nil { + t.Fatalf("first enum, testing order: %v", err) + } + + // Now again, but skipping baz's blob + if err := stepEnumCheck([]blob.SizedRef{bar, foo}, + &enumArgs{ + after: added["bazzz"].String(), + }, + )(); err != nil { + t.Fatalf("second enum, testing skipping with after: %v", err) + } + + // Now add a delete claim with a missing dep, which should add an "have" row in the old format, + // i.e. without the "|indexed" suffix. So we can test if we're still compatible with old rows. + stepDelete(missingBlob)() + if err := stepEnumCheck([]blob.SizedRef{baz, bar, foo, delMissing}, nil)(); err != nil { + t.Fatalf("third enum, testing old \"have\" row compat: %v", err) + } + + if err := stepStatCheck([]blob.SizedRef{foo, bar, baz, delMissing})(); err != nil { + t.Fatalf("stat check: %v", err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/interface.go b/vendor/github.com/camlistore/camlistore/pkg/index/interface.go new file mode 100644 index 00000000..05a9faf0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/interface.go @@ -0,0 +1,143 @@ +package index + +import ( + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/types/camtypes" +) + +type Interface interface { + // os.ErrNotExist should be returned if the blob isn't known + GetBlobMeta(blob.Ref) (camtypes.BlobMeta, error) + + // Should return os.ErrNotExist if not found. + GetFileInfo(fileRef blob.Ref) (camtypes.FileInfo, error) + + // Should return os.ErrNotExist if not found. + GetImageInfo(fileRef blob.Ref) (camtypes.ImageInfo, error) + + // Should return os.ErrNotExist if not found. + GetMediaTags(fileRef blob.Ref) (map[string]string, error) + + // KeyId returns the GPG keyid (e.g. "2931A67C26F5ABDA) + // given the blobref of its ASCII-armored blobref. + // The error is ErrNotFound if not found. + KeyId(blob.Ref) (string, error) + + // AppendClaims appends to dst claims on the given permanode. 
+ // The signerFilter and attrFilter are both optional. If non-zero, + // they filter the return items to only claims made by the given signer + // or claims about the given attribute, respectively. + // Deleted claims are never returned. + // The items may be appended in any order. + // + // TODO: this should take a context and a callback func + // instead of a dst, then it can append to a channel instead, + // and the context lets it be interrupted. The callback should + // take the context too, so the channel send's select can read + // from the Done channel. + AppendClaims(dst []camtypes.Claim, permaNode blob.Ref, + signerFilter blob.Ref, + attrFilter string) ([]camtypes.Claim, error) + + // TODO(bradfitz): methods below this line are slated for a redesign + // to work efficiently for the new in-memory index. + + // dest must be closed, even when returning an error. + // limit <= 0 means unlimited. + GetRecentPermanodes(dest chan<- camtypes.RecentPermanode, + owner blob.Ref, + limit int, + before time.Time) error + + // SearchPermanodes finds permanodes matching the provided + // request and sends unique permanode blobrefs to dest. + // In particular, if request.FuzzyMatch is true, a fulltext + // search is performed (if supported by the attribute(s)) + // instead of an exact match search. + // If request.Query is blank, the permanodes which have + // request.Attribute as an attribute (regardless of its value) + // are searched. + // Additionally, if request.Attribute is blank, all attributes + // are searched (as fulltext), otherwise the search is + // restricted to the named attribute. + // + // dest is always closed, regardless of the error return value. + SearchPermanodesWithAttr(dest chan<- blob.Ref, + request *camtypes.PermanodeByAttrRequest) error + + // ExistingFileSchemas returns 0 or more blobrefs of "bytes" + // (TODO(bradfitz): or file?) schema blobs that represent the + // bytes of a file given in bytesRef. 
The file schema blobs + // returned are not guaranteed to reference chunks that still + // exist on the blobservers, though. It's purely a hint for + // clients to avoid uploads if possible. Before re-using any + // returned blobref they should be checked. + // + // Use case: a user drag & drops a large file onto their + // browser to upload. (imagine that "large" means anything + // larger than a blobserver's max blob size) JavaScript can + // first SHA-1 the large file locally, then send the + // wholeFileRef to this call and see if they'd previously + // uploaded the same file in the past. If so, the upload + // can be avoided if at least one of the returned schemaRefs + // can be validated (with a validating HEAD request) to still + // all exist on the blob server. + ExistingFileSchemas(wholeFileRef blob.Ref) (schemaRefs []blob.Ref, err error) + + // GetDirMembers sends on dest the children of the static + // directory dirRef. It returns os.ErrNotExist if dirRef + // is nil. + // dest must be closed, even when returning an error. + // limit <= 0 means unlimited. + GetDirMembers(dirRef blob.Ref, dest chan<- blob.Ref, limit int) error + + // Given an owner key, a camliType 'claim', 'attribute' name, + // and specific 'value', find the most recent permanode that has + // a corresponding 'set-attribute' claim attached. + // Returns os.ErrNotExist if none is found. + // Only attributes white-listed by IsIndexedAttribute are valid. + // TODO(bradfitz): ErrNotExist here is a weird error message ("file" not found). change. + // TODO(bradfitz): use keyId instead of signer? + PermanodeOfSignerAttrValue(signer blob.Ref, attr, val string) (blob.Ref, error) + + // PathsOfSignerTarget queries the index about "camliPath:" + // URL-dispatch attributes. + // + // It returns a list of all the path claims that have been signed + // by the provided signer and point at the given target. 
+ // + // This is used when editing a permanode, to figure work up + // the name resolution tree backwards ultimately to a + // camliRoot permanode (which should know its base URL), and + // then the complete URL(s) of a target can be found. + PathsOfSignerTarget(signer, target blob.Ref) ([]*camtypes.Path, error) + + // All Path claims for (signer, base, suffix) + PathsLookup(signer, base blob.Ref, suffix string) ([]*camtypes.Path, error) + + // Most recent Path claim for (signer, base, suffix) as of + // provided time 'at', or most recent if 'at' is nil. + PathLookup(signer, base blob.Ref, suffix string, at time.Time) (*camtypes.Path, error) + + // EdgesTo finds references to the provided ref. + // + // For instance, if ref is a permanode, it might find the parent permanodes + // that have ref as a member. + // Or, if ref is a static file, it might find static directories which contain + // that file. + // This is a way to go "up" or "back" in a hierarchy. + // + // opts may be nil to accept the defaults. + EdgesTo(ref blob.Ref, opts *camtypes.EdgesToOpts) ([]*camtypes.Edge, error) + + // EnumerateBlobMeta sends ch information about all blobs + // known to the indexer (which may be a subset of all total + // blobs, since the indexer is typically configured to not see + // non-metadata blobs) and then closes ch. When it returns an + // error, it also closes ch. The blobs may be sent in any order. + // If the context finishes, the return error is context.ErrCanceled. + EnumerateBlobMeta(*context.Context, chan<- camtypes.BlobMeta) error +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/keys.go b/vendor/github.com/camlistore/camlistore/pkg/index/keys.go new file mode 100644 index 00000000..6476586e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/keys.go @@ -0,0 +1,396 @@ +/* +Copyright 2011 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index + +import ( + "bytes" + "fmt" + "strings" + + "camlistore.org/pkg/blob" +) + +// requiredSchemaVersion is incremented every time +// an index key type is added, changed, or removed. +// Version 4: EXIF tags + GPS +// Version 5: wholeRef added to keyFileInfo +const requiredSchemaVersion = 5 + +// type of key returns the identifier in k before the first ":" or "|". +// (Originally we packed keys by hand and there are a mix of styles) +func typeOfKey(k string) string { + c := strings.Index(k, ":") + p := strings.Index(k, "|") + if c < 0 && p < 0 { + return "" + } + if c < 0 { + return k[:p] + } + if p < 0 { + return k[:c] + } + min := c + if p < min { + min = p + } + return k[:min] +} + +type keyType struct { + name string + keyParts []part + valParts []part +} + +func (k *keyType) Prefix(args ...interface{}) string { + return k.build(true, true, k.keyParts, args...) +} + +func (k *keyType) Key(args ...interface{}) string { + return k.build(false, true, k.keyParts, args...) +} + +func (k *keyType) Val(args ...interface{}) string { + return k.build(false, false, k.valParts, args...) 
+} + +func (k *keyType) build(isPrefix, isKey bool, parts []part, args ...interface{}) string { + var buf bytes.Buffer + if isKey { + buf.WriteString(k.name) + } + if !isPrefix && len(args) != len(parts) { + panic("wrong number of arguments") + } + if len(args) > len(parts) { + panic("too many arguments") + } + for i, arg := range args { + if isKey || i > 0 { + buf.WriteString("|") + } + asStr := func() string { + s, ok := arg.(string) + if !ok { + s = arg.(fmt.Stringer).String() + } + return s + } + switch parts[i].typ { + case typeIntStr: + switch arg.(type) { + case int, int64, uint64: + buf.WriteString(fmt.Sprintf("%d", arg)) + default: + panic("bogus int type") + } + case typeStr: + buf.WriteString(urle(asStr())) + case typeRawStr: + buf.WriteString(asStr()) + case typeReverseTime: + s := asStr() + const example = "2011-01-23T05:23:12" + if len(s) < len(example) || s[4] != '-' && s[10] != 'T' { + panic("doesn't look like a time: " + s) + } + buf.WriteString(reverseTimeString(s)) + case typeBlobRef: + if br, ok := arg.(blob.Ref); ok { + if br.Valid() { + buf.WriteString(br.String()) + } + break + } + fallthrough + default: + if s, ok := arg.(string); ok { + buf.WriteString(s) + } else { + buf.WriteString(arg.(fmt.Stringer).String()) + } + } + } + if isPrefix { + buf.WriteString("|") + } + return buf.String() +} + +type part struct { + name string + typ partType +} + +type partType int + +const ( + typeKeyId partType = iota // PGP key id + typeTime + typeReverseTime // time prepended with "rt" + each numeric digit reversed from '9' + typeBlobRef + typeStr // URL-escaped + typeIntStr // integer as string + typeRawStr // not URL-escaped +) + +var ( + // keySchemaVersion indexes the index schema version. 
+ keySchemaVersion = &keyType{ + "schemaversion", + nil, + []part{ + {"version", typeIntStr}, + }, + } + + keyMissing = &keyType{ + "missing", + []part{ + {"have", typeBlobRef}, + {"needed", typeBlobRef}, + }, + []part{ + {"1", typeStr}, + }, + } + + // keyPermanodeClaim indexes when a permanode is modified (or deleted) by a claim. + // It ties the affected permanode to the date of the modification, the responsible + // claim, and the nature of the modification. + keyPermanodeClaim = &keyType{ + "claim", + []part{ + {"permanode", typeBlobRef}, // modified permanode + {"signer", typeKeyId}, + {"claimDate", typeTime}, + {"claim", typeBlobRef}, + }, + []part{ + {"claimType", typeStr}, + {"attr", typeStr}, + {"value", typeStr}, + // And the signerRef, which seems redundant + // with the signer keyId in the jey, but the + // Claim struct needs this, and there's 1:m + // for keyId:blobRef, so: + {"signerRef", typeBlobRef}, + }, + } + + keyRecentPermanode = &keyType{ + "recpn", + []part{ + {"owner", typeKeyId}, + {"modtime", typeReverseTime}, + {"claim", typeBlobRef}, + }, + nil, + } + + keyPathBackward = &keyType{ + "signertargetpath", + []part{ + {"signer", typeKeyId}, + {"target", typeBlobRef}, + {"claim", typeBlobRef}, // for key uniqueness + }, + []part{ + {"claimDate", typeTime}, + {"base", typeBlobRef}, + {"active", typeStr}, // 'Y', or 'N' for deleted + {"suffix", typeStr}, + }, + } + + keyPathForward = &keyType{ + "path", + []part{ + {"signer", typeKeyId}, + {"base", typeBlobRef}, + {"suffix", typeStr}, + {"claimDate", typeReverseTime}, + {"claim", typeBlobRef}, // for key uniqueness + }, + []part{ + {"active", typeStr}, // 'Y', or 'N' for deleted + {"target", typeBlobRef}, + }, + } + + keyWholeToFileRef = &keyType{ + "wholetofile", + []part{ + {"whole", typeBlobRef}, + {"schema", typeBlobRef}, // for key uniqueness + }, + []part{ + {"1", typeStr}, + }, + } + + keyFileInfo = &keyType{ + "fileinfo", + []part{ + {"file", typeBlobRef}, + }, + []part{ + {"size", 
typeIntStr}, + {"filename", typeStr}, + {"mimetype", typeStr}, + {"whole", typeBlobRef}, + }, + } + + keyFileTimes = &keyType{ + "filetimes", + []part{ + {"file", typeBlobRef}, + }, + []part{ + // 0, 1, or 2 comma-separated types.Time3339 + // strings for creation/mod times. Oldest, + // then newest. See FileInfo docs. + {"time3339s", typeStr}, + }, + } + + keySignerAttrValue = &keyType{ + "signerattrvalue", + []part{ + {"signer", typeKeyId}, + {"attr", typeStr}, + {"value", typeStr}, + {"claimdate", typeReverseTime}, + {"claimref", typeBlobRef}, + }, + []part{ + {"permanode", typeBlobRef}, + }, + } + + // keyDeleted indexes a claim that deletes an entity. It ties the deleted + // entity to the date it was deleted, and to the deleter claim. + keyDeleted = &keyType{ + "deleted", + []part{ + {"deleted", typeBlobRef}, // the deleted entity (a permanode or another claim) + {"claimdate", typeReverseTime}, + {"deleter", typeBlobRef}, // the deleter claim blobref + }, + nil, + } + + // Given a blobref (permanode or static file or directory), provide a mapping + // to potential parents (they may no longer be parents, in the case of permanodes). + // In the case of permanodes, camliMember or camliContent constitutes a forward + // edge. In the case of static directories, the forward path is dir->static set->file, + // and that's what's indexed here, inverted. + keyEdgeBackward = &keyType{ + "edgeback", + []part{ + {"child", typeBlobRef}, // the edge target; thing we want to find parent(s) of + {"parent", typeBlobRef}, // the parent / edge source (e.g. permanode blobref) + // the blobref is the blob establishing the relationship + // (for a permanode: the claim; for static: often same as parent) + {"blobref", typeBlobRef}, + }, + []part{ + {"parenttype", typeStr}, // either "permanode" or the camliType ("file", "static-set", etc) + {"name", typeStr}, // the name, if static. + }, + } + + // Width and height after any EXIF rotation. 
+ keyImageSize = &keyType{ + "imagesize", + []part{ + {"fileref", typeBlobRef}, // blobref of "file" schema blob + }, + []part{ + {"width", typeStr}, + {"height", typeStr}, + }, + } + + // child of a directory + keyStaticDirChild = &keyType{ + "dirchild", + []part{ + {"dirref", typeBlobRef}, // blobref of "directory" schema blob + {"child", typeStr}, // blobref of the child + }, + []part{ + {"1", typeStr}, + }, + } + + // Media attributes (e.g. ID3 tags). Uses generic terms like + // "artist", "title", "album", etc. + keyMediaTag = &keyType{ + "mediatag", + []part{ + {"wholeRef", typeBlobRef}, // wholeRef for song + {"tag", typeStr}, + }, + []part{ + {"value", typeStr}, + }, + } + + // EXIF tags + keyEXIFTag = &keyType{ + "exiftag", + []part{ + {"wholeRef", typeBlobRef}, // of entire file, not fileref + {"tag", typeStr}, // uint16 tag number as hex: xxxx + }, + []part{ + {"type", typeStr}, // "int", "rat", "float", "string" + {"n", typeIntStr}, // n components of type + {"vals", typeRawStr}, // pipe-separated; rats are n/d. strings are URL-escaped. + }, + } + + // Redundant version of keyEXIFTag. TODO: maybe get rid of this. + // Easier to process as one row instead of 4, though. + keyEXIFGPS = &keyType{ + "exifgps", + []part{ + {"wholeRef", typeBlobRef}, // of entire file, not fileref + }, + []part{ + {"lat", typeStr}, + {"long", typeStr}, + }, + } +) + +func containsUnsafeRawStrByte(s string) bool { + for _, r := range s { + if r >= 'z' || r < ' ' { + // pipe ('|) and non-ASCII are above 'z'. + return true + } + if r == '%' || r == '+' { + // Could be interpretted as URL-encoded + return true + } + } + return false +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/keys_test.go b/vendor/github.com/camlistore/camlistore/pkg/index/keys_test.go new file mode 100644 index 00000000..93440ba6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/keys_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2011 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index + +import ( + "testing" +) + +func TestKeyPrefix(t *testing.T) { + if g, e := keyRecentPermanode.Prefix("ABC"), "recpn|ABC|"; g != e { + t.Errorf("recpn = %q; want %q", g, e) + } +} + +func TestTypeOfKey(t *testing.T) { + tests := []struct { + in, want string + }{ + {"foo:bar", "foo"}, + {"foo|bar", "foo"}, + {"foo|bar:blah", "foo"}, + {"foo:bar|blah", "foo"}, + {"fooo", ""}, + } + for _, tt := range tests { + if got := typeOfKey(tt.in); got != tt.want { + t.Errorf("typeOfKey(%q) = %q; want %q", tt.in, got, tt.want) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/kvfile_test.go b/vendor/github.com/camlistore/camlistore/pkg/index/kvfile_test.go new file mode 100644 index 00000000..e1a37884 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/kvfile_test.go @@ -0,0 +1,104 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package index_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + "testing" + + "camlistore.org/pkg/index" + "camlistore.org/pkg/index/indextest" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/kvfile" + "camlistore.org/pkg/sorted/kvtest" + "camlistore.org/pkg/test" +) + +func newKvfileSorted(t *testing.T) (kv sorted.KeyValue, cleanup func()) { + td, err := ioutil.TempDir("", "kvfile-test") + if err != nil { + t.Fatal(err) + } + kv, err = kvfile.NewStorage(filepath.Join(td, "kvfile")) + if err != nil { + os.RemoveAll(td) + t.Fatal(err) + } + return kv, func() { + kv.Close() + os.RemoveAll(td) + } +} + +func TestSorted_Kvfile(t *testing.T) { + kv, cleanup := newKvfileSorted(t) + defer cleanup() + kvtest.TestSorted(t, kv) +} + +func indexTest(t *testing.T, + sortedGenfn func(t *testing.T) (sorted.KeyValue, func()), + tfn func(*testing.T, func() *index.Index)) { + defer test.TLog(t)() + var mu sync.Mutex // guards cleanups + var cleanups []func() + defer func() { + mu.Lock() // never unlocked + for _, fn := range cleanups { + fn() + } + }() + makeIndex := func() *index.Index { + s, cleanup := sortedGenfn(t) + mu.Lock() + cleanups = append(cleanups, cleanup) + mu.Unlock() + return index.MustNew(t, s) + } + tfn(t, makeIndex) +} + +func TestIndex_Kvfile(t *testing.T) { + indexTest(t, newKvfileSorted, indextest.Index) +} + +func TestPathsOfSignerTarget_Kvfile(t *testing.T) { + indexTest(t, newKvfileSorted, indextest.PathsOfSignerTarget) +} + +func TestFiles_Kvfile(t *testing.T) { + indexTest(t, newKvfileSorted, indextest.Files) +} + +func TestEdgesTo_Kvfile(t *testing.T) { + indexTest(t, newKvfileSorted, indextest.EdgesTo) +} + +func TestDelete_Kvfile(t *testing.T) { + indexTest(t, newKvfileSorted, indextest.Delete) +} + +func TestReindex_Kvfile(t *testing.T) { + indexTest(t, newKvfileSorted, indextest.Reindex) +} + +func TestEnumStat_Kvfile(t *testing.T) { + indexTest(t, newKvfileSorted, indextest.EnumStat) +} diff --git 
a/vendor/github.com/camlistore/camlistore/pkg/index/memindex.go b/vendor/github.com/camlistore/camlistore/pkg/index/memindex.go new file mode 100644 index 00000000..3f1010bf --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/memindex.go @@ -0,0 +1,55 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index + +import ( + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" +) + +func init() { + blobserver.RegisterStorageConstructor("memory-only-dev-indexer", + blobserver.StorageConstructor(newMemoryIndexFromConfig)) +} + +// NewMemoryIndex returns an Index backed only by memory, for use in tests. +func NewMemoryIndex() *Index { + ix, err := New(sorted.NewMemoryKeyValue()) + if err != nil { + // Nothing to fail in memory, so worth panicing about + // if we ever see something. 
+ panic(err) + } + return ix +} + +func newMemoryIndexFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) { + blobPrefix := config.RequiredString("blobSource") + if err := config.Validate(); err != nil { + return nil, err + } + sto, err := ld.GetStorage(blobPrefix) + if err != nil { + return nil, err + } + + ix := NewMemoryIndex() + ix.InitBlobSource(sto) + + return ix, err +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/mongo_test.go b/vendor/github.com/camlistore/camlistore/pkg/index/mongo_test.go new file mode 100644 index 00000000..5ee85a12 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/mongo_test.go @@ -0,0 +1,74 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package index_test + +import ( + "testing" + + "camlistore.org/pkg/index/indextest" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/kvtest" + _ "camlistore.org/pkg/sorted/mongo" + "camlistore.org/pkg/test/dockertest" +) + +func newMongoSorted(t *testing.T) (kv sorted.KeyValue, cleanup func()) { + dbname := "camlitest_" + osutil.Username() + containerID, ip := dockertest.SetupMongoContainer(t) + + kv, err := sorted.NewKeyValue(jsonconfig.Obj{ + "type": "mongo", + "host": ip, + "database": dbname, + }) + if err != nil { + containerID.KillRemove(t) + t.Fatal(err) + } + return kv, func() { + kv.Close() + containerID.KillRemove(t) + } +} + +func TestSorted_Mongo(t *testing.T) { + kv, cleanup := newMongoSorted(t) + defer cleanup() + kvtest.TestSorted(t, kv) +} + +func TestIndex_Mongo(t *testing.T) { + indexTest(t, newMongoSorted, indextest.Index) +} + +func TestPathsOfSignerTarget_Mongo(t *testing.T) { + indexTest(t, newMongoSorted, indextest.PathsOfSignerTarget) +} + +func TestFiles_Mongo(t *testing.T) { + indexTest(t, newMongoSorted, indextest.Files) +} + +func TestEdgesTo_Mongo(t *testing.T) { + indexTest(t, newMongoSorted, indextest.EdgesTo) +} + +func TestDelete_Mongo(t *testing.T) { + indexTest(t, newMongoSorted, indextest.Delete) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/mysql_test.go b/vendor/github.com/camlistore/camlistore/pkg/index/mysql_test.go new file mode 100644 index 00000000..b6c5b06b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/mysql_test.go @@ -0,0 +1,76 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index_test + +import ( + "testing" + + "camlistore.org/pkg/index/indextest" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/kvtest" + _ "camlistore.org/pkg/sorted/mysql" + "camlistore.org/pkg/test/dockertest" +) + +func newMySQLSorted(t *testing.T) (kv sorted.KeyValue, clean func()) { + dbname := "camlitest_" + osutil.Username() + containerID, ip := dockertest.SetupMySQLContainer(t, dbname) + + kv, err := sorted.NewKeyValue(jsonconfig.Obj{ + "type": "mysql", + "host": ip + ":3306", + "database": dbname, + "user": dockertest.MySQLUsername, + "password": dockertest.MySQLPassword, + }) + if err != nil { + containerID.KillRemove(t) + t.Fatal(err) + } + return kv, func() { + kv.Close() + containerID.KillRemove(t) + } +} + +func TestSorted_MySQL(t *testing.T) { + kv, clean := newMySQLSorted(t) + defer clean() + kvtest.TestSorted(t, kv) +} + +func TestIndex_MySQL(t *testing.T) { + indexTest(t, newMySQLSorted, indextest.Index) +} + +func TestPathsOfSignerTarget_MySQL(t *testing.T) { + indexTest(t, newMySQLSorted, indextest.PathsOfSignerTarget) +} + +func TestFiles_MySQL(t *testing.T) { + indexTest(t, newMySQLSorted, indextest.Files) +} + +func TestEdgesTo_MySQL(t *testing.T) { + indexTest(t, newMySQLSorted, indextest.EdgesTo) +} + +func TestDelete_MySQL(t *testing.T) { + indexTest(t, newMySQLSorted, indextest.Delete) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/postgres_test.go b/vendor/github.com/camlistore/camlistore/pkg/index/postgres_test.go new file mode 100644 
index 00000000..5e130f7b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/postgres_test.go @@ -0,0 +1,77 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index_test + +import ( + "testing" + + "camlistore.org/pkg/index/indextest" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/kvtest" + _ "camlistore.org/pkg/sorted/postgres" + "camlistore.org/pkg/test/dockertest" +) + +func newPostgresSorted(t *testing.T) (kv sorted.KeyValue, clean func()) { + dbname := "camlitest_" + osutil.Username() + containerID, ip := dockertest.SetupPostgreSQLContainer(t, dbname) + + kv, err := sorted.NewKeyValue(jsonconfig.Obj{ + "type": "postgres", + "host": ip, + "database": dbname, + "user": dockertest.PostgresUsername, + "password": dockertest.PostgresPassword, + "sslmode": "disable", + }) + if err != nil { + containerID.KillRemove(t) + t.Fatal(err) + } + return kv, func() { + kv.Close() + containerID.KillRemove(t) + } +} + +func TestSorted_Postgres(t *testing.T) { + kv, clean := newPostgresSorted(t) + defer clean() + kvtest.TestSorted(t, kv) +} + +func TestIndex_Postgres(t *testing.T) { + indexTest(t, newPostgresSorted, indextest.Index) +} + +func TestPathsOfSignerTarget_Postgres(t *testing.T) { + indexTest(t, newPostgresSorted, indextest.PathsOfSignerTarget) +} + +func TestFiles_Postgres(t *testing.T) { + indexTest(t, newPostgresSorted, indextest.Files) 
+} + +func TestEdgesTo_Postgres(t *testing.T) { + indexTest(t, newPostgresSorted, indextest.EdgesTo) +} + +func TestDelete_Postgres(t *testing.T) { + indexTest(t, newPostgresSorted, indextest.Delete) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/receive.go b/vendor/github.com/camlistore/camlistore/pkg/index/receive.go new file mode 100644 index 00000000..e07b17a9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/receive.go @@ -0,0 +1,826 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index + +import ( + "bytes" + "crypto/sha1" + "errors" + "fmt" + _ "image/gif" + _ "image/png" + "io" + "log" + "os" + "sort" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/images" + "camlistore.org/pkg/jsonsign" + "camlistore.org/pkg/magic" + "camlistore.org/pkg/media" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/types" + + "camlistore.org/third_party/github.com/hjfreyer/taglib-go/taglib" + "camlistore.org/third_party/github.com/rwcarlsen/goexif/exif" + "camlistore.org/third_party/github.com/rwcarlsen/goexif/tiff" + _ "camlistore.org/third_party/go/pkg/image/jpeg" +) + +// outOfOrderIndexerLoop asynchronously reindexes blobs received +// out of order. It panics if started more than once or if the +// index has no blobSource. 
+func (ix *Index) outOfOrderIndexerLoop() { + ix.mu.RLock() + if ix.oooRunning == true { + panic("outOfOrderIndexerLoop is already running") + } + if ix.blobSource == nil { + panic("index has no blobSource") + } + ix.oooRunning = true + ix.mu.RUnlock() +WaitTickle: + for _ = range ix.tickleOoo { + for { + ix.mu.Lock() + if len(ix.readyReindex) == 0 { + ix.mu.Unlock() + continue WaitTickle + } + var br blob.Ref + for br = range ix.readyReindex { + break + } + delete(ix.readyReindex, br) + ix.mu.Unlock() + + err := ix.indexBlob(br) + if err != nil { + log.Printf("out-of-order indexBlob(%v) = %v", br, err) + ix.mu.Lock() + if len(ix.needs[br]) == 0 { + ix.readyReindex[br] = true + } + ix.mu.Unlock() + } + } + } +} + +func (ix *Index) indexBlob(br blob.Ref) error { + ix.mu.RLock() + bs := ix.blobSource + ix.mu.RUnlock() + if bs == nil { + panic(fmt.Sprintf("index: can't re-index %v: no blobSource", br)) + } + rc, _, err := bs.Fetch(br) + if err != nil { + return fmt.Errorf("index: failed to fetch %v for reindexing: %v", br, err) + } + defer rc.Close() + if _, err := blobserver.Receive(ix, br, rc); err != nil { + return err + } + return nil +} + +type mutationMap struct { + kv map[string]string // the keys and values we populate + + // We record if we get a delete claim, so we can update + // the deletes cache right after committing the mutation. + // + // TODO(mpl): we only need to keep track of one claim so far, + // but I chose a slice for when we need to do multi-claims? 
+ deletes []schema.Claim +} + +func (mm *mutationMap) Set(k, v string) { + if mm.kv == nil { + mm.kv = make(map[string]string) + } + mm.kv[k] = v +} + +func (mm *mutationMap) noteDelete(deleteClaim schema.Claim) { + mm.deletes = append(mm.deletes, deleteClaim) +} + +func blobsFilteringOut(v []blob.Ref, x blob.Ref) []blob.Ref { + switch len(v) { + case 0: + return nil + case 1: + if v[0] == x { + return nil + } + return v + } + nl := v[:0] + for _, vb := range v { + if vb != x { + nl = append(nl, vb) + } + } + return nl +} + +func (ix *Index) noteBlobIndexed(br blob.Ref) { + ix.mu.Lock() + defer ix.mu.Unlock() + for _, needer := range ix.neededBy[br] { + newNeeds := blobsFilteringOut(ix.needs[needer], br) + if len(newNeeds) == 0 { + ix.readyReindex[needer] = true + delete(ix.needs, needer) + select { + case ix.tickleOoo <- true: + default: + } + } else { + ix.needs[needer] = newNeeds + } + } + delete(ix.neededBy, br) +} + +func (ix *Index) removeAllMissingEdges(br blob.Ref) { + var toDelete []string + it := ix.queryPrefix(keyMissing, br) + for it.Next() { + toDelete = append(toDelete, it.Key()) + } + if err := it.Close(); err != nil { + // TODO: Care? Can lazily clean up later. 
+ log.Printf("Iterator close error: %v", err) + } + for _, k := range toDelete { + if err := ix.s.Delete(k); err != nil { + log.Printf("Error deleting key %s: %v", k, err) + } + } +} + +func (ix *Index) ReceiveBlob(blobRef blob.Ref, source io.Reader) (retsb blob.SizedRef, err error) { + missingDeps := false + defer func() { + if err == nil { + ix.noteBlobIndexed(blobRef) + if !missingDeps { + ix.removeAllMissingEdges(blobRef) + } + } + }() + sniffer := NewBlobSniffer(blobRef) + written, err := io.Copy(sniffer, source) + if err != nil { + return + } + if haveVal, haveErr := ix.s.Get("have:" + blobRef.String()); haveErr == nil { + if strings.HasSuffix(haveVal, "|indexed") { + return blob.SizedRef{blobRef, uint32(written)}, nil + } + } + + sniffer.Parse() + + fetcher := &missTrackFetcher{ + fetcher: ix.blobSource, + } + + mm, err := ix.populateMutationMap(fetcher, blobRef, sniffer) + if err != nil { + if err != errMissingDep { + return + } + fetcher.mu.Lock() + defer fetcher.mu.Unlock() + if len(fetcher.missing) == 0 { + panic("errMissingDep happened, but no fetcher.missing recorded") + } + missingDeps = true + allRecorded := true + for _, missing := range fetcher.missing { + if err := ix.noteNeeded(blobRef, missing); err != nil { + allRecorded = false + } + } + if allRecorded { + // Lie and say things are good. We've + // successfully recorded that the blob isn't + // indexed, but we'll reindex it later once + // the dependent blobs arrive. + return blob.SizedRef{blobRef, uint32(written)}, nil + } + return + } + + if err := ix.commit(mm); err != nil { + return retsb, err + } + + if c := ix.corpus; c != nil { + if err = c.addBlob(blobRef, mm); err != nil { + return + } + } + + // TODO(bradfitz): log levels? These are generally noisy + // (especially in tests, like search/handler_test), but I + // could see it being useful in production. 
For now, disabled: + // + // mimeType := sniffer.MIMEType() + // log.Printf("indexer: received %s; type=%v; truncated=%v", blobRef, mimeType, sniffer.IsTruncated()) + + return blob.SizedRef{blobRef, uint32(written)}, nil +} + +// commit writes the contents of the mutationMap on a batch +// mutation and commits that batch. It also updates the deletes +// cache. +func (ix *Index) commit(mm *mutationMap) error { + // We want the update of the deletes cache to be atomic + // with the transaction commit, so we lock here instead + // of within updateDeletesCache. + ix.deletes.Lock() + defer ix.deletes.Unlock() + bm := ix.s.BeginBatch() + for k, v := range mm.kv { + bm.Set(k, v) + } + err := ix.s.CommitBatch(bm) + if err != nil { + return err + } + for _, cl := range mm.deletes { + if err := ix.updateDeletesCache(cl); err != nil { + return fmt.Errorf("Could not update the deletes cache after deletion from %v: %v", cl, err) + } + } + return nil +} + +// populateMutationMap populates keys & values that will be committed +// into the returned map. +// +// the blobref can be trusted at this point (it's been fully consumed +// and verified to match), and the sniffer has been populated. 
+func (ix *Index) populateMutationMap(fetcher *missTrackFetcher, br blob.Ref, sniffer *BlobSniffer) (*mutationMap, error) { + mm := &mutationMap{ + kv: map[string]string{ + "meta:" + br.String(): fmt.Sprintf("%d|%s", sniffer.Size(), sniffer.MIMEType()), + }, + } + var err error + if blob, ok := sniffer.SchemaBlob(); ok { + switch blob.Type() { + case "claim": + err = ix.populateClaim(fetcher, blob, mm) + case "file": + err = ix.populateFile(fetcher, blob, mm) + case "directory": + err = ix.populateDir(fetcher, blob, mm) + } + } + if err != nil && err != errMissingDep { + return nil, err + } + var haveVal string + if err == errMissingDep { + haveVal = fmt.Sprintf("%d", sniffer.Size()) + } else { + haveVal = fmt.Sprintf("%d|indexed", sniffer.Size()) + } + mm.kv["have:"+br.String()] = haveVal + ix.mu.Lock() + defer ix.mu.Unlock() + if len(fetcher.missing) == 0 { + // If err == nil, we're good. Else (err == errMissingDep), we + // know the error did not come from a fetching miss (because + // len(fetcher.missing) == 0) , but from an index miss. Therefore + // we know the miss has already been noted and will be dealt with + // later, so we can also pretend everything's fine. + return mm, nil + } + return mm, err +} + +// keepFirstN keeps the first N bytes written to it in Bytes. +type keepFirstN struct { + N int + Bytes []byte +} + +func (w *keepFirstN) Write(p []byte) (n int, err error) { + if n := w.N - len(w.Bytes); n > 0 { + if n > len(p) { + n = len(p) + } + w.Bytes = append(w.Bytes, p[:n]...) + } + return len(p), nil +} + +// missTrackFetcher is a blob.Fetcher that records which blob(s) it +// failed to load from src. 
+type missTrackFetcher struct { + fetcher blob.Fetcher + + mu sync.Mutex // guards missing + missing []blob.Ref +} + +func (f *missTrackFetcher) Fetch(br blob.Ref) (blob io.ReadCloser, size uint32, err error) { + blob, size, err = f.fetcher.Fetch(br) + if err == os.ErrNotExist { + f.mu.Lock() + defer f.mu.Unlock() + f.missing = append(f.missing, br) + err = errMissingDep + } + return +} + +// filePrefixReader is both a *bytes.Reader and a *schema.FileReader for use in readPrefixOrFile +type filePrefixReader interface { + io.Reader + io.ReaderAt +} + +// readPrefixOrFile executes a given func with a reader on the passed prefix and +// falls back to passing a reader on the whole file if the func returns an error. +func readPrefixOrFile(prefix []byte, fetcher blob.Fetcher, b *schema.Blob, fn func(filePrefixReader) error) (err error) { + pr := bytes.NewReader(prefix) + err = fn(pr) + if err == io.EOF || err == io.ErrUnexpectedEOF { + var fr *schema.FileReader + fr, err = b.NewFileReader(fetcher) + if err == nil { + err = fn(fr) + fr.Close() + } + } + return err +} + +// b: the parsed file schema blob +// mm: keys to populate +func (ix *Index) populateFile(fetcher blob.Fetcher, b *schema.Blob, mm *mutationMap) (err error) { + var times []time.Time // all creation or mod times seen; may be zero + times = append(times, b.ModTime()) + + blobRef := b.BlobRef() + fr, err := b.NewFileReader(fetcher) + if err != nil { + return err + } + defer fr.Close() + mime, mr := magic.MIMETypeFromReader(fr) + + sha1 := sha1.New() + var copyDest io.Writer = sha1 + var imageBuf *keepFirstN // or nil + if strings.HasPrefix(mime, "image/") { + imageBuf = &keepFirstN{N: 512 << 10} + copyDest = io.MultiWriter(copyDest, imageBuf) + } + size, err := io.Copy(copyDest, mr) + if err != nil { + return err + } + wholeRef := blob.RefFromHash(sha1) + + if imageBuf != nil { + var conf images.Config + decodeConfig := func(r filePrefixReader) error { + conf, err = images.DecodeConfig(r) + return err + } + 
if err := readPrefixOrFile(imageBuf.Bytes, fetcher, b, decodeConfig); err == nil { + mm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height))) + } + + var ft time.Time + fileTime := func(r filePrefixReader) error { + ft, err = schema.FileTime(r) + return err + } + if err = readPrefixOrFile(imageBuf.Bytes, fetcher, b, fileTime); err == nil { + times = append(times, ft) + } + log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err) + + // TODO(mpl): find (generate?) more broken EXIF images to experiment with. + indexEXIFData := func(r filePrefixReader) error { + return indexEXIF(wholeRef, r, mm) + } + if err = readPrefixOrFile(imageBuf.Bytes, fetcher, b, indexEXIFData); err != nil { + log.Printf("error parsing EXIF: %v", err) + } + } + + var sortTimes []time.Time + for _, t := range times { + if !t.IsZero() { + sortTimes = append(sortTimes, t) + } + } + sort.Sort(types.ByTime(sortTimes)) + var time3339s string + switch { + case len(sortTimes) == 1: + time3339s = types.Time3339(sortTimes[0]).String() + case len(sortTimes) >= 2: + oldest, newest := sortTimes[0], sortTimes[len(sortTimes)-1] + time3339s = types.Time3339(oldest).String() + "," + types.Time3339(newest).String() + } + + mm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1") + mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, b.FileName(), mime, wholeRef)) + mm.Set(keyFileTimes.Key(blobRef), keyFileTimes.Val(time3339s)) + + if strings.HasPrefix(mime, "audio/") { + indexMusic(io.NewSectionReader(fr, 0, fr.Size()), wholeRef, mm) + } + + return nil +} + +func tagFormatString(tag *tiff.Tag) string { + switch tag.Format() { + case tiff.IntVal: + return "int" + case tiff.RatVal: + return "rat" + case tiff.FloatVal: + return "float" + case tiff.StringVal: + return "string" + } + return "" +} + +type exifWalkFunc func(name exif.FieldName, tag *tiff.Tag) error + +func (f exifWalkFunc) Walk(name exif.FieldName, tag *tiff.Tag) error { return f(name, tag) } + +var 
errEXIFPanic = errors.New("EXIF library panicked while walking fields") + +func indexEXIF(wholeRef blob.Ref, r io.Reader, mm *mutationMap) (err error) { + var tiffErr error + ex, err := exif.Decode(r) + if err != nil { + tiffErr = err + if exif.IsCriticalError(err) { + if exif.IsShortReadTagValueError(err) { + return io.ErrUnexpectedEOF // trigger a retry with whole file + } + return + } + log.Printf("Non critical TIFF decoding error: %v", err) + } + defer func() { + // The EXIF library panics if you access a field past + // what the file contains. Be paranoid and just + // recover here, instead of crashing on an invalid + // EXIF file. + if e := recover(); e != nil { + err = errEXIFPanic + } + }() + + err = ex.Walk(exifWalkFunc(func(name exif.FieldName, tag *tiff.Tag) error { + tagFmt := tagFormatString(tag) + if tagFmt == "" { + return nil + } + key := keyEXIFTag.Key(wholeRef, fmt.Sprintf("%04x", tag.Id)) + numComp := int(tag.Count) + if tag.Format() == tiff.StringVal { + numComp = 1 + } + var val bytes.Buffer + val.WriteString(keyEXIFTag.Val(tagFmt, numComp, "")) + if tag.Format() == tiff.StringVal { + str, err := tag.StringVal() + if err != nil { + log.Printf("Invalid EXIF string data: %v", err) + return nil + } + if containsUnsafeRawStrByte(str) { + val.WriteString(urle(str)) + } else { + val.WriteString(str) + } + } else { + for i := 0; i < int(tag.Count); i++ { + if i > 0 { + val.WriteByte('|') + } + switch tagFmt { + case "int": + v, err := tag.Int(i) + if err != nil { + log.Printf("Invalid EXIF int data: %v", err) + return nil + } + fmt.Fprintf(&val, "%d", v) + case "rat": + n, d, err := tag.Rat2(i) + if err != nil { + log.Printf("Invalid EXIF rat data: %v", err) + return nil + } + fmt.Fprintf(&val, "%d/%d", n, d) + case "float": + v, err := tag.Float(i) + if err != nil { + log.Printf("Invalid EXIF float data: %v", err) + return nil + } + fmt.Fprintf(&val, "%v", v) + default: + panic("shouldn't get here") + } + } + } + valStr := val.String() + mm.Set(key, 
valStr) + return nil + })) + if err != nil { + return + } + + if exif.IsGPSError(tiffErr) { + log.Printf("Invalid EXIF GPS data: %v", tiffErr) + return nil + } + if lat, long, err := ex.LatLong(); err == nil { + mm.Set(keyEXIFGPS.Key(wholeRef), keyEXIFGPS.Val(fmt.Sprint(lat), fmt.Sprint(long))) + } else if !exif.IsTagNotPresentError(err) { + log.Printf("Invalid EXIF GPS data: %v", err) + } + return nil +} + +// indexMusic adds mutations to index the wholeRef by attached metadata and other properties. +func indexMusic(r types.SizeReaderAt, wholeRef blob.Ref, mm *mutationMap) { + tag, err := taglib.Decode(r, r.Size()) + if err != nil { + log.Print("index: error parsing tag: ", err) + return + } + + var footerLength int64 = 0 + if hasTag, err := media.HasID3v1Tag(r); err != nil { + log.Print("index: unable to check for ID3v1 tag: ", err) + return + } else if hasTag { + footerLength = media.ID3v1TagLength + } + + // Generate a hash of the audio portion of the file (i.e. excluding ID3v1 and v2 tags). 
+ audioStart := int64(tag.TagSize()) + audioSize := r.Size() - audioStart - footerLength + hash := sha1.New() + if _, err := io.Copy(hash, io.NewSectionReader(r, audioStart, audioSize)); err != nil { + log.Print("index: error generating SHA1 from audio data: ", err) + return + } + mediaRef := blob.RefFromHash(hash) + + duration, err := media.GetMPEGAudioDuration(io.NewSectionReader(r, audioStart, audioSize)) + if err != nil { + log.Print("index: unable to calculate audio duration: ", err) + duration = 0 + } + + var yearStr, trackStr, discStr, durationStr string + if !tag.Year().IsZero() { + const justYearLayout = "2006" + yearStr = tag.Year().Format(justYearLayout) + } + if tag.Track() != 0 { + trackStr = fmt.Sprintf("%d", tag.Track()) + } + if tag.Disc() != 0 { + discStr = fmt.Sprintf("%d", tag.Disc()) + } + if duration != 0 { + durationStr = fmt.Sprintf("%d", duration/time.Millisecond) + } + + // Note: if you add to this map, please update + // pkg/search/query.go's MediaTagConstraint Tag docs. + tags := map[string]string{ + "title": tag.Title(), + "artist": tag.Artist(), + "album": tag.Album(), + "genre": tag.Genre(), + "musicbrainzalbumid": tag.CustomFrames()["MusicBrainz Album Id"], + "year": yearStr, + "track": trackStr, + "disc": discStr, + "mediaref": mediaRef.String(), + "durationms": durationStr, + } + + for tag, value := range tags { + if value != "" { + mm.Set(keyMediaTag.Key(wholeRef, tag), keyMediaTag.Val(value)) + } + } +} + +// b: the parsed file schema blob +// mm: keys to populate +func (ix *Index) populateDir(fetcher blob.Fetcher, b *schema.Blob, mm *mutationMap) error { + blobRef := b.BlobRef() + // TODO(bradfitz): move the NewDirReader and FileName method off *schema.Blob and onto + // StaticFile/StaticDirectory or something. + + dr, err := b.NewDirReader(fetcher) + if err != nil { + // TODO(bradfitz): propagate up a transient failure + // error type, so we can retry indexing files in the + // future if blobs are only temporarily unavailable. 
+ log.Printf("index: error indexing directory, creating NewDirReader %s: %v", blobRef, err) + return nil + } + sts, err := dr.StaticSet() + if err != nil { + log.Printf("index: error indexing directory: can't get StaticSet: %v\n", err) + return nil + } + + mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(len(sts), b.FileName(), "", blob.Ref{})) + for _, br := range sts { + mm.Set(keyStaticDirChild.Key(blobRef, br.String()), "1") + } + return nil +} + +var errMissingDep = errors.New("blob was not fully indexed because of a missing dependency") + +// populateDeleteClaim adds to mm the entries resulting from the delete claim cl. +// It is assumed cl is a valid claim, and vr has already been verified. +func (ix *Index) populateDeleteClaim(cl schema.Claim, vr *jsonsign.VerifyRequest, mm *mutationMap) error { + br := cl.Blob().BlobRef() + target := cl.Target() + if !target.Valid() { + log.Print(fmt.Errorf("no valid target for delete claim %v", br)) + return nil + } + meta, err := ix.GetBlobMeta(target) + if err != nil { + if err == os.ErrNotExist { + if err := ix.noteNeeded(br, target); err != nil { + return fmt.Errorf("could not note that delete claim %v depends on %v: %v", br, target, err) + } + return errMissingDep + } + log.Print(fmt.Errorf("Could not get mime type of target blob %v: %v", target, err)) + return nil + } + + // TODO(mpl): create consts somewhere for "claim" and "permanode" as camliTypes, and use them, + // instead of hardcoding. Unless they already exist ? (didn't find them). 
+ if meta.CamliType != "permanode" && meta.CamliType != "claim" { + log.Print(fmt.Errorf("delete claim target in %v is neither a permanode nor a claim: %v", br, meta.CamliType)) + return nil + } + mm.Set(keyDeleted.Key(target, cl.ClaimDateString(), br), "") + if meta.CamliType == "claim" { + return nil + } + recentKey := keyRecentPermanode.Key(vr.SignerKeyId, cl.ClaimDateString(), br) + mm.Set(recentKey, target.String()) + attr, value := cl.Attribute(), cl.Value() + claimKey := keyPermanodeClaim.Key(target, vr.SignerKeyId, cl.ClaimDateString(), br) + mm.Set(claimKey, keyPermanodeClaim.Val(cl.ClaimType(), attr, value, vr.CamliSigner)) + return nil +} + +func (ix *Index) populateClaim(fetcher *missTrackFetcher, b *schema.Blob, mm *mutationMap) error { + br := b.BlobRef() + + claim, ok := b.AsClaim() + if !ok { + // Skip bogus claim with malformed permanode. + return nil + } + + vr := jsonsign.NewVerificationRequest(b.JSON(), blob.NewSerialFetcher(ix.KeyFetcher, fetcher)) + if !vr.Verify() { + // TODO(bradfitz): ask if the vr.Err.(jsonsign.Error).IsPermanent() and retry + // later if it's not permanent? or maybe do this up a level? + if vr.Err != nil { + return vr.Err + } + return errors.New("index: populateClaim verification failure") + } + verifiedKeyId := vr.SignerKeyId + mm.Set("signerkeyid:"+vr.CamliSigner.String(), verifiedKeyId) + + if claim.ClaimType() == string(schema.DeleteClaim) { + if err := ix.populateDeleteClaim(claim, vr, mm); err != nil { + return err + } + mm.noteDelete(claim) + return nil + } + + pnbr := claim.ModifiedPermanode() + if !pnbr.Valid() { + // A different type of claim; not modifying a permanode. 
+ return nil + } + + attr, value := claim.Attribute(), claim.Value() + recentKey := keyRecentPermanode.Key(verifiedKeyId, claim.ClaimDateString(), br) + mm.Set(recentKey, pnbr.String()) + claimKey := keyPermanodeClaim.Key(pnbr, verifiedKeyId, claim.ClaimDateString(), br) + mm.Set(claimKey, keyPermanodeClaim.Val(claim.ClaimType(), attr, value, vr.CamliSigner)) + + if strings.HasPrefix(attr, "camliPath:") { + targetRef, ok := blob.Parse(value) + if ok { + // TODO: deal with set-attribute vs. del-attribute + // properly? I think we get it for free when + // del-attribute has no Value, but we need to deal + // with the case where they explicitly delete the + // current value. + suffix := attr[len("camliPath:"):] + active := "Y" + if claim.ClaimType() == "del-attribute" { + active = "N" + } + baseRef := pnbr + claimRef := br + + key := keyPathBackward.Key(verifiedKeyId, targetRef, claimRef) + val := keyPathBackward.Val(claim.ClaimDateString(), baseRef, active, suffix) + mm.Set(key, val) + + key = keyPathForward.Key(verifiedKeyId, baseRef, suffix, claim.ClaimDateString(), claimRef) + val = keyPathForward.Val(active, targetRef) + mm.Set(key, val) + } + } + + if claim.ClaimType() != string(schema.DelAttributeClaim) && IsIndexedAttribute(attr) { + key := keySignerAttrValue.Key(verifiedKeyId, attr, value, claim.ClaimDateString(), br) + mm.Set(key, keySignerAttrValue.Val(pnbr)) + } + + if IsBlobReferenceAttribute(attr) { + targetRef, ok := blob.Parse(value) + if ok { + key := keyEdgeBackward.Key(targetRef, pnbr, br) + mm.Set(key, keyEdgeBackward.Val("permanode", "")) + } + } + + return nil +} + +// updateDeletesCache updates the index deletes cache with the cl delete claim. +// deleteClaim is trusted to be a valid delete Claim. 
+func (x *Index) updateDeletesCache(deleteClaim schema.Claim) error { + target := deleteClaim.Target() + deleter := deleteClaim.Blob() + when, err := deleter.ClaimDate() + if err != nil { + return fmt.Errorf("Could not get date of delete claim %v: %v", deleteClaim, err) + } + targetDeletions := append(x.deletes.m[target], + deletion{ + deleter: deleter.BlobRef(), + when: when, + }) + sort.Sort(sort.Reverse(byDeletionDate(targetDeletions))) + x.deletes.m[target] = targetDeletions + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/reversetime.go b/vendor/github.com/camlistore/camlistore/pkg/index/reversetime.go new file mode 100644 index 00000000..818ba135 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/reversetime.go @@ -0,0 +1,51 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package index + +import ( + "fmt" + "strings" +) + +func unreverseTimeString(s string) string { + if !strings.HasPrefix(s, "rt") { + panic(fmt.Sprintf("can't unreverse time string: %q", s)) + } + b := make([]byte, 0, len(s)-2) + b = appendReverseString(b, s[2:]) + return string(b) +} + +func reverseTimeString(s string) string { + b := make([]byte, 0, len(s)+2) + b = append(b, 'r') + b = append(b, 't') + b = appendReverseString(b, s) + return string(b) +} + +func appendReverseString(b []byte, s string) []byte { + for i := 0; i < len(s); i++ { + c := s[i] + if c >= '0' && c <= '9' { + b = append(b, '0'+('9'-c)) + } else { + b = append(b, c) + } + } + return b +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/sniff.go b/vendor/github.com/camlistore/camlistore/pkg/index/sniff.go new file mode 100644 index 00000000..d82081c0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/sniff.go @@ -0,0 +1,106 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package index + +import ( + "bytes" + "errors" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/magic" + "camlistore.org/pkg/schema" +) + +type BlobSniffer struct { + br blob.Ref + + header []byte + written int64 + meta *schema.Blob // or nil + mimeType string + camliType string +} + +func NewBlobSniffer(ref blob.Ref) *BlobSniffer { + if !ref.Valid() { + panic("invalid ref") + } + return &BlobSniffer{br: ref} +} + +func (sn *BlobSniffer) SchemaBlob() (meta *schema.Blob, ok bool) { + return sn.meta, sn.meta != nil +} + +func (sn *BlobSniffer) Write(d []byte) (int, error) { + if !sn.br.Valid() { + panic("write on sniffer with invalid blobref") + } + sn.written += int64(len(d)) + if len(sn.header) < schema.MaxSchemaBlobSize { + n := schema.MaxSchemaBlobSize - len(sn.header) + if len(d) < n { + n = len(d) + } + sn.header = append(sn.header, d[:n]...) + } + return len(d), nil +} + +func (sn *BlobSniffer) Size() int64 { + return sn.written +} + +func (sn *BlobSniffer) IsTruncated() bool { + return sn.written > schema.MaxSchemaBlobSize +} + +func (sn *BlobSniffer) Body() ([]byte, error) { + if sn.IsTruncated() { + return nil, errors.New("index.Body: was truncated") + } + return sn.header, nil +} + +// MIMEType returns the sniffed blob's content-type or the empty string if unknown. +// If the blob is a Camlistore schema metadata blob, the MIME type will be of +// the form "application/json; camliType=foo". 
+func (sn *BlobSniffer) MIMEType() string { return sn.mimeType } + +func (sn *BlobSniffer) CamliType() string { return sn.camliType } + +func (sn *BlobSniffer) Parse() { + if sn.bufferIsCamliJSON() { + sn.camliType = sn.meta.Type() + sn.mimeType = "application/json; camliType=" + sn.camliType + } else { + sn.mimeType = magic.MIMEType(sn.header) + } +} + +func (sn *BlobSniffer) bufferIsCamliJSON() bool { + buf := sn.header + if !schema.LikelySchemaBlob(buf) { + return false + } + blob, err := schema.BlobFromReader(sn.br, bytes.NewReader(buf)) + if err != nil { + return false + } + sn.meta = blob + return true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/sqlindex/sqlindex.go b/vendor/github.com/camlistore/camlistore/pkg/index/sqlindex/sqlindex.go new file mode 100644 index 00000000..5d8b4b78 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/sqlindex/sqlindex.go @@ -0,0 +1,250 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package sqlindex implements the sorted.KeyValue interface using an *sql.DB. +package sqlindex + +import ( + "database/sql" + "errors" + "fmt" + "log" + "regexp" + "sync" + + "camlistore.org/pkg/leak" + "camlistore.org/pkg/sorted" +) + +// Storage implements the sorted.KeyValue interface using an *sql.DB. 
+type Storage struct { + DB *sql.DB + + // SetFunc is an optional func to use when REPLACE INTO does not exist + SetFunc func(*sql.DB, string, string) error + BatchSetFunc func(*sql.Tx, string, string) error + + // PlaceHolderFunc optionally replaces ? placeholders with the right ones for the rdbms + // in use + PlaceHolderFunc func(string) string + + // Serial determines whether a Go-level mutex protects DB from + // concurrent access. This isn't perfect and exists just for + // SQLite, whose driver likes to return "the database is + // locked" (camlistore.org/issue/114), so this keeps some + // pressure off. But we still trust SQLite to deal with + // concurrency in most cases. + Serial bool + + mu sync.Mutex // the mutex used, if Serial is set +} + +func (s *Storage) sql(v string) string { + if f := s.PlaceHolderFunc; f != nil { + return f(v) + } + return v +} + +type batchTx struct { + tx *sql.Tx + err error // sticky + + // SetFunc is an optional func to use when REPLACE INTO does not exist + SetFunc func(*sql.Tx, string, string) error + + // PlaceHolderFunc optionally replaces ? 
placeholders with the right ones for the rdbms + // in use + PlaceHolderFunc func(string) string +} + +func (b *batchTx) sql(v string) string { + if f := b.PlaceHolderFunc; f != nil { + return f(v) + } + return v +} + +func (b *batchTx) Set(key, value string) { + if b.err != nil { + return + } + if b.SetFunc != nil { + b.err = b.SetFunc(b.tx, key, value) + return + } + _, b.err = b.tx.Exec(b.sql("REPLACE INTO rows (k, v) VALUES (?, ?)"), key, value) +} + +func (b *batchTx) Delete(key string) { + if b.err != nil { + return + } + _, b.err = b.tx.Exec(b.sql("DELETE FROM rows WHERE k=?"), key) +} + +func (s *Storage) BeginBatch() sorted.BatchMutation { + if s.Serial { + s.mu.Lock() + } + tx, err := s.DB.Begin() + return &batchTx{ + tx: tx, + err: err, + SetFunc: s.BatchSetFunc, + PlaceHolderFunc: s.PlaceHolderFunc, + } +} + +func (s *Storage) CommitBatch(b sorted.BatchMutation) error { + if s.Serial { + defer s.mu.Unlock() + } + bt, ok := b.(*batchTx) + if !ok { + return fmt.Errorf("wrong BatchMutation type %T", b) + } + if bt.err != nil { + return bt.err + } + return bt.tx.Commit() +} + +func (s *Storage) Get(key string) (value string, err error) { + if s.Serial { + s.mu.Lock() + defer s.mu.Unlock() + } + err = s.DB.QueryRow(s.sql("SELECT v FROM rows WHERE k=?"), key).Scan(&value) + if err == sql.ErrNoRows { + err = sorted.ErrNotFound + } + return +} + +func (s *Storage) Set(key, value string) error { + if s.Serial { + s.mu.Lock() + defer s.mu.Unlock() + } + if s.SetFunc != nil { + return s.SetFunc(s.DB, key, value) + } + _, err := s.DB.Exec(s.sql("REPLACE INTO rows (k, v) VALUES (?, ?)"), key, value) + return err +} + +func (s *Storage) Delete(key string) error { + if s.Serial { + s.mu.Lock() + defer s.mu.Unlock() + } + _, err := s.DB.Exec(s.sql("DELETE FROM rows WHERE k=?"), key) + return err +} + +func (s *Storage) Close() error { return s.DB.Close() } + +func (s *Storage) Find(start, end string) sorted.Iterator { + if s.Serial { + s.mu.Lock() + defer s.mu.Unlock() 
+ } + var rows *sql.Rows + var err error + if end == "" { + rows, err = s.DB.Query(s.sql("SELECT k, v FROM rows WHERE k >= ? ORDER BY k "), start) + } else { + rows, err = s.DB.Query(s.sql("SELECT k, v FROM rows WHERE k >= ? AND k < ? ORDER BY k "), start, end) + } + if err != nil { + log.Printf("unexpected query error: %v", err) + return &iter{err: err} + } + + it := &iter{ + s: s, + rows: rows, + closeCheck: leak.NewChecker(), + } + return it +} + +var wordThenPunct = regexp.MustCompile(`^\w+\W$`) + +// iter is a iterator over sorted key/value pairs in rows. +type iter struct { + s *Storage + end string // optional end bound + err error // accumulated error, returned at Close + + closeCheck *leak.Checker + + rows *sql.Rows // if non-nil, the rows we're reading from + + key sql.RawBytes + val sql.RawBytes + skey, sval *string // if non-nil, it's been stringified +} + +var errClosed = errors.New("sqlkv: Iterator already closed") + +func (t *iter) KeyBytes() []byte { return t.key } +func (t *iter) Key() string { + if t.skey != nil { + return *t.skey + } + str := string(t.key) + t.skey = &str + return str +} + +func (t *iter) ValueBytes() []byte { return t.val } +func (t *iter) Value() string { + if t.sval != nil { + return *t.sval + } + str := string(t.val) + t.sval = &str + return str +} + +func (t *iter) Close() error { + t.closeCheck.Close() + if t.rows != nil { + t.rows.Close() + t.rows = nil + } + err := t.err + t.err = errClosed + return err +} + +func (t *iter) Next() bool { + if t.err != nil { + return false + } + t.skey, t.sval = nil, nil + if !t.rows.Next() { + return false + } + t.err = t.rows.Scan(&t.key, &t.val) + if t.err != nil { + log.Printf("unexpected Scan error: %v", t.err) + return false + } + return true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/sqlite/sqlite.go b/vendor/github.com/camlistore/camlistore/pkg/index/sqlite/sqlite.go new file mode 100644 index 00000000..50a625f3 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/pkg/index/sqlite/sqlite.go @@ -0,0 +1,3 @@ +// Empty file to make the go tool happy with a test-only directory. + +package sqlite diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/sqlite/sqlite_test.go b/vendor/github.com/camlistore/camlistore/pkg/index/sqlite/sqlite_test.go new file mode 100644 index 00000000..71ee4b6f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/sqlite/sqlite_test.go @@ -0,0 +1,185 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sqlite_test + +import ( + "bytes" + "database/sql" + "fmt" + "io/ioutil" + "os" + "os/exec" + "sync" + "testing" + + "camlistore.org/pkg/index" + "camlistore.org/pkg/index/indextest" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/kvtest" + _ "camlistore.org/pkg/sorted/sqlite" + + _ "camlistore.org/third_party/github.com/mattn/go-sqlite3" +) + +var ( + once sync.Once + dbAvailable bool +) + +func do(db *sql.DB, sql string) { + _, err := db.Exec(sql) + if err == nil { + return + } + panic(fmt.Sprintf("Error %v running SQL: %s", err, sql)) +} + +func newSorted(t *testing.T) (kv sorted.KeyValue, clean func()) { + f, err := ioutil.TempFile("", "sqlite-test") + if err != nil { + t.Fatal(err) + } + + kv, err = sorted.NewKeyValue(jsonconfig.Obj{ + "type": "sqlite", + "file": f.Name(), + }) + if err != nil { + t.Fatal(err) + } + return kv, func() { + kv.Close() + os.Remove(f.Name()) + } +} + +func TestSorted_SQLite(t *testing.T) { + kv, clean := newSorted(t) + defer clean() + kvtest.TestSorted(t, kv) +} + +type tester struct{} + +func (tester) test(t *testing.T, tfn func(*testing.T, func() *index.Index)) { + var mu sync.Mutex // guards cleanups + var cleanups []func() + defer func() { + mu.Lock() // never unlocked + for _, fn := range cleanups { + fn() + } + }() + makeIndex := func() *index.Index { + s, cleanup := newSorted(t) + mu.Lock() + cleanups = append(cleanups, cleanup) + mu.Unlock() + return index.MustNew(t, s) + } + tfn(t, makeIndex) +} + +func TestIndex_SQLite(t *testing.T) { + tester{}.test(t, indextest.Index) +} + +func TestPathsOfSignerTarget_SQLite(t *testing.T) { + tester{}.test(t, indextest.PathsOfSignerTarget) +} + +func TestFiles_SQLite(t *testing.T) { + tester{}.test(t, indextest.Files) +} + +func TestEdgesTo_SQLite(t *testing.T) { + tester{}.test(t, indextest.EdgesTo) +} + +func TestDelete_SQLite(t *testing.T) { + tester{}.test(t, indextest.Delete) +} + +func TestConcurrency(t *testing.T) { + 
if testing.Short() { + t.Logf("skipping for short mode") + return + } + s, clean := newSorted(t) + defer clean() + const n = 100 + ch := make(chan error) + for i := 0; i < n; i++ { + i := i + go func() { + bm := s.BeginBatch() + bm.Set("keyA-"+fmt.Sprint(i), fmt.Sprintf("valA=%d", i)) + bm.Set("keyB-"+fmt.Sprint(i), fmt.Sprintf("valB=%d", i)) + ch <- s.CommitBatch(bm) + }() + } + for i := 0; i < n; i++ { + if err := <-ch; err != nil { + t.Errorf("%d: %v", i, err) + } + } +} + +func numFDs(t *testing.T) int { + lsofPath, err := exec.LookPath("lsof") + if err != nil { + t.Skipf("No lsof available; skipping test") + } + out, err := exec.Command(lsofPath, "-n", "-p", fmt.Sprint(os.Getpid())).Output() + if err != nil { + t.Skipf("Error running lsof; skipping test: %s", err) + } + return bytes.Count(out, []byte("\n")) - 1 // hacky +} + +func TestFDLeak(t *testing.T) { + if testing.Short() { + t.Skip("Skipping in short mode.") + } + fd0 := numFDs(t) + t.Logf("fd0 = %d", fd0) + + s, clean := newSorted(t) + defer clean() + + bm := s.BeginBatch() + const numRows = 150 // 3x the batchSize of 50 in sqlindex.go; to guarantee we do multiple batches + for i := 0; i < numRows; i++ { + bm.Set(fmt.Sprintf("key:%05d", i), fmt.Sprint(i)) + } + if err := s.CommitBatch(bm); err != nil { + t.Fatal(err) + } + for i := 0; i < 5; i++ { + it := s.Find("key:", "key~") + n := 0 + for it.Next() { + n++ + } + if n != numRows { + t.Errorf("iterated over %d rows; want %d", n, numRows) + } + it.Close() + t.Logf("fd after iteration %d = %d", i, numFDs(t)) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/index/util.go b/vendor/github.com/camlistore/camlistore/pkg/index/util.go new file mode 100644 index 00000000..6ac352e1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/index/util.go @@ -0,0 +1,44 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index + +import ( + "net/url" +) + +var urle = url.QueryEscape + +func urld(s string) string { + d, _ := url.QueryUnescape(s) + return d +} + +type dupSkipper struct { + m map[string]bool +} + +// not thread safe. +func (s *dupSkipper) Dup(v string) bool { + if s.m == nil { + s.m = make(map[string]bool) + } + if s.m[v] { + return true + } + s.m[v] = true + return false +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/eval.go b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/eval.go new file mode 100644 index 00000000..0020821a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/eval.go @@ -0,0 +1,292 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jsonconfig + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + + "camlistore.org/pkg/errorutil" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/wkfs" +) + +type stringVector struct { + v []string +} + +func (v *stringVector) Push(s string) { + v.v = append(v.v, s) +} + +func (v *stringVector) Pop() { + v.v = v.v[:len(v.v)-1] +} + +func (v *stringVector) Last() string { + return v.v[len(v.v)-1] +} + +// A File is the type returned by ConfigParser.Open. +type File interface { + io.ReadSeeker + io.Closer + Name() string +} + +// ConfigParser specifies the environment for parsing a config file +// and evaluating expressions. +type ConfigParser struct { + rootJSON Obj + + touchedFiles map[string]bool + includeStack stringVector + + // Open optionally specifies an opener function. + Open func(filename string) (File, error) +} + +func (c *ConfigParser) open(filename string) (File, error) { + if c.Open == nil { + return wkfs.Open(filename) + } + return c.Open(filename) +} + +// Validates variable names for config _env expressions +var envPattern = regexp.MustCompile(`\$\{[A-Za-z0-9_]+\}`) + +// ReadFile parses the provided path and returns the config file. +// If path is empty, the c.Open function must be defined. +func (c *ConfigParser) ReadFile(path string) (m map[string]interface{}, err error) { + if path == "" && c.Open == nil { + return nil, errors.New("ReadFile of empty string but Open hook not defined") + } + c.touchedFiles = make(map[string]bool) + c.rootJSON, err = c.recursiveReadJSON(path) + return c.rootJSON, err +} + +// Decodes and evaluates a json config file, watching for include cycles. 
+func (c *ConfigParser) recursiveReadJSON(configPath string) (decodedObject map[string]interface{}, err error) { + if configPath != "" { + absConfigPath, err := filepath.Abs(configPath) + if err != nil { + return nil, fmt.Errorf("Failed to expand absolute path for %s", configPath) + } + if c.touchedFiles[absConfigPath] { + return nil, fmt.Errorf("ConfigParser include cycle detected reading config: %v", + absConfigPath) + } + c.touchedFiles[absConfigPath] = true + + c.includeStack.Push(absConfigPath) + defer c.includeStack.Pop() + } + + var f File + if f, err = c.open(configPath); err != nil { + return nil, fmt.Errorf("Failed to open config: %v", err) + } + defer f.Close() + + decodedObject = make(map[string]interface{}) + dj := json.NewDecoder(f) + if err = dj.Decode(&decodedObject); err != nil { + extra := "" + if serr, ok := err.(*json.SyntaxError); ok { + if _, serr := f.Seek(0, os.SEEK_SET); serr != nil { + log.Fatalf("seek error: %v", serr) + } + line, col, highlight := errorutil.HighlightBytePosition(f, serr.Offset) + extra = fmt.Sprintf(":\nError at line %d, column %d (file offset %d):\n%s", + line, col, serr.Offset, highlight) + } + return nil, fmt.Errorf("error parsing JSON object in config file %s%s\n%v", + f.Name(), extra, err) + } + + if err = c.evaluateExpressions(decodedObject, nil, false); err != nil { + return nil, fmt.Errorf("error expanding JSON config expressions in %s:\n%v", + f.Name(), err) + } + + return decodedObject, nil +} + +var regFunc = map[string]expanderFunc{} + +// RegisterFunc registers a new function that may be called from JSON +// configs using an array of the form ["_name", arg0, argN...]. +// The provided name must begin with an underscore. 
+func RegisterFunc(name string, fn func(c *ConfigParser, v []interface{}) (interface{}, error)) { + if len(name) < 2 || !strings.HasPrefix(name, "_") { + panic("illegal name") + } + if _, dup := regFunc[name]; dup { + panic("duplicate registration of " + name) + } + regFunc[name] = fn +} + +type expanderFunc func(c *ConfigParser, v []interface{}) (interface{}, error) + +func namedExpander(name string) (fn expanderFunc, ok bool) { + switch name { + case "_env": + return (*ConfigParser).expandEnv, true + case "_fileobj": + return (*ConfigParser).expandFile, true + } + fn, ok = regFunc[name] + return +} + +func (c *ConfigParser) evalValue(v interface{}) (interface{}, error) { + sl, ok := v.([]interface{}) + if !ok { + return v, nil + } + if name, ok := sl[0].(string); ok { + if expander, ok := namedExpander(name); ok { + newval, err := expander(c, sl[1:]) + if err != nil { + return nil, err + } + return newval, nil + } + } + for i, oldval := range sl { + newval, err := c.evalValue(oldval) + if err != nil { + return nil, err + } + sl[i] = newval + } + return v, nil +} + +// CheckTypes parses m and returns an error if it encounters a type or value +// that is not supported by this package. +func (c *ConfigParser) CheckTypes(m map[string]interface{}) error { + return c.evaluateExpressions(m, nil, true) +} + +// evaluateExpressions parses recursively m, populating it with the values +// that are found, unless testOnly is true. 
+func (c *ConfigParser) evaluateExpressions(m map[string]interface{}, seenKeys []string, testOnly bool) error { + for k, ei := range m { + thisPath := append(seenKeys, k) + switch subval := ei.(type) { + case string, bool, float64, nil: + continue + case []interface{}: + if len(subval) == 0 { + continue + } + evaled, err := c.evalValue(subval) + if err != nil { + return fmt.Errorf("%s: value error %v", strings.Join(thisPath, "."), err) + } + if !testOnly { + m[k] = evaled + } + case map[string]interface{}: + if err := c.evaluateExpressions(subval, thisPath, testOnly); err != nil { + return err + } + default: + return fmt.Errorf("%s: unhandled type %T", strings.Join(thisPath, "."), ei) + } + } + return nil +} + +// Permit either: +// ["_env", "VARIABLE"] (required to be set) +// or ["_env", "VARIABLE", "default_value"] +func (c *ConfigParser) expandEnv(v []interface{}) (interface{}, error) { + hasDefault := false + def := "" + if len(v) < 1 || len(v) > 2 { + return "", fmt.Errorf("_env expansion expected 1 or 2 args, got %d", len(v)) + } + s, ok := v[0].(string) + if !ok { + return "", fmt.Errorf("Expected a string after _env expansion; got %#v", v[0]) + } + boolDefault, wantsBool := false, false + if len(v) == 2 { + hasDefault = true + switch vdef := v[1].(type) { + case string: + def = vdef + case bool: + wantsBool = true + boolDefault = vdef + default: + return "", fmt.Errorf("Expected default value in %q _env expansion; got %#v", s, v[1]) + } + } + var err error + expanded := envPattern.ReplaceAllStringFunc(s, func(match string) string { + envVar := match[2 : len(match)-1] + val := os.Getenv(envVar) + // Special case: + if val == "" && envVar == "USER" && runtime.GOOS == "windows" { + val = os.Getenv("USERNAME") + } + if val == "" { + if hasDefault { + return def + } + err = fmt.Errorf("couldn't expand environment variable %q", envVar) + } + return val + }) + if wantsBool { + if expanded == "" { + return boolDefault, nil + } + return strconv.ParseBool(expanded) 
+ } + return expanded, err +} + +func (c *ConfigParser) expandFile(v []interface{}) (exp interface{}, err error) { + if len(v) != 1 { + return "", fmt.Errorf("_file expansion expected 1 arg, got %d", len(v)) + } + var incPath string + if incPath, err = osutil.FindCamliInclude(v[0].(string)); err != nil { + return "", fmt.Errorf("Included config does not exist: %v", v[0]) + } + if exp, err = c.recursiveReadJSON(incPath); err != nil { + return "", fmt.Errorf("In file included from %s:\n%v", + c.includeStack.Last(), err) + } + return exp, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/jsonconfig.go b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/jsonconfig.go new file mode 100644 index 00000000..5f7bd609 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/jsonconfig.go @@ -0,0 +1,296 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package jsonconfig defines a helper type for JSON objects to be +// used for configuration. +package jsonconfig + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +// Obj is a JSON configuration map. 
+type Obj map[string]interface{} + +// Reads json config data from the specified open file, expanding +// all expressions +func ReadFile(configPath string) (Obj, error) { + var c ConfigParser + return c.ReadFile(configPath) +} + +func (jc Obj) RequiredObject(key string) Obj { + return jc.obj(key, false) +} + +func (jc Obj) OptionalObject(key string) Obj { + return jc.obj(key, true) +} + +func (jc Obj) obj(key string, optional bool) Obj { + jc.noteKnownKey(key) + ei, ok := jc[key] + if !ok { + if optional { + return make(Obj) + } + jc.appendError(fmt.Errorf("Missing required config key %q (object)", key)) + return make(Obj) + } + m, ok := ei.(map[string]interface{}) + if !ok { + jc.appendError(fmt.Errorf("Expected config key %q to be an object, not %T", key, ei)) + return make(Obj) + } + return m +} + +func (jc Obj) RequiredString(key string) string { + return jc.string(key, nil) +} + +func (jc Obj) OptionalString(key, def string) string { + return jc.string(key, &def) +} + +func (jc Obj) string(key string, def *string) string { + jc.noteKnownKey(key) + ei, ok := jc[key] + if !ok { + if def != nil { + return *def + } + jc.appendError(fmt.Errorf("Missing required config key %q (string)", key)) + return "" + } + s, ok := ei.(string) + if !ok { + jc.appendError(fmt.Errorf("Expected config key %q to be a string", key)) + return "" + } + return s +} + +func (jc Obj) RequiredStringOrObject(key string) interface{} { + return jc.stringOrObject(key, true) +} + +func (jc Obj) OptionalStringOrObject(key string) interface{} { + return jc.stringOrObject(key, false) +} + +func (jc Obj) stringOrObject(key string, required bool) interface{} { + jc.noteKnownKey(key) + ei, ok := jc[key] + if !ok { + if !required { + return nil + } + jc.appendError(fmt.Errorf("Missing required config key %q (string or object)", key)) + return "" + } + if _, ok := ei.(map[string]interface{}); ok { + return ei + } + if _, ok := ei.(string); ok { + return ei + } + jc.appendError(fmt.Errorf("Expected 
config key %q to be a string or object", key)) + return "" +} + +func (jc Obj) RequiredBool(key string) bool { + return jc.bool(key, nil) +} + +func (jc Obj) OptionalBool(key string, def bool) bool { + return jc.bool(key, &def) +} + +func (jc Obj) bool(key string, def *bool) bool { + jc.noteKnownKey(key) + ei, ok := jc[key] + if !ok { + if def != nil { + return *def + } + jc.appendError(fmt.Errorf("Missing required config key %q (boolean)", key)) + return false + } + switch v := ei.(type) { + case bool: + return v + case string: + b, err := strconv.ParseBool(v) + if err != nil { + jc.appendError(fmt.Errorf("Config key %q has bad boolean format %q", key, v)) + } + return b + default: + jc.appendError(fmt.Errorf("Expected config key %q to be a boolean", key)) + return false + } +} + +func (jc Obj) RequiredInt(key string) int { + return jc.int(key, nil) +} + +func (jc Obj) OptionalInt(key string, def int) int { + return jc.int(key, &def) +} + +func (jc Obj) int(key string, def *int) int { + jc.noteKnownKey(key) + ei, ok := jc[key] + if !ok { + if def != nil { + return *def + } + jc.appendError(fmt.Errorf("Missing required config key %q (integer)", key)) + return 0 + } + b, ok := ei.(float64) + if !ok { + jc.appendError(fmt.Errorf("Expected config key %q to be a number", key)) + return 0 + } + return int(b) +} + +func (jc Obj) RequiredInt64(key string) int64 { + return jc.int64(key, nil) +} + +func (jc Obj) OptionalInt64(key string, def int64) int64 { + return jc.int64(key, &def) +} + +func (jc Obj) int64(key string, def *int64) int64 { + jc.noteKnownKey(key) + ei, ok := jc[key] + if !ok { + if def != nil { + return *def + } + jc.appendError(fmt.Errorf("Missing required config key %q (integer)", key)) + return 0 + } + b, ok := ei.(float64) + if !ok { + jc.appendError(fmt.Errorf("Expected config key %q to be a number", key)) + return 0 + } + return int64(b) +} + +func (jc Obj) RequiredList(key string) []string { + return jc.requiredList(key, true) +} + +func (jc Obj) 
OptionalList(key string) []string { + return jc.requiredList(key, false) +} + +func (jc Obj) requiredList(key string, required bool) []string { + jc.noteKnownKey(key) + ei, ok := jc[key] + if !ok { + if required { + jc.appendError(fmt.Errorf("Missing required config key %q (list of strings)", key)) + } + return nil + } + eil, ok := ei.([]interface{}) + if !ok { + jc.appendError(fmt.Errorf("Expected config key %q to be a list, not %T", key, ei)) + return nil + } + sl := make([]string, len(eil)) + for i, ei := range eil { + s, ok := ei.(string) + if !ok { + jc.appendError(fmt.Errorf("Expected config key %q index %d to be a string, not %T", key, i, ei)) + return nil + } + sl[i] = s + } + return sl +} + +func (jc Obj) noteKnownKey(key string) { + _, ok := jc["_knownkeys"] + if !ok { + jc["_knownkeys"] = make(map[string]bool) + } + jc["_knownkeys"].(map[string]bool)[key] = true +} + +func (jc Obj) appendError(err error) { + ei, ok := jc["_errors"] + if ok { + jc["_errors"] = append(ei.([]error), err) + } else { + jc["_errors"] = []error{err} + } +} + +// UnknownKeys returns the keys from the config that have not yet been discovered by one of the RequiredT or OptionalT calls. +func (jc Obj) UnknownKeys() []string { + ei, ok := jc["_knownkeys"] + var known map[string]bool + if ok { + known = ei.(map[string]bool) + } + var unknown []string + for k, _ := range jc { + if ok && known[k] { + continue + } + if strings.HasPrefix(k, "_") { + // Permit keys with a leading underscore as a + // form of comments. 
+ continue + } + unknown = append(unknown, k) + } + sort.Strings(unknown) + return unknown +} + +func (jc Obj) Validate() error { + unknown := jc.UnknownKeys() + for _, k := range unknown { + jc.appendError(fmt.Errorf("Unknown key %q", k)) + } + + ei, ok := jc["_errors"] + if !ok { + return nil + } + errList := ei.([]error) + if len(errList) == 1 { + return errList[0] + } + strs := make([]string, 0) + for _, v := range errList { + strs = append(strs, v.Error()) + } + return fmt.Errorf("Multiple errors: " + strings.Join(strs, ", ")) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/jsonconfig_test.go b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/jsonconfig_test.go new file mode 100644 index 00000000..70d796c2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/jsonconfig_test.go @@ -0,0 +1,104 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jsonconfig + +import ( + "os" + "reflect" + "strings" + "testing" +) + +func TestIncludes(t *testing.T) { + obj, err := ReadFile("testdata/include1.json") + if err != nil { + t.Fatal(err) + } + two := obj.RequiredObject("two") + if err := obj.Validate(); err != nil { + t.Error(err) + } + if g, e := two.RequiredString("key"), "value"; g != e { + t.Errorf("sub object key = %q; want %q", g, e) + } +} + +func TestIncludeLoop(t *testing.T) { + _, err := ReadFile("testdata/loop1.json") + if err == nil { + t.Fatal("expected an error about import cycles.") + } + if !strings.Contains(err.Error(), "include cycle detected") { + t.Fatalf("expected an error about import cycles; got: %v", err) + } +} + +func TestBoolEnvs(t *testing.T) { + os.Setenv("TEST_EMPTY", "") + os.Setenv("TEST_TRUE", "true") + os.Setenv("TEST_ONE", "1") + os.Setenv("TEST_ZERO", "0") + os.Setenv("TEST_FALSE", "false") + obj, err := ReadFile("testdata/boolenv.json") + if err != nil { + t.Fatal(err) + } + if str := obj.RequiredString("emptystr"); str != "" { + t.Errorf("str = %q, want empty", str) + } + tests := []struct { + key string + want bool + }{ + {"def_false", false}, + {"def_true", true}, + {"set_true_def_false", true}, + {"set_false_def_true", false}, + {"lit_true", true}, + {"lit_false", false}, + {"one", true}, + {"zero", false}, + } + for _, tt := range tests { + if v := obj.RequiredBool(tt.key); v != tt.want { + t.Errorf("key %q = %v; want %v", tt.key, v, tt.want) + } + } + if err := obj.Validate(); err != nil { + t.Error(err) + } +} + +func TestListExpansion(t *testing.T) { + os.Setenv("TEST_BAR", "bar") + obj, err := ReadFile("testdata/listexpand.json") + if err != nil { + t.Fatal(err) + } + s := obj.RequiredString("str") + l := obj.RequiredList("list") + if err := obj.Validate(); err != nil { + t.Error(err) + } + want := []string{"foo", "bar"} + if !reflect.DeepEqual(l, want) { + t.Errorf("got = %#v\nwant = %#v", l, want) + } + if s != "bar" { + t.Errorf("str = %q, want %q", 
s, "bar") + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/boolenv.json b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/boolenv.json new file mode 100644 index 00000000..fe9431eb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/boolenv.json @@ -0,0 +1,11 @@ +{ + "emptystr": ["_env", "${TEST_EMPTY}", ""], + "def_false": ["_env", "${TEST_EMPTY}", false], + "def_true": ["_env", "${TEST_EMPTY}", true], + "set_true_def_false": ["_env", "${TEST_TRUE}", false], + "set_false_def_true": ["_env", "${TEST_FALSE}", true], + "one": ["_env", "${TEST_ONE}"], + "zero": ["_env", "${TEST_ZERO}"], + "lit_true": true, + "lit_false": false +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/include1.json b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/include1.json new file mode 100644 index 00000000..6d8b38e9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/include1.json @@ -0,0 +1,3 @@ +{ + "two": ["_fileobj", "testdata/include2.json"] +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/include2.json b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/include2.json new file mode 100644 index 00000000..7a9e8644 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/include2.json @@ -0,0 +1,3 @@ +{ + "key": "value" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/listexpand.json b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/listexpand.json new file mode 100644 index 00000000..ccabceff --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/listexpand.json @@ -0,0 +1,4 @@ +{ + "list": ["foo", ["_env", "${TEST_BAR}"]], + "str": ["_env", "${TEST_BAR}"] +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/loop1.json 
b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/loop1.json new file mode 100644 index 00000000..215146fd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/loop1.json @@ -0,0 +1,3 @@ +{ + "obj": ["_fileobj", "testdata/loop2.json"] +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/loop2.json b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/loop2.json new file mode 100644 index 00000000..1d270eb4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonconfig/testdata/loop2.json @@ -0,0 +1,3 @@ +{ + "obj": ["_fileobj", "testdata/loop1.json"] +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/doc.go b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/doc.go new file mode 100644 index 00000000..ff30f857 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package jsonsign implements Camlistore's cryptographic signing and +// verification of JSON blobs. +package jsonsign diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/jsonsign_test.go b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/jsonsign_test.go new file mode 100644 index 00000000..b78eb986 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/jsonsign_test.go @@ -0,0 +1,222 @@ +/* +Copyright 2011 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonsign_test + +import ( + "bytes" + "fmt" + "sort" + "strings" + "testing" + + . "camlistore.org/pkg/jsonsign" + "camlistore.org/pkg/test" + . "camlistore.org/pkg/test/asserts" + "camlistore.org/third_party/code.google.com/p/go.crypto/openpgp" +) + +var unsigned = `{"camliVersion": 1, +"camliType": "foo" +}` + +var pubKey1 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.10 (GNU/Linux) + +mQENBEzgoVsBCAC/56aEJ9BNIGV9FVP+WzenTAkg12k86YqlwJVAB/VwdMlyXxvi +bCT1RVRfnYxscs14LLfcMWF3zMucw16mLlJCBSLvbZ0jn4h+/8vK5WuAdjw2YzLs +WtBcjWn3lV6tb4RJz5gtD/o1w8VWxwAnAVIWZntKAWmkcChCRgdUeWso76+plxE5 +aRYBJqdT1mctGqNEISd/WYPMgwnWXQsVi3x4z1dYu2tD9uO1dkAff12z1kyZQIBQ +rexKYRRRh9IKAayD4kgS0wdlULjBU98aeEaMz1ckuB46DX3lAYqmmTEL/Rl9cOI0 +Enpn/oOOfYFa5h0AFndZd1blMvruXfdAobjVABEBAAG0JUNhbWxpIFRlc3RlciA8 +Y2FtbGktdGVzdEBleGFtcGxlLmNvbT6JATgEEwECACIFAkzgoVsCGwMGCwkIBwMC +BhUIAgkKCwQWAgMBAh4BAheAAAoJECkxpnwm9avaHE0IAJ/pMZgiURl3kefrFMAV +7ei0XDfTekZOwDRcZWTVQ/A97phpzO8t78qLYbFeHuq3myNhrlVO9Gyp+2V904rN +dudoHLhpegf5TNeHGmAGHBxcooMPMp0JyIDnUBxtCNGxgWfbKpEDRsQAjkCc7sR0 +H+OegzlEf6JZGzEhV5ohOioTsC1DmJNoQsRz5Kes7sLoAzpQCbCv4yv+1o+mnzgW +9qPJXKxcScc0t2YTvcvpJ7LV8no1OP6vpYqB1A9Pzze6XFBlcXOUKbRKk0fEIV/u +pU3ph1fF7wlyRgA4A3iPwDC4BgVmHYkz9nYPn+7IcT/dDig5SWU+n7WZgGeyv75y +0Ue5AQ0ETOChWwEIALuHxKI+oSH+eeMSXhxcSUXnhp4cUeyvOV7oNPYcmsDclF0Y +7y8NrSPiEZod9vSTEDMq7hd3BG+feCBqjgR4qtmoXguJhWcnJqDBk5iAMuuAph9O 
+CC8QLACMJPhoxQ0UtDPKlpG4X8kLK1woHd716ulPl2KLjTgd6K4kCGj+CV5Ekn6u +IJj+3IPbYDOwk1l06ksimwQAY4dA1CXOTviH1bVqR6CzuzVPg4hcryWDva1rEO5c +LcOR8Wk/thANFLSNjqX8UgtGXhFZRWxKetFDQiX5f2BKoqTVYvD3pqt+zzyLNFAz +xhMc3cyFfqM8yQdzdEey/DIWtMoDqZCSVMJ63N8AEQEAAYkBHwQYAQIACQUCTOCh +WwIbDAAKCRApMaZ8JvWr2mHACACkco+fAfRK+gmprF2m8E0Bp1frwFH0g4RJVHXQ +BUDbg7OZbWumzD4Br28si6XDVMP6fLOeyD0EHYb6LhAHDkBLqx6e3kKG1mQ8fMIV +O4YMQfskYH2FJqlCtgMnM8N3oslPBTpZedNPSUq7HJh2pKr9GIDi1V+Hgc/qEigE +dj9f2zSSaKZdC4eL73GvlQOh+4XqgaMnMiKfI+/2WlRaJs1KOgKmIp5yHt0qY0ef +y+40BY/z9pMjyUvr/Wwp8KXArw0NAwzp8NUl5fNxRg9XWQWLn6hW8ydR20X3t2ym +iNSWzNQiTT6k7fumOABCoSZsow/AJxQSxqKOJBjgpKjIKCgY +=ru0J +-----END PGP PUBLIC KEY BLOCK-----` + +var pubKey2 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.10 (GNU/Linux) + +mQENBEz61lcBCADRQhcb9LIQdV3LhU5f7cCjOctmLsL+y4k4VKmznssWORiNPEHQ +13CxFLjRDN2OQYXi4NSqoUqHNMsRTUJTVW0CnznUUb11ibXLUYW/zbPN9dWs8PlI +UZSScS1dxtGKKk+VfXrvc1LB6pqrjWmAgEwQxsBWToW2IFR/eMo1LiVU83dzpKU1 +n/yb8Jy9wizchspd9xecK2X0JnKLRIJklLTAKQ+XKP+cSwXmShcs+3pxu5f4piqF +7oBfh9noFA0vdGYNBGVch3DfJwFcTmLkkGFZKdiehWncvVYT1jxUkJvc0K44ohDH +smkG2VZm3rJCwi2GIWA/clLiDAhYM6vTI3oZABEBAAG0K0NhbWxpIFRlc3R1c2Vy +IFR3byA8Y2FtbGkudGVzdEBleGFtcGxlLmNvbT6JATgEEwECACIFAkz61lcCGwMG +CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEIUeCLJL7Fq1c44IAKOJjymoinXd +9NOW7GfpHCmynzSflJJoRcRzsNz83lJbwITYCd1ExQxkO84sMKRPJiefc9epP/Hg +8V4b1SwkGi+A8WaoH/OZtEM8HA7iEKmV+wjfZE6kt+y0trbxdu42W5hLz/uerrNl +G+r90mBNjmJXsZxmwaZEFrLtFlqezCzdQSur35QLZMFvW6aoYFTAgOk1rk9lBtkC +DePaadZQGHNWr+Rw2M5xXv9BZ4Rrjl6VLjE2DuqMSBVkelckBcsmRppaszF3J8y3 +9gd10xC+5/LVfhU8niDZjY3pIcjQwsYJ+Jdyce2OEYo1i6pQDiq2WewXdCJ28DVK +1SX38WFB3Zm5AQ0ETPrWVwEIAMQ/dRCrkhy2D0SzJV5o/Z3uVf1nFLlEFfavV45F +8wtG/Bi5EuZXoYqU+O79O7sPy9Dw3Qhxtvt159l6/sSLXYTBBs3HJ2zTVhI5tbAZ +DMz4/wfkRP/h74KuXnWfin1ynswzqdPVXgrRvTsfHbkwbTaRwbx186VYqM17Wqy2 +hFAUCdQIIW0+X9upjGek+kESldSzeUV87fr3IN/pq6fRc90h8xAKfz6mMc7AAUUL +NLNxb9y18u4Bw+fKgc6W7YxB+gQN1IajmgGPcqUTxNxydWF974iqsKnkZpzHg0Ce 
+zGGLWzCAGzI8drltgJPBoGGo56U1s2hW6JzLUi03phV10H8AEQEAAYkBHwQYAQIA +CQUCTPrWVwIbDAAKCRCFHgiyS+xatUPIB/9VPOeIxH5UcNYuZT+LW2tdcWPNhyQ+ +u5UC9DC2A3F9AYNYRwDcSVOMmqS8hPJxg/biFxFoGFgm14Vp0nd1blOHcmNXcDzk +XTv2CKcUbgYpvDVmfCcEf6seSf+/RDbyj/VzebE6yvXuwsPus7ntbMw+Dum42z55 +XYiYsfEFu25RtxritG3eYklCKymdRg615pj8zoRpL5Z1NAy5QBb5sv5hPbdGSyqL +Kw6aLcq2IU7kev6CYJVyXzJ1XtsYv/o7hzKKmZ5WcwuPc9Yqh6onJt1RC8jzz8Ry +jyVNPb8AaaWVW1uZLg6Em61aKnbOG10B30m3CQ8dwBjF9hgmtcY0IZ/Y +=OWHA +-----END PGP PUBLIC KEY BLOCK----- +` + +var pubKeyBlob1 = &test.Blob{pubKey1} // user 1 +var pubKeyBlob2 = &test.Blob{pubKey2} // user 2 + +var testFetcher = &test.Fetcher{} + +func init() { + testFetcher.AddBlob(pubKeyBlob1) + testFetcher.AddBlob(pubKeyBlob2) +} + +func TestSigningBadInput(t *testing.T) { + sr := newRequest(1) + + sr.UnsignedJSON = "" + _, err := sr.Sign() + ExpectErrorContains(t, err, "json parse error", "empty input") + + sr.UnsignedJSON = "{}" + _, err = sr.Sign() + ExpectErrorContains(t, err, "json lacks \"camliSigner\" key", "just braces") + + sr.UnsignedJSON = `{"camliSigner": 123}` + _, err = sr.Sign() + ExpectErrorContains(t, err, "\"camliSigner\" key is malformed or unsupported", "camliSigner 123") + + sr.UnsignedJSON = `{"camliSigner": ""}` + _, err = sr.Sign() + ExpectErrorContains(t, err, "\"camliSigner\" key is malformed or unsupported", "empty camliSigner") +} + +func newRequest(userN int) *SignRequest { + if userN < 1 || userN > 2 { + panic("invalid userid") + } + suffix := ".gpg" + if userN == 2 { + suffix = "2.gpg" + } + return &SignRequest{ + UnsignedJSON: "", + Fetcher: testFetcher, + ServerMode: true, + SecretKeyringPath: "./testdata/test-secring" + suffix, + } +} + +func TestSigning(t *testing.T) { + sr := newRequest(1) + sr.UnsignedJSON = fmt.Sprintf(`{"camliVersion": 1, "foo": "fooVal", "camliSigner": %q }`, pubKeyBlob1.BlobRef().String()) + signed, err := sr.Sign() + AssertNil(t, err, "no error signing") + Assert(t, strings.Contains(signed, `"camliSig":`), "got a 
camliSig") + + vr := NewVerificationRequest(signed, testFetcher) + if !vr.Verify() { + t.Fatalf("verification failed on signed json [%s]: %v", signed, vr.Err) + } + ExpectString(t, "fooVal", vr.PayloadMap["foo"].(string), "PayloadMap") + ExpectString(t, "2931A67C26F5ABDA", vr.SignerKeyId, "SignerKeyId") + + // Test a non-matching signature. + fakeSigned := strings.Replace(signed, pubKeyBlob1.BlobRef().String(), pubKeyBlob2.BlobRef().String(), 1) + vr = NewVerificationRequest(fakeSigned, testFetcher) + if vr.Verify() { + t.Fatalf("unexpected verification of faked signature") + } + AssertErrorContains(t, vr.Err, "openpgp: invalid signature: hash tag doesn't match", + "expected signature verification error") + + t.Logf("TODO: verify GPG-vs-Go sign & verify interop both ways, once implemented.") +} + +func TestEntityFromSecring(t *testing.T) { + ent, err := EntityFromSecring("26F5ABDA", "testdata/test-secring.gpg") + if err != nil { + t.Fatalf("EntityFromSecring: %v", err) + } + if ent == nil { + t.Fatalf("nil entity") + } + if _, ok := ent.Identities["Camli Tester "]; !ok { + t.Errorf("missing expected identity") + } +} + +func TestWriteKeyRing(t *testing.T) { + ent, err := EntityFromSecring("26F5ABDA", "testdata/test-secring.gpg") + if err != nil { + t.Fatalf("NewEntity: %v", err) + } + var buf bytes.Buffer + err = WriteKeyRing(&buf, openpgp.EntityList([]*openpgp.Entity{ent})) + if err != nil { + t.Fatalf("WriteKeyRing: %v", err) + } + + el, err := openpgp.ReadKeyRing(&buf) + if err != nil { + t.Fatalf("ReadKeyRing: %v", err) + } + if len(el) != 1 { + t.Fatalf("ReadKeyRing read %d entities; want 1", len(el)) + } + orig := entityString(ent) + got := entityString(el[0]) + if orig != got { + t.Fatalf("original vs. wrote-then-read entities differ:\norig: %s\n got: %s", orig, got) + } +} + +// stupid entity stringier for testing. 
+func entityString(ent *openpgp.Entity) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "PublicKey=%s", ent.PrimaryKey.KeyIdShortString()) + var ids []string + for k := range ent.Identities { + ids = append(ids, k) + } + sort.Strings(ids) + for _, k := range ids { + fmt.Fprintf(&buf, " id[%q]", k) + } + return buf.String() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/keys.go b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/keys.go new file mode 100644 index 00000000..14a2ae42 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/keys.go @@ -0,0 +1,220 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonsign + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/wkfs" + "camlistore.org/third_party/code.google.com/p/go.crypto/openpgp" + "camlistore.org/third_party/code.google.com/p/go.crypto/openpgp/armor" + "camlistore.org/third_party/code.google.com/p/go.crypto/openpgp/packet" +) + +const publicKeyMaxSize = 256 * 1024 + +// ParseArmoredPublicKey tries to parse an armored public key from r, +// taking care to bound the amount it reads. +// The returned shortKeyId is 8 capital hex digits. +// The returned armoredKey is a copy of the contents read. 
+func ParseArmoredPublicKey(r io.Reader) (shortKeyId, armoredKey string, err error) { + var buf bytes.Buffer + pk, err := openArmoredPublicKeyFile(ioutil.NopCloser(io.TeeReader(r, &buf))) + if err != nil { + return + } + return publicKeyId(pk), buf.String(), nil +} + +func VerifyPublicKeyFile(file, keyid string) (bool, error) { + f, err := wkfs.Open(file) + if err != nil { + return false, err + } + + key, err := openArmoredPublicKeyFile(f) + if err != nil { + return false, err + } + keyId := publicKeyId(key) + if keyId != strings.ToUpper(keyid) { + return false, fmt.Errorf("Key in file %q has id %q; expected %q", + file, keyId, keyid) + } + return true, nil +} + +// publicKeyId returns the short (8 character) capital hex GPG key ID +// of the provided public key. +func publicKeyId(pubKey *packet.PublicKey) string { + return fmt.Sprintf("%X", pubKey.Fingerprint[len(pubKey.Fingerprint)-4:]) +} + +func openArmoredPublicKeyFile(reader io.ReadCloser) (*packet.PublicKey, error) { + defer reader.Close() + + var lr = io.LimitReader(reader, publicKeyMaxSize) + block, _ := armor.Decode(lr) + if block == nil { + return nil, errors.New("Couldn't find PGP block in public key file") + } + if block.Type != "PGP PUBLIC KEY BLOCK" { + return nil, errors.New("Invalid public key blob.") + } + p, err := packet.Read(block.Body) + if err != nil { + return nil, fmt.Errorf("Invalid public key blob: %v", err) + } + + pk, ok := p.(*packet.PublicKey) + if !ok { + return nil, fmt.Errorf("Invalid public key blob; not a public key packet") + } + return pk, nil +} + +// EntityFromSecring returns the openpgp Entity from keyFile that matches keyId. +// If empty, keyFile defaults to osutil.SecretRingFile(). 
+func EntityFromSecring(keyId, keyFile string) (*openpgp.Entity, error) { + if keyId == "" { + return nil, errors.New("empty keyId passed to EntityFromSecring") + } + keyId = strings.ToUpper(keyId) + if keyFile == "" { + keyFile = osutil.SecretRingFile() + } + secring, err := wkfs.Open(keyFile) + if err != nil { + return nil, fmt.Errorf("jsonsign: failed to open keyring: %v", err) + } + defer secring.Close() + + el, err := openpgp.ReadKeyRing(secring) + if err != nil { + return nil, fmt.Errorf("openpgp.ReadKeyRing of %q: %v", keyFile, err) + } + var entity *openpgp.Entity + for _, e := range el { + pk := e.PrivateKey + if pk == nil || (pk.KeyIdString() != keyId && pk.KeyIdShortString() != keyId) { + continue + } + entity = e + } + if entity == nil { + found := []string{} + for _, e := range el { + pk := e.PrivateKey + if pk == nil { + continue + } + found = append(found, pk.KeyIdShortString()) + } + return nil, fmt.Errorf("didn't find a key in %q for keyId %q; other keyIds in file = %v", keyFile, keyId, found) + } + return entity, nil +} + +var newlineBytes = []byte("\n") + +func ArmoredPublicKey(entity *openpgp.Entity) (string, error) { + var buf bytes.Buffer + wc, err := armor.Encode(&buf, openpgp.PublicKeyType, nil) + if err != nil { + return "", err + } + err = entity.PrivateKey.PublicKey.Serialize(wc) + if err != nil { + return "", err + } + wc.Close() + if !bytes.HasSuffix(buf.Bytes(), newlineBytes) { + buf.WriteString("\n") + } + return buf.String(), nil +} + +// NewEntity returns a new OpenPGP entity. 
+func NewEntity() (*openpgp.Entity, error) { + name := "" // intentionally empty + comment := "camlistore" + email := "" // intentionally empty + return openpgp.NewEntity(name, comment, email, nil) +} + +func WriteKeyRing(w io.Writer, el openpgp.EntityList) error { + for _, ent := range el { + if err := ent.SerializePrivate(w, nil); err != nil { + return err + } + } + return nil +} + +// KeyIdFromRing returns the public keyId contained in the secret +// ring file secRing. It expects only one keyId in this secret ring +// and returns an error otherwise. +func KeyIdFromRing(secRing string) (keyId string, err error) { + f, err := wkfs.Open(secRing) + if err != nil { + return "", fmt.Errorf("Could not open secret ring file %v: %v", secRing, err) + } + defer f.Close() + el, err := openpgp.ReadKeyRing(f) + if err != nil { + return "", fmt.Errorf("Could not read secret ring file %s: %v", secRing, err) + } + if len(el) != 1 { + return "", fmt.Errorf("Secret ring file %v contained %d identities; expected 1", secRing, len(el)) + } + ent := el[0] + return ent.PrimaryKey.KeyIdShortString(), nil +} + +// GenerateNewSecRing creates a new secret ring file secRing, with +// a new GPG identity. It returns the public keyId of that identity. +// It returns an error if the file already exists. 
+func GenerateNewSecRing(secRing string) (keyId string, err error) { + ent, err := NewEntity() + if err != nil { + return "", fmt.Errorf("generating new identity: %v", err) + } + if err := os.MkdirAll(filepath.Dir(secRing), 0700); err != nil { + return "", err + } + f, err := wkfs.OpenFile(secRing, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return "", err + } + err = WriteKeyRing(f, openpgp.EntityList([]*openpgp.Entity{ent})) + if err != nil { + f.Close() + return "", fmt.Errorf("Could not write new key ring to %s: %v", secRing, err) + } + if err := f.Close(); err != nil { + return "", fmt.Errorf("Could not close %v: %v", secRing, err) + } + return ent.PrimaryKey.KeyIdShortString(), nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/sign.go b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/sign.go new file mode 100644 index 00000000..79a0908b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/sign.go @@ -0,0 +1,219 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jsonsign + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "strings" + "sync" + "time" + "unicode" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/wkfs" + "camlistore.org/third_party/code.google.com/p/go.crypto/openpgp" + "camlistore.org/third_party/code.google.com/p/go.crypto/openpgp/packet" +) + +type EntityFetcher interface { + FetchEntity(keyId string) (*openpgp.Entity, error) +} + +type FileEntityFetcher struct { + File string +} + +func FlagEntityFetcher() *FileEntityFetcher { + return &FileEntityFetcher{File: osutil.SecretRingFile()} +} + +type CachingEntityFetcher struct { + Fetcher EntityFetcher + + lk sync.Mutex + m map[string]*openpgp.Entity +} + +func (ce *CachingEntityFetcher) FetchEntity(keyId string) (*openpgp.Entity, error) { + ce.lk.Lock() + if ce.m != nil { + e := ce.m[keyId] + if e != nil { + ce.lk.Unlock() + return e, nil + } + } + ce.lk.Unlock() + + e, err := ce.Fetcher.FetchEntity(keyId) + if err == nil { + ce.lk.Lock() + defer ce.lk.Unlock() + if ce.m == nil { + ce.m = make(map[string]*openpgp.Entity) + } + ce.m[keyId] = e + } + + return e, err +} + +func (fe *FileEntityFetcher) FetchEntity(keyId string) (*openpgp.Entity, error) { + f, err := wkfs.Open(fe.File) + if err != nil { + return nil, fmt.Errorf("jsonsign: FetchEntity: %v", err) + } + defer f.Close() + el, err := openpgp.ReadKeyRing(f) + if err != nil { + return nil, fmt.Errorf("jsonsign: openpgp.ReadKeyRing of %q: %v", fe.File, err) + } + for _, e := range el { + pubk := &e.PrivateKey.PublicKey + if pubk.KeyIdString() != keyId { + continue + } + if e.PrivateKey.Encrypted { + if err := fe.decryptEntity(e); err == nil { + return e, nil + } else { + return nil, err + } + } + return e, nil + } + return nil, fmt.Errorf("jsonsign: entity for keyid %q not found in %q", keyId, fe.File) +} + +type SignRequest struct { + UnsignedJSON string + Fetcher blob.Fetcher + ServerMode bool // if true, can't use pinentry or gpg-agent, etc. 
+ + // Optional signature time. If zero, time.Now() is used. + SignatureTime time.Time + + // Optional function to return an entity (including decrypting + // the PrivateKey, if necessary) + EntityFetcher EntityFetcher + + // SecretKeyringPath is only used if EntityFetcher is nil, + // in which case SecretKeyringPath is used if non-empty. + // As a final resort, we default to osutil.SecretRingFile(). + SecretKeyringPath string +} + +func (sr *SignRequest) secretRingPath() string { + if sr.SecretKeyringPath != "" { + return sr.SecretKeyringPath + } + return osutil.SecretRingFile() +} + +func (sr *SignRequest) Sign() (signedJSON string, err error) { + trimmedJSON := strings.TrimRightFunc(sr.UnsignedJSON, unicode.IsSpace) + + // TODO: make sure these return different things + inputfail := func(msg string) (string, error) { + return "", errors.New(msg) + } + execfail := func(msg string) (string, error) { + return "", errors.New(msg) + } + + jmap := make(map[string]interface{}) + if err := json.Unmarshal([]byte(trimmedJSON), &jmap); err != nil { + return inputfail("json parse error") + } + + camliSigner, hasSigner := jmap["camliSigner"] + if !hasSigner { + return inputfail("json lacks \"camliSigner\" key with public key blobref") + } + + camliSignerStr, _ := camliSigner.(string) + signerBlob, ok := blob.Parse(camliSignerStr) + if !ok { + return inputfail("json \"camliSigner\" key is malformed or unsupported") + } + + pubkeyReader, _, err := sr.Fetcher.Fetch(signerBlob) + if err != nil { + // TODO: not really either an inputfail or an execfail.. but going + // with exec for now. 
+ return execfail(fmt.Sprintf("failed to find public key %s: %v", signerBlob.String(), err)) + } + + pubk, err := openArmoredPublicKeyFile(pubkeyReader) + pubkeyReader.Close() + if err != nil { + return execfail(fmt.Sprintf("failed to parse public key from blobref %s: %v", signerBlob.String(), err)) + } + + // This check should be redundant if the above JSON parse succeeded, but + // for explicitness... + if len(trimmedJSON) == 0 || trimmedJSON[len(trimmedJSON)-1] != '}' { + return inputfail("json parameter lacks trailing '}'") + } + trimmedJSON = trimmedJSON[0 : len(trimmedJSON)-1] + + // sign it + entityFetcher := sr.EntityFetcher + if entityFetcher == nil { + file := sr.secretRingPath() + if file == "" { + return "", errors.New("jsonsign: no EntityFetcher, and no secret ring file defined.") + } + secring, err := wkfs.Open(sr.secretRingPath()) + if err != nil { + return "", fmt.Errorf("jsonsign: failed to open secret ring file %q: %v", sr.secretRingPath(), err) + } + secring.Close() // just opened to see if it's readable + entityFetcher = &FileEntityFetcher{File: file} + } + signer, err := entityFetcher.FetchEntity(pubk.KeyIdString()) + if err != nil { + return "", err + } + + var buf bytes.Buffer + err = openpgp.ArmoredDetachSign( + &buf, + signer, + strings.NewReader(trimmedJSON), + &packet.Config{Time: func() time.Time { return sr.SignatureTime }}, + ) + if err != nil { + return "", err + } + + output := buf.String() + + index1 := strings.Index(output, "\n\n") + index2 := strings.Index(output, "\n-----") + if index1 == -1 || index2 == -1 { + return execfail("Failed to parse signature from gpg.") + } + inner := output[index1+2 : index2] + signature := strings.Replace(inner, "\n", "", -1) + + return fmt.Sprintf("%s,\"camliSig\":\"%s\"}\n", trimmedJSON, signature), nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/sign_appengine.go b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/sign_appengine.go new file mode 100644 index 
00000000..0f185af3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/sign_appengine.go @@ -0,0 +1,29 @@ +// +build appengine + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonsign + +import ( + "errors" + + "camlistore.org/third_party/code.google.com/p/go.crypto/openpgp" +) + +func (fe *FileEntityFetcher) decryptEntity(e *openpgp.Entity) error { + return errors.New("No gpg-agent or on-demand password entry on AppEngine.") +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/sign_normal.go b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/sign_normal.go new file mode 100644 index 00000000..9aaf5504 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/sign_normal.go @@ -0,0 +1,86 @@ +// +build !appengine + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jsonsign + +import ( + "errors" + "fmt" + "log" + "os" + + "camlistore.org/pkg/misc/gpgagent" + "camlistore.org/pkg/misc/pinentry" + "camlistore.org/third_party/code.google.com/p/go.crypto/openpgp" +) + +func (fe *FileEntityFetcher) decryptEntity(e *openpgp.Entity) error { + // TODO: syscall.Mlock a region and keep pass phrase in it. + pubk := &e.PrivateKey.PublicKey + desc := fmt.Sprintf("Need to unlock GPG key %s to use it for signing.", + pubk.KeyIdShortString()) + + conn, err := gpgagent.NewConn() + switch err { + case gpgagent.ErrNoAgent: + fmt.Fprintf(os.Stderr, "Note: gpg-agent not found; resorting to on-demand password entry.\n") + case nil: + defer conn.Close() + req := &gpgagent.PassphraseRequest{ + CacheKey: "camli:jsonsign:" + pubk.KeyIdShortString(), + Prompt: "Passphrase", + Desc: desc, + } + for tries := 0; tries < 2; tries++ { + pass, err := conn.GetPassphrase(req) + if err == nil { + err = e.PrivateKey.Decrypt([]byte(pass)) + if err == nil { + return nil + } + req.Error = "Passphrase failed to decrypt: " + err.Error() + conn.RemoveFromCache(req.CacheKey) + continue + } + if err == gpgagent.ErrCancel { + return errors.New("jsonsign: failed to decrypt key; action canceled") + } + log.Printf("jsonsign: gpgagent: %v", err) + } + default: + log.Printf("jsonsign: gpgagent: %v", err) + } + + pinReq := &pinentry.Request{Desc: desc, Prompt: "Passphrase"} + for tries := 0; tries < 2; tries++ { + pass, err := pinReq.GetPIN() + if err == nil { + err = e.PrivateKey.Decrypt([]byte(pass)) + if err == nil { + return nil + } + pinReq.Error = "Passphrase failed to decrypt: " + err.Error() + continue + } + if err == pinentry.ErrCancel { + return errors.New("jsonsign: failed to decrypt key; action canceled") + } + log.Printf("jsonsign: pinentry: %v", err) + } + return fmt.Errorf("jsonsign: failed to decrypt key %q", pubk.KeyIdShortString()) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/signhandler/sig.go 
b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/signhandler/sig.go new file mode 100644 index 00000000..231965c1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/signhandler/sig.go @@ -0,0 +1,289 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package signhandler implements the HTTP interface to signing and verifying +// Camlistore JSON blobs. +package signhandler + +import ( + "fmt" + "log" + "net/http" + "strings" + "sync" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/blobserver/gethandler" + "camlistore.org/pkg/blobserver/memory" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/jsonsign" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/types/camtypes" + + "camlistore.org/third_party/code.google.com/p/go.crypto/openpgp" +) + +const kMaxJSONLength = 1024 * 1024 + +type Handler struct { + // Optional path to non-standard secret gpg keyring file + secretRing string + + pubKey string // armored + pubKeyBlobRef blob.Ref + pubKeyFetcher blob.Fetcher + + pubKeyBlobRefServeSuffix string // "camli/sha1-xxxx" + pubKeyHandler http.Handler + + pubKeyDest blobserver.Storage // Where our public key is published + + pubKeyUploadMu sync.RWMutex + pubKeyUploaded bool + + entity *openpgp.Entity + signer *schema.Signer +} + +func (h *Handler) Signer() *schema.Signer { return h.signer } + +func (h *Handler) 
secretRingPath() string { + if h.secretRing != "" { + return h.secretRing + } + return osutil.SecretRingFile() +} + +func init() { + blobserver.RegisterHandlerConstructor("jsonsign", newJSONSignFromConfig) +} + +func newJSONSignFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { + var ( + // either a short form ("26F5ABDA") or one the longer forms. + keyId = conf.RequiredString("keyId") + + pubKeyDestPrefix = conf.OptionalString("publicKeyDest", "") + secretRing = conf.OptionalString("secretRing", "") + ) + if err := conf.Validate(); err != nil { + return nil, err + } + + h := &Handler{ + secretRing: secretRing, + } + + var err error + h.entity, err = jsonsign.EntityFromSecring(keyId, h.secretRingPath()) + if err != nil { + return nil, err + } + + h.pubKey, err = jsonsign.ArmoredPublicKey(h.entity) + + ms := &memory.Storage{} + h.pubKeyBlobRef = blob.SHA1FromString(h.pubKey) + if _, err := ms.ReceiveBlob(h.pubKeyBlobRef, strings.NewReader(h.pubKey)); err != nil { + return nil, fmt.Errorf("could not store pub key blob: %v", err) + } + h.pubKeyFetcher = ms + + if pubKeyDestPrefix != "" { + sto, err := ld.GetStorage(pubKeyDestPrefix) + if err != nil { + return nil, err + } + h.pubKeyDest = sto + } + h.pubKeyBlobRefServeSuffix = "camli/" + h.pubKeyBlobRef.String() + h.pubKeyHandler = &gethandler.Handler{ + Fetcher: ms, + } + + h.signer, err = schema.NewSigner(h.pubKeyBlobRef, strings.NewReader(h.pubKey), h.entity) + if err != nil { + return nil, err + } + + return h, nil +} + +func (h *Handler) uploadPublicKey() error { + h.pubKeyUploadMu.RLock() + if h.pubKeyUploaded { + h.pubKeyUploadMu.RUnlock() + return nil + } + h.pubKeyUploadMu.RUnlock() + + sto := h.pubKeyDest + + h.pubKeyUploadMu.Lock() + defer h.pubKeyUploadMu.Unlock() + if h.pubKeyUploaded { + return nil + } + _, err := blobserver.StatBlob(sto, h.pubKeyBlobRef) + if err == nil { + h.pubKeyUploaded = true + return nil + } + _, err = blobserver.Receive(sto, h.pubKeyBlobRef, 
strings.NewReader(h.pubKey)) + h.pubKeyUploaded = (err == nil) + return err +} + +// Discovery returns the Discovery response for the signing handler. +func (h *Handler) Discovery(base string) *camtypes.SignDiscovery { + sd := &camtypes.SignDiscovery{ + PublicKeyID: h.entity.PrimaryKey.KeyIdString(), + SignHandler: base + "camli/sig/sign", + VerifyHandler: base + "camli/sig/verify", + } + if h.pubKeyBlobRef.Valid() { + sd.PublicKeyBlobRef = h.pubKeyBlobRef + sd.PublicKey = base + h.pubKeyBlobRefServeSuffix + } + return sd +} + +func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + base := httputil.PathBase(req) + subPath := httputil.PathSuffix(req) + switch req.Method { + case "GET", "HEAD": + switch subPath { + case "": + http.Redirect(rw, req, base+"camli/sig/discovery", http.StatusFound) + return + case h.pubKeyBlobRefServeSuffix: + h.pubKeyHandler.ServeHTTP(rw, req) + return + case "camli/sig/sign": + fallthrough + case "camli/sig/verify": + http.Error(rw, "POST required", 400) + return + case "camli/sig/discovery": + httputil.ReturnJSON(rw, h.Discovery(base)) + return + } + case "POST": + switch subPath { + case "camli/sig/sign": + h.handleSign(rw, req) + return + case "camli/sig/verify": + h.handleVerify(rw, req) + return + } + } + http.Error(rw, "Unsupported path or method.", http.StatusBadRequest) +} + +func (h *Handler) handleVerify(rw http.ResponseWriter, req *http.Request) { + req.ParseForm() + sjson := req.FormValue("sjson") + if sjson == "" { + http.Error(rw, "missing \"sjson\" parameter", http.StatusBadRequest) + return + } + + // TODO: use a different fetcher here that checks memory, disk, + // the internet, etc. 
+ fetcher := h.pubKeyFetcher + + var res camtypes.VerifyResponse + vreq := jsonsign.NewVerificationRequest(sjson, fetcher) + if vreq.Verify() { + res.SignatureValid = true + res.SignerKeyId = vreq.SignerKeyId + res.VerifiedData = vreq.PayloadMap + } else { + res.SignatureValid = false + res.ErrorMessage = vreq.Err.Error() + } + + rw.WriteHeader(http.StatusOK) // no HTTP response code fun, error info in JSON + httputil.ReturnJSON(rw, &res) +} + +func (h *Handler) handleSign(rw http.ResponseWriter, req *http.Request) { + req.ParseForm() + + badReq := func(s string) { + http.Error(rw, s, http.StatusBadRequest) + log.Printf("bad request: %s", s) + return + } + + jsonStr := req.FormValue("json") + if jsonStr == "" { + badReq("missing \"json\" parameter") + return + } + if len(jsonStr) > kMaxJSONLength { + badReq("parameter \"json\" too large") + return + } + + sreq := &jsonsign.SignRequest{ + UnsignedJSON: jsonStr, + Fetcher: h.pubKeyFetcher, + ServerMode: true, + SecretKeyringPath: h.secretRing, + } + signedJSON, err := sreq.Sign() + if err != nil { + // TODO: some aren't really a "bad request" + badReq(fmt.Sprintf("%v", err)) + return + } + if err := h.uploadPublicKey(); err != nil { + log.Printf("signing handler failed to upload public key: %v", err) + } + rw.Write([]byte(signedJSON)) +} + +func (h *Handler) Sign(bb *schema.Builder) (string, error) { + bb.SetSigner(h.pubKeyBlobRef) + unsigned, err := bb.JSON() + if err != nil { + return "", err + } + sreq := &jsonsign.SignRequest{ + UnsignedJSON: unsigned, + Fetcher: h.pubKeyFetcher, + ServerMode: true, + SecretKeyringPath: h.secretRing, + } + claimTime, err := bb.Blob().ClaimDate() + if err != nil { + if !schema.IsMissingField(err) { + return "", err + } + } else { + sreq.SignatureTime = claimTime + } + if err := h.uploadPublicKey(); err != nil { + log.Printf("signing handler failed to upload public key: %v", err) + } + return sreq.Sign() +} diff --git 
a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/password-foo-keyring.gpg b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/password-foo-keyring.gpg new file mode 100644 index 00000000..e2a30ae5 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/password-foo-keyring.gpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/password-foo-secring.gpg b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/password-foo-secring.gpg new file mode 100644 index 00000000..233cbba4 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/password-foo-secring.gpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/test-keyring.gpg b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/test-keyring.gpg new file mode 100644 index 00000000..3d20ba68 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/test-keyring.gpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/test-keyring2.gpg b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/test-keyring2.gpg new file mode 100644 index 00000000..ded7d515 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/test-keyring2.gpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/test-secring.gpg b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/test-secring.gpg new file mode 100644 index 00000000..bca3ad03 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/test-secring.gpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/test-secring2.gpg b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/test-secring2.gpg new file mode 100644 index 00000000..f4b7ed22 Binary files /dev/null and 
b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/testdata/test-secring2.gpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/jsonsign/verify.go b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/verify.go new file mode 100644 index 00000000..460f33bb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/jsonsign/verify.go @@ -0,0 +1,239 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonsign + +import ( + "bytes" + "crypto" + "encoding/json" + "errors" + "fmt" + "log" + "os" + "strings" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/camerrors" + "camlistore.org/third_party/code.google.com/p/go.crypto/openpgp/armor" + "camlistore.org/third_party/code.google.com/p/go.crypto/openpgp/packet" +) + +const sigSeparator = `,"camliSig":"` + +// reArmor takes a camliSig (single line armor) and turns it back into an PGP-style +// multi-line armored string +func reArmor(line string) string { + lastEq := strings.LastIndex(line, "=") + if lastEq == -1 { + return "" + } + buf := new(bytes.Buffer) + fmt.Fprintf(buf, "-----BEGIN PGP SIGNATURE-----\n\n") + payload := line[0:lastEq] + crc := line[lastEq:] + for len(payload) > 0 { + chunkLen := len(payload) + if chunkLen > 60 { + chunkLen = 60 + } + fmt.Fprintf(buf, "%s\n", payload[0:chunkLen]) + payload = payload[chunkLen:] + } + fmt.Fprintf(buf, "%s\n-----BEGIN PGP SIGNATURE-----\n", crc) + return buf.String() +} + +// See doc/json-signing/* for background and 
details +// on these variable names. +type VerifyRequest struct { + fetcher blob.Fetcher // fetcher used to find public key blob + + ba []byte // "bytes all" + bp []byte // "bytes payload" (the part that is signed) + bpj []byte // "bytes payload, JSON" (BP + "}") + bs []byte // "bytes signature", "{" + separator + camliSig, valid JSON + + CamliSigner blob.Ref + CamliSig string + PublicKeyPacket *packet.PublicKey + + // set if Verify() returns true: + PayloadMap map[string]interface{} // The JSON values from BPJ + SignerKeyId string // e.g. "2931A67C26F5ABDA" + + Err error // last error encountered +} + +func (vr *VerifyRequest) fail(msg string) bool { + vr.Err = errors.New("jsonsign: " + msg) + return false +} + +func (vr *VerifyRequest) ParseSigMap() bool { + sigMap := make(map[string]interface{}) + if err := json.Unmarshal(vr.bs, &sigMap); err != nil { + return vr.fail("invalid JSON in signature") + } + + if len(sigMap) != 1 { + return vr.fail("signature JSON didn't have exactly 1 key") + } + + sigVal, hasCamliSig := sigMap["camliSig"] + if !hasCamliSig { + return vr.fail("no 'camliSig' key in signature") + } + + var ok bool + vr.CamliSig, ok = sigVal.(string) + if !ok { + return vr.fail("camliSig not a string") + } + + return true +} + +func (vr *VerifyRequest) ParsePayloadMap() bool { + vr.PayloadMap = make(map[string]interface{}) + pm := vr.PayloadMap + + if err := json.Unmarshal(vr.bpj, &pm); err != nil { + return vr.fail("parse error; payload JSON is invalid") + } + + if _, hasVersion := pm["camliVersion"]; !hasVersion { + return vr.fail("missing 'camliVersion' in the JSON payload") + } + + signer, hasSigner := pm["camliSigner"] + if !hasSigner { + return vr.fail("missing 'camliSigner' in the JSON payload") + } + + if _, ok := signer.(string); !ok { + return vr.fail("invalid 'camliSigner' in the JSON payload") + } + + var ok bool + vr.CamliSigner, ok = blob.Parse(signer.(string)) + if !ok { + return vr.fail("malformed 'camliSigner' blobref in the JSON 
payload") + } + return true +} + +func (vr *VerifyRequest) FindAndParsePublicKeyBlob() bool { + reader, _, err := vr.fetcher.Fetch(vr.CamliSigner) + if err == os.ErrNotExist { + vr.Err = camerrors.ErrMissingKeyBlob + return false + } + if err != nil { + log.Printf("error fetching public key blob %v: %v", vr.CamliSigner, err) + vr.Err = err + return false + } + defer reader.Close() + pk, err := openArmoredPublicKeyFile(reader) + if err != nil { + return vr.fail(fmt.Sprintf("error opening public key file: %v", err)) + } + vr.PublicKeyPacket = pk + return true +} + +func (vr *VerifyRequest) VerifySignature() bool { + armorData := reArmor(vr.CamliSig) + block, _ := armor.Decode(bytes.NewBufferString(armorData)) + if block == nil { + return vr.fail("can't parse camliSig armor") + } + var p packet.Packet + var err error + p, err = packet.Read(block.Body) + if err != nil { + return vr.fail("error reading PGP packet from camliSig: " + err.Error()) + } + sig, ok := p.(*packet.Signature) + if !ok { + return vr.fail("PGP packet isn't a signature packet") + } + if sig.Hash != crypto.SHA1 && sig.Hash != crypto.SHA256 { + return vr.fail("I can only verify SHA1 or SHA256 signatures") + } + if sig.SigType != packet.SigTypeBinary { + return vr.fail("I can only verify binary signatures") + } + hash := sig.Hash.New() + hash.Write(vr.bp) // payload bytes + err = vr.PublicKeyPacket.VerifySignature(hash, sig) + if err != nil { + return vr.fail(fmt.Sprintf("bad signature: %s", err)) + } + vr.SignerKeyId = vr.PublicKeyPacket.KeyIdString() + return true +} + +func NewVerificationRequest(sjson string, fetcher blob.Fetcher) (vr *VerifyRequest) { + if fetcher == nil { + panic("NewVerificationRequest fetcher is nil") + } + vr = new(VerifyRequest) + vr.ba = []byte(sjson) + vr.fetcher = fetcher + + sigIndex := bytes.LastIndex(vr.ba, []byte(sigSeparator)) + if sigIndex == -1 { + vr.Err = errors.New("jsonsign: no 13-byte camliSig separator found in sjson") + return + } + + // "Bytes Payload" + 
vr.bp = vr.ba[:sigIndex] + + // "Bytes Payload JSON". Note we re-use the memory (the ",") + // from BA in BPJ, so we can't re-use that "," byte for + // the opening "{" in "BS". + vr.bpj = vr.ba[:sigIndex+1] + vr.bpj[sigIndex] = '}' + vr.bs = []byte("{" + sjson[sigIndex+1:]) + return +} + +// TODO: turn this into (bool, os.Error) return, probably, or *Details, os.Error. +func (vr *VerifyRequest) Verify() bool { + if vr.Err != nil { + return false + } + + if vr.ParseSigMap() && + vr.ParsePayloadMap() && + vr.FindAndParsePublicKeyBlob() && + vr.VerifySignature() { + return true + } + + // Don't allow dumbs callers to accidentally check this + // if it's not valid. + vr.PayloadMap = nil + if vr.Err == nil { + // The other functions should have filled this in + // already, but just in case: + vr.Err = errors.New("jsonsign: verification failed") + } + return false +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/kvutil/kvutil.go b/vendor/github.com/camlistore/camlistore/pkg/kvutil/kvutil.go new file mode 100644 index 00000000..0fd6c781 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/kvutil/kvutil.go @@ -0,0 +1,64 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kvutil contains helpers related to +// github.com/cznic/kv. 
+package kvutil + +import ( + "fmt" + "io" + "os" + "strconv" + + "camlistore.org/third_party/github.com/camlistore/lock" + "camlistore.org/third_party/github.com/cznic/kv" +) + +// Open opens the named kv DB file for reading/writing. It +// creates the file if it does not exist yet. +func Open(dbFile string, opts *kv.Options) (*kv.DB, error) { + createOpen := kv.Open + verb := "opening" + if _, err := os.Stat(dbFile); os.IsNotExist(err) { + createOpen = kv.Create + verb = "creating" + } + if opts == nil { + opts = &kv.Options{} + } + if opts.Locker == nil { + opts.Locker = func(dbFile string) (io.Closer, error) { + lkfile := dbFile + ".lock" + cl, err := lock.Lock(lkfile) + if err != nil { + return nil, fmt.Errorf("failed to acquire lock on %s: %v", lkfile, err) + } + return cl, nil + } + } + if v, _ := strconv.ParseBool(os.Getenv("CAMLI_KV_VERIFY")); v { + opts.VerifyDbBeforeOpen = true + opts.VerifyDbAfterOpen = true + opts.VerifyDbBeforeClose = true + opts.VerifyDbAfterClose = true + } + db, err := createOpen(dbFile, opts) + if err != nil { + return nil, fmt.Errorf("error %s %s: %v", verb, dbFile, err) + } + return db, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/leak/leak.go b/vendor/github.com/camlistore/camlistore/pkg/leak/leak.go new file mode 100644 index 00000000..5bc2696e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/leak/leak.go @@ -0,0 +1,74 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package leak + +import ( + "bytes" + "fmt" + "log" + "runtime" +) + +// A Checker checks for leaks. +type Checker struct { + pc []uintptr // nil once closed +} + +// NewChecker returns a Checker, remembering the stack trace. +func NewChecker() *Checker { + pc := make([]uintptr, 50) + ch := &Checker{pc[:runtime.Callers(0, pc)]} + runtime.SetFinalizer(ch, (*Checker).finalize) + return ch +} + +func (c *Checker) Close() { + if c != nil { + c.pc = nil + } +} + +func (c *Checker) finalize() { + if testHookFinalize != nil { + defer testHookFinalize() + } + if c == nil || c.pc == nil { + return + } + var buf bytes.Buffer + buf.WriteString("Leak at:\n") + for _, pc := range c.pc { + f := runtime.FuncForPC(pc) + if f == nil { + break + } + file, line := f.FileLine(f.Entry()) + fmt.Fprintf(&buf, " %s:%d\n", file, line) + } + onLeak(c, buf.String()) +} + +// testHookFinalize optionally specifies a function to run after +// finalization. For tests. +var testHookFinalize func() + +// onLeak is changed by tests. +var onLeak = logLeak + +func logLeak(c *Checker, stack string) { + log.Println(stack) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/leak/leak_test.go b/vendor/github.com/camlistore/camlistore/pkg/leak/leak_test.go new file mode 100644 index 00000000..84cb8609 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/leak/leak_test.go @@ -0,0 +1,74 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package leak + +import ( + "runtime" + "strings" + "sync" + "testing" + "time" +) + +func TestLeak(t *testing.T) { + testLeak(t, true, 1) +} + +func TestNoLeak(t *testing.T) { + testLeak(t, false, 0) +} + +func testLeak(t *testing.T, leak bool, want int) { + defer func() { + testHookFinalize = nil + onLeak = logLeak + }() + var mu sync.Mutex // guards leaks + var leaks []string + onLeak = func(_ *Checker, stack string) { + mu.Lock() + defer mu.Unlock() + leaks = append(leaks, stack) + } + finalizec := make(chan bool) + testHookFinalize = func() { + finalizec <- true + } + + c := make(chan bool) + go func() { + ch := NewChecker() + if !leak { + ch.Close() + } + c <- true + }() + <-c + go runtime.GC() + select { + case <-time.After(5 * time.Second): + t.Error("timeout waiting for finalization") + case <-finalizec: + } + mu.Lock() // no need to unlock + if len(leaks) != want { + t.Errorf("got %d leaks; want %d", len(leaks), want) + } + if len(leaks) == 1 && !strings.Contains(leaks[0], "leak_test.go") { + t.Errorf("Leak stack doesn't contain leak_test.go: %s", leaks[0]) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/legal/legal.go b/vendor/github.com/camlistore/camlistore/pkg/legal/legal.go new file mode 100644 index 00000000..a75283fd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/legal/legal.go @@ -0,0 +1,50 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package legal provides project-wide storage for compiled-in licenses. +package legal + +var licenses []string + +func init() { + RegisterLicense(` +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +`) +} + +// RegisterLicense stores the license text. +// It doesn't check whether the text was already present. +func RegisterLicense(text string) { + licenses = append(licenses, text) + return +} + +// Licenses returns a slice of the licenses. +func Licenses() []string { + return licenses +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/legal/legal_test.go b/vendor/github.com/camlistore/camlistore/pkg/legal/legal_test.go new file mode 100644 index 00000000..de30bac7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/legal/legal_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package legal + +import ( + "testing" +) + +func TestRegisterLicense(t *testing.T) { + initial := len(licenses) + RegisterLicense("dummy") + if initial+1 != len(licenses) { + t.Fatal("didn't add a license") + } +} + +func TestLicenses(t *testing.T) { + licenses := Licenses() + if len(licenses) < 2 { + t.Fatal("no second license text") + } + if licenses[1] != "dummy" { + t.Error("license text mismatch") + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/legal/legalprint/legalprint.go b/vendor/github.com/camlistore/camlistore/pkg/legal/legalprint/legalprint.go new file mode 100644 index 00000000..e4a3205a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/legal/legalprint/legalprint.go @@ -0,0 +1,42 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package legalprint provides a printing helper for the legal package. +package legalprint + +import ( + "flag" + "fmt" + "io" + + "camlistore.org/pkg/legal" +) + +var ( + flagLegal = flag.Bool("legal", false, "show licenses") +) + +// MaybePrint will print the licenses if flagLegal has been set. +// It will return the value of the flagLegal. 
+func MaybePrint(out io.Writer) bool { + if !*flagLegal { + return false + } + for _, text := range legal.Licenses() { + fmt.Fprintln(out, text) + } + return true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/lru/cache.go b/vendor/github.com/camlistore/camlistore/pkg/lru/cache.go new file mode 100644 index 00000000..228b088e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/lru/cache.go @@ -0,0 +1,109 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import ( + "container/list" + "sync" +) + +// Cache is an LRU cache, safe for concurrent access. +type Cache struct { + maxEntries int + + mu sync.Mutex + ll *list.List + cache map[string]*list.Element +} + +// *entry is the type stored in each *list.Element. +type entry struct { + key string + value interface{} +} + +// New returns a new cache with the provided maximum items. +func New(maxEntries int) *Cache { + return &Cache{ + maxEntries: maxEntries, + ll: list.New(), + cache: make(map[string]*list.Element), + } +} + +// Add adds the provided key and value to the cache, evicting +// an old item if necessary. +func (c *Cache) Add(key string, value interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + + // Already in cache? 
+ if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + + // Add to cache if not present + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + + if c.ll.Len() > c.maxEntries { + c.removeOldest() + } +} + +// Get fetches the key's value from the cache. +// The ok result will be true if the item was found. +func (c *Cache) Get(key string) (value interface{}, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// RemoveOldest removes the oldest item in the cache and returns its key and value. +// If the cache is empty, the empty string and nil are returned. +func (c *Cache) RemoveOldest() (key string, value interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + return c.removeOldest() +} + +// note: must hold c.mu +func (c *Cache) removeOldest() (key string, value interface{}) { + ele := c.ll.Back() + if ele == nil { + return + } + c.ll.Remove(ele) + ent := ele.Value.(*entry) + delete(c.cache, ent.key) + return ent.key, ent.value + +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.ll.Len() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/lru/cache_test.go b/vendor/github.com/camlistore/camlistore/pkg/lru/cache_test.go new file mode 100644 index 00000000..48473266 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/lru/cache_test.go @@ -0,0 +1,71 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lru + +import ( + "reflect" + "testing" +) + +func TestLRU(t *testing.T) { + c := New(2) + + expectMiss := func(k string) { + v, ok := c.Get(k) + if ok { + t.Fatalf("expected cache miss on key %q but hit value %v", k, v) + } + } + + expectHit := func(k string, ev interface{}) { + v, ok := c.Get(k) + if !ok { + t.Fatalf("expected cache(%q)=%v; but missed", k, ev) + } + if !reflect.DeepEqual(v, ev) { + t.Fatalf("expected cache(%q)=%v; but got %v", k, ev, v) + } + } + + expectMiss("1") + c.Add("1", "one") + expectHit("1", "one") + + c.Add("2", "two") + expectHit("1", "one") + expectHit("2", "two") + + c.Add("3", "three") + expectHit("3", "three") + expectHit("2", "two") + expectMiss("1") +} + +func TestRemoveOldest(t *testing.T) { + c := New(2) + c.Add("1", "one") + c.Add("2", "two") + if k, v := c.RemoveOldest(); k != "1" || v != "one" { + t.Fatalf("oldest = %q, %q; want 1, one", k, v) + } + if k, v := c.RemoveOldest(); k != "2" || v != "two" { + t.Fatalf("oldest = %q, %q; want 2, two", k, v) + } + if k, v := c.RemoveOldest(); k != "" || v != nil { + t.Fatalf("oldest = %v, %v; want \"\", nil", k, v) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/magic.go b/vendor/github.com/camlistore/camlistore/pkg/magic/magic.go new file mode 100644 index 00000000..7495dba9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/magic/magic.go @@ -0,0 +1,118 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+nYou may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package magic implements MIME type sniffing of data based on the +// well-known "magic" number prefixes in the file. +package magic + +import ( + "bytes" + "io" + "net/http" + "strings" +) + +type prefixEntry struct { + prefix []byte + mtype string +} + +// usable source: http://www.garykessler.net/library/file_sigs.html +// mime types: http://www.iana.org/assignments/media-types/media-types.xhtml +var prefixTable = []prefixEntry{ + {[]byte("GIF87a"), "image/gif"}, + {[]byte("GIF89a"), "image/gif"}, // TODO: Others? + {[]byte("\xff\xd8\xff\xe2"), "image/jpeg"}, + {[]byte("\xff\xd8\xff\xe1"), "image/jpeg"}, + {[]byte("\xff\xd8\xff\xe0"), "image/jpeg"}, + {[]byte("\xff\xd8\xff\xdb"), "image/jpeg"}, + {[]byte("\x49\x49\x2a\x00\x10\x00\x00\x00\x43\x52\x02"), "image/cr2"}, + {[]byte{137, 'P', 'N', 'G', '\r', '\n', 26, 10}, "image/png"}, + {[]byte{0x49, 0x20, 0x49}, "image/tiff"}, + {[]byte{0x49, 0x49, 0x2A, 0}, "image/tiff"}, + {[]byte{0x4D, 0x4D, 0, 0x2A}, "image/tiff"}, + {[]byte{0x4D, 0x4D, 0, 0x2B}, "image/tiff"}, + {[]byte("8BPS"), "image/vnd.adobe.photoshop"}, + {[]byte("gimp xcf "), "image/xcf"}, + {[]byte("-----BEGIN PGP PUBLIC KEY BLOCK---"), "text/x-openpgp-public-key"}, + {[]byte("fLaC\x00\x00\x00"), "audio/flac"}, + {[]byte{'I', 'D', '3'}, "audio/mpeg"}, + {[]byte{0, 0, 1, 0xB7}, "video/mpeg"}, + {[]byte{0, 0, 0, 0x14, 0x66, 0x74, 0x79, 0x70, 0x71, 0x74, 0x20, 0x20}, "video/quicktime"}, + {[]byte{0, 0x6E, 0x1E, 0xF0}, "application/vnd.ms-powerpoint"}, + {[]byte{0x1A, 0x45, 0xDF, 0xA3}, "video/webm"}, + {[]byte("FLV\x01"), 
"application/vnd.adobe.flash.video"}, + {[]byte{0x1F, 0x8B, 0x08}, "application/gzip"}, + {[]byte{0x37, 0x7A, 0xBC, 0xAF, 0x27, 0x1C}, "application/x-7z-compressed"}, + {[]byte("BZh"), "application/bzip2"}, + {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0}, "application/x-xz"}, + {[]byte{'P', 'K', 3, 4, 0x0A, 0, 2, 0}, "application/epub+zip"}, + {[]byte{0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1}, "application/vnd.ms-word"}, + {[]byte{'P', 'K', 3, 4, 0x0A, 0x14, 0, 6, 0}, "application/vnd.openxmlformats-officedocument.custom-properties+xml"}, + {[]byte{'P', 'K', 3, 4}, "application/zip"}, + {[]byte("%PDF"), "application/pdf"}, + {[]byte("{rtf"), "text/rtf1"}, + {[]byte("BEGIN:VCARD\x0D\x0A"), "text/vcard"}, + {[]byte("Return-Path: "), "message/rfc822"}, + + // TODO(bradfitz): popular audio & video formats at least +} + +// MIMEType returns the MIME type from the data in the provided header +// of the data. +// It returns the empty string if the MIME type can't be determined. +func MIMEType(hdr []byte) string { + hlen := len(hdr) + for _, pte := range prefixTable { + plen := len(pte.prefix) + if hlen > plen && bytes.Equal(hdr[:plen], pte.prefix) { + return pte.mtype + } + } + t := http.DetectContentType(hdr) + t = strings.Replace(t, "; charset=utf-8", "", 1) + if t != "application/octet-stream" && t != "text/plain" { + return t + } + return "" +} + +// MIMETypeFromReader takes a reader, sniffs the beginning of it, +// and returns the mime (if sniffed, else "") and a new reader +// that's the concatenation of the bytes sniffed and the remaining +// reader. 
+func MIMETypeFromReader(r io.Reader) (mime string, reader io.Reader) { + var buf bytes.Buffer + _, err := io.Copy(&buf, io.LimitReader(r, 1024)) + mime = MIMEType(buf.Bytes()) + if err != nil { + return mime, io.MultiReader(&buf, errReader{err}) + } + return mime, io.MultiReader(&buf, r) +} + +// MIMETypeFromReader takes a ReaderAt, sniffs the beginning of it, +// and returns the MIME type if sniffed, else the empty string. +func MIMETypeFromReaderAt(ra io.ReaderAt) (mime string) { + var buf [1024]byte + n, _ := ra.ReadAt(buf[:], 0) + return MIMEType(buf[:n]) +} + +// errReader is an io.Reader which just returns err. +type errReader struct{ err error } + +func (er errReader) Read([]byte) (int, error) { return 0, er.err } diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/magic_test.go b/vendor/github.com/camlistore/camlistore/pkg/magic/magic_test.go new file mode 100644 index 00000000..633b7b5a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/magic/magic_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +nYou may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package magic + +import ( + "errors" + "io" + "io/ioutil" + "strings" + "testing" +) + +type magicTest struct { + fileName, data string // one of these set + want string +} + +var tests = []magicTest{ + {fileName: "smile.jpg", want: "image/jpeg"}, + {fileName: "smile.png", want: "image/png"}, + {fileName: "smile.psd", want: "image/vnd.adobe.photoshop"}, + {fileName: "smile.tiff", want: "image/tiff"}, + {fileName: "smile.xcf", want: "image/xcf"}, + {fileName: "smile.gif", want: "image/gif"}, + {fileName: "foo.tar.gz", want: "application/gzip"}, + {fileName: "foo.tar.xz", want: "application/x-xz"}, + {fileName: "foo.tbz2", want: "application/bzip2"}, + {fileName: "foo.zip", want: "application/zip"}, + {fileName: "magic.pdf", want: "application/pdf"}, + {data: "foo", want: "text/html"}, + {data: "\xff", want: ""}, +} + +func TestMagic(t *testing.T) { + for i, tt := range tests { + var err error + data := []byte(tt.data) + if tt.fileName != "" { + data, err = ioutil.ReadFile("testdata/" + tt.fileName) + if err != nil { + t.Fatalf("Error reading %s: %v", tt.fileName, + err) + } + } + mime := MIMEType(data) + if mime != tt.want { + t.Errorf("%d. got %q; want %q", i, mime, tt.want) + } + } +} + +func TestMIMETypeFromReader(t *testing.T) { + someErr := errors.New("some error") + const content = "foobar" + mime, r := MIMETypeFromReader(io.MultiReader( + strings.NewReader(content), + &onceErrReader{someErr}, + )) + if want := "text/html"; mime != want { + t.Errorf("mime = %q; want %q", mime, want) + } + slurp, err := ioutil.ReadAll(r) + if string(slurp) != "foobar" { + t.Errorf("read = %q; want %q", slurp, content) + } + if err != someErr { + t.Errorf("read error = %v; want %v", err, someErr) + } +} + +// errReader is an io.Reader which just returns err, once. 
+type onceErrReader struct{ err error } + +func (er *onceErrReader) Read([]byte) (int, error) { + if er.err != nil { + err := er.err + er.err = nil + return 0, err + } + return 0, io.EOF +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.tar b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.tar new file mode 100644 index 00000000..495006e3 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.tar differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.tar.gz b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.tar.gz new file mode 100644 index 00000000..f735f229 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.tar.gz differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.tar.xz b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.tar.xz new file mode 100644 index 00000000..bfcceda1 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.tar.xz differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.tbz2 b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.tbz2 new file mode 100644 index 00000000..a6acd040 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.tbz2 differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.zip b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.zip new file mode 100644 index 00000000..9ee5fe50 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/foo.zip differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/magic.pdf b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/magic.pdf new file mode 100644 index 00000000..0e9340c0 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/magic.pdf 
differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.bmp b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.bmp new file mode 100644 index 00000000..ef4ac5d2 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.bmp differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.gif b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.gif new file mode 100644 index 00000000..74674979 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.gif differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.ico b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.ico new file mode 100644 index 00000000..4af0d637 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.ico differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.jpg b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.jpg new file mode 100644 index 00000000..0f832595 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.png b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.png new file mode 100644 index 00000000..0de7d14d Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.png differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.psd b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.psd new file mode 100644 index 00000000..6be6eb8a Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.psd differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.tiff b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.tiff new file mode 
100644 index 00000000..cb23fc1c Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.tiff differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.xcf b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.xcf new file mode 100644 index 00000000..3e0914f1 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/magic/testdata/smile.xcf differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/media/audio.go b/vendor/github.com/camlistore/camlistore/pkg/media/audio.go new file mode 100644 index 00000000..2611c9af --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/media/audio.go @@ -0,0 +1,196 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package media provides means for querying information about audio and video data. +package media + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "time" + + "camlistore.org/pkg/types" +) + +// ID3v1TagLength is the length of an MP3 ID3v1 tag in bytes. +const ID3v1TagLength = 128 + +// id3v1Magic is the byte sequence appearing at the beginning of an ID3v1 tag. +var id3v1Magic = []byte("TAG") + +// HasID3V1Tag returns true if an ID3v1 tag is present at the end of r. 
+func HasID3v1Tag(r types.SizeReaderAt) (bool, error) { + if r.Size() < ID3v1TagLength { + return false, nil + } + + buf := make([]byte, len(id3v1Magic), len(id3v1Magic)) + if _, err := r.ReadAt(buf, r.Size()-ID3v1TagLength); err != nil { + return false, fmt.Errorf("Failed to read ID3v1 data: %v", err) + } + if bytes.Equal(buf, id3v1Magic) { + return true, nil + } + return false, nil +} + +type mpegVersion int + +const ( + mpegVersion1 mpegVersion = iota + mpegVersion2 + mpegVersion2_5 +) + +// mpegVersionsById maps from a 2-bit version ID from an MPEG header to the corresponding MPEG audio version. +var mpegVersionsById = map[uint32]mpegVersion{ + 0x0: mpegVersion2_5, + 0x2: mpegVersion2, + 0x3: mpegVersion1, +} + +type mpegLayer int + +const ( + mpegLayer1 mpegLayer = iota + mpegLayer2 + mpegLayer3 +) + +// mpegLayersByIndex maps from a 2-bit layer index from an MPEG header to the corresponding MPEG layer. +var mpegLayersByIndex = map[uint32]mpegLayer{ + 0x1: mpegLayer3, + 0x2: mpegLayer2, + 0x3: mpegLayer1, +} + +// mpegBitrates is indexed by a 4-bit bitrate index from an MPEG header. Values are in kilobits. 
+var mpegBitrates = map[mpegVersion]map[mpegLayer][16]int{ + mpegVersion1: { + mpegLayer1: {0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 0}, + mpegLayer2: {0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 0}, + mpegLayer3: {0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 0}, + }, + mpegVersion2: { + mpegLayer1: {0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0}, + mpegLayer2: {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0}, + mpegLayer3: {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0}, + }, + mpegVersion2_5: { + mpegLayer1: {0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0}, + mpegLayer2: {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0}, + mpegLayer3: {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0}, + }, +} + +// mpegSamplingRates is indexed by a 2-bit sampling rate index from an MPEG header. Values are in hertz. +var mpegSamplingRates = map[mpegVersion][4]int{ + mpegVersion1: {44100, 48000, 32000, 0}, + mpegVersion2: {22050, 24000, 16000, 0}, + mpegVersion2_5: {11025, 12000, 8000, 0}, +} + +var mpegSamplesPerFrame = map[mpegVersion]map[mpegLayer]int{ + mpegVersion1: { + mpegLayer1: 384, + mpegLayer2: 1152, + mpegLayer3: 1152, + }, + mpegVersion2: { + mpegLayer1: 384, + mpegLayer2: 1152, + mpegLayer3: 576, + }, + mpegVersion2_5: { + mpegLayer1: 384, + mpegLayer2: 1152, + mpegLayer3: 576, + }, +} + +var xingHeaderName = []byte("Xing") +var infoHeaderName = []byte("Info") + +// GetMPEGAudioDuration reads the first frame in r and returns the audio length with millisecond precision. +// Format details are at http://www.codeproject.com/Articles/8295/MPEG-Audio-Frame-Header. 
+func GetMPEGAudioDuration(r types.SizeReaderAt) (time.Duration, error) { + var header uint32 + if err := binary.Read(io.NewSectionReader(r, 0, r.Size()), binary.BigEndian, &header); err != nil { + return 0, fmt.Errorf("Failed to read MPEG frame header: %v", err) + } + getBits := func(startBit, numBits uint) uint32 { + return (header << startBit) >> (32 - numBits) + } + + if getBits(0, 11) != 0x7ff { + return 0, errors.New("Missing sync bits in MPEG frame header") + } + var version mpegVersion + var ok bool + if version, ok = mpegVersionsById[getBits(11, 2)]; !ok { + return 0, errors.New("Invalid MPEG version index") + } + var layer mpegLayer + if layer, ok = mpegLayersByIndex[getBits(13, 2)]; !ok { + return 0, errors.New("Invalid MPEG layer index") + } + bitrate := mpegBitrates[version][layer][getBits(16, 4)] + if bitrate == 0 { + return 0, errors.New("Invalid MPEG bitrate") + } + samplingRate := mpegSamplingRates[version][getBits(20, 2)] + if samplingRate == 0 { + return 0, errors.New("Invalid MPEG sample rate") + } + samplesPerFrame := mpegSamplesPerFrame[version][layer] + + var xingHeaderStart int64 = 4 + // Skip "side information". + if getBits(24, 2) == 0x3 { // Channel mode; 0x3 is mono. + xingHeaderStart += 17 + } else { + xingHeaderStart += 32 + } + // Skip 16-bit CRC if present. + if getBits(15, 1) == 0x0 { // 0x0 means "has protection". 
+ xingHeaderStart += 2 + } + + b := make([]byte, 12, 12) + if _, err := r.ReadAt(b, xingHeaderStart); err != nil { + return 0, fmt.Errorf("Unable to read Xing header at %d: %v", xingHeaderStart, err) + } + var ms int64 + if bytes.Equal(b[0:4], xingHeaderName) || bytes.Equal(b[0:4], infoHeaderName) { + r := bytes.NewReader(b[4:]) + var xingFlags uint32 + binary.Read(r, binary.BigEndian, &xingFlags) + if xingFlags&0x1 == 0x0 { + return 0, fmt.Errorf("Xing header at %d lacks number of frames", xingHeaderStart) + } + var numFrames uint32 + binary.Read(r, binary.BigEndian, &numFrames) + ms = int64(samplesPerFrame) * int64(numFrames) * 1000 / int64(samplingRate) + } else { + // Okay, no Xing VBR header. Assume that the file has a constant bitrate. + // (The other alternative is to read the whole file and examine each frame.) + ms = r.Size() / int64(bitrate) * 8 + } + return time.Duration(ms) * time.Millisecond, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/media/audio_test.go b/vendor/github.com/camlistore/camlistore/pkg/media/audio_test.go new file mode 100644 index 00000000..ea514400 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/media/audio_test.go @@ -0,0 +1,89 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package media + +import ( + "io" + "os" + "path/filepath" + "testing" + "time" +) + +// openFile opens fn, a file within the testdata dir, and returns an FD and the file's size. 
+func openFile(fn string) (*os.File, int64, error) { + f, err := os.Open(filepath.Join("testdata", fn)) + if err != nil { + return nil, 0, err + } + s, err := f.Stat() + if err != nil { + f.Close() + return nil, 0, err + } + return f, s.Size(), nil +} + +func TestHasID3v1Tag(t *testing.T) { + tests := []struct { + fn string + hasTag bool + }{ + {"xing_header.mp3", false}, + {"id3v1.mp3", true}, + } + for _, tt := range tests { + f, s, err := openFile(tt.fn) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + hasTag, err := HasID3v1Tag(io.NewSectionReader(f, 0, s)) + if err != nil { + t.Fatal(err) + } + if hasTag != tt.hasTag { + t.Errorf("Expected %v for %s but got %v", tt.hasTag, tt.fn, hasTag) + } + } +} + +func TestGetMPEGAudioDuration(t *testing.T) { + tests := []struct { + fn string + d time.Duration + }{ + {"128_cbr.mp3", time.Duration(1088) * time.Millisecond}, + {"xing_header.mp3", time.Duration(1097) * time.Millisecond}, + } + for _, tt := range tests { + f, s, err := openFile(tt.fn) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + d, err := GetMPEGAudioDuration(io.NewSectionReader(f, 0, s)) + if err != nil { + t.Fatal(err) + } + if d != tt.d { + t.Errorf("Expected %d for %s but got %d", tt.d, tt.fn, d) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/media/testdata/128_cbr.mp3 b/vendor/github.com/camlistore/camlistore/pkg/media/testdata/128_cbr.mp3 new file mode 100644 index 00000000..196f3148 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/media/testdata/128_cbr.mp3 differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/media/testdata/id3v1.mp3 b/vendor/github.com/camlistore/camlistore/pkg/media/testdata/id3v1.mp3 new file mode 100644 index 00000000..7cc9dddd Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/media/testdata/id3v1.mp3 differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/media/testdata/xing_header.mp3 
b/vendor/github.com/camlistore/camlistore/pkg/media/testdata/xing_header.mp3 new file mode 100644 index 00000000..8dc443b3 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/media/testdata/xing_header.mp3 differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/misc/amazon/s3/auth.go b/vendor/github.com/camlistore/camlistore/pkg/misc/amazon/s3/auth.go new file mode 100644 index 00000000..d8c45e24 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/misc/amazon/s3/auth.go @@ -0,0 +1,206 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3 + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" +) + +// See http://docs.amazonwebservices.com/AmazonS3/latest/dev/index.html?RESTAuthentication.html + +type Auth struct { + AccessKey string + SecretAccessKey string + + // Hostname is the S3 hostname to use. + // If empty, the standard US region of "s3.amazonaws.com" is + // used. 
+ Hostname string +} + +const standardUSRegionAWS = "s3.amazonaws.com" + +func (a *Auth) hostname() string { + if a.Hostname != "" { + return a.Hostname + } + return standardUSRegionAWS +} + +func (a *Auth) SignRequest(req *http.Request) { + if date := req.Header.Get("Date"); date == "" { + req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat)) + } + hm := hmac.New(sha1.New, []byte(a.SecretAccessKey)) + ss := a.stringToSign(req) + // log.Printf("String to sign: %q (%x)", ss, ss) + io.WriteString(hm, ss) + + authHeader := new(bytes.Buffer) + fmt.Fprintf(authHeader, "AWS %s:", a.AccessKey) + encoder := base64.NewEncoder(base64.StdEncoding, authHeader) + encoder.Write(hm.Sum(nil)) + encoder.Close() + req.Header.Set("Authorization", authHeader.String()) +} + +func firstNonEmptyString(strs ...string) string { + for _, s := range strs { + if s != "" { + return s + } + } + return "" +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// Content-MD5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedAmzHeaders + +// CanonicalizedResource; +func (a *Auth) stringToSign(req *http.Request) string { + buf := new(bytes.Buffer) + buf.WriteString(req.Method) + buf.WriteByte('\n') + buf.WriteString(req.Header.Get("Content-MD5")) + buf.WriteByte('\n') + buf.WriteString(req.Header.Get("Content-Type")) + buf.WriteByte('\n') + if req.Header.Get("x-amz-date") == "" { + buf.WriteString(req.Header.Get("Date")) + } + buf.WriteByte('\n') + a.writeCanonicalizedAmzHeaders(buf, req) + a.writeCanonicalizedResource(buf, req) + return buf.String() +} + +func hasPrefixCaseInsensitive(s, pfx string) bool { + if len(pfx) > len(s) { + return false + } + shead := s[:len(pfx)] + if shead == pfx { + return true + } + shead = strings.ToLower(shead) + return shead == pfx || shead == strings.ToLower(pfx) +} + +func (a *Auth) writeCanonicalizedAmzHeaders(buf *bytes.Buffer, req *http.Request) { + amzHeaders := make([]string, 0) + vals := 
make(map[string][]string) + for k, vv := range req.Header { + if hasPrefixCaseInsensitive(k, "x-amz-") { + lk := strings.ToLower(k) + amzHeaders = append(amzHeaders, lk) + vals[lk] = vv + } + } + sort.Strings(amzHeaders) + for _, k := range amzHeaders { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + if strings.Contains(v, "\n") { + // TODO: "Unfold" long headers that + // span multiple lines (as allowed by + // RFC 2616, section 4.2) by replacing + // the folding white-space (including + // new-line) by a single space. + buf.WriteString(v) + } else { + buf.WriteString(v) + } + } + buf.WriteByte('\n') + } +} + +// Must be sorted: +var subResList = []string{"acl", "lifecycle", "location", "logging", "notification", "partNumber", "policy", "requestPayment", "torrent", "uploadId", "uploads", "versionId", "versioning", "versions", "website"} + +// From the Amazon docs: +// +// CanonicalizedResource = [ "/" + Bucket ] + +// + +// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +func (a *Auth) writeCanonicalizedResource(buf *bytes.Buffer, req *http.Request) { + bucket := a.bucketFromHostname(req) + if bucket != "" { + buf.WriteByte('/') + buf.WriteString(bucket) + } + buf.WriteString(req.URL.Path) + if req.URL.RawQuery != "" { + n := 0 + vals, _ := url.ParseQuery(req.URL.RawQuery) + for _, subres := range subResList { + if vv, ok := vals[subres]; ok && len(vv) > 0 { + n++ + if n == 1 { + buf.WriteByte('?') + } else { + buf.WriteByte('&') + } + buf.WriteString(subres) + if len(vv[0]) > 0 { + buf.WriteByte('=') + buf.WriteString(url.QueryEscape(vv[0])) + } + } + } + } +} + +// hasDotSuffix reports whether s ends with "." + suffix. +func hasDotSuffix(s string, suffix string) bool { + return len(s) >= len(suffix)+1 && strings.HasSuffix(s, suffix) && s[len(s)-len(suffix)-1] == '.' 
+} + +func (a *Auth) bucketFromHostname(req *http.Request) string { + host := req.Host + if host == "" { + host = req.URL.Host + } + if host == a.hostname() { + return "" + } + if hostSuffix := a.hostname(); hasDotSuffix(host, hostSuffix) { + return host[:len(host)-len(hostSuffix)-1] + } + if lastColon := strings.LastIndex(host, ":"); lastColon != -1 { + return host[:lastColon] + } + return host +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/misc/amazon/s3/auth_test.go b/vendor/github.com/camlistore/camlistore/pkg/misc/amazon/s3/auth_test.go new file mode 100644 index 00000000..531667d1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/misc/amazon/s3/auth_test.go @@ -0,0 +1,139 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package s3 + +import ( + "bufio" + "fmt" + "net/http" + "strings" + "testing" +) + +type reqAndExpected struct { + req, expected string +} + +func req(s string) *http.Request { + req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(s))) + if err != nil { + panic(fmt.Sprintf("bad request in test: %v (error: %v)", req, err)) + } + return req +} + +func TestStringToSign(t *testing.T) { + var a Auth + tests := []reqAndExpected{ + {`GET /photos/puppy.jpg HTTP/1.1 +Host: johnsmith.s3.amazonaws.com +Date: Tue, 27 Mar 2007 19:36:42 +0000 + +`, + "GET\n\n\nTue, 27 Mar 2007 19:36:42 +0000\n/johnsmith/photos/puppy.jpg"}, + {`PUT /photos/puppy.jpg HTTP/1.1 +Content-Type: image/jpeg +Content-Length: 94328 +Host: johnsmith.s3.amazonaws.com +Date: Tue, 27 Mar 2007 21:15:45 +0000 + +`, + "PUT\n\nimage/jpeg\nTue, 27 Mar 2007 21:15:45 +0000\n/johnsmith/photos/puppy.jpg"}, + {`GET /?prefix=photos&max-keys=50&marker=puppy HTTP/1.1 +User-Agent: Mozilla/5.0 +Host: johnsmith.s3.amazonaws.com +Date: Tue, 27 Mar 2007 19:42:41 +0000 + +`, + "GET\n\n\nTue, 27 Mar 2007 19:42:41 +0000\n/johnsmith/"}, + {`DELETE /johnsmith/photos/puppy.jpg HTTP/1.1 +User-Agent: dotnet +Host: s3.amazonaws.com +Date: Tue, 27 Mar 2007 21:20:27 +0000 +x-amz-date: Tue, 27 Mar 2007 21:20:26 +0000 + +`, + "DELETE\n\n\n\nx-amz-date:Tue, 27 Mar 2007 21:20:26 +0000\n/johnsmith/photos/puppy.jpg"}, + {`PUT /db-backup.dat.gz HTTP/1.1 +User-Agent: curl/7.15.5 +Host: static.johnsmith.net:8080 +Date: Tue, 27 Mar 2007 21:06:08 +0000 +x-amz-acl: public-read +content-type: application/x-download +Content-MD5: 4gJE4saaMU4BqNR0kLY+lw== +X-Amz-Meta-ReviewedBy: joe@johnsmith.net +X-Amz-Meta-ReviewedBy: jane@johnsmith.net +X-Amz-Meta-FileChecksum: 0x02661779 +X-Amz-Meta-ChecksumAlgorithm: crc32 +Content-Disposition: attachment; filename=database.dat +Content-Encoding: gzip +Content-Length: 5913339 + +`, + "PUT\n4gJE4saaMU4BqNR0kLY+lw==\napplication/x-download\nTue, 27 Mar 2007 21:06:08 
+0000\nx-amz-acl:public-read\nx-amz-meta-checksumalgorithm:crc32\nx-amz-meta-filechecksum:0x02661779\nx-amz-meta-reviewedby:joe@johnsmith.net,jane@johnsmith.net\n/static.johnsmith.net/db-backup.dat.gz"}, + } + for idx, test := range tests { + got := a.stringToSign(req(test.req)) + if got != test.expected { + t.Errorf("test %d: expected %q", idx, test.expected) + t.Errorf("test %d: got %q", idx, got) + } + } +} + +func TestBucketFromHostname(t *testing.T) { + var a Auth + tests := []reqAndExpected{ + {"GET / HTTP/1.0\n\n", ""}, + {"GET / HTTP/1.0\nHost: s3.amazonaws.com\n\n", ""}, + {"GET / HTTP/1.0\nHost: foo.s3.amazonaws.com\n\n", "foo"}, + {"GET / HTTP/1.0\nHost: foo.com:123\n\n", "foo.com"}, + {"GET / HTTP/1.0\nHost: bar.com\n\n", "bar.com"}, + } + for idx, test := range tests { + got := a.bucketFromHostname(req(test.req)) + if got != test.expected { + t.Errorf("test %d: expected %q; got %q", idx, test.expected, got) + } + } +} + +func TestSignRequest(t *testing.T) { + r := req("GET /foo HTTP/1.1\n\n") + auth := &Auth{AccessKey: "key", SecretAccessKey: "secretkey"} + auth.SignRequest(r) + if r.Header.Get("Date") == "" { + t.Error("expected a Date set") + } + r.Header.Set("Date", "Sat, 02 Apr 2011 04:23:52 GMT") + auth.SignRequest(r) + if e, g := r.Header.Get("Authorization"), "AWS key:kHpCR/N7Rw3PwRlDd8+5X40CFVc="; e != g { + t.Errorf("got header %q; expected %q", g, e) + } +} + +func TestHasDotSuffix(t *testing.T) { + if !hasDotSuffix("foo.com", "com") { + t.Fail() + } + if hasDotSuffix("foocom", "com") { + t.Fail() + } + if hasDotSuffix("com", "com") { + t.Fail() + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/misc/amazon/s3/client.go b/vendor/github.com/camlistore/camlistore/pkg/misc/amazon/s3/client.go new file mode 100644 index 00000000..a7b44e95 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/misc/amazon/s3/client.go @@ -0,0 +1,445 @@ +/* +Copyright 2011 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package s3 implements a generic Amazon S3 client, not specific +// to Camlistore. +package s3 + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/httputil" +) + +const maxList = 1000 + +// Client is an Amazon S3 client. +type Client struct { + *Auth + Transport http.RoundTripper // or nil for the default +} + +type Bucket struct { + Name string + CreationDate string // 2006-02-03T16:45:09.000Z +} + +func (c *Client) transport() http.RoundTripper { + if c.Transport != nil { + return c.Transport + } + return http.DefaultTransport +} + +// bucketURL returns the URL prefix of the bucket, with trailing slash +func (c *Client) bucketURL(bucket string) string { + if IsValidBucket(bucket) && !strings.Contains(bucket, ".") { + return fmt.Sprintf("https://%s.%s/", bucket, c.hostname()) + } + return fmt.Sprintf("https://%s/%s/", c.hostname(), bucket) +} + +func (c *Client) keyURL(bucket, key string) string { + return c.bucketURL(bucket) + key +} + +func newReq(url_ string) *http.Request { + req, err := http.NewRequest("GET", url_, nil) + if err != nil { + panic(fmt.Sprintf("s3 client; invalid URL: %v", err)) + } + req.Header.Set("User-Agent", "go-camlistore-s3") + return req +} + +func (c *Client) Buckets() ([]*Bucket, error) { + req := 
newReq("https://" + c.hostname() + "/") + c.Auth.SignRequest(req) + res, err := c.transport().RoundTrip(req) + if err != nil { + return nil, err + } + defer httputil.CloseBody(res.Body) + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("s3: Unexpected status code %d fetching bucket list", res.StatusCode) + } + return parseListAllMyBuckets(res.Body) +} + +func parseListAllMyBuckets(r io.Reader) ([]*Bucket, error) { + type allMyBuckets struct { + Buckets struct { + Bucket []*Bucket + } + } + var res allMyBuckets + if err := xml.NewDecoder(r).Decode(&res); err != nil { + return nil, err + } + return res.Buckets.Bucket, nil +} + +// Returns 0, os.ErrNotExist if not on S3, otherwise reterr is real. +func (c *Client) Stat(key, bucket string) (size int64, reterr error) { + req := newReq(c.keyURL(bucket, key)) + req.Method = "HEAD" + c.Auth.SignRequest(req) + res, err := c.transport().RoundTrip(req) + if err != nil { + return 0, err + } + if res.Body != nil { + defer res.Body.Close() + } + switch res.StatusCode { + case http.StatusNotFound: + return 0, os.ErrNotExist + case http.StatusOK: + return strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64) + } + return 0, fmt.Errorf("s3: Unexpected status code %d statting object %v", res.StatusCode, key) +} + +func (c *Client) PutObject(key, bucket string, md5 hash.Hash, size int64, body io.Reader) error { + req := newReq(c.keyURL(bucket, key)) + req.Method = "PUT" + req.ContentLength = size + if md5 != nil { + b64 := new(bytes.Buffer) + encoder := base64.NewEncoder(base64.StdEncoding, b64) + encoder.Write(md5.Sum(nil)) + encoder.Close() + req.Header.Set("Content-MD5", b64.String()) + } + c.Auth.SignRequest(req) + req.Body = ioutil.NopCloser(body) + + res, err := c.transport().RoundTrip(req) + if res != nil && res.Body != nil { + defer httputil.CloseBody(res.Body) + } + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + // res.Write(os.Stderr) + return fmt.Errorf("Got response code %d 
from s3", res.StatusCode) + } + return nil +} + +type Item struct { + Key string + Size int64 +} + +type listBucketResults struct { + Contents []*Item + IsTruncated bool + MaxKeys int + Name string // bucket name + Marker string +} + +// BucketLocation returns the S3 hostname to be used with the given bucket. +func (c *Client) BucketLocation(bucket string) (location string, err error) { + if !strings.HasSuffix(c.hostname(), "amazonaws.com") { + return "", errors.New("BucketLocation not implemented for non-Amazon S3 hostnames") + } + url_ := fmt.Sprintf("https://s3.amazonaws.com/%s/?location", url.QueryEscape(bucket)) + req := newReq(url_) + c.Auth.SignRequest(req) + res, err := c.transport().RoundTrip(req) + if err != nil { + return + } + var xres xmlLocationConstraint + if err := xml.NewDecoder(res.Body).Decode(&xres); err != nil { + return "", err + } + if xres.Location == "" { + return "s3.amazonaws.com", nil + } + return "s3-" + xres.Location + ".amazonaws.com", nil +} + +// ListBucket returns 0 to maxKeys (inclusive) items from the provided +// bucket. Keys before startAt will be skipped. (This is the S3 +// 'marker' value). If the length of the returned items is equal to +// maxKeys, there is no indication whether or not the returned list is +// truncated. 
func (c *Client) ListBucket(bucket string, startAt string, maxKeys int) (items []*Item, err error) {
	if maxKeys < 0 {
		return nil, errors.New("invalid negative maxKeys")
	}
	// marker is the S3 pagination cursor: the key after which the next
	// page of results begins.
	marker := startAt
	for len(items) < maxKeys {
		// Ask for at most maxList (1000) keys per request, S3's page limit.
		fetchN := maxKeys - len(items)
		if fetchN > maxList {
			fetchN = maxList
		}
		var bres listBucketResults

		url_ := fmt.Sprintf("%s?marker=%s&max-keys=%d",
			c.bucketURL(bucket), url.QueryEscape(marker), fetchN)

		// Retry the enumerate up to maxTries (5) times with linear backoff,
		// since Amazon likes to close https connections a lot, and Go sucks
		// at dealing with it:
		// https://code.google.com/p/go/issues/detail?id=3514
		const maxTries = 5
		for try := 1; try <= maxTries; try++ {
			time.Sleep(time.Duration(try-1) * 100 * time.Millisecond)
			req := newReq(url_)
			c.Auth.SignRequest(req)
			res, err := c.transport().RoundTrip(req)
			if err != nil {
				// Transport-level failure: retry unless out of attempts.
				if try < maxTries {
					continue
				}
				return nil, err
			}
			if res.StatusCode != http.StatusOK {
				// 4xx errors are parsed and returned immediately; 5xx falls
				// through to the retry logic below (err stays nil here, so the
				// loop exits via break — NOTE(review): looks like a 5xx is
				// never actually retried; confirm against upstream).
				if res.StatusCode < 500 {
					body, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
					aerr := &Error{
						Op:     "ListBucket",
						Code:   res.StatusCode,
						Body:   body,
						Header: res.Header,
					}
					aerr.parseXML()
					res.Body.Close()
					return nil, aerr
				}
			} else {
				// Tee the body into logbuf so a parse failure can log the raw XML.
				bres = listBucketResults{}
				var logbuf bytes.Buffer
				err = xml.NewDecoder(io.TeeReader(res.Body, &logbuf)).Decode(&bres)
				if err != nil {
					log.Printf("Error parsing s3 XML response: %v for %q", err, logbuf.Bytes())
				} else if bres.MaxKeys != fetchN || bres.Name != bucket || bres.Marker != marker {
					// Sanity-check that the server echoed our request parameters.
					err = fmt.Errorf("Unexpected parse from server: %#v from: %s", bres, logbuf.Bytes())
					log.Print(err)
				}
			}
			httputil.CloseBody(res.Body)
			if err != nil {
				// NOTE(review): this bound is maxTries-1 while the transport
				// retry above uses maxTries — inconsistent; the final attempt
				// slot is never used for parse errors. Confirm intent upstream.
				if try < maxTries-1 {
					continue
				}
				log.Print(err)
				return nil, err
			}
			break
		}
		for _, it := range bres.Contents {
			if it.Key == marker && it.Key != startAt {
				// Skip first dup on pages 2 and higher.
				continue
			}
			if it.Key < startAt {
				return nil, fmt.Errorf("Unexpected response from Amazon: item key %q but wanted greater than %q", it.Key, startAt)
			}
			items = append(items, it)
			marker = it.Key
		}
		if !bres.IsTruncated {
			// log.Printf("Not truncated. so breaking. items = %d; len Contents = %d, url = %s", len(items), len(bres.Contents), url_)
			break
		}
	}
	return items, nil
}

// Get fetches the object key from bucket, returning its body and the
// Content-Length reported by S3. The caller must close body on success.
// Returns os.ErrNotExist if the object is not on S3.
func (c *Client) Get(bucket, key string) (body io.ReadCloser, size int64, err error) {
	req := newReq(c.keyURL(bucket, key))
	c.Auth.SignRequest(req)
	res, err := c.transport().RoundTrip(req)
	if err != nil {
		return
	}
	switch res.StatusCode {
	case http.StatusOK:
		return res.Body, res.ContentLength, nil
	case http.StatusNotFound:
		res.Body.Close()
		return nil, 0, os.ErrNotExist
	default:
		res.Body.Close()
		return nil, 0, fmt.Errorf("Amazon HTTP error on GET: %d", res.StatusCode)
	}
}

// GetPartial fetches part of the s3 key object in bucket.
// If length is negative, the rest of the object is returned.
// The caller must close rc.
+func (c *Client) GetPartial(bucket, key string, offset, length int64) (rc io.ReadCloser, err error) { + if offset < 0 { + return nil, errors.New("invalid negative length") + } + + req := newReq(c.keyURL(bucket, key)) + if length >= 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + } else { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } + c.Auth.SignRequest(req) + + res, err := c.transport().RoundTrip(req) + if err != nil { + return + } + switch res.StatusCode { + case http.StatusOK, http.StatusPartialContent: + return res.Body, nil + case http.StatusNotFound: + res.Body.Close() + return nil, os.ErrNotExist + case http.StatusRequestedRangeNotSatisfiable: + res.Body.Close() + return nil, blob.ErrOutOfRangeOffsetSubFetch + default: + res.Body.Close() + return nil, fmt.Errorf("Amazon HTTP error on GET: %d", res.StatusCode) + } +} + +func (c *Client) Delete(bucket, key string) error { + req := newReq(c.keyURL(bucket, key)) + req.Method = "DELETE" + c.Auth.SignRequest(req) + res, err := c.transport().RoundTrip(req) + if err != nil { + return err + } + if res != nil && res.Body != nil { + defer res.Body.Close() + } + if res.StatusCode == http.StatusNotFound || res.StatusCode == http.StatusNoContent || + res.StatusCode == http.StatusOK { + return nil + } + return fmt.Errorf("Amazon HTTP error on DELETE: %d", res.StatusCode) +} + +// IsValid reports whether bucket is a valid bucket name, per Amazon's naming restrictions. +// +// See http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html +func IsValidBucket(bucket string) bool { + l := len(bucket) + if l < 3 || l > 63 { + return false + } + + valid := false + prev := byte('.') + for i := 0; i < len(bucket); i++ { + c := bucket[i] + switch { + default: + return false + case 'a' <= c && c <= 'z': + valid = true + case '0' <= c && c <= '9': + // Is allowed, but bucketname can't be just numbers. 
+ // Therefore, don't set valid to true + case c == '-': + if prev == '.' { + return false + } + case c == '.': + if prev == '.' || prev == '-' { + return false + } + } + prev = c + } + + if prev == '-' || prev == '.' { + return false + } + return valid +} + +// Error is the type returned by some API operations. +// +// TODO: it should be more/all of them. +type Error struct { + Op string + Code int // HTTP status code + Body []byte // response body + Header http.Header // response headers + + // UsedEndpoint and AmazonCode are the XML response's Endpoint and + // Code fields, respectively. + UseEndpoint string // if a temporary redirect (wrong hostname) + AmazonCode string +} + +func (e *Error) Error() string { + if bytes.Contains(e.Body, []byte("")) { + return fmt.Sprintf("s3.%s: status %d: %s", e.Op, e.Code, e.Body) + } + return fmt.Sprintf("s3.%s: status %d", e.Op, e.Code) +} + +func (e *Error) parseXML() { + var xe xmlError + _ = xml.NewDecoder(bytes.NewReader(e.Body)).Decode(&xe) + e.AmazonCode = xe.Code + if xe.Code == "TemporaryRedirect" { + e.UseEndpoint = xe.Endpoint + } + if xe.Code == "SignatureDoesNotMatch" { + want, _ := hex.DecodeString(strings.Replace(xe.StringToSignBytes, " ", "", -1)) + log.Printf("S3 SignatureDoesNotMatch. StringToSign should be %d bytes: %q (%x)", len(want), want, want) + } + +} + +// xmlError is the Error response from Amazon. +type xmlError struct { + XMLName xml.Name `xml:"Error"` + Code string + Message string + RequestId string + Bucket string + Endpoint string + StringToSignBytes string +} + +// xmlLocationConstraint is the LocationConstraint returned from BucketLocation. 
+type xmlLocationConstraint struct { + XMLName xml.Name `xml:"LocationConstraint"` + Location string `xml:",chardata"` +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/misc/amazon/s3/client_test.go b/vendor/github.com/camlistore/camlistore/pkg/misc/amazon/s3/client_test.go new file mode 100644 index 00000000..d8cbd4e5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/misc/amazon/s3/client_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package s3 + +import ( + "net/http" + "os" + "reflect" + "strings" + "testing" +) + +var tc *Client + +func getTestClient(t *testing.T) bool { + accessKey := os.Getenv("AWS_ACCESS_KEY_ID") + secret := os.Getenv("AWS_ACCESS_KEY_SECRET") + if accessKey == "" || secret == "" { + t.Logf("Skipping test; no AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY_SECRET set in environment") + return false + } + tc = &Client{&Auth{AccessKey: accessKey, SecretAccessKey: secret}, http.DefaultTransport} + return true +} + +func TestBuckets(t *testing.T) { + if !getTestClient(t) { + return + } + tc.Buckets() +} + +func TestParseBuckets(t *testing.T) { + res := "\nownerIDFieldbobDisplayNamebucketOne2006-06-21T07:04:31.000ZbucketTwo2006-06-21T07:04:32.000Z" + buckets, err := parseListAllMyBuckets(strings.NewReader(res)) + if err != nil { + t.Fatal(err) + } + if g, w := len(buckets), 2; g != w { + t.Errorf("num parsed buckets = %d; want %d", g, w) + } + want := []*Bucket{ + {Name: "bucketOne", CreationDate: "2006-06-21T07:04:31.000Z"}, + {Name: "bucketTwo", CreationDate: "2006-06-21T07:04:32.000Z"}, + } + dump := func(v []*Bucket) { + for i, b := range v { + t.Logf("Bucket #%d: %#v", i, b) + } + } + if !reflect.DeepEqual(buckets, want) { + t.Error("mismatch; GOT:") + dump(buckets) + t.Error("WANT:") + dump(want) + } +} + +func TestValidBucketNames(t *testing.T) { + m := []struct { + in string + want bool + }{ + {"myawsbucket", true}, + {"my.aws.bucket", true}, + {"my-aws-bucket.1", true}, + {"my---bucket.1", true}, + {".myawsbucket", false}, + {"-myawsbucket", false}, + {"myawsbucket.", false}, + {"myawsbucket-", false}, + {"my..awsbucket", false}, + } + + for _, bt := range m { + got := IsValidBucket(bt.in) + if got != bt.want { + t.Errorf("func(%q) = %v; want %v", bt.in, got, bt.want) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/misc/closure/genclosuredeps/genclosuredeps.go b/vendor/github.com/camlistore/camlistore/pkg/misc/closure/genclosuredeps/genclosuredeps.go new 
file mode 100644 index 00000000..7d23df49 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/misc/closure/genclosuredeps/genclosuredeps.go @@ -0,0 +1,51 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// The genclosuredeps command, similarly to the closure depswriter.py tool, +// outputs to os.Stdout for each .js file, which namespaces +// it provides, and the namespaces it requires, hence helping +// the closure library to resolve dependencies between those files. +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "log" + "net/http" + "os" + + "camlistore.org/pkg/misc/closure" +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: genclosuredeps \n") + os.Exit(1) +} + +func main() { + flag.Parse() + args := flag.Args() + if len(args) != 1 { + usage() + } + b, err := closure.GenDeps(http.Dir(args[0])) + if err != nil { + log.Fatal(err) + } + io.Copy(os.Stdout, bytes.NewReader(b)) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/misc/closure/gendeps.go b/vendor/github.com/camlistore/camlistore/pkg/misc/closure/gendeps.go new file mode 100644 index 00000000..69178c24 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/misc/closure/gendeps.go @@ -0,0 +1,224 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package closure provides tools to help with the use of the +// closure library. +// +// See https://code.google.com/p/closure-library/ +package closure + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "os" + "regexp" + "strings" + "sync" + "time" +) + +// GenDeps returns the namespace dependencies between the closure javascript files in root. It does not descend in directories. +// Each of the files listed in the output is prepended with the path "../../", which is assumed to be the location where these files can be found, relative to Closure's base.js. +// +// The format for each relevant javascript file is: +// goog.addDependency("filepath", ["namespace provided"], ["required namespace 1", "required namespace 2", ...]); +func GenDeps(root http.FileSystem) ([]byte, error) { + // In the typical configuration, Closure is served at 'closure/goog/...'' + return GenDepsWithPath("../../", root) +} + +// GenDepsWithPath is like GenDeps, but you can specify a path where the files are to be found at runtime relative to Closure's base.js. 
+func GenDepsWithPath(pathPrefix string, root http.FileSystem) ([]byte, error) { + d, err := root.Open("/") + if err != nil { + return nil, fmt.Errorf("Failed to open root of %v: %v", root, err) + } + fi, err := d.Stat() + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fmt.Errorf("root of %v is not a dir", root) + } + ent, err := d.Readdir(-1) + if err != nil { + return nil, fmt.Errorf("Could not read dir entries of root: %v", err) + } + var buf bytes.Buffer + for _, info := range ent { + name := info.Name() + if !strings.HasSuffix(name, ".js") { + continue + } + if strings.HasPrefix(name, ".#") { + // Emacs noise. + continue + } + f, err := root.Open(name) + if err != nil { + return nil, fmt.Errorf("Could not open %v: %v", name, err) + } + prov, req, err := parseProvidesRequires(info, name, f) + f.Close() + if err != nil { + return nil, fmt.Errorf("Could not parse deps for %v: %v", name, err) + } + if len(prov) > 0 { + fmt.Fprintf(&buf, "goog.addDependency(%q, %v, %v);\n", pathPrefix+name, jsList(prov), jsList(req)) + } + } + return buf.Bytes(), nil +} + +var provReqRx = regexp.MustCompile(`^goog\.(provide|require)\(['"]([\w\.]+)['"]\)`) + +type depCacheItem struct { + modTime time.Time + provides, requires []string +} + +var ( + depCacheMu sync.Mutex + depCache = map[string]depCacheItem{} +) + +func parseProvidesRequires(fi os.FileInfo, path string, f io.Reader) (provides, requires []string, err error) { + mt := fi.ModTime() + depCacheMu.Lock() + defer depCacheMu.Unlock() + if ci := depCache[path]; ci.modTime.Equal(mt) { + return ci.provides, ci.requires, nil + } + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + l := scanner.Text() + if !strings.HasPrefix(l, "goog.") { + continue + } + m := provReqRx.FindStringSubmatch(l) + if m != nil { + if m[1] == "provide" { + provides = append(provides, m[2]) + } else { + requires = append(requires, m[2]) + } + } + } + if err := scanner.Err(); err != nil { + return nil, nil, err + } + 
depCache[path] = depCacheItem{provides: provides, requires: requires, modTime: mt} + return provides, requires, nil +} + +// jsList prints a list of strings as JavaScript list. +type jsList []string + +func (s jsList) String() string { + var buf bytes.Buffer + buf.WriteByte('[') + for i, v := range s { + if i > 0 { + buf.WriteString(", ") + } + fmt.Fprintf(&buf, "%q", v) + } + buf.WriteByte(']') + return buf.String() +} + +// Example of a match: +// goog.addDependency('asserts/asserts.js', ['goog.asserts', 'goog.asserts.AssertionError'], ['goog.debug.Error', 'goog.string']); +// So with m := depsRx.FindStringSubmatch, +// the provider: m[1] == "asserts/asserts.js" +// the provided namespaces: m[2] == "'goog.asserts', 'goog.asserts.AssertionError'" +// the required namespaces: m[5] == "'goog.debug.Error', 'goog.string'" +var depsRx = regexp.MustCompile(`^goog.addDependency\(['"]([^/]+[a-zA-Z0-9\-\_/\.]*\.js)['"], \[((['"][\w\.]+['"])+(, ['"][\w\.]+['"])*)\], \[((['"][\w\.]+['"])+(, ['"][\w\.]+['"])*)?\]\);`) + +// ParseDeps reads closure namespace dependency lines and +// returns a map giving the js file provider for each namespace, +// and a map giving the namespace dependencies for each namespace. 
+func ParseDeps(r io.Reader) (providedBy map[string]string, requires map[string][]string, err error) { + providedBy = make(map[string]string) + requires = make(map[string][]string) + scanner := bufio.NewScanner(r) + for scanner.Scan() { + l := scanner.Text() + if strings.HasPrefix(l, "//") { + continue + } + if l == "" { + continue + } + m := depsRx.FindStringSubmatch(l) + if m == nil { + return nil, nil, fmt.Errorf("Invalid line in deps: %q", l) + } + jsfile := m[1] + provides := strings.Split(m[2], ", ") + var required []string + if m[5] != "" { + required = strings.Split( + strings.Replace(strings.Replace(m[5], "'", "", -1), `"`, "", -1), ", ") + } + for _, v := range provides { + namespace := strings.Trim(v, `'"`) + if otherjs, ok := providedBy[namespace]; ok { + return nil, nil, fmt.Errorf("Name %v is provided by both %v and %v", namespace, jsfile, otherjs) + } + providedBy[namespace] = jsfile + if _, ok := requires[namespace]; ok { + return nil, nil, fmt.Errorf("Name %v has two sets of dependencies", namespace) + } + if required != nil { + requires[namespace] = required + } + } + } + if err := scanner.Err(); err != nil { + return nil, nil, err + } + return providedBy, requires, nil +} + +// DeepParseDeps reads closure namespace dependency lines and +// returns a map giving all the required js files for each namespace. +func DeepParseDeps(r io.Reader) (map[string][]string, error) { + providedBy, requires, err := ParseDeps(r) + if err != nil { + return nil, err + } + filesDeps := make(map[string][]string) + var deeperDeps func(namespace string) []string + deeperDeps = func(namespace string) []string { + if jsdeps, ok := filesDeps[namespace]; ok { + return jsdeps + } + jsfiles := []string{providedBy[namespace]} + for _, dep := range requires[namespace] { + jsfiles = append(jsfiles, deeperDeps(dep)...) 
+ } + return jsfiles + } + for namespace, _ := range providedBy { + filesDeps[namespace] = deeperDeps(namespace) + } + return filesDeps, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/misc/closure/gendeps_test.go b/vendor/github.com/camlistore/camlistore/pkg/misc/closure/gendeps_test.go new file mode 100644 index 00000000..6326c84a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/misc/closure/gendeps_test.go @@ -0,0 +1,79 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package closure + +import ( + "reflect" + "strings" + "testing" +) + +var testdata = ` +goog.addDependency('asserts/asserts.js', ['goog.asserts', 'goog.asserts.AssertionError'], ['goog.debug.Error', 'goog.string']); +goog.addDependency('debug/error.js', ['goog.debug.Error'], []); +goog.addDependency('string/string.js', ['goog.string', 'goog.string.Unicode'], []); +` + +type parsedDeps struct { + providedBy map[string]string + requires map[string][]string +} + +var parsedWant = parsedDeps{ + providedBy: map[string]string{ + "goog.asserts": "asserts/asserts.js", + "goog.asserts.AssertionError": "asserts/asserts.js", + "goog.debug.Error": "debug/error.js", + "goog.string": "string/string.js", + "goog.string.Unicode": "string/string.js", + }, + requires: map[string][]string{ + "goog.asserts": []string{"goog.debug.Error", "goog.string"}, + "goog.asserts.AssertionError": []string{"goog.debug.Error", "goog.string"}, + }, +} + +var deepParsedWant = map[string][]string{ + "goog.asserts": []string{"asserts/asserts.js", "debug/error.js", "string/string.js"}, + "goog.asserts.AssertionError": []string{"asserts/asserts.js", "debug/error.js", "string/string.js"}, + "goog.debug.Error": []string{"debug/error.js"}, + "goog.string": []string{"string/string.js"}, + "goog.string.Unicode": []string{"string/string.js"}, +} + +func TestParseDeps(t *testing.T) { + providedBy, requires, err := ParseDeps(strings.NewReader(testdata)) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(parsedWant.providedBy, providedBy) { + t.Fatalf("Failed to parse closure deps: wanted %v, got %v", parsedWant.providedBy, providedBy) + } + if !reflect.DeepEqual(parsedWant.requires, requires) { + t.Fatalf("Failed to parse closure deps: wanted %v, got %v", parsedWant.requires, requires) + } +} + +func TestDeepParseDeps(t *testing.T) { + deps, err := DeepParseDeps(strings.NewReader(testdata)) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(deepParsedWant, deps) { + t.Fatalf("Failed to 
parse closure deps: wanted %v, got %v", deepParsedWant, deps) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/misc/closure/jstest/jstest.go b/vendor/github.com/camlistore/camlistore/pkg/misc/closure/jstest/jstest.go new file mode 100644 index 00000000..3716a313 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/misc/closure/jstest/jstest.go @@ -0,0 +1,133 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package jstest uses the Go testing package to test JavaScript code using Node and Mocha. +package jstest + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + + "camlistore.org/pkg/misc/closure" +) + +// checkSystemRequirements checks whether system dependencies such as node and npm are present. +func checkSystemRequirements() error { + binaries := []string{"mocha", "node", "npm"} + for _, b := range binaries { + if _, err := exec.LookPath(b); err != nil { + return fmt.Errorf("Required dependency %q not present", b) + } + } + + checkModules := func(globally bool) error { + args := []string{"list", "--depth=0"} + if globally { + args = append([]string{"-g"}, args...) + } + c := exec.Command("npm", args...) 
+ b, _ := c.Output() + s := string(b) + modules := []string{"mocha", "assert"} + for _, m := range modules { + if !strings.Contains(s, fmt.Sprintf(" %s@", m)) { + return fmt.Errorf("Required npm module %v not present", m) + } + } + return nil + } + if err := checkModules(true); err != nil { + if err := checkModules(false); err != nil { + return err + } + } + return nil +} + +func getRepoRoot(target string) (string, error) { + dir, err := filepath.Abs(filepath.Dir(target)) + if err != nil { + return "", fmt.Errorf("Could not get working directory: %v", err) + } + for ; dir != "" && filepath.Base(dir) != "camlistore.org"; dir = filepath.Dir(dir) { + } + if dir == "" { + return "", fmt.Errorf("Could not find Camlistore repo in ancestors of %q", target) + } + return dir, nil +} + +// writeDeps runs closure.GenDeps() on targetDir and writes the resulting dependencies to a temporary file which will be used during the test run. The entries in the deps files are generated with paths relative to baseJS, which should be Closure's base.js file. +func writeDeps(baseJS, targetDir string) (string, error) { + closureBaseDir := filepath.Dir(baseJS) + depPrefix, err := filepath.Rel(closureBaseDir, targetDir) + if err != nil { + return "", fmt.Errorf("Could not compute relative path from %q to %q: %v", baseJS, targetDir, err) + } + + depPrefix += string(os.PathSeparator) + b, err := closure.GenDepsWithPath(depPrefix, http.Dir(targetDir)) + if err != nil { + return "", fmt.Errorf("GenDepsWithPath failed: %v", err) + } + depsFile, err := ioutil.TempFile("", "camlistore_closure_test_runner") + if err != nil { + return "", fmt.Errorf("Could not create temp js deps file: %v", err) + } + err = ioutil.WriteFile(depsFile.Name(), b, 0644) + if err != nil { + return "", fmt.Errorf("Could not write js deps file: %v", err) + } + return depsFile.Name(), nil +} + +// TestCwd runs all the tests in the current working directory. 
+func TestCwd(t *testing.T) { + err := checkSystemRequirements() + if err != nil { + t.Logf("WARNING: JavaScript unit tests could not be run due to a missing system dependency: %v.\nIf you are doing something that might affect JavaScript, you might want to fix this.", err) + t.Log(err) + t.Skip() + } + + path, err := os.Getwd() + if err != nil { + t.Fatalf("Could not determine current directory: %v.", err) + } + + repoRoot, err := getRepoRoot(path) + if err != nil { + t.Fatalf("Could not find repository root: %v", err) + } + baseJS := filepath.Join(repoRoot, "third_party", "closure", "lib", "closure", "goog", "base.js") + bootstrap := filepath.Join(filepath.Dir(baseJS), "bootstrap", "nodejs.js") + depsFile, err := writeDeps(baseJS, path) + if err != nil { + t.Fatal(err) + } + + c := exec.Command("mocha", "-r", bootstrap, "-r", depsFile, filepath.Join(path, "*test.js")) + b, err := c.CombinedOutput() + if err != nil { + t.Fatalf(string(b)) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/misc/gpgagent/gpgagent.go b/vendor/github.com/camlistore/camlistore/pkg/misc/gpgagent/gpgagent.go new file mode 100644 index 00000000..9c6ddfcf --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/misc/gpgagent/gpgagent.go @@ -0,0 +1,177 @@ +// +build !appengine + +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package gpgagent interacts with the local GPG Agent. 
+package gpgagent + +import ( + "bufio" + "encoding/hex" + "errors" + "fmt" + + "io" + "net" + "net/url" + "os" + "strings" +) + +// Conn is a connection to the GPG agent. +type Conn struct { + c io.ReadWriteCloser + br *bufio.Reader +} + +var ( + ErrNoAgent = errors.New("GPG_AGENT_INFO not set in environment") + ErrNoData = errors.New("GPG_ERR_NO_DATA cache miss") + ErrCancel = errors.New("gpgagent: Cancel") +) + +// NewConn connects to the GPG Agent as described in the +// GPG_AGENT_INFO environment variable. +func NewConn() (*Conn, error) { + sp := strings.SplitN(os.Getenv("GPG_AGENT_INFO"), ":", 3) + if len(sp) == 0 || len(sp[0]) == 0 { + return nil, ErrNoAgent + } + addr := &net.UnixAddr{Net: "unix", Name: sp[0]} + uc, err := net.DialUnix("unix", nil, addr) + if err != nil { + return nil, err + } + br := bufio.NewReader(uc) + lineb, err := br.ReadSlice('\n') + if err != nil { + return nil, err + } + line := string(lineb) + if !strings.HasPrefix(line, "OK") { + return nil, fmt.Errorf("gpgagent: didn't get OK; got %q", line) + } + return &Conn{uc, br}, nil +} + +func (c *Conn) Close() error { + c.br = nil + return c.c.Close() +} + +// PassphraseRequest is a request to get a passphrase from the GPG +// Agent. +type PassphraseRequest struct { + CacheKey, Error, Prompt, Desc string + + // If the option --no-ask is used and the passphrase is not in + // the cache the user will not be asked to enter a passphrase + // but the error code GPG_ERR_NO_DATA is returned. 
(ErrNoData) + NoAsk bool +} + +func (c *Conn) RemoveFromCache(cacheKey string) error { + _, err := fmt.Fprintf(c.c, "CLEAR_PASSPHRASE %s\n", url.QueryEscape(cacheKey)) + if err != nil { + return err + } + lineb, err := c.br.ReadSlice('\n') + if err != nil { + return err + } + line := string(lineb) + if !strings.HasPrefix(line, "OK") { + return fmt.Errorf("gpgagent: CLEAR_PASSPHRASE returned %q", line) + } + return nil +} + +func (c *Conn) GetPassphrase(pr *PassphraseRequest) (passphrase string, outerr error) { + defer func() { + if e, ok := recover().(string); ok { + passphrase = "" + outerr = errors.New(e) + } + }() + set := func(cmd string, val string) { + if val == "" { + return + } + _, err := fmt.Fprintf(c.c, "%s %s\n", cmd, val) + if err != nil { + panic("gpgagent: failed to send " + cmd) + } + line, _, err := c.br.ReadLine() + if err != nil { + panic("gpgagent: failed to read " + cmd) + } + if !strings.HasPrefix(string(line), "OK") { + panic("gpgagent: response to " + cmd + " was " + string(line)) + } + } + if d := os.Getenv("DISPLAY"); d != "" { + set("OPTION", "display="+d) + } + tty, err := os.Readlink("/proc/self/fd/0") + if err == nil { + set("OPTION", "ttyname="+tty) + } + set("OPTION", "ttytype="+os.Getenv("TERM")) + opts := "" + if pr.NoAsk { + opts += "--no-ask " + } + + encOrX := func(s string) string { + if s == "" { + return "X" + } + return url.QueryEscape(s) + } + + _, err = fmt.Fprintf(c.c, "GET_PASSPHRASE %s%s %s %s %s\n", + opts, + url.QueryEscape(pr.CacheKey), + encOrX(pr.Error), + encOrX(pr.Prompt), + encOrX(pr.Desc)) + if err != nil { + return "", err + } + lineb, err := c.br.ReadSlice('\n') + if err != nil { + return "", err + } + line := string(lineb) + if strings.HasPrefix(line, "OK ") { + decb, err := hex.DecodeString(line[3 : len(line)-1]) + if err != nil { + return "", err + } + return string(decb), nil + } + fields := strings.Split(line, " ") + if len(fields) >= 2 && fields[0] == "ERR" { + switch fields[1] { + case "67108922": + 
return "", ErrNoData + case "83886179": + return "", ErrCancel + } + } + return "", errors.New(line) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/misc/gpgagent/gpgagent_test.go b/vendor/github.com/camlistore/camlistore/pkg/misc/gpgagent/gpgagent_test.go new file mode 100644 index 00000000..ae0b0c23 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/misc/gpgagent/gpgagent_test.go @@ -0,0 +1,81 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gpgagent + +import ( + "fmt" + "os" + "testing" + "time" +) + +func TestPrompt(t *testing.T) { + if os.Getenv("TEST_GPGAGENT_LIB") != "1" { + t.Logf("skipping TestPrompt without $TEST_GPGAGENT_LIB == 1") + return + } + conn, err := NewConn() + if err != nil { + t.Fatal(err) + } + defer conn.Close() + req := &PassphraseRequest{ + Desc: "Type 'foo' for testing", + Error: "seriously, or I'll be an error.", + Prompt: "foo", + CacheKey: fmt.Sprintf("gpgagent_test-cachekey-%d", time.Now()), + } + s1, err := conn.GetPassphrase(req) + if err != nil { + t.Fatal(err) + } + t1 := time.Now() + s2, err := conn.GetPassphrase(req) + if err != nil { + t.Fatal(err) + } + t2 := time.Now() + if td := t2.Sub(t1); td > 1e9/5 { + t.Errorf("cached passphrase took more than 1/5 second; took %d ns", td) + } + if s1 != s2 { + t.Errorf("cached passphrase differed; got %q, want %q", s2, s1) + } + if s1 != "foo" { + t.Errorf("got passphrase %q; want %q", s1, "foo") + } + err = 
conn.RemoveFromCache(req.CacheKey) + if err != nil { + t.Fatal(err) + } + + req.NoAsk = true + s3, err := conn.GetPassphrase(req) + if err != ErrNoData { + t.Errorf("after remove from cache, expected gpgagent.ErrNoData, got %q, %v", s3, err) + } + + s4, err := conn.GetPassphrase(&PassphraseRequest{ + Desc: "Press Cancel for testing", + Error: "seriously, or I'll be an error.", + Prompt: "cancel!", + CacheKey: fmt.Sprintf("gpgagent_test-cachekey-%d", time.Now()), + }) + if err != ErrCancel { + t.Errorf("expected cancel, got %q, %v", s4, err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/misc/pinentry/pinentry.go b/vendor/github.com/camlistore/camlistore/pkg/misc/pinentry/pinentry.go new file mode 100644 index 00000000..1e8241a4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/misc/pinentry/pinentry.go @@ -0,0 +1,147 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package pinentry interfaces with the pinentry(1) command to securely +// prompt the user for a password using whichever user interface the +// user is currently using. +package pinentry + +import ( + "bufio" + "errors" + "fmt" + "os" + "os/exec" + "strings" +) + +// ErrCancel is returned when the user explicitly aborts the password +// request. +var ErrCancel = errors.New("pinentry: Cancel") + +// Request describes what the user should see during the request for +// their password. 
+type Request struct { + Desc, Prompt, OK, Cancel, Error string +} + +func catch(err *error) { + rerr := recover() + if rerr == nil { + return + } + if e, ok := rerr.(string); ok { + *err = errors.New(e) + } + if e, ok := rerr.(error); ok { + *err = e + } +} + +func check(err error) { + if err != nil { + panic(err) + } +} + +func (r *Request) GetPIN() (pin string, outerr error) { + defer catch(&outerr) + bin, err := exec.LookPath("pinentry") + if err != nil { + return r.getPINNaïve() + } + cmd := exec.Command(bin) + stdin, _ := cmd.StdinPipe() + stdout, _ := cmd.StdoutPipe() + check(cmd.Start()) + defer cmd.Wait() + defer stdin.Close() + br := bufio.NewReader(stdout) + lineb, _, err := br.ReadLine() + if err != nil { + return "", fmt.Errorf("Failed to get getpin greeting") + } + line := string(lineb) + if !strings.HasPrefix(line, "OK") { + return "", fmt.Errorf("getpin greeting said %q", line) + } + set := func(cmd string, val string) { + if val == "" { + return + } + fmt.Fprintf(stdin, "%s %s\n", cmd, val) + line, _, err := br.ReadLine() + if err != nil { + panic("Failed to " + cmd) + } + if string(line) != "OK" { + panic("Response to " + cmd + " was " + string(line)) + } + } + set("SETPROMPT", r.Prompt) + set("SETDESC", r.Desc) + set("SETOK", r.OK) + set("SETCANCEL", r.Cancel) + set("SETERROR", r.Error) + set("OPTION", "ttytype="+os.Getenv("TERM")) + tty, err := os.Readlink("/proc/self/fd/0") + if err == nil { + set("OPTION", "ttyname="+tty) + } + fmt.Fprintf(stdin, "GETPIN\n") + lineb, _, err = br.ReadLine() + if err != nil { + return "", fmt.Errorf("Failed to read line after GETPIN: %v", err) + } + line = string(lineb) + if strings.HasPrefix(line, "D ") { + return line[2:], nil + } + if strings.HasPrefix(line, "ERR 83886179 ") { + return "", ErrCancel + } + return "", fmt.Errorf("GETPIN response didn't start with D; got %q", line) +} + +func runPass(bin string, args ...string) { + cmd := exec.Command(bin, args...) 
+ cmd.Stdout = os.Stdout + cmd.Run() +} + +func (r *Request) getPINNaïve() (string, error) { + stty, err := exec.LookPath("stty") + if err != nil { + return "", errors.New("no pinentry or stty found") + } + runPass(stty, "-echo") + defer runPass(stty, "echo") + + if r.Desc != "" { + fmt.Printf("%s\n\n", r.Desc) + } + prompt := r.Prompt + if prompt == "" { + prompt = "Password" + } + fmt.Printf("%s: ", prompt) + br := bufio.NewReader(os.Stdin) + line, _, err := br.ReadLine() + if err != nil { + return "", err + } + return string(line), nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/netutil/ident.go b/vendor/github.com/camlistore/camlistore/pkg/netutil/ident.go new file mode 100644 index 00000000..ba3f5786 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/netutil/ident.go @@ -0,0 +1,275 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package netutil identifies the system userid responsible for +// localhost TCP connections. +package netutil + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "os" + "os/exec" + "os/user" + "regexp" + "runtime" + "strconv" + "strings" +) + +var ( + ErrNotFound = errors.New("netutil: connection not found") + ErrUnsupportedOS = errors.New("netutil: not implemented on this operating system") +) + +// ConnUserid returns the uid that owns the given localhost connection. +// The returned error is ErrNotFound if the connection wasn't found. 
+func ConnUserid(conn net.Conn) (uid int, err error) { + return AddrPairUserid(conn.LocalAddr(), conn.RemoteAddr()) +} + +// HostPortToIP parses a host:port to a TCPAddr without resolving names. +// If given a context IP, it will resolve localhost to match the context's IP family. +func HostPortToIP(hostport string, ctx *net.TCPAddr) (hostaddr *net.TCPAddr, err error) { + host, port, err := net.SplitHostPort(hostport) + if err != nil { + return nil, err + } + iport, err := strconv.Atoi(port) + if err != nil || iport < 0 || iport > 0xFFFF { + return nil, fmt.Errorf("invalid port %d", iport) + } + var addr net.IP + if ctx != nil && host == "localhost" { + if ctx.IP.To4() != nil { + addr = net.IPv4(127, 0, 0, 1) + } else { + addr = net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1} + } + } else if addr = net.ParseIP(host); addr == nil { + return nil, fmt.Errorf("could not parse IP %s", host) + } + + return &net.TCPAddr{IP: addr, Port: iport}, nil +} + +// AddrPairUserid returns the local userid who owns the TCP connection +// given by the local and remote ip:port (lipport and ripport, +// respectively). Returns ErrNotFound for the error if the TCP connection +// isn't found. 
+func AddrPairUserid(local, remote net.Addr) (uid int, err error) { + lAddr, lOk := local.(*net.TCPAddr) + rAddr, rOk := remote.(*net.TCPAddr) + if !(lOk && rOk) { + return -1, fmt.Errorf("netutil: Could not convert Addr to TCPAddr.") + } + + localv4 := (lAddr.IP.To4() != nil) + remotev4 := (rAddr.IP.To4() != nil) + if localv4 != remotev4 { + return -1, fmt.Errorf("netutil: address pairs of different families; localv4=%v, remotev4=%v", + localv4, remotev4) + } + + switch runtime.GOOS { + case "darwin": + return uidFromLsof(lAddr.IP, lAddr.Port, rAddr.IP, rAddr.Port) + case "freebsd": + return uidFromSockstat(lAddr.IP, lAddr.Port, rAddr.IP, rAddr.Port) + case "linux": + file := "/proc/net/tcp" + if !localv4 { + file = "/proc/net/tcp6" + } + f, err := os.Open(file) + if err != nil { + return -1, fmt.Errorf("Error opening %s: %v", file, err) + } + defer f.Close() + return uidFromProcReader(lAddr.IP, lAddr.Port, rAddr.IP, rAddr.Port, f) + } + return 0, ErrUnsupportedOS +} + +func toLinuxIPv4Order(b []byte) []byte { + binary.BigEndian.PutUint32(b, binary.LittleEndian.Uint32(b)) + return b +} + +func toLinuxIPv6Order(b []byte) []byte { + for i := 0; i < 16; i += 4 { + sb := b[i : i+4] + binary.BigEndian.PutUint32(sb, binary.LittleEndian.Uint32(sb)) + } + return b +} + +type maybeBrackets net.IP + +func (p maybeBrackets) String() string { + s := net.IP(p).String() + if strings.Contains(s, ":") { + return "[" + s + "]" + } + return s +} + +// Changed by tests. 
+var uidFromUsername = uidFromUsernameFn + +func uidFromUsernameFn(username string) (uid int, err error) { + if uid := os.Getuid(); uid != 0 && username == os.Getenv("USER") { + return uid, nil + } + u, err := user.Lookup(username) + if err == nil { + uid, err := strconv.Atoi(u.Uid) + return uid, err + } + return 0, err +} + +func uidFromLsof(lip net.IP, lport int, rip net.IP, rport int) (uid int, err error) { + seek := fmt.Sprintf("%s:%d->%s:%d", maybeBrackets(lip), lport, maybeBrackets(rip), rport) + seekb := []byte(seek) + if _, err = exec.LookPath("lsof"); err != nil { + return + } + cmd := exec.Command("lsof", + "-b", // avoid system calls that could block + "-w", // and don't warn about cases where -b fails + "-n", // don't resolve network names + "-P", // don't resolve network ports, + // TODO(bradfitz): pass down the uid we care about, then do: ? + //"-a", // AND the following together: + // "-u", strconv.Itoa(uid) // just this uid + "-itcp") // we only care about TCP connections + stdout, err := cmd.StdoutPipe() + if err != nil { + return + } + defer cmd.Wait() + defer stdout.Close() + err = cmd.Start() + if err != nil { + return + } + defer cmd.Process.Kill() + br := bufio.NewReader(stdout) + for { + line, err := br.ReadSlice('\n') + if err == io.EOF { + break + } + if err != nil { + return -1, err + } + if !bytes.Contains(line, seekb) { + continue + } + // SystemUIS 276 bradfitz 15u IPv4 0xffffff801a7c74e0 0t0 TCP 127.0.0.1:56718->127.0.0.1:5204 (ESTABLISHED) + f := bytes.Fields(line) + if len(f) < 8 { + continue + } + username := string(f[2]) + return uidFromUsername(username) + } + return -1, ErrNotFound + +} + +func uidFromSockstat(lip net.IP, lport int, rip net.IP, rport int) (int, error) { + cmd := exec.Command("sockstat", "-Ptcp") + stdout, err := cmd.StdoutPipe() + if err != nil { + return -1, err + } + defer cmd.Wait() + defer stdout.Close() + err = cmd.Start() + if err != nil { + return -1, err + } + defer cmd.Process.Kill() + + return 
uidFromSockstatReader(lip, lport, rip, rport, stdout) +} + +func uidFromSockstatReader(lip net.IP, lport int, rip net.IP, rport int, r io.Reader) (int, error) { + pat, err := regexp.Compile(fmt.Sprintf(`^([^ ]+).*%s:%d *%s:%d$`, + lip.String(), lport, rip.String(), rport)) + if err != nil { + return -1, err + } + scanner := bufio.NewScanner(r) + for scanner.Scan() { + l := scanner.Text() + m := pat.FindStringSubmatch(l) + if len(m) == 2 { + return uidFromUsername(m[1]) + } + } + + if err := scanner.Err(); err != nil { + return -1, err + } + + return -1, ErrNotFound +} + +func uidFromProcReader(lip net.IP, lport int, rip net.IP, rport int, r io.Reader) (uid int, err error) { + buf := bufio.NewReader(r) + + localHex := "" + remoteHex := "" + ipv4 := lip.To4() != nil + if ipv4 { + // In the kernel, the port is run through ntohs(), and + // the inet_request_socket in + // include/net/inet_socket.h says the "loc_addr" and + // "rmt_addr" fields are __be32, but get_openreq4's + // printf of them is raw, without byte order + // converstion. 
+ localHex = fmt.Sprintf("%08X:%04X", toLinuxIPv4Order([]byte(lip.To4())), lport) + remoteHex = fmt.Sprintf("%08X:%04X", toLinuxIPv4Order([]byte(rip.To4())), rport) + } else { + localHex = fmt.Sprintf("%032X:%04X", toLinuxIPv6Order([]byte(lip.To16())), lport) + remoteHex = fmt.Sprintf("%032X:%04X", toLinuxIPv6Order([]byte(rip.To16())), rport) + } + + for { + line, err := buf.ReadString('\n') + if err != nil { + return -1, ErrNotFound + } + parts := strings.Fields(strings.TrimSpace(line)) + if len(parts) < 8 { + continue + } + // log.Printf("parts[1] = %q; localHex = %q", parts[1], localHex) + if parts[1] == localHex && parts[2] == remoteHex { + uid, err = strconv.Atoi(parts[7]) + return uid, err + } + } + panic("unreachable") +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/netutil/ident_test.go b/vendor/github.com/camlistore/camlistore/pkg/netutil/ident_test.go new file mode 100644 index 00000000..a28f210b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/netutil/ident_test.go @@ -0,0 +1,248 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package netutil + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "runtime" + "strings" + "testing" + "time" +) + +func TestLocalIPv4(t *testing.T) { + // Start listening on localhost IPv4, on some port. 
+ ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + testLocalListener(t, ln) +} + +func TestLocalIPv6(t *testing.T) { + ln, err := net.Listen("tcp", "[::1]:0") + if err != nil { + t.Logf("skipping IPv6 test; not supported on host machine?") + return + } + testLocalListener(t, ln) +} + +func testLocalListener(t *testing.T, ln net.Listener) { + defer ln.Close() + + // Accept a connection, run ConnUserId (what we're testing), and + // send its result on c. + type uidErr struct { + uid int + err error + } + c := make(chan uidErr, 2) + go func() { + conn, err := ln.Accept() + if err != nil { + c <- uidErr{0, err} + } + uid, err := ConnUserid(conn) + c <- uidErr{uid, err} + }() + + // Connect to our dummy server. Keep the connection open until + // the test is done. + donec := make(chan bool) + defer close(donec) + go func() { + c, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + return + } + <-donec + c.Close() + }() + + select { + case r := <-c: + if r.err != nil { + if r.err == ErrUnsupportedOS { + t.Skipf("Skipping test; not implemented on " + runtime.GOOS) + } + t.Fatal(r.err) + } + if r.uid != os.Getuid() { + t.Errorf("got uid %d; want %d", r.uid, os.Getuid()) + } + case <-time.After(3 * time.Second): + t.Fatal("timeout") + } +} + +func TestHTTPAuth(t *testing.T) { + var ts *httptest.Server + ts = httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + from, err := HostPortToIP(r.RemoteAddr, nil) + if err != nil { + t.Fatal(err) + } + to := ts.Listener.Addr() + uid, err := AddrPairUserid(from, to) + if err != nil { + fmt.Fprintf(rw, "ERR: %v", err) + return + } + fmt.Fprintf(rw, "uid=%d", uid) + })) + defer ts.Close() + res, err := http.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if g, e := string(body), fmt.Sprintf("uid=%d", os.Getuid()); g != e { + if g == "ERR: "+ErrUnsupportedOS.Error() { + 
t.Skipf("Skipping test; not implemented on " + runtime.GOOS) + } + t.Errorf("got body %q; want %q", g, e) + } +} + +func testUidFromUsername(username string) (int, error) { + switch username { + case "really-long-user": + return 1000, nil + case "root": + return 0, nil + } + panic("Unhandled username specified in test") +} + +func TestParseFreeBSDSockstat(t *testing.T) { + uidFromUsername = testUidFromUsername + pairs := []struct { + uid int + lip, rip net.IP + lport, rport int + }{ + { + // "really-long-user" + uid: 1000, + lip: net.ParseIP("192.168.123.5"), lport: 8000, + rip: net.ParseIP("192.168.123.21"), rport: 49826, + }, + { + // "really-long-user" + uid: 1000, + lip: net.ParseIP("192.168.123.5"), lport: 9000, + rip: net.ParseIP("192.168.123.21"), rport: 49866, + }, + { + // "root" + uid: 0, + lip: net.ParseIP("192.168.123.5"), lport: 22, + rip: net.ParseIP("192.168.123.21"), rport: 49747, + }, + } + + for _, p := range pairs { + uid, err := uidFromSockstatReader(p.lip, p.lport, p.rip, p.rport, strings.NewReader(sockstatPtcp)) + if err != nil { + t.Error(err) + } + + if p.uid != uid { + t.Error("Got", uid, "want", p.uid) + } + } +} + +func TestParseLinuxTCPStat4(t *testing.T) { + lip, lport := net.ParseIP("67.218.110.129"), 43436 + rip, rport := net.ParseIP("207.7.148.195"), 80 + + // 816EDA43:A9AC C39407CF:0050 + // 43436 80 + uid, err := uidFromProcReader(lip, lport, rip, rport, strings.NewReader(tcpstat4)) + if err != nil { + t.Error(err) + } + if e, g := 61652, uid; e != g { + t.Errorf("expected uid %d, got %d", e, g) + } +} + +var tcpstat4 = ` sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode +0: 0100007F:C204 00000000:0000 0A 00000000:00000000 00:00000000 00000000 61652 0 8722922 1 ffff880036b36180 300 0 0 2 -1 +1: 0100007F:0CEA 00000000:0000 0A 00000000:00000000 00:00000000 00000000 120 0 5714729 1 ffff880036b35480 300 0 0 2 -1 +2: 0100007F:2BCB 00000000:0000 0A 00000000:00000000 00:00000000 00000000 65534 0 7381 
1 ffff880136370000 300 0 0 2 -1 +3: 0100007F:13AD 00000000:0000 0A 00000000:00000000 00:00000000 00000000 61652 0 4846349 1 ffff880123eb5480 300 0 0 2 -1 +4: 00000000:0050 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 8307 1 ffff880123eb0d00 300 0 0 2 -1 +5: 00000000:0071 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 8558503 1 ffff88001a242080 300 0 0 2 -1 6: 0100007F:7533 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 8686 1 ffff880136371380 300 0 0 2 -1 +7: 017AA8C0:0035 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 6015 1 ffff880123eb0680 300 0 0 2 -1 +8: 0100007F:0277 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 8705543 1 ffff88001a242d80 300 0 0 2 -1 +9: 816EDA43:D4DC 35E07D4A:01BB 01 00000000:00000000 02:00000E25 00000000 61652 0 8720744 2 ffff88001a243a80 346 4 24 3 2 +10: 0100007F:C204 0100007F:D981 01 00000000:00000000 00:00000000 00000000 61652 0 8722934 1 ffff88006712a700 21 4 30 5 -1 +11: 816EDA43:A9AC C39407CF:0050 01 00000000:00000000 00:00000000 00000000 61652 0 8754873 1 ffff88006712db00 27 0 0 3 -1 +12: 816EDA43:AFEF 51357D4A:01BB 01 00000000:00000000 02:00000685 00000000 61652 0 8752937 2 ffff880136375480 87 4 2 4 -1 +13: 0100007F:D981 0100007F:C204 01 00000000:00000000 00:00000000 00000000 61652 0 8722933 1 ffff880036b30d00 21 4 0 3 -1 +` + +// Output of 'sockstat -Ptcp'. User 'really-long-user' running two instances +// of nc copied to 'really-only-process-name' and 'spc in name' run with -l +// 8000 and -l 9000 respectively. Two connections were then open from +// 192.167.123.21 using 'nc 192.168.123.5 8000' and 'nc 192.168.123.5 9000'. 
+var sockstatPtcp = ` +sockstat -Ptcp +USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS +really-long-user spc in nam63210 3 tcp4 *:9000 *:* +really-long-user spc in nam63210 4 tcp4 192.168.123.5:9000192.168.123.21:49866 +www nginx 62982 7 tcp4 *:80 *:* +www nginx 62982 8 tcp6 *:80 *:* +really-long-user really-lon62928 3 tcp4 *:8000 *:* +really-long-user really-lon62928 4 tcp4 192.168.123.5:8000192.168.123.21:49826 +root sshd 62849 5 tcp4 192.168.123.5:22 192.168.123.21:49819 +root sshd 61819 5 tcp4 192.168.123.5:22 192.168.123.21:49747 +camlistore sshd 61746 5 tcp4 192.168.123.5:22 192.168.123.21:49739 +root sshd 61744 5 tcp4 192.168.123.5:22 192.168.123.21:49739 +camlistore camlistore10941 7 tcp4 6 *:3179 *:* +camlistore sshd 91620 5 tcp4 192.168.123.5:22 192.168.123.2:13404 +root sshd 91618 5 tcp4 192.168.123.5:22 192.168.123.2:13404 +root sshd 2309 4 tcp6 *:22 *:* +root sshd 2309 5 tcp4 *:22 *:* +root nginx 2152 7 tcp4 *:80 *:* +root nginx 2152 8 tcp6 *:80 *:* +root python2.7 2076 3 tcp4 127.0.0.1:9042 *:* +root python2.7 2076 6 tcp4 127.0.0.1:9042 127.0.0.1:51930 +root python2.7 2076 7 tcp4 127.0.0.1:9042 127.0.0.1:20433 +root python2.7 2076 8 tcp4 127.0.0.1:9042 127.0.0.1:55807 +root rpc.statd 1630 5 tcp6 *:664 *:* +root rpc.statd 1630 7 tcp4 *:664 *:* +root nfsd 1618 5 tcp4 *:2049 *:* +root nfsd 1618 6 tcp6 *:2049 *:* +root mountd 1604 6 tcp6 *:792 *:* +root mountd 1604 8 tcp4 *:792 *:* +root rpcbind 1600 8 tcp6 *:111 *:* +root rpcbind 1600 11 tcp4 *:111 *:* +? ? ? ? tcp4 *:895 *:* +? ? ? ? 
tcp6 *:777 *:* +` diff --git a/vendor/github.com/camlistore/camlistore/pkg/netutil/netutil.go b/vendor/github.com/camlistore/camlistore/pkg/netutil/netutil.go new file mode 100644 index 00000000..b7bfb2cc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/netutil/netutil.go @@ -0,0 +1,123 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package netutil + +import ( + "errors" + "fmt" + "net" + "net/url" + "strings" + "time" +) + +// AwaitReachable tries to make a TCP connection to addr regularly. +// It returns an error if it's unable to make a connection before maxWait. +func AwaitReachable(addr string, maxWait time.Duration) error { + done := time.Now().Add(maxWait) + for time.Now().Before(done) { + c, err := net.Dial("tcp", addr) + if err == nil { + c.Close() + return nil + } + time.Sleep(100 * time.Millisecond) + } + return fmt.Errorf("%v unreachable for %v", addr, maxWait) +} + +// HostPort takes a urlStr string URL, and returns a host:port string suitable +// to passing to net.Dial, with the port set as the scheme's default port if +// absent. 
+func HostPort(urlStr string) (string, error) { + u, err := url.Parse(urlStr) + if err != nil { + return "", fmt.Errorf("could not parse %q as a url: %v", urlStr, err) + } + if u.Scheme == "" { + return "", fmt.Errorf("url %q has no scheme", urlStr) + } + hostPort := u.Host + if hostPort == "" || strings.HasPrefix(hostPort, ":") { + return "", fmt.Errorf("url %q has no host", urlStr) + } + idx := strings.Index(hostPort, "]") + if idx == -1 { + idx = 0 + } + if !strings.Contains(hostPort[idx:], ":") { + if u.Scheme == "https" { + hostPort += ":443" + } else { + hostPort += ":80" + } + } + return hostPort, nil +} + +// ListenOnLocalRandomPort returns a TCP listener on a random +// localhost port. +func ListenOnLocalRandomPort() (net.Listener, error) { + ip, err := Localhost() + if err != nil { + return nil, err + } + return net.ListenTCP("tcp", &net.TCPAddr{IP: ip, Port: 0}) +} + +// Localhost returns the first address found when +// doing a lookup of "localhost". If not successful, +// it looks for an ip on the loopback interfaces. +func Localhost() (net.IP, error) { + if ip := localhostLookup(); ip != nil { + return ip, nil + } + if ip := loopbackIP(); ip != nil { + return ip, nil + } + return nil, errors.New("No loopback ip found.") +} + +// localhostLookup looks for a loopback IP by resolving localhost. +func localhostLookup() net.IP { + if ips, err := net.LookupIP("localhost"); err == nil && len(ips) > 0 { + return ips[0] + } + return nil +} + +// loopbackIP returns the first loopback IP address sniffing network +// interfaces or nil if none is found. 
+func loopbackIP() net.IP { + interfaces, err := net.Interfaces() + if err != nil { + return nil + } + for _, inf := range interfaces { + const flagUpLoopback = net.FlagUp | net.FlagLoopback + if inf.Flags&flagUpLoopback == flagUpLoopback { + addrs, _ := inf.Addrs() + for _, addr := range addrs { + ip, _, err := net.ParseCIDR(addr.String()) + if err == nil && ip.IsLoopback() { + return ip + } + } + } + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/netutil/netutil_test.go b/vendor/github.com/camlistore/camlistore/pkg/netutil/netutil_test.go new file mode 100644 index 00000000..644d7eb6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/netutil/netutil_test.go @@ -0,0 +1,181 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package netutil + +import ( + "net" + "strconv" + "testing" +) + +func TestHostPort(t *testing.T) { + tests := []struct { + baseURL string + wantNetAddr string + }{ + // IPv4, no prefix + { + baseURL: "http://foo.com/", + wantNetAddr: "foo.com:80", + }, + + { + baseURL: "https://foo.com/", + wantNetAddr: "foo.com:443", + }, + + { + baseURL: "http://foo.com:8080/", + wantNetAddr: "foo.com:8080", + }, + + { + baseURL: "https://foo.com:8080/", + wantNetAddr: "foo.com:8080", + }, + + // IPv4, with prefix + { + baseURL: "http://foo.com/pics/", + wantNetAddr: "foo.com:80", + }, + + { + baseURL: "https://foo.com/pics/", + wantNetAddr: "foo.com:443", + }, + + { + baseURL: "http://foo.com:8080/pics/", + wantNetAddr: "foo.com:8080", + }, + + { + baseURL: "https://foo.com:8080/pics/", + wantNetAddr: "foo.com:8080", + }, + + // IPv6, no prefix + { + baseURL: "http://[::1]/", + wantNetAddr: "[::1]:80", + }, + + { + baseURL: "https://[::1]/", + wantNetAddr: "[::1]:443", + }, + + { + baseURL: "http://[::1]:8080/", + wantNetAddr: "[::1]:8080", + }, + + { + baseURL: "https://[::1]:8080/", + wantNetAddr: "[::1]:8080", + }, + + // IPv6, with prefix + { + baseURL: "http://[::1]/pics/", + wantNetAddr: "[::1]:80", + }, + + { + baseURL: "https://[::1]/pics/", + wantNetAddr: "[::1]:443", + }, + + { + baseURL: "http://[::1]:8080/pics/", + wantNetAddr: "[::1]:8080", + }, + + { + baseURL: "https://[::1]:8080/pics/", + wantNetAddr: "[::1]:8080", + }, + } + for _, v := range tests { + got, err := HostPort(v.baseURL) + if err != nil { + t.Error(err) + continue + } + if got != v.wantNetAddr { + t.Errorf("got: %v for %v, want: %v", got, v.baseURL, v.wantNetAddr) + } + } +} + +func testLocalhostResolver(t *testing.T, resolve func() net.IP) { + ip := resolve() + if ip == nil { + t.Fatal("no ip found.") + } + if !ip.IsLoopback() { + t.Errorf("expected a loopback address: %s", ip) + } +} + +func testLocalhost(t *testing.T) { + testLocalhostResolver(t, localhostLookup) +} + +func 
testLoopbackIp(t *testing.T) { + testLocalhostResolver(t, loopbackIP) +} + +func TestLocalhost(t *testing.T) { + _, err := Localhost() + if err != nil { + t.Fatal(err) + } +} + +func TestListenOnLocalRandomPort(t *testing.T) { + l, err := ListenOnLocalRandomPort() + if err != nil { + t.Fatalf("unexpected error %v", err) + } + defer l.Close() + + _, port, err := net.SplitHostPort(l.Addr().String()) + if err != nil { + t.Fatal(err) + } + if p, _ := strconv.Atoi(port); p < 1 { + t.Fatalf("expected port(%d) to be > 0", p) + } +} + +func BenchmarkLocalhostLookup(b *testing.B) { + for i := 0; i < b.N; i++ { + if ip := localhostLookup(); ip == nil { + b.Fatal("no ip found.") + } + } +} + +func BenchmarkLoopbackIP(b *testing.B) { + for i := 0; i < b.N; i++ { + if ip := loopbackIP(); ip == nil { + b.Fatal("no ip found.") + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/oauthutil/oauth.go b/vendor/github.com/camlistore/camlistore/pkg/oauthutil/oauth.go new file mode 100644 index 00000000..48dc9177 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/oauthutil/oauth.go @@ -0,0 +1,121 @@ +/* +Copyright 2015 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package oauthutil + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "camlistore.org/pkg/wkfs" + + "golang.org/x/oauth2" +) + +// TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization +// code should be returned in the title bar of the browser, with the page text +// prompting the user to copy the code and paste it in the application. +const TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob" + +// ErrNoAuthCode is returned when Token() has not found any valid cached token +// and TokenSource does not have an AuthCode for getting a new token. +var ErrNoAuthCode = errors.New("oauthutil: unspecified TokenSource.AuthCode") + +// TokenSource is an implementation of oauth2.TokenSource. It uses CacheFile to store and +// reuse the the acquired token, and AuthCode to provide the authorization code that will be +// exchanged for a token otherwise. +type TokenSource struct { + Config *oauth2.Config + + // CacheFile is where the token will be stored JSON-encoded. Any call to Token + // first tries to read a valid token from CacheFile. + CacheFile string + + // AuthCode provides the authorization code that Token will exchange for a token. + // It usually is a way to prompt the user for the code. If CacheFile does not provide + // a token and AuthCode is nil, Token returns ErrNoAuthCode. + AuthCode func() string +} + +var errExpiredToken = errors.New("expired token") + +// cachedToken returns the token saved in cacheFile. It specifically returns +// errTokenExpired if the token is expired. 
+func cachedToken(cacheFile string) (*oauth2.Token, error) { + tok := new(oauth2.Token) + tokenData, err := wkfs.ReadFile(cacheFile) + if err != nil { + return nil, err + } + if err = json.Unmarshal(tokenData, tok); err != nil { + return nil, err + } + if !tok.Valid() { + if tok != nil && time.Now().After(tok.Expiry) { + return nil, errExpiredToken + } + return nil, errors.New("invalid token") + } + return tok, nil +} + +// Token first tries to find a valid token in CacheFile, and otherwise uses +// Config and AuthCode to fetch a new token. This new token is saved in CacheFile +// (if not blank). If CacheFile did not provide a token and AuthCode is nil, +// ErrNoAuthCode is returned. +func (src TokenSource) Token() (*oauth2.Token, error) { + var tok *oauth2.Token + var err error + if src.CacheFile != "" { + tok, err = cachedToken(src.CacheFile) + if err == nil { + return tok, nil + } + if err != errExpiredToken { + fmt.Printf("Error getting token from %s: %v\n", src.CacheFile, err) + } + } + if src.AuthCode == nil { + return nil, ErrNoAuthCode + } + tok, err = src.Config.Exchange(oauth2.NoContext, src.AuthCode()) + if err != nil { + return nil, fmt.Errorf("could not exchange auth code for a token: %v", err) + } + if src.CacheFile == "" { + return tok, nil + } + tokenData, err := json.Marshal(&tok) + if err != nil { + return nil, fmt.Errorf("could not encode token as json: %v", err) + } + if err := wkfs.WriteFile(src.CacheFile, tokenData, 0600); err != nil { + return nil, fmt.Errorf("could not cache token in %v: %v", src.CacheFile, err) + } + return tok, nil +} + +// NewRefreshTokenSource returns a token source that obtains its initial token +// based on the provided config and the refresh token. +func NewRefreshTokenSource(config *oauth2.Config, refreshToken string) oauth2.TokenSource { + var noInitialToken *oauth2.Token = nil + return oauth2.ReuseTokenSource(noInitialToken, config.TokenSource( + oauth2.NoContext, // TODO: maybe accept a context later. 
+ &oauth2.Token{RefreshToken: refreshToken}, + )) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/cpu.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/cpu.go new file mode 100644 index 00000000..48eca43b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/cpu.go @@ -0,0 +1,30 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import "time" + +var cpuUsage func() time.Duration + +// CPUUsage returns how much cumulative user CPU time the process has +// used. On unsupported operating systems, it returns zero. +func CPUUsage() time.Duration { + if f := cpuUsage; f != nil { + return f() + } + return 0 +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/cpu_freebsd.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/cpu_freebsd.go new file mode 100644 index 00000000..b80f6b30 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/cpu_freebsd.go @@ -0,0 +1,32 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "syscall" + "time" +) + +func init() { + cpuUsage = cpuFreeBSD +} + +func cpuFreeBSD() time.Duration { + var ru syscall.Rusage + syscall.Getrusage(0, &ru) + return time.Duration(ru.Utime.Nano()) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/cpu_linux.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/cpu_linux.go new file mode 100644 index 00000000..f8acb66c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/cpu_linux.go @@ -0,0 +1,34 @@ +// +build linux,!appengine + +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "syscall" + "time" +) + +func init() { + cpuUsage = cpuLinux +} + +func cpuLinux() time.Duration { + var ru syscall.Rusage + syscall.Getrusage(0, &ru) + return time.Duration(ru.Utime.Nano()) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/findproc_appengine.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/findproc_appengine.go new file mode 100644 index 00000000..4da511c4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/findproc_appengine.go @@ -0,0 +1,29 @@ +// +build appengine + +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "log" +) + +func DieOnParentDeath() { + // TODO(mpl): maybe the way it's done in findproc_normal.go actually works + // on appengine too? Verify that. + log.Fatal("DieOnParentDeath not implemented on appengine.") +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/findproc_normal.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/findproc_normal.go new file mode 100644 index 00000000..abb8e4d6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/findproc_normal.go @@ -0,0 +1,43 @@ +// +build !appengine + +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "os" + "time" +) + +// DieOnParentDeath starts a goroutine that regularly checks that +// the current process can find its parent, and calls os.Exit(0) +// as soon as it cannot. +func DieOnParentDeath() { + // TODO: on Linux, use PR_SET_PDEATHSIG later. 
For now, the portable way: + go func() { + pollParent(30 * time.Second) + os.Exit(0) + }() +} + +// pollParent checks every t that the ppid of the current +// process has not changed (i.e that the process has not +// been orphaned). It returns as soon as that ppid changes. +func pollParent(t time.Duration) { + for initial := os.Getppid(); initial == os.Getppid(); time.Sleep(t) { + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/gce/gce.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/gce/gce.go new file mode 100644 index 00000000..64850b55 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/gce/gce.go @@ -0,0 +1,98 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package gce configures hooks for running Camlistore for Google Compute Engine. 
+package gce + +import ( + "errors" + "fmt" + "io" + "log" + "os" + "path" + "strings" + + "camlistore.org/pkg/env" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/osutil" + _ "camlistore.org/pkg/wkfs/gcs" + "golang.org/x/net/context" + + "google.golang.org/cloud/compute/metadata" + "google.golang.org/cloud/logging" +) + +func init() { + if !env.OnGCE() { + return + } + osutil.RegisterConfigDirFunc(func() string { + v, _ := metadata.InstanceAttributeValue("camlistore-config-dir") + if v == "" { + return v + } + return path.Clean("/gcs/" + strings.TrimPrefix(v, "gs://")) + }) + jsonconfig.RegisterFunc("_gce_instance_meta", func(c *jsonconfig.ConfigParser, v []interface{}) (interface{}, error) { + if len(v) != 1 { + return nil, errors.New("only 1 argument supported after _gce_instance_meta") + } + attr, ok := v[0].(string) + if !ok { + return nil, errors.New("expected argument after _gce_instance_meta to be a string") + } + val, err := metadata.InstanceAttributeValue(attr) + if err != nil { + return nil, fmt.Errorf("error reading GCE instance attribute %q: %v", attr, err) + } + return val, nil + }) +} + +// LogWriter returns an environment-specific io.Writer suitable for passing +// to log.SetOutput. It will also include writing to os.Stderr as well. 
+func LogWriter() (w io.Writer) { + w = os.Stderr + if !env.OnGCE() { + return + } + projID, err := metadata.ProjectID() + if projID == "" { + log.Printf("Error getting project ID: %v", err) + return + } + scopes, _ := metadata.Scopes("default") + haveScope := func(scope string) bool { + for _, x := range scopes { + if x == scope { + return true + } + } + return false + } + if !haveScope(logging.Scope) { + log.Printf("when this Google Compute Engine VM instance was created, it wasn't granted enough access to use Google Cloud Logging (Scope URL: %v).", logging.Scope) + return + } + + logc, err := logging.NewClient(context.Background(), projID, "camlistored-stderr") + if err != nil { + log.Printf("Error creating Google logging client: %v", err) + return + } + return io.MultiWriter(w, logc.Writer(logging.Debug)) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/mem.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/mem.go new file mode 100644 index 00000000..c00ed2d1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/mem.go @@ -0,0 +1,28 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +var memUsage func() int64 + +// MemUsage returns the number of bytes used by the process. +// On unsupported operating systems, it returns zero. 
+func MemUsage() int64 { + if f := memUsage; f != nil { + return f() + } + return 0 +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/mem_unix.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/mem_unix.go new file mode 100644 index 00000000..743f23b0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/mem_unix.go @@ -0,0 +1,39 @@ +// +build linux,!appengine darwin freebsd + +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "runtime" + "syscall" +) + +func init() { + memUsage = memUnix +} + +func memUnix() int64 { + var ru syscall.Rusage + syscall.Getrusage(0, &ru) + if runtime.GOOS == "linux" { + // in KB + return int64(ru.Maxrss) << 10 + } + // In bytes: + return int64(ru.Maxrss) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/openurl.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/openurl.go new file mode 100644 index 00000000..e33b0f6d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/openurl.go @@ -0,0 +1,34 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "os/exec" + "runtime" +) + +func OpenURL(url string) error { + if runtime.GOOS == "windows" { + return exec.Command("cmd.exe", "/C", "start "+url).Run() + } + + if runtime.GOOS == "darwin" { + return exec.Command("open", url).Run() + } + + return exec.Command("xdg-open", url).Run() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/osutil.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/osutil.go new file mode 100644 index 00000000..dac0c197 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/osutil.go @@ -0,0 +1,36 @@ +/* +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package osutil provides operating system-specific path information, +// and other utility functions. +package osutil + +import ( + "errors" + "os" +) + +// ErrNotSupported is returned by functions (like Mkfifo and Mksocket) +// when the underlying operating system or environment doesn't support +// the operation. +var ErrNotSupported = errors.New("operation not supported") + +// DirExists reports whether dir exists. 
Errors are ignored and are +// reported as false. +func DirExists(dir string) bool { + fi, err := os.Stat(dir) + return err == nil && fi.IsDir() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/paths.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/paths.go new file mode 100644 index 00000000..1358e2ec --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/paths.go @@ -0,0 +1,283 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "flag" + "log" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + + "camlistore.org/pkg/buildinfo" +) + +// HomeDir returns the path to the user's home directory. +// It returns the empty string if the value isn't known. +func HomeDir() string { + failInTests() + if runtime.GOOS == "windows" { + return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + } + return os.Getenv("HOME") +} + +// Username returns the current user's username, as +// reported by the relevant environment variable. 
+func Username() string { + if runtime.GOOS == "windows" { + return os.Getenv("USERNAME") + } + return os.Getenv("USER") +} + +var cacheDirOnce sync.Once + +func CacheDir() string { + cacheDirOnce.Do(makeCacheDir) + return cacheDir() +} + +func cacheDir() string { + if d := os.Getenv("CAMLI_CACHE_DIR"); d != "" { + return d + } + failInTests() + switch runtime.GOOS { + case "darwin": + return filepath.Join(HomeDir(), "Library", "Caches", "Camlistore") + case "windows": + // Per http://technet.microsoft.com/en-us/library/cc749104(v=ws.10).aspx + // these should both exist. But that page overwhelms me. Just try them + // both. This seems to work. + for _, ev := range []string{"TEMP", "TMP"} { + if v := os.Getenv(ev); v != "" { + return filepath.Join(v, "Camlistore") + } + } + panic("No Windows TEMP or TMP environment variables found; please file a bug report.") + } + if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" { + return filepath.Join(xdg, "camlistore") + } + return filepath.Join(HomeDir(), ".cache", "camlistore") +} + +func makeCacheDir() { + err := os.MkdirAll(cacheDir(), 0700) + if err != nil { + log.Fatalf("Could not create cacheDir %v: %v", cacheDir(), err) + } +} + +func CamliVarDir() string { + if d := os.Getenv("CAMLI_VAR_DIR"); d != "" { + return d + } + failInTests() + switch runtime.GOOS { + case "windows": + return filepath.Join(os.Getenv("APPDATA"), "Camlistore") + case "darwin": + return filepath.Join(HomeDir(), "Library", "Camlistore") + } + return filepath.Join(HomeDir(), "var", "camlistore") +} + +func CamliBlobRoot() string { + return filepath.Join(CamliVarDir(), "blobs") +} + +// RegisterConfigDirFunc registers a func f to return the Camlistore configuration directory. +// It may skip by returning the empty string. 
+func RegisterConfigDirFunc(f func() string) { + configDirFuncs = append(configDirFuncs, f) +} + +var configDirFuncs []func() string + +func CamliConfigDir() string { + if p := os.Getenv("CAMLI_CONFIG_DIR"); p != "" { + return p + } + for _, f := range configDirFuncs { + if v := f(); v != "" { + return v + } + } + + failInTests() + return camliConfigDir() +} + +func camliConfigDir() string { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "Camlistore") + } + if xdg := os.Getenv("XDG_CONFIG_HOME"); xdg != "" { + return filepath.Join(xdg, "camlistore") + } + return filepath.Join(HomeDir(), ".config", "camlistore") +} + +func UserServerConfigPath() string { + return filepath.Join(CamliConfigDir(), "server-config.json") +} + +func UserClientConfigPath() string { + return filepath.Join(CamliConfigDir(), "client-config.json") +} + +// If set, flagSecretRing overrides the JSON config file +// ~/.config/camlistore/client-config.json +// (i.e. UserClientConfigPath()) "identitySecretRing" key. +var ( + flagSecretRing string + secretRingFlagAdded bool +) + +func AddSecretRingFlag() { + flag.StringVar(&flagSecretRing, "secret-keyring", "", "GnuPG secret keyring file to use.") + secretRingFlagAdded = true +} + +// ExplicitSecretRingFile returns the path to the user's GPG secret ring +// file and true if it was ever set through the --secret-keyring flag or +// the CAMLI_SECRET_RING var. It returns "", false otherwise. +// Use of this function requires the program to call AddSecretRingFlag, +// and before flag.Parse is called. +func ExplicitSecretRingFile() (string, bool) { + if !secretRingFlagAdded { + panic("proper use of ExplicitSecretRingFile requires exposing flagSecretRing with AddSecretRingFlag") + } + if flagSecretRing != "" { + return flagSecretRing, true + } + if e := os.Getenv("CAMLI_SECRET_RING"); e != "" { + return e, true + } + return "", false +} + +// DefaultSecretRingFile returns the path to the default GPG secret +// keyring. 
It is not influenced by any flag or CAMLI* env var. +func DefaultSecretRingFile() string { + return filepath.Join(camliConfigDir(), "identity-secring.gpg") +} + +// identitySecretRing returns the path to the default GPG +// secret keyring. It is still affected by CAMLI_CONFIG_DIR. +func identitySecretRing() string { + return filepath.Join(CamliConfigDir(), "identity-secring.gpg") +} + +// SecretRingFile returns the path to the user's GPG secret ring file. +// The value comes from either the --secret-keyring flag (if previously +// registered with AddSecretRingFlag), or the CAMLI_SECRET_RING environment +// variable, or the operating system default location. +func SecretRingFile() string { + if flagSecretRing != "" { + return flagSecretRing + } + if e := os.Getenv("CAMLI_SECRET_RING"); e != "" { + return e + } + return identitySecretRing() +} + +// DefaultTLSCert returns the path to the default TLS certificate +// file that is used (creating if necessary) when TLS is specified +// without the cert file. +func DefaultTLSCert() string { + return filepath.Join(CamliConfigDir(), "tls.crt") +} + +// DefaultTLSKey returns the path to the default TLS key +// file that is used (creating if necessary) when TLS is specified +// without the key file. +func DefaultTLSKey() string { + return filepath.Join(CamliConfigDir(), "tls.key") +} + +// Find the correct absolute path corresponding to a relative path, +// searching the following sequence of directories: +// 1. Working Directory +// 2. CAMLI_CONFIG_DIR (deprecated, will complain if this is on env) +// 3. (windows only) APPDATA/camli +// 4. 
All directories in CAMLI_INCLUDE_PATH (standard PATH form for OS) +func FindCamliInclude(configFile string) (absPath string, err error) { + // Try to open as absolute / relative to CWD + _, err = os.Stat(configFile) + if err == nil { + return configFile, nil + } + if filepath.IsAbs(configFile) { + // End of the line for absolute path + return "", err + } + + // Try the config dir + configDir := CamliConfigDir() + if _, err = os.Stat(filepath.Join(configDir, configFile)); err == nil { + return filepath.Join(configDir, configFile), nil + } + + // Finally, search CAMLI_INCLUDE_PATH + p := os.Getenv("CAMLI_INCLUDE_PATH") + for _, d := range strings.Split(p, string(filepath.ListSeparator)) { + if _, err = os.Stat(filepath.Join(d, configFile)); err == nil { + return filepath.Join(d, configFile), nil + } + } + + return "", os.ErrNotExist +} + +// GoPackagePath returns the path to the provided Go package's +// source directory. +// pkg may be a path prefix without any *.go files. +// The error is os.ErrNotExist if GOPATH is unset or the directory +// doesn't exist in any GOPATH component. +func GoPackagePath(pkg string) (path string, err error) { + gp := os.Getenv("GOPATH") + if gp == "" { + return path, os.ErrNotExist + } + for _, p := range filepath.SplitList(gp) { + dir := filepath.Join(p, "src", filepath.FromSlash(pkg)) + fi, err := os.Stat(dir) + if os.IsNotExist(err) { + continue + } + if err != nil { + return "", err + } + if !fi.IsDir() { + continue + } + return dir, nil + } + return path, os.ErrNotExist +} + +func failInTests() { + if buildinfo.TestingLinked() { + panic("Unexpected non-hermetic use of host configuration during testing. 
(alternatively: the 'testing' package got accidentally linked in)") + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/paths_test.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/paths_test.go new file mode 100644 index 00000000..00c5ee6d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/paths_test.go @@ -0,0 +1,121 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "fmt" + "os" + "path/filepath" + "testing" +) + +// Creates a file with the content "test" at path +func createTestInclude(path string) error { + // Create a config file for FindCamliInclude to play with + cf, e := os.Create(path) + if e != nil { + return e + } + fmt.Fprintf(cf, "test") + return cf.Close() +} + +// Calls FindCamliInclude to open path, and checks that it contains "test" +func checkOpen(t *testing.T, path string) { + found, e := FindCamliInclude(path) + if e != nil { + t.Errorf("Failed to find %v", path) + return + } + var file *os.File + file, e = os.Open(found) + if e != nil { + t.Errorf("Failed to open %v", path) + } else { + var d [10]byte + if n, _ := file.Read(d[:]); n != 4 { + t.Errorf("Read incorrect number of chars from test.config, wrong file?") + } + if string(d[0:4]) != "test" { + t.Errorf("Wrong test file content: %v", string(d[0:4])) + } + file.Close() + } +} + +// Test for error when file doesn't exist +func TestOpenCamliIncludeNoFile(t *testing.T) { + // Test that error occurs if 
no such file + const notExist = "this_config_doesnt_exist.config" + + defer os.Setenv("CAMLI_CONFIG_DIR", os.Getenv("CAMLI_CONFIG_DIR")) + os.Setenv("CAMLI_CONFIG_DIR", filepath.Join(os.TempDir(), "/x/y/z/not-exist")) + + _, e := FindCamliInclude(notExist) + if e == nil { + t.Errorf("Successfully opened config which doesn't exist: %v", notExist) + } +} + +// Test for when a file exists in CWD +func TestOpenCamliIncludeCWD(t *testing.T) { + const path string = "TestOpenCamliIncludeCWD.config" + if e := createTestInclude(path); e != nil { + t.Errorf("Couldn't create test config file, aborting test: %v", e) + return + } + defer os.Remove(path) + + checkOpen(t, path) +} + +// Test for when a file exists in CAMLI_CONFIG_DIR +func TestOpenCamliIncludeDir(t *testing.T) { + const name string = "TestOpenCamliIncludeDir.config" + if e := createTestInclude("/tmp/" + name); e != nil { + t.Errorf("Couldn't create test config file, aborting test: %v", e) + return + } + defer os.Remove("/tmp/" + name) + os.Setenv("CAMLI_CONFIG_DIR", "/tmp") + defer os.Setenv("CAMLI_CONFIG_DIR", "") + + checkOpen(t, name) +} + +// Test for when a file exists in CAMLI_INCLUDE_PATH +func TestOpenCamliIncludePath(t *testing.T) { + const name string = "TestOpenCamliIncludePath.config" + if e := createTestInclude("/tmp/" + name); e != nil { + t.Errorf("Couldn't create test config file, aborting test: %v", e) + return + } + defer os.Remove("/tmp/" + name) + defer os.Setenv("CAMLI_INCLUDE_PATH", "") + + defer os.Setenv("CAMLI_CONFIG_DIR", os.Getenv("CAMLI_CONFIG_DIR")) + os.Setenv("CAMLI_CONFIG_DIR", filepath.Join(os.TempDir(), "/x/y/z/not-exist")) + + os.Setenv("CAMLI_INCLUDE_PATH", "/tmp") + checkOpen(t, name) + + os.Setenv("CAMLI_INCLUDE_PATH", "/not/a/camli/config/dir:/tmp") + checkOpen(t, name) + + os.Setenv("CAMLI_INCLUDE_PATH", "/not/a/camli/config/dir:/tmp:/another/fake/camli/dir") + checkOpen(t, name) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/restart_freebsd.go 
b/vendor/github.com/camlistore/camlistore/pkg/osutil/restart_freebsd.go new file mode 100644 index 00000000..835dca70 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/restart_freebsd.go @@ -0,0 +1,51 @@ +// +build freebsd + +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "syscall" + "unsafe" +) + +func init() { + osSelfPath = selfPathFreeBSD +} + +func selfPathFreeBSD() (string, error) { + mib := [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1} + + n := uintptr(0) + // get length + _, _, err := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0) + if err != 0 { + return "", err + } + if n == 0 { // shouldn't happen + return "", nil + } + buf := make([]byte, n) + _, _, err = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0) + if err != 0 { + return "", err + } + if n == 0 { // shouldn't happen + return "", nil + } + return string(buf[:n-1]), nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/restart_stub.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/restart_stub.go new file mode 100644 index 00000000..cfb51a6a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/restart_stub.go @@ -0,0 +1,39 @@ +// +build appengine + +/* +Copyright 2012 The Camlistore 
Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "errors" + "log" + "runtime" +) + +// SelfPath returns the path of the executable for the currently running +// process. +func SelfPath() (string, error) { + return "", errors.New("SelfPath not implemented on App Engine.") +} + +// RestartProcess returns an error if things couldn't be +// restarted. On success, this function never returns +// because the process becomes the new process. +func RestartProcess() error { + log.Print("RestartProcess not implemented on this platform.") + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/restart_unix.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/restart_unix.go new file mode 100644 index 00000000..84d5a5cb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/restart_unix.go @@ -0,0 +1,68 @@ +// +build !appengine +// +build linux darwin freebsd netbsd openbsd solaris + +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "errors" + "fmt" + "os" + "os/exec" + "runtime" + "syscall" +) + +// if non-nil, osSelfPath is used from selfPath. +var osSelfPath func() (string, error) + +// TODO(mpl): document the symlink behaviour in SelfPath for the BSDs when +// I know for sure. + +// SelfPath returns the path of the executable for the currently running +// process. At least on linux, the returned path is a symlink to the actual +// executable. +func SelfPath() (string, error) { + if f := osSelfPath; f != nil { + return f() + } + switch runtime.GOOS { + case "linux": + return "/proc/self/exe", nil + case "netbsd": + return "/proc/curproc/exe", nil + case "openbsd": + return "/proc/curproc/file", nil + case "darwin": + // TODO(mpl): maybe do the right thing for darwin too, but that may require changes to runtime. + // See https://codereview.appspot.com/6736069/ + return exec.LookPath(os.Args[0]) + } + return "", errors.New("SelfPath not implemented for " + runtime.GOOS) +} + +// RestartProcess returns an error if things couldn't be +// restarted. On success, this function never returns +// because the process becomes the new process. +func RestartProcess() error { + path, err := SelfPath() + if err != nil { + return fmt.Errorf("RestartProcess failed: %v", err) + } + return syscall.Exec(path, os.Args, os.Environ()) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/restart_windows.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/restart_windows.go new file mode 100644 index 00000000..3363ad07 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/restart_windows.go @@ -0,0 +1,54 @@ +// +build windows + +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "log" + "syscall" + "unicode/utf16" + "unsafe" +) + +// SelfPath returns the path of the executable for the currently running +// process. +func SelfPath() (string, error) { + kernel32, err := syscall.LoadDLL("kernel32.dll") + if err != nil { + return "", err + } + sysproc, err := kernel32.FindProc("GetModuleFileNameW") + if err != nil { + return "", err + } + b := make([]uint16, syscall.MAX_PATH) + r, _, err := sysproc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))) + n := uint32(r) + if n == 0 { + return "", err + } + return string(utf16.Decode(b[0:n])), nil +} + +// RestartProcess returns an error if things couldn't be +// restarted. On success, this function never returns +// because the process becomes the new process. +func RestartProcess() error { + log.Print("RestartProcess not implemented on this platform.") + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/syscall_appengine.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/syscall_appengine.go new file mode 100644 index 00000000..2f0181ae --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/syscall_appengine.go @@ -0,0 +1,22 @@ +// +build appengine + +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +func Mkfifo(path string, mode uint32) error { return ErrNotSupported } +func Mksocket(path string) error { return ErrNotSupported } diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/syscall_posix.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/syscall_posix.go new file mode 100644 index 00000000..726397d3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/syscall_posix.go @@ -0,0 +1,52 @@ +// +build !windows,!appengine,!solaris + +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "net" + "os" + "path/filepath" + "syscall" +) + +func Mkfifo(path string, mode uint32) error { + return syscall.Mkfifo(path, mode) +} + +// Mksocket creates a socket file (a Unix Domain Socket) named path. 
+func Mksocket(path string) error { + dir := filepath.Dir(path) + base := filepath.Base(path) + tmp := filepath.Join(dir, "."+base) + l, err := net.ListenUnix("unix", &net.UnixAddr{tmp, "unix"}) + if err != nil { + return err + } + + err = os.Rename(tmp, path) + if err != nil { + l.Close() + os.Remove(tmp) // Ignore error + return err + } + + l.Close() + + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/syscall_solaris.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/syscall_solaris.go new file mode 100644 index 00000000..db0593c8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/syscall_solaris.go @@ -0,0 +1,53 @@ +// +build solaris + +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +import ( + "net" + "os" + "path/filepath" + "syscall" +) + +func Mkfifo(path string, mode uint32) error { + // Mkfifo is missing from syscall, thus call Mknod as it does on Linux. + return syscall.Mknod(path, mode|syscall.S_IFIFO, 0) +} + +// Mksocket creates a socket file (a Unix Domain Socket) named path. 
+func Mksocket(path string) error { + dir := filepath.Dir(path) + base := filepath.Base(path) + tmp := filepath.Join(dir, "."+base) + l, err := net.ListenUnix("unix", &net.UnixAddr{tmp, "unix"}) + if err != nil { + return err + } + + err = os.Rename(tmp, path) + if err != nil { + l.Close() + os.Remove(tmp) // Ignore error + return err + } + + l.Close() + + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/osutil/syscall_windows.go b/vendor/github.com/camlistore/camlistore/pkg/osutil/syscall_windows.go new file mode 100644 index 00000000..7eedb32d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/osutil/syscall_windows.go @@ -0,0 +1,20 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package osutil + +func Mkfifo(path string, mode uint32) error { return ErrNotSupported } +func Mksocket(path string) error { return ErrNotSupported } diff --git a/vendor/github.com/camlistore/camlistore/pkg/pools/pools.go b/vendor/github.com/camlistore/camlistore/pkg/pools/pools.go new file mode 100644 index 00000000..4fc2f2e9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/pools/pools.go @@ -0,0 +1,41 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pools + +import ( + "bytes" + "sync" +) + +// bytesBuffer is a pool of *bytes.Buffer. +// Callers must Reset the buffer after obtaining it. +var bytesBuffer = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// BytesBuffer returns an empty bytes.Buffer. +// It should be returned with PutBuffer. +func BytesBuffer() *bytes.Buffer { + buf := bytesBuffer.Get().(*bytes.Buffer) + buf.Reset() + return buf +} + +// PutBuffer returns a bytes.Buffer previously obtained with BytesBuffer. +func PutBuffer(buf *bytes.Buffer) { + bytesBuffer.Put(buf) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/publish/types.go b/vendor/github.com/camlistore/camlistore/pkg/publish/types.go new file mode 100644 index 00000000..8e2f43d2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/publish/types.go @@ -0,0 +1,89 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package publish exposes the types and functions that can be used +// from a Go template, for publishing. 
+package publish + +import ( + "html/template" + + "camlistore.org/pkg/search" +) + +// SubjectPage is the data structure used when serving a +// publishing template. It contains the functions that can be called +// from the template. +type SubjectPage struct { + Header func() *PageHeader + File func() *PageFile + Members func() *PageMembers +} + +// PageHeader contains the data available to the template, +// and relevant to the page header. +type PageHeader struct { + Title string // Page title. + CSSFiles []string // Available CSS files. + JSDeps []string // Dependencies (for e.g closure) that can/should be included as javascript files. + CamliClosure template.JS // Closure namespace defined in the provided js. e.g camlistore.GalleryPage from pics.js + Subject string // Subject of this page (i.e the object which is described and published). + Meta string // All the metadata describing the subject of this page. + ViewerIsOwner bool // Whether the viewer of the page is also the owner of the displayed subject. (localhost check for now.) +} + +// PageFile contains the file related data available to the subject template, +// if the page describes some file contents. +type PageFile struct { + FileName string + Size int64 + MIMEType string + IsImage bool + DownloadURL string + ThumbnailURL string + DomID string + Nav func() *Nav +} + +// Nav holds links to the previous, next, and parent elements, +// when displaying members. +type Nav struct { + ParentPath string + PrevPath string + NextPath string +} + +// PageMembers contains the data relevant to the members if the published subject +// is a permanode with members. +type PageMembers struct { + SubjectPath string // URL prefix path to the subject (i.e the permanode). + ZipName string // Name of the downloadable zip file which contains all the members. + Members []*search.DescribedBlob // List of the members. + Description func(*search.DescribedBlob) string // Returns the description of the given member. 
+ Title func(*search.DescribedBlob) string // Returns the title for the given member. + Path func(*search.DescribedBlob) string // Returns the url prefix path to the given the member. + DomID func(*search.DescribedBlob) string // Returns the Dom ID of the given member. + FileInfo func(*search.DescribedBlob) *MemberFileInfo // Returns some file info if the given member is a file permanode. +} + +// MemberFileInfo contains the file related data available for each member, +// if the member is the permanode for a file. +type MemberFileInfo struct { + FileName string + FileDomID string + FilePath string + FileThumbnailURL string +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/readerutil/countingreader.go b/vendor/github.com/camlistore/camlistore/pkg/readerutil/countingreader.go new file mode 100644 index 00000000..51b33137 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/readerutil/countingreader.go @@ -0,0 +1,32 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package readerutil + +import "io" + +// CountingReader wraps a Reader, incrementing N by the number of +// bytes read. No locking is performed. 
+type CountingReader struct { + Reader io.Reader + N *int64 +} + +func (cr CountingReader) Read(p []byte) (n int, err error) { + n, err = cr.Reader.Read(p) + *cr.N += int64(n) + return +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/readerutil/opener.go b/vendor/github.com/camlistore/camlistore/pkg/readerutil/opener.go new file mode 100644 index 00000000..7e7122ea --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/readerutil/opener.go @@ -0,0 +1,125 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package readerutil + +import ( + "os" + "sync" + + "camlistore.org/pkg/singleflight" + "camlistore.org/pkg/types" +) + +var ( + openerGroup singleflight.Group + + openFileMu sync.Mutex // guards openFiles + openFiles = make(map[string]*openFile) +) + +type openFile struct { + *os.File + path string // map key of openFiles + refCount int +} + +type openFileHandle struct { + closed bool + *openFile +} + +func (f *openFileHandle) Close() error { + openFileMu.Lock() + if f.closed { + openFileMu.Unlock() + return nil + } + f.closed = true + f.refCount-- + if f.refCount < 0 { + panic("unexpected negative refcount") + } + zero := f.refCount == 0 + if zero { + delete(openFiles, f.path) + } + openFileMu.Unlock() + if !zero { + return nil + } + return f.openFile.File.Close() +} + +type openingFile struct { + path string + mu sync.RWMutex // write-locked until Open is done + + // Results, once mu is unlocked: + of *openFile + err error +} + +// OpenSingle opens the given file path for reading, reusing existing file descriptors +// when possible. +func OpenSingle(path string) (types.ReaderAtCloser, error) { + openFileMu.Lock() + of := openFiles[path] + if of != nil { + of.refCount++ + openFileMu.Unlock() + return &openFileHandle{false, of}, nil + } + openFileMu.Unlock() // release the lock while we call os.Open + + winner := false // this goroutine made it into Do's func + + // Returns an *openFile + resi, err := openerGroup.Do(path, func() (interface{}, error) { + winner = true + f, err := os.Open(path) + if err != nil { + return nil, err + } + of := &openFile{ + File: f, + path: path, + refCount: 1, + } + openFileMu.Lock() + openFiles[path] = of + openFileMu.Unlock() + return of, nil + }) + if err != nil { + return nil, err + } + of = resi.(*openFile) + + // If our os.Open was dup-suppressed, we have to increment our + // reference count. + if !winner { + openFileMu.Lock() + if of.refCount == 0 { + // Winner already closed it. Try again (rare). 
+ openFileMu.Unlock() + return OpenSingle(path) + } + of.refCount++ + openFileMu.Unlock() + } + return &openFileHandle{false, of}, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/readerutil/opener_test.go b/vendor/github.com/camlistore/camlistore/pkg/readerutil/opener_test.go new file mode 100644 index 00000000..41fa37b5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/readerutil/opener_test.go @@ -0,0 +1,77 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package readerutil + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "runtime" + "testing" +) + +func TestOpenSingle(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + f, err := ioutil.TempFile("", "foo") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + contents := []byte("Some file contents") + if _, err := f.Write(contents); err != nil { + t.Fatal(err) + } + f.Close() + + const j = 4 + errc := make(chan error, j) + for i := 1; i < j; i++ { + go func() { + buf := make([]byte, len(contents)) + for i := 0; i < 400; i++ { + rac, err := OpenSingle(f.Name()) + if err != nil { + errc <- err + return + } + n, err := rac.ReadAt(buf, 0) + if err != nil { + errc <- err + return + } + if n != len(contents) || !bytes.Equal(buf, contents) { + errc <- fmt.Errorf("read %d, %q; want %d, %q", n, buf, len(contents), contents) + return + } + if err := rac.Close(); err != nil { + errc <- err + return + } + } + errc <- nil + }() + } + for i := 1; i < j; i++ { + if err := <-errc; err != nil { + t.Error(err) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/readerutil/readersize.go b/vendor/github.com/camlistore/camlistore/pkg/readerutil/readersize.go new file mode 100644 index 00000000..4cfc4116 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/readerutil/readersize.go @@ -0,0 +1,52 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package readerutil provides and operates on io.Readers. +package readerutil + +import ( + "bytes" + "io" + "os" +) + +// ReaderSize tries to determine the length of r. +func ReaderSize(r io.Reader) (size int64, ok bool) { + switch rt := r.(type) { + case io.Seeker: + pos, err := rt.Seek(0, os.SEEK_CUR) + if err != nil { + return + } + end, err := rt.Seek(0, os.SEEK_END) + if err != nil { + return + } + size = end - pos + pos1, err := rt.Seek(pos, os.SEEK_SET) + if err != nil || pos1 != pos { + msg := "failed to restore seek position" + if err != nil { + msg += ": " + err.Error() + } + panic(msg) + } + return size, true + case *bytes.Buffer: + return int64(rt.Len()), true + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/readerutil/readersize_test.go b/vendor/github.com/camlistore/camlistore/pkg/readerutil/readersize_test.go new file mode 100644 index 00000000..ed7a9e83 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/readerutil/readersize_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package readerutil + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "testing" +) + +const text = "HelloWorld" + +type testSrc struct { + name string + src io.Reader + want int64 +} + +func (tsrc *testSrc) run(t *testing.T) { + n, ok := ReaderSize(tsrc.src) + if !ok { + t.Fatalf("failed to read size for %q", tsrc.name) + } + if n != tsrc.want { + t.Fatalf("wanted %v, got %v", tsrc.want, n) + } +} + +func TestBytesBuffer(t *testing.T) { + buf := bytes.NewBuffer([]byte(text)) + tsrc := &testSrc{"buffer", buf, int64(len(text))} + tsrc.run(t) +} + +func TestSeeker(t *testing.T) { + f, err := ioutil.TempFile("", "camliTestReaderSize") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + defer f.Close() + size, err := f.Write([]byte(text)) + if err != nil { + t.Fatal(err) + } + pos, err := f.Seek(5, 0) + if err != nil { + t.Fatal(err) + } + tsrc := &testSrc{"seeker", f, int64(size) - pos} + tsrc.run(t) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/rollsum/rollsum.go b/vendor/github.com/camlistore/camlistore/pkg/rollsum/rollsum.go new file mode 100644 index 00000000..8f84b46e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/rollsum/rollsum.go @@ -0,0 +1,81 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rollsum implements rolling checksums similar to apenwarr's bup, which +// is similar to librsync. 
+// +// The bup project is at https://github.com/apenwarr/bup and its splitting in +// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c +package rollsum + +import () + +const windowSize = 64 +const charOffset = 31 + +const blobBits = 13 +const blobSize = 1 << blobBits // 8k + +type RollSum struct { + s1, s2 uint32 + window [windowSize]uint8 + wofs int +} + +func New() *RollSum { + return &RollSum{ + s1: windowSize * charOffset, + s2: windowSize * (windowSize - 1) * charOffset, + } +} + +func (rs *RollSum) add(drop, add uint8) { + rs.s1 += uint32(add) - uint32(drop) + rs.s2 += rs.s1 - uint32(windowSize)*uint32(drop+charOffset) +} + +func (rs *RollSum) Roll(ch byte) { + rs.add(rs.window[rs.wofs], ch) + rs.window[rs.wofs] = ch + rs.wofs = (rs.wofs + 1) % windowSize +} + +// OnSplit returns whether at least 13 consecutive trailing bits of +// the current checksum are set the same way. +func (rs *RollSum) OnSplit() bool { + return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1)) +} + +// OnSplit returns whether at least n consecutive trailing bits +// of the current checksum are set the same way. +func (rs *RollSum) OnSplitWithBits(n uint32) bool { + mask := (uint32(1) << n) - 1 + return rs.s2&mask == (^uint32(0))&mask +} + +func (rs *RollSum) Bits() int { + bits := blobBits + rsum := rs.Digest() + rsum >>= blobBits + for ; (rsum>>1)&1 != 0; bits++ { + rsum >>= 1 + } + return bits +} + +func (rs *RollSum) Digest() uint32 { + return (rs.s1 << 16) | (rs.s2 & 0xffff) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/rollsum/rollsum_test.go b/vendor/github.com/camlistore/camlistore/pkg/rollsum/rollsum_test.go new file mode 100644 index 00000000..edd4cff0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/rollsum/rollsum_test.go @@ -0,0 +1,79 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rollsum + +import ( + "math/rand" + "testing" +) + +func TestSum(t *testing.T) { + var buf [100000]uint8 + rnd := rand.New(rand.NewSource(4)) + for i := range buf { + buf[i] = uint8(rnd.Intn(256)) + } + + sum := func(offset, len int) uint32 { + rs := New() + for count := offset; count < len; count++ { + rs.Roll(buf[count]) + } + return rs.Digest() + } + + sum1a := sum(0, len(buf)) + sum1b := sum(1, len(buf)) + sum2a := sum(len(buf)-windowSize*5/2, len(buf)-windowSize) + sum2b := sum(0, len(buf)-windowSize) + sum3a := sum(0, windowSize+3) + sum3b := sum(3, windowSize+3) + + if sum1a != sum1b { + t.Errorf("sum1a=%d sum1b=%d", sum1a, sum1b) + } + if sum2a != sum2b { + t.Errorf("sum2a=%d sum2b=%d", sum2a, sum2b) + } + if sum3a != sum3b { + t.Errorf("sum3a=%d sum3b=%d", sum3a, sum3b) + } +} + +func BenchmarkRollsum(b *testing.B) { + const bufSize = 5 << 20 + buf := make([]byte, bufSize) + for i := range buf { + buf[i] = byte(rand.Int63()) + } + + b.ResetTimer() + rs := New() + splits := 0 + for i := 0; i < b.N; i++ { + splits = 0 + for _, b := range buf { + rs.Roll(b) + if rs.OnSplit() { + _ = rs.Bits() + splits++ + } + } + } + b.SetBytes(bufSize) + b.Logf("num splits = %d; every %d bytes", splits, int(float64(bufSize)/float64(splits))) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/.gitignore b/vendor/github.com/camlistore/camlistore/pkg/schema/.gitignore new file mode 100644 index 00000000..cde0389a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/.gitignore @@ -0,0 +1,3 @@ +_test* +*.out +*.[865] diff --git 
a/vendor/github.com/camlistore/camlistore/pkg/schema/blob.go b/vendor/github.com/camlistore/camlistore/pkg/schema/blob.go new file mode 100644 index 00000000..a69eab39 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/blob.go @@ -0,0 +1,589 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strings" + "time" + "unicode/utf8" + + "camlistore.org/pkg/blob" +) + +// A MissingFieldError represents a missing JSON field in a schema blob. +type MissingFieldError string + +func (e MissingFieldError) Error() string { + return fmt.Sprintf("schema: missing field %q", string(e)) +} + +// IsMissingField returns whether error is of type MissingFieldError. +func IsMissingField(err error) bool { + _, ok := err.(MissingFieldError) + return ok +} + +// AnyBlob represents any type of schema blob. +type AnyBlob interface { + Blob() *Blob +} + +// Buildable returns a Builder from a base. +type Buildable interface { + Builder() *Builder +} + +// A Blob represents a Camlistore schema blob. +// It is immutable. +type Blob struct { + br blob.Ref + str string + ss *superset +} + +// Type returns the blob's "camliType" field. +func (b *Blob) Type() string { return b.ss.Type } + +// BlobRef returns the schema blob's blobref. +func (b *Blob) BlobRef() blob.Ref { return b.br } + +// JSON returns the JSON bytes of the schema blob. 
+func (b *Blob) JSON() string { return b.str } + +// Blob returns itself, so it satisifies the AnyBlob interface. +func (b *Blob) Blob() *Blob { return b } + +// PartsSize returns the number of bytes represented by the "parts" field. +// TODO: move this off *Blob to a specialized type. +func (b *Blob) PartsSize() int64 { + n := int64(0) + for _, part := range b.ss.Parts { + n += int64(part.Size) + } + return n +} + +// FileName returns the file, directory, or symlink's filename, or the empty string. +// TODO: move this off *Blob to a specialized type. +func (b *Blob) FileName() string { + return b.ss.FileNameString() +} + +// ClaimDate returns the "claimDate" field. +// If there is no claimDate, the error will be a MissingFieldError. +func (b *Blob) ClaimDate() (time.Time, error) { + var ct time.Time + claimDate := b.ss.ClaimDate + if claimDate.IsZero() { + return ct, MissingFieldError("claimDate") + } + return claimDate.Time(), nil +} + +// ByteParts returns the "parts" field. The caller owns the returned +// slice. +func (b *Blob) ByteParts() []BytesPart { + // TODO: move this method off Blob, and make the caller go + // through a (*Blob).ByteBackedBlob() comma-ok accessor first. + s := make([]BytesPart, len(b.ss.Parts)) + for i, part := range b.ss.Parts { + s[i] = *part + } + return s +} + +func (b *Blob) Builder() *Builder { + var m map[string]interface{} + dec := json.NewDecoder(strings.NewReader(b.str)) + dec.UseNumber() + err := dec.Decode(&m) + if err != nil { + panic("failed to decode previously-thought-valid Blob's JSON: " + err.Error()) + } + return &Builder{m} +} + +// AsClaim returns a Claim if the receiver Blob has all the required fields. +func (b *Blob) AsClaim() (c Claim, ok bool) { + if b.ss.Signer.Valid() && b.ss.Sig != "" && b.ss.ClaimType != "" && !b.ss.ClaimDate.IsZero() { + return Claim{b}, true + } + return +} + +// AsShare returns a Share if the receiver Blob has all the required fields. 
+func (b *Blob) AsShare() (s Share, ok bool) { + c, isClaim := b.AsClaim() + if !isClaim { + return + } + + if ClaimType(b.ss.ClaimType) == ShareClaim && b.ss.AuthType == ShareHaveRef && (b.ss.Target.Valid() || b.ss.Search != nil) { + return Share{c}, true + } + return s, false +} + +// DirectoryEntries the "entries" field if valid and b's type is "directory". +func (b *Blob) DirectoryEntries() (br blob.Ref, ok bool) { + if b.Type() != "directory" { + return + } + return b.ss.Entries, true +} + +func (b *Blob) StaticSetMembers() []blob.Ref { + if b.Type() != "static-set" { + return nil + } + s := make([]blob.Ref, 0, len(b.ss.Members)) + for _, ref := range b.ss.Members { + if ref.Valid() { + s = append(s, ref) + } + } + return s +} + +func (b *Blob) ShareAuthType() string { + s, ok := b.AsShare() + if !ok { + return "" + } + return s.AuthType() +} + +func (b *Blob) ShareTarget() blob.Ref { + s, ok := b.AsShare() + if !ok { + return blob.Ref{} + } + return s.Target() +} + +// ModTime returns the "unixMtime" field, or the zero time. +func (b *Blob) ModTime() time.Time { return b.ss.ModTime() } + +// A Claim is a Blob that is signed. +type Claim struct { + b *Blob +} + +// Blob returns the claim's Blob. +func (c Claim) Blob() *Blob { return c.b } + +// ClaimDate returns the blob's "claimDate" field. +func (c Claim) ClaimDateString() string { return c.b.ss.ClaimDate.String() } + +// ClaimType returns the blob's "claimType" field. +func (c Claim) ClaimType() string { return c.b.ss.ClaimType } + +// Attribute returns the "attribute" field, if set. +func (c Claim) Attribute() string { return c.b.ss.Attribute } + +// Value returns the "value" field, if set. +func (c Claim) Value() string { return c.b.ss.Value } + +// ModifiedPermanode returns the claim's "permaNode" field, if it's +// a claim that modifies a permanode. Otherwise a zero blob.Ref is +// returned. 
+func (c Claim) ModifiedPermanode() blob.Ref { + return c.b.ss.Permanode +} + +// Target returns the blob referenced by the Share if it's +// a ShareClaim claim, or the object being deleted if it's a +// DeleteClaim claim. +// Otherwise a zero blob.Ref is returned. +func (c Claim) Target() blob.Ref { + return c.b.ss.Target +} + +// A Share is a claim for giving access to a user's blob(s). +// When returned from (*Blob).AsShare, it always represents +// a valid share with all required fields. +type Share struct { + Claim +} + +// AuthType returns the AuthType of the Share. +func (s Share) AuthType() string { + return s.b.ss.AuthType +} + +// IsTransitive returns whether the Share transitively +// gives access to everything reachable from the referenced +// blob. +func (s Share) IsTransitive() bool { + return s.b.ss.Transitive +} + +// IsExpired reports whether this share has expired. +func (s Share) IsExpired() bool { + t := time.Time(s.b.ss.Expires) + return !t.IsZero() && clockNow().After(t) +} + +// A StaticFile is a Blob representing a file, symlink fifo or socket +// (or device file, when support for these is added). +type StaticFile struct { + b *Blob +} + +// FileName returns the StaticFile's FileName if is not the empty string, otherwise it returns its FileNameBytes concatenated into a string. +func (sf StaticFile) FileName() string { + return sf.b.ss.FileNameString() +} + +// AsStaticFile returns the Blob as a StaticFile if it represents +// one. Otherwise, it returns false in the boolean parameter and the +// zero value of StaticFile. +func (b *Blob) AsStaticFile() (sf StaticFile, ok bool) { + // TODO (marete) Add support for device files to + // Camlistore and change the implementation of StaticFile to + // reflect that. + t := b.ss.Type + if t == "file" || t == "symlink" || t == "fifo" || t == "socket" { + return StaticFile{b}, true + } + + return +} + +// A StaticFIFO is a StaticFile that is also a fifo. 
+type StaticFIFO struct {
+	StaticFile
+}
+
+// A StaticSocket is a StaticFile that is also a socket.
+type StaticSocket struct {
+	StaticFile
+}
+
+// A StaticSymlink is a StaticFile that is also a symbolic link.
+type StaticSymlink struct {
+	// We name it `StaticSymlink' rather than just `Symlink' since
+	// a type called Symlink is already in schema.go.
+	StaticFile
+}
+
+// SymlinkTargetString returns the field symlinkTarget if it is
+// non-empty. Otherwise it returns the contents of symlinkTargetBytes
+// concatenated as a string.
+func (sl StaticSymlink) SymlinkTargetString() string {
+	return sl.StaticFile.b.ss.SymlinkTargetString()
+}
+
+// AsStaticSymlink returns the StaticFile as a StaticSymlink if the
+// StaticFile represents a symlink. Otherwise, it returns the zero
+// value of StaticSymlink and false.
+func (sf StaticFile) AsStaticSymlink() (s StaticSymlink, ok bool) {
+	if sf.b.ss.Type == "symlink" {
+		return StaticSymlink{sf}, true
+	}
+
+	return
+}
+
+// AsStaticFIFO returns the StaticFile as a StaticFIFO if the
+// StaticFile represents a fifo. Otherwise, it returns the zero value
+// of StaticFIFO and false.
+func (sf StaticFile) AsStaticFIFO() (fifo StaticFIFO, ok bool) {
+	if sf.b.ss.Type == "fifo" {
+		return StaticFIFO{sf}, true
+	}
+
+	return
+}
+
+// AsStaticSocket returns the StaticFile as a StaticSocket if the
+// StaticFile represents a socket. Otherwise, it returns the zero
+// value of StaticSocket and false.
+func (sf StaticFile) AsStaticSocket() (ss StaticSocket, ok bool) {
+	if sf.b.ss.Type == "socket" {
+		return StaticSocket{sf}, true
+	}
+
+	return
+}
+
+// A Builder builds a JSON blob.
+// After mutating the Builder, call Blob to get the built blob.
+type Builder struct {
+	m map[string]interface{}
+}
+
+// NewBuilder returns a new blob schema builder.
+// The "camliVersion" field is set to "1" by default and the required
+// "camliType" field is NOT set.
+func NewBuilder() *Builder { + return &Builder{map[string]interface{}{ + "camliVersion": "1", + }} +} + +// SetShareTarget sets the target of share claim. +// It panics if bb isn't a "share" claim type. +func (bb *Builder) SetShareTarget(t blob.Ref) *Builder { + if bb.Type() != "claim" || bb.ClaimType() != ShareClaim { + panic("called SetShareTarget on non-share") + } + bb.m["target"] = t.String() + return bb +} + +// SetShareSearch sets the search of share claim. +// q is assumed to be of type *search.SearchQuery. +// It panics if bb isn't a "share" claim type. +func (bb *Builder) SetShareSearch(q SearchQuery) *Builder { + if bb.Type() != "claim" || bb.ClaimType() != ShareClaim { + panic("called SetShareSearch on non-share") + } + bb.m["search"] = q + return bb +} + +// SetShareExpiration sets the expiration time on share claim. +// It panics if bb isn't a "share" claim type. +// If t is zero, the expiration is removed. +func (bb *Builder) SetShareExpiration(t time.Time) *Builder { + if bb.Type() != "claim" || bb.ClaimType() != ShareClaim { + panic("called SetShareExpiration on non-share") + } + if t.IsZero() { + delete(bb.m, "expires") + } else { + bb.m["expires"] = RFC3339FromTime(t) + } + return bb +} + +func (bb *Builder) SetShareIsTransitive(b bool) *Builder { + if bb.Type() != "claim" || bb.ClaimType() != ShareClaim { + panic("called SetShareIsTransitive on non-share") + } + if !b { + delete(bb.m, "transitive") + } else { + bb.m["transitive"] = true + } + return bb +} + +// SetRawStringField sets a raw string field in the underlying map. +func (bb *Builder) SetRawStringField(key, value string) *Builder { + bb.m[key] = value + return bb +} + +// Blob builds the Blob. The builder continues to be usable after a call to Build. 
+func (bb *Builder) Blob() *Blob {
+	json, err := mapJSON(bb.m)
+	if err != nil {
+		panic(err)
+	}
+	ss, err := parseSuperset(strings.NewReader(json))
+	if err != nil {
+		panic(err)
+	}
+	h := blob.NewHash()
+	h.Write([]byte(json))
+	return &Blob{
+		str: json,
+		ss:  ss,
+		br:  blob.RefFromHash(h),
+	}
+}
+
+// Builder returns a clone of itself and satisfies the Buildable interface.
+func (bb *Builder) Builder() *Builder {
+	return &Builder{clone(bb.m).(map[string]interface{})}
+}
+
+// JSON returns the JSON of the blob as built so far.
+func (bb *Builder) JSON() (string, error) {
+	return mapJSON(bb.m)
+}
+
+// SetSigner sets the camliSigner field.
+// Calling SetSigner is unnecessary if using Sign.
+func (bb *Builder) SetSigner(signer blob.Ref) *Builder {
+	bb.m["camliSigner"] = signer.String()
+	return bb
+}
+
+// Sign sets the blob builder's camliSigner field with SetSigner
+// and returns the signed JSON using the provided signer.
+func (bb *Builder) Sign(signer *Signer) (string, error) {
+	return bb.SignAt(signer, time.Time{})
+}
+
+// SignAt sets the blob builder's camliSigner field with SetSigner
+// and returns the signed JSON using the provided signer.
+// The provided sigTime is the time of the signature, used mostly
+// for planned permanodes. If the zero value, the current time is used.
+func (bb *Builder) SignAt(signer *Signer, sigTime time.Time) (string, error) {
+	switch bb.Type() {
+	case "permanode", "claim":
+	default:
+		return "", fmt.Errorf("can't sign camliType %q", bb.Type())
+	}
+	return signer.SignJSON(bb.SetSigner(signer.pubref).Blob().JSON(), sigTime)
+}
+
+// SetType sets the camliType field.
+func (bb *Builder) SetType(t string) *Builder {
+	bb.m["camliType"] = t
+	return bb
+}
+
+// Type returns the camliType value.
+func (bb *Builder) Type() string {
+	if s, ok := bb.m["camliType"].(string); ok {
+		return s
+	}
+	return ""
+}
+
+// ClaimType returns the claimType value, or the empty string.
+func (bb *Builder) ClaimType() ClaimType { + if s, ok := bb.m["claimType"].(string); ok { + return ClaimType(s) + } + return "" +} + +// SetFileName sets the fileName or fileNameBytes field. +// The filename is truncated to just the base. +func (bb *Builder) SetFileName(name string) *Builder { + baseName := filepath.Base(name) + if utf8.ValidString(baseName) { + bb.m["fileName"] = baseName + } else { + bb.m["fileNameBytes"] = mixedArrayFromString(baseName) + } + return bb +} + +// SetSymlinkTarget sets bb to be of type "symlink" and sets the symlink's target. +func (bb *Builder) SetSymlinkTarget(target string) *Builder { + bb.SetType("symlink") + if utf8.ValidString(target) { + bb.m["symlinkTarget"] = target + } else { + bb.m["symlinkTargetBytes"] = mixedArrayFromString(target) + } + return bb +} + +// IsClaimType returns whether this blob builder is for a type +// which should be signed. (a "claim" or "permanode") +func (bb *Builder) IsClaimType() bool { + switch bb.Type() { + case "claim", "permanode": + return true + } + return false +} + +// SetClaimDate sets the "claimDate" on a claim. +// It is a fatal error to call SetClaimDate if the Map isn't of Type "claim". +func (bb *Builder) SetClaimDate(t time.Time) *Builder { + if !bb.IsClaimType() { + // This is a little gross, using panic here, but I + // don't want all callers to check errors. This is + // really a programming error, not a runtime error + // that would arise from e.g. random user data. + panic("SetClaimDate called on non-claim *Builder; camliType=" + bb.Type()) + } + bb.m["claimDate"] = RFC3339FromTime(t) + return bb +} + +// SetModTime sets the "unixMtime" field. 
+func (bb *Builder) SetModTime(t time.Time) *Builder { + bb.m["unixMtime"] = RFC3339FromTime(t) + return bb +} + +// CapCreationTime caps the "unixCtime" field to be less or equal than "unixMtime" +func (bb *Builder) CapCreationTime() *Builder { + ctime, ok := bb.m["unixCtime"].(string) + if !ok { + return bb + } + mtime, ok := bb.m["unixMtime"].(string) + if ok && ctime > mtime { + bb.m["unixCtime"] = mtime + } + return bb +} + +// ModTime returns the "unixMtime" modtime field, if set. +func (bb *Builder) ModTime() (t time.Time, ok bool) { + s, ok := bb.m["unixMtime"].(string) + if !ok { + return + } + t, err := time.Parse(time.RFC3339, s) + if err != nil { + return + } + return t, true +} + +// PopulateDirectoryMap sets the type of *Builder to "directory" and sets +// the "entries" field to the provided staticSet blobref. +func (bb *Builder) PopulateDirectoryMap(staticSetRef blob.Ref) *Builder { + bb.m["camliType"] = "directory" + bb.m["entries"] = staticSetRef.String() + return bb +} + +// PartsSize returns the number of bytes represented by the "parts" field. 
+func (bb *Builder) PartsSize() int64 { + n := int64(0) + if parts, ok := bb.m["parts"].([]BytesPart); ok { + for _, part := range parts { + n += int64(part.Size) + } + } + return n +} + +func clone(i interface{}) interface{} { + switch t := i.(type) { + case map[string]interface{}: + m2 := make(map[string]interface{}) + for k, v := range t { + m2[k] = clone(v) + } + return m2 + case string, int, int64, float64, json.Number: + return t + case []interface{}: + s2 := make([]interface{}, len(t)) + for i, v := range t { + s2[i] = clone(v) + } + return s2 + } + panic(fmt.Sprintf("unsupported clone type %T", i)) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/dirreader.go b/vendor/github.com/camlistore/camlistore/pkg/schema/dirreader.go new file mode 100644 index 00000000..a6da856e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/dirreader.go @@ -0,0 +1,164 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "encoding/json" + "errors" + "fmt" + "io" + + "camlistore.org/pkg/blob" +) + +// A DirReader reads the entries of a "directory" schema blob's +// referenced "static-set" blob. 
+type DirReader struct { + fetcher blob.Fetcher + ss *superset + + staticSet []blob.Ref + current int +} + +// NewDirReader creates a new directory reader and prepares to +// fetch the static-set entries +func NewDirReader(fetcher blob.Fetcher, dirBlobRef blob.Ref) (*DirReader, error) { + ss := new(superset) + err := ss.setFromBlobRef(fetcher, dirBlobRef) + if err != nil { + return nil, err + } + if ss.Type != "directory" { + return nil, fmt.Errorf("schema/dirreader: expected \"directory\" schema blob for %s, got %q", dirBlobRef, ss.Type) + } + dr, err := ss.NewDirReader(fetcher) + if err != nil { + return nil, fmt.Errorf("schema/dirreader: creating DirReader for %s: %v", dirBlobRef, err) + } + dr.current = 0 + return dr, nil +} + +func (b *Blob) NewDirReader(fetcher blob.Fetcher) (*DirReader, error) { + return b.ss.NewDirReader(fetcher) +} + +func (ss *superset) NewDirReader(fetcher blob.Fetcher) (*DirReader, error) { + if ss.Type != "directory" { + return nil, fmt.Errorf("Superset not of type \"directory\"") + } + return &DirReader{fetcher: fetcher, ss: ss}, nil +} + +func (ss *superset) setFromBlobRef(fetcher blob.Fetcher, blobRef blob.Ref) error { + if !blobRef.Valid() { + return errors.New("schema/dirreader: blobref invalid") + } + ss.BlobRef = blobRef + rc, _, err := fetcher.Fetch(blobRef) + if err != nil { + return fmt.Errorf("schema/dirreader: fetching schema blob %s: %v", blobRef, err) + } + defer rc.Close() + if err := json.NewDecoder(rc).Decode(ss); err != nil { + return fmt.Errorf("schema/dirreader: decoding schema blob %s: %v", blobRef, err) + } + return nil +} + +// StaticSet returns the whole of the static set members of that directory +func (dr *DirReader) StaticSet() ([]blob.Ref, error) { + if dr.staticSet != nil { + return dr.staticSet, nil + } + staticSetBlobref := dr.ss.Entries + if !staticSetBlobref.Valid() { + return nil, errors.New("schema/dirreader: Invalid blobref") + } + rsc, _, err := dr.fetcher.Fetch(staticSetBlobref) + if err != nil { + 
return nil, fmt.Errorf("schema/dirreader: fetching schema blob %s: %v", staticSetBlobref, err) + } + defer rsc.Close() + ss, err := parseSuperset(rsc) + if err != nil { + return nil, fmt.Errorf("schema/dirreader: decoding schema blob %s: %v", staticSetBlobref, err) + } + if ss.Type != "static-set" { + return nil, fmt.Errorf("schema/dirreader: expected \"static-set\" schema blob for %s, got %q", staticSetBlobref, ss.Type) + } + for _, member := range ss.Members { + if !member.Valid() { + return nil, fmt.Errorf("schema/dirreader: invalid (static-set member) blobref referred by \"static-set\" schema blob %v", staticSetBlobref) + } + dr.staticSet = append(dr.staticSet, member) + } + return dr.staticSet, nil +} + +// Readdir implements the Directory interface. +func (dr *DirReader) Readdir(n int) (entries []DirectoryEntry, err error) { + sts, err := dr.StaticSet() + if err != nil { + return nil, fmt.Errorf("schema/dirreader: can't get StaticSet: %v", err) + } + up := dr.current + n + if n <= 0 { + dr.current = 0 + up = len(sts) + } else { + if n > (len(sts) - dr.current) { + err = io.EOF + up = len(sts) + } + } + + // TODO(bradfitz): push down information to the fetcher + // (e.g. cachingfetcher -> remote client http) that we're + // going to load a bunch, so the HTTP client (if not using + // SPDY) can do discovery and see if the server supports a + // batch handler, then get them all in one round-trip, rather + // than attacking the server with hundreds of parallel TLS + // setups. + + type res struct { + ent DirectoryEntry + err error + } + var cs []chan res + + // Kick off all directory entry loads. + // TODO: bound this? 
+ for _, entRef := range sts[dr.current:up] { + c := make(chan res, 1) + cs = append(cs, c) + go func(entRef blob.Ref) { + entry, err := NewDirectoryEntryFromBlobRef(dr.fetcher, entRef) + c <- res{entry, err} + }(entRef) + } + + for _, c := range cs { + res := <-c + if res.err != nil { + return nil, fmt.Errorf("schema/dirreader: can't create dirEntry: %v", res.err) + } + entries = append(entries, res.ent) + } + return entries, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/fileread_test.go b/vendor/github.com/camlistore/camlistore/pkg/schema/fileread_test.go new file mode 100644 index 00000000..32db4610 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/fileread_test.go @@ -0,0 +1,453 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package schema + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "testing" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/test" +) + +var testFetcher = &test.Fetcher{} + +var blobA = &test.Blob{"AAAAAaaaaa"} +var blobB = &test.Blob{"BBBBBbbbbb"} +var blobC = &test.Blob{"CCCCCccccc"} + +func init() { + testFetcher.AddBlob(blobA) + testFetcher.AddBlob(blobB) + testFetcher.AddBlob(blobC) +} + +type readTest struct { + parts []*BytesPart + skip uint64 + expected string +} + +func part(blob *test.Blob, offset, size uint64) *BytesPart { + return &BytesPart{BlobRef: blob.BlobRef(), Size: size, Offset: offset} +} + +// filePart returns a BytesPart that references a file JSON schema +// blob made of the provided content parts. +func filePart(cps []*BytesPart, skip uint64) *BytesPart { + m := newBytes() + fileSize := int64(0) + cpl := []BytesPart{} + for _, cp := range cps { + fileSize += int64(cp.Size) + cpl = append(cpl, *cp) + } + err := m.PopulateParts(fileSize, cpl) + if err != nil { + panic(err) + } + json, err := m.JSON() + if err != nil { + panic(err) + } + tb := &test.Blob{json} + testFetcher.AddBlob(tb) + return &BytesPart{BytesRef: tb.BlobRef(), Size: uint64(fileSize) - skip, Offset: skip} +} + +func all(blob *test.Blob) *BytesPart { + return part(blob, 0, uint64(blob.Size())) +} + +func zero(size uint64) *BytesPart { + return &BytesPart{Size: size} +} + +func parts(parts ...*BytesPart) []*BytesPart { + return parts +} + +func sizeSum(parts []*BytesPart) (s uint64) { + for _, p := range parts { + s += uint64(p.Size) + } + return +} + +var readTests = []readTest{ + {parts(all(blobA)), 0, "AAAAAaaaaa"}, + {parts(all(blobA)), 2, "AAAaaaaa"}, + {parts(part(blobA, 0, 5)), 0, "AAAAA"}, + {parts(part(blobA, 2, 8)), 0, "AAAaaaaa"}, + {parts(part(blobA, 2, 8)), 1, "AAaaaaa"}, + {parts(part(blobA, 4, 6)), 0, "Aaaaaa"}, + {parts(all(blobA), all(blobB)), 0, "AAAAAaaaaaBBBBBbbbbb"}, + {parts(all(blobA), all(blobB)), 1, 
"AAAAaaaaaBBBBBbbbbb"}, + {parts(all(blobA), all(blobB)), 10, "BBBBBbbbbb"}, + {parts(all(blobA), all(blobB)), 11, "BBBBbbbbb"}, + {parts(all(blobA), all(blobB)), 100, ""}, + {parts(all(blobA), all(blobB), all(blobC)), 0, "AAAAAaaaaaBBBBBbbbbbCCCCCccccc"}, + {parts(all(blobA), all(blobB), all(blobC)), 20, "CCCCCccccc"}, + {parts(all(blobA), all(blobB), all(blobC)), 22, "CCCccccc"}, + {parts(part(blobA, 5, 5), part(blobB, 0, 5), part(blobC, 4, 2)), 1, "aaaaBBBBBCc"}, + {parts(all(blobA), zero(2), all(blobB)), 5, "aaaaa\x00\x00BBBBBbbbbb"}, + {parts(all(blobB), part(blobC, 4, 2)), 0, "BBBBBbbbbbCc"}, + {parts( + all(blobA), + filePart(parts(all(blobB), part(blobC, 4, 2)), 0), + part(blobA, 5, 5)), + 1, + "AAAAaaaaa" + "BBBBBbbbbb" + "Cc" + "aaaaa"}, + {parts( + all(blobA), + filePart(parts(all(blobB), part(blobC, 4, 2)), 4), + part(blobA, 5, 5)), + 1, + "AAAAaaaaa" + "Bbbbbb" + "Cc" + "aaaaa"}, +} + +func skipBytes(fr *FileReader, skipBytes uint64) uint64 { + oldOff, err := fr.Seek(0, os.SEEK_CUR) + if err != nil { + panic("Failed to seek") + } + remain := fr.size - oldOff + if int64(skipBytes) > remain { + skipBytes = uint64(remain) + } + newOff, err := fr.Seek(int64(skipBytes), os.SEEK_CUR) + if err != nil { + panic("Failed to seek") + } + skipped := newOff - oldOff + if skipped < 0 { + panic("") + } + return uint64(skipped) +} + +func TestReader(t *testing.T) { + for idx, rt := range readTests { + ss := new(superset) + ss.Type = "file" + ss.Version = 1 + ss.Parts = rt.parts + fr, err := ss.NewFileReader(testFetcher) + if err != nil { + t.Errorf("read error on test %d: %v", idx, err) + continue + } + skipBytes(fr, rt.skip) + all, err := ioutil.ReadAll(fr) + if err != nil { + t.Errorf("read error on test %d: %v", idx, err) + continue + } + if g, e := string(all), rt.expected; e != g { + t.Errorf("test %d\nwant %q\n got %q", idx, e, g) + } + } +} + +func TestReaderSeekStress(t *testing.T) { + const fileSize = 750<<10 + 123 + bigFile := make([]byte, fileSize) + rnd := 
rand.New(rand.NewSource(1)) + for i := range bigFile { + bigFile[i] = byte(rnd.Intn(256)) + } + + sto := new(test.Fetcher) // in-memory blob storage + fileMap := NewFileMap("testfile") + fileref, err := WriteFileMap(sto, fileMap, bytes.NewReader(bigFile)) + if err != nil { + t.Fatalf("WriteFileMap: %v", err) + } + c, ok := sto.BlobContents(fileref) + if !ok { + t.Fatal("expected file contents to be present") + } + const debug = false + if debug { + t.Logf("Fileref %s: %s", fileref, c) + } + + // Test a bunch of reads at different offsets, making sure we always + // get the same results. + skipBy := int64(999) + if testing.Short() { + skipBy += 10 << 10 + } + for off := int64(0); off < fileSize; off += skipBy { + fr, err := NewFileReader(sto, fileref) + if err != nil { + t.Fatal(err) + } + + skipBytes(fr, uint64(off)) + got, err := ioutil.ReadAll(fr) + if err != nil { + t.Fatal(err) + } + want := bigFile[off:] + if !bytes.Equal(got, want) { + t.Errorf("Incorrect read at offset %d:\n got: %s\n want: %s", off, summary(got), summary(want)) + off := 0 + for len(got) > 0 && len(want) > 0 && got[0] == want[0] { + off++ + got = got[1:] + want = want[1:] + } + t.Errorf(" differences start at offset %d:\n got: %s\n want: %s\n", off, summary(got), summary(want)) + break + } + fr.Close() + } +} + +/* + +1KB ReadAt calls before: +fileread_test.go:253: Blob Size: 4194304 raw, 4201523 with meta (1.00172x) +fileread_test.go:283: Blobs fetched: 4160 (63.03x) +fileread_test.go:284: Bytes fetched: 361174780 (85.96x) + +2KB ReadAt calls before: +fileread_test.go:253: Blob Size: 4194304 raw, 4201523 with meta (1.00172x) +fileread_test.go:283: Blobs fetched: 2112 (32.00x) +fileread_test.go:284: Bytes fetched: 182535389 (43.45x) + +After fix: +fileread_test.go:253: Blob Size: 4194304 raw, 4201523 with meta (1.00172x) +fileread_test.go:283: Blobs fetched: 66 (1.00x) +fileread_test.go:284: Bytes fetched: 4201523 (1.00x) +*/ +func TestReaderEfficiency(t *testing.T) { + const fileSize = 4 << 
20 + bigFile := make([]byte, fileSize) + rnd := rand.New(rand.NewSource(1)) + for i := range bigFile { + bigFile[i] = byte(rnd.Intn(256)) + } + + sto := new(test.Fetcher) // in-memory blob storage + fileMap := NewFileMap("testfile") + fileref, err := WriteFileMap(sto, fileMap, bytes.NewReader(bigFile)) + if err != nil { + t.Fatalf("WriteFileMap: %v", err) + } + + fr, err := NewFileReader(sto, fileref) + if err != nil { + t.Fatal(err) + } + + numBlobs := sto.NumBlobs() + t.Logf("Num blobs = %d", numBlobs) + sumSize := sto.SumBlobSize() + t.Logf("Blob Size: %d raw, %d with meta (%.05fx)", fileSize, sumSize, float64(sumSize)/float64(fileSize)) + + const readSize = 2 << 10 + buf := make([]byte, readSize) + for off := int64(0); off < fileSize; off += readSize { + n, err := fr.ReadAt(buf, off) + if err != nil { + t.Fatalf("ReadAt at offset %d: %v", off, err) + } + if n != readSize { + t.Fatalf("Read %d bytes at offset %d; want %d", n, off, readSize) + } + got, want := buf, bigFile[off:off+readSize] + if !bytes.Equal(buf, want) { + t.Errorf("Incorrect read at offset %d:\n got: %s\n want: %s", off, summary(got), summary(want)) + off := 0 + for len(got) > 0 && len(want) > 0 && got[0] == want[0] { + off++ + got = got[1:] + want = want[1:] + } + t.Errorf(" differences start at offset %d:\n got: %s\n want: %s\n", off, summary(got), summary(want)) + break + } + } + fr.Close() + blobsFetched, bytesFetched := sto.Stats() + if blobsFetched != int64(numBlobs) { + t.Errorf("Fetched %d blobs; want %d", blobsFetched, numBlobs) + } + if bytesFetched != sumSize { + t.Errorf("Fetched %d bytes; want %d", bytesFetched, sumSize) + } +} + +func TestReaderForeachChunk(t *testing.T) { + fileSize := 4 << 20 + if testing.Short() { + fileSize = 1 << 20 + } + bigFile := make([]byte, fileSize) + rnd := rand.New(rand.NewSource(1)) + for i := range bigFile { + bigFile[i] = byte(rnd.Intn(256)) + } + sto := new(test.Fetcher) // in-memory blob storage + fileMap := NewFileMap("testfile") + fileref, err 
:= WriteFileMap(sto, fileMap, bytes.NewReader(bigFile)) + if err != nil { + t.Fatalf("WriteFileMap: %v", err) + } + + fr, err := NewFileReader(sto, fileref) + if err != nil { + t.Fatal(err) + } + + var back bytes.Buffer + var totSize uint64 + err = fr.ForeachChunk(func(sref []blob.Ref, p BytesPart) error { + if len(sref) < 1 { + t.Fatal("expected at least one schemaPath blob") + } + for i, br := range sref { + if !br.Valid() { + t.Fatalf("invalid schema blob in path index %d", i) + } + } + if p.BytesRef.Valid() { + t.Fatal("should never see a valid BytesRef") + } + if !p.BlobRef.Valid() { + t.Fatal("saw part with invalid blobref") + } + rc, size, err := sto.Fetch(p.BlobRef) + if err != nil { + return fmt.Errorf("Error fetching blobref of chunk %+v: %v", p, err) + } + defer rc.Close() + totSize += p.Size + if uint64(size) != p.Size { + return fmt.Errorf("fetched size %d doesn't match expected for chunk %+v", size, p) + } + n, err := io.Copy(&back, rc) + if err != nil { + return err + } + if n != int64(size) { + return fmt.Errorf("Copied unexpected %d bytes of chunk %+v", n, p) + } + return nil + }) + if err != nil { + t.Fatalf("ForeachChunk = %v", err) + } + if back.Len() != fileSize { + t.Fatalf("Read file is %d bytes; want %d", back.Len(), fileSize) + } + if totSize != uint64(fileSize) { + t.Errorf("sum of parts = %d; want %d", totSize, fileSize) + } + if !bytes.Equal(back.Bytes(), bigFile) { + t.Errorf("file read mismatch") + } +} + +func TestForeachChunkAllSchemaBlobs(t *testing.T) { + sto := new(test.Fetcher) // in-memory blob storage + foo := &test.Blob{"foo"} + bar := &test.Blob{"bar"} + sto.AddBlob(foo) + sto.AddBlob(bar) + + // Make a "bytes" schema blob referencing the "foo" and "bar" chunks. + // Verify it works. 
+ bytesBlob := &test.Blob{`{"camliVersion": 1, +"camliType": "bytes", +"parts": [ + {"blobRef": "` + foo.BlobRef().String() + `", "size": 3}, + {"blobRef": "` + bar.BlobRef().String() + `", "size": 3} +]}`} + sto.AddBlob(bytesBlob) + + var fr *FileReader + mustRead := func(name string, br blob.Ref, want string) { + var err error + fr, err = NewFileReader(sto, br) + if err != nil { + t.Fatalf("%s: %v", name, err) + } + all, err := ioutil.ReadAll(fr) + if err != nil { + t.Fatalf("%s: %v", name, err) + } + if string(all) != want { + t.Errorf("%s: read contents %q; want %q", name, all, want) + } + } + mustRead("bytesBlob", bytesBlob.BlobRef(), "foobar") + + // Now make another bytes schema blob embedding the previous one. + bytesBlob2 := &test.Blob{`{"camliVersion": 1, +"camliType": "bytes", +"parts": [ + {"bytesRef": "` + bytesBlob.BlobRef().String() + `", "size": 6} +]}`} + sto.AddBlob(bytesBlob2) + mustRead("bytesBlob2", bytesBlob2.BlobRef(), "foobar") + + sawSchema := map[blob.Ref]bool{} + sawData := map[blob.Ref]bool{} + if err := fr.ForeachChunk(func(path []blob.Ref, p BytesPart) error { + for _, sref := range path { + sawSchema[sref] = true + } + sawData[p.BlobRef] = true + return nil + }); err != nil { + t.Fatal(err) + } + want := []struct { + name string + tb *test.Blob + m map[blob.Ref]bool + }{ + {"bytesBlob", bytesBlob, sawSchema}, + {"bytesBlob2", bytesBlob2, sawSchema}, + {"foo", foo, sawData}, + {"bar", bar, sawData}, + } + for _, tt := range want { + if b := tt.tb.BlobRef(); !tt.m[b] { + t.Errorf("didn't see %s (%s)", tt.name, b) + } + } +} + +type summary []byte + +func (s summary) String() string { + const prefix = 10 + plen := prefix + if len(s) < plen { + plen = len(s) + } + return fmt.Sprintf("%d bytes, starting with %q", len(s), []byte(s[:plen])) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/filereader.go b/vendor/github.com/camlistore/camlistore/pkg/schema/filereader.go new file mode 100644 index 00000000..cb805de4 --- 
/dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/filereader.go @@ -0,0 +1,395 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/env" + "camlistore.org/pkg/singleflight" + "camlistore.org/pkg/syncutil" + "camlistore.org/pkg/types" +) + +const closedIndex = -1 + +var errClosed = errors.New("filereader is closed") + +// A FileReader reads the bytes of "file" and "bytes" schema blobrefs. +type FileReader struct { + // Immutable stuff: + *io.SectionReader // provides Read, Seek, and Size. + parent *FileReader // or nil. for sub-region readers to find the top. + rootOff int64 // this FileReader's offset from the root + fetcher blob.Fetcher + ss *superset + size int64 // total number of bytes + + sfg singleflight.Group // for loading blobrefs for ssm + + blobmu sync.Mutex // guards lastBlob + lastBlob *blob.Blob // most recently fetched blob; cuts dup reads up to 85x + + ssmmu sync.Mutex // guards ssm + ssm map[blob.Ref]*superset // blobref -> superset +} + +var _ interface { + io.Seeker + io.ReaderAt + io.Reader + io.Closer + Size() int64 +} = (*FileReader)(nil) + +// NewFileReader returns a new FileReader reading the contents of fileBlobRef, +// fetching blobs from fetcher. The fileBlobRef must be of a "bytes" or "file" +// schema blob. 
+// +// The caller should call Close on the FileReader when done reading. +func NewFileReader(fetcher blob.Fetcher, fileBlobRef blob.Ref) (*FileReader, error) { + // TODO(bradfitz): rename this into bytes reader? but for now it's still + // named FileReader, but can also read a "bytes" schema. + if !fileBlobRef.Valid() { + return nil, errors.New("schema/filereader: NewFileReader blobref invalid") + } + rc, _, err := fetcher.Fetch(fileBlobRef) + if err != nil { + return nil, fmt.Errorf("schema/filereader: fetching file schema blob: %v", err) + } + defer rc.Close() + ss, err := parseSuperset(rc) + if err != nil { + return nil, fmt.Errorf("schema/filereader: decoding file schema blob: %v", err) + } + ss.BlobRef = fileBlobRef + if ss.Type != "file" && ss.Type != "bytes" { + return nil, fmt.Errorf("schema/filereader: expected \"file\" or \"bytes\" schema blob, got %q", ss.Type) + } + fr, err := ss.NewFileReader(fetcher) + if err != nil { + return nil, fmt.Errorf("schema/filereader: creating FileReader for %s: %v", fileBlobRef, err) + } + return fr, nil +} + +func (b *Blob) NewFileReader(fetcher blob.Fetcher) (*FileReader, error) { + return b.ss.NewFileReader(fetcher) +} + +// NewFileReader returns a new FileReader, reading bytes and blobs +// from the provided fetcher. +// +// NewFileReader does no fetch operation on the fetcher itself. The +// fetcher is only used in subsequent read operations. +// +// An error is only returned if the type of the superset is not either +// "file" or "bytes". 
+func (ss *superset) NewFileReader(fetcher blob.Fetcher) (*FileReader, error) { + if ss.Type != "file" && ss.Type != "bytes" { + return nil, fmt.Errorf("schema/filereader: Superset not of type \"file\" or \"bytes\"") + } + size := int64(ss.SumPartsSize()) + fr := &FileReader{ + fetcher: fetcher, + ss: ss, + size: size, + ssm: make(map[blob.Ref]*superset), + } + fr.SectionReader = io.NewSectionReader(fr, 0, size) + return fr, nil +} + +// LoadAllChunks starts a process of loading all chunks of this file +// as quickly as possible. The contents are immediately discarded, so +// it is assumed that the fetcher is a caching fetcher. +func (fr *FileReader) LoadAllChunks() { + // TODO: ask the underlying blobserver to do this if it would + // prefer. Some blobservers (like blobpacked) might not want + // to do this at all. + go fr.loadAllChunksSync() +} + +func (fr *FileReader) loadAllChunksSync() { + gate := syncutil.NewGate(20) // num readahead chunk loads at a time + fr.ForeachChunk(func(_ []blob.Ref, p BytesPart) error { + if !p.BlobRef.Valid() { + return nil + } + gate.Start() + go func(br blob.Ref) { + defer gate.Done() + rc, _, err := fr.fetcher.Fetch(br) + if err == nil { + defer rc.Close() + var b [1]byte + rc.Read(b[:]) // fault in the blob + } + }(p.BlobRef) + return nil + }) +} + +// UnixMtime returns the file schema's UnixMtime field, or the zero value. +func (fr *FileReader) UnixMtime() time.Time { + t, err := time.Parse(time.RFC3339, fr.ss.UnixMtime) + if err != nil { + return time.Time{} + } + return t +} + +// FileName returns the file schema's filename, if any. +func (fr *FileReader) FileName() string { return fr.ss.FileNameString() } + +func (fr *FileReader) ModTime() time.Time { return fr.ss.ModTime() } + +func (fr *FileReader) SchemaBlobRef() blob.Ref { return fr.ss.BlobRef } + +// Close currently does nothing. 
+func (fr *FileReader) Close() error { return nil } + +func (fr *FileReader) ReadAt(p []byte, offset int64) (n int, err error) { + if offset < 0 { + return 0, errors.New("schema/filereader: negative offset") + } + if offset >= fr.Size() { + return 0, io.EOF + } + want := len(p) + for len(p) > 0 && err == nil { + rc, err := fr.readerForOffset(offset) + if err != nil { + return n, err + } + var n1 int + n1, err = io.ReadFull(rc, p) + rc.Close() + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = nil + } + if n1 == 0 { + break + } + p = p[n1:] + offset += int64(n1) + n += n1 + } + if n < want && err == nil { + err = io.ErrUnexpectedEOF + } + return n, err +} + +// ForeachChunk calls fn for each chunk of fr, in order. +// +// The schemaPath argument will be the path from the "file" or "bytes" +// schema blob down to possibly other "bytes" schema blobs, the final +// one of which references the given BytesPart. The BytesPart will be +// the actual chunk. The fn function will not be called with +// BytesParts referencing a "BytesRef"; those are followed recursively +// instead. The fn function must not retain or mutate schemaPath. +// +// If fn returns an error, iteration stops and that error is returned +// from ForeachChunk. Other errors may be returned from ForeachChunk +// if schema blob fetches fail. 
+func (fr *FileReader) ForeachChunk(fn func(schemaPath []blob.Ref, p BytesPart) error) error { + return fr.foreachChunk(fn, nil) +} + +func (fr *FileReader) foreachChunk(fn func([]blob.Ref, BytesPart) error, path []blob.Ref) error { + path = append(path, fr.ss.BlobRef) + for _, bp := range fr.ss.Parts { + if bp.BytesRef.Valid() && bp.BlobRef.Valid() { + return fmt.Errorf("part in %v illegally contained both a blobRef and bytesRef", fr.ss.BlobRef) + } + if bp.BytesRef.Valid() { + ss, err := fr.getSuperset(bp.BytesRef) + if err != nil { + return err + } + subfr, err := ss.NewFileReader(fr.fetcher) + if err != nil { + return err + } + subfr.parent = fr + if err := subfr.foreachChunk(fn, path); err != nil { + return err + } + } else { + if err := fn(path, *bp); err != nil { + return err + } + } + } + return nil +} + +func (fr *FileReader) rootReader() *FileReader { + if fr.parent != nil { + return fr.parent.rootReader() + } + return fr +} + +func (fr *FileReader) getBlob(br blob.Ref) (*blob.Blob, error) { + if root := fr.rootReader(); root != fr { + return root.getBlob(br) + } + fr.blobmu.Lock() + last := fr.lastBlob + fr.blobmu.Unlock() + if last != nil && last.Ref() == br { + return last, nil + } + blob, err := blob.FromFetcher(fr.fetcher, br) + if err != nil { + return nil, err + } + + fr.blobmu.Lock() + fr.lastBlob = blob + fr.blobmu.Unlock() + return blob, nil +} + +func (fr *FileReader) getSuperset(br blob.Ref) (*superset, error) { + if root := fr.rootReader(); root != fr { + return root.getSuperset(br) + } + brStr := br.String() + ssi, err := fr.sfg.Do(brStr, func() (interface{}, error) { + fr.ssmmu.Lock() + ss, ok := fr.ssm[br] + fr.ssmmu.Unlock() + if ok { + return ss, nil + } + rc, _, err := fr.fetcher.Fetch(br) + if err != nil { + return nil, fmt.Errorf("schema/filereader: fetching file schema blob: %v", err) + } + defer rc.Close() + ss, err = parseSuperset(rc) + if err != nil { + return nil, err + } + ss.BlobRef = br + fr.ssmmu.Lock() + defer 
fr.ssmmu.Unlock() + fr.ssm[br] = ss + return ss, nil + }) + if err != nil { + return nil, err + } + return ssi.(*superset), nil +} + +var debug = env.IsDebug() + +// readerForOffset returns a ReadCloser that reads some number of bytes and then EOF +// from the provided offset. Seeing EOF doesn't mean the end of the whole file; just the +// chunk at that offset. The caller must close the ReadCloser when done reading. +func (fr *FileReader) readerForOffset(off int64) (io.ReadCloser, error) { + if debug { + log.Printf("(%p) readerForOffset %d + %d = %d", fr, fr.rootOff, off, fr.rootOff+off) + } + if off < 0 { + panic("negative offset") + } + if off >= fr.size { + return types.EmptyBody, nil + } + offRemain := off + var skipped int64 + parts := fr.ss.Parts + for len(parts) > 0 && parts[0].Size <= uint64(offRemain) { + offRemain -= int64(parts[0].Size) + skipped += int64(parts[0].Size) + parts = parts[1:] + } + if len(parts) == 0 { + return types.EmptyBody, nil + } + p0 := parts[0] + var rsc types.ReadSeekCloser + var err error + switch { + case p0.BlobRef.Valid() && p0.BytesRef.Valid(): + return nil, fmt.Errorf("part illegally contained both a blobRef and bytesRef") + case !p0.BlobRef.Valid() && !p0.BytesRef.Valid(): + return ioutil.NopCloser( + io.LimitReader(zeroReader{}, + int64(p0.Size-uint64(offRemain)))), nil + case p0.BlobRef.Valid(): + blob, err := fr.getBlob(p0.BlobRef) + if err != nil { + return nil, err + } + rsc = blob.Open() + case p0.BytesRef.Valid(): + var ss *superset + ss, err = fr.getSuperset(p0.BytesRef) + if err != nil { + return nil, err + } + rsc, err = ss.NewFileReader(fr.fetcher) + if err == nil { + subFR := rsc.(*FileReader) + subFR.parent = fr.rootReader() + subFR.rootOff = fr.rootOff + skipped + } + } + if err != nil { + return nil, err + } + offRemain += int64(p0.Offset) + if offRemain > 0 { + newPos, err := rsc.Seek(offRemain, os.SEEK_SET) + if err != nil { + return nil, err + } + if newPos != offRemain { + panic("Seek didn't work") + } + } 
+ return struct { + io.Reader + io.Closer + }{ + io.LimitReader(rsc, int64(p0.Size)), + rsc, + }, nil +} + +type zeroReader struct{} + +func (zeroReader) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = 0 + } + return len(p), nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/filewriter.go b/vendor/github.com/camlistore/camlistore/pkg/schema/filewriter.go new file mode 100644 index 00000000..f0fea393 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/filewriter.go @@ -0,0 +1,469 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "strings" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/rollsum" + "camlistore.org/pkg/syncutil" +) + +const ( + // maxBlobSize is the largest blob we ever make when cutting up + // a file. + maxBlobSize = 1 << 20 + + // firstChunkSize is the ideal size of the first chunk of a + // file. It's kept smaller for the file(1) command, which + // likes to read 96 kB on Linux and 256 kB on OS X. Related + // are tools which extract the EXIF metadata from JPEGs, + // ID3 from mp3s, etc. Nautilus, OS X Finder, etc. + // The first chunk may be larger than this if cutting the file + // here would create a small subsequent chunk (e.g. 
a file one + // byte larger than firstChunkSize) + firstChunkSize = 256 << 10 + + // bufioReaderSize is an explicit size for our bufio.Reader, + // so we don't rely on NewReader's implicit size. + // We care about the buffer size because it affects how far + // in advance we can detect EOF from an io.Reader that doesn't + // know its size. Detecting an EOF bufioReaderSize bytes early + // means we can plan for the final chunk. + bufioReaderSize = 32 << 10 + + // tooSmallThreshold is the threshold at which rolling checksum + // boundaries are ignored if the current chunk being built is + // smaller than this. + tooSmallThreshold = 64 << 10 +) + +// WriteFileFromReaderWithModTime creates and uploads a "file" JSON schema +// composed of chunks of r, also uploading the chunks. The returned +// BlobRef is of the JSON file schema blob. +// Both filename and modTime are optional. +func WriteFileFromReaderWithModTime(bs blobserver.StatReceiver, filename string, modTime time.Time, r io.Reader) (blob.Ref, error) { + if strings.Contains(filename, "/") { + return blob.Ref{}, fmt.Errorf("schema.WriteFileFromReader: filename %q shouldn't contain a slash", filename) + } + + m := NewFileMap(filename) + if !modTime.IsZero() { + m.SetModTime(modTime) + } + return WriteFileMap(bs, m, r) +} + +// WriteFileFromReader creates and uploads a "file" JSON schema +// composed of chunks of r, also uploading the chunks. The returned +// BlobRef is of the JSON file schema blob. +// The filename is optional. +func WriteFileFromReader(bs blobserver.StatReceiver, filename string, r io.Reader) (blob.Ref, error) { + return WriteFileFromReaderWithModTime(bs, filename, time.Time{}, r) +} + +// WriteFileMap uploads chunks of r to bs while populating file and +// finally uploading file's Blob. The returned blobref is of file's +// JSON blob. 
+func WriteFileMap(bs blobserver.StatReceiver, file *Builder, r io.Reader) (blob.Ref, error) { + return writeFileMapRolling(bs, file, r) +} + +// This is the simple 1MB chunk version. The rolling checksum version is below. +func writeFileMapOld(bs blobserver.StatReceiver, file *Builder, r io.Reader) (blob.Ref, error) { + parts, size := []BytesPart{}, int64(0) + + var buf bytes.Buffer + for { + buf.Reset() + n, err := io.Copy(&buf, io.LimitReader(r, maxBlobSize)) + if err != nil { + return blob.Ref{}, err + } + if n == 0 { + break + } + + hash := blob.NewHash() + io.Copy(hash, bytes.NewReader(buf.Bytes())) + br := blob.RefFromHash(hash) + hasBlob, err := serverHasBlob(bs, br) + if err != nil { + return blob.Ref{}, err + } + if !hasBlob { + sb, err := bs.ReceiveBlob(br, &buf) + if err != nil { + return blob.Ref{}, err + } + if want := (blob.SizedRef{br, uint32(n)}); sb != want { + return blob.Ref{}, fmt.Errorf("schema/filewriter: wrote %s, expect %s", sb, want) + } + } + + size += n + parts = append(parts, BytesPart{ + BlobRef: br, + Size: uint64(n), + Offset: 0, // into BlobRef to read from (not of dest) + }) + } + + err := file.PopulateParts(size, parts) + if err != nil { + return blob.Ref{}, err + } + + json := file.Blob().JSON() + if err != nil { + return blob.Ref{}, err + } + br := blob.SHA1FromString(json) + sb, err := bs.ReceiveBlob(br, strings.NewReader(json)) + if err != nil { + return blob.Ref{}, err + } + if expect := (blob.SizedRef{br, uint32(len(json))}); expect != sb { + return blob.Ref{}, fmt.Errorf("schema/filewriter: wrote %s bytes, got %s ack'd", expect, sb) + } + + return br, nil +} + +func serverHasBlob(bs blobserver.BlobStatter, br blob.Ref) (have bool, err error) { + _, err = blobserver.StatBlob(bs, br) + if err == nil { + have = true + } else if err == os.ErrNotExist { + err = nil + } + return +} + +type span struct { + from, to int64 + bits int + br blob.Ref + children []span +} + +func (s *span) isSingleBlob() bool { + return len(s.children) 
== 0 +} + +func (s *span) size() int64 { + size := s.to - s.from + for _, cs := range s.children { + size += cs.size() + } + return size +} + +// noteEOFReader keeps track of when it's seen EOF, but otherwise +// delegates entirely to r. +type noteEOFReader struct { + r io.Reader + sawEOF bool +} + +func (r *noteEOFReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == io.EOF { + r.sawEOF = true + } + return +} + +func uploadString(bs blobserver.StatReceiver, br blob.Ref, s string) (blob.Ref, error) { + if !br.Valid() { + panic("invalid blobref") + } + hasIt, err := serverHasBlob(bs, br) + if err != nil { + return blob.Ref{}, err + } + if hasIt { + return br, nil + } + _, err = blobserver.ReceiveNoHash(bs, br, strings.NewReader(s)) + if err != nil { + return blob.Ref{}, err + } + return br, nil +} + +// uploadBytes populates bb (a builder of either type "bytes" or +// "file", which is a superset of "bytes"), sets it to the provided +// size, and populates with provided spans. The bytes or file schema +// blob is uploaded and its blobref is returned. +func uploadBytes(bs blobserver.StatReceiver, bb *Builder, size int64, s []span) *uploadBytesFuture { + future := newUploadBytesFuture() + parts := []BytesPart{} + addBytesParts(bs, &parts, s, future) + + if err := bb.PopulateParts(size, parts); err != nil { + future.errc <- err + return future + } + + // Hack until camlistore.org/issue/102 is fixed. If we happen to upload + // the "file" schema before any of its parts arrive, then the indexer + // can get confused. So wait on the parts before, and then upload + // the "file" blob afterwards. 
+ if bb.Type() == "file" { + future.errc <- nil + _, err := future.Get() // may not be nil, if children parts failed + future = newUploadBytesFuture() + if err != nil { + future.errc <- err + return future + } + } + + json := bb.Blob().JSON() + br := blob.SHA1FromString(json) + future.br = br + go func() { + _, err := uploadString(bs, br, json) + future.errc <- err + }() + return future +} + +func newUploadBytesFuture() *uploadBytesFuture { + return &uploadBytesFuture{ + errc: make(chan error, 1), + } +} + +// An uploadBytesFuture is an eager result of a still-in-progress uploadBytes call. +// Call Get to wait and get its final result. +type uploadBytesFuture struct { + br blob.Ref + errc chan error + children []*uploadBytesFuture +} + +// BlobRef returns the optimistic blobref of this uploadBytes call without blocking. +func (f *uploadBytesFuture) BlobRef() blob.Ref { + return f.br +} + +// Get blocks for all children and returns any final error. +func (f *uploadBytesFuture) Get() (blob.Ref, error) { + for _, f := range f.children { + if _, err := f.Get(); err != nil { + return blob.Ref{}, err + } + } + return f.br, <-f.errc +} + +// addBytesParts uploads the provided spans to bs, appending elements to *dst. +func addBytesParts(bs blobserver.StatReceiver, dst *[]BytesPart, spans []span, parent *uploadBytesFuture) { + for _, sp := range spans { + if len(sp.children) == 1 && sp.children[0].isSingleBlob() { + // Remove an occasional useless indirection of + // what would become a bytes schema blob + // pointing to a single blobref. Just promote + // the blobref child instead. 
+ child := sp.children[0] + *dst = append(*dst, BytesPart{ + BlobRef: child.br, + Size: uint64(child.size()), + }) + sp.children = nil + } + if len(sp.children) > 0 { + childrenSize := int64(0) + for _, cs := range sp.children { + childrenSize += cs.size() + } + future := uploadBytes(bs, newBytes(), childrenSize, sp.children) + parent.children = append(parent.children, future) + *dst = append(*dst, BytesPart{ + BytesRef: future.BlobRef(), + Size: uint64(childrenSize), + }) + } + if sp.from == sp.to { + panic("Shouldn't happen. " + fmt.Sprintf("weird span with same from & to: %#v", sp)) + } + *dst = append(*dst, BytesPart{ + BlobRef: sp.br, + Size: uint64(sp.to - sp.from), + }) + } +} + +// writeFileMap uploads chunks of r to bs while populating fileMap and +// finally uploading fileMap. The returned blobref is of fileMap's +// JSON blob. It uses rolling checksum for the chunks sizes. +func writeFileMapRolling(bs blobserver.StatReceiver, file *Builder, r io.Reader) (blob.Ref, error) { + n, spans, err := writeFileChunks(bs, file, r) + if err != nil { + return blob.Ref{}, err + } + // The top-level content parts + return uploadBytes(bs, file, n, spans).Get() +} + +// WriteFileChunks uploads chunks of r to bs while populating file. +// It does not upload file. 
+func WriteFileChunks(bs blobserver.StatReceiver, file *Builder, r io.Reader) error { + size, spans, err := writeFileChunks(bs, file, r) + if err != nil { + return err + } + parts := []BytesPart{} + future := newUploadBytesFuture() + addBytesParts(bs, &parts, spans, future) + future.errc <- nil // Get will still block on addBytesParts' children + if _, err := future.Get(); err != nil { + return err + } + return file.PopulateParts(size, parts) +} + +func writeFileChunks(bs blobserver.StatReceiver, file *Builder, r io.Reader) (n int64, spans []span, outerr error) { + src := &noteEOFReader{r: r} + bufr := bufio.NewReaderSize(src, bufioReaderSize) + spans = []span{} // the tree of spans, cut on interesting rollsum boundaries + rs := rollsum.New() + var last int64 + var buf bytes.Buffer + blobSize := 0 // of the next blob being built, should be same as buf.Len() + + const chunksInFlight = 32 // at ~64 KB chunks, this is ~2MB memory per file + gatec := syncutil.NewGate(chunksInFlight) + firsterrc := make(chan error, 1) + + // uploadLastSpan runs in the same goroutine as the loop below and is responsible for + // starting uploading the contents of the buf. It returns false if there's been + // an error and the loop below should be stopped. + uploadLastSpan := func() bool { + chunk := buf.String() + buf.Reset() + br := blob.SHA1FromString(chunk) + spans[len(spans)-1].br = br + select { + case outerr = <-firsterrc: + return false + default: + // No error seen so far, continue. 
+ } + gatec.Start() + go func() { + defer gatec.Done() + if _, err := uploadString(bs, br, chunk); err != nil { + select { + case firsterrc <- err: + default: + } + } + }() + return true + } + + for { + c, err := bufr.ReadByte() + if err == io.EOF { + if n != last { + spans = append(spans, span{from: last, to: n}) + if !uploadLastSpan() { + return + } + } + break + } + if err != nil { + return 0, nil, err + } + + buf.WriteByte(c) + n++ + blobSize++ + rs.Roll(c) + + var bits int + onRollSplit := rs.OnSplit() + switch { + case blobSize == maxBlobSize: + bits = 20 // arbitrary node weight; 1<<20 == 1MB + case src.sawEOF: + // Don't split. End is coming soon enough. + continue + case onRollSplit && n > firstChunkSize && blobSize > tooSmallThreshold: + bits = rs.Bits() + case n == firstChunkSize: + bits = 18 // 1 << 18 == 256KB + default: + // Don't split. + continue + } + blobSize = 0 + + // Take any spans from the end of the spans slice that + // have a smaller 'bits' score and make them children + // of this node. + var children []span + childrenFrom := len(spans) + for childrenFrom > 0 && spans[childrenFrom-1].bits < bits { + childrenFrom-- + } + if nCopy := len(spans) - childrenFrom; nCopy > 0 { + children = make([]span, nCopy) + copy(children, spans[childrenFrom:]) + spans = spans[:childrenFrom] + } + + spans = append(spans, span{from: last, to: n, bits: bits, children: children}) + last = n + if !uploadLastSpan() { + return + } + } + + // Loop was already hit earlier. + if outerr != nil { + return 0, nil, outerr + } + + // Wait for all uploads to finish, one way or another, and then + // see if any generated errors. + // Once this loop is done, we own all the tokens in gatec, so nobody + // else can have one outstanding. 
+ for i := 0; i < chunksInFlight; i++ { + gatec.Start() + } + select { + case err := <-firsterrc: + return 0, nil, err + default: + } + + return n, spans, nil + +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/filewriter_test.go b/vendor/github.com/camlistore/camlistore/pkg/schema/filewriter_test.go new file mode 100644 index 00000000..dda6893d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/filewriter_test.go @@ -0,0 +1,169 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "sort" + "sync" + "testing" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver/stats" + "camlistore.org/pkg/test" +) + +func TestWriteFileMap(t *testing.T) { + m := NewFileMap("test-file") + r := &randReader{seed: 123, length: 5 << 20} + sr := new(stats.Receiver) + var buf bytes.Buffer + br, err := WriteFileMap(sr, m, io.TeeReader(r, &buf)) + if err != nil { + t.Fatal(err) + } + t.Logf("Got root file %v; %d blobs, %d bytes", br, sr.NumBlobs(), sr.SumBlobSize()) + sizes := sr.Sizes() + t.Logf("Sizes are %v", sizes) + + // TODO(bradfitz): these are fragile tests and mostly just a placeholder. 
+ // Real tests to add: + // -- no "bytes" schema with a single "blobref" + // -- more seeds (including some that tickle the above) + // -- file reader reading back the root gets the same sha1 content back + // (will require keeping the full data in our stats receiver, not + // just the size) + // -- well-balanced tree + // -- nothing too big, nothing too small. + if g, w := br.String(), "sha1-95a5d2686b239e36dff3aeb5a45ed18153121835"; g != w { + t.Errorf("root blobref = %v; want %v", g, w) + } + if g, w := sr.NumBlobs(), 88; g != w { + t.Errorf("num blobs = %v; want %v", g, w) + } + if g, w := sr.SumBlobSize(), int64(5252655); g != w { + t.Errorf("sum blob size = %v; want %v", g, w) + } + if g, w := sizes[len(sizes)-1], 262144; g != w { + t.Errorf("biggest blob is %d; want %d", g, w) + } +} + +func TestWriteThenRead(t *testing.T) { + m := NewFileMap("test-file") + const size = 5 << 20 + r := &randReader{seed: 123, length: size} + sto := new(test.Fetcher) + var buf bytes.Buffer + br, err := WriteFileMap(sto, m, io.TeeReader(r, &buf)) + if err != nil { + t.Fatal(err) + } + + var got bytes.Buffer + fr, err := NewFileReader(sto, br) + if err != nil { + t.Fatal(err) + } + + n, err := io.Copy(&got, fr) + if err != nil { + t.Fatal(err) + } + if n != size { + t.Errorf("read back %d bytes; want %d", n, size) + } + if !bytes.Equal(buf.Bytes(), got.Bytes()) { + t.Error("bytes differ") + } + + var offs []int + + getOffsets := func() error { + offs = offs[:0] + var off int + return fr.ForeachChunk(func(_ []blob.Ref, p BytesPart) error { + offs = append(offs, off) + off += int(p.Size) + return err + }) + } + + if err := getOffsets(); err != nil { + t.Fatal(err) + } + sort.Ints(offs) + wantOffs := "[0 262144 358150 433428 525437 602690 675039 748088 816210 898743 980993 1053410 1120438 1188662 1265192 1332541 1398316 1463899 1530446 1596700 1668839 1738909 1817065 1891025 1961646 2031127 2099232 2170640 2238692 2304743 2374317 2440449 2514327 2582670 2653257 2753975 2827518 
2905783 2975426 3053820 3134057 3204879 3271019 3346750 3421351 3487420 3557939 3624006 3701093 3768863 3842013 3918267 4001933 4069157 4139132 4208109 4281390 4348801 4422695 4490535 4568111 4642769 4709005 4785526 4866313 4933575 5005564 5071633 5152695 5227716]" + gotOffs := fmt.Sprintf("%v", offs) + if wantOffs != gotOffs { + t.Errorf("Got chunk offsets %v; want %v", gotOffs, wantOffs) + } + + // Now force a fetch failure on one of the filereader schema chunks, to + // force a failure of GetChunkOffsets + errFetch := errors.New("fake fetch error") + var fetches struct { + sync.Mutex + n int + } + sto.FetchErr = func() error { + fetches.Lock() + defer fetches.Unlock() + fetches.n++ + if fetches.n == 1 { + return nil + } + return errFetch + } + + fr, err = NewFileReader(sto, br) + if err != nil { + t.Fatal(err) + } + if err := getOffsets(); fmt.Sprint(err) != "schema/filereader: fetching file schema blob: fake fetch error" { + t.Errorf("expected second call of GetChunkOffsets to return wrapped errFetch; got %v", err) + } +} + +type randReader struct { + seed int64 + length int + rnd *rand.Rand // lazy init + remain int // lazy init +} + +func (r *randReader) Read(p []byte) (n int, err error) { + if r.rnd == nil { + r.rnd = rand.New(rand.NewSource(r.seed)) + r.remain = r.length + } + if r.remain == 0 { + return 0, io.EOF + } + if len(p) > r.remain { + p = p[:r.remain] + } + for i := range p { + p[i] = byte(r.rnd.Intn(256)) + } + r.remain -= len(p) + return len(p), nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/lookup.go b/vendor/github.com/camlistore/camlistore/pkg/schema/lookup.go new file mode 100644 index 00000000..5860b0cb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/lookup.go @@ -0,0 +1,171 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "bufio" + "os" + "os/user" + "strconv" + "strings" + "sync" +) + +type intBool struct { + int + bool +} + +var ( + lookupMu sync.RWMutex // guards rest + uidName = map[int]string{} + gidName = map[int]string{} + userUid = map[string]intBool{} + groupGid = map[string]intBool{} + + parsedGroups, parsedPasswd bool +) + +func getUserFromUid(id int) string { + return cachedName(id, uidName, lookupUserid) +} + +func getGroupFromGid(id int) string { + return cachedName(id, gidName, lookupGroupId) +} + +func getUidFromName(user string) (int, bool) { + return cachedId(user, userUid, lookupUserToId) +} + +func getGidFromName(group string) (int, bool) { + return cachedId(group, groupGid, lookupGroupToId) +} + +func cachedName(id int, m map[int]string, fn func(int) string) string { + // TODO: use singleflight library here, keyed by 'id', rather than this lookupMu lock, + // which is too coarse. + lookupMu.RLock() + name, ok := m[id] + lookupMu.RUnlock() + if ok { + return name + } + lookupMu.Lock() + defer lookupMu.Unlock() + name, ok = m[id] + if ok { + return name // lost race, already populated + } + m[id] = fn(id) + return m[id] +} + +func cachedId(name string, m map[string]intBool, fn func(string) (int, bool)) (int, bool) { + // TODO: use singleflight library here, keyed by 'name', rather than this lookupMu lock, + // which is too coarse. 
+ lookupMu.RLock() + intb, ok := m[name] + lookupMu.RUnlock() + if ok { + return intb.int, intb.bool + } + lookupMu.Lock() + defer lookupMu.Unlock() + intb, ok = m[name] + if ok { + return intb.int, intb.bool // lost race, already populated + } + id, ok := fn(name) + m[name] = intBool{id, ok} + return id, ok +} + +func lookupUserToId(name string) (uid int, ok bool) { + u, err := user.Lookup(name) + if err == nil { + uid, err := strconv.Atoi(u.Uid) + if err == nil { + return uid, true + } + } + return +} + +func lookupGroupToId(group string) (gid int, ok bool) { + if !parsedGroups { + lookupGroupId(0) // force them to be loaded + } + intb := groupGid[group] + return intb.int, intb.bool +} + +// lookupMu is held +func lookupGroupId(id int) string { + if parsedGroups { + return "" + } + parsedGroups = true + populateMap(gidName, groupGid, "/etc/group") + return gidName[id] +} + +// lookupMu is held +func lookupUserid(id int) string { + u, err := user.LookupId(strconv.Itoa(id)) + if err == nil { + return u.Username + } + if _, ok := err.(user.UnknownUserIdError); ok { + return "" + } + if parsedPasswd { + return "" + } + parsedPasswd = true + populateMap(uidName, nil, "/etc/passwd") + return uidName[id] +} + +// Lame fallback parsing /etc/password for non-cgo systems where os/user doesn't work, +// and used for groups (which also happens to work on OS X, generally) +// nameMap may be nil. 
+func populateMap(m map[int]string, nameMap map[string]intBool, file string) { + f, err := os.Open(file) + if err != nil { + return + } + defer f.Close() + bufr := bufio.NewReader(f) + for { + line, err := bufr.ReadString('\n') + if err != nil { + return + } + parts := strings.SplitN(line, ":", 4) + if len(parts) >= 3 { + idstr := parts[2] + id, err := strconv.Atoi(idstr) + if err == nil { + m[id] = parts[0] + if nameMap != nil { + nameMap[parts[0]] = intBool{id, true} + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/nodeattr/nodeattr.go b/vendor/github.com/camlistore/camlistore/pkg/schema/nodeattr/nodeattr.go new file mode 100644 index 00000000..7277067e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/nodeattr/nodeattr.go @@ -0,0 +1,106 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package nodeattr contains constants for permanode attribute names. +// +// For all date values in RFC 3339 format, Camlistore additionally +// treats the special timezone offset -00:01 (one minute west of UTC) +// as meaning that the local time was known, but the location or +// timezone was not. Usually this is from EXIF files. +package nodeattr + +const ( + // Type is the Camlistore permanode type ("camliNodeType"). + // Importer-specific ones are of the form "domain.com:objecttype". + // Well-defined ones are documented in doc/schema/claims/attributes.txt. 
+ Type = "camliNodeType" + + // CamliContent is "camliContent", the blobref of the permanode's content. + // For files or images, the camliContent is fileref (the blobref of + // the "file" schema blob). + CamliContent = "camliContent" + + // CamliContentImage is "camliContentImage", for when CamliContent is + // already set to the blobref of a non-image. + CamliContentImage = "camliContentImage" + + // DateCreated is http://schema.org/dateCreated in RFC 3339 + // format. + DateCreated = "dateCreated" + + // StartDate is http://schema.org/startDate, the start date + // and time of the event or item, in RFC 3339 format. + StartDate = "startDate" + + // DateModified is http://schema.org/dateModified, in RFC 3339 + // format. + DateModified = "dateModified" + + // DatePublished is http://schema.org/datePublished in RFC + // 3339 format. + DatePublished = "datePublished" + + // Title is http://schema.org/title + Title = "title" + + // Description is http://schema.org/description + // Value is plain text, no HTML, newlines are newlines. + Description = "description" + + // Content is "content", used e.g. for the content of a tweet. + // TODO: define this more + Content = "content" + + // URL is the item's original or origin URL. + URL = "url" + + // LocationText is free-flowing text definition of a location or place, such + // as a city name, or a full postal address. + LocationText = "locationText" + + Latitude = "latitude" + Longitude = "longitude" + + // StreetAddress is http://schema.org/streetAddress + StreetAddress = "streetAddress" + + // AddressLocality is http://schema.org/addressLocality + // City, town, village, etc. name, plus any additional locality + // information, such as suburb name. Not as restricted as + // the UK postal meaning. + AddressLocality = "addressLocality" + + // PostalCode is http://schema.org/postalCode + PostalCode = "postalCode" + + // AddressRegion is http://schema.org/addressRegion + // Region, or state name. 
+ AddressRegion = "addressRegion" + + // AddressCountry is http://schema.org/addressCountry + AddressCountry = "addressCountry" + + // CamliPathOrderColon is the prefix "camliPathOrder:". + // The attribute key should be followed by a uint64. The attribute value + // is an existing value of a camliPath element. + // CamliPathOrder optionally sorts sets already using "camliPath:foo" keys. + // The integers do not need to be contiguous, nor 0- (or 1-) based. + CamliPathOrderColon = "camliPathOrder:" + + // DefaultVisibility is "camliDefVis", which affects the default + // visibility of the concerned permanode in the web UI. + DefaultVisibility = "camliDefVis" +) diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/schema.go b/vendor/github.com/camlistore/camlistore/pkg/schema/schema.go new file mode 100644 index 00000000..75e18fb1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/schema.go @@ -0,0 +1,1056 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package schema manipulates Camlistore schema blobs. +// +// A schema blob is a JSON-encoded blob that describes other blobs. +// See documentation in Camlistore's doc/schema/ directory. 
+package schema + +import ( + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "io" + "log" + "os" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/strutil" + "camlistore.org/pkg/types" + "camlistore.org/third_party/github.com/bradfitz/latlong" + "camlistore.org/third_party/github.com/rwcarlsen/goexif/exif" + "camlistore.org/third_party/github.com/rwcarlsen/goexif/tiff" +) + +func init() { + // Intern common strings as used by schema blobs (camliType values), to reduce + // index memory usage, which uses strutil.StringFromBytes. + strutil.RegisterCommonString( + "bytes", + "claim", + "directory", + "file", + "permanode", + "share", + "static-set", + "symlink", + ) +} + +// MaxSchemaBlobSize represents the upper bound for how large +// a schema blob may be. +const MaxSchemaBlobSize = 1 << 20 + +var sha1Type = reflect.TypeOf(sha1.New()) + +var ( + ErrNoCamliVersion = errors.New("schema: no camliVersion key in map") +) + +var clockNow = time.Now + +type StatHasher interface { + Lstat(fileName string) (os.FileInfo, error) + Hash(fileName string) (blob.Ref, error) +} + +// File is the interface returned when opening a DirectoryEntry that +// is a regular file. +type File interface { + io.Closer + io.ReaderAt + io.Reader + Size() int64 +} + +// Directory is a read-only interface to a "directory" schema blob. +type Directory interface { + // Readdir reads the contents of the directory associated with dr + // and returns an array of up to n DirectoryEntries structures. + // Subsequent calls on the same file will yield further + // DirectoryEntries. + // If n > 0, Readdir returns at most n DirectoryEntry structures. In + // this case, if Readdir returns an empty slice, it will return + // a non-nil error explaining why. At the end of a directory, + // the error is os.EOF. 
+ // If n <= 0, Readdir returns all the DirectoryEntries from the + // directory in a single slice. In this case, if Readdir succeeds + // (reads all the way to the end of the directory), it returns the + // slice and a nil os.Error. If it encounters an error before the + // end of the directory, Readdir returns the DirectoryEntry read + // until that point and a non-nil error. + Readdir(n int) ([]DirectoryEntry, error) +} + +type Symlink interface { + // .. TODO +} + +// FIFO is the read-only interface to a "fifo" schema blob. +type FIFO interface { + // .. TODO +} + +// Socket is the read-only interface to a "socket" schema blob. +type Socket interface { + // .. TODO +} + +// DirectoryEntry is a read-only interface to an entry in a (static) +// directory. +type DirectoryEntry interface { + // CamliType returns the schema blob's "camliType" field. + // This may be "file", "directory", "symlink", or other more + // obscure types added in the future. + CamliType() string + + FileName() string + BlobRef() blob.Ref + + File() (File, error) // if camliType is "file" + Directory() (Directory, error) // if camliType is "directory" + Symlink() (Symlink, error) // if camliType is "symlink" + FIFO() (FIFO, error) // if camliType is "fifo" + Socket() (Socket, error) // If camliType is "socket" +} + +// dirEntry is the default implementation of DirectoryEntry +type dirEntry struct { + ss superset + fetcher blob.Fetcher + fr *FileReader // or nil if not a file + dr *DirReader // or nil if not a directory +} + +// A SearchQuery must be of type *search.SearchQuery. +// This type breaks an otherwise-circular dependency. 
+type SearchQuery interface{} + +func (de *dirEntry) CamliType() string { + return de.ss.Type +} + +func (de *dirEntry) FileName() string { + return de.ss.FileNameString() +} + +func (de *dirEntry) BlobRef() blob.Ref { + return de.ss.BlobRef +} + +func (de *dirEntry) File() (File, error) { + if de.fr == nil { + if de.ss.Type != "file" { + return nil, fmt.Errorf("DirectoryEntry is camliType %q, not %q", de.ss.Type, "file") + } + fr, err := NewFileReader(de.fetcher, de.ss.BlobRef) + if err != nil { + return nil, err + } + de.fr = fr + } + return de.fr, nil +} + +func (de *dirEntry) Directory() (Directory, error) { + if de.dr == nil { + if de.ss.Type != "directory" { + return nil, fmt.Errorf("DirectoryEntry is camliType %q, not %q", de.ss.Type, "directory") + } + dr, err := NewDirReader(de.fetcher, de.ss.BlobRef) + if err != nil { + return nil, err + } + de.dr = dr + } + return de.dr, nil +} + +func (de *dirEntry) Symlink() (Symlink, error) { + return 0, errors.New("TODO: Symlink not implemented") +} + +func (de *dirEntry) FIFO() (FIFO, error) { + return 0, errors.New("TODO: FIFO not implemented") +} + +func (de *dirEntry) Socket() (Socket, error) { + return 0, errors.New("TODO: Socket not implemented") +} + +// newDirectoryEntry takes a superset and returns a DirectoryEntry if +// the Supserset is valid and represents an entry in a directory. It +// must by of type "file", "directory", "symlink" or "socket". +// TODO: "char", block", probably. later. 
+func newDirectoryEntry(fetcher blob.Fetcher, ss *superset) (DirectoryEntry, error) { + if ss == nil { + return nil, errors.New("ss was nil") + } + if !ss.BlobRef.Valid() { + return nil, errors.New("ss.BlobRef was invalid") + } + switch ss.Type { + case "file", "directory", "symlink", "fifo", "socket": + // Okay + default: + return nil, fmt.Errorf("invalid DirectoryEntry camliType of %q", ss.Type) + } + de := &dirEntry{ss: *ss, fetcher: fetcher} // defensive copy + return de, nil +} + +// NewDirectoryEntryFromBlobRef takes a BlobRef and returns a +// DirectoryEntry if the BlobRef contains a type "file", "directory", +// "symlink", "fifo" or "socket". +// TODO: ""char", "block", probably. later. +func NewDirectoryEntryFromBlobRef(fetcher blob.Fetcher, blobRef blob.Ref) (DirectoryEntry, error) { + ss := new(superset) + err := ss.setFromBlobRef(fetcher, blobRef) + if err != nil { + return nil, fmt.Errorf("schema/filereader: can't fill superset: %v\n", err) + } + return newDirectoryEntry(fetcher, ss) +} + +// superset represents the superset of common Camlistore JSON schema +// keys as a convenient json.Unmarshal target. +// TODO(bradfitz): unexport this type. Getting too gross. Move to schema.Blob +type superset struct { + // BlobRef isn't for a particular metadata blob field, but included + // for convenience. + BlobRef blob.Ref + + Version int `json:"camliVersion"` + Type string `json:"camliType"` + + Signer blob.Ref `json:"camliSigner"` + Sig string `json:"camliSig"` + + ClaimType string `json:"claimType"` + ClaimDate types.Time3339 `json:"claimDate"` + + Permanode blob.Ref `json:"permaNode"` + Attribute string `json:"attribute"` + Value string `json:"value"` + + // FileName and FileNameBytes represent one of the two + // representations of file names in schema blobs. They should + // not be accessed directly. Use the FileNameString accessor + // instead, which also sanitizes malicious values. 
+ FileName string `json:"fileName"` + FileNameBytes []interface{} `json:"fileNameBytes"` + + SymlinkTarget string `json:"symlinkTarget"` + SymlinkTargetBytes []interface{} `json:"symlinkTargetBytes"` + + UnixPermission string `json:"unixPermission"` + UnixOwnerId int `json:"unixOwnerId"` + UnixOwner string `json:"unixOwner"` + UnixGroupId int `json:"unixGroupId"` + UnixGroup string `json:"unixGroup"` + UnixMtime string `json:"unixMtime"` + UnixCtime string `json:"unixCtime"` + UnixAtime string `json:"unixAtime"` + + // Parts are references to the data chunks of a regular file (or a "bytes" schema blob). + // See doc/schema/bytes.txt and doc/schema/files/file.txt. + Parts []*BytesPart `json:"parts"` + + Entries blob.Ref `json:"entries"` // for directories, a blobref to a static-set + Members []blob.Ref `json:"members"` // for static sets (for directory static-sets: blobrefs to child dirs/files) + + // Search allows a "share" blob to share an entire search. Contrast with "target". + Search SearchQuery `json:"search"` + // Target is a "share" blob's target (the thing being shared) + // Or it is the object being deleted in a DeleteClaim claim. + Target blob.Ref `json:"target"` + // Transitive is a property of a "share" blob. + Transitive bool `json:"transitive"` + // AuthType is a "share" blob's authentication type that is required. + // Currently (2013-01-02) just "haveref" (if you know the share's blobref, + // you get access: the secret URL model) + AuthType string `json:"authType"` + Expires types.Time3339 `json:"expires"` // or zero for no expiration +} + +func parseSuperset(r io.Reader) (*superset, error) { + var ss superset + if err := json.NewDecoder(io.LimitReader(r, MaxSchemaBlobSize)).Decode(&ss); err != nil { + return nil, err + } + return &ss, nil +} + +// BlobReader returns a new Blob from the provided Reader r, +// which should be the body of the provided blobref. +// Note: the hash checksum is not verified. 
+func BlobFromReader(ref blob.Ref, r io.Reader) (*Blob, error) { + if !ref.Valid() { + return nil, errors.New("schema.BlobFromReader: invalid blobref") + } + var buf bytes.Buffer + tee := io.TeeReader(r, &buf) + ss, err := parseSuperset(tee) + if err != nil { + return nil, err + } + var wb [16]byte + afterObj := 0 + for { + n, err := tee.Read(wb[:]) + afterObj += n + for i := 0; i < n; i++ { + if !isASCIIWhite(wb[i]) { + return nil, fmt.Errorf("invalid bytes after JSON schema blob in %v", ref) + } + } + if afterObj > MaxSchemaBlobSize { + break + } + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + } + json := buf.String() + if len(json) > MaxSchemaBlobSize { + return nil, fmt.Errorf("schema: metadata blob %v is over expected limit; size=%d", ref, len(json)) + } + return &Blob{ref, json, ss}, nil +} + +func isASCIIWhite(b byte) bool { + switch b { + case ' ', '\t', '\r', '\n': + return true + } + return false +} + +// BytesPart is the type representing one of the "parts" in a "file" +// or "bytes" JSON schema. +// +// See doc/schema/bytes.txt and doc/schema/files/file.txt. +type BytesPart struct { + // Size is the number of bytes that this part contributes to the overall segment. + Size uint64 `json:"size"` + + // At most one of BlobRef or BytesRef must be non-zero + // (Valid), but it's illegal for both. + // If neither are set, this BytesPart represents Size zero bytes. + // BlobRef refers to raw bytes. BytesRef references a "bytes" schema blob. + BlobRef blob.Ref `json:"blobRef,omitempty"` + BytesRef blob.Ref `json:"bytesRef,omitempty"` + + // Offset optionally specifies the offset into BlobRef to skip + // when reading Size bytes. + Offset uint64 `json:"offset,omitempty"` +} + +// stringFromMixedArray joins a slice of either strings or float64 +// values (as retrieved from JSON decoding) into a string. These are +// used for non-UTF8 filenames in "fileNameBytes" fields. 
The strings +// are UTF-8 segments and the float64s (actually uint8 values) are +// byte values. +func stringFromMixedArray(parts []interface{}) string { + var buf bytes.Buffer + for _, part := range parts { + if s, ok := part.(string); ok { + buf.WriteString(s) + continue + } + if num, ok := part.(float64); ok { + buf.WriteByte(byte(num)) + continue + } + } + return buf.String() +} + +// mixedArrayFromString is the inverse of stringFromMixedArray. It +// splits a string to a series of either UTF-8 strings and non-UTF-8 +// bytes. +func mixedArrayFromString(s string) (parts []interface{}) { + for len(s) > 0 { + if n := utf8StrLen(s); n > 0 { + parts = append(parts, s[:n]) + s = s[n:] + } else { + parts = append(parts, s[0]) + s = s[1:] + } + } + return parts +} + +// utf8StrLen returns how many prefix bytes of s are valid UTF-8. +func utf8StrLen(s string) int { + for i, r := range s { + for r == utf8.RuneError { + // The RuneError value can be an error + // sentinel value (if it's size 1) or the same + // value encoded properly. Decode it to see if + // it's the 1 byte sentinel value. + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + return i + } + } + } + return len(s) +} + +func (ss *superset) SumPartsSize() (size uint64) { + for _, part := range ss.Parts { + size += uint64(part.Size) + } + return size +} + +func (ss *superset) SymlinkTargetString() string { + if ss.SymlinkTarget != "" { + return ss.SymlinkTarget + } + return stringFromMixedArray(ss.SymlinkTargetBytes) +} + +// FileNameString returns the schema blob's base filename. +// +// If the fileName field of the blob accidentally or maliciously +// contains a slash, this function returns an empty string instead. +func (ss *superset) FileNameString() string { + v := ss.FileName + if v == "" { + v = stringFromMixedArray(ss.FileNameBytes) + } + if v != "" { + if strings.Index(v, "/") != -1 { + // Bogus schema blob; ignore. 
+ return "" + } + if strings.Index(v, "\\") != -1 { + // Bogus schema blob; ignore. + return "" + } + } + return v +} + +func (ss *superset) HasFilename(name string) bool { + return ss.FileNameString() == name +} + +func (b *Blob) FileMode() os.FileMode { + // TODO: move this to a different type, off *Blob + return b.ss.FileMode() +} + +func (ss *superset) FileMode() os.FileMode { + var mode os.FileMode + hasPerm := ss.UnixPermission != "" + if hasPerm { + m64, err := strconv.ParseUint(ss.UnixPermission, 8, 64) + if err == nil { + mode = mode | os.FileMode(m64) + } + } + + // TODO: add other types (block, char, etc) + switch ss.Type { + case "directory": + mode = mode | os.ModeDir + case "file": + // No extra bit. + case "symlink": + mode = mode | os.ModeSymlink + case "fifo": + mode = mode | os.ModeNamedPipe + case "socket": + mode = mode | os.ModeSocket + } + if !hasPerm { + switch ss.Type { + case "directory": + mode |= 0755 + default: + mode |= 0644 + } + } + return mode +} + +// MapUid returns the most appropriate mapping from this file's owner +// to the local machine's owner, trying first a match by name, +// followed by just mapping the number through directly. +func (b *Blob) MapUid() int { return b.ss.MapUid() } + +// MapGid returns the most appropriate mapping from this file's group +// to the local machine's group, trying first a match by name, +// followed by just mapping the number through directly. 
+func (b *Blob) MapGid() int { return b.ss.MapGid() } + +func (ss *superset) MapUid() int { + if ss.UnixOwner != "" { + uid, ok := getUidFromName(ss.UnixOwner) + if ok { + return uid + } + } + return ss.UnixOwnerId // TODO: will be 0 if unset, which isn't ideal +} + +func (ss *superset) MapGid() int { + if ss.UnixGroup != "" { + gid, ok := getGidFromName(ss.UnixGroup) + if ok { + return gid + } + } + return ss.UnixGroupId // TODO: will be 0 if unset, which isn't ideal +} + +func (ss *superset) ModTime() time.Time { + if ss.UnixMtime == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC3339, ss.UnixMtime) + if err != nil { + return time.Time{} + } + return t +} + +var DefaultStatHasher = &defaultStatHasher{} + +type defaultStatHasher struct{} + +func (d *defaultStatHasher) Lstat(fileName string) (os.FileInfo, error) { + return os.Lstat(fileName) +} + +func (d *defaultStatHasher) Hash(fileName string) (blob.Ref, error) { + s1 := sha1.New() + file, err := os.Open(fileName) + if err != nil { + return blob.Ref{}, err + } + defer file.Close() + _, err = io.Copy(s1, file) + if err != nil { + return blob.Ref{}, err + } + return blob.RefFromHash(s1), nil +} + +type StaticSet struct { + l sync.Mutex + refs []blob.Ref +} + +func (ss *StaticSet) Add(ref blob.Ref) { + ss.l.Lock() + defer ss.l.Unlock() + ss.refs = append(ss.refs, ref) +} + +func base(version int, ctype string) *Builder { + return &Builder{map[string]interface{}{ + "camliVersion": version, + "camliType": ctype, + }} +} + +// NewUnsignedPermanode returns a new random permanode, not yet signed. +func NewUnsignedPermanode() *Builder { + bb := base(1, "permanode") + chars := make([]byte, 20) + _, err := io.ReadFull(rand.Reader, chars) + if err != nil { + panic("error reading random bytes: " + err.Error()) + } + bb.m["random"] = base64.StdEncoding.EncodeToString(chars) + return bb +} + +// NewPlannedPermanode returns a permanode with a fixed key. 
Like +// NewUnsignedPermanode, this builder is also not yet signed. Callers of +// NewPlannedPermanode must sign the map with a fixed claimDate and +// GPG date to create consistent JSON encodings of the Map (its +// blobref), between runs. +func NewPlannedPermanode(key string) *Builder { + bb := base(1, "permanode") + bb.m["key"] = key + return bb +} + +// NewHashPlannedPermanode returns a planned permanode with the sum +// of the hash, prefixed with "sha1-", as the key. +func NewHashPlannedPermanode(h hash.Hash) *Builder { + if reflect.TypeOf(h) != sha1Type { + panic("Hash not supported. Only sha1 for now.") + } + return NewPlannedPermanode(fmt.Sprintf("sha1-%x", h.Sum(nil))) +} + +// Map returns a Camli map of camliType "static-set" +// TODO: delete this method +func (ss *StaticSet) Blob() *Blob { + bb := base(1, "static-set") + ss.l.Lock() + defer ss.l.Unlock() + + members := make([]string, 0, len(ss.refs)) + if ss.refs != nil { + for _, ref := range ss.refs { + members = append(members, ref.String()) + } + } + bb.m["members"] = members + return bb.Blob() +} + +// JSON returns the map m encoded as JSON in its +// recommended canonical form. The canonical form is readable with newlines and indentation, +// and always starts with the header bytes: +// +// {"camliVersion": +// +func mapJSON(m map[string]interface{}) (string, error) { + version, hasVersion := m["camliVersion"] + if !hasVersion { + return "", ErrNoCamliVersion + } + delete(m, "camliVersion") + jsonBytes, err := json.MarshalIndent(m, "", " ") + if err != nil { + return "", err + } + m["camliVersion"] = version + var buf bytes.Buffer + fmt.Fprintf(&buf, "{\"camliVersion\": %v,\n", version) + buf.Write(jsonBytes[2:]) + return buf.String(), nil +} + +// NewFileMap returns a new builder of a type "file" schema for the provided fileName. +// The chunk parts of the file are not populated. 
+func NewFileMap(fileName string) *Builder { + return newCommonFilenameMap(fileName).SetType("file") +} + +// NewDirMap returns a new builder of a type "directory" schema for the provided fileName. +func NewDirMap(fileName string) *Builder { + return newCommonFilenameMap(fileName).SetType("directory") +} + +func newCommonFilenameMap(fileName string) *Builder { + bb := base(1, "" /* no type yet */) + if fileName != "" { + bb.SetFileName(fileName) + } + return bb +} + +var populateSchemaStat []func(schemaMap map[string]interface{}, fi os.FileInfo) + +func NewCommonFileMap(fileName string, fi os.FileInfo) *Builder { + bb := newCommonFilenameMap(fileName) + // Common elements (from file-common.txt) + if fi.Mode()&os.ModeSymlink == 0 { + bb.m["unixPermission"] = fmt.Sprintf("0%o", fi.Mode().Perm()) + } + + // OS-specific population; defined in schema_posix.go, etc. (not on App Engine) + for _, f := range populateSchemaStat { + f(bb.m, fi) + } + + if mtime := fi.ModTime(); !mtime.IsZero() { + bb.m["unixMtime"] = RFC3339FromTime(mtime) + } + return bb +} + +// PopulateParts sets the "parts" field of the blob with the provided +// parts. The sum of the sizes of parts must match the provided size +// or an error is returned. Also, each BytesPart may only contain either +// a BytesPart or a BlobRef, but not both. 
+func (bb *Builder) PopulateParts(size int64, parts []BytesPart) error { + return populateParts(bb.m, size, parts) +} + +func populateParts(m map[string]interface{}, size int64, parts []BytesPart) error { + sumSize := int64(0) + mparts := make([]map[string]interface{}, len(parts)) + for idx, part := range parts { + mpart := make(map[string]interface{}) + mparts[idx] = mpart + switch { + case part.BlobRef.Valid() && part.BytesRef.Valid(): + return errors.New("schema: part contains both BlobRef and BytesRef") + case part.BlobRef.Valid(): + mpart["blobRef"] = part.BlobRef.String() + case part.BytesRef.Valid(): + mpart["bytesRef"] = part.BytesRef.String() + default: + return errors.New("schema: part must contain either a BlobRef or BytesRef") + } + mpart["size"] = part.Size + sumSize += int64(part.Size) + if part.Offset != 0 { + mpart["offset"] = part.Offset + } + } + if sumSize != size { + return fmt.Errorf("schema: declared size %d doesn't match sum of parts size %d", size, sumSize) + } + m["parts"] = mparts + return nil +} + +func newBytes() *Builder { + return base(1, "bytes") +} + +// ClaimType is one of the valid "claimType" fields in a "claim" schema blob. See doc/schema/claims/. +type ClaimType string + +const ( + SetAttributeClaim ClaimType = "set-attribute" + AddAttributeClaim ClaimType = "add-attribute" + DelAttributeClaim ClaimType = "del-attribute" + ShareClaim ClaimType = "share" + // DeleteClaim deletes a permanode or another claim. + // A delete claim can itself be deleted, and so on. 
+ DeleteClaim ClaimType = "delete" +) + +// claimParam is used to populate a claim map when building a new claim +type claimParam struct { + claimType ClaimType + + // Params specific to *Attribute claims: + permanode blob.Ref // modified permanode + attribute string // required + value string // optional if Type == DelAttributeClaim + + // Params specific to ShareClaim claims: + authType string + transitive bool + shareExpires time.Time // Zero means no expiration + + // Params specific to ShareClaim and DeleteClaim claims. + target blob.Ref +} + +func newClaim(claims ...*claimParam) *Builder { + bb := base(1, "claim") + bb.SetClaimDate(clockNow()) + if len(claims) == 1 { + cp := claims[0] + populateClaimMap(bb.m, cp) + return bb + } + var claimList []interface{} + for _, cp := range claims { + m := map[string]interface{}{} + populateClaimMap(m, cp) + claimList = append(claimList, m) + } + bb.m["claimType"] = "multi" + bb.m["claims"] = claimList + return bb +} + +func populateClaimMap(m map[string]interface{}, cp *claimParam) { + m["claimType"] = string(cp.claimType) + switch cp.claimType { + case ShareClaim: + m["authType"] = cp.authType + m["transitive"] = cp.transitive + case DeleteClaim: + m["target"] = cp.target.String() + default: + m["permaNode"] = cp.permanode.String() + m["attribute"] = cp.attribute + if !(cp.claimType == DelAttributeClaim && cp.value == "") { + m["value"] = cp.value + } + } +} + +// NewShareRef creates a *Builder for a "share" claim. 
+func NewShareRef(authType string, transitive bool) *Builder { + return newClaim(&claimParam{ + claimType: ShareClaim, + authType: authType, + transitive: transitive, + }) +} + +func NewSetAttributeClaim(permaNode blob.Ref, attr, value string) *Builder { + return newClaim(&claimParam{ + permanode: permaNode, + claimType: SetAttributeClaim, + attribute: attr, + value: value, + }) +} + +func NewAddAttributeClaim(permaNode blob.Ref, attr, value string) *Builder { + return newClaim(&claimParam{ + permanode: permaNode, + claimType: AddAttributeClaim, + attribute: attr, + value: value, + }) +} + +// NewDelAttributeClaim creates a new claim to remove value from the +// values set for the attribute attr of permaNode. If value is empty then +// all the values for attribute are cleared. +func NewDelAttributeClaim(permaNode blob.Ref, attr, value string) *Builder { + return newClaim(&claimParam{ + permanode: permaNode, + claimType: DelAttributeClaim, + attribute: attr, + value: value, + }) +} + +// NewDeleteClaim creates a new claim to delete a target claim or permanode. +func NewDeleteClaim(target blob.Ref) *Builder { + return newClaim(&claimParam{ + target: target, + claimType: DeleteClaim, + }) +} + +// ShareHaveRef is the auth type specifying that if you "have the +// reference" (know the blobref to the haveref share blob), then you +// have access to the referenced object from that share blob. +// This is the "send a link to a friend" access model. +const ShareHaveRef = "haveref" + +// UnknownLocation is a magic timezone value used when the actual location +// of a time is unknown. For instance, EXIF files commonly have a time without +// a corresponding location or timezone offset. +var UnknownLocation = time.FixedZone("Unknown", -60) // 1 minute west + +// IsZoneKnown reports whether t is in a known timezone. +// Camlistore uses the magic timezone offset of 1 minute west of UTC +// to mean that the timezone wasn't known. 
+func IsZoneKnown(t time.Time) bool { + if t.Location() == UnknownLocation { + return false + } + if _, off := t.Zone(); off == -60 { + return false + } + return true +} + +// RFC3339FromTime returns an RFC3339-formatted time. +// +// If the timezone is known, the time will be converted to UTC and +// returned with a "Z" suffix. For unknown zones, the timezone will be +// "-00:01" (1 minute west of UTC). +// +// Fractional seconds are only included if the time has fractional +// seconds. +func RFC3339FromTime(t time.Time) string { + if IsZoneKnown(t) { + t = t.UTC() + } + if t.UnixNano()%1e9 == 0 { + return t.Format(time.RFC3339) + } + return t.Format(time.RFC3339Nano) +} + +var bytesCamliVersion = []byte("camliVersion") + +// LikelySchemaBlob returns quickly whether buf likely contains (or is +// the prefix of) a schema blob. +func LikelySchemaBlob(buf []byte) bool { + if len(buf) == 0 || buf[0] != '{' { + return false + } + return bytes.Contains(buf, bytesCamliVersion) +} + +// findSize checks if v is an *os.File or if it has +// a Size() int64 method, to find its size. +// It returns 0, false otherwise. +func findSize(v interface{}) (size int64, ok bool) { + if fi, ok := v.(*os.File); ok { + v, _ = fi.Stat() + } + if sz, ok := v.(interface { + Size() int64 + }); ok { + return sz.Size(), true + } + // For bytes.Reader, strings.Reader, etc: + if li, ok := v.(interface { + Len() int + }); ok { + ln := int64(li.Len()) // unread portion, typically + // If it's also a seeker, remove add any seek offset: + if sk, ok := v.(io.Seeker); ok { + if cur, err := sk.Seek(0, 1); err == nil { + ln += cur + } + } + return ln, true + } + return 0, false +} + +// FileTime returns the best guess of the file's creation time (or modtime). +// If the file doesn't have its own metadata indication the creation time (such as in EXIF), +// FileTime uses the modification time from the file system. 
+// It there was a valid EXIF but an error while trying to get a date from it, +// it logs the error and tries the other methods. +func FileTime(f io.ReaderAt) (time.Time, error) { + var ct time.Time + defaultTime := func() (time.Time, error) { + if osf, ok := f.(*os.File); ok { + fi, err := osf.Stat() + if err != nil { + return ct, fmt.Errorf("Failed to find a modtime: stat: %v", err) + } + return fi.ModTime(), nil + } + return ct, errors.New("All methods failed to find a creation time or modtime.") + } + + size, ok := findSize(f) + if !ok { + size = 256 << 10 // enough to get the EXIF + } + r := io.NewSectionReader(f, 0, size) + var tiffErr error + ex, err := exif.Decode(r) + if err != nil { + tiffErr = err + if exif.IsShortReadTagValueError(err) { + return ct, io.ErrUnexpectedEOF + } + if exif.IsCriticalError(err) || exif.IsExifError(err) { + return defaultTime() + } + } + ct, err = ex.DateTime() + if err != nil { + return defaultTime() + } + // If the EXIF file only had local timezone, but it did have + // GPS, then lookup the timezone and correct the time. + if ct.Location() == time.Local { + if exif.IsGPSError(tiffErr) { + log.Printf("Invalid EXIF GPS data: %v", tiffErr) + return ct, nil + } + if lat, long, err := ex.LatLong(); err == nil { + if loc := lookupLocation(latlong.LookupZoneName(lat, long)); loc != nil { + if t, err := exifDateTimeInLocation(ex, loc); err == nil { + return t, nil + } + } + } else if !exif.IsTagNotPresentError(err) { + log.Printf("Invalid EXIF GPS data: %v", err) + } + } + return ct, nil +} + +// This is basically a copy of the exif.Exif.DateTime() method, except: +// * it takes a *time.Location to assume +// * the caller already assumes there's no timezone offset or GPS time +// in the EXIF, so any of that code can be ignored. 
+func exifDateTimeInLocation(x *exif.Exif, loc *time.Location) (time.Time, error) { + tag, err := x.Get(exif.DateTimeOriginal) + if err != nil { + tag, err = x.Get(exif.DateTime) + if err != nil { + return time.Time{}, err + } + } + if tag.Format() != tiff.StringVal { + return time.Time{}, errors.New("DateTime[Original] not in string format") + } + const exifTimeLayout = "2006:01:02 15:04:05" + dateStr := strings.TrimRight(string(tag.Val), "\x00") + return time.ParseInLocation(exifTimeLayout, dateStr, loc) +} + +var zoneCache struct { + sync.RWMutex + m map[string]*time.Location +} + +func lookupLocation(zone string) *time.Location { + if zone == "" { + return nil + } + zoneCache.RLock() + l, ok := zoneCache.m[zone] + zoneCache.RUnlock() + if ok { + return l + } + // could use singleflight here, but doesn't really + // matter if two callers both do this. + loc, err := time.LoadLocation(zone) + + zoneCache.Lock() + if zoneCache.m == nil { + zoneCache.m = make(map[string]*time.Location) + } + zoneCache.m[zone] = loc // even if nil + zoneCache.Unlock() + + if err != nil { + log.Printf("failed to lookup timezone %q: %v", zone, err) + return nil + } + return loc +} + +var boringTitlePattern = regexp.MustCompile(`^(?:IMG_|DSC|PANO_|ESR_).*$`) + +// IsInterestingTitle returns whether title would be interesting information as +// a title for a permanode. For example, filenames automatically created by +// cameras, such as IMG_XXXX.JPG, do not add any interesting value. 
+func IsInterestingTitle(title string) bool { + return !boringTitlePattern.MatchString(title) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/schema_darwin.go b/vendor/github.com/camlistore/camlistore/pkg/schema/schema_darwin.go new file mode 100644 index 00000000..a236caad --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/schema_darwin.go @@ -0,0 +1,28 @@ +//+build darwin +//+build !appengine + +package schema + +import ( + "os" + "syscall" + "time" +) + +func init() { + populateSchemaStat = append(populateSchemaStat, populateSchemaCtime) +} + +func populateSchemaCtime(m map[string]interface{}, fi os.FileInfo) { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return + } + + // Include the ctime too, if it differs. + sec, nsec := st.Ctimespec.Unix() + ctime := time.Unix(sec, nsec) + if sec != 0 && !ctime.Equal(fi.ModTime()) { + m["unixCtime"] = RFC3339FromTime(ctime) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/schema_linux.go b/vendor/github.com/camlistore/camlistore/pkg/schema/schema_linux.go new file mode 100644 index 00000000..cd78e060 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/schema_linux.go @@ -0,0 +1,28 @@ +//+build linux +//+build !appengine + +package schema + +import ( + "os" + "syscall" + "time" +) + +func init() { + populateSchemaStat = append(populateSchemaStat, populateSchemaCtime) +} + +func populateSchemaCtime(m map[string]interface{}, fi os.FileInfo) { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return + } + + // Include the ctime too, if it differs. 
+ sec, nsec := st.Ctim.Unix() + ctime := time.Unix(sec, nsec) + if sec != 0 && !ctime.Equal(fi.ModTime()) { + m["unixCtime"] = RFC3339FromTime(ctime) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/schema_posix.go b/vendor/github.com/camlistore/camlistore/pkg/schema/schema_posix.go new file mode 100644 index 00000000..ceb32cc0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/schema_posix.go @@ -0,0 +1,28 @@ +//+build linux darwin netbsd freebsd openbsd +//+build !appengine + +package schema + +import ( + "os" + "syscall" +) + +func init() { + populateSchemaStat = append(populateSchemaStat, populateSchemaUnix) +} + +func populateSchemaUnix(m map[string]interface{}, fi os.FileInfo) { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return + } + m["unixOwnerId"] = st.Uid + if user := getUserFromUid(int(st.Uid)); user != "" { + m["unixOwner"] = user + } + m["unixGroupId"] = st.Gid + if group := getGroupFromGid(int(st.Gid)); group != "" { + m["unixGroup"] = group + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/schema_public_test.go b/vendor/github.com/camlistore/camlistore/pkg/schema/schema_public_test.go new file mode 100644 index 00000000..5a47b0cc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/schema_public_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package schema_test + +import ( + "testing" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/search" +) + +func TestShareSearchSerialization(t *testing.T) { + signer := blob.MustParse("yyy-5678") + + q := &search.SearchQuery{ + Expression: "is:image", + Limit: 42, + } + bb := schema.NewShareRef(schema.ShareHaveRef, true) + bb.SetShareSearch(q) + bb = bb.SetSigner(signer) + bb = bb.SetClaimDate(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)) + s := bb.Blob().JSON() + + want := `{"camliVersion": 1, + "authType": "haveref", + "camliSigner": "yyy-5678", + "camliType": "claim", + "claimDate": "2009-11-10T23:00:00Z", + "claimType": "share", + "search": { + "expression": "is:image", + "limit": 42, + "around": null + }, + "transitive": true +}` + if want != s { + t.Errorf("Incorrect serialization of shared search. Wanted:\n %s\nGot:\n%s\n", want, s) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/schema_test.go b/vendor/github.com/camlistore/camlistore/pkg/schema/schema_test.go new file mode 100644 index 00000000..e741fedc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/schema_test.go @@ -0,0 +1,705 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/osutil" + . 
"camlistore.org/pkg/test/asserts" +) + +const kExpectedHeader = `{"camliVersion"` + +func TestJSON(t *testing.T) { + fileName := "schema_test.go" + fi, _ := os.Lstat(fileName) + m := NewCommonFileMap(fileName, fi) + json, err := m.JSON() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + t.Logf("Got json: [%s]\n", json) + // TODO: test it parses back + + if !strings.HasPrefix(json, kExpectedHeader) { + t.Errorf("JSON does't start with expected header.") + } + +} + +func TestRegularFile(t *testing.T) { + fileName := "schema_test.go" + fi, err := os.Lstat(fileName) + AssertNil(t, err, "schema_test.go stat") + m := NewCommonFileMap("schema_test.go", fi) + json, err := m.JSON() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + t.Logf("Got json for regular file: [%s]\n", json) +} + +func TestSymlink(t *testing.T) { + td, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + symFile := filepath.Join(td, "test-symlink") + if err := os.Symlink("test-target", symFile); err != nil { + t.Fatal(err) + } + + // Shouldn't be accessed: + if err := ioutil.WriteFile(filepath.Join(td, "test-target"), []byte("foo bar"), 0644); err != nil { + t.Fatal(err) + } + + fi, err := os.Lstat(symFile) + if err != nil { + t.Fatal(err) + } + m := NewCommonFileMap(symFile, fi) + json, err := m.JSON() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if strings.Contains(string(json), "unixPermission") { + t.Errorf("JSON unexpectedly contains unixPermission: [%s]\n", json) + } +} + +func TestUtf8StrLen(t *testing.T) { + tests := []struct { + in string + want int + }{ + {"", 0}, + {"a", 1}, + {"foo", 3}, + {"Здравствуйте!", 25}, + {"foo\x80", 3}, + {"\x80foo", 0}, + } + for _, tt := range tests { + got := utf8StrLen(tt.in) + if got != tt.want { + t.Errorf("utf8StrLen(%q) = %v; want %v", tt.in, got, tt.want) + } + } +} + +func TestMixedArrayFromString(t *testing.T) { + b80 := byte('\x80') + tests := []struct { + in 
string + want []interface{} + }{ + {"foo", []interface{}{"foo"}}, + {"\x80foo", []interface{}{b80, "foo"}}, + {"foo\x80foo", []interface{}{"foo", b80, "foo"}}, + {"foo\x80", []interface{}{"foo", b80}}, + {"\x80", []interface{}{b80}}, + {"\x80\x80", []interface{}{b80, b80}}, + } + for _, tt := range tests { + got := mixedArrayFromString(tt.in) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("mixedArrayFromString(%q) = %#v; want %#v", tt.in, got, tt.want) + } + } +} + +type mixPartsTest struct { + json, expected string +} + +func TestStringFromMixedArray(t *testing.T) { + tests := []mixPartsTest{ + {`["brad"]`, "brad"}, + {`["brad", 32, 70]`, "brad F"}, + {`["brad", "fitz"]`, "bradfitz"}, + {`["Am", 233, "lie.jpg"]`, "Am\xe9lie.jpg"}, + } + for idx, test := range tests { + var v []interface{} + if err := json.Unmarshal([]byte(test.json), &v); err != nil { + t.Fatalf("invalid JSON in test %d", idx) + } + got := stringFromMixedArray(v) + if got != test.expected { + t.Errorf("test %d got %q; expected %q", idx, got, test.expected) + } + } +} + +func TestParseInLocation_UnknownLocation(t *testing.T) { + // Example of parsing a time from an API (e.g. Flickr) that + // doesn't know its timezone. 
+ const format = "2006-01-02 15:04:05" + const when = "2010-11-12 13:14:15" + tm, err := time.ParseInLocation(format, when, UnknownLocation) + if err != nil { + t.Fatal(err) + } + got, want := RFC3339FromTime(tm), "2010-11-12T13:14:15-00:01" + if got != want { + t.Errorf("parsed %v to %s; want %s", tm, got, want) + } +} + +func TestIsZoneKnown(t *testing.T) { + if !IsZoneKnown(time.Now()) { + t.Errorf("should know Now's zone") + } + if !IsZoneKnown(time.Now().UTC()) { + t.Errorf("UTC should be known") + } + if IsZoneKnown(time.Now().In(UnknownLocation)) { + t.Errorf("with explicit unknown location, should be false") + } + if IsZoneKnown(time.Now().In(time.FixedZone("xx", -60))) { + t.Errorf("with other fixed zone at -60, should be false") + } +} + +func TestRFC3339(t *testing.T) { + tests := []string{ + "2012-05-13T15:02:47Z", + "2012-05-13T15:02:47.1234Z", + "2012-05-13T15:02:47.123456789Z", + "2012-05-13T15:02:47-00:01", + } + for _, in := range tests { + tm, err := time.Parse(time.RFC3339, in) + if err != nil { + t.Errorf("error parsing %q", in) + continue + } + knownZone := IsZoneKnown(tm) + out := RFC3339FromTime(tm) + if in != out { + t.Errorf("RFC3339FromTime(%q) = %q; want %q", in, out, in) + } + + sub := "Z" + if !knownZone { + sub = "-00:01" + } + if !strings.Contains(out, sub) { + t.Errorf("expected substring %q in %q", sub, out) + } + } +} + +func TestBlobFromReader(t *testing.T) { + br := blob.MustParse("sha1-f1d2d2f924e986ac86fdf7b36c94bcdf32beec15") + blob, err := BlobFromReader(br, strings.NewReader(`{"camliVersion": 1, "camliType": "foo"} `)) + if err != nil { + t.Error(err) + } else if blob.Type() != "foo" { + t.Errorf("got type %q; want foo", blob.Type()) + } + + blob, err = BlobFromReader(br, strings.NewReader(`{"camliVersion": 1, "camliType": "foo"} X `)) + if err == nil { + // TODO(bradfitz): fix this somehow. Currently encoding/json's + // decoder over-reads. 
+ // See: https://code.google.com/p/go/issues/detail?id=1955 , + // which was "fixed", but not really. + t.Logf("TODO(bradfitz): make sure bogus non-whitespace after the JSON object causes an error.") + } +} + +func TestAttribute(t *testing.T) { + tm := time.Unix(123, 456) + br := blob.MustParse("xxx-1234") + tests := []struct { + bb *Builder + want string + }{ + { + bb: NewSetAttributeClaim(br, "attr1", "val1"), + want: `{"camliVersion": 1, + "attribute": "attr1", + "camliType": "claim", + "claimDate": "1970-01-01T00:02:03.000000456Z", + "claimType": "set-attribute", + "permaNode": "xxx-1234", + "value": "val1" +}`, + }, + { + bb: NewAddAttributeClaim(br, "tag", "funny"), + want: `{"camliVersion": 1, + "attribute": "tag", + "camliType": "claim", + "claimDate": "1970-01-01T00:02:03.000000456Z", + "claimType": "add-attribute", + "permaNode": "xxx-1234", + "value": "funny" +}`, + }, + { + bb: NewDelAttributeClaim(br, "attr1", "val1"), + want: `{"camliVersion": 1, + "attribute": "attr1", + "camliType": "claim", + "claimDate": "1970-01-01T00:02:03.000000456Z", + "claimType": "del-attribute", + "permaNode": "xxx-1234", + "value": "val1" +}`, + }, + { + bb: NewDelAttributeClaim(br, "attr2", ""), + want: `{"camliVersion": 1, + "attribute": "attr2", + "camliType": "claim", + "claimDate": "1970-01-01T00:02:03.000000456Z", + "claimType": "del-attribute", + "permaNode": "xxx-1234" +}`, + }, + { + bb: newClaim(&claimParam{ + permanode: br, + claimType: SetAttributeClaim, + attribute: "foo", + value: "bar", + }, &claimParam{ + permanode: br, + claimType: DelAttributeClaim, + attribute: "foo", + value: "specific-del", + }, &claimParam{ + permanode: br, + claimType: DelAttributeClaim, + attribute: "foo", + }), + want: `{"camliVersion": 1, + "camliType": "claim", + "claimDate": "1970-01-01T00:02:03.000000456Z", + "claimType": "multi", + "claims": [ + { + "attribute": "foo", + "claimType": "set-attribute", + "permaNode": "xxx-1234", + "value": "bar" + }, + { + "attribute": "foo", + 
"claimType": "del-attribute", + "permaNode": "xxx-1234", + "value": "specific-del" + }, + { + "attribute": "foo", + "claimType": "del-attribute", + "permaNode": "xxx-1234" + } + ] +}`, + }, + } + for i, tt := range tests { + tt.bb.SetClaimDate(tm) + got, err := tt.bb.JSON() + if err != nil { + t.Errorf("%d. JSON error = %v", i, err) + continue + } + if got != tt.want { + t.Errorf("%d.\t got:\n%s\n\twant:q\n%s", i, got, tt.want) + } + } +} + +func TestDeleteClaim(t *testing.T) { + tm := time.Unix(123, 456) + br := blob.MustParse("xxx-1234") + delTest := struct { + bb *Builder + want string + }{ + bb: NewDeleteClaim(br), + want: `{"camliVersion": 1, + "camliType": "claim", + "claimDate": "1970-01-01T00:02:03.000000456Z", + "claimType": "delete", + "target": "xxx-1234" +}`, + } + delTest.bb.SetClaimDate(tm) + got, err := delTest.bb.JSON() + if err != nil { + t.Fatalf("JSON error = %v", err) + } + if got != delTest.want { + t.Fatalf("got:\n%s\n\twant:q\n%s", got, delTest.want) + } +} + +func TestAsClaimAndAsShare(t *testing.T) { + br := blob.MustParse("xxx-1234") + signer := blob.MustParse("yyy-5678") + + bb := NewSetAttributeClaim(br, "title", "Test Title") + getBlob := func() *Blob { + var c *Blob + c = bb.Blob() + c.ss.Sig = "non-null-sig" // required by AsShare + return c + } + + bb = bb.SetSigner(signer) + bb = bb.SetClaimDate(time.Now()) + c1 := getBlob() + + bb = NewShareRef(ShareHaveRef, true) + bb = bb.SetSigner(signer) + bb = bb.SetClaimDate(time.Now()) + c2 := getBlob() + + if !br.Valid() { + t.Error("Blobref not valid") + } + + _, ok := c1.AsClaim() + if !ok { + t.Error("Claim 1 not returned as claim") + } + + _, ok = c2.AsClaim() + if !ok { + t.Error("Claim 2 not returned as claim") + } + + s, ok := c1.AsShare() + if ok { + t.Error("Title claim returned share", s) + } + + _, ok = c2.AsShare() + if ok { + t.Error("Share claim returned share without target or search") + } + + bb.SetShareTarget(br) + s, ok = getBlob().AsShare() + if !ok { + t.Error("Share 
claim failed to return share with target") + } + + bb = NewShareRef(ShareHaveRef, true) + bb = bb.SetSigner(signer) + bb = bb.SetClaimDate(time.Now()) + // Would be better to use search.SearchQuery but we can't reference it here. + bb.SetShareSearch(&struct{}{}) + s, ok = getBlob().AsShare() + if !ok { + t.Error("Share claim failed to return share with search") + } +} + +func TestShareExpiration(t *testing.T) { + defer func() { clockNow = time.Now }() + b, err := BlobFromReader( + blob.MustParse("sha1-64ffa72fa9bcb2f825e7ed40b9451e5cadca4c2c"), + strings.NewReader(`{"camliVersion": 1, + "authType": "haveref", + "camliSigner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "camliType": "claim", + "claimDate": "2013-09-08T23:58:53.656549677Z", + "claimType": "share", + "expires": "2013-09-09T23:58:53.65658012Z", + "target": "sha1-f1d2d2f924e986ac86fdf7b36c94bcdf32beec15", + "transitive": false +,"camliSig":"wsBcBAABCAAQBQJSLQ89CRApMaZ8JvWr2gAAcuEIABRQolhn+yKksfaBx6oLo18NWvWQ+aYweF+5Gu0TH0Ixur7t1o5HFtFSSfFISyggSZDJSjsxoxaawhWrvCe9dZuU2s/zgRpgUtd2xmBt82tLOn9JidnUavsNGFXbfCwdUBSkzN0vDYLmgXW0VtiybB354uIKfOInZor2j8Mq0p6pkWzK3qq9W0dku7iE96YFaTb4W7eOikqoSC6VpjC1/4MQWOYRHLcPcIEY6xJ8es2sYMMSNXuVaR9nMupz8ZcTygP4jh+lPR1OH61q/FSjpRp7GKt4wZ1PknYjMbnpIzVjiSz0MkYd65bpZwuPOwZh/h2kHW7wvHNQZfWUJHEsOAI==J2ID"}`), + ) + if err != nil { + t.Fatal(err) + } + s, ok := b.AsShare() + if !ok { + t.Fatal("expected share") + } + clockNow = func() time.Time { return time.Unix(100, 0) } + if s.IsExpired() { + t.Error("expected not expired") + } + clockNow = func() time.Time { return time.Unix(1378687181+2*86400, 0) } + if !s.IsExpired() { + t.Error("expected expired") + } + + // And without an expiration time: + b, err = BlobFromReader( + blob.MustParse("sha1-931875ec6b8d917b7aae9f672f4f92de1ffaeeb1"), + strings.NewReader(`{"camliVersion": 1, + "authType": "haveref", + "camliSigner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "camliType": "claim", + "claimDate": 
"2013-09-09T01:01:09.907842963Z", + "claimType": "share", + "target": "sha1-64ffa72fa9bcb2f825e7ed40b9451e5cadca4c2c", + "transitive": false +,"camliSig":"wsBcBAABCAAQBQJSLR3VCRApMaZ8JvWr2gAA14kIAKmi5rCI5JTBvHbBuAu7wPVA87BLXm/BaD6zjqOENB4U8B+6KxyuT6KXe9P591IDXdZmJTP5tesbLtKw0iAWiRf2ea0Y7Ms3K77nLnSZM5QIOzb4aQKd1668p/5KqU3VfNayoHt69YkXyKBkqyEPjHINzC03QuLz5NIEBMYJaNqKKtEtSgh4gG8BBYq5qQzdKFg/Hx7VhkhW1y/1wwGSFJjaiPFMIJsF4d/gaO01Ip7XLro63ccyCy81tqKHnVjv0uULmZdbpgd3RHGGSnW3c9BfqkGvc3Wl11UQKzqc9OT+WTAWp8TXg6bLES9sQNzerx2wUfjKB9J4Yrk14iBfjl8==AynO"}`), + ) + if err != nil { + t.Fatal(err) + } + s, ok = b.AsShare() + if !ok { + t.Fatal("expected share") + } + clockNow = func() time.Time { return time.Unix(100, 0) } + if s.IsExpired() { + t.Error("expected not expired") + } + clockNow = func() time.Time { return time.Unix(1378687181+2*86400, 0) } + if s.IsExpired() { + t.Error("expected not expired") + } +} + +// camlistore.org/issue/305 +func TestIssue305(t *testing.T) { + var in = `{"camliVersion": 1, + "camliType": "file", + "fileName": "2012-03-10 15.03.18.m4v", + "parts": [ + { + "bytesRef": "sha1-c76d8b17b887c207875e61a77b7eccc60289e61c", + "size": 20032564 + } + ] +}` + var ss superset + if err := json.NewDecoder(strings.NewReader(in)).Decode(&ss); err != nil { + t.Fatal(err) + } + inref := blob.SHA1FromString(in) + blob, err := BlobFromReader(inref, strings.NewReader(in)) + if err != nil { + t.Fatal(err) + } + if blob.BlobRef() != inref { + t.Errorf("original ref = %s; want %s", blob.BlobRef(), inref) + } + bb := blob.Builder() + jback, err := bb.JSON() + if err != nil { + t.Fatal(err) + } + if jback != in { + t.Errorf("JSON doesn't match:\n got: %q\nwant: %q\n", jback, in) + } + out := bb.Blob() + if got := out.BlobRef(); got != inref { + t.Errorf("cloned ref = %v; want %v", got, inref) + } +} + +func TestStaticFileAndStaticSymlink(t *testing.T) { + // TODO (marete): Split this into two test functions. 
+ fd, err := ioutil.TempFile("", "schema-test-") + if err != nil { + t.Fatalf("io.TempFile(): %v", err) + } + defer os.Remove(fd.Name()) + defer fd.Close() + + fi, err := os.Lstat(fd.Name()) + if err != nil { + t.Fatalf("os.Lstat(): %v", err) + } + + bb := NewCommonFileMap(fd.Name(), fi) + bb.SetType("file") + bb.SetFileName(fd.Name()) + blob := bb.Blob() + + sf, ok := blob.AsStaticFile() + if !ok { + t.Fatalf("Blob.AsStaticFile(): Unexpected return value: false") + } + if want, got := filepath.Base(fd.Name()), sf.FileName(); want != got { + t.Fatalf("StaticFile.FileName(): Expected %s, got %s", + want, got) + } + + _, ok = sf.AsStaticSymlink() + if ok { + t.Fatalf("StaticFile.AsStaticSymlink(): Unexpected return value: true") + } + + dir, err := ioutil.TempDir("", "schema-test-") + if err != nil { + t.Fatalf("ioutil.TempDir(): %v", err) + } + defer os.RemoveAll(dir) + + target := "bar" + src := filepath.Join(dir, "foo") + err = os.Symlink(target, src) + fi, err = os.Lstat(src) + if err != nil { + t.Fatalf("os.Lstat(): %v", err) + } + + bb = NewCommonFileMap(src, fi) + bb.SetType("symlink") + bb.SetFileName(src) + bb.SetSymlinkTarget(target) + blob = bb.Blob() + + sf, ok = blob.AsStaticFile() + if !ok { + t.Fatalf("Blob.AsStaticFile(): Unexpected return value: false") + } + sl, ok := sf.AsStaticSymlink() + if !ok { + t.Fatalf("StaticFile.AsStaticSymlink(): Unexpected return value: false") + } + + if want, got := filepath.Base(src), sl.FileName(); want != got { + t.Fatalf("StaticSymlink.FileName(): Expected %s, got %s", + want, got) + } + + if want, got := target, sl.SymlinkTargetString(); got != want { + t.Fatalf("StaticSymlink.SymlinkTargetString(): Expected %s, got %s", want, got) + } +} + +func TestStaticFIFO(t *testing.T) { + tdir, err := ioutil.TempDir("", "schema-test-") + if err != nil { + t.Fatalf("ioutil.TempDir(): %v", err) + } + defer os.RemoveAll(tdir) + + fifoPath := filepath.Join(tdir, "fifo") + err = osutil.Mkfifo(fifoPath, 0660) + if err == 
osutil.ErrNotSupported { + t.SkipNow() + } + if err != nil { + t.Fatalf("osutil.Mkfifo(): %v", err) + } + + fi, err := os.Lstat(fifoPath) + if err != nil { + t.Fatalf("os.Lstat(): %v", err) + } + + bb := NewCommonFileMap(fifoPath, fi) + bb.SetType("fifo") + bb.SetFileName(fifoPath) + blob := bb.Blob() + t.Logf("Got JSON for fifo: %s\n", blob.JSON()) + + sf, ok := blob.AsStaticFile() + if !ok { + t.Fatalf("Blob.AsStaticFile(): Expected true, got false") + } + _, ok = sf.AsStaticFIFO() + if !ok { + t.Fatalf("StaticFile.AsStaticFIFO(): Expected true, got false") + } +} + +func TestStaticSocket(t *testing.T) { + tdir, err := ioutil.TempDir("", "schema-test-") + if err != nil { + t.Fatalf("ioutil.TempDir(): %v", err) + } + defer os.RemoveAll(tdir) + + sockPath := filepath.Join(tdir, "socket") + err = osutil.Mksocket(sockPath) + if err == osutil.ErrNotSupported { + t.SkipNow() + } + if err != nil { + t.Fatalf("osutil.Mksocket(): %v", err) + } + + fi, err := os.Lstat(sockPath) + if err != nil { + t.Fatalf("os.Lstat(): %v", err) + } + + bb := NewCommonFileMap(sockPath, fi) + bb.SetType("socket") + bb.SetFileName(sockPath) + blob := bb.Blob() + t.Logf("Got JSON for socket: %s\n", blob.JSON()) + + sf, ok := blob.AsStaticFile() + if !ok { + t.Fatalf("Blob.AsStaticFile(): Expected true, got false") + } + _, ok = sf.AsStaticSocket() + if !ok { + t.Fatalf("StaticFile.AsStaticSocket(): Expected true, got false") + } +} + +func TestTimezoneEXIFCorrection(t *testing.T) { + // Test that we get UTC times for photos taken in two + // different timezones. + // Both only have local time + GPS in the exif. 
+ tests := []struct { + file, want, wantUTC string + }{ + {"coffee-sf.jpg", "2014-07-11 08:44:34 -0700 PDT", "2014-07-11 15:44:34 +0000 UTC"}, + {"gocon-tokyo.jpg", "2014-05-31 13:34:04 +0900 JST", "2014-05-31 04:34:04 +0000 UTC"}, + } + for _, tt := range tests { + f, err := os.Open("testdata/" + tt.file) + if err != nil { + t.Fatal(err) + } + // Hide *os.File type from FileTime, so it can't use modtime: + tm, err := FileTime(struct{ io.ReaderAt }{f}) + f.Close() + if err != nil { + t.Errorf("%s: %v", tt.file, err) + continue + } + if got := tm.String(); got != tt.want { + t.Errorf("%s: time = %q; want %q", tt.file, got, tt.want) + } + if got := tm.UTC().String(); got != tt.wantUTC { + t.Errorf("%s: utc time = %q; want %q", tt.file, got, tt.wantUTC) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/sign.go b/vendor/github.com/camlistore/camlistore/pkg/schema/sign.go new file mode 100644 index 00000000..801ed624 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/sign.go @@ -0,0 +1,130 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/jsonsign" + "camlistore.org/third_party/code.google.com/p/go.crypto/openpgp" +) + +// A Signer signs the JSON schema blobs that require signing, such as claims +// and permanodes. 
+type Signer struct { + keyId string // short one; 8 capital hex digits + pubref blob.Ref + privEntity *openpgp.Entity + + // baseSigReq is the prototype signing request used with the jsonsig + // package. + baseSigReq jsonsign.SignRequest +} + +func (s *Signer) String() string { + return fmt.Sprintf("[*schema.Signer for key=%s pubkey=%s]", s.keyId, s.pubref) +} + +// KeyID returns the short 8 capital hex digit GPG key ID +func (s *Signer) KeyID() string { + return s.keyId +} + +// NewSigner returns an Signer given an armored public key's blobref, +// its armored content, and its associated private key entity. +// The privateKeySource must be either an *openpgp.Entity or a string filename to a secret key. +func NewSigner(pubKeyRef blob.Ref, armoredPubKey io.Reader, privateKeySource interface{}) (*Signer, error) { + hash := pubKeyRef.Hash() + keyId, armoredPubKeyString, err := jsonsign.ParseArmoredPublicKey(io.TeeReader(armoredPubKey, hash)) + if err != nil { + return nil, err + } + if !pubKeyRef.HashMatches(hash) { + return nil, fmt.Errorf("pubkey ref of %v doesn't match provided armored public key", pubKeyRef) + } + + var privateKey *openpgp.Entity + switch v := privateKeySource.(type) { + case *openpgp.Entity: + privateKey = v + case string: + privateKey, err = jsonsign.EntityFromSecring(keyId, v) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("invalid privateKeySource type %T", v) + } + if privateKey == nil { + return nil, errors.New("nil privateKey") + } + + return &Signer{ + keyId: keyId, + pubref: pubKeyRef, + privEntity: privateKey, + baseSigReq: jsonsign.SignRequest{ + ServerMode: true, // shouldn't matter, since we're supplying the rest of the fields + Fetcher: memoryBlobFetcher{ + pubKeyRef: func() (uint32, io.ReadCloser) { + return uint32(len(armoredPubKeyString)), ioutil.NopCloser(strings.NewReader(armoredPubKeyString)) + }, + }, + EntityFetcher: entityFetcherFunc(func(wantKeyId string) (*openpgp.Entity, error) { + if 
privateKey.PrivateKey.KeyIdString() != wantKeyId && + privateKey.PrivateKey.KeyIdShortString() != wantKeyId { + return nil, fmt.Errorf("jsonsign code unexpectedly requested keyId %q; only have %q", + wantKeyId, keyId) + } + return privateKey, nil + }), + }, + }, nil +} + +// SignJSON signs the provided json at the optional time t. +// If t is the zero Time, the current time is used. +func (s *Signer) SignJSON(json string, t time.Time) (string, error) { + sr := s.baseSigReq + sr.UnsignedJSON = json + sr.SignatureTime = t + return sr.Sign() +} + +type memoryBlobFetcher map[blob.Ref]func() (size uint32, rc io.ReadCloser) + +func (m memoryBlobFetcher) Fetch(br blob.Ref) (file io.ReadCloser, size uint32, err error) { + fn, ok := m[br] + if !ok { + return nil, 0, os.ErrNotExist + } + size, file = fn() + return +} + +type entityFetcherFunc func(keyId string) (*openpgp.Entity, error) + +func (f entityFetcherFunc) FetchEntity(keyId string) (*openpgp.Entity, error) { + return f(keyId) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/sign_test.go b/vendor/github.com/camlistore/camlistore/pkg/schema/sign_test.go new file mode 100644 index 00000000..92de1f49 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/schema/sign_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package schema + +import ( + "strings" + "testing" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/jsonsign" +) + +func TestSigner(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + ent, err := jsonsign.NewEntity() + if err != nil { + t.Fatal(err) + } + armorPub, err := jsonsign.ArmoredPublicKey(ent) + if err != nil { + t.Fatal(err) + } + pubRef := blob.SHA1FromString(armorPub) + sig, err := NewSigner(pubRef, strings.NewReader(armorPub), ent) + if err != nil { + t.Fatalf("NewSigner: %v", err) + } + pn, err := NewUnsignedPermanode().Sign(sig) + if err != nil { + t.Fatalf("NewPermanode: %v", err) + } + if !strings.Contains(pn, `,"camliSig":"`) { + t.Errorf("Permanode doesn't look signed: %v", pn) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/testdata/coffee-sf.jpg b/vendor/github.com/camlistore/camlistore/pkg/schema/testdata/coffee-sf.jpg new file mode 100644 index 00000000..10c800fb Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/schema/testdata/coffee-sf.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/schema/testdata/gocon-tokyo.jpg b/vendor/github.com/camlistore/camlistore/pkg/schema/testdata/gocon-tokyo.jpg new file mode 100644 index 00000000..34f58fa2 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/schema/testdata/gocon-tokyo.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/describe.go b/vendor/github.com/camlistore/camlistore/pkg/search/describe.go new file mode 100644 index 00000000..0d100808 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/describe.go @@ -0,0 +1,884 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package search + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "net/http" + "net/url" + "os" + "sort" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/syncutil" + "camlistore.org/pkg/types" + "camlistore.org/pkg/types/camtypes" +) + +func (sh *Handler) serveDescribe(rw http.ResponseWriter, req *http.Request) { + defer httputil.RecoverJSON(rw, req) + var dr DescribeRequest + dr.fromHTTP(req) + + res, err := sh.Describe(&dr) + if err != nil { + httputil.ServeJSONError(rw, err) + return + } + httputil.ReturnJSON(rw, res) +} + +const verboseDescribe = false + +func (sh *Handler) Describe(dr *DescribeRequest) (dres *DescribeResponse, err error) { + if verboseDescribe { + t0 := time.Now() + defer func() { + td := time.Since(t0) + var num int + if dres != nil { + num = len(dres.Meta) + } + log.Printf("Described %d blobs in %v", num, td) + }() + } + sh.initDescribeRequest(dr) + if dr.BlobRef.Valid() { + dr.Describe(dr.BlobRef, dr.depth()) + } + for _, br := range dr.BlobRefs { + dr.Describe(br, dr.depth()) + } + if err := dr.expandRules(); err != nil { + return nil, err + } + metaMap, err := dr.metaMap() + if err != nil { + return nil, err + } + return &DescribeResponse{metaMap}, nil +} + +type DescribeRequest struct { + // BlobRefs are the blobs to describe. If length zero, BlobRef + // is used. + BlobRefs []blob.Ref `json:"blobrefs,omitempty"` + + // BlobRef is the blob to describe. 
+ BlobRef blob.Ref `json:"blobref,omitempty"` + + // Depth is the optional traversal depth to describe from the + // root BlobRef. If zero, a default is used. + // Depth is deprecated and will be removed. Use Rules instead. + Depth int `json:"depth,omitempty"` + + // MaxDirChildren is the requested optional limit to the number + // of children that should be fetched when describing a static + // directory. If zero, a default is used. + MaxDirChildren int `json:"maxDirChildren,omitempty"` + + // At specifies the time which we wish to see the state of + // this blob. If zero (unspecified), all claims will be + // considered, otherwise, any claims after this date will not + // be considered. + At types.Time3339 `json:"at"` + + // Rules specifies a set of rules to instruct how to keep + // expanding the described set. All rules are tested and + // matching rules grow the the response set until all rules no + // longer match or internal limits are hit. + Rules []*DescribeRule `json:"rules,omitempty"` + + // Internal details, used while loading. + // Initialized by sh.initDescribeRequest. + sh *Handler + mu sync.Mutex // protects following: + m MetaMap + done map[blobrefAndDepth]bool // blobref -> true + errs map[string]error // blobref -> error + resFromRule map[*DescribeRule]map[blob.Ref]bool + flatRuleCache []*DescribeRule // flattened once, by flatRules + + wg *sync.WaitGroup // for load requests +} + +type blobrefAndDepth struct { + br blob.Ref + depth int +} + +// Requires dr.mu is held +func (dr *DescribeRequest) flatRules() []*DescribeRule { + if dr.flatRuleCache == nil { + dr.flatRuleCache = make([]*DescribeRule, 0) + for _, rule := range dr.Rules { + rule.appendToFlatCache(dr) + } + } + return dr.flatRuleCache +} + +func (r *DescribeRule) appendToFlatCache(dr *DescribeRequest) { + dr.flatRuleCache = append(dr.flatRuleCache, r) + for _, rchild := range r.Rules { + rchild.parentRule = r + rchild.appendToFlatCache(dr) + } +} + +// Requires dr.mu is held. 
+func (dr *DescribeRequest) foreachResultBlob(fn func(blob.Ref)) { + if dr.BlobRef.Valid() { + fn(dr.BlobRef) + } + for _, br := range dr.BlobRefs { + fn(br) + } + for brStr := range dr.m { + if br, ok := blob.Parse(brStr); ok { + fn(br) + } + } +} + +// Requires dr.mu is held. +func (dr *DescribeRequest) blobInitiallyRequested(br blob.Ref) bool { + if dr.BlobRef.Valid() && dr.BlobRef == br { + return true + } + for _, br1 := range dr.BlobRefs { + if br == br1 { + return true + } + } + return false +} + +type DescribeRule struct { + // All non-zero 'If*' fields in the following set must match + // for the rule to match: + + // IsResultRoot, if true, only matches if the blob was part of + // the original search results, not a blob expanded later. + IfResultRoot bool `json:"ifResultRoot,omitempty"` + + // IfCamliNodeType matches if the "camliNodeType" attribute + // equals this value. + IfCamliNodeType string `json:"ifCamliNodeType,omitempty"` + + // Attrs lists attributes to describe. A special case + // is if the value ends in "*", which matches prefixes + // (e.g. "camliPath:*" or "*"). + Attrs []string `json:"attrs,omitempty"` + + // Additional rules to run on the described results of Attrs. + Rules []*DescribeRule `json:"rules,omitempty"` + + parentRule *DescribeRule +} + +// DescribeResponse is the JSON response from $searchRoot/camli/search/describe. +type DescribeResponse struct { + Meta MetaMap `json:"meta"` +} + +// A MetaMap is a map from blobref to a DescribedBlob. 
+type MetaMap map[string]*DescribedBlob + +type DescribedBlob struct { + Request *DescribeRequest `json:"-"` + + BlobRef blob.Ref `json:"blobRef"` + CamliType string `json:"camliType,omitempty"` + Size int64 `json:"size,"` + + // if camliType "permanode" + Permanode *DescribedPermanode `json:"permanode,omitempty"` + + // if camliType "file" + File *camtypes.FileInfo `json:"file,omitempty"` + // if camliType "directory" + Dir *camtypes.FileInfo `json:"dir,omitempty"` + // if camliType "file", and File.IsImage() + Image *camtypes.ImageInfo `json:"image,omitempty"` + // if camliType "file" and media file + MediaTags map[string]string `json:"mediaTags,omitempty"` + + // if camliType "directory" + DirChildren []blob.Ref `json:"dirChildren,omitempty"` + + // Stub is set if this is not loaded, but referenced. + Stub bool `json:"-"` +} + +func (m MetaMap) Get(br blob.Ref) *DescribedBlob { + if !br.Valid() { + return nil + } + return m[br.String()] +} + +// URLSuffixPost returns the URL suffix for POST requests. +func (r *DescribeRequest) URLSuffixPost() string { + return "camli/search/describe" +} + +// URLSuffix returns the URL suffix for GET requests. +// This is deprecated. 
+func (r *DescribeRequest) URLSuffix() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "camli/search/describe?depth=%d&maxdirchildren=%d", + r.depth(), r.maxDirChildren()) + for _, br := range r.BlobRefs { + buf.WriteString("&blobref=") + buf.WriteString(br.String()) + } + if len(r.BlobRefs) == 0 && r.BlobRef.Valid() { + buf.WriteString("&blobref=") + buf.WriteString(r.BlobRef.String()) + } + if !r.At.IsZero() { + buf.WriteString("&at=") + buf.WriteString(r.At.String()) + } + return buf.String() +} + +// fromHTTP panics with an httputil value on failure +func (r *DescribeRequest) fromHTTP(req *http.Request) { + switch { + case httputil.IsGet(req): + r.fromHTTPGet(req) + case req.Method == "POST": + r.fromHTTPPost(req) + default: + panic("Unsupported method") + } +} + +func (r *DescribeRequest) fromHTTPPost(req *http.Request) { + err := json.NewDecoder(req.Body).Decode(r) + if err != nil { + panic(err) + } +} + +func (r *DescribeRequest) fromHTTPGet(req *http.Request) { + req.ParseForm() + if vv := req.Form["blobref"]; len(vv) > 1 { + for _, brs := range vv { + if br, ok := blob.Parse(brs); ok { + r.BlobRefs = append(r.BlobRefs, br) + } else { + panic(httputil.InvalidParameterError("blobref")) + } + } + } else { + r.BlobRef = httputil.MustGetBlobRef(req, "blobref") + } + r.Depth = httputil.OptionalInt(req, "depth") + r.MaxDirChildren = httputil.OptionalInt(req, "maxdirchildren") + r.At = types.ParseTime3339OrZero(req.FormValue("at")) +} + +// PermanodeFile returns in path the blobref of the described permanode +// and the blobref of its File camliContent. +// If b isn't a permanode, or doesn't have a camliContent that +// is a file blob, ok is false. 
+func (b *DescribedBlob) PermanodeFile() (path []blob.Ref, fi *camtypes.FileInfo, ok bool) { + if b == nil || b.Permanode == nil { + return + } + if contentRef := b.Permanode.Attr.Get("camliContent"); contentRef != "" { + if cdes := b.Request.DescribedBlobStr(contentRef); cdes != nil && cdes.File != nil { + return []blob.Ref{b.BlobRef, cdes.BlobRef}, cdes.File, true + } + } + return +} + +// PermanodeDir returns in path the blobref of the described permanode +// and the blobref of its Directory camliContent. +// If b isn't a permanode, or doesn't have a camliContent that +// is a directory blob, ok is false. +func (b *DescribedBlob) PermanodeDir() (path []blob.Ref, fi *camtypes.FileInfo, ok bool) { + if b == nil || b.Permanode == nil { + return + } + if contentRef := b.Permanode.Attr.Get("camliContent"); contentRef != "" { + if cdes := b.Request.DescribedBlobStr(contentRef); cdes != nil && cdes.Dir != nil { + return []blob.Ref{b.BlobRef, cdes.BlobRef}, cdes.Dir, true + } + } + return +} + +func (b *DescribedBlob) DomID() string { + if b == nil { + return "" + } + return b.BlobRef.DomID() +} + +func (b *DescribedBlob) Title() string { + if b == nil { + return "" + } + if b.Permanode != nil { + if t := b.Permanode.Attr.Get("title"); t != "" { + return t + } + if contentRef := b.Permanode.Attr.Get("camliContent"); contentRef != "" { + return b.Request.DescribedBlobStr(contentRef).Title() + } + } + if b.File != nil { + return b.File.FileName + } + if b.Dir != nil { + return b.Dir.FileName + } + return "" +} + +func (b *DescribedBlob) Description() string { + if b == nil { + return "" + } + if b.Permanode != nil { + return b.Permanode.Attr.Get("description") + } + return "" +} + +// Members returns all of b's children, as given by b's camliMember and camliPath:* +// attributes. Only the first entry for a given camliPath attribute is used. 
+func (b *DescribedBlob) Members() []*DescribedBlob { + if b == nil { + return nil + } + m := make([]*DescribedBlob, 0) + if b.Permanode != nil { + for _, bstr := range b.Permanode.Attr["camliMember"] { + if br, ok := blob.Parse(bstr); ok { + m = append(m, b.PeerBlob(br)) + } + } + for k, bstrs := range b.Permanode.Attr { + if strings.HasPrefix(k, "camliPath:") && len(bstrs) > 0 { + if br, ok := blob.Parse(bstrs[0]); ok { + m = append(m, b.PeerBlob(br)) + } + } + } + } + return m +} + +func (b *DescribedBlob) DirMembers() []*DescribedBlob { + if b == nil || b.Dir == nil || len(b.DirChildren) == 0 { + return nil + } + + m := make([]*DescribedBlob, 0) + for _, br := range b.DirChildren { + m = append(m, b.PeerBlob(br)) + } + return m +} + +func (b *DescribedBlob) ContentRef() (br blob.Ref, ok bool) { + if b != nil && b.Permanode != nil { + if cref := b.Permanode.Attr.Get("camliContent"); cref != "" { + return blob.Parse(cref) + } + } + return +} + +// Given a blobref string returns a Description or nil. +// dr may be nil itself. +func (dr *DescribeRequest) DescribedBlobStr(blobstr string) *DescribedBlob { + if dr == nil { + return nil + } + dr.mu.Lock() + defer dr.mu.Unlock() + return dr.m[blobstr] +} + +// PeerBlob returns a DescribedBlob for the provided blobref. +// +// Unlike DescribedBlobStr, the returned DescribedBlob is never nil. +// +// If the blob was never loaded along with the the receiver (or if the +// receiver is nil), a stub DescribedBlob is returned with its Stub +// field set true. +func (b *DescribedBlob) PeerBlob(br blob.Ref) *DescribedBlob { + if b.Request == nil { + return &DescribedBlob{BlobRef: br, Stub: true} + } + b.Request.mu.Lock() + defer b.Request.mu.Unlock() + return b.peerBlob(br) +} + +// version of PeerBlob when b.Request.mu is already held. 
+func (b *DescribedBlob) peerBlob(br blob.Ref) *DescribedBlob { + if peer, ok := b.Request.m[br.String()]; ok { + return peer + } + return &DescribedBlob{Request: b.Request, BlobRef: br, Stub: true} +} + +func (b *DescribedBlob) isPermanode() bool { + return b.Permanode != nil +} + +type DescribedPermanode struct { + Attr url.Values `json:"attr"` // a map[string][]string + ModTime time.Time `json:"modtime,omitempty"` +} + +// IsContainer returns whether the permanode has either named ("camliPath:"-prefixed) or unnamed +// ("camliMember") member attributes. +func (dp *DescribedPermanode) IsContainer() bool { + if members := dp.Attr["camliMember"]; len(members) > 0 { + return true + } + for k := range dp.Attr { + if strings.HasPrefix(k, "camliPath:") { + return true + } + } + return false +} + +func (dp *DescribedPermanode) jsonMap() map[string]interface{} { + m := jsonMap() + + am := jsonMap() + m["attr"] = am + for k, vv := range dp.Attr { + if len(vv) > 0 { + vl := make([]string, len(vv)) + copy(vl[:], vv[:]) + am[k] = vl + } + } + return m +} + +// NewDescribeRequest returns a new DescribeRequest holding the state +// of blobs and their summarized descriptions. Use DescribeBlob +// one or more times before calling Result. 
+func (sh *Handler) NewDescribeRequest() *DescribeRequest { + dr := new(DescribeRequest) + sh.initDescribeRequest(dr) + return dr +} + +func (sh *Handler) initDescribeRequest(req *DescribeRequest) { + if req.sh != nil { + panic("already initialized") + } + req.sh = sh + req.m = make(MetaMap) + req.errs = make(map[string]error) + req.wg = new(sync.WaitGroup) +} + +type DescribeError map[string]error + +func (de DescribeError) Error() string { + var buf bytes.Buffer + for b, err := range de { + fmt.Fprintf(&buf, "%s: %v; ", b, err) + } + return fmt.Sprintf("Errors (%d) describing blobs: %s", len(de), buf.String()) +} + +// Result waits for all outstanding lookups to complete and +// returns the map of blobref (strings) to their described +// results. The returned error is non-nil if any errors +// occured, and will be of type DescribeError. +func (dr *DescribeRequest) Result() (desmap map[string]*DescribedBlob, err error) { + dr.wg.Wait() + // TODO: set "done" / locked flag, so no more DescribeBlob can + // be called. + if len(dr.errs) > 0 { + return dr.m, DescribeError(dr.errs) + } + return dr.m, nil +} + +func (dr *DescribeRequest) depth() int { + if dr.Depth > 0 { + return dr.Depth + } + return 1 +} + +func (dr *DescribeRequest) maxDirChildren() int { + return sanitizeNumResults(dr.MaxDirChildren) +} + +func (dr *DescribeRequest) metaMap() (map[string]*DescribedBlob, error) { + dr.wg.Wait() + dr.mu.Lock() + defer dr.mu.Unlock() + for k, err := range dr.errs { + // TODO: include all? 
+ return nil, fmt.Errorf("error populating %s: %v", k, err) + } + m := make(map[string]*DescribedBlob) + for k, desb := range dr.m { + m[k] = desb + } + return m, nil +} + +func (dr *DescribeRequest) describedBlob(b blob.Ref) *DescribedBlob { + dr.mu.Lock() + defer dr.mu.Unlock() + bs := b.String() + if des, ok := dr.m[bs]; ok { + return des + } + des := &DescribedBlob{Request: dr, BlobRef: b} + dr.m[bs] = des + return des +} + +func (dr *DescribeRequest) DescribeSync(br blob.Ref) (*DescribedBlob, error) { + dr.Describe(br, 1) + res, err := dr.Result() + if err != nil { + return nil, err + } + return res[br.String()], nil +} + +// Describe starts a lookup of br, down to the provided depth. +// It returns immediately. +func (dr *DescribeRequest) Describe(br blob.Ref, depth int) { + if depth <= 0 { + return + } + dr.mu.Lock() + defer dr.mu.Unlock() + if dr.done == nil { + dr.done = make(map[blobrefAndDepth]bool) + } + doneKey := blobrefAndDepth{br, depth} + if dr.done[doneKey] { + return + } + dr.done[doneKey] = true + dr.wg.Add(1) + go func() { + defer dr.wg.Done() + dr.describeReally(br, depth) + }() +} + +// requires dr.mu is held +func (dr *DescribeRequest) isDescribedOrError(br blob.Ref) bool { + brs := br.String() + if _, ok := dr.m[brs]; ok { + return true + } + if _, ok := dr.errs[brs]; ok { + return true + } + return false +} + +// requires dr.mu be held. 
+func (r *DescribeRule) newMatches(br blob.Ref, dr *DescribeRequest) (brs []blob.Ref) { + if r.IfResultRoot { + if !dr.blobInitiallyRequested(br) { + return nil + } + } + if r.parentRule != nil { + if _, ok := dr.resFromRule[r.parentRule][br]; !ok { + return nil + } + } + db, ok := dr.m[br.String()] + if !ok || db.Permanode == nil { + return nil + } + if t := r.IfCamliNodeType; t != "" { + gotType := db.Permanode.Attr.Get("camliNodeType") + if gotType != t { + return nil + } + } + for attr, vv := range db.Permanode.Attr { + matches := false + for _, matchAttr := range r.Attrs { + if attr == matchAttr { + matches = true + break + } + if strings.HasSuffix(matchAttr, "*") && strings.HasPrefix(attr, strings.TrimSuffix(matchAttr, "*")) { + matches = true + break + } + } + if !matches { + continue + } + for _, v := range vv { + if br, ok := blob.Parse(v); ok { + brs = append(brs, br) + } + } + } + return brs +} + +// dr.mu just be locked. +func (dr *DescribeRequest) noteResultFromRule(rule *DescribeRule, br blob.Ref) { + if dr.resFromRule == nil { + dr.resFromRule = make(map[*DescribeRule]map[blob.Ref]bool) + } + m, ok := dr.resFromRule[rule] + if !ok { + m = make(map[blob.Ref]bool) + dr.resFromRule[rule] = m + } + m[br] = true +} + +func (dr *DescribeRequest) expandRules() error { + loop := true + + for loop { + loop = false + dr.wg.Wait() + dr.mu.Lock() + len0 := len(dr.m) + var new []blob.Ref + for _, rule := range dr.flatRules() { + dr.foreachResultBlob(func(br blob.Ref) { + for _, nbr := range rule.newMatches(br, dr) { + new = append(new, nbr) + dr.noteResultFromRule(rule, nbr) + } + }) + } + dr.mu.Unlock() + for _, br := range new { + dr.Describe(br, 1) + } + dr.wg.Wait() + dr.mu.Lock() + len1 := len(dr.m) + dr.mu.Unlock() + loop = len0 != len1 + } + return nil +} + +func (dr *DescribeRequest) addError(br blob.Ref, err error) { + if err == nil { + return + } + dr.mu.Lock() + defer dr.mu.Unlock() + // TODO: append? meh. 
+ dr.errs[br.String()] = err +} + +func (dr *DescribeRequest) describeReally(br blob.Ref, depth int) { + meta, err := dr.sh.index.GetBlobMeta(br) + if err == os.ErrNotExist { + return + } + if err != nil { + dr.addError(br, err) + return + } + + // TODO: convert all this in terms of + // DescribedBlob/DescribedPermanode/DescribedFile, not json + // maps. Then add JSON marhsallers to those types. Add tests. + des := dr.describedBlob(br) + if meta.CamliType != "" { + des.setMIMEType("application/json; camliType=" + meta.CamliType) + } + des.Size = int64(meta.Size) + + switch des.CamliType { + case "permanode": + des.Permanode = new(DescribedPermanode) + dr.populatePermanodeFields(des.Permanode, br, dr.sh.owner, depth) + case "file": + fi, err := dr.sh.index.GetFileInfo(br) + if err != nil { + if os.IsNotExist(err) { + log.Printf("index.GetFileInfo(file %s) failed; index stale?", br) + } else { + dr.addError(br, err) + } + return + } + des.File = &fi + if des.File.IsImage() { + imgInfo, err := dr.sh.index.GetImageInfo(br) + if err != nil { + if !os.IsNotExist(err) { + dr.addError(br, err) + } + } else { + des.Image = &imgInfo + } + } + if mediaTags, err := dr.sh.index.GetMediaTags(br); err == nil { + des.MediaTags = mediaTags + } + case "directory": + var g syncutil.Group + g.Go(func() (err error) { + fi, err := dr.sh.index.GetFileInfo(br) + if os.IsNotExist(err) { + log.Printf("index.GetFileInfo(directory %s) failed; index stale?", br) + } + if err == nil { + des.Dir = &fi + } + return + }) + g.Go(func() (err error) { + des.DirChildren, err = dr.getDirMembers(br, depth) + return + }) + if err := g.Err(); err != nil { + dr.addError(br, err) + } + } +} + +func (dr *DescribeRequest) populatePermanodeFields(pi *DescribedPermanode, pn, signer blob.Ref, depth int) { + pi.Attr = make(url.Values) + attr := pi.Attr + + claims, err := dr.sh.index.AppendClaims(nil, pn, signer, "") + if err != nil { + log.Printf("Error getting claims of %s: %v", pn.String(), err) + 
dr.addError(pn, fmt.Errorf("Error getting claims of %s: %v", pn.String(), err)) + return + } + + sort.Sort(camtypes.ClaimsByDate(claims)) +claimLoop: + for _, cl := range claims { + if !dr.At.IsZero() { + if cl.Date.After(dr.At.Time()) { + continue + } + } + switch cl.Type { + default: + continue + case "del-attribute": + if cl.Value == "" { + delete(attr, cl.Attr) + } else { + sl := attr[cl.Attr] + filtered := make([]string, 0, len(sl)) + for _, val := range sl { + if val != cl.Value { + filtered = append(filtered, val) + } + } + attr[cl.Attr] = filtered + } + case "set-attribute": + delete(attr, cl.Attr) + fallthrough + case "add-attribute": + if cl.Value == "" { + continue + } + sl, ok := attr[cl.Attr] + if ok { + for _, exist := range sl { + if exist == cl.Value { + continue claimLoop + } + } + } else { + sl = make([]string, 0, 1) + attr[cl.Attr] = sl + } + attr[cl.Attr] = append(sl, cl.Value) + } + pi.ModTime = cl.Date + } + + // Descend into any references in current attributes. + for key, vals := range attr { + dr.describeRefs(key, depth) + for _, v := range vals { + dr.describeRefs(v, depth) + } + } +} + +func (dr *DescribeRequest) getDirMembers(br blob.Ref, depth int) ([]blob.Ref, error) { + limit := dr.maxDirChildren() + ch := make(chan blob.Ref) + errch := make(chan error) + go func() { + errch <- dr.sh.index.GetDirMembers(br, ch, limit) + }() + + var members []blob.Ref + for child := range ch { + dr.Describe(child, depth) + members = append(members, child) + } + if err := <-errch; err != nil { + return nil, err + } + return members, nil +} + +func (dr *DescribeRequest) describeRefs(str string, depth int) { + for _, match := range blobRefPattern.FindAllString(str, -1) { + if ref, ok := blob.ParseKnown(match); ok { + dr.Describe(ref, depth-1) + } + } +} + +func (d *DescribedBlob) setMIMEType(mime string) { + if strings.HasPrefix(mime, camliTypePrefix) { + d.CamliType = strings.TrimPrefix(mime, camliTypePrefix) + } +} diff --git 
a/vendor/github.com/camlistore/camlistore/pkg/search/describe_test.go b/vendor/github.com/camlistore/camlistore/pkg/search/describe_test.go new file mode 100644 index 00000000..51346eab --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/describe_test.go @@ -0,0 +1,253 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package search_test + +import ( + "testing" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/index" + "camlistore.org/pkg/search" + "camlistore.org/pkg/test" +) + +func addPermanode(fi *test.FakeIndex, pnStr string, attrs ...string) { + pn := blob.MustParse(pnStr) + fi.AddMeta(pn, "permanode", 123) + for len(attrs) > 0 { + k, v := attrs[0], attrs[1] + attrs = attrs[2:] + fi.AddClaim(owner, pn, "add-attribute", k, v) + } +} + +func searchDescribeSetup(fi *test.FakeIndex) index.Interface { + addPermanode(fi, "abc-123", + "camliContent", "abc-123c", + "camliImageContent", "abc-888", + ) + addPermanode(fi, "abc-123c", + "camliContent", "abc-123cc", + "camliImageContent", "abc-123c1", + ) + addPermanode(fi, "abc-123c1", + "some", "image", + ) + addPermanode(fi, "abc-123cc", + "name", "leaf", + ) + addPermanode(fi, "abc-888", + "camliContent", "abc-8881", + ) + addPermanode(fi, "abc-8881", + "name", "leaf8881", + ) + + addPermanode(fi, "fourcheckin-0", + "camliNodeType", "foursquare.com:checkin", + "foursquareVenuePermanode", "fourvenue-123", + ) + addPermanode(fi, "fourvenue-123", + "camliNodeType", 
"foursquare.com:venue", + "camliPath:photos", "venuepicset-123", + ) + addPermanode(fi, "venuepicset-123", + "camliPath:1.jpg", "venuepic-1", + ) + addPermanode(fi, "venuepic-1", + "camliContent", "somevenuepic-0", + ) + addPermanode(fi, "somevenuepic-0", + "foo", "bar", + ) + addPermanode(fi, "venuepic-2", + "camliContent", "somevenuepic-2", + ) + addPermanode(fi, "somevenuepic-2", + "foo", "baz", + ) + + addPermanode(fi, "homedir-0", + "camliPath:subdir.1", "homedir-1", + ) + addPermanode(fi, "homedir-1", + "camliPath:subdir.2", "homedir-2", + ) + addPermanode(fi, "homedir-2", + "foo", "bar", + ) + + addPermanode(fi, "set-0", + "camliMember", "venuepic-1", + "camliMember", "venuepic-2", + ) + + return fi +} + +var searchDescribeTests = []handlerTest{ + { + name: "null", + postBody: marshalJSON(&search.DescribeRequest{}), + want: jmap(&search.DescribeResponse{ + Meta: search.MetaMap{}, + }), + }, + + { + name: "single", + postBody: marshalJSON(&search.DescribeRequest{ + BlobRef: blob.MustParse("abc-123"), + }), + wantDescribed: []string{"abc-123"}, + }, + + { + name: "follow all camliContent", + postBody: marshalJSON(&search.DescribeRequest{ + BlobRef: blob.MustParse("abc-123"), + Rules: []*search.DescribeRule{ + { + Attrs: []string{"camliContent"}, + }, + }, + }), + wantDescribed: []string{"abc-123", "abc-123c", "abc-123cc"}, + }, + + { + name: "follow only root camliContent", + postBody: marshalJSON(&search.DescribeRequest{ + BlobRef: blob.MustParse("abc-123"), + Rules: []*search.DescribeRule{ + { + IfResultRoot: true, + Attrs: []string{"camliContent"}, + }, + }, + }), + wantDescribed: []string{"abc-123", "abc-123c"}, + }, + + { + name: "follow all root, substring", + postBody: marshalJSON(&search.DescribeRequest{ + BlobRef: blob.MustParse("abc-123"), + Rules: []*search.DescribeRule{ + { + IfResultRoot: true, + Attrs: []string{"camli*"}, + }, + }, + }), + wantDescribed: []string{"abc-123", "abc-123c", "abc-888"}, + }, + + { + name: "two rules, two attrs", + 
postBody: marshalJSON(&search.DescribeRequest{ + BlobRef: blob.MustParse("abc-123"), + Rules: []*search.DescribeRule{ + { + IfResultRoot: true, + Attrs: []string{"camliContent", "camliImageContent"}, + }, + { + Attrs: []string{"camliContent"}, + }, + }, + }), + wantDescribed: []string{"abc-123", "abc-123c", "abc-123cc", "abc-888", "abc-8881"}, + }, + + { + name: "foursquare venue photos, but not recursive camliPath explosion", + postBody: marshalJSON(&search.DescribeRequest{ + BlobRefs: []blob.Ref{ + blob.MustParse("homedir-0"), + blob.MustParse("fourcheckin-0"), + }, + Rules: []*search.DescribeRule{ + { + Attrs: []string{"camliContent", "camliContentImage"}, + }, + { + IfCamliNodeType: "foursquare.com:checkin", + Attrs: []string{"foursquareVenuePermanode"}, + }, + { + IfCamliNodeType: "foursquare.com:venue", + Attrs: []string{"camliPath:photos"}, + Rules: []*search.DescribeRule{ + { + Attrs: []string{"camliPath:*"}, + }, + }, + }, + }, + }), + wantDescribed: []string{"homedir-0", "fourcheckin-0", "fourvenue-123", "venuepicset-123", "venuepic-1", "somevenuepic-0"}, + }, + + { + name: "home dirs forever", + postBody: marshalJSON(&search.DescribeRequest{ + BlobRefs: []blob.Ref{ + blob.MustParse("homedir-0"), + }, + Rules: []*search.DescribeRule{ + { + Attrs: []string{"camliPath:*"}, + }, + }, + }), + wantDescribed: []string{"homedir-0", "homedir-1", "homedir-2"}, + }, + + { + name: "find members", + postBody: marshalJSON(&search.DescribeRequest{ + BlobRef: blob.MustParse("set-0"), + Rules: []*search.DescribeRule{ + { + IfResultRoot: true, + Attrs: []string{"camliMember"}, + Rules: []*search.DescribeRule{ + {Attrs: []string{"camliContent"}}, + }, + }, + }, + }), + wantDescribed: []string{"set-0", "venuepic-1", "venuepic-2", "somevenuepic-0", "somevenuepic-2"}, + }, +} + +func init() { + checkNoDups("searchDescribeTests", searchDescribeTests) +} + +func TestSearchDescribe(t *testing.T) { + for _, ht := range searchDescribeTests { + if ht.setup == nil { + ht.setup = 
searchDescribeSetup + } + if ht.query == "" { + ht.query = "describe" + } + ht.test(t) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/export_test.go b/vendor/github.com/camlistore/camlistore/pkg/search/export_test.go new file mode 100644 index 00000000..2ea0d8cb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/export_test.go @@ -0,0 +1,31 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package search + +func SetTestHookBug121(hook func()) { + testHookBug121 = hook +} + +func ExportSetCandidateSourceHook(fn func(string)) { candSourceHook = fn } + +func ExportBufferedConst() int { return buffered } + +func (s *SearchQuery) ExportPlannedQuery() *SearchQuery { + return s.plannedQuery(nil) +} + +var SortName = sortName diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/expr.go b/vendor/github.com/camlistore/camlistore/pkg/search/expr.go new file mode 100644 index 00000000..49d6765d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/expr.go @@ -0,0 +1,362 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package search + +import ( + "fmt" + "log" + "strconv" + "strings" + + "camlistore.org/pkg/context" +) + +const seeDocs = "\nSee: https://camlistore.googlesource.com/camlistore/+/master/doc/search-ui.txt" + +var ( + noMatchingOpening = "No matching opening parenthesis" + noMatchingClosing = "No matching closing parenthesis" + noLiteralSupport = "No support for literals yet" + noQuotedLiteralSupport = "No support for quoted literals yet" + expectedAtom = "Expected an atom" + predicateError = "Predicates do not start with a colon" + trailingTokens = "After parsing finished there is still input left" +) + +type parseExpError struct { + mesg string + t token +} + +func (e parseExpError) Error() string { + return fmt.Sprintf("%s at position %d, token: %q %s", e.mesg, e.t.start, e.t.val, seeDocs) +} + +func newParseExpError(mesg string, t token) error { + return parseExpError{mesg: mesg, t: t} +} + +func andConst(a, b *Constraint) *Constraint { + return &Constraint{ + Logical: &LogicalConstraint{ + Op: "and", + A: a, + B: b, + }, + } +} + +func orConst(a, b *Constraint) *Constraint { + return &Constraint{ + Logical: &LogicalConstraint{ + Op: "or", + A: a, + B: b, + }, + } +} + +func notConst(a *Constraint) *Constraint { + return &Constraint{ + Logical: &LogicalConstraint{ + Op: "not", + A: a, + }, + } +} + +type parser struct { + tokens chan token + peeked *token + ctx *context.Context +} + +func newParser(exp string, ctx *context.Context) parser { + _, tokens := lex(exp) + return parser{tokens: tokens, ctx: ctx} +} + +func (p *parser) next() *token { + if p.peeked != 
nil { + t := p.peeked + p.peeked = nil + return t + } + return p.readInternal() +} + +func (p *parser) peek() *token { + if p.peeked == nil { + p.peeked = p.readInternal() + } + return p.peeked +} + +// ReadInternal should not be called directly, use 'next' or 'peek' +func (p *parser) readInternal() *token { + for t := range p.tokens { + return &t + } + return &token{tokenEOF, "", -1} +} + +func (p *parser) stripNot() (negated bool) { + for { + switch p.peek().typ { + case tokenNot: + p.next() + negated = !negated + continue + } + return negated + } +} + +func (p *parser) parseExp() (c *Constraint, err error) { + if p.peek().typ == tokenEOF { + return + } + c, err = p.parseOperand() + if err != nil { + return + } + for { + switch p.peek().typ { + case tokenAnd: + p.next() + case tokenOr: + p.next() + return p.parseOrRHS(c) + case tokenClose, tokenEOF: + return + } + c, err = p.parseAndRHS(c) + if err != nil { + return + } + } +} + +func (p *parser) parseGroup() (c *Constraint, err error) { + i := p.next() + switch i.typ { + case tokenOpen: + c, err = p.parseExp() + if err != nil { + return + } + if p.peek().typ == tokenClose { + p.next() + return + } else { + err = newParseExpError(noMatchingClosing, *i) + return + } + } + err = newParseExpError("internal: do not call parseGroup when not on a '('", *i) + return +} + +func (p *parser) parseOrRHS(lhs *Constraint) (c *Constraint, err error) { + var rhs *Constraint + c = lhs + for { + rhs, err = p.parseAnd() + if err != nil { + return + } + c = orConst(c, rhs) + switch p.peek().typ { + case tokenOr: + p.next() + case tokenAnd, tokenClose, tokenEOF: + return + } + } +} + +func (p *parser) parseAnd() (c *Constraint, err error) { + for { + c, err = p.parseOperand() + if err != nil { + return + } + switch p.peek().typ { + case tokenAnd: + p.next() + case tokenOr, tokenClose, tokenEOF: + return + } + return p.parseAndRHS(c) + } +} + +func (p *parser) parseAndRHS(lhs *Constraint) (c *Constraint, err error) { + var rhs 
*Constraint + c = lhs + for { + rhs, err = p.parseOperand() + if err != nil { + return + } + c = andConst(c, rhs) + switch p.peek().typ { + case tokenOr, tokenClose, tokenEOF: + return + case tokenAnd: + p.next() + continue + } + return + } +} + +func (p *parser) parseOperand() (c *Constraint, err error) { + negated := p.stripNot() + i := p.peek() + switch i.typ { + case tokenError: + err = newParseExpError(i.val, *i) + return + case tokenEOF: + err = newParseExpError(expectedAtom, *i) + return + case tokenClose: + err = newParseExpError(noMatchingOpening, *i) + return + case tokenLiteral, tokenQuotedLiteral, tokenPredicate, tokenColon, tokenArg: + c, err = p.parseAtom() + case tokenOpen: + c, err = p.parseGroup() + } + if err != nil { + return + } + if negated { + c = notConst(c) + } + return +} + +// AtomWords returns the parsed atom, the starting position of this +// atom and an error. +func (p *parser) atomWords() (a atom, start int, err error) { + i := p.peek() + start = i.start + a = atom{} + switch i.typ { + case tokenLiteral: + err = newParseExpError(noLiteralSupport, *i) + return + case tokenQuotedLiteral: + err = newParseExpError(noQuotedLiteralSupport, *i) + return + case tokenColon: + err = newParseExpError(predicateError, *i) + return + case tokenPredicate: + i := p.next() + a.predicate = i.val + } + for { + switch p.peek().typ { + case tokenColon: + p.next() + continue + case tokenArg: + i := p.next() + a.args = append(a.args, i.val) + continue + case tokenQuotedArg: + i := p.next() + var uq string + uq, err = strconv.Unquote(i.val) + if err != nil { + return + } + a.args = append(a.args, uq) + continue + } + return + } +} + +func (p *parser) parseAtom() (*Constraint, error) { + a, start, err := p.atomWords() + if err != nil { + return nil, err + } + faultToken := func() token { + return token{ + typ: tokenError, + val: a.String(), + start: start, + } + } + var c *Constraint + for _, k := range keywords { + matched, err := k.Match(a) + if err != nil { 
+ return nil, newParseExpError(err.Error(), faultToken()) + } + if matched { + c, err = k.Predicate(p.ctx, a.args) + if err != nil { + return nil, newParseExpError(err.Error(), faultToken()) + } + return c, nil + } + } + t := faultToken() + err = newParseExpError(fmt.Sprintf("Unknown search predicate: %q", t.val), t) + log.Printf(err.Error()) + return nil, err +} + +func parseExpression(ctx *context.Context, exp string) (*SearchQuery, error) { + base := &Constraint{ + Permanode: &PermanodeConstraint{ + SkipHidden: true, + }, + } + sq := &SearchQuery{ + Constraint: base, + } + + exp = strings.TrimSpace(exp) + if exp == "" { + return sq, nil + } + p := newParser(exp, ctx) + + c, err := p.parseExp() + if err != nil { + return nil, err + } + lastToken := p.next() + if lastToken.typ != tokenEOF { + switch lastToken.typ { + case tokenClose: + return nil, newParseExpError(noMatchingOpening, *lastToken) + } + return nil, newParseExpError(trailingTokens, *lastToken) + } + if c != nil { + sq.Constraint = andConst(base, c) + } + return sq, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/expr_test.go b/vendor/github.com/camlistore/camlistore/pkg/search/expr_test.go new file mode 100644 index 00000000..f3b71d65 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/expr_test.go @@ -0,0 +1,988 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package search + +import ( + "encoding/json" + "reflect" + "strings" + "testing" + + "camlistore.org/pkg/context" +) + +var skiphiddenC = &Constraint{ + Permanode: &PermanodeConstraint{ + SkipHidden: true, + }, +} + +var ispanoC = &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + WHRatio: &FloatConstraint{ + Min: 2.0, + }, + }, + }, + }, +} + +var attrfoobarC = &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "foo", + Value: "bar", + SkipHidden: true, + }, +} + +var attrgorunC = &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "go", + Value: "run", + SkipHidden: true, + }, +} + +var hasLocationC = orConst(&Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Location: &LocationConstraint{Any: true}, + }, + }, + }, +}, &Constraint{ + Permanode: &PermanodeConstraint{ + Location: &LocationConstraint{Any: true}, + }, +}) + +var parseExpressionTests = []struct { + name string + in string + inList []string + want *SearchQuery + errContains string + ctx *context.Context +}{ + { + name: "empty search", + inList: []string{"", " ", "\n"}, + want: &SearchQuery{ + Constraint: skiphiddenC, + }, + }, + + { + in: "is:pano", + want: &SearchQuery{ + Constraint: andConst(skiphiddenC, ispanoC), + }, + }, + + { + in: "is:pano)", + errContains: "No matching opening", + }, + + { + in: "width:0-640", + want: &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: "and", + A: skiphiddenC, + B: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Width: &IntConstraint{ + ZeroMin: true, + Max: 640, + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "tag with spaces", + in: `tag:"Foo Bar"`, + want: &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: 
"and", + A: skiphiddenC, + B: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "tag", + Value: "Foo Bar", + SkipHidden: true, + }, + }, + }, + }, + }, + }, + + { + name: "attribute search", + in: "attr:foo:bar", + want: &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: "and", + A: skiphiddenC, + B: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "foo", + Value: "bar", + SkipHidden: true, + }, + }, + }, + }, + }, + }, + + { + name: "attribute search with space in value", + in: `attr:foo:"fun bar"`, + want: &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: "and", + A: skiphiddenC, + B: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "foo", + Value: "fun bar", + SkipHidden: true, + }, + }, + }, + }, + }, + }, + + { + in: "tag:funny", + want: &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: "and", + A: skiphiddenC, + B: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "tag", + Value: "funny", + SkipHidden: true, + }, + }, + }, + }, + }, + }, + + { + in: "title:Doggies", + want: &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: "and", + A: skiphiddenC, + B: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "title", + ValueMatches: &StringConstraint{ + Contains: "Doggies", + CaseInsensitive: true, + }, + SkipHidden: true, + }, + }, + }, + }, + }, + }, + + { + in: "childrenof:sha1-f00ba4", + want: &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: "and", + A: skiphiddenC, + B: &Constraint{ + Permanode: &PermanodeConstraint{ + Relation: &RelationConstraint{ + Relation: "parent", + Any: &Constraint{ + BlobRefPrefix: "sha1-f00ba4", + }, + }, + }, + }, + }, + }, + }, + }, + // Location predicates + { + in: "loc:Uitdam", // Small dutch town + want: &SearchQuery{ + Constraint: andConst(skiphiddenC, orConst(&Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + 
File: &FileConstraint{ + IsImage: true, + Location: uitdamLC, + }, + }, + }, + }, &Constraint{ + Permanode: &PermanodeConstraint{ + Location: uitdamLC, + }, + })), + }, + ctx: newGeocodeContext(), + }, + + { + in: "has:location", + want: &SearchQuery{ + Constraint: andConst(skiphiddenC, hasLocationC), + }, + }, + + // TODO: at least 'x' will go away eventually. + /* + { + inList: []string{"x", "bogus:operator"}, + errContains: "unknown expression", + }, + */ +} + +func TestParseExpression(t *testing.T) { + qj := func(sq *SearchQuery) []byte { + v, err := json.MarshalIndent(sq, "", " ") + if err != nil { + t.Fatal(err) + } + return v + } + for _, tt := range parseExpressionTests { + ins := tt.inList + if len(ins) == 0 { + ins = []string{tt.in} + } + for _, in := range ins { + ctx := tt.ctx + if ctx == nil { + ctx = context.TODO() + } + got, err := parseExpression(ctx, in) + if err != nil { + if tt.errContains != "" && strings.Contains(err.Error(), tt.errContains) { + continue + } + t.Errorf("%s: parseExpression(%q) error: %v", tt.name, in, err) + continue + } + if tt.errContains != "" { + t.Errorf("%s: parseExpression(%q) succeeded; want error containing %q", tt.name, in, tt.errContains) + continue + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%s: parseExpression(%q) got:\n%s\n\nwant:%s\n", tt.name, in, qj(got), qj(tt.want)) + } + } + } +} + +func doSticherChecking(name string, t *testing.T, tt sticherTestCase, got *Constraint, err error, p parser) { + ntt := parserTestCase{ + name: tt.name, + in: tt.in, + want: tt.want, + remCount: tt.remCount, + errContains: tt.errContains, + } + doChecking(name, t, ntt, got, err, p) +} + +func doChecking(name string, t *testing.T, tt parserTestCase, got *Constraint, err error, p parser) { + cj := func(c *Constraint) []byte { + v, err := json.MarshalIndent(c, "", " ") + if err != nil { + panic(err) + } + return v + } + remain := func() []token { + var remainder []token + var i int + for i = 0; true; i++ { + token := 
p.next() + if token.typ == tokenEOF { + break + } else { + remainder = append(remainder, *token) + } + } + return remainder + } + + if err != nil { + if tt.errContains != "" && strings.Contains(err.Error(), tt.errContains) { + return + } + if tt.errContains != "" { + t.Errorf("%s: %s(%q) error: %v, but wanted an error with: %v", tt.name, name, tt.in, err, tt.errContains) + } else { + t.Errorf("%s: %s(%q) unexpected error: %v", tt.name, name, tt.in, err) + } + return + } + if tt.errContains != "" { + t.Errorf("%s: %s(%q) succeeded; want error containing %q got: %s", tt.name, name, tt.in, tt.errContains, cj(got)) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%s: %s(%q) got:\n%s\n\nwant:%s\n", tt.name, name, tt.in, cj(got), cj(tt.want)) + } + remainder := remain() + if len(remainder) != tt.remCount { + t.Errorf("%s: %s(%s): Expected remainder of %d got %d\nRemaining tokens: %#v", tt.name, name, tt.in, tt.remCount, len(remainder), remainder) + } +} + +type parserTestCase struct { + name string + in string + want *Constraint + remCount int + errContains string +} + +type sticherTestCase struct { + name string + in string + want *Constraint + remCount int + errContains string + lhs *Constraint +} + +var parseOrRHSTests = []sticherTestCase{ + { + name: "stop on )", + in: "is:pano )", + want: orConst(nil, ispanoC), + remCount: 1, + }, + + { + in: "is:pano and attr:foo:bar", + want: orConst(nil, andConst(ispanoC, attrfoobarC)), + remCount: 0, + }, + + { + name: "add atom", + in: "is:pano", + want: orConst(nil, ispanoC), + remCount: 0, + }, +} + +func TestParseOrRhs(t *testing.T) { + for _, tt := range parseOrRHSTests { + p := newParser(tt.in, context.TODO()) + + got, err := p.parseOrRHS(tt.lhs) + + doSticherChecking("parseOrRHS", t, tt, got, err, p) + } +} + +var parseAndRHSTests = []sticherTestCase{ + { + name: "stop on )", + in: "is:pano )", + want: andConst(nil, ispanoC), + remCount: 1, + }, + + { + name: "stop on or", + in: "is:pano or", + want: 
andConst(nil, ispanoC), + remCount: 1, + }, + + { + name: "add atom", + in: "is:pano", + want: andConst(nil, ispanoC), + remCount: 0, + }, +} + +func TestParseConjuction(t *testing.T) { + for _, tt := range parseAndRHSTests { + p := newParser(tt.in, context.TODO()) + + got, err := p.parseAndRHS(tt.lhs) + + doSticherChecking("parseAndRHS", t, tt, got, err, p) + } +} + +var parseGroupTests = []struct { + name string + in string + want *Constraint + remCount int + errContains string +}{ + { + name: "simple grouped atom", + in: "( is:pano )", + want: ispanoC, + remCount: 0, + }, + + { + name: "simple grouped or with remainder", + in: "( attr:foo:bar or is:pano ) attr:foo:bar", + want: orConst(attrfoobarC, ispanoC), + remCount: 5, + }, + + { + name: "simple grouped and with remainder", + in: "( attr:foo:bar is:pano ) attr:foo:bar", + want: andConst(attrfoobarC, ispanoC), + remCount: 5, + }, + + { + name: "simple grouped atom with remainder", + in: "( is:pano ) attr:foo:bar", + want: ispanoC, + remCount: 5, + }, +} + +func TestParseGroup(t *testing.T) { + for _, tt := range parseGroupTests { + p := newParser(tt.in, context.TODO()) + + got, err := p.parseGroup() + + doChecking("parseGroup", t, tt, got, err, p) + } +} + +var parseOperandTests = []struct { + name string + in string + want *Constraint + remCount int + errContains string +}{ + { + name: "group of one atom", + in: "( is:pano )", + want: ispanoC, + remCount: 0, + }, + + { + name: "one atom", + in: "is:pano", + want: ispanoC, + remCount: 0, + }, + + { + name: "two atoms", + in: "is:pano attr:foo:bar", + want: ispanoC, + remCount: 5, + }, + + { + name: "grouped atom and atom", + in: "( is:pano ) attr:foo:bar", + want: ispanoC, + remCount: 5, + }, + + { + name: "atom and )", + in: "is:pano )", + want: ispanoC, + remCount: 1, + }, +} + +func TestParseOperand(t *testing.T) { + for _, tt := range parseOperandTests { + p := newParser(tt.in, context.TODO()) + + got, err := p.parseOperand() + + 
doChecking("parseOperand", t, tt, got, err, p) + } +} + +var parseExpTests = []parserTestCase{ + { + in: "attr:foo:", + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "foo", + ValueMatches: &StringConstraint{Empty: true}, + SkipHidden: true, + }, + }, + }, + + { + in: "after:foo", + errContains: "as \"2006\" at position 0", + }, + + { + in: "after:foo:bar", + errContains: `Wrong number of arguments for "after", given 2, expected 1 at position 0, token: "after:foo:bar"`, + }, + + { + in: " attr:foo", + errContains: `Wrong number of arguments for "attr", given 1, expected 2 at position 5, token: "attr:foo"`, + }, + + { + in: "has:location", + want: hasLocationC, + }, + + { + in: "is:pano", + want: ispanoC, + }, + + { + in: "height:0-640", + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Height: &IntConstraint{ + ZeroMin: true, + Max: 640, + }, + }, + }, + }, + }, + }, + + { + in: "width:0-640", + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Width: &IntConstraint{ + ZeroMin: true, + Max: 640, + }, + }, + }, + }, + }, + }, + + { + in: "height:++0", + errContains: "Unable to parse \"++0\" as range, wanted something like 480-1024, 480-, -1024 or 1024 at position 0", + }, + + { + in: "height:480", + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Height: &IntConstraint{ + Min: 480, + Max: 480, + }, + }, + }, + }, + }, + }, + + { + in: "width:++0", + errContains: "Unable to parse \"++0\" as range, wanted something like 480-1024, 480-, -1024 or 1024 at position 0", + }, + + { + in: "width:640", + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Width: 
&IntConstraint{ + Min: 640, + Max: 640, + }, + }, + }, + }, + }, + }, + { + name: "tag with spaces", + in: `tag:"Foo Bar"`, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "tag", + Value: "Foo Bar", + SkipHidden: true, + }, + }, + }, + + { + name: "attribute search", + in: "attr:foo:bar", + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "foo", + Value: "bar", + SkipHidden: true, + }, + }, + }, + + { + name: "attribute search with space in value", + in: `attr:foo:"fun bar"`, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "foo", + Value: "fun bar", + SkipHidden: true, + }, + }, + }, + + { + in: "tag:funny", + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "tag", + Value: "funny", + SkipHidden: true, + }, + }, + }, + + { + in: "title:Doggies", + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "title", + ValueMatches: &StringConstraint{ + Contains: "Doggies", + CaseInsensitive: true, + }, + SkipHidden: true, + }, + }, + }, + + { + in: "childrenof:sha1-f00ba4", + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Relation: &RelationConstraint{ + Relation: "parent", + Any: &Constraint{ + BlobRefPrefix: "sha1-f00ba4", + }, + }, + }, + }, + }, + + { + name: "Unmatched quote", + in: `is:pano and "foo`, + errContains: "Unclosed quote at position 12", + }, + + { + name: "Unmatched quote", + in: `"foo`, + errContains: "Unclosed quote at position 0", + }, + + { + name: "Unmatched (", + in: "(", + errContains: "No matching closing parenthesis at position 0", + }, + + { + name: "Unmatched )", + in: ")", + errContains: "No matching opening parenthesis", + }, + + { + name: "Unmatched ) at the end ", + in: "is:pano or attr:foo:bar )", + want: orConst(ispanoC, attrfoobarC), + remCount: 1, + }, + + { + name: "empty search", + in: "", + want: nil, + }, + + { + name: "faulty negation in 'or'", + in: "is:pano - or - is:pano", + errContains: "at position 10", + }, + + { + name: "faulty negation in 'or'", + 
in: "is:pano or -", + errContains: "an atom", + }, + + { + name: "faulty disjunction, empty right", + in: "is:pano or", + errContains: "at position 8", + }, + + { + name: "faulty disjunction", + in: "or is:pano", + errContains: "at position 0", + }, + + { + name: "faulty conjunction", + in: "and is:pano", + errContains: "at position 0", + }, + + { + name: "one atom", + in: "is:pano", + want: ispanoC, + }, + + { + name: "negated atom", + in: "- is:pano", + want: notConst(ispanoC), + }, + + { + name: "double negated atom", + in: "- - is:pano", + want: ispanoC, + }, + + { + name: "parenthesized atom with implicit 'and' and other atom", + in: "( is:pano ) attr:foo:bar", + want: andConst(ispanoC, attrfoobarC), + }, + + { + name: "negated implicit 'and'", + in: "- ( is:pano attr:foo:bar )", + want: notConst(andConst(ispanoC, attrfoobarC)), + }, + + { + name: "negated implicit 'and' with trailing attr:go:run", + in: "- ( is:pano attr:foo:bar ) attr:go:run", + want: andConst(notConst(andConst(ispanoC, attrfoobarC)), attrgorunC), + }, + + { + name: "parenthesized implicit 'and'", + in: "( is:pano attr:foo:bar )", + want: andConst(ispanoC, attrfoobarC), + }, + + { + name: "simple 'or' of two atoms", + in: "is:pano or attr:foo:bar", + want: orConst(ispanoC, attrfoobarC), + }, + + { + name: "left associativity of implicit 'and'", + in: "is:pano attr:go:run attr:foo:bar", + want: andConst(andConst(ispanoC, attrgorunC), attrfoobarC), + }, + + { + name: "left associativity of explicit 'and'", + in: "is:pano and attr:go:run and attr:foo:bar", + want: andConst(andConst(ispanoC, attrgorunC), attrfoobarC), + }, + + { + name: "left associativity of 'or'", + in: "is:pano or attr:go:run or attr:foo:bar", + want: orConst(orConst(ispanoC, attrgorunC), attrfoobarC)}, + + { + name: "left associativity of 'or' with negated atom", + in: "is:pano or - attr:go:run or attr:foo:bar", + want: orConst(orConst(ispanoC, notConst(attrgorunC)), attrfoobarC), + }, + + { + name: "left associativity of 
'or' with double negated atom", + in: "is:pano or - - attr:go:run or attr:foo:bar", + want: orConst(orConst(ispanoC, attrgorunC), attrfoobarC), + }, + + { + name: "left associativity of 'or' with parenthesized subexpression", + in: "is:pano or ( - attr:go:run ) or attr:foo:bar", + want: orConst(orConst(ispanoC, notConst(attrgorunC)), attrfoobarC), + }, + + { + name: "explicit 'and' of two atoms", + in: "is:pano and attr:foo:bar", + want: andConst(ispanoC, attrfoobarC), + }, + + { + name: "implicit 'and' of two atom", + in: "is:pano attr:foo:bar", + want: andConst(ispanoC, attrfoobarC), + }, + + { + name: "grouping an 'and' in an 'or'", + in: "is:pano or ( attr:foo:bar attr:go:run )", + want: orConst(ispanoC, andConst(attrfoobarC, attrgorunC)), + }, + + { + name: "precedence of 'and' over 'or'", + in: "is:pano or attr:foo:bar and attr:go:run", + want: orConst(ispanoC, andConst(attrfoobarC, attrgorunC)), + }, + + { + name: "precedence of 'and' over 'or' with 'and' on the left", + in: "is:pano and attr:foo:bar or attr:go:run", + want: orConst(andConst(ispanoC, attrfoobarC), attrgorunC), + }, + + { + name: "precedence of 'and' over 'or' with 'and' on the left and right", + in: "is:pano and attr:foo:bar or attr:go:run is:pano", + want: orConst(andConst(ispanoC, attrfoobarC), andConst(attrgorunC, ispanoC)), + }, + + { + name: "precedence of 'and' over 'or' with 'and' on the left and right with a negation", + in: "is:pano and attr:foo:bar or - attr:go:run is:pano", + want: orConst(andConst(ispanoC, attrfoobarC), andConst(notConst(attrgorunC), ispanoC)), + }, + + { + name: "precedence of 'and' over 'or' with 'and' on the left and right with a negation of group and trailing 'and'", + in: "is:pano and attr:foo:bar or - ( attr:go:run is:pano ) is:pano", + want: orConst(andConst(ispanoC, attrfoobarC), andConst(notConst(andConst(attrgorunC, ispanoC)), ispanoC)), + }, + + { + name: "complicated", + in: "- ( is:pano and attr:foo:bar ) or - ( attr:go:run is:pano ) is:pano", + 
want: orConst(notConst(andConst(ispanoC, attrfoobarC)), andConst(notConst(andConst(attrgorunC, ispanoC)), ispanoC)), + }, + + { + name: "complicated", + in: "is:pano or attr:foo:bar attr:go:run or - attr:go:run or is:pano is:pano", + want: orConst(orConst(orConst(ispanoC, andConst(attrfoobarC, attrgorunC)), notConst(attrgorunC)), andConst(ispanoC, ispanoC)), + }, + + { + name: "complicated", + in: "is:pano or attr:foo:bar attr:go:run or - attr:go:run or is:pano is:pano or attr:foo:bar", + want: orConst(orConst(orConst(orConst(ispanoC, andConst(attrfoobarC, attrgorunC)), notConst(attrgorunC)), andConst(ispanoC, ispanoC)), attrfoobarC), + }, +} + +func TestParseExp(t *testing.T) { + for _, tt := range parseExpTests { + p := newParser(tt.in, context.TODO()) + + got, err := p.parseExp() + + doChecking("parseExp", t, tt, got, err, p) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/handler.go b/vendor/github.com/camlistore/camlistore/pkg/search/handler.go new file mode 100644 index 00000000..6db46bde --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/handler.go @@ -0,0 +1,811 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package search + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/index" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/types" + "camlistore.org/pkg/types/camtypes" +) + +const buffered = 32 // arbitrary channel buffer size +const maxResults = 1000 // arbitrary limit on the number of search results returned +const defaultNumResults = 50 + +// MaxImageSize is the maximum width or height in pixels that we will serve image +// thumbnails at. It is used in the search result UI. +const MaxImageSize = 2000 + +var blobRefPattern = regexp.MustCompile(blob.Pattern) + +func init() { + blobserver.RegisterHandlerConstructor("search", newHandlerFromConfig) +} + +// Handler handles search queries. +type Handler struct { + index index.Interface + owner blob.Ref + + // Corpus optionally specifies the full in-memory metadata corpus + // to use. + // TODO: this may be required in the future, or folded into the index + // interface. + corpus *index.Corpus + + // WebSocket hub + wsHub *wsHub +} + +// GetRecentPermanoder is the interface containing the GetRecentPermanodes method. +type GetRecentPermanoder interface { + // GetRecentPermanodes returns recently-modified permanodes. + // This is a higher-level query returning more metadata than the index.GetRecentPermanodes, + // which only scans the blobrefs but doesn't return anything about the permanodes. 
+ GetRecentPermanodes(*RecentRequest) (*RecentResponse, error) +} + +var _ GetRecentPermanoder = (*Handler)(nil) + +func NewHandler(index index.Interface, owner blob.Ref) *Handler { + sh := &Handler{ + index: index, + owner: owner, + } + sh.wsHub = newWebsocketHub(sh) + go sh.wsHub.run() + sh.subscribeToNewBlobs() + return sh +} + +func (sh *Handler) subscribeToNewBlobs() { + ch := make(chan blob.Ref, buffered) + blobserver.GetHub(sh.index).RegisterListener(ch) + go func() { + for br := range ch { + bm, err := sh.index.GetBlobMeta(br) + if err == nil { + sh.wsHub.newBlobRecv <- bm.CamliType + } + } + }() +} + +func (h *Handler) SetCorpus(c *index.Corpus) { + h.corpus = c +} + +// SendStatusUpdate sends a JSON status map to any connected WebSocket clients. +func (h *Handler) SendStatusUpdate(status json.RawMessage) { + h.wsHub.statusUpdate <- status +} + +func newHandlerFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { + indexPrefix := conf.RequiredString("index") // TODO: add optional help tips here? + ownerBlobStr := conf.RequiredString("owner") + devBlockStartupPrefix := conf.OptionalString("devBlockStartupOn", "") + slurpToMemory := conf.OptionalBool("slurpToMemory", false) + if err := conf.Validate(); err != nil { + return nil, err + } + + if devBlockStartupPrefix != "" { + _, err := ld.GetHandler(devBlockStartupPrefix) + if err != nil { + return nil, fmt.Errorf("search handler references bogus devBlockStartupOn handler %s: %v", devBlockStartupPrefix, err) + } + } + + indexHandler, err := ld.GetHandler(indexPrefix) + if err != nil { + return nil, fmt.Errorf("search config references unknown handler %q", indexPrefix) + } + indexer, ok := indexHandler.(index.Interface) + if !ok { + return nil, fmt.Errorf("search config references invalid indexer %q (actually a %T)", indexPrefix, indexHandler) + } + ownerBlobRef, ok := blob.Parse(ownerBlobStr) + if !ok { + return nil, fmt.Errorf("search 'owner' has malformed blobref %q; expecting e.g. 
sha1-xxxxxxxxxxxx", + ownerBlobStr) + } + h := NewHandler(indexer, ownerBlobRef) + if slurpToMemory { + ii := indexer.(*index.Index) + corpus, err := ii.KeepInMemory() + if err != nil { + return nil, fmt.Errorf("error slurping index to memory: %v", err) + } + h.corpus = corpus + } + return h, nil +} + +// Owner returns Handler owner's public key blobref. +func (h *Handler) Owner() blob.Ref { + // TODO: figure out a plan for an owner having multiple active public keys, or public + // key rotation + return h.owner +} + +func (h *Handler) Index() index.Interface { + return h.index +} + +func jsonMap() map[string]interface{} { + return make(map[string]interface{}) +} + +var getHandler = map[string]func(*Handler, http.ResponseWriter, *http.Request){ + "ws": (*Handler).serveWebSocket, + "recent": (*Handler).serveRecentPermanodes, + "permanodeattr": (*Handler).servePermanodesWithAttr, + "describe": (*Handler).serveDescribe, + "claims": (*Handler).serveClaims, + "files": (*Handler).serveFiles, + "signerattrvalue": (*Handler).serveSignerAttrValue, + "signerpaths": (*Handler).serveSignerPaths, + "edgesto": (*Handler).serveEdgesTo, +} + +var postHandler = map[string]func(*Handler, http.ResponseWriter, *http.Request){ + "describe": (*Handler).serveDescribe, + "query": (*Handler).serveQuery, +} + +func (sh *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + suffix := httputil.PathSuffix(req) + + handlers := getHandler + switch { + case httputil.IsGet(req): + // use default from above + case req.Method == "POST": + handlers = postHandler + default: + handlers = nil + } + fn := handlers[strings.TrimPrefix(suffix, "camli/search/")] + if fn != nil { + fn(sh, rw, req) + return + } + + // TODO: discovery for the endpoints & better error message with link to discovery info + ret := camtypes.SearchErrorResponse{ + Error: "Unsupported search path or method", + ErrorType: "input", + } + httputil.ReturnJSON(rw, &ret) +} + +// sanitizeNumResults takes n as a requested number 
of search results and sanitizes it. +func sanitizeNumResults(n int) int { + if n <= 0 || n > maxResults { + return defaultNumResults + } + return n +} + +// RecentRequest is a request to get a RecentResponse. +type RecentRequest struct { + N int // if zero, default number of results + Before time.Time // if zero, now +} + +func (r *RecentRequest) URLSuffix() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "camli/search/recent?n=%d", r.n()) + if !r.Before.IsZero() { + fmt.Fprintf(&buf, "&before=%s", types.Time3339(r.Before)) + } + return buf.String() +} + +// fromHTTP panics with an httputil value on failure +func (r *RecentRequest) fromHTTP(req *http.Request) { + r.N, _ = strconv.Atoi(req.FormValue("n")) + if before := req.FormValue("before"); before != "" { + r.Before = time.Time(types.ParseTime3339OrZero(before)) + } +} + +// n returns the sanitized maximum number of search results. +func (r *RecentRequest) n() int { + return sanitizeNumResults(r.N) +} + +// WithAttrRequest is a request to get a WithAttrResponse. +type WithAttrRequest struct { + N int // max number of results + Signer blob.Ref // if nil, will use the server's default owner (if configured) + // Requested attribute. If blank, all attributes are searched (for Value) + // as fulltext. + Attr string + // Value of the requested attribute. If blank, permanodes which have + // request.Attr as an attribute are searched. + Value string + Fuzzy bool // fulltext search (if supported). 
+} + +func (r *WithAttrRequest) URLSuffix() string { + return fmt.Sprintf("camli/search/permanodeattr?signer=%v&value=%v&fuzzy=%v&attr=%v&max=%v", + r.Signer, url.QueryEscape(r.Value), r.Fuzzy, r.Attr, r.N) +} + +// fromHTTP panics with an httputil value on failure +func (r *WithAttrRequest) fromHTTP(req *http.Request) { + r.Signer = blob.ParseOrZero(req.FormValue("signer")) + r.Value = req.FormValue("value") + fuzzy := req.FormValue("fuzzy") // exact match if empty + fuzzyMatch := false + if fuzzy != "" { + lowered := strings.ToLower(fuzzy) + if lowered == "true" || lowered == "t" { + fuzzyMatch = true + } + } + r.Attr = req.FormValue("attr") // all attributes if empty + if r.Attr == "" { // and force fuzzy in that case. + fuzzyMatch = true + } + r.Fuzzy = fuzzyMatch + max := req.FormValue("max") + if max != "" { + maxR, err := strconv.Atoi(max) + if err != nil { + panic(httputil.InvalidParameterError("max")) + } + r.N = maxR + } + r.N = r.n() +} + +// n returns the sanitized maximum number of search results. +func (r *WithAttrRequest) n() int { + return sanitizeNumResults(r.N) +} + +// ClaimsRequest is a request to get a ClaimsResponse. +type ClaimsRequest struct { + Permanode blob.Ref + + // AttrFilter optionally filters claims about the given attribute. + // If empty, all claims for the given Permanode are returned. + AttrFilter string +} + +func (r *ClaimsRequest) URLSuffix() string { + return fmt.Sprintf("camli/search/claims?permanode=%v&attrFilter=%s", + r.Permanode, url.QueryEscape(r.AttrFilter)) +} + +// fromHTTP panics with an httputil value on failure +func (r *ClaimsRequest) fromHTTP(req *http.Request) { + r.Permanode = httputil.MustGetBlobRef(req, "permanode") + r.AttrFilter = req.FormValue("attrFilter") +} + +// SignerPathsRequest is a request to get a SignerPathsResponse. 
+type SignerPathsRequest struct { + Signer blob.Ref + Target blob.Ref +} + +// fromHTTP panics with an httputil value on failure +func (r *SignerPathsRequest) fromHTTP(req *http.Request) { + r.Signer = httputil.MustGetBlobRef(req, "signer") + r.Target = httputil.MustGetBlobRef(req, "target") +} + +// EdgesRequest is a request to get an EdgesResponse. +type EdgesRequest struct { + // The blob we want to find as a reference. + ToRef blob.Ref +} + +// fromHTTP panics with an httputil value on failure +func (r *EdgesRequest) fromHTTP(req *http.Request) { + r.ToRef = httputil.MustGetBlobRef(req, "blobref") +} + +// TODO(mpl): it looks like we never populate RecentResponse.Error*, shouldn't we remove them? +// Same for WithAttrResponse. I suppose it doesn't matter much if we end up removing GetRecentPermanodes anyway... + +// RecentResponse is the JSON response from $searchRoot/camli/search/recent. +type RecentResponse struct { + Recent []*RecentItem `json:"recent"` + Meta MetaMap `json:"meta"` + + Error string `json:"error,omitempty"` + ErrorType string `json:"errorType,omitempty"` +} + +func (r *RecentResponse) Err() error { + if r.Error != "" || r.ErrorType != "" { + if r.ErrorType != "" { + return fmt.Errorf("%s: %s", r.ErrorType, r.Error) + } + return errors.New(r.Error) + } + return nil +} + +// WithAttrResponse is the JSON response from $searchRoot/camli/search/permanodeattr. +type WithAttrResponse struct { + WithAttr []*WithAttrItem `json:"withAttr"` + Meta MetaMap `json:"meta"` + + Error string `json:"error,omitempty"` + ErrorType string `json:"errorType,omitempty"` +} + +func (r *WithAttrResponse) Err() error { + if r.Error != "" || r.ErrorType != "" { + if r.ErrorType != "" { + return fmt.Errorf("%s: %s", r.ErrorType, r.Error) + } + return errors.New(r.Error) + } + return nil +} + +// ClaimsResponse is the JSON response from $searchRoot/camli/search/claims. 
+type ClaimsResponse struct { + Claims []*ClaimsItem `json:"claims"` +} + +// SignerPathsResponse is the JSON response from $searchRoot/camli/search/signerpaths. +type SignerPathsResponse struct { + Paths []*SignerPathsItem `json:"paths"` + Meta MetaMap `json:"meta"` +} + +// A RecentItem is an item returned from $searchRoot/camli/search/recent in the "recent" list. +type RecentItem struct { + BlobRef blob.Ref `json:"blobref"` + ModTime types.Time3339 `json:"modtime"` + Owner blob.Ref `json:"owner"` +} + +// A WithAttrItem is an item returned from $searchRoot/camli/search/permanodeattr. +type WithAttrItem struct { + Permanode blob.Ref `json:"permanode"` +} + +// A ClaimsItem is an item returned from $searchRoot/camli/search/claims. +type ClaimsItem struct { + BlobRef blob.Ref `json:"blobref"` + Signer blob.Ref `json:"signer"` + Permanode blob.Ref `json:"permanode"` + Date types.Time3339 `json:"date"` + Type string `json:"type"` + Attr string `json:"attr,omitempty"` + Value string `json:"value,omitempty"` +} + +// A SignerPathsItem is an item returned from $searchRoot/camli/search/signerpaths. +type SignerPathsItem struct { + ClaimRef blob.Ref `json:"claimRef"` + BaseRef blob.Ref `json:"baseRef"` + Suffix string `json:"suffix"` +} + +// EdgesResponse is the JSON response from $searchRoot/camli/search/edgesto. +type EdgesResponse struct { + ToRef blob.Ref `json:"toRef"` + EdgesTo []*EdgeItem `json:"edgesTo"` +} + +// An EdgeItem is an item returned from $searchRoot/camli/search/edgesto. +type EdgeItem struct { + From blob.Ref `json:"from"` + FromType string `json:"fromType"` +} + +var testHookBug121 = func() {} + +// GetRecentPermanodes returns recently-modified permanodes. 
+func (sh *Handler) GetRecentPermanodes(req *RecentRequest) (*RecentResponse, error) { + ch := make(chan camtypes.RecentPermanode) + errch := make(chan error, 1) + before := time.Now() + if !req.Before.IsZero() { + before = req.Before + } + go func() { + errch <- sh.index.GetRecentPermanodes(ch, sh.owner, req.n(), before) + }() + + dr := sh.NewDescribeRequest() + + var recent []*RecentItem + for res := range ch { + dr.Describe(res.Permanode, 2) + recent = append(recent, &RecentItem{ + BlobRef: res.Permanode, + Owner: res.Signer, + ModTime: types.Time3339(res.LastModTime), + }) + testHookBug121() // http://camlistore.org/issue/121 + } + + if err := <-errch; err != nil { + return nil, err + } + + metaMap, err := dr.metaMap() + if err != nil { + return nil, err + } + + res := &RecentResponse{ + Recent: recent, + Meta: metaMap, + } + return res, nil +} + +func (sh *Handler) serveRecentPermanodes(rw http.ResponseWriter, req *http.Request) { + defer httputil.RecoverJSON(rw, req) + var rr RecentRequest + rr.fromHTTP(req) + res, err := sh.GetRecentPermanodes(&rr) + if err != nil { + httputil.ServeJSONError(rw, err) + return + } + httputil.ReturnJSON(rw, res) +} + +// GetPermanodesWithAttr returns permanodes with attribute req.Attr +// having the req.Value as a value. +// See WithAttrRequest for more details about the query. 
+func (sh *Handler) GetPermanodesWithAttr(req *WithAttrRequest) (*WithAttrResponse, error) { + ch := make(chan blob.Ref, buffered) + errch := make(chan error, 1) + go func() { + signer := req.Signer + if !signer.Valid() { + signer = sh.owner + } + errch <- sh.index.SearchPermanodesWithAttr(ch, + &camtypes.PermanodeByAttrRequest{ + Attribute: req.Attr, + Query: req.Value, + Signer: signer, + FuzzyMatch: req.Fuzzy, + MaxResults: req.N, + }) + }() + + dr := sh.NewDescribeRequest() + + var withAttr []*WithAttrItem + for res := range ch { + dr.Describe(res, 2) + withAttr = append(withAttr, &WithAttrItem{ + Permanode: res, + }) + } + + metaMap, err := dr.metaMap() + if err != nil { + return nil, err + } + + if err := <-errch; err != nil { + return nil, err + } + + res := &WithAttrResponse{ + WithAttr: withAttr, + Meta: metaMap, + } + return res, nil +} + +// servePermanodesWithAttr uses the indexer to search for the permanodes matching +// the request. +// The valid values for the "attr" key in the request (i.e the only attributes +// for a permanode which are actually indexed as such) are "tag" and "title". +func (sh *Handler) servePermanodesWithAttr(rw http.ResponseWriter, req *http.Request) { + defer httputil.RecoverJSON(rw, req) + var wr WithAttrRequest + wr.fromHTTP(req) + res, err := sh.GetPermanodesWithAttr(&wr) + if err != nil { + httputil.ServeJSONError(rw, err) + return + } + httputil.ReturnJSON(rw, res) +} + +// GetClaims returns the claims on req.Permanode signed by sh.owner. 
+func (sh *Handler) GetClaims(req *ClaimsRequest) (*ClaimsResponse, error) { + if !req.Permanode.Valid() { + return nil, errors.New("Error getting claims: nil permanode.") + } + var claims []camtypes.Claim + claims, err := sh.index.AppendClaims(claims, req.Permanode, sh.owner, req.AttrFilter) + if err != nil { + return nil, fmt.Errorf("Error getting claims of %s: %v", req.Permanode.String(), err) + } + sort.Sort(camtypes.ClaimsByDate(claims)) + var jclaims []*ClaimsItem + for _, claim := range claims { + jclaim := &ClaimsItem{ + BlobRef: claim.BlobRef, + Signer: claim.Signer, + Permanode: claim.Permanode, + Date: types.Time3339(claim.Date), + Type: claim.Type, + Attr: claim.Attr, + Value: claim.Value, + } + jclaims = append(jclaims, jclaim) + } + + res := &ClaimsResponse{ + Claims: jclaims, + } + return res, nil +} + +func (sh *Handler) serveClaims(rw http.ResponseWriter, req *http.Request) { + defer httputil.RecoverJSON(rw, req) + var cr ClaimsRequest + cr.fromHTTP(req) + res, err := sh.GetClaims(&cr) + if err != nil { + httputil.ServeJSONError(rw, err) + return + } + httputil.ReturnJSON(rw, res) +} + +func (sh *Handler) serveFiles(rw http.ResponseWriter, req *http.Request) { + var ret camtypes.FileSearchResponse + defer httputil.ReturnJSON(rw, &ret) + + br, ok := blob.Parse(req.FormValue("wholedigest")) + if !ok { + ret.Error = "Missing or invalid 'wholedigest' param" + ret.ErrorType = "input" + return + } + + files, err := sh.index.ExistingFileSchemas(br) + if err != nil { + ret.Error = err.Error() + ret.ErrorType = "server" + return + } + + // the ui code expects an object + if files == nil { + files = []blob.Ref{} + } + + ret.Files = files + return +} + +// SignerAttrValueResponse is the JSON response to $search/camli/search/signerattrvalue +type SignerAttrValueResponse struct { + Permanode blob.Ref `json:"permanode"` + Meta MetaMap `json:"meta"` +} + +func (sh *Handler) serveSignerAttrValue(rw http.ResponseWriter, req *http.Request) { + defer 
httputil.RecoverJSON(rw, req) + signer := httputil.MustGetBlobRef(req, "signer") + attr := httputil.MustGet(req, "attr") + value := httputil.MustGet(req, "value") + + pn, err := sh.index.PermanodeOfSignerAttrValue(signer, attr, value) + if err != nil { + httputil.ServeJSONError(rw, err) + return + } + + dr := sh.NewDescribeRequest() + dr.Describe(pn, 2) + metaMap, err := dr.metaMap() + if err != nil { + httputil.ServeJSONError(rw, err) + return + } + + httputil.ReturnJSON(rw, &SignerAttrValueResponse{ + Permanode: pn, + Meta: metaMap, + }) +} + +// EdgesTo returns edges that reference req.RefTo. +// It filters out since-deleted permanode edges. +func (sh *Handler) EdgesTo(req *EdgesRequest) (*EdgesResponse, error) { + toRef := req.ToRef + toRefStr := toRef.String() + var edgeItems []*EdgeItem + + edges, err := sh.index.EdgesTo(toRef, nil) + if err != nil { + panic(err) + } + + type edgeOrError struct { + edge *EdgeItem // or nil + err error + } + resc := make(chan edgeOrError) + verify := func(edge *camtypes.Edge) { + db, err := sh.NewDescribeRequest().DescribeSync(edge.From) + if err != nil { + resc <- edgeOrError{err: err} + return + } + found := false + if db.Permanode != nil { + for attr, vv := range db.Permanode.Attr { + if index.IsBlobReferenceAttribute(attr) { + for _, v := range vv { + if v == toRefStr { + found = true + } + } + } + } + } + var ei *EdgeItem + if found { + ei = &EdgeItem{ + From: edge.From, + FromType: "permanode", + } + } + resc <- edgeOrError{edge: ei} + } + verifying := 0 + for _, edge := range edges { + if edge.FromType == "permanode" { + verifying++ + go verify(edge) + continue + } + ei := &EdgeItem{ + From: edge.From, + FromType: edge.FromType, + } + edgeItems = append(edgeItems, ei) + } + for i := 0; i < verifying; i++ { + res := <-resc + if res.err != nil { + return nil, res.err + } + if res.edge != nil { + edgeItems = append(edgeItems, res.edge) + } + } + + return &EdgesResponse{ + ToRef: toRef, + EdgesTo: edgeItems, + }, nil +} + 
+// Unlike the index interface's EdgesTo method, the "edgesto" Handler +// here additionally filters out since-deleted permanode edges. +func (sh *Handler) serveEdgesTo(rw http.ResponseWriter, req *http.Request) { + defer httputil.RecoverJSON(rw, req) + var er EdgesRequest + er.fromHTTP(req) + res, err := sh.EdgesTo(&er) + if err != nil { + httputil.ServeJSONError(rw, err) + return + } + httputil.ReturnJSON(rw, res) +} + +func (sh *Handler) serveQuery(rw http.ResponseWriter, req *http.Request) { + defer httputil.RecoverJSON(rw, req) + + var sq SearchQuery + if err := sq.fromHTTP(req); err != nil { + httputil.ServeJSONError(rw, err) + return + } + + sr, err := sh.Query(&sq) + if err != nil { + httputil.ServeJSONError(rw, err) + return + } + + httputil.ReturnJSON(rw, sr) +} + +// GetSignerPaths returns paths with a target of req.Target. +func (sh *Handler) GetSignerPaths(req *SignerPathsRequest) (*SignerPathsResponse, error) { + if !req.Signer.Valid() { + return nil, errors.New("Error getting signer paths: nil signer.") + } + if !req.Target.Valid() { + return nil, errors.New("Error getting signer paths: nil target.") + } + paths, err := sh.index.PathsOfSignerTarget(req.Signer, req.Target) + if err != nil { + return nil, fmt.Errorf("Error getting paths of %s: %v", req.Target.String(), err) + } + var jpaths []*SignerPathsItem + for _, path := range paths { + jpaths = append(jpaths, &SignerPathsItem{ + ClaimRef: path.Claim, + BaseRef: path.Base, + Suffix: path.Suffix, + }) + } + + dr := sh.NewDescribeRequest() + for _, path := range paths { + dr.Describe(path.Base, 2) + } + metaMap, err := dr.metaMap() + if err != nil { + return nil, err + } + + res := &SignerPathsResponse{ + Paths: jpaths, + Meta: metaMap, + } + return res, nil +} + +func (sh *Handler) serveSignerPaths(rw http.ResponseWriter, req *http.Request) { + defer httputil.RecoverJSON(rw, req) + var sr SignerPathsRequest + sr.fromHTTP(req) + + res, err := sh.GetSignerPaths(&sr) + if err != nil { + 
httputil.ServeJSONError(rw, err) + return + } + httputil.ReturnJSON(rw, res) +} + +const camliTypePrefix = "application/json; camliType=" diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/handler_test.go b/vendor/github.com/camlistore/camlistore/pkg/search/handler_test.go new file mode 100644 index 00000000..0663ac0a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/handler_test.go @@ -0,0 +1,760 @@ +/* +Copyright 2011 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package search_test + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "path/filepath" + "reflect" + "sort" + "strings" + "testing" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/index" + "camlistore.org/pkg/index/indextest" + "camlistore.org/pkg/osutil" + . "camlistore.org/pkg/search" + "camlistore.org/pkg/test" +) + +// An indexOwnerer is something that knows who owns the index. +// It is implemented by indexAndOwner for use by TestHandler. +type indexOwnerer interface { + IndexOwner() blob.Ref +} + +type indexAndOwner struct { + index.Interface + owner blob.Ref +} + +func (io indexAndOwner) IndexOwner() blob.Ref { + return io.owner +} + +type handlerTest struct { + // setup is responsible for populating the index before the + // handler is invoked. 
+ // + // A FakeIndex is constructed and provided to setup and is + // generally then returned as the Index to use, but an + // alternate Index may be returned instead, in which case the + // FakeIndex is not used. + setup func(fi *test.FakeIndex) index.Interface + + name string // test name + query string // the HTTP path + optional query suffix after "camli/search/" + postBody string // if non-nil, a POST request + + want map[string]interface{} + // wantDescribed is a list of blobref strings that should've been + // described in meta. If want is nil and this is non-zero length, + // want is ignored. + wantDescribed []string +} + +var owner = blob.MustParse("abcown-123") + +func parseJSON(s string) map[string]interface{} { + m := make(map[string]interface{}) + err := json.Unmarshal([]byte(s), &m) + if err != nil { + panic(err) + } + return m +} + +// addToClockOrigin returns the given Duration added +// to test.ClockOrigin, in UTC, and RFC3339Nano formatted. +func addToClockOrigin(d time.Duration) string { + return test.ClockOrigin.Add(d).UTC().Format(time.RFC3339Nano) +} + +func handlerDescribeTestSetup(fi *test.FakeIndex) index.Interface { + pn := blob.MustParse("perma-123") + fi.AddMeta(pn, "permanode", 123) + fi.AddClaim(owner, pn, "set-attribute", "camliContent", "fakeref-232") + fi.AddMeta(blob.MustParse("fakeref-232"), "", 878) + + // Test deleting all attributes + fi.AddClaim(owner, pn, "add-attribute", "wont-be-present", "x") + fi.AddClaim(owner, pn, "add-attribute", "wont-be-present", "y") + fi.AddClaim(owner, pn, "del-attribute", "wont-be-present", "") + + // Test deleting a specific attribute. + fi.AddClaim(owner, pn, "add-attribute", "only-delete-b", "a") + fi.AddClaim(owner, pn, "add-attribute", "only-delete-b", "b") + fi.AddClaim(owner, pn, "add-attribute", "only-delete-b", "c") + fi.AddClaim(owner, pn, "del-attribute", "only-delete-b", "b") + return fi +} + +// extends handlerDescribeTestSetup but adds a camliContentImage to pn. 
+func handlerDescribeTestSetupWithImage(fi *test.FakeIndex) index.Interface { + handlerDescribeTestSetup(fi) + pn := blob.MustParse("perma-123") + imageRef := blob.MustParse("fakeref-789") + fi.AddMeta(imageRef, "", 789) + fi.AddClaim(owner, pn, "set-attribute", "camliContentImage", imageRef.String()) + return fi +} + +// extends handlerDescribeTestSetup but adds various embedded references to other nodes. +func handlerDescribeTestSetupWithEmbeddedRefs(fi *test.FakeIndex) index.Interface { + handlerDescribeTestSetup(fi) + pn := blob.MustParse("perma-123") + c1 := blob.MustParse("fakeref-01") + c2 := blob.MustParse("fakeref-02") + c3 := blob.MustParse("fakeref-03") + c4 := blob.MustParse("fakeref-04") + c5 := blob.MustParse("fakeref-05") + c6 := blob.MustParse("fakeref-06") + fi.AddMeta(c1, "", 1) + fi.AddMeta(c2, "", 2) + fi.AddMeta(c3, "", 3) + fi.AddMeta(c4, "", 4) + fi.AddMeta(c5, "", 5) + fi.AddMeta(c6, "", 6) + fi.AddClaim(owner, pn, "set-attribute", c1.String(), "foo") + fi.AddClaim(owner, pn, "set-attribute", "foo,"+c2.String()+"=bar", "foo") + fi.AddClaim(owner, pn, "set-attribute", "foo:"+c3.String()+"?bar,"+c4.String(), "foo") + fi.AddClaim(owner, pn, "set-attribute", "foo", c5.String()) + fi.AddClaim(owner, pn, "add-attribute", "bar", "baz") + fi.AddClaim(owner, pn, "add-attribute", "bar", "monkey\n"+c6.String()) + return fi +} + +var handlerTests = []handlerTest{ + { + name: "describe-missing", + setup: func(fi *test.FakeIndex) index.Interface { return fi }, + query: "describe?blobref=eabfakeref-0555", + want: parseJSON(`{ + "meta": { + } + }`), + }, + + { + name: "describe-jpeg-blob", + setup: func(fi *test.FakeIndex) index.Interface { + fi.AddMeta(blob.MustParse("abfakeref-0555"), "", 999) + return fi + }, + query: "describe?blobref=abfakeref-0555", + want: parseJSON(`{ + "meta": { + "abfakeref-0555": { + "blobRef": "abfakeref-0555", + "size": 999 + } + } + }`), + }, + + { + name: "describe-permanode", + setup: handlerDescribeTestSetup, + query: 
"describe", + postBody: `{ + "blobref": "perma-123", + "rules": [ + {"attrs": ["camliContent"]} + ] +}`, + want: parseJSON(`{ + "meta": { + "fakeref-232": { + "blobRef": "fakeref-232", + "size": 878 + }, + "perma-123": { + "blobRef": "perma-123", + "camliType": "permanode", + "size": 123, + "permanode": { + "attr": { + "camliContent": [ "fakeref-232" ], + "only-delete-b": [ "a", "c" ] + }, + "modtime": "` + addToClockOrigin(8*time.Second) + `" + } + } + } + }`), + }, + + { + name: "describe-permanode-image", + setup: handlerDescribeTestSetupWithImage, + query: "describe", + postBody: `{ + "blobref": "perma-123", + "rules": [ + {"attrs": ["camliContent", "camliContentImage"]} + ] +}`, + want: parseJSON(`{ + "meta": { + "fakeref-232": { + "blobRef": "fakeref-232", + "size": 878 + }, + "fakeref-789": { + "blobRef": "fakeref-789", + "size": 789 + }, + "perma-123": { + "blobRef": "perma-123", + "camliType": "permanode", + "size": 123, + "permanode": { + "attr": { + "camliContent": [ "fakeref-232" ], + "camliContentImage": [ "fakeref-789" ], + "only-delete-b": [ "a", "c" ] + }, + "modtime": "` + addToClockOrigin(9*time.Second) + `" + } + } + } + }`), + }, + + // TODO(bradfitz): we'll probably will want to delete or redo this + // test when we remove depth=N support from describe. 
+ { + name: "describe-permanode-embedded-references", + setup: handlerDescribeTestSetupWithEmbeddedRefs, + query: "describe?blobref=perma-123&depth=2", + want: parseJSON(`{ + "meta": { + "fakeref-01": { + "blobRef": "fakeref-01", + "size": 1 + }, + "fakeref-02": { + "blobRef": "fakeref-02", + "size": 2 + }, + "fakeref-03": { + "blobRef": "fakeref-03", + "size": 3 + }, + "fakeref-04": { + "blobRef": "fakeref-04", + "size": 4 + }, + "fakeref-05": { + "blobRef": "fakeref-05", + "size": 5 + }, + "fakeref-06": { + "blobRef": "fakeref-06", + "size": 6 + }, + "fakeref-232": { + "blobRef": "fakeref-232", + "size": 878 + }, + "perma-123": { + "blobRef": "perma-123", + "camliType": "permanode", + "size": 123, + "permanode": { + "attr": { + "bar": [ + "baz", + "monkey\nfakeref-06" + ], + "fakeref-01": [ + "foo" + ], + "camliContent": [ + "fakeref-232" + ], + "foo": [ + "fakeref-05" + ], + "foo,fakeref-02=bar": [ + "foo" + ], + "foo:fakeref-03?bar,fakeref-04": [ + "foo" + ], + "camliContent": [ "fakeref-232" ], + "only-delete-b": [ "a", "c" ] + }, + "modtime": "` + addToClockOrigin(14*time.Second) + `" + } + } + } + }`), + }, + + { + name: "describe-permanode-timetravel", + setup: handlerDescribeTestSetup, + query: "describe", + postBody: `{ + "blobref": "perma-123", + "at": "` + addToClockOrigin(3*time.Second) + `", + "rules": [ + {"attrs": ["camliContent"]} + ] +}`, + want: parseJSON(`{ + "meta": { + "fakeref-232": { + "blobRef": "fakeref-232", + "size": 878 + }, + "perma-123": { + "blobRef": "perma-123", + "camliType": "permanode", + "size": 123, + "permanode": { + "attr": { + "camliContent": [ "fakeref-232" ], + "wont-be-present": [ "x", "y" ] + }, + "modtime": "` + addToClockOrigin(3*time.Second) + `" + } + } + } + }`), + }, + + // test that describe follows camliPath:foo attributes + { + name: "describe-permanode-follows-camliPath", + setup: func(fi *test.FakeIndex) index.Interface { + pn := blob.MustParse("perma-123") + fi.AddMeta(pn, "permanode", 123) + 
fi.AddClaim(owner, pn, "set-attribute", "camliPath:foo", "fakeref-123") + + fi.AddMeta(blob.MustParse("fakeref-123"), "", 123) + return fi + }, + query: "describe", + postBody: `{ + "blobref": "perma-123", + "rules": [ + {"attrs": ["camliPath:*"]} + ] +}`, + want: parseJSON(`{ + "meta": { + "fakeref-123": { + "blobRef": "fakeref-123", + "size": 123 + }, + "perma-123": { + "blobRef": "perma-123", + "camliType": "permanode", + "size": 123, + "permanode": { + "attr": { + "camliPath:foo": [ + "fakeref-123" + ] + }, + "modtime": "` + addToClockOrigin(1*time.Second) + `" + } + } + } +}`), + }, + + // Test recent permanodes + { + name: "recent-1", + setup: func(*test.FakeIndex) index.Interface { + // Ignore the fakeindex and use the real (but in-memory) implementation, + // using IndexDeps to populate it. + idx := index.NewMemoryIndex() + id := indextest.NewIndexDeps(idx) + + pn := id.NewPlannedPermanode("pn1") + id.SetAttribute(pn, "title", "Some title") + return indexAndOwner{idx, id.SignerBlobRef} + }, + query: "recent", + want: parseJSON(`{ + "recent": [ + {"blobref": "sha1-7ca7743e38854598680d94ef85348f2c48a44513", + "modtime": "2011-11-28T01:32:37.000123456Z", + "owner": "sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007"} + ], + "meta": { + "sha1-7ca7743e38854598680d94ef85348f2c48a44513": { + "blobRef": "sha1-7ca7743e38854598680d94ef85348f2c48a44513", + "camliType": "permanode", + "permanode": { + "attr": { "title": [ "Some title" ] }, + "modtime": "` + addToClockOrigin(1*time.Second) + `" + }, + "size": 534 + } + } + }`), + }, + + // Test recent permanode of a file + { + name: "recent-file", + setup: func(*test.FakeIndex) index.Interface { + // Ignore the fakeindex and use the real (but in-memory) implementation, + // using IndexDeps to populate it. 
+ idx := index.NewMemoryIndex() + id := indextest.NewIndexDeps(idx) + + // Upload a basic image + camliRootPath, err := osutil.GoPackagePath("camlistore.org") + if err != nil { + panic("Package camlistore.org no found in $GOPATH or $GOPATH not defined") + } + uploadFile := func(file string, modTime time.Time) blob.Ref { + fileName := filepath.Join(camliRootPath, "pkg", "index", "indextest", "testdata", file) + contents, err := ioutil.ReadFile(fileName) + if err != nil { + panic(err) + } + br, _ := id.UploadFile(file, string(contents), modTime) + return br + } + dudeFileRef := uploadFile("dude.jpg", time.Time{}) + + pn := id.NewPlannedPermanode("pn1") + id.SetAttribute(pn, "camliContent", dudeFileRef.String()) + return indexAndOwner{idx, id.SignerBlobRef} + }, + query: "recent", + want: parseJSON(`{ + "recent": [ + {"blobref": "sha1-7ca7743e38854598680d94ef85348f2c48a44513", + "modtime": "2011-11-28T01:32:37.000123456Z", + "owner": "sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007"} + ], + "meta": { + "sha1-7ca7743e38854598680d94ef85348f2c48a44513": { + "blobRef": "sha1-7ca7743e38854598680d94ef85348f2c48a44513", + "camliType": "permanode", + "permanode": { + "attr": { + "camliContent": [ + "sha1-e3f0ee86622dda4d7e8a4a4af51117fb79dbdbbb" + ] + }, + "modtime": "` + addToClockOrigin(1*time.Second) + `" + }, + "size": 534 + }, + "sha1-e3f0ee86622dda4d7e8a4a4af51117fb79dbdbbb": { + "blobRef": "sha1-e3f0ee86622dda4d7e8a4a4af51117fb79dbdbbb", + "camliType": "file", + "size": 184, + "file": { + "fileName": "dude.jpg", + "size": 1932, + "mimeType": "image/jpeg", + "wholeRef": "sha1-142b504945338158e0149d4ed25a41a522a28e88" + }, + "image": { + "width": 50, + "height": 100 + } + } + } + }`), + }, + + // Test recent permanode of a file, in a collection + { + name: "recent-file-collec", + setup: func(*test.FakeIndex) index.Interface { + SetTestHookBug121(func() { + time.Sleep(2 * time.Second) + }) + // Ignore the fakeindex and use the real (but in-memory) implementation, + // using 
IndexDeps to populate it. + idx := index.NewMemoryIndex() + id := indextest.NewIndexDeps(idx) + + // Upload a basic image + camliRootPath, err := osutil.GoPackagePath("camlistore.org") + if err != nil { + panic("Package camlistore.org no found in $GOPATH or $GOPATH not defined") + } + uploadFile := func(file string, modTime time.Time) blob.Ref { + fileName := filepath.Join(camliRootPath, "pkg", "index", "indextest", "testdata", file) + contents, err := ioutil.ReadFile(fileName) + if err != nil { + panic(err) + } + br, _ := id.UploadFile(file, string(contents), modTime) + return br + } + dudeFileRef := uploadFile("dude.jpg", time.Time{}) + pn := id.NewPlannedPermanode("pn1") + id.SetAttribute(pn, "camliContent", dudeFileRef.String()) + collec := id.NewPlannedPermanode("pn2") + id.SetAttribute(collec, "camliMember", pn.String()) + return indexAndOwner{idx, id.SignerBlobRef} + }, + query: "recent", + want: parseJSON(`{ + "recent": [ + { + "blobref": "sha1-3c8b5d36bd4182c6fe802984832f197786662ccf", + "modtime": "2011-11-28T01:32:38.000123456Z", + "owner": "sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007" + }, + { + "blobref": "sha1-7ca7743e38854598680d94ef85348f2c48a44513", + "modtime": "2011-11-28T01:32:37.000123456Z", + "owner": "sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007" + } + ], + "meta": { + "sha1-3c8b5d36bd4182c6fe802984832f197786662ccf": { + "blobRef": "sha1-3c8b5d36bd4182c6fe802984832f197786662ccf", + "camliType": "permanode", + "size": 534, + "permanode": { + "attr": { + "camliMember": [ + "sha1-7ca7743e38854598680d94ef85348f2c48a44513" + ] + }, + "modtime": "` + addToClockOrigin(2*time.Second) + `" + } + }, + "sha1-7ca7743e38854598680d94ef85348f2c48a44513": { + "blobRef": "sha1-7ca7743e38854598680d94ef85348f2c48a44513", + "camliType": "permanode", + "size": 534, + "permanode": { + "attr": { + "camliContent": [ + "sha1-e3f0ee86622dda4d7e8a4a4af51117fb79dbdbbb" + ] + }, + "modtime": "` + addToClockOrigin(1*time.Second) + `" + } + }, + 
"sha1-e3f0ee86622dda4d7e8a4a4af51117fb79dbdbbb": { + "blobRef": "sha1-e3f0ee86622dda4d7e8a4a4af51117fb79dbdbbb", + "camliType": "file", + "size": 184, + "file": { + "fileName": "dude.jpg", + "size": 1932, + "mimeType": "image/jpeg", + "wholeRef": "sha1-142b504945338158e0149d4ed25a41a522a28e88" + }, + "image": { + "width": 50, + "height": 100 + } + } + } + }`), + }, + + // Test recent permanodes with thumbnails + { + name: "recent-thumbs", + setup: func(*test.FakeIndex) index.Interface { + // Ignore the fakeindex and use the real (but in-memory) implementation, + // using IndexDeps to populate it. + idx := index.NewMemoryIndex() + id := indextest.NewIndexDeps(idx) + + pn := id.NewPlannedPermanode("pn1") + id.SetAttribute(pn, "title", "Some title") + return indexAndOwner{idx, id.SignerBlobRef} + }, + query: "recent?thumbnails=100", + want: parseJSON(`{ + "recent": [ + {"blobref": "sha1-7ca7743e38854598680d94ef85348f2c48a44513", + "modtime": "2011-11-28T01:32:37.000123456Z", + "owner": "sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007"} + ], + "meta": { + "sha1-7ca7743e38854598680d94ef85348f2c48a44513": { + "blobRef": "sha1-7ca7743e38854598680d94ef85348f2c48a44513", + "camliType": "permanode", + "permanode": { + "attr": { "title": [ "Some title" ] }, + "modtime": "` + addToClockOrigin(1*time.Second) + `" + }, + "size": 534 + } + } + }`), + }, + + // edgeto handler: put a permanode (member) in two parent + // permanodes, then delete the second and verify that edges + // back from member only reveal the first parent. + { + name: "edge-to", + setup: func(*test.FakeIndex) index.Interface { + // Ignore the fakeindex and use the real (but in-memory) implementation, + // using IndexDeps to populate it. 
+ idx := index.NewMemoryIndex() + id := indextest.NewIndexDeps(idx) + + parent1 := id.NewPlannedPermanode("pn1") // sha1-7ca7743e38854598680d94ef85348f2c48a44513 + parent2 := id.NewPlannedPermanode("pn2") + member := id.NewPlannedPermanode("member") // always sha1-9ca84f904a9bc59e6599a53f0a3927636a6dbcae + id.AddAttribute(parent1, "camliMember", member.String()) + id.AddAttribute(parent2, "camliMember", member.String()) + id.DelAttribute(parent2, "camliMember", "") + return indexAndOwner{idx, id.SignerBlobRef} + }, + query: "edgesto?blobref=sha1-9ca84f904a9bc59e6599a53f0a3927636a6dbcae", + want: parseJSON(`{ + "toRef": "sha1-9ca84f904a9bc59e6599a53f0a3927636a6dbcae", + "edgesTo": [ + {"from": "sha1-7ca7743e38854598680d94ef85348f2c48a44513", + "fromType": "permanode"} + ] + }`), + }, +} + +func marshalJSON(v interface{}) string { + b, err := json.MarshalIndent(v, "", " ") + if err != nil { + panic(err) + } + return string(b) +} + +func jmap(v interface{}) map[string]interface{} { + m := make(map[string]interface{}) + if err := json.NewDecoder(strings.NewReader(marshalJSON(v))).Decode(&m); err != nil { + panic(err) + } + return m +} + +func checkNoDups(sliceName string, tests []handlerTest) { + seen := map[string]bool{} + for _, tt := range tests { + if seen[tt.name] { + panic(fmt.Sprintf("duplicate handlerTest named %q in var %s", tt.name, sliceName)) + } + seen[tt.name] = true + } +} + +func init() { + checkNoDups("handlerTests", handlerTests) +} + +func (ht handlerTest) test(t *testing.T) { + SetTestHookBug121(func() {}) + + fakeIndex := test.NewFakeIndex() + idx := ht.setup(fakeIndex) + + indexOwner := owner + if io, ok := idx.(indexOwnerer); ok { + indexOwner = io.IndexOwner() + } + h := NewHandler(idx, indexOwner) + + var body io.Reader + var method = "GET" + if ht.postBody != "" { + method = "POST" + body = strings.NewReader(ht.postBody) + } + req, err := http.NewRequest(method, "/camli/search/"+ht.query, body) + if err != nil { + t.Fatalf("%s: bad query: %v", 
ht.name, err) + } + req.Header.Set(httputil.PathSuffixHeader, req.URL.Path[1:]) + + rr := httptest.NewRecorder() + rr.Body = new(bytes.Buffer) + + h.ServeHTTP(rr, req) + got := rr.Body.Bytes() + + if len(ht.wantDescribed) > 0 { + dr := new(DescribeResponse) + if err := json.NewDecoder(bytes.NewReader(got)).Decode(dr); err != nil { + t.Fatalf("On test %s: Non-JSON response: %s", ht.name, got) + } + var gotDesc []string + for k := range dr.Meta { + gotDesc = append(gotDesc, k) + } + sort.Strings(ht.wantDescribed) + sort.Strings(gotDesc) + if !reflect.DeepEqual(gotDesc, ht.wantDescribed) { + t.Errorf("On test %s: described blobs:\n%v\nwant:\n%v\n", + ht.name, gotDesc, ht.wantDescribed) + } + if ht.want == nil { + return + } + } + + want, _ := json.MarshalIndent(ht.want, "", " ") + trim := bytes.TrimSpace + + if bytes.Equal(trim(got), trim(want)) { + return + } + + // Try with re-encoded got, since the JSON ordering doesn't matter + // to the test, + gotj := parseJSON(string(got)) + got2, _ := json.MarshalIndent(gotj, "", " ") + if bytes.Equal(got2, want) { + return + } + diff := test.Diff(want, got2) + + t.Errorf("test %s:\nwant: %s\n got: %s\ndiff:\n%s", ht.name, want, got, diff) +} + +func TestHandler(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + return + } + defer SetTestHookBug121(func() {}) + for _, ht := range handlerTests { + ht.test(t) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/lexer.go b/vendor/github.com/camlistore/camlistore/pkg/search/lexer.go new file mode 100644 index 00000000..ea4faa90 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/lexer.go @@ -0,0 +1,315 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This is the lexer for search expressions (see expr.go). + +package search + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +type tokenType int + +const ( + tokenAnd tokenType = iota + tokenArg + tokenClose + tokenColon + tokenEOF + tokenError + tokenLiteral + tokenNot + tokenOpen + tokenOr + tokenPredicate + tokenQuotedArg + tokenQuotedLiteral +) + +const ( + eof = -1 // -1 is unused in utf8 + whitespace = "\t\n\f\v\r " + opBound = whitespace + "(" +) + +// IsSearchWordRune defines the runes that can be used in unquoted predicate arguments +// or unquoted literals. These are all non-space unicode characters except ':' which is +// used for predicate marking, and '(', ')', which are used for predicate grouping. 
+func isSearchWordRune(r rune) bool { + switch r { + case ':', ')', '(', eof: + return false + } + return !unicode.IsSpace(r) +} + +type token struct { + typ tokenType + val string + start int +} + +func (t token) String() string { + switch t.typ { + case tokenEOF: + return "EOF" + case tokenError: + return fmt.Sprintf("{err:%q at pos: %d}", t.val, t.start) + } + return fmt.Sprintf("{t:%v,%q (col: %d)}", t.typ, t.val, t.start) +} + +type lexer struct { + input string + start int + pos int + width int + tokens chan token + state stateFn +} + +func (l *lexer) emit(typ tokenType) { + l.tokens <- token{typ, l.input[l.start:l.pos], l.start} + l.start = l.pos +} + +func (l *lexer) next() (r rune) { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + l.pos += l.width + return +} + +func (l *lexer) ignore() { + l.start = l.pos +} + +func (l *lexer) backup() { + l.pos -= l.width +} + +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +func (l *lexer) accept(valid string) bool { + if strings.IndexRune(valid, l.next()) >= 0 { + return true + } + l.backup() + return false +} + +func (l *lexer) acceptString(s string) bool { + for _, r := range s { + if l.next() != r { + l.backup() + return false + } + } + return true +} + +func (l *lexer) acceptRun(valid string) { + for strings.IndexRune(valid, l.next()) >= 0 { + } + l.backup() +} + +func (l *lexer) acceptRunFn(valid func(rune) bool) { + for valid(l.next()) { + } + l.backup() +} + +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.tokens <- token{ + typ: tokenError, + val: fmt.Sprintf(format, args...), + start: l.start, + } + return nil +} + +func lex(input string) (*lexer, chan token) { + l := &lexer{ + input: input, + tokens: make(chan token), + state: readExp, + } + go l.run() + return l, l.tokens +} + +func (l *lexer) run() { + for { + if l.state == nil { + close(l.tokens) + return + } + l.state = l.state(l) + } 
+} + +// +// State functions +// +type stateFn func(*lexer) stateFn + +func readNeg(l *lexer) stateFn { + l.accept("-") + l.emit(tokenNot) + return readExp +} + +func readClose(l *lexer) stateFn { + l.accept(")") + l.emit(tokenClose) + return readOperator +} + +func readOpen(l *lexer) stateFn { + l.accept("(") + l.emit(tokenOpen) + return readExp +} + +func readColon(l *lexer) stateFn { + l.accept(":") + l.emit(tokenColon) + return readArg +} + +func readPredicate(l *lexer) stateFn { + l.acceptRunFn(unicode.IsLetter) + switch l.peek() { + case ':': + l.emit(tokenPredicate) + return readColon + } + return readLiteral +} + +func readLiteral(l *lexer) stateFn { + l.acceptRunFn(isSearchWordRune) + l.emit(tokenLiteral) + return readOperator +} + +func readArg(l *lexer) stateFn { + if l.peek() == '"' { + return readQuotedArg + } + l.acceptRunFn(isSearchWordRune) + l.emit(tokenArg) + if l.peek() == ':' { + return readColon + } + return readOperator +} + +func readAND(l *lexer) stateFn { + if l.acceptString("and") && l.accept(opBound) { + l.backup() + l.emit(tokenAnd) + return readExp + } else { + return readPredicate + } +} + +func readOR(l *lexer) stateFn { + if l.acceptString("or") && l.accept(opBound) { + l.backup() + l.emit(tokenOr) + return readExp + } else { + return readPredicate + } +} + +func runQuoted(l *lexer) bool { + l.accept("\"") + for { + r := l.next() + switch r { + case eof: + return false + case '\\': + l.next() + case '"': + return true + } + } +} + +func readQuotedLiteral(l *lexer) stateFn { + if !runQuoted(l) { + return l.errorf("Unclosed quote") + } + l.emit(tokenQuotedLiteral) + return readOperator +} + +func readQuotedArg(l *lexer) stateFn { + if !runQuoted(l) { + return l.errorf("Unclosed quote") + } + l.emit(tokenQuotedArg) + if l.peek() == ':' { + return readColon + } + return readOperator +} + +func readExp(l *lexer) stateFn { + l.acceptRun(whitespace) + l.ignore() + switch l.peek() { + case eof: + return nil + case '(': + return readOpen + 
case ')': + return readClose + case '-': + return readNeg + case '"': + return readQuotedLiteral + } + return readPredicate +} + +func readOperator(l *lexer) stateFn { + l.acceptRun(whitespace) + l.ignore() + switch l.peek() { + case 'a': + return readAND + case 'o': + return readOR + } + return readExp +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/lexer_test.go b/vendor/github.com/camlistore/camlistore/pkg/search/lexer_test.go new file mode 100644 index 00000000..2042b7e9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/lexer_test.go @@ -0,0 +1,191 @@ +package search + +import ( + "reflect" + "testing" +) + +const scaryQuote = `"\"Hi there\""` + +var lexerTests = []struct { + in string + want []token +}{ + { + in: "width:++1", + want: []token{ + {tokenPredicate, "width", 0}, + {tokenColon, ":", 5}, + {tokenArg, "++1", 6}, + }, + }, + + { + in: "and and and", + want: []token{ + {tokenLiteral, "and", 0}, + {tokenAnd, "and", 4}, + {tokenLiteral, "and", 8}, + }, + }, + + { + in: "and nd and", + want: []token{ + {tokenLiteral, "and", 0}, + {tokenLiteral, "nd", 4}, + {tokenLiteral, "and", 7}, + }, + }, + + { + in: "or or or", + want: []token{ + {tokenLiteral, "or", 0}, + {tokenOr, "or", 3}, + {tokenLiteral, "or", 6}, + }, + }, + + { + in: "or r or", + want: []token{ + {tokenLiteral, "or", 0}, + {tokenLiteral, "r", 3}, + {tokenLiteral, "or", 5}, + }, + }, + + { + in: "(or or or) and or", + want: []token{ + {tokenOpen, "(", 0}, + {tokenLiteral, "or", 1}, + {tokenOr, "or", 4}, + {tokenLiteral, "or", 7}, + {tokenClose, ")", 9}, + {tokenAnd, "and", 11}, + {tokenLiteral, "or", 15}, + }, + }, + + { + in: `(or or "or) and or`, + want: []token{ + {tokenOpen, "(", 0}, + {tokenLiteral, "or", 1}, + {tokenOr, "or", 4}, + {tokenError, "Unclosed quote", 7}, + }, + }, + + { + in: "bar and baz", + want: []token{{tokenLiteral, "bar", 0}, {tokenAnd, "and", 4}, {tokenLiteral, "baz", 8}}, + }, + + { + in: "foo or bar", + want: 
[]token{{tokenLiteral, "foo", 0}, {tokenOr, "or", 4}, {tokenLiteral, "bar", 7}}, + }, + + { + in: "foo or (bar )", + want: []token{{tokenLiteral, "foo", 0}, {tokenOr, "or", 4}, {tokenOpen, "(", 7}, {tokenLiteral, "bar", 8}, {tokenClose, ")", 12}}, + }, + + { + in: "foo or bar:foo:baz", + want: []token{ + {tokenLiteral, "foo", 0}, + {tokenOr, "or", 4}, + {tokenPredicate, "bar", 7}, + {tokenColon, ":", 10}, + {tokenArg, "foo", 11}, + {tokenColon, ":", 14}, + {tokenArg, "baz", 15}, + }, + }, + + { + in: "--foo or - bar", + want: []token{ + {tokenNot, "-", 0}, + {tokenNot, "-", 1}, + {tokenLiteral, "foo", 2}, + {tokenOr, "or", 6}, + {tokenNot, "-", 9}, + {tokenLiteral, "bar", 11}, + }, + }, + + { + in: "foo:bar:baz or bar", + want: []token{ + {tokenPredicate, "foo", 0}, + {tokenColon, ":", 3}, + {tokenArg, "bar", 4}, + {tokenColon, ":", 7}, + {tokenArg, "baz", 8}, + {tokenOr, "or", 12}, + {tokenLiteral, "bar", 15}, + }, + }, + + { + in: "is:pano or", + want: []token{ + {tokenPredicate, "is", 0}, + {tokenColon, ":", 2}, + {tokenArg, "pano", 3}, + {tokenLiteral, "or", 8}, + }, + }, + + { + in: "foo:" + scaryQuote + " or bar", + want: []token{ + {tokenPredicate, "foo", 0}, + {tokenColon, ":", 3}, + {tokenQuotedArg, scaryQuote, 4}, + {tokenOr, "or", 19}, + {tokenLiteral, "bar", 22}, + }, + }, + + { + in: scaryQuote, + want: []token{ + {tokenQuotedLiteral, scaryQuote, 0}}, + }, + + { + in: "foo:", + want: []token{ + {tokenPredicate, "foo", 0}, + {tokenColon, ":", 3}, + {tokenArg, "", 4}, + }, + }, +} + +func array(in string) (parsed []token) { + _, tokens := lex(in) + for token := range tokens { + if token.typ == tokenEOF { + break + } + parsed = append(parsed, token) + } + return +} + +func TestLex(t *testing.T) { + for _, tt := range lexerTests { + + tokens := array(tt.in) + if !reflect.DeepEqual(tokens, tt.want) { + t.Errorf("Got lex(%q)=%v expected %v", tt.in, tokens, tt.want) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/match_test.go 
b/vendor/github.com/camlistore/camlistore/pkg/search/match_test.go new file mode 100644 index 00000000..d20a3f00 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/match_test.go @@ -0,0 +1,75 @@ +package search + +import ( + "testing" + "time" + + "camlistore.org/pkg/types" +) + +const year = time.Hour * 24 * 365 + +func TestTimeConstraint(t *testing.T) { + tests := []struct { + c *TimeConstraint + t time.Time + want bool + }{ + { + &TimeConstraint{ + Before: types.Time3339(time.Unix(124, 0)), + }, + time.Unix(123, 0), + true, + }, + { + &TimeConstraint{ + Before: types.Time3339(time.Unix(123, 0)), + }, + time.Unix(123, 1), + false, + }, + { + &TimeConstraint{ + After: types.Time3339(time.Unix(123, 0)), + }, + time.Unix(123, 0), + true, + }, + { + &TimeConstraint{ + After: types.Time3339(time.Unix(123, 0)), + }, + time.Unix(123, 1), + true, + }, + { + &TimeConstraint{ + After: types.Time3339(time.Unix(123, 0)), + }, + time.Unix(122, 0), + false, + }, + { + // This test will pass for 20 years at least. + &TimeConstraint{ + InLast: 20 * year, + }, + time.Unix(1384034605, 0), + true, + }, + { + &TimeConstraint{ + InLast: 1 * year, + }, + time.Unix(123, 0), + false, + }, + } + for i, tt := range tests { + got := tt.c.timeMatches(tt.t) + if got != tt.want { + t.Errorf("%d. matches(tc=%+v, t=%v) = %v; want %v", i, tt.c, tt.t, got, tt.want) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/predicate.go b/vendor/github.com/camlistore/camlistore/pkg/search/predicate.go new file mode 100644 index 00000000..f30ba2fe --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/predicate.go @@ -0,0 +1,689 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// These are the search-atom definitions (see expr.go). + +package search + +import ( + "bytes" + "encoding/json" + "fmt" + "regexp" + "strconv" + "strings" + "time" + + "camlistore.org/pkg/context" + "camlistore.org/pkg/geocode" + "camlistore.org/pkg/types" +) + +const base = "0000-01-01T00:00:00Z" + +var ( + // used for width/height ranges. 10 is max length of 32-bit + // int (strconv.Atoi on 32-bit platforms), even though a max + // JPEG dimension is only 16-bit. + whRangeExpr = regexp.MustCompile(`^(\d{0,10})-(\d{0,10})$`) + whValueExpr = regexp.MustCompile(`^(\d{1,10})$`) +) + +// Atoms holds the parsed words of an atom without the colons. +// Eg. tag:holiday becomes atom{"tag", []string{"holiday"}} +// Note that the form of camlisearch atoms implies that len(args) > 0 +type atom struct { + predicate string + args []string +} + +func (a atom) String() string { + s := bytes.NewBufferString(a.predicate) + for _, a := range a.args { + s.WriteRune(':') + s.WriteString(a) + } + return s.String() +} + +// Keyword determines by its matcher when a predicate is used. +type keyword interface { + // Name is the part before the first colon, or the whole atom. + Name() string + // Description provides user documentation for this keyword. Should + // return documentation for max/min values, usage help, or examples. + Description() string + // Match gets called with the predicate and arguments that were parsed. + // It should return true if it wishes to handle this search atom. + // An error if the number of arguments mismatches. 
+ Match(a atom) (bool, error) + // Predicates will be called with the args array from an atom instance. + // Note that len(args) > 0 (see atom-struct comment above). + // It should return a pointer to a Constraint object, expressing the meaning of + // its keyword. + Predicate(ctx *context.Context, args []string) (*Constraint, error) +} + +var keywords []keyword + +// RegisterKeyword registers search atom types. +// TODO (sls) Export for applications? (together with keyword and atom) +func registerKeyword(k keyword) { + keywords = append(keywords, k) +} + +// SearchHelp returns JSON of an array of predicate names and descriptions. +func SearchHelp() string { + type help struct{ Name, Description string } + h := []help{} + for _, p := range keywords { + h = append(h, help{p.Name(), p.Description()}) + } + b, err := json.MarshalIndent(h, "", " ") + if err != nil { + return "Error marshalling" + } + return string(b) +} + +func init() { + // Core predicates + registerKeyword(newAfter()) + registerKeyword(newBefore()) + registerKeyword(newAttribute()) + registerKeyword(newChildrenOf()) + registerKeyword(newFormat()) + registerKeyword(newTag()) + registerKeyword(newTitle()) + + // Image predicates + registerKeyword(newIsImage()) + registerKeyword(newHeight()) + registerKeyword(newIsLandscape()) + registerKeyword(newIsPano()) + registerKeyword(newIsPortait()) + registerKeyword(newWidth()) + + // Custom predicates + registerKeyword(newIsPost()) + registerKeyword(newIsCheckin()) + + // Location predicates + registerKeyword(newHasLocation()) + registerKeyword(newLocation()) +} + +// Helper implementation for mixing into keyword implementations +// that match the full keyword, i.e. 
'is:pano' +type matchEqual string + +func (me matchEqual) Name() string { + return string(me) +} + +func (me matchEqual) Match(a atom) (bool, error) { + return string(me) == a.String(), nil +} + +// Helper implementation for mixing into keyword implementations +// that match only the beginning of the keyword, and get their paramertes from +// the rest, i.e. 'width:' for searches like 'width:100-200'. +type matchPrefix struct { + prefix string + count int +} + +func newMatchPrefix(p string) matchPrefix { + return matchPrefix{prefix: p, count: 1} +} + +func (mp matchPrefix) Name() string { + return mp.prefix +} +func (mp matchPrefix) Match(a atom) (bool, error) { + if mp.prefix == a.predicate { + if len(a.args) != mp.count { + return true, fmt.Errorf("Wrong number of arguments for %q, given %d, expected %d", mp.prefix, len(a.args), mp.count) + } else { + return true, nil + } + } else { + return false, nil + } +} + +// Core predicates + +type after struct { + matchPrefix +} + +func newAfter() keyword { + return after{newMatchPrefix("after")} +} + +func (a after) Description() string { + return "date format is RFC3339, but can be shortened as required.\n" + + "i.e. 2011-01-01 is Jan 1 of year 2011 and \"2011\" means the same." +} + +func (a after) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + t, err := parseTimePrefix(args[0]) + if err != nil { + return nil, err + } + tc := &TimeConstraint{} + tc.After = types.Time3339(t) + c := &Constraint{ + Permanode: &PermanodeConstraint{ + Time: tc, + }, + } + return c, nil +} + +type before struct { + matchPrefix +} + +func newBefore() keyword { + return before{newMatchPrefix("before")} +} + +func (b before) Description() string { + return "date format is RFC3339, but can be shortened as required.\n" + + "i.e. 2011-01-01 is Jan 1 of year 2011 and \"2011\" means the same." 
+} + +func (b before) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + t, err := parseTimePrefix(args[0]) + if err != nil { + return nil, err + } + tc := &TimeConstraint{} + tc.Before = types.Time3339(t) + c := &Constraint{ + Permanode: &PermanodeConstraint{ + Time: tc, + }, + } + return c, nil +} + +type attribute struct { + matchPrefix +} + +func newAttribute() keyword { + return attribute{matchPrefix{"attr", 2}} +} + +func (a attribute) Description() string { + return "match on attribute. Use attr:foo:bar to match nodes having their foo\n" + + "attribute set to bar or attr:foo:~bar to do a substring\n" + + "case-insensitive search for 'bar' in attribute foo" +} + +func (a attribute) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + c := attrConst(args[0], args[1]) + if strings.HasPrefix(args[1], "~") { + // Substring. Hack. Figure out better way to do this. + c.Permanode.Value = "" + c.Permanode.ValueMatches = &StringConstraint{ + Contains: args[1][1:], + CaseInsensitive: true, + } + } + return c, nil +} + +type childrenOf struct { + matchPrefix +} + +func newChildrenOf() keyword { + return childrenOf{newMatchPrefix("childrenof")} +} + +func (k childrenOf) Description() string { + return "Find child permanodes of a parent permanode (or prefix of a parent\n" + + "permanode): childrenof:sha1-527cf12 Only matches permanodes currently." +} + +func (k childrenOf) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + c := &Constraint{ + Permanode: &PermanodeConstraint{ + Relation: &RelationConstraint{ + Relation: "parent", + Any: &Constraint{ + BlobRefPrefix: args[0], + }, + }, + }, + } + return c, nil +} + +type format struct { + matchPrefix +} + +func newFormat() keyword { + return format{newMatchPrefix("format")} +} + +func (f format) Description() string { + return "file's format (or MIME-type) such as jpg, pdf, tiff." 
+} + +func (f format) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + mimeType, err := mimeFromFormat(args[0]) + if err != nil { + return nil, err + } + c := permOfFile(&FileConstraint{ + MIMEType: &StringConstraint{ + Equals: mimeType, + }, + }) + return c, nil +} + +type tag struct { + matchPrefix +} + +func newTag() keyword { + return tag{newMatchPrefix("tag")} +} + +func (t tag) Description() string { + return "match on a tag" +} + +func (t tag) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + return attrConst("tag", args[0]), nil +} + +type title struct { + matchPrefix +} + +func newTitle() keyword { + return title{newMatchPrefix("title")} +} + +func (t title) Description() string { + return "match nodes containing substring in their title" +} + +func (t title) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + c := &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "title", + SkipHidden: true, + ValueMatches: &StringConstraint{ + Contains: args[0], + CaseInsensitive: true, + }, + }, + } + return c, nil +} + +// Image predicates + +type isImage struct { + matchEqual +} + +func newIsImage() keyword { + return isImage{"is:image"} +} + +func (k isImage) Description() string { + return "object is an image" +} + +func (k isImage) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + c := &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + }, + }, + }, + } + return c, nil +} + +type isLandscape struct { + matchEqual +} + +func newIsLandscape() keyword { + return isLandscape{"is:landscape"} +} + +func (k isLandscape) Description() string { + return "the image has a landscape aspect" +} + +func (k isLandscape) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + return whRatio(&FloatConstraint{Min: 1.0}), nil +} + +type isPano struct { + matchEqual +} + +func newIsPano() 
keyword { + return isPano{"is:pano"} +} + +func (k isPano) Description() string { + return "the image's aspect ratio is over 2 - panorama picture." +} + +func (k isPano) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + return whRatio(&FloatConstraint{Min: 2.0}), nil +} + +type isPortait struct { + matchEqual +} + +func newIsPortait() keyword { + return isPortait{"is:portrait"} +} + +func (k isPortait) Description() string { + return "the image has a portrait aspect" +} + +func (k isPortait) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + return whRatio(&FloatConstraint{Max: 1.0}), nil +} + +type width struct { + matchPrefix +} + +func newWidth() keyword { + return width{newMatchPrefix("width")} +} + +func (w width) Description() string { + return "use width:min-max to match images having a width of at least min\n" + + "and at most max. Use width:min- to specify only an underbound and\n" + + "width:-max to specify only an upperbound.\n" + + "Exact matches should use width:640 " +} + +func (w width) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + mins, maxs, err := parseWHExpression(args[0]) + if err != nil { + return nil, err + } + c := permOfFile(&FileConstraint{ + IsImage: true, + Width: whIntConstraint(mins, maxs), + }) + return c, nil +} + +type height struct { + matchPrefix +} + +func newHeight() keyword { + return height{newMatchPrefix("height")} +} + +func (h height) Description() string { + return "use height:min-max to match images having a height of at least min\n" + + "and at most max. 
Use height:min- to specify only an underbound and\n" + + "height:-max to specify only an upperbound.\n" + + "Exact matches should use height:480" +} + +func (h height) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + mins, maxs, err := parseWHExpression(args[0]) + if err != nil { + return nil, err + } + c := permOfFile(&FileConstraint{ + IsImage: true, + Height: whIntConstraint(mins, maxs), + }) + return c, nil +} + +// Location predicates + +type location struct { + matchPrefix +} + +func newLocation() keyword { + return location{newMatchPrefix("loc")} +} + +func (l location) Description() string { + return "matches images and permanodes having a location near\n" + + "the specified location. Locations are resolved using\n" + + "maps.googleapis.com. For example: loc:\"new york, new york\" " +} + +func (l location) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + where := args[0] + rects, err := geocode.Lookup(ctx, where) + if err != nil { + return nil, err + } + if len(rects) == 0 { + return nil, fmt.Errorf("No location found for %q", where) + } + var c *Constraint + for i, rect := range rects { + loc := &LocationConstraint{ + West: rect.SouthWest.Long, + East: rect.NorthEast.Long, + North: rect.NorthEast.Lat, + South: rect.SouthWest.Lat, + } + fileLoc := permOfFile(&FileConstraint{ + IsImage: true, + Location: loc, + }) + permLoc := &Constraint{ + Permanode: &PermanodeConstraint{ + Location: loc, + }, + } + rectConstraint := orConst(fileLoc, permLoc) + if i == 0 { + c = rectConstraint + } else { + c = orConst(c, rectConstraint) + } + } + return c, nil +} + +type hasLocation struct { + matchEqual +} + +func newHasLocation() keyword { + return hasLocation{"has:location"} +} + +func (h hasLocation) Description() string { + return "matches images and permanodes that have a location (GPSLatitude\n" + + "and GPSLongitude can be retrieved from the image's EXIF tags)." 
+} + +func (h hasLocation) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + fileLoc := permOfFile(&FileConstraint{ + IsImage: true, + Location: &LocationConstraint{ + Any: true, + }, + }) + permLoc := &Constraint{ + Permanode: &PermanodeConstraint{ + Location: &LocationConstraint{ + Any: true, + }, + }, + } + return orConst(fileLoc, permLoc), nil +} + +// Helpers + +func attrConst(attr, val string) *Constraint { + c := &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: attr, + SkipHidden: true, + }, + } + if val == "" { + c.Permanode.ValueMatches = &StringConstraint{Empty: true} + } else { + c.Permanode.Value = val + } + return c +} + +func permOfFile(fc *FileConstraint) *Constraint { + return &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{File: fc}, + }, + } +} + +func whRatio(fc *FloatConstraint) *Constraint { + return permOfFile(&FileConstraint{ + IsImage: true, + WHRatio: fc, + }) +} + +func parseWHExpression(expr string) (min, max string, err error) { + if m := whRangeExpr.FindStringSubmatch(expr); m != nil { + return m[1], m[2], nil + } + if m := whValueExpr.FindStringSubmatch(expr); m != nil { + return m[1], m[1], nil + } + return "", "", fmt.Errorf("Unable to parse %q as range, wanted something like 480-1024, 480-, -1024 or 1024", expr) +} + +func parseTimePrefix(when string) (time.Time, error) { + if len(when) < len(base) { + when += base[len(when):] + } + return time.Parse(time.RFC3339, when) +} + +func whIntConstraint(mins, maxs string) *IntConstraint { + ic := &IntConstraint{} + if mins != "" { + if mins == "0" { + ic.ZeroMin = true + } else { + n, _ := strconv.Atoi(mins) + ic.Min = int64(n) + } + } + if maxs != "" { + if maxs == "0" { + ic.ZeroMax = true + } else { + n, _ := strconv.Atoi(maxs) + ic.Max = int64(n) + } + } + return ic +} + +func mimeFromFormat(v string) (string, error) { + if strings.Contains(v, "/") { + return v, nil + } + switch v { + case "jpg", "jpeg": + 
return "image/jpeg", nil + case "gif": + return "image/gif", nil + case "png": + return "image/png", nil + case "pdf": + return "application/pdf", nil // RFC 3778 + } + return "", fmt.Errorf("Unknown format: %s", v) +} + +// Custom predicates + +type isPost struct { + matchEqual +} + +func newIsPost() keyword { + return isPost{"is:post"} +} + +func (k isPost) Description() string { + return "matches tweets, status updates, blog posts, etc" +} + +func (k isPost) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + return &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliNodeType", + Value: "twitter.com:tweet", + }, + }, nil +} + +type isCheckin struct { + matchEqual +} + +func newIsCheckin() keyword { + return isCheckin{"is:checkin"} +} + +func (k isCheckin) Description() string { + return "matches location check-ins (foursquare, etc)" +} + +func (k isCheckin) Predicate(ctx *context.Context, args []string) (*Constraint, error) { + return &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliNodeType", + Value: "foursquare.com:checkin", + }, + }, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/predicate_test.go b/vendor/github.com/camlistore/camlistore/pkg/search/predicate_test.go new file mode 100644 index 00000000..76425204 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/predicate_test.go @@ -0,0 +1,669 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package search + +import ( + "encoding/json" + "net/http" + "reflect" + "strings" + "testing" + "time" + + "camlistore.org/pkg/context" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/types" +) + +func TestSearchHelp(t *testing.T) { + s := SearchHelp() + type help struct{ Name, Description string } + h := []help{} + err := json.Unmarshal([]byte(s), &h) + if err != nil { + t.Fatal(err) + } + count := len(keywords) + if len(h) != count { + t.Errorf("Expected %d help items, got %d", count, len(h)) + } +} + +type keywordTestcase struct { + name string + object keyword + args []string + want *Constraint + errContains string + ctx *context.Context +} + +var uitdamLC = &LocationConstraint{ + North: 52.4486802, + West: 5.0353014, + East: 5.094973299999999, + South: 52.4152441, +} + +func newGeocodeContext() *context.Context { + url := "https://maps.googleapis.com/maps/api/geocode/json?address=Uitdam&sensor=false" + transport := httputil.NewFakeTransport(map[string]func() *http.Response{url: httputil.StaticResponder(uitdamGoogle)}) + return context.New(context.WithHTTPClient(&http.Client{Transport: transport})) +} + +var uitdamGoogle = `HTTP/1.1 200 OK +Content-Type: application/json; charset=UTF-8 + Date: Tue, 13 May 2014 21:15:01 GMT + Expires: Wed, 14 May 2014 21:15:01 GMT + Cache-Control: public, max-age=86400 + Vary: Accept-Language + Access-Control-Allow-Origin: * + Server: mafe + X-XSS-Protection: 1; mode=block + X-Frame-Options: SAMEORIGIN + Transfer-Encoding: chunked + + +{ + "results" : [ + { + "address_components" : [ + { + "long_name" : "Uitdam", + "short_name" : "Uitdam", + "types" : [ "locality", "political" ] + }, + { + "long_name" : "Waterland", + "short_name" : "Waterland", + "types" : [ "administrative_area_level_2", "political" ] + }, + { + "long_name" : "North Holland", + "short_name" : "NH", + "types" : [ "administrative_area_level_1", "political" ] + }, + { + "long_name" : "The Netherlands", + "short_name" : "NL", + "types" : [ "country", 
"political" ] + }, + { + "long_name" : "1154", + "short_name" : "1154", + "types" : [ "postal_code_prefix", "postal_code" ] + } + ], + "formatted_address" : "1154 Uitdam, The Netherlands", + "geometry" : { + "bounds" : { + "northeast" : { + "lat" : 52.4486802, + "lng" : 5.094973299999999 + }, + "southwest" : { + "lat" : 52.4152441, + "lng" : 5.0353014 + } + }, + "location" : { + "lat" : 52.4210268, + "lng" : 5.0724962 + }, + "location_type" : "APPROXIMATE", + "viewport" : { + "northeast" : { + "lat" : 52.4486802, + "lng" : 5.094973299999999 + }, + "southwest" : { + "lat" : 52.4152441, + "lng" : 5.0353014 + } + } + }, + "types" : [ "locality", "political" ] + } + ], + "status" : "OK" +} +` +var testtime = time.Date(2013, time.February, 3, 0, 0, 0, 0, time.UTC) + +var keywordTests = []keywordTestcase{ + // Core predicates + { + object: newAfter(), + args: []string{"faulty"}, + errContains: "faulty", + }, + + { + object: newAfter(), + args: []string{"2013-02-03"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Time: &TimeConstraint{ + After: types.Time3339(testtime), + }, + }, + }, + }, + + { + object: newBefore(), + args: []string{"faulty"}, + errContains: "faulty", + }, + + { + object: newBefore(), + args: []string{"2013-02-03"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Time: &TimeConstraint{ + Before: types.Time3339(testtime), + }, + }, + }, + }, + + { + object: newAttribute(), + args: []string{"foo", "bar"}, + want: attrfoobarC, + }, + + { + object: newAttribute(), + args: []string{"foo", ""}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "foo", + ValueMatches: &StringConstraint{Empty: true}, + SkipHidden: true, + }, + }, + }, + + { + object: newAttribute(), + args: []string{"foo", "~bar"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "foo", + ValueMatches: &StringConstraint{ + Contains: "bar", + CaseInsensitive: true, + }, + SkipHidden: true, + }, + }, + }, + + { + object: newChildrenOf(), + args: 
[]string{"foo"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Relation: &RelationConstraint{ + Relation: "parent", + Any: &Constraint{ + BlobRefPrefix: "foo", + }, + }, + }, + }, + }, + + { + object: newFormat(), + args: []string{"faulty"}, + errContains: "Unknown format: faulty", + }, + + { + object: newFormat(), + args: []string{"pdf"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + MIMEType: &StringConstraint{ + Equals: "application/pdf", + }, + }, + }, + }, + }, + }, + + { + object: newTag(), + args: []string{"foo"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "tag", + Value: "foo", + SkipHidden: true, + }, + }, + }, + + { + object: newTag(), + args: []string{""}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "tag", + ValueMatches: &StringConstraint{Empty: true}, + SkipHidden: true, + }, + }, + }, + + { + object: newTitle(), + args: []string{""}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "title", + SkipHidden: true, + ValueMatches: &StringConstraint{ + CaseInsensitive: true, + }, + }}, + }, + + { + object: newTitle(), + args: []string{"foo"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "title", + SkipHidden: true, + ValueMatches: &StringConstraint{ + Contains: "foo", + CaseInsensitive: true, + }, + }, + }, + }, + + // Image predicates + { + object: newIsImage(), + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + }, + }, + }, + }, + }, + + { + object: newIsPano(), + want: ispanoC, + }, + + { + object: newIsLandscape(), + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + WHRatio: &FloatConstraint{ + Min: 1.0, + }, + }, + }, + }, + }, + }, + + { + object: newIsPortait(), + want: &Constraint{ 
+ Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + WHRatio: &FloatConstraint{ + Max: 1.0, + }, + }, + }, + }, + }, + }, + + { + object: newWidth(), + args: []string{""}, + errContains: "Unable to parse \"\" as range, wanted something like 480-1024, 480-, -1024 or 1024", + }, + + { + object: newWidth(), + args: []string{"100-"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Width: &IntConstraint{ + Min: 100, + }, + }, + }, + }, + }, + }, + + { + object: newWidth(), + args: []string{"0-200"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Width: &IntConstraint{ + ZeroMin: true, + Max: 200, + }, + }, + }, + }, + }, + }, + + { + object: newWidth(), + args: []string{"-200"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Width: &IntConstraint{ + Max: 200, + }, + }, + }, + }, + }, + }, + + { + object: newWidth(), + args: []string{"100-200"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Width: &IntConstraint{ + Min: 100, + Max: 200, + }, + }, + }, + }, + }, + }, + + { + object: newHeight(), + args: []string{""}, + errContains: "Unable to parse \"\" as range, wanted something like 480-1024, 480-, -1024 or 1024", + }, + + { + object: newHeight(), + args: []string{"100-200"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Height: &IntConstraint{ + Min: 100, + Max: 200, + }, + }, + }, + }, + }, + }, + + { + object: newHeight(), + args: []string{"-200"}, + want: &Constraint{ + 
Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Height: &IntConstraint{ + Max: 200, + }, + }, + }, + }, + }, + }, + + { + object: newHeight(), + args: []string{"100-"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Height: &IntConstraint{ + Min: 100, + }, + }, + }, + }, + }, + }, + + { + object: newHeight(), + args: []string{"0-200"}, + want: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Height: &IntConstraint{ + ZeroMin: true, + Max: 200, + }, + }, + }, + }, + }, + }, + + // Location predicates + { + object: newLocation(), + args: []string{"Uitdam"}, // Small dutch town + want: orConst(&Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + IsImage: true, + Location: uitdamLC, + }, + }, + }, + }, &Constraint{ + Permanode: &PermanodeConstraint{ + Location: uitdamLC, + }, + }), + ctx: newGeocodeContext(), + }, + + { + object: newHasLocation(), + want: hasLocationC, + }, +} + +func TestKeywords(t *testing.T) { + cj := func(c *Constraint) []byte { + v, err := json.MarshalIndent(c, "", " ") + if err != nil { + t.Fatal(err) + } + return v + } + for _, tt := range keywordTests { + got, err := tt.object.Predicate(tt.ctx, tt.args) + if err != nil { + if tt.errContains != "" && strings.Contains(err.Error(), tt.errContains) { + continue + } + t.Errorf("%v: %#v(%q) error: %v, but wanted an error containing: %v", tt.name, tt.object, tt.args, err, tt.errContains) + continue + } + if tt.errContains != "" { + t.Errorf("%v: %#v(%q) succeeded; want error containing %q", tt.name, tt.object, tt.args, tt.errContains) + continue + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%v: %#v(%q) got:\n%s\n\nwant:%s\n", tt.name, tt.object, 
tt.args, cj(got), cj(tt.want)) + } + } +} + +func TestParseWHExpression(t *testing.T) { + tests := []struct { + in string + wantMin string + wantMax string + errContains string + }{ + {in: "450-470", wantMin: "450", wantMax: "470"}, + {in: "450-470+", errContains: "Unable to parse \"450-470+\" as range, wanted something like 480-1024, 480-, -1024 or 1024"}, + {in: "", errContains: "Unable to parse \"\" as range, wanted something like 480-1024, 480-, -1024 or 1024"}, + {in: "450", wantMin: "450", wantMax: "450"}, + } + + for _, tt := range tests { + gotMin, gotMax, err := parseWHExpression(tt.in) + if err != nil { + if tt.errContains != "" && strings.Contains(err.Error(), tt.errContains) { + continue + } + t.Errorf("parseWHExpression(%v) error: %v, but wanted an error containing: %v", tt.in, err, tt.errContains) + continue + } + if tt.errContains != "" { + t.Errorf("parseWHExpression(%v) succeeded; want error containing %v got: %s,%s ", tt.in, tt.errContains, gotMin, gotMax) + continue + } + if !reflect.DeepEqual(gotMin, tt.wantMin) { + t.Errorf("parseWHExpression(%s) min = %v; want %v", tt.in, gotMin, tt.wantMin) + } + if !reflect.DeepEqual(gotMax, tt.wantMax) { + t.Errorf("parseWHExpression(%s) max = %v; want %v", tt.in, gotMax, tt.wantMax) + } + } +} + +func TestMatchEqual(t *testing.T) { + me := matchEqual("foo:bar:baz") + a := atom{"foo", []string{"bar", "baz"}} + + if m, _ := me.Match(a); !m { + t.Error("Expected a match") + } + + a = atom{"foo", []string{"foo", "baz"}} + if m, _ := me.Match(a); m { + t.Error("Did not expect a match") + } +} + +func TestMatchPrefix(t *testing.T) { + mp := matchPrefix{"foo", 1} + a := atom{"foo", []string{"bar"}} + if m, err := mp.Match(a); err != nil || !m { + t.Error("Expected a match") + } + + a = atom{"foo", []string{}} + if _, err := mp.Match(a); err == nil { + t.Error("Expected an error got nil") + } + a = atom{"bar", []string{}} + if m, err := mp.Match(a); err != nil || m { + t.Error("Expected simple mismatch") + } +} + 
+func TestLocationConstraint(t *testing.T) { + var c LocationConstraint + if c.matchesLatLong(1, 2) { + t.Error("zero value shouldn't match") + } + c.Any = true + if !c.matchesLatLong(1, 2) { + t.Error("Any should match") + } + + c = LocationConstraint{North: 2, South: 1, West: 0, East: 2} + tests := []struct { + lat, long float64 + want bool + }{ + {1, 1, true}, + {3, 1, false}, // too north + {1, 3, false}, // too east + {1, -1, false}, // too west + {0, 1, false}, // too south + } + for _, tt := range tests { + if got := c.matchesLatLong(tt.lat, tt.long); got != tt.want { + t.Errorf("matches(%v, %v) = %v; want %v", tt.lat, tt.long, got, tt.want) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/query.go b/vendor/github.com/camlistore/camlistore/pkg/search/query.go new file mode 100644 index 00000000..2e8bc9cb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/query.go @@ -0,0 +1,1616 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package search + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/index" + "camlistore.org/pkg/strutil" + "camlistore.org/pkg/types" + "camlistore.org/pkg/types/camtypes" +) + +type SortType int + +const ( + UnspecifiedSort SortType = iota + Unsorted + LastModifiedDesc + LastModifiedAsc + CreatedDesc + CreatedAsc + BlobRefAsc + maxSortType +) + +var sortName = map[SortType][]byte{ + Unsorted: []byte(`"unsorted"`), + LastModifiedDesc: []byte(`"-mod"`), + LastModifiedAsc: []byte(`"mod"`), + CreatedDesc: []byte(`"-created"`), + CreatedAsc: []byte(`"created"`), + BlobRefAsc: []byte(`"blobref"`), +} + +func (t SortType) MarshalJSON() ([]byte, error) { + v, ok := sortName[t] + if !ok { + panic("unnamed SortType " + strconv.Itoa(int(t))) + } + return v, nil +} + +func (t *SortType) UnmarshalJSON(v []byte) error { + for n, nv := range sortName { + if bytes.Equal(v, nv) { + *t = n + return nil + } + } + return fmt.Errorf("Bogus search sort type %q", v) +} + +type SearchQuery struct { + // Exactly one of Expression or Contraint must be set. + // If an Expression is set, it's compiled to a Constraint. + + // Expression is a textual search query in minimal form, + // e.g. "hawaii before:2008" or "tag:foo" or "foo" or "location:portland" + // See expr.go and expr_test.go for all the operators. + Expression string `json:"expression,omitempty"` + Constraint *Constraint `json:"constraint,omitempty"` + + // Limit is the maximum number of returned results. A negative value means no + // limit. If unspecified, a default (of 200) will be used. + Limit int `json:"limit,omitempty"` + + // Sort specifies how the results will be sorted. It defaults to CreatedDesc when the + // query is about permanodes only. 
+ Sort SortType `json:"sort,omitempty"` + + // Around specifies that the results, after sorting, should be centered around + // this result. If Around is not found the returned results will be empty. + // If both Continue and Around are set, an error is returned. + Around blob.Ref `json:"around,omitempty"` + + // Continue specifies the opaque token (as returned by a + // SearchResult) for where to continue fetching results when + // the Limit on a previous query was interrupted. + // Continue is only valid for the same query (Expression or Constraint), + // Limit, and Sort values. + // If empty, the top-most query results are returned, as given + // by Limit and Sort. + // Continue is not compatible with the Around option. + Continue string `json:"continue,omitempty"` + + // If Describe is specified, the matched blobs are also described, + // as if the Describe.BlobRefs field was populated. + Describe *DescribeRequest `json:"describe,omitempty"` +} + +func (q *SearchQuery) URLSuffix() string { return "camli/search/query" } + +func (q *SearchQuery) fromHTTP(req *http.Request) error { + dec := json.NewDecoder(io.LimitReader(req.Body, 1<<20)) + if err := dec.Decode(q); err != nil { + return err + } + + if q.Constraint == nil && q.Expression == "" { + return errors.New("query must have at least a constraint or an expression") + } + + return nil +} + +// exprQuery optionally specifies the *SearchQuery prototype that was generated +// by parsing the search expression +func (q *SearchQuery) plannedQuery(expr *SearchQuery) *SearchQuery { + pq := new(SearchQuery) + *pq = *q + if expr != nil { + pq.Constraint = expr.Constraint + if expr.Sort != 0 { + pq.Sort = expr.Sort + } + if expr.Limit != 0 { + pq.Limit = expr.Limit + } + } + if pq.Sort == UnspecifiedSort { + if pq.Constraint.onlyMatchesPermanode() { + pq.Sort = CreatedDesc + } + } + if pq.Limit == 0 { + pq.Limit = 200 // arbitrary + } + if err := pq.addContinueConstraint(); err != nil { + log.Printf("Ignoring continue 
token: %v", err) + } + pq.Constraint = optimizePlan(pq.Constraint) + return pq +} + +// For permanodes, the continue token is (currently!) +// of form "pn:nnnnnnn:sha1-xxxxx" where "pn" is a +// literal, "nnnnnn" is the UnixNano of the time +// (modified or created) and "sha1-xxxxx" was the item +// seen in the final result set, used as a tie breaker +// if multiple permanodes had the same mod/created +// time. This format is NOT an API promise or standard and +// clients should not rely on it. It may change without notice +func parsePermanodeContinueToken(v string) (t time.Time, br blob.Ref, ok bool) { + if !strings.HasPrefix(v, "pn:") { + return + } + v = v[len("pn:"):] + col := strings.Index(v, ":") + if col < 0 { + return + } + nano, err := strconv.ParseUint(v[:col], 10, 64) + if err != nil { + return + } + t = time.Unix(0, int64(nano)) + br, ok = blob.Parse(v[col+1:]) + return +} + +// addContinueConstraint conditionally modifies q.Constraint to scroll +// past the results as indicated by q.Continue. 
+func (q *SearchQuery) addContinueConstraint() error { + cont := q.Continue + if cont == "" { + return nil + } + if q.Constraint.onlyMatchesPermanode() { + tokent, lastbr, ok := parsePermanodeContinueToken(cont) + if !ok { + return errors.New("Unexpected continue token") + } + if q.Sort == LastModifiedDesc || q.Sort == CreatedDesc { + var lastMod, lastCreated time.Time + switch q.Sort { + case LastModifiedDesc: + lastMod = tokent + case CreatedDesc: + lastCreated = tokent + } + baseConstraint := q.Constraint + q.Constraint = &Constraint{ + Logical: &LogicalConstraint{ + Op: "and", + A: &Constraint{ + Permanode: &PermanodeConstraint{ + Continue: &PermanodeContinueConstraint{ + LastCreated: lastCreated, + LastMod: lastMod, + Last: lastbr, + }, + }, + }, + B: baseConstraint, + }, + } + } + return nil + } + return errors.New("token not valid for query type") +} + +func (q *SearchQuery) checkValid(ctx *context.Context) (sq *SearchQuery, err error) { + if q.Sort >= maxSortType || q.Sort < 0 { + return nil, errors.New("invalid sort type") + } + if q.Continue != "" && q.Around.Valid() { + return nil, errors.New("Continue and Around parameters are mutually exclusive") + } + if q.Constraint == nil { + if expr := q.Expression; expr != "" { + sq, err := parseExpression(ctx, expr) + if err != nil { + return nil, fmt.Errorf("Error parsing search expression %q: %v", expr, err) + } + if err := sq.Constraint.checkValid(); err != nil { + return nil, fmt.Errorf("Internal error: parseExpression(%q) returned invalid constraint: %v", expr, err) + } + return sq, nil + } + return nil, errors.New("no search constraint or expression") + } + return nil, q.Constraint.checkValid() +} + +// SearchResult is the result of the Search method for a given SearchQuery. 
+type SearchResult struct { + Blobs []*SearchResultBlob `json:"blobs"` + Describe *DescribeResponse `json:"description"` + + // Continue optionally specifies the continuation token to to + // continue fetching results in this result set, if interrupted + // by a Limit. + Continue string `json:"continue,omitempty"` +} + +type SearchResultBlob struct { + Blob blob.Ref `json:"blob"` + // ... file info, permanode info, blob info ... ? +} + +func (r *SearchResultBlob) String() string { + return fmt.Sprintf("[blob: %s]", r.Blob) +} + +// Constraint specifies a blob matching constraint. +// A blob matches if it matches all non-zero fields' predicates. +// A zero constraint matches nothing. +type Constraint struct { + // If Logical is non-nil, all other fields are ignored. + Logical *LogicalConstraint `json:"logical,omitempty"` + + // Anything, if true, matches all blobs. + Anything bool `json:"anything,omitempty"` + + CamliType string `json:"camliType,omitempty"` // camliType of the JSON blob + AnyCamliType bool `json:"anyCamliType,omitempty"` // if true, any camli JSON blob matches + BlobRefPrefix string `json:"blobRefPrefix,omitempty"` + + File *FileConstraint `json:"file,omitempty"` + Dir *DirConstraint `json:"dir,omitempty"` + + Claim *ClaimConstraint `json:"claim,omitempty"` + BlobSize *IntConstraint `json:"blobSize,omitempty"` + + Permanode *PermanodeConstraint `json:"permanode,omitempty"` + + matcherOnce sync.Once + matcherFn matchFn +} + +func (c *Constraint) checkValid() error { + type checker interface { + checkValid() error + } + if c.Claim != nil { + return errors.New("TODO: implement ClaimConstraint") + } + for _, cv := range []checker{ + c.Logical, + c.File, + c.Dir, + c.BlobSize, + c.Permanode, + } { + if err := cv.checkValid(); err != nil { + return err + } + } + return nil +} + +func (c *Constraint) onlyMatchesPermanode() bool { + if c.Permanode != nil || c.CamliType == "permanode" { + return true + } + + if c.Logical != nil && c.Logical.Op == "and" { + 
if c.Logical.A.onlyMatchesPermanode() || c.Logical.B.onlyMatchesPermanode() { + return true + } + } + + // TODO: There are other cases we can return true here, like: + // Logical:{Op:'or', A:PermanodeConstraint{...}, B:PermanodeConstraint{...} + + return false +} + +type FileConstraint struct { + // (All non-zero fields must match) + + FileSize *IntConstraint `json:"fileSize,omitempty"` + FileName *StringConstraint `json:"fileName,omitempty"` + MIMEType *StringConstraint `json:"mimeType,omitempty"` + Time *TimeConstraint `json:"time,omitempty"` + ModTime *TimeConstraint `json:"modTime,omitempty"` + + // WholeRef if non-zero only matches if the entire checksum of the + // file (the concatenation of all its blobs) is equal to the + // provided blobref. The index may not have every file's digest for + // every known hash algorithm. + WholeRef blob.Ref `json:"wholeRef,omitempty"` + + // For images: + IsImage bool `json:"isImage,omitempty"` + EXIF *EXIFConstraint `json:"exif,omitempty"` // TODO: implement + Width *IntConstraint `json:"width,omitempty"` + Height *IntConstraint `json:"height,omitempty"` + WHRatio *FloatConstraint `json:"widthHeightRation,omitempty"` + Location *LocationConstraint `json:"location,omitempty"` + + // MediaTag is for ID3 (and similar) embedded metadata in files. + MediaTag *MediaTagConstraint `json:"mediaTag,omitempty"` +} + +type MediaTagConstraint struct { + // Tag is the tag to match. + // For ID3, this includes: title, artist, album, genre, musicbrainzalbumid, year, track, disc, mediaref, durationms. + Tag string `json:"tag"` + + String *StringConstraint `json:"string,omitempty"` + Int *IntConstraint `json:"int,omitempty"` +} + +type DirConstraint struct { + // (All non-zero fields must match) + + // TODO: implement. mostly need more things in the index. 
+ + FileName *StringConstraint + + TopFileSize, // not recursive + TopFileCount, // not recursive + FileSize, + FileCount *IntConstraint + + // TODO: these would need thought on how to index efficiently: + // (Also: top-only variants?) + // ContainsFile *FileConstraint + // ContainsDir *DirConstraint +} + +// An IntConstraint specifies constraints on an integer. +type IntConstraint struct { + // Min and Max are both optional and inclusive bounds. + // Zero means don't check. + Min int64 `json:"min,omitempty"` + Max int64 `json:"max,omitempty"` + ZeroMin bool `json:"zeroMin,omitempty"` // if true, min is actually zero + ZeroMax bool `json:"zeroMax,omitempty"` // if true, max is actually zero +} + +func (c *IntConstraint) hasMin() bool { return c.Min != 0 || c.ZeroMin } +func (c *IntConstraint) hasMax() bool { return c.Max != 0 || c.ZeroMax } + +func (c *IntConstraint) checkValid() error { + if c == nil { + return nil + } + if c.ZeroMin && c.Min != 0 { + return errors.New("in IntConstraint, can't set both ZeroMin and Min") + } + if c.ZeroMax && c.Max != 0 { + return errors.New("in IntConstraint, can't set both ZeroMax and Max") + } + if c.hasMax() && c.hasMin() && c.Min > c.Max { + return errors.New("in IntConstraint, min is greater than max") + } + return nil +} + +func (c *IntConstraint) intMatches(v int64) bool { + if c.hasMin() && v < c.Min { + return false + } + if c.hasMax() && v > c.Max { + return false + } + return true +} + +// A FloatConstraint specifies constraints on a float. +type FloatConstraint struct { + // Min and Max are both optional and inclusive bounds. + // Zero means don't check. 
+ Min float64 `json:"min,omitempty"` + Max float64 `json:"max,omitempty"` + ZeroMin bool `json:"zeroMin,omitempty"` // if true, min is actually zero + ZeroMax bool `json:"zeroMax,omitempty"` // if true, max is actually zero +} + +func (c *FloatConstraint) hasMin() bool { return c.Min != 0 || c.ZeroMin } +func (c *FloatConstraint) hasMax() bool { return c.Max != 0 || c.ZeroMax } + +func (c *FloatConstraint) checkValid() error { + if c == nil { + return nil + } + if c.ZeroMin && c.Min != 0 { + return errors.New("in FloatConstraint, can't set both ZeroMin and Min") + } + if c.ZeroMax && c.Max != 0 { + return errors.New("in FloatConstraint, can't set both ZeroMax and Max") + } + if c.hasMax() && c.hasMin() && c.Min > c.Max { + return errors.New("in FloatConstraint, min is greater than max") + } + return nil +} + +func (c *FloatConstraint) floatMatches(v float64) bool { + if c.hasMin() && v < c.Min { + return false + } + if c.hasMax() && v > c.Max { + return false + } + return true +} + +type EXIFConstraint struct { + // TODO. need to put this in the index probably. + // Maybe: GPS *LocationConstraint + // ISO, Aperature, Camera Make/Model, etc. +} + +type LocationConstraint struct { + // Any, if true, matches any photo with a known location. + Any bool + + // North, West, East, and South define a region in which a photo + // must be in order to match. + North float64 + West float64 + East float64 + South float64 +} + +func (c *LocationConstraint) matchesLatLong(lat, long float64) bool { + return c.Any || (c.West <= long && long <= c.East && c.South <= lat && lat <= c.North) +} + +// A StringConstraint specifies constraints on a string. +// All non-zero must match. 
+type StringConstraint struct { + Empty bool `json:"empty,omitempty"` // matches empty string + Equals string `json:"equals,omitempty"` + Contains string `json:"contains,omitempty"` + HasPrefix string `json:"hasPrefix,omitempty"` + HasSuffix string `json:"hasSuffix,omitempty"` + ByteLength *IntConstraint `json:"byteLength,omitempty"` // length in bytes (not chars) + CaseInsensitive bool `json:"caseInsensitive,omitempty"` + + // TODO: CharLength (assume UTF-8) +} + +// stringCompareFunc contains a function to get a value from a StringConstraint and a second function to compare it +// against the string s that's being matched. +type stringConstraintFunc struct { + v func(*StringConstraint) string + fn func(s, v string) bool +} + +// Functions to compare fields of a StringConstraint against strings in a case-sensitive manner. +var stringConstraintFuncs = []stringConstraintFunc{ + {func(c *StringConstraint) string { return c.Equals }, func(a, b string) bool { return a == b }}, + {func(c *StringConstraint) string { return c.Contains }, strings.Contains}, + {func(c *StringConstraint) string { return c.HasPrefix }, strings.HasPrefix}, + {func(c *StringConstraint) string { return c.HasSuffix }, strings.HasSuffix}, +} + +// Functions to compare fields of a StringConstraint against strings in a case-insensitive manner. 
+var stringConstraintFuncsFold = []stringConstraintFunc{ + {func(c *StringConstraint) string { return c.Equals }, strings.EqualFold}, + {func(c *StringConstraint) string { return c.Contains }, strutil.ContainsFold}, + {func(c *StringConstraint) string { return c.HasPrefix }, strutil.HasPrefixFold}, + {func(c *StringConstraint) string { return c.HasSuffix }, strutil.HasSuffixFold}, +} + +func (c *StringConstraint) stringMatches(s string) bool { + if c.Empty && len(s) > 0 { + return false + } + if c.ByteLength != nil && !c.ByteLength.intMatches(int64(len(s))) { + return false + } + + funcs := stringConstraintFuncs + if c.CaseInsensitive { + funcs = stringConstraintFuncsFold + } + for _, pair := range funcs { + if v := pair.v(c); v != "" && !pair.fn(s, v) { + return false + } + } + return true +} + +type TimeConstraint struct { + Before types.Time3339 `json:"before"` // < + After types.Time3339 `json:"after"` // >= + + // TODO: this won't JSON-marshal/unmarshal well. Make a time.Duration marshal type? + // Likewise with time that supports omitempty? + InLast time.Duration `json:"inLast"` // >= +} + +type ClaimConstraint struct { + SignedBy string `json:"signedBy"` // identity + SignedAfter time.Time `json:"signedAfter"` + SignedBefore time.Time `json:"signedBefore"` +} + +func (c *ClaimConstraint) checkValid() error { + return errors.New("TODO: implement blobMatches and checkValid on ClaimConstraint") +} + +type LogicalConstraint struct { + Op string `json:"op"` // "and", "or", "xor", "not" + A *Constraint `json:"a"` + B *Constraint `json:"b"` // only valid if Op != "not" +} + +// PermanodeConstraint matches permanodes. +type PermanodeConstraint struct { + // At specifies the time at which to pretend we're resolving attributes. + // Attribute claims after this point in time are ignored. + // If zero, the current time is used. + At time.Time `json:"at,omitempty"` + + // ModTime optionally matches on the last modtime of the permanode. 
+ ModTime *TimeConstraint `json:"modTime,omitempty"` + + // Time optionally matches the permanode's time. A Permanode + // may not have a known time. If the permanode does not have a + // known time, one may be guessed if the top-level search + // parameters request so. + Time *TimeConstraint `json:"time,omitempty"` + + // Attr optionally specifies the attribute to match. + // e.g. "camliContent", "camliMember", "tag" + // This is required if any of the items below are used. + Attr string `json:"attr,omitempty"` + + // SkipHidden skips hidden or other boring files. + SkipHidden bool `json:"skipHidden,omitempty"` + + // NumValue optionally tests the number of values this + // permanode has for Attr. + NumValue *IntConstraint `json:"numValue,omitempty"` + + // ValueAll modifies the matching behavior when an attribute + // is multi-valued. By default, when ValueAll is false, only + // one value of a multi-valued attribute needs to match. If + // ValueAll is true, all attributes must match. + ValueAll bool `json:"valueAllMatch,omitempty"` + + // Value specifies an exact string to match. + // This is a convenience form for the simple case of exact + // equality. The same can be accomplished with ValueMatches. + Value string `json:"value,omitempty"` // if non-zero, absolute match + + // ValueMatches optionally specifies a StringConstraint to + // match the value against. + ValueMatches *StringConstraint `json:"valueMatches,omitempty"` + + // ValueMatchesInt optionally specifies an IntConstraint to match + // the value against. Non-integer values will not match. + ValueMatchesInt *IntConstraint `json:"valueMatchesInt,omitempty"` + + // ValueMatchesFloat optionally specifies a FloatConstraint to match + // the value against. Non-float values will not match. + ValueMatchesFloat *FloatConstraint `json:"valueMatchesFloat,omitempty"` + + // ValueInSet optionally specifies a sub-query which the value + // (which must be a blobref) must be a part of. 
+ ValueInSet *Constraint `json:"valueInSet,omitempty"` + + // Relation optionally specifies a constraint based on relations + // to other permanodes (e.g. camliMember or camliPath sets). + // You can use it to test the properties of a parent, ancestor, + // child, or progeny. + Relation *RelationConstraint `json:"relation,omitempty"` + + // Location optionally restricts matches to permanodes having + // this location. This only affects permanodes with a known + // type to have an lat/long location. + Location *LocationConstraint `json:"location,omitempty"` + + // Continue is for internal use. + Continue *PermanodeContinueConstraint `json:"-"` + + // TODO: + // NumClaims *IntConstraint // by owner + // Owner blob.Ref // search for permanodes by an owner + + // Note: When adding a field, update hasValueConstraint. +} + +type PermanodeContinueConstraint struct { + // LastMod if non-zero is the modtime of the last item + // that was seen. One of this or LastCreated will be set. + LastMod time.Time + + // LastCreated if non-zero is the creation time of the last + // item that was seen. + LastCreated time.Time + + // Last is the last blobref that was shown at the time + // given in ModLessEqual or CreateLessEqual. + // This is used as a tie-breaker. + // If the time is equal, permanodes <= this are not matched. + // If the time is past this in the scroll position, then this + // field is ignored. + Last blob.Ref +} + +func (pcc *PermanodeContinueConstraint) checkValid() error { + if pcc.LastMod.IsZero() == pcc.LastCreated.IsZero() { + return errors.New("exactly one of PermanodeContinueConstraint LastMod or LastCreated must be defined") + } + return nil +} + +type RelationConstraint struct { + // Relation must be one of: + // * "child" + // * "parent" (immediate parent only) + // * "progeny" (any level down) + // * "ancestor" (any level up) + Relation string + + // EdgeType optionally specifies an edge type. + // By default it matches "camliMember" and "camliPath:*". 
+ EdgeType string + + // After finding all the nodes matching the Relation and + // EdgeType, either one or all (depending on whether Any or + // All is set) must then match for the RelationConstraint + // itself to match. + // + // It is an error to set both. + Any, All *Constraint +} + +func (rc *RelationConstraint) checkValid() error { + if rc.Relation != "parent" { + return errors.New("only RelationConstraint.Relation of \"parent\" is currently supported") + } + if (rc.Any == nil) == (rc.All == nil) { + return errors.New("exactly one of RelationConstraint Any or All must be defined") + } + return nil +} + +func (rc *RelationConstraint) matchesAttr(attr string) bool { + if rc.EdgeType != "" { + return attr == rc.EdgeType + } + return attr == "camliMember" || strings.HasPrefix(attr, "camliPath:") +} + +// The PermanodeConstraint matching of RelationConstraint. +func (rc *RelationConstraint) match(s *search, pn blob.Ref, at time.Time) (ok bool, err error) { + corpus := s.h.corpus + if corpus == nil { + // TODO: care? 
+ return false, errors.New("RelationConstraint requires an in-memory corpus") + } + + if rc.Relation != "parent" { + panic("bogus") + } + + var matcher matchFn + if rc.Any != nil { + matcher = rc.Any.matcher() + } else { + matcher = rc.All.matcher() + } + + var anyGood bool + var anyBad bool + var lastChecked blob.Ref + var permanodesChecked map[blob.Ref]bool // lazily created to optimize for common case of 1 match + corpus.ForeachClaimBackLocked(pn, at, func(cl *camtypes.Claim) bool { + if !rc.matchesAttr(cl.Attr) { + return true // skip claim + } + if lastChecked.Valid() { + if permanodesChecked == nil { + permanodesChecked = make(map[blob.Ref]bool) + } + permanodesChecked[lastChecked] = true + lastChecked = blob.Ref{} // back to zero + } + if permanodesChecked[cl.Permanode] { + return true // skip checking + } + if !corpus.PermanodeHasAttrValueLocked(cl.Permanode, at, cl.Attr, cl.Value) { + return true // claim once matched permanode, but no longer + } + + var bm camtypes.BlobMeta + bm, err = s.blobMeta(cl.Permanode) + if err != nil { + return false + } + var ok bool + ok, err = matcher(s, cl.Permanode, bm) + if err != nil { + return false + } + if ok { + anyGood = true + if rc.Any != nil { + return false // done. stop searching. + } + } else { + anyBad = true + if rc.All != nil { + return false // fail fast + } + } + lastChecked = cl.Permanode + return true + }) + if err != nil { + return false, err + } + if rc.All != nil { + return anyGood && !anyBad, nil + } + return anyGood, nil +} + +// search is the state of an in-progress search +type search struct { + h *Handler + q *SearchQuery + res *SearchResult + ctx *context.Context + + // ss is a scratch string slice to avoid allocations. + // We assume (at least so far) that only 1 goroutine is used + // for a given search, so anything can use this. 
+ ss []string // scratch +} + +func (s *search) blobMeta(br blob.Ref) (camtypes.BlobMeta, error) { + if c := s.h.corpus; c != nil { + return c.GetBlobMetaLocked(br) + } else { + return s.h.index.GetBlobMeta(br) + } +} + +func (s *search) fileInfo(br blob.Ref) (camtypes.FileInfo, error) { + if c := s.h.corpus; c != nil { + return c.GetFileInfoLocked(br) + } else { + return s.h.index.GetFileInfo(br) + } +} + +// optimizePlan returns an optimized version of c which will hopefully +// execute faster than executing c literally. +func optimizePlan(c *Constraint) *Constraint { + // TODO: what the comment above says. + return c +} + +func (h *Handler) Query(rawq *SearchQuery) (*SearchResult, error) { + ctx := context.TODO() // TODO: set from rawq + exprResult, err := rawq.checkValid(ctx) + if err != nil { + return nil, fmt.Errorf("Invalid SearchQuery: %v", err) + } + q := rawq.plannedQuery(exprResult) + res := new(SearchResult) + s := &search{ + h: h, + q: q, + res: res, + ctx: context.TODO(), + } + defer s.ctx.Cancel() + + corpus := h.corpus + var unlockOnce sync.Once + if corpus != nil { + corpus.RLock() + defer unlockOnce.Do(corpus.RUnlock) + } + + ch := make(chan camtypes.BlobMeta, buffered) + errc := make(chan error, 1) + + cands := q.pickCandidateSource(s) + if candSourceHook != nil { + candSourceHook(cands.name) + } + + sendCtx := s.ctx.New() + defer sendCtx.Cancel() + go func() { errc <- cands.send(sendCtx, s, ch) }() + + wantAround, foundAround := false, false + if q.Around.Valid() { + wantAround = true + } + blobMatches := q.Constraint.matcher() + for meta := range ch { + match, err := blobMatches(s, meta.Ref, meta) + if err != nil { + return nil, err + } + if match { + res.Blobs = append(res.Blobs, &SearchResultBlob{ + Blob: meta.Ref, + }) + if q.Limit <= 0 || !cands.sorted { + continue + } + if !wantAround || foundAround { + if len(res.Blobs) == q.Limit { + sendCtx.Cancel() + break + } + continue + } + if q.Around == meta.Ref { + foundAround = true + if 
len(res.Blobs)*2 > q.Limit { + // If we've already collected more than half of the Limit when Around is found, + // we ditch the surplus from the beginning of the slice of results. + // If Limit is even, and the number of results before and after Around + // are both greater than half the limit, then there will be one more result before + // than after. + discard := len(res.Blobs) - q.Limit/2 - 1 + if discard < 0 { + discard = 0 + } + res.Blobs = res.Blobs[discard:] + } + if len(res.Blobs) == q.Limit { + sendCtx.Cancel() + break + } + continue + } + if len(res.Blobs) == q.Limit { + n := copy(res.Blobs, res.Blobs[len(res.Blobs)/2:]) + res.Blobs = res.Blobs[:n] + } + } + } + if err := <-errc; err != nil && err != context.ErrCanceled { + return nil, err + } + if q.Limit > 0 && cands.sorted && wantAround && !foundAround { + // results are ignored if Around was not found + res.Blobs = nil + } + if !cands.sorted { + switch q.Sort { + case UnspecifiedSort, Unsorted: + // Nothing to do. + case BlobRefAsc: + sort.Sort(sortSearchResultBlobs{res.Blobs, func(a, b *SearchResultBlob) bool { + return a.Blob.Less(b.Blob) + }}) + case CreatedDesc, CreatedAsc: + if corpus == nil { + return nil, errors.New("TODO: Sorting without a corpus unsupported") + } + var err error + corpus.RLock() + sort.Sort(sortSearchResultBlobs{res.Blobs, func(a, b *SearchResultBlob) bool { + if err != nil { + return false + } + ta, ok := corpus.PermanodeAnyTimeLocked(a.Blob) + if !ok { + err = fmt.Errorf("no ctime or modtime found for %v", a.Blob) + return false + } + tb, ok := corpus.PermanodeAnyTimeLocked(b.Blob) + if !ok { + err = fmt.Errorf("no ctime or modtime found for %v", b.Blob) + return false + } + if q.Sort == CreatedAsc { + return ta.Before(tb) + } + return tb.Before(ta) + }}) + corpus.RUnlock() + if err != nil { + return nil, err + } + // TODO(mpl): LastModifiedDesc, LastModifiedAsc + default: + return nil, errors.New("TODO: unsupported sort+query combination.") + } + if q.Limit > 0 && 
len(res.Blobs) > q.Limit { + res.Blobs = res.Blobs[:q.Limit] + } + } + if corpus != nil { + if !wantAround { + q.setResultContinue(corpus, res) + } + unlockOnce.Do(corpus.RUnlock) + } + + if q.Describe != nil { + q.Describe.BlobRef = blob.Ref{} // zero this out, if caller set it + blobs := make([]blob.Ref, 0, len(res.Blobs)) + for _, srb := range res.Blobs { + blobs = append(blobs, srb.Blob) + } + q.Describe.BlobRefs = blobs + res, err := s.h.Describe(q.Describe) + if err != nil { + return nil, err + } + s.res.Describe = res + } + return s.res, nil +} + +// setResultContinue sets res.Continue if q is suitable for having a continue token. +// The corpus is locked for reads. +func (q *SearchQuery) setResultContinue(corpus *index.Corpus, res *SearchResult) { + if !q.Constraint.onlyMatchesPermanode() { + return + } + var pnTimeFunc func(blob.Ref) (t time.Time, ok bool) + switch q.Sort { + case LastModifiedDesc: + pnTimeFunc = corpus.PermanodeModtimeLocked + case CreatedDesc: + pnTimeFunc = corpus.PermanodeAnyTimeLocked + default: + return + } + + if q.Limit <= 0 || len(res.Blobs) != q.Limit { + return + } + lastpn := res.Blobs[len(res.Blobs)-1].Blob + t, ok := pnTimeFunc(lastpn) + if !ok { + return + } + res.Continue = fmt.Sprintf("pn:%d:%v", t.UnixNano(), lastpn) +} + +const camliTypeMIME = "application/json; camliType=" + +type matchFn func(*search, blob.Ref, camtypes.BlobMeta) (bool, error) + +func alwaysMatch(*search, blob.Ref, camtypes.BlobMeta) (bool, error) { + return true, nil +} + +func neverMatch(*search, blob.Ref, camtypes.BlobMeta) (bool, error) { + return false, nil +} + +func anyCamliType(s *search, br blob.Ref, bm camtypes.BlobMeta) (bool, error) { + return bm.CamliType != "", nil +} + +// Test hook. +var candSourceHook func(string) + +type candidateSource struct { + name string + sorted bool + + // sends sends to the channel and must close it, regardless of error + // or interruption from context.Done(). 
+ send func(*context.Context, *search, chan<- camtypes.BlobMeta) error +} + +func (q *SearchQuery) pickCandidateSource(s *search) (src candidateSource) { + c := q.Constraint + corpus := s.h.corpus + if corpus != nil { + if c.onlyMatchesPermanode() { + src.sorted = true + switch q.Sort { + case LastModifiedDesc: + src.name = "corpus_permanode_lastmod" + src.send = func(ctx *context.Context, s *search, dst chan<- camtypes.BlobMeta) error { + return corpus.EnumeratePermanodesLastModifiedLocked(ctx, dst) + } + return + case CreatedDesc: + src.name = "corpus_permanode_created" + src.send = func(ctx *context.Context, s *search, dst chan<- camtypes.BlobMeta) error { + return corpus.EnumeratePermanodesCreatedLocked(ctx, dst, true) + } + return + default: + src.sorted = false + } + } + if c.AnyCamliType || c.CamliType != "" { + camType := c.CamliType // empty means all + src.name = "corpus_blob_meta" + src.send = func(ctx *context.Context, s *search, dst chan<- camtypes.BlobMeta) error { + return corpus.EnumerateCamliBlobsLocked(ctx, camType, dst) + } + return + } + } + src.name = "index_blob_meta" + src.send = func(ctx *context.Context, s *search, dst chan<- camtypes.BlobMeta) error { + return s.h.index.EnumerateBlobMeta(ctx, dst) + } + return +} + +type allMustMatch []matchFn + +func (fns allMustMatch) blobMatches(s *search, br blob.Ref, blobMeta camtypes.BlobMeta) (bool, error) { + for _, condFn := range fns { + match, err := condFn(s, br, blobMeta) + if !match || err != nil { + return match, err + } + } + return true, nil +} + +func (c *Constraint) matcher() func(s *search, br blob.Ref, blobMeta camtypes.BlobMeta) (bool, error) { + c.matcherOnce.Do(c.initMatcherFn) + return c.matcherFn +} + +func (c *Constraint) initMatcherFn() { + c.matcherFn = c.genMatcher() +} + +func (c *Constraint) genMatcher() matchFn { + var ncond int + var cond matchFn + var conds []matchFn + addCond := func(fn matchFn) { + ncond++ + if ncond == 1 { + cond = fn + return + } else if ncond == 2 { 
+ conds = append(conds, cond) + } + conds = append(conds, fn) + } + if c.Logical != nil { + addCond(c.Logical.matcher()) + } + if c.Anything { + addCond(alwaysMatch) + } + if c.CamliType != "" { + addCond(func(s *search, br blob.Ref, bm camtypes.BlobMeta) (bool, error) { + return bm.CamliType == c.CamliType, nil + }) + } + if c.AnyCamliType { + addCond(anyCamliType) + } + if c.Permanode != nil { + addCond(c.Permanode.blobMatches) + } + // TODO: ClaimConstraint + if c.File != nil { + addCond(c.File.blobMatches) + } + if c.Dir != nil { + addCond(c.Dir.blobMatches) + } + if bs := c.BlobSize; bs != nil { + addCond(func(s *search, br blob.Ref, bm camtypes.BlobMeta) (bool, error) { + return bs.intMatches(int64(bm.Size)), nil + }) + } + if pfx := c.BlobRefPrefix; pfx != "" { + addCond(func(s *search, br blob.Ref, meta camtypes.BlobMeta) (bool, error) { + return strings.HasPrefix(br.String(), pfx), nil + }) + } + switch ncond { + case 0: + return neverMatch + case 1: + return cond + default: + return allMustMatch(conds).blobMatches + } +} + +func (c *LogicalConstraint) checkValid() error { + if c == nil { + return nil + } + if c.A == nil { + return errors.New("In LogicalConstraint, need to set A") + } + if err := c.A.checkValid(); err != nil { + return err + } + switch c.Op { + case "and", "xor", "or": + if c.B == nil { + return errors.New("In LogicalConstraint, need both A and B set") + } + if err := c.B.checkValid(); err != nil { + return err + } + case "not": + default: + return fmt.Errorf("In LogicalConstraint, unknown operation %q", c.Op) + } + return nil +} + +func (c *LogicalConstraint) matcher() matchFn { + amatches := c.A.matcher() + var bmatches matchFn + if c.Op != "not" { + bmatches = c.B.matcher() + } + return func(s *search, br blob.Ref, bm camtypes.BlobMeta) (bool, error) { + + // Note: not using multiple goroutines here, because + // so far the *search type assumes it's + // single-threaded. (e.g. the .ss scratch type). 
+ // Also, not using multiple goroutines means we can + // short-circuit when Op == "and" and av is false. + + av, err := amatches(s, br, bm) + if err != nil { + return false, err + } + switch c.Op { + case "not": + return !av, nil + case "and": + if !av { + // Short-circuit. + return false, nil + } + case "or": + if av { + // Short-circuit. + return true, nil + } + } + + bv, err := bmatches(s, br, bm) + if err != nil { + return false, err + } + + switch c.Op { + case "and", "or": + return bv, nil + case "xor": + return av != bv, nil + } + panic("unreachable") + } +} + +func (c *PermanodeConstraint) checkValid() error { + if c == nil { + return nil + } + if c.Attr != "" { + if c.NumValue == nil && !c.hasValueConstraint() { + return errors.New("PermanodeConstraint with Attr requires also setting NumValue or a value-matching constraint") + } + if nv := c.NumValue; nv != nil { + if nv.ZeroMin { + return errors.New("NumValue with ZeroMin makes no sense; matches everything") + } + if nv.ZeroMax && c.hasValueConstraint() { + return errors.New("NumValue with ZeroMax makes no sense in conjunction with a value-matching constraint; matches nothing") + } + if nv.Min < 0 || nv.Max < 0 { + return errors.New("NumValue with negative Min or Max makes no sense") + } + } + } + if rc := c.Relation; rc != nil { + if err := rc.checkValid(); err != nil { + return err + } + } + if pcc := c.Continue; pcc != nil { + if err := pcc.checkValid(); err != nil { + return err + } + } + return nil +} + +var numPermanodeFields = reflect.TypeOf(PermanodeConstraint{}).NumField() + +// hasValueConstraint returns true if one or more constraints that check an attribute's value are set. +func (c *PermanodeConstraint) hasValueConstraint() bool { + // If a field has been added or removed, update this after adding the new field to the return statement if necessary. 
+ const expectedFields = 15 + if numPermanodeFields != expectedFields { + panic(fmt.Sprintf("PermanodeConstraint field count changed (now %v rather than %v)", numPermanodeFields, expectedFields)) + } + return c.Value != "" || + c.ValueMatches != nil || + c.ValueMatchesInt != nil || + c.ValueMatchesFloat != nil || + c.ValueInSet != nil +} + +func (c *PermanodeConstraint) blobMatches(s *search, br blob.Ref, bm camtypes.BlobMeta) (ok bool, err error) { + if bm.CamliType != "permanode" { + return false, nil + } + corpus := s.h.corpus + + var dp *DescribedPermanode + if corpus == nil { + dr, err := s.h.Describe(&DescribeRequest{BlobRef: br}) + if err != nil { + return false, err + } + db := dr.Meta[br.String()] + if db == nil || db.Permanode == nil { + return false, nil + } + dp = db.Permanode + } + + if c.Attr != "" { + if !c.At.IsZero() && corpus == nil { + panic("PermanodeConstraint.At not supported without an in-memory corpus") + } + var vals []string + if corpus == nil { + vals = dp.Attr[c.Attr] + } else { + s.ss = corpus.AppendPermanodeAttrValuesLocked( + s.ss[:0], br, c.Attr, c.At, s.h.owner) + vals = s.ss + } + ok, err := c.permanodeMatchesAttrVals(s, vals) + if !ok || err != nil { + return false, err + } + } + + if c.SkipHidden && corpus != nil { + defVis := corpus.PermanodeAttrValueLocked(br, "camliDefVis", c.At, s.h.owner) + if defVis == "hide" { + return false, nil + } + nodeType := corpus.PermanodeAttrValueLocked(br, "camliNodeType", c.At, s.h.owner) + if nodeType == "foursquare.com:venue" { + // TODO: temporary. remove this, or change + // when/where (time) we show these. But these + // are flooding my results and I'm about to + // demo this. 
+ return false, nil + } + } + + if c.ModTime != nil { + if corpus != nil { + mt, ok := corpus.PermanodeModtimeLocked(br) + if !ok || !c.ModTime.timeMatches(mt) { + return false, nil + } + } else if !c.ModTime.timeMatches(dp.ModTime) { + return false, nil + } + } + + if c.Time != nil { + if corpus != nil { + t, ok := corpus.PermanodeAnyTimeLocked(br) + if !ok || !c.Time.timeMatches(t) { + return false, nil + } + } else { + panic("TODO: not yet supported") + } + } + + if rc := c.Relation; rc != nil { + ok, err := rc.match(s, br, c.At) + if !ok || err != nil { + return ok, err + } + } + + if c.Location != nil { + if corpus == nil { + return false, nil + } + lat, long, ok := corpus.PermanodeLatLongLocked(br, c.At) + if !ok || !c.Location.matchesLatLong(lat, long) { + return false, nil + } + } + + if cc := c.Continue; cc != nil { + if corpus == nil { + // Requires an in-memory index for infinite + // scroll. At least for now. + return false, nil + } + var pnTime time.Time + var ok bool + switch { + case !cc.LastMod.IsZero(): + pnTime, ok = corpus.PermanodeModtimeLocked(br) + if !ok || pnTime.After(cc.LastMod) { + return false, nil + } + case !cc.LastCreated.IsZero(): + pnTime, ok = corpus.PermanodeAnyTimeLocked(br) + if !ok || pnTime.After(cc.LastCreated) { + return false, nil + } + default: + panic("Continue constraint without a LastMod or a LastCreated") + } + // Blobs are sorted by modtime, and then by + // blobref, and then reversed overall. From + // top of page, imagining this scenario, where + // the user requested a page size Limit of 4: + // mod5, sha1-25 + // mod4, sha1-72 + // mod3, sha1-cc + // mod3, sha1-bb <--- last seen item, continue = "pn:mod3:sha1-bb" + // mod3, sha1-aa <-- and we want this one next. + // In the case above, we'll see all of cc, bb, and cc for mod3. 
+ if (pnTime.Equal(cc.LastMod) || pnTime.Equal(cc.LastCreated)) && !br.Less(cc.Last) { + return false, nil + } + } + return true, nil +} + +// permanodeMatchesAttrVals checks that the values in vals - all of them, if c.ValueAll is set - +// match the values for c.Attr. +// vals are the current permanode values of c.Attr. +func (c *PermanodeConstraint) permanodeMatchesAttrVals(s *search, vals []string) (bool, error) { + if c.NumValue != nil && !c.NumValue.intMatches(int64(len(vals))) { + return false, nil + } + if c.hasValueConstraint() { + nmatch := 0 + for _, val := range vals { + match, err := c.permanodeMatchesAttrVal(s, val) + if err != nil { + return false, err + } + if match { + nmatch++ + } + } + if nmatch == 0 { + return false, nil + } + if c.ValueAll { + return nmatch == len(vals), nil + } + } + return true, nil +} + +func (c *PermanodeConstraint) permanodeMatchesAttrVal(s *search, val string) (bool, error) { + if c.Value != "" && c.Value != val { + return false, nil + } + if c.ValueMatches != nil && !c.ValueMatches.stringMatches(val) { + return false, nil + } + if c.ValueMatchesInt != nil { + if i, err := strconv.ParseInt(val, 10, 64); err != nil || !c.ValueMatchesInt.intMatches(i) { + return false, nil + } + } + if c.ValueMatchesFloat != nil { + if f, err := strconv.ParseFloat(val, 64); err != nil || !c.ValueMatchesFloat.floatMatches(f) { + return false, nil + } + } + if subc := c.ValueInSet; subc != nil { + br, ok := blob.Parse(val) // TODO: use corpus's parse, or keep this as blob.Ref in corpus attr + if !ok { + return false, nil + } + meta, err := s.blobMeta(br) + if err == os.ErrNotExist { + return false, nil + } + if err != nil { + return false, err + } + return subc.matcher()(s, br, meta) + } + return true, nil +} + +func (c *FileConstraint) checkValid() error { + return nil +} + +func (c *FileConstraint) blobMatches(s *search, br blob.Ref, bm camtypes.BlobMeta) (bool, error) { + if bm.CamliType != "file" { + return false, nil + } + fi, err := 
s.fileInfo(br) + if err == os.ErrNotExist { + return false, nil + } + if err != nil { + return false, err + } + if fs := c.FileSize; fs != nil && !fs.intMatches(fi.Size) { + return false, nil + } + if c.IsImage && !strings.HasPrefix(fi.MIMEType, "image/") { + return false, nil + } + if sc := c.FileName; sc != nil && !sc.stringMatches(fi.FileName) { + return false, nil + } + if sc := c.MIMEType; sc != nil && !sc.stringMatches(fi.MIMEType) { + return false, nil + } + if tc := c.Time; tc != nil { + if fi.Time == nil || !tc.timeMatches(fi.Time.Time()) { + return false, nil + } + } + if tc := c.ModTime; tc != nil { + if fi.ModTime == nil || !tc.timeMatches(fi.ModTime.Time()) { + return false, nil + } + } + corpus := s.h.corpus + if c.WholeRef.Valid() { + if corpus == nil { + return false, nil + } + wholeRef, ok := corpus.GetWholeRefLocked(br) + if !ok || wholeRef != c.WholeRef { + return false, nil + } + } + var width, height int64 + if c.Width != nil || c.Height != nil || c.WHRatio != nil { + if corpus == nil { + return false, nil + } + imageInfo, err := corpus.GetImageInfoLocked(br) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + width = int64(imageInfo.Width) + height = int64(imageInfo.Height) + } + if c.Width != nil && !c.Width.intMatches(width) { + return false, nil + } + if c.Height != nil && !c.Height.intMatches(height) { + return false, nil + } + if c.WHRatio != nil && !c.WHRatio.floatMatches(float64(width)/float64(height)) { + return false, nil + } + if c.Location != nil { + if corpus == nil { + return false, nil + } + lat, long, ok := corpus.FileLatLongLocked(br) + if ok && c.Location.Any { + // Pass. 
+ } else if !ok || !c.Location.matchesLatLong(lat, long) { + return false, nil + } + } + if mt := c.MediaTag; mt != nil { + if corpus == nil { + return false, nil + } + var tagValue string + if mediaTags, err := corpus.GetMediaTagsLocked(br); err == nil && mt.Tag != "" { + tagValue = mediaTags[mt.Tag] + } + if mt.Int != nil { + if i, err := strconv.ParseInt(tagValue, 10, 64); err != nil || !mt.Int.intMatches(i) { + return false, nil + } + } + if mt.String != nil && !mt.String.stringMatches(tagValue) { + return false, nil + } + } + // TOOD: EXIF timeconstraint + return true, nil +} + +func (c *TimeConstraint) timeMatches(t time.Time) bool { + if t.IsZero() { + return false + } + if !c.Before.IsZero() { + if !t.Before(time.Time(c.Before)) { + return false + } + } + after := time.Time(c.After) + if after.IsZero() && c.InLast > 0 { + after = time.Now().Add(-c.InLast) + } + if !after.IsZero() { + if !(t.Equal(after) || t.After(after)) { // after is >= + return false + } + } + return true +} + +func (c *DirConstraint) checkValid() error { + return nil +} + +func (c *DirConstraint) blobMatches(s *search, br blob.Ref, bm camtypes.BlobMeta) (bool, error) { + if bm.CamliType != "directory" { + return false, nil + } + + // TODO: implement + panic("TODO: implement DirConstraint.blobMatches") +} + +type sortSearchResultBlobs struct { + s []*SearchResultBlob + less func(a, b *SearchResultBlob) bool +} + +func (ss sortSearchResultBlobs) Len() int { return len(ss.s) } +func (ss sortSearchResultBlobs) Swap(i, j int) { ss.s[i], ss.s[j] = ss.s[j], ss.s[i] } +func (ss sortSearchResultBlobs) Less(i, j int) bool { return ss.less(ss.s[i], ss.s[j]) } diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/query_test.go b/vendor/github.com/camlistore/camlistore/pkg/search/query_test.go new file mode 100644 index 00000000..89809458 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/query_test.go @@ -0,0 +1,1382 @@ +package search_test + +import ( + 
"encoding/json" + "flag" + "fmt" + "reflect" + "sort" + "strings" + "testing" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/index" + "camlistore.org/pkg/index/indextest" + . "camlistore.org/pkg/search" + "camlistore.org/pkg/test" + "camlistore.org/pkg/types" +) + +// indexType is one of the three ways we test the query handler code. +type indexType int + +var queryType = flag.String("querytype", "", "Empty for all query types, else 'classic', 'scan', or 'build'") + +const ( + indexClassic indexType = iota // sorted key/value pairs from index.Storage + indexCorpusScan // *Corpus scanned from key/value pairs on start + indexCorpusBuild // empty *Corpus, built iteratively as blob received. +) + +var ( + allIndexTypes = []indexType{indexClassic, indexCorpusScan, indexCorpusBuild} + memIndexTypes = []indexType{indexCorpusScan, indexCorpusBuild} + corpusTypeOnly = []indexType{indexCorpusScan} +) + +func (i indexType) String() string { + switch i { + case indexClassic: + return "classic" + case indexCorpusScan: + return "scan" + case indexCorpusBuild: + return "build" + default: + return fmt.Sprintf("unknown-index-type-%d", i) + } +} + +type queryTest struct { + t testing.TB + id *indextest.IndexDeps + itype indexType + + Handler func() *Handler +} + +func querySetup(t testing.TB) (*indextest.IndexDeps, *Handler) { + idx := index.NewMemoryIndex() // string key-value pairs in memory, as if they were on disk + id := indextest.NewIndexDeps(idx) + id.Fataler = t + h := NewHandler(idx, id.SignerBlobRef) + return id, h +} + +func testQuery(t testing.TB, fn func(*queryTest)) { + testQueryTypes(t, allIndexTypes, fn) +} + +func testQueryTypes(t testing.TB, types []indexType, fn func(*queryTest)) { + defer test.TLog(t)() + for _, it := range types { + if *queryType == "" || *queryType == it.String() { + t.Logf("Testing: --querytype=%s ...", it) + testQueryType(t, fn, it) + } + } +} + +func testQueryType(t testing.TB, fn func(*queryTest), itype indexType) { + defer 
index.SetVerboseCorpusLogging(true) + index.SetVerboseCorpusLogging(false) + + idx := index.NewMemoryIndex() // string key-value pairs in memory, as if they were on disk + var err error + var corpus *index.Corpus + if itype == indexCorpusBuild { + if corpus, err = idx.KeepInMemory(); err != nil { + t.Fatal(err) + } + } + qt := &queryTest{ + t: t, + id: indextest.NewIndexDeps(idx), + itype: itype, + } + qt.id.Fataler = t + qt.Handler = func() *Handler { + h := NewHandler(idx, qt.id.SignerBlobRef) + if itype == indexCorpusScan { + if corpus, err = idx.KeepInMemory(); err != nil { + t.Fatal(err) + } + idx.PreventStorageAccessForTesting() + } + if corpus != nil { + h.SetCorpus(corpus) + } + return h + } + fn(qt) +} + +func dumpRes(t *testing.T, res *SearchResult) { + t.Logf("Got: %#v", res) + for i, got := range res.Blobs { + t.Logf(" %d. %s", i, got) + } +} + +func (qt *queryTest) wantRes(req *SearchQuery, wanted ...blob.Ref) { + if qt.itype == indexClassic { + req.Sort = Unsorted + } + res, err := qt.Handler().Query(req) + if err != nil { + qt.t.Fatal(err) + } + + need := make(map[blob.Ref]bool) + for _, br := range wanted { + need[br] = true + } + for _, bi := range res.Blobs { + if !need[bi.Blob] { + qt.t.Errorf("unexpected search result: %v", bi.Blob) + } else { + delete(need, bi.Blob) + } + } + for br := range need { + qt.t.Errorf("missing from search result: %v", br) + } +} + +func TestQuery(t *testing.T) { + testQuery(t, func(qt *queryTest) { + fileRef, wholeRef := qt.id.UploadFile("file.txt", "the content", time.Unix(1382073153, 0)) + + sq := &SearchQuery{ + Constraint: &Constraint{ + Anything: true, + }, + Limit: 0, + Sort: UnspecifiedSort, + } + qt.wantRes(sq, fileRef, wholeRef) + }) +} + +func TestQueryCamliType(t *testing.T) { + testQuery(t, func(qt *queryTest) { + fileRef, _ := qt.id.UploadFile("file.txt", "foo", time.Unix(1382073153, 0)) + sq := &SearchQuery{ + Constraint: &Constraint{ + CamliType: "file", + }, + } + qt.wantRes(sq, fileRef) + }) +} + 
+func TestQueryAnyCamliType(t *testing.T) { + testQuery(t, func(qt *queryTest) { + fileRef, _ := qt.id.UploadFile("file.txt", "foo", time.Unix(1382073153, 0)) + + sq := &SearchQuery{ + Constraint: &Constraint{ + AnyCamliType: true, + }, + } + qt.wantRes(sq, fileRef) + }) +} + +func TestQueryBlobSize(t *testing.T) { + testQuery(t, func(qt *queryTest) { + _, smallFileRef := qt.id.UploadFile("file.txt", strings.Repeat("x", 5<<10), time.Unix(1382073153, 0)) + qt.id.UploadFile("file.txt", strings.Repeat("x", 20<<10), time.Unix(1382073153, 0)) + + sq := &SearchQuery{ + Constraint: &Constraint{ + BlobSize: &IntConstraint{ + Min: 4 << 10, + Max: 6 << 10, + }, + }, + } + qt.wantRes(sq, smallFileRef) + }) +} + +func TestQueryBlobRefPrefix(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + // foo is 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 + id.UploadFile("file.txt", "foo", time.Unix(1382073153, 0)) + // "bar.." is 08ef767ba2c93f8f40902118fa5260a65a2a4975 + id.UploadFile("file.txt", "bar..", time.Unix(1382073153, 0)) + + sq := &SearchQuery{ + Constraint: &Constraint{ + BlobRefPrefix: "sha1-0", + }, + } + sres, err := qt.Handler().Query(sq) + if err != nil { + t.Fatal(err) + } + if len(sres.Blobs) < 2 { + t.Errorf("expected at least 2 matches; got %d", len(sres.Blobs)) + } + for _, res := range sres.Blobs { + brStr := res.Blob.String() + if !strings.HasPrefix(brStr, "sha1-0") { + t.Errorf("matched blob %s didn't begin with sha1-0", brStr) + } + } + }) +} + +func TestQueryTwoConstraints(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + id.UploadString("a") // 86f7e437faa5a7fce15d1ddcb9eaeaea377667b8 + b := id.UploadString("b") // e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98 + id.UploadString("c4") // e4666a670f042877c67a84473a71675ee0950a08 + + sq := &SearchQuery{ + Constraint: &Constraint{ + BlobRefPrefix: "sha1-e", // matches b and c4 + BlobSize: &IntConstraint{ // matches a and b + Min: 1, + Max: 1, + }, + }, + } + qt.wantRes(sq, b) + }) +} 
+ +func TestQueryLogicalOr(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + // foo is 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 + _, foo := id.UploadFile("file.txt", "foo", time.Unix(1382073153, 0)) + // "bar.." is 08ef767ba2c93f8f40902118fa5260a65a2a4975 + _, bar := id.UploadFile("file.txt", "bar..", time.Unix(1382073153, 0)) + + sq := &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: "or", + A: &Constraint{ + BlobRefPrefix: "sha1-0beec7b5ea3f0fdbc95d0dd", + }, + B: &Constraint{ + BlobRefPrefix: "sha1-08ef767ba2c93f8f40", + }, + }, + }, + } + qt.wantRes(sq, foo, bar) + }) +} + +func TestQueryLogicalAnd(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + // foo is 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 + _, foo := id.UploadFile("file.txt", "foo", time.Unix(1382073153, 0)) + // "bar.." is 08ef767ba2c93f8f40902118fa5260a65a2a4975 + id.UploadFile("file.txt", "bar..", time.Unix(1382073153, 0)) + + sq := &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: "and", + A: &Constraint{ + BlobRefPrefix: "sha1-0", + }, + B: &Constraint{ + BlobSize: &IntConstraint{ + Max: int64(len("foo")), // excludes "bar.." + }, + }, + }, + }, + } + qt.wantRes(sq, foo) + }) +} + +func TestQueryLogicalXor(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + // foo is 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 + _, foo := id.UploadFile("file.txt", "foo", time.Unix(1382073153, 0)) + // "bar.." 
is 08ef767ba2c93f8f40902118fa5260a65a2a4975 + id.UploadFile("file.txt", "bar..", time.Unix(1382073153, 0)) + + sq := &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: "xor", + A: &Constraint{ + BlobRefPrefix: "sha1-0", + }, + B: &Constraint{ + BlobRefPrefix: "sha1-08ef767ba2c93f8f40", + }, + }, + }, + } + qt.wantRes(sq, foo) + }) +} + +func TestQueryLogicalNot(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + // foo is 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 + _, foo := id.UploadFile("file.txt", "foo", time.Unix(1382073153, 0)) + // "bar.." is 08ef767ba2c93f8f40902118fa5260a65a2a4975 + _, bar := id.UploadFile("file.txt", "bar..", time.Unix(1382073153, 0)) + + sq := &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: "not", + A: &Constraint{ + CamliType: "file", + }, + }, + }, + } + qt.wantRes(sq, foo, bar) + }) +} + +func TestQueryPermanodeAttrExact(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + p1 := id.NewPlannedPermanode("1") + p2 := id.NewPlannedPermanode("2") + id.SetAttribute(p1, "someAttr", "value1") + id.SetAttribute(p2, "someAttr", "value2") + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "someAttr", + Value: "value1", + }, + }, + } + qt.wantRes(sq, p1) + }) +} + +func TestQueryPermanodeAttrMatches(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + p1 := id.NewPlannedPermanode("1") + p2 := id.NewPlannedPermanode("2") + p3 := id.NewPlannedPermanode("3") + id.SetAttribute(p1, "someAttr", "value1") + id.SetAttribute(p2, "someAttr", "value2") + id.SetAttribute(p3, "someAttr", "NOT starting with value") + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "someAttr", + ValueMatches: &StringConstraint{ + HasPrefix: "value", + }, + }, + }, + } + qt.wantRes(sq, p1, p2) + }) +} + +func TestQueryPermanodeAttrNumValue(t *testing.T) { + testQuery(t, func(qt 
*queryTest) { + id := qt.id + + // TODO(bradfitz): if we set an empty attribute value here and try to search + // by NumValue IntConstraint Min = 1, it fails only in classic (no corpus) mode. + // Something there must be skipping empty values. + p1 := id.NewPlannedPermanode("1") + id.AddAttribute(p1, "x", "1") + id.AddAttribute(p1, "x", "2") + p2 := id.NewPlannedPermanode("2") + id.AddAttribute(p2, "x", "1") + id.AddAttribute(p2, "x", "2") + id.AddAttribute(p2, "x", "3") + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "x", + NumValue: &IntConstraint{ + Min: 3, + }, + }, + }, + } + qt.wantRes(sq, p2) + }) +} + +// Tests that NumValue queries with ZeroMax return permanodes without any values. +func TestQueryPermanodeAttrNumValueZeroMax(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + p1 := id.NewPlannedPermanode("1") + id.AddAttribute(p1, "x", "1") + p2 := id.NewPlannedPermanode("2") + id.AddAttribute(p2, "y", "1") // Permanodes without any attributes are ignored. + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "x", + NumValue: &IntConstraint{ + ZeroMax: true, + }, + }, + }, + } + qt.wantRes(sq, p2) + }) +} + +// find a permanode (p2) that has a property being a blobref pointing +// to a sub-query +func TestQueryPermanodeAttrValueInSet(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + p1 := id.NewPlannedPermanode("1") + id.SetAttribute(p1, "bar", "baz") + p2 := id.NewPlannedPermanode("2") + id.SetAttribute(p2, "foo", p1.String()) + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "foo", + ValueInSet: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "bar", + Value: "baz", + }, + }, + }, + }, + } + qt.wantRes(sq, p2) + }) +} + +// Tests PermanodeConstraint.ValueMatchesInt. 
+func TestQueryPermanodeValueMatchesInt(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + p1 := id.NewPlannedPermanode("1") + p2 := id.NewPlannedPermanode("2") + p3 := id.NewPlannedPermanode("3") + p4 := id.NewPlannedPermanode("4") + p5 := id.NewPlannedPermanode("5") + id.SetAttribute(p1, "x", "-5") + id.SetAttribute(p2, "x", "0") + id.SetAttribute(p3, "x", "2") + id.SetAttribute(p4, "x", "10.0") + id.SetAttribute(p5, "x", "abc") + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "x", + ValueMatchesInt: &IntConstraint{ + Min: -2, + }, + }, + }, + } + qt.wantRes(sq, p2, p3) + }) +} + +// Tests PermanodeConstraint.ValueMatchesFloat. +func TestQueryPermanodeValueMatchesFloat(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + p1 := id.NewPlannedPermanode("1") + p2 := id.NewPlannedPermanode("2") + p3 := id.NewPlannedPermanode("3") + p4 := id.NewPlannedPermanode("4") + id.SetAttribute(p1, "x", "2.5") + id.SetAttribute(p2, "x", "5.7") + id.SetAttribute(p3, "x", "10") + id.SetAttribute(p4, "x", "abc") + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "x", + ValueMatchesFloat: &FloatConstraint{ + Max: 6.0, + }, + }, + }, + } + qt.wantRes(sq, p1, p2) + }) +} + +// find permanodes matching a certain file query +func TestQueryFileConstraint(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + fileRef, _ := id.UploadFile("some-stuff.txt", "hello", time.Unix(123, 0)) + qt.t.Logf("fileRef = %q", fileRef) + p1 := id.NewPlannedPermanode("1") + id.SetAttribute(p1, "camliContent", fileRef.String()) + + fileRef2, _ := id.UploadFile("other-file", "hellooooo", time.Unix(456, 0)) + qt.t.Logf("fileRef2 = %q", fileRef2) + p2 := id.NewPlannedPermanode("2") + id.SetAttribute(p2, "camliContent", fileRef2.String()) + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + 
File: &FileConstraint{ + FileName: &StringConstraint{ + Contains: "-stuff", + }, + FileSize: &IntConstraint{ + Max: 5, + }, + }, + }, + }, + }, + } + qt.wantRes(sq, p1) + }) +} + +func TestQueryFileConstraint_WholeRef(t *testing.T) { + testQueryTypes(t, memIndexTypes, func(qt *queryTest) { + id := qt.id + fileRef, _ := id.UploadFile("some-stuff.txt", "hello", time.Unix(123, 0)) + qt.t.Logf("fileRef = %q", fileRef) + p1 := id.NewPlannedPermanode("1") + id.SetAttribute(p1, "camliContent", fileRef.String()) + + fileRef2, _ := id.UploadFile("other-file", "hellooooo", time.Unix(456, 0)) + qt.t.Logf("fileRef2 = %q", fileRef2) + p2 := id.NewPlannedPermanode("2") + id.SetAttribute(p2, "camliContent", fileRef2.String()) + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "camliContent", + ValueInSet: &Constraint{ + File: &FileConstraint{ + WholeRef: blob.SHA1FromString("hello"), + }, + }, + }, + }, + } + qt.wantRes(sq, p1) + }) +} + +func TestQueryPermanodeModtime(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + // indextest advances time one second per operation: + p1 := id.NewPlannedPermanode("1") + p2 := id.NewPlannedPermanode("2") + p3 := id.NewPlannedPermanode("3") + id.SetAttribute(p1, "someAttr", "value1") // 2011-11-28 01:32:37.000123456 +0000 UTC 1322443957 + id.SetAttribute(p2, "someAttr", "value2") // 2011-11-28 01:32:38.000123456 +0000 UTC 1322443958 + id.SetAttribute(p3, "someAttr", "value3") // 2011-11-28 01:32:39.000123456 +0000 UTC 1322443959 + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + ModTime: &TimeConstraint{ + After: types.Time3339(time.Unix(1322443957, 456789)), + Before: types.Time3339(time.Unix(1322443959, 0)), + }, + }, + }, + } + qt.wantRes(sq, p2) + }) +} + +// This really belongs in pkg/index for the index-vs-corpus tests, but +// it's easier here for now. 
+// TODO: make all the indextest/tests.go +// also test the three memory build modes that testQuery does. +func TestDecodeFileInfo(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + fileRef, wholeRef := id.UploadFile("file.gif", "GIF87afoo", time.Unix(456, 0)) + res, err := qt.Handler().Describe(&DescribeRequest{ + BlobRef: fileRef, + }) + if err != nil { + qt.t.Error(err) + return + } + db := res.Meta[fileRef.String()] + if db == nil { + qt.t.Error("DescribedBlob missing") + return + } + if db.File == nil { + qt.t.Error("DescribedBlob.File is nil") + return + } + if db.File.MIMEType != "image/gif" { + qt.t.Errorf("DescribedBlob.File = %+v; mime type is not image/gif", db.File) + return + } + if db.File.WholeRef != wholeRef { + qt.t.Errorf("DescribedBlob.WholeRef: got %v, wanted %v", wholeRef, db.File.WholeRef) + return + } + }) +} + +func TestQueryRecentPermanodes_UnspecifiedSort(t *testing.T) { + testQueryRecentPermanodes(t, UnspecifiedSort, "corpus_permanode_created") +} + +func TestQueryRecentPermanodes_LastModifiedDesc(t *testing.T) { + testQueryRecentPermanodes(t, LastModifiedDesc, "corpus_permanode_lastmod") +} + +func TestQueryRecentPermanodes_CreatedDesc(t *testing.T) { + testQueryRecentPermanodes(t, CreatedDesc, "corpus_permanode_created") +} + +func testQueryRecentPermanodes(t *testing.T, sortType SortType, source string) { + testQueryTypes(t, memIndexTypes, func(qt *queryTest) { + id := qt.id + + p1 := id.NewPlannedPermanode("1") + id.SetAttribute(p1, "foo", "p1") + p2 := id.NewPlannedPermanode("2") + id.SetAttribute(p2, "foo", "p2") + p3 := id.NewPlannedPermanode("3") + id.SetAttribute(p3, "foo", "p3") + + var usedSource string + ExportSetCandidateSourceHook(func(s string) { + usedSource = s + }) + + req := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{}, + }, + Limit: 2, + Sort: sortType, + Describe: &DescribeRequest{}, + } + handler := qt.Handler() + res, err := handler.Query(req) + if err != nil { + 
qt.t.Fatal(err) + } + if usedSource != source { + t.Errorf("used candidate source strategy %q; want %v", usedSource, source) + } + wantBlobs := []*SearchResultBlob{ + {Blob: p3}, + {Blob: p2}, + } + if !reflect.DeepEqual(res.Blobs, wantBlobs) { + gotj, wantj := prettyJSON(res.Blobs), prettyJSON(wantBlobs) + t.Errorf("Got blobs:\n%s\nWant:\n%s\n", gotj, wantj) + } + if got := len(res.Describe.Meta); got != 2 { + t.Errorf("got %d described blobs; want 2", got) + } + + // And test whether continue (for infinite scroll) works: + { + if got, want := res.Continue, "pn:1322443958000123456:sha1-fbb5be10fcb4c88d32cfdddb20a7b8d13e9ba284"; got != want { + t.Fatalf("Continue token = %q; want %q", got, want) + } + req := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{}, + }, + Limit: 2, + Sort: sortType, + Continue: res.Continue, + } + res, err := handler.Query(req) + if err != nil { + qt.t.Fatal(err) + } + wantBlobs := []*SearchResultBlob{{Blob: p1}} + if !reflect.DeepEqual(res.Blobs, wantBlobs) { + gotj, wantj := prettyJSON(res.Blobs), prettyJSON(wantBlobs) + t.Errorf("After scroll, got blobs:\n%s\nWant:\n%s\n", gotj, wantj) + } + } + }) +} + +func TestQueryRecentPermanodes_Continue_UnspecifiedSort(t *testing.T) { + testQueryRecentPermanodes_Continue(t, UnspecifiedSort) +} + +func TestQueryRecentPermanodes_Continue_LastModifiedDesc(t *testing.T) { + testQueryRecentPermanodes_Continue(t, LastModifiedDesc) +} + +func TestQueryRecentPermanodes_Continue_CreatedDesc(t *testing.T) { + testQueryRecentPermanodes_Continue(t, CreatedDesc) +} + +// Tests the continue token on recent permanodes, notably when the +// page limit truncates in the middle of a bunch of permanodes with the +// same modtime. 
+func testQueryRecentPermanodes_Continue(t *testing.T, sortType SortType) { + testQueryTypes(t, memIndexTypes, func(qt *queryTest) { + id := qt.id + + var blobs []blob.Ref + for i := 1; i <= 4; i++ { + pn := id.NewPlannedPermanode(fmt.Sprint(i)) + blobs = append(blobs, pn) + t.Logf("permanode %d is %v", i, pn) + id.SetAttribute_NoTimeMove(pn, "foo", "bar") + } + sort.Sort(blob.ByRef(blobs)) + for i, br := range blobs { + t.Logf("Sorted %d = %v", i, br) + } + handler := qt.Handler() + + contToken := "" + tests := [][]blob.Ref{ + []blob.Ref{blobs[3], blobs[2]}, + []blob.Ref{blobs[1], blobs[0]}, + []blob.Ref{}, + } + + for i, wantBlobs := range tests { + req := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{}, + }, + Limit: 2, + Sort: sortType, + Continue: contToken, + } + res, err := handler.Query(req) + if err != nil { + qt.t.Fatalf("Error on query %d: %v", i+1, err) + } + t.Logf("Query %d/%d: continue = %q", i+1, len(tests), res.Continue) + for i, sb := range res.Blobs { + t.Logf(" res[%d]: %v", i, sb.Blob) + } + + var want []*SearchResultBlob + for _, br := range wantBlobs { + want = append(want, &SearchResultBlob{Blob: br}) + } + if !reflect.DeepEqual(res.Blobs, want) { + gotj, wantj := prettyJSON(res.Blobs), prettyJSON(want) + t.Fatalf("Query %d: Got blobs:\n%s\nWant:\n%s\n", i+1, gotj, wantj) + } + contToken = res.Continue + haveToken := contToken != "" + wantHaveToken := (i + 1) < len(tests) + if haveToken != wantHaveToken { + t.Fatalf("Query %d: token = %q; want token = %v", i+1, contToken, wantHaveToken) + } + } + }) +} + +func TestQueryRecentPermanodes_ContinueEndMidPage_UnspecifiedSort(t *testing.T) { + testQueryRecentPermanodes_ContinueEndMidPage(t, UnspecifiedSort) +} + +func TestQueryRecentPermanodes_ContinueEndMidPage_LastModifiedDesc(t *testing.T) { + testQueryRecentPermanodes_ContinueEndMidPage(t, LastModifiedDesc) +} + +func TestQueryRecentPermanodes_ContinueEndMidPage_CreatedDesc(t *testing.T) { + 
testQueryRecentPermanodes_ContinueEndMidPage(t, CreatedDesc) +} + +// Tests continue token hitting the end mid-page. +func testQueryRecentPermanodes_ContinueEndMidPage(t *testing.T, sortType SortType) { + testQueryTypes(t, memIndexTypes, func(qt *queryTest) { + id := qt.id + + var blobs []blob.Ref + for i := 1; i <= 3; i++ { + pn := id.NewPlannedPermanode(fmt.Sprint(i)) + blobs = append(blobs, pn) + t.Logf("permanode %d is %v", i, pn) + id.SetAttribute_NoTimeMove(pn, "foo", "bar") + } + sort.Sort(blob.ByRef(blobs)) + for i, br := range blobs { + t.Logf("Sorted %d = %v", i, br) + } + handler := qt.Handler() + + contToken := "" + tests := [][]blob.Ref{ + []blob.Ref{blobs[2], blobs[1]}, + []blob.Ref{blobs[0]}, + } + + for i, wantBlobs := range tests { + req := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{}, + }, + Limit: 2, + Sort: sortType, + Continue: contToken, + } + res, err := handler.Query(req) + if err != nil { + qt.t.Fatalf("Error on query %d: %v", i+1, err) + } + t.Logf("Query %d/%d: continue = %q", i+1, len(tests), res.Continue) + for i, sb := range res.Blobs { + t.Logf(" res[%d]: %v", i, sb.Blob) + } + + var want []*SearchResultBlob + for _, br := range wantBlobs { + want = append(want, &SearchResultBlob{Blob: br}) + } + if !reflect.DeepEqual(res.Blobs, want) { + gotj, wantj := prettyJSON(res.Blobs), prettyJSON(want) + t.Fatalf("Query %d: Got blobs:\n%s\nWant:\n%s\n", i+1, gotj, wantj) + } + contToken = res.Continue + haveToken := contToken != "" + wantHaveToken := (i + 1) < len(tests) + if haveToken != wantHaveToken { + t.Fatalf("Query %d: token = %q; want token = %v", i+1, contToken, wantHaveToken) + } + } + }) +} + +// Tests PermanodeConstraint.ValueAll +func TestQueryPermanodeValueAll(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + p1 := id.NewPlannedPermanode("1") + p2 := id.NewPlannedPermanode("2") + id.SetAttribute(p1, "attr", "foo") + id.SetAttribute(p1, "attr", "barrrrr") + id.SetAttribute(p2, 
"attr", "foo") + id.SetAttribute(p2, "attr", "bar") + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "attr", + ValueAll: true, + ValueMatches: &StringConstraint{ + ByteLength: &IntConstraint{ + Min: 3, + Max: 3, + }, + }, + }, + }, + } + qt.wantRes(sq, p2) + }) +} + +// Tests PermanodeConstraint.ValueMatches.CaseInsensitive. +func TestQueryPermanodeValueMatchesCaseInsensitive(t *testing.T) { + testQuery(t, func(qt *queryTest) { + id := qt.id + + p1 := id.NewPlannedPermanode("1") + p2 := id.NewPlannedPermanode("2") + + id.SetAttribute(p1, "x", "Foo") + id.SetAttribute(p2, "x", "start") + + sq := &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: "or", + + A: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "x", + ValueMatches: &StringConstraint{ + Equals: "foo", + CaseInsensitive: true, + }, + }, + }, + + B: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "x", + ValueMatches: &StringConstraint{ + Contains: "TAR", + CaseInsensitive: true, + }, + }, + }, + }, + }, + } + qt.wantRes(sq, p1, p2) + }) +} + +func TestQueryChildren(t *testing.T) { + testQueryTypes(t, memIndexTypes, func(qt *queryTest) { + id := qt.id + + pdir := id.NewPlannedPermanode("some_dir") + p1 := id.NewPlannedPermanode("1") + p2 := id.NewPlannedPermanode("2") + p3 := id.NewPlannedPermanode("3") + + id.AddAttribute(pdir, "camliMember", p1.String()) + id.AddAttribute(pdir, "camliPath:foo", p2.String()) + id.AddAttribute(pdir, "other", p3.String()) + + // Make p1, p2, and p3 actually exist. (permanodes without attributes are dead) + id.AddAttribute(p1, "x", "x") + id.AddAttribute(p2, "x", "x") + id.AddAttribute(p3, "x", "x") + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + Relation: &RelationConstraint{ + Relation: "parent", + Any: &Constraint{ + BlobRefPrefix: pdir.String(), + }, + }, + }, + }, + } + qt.wantRes(sq, p1, p2) + }) +} + +// 13 permanodes are created. 
1 of them the parent, 11 are children +// (== results), 1 is unrelated to the parent. +// limit is the limit on the number of results. +// pos is the position of the around permanode. +// note: pos is in the permanode creation order, but keep in mind +// they're enumerated in the opposite order. +func testAroundChildren(limit, pos int, t *testing.T) { + testQueryTypes(t, memIndexTypes, func(qt *queryTest) { + id := qt.id + + pdir := id.NewPlannedPermanode("some_dir") + p0 := id.NewPlannedPermanode("0") + p1 := id.NewPlannedPermanode("1") + p2 := id.NewPlannedPermanode("2") + p3 := id.NewPlannedPermanode("3") + p4 := id.NewPlannedPermanode("4") + p5 := id.NewPlannedPermanode("5") + p6 := id.NewPlannedPermanode("6") + p7 := id.NewPlannedPermanode("7") + p8 := id.NewPlannedPermanode("8") + p9 := id.NewPlannedPermanode("9") + p10 := id.NewPlannedPermanode("10") + p11 := id.NewPlannedPermanode("11") + + id.AddAttribute(pdir, "camliMember", p0.String()) + id.AddAttribute(pdir, "camliMember", p1.String()) + id.AddAttribute(pdir, "camliPath:foo", p2.String()) + const noMatchIndex = 3 + id.AddAttribute(pdir, "other", p3.String()) + id.AddAttribute(pdir, "camliPath:bar", p4.String()) + id.AddAttribute(pdir, "camliMember", p5.String()) + id.AddAttribute(pdir, "camliMember", p6.String()) + id.AddAttribute(pdir, "camliMember", p7.String()) + id.AddAttribute(pdir, "camliMember", p8.String()) + id.AddAttribute(pdir, "camliMember", p9.String()) + id.AddAttribute(pdir, "camliMember", p10.String()) + id.AddAttribute(pdir, "camliMember", p11.String()) + + // Predict the results + var around blob.Ref + lowLimit := pos - limit/2 + if lowLimit <= noMatchIndex { + // Because 3 is not included in the results + lowLimit-- + } + if lowLimit < 0 { + lowLimit = 0 + } + highLimit := lowLimit + limit + if highLimit >= noMatchIndex { + // Because noMatchIndex is not included in the results + highLimit++ + } + var want []blob.Ref + // Make the permanodes actually exist. 
(permanodes without attributes are dead) + for k, v := range []blob.Ref{p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11} { + id.AddAttribute(v, "x", "x") + if k == pos { + around = v + } + if k != noMatchIndex && k >= lowLimit && k < highLimit { + want = append(want, v) + } + } + // invert the order because the results are appended in reverse creation order + // because that's how we enumerate. + revWant := make([]blob.Ref, len(want)) + for k, v := range want { + revWant[len(want)-1-k] = v + } + + sq := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{ + Relation: &RelationConstraint{ + Relation: "parent", + Any: &Constraint{ + BlobRefPrefix: pdir.String(), + }, + }, + }, + }, + Limit: limit, + Around: around, + } + qt.wantRes(sq, revWant...) + }) + +} + +// TODO(mpl): more tests. at least the 0 results case. + +// Around will be found in the first buffered window of results, +// because it's a position that fits within the limit. +// So it doesn't exercice the part of the algorithm that discards +// the would-be results that are not within the "around zone". +func TestQueryChildrenAroundNear(t *testing.T) { + testAroundChildren(5, 9, t) +} + +// pos is near the end of the results enumeration and the limit is small +// so this test should go through the part of the algorithm that discards +// results not within the "around zone". +func TestQueryChildrenAroundFar(t *testing.T) { + testAroundChildren(3, 4, t) +} + +// permanodes tagged "foo" or those in sets where the parent +// permanode set itself is tagged "foo". 
+func TestQueryPermanodeTaggedViaParent(t *testing.T) { + t.Skip("TODO: finish implementing") + + testQuery(t, func(qt *queryTest) { + id := qt.id + + ptagged := id.NewPlannedPermanode("tagged_photo") + pindirect := id.NewPlannedPermanode("via_parent") + pset := id.NewPlannedPermanode("set") + pboth := id.NewPlannedPermanode("both") // funny directly and via its parent + pnotfunny := id.NewPlannedPermanode("not_funny") + + id.SetAttribute(ptagged, "tag", "funny") + id.SetAttribute(pset, "tag", "funny") + id.SetAttribute(pboth, "tag", "funny") + id.AddAttribute(pset, "camliMember", pindirect.String()) + id.AddAttribute(pset, "camliMember", pboth.String()) + id.SetAttribute(pnotfunny, "tag", "boring") + + sq := &SearchQuery{ + Constraint: &Constraint{ + Logical: &LogicalConstraint{ + Op: "or", + + // Those tagged funny directly: + A: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "tag", + Value: "funny", + }, + }, + + // Those tagged funny indirectly: + B: &Constraint{ + Permanode: &PermanodeConstraint{ + Relation: &RelationConstraint{ + Relation: "ancestor", + Any: &Constraint{ + Permanode: &PermanodeConstraint{ + Attr: "tag", + Value: "funny", + }, + }, + }, + }, + }, + }, + }, + } + qt.wantRes(sq, ptagged, pset, pboth, pindirect) + }) +} + +func TestLimitDoesntDeadlock_UnspecifiedSort(t *testing.T) { + testLimitDoesntDeadlock(t, UnspecifiedSort) +} + +func TestLimitDoesntDeadlock_LastModifiedDesc(t *testing.T) { + testLimitDoesntDeadlock(t, LastModifiedDesc) +} + +func TestLimitDoesntDeadlock_CreatedDesc(t *testing.T) { + testLimitDoesntDeadlock(t, CreatedDesc) +} + +func testLimitDoesntDeadlock(t *testing.T, sortType SortType) { + testQueryTypes(t, memIndexTypes, func(qt *queryTest) { + id := qt.id + + const limit = 2 + for i := 0; i < ExportBufferedConst()+limit+1; i++ { + pn := id.NewPlannedPermanode(fmt.Sprint(i)) + id.SetAttribute(pn, "foo", "bar") + } + + req := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{}, + }, + 
Limit: limit, + Sort: sortType, + Describe: &DescribeRequest{}, + } + h := qt.Handler() + gotRes := make(chan bool, 1) + go func() { + _, err := h.Query(req) + if err != nil { + qt.t.Error(err) + } + gotRes <- true + }() + select { + case <-gotRes: + case <-time.After(5 * time.Second): + t.Error("timeout; deadlock?") + } + }) +} + +func prettyJSON(v interface{}) string { + b, err := json.MarshalIndent(v, "", " ") + if err != nil { + panic(err) + } + return string(b) +} + +func TestPlannedQuery(t *testing.T) { + tests := []struct { + in, want *SearchQuery + }{ + { + in: &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{}, + }, + }, + want: &SearchQuery{ + Sort: CreatedDesc, + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{}, + }, + Limit: 200, + }, + }, + } + for i, tt := range tests { + got := tt.in.ExportPlannedQuery() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. for input:\n%s\ngot:\n%s\nwant:\n%s\n", i, + prettyJSON(tt.in), prettyJSON(got), prettyJSON(tt.want)) + } + } +} + +func TestDescribeMarshal(t *testing.T) { + // Empty Describe + q := &SearchQuery{ + Describe: &DescribeRequest{}, + } + enc, err := json.Marshal(q) + if err != nil { + t.Fatal(err) + } + if got, want := string(enc), `{"around":null,"describe":{"blobref":null,"at":null}}`; got != want { + t.Errorf("JSON: %s; want %s", got, want) + } + back := &SearchQuery{} + err = json.Unmarshal(enc, back) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(q, back) { + t.Errorf("Didn't round-trip. 
Got %#v; want %#v", back, q) + } + + // DescribeRequest with multiple blobref + q = &SearchQuery{ + Describe: &DescribeRequest{ + BlobRefs: []blob.Ref{blob.MustParse("sha-1234"), blob.MustParse("sha-abcd")}, + }, + } + enc, err = json.Marshal(q) + if err != nil { + t.Fatal(err) + } + if got, want := string(enc), `{"around":null,"describe":{"blobrefs":["sha-1234","sha-abcd"],"blobref":null,"at":null}}`; got != want { + t.Errorf("JSON: %s; want %s", got, want) + } + back = &SearchQuery{} + err = json.Unmarshal(enc, back) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(q, back) { + t.Errorf("Didn't round-trip. Got %#v; want %#v", back, q) + } + + // and the zero value + q = &SearchQuery{} + enc, err = json.Marshal(q) + if err != nil { + t.Fatal(err) + } + if string(enc) != `{"around":null}` { + t.Errorf(`Zero value: %q; want null`, enc) + } +} + +func TestSortMarshal_UnspecifiedSort(t *testing.T) { + testSortMarshal(t, UnspecifiedSort) +} + +func TestSortMarshal_LastModifiedDesc(t *testing.T) { + testSortMarshal(t, LastModifiedDesc) +} + +func TestSortMarshal_CreatedDesc(t *testing.T) { + testSortMarshal(t, CreatedDesc) +} + +var sortMarshalWant = map[SortType]string{ + UnspecifiedSort: `{"around":null}`, + LastModifiedDesc: `{"sort":` + string(SortName[LastModifiedDesc]) + `,"around":null}`, + CreatedDesc: `{"sort":` + string(SortName[CreatedDesc]) + `,"around":null}`, +} + +func testSortMarshal(t *testing.T, sortType SortType) { + q := &SearchQuery{ + Sort: sortType, + } + enc, err := json.Marshal(q) + if err != nil { + t.Fatal(err) + } + if got, want := string(enc), sortMarshalWant[sortType]; got != want { + t.Errorf("JSON: %s; want %s", got, want) + } + back := &SearchQuery{} + err = json.Unmarshal(enc, back) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(q, back) { + t.Errorf("Didn't round-trip. 
Got %#v; want %#v", back, q) + } + + // and the zero value + q = &SearchQuery{} + enc, err = json.Marshal(q) + if err != nil { + t.Fatal(err) + } + if string(enc) != `{"around":null}` { + t.Errorf("Zero value: %s; want {}", enc) + } +} + +func BenchmarkQueryRecentPermanodes(b *testing.B) { + b.ReportAllocs() + testQueryTypes(b, corpusTypeOnly, func(qt *queryTest) { + id := qt.id + + p1 := id.NewPlannedPermanode("1") + id.SetAttribute(p1, "foo", "p1") + p2 := id.NewPlannedPermanode("2") + id.SetAttribute(p2, "foo", "p2") + p3 := id.NewPlannedPermanode("3") + id.SetAttribute(p3, "foo", "p3") + + req := &SearchQuery{ + Constraint: &Constraint{ + Permanode: &PermanodeConstraint{}, + }, + Limit: 2, + Sort: UnspecifiedSort, + Describe: &DescribeRequest{}, + } + + h := qt.Handler() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + *req.Describe = DescribeRequest{} + _, err := h.Query(req) + if err != nil { + qt.t.Fatal(err) + } + } + }) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/search.go b/vendor/github.com/camlistore/camlistore/pkg/search/search.go new file mode 100644 index 00000000..8ba0badb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/search.go @@ -0,0 +1,29 @@ +/* +Copyright 2011 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package search describes and answers Camlistore search queries. 
+// +// Many of the search methods or functions provide results that are +// ordered by modification time, or at least depend on modification +// times. In that context, (un)deletions (of permanodes, or attributes) +// are not considered modifications and therefore the time at which they +// occured does not affect the result. +package search + +type QueryDescriber interface { + Query(*SearchQuery) (*SearchResult, error) + Describe(*DescribeRequest) (*DescribeResponse, error) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/search/websocket.go b/vendor/github.com/camlistore/camlistore/pkg/search/websocket.go new file mode 100644 index 00000000..bb6aec00 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/search/websocket.go @@ -0,0 +1,287 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package search + +import ( + "bytes" + "encoding/json" + "log" + "net/http" + "sync" + "time" + + "camlistore.org/third_party/github.com/gorilla/websocket" +) + +const ( + // Time allowed to write a message to the peer. + writeWait = 10 * time.Second + + // Time allowed to read the next pong message from the peer. + pongWait = 60 * time.Second + + // Send pings to peer with this period. Must be less than pongWait. + pingPeriod = (pongWait * 9) / 10 + + // Maximum message size allowed from peer. 
+ maxMessageSize = 10 << 10 +) + +type wsHub struct { + sh *Handler + register chan *wsConn + unregister chan *wsConn + watchReq chan watchReq + newBlobRecv chan string // new blob received. string is camliType. + updatedResults chan *watchedQuery + statusUpdate chan json.RawMessage + + // Owned by func run: + conns map[*wsConn]bool +} + +func newWebsocketHub(sh *Handler) *wsHub { + return &wsHub{ + sh: sh, + register: make(chan *wsConn), // unbuffered; issue 563 + unregister: make(chan *wsConn), // unbuffered; issue 563 + conns: make(map[*wsConn]bool), + watchReq: make(chan watchReq, buffered), + newBlobRecv: make(chan string, buffered), + updatedResults: make(chan *watchedQuery, buffered), + statusUpdate: make(chan json.RawMessage, buffered), + } +} + +func (h *wsHub) run() { + var lastStatusMsg []byte + for { + select { + case st := <-h.statusUpdate: + const prefix = `{"tag":"_status","status":` + lastStatusMsg = make([]byte, 0, len(prefix)+len(st)+1) + lastStatusMsg = append(lastStatusMsg, prefix...) + lastStatusMsg = append(lastStatusMsg, st...) + lastStatusMsg = append(lastStatusMsg, '}') + for c := range h.conns { + c.send <- lastStatusMsg + } + case c := <-h.register: + h.conns[c] = true + c.send <- lastStatusMsg + case c := <-h.unregister: + delete(h.conns, c) + close(c.send) + case camliType := <-h.newBlobRecv: + if camliType == "" { + // TODO: something smarter. some + // queries might care about all blobs. + // But for now only re-kick off + // queries if schema blobs arrive. We + // should track per-WatchdQuery which + // blob types the search cares about. + continue + } + // New blob was received. Kick off standing search queries to see if any changed. 
+ for conn := range h.conns { + for _, wq := range conn.queries { + go h.doSearch(wq) + } + } + case wr := <-h.watchReq: + // Unsubscribe + if wr.q == nil { + delete(wr.conn.queries, wr.tag) + log.Printf("Removed subscription for %v, %q", wr.conn, wr.tag) + continue + } + // Very similar type, but semantically + // different, so separate for now: + wq := &watchedQuery{ + conn: wr.conn, + tag: wr.tag, + q: wr.q, + } + wr.conn.queries[wr.tag] = wq + log.Printf("Added/updated search subscription for tag %q", wr.tag) + go h.doSearch(wq) + + case wq := <-h.updatedResults: + if !h.conns[wq.conn] || wq.conn.queries[wq.tag] == nil { + // Client has since disconnected or unsubscribed. + continue + } + wq.mu.Lock() + lastres := wq.lastres + wq.mu.Unlock() + resb, err := json.Marshal(wsUpdateMessage{ + Tag: wq.tag, + Result: lastres, + }) + if err != nil { + panic(err) + } + wq.conn.send <- resb + } + } +} + +func (h *wsHub) doSearch(wq *watchedQuery) { + // Make our own copy, in case + q := new(SearchQuery) + *q = *wq.q // shallow copy, since Query will mutate its internal state fields + if q.Describe != nil { + q.Describe = new(DescribeRequest) + *q.Describe = *wq.q.Describe + } + + res, err := h.sh.Query(q) + if err != nil { + log.Printf("Query error: %v", err) + return + } + resj, _ := json.Marshal(res) + + wq.mu.Lock() + eq := bytes.Equal(wq.lastresj, resj) + wq.lastres = res + wq.lastresj = resj + wq.mu.Unlock() + if eq { + // No change in search. Ignore. + return + } + h.updatedResults <- wq +} + +type wsConn struct { + ws *websocket.Conn + send chan []byte // Buffered channel of outbound messages. + sh *Handler + + // queries is owned by the wsHub.run goroutine. + queries map[string]*watchedQuery // tag -> subscription +} + +type watchedQuery struct { + conn *wsConn + tag string + q *SearchQuery + + mu sync.Mutex // guards lastRes + lastres *SearchResult + lastresj []byte // as JSON +} + +// watchReq is a (un)subscribe request. 
+type watchReq struct { + conn *wsConn + tag string // required + q *SearchQuery // if nil, subscribe +} + +// Client->Server subscription message. +type wsClientMessage struct { + // Tag is required. + Tag string `json:"tag"` + // Query is required to subscribe. If absent, it means unsubscribe. + Query *SearchQuery `json:"query,omitempty"` +} + +type wsUpdateMessage struct { + Tag string `json:"tag"` + Result *SearchResult `json:"result,omitempty"` +} + +// readPump pumps messages from the websocket connection to the hub. +func (c *wsConn) readPump() { + defer func() { + c.sh.wsHub.unregister <- c + c.ws.Close() + }() + c.ws.SetReadLimit(maxMessageSize) + c.ws.SetReadDeadline(time.Now().Add(pongWait)) + c.ws.SetPongHandler(func(string) error { c.ws.SetReadDeadline(time.Now().Add(pongWait)); return nil }) + for { + _, message, err := c.ws.ReadMessage() + if err != nil { + break + } + log.Printf("Got websocket message %#q", message) + cm := new(wsClientMessage) + if err := json.Unmarshal(message, cm); err != nil { + log.Printf("Ignoring bogus websocket message. Err: %v", err) + continue + } + c.sh.wsHub.watchReq <- watchReq{ + conn: c, + tag: cm.Tag, + q: cm.Query, + } + } +} + +// write writes a message with the given message type and payload. +func (c *wsConn) write(mt int, payload []byte) error { + c.ws.SetWriteDeadline(time.Now().Add(writeWait)) + return c.ws.WriteMessage(mt, payload) +} + +// writePump pumps messages from the hub to the websocket connection. 
+func (c *wsConn) writePump() { + ticker := time.NewTicker(pingPeriod) + defer func() { + ticker.Stop() + c.ws.Close() + }() + for { + select { + case message, ok := <-c.send: + if !ok { + c.write(websocket.CloseMessage, []byte{}) + return + } + if err := c.write(websocket.TextMessage, message); err != nil { + return + } + case <-ticker.C: + if err := c.write(websocket.PingMessage, []byte{}); err != nil { + return + } + } + } +} + +func (sh *Handler) serveWebSocket(rw http.ResponseWriter, req *http.Request) { + ws, err := websocket.Upgrade(rw, req, nil, 1024, 1024) + if _, ok := err.(websocket.HandshakeError); ok { + http.Error(rw, "Not a websocket handshake", 400) + return + } else if err != nil { + log.Println(err) + return + } + c := &wsConn{ + ws: ws, + send: make(chan []byte, 256), + sh: sh, + queries: make(map[string]*watchedQuery), + } + sh.wsHub.register <- c + go c.writePump() + c.readPump() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/app/app.go b/vendor/github.com/camlistore/camlistore/pkg/server/app/app.go new file mode 100644 index 00000000..9199ab70 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/app/app.go @@ -0,0 +1,274 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package app helps with configuring and starting server applications +// from Camlistore. 
+package app + +import ( + "errors" + "fmt" + "log" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "camlistore.org/pkg/auth" + camhttputil "camlistore.org/pkg/httputil" + "camlistore.org/pkg/jsonconfig" +) + +// Handler acts as a reverse proxy for a server application started by +// Camlistore. It can also serve some extra JSON configuration to the app. +type Handler struct { + name string // Name of the app's program. + envVars map[string]string // Variables set in the app's process environment. See doc/app-environment.txt. + + auth auth.AuthMode // Used for basic HTTP authenticating against the app requests. + appConfig jsonconfig.Obj // Additional parameters the app can request, or nil. + + proxy *httputil.ReverseProxy // For redirecting requests to the app. + backendURL string // URL that we proxy to (i.e. base URL of the app). + + process *os.Process // The app's Pid. To send it signals on restart, etc. +} + +func (a *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if camhttputil.PathSuffix(req) == "config.json" { + if a.auth.AllowedAccess(req)&auth.OpGet == auth.OpGet { + camhttputil.ReturnJSON(rw, a.appConfig) + } else { + auth.SendUnauthorized(rw, req) + } + return + } + if a.proxy == nil { + http.Error(rw, "no proxy for the app", 500) + return + } + a.proxy.ServeHTTP(rw, req) +} + +// randPortBackendURL picks a random free port to listen on, and combines it +// with apiHost and appHandlerPrefix to create the appBackendURL that the app +// will listen on, and that the app handler will proxy to. 
+func randPortBackendURL(apiHost, appHandlerPrefix string) (string, error) { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return "", err + } + listener, err := net.ListenTCP("tcp", addr) + if err != nil { + return "", fmt.Errorf("could not listen to find random port: %v", err) + } + randAddr := listener.Addr().(*net.TCPAddr) + if err := listener.Close(); err != nil { + return "", fmt.Errorf("could not close random listener: %v", err) + } + + // TODO(mpl): see if can use netutil.TCPAddress. + scheme := "https://" + noScheme := strings.TrimPrefix(apiHost, scheme) + if strings.HasPrefix(noScheme, "http://") { + scheme = "http://" + noScheme = strings.TrimPrefix(noScheme, scheme) + } + hostPortPrefix := strings.SplitN(noScheme, "/", 2) + if len(hostPortPrefix) != 2 { + return "", fmt.Errorf("invalid apiHost: %q (no trailing slash?)", apiHost) + } + var host string + if strings.Contains(hostPortPrefix[0], "]") { + // we've got some IPv6 probably + hostPort := strings.Split(hostPortPrefix[0], "]") + host = hostPort[0] + "]" + } else { + hostPort := strings.Split(hostPortPrefix[0], ":") + host = hostPort[0] + } + return fmt.Sprintf("%s%s:%d%s", scheme, host, randAddr.Port, appHandlerPrefix), nil +} + +// NewHandler returns a Handler that proxies requests to an app. Start() on the +// Handler starts the app. +// The apiHost must end in a slash and is the camlistored API server for the app +// process to hit. +// The appHandlerPrefix is the URL path prefix on apiHost where the app is mounted. +// It must end in a slash, and be at minimum "/". +// The conf object has the following members, related to the vars described in +// doc/app-environment.txt: +// "program", string, required. File name of the app's program executable. Either +// an absolute path, or the name of a file located in CAMLI_APP_BINDIR or in PATH. +// "backendURL", string, optional. Automatic if absent. It sets CAMLI_APP_BACKEND_URL. +// "appConfig", object, optional. 
Additional configuration that the app can request from Camlistore. +func NewHandler(conf jsonconfig.Obj, apiHost, appHandlerPrefix string) (*Handler, error) { + // TODO: remove the appHandlerPrefix if/when we change where the app config JSON URL is made available. + name := conf.RequiredString("program") + backendURL := conf.OptionalString("backendURL", "") + appConfig := conf.OptionalObject("appConfig") + // TODO(mpl): add an auth token in the extra config of the dev server config, + // that the hello app can use to setup a status handler than only responds + // to requests with that token. + if err := conf.Validate(); err != nil { + return nil, err + } + + if apiHost == "" { + return nil, fmt.Errorf("app: could not initialize Handler for %q: Camlistore apiHost is unknown", name) + } + if appHandlerPrefix == "" { + return nil, fmt.Errorf("app: could not initialize Handler for %q: empty appHandlerPrefix", name) + } + + if backendURL == "" { + var err error + // If not specified in the conf, we're dynamically picking the port of the CAMLI_APP_BACKEND_URL + // now (instead of letting the app itself do it), because we need to know it in advance in order + // to set the app handler's proxy. 
+ backendURL, err = randPortBackendURL(apiHost, appHandlerPrefix) + if err != nil { + return nil, err + } + } + + username, password := auth.RandToken(20), auth.RandToken(20) + camliAuth := username + ":" + password + basicAuth := auth.NewBasicAuth(username, password) + envVars := map[string]string{ + "CAMLI_API_HOST": apiHost, + "CAMLI_AUTH": camliAuth, + "CAMLI_APP_BACKEND_URL": backendURL, + } + if appConfig != nil { + envVars["CAMLI_APP_CONFIG_URL"] = apiHost + strings.TrimPrefix(appHandlerPrefix, "/") + "config.json" + } + + proxyURL, err := url.Parse(backendURL) + if err != nil { + return nil, fmt.Errorf("could not parse backendURL %q: %v", backendURL, err) + } + return &Handler{ + name: name, + envVars: envVars, + auth: basicAuth, + appConfig: appConfig, + proxy: httputil.NewSingleHostReverseProxy(proxyURL), + backendURL: backendURL, + }, nil +} + +func (a *Handler) Start() error { + name := a.name + if name == "" { + return fmt.Errorf("invalid app name: %q", name) + } + var binPath string + var err error + if e := os.Getenv("CAMLI_APP_BINDIR"); e != "" { + binPath, err = exec.LookPath(filepath.Join(e, name)) + if err != nil { + log.Printf("%q executable not found in %q", name, e) + } + } + if binPath == "" || err != nil { + binPath, err = exec.LookPath(name) + if err != nil { + return fmt.Errorf("%q executable not found in PATH.", name) + } + } + + cmd := exec.Command(binPath) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + // TODO(mpl): extract Env methods from dev/devcam/env.go to a util pkg and use them here. 
+ newVars := make(map[string]string, len(a.envVars)) + for k, v := range a.envVars { + newVars[k+"="] = v + } + env := os.Environ() + for pos, oldkv := range env { + for k, newVal := range newVars { + if strings.HasPrefix(oldkv, k) { + env[pos] = k + newVal + delete(newVars, k) + break + } + } + } + for k, v := range newVars { + env = append(env, k+v) + } + cmd.Env = env + if err := cmd.Start(); err != nil { + return fmt.Errorf("could not start app %v: %v", name, err) + } + a.process = cmd.Process + return nil +} + +// ProgramName returns the name of the app's binary. It may be a file name in +// CAMLI_APP_BINDIR or PATH, or an absolute path. +func (a *Handler) ProgramName() string { + return a.name +} + +// AuthMode returns the app handler's auth mode, which is also the auth that the +// app's client will be configured with. This mode should be registered with +// the server's auth modes, for the app to have access to the server's resources. +func (a *Handler) AuthMode() auth.AuthMode { + return a.auth +} + +// AppConfig returns the optional configuration parameters object that the app +// can request from the app handler. It can be nil. +func (a *Handler) AppConfig() map[string]interface{} { + return a.appConfig +} + +// BackendURL returns the appBackendURL that the app handler will proxy to. +func (a *Handler) BackendURL() string { + return a.backendURL +} + +var errProcessTookTooLong = errors.New("proccess took too long to quit") + +// Quit sends the app's process a SIGINT, and waits up to 5 seconds for it +// to exit, returning an error if it doesn't. +func (a *Handler) Quit() error { + err := a.process.Signal(os.Interrupt) + if err != nil { + return err + } + + c := make(chan error) + go func() { + _, err := a.process.Wait() + c <- err + }() + select { + case err = <-c: + case <-time.After(5 * time.Second): + // TODO Do we want to SIGKILL here or just leave the app alone? 
+ err = errProcessTookTooLong + } + return err +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/app/app_test.go b/vendor/github.com/camlistore/camlistore/pkg/server/app/app_test.go new file mode 100644 index 00000000..019d0174 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/app/app_test.go @@ -0,0 +1,220 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package app + +import ( + "bufio" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "testing" +) + +func TestRandPortBackendURL(t *testing.T) { + tests := []struct { + apiHost string + appHandlerPrefix string + wantBackendURL string + wantErr bool + }{ + { + apiHost: "http://foo.com/", + appHandlerPrefix: "/pics/", + wantBackendURL: "http://foo.com:[0-9]+/pics/", + }, + + { + apiHost: "https://foo.com/", + appHandlerPrefix: "/pics/", + wantBackendURL: "https://foo.com:[0-9]+/pics/", + }, + + { + apiHost: "http://foo.com:8080/", + appHandlerPrefix: "/pics/", + wantBackendURL: "http://foo.com:[0-9]+/pics/", + }, + + { + apiHost: "https://foo.com:8080/", + appHandlerPrefix: "/pics/", + wantBackendURL: "https://foo.com:[0-9]+/pics/", + }, + + { + apiHost: "http://foo.com:/", + appHandlerPrefix: "/pics/", + wantBackendURL: "http://foo.com:[0-9]+/pics/", + }, + + { + apiHost: "https://foo.com:/", + appHandlerPrefix: "/pics/", + wantBackendURL: "https://foo.com:[0-9]+/pics/", + }, + + { + apiHost: "http://foo.com/bar/", + appHandlerPrefix: "/pics/", + 
wantBackendURL: "http://foo.com:[0-9]+/pics/", + }, + + { + apiHost: "https://foo.com/bar/", + appHandlerPrefix: "/pics/", + wantBackendURL: "https://foo.com:[0-9]+/pics/", + }, + + { + apiHost: "http://foo.com:8080/bar/", + appHandlerPrefix: "/pics/", + wantBackendURL: "http://foo.com:[0-9]+/pics/", + }, + + { + apiHost: "https://foo.com:8080/bar/", + appHandlerPrefix: "/pics/", + wantBackendURL: "https://foo.com:[0-9]+/pics/", + }, + + { + apiHost: "http://foo.com:/bar/", + appHandlerPrefix: "/pics/", + wantBackendURL: "http://foo.com:[0-9]+/pics/", + }, + + { + apiHost: "https://foo.com:/bar/", + appHandlerPrefix: "/pics/", + wantBackendURL: "https://foo.com:[0-9]+/pics/", + }, + + { + apiHost: "http://[::1]:80/", + appHandlerPrefix: "/pics/", + wantBackendURL: `http://\[::1\]:[0-9]+/pics/`, + }, + + { + apiHost: "https://[::1]:80/", + appHandlerPrefix: "/pics/", + wantBackendURL: `https://\[::1\]:[0-9]+/pics/`, + }, + + { + apiHost: "http://[::1]/", + appHandlerPrefix: "/pics/", + wantBackendURL: `http://\[::1\]:[0-9]+/pics/`, + }, + + { + apiHost: "https://[::1]/", + appHandlerPrefix: "/pics/", + wantBackendURL: `https://\[::1\]:[0-9]+/pics/`, + }, + + { + apiHost: "http://[::1]:/", + appHandlerPrefix: "/pics/", + wantBackendURL: `http://\[::1\]:[0-9]+/pics/`, + }, + + { + apiHost: "https://[::1]:/", + appHandlerPrefix: "/pics/", + wantBackendURL: `https://\[::1\]:[0-9]+/pics/`, + }, + } + for _, v := range tests { + got, err := randPortBackendURL(v.apiHost, v.appHandlerPrefix) + if err != nil { + t.Error(err) + continue + } + reg := regexp.MustCompile(v.wantBackendURL) + if !reg.MatchString(got) { + t.Errorf("got: %v for %v, want: %v", got, v.apiHost, v.wantBackendURL) + } + } +} + +// We just want a helper command that ignores SIGINT. 
+func ignoreInterrupt() (*os.Process, error) { + script := `trap "echo hello" SIGINT +echo READY +sleep 10000` + cmd := exec.Command("bash") + + w, err := cmd.StdinPipe() + if err != nil { + return nil, fmt.Errorf("couldn't get pipe for helper shell") + } + go io.WriteString(w, script) + + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("couldn't get pipe for helper shell") + } + + err = cmd.Start() + if err != nil { + return nil, fmt.Errorf("couldn't start helper shell") + } + + r := bufio.NewReader(stdout) + l, err := r.ReadBytes('\n') + if err != nil { + return nil, fmt.Errorf("couldn't read from helper shell") + } + if string(l) != "READY\n" { + return nil, fmt.Errorf("unexpected output from helper shell script") + } + return cmd.Process, nil +} + +func TestQuit(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + cmd := exec.Command("sleep", "10000") + err := cmd.Start() + if err != nil { + t.Skip("couldn't run test helper command") + } + h := Handler{ + process: cmd.Process, + } + err = h.Quit() + if err != nil { + t.Errorf("got %v, wanted %v", err, nil) + } + + pid, err := ignoreInterrupt() + if err != nil { + t.Skip("couldn't run test helper command: %v", err) + } + h = Handler{ + process: pid, + } + err = h.Quit() + if err != errProcessTookTooLong { + t.Errorf("got %v, wanted %v", err, errProcessTookTooLong) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/cgo_probe.go b/vendor/github.com/camlistore/camlistore/pkg/server/cgo_probe.go new file mode 100644 index 00000000..e6a010df --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/cgo_probe.go @@ -0,0 +1,23 @@ +/* +Copyright 2015 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +build cgo + +package server + +func init() { + cgoEnabled = true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/doc.go b/vendor/github.com/camlistore/camlistore/pkg/server/doc.go new file mode 100644 index 00000000..651ee0dd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package server implements the server HTTP interface for the UI, +// publishing, setup, status, sync, thubnailing, etc. +package server diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/download.go b/vendor/github.com/camlistore/camlistore/pkg/server/download.go new file mode 100644 index 00000000..83bc1998 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/download.go @@ -0,0 +1,199 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "fmt" + "io" + "log" + "net/http" + "os" + "strings" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/magic" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/search" + "camlistore.org/pkg/types" +) + +const oneYear = 365 * 86400 * time.Second + +var debugPack = strings.Contains(os.Getenv("CAMLI_DEBUG_X"), "packserve") + +type DownloadHandler struct { + Fetcher blob.Fetcher + Cache blobserver.Storage + + // Search is optional. If present, it's used to map a fileref + // to a wholeref, if the Fetcher is of a type that knows how + // to get at a wholeref more efficiently. (e.g. blobpacked) + Search *search.Handler + + ForceMIME string // optional +} + +func (dh *DownloadHandler) blobSource() blob.Fetcher { + return dh.Fetcher // TODO: use dh.Cache +} + +type fileInfo struct { + mime string + name string + size int64 + rs io.ReadSeeker + close func() error // release the rs + whyNot string // for testing, why fileInfoPacked failed. +} + +func (dh *DownloadHandler) fileInfo(req *http.Request, file blob.Ref) (fi fileInfo, packed bool, err error) { + // Fast path for blobpacked. 
+ fi, ok := fileInfoPacked(dh.Search, dh.Fetcher, req, file) + if debugPack { + log.Printf("download.go: fileInfoPacked: ok=%v, %+v", ok, fi) + } + if ok { + return fi, true, nil + } + fr, err := schema.NewFileReader(dh.blobSource(), file) + if err != nil { + return + } + mime := dh.ForceMIME + if mime == "" { + mime = magic.MIMETypeFromReaderAt(fr) + } + if mime == "" { + mime = "application/octet-stream" + } + return fileInfo{ + mime: mime, + name: fr.FileName(), + size: fr.Size(), + rs: fr, + close: fr.Close, + }, false, nil +} + +// Fast path for blobpacked. +func fileInfoPacked(sh *search.Handler, src blob.Fetcher, req *http.Request, file blob.Ref) (packFileInfo fileInfo, ok bool) { + if sh == nil { + return fileInfo{whyNot: "no search"}, false + } + wf, ok := src.(blobserver.WholeRefFetcher) + if !ok { + return fileInfo{whyNot: "fetcher type"}, false + } + if req != nil && req.Header.Get("Range") != "" { + // TODO: not handled yet. Maybe not even important, + // considering rarity. 
+ return fileInfo{whyNot: "range header"}, false + } + des, err := sh.Describe(&search.DescribeRequest{BlobRef: file}) + if err != nil { + log.Printf("ui: fileInfoPacked: skipping fast path due to error from search: %v", err) + return fileInfo{whyNot: "search error"}, false + } + db, ok := des.Meta[file.String()] + if !ok || db.File == nil { + return fileInfo{whyNot: "search index doesn't know file"}, false + } + fi := db.File + if !fi.WholeRef.Valid() { + return fileInfo{whyNot: "no wholeref from search index"}, false + } + + offset := int64(0) + rc, wholeSize, err := wf.OpenWholeRef(fi.WholeRef, offset) + if err == os.ErrNotExist { + return fileInfo{whyNot: "WholeRefFetcher returned ErrNotexist"}, false + } + if wholeSize != fi.Size { + log.Printf("ui: fileInfoPacked: OpenWholeRef size %d != index size %d; ignoring fast path", wholeSize, fi.Size) + return fileInfo{whyNot: "WholeRefFetcher and index don't agree"}, false + } + if err != nil { + log.Printf("ui: fileInfoPacked: skipping fast path due to error from WholeRefFetcher (%T): %v", src, err) + return fileInfo{whyNot: "WholeRefFetcher error"}, false + } + return fileInfo{ + mime: fi.MIMEType, + name: fi.FileName, + size: fi.Size, + rs: types.NewFakeSeeker(rc, fi.Size-offset), + close: rc.Close, + }, true +} + +func (dh *DownloadHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, file blob.Ref) { + if req.Method != "GET" && req.Method != "HEAD" { + http.Error(rw, "Invalid download method", http.StatusBadRequest) + return + } + if req.Header.Get("If-Modified-Since") != "" { + // Immutable, so any copy's a good copy. 
+ rw.WriteHeader(http.StatusNotModified) + return + } + + fi, packed, err := dh.fileInfo(req, file) + if err != nil { + http.Error(rw, "Can't serve file: "+err.Error(), http.StatusInternalServerError) + return + } + defer fi.close() + + h := rw.Header() + h.Set("Content-Length", fmt.Sprint(fi.size)) + h.Set("Expires", time.Now().Add(oneYear).Format(http.TimeFormat)) + h.Set("Content-Type", fi.mime) + if packed { + h.Set("X-Camlistore-Packed", "1") + } + + if fi.mime == "application/octet-stream" { + // Chrome seems to silently do nothing on + // application/octet-stream unless this is set. + // Maybe it's confused by lack of URL it recognizes + // along with lack of mime type? + fileName := fi.name + if fileName == "" { + fileName = "file-" + file.String() + ".dat" + } + rw.Header().Set("Content-Disposition", "attachment; filename="+fileName) + } + + if req.Method == "HEAD" && req.FormValue("verifycontents") != "" { + vbr, ok := blob.Parse(req.FormValue("verifycontents")) + if !ok { + return + } + hash := vbr.Hash() + if hash == nil { + return + } + io.Copy(hash, fi.rs) // ignore errors, caught later + if vbr.HashMatches(hash) { + rw.Header().Set("X-Camli-Contents", vbr.String()) + } + return + } + + http.ServeContent(rw, req, "", time.Now(), fi.rs) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/favicon.ico b/vendor/github.com/camlistore/camlistore/pkg/server/favicon.ico new file mode 100644 index 00000000..b63b2b56 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/server/favicon.ico differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/fileembed.go b/vendor/github.com/camlistore/camlistore/pkg/server/fileembed.go new file mode 100644 index 00000000..fdb3aa81 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/fileembed.go @@ -0,0 +1,35 @@ +/* +Copyright 2013 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +#fileembed pattern .+\.(ico)$ +*/ +package server + +import ( + "os" + "path/filepath" + + "camlistore.org/pkg/fileembed" +) + +var Files = &fileembed.Files{} + +func init() { + if root := os.Getenv("CAMLI_DEV_CAMLI_ROOT"); root != "" { + Files.DirFallback = filepath.Join(root, filepath.FromSlash("pkg/server")) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/filetree.go b/vendor/github.com/camlistore/camlistore/pkg/server/filetree.go new file mode 100644 index 00000000..daef6b35 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/filetree.go @@ -0,0 +1,87 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "log" + "net/http" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/schema" +) + +type FileTreeHandler struct { + Fetcher blob.Fetcher + file blob.Ref +} + +// FileTreeNode represents a file in a file tree. 
+// It is part of the FileTreeResponse. +type FileTreeNode struct { + // Name is the basename of the node. + Name string `json:"name"` + // Type is the camliType of the node. This may be "file", "directory", "symlink" + // or other in the future. + Type string `json:"type"` + // BlobRef is the blob.Ref of the node. + BlobRef blob.Ref `json:"blobRef"` +} + +// FileTreeResponse is the JSON response for the FileTreeHandler. +type FileTreeResponse struct { + // Children is the list of children files of a directory. + Children []FileTreeNode `json:"children"` +} + +func (fth *FileTreeHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if req.Method != "GET" && req.Method != "HEAD" { + http.Error(rw, "Invalid method", 400) + return + } + + de, err := schema.NewDirectoryEntryFromBlobRef(fth.Fetcher, fth.file) + if err != nil { + http.Error(rw, "Error reading directory", 500) + log.Printf("Error reading directory from blobref %s: %v\n", fth.file, err) + return + } + dir, err := de.Directory() + if err != nil { + http.Error(rw, "Error reading directory", 500) + log.Printf("Error reading directory from blobref %s: %v\n", fth.file, err) + return + } + entries, err := dir.Readdir(-1) + if err != nil { + http.Error(rw, "Error reading directory", 500) + log.Printf("reading dir from blobref %s: %v\n", fth.file, err) + return + } + + var ret = FileTreeResponse{ + Children: make([]FileTreeNode, 0, len(entries)), + } + for _, v := range entries { + ret.Children = append(ret.Children, FileTreeNode{ + Name: v.FileName(), + Type: v.CamliType(), + BlobRef: v.BlobRef(), + }) + } + httputil.ReturnJSON(rw, ret) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/help.go b/vendor/github.com/camlistore/camlistore/pkg/server/help.go new file mode 100644 index 00000000..f731bf60 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/help.go @@ -0,0 +1,124 @@ +/* +Copyright 2015 The Camlistore Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "encoding/json" + "fmt" + "html/template" + "net/http" + "strconv" + "sync" + + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/types/clientconfig" +) + +const helpHTML string = ` + + Help + + +

    Help

    + +

    Web User Interface

    +

    Search bar predicates.

    + +

    Client Configuration

    +

    You will need to use the following client configuration in order to access this server using the Camlistore command line tools.

    +
    {{ . }}
    + +

    Anything Else?

    +

    See the Camlistore online documentation and community contacts.

    + + ` + +// HelpHandler publishes information related to accessing the server +type HelpHandler struct { + clientConfig *clientconfig.Config // generated from serverConfig + serverConfig jsonconfig.Obj // low-level config + goTemplate *template.Template // for rendering +} + +// setServerConfigOnce guards operation within SetServerConfig +var setServerConfigOnce sync.Once + +// SetServerConfig enables the handler to receive the server config +// before InitHandler, which generates a client config from the server config, is called. +func (hh *HelpHandler) SetServerConfig(config jsonconfig.Obj) { + setServerConfigOnce.Do(func() { hh.serverConfig = config }) +} + +func init() { + blobserver.RegisterHandlerConstructor("help", newHelpFromConfig) +} + +func (hh *HelpHandler) InitHandler(hl blobserver.FindHandlerByTyper) error { + if hh.serverConfig == nil { + return fmt.Errorf("HelpHandler's serverConfig must be set before calling its InitHandler") + } + + clientConfig, err := clientconfig.GenerateClientConfig(hh.serverConfig) + if err != nil { + return fmt.Errorf("error generating client config: %v", err) + } + hh.clientConfig = clientConfig + + tmpl, err := template.New("help").Parse(helpHTML) + if err != nil { + return fmt.Errorf("error creating template: %v", err) + } + hh.goTemplate = tmpl + + return nil +} + +func newHelpFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) { + return &HelpHandler{}, nil +} + +func (hh *HelpHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + suffix := httputil.PathSuffix(req) + if !httputil.IsGet(req) { + http.Error(rw, "Illegal help method.", http.StatusMethodNotAllowed) + return + } + switch suffix { + case "": + if clientConfig := req.FormValue("clientConfig"); clientConfig != "" { + if clientConfigOnly, err := strconv.ParseBool(clientConfig); err == nil && clientConfigOnly { + httputil.ReturnJSON(rw, hh.clientConfig) + return + } + } + hh.serveHelpHTML(rw, req) + default: + 
http.Error(rw, "Illegal help path.", http.StatusNotFound) + } +} + +func (hh *HelpHandler) serveHelpHTML(rw http.ResponseWriter, req *http.Request) { + jsonBytes, err := json.MarshalIndent(hh.clientConfig, "", " ") + if err != nil { + httputil.ServeError(rw, req, fmt.Errorf("could not serialize client config JSON: %v", err)) + return + } + + hh.goTemplate.Execute(rw, string(jsonBytes)) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/image.go b/vendor/github.com/camlistore/camlistore/pkg/server/image.go new file mode 100644 index 00000000..256e5a16 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/image.go @@ -0,0 +1,418 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "bytes" + "errors" + "expvar" + "fmt" + "image" + "image/png" + "io" + "io/ioutil" + "log" + "net/http" + "strconv" + "strings" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/constants" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/images" + "camlistore.org/pkg/magic" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/search" + "camlistore.org/pkg/singleflight" + "camlistore.org/pkg/syncutil" + "camlistore.org/pkg/types" + + _ "camlistore.org/third_party/github.com/nf/cr2" + "camlistore.org/third_party/go/pkg/image/jpeg" +) + +const imageDebug = false + +var ( + imageBytesServedVar = expvar.NewInt("image-bytes-served") + imageBytesFetchedVar = expvar.NewInt("image-bytes-fetched") + thumbCacheMiss = expvar.NewInt("thumbcache-miss") + thumbCacheHitFull = expvar.NewInt("thumbcache-hit-full") + thumbCacheHitFile = expvar.NewInt("thumbcache-hit-file") + thumbCacheHeader304 = expvar.NewInt("thumbcache-header-304") +) + +type ImageHandler struct { + Fetcher blob.Fetcher + Search *search.Handler // optional + Cache blobserver.Storage // optional + MaxWidth, MaxHeight int + Square bool + ThumbMeta *ThumbMeta // optional cache index for scaled images + ResizeSem *syncutil.Sem // Limit peak RAM used by concurrent image thumbnail calls. 
+} + +type subImager interface { + SubImage(image.Rectangle) image.Image +} + +func squareImage(i image.Image) image.Image { + si, ok := i.(subImager) + if !ok { + log.Fatalf("image %T isn't a subImager", i) + } + b := i.Bounds() + if b.Dx() > b.Dy() { + thin := (b.Dx() - b.Dy()) / 2 + newB := b + newB.Min.X += thin + newB.Max.X -= thin + return si.SubImage(newB) + } + thin := (b.Dy() - b.Dx()) / 2 + newB := b + newB.Min.Y += thin + newB.Max.Y -= thin + return si.SubImage(newB) +} + +func writeToCache(cache blobserver.Storage, thumbBytes []byte, name string) (br blob.Ref, err error) { + tr := bytes.NewReader(thumbBytes) + if len(thumbBytes) < constants.MaxBlobSize { + br = blob.SHA1FromBytes(thumbBytes) + _, err = blobserver.Receive(cache, br, tr) + } else { + // TODO: don't use rolling checksums when writing this. Tell + // the filewriter to use 16 MB chunks instead. + br, err = schema.WriteFileFromReader(cache, name, tr) + } + if err != nil { + return br, errors.New("failed to cache " + name + ": " + err.Error()) + } + if imageDebug { + log.Printf("Image Cache: saved as %v\n", br) + } + return br, nil +} + +// cacheScaled saves in the image handler's cache the scaled image bytes +// in thumbBytes, and puts its blobref in the scaledImage under the key name. +func (ih *ImageHandler) cacheScaled(thumbBytes []byte, name string) error { + br, err := writeToCache(ih.Cache, thumbBytes, name) + if err != nil { + return err + } + ih.ThumbMeta.Put(name, br) + return nil +} + +// cached returns a FileReader for the given blobref, which may +// point to either a blob representing the entire thumbnail (max +// 16MB) or a file schema blob. +// +// The ReadCloser should be closed when done reading. 
+func (ih *ImageHandler) cached(br blob.Ref) (io.ReadCloser, error) { + rsc, _, err := ih.Cache.Fetch(br) + if err != nil { + return nil, err + } + slurp, err := ioutil.ReadAll(rsc) + rsc.Close() + if err != nil { + return nil, err + } + // In the common case, when the scaled image itself is less than 16 MB, it's + // all together in one blob. + if strings.HasPrefix(magic.MIMEType(slurp), "image/") { + thumbCacheHitFull.Add(1) + if imageDebug { + log.Printf("Image Cache: hit: %v\n", br) + } + return ioutil.NopCloser(bytes.NewReader(slurp)), nil + } + + // For large scaled images, the cached blob is a file schema blob referencing + // the sub-chunks. + fileBlob, err := schema.BlobFromReader(br, bytes.NewReader(slurp)) + if err != nil { + log.Printf("Failed to parse non-image thumbnail cache blob %v: %v", br, err) + return nil, err + } + fr, err := fileBlob.NewFileReader(ih.Cache) + if err != nil { + log.Printf("cached(%v) NewFileReader = %v", br, err) + return nil, err + } + thumbCacheHitFile.Add(1) + if imageDebug { + log.Printf("Image Cache: fileref hit: %v\n", br) + } + return fr, nil +} + +// Key format: "scaled:" + bref + ":" + width "x" + height +// where bref is the blobref of the unscaled image. +func cacheKey(bref string, width int, height int) string { + return fmt.Sprintf("scaled:%v:%dx%d:tv%v", bref, width, height, images.ThumbnailVersion()) +} + +// ScaledCached reads the scaled version of the image in file, +// if it is in cache and writes it to buf. +// +// On successful read and population of buf, the returned format is non-empty. +// Almost all errors are not interesting. Real errors will be logged. 
+func (ih *ImageHandler) scaledCached(buf *bytes.Buffer, file blob.Ref) (format string) { + key := cacheKey(file.String(), ih.MaxWidth, ih.MaxHeight) + br, err := ih.ThumbMeta.Get(key) + if err == errCacheMiss { + return + } + if err != nil { + log.Printf("Warning: thumbnail cachekey(%q)->meta lookup error: %v", key, err) + return + } + fr, err := ih.cached(br) + if err != nil { + if imageDebug { + log.Printf("Could not get cached image %v: %v\n", br, err) + } + return + } + defer fr.Close() + _, err = io.Copy(buf, fr) + if err != nil { + return + } + mime := magic.MIMEType(buf.Bytes()) + if format = strings.TrimPrefix(mime, "image/"); format == mime { + log.Printf("Warning: unescaped MIME type %q of %v file for thumbnail %q", mime, br, key) + return + } + return format +} + +// Gate the number of concurrent image resizes to limit RAM & CPU use. + +type formatAndImage struct { + format string + image []byte +} + +// imageConfigFromReader calls image.DecodeConfig on r. It returns an +// io.Reader that is the concatentation of the bytes read and the remaining r, +// the image configuration, and the error from image.DecodeConfig. +func imageConfigFromReader(r io.Reader) (io.Reader, image.Config, error) { + header := new(bytes.Buffer) + tr := io.TeeReader(r, header) + // We just need width & height for memory considerations, so we use the + // standard library's DecodeConfig, skipping the EXIF parsing and + // orientation correction for images.DecodeConfig. + conf, _, err := image.DecodeConfig(tr) + return io.MultiReader(header, r), conf, err +} + +func (ih *ImageHandler) newFileReader(fileRef blob.Ref) (io.ReadCloser, error) { + fi, ok := fileInfoPacked(ih.Search, ih.Fetcher, nil, fileRef) + if debugPack { + log.Printf("pkg/server/image.go: fileInfoPacked: ok=%v, %+v", ok, fi) + } + if ok { + // This would be less gross if fileInfoPacked just + // returned an io.ReadCloser, but then the download + // handler would need more invasive changes for + // ServeContent. 
So tolerate this for now. + return struct { + io.Reader + io.Closer + }{ + fi.rs, + types.CloseFunc(fi.close), + }, nil + } + // Default path, not going through blobpacked's fast path: + return schema.NewFileReader(ih.Fetcher, fileRef) +} + +func (ih *ImageHandler) scaleImage(fileRef blob.Ref) (*formatAndImage, error) { + fr, err := ih.newFileReader(fileRef) + if err != nil { + return nil, err + } + defer fr.Close() + + sr := types.NewStatsReader(imageBytesFetchedVar, fr) + sr, conf, err := imageConfigFromReader(sr) + if err != nil { + return nil, err + } + + // TODO(wathiede): build a size table keyed by conf.ColorModel for + // common color models for a more exact size estimate. + + // This value is an estimate of the memory required to decode an image. + // PNGs range from 1-64 bits per pixel (not all of which are supported by + // the Go standard parser). JPEGs encoded in YCbCr 4:4:4 are 3 byte/pixel. + // For all other JPEGs this is an overestimate. For GIFs it is 3x larger + // than needed. How accurate this estimate is depends on the mix of + // images being resized concurrently. 
+ ramSize := int64(conf.Width) * int64(conf.Height) * 3 + + if err = ih.ResizeSem.Acquire(ramSize); err != nil { + return nil, err + } + defer ih.ResizeSem.Release(ramSize) + + i, imConfig, err := images.Decode(sr, &images.DecodeOpts{ + MaxWidth: ih.MaxWidth, + MaxHeight: ih.MaxHeight, + }) + if err != nil { + return nil, err + } + b := i.Bounds() + format := imConfig.Format + + isSquare := b.Dx() == b.Dy() + if ih.Square && !isSquare { + i = squareImage(i) + b = i.Bounds() + } + + // Encode as a new image + var buf bytes.Buffer + switch format { + case "png": + err = png.Encode(&buf, i) + case "cr2": + // Recompress CR2 files as JPEG + format = "jpeg" + fallthrough + default: + err = jpeg.Encode(&buf, i, &jpeg.Options{ + Quality: 90, + }) + } + if err != nil { + return nil, err + } + + return &formatAndImage{format: format, image: buf.Bytes()}, nil +} + +// singleResize prevents generating the same thumbnail at once from +// two different requests. (e.g. sending out a link to a new photo +// gallery to a big audience) +var singleResize singleflight.Group + +func (ih *ImageHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, file blob.Ref) { + if !httputil.IsGet(req) { + http.Error(rw, "Invalid method", 400) + return + } + mw, mh := ih.MaxWidth, ih.MaxHeight + if mw == 0 || mh == 0 || mw > search.MaxImageSize || mh > search.MaxImageSize { + http.Error(rw, "bogus dimensions", 400) + return + } + + key := cacheKey(file.String(), mw, mh) + etag := blob.SHA1FromString(key).String()[5:] + inm := req.Header.Get("If-None-Match") + if inm != "" { + if strings.Trim(inm, `"`) == etag { + thumbCacheHeader304.Add(1) + rw.WriteHeader(http.StatusNotModified) + return + } + } else { + if !disableThumbCache && req.Header.Get("If-Modified-Since") != "" { + thumbCacheHeader304.Add(1) + rw.WriteHeader(http.StatusNotModified) + return + } + } + + var imageData []byte + format := "" + cacheHit := false + if ih.ThumbMeta != nil && !disableThumbCache { + var buf bytes.Buffer + 
format = ih.scaledCached(&buf, file) + if format != "" { + cacheHit = true + imageData = buf.Bytes() + } + } + + if !cacheHit { + thumbCacheMiss.Add(1) + imi, err := singleResize.Do(key, func() (interface{}, error) { + return ih.scaleImage(file) + }) + if err != nil { + http.Error(rw, err.Error(), 500) + return + } + im := imi.(*formatAndImage) + imageData = im.image + format = im.format + if ih.ThumbMeta != nil { + err := ih.cacheScaled(imageData, key) + if err != nil { + log.Printf("image resize: %v", err) + } + } + } + + h := rw.Header() + if !disableThumbCache { + h.Set("Expires", time.Now().Add(oneYear).Format(http.TimeFormat)) + h.Set("Last-Modified", time.Now().Format(http.TimeFormat)) + h.Set("Etag", strconv.Quote(etag)) + } + h.Set("Content-Type", imageContentTypeOfFormat(format)) + size := len(imageData) + h.Set("Content-Length", fmt.Sprint(size)) + imageBytesServedVar.Add(int64(size)) + + if req.Method == "GET" { + n, err := rw.Write(imageData) + if err != nil { + if strings.Contains(err.Error(), "broken pipe") { + // boring. + return + } + // TODO: vlog this: + log.Printf("error serving thumbnail of file schema %s: %v", file, err) + return + } + if n != size { + log.Printf("error serving thumbnail of file schema %s: sent %d, expected size of %d", + file, n, size) + return + } + } +} + +func imageContentTypeOfFormat(format string) string { + if format == "jpeg" { + return "image/jpeg" + } + return "image/png" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/root.go b/vendor/github.com/camlistore/camlistore/pkg/server/root.go new file mode 100644 index 00000000..cc87ad9f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/root.go @@ -0,0 +1,276 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "sort" + "sync" + + "camlistore.org/pkg/auth" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/buildinfo" + "camlistore.org/pkg/env" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/images" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/jsonsign/signhandler" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/search" + "camlistore.org/pkg/types" + "camlistore.org/pkg/types/camtypes" +) + +// RootHandler handles serving the about/splash page. +type RootHandler struct { + // Stealth determines whether we hide from non-authenticated + // clients. + Stealth bool + + OwnerName string // for display purposes only. + Username string // default user for mobile setup. + + // URL prefixes (path or full URL) to the primary blob and + // search root. + BlobRoot string + SearchRoot string + helpRoot string + importerRoot string + statusRoot string + Prefix string // root handler's prefix + + // JSONSignRoot is the optional path or full URL to the JSON + // Signing helper. + JSONSignRoot string + + Storage blobserver.Storage // of BlobRoot, or nil + + searchInitOnce sync.Once // runs searchInit, which populates searchHandler + searchInit func() + searchHandler *search.Handler // of SearchRoot, or nil + + ui *UIHandler // or nil, if none configured + sigh *signhandler.Handler // or nil, if none configured + sync []*SyncHandler // list of configured sync handlers, for discovery. 
+} + +func (rh *RootHandler) SearchHandler() (h *search.Handler, ok bool) { + rh.searchInitOnce.Do(rh.searchInit) + return rh.searchHandler, rh.searchHandler != nil +} + +func init() { + blobserver.RegisterHandlerConstructor("root", newRootFromConfig) +} + +func newRootFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) { + checkType := func(key string, htype string) { + v := conf.OptionalString(key, "") + if v == "" { + return + } + ct := ld.GetHandlerType(v) + if ct == "" { + err = fmt.Errorf("root handler's %q references non-existant %q", key, v) + } else if ct != htype { + err = fmt.Errorf("root handler's %q references %q of type %q; expected type %q", key, v, ct, htype) + } + } + checkType("searchRoot", "search") + checkType("jsonSignRoot", "jsonsign") + if err != nil { + return + } + username, _ := getUserName() + root := &RootHandler{ + BlobRoot: conf.OptionalString("blobRoot", ""), + SearchRoot: conf.OptionalString("searchRoot", ""), + JSONSignRoot: conf.OptionalString("jsonSignRoot", ""), + OwnerName: conf.OptionalString("ownerName", username), + Username: osutil.Username(), + Prefix: ld.MyPrefix(), + } + root.Stealth = conf.OptionalBool("stealth", false) + root.statusRoot = conf.OptionalString("statusRoot", "") + root.helpRoot = conf.OptionalString("helpRoot", "") + if err = conf.Validate(); err != nil { + return + } + + if root.BlobRoot != "" { + bs, err := ld.GetStorage(root.BlobRoot) + if err != nil { + return nil, fmt.Errorf("Root handler's blobRoot of %q error: %v", root.BlobRoot, err) + } + root.Storage = bs + } + + if root.JSONSignRoot != "" { + h, _ := ld.GetHandler(root.JSONSignRoot) + if sigh, ok := h.(*signhandler.Handler); ok { + root.sigh = sigh + } + } + + root.searchInit = func() {} + if root.SearchRoot != "" { + prefix := root.SearchRoot + if t := ld.GetHandlerType(prefix); t != "search" { + if t == "" { + return nil, fmt.Errorf("root handler's searchRoot of %q is invalid and doesn't refer to a declared 
handler", prefix) + } + return nil, fmt.Errorf("root handler's searchRoot of %q is of type %q, not %q", prefix, t, "search") + } + root.searchInit = func() { + h, err := ld.GetHandler(prefix) + if err != nil { + log.Fatalf("Error fetching SearchRoot at %q: %v", prefix, err) + } + root.searchHandler = h.(*search.Handler) + root.searchInit = nil + } + } + + if pfx, _, _ := ld.FindHandlerByType("importer"); err == nil { + root.importerRoot = pfx + } + + return root, nil +} + +func (rh *RootHandler) registerUIHandler(h *UIHandler) { + rh.ui = h +} + +func (rh *RootHandler) registerSyncHandler(h *SyncHandler) { + rh.sync = append(rh.sync, h) + sort.Sort(byFromTo(rh.sync)) +} + +func (rh *RootHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if wantsDiscovery(req) { + if auth.Allowed(req, auth.OpDiscovery) { + rh.serveDiscovery(rw, req) + return + } + if !rh.Stealth { + http.Error(rw, "Unauthorized", http.StatusUnauthorized) + } + return + } + + if rh.Stealth { + return + } + if req.RequestURI == "/" && rh.ui != nil { + http.Redirect(rw, req, "/ui/", http.StatusMovedPermanently) + return + } + if req.URL.Path == "/favicon.ico" { + ServeStaticFile(rw, req, Files, "favicon.ico") + return + } + f := func(p string, a ...interface{}) { + fmt.Fprintf(rw, p, a...) + } + f("

    This is camlistored (%s), a "+ + "Camlistore server.

    ", buildinfo.Version()) + if auth.IsLocalhost(req) && !env.IsDev() { + f("

    If you're coming from localhost, configure your Camlistore server at /setup.

    ") + } + if rh.ui != nil { + f("

    To manage your content, access the %s.

    ", rh.ui.prefix, rh.ui.prefix) + } + if rh.statusRoot != "" { + f("

    To view status, see %s.

    ", rh.statusRoot, rh.statusRoot) + } + if rh.helpRoot != "" { + f("

    To view more information on accessing the server, see %s.

    ", rh.helpRoot, rh.helpRoot) + } + fmt.Fprintf(rw, "") +} + +type byFromTo []*SyncHandler + +func (b byFromTo) Len() int { return len(b) } +func (b byFromTo) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byFromTo) Less(i, j int) bool { + if b[i].fromName < b[j].fromName { + return true + } + return b[i].fromName == b[j].fromName && b[i].toName < b[j].toName +} + +func (rh *RootHandler) serveDiscovery(rw http.ResponseWriter, req *http.Request) { + d := &camtypes.Discovery{ + BlobRoot: rh.BlobRoot, + JSONSignRoot: rh.JSONSignRoot, + HelpRoot: rh.helpRoot, + ImporterRoot: rh.importerRoot, + SearchRoot: rh.SearchRoot, + StatusRoot: rh.statusRoot, + OwnerName: rh.OwnerName, + UserName: rh.Username, + WSAuthToken: auth.ProcessRandom(), + ThumbVersion: images.ThumbnailVersion(), + } + if gener, ok := rh.Storage.(blobserver.Generationer); ok { + initTime, gen, err := gener.StorageGeneration() + if err != nil { + d.StorageGenerationError = err.Error() + } else { + d.StorageInitTime = types.Time3339(initTime) + d.StorageGeneration = gen + } + } else { + log.Printf("Storage type %T is not a blobserver.Generationer; not sending storageGeneration", rh.Storage) + } + if rh.ui != nil { + d.UIDiscovery = rh.ui.discovery() + } + if rh.sigh != nil { + d.Signing = rh.sigh.Discovery(rh.JSONSignRoot) + } + if len(rh.sync) > 0 { + syncHandlers := make([]camtypes.SyncHandlerDiscovery, 0, len(rh.sync)) + for _, sh := range rh.sync { + syncHandlers = append(syncHandlers, sh.discovery()) + } + d.SyncHandlers = syncHandlers + } + discoveryHelper(rw, req, d) +} + +func discoveryHelper(rw http.ResponseWriter, req *http.Request, dr *camtypes.Discovery) { + rw.Header().Set("Content-Type", "text/javascript") + if cb := req.FormValue("cb"); identOrDotPattern.MatchString(cb) { + fmt.Fprintf(rw, "%s(", cb) + defer rw.Write([]byte(");\n")) + } else if v := req.FormValue("var"); identOrDotPattern.MatchString(v) { + fmt.Fprintf(rw, "%s = ", v) + defer rw.Write([]byte(";\n")) + } + bytes, err 
:= json.MarshalIndent(dr, "", " ") + if err != nil { + httputil.ServeJSONError(rw, httputil.ServerError("encoding discovery information: "+err.Error())) + return + } + rw.Write(bytes) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/root_appengine.go b/vendor/github.com/camlistore/camlistore/pkg/server/root_appengine.go new file mode 100644 index 00000000..8de71211 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/root_appengine.go @@ -0,0 +1,24 @@ +// +build appengine + +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +func getUserName() (string, error) { + // TODO(mpl): use appengine specific stuff to do that + return "unknown on Appengine", nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/root_normal.go b/vendor/github.com/camlistore/camlistore/pkg/server/root_normal.go new file mode 100644 index 00000000..eca54c27 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/root_normal.go @@ -0,0 +1,36 @@ +// +build !appengine + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "os/user" + + "camlistore.org/pkg/osutil" +) + +func getUserName() (string, error) { + u, err := user.Current() + if err != nil { + if v := osutil.Username(); v != "" { + return v, nil + } + return "", err + } + return u.Name, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/share.go b/vendor/github.com/camlistore/camlistore/pkg/server/share.go new file mode 100644 index 00000000..ea58cce0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/share.go @@ -0,0 +1,250 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "strconv" + "strings" + "time" + + "camlistore.org/pkg/auth" + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/blobserver/gethandler" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/schema" +) + +type responseType int + +const ( + badRequest responseType = iota + unauthorizedRequest +) + +type errorCode int + +const ( + noError errorCode = iota + assembleNonTransitive + invalidMethod + invalidURL + invalidVia + shareBlobInvalid + shareBlobTooLarge + shareExpired + shareFetchFailed + shareReadFailed + shareTargetInvalid + shareNotTransitive + viaChainFetchFailed + viaChainInvalidLink + viaChainReadFailed +) + +type shareError struct { + code errorCode + response responseType + message string +} + +func (e *shareError) Error() string { + return e.message +} + +func unauthorized(code errorCode, format string, args ...interface{}) *shareError { + return &shareError{ + code: code, response: unauthorizedRequest, message: fmt.Sprintf(format, args...), + } +} + +const fetchFailureDelay = 200 * time.Millisecond + +// ShareHandler handles the requests for "share" (and shared) blobs. 
+type shareHandler struct { + fetcher blob.Fetcher +} + +func init() { + blobserver.RegisterHandlerConstructor("share", newShareFromConfig) +} + +func newShareFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) { + blobRoot := conf.RequiredString("blobRoot") + if blobRoot == "" { + return nil, errors.New("No blobRoot defined for share handler") + } + if err = conf.Validate(); err != nil { + return + } + + share := &shareHandler{} + bs, err := ld.GetStorage(blobRoot) + if err != nil { + return nil, fmt.Errorf("Share handler's blobRoot of %q error: %v", blobRoot, err) + } + fetcher, ok := bs.(blob.Fetcher) + if !ok { + return nil, errors.New("Share handler's storage not a Fetcher.") + } + share.fetcher = fetcher + return share, nil +} + +// Unauthenticated user. Be paranoid. +func handleGetViaSharing(conn http.ResponseWriter, req *http.Request, + blobRef blob.Ref, fetcher blob.Fetcher) error { + if !httputil.IsGet(req) { + return &shareError{code: invalidMethod, response: badRequest, message: "Invalid method"} + } + + conn.Header().Set("Access-Control-Allow-Origin", "*") + + viaPathOkay := false + startTime := time.Now() + defer func() { + if !viaPathOkay { + // Insert a delay, to hide timing attacks probing + // for the existence of blobs. + sleep := fetchFailureDelay - (time.Now().Sub(startTime)) + time.Sleep(sleep) + } + }() + viaBlobs := make([]blob.Ref, 0) + if via := req.FormValue("via"); via != "" { + for _, vs := range strings.Split(via, ",") { + if br, ok := blob.Parse(vs); ok { + viaBlobs = append(viaBlobs, br) + } else { + return &shareError{code: invalidVia, response: badRequest, message: "Malformed blobref in via param"} + } + } + } + + fetchChain := make([]blob.Ref, 0) + fetchChain = append(fetchChain, viaBlobs...) 
+ fetchChain = append(fetchChain, blobRef) + isTransitive := false + for i, br := range fetchChain { + switch i { + case 0: + file, size, err := fetcher.Fetch(br) + if err != nil { + return unauthorized(shareFetchFailed, "Fetch chain 0 of %s failed: %v", br, err) + } + defer file.Close() + if size > schema.MaxSchemaBlobSize { + return unauthorized(shareBlobTooLarge, "Fetch chain 0 of %s too large", br) + } + blob, err := schema.BlobFromReader(br, file) + if err != nil { + return unauthorized(shareReadFailed, "Can't create a blob from %v: %v", br, err) + } + share, ok := blob.AsShare() + if !ok { + return unauthorized(shareBlobInvalid, "Fetch chain 0 of %s wasn't a valid Share", br) + } + if share.IsExpired() { + return unauthorized(shareExpired, "Share is expired") + } + if len(fetchChain) > 1 && fetchChain[1].String() != share.Target().String() { + return unauthorized(shareTargetInvalid, + "Fetch chain 0->1 (%s -> %q) unauthorized, expected hop to %q", + br, fetchChain[1], share.Target()) + } + isTransitive = share.IsTransitive() + if len(fetchChain) > 2 && !isTransitive { + return unauthorized(shareNotTransitive, "Share is not transitive") + } + case len(fetchChain) - 1: + // Last one is fine (as long as its path up to here has been proven, and it's + // not the first thing in the chain) + continue + default: + file, _, err := fetcher.Fetch(br) + if err != nil { + return unauthorized(viaChainFetchFailed, "Fetch chain %d of %s failed: %v", i, br, err) + } + defer file.Close() + lr := io.LimitReader(file, schema.MaxSchemaBlobSize) + slurpBytes, err := ioutil.ReadAll(lr) + if err != nil { + return unauthorized(viaChainReadFailed, + "Fetch chain %d of %s failed in slurp: %v", i, br, err) + } + saught := fetchChain[i+1].String() + if bytes.Index(slurpBytes, []byte(saught)) == -1 { + return unauthorized(viaChainInvalidLink, + "Fetch chain %d of %s failed; no reference to %s", i, br, saught) + } + } + } + + if assemble, _ := strconv.ParseBool(req.FormValue("assemble")); 
assemble { + if !isTransitive { + return unauthorized(assembleNonTransitive, "Cannot assemble non-transitive share") + } + dh := &DownloadHandler{ + Fetcher: fetcher, + // TODO(aa): It would be nice to specify a local cache here, as the UI handler does. + } + dh.ServeHTTP(conn, req, blobRef) + } else { + gethandler.ServeBlobRef(conn, req, blobRef, fetcher) + } + viaPathOkay = true + return nil +} + +func (h *shareHandler) serveHTTP(rw http.ResponseWriter, req *http.Request) error { + var err error + pathSuffix := httputil.PathSuffix(req) + if len(pathSuffix) == 0 { + // This happens during testing because we don't go through PrefixHandler + pathSuffix = strings.TrimLeft(req.URL.Path, "/") + } + pathParts := strings.SplitN(pathSuffix, "/", 2) + blobRef, ok := blob.Parse(pathParts[0]) + if !ok { + err = &shareError{code: invalidURL, response: badRequest, + message: fmt.Sprintf("Malformed share pathSuffix: %s", pathSuffix)} + } else { + err = handleGetViaSharing(rw, req, blobRef, h.fetcher) + } + if se, ok := err.(*shareError); ok { + switch se.response { + case badRequest: + httputil.BadRequestError(rw, err.Error()) + case unauthorizedRequest: + log.Print(err) + auth.SendUnauthorized(rw, req) + } + } + return err +} + +func (h *shareHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + h.serveHTTP(rw, req) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/share_test.go b/vendor/github.com/camlistore/camlistore/pkg/server/share_test.go new file mode 100644 index 00000000..1f31e602 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/share_test.go @@ -0,0 +1,116 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/test" +) + +func TestHandleGetViaSharing(t *testing.T) { + sto := &test.Fetcher{} + handler := &shareHandler{fetcher: sto} + var wr *httptest.ResponseRecorder + + putRaw := func(ref blob.Ref, data string) { + if _, err := blobserver.Receive(sto, ref, strings.NewReader(data)); err != nil { + t.Fatal(err) + } + } + + put := func(blob *schema.Blob) { + putRaw(blob.BlobRef(), blob.JSON()) + } + + get := func(path string) *shareError { + wr = httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://unused/"+path, nil) + err := handler.serveHTTP(wr, req) + if err != nil { + return err.(*shareError) + } + return nil + } + + testGet := func(path string, expectedError errorCode) { + err := get(path) + if expectedError != noError { + if err == nil || err.code != expectedError { + t.Errorf("Fetching %s, expected error %#v, but got %#v", path, expectedError, err) + } + } else { + if err != nil { + t.Errorf("Fetching %s, expected success but got %#v", path, err) + } + } + + if wr.HeaderMap.Get("Access-Control-Allow-Origin") != "*" { + t.Errorf("Fetching %s, share response did not contain expected CORS header", path) + } + } + + content := "monkey" + contentRef := blob.SHA1FromString(content) + + // For the purposes of following the via chain, the only thing that + // matters is that the content of each link contains the name of the + // next link. 
+ link := contentRef.String() + linkRef := blob.SHA1FromString(link) + + share := schema.NewShareRef(schema.ShareHaveRef, false). + SetShareTarget(linkRef). + SetSigner(blob.SHA1FromString("irrelevant")). + SetRawStringField("camliSig", "alsounused") + + testGet(share.Blob().BlobRef().String(), shareFetchFailed) + + put(share.Blob()) + testGet(fmt.Sprintf("%s?via=%s", contentRef, share.Blob().BlobRef()), shareTargetInvalid) + + putRaw(linkRef, link) + testGet(linkRef.String(), shareReadFailed) + testGet(share.Blob().BlobRef().String(), noError) + testGet(fmt.Sprintf("%s?via=%s", linkRef, share.Blob().BlobRef()), noError) + testGet(fmt.Sprintf("%s?via=%s,%s", contentRef, share.Blob().BlobRef(), linkRef), shareNotTransitive) + + share.SetShareIsTransitive(true) + put(share.Blob()) + testGet(fmt.Sprintf("%s?via=%s,%s", linkRef, share.Blob().BlobRef(), linkRef), viaChainInvalidLink) + + putRaw(contentRef, content) + testGet(fmt.Sprintf("%s?via=%s,%s", contentRef, share.Blob().BlobRef(), linkRef), noError) + + share.SetShareExpiration(time.Now().Add(-time.Duration(10) * time.Minute)) + put(share.Blob()) + testGet(fmt.Sprintf("%s?via=%s,%s", contentRef, share.Blob().BlobRef(), linkRef), shareExpired) + + share.SetShareExpiration(time.Now().Add(time.Duration(10) * time.Minute)) + put(share.Blob()) + testGet(fmt.Sprintf("%s?via=%s,%s", contentRef, share.Blob().BlobRef(), linkRef), noError) + + // TODO(aa): assemble +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/status.go b/vendor/github.com/camlistore/camlistore/pkg/server/status.go new file mode 100644 index 00000000..978089c2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/status.go @@ -0,0 +1,297 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "encoding/json" + "fmt" + "html" + "io" + "log" + "net/http" + "os" + "reflect" + "regexp" + "runtime" + "strings" + "time" + + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/buildinfo" + "camlistore.org/pkg/env" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/index" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/search" + "camlistore.org/pkg/server/app" + "camlistore.org/pkg/types/camtypes" +) + +// StatusHandler publishes server status information. +type StatusHandler struct { + prefix string + handlerFinder blobserver.FindHandlerByTyper +} + +func init() { + blobserver.RegisterHandlerConstructor("status", newStatusFromConfig) +} + +var _ blobserver.HandlerIniter = (*StatusHandler)(nil) + +func newStatusFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) { + if err := conf.Validate(); err != nil { + return nil, err + } + return &StatusHandler{ + prefix: ld.MyPrefix(), + handlerFinder: ld, + }, nil +} + +func (sh *StatusHandler) InitHandler(hl blobserver.FindHandlerByTyper) error { + _, h, err := hl.FindHandlerByType("search") + if err == blobserver.ErrHandlerTypeNotFound { + return nil + } + if err != nil { + return err + } + go func() { + var lastSend *status + for { + cur := sh.currentStatus() + if reflect.DeepEqual(cur, lastSend) { + // TODO: something better. get notified on interesting events. 
+ time.Sleep(10 * time.Second) + continue + } + lastSend = cur + js, _ := json.MarshalIndent(cur, "", " ") + h.(*search.Handler).SendStatusUpdate(js) + } + }() + return nil +} + +func (sh *StatusHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + suffix := httputil.PathSuffix(req) + if suffix == "restart" { + sh.serveRestart(rw, req) + return + } + if !httputil.IsGet(req) { + http.Error(rw, "Illegal status method.", http.StatusMethodNotAllowed) + return + } + switch suffix { + case "status.json": + sh.serveStatusJSON(rw, req) + case "": + sh.serveStatusHTML(rw, req) + default: + http.Error(rw, "Illegal status path.", http.StatusNotFound) + } +} + +type status struct { + Version string `json:"version"` + Errors []camtypes.StatusError `json:"errors,omitempty"` + Sync map[string]syncStatus `json:"sync"` + Storage map[string]storageStatus `json:"storage"` + importerRoot string + rootPrefix string + + ImporterAccounts interface{} `json:"importerAccounts"` +} + +func (st *status) addError(msg, url string) { + st.Errors = append(st.Errors, camtypes.StatusError{ + Error: msg, + URL: url, + }) +} + +func (st *status) isHandler(pfx string) bool { + if pfx == st.importerRoot { + return true + } + if _, ok := st.Sync[pfx]; ok { + return true + } + if _, ok := st.Storage[pfx]; ok { + return true + } + return false +} + +type storageStatus struct { + Primary bool `json:"primary,omitempty"` + IsIndex bool `json:"isIndex,omitempty"` + Type string `json:"type"` + ApproxBlobs int `json:"approxBlobs,omitempty"` + ApproxBytes int `json:"approxBytes,omitempty"` + ImplStatus interface{} `json:"implStatus,omitempty"` +} + +func (sh *StatusHandler) currentStatus() *status { + res := &status{ + Version: buildinfo.Version(), + Storage: make(map[string]storageStatus), + Sync: make(map[string]syncStatus), + } + if v := os.Getenv("CAMLI_FAKE_STATUS_ERROR"); v != "" { + res.addError(v, "/status/#fakeerror") + } + _, hi, err := sh.handlerFinder.FindHandlerByType("root") + if err != 
nil { + res.addError(fmt.Sprintf("Error finding root handler: %v", err), "") + return res + } + rh := hi.(*RootHandler) + res.rootPrefix = rh.Prefix + + if pfx, h, err := sh.handlerFinder.FindHandlerByType("importer"); err == nil { + res.importerRoot = pfx + as := h.(interface { + AccountsStatus() (interface{}, []camtypes.StatusError) + }) + var errs []camtypes.StatusError + res.ImporterAccounts, errs = as.AccountsStatus() + res.Errors = append(res.Errors, errs...) + } + + types, handlers := sh.handlerFinder.AllHandlers() + + // Sync + for pfx, h := range handlers { + sh, ok := h.(*SyncHandler) + if !ok { + continue + } + res.Sync[pfx] = sh.currentStatus() + } + + // Storage + for pfx, typ := range types { + if !strings.HasPrefix(typ, "storage-") { + continue + } + h := handlers[pfx] + _, isIndex := h.(*index.Index) + res.Storage[pfx] = storageStatus{ + Type: strings.TrimPrefix(typ, "storage-"), + Primary: pfx == rh.BlobRoot, + IsIndex: isIndex, + } + } + + return res +} + +func (sh *StatusHandler) serveStatusJSON(rw http.ResponseWriter, req *http.Request) { + httputil.ReturnJSON(rw, sh.currentStatus()) +} + +var quotedPrefix = regexp.MustCompile(`[;"]/(\S+?/)[&"]`) + +func (sh *StatusHandler) serveStatusHTML(rw http.ResponseWriter, req *http.Request) { + st := sh.currentStatus() + f := func(p string, a ...interface{}) { + if len(a) == 0 { + io.WriteString(rw, p) + } else { + fmt.Fprintf(rw, p, a...) + } + } + f("camlistored status") + f("") + + f("

    camlistored status

    ") + + f("

    Versions

      ") + var envStr string + if env.OnGCE() { + envStr = " (on GCE)" + } + f("
    • Camlistore: %s%s
    • ", html.EscapeString(buildinfo.Version()), envStr) + f("
    • Go: %s/%s %s, cgo=%v
    • ", runtime.GOOS, runtime.GOARCH, runtime.Version(), cgoEnabled) + f("
    • djpeg: %s", html.EscapeString(buildinfo.DjpegStatus())) + f("
    ") + + f("

    Logs

    ") + + f("

    Admin

    ") + f("
    ") + + f("

    Handlers

    ") + f("

    As JSON: status.json; and the discovery JSON.

    ", st.rootPrefix) + f("

    Not yet pretty HTML UI:

    ") + js, err := json.MarshalIndent(st, "", " ") + if err != nil { + log.Printf("JSON marshal error: %v", err) + } + jsh := html.EscapeString(string(js)) + jsh = quotedPrefix.ReplaceAllStringFunc(jsh, func(in string) string { + pfx := in[1 : len(in)-1] + if st.isHandler(pfx) { + return fmt.Sprintf("%s%s%s", in[:1], pfx, pfx, in[len(in)-1:]) + } + return in + }) + f("
    %s
    ", jsh) +} + +func (sh *StatusHandler) serveRestart(rw http.ResponseWriter, req *http.Request) { + if req.Method != "POST" { + http.Error(rw, "POST to restart", http.StatusMethodNotAllowed) + return + } + + _, handlers := sh.handlerFinder.AllHandlers() + for _, h := range handlers { + ah, ok := h.(*app.Handler) + if !ok { + continue + } + log.Printf("Sending SIGINT to %s", ah.ProgramName()) + err := ah.Quit() + if err != nil { + msg := fmt.Sprintf("Not restarting: couldn't interrupt app %s: %v", ah.ProgramName(), err) + log.Printf(msg) + http.Error(rw, msg, http.StatusInternalServerError) + return + } + } + + log.Println("Restarting camlistored") + rw.Header().Set("Connection", "close") + http.Redirect(rw, req, sh.prefix, http.StatusFound) + if f, ok := rw.(http.Flusher); ok { + f.Flush() + } + osutil.RestartProcess() +} + +var cgoEnabled bool diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/sync.go b/vendor/github.com/camlistore/camlistore/pkg/server/sync.go new file mode 100644 index 00000000..4241ff62 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/sync.go @@ -0,0 +1,1013 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "bytes" + "errors" + "fmt" + "html" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + + "camlistore.org/pkg/auth" + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/constants" + "camlistore.org/pkg/context" + "camlistore.org/pkg/index" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/syncutil" + "camlistore.org/pkg/types/camtypes" + "camlistore.org/third_party/code.google.com/p/xsrftoken" +) + +const ( + maxRecentErrors = 20 + queueSyncInterval = 5 * time.Second +) + +type blobReceiverEnumerator interface { + blobserver.BlobReceiver + blobserver.BlobEnumerator +} + +// The SyncHandler handles async replication in one direction between +// a pair storage targets, a source and target. +// +// SyncHandler is a BlobReceiver but doesn't actually store incoming +// blobs; instead, it records blobs it has received and queues them +// for async replication soon, or whenever it can. +type SyncHandler struct { + // TODO: rate control tunables + fromName, toName string + from blobserver.Storage + to blobReceiverEnumerator + queue sorted.KeyValue + toIndex bool // whether this sync is from a blob storage to an index + idle bool // if true, the handler does nothing other than providing the discovery. + copierPoolSize int + + // wakec wakes up the blob syncer loop when a blob is received. + wakec chan bool + + mu sync.Mutex // protects following + status string + copying map[blob.Ref]*copyStatus // to start time + needCopy map[blob.Ref]uint32 // blobs needing to be copied. some might be in lastFail too. + lastFail map[blob.Ref]failDetail // subset of needCopy that previously failed, and why + bytesRemain int64 // sum of needCopy values + recentErrors []blob.Ref // up to maxRecentErrors, recent first. valid if still in lastFail. 
+ recentCopyTime time.Time + totalCopies int64 + totalCopyBytes int64 + totalErrors int64 + vshards []string // validation shards. if 0, validation not running + vshardDone int // shards validated + vshardErrs []string + vmissing int64 // missing blobs found during validate + vdestCount int // number of blobs seen on dest during validate + vdestBytes int64 // number of blob bytes seen on dest during validate + vsrcCount int // number of blobs seen on src during validate + vsrcBytes int64 // number of blob bytes seen on src during validate +} + +var ( + _ blobserver.Storage = (*SyncHandler)(nil) + _ blobserver.HandlerIniter = (*SyncHandler)(nil) +) + +func (sh *SyncHandler) String() string { + return fmt.Sprintf("[SyncHandler %v -> %v]", sh.fromName, sh.toName) +} + +func (sh *SyncHandler) logf(format string, args ...interface{}) { + log.Printf(sh.String()+" "+format, args...) +} + +func init() { + blobserver.RegisterHandlerConstructor("sync", newSyncFromConfig) +} + +// TODO: this is temporary. should delete, or decide when it's on by default (probably always). +// Then need genconfig option to disable it.
+var validateOnStartDefault, _ = strconv.ParseBool(os.Getenv("CAMLI_SYNC_VALIDATE")) + +func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { + var ( + from = conf.RequiredString("from") + to = conf.RequiredString("to") + fullSync = conf.OptionalBool("fullSyncOnStart", false) + blockFullSync = conf.OptionalBool("blockingFullSyncOnStart", false) + idle = conf.OptionalBool("idle", false) + queueConf = conf.OptionalObject("queue") + copierPoolSize = conf.OptionalInt("copierPoolSize", 5) + validate = conf.OptionalBool("validateOnStart", validateOnStartDefault) + ) + if err := conf.Validate(); err != nil { + return nil, err + } + if idle { + return newIdleSyncHandler(from, to), nil + } + if len(queueConf) == 0 { + return nil, errors.New(`Missing required "queue" object`) + } + q, err := sorted.NewKeyValue(queueConf) + if err != nil { + return nil, err + } + + isToIndex := false + fromBs, err := ld.GetStorage(from) + if err != nil { + return nil, err + } + toBs, err := ld.GetStorage(to) + if err != nil { + return nil, err + } + if _, ok := fromBs.(*index.Index); !ok { + if _, ok := toBs.(*index.Index); ok { + isToIndex = true + } + } + + sh := newSyncHandler(from, to, fromBs, toBs, q) + sh.toIndex = isToIndex + sh.copierPoolSize = copierPoolSize + if err := sh.readQueueToMemory(); err != nil { + return nil, fmt.Errorf("Error reading sync queue to memory: %v", err) + } + + if fullSync || blockFullSync { + sh.logf("Doing full sync") + didFullSync := make(chan bool, 1) + go func() { + for { + n := sh.runSync("queue", sh.enumeratePendingBlobs) + if n > 0 { + sh.logf("Queue sync copied %d blobs", n) + continue + } + break + } + n := sh.runSync("full", blobserverEnumerator(context.TODO(), fromBs)) + sh.logf("Full sync copied %d blobs", n) + didFullSync <- true + sh.syncLoop() + }() + if blockFullSync { + sh.logf("Blocking startup, waiting for full sync from %q to %q", from, to) + <-didFullSync + sh.logf("Full sync complete.") + } + } else { 
+ go sh.syncLoop() + } + + if validate { + go sh.startFullValidation() + } + + blobserver.GetHub(fromBs).AddReceiveHook(sh.enqueue) + return sh, nil +} + +func (sh *SyncHandler) InitHandler(hl blobserver.FindHandlerByTyper) error { + _, h, err := hl.FindHandlerByType("root") + if err == blobserver.ErrHandlerTypeNotFound { + // It's optional. We register ourselves if it's there. + return nil + } + if err != nil { + return err + } + h.(*RootHandler).registerSyncHandler(sh) + return nil +} + +func newSyncHandler(fromName, toName string, + from blobserver.Storage, to blobReceiverEnumerator, + queue sorted.KeyValue) *SyncHandler { + return &SyncHandler{ + copierPoolSize: 2, + from: from, + to: to, + fromName: fromName, + toName: toName, + queue: queue, + wakec: make(chan bool), + status: "not started", + needCopy: make(map[blob.Ref]uint32), + lastFail: make(map[blob.Ref]failDetail), + copying: make(map[blob.Ref]*copyStatus), + } +} + +func newIdleSyncHandler(fromName, toName string) *SyncHandler { + return &SyncHandler{ + fromName: fromName, + toName: toName, + idle: true, + status: "disabled", + } +} + +func (sh *SyncHandler) discovery() camtypes.SyncHandlerDiscovery { + return camtypes.SyncHandlerDiscovery{ + From: sh.fromName, + To: sh.toName, + ToIndex: sh.toIndex, + } +} + +// syncStatus is a snapshot of the current status, for display by the +// status handler (status.go) in both JSON and HTML forms. 
+type syncStatus struct { + sh *SyncHandler + + From string `json:"from"` + FromDesc string `json:"fromDesc"` + To string `json:"to"` + ToDesc string `json:"toDesc"` + DestIsIndex bool `json:"destIsIndex,omitempty"` + BlobsToCopy int `json:"blobsToCopy"` + BytesToCopy int64 `json:"bytesToCopy"` + LastCopySecAgo int `json:"lastCopySecondsAgo,omitempty"` +} + +func (sh *SyncHandler) currentStatus() syncStatus { + sh.mu.Lock() + defer sh.mu.Unlock() + ago := 0 + if !sh.recentCopyTime.IsZero() { + ago = int(time.Now().Sub(sh.recentCopyTime).Seconds()) + } + return syncStatus{ + sh: sh, + From: sh.fromName, + FromDesc: storageDesc(sh.from), + To: sh.toName, + ToDesc: storageDesc(sh.to), + DestIsIndex: sh.toIndex, + BlobsToCopy: len(sh.needCopy), + BytesToCopy: sh.bytesRemain, + LastCopySecAgo: ago, + } +} + +// readQueueToMemory slurps in the pending queue from disk (or +// wherever) to memory. Even with millions of blobs, it's not much +// memory. The point of the persistent queue is to survive restarts if +// the "fullSyncOnStart" option is off. With "fullSyncOnStart" set to +// true, this is a little pointless (we'd figure out what's missing +// eventually), but this might save us a few minutes (let us start +// syncing missing blobs a few minutes earlier) since we won't have to +// wait to figure out what the destination is missing. 
+func (sh *SyncHandler) readQueueToMemory() error { + errc := make(chan error, 1) + blobs := make(chan blob.SizedRef, 16) + intr := make(chan struct{}) + defer close(intr) + go func() { + errc <- sh.enumerateQueuedBlobs(blobs, intr) + }() + n := 0 + for sb := range blobs { + sh.addBlobToCopy(sb) + n++ + } + sh.logf("Added %d pending blobs from sync queue to pending list", n) + return <-errc +} + +func (sh *SyncHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if req.Method == "POST" { + if req.FormValue("mode") == "validate" { + token := req.FormValue("token") + if xsrftoken.Valid(token, auth.ProcessRandom(), "user", "runFullValidate") { + sh.startFullValidation() + http.Redirect(rw, req, "./", http.StatusFound) + return + } + } + http.Error(rw, "Bad POST request", http.StatusBadRequest) + return + } + + // TODO: remove this lock and instead just call currentStatus, + // and transition to using that here. + sh.mu.Lock() + defer sh.mu.Unlock() + f := func(p string, a ...interface{}) { + fmt.Fprintf(rw, p, a...) + } + now := time.Now() + f("

    Sync Status (for %s to %s)

    ", sh.fromName, sh.toName) + f("

    Current status: %s

    ", html.EscapeString(sh.status)) + if sh.idle { + return + } + + f("

    Stats:

      ") + f("
    • Source: %s
    • ", html.EscapeString(storageDesc(sh.from))) + f("
    • Target: %s
    • ", html.EscapeString(storageDesc(sh.to))) + f("
    • Blobs synced: %d
    • ", sh.totalCopies) + f("
    • Bytes synced: %d
    • ", sh.totalCopyBytes) + f("
    • Blobs yet to copy: %d
    • ", len(sh.needCopy)) + f("
    • Bytes yet to copy: %d
    • ", sh.bytesRemain) + if !sh.recentCopyTime.IsZero() { + f("
    • Most recent copy: %s (%v ago)
    • ", sh.recentCopyTime.Format(time.RFC3339), now.Sub(sh.recentCopyTime)) + } + clarification := "" + if len(sh.needCopy) == 0 && sh.totalErrors > 0 { + clarification = "(all since resolved)" + } + f("
    • Previous copy errors: %d %s
    • ", sh.totalErrors, clarification) + f("
    ") + + f("

    Validation

    ") + if len(sh.vshards) == 0 { + f("Validation disabled") + token := xsrftoken.Generate(auth.ProcessRandom(), "user", "runFullValidate") + f("
    ", token) + } else { + f("

    Background scan of source and destination to ensure that the destination has everything the source does, or is at least enqueued to sync.

    ") + f("
      ") + f("
    • Shards complete: %d/%d (%.1f%%)
    • ", + sh.vshardDone, + len(sh.vshards), + 100*float64(sh.vshardDone)/float64(len(sh.vshards))) + f("
    • Source blobs seen: %d
    • ", sh.vsrcCount) + f("
    • Source bytes seen: %d
    • ", sh.vsrcBytes) + f("
    • Dest blobs seen: %d
    • ", sh.vdestCount) + f("
    • Dest bytes seen: %d
    • ", sh.vdestBytes) + f("
    • Blobs found missing & enqueued: %d
    • ", sh.vmissing) + if len(sh.vshardErrs) > 0 { + f("
    • Validation errors: %s
    • ", sh.vshardErrs) + } + f("
    ") + } + + if len(sh.copying) > 0 { + f("

    Currently Copying

      ") + copying := make([]blob.Ref, 0, len(sh.copying)) + for br := range sh.copying { + copying = append(copying, br) + } + sort.Sort(blob.ByRef(copying)) + for _, br := range copying { + f("
    • %s
    • \n", sh.copying[br]) + } + f("
    ") + } + + recentErrors := make([]blob.Ref, 0, len(sh.recentErrors)) + for _, br := range sh.recentErrors { + if _, ok := sh.needCopy[br]; ok { + // Only show it in the web UI if it's still a problem. Blobs that + // have since succeeded just confused people. + recentErrors = append(recentErrors, br) + } + } + if len(recentErrors) > 0 { + f("

    Recent Errors

    Blobs that haven't successfully copied over yet, and their last errors:

      ") + for _, br := range recentErrors { + fail := sh.lastFail[br] + f("
    • %s: %s: %s
    • \n", + br, + fail.when.Format(time.RFC3339), + html.EscapeString(fail.err.Error())) + } + f("
    ") + } +} + +func (sh *SyncHandler) setStatusf(s string, args ...interface{}) { + s = time.Now().UTC().Format(time.RFC3339) + ": " + fmt.Sprintf(s, args...) + sh.mu.Lock() + defer sh.mu.Unlock() + sh.status = s +} + +type copyResult struct { + sb blob.SizedRef + err error +} + +func blobserverEnumerator(ctx *context.Context, src blobserver.BlobEnumerator) func(chan<- blob.SizedRef, <-chan struct{}) error { + return func(dst chan<- blob.SizedRef, intr <-chan struct{}) error { + return blobserver.EnumerateAll(ctx, src, func(sb blob.SizedRef) error { + select { + case dst <- sb: + case <-intr: + return errors.New("interrupted") + } + return nil + }) + } +} + +// enumeratePendingBlobs yields blobs from the in-memory pending list (needCopy). +// This differs from enumerateQueuedBlobs, which pulls in the on-disk sorted.KeyValue store. +func (sh *SyncHandler) enumeratePendingBlobs(dst chan<- blob.SizedRef, intr <-chan struct{}) error { + defer close(dst) + sh.mu.Lock() + var toSend []blob.SizedRef + { + n := len(sh.needCopy) + const maxBatch = 1000 + if n > maxBatch { + n = maxBatch + } + toSend = make([]blob.SizedRef, 0, n) + for br, size := range sh.needCopy { + toSend = append(toSend, blob.SizedRef{br, size}) + if len(toSend) == n { + break + } + } + } + sh.mu.Unlock() + for _, sb := range toSend { + select { + case dst <- sb: + case <-intr: + return nil + } + } + return nil +} + +// enumerateQueuedBlobs yields blobs from the on-disk sorted.KeyValue store. +// This differs from enumeratePendingBlobs, which sends from the in-memory pending list. 
+func (sh *SyncHandler) enumerateQueuedBlobs(dst chan<- blob.SizedRef, intr <-chan struct{}) error { + defer close(dst) + it := sh.queue.Find("", "") + for it.Next() { + br, ok := blob.Parse(it.Key()) + size, err := strconv.ParseUint(it.Value(), 10, 32) + if !ok || err != nil { + sh.logf("ERROR: bogus sync queue entry: %q => %q", it.Key(), it.Value()) + continue + } + select { + case dst <- blob.SizedRef{br, uint32(size)}: + case <-intr: + return it.Close() + } + } + return it.Close() +} + +func (sh *SyncHandler) runSync(srcName string, enumSrc func(chan<- blob.SizedRef, <-chan struct{}) error) int { + enumch := make(chan blob.SizedRef, 8) + errch := make(chan error, 1) + intr := make(chan struct{}) + defer close(intr) + go func() { errch <- enumSrc(enumch, intr) }() + + nCopied := 0 + toCopy := 0 + + workch := make(chan blob.SizedRef, 1000) + resch := make(chan copyResult, 8) +FeedWork: + for sb := range enumch { + if toCopy < sh.copierPoolSize { + go sh.copyWorker(resch, workch) + } + select { + case workch <- sb: + toCopy++ + default: + // Buffer full. Enough for this batch. Will get it later. + break FeedWork + } + } + close(workch) + for i := 0; i < toCopy; i++ { + sh.setStatusf("Copying blobs") + res := <-resch + if res.err == nil { + nCopied++ + } + } + + if err := <-errch; err != nil { + sh.logf("error enumerating from source: %v", err) + } + return nCopied +} + +func (sh *SyncHandler) syncLoop() { + for { + t0 := time.Now() + + for sh.runSync(sh.fromName, sh.enumeratePendingBlobs) > 0 { + // Loop, before sleeping. 
+ } + sh.setStatusf("Sleeping briefly before next long poll.") + + d := queueSyncInterval - time.Since(t0) + select { + case <-time.After(d): + case <-sh.wakec: + } + } +} + +func (sh *SyncHandler) copyWorker(res chan<- copyResult, work <-chan blob.SizedRef) { + for sb := range work { + res <- copyResult{sb, sh.copyBlob(sb)} + } +} + +func (sh *SyncHandler) copyBlob(sb blob.SizedRef) (err error) { + cs := sh.newCopyStatus(sb) + defer func() { cs.setError(err) }() + br := sb.Ref + + sh.mu.Lock() + sh.copying[br] = cs + sh.mu.Unlock() + + if sb.Size > constants.MaxBlobSize { + return fmt.Errorf("blob size %d too large; max blob size is %d", sb.Size, constants.MaxBlobSize) + } + + cs.setStatus(statusFetching) + rc, fromSize, err := sh.from.Fetch(br) + if err != nil { + return fmt.Errorf("source fetch: %v", err) + } + if fromSize != sb.Size { + rc.Close() + return fmt.Errorf("source fetch size mismatch: get=%d, enumerate=%d", fromSize, sb.Size) + } + + buf := make([]byte, fromSize) + hash := br.Hash() + cs.setStatus(statusReading) + n, err := io.ReadFull(io.TeeReader(rc, + io.MultiWriter( + incrWriter{cs, &cs.nread}, + hash, + )), buf) + rc.Close() + if err != nil { + return fmt.Errorf("Read error after %d/%d bytes: %v", n, fromSize, err) + } + if !br.HashMatches(hash) { + return fmt.Errorf("Read data has unexpected digest %x", hash.Sum(nil)) + } + + cs.setStatus(statusWriting) + newsb, err := sh.to.ReceiveBlob(br, io.TeeReader(bytes.NewReader(buf), incrWriter{cs, &cs.nwrite})) + if err != nil { + return fmt.Errorf("dest write: %v", err) + } + if newsb.Size != sb.Size { + return fmt.Errorf("write size mismatch: source_read=%d but dest_write=%d", sb.Size, newsb.Size) + } + return nil +} + +func (sh *SyncHandler) ReceiveBlob(br blob.Ref, r io.Reader) (sb blob.SizedRef, err error) { + n, err := io.Copy(ioutil.Discard, r) + if err != nil { + return + } + sb = blob.SizedRef{br, uint32(n)} + return sb, sh.enqueue(sb) +} + +// addBlobToCopy adds a blob to copy to memory (not 
to disk: that's enqueue). +// It returns true if it was added, or false if it was a duplicate. +func (sh *SyncHandler) addBlobToCopy(sb blob.SizedRef) bool { + sh.mu.Lock() + defer sh.mu.Unlock() + if _, dup := sh.needCopy[sb.Ref]; dup { + return false + } + + sh.needCopy[sb.Ref] = sb.Size + sh.bytesRemain += int64(sb.Size) + + // Non-blocking send to wake up looping goroutine if it's + // sleeping... + select { + case sh.wakec <- true: + default: + } + return true +} + +func (sh *SyncHandler) enqueue(sb blob.SizedRef) error { + if !sh.addBlobToCopy(sb) { + // Dup + return nil + } + // TODO: include current time in encoded value, to attempt to + // do in-order delivery to remote side later? Possible + // friendly optimization later. Might help peer's indexer have + // less missing deps. + if err := sh.queue.Set(sb.Ref.String(), fmt.Sprint(sb.Size)); err != nil { + return err + } + return nil +} + +func (sh *SyncHandler) startFullValidation() { + sh.mu.Lock() + if len(sh.vshards) != 0 { + sh.mu.Unlock() + return + } + sh.mu.Unlock() + + sh.logf("Running full validation; determining validation shards...") + shards := sh.shardPrefixes() + + sh.mu.Lock() + if len(sh.vshards) != 0 { + sh.mu.Unlock() + return + } + sh.vshards = shards + sh.mu.Unlock() + + go sh.runFullValidation() +} + +func (sh *SyncHandler) runFullValidation() { + var wg sync.WaitGroup + + sh.mu.Lock() + shards := sh.vshards + wg.Add(len(shards)) + sh.mu.Unlock() + + sh.logf("full validation beginning with %d shards", len(shards)) + + const maxShardWorkers = 30 // arbitrary + gate := syncutil.NewGate(maxShardWorkers) + + for _, pfx := range shards { + pfx := pfx + gate.Start() + go func() { + wg.Done() + defer gate.Done() + sh.validateShardPrefix(pfx) + }() + } + wg.Wait() + sh.logf("Validation complete") +} + +func (sh *SyncHandler) validateShardPrefix(pfx string) (err error) { + defer func() { + sh.mu.Lock() + if err != nil { + errs := fmt.Sprintf("Failed to validate prefix %s: %v", pfx, err) + 
sh.logf("%s", errs) + sh.vshardErrs = append(sh.vshardErrs, errs) + } else { + sh.vshardDone++ + } + sh.mu.Unlock() + }() + ctx := context.New() + defer ctx.Cancel() + src, serrc := sh.startValidatePrefix(ctx, pfx, false) + dst, derrc := sh.startValidatePrefix(ctx, pfx, true) + srcErr := &chanError{ + C: serrc, + Wrap: func(err error) error { + return fmt.Errorf("Error enumerating source %s for validating shard %s: %v", sh.fromName, pfx, err) + }, + } + dstErr := &chanError{ + C: derrc, + Wrap: func(err error) error { + return fmt.Errorf("Error enumerating target %s for validating shard %s: %v", sh.toName, pfx, err) + }, + } + + missingc := make(chan blob.SizedRef, 8) + go blobserver.ListMissingDestinationBlobs(missingc, func(blob.Ref) {}, src, dst) + + var missing []blob.SizedRef + for sb := range missingc { + missing = append(missing, sb) + } + + if err := srcErr.Get(); err != nil { + return err + } + if err := dstErr.Get(); err != nil { + return err + } + + for _, sb := range missing { + if enqErr := sh.enqueue(sb); enqErr != nil { + if err == nil { + err = enqErr + } + } else { + sh.mu.Lock() + sh.vmissing += 1 + sh.mu.Unlock() + } + } + return err +} + +var errNotPrefix = errors.New("sentinel error: hit blob into the next shard") + +// doDest is false for source and true for dest. +func (sh *SyncHandler) startValidatePrefix(ctx *context.Context, pfx string, doDest bool) (<-chan blob.SizedRef, <-chan error) { + var e blobserver.BlobEnumerator + if doDest { + e = sh.to + } else { + e = sh.from + } + c := make(chan blob.SizedRef, 64) + errc := make(chan error, 1) + go func() { + defer close(c) + var last string // last blobref seen; to double check storage's enumeration works correctly. + err := blobserver.EnumerateAllFrom(ctx, e, pfx, func(sb blob.SizedRef) error { + // Just double-check that the storage target is returning sorted results correctly. 
+ brStr := sb.Ref.String() + if brStr < pfx { + log.Fatalf("Storage target %T enumerate not behaving: %q < requested prefix %q", e, brStr, pfx) + } + if last != "" && last >= brStr { + log.Fatalf("Storage target %T enumerate not behaving: previous %q >= current %q", e, last, brStr) + } + last = brStr + + // TODO: could add a more efficient method on blob.Ref to do this, + // that doesn't involve call String(). + if !strings.HasPrefix(brStr, pfx) { + return errNotPrefix + } + select { + case c <- sb: + sh.mu.Lock() + if doDest { + sh.vdestCount++ + sh.vdestBytes += int64(sb.Size) + } else { + sh.vsrcCount++ + sh.vsrcBytes += int64(sb.Size) + } + sh.mu.Unlock() + return nil + case <-ctx.Done(): + return context.ErrCanceled + } + }) + if err == errNotPrefix { + err = nil + } + if err != nil { + // Send a zero value to shut down ListMissingDestinationBlobs. + c <- blob.SizedRef{} + } + errc <- err + }() + return c, errc +} + +func (sh *SyncHandler) shardPrefixes() []string { + var pfx []string + // TODO(bradfitz): do limit=1 enumerates against sh.from and sh.to with varying + // "after" values to determine all the blobref types on both sides. + // For now, be lazy and assume only sha1: + for i := 0; i < 256; i++ { + pfx = append(pfx, fmt.Sprintf("sha1-%02x", i)) + } + return pfx +} + +func (sh *SyncHandler) newCopyStatus(sb blob.SizedRef) *copyStatus { + now := time.Now() + return ©Status{ + sh: sh, + sb: sb, + state: statusStarting, + start: now, + t: now, + } +} + +// copyStatus is an in-progress copy. 
+type copyStatus struct { + sh *SyncHandler + sb blob.SizedRef + start time.Time + + mu sync.Mutex + state string // one of statusFoo, below + t time.Time // last status update time + nread uint32 + nwrite uint32 +} + +const ( + statusStarting = "starting" + statusFetching = "fetching source" + statusReading = "reading" + statusWriting = "writing" +) + +func (cs *copyStatus) setStatus(s string) { + now := time.Now() + cs.mu.Lock() + defer cs.mu.Unlock() + cs.state = s + cs.t = now +} + +func (cs *copyStatus) setError(err error) { + now := time.Now() + sh := cs.sh + br := cs.sb.Ref + if err == nil { + // This is somewhat slow, so do it before we acquire the lock. + // The queue is thread-safe. + if derr := sh.queue.Delete(br.String()); derr != nil { + sh.logf("queue delete of %v error: %v", cs.sb.Ref, derr) + } + } + + sh.mu.Lock() + defer sh.mu.Unlock() + if _, needCopy := sh.needCopy[br]; !needCopy { + sh.logf("IGNORING DUPLICATE UPLOAD of %v = %v", br, err) + return + } + delete(sh.copying, br) + if err == nil { + delete(sh.needCopy, br) + delete(sh.lastFail, br) + sh.recentCopyTime = now + sh.totalCopies++ + sh.totalCopyBytes += int64(cs.sb.Size) + sh.bytesRemain -= int64(cs.sb.Size) + return + } + + sh.totalErrors++ + sh.logf("error copying %v: %v", br, err) + sh.lastFail[br] = failDetail{ + when: now, + err: err, + } + + // Kinda lame. TODO: use a ring buffer or container/list instead. 
+ if len(sh.recentErrors) == maxRecentErrors { + copy(sh.recentErrors[1:], sh.recentErrors) + sh.recentErrors = sh.recentErrors[:maxRecentErrors-1] + } + sh.recentErrors = append(sh.recentErrors, br) +} + +func (cs *copyStatus) String() string { + var buf bytes.Buffer + now := time.Now() + buf.WriteString(cs.sb.Ref.String()) + buf.WriteString(": ") + + cs.mu.Lock() + defer cs.mu.Unlock() + sinceStart := now.Sub(cs.start) + sinceLast := now.Sub(cs.t) + + switch cs.state { + case statusReading: + buf.WriteString(cs.state) + fmt.Fprintf(&buf, " (%d/%dB)", cs.nread, cs.sb.Size) + case statusWriting: + if cs.nwrite == cs.sb.Size { + buf.WriteString("wrote all, waiting ack") + } else { + buf.WriteString(cs.state) + fmt.Fprintf(&buf, " (%d/%dB)", cs.nwrite, cs.sb.Size) + } + default: + buf.WriteString(cs.state) + + } + if sinceLast > 5*time.Second { + fmt.Fprintf(&buf, ", last change %v ago (total elapsed %v)", sinceLast, sinceStart) + } + return buf.String() +} + +type failDetail struct { + when time.Time + err error +} + +// incrWriter is an io.Writer that locks mu and increments *n. +type incrWriter struct { + cs *copyStatus + n *uint32 +} + +func (w incrWriter) Write(p []byte) (n int, err error) { + w.cs.mu.Lock() + *w.n += uint32(len(p)) + w.cs.t = time.Now() + w.cs.mu.Unlock() + return len(p), nil +} + +func storageDesc(v interface{}) string { + if s, ok := v.(fmt.Stringer); ok { + return s.String() + } + return fmt.Sprintf("%T", v) +} + +// TODO(bradfitz): implement these? what do they mean? possibilities: +// a) proxy to sh.from +// b) proxy to sh.to +// c) merge intersection of sh.from, sh.to, and sh.queue: that is, a blob this pair +// currently or eventually will have. The only missing blob would be one that +// sh.from has, sh.to doesn't have, and isn't in the queue to be replicated. +// +// For now, don't implement them. Wait until we need them. 
+ +func (sh *SyncHandler) Fetch(blob.Ref) (file io.ReadCloser, size uint32, err error) { + panic("Unimplemeted blobserver.Fetch called") +} + +func (sh *SyncHandler) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error { + sh.logf("Unexpected StatBlobs call") + return nil +} + +func (sh *SyncHandler) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error { + defer close(dest) + sh.logf("Unexpected EnumerateBlobs call") + return nil +} + +func (sh *SyncHandler) RemoveBlobs(blobs []blob.Ref) error { + panic("Unimplemeted RemoveBlobs") +} + +// chanError is a Future around an incoming error channel of one item. +// It can also wrap its error in something more descriptive. +type chanError struct { + C <-chan error + Wrap func(error) error // optional + err error + received bool +} + +func (ce *chanError) Set(err error) { + if ce.Wrap != nil && err != nil { + err = ce.Wrap(err) + } + ce.err = err + ce.received = true +} + +func (ce *chanError) Get() error { + if ce.received { + return ce.err + } + ce.Set(<-ce.C) + return ce.err +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/thumbcache.go b/vendor/github.com/camlistore/camlistore/pkg/server/thumbcache.go new file mode 100644 index 00000000..925618f7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/thumbcache.go @@ -0,0 +1,84 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "errors" + "fmt" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/lru" + "camlistore.org/pkg/sorted" +) + +const memLRUSize = 1024 // arbitrary + +var errCacheMiss = errors.New("not in cache") + +// ThumbMeta is a mapping from an image's scaling parameters (encoding +// as an opaque "key" string) and the blobref of the thumbnail +// (currently its file schema blob). +// ThumbMeta is safe for concurrent use by multiple goroutines. +// +// The key will be some string containing the original full-sized image's +// blobref, its target dimensions, and any possible transformations on +// it (e.g. cropping it to square). +type ThumbMeta struct { + mem *lru.Cache // key -> blob.Ref + kv sorted.KeyValue // optional +} + +// NewThumbMeta returns a new in-memory ThumbMeta, backed with the +// optional kv. +// If kv is nil, key/value pairs are stored in memory only. +func NewThumbMeta(kv sorted.KeyValue) *ThumbMeta { + return &ThumbMeta{ + mem: lru.New(memLRUSize), + kv: kv, + } +} + +func (m *ThumbMeta) Get(key string) (blob.Ref, error) { + var br blob.Ref + if v, ok := m.mem.Get(key); ok { + return v.(blob.Ref), nil + } + if m.kv != nil { + v, err := m.kv.Get(key) + if err == sorted.ErrNotFound { + return br, errCacheMiss + } + if err != nil { + return br, err + } + br, ok := blob.Parse(v) + if !ok { + return br, fmt.Errorf("Invalid blobref %q found for key %q in thumbnail mea", v, key) + } + m.mem.Add(key, br) + return br, nil + } + return br, errCacheMiss +} + +func (m *ThumbMeta) Put(key string, br blob.Ref) error { + m.mem.Add(key, br) + if m.kv != nil { + return m.kv.Set(key, br.String()) + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/ui.go b/vendor/github.com/camlistore/camlistore/pkg/server/ui.go new file mode 100644 index 00000000..69b35797 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/ui.go @@ -0,0 +1,658 @@ +/* +Copyright 2011 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "errors" + "fmt" + "log" + "net/http" + "os" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/constants" + "camlistore.org/pkg/fileembed" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/misc/closure" + "camlistore.org/pkg/search" + "camlistore.org/pkg/server/app" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/syncutil" + "camlistore.org/pkg/types/camtypes" + uistatic "camlistore.org/server/camlistored/ui" + closurestatic "camlistore.org/server/camlistored/ui/closure" + "camlistore.org/third_party/code.google.com/p/rsc/qr" + fontawesomestatic "camlistore.org/third_party/fontawesome" + glitchstatic "camlistore.org/third_party/glitch" + lessstatic "camlistore.org/third_party/less" + reactstatic "camlistore.org/third_party/react" +) + +var ( + staticFilePattern = regexp.MustCompile(`^([a-zA-Z0-9\-\_\.]+\.(html|js|css|png|jpg|gif|svg))$`) + identOrDotPattern = regexp.MustCompile(`^[a-zA-Z\_]+(\.[a-zA-Z\_]+)*$`) + + // Download URL suffix: + // $1: blobref (checked in download handler) + // $2: optional "/filename" to be sent as recommended download name, + // if sane looking + downloadPattern = regexp.MustCompile(`^download/([^/]+)(/.*)?$`) + + thumbnailPattern = regexp.MustCompile(`^thumbnail/([^/]+)(/.*)?$`) + treePattern = 
regexp.MustCompile(`^tree/([^/]+)(/.*)?$`) + closurePattern = regexp.MustCompile(`^closure/(([^/]+)(/.*)?)$`) + lessPattern = regexp.MustCompile(`^less/(.+)$`) + reactPattern = regexp.MustCompile(`^react/(.+)$`) + fontawesomePattern = regexp.MustCompile(`^fontawesome/(.+)$`) + glitchPattern = regexp.MustCompile(`^glitch/(.+)$`) + + disableThumbCache, _ = strconv.ParseBool(os.Getenv("CAMLI_DISABLE_THUMB_CACHE")) +) + +// UIHandler handles serving the UI and discovery JSON. +type UIHandler struct { + publishRoots map[string]*publishRoot + + prefix string // of the UI handler itself + root *RootHandler + search *search.Handler + + // Cache optionally specifies a cache blob server, used for + // caching image thumbnails and other emphemeral data. + Cache blobserver.Storage // or nil + + // Limit peak RAM used by concurrent image thumbnail calls. + resizeSem *syncutil.Sem + thumbMeta *ThumbMeta // optional thumbnail key->blob.Ref cache + + // sourceRoot optionally specifies the path to root of Camlistore's + // source. If empty, the UI files must be compiled in to the + // binary (with go run make.go). This comes from the "sourceRoot" + // ui handler config option. + sourceRoot string + + uiDir string // if sourceRoot != "", this is sourceRoot+"/server/camlistored/ui" + + closureHandler http.Handler + fileLessHandler http.Handler + fileReactHandler http.Handler + fileFontawesomeHandler http.Handler + fileGlitchHandler http.Handler +} + +func init() { + blobserver.RegisterHandlerConstructor("ui", uiFromConfig) +} + +// newKVOrNil wraps sorted.NewKeyValue and adds the ability +// to pass a nil conf to get a (nil, nil) response. 
+func newKVOrNil(conf jsonconfig.Obj) (sorted.KeyValue, error) { + if len(conf) == 0 { + return nil, nil + } + return sorted.NewKeyValue(conf) +} + +func uiFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) { + ui := &UIHandler{ + prefix: ld.MyPrefix(), + sourceRoot: conf.OptionalString("sourceRoot", ""), + resizeSem: syncutil.NewSem(int64(conf.OptionalInt("maxResizeBytes", + constants.DefaultMaxResizeMem))), + } + cachePrefix := conf.OptionalString("cache", "") + scaledImageConf := conf.OptionalObject("scaledImage") + if err = conf.Validate(); err != nil { + return + } + + scaledImageKV, err := newKVOrNil(scaledImageConf) + if err != nil { + return nil, fmt.Errorf("in UI handler's scaledImage: %v", err) + } + if scaledImageKV != nil && cachePrefix == "" { + return nil, fmt.Errorf("in UI handler, can't specify scaledImage without cache") + } + if cachePrefix != "" { + bs, err := ld.GetStorage(cachePrefix) + if err != nil { + return nil, fmt.Errorf("UI handler's cache of %q error: %v", cachePrefix, err) + } + ui.Cache = bs + ui.thumbMeta = NewThumbMeta(scaledImageKV) + } + + if ui.sourceRoot == "" { + ui.sourceRoot = os.Getenv("CAMLI_DEV_CAMLI_ROOT") + if uistatic.IsAppEngine { + if _, err = os.Stat(filepath.Join(uistatic.GaeSourceRoot, + filepath.FromSlash("server/camlistored/ui/index.html"))); err != nil { + hint := fmt.Sprintf("\"sourceRoot\" was not specified in the config,"+ + " and the default sourceRoot dir %v does not exist or does not contain"+ + " \"server/camlistored/ui/index.html\". 
devcam appengine can do that for you.", + uistatic.GaeSourceRoot) + log.Print(hint) + return nil, errors.New("No sourceRoot found; UI not available.") + } + log.Printf("Using the default \"%v\" as the sourceRoot for AppEngine", uistatic.GaeSourceRoot) + ui.sourceRoot = uistatic.GaeSourceRoot + } + } + if ui.sourceRoot != "" { + ui.uiDir = filepath.Join(ui.sourceRoot, filepath.FromSlash("server/camlistored/ui")) + // Ignore any fileembed files: + Files = &fileembed.Files{ + DirFallback: filepath.Join(ui.sourceRoot, filepath.FromSlash("pkg/server")), + } + uistatic.Files = &fileembed.Files{ + DirFallback: ui.uiDir, + Listable: true, + // In dev_appserver, allow edit-and-reload without + // restarting. In production, though, it's faster to just + // slurp it in. + SlurpToMemory: uistatic.IsProdAppEngine, + } + } + + ui.closureHandler, err = ui.makeClosureHandler(ui.sourceRoot) + if err != nil { + return nil, fmt.Errorf(`Invalid "sourceRoot" value of %q: %v"`, ui.sourceRoot, err) + } + + if ui.sourceRoot != "" { + ui.fileReactHandler, err = makeFileServer(ui.sourceRoot, filepath.Join("third_party", "react"), "react.js") + if err != nil { + return nil, fmt.Errorf("Could not make react handler: %s", err) + } + ui.fileGlitchHandler, err = makeFileServer(ui.sourceRoot, filepath.Join("third_party", "glitch"), "npc_piggy__x1_walk_png_1354829432.png") + if err != nil { + return nil, fmt.Errorf("Could not make glitch handler: %s", err) + } + ui.fileFontawesomeHandler, err = makeFileServer(ui.sourceRoot, filepath.Join("third_party", "fontawesome"), "css/font-awesome.css") + if err != nil { + return nil, fmt.Errorf("Could not make fontawesome handler: %s", err) + } + ui.fileLessHandler, err = makeFileServer(ui.sourceRoot, filepath.Join("third_party", "less"), "less.js") + if err != nil { + return nil, fmt.Errorf("Could not make less handler: %s", err) + } + } + + rootPrefix, _, err := ld.FindHandlerByType("root") + if err != nil { + return nil, errors.New("No root handler 
configured, which is necessary for the ui handler") + } + if h, err := ld.GetHandler(rootPrefix); err == nil { + ui.root = h.(*RootHandler) + ui.root.registerUIHandler(ui) + } else { + return nil, errors.New("failed to find the 'root' handler") + } + + return ui, nil +} + +type publishRoot struct { + Name string + Permanode blob.Ref + Prefix string +} + +// InitHandler goes through all the other configured handlers to discover +// the publisher ones, and uses them to populate ui.publishRoots. +func (ui *UIHandler) InitHandler(hl blobserver.FindHandlerByTyper) error { + // InitHandler is called after all handlers have been setup, so the bootstrap + // of the camliRoot node for publishers in dev-mode is already done. + searchPrefix, _, err := hl.FindHandlerByType("search") + if err != nil { + return errors.New("No search handler configured, which is necessary for the ui handler") + } + var sh *search.Handler + htype, hi := hl.AllHandlers() + if h, ok := hi[searchPrefix]; !ok { + return errors.New("failed to find the \"search\" handler") + } else { + sh = h.(*search.Handler) + ui.search = sh + } + camliRootQuery := func(camliRoot string) (*search.SearchResult, error) { + return sh.Query(&search.SearchQuery{ + Limit: 1, + Constraint: &search.Constraint{ + Permanode: &search.PermanodeConstraint{ + Attr: "camliRoot", + Value: camliRoot, + }, + }, + }) + } + for prefix, typ := range htype { + if typ != "app" { + continue + } + ah, ok := hi[prefix].(*app.Handler) + if !ok { + panic(fmt.Sprintf("UI: handler for %v has type \"app\" but is not app.Handler", prefix)) + } + if ah.ProgramName() != "publisher" { + continue + } + appConfig := ah.AppConfig() + if appConfig == nil { + log.Printf("UI: app handler for %v has no appConfig", prefix) + continue + } + camliRoot, ok := appConfig["camliRoot"].(string) + if !ok { + log.Printf("UI: camliRoot in appConfig is %T, want string", appConfig["camliRoot"]) + continue + } + result, err := camliRootQuery(camliRoot) + if err != nil { + 
log.Printf("UI: could not find permanode for camliRoot %v: %v", camliRoot, err) + continue + } + if len(result.Blobs) == 0 || !result.Blobs[0].Blob.Valid() { + log.Printf("UI: no valid permanode for camliRoot %v", camliRoot) + continue + } + if ui.publishRoots == nil { + ui.publishRoots = make(map[string]*publishRoot) + } + ui.publishRoots[prefix] = &publishRoot{ + Name: camliRoot, + Prefix: prefix, + Permanode: result.Blobs[0].Blob, + } + } + return nil +} + +func (ui *UIHandler) makeClosureHandler(root string) (http.Handler, error) { + return makeClosureHandler(root, "ui") +} + +// makeClosureHandler returns a handler to serve Closure files. +// root is either: +// 1) empty: use the Closure files compiled in to the binary (if +// available), else redirect to the Internet. +// 2) a URL prefix: base of Camlistore to get Closure to redirect to +// 3) a path on disk to the root of camlistore's source (which +// contains the necessary subset of Closure files) +func makeClosureHandler(root, handlerName string) (http.Handler, error) { + // devcam server environment variable takes precedence: + if d := os.Getenv("CAMLI_DEV_CLOSURE_DIR"); d != "" { + log.Printf("%v: serving Closure from devcam server's $CAMLI_DEV_CLOSURE_DIR: %v", handlerName, d) + return http.FileServer(http.Dir(d)), nil + } + if root == "" { + fs, err := closurestatic.FileSystem() + if err == os.ErrNotExist { + log.Printf("%v: no configured setting or embedded resources; serving Closure via %v", handlerName, closureBaseURL) + return closureBaseURL, nil + } + if err != nil { + return nil, fmt.Errorf("error loading embedded Closure zip file: %v", err) + } + log.Printf("%v: serving Closure from embedded resources", handlerName) + return http.FileServer(fs), nil + } + if strings.HasPrefix(root, "http") { + log.Printf("%v: serving Closure using redirects to %v", handlerName, root) + return closureRedirector(root), nil + } + + path := filepath.Join("third_party", "closure", "lib", "closure") + return 
makeFileServer(root, path, filepath.Join("goog", "base.js")) +} + +func makeFileServer(sourceRoot string, pathToServe string, expectedContentPath string) (http.Handler, error) { + fi, err := os.Stat(sourceRoot) + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, errors.New("not a directory") + } + dirToServe := filepath.Join(sourceRoot, pathToServe) + _, err = os.Stat(filepath.Join(dirToServe, expectedContentPath)) + if err != nil { + return nil, fmt.Errorf("directory doesn't contain %s; wrong directory?", expectedContentPath) + } + return http.FileServer(http.Dir(dirToServe)), nil +} + +const closureBaseURL closureRedirector = "https://closure-library.googlecode.com/git" + +// closureRedirector is a hack to redirect requests for Closure's million *.js files +// to https://closure-library.googlecode.com/git. +// TODO: this doesn't work when offline. We need to run genjsdeps over all of the Camlistore +// UI to figure out which Closure *.js files to fileembed and generate zembed. Then this +// type can be deleted. 
+type closureRedirector string + +func (base closureRedirector) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + newURL := string(base) + "/" + path.Clean(httputil.PathSuffix(req)) + http.Redirect(rw, req, newURL, http.StatusTemporaryRedirect) +} + +func camliMode(req *http.Request) string { + return req.URL.Query().Get("camli.mode") +} + +func wantsBlobRef(req *http.Request) bool { + _, ok := blob.ParseKnown(httputil.PathSuffix(req)) + return ok +} + +func wantsDiscovery(req *http.Request) bool { + return httputil.IsGet(req) && + (req.Header.Get("Accept") == "text/x-camli-configuration" || + camliMode(req) == "config") +} + +func wantsUploadHelper(req *http.Request) bool { + return req.Method == "POST" && camliMode(req) == "uploadhelper" +} + +func wantsPermanode(req *http.Request) bool { + return httputil.IsGet(req) && blob.ValidRefString(req.FormValue("p")) +} + +func wantsBlobInfo(req *http.Request) bool { + return httputil.IsGet(req) && blob.ValidRefString(req.FormValue("b")) +} + +func wantsFileTreePage(req *http.Request) bool { + return httputil.IsGet(req) && blob.ValidRefString(req.FormValue("d")) +} + +func getSuffixMatches(req *http.Request, pattern *regexp.Regexp) bool { + if httputil.IsGet(req) { + suffix := httputil.PathSuffix(req) + return pattern.MatchString(suffix) + } + return false +} + +func (ui *UIHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + suffix := httputil.PathSuffix(req) + + rw.Header().Set("Vary", "Accept") + switch { + case wantsDiscovery(req): + ui.root.serveDiscovery(rw, req) + case wantsUploadHelper(req): + ui.serveUploadHelper(rw, req) + case strings.HasPrefix(suffix, "download/"): + ui.serveDownload(rw, req) + case strings.HasPrefix(suffix, "thumbnail/"): + ui.serveThumbnail(rw, req) + case strings.HasPrefix(suffix, "tree/"): + ui.serveFileTree(rw, req) + case strings.HasPrefix(suffix, "qr/"): + ui.serveQR(rw, req) + case getSuffixMatches(req, closurePattern): + ui.serveClosure(rw, req) + case 
getSuffixMatches(req, lessPattern): + ui.serveFromDiskOrStatic(rw, req, lessPattern, ui.fileLessHandler, lessstatic.Files) + case getSuffixMatches(req, reactPattern): + ui.serveFromDiskOrStatic(rw, req, reactPattern, ui.fileReactHandler, reactstatic.Files) + case getSuffixMatches(req, glitchPattern): + ui.serveFromDiskOrStatic(rw, req, glitchPattern, ui.fileGlitchHandler, glitchstatic.Files) + case getSuffixMatches(req, fontawesomePattern): + ui.serveFromDiskOrStatic(rw, req, fontawesomePattern, ui.fileFontawesomeHandler, fontawesomestatic.Files) + default: + file := "" + if m := staticFilePattern.FindStringSubmatch(suffix); m != nil { + file = m[1] + } else { + switch { + case wantsBlobRef(req): + file = "index.html" + case wantsPermanode(req): + file = "permanode.html" + case wantsBlobInfo(req): + file = "blobinfo.html" + case wantsFileTreePage(req): + file = "filetree.html" + case req.URL.Path == httputil.PathBase(req): + file = "index.html" + default: + http.Error(rw, "Illegal URL.", http.StatusNotFound) + return + } + } + if file == "deps.js" { + serveDepsJS(rw, req, ui.uiDir) + return + } + ServeStaticFile(rw, req, uistatic.Files, file) + } +} + +// ServeStaticFile serves file from the root virtual filesystem. +func ServeStaticFile(rw http.ResponseWriter, req *http.Request, root http.FileSystem, file string) { + f, err := root.Open("/" + file) + if err != nil { + http.NotFound(rw, req) + log.Printf("Failed to open file %q from embedded resources: %v", file, err) + return + } + defer f.Close() + var modTime time.Time + if fi, err := f.Stat(); err == nil { + modTime = fi.ModTime() + } + // TODO(wathiede): should pkg/magic be leveraged here somehow? It has a + // slightly different purpose. 
+ if strings.HasSuffix(file, ".svg") { + rw.Header().Set("Content-Type", "image/svg+xml") + } + http.ServeContent(rw, req, file, modTime, f) +} + +func (ui *UIHandler) discovery() *camtypes.UIDiscovery { + pubRoots := map[string]*camtypes.PublishRootDiscovery{} + for _, v := range ui.publishRoots { + rd := &camtypes.PublishRootDiscovery{ + Name: v.Name, + Prefix: []string{v.Prefix}, + CurrentPermanode: v.Permanode, + } + pubRoots[v.Name] = rd + } + + uiDisco := &camtypes.UIDiscovery{ + UIRoot: ui.prefix, + UploadHelper: ui.prefix + "?camli.mode=uploadhelper", + DownloadHelper: path.Join(ui.prefix, "download") + "/", + DirectoryHelper: path.Join(ui.prefix, "tree") + "/", + PublishRoots: pubRoots, + } + return uiDisco +} + +func (ui *UIHandler) serveDownload(rw http.ResponseWriter, req *http.Request) { + if ui.root.Storage == nil { + http.Error(rw, "No BlobRoot configured", 500) + return + } + + suffix := httputil.PathSuffix(req) + m := downloadPattern.FindStringSubmatch(suffix) + if m == nil { + httputil.ErrorRouting(rw, req) + return + } + + fbr, ok := blob.Parse(m[1]) + if !ok { + http.Error(rw, "Invalid blobref", 400) + return + } + + dh := &DownloadHandler{ + Fetcher: ui.root.Storage, + Search: ui.search, + Cache: ui.Cache, + } + dh.ServeHTTP(rw, req, fbr) +} + +func (ui *UIHandler) serveThumbnail(rw http.ResponseWriter, req *http.Request) { + if ui.root.Storage == nil { + http.Error(rw, "No BlobRoot configured", 500) + return + } + + suffix := httputil.PathSuffix(req) + m := thumbnailPattern.FindStringSubmatch(suffix) + if m == nil { + httputil.ErrorRouting(rw, req) + return + } + + query := req.URL.Query() + width, _ := strconv.Atoi(query.Get("mw")) + height, _ := strconv.Atoi(query.Get("mh")) + blobref, ok := blob.Parse(m[1]) + if !ok { + http.Error(rw, "Invalid blobref", 400) + return + } + + if width == 0 { + width = search.MaxImageSize + } + if height == 0 { + height = search.MaxImageSize + } + + th := &ImageHandler{ + Fetcher: ui.root.Storage, + Cache: 
ui.Cache, + MaxWidth: width, + MaxHeight: height, + ThumbMeta: ui.thumbMeta, + ResizeSem: ui.resizeSem, + Search: ui.search, + } + th.ServeHTTP(rw, req, blobref) +} + +func (ui *UIHandler) serveFileTree(rw http.ResponseWriter, req *http.Request) { + if ui.root.Storage == nil { + http.Error(rw, "No BlobRoot configured", 500) + return + } + + suffix := httputil.PathSuffix(req) + m := treePattern.FindStringSubmatch(suffix) + if m == nil { + httputil.ErrorRouting(rw, req) + return + } + + blobref, ok := blob.Parse(m[1]) + if !ok { + http.Error(rw, "Invalid blobref", 400) + return + } + + fth := &FileTreeHandler{ + Fetcher: ui.root.Storage, + file: blobref, + } + fth.ServeHTTP(rw, req) +} + +func (ui *UIHandler) serveClosure(rw http.ResponseWriter, req *http.Request) { + suffix := httputil.PathSuffix(req) + if ui.closureHandler == nil { + log.Printf("%v not served: closure handler is nil", suffix) + http.NotFound(rw, req) + return + } + m := closurePattern.FindStringSubmatch(suffix) + if m == nil { + httputil.ErrorRouting(rw, req) + return + } + req.URL.Path = "/" + m[1] + ui.closureHandler.ServeHTTP(rw, req) +} + +// serveFromDiskOrStatic matches rx against req's path and serves the match either from disk (if non-nil) or from static (embedded in the binary). 
+func (ui *UIHandler) serveFromDiskOrStatic(rw http.ResponseWriter, req *http.Request, rx *regexp.Regexp, disk http.Handler, static *fileembed.Files) { + suffix := httputil.PathSuffix(req) + m := rx.FindStringSubmatch(suffix) + if m == nil { + panic("Caller should verify that rx matches") + } + file := m[1] + if disk != nil { + req.URL.Path = "/" + file + disk.ServeHTTP(rw, req) + } else { + ServeStaticFile(rw, req, static, file) + } + +} + +func (ui *UIHandler) serveQR(rw http.ResponseWriter, req *http.Request) { + url := req.URL.Query().Get("url") + if url == "" { + http.Error(rw, "Missing url parameter.", http.StatusBadRequest) + return + } + code, err := qr.Encode(url, qr.L) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + rw.Header().Set("Content-Type", "image/png") + rw.Write(code.PNG()) +} + +// serveDepsJS serves an auto-generated Closure deps.js file. +func serveDepsJS(rw http.ResponseWriter, req *http.Request, dir string) { + var root http.FileSystem + if dir == "" { + root = uistatic.Files + } else { + root = http.Dir(dir) + } + + b, err := closure.GenDeps(root) + if err != nil { + log.Print(err) + http.Error(rw, "Server error", 500) + return + } + rw.Header().Set("Content-Type", "text/javascript; charset=utf-8") + rw.Write([]byte("// auto-generated from camlistored\n")) + rw.Write(b) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/uploadhelper.go b/vendor/github.com/camlistore/camlistore/pkg/server/uploadhelper.go new file mode 100644 index 00000000..a9fec2e3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/uploadhelper.go @@ -0,0 +1,93 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "io" + "io/ioutil" + "log" + "net/http" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/types" +) + +// uploadHelperResponse is the response from serveUploadHelper. +type uploadHelperResponse struct { + Got []*uploadHelperGotItem `json:"got"` +} + +type uploadHelperGotItem struct { + FileName string `json:"filename"` + ModTime types.Time3339 `json:"modtime"` + FormName string `json:"formname"` + FileRef blob.Ref `json:"fileref"` +} + +func (ui *UIHandler) serveUploadHelper(rw http.ResponseWriter, req *http.Request) { + if ui.root.Storage == nil { + httputil.ServeJSONError(rw, httputil.ServerError("No BlobRoot configured")) + return + } + + mr, err := req.MultipartReader() + if err != nil { + httputil.ServeJSONError(rw, httputil.ServerError("reading body: "+err.Error())) + return + } + + var got []*uploadHelperGotItem + var modTime types.Time3339 + for { + part, err := mr.NextPart() + if err == io.EOF { + break + } + if err != nil { + httputil.ServeJSONError(rw, httputil.ServerError("reading body: "+err.Error())) + break + } + if part.FormName() == "modtime" { + payload, err := ioutil.ReadAll(part) + if err != nil { + log.Printf("ui uploadhelper: unable to read part for modtime: %v", err) + continue + } + modTime = types.ParseTime3339OrZero(string(payload)) + continue + } + fileName := part.FileName() + if fileName == "" { + continue + } + br, err := schema.WriteFileFromReaderWithModTime(ui.root.Storage, fileName, modTime.Time(), part) + if err != nil { + 
httputil.ServeJSONError(rw, httputil.ServerError("writing to blobserver: "+err.Error())) + return + } + got = append(got, &uploadHelperGotItem{ + FileName: part.FileName(), + ModTime: modTime, + FormName: part.FormName(), + FileRef: br, + }) + } + + httputil.ReturnJSON(rw, &uploadHelperResponse{Got: got}) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/wizard-html.go b/vendor/github.com/camlistore/camlistore/pkg/server/wizard-html.go new file mode 100644 index 00000000..5d467307 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/wizard-html.go @@ -0,0 +1,39 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +// TODO: this file and the code in wizard.go is outdated. Anyone interested enough +// can take care of updating it as something nicer which would fit better with the +// react UI. But in the meantime we don't link to it anymore. + +const topWizard = ` + + + + Camlistore setup + + +

    [Back]

    +

    Setup Wizard

    +

    See Server Configuration for information on configuring the values below.

    +
    +` + +const bottomWizard = ` + + +` diff --git a/vendor/github.com/camlistore/camlistore/pkg/server/wizard.go b/vendor/github.com/camlistore/camlistore/pkg/server/wizard.go new file mode 100644 index 00000000..54718a49 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/server/wizard.go @@ -0,0 +1,287 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "crypto/rand" + "encoding/json" + "fmt" + "html/template" + "log" + "net/http" + "os" + "reflect" + "strconv" + "strings" + + "camlistore.org/pkg/auth" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/osutil" + + "camlistore.org/third_party/code.google.com/p/xsrftoken" +) + +var ignoredFields = map[string]bool{ + "gallery": true, + "blog": true, + "replicateTo": true, +} + +// SetupHandler handles serving the wizard setup page. 
+type SetupHandler struct { + config jsonconfig.Obj +} + +func init() { + blobserver.RegisterHandlerConstructor("setup", newSetupFromConfig) +} + +func newSetupFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) { + wizard := &SetupHandler{config: conf} + return wizard, nil +} + +func printWizard(i interface{}) (s string) { + switch ei := i.(type) { + case []string: + for _, v := range ei { + s += printWizard(v) + "," + } + s = strings.TrimRight(s, ",") + case []interface{}: + for _, v := range ei { + s += printWizard(v) + "," + } + s = strings.TrimRight(s, ",") + default: + return fmt.Sprintf("%v", i) + } + return s +} + +// TODO(mpl): probably not needed anymore. check later and remove. +// Flatten all published entities as lists and move them at the root +// of the conf, to have them displayed individually by the template +func flattenPublish(config jsonconfig.Obj) error { + gallery := []string{} + blog := []string{} + config["gallery"] = gallery + config["blog"] = blog + published, ok := config["publish"] + if !ok { + delete(config, "publish") + return nil + } + pubObj, ok := published.(map[string]interface{}) + if !ok { + return fmt.Errorf("Was expecting a map[string]interface{} for \"publish\", got %T", published) + } + for k, v := range pubObj { + pub, ok := v.(map[string]interface{}) + if !ok { + return fmt.Errorf("Was expecting a map[string]interface{} for %s, got %T", k, pub) + } + template, rootPermanode, style := "", "", "" + for pk, pv := range pub { + val, ok := pv.(string) + if !ok { + return fmt.Errorf("Was expecting a string for %s, got %T", pk, pv) + } + switch pk { + case "template": + template = val + case "rootPermanode": + rootPermanode = val + case "style": + style = val + default: + return fmt.Errorf("Unknown key %q in %s", pk, k) + } + } + if template == "" || rootPermanode == "" { + return fmt.Errorf("missing \"template\" key or \"rootPermanode\" key in %s", k) + } + obj := []string{k, rootPermanode, style} + 
config[template] = obj + } + + delete(config, "publish") + return nil +} + +var serverKey = func() string { + var b [20]byte + rand.Read(b[:]) + return string(b[:]) +}() + +func sendWizard(rw http.ResponseWriter, req *http.Request, hasChanged bool) { + config, err := jsonconfig.ReadFile(osutil.UserServerConfigPath()) + if err != nil { + httputil.ServeError(rw, req, err) + return + } + + err = flattenPublish(config) + if err != nil { + httputil.ServeError(rw, req, err) + return + } + + funcMap := template.FuncMap{ + "printWizard": printWizard, + "showField": func(inputName string) bool { + if _, ok := ignoredFields[inputName]; ok { + return false + } + return true + }, + "genXSRF": func() string { + return xsrftoken.Generate(serverKey, "user", "wizardSave") + }, + } + + body := ` + + + {{range $k,$v := .}}{{if showField $k}}{{end}}{{end}} +
    {{printf "%v" $k}}
    + + (Will restart server.)
    ` + + if hasChanged { + body += `

    Configuration succesfully rewritten

    ` + } + + tmpl, err := template.New("wizard").Funcs(funcMap).Parse(topWizard + body + bottomWizard) + if err != nil { + httputil.ServeError(rw, req, err) + return + } + err = tmpl.Execute(rw, config) + if err != nil { + httputil.ServeError(rw, req, err) + return + } +} + +func rewriteConfig(config *jsonconfig.Obj, configfile string) error { + b, err := json.MarshalIndent(*config, "", " ") + if err != nil { + return err + } + s := string(b) + f, err := os.Create(configfile) + if err != nil { + return err + } + defer f.Close() + _, err = f.WriteString(s) + return err +} + +func handleSetupChange(rw http.ResponseWriter, req *http.Request) { + hilevelConf, err := jsonconfig.ReadFile(osutil.UserServerConfigPath()) + if err != nil { + httputil.ServeError(rw, req, err) + return + } + if !xsrftoken.Valid(req.FormValue("token"), serverKey, "user", "wizardSave") { + http.Error(rw, "Form expired. Press back and reload form.", http.StatusBadRequest) + log.Printf("invalid xsrf token=%q", req.FormValue("token")) + return + } + + hasChanged := false + var el interface{} + publish := jsonconfig.Obj{} + for k, v := range req.Form { + if _, ok := hilevelConf[k]; !ok { + if k != "gallery" && k != "blog" { + continue + } + } + + switch k { + case "https", "shareHandler": + b, err := strconv.ParseBool(v[0]) + if err != nil { + httputil.ServeError(rw, req, fmt.Errorf("%v field expects a boolean value", k)) + } + el = b + default: + el = v[0] + } + if reflect.DeepEqual(hilevelConf[k], el) { + continue + } + hasChanged = true + hilevelConf[k] = el + } + // "publish" wasn't checked yet + if !reflect.DeepEqual(hilevelConf["publish"], publish) { + hilevelConf["publish"] = publish + hasChanged = true + } + + if hasChanged { + err = rewriteConfig(&hilevelConf, osutil.UserServerConfigPath()) + if err != nil { + httputil.ServeError(rw, req, err) + return + } + err = osutil.RestartProcess() + if err != nil { + log.Fatal("Failed to restart: " + err.Error()) + http.Error(rw, "Failed to restart 
process", 500) + return + } + } + sendWizard(rw, req, hasChanged) +} + +func (sh *SetupHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if !auth.IsLocalhost(req) { + fmt.Fprintf(rw, + "Setup only allowed from localhost"+ + "

    Back

    "+ + "\n") + return + } + http.Redirect(rw, req, "http://camlistore.org/docs/server-config", http.StatusMovedPermanently) + return + + // TODO: this file and the code in wizard-html.go is outdated. Anyone interested enough + // can take care of updating it as something nicer which would fit better with the + // react UI. But in the meantime we don't link to it anymore. + + if req.Method == "POST" { + err := req.ParseMultipartForm(10e6) + if err != nil { + httputil.ServeError(rw, req, err) + return + } + if len(req.Form) > 0 { + handleSetupChange(rw, req) + } + return + } + + sendWizard(rw, req, false) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/devmode.go b/vendor/github.com/camlistore/camlistore/pkg/serverinit/devmode.go new file mode 100644 index 00000000..9c3d5971 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/devmode.go @@ -0,0 +1,106 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serverinit + +import ( + "errors" + "fmt" + "log" + "strings" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/env" + "camlistore.org/pkg/jsonsign/signhandler" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/search" + "camlistore.org/pkg/server/app" +) + +func (hl *handlerLoader) initPublisherRootNode(ah *app.Handler) error { + if !env.IsDev() { + return nil + } + + h, err := hl.GetHandler("/my-search/") + if err != nil { + return err + } + sh := h.(*search.Handler) + camliRootQuery := func(camliRoot string) (*search.SearchResult, error) { + return sh.Query(&search.SearchQuery{ + Limit: 1, + Constraint: &search.Constraint{ + Permanode: &search.PermanodeConstraint{ + Attr: "camliRoot", + Value: camliRoot, + }, + }, + }) + } + + appConfig := ah.AppConfig() + if appConfig == nil { + return errors.New("publisher app handler has no AppConfig") + } + camliRoot, ok := appConfig["camliRoot"].(string) + if !ok { + return fmt.Errorf("camliRoot in publisher app handler appConfig is %T, want string", appConfig["camliRoot"]) + } + result, err := camliRootQuery(camliRoot) + if err == nil && len(result.Blobs) > 0 && result.Blobs[0].Blob.Valid() { + // root node found, nothing more to do. 
+ log.Printf("Found %v camliRoot node for publisher: %v", camliRoot, result.Blobs[0].Blob.String()) + return nil + } + + log.Printf("No %v camliRoot node found, creating one from scratch now.", camliRoot) + + bs, err := hl.GetStorage("/bs-recv/") + if err != nil { + return err + } + h, err = hl.GetHandler("/sighelper/") + if err != nil { + return err + } + sigh := h.(*signhandler.Handler) + + signUpload := func(bb *schema.Builder) (blob.Ref, error) { + signed, err := sigh.Sign(bb) + if err != nil { + return blob.Ref{}, fmt.Errorf("could not sign blob: %v", err) + } + br := blob.SHA1FromString(signed) + if _, err := blobserver.Receive(bs, br, strings.NewReader(signed)); err != nil { + return blob.Ref{}, fmt.Errorf("could not upload %v: %v", br.String(), err) + } + return br, nil + } + + pn, err := signUpload(schema.NewUnsignedPermanode()) + if err != nil { + return fmt.Errorf("could not create new camliRoot node: %v", err) + } + if _, err := signUpload(schema.NewSetAttributeClaim(pn, "camliRoot", camliRoot)); err != nil { + return fmt.Errorf("could not set camliRoot on new node %v: %v", pn, err) + } + if _, err := signUpload(schema.NewSetAttributeClaim(pn, "title", "Publish root node for "+camliRoot)); err != nil { + return fmt.Errorf("could not set camliRoot on new node %v: %v", pn, err) + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/env.go b/vendor/github.com/camlistore/camlistore/pkg/serverinit/env.go new file mode 100644 index 00000000..b098bc10 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/env.go @@ -0,0 +1,95 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serverinit + +import ( + "fmt" + "os" + "strings" + + "camlistore.org/pkg/env" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/types/serverconfig" + "google.golang.org/cloud/compute/metadata" +) + +// DefaultEnvConfig returns the default configuration when running on a known +// environment. Currently this just includes Google Compute Engine. +// If the environment isn't known (nil, nil) is returned. +func DefaultEnvConfig() (*Config, error) { + if !env.OnGCE() { + return nil, nil + } + auth := "none" + user, _ := metadata.InstanceAttributeValue("camlistore-username") + pass, _ := metadata.InstanceAttributeValue("camlistore-password") + confBucket, err := metadata.InstanceAttributeValue("camlistore-config-dir") + if confBucket == "" || err != nil { + return nil, fmt.Errorf("VM instance metadata key 'camlistore-config-dir' not set: %v", err) + } + blobBucket, err := metadata.InstanceAttributeValue("camlistore-blob-dir") + if blobBucket == "" || err != nil { + return nil, fmt.Errorf("VM instance metadata key 'camlistore-blob-dir' not set: %v", err) + } + if user != "" && pass != "" { + auth = "userpass:" + user + ":" + pass + } + + if v := osutil.SecretRingFile(); !strings.HasPrefix(v, "/gcs/") { + return nil, fmt.Errorf("Internal error: secret ring path on GCE should be at /gcs/, not %q", v) + } + keyId, secRing, err := getOrMakeKeyring() + if err != nil { + return nil, err + } + + ipOrHost, _ := metadata.ExternalIP() + host, _ := metadata.InstanceAttributeValue("camlistore-hostname") + if host != "" && host != "localhost" { + ipOrHost = host + } + + 
highConf := &serverconfig.Config{ + Auth: auth, + BaseURL: fmt.Sprintf("https://%s", ipOrHost), + HTTPS: true, + Listen: "0.0.0.0:443", + Identity: keyId, + IdentitySecretRing: secRing, + GoogleCloudStorage: ":" + strings.TrimPrefix(blobBucket, "gs://"), + DBNames: map[string]string{}, + PackRelated: true, + + // SourceRoot is where we look for the UI js/css/html files, and the Closure resources. + // Must be in sync with misc/docker/server/Dockerfile. + SourceRoot: "/camlistore", + } + + // Detect a linked Docker MySQL container. It must have alias "mysqldb". + if v := os.Getenv("MYSQLDB_PORT"); strings.HasPrefix(v, "tcp://") { + hostPort := strings.TrimPrefix(v, "tcp://") + highConf.MySQL = "root@" + hostPort + ":" // no password + highConf.DBNames["queue-sync-to-index"] = "sync_index_queue" + highConf.DBNames["ui_thumbcache"] = "ui_thumbmeta_cache" + highConf.DBNames["blobpacked_index"] = "blobpacked_index" + } else { + // TODO: also detect Cloud SQL. + highConf.KVFile = "/index.kv" + } + + return genLowLevelConfig(highConf) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/export_test.go b/vendor/github.com/camlistore/camlistore/pkg/serverinit/export_test.go new file mode 100644 index 00000000..68b8efa8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/export_test.go @@ -0,0 +1,29 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serverinit + +var GenLowLevelConfig = genLowLevelConfig + +var DefaultBaseConfig = defaultBaseConfig + +func SetTempDirFunc(f func() string) { + tempDir = f +} + +func SetNoMkdir(v bool) { + noMkdir = v +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/genconfig.go b/vendor/github.com/camlistore/camlistore/pkg/serverinit/genconfig.go new file mode 100644 index 00000000..87473b5a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/genconfig.go @@ -0,0 +1,947 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serverinit + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "net/url" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/jsonsign" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/types/serverconfig" + "camlistore.org/pkg/wkfs" +) + +var ( + tempDir = os.TempDir + noMkdir bool // for tests to not call os.Mkdir +) + +type tlsOpts struct { + httpsCert string + httpsKey string +} + +// genLowLevelConfig returns a low-level config from a high-level config. +func genLowLevelConfig(conf *serverconfig.Config) (lowLevelConf *Config, err error) { + b := &lowBuilder{ + high: conf, + low: jsonconfig.Obj{ + "prefixes": make(map[string]interface{}), + }, + } + return b.build() +} + +// A lowBuilder builds a low-level config from a high-level config. 
+type lowBuilder struct { + high *serverconfig.Config // high-level config (input) + low jsonconfig.Obj // low-level handler config (output) +} + +// args is an alias for map[string]interface{} just to cut down on +// noise below. But we take care to convert it back to +// map[string]interface{} in the one place where we accept it. +type args map[string]interface{} + +func (b *lowBuilder) addPrefix(at, handler string, a args) { + v := map[string]interface{}{ + "handler": handler, + } + if a != nil { + v["handlerArgs"] = (map[string]interface{})(a) + } + b.low["prefixes"].(map[string]interface{})[at] = v +} + +func (b *lowBuilder) hasPrefix(p string) bool { + _, ok := b.low["prefixes"].(map[string]interface{})[p] + return ok +} + +func (b *lowBuilder) runIndex() bool { return b.high.RunIndex.Get() } +func (b *lowBuilder) copyIndexToMemory() bool { return b.high.CopyIndexToMemory.Get() } + +// dbName returns which database to use for the provided user ("of"). +// The user should be a key as described in pkg/types/serverconfig/config.go's +// description of DBNames: "index", "queue-sync-to-index", etc. 
+func (b *lowBuilder) dbName(of string) string { + if v, ok := b.high.DBNames[of]; ok && v != "" { + return v + } + if of == "index" { + if b.high.DBName != "" { + return b.high.DBName + } + username := osutil.Username() + if username == "" { + envVar := "USER" + if runtime.GOOS == "windows" { + envVar += "NAME" + } + return "camlistore_index" + } + return "camli" + username + } + return "" +} + +var errNoOwner = errors.New("no owner") + +// Error is errNoOwner if no identity configured +func (b *lowBuilder) searchOwner() (br blob.Ref, err error) { + if b.high.Identity == "" { + return br, errNoOwner + } + entity, err := jsonsign.EntityFromSecring(b.high.Identity, b.high.IdentitySecretRing) + if err != nil { + return br, err + } + armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity) + if err != nil { + return br, err + } + return blob.SHA1FromString(armoredPublicKey), nil +} + +func (b *lowBuilder) addPublishedConfig(tlsO *tlsOpts) error { + published := b.high.Publish + for k, v := range published { + if v.CamliRoot == "" { + return fmt.Errorf("Missing \"camliRoot\" key in configuration for %s.", k) + } + if v.GoTemplate == "" { + return fmt.Errorf("Missing \"goTemplate\" key in configuration for %s.", k) + } + + appConfig := map[string]interface{}{ + "camliRoot": v.CamliRoot, + "cacheRoot": v.CacheRoot, + "goTemplate": v.GoTemplate, + } + if v.HTTPSCert != "" && v.HTTPSKey != "" { + // user can specify these directly in the publish section + appConfig["httpsCert"] = v.HTTPSCert + appConfig["httpsKey"] = v.HTTPSKey + } else { + // default to Camlistore parameters, if any + if tlsO != nil { + appConfig["httpsCert"] = tlsO.httpsCert + appConfig["httpsKey"] = tlsO.httpsKey + } + } + a := args{ + "program": v.Program, + "appConfig": appConfig, + } + if v.BaseURL != "" { + a["baseURL"] = v.BaseURL + } + program := "publisher" + if v.Program != "" { + program = v.Program + } + a["program"] = program + b.addPrefix(k, "app", a) + } + return nil +} + +// kvFileType 
returns the file based sorted type defined for index storage, if +// any. It defaults to "leveldb" otherwise. +func (b *lowBuilder) kvFileType() string { + switch { + case b.high.SQLite != "": + return "sqlite" + case b.high.KVFile != "": + return "kv" + case b.high.LevelDB != "": + return "leveldb" + default: + return sorted.DefaultKVFileType + } +} + +func (b *lowBuilder) addUIConfig() { + args := map[string]interface{}{ + "cache": "/cache/", + } + if b.high.SourceRoot != "" { + args["sourceRoot"] = b.high.SourceRoot + } + var thumbCache map[string]interface{} + if b.high.BlobPath != "" { + thumbCache = map[string]interface{}{ + "type": b.kvFileType(), + "file": filepath.Join(b.high.BlobPath, "thumbmeta."+b.kvFileType()), + } + } + if thumbCache == nil { + sorted, err := b.sortedStorage("ui_thumbcache") + if err == nil { + thumbCache = sorted + } + } + if thumbCache != nil { + args["scaledImage"] = thumbCache + } + b.addPrefix("/ui/", "ui", args) +} + +func (b *lowBuilder) mongoIndexStorage(confStr, sortedType string) (map[string]interface{}, error) { + dbName := b.dbName(sortedType) + if dbName == "" { + return nil, fmt.Errorf("no database name configured for sorted store %q", sortedType) + } + fields := strings.Split(confStr, "@") + if len(fields) == 2 { + host := fields[1] + fields = strings.Split(fields[0], ":") + if len(fields) == 2 { + user, pass := fields[0], fields[1] + return map[string]interface{}{ + "type": "mongo", + "host": host, + "user": user, + "password": pass, + "database": dbName, + }, nil + } + } + return nil, errors.New("Malformed mongo config string; want form: \"user:password@host\"") +} + +// parses "user@host:password", which you think would be easy, but we +// documented this format without thinking about port numbers, so this +// uses heuristics to guess what extra colons mean. 
+func parseUserHostPass(v string) (user, host, password string, ok bool) { + f := strings.SplitN(v, "@", 2) + if len(f) != 2 { + return + } + user = f[0] + f = strings.Split(f[1], ":") + if len(f) < 2 { + return "", "", "", false + } + host = f[0] + f = f[1:] + if len(f) >= 2 { + if _, err := strconv.ParseUint(f[0], 10, 16); err == nil { + host = host + ":" + f[0] + f = f[1:] + } + } + password = strings.Join(f, ":") + ok = true + return +} + +func (b *lowBuilder) dbIndexStorage(rdbms string, confStr string, sortedType string) (map[string]interface{}, error) { + dbName := b.dbName(sortedType) + if dbName == "" { + return nil, fmt.Errorf("no database name configured for sorted store %q", sortedType) + } + user, host, password, ok := parseUserHostPass(confStr) + if !ok { + return nil, fmt.Errorf("Malformed %s config string. Want: \"user@host:password\"", rdbms) + } + return map[string]interface{}{ + "type": rdbms, + "host": host, + "user": user, + "password": password, + "database": b.dbName(sortedType), + }, nil +} + +func (b *lowBuilder) sortedStorage(sortedType string) (map[string]interface{}, error) { + return b.sortedStorageAt(sortedType, "") +} + +// filePrefix gives a file path of where to put the database. It can be omitted by +// some sorted implementations, but is required by others. +// The filePrefix should be to a file, not a directory, and should not end in a ".ext" extension. +// An extension like ".kv" or ".sqlite" will be added. 
+func (b *lowBuilder) sortedStorageAt(sortedType, filePrefix string) (map[string]interface{}, error) { + if b.high.MySQL != "" { + return b.dbIndexStorage("mysql", b.high.MySQL, sortedType) + } + if b.high.PostgreSQL != "" { + return b.dbIndexStorage("postgres", b.high.PostgreSQL, sortedType) + } + if b.high.Mongo != "" { + return b.mongoIndexStorage(b.high.Mongo, sortedType) + } + if b.high.MemoryIndex { + return map[string]interface{}{ + "type": "memory", + }, nil + } + if sortedType != "index" && filePrefix == "" { + return nil, fmt.Errorf("internal error: use of sortedStorageAt with a non-index type and no file location for non-database sorted implementation") + } + // dbFile returns path directly if sortedType == "index", else it returns filePrefix+"."+ext. + dbFile := func(path, ext string) string { + if sortedType == "index" { + return path + } + return filePrefix + "." + ext + } + if b.high.SQLite != "" { + return map[string]interface{}{ + "type": "sqlite", + "file": dbFile(b.high.SQLite, "sqlite"), + }, nil + } + if b.high.KVFile != "" { + return map[string]interface{}{ + "type": "kv", + "file": dbFile(b.high.KVFile, "kv"), + }, nil + } + if b.high.LevelDB != "" { + return map[string]interface{}{ + "type": "leveldb", + "file": dbFile(b.high.LevelDB, "leveldb"), + }, nil + } + panic("internal error: sortedStorageAt didn't find a sorted implementation") +} + +func (b *lowBuilder) thatQueueUnlessMemory(thatQueue map[string]interface{}) (queue map[string]interface{}) { + if b.high.MemoryStorage { + return map[string]interface{}{ + "type": "memory", + } + } + return thatQueue +} + +func (b *lowBuilder) addS3Config(s3 string) error { + f := strings.SplitN(s3, ":", 4) + if len(f) < 3 { + return errors.New(`genconfig: expected "s3" field to be of form "access_key_id:secret_access_key:bucket"`) + } + accessKey, secret, bucket := f[0], f[1], f[2] + var hostname string + if len(f) == 4 { + hostname = f[3] + } + isPrimary := !b.hasPrefix("/bs/") + s3Prefix := "" + if 
isPrimary { + s3Prefix = "/bs/" + if b.high.PackRelated { + return errors.New("TODO: finish packRelated support for S3") + } + } else { + s3Prefix = "/sto-s3/" + } + a := args{ + "aws_access_key": accessKey, + "aws_secret_access_key": secret, + "bucket": bucket, + } + if hostname != "" { + a["hostname"] = hostname + } + b.addPrefix(s3Prefix, "storage-s3", a) + if isPrimary { + // TODO(mpl): s3CacheBucket + // See https://camlistore.org/issue/85 + b.addPrefix("/cache/", "storage-filesystem", args{ + "path": filepath.Join(tempDir(), "camli-cache"), + }) + } else { + if b.high.BlobPath == "" && !b.high.MemoryStorage { + panic("unexpected empty blobpath with sync-to-s3") + } + b.addPrefix("/sync-to-s3/", "sync", args{ + "from": "/bs/", + "to": s3Prefix, + "queue": b.thatQueueUnlessMemory( + map[string]interface{}{ + "type": b.kvFileType(), + "file": filepath.Join(b.high.BlobPath, "sync-to-s3-queue."+b.kvFileType()), + }), + }) + } + return nil +} + +func (b *lowBuilder) addGoogleDriveConfig(v string) error { + f := strings.SplitN(v, ":", 4) + if len(f) != 4 { + return errors.New(`genconfig: expected "googledrive" field to be of form "client_id:client_secret:refresh_token:parent_id"`) + } + clientId, secret, refreshToken, parentId := f[0], f[1], f[2], f[3] + + isPrimary := !b.hasPrefix("/bs/") + prefix := "" + if isPrimary { + prefix = "/bs/" + if b.high.PackRelated { + return errors.New("TODO: finish packRelated support for Google Drive") + } + } else { + prefix = "/sto-googledrive/" + } + b.addPrefix(prefix, "storage-googledrive", args{ + "parent_id": parentId, + "auth": map[string]interface{}{ + "client_id": clientId, + "client_secret": secret, + "refresh_token": refreshToken, + }, + }) + + if isPrimary { + b.addPrefix("/cache/", "storage-filesystem", args{ + "path": filepath.Join(tempDir(), "camli-cache"), + }) + } else { + b.addPrefix("/sync-to-googledrive/", "sync", args{ + "from": "/bs/", + "to": prefix, + "queue": b.thatQueueUnlessMemory( + 
map[string]interface{}{ + "type": b.kvFileType(), + "file": filepath.Join(b.high.BlobPath, "sync-to-googledrive-queue."+b.kvFileType()), + }), + }) + } + + return nil +} + +var errGCSUsage = errors.New(`genconfig: expected "googlecloudstorage" field to be of form "client_id:client_secret:refresh_token:bucket[/dir/]" or ":bucketname[/dir/]"`) + +func (b *lowBuilder) addGoogleCloudStorageConfig(v string) error { + var clientID, secret, refreshToken, bucket string + f := strings.SplitN(v, ":", 4) + switch len(f) { + default: + return errGCSUsage + case 4: + clientID, secret, refreshToken, bucket = f[0], f[1], f[2], f[3] + case 2: + if f[0] != "" { + return errGCSUsage + } + bucket = f[1] + clientID = "auto" + } + + isReplica := b.hasPrefix("/bs/") + if isReplica { + gsPrefix := "/sto-googlecloudstorage/" + b.addPrefix(gsPrefix, "storage-googlecloudstorage", args{ + "bucket": bucket, + "auth": map[string]interface{}{ + "client_id": clientID, + "client_secret": secret, + "refresh_token": refreshToken, + }, + }) + + b.addPrefix("/sync-to-googlecloudstorage/", "sync", args{ + "from": "/bs/", + "to": gsPrefix, + "queue": b.thatQueueUnlessMemory( + map[string]interface{}{ + "type": b.kvFileType(), + "file": filepath.Join(b.high.BlobPath, "sync-to-googlecloud-queue."+b.kvFileType()), + }), + }) + return nil + } + + // TODO: cacheBucket like s3CacheBucket? 
+ b.addPrefix("/cache/", "storage-filesystem", args{ + "path": filepath.Join(tempDir(), "camli-cache"), + }) + if b.high.PackRelated { + b.addPrefix("/bs-loose/", "storage-googlecloudstorage", args{ + "bucket": bucket + "/loose", + "auth": map[string]interface{}{ + "client_id": clientID, + "client_secret": secret, + "refresh_token": refreshToken, + }, + }) + b.addPrefix("/bs-packed/", "storage-googlecloudstorage", args{ + "bucket": bucket + "/packed", + "auth": map[string]interface{}{ + "client_id": clientID, + "client_secret": secret, + "refresh_token": refreshToken, + }, + }) + blobPackedIndex, err := b.sortedStorageAt("blobpacked_index", "") + if err != nil { + return err + } + b.addPrefix("/bs/", "storage-blobpacked", args{ + "smallBlobs": "/bs-loose/", + "largeBlobs": "/bs-packed/", + "metaIndex": blobPackedIndex, + }) + return nil + } + b.addPrefix("/bs/", "storage-googlecloudstorage", args{ + "bucket": bucket, + "auth": map[string]interface{}{ + "client_id": clientID, + "client_secret": secret, + "refresh_token": refreshToken, + }, + }) + + return nil +} + +// indexFileDir returns the directory of the sqlite or kv file, or the +// empty string. +func (b *lowBuilder) indexFileDir() string { + switch { + case b.high.SQLite != "": + return filepath.Dir(b.high.SQLite) + case b.high.KVFile != "": + return filepath.Dir(b.high.KVFile) + case b.high.LevelDB != "": + return filepath.Dir(b.high.LevelDB) + } + return "" +} + +func (b *lowBuilder) syncToIndexArgs() (map[string]interface{}, error) { + a := map[string]interface{}{ + "from": "/bs/", + "to": "/index/", + } + + const sortedType = "queue-sync-to-index" + if dbName := b.dbName(sortedType); dbName != "" { + qj, err := b.sortedStorage(sortedType) + if err != nil { + return nil, err + } + a["queue"] = qj + return a, nil + } + + // TODO: currently when using s3, the index must be + // sqlite or kvfile, since only through one of those + // can we get a directory. 
+ if !b.high.MemoryStorage && b.high.BlobPath == "" && b.indexFileDir() == "" { + // We don't actually have a working sync handler, but we keep a stub registered + // so it can be referred to from other places. + // See http://camlistore.org/issue/201 + a["idle"] = true + return a, nil + } + + dir := b.high.BlobPath + if dir == "" { + dir = b.indexFileDir() + } + a["queue"] = b.thatQueueUnlessMemory( + map[string]interface{}{ + "type": b.kvFileType(), + "file": filepath.Join(dir, "sync-to-index-queue."+b.kvFileType()), + }) + + return a, nil +} + +func (b *lowBuilder) genLowLevelPrefixes() error { + root := "/bs/" + pubKeyDest := root + if b.runIndex() { + root = "/bs-and-maybe-also-index/" + pubKeyDest = "/bs-and-index/" + } + + rootArgs := map[string]interface{}{ + "stealth": false, + "blobRoot": root, + "helpRoot": "/help/", + "statusRoot": "/status/", + "jsonSignRoot": "/sighelper/", + } + if b.high.OwnerName != "" { + rootArgs["ownerName"] = b.high.OwnerName + } + if b.runIndex() { + rootArgs["searchRoot"] = "/my-search/" + } + b.addPrefix("/", "root", rootArgs) + b.addPrefix("/setup/", "setup", nil) + b.addPrefix("/status/", "status", nil) + b.addPrefix("/help/", "help", nil) + + importerArgs := args{} + if b.high.Flickr != "" { + importerArgs["flickr"] = map[string]interface{}{ + "clientSecret": b.high.Flickr, + } + } + if b.high.Picasa != "" { + importerArgs["picasa"] = map[string]interface{}{ + "clientSecret": b.high.Picasa, + } + } + if b.runIndex() { + b.addPrefix("/importer/", "importer", importerArgs) + } + + if path := b.high.ShareHandlerPath; path != "" { + b.addPrefix(path, "share", args{ + "blobRoot": "/bs/", + }) + } + + b.addPrefix("/sighelper/", "jsonsign", args{ + "secretRing": b.high.IdentitySecretRing, + "keyId": b.high.Identity, + "publicKeyDest": pubKeyDest, + }) + + storageType := "filesystem" + if b.high.PackBlobs { + storageType = "diskpacked" + } + if b.high.BlobPath != "" { + if b.high.PackRelated { + b.addPrefix("/bs-loose/", 
"storage-filesystem", args{ + "path": b.high.BlobPath, + }) + b.addPrefix("/bs-packed/", "storage-filesystem", args{ + "path": filepath.Join(b.high.BlobPath, "packed"), + }) + blobPackedIndex, err := b.sortedStorageAt("blobpacked_index", filepath.Join(b.high.BlobPath, "packed", "packindex")) + if err != nil { + return err + } + b.addPrefix("/bs/", "storage-blobpacked", args{ + "smallBlobs": "/bs-loose/", + "largeBlobs": "/bs-packed/", + "metaIndex": blobPackedIndex, + }) + } else if b.high.PackBlobs { + b.addPrefix("/bs/", "storage-"+storageType, args{ + "path": b.high.BlobPath, + "metaIndex": map[string]interface{}{ + "type": b.kvFileType(), + "file": filepath.Join(b.high.BlobPath, "index."+b.kvFileType()), + }, + }) + } else { + b.addPrefix("/bs/", "storage-"+storageType, args{ + "path": b.high.BlobPath, + }) + } + if b.high.PackBlobs { + b.addPrefix("/cache/", "storage-"+storageType, args{ + "path": filepath.Join(b.high.BlobPath, "/cache"), + "metaIndex": map[string]interface{}{ + "type": b.kvFileType(), + "file": filepath.Join(b.high.BlobPath, "cache", "index."+b.kvFileType()), + }, + }) + } else { + b.addPrefix("/cache/", "storage-"+storageType, args{ + "path": filepath.Join(b.high.BlobPath, "/cache"), + }) + } + } else if b.high.MemoryStorage { + b.addPrefix("/bs/", "storage-memory", nil) + b.addPrefix("/cache/", "storage-memory", nil) + } + + if b.runIndex() { + syncArgs, err := b.syncToIndexArgs() + if err != nil { + return err + } + b.addPrefix("/sync/", "sync", syncArgs) + + b.addPrefix("/bs-and-index/", "storage-replica", args{ + "backends": []interface{}{"/bs/", "/index/"}, + }) + + b.addPrefix("/bs-and-maybe-also-index/", "storage-cond", args{ + "write": map[string]interface{}{ + "if": "isSchema", + "then": "/bs-and-index/", + "else": "/bs/", + }, + "read": "/bs/", + }) + + owner, err := b.searchOwner() + if err != nil { + return err + } + searchArgs := args{ + "index": "/index/", + "owner": owner.String(), + } + if b.copyIndexToMemory() { + 
searchArgs["slurpToMemory"] = true + } + b.addPrefix("/my-search/", "search", searchArgs) + } + + return nil +} + +func (b *lowBuilder) build() (*Config, error) { + conf, low := b.high, b.low + if conf.HTTPS { + if (conf.HTTPSCert != "") != (conf.HTTPSKey != "") { + return nil, errors.New("Must set both httpsCert and httpsKey (or neither to generate a self-signed cert)") + } + if conf.HTTPSCert != "" { + low["httpsCert"] = conf.HTTPSCert + low["httpsKey"] = conf.HTTPSKey + } else { + low["httpsCert"] = osutil.DefaultTLSCert() + low["httpsKey"] = osutil.DefaultTLSKey() + } + } + + if conf.BaseURL != "" { + u, err := url.Parse(conf.BaseURL) + if err != nil { + return nil, fmt.Errorf("Error parsing baseURL %q as a URL: %v", conf.BaseURL, err) + } + if u.Path != "" && u.Path != "/" { + return nil, fmt.Errorf("baseURL can't have a path, only a scheme, host, and optional port.") + } + u.Path = "" + low["baseURL"] = u.String() + } + if conf.Listen != "" { + low["listen"] = conf.Listen + } + if conf.PackBlobs && conf.PackRelated { + return nil, errors.New("can't use both packBlobs (for 'diskpacked') and packRelated (for 'blobpacked')") + } + low["https"] = conf.HTTPS + low["auth"] = conf.Auth + + numIndexers := numSet(conf.LevelDB, conf.Mongo, conf.MySQL, conf.PostgreSQL, conf.SQLite, conf.KVFile, conf.MemoryIndex) + + switch { + case b.runIndex() && numIndexers == 0: + return nil, fmt.Errorf("Unless runIndex is set to false, you must specify an index option (kvIndexFile, leveldb, mongo, mysql, postgres, sqlite, memoryIndex).") + case b.runIndex() && numIndexers != 1: + return nil, fmt.Errorf("With runIndex set true, you can only pick exactly one indexer (mongo, mysql, postgres, sqlite, kvIndexFile, leveldb, memoryIndex).") + case !b.runIndex() && numIndexers != 0: + return nil, fmt.Errorf("With runIndex disabled, you can't specify any of mongo, mysql, postgres, sqlite.") + } + + if conf.Identity == "" { + return nil, errors.New("no 'identity' in server config") + } + + 
noLocalDisk := conf.BlobPath == "" + if noLocalDisk { + if !conf.MemoryStorage && conf.S3 == "" && conf.GoogleCloudStorage == "" { + return nil, errors.New("Unless memoryStorage is set, you must specify at least one storage option for your blobserver (blobPath (for localdisk), s3, googlecloudstorage).") + } + if !conf.MemoryStorage && conf.S3 != "" && conf.GoogleCloudStorage != "" { + return nil, errors.New("Using S3 as a primary storage and Google Cloud Storage as a mirror is not supported for now.") + } + } + if conf.ShareHandler && conf.ShareHandlerPath == "" { + conf.ShareHandlerPath = "/share/" + } + if conf.MemoryStorage { + noMkdir = true + if conf.BlobPath != "" { + return nil, errors.New("memoryStorage and blobPath are mutually exclusive.") + } + if conf.PackRelated { + return nil, errors.New("memoryStorage doesn't support packRelated.") + } + } + + if err := b.genLowLevelPrefixes(); err != nil { + return nil, err + } + + var cacheDir string + if noLocalDisk { + // Whether camlistored is run from EC2 or not, we use + // a temp dir as the cache when primary storage is S3. 
+ // TODO(mpl): s3CacheBucket + // See https://camlistore.org/issue/85 + cacheDir = filepath.Join(tempDir(), "camli-cache") + } else { + cacheDir = filepath.Join(conf.BlobPath, "cache") + } + if !noMkdir { + if err := os.MkdirAll(cacheDir, 0700); err != nil { + return nil, fmt.Errorf("Could not create blobs cache dir %s: %v", cacheDir, err) + } + } + + if len(conf.Publish) > 0 { + if !b.runIndex() { + return nil, fmt.Errorf("publishing requires an index") + } + var tlsO *tlsOpts + httpsCert, ok1 := low["httpsCert"].(string) + httpsKey, ok2 := low["httpsKey"].(string) + if ok1 && ok2 { + tlsO = &tlsOpts{ + httpsCert: httpsCert, + httpsKey: httpsKey, + } + } + if err := b.addPublishedConfig(tlsO); err != nil { + return nil, fmt.Errorf("Could not generate config for published: %v", err) + } + } + + if b.runIndex() { + b.addUIConfig() + sto, err := b.sortedStorage("index") + if err != nil { + return nil, err + } + b.addPrefix("/index/", "storage-index", args{ + "blobSource": "/bs/", + "storage": sto, + }) + } + + if conf.S3 != "" { + if err := b.addS3Config(conf.S3); err != nil { + return nil, err + } + } + if conf.GoogleDrive != "" { + if err := b.addGoogleDriveConfig(conf.GoogleDrive); err != nil { + return nil, err + } + } + if conf.GoogleCloudStorage != "" { + if err := b.addGoogleCloudStorageConfig(conf.GoogleCloudStorage); err != nil { + return nil, err + } + } + + return &Config{Obj: b.low}, nil +} + +func numSet(vv ...interface{}) (num int) { + for _, vi := range vv { + switch v := vi.(type) { + case string: + if v != "" { + num++ + } + case bool: + if v { + num++ + } + default: + panic("unknown type") + } + } + return +} + +var defaultBaseConfig = serverconfig.Config{ + Listen: ":3179", + HTTPS: false, + Auth: "localhost", +} + +// WriteDefaultConfigFile generates a new default high-level server configuration +// file at filePath. If useSQLite, the default indexer will use SQLite, otherwise +// kv. If filePath already exists, it is overwritten. 
+func WriteDefaultConfigFile(filePath string, useSQLite bool) error { + conf := defaultBaseConfig + blobDir := osutil.CamliBlobRoot() + if err := wkfs.MkdirAll(blobDir, 0700); err != nil { + return fmt.Errorf("Could not create default blobs directory: %v", err) + } + conf.BlobPath = blobDir + if useSQLite { + conf.SQLite = filepath.Join(osutil.CamliVarDir(), "camli-index.db") + } else { + conf.KVFile = filepath.Join(osutil.CamliVarDir(), "camli-index.kvdb") + } + + keyID, secretRing, err := getOrMakeKeyring() + if err != nil { + return err + } + conf.Identity = keyID + conf.IdentitySecretRing = secretRing + + confData, err := json.MarshalIndent(conf, "", " ") + if err != nil { + return fmt.Errorf("Could not json encode config file : %v", err) + } + + if err := wkfs.WriteFile(filePath, confData, 0600); err != nil { + return fmt.Errorf("Could not create or write default server config: %v", err) + } + + return nil +} + +func getOrMakeKeyring() (keyID, secRing string, err error) { + secRing = osutil.SecretRingFile() + _, err = wkfs.Stat(secRing) + switch { + case err == nil: + keyID, err = jsonsign.KeyIdFromRing(secRing) + if err != nil { + err = fmt.Errorf("Could not find any keyID in file %q: %v", secRing, err) + return + } + log.Printf("Re-using identity with keyID %q found in file %s", keyID, secRing) + case os.IsNotExist(err): + keyID, err = jsonsign.GenerateNewSecRing(secRing) + if err != nil { + err = fmt.Errorf("Could not generate new secRing at file %q: %v", secRing, err) + return + } + log.Printf("Generated new identity with keyID %q in file %s", keyID, secRing) + default: + err = fmt.Errorf("Could not stat secret ring %q: %v", secRing, err) + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/genconfig_test.go b/vendor/github.com/camlistore/camlistore/pkg/serverinit/genconfig_test.go new file mode 100644 index 00000000..095049ad --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/genconfig_test.go @@ -0,0 
+1,46 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serverinit + +import "testing" + +func TestParseUserHostPass(t *testing.T) { + tests := []struct { + in string + user, host, password string + }{ + {in: "foo"}, + {in: "foo@bar"}, + {"bob@server:pass", "bob", "server", "pass"}, + {"bob@server:3307:pass", "bob", "server:3307", "pass"}, + {"bob@server:pass:word", "bob", "server", "pass:word"}, + {"bob@server:9999999:word", "bob", "server", "9999999:word"}, + {"bob@server:123:123:word", "bob", "server:123", "123:word"}, + {"bob@server:123", "bob", "server", "123"}, + {"bob@server:123:", "bob", "server:123", ""}, + } + for _, tt := range tests { + user, host, password, ok := parseUserHostPass(tt.in) + if ok != (user != "" || host != "" || password != "") { + t.Errorf("For input %q, inconsistent output %q, %q, %q, %v", tt.in, user, host, password, ok) + continue + } + if user != tt.user || host != tt.host || password != tt.password { + t.Errorf("parseUserHostPass(%q) = %q, %q, %q; want %q, %q, %q", tt.in, user, host, password, tt.user, tt.host, tt.password) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/serverinit.go b/vendor/github.com/camlistore/camlistore/pkg/serverinit/serverinit.go new file mode 100644 index 00000000..6c1379b4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/serverinit.go @@ -0,0 +1,714 @@ +/* +Copyright 2011 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package serverinit is responsible for mapping from a Camlistore +// configuration file and instantiating HTTP Handlers for all the +// necessary endpoints. +package serverinit + +import ( + "bytes" + "encoding/json" + "errors" + "expvar" + "fmt" + "io" + "log" + "net" + "net/http" + "net/http/pprof" + "os" + "regexp" + "runtime" + rpprof "runtime/pprof" + "strconv" + "strings" + + "camlistore.org/pkg/auth" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/blobserver/handlers" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/index" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/server" + "camlistore.org/pkg/server/app" + "camlistore.org/pkg/types/serverconfig" +) + +const camliPrefix = "/camli/" + +var ErrCamliPath = errors.New("Invalid Camlistore request path") + +type handlerConfig struct { + prefix string // "/foo/" + htype string // "localdisk", etc + conf jsonconfig.Obj // never nil + internal bool // if true, not accessible over HTTP + + settingUp, setupDone bool +} + +type handlerLoader struct { + installer HandlerInstaller + baseURL string + config map[string]*handlerConfig // prefix -> config + handler map[string]interface{} // prefix -> http.Handler / func / blobserver.Storage + curPrefix string + closers []io.Closer + prefixStack []string + reindex bool + + // optional context (for App Engine, the first request that + // started up the process). 
we may need this if setting up + // handlers involves doing datastore/memcache/blobstore + // lookups. + context *http.Request +} + +// A HandlerInstaller is anything that can register an HTTP Handler at +// a prefix path. Both *http.ServeMux and camlistore.org/pkg/webserver.Server +// implement HandlerInstaller. +type HandlerInstaller interface { + Handle(path string, h http.Handler) +} + +type storageAndConfig struct { + blobserver.Storage + config *blobserver.Config +} + +// parseCamliPath looks for "/camli/" in the path and returns +// what follows it (the action). +func parseCamliPath(path string) (action string, err error) { + camIdx := strings.Index(path, camliPrefix) + if camIdx == -1 { + return "", ErrCamliPath + } + action = path[camIdx+len(camliPrefix):] + return +} + +func unsupportedHandler(conn http.ResponseWriter, req *http.Request) { + httputil.BadRequestError(conn, "Unsupported camlistore path or method.") +} + +func (s *storageAndConfig) Config() *blobserver.Config { + return s.config +} + +// GetStorage returns the unwrapped blobserver.Storage interface value for +// callers to type-assert optional interface implementations on. (e.g. EnumeratorConfig) +func (s *storageAndConfig) GetStorage() blobserver.Storage { + return s.Storage +} + +// action is the part following "/camli/" in the URL. It's either a +// string like "enumerate-blobs", "stat", "upload", or a blobref. 
+func camliHandlerUsingStorage(req *http.Request, action string, storage blobserver.StorageConfiger) (http.Handler, auth.Operation) { + var handler http.Handler + op := auth.OpAll + switch req.Method { + case "GET", "HEAD": + switch action { + case "enumerate-blobs": + handler = handlers.CreateEnumerateHandler(storage) + op = auth.OpGet + case "stat": + handler = handlers.CreateStatHandler(storage) + case "ws": + handler = nil // TODO: handlers.CreateSocketHandler(storage) + op = auth.OpDiscovery // rest of operation auth checks done in handler + default: + handler = handlers.CreateGetHandler(storage) + op = auth.OpGet + } + case "POST": + switch action { + case "stat": + handler = handlers.CreateStatHandler(storage) + op = auth.OpStat + case "upload": + handler = handlers.CreateBatchUploadHandler(storage) + op = auth.OpUpload + case "remove": + handler = handlers.CreateRemoveHandler(storage) + } + case "PUT": + handler = handlers.CreatePutUploadHandler(storage) + op = auth.OpUpload + } + if handler == nil { + handler = http.HandlerFunc(unsupportedHandler) + } + return handler, op +} + +// where prefix is like "/" or "/s3/" for e.g. 
"/camli/" or "/s3/camli/*" +func makeCamliHandler(prefix, baseURL string, storage blobserver.Storage, hf blobserver.FindHandlerByTyper) http.Handler { + if !strings.HasSuffix(prefix, "/") { + panic("expected prefix to end in slash") + } + baseURL = strings.TrimRight(baseURL, "/") + + canLongPoll := true + // TODO(bradfitz): set to false if this is App Engine, or provide some way to disable + + storageConfig := &storageAndConfig{ + storage, + &blobserver.Config{ + Writable: true, + Readable: true, + Deletable: false, + URLBase: baseURL + prefix[:len(prefix)-1], + CanLongPoll: canLongPoll, + HandlerFinder: hf, + }, + } + return http.HandlerFunc(func(conn http.ResponseWriter, req *http.Request) { + action, err := parseCamliPath(req.URL.Path[len(prefix)-1:]) + if err != nil { + log.Printf("Invalid request for method %q, path %q", + req.Method, req.URL.Path) + unsupportedHandler(conn, req) + return + } + handler := auth.RequireAuth(camliHandlerUsingStorage(req, action, storageConfig)) + handler.ServeHTTP(conn, req) + }) +} + +func (hl *handlerLoader) FindHandlerByType(htype string) (prefix string, handler interface{}, err error) { + nFound := 0 + for pfx, config := range hl.config { + if config.htype == htype { + nFound++ + prefix, handler = pfx, hl.handler[pfx] + } + } + if nFound == 0 { + return "", nil, blobserver.ErrHandlerTypeNotFound + } + if htype == "jsonsign" && nFound > 1 { + // TODO: do this for all handler types later? audit + // callers of FindHandlerByType and see if that's + // feasible. For now I'm only paranoid about jsonsign. 
+ return "", nil, fmt.Errorf("%d handlers found of type %q; ambiguous", nFound, htype) + } + return +} + +func (hl *handlerLoader) AllHandlers() (types map[string]string, handlers map[string]interface{}) { + types = make(map[string]string) + handlers = make(map[string]interface{}) + for pfx, config := range hl.config { + types[pfx] = config.htype + handlers[pfx] = hl.handler[pfx] + } + return +} + +func (hl *handlerLoader) setupAll() { + for prefix := range hl.config { + hl.setupHandler(prefix) + } +} + +func (hl *handlerLoader) configType(prefix string) string { + if h, ok := hl.config[prefix]; ok { + return h.htype + } + return "" +} + +func (hl *handlerLoader) getOrSetup(prefix string) interface{} { + hl.setupHandler(prefix) + return hl.handler[prefix] +} + +func (hl *handlerLoader) MyPrefix() string { + return hl.curPrefix +} + +func (hl *handlerLoader) BaseURL() string { + return hl.baseURL +} + +func (hl *handlerLoader) GetStorage(prefix string) (blobserver.Storage, error) { + hl.setupHandler(prefix) + if s, ok := hl.handler[prefix].(blobserver.Storage); ok { + return s, nil + } + return nil, fmt.Errorf("bogus storage handler referenced as %q", prefix) +} + +func (hl *handlerLoader) GetHandler(prefix string) (interface{}, error) { + hl.setupHandler(prefix) + if s, ok := hl.handler[prefix].(blobserver.Storage); ok { + return s, nil + } + if h, ok := hl.handler[prefix].(http.Handler); ok { + return h, nil + } + return nil, fmt.Errorf("bogus http or storage handler referenced as %q", prefix) +} + +func (hl *handlerLoader) GetHandlerType(prefix string) string { + return hl.configType(prefix) +} + +func exitFailure(pattern string, args ...interface{}) { + if !strings.HasSuffix(pattern, "\n") { + pattern = pattern + "\n" + } + panic(fmt.Sprintf(pattern, args...)) +} + +func (hl *handlerLoader) setupHandler(prefix string) { + h, ok := hl.config[prefix] + if !ok { + exitFailure("invalid reference to undefined handler %q", prefix) + } + if h.setupDone { + // Already 
setup by something else reference it and forcing it to be + // setup before the bottom loop got to it. + return + } + hl.prefixStack = append(hl.prefixStack, prefix) + if h.settingUp { + buf := make([]byte, 1024) + buf = buf[:runtime.Stack(buf, false)] + exitFailure("loop in configuration graph; %q tried to load itself indirectly: %q\nStack:\n%s", + prefix, hl.prefixStack, buf) + } + h.settingUp = true + defer func() { + // log.Printf("Configured handler %q", prefix) + h.setupDone = true + hl.prefixStack = hl.prefixStack[:len(hl.prefixStack)-1] + r := recover() + if r == nil { + if hl.handler[prefix] == nil { + panic(fmt.Sprintf("setupHandler for %q didn't install a handler", prefix)) + } + } else { + panic(r) + } + }() + + hl.curPrefix = prefix + + if strings.HasPrefix(h.htype, "storage-") { + stype := strings.TrimPrefix(h.htype, "storage-") + // Assume a storage interface + pstorage, err := blobserver.CreateStorage(stype, hl, h.conf) + if err != nil { + exitFailure("error instantiating storage for prefix %q, type %q: %v", + h.prefix, stype, err) + } + if ix, ok := pstorage.(*index.Index); ok && hl.reindex { + log.Printf("Reindexing %s ...", h.prefix) + if err := ix.Reindex(); err != nil { + exitFailure("Error reindexing %s: %v", h.prefix, err) + } + } + hl.handler[h.prefix] = pstorage + if h.internal { + hl.installer.Handle(prefix, unauthorizedHandler{}) + } else { + hl.installer.Handle(prefix+"camli/", makeCamliHandler(prefix, hl.baseURL, pstorage, hl)) + } + if cl, ok := pstorage.(blobserver.ShutdownStorage); ok { + hl.closers = append(hl.closers, cl) + } + return + } + + var hh http.Handler + if h.htype == "app" { + ap, err := app.NewHandler(h.conf, hl.baseURL+"/", prefix) + if err != nil { + exitFailure("error setting up app for prefix %q: %v", h.prefix, err) + } + hh = ap + auth.AddMode(ap.AuthMode()) + if ap.ProgramName() == "publisher" { + if err := hl.initPublisherRootNode(ap); err != nil { + exitFailure("Error looking/setting up root node for publisher 
on %v: %v", h.prefix, err) + } + } + } else { + var err error + hh, err = blobserver.CreateHandler(h.htype, hl, h.conf) + if err != nil { + exitFailure("error instantiating handler for prefix %q, type %q: %v", + h.prefix, h.htype, err) + } + } + + hl.handler[prefix] = hh + var wrappedHandler http.Handler + if h.internal { + wrappedHandler = unauthorizedHandler{} + } else { + wrappedHandler = &httputil.PrefixHandler{prefix, hh} + if handlerTypeWantsAuth(h.htype) { + wrappedHandler = auth.Handler{wrappedHandler} + } + } + hl.installer.Handle(prefix, wrappedHandler) +} + +type unauthorizedHandler struct{} + +func (unauthorizedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Unauthorized", http.StatusUnauthorized) +} + +func handlerTypeWantsAuth(handlerType string) bool { + // TODO(bradfitz): ask the handler instead? This is a bit of a + // weird spot for this policy maybe? + switch handlerType { + case "ui", "search", "jsonsign", "sync", "status", "help", "importer": + return true + } + return false +} + +// A Config is the wrapper around a Camlistore JSON configuration file. +// Files on disk can be in either high-level or low-level format, but +// the Load function always returns the Config in its low-level format. +type Config struct { + jsonconfig.Obj + UIPath string // Not valid until after InstallHandlers + + // apps is the list of server apps configured during InstallHandlers, + // and that should be started after camlistored has started serving. + apps []*app.Handler +} + +// detectConfigChange returns an informative error if conf contains obsolete keys. 
+func detectConfigChange(conf jsonconfig.Obj) error { + oldHTTPSKey, oldHTTPSCert := conf.OptionalString("HTTPSKeyFile", ""), conf.OptionalString("HTTPSCertFile", "") + if oldHTTPSKey != "" || oldHTTPSCert != "" { + return fmt.Errorf("Config keys %q and %q have respectively been renamed to %q and %q, please fix your server config.", + "HTTPSKeyFile", "HTTPSCertFile", "httpsKey", "httpsCert") + } + return nil +} + +// LoadFile returns a low-level "handler config" from the provided filename. +// If the config file doesn't contain a top-level JSON key of "handlerConfig" +// with boolean value true, the configuration is assumed to be a high-level +// "user config" file, and transformed into a low-level config. +func LoadFile(filename string) (*Config, error) { + return load(filename, nil) +} + +type jsonFileImpl struct { + *bytes.Reader + name string +} + +func (jsonFileImpl) Close() error { return nil } +func (f jsonFileImpl) Name() string { return f.name } + +// Load returns a low-level "handler config" from the provided config. +// If the config doesn't contain a top-level JSON key of "handlerConfig" +// with boolean value true, the configuration is assumed to be a high-level +// "user config" file, and transformed into a low-level config. 
+func Load(config []byte) (*Config, error) { + return load("", func(filename string) (jsonconfig.File, error) { + if filename != "" { + return nil, errors.New("JSON files with includes not supported with jsonconfig.Load") + } + return jsonFileImpl{bytes.NewReader(config), "config file"}, nil + }) +} + +func load(filename string, opener func(filename string) (jsonconfig.File, error)) (*Config, error) { + c := &jsonconfig.ConfigParser{Open: opener} + m, err := c.ReadFile(filename) + if err != nil { + return nil, err + } + obj := jsonconfig.Obj(m) + conf := &Config{ + Obj: obj, + } + + if lowLevel := obj.OptionalBool("handlerConfig", false); lowLevel { + return conf, nil + } + + // Check whether the high-level config uses the old names. + if err := detectConfigChange(obj); err != nil { + return nil, err + } + + // Because the original high-level config might have expanded + // through the use of functions, we re-encode the map back to + // JSON here so we can unmarshal it into the hiLevelConf + // struct later. 
+ highExpandedJSON, err := json.Marshal(m) + if err != nil { + return nil, fmt.Errorf("Can't re-marshal high-level JSON config: %v", err) + } + + var hiLevelConf serverconfig.Config + if err := json.Unmarshal(highExpandedJSON, &hiLevelConf); err != nil { + return nil, fmt.Errorf("Could not unmarshal into a serverconfig.Config: %v", err) + } + + conf, err = genLowLevelConfig(&hiLevelConf) + if err != nil { + return nil, fmt.Errorf( + "failed to transform user config file into internal handler configuration: %v", + err) + } + if v, _ := strconv.ParseBool(os.Getenv("CAMLI_DEBUG_CONFIG")); v { + jsconf, _ := json.MarshalIndent(conf.Obj, "", " ") + log.Printf("From high-level config, generated low-level config: %s", jsconf) + } + return conf, nil +} + +func (config *Config) checkValidAuth() error { + authConfig := config.OptionalString("auth", "") + mode, err := auth.FromConfig(authConfig) + if err == nil { + auth.SetMode(mode) + } + return err +} + +// InstallHandlers creates and registers all the HTTP Handlers needed by config +// into the provided HandlerInstaller. +// +// baseURL is required and specifies the root of this webserver, without trailing slash. +// context may be nil (used and required by App Engine only) +// +// The returned shutdown value can be used to cleanly shut down the +// handlers. 
+func (config *Config) InstallHandlers(hi HandlerInstaller, baseURL string, reindex bool, context *http.Request) (shutdown io.Closer, err error) { + defer func() { + if e := recover(); e != nil { + log.Printf("Caught panic installer handlers: %v", e) + err = fmt.Errorf("Caught panic: %v", e) + } + }() + + if err := config.checkValidAuth(); err != nil { + return nil, fmt.Errorf("error while configuring auth: %v", err) + } + prefixes := config.RequiredObject("prefixes") + if err := config.Validate(); err != nil { + return nil, fmt.Errorf("configuration error in root object's keys: %v", err) + } + + if v := os.Getenv("CAMLI_PPROF_START"); v != "" { + cpuf := mustCreate(v + ".cpu") + defer cpuf.Close() + memf := mustCreate(v + ".mem") + defer memf.Close() + rpprof.StartCPUProfile(cpuf) + defer rpprof.StopCPUProfile() + defer rpprof.WriteHeapProfile(memf) + } + + hl := &handlerLoader{ + installer: hi, + baseURL: baseURL, + config: make(map[string]*handlerConfig), + handler: make(map[string]interface{}), + context: context, + reindex: reindex, + } + + for prefix, vei := range prefixes { + if !strings.HasPrefix(prefix, "/") { + exitFailure("prefix %q doesn't start with /", prefix) + } + if !strings.HasSuffix(prefix, "/") { + exitFailure("prefix %q doesn't end with /", prefix) + } + pmap, ok := vei.(map[string]interface{}) + if !ok { + exitFailure("prefix %q value is a %T, not an object", prefix, vei) + } + pconf := jsonconfig.Obj(pmap) + enabled := pconf.OptionalBool("enabled", true) + if !enabled { + continue + } + handlerType := pconf.RequiredString("handler") + handlerArgs := pconf.OptionalObject("handlerArgs") + internal := pconf.OptionalBool("internal", false) + if err := pconf.Validate(); err != nil { + exitFailure("configuration error in prefix %s: %v", prefix, err) + } + h := &handlerConfig{ + prefix: prefix, + htype: handlerType, + conf: handlerArgs, + internal: internal, + } + hl.config[prefix] = h + + if handlerType == "ui" { + config.UIPath = prefix + } + } + 
hl.setupAll() + + // Now that everything is setup, run any handlers' InitHandler + // methods. + // And register apps that will be started later. + for pfx, handler := range hl.handler { + if starter, ok := handler.(*app.Handler); ok { + config.apps = append(config.apps, starter) + } + if helpHandler, ok := handler.(*server.HelpHandler); ok { + helpHandler.SetServerConfig(config.Obj) + } + if in, ok := handler.(blobserver.HandlerIniter); ok { + if err := in.InitHandler(hl); err != nil { + return nil, fmt.Errorf("Error calling InitHandler on %s: %v", pfx, err) + } + } + } + + if v, _ := strconv.ParseBool(os.Getenv("CAMLI_HTTP_EXPVAR")); v { + hi.Handle("/debug/vars", expvarHandler{}) + } + if v, _ := strconv.ParseBool(os.Getenv("CAMLI_HTTP_PPROF")); v { + hi.Handle("/debug/pprof/", profileHandler{}) + } + hi.Handle("/debug/config", auth.RequireAuth(configHandler{config}, auth.OpAll)) + hi.Handle("/debug/logs", auth.RequireAuth(http.HandlerFunc(logsHandler), auth.OpAll)) + return multiCloser(hl.closers), nil +} + +// StartApps starts all the server applications that were configured +// during InstallHandlers. It should only be called after camlistored +// has started serving, since these apps might request some configuration +// from Camlistore to finish initializing. +func (config *Config) StartApps() error { + for _, ap := range config.apps { + if err := ap.Start(); err != nil { + return fmt.Errorf("error starting app %v: %v", ap.ProgramName(), err) + } + } + return nil +} + +// AppURL returns a map of app name to app base URL for all the configured +// server apps. 
+func (config *Config) AppURL() map[string]string { + appURL := make(map[string]string, len(config.apps)) + for _, ap := range config.apps { + appURL[ap.ProgramName()] = ap.BackendURL() + } + return appURL +} + +func mustCreate(path string) *os.File { + f, err := os.Create(path) + if err != nil { + log.Fatalf("Failed to create %s: %v", path, err) + } + return f +} + +type multiCloser []io.Closer + +func (s multiCloser) Close() (err error) { + for _, cl := range s { + if err1 := cl.Close(); err == nil && err1 != nil { + err = err1 + } + } + return +} + +// expvarHandler publishes expvar stats. +type expvarHandler struct{} + +func (expvarHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} + +type configHandler struct { + c *Config +} + +var ( + knownKeys = regexp.MustCompile(`(?ms)^\s+"_knownkeys": {.+?},?\n`) + sensitiveLine = regexp.MustCompile(`(?m)^\s+\"(auth|aws_secret_access_key|password)\": "[^\"]+".*\n`) +) + +func (h configHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + b, _ := json.MarshalIndent(h.c.Obj, "", " ") + b = knownKeys.ReplaceAll(b, nil) + b = sensitiveLine.ReplaceAllFunc(b, func(ln []byte) []byte { + i := bytes.IndexByte(ln, ':') + return []byte(string(ln[:i+1]) + " REDACTED\n") + }) + w.Write(b) +} + +// profileHandler publishes server profile information. 
+type profileHandler struct{} + +func (profileHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + switch req.URL.Path { + case "/debug/pprof/cmdline": + pprof.Cmdline(rw, req) + case "/debug/pprof/profile": + pprof.Profile(rw, req) + case "/debug/pprof/symbol": + pprof.Symbol(rw, req) + default: + pprof.Index(rw, req) + } +} + +func logsHandler(w http.ResponseWriter, r *http.Request) { + c := &http.Client{ + Transport: &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + return net.Dial("unix", "/run/camjournald.sock") + }, + }, + } + res, err := c.Get("http://journal/entries") + if err != nil { + http.Error(w, err.Error(), 500) + return + } + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + io.Copy(w, res.Body) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/serverinit_test.go b/vendor/github.com/camlistore/camlistore/pkg/serverinit/serverinit_test.go new file mode 100644 index 00000000..252e5cdf --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/serverinit_test.go @@ -0,0 +1,478 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serverinit_test + +import ( + "bytes" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime" + "sort" + "strings" + "testing" + + "camlistore.org/pkg/auth" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/importer" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/jsonsign/signhandler" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/search" + "camlistore.org/pkg/server" + "camlistore.org/pkg/serverinit" + "camlistore.org/pkg/test" + "camlistore.org/pkg/types/clientconfig" + "camlistore.org/pkg/types/serverconfig" + + // For registering all the handler constructors needed in TestInstallHandlers + _ "camlistore.org/pkg/blobserver/cond" + _ "camlistore.org/pkg/blobserver/replica" + _ "camlistore.org/pkg/importer/allimporters" + _ "camlistore.org/pkg/search" + _ "camlistore.org/pkg/server" +) + +var ( + updateGolden = flag.Bool("update_golden", false, "Update golden *.want files") + flagOnly = flag.String("only", "", "If non-empty, substring of foo.json input file to match.") +) + +const ( + // relativeRing points to a real secret ring, but serverinit + // rewrites it to be an absolute path. We then canonicalize + // it to secringPlaceholder in the golden files. + relativeRing = "../jsonsign/testdata/test-secring.gpg" + secringPlaceholder = "/path/to/secring" +) + +func init() { + // Avoid Linux vs. OS X differences in tests. 
+ serverinit.SetTempDirFunc(func() string { return "/tmp" }) + serverinit.SetNoMkdir(true) +} + +func sortedKeys(m map[string]interface{}) (keys []string) { + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return +} + +func prettyPrint(t *testing.T, w io.Writer, v interface{}) { + out, err := json.MarshalIndent(v, "", " ") + if err != nil { + t.Fatal(err) + } + w.Write(out) +} + +func TestConfigs(t *testing.T) { + dir, err := os.Open("testdata") + if err != nil { + t.Fatal(err) + } + names, err := dir.Readdirnames(-1) + if err != nil { + t.Fatal(err) + } + for _, name := range names { + if strings.HasPrefix(name, ".#") { + // Emacs noise. + continue + } + if *flagOnly != "" && !strings.Contains(name, *flagOnly) { + continue + } + if strings.HasSuffix(name, ".json") { + if strings.HasSuffix(name, "-want.json") { + continue + } + testConfig(filepath.Join("testdata", name), t) + } + } +} + +type namedReadSeeker struct { + name string + io.ReadSeeker +} + +func (n namedReadSeeker) Name() string { return n.name } +func (n namedReadSeeker) Close() error { return nil } + +// configParser returns a custom jsonconfig ConfigParser whose reader rewrites +// "/path/to/secring" to the absolute path of the jsonconfig test-secring.gpg file. +// On windows, it also fixes the slash separated paths. +func configParser() *jsonconfig.ConfigParser { + return &jsonconfig.ConfigParser{ + Open: func(path string) (jsonconfig.File, error) { + slurp, err := replaceRingPath(path) + if err != nil { + return nil, err + } + slurp = backslashEscape(slurp) + return namedReadSeeker{path, bytes.NewReader(slurp)}, nil + }, + } +} + +// replaceRingPath returns the contents of the file at path with secringPlaceholder replaced with the absolute path of relativeRing. 
+func replaceRingPath(path string) ([]byte, error) { + secRing, err := filepath.Abs(relativeRing) + if err != nil { + return nil, fmt.Errorf("Could not get absolute path of %v: %v", relativeRing, err) + } + secRing = strings.Replace(secRing, `\`, `\\`, -1) + slurpBytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + return bytes.Replace(slurpBytes, []byte(secringPlaceholder), []byte(secRing), 1), nil +} + +// We just need to make sure that we don't match the prefix handlers too. +var unixPathPattern = regexp.MustCompile(`"/.*/.+"`) + +// backslashEscape, on windows, changes all the slash separated paths (which +// match unixPathPattern, to omit the prefix handler paths) with escaped +// backslashes. +func backslashEscape(b []byte) []byte { + if runtime.GOOS != "windows" { + return b + } + unixPaths := unixPathPattern.FindAll(b, -1) + if unixPaths == nil { + return b + } + var oldNew []string + for _, v := range unixPaths { + bStr := string(v) + oldNew = append(oldNew, bStr, strings.Replace(bStr, `/`, `\\`, -1)) + } + r := strings.NewReplacer(oldNew...) 
+ return []byte(r.Replace(string(b))) +} + +func testConfig(name string, t *testing.T) { + wantedError := func() error { + slurp, err := ioutil.ReadFile(strings.Replace(name, ".json", ".err", 1)) + if os.IsNotExist(err) { + return nil + } + if err != nil { + t.Fatalf("Error reading .err file: %v", err) + } + return errors.New(string(slurp)) + } + b, err := replaceRingPath(name) + if err != nil { + t.Fatalf("Could not read %s: %v", name, err) + } + b = backslashEscape(b) + var hiLevelConf serverconfig.Config + if err := json.Unmarshal(b, &hiLevelConf); err != nil { + t.Fatalf("Could not unmarshal %s into a serverconfig.Config: %v", name, err) + } + + lowLevelConf, err := serverinit.GenLowLevelConfig(&hiLevelConf) + if g, w := strings.TrimSpace(fmt.Sprint(err)), strings.TrimSpace(fmt.Sprint(wantedError())); g != w { + t.Fatalf("test %s: got GenLowLevelConfig error %q; want %q", name, g, w) + } + if err != nil { + return + } + if err := (&jsonconfig.ConfigParser{}).CheckTypes(lowLevelConf.Obj); err != nil { + t.Fatalf("Error while parsing low-level conf generated from %v: %v", name, err) + } + + // TODO(mpl): should we stop execution (and not update golden files) + // if the comparison fails? Currently this is not the case. 
+ wantFile := strings.Replace(name, ".json", "-want.json", 1) + wantConf, err := configParser().ReadFile(wantFile) + if err != nil { + t.Fatalf("test %s: ReadFile: %v", name, err) + } + if *updateGolden { + contents, err := json.MarshalIndent(lowLevelConf.Obj, "", "\t") + if err != nil { + t.Fatal(err) + } + contents = canonicalizeGolden(t, contents) + if err := ioutil.WriteFile(wantFile, contents, 0644); err != nil { + t.Fatal(err) + } + } + compareConfigurations(t, name, lowLevelConf.Obj, wantConf) +} + +func compareConfigurations(t *testing.T, name, g interface{}, w interface{}) { + var got, want bytes.Buffer + prettyPrint(t, &got, g) + prettyPrint(t, &want, w) + + if got.String() != want.String() { + t.Errorf("test %s configurations differ.\nGot:\n%s\nWant:\n%s\nDiff (want -> got), %s:\n%s", + name, &got, &want, name, test.Diff(want.Bytes(), got.Bytes())) + } +} + +func canonicalizeGolden(t *testing.T, v []byte) []byte { + localPath, err := filepath.Abs(relativeRing) + if err != nil { + t.Fatal(err) + } + v = bytes.Replace(v, []byte(localPath), []byte(secringPlaceholder), 1) + if !bytes.HasSuffix(v, []byte("\n")) { + v = append(v, '\n') + } + return v +} + +func TestExpansionsInHighlevelConfig(t *testing.T) { + camroot, err := osutil.GoPackagePath("camlistore.org") + if err != nil { + t.Fatalf("failed to find camlistore.org GOPATH root: %v", err) + } + const keyID = "26F5ABDA" + os.Setenv("TMP_EXPANSION_TEST", keyID) + os.Setenv("TMP_EXPANSION_SECRING", filepath.Join(camroot, filepath.FromSlash("pkg/jsonsign/testdata/test-secring.gpg"))) + conf, err := serverinit.Load([]byte(` +{ + "auth": "localhost", + "listen": ":4430", + "https": false, + "identity": ["_env", "${TMP_EXPANSION_TEST}"], + "identitySecretRing": ["_env", "${TMP_EXPANSION_SECRING}"], + "googlecloudstorage": ":camlistore-dev-blobs", + "kvIndexFile": "/tmp/camli-index.kvdb" +} +`)) + if err != nil { + t.Fatal(err) + } + got := fmt.Sprintf("%#v", conf) + if !strings.Contains(got, keyID) { + 
t.Errorf("Expected key %s in resulting low-level config. Got: %s", keyID, got) + } +} + +func TestInstallHandlers(t *testing.T) { + camroot, err := osutil.GoPackagePath("camlistore.org") + if err != nil { + t.Fatalf("failed to find camlistore.org GOPATH root: %v", err) + } + conf := serverinit.DefaultBaseConfig + conf.Identity = "26F5ABDA" + conf.IdentitySecretRing = filepath.Join(camroot, filepath.FromSlash("pkg/jsonsign/testdata/test-secring.gpg")) + conf.MemoryStorage = true + conf.MemoryIndex = true + + confData, err := json.MarshalIndent(conf, "", " ") + if err != nil { + t.Fatalf("Could not json encode config: %v", err) + } + + lowConf, err := serverinit.Load(confData) + if err != nil { + t.Fatal(err) + } + // because these two are normally consumed in camlistored.go + // TODO(mpl): serverinit.Load should consume these 2 as well. Once + // consumed, we should keep all the answers as private fields, and then we + // put accessors on serverinit.Config. Maybe we even stop embedding + // jsonconfig.Obj in serverinit.Config too, so none of those methods are + // accessible. + lowConf.OptionalBool("https", true) + lowConf.OptionalString("listen", "") + + reindex := false + var context *http.Request // only used by App Engine. 
See handlerLoader in serverinit.go + hi := http.NewServeMux() + address := "http://" + conf.Listen + _, err = lowConf.InstallHandlers(hi, address, reindex, context) + if err != nil { + t.Fatal(err) + } + + tests := []struct { + prefix string + authWrapped bool + prefixWrapped bool + handlerType reflect.Type + }{ + { + prefix: "/", + handlerType: reflect.TypeOf(&server.RootHandler{}), + prefixWrapped: true, + }, + + { + prefix: "/sync/", + handlerType: reflect.TypeOf(&server.SyncHandler{}), + prefixWrapped: true, + authWrapped: true, + }, + + { + prefix: "/my-search/", + handlerType: reflect.TypeOf(&search.Handler{}), + prefixWrapped: true, + authWrapped: true, + }, + + { + prefix: "/ui/", + handlerType: reflect.TypeOf(&server.UIHandler{}), + prefixWrapped: true, + authWrapped: true, + }, + + { + prefix: "/importer/", + handlerType: reflect.TypeOf(&importer.Host{}), + prefixWrapped: true, + authWrapped: true, + }, + + { + prefix: "/sighelper/", + handlerType: reflect.TypeOf(&signhandler.Handler{}), + prefixWrapped: true, + authWrapped: true, + }, + + { + prefix: "/status/", + handlerType: reflect.TypeOf(&server.StatusHandler{}), + prefixWrapped: true, + authWrapped: true, + }, + + { + prefix: "/help/", + handlerType: reflect.TypeOf(&server.HelpHandler{}), + prefixWrapped: true, + authWrapped: true, + }, + + { + prefix: "/setup/", + handlerType: reflect.TypeOf(&server.SetupHandler{}), + prefixWrapped: true, + }, + + { + prefix: "/bs/camli/", + handlerType: reflect.TypeOf(http.HandlerFunc(nil)), + }, + + { + prefix: "/index/camli/", + handlerType: reflect.TypeOf(http.HandlerFunc(nil)), + }, + + { + prefix: "/bs-and-index/camli/", + handlerType: reflect.TypeOf(http.HandlerFunc(nil)), + }, + + { + prefix: "/bs-and-maybe-also-index/camli/", + handlerType: reflect.TypeOf(http.HandlerFunc(nil)), + }, + + { + prefix: "/cache/camli/", + handlerType: reflect.TypeOf(http.HandlerFunc(nil)), + }, + } + for _, v := range tests { + req, err := http.NewRequest("GET", 
address+v.prefix, nil) + if err != nil { + t.Error(err) + continue + } + h, _ := hi.Handler(req) + if v.authWrapped { + ah, ok := h.(auth.Handler) + if !ok { + t.Errorf("handler for %v should be auth wrapped", v.prefix) + continue + } + h = ah.Handler + } + if v.prefixWrapped { + ph, ok := h.(*httputil.PrefixHandler) + if !ok { + t.Errorf("handler for %v should be prefix wrapped", v.prefix) + continue + } + h = ph.Handler + } + if reflect.TypeOf(h) != v.handlerType { + t.Errorf("for %v: want %v, got %v", v.prefix, v.handlerType, reflect.TypeOf(h)) + } + } +} + +// TestGenerateClientConfig validates the client config generated for display +// by the HelpHandler. +func TestGenerateClientConfig(t *testing.T) { + inName := filepath.Join("testdata", "gen_client_config.in") + wantName := strings.Replace(inName, ".in", ".out", 1) + + b, err := replaceRingPath(inName) + if err != nil { + t.Fatalf("Failed to read high-level server config file: %v", err) + } + b = backslashEscape(b) + var hiLevelConf serverconfig.Config + if err := json.Unmarshal(b, &hiLevelConf); err != nil { + t.Fatalf("Failed to unmarshal server config: %v", err) + } + lowLevelConf, err := serverinit.GenLowLevelConfig(&hiLevelConf) + if err != nil { + t.Fatalf("Failed to generate low-level config: %v", err) + } + generatedConf, err := clientconfig.GenerateClientConfig(lowLevelConf.Obj) + if err != nil { + t.Fatalf("Failed to generate client config: %v", err) + } + + wb, err := replaceRingPath(wantName) + if err != nil { + t.Fatalf("Failed to read want config file: %v", err) + } + wb = backslashEscape(wb) + var wantConf clientconfig.Config + if err := json.Unmarshal(wb, &wantConf); err != nil { + t.Fatalf("Failed to unmarshall want config: %v", err) + } + + compareConfigurations(t, inName, generatedConf, wantConf) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/baseurl-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/baseurl-want.json new file mode 
100644 index 00000000..0ca85d3d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/baseurl-want.json @@ -0,0 +1,118 @@ +{ + "auth": "userpass:camlistore:pass3179", + "baseURL": "http://monkey.foo.com", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "ownerName": "Alice", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.kv", + 
"type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.kv", + "type": "kv" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/baseurl.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/baseurl.json new file mode 100644 index 00000000..b53d050d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/baseurl.json @@ -0,0 +1,11 @@ +{ + "listen": "localhost:3179", + "auth": "userpass:camlistore:pass3179", + "baseURL": "http://monkey.foo.com", + "blobPath": "/tmp/blobs", + "kvIndexFile": "/path/to/indexkv.db", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "ownerName": "Alice", + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/baseurlbad.err b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/baseurlbad.err new file mode 100644 index 00000000..f1733a00 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/baseurlbad.err @@ -0,0 +1 @@ +baseURL can't have a path, only a scheme, host, and optional port. 
\ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/baseurlbad.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/baseurlbad.json new file mode 100644 index 00000000..e0eaf495 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/baseurlbad.json @@ -0,0 +1,10 @@ +{ + "listen": "localhost:3179", + "baseURL": "http://foo.com/bar/", + "https": false, + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/blobpacked_googlecloud-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/blobpacked_googlecloud-want.json new file mode 100644 index 00000000..ee8a7669 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/blobpacked_googlecloud-want.json @@ -0,0 +1,156 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "ownerName": "Alice", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-blobpacked", + "handlerArgs": { + "smallBlobs": "/bs-loose/", + "largeBlobs": "/bs-packed/", + "metaIndex": { + "database": "blobpacked_index", + "host": "localhost", + "password": "root", + "type": "mysql", + "user": "root" + } + } + }, + "/bs-loose/": { + 
"handler": "storage-googlecloudstorage", + "handlerArgs": { + "auth": { + "client_id": "clientId", + "client_secret": "clientSecret", + "refresh_token": "refreshToken" + }, + "bucket": "bucketName/blobs/loose" + } + }, + "/bs-packed/": { + "handler": "storage-googlecloudstorage", + "handlerArgs": { + "auth": { + "client_id": "clientId", + "client_secret": "clientSecret", + "refresh_token": "refreshToken" + }, + "bucket": "bucketName/blobs/packed" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/camli-cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "database": "camlitest", + "host": "localhost", + "password": "root", + "type": "mysql", + "user": "root" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "database": "sync_index_queue", + "type": "mysql", + "host": "localhost", + "user": "root", + "password": "root" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "database": "ui_thumbmeta_cache", + "host": "localhost", + "type": "mysql", + "user": "root", + "password": "root" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/blobpacked_googlecloud.json 
b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/blobpacked_googlecloud.json new file mode 100644 index 00000000..f32df479 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/blobpacked_googlecloud.json @@ -0,0 +1,17 @@ +{ + "listen": "localhost:3179", + "auth": "userpass:camlistore:pass3179", + "googlecloudstorage": "clientId:clientSecret:refreshToken:bucketName/blobs", + "packRelated": true, + "dbNames": { + "index": "camlitest", + "queue-sync-to-index": "sync_index_queue", + "blobpacked_index": "blobpacked_index", + "ui_thumbcache": "ui_thumbmeta_cache" + }, + "mysql": "root@localhost:root", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "ownerName": "Alice", + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/blobpacked_localdisk-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/blobpacked_localdisk-want.json new file mode 100644 index 00000000..472a3267 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/blobpacked_localdisk-want.json @@ -0,0 +1,134 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "ownerName": "Alice", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-blobpacked", + "handlerArgs": { + "smallBlobs": "/bs-loose/", + "largeBlobs": "/bs-packed/", + "metaIndex": { + 
"file": "/tmp/blobs/packed/packindex.kv", + "type": "kv" + } + } + }, + "/bs-loose/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/bs-packed/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/packed" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.kv", + "type": "kv" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/blobpacked_localdisk.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/blobpacked_localdisk.json new file mode 100644 index 00000000..7abf7cc8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/blobpacked_localdisk.json @@ -0,0 +1,11 @@ +{ + "listen": "localhost:3179", + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + 
"packRelated": true, + "kvIndexFile": "/path/to/indexkv.db", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "ownerName": "Alice", + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/default-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/default-want.json new file mode 100644 index 00000000..41800bb6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/default-want.json @@ -0,0 +1,117 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "ownerName": "Alice", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + 
"/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.kv", + "type": "kv" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/default.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/default.json new file mode 100644 index 00000000..f15fc9ff --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/default.json @@ -0,0 +1,10 @@ +{ + "listen": "localhost:3179", + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "kvIndexFile": "/path/to/indexkv.db", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "ownerName": "Alice", + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/diskpacked-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/diskpacked-want.json new file mode 100644 index 00000000..03aadf28 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/diskpacked-want.json @@ -0,0 +1,125 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "ownerName": "Alice", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + 
"/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-diskpacked", + "handlerArgs": { + "path": "/tmp/blobs", + "metaIndex": { + "file": "/tmp/blobs/index.kv", + "type": "kv" + } + } + }, + "/cache/": { + "handler": "storage-diskpacked", + "handlerArgs": { + "metaIndex": { + "file": "/tmp/blobs/cache/index.kv", + "type": "kv" + }, + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.kv", + "type": "kv" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/diskpacked.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/diskpacked.json new file mode 100644 index 00000000..4e904cd6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/diskpacked.json @@ -0,0 +1,11 
@@ +{ + "listen": "localhost:3179", + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "packBlobs": true, + "kvIndexFile": "/path/to/indexkv.db", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "ownerName": "Alice", + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/flickr-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/flickr-want.json new file mode 100644 index 00000000..3fd6a7b4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/flickr-want.json @@ -0,0 +1,121 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "ownerName": "Alice", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": { + "flickr": { + "clientSecret": "monkey:balls" + } + } + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + 
"slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.kv", + "type": "kv" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/flickr.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/flickr.json new file mode 100644 index 00000000..2070bbf0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/flickr.json @@ -0,0 +1,11 @@ +{ + "listen": "localhost:3179", + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "flickr": "monkey:balls", + "kvIndexFile": "/path/to/indexkv.db", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "ownerName": "Alice", + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/gen_client_config.in b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/gen_client_config.in new file mode 100644 index 00000000..0b7c7a90 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/gen_client_config.in @@ -0,0 +1,9 @@ +{ + "listen": "1.2.3.4:567", + "https": false, + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db" +} \ No newline at end of file diff --git 
a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/gen_client_config.out b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/gen_client_config.out new file mode 100644 index 00000000..192fc57f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/gen_client_config.out @@ -0,0 +1,14 @@ +{ + "servers": { + "default": { + "server": "http://1.2.3.4:567", + "auth": "userpass:camlistore:pass3179", + "default": true + } + }, + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "ignoredFiles": [ + ".DS_Store" + ] +} \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_nolocaldisk-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_nolocaldisk-want.json new file mode 100644 index 00000000..ce59dbbb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_nolocaldisk-want.json @@ -0,0 +1,117 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-googlecloudstorage", + "handlerArgs": { + "auth": { + "client_id": "clientId", + "client_secret": "clientSecret", + "refresh_token": "refreshToken" + }, + "bucket": "bucketName" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/camli-cache" + } + }, + "/help/": { 
+ "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/path/to/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/" + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_nolocaldisk.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_nolocaldisk.json new file mode 100644 index 00000000..aee30af0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_nolocaldisk.json @@ -0,0 +1,12 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db", + "googlecloudstorage": "clientId:clientSecret:refreshToken:bucketName", + "replicateTo": [], + "publish": {}, + "shareHandler": true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_nolocaldisk_subdir-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_nolocaldisk_subdir-want.json new file mode 100644 index 00000000..697c48e9 --- 
/dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_nolocaldisk_subdir-want.json @@ -0,0 +1,117 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-googlecloudstorage", + "handlerArgs": { + "auth": { + "client_id": "clientId", + "client_secret": "clientSecret", + "refresh_token": "refreshToken" + }, + "bucket": "bucketName/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/camli-cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + 
"file": "/path/to/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/" + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_nolocaldisk_subdir.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_nolocaldisk_subdir.json new file mode 100644 index 00000000..8250e694 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_nolocaldisk_subdir.json @@ -0,0 +1,12 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db", + "googlecloudstorage": "clientId:clientSecret:refreshToken:bucketName/blobs", + "replicateTo": [], + "publish": {}, + "shareHandler": true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_queues_on_db-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_queues_on_db-want.json new file mode 100644 index 00000000..ad45f27f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_queues_on_db-want.json @@ -0,0 +1,123 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-googlecloudstorage", 
+ "handlerArgs": { + "auth": { + "client_id": "auto", + "client_secret": "", + "refresh_token": "" + }, + "bucket": "bucketName" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/camli-cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "database": "camindex", + "host": "camlistore.cloudsql.google.internal", + "password": "root", + "type": "mysql", + "user": "root" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "to": "/index/", + "queue": { + "database": "camindex_syncindex_q", + "host": "camlistore.cloudsql.google.internal", + "password": "root", + "type": "mysql", + "user": "root" + } + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/" + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_queues_on_db.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_queues_on_db.json new file mode 100644 index 00000000..e52aa5e4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_queues_on_db.json @@ -0,0 +1,14 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + 
"mysql": "root@camlistore.cloudsql.google.internal:root", + "googlecloudstorage": ":bucketName", + "shareHandler": true, + "dbNames": { + "index": "camindex", + "queue-sync-to-index": "camindex_syncindex_q" + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_service_account-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_service_account-want.json new file mode 100644 index 00000000..6d1aed82 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_service_account-want.json @@ -0,0 +1,117 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-googlecloudstorage", + "handlerArgs": { + "auth": { + "client_id": "auto", + "client_secret": "", + "refresh_token": "" + }, + "bucket": "bucketName" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/camli-cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": 
true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/path/to/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/" + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_service_account.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_service_account.json new file mode 100644 index 00000000..cf056f69 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_service_account.json @@ -0,0 +1,12 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db", + "googlecloudstorage": ":bucketName", + "replicateTo": [], + "publish": {}, + "shareHandler": true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_service_account_subdir-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_service_account_subdir-want.json new file mode 100644 index 00000000..ddd8f0f3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_service_account_subdir-want.json @@ -0,0 +1,117 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + 
"statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-googlecloudstorage", + "handlerArgs": { + "auth": { + "client_id": "auto", + "client_secret": "", + "refresh_token": "" + }, + "bucket": "bucketName/config/" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/camli-cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/path/to/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/" + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_service_account_subdir.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_service_account_subdir.json new file mode 100644 index 00000000..0a18bcc6 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/google_service_account_subdir.json @@ -0,0 +1,12 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db", + "googlecloudstorage": ":bucketName/config/", + "replicateTo": [], + "publish": {}, + "shareHandler": true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/justblobs-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/justblobs-want.json new file mode 100644 index 00000000..d1cef537 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/justblobs-want.json @@ -0,0 +1,52 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/justblobs.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/justblobs.json new file mode 100644 index 00000000..41107326 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/justblobs.json @@ -0,0 +1,10 @@ +{ + 
"listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "runIndex": false, + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/leveldb-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/leveldb-want.json new file mode 100644 index 00000000..2c4ec435 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/leveldb-want.json @@ -0,0 +1,117 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "ownerName": "Alice", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexleveldb.ldb", + "type": "leveldb" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + 
"handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.leveldb", + "type": "leveldb" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.leveldb", + "type": "leveldb" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/leveldb.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/leveldb.json new file mode 100644 index 00000000..d087a866 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/leveldb.json @@ -0,0 +1,10 @@ +{ + "listen": "localhost:3179", + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "levelDB": "/path/to/indexleveldb.ldb", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "ownerName": "Alice", + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/listenbase-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/listenbase-want.json new file mode 100644 index 00000000..1e0ac957 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/listenbase-want.json @@ -0,0 +1,117 @@ +{ + "auth": "userpass:camlistore:pass3179", + "baseURL": "http://foo.com", + "https": false, + "listen": "1.2.3.4:80", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + 
"handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.kv", + "type": "kv" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/listenbase.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/listenbase.json new file mode 100644 index 00000000..c8e5554e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/listenbase.json @@ -0,0 +1,10 @@ +{ + "listen": "1.2.3.4:80", + "baseURL": 
"http://foo.com/", + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "kvIndexFile": "/path/to/indexkv.db", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "shareHandler": true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/mem-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/mem-want.json new file mode 100644 index 00000000..c9dceb90 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/mem-want.json @@ -0,0 +1,180 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "ownerName": "Brad", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + 
"blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sto-googlecloudstorage/": { + "handler": "storage-googlecloudstorage", + "handlerArgs": { + "auth": { + "client_id": "clientId", + "client_secret": "clientSecret", + "refresh_token": "refreshToken" + }, + "bucket": "bucketName" + } + }, + "/sto-googledrive/": { + "handler": "storage-googledrive", + "handlerArgs": { + "auth": { + "client_id": "clientId", + "client_secret": "clientSecret", + "refresh_token": "refreshToken" + }, + "parent_id": "parentDirId" + } + }, + "/sto-s3/": { + "handler": "storage-s3", + "handlerArgs": { + "aws_access_key": "key", + "aws_secret_access_key": "secret", + "bucket": "bucket" + } + }, + "/sync-to-googlecloudstorage/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-googlecloud-queue.kv", + "type": "kv" + }, + "to": "/sto-googlecloudstorage/" + } + }, + "/sync-to-googledrive/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-googledrive-queue.kv", + "type": "kv" + }, + "to": "/sto-googledrive/" + } + }, + "/sync-to-s3/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-s3-queue.kv", + "type": "kv" + }, + "to": "/sto-s3/" + } + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.kv", + "type": "kv" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/mem.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/mem.json new file mode 
100644 index 00000000..9bdb1ee9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/mem.json @@ -0,0 +1,16 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db", + "s3": "key:secret:bucket", + "googlecloudstorage": "clientId:clientSecret:refreshToken:bucketName", + "googledrive": "clientId:clientSecret:refreshToken:parentDirId", + "replicateTo": [], + "publish": {}, + "ownerName": "Brad", + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/memindex-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/memindex-want.json new file mode 100644 index 00000000..9d22fe21 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/memindex-want.json @@ -0,0 +1,116 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "ownerName": "Alice", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + 
"handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "type": "memory" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.leveldb", + "type": "leveldb" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.leveldb", + "type": "leveldb" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/memindex.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/memindex.json new file mode 100644 index 00000000..ae0fadc3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/memindex.json @@ -0,0 +1,10 @@ +{ + "listen": "localhost:3179", + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "memoryIndex": true, + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "ownerName": "Alice", + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/memory_storage-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/memory_storage-want.json new file mode 100644 index 00000000..470c68a0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/memory_storage-want.json @@ -0,0 +1,106 @@ +{ + "auth": "userpass:camlistore:pass3179", + 
"https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "ownerName": "Alice", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-memory" + }, + "/cache/": { + "handler": "storage-memory" + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "type": "memory" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/" + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/memory_storage.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/memory_storage.json new file mode 100644 index 00000000..43905973 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/memory_storage.json @@ -0,0 +1,10 @@ +{ + "listen": "localhost:3179", + "auth": "userpass:camlistore:pass3179", + "memoryStorage": true, + "kvIndexFile": "/path/to/indexkv.db", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "ownerName": "Alice", + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/mongo-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/mongo-want.json new file mode 100644 index 00000000..4f14079d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/mongo-want.json @@ -0,0 +1,120 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "ownerName": "Alice", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "database": "camlitest", + "host": "localhost", + "password": "", + "type": "mongo", + "user": "" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": 
"/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.leveldb", + "type": "leveldb" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.leveldb", + "type": "leveldb" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/mongo.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/mongo.json new file mode 100644 index 00000000..ff6bfd75 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/mongo.json @@ -0,0 +1,11 @@ +{ + "listen": "localhost:3179", + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "dbname": "camlitest", + "mongo": ":@localhost", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "ownerName": "Alice", + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/multipublish-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/multipublish-want.json new file mode 100644 index 00000000..f9a29e67 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/multipublish-want.json @@ -0,0 +1,140 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": 
"/sighelper/", + "helpRoot": "/help/", + "ownerName": "Alice", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/music/": { + "handler": "app", + "handlerArgs": { + "program": "publisher", + "baseURL": "http://localhost:3178/", + "appConfig": { + "camliRoot": "musicRoot", + "goTemplate": "music.html", + "cacheRoot": "/tmp/blobs/cache" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/pics/": { + "handler": "app", + "handlerArgs": { + "program": "publisher", + "appConfig": { + "camliRoot": "picsRoot", + "goTemplate": "gallery.html", + "cacheRoot": "/tmp/blobs/cache" + } + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": 
"/tmp/blobs/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.kv", + "type": "kv" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/multipublish.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/multipublish.json new file mode 100644 index 00000000..db23f87e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/multipublish.json @@ -0,0 +1,23 @@ +{ + "listen": "localhost:3179", + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "kvIndexFile": "/path/to/indexkv.db", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "ownerName": "Alice", + "shareHandlerPath": "/share/", + "publish": { + "/pics/": { + "camliRoot": "picsRoot", + "cacheRoot": "/tmp/blobs/cache", + "goTemplate": "gallery.html" + }, + "/music/": { + "camliRoot": "musicRoot", + "baseURL": "http://localhost:3178/", + "cacheRoot": "/tmp/blobs/cache", + "goTemplate": "music.html" + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/noindex.err b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/noindex.err new file mode 100644 index 00000000..14c33614 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/noindex.err @@ -0,0 +1 @@ +Unless runIndex is set to false, you must specify an index option (kvIndexFile, leveldb, mongo, mysql, postgres, sqlite, memoryIndex). 
diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/noindex.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/noindex.json new file mode 100644 index 00000000..7575438c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/noindex.json @@ -0,0 +1,7 @@ +{ + "listen": "localhost:3179", + "auth": "localhost", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "blobPath": "/var/blobs" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_alt_host-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_alt_host-want.json new file mode 100644 index 00000000..10bfcdb1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_alt_host-want.json @@ -0,0 +1,115 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-s3", + "handlerArgs": { + "aws_access_key": "key", + "aws_secret_access_key": "secret", + "bucket": "bucket", + "hostname": "foo.com" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/camli-cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + 
"file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/path/to/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/" + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_alt_host.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_alt_host.json new file mode 100644 index 00000000..ca187002 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_alt_host.json @@ -0,0 +1,12 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db", + "s3": "key:secret:bucket:foo.com", + "replicateTo": [], + "publish": {}, + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_google_nolocaldisk.err b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_google_nolocaldisk.err new file mode 100644 index 00000000..01dd0e63 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_google_nolocaldisk.err @@ -0,0 +1 @@ +Using S3 as a primary storage and Google Cloud Storage as a mirror is not supported for now. 
\ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_google_nolocaldisk.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_google_nolocaldisk.json new file mode 100644 index 00000000..31be495e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_google_nolocaldisk.json @@ -0,0 +1,13 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db", + "s3": "key:secret:bucket", + "googlecloudstorage": "clientId:clientSecret:refreshToken:bucketName", + "replicateTo": [], + "publish": {}, + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_nolocaldisk-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_nolocaldisk-want.json new file mode 100644 index 00000000..506a53d3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_nolocaldisk-want.json @@ -0,0 +1,114 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-s3", + "handlerArgs": { + "aws_access_key": "key", + "aws_secret_access_key": "secret", + "bucket": "bucket" + } + }, + "/cache/": { + "handler": 
"storage-filesystem", + "handlerArgs": { + "path": "/tmp/camli-cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/path/to/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/" + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_nolocaldisk.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_nolocaldisk.json new file mode 100644 index 00000000..5d741bb9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_nolocaldisk.json @@ -0,0 +1,12 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db", + "s3": "key:secret:bucket", + "replicateTo": [], + "publish": {}, + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_nolocaldisk_mysql-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_nolocaldisk_mysql-want.json new file mode 
100644 index 00000000..cba91976 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_nolocaldisk_mysql-want.json @@ -0,0 +1,114 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-s3", + "handlerArgs": { + "aws_access_key": "key", + "aws_secret_access_key": "secret", + "bucket": "bucket" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/camli-cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "database": "camlitest", + "host": "localhost", + "password": "password", + "type": "mysql", + "user": "user" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": 
"/bs/", + "idle": true, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/" + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_nolocaldisk_mysql.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_nolocaldisk_mysql.json new file mode 100644 index 00000000..eb0d88c8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/s3_nolocaldisk_mysql.json @@ -0,0 +1,13 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "dbname": "camlitest", + "mysql": "user@localhost:password", + "s3": "key:secret:bucket", + "replicateTo": [], + "publish": {}, + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/sqlite-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/sqlite-want.json new file mode 100644 index 00000000..688a07f1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/sqlite-want.json @@ -0,0 +1,116 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": 
{ + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/tmp/camli.db", + "type": "sqlite" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.sqlite", + "type": "sqlite" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.sqlite", + "type": "sqlite" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/sqlite.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/sqlite.json new file mode 100644 index 00000000..1d3c6f22 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/sqlite.json @@ -0,0 +1,10 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "blobPath": "/tmp/blobs", + "sqlite": "/tmp/camli.db", + "shareHandler": true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/thumbcache_on_db-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/thumbcache_on_db-want.json new file mode 100644 index 00000000..f4fe61f9 --- 
/dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/thumbcache_on_db-want.json @@ -0,0 +1,130 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-googlecloudstorage", + "handlerArgs": { + "auth": { + "client_id": "auto", + "client_secret": "", + "refresh_token": "" + }, + "bucket": "bucketName" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/camli-cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "database": "camindex", + "host": "camlistore.cloudsql.google.internal", + "password": "root", + "type": "mysql", + "user": "root" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + 
"handlerArgs": { + "from": "/bs/", + "to": "/index/", + "queue": { + "database": "camindex_syncindex_q", + "host": "camlistore.cloudsql.google.internal", + "password": "root", + "type": "mysql", + "user": "root" + } + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "database": "thumbcache_db", + "host": "camlistore.cloudsql.google.internal", + "password": "root", + "type": "mysql", + "user": "root" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/thumbcache_on_db.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/thumbcache_on_db.json new file mode 100644 index 00000000..d8bb564a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/thumbcache_on_db.json @@ -0,0 +1,15 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "mysql": "root@camlistore.cloudsql.google.internal:root", + "googlecloudstorage": ":bucketName", + "shareHandler": true, + "dbNames": { + "index": "camindex", + "ui_thumbcache": "thumbcache_db", + "queue-sync-to-index": "camindex_syncindex_q" + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/tls-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/tls-want.json new file mode 100644 index 00000000..fb8d8815 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/tls-want.json @@ -0,0 +1,131 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": true, + "httpsCert": "/tls.crt", + "httpsKey": "/tls.key", + "listen": "1.2.3.4:443", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + 
"handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/pics/": { + "handler": "app", + "handlerArgs": { + "program": "publisher", + "appConfig": { + "camliRoot": "picsRoot", + "goTemplate": "gallery.html", + "cacheRoot": "/tmp/blobs/cache", + "httpsCert": "/tls.crt", + "httpsKey": "/tls.key" + } + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.kv", + "type": "kv" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/tls.json 
b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/tls.json new file mode 100644 index 00000000..72a449d6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/tls.json @@ -0,0 +1,21 @@ +{ + "listen": "1.2.3.4:443", + "https": true, + "httpsCert": "/tls.crt", + "httpsKey": "/tls.key", + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db", + "s3": "", + "replicateTo": [], + "publish": { + "/pics/": { + "camliRoot": "picsRoot", + "cacheRoot": "/tmp/blobs/cache", + "goTemplate": "gallery.html" + } + }, + "shareHandler": true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_blog-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_blog-want.json new file mode 100644 index 00000000..a8b99351 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_blog-want.json @@ -0,0 +1,128 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/blog/": { + "handler": "app", + "handlerArgs": { + "program": "publisher", + "baseURL": "http://localhost:3178/", + "appConfig": { + "camliRoot": "blogRoot", + "goTemplate": "blog.html", + "cacheRoot": "/tmp/blobs/cache" + } + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + 
"handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.kv", + "type": "kv" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_blog.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_blog.json new file mode 100644 index 00000000..56096d7e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_blog.json @@ -0,0 +1,20 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db", + "s3": "", + "publish": { + "/blog/": { + "camliRoot": "blogRoot", + "baseURL": "http://localhost:3178/", + "cacheRoot": "/tmp/blobs/cache", + 
"goTemplate": "blog.html" + } + }, + "replicateTo": [], + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_gallery-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_gallery-want.json new file mode 100644 index 00000000..8b0a1ec5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_gallery-want.json @@ -0,0 +1,130 @@ +{ + "auth": "userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/pics/": { + "handler": "app", + "handlerArgs": { + "program": "publisher", + "baseURL": "http://localhost:3178/", + "appConfig": { + "httpsCert": "/tls.crt", + "httpsKey": "/tls.key", + "camliRoot": "picsRoot", + "goTemplate": 
"gallery.html", + "cacheRoot": "/tmp/blobs/cache" + } + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sync/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.kv", + "type": "kv" + } + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_gallery.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_gallery.json new file mode 100644 index 00000000..bc0250ae --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_gallery.json @@ -0,0 +1,22 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db", + "s3": "", + "publish": { + "/pics/": { + "httpsCert": "/tls.crt", + "httpsKey": "/tls.key", + "camliRoot": "picsRoot", + "baseURL": "http://localhost:3178/", + "cacheRoot": "/tmp/blobs/cache", + "goTemplate": "gallery.html" + } + }, + "replicateTo": [], + "shareHandlerPath": "/share/" +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_sourceroot-want.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_sourceroot-want.json new file mode 100644 index 00000000..695ec329 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_sourceroot-want.json @@ -0,0 +1,136 @@ +{ + "auth": 
"userpass:camlistore:pass3179", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs-and-maybe-also-index/", + "jsonSignRoot": "/sighelper/", + "helpRoot": "/help/", + "searchRoot": "/my-search/", + "statusRoot": "/status/", + "stealth": false + } + }, + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": [ + "/bs/", + "/index/" + ] + } + }, + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "read": "/bs/", + "write": { + "else": "/bs/", + "if": "isSchema", + "then": "/bs-and-index/" + } + } + }, + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs" + } + }, + "/cache/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": "/tmp/blobs/cache" + } + }, + "/help/": { + "handler": "help" + }, + "/importer/": { + "handler": "importer", + "handlerArgs": {} + }, + "/index/": { + "handler": "storage-index", + "handlerArgs": { + "blobSource": "/bs/", + "storage": { + "file": "/path/to/indexkv.db", + "type": "kv" + } + } + }, + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4", + "slurpToMemory": true + } + }, + "/setup/": { + "handler": "setup" + }, + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "keyId": "26F5ABDA", + "publicKeyDest": "/bs-and-index/", + "secretRing": "/path/to/secring" + } + }, + "/status/": { + "handler": "status" + }, + "/sto-s3/": { + "handler": "storage-s3", + "handlerArgs": { + "aws_access_key": "key", + "aws_secret_access_key": "secret", + "bucket": "bucket" + } + }, + "/sync-to-s3/": { + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-s3-queue.kv", + "type": "kv" + }, + "to": "/sto-s3/" + } + }, + "/sync/": { + 
"handler": "sync", + "handlerArgs": { + "from": "/bs/", + "queue": { + "file": "/tmp/blobs/sync-to-index-queue.kv", + "type": "kv" + }, + "to": "/index/" + } + }, + "/ui/": { + "handler": "ui", + "handlerArgs": { + "cache": "/cache/", + "scaledImage": { + "file": "/tmp/blobs/thumbmeta.kv", + "type": "kv" + }, + "sourceRoot": "/path/to/alternative/camli/source" + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_sourceroot.json b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_sourceroot.json new file mode 100644 index 00000000..d3f96b60 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/serverinit/testdata/with_sourceroot.json @@ -0,0 +1,14 @@ +{ + "listen": "localhost:3179", + "https": false, + "auth": "userpass:camlistore:pass3179", + "blobPath": "/tmp/blobs", + "identity": "26F5ABDA", + "identitySecretRing": "/path/to/secring", + "kvIndexFile": "/path/to/indexkv.db", + "s3": "key:secret:bucket", + "replicateTo": [], + "publish": {}, + "sourceRoot": "/path/to/alternative/camli/source", + "shareHandler": true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/singleflight/singleflight.go b/vendor/github.com/camlistore/camlistore/pkg/singleflight/singleflight.go new file mode 100644 index 00000000..3b174172 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/singleflight/singleflight.go @@ -0,0 +1,64 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed Do call +type call struct { + wg sync.WaitGroup + val interface{} + err error +} + +// Group represents a class of work and forms a namespace in which +// units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +func (g *Group) Do(key string, fn func() (interface{}, error)) (interface{}, error) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() + + return c.val, c.err +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/singleflight/singleflight_test.go b/vendor/github.com/camlistore/camlistore/pkg/singleflight/singleflight_test.go new file mode 100644 index 00000000..40edcf30 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/singleflight/singleflight_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package singleflight + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestDo(t *testing.T) { + var g Group + v, err := g.Do("key", func() (interface{}, error) { + return "bar", nil + }) + if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want { + t.Errorf("Do = %v; want %v", got, want) + } + if err != nil { + t.Errorf("Do error = %v", err) + } +} + +func TestDoErr(t *testing.T) { + var g Group + someErr := errors.New("Some error") + v, err := g.Do("key", func() (interface{}, error) { + return nil, someErr + }) + if err != someErr { + t.Errorf("Do error = %v; want someErr %v", err, someErr) + } + if v != nil { + t.Errorf("unexpected non-nil value %#v", v) + } +} + +func TestDoDupSuppress(t *testing.T) { + var g Group + c := make(chan string) + var calls int32 + fn := func() (interface{}, error) { + atomic.AddInt32(&calls, 1) + return <-c, nil + } + + const n = 10 + var wg sync.WaitGroup + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + v, err := g.Do("key", fn) + if err != nil { + t.Errorf("Do error: %v", err) + } + if v.(string) != "bar" { + t.Errorf("got %q; want %q", v, "bar") + } + wg.Done() + }() + } + time.Sleep(100 * time.Millisecond) // let goroutines above block + c <- "bar" + wg.Wait() + if got := atomic.LoadInt32(&calls); got != 1 { + t.Errorf("number of calls = %d; want 1", got) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/buffer/buffer.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/buffer/buffer.go new file mode 100644 index 00000000..601f4a8a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/buffer/buffer.go @@ -0,0 +1,319 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package buffer provides a sorted.KeyValue implementation that +// buffers one KeyValue implementation in front of an another. It's +// used for cases such as reindexing where you need a KeyValue but it +// doesn't need to be flushed and consistent until the end. +package buffer + +import ( + "fmt" + "sync" + + "camlistore.org/pkg/sorted" +) + +// New returnes a sorted.KeyValue implementation that adds a Flush +// method to flush the buffer to the backing storage. A flush will +// also be performed when maxBufferBytes are reached. If +// maxBufferBytes <= 0, no automatic flushing is performed. +func New(buffer, backing sorted.KeyValue, maxBufferBytes int64) *KeyValue { + return &KeyValue{ + buf: buffer, + back: backing, + maxBuffer: maxBufferBytes, + } +} + +var _ sorted.KeyValue = (*KeyValue)(nil) + +type KeyValue struct { + buf, back sorted.KeyValue + maxBuffer int64 + + bufMu sync.Mutex + buffered int64 + + // This read lock should be held during Set/Get/Delete/BatchCommit, + // and the write lock should be held during Flush. 
+ mu sync.RWMutex +} + +func (kv *KeyValue) Flush() error { + kv.mu.Lock() + defer kv.mu.Unlock() + var ( + bmback = kv.back.BeginBatch() + bmbuf = kv.buf.BeginBatch() + commit = false + it = kv.buf.Find("", "") + ) + for it.Next() { + bmback.Set(it.Key(), it.Value()) + bmbuf.Delete(it.Key()) + commit = true + } + if err := it.Close(); err != nil { + return err + } + if commit { + if err := kv.back.CommitBatch(bmback); err != nil { + return err + } + if err := kv.buf.CommitBatch(bmbuf); err != nil { + return err + } + kv.bufMu.Lock() + kv.buffered = 0 + kv.bufMu.Unlock() + } + return nil +} + +func (kv *KeyValue) Get(key string) (string, error) { + kv.mu.RLock() + defer kv.mu.RUnlock() + v, err := kv.buf.Get(key) + switch err { + case sorted.ErrNotFound: + break + case nil: + return v, nil + default: + return "", err + } + return kv.back.Get(key) +} + +func (kv *KeyValue) Set(key, value string) error { + if err := sorted.CheckSizes(key, value); err != nil { + return err + } + kv.mu.RLock() + err := kv.buf.Set(key, value) + kv.mu.RUnlock() + if err == nil { + kv.bufMu.Lock() + kv.buffered += int64(len(key) + len(value)) + doFlush := kv.buffered > kv.maxBuffer + kv.bufMu.Unlock() + if doFlush { + err = kv.Flush() + } + } + return err +} + +func (kv *KeyValue) Delete(key string) error { + kv.mu.RLock() + defer kv.mu.RUnlock() + // This isn't an ideal implementation, since it synchronously + // deletes from the backing store. But deletes aren't really + // used, so ignoring for now. + // Could also use a syncutil.Group to do these in parallel, + // but the buffer should be an in-memory implementation + // anyway, so should be fast. 
+ err1 := kv.buf.Delete(key) + err2 := kv.back.Delete(key) + if err1 != nil { + return err1 + } + return err2 +} + +func (kv *KeyValue) BeginBatch() sorted.BatchMutation { + return new(batch) +} + +func (kv *KeyValue) CommitBatch(bm sorted.BatchMutation) error { + kv.mu.RLock() + defer kv.mu.RUnlock() + b, ok := bm.(*batch) + if !ok { + return fmt.Errorf("unexpected BatchMutation type %T", bm) + } + var ( + // A batch mutation for applying this mutation to the buffer. + bmbuf = kv.buf.BeginBatch() + // A lazily created batch mutation for deleting from the backing + // storage; this should be rare. (See Delete above.) + bmback sorted.BatchMutation + ) + for _, m := range b.mods { + if m.isDelete { + bmbuf.Delete(m.key) + if bmback == nil { + bmback = kv.back.BeginBatch() + } + bmback.Delete(m.key) + continue + } else { + if err := sorted.CheckSizes(m.key, m.value); err != nil { + return err + } + } + bmbuf.Set(m.key, m.value) + } + if err := kv.buf.CommitBatch(bmbuf); err != nil { + return err + } + if bmback != nil { + return kv.back.CommitBatch(bmback) + } + return nil +} + +func (kv *KeyValue) Close() error { + if err := kv.Flush(); err != nil { + return err + } + return kv.back.Close() +} + +func (kv *KeyValue) Find(start, end string) sorted.Iterator { + // TODO(adg): hold read lock while iterating? 
seems complicated + ibuf := kv.buf.Find(start, end) + iback := kv.back.Find(start, end) + return &iter{ + buf: subIter{Iterator: ibuf}, + back: subIter{Iterator: iback}, + } +} + +type batch struct { + mu sync.Mutex + mods []mod +} + +type mod struct { + isDelete bool + key, value string +} + +func (b *batch) Set(key, value string) { + defer b.mu.Unlock() + b.mu.Lock() + b.mods = append(b.mods, mod{key: key, value: value}) +} + +func (b *batch) Delete(key string) { + defer b.mu.Unlock() + b.mu.Lock() + b.mods = append(b.mods, mod{key: key, isDelete: true}) +} + +type iter struct { + buf, back subIter +} + +func (it *iter) current() *subIter { + switch { + case it.back.eof: + return &it.buf + case it.buf.eof: + return &it.back + case it.buf.key <= it.back.key: + return &it.buf + default: + return &it.back + } +} + +func (it *iter) Next() bool { + // Call Next on both iterators for the first time, if we haven't + // already, so that the key comparisons below are valid. + start := false + if it.buf.key == "" && !it.buf.eof { + start = it.buf.next() + } + if it.back.key == "" && !it.buf.eof { + start = it.back.next() || start + } + if start { + // We started iterating with at least one value. + return true + } + // Bail if both iterators are done. + if it.buf.eof && it.back.eof { + return false + } + // If one iterator is done, advance the other. + if it.buf.eof { + return it.back.next() + } + if it.back.eof { + return it.buf.next() + } + // If both iterators still going, + // advance the one that is further behind, + // or both simultaneously if they point to the same key. + switch { + case it.buf.key < it.back.key: + it.buf.next() + case it.buf.key > it.back.key: + it.back.next() + case it.buf.key == it.back.key: + n1, n2 := it.buf.next(), it.back.next() + if !n1 && !n2 { + // Both finished simultaneously. 
+ return false + } + } + return true +} + +func (it *iter) Key() string { + return it.current().key +} + +func (it *iter) Value() string { + return it.current().Value() +} + +func (it *iter) KeyBytes() []byte { + return it.current().KeyBytes() +} + +func (it *iter) ValueBytes() []byte { + return it.current().ValueBytes() +} + +func (it *iter) Close() error { + err1 := it.buf.Close() + err2 := it.back.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// subIter is an iterator (either the backing storage or the buffer) that +// keeps track of the current key and whether it has reached EOF. +type subIter struct { + sorted.Iterator + key string + eof bool +} + +func (it *subIter) next() bool { + if it.Next() { + it.key = it.Key() + return true + } + it.eof = true + return false +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/buffer/buffer_test.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/buffer/buffer_test.go new file mode 100644 index 00000000..dbb64648 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/buffer/buffer_test.go @@ -0,0 +1,104 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package buffer + +import ( + "testing" + + "camlistore.org/pkg/sorted" +) + +// TODO(adg): test batch mutations +// TODO(adg): test auto-flush behavior + +func TestBuffer(t *testing.T) { + var ( + toBack = []mod{ + {false, "b", "b1"}, + {false, "d", "d1"}, + {false, "f", "f1"}, + } + toBuf = []mod{ + {false, "a", "a2"}, + {false, "b", "b2"}, + {false, "c", "c2"}, + {false, "e", "e2"}, + {true, "f", ""}, + {false, "g", "g2"}, + } + backBeforeFlush = []mod{ + {false, "b", "b1"}, + {false, "d", "d1"}, + // f deleted + } + want = []mod{ + {false, "a", "a2"}, + {false, "b", "b2"}, + {false, "c", "c2"}, + {false, "d", "d1"}, + {false, "e", "e2"}, + // f deleted + {false, "g", "g2"}, + } + ) + + // Populate backing storage. + backing := sorted.NewMemoryKeyValue() + for _, m := range toBack { + backing.Set(m.key, m.value) + } + // Wrap with buffered storage, populate. + buf := New(sorted.NewMemoryKeyValue(), backing, 1<<20) + for _, m := range toBuf { + if m.isDelete { + buf.Delete(m.key) + } else { + buf.Set(m.key, m.value) + } + } + + // Check contents of buffered storage. + check(t, buf, "buffered", want) + check(t, backing, "backing before flush", backBeforeFlush) + + // Flush. + if err := buf.Flush(); err != nil { + t.Fatal("flush error: ", err) + } + + // Check contents of backing storage. 
+ check(t, backing, "backing after flush", want) +} + +func check(t *testing.T, kv sorted.KeyValue, prefix string, want []mod) { + it := kv.Find("", "") + for i, m := range want { + if !it.Next() { + t.Fatalf("%v: unexpected it.Next == false on iteration %d", prefix, i) + } + if k, v := it.Key(), it.Value(); k != m.key || v != m.value { + t.Errorf("%v: got key == %q value == %q, want key == %q value == %q on iteration %d", + prefix, k, v, m.key, m.value, i) + } + } + if it.Next() { + t.Errorf("%v: unexpected it.Next == true after complete iteration", prefix) + } + if err := it.Close(); err != nil { + t.Errorf("%v: error closing iterator: %v", prefix, err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/kv.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/kv.go new file mode 100644 index 00000000..2daaa839 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/kv.go @@ -0,0 +1,237 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package sorted provides a KeyValue interface and constructor registry. +package sorted + +import ( + "errors" + "fmt" + + "camlistore.org/pkg/jsonconfig" +) + +const ( + MaxKeySize = 767 // Maximum size, in bytes, for a key in any store implementing KeyValue. + MaxValueSize = 63000 // Maximum size, in bytes, for a value in any store implementing KeyValue. MaxKeySize and MaxValueSize values originate from InnoDB and MySQL limitations. 
+) + +const DefaultKVFileType = "leveldb" + +var ( + ErrNotFound = errors.New("sorted: key not found") + ErrKeyTooLarge = fmt.Errorf("sorted: key size is over %v", MaxKeySize) + ErrValueTooLarge = fmt.Errorf("sorted: value size is over %v", MaxValueSize) +) + +// KeyValue is a sorted, enumerable key-value interface supporting +// batch mutations. +type KeyValue interface { + // Get gets the value for the given key. It returns ErrNotFound if the DB + // does not contain the key. + Get(key string) (string, error) + + Set(key, value string) error + + // Delete deletes keys. Deleting a non-existent key does not return an error. + Delete(key string) error + + BeginBatch() BatchMutation + CommitBatch(b BatchMutation) error + + // Find returns an iterator positioned before the first key/value pair + // whose key is 'greater than or equal to' the given key. There may be no + // such pair, in which case the iterator will return false on Next. + // + // The optional end value specifies the exclusive upper + // bound. If the empty string, the iterator returns keys + // where "key >= start". + // If non-empty, the iterator returns keys where + // "key >= start && key < endHint". + // + // Any error encountered will be implicitly returned via the iterator. An + // error-iterator will yield no key/value pairs and closing that iterator + // will return that error. + Find(start, end string) Iterator + + // Close is a polite way for the server to shut down the storage. + // Implementations should never lose data after a Set, Delete, + // or CommmitBatch, though. + Close() error +} + +// Wiper is an optional interface that may be implemented by storage +// implementations. +type Wiper interface { + KeyValue + + // Wipe removes all key/value pairs. + Wipe() error +} + +// Iterator iterates over an index KeyValue's key/value pairs in key order. +// +// An iterator must be closed after use, but it is not necessary to read an +// iterator until exhaustion. 
+// +// An iterator is not necessarily goroutine-safe, but it is safe to use +// multiple iterators concurrently, with each in a dedicated goroutine. +type Iterator interface { + // Next moves the iterator to the next key/value pair. + // It returns false when the iterator is exhausted. + Next() bool + + // Key returns the key of the current key/value pair. + // Only valid after a call to Next returns true. + Key() string + + // KeyBytes returns the key as bytes. The returned bytes + // should not be written and are invalid after the next call + // to Next or Close. + // TODO(bradfitz): rename this and change it to return a + // mem.RO instead? + KeyBytes() []byte + + // Value returns the value of the current key/value pair. + // Only valid after a call to Next returns true. + Value() string + + // ValueBytes returns the value as bytes. The returned bytes + // should not be written and are invalid after the next call + // to Next or Close. + // TODO(bradfitz): rename this and change it to return a + // mem.RO instead? + ValueBytes() []byte + + // Close closes the iterator and returns any accumulated error. Exhausting + // all the key/value pairs in a table is not considered to be an error. + // It is valid to call Close multiple times. Other methods should not be + // called after the iterator has been closed. 
+ Close() error +} + +type BatchMutation interface { + Set(key, value string) + Delete(key string) +} + +type Mutation interface { + Key() string + Value() string + IsDelete() bool +} + +type mutation struct { + key string + value string // used if !delete + delete bool // if to be deleted +} + +func (m mutation) Key() string { + return m.key +} + +func (m mutation) Value() string { + return m.value +} + +func (m mutation) IsDelete() bool { + return m.delete +} + +func NewBatchMutation() BatchMutation { + return &batch{} +} + +type batch struct { + m []Mutation +} + +func (b *batch) Mutations() []Mutation { + return b.m +} + +func (b *batch) Delete(key string) { + b.m = append(b.m, mutation{key: key, delete: true}) +} + +func (b *batch) Set(key, value string) { + b.m = append(b.m, mutation{key: key, value: value}) +} + +var ( + ctors = make(map[string]func(jsonconfig.Obj) (KeyValue, error)) +) + +func RegisterKeyValue(typ string, fn func(jsonconfig.Obj) (KeyValue, error)) { + if typ == "" || fn == nil { + panic("zero type or func") + } + if _, dup := ctors[typ]; dup { + panic("duplication registration of type " + typ) + } + ctors[typ] = fn +} + +func NewKeyValue(cfg jsonconfig.Obj) (KeyValue, error) { + var s KeyValue + var err error + typ := cfg.RequiredString("type") + ctor, ok := ctors[typ] + if typ != "" && !ok { + return nil, fmt.Errorf("Invalid sorted.KeyValue type %q", typ) + } + if ok { + s, err = ctor(cfg) + if err != nil { + return nil, fmt.Errorf("error from %q KeyValue: %v", typ, err) + } + } + return s, cfg.Validate() +} + +// Foreach runs fn for each key/value pair in kv. If fn returns an error, +// that same error is returned from Foreach and iteration stops. +func Foreach(kv KeyValue, fn func(key, value string) error) error { + return ForeachInRange(kv, "", "", fn) +} + +// ForeachInRange runs fn for each key/value pair in kv in the range +// of start and end, which behave the same as kv.Find. 
If fn returns +// an error, that same error is returned from Foreach and iteration +// stops. +func ForeachInRange(kv KeyValue, start, end string, fn func(key, value string) error) error { + it := kv.Find(start, end) + for it.Next() { + if err := fn(it.Key(), it.Value()); err != nil { + it.Close() + return err + } + } + return it.Close() +} + +// CheckSizes returns ErrKeyTooLarge if key does not respect KeyMaxSize or +// ErrValueTooLarge if value does not respect ValueMaxSize +func CheckSizes(key, value string) error { + if len(key) > MaxKeySize { + return ErrKeyTooLarge + } + if len(value) > MaxValueSize { + return ErrValueTooLarge + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/kvfile/kvfile.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/kvfile/kvfile.go new file mode 100644 index 00000000..4959cb30 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/kvfile/kvfile.go @@ -0,0 +1,269 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kvfile provides an implementation of sorted.KeyValue +// on top of a single mutable database file on disk using +// github.com/cznic/kv. 
+package kvfile + +import ( + "bytes" + "errors" + "fmt" + "io" + "log" + "os" + "sync" + + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/kvutil" + "camlistore.org/pkg/sorted" + + "camlistore.org/third_party/github.com/cznic/kv" +) + +var _ sorted.Wiper = (*kvis)(nil) + +func init() { + sorted.RegisterKeyValue("kv", newKeyValueFromJSONConfig) +} + +// NewStorage is a convenience that calls newKeyValueFromJSONConfig +// with file as the kv storage file. +func NewStorage(file string) (sorted.KeyValue, error) { + return newKeyValueFromJSONConfig(jsonconfig.Obj{"file": file}) +} + +// newKeyValueFromJSONConfig returns a KeyValue implementation on top of a +// github.com/cznic/kv file. +func newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) { + file := cfg.RequiredString("file") + if err := cfg.Validate(); err != nil { + return nil, err + } + opts := &kv.Options{} + db, err := kvutil.Open(file, opts) + if err != nil { + return nil, err + } + is := &kvis{ + db: db, + opts: opts, + path: file, + } + return is, nil +} + +type kvis struct { + path string + db *kv.DB + opts *kv.Options + txmu sync.Mutex +} + +// TODO: use bytepool package. 
+func getBuf(n int) []byte { return make([]byte, n) } +func putBuf([]byte) {} + +func (is *kvis) Get(key string) (string, error) { + buf := getBuf(200) + defer putBuf(buf) + val, err := is.db.Get(buf, []byte(key)) + if err != nil { + return "", err + } + if val == nil { + return "", sorted.ErrNotFound + } + return string(val), nil +} + +func (is *kvis) Set(key, value string) error { + if err := sorted.CheckSizes(key, value); err != nil { + return err + } + return is.db.Set([]byte(key), []byte(value)) +} + +func (is *kvis) Delete(key string) error { + return is.db.Delete([]byte(key)) +} + +func (is *kvis) Find(start, end string) sorted.Iterator { + it := &iter{ + db: is.db, + startKey: start, + endKey: []byte(end), + } + it.enum, _, it.err = it.db.Seek([]byte(start)) + return it +} + +func (is *kvis) BeginBatch() sorted.BatchMutation { + return sorted.NewBatchMutation() +} + +func (is *kvis) Wipe() error { + // Unlock the already open DB. + if err := is.db.Close(); err != nil { + return err + } + if err := os.Remove(is.path); err != nil { + return err + } + + db, err := kv.Create(is.path, is.opts) + if err != nil { + return fmt.Errorf("error creating %s: %v", is.path, err) + } + is.db = db + return nil +} + +type batch interface { + Mutations() []sorted.Mutation +} + +func (is *kvis) CommitBatch(bm sorted.BatchMutation) error { + b, ok := bm.(batch) + if !ok { + return errors.New("invalid batch type") + } + is.txmu.Lock() + defer is.txmu.Unlock() + + good := false + defer func() { + if !good { + is.db.Rollback() + } + }() + + if err := is.db.BeginTransaction(); err != nil { + return err + } + for _, m := range b.Mutations() { + if m.IsDelete() { + if err := is.db.Delete([]byte(m.Key())); err != nil { + return err + } + } else { + if err := sorted.CheckSizes(m.Key(), m.Value()); err != nil { + return err + } + if err := is.db.Set([]byte(m.Key()), []byte(m.Value())); err != nil { + return err + } + } + } + + good = true + return is.db.Commit() +} + +func (is *kvis) 
Close() error { + log.Printf("Closing kvfile database %s", is.path) + return is.db.Close() +} + +type iter struct { + db *kv.DB + startKey string + endKey []byte + + enum *kv.Enumerator + + valid bool + key, val []byte + skey, sval *string // non-nil if valid + + err error + closed bool +} + +func (it *iter) Close() error { + it.closed = true + return it.err +} + +func (it *iter) KeyBytes() []byte { + if !it.valid { + panic("not valid") + } + return it.key +} + +func (it *iter) Key() string { + if !it.valid { + panic("not valid") + } + if it.skey != nil { + return *it.skey + } + str := string(it.key) + it.skey = &str + return str +} + +func (it *iter) ValueBytes() []byte { + if !it.valid { + panic("not valid") + } + return it.val +} + +func (it *iter) Value() string { + if !it.valid { + panic("not valid") + } + if it.sval != nil { + return *it.sval + } + str := string(it.val) + it.sval = &str + return str +} + +func (it *iter) end() bool { + it.valid = false + it.closed = true + return false +} + +func (it *iter) Next() bool { + if it.err != nil { + return false + } + if it.closed { + panic("Next called after Next returned value") + } + it.skey, it.sval = nil, nil + var err error + it.key, it.val, err = it.enum.Next() + if err == io.EOF { + it.err = nil + return it.end() + } + if err != nil { + it.err = err + return it.end() + } + if len(it.endKey) > 0 && bytes.Compare(it.key, it.endKey) >= 0 { + return it.end() + } + it.valid = true + return true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/kvfile/kvfile_test.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/kvfile/kvfile_test.go new file mode 100644 index 00000000..3b567160 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/kvfile/kvfile_test.go @@ -0,0 +1,45 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kvfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/kvtest" +) + +func TestKvfileKV(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "camlistore-kvfilekv_test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + dbname := filepath.Join(tmpDir, "testdb.kvfile") + kv, err := sorted.NewKeyValue(jsonconfig.Obj{ + "type": "kv", + "file": dbname, + }) + if err != nil { + t.Fatalf("Could not create kvfile sorted kv at %v: %v", dbname, err) + } + kvtest.TestSorted(t, kv) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/kvtest/kvtest.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/kvtest/kvtest.go new file mode 100644 index 00000000..0d4dd101 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/kvtest/kvtest.go @@ -0,0 +1,173 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kvtest tests sorted.KeyValue implementations. 
+package kvtest + +import ( + "reflect" + "testing" + + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/test" +) + +func TestSorted(t *testing.T, kv sorted.KeyValue) { + defer test.TLog(t)() + if !isEmpty(t, kv) { + t.Fatal("kv for test is expected to be initially empty") + } + set := func(k, v string) { + if err := kv.Set(k, v); err != nil { + t.Fatalf("Error setting %q to %q: %v", k, v, err) + } + } + set("foo", "bar") + if isEmpty(t, kv) { + t.Fatalf("iterator reports the kv is empty after adding foo=bar; iterator must be broken") + } + if v, err := kv.Get("foo"); err != nil || v != "bar" { + t.Errorf("get(foo) = %q, %v; want bar", v, err) + } + if v, err := kv.Get("NOT_EXIST"); err != sorted.ErrNotFound { + t.Errorf("get(NOT_EXIST) = %q, %v; want error sorted.ErrNotFound", v, err) + } + for i := 0; i < 2; i++ { + if err := kv.Delete("foo"); err != nil { + t.Errorf("Delete(foo) (on loop %d/2) returned error %v", i+1, err) + } + } + set("a", "av") + set("b", "bv") + set("c", "cv") + testEnumerate(t, kv, "", "", "av", "bv", "cv") + testEnumerate(t, kv, "a", "", "av", "bv", "cv") + testEnumerate(t, kv, "b", "", "bv", "cv") + testEnumerate(t, kv, "a", "c", "av", "bv") + testEnumerate(t, kv, "a", "b", "av") + testEnumerate(t, kv, "a", "a") + testEnumerate(t, kv, "d", "") + testEnumerate(t, kv, "d", "e") + + // Verify that < comparison works identically for all DBs (because it is affected by collation rules) + // http://postgresql.1045698.n5.nabble.com/String-comparison-and-the-SQL-standard-td5740721.html + set("foo|abc", "foo|abcv") + testEnumerate(t, kv, "foo|", "", "foo|abcv") + testEnumerate(t, kv, "foo|", "foo}", "foo|abcv") + + // Verify that the value isn't being used instead of the key in the range comparison. 
+ set("y", "x:foo") + testEnumerate(t, kv, "x:", "x~") + + testInsertLarge(t, kv) + testInsertTooLarge(t, kv) + + // TODO: test batch commits +} + +func testInsertLarge(t *testing.T, kv sorted.KeyValue) { + largeKey := make([]byte, sorted.MaxKeySize-1) + // setting all the bytes because postgres whines about an invalid byte sequence + // otherwise + for k, _ := range largeKey { + largeKey[k] = 'A' + } + largeKey[sorted.MaxKeySize-2] = 'B' + largeValue := make([]byte, sorted.MaxValueSize-1) + for k, _ := range largeValue { + largeValue[k] = 'A' + } + largeValue[sorted.MaxValueSize-2] = 'B' + + // insert with large key + if err := kv.Set(string(largeKey), "whatever"); err != nil { + t.Fatalf("Insertion of large key failed: %v", err) + } + + // and verify we can get it back, i.e. that the key hasn't been truncated. + it := kv.Find(string(largeKey), "") + if !it.Next() || it.Key() != string(largeKey) || it.Value() != "whatever" { + it.Close() + t.Fatalf("Find(largeKey) = %q, %q; want %q, %q", it.Key(), it.Value(), largeKey, "whatever") + } + it.Close() + + // insert with large value + if err := kv.Set("whatever", string(largeValue)); err != nil { + t.Fatalf("Insertion of large value failed: %v", err) + } + // and verify we can get it back, i.e. that the value hasn't been truncated. 
+ if v, err := kv.Get("whatever"); err != nil || v != string(largeValue) { + t.Fatalf("get(\"whatever\") = %q, %v; want %q", v, err, largeValue) + } + + // insert with large key and large value + if err := kv.Set(string(largeKey), string(largeValue)); err != nil { + t.Fatalf("Insertion of large key and value failed: %v", err) + } + // and verify we can get them back + it = kv.Find(string(largeKey), "") + defer it.Close() + if !it.Next() || it.Key() != string(largeKey) || it.Value() != string(largeValue) { + t.Fatalf("Find(largeKey) = %q, %q; want %q, %q", it.Key(), it.Value(), largeKey, largeValue) + } +} + +func testInsertTooLarge(t *testing.T, kv sorted.KeyValue) { + largeKey := make([]byte, sorted.MaxKeySize+1) + largeValue := make([]byte, sorted.MaxValueSize+1) + if err := kv.Set(string(largeKey), "whatever"); err == nil || err != sorted.ErrKeyTooLarge { + t.Fatalf("Insertion of too large a key should have failed, but err was %v", err) + } + if err := kv.Set("whatever", string(largeValue)); err == nil || err != sorted.ErrValueTooLarge { + t.Fatalf("Insertion of too large a value should have failed, but err was %v", err) + } +} + +func testEnumerate(t *testing.T, kv sorted.KeyValue, start, end string, want ...string) { + var got []string + it := kv.Find(start, end) + for it.Next() { + key, val := it.Key(), it.Value() + keyb, valb := it.KeyBytes(), it.ValueBytes() + if key != string(keyb) { + t.Errorf("Key and KeyBytes disagree: %q vs %q", key, keyb) + } + if val != string(valb) { + t.Errorf("Value and ValueBytes disagree: %q vs %q", val, valb) + } + if key+"v" != val { + t.Errorf("iterator returned unexpected pair for test: %q, %q", key, val) + } + got = append(got, val) + } + err := it.Close() + if err != nil { + t.Errorf("for enumerate of (%q, %q), Close error: %v", start, end, err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("for enumerate of (%q, %q), got: %q; want %q", start, end, got, want) + } +} + +func isEmpty(t *testing.T, kv sorted.KeyValue) 
bool { + it := kv.Find("", "") + hasRow := it.Next() + if err := it.Close(); err != nil { + t.Fatalf("Error closing iterator while testing for emptiness: %v", err) + } + return !hasRow +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/leveldb/leveldb.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/leveldb/leveldb.go new file mode 100644 index 00000000..5ed7ee99 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/leveldb/leveldb.go @@ -0,0 +1,258 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package leveldb provides an implementation of sorted.KeyValue +// on top of a single mutable database file on disk using +// github.com/syndtr/goleveldb. +package leveldb + +import ( + "errors" + "fmt" + "os" + "sync" + + "camlistore.org/pkg/env" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" + + "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb" + "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/filter" + "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/iterator" + "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/opt" + "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/util" +) + +var _ sorted.Wiper = (*kvis)(nil) + +func init() { + sorted.RegisterKeyValue("leveldb", newKeyValueFromJSONConfig) +} + +// NewStorage is a convenience that calls newKeyValueFromJSONConfig +// with file as the leveldb storage file. 
+func NewStorage(file string) (sorted.KeyValue, error) { + return newKeyValueFromJSONConfig(jsonconfig.Obj{"file": file}) +} + +// newKeyValueFromJSONConfig returns a KeyValue implementation on top of a +// github.com/syndtr/goleveldb/leveldb file. +func newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) { + file := cfg.RequiredString("file") + if err := cfg.Validate(); err != nil { + return nil, err + } + strictness := opt.DefaultStrict + if env.IsDev() { + // Be more strict in dev mode. + strictness = opt.StrictAll + } + opts := &opt.Options{ + // The default is 10, + // 8 means 2.126% or 1/47th disk check rate, + // 10 means 0.812% error rate (1/2^(bits/1.44)) or 1/123th disk check rate, + // 12 means 0.31% or 1/322th disk check rate. + // TODO(tgulacsi): decide which number is the best here. Till that go with the default. + Filter: filter.NewBloomFilter(10), + Strict: strictness, + } + db, err := leveldb.OpenFile(file, opts) + if err != nil { + return nil, err + } + is := &kvis{ + db: db, + path: file, + opts: opts, + readOpts: &opt.ReadOptions{Strict: strictness}, + // On machine crash we want to reindex anyway, and + // fsyncs may impose great performance penalty. 
+ writeOpts: &opt.WriteOptions{Sync: false}, + } + return is, nil +} + +type kvis struct { + path string + db *leveldb.DB + opts *opt.Options + readOpts *opt.ReadOptions + writeOpts *opt.WriteOptions + txmu sync.Mutex +} + +func (is *kvis) Get(key string) (string, error) { + val, err := is.db.Get([]byte(key), is.readOpts) + if err != nil { + if err == leveldb.ErrNotFound { + return "", sorted.ErrNotFound + } + return "", err + } + if val == nil { + return "", sorted.ErrNotFound + } + return string(val), nil +} + +func (is *kvis) Set(key, value string) error { + if err := sorted.CheckSizes(key, value); err != nil { + return err + } + return is.db.Put([]byte(key), []byte(value), is.writeOpts) +} + +func (is *kvis) Delete(key string) error { + return is.db.Delete([]byte(key), is.writeOpts) +} + +func (is *kvis) Find(start, end string) sorted.Iterator { + var startB, endB []byte + // A nil Range.Start is treated as a key before all keys in the DB. + if start != "" { + startB = []byte(start) + } + // A nil Range.Limit is treated as a key after all keys in the DB. + if end != "" { + endB = []byte(end) + } + it := &iter{ + it: is.db.NewIterator( + &util.Range{Start: startB, Limit: endB}, + is.readOpts, + ), + } + return it +} + +func (is *kvis) Wipe() error { + // Close the already open DB. + if err := is.db.Close(); err != nil { + return err + } + if err := os.RemoveAll(is.path); err != nil { + return err + } + + db, err := leveldb.OpenFile(is.path, is.opts) + if err != nil { + return fmt.Errorf("error creating %s: %v", is.path, err) + } + is.db = db + return nil +} + +func (is *kvis) BeginBatch() sorted.BatchMutation { + return &lvbatch{batch: new(leveldb.Batch)} +} + +type lvbatch struct { + errMu sync.Mutex + err error // Set if one of the mutations had too large a key or value. Sticky. 
+ + batch *leveldb.Batch +} + +func (lvb *lvbatch) Set(key, value string) { + lvb.errMu.Lock() + defer lvb.errMu.Unlock() + if lvb.err != nil { + return + } + if err := sorted.CheckSizes(key, value); err != nil { + if err == sorted.ErrKeyTooLarge { + lvb.err = fmt.Errorf("%v: %v", err, key) + } else { + lvb.err = fmt.Errorf("%v: %v", err, value) + } + return + } + lvb.batch.Put([]byte(key), []byte(value)) +} + +func (lvb *lvbatch) Delete(key string) { + lvb.batch.Delete([]byte(key)) +} + +func (is *kvis) CommitBatch(bm sorted.BatchMutation) error { + b, ok := bm.(*lvbatch) + if !ok { + return errors.New("invalid batch type") + } + b.errMu.Lock() + defer b.errMu.Unlock() + if b.err != nil { + return b.err + } + return is.db.Write(b.batch, is.writeOpts) +} + +func (is *kvis) Close() error { + return is.db.Close() +} + +type iter struct { + it iterator.Iterator + + key, val []byte + skey, sval *string // for caching string values + + err error + closed bool +} + +func (it *iter) Close() error { + it.closed = true + it.it.Release() + return nil +} + +func (it *iter) KeyBytes() []byte { + return it.it.Key() +} + +func (it *iter) Key() string { + if it.skey != nil { + return *it.skey + } + str := string(it.it.Key()) + it.skey = &str + return str +} + +func (it *iter) ValueBytes() []byte { + return it.it.Value() +} + +func (it *iter) Value() string { + if it.sval != nil { + return *it.sval + } + str := string(it.it.Value()) + it.sval = &str + return str +} + +func (it *iter) Next() bool { + if err := it.it.Error(); err != nil { + return false + } + if it.closed { + panic("Next called after Next returned value") + } + it.skey, it.sval = nil, nil + return it.it.Next() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/leveldb/leveldb_test.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/leveldb/leveldb_test.go new file mode 100644 index 00000000..7e72ffdd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/leveldb/leveldb_test.go @@ 
-0,0 +1,46 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package leveldb + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/kvtest" +) + +func TestLeveldbKV(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "camlistore-leveldbkv_test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + dbname := filepath.Join(tmpDir, "testdb.leveldb") + t.Logf("Testing leveldb %q.", dbname) + kv, err := sorted.NewKeyValue(jsonconfig.Obj{ + "type": "leveldb", + "file": dbname, + }) + if err != nil { + t.Fatalf("Could not create leveldb sorted kv at %v: %v", dbname, err) + } + kvtest.TestSorted(t, kv) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/mem.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/mem.go new file mode 100644 index 00000000..e4fb6370 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/mem.go @@ -0,0 +1,176 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sorted + +import ( + "bytes" + "errors" + "sync" + + "camlistore.org/pkg/jsonconfig" + "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/db" + "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/memdb" +) + +// NewMemoryKeyValue returns a KeyValue implementation that's backed only +// by memory. It's mostly useful for tests and development. +func NewMemoryKeyValue() KeyValue { + db := memdb.New(nil) + return &memKeys{db: db} +} + +// memKeys is a naive in-memory implementation of KeyValue for test & development +// purposes only. +type memKeys struct { + mu sync.Mutex // guards db + db db.DB +} + +// memIter converts from leveldb's db.Iterator interface, which +// operates on []byte, to Camlistore's index.Iterator, which operates +// on string. +type memIter struct { + lit db.Iterator // underlying leveldb iterator + k, v *string // if nil, not stringified yet + end []byte // if len(end) > 0, the upper bound +} + +func (t *memIter) Next() bool { + t.k, t.v = nil, nil + if !t.lit.Next() { + return false + } + if len(t.end) > 0 && bytes.Compare(t.KeyBytes(), t.end) >= 0 { + return false + } + return true +} + +func (s *memIter) Close() error { + if s.lit == nil { + // Already closed. 
+ return nil + } + err := s.lit.Close() + *s = memIter{} // to cause crashes on future access + return err +} + +func (s *memIter) KeyBytes() []byte { + return s.lit.Key() +} + +func (s *memIter) ValueBytes() []byte { + return s.lit.Value() +} + +func (s *memIter) Key() string { + if s.k != nil { + return *s.k + } + str := string(s.KeyBytes()) + s.k = &str + return str +} + +func (s *memIter) Value() string { + if s.v != nil { + return *s.v + } + str := string(s.ValueBytes()) + s.v = &str + return str +} + +func (mk *memKeys) Get(key string) (string, error) { + mk.mu.Lock() + defer mk.mu.Unlock() + k, err := mk.db.Get([]byte(key), nil) + if err == db.ErrNotFound { + return "", ErrNotFound + } + return string(k), err +} + +func (mk *memKeys) Find(start, end string) Iterator { + mk.mu.Lock() + defer mk.mu.Unlock() + lit := mk.db.Find([]byte(start), nil) + it := &memIter{lit: lit} + if end != "" { + it.end = []byte(end) + } + return it +} + +func (mk *memKeys) Set(key, value string) error { + if err := CheckSizes(key, value); err != nil { + return err + } + mk.mu.Lock() + defer mk.mu.Unlock() + return mk.db.Set([]byte(key), []byte(value), nil) +} + +func (mk *memKeys) Delete(key string) error { + mk.mu.Lock() + defer mk.mu.Unlock() + err := mk.db.Delete([]byte(key), nil) + if err == db.ErrNotFound { + return nil + } + return err +} + +func (mk *memKeys) BeginBatch() BatchMutation { + return &batch{} +} + +func (mk *memKeys) CommitBatch(bm BatchMutation) error { + b, ok := bm.(*batch) + if !ok { + return errors.New("invalid batch type; not an instance returned by BeginBatch") + } + mk.mu.Lock() + defer mk.mu.Unlock() + for _, m := range b.Mutations() { + if m.IsDelete() { + if err := mk.db.Delete([]byte(m.Key()), nil); err != nil { + return err + } + } else { + if err := CheckSizes(m.Key(), m.Value()); err != nil { + return err + } + if err := mk.db.Set([]byte(m.Key()), []byte(m.Value()), nil); err != nil { + return err + } + } + } + return nil +} + +func (mk *memKeys) 
Close() error { return nil } + +func init() { + RegisterKeyValue("memory", func(cfg jsonconfig.Obj) (KeyValue, error) { + if err := cfg.Validate(); err != nil { + return nil, err + } + return NewMemoryKeyValue(), nil + }) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/mem_test.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/mem_test.go new file mode 100644 index 00000000..2f722e26 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/mem_test.go @@ -0,0 +1,43 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sorted_test + +import ( + "testing" + + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/kvtest" +) + +func TestMemoryKV(t *testing.T) { + kv := sorted.NewMemoryKeyValue() + kvtest.TestSorted(t, kv) +} + +// TODO(mpl): move this test into kvtest. But that might require +// kvtest taking a "func () sorted.KeyValue) constructor param, +// so kvtest can create several and close in different ways. 
+func TestMemoryKV_DoubleClose(t *testing.T) { + kv := sorted.NewMemoryKeyValue() + + it := kv.Find("", "") + it.Close() + it.Close() + + kv.Close() + kv.Close() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/mongo/mongokv.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/mongo/mongokv.go new file mode 100644 index 00000000..69978036 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/mongo/mongokv.go @@ -0,0 +1,280 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mongo provides an implementation of sorted.KeyValue +// using MongoDB. +package mongo + +import ( + "bytes" + "errors" + "sync" + "time" + + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" + + "camlistore.org/third_party/labix.org/v2/mgo" + "camlistore.org/third_party/labix.org/v2/mgo/bson" +) + +// We explicitely separate the key and the value in a document, +// instead of simply storing as key:value, to avoid problems +// such as "." being an illegal char in a key name. Also because +// there is no way to do partial matching for key names (one can +// only check for their existence with bson.M{$exists: true}). +const ( + CollectionName = "keys" // MongoDB collection, equiv. 
to SQL table + mgoKey = "k" + mgoValue = "v" +) + +func init() { + sorted.RegisterKeyValue("mongo", newKeyValueFromJSONConfig) +} + +func newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) { + ins := &instance{ + server: cfg.OptionalString("host", "localhost"), + database: cfg.RequiredString("database"), + user: cfg.OptionalString("user", ""), + password: cfg.OptionalString("password", ""), + } + if err := cfg.Validate(); err != nil { + return nil, err + } + db, err := ins.getCollection() + if err != nil { + return nil, err + } + return &keyValue{db: db, session: ins.session}, nil +} + +// Implementation of Iterator +type iter struct { + res bson.M + *mgo.Iter + end []byte +} + +func (it *iter) Next() bool { + if !it.Iter.Next(&it.res) { + return false + } + if len(it.end) > 0 && bytes.Compare(it.KeyBytes(), it.end) >= 0 { + return false + } + return true +} + +func (it *iter) Key() string { + key, ok := (it.res[mgoKey]).(string) + if !ok { + return "" + } + return key +} + +func (it *iter) KeyBytes() []byte { + // TODO(bradfitz,mpl): this is less efficient than the string way. we should + // do better here, somehow, like all the other KeyValue iterators. + // For now: + return []byte(it.Key()) +} + +func (it *iter) Value() string { + value, ok := (it.res[mgoValue]).(string) + if !ok { + return "" + } + return value +} + +func (it *iter) ValueBytes() []byte { + // TODO(bradfitz,mpl): this is less efficient than the string way. we should + // do better here, somehow, like all the other KeyValue iterators. 
+ // For now: + return []byte(it.Value()) +} + +func (it *iter) Close() error { + return it.Iter.Close() +} + +// Implementation of KeyValue +type keyValue struct { + session *mgo.Session // so we can close it + mu sync.Mutex // guards db + db *mgo.Collection +} + +func (kv *keyValue) Get(key string) (string, error) { + kv.mu.Lock() + defer kv.mu.Unlock() + res := bson.M{} + q := kv.db.Find(&bson.M{mgoKey: key}) + err := q.One(&res) + if err != nil { + if err == mgo.ErrNotFound { + return "", sorted.ErrNotFound + } else { + return "", err + } + } + return res[mgoValue].(string), err +} + +func (kv *keyValue) Find(start, end string) sorted.Iterator { + kv.mu.Lock() + defer kv.mu.Unlock() + it := kv.db.Find(&bson.M{mgoKey: &bson.M{"$gte": start}}).Sort(mgoKey).Iter() + return &iter{res: bson.M{}, Iter: it, end: []byte(end)} +} + +func (kv *keyValue) Set(key, value string) error { + if err := sorted.CheckSizes(key, value); err != nil { + return err + } + kv.mu.Lock() + defer kv.mu.Unlock() + _, err := kv.db.Upsert(&bson.M{mgoKey: key}, &bson.M{mgoKey: key, mgoValue: value}) + return err +} + +// Delete removes the document with the matching key. +func (kv *keyValue) Delete(key string) error { + kv.mu.Lock() + defer kv.mu.Unlock() + err := kv.db.Remove(&bson.M{mgoKey: key}) + if err == mgo.ErrNotFound { + return nil + } + return err +} + +// Wipe removes all documents from the collection. 
+func (kv *keyValue) Wipe() error { + kv.mu.Lock() + defer kv.mu.Unlock() + _, err := kv.db.RemoveAll(nil) + return err +} + +type batch interface { + Mutations() []sorted.Mutation +} + +func (kv *keyValue) BeginBatch() sorted.BatchMutation { + return sorted.NewBatchMutation() +} + +func (kv *keyValue) CommitBatch(bm sorted.BatchMutation) error { + b, ok := bm.(batch) + if !ok { + return errors.New("invalid batch type") + } + + kv.mu.Lock() + defer kv.mu.Unlock() + for _, m := range b.Mutations() { + if m.IsDelete() { + if err := kv.db.Remove(bson.M{mgoKey: m.Key()}); err != nil { + return err + } + } else { + if err := sorted.CheckSizes(m.Key(), m.Value()); err != nil { + return err + } + if _, err := kv.db.Upsert(&bson.M{mgoKey: m.Key()}, &bson.M{mgoKey: m.Key(), mgoValue: m.Value()}); err != nil { + return err + } + } + } + return nil +} + +func (kv *keyValue) Close() error { + kv.session.Close() + return nil +} + +// Ping tests if MongoDB on host can be dialed. +func Ping(host string, timeout time.Duration) bool { + return (&instance{server: host}).ping(timeout) +} + +// instance helps with the low level details about +// the connection to MongoDB. +type instance struct { + server string + database string + user string + password string + session *mgo.Session +} + +func (ins *instance) url() string { + if ins.user == "" || ins.password == "" { + return ins.server + } + return ins.user + ":" + ins.password + "@" + ins.server + "/" + ins.database +} + +// ping won't work with old (1.2) mongo servers. 
+func (ins *instance) ping(timeout time.Duration) bool { + session, err := mgo.DialWithTimeout(ins.url(), timeout) + if err != nil { + return false + } + defer session.Close() + session.SetSyncTimeout(timeout) + if err = session.Ping(); err != nil { + return false + } + return true +} + +func (ins *instance) getConnection() (*mgo.Session, error) { + if ins.session != nil { + return ins.session, nil + } + // TODO(mpl): do some "client caching" as in mysql, to avoid systematically dialing? + session, err := mgo.Dial(ins.url()) + if err != nil { + return nil, err + } + session.SetMode(mgo.Monotonic, true) + session.SetSafe(&mgo.Safe{}) // so we get an ErrNotFound error when deleting an absent key + ins.session = session + return session, nil +} + +// TODO(mpl): I'm only calling getCollection at the beginning, and +// keeping the collection around and reusing it everywhere, instead +// of calling getCollection everytime, because that's the easiest. +// But I can easily change that. Gustavo says it does not make +// much difference either way. +// Brad, what do you think? +func (ins *instance) getCollection() (*mgo.Collection, error) { + session, err := ins.getConnection() + if err != nil { + return nil, err + } + session.SetSafe(&mgo.Safe{}) + session.SetMode(mgo.Strong, true) + c := session.DB(ins.database).C(CollectionName) + return c, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/mongo/mongokv_test.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/mongo/mongokv_test.go new file mode 100644 index 00000000..25945d72 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/mongo/mongokv_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mongo + +import ( + "testing" + + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/kvtest" + "camlistore.org/pkg/test/dockertest" +) + +// TestMongoKV tests against a real MongoDB instance, using a Docker container. +func TestMongoKV(t *testing.T) { + // SetupMongoContainer may skip or fatal the test if docker isn't found or something goes wrong when setting up the container. + // Thus, no error is returned + containerID, ip := dockertest.SetupMongoContainer(t) + defer containerID.KillRemove(t) + + kv, err := sorted.NewKeyValue(jsonconfig.Obj{ + "type": "mongo", + "host": ip, + "database": "camlitest", + }) + if err != nil { + t.Fatalf("mongo.NewKeyValue = %v", err) + } + kvtest.TestSorted(t, kv) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/mysql/cloudsql.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/mysql/cloudsql.go new file mode 100644 index 00000000..8109a838 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/mysql/cloudsql.go @@ -0,0 +1,69 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysql + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "strings" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + sqladmin "google.golang.org/api/sqladmin/v1beta3" + "google.golang.org/cloud/compute/metadata" +) + +const cloudSQLSuffix = ".cloudsql.google.internal" + +func maybeRemapCloudSQL(host string) (out string, err error) { + if !strings.HasSuffix(host, cloudSQLSuffix) { + return host, nil + } + inst := strings.TrimSuffix(host, cloudSQLSuffix) + if !metadata.OnGCE() { + return "", errors.New("CloudSQL support only available when running on Google Compute Engine.") + } + proj, err := metadata.ProjectID() + if err != nil { + return "", fmt.Errorf("Failed to lookup GCE project ID: %v", err) + } + + admin, _ := sqladmin.New(oauth2.NewClient(context.Background(), google.ComputeTokenSource(""))) + listRes, err := admin.Instances.List(proj).Do() + if err != nil { + return "", fmt.Errorf("error enumerating Cloud SQL instances: %v", err) + } + for _, it := range listRes.Items { + if !strings.EqualFold(it.Instance, inst) { + continue + } + js, _ := json.Marshal(it) + log.Printf("Found Cloud SQL instance %s: %s", inst, js) + for _, ipm := range it.IpAddresses { + return ipm.IpAddress, nil + } + return "", fmt.Errorf("No external IP address for Cloud SQL instances %s", inst) + } + var found []string + for _, it := range listRes.Items { + found = append(found, it.Instance) + } + return "", fmt.Errorf("Cloud SQL instance %q not found. Found: %q", inst, found) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/mysql/dbschema.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/mysql/dbschema.go new file mode 100644 index 00000000..bf8b338f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/mysql/dbschema.go @@ -0,0 +1,47 @@ +/* +Copyright 2011 The Camlistore Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysql + +import ( + "strconv" + + "camlistore.org/pkg/sorted" +) + +const requiredSchemaVersion = 22 + +func SchemaVersion() int { + return requiredSchemaVersion +} + +// Note: using character set "binary", as any knowledge +// of character set encodings is handled by higher layers. +// At this layer we're just obeying the IndexStorage interface, +// which is purely about bytes. +func SQLCreateTables() []string { + return []string{ + `CREATE TABLE IF NOT EXISTS /*DB*/.rows ( + k VARCHAR(` + strconv.Itoa(sorted.MaxKeySize) + `) NOT NULL PRIMARY KEY, + v VARCHAR(` + strconv.Itoa(sorted.MaxValueSize) + `)) + DEFAULT CHARACTER SET binary`, + + `CREATE TABLE IF NOT EXISTS /*DB*/.meta ( + metakey VARCHAR(255) NOT NULL PRIMARY KEY, + value VARCHAR(255) NOT NULL) + DEFAULT CHARACTER SET binary`, + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/mysql/mysqlkv.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/mysql/mysqlkv.go new file mode 100644 index 00000000..1e6f3c92 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/mysql/mysqlkv.go @@ -0,0 +1,211 @@ +/* +Copyright 2011 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mysql provides an implementation of sorted.KeyValue +// on top of MySQL. +package mysql + +import ( + "database/sql" + "errors" + "fmt" + "os" + "regexp" + "strconv" + "strings" + "sync" + + "camlistore.org/pkg/env" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/sqlkv" + _ "camlistore.org/third_party/github.com/go-sql-driver/mysql" +) + +func init() { + sorted.RegisterKeyValue("mysql", newKeyValueFromJSONConfig) +} + +func newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) { + var ( + user = cfg.RequiredString("user") + database = cfg.RequiredString("database") + host = cfg.OptionalString("host", "") + password = cfg.OptionalString("password", "") + ) + if err := cfg.Validate(); err != nil { + return nil, err + } + var err error + if host != "" { + host, err = maybeRemapCloudSQL(host) + if err != nil { + return nil, err + } + if !strings.Contains(host, ":") { + host += ":3306" + } + host = "tcp(" + host + ")" + } + // The DSN does NOT have a database name in it so it's + // cacheable and can be shared between different queues & the + // index, all sharing the same database server, cutting down + // number of TCP connections required. We add the database + // name in queries instead. 
+ dsn := fmt.Sprintf("%s:%s@%s/", user, password, host) + + db, err := openOrCachedDB(dsn) + if err != nil { + return nil, err + } + + if err := CreateDB(db, database); err != nil { + return nil, err + } + for _, tableSQL := range SQLCreateTables() { + tableSQL = strings.Replace(tableSQL, "/*DB*/", database, -1) + if _, err := db.Exec(tableSQL); err != nil { + errMsg := "error creating table with %q: %v." + createError := err + sv, err := serverVersion(db) + if err != nil { + return nil, err + } + if !hasLargeVarchar(sv) { + errMsg += "\nYour MySQL server is too old (< 5.0.3) to support VARCHAR larger than 255." + } + return nil, fmt.Errorf(errMsg, tableSQL, createError) + } + } + if _, err := db.Exec(fmt.Sprintf(`REPLACE INTO %s.meta VALUES ('version', '%d')`, database, SchemaVersion())); err != nil { + return nil, fmt.Errorf("error setting schema version: %v", err) + } + + kv := &keyValue{ + db: db, + KeyValue: &sqlkv.KeyValue{ + DB: db, + TablePrefix: database + ".", + }, + } + if err := kv.ping(); err != nil { + return nil, fmt.Errorf("MySQL db unreachable: %v", err) + } + version, err := kv.SchemaVersion() + if err != nil { + return nil, fmt.Errorf("error getting schema version (need to init database?): %v", err) + } + if version != requiredSchemaVersion { + if version == 20 && requiredSchemaVersion == 21 { + fmt.Fprintf(os.Stderr, fixSchema20to21) + } + if env.IsDev() { + // Good signal that we're using the devcam server, so help out + // the user with a more useful tip: + return nil, fmt.Errorf("database schema version is %d; expect %d (run \"devcam server --wipe\" to wipe both your blobs and re-populate the database schema)", version, requiredSchemaVersion) + } + return nil, fmt.Errorf("database schema version is %d; expect %d (need to re-init/upgrade database?)", + version, requiredSchemaVersion) + } + + return kv, nil +} + +// CreateDB creates the named database if it does not already exist. 
+func CreateDB(db *sql.DB, dbname string) error { + if dbname == "" { + return errors.New("can not create database: database name is missing") + } + if _, err := db.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", dbname)); err != nil { + return fmt.Errorf("error creating database %v: %v", dbname, err) + } + return nil +} + +// We keep a cache of open database handles. +var ( + dbsmu sync.Mutex + dbs = map[string]*sql.DB{} // DSN -> db +) + +func openOrCachedDB(dsn string) (*sql.DB, error) { + dbsmu.Lock() + defer dbsmu.Unlock() + if db, ok := dbs[dsn]; ok { + return db, nil + } + db, err := sql.Open("mysql", dsn) + if err != nil { + return nil, err + } + dbs[dsn] = db + return db, nil +} + +type keyValue struct { + *sqlkv.KeyValue + + db *sql.DB +} + +func (kv *keyValue) ping() error { + // TODO(bradfitz): something more efficient here? + _, err := kv.SchemaVersion() + return err +} + +func (kv *keyValue) SchemaVersion() (version int, err error) { + err = kv.db.QueryRow("SELECT value FROM " + kv.KeyValue.TablePrefix + "meta WHERE metakey='version'").Scan(&version) + return +} + +const fixSchema20to21 = `Character set in tables changed to binary, you can fix your tables with: +ALTER TABLE rows CONVERT TO CHARACTER SET binary; +ALTER TABLE meta CONVERT TO CHARACTER SET binary; +UPDATE meta SET value=21 WHERE metakey='version' AND value=20; +` + +// serverVersion returns the MySQL server version as []int{major, minor, revision}. 
// versionRx extracts "major.minor.revision" from a MySQL version string.
// The previous pattern required a "-suffix" (as in "5.5.41-0ubuntu1"),
// which wrongly rejected plain versions such as "5.7.30"; the suffix is
// now optional. Compiled once at package level instead of per call.
var versionRx = regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)`)

// serverVersion returns the MySQL server version as []int{major, minor, revision}.
func serverVersion(db *sql.DB) ([]int, error) {
	var version string
	if err := db.QueryRow("SELECT VERSION()").Scan(&version); err != nil {
		return nil, fmt.Errorf("error getting MySQL server version: %v", err)
	}
	m := versionRx.FindStringSubmatch(version)
	if len(m) < 4 {
		return nil, fmt.Errorf("bogus MySQL server version: %v", version)
	}
	major, _ := strconv.Atoi(m[1]) // submatches are all-digit, so Atoi cannot fail
	minor, _ := strconv.Atoi(m[2])
	rev, _ := strconv.Atoi(m[3])
	return []int{major, minor, rev}, nil
}

// hasLargeVarchar returns whether the given version (as []int{major, minor, revision})
// supports VARCHAR larger than 255, i.e. whether the server is MySQL 5.0.3 or newer.
func hasLargeVarchar(version []int) bool {
	if len(version) < 3 {
		panic(fmt.Sprintf("bogus mysql server version %v", version))
	}
	major, minor, rev := version[0], version[1], version[2]
	switch {
	case major > 5:
		// Fix: the original fell through to "major == 5 && ..." here and
		// returned false for e.g. 6.0.0 or 8.0.x.
		return true
	case major < 5:
		return false
	case minor > 0:
		return true
	default:
		// 5.0.x: large VARCHAR arrived in 5.0.3.
		return rev >= 3
	}
}
+*/ + +package mysql + +import ( + "testing" + + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/kvtest" + "camlistore.org/pkg/test/dockertest" +) + +// TestMySQLKV tests against a real MySQL instance, using a Docker container. +func TestMySQLKV(t *testing.T) { + dbname := "camlitest_" + osutil.Username() + containerID, ip := dockertest.SetupMySQLContainer(t, dbname) + defer containerID.KillRemove(t) + + // TODO(mpl): add test for serverVersion once we host the docker image ourselves + // (and hence have the control over the version). + + kv, err := sorted.NewKeyValue(jsonconfig.Obj{ + "type": "mysql", + "host": ip + ":3306", + "database": dbname, + "user": dockertest.MySQLUsername, + "password": dockertest.MySQLPassword, + }) + if err != nil { + t.Fatalf("mysql.NewKeyValue = %v", err) + } + kvtest.TestSorted(t, kv) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/postgres/dbschema.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/postgres/dbschema.go new file mode 100644 index 00000000..4d9385fb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/postgres/dbschema.go @@ -0,0 +1,108 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package postgres + +import ( + "strconv" + + "camlistore.org/pkg/sorted" +) + +const requiredSchemaVersion = 2 + +func SchemaVersion() int { + return requiredSchemaVersion +} + +func SQLCreateTables() []string { + return []string{ + `CREATE TABLE IF NOT EXISTS rows ( + k VARCHAR(` + strconv.Itoa(sorted.MaxKeySize) + `) NOT NULL PRIMARY KEY, + v VARCHAR(` + strconv.Itoa(sorted.MaxValueSize) + `))`, + + `CREATE TABLE IF NOT EXISTS meta ( + metakey VARCHAR(255) NOT NULL PRIMARY KEY, + value VARCHAR(255) NOT NULL)`, + } +} + +func SQLDefineReplace() []string { + return []string{ + // The first 3 statements here are a work around that allows us to issue + // the "CREATE LANGUAGE plpsql;" statement only if the language doesn't + // already exist. + `CREATE OR REPLACE FUNCTION create_language_plpgsql() RETURNS INTEGER AS +$$ +CREATE LANGUAGE plpgsql; +SELECT 1; +$$ +LANGUAGE SQL;`, + + `SELECT CASE WHEN NOT +( + SELECT TRUE AS exists + FROM pg_language + WHERE lanname = 'plpgsql' + UNION + SELECT FALSE AS exists + ORDER BY exists DESC + LIMIT 1 +) +THEN + create_language_plpgsql() +ELSE + 0 +END AS plpgsql_created;`, + + `DROP FUNCTION create_language_plpgsql();`, + + `CREATE OR REPLACE FUNCTION replaceinto(key TEXT, value TEXT) RETURNS VOID AS +$$ +BEGIN + LOOP + UPDATE rows SET v = value WHERE k = key; + IF found THEN + RETURN; + END IF; + BEGIN + INSERT INTO rows(k,v) VALUES (key, value); + RETURN; + EXCEPTION WHEN unique_violation THEN + END; + END LOOP; +END; +$$ +LANGUAGE plpgsql;`, + `CREATE OR REPLACE FUNCTION replaceintometa(key TEXT, val TEXT) RETURNS VOID AS +$$ +BEGIN + LOOP + UPDATE meta SET value = val WHERE metakey = key; + IF found THEN + RETURN; + END IF; + BEGIN + INSERT INTO meta(metakey,value) VALUES (key, val); + RETURN; + EXCEPTION WHEN unique_violation THEN + END; + END LOOP; +END; +$$ +LANGUAGE plpgsql;`, + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/postgres/postgreskv.go 
b/vendor/github.com/camlistore/camlistore/pkg/sorted/postgres/postgreskv.go new file mode 100644 index 00000000..227a5bd2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/postgres/postgreskv.go @@ -0,0 +1,143 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package postgres provides an implementation of sorted.KeyValue +// on top of PostgreSQL. +package postgres + +import ( + "database/sql" + "fmt" + "regexp" + + "camlistore.org/pkg/env" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/sqlkv" + + _ "camlistore.org/third_party/github.com/lib/pq" +) + +func init() { + sorted.RegisterKeyValue("postgres", newKeyValueFromJSONConfig) +} + +func newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) { + conninfo := fmt.Sprintf("user=%s dbname=%s host=%s password=%s sslmode=%s", + cfg.RequiredString("user"), + cfg.RequiredString("database"), + cfg.OptionalString("host", "localhost"), + cfg.OptionalString("password", ""), + cfg.OptionalString("sslmode", "require"), + ) + if err := cfg.Validate(); err != nil { + return nil, err + } + db, err := sql.Open("postgres", conninfo) + if err != nil { + return nil, err + } + for _, tableSql := range SQLCreateTables() { + if _, err := db.Exec(tableSql); err != nil { + return nil, fmt.Errorf("error creating table with %q: %v", tableSql, err) + } + } + for _, statement := range SQLDefineReplace() { + if _, err := 
db.Exec(statement); err != nil { + return nil, fmt.Errorf("error setting up replace statement with %q: %v", statement, err) + } + } + r, err := db.Query(fmt.Sprintf(`SELECT replaceintometa('version', '%d')`, SchemaVersion())) + if err != nil { + return nil, fmt.Errorf("error setting schema version: %v", err) + } + r.Close() + + kv := &keyValue{ + db: db, + KeyValue: &sqlkv.KeyValue{ + DB: db, + SetFunc: altSet, + BatchSetFunc: altBatchSet, + PlaceHolderFunc: replacePlaceHolders, + }, + } + if err := kv.ping(); err != nil { + return nil, fmt.Errorf("PostgreSQL db unreachable: %v", err) + } + version, err := kv.SchemaVersion() + if err != nil { + return nil, fmt.Errorf("error getting schema version (need to init database?): %v", err) + } + if version != requiredSchemaVersion { + if env.IsDev() { + // Good signal that we're using the devcam server, so help out + // the user with a more useful tip: + return nil, fmt.Errorf("database schema version is %d; expect %d (run \"devcam server --wipe\" to wipe both your blobs and re-populate the database schema)", version, requiredSchemaVersion) + } + return nil, fmt.Errorf("database schema version is %d; expect %d (need to re-init/upgrade database?)", + version, requiredSchemaVersion) + } + + return kv, nil +} + +type keyValue struct { + *sqlkv.KeyValue + db *sql.DB +} + +// postgres does not have REPLACE INTO (upsert), so we use that custom +// one for Set operations instead +func altSet(db *sql.DB, key, value string) error { + r, err := db.Query("SELECT replaceinto($1, $2)", key, value) + if err != nil { + return err + } + return r.Close() +} + +// postgres does not have REPLACE INTO (upsert), so we use that custom +// one for Set operations in batch instead +func altBatchSet(tx *sql.Tx, key, value string) error { + r, err := tx.Query("SELECT replaceinto($1, $2)", key, value) + if err != nil { + return err + } + return r.Close() +} + +var qmark = regexp.MustCompile(`\?`) + +// replace all ? 
placeholders into the corresponding $n in queries +var replacePlaceHolders = func(query string) string { + i := 0 + dollarInc := func(b []byte) []byte { + i++ + return []byte(fmt.Sprintf("$%d", i)) + } + return string(qmark.ReplaceAllFunc([]byte(query), dollarInc)) +} + +func (kv *keyValue) ping() error { + _, err := kv.SchemaVersion() + return err +} + +func (kv *keyValue) SchemaVersion() (version int, err error) { + err = kv.db.QueryRow("SELECT value FROM meta WHERE metakey='version'").Scan(&version) + return +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/postgres/postgreskv_test.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/postgres/postgreskv_test.go new file mode 100644 index 00000000..2d825e4e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/postgres/postgreskv_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package postgres + +import ( + "testing" + + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/kvtest" + "camlistore.org/pkg/test/dockertest" +) + +// TestPostgreSQLKV tests against a real PostgreSQL instance, using a Docker container. 
+func TestPostgreSQLKV(t *testing.T) { + dbname := "camlitest_" + osutil.Username() + containerID, ip := dockertest.SetupPostgreSQLContainer(t, dbname) + defer containerID.KillRemove(t) + + kv, err := sorted.NewKeyValue(jsonconfig.Obj{ + "type": "postgres", + "host": ip, + "database": dbname, + "user": dockertest.PostgresUsername, + "password": dockertest.PostgresPassword, + "sslmode": "disable", + }) + if err != nil { + t.Fatalf("postgres.NewKeyValue = %v", err) + } + kvtest.TestSorted(t, kv) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlite/dbschema.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlite/dbschema.go new file mode 100644 index 00000000..fa467d91 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlite/dbschema.go @@ -0,0 +1,110 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlite + +import ( + "bytes" + "database/sql" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + + "camlistore.org/pkg/sorted" +) + +const requiredSchemaVersion = 1 + +func SchemaVersion() int { + return requiredSchemaVersion +} + +func SQLCreateTables() []string { + // sqlite ignores n in VARCHAR(n), but setting it as such for consistency with + // other sqls. 
+ return []string{ + `CREATE TABLE rows ( + k VARCHAR(` + strconv.Itoa(sorted.MaxKeySize) + `) NOT NULL PRIMARY KEY, + v VARCHAR(` + strconv.Itoa(sorted.MaxValueSize) + `))`, + + `CREATE TABLE meta ( + metakey VARCHAR(255) NOT NULL PRIMARY KEY, + value VARCHAR(255) NOT NULL)`, + } +} + +// IsWALCapable checks if the installed sqlite3 library can +// use Write-Ahead Logging (i.e version >= 3.7.0) +func IsWALCapable() bool { + // TODO(mpl): alternative to make it work on windows + cmdPath, err := exec.LookPath("pkg-config") + if err != nil { + log.Printf("Could not find pkg-config to check sqlite3 lib version: %v", err) + return false + } + var stderr bytes.Buffer + cmd := exec.Command(cmdPath, "--modversion", "sqlite3") + cmd.Stderr = &stderr + if runtime.GOOS == "darwin" && os.Getenv("PKG_CONFIG_PATH") == "" { + matches, err := filepath.Glob("/usr/local/Cellar/sqlite/*/lib/pkgconfig/sqlite3.pc") + if err == nil && len(matches) > 0 { + cmd.Env = append(os.Environ(), "PKG_CONFIG_PATH="+filepath.Dir(matches[0])) + } + } + + out, err := cmd.Output() + if err != nil { + log.Printf("Could not check sqlite3 version: %v\n", stderr.String()) + return false + } + version := strings.TrimRight(string(out), "\n") + return version >= "3.7.0" +} + +// EnableWAL returns the statement to enable Write-Ahead Logging, +// which improves SQLite concurrency. +// Requires SQLite >= 3.7.0 +func EnableWAL() string { + return "PRAGMA journal_mode = WAL" +} + +// initDB creates a new sqlite database based on the file at path. +func initDB(path string) error { + db, err := sql.Open("sqlite3", path) + if err != nil { + return err + } + defer db.Close() + for _, tableSql := range SQLCreateTables() { + if _, err := db.Exec(tableSql); err != nil { + return err + } + } + if IsWALCapable() { + if _, err := db.Exec(EnableWAL()); err != nil { + return err + } + } else { + log.Print("WARNING: An SQLite DB without Write Ahead Logging will most likely fail. 
See http://camlistore.org/issues/114") + } + _, err = db.Exec(fmt.Sprintf(`REPLACE INTO meta VALUES ('version', '%d')`, SchemaVersion())) + return err +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlite/sqlite_cond.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlite/sqlite_cond.go new file mode 100644 index 00000000..b53ff42e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlite/sqlite_cond.go @@ -0,0 +1,11 @@ +// +build with_sqlite + +package sqlite + +import ( + _ "camlistore.org/third_party/github.com/mattn/go-sqlite3" +) + +func init() { + compiled = true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlite/sqlitekv.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlite/sqlitekv.go new file mode 100644 index 00000000..04f92802 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlite/sqlitekv.go @@ -0,0 +1,122 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package sqlite provides an implementation of sorted.KeyValue +// using an SQLite database file. 
+package sqlite + +import ( + "database/sql" + "errors" + "fmt" + "os" + + "camlistore.org/pkg/env" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/sqlkv" +) + +func init() { + sorted.RegisterKeyValue("sqlite", newKeyValueFromConfig) +} + +func newKeyValueFromConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) { + if !compiled { + return nil, ErrNotCompiled + } + + file := cfg.RequiredString("file") + if err := cfg.Validate(); err != nil { + return nil, err + } + + fi, err := os.Stat(file) + if os.IsNotExist(err) || (err == nil && fi.Size() == 0) { + if err := initDB(file); err != nil { + return nil, fmt.Errorf("could not initialize sqlite DB at %s: %v", file, err) + } + } + db, err := sql.Open("sqlite3", file) + if err != nil { + return nil, err + } + kv := &keyValue{ + file: file, + db: db, + KeyValue: &sqlkv.KeyValue{ + DB: db, + Serial: true, + }, + } + + version, err := kv.SchemaVersion() + if err != nil { + return nil, fmt.Errorf("error getting schema version (need to init database with 'camtool dbinit %s'?): %v", file, err) + } + + if err := kv.ping(); err != nil { + return nil, err + } + + if version != requiredSchemaVersion { + if env.IsDev() { + // Good signal that we're using the devcam server, so help out + // the user with a more useful tip: + return nil, fmt.Errorf("database schema version is %d; expect %d (run \"devcam server --wipe\" to wipe both your blobs and re-populate the database schema)", version, requiredSchemaVersion) + } + return nil, fmt.Errorf("database schema version is %d; expect %d (need to re-init/upgrade database?)", + version, requiredSchemaVersion) + } + + return kv, nil + +} + +type keyValue struct { + *sqlkv.KeyValue + + file string + db *sql.DB +} + +var compiled = false + +// CompiledIn returns whether SQLite support is compiled in. +// If it returns false, the build tag "with_sqlite" was not specified. 
+func CompiledIn() bool { + return compiled +} + +var ErrNotCompiled = errors.New("camlistored was not built with SQLite support. If you built with make.go, use go run make.go --sqlite=true. If you used go get or get install, use go {get,install} --tags=with_sqlite" + compileHint()) + +func compileHint() string { + if _, err := os.Stat("/etc/apt"); err == nil { + return " (Hint: apt-get install libsqlite3-dev)" + } + return "" +} + +func (kv *keyValue) ping() error { + // TODO(bradfitz): something more efficient here? + _, err := kv.SchemaVersion() + return err +} + +func (kv *keyValue) SchemaVersion() (version int, err error) { + err = kv.db.QueryRow("SELECT value FROM meta WHERE metakey='version'").Scan(&version) + return +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlite/sqlitekv_test.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlite/sqlitekv_test.go new file mode 100644 index 00000000..570f082e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlite/sqlitekv_test.go @@ -0,0 +1,48 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sqlite + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" + "camlistore.org/pkg/sorted/kvtest" +) + +func TestSQLiteKV(t *testing.T) { + if !CompiledIn() { + t.Skip(ErrNotCompiled.Error()) + } + tmpDir, err := ioutil.TempDir("", "camlistore-sqlitekv_test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + dbname := filepath.Join(tmpDir, "testdb.sqlite") + kv, err := sorted.NewKeyValue(jsonconfig.Obj{ + "type": "sqlite", + "file": dbname, + }) + if err != nil { + t.Fatalf("Could not create sqlite sorted kv at %v: %v", dbname, err) + } + kvtest.TestSorted(t, kv) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlkv/sqlkv.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlkv/sqlkv.go new file mode 100644 index 00000000..93b51abb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlkv/sqlkv.go @@ -0,0 +1,296 @@ +/* +Copyright 2012 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package sqlkv implements the sorted.KeyValue interface using an *sql.DB. +package sqlkv + +import ( + "database/sql" + "errors" + "fmt" + "log" + "regexp" + "strings" + "sync" + + "camlistore.org/pkg/leak" + "camlistore.org/pkg/sorted" +) + +// KeyValue implements the sorted.KeyValue interface using an *sql.DB. 
+type KeyValue struct { + DB *sql.DB + + // SetFunc is an optional func to use when REPLACE INTO does not exist + SetFunc func(*sql.DB, string, string) error + BatchSetFunc func(*sql.Tx, string, string) error + + // PlaceHolderFunc optionally replaces ? placeholders + // with the right ones for the rdbms in use. + PlaceHolderFunc func(string) string + + // Serial determines whether a Go-level mutex protects DB from + // concurrent access. This isn't perfect and exists just for + // SQLite, whose driver likes to return "the database is + // locked" (camlistore.org/issue/114), so this keeps some + // pressure off. But we still trust SQLite to deal with + // concurrency in most cases. + Serial bool + + // TablePrefix optionally provides a prefix for SQL table + // names. This is typically "dbname.", ending in a period. + TablePrefix string + + mu sync.Mutex // the mutex used, if Serial is set + + queriesInitOnce sync.Once // guards initialization of both queries and replacer + replacer *strings.Replacer + + queriesMu sync.RWMutex + queries map[string]string +} + +// sql returns the query, replacing placeholders using PlaceHolderFunc, +// and /*TPRE*/ with TablePrefix. 
+func (kv *KeyValue) sql(sqlStmt string) string { + // string manipulation is done only once + kv.queriesInitOnce.Do(func() { + kv.queries = make(map[string]string, 8) // we have 8 queries in this file + kv.replacer = strings.NewReplacer("/*TPRE*/", kv.TablePrefix) + }) + kv.queriesMu.RLock() + sqlQuery, ok := kv.queries[sqlStmt] + kv.queriesMu.RUnlock() + if ok { + return sqlQuery + } + kv.queriesMu.Lock() + // check again, now holding the lock + if sqlQuery, ok = kv.queries[sqlStmt]; ok { + kv.queriesMu.Unlock() + return sqlQuery + } + sqlQuery = sqlStmt + if f := kv.PlaceHolderFunc; f != nil { + sqlQuery = f(sqlQuery) + } + sqlQuery = kv.replacer.Replace(sqlQuery) + kv.queries[sqlStmt] = sqlQuery + kv.queriesMu.Unlock() + return sqlQuery +} + +type batchTx struct { + tx *sql.Tx + err error // sticky + kv *KeyValue +} + +func (b *batchTx) Set(key, value string) { + if b.err != nil { + return + } + if err := sorted.CheckSizes(key, value); err != nil { + if err == sorted.ErrKeyTooLarge { + b.err = fmt.Errorf("%v: %v", err, key) + } else { + b.err = fmt.Errorf("%v: %v", err, value) + } + return + } + if b.kv.BatchSetFunc != nil { + b.err = b.kv.BatchSetFunc(b.tx, key, value) + return + } + _, b.err = b.tx.Exec(b.kv.sql("REPLACE INTO /*TPRE*/rows (k, v) VALUES (?, ?)"), key, value) +} + +func (b *batchTx) Delete(key string) { + if b.err != nil { + return + } + _, b.err = b.tx.Exec(b.kv.sql("DELETE FROM /*TPRE*/rows WHERE k=?"), key) +} + +func (kv *KeyValue) BeginBatch() sorted.BatchMutation { + if kv.Serial { + kv.mu.Lock() + } + tx, err := kv.DB.Begin() + if err != nil { + log.Printf("SQL BEGIN BATCH: %v", err) + } + return &batchTx{ + tx: tx, + err: err, + kv: kv, + } +} + +func (kv *KeyValue) CommitBatch(b sorted.BatchMutation) error { + if kv.Serial { + defer kv.mu.Unlock() + } + bt, ok := b.(*batchTx) + if !ok { + return fmt.Errorf("wrong BatchMutation type %T", b) + } + if bt.err != nil { + return bt.err + } + return bt.tx.Commit() +} + +func (kv *KeyValue) 
Get(key string) (value string, err error) { + if kv.Serial { + kv.mu.Lock() + defer kv.mu.Unlock() + } + err = kv.DB.QueryRow(kv.sql("SELECT v FROM /*TPRE*/rows WHERE k=?"), key).Scan(&value) + if err == sql.ErrNoRows { + err = sorted.ErrNotFound + } + return +} + +func (kv *KeyValue) Set(key, value string) error { + if err := sorted.CheckSizes(key, value); err != nil { + return err + } + if kv.Serial { + kv.mu.Lock() + defer kv.mu.Unlock() + } + if kv.SetFunc != nil { + return kv.SetFunc(kv.DB, key, value) + } + _, err := kv.DB.Exec(kv.sql("REPLACE INTO /*TPRE*/rows (k, v) VALUES (?, ?)"), key, value) + return err +} + +func (kv *KeyValue) Delete(key string) error { + if kv.Serial { + kv.mu.Lock() + defer kv.mu.Unlock() + } + _, err := kv.DB.Exec(kv.sql("DELETE FROM /*TPRE*/rows WHERE k=?"), key) + return err +} + +func (kv *KeyValue) Wipe() error { + if kv.Serial { + kv.mu.Lock() + defer kv.mu.Unlock() + } + _, err := kv.DB.Exec(kv.sql("DELETE FROM /*TPRE*/rows")) + return err +} + +func (kv *KeyValue) Close() error { return kv.DB.Close() } + +func (kv *KeyValue) Find(start, end string) sorted.Iterator { + if kv.Serial { + kv.mu.Lock() + // TODO(mpl): looks like sqlite considers the db locked until we've closed + // the iterator, so we can't do anything else until then. We should probably + // move that Unlock to the closing of the iterator. Investigating. + defer kv.mu.Unlock() + } + var rows *sql.Rows + var err error + if end == "" { + rows, err = kv.DB.Query(kv.sql("SELECT k, v FROM /*TPRE*/rows WHERE k >= ? ORDER BY k "), start) + } else { + rows, err = kv.DB.Query(kv.sql("SELECT k, v FROM /*TPRE*/rows WHERE k >= ? AND k < ? ORDER BY k "), start, end) + } + if err != nil { + log.Printf("unexpected query error: %v", err) + return &iter{err: err} + } + + it := &iter{ + kv: kv, + rows: rows, + closeCheck: leak.NewChecker(), + } + return it +} + +var wordThenPunct = regexp.MustCompile(`^\w+\W$`) + +// iter is a iterator over sorted key/value pairs in rows. 
+type iter struct { + kv *KeyValue + end string // optional end bound + err error // accumulated error, returned at Close + + closeCheck *leak.Checker + + rows *sql.Rows // if non-nil, the rows we're reading from + + key sql.RawBytes + val sql.RawBytes + skey, sval *string // if non-nil, it's been stringified +} + +var errClosed = errors.New("sqlkv: Iterator already closed") + +func (t *iter) KeyBytes() []byte { return t.key } +func (t *iter) Key() string { + if t.skey != nil { + return *t.skey + } + str := string(t.key) + t.skey = &str + return str +} + +func (t *iter) ValueBytes() []byte { return t.val } +func (t *iter) Value() string { + if t.sval != nil { + return *t.sval + } + str := string(t.val) + t.sval = &str + return str +} + +func (t *iter) Close() error { + t.closeCheck.Close() + if t.rows != nil { + t.rows.Close() + t.rows = nil + } + err := t.err + t.err = errClosed + return err +} + +func (t *iter) Next() bool { + if t.err != nil { + return false + } + t.skey, t.sval = nil, nil + if !t.rows.Next() { + return false + } + t.err = t.rows.Scan(&t.key, &t.val) + if t.err != nil { + log.Printf("unexpected Scan error: %v", t.err) + return false + } + return true +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlkv/sqlkv_test.go b/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlkv/sqlkv_test.go new file mode 100644 index 00000000..6cb30290 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/sorted/sqlkv/sqlkv_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlkv + +import ( + "strings" + "testing" +) + +var queries = []string{ + "REPLACE INTO /*TPRE*/rows (k, v) VALUES (?, ?)", + "DELETE FROM /*TPRE*/rows WHERE k=?", + "SELECT v FROM /*TPRE*/rows WHERE k=?", + "REPLACE INTO /*TPRE*/rows (k, v) VALUES (?, ?)", + "DELETE FROM /*TPRE*/rows WHERE k=?", + "DELETE FROM /*TPRE*/rows", + "SELECT k, v FROM /*TPRE*/rows WHERE k >= ? ORDER BY k ", + "SELECT k, v FROM /*TPRE*/rows WHERE k >= ? AND k < ? ORDER BY k ", +} + +var ( + qmarkRepl = strings.NewReplacer("?", ":placeholder") + + kv = &KeyValue{ + TablePrefix: "T_", + PlaceHolderFunc: func(q string) string { return qmarkRepl.Replace(q) }, + } +) + +func TestSql(t *testing.T) { + repl := strings.NewReplacer("/*TPRE*/", "T_", "?", ":placeholder") + for i, q := range queries { + want := repl.Replace(q) + got := kv.sql(q) + if want != got { + t.Errorf("%d. got %q, wanted %q.", i, got, want) + } + } +} + +func BenchmarkSql(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, s := range queries { + kv.sql(s) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/strutil/intern.go b/vendor/github.com/camlistore/camlistore/pkg/strutil/intern.go new file mode 100644 index 00000000..633ebb36 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/strutil/intern.go @@ -0,0 +1,39 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strutil + +var internStr = map[string]string{} + +// RegisterCommonString adds common strings to the interned string +// table. This should be called during init from the main +// goroutine, not later at runtime. +func RegisterCommonString(s ...string) { + for _, v := range s { + internStr[v] = v + } +} + +// StringFromBytes returns string(v), minimizing copies for common values of v +// as previously registered with RegisterCommonString. +func StringFromBytes(v []byte) string { + // In Go 1.3, this string conversion in the map lookup does not allocate + // to make a new string. We depend on Go 1.3, so this is always free: + if s, ok := internStr[string(v)]; ok { + return s + } + return string(v) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/strutil/strconv.go b/vendor/github.com/camlistore/camlistore/pkg/strutil/strconv.go new file mode 100644 index 00000000..9d4ccfff --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/strutil/strconv.go @@ -0,0 +1,117 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strutil + +import ( + "errors" + "strconv" +) + +// ParseUintBytes is like strconv.ParseUint, but using a []byte. 
+func ParseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { + var cutoff, maxVal uint64 + + if bitSize == 0 { + bitSize = int(strconv.IntSize) + } + + s0 := s + switch { + case len(s) < 1: + err = strconv.ErrSyntax + goto Error + + case 2 <= base && base <= 36: + // valid base; nothing to do + + case base == 0: + // Look for octal, hex prefix. + switch { + case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): + base = 16 + s = s[2:] + if len(s) < 1 { + err = strconv.ErrSyntax + goto Error + } + case s[0] == '0': + base = 8 + default: + base = 10 + } + + default: + err = errors.New("invalid base " + strconv.Itoa(base)) + goto Error + } + + n = 0 + cutoff = cutoff64(base) + maxVal = 1<= base { + n = 0 + err = strconv.ErrSyntax + goto Error + } + + if n >= cutoff { + // n*base overflows + n = 1<<64 - 1 + err = strconv.ErrRange + goto Error + } + n *= uint64(base) + + n1 := n + uint64(v) + if n1 < n || n1 > maxVal { + // n+v overflows + n = 1<<64 - 1 + err = strconv.ErrRange + goto Error + } + n = n1 + } + + return n, nil + +Error: + return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} +} + +// Return the first number n such that n*base >= 1<<64. +func cutoff64(base int) uint64 { + if base < 2 { + return 0 + } + return (1<<64-1)/uint64(base) + 1 +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/strutil/strutil.go b/vendor/github.com/camlistore/camlistore/pkg/strutil/strutil.go new file mode 100644 index 00000000..41ce9797 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/strutil/strutil.go @@ -0,0 +1,200 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package strutil contains string and byte processing functions. +package strutil + +import ( + "strings" + "unicode" + "unicode/utf8" +) + +// Fork of Go's implementation in pkg/strings/strings.go: +// Generic split: splits after each instance of sep, +// including sepSave bytes of sep in the subarrays. +func genSplit(dst []string, s, sep string, sepSave, n int) []string { + if n == 0 { + return nil + } + if sep == "" { + panic("sep is empty") + } + if n < 0 { + n = strings.Count(s, sep) + 1 + } + c := sep[0] + start := 0 + na := 0 + for i := 0; i+len(sep) <= len(s) && na+1 < n; i++ { + if s[i] == c && (len(sep) == 1 || s[i:i+len(sep)] == sep) { + dst = append(dst, s[start:i+sepSave]) + na++ + start = i + len(sep) + i += len(sep) - 1 + } + } + dst = append(dst, s[start:]) + return dst +} + +// AppendSplitN is like strings.SplitN but appends to and returns dst. +// Unlike strings.SplitN, an empty separator is not supported. +// The count n determines the number of substrings to return: +// n > 0: at most n substrings; the last substring will be the unsplit remainder. +// n == 0: the result is nil (zero substrings) +// n < 0: all substrings +func AppendSplitN(dst []string, s, sep string, n int) []string { + return genSplit(dst, s, sep, 0, n) +} + +// equalFoldRune compares a and b runes whether they fold equally. +// +// The code comes from strings.EqualFold, but shortened to only one rune. +func equalFoldRune(sr, tr rune) bool { + if sr == tr { + return true + } + // Make sr < tr to simplify what follows. 
+ if tr < sr { + sr, tr = tr, sr + } + // Fast check for ASCII. + if tr < utf8.RuneSelf && 'A' <= sr && sr <= 'Z' { + // ASCII, and sr is upper case. tr must be lower case. + if tr == sr+'a'-'A' { + return true + } + return false + } + + // General case. SimpleFold(x) returns the next equivalent rune > x + // or wraps around to smaller values. + r := unicode.SimpleFold(sr) + for r != sr && r < tr { + r = unicode.SimpleFold(r) + } + if r == tr { + return true + } + return false +} + +// HasPrefixFold is like strings.HasPrefix but uses Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + if prefix == "" { + return true + } + for _, pr := range prefix { + if s == "" { + return false + } + // step with s, too + sr, size := utf8.DecodeRuneInString(s) + if sr == utf8.RuneError { + return false + } + s = s[size:] + if !equalFoldRune(sr, pr) { + return false + } + } + return true +} + +// HasSuffixFold is like strings.HasSuffix but uses Unicode case-folding. +func HasSuffixFold(s, suffix string) bool { + if suffix == "" { + return true + } + // count the runes and bytes in s, but only till rune count of suffix + bo, so := len(s), len(suffix) + for bo > 0 && so > 0 { + r, size := utf8.DecodeLastRuneInString(s[:bo]) + if r == utf8.RuneError { + return false + } + bo -= size + + sr, size := utf8.DecodeLastRuneInString(suffix[:so]) + if sr == utf8.RuneError { + return false + } + so -= size + + if !equalFoldRune(r, sr) { + return false + } + } + return so == 0 +} + +// ContainsFold is like strings.Contains but uses Unicode case-folding. 
+func ContainsFold(s, substr string) bool { + if substr == "" { + return true + } + if s == "" { + return false + } + firstRune := rune(substr[0]) + if firstRune >= utf8.RuneSelf { + firstRune, _ = utf8.DecodeRuneInString(substr) + } + for i, rune := range s { + if equalFoldRune(rune, firstRune) && HasPrefixFold(s[i:], substr) { + return true + } + } + return false +} + +// IsPlausibleJSON reports whether s likely contains a JSON object, without +// actually parsing it. It's meant to be a light heuristic. +func IsPlausibleJSON(s string) bool { + return startsWithOpenBrace(s) && endsWithCloseBrace(s) +} + +func isASCIIWhite(b byte) bool { return b == ' ' || b == '\n' || b == '\r' || b == '\t' } + +func startsWithOpenBrace(s string) bool { + for len(s) > 0 { + switch { + case s[0] == '{': + return true + case isASCIIWhite(s[0]): + s = s[1:] + default: + return false + } + } + return false +} + +func endsWithCloseBrace(s string) bool { + for len(s) > 0 { + last := len(s) - 1 + switch { + case s[last] == '}': + return true + case isASCIIWhite(s[last]): + s = s[:last] + default: + return false + } + } + return false +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/strutil/strutil_test.go b/vendor/github.com/camlistore/camlistore/pkg/strutil/strutil_test.go new file mode 100644 index 00000000..fa93ee95 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/strutil/strutil_test.go @@ -0,0 +1,230 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strutil + +import ( + "reflect" + "strings" + "testing" +) + +func TestAppendSplitN(t *testing.T) { + var got []string + tests := []struct { + s, sep string + n int + }{ + {"foo", "|", 1}, + {"foo", "|", -1}, + {"foo|bar", "|", 1}, + {"foo|bar", "|", -1}, + {"foo|bar|", "|", 2}, + {"foo|bar|", "|", -1}, + {"foo|bar|baz", "|", 1}, + {"foo|bar|baz", "|", 2}, + {"foo|bar|baz", "|", 3}, + {"foo|bar|baz", "|", -1}, + } + for _, tt := range tests { + want := strings.SplitN(tt.s, tt.sep, tt.n) + got = AppendSplitN(got[:0], tt.s, tt.sep, tt.n) + if !reflect.DeepEqual(want, got) { + t.Errorf("AppendSplitN(%q, %q, %d) = %q; want %q", + tt.s, tt.sep, tt.n, got, want) + } + } +} + +func TestStringFromBytes(t *testing.T) { + for _, s := range []string{"foo", "permanode", "file", "zzzz"} { + got := StringFromBytes([]byte(s)) + if got != s { + t.Errorf("StringFromBytes(%q) didn't round-trip; got %q instead", s, got) + } + } +} + +func TestHasPrefixFold(t *testing.T) { + tests := []struct { + s, prefix string + result bool + }{ + {"camli", "CAML", true}, + {"CAMLI", "caml", true}, + {"cam", "Cam", true}, + {"camli", "car", false}, + {"caml", "camli", false}, + {"Hello, 世界 dasdsa", "HeLlO, 世界", true}, + {"Hello, 世界", "HeLlO, 世界-", false}, + + {"kelvin", "\u212A" + "elvin", true}, // "\u212A" is the Kelvin temperature sign + {"Kelvin", "\u212A" + "elvin", true}, + {"kelvin", "\u212A" + "el", true}, + {"Kelvin", "\u212A" + "el", true}, + {"\u212A" + "elvin", "Kelvin", true}, + {"\u212A" + "elvin", "kelvin", true}, + {"\u212A" + "elvin", "Kel", true}, + {"\u212A" + "elvin", "kel", true}, + } + for _, tt := range tests { + r := HasPrefixFold(tt.s, tt.prefix) + if r != tt.result { + t.Errorf("HasPrefixFold(%q, %q) returned %v", tt.s, tt.prefix, r) + } + } +} + +func TestHasSuffixFold(t *testing.T) { + tests := []struct { + s, suffix string + result bool + }{ + {"camli", 
"AMLI", true}, + {"CAMLI", "amli", true}, + {"mli", "MLI", true}, + {"camli", "ali", false}, + {"amli", "camli", false}, + {"asas Hello, 世界", "HeLlO, 世界", true}, + {"Hello, 世界", "HeLlO, 世界-", false}, + {"KkkkKKkelvin", "\u212A" + "elvin", true}, // "\u212A" is the Kelvin temperature sign + + {"kelvin", "\u212A" + "elvin", true}, // "\u212A" is the Kelvin temperature sign + {"Kelvin", "\u212A" + "elvin", true}, + {"\u212A" + "elvin", "Kelvin", true}, + {"\u212A" + "elvin", "kelvin", true}, + {"\u212A" + "elvin", "vin", true}, + {"\u212A" + "elvin", "viN", true}, + } + for _, tt := range tests { + r := HasSuffixFold(tt.s, tt.suffix) + if r != tt.result { + t.Errorf("HasSuffixFold(%q, %q) returned %v", tt.s, tt.suffix, r) + } + } +} + +func TestContainsFold(t *testing.T) { + // TODO: more tests, more languages. + tests := []struct { + s, substr string + result bool + }{ + {"camli", "CAML", true}, + {"CAMLI", "caml", true}, + {"cam", "Cam", true}, + {"мир", "ми", true}, + {"МИP", "ми", true}, + {"КАМЛИЙСТОР", "камлийс", true}, + {"КаМлИйСтОр", "КаМлИйС", true}, + {"camli", "car", false}, + {"caml", "camli", false}, + + {"camli", "AMLI", true}, + {"CAMLI", "amli", true}, + {"mli", "MLI", true}, + {"мир", "ир", true}, + {"МИP", "ми", true}, + {"КАМЛИЙСТОР", "лийстор", true}, + {"КаМлИйСтОр", "лИйСтОр", true}, + {"мир", "р", true}, + {"camli", "ali", false}, + {"amli", "camli", false}, + + {"МИP", "и", true}, + {"мир", "и", true}, + {"КАМЛИЙСТОР", "лийс", true}, + {"КаМлИйСтОр", "лИйС", true}, + + {"árvíztűrő tükörfúrógép", "árvíztŰrŐ", true}, + {"I love ☕", "i love ☕", true}, + + {"k", "\u212A", true}, // "\u212A" is the Kelvin temperature sign + {"\u212A" + "elvin", "k", true}, + {"kelvin", "\u212A" + "elvin", true}, + {"Kelvin", "\u212A" + "elvin", true}, + {"\u212A" + "elvin", "Kelvin", true}, + {"\u212A" + "elvin", "kelvin", true}, + {"273.15 kelvin", "\u212A" + "elvin", true}, + {"273.15 Kelvin", "\u212A" + "elvin", true}, + {"273.15 \u212A" + "elvin", "Kelvin", 
true}, + {"273.15 \u212A" + "elvin", "kelvin", true}, + } + for _, tt := range tests { + r := ContainsFold(tt.s, tt.substr) + if r != tt.result { + t.Errorf("ContainsFold(%q, %q) returned %v", tt.s, tt.substr, r) + } + } +} + +func TestIsPlausibleJSON(t *testing.T) { + tests := []struct { + in string + want bool + }{ + {"{}", true}, + {" {}", true}, + {"{} ", true}, + {"\n\r\t {}\t \r \n", true}, + + {"\n\r\t {x\t \r \n", false}, + {"{x", false}, + {"x}", false}, + {"x", false}, + {"", false}, + } + for _, tt := range tests { + got := IsPlausibleJSON(tt.in) + if got != tt.want { + t.Errorf("IsPlausibleJSON(%q) = %v; want %v", tt.in, got, tt.want) + } + } +} + +func BenchmarkHasSuffixFoldToLower(tb *testing.B) { + a, b := "camlik", "AMLI\u212A" + for i := 0; i < tb.N; i++ { + if !strings.HasSuffix(strings.ToLower(a), strings.ToLower(b)) { + tb.Fatalf("%q should have the same suffix as %q", a, b) + } + } +} +func BenchmarkHasSuffixFold(tb *testing.B) { + a, b := "camlik", "AMLI\u212A" + for i := 0; i < tb.N; i++ { + if !HasSuffixFold(a, b) { + tb.Fatalf("%q should have the same suffix as %q", a, b) + } + } +} + +func BenchmarkHasPrefixFoldToLower(tb *testing.B) { + a, b := "kamlistore", "\u212AAMLI" + for i := 0; i < tb.N; i++ { + if !strings.HasPrefix(strings.ToLower(a), strings.ToLower(b)) { + tb.Fatalf("%q should have the same suffix as %q", a, b) + } + } +} +func BenchmarkHasPrefixFold(tb *testing.B) { + a, b := "kamlistore", "\u212AAMLI" + for i := 0; i < tb.N; i++ { + if !HasPrefixFold(a, b) { + tb.Fatalf("%q should have the same suffix as %q", a, b) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/syncutil/gate.go b/vendor/github.com/camlistore/camlistore/pkg/syncutil/gate.go new file mode 100644 index 00000000..497c7a5a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/syncutil/gate.go @@ -0,0 +1,42 @@ +/* +Copyright 2013 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package syncutil provides various concurrency mechanisms. +package syncutil + +// A Gate limits concurrency. +type Gate struct { + c chan struct{} +} + +// NewGate returns a new gate that will only permit max operations at once. +func NewGate(max int) *Gate { + return &Gate{make(chan struct{}, max)} +} + +// Start starts an operation, blocking until the gate has room. +func (g *Gate) Start() { + g.c <- struct{}{} +} + +// Done finishes an operation. +func (g *Gate) Done() { + select { + case <-g.c: + default: + panic("Done called more than Start") + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/syncutil/group.go b/vendor/github.com/camlistore/camlistore/pkg/syncutil/group.go new file mode 100644 index 00000000..dacef4c4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/syncutil/group.go @@ -0,0 +1,64 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package syncutil + +import "sync" + +// A Group is like a sync.WaitGroup and coordinates doing +// multiple things at once. Its zero value is ready to use. +type Group struct { + wg sync.WaitGroup + mu sync.Mutex // guards errs + errs []error +} + +// Go runs fn in its own goroutine, but does not wait for it to complete. +// Call Err or Errs to wait for all the goroutines to complete. +func (g *Group) Go(fn func() error) { + g.wg.Add(1) + go func() { + defer g.wg.Done() + err := fn() + if err != nil { + g.mu.Lock() + defer g.mu.Unlock() + g.errs = append(g.errs, err) + } + }() +} + +// Wait waits for all the previous calls to Go to complete. +func (g *Group) Wait() { + g.wg.Wait() +} + +// Err waits for all previous calls to Go to complete and returns the +// first non-nil error, or nil. +func (g *Group) Err() error { + g.wg.Wait() + if len(g.errs) > 0 { + return g.errs[0] + } + return nil +} + +// Errs waits for all previous calls to Go to complete and returns +// all non-nil errors. +func (g *Group) Errs() []error { + g.wg.Wait() + return g.errs +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/syncutil/lock.go b/vendor/github.com/camlistore/camlistore/pkg/syncutil/lock.go new file mode 100644 index 00000000..52de8e48 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/syncutil/lock.go @@ -0,0 +1,191 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package syncutil + +import ( + "bytes" + "fmt" + "log" + "runtime" + "sync" + "sync/atomic" + "time" + + "camlistore.org/pkg/strutil" +) + +// RWMutexTracker is a sync.RWMutex that tracks who owns the current +// exclusive lock. It's used for debugging deadlocks. +type RWMutexTracker struct { + mu sync.RWMutex + + // Atomic counters for number waiting and having read and write locks. + nwaitr int32 + nwaitw int32 + nhaver int32 + nhavew int32 // should always be 0 or 1 + + logOnce sync.Once + + hmu sync.Mutex + holder []byte + holdr map[int64]bool // goroutines holding read lock +} + +const stackBufSize = 16 << 20 + +var stackBuf = make(chan []byte, 8) + +func getBuf() []byte { + select { + case b := <-stackBuf: + return b[:stackBufSize] + default: + return make([]byte, stackBufSize) + } +} + +func putBuf(b []byte) { + select { + case stackBuf <- b: + default: + } +} + +var goroutineSpace = []byte("goroutine ") + +func GoroutineID() int64 { + b := getBuf() + defer putBuf(b) + b = b[:runtime.Stack(b, false)] + // Parse the 4707 otu of "goroutine 4707 [" + b = bytes.TrimPrefix(b, goroutineSpace) + i := bytes.IndexByte(b, ' ') + if i < 0 { + panic(fmt.Sprintf("No space found in %q", b)) + } + b = b[:i] + n, err := strutil.ParseUintBytes(b, 10, 64) + if err != nil { + panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) + } + return int64(n) +} + +func (m *RWMutexTracker) startLogger() { + go func() { + var buf bytes.Buffer + for { + time.Sleep(1 * time.Second) + buf.Reset() + m.hmu.Lock() + for gid := range m.holdr { + fmt.Fprintf(&buf, " [%d]", gid) + } + m.hmu.Unlock() + log.Printf("Mutex %p: waitW %d haveW %d waitR %d haveR %d %s", + m, + atomic.LoadInt32(&m.nwaitw), + atomic.LoadInt32(&m.nhavew), + atomic.LoadInt32(&m.nwaitr), + atomic.LoadInt32(&m.nhaver), buf.Bytes()) + } + }() +} + +func (m *RWMutexTracker) Lock() { + m.logOnce.Do(m.startLogger) + atomic.AddInt32(&m.nwaitw, 1) + m.mu.Lock() + atomic.AddInt32(&m.nwaitw, -1) + 
atomic.AddInt32(&m.nhavew, 1) + + m.hmu.Lock() + defer m.hmu.Unlock() + if len(m.holder) == 0 { + m.holder = make([]byte, stackBufSize) + } + m.holder = m.holder[:runtime.Stack(m.holder[:stackBufSize], false)] + log.Printf("Lock at %s", string(m.holder)) +} + +func (m *RWMutexTracker) Unlock() { + m.hmu.Lock() + m.holder = nil + m.hmu.Unlock() + + atomic.AddInt32(&m.nhavew, -1) + m.mu.Unlock() +} + +func (m *RWMutexTracker) RLock() { + m.logOnce.Do(m.startLogger) + atomic.AddInt32(&m.nwaitr, 1) + + // Catch read-write-read lock. See if somebody (us? via + // another goroutine?) already has a read lock, and then + // somebody else is waiting to write, meaning our second read + // will deadlock. + if atomic.LoadInt32(&m.nhaver) > 0 && atomic.LoadInt32(&m.nwaitw) > 0 { + buf := getBuf() + buf = buf[:runtime.Stack(buf, false)] + log.Printf("Potential R-W-R deadlock at: %s", buf) + putBuf(buf) + } + + m.mu.RLock() + atomic.AddInt32(&m.nwaitr, -1) + atomic.AddInt32(&m.nhaver, 1) + + gid := GoroutineID() + m.hmu.Lock() + defer m.hmu.Unlock() + if m.holdr == nil { + m.holdr = make(map[int64]bool) + } + if m.holdr[gid] { + buf := getBuf() + buf = buf[:runtime.Stack(buf, false)] + log.Fatalf("Recursive call to RLock: %s", buf) + } + m.holdr[gid] = true +} + +func stack() []byte { + buf := make([]byte, 1024) + return buf[:runtime.Stack(buf, false)] +} + +func (m *RWMutexTracker) RUnlock() { + atomic.AddInt32(&m.nhaver, -1) + + gid := GoroutineID() + m.hmu.Lock() + delete(m.holdr, gid) + m.hmu.Unlock() + + m.mu.RUnlock() +} + +// Holder returns the stack trace of the current exclusive lock holder's stack +// when it acquired the lock (with Lock). It returns the empty string if the lock +// is not currently held. 
+func (m *RWMutexTracker) Holder() string { + m.hmu.Lock() + defer m.hmu.Unlock() + return string(m.holder) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/syncutil/once.go b/vendor/github.com/camlistore/camlistore/pkg/syncutil/once.go new file mode 100644 index 00000000..1123f092 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/syncutil/once.go @@ -0,0 +1,60 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syncutil + +import ( + "sync" + "sync/atomic" +) + +// A Once will perform a successful action exactly once. +// +// Unlike a sync.Once, this Once's func returns an error +// and is re-armed on failure. +type Once struct { + m sync.Mutex + done uint32 +} + +// Do calls the function f if and only if Do has not been invoked +// without error for this instance of Once. In other words, given +// var once Once +// if once.Do(f) is called multiple times, only the first call will +// invoke f, even if f has a different value in each invocation unless +// f returns an error. A new instance of Once is required for each +// function to execute. +// +// Do is intended for initialization that must be run exactly once. 
Since f +// is niladic, it may be necessary to use a function literal to capture the +// arguments to a function to be invoked by Do: +// err := config.once.Do(func() error { return config.init(filename) }) +func (o *Once) Do(f func() error) error { + if atomic.LoadUint32(&o.done) == 1 { + return nil + } + // Slow-path. + o.m.Lock() + defer o.m.Unlock() + var err error + if o.done == 0 { + err = f() + if err == nil { + atomic.StoreUint32(&o.done, 1) + } + } + return err +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/syncutil/once_test.go b/vendor/github.com/camlistore/camlistore/pkg/syncutil/once_test.go new file mode 100644 index 00000000..e321d509 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/syncutil/once_test.go @@ -0,0 +1,57 @@ +package syncutil + +import ( + "errors" + "testing" +) + +func TestOnce(t *testing.T) { + timesRan := 0 + f := func() error { + timesRan++ + return nil + } + + once := Once{} + grp := Group{} + + for i := 0; i < 10; i++ { + grp.Go(func() error { return once.Do(f) }) + } + + if grp.Err() != nil { + t.Errorf("Expected no errors, got %v", grp.Err()) + } + + if timesRan != 1 { + t.Errorf("Expected to run one time, ran %d", timesRan) + } +} + +// TestOnceErroring verifies we retry on every error, but stop after +// the first success. 
+func TestOnceErroring(t *testing.T) { + timesRan := 0 + f := func() error { + timesRan++ + if timesRan < 3 { + return errors.New("retry") + } + return nil + } + + once := Once{} + grp := Group{} + + for i := 0; i < 10; i++ { + grp.Go(func() error { return once.Do(f) }) + } + + if len(grp.Errs()) != 2 { + t.Errorf("Expected two errors, got %d", len(grp.Errs())) + } + + if timesRan != 3 { + t.Errorf("Expected to run three times, ran %d", timesRan) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/syncutil/sem.go b/vendor/github.com/camlistore/camlistore/pkg/syncutil/sem.go new file mode 100644 index 00000000..092655ff --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/syncutil/sem.go @@ -0,0 +1,64 @@ +package syncutil + +import ( + "fmt" + "log" + "sync" +) + +type debugT bool + +var debug = debugT(false) + +func (d debugT) Printf(format string, args ...interface{}) { + if bool(d) { + log.Printf(format, args...) + } +} + +// Sem implements a semaphore that can have multiple units acquired/released +// at a time. +type Sem struct { + c *sync.Cond // Protects size + max, free int64 +} + +// NewSem creates a semaphore with max units available for acquisition. +func NewSem(max int64) *Sem { + return &Sem{ + c: sync.NewCond(new(sync.Mutex)), + free: max, + max: max, + } +} + +// Acquire will deduct n units from the semaphore. If the deduction would +// result in the available units falling below zero, the call will block until +// another go routine returns units via a call to Release. If more units are +// requested than the semaphore is configured to hold, error will be non-nil. 
+func (s *Sem) Acquire(n int64) error { + if n > s.max { + return fmt.Errorf("sem: attempt to acquire more units than semaphore size %d > %d", n, s.max) + } + s.c.L.Lock() + defer s.c.L.Unlock() + for { + debug.Printf("Acquire check max %d free %d, n %d", s.max, s.free, n) + if s.free >= n { + s.free -= n + return nil + } + debug.Printf("Acquire Wait max %d free %d, n %d", s.max, s.free, n) + s.c.Wait() + } +} + +// Release will return n units to the semaphore and notify any currently +// blocking Acquire calls. +func (s *Sem) Release(n int64) { + s.c.L.Lock() + defer s.c.L.Unlock() + debug.Printf("Release max %d free %d, n %d", s.max, s.free, n) + s.free += n + s.c.Broadcast() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/syncutil/sem_test.go b/vendor/github.com/camlistore/camlistore/pkg/syncutil/sem_test.go new file mode 100644 index 00000000..f6981afe --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/syncutil/sem_test.go @@ -0,0 +1,33 @@ +package syncutil_test + +import ( + "testing" + + "camlistore.org/pkg/syncutil" +) + +func TestSem(t *testing.T) { + s := syncutil.NewSem(5) + + if err := s.Acquire(2); err != nil { + t.Fatal(err) + } + if err := s.Acquire(2); err != nil { + t.Fatal(err) + } + + go func() { + s.Release(2) + s.Release(2) + }() + if err := s.Acquire(5); err != nil { + t.Fatal(err) + } +} + +func TestSemErr(t *testing.T) { + s := syncutil.NewSem(5) + if err := s.Acquire(6); err == nil { + t.Fatal("Didn't get expected error for large acquire.") + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/syncutil/syncutil_test.go b/vendor/github.com/camlistore/camlistore/pkg/syncutil/syncutil_test.go new file mode 100644 index 00000000..99332c74 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/syncutil/syncutil_test.go @@ -0,0 +1,30 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the 
License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syncutil + +import "testing" + +func TestGoroutineID(t *testing.T) { + c := make(chan int64, 2) + c <- GoroutineID() + go func() { + c <- GoroutineID() + }() + if a, b := <-c, <-c; a == b { + t.Errorf("both goroutine IDs were %d; expected different", a) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/asserts/asserts.go b/vendor/github.com/camlistore/camlistore/pkg/test/asserts/asserts.go new file mode 100644 index 00000000..d2546ce3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/asserts/asserts.go @@ -0,0 +1,108 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package asserts provides a bad implementation of test predicate +// helpers. This package should either go away or dramatically +// improve. +package asserts + +import ( + "strings" + "testing" +) + +// NOTE: THESE FUNCTIONS ARE DEPRECATED. PLEASE DO NOT USE THEM IN +// NEW CODE. 
+ +func Expect(t *testing.T, got bool, what string) { + if !got { + t.Errorf("%s: got %v; expected %v", what, got, true) + } +} + +func Assert(t *testing.T, got bool, what string) { + if !got { + t.Fatalf("%s: got %v; expected %v", what, got, true) + } +} + +func ExpectErrorContains(t *testing.T, err error, substr, msg string) { + errorContains((*testing.T).Errorf, t, err, substr, msg) +} + +func AssertErrorContains(t *testing.T, err error, substr, msg string) { + errorContains((*testing.T).Fatalf, t, err, substr, msg) +} + +func errorContains(f func(*testing.T, string, ...interface{}), t *testing.T, err error, substr, msg string) { + if err == nil { + f(t, "%s: got nil error; expected error containing %q", msg, substr) + return + } + if !strings.Contains(err.Error(), substr) { + f(t, "%s: expected error containing %q; got instead error %q", msg, substr, err.Error()) + } +} + +func ExpectString(t *testing.T, expect, got string, what string) { + if expect != got { + t.Errorf("%s: got %q; expected %q", what, got, expect) + } +} + +func AssertString(t *testing.T, expect, got string, what string) { + if expect != got { + t.Fatalf("%s: got %q; expected %q", what, got, expect) + } +} + +func ExpectBool(t *testing.T, expect, got bool, what string) { + if expect != got { + t.Errorf("%s: got %v; expected %v", what, got, expect) + } +} + +func AssertBool(t *testing.T, expect, got bool, what string) { + if expect != got { + t.Fatalf("%s: got %v; expected %v", what, got, expect) + } +} + +func ExpectInt(t *testing.T, expect, got int, what string) { + if expect != got { + t.Errorf("%s: got %d; expected %d", what, got, expect) + } +} + +func AssertInt(t *testing.T, expect, got int, what string) { + if expect != got { + t.Fatalf("%s: got %d; expected %d", what, got, expect) + } +} + +func ExpectNil(t *testing.T, v interface{}, what string) { + if v == nil { + return + } + t.Errorf("%s: expected nil; got %v", what, v) +} + +func AssertNil(t *testing.T, v interface{}, what string) 
{ + if v == nil { + return + } + t.Fatalf("%s: expected nil; got %v", what, v) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/blob.go b/vendor/github.com/camlistore/camlistore/pkg/test/blob.go new file mode 100644 index 00000000..9844d5de --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/blob.go @@ -0,0 +1,93 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "crypto/sha1" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/types" +) + +// Blob is a utility class for unit tests. +type Blob struct { + Contents string // the contents of the blob +} + +func (tb *Blob) Blob() *blob.Blob { + s := tb.Contents + return blob.NewBlob(tb.BlobRef(), tb.Size(), func() types.ReadSeekCloser { + return struct { + io.ReadSeeker + io.Closer + }{ + io.NewSectionReader(strings.NewReader(s), 0, int64(len(s))), + ioutil.NopCloser(nil), + } + }) +} + +func (tb *Blob) BlobRef() blob.Ref { + h := sha1.New() + h.Write([]byte(tb.Contents)) + return blob.RefFromHash(h) +} + +func (tb *Blob) SizedRef() blob.SizedRef { + return blob.SizedRef{tb.BlobRef(), tb.Size()} +} + +func (tb *Blob) BlobRefSlice() []blob.Ref { + return []blob.Ref{tb.BlobRef()} +} + +func (tb *Blob) Size() uint32 { + // Check that it's not larger than a uint32 (possible with + // 64-bit ints). 
But while we're here, be more paranoid and + // check for over the default max blob size of 16 MB. + if len(tb.Contents) > 16<<20 { + panic(fmt.Sprintf("test blob of %d bytes is larger than max 16MB allowed in testing", len(tb.Contents))) + } + return uint32(len(tb.Contents)) +} + +func (tb *Blob) Reader() io.Reader { + return strings.NewReader(tb.Contents) +} + +func (tb *Blob) AssertMatches(t *testing.T, sb blob.SizedRef) { + if sb.Size != tb.Size() { + t.Fatalf("Got size %d; expected %d", sb.Size, tb.Size()) + } + if sb.Ref != tb.BlobRef() { + t.Fatalf("Got blob %q; expected %q", sb.Ref.String(), tb.BlobRef()) + } +} + +func (tb *Blob) MustUpload(t *testing.T, ds blobserver.BlobReceiver) { + sb, err := ds.ReceiveBlob(tb.BlobRef(), tb.Reader()) + if err != nil { + t.Fatalf("failed to upload blob %v (%q): %v", tb.BlobRef(), tb.Contents, err) + } + tb.AssertMatches(t, sb) // TODO: better error reporting +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/diff.go b/vendor/github.com/camlistore/camlistore/pkg/test/diff.go new file mode 100644 index 00000000..ccc5d208 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/diff.go @@ -0,0 +1,52 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "bytes" + "io/ioutil" + "os" + "os/exec" +) + +// Diff returns the unified diff (from running "diff -u") or +// returns an error string. 
+func Diff(a, b []byte) string { + if bytes.Equal(a, b) { + return "" + } + ta, err := ioutil.TempFile("", "") + if err != nil { + return err.Error() + } + tb, err := ioutil.TempFile("", "") + if err != nil { + return err.Error() + } + defer os.Remove(ta.Name()) + defer os.Remove(tb.Name()) + // Lqzy... + ta.Write(a) + tb.Write(b) + ta.Close() + tb.Close() + out, err := exec.Command("diff", "-u", ta.Name(), tb.Name()).CombinedOutput() + if err != nil && len(out) == 0 { + return err.Error() + } + return string(out) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/doc.go b/vendor/github.com/camlistore/camlistore/pkg/test/doc.go new file mode 100644 index 00000000..44a721a1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package test provides common Camlistore test objects. +package test diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/dockertest/docker.go b/vendor/github.com/camlistore/camlistore/pkg/test/dockertest/docker.go new file mode 100644 index 00000000..5137f063 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/dockertest/docker.go @@ -0,0 +1,275 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package dockertest contains helper functions for setting up and tearing down docker containers to aid in testing. +*/ +package dockertest + +import ( + "bytes" + "database/sql" + "encoding/json" + "errors" + "fmt" + "log" + "os/exec" + "strings" + "testing" + "time" + + "camlistore.org/pkg/netutil" +) + +// Debug, if set, prevents any container from being removed. +var Debug bool + +/// runLongTest checks all the conditions for running a docker container +// based on image. +func runLongTest(t *testing.T, image string) { + if testing.Short() { + t.Skip("skipping in short mode") + } + if !haveDocker() { + t.Skip("skipping test; 'docker' command not found") + } + if ok, err := haveImage(image); !ok || err != nil { + if err != nil { + t.Skipf("Error running docker to check for %s: %v", image, err) + } + log.Printf("Pulling docker image %s ...", image) + if err := Pull(image); err != nil { + t.Skipf("Error pulling %s: %v", image, err) + } + } +} + +// haveDocker returns whether the "docker" command was found. +func haveDocker() bool { + _, err := exec.LookPath("docker") + return err == nil +} + +func haveImage(name string) (ok bool, err error) { + out, err := exec.Command("docker", "images", "--no-trunc").Output() + if err != nil { + return + } + return bytes.Contains(out, []byte(name)), nil +} + +func run(args ...string) (containerID string, err error) { + cmd := exec.Command("docker", append([]string{"run"}, args...)...) 
+ var stdout, stderr bytes.Buffer + cmd.Stdout, cmd.Stderr = &stdout, &stderr + if err = cmd.Run(); err != nil { + err = fmt.Errorf("%v%v", stderr.String(), err) + return + } + containerID = strings.TrimSpace(stdout.String()) + if containerID == "" { + return "", errors.New("unexpected empty output from `docker run`") + } + return +} + +func KillContainer(container string) error { + return exec.Command("docker", "kill", container).Run() +} + +// Pull retrieves the docker image with 'docker pull'. +func Pull(image string) error { + out, err := exec.Command("docker", "pull", image).CombinedOutput() + if err != nil { + err = fmt.Errorf("%v: %s", err, out) + } + return err +} + +// IP returns the IP address of the container. +func IP(containerID string) (string, error) { + out, err := exec.Command("docker", "inspect", containerID).Output() + if err != nil { + return "", err + } + type networkSettings struct { + IPAddress string + } + type container struct { + NetworkSettings networkSettings + } + var c []container + if err := json.NewDecoder(bytes.NewReader(out)).Decode(&c); err != nil { + return "", err + } + if len(c) == 0 { + return "", errors.New("no output from docker inspect") + } + if ip := c[0].NetworkSettings.IPAddress; ip != "" { + return ip, nil + } + return "", errors.New("could not find an IP. Not running?") +} + +type ContainerID string + +func (c ContainerID) IP() (string, error) { + return IP(string(c)) +} + +func (c ContainerID) Kill() error { + return KillContainer(string(c)) +} + +// Remove runs "docker rm" on the container +func (c ContainerID) Remove() error { + if Debug { + return nil + } + return exec.Command("docker", "rm", "-v", string(c)).Run() +} + +// KillRemove calls Kill on the container, and then Remove if there was +// no error. It logs any error to t. 
+func (c ContainerID) KillRemove(t *testing.T) { + if err := c.Kill(); err != nil { + t.Log(err) + return + } + if err := c.Remove(); err != nil { + t.Log(err) + } +} + +// lookup retrieves the ip address of the container, and tries to reach +// before timeout the tcp address at this ip and given port. +func (c ContainerID) lookup(port int, timeout time.Duration) (ip string, err error) { + ip, err = c.IP() + if err != nil { + err = fmt.Errorf("error getting IP: %v", err) + return + } + addr := fmt.Sprintf("%s:%d", ip, port) + err = netutil.AwaitReachable(addr, timeout) + return +} + +// setupContainer sets up a container, using the start function to run the given image. +// It also looks up the IP address of the container, and tests this address with the given +// port and timeout. It returns the container ID and its IP address, or makes the test +// fail on error. +func setupContainer(t *testing.T, image string, port int, timeout time.Duration, + start func() (string, error)) (c ContainerID, ip string) { + runLongTest(t, image) + + containerID, err := start() + if err != nil { + t.Fatalf("docker run: %v", err) + } + c = ContainerID(containerID) + ip, err = c.lookup(port, timeout) + if err != nil { + c.KillRemove(t) + t.Skipf("Skipping test for container %v: %v", c, err) + } + return +} + +const ( + mongoImage = "mpl7/mongo" + // TODO(mpl): there's now an official mysql image at + // https://registry.hub.docker.com/_/mysql/ . We should either directly use one from + // there or fetch one there anyway to host it at + // https://console.developers.google.com/project/camlistore-website + mysqlImage = "orchardup/mysql" + MySQLUsername = "root" + MySQLPassword = "root" + postgresImage = "nornagon/postgres" + PostgresUsername = "docker" // set up by the dockerfile of postgresImage + PostgresPassword = "docker" // set up by the dockerfile of postgresImage +) + +// SetupMongoContainer sets up a real MongoDB instance for testing purposes, +// using a Docker container. 
It returns the container ID and its IP address, +// or makes the test fail on error. +// Currently using https://index.docker.io/u/robinvdvleuten/mongo/ +func SetupMongoContainer(t *testing.T) (c ContainerID, ip string) { + return setupContainer(t, mongoImage, 27017, 10*time.Second, func() (string, error) { + return run("-d", mongoImage, "--nojournal") + }) +} + +// SetupMySQLContainer sets up a real MySQL instance for testing purposes, +// using a Docker container. It returns the container ID and its IP address, +// or makes the test fail on error. +// Currently using https://index.docker.io/u/orchardup/mysql/ +func SetupMySQLContainer(t *testing.T, dbname string) (c ContainerID, ip string) { + return setupContainer(t, mysqlImage, 3306, 10*time.Second, func() (string, error) { + return run("-d", "-e", "MYSQL_ROOT_PASSWORD="+MySQLPassword, "-e", "MYSQL_DATABASE="+dbname, mysqlImage) + }) +} + +// SetupPostgreSQLContainer sets up a real PostgreSQL instance for testing purposes, +// using a Docker container. It returns the container ID and its IP address, +// or makes the test fail on error. 
+// Currently using https://index.docker.io/u/nornagon/postgres +func SetupPostgreSQLContainer(t *testing.T, dbname string) (c ContainerID, ip string) { + c, ip = setupContainer(t, postgresImage, 5432, 15*time.Second, func() (string, error) { + return run("-d", postgresImage) + }) + cleanupAndDie := func(err error) { + c.KillRemove(t) + t.Fatal(err) + } + rootdb, err := sql.Open("postgres", + fmt.Sprintf("user=%s password=%s host=%s dbname=postgres sslmode=disable", PostgresUsername, PostgresPassword, ip)) + if err != nil { + cleanupAndDie(fmt.Errorf("Could not open postgres rootdb: %v", err)) + } + if _, err := sqlExecRetry(rootdb, + "CREATE DATABASE "+dbname+" LC_COLLATE = 'C' TEMPLATE = template0", + 50); err != nil { + cleanupAndDie(fmt.Errorf("Could not create database %v: %v", dbname, err)) + } + return +} + +// sqlExecRetry keeps calling http://golang.org/pkg/database/sql/#DB.Exec on db +// with stmt until it succeeds or until it has been tried maxTry times. +// It sleeps in between tries, twice longer after each new try, starting with +// 100 milliseconds. +func sqlExecRetry(db *sql.DB, stmt string, maxTry int) (sql.Result, error) { + if maxTry <= 0 { + return nil, errors.New("did not try at all") + } + interval := 100 * time.Millisecond + try := 0 + var err error + var result sql.Result + for { + result, err = db.Exec(stmt) + if err == nil { + return result, nil + } + try++ + if try == maxTry { + break + } + time.Sleep(interval) + interval *= 2 + } + return result, fmt.Errorf("failed %v times: %v", try, err) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/fakeindex.go b/vendor/github.com/camlistore/camlistore/pkg/test/fakeindex.go new file mode 100644 index 00000000..53eb23ac --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/fakeindex.go @@ -0,0 +1,225 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "fmt" + "log" + "os" + "strings" + "sync" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/context" + "camlistore.org/pkg/types/camtypes" +) + +var ClockOrigin = time.Unix(1322443956, 123456) + +// A FakeIndex implements parts of search.Index and provides methods +// to controls the results, such as AddMeta, AddClaim, +// AddSignerAttrValue. +type FakeIndex struct { + lk sync.Mutex + meta map[blob.Ref]camtypes.BlobMeta + claims map[blob.Ref][]camtypes.Claim // permanode -> claims + signerAttrValue map[string]blob.Ref // "\0\0" -> blobref + path map[string]*camtypes.Path // "\0\0" -> path + + cllk sync.RWMutex + clock time.Time +} + +func NewFakeIndex() *FakeIndex { + return &FakeIndex{ + meta: make(map[blob.Ref]camtypes.BlobMeta), + claims: make(map[blob.Ref][]camtypes.Claim), + signerAttrValue: make(map[string]blob.Ref), + path: make(map[string]*camtypes.Path), + clock: ClockOrigin, + } +} + +// +// Test methods +// + +func (fi *FakeIndex) nextDate() time.Time { + fi.cllk.Lock() + defer fi.cllk.Unlock() + fi.clock = fi.clock.Add(1 * time.Second) + return fi.clock.UTC() +} + +func (fi *FakeIndex) LastTime() time.Time { + fi.cllk.RLock() + defer fi.cllk.RUnlock() + return fi.clock +} + +func camliTypeFromMime(mime string) string { + if v := strings.TrimPrefix(mime, "application/json; camliType="); v != mime { + return v + } + return "" +} + +func (fi *FakeIndex) AddMeta(br blob.Ref, camliType string, size uint32) { + fi.lk.Lock() + defer fi.lk.Unlock() + fi.meta[br] = camtypes.BlobMeta{ + Ref: br, + Size: size, + CamliType: 
camliType, + } +} + +func (fi *FakeIndex) AddClaim(owner, permanode blob.Ref, claimType, attr, value string) { + fi.lk.Lock() + defer fi.lk.Unlock() + date := fi.nextDate() + + claim := camtypes.Claim{ + Permanode: permanode, + Signer: owner, + BlobRef: blob.Ref{}, + Date: date, + Type: claimType, + Attr: attr, + Value: value, + } + fi.claims[permanode] = append(fi.claims[permanode], claim) + + if claimType == "set-attribute" && strings.HasPrefix(attr, "camliPath:") { + suffix := attr[len("camliPath:"):] + path := &camtypes.Path{ + Target: blob.MustParse(value), + Suffix: suffix, + } + fi.path[fmt.Sprintf("%s\x00%s\x00%s", owner, permanode, suffix)] = path + } +} + +func (fi *FakeIndex) AddSignerAttrValue(signer blob.Ref, attr, val string, latest blob.Ref) { + fi.lk.Lock() + defer fi.lk.Unlock() + fi.signerAttrValue[fmt.Sprintf("%s\x00%s\x00%s", signer, attr, val)] = latest +} + +// +// Interface implementation +// + +func (fi *FakeIndex) KeyId(blob.Ref) (string, error) { + panic("NOIMPL") +} + +func (fi *FakeIndex) GetRecentPermanodes(dest chan<- camtypes.RecentPermanode, owner blob.Ref, limit int, before time.Time) error { + panic("NOIMPL") +} + +// TODO(mpl): write real tests +func (fi *FakeIndex) SearchPermanodesWithAttr(dest chan<- blob.Ref, request *camtypes.PermanodeByAttrRequest) error { + panic("NOIMPL") +} + +func (fi *FakeIndex) AppendClaims(dst []camtypes.Claim, permaNode blob.Ref, + signerFilter blob.Ref, + attrFilter string) ([]camtypes.Claim, error) { + fi.lk.Lock() + defer fi.lk.Unlock() + + for _, cl := range fi.claims[permaNode] { + if signerFilter.Valid() && cl.Signer != signerFilter { + continue + } + if attrFilter != "" && cl.Attr != attrFilter { + continue + } + dst = append(dst, cl) + } + return dst, nil +} + +func (fi *FakeIndex) GetBlobMeta(br blob.Ref) (camtypes.BlobMeta, error) { + fi.lk.Lock() + defer fi.lk.Unlock() + bm, ok := fi.meta[br] + if !ok { + return camtypes.BlobMeta{}, os.ErrNotExist + } + return bm, nil +} + +func (fi 
*FakeIndex) ExistingFileSchemas(bytesRef blob.Ref) ([]blob.Ref, error) { + panic("NOIMPL") +} + +func (fi *FakeIndex) GetFileInfo(fileRef blob.Ref) (camtypes.FileInfo, error) { + panic("NOIMPL") +} + +func (fi *FakeIndex) GetImageInfo(fileRef blob.Ref) (camtypes.ImageInfo, error) { + panic("NOIMPL") +} + +func (fi *FakeIndex) GetMediaTags(fileRef blob.Ref) (tags map[string]string, err error) { + panic("NOIMPL") +} + +func (fi *FakeIndex) GetDirMembers(dir blob.Ref, dest chan<- blob.Ref, limit int) error { + panic("NOIMPL") +} + +func (fi *FakeIndex) PermanodeOfSignerAttrValue(signer blob.Ref, attr, val string) (blob.Ref, error) { + fi.lk.Lock() + defer fi.lk.Unlock() + if b, ok := fi.signerAttrValue[fmt.Sprintf("%s\x00%s\x00%s", signer, attr, val)]; ok { + return b, nil + } + return blob.Ref{}, os.ErrNotExist +} + +func (fi *FakeIndex) PathsOfSignerTarget(signer, target blob.Ref) ([]*camtypes.Path, error) { + panic("NOIMPL") +} + +func (fi *FakeIndex) PathsLookup(signer, base blob.Ref, suffix string) ([]*camtypes.Path, error) { + panic("NOIMPL") +} + +func (fi *FakeIndex) PathLookup(signer, base blob.Ref, suffix string, at time.Time) (*camtypes.Path, error) { + if !at.IsZero() { + panic("PathLookup with non-zero 'at' time not supported") + } + fi.lk.Lock() + defer fi.lk.Unlock() + if p, ok := fi.path[fmt.Sprintf("%s\x00%s\x00%s", signer, base, suffix)]; ok { + return p, nil + } + log.Printf("PathLookup miss for signer %q, base %q, suffix %q", signer, base, suffix) + return nil, os.ErrNotExist +} + +func (fi *FakeIndex) EdgesTo(ref blob.Ref, opts *camtypes.EdgesToOpts) ([]*camtypes.Edge, error) { + panic("NOIMPL") +} + +func (fi *FakeIndex) EnumerateBlobMeta(ctx *context.Context, ch chan<- camtypes.BlobMeta) error { + panic("NOIMPL") +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/fetcher.go b/vendor/github.com/camlistore/camlistore/pkg/test/fetcher.go new file mode 100644 index 00000000..4a5d1b44 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/pkg/test/fetcher.go @@ -0,0 +1,89 @@ +/* +Copyright 2011 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "io" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/blobserver/memory" +) + +// Fetcher is an in-memory implementation of the blobserver Storage +// interface. It started as just a fetcher and grew. It also includes +// other convenience methods for testing. +type Fetcher struct { + memory.Storage + + // ReceiveErr optionally returns the error to return on receive. + ReceiveErr error + + // FetchErr, if non-nil, specifies the error to return on the next fetch call. + // If it returns nil, fetches proceed as normal. 
+ FetchErr func() error +} + +var ( + _ blobserver.Storage = (*Fetcher)(nil) + _ blobserver.BlobStreamer = (*Fetcher)(nil) +) + +func (tf *Fetcher) Fetch(ref blob.Ref) (file io.ReadCloser, size uint32, err error) { + if tf.FetchErr != nil { + if err = tf.FetchErr(); err != nil { + return + } + } + file, size, err = tf.Storage.Fetch(ref) + if err != nil { + return + } + return file, size, nil +} + +func (tf *Fetcher) SubFetch(ref blob.Ref, offset, length int64) (io.ReadCloser, error) { + if tf.FetchErr != nil { + if err := tf.FetchErr(); err != nil { + return nil, err + } + } + rc, err := tf.Storage.SubFetch(ref, offset, length) + if err != nil { + return rc, err + } + return rc, nil +} + +func (tf *Fetcher) ReceiveBlob(br blob.Ref, source io.Reader) (blob.SizedRef, error) { + sb, err := tf.Storage.ReceiveBlob(br, source) + if err != nil { + return sb, err + } + if err := tf.ReceiveErr; err != nil { + tf.RemoveBlobs([]blob.Ref{br}) + return sb, err + } + return sb, nil +} + +func (tf *Fetcher) AddBlob(b *Blob) { + _, err := tf.ReceiveBlob(b.BlobRef(), b.Reader()) + if err != nil { + panic(err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/fetcher_test.go b/vendor/github.com/camlistore/camlistore/pkg/test/fetcher_test.go new file mode 100644 index 00000000..3fef9bda --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/fetcher_test.go @@ -0,0 +1,31 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test_test + +import ( + "testing" + + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/blobserver/storagetest" + "camlistore.org/pkg/test" +) + +func TestFetcher(t *testing.T) { + storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) { + return new(test.Fetcher), func() {} + }) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/integration/camget_test.go b/vendor/github.com/camlistore/camlistore/pkg/test/integration/camget_test.go new file mode 100644 index 00000000..fd2357f4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/integration/camget_test.go @@ -0,0 +1,215 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "camlistore.org/pkg/test" + "camlistore.org/pkg/test/asserts" +) + +// Test that `camget -o' can restore a symlink correctly. 
+func TestCamgetSymlink(t *testing.T) { + w := test.GetWorld(t) + + srcDir, err := ioutil.TempDir("", "camget-test-") + if err != nil { + t.Fatalf("ioutil.TempDir(): %v", err) + } + defer os.RemoveAll(srcDir) + + targetBase := "a" + target := filepath.Join(srcDir, targetBase) + targetFD, err := os.Create(target) + if err != nil { + t.Fatalf("os.Create(): %v", err) + } + targetFD.Close() + + subdirBase := "child" + subdirName := filepath.Join(srcDir, subdirBase) + linkBase := "b" + linkName := filepath.Join(subdirName, linkBase) + err = os.Mkdir(subdirName, 0777) + if err != nil { + t.Fatalf("os.Mkdir(): %v", err) + } + + err = os.Symlink("../"+targetBase, linkName) + if err != nil { + t.Fatalf("os.Symlink(): %v", err) + } + + out := test.MustRunCmd(t, w.Cmd("camput", "file", srcDir)) + // TODO(mpl): rm call and delete pkg. + asserts.ExpectBool(t, true, out != "", "camput") + br := strings.Split(out, "\n")[0] + dstDir, err := ioutil.TempDir("", "camget-test-") + if err != nil { + t.Fatalf("ioutil.TempDir(): %v", err) + } + defer os.RemoveAll(dstDir) + + // Now restore the symlink + _ = test.MustRunCmd(t, w.Cmd("camget", "-o", dstDir, br)) + + symlink := filepath.Join(dstDir, filepath.Base(srcDir), subdirBase, + linkBase) + link, err := os.Readlink(symlink) + if err != nil { + t.Fatalf("os.Readlink(): %v", err) + } + expected := "../a" + if expected != link { + t.Fatalf("os.Readlink(): Expected: %s, got %s", expected, + link) + } + + // Ensure that the link is not broken + _, err = os.Stat(symlink) + if err != nil { + t.Fatalf("os.Stat(): %v", err) + } +} + +// Test that `camget -o' can restore a fifo correctly. 
+func TestCamgetFIFO(t *testing.T) { + if runtime.GOOS == "windows" { + t.SkipNow() + } + + fifo, cleanup := mkTmpFIFO(t) + defer cleanup() + + // Upload the fifo + w := test.GetWorld(t) + out := test.MustRunCmd(t, w.Cmd("camput", "file", fifo)) + br := strings.Split(out, "\n")[0] + + // Try and get it back + tdir, err := ioutil.TempDir("", "fifo-test-") + if err != nil { + t.Fatalf("ioutil.TempDir(): %v", err) + } + defer os.RemoveAll(tdir) + test.MustRunCmd(t, w.Cmd("camget", "-o", tdir, br)) + + // Ensure it is actually a fifo + name := filepath.Join(tdir, filepath.Base(fifo)) + fi, err := os.Lstat(name) + if err != nil { + t.Fatalf("os.Lstat(): %v", err) + } + if mask := fi.Mode() & os.ModeNamedPipe; mask == 0 { + t.Fatalf("Retrieved file %s: Not a FIFO", name) + } +} + +// Test that `camget -o' can restore a socket correctly. +func TestCamgetSocket(t *testing.T) { + if runtime.GOOS == "windows" { + t.SkipNow() + } + + socket, cleanup := mkTmpSocket(t) + defer cleanup() + + // Upload the socket + w := test.GetWorld(t) + out := test.MustRunCmd(t, w.Cmd("camput", "file", socket)) + br := strings.Split(out, "\n")[0] + + // Try and get it back + tdir, err := ioutil.TempDir("", "socket-test-") + if err != nil { + t.Fatalf("ioutil.TempDir(): %v", err) + } + defer os.RemoveAll(tdir) + test.MustRunCmd(t, w.Cmd("camget", "-o", tdir, br)) + + // Ensure it is actually a socket + name := filepath.Join(tdir, filepath.Base(socket)) + fi, err := os.Lstat(name) + if err != nil { + t.Fatalf("os.Lstat(): %v", err) + } + if mask := fi.Mode() & os.ModeSocket; mask == 0 { + t.Fatalf("Retrieved file %s: Not a socket", name) + } +} + +// Test that: +// 1) `camget -contents' can restore a regular file correctly. +// 2) if the file already exists, and has the same size as the one held by the server, +// stop early and do not even fetch it from the server. 
+func TestCamgetFile(t *testing.T) { + dirName, err := ioutil.TempDir("", "camli-TestCamgetFile") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dirName) + f, err := os.Create(filepath.Join(dirName, "test.txt")) + if err != nil { + t.Fatal(err) + } + filename := f.Name() + contents := "not empty anymore" + if _, err := f.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + outDir := filepath.Join(dirName, "fetched") + if err := os.Mkdir(outDir, 0700); err != nil { + t.Fatal(err) + } + + w := test.GetWorld(t) + out := test.MustRunCmd(t, w.Cmd("camput", "file", filename)) + + br := strings.Split(out, "\n")[0] + _ = test.MustRunCmd(t, w.Cmd("camget", "-o", outDir, "-contents", br)) + + fetchedName := filepath.Join(outDir, "test.txt") + b, err := ioutil.ReadFile(fetchedName) + if err != nil { + t.Fatal(err) + } + if string(b) != contents { + t.Fatalf("fetched file different from original file, got contents %q, wanted %q", b, contents) + } + + var stderr bytes.Buffer + c := w.Cmd("camget", "-o", outDir, "-contents", "-verbose", br) + c.Stderr = &stderr + if err := c.Run(); err != nil { + t.Fatalf("running second camget: %v", err) + } + if !strings.Contains(stderr.String(), fmt.Sprintf("Skipping %s; already exists.", fetchedName)) { + t.Fatal(errors.New("Was expecting info message about local file already existing")) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/integration/camlistore_test.go b/vendor/github.com/camlistore/camlistore/pkg/test/integration/camlistore_test.go new file mode 100644 index 00000000..5b9370b1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/integration/camlistore_test.go @@ -0,0 +1,242 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "bufio" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "camlistore.org/pkg/test" + "camlistore.org/third_party/github.com/gorilla/websocket" +) + +// Test that running: +// $ camput permanode +// ... creates and uploads a permanode, and that we can camget it back. +func TestCamputPermanode(t *testing.T) { + w := test.GetWorld(t) + br := w.NewPermanode(t) + + out := test.MustRunCmd(t, w.Cmd("camget", br.String())) + mustHave := []string{ + `{"camliVersion": 1,`, + `"camliSigner": "`, + `"camliType": "permanode",`, + `random": "`, + `,"camliSig":"`, + } + for _, str := range mustHave { + if !strings.Contains(out, str) { + t.Errorf("Expected permanode response to contain %q; it didn't. 
Got: %s", str, out) + } + } +} + +func TestWebsocketQuery(t *testing.T) { + w := test.GetWorld(t) + pn := w.NewPermanode(t) + test.MustRunCmd(t, w.Cmd("camput", "attr", pn.String(), "tag", "foo")) + + check := func(err error) { + if err != nil { + t.Fatal(err) + } + } + + const bufSize = 1 << 20 + + c, err := net.Dial("tcp", w.Addr()) + if err != nil { + t.Fatalf("Dial: %v", err) + } + + wc, _, err := websocket.NewClient(c, &url.URL{Host: w.Addr(), Path: w.SearchHandlerPath() + "ws"}, nil, bufSize, bufSize) + check(err) + + msg, err := wc.NextWriter(websocket.TextMessage) + check(err) + + _, err = msg.Write([]byte(`{"tag": "foo", "query": { "expression": "tag:foo" }}`)) + check(err) + check(msg.Close()) + + errc := make(chan error, 1) + go func() { + inType, inMsg, err := wc.ReadMessage() + if err != nil { + errc <- err + return + } + if !strings.HasPrefix(string(inMsg), `{"tag":"_status"`) { + errc <- fmt.Errorf("unexpected message type=%d msg=%q, wanted status update", inType, inMsg) + return + } + inType, inMsg, err = wc.ReadMessage() + if err != nil { + errc <- err + return + } + if strings.Contains(string(inMsg), pn.String()) { + errc <- nil + return + } + errc <- fmt.Errorf("unexpected message type=%d msg=%q", inType, inMsg) + }() + select { + case err := <-errc: + if err != nil { + t.Error(err) + } + case <-time.After(5 * time.Second): + t.Error("timeout") + } +} + +func TestInternalHandler(t *testing.T) { + w := test.GetWorld(t) + tests := map[string]int{ + "/no-http-storage/": 401, + "/no-http-handler/": 401, + "/good-status/": 200, + "/bs-and-maybe-also-index/camli": 400, + "/bs/camli/sha1-b2201302e129a4396a323cb56283cddeef11bbe8": 404, + "/no-http-storage/camli/sha1-b2201302e129a4396a323cb56283cddeef11bbe8": 401, + } + for suffix, want := range tests { + res, err := http.Get(w.ServerBaseURL() + suffix) + if err != nil { + t.Fatalf("On %s: %v", suffix, err) + } + if res.StatusCode != want { + t.Errorf("For %s: Status = %d; want %d", suffix, 
res.StatusCode, want) + } + res.Body.Close() + } +} + +func mustTempDir(t *testing.T) (name string, cleanup func()) { + dir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + return dir, func() { os.RemoveAll(dir) } +} + +func mustWriteFile(t *testing.T, path, contents string) { + err := ioutil.WriteFile(path, []byte(contents), 0644) + if err != nil { + t.Fatal(err) + } +} + +// Run camput in the environment it runs in under the Android app. +// This matches how camput is used in UploadThread.java. +func TestAndroidCamputFile(t *testing.T) { + w := test.GetWorld(t) + // UploadThread.java sets: + // CAMLI_AUTH (set by w.CmdWithEnv) + // CAMLI_TRUSTED_CERT (not needed) + // CAMLI_CACHE_DIR + // CAMPUT_ANDROID_OUTPUT=1 + cacheDir, clean := mustTempDir(t) + defer clean() + env := []string{ + "CAMPUT_ANDROID_OUTPUT=1", + "CAMLI_CACHE_DIR=" + cacheDir, + } + cmd := w.CmdWithEnv("camput", + env, + "--server="+w.ServerBaseURL(), + "file", + "-stdinargs", + "-vivify") + cmd.Stderr = os.Stderr + in, err := cmd.StdinPipe() + if err != nil { + t.Fatal(err) + } + out, err := cmd.StdoutPipe() + if err != nil { + t.Fatal(err) + } + if err := w.Ping(); err != nil { + t.Fatal(err) + } + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + defer cmd.Process.Kill() + + srcDir, clean := mustTempDir(t) + defer clean() + + file1 := filepath.Join(srcDir, "file1.txt") + mustWriteFile(t, file1, "contents 1") + file2 := filepath.Join(srcDir, "file2.txt") + mustWriteFile(t, file2, "contents 2 longer length") + + go func() { + fmt.Fprintf(in, "%s\n", file1) + fmt.Fprintf(in, "%s\n", file2) + }() + + waitc := make(chan error) + go func() { + sc := bufio.NewScanner(out) + fileUploaded := 0 + for sc.Scan() { + t.Logf("Got: %q", sc.Text()) + f := strings.Fields(sc.Text()) + if len(f) == 0 { + t.Logf("empty text?") + continue + } + if f[0] == "FILE_UPLOADED" { + fileUploaded++ + if fileUploaded == 2 { + break + } + } + } + in.Close() + if err := sc.Err(); err != nil { + 
t.Error(err) + } + }() + + defer cmd.Process.Kill() + go func() { + waitc <- cmd.Wait() + }() + select { + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for camput to end") + case err := <-waitc: + if err != nil { + t.Errorf("camput exited uncleanly: %v", err) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/integration/camput_test.go b/vendor/github.com/camlistore/camlistore/pkg/test/integration/camput_test.go new file mode 100644 index 00000000..a60e1733 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/integration/camput_test.go @@ -0,0 +1,105 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/test" +) + +// mkTmpFIFO makes a fifo in a temporary directory and returns the +// path it and a function to clean-up when done. +func mkTmpFIFO(t *testing.T) (path string, cleanup func()) { + tdir, err := ioutil.TempDir("", "fifo-test-") + if err != nil { + t.Fatalf("iouti.TempDir(): %v", err) + } + cleanup = func() { + os.RemoveAll(tdir) + } + + path = filepath.Join(tdir, "fifo") + err = osutil.Mkfifo(path, 0660) + if err != nil { + t.Fatalf("osutil.mkfifo(): %v", err) + } + + return +} + +// Test that `camput' can upload fifos correctly. 
+func TestCamputFIFO(t *testing.T) { + if runtime.GOOS == "windows" { + t.SkipNow() + } + + fifo, cleanup := mkTmpFIFO(t) + defer cleanup() + + // Can we successfully upload a fifo? + w := test.GetWorld(t) + out := test.MustRunCmd(t, w.Cmd("camput", "file", fifo)) + + br := strings.Split(out, "\n")[0] + out = test.MustRunCmd(t, w.Cmd("camget", br)) + t.Logf("Retrieved stored fifo schema: %s", out) +} + +// mkTmpSocket makes a socket in a temporary directory and returns the +// path to it and a function to clean-up when done. +func mkTmpSocket(t *testing.T) (path string, cleanup func()) { + tdir, err := ioutil.TempDir("", "socket-test-") + if err != nil { + t.Fatalf("iouti.TempDir(): %v", err) + } + cleanup = func() { + os.RemoveAll(tdir) + } + + path = filepath.Join(tdir, "socket") + err = osutil.Mksocket(path) + if err != nil { + t.Fatalf("osutil.Mksocket(): %v", err) + } + + return +} + +// Test that `camput' can upload sockets correctly. +func TestCamputSocket(t *testing.T) { + if runtime.GOOS == "windows" { + t.SkipNow() + } + + socket, cleanup := mkTmpSocket(t) + defer cleanup() + + // Can we successfully upload a socket? + w := test.GetWorld(t) + out := test.MustRunCmd(t, w.Cmd("camput", "file", socket)) + + br := strings.Split(out, "\n")[0] + out = test.MustRunCmd(t, w.Cmd("camget", br)) + t.Logf("Retrieved stored socket schema: %s", out) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/integration/diskpacked_test.go b/vendor/github.com/camlistore/camlistore/pkg/test/integration/diskpacked_test.go new file mode 100644 index 00000000..d6846ac6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/integration/diskpacked_test.go @@ -0,0 +1,101 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "bufio" + "math/rand" + "os" + "path/filepath" + "testing" + + "camlistore.org/pkg/test" +) + +var ( + testFileRel = filepath.Join("pkg", "test", "integration", "100M.dat") + testFileSize = 100 * 1024 * 1024 +) + +func BenchmarkLocal(b *testing.B) { + benchmarkWrite(b, "bench-localdisk-server-config.json") +} + +func BenchmarkDiskpacked(b *testing.B) { + benchmarkWrite(b, "bench-diskpacked-server-config.json") +} + +func benchmarkWrite(b *testing.B, cfg string) { + w, err := test.WorldFromConfig(cfg) + if err != nil { + b.Fatalf("could not create server for config: %v\nError: %v", cfg, err) + } + testFile := filepath.Join(w.CamliSourceRoot(), testFileRel) + createTestFile(b, testFile, testFileSize) + defer os.Remove(testFile) + b.ResetTimer() + b.StopTimer() + for i := 0; i < b.N; i++ { + err = w.Start() + if err != nil { + b.Fatalf("could not start server for config: %v\nError: %v", cfg, err) + } + b.StartTimer() + test.MustRunCmd(b, w.Cmd("camput", "file", testFile)) + b.StopTimer() + w.Stop() + } + + b.SetBytes(int64(testFileSize)) +} + +func createTestFile(tb testing.TB, file string, n int) { + f, err := os.Create(file) + if err != nil { + tb.Fatal(err) + } + w := bufio.NewWriter(f) + tot := 0 + var b [8]byte + for tot < n { + c := rand.Int63() + b = [8]byte{ + byte(c), + byte(c >> 8), + byte(c >> 16), + byte(c >> 24), + byte(c >> 32), + byte(c >> 40), + byte(c >> 48), + byte(c >> 56), + } + wn, err := w.Write(b[:]) + if err != nil { + tb.Fatal(err) + } + if wn < len(b) { + tb.Fatalf("short write, got %d expected %d", wn, 
len(b)) + } + tot += wn + } + if err := w.Flush(); err != nil { + tb.Fatal(err) + } + if err := f.Close(); err != nil { + tb.Fatal(err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/integration/integration.go b/vendor/github.com/camlistore/camlistore/pkg/test/integration/integration.go new file mode 100644 index 00000000..a4dd88ba --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/integration/integration.go @@ -0,0 +1,3 @@ +package integration + +// Dummy stub file. Required as of Go tip (pre-Go 1.3)? diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/integration/non-utf8_test.go b/vendor/github.com/camlistore/camlistore/pkg/test/integration/non-utf8_test.go new file mode 100644 index 00000000..5e1b80c1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/integration/non-utf8_test.go @@ -0,0 +1,121 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "bytes" + "encoding/hex" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "camlistore.org/pkg/test" +) + +var nonUTF8 = "416c697ae965202d204d6f69204c6f6c6974612e6d7033" // hex-encoding + +func tempDir(t *testing.T) (path string, cleanup func()) { + path, err := ioutil.TempDir("", "camtest-") + if err != nil { + t.Fatalf("ioutil.TempDir(): %v", err) + } + + cleanup = func() { + os.RemoveAll(path) + } + + return +} + +// Test that we can camput and camget a file whose name is not utf8, +// that we don't panic in the process and that the results are +// correct. +func TestNonUTF8FileName(t *testing.T) { + srcDir, cleanup := tempDir(t) + defer cleanup() + + base, err := hex.DecodeString(nonUTF8) + if err != nil { + t.Fatalf("hex.DecodeString(): %v", err) + } + + fd, err := os.Create(filepath.Join(srcDir, string(base))) + if err != nil { + t.Fatalf("os.Create(): %v", err) + } + fd.Close() + + w := test.GetWorld(t) + out := test.MustRunCmd(t, w.Cmd("camput", "file", fd.Name())) + br := strings.Split(out, "\n")[0] + + // camput was a success. Can we get the file back in another directory? + dstDir, cleanup := tempDir(t) + defer cleanup() + + _ = test.MustRunCmd(t, w.Cmd("camget", "-o", dstDir, br)) + _, err = os.Lstat(filepath.Join(dstDir, string(base))) + if err != nil { + t.Fatalf("Failed to stat file %s in directory %s", + fd.Name(), dstDir) + } +} + +// Test that we can camput and camget a symbolic link whose target is +// not utf8, that we do no panic in the process and that the results +// are correct. 
+func TestNonUTF8SymlinkTarget(t *testing.T) { + srcDir, cleanup := tempDir(t) + defer cleanup() + + base, err := hex.DecodeString(nonUTF8) + if err != nil { + t.Fatalf("hex.DecodeString(): %v", err) + } + + fd, err := os.Create(filepath.Join(srcDir, string(base))) + if err != nil { + t.Fatalf("os.Create(): %v", err) + } + defer fd.Close() + + err = os.Symlink(string(base), filepath.Join(srcDir, "link")) + if err != nil { + t.Fatalf("os.Symlink(): %v", err) + } + + w := test.GetWorld(t) + out := test.MustRunCmd(t, w.Cmd("camput", "file", filepath.Join(srcDir, "link"))) + br := strings.Split(out, "\n")[0] + + // See if we can camget it back correctly + dstDir, cleanup := tempDir(t) + defer cleanup() + + _ = test.MustRunCmd(t, w.Cmd("camget", "-o", dstDir, br)) + target, err := os.Readlink(filepath.Join(dstDir, "link")) + if err != nil { + t.Fatalf("os.Readlink(): %v", err) + } + + if !bytes.Equal([]byte(target), base) { + t.Fatalf("Retrieved symlink contains points to unexpected target") + } + +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/integration/share_test.go b/vendor/github.com/camlistore/camlistore/pkg/test/integration/share_test.go new file mode 100644 index 00000000..06c38a66 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/integration/share_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "camlistore.org/pkg/test" +) + +func TestFileSharing(t *testing.T) { + share(t, "share_test.go") +} + +func TestDirSharing(t *testing.T) { + share(t, filepath.FromSlash("../integration")) +} + +func share(t *testing.T, file string) { + w := test.GetWorld(t) + out := test.MustRunCmd(t, w.Cmd("camput", "file", file)) + fileRef := strings.Split(out, "\n")[0] + + out = test.MustRunCmd(t, w.Cmd("camput", "share", "-transitive", fileRef)) + shareRef := strings.Split(out, "\n")[0] + + testDir, err := ioutil.TempDir("", "camli-share-test-") + if err != nil { + t.Fatalf("ioutil.TempDir(): %v", err) + } + defer os.RemoveAll(testDir) + + // test that we can get it through the share + test.MustRunCmd(t, w.Cmd("camget", "-o", testDir, "-shared", fmt.Sprintf("%v/share/%v", w.ServerBaseURL(), shareRef))) + filePath := filepath.Join(testDir, filepath.Base(file)) + fi, err := os.Stat(filePath) + if err != nil { + t.Fatalf("camget -shared failed to get %v: %v", file, err) + } + if fi.IsDir() { + // test that we also get the dir contents + d, err := os.Open(filePath) + if err != nil { + t.Fatal(err) + } + defer d.Close() + names, err := d.Readdirnames(-1) + if err != nil { + t.Fatal(err) + } + if len(names) == 0 { + t.Fatalf("camget did not fetch contents of directory %v", file) + } + } + + // test that we're not allowed to get it directly + fileURL := fmt.Sprintf("%v/share/%v", w.ServerBaseURL(), fileRef) + _, err = test.RunCmd(w.Cmd("camget", "-shared", fileURL)) + if err == nil { + t.Fatal("Was expecting error for 'camget -shared " + fileURL + "'") + } + if !strings.Contains(err.Error(), "client: got status code 401") { + t.Fatalf("'camget -shared %v': got error %v, was expecting 401", fileURL, err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/integration/z_test.go b/vendor/github.com/camlistore/camlistore/pkg/test/integration/z_test.go new file 
mode 100644 index 00000000..0f657d3d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/integration/z_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2013 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "testing" + + "camlistore.org/pkg/test" +) + +// Make sure that the camlistored process started +// by the World gets terminated when all the tests +// are done. +// This works only as long as TestZLastTest is the +// last test to run in the package. +func TestZLastTest(t *testing.T) { + test.GetWorldMaybe(t).Stop() +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/loader.go b/vendor/github.com/camlistore/camlistore/pkg/test/loader.go new file mode 100644 index 00000000..1415b8af --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/loader.go @@ -0,0 +1,100 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "errors" + "log" + "strings" + "sync" + + "camlistore.org/pkg/blobserver" +) + +// NewLoader +func NewLoader() *Loader { + return &Loader{} +} + +type Loader struct { + mu sync.Mutex + sto map[string]blobserver.Storage +} + +var _ blobserver.Loader = (*Loader)(nil) + +func (ld *Loader) FindHandlerByType(handlerType string) (prefix string, handler interface{}, err error) { + panic("NOIMPL") +} + +func (ld *Loader) AllHandlers() (map[string]string, map[string]interface{}) { + panic("NOIMPL") +} + +func (ld *Loader) MyPrefix() string { + return "/lies/" +} + +func (ld *Loader) BaseURL() string { + return "http://localhost:1234" +} + +func (ld *Loader) GetHandlerType(prefix string) string { + log.Printf("test.Loader: GetHandlerType called but not implemented.") + return "" +} + +func (ld *Loader) GetHandler(prefix string) (interface{}, error) { + log.Printf("test.Loader: GetHandler called but not implemented.") + return nil, errors.New("doesn't exist") +} + +func (ld *Loader) SetStorage(prefix string, s blobserver.Storage) { + ld.mu.Lock() + defer ld.mu.Unlock() + if ld.sto == nil { + ld.sto = make(map[string]blobserver.Storage) + } + ld.sto[prefix] = s +} + +func (ld *Loader) GetStorage(prefix string) (blobserver.Storage, error) { + ld.mu.Lock() + defer ld.mu.Unlock() + if bs, ok := ld.sto[prefix]; ok { + return bs, nil + } + if ld.sto == nil { + ld.sto = make(map[string]blobserver.Storage) + } + sto, err := ld.genStorage(prefix) + if err != nil { + return nil, err + } + ld.sto[prefix] = sto + return sto, nil +} + +func (ld *Loader) genStorage(prefix string) (blobserver.Storage, error) { + if strings.HasPrefix(prefix, "/good") { + return &Fetcher{}, nil + } + if strings.HasPrefix(prefix, "/fail") { + return &Fetcher{ReceiveErr: errors.New("test.Loader intentional failure for /fail storage handler")}, nil + } + panic("test.Loader.GetStorage: unrecognized prefix type") +} diff --git 
a/vendor/github.com/camlistore/camlistore/pkg/test/test.go b/vendor/github.com/camlistore/camlistore/pkg/test/test.go new file mode 100644 index 00000000..19e4ff8b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/test.go @@ -0,0 +1,70 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "log" + "os" + "strconv" + "strings" + "testing" +) + +// BrokenTest marks the test as broken and calls t.Skip, unless the environment +// variable RUN_BROKEN_TESTS is set to 1 (or some other boolean true value). +func BrokenTest(t *testing.T) { + if v, _ := strconv.ParseBool(os.Getenv("RUN_BROKEN_TESTS")); !v { + t.Skipf("Skipping broken tests without RUN_BROKEN_TESTS=1") + } +} + +// TLog changes the log package's output to log to t and returns a function +// to reset it back to stderr. +func TLog(t testing.TB) func() { + log.SetOutput(twriter{t: t}) + return func() { + log.SetOutput(os.Stderr) + } +} + +type twriter struct { + t testing.TB + quietPhrases []string +} + +func (w twriter) Write(p []byte) (n int, err error) { + if len(w.quietPhrases) > 0 { + s := string(p) + for _, phrase := range w.quietPhrases { + if strings.Contains(s, phrase) { + return len(p), nil + } + } + } + if w.t != nil { + w.t.Log(strings.TrimSuffix(string(p), "\n")) + } + return len(p), nil +} + +// NewLogger returns a logger that logs to t with the given prefix. 
+// +// The optional quietPhrases are substrings to match in writes to +// determine whether those log messages are muted. +func NewLogger(t *testing.T, prefix string, quietPhrases ...string) *log.Logger { + return log.New(twriter{t: t, quietPhrases: quietPhrases}, prefix, log.LstdFlags) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/test_test.go b/vendor/github.com/camlistore/camlistore/pkg/test/test_test.go new file mode 100644 index 00000000..169f6330 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/test_test.go @@ -0,0 +1,56 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test_test + +import ( + "log" + "reflect" + "testing" + + "camlistore.org/pkg/index" + . 
"camlistore.org/pkg/test" +) + +var _ index.Interface = (*FakeIndex)(nil) + +type tbLogger struct { + testing.TB + log []string +} + +func (l *tbLogger) Log(args ...interface{}) { + l.log = append(l.log, args[0].(string)) +} + +func TestTLog(t *testing.T) { + tb := new(tbLogger) + defer TLog(tb)() + defer log.SetFlags(log.Flags()) + log.SetFlags(0) + + log.Printf("hello") + log.Printf("hello\n") + log.Printf("some text\nand more text\n") + want := []string{ + "hello", + "hello", + "some text\nand more text", + } + if !reflect.DeepEqual(tb.log, want) { + t.Errorf("Got %q; want %q", tb.log, want) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/testdata/bench-diskpacked-server-config.json b/vendor/github.com/camlistore/camlistore/pkg/test/testdata/bench-diskpacked-server-config.json new file mode 100644 index 00000000..7a26fe00 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/testdata/bench-diskpacked-server-config.json @@ -0,0 +1,20 @@ +{ + "handlerConfig": true, + "auth": "userpass:testuser:passTestWorld:+localhost", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/bs/": { + "handler": "storage-diskpacked", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT}"] + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/testdata/bench-localdisk-server-config.json b/vendor/github.com/camlistore/camlistore/pkg/test/testdata/bench-localdisk-server-config.json new file mode 100644 index 00000000..e3d45c32 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/testdata/bench-localdisk-server-config.json @@ -0,0 +1,20 @@ +{ + "handlerConfig": true, + "auth": "userpass:testuser:passTestWorld:+localhost", + "https": false, + "listen": "localhost:3179", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "blobRoot": "/bs/" + } + }, + "/bs/": { + "handler": "storage-filesystem", + 
"handlerArgs": { + "path": ["_env", "${CAMLI_ROOT}"] + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/testdata/server-config.json b/vendor/github.com/camlistore/camlistore/pkg/test/testdata/server-config.json new file mode 100644 index 00000000..9ab2ea9b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/testdata/server-config.json @@ -0,0 +1,90 @@ +{ "_for-emacs": "-*- mode: js2;-*-", + "handlerConfig": true, + "https": false, + "baseURL": ["_env", "${CAMLI_BASE_URL}"], + "auth": "userpass:testuser:passTestWorld:+localhost", + "prefixes": { + "/": { + "handler": "root", + "handlerArgs": { + "ownerName": "test", + "blobRoot": "/bs-and-maybe-also-index/", + "helpRoot": "/help/", + "statusRoot": "/status/", + "searchRoot": "/my-search/", + "stealth": false + } + }, + + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": ["/bs/", "/index-mem/"] + } + }, + + "/no-http-storage/": { + "internal": true, + "handler": "storage-replica", + "handlerArgs": { + "backends": ["/bs/"] + } + }, + + "/no-http-handler/": { + "internal": true, + "handler": "status" + }, + + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "write": { + "if": "isSchema", + "then": "/bs-and-index/", + "else": "/bs/" + }, + "read": "/bs/" + } + }, + + "/bs/": { + "handler": "storage-filesystem", + "handlerArgs": { + "path": ["_env", "${CAMLI_ROOT}"] + } + }, + + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "secretRing": ["_env", "${CAMLI_SECRET_RING}"], + "keyId": "26F5ABDA", + "publicKeyDest": "/bs/" + } + }, + + "/index-mem/": { + "handler": "storage-memory-only-dev-indexer", + "handlerArgs": { + "blobSource": "/bs/" + } + }, + + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/index-mem/", + "slurpToMemory": true, + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4" + } + }, + + "/share/": { + "handler": "share", + "handlerArgs": { + "blobRoot": 
"/bs/" + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/testdep.go b/vendor/github.com/camlistore/camlistore/pkg/test/testdep.go new file mode 100644 index 00000000..7e693231 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/testdep.go @@ -0,0 +1,34 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "os" + "strconv" + "testing" +) + +// DependencyErrorOrSkip is called when a test's dependency +// isn't found. It either skips the current test (if SKIP_DEP_TESTS is set), +// or calls t.Error with an error. +func DependencyErrorOrSkip(t *testing.T) { + b, _ := strconv.ParseBool(os.Getenv("SKIP_DEP_TESTS")) + if b { + t.Skip("SKIP_DEP_TESTS is set; skipping test.") + } + t.Error("External test dependencies not found, and environment SKIP_DEP_TESTS not set.") +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/wait.go b/vendor/github.com/camlistore/camlistore/pkg/test/wait.go new file mode 100644 index 00000000..a26faa30 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/wait.go @@ -0,0 +1,33 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import "time" + +// WaitFor returns true if condition returns true before maxWait. +// It is checked immediately, and then every checkInterval. +func WaitFor(condition func() bool, maxWait, checkInterval time.Duration) bool { + t0 := time.Now() + tmax := t0.Add(maxWait) + for time.Now().Before(tmax) { + if condition() { + return true + } + time.Sleep(checkInterval) + } + return false +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/test/world.go b/vendor/github.com/camlistore/camlistore/pkg/test/world.go new file mode 100644 index 00000000..b3c4b79f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/test/world.go @@ -0,0 +1,377 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync/atomic" + "testing" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/osutil" +) + +// World defines an integration test world. 
+// +// It's used to run the actual Camlistore binaries (camlistored, +// camput, camget, camtool, etc) together in large tests, including +// building them, finding them, and wiring them up in an isolated way. +type World struct { + camRoot string // typically $GOPATH[0]/src/camlistore.org + config string // server config file relative to pkg/test/testdata + tempDir string + listener net.Listener // randomly chosen 127.0.0.1 port for the server + port int + + server *exec.Cmd + isRunning int32 // state of the camlistored server. Access with sync/atomic only. + serverErr error + + cammount *os.Process +} + +// CamliSourceRoot returns the root of the source tree, or an error. +func camliSourceRoot() (string, error) { + if os.Getenv("GOPATH") == "" { + return "", errors.New("GOPATH environment variable isn't set; required to run Camlistore integration tests") + } + root, err := osutil.GoPackagePath("camlistore.org") + if err == os.ErrNotExist { + return "", errors.New("Directory \"camlistore.org\" not found under GOPATH/src; can't run Camlistore integration tests.") + } + return root, nil +} + +// NewWorld returns a new test world. +// It requires that GOPATH is set to find the "camlistore.org" root. +func NewWorld() (*World, error) { + return WorldFromConfig("server-config.json") +} + +// WorldFromConfig returns a new test world based on the given configuration file. +// This cfg is the server config relative to pkg/test/testdata. +// It requires that GOPATH is set to find the "camlistore.org" root. +func WorldFromConfig(cfg string) (*World, error) { + root, err := camliSourceRoot() + if err != nil { + return nil, err + } + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + + return &World{ + camRoot: root, + config: cfg, + listener: ln, + port: ln.Addr().(*net.TCPAddr).Port, + }, nil +} + +func (w *World) Addr() string { + return w.listener.Addr().String() +} + +// CamliSourceRoot returns the root of the source tree. 
+func (w *World) CamliSourceRoot() string { + return w.camRoot +} + +// Start builds the Camlistore binaries and starts a server. +func (w *World) Start() error { + var err error + w.tempDir, err = ioutil.TempDir("", "camlistore-test-") + if err != nil { + return err + } + // Build. + { + targs := []string{ + "camget", + "camput", + "camtool", + "camlistored", + } + // TODO(mpl): investigate why we still rebuild camlistored everytime if run through devcam test. + // it looks like it's because we always resync the UI files and hence redo the embeds. Next CL. + var latestModtime time.Time + for _, target := range targs { + binPath := filepath.Join(w.camRoot, "bin", target) + fi, err := os.Stat(binPath) + if err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("could not stat %v: %v", binPath, err) + } + } else { + modTime := fi.ModTime() + if modTime.After(latestModtime) { + latestModtime = modTime + } + } + } + cmd := exec.Command("go", "run", "make.go", + fmt.Sprintf("--if_mods_since=%d", latestModtime.Unix()), + ) + if testing.Verbose() { + // TODO(mpl): do the same when -verbose with devcam test. Even better: see if testing.Verbose + // can be made true if devcam test -verbose ? + cmd.Args = append(cmd.Args, "-v=true") + } + cmd.Dir = w.camRoot + log.Print("Running make.go to build camlistore binaries for testing...") + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Error building world: %v, %s", err, string(out)) + } + if testing.Verbose() { + log.Printf("%s\n", out) + } + log.Print("Ran make.go.") + } + // Start camlistored. 
+ { + w.server = exec.Command( + filepath.Join(w.camRoot, "bin", "camlistored"), + "--openbrowser=false", + "--configfile="+filepath.Join(w.camRoot, "pkg", "test", "testdata", w.config), + "--listen=FD:3", + "--pollparent=true", + ) + var buf bytes.Buffer + if testing.Verbose() { + w.server.Stdout = os.Stdout + w.server.Stderr = os.Stderr + } else { + w.server.Stdout = &buf + w.server.Stderr = &buf + } + w.server.Dir = w.tempDir + w.server.Env = append(os.Environ(), + "CAMLI_DEBUG=1", + "CAMLI_ROOT="+w.tempDir, + "CAMLI_SECRET_RING="+filepath.Join(w.camRoot, filepath.FromSlash("pkg/jsonsign/testdata/test-secring.gpg")), + "CAMLI_BASE_URL=http://127.0.0.1:"+strconv.Itoa(w.port), + ) + listenerFD, err := w.listener.(*net.TCPListener).File() + if err != nil { + return err + } + w.server.ExtraFiles = []*os.File{listenerFD} + if err := w.server.Start(); err != nil { + w.serverErr = fmt.Errorf("starting camlistored: %v", err) + return w.serverErr + } + + atomic.StoreInt32(&w.isRunning, 1) + waitc := make(chan error, 1) + go func() { + err := w.server.Wait() + w.serverErr = fmt.Errorf("%v: %s", err, buf.String()) + atomic.StoreInt32(&w.isRunning, 0) + waitc <- w.serverErr + }() + upc := make(chan bool) + timeoutc := make(chan bool) + go func() { + for i := 0; i < 100; i++ { + res, err := http.Get("http://127.0.0.1:" + strconv.Itoa(w.port)) + if err == nil { + res.Body.Close() + upc <- true + return + } + time.Sleep(50 * time.Millisecond) + } + w.serverErr = errors.New(buf.String()) + atomic.StoreInt32(&w.isRunning, 0) + timeoutc <- true + }() + + select { + case <-waitc: + return fmt.Errorf("server exited: %v", w.serverErr) + case <-timeoutc: + return fmt.Errorf("server never became reachable: %v", w.serverErr) + case <-upc: + if err := w.Ping(); err != nil { + return err + } + // Success. + } + } + return nil +} + +// Ping returns an error if the world's camlistored is not running. 
+func (w *World) Ping() error { + if atomic.LoadInt32(&w.isRunning) != 1 { + return fmt.Errorf("camlistored not running: %v", w.serverErr) + } + return nil +} + +func (w *World) Stop() { + if w == nil { + return + } + if err := w.server.Process.Kill(); err != nil { + log.Fatalf("killed failed: %v", err) + } + + if d := w.tempDir; d != "" { + os.RemoveAll(d) + } +} + +func (w *World) NewPermanode(t *testing.T) blob.Ref { + if err := w.Ping(); err != nil { + t.Fatal(err) + } + out := MustRunCmd(t, w.Cmd("camput", "permanode")) + br, ok := blob.Parse(strings.TrimSpace(out)) + if !ok { + t.Fatalf("Expected permanode in camput stdout; got %q", out) + } + return br +} + +func (w *World) Cmd(binary string, args ...string) *exec.Cmd { + return w.CmdWithEnv(binary, os.Environ(), args...) +} + +func (w *World) CmdWithEnv(binary string, env []string, args ...string) *exec.Cmd { + hasVerbose := func() bool { + for _, v := range args { + if v == "-verbose" || v == "--verbose" { + return true + } + } + return false + } + var cmd *exec.Cmd + switch binary { + case "camget", "camput", "camtool", "cammount": + // TODO(mpl): lift the camput restriction when we have a unified logging mechanism + if binary == "camput" && !hasVerbose() { + // camput and camtool are the only ones to have a -verbose flag through cmdmain + // but camtool is never used. (and cammount does not even have a -verbose). + args = append([]string{"-verbose"}, args...) + } + cmd = exec.Command(filepath.Join(w.camRoot, "bin", binary), args...) + clientConfigDir := filepath.Join(w.camRoot, "config", "dev-client-dir") + cmd.Env = append([]string{ + "CAMLI_CONFIG_DIR=" + clientConfigDir, + // Respected by env expansions in config/dev-client-dir/client-config.json: + "CAMLI_SERVER=" + w.ServerBaseURL(), + "CAMLI_SECRET_RING=" + w.SecretRingFile(), + "CAMLI_KEYID=" + w.ClientIdentity(), + "CAMLI_AUTH=userpass:testuser:passTestWorld", + }, env...) 
+ default: + panic("Unknown binary " + binary) + } + return cmd +} + +func (w *World) ServerBaseURL() string { + return fmt.Sprintf("http://127.0.0.1:%d", w.port) +} + +var theWorld *World + +// GetWorld returns (creating if necessary) a test singleton world. +// It calls Fatal on the provided test if there are problems. +func GetWorld(t *testing.T) *World { + w := theWorld + if w == nil { + var err error + w, err = NewWorld() + if err != nil { + t.Fatalf("Error finding test world: %v", err) + } + err = w.Start() + if err != nil { + t.Fatalf("Error starting test world: %v", err) + } + theWorld = w + } + return w +} + +// GetWorldMaybe returns the current World. It might be nil. +func GetWorldMaybe(t *testing.T) *World { + return theWorld +} + +// RunCmd runs c (which is assumed to be something short-lived, like a +// camput or camget command), capturing its stdout for return, and +// also capturing its stderr, just in the case of errors. +// If there's an error, the return error fully describes the command and +// all output. +func RunCmd(c *exec.Cmd) (output string, err error) { + var stdout, stderr bytes.Buffer + if testing.Verbose() { + c.Stderr = io.MultiWriter(os.Stderr, &stderr) + c.Stdout = io.MultiWriter(os.Stdout, &stdout) + } else { + c.Stderr = &stderr + c.Stdout = &stdout + } + err = c.Run() + if err != nil { + return "", fmt.Errorf("Error running command %+v: Stdout:\n%s\nStderr:\n%s\n", c, stdout.String(), stderr.String()) + } + return stdout.String(), nil +} + +// MustRunCmd wraps RunCmd, failing t if RunCmd returns an error. +func MustRunCmd(t testing.TB, c *exec.Cmd) string { + out, err := RunCmd(c) + if err != nil { + t.Fatal(err) + } + return out +} + +// ClientIdentity returns the GPG identity to use in World tests, suitable +// for setting in CAMLI_KEYID. +func (w *World) ClientIdentity() string { + return "26F5ABDA" +} + +// SecretRingFile returns the GnuPG secret ring, suitable for setting +// in CAMLI_SECRET_RING. 
+func (w *World) SecretRingFile() string { + return filepath.Join(w.camRoot, "pkg", "jsonsign", "testdata", "test-secring.gpg") +} + +// SearchHandlerPath returns the path to the search handler, with trailing slash. +func (w *World) SearchHandlerPath() string { return "/my-search/" } diff --git a/vendor/github.com/camlistore/camlistore/pkg/throttle/throttle.go b/vendor/github.com/camlistore/camlistore/pkg/throttle/throttle.go new file mode 100644 index 00000000..5c697234 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/throttle/throttle.go @@ -0,0 +1,137 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package throttle provides a net.Listener that returns +// artificially-delayed connections for testing real-world +// connectivity. +package throttle + +import ( + "fmt" + "net" + "sync" + "time" +) + +const unitSize = 1400 // read/write chunk size. ~MTU size. + +type Rate struct { + KBps int // or 0, to not rate-limit bandwidth + Latency time.Duration +} + +// byteTime returns the time required for n bytes. 
+func (r Rate) byteTime(n int) time.Duration { + if r.KBps == 0 { + return 0 + } + return time.Duration(float64(n)/1024/float64(r.KBps)) * time.Second +} + +type Listener struct { + net.Listener + Down Rate // server Writes to Client + Up Rate // server Reads from client +} + +func (ln *Listener) Accept() (net.Conn, error) { + c, err := ln.Listener.Accept() + time.Sleep(ln.Up.Latency) + if err != nil { + return nil, err + } + tc := &conn{Conn: c, Down: ln.Down, Up: ln.Up} + tc.start() + return tc, nil +} + +type nErr struct { + n int + err error +} + +type writeReq struct { + writeAt time.Time + p []byte + resc chan nErr +} + +type conn struct { + net.Conn + Down Rate // for reads + Up Rate // for writes + + wchan chan writeReq + closeOnce sync.Once + closeErr error +} + +func (c *conn) start() { + c.wchan = make(chan writeReq, 1024) + go c.writeLoop() +} + +func (c *conn) writeLoop() { + for req := range c.wchan { + time.Sleep(req.writeAt.Sub(time.Now())) + var res nErr + for len(req.p) > 0 && res.err == nil { + writep := req.p + if len(writep) > unitSize { + writep = writep[:unitSize] + } + n, err := c.Conn.Write(writep) + time.Sleep(c.Up.byteTime(len(writep))) + res.n += n + res.err = err + req.p = req.p[n:] + } + req.resc <- res + } +} + +func (c *conn) Close() error { + c.closeOnce.Do(func() { + err := c.Conn.Close() + close(c.wchan) + c.closeErr = err + }) + return c.closeErr +} + +func (c *conn) Write(p []byte) (n int, err error) { + defer func() { + if e := recover(); e != nil { + n = 0 + err = fmt.Errorf("%v", err) + return + } + }() + resc := make(chan nErr, 1) + c.wchan <- writeReq{time.Now().Add(c.Up.Latency), p, resc} + res := <-resc + return res.n, res.err +} + +func (c *conn) Read(p []byte) (n int, err error) { + const max = 1024 + if len(p) > max { + p = p[:max] + } + n, err = c.Conn.Read(p) + time.Sleep(c.Down.byteTime(n)) + return +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/atomics.go 
b/vendor/github.com/camlistore/camlistore/pkg/types/atomics.go new file mode 100644 index 00000000..27669527 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/atomics.go @@ -0,0 +1,55 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "sync/atomic" +) + +// AtomicBool is an atomic boolean. +// It can be accessed from concurrent goroutines. +type AtomicBool struct { + v uint32 // 0 or 1, atomically +} + +func (b *AtomicBool) Get() bool { + return atomic.LoadUint32(&b.v) != 0 +} + +func (b *AtomicBool) Set(v bool) { + if v { + atomic.StoreUint32(&b.v, 1) + return + } + atomic.StoreUint32(&b.v, 0) +} + +type AtomicInt64 struct { + v int64 +} + +func (a *AtomicInt64) Get() int64 { + return atomic.LoadInt64(&a.v) +} + +func (a *AtomicInt64) Set(v int64) { + atomic.StoreInt64(&a.v, v) +} + +func (a *AtomicInt64) Add(delta int64) int64 { + return atomic.AddInt64(&a.v, delta) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/camtypes.go b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/camtypes.go new file mode 100644 index 00000000..438a9930 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/camtypes.go @@ -0,0 +1,20 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package camtypes is like the types package, but higher-level and contains +// Camlistore-specific types. It exists mostly to break circular dependencies +// between index, search, and schema. +package camtypes diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/discovery.go b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/discovery.go new file mode 100644 index 00000000..471b25e3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/discovery.go @@ -0,0 +1,103 @@ +/* +Copyright 2015 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package camtypes + +import ( + "camlistore.org/pkg/blob" + "camlistore.org/pkg/types" +) + +// Discovery is the JSON response for discovery requests. +type Discovery struct { + BlobRoot string `json:"blobRoot"` + JSONSignRoot string `json:"jsonSignRoot"` + HelpRoot string `json:"helpRoot"` + ImporterRoot string `json:"importerRoot"` + SearchRoot string `json:"searchRoot"` + StatusRoot string `json:"statusRoot"` + + OwnerName string `json:"ownerName"` // Name of the owner. 
+ UserName string `json:"userName"` // Name of the user. + + // StorageGeneration is the UUID for the storage generation. + StorageGeneration string `json:"storageGeneration,omitempty"` + // StorageGenerationError is the error that occurred on generating the storage, if any. + StorageGenerationError string `json:"storageGenerationError,omitempty"` + // StorageInitTime is the initialization time of the storage. + StorageInitTime types.Time3339 `json:"storageInitTime,omitempty"` + + ThumbVersion string `json:"thumbVersion"` // Thumbnailing version. + WSAuthToken string `json:"wsAuthToken"` // Authentication token for the WebSocket. + + // SyncHandlers lists discovery information about the available sync handlers. + SyncHandlers []SyncHandlerDiscovery `json:"syncHanlders,omitempty"` + // Signing contains discovery information for signing. + Signing *SignDiscovery `json:"signing,omitempty"` + // UIDiscovery contains discovery information for the UI. + *UIDiscovery +} + +// SignDiscovery contains discovery information for jsonsign. +// It is part of the server's JSON response for discovery requests. +type SignDiscovery struct { + // PublicKey is the path to the public signing key. + PublicKey string `json:"publicKey,omitempty"` + // PublicKeyBlobRef is the blob.Ref for the public key. + PublicKeyBlobRef blob.Ref `json:"publicKeyBlobRef,omitempty"` + // PublicKeyID is the ID of the public key. + PublicKeyID string `json:"publicKeyId"` + // SignHandler is the URL path prefix to the signing handler. + SignHandler string `json:"signHandler"` + // VerifyHandler it the URL path prefix to the signature verification handler. + VerifyHandler string `json:"verifyHandler"` +} + +// SyncHandlerDiscovery contains discovery information about a sync handler. +// It is part of the JSON response to discovery requests. +type SyncHandlerDiscovery struct { + // From is the source of the sync handler. + From string `json:"from"` + // To is the destination of the sync handler. 
+ To string `json:"to"` + // ToIndex is true if the sync is from a blob storage to an index. + ToIndex bool `json:"toIndex"` +} + +// UIDiscovery contains discovery information for the user interface. +// It is part of the JSON response to discovery requests. +type UIDiscovery struct { + // UIRoot is the URL prefix path to the UI handler. + UIRoot string `json:"uiRoot"` + // UploadHelper is the path to the upload helper. + UploadHelper string `json:"uploadHelper"` + // DirectoryHelper is the path to the directory helper. + DirectoryHelper string `json:"directoryHelper"` + // DownloaderHelper is the path to the downloader helper. + DownloadHelper string `json:"downloadHelper"` + // PublishRoots lists discovery information for all publishing roots, + // mapped by the respective root name. + PublishRoots map[string]*PublishRootDiscovery `json:"publishRoots"` +} + +// PublishRootDiscovery contains discovery information for the publish roots. +type PublishRootDiscovery struct { + Name string `json:"name"` + // Prefix lists prefixes belonging to the publishing root. + Prefix []string `json:"prefix"` + // CurrentPermanode is the permanode associated with the publishing root. + CurrentPermanode blob.Ref `json:"currentPermanode"` +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/errors.go b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/errors.go new file mode 100644 index 00000000..0e96fab8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/errors.go @@ -0,0 +1,86 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package camtypes + +import ( + "fmt" + "log" + + "camlistore.org/pkg/osutil" +) + +// TODO(mpl): move pkg/camerrors stuff in here + +var camErrors = map[string]*camErr{} + +var ( + ErrClientNoServer = addCamError("client-no-server", funcStr(func() string { + return fmt.Sprintf("No valid server defined. It can be set with the CAMLI_SERVER environment variable, or the --server flag, or in the \"servers\" section of %q (see https://camlistore.org/docs/client-config).", osutil.UserClientConfigPath()) + })) + ErrClientNoPublicKey = addCamError("client-no-public-key", str("No public key configured: see 'camput init'.")) +) + +type str string + +func (s str) String() string { return string(s) } + +type funcStr func() string + +func (f funcStr) String() string { return f() } + +type camErr struct { + key string + des fmt.Stringer +} + +func (ce *camErr) Error() string { + return ce.des.String() +} + +func (ce *camErr) Fatal() { + log.Fatalf("%v error. See %v", ce.key, ce.URL()) +} + +func (ce *camErr) Warn() { + log.Printf("%v error. See %v.", ce.key, ce.URL()) +} + +func (ce *camErr) URL() string { + return fmt.Sprintf("https://camlistore.org/err/%s", ce.key) +} + +// Err returns the error registered for key. +// It panics for an unregistered key. 
+func Err(key string) error { + v, ok := camErrors[key] + if !ok { + panic(fmt.Sprintf("unknown/unregistered error key %v", key)) + } + return v +} + +func addCamError(key string, des fmt.Stringer) *camErr { + if e, ok := camErrors[key]; ok { + panic(fmt.Sprintf("error %v already registered as %q", key, e.Error())) + } + e := &camErr{ + key: key, + des: des, + } + camErrors[key] = e + return e +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/search.go b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/search.go new file mode 100644 index 00000000..c132680f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/search.go @@ -0,0 +1,254 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package camtypes + +import ( + "bytes" + "fmt" + "path/filepath" + "strings" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/types" +) + +type RecentPermanode struct { + Permanode blob.Ref + Signer blob.Ref // may be zero (!Valid()) + LastModTime time.Time +} + +func (a RecentPermanode) Equal(b RecentPermanode) bool { + return a.Permanode == b.Permanode && + a.Signer == b.Signer && + a.LastModTime.Equal(b.LastModTime) +} + +type Claim struct { + // TODO: document/decide how to represent "multi" claims here. One Claim each? Add Multi in here? + // Move/merge this in with the schema package? 
+ + BlobRef, Signer blob.Ref + + Date time.Time + Type string // "set-attribute", "add-attribute", etc + + // If an attribute modification + Attr, Value string + Permanode blob.Ref + + // If a DeleteClaim or a ShareClaim + Target blob.Ref +} + +func (c *Claim) String() string { + return fmt.Sprintf( + "camtypes.Claim{BlobRef: %s, Signer: %s, Permanode: %s, Date: %s, Type: %s, Attr: %s, Value: %s}", + c.BlobRef, c.Signer, c.Permanode, c.Date, c.Type, c.Attr, c.Value) +} + +type ClaimPtrsByDate []*Claim + +func (cl ClaimPtrsByDate) Len() int { return len(cl) } +func (cl ClaimPtrsByDate) Less(i, j int) bool { return cl[i].Date.Before(cl[j].Date) } +func (cl ClaimPtrsByDate) Swap(i, j int) { cl[i], cl[j] = cl[j], cl[i] } + +type ClaimsByDate []Claim + +func (cl ClaimsByDate) Len() int { return len(cl) } +func (cl ClaimsByDate) Less(i, j int) bool { return cl[i].Date.Before(cl[j].Date) } +func (cl ClaimsByDate) Swap(i, j int) { cl[i], cl[j] = cl[j], cl[i] } + +func (cl ClaimsByDate) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "[%d claims: ", len(cl)) + for _, r := range cl { + buf.WriteString(r.String()) + } + buf.WriteString("]") + return buf.String() +} + +// FileInfo describes a file or directory. +type FileInfo struct { + // FileName is the base name of the file or directory. + FileName string `json:"fileName"` + + // TODO(mpl): I've noticed that Size is actually set to the + // number of entries in the dir. fix the doc or the behaviour? + + // Size is the size of file. It is not set for directories. + Size int64 `json:"size"` + + // MIMEType may be set for files, but never for directories. + MIMEType string `json:"mimeType,omitempty"` + + // Time is the earliest of any modtime, creation time, or EXIF + // original/modification times found. It may be omitted (zero) + // if unknown. + Time *types.Time3339 `json:"time,omitempty"` + + // ModTime is the latest of any modtime, creation time, or EXIF + // original/modification times found. 
If ModTime doesn't differ + // from Time, ModTime is omitted (zero). + ModTime *types.Time3339 `json:"modTime,omitempty"` + + // WholeRef is the digest of the entire file contents. + // This will be zero for non-regular files, and may also be zero + // for files above a certain size threshold. + WholeRef blob.Ref `json:"wholeRef,omitempty"` +} + +func (fi *FileInfo) IsImage() bool { + return strings.HasPrefix(fi.MIMEType, "image/") +} + +var videoExtensions = map[string]bool{ + "3gp": true, + "avi": true, + "flv": true, + "m1v": true, + "m2v": true, + "m4v": true, + "mkv": true, + "mov": true, + "mp4": true, + "mpeg": true, + "mpg": true, + "ogv": true, + "wmv": true, +} + +func (fi *FileInfo) IsVideo() bool { + if strings.HasPrefix(fi.MIMEType, "video/") { + return true + } + + var ext string + if e := filepath.Ext(fi.FileName); strings.HasPrefix(e, ".") { + ext = e[1:] + } else { + return false + } + + // Case-insensitive lookup. + // Optimistically assume a short ASCII extension and be + // allocation-free in that case. + var buf [10]byte + lower := buf[:0] + const utf8RuneSelf = 0x80 // from utf8 package, but not importing it. + for i := 0; i < len(ext); i++ { + c := ext[i] + if c >= utf8RuneSelf { + // Slow path. + return videoExtensions[strings.ToLower(ext)] + } + if 'A' <= c && c <= 'Z' { + lower = append(lower, c+('a'-'A')) + } else { + lower = append(lower, c) + } + } + // The conversion from []byte to string doesn't allocate in + // a map lookup. + return videoExtensions[string(lower)] +} + +// ImageInfo describes an image file. +// +// The Width and Height are uint16s to save memory in index/corpus.go, and that's +// the max size of a JPEG anyway. If we want to deal with larger sizes, we can use +// MaxUint16 as a sentinel to mean to look elsewhere. Or ditch this optimization. +type ImageInfo struct { + // Width is the visible width of the image (after any necessary EXIF rotation). 
+ Width uint16 `json:"width"` + // Height is the visible height of the image (after any necessary EXIF rotation). + Height uint16 `json:"height"` +} + +type Path struct { + Claim, Base, Target blob.Ref + ClaimDate time.Time + Suffix string // ?? +} + +func (p *Path) String() string { + return fmt.Sprintf("Path{Claim: %v, %v; Base: %v + Suffix %q => Target %v}", + p.Claim, p.ClaimDate, p.Base, p.Suffix, p.Target) +} + +type PermanodeByAttrRequest struct { + Signer blob.Ref + + // Attribute to search. currently supported: "tag", "title" + // If FuzzyMatch is set, this can be blank to search all + // attributes. + Attribute string + + // The attribute value to find exactly (or roughly, if + // FuzzyMatch is set) + // If blank, the permanodes with Attribute as an attribute + // (set to any value) are searched. + Query string + + FuzzyMatch bool // by default, an exact match is required + MaxResults int // optional max results +} + +type EdgesToOpts struct { + Max int + // TODO: filter by type? +} + +type Edge struct { + From blob.Ref + FromType string // "permanode", "directory", etc + FromTitle string // name of source permanode or directory + To blob.Ref + BlobRef blob.Ref // the blob responsible for the edge relationship +} + +func (e *Edge) String() string { + return fmt.Sprintf("[edge from:%s to:%s type:%s title:%s]", e.From, e.To, e.FromType, e.FromTitle) +} + +// BlobMeta is the metadata kept for each known blob in the in-memory +// search index. It's kept as small as possible to save memory. +type BlobMeta struct { + Ref blob.Ref + Size uint32 + + // CamliType is non-empty if this blob is a Camlistore JSON + // schema blob. If so, this is its "camliType" attribute. + CamliType string + + // TODO(bradfitz): change CamliTypethis *string to save 8 bytes +} + +// SearchErrorResponse is the JSON error response for a search request. +type SearchErrorResponse struct { + Error string `json:"error,omitempty"` // The error message. 
+ ErrorType string `json:"errorType,omitempty"` // The type of the error. +} + +// FileSearchResponse is the JSON response to a file search request. +type FileSearchResponse struct { + SearchErrorResponse + + Files []blob.Ref `json:"files"` // Refs of the result files. Never nil. +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/search_test.go b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/search_test.go new file mode 100644 index 00000000..dd009570 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/search_test.go @@ -0,0 +1,42 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package camtypes + +import "testing" + +var fileInfoVideoTable = []struct { + fi *FileInfo + video bool +}{ + {&FileInfo{FileName: "some.mp4", MIMEType: "application/octet-stream"}, true}, + {&FileInfo{FileName: "IMG_1231.MOV", MIMEType: "application/octet-stream"}, true}, + {&FileInfo{FileName: "movie.mkv", MIMEType: "application/octet-stream"}, true}, + {&FileInfo{FileName: "movie.məv", MIMEType: "application/octet-stream"}, false}, + {&FileInfo{FileName: "tape", MIMEType: "video/webm"}, true}, + {&FileInfo{FileName: "tape", MIMEType: "application/ogg"}, false}, + {&FileInfo{FileName: "IMG_12312.jpg", MIMEType: "application/octet-stream"}, false}, + {&FileInfo{FileName: "IMG_12312.jpg", MIMEType: "image/jpeg"}, false}, +} + +func TestIsVideo(t *testing.T) { + for _, example := range fileInfoVideoTable { + if example.fi.IsVideo() != example.video { + t.Errorf("IsVideo failed video=%t filename=%s mimetype=%s", + example.video, example.fi.FileName, example.fi.MIMEType) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/sign.go b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/sign.go new file mode 100644 index 00000000..d0a17803 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/sign.go @@ -0,0 +1,29 @@ +/* +Copyright 2015 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package camtypes + +// VerifyResponse is the JSON response for a signature verification request. 
+type VerifyResponse struct { + // SignatureValid is true if the signature is valid. + SignatureValid bool `json:"signatureValid"` + // ErrorMessage contains the error that occurred, if any. + ErrorMessage string `json:"errorMessage,omitempty"` + // SignerKeyId is the ID of the signing key. + SignerKeyId string `json:"signerKeyId,omitempty"` + // VerifiedData contains the JSON values from the payload that we signed. + VerifiedData map[string]interface{} `json:"verifiedData,omitempty"` +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/statustype.go b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/statustype.go new file mode 100644 index 00000000..74705ab5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/camtypes/statustype.go @@ -0,0 +1,22 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package camtypes + +type StatusError struct { + Error string `json:"error"` + URL string `json:"url,omitempty"` // optional +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/clientconfig/config.go b/vendor/github.com/camlistore/camlistore/pkg/types/clientconfig/config.go new file mode 100644 index 00000000..53e6fc70 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/clientconfig/config.go @@ -0,0 +1,163 @@ +/* +Copyright 2014 The Camlistore Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package clientconfig provides types related to the client configuration +// file. +package clientconfig + +import ( + "errors" + "fmt" + "strings" + + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/wkfs" +) + +// Config holds the values from the JSON client config file. +type Config struct { + Servers map[string]*Server `json:"servers"` // maps server alias to server config. + Identity string `json:"identity"` // GPG identity. + IdentitySecretRing string `json:"identitySecretRing,omitempty"` // location of the secret ring file. + IgnoredFiles []string `json:"ignoredFiles,omitempty"` // list of files that camput should ignore. +} + +// Server holds the values specific to each server found in the JSON client +// config file. +type Server struct { + Server string `json:"server"` // server URL (scheme + hostname). + Auth string `json:"auth"` // auth scheme and values (ex: userpass:foo:bar). + IsDefault bool `json:"default,omitempty"` // whether this server is the default one. + TrustedCerts []string `json:"trustedCerts,omitempty"` // list of trusted certificates fingerprints. +} + +// Alias returns the alias of the server from conf that matches server, or the +// empty string if no match. A match means the server from the config is a +// prefix of the input server. The longest match prevails. 
+func (conf *Config) Alias(server string) string { + longestMatch := "" + serverAlias := "" + for alias, serverConf := range conf.Servers { + if strings.HasPrefix(server, serverConf.Server) { + if len(serverConf.Server) > len(longestMatch) { + longestMatch = serverConf.Server + serverAlias = alias + } + } + } + return serverAlias +} + +// GenerateClientConfig retuns a client configuration which can be used to +// access a server defined by the provided low-level server configuration. +func GenerateClientConfig(serverConfig jsonconfig.Obj) (*Config, error) { + missingConfig := func(param string) (*Config, error) { + return nil, fmt.Errorf("required value for '%s' not found", param) + } + + if serverConfig == nil { + return nil, errors.New("server config is a required parameter") + } + param := "auth" + auth := serverConfig.OptionalString(param, "") + if auth == "" { + return missingConfig(param) + } + + listen := serverConfig.OptionalString("listen", "") + baseURL := serverConfig.OptionalString("baseURL", "") + if listen == "" { + listen = baseURL + } + if listen == "" { + return nil, errors.New("required value for 'listen' or 'baseURL' not found") + } + + https := serverConfig.OptionalBool("https", false) + if !strings.HasPrefix(listen, "http://") && !strings.HasPrefix(listen, "https://") { + if !https { + listen = "http://" + listen + } else { + listen = "https://" + listen + } + } + + param = "httpsCert" + httpsCert := serverConfig.OptionalString(param, "") + if https && httpsCert == "" { + return missingConfig(param) + } + + // TODO(mpl): See if we can detect that the cert is not self-signed,and in + // that case not add it to the trustedCerts + var trustedList []string + if https && httpsCert != "" { + certPEMBlock, err := wkfs.ReadFile(httpsCert) + if err != nil { + return nil, fmt.Errorf("could not read certificate: %v", err) + } + sig, err := httputil.CertFingerprint(certPEMBlock) + if err != nil { + return nil, fmt.Errorf("could not get fingerprints of 
certificate: %v", err) + } + trustedList = []string{sig} + } + + param = "prefixes" + prefixes := serverConfig.OptionalObject(param) + if len(prefixes) == 0 { + return missingConfig(param) + } + + param = "/sighelper/" + sighelper := prefixes.OptionalObject(param) + if len(sighelper) == 0 { + return missingConfig(param) + } + + param = "handlerArgs" + handlerArgs := sighelper.OptionalObject(param) + if len(handlerArgs) == 0 { + return missingConfig(param) + } + + param = "keyId" + keyId := handlerArgs.OptionalString(param, "") + if keyId == "" { + return missingConfig(param) + } + + param = "secretRing" + secretRing := handlerArgs.OptionalString(param, "") + if secretRing == "" { + return missingConfig(param) + } + + return &Config{ + Servers: map[string]*Server{ + "default": { + Server: listen, + Auth: auth, + IsDefault: true, + TrustedCerts: trustedList, + }, + }, + Identity: keyId, + IdentitySecretRing: secretRing, + IgnoredFiles: []string{".DS_Store"}, + }, nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/example_test.go b/vendor/github.com/camlistore/camlistore/pkg/types/example_test.go new file mode 100644 index 00000000..44553880 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/example_test.go @@ -0,0 +1,22 @@ +package types + +import ( + "expvar" + "fmt" + "io" + "io/ioutil" + "strings" +) + +func ExampleNewStatsReader() { + var ( + // r is the io.Reader we'd like to count read from. + r = strings.NewReader("Hello world") + v = expvar.NewInt("read-bytes") + sw = NewStatsReader(v, r) + ) + // Read from the wrapped io.Reader, StatReader will count the bytes. 
+ io.Copy(ioutil.Discard, sw) + fmt.Printf("Read %s bytes\n", v.String()) + // Output: Read 11 bytes +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/fakeseeker.go b/vendor/github.com/camlistore/camlistore/pkg/types/fakeseeker.go new file mode 100644 index 00000000..f0ee776a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/fakeseeker.go @@ -0,0 +1,70 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "errors" + "fmt" + "io" + "os" +) + +// fakeSeeker can seek to the ends but any read not at the current +// position will fail. +type fakeSeeker struct { + r io.Reader + size int64 + + fakePos int64 + realPos int64 +} + +// NewFakeSeeker returns a ReadSeeker that can pretend to Seek (based +// on the provided total size of the reader's content), but any reads +// will fail if the fake seek position doesn't match reality. 
+func NewFakeSeeker(r io.Reader, size int64) io.ReadSeeker { + return &fakeSeeker{r: r, size: size} +} + +func (fs *fakeSeeker) Seek(offset int64, whence int) (int64, error) { + var newo int64 + switch whence { + default: + return 0, errors.New("invalid whence") + case os.SEEK_SET: + newo = offset + case os.SEEK_CUR: + newo = fs.fakePos + offset + case os.SEEK_END: + newo = fs.size + offset + } + if newo < 0 { + return 0, errors.New("negative seek") + } + fs.fakePos = newo + return newo, nil +} + +func (fs *fakeSeeker) Read(p []byte) (n int, err error) { + if fs.fakePos != fs.realPos { + return 0, fmt.Errorf("attempt to read from fake seek offset %d; real offset is %d", fs.fakePos, fs.realPos) + } + n, err = fs.r.Read(p) + fs.fakePos += int64(n) + fs.realPos += int64(n) + return +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/fakeseeker_test.go b/vendor/github.com/camlistore/camlistore/pkg/types/fakeseeker_test.go new file mode 100644 index 00000000..aa2b55a5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/fakeseeker_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package types + +import ( + "os" + "strings" + "testing" +) + +func TestFakeSeeker(t *testing.T) { + rs := NewFakeSeeker(strings.NewReader("foobar"), 6) + if pos, err := rs.Seek(0, os.SEEK_END); err != nil || pos != 6 { + t.Fatalf("SEEK_END = %d, %v; want 6, nil", pos, err) + } + if pos, err := rs.Seek(0, os.SEEK_CUR); err != nil || pos != 6 { + t.Fatalf("SEEK_CUR = %d, %v; want 6, nil", pos, err) + } + if pos, err := rs.Seek(0, os.SEEK_SET); err != nil || pos != 0 { + t.Fatalf("SEEK_SET = %d, %v; want 0, nil", pos, err) + } + + buf := make([]byte, 3) + if n, err := rs.Read(buf); n != 3 || err != nil || string(buf) != "foo" { + t.Fatalf("First read = %d, %v (buf = %q); want foo", n, err, buf) + } + if pos, err := rs.Seek(0, os.SEEK_CUR); err != nil || pos != 3 { + t.Fatalf("Seek cur pos after first read = %d, %v; want 3, nil", pos, err) + } + if n, err := rs.Read(buf); n != 3 || err != nil || string(buf) != "bar" { + t.Fatalf("Second read = %d, %v (buf = %q); want foo", n, err, buf) + } + + if pos, err := rs.Seek(1, os.SEEK_SET); err != nil || pos != 1 { + t.Fatalf("SEEK_SET = %d, %v; want 1, nil", pos, err) + } + const msg = "attempt to read from fake seek offset" + if _, err := rs.Read(buf); err == nil || !strings.Contains(err.Error(), msg) { + t.Fatalf("bogus Read after seek = %v; want something containing %q", err, msg) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/serverconfig/config.go b/vendor/github.com/camlistore/camlistore/pkg/types/serverconfig/config.go new file mode 100644 index 00000000..a6d29062 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/serverconfig/config.go @@ -0,0 +1,115 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package serverconfig provides types related to the server configuration file. +package serverconfig + +import ( + "camlistore.org/pkg/types" +) + +// Config holds the values from the JSON (high-level) server config +// file that is exposed to users (and is by default at +// osutil.UserServerConfigPath). From this simpler configuration, a +// complete, low-level one, is generated by +// serverinit.genLowLevelConfig, and used to configure the various +// Camlistore components. +type Config struct { + Auth string `json:"auth"` // auth scheme and values (ex: userpass:foo:bar). + BaseURL string `json:"baseURL,omitempty"` // Base URL the server advertizes. For when behind a proxy. + Listen string `json:"listen"` // address (of the form host|ip:port) on which the server will listen on. + Identity string `json:"identity"` // GPG identity. + IdentitySecretRing string `json:"identitySecretRing"` // path to the secret ring file. + // alternative source tree, to override the embedded ui and/or closure resources. + // If non empty, the ui files will be expected at + // sourceRoot + "/server/camlistored/ui" and the closure library at + // sourceRoot + "/third_party/closure/lib" + // Also used by the publish handler. + SourceRoot string `json:"sourceRoot,omitempty"` + OwnerName string `json:"ownerName,omitempty"` + + // Blob storage. + MemoryStorage bool `json:"memoryStorage,omitempty"` // do not store anything (blobs or queues) on localdisk, use memory instead. + BlobPath string `json:"blobPath,omitempty"` // path to the directory containing the blobs. 
+ PackBlobs bool `json:"packBlobs,omitempty"` // use "diskpacked" instead of the default filestorage. (exclusive with PackRelated) + PackRelated bool `json:"packRelated,omitempty"` // use "blobpacked" instead of the default storage (exclusive with PackBlobs) + S3 string `json:"s3,omitempty"` // Amazon S3 credentials: access_key_id:secret_access_key:bucket[:hostname]. + GoogleCloudStorage string `json:"googlecloudstorage,omitempty"` // Google Cloud credentials: clientId:clientSecret:refreshToken:bucket[/optional/dir] or ":bucket[/optional/dir/]" for auto on GCE + GoogleDrive string `json:"googledrive,omitempty"` // Google Drive credentials: clientId:clientSecret:refreshToken:parentId. + ShareHandler bool `json:"shareHandler,omitempty"` // enable the share handler. If true, and shareHandlerPath is empty then shareHandlerPath will default to "/share/" when generating the low-level config. + ShareHandlerPath string `json:"shareHandlerPath,omitempty"` // URL prefix for the share handler. If set, overrides shareHandler. + + // HTTPS. + HTTPS bool `json:"https,omitempty"` // enable HTTPS. + HTTPSCert string `json:"httpsCert,omitempty"` // path to the HTTPS certificate file. + HTTPSKey string `json:"httpsKey,omitempty"` // path to the HTTPS key file. + + // Index. + RunIndex types.InvertedBool `json:"runIndex,omitempty"` // if logically false: no search, no UI, etc. + CopyIndexToMemory types.InvertedBool `json:"copyIndexToMemory,omitempty"` // copy disk-based index to memory on start-up. + MemoryIndex bool `json:"memoryIndex,omitempty"` // use memory-only indexer. + DBName string `json:"dbname,omitempty"` // name of the database for mysql, postgres, mongo. + LevelDB string `json:"levelDB,omitempty"` // path to the levelDB directory, for indexing with github.com/syndtr/goleveldb. + KVFile string `json:"kvIndexFile,omitempty"` // path to the kv file, for indexing with github.com/cznic/kv. 
+ MySQL string `json:"mysql,omitempty"` // MySQL credentials (username@host:password), for indexing with MySQL. + Mongo string `json:"mongo,omitempty"` // MongoDB credentials ([username:password@]host), for indexing with MongoDB. + PostgreSQL string `json:"postgres,omitempty"` // PostgreSQL credentials (username@host:password), for indexing with PostgreSQL. + SQLite string `json:"sqlite,omitempty"` // path to the SQLite file, for indexing with SQLite. + + // DBNames lists which database names to use for various types of key/value stores. The keys may be: + // "index" (overrides 'dbname' key above) + // "queue-sync-to-index" (the sync queue to index things) + // "queue-sync-to-s3" (the sync queue to replicate to s3) + // "blobpacked_index" (the index for blobpacked, the 'packRelated' option) + // "ui_thumbcache" + DBNames map[string]string `json:"dbNames"` + + ReplicateTo []interface{} `json:"replicateTo,omitempty"` // NOOP for now. + // Publish maps a URL prefix path used as a root for published paths (a.k.a. a camliRoot path), to the configuration of the publish handler that serves all the published paths under this root. + Publish map[string]*Publish `json:"publish,omitempty"` + + // TODO(mpl): map of importers instead? + Flickr string `json:"flickr,omitempty"` // flicker importer. + Picasa string `json:"picasa,omitempty"` // picasa importer. +} + +// Publish holds the server configuration values specific to a publisher, i.e. to a publish prefix. +type Publish struct { + // Program is the server app program to run as the publisher. + // Defaults to "publisher". + Program string `json:"program"` + + // CamliRoot value that defines our root permanode for this + // publisher. The root permanode is used as the root for all the + // paths served by this publisher. + CamliRoot string `json:"camliRoot"` + + // Base URL the app will run at. 
+ BaseURL string `json:"baseURL,omitempty"` + + // GoTemplate is the name of the Go template file used by this + // publisher to represent the data. This file should live in + // app/publisher/. + GoTemplate string `json:"goTemplate"` + + // CacheRoot is the path that will be used as the root for the + // caching blobserver (for images). No caching if empty. + // An example value is Config.BlobPath + "/cache". + CacheRoot string `json:"cacheRoot,omitempty"` + + HTTPSCert string `json:"httpsCert,omitempty"` // path to the HTTPS certificate file. + HTTPSKey string `json:"httpsKey,omitempty"` // path to the HTTPS key file. +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/types.go b/vendor/github.com/camlistore/camlistore/pkg/types/types.go new file mode 100644 index 00000000..7b31bc6f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/types.go @@ -0,0 +1,260 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package types provides various common types. +package types + +import ( + "bytes" + "encoding/json" + "expvar" + "fmt" + "io" + "io/ioutil" + "math" + "regexp" + "runtime" + "strings" + "sync" + "time" +) + +var ( + goVersion = runtime.Version() + dotNumbers = regexp.MustCompile(`\.\d+`) + null_b = []byte("null") +) + +// NopCloser is an io.Closer that does nothing. +var NopCloser io.Closer = ioutil.NopCloser(nil) + +// EmptyBody is a ReadCloser that returns EOF on Read and does nothing +// on Close. 
+var EmptyBody io.ReadCloser = ioutil.NopCloser(strings.NewReader("")) + +// Time3339 is a time.Time which encodes to and from JSON +// as an RFC 3339 time in UTC. +type Time3339 time.Time + +var ( + _ json.Marshaler = Time3339{} + _ json.Unmarshaler = (*Time3339)(nil) +) + +func (t Time3339) String() string { + return time.Time(t).UTC().Format(time.RFC3339Nano) +} + +func (t Time3339) MarshalJSON() ([]byte, error) { + if t.Time().IsZero() { + return null_b, nil + } + return json.Marshal(t.String()) +} + +func (t *Time3339) UnmarshalJSON(b []byte) error { + if bytes.Equal(b, null_b) { + *t = Time3339{} + return nil + } + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b) + } + s := string(b[1 : len(b)-1]) + if s == "" { + *t = Time3339{} + return nil + } + tm, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + if strings.HasPrefix(s, "0000-00-00T00:00:00") { + *t = Time3339{} + return nil + } + return err + } + *t = Time3339(tm) + return nil +} + +// ParseTime3339OrZero parses a string in RFC3339 format. If it's invalid, +// the zero time value is returned instead. +func ParseTime3339OrZero(v string) Time3339 { + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return Time3339{} + } + return Time3339(t) +} + +func ParseTime3339OrNil(v string) *Time3339 { + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return nil + } + tm := Time3339(t) + return &tm +} + +// Time returns the time as a time.Time with slightly less stutter +// than a manual conversion. +func (t Time3339) Time() time.Time { + return time.Time(t) +} + +// IsZero returns whether the time is Go zero or Unix zero. +func (t *Time3339) IsZero() bool { + return t == nil || time.Time(*t).IsZero() || time.Time(*t).Unix() == 0 +} + +// ByTime sorts times. 
+type ByTime []time.Time
+
+func (s ByTime) Len() int           { return len(s) }
+func (s ByTime) Less(i, j int) bool { return s[i].Before(s[j]) }
+func (s ByTime) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// A ReadSeekCloser can Read, Seek, and Close.
+type ReadSeekCloser interface {
+	io.Reader
+	io.Seeker
+	io.Closer
+}
+
+type ReaderAtCloser interface {
+	io.ReaderAt
+	io.Closer
+}
+
+type SizeReaderAt interface {
+	io.ReaderAt
+	Size() int64
+}
+
+// TODO(wathiede): make sure all the stat readers work with code that
+// type asserts ReadFrom/WriteTo.
+
+type varStatReader struct {
+	*expvar.Int
+	r io.Reader
+}
+
+// NewStatsReader returns an io.Reader that will have the number of bytes
+// read from r added to v.
+func NewStatsReader(v *expvar.Int, r io.Reader) io.Reader {
+	return &varStatReader{v, r}
+}
+
+func (v *varStatReader) Read(p []byte) (int, error) {
+	n, err := v.r.Read(p)
+	v.Int.Add(int64(n))
+	return n, err
+}
+
+type varStatReadSeeker struct {
+	*expvar.Int
+	rs io.ReadSeeker
+}
+
+// NewStatsReadSeeker returns an io.ReadSeeker that will have the number of bytes
+// read from rs added to v.
+func NewStatsReadSeeker(v *expvar.Int, r io.ReadSeeker) io.ReadSeeker {
+	return &varStatReadSeeker{v, r}
+}
+
+func (v *varStatReadSeeker) Read(p []byte) (int, error) {
+	n, err := v.rs.Read(p)
+	v.Int.Add(int64(n))
+	return n, err
+}
+
+func (v *varStatReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	return v.rs.Seek(offset, whence)
+}
+
+// InvertedBool is a bool that marshals to and from JSON with the opposite of its in-memory value.
+type InvertedBool bool
+
+func (ib InvertedBool) MarshalJSON() ([]byte, error) {
+	return json.Marshal(!bool(ib))
+}
+
+func (ib *InvertedBool) UnmarshalJSON(b []byte) error {
+	var bo bool
+	if err := json.Unmarshal(b, &bo); err != nil {
+		return err
+	}
+	*ib = InvertedBool(!bo)
+	return nil
+}
+
+// Get returns the logical value of ib. 
+func (ib InvertedBool) Get() bool { + return !bool(ib) +} + +// U32 converts n to an uint32, or panics if n is out of range +func U32(n int64) uint32 { + if n < 0 || n > math.MaxUint32 { + panic("bad size " + fmt.Sprint(n)) + } + return uint32(n) +} + +// NewOnceCloser returns a Closer wrapping c which only calls Close on c +// once. Subsequent calls to Close return nil. +func NewOnceCloser(c io.Closer) io.Closer { + return &onceCloser{c: c} +} + +type onceCloser struct { + mu sync.Mutex + c io.Closer +} + +func (c *onceCloser) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.c == nil { + return nil + } + err := c.c.Close() + c.c = nil + return err +} + +// TB is a copy of testing.TB so things can take a TB without linking +// in the testing package (which defines its own flags, etc). +type TB interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// CloseFunc implements io.Closer with a function. +type CloseFunc func() error + +func (fn CloseFunc) Close() error { return fn() } diff --git a/vendor/github.com/camlistore/camlistore/pkg/types/types_test.go b/vendor/github.com/camlistore/camlistore/pkg/types/types_test.go new file mode 100644 index 00000000..8708c7dd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/types/types_test.go @@ -0,0 +1,152 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "encoding/json" + "strings" + "testing" + "time" +) + +func TestTime3339(t *testing.T) { + tm := time.Unix(123, 456) + t3 := Time3339(tm) + type O struct { + SomeTime Time3339 `json:"someTime"` + } + o := &O{SomeTime: t3} + got, err := json.Marshal(o) + if err != nil { + t.Fatal(err) + } + goodEnc := "{\"someTime\":\"1970-01-01T00:02:03.000000456Z\"}" + if string(got) != goodEnc { + t.Errorf("Encoding wrong.\n Got: %q\nWant: %q", got, goodEnc) + } + ogot := &O{} + err = json.Unmarshal([]byte(goodEnc), ogot) + if err != nil { + t.Fatal(err) + } + if !tm.Equal(ogot.SomeTime.Time()) { + t.Errorf("Unmarshal got time %v; want %v", ogot.SomeTime.Time(), tm) + } +} + +func TestTime3339_Marshal(t *testing.T) { + tests := []struct { + in time.Time + want string + }{ + {time.Time{}, "null"}, + {time.Unix(1, 0), `"1970-01-01T00:00:01Z"`}, + } + for i, tt := range tests { + got, err := Time3339(tt.in).MarshalJSON() + if err != nil { + t.Errorf("%d. marshal(%v) got error: %v", i, tt.in, err) + continue + } + if string(got) != tt.want { + t.Errorf("%d. 
marshal(%v) = %q; want %q", i, tt.in, got, tt.want) + } + } +} + +func TestTime3339_empty(t *testing.T) { + tests := []struct { + enc string + z bool + }{ + {enc: "null", z: true}, + {enc: `""`, z: true}, + {enc: "0000-00-00T00:00:00Z", z: true}, + {enc: "0001-01-01T00:00:00Z", z: true}, + {enc: "1970-01-01T00:00:00Z", z: true}, + {enc: "2001-02-03T04:05:06Z", z: false}, + {enc: "2001-02-03T04:05:06+06:00", z: false}, + {enc: "2001-02-03T04:05:06-06:00", z: false}, + {enc: "2001-02-03T04:05:06.123456789Z", z: false}, + {enc: "2001-02-03T04:05:06.123456789+06:00", z: false}, + {enc: "2001-02-03T04:05:06.123456789-06:00", z: false}, + } + for _, tt := range tests { + var tm Time3339 + enc := tt.enc + if strings.Contains(enc, "T") { + enc = "\"" + enc + "\"" + } + err := json.Unmarshal([]byte(enc), &tm) + if err != nil { + t.Errorf("unmarshal %q = %v", enc, err) + } + if tm.IsZero() != tt.z { + t.Errorf("unmarshal %q = %v (%d), %v; zero=%v; want %v", tt.enc, tm.Time(), tm.Time().Unix(), err, + !tt.z, tt.z) + } + } +} + +func TestInvertedBool_Unmarshal(t *testing.T) { + tests := []struct { + json string + want bool + }{ + {json: `{}`, want: true}, + {json: `{"key": true}`, want: true}, + {json: `{"key": false}`, want: false}, + } + type O struct { + Key InvertedBool + } + for _, tt := range tests { + obj := &O{} + if err := json.Unmarshal([]byte(tt.json), obj); err != nil { + t.Fatalf("Could not unmarshal %s: %v", tt.json, err) + } + if obj.Key.Get() != tt.want { + t.Errorf("Unmarshaled %s as InvertedBool; got %v, wanted %v", tt.json, obj.Key.Get(), tt.want) + } + } +} + +func TestInvertedBool_Marshal(t *testing.T) { + tests := []struct { + internalVal bool + want string + }{ + {internalVal: true, want: `{"key":false}`}, + {internalVal: false, want: `{"key":true}`}, + } + type O struct { + Key InvertedBool `json:"key"` + } + for _, tt := range tests { + + obj := &O{ + Key: InvertedBool(tt.internalVal), + } + b, err := json.Marshal(obj) + if err != nil { + 
t.Fatalf("Could not marshal %v: %v", tt.internalVal, err) + } + if string(b) != tt.want { + t.Errorf("Marshaled InvertedBool %v; got %v, wanted %v", tt.internalVal, string(b), tt.want) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/handler.go b/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/handler.go new file mode 100644 index 00000000..16e2778e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/handler.go @@ -0,0 +1,79 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package thumbnail + +import ( + "log" + "net/http" + "strings" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/schema" +) + +// serveRef gets the file at ref from fetcher and serves its contents. +// It is used by Service as a one time handler to serve to the thumbnail child process on localhost. 
+func serveRef(rw http.ResponseWriter, req *http.Request, ref blob.Ref, fetcher blob.Fetcher) { + + if !httputil.IsGet(req) { + http.Error(rw, "Invalid download method.", 400) + return + } + + if !httputil.IsLocalhost(req) { + http.Error(rw, "Forbidden.", 403) + return + } + + parts := strings.Split(req.URL.Path, "/") + if len(parts) < 2 { + http.Error(rw, "Malformed GET URL.", 400) + return + } + + blobRef, ok := blob.Parse(parts[1]) + if !ok { + http.Error(rw, "Malformed GET URL.", 400) + return + } + + // only serves its ref + if blobRef != ref { + log.Printf("videothumbnail: access to %v forbidden; wrong blobref for handler", blobRef) + http.Error(rw, "Forbidden.", 403) + return + } + + rw.Header().Set("Content-Type", "application/octet-stream") + + fr, err := schema.NewFileReader(fetcher, ref) + if err != nil { + httputil.ServeError(rw, req, err) + return + } + defer fr.Close() + + http.ServeContent(rw, req, "", time.Now(), fr) +} + +func createVideothumbnailHandler(ref blob.Ref, fetcher blob.Fetcher) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + serveRef(rw, req, ref, fetcher) + }) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/handler_test.go b/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/handler_test.go new file mode 100644 index 00000000..3dc077cb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/handler_test.go @@ -0,0 +1,75 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package thumbnail + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/test" +) + +func TestHandlerWrongRef(t *testing.T) { + storage := new(test.Fetcher) + ref := blob.MustParse("sha1-f1d2d2f924e986ac86fdf7b36c94bcdf32beec15") + wrongRefString := "sha1-e242ed3bffccdf271b7fbaf34ed72d089537b42f" + ts := httptest.NewServer(createVideothumbnailHandler(ref, storage)) + defer ts.Close() + + resp, err := http.Get(ts.URL + "/" + wrongRefString) + if err != nil { + t.Fatal(err) + } + if resp.StatusCode != 403 { + t.Fatalf("excepted forbidden status when the wrong ref is requested") + } +} + +func TestHandlerRightRef(t *testing.T) { + b := test.Blob{Contents: "Foo"} + storage := new(test.Fetcher) + ref, err := schema.WriteFileFromReader(storage, "", b.Reader()) + if err != nil { + t.Fatal(err) + } + if err != nil { + t.Fatal(err) + } + + ts := httptest.NewServer(createVideothumbnailHandler(ref, storage)) + defer ts.Close() + + resp, err := http.Get(ts.URL + "/" + ref.String()) + + if err != nil { + t.Fatal(err) + } + if resp.StatusCode != 200 { + t.Fatalf("expected 200 status: %v", resp) + } + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(content) != b.Contents { + t.Errorf("excepted handler to serve data") + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/service.go b/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/service.go new file mode 100644 index 00000000..1d0c7b35 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/service.go @@ -0,0 +1,161 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package thumbnail generates image thumbnails from videos.
+
+(*Service).Generate spawns an HTTP server listening on a local random
+port to serve the video to an external program (see Thumbnailer interface).
+The external program is expected to output the thumbnail image on its
+standard output.
+
+The default implementation uses ffmpeg.
+
+See ServiceFromConfig for accepted configuration.
+*/
+package thumbnail
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"time"
+
+	"camlistore.org/pkg/blob"
+	"camlistore.org/pkg/jsonconfig"
+	"camlistore.org/pkg/netutil"
+	"camlistore.org/pkg/syncutil"
+)
+
+// A Service controls the generation of video thumbnails.
+type Service struct {
+	thumbnailer Thumbnailer
+	// Timeout is the maximum duration for the thumbnailing subprocess execution.
+	timeout time.Duration
+	gate    *syncutil.Gate // of subprocesses.
+}
+
+// ServiceFromConfig builds a new Service from configuration.
+// Example expected configuration object (all keys are optional) :
+// {
+//	// command defaults to FFmpegThumbnailer and $uri is replaced by
+//	// the real value at runtime.
+//	"command": ["/opt/local/bin/ffmpeg", "-i", "$uri", "pipe:1"],
+//	// Maximum number of milliseconds for running the thumbnailing subprocess.
+//	// A zero or negative timeout means no timeout.
+//	"timeout": 2000,
+//	// Maximum number of thumbnailing subprocesses running at the same time.
+//	// A zero or negative maxProcs means no limit. 
+// "maxProcs": 5 +// } +func ServiceFromConfig(conf jsonconfig.Obj) (*Service, error) { + th := thumbnailerFromConfig(conf) + timeout := conf.OptionalInt("timeout", 5000) + maxProc := conf.OptionalInt("maxProcs", 5) + + err := conf.Validate() + if err != nil { + return nil, err + } + + return NewService(th, time.Millisecond*time.Duration(timeout), maxProc), nil +} + +// NewService builds a new Service. Zero timeout or maxProcs means no limit. +func NewService(th Thumbnailer, timeout time.Duration, maxProcs int) *Service { + + var g *syncutil.Gate + if maxProcs > 0 { + g = syncutil.NewGate(maxProcs) + } + + return &Service{ + thumbnailer: th, + timeout: timeout, + gate: g, + } +} + +var errTimeout = errors.New("timeout.") + +// Generate reads the video given by videoRef from src and writes its thumbnail image to w. +func (s *Service) Generate(videoRef blob.Ref, w io.Writer, src blob.Fetcher) error { + + if s.gate != nil { + s.gate.Start() + defer s.gate.Done() + } + + ln, err := netutil.ListenOnLocalRandomPort() + if err != nil { + return err + } + defer ln.Close() + + videoUri := &url.URL{ + Scheme: "http", + Host: ln.Addr().String(), + Path: videoRef.String(), + } + + cmdErrc := make(chan error, 1) + cmd := buildCmd(s.thumbnailer, videoUri, w) + cmdErrOut, err := cmd.StderrPipe() + if err != nil { + return err + } + if err := cmd.Start(); err != nil { + return err + } + defer cmd.Process.Kill() + go func() { + out, err := ioutil.ReadAll(cmdErrOut) + if err != nil { + cmdErrc <- err + return + } + cmd.Wait() + if cmd.ProcessState.Success() { + cmdErrc <- nil + return + } + cmdErrc <- fmt.Errorf("thumbnail subprocess failed:\n%s", out) + }() + + servErrc := make(chan error, 1) + go func() { + servErrc <- http.Serve(ln, createVideothumbnailHandler(videoRef, src)) + }() + + select { + case err := <-cmdErrc: + return err + case err := <-servErrc: + return err + case <-s.timer(): + return errTimeout + } +} + +func (s *Service) timer() <-chan time.Time { + if s.timeout 
<= 0 { + return make(<-chan time.Time) + } + return time.After(s.timeout) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/service_test.go b/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/service_test.go new file mode 100644 index 00000000..340cf73a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/service_test.go @@ -0,0 +1,154 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package thumbnail + +import ( + "bytes" + "io/ioutil" + "net/url" + "os" + "os/exec" + "testing" + "time" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/magic" + "camlistore.org/pkg/schema" + "camlistore.org/pkg/test" +) + +const testFilepath = "testdata/small.webm" + +func storageAndBlobRef(t *testing.T) (blobserver.Storage, blob.Ref) { + storage := new(test.Fetcher) + inFile, err := os.Open(testFilepath) + if err != nil { + t.Fatal(err) + } + ref, err := schema.WriteFileFromReader(storage, "small.webm", inFile) + if err != nil { + t.Fatal(err) + } + return storage, ref +} + +func TestStorage(t *testing.T) { + store, ref := storageAndBlobRef(t) + fr, err := schema.NewFileReader(store, ref) + if err != nil { + t.Fatal(err) + } + inFile, err := os.Open(testFilepath) + if err != nil { + t.Fatal(err) + } + data, err := ioutil.ReadAll(inFile) + if err != nil { + t.Fatal(err) + } + bd, err := ioutil.ReadAll(fr) + if err != nil { + t.Fatal(err) + } + if 
!bytes.Equal(bd, data) { + t.Error("expected to be the same") + } +} + +func TestMakeThumbnail(t *testing.T) { + if _, err := exec.LookPath("ffmpeg"); err != nil { + t.Skip(err) + } + + store, ref := storageAndBlobRef(t) + tmpFile, _ := ioutil.TempFile(os.TempDir(), "camlitest") + defer tmpFile.Close() + service := NewService(DefaultThumbnailer, 2*time.Second, 5) + err := service.Generate(ref, tmpFile, store) + + if err != nil { + t.Fatal(err) + } + + tmpFile.Seek(0, 0) + + typ, _ := magic.MIMETypeFromReader(tmpFile) + if typ != "image/png" { + t.Errorf("excepted thumbnail mimetype to be `image/png` was `%s`", typ) + } + +} + +func TestMakeThumbnailWithZeroMaxProcsAndTimeout(t *testing.T) { + if _, err := exec.LookPath("ffmpeg"); err != nil { + t.Skip(err) + } + + store, ref := storageAndBlobRef(t) + tmpFile, _ := ioutil.TempFile(os.TempDir(), "camlitest") + defer tmpFile.Close() + service := NewService(DefaultThumbnailer, 0, 0) + err := service.Generate(ref, tmpFile, store) + + if err != nil { + t.Fatal(err) + } +} + +type failingThumbnailer struct{} + +func (failingThumbnailer) Command(*url.URL) (string, []string) { + return "failcommand", []string{} +} + +func TestMakeThumbnailFailure(t *testing.T) { + if _, err := exec.LookPath("ffmpeg"); err != nil { + t.Skip(err) + } + + store, ref := storageAndBlobRef(t) + service := NewService(failingThumbnailer{}, 2*time.Second, 5) + err := service.Generate(ref, ioutil.Discard, store) + + if err == nil { + t.Error("expected to fail.") + } + t.Logf("err output: %v", err) + +} + +type sleepyThumbnailer struct{} + +func (sleepyThumbnailer) Command(*url.URL) (string, []string) { + return "bash", []string{"-c", `echo "MAY SHOW" 1>&2; sleep 10; echo "SHOULD NEVER SHOW" 1>&2`} +} + +func TestThumbnailGenerateTimeout(t *testing.T) { + + if _, err := exec.LookPath("bash"); err != nil { + t.Skip("bash not in PATH.") + } + + store, ref := storageAndBlobRef(t) + service := NewService(sleepyThumbnailer{}, 
time.Duration(time.Millisecond), 5) + err := service.Generate(ref, ioutil.Discard, store) + + if err != errTimeout { + t.Errorf("expected to timeout: %v", err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/testdata/small.webm b/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/testdata/small.webm new file mode 100644 index 00000000..da946da5 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/testdata/small.webm differ diff --git a/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/thumbnailer.go b/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/thumbnailer.go new file mode 100644 index 00000000..8de5e507 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/video/thumbnail/thumbnailer.go @@ -0,0 +1,92 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package thumbnail + +import ( + "io" + "net/url" + "os/exec" + + "camlistore.org/pkg/jsonconfig" +) + +// Thumbnailer is the interface that wraps the Command method. +// +// Command receives the (HTTP) uri from where to get the video to generate a +// thumbnail and returns program and arguments. +// The command is expected to output the thumbnail image on its stdout, or exit +// with an error code. +// +// See FFmpegThumbnailer.Command for example. 
+type Thumbnailer interface { + Command(*url.URL) (prog string, args []string) +} + +// DefaultThumbnailer is the default Thumbnailer when no config is set. +var DefaultThumbnailer Thumbnailer = FFmpegThumbnailer{} + +// FFmpegThumbnailer is a Thumbnailer that generates a thumbnail with ffmpeg. +type FFmpegThumbnailer struct{} + +var _ Thumbnailer = (*FFmpegThumbnailer)(nil) + +// Command implements the Command method for the Thumbnailer interface. +func (f FFmpegThumbnailer) Command(uri *url.URL) (string, []string) { + return "ffmpeg", []string{ + "-seekable", "1", + "-i", uri.String(), + "-vf", "thumbnail", + "-frames:v", "1", + "-f", "image2pipe", + "-c:v", "png", + "pipe:1", + } +} + +type configThumbnailer struct { + prog string + args []string +} + +var _ Thumbnailer = (*configThumbnailer)(nil) + +func (ct *configThumbnailer) Command(uri *url.URL) (string, []string) { + args := make([]string, len(ct.args)) + for index, arg := range ct.args { + if arg == "$uri" { + args[index] = uri.String() + } else { + args[index] = arg + } + } + return ct.prog, args +} + +func buildCmd(tn Thumbnailer, uri *url.URL, out io.Writer) *exec.Cmd { + prog, args := tn.Command(uri) + cmd := exec.Command(prog, args...) + cmd.Stdout = out + return cmd +} + +func thumbnailerFromConfig(config jsonconfig.Obj) Thumbnailer { + command := config.OptionalList("command") + if len(command) < 1 { + return DefaultThumbnailer + } + return &configThumbnailer{prog: command[0], args: command[1:]} +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/webserver/envpipe_unix.go b/vendor/github.com/camlistore/camlistore/pkg/webserver/envpipe_unix.go new file mode 100644 index 00000000..cc9905d6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/webserver/envpipe_unix.go @@ -0,0 +1,38 @@ +// +build !windows + +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webserver + +import ( + "fmt" + "log" + "os" + "strconv" +) + +func pipeFromEnvFd(env string) (*os.File, error) { + fdStr := os.Getenv(env) + if fdStr == "" { + return nil, fmt.Errorf("Environment variable %q was blank", env) + } + fd, err := strconv.Atoi(fdStr) + if err != nil { + log.Fatalf("Bogus test harness fd '%s': %v", fdStr, err) + } + return os.NewFile(uintptr(fd), "testingpipe-"+env), nil +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/webserver/envpipe_windows.go b/vendor/github.com/camlistore/camlistore/pkg/webserver/envpipe_windows.go new file mode 100644 index 00000000..f3ef96ec --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/webserver/envpipe_windows.go @@ -0,0 +1,28 @@ +// +build windows + +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package webserver + +import ( + "errors" + "os" +) + +func pipeFromEnvFd(env string) (*os.File, error) { + return nil, errors.New("not implemented") +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/webserver/webserver.go b/vendor/github.com/camlistore/camlistore/pkg/webserver/webserver.go new file mode 100644 index 00000000..8b9726b2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/webserver/webserver.go @@ -0,0 +1,271 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package webserver implements a superset wrapper of http.Server. +// +// Among other things, it can throttle its connections, inherit its +// listening socket from a file descriptor in the environment, and +// log all activity. +package webserver + +import ( + "bufio" + "crypto/rand" + "crypto/tls" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "strconv" + "strings" + "sync" + "time" + + "camlistore.org/pkg/throttle" + "camlistore.org/pkg/wkfs" + "camlistore.org/third_party/github.com/bradfitz/runsit/listen" + "github.com/bradfitz/http2" +) + +type Server struct { + mux *http.ServeMux + listener net.Listener + verbose bool // log HTTP requests and response codes + + Logger *log.Logger // or nil. + + // H2Server is the HTTP/2 server config. 
+ H2Server http2.Server + + enableTLS bool + tlsCertFile, tlsKeyFile string + + mu sync.Mutex + reqs int64 +} + +func New() *Server { + verbose, _ := strconv.ParseBool(os.Getenv("CAMLI_HTTP_DEBUG")) + return &Server{ + mux: http.NewServeMux(), + verbose: verbose, + } +} + +func (s *Server) printf(format string, v ...interface{}) { + if s.Logger != nil { + s.Logger.Printf(format, v...) + return + } + log.Printf(format, v...) +} + +func (s *Server) fatalf(format string, v ...interface{}) { + if s.Logger != nil { + s.Logger.Fatalf(format, v...) + return + } + log.Fatalf(format, v...) +} + +func (s *Server) SetTLS(certFile, keyFile string) { + s.enableTLS = true + s.tlsCertFile = certFile + s.tlsKeyFile = keyFile +} + +func (s *Server) ListenURL() string { + scheme := "http" + if s.enableTLS { + scheme = "https" + } + if s.listener != nil { + if taddr, ok := s.listener.Addr().(*net.TCPAddr); ok { + if taddr.IP.IsUnspecified() { + return fmt.Sprintf("%s://localhost:%d", scheme, taddr.Port) + } + return fmt.Sprintf("%s://%s", scheme, s.listener.Addr()) + } + } + return "" +} + +func (s *Server) HandleFunc(pattern string, fn func(http.ResponseWriter, *http.Request)) { + s.mux.HandleFunc(pattern, fn) +} + +func (s *Server) Handle(pattern string, handler http.Handler) { + s.mux.Handle(pattern, handler) +} + +func (s *Server) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + var n int64 + if s.verbose { + s.mu.Lock() + s.reqs++ + n = s.reqs + s.mu.Unlock() + s.printf("Request #%d: %s %s (from %s) ...", n, req.Method, req.RequestURI, req.RemoteAddr) + rw = &trackResponseWriter{ResponseWriter: rw} + } + s.mux.ServeHTTP(rw, req) + if s.verbose { + tw := rw.(*trackResponseWriter) + s.printf("Request #%d: %s %s = code %d, %d bytes", n, req.Method, req.RequestURI, tw.code, tw.resSize) + } +} + +type trackResponseWriter struct { + http.ResponseWriter + code int + resSize int64 +} + +func (tw *trackResponseWriter) WriteHeader(code int) { + tw.code = code + 
tw.ResponseWriter.WriteHeader(code) +} + +func (tw *trackResponseWriter) Write(p []byte) (int, error) { + if tw.code == 0 { + tw.code = 200 + } + tw.resSize += int64(len(p)) + return tw.ResponseWriter.Write(p) +} + +// Listen starts listening on the given host:port addr. +func (s *Server) Listen(addr string) error { + if s.listener != nil { + return nil + } + + doLog := os.Getenv("TESTING_PORT_WRITE_FD") == "" // Don't make noise during unit tests + if addr == "" { + return fmt.Errorf(": needs to be provided to start listening") + } + + var err error + s.listener, err = listen.Listen(addr) + if err != nil { + return fmt.Errorf("Failed to listen on %s: %v", addr, err) + } + base := s.ListenURL() + if doLog { + s.printf("Starting to listen on %s\n", base) + } + + if s.enableTLS { + config := &tls.Config{ + Rand: rand.Reader, + Time: time.Now, + NextProtos: []string{http2.NextProtoTLS, "http/1.1"}, + } + config.Certificates = make([]tls.Certificate, 1) + + config.Certificates[0], err = loadX509KeyPair(s.tlsCertFile, s.tlsKeyFile) + if err != nil { + return fmt.Errorf("Failed to load TLS cert: %v", err) + } + s.listener = tls.NewListener(s.listener, config) + } + + if doLog && strings.HasSuffix(base, ":0") { + s.printf("Now listening on %s\n", s.ListenURL()) + } + + return nil +} + +func (s *Server) throttleListener() net.Listener { + kBps, _ := strconv.Atoi(os.Getenv("DEV_THROTTLE_KBPS")) + ms, _ := strconv.Atoi(os.Getenv("DEV_THROTTLE_LATENCY_MS")) + if kBps == 0 && ms == 0 { + return s.listener + } + rate := throttle.Rate{ + KBps: kBps, + Latency: time.Duration(ms) * time.Millisecond, + } + return &throttle.Listener{ + Listener: s.listener, + Down: rate, + Up: rate, // TODO: separate rates? 
+ } +} + +func (s *Server) Serve() { + if err := s.Listen(""); err != nil { + s.fatalf("Listen error: %v", err) + } + go runTestHarnessIntegration(s.listener) + + srv := &http.Server{ + Handler: s, + } + // TODO: allow configuring src.ErrorLog (and plumb through to + // Google Cloud Logging when run on GCE, eventually) + + // Setup the NPN NextProto map for HTTP/2 support: + http2.ConfigureServer(srv, &s.H2Server) + + err := srv.Serve(s.throttleListener()) + if err != nil { + s.printf("Error in http server: %v\n", err) + os.Exit(1) + } +} + +// Signals the test harness that we've started listening. +// TODO: write back the port number that we randomly selected? +// For now just writes back a single byte. +func runTestHarnessIntegration(listener net.Listener) { + writePipe, err := pipeFromEnvFd("TESTING_PORT_WRITE_FD") + if err != nil { + return + } + readPipe, _ := pipeFromEnvFd("TESTING_CONTROL_READ_FD") + + if writePipe != nil { + writePipe.Write([]byte(listener.Addr().String() + "\n")) + } + + if readPipe != nil { + bufr := bufio.NewReader(readPipe) + for { + line, err := bufr.ReadString('\n') + if err == io.EOF || line == "EXIT\n" { + os.Exit(0) + } + return + } + } +} + +// loadX509KeyPair is a copy of tls.LoadX509KeyPair but using wkfs. 
+func loadX509KeyPair(certFile, keyFile string) (cert tls.Certificate, err error) { + certPEMBlock, err := wkfs.ReadFile(certFile) + if err != nil { + return + } + keyPEMBlock, err := wkfs.ReadFile(keyFile) + if err != nil { + return + } + return tls.X509KeyPair(certPEMBlock, keyPEMBlock) +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/wkfs/gcs/gcs.go b/vendor/github.com/camlistore/camlistore/pkg/wkfs/gcs/gcs.go new file mode 100644 index 00000000..c7489a1e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/wkfs/gcs/gcs.go @@ -0,0 +1,204 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package gcs registers a Google Cloud Storage filesystem at the +// well-known /gcs/ filesystem path if the current machine is running +// on Google Compute Engine. +package gcs + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "strings" + "sync" + "time" + + "camlistore.org/pkg/googlestorage" + "camlistore.org/pkg/wkfs" + "google.golang.org/cloud/compute/metadata" +) + +// Max size for all files read or written. This filesystem is only +// supposed to be for configuration data only, so this is very +// generous. 
+const maxSize = 1 << 20 + +func init() { + if !metadata.OnGCE() { + return + } + client, err := googlestorage.NewServiceClient() + wkfs.RegisterFS("/gcs/", &gcsFS{client, err}) +} + +type gcsFS struct { + client *googlestorage.Client + err error // sticky error +} + +func (fs *gcsFS) parseName(name string) (bucket, key string, err error) { + if fs.err != nil { + return "", "", fs.err + } + name = strings.TrimPrefix(name, "/gcs/") + i := strings.Index(name, "/") + if i < 0 { + return name, "", nil + } + return name[:i], name[i+1:], nil +} + +func (fs *gcsFS) Open(name string) (wkfs.File, error) { + bucket, key, err := fs.parseName(name) + if err != nil { + return nil, fs.err + } + rc, size, err := fs.client.GetObject(&googlestorage.Object{ + Bucket: bucket, + Key: key, + }) + if err != nil { + return nil, err + } + defer rc.Close() + if size > maxSize { + return nil, fmt.Errorf("file %s too large (%d bytes) for /gcs/ filesystem", name, size) + } + slurp, err := ioutil.ReadAll(io.LimitReader(rc, size)) + if err != nil { + return nil, err + } + return &file{ + name: name, + Reader: bytes.NewReader(slurp), + }, nil +} + +func (fs *gcsFS) Stat(name string) (os.FileInfo, error) { return fs.Lstat(name) } +func (fs *gcsFS) Lstat(name string) (os.FileInfo, error) { + bucket, key, err := fs.parseName(name) + if err != nil { + return nil, err + } + size, exists, err := fs.client.StatObject(&googlestorage.Object{ + Bucket: bucket, + Key: key, + }) + if err != nil { + return nil, err + } + if !exists { + return nil, os.ErrNotExist + } + return &statInfo{ + name: name, + size: size, + }, nil +} + +func (fs *gcsFS) MkdirAll(path string, perm os.FileMode) error { return nil } + +func (fs *gcsFS) OpenFile(name string, flag int, perm os.FileMode) (wkfs.FileWriter, error) { + bucket, key, err := fs.parseName(name) + if err != nil { + return nil, err + } + switch flag { + case os.O_WRONLY | os.O_CREATE | os.O_EXCL: + case os.O_WRONLY | os.O_CREATE | os.O_TRUNC: + default: + return 
nil, fmt.Errorf("Unsupported OpenFlag flag mode %d on Google Cloud Storage", flag) + } + if flag&os.O_EXCL != 0 { + if _, err := fs.Stat(name); err == nil { + return nil, os.ErrExist + } + } + return &fileWriter{ + fs: fs, + name: name, + bucket: bucket, + key: key, + flag: flag, + perm: perm, + }, nil +} + +type fileWriter struct { + fs *gcsFS + name, bucket, key string + flag int + perm os.FileMode + + buf bytes.Buffer + + mu sync.Mutex + closed bool +} + +func (w *fileWriter) Write(p []byte) (n int, err error) { + if len(p)+w.buf.Len() > maxSize { + return 0, &os.PathError{ + Op: "Write", + Path: w.name, + Err: errors.New("file too large"), + } + } + return w.buf.Write(p) +} + +func (w *fileWriter) Close() (err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.closed { + return nil + } + w.closed = true + return w.fs.client.PutObject(&googlestorage.Object{ + Bucket: w.bucket, + Key: w.key, + }, ioutil.NopCloser(bytes.NewReader(w.buf.Bytes()))) +} + +type statInfo struct { + name string + size int64 + isDir bool + modtime time.Time +} + +func (si *statInfo) IsDir() bool { return si.isDir } +func (si *statInfo) ModTime() time.Time { return si.modtime } +func (si *statInfo) Mode() os.FileMode { return 0644 } +func (si *statInfo) Name() string { return path.Base(si.name) } +func (si *statInfo) Size() int64 { return si.size } +func (si *statInfo) Sys() interface{} { return nil } + +type file struct { + name string + *bytes.Reader +} + +func (*file) Close() error { return nil } +func (f *file) Name() string { return path.Base(f.name) } +func (f *file) Stat() (os.FileInfo, error) { + panic("Stat not implemented on /gcs/ files yet") +} diff --git a/vendor/github.com/camlistore/camlistore/pkg/wkfs/wkfs.go b/vendor/github.com/camlistore/camlistore/pkg/wkfs/wkfs.go new file mode 100644 index 00000000..502a893c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/pkg/wkfs/wkfs.go @@ -0,0 +1,132 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the 
Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package wkfs implements the pluggable "well-known filesystem" abstraction layer. +// +// Instead of accessing files directly through the operating system +// using os.Open or os.Stat, code should use wkfs.Open or wkfs.Stat, +// which first try to intercept paths at well-known top-level +// directories representing previously-registered mount types, +// otherwise fall through to the operating system paths. +// +// Example of top-level well-known directories that might be +// registered include /gcs/bucket/object for Google Cloud Storage or +// /s3/bucket/object for AWS S3. +package wkfs + +import ( + "io" + "io/ioutil" + "os" + "strings" +) + +type File interface { + io.Reader + io.ReaderAt + io.Closer + io.Seeker + Name() string + Stat() (os.FileInfo, error) +} + +type FileWriter interface { + io.Writer + io.Closer +} + +func Open(name string) (File, error) { return fs(name).Open(name) } +func Stat(name string) (os.FileInfo, error) { return fs(name).Stat(name) } +func Lstat(name string) (os.FileInfo, error) { return fs(name).Lstat(name) } +func MkdirAll(path string, perm os.FileMode) error { return fs(path).MkdirAll(path, perm) } +func OpenFile(name string, flag int, perm os.FileMode) (FileWriter, error) { + return fs(name).OpenFile(name, flag, perm) +} +func Create(name string) (FileWriter, error) { + // like os.Create but WRONLY instead of RDWR because we don't + // expose a Reader here. 
+ return OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) +} + +func fs(name string) FileSystem { + for pfx, fs := range wkFS { + if strings.HasPrefix(name, pfx) { + return fs + } + } + return osFS{} +} + +type osFS struct{} + +func (osFS) Open(name string) (File, error) { return os.Open(name) } +func (osFS) Stat(name string) (os.FileInfo, error) { return os.Stat(name) } +func (osFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } +func (osFS) MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) } +func (osFS) OpenFile(name string, flag int, perm os.FileMode) (FileWriter, error) { + return os.OpenFile(name, flag, perm) +} + +type FileSystem interface { + Open(name string) (File, error) + OpenFile(name string, flag int, perm os.FileMode) (FileWriter, error) + Stat(name string) (os.FileInfo, error) + Lstat(name string) (os.FileInfo, error) + MkdirAll(path string, perm os.FileMode) error +} + +// well-known filesystems +var wkFS = map[string]FileSystem{} + +// RegisterFS registers a well-known filesystem. It intercepts +// anything beginning with prefix (which must start and end with a +// forward slash) and forwards it to fs. +func RegisterFS(prefix string, fs FileSystem) { + if !strings.HasPrefix(prefix, "/") || !strings.HasSuffix(prefix, "/") { + panic("bogus prefix: " + prefix) + } + if _, dup := wkFS[prefix]; dup { + panic("duplication registration of " + prefix) + } + wkFS[prefix] = fs +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. 
+func WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +func ReadFile(filename string) ([]byte, error) { + f, err := Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + return ioutil.ReadAll(f) +} diff --git a/vendor/github.com/camlistore/camlistore/server/.gitignore b/vendor/github.com/camlistore/camlistore/server/.gitignore new file mode 100644 index 00000000..f3c7a7c5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/.gitignore @@ -0,0 +1 @@ +Makefile diff --git a/vendor/github.com/camlistore/camlistore/server/appengine/README b/vendor/github.com/camlistore/camlistore/server/appengine/README new file mode 100644 index 00000000..3512b0e2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/appengine/README @@ -0,0 +1,9 @@ +We typically just use the "devcam appengine" command to hack on this code. (To build devcam: go install ../../dev/devcam). + +But to run by hand: + +$ dev_appserver.py --high_replication . 
+ +Other useful flags: + -a 0.0.0.0 (listen on all addresses) + -c (wipe the datastore) diff --git a/vendor/github.com/camlistore/camlistore/server/appengine/app.yaml b/vendor/github.com/camlistore/camlistore/server/appengine/app.yaml new file mode 100644 index 00000000..bb8cd554 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/appengine/app.yaml @@ -0,0 +1,8 @@ +application: camlistore +version: 1 +runtime: go +api_version: go1 + +handlers: +- url: /.* + script: _go_app diff --git a/vendor/github.com/camlistore/camlistore/server/appengine/build_test.go b/vendor/github.com/camlistore/camlistore/server/appengine/build_test.go new file mode 100644 index 00000000..4bfcf55b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/appengine/build_test.go @@ -0,0 +1,119 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package appengine_test + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + + "camlistore.org/pkg/osutil" +) + +func TestAppEngineBuilds(t *testing.T) { + t.Skip("Currently broken until App Engine supports Go 1.3") + if runtime.GOOS == "windows" { + t.Skip("skipping on Windows; don't want to deal with escaping backslashes") + } + camRoot, err := osutil.GoPackagePath("camlistore.org") + if err != nil { + t.Errorf("No camlistore.org package in GOPATH: %v", err) + } + sdkLink := filepath.Join(camRoot, "appengine-sdk") + if _, err := os.Lstat(sdkLink); os.IsNotExist(err) { + t.Skipf("Skipping test; no App Engine SDK symlink at %s pointing to App Engine SDK.", sdkLink) + } + sdk, err := os.Readlink(sdkLink) + if err != nil { + t.Fatal(err) + } + + td, err := ioutil.TempDir("", "camli-appengine") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + gab := filepath.Join(sdk, "goroot", "bin", "go-app-builder") + if runtime.GOOS == "windows" { + gab += ".exe" + } + + appBase := filepath.Join(camRoot, "server", "appengine") + f, err := os.Open(filepath.Join(appBase, "camli")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + srcFilesAll, err := f.Readdirnames(-1) + if err != nil { + t.Fatal(err) + } + + appenginePkg := filepath.Join(sdk, "goroot", "pkg", runtime.GOOS+"_"+runtime.GOARCH+"_appengine") + cmd := exec.Command(gab, + "-app_base", appBase, + "-arch", archChar(), + "-binary_name", "_go_app", + "-dynamic", + "-extra_imports", "appengine_internal/init", + "-goroot", filepath.Join(sdk, "goroot"), + "-gcflags", "-I,"+appenginePkg, + "-ldflags", "-L,"+appenginePkg, + "-nobuild_files", "^^$", + "-unsafe", + "-work_dir", td, + "-gopath", os.Getenv("GOPATH"), + // "-v", + ) + for _, f := range srcFilesAll { + if strings.HasSuffix(f, ".go") { + cmd.Args = append(cmd.Args, filepath.Join("camli", f)) + } + } + for _, pair := range os.Environ() { + if strings.HasPrefix(pair, "GOROOT=") { + continue + } + 
cmd.Env = append(cmd.Env, pair) + } + + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Error: %v\n%s", err, out) + } + target := filepath.Join(td, "_go_app") + if _, err := os.Stat(target); os.IsNotExist(err) { + t.Errorf("target binary doesn't exist") + } +} + +func archChar() string { + switch runtime.GOARCH { + case "386": + return "8" + case "amd64": + return "6" + case "arm": + return "5" + } + panic("unknown arch " + runtime.GOARCH) +} diff --git a/vendor/github.com/camlistore/camlistore/server/appengine/camli/aeindex.go b/vendor/github.com/camlistore/camlistore/server/appengine/camli/aeindex.go new file mode 100644 index 00000000..234936f6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/appengine/camli/aeindex.go @@ -0,0 +1,227 @@ +// +build appengine + +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appengine + +import ( + "io" + + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/index" + "camlistore.org/pkg/jsonconfig" + "camlistore.org/pkg/sorted" + + "appengine" + "appengine/datastore" +) + +const indexDebug = false + +var ( + indexRowKind = "IndexRow" +) + +// A row of the index. 
Keyed by "|" +type indexRowEnt struct { + Value []byte +} + +type indexStorage struct { + ns string +} + +func (is *indexStorage) key(c appengine.Context, key string) *datastore.Key { + return datastore.NewKey(c, indexRowKind, key, 0, datastore.NewKey(c, indexRowKind, is.ns, 0, nil)) +} + +func (is *indexStorage) BeginBatch() sorted.BatchMutation { + return sorted.NewBatchMutation() +} + +func (is *indexStorage) CommitBatch(bm sorted.BatchMutation) error { + type mutationser interface { + Mutations() []sorted.Mutation + } + var muts []sorted.Mutation + if m, ok := bm.(mutationser); ok { + muts = m.Mutations() + } else { + panic("unexpected type") + } + tryFunc := func(c appengine.Context) error { + for _, m := range muts { + dk := is.key(c, m.Key()) + if m.IsDelete() { + if err := datastore.Delete(c, dk); err != nil { + return err + } + } else { + // A put. + ent := &indexRowEnt{ + Value: []byte(m.Value()), + } + if _, err := datastore.Put(c, dk, ent); err != nil { + return err + } + } + } + return nil + } + c := ctxPool.Get() + defer c.Return() + return datastore.RunInTransaction(c, tryFunc, crossGroupTransaction) +} + +func (is *indexStorage) Get(key string) (string, error) { + c := ctxPool.Get() + defer c.Return() + row := new(indexRowEnt) + err := datastore.Get(c, is.key(c, key), row) + if indexDebug { + c.Infof("indexStorage.Get(%q) = %q, %v", key, row.Value, err) + } + if err != nil { + if err == datastore.ErrNoSuchEntity { + err = sorted.ErrNotFound + } + return "", err + } + return string(row.Value), nil +} + +func (is *indexStorage) Set(key, value string) error { + c := ctxPool.Get() + defer c.Return() + row := &indexRowEnt{ + Value: []byte(value), + } + _, err := datastore.Put(c, is.key(c, key), row) + return err +} + +func (is *indexStorage) Delete(key string) error { + c := ctxPool.Get() + defer c.Return() + return datastore.Delete(c, is.key(c, key)) +} + +func (is *indexStorage) Find(start, end string) sorted.Iterator { + c := ctxPool.Get() + if 
indexDebug { + c.Infof("IndexStorage Find(%q, %q)", start, end) + } + it := &iter{ + is: is, + cl: c, + after: start, + endKey: end, + nsk: datastore.NewKey(c, indexRowKind, is.ns, 0, nil), + } + it.Closer = &onceCloser{fn: func() { + c.Return() + it.nsk = nil + }} + return it +} + +func (is *indexStorage) Close() error { return nil } + +type iter struct { + cl ContextLoan + after string + endKey string // optional + io.Closer + nsk *datastore.Key + is *indexStorage + + it *datastore.Iterator + n int // rows seen for this batch + + key, value string + end bool +} + +func (it *iter) Next() bool { + if it.nsk == nil { + // already closed + return false + } + if it.it == nil { + q := datastore.NewQuery(indexRowKind).Filter("__key__>=", it.is.key(it.cl, it.after)) + if it.endKey != "" { + q = q.Filter("__key__<", it.is.key(it.cl, it.endKey)) + } + it.it = q.Run(it.cl) + it.n = 0 + } + var ent indexRowEnt + key, err := it.it.Next(&ent) + if indexDebug { + it.cl.Infof("For after %q; key = %#v, err = %v", it.after, key, err) + } + if err == datastore.Done { + if it.n == 0 { + return false + } + return it.Next() + } + if err != nil { + it.cl.Warningf("Error iterating over index after %q: %v", it.after, err) + return false + } + it.n++ + it.key = key.StringID() + it.value = string(ent.Value) + it.after = it.key + return true +} + +func (it *iter) Key() string { return it.key } +func (it *iter) Value() string { return it.value } + +// TODO(bradfit): optimize the string<->[]byte copies in this iterator, as done in the other +// sorted.KeyValue iterators. 
+func (it *iter) KeyBytes() []byte { return []byte(it.key) } +func (it *iter) ValueBytes() []byte { return []byte(it.value) } + +func indexFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { + is := &indexStorage{} + var ( + blobPrefix = config.RequiredString("blobSource") + ns = config.OptionalString("namespace", "") + ) + if err := config.Validate(); err != nil { + return nil, err + } + sto, err := ld.GetStorage(blobPrefix) + if err != nil { + return nil, err + } + is.ns, err = sanitizeNamespace(ns) + if err != nil { + return nil, err + } + + ix, err := index.New(is) + if err != nil { + return nil, err + } + ix.BlobSource = sto + ix.KeyFetcher = ix.BlobSource // TODO(bradfitz): global search? something else? + return ix, nil +} diff --git a/vendor/github.com/camlistore/camlistore/server/appengine/camli/common.go b/vendor/github.com/camlistore/camlistore/server/appengine/camli/common.go new file mode 100644 index 00000000..0fe97104 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/appengine/camli/common.go @@ -0,0 +1,39 @@ +// +build appengine + +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package appengine + +import ( + "fmt" + "strings" +) + +func sanitizeNamespace(ns string) (outns string, err error) { + outns = ns + switch { + case strings.Contains(ns, "|"): + err = fmt.Errorf("no pipe allowed in namespace %q", ns) + case strings.Contains(ns, "\x00"): + err = fmt.Errorf("no zero byte allowed in namespace %q", ns) + case ns == "-": + err = fmt.Errorf("reserved namespace %q", ns) + case ns == "": + outns = "-" + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/server/appengine/camli/contextpool.go b/vendor/github.com/camlistore/camlistore/server/appengine/camli/contextpool.go new file mode 100644 index 00000000..175e7bd1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/appengine/camli/contextpool.go @@ -0,0 +1,115 @@ +// +build appengine + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appengine + +import ( + "sync" + + "appengine" +) + +type ContextPool struct { + mu sync.Mutex // guards live + + // Live HTTP requests + live map[appengine.Context]*sync.WaitGroup +} + +// HandlerBegin notes that the provided context is beginning and it can be +// shared until HandlerEnd is called. +func (p *ContextPool) HandlerBegin(c appengine.Context) { + p.mu.Lock() + defer p.mu.Unlock() + if p.live == nil { + p.live = make(map[appengine.Context]*sync.WaitGroup) + } + if _, ok := p.live[c]; ok { + // dup; ignore. 
+ return + } + p.live[c] = new(sync.WaitGroup) +} + +// HandlerEnd notes that the provided context is about to go out of service, +// removes it from the pool of available contexts, and blocks until everybody +// is done using it. +func (p *ContextPool) HandlerEnd(c appengine.Context) { + p.mu.Lock() + wg := p.live[c] + delete(p.live, c) + p.mu.Unlock() + if wg != nil { + wg.Wait() + } +} + +// A ContextLoan is a superset of a Context, so can passed anywhere +// that needs an appengine.Context. +// +// When done, Return it. +type ContextLoan interface { + appengine.Context + + // Return returns the Context to the pool. + // Return must be called exactly once. + Return() +} + +// Get returns a valid App Engine context from some active HTTP request +// which is guaranteed to stay valid. Be sure to return it. +// +// Typical use: +// ctx := pool.Get() +// defer ctx.Return() +func (p *ContextPool) Get() ContextLoan { + p.mu.Lock() + defer p.mu.Unlock() + + // Pick a random active context. TODO: pick the "right" one, + // using some TLS-like-guess/hack from runtume.Stacks. + var c appengine.Context + var wg *sync.WaitGroup + for c, wg = range p.live { + break + } + if c == nil { + panic("ContextPool.Get called with no live HTTP requests") + } + wg.Add(1) + cl := &contextLoan{Context: c, wg: wg} + // TODO: set warning finalizer on this? + return cl +} + +type contextLoan struct { + appengine.Context + + mu sync.Mutex + wg *sync.WaitGroup +} + +func (cl *contextLoan) Return() { + cl.mu.Lock() + defer cl.mu.Unlock() + if cl.wg == nil { + panic("Return called twice") + } + cl.wg.Done() + cl.wg = nil +} diff --git a/vendor/github.com/camlistore/camlistore/server/appengine/camli/main.go b/vendor/github.com/camlistore/camlistore/server/appengine/camli/main.go new file mode 100644 index 00000000..62572221 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/appengine/camli/main.go @@ -0,0 +1,108 @@ +// +build appengine + +/* +Copyright 2011 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appengine + +import ( + "fmt" + "net/http" + "sync" + + "appengine" + + "camlistore.org/pkg/blobserver" // storage interface definition + _ "camlistore.org/pkg/blobserver/cond" + _ "camlistore.org/pkg/blobserver/replica" + _ "camlistore.org/pkg/blobserver/shard" + _ "camlistore.org/pkg/server" // handlers: UI, publish, thumbnailing, etc + "camlistore.org/pkg/serverinit" // wiring up the world from a JSON description + + // TODO(bradfitz): uncomment these config setup + // Both require an App Engine context to make HTTP requests too. + //_ "camlistore.org/pkg/blobserver/remote" + //_ "camlistore.org/pkg/blobserver/s3" +) + +// lazyInit is our root handler for App Engine. We don't have an App Engine +// context until the first request and we need that context to figure out +// our serving URL. So we use this to defer setting up our environment until +// the first request. 
+type lazyInit struct { + mu sync.Mutex + ready bool + mux *http.ServeMux +} + +func (li *lazyInit) ServeHTTP(w http.ResponseWriter, r *http.Request) { + c := appengine.NewContext(r) + ctxPool.HandlerBegin(c) + defer ctxPool.HandlerEnd(c) + + li.mu.Lock() + if !li.ready { + li.ready = realInit(w, r) + } + li.mu.Unlock() + if li.ready { + li.mux.ServeHTTP(w, r) + } +} + +var ctxPool ContextPool + +var root = new(lazyInit) + +func init() { + // TODO(bradfitz): rename some of this to be consistent + blobserver.RegisterStorageConstructor("appengine", blobserver.StorageConstructor(newFromConfig)) + blobserver.RegisterStorageConstructor("aeindex", blobserver.StorageConstructor(indexFromConfig)) + http.Handle("/", root) +} + +func realInit(w http.ResponseWriter, r *http.Request) bool { + ctx := appengine.NewContext(r) + + errf := func(format string, args ...interface{}) bool { + ctx.Errorf("In init: "+format, args...) + http.Error(w, fmt.Sprintf(format, args...), 500) + return false + } + + config, err := serverinit.Load("./config.json") + if err != nil { + return errf("Could not load server config: %v", err) + } + + // Update the config to use the URL path derived from the first App Engine request. 
+ // TODO(bslatkin): Support hostnames that aren't x.appspot.com + scheme := "http" + if r.TLS != nil { + scheme = "https" + } + + baseURL := fmt.Sprintf("%s://%s/", scheme, appengine.DefaultVersionHostname(ctx)) + ctx.Infof("baseurl = %q", baseURL) + + root.mux = http.NewServeMux() + _, err = config.InstallHandlers(root.mux, baseURL, false, r) + if err != nil { + return errf("Error installing handlers: %v", err) + } + + return true +} diff --git a/vendor/github.com/camlistore/camlistore/server/appengine/camli/ownerauth.go b/vendor/github.com/camlistore/camlistore/server/appengine/camli/ownerauth.go new file mode 100644 index 00000000..95b6a000 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/appengine/camli/ownerauth.go @@ -0,0 +1,82 @@ +// +build appengine + +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package appengine + +import ( + "net/http" + + "camlistore.org/pkg/auth" + "camlistore.org/pkg/httputil" + + "appengine" + "appengine/user" +) + +func init() { + auth.RegisterAuth("appengine_app_owner", newOwnerAuth) +} + +type ownerAuth struct { + fallback auth.AuthMode +} + +var _ auth.UnauthorizedSender = (*ownerAuth)(nil) + +func newOwnerAuth(arg string) (auth.AuthMode, error) { + m := &ownerAuth{} + if arg != "" { + f, err := auth.FromConfig(arg) + if err != nil { + return nil, err + } + m.fallback = f + } + return m, nil +} + +func (o *ownerAuth) AllowedAccess(req *http.Request) auth.Operation { + c := appengine.NewContext(req) + if user.IsAdmin(c) { + return auth.OpAll + } + if o.fallback != nil { + return o.fallback.AllowedAccess(req) + } + return 0 +} + +func (o *ownerAuth) SendUnauthorized(rw http.ResponseWriter, req *http.Request) bool { + if !httputil.IsGet(req) { + return false + } + c := appengine.NewContext(req) + loginURL, err := user.LoginURL(c, req.URL.String()) + if err != nil { + c.Errorf("Fetching LoginURL: %v", err) + return false + } + http.Redirect(rw, req, loginURL, http.StatusFound) + return true +} + +func (o *ownerAuth) AddAuthHeader(req *http.Request) { + // TODO(bradfitz): split the auth interface into a server part + // and a client part. + panic("Not applicable. should not be called.") +} diff --git a/vendor/github.com/camlistore/camlistore/server/appengine/camli/storage.go b/vendor/github.com/camlistore/camlistore/server/appengine/camli/storage.go new file mode 100644 index 00000000..25d8a911 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/appengine/camli/storage.go @@ -0,0 +1,377 @@ +// +build appengine + +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appengine + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" + "sync" + + "appengine" + "appengine/blobstore" + "appengine/datastore" + + "camlistore.org/pkg/blob" + "camlistore.org/pkg/blobserver" + "camlistore.org/pkg/context" + "camlistore.org/pkg/jsonconfig" +) + +const ( + blobKind = "Blob" + memKind = "NsBlobMember" // blob membership in a namespace +) + +var _ blobserver.Storage = (*appengineStorage)(nil) + +type appengineStorage struct { + namespace string // never empty; config initializes to at least "-" +} + +// blobEnt is stored once per unique blob, keyed by blobref. +type blobEnt struct { + Size int64 `datastore:"Size,noindex"` + BlobKey appengine.BlobKey `datastore:"BlobKey,noindex"` + Namespaces string `datastore:"Namespaces,noindex"` // |-separated string of namespaces + + // TODO(bradfitz): IsCamliSchemaBlob bool? ... probably want + // on enumeration (memEnt) too. 
+} + +// memEnt is stored once per blob in a namespace, keyed by "ns|blobref" +type memEnt struct { + Size int64 `datastore:"Size,noindex"` +} + +func byteDecSize(b []byte) (int64, error) { + var size int64 + n, err := fmt.Fscanf(bytes.NewBuffer(b), "%d", &size) + if n != 1 || err != nil { + return 0, fmt.Errorf("invalid Size column in datastore: %q", string(b)) + } + return size, nil +} + +func (b *blobEnt) inNamespace(ns string) (out bool) { + for _, in := range strings.Split(b.Namespaces, "|") { + if ns == in { + return true + } + } + return false +} + +func entKey(c appengine.Context, br blob.Ref) *datastore.Key { + return datastore.NewKey(c, blobKind, br.String(), 0, nil) +} + +func (s *appengineStorage) memKey(c appengine.Context, br blob.Ref) *datastore.Key { + return datastore.NewKey(c, memKind, fmt.Sprintf("%s|%s", s.namespace, br.String()), 0, nil) +} + +func fetchEnt(c appengine.Context, br blob.Ref) (*blobEnt, error) { + row := new(blobEnt) + err := datastore.Get(c, entKey(c, br), row) + if err != nil { + return nil, err + } + return row, nil +} + +func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) { + sto := &appengineStorage{ + namespace: config.OptionalString("namespace", ""), + } + if err := config.Validate(); err != nil { + return nil, err + } + sto.namespace, err = sanitizeNamespace(sto.namespace) + if err != nil { + return nil, err + } + return sto, nil +} + +func (sto *appengineStorage) Fetch(br blob.Ref) (file io.ReadCloser, size uint32, err error) { + loan := ctxPool.Get() + ctx := loan + defer func() { + if loan != nil { + loan.Return() + } + }() + + row, err := fetchEnt(ctx, br) + if err == datastore.ErrNoSuchEntity { + err = os.ErrNotExist + return + } + if err != nil { + return + } + if !row.inNamespace(sto.namespace) { + err = os.ErrNotExist + return + } + + closeLoan := loan + var c io.Closer = &onceCloser{fn: func() { closeLoan.Return() }} + loan = nil // take it, so it's not 
defer-closed + + reader := blobstore.NewReader(ctx, appengine.BlobKey(string(row.BlobKey))) + type readCloser struct { + io.Reader + io.Closer + } + return readCloser{reader, c}, uint32(row.Size), nil +} + +type onceCloser struct { + once sync.Once + fn func() +} + +func (oc *onceCloser) Close() error { + oc.once.Do(oc.fn) + return nil +} + +var crossGroupTransaction = &datastore.TransactionOptions{XG: true} + +func (sto *appengineStorage) ReceiveBlob(br blob.Ref, in io.Reader) (sb blob.SizedRef, err error) { + loan := ctxPool.Get() + defer loan.Return() + ctx := loan + + var b bytes.Buffer + written, err := io.Copy(&b, in) + if err != nil { + return + } + + // bkey is non-empty once we've uploaded the blob. + var bkey appengine.BlobKey + + // uploadBlob uploads the blob, unless it's already been done. + uploadBlob := func(ctx appengine.Context) error { + if len(bkey) > 0 { + return nil // already done in previous transaction attempt + } + bw, err := blobstore.Create(ctx, "application/octet-stream") + if err != nil { + return err + } + _, err = io.Copy(bw, &b) + if err != nil { + // TODO(bradfitz): try to clean up; close it, see if we can find the key, delete it. + ctx.Errorf("blobstore Copy error: %v", err) + return err + } + err = bw.Close() + if err != nil { + // TODO(bradfitz): try to clean up; see if we can find the key, delete it. 
+ ctx.Errorf("blobstore Close error: %v", err) + return err + } + k, err := bw.Key() + if err == nil { + bkey = k + } + return err + } + + tryFunc := func(tc appengine.Context) error { + row, err := fetchEnt(tc, br) + switch err { + case datastore.ErrNoSuchEntity: + if err := uploadBlob(tc); err != nil { + tc.Errorf("uploadBlob failed: %v", err) + return err + } + row = &blobEnt{ + Size: written, + BlobKey: bkey, + Namespaces: sto.namespace, + } + _, err = datastore.Put(tc, entKey(tc, br), row) + if err != nil { + return err + } + case nil: + if row.inNamespace(sto.namespace) { + // Nothing to do + return nil + } + row.Namespaces = row.Namespaces + "|" + sto.namespace + _, err = datastore.Put(tc, entKey(tc, br), row) + if err != nil { + return err + } + default: + return err + } + + // Add membership row + _, err = datastore.Put(tc, sto.memKey(tc, br), &memEnt{ + Size: written, + }) + return err + } + err = datastore.RunInTransaction(ctx, tryFunc, crossGroupTransaction) + if err != nil { + if len(bkey) > 0 { + // If we just created this blob but we + // ultimately failed, try our best to delete + // it so it's not orphaned. + blobstore.Delete(ctx, bkey) + } + return + } + return blob.SizedRef{br, uint32(written)}, nil +} + +// NOTE(bslatkin): No fucking clue if this works. +func (sto *appengineStorage) RemoveBlobs(blobs []blob.Ref) error { + loan := ctxPool.Get() + defer loan.Return() + ctx := loan + + tryFunc := func(tc appengine.Context, br blob.Ref) error { + // TODO(bslatkin): Make the DB gets in this a multi-get. + // Remove the namespace from the blobEnt + row, err := fetchEnt(tc, br) + switch err { + case datastore.ErrNoSuchEntity: + // Doesn't exist, that means there should be no memEnt, but let's be + // paranoid and double check anyways. + case nil: + // blobEnt exists, remove our namespace from it if possible. 
+ newNS := []string{} + for _, val := range strings.Split(string(row.Namespaces), "|") { + if val != sto.namespace { + newNS = append(newNS, val) + } + } + if v := strings.Join(newNS, "|"); v != row.Namespaces { + row.Namespaces = v + _, err = datastore.Put(tc, entKey(tc, br), row) + if err != nil { + return err + } + } + default: + return err + } + + // Blindly delete the memEnt. + err = datastore.Delete(tc, sto.memKey(tc, br)) + return err + } + + for _, br := range blobs { + ret := datastore.RunInTransaction( + ctx, + func(tc appengine.Context) error { + return tryFunc(tc, br) + }, + crossGroupTransaction) + if ret != nil { + return ret + } + } + return nil +} + +func (sto *appengineStorage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error { + loan := ctxPool.Get() + defer loan.Return() + ctx := loan + + var ( + keys = make([]*datastore.Key, 0, len(blobs)) + out = make([]interface{}, 0, len(blobs)) + errs = make([]error, len(blobs)) + ) + for _, br := range blobs { + keys = append(keys, sto.memKey(ctx, br)) + out = append(out, new(memEnt)) + } + err := datastore.GetMulti(ctx, keys, out) + if merr, ok := err.(appengine.MultiError); ok { + errs = []error(merr) + err = nil + } + if err != nil { + return err + } + for i, br := range blobs { + thisErr := errs[i] + if thisErr == datastore.ErrNoSuchEntity { + continue + } + if thisErr != nil { + err = errs[i] // just return last one found? 
+ continue + } + ent := out[i].(*memEnt) + dest <- blob.SizedRef{br, uint32(ent.Size)} + } + return err +} + +func (sto *appengineStorage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error { + defer close(dest) + + loan := ctxPool.Get() + defer loan.Return() + actx := loan + + prefix := sto.namespace + "|" + keyBegin := datastore.NewKey(actx, memKind, prefix+after, 0, nil) + keyEnd := datastore.NewKey(actx, memKind, sto.namespace+"~", 0, nil) + + q := datastore.NewQuery(memKind).Limit(int(limit)).Filter("__key__>", keyBegin).Filter("__key__<", keyEnd) + it := q.Run(actx) + var row memEnt + for { + key, err := it.Next(&row) + if err == datastore.Done { + break + } + if err != nil { + return err + } + select { + case dest <- blob.SizedRef{blob.ParseOrZero(key.StringID()[len(prefix):]), uint32(row.Size)}: + case <-ctx.Done(): + return context.ErrCanceled + } + } + return nil +} + +// TODO(bslatkin): sync does not work on App Engine yet because there are no +// background threads to do the sync loop. The plan is to break the +// syncer code up into two parts: 1) accepts notifications of new blobs to +// sync, 2) does one unit of work enumerating recent blobs and syncing them. +// In App Engine land, 1) will result in a task to be enqueued, and 2) will +// be called from within that queue context. 
diff --git a/vendor/github.com/camlistore/camlistore/server/appengine/config.json b/vendor/github.com/camlistore/camlistore/server/appengine/config.json new file mode 100644 index 00000000..c3ed7390 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/appengine/config.json @@ -0,0 +1,96 @@ +{ "_for-emacs": "-*- mode: js2;-*-", + "handlerConfig": true, + "auth": "appengine_app_owner:userpass:camlistore:pass3179", + "prefixes": { + + "/": { + "handler": "root", + "handlerArgs": { + "ownerName": "TODO:AppEngineOwnerName", + "blobRoot": "/bs-and-maybe-also-index/", + "helpRoot": "/help/", + "statusRoot": "/status/", + "searchRoot": "/my-search/", + "stealth": false + } + }, + + "/ui/": { + "handler": "ui", + "handlerArgs": { + "jsonSignRoot": "/sighelper/" + } + }, + + "/status/": { + "handler": "status" + }, + + "/bs-and-maybe-also-index/": { + "handler": "storage-cond", + "handlerArgs": { + "write": { + "if": "isSchema", + "then": "/bs-and-index/", + "else": "/bs/" + }, + "read": "/bs/" + } + }, + + "/bs-and-index/": { + "handler": "storage-replica", + "handlerArgs": { + "backends": ["/bs/", "/indexer/"] + } + }, + + "/sighelper/": { + "handler": "jsonsign", + "handlerArgs": { + "secretRing": "test-secring.gpg", + "keyId": "26F5ABDA", + "publicKeyDest": "/bs/" + } + }, + + "/bs/": { + "handler": "storage-appengine", + "handlerArgs": { + } + }, + + "/bs2/": { + "handler": "storage-appengine", + "handlerArgs": { + "namespace": "two" + } + }, + + "/sync/": { + "enabled": false, + "handler": "sync", + "handlerArgs": { + "from": "/bs/", + "to": "/indexer/" + } + }, + + "/indexer/": { + "handler": "storage-aeindex", + "handlerArgs": { + "namespace": "idx1", + "blobSource": "/bs/" + } + }, + + "/my-search/": { + "handler": "search", + "handlerArgs": { + "index": "/indexer/", + "owner": "sha1-f2b0b7da718b97ce8c31591d8ed4645c777f3ef4" + } + } + + } +} diff --git a/vendor/github.com/camlistore/camlistore/server/appengine/test-secring.gpg 
b/vendor/github.com/camlistore/camlistore/server/appengine/test-secring.gpg new file mode 120000 index 00000000..9518746e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/appengine/test-secring.gpg @@ -0,0 +1 @@ +../../pkg/jsonsign/testdata/test-secring.gpg \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/.gitignore b/vendor/github.com/camlistore/camlistore/server/camlistored/.gitignore new file mode 100644 index 00000000..48821930 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/.gitignore @@ -0,0 +1 @@ +camlistored diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/README b/vendor/github.com/camlistore/camlistore/server/camlistored/README new file mode 100644 index 00000000..6d6695b1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/README @@ -0,0 +1,7 @@ +This is the main Camlistore server. + +See also: + - The storage interface is in /pkg/blobserver + - The storage implementations are under that e.g. /pkg/blobserver/localdisk + - The HTTP handlers are implemented in /pkg/blobserver/handlers + - The UI code is in /server/camlistored/ui diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/camlistored.go b/vendor/github.com/camlistore/camlistore/server/camlistored/camlistored.go new file mode 100644 index 00000000..5fa3f9e9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/camlistored.go @@ -0,0 +1,415 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// The camlistored binary is the Camlistore server. +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "os" + "os/signal" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "time" + + "camlistore.org/pkg/buildinfo" + "camlistore.org/pkg/env" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/legal/legalprint" + "camlistore.org/pkg/netutil" + "camlistore.org/pkg/osutil" + "camlistore.org/pkg/serverinit" + "camlistore.org/pkg/webserver" + "camlistore.org/pkg/wkfs" + + // VM environments: + "camlistore.org/pkg/osutil/gce" // for init side-effects + LogWriter + + // Storage options: + _ "camlistore.org/pkg/blobserver/blobpacked" + _ "camlistore.org/pkg/blobserver/cond" + _ "camlistore.org/pkg/blobserver/diskpacked" + _ "camlistore.org/pkg/blobserver/encrypt" + _ "camlistore.org/pkg/blobserver/google/cloudstorage" + _ "camlistore.org/pkg/blobserver/google/drive" + _ "camlistore.org/pkg/blobserver/localdisk" + _ "camlistore.org/pkg/blobserver/mongo" + _ "camlistore.org/pkg/blobserver/proxycache" + _ "camlistore.org/pkg/blobserver/remote" + _ "camlistore.org/pkg/blobserver/replica" + _ "camlistore.org/pkg/blobserver/s3" + _ "camlistore.org/pkg/blobserver/shard" + // Indexers: (also present themselves as storage targets) + "camlistore.org/pkg/index" + // KeyValue implementations: + _ "camlistore.org/pkg/sorted/kvfile" + _ "camlistore.org/pkg/sorted/leveldb" + _ "camlistore.org/pkg/sorted/mongo" + _ "camlistore.org/pkg/sorted/mysql" + _ "camlistore.org/pkg/sorted/postgres" + "camlistore.org/pkg/sorted/sqlite" // for sqlite.CompiledIn() + + // Handlers: + _ "camlistore.org/pkg/search" + _ "camlistore.org/pkg/server" // UI, publish, etc + + // Importers: + _ "camlistore.org/pkg/importer/allimporters" +) + +var ( + flagVersion = flag.Bool("version", false, "show version") + flagConfigFile = 
flag.String("configfile", "", + "Config file to use, relative to the Camlistore configuration directory root. "+ + "If blank, the default is used or auto-generated. "+ + "If it starts with 'http:' or 'https:', it is fetched from the network.") + flagListen = flag.String("listen", "", "host:port to listen on, or :0 to auto-select. If blank, the value in the config will be used instead.") + flagOpenBrowser = flag.Bool("openbrowser", true, "Launches the UI on startup") + flagReindex = flag.Bool("reindex", false, "Reindex all blobs on startup") + flagPollParent bool +) + +func init() { + if debug, _ := strconv.ParseBool(os.Getenv("CAMLI_DEBUG")); debug { + flag.BoolVar(&flagPollParent, "pollparent", false, "Camlistored regularly polls its parent process to detect if it has been orphaned, and terminates in that case. Mainly useful for tests.") + } +} + +func exitf(pattern string, args ...interface{}) { + if !strings.HasSuffix(pattern, "\n") { + pattern = pattern + "\n" + } + fmt.Fprintf(os.Stderr, pattern, args...) + osExit(1) +} + +func slurpURL(urls string, limit int64) ([]byte, error) { + res, err := http.Get(urls) + if err != nil { + return nil, err + } + defer res.Body.Close() + return ioutil.ReadAll(io.LimitReader(res.Body, limit)) +} + +// loadConfig returns the server's parsed config file, locating it using the provided arg. 
+// +// The arg may be of the form: +// - empty, to mean automatic (will write a default high-level config if +// no cloud config is available) +// - a filepath absolute or relative to the user's configuration directory, +// - a URL +func loadConfig(arg string) (conf *serverinit.Config, isNewConfig bool, err error) { + if strings.HasPrefix(arg, "http://") || strings.HasPrefix(arg, "https://") { + contents, err := slurpURL(arg, 256<<10) + if err != nil { + return nil, false, err + } + conf, err = serverinit.Load(contents) + return conf, false, err + } + var absPath string + switch { + case arg == "": + absPath = osutil.UserServerConfigPath() + _, err = wkfs.Stat(absPath) + if err != nil { + if !os.IsNotExist(err) { + return + } + conf, err = serverinit.DefaultEnvConfig() + if err != nil || conf != nil { + return + } + err = wkfs.MkdirAll(osutil.CamliConfigDir(), 0700) + if err != nil { + return + } + log.Printf("Generating template config file %s", absPath) + if err = serverinit.WriteDefaultConfigFile(absPath, sqlite.CompiledIn()); err == nil { + isNewConfig = true + } + } + case filepath.IsAbs(arg): + absPath = arg + default: + absPath = filepath.Join(osutil.CamliConfigDir(), arg) + } + conf, err = serverinit.LoadFile(absPath) + return +} + +// 1) We do not want to force the user to buy a cert. +// 2) We still want our client (camput) to be able to +// verify the cert's authenticity. +// 3) We want to avoid MITM attacks and warnings in +// the browser. +// Using a simple self-signed won't do because of 3), +// as Chrome offers no way to set a self-signed as +// trusted when importing it. (same on android). +// We could have created a self-signed CA (that we +// would import in the browsers) and create another +// cert (signed by that CA) which would be the one +// used in camlistore. +// We're doing even simpler: create a self-signed +// CA and directly use it as a self-signed cert +// (and install it as a CA in the browsers). 
+// 2) is satisfied by doing our own checks, +// See pkg/client +func setupTLS(ws *webserver.Server, config *serverinit.Config, hostname string) { + cert, key := config.OptionalString("httpsCert", ""), config.OptionalString("httpsKey", "") + if !config.OptionalBool("https", true) { + return + } + if (cert != "") != (key != "") { + exitf("httpsCert and httpsKey must both be either present or absent") + } + + defCert := osutil.DefaultTLSCert() + defKey := osutil.DefaultTLSKey() + const hint = "You must add this certificate's fingerprint to your client's trusted certs list to use it. Like so:\n\"trustedCerts\": [\"%s\"]," + if cert == defCert && key == defKey { + _, err1 := wkfs.Stat(cert) + _, err2 := wkfs.Stat(key) + if err1 != nil || err2 != nil { + if os.IsNotExist(err1) || os.IsNotExist(err2) { + sig, err := httputil.GenSelfTLSFiles(hostname, defCert, defKey) + if err != nil { + exitf("Could not generate self-signed TLS cert: %q", err) + } + log.Printf(hint, sig) + } else { + exitf("Could not stat cert or key: %q, %q", err1, err2) + } + } + } + // Always generate new certificates if the config's httpsCert and httpsKey are empty. 
+ if cert == "" && key == "" { + sig, err := httputil.GenSelfTLSFiles(hostname, defCert, defKey) + if err != nil { + exitf("Could not generate self signed creds: %q", err) + } + log.Printf(hint, sig) + cert = defCert + key = defKey + } + data, err := wkfs.ReadFile(cert) + if err != nil { + exitf("Failed to read pem certificate: %s", err) + } + sig, err := httputil.CertFingerprint(data) + if err != nil { + exitf("certificate error: %v", err) + } + log.Printf("TLS enabled, with SHA-256 certificate fingerprint: %v", sig) + ws.SetTLS(cert, key) +} + +var osExit = os.Exit // testing hook + +func handleSignals(shutdownc <-chan io.Closer) { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + signal.Notify(c, syscall.SIGINT) + for { + sig := <-c + sysSig, ok := sig.(syscall.Signal) + if !ok { + log.Fatal("Not a unix signal") + } + switch sysSig { + case syscall.SIGHUP: + log.Print("SIGHUP: restarting camli") + err := osutil.RestartProcess() + if err != nil { + log.Fatal("Failed to restart: " + err.Error()) + } + case syscall.SIGINT: + log.Print("Got SIGINT: shutting down") + donec := make(chan bool) + go func() { + cl := <-shutdownc + if err := cl.Close(); err != nil { + exitf("Error shutting down: %v", err) + } + donec <- true + }() + select { + case <-donec: + log.Printf("Shut down.") + osExit(0) + case <-time.After(2 * time.Second): + exitf("Timeout shutting down. Exiting uncleanly.") + } + default: + log.Fatal("Received another signal, should not happen.") + } + } +} + +// listenAndBaseURL finds the configured, default, or inferred listen address +// and base URL from the command-line flags and provided config. 
+func listenAndBaseURL(config *serverinit.Config) (listen, baseURL string) { + baseURL = config.OptionalString("baseURL", "") + listen = *flagListen + listenConfig := config.OptionalString("listen", "") + // command-line takes priority over config + if listen == "" { + listen = listenConfig + if listen == "" { + exitf("\"listen\" needs to be specified either in the config or on the command line") + } + } + return +} + +func redirectFromHTTP(base string) { + http.ListenAndServe(":80", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, base, http.StatusFound) + })) +} + +// certHostname figures out the name to use for the TLS certificates, using baseURL +// and falling back to the listen address if baseURL is empty or invalid. +func certHostname(listen, baseURL string) (string, error) { + hostPort, err := netutil.HostPort(baseURL) + if err != nil { + hostPort = listen + } + hostname, _, err := net.SplitHostPort(hostPort) + if err != nil { + return "", fmt.Errorf("failed to find hostname for cert from address %q: %v", hostPort, err) + } + return hostname, nil +} + +// main wraps Main so tests (which generate their own func main) can still run Main. +func main() { + Main(nil, nil) +} + +// Main sends on up when it's running, and shuts down when it receives from down. 
+func Main(up chan<- struct{}, down <-chan struct{}) { + flag.Parse() + + if *flagVersion { + fmt.Fprintf(os.Stderr, "camlistored version: %s\nGo version: %s (%s/%s)\n", + buildinfo.Version(), runtime.Version(), runtime.GOOS, runtime.GOARCH) + return + } + if legalprint.MaybePrint(os.Stderr) { + return + } + if env.OnGCE() { + log.SetOutput(gce.LogWriter()) + } + + if *flagReindex { + index.SetImpendingReindex() + } + + log.Printf("Starting camlistored version %s; Go %s (%s/%s)", buildinfo.Version(), runtime.Version(), + runtime.GOOS, runtime.GOARCH) + + shutdownc := make(chan io.Closer, 1) // receives io.Closer to cleanly shut down + go handleSignals(shutdownc) + + // In case we're running in a Docker container with no + // filesytem from which to load the root CAs, this + // conditionally installs a static set if necessary. We do + // this before we load the config file, which might come from + // an https URL. + httputil.InstallCerts() + + config, isNewConfig, err := loadConfig(*flagConfigFile) + if err != nil { + exitf("Error loading config file: %v", err) + } + + ws := webserver.New() + listen, baseURL := listenAndBaseURL(config) + + hostname, err := certHostname(listen, baseURL) + if err != nil { + exitf("Bad baseURL or listen address: %v", err) + } + setupTLS(ws, config, hostname) + + err = ws.Listen(listen) + if err != nil { + exitf("Listen: %v", err) + } + + if baseURL == "" { + baseURL = ws.ListenURL() + } + + shutdownCloser, err := config.InstallHandlers(ws, baseURL, *flagReindex, nil) + if err != nil { + exitf("Error parsing config: %v", err) + } + shutdownc <- shutdownCloser + + urlToOpen := baseURL + if !isNewConfig { + // user may like to configure the server at the initial startup, + // open UI if this is not the first run with a new config file. 
+ urlToOpen += config.UIPath + } + + if *flagOpenBrowser { + go osutil.OpenURL(urlToOpen) + } + + go ws.Serve() + if flagPollParent { + osutil.DieOnParentDeath() + } + + if err := config.StartApps(); err != nil { + exitf("StartApps: %v", err) + } + + for appName, appURL := range config.AppURL() { + addr, err := netutil.HostPort(appURL) + if err != nil { + log.Printf("Could not get app %v address: %v", appName, err) + continue + } + if err := netutil.AwaitReachable(addr, 5*time.Second); err != nil { + log.Printf("Could not reach app %v: %v", appName, err) + } + } + log.Printf("Available on %s", urlToOpen) + + if env.OnGCE() && strings.HasPrefix(baseURL, "https://") { + go redirectFromHTTP(baseURL) + } + + // Block forever, except during tests. + up <- struct{}{} + <-down + osExit(0) +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/run_test.go b/vendor/github.com/camlistore/camlistore/server/camlistored/run_test.go new file mode 100644 index 00000000..25c62a7f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/run_test.go @@ -0,0 +1,101 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "camlistore.org/pkg/osutil" +) + +func TestStarts(t *testing.T) { + td, err := ioutil.TempDir("", "camlistored-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + fakeHome := filepath.Join(td, "fakeHome") + confDir := filepath.Join(fakeHome, "conf") + varDir := filepath.Join(fakeHome, "var") + + defer pushEnv("CAMLI_CONFIG_DIR", confDir)() + defer pushEnv("CAMLI_VAR_DIR", varDir)() + + if _, err := os.Stat(osutil.CamliConfigDir()); !os.IsNotExist(err) { + t.Fatalf("expected conf dir %q to not exist", osutil.CamliConfigDir()) + } + if !strings.Contains(osutil.CamliBlobRoot(), td) { + t.Fatalf("blob root %q should contain the temp dir %q", osutil.CamliBlobRoot(), td) + } + if _, err := os.Stat(osutil.CamliBlobRoot()); !os.IsNotExist(err) { + t.Fatalf("expected blobroot dir %q to not exist", osutil.CamliBlobRoot()) + } + if fi, err := os.Stat(osutil.UserServerConfigPath()); !os.IsNotExist(err) { + t.Errorf("expected no server config file; got %v, %v", fi, err) + } + + mkdir(t, confDir) + *flagOpenBrowser = false + *flagListen = ":0" + + up := make(chan struct{}) + down := make(chan struct{}) + dead := make(chan int, 1) + osExit = func(status int) { + dead <- status + close(dead) + runtime.Goexit() + } + go Main(up, down) + select { + case status := <-dead: + t.Errorf("os.Exit(%d) before server came up", status) + return + case <-up: + t.Logf("server is up") + case <-time.After(10 * time.Second): + t.Fatal("timeout starting server") + } + + if _, err := os.Stat(osutil.UserServerConfigPath()); err != nil { + t.Errorf("expected a server config file; got %v", err) + } + + down <- struct{}{} + <-dead +} + +func pushEnv(k, v string) func() { + old := os.Getenv(k) + os.Setenv(k, v) + return func() { + os.Setenv(k, old) + } +} + +func mkdir(t *testing.T, dir string) { + if err := os.MkdirAll(dir, 0700); err != nil { + t.Fatal(err) + } +} 
diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/setup.go b/vendor/github.com/camlistore/camlistore/server/camlistored/setup.go new file mode 100644 index 00000000..b257da6e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/setup.go @@ -0,0 +1,51 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "net" + "net/http" + "syscall" + + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/netutil" +) + +func setupHome(rw http.ResponseWriter, req *http.Request) { + port := httputil.RequestTargetPort(req) + localhostAddr, err := netutil.Localhost() + if err != nil { + httputil.ServeError(rw, req, err) + } + ourAddr := &net.TCPAddr{IP: localhostAddr, Port: port} + rAddr, err := net.ResolveTCPAddr("tcp", req.RemoteAddr) + if err != nil { + fmt.Printf("camlistored: unable to resolve RemoteAddr %q: %v", req.RemoteAddr, err) + return + } + uid, err := netutil.AddrPairUserid(rAddr, ourAddr) + if err != nil { + httputil.ServeError(rw, req, err) + } + + fmt.Fprintf(rw, "Hello %q\n", req.RemoteAddr) + fmt.Fprintf(rw, "

    uid = %d\n", syscall.Getuid()) + fmt.Fprintf(rw, "

    euid = %d\n", syscall.Geteuid()) + + fmt.Fprintf(rw, "

    http_local_uid(%q => %q) = %d (%v)\n", req.RemoteAddr, ourAddr, uid, err) +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/TODO b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/TODO new file mode 100644 index 00000000..0dc293b7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/TODO @@ -0,0 +1,14 @@ +- Show a placeholder item while an upload is in progress. +- Hook through upload progress percentage from the camli javascript + API to the upload placeholder item. +- Add dragdrop of selected blobs to put them in sets? +- Fix resample quality - looks pretty crappy right now +- Can we put the type o a file within the icon, or other kind of + preview-type information? +- Permanode functionality is just weird... do we need this in the main + UI? should be a command-line only thing I think +- Add support for uploading entire folders +- Make the toolbar a Medium/Quip style floating thing - get temporary + icons from the noun project +- Infinite scroll + - bonus: some cool effect as items load! diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/animation_loop.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/animation_loop.js new file mode 100644 index 00000000..dd1481da --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/animation_loop.js @@ -0,0 +1,89 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +goog.provide('cam.AnimationLoop'); + +goog.require('goog.events.EventTarget'); + +// Provides an easier-to-use interface around window.requestAnimationFrame(), and abstracts away browser differences. +// @param {Window} win +cam.AnimationLoop = function(win) { + goog.base(this); + + this.win_ = win; + + this.requestAnimationFrame_ = win.requestAnimationFrame || win.mozRequestAnimationFrame || win.webkitRequestAnimationFrame || win.msRequestAnimationFrame; + + this.handleFrame_ = this.handleFrame_.bind(this); + + this.lastTimestamp_ = 0; + + if (this.requestAnimationFrame_) { + this.requestAnimationFrame_ = this.requestAnimationFrame_.bind(win); + } else { + this.requestAnimationFrame_ = this.simulateAnimationFrame_.bind(this); + } +}; + +goog.inherits(cam.AnimationLoop, goog.events.EventTarget); + +cam.AnimationLoop.FRAME_EVENT_TYPE = 'frame'; + +cam.AnimationLoop.prototype.isRunning = function() { + return Boolean(this.lastTimestamp_); +}; + +cam.AnimationLoop.prototype.start = function() { + if (this.isRunning()) { + return; + } + + this.lastTimestamp_ = -1; + this.schedule_(); +}; + +cam.AnimationLoop.prototype.stop = function() { + this.lastTimestamp_ = 0; +}; + +cam.AnimationLoop.prototype.schedule_ = function() { + this.requestAnimationFrame_(this.handleFrame_); +}; + +cam.AnimationLoop.prototype.handleFrame_ = function(opt_timestamp) { + if (this.lastTimestamp_ == 0) { + return; + } + + var timestamp = opt_timestamp || new Date().getTime(); + if (this.lastTimestamp_ == -1) { + this.lastTimestamp_ = timestamp; + } else { + this.dispatchEvent({ + type: this.constructor.FRAME_EVENT_TYPE, + delay: timestamp - this.lastTimestamp_ + }); + this.lastTimestamp_ = timestamp; + } + + this.schedule_(); +}; + +cam.AnimationLoop.prototype.simulateAnimationFrame_ = function(fn) { + this.win_.setTimeout(function() { + fn(new Date().getTime()); + }, 0); +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob.js 
b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob.js new file mode 100644 index 00000000..07e54cd5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob.js @@ -0,0 +1,67 @@ +/* +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.blob'); + +goog.require('goog.crypt'); +goog.require('goog.crypt.Sha1'); + +// Returns the Camlistore blobref for hash object. The only supported hash function is currently sha1, but more might be added later. +// @param {!goog.crypt.Hash} hash +// @returns {!string} +cam.blob.refFromHash = function(hash) { + if (hash instanceof goog.crypt.Sha1) { + return 'sha1-' + goog.crypt.byteArrayToHex(hash.digest()); + } + throw new Error('Unsupported hash function type'); +}; + +// Returns the Camlistore blobref for a string using the currently recommended hash function. +// @param {!string} str +// @returns {!string} +cam.blob.refFromString = function(str) { + var hash = cam.blob.createHash(); + // update only supports 8 bit chars: http://docs.closure-library.googlecode.com/git/class_goog_crypt_Sha1.html + hash.update(goog.crypt.stringToUtf8ByteArray(str)); + return cam.blob.refFromHash(hash); +}; + +// Returns the Camlistore blobref for a DOM blob (different from Camlistore blob) using the currently recommended hash function. This function currently only works within workers. 
+// @param {Blob} blob +// @returns {!string} +cam.blob.refFromDOMBlob = function(blob) { + if (!goog.global.FileReaderSync) { + // TODO(aa): If necessary, we can also implement this using FileReader for use on the main thread. But beware that should not be done for very large objects without checking the effect on framerate carefully. + throw new Error('FileReaderSync not available. Perhaps we are on the main thread?'); + } + + var fr = new FileReaderSync(); + var hash = cam.blob.createHash(); + var chunkSize = 1024 * 1024; + for (var start = 0; start < blob.size; start += chunkSize) { + var end = Math.min(start + chunkSize, blob.size); + var slice = blob.slice(start, end); + hash.update(new Uint8Array(fr.readAsArrayBuffer(slice))); + } + + return cam.blob.refFromHash(hash); +}; + +// Creates an instance of the currently recommened hash function. +// @return {!goog.crypt.Hash'} +cam.blob.createHash = function() { + return new goog.crypt.Sha1(); +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_detail.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_detail.js new file mode 100644 index 00000000..8a2445c7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_detail.js @@ -0,0 +1,204 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +goog.provide('cam.BlobDetail'); + +goog.require('cam.blobref'); +goog.require('cam.ServerConnection'); + +goog.require('goog.labs.Promise'); + +cam.BlobDetail = React.createClass({ + displayName: 'BlobDetail', + + BLOBREF_PATTERN_: new RegExp(cam.blobref.PATTERN, 'g'), + propTypes: { + getDetailURL: React.PropTypes.func.isRequired, + meta: React.PropTypes.object.isRequired, + serverConnection: React.PropTypes.instanceOf(cam.ServerConnection).isRequired, + }, + + getInitialState: function() { + return { + content: null, + metadata: null, + claims: null, + refs: null, + }; + }, + + componentWillMount: function() { + var sc = this.props.serverConnection; + + sc.getBlobContents(this.props.meta.blobRef, this.handleBlobContents_); + sc.permanodeClaims(this.props.meta.blobRef, this.handleClaims_); + + goog.labs.Promise.all([ + new goog.labs.Promise(sc.pathsOfSignerTarget.bind(sc, this.props.meta.blobRef)), + new goog.labs.Promise(sc.search.bind(sc, { + permanode: { + attr: 'camliMember', + value: this.props.meta.blobRef, + }, + }, null, null, null)) + ]).then(this.handleRefs_); + }, + + render: function() { + return React.DOM.div( + { + style: { + fontFamily: 'Open Sans', + margin: '1.5em 2em', + } + }, + this.getSection_("Blob content", this.state.content), + this.getSection_("Indexer metadata", this.props.meta), + this.getSection_("Mutation claims", this.state.claims), + this.getReferencesSection_(this.state.refs) + ); + }, + + getReferencesSection_: function(refs) { + if (!refs) { + return this.getReferencesBlock_("Loading..."); + } + + if (refs.length <= 0) { + return this.getReferencesBlock_("No references"); + } + + return this.getReferencesBlock_( + React.DOM.ul( + null, + refs.map(function(blobref) { + return React.DOM.li( + {}, + React.DOM.a( + { + href: this.props.getDetailURL(blobref), + }, + blobref + ) + ); + }, this) + ) + ); + }, + + getReferencesBlock_: function(content) { + return React.DOM.div( + { + key: 'References', + }, + 
this.getHeader_("Referenced by"), + content + ); + }, + + getSection_: function(title, content) { + return React.DOM.div( + { + key: title + }, + this.getHeader_(title), + this.getCodeBlock_(content) + ); + }, + + getHeader_: function(title) { + return React.DOM.h1( + { + key: 'header', + style: { + fontSize: '1.5em', + } + }, + title + ); + }, + + getCodeBlock_: function(stuff) { + return React.DOM.pre( + { + key: 'code-block', + style: { + overflowX: 'auto', + }, + }, + stuff ? this.linkify_(JSON.stringify(stuff, null, 2)) : "No data" + ); + }, + + linkify_: function(code) { + var result = []; + var match; + var index = 0; + while ((match = this.BLOBREF_PATTERN_.exec(code)) !== null) { + result.push(code.substring(index, match.index)); + result.push(React.DOM.a({key: match.index, href: this.props.getDetailURL(match[0]).toString()}, match[0])); + index = match.index + match[0].length; + } + result.push(code.substring(index)); + return result; + }, + + handleBlobContents_: function(data) { + this.setState({content: JSON.parse(data)}); + }, + + handleClaims_: function(data) { + this.setState({claims: data}); + }, + + handleRefs_: function(results) { + var refs = []; + if (results[0].paths) { + refs = refs.concat(results[0].paths.map(function(path) { + return path.baseRef; + })); + } + if (results[1].blobs) { + refs = refs.concat(results[1].blobs.map(function(blob) { + return blob.blob; + })); + } + this.setState({refs: refs}); + }, +}); + +cam.BlobDetail.getAspect = function(getDetailURL, serverConnection, blobref, targetSearchSession) { + if(!targetSearchSession) { + return; + } + + var m = targetSearchSession.getMeta(blobref); + if (!m) { + return null; + } + + return { + fragment: 'blob', + title: 'Blob', + createContent: function(size) { + return cam.BlobDetail({ + getDetailURL: getDetailURL, + meta: m, + serverConnection: serverConnection, + }); + }, + }; +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item.css 
b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item.css new file mode 100644 index 00000000..4bb6b4bb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item.css @@ -0,0 +1,120 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +@import (less) "prefix-free.css"; + + +/* Tile view of BlobItem */ +.cam-blobitem { + display: inline-block; + font-size: 0.8em; +} + +.cam-blobitem>a { + text-decoration: none; +} + +.cam-blobitem-thumbclip { + position: relative; + overflow: hidden; +} + +.cam-blobitem-loading { + background-color: rgb(240, 240, 240); +} + +.cam-blobitem-thumb { + display: block; + position: relative; +} + +.cam-blobitemcontainer-50 { + font-size: 10px; +} + +.cam-blobitemcontainer-75 { + font-size: 12px; +} + +.cam-blobitemcontainer-100 { + font-size: 12px; +} + +.cam-blobitemcontainer-150 { + font-size: 13px; +} + +.cam-blobitemcontainer-200 { + font-size: 14px; +} + +.cam-blobitemcontainer-250 { + font-size: 14px; +} + +.cam-blobitem-thumbtitle { + overflow: hidden; + padding: 0 1ex; + text-align: center; + text-overflow: ellipsis; + display: block; + color: #222; +} + +.cam-blobitem.cam-dropactive { + border: 1px solid #acf!important; + outline: 1px solid #acf!important; + background: #e5efff; +} + +.cam-blobitem .checkmark { + background-image: url('checkmark2.svg'); + background-position: 5px 5px; + background-repeat: no-repeat; + background-size: 42px 42px; + cursor: 
pointer; + height: 52px; + left: 0; + opacity: 0; + position: absolute; + top: 0; + .transition(opacity 0.2s ease); + width: 64px; + z-index: 2; + + /* To force us into a graphics layer, otherwise we get weird effects as we transition in and out of one during animation. See: https://camlistore.org/issue/284. */ + .transform(scale3d(1, 1, 1)); +} + +.cam-blobitem.goog-control-disabled .checkmark { + display: none; +} + +.cam-blobitem.goog-control-hover .checkmark { + opacity: 0.6; +} + +.cam-blobitem.goog-control-hover .checkmark:hover { + opacity: 1!important; +} + +.cam-blobitem.goog-control-checked .checkmark { + opacity: 1!important; +} + +.cam-blobitem.goog-control-checked .checkmark { + background-image: url('checkmark2_blue.svg'); +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_container.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_container.css new file mode 100644 index 00000000..c64760b0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_container.css @@ -0,0 +1,53 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +@import (less) "prefix-free.css"; + + +.cam-blobitemcontainer { + outline: 0; /* Do not show an outline when container has focus. 
*/ + border: 1px solid rgba(0,0,0,0); + position: relative; + white-space: nowrap; +} + +.cam-blobitemcontainer-transform { + position: absolute; + left: 0; + top: 0; + .transition-transform(100ms ease-out); +} + +.cam-blobitemcontainer.cam-dropactive { + border-color: #acf; + background: #e5efff; +} +.cam-blobitemcontainer-hidden { + display: none; +} + +.cam-blobitemcontainer>.cam-blobitemcontainer-transform>.cam-blobitem { + position: absolute; +} + +.cam-blobitemcontainer-no-results { + position: relative; + + color: #444; + font-family: 'Open Sans', sans-serif; + font-size: 24px; + text-align: center; +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_container_react.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_container_react.js new file mode 100644 index 00000000..7c8dd139 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_container_react.js @@ -0,0 +1,398 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +goog.provide('cam.BlobItemContainerReact'); + +goog.require('goog.array'); +goog.require('goog.async.Throttle'); +goog.require('goog.dom'); +goog.require('goog.events.EventHandler'); +goog.require('goog.object'); +goog.require('goog.math.Coordinate'); +goog.require('goog.math.Size'); +goog.require('goog.style'); + +goog.require('cam.BlobItemReact'); +goog.require('cam.SearchSession'); +goog.require('cam.SpritedImage'); + +cam.BlobItemContainerReact = React.createClass({ + displayName: 'BlobItemContainerReact', + + // Margin between items in the layout. + BLOB_ITEM_MARGIN_: 7, + + // If the last row uses at least this much of the available width before adjustments, we'll call it "close enough" and adjust things so that it fills the entire row. Less than this, and we'll leave the last row unaligned. + LAST_ROW_CLOSE_ENOUGH_TO_FULL_: 0.85, + + // Distance from the bottom of the page at which we will trigger loading more data. + INFINITE_SCROLL_THRESHOLD_PX_: 100, + + propTypes: { + availHeight: React.PropTypes.number.isRequired, + availWidth: React.PropTypes.number.isRequired, + detailURL: React.PropTypes.func.isRequired, // string->string (blobref->complete detail URL) + handlers: React.PropTypes.array.isRequired, + history: React.PropTypes.shape({replaceState:React.PropTypes.func.isRequired}).isRequired, + onSelectionChange: React.PropTypes.func, + scale: React.PropTypes.number.isRequired, + scaleEnabled: React.PropTypes.bool.isRequired, + scrolling: React.PropTypes.shape({ + target:React.PropTypes.shape({addEventListener:React.PropTypes.func.isRequired, removeEventListener:React.PropTypes.func.isRequired}), + get: React.PropTypes.func.isRequired, + set: React.PropTypes.func.isRequired, + }).isRequired, + searchSession: React.PropTypes.shape({getCurrentResults:React.PropTypes.func.isRequired, addEventListener:React.PropTypes.func.isRequired, loadMoreResults:React.PropTypes.func.isRequired}), + selection: React.PropTypes.object.isRequired, + style: 
React.PropTypes.object, + thumbnailSize: React.PropTypes.number.isRequired, + }, + + getDefaultProps: function() { + return { + style: {}, + }; + }, + + componentWillMount: function() { + this.eh_ = new goog.events.EventHandler(this); + this.lastCheckedIndex_ = -1; + this.layoutHeight_ = 0; + + // Minimal information we keep about every single child. We construct the actual child lazily when the user scrolls it into view using handler. + // @type {Array.<{{position:goog.math.Position, size:goog.math.Size, blobref:string, handler>} + this.childItems_ = null; + + // TODO(aa): This can be removed when https://code.google.com/p/chromium/issues/detail?id=50298 is fixed and deployed. + this.updateHistoryThrottle_ = new goog.async.Throttle(this.updateHistory_, 2000); + + // TODO(aa): This can be removed when https://code.google.com/p/chromium/issues/detail?id=312427 is fixed and deployed. + this.lastWheelItem_ = ''; + }, + + componentDidMount: function() { + this.eh_.listen(this.props.searchSession, cam.SearchSession.SEARCH_SESSION_CHANGED, this.handleSearchSessionChanged_); + this.eh_.listen(this.props.scrolling.target, 'scroll', this.handleScroll_); + if (this.props.history.state && this.props.history.state.scroll) { + this.props.scrolling.set(this.props.history.state.scroll); + } + this.fillVisibleAreaWithResults_(); + }, + + componentWillReceiveProps: function(nextProps) { + if (nextProps.searchSession != this.props.searchSession) { + this.eh_.unlisten(this.props.searchSession, cam.SearchSession.SEARCH_SESSION_CHANGED, this.handleSearchSessionChanged_); + this.eh_.listen(nextProps.searchSession, cam.SearchSession.SEARCH_SESSION_CHANGED, this.handleSearchSessionChanged_); + nextProps.searchSession.loadMoreResults(); + } + + this.childItems_ = null; + }, + + componentWillUnmount: function() { + this.eh_.dispose(); + this.updateHistoryThrottle_.dispose(); + }, + + getInitialState: function() { + return { + scroll:0, + }; + }, + + render: function() { + 
this.updateChildItems_(); + + var childControls = this.childItems_.filter(function(item) { + var visible = this.isVisible_(item.position.y) || this.isVisible_(item.position.y + item.size.height); + var isLastWheelItem = item.blobref == this.lastWheelItem_; + return visible || isLastWheelItem; + }, this).map(function(item) { + return cam.BlobItemReact({ + key: item.blobref, + blobref: item.blobref, + checked: Boolean(this.props.selection[item.blobref]), + onCheckClick: this.props.onSelectionChange ? this.handleCheckClick_ : null, + onWheel: this.handleChildWheel_, + position: item.position, + }, + item.handler.createContent(item.size) + ); + }, this); + + // If we haven't filled the window with results, add some more. + this.fillVisibleAreaWithResults_(); + + if (childControls.length == 0 && this.props.searchSession.isComplete()) { + childControls.push(this.getNoResultsMessage_()); + } + + var transformStyle = {}; + var scale = this.props.scaleEnabled ? this.props.scale : 1; + transformStyle[cam.reactUtil.getVendorProp('transform')] = goog.string.subs('scale3d(%s, %s, 1)', scale, scale); + transformStyle[cam.reactUtil.getVendorProp('transformOrigin')] = goog.string.subs('left %spx 0', this.state.scroll); + + return React.DOM.div( + { + className: 'cam-blobitemcontainer', + style: cam.object.extend(this.props.style, { + height: this.layoutHeight_, + width: this.props.availWidth, + }), + onMouseDown: this.handleMouseDown_, + }, + React.DOM.div( + { + className: 'cam-blobitemcontainer-transform', + style: transformStyle, + }, + childControls + ) + ); + }, + + updateChildItems_: function() { + if (this.childItems_ !== null) { + return; + } + + this.childItems_ = []; + + var results = this.props.searchSession.getCurrentResults(); + var items = results.blobs.map(function(blob) { + var blobref = blob.blob; + var self = this; + var href = self.props.detailURL(blobref).toString(); + var handler = null; + this.props.handlers.some(function(h) { return handler = h(blobref, 
self.props.searchSession, href); }); + return { + blobref: blobref, + handler: handler, + position: null, + size: null, + }; + }.bind(this)); + + var currentTop = this.BLOB_ITEM_MARGIN_; + var currentWidth = this.BLOB_ITEM_MARGIN_; + var rowStart = 0; + var lastItem = results.blobs.length - 1; + + for (var i = rowStart; i <= lastItem; i++) { + var item = items[i]; + var availWidth = this.props.availWidth; + var nextWidth = currentWidth + this.props.thumbnailSize * item.handler.getAspectRatio() + this.BLOB_ITEM_MARGIN_; + if (i != lastItem && nextWidth < availWidth) { + currentWidth = nextWidth; + continue; + } + + // Decide how many items are going to be in this row. We choose the number that will result in the smallest adjustment to the image sizes having to be done. + var rowEnd, rowWidth; + + // For the last item we always use all the rest of the items in this row. + if (i == lastItem) { + rowEnd = lastItem; + rowWidth = nextWidth; + if (nextWidth / availWidth < this.LAST_ROW_CLOSE_ENOUGH_TO_FULL_) { + availWidth = nextWidth; + } + + // If we have at least one item in this row, and the adjustment to the row width is less without the next item than with it, then we leave the next item for the next row. + } else if (i > rowStart && (availWidth - currentWidth <= nextWidth - availWidth)) { + rowEnd = i - 1; + rowWidth = currentWidth; + + // Otherwise we include the next item in this row. + } else { + rowEnd = i; + rowWidth = nextWidth; + } + + currentTop += this.updateChildItemsRow_(items, rowStart, rowEnd, availWidth, rowWidth, currentTop) + this.BLOB_ITEM_MARGIN_; + + currentWidth = this.BLOB_ITEM_MARGIN_; + rowStart = rowEnd + 1; + i = rowEnd; + } + + this.layoutHeight_ = currentTop; + }, + + updateChildItemsRow_: function(items, startIndex, endIndex, availWidth, usedWidth, top) { + var currentLeft = 0; + var rowHeight = Number.POSITIVE_INFINITY; + + var numItems = endIndex - startIndex + 1; + + // Doesn't seem like this should be necessary. Subpixel bug? 
Aaron can't math? + var fudge = 1; + + var availThumbWidth = availWidth - (this.BLOB_ITEM_MARGIN_ * (numItems + 1)) - fudge; + var usedThumbWidth = usedWidth - (this.BLOB_ITEM_MARGIN_ * (numItems + 1)); + + for (var i = startIndex; i <= endIndex; i++) { + // We figure out the amount to adjust each item in this slightly non-intuitive way so that the adjustment is split up as fairly as possible. Figuring out a ratio up front and applying it to all items uniformly can end up with a large amount left over because of rounding. + var item = items[i]; + var numItemsLeft = (endIndex + 1) - i; + var delta = Math.round((availThumbWidth - usedThumbWidth) / numItemsLeft); + var originalWidth = this.props.thumbnailSize * item.handler.getAspectRatio(); + var width = originalWidth + delta; + var ratio = width / originalWidth; + var height = Math.round(this.props.thumbnailSize * ratio); + + item.position = new goog.math.Coordinate(currentLeft + this.BLOB_ITEM_MARGIN_, top); + item.size = new goog.math.Size(width, height); + this.childItems_.push(item); + + currentLeft += width + this.BLOB_ITEM_MARGIN_; + usedThumbWidth += delta; + rowHeight = Math.min(rowHeight, height); + } + + for (var i = startIndex; i <= endIndex; i++) { + this.childItems_[i].size.height = rowHeight; + } + + return rowHeight; + }, + + getNoResultsMessage_: function() { + var piggyWidth = 88; + var piggyHeight = 62; + var w = 350; + var h = 100; + + return React.DOM.div( + { + key: 'no-results', + className: 'cam-blobitemcontainer-no-results', + style: { + width: w, + height: h, + left: (this.props.availWidth - w) / 2, + top: (this.props.availHeight - h) / 3 + }, + }, + React.DOM.div(null, 'No results found'), + cam.SpritedImage( + { + index: 6, + sheetWidth: 10, + spriteWidth: piggyWidth, + spriteHeight: piggyHeight, + src: 'glitch/npc_piggy__x1_rooked1_png_1354829442.png', + style: { + 'margin-left': (w - piggyWidth) / 2 + } + } + ) + ); + }, + + getScrollFraction_: function() { + var max = 
this.layoutHeight_; + if (max == 0) + return 0; + return this.state.scroll / max; + }, + + getTranslation_: function() { + var maxOffset = (1 - this.props.scale) * this.layoutHeight_; + var currentOffset = maxOffset * this.getScrollFraction_(); + return currentOffset; + }, + + transformY_: function(y) { + return y * this.props.scale + this.getTranslation_(); + }, + + getScrollBottom_: function() { + return this.state.scroll + this.props.availHeight; + }, + + isVisible_: function(y) { + y = this.transformY_(y); + return y >= this.state.scroll && y < this.getScrollBottom_(); + }, + + handleSearchSessionChanged_: function() { + this.childItems_ = null; + this.forceUpdate(); + }, + + handleCheckClick_: function(blobref, e) { + var blobs = this.props.searchSession.getCurrentResults().blobs; + var index = goog.array.findIndex(blobs, function(b) { return b.blob == blobref }); + var newSelection = cam.object.extend(this.props.selection, {}); + + if (e.shiftKey && this.lastCheckedIndex_ > -1) { + var low = Math.min(this.lastCheckedIndex_, index); + var high = Math.max(this.lastCheckedIndex_, index); + for (var i = low; i <= high; i++) { + newSelection[blobs[i].blob] = true; + } + } else { + if (newSelection[blobref]) { + delete newSelection[blobref]; + } else { + newSelection[blobref] = true; + } + } + + this.lastCheckedIndex_ = index; + this.forceUpdate(); + + this.props.onSelectionChange(newSelection); + }, + + handleMouseDown_: function(e) { + // Prevent the default selection behavior. + if (e.shiftKey) { + e.preventDefault(); + } + }, + + handleScroll_: function() { + this.setState({scroll:this.props.scrolling.get()}, function() { + this.updateHistoryThrottle_.fire(); + this.fillVisibleAreaWithResults_(); + }.bind(this)); + }, + + handleChildWheel_: function(child) { + this.lastWheelItem_ = child.props.blobref; + }, + + // NOTE: This method causes the URL bar to throb for a split second (at least on Chrome), so it should not be called constantly. 
+ updateHistory_: function() { + // second argument (title) is ignored on Firefox, but not optional. + this.props.history.replaceState(cam.object.extend(this.props.history.state, {scroll:this.state.scroll}), ''); + }, + + fillVisibleAreaWithResults_: function() { + if (!this.isMounted()) { + return; + } + + var layoutEnd = this.transformY_(this.layoutHeight_); + if ((layoutEnd - this.getScrollBottom_()) > this.INFINITE_SCROLL_THRESHOLD_PX_) { + return; + } + + this.props.searchSession.loadMoreResults(); + }, +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_container_test.html b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_container_test.html new file mode 100644 index 00000000..7117dbfc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_container_test.html @@ -0,0 +1,136 @@ + + + + + + + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_demo_content.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_demo_content.js new file mode 100644 index 00000000..0f8252a3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_demo_content.js @@ -0,0 +1,99 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +goog.provide('cam.BlobItemDemoContent'); + +goog.require('goog.math.Size'); + +// BlobItemDemoContent is a demo node type, useful for giving talks and showing +// how a custom renderer can be invoked just by making a permanode, setting +// its "camliNodeType" attribute to "camlistore.org:demo", and then changing its +// background color with the "color" property or text with the "title" property. +cam.BlobItemDemoContent = React.createClass({ + displayName: 'BlobItemDemoContent', + + propTypes: { + blobref: React.PropTypes.string.isRequired, + href: React.PropTypes.string.isRequired, + title: React.PropTypes.string.isRequired, + size: React.PropTypes.instanceOf(goog.math.Size).isRequired, + color: React.PropTypes.string.isRequired + }, + + getInitialState: function () { + return { + mouseover: false, + }; + }, + + render: function () { + return React.DOM.a({ + href: this.props.href, + style: { + backgroundColor: this.props.color, + width: this.props.size.width + "px", + height: this.props.size.height + "px", + display: 'block' + }, + onMouseEnter: this.handleMouseOver_, + onMouseLeave: this.handleMouseOut_ + }, + this.props.title + (this.state.mouseover ? 
', mouseover' : '') + ); + }, + + handleMouseOver_: function () { + this.setState({ + mouseover: true + }); + }, + + handleMouseOut_: function () { + this.setState({ + mouseover: false + }); + }, +}); + +cam.BlobItemDemoContent.getHandler = function (blobref, searchSession, href) { + var m = searchSession.getMeta(blobref); + if (m.camliType == 'permanode') { + var typ = cam.permanodeUtils.getCamliNodeType(m.permanode); + if (typ == 'camlistore.org:demo') { + return new cam.BlobItemDemoContent.Handler(m, href) + } + } + return null; +}; + +cam.BlobItemDemoContent.Handler = function (meta, href) { + this.meta_ = meta; + this.href_ = href; +}; + +cam.BlobItemDemoContent.Handler.prototype.getAspectRatio = function () { + return 1; +}; + +cam.BlobItemDemoContent.Handler.prototype.createContent = function (size) { + return cam.BlobItemDemoContent({ + blobref: this.meta_.blobRef, + color: cam.permanodeUtils.getSingleAttr(this.meta_.permanode, 'color') || '#777', + title: cam.permanodeUtils.getSingleAttr(this.meta_.permanode, 'title') || '', + href: this.href_, + size: size, + }); +}; \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_foursquare.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_foursquare.css new file mode 100644 index 00000000..59b532be --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_foursquare.css @@ -0,0 +1,74 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +@import (less) "prefix-free.css"; + +.cam-blobitem-fs-checkin { + display: block; + border-radius: 7%; + position: relative; + overflow: hidden; + height: 100%; + color: white; + text-shadow:1px 1px 2px black; + white-space: normal; +} + +.cam-blobitem-fs-checkin-content { + position: relative; + background: rgba(80,80,80,0.4); + border-radius: 7%; + position: absolute; + z-index: 1; + width: 100%; + height: 100%; + padding: 6%; + text-align: center; +} + +.cam-blobitem-fs-checkin-content table { + border-collapse: collapse; + height: 100%; + width: 100%; +} + +.cam-blobitem-fs-checkin-content td { + height: 100%; + width: 100%; + vertical-align: middle; +} + +.cam-blobitem-fs-checkin-content img { + position: absolute; + top: 4%; + left: 50%; + margin-left: -59px; + width: 118px; + height: 32px; +} + +.cam-blobitem-fs-checkin-venue { + font-weight: bold; + font-size: 150%; + line-height: 1em; +} + +.cam-blobitem-fs-checkin-when { + position: absolute; + bottom: 4%; + left: 0; + width: 100%; +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_foursquare_content.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_foursquare_content.js new file mode 100644 index 00000000..30157cf8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_foursquare_content.js @@ -0,0 +1,139 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.BlobItemFoursquareContent'); + +goog.require('goog.array'); +goog.require('goog.math.Size'); +goog.require('goog.object'); +goog.require('goog.string'); + +goog.require('cam.dateUtils'); +goog.require('cam.math'); +goog.require('cam.permanodeUtils'); +goog.require('cam.Thumber'); + +cam.BlobItemFoursquareContent = React.createClass({ + propTypes: { + href: React.PropTypes.string.isRequired, + size: React.PropTypes.instanceOf(goog.math.Size).isRequired, + venueId: React.PropTypes.string.isRequired, + venueName: React.PropTypes.string.isRequired, + photo: React.PropTypes.string.isRequired, + date: React.PropTypes.number.isRequired, + }, + + render: function() { + return React.DOM.a({ + href: this.props.href, + className: 'cam-blobitem-fs-checkin', + style: { + backgroundImage: 'url(' + this.props.photo + ')', + width: this.props.size.width, + height: this.props.size.height, + }, + }, + React.DOM.div({className:'cam-blobitem-fs-checkin-content'}, + React.DOM.img({src: 'foursquare-logo.png'}), + React.DOM.table(null, + React.DOM.tr(null, + React.DOM.td(null, + React.DOM.div({className:'cam-blobitem-fs-checkin-intro'}, 'Check-in at'), + React.DOM.div({className:'cam-blobitem-fs-checkin-venue'}, this.props.venueName) + ) + ) + ), + React.DOM.div({className:'cam-blobitem-fs-checkin-when'}, cam.dateUtils.formatDateShort(this.props.date)) + ) + ); + }, +}); + +// Blech, we need this to prevent images from flashing when data changes server-side. 
+cam.BlobItemFoursquareContent.photoMeta_ = {}; + +cam.BlobItemFoursquareContent.getPhotoMeta_ = function(blobref, venueMeta, searchSession) { + var photoMeta = this.photoMeta_[blobref]; + if (photoMeta) { + return photoMeta; + } + + var photosBlobref = cam.permanodeUtils.getSingleAttr(venueMeta.permanode, 'camliPath:photos') + var photosMeta = searchSession.getMeta(photosBlobref); + var photoIds = (photosMeta && photosMeta.permanode && goog.object.getKeys(photosMeta.permanode.attr).filter(function(k) { return goog.string.startsWith(k, 'camliPath:') })) || []; + + photoMeta = (photoIds.length && cam.permanodeUtils.getSingleAttr(photosMeta.permanode, photoIds[goog.string.hashCode(blobref) % photoIds.length])) || null; + if (photoMeta) { + photoMeta = this.photoMeta_[blobref] = searchSession.getMeta(photoMeta); + } + + return photoMeta; +}; + +cam.BlobItemFoursquareContent.getHandler = function(blobref, searchSession, href) { + var m = searchSession.getMeta(blobref); + if (m.camliType != 'permanode') { + return null; + } + + if (cam.permanodeUtils.getCamliNodeType(m.permanode) != 'foursquare.com:checkin') { + return null; + } + + var startDate = cam.permanodeUtils.getSingleAttr(m.permanode, 'startDate'); + var venueBlobref = cam.permanodeUtils.getSingleAttr(m.permanode, 'foursquareVenuePermanode'); + if (!startDate || !venueBlobref) { + return null; + } + + + var venueMeta = searchSession.getResolvedMeta(venueBlobref); + if (!venueMeta) { + return null; + } + + var venueId = cam.permanodeUtils.getSingleAttr(venueMeta.permanode, 'foursquareId'); + var venueName = cam.permanodeUtils.getSingleAttr(venueMeta.permanode, 'title'); + if (!venueId || !venueName) { + return null; + } + + return new cam.BlobItemFoursquareContent.Handler(href, venueId, venueName, + cam.BlobItemFoursquareContent.getPhotoMeta_(blobref, venueMeta, searchSession), Date.parse(startDate)); +}; + +cam.BlobItemFoursquareContent.Handler = function(href, venueId, venueName, venuePhotoMeta, startDate) { + 
this.href_ = href; + this.venueId_ = venueId; + this.venueName_ = venueName; + this.startDate_ = startDate; + this.thumber_ = venuePhotoMeta ? new cam.Thumber.fromImageMeta(venuePhotoMeta) : null; +}; + +cam.BlobItemFoursquareContent.Handler.prototype.getAspectRatio = function() { + return 1.0; +}; + +cam.BlobItemFoursquareContent.Handler.prototype.createContent = function(size) { + return cam.BlobItemFoursquareContent({ + href: this.href_, + size: size, + venueId: this.venueId_, + venueName: this.venueName_, + photo: this.thumber_ ? this.thumber_.getSrc(size) : '', + date: this.startDate_, + }); +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_generic_content.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_generic_content.js new file mode 100644 index 00000000..dc09ddac --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_generic_content.js @@ -0,0 +1,137 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.BlobItemGenericContent'); + +goog.require('goog.math.Size'); + +goog.require('cam.math'); +goog.require('cam.object'); +goog.require('cam.permanodeUtils'); + +// Renders the content of blob items that are not known to be some more specific type. A generic file or folder icon is shown, along with a title if one can be determined. 
+cam.BlobItemGenericContent = React.createClass({ + displayName: 'BlobItemGenericContent', + + TITLE_HEIGHT: 22, + + propTypes: { + href: React.PropTypes.string.isRequired, + size: React.PropTypes.instanceOf(goog.math.Size).isRequired, + thumbSrc: React.PropTypes.string.isRequired, + thumbAspect: React.PropTypes.number.isRequired, + title: React.PropTypes.string.isRequired, + }, + + render: function() { + var thumbClipSize = this.getThumbClipSize_(); + // TODO(aa): I think we don't need/want the thumb clip div anymore. We can just make the anchor position:relative position the thumb inside it. + return React.DOM.a({href:this.props.href}, + React.DOM.div({className:this.getThumbClipClassName_(), style:thumbClipSize}, + this.getThumb_(thumbClipSize) + ), + this.getLabel_() + ); + }, + + getThumbClipClassName_: function() { + return React.addons.classSet({ + 'cam-blobitem-thumbclip': true, + 'cam-blobitem-loading': false, + }); + }, + + getThumb_: function(thumbClipSize) { + var thumbSize = this.getThumbSize_(thumbClipSize); + var pos = cam.math.center(thumbSize, thumbClipSize); + return React.DOM.img({ + className: 'cam-blobitem-thumb', + ref: 'thumb', + src: this.props.thumbSrc, + style: {left:pos.x, top:pos.y}, + width: thumbSize.width, + height: thumbSize.height, + }) + }, + + getLabel_: function() { + return React.DOM.span({className:'cam-blobitem-thumbtitle', style:{width:this.props.size.width}}, this.props.title); + }, + + getThumbSize_: function(available) { + var bleed = false; + return cam.math.scaleToFit(new goog.math.Size(this.props.thumbAspect, 1), available, bleed); + }, + + getThumbClipSize_: function() { + return new goog.math.Size(this.props.size.width, this.props.size.height - this.TITLE_HEIGHT); + }, +}); + +cam.BlobItemGenericContent.getHandler = function(blobref, searchSession, href) { + return new cam.BlobItemGenericContent.Handler(blobref, searchSession, href); +}; + + +cam.BlobItemGenericContent.Handler = function(blobref, searchSession, href) 
{ + this.blobref_ = blobref; + this.searchSession_ = searchSession; + this.href_ = href; + this.thumbType_ = this.getThumbType_(); +}; + +cam.BlobItemGenericContent.Handler.ICON_ASPECT = { + FILE: 260 / 300, + FOLDER: 300 / 300, +}; + +cam.BlobItemGenericContent.Handler.prototype.getAspectRatio = function() { + return this.thumbType_ == 'folder' ? this.constructor.ICON_ASPECT.FOLDER : this.constructor.ICON_ASPECT.FILE; +}; + +cam.BlobItemGenericContent.Handler.prototype.createContent = function(size) { + // TODO(aa): In the case of a permanode that is a container (cam.permanodeUtils.isContainer()) and has a camliContentImage, it would be nice to show that image somehow along with the folder icon. + return cam.BlobItemGenericContent({ + href: this.href_, + size: size, + thumbSrc: this.thumbType_ + '.png', + thumbAspect: this.getAspectRatio(), + title: this.searchSession_.getTitle(this.blobref_), + }); +}; + +cam.BlobItemGenericContent.Handler.prototype.getThumbType_ = function() { + var m = this.searchSession_.getMeta(this.blobref_); + var rm = this.searchSession_.getResolvedMeta(this.blobref_); + + if (rm) { + if (rm.camliType == 'file') { + return 'file'; + } + + if (rm.camliType == 'directory' || rm.camliType == 'static-set') { + return 'folder'; + } + } + + // Using the directory icon for any random permanode is a bit weird. Ideally we'd use file for that. The problem is that we can't tell the difference between a permanode that is representing an empty dynamic set and a permanode that is representing something else entirely. 
+ // And unfortunately, the UI has a big prominent button that says 'new set', and it looks funny if the new set is shown as a file icon :( + if (m.camliType == 'permanode') { + return 'folder'; + } + + return 'file'; +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_image_content.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_image_content.js new file mode 100644 index 00000000..d9f42074 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_image_content.js @@ -0,0 +1,153 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.BlobItemImageContent'); + +goog.require('goog.math.Size'); + +goog.require('cam.math'); +goog.require('cam.permanodeUtils'); +goog.require('cam.PyramidThrobber'); +goog.require('cam.Thumber'); + +// Renders image blob items. Handles the following cases: +// a) camliType == 'file', and also has an 'image' property. 
+// b) permanode with camliContent pointing to (a) +// c) permanode with 'camliImageContent' attribute pointing to (a) +cam.BlobItemImageContent = React.createClass({ + displayName: 'BlobItemImageContent', + + propTypes: { + aspect: React.PropTypes.number.isRequired, + href: React.PropTypes.string.isRequired, + size: React.PropTypes.instanceOf(goog.math.Size).isRequired, + src: React.PropTypes.string.isRequired, + title: React.PropTypes.string, + }, + + getInitialState: function() { + return { + loaded: false, + }; + }, + + componentWillMount: function() { + this.currentIntrinsicThumbHeight_ = 0; + }, + + componentDidUpdate: function(prevProps, prevState) { + // TODO(aa): It seems like we would not need this if we always use this component with the 'key' prop. + if (prevProps.blobref != this.props.blobref) { + this.currentIntrinsicThumbHeight_ = 0; + this.setState({loaded: false}); + } + }, + + render: function() { + var thumbClipSize = new goog.math.Size(this.props.size.width, this.props.size.height); + return React.DOM.a({href:this.props.href}, + React.DOM.div({className:this.getThumbClipClassName_(), style:thumbClipSize}, + this.getThrobber_(thumbClipSize), + this.getThumb_(thumbClipSize) + ) + ); + }, + + onThumbLoad_: function() { + this.setState({loaded:true}); + }, + + getThumbClipClassName_: function() { + return React.addons.classSet({ + 'cam-blobitem-thumbclip': true, + 'cam-blobitem-loading': !this.state.loaded, + }); + }, + + getThrobber_: function(thumbClipSize) { + if (this.state.loaded) { + return null; + } + return cam.PyramidThrobber({pos:cam.math.center(cam.PyramidThrobber.SIZE, thumbClipSize)}); + }, + + getThumb_: function(thumbClipSize) { + var thumbSize = this.getThumbSize_(thumbClipSize); + var pos = cam.math.center(thumbSize, thumbClipSize); + return React.DOM.img({ + className: 'cam-blobitem-thumb', + onLoad: this.onThumbLoad_, + src: this.props.src, + style: {left:pos.x, top:pos.y, visibility:(this.state.loaded ? 
'visible' : 'hidden')}, + title: this.props.title, + width: thumbSize.width, + height: thumbSize.height, + }) + }, + + getThumbSize_: function(thumbClipSize) { + var bleed = true; + return cam.math.scaleToFit(new goog.math.Size(this.props.aspect, 1), thumbClipSize, bleed); + }, +}); + +cam.BlobItemImageContent.getHandler = function(blobref, searchSession, href) { + var rm = searchSession.getResolvedMeta(blobref); + if (rm && rm.image) { + return new cam.BlobItemImageContent.Handler(rm, href, searchSession.getTitle(blobref)); + } + + var m = searchSession.getMeta(blobref); + if (m.camliType != 'permanode') { + return null; + } + + // Sets can have the camliContentImage attr to indicate a user-chosen "cover image" for the entire set. Until we have some rendering for those, the folder in the generic handler is a better fit than the single image. + if (cam.permanodeUtils.isContainer(m.permanode)) { + return null; + } + + var cci = cam.permanodeUtils.getSingleAttr(m.permanode, 'camliContentImage'); + if (cci) { + var ccim = searchSession.getResolvedMeta(cci); + if (ccim) { + return new cam.BlobItemImageContent.Handler(ccim, href, searchSession.getTitle(blobref)); + } + } + + return null; +}; + +cam.BlobItemImageContent.Handler = function(imageMeta, href, title) { + this.imageMeta_ = imageMeta; + this.href_ = href; + this.title_ = title; + this.thumber_ = cam.Thumber.fromImageMeta(imageMeta); +}; + +cam.BlobItemImageContent.Handler.prototype.getAspectRatio = function() { + return this.imageMeta_.image.width / this.imageMeta_.image.height; +}; + +cam.BlobItemImageContent.Handler.prototype.createContent = function(size) { + return cam.BlobItemImageContent({ + aspect: this.getAspectRatio(), + href: this.href_, + size: size, + src: this.thumber_.getSrc(size.height), + title: this.title_, + }); +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_progress_test.html 
b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_progress_test.html new file mode 100644 index 00000000..684027cd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_progress_test.html @@ -0,0 +1,17 @@ + + + + Camlistored progress + + + + +

    hello

    +
    +
    +
    +
    +
    +
    + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_react.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_react.js new file mode 100644 index 00000000..b587f146 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_react.js @@ -0,0 +1,92 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.BlobItemReact'); + +goog.require('goog.string'); +goog.require('goog.math.Coordinate'); + +cam.BlobItemReact = React.createClass({ + displayName: 'BlobItemReact', + + propTypes: { + blobref: React.PropTypes.string.isRequired, + checked: React.PropTypes.bool.isRequired, + onCheckClick: React.PropTypes.func, // (string,event)->void + onWheel: React.PropTypes.func.isRequired, + position: React.PropTypes.instanceOf(goog.math.Coordinate).isRequired, + }, + + getInitialState: function() { + return { + hovered: false, + }; + }, + + render: function() { + return React.DOM.div({ + className: this.getRootClassName_(), + style: this.getRootStyle_(), + onMouseEnter: this.handleMouseEnter_, + onMouseLeave: this.handleMouseLeave_, + onWheel: this.handleWheel_, + }, + this.getCheckmark_(), + this.props.children + ); + }, + + getRootClassName_: function() { + return React.addons.classSet({ + 'cam-blobitem': true, + 'goog-control-hover': this.state.hovered, + 'goog-control-checked': this.props.checked, + }); + }, + + getCheckmark_: function() { + if 
(this.props.onCheckClick) { + return React.DOM.div({className:'checkmark', onClick:this.handleCheckClick_}); + } else { + return null; + } + }, + + getRootStyle_: function() { + return { + left: this.props.position.x, + top: this.props.position.y, + }; + }, + + handleMouseEnter_: function() { + this.setState({hovered:true}); + }, + + handleMouseLeave_: function() { + this.setState({hovered:false}); + }, + + handleCheckClick_: function(e) { + this.props.onCheckClick(this.props.blobref, e); + }, + + handleWheel_: function() { + if (this.props.onWheel) { + this.props.onWheel(this); + } + }, +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_twitter.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_twitter.css new file mode 100644 index 00000000..897ff8f4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_twitter.css @@ -0,0 +1,66 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +@import (less) "prefix-free.css"; + + +.cam-blobitem-twitter-tweet { + color: black; + display: block; + font-size: 90%; + position: relative; + overflow: hidden; + white-space: normal; + border-radius: 7%; +} + +.cam-blobitem-twitter-tweet table { + border-spacing: 0; + background-color: #e1e8ed; + width: 100%; + height: 100%; +} + +.cam-blobitem-twitter-tweet-icon { + position: absolute; + width: 100%; + bottom: 0; +} + +.cam-blobitem-twitter-tweet-icon img { + width: 4em; + height: 4em; + position: absolute; + bottom: 1em; + right: 1em; + opacity: 1; +} + +.cam-blobitem-twitter-tweet-meta { + text-align: left; + vertical-align: top; + padding: 0.8em; +} + +.cam-blobitem-twitter-tweet-date { + color: #aaa; +} + +.cam-blobitem-twitter-tweet-image { + background-position: top; + background-size: cover; + height: 100%; +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_twitter_content.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_twitter_content.js new file mode 100644 index 00000000..0948b4d4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_twitter_content.js @@ -0,0 +1,129 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +goog.provide('cam.BlobItemTwitterContent'); + +goog.require('goog.math.Size'); + +goog.require('cam.dateUtils'); +goog.require('cam.math'); +goog.require('cam.permanodeUtils'); +goog.require('cam.Thumber'); + +cam.BlobItemTwitterContent = React.createClass({ + propTypes: { + date: React.PropTypes.number.isRequired, + href: React.PropTypes.string.isRequired, + image: React.PropTypes.string, + size: React.PropTypes.instanceOf(goog.math.Size).isRequired, + username: React.PropTypes.string.isRequired, + }, + + getImageRow_: function() { + if (!this.props.image) { + return null; + } + + return React.DOM.tr(null, + React.DOM.td({ + className: 'cam-blobitem-twitter-tweet-image', + colSpan: 2, + src: 'twitter-icon.png', + style: { + backgroundImage: 'url(' + this.props.image + ')', + }, + }) + ); + }, + + render: function() { + return React.DOM.a({ + href: this.props.href, + className: 'cam-blobitem-twitter-tweet', + style: { + width: this.props.size.width, + height: this.props.size.height, + }, + }, + React.DOM.table({height: this.props.image ? 
'100%' : ''}, + React.DOM.tr(null, + React.DOM.td({className: 'cam-blobitem-twitter-tweet-meta'}, + React.DOM.span({className: 'cam-blobitem-twitter-tweet-date'}, cam.dateUtils.formatDateShort(this.props.date)), + React.DOM.br(), + React.DOM.span({className: ' cam-blobitem-twitter-tweet-content'}, this.props.content) + ) + ), + this.getImageRow_(), + React.DOM.tr(null, + React.DOM.td({className: 'cam-blobitem-twitter-tweet-icon'}, + React.DOM.img({src: 'twitter-logo.png'}) + ) + ) + ) + ); + }, +}); + +cam.BlobItemTwitterContent.getHandler = function(blobref, searchSession, href) { + var m = searchSession.getMeta(blobref); + if (m.camliType != 'permanode') { + return null; + } + + if (cam.permanodeUtils.getCamliNodeType(m.permanode) != 'twitter.com:tweet') { + return null; + } + + var date = cam.permanodeUtils.getSingleAttr(m.permanode, 'startDate'); + var username = cam.permanodeUtils.getSingleAttr(m.permanode, 'url'); + if (!date || !username) { + return null; + } + + username = username.match(/^https:\/\/twitter.com\/(.+?)\//)[1]; + + // It's OK to not have any content. Tweets can be just images or whatever. + var content = cam.permanodeUtils.getSingleAttr(m.permanode, 'content'); + var imageMeta = cam.permanodeUtils.getSingleAttr(m.permanode, 'camliContentImage'); + if (imageMeta) { + imageMeta = searchSession.getResolvedMeta(imageMeta); + } + + return new cam.BlobItemTwitterContent.Handler(content, Date.parse(date), href, imageMeta, username); +}; + +cam.BlobItemTwitterContent.Handler = function(content, date, href, imageMeta, username) { + this.content_ = content; + this.date_ = date; + this.href_ = href; + this.username_ = username; + this.thumber_ = imageMeta ? 
new cam.Thumber.fromImageMeta(imageMeta) : null; +}; + +cam.BlobItemTwitterContent.Handler.prototype.getAspectRatio = function() { + return 1.0; +}; + +cam.BlobItemTwitterContent.Handler.prototype.createContent = function(size) { + return cam.BlobItemTwitterContent({ + content: this.content_, + date: this.date_, + href: this.href_, + image: this.thumber_ ? this.thumber_.getSrc(size) : null, + size: size, + username: this.username_, + }); +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_video.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_video.css new file mode 100644 index 00000000..d2315ece --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_video.css @@ -0,0 +1,53 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +@import (less) "prefix-free.css"; + + +.cam-blobitem-video { + display: block; +} + +.cam-blobitem-video a { + text-decoration: none; +} + +.cam-blobitem-video .fa-video-camera { + color: #ccc; + display: block; + text-align: center; +} + +.cam-blobitem-video .fa-play, +.cam-blobitem-video .fa-pause { + cursor: default; + position: absolute; + left: 35%; + top: 40%; + color: rgba(125, 125, 125, 0.85); + line-height: 100%; +} + +.cam-blobitem-video .fa-pause { + left: 32%; +} + +.cam-blobitem-video-loaded .fa-play { + left: 40%; +} +.cam-blobitem-video-loaded .fa-pause { + left: 37%; +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_video_content.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_video_content.js new file mode 100644 index 00000000..e338f170 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_item_video_content.js @@ -0,0 +1,197 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.BlobItemVideoContent'); + +goog.require('goog.math.Size'); + +// Renders video blob items. Currently recognizes movies by looking for a filename with a common movie extension. 
+cam.BlobItemVideoContent = React.createClass({ + displayName: 'BlobItemVideoContent', + + MIN_PREVIEW_SIZE: 128, + + propTypes: { + blobref: React.PropTypes.string.isRequired, + filename: React.PropTypes.string.isRequired, + href: React.PropTypes.string.isRequired, + size: React.PropTypes.instanceOf(goog.math.Size).isRequired, + }, + + getInitialState: function() { + return { + loaded: false, + mouseover: false, + playing: false, + }; + }, + + render: function() { + return React.DOM.div({ + className: React.addons.classSet({ + 'cam-blobitem-video': true, + 'cam-blobitem-video-loaded': this.state.loaded, + }), + onMouseEnter: this.handleMouseOver_, + onMouseLeave: this.handleMouseOut_, + }, + React.DOM.a({href: this.props.href}, + this.getVideo_(), + this.getPoster_() + ), + this.getPlayPauseButton_() + ); + }, + + getPoster_: function() { + if (this.state.loaded) { + return null; + } + // TODO(aa): When server indexes videos and provides a poster image, render it here. + return React.DOM.i({ + className: 'fa fa-video-camera', + style: { + fontSize: this.props.size.height / 1.5 + 'px', + lineHeight: this.props.size.height + 'px', + width: this.props.size.width, + } + }) + }, + + getVideo_: function() { + if (!this.state.loaded) { + return null; + } + return React.DOM.video({ + autoPlay: true, + src: goog.string.subs('%s%s/%s', goog.global.CAMLISTORE_CONFIG.downloadHelper, this.props.blobref, this.props.filename), + width: this.props.size.width, + height: this.props.size.height, + }) + }, + + getPlayPauseButton_: function() { + if (!this.state.mouseover || this.props.size.width < this.MIN_PREVIEW_SIZE || this.props.size.height < this.MIN_PREVIEW_SIZE) { + return null; + } + return React.DOM.i({ + className: React.addons.classSet({ + 'fa': true, + 'fa-play': !this.state.playing, + 'fa-pause': this.state.playing, + }), + onClick: this.handlePlayPauseClick_, + style: { + fontSize: this.props.size.height / 5 + 'px', + } + }) + }, + + handlePlayPauseClick_: function(e) { 
+ this.setState({ + loaded: true, + playing: !this.state.playing, + }); + + if (this.state.loaded) { + var video = this.getDOMNode().querySelector('video'); + if (this.state.playing) { + video.pause(); + } else { + video.play(); + } + } + }, + + handleMouseOver_: function() { + this.setState({mouseover:true}); + }, + + handleMouseOut_: function() { + this.setState({mouseover:false}); + }, +}); + +cam.BlobItemVideoContent.isVideo = function(rm) { + // From http://en.wikipedia.org/wiki/List_of_file_formats + // TODO(aa): Fix this quick hack once the server indexes movies and gives us more information. + var extensions = [ + '3gp', + 'aav', + 'asf', + 'avi', + 'dat', + 'm1v', + 'm2v', + 'm4v', + 'mov', + 'mp4', + 'mpe', + 'mpeg', + 'mpg', + 'ogg', + 'wmv', + ]; + return rm && rm.file && goog.array.some(extensions, goog.string.endsWith.bind(null, rm.file.fileName.toLowerCase())); +}; + +cam.BlobItemVideoContent.getHandler = function(blobref, searchSession, href) { + var rm = searchSession.getResolvedMeta(blobref); + + // From http://en.wikipedia.org/wiki/List_of_file_formats + // TODO(aa): Fix this quick hack once the server indexes movies and gives us more information. + var extensions = [ + '3gp', + 'aav', + 'asf', + 'avi', + 'dat', + 'm1v', + 'm2v', + 'm4v', + 'mov', + 'mp4', + 'mpe', + 'mpeg', + 'mpg', + 'ogg', + 'wmv', + ]; + if (cam.BlobItemVideoContent.isVideo(rm)) { + return new cam.BlobItemVideoContent.Handler(rm, href) + } + + return null; +}; + +cam.BlobItemVideoContent.Handler = function(rm, href) { + this.rm_ = rm; + this.href_ = href; +}; + +cam.BlobItemVideoContent.Handler.prototype.getAspectRatio = function() { + // TODO(aa): Provide the right value here once server indexes movies. 
+ return 1; +}; + +cam.BlobItemVideoContent.Handler.prototype.createContent = function(size) { + return cam.BlobItemVideoContent({ + blobref: this.rm_.blobRef, + filename: this.rm_.file.fileName, + href: this.href_, + size: size, + }); +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_test.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_test.js new file mode 100644 index 00000000..d5eab3f4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blob_test.js @@ -0,0 +1,105 @@ +/* +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +goog.require('goog.crypt.Hash'); +goog.require('goog.crypt.Sha1'); +goog.require('goog.string'); + +var assert = require('assert'); + +goog.require('cam.blob'); + + +var MockDOMBlob = function(buffer, start, end) { + this.buffer_ = buffer; + this.start_ = start; + this.size = end - start; +}; + +MockDOMBlob.fromSize = function(size, chr) { + var arr = new Uint8Array(size); + for (var i = 0; i < arr.length; i++) { + arr[i] = chr.charCodeAt(0); + } + return new MockDOMBlob(arr.buffer, 0, arr.length); +}; + +MockDOMBlob.prototype.slice = function(start, end) { + if (start < 0 || start >= this.size) { + throw new Error(goog.string.subs("start '%s' out of range [0,%s)", start, this.size)); + } + if (end < this.start_ || end > this.size) { + throw new Error(goog.string.subs("end '%s' out of range [0,%s)", end, this.size)); + } + if (end < start) { + throw new Error(goog.string.subs("end '%s' is less than start '%s'", end, start)); + } + + return new MockDOMBlob(this.buffer_, this.start_ + start, this.start_ + end); +}; + +MockDOMBlob.prototype.getArrayBuffer = function() { + return new Uint8Array(this.buffer_, this.start_, this.size); +}; + + +var MockFileReaderSync = function() { +}; + +MockFileReaderSync.prototype.readAsArrayBuffer = function(blob) { + return blob.getArrayBuffer(); +}; + + +describe('cam.blob', function() { + describe('#refFromHash', function() { + it('should calculate the right hash', function() { + var hash = new goog.crypt.Sha1(); + assert.equal(cam.blob.refFromHash(hash), 'sha1-da39a3ee5e6b4b0d3255bfef95601890afd80709'); + + hash.reset(); + hash.update('The quick brown fox jumps over the lazy dog'); + assert.equal(cam.blob.refFromHash(hash), 'sha1-2fd4e1c67a2d28fced849ee1bb76e7391b93eb12'); + }); + + it('should complain about wrong hash function', function() { + function FooHash() {}; + goog.inherits(FooHash, goog.crypt.Hash); + assert.throws(cam.blob.refFromHash.bind(null, new FooHash()), /Unsupported hash function type/); + }); + }); + + 
describe('#refFromString', function() { + it('should calculate the right hash', function() { + assert.equal(cam.blob.refFromString(''), 'sha1-da39a3ee5e6b4b0d3255bfef95601890afd80709'); + assert.equal(cam.blob.refFromString('The quick brown fox jumps over the lazy dog'), 'sha1-2fd4e1c67a2d28fced849ee1bb76e7391b93eb12'); + assert.equal(cam.blob.refFromString('Les caractères accentués, quelle plaie.'), 'sha1-2ad8f499b8721a7fe35504bce86df451db37dd66'); + }); + }); + + describe('#refFromDOMBlob', function() { + it('should calculate the right hash', function() { + blob = MockDOMBlob.fromSize(1000001, 'a'); + goog.global.FileReaderSync = MockFileReaderSync; + try { + // Verified with openssl. + assert.equal(cam.blob.refFromDOMBlob(blob), 'sha1-432e7e01de7086c5246b6ac57f5f435b58f13752'); + } finally { + delete goog.global.FileReaderSync; + } + }); + }); +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blobref.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blobref.js new file mode 100644 index 00000000..13134972 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blobref.js @@ -0,0 +1,20 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.blobref'); + +// TODO(aa): Need to eventually implement something like ref.go, which understands all the different types of hashes. 
+cam.blobref.PATTERN = 'sha1-[0-9a-f]{40}'; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blog.html b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blog.html new file mode 100644 index 00000000..f0029781 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/blog.html @@ -0,0 +1,6 @@ + + + + TODO + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/cache_buster_iframe.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/cache_buster_iframe.js new file mode 100644 index 00000000..3360eb29 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/cache_buster_iframe.js @@ -0,0 +1,107 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.CacheBusterIframe'); + +goog.require('goog.Uri'); + +goog.require('cam.Navigator'); + +// Reload/shift-reload doesn't actually reload iframes from server in Chrome. +// We should implement content stamping, but for now, this is a workaround. 
+cam.CacheBusterIframe = React.createClass({ + propTypes: { + baseURL: React.PropTypes.instanceOf(goog.Uri).isRequired, + height: React.PropTypes.number.isRequired, + onChildFrameClick: React.PropTypes.func, + src: React.PropTypes.instanceOf(goog.Uri).isRequired, + width: React.PropTypes.number.isRequired, + }, + + componentDidMount: function() { + this.getDOMNode().contentWindow.addEventListener('DOMContentLoaded', this.handleDOMContentLoaded_); + }, + + componentDidUpdate: function() { + this.componentDidMount(); + }, + + getInitialState: function() { + return { + height: this.props.height, + r: Date.now(), + } + }, + + render: function() { + var uri = this.props.src.clone(); + uri.setParameterValue('r', this.state.r); + return React.DOM.iframe({ + height: this.state.height, + src: uri.toString(), + style: { + border: 'none', + }, + width: this.props.width, + }); + }, + + handleDOMContentLoaded_: function() { + this.updateSize_(); + if (this.props.onChildFrameClick) { + this.getDOMNode().contentWindow.addEventListener('click', this.handleChildFrameClick_); + } + }, + + handleChildFrameClick_: function(e) { + var elm = cam.Navigator.shouldHandleClick(e); + if (!elm) { + return; + } + + var oldURL = new goog.Uri(e.target.href); + var newURL = this.props.baseURL.clone(); + var query = oldURL.getParameterValue('q'); + + if (query) { + newURL.setParameterValue('q', query); + } else { + newURL.setPath(newURL.getPath() + (oldURL.getParameterValue('p') || oldURL.getParameterValue('d') || oldURL.getParameterValue('b'))); + } + + try { + if (this.props.onChildFrameClick(newURL)) { + e.preventDefault(); + } + } catch (ex) { + e.preventDefault(); + throw ex; + } + }, + + updateSize_: function() { + if (!this.isMounted()) { + return; + } + + var node = this.getDOMNode(); + if (node && node.contentDocument && node.contentDocument.body) { + node.contentDocument.body.style.overflowY = 'hidden'; + this.setState({height: node.contentDocument.documentElement.offsetHeight }); + } + 
window.setTimeout(this.updateSize_, 200); + }, +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/checkmark2.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/checkmark2.svg new file mode 100644 index 00000000..539745c0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/checkmark2.svg @@ -0,0 +1,72 @@ + + + + + + + + + + image/svg+xml + + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/checkmark2_blue.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/checkmark2_blue.svg new file mode 100644 index 00000000..531ad35a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/checkmark2_blue.svg @@ -0,0 +1,71 @@ + + + + + + + + + + image/svg+xml + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/circled_plus.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/circled_plus.svg new file mode 100644 index 00000000..0a8f4c8b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/circled_plus.svg @@ -0,0 +1,87 @@ + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/clear.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/clear.svg new file mode 100644 index 00000000..9dd6d136 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/clear.svg @@ -0,0 +1,81 @@ + + + + + + + + + + + + image/svg+xml + + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/close.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/close.svg new file mode 100644 index 00000000..70afcef5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/close.svg @@ -0,0 +1,68 @@ + + + + + + + + + + image/svg+xml + + + + + + + + + + diff --git 
a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/closure-toolbar-bg.png b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/closure-toolbar-bg.png new file mode 100644 index 00000000..469c353b Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/closure-toolbar-bg.png differ diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/closure/closure.go b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/closure/closure.go new file mode 100644 index 00000000..bc327a66 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/closure/closure.go @@ -0,0 +1,143 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package closure + +import ( + "archive/zip" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "strings" + "sync" + "time" +) + +// ZipData is either the empty string (when compiling with "go get", +// or the devcam server), or is initialized to a base64-encoded zip file +// of the Closure library (when using make.go, which puts an extra +// file in this package containing an init function to set ZipData). 
+var ZipData string +var ZipModTime time.Time + +func FileSystem() (http.FileSystem, error) { + if ZipData == "" { + return nil, os.ErrNotExist + } + zr, err := zip.NewReader(strings.NewReader(ZipData), int64(len(ZipData))) + if err != nil { + return nil, err + } + m := make(map[string]*fileInfo) + for _, zf := range zr.File { + if !strings.HasPrefix(zf.Name, "closure/") { + continue + } + fi, err := newFileInfo(zf) + if err != nil { + return nil, fmt.Errorf("Error reading zip file %q: %v", zf.Name, err) + } + m[strings.TrimPrefix(zf.Name, "closure")] = fi + } + return &fs{zr, m}, nil + +} + +type fs struct { + zr *zip.Reader + m map[string]*fileInfo // keyed by what Open gets. see Open's comment. +} + +var nopCloser = ioutil.NopCloser(nil) + +// Open is called with names like "/goog/base.js", but the zip contains Files named like "closure/goog/base.js". +func (s *fs) Open(name string) (http.File, error) { + fi, ok := s.m[name] + if !ok { + return nil, os.ErrNotExist + } + return &file{fileInfo: fi}, nil +} + +// a file is an http.File, wrapping a *fileInfo with a lazily-constructed SectionReader. 
+type file struct { + *fileInfo + once sync.Once // for making the SectionReader + sr *io.SectionReader +} + +func (f *file) Read(p []byte) (n int, err error) { + f.once.Do(f.initReader) + return f.sr.Read(p) +} + +func (f *file) Seek(offset int64, whence int) (ret int64, err error) { + f.once.Do(f.initReader) + return f.sr.Seek(offset, whence) +} + +func (f *file) initReader() { + f.sr = io.NewSectionReader(f.fileInfo.ra, 0, f.Size()) +} + +func newFileInfo(zf *zip.File) (*fileInfo, error) { + rc, err := zf.Open() + if err != nil { + return nil, err + } + all, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + rc.Close() + return &fileInfo{ + fullName: zf.Name, + regdata: all, + Closer: nopCloser, + ra: bytes.NewReader(all), + }, nil +} + +type fileInfo struct { + fullName string + regdata []byte // non-nil if regular file + ra io.ReaderAt // over regdata + io.Closer +} + +func (f *fileInfo) IsDir() bool { return f.regdata == nil } +func (f *fileInfo) Size() int64 { return int64(len(f.regdata)) } +func (f *fileInfo) ModTime() time.Time { return ZipModTime } +func (f *fileInfo) Name() string { return path.Base(f.fullName) } +func (f *fileInfo) Stat() (os.FileInfo, error) { return f, nil } +func (f *fileInfo) Sys() interface{} { return nil } + +func (f *fileInfo) Readdir(count int) ([]os.FileInfo, error) { + // TODO: implement. 
+ return nil, errors.New("TODO") +} + +func (f *fileInfo) Mode() os.FileMode { + if f.IsDir() { + return 0755 | os.ModeDir + } + return 0644 +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/date_utils.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/date_utils.js new file mode 100644 index 00000000..7dc839d5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/date_utils.js @@ -0,0 +1,47 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.dateUtils'); + +cam.dateUtils.formatDateShort = function(date) { + // TODO(aa): Do something better based on Closure date/i18n utils. + // I think I would prefer this to return (in en-us) either '11:18 PM', 'Jun 11', or 'June 11 1952', depending on how far back it is. I don't find '5 hours ago' that useful. 
+ var seconds = Math.floor((Date.now() - date) / 1000); + var interval = Math.floor(seconds / 31536000); + + return (function() { + if (interval > 1) { + return interval + ' years'; + } + interval = Math.floor(seconds / 2592000); + if (interval > 1) { + return interval + ' months'; + } + interval = Math.floor(seconds / 86400); + if (interval > 1) { + return interval + ' days'; + } + interval = Math.floor(seconds / 3600); + if (interval > 1) { + return interval + ' hours'; + } + interval = Math.floor(seconds / 60); + if (interval > 1) { + return interval + ' minutes'; + } + return Math.floor(seconds) + ' seconds'; + })() + ' ago'; +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/debug.html b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/debug.html new file mode 100644 index 00000000..b5eee3d2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/debug.html @@ -0,0 +1,47 @@ + + + + Camlistored UI + + + + + + + +
    +

    Root Discovery

    +

    +
    (discovery results)
    + +

    Signing Discovery

    +

    +
    (jsonsign discovery results)
    + +

    Signing Debug

    + + + + + + + + + + + + + + + +
    JSON blob to sign: Signed blob:Verification details:
    +
    + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/debug_console.html b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/debug_console.html new file mode 100644 index 00000000..390d6174 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/debug_console.html @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + + + + + + Brad's (somewhat less) ghetto console thing + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/debug_console.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/debug_console.js new file mode 100644 index 00000000..f37f9af7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/debug_console.js @@ -0,0 +1,259 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +goog.provide('cam.DebugConsole'); + +goog.require('cam.reactUtil'); + +goog.require('cam.ServerConnection'); + +goog.require('goog.labs.Promise'); + +goog.require('goog.object'); + +cam.DebugConsole = React.createClass({ + HELP_TEXT: "-help", + HANDLERS: { + selected: { + execute: function(client, input, callback) { + var blobrefs = goog.object.getKeys(client.getSelectedItems()); + + if (!blobrefs.length) { + callback('Please select at least one item'); + } else { + callback(goog.object.getKeys(client.getSelectedItems()).join(', ')); + } + }, + help: function(callback) { + callback('Usage: selected | Blobrefs of the selected items will be written to console output'); + }, + }, + tag: { + execute: function(client, input, callback) { + var blobrefs = goog.object.getKeys(client.getSelectedItems()); + var parts = cam.DebugConsole.parseCommandAndArgs(input); + var mode = parts['command']; + var tags = parts['args'].split(',').map(function(s) { return s.trim(); }); + var prettyTags = tags.join(', '); + + if (!blobrefs.length) { + callback('Please select at least one item'); + return; + } else if (!mode) { + callback('Please provide a mode of operation for tag'); + return; + } else if (mode != 'clear' && tags.some(function(t) { return !t })) { + callback('At least one invalid tag value was supplied: ' + prettyTags); + return; + } + + var sc = client.serverConnection; + var promises = []; + + // TODO(mr): do we need to restrict add/removal of tags based upon existing values? 
ex: Don't delete tag 'taco' if item is not tagged with 'taco' + switch (mode) { + case "add": { + if (tags.length == 1 && tags[0] == '') { + callback('Please provide at least one tag value to add'); + return; + } + + blobrefs.forEach(function(permanode) { + tags.forEach(function(tag) { + console.log('add-tag-promise for: ' + permanode + ", tag: " + tag); + promises.push(new goog.labs.Promise(sc.newAddAttributeClaim.bind(sc, permanode, 'tag', tag))); + }); + }); + break; + } + case "del": { + if (tags.length == 1 && tags[0] == '') { + callback('Please provide at least one tag value to delete'); + return; + } + + blobrefs.forEach(function(permanode) { + tags.forEach(function(tag) { + console.log('del-tag-promise for: ' + permanode + ", tag: " + tag); + promises.push(new goog.labs.Promise(sc.newDelAttributeClaim.bind(sc, permanode, 'tag', tag))); + }); + }); + break; + } + case "set": { + if (tags.length == 1 && tags[0] == '') { + callback('Please provide at least one tag value to set'); + return; + } + + // 'set' tags using first value supplied then 'add' any additional + var numTags = tags.length; + blobrefs.forEach(function(permanode) { + console.log('set-tag-promise for: ' + permanode + ", tag: " + tags[0]); + promises.push(new goog.labs.Promise(sc.newSetAttributeClaim.bind(sc, permanode, 'tag', tags[0]))); + + for (var i = 1; i < numTags; i++) { + console.log('add-tag-promise for: ' + permanode + ", tag: " + tags[i]); + promises.push(new goog.labs.Promise(sc.newAddAttributeClaim.bind(sc, permanode, 'tag', tags[i]))); + } + }); + break; + } + case "clear": { + blobrefs.forEach(function(permanode) { + console.log('clear-tag-promise for: ' + permanode); + promises.push(new goog.labs.Promise(sc.newDelAttributeClaim.bind(sc, permanode, 'tag', ''))); + }); + break; + } + default: { + callback('tag command does not support : ' + mode); + return; + } + } + + goog.labs.Promise.all(promises).thenCatch(function(e) { + console.error('promise rejected: %s', e); + 
callback('The system encountered an error executing tag ' + mode + ': ' + e); + }).then(function(results) { + if (results) { + console.log('successfully completed %d of %d promises', results.length, promises.length); + + if (mode == 'add') { + callback('Successfully added the tag(s) {' + prettyTags + '} to ' + blobrefs.length + ' items'); + } else if (mode == 'del') { + callback('Successfully deleted the tag(s) {' + prettyTags + '} from ' + blobrefs.length + ' items'); + } else if (mode == 'set') { + callback('Successfully reset ' + blobrefs.length + ' items to have the tag(s) {' + prettyTags + '}'); + } else if (mode == 'clear') { + callback('Successfully deleted all tags from ' + blobrefs.length + ' items'); + } + } else { + // else: intentionally left blank. empty error object returned upon promise rejection + } + }).then(function() { + console.log('tag operation complete'); + }); + callback('executing tag operation'); + }, + help: function(callback) { + callback('Usage: tag [val1,val2,...] | Add, delete, set, or clear tag attributes on the selected permanodes | Examples: tag add val1,val2,val3 | tag del val1,val2 | tag set val1 | tag clear'); + }, + }, + }, + + getPlaceholderText_: function() { + return this.getAvailableCommands_() + " (" + this.HELP_TEXT + ")"; + }, + + getStaticHelpText_: function() { + return 'Further usage information is available by ' + this.HELP_TEXT; + }, + + getAvailableCommands_: function() { + return goog.object.getKeys(this.HANDLERS).join(', '); + }, + + handleInputChange_: function(e) { + this.setState({commandInput:e.target.value}); + }, + + handleSubmit_: function(e) { + e.preventDefault(); + var parts = cam.DebugConsole.parseCommandAndArgs(this.state.commandInput); + var h = this.HANDLERS[parts['command']]; + if (h) { + if (parts['args'] == this.HELP_TEXT) { + h.help(this.handleOutput_); + } else { + h.execute(this.props.client, parts['args'], this.handleOutput_); + } + } else { + this.handleOutput_('Command not found. 
Available commands are: ' + this.getAvailableCommands_() + '. ' + this.getStaticHelpText_()); + } + }, + + handleOutput_: function(out) { + this.setState({commandResult:out}); + this.setState({commandInput:''}); + this.refs.consoleInput.getDOMNode().focus(); + }, + + /* + * ReactJS #ComponentSpec + */ + getInitialState: function() { + return { + commandInput: '', + commandResult: 'Enter a command and hit Go or press the Enter key to execute. ' + this.getStaticHelpText_() + }; + }, + + propTypes: { + client: React.PropTypes.shape({ + getSelectedItems: React.PropTypes.func.isRequired, + // TODO(mr): JS warning in Chrome console, I assume here, though no exact line # provided. "invalid prop 'serverConnection' supplied to '<>', expected instance of '<>'" + serverConnection: React.PropTypes.instanceOf(cam.ServerConnection).isRequired, + }), + }, + + render: function() { + // TODO(aa): Figure out flexbox to lay this out correctly. + return React.DOM.div(null, + React.DOM.div(null, "Input"), + React.DOM.div(null, + React.DOM.form({onSubmit:this.handleSubmit_}, + React.DOM.input({ + type: 'text', + ref: 'consoleInput', + placeholder: this.getPlaceholderText_(), + style: {width:275}, + onChange: this.handleInputChange_, + value: this.state.commandInput + }), + React.DOM.button(null, 'Go') + ) + ), + React.DOM.div(null, "Output"), + React.DOM.textarea({ + readOnly: true, + style: {overflow:'auto', width:310, height:150}, + value: this.state.commandResult + }) + ); + }, + + statics: { + /** + * @return {'command' : 'x', 'args': 'y'} The first word (command) and remaining arguments of the input string + */ + parseCommandAndArgs : function(s) { + var parts = s.split(' '); + var firstCommand = parts.shift(); + var arguments = parts.join(' ').trim(); + + return {'command':firstCommand, 'args':arguments}; + } + }, + + /* + * ReactJS #Lifecycle Methods + */ + componentDidMount: function() { + // allow immediate entry of commands + this.refs.consoleInput.getDOMNode().focus(); + }, 
+}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/detail.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/detail.css new file mode 100644 index 00000000..163561cc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/detail.css @@ -0,0 +1,113 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +@import (less) "prefix-free.css"; + + +/* TODO(aa): All this needs to get renamed to image-detail */ +.detail-view { + background: black; + left: 0; + position: absolute; + overflow: hidden; + top: 0; +} + +.detail-view-img { + opacity: 0; + position: absolute; + .transition(opacity 200ms linear); +} + +.detail-view-img-loaded { + opacity: 1; +} + +.detail-img-leave { + .transition(opacity 200ms linear); + opacity: 1; +} +.detail-img-leave.detail-img-leave-active { + opacity: 0; +} + +.detail-view-sidebar { + background: #f9f9f9; + bottom: 0; + overflow: auto; + position: absolute; + right: 0; + top: 0; +} + +.detail-view-piggy { + position: absolute; +} + +.detail-view-piggy.detail-view-piggy-backward { + .transform(scaleX(-1)); +} + +.detail-piggy-leave { + .transition(opacity 200ms linear); + opacity: 1; +} + +.detail-piggy-leave.detail-piggy-leave-active { + opacity: 0; +} + +.detail-title { + font-size: inherit; + margin: 0; +} + +.detail-description { + margin-bottom: 0; +} + +.cam-detail { + position: absolute; + background: #222; + left: 0; + top: 0; + width: 100%; + 
overflow: hidden; +} + +.cam-detail iframe { + background: white; +} + +.cam-detail-aspect-nav { + position: absolute; + bottom: 0; + left: 0; + width: 100%; +} + +.cam-detail-aspect-nav a { + color: #bbb; + display: inline-block; + font-family: 'Open Sans', sans-serif; + font-size: 14px; + margin-left: 1em; + padding: 0.5ex; +} + +.cam-detail-aspect-nav a:hover { + color: #ddd; +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/detail.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/detail.js new file mode 100644 index 00000000..f8e035fc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/detail.js @@ -0,0 +1,213 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.DetailView'); + +goog.require('goog.array'); +goog.require('goog.events.EventHandler'); +goog.require('goog.math.Size'); +goog.require('goog.object'); +goog.require('goog.string'); + +goog.require('cam.AnimationLoop'); +goog.require('cam.ImageDetail'); +goog.require('cam.Navigator'); +goog.require('cam.reactUtil'); +goog.require('cam.SearchSession'); +goog.require('cam.SpritedAnimation'); + +// Top-level control for the detail view. Handles loading data specified in URL and left/right navigation. +// The details of the actual rendering are left up to child controls which are chosen based on the type of data loaded. However, currently there is only one type of child control: cam.ImageDetail. 
+cam.DetailView = React.createClass({ + displayName: 'DetailView', + + propTypes: { + aspects: cam.reactUtil.mapOf(React.PropTypes.shape({ + getTitle: React.PropTypes.func.isRequired, + createContent: React.PropTypes.func.isRequired, + })).isRequired, + blobref: React.PropTypes.string.isRequired, + getDetailURL: React.PropTypes.func.isRequired, + history: React.PropTypes.shape({go:React.PropTypes.func.isRequired}).isRequired, + height: React.PropTypes.number.isRequired, + keyEventTarget: React.PropTypes.object.isRequired, // An event target we will addEventListener() on to receive key events. + navigator: React.PropTypes.instanceOf(cam.Navigator).isRequired, + searchSession: React.PropTypes.instanceOf(cam.SearchSession).isRequired, + searchURL: React.PropTypes.instanceOf(goog.Uri).isRequired, + width: React.PropTypes.number.isRequired, + }, + + getInitialState: function() { + return { + lastNavigateWasBackward: false, + selectedAspect: '', + }; + }, + + componentWillMount: function() { + this.pendingNavigation_ = 0; + this.navCount_ = 1; + this.eh_ = new goog.events.EventHandler(this); + }, + + componentDidMount: function(root) { + this.eh_.listen(this.props.searchSession, cam.SearchSession.SEARCH_SESSION_CHANGED, this.searchUpdated_); + this.eh_.listen(this.props.keyEventTarget, 'keyup', this.handleKeyUp_); + this.searchUpdated_(); + }, + + render: function() { + var activeAspects = null; + var selectedAspect = null; + + if (this.dataIsLoaded_()) { + activeAspects = goog.object.filter( + goog.object.map(this.props.aspects, function(f) { + return f(this.props.blobref, this.props.searchSession); + }, this), + function(a) { + return a != null; + } + ); + + selectedAspect = activeAspects[this.state.selectedAspect] || goog.object.getAnyValue(activeAspects); + } + + return React.DOM.div({className: 'cam-detail', style: {height: this.props.height}}, + this.getAspectNav_(activeAspects), + + // TODO(aa): Actually pick this based on the current URL + 
this.getAspectView_(selectedAspect) + ); + }, + + getAspectNav_: function(aspects) { + if (!aspects) { + return null; + } + var items = goog.object.getValues(goog.object.map(aspects, function(aspect, name) { + // TODO(aa): URLs involving k I guess? + return React.DOM.a({href: '#', onClick: this.handleAspectClick_.bind(this, name)}, aspect.getTitle()); + }, this)); + items.push(React.DOM.a({href: this.props.searchURL.toString()}, 'Back to search')); + return React.DOM.div({className: 'cam-detail-aspect-nav'}, items); + }, + + getAspectView_: function(aspect) { + if (aspect) { + // TODO(aa): Why doesn't parent pass us |Size| instead of width/height? + return aspect.createContent(new goog.math.Size(this.props.width, this.props.height - 25), this.state.lastNavigateWasBackward); + } else { + return null; + } + }, + + componentWillUnmount: function() { + this.eh_.dispose(); + }, + + handleAspectClick_: function(name, e) { + // Mathieu requests that middle and right-click do nothing until we can make real URLs work. 
+ if (e.button == 0) { + this.setState({ + selectedAspect: name, + }); + } + return false; + }, + + handleKeyUp_: function(e) { + if (e.keyCode == goog.events.KeyCodes.LEFT) { + this.navigate_(-1); + } else if (e.keyCode == goog.events.KeyCodes.RIGHT) { + this.navigate_(1); + } else if (e.keyCode == goog.events.KeyCodes.ESC) { + this.handleEscape_(e); + } + }, + + navigate_: function(offset) { + this.pendingNavigation_ = offset; + ++this.navCount_; + this.setState({lastNavigateWasBackward: offset < 0}); + this.handlePendingNavigation_(); + }, + + handleEscape_: function(e) { + e.preventDefault(); + e.stopPropagation(); + history.go(-this.navCount_); + }, + + handlePendingNavigation_: function() { + if (!this.pendingNavigation_) { + return; + } + + var results = this.props.searchSession.getCurrentResults(); + var index = goog.array.findIndex(results.blobs, function(elm) { + return elm.blob == this.props.blobref; + }.bind(this)); + + if (index == -1) { + this.props.searchSession.loadMoreResults(); + return; + } + + index += this.pendingNavigation_; + if (index < 0) { + this.pendingNavigation_ = 0; + console.log('Cannot navigate past beginning of search result.'); + return; + } + + if (index >= results.blobs.length) { + if (this.props.searchSession.isComplete()) { + this.pendingNavigation_ = 0; + console.log('Cannot navigate past end of search result.'); + } else { + this.props.searchSession.loadMoreResults(); + } + return; + } + + this.props.navigator.navigate(this.props.getDetailURL(results.blobs[index].blob)); + }, + + searchUpdated_: function() { + this.handlePendingNavigation_(); + + if (this.dataIsLoaded_()) { + this.forceUpdate(); + return; + } + + if (this.props.searchSession.isComplete()) { + // TODO(aa): 404 UI. 
+ var error = goog.string.subs('Could not find blobref %s in search session.', this.props.blobref); + alert(error); + throw new Error(error); + } + + // TODO(aa): This can be inefficient in the case of a fresh page load if we have to load lots of pages to find the blobref. + // Our search protocol needs to be updated to handle the case of paging ahead to a particular item. + this.props.searchSession.loadMoreResults(); + }, + + dataIsLoaded_: function() { + return Boolean(this.props.searchSession.getMeta(this.props.blobref)); + }, +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/dialog.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/dialog.css new file mode 100644 index 00000000..c2d49742 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/dialog.css @@ -0,0 +1,49 @@ +/* +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +@import (less) "prefix-free.css"; + + +.cam-dialog-mask { + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + z-index: 2; + background: rgba(200,200,200,0.85); +} + +.cam-dialog { + position: fixed; + padding: 0 2em; + border: solid #E56A5E; + background: #eee; + color: #444; + z-index: 3; + box-shadow: 0 0 1em 0.2em rgba(0, 0, 0, 0.4); + font-family: 'Open Sans', sans-serif; + font-size: 24px; + text-align: center; +} + +.cam-dialog .cam-dialog-close { + position: absolute; + right: 14px; + top: 14px; + color: #555; + cursor: pointer; +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/dialog.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/dialog.js new file mode 100644 index 00000000..8ead3ca4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/dialog.js @@ -0,0 +1,61 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +goog.provide('cam.Dialog'); + +cam.Dialog = React.createClass({ + propTypes: { + availWidth: React.PropTypes.number.isRequired, + availHeight: React.PropTypes.number.isRequired, + width: React.PropTypes.number.isRequired, + height: React.PropTypes.number.isRequired, + borderWidth: React.PropTypes.number.isRequired, + onClose: React.PropTypes.func, + }, + + render: function() { + return React.DOM.div( + { + className: 'cam-dialog-mask', + }, + React.DOM.div( + { + className: 'cam-dialog', + style: { + 'width': this.props.width, + 'height': this.props.height, + 'left': (this.props.availWidth - this.props.width) / 2, + 'top': (this.props.availHeight - this.props.height) / 2, + 'border-width': this.props.borderWidth, + }, + }, + this.getClose_(), + this.props.children + ) + ); + }, + + getClose_: function() { + if (!this.props.onClose) { + return null; + } + + return React.DOM.i({ + className: 'fa fa-times fa-lg fa-border cam-dialog-close', + onClick: this.props.onClose, + }); + }, +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/directory_detail.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/directory_detail.js new file mode 100644 index 00000000..a674d62a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/directory_detail.js @@ -0,0 +1,47 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +goog.provide('cam.DirectoryDetail'); + +goog.require('cam.CacheBusterIframe'); + +// TODO(aa): Rename file. +cam.DirectoryDetail.getAspect = function(baseURL, onChildFrameClick, blobref, targetSearchSession) { + if (!targetSearchSession) { + return; + } + + var rm = targetSearchSession.getResolvedMeta(blobref); + if (!rm || rm.camliType != 'directory') { + return null; + } + + return { + fragment: 'directory', + title: 'Directory', + createContent: function(size) { + var url = baseURL.clone(); + url.setParameterValue('d', rm.blobRef); + return cam.CacheBusterIframe({ + baseURL: baseURL, + height: size.height, + onChildFrameClick: onChildFrameClick, + src: url, + width: size.width, + }); + }, + }; +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/down.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/down.svg new file mode 100644 index 00000000..f5653853 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/down.svg @@ -0,0 +1,82 @@ + + + + + + + + + + + + image/svg+xml + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/file.png b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/file.png new file mode 100644 index 00000000..e38332e2 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/file.png differ diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/fileembed.go b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/fileembed.go new file mode 100644 index 00000000..3a14023f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/fileembed.go @@ -0,0 +1,35 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package ui contains the resources for the Camlistore web UI. + +The below is read by genfileembed.go to determine the files to embed in the +server binary. Crazy, but true. +#fileembed pattern .+\.(js|css|html|png|svg)$ +*/ +package ui + +import ( + "camlistore.org/pkg/fileembed" +) + +const GaeSourceRoot = "source_root" + +var ( + Files *fileembed.Files + IsAppEngine, IsProdAppEngine bool +) diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/fileembed_appengine.go b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/fileembed_appengine.go new file mode 100644 index 00000000..de57dbbb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/fileembed_appengine.go @@ -0,0 +1,28 @@ +// +build appengine + +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ui + +import ( + "appengine" +) + +func init() { + IsAppEngine = true + IsProdAppEngine = !appengine.IsDevAppServer() +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/fileembed_normal.go b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/fileembed_normal.go new file mode 100644 index 00000000..523e80aa --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/fileembed_normal.go @@ -0,0 +1,29 @@ +// +build !appengine + +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ui + +import ( + "camlistore.org/pkg/fileembed" +) + +func init() { + Files = &fileembed.Files{ + Listable: true, + } +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/filetree.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/filetree.css new file mode 100644 index 00000000..9e77b373 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/filetree.css @@ -0,0 +1,32 @@ +/* +Copyright 2011 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +.cam-filetree-page { + font: 16px/1.4 normal Arial, sans-serif; +} +.cam-filetree-nav:before { + content: "["; +} +.cam-filetree-nav:after { + content: "]"; +} +.cam-filetree-newp { + text-decoration: underline; + cursor: pointer; + color: darkgreen; + margin-left: .4em; + font-size: 80%; +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/filetree.html b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/filetree.html new file mode 100644 index 00000000..480094e5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/filetree.html @@ -0,0 +1,23 @@ + + + + Filetree + + + + + + + + +

    FileTree for

    + +
    + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/filetree.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/filetree.js new file mode 100644 index 00000000..938a7d1e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/filetree.js @@ -0,0 +1,223 @@ +/* +Copyright 2011 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.FiletreePage'); + +goog.require('goog.dom'); +goog.require('goog.events.EventType'); +goog.require('goog.ui.Component'); + +goog.require('cam.ServerConnection'); + +// @param {cam.ServerType.DiscoveryDocument} config Global config of the current server this page is being rendered for. +// @param {goog.dom.DomHelper=} opt_domHelper DOM helper to use. +// @extends {goog.ui.Component} +// @constructor +cam.FiletreePage = function(config, opt_domHelper) { + goog.base(this, opt_domHelper); + + this.config_ = config; + this.connection_ = new cam.ServerConnection(config); + +}; +goog.inherits(cam.FiletreePage, goog.ui.Component); + +cam.FiletreePage.prototype.indentStep_ = 20; + +function getDirBlobrefParam() { + var blobRef = getQueryParam('d'); + return (blobRef && isPlausibleBlobRef(blobRef)) ? blobRef : null; +} + +// Returns the first value from the query string corresponding to |key|. Returns null if the key isn't present. 
+getQueryParam = function(key) { + var params = document.location.search.substring(1).split('&'); + for (var i = 0; i < params.length; ++i) { + var parts = params[i].split('='); + if (parts.length == 2 && decodeURIComponent(parts[0]) == key) + return decodeURIComponent(parts[1]); + } + return null; +}; + +// Returns true if the passed-in string might be a blobref. +isPlausibleBlobRef = function(blobRef) { + return /^\w+-[a-f0-9]+$/.test(blobRef); +}; + +cam.FiletreePage.prototype.enterDocument = function() { + cam.FiletreePage.superClass_.enterDocument.call(this); + var blobref = getDirBlobrefParam(); + + if (blobref) { + this.connection_.search({blobRefPrefix: blobref}, cam.ServerConnection.DESCRIBE_REQUEST, null, null, + goog.bind(this.handleDescribeBlob_, this, blobref) + ); + } +} + +// @param {string} blobref blob to describe. +// @param {cam.ServerType.DescribeResponse} response +cam.FiletreePage.prototype.handleDescribeBlob_ = +function(blobref, response) { + if (!response || !response.description || !response.description.meta) { + alert("did not get fully described response"); + return; + } + var meta = response.description.meta; + var binfo = meta[blobref]; + if (!binfo) { + alert("Error describing blob " + blobref); + return; + } + if (binfo.camliType != "directory") { + alert("Does not contain a directory"); + return; + } + this.connection_.getBlobContents( + blobref, + goog.bind(function(data) { + var finfo = JSON.parse(data); + var fileName = finfo.fileName; + var curDir = document.getElementById('curDir'); + curDir.innerHTML = "" + fileName + ""; + this.buildTree_(); + }, this), + function(msg) { + alert("failed to get blobcontents: " + msg); + } + ); +} + +cam.FiletreePage.prototype.buildTree_ = function() { + var blobref = getDirBlobrefParam(); + var children = goog.dom.getElement("children"); + this.connection_.getFileTree(blobref, + goog.bind(function(jres) { + this.onChildrenFound_(children, 0, jres); + }, this) + ); +} + +// @param {string} div 
node used as root for the tree +// @param {number} depth how deep we are in the tree, for indenting +// @param {cam.ServerType.DescribeResponse} jres describe result +cam.FiletreePage.prototype.onChildrenFound_ = function(div, depth, jres) { + var indent = depth// cam.FiletreePage.prototype.indentStep_; + div.innerHTML = ""; + for (var i = 0; i < jres.children.length; i++) { + var children = jres.children; + var pdiv = goog.dom.createElement("div"); + var alink = goog.dom.createElement("a"); + alink.style.paddingLeft=indent + "px" + alink.id = children[i].blobRef; + switch (children[i].type) { + case 'directory': + goog.dom.setTextContent(alink, "+ " + children[i].name); + goog.events.listen(alink, + goog.events.EventType.CLICK, + goog.bind(function (b, d) { + this.unFold_(b, d); + }, this, alink.id, depth), + false, this + ); + break; + case 'file': + goog.dom.setTextContent(alink, " " + children[i].name); + alink.href = "./?b=" + alink.id; + break; + default: + alert("not a file or dir"); + break; + } + var newPerm = goog.dom.createElement("span"); + newPerm.className = "cam-filetree-newp"; + goog.dom.setTextContent(newPerm, "P"); + goog.events.listen(newPerm, + goog.events.EventType.CLICK, + this.newPermWithContent_(alink.id), + false, this + ); + goog.dom.appendChild(pdiv, alink); + goog.dom.appendChild(pdiv, newPerm); + goog.dom.appendChild(div, pdiv); + } +} + +cam.FiletreePage.prototype.newPermWithContent_ = function(content) { + var fun = function(e) { + this.connection_.createPermanode( + goog.bind(function(permanode) { + this.connection_.newAddAttributeClaim( + permanode, "camliContent", content, + function() { + alert("permanode created"); + }, + function(msg) { + // TODO(mpl): "cancel" new permanode + alert("set permanode content failed: " + msg); + } + ); + }, this), + function(msg) { + alert("create permanode failed: " + msg); + } + ); + } + return goog.bind(fun, this); +} + +// @param {string} blobref dir to unfold. 
+// @param {number} depth so we know how much to indent. +cam.FiletreePage.prototype.unFold_ = function(blobref, depth) { + var node = goog.dom.getElement(blobref); + var div = goog.dom.createElement("div"); + this.connection_.getFileTree(blobref, + goog.bind(function(jres) { + this.onChildrenFound_(div, depth+1, jres); + insertAfter(node, div); + goog.events.removeAll(node); + goog.events.listen(node, + goog.events.EventType.CLICK, + goog.bind(function(b, d) { + this.fold_(b, d); + }, this, blobref, depth), + false, this + ); + }, this) + ); +} + +function insertAfter( referenceNode, newNode ) { + // nextSibling X2 because of the "P" span + referenceNode.parentNode.insertBefore( newNode, referenceNode.nextSibling.nextSibling ); +} + +// @param {string} nodeid id of the node to fold. +// @param {depth} depth so we know how much to indent. +cam.FiletreePage.prototype.fold_ = function(nodeid, depth) { + var node = goog.dom.getElement(nodeid); + // nextSibling X2 because of the "P" span + node.parentNode.removeChild(node.nextSibling.nextSibling); + goog.events.removeAll(node); + goog.events.listen(node, + goog.events.EventType.CLICK, + goog.bind(function(b, d) { + this.unFold_(b, d); + }, this, nodeid, depth), + false, this + ); +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/folder.png b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/folder.png new file mode 100644 index 00000000..286c0382 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/folder.png differ diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/foursquare-logo.png b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/foursquare-logo.png new file mode 100644 index 00000000..0cd2786c Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/foursquare-logo.png differ diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/hash_worker.js 
b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/hash_worker.js new file mode 100644 index 00000000..7defe42a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/hash_worker.js @@ -0,0 +1,30 @@ +/* +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// These two lines are required setup to make goog.require() work throughout the codebase. +var CLOSURE_BASE_PATH = 'closure/goog/'; +importScripts('closure/goog/bootstrap/webworkers.js', 'closure/goog/base.js', 'deps.js'); + +goog.require('cam.blob'); +goog.require('cam.WorkerMessageRouter'); + +// This is a simple webworker that expects to receive a single message containing a file, and sends back that file's sha1 hash. +// We do this in a worker because we observed that doing it on the main thread decreased the framerate significantly, even when chunking, and even when the chunk sizes were as small as 32k. 
+ +var router = new cam.WorkerMessageRouter(goog.global); +router.registerHandler('ref', function(msg, sendReply) { + sendReply(cam.blob.refFromDOMBlob(msg)); +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/header.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/header.css new file mode 100644 index 00000000..26e2c834 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/header.css @@ -0,0 +1,167 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +@import (less) "prefix-free.css"; + + +@cam-header-bg: #3a3a3a; + + +.cam-header { + cursor: default; + font-family: 'Open Sans', sans-serif; + font-weight: 700; + font-size: 13px; + left: 0; + position: fixed; + top: 0; + z-index: 1; + .transform(translateZ(0)); +} + +.cam-header-main { + background: @cam-header-bg; + border-collapse: true; + border-spacing: 0; + box-shadow: 0.1em 0 0.5em 0.1em rgba(0, 0, 0, 0.4); + color: #e4e4e4; + height: 38px; + position: relative; + width: 100%; + z-index: 3; +} + +.cam-header.cam-header-sub-active .cam-header-main { + box-shadow: none; +} + +.cam-header.cam-header-sub-active .cam-header-sub { + box-shadow: 0.1em 0 0.5em 0.1em rgba(0, 0, 0, 0.4); + .transform(translate3d(0, 0, 0)); +} + +.cam-header-item { + padding: 0; + position: relative; + vertical-align: middle; + white-space: nowrap; +} + +.cam-header-title { + margin-top: 10px; + padding-right: 2em; +} + +.cam-header-title span { + font-size: 14px; +} + +.cam-header-title span:first-child { + margin-right: 5px; +} + +.cam-header-menu-dropdown { + box-shadow: 0.1em 0 1em 0.3em rgba(0, 0, 0, 0.25); + cursor: default; + left: 0; + font-weight: 500; + position: absolute; + top: 38px; + white-space: nowrap; + min-width: 140px; + max-width: 280px; + .transition-transform(100ms ease-out); + z-index: 2; +} + +.cam-header-menu-item { + background: @cam-header-bg; + border-top: 1px solid #666; + color: #eee; + cursor: pointer; + display: block; + padding: 10px 24px; + position: relative; + text-decoration: none; + .transition(background-color 100ms ease-out); + overflow: hidden; + text-overflow: ellipsis; +} + +.cam-header-menu-item-icon { + position: absolute; + left: 8px; + top: 13px; +} + +.cam-header-menu-item-error { + color: rgb(255, 157, 148); + .transition(background-color 100ms ease-out); +} + +.cam-header-menu-item:hover { + background-color: #444; +} + +.cam-header-main input { + color: white; + height: 30px; + width: 100%; + background: #444; + border: 1px 
solid #555; + outline: none; + padding: 1ex; + font-size: 110%; + font-family: default; + font-weight: normal; + .transition(border-color 100ms ease-out); +} + +.cam-header-main input:focus { + background: #4c4c4c; + border-color: #888; +} + +.cam-header-main-controls { + padding-left: 1em; +} + +.cam-header-main-controls.cam-header-main-controls-empty { + padding-left: 6px; +} + +.cam-header-main-controls a { + color: #eee; + display: inline-block; + height: 38px; + font-family: 'Open Sans', sans-serif; + font-weight: 500; + font-size: 13px; + line-height: 38px; + padding-left: 1em; + padding-right: 1em; + text-decoration: none; + white-space: nowrap; +} + +.cam-header-main-controls a:hover { + background: #555; +} + +.cam-header-main-controls a.cam-header-main-control-active { + height: 38px; + border-bottom: 3px solid rgb(232,139,131); +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/header.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/header.js new file mode 100644 index 00000000..23a75adb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/header.js @@ -0,0 +1,320 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +goog.provide('cam.Header'); + +goog.require('goog.Uri'); + +goog.require('cam.reactUtil'); +goog.require('cam.SpritedImage'); + +cam.Header = React.createClass({ + displayName: 'Header', + + PIGGY_NATIVE_WIDTH: 88, + PIGGY_NATIVE_HEIGHT: 62, + PIGGY_MARGIN: { + LEFT: 1, + RIGHT: 4, + TOP: -1, + BOTTOM: 1, + }, + + SEARCH_MARGIN: { + LEFT: 180, + RIGHT: 145, + }, + + propTypes: { + currentSearch: React.PropTypes.string, + errors: React.PropTypes.arrayOf( + React.PropTypes.shape({ + error: React.PropTypes.string.isRequired, + onClick: React.PropTypes.func, + url: React.PropTypes.string, + }).isRequired + ).isRequired, + height: React.PropTypes.number.isRequired, + helpURL: React.PropTypes.instanceOf(goog.Uri).isRequired, + homeURL: React.PropTypes.instanceOf(goog.Uri).isRequired, + importersURL: React.PropTypes.instanceOf(goog.Uri).isRequired, + mainControls: React.PropTypes.arrayOf(React.PropTypes.renderable), + onNewPermanode: React.PropTypes.func, + onSearch: React.PropTypes.func, + searchRootsURL: React.PropTypes.instanceOf(goog.Uri).isRequired, + statusURL: React.PropTypes.instanceOf(goog.Uri).isRequired, + timer: React.PropTypes.shape({setTimeout:React.PropTypes.func.isRequired, clearTimeout:React.PropTypes.func.isRequired}).isRequired, + width: React.PropTypes.number.isRequired, + }, + + focusSearch: function() { + this.getSearchNode_().focus(); + this.getSearchNode_().select(); + }, + + getInitialState: function() { + return { + currentSearch: this.props.currentSearch, + menuVisible: false, + }; + }, + + componentWillReceiveProps: function(nextProps) { + if (nextProps.currentSearch != this.props.currentSearch) { + this.setState({currentSearch: nextProps.currentSearch}); + } + }, + + render: function() { + return React.DOM.div( + { + className: 'cam-header', + style: { + width: this.props.width, + }, + }, + React.DOM.table( + { + className: 'cam-header-main', + }, + React.DOM.tr(null, + this.getPiggy_(), + this.getTitle_(), + this.getSearchbox_(), + 
this.getMainControls_() + ) + ), + this.getMenuDropdown_() + ) + }, + + getPiggy_: function() { + var props = { + sheetWidth: 11, + spriteWidth: this.PIGGY_NATIVE_WIDTH, + spriteHeight: this.PIGGY_NATIVE_HEIGHT, + style: cam.reactUtil.getVendorProps({ + position: 'absolute', + left: this.PIGGY_MARGIN.LEFT, + top: this.PIGGY_MARGIN.TOP, + transform: 'scale(' + this.getPiggyScale_() + ')', + transformOrigin: '0 0', + }), + }; + + var image = function() { + if (this.props.errors.length) { + return cam.SpritedAnimation(cam.object.extend(props, { + key: 'error', + loopDelay: 10 * 1000, + numFrames: 65, + src: 'glitch/npc_piggy__x1_too_much_nibble_png_1354829441.png', + })); + } else { + return cam.SpritedImage(cam.object.extend(props, { + key: 'ok', + index: 5, + src: 'glitch/npc_piggy__x1_chew_png_1354829433.png', + })); + } + }; + + return React.DOM.td( + { + className: 'cam-header-item', + style: { + minWidth: this.getPiggyWidth_() + this.PIGGY_MARGIN.LEFT + this.PIGGY_MARGIN.RIGHT, + }, + onClick: this.handleClick_, + onMouseEnter: this.handleMouseEnter_, + onMouseLeave: this.handleMouseLeave_, + }, + image.call(this) + ) + }, + + getTitle_: function() { + return React.DOM.td( + { + className: 'cam-header-item cam-header-title', + onClick: this.handleClick_, + onMouseEnter: this.handleMouseEnter_, + onMouseLeave: this.handleMouseLeave_, + }, + React.DOM.span(null, 'Pudgy'), + React.DOM.span(null, '\u25BE') + ); + }, + + getSearchbox_: function() { + return React.DOM.td( + { + className: 'cam-header-item', + style: { + width: '100%', + } + }, + React.DOM.form( + { + onSubmit: this.handleSearchSubmit_, + }, + React.DOM.input( + { + onChange: this.handleSearchChange_, + placeholder: 'Search...', + ref: 'searchbox', + value: this.state.currentSearch, + } + ) + ) + ) + }, + + getMainControls_: function() { + return React.DOM.td( + { + className: React.addons.classSet({ + 'cam-header-item': true, + 'cam-header-main-controls': true, + 'cam-header-main-controls-empty': 
!this.props.mainControls.length, + }), + }, + this.props.mainControls + ); + }, + + getMenuDropdown_: function() { + var errorItems = this.props.errors.map(function(err) { + var children = [ + React.DOM.i({className: 'fa fa-exclamation-circle cam-header-menu-item-icon'}), + err.error + ]; + return this.getMenuItem_(children, err.url, err.onClick, 'cam-header-menu-item-error'); + }, this); + + return React.DOM.div( + { + className: 'cam-header-menu-dropdown', + onClick: this.handleDropdownClick_, + onMouseEnter: this.handleMouseEnter_, + onMouseLeave: this.handleMouseLeave_, + style: cam.reactUtil.getVendorProps({ + transform: 'translate3d(0, ' + this.getMenuTranslate_() + '%, 0)', + }), + }, + this.getMenuItem_('Home', this.props.homeURL), + this.getMenuItem_('Upload...', null, this.props.onUpload), + + // TODO(aa): Create a new permanode UI that delays creating the permanode until the user confirms, then change this to a link to that UI. + // TODO(aa): Also I keep going back and forth about whether we should call this 'permanode' or 'set' in the UI. Hrm. + this.getMenuItem_('New set', null, this.props.onNewPermanode), + + this.getMenuItem_('Importers', this.props.importersURL), + this.getMenuItem_('Server status', this.props.statusURL), + this.getMenuItem_('Search roots', this.props.searchRootsURL), + this.getMenuItem_('Help', this.props.helpURL), + errorItems + ); + }, + + getMenuItem_: function(text, opt_link, opt_onClick, opt_class) { + if (!text || (!opt_onClick && !opt_link)) { + return null; + } + + var className = 'cam-header-menu-item'; + if (opt_class) { + className += ' ' + opt_class; + } + + var ctor = opt_link ? React.DOM.a : React.DOM.div; + return ctor( + { + className: className, + href: opt_link, + onClick: opt_onClick, + }, + text + ); + }, + + getMenuTranslate_: function() { + if (this.state.menuVisible) { + return 0; + } else { + // 110% because it has a shadow that we don't want to double-up with the shadow from the header. 
+ return -110; + } + }, + + getPiggyHeight_: function() { + return this.props.height - this.PIGGY_MARGIN.TOP - this.PIGGY_MARGIN.BOTTOM; + }, + + getPiggyWidth_: function() { + return this.getPiggyScale_() * this.PIGGY_NATIVE_WIDTH; + }, + + getPiggyScale_: function() { + return this.getPiggyHeight_() / this.PIGGY_NATIVE_HEIGHT; + }, + + handleClick_: function() { + this.setState({menuVisible: !this.state.menuVisible}); + }, + + handleMouseEnter_: function() { + this.clearTimer_(); + this.setTimer_(true); + }, + + handleMouseLeave_: function() { + this.clearTimer_(); + this.setTimer_(false); + }, + + handleDropdownClick_: function(e) { + this.clearTimer_(); + this.setState({menuVisible:false}); + }, + + setTimer_: function(show) { + this.timerId_ = this.props.timer.setTimeout(this.handleTimer_.bind(null, show), 250); + }, + + clearTimer_: function() { + if (this.timerId_) { + this.props.timer.clearTimeout(this.timerId_); + } + }, + + handleTimer_: function(show) { + this.setState({menuVisible:show}); + }, + + handleSearchChange_: function(e) { + this.setState({currentSearch: e.target.value}); + }, + + handleSearchSubmit_: function(e) { + this.props.onSearch(this.getSearchNode_().value); + e.preventDefault(); + }, + + getSearchNode_: function() { + return this.refs['searchbox'].getDOMNode(); + }, +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/icon_16716.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/icon_16716.svg new file mode 100644 index 00000000..1e5a8147 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/icon_16716.svg @@ -0,0 +1,58 @@ + +image/svg+xml + + + + + + \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/icon_27307.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/icon_27307.svg new file mode 100644 index 00000000..81278d3c --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/icon_27307.svg @@ -0,0 +1,68 @@ + +image/svg+xml + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/image_detail.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/image_detail.js new file mode 100644 index 00000000..c9de256f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/image_detail.js @@ -0,0 +1,183 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.ImageDetail'); + +goog.require('cam.BlobItemVideoContent'); +goog.require('cam.Thumber'); + +// Renders the guts of the detail view for images. 
+cam.ImageDetail = React.createClass({ + displayName: 'ImageDetail', + + IMG_MARGIN: 20, + PIGGY_WIDTH: 88, + PIGGY_HEIGHT: 62, + + propTypes: { + backwardPiggy: React.PropTypes.bool.isRequired, + height: React.PropTypes.number.isRequired, + permanodeMeta: React.PropTypes.object, + resolvedMeta: React.PropTypes.object.isRequired, + width: React.PropTypes.number.isRequired, + }, + + isVideo_: function() { + return !this.isImage_(); + }, + + isImage_: function() { + return Boolean(this.props.resolvedMeta.image); + }, + + componentWillReceiveProps: function(nextProps) { + if (this.props == nextProps || this.props.resolvedMeta.blobRef != nextProps.resolvedMeta.blobRef) { + this.thumber_ = nextProps.resolvedMeta.image && cam.Thumber.fromImageMeta(nextProps.resolvedMeta); + this.setState({imgHasLoaded: false}); + } + }, + + componentWillMount: function() { + this.componentWillReceiveProps(this.props, true); + }, + + render: function() { + this.imgSize_ = this.getImgSize_(); + return React.DOM.div({className:'detail-view', style: this.getStyle_()}, + this.getImg_(), + this.getPiggy_() + ); + }, + + getSinglePermanodeAttr_: function(name) { + return cam.permanodeUtils.getSingleAttr(this.props.permanodeMeta.permanode, name); + }, + + onImgLoad_: function() { + this.setState({imgHasLoaded:true}); + }, + + getImg_: function() { + var transition = React.addons.TransitionGroup({transitionName: 'detail-img'}, []); + if (this.imgSize_) { + var ctor = this.props.resolvedMeta.image ? React.DOM.img : React.DOM.video; + transition.props.children.push( + ctor({ + className: React.addons.classSet({ + 'detail-view-img': true, + 'detail-view-img-loaded': this.isImage_() ? this.state.imgHasLoaded : true, + }), + controls: true, + // We want each image to have its own node in the DOM so that during the crossfade, we don't see the image jump to the next image's size. + key: 'img' + this.props.resolvedMeta.blobRef, + onLoad: this.isImage_() ? this.onImgLoad_ : null, + src: this.isImage_() ? 
this.thumber_.getSrc(this.imgSize_.height) : './download/' + this.props.resolvedMeta.blobRef + '/' + this.props.resolvedMeta.file.fileName, + style: this.getCenteredProps_(this.imgSize_.width, this.imgSize_.height) + }) + ); + } + return transition; + }, + + getPiggy_: function() { + var transition = React.addons.TransitionGroup({transitionName: 'detail-piggy'}, []); + if (this.isImage_() && !this.state.imgHasLoaded) { + transition.props.children.push( + cam.SpritedAnimation({ + key: 'piggy-sprite', + src: 'glitch/npc_piggy__x1_walk_png_1354829432.png', + className: React.addons.classSet({ + 'detail-view-piggy': true, + 'detail-view-piggy-backward': this.props.backwardPiggy + }), + numFrames: 24, + spriteWidth: this.PIGGY_WIDTH, + spriteHeight: this.PIGGY_HEIGHT, + sheetWidth: 8, + style: this.getCenteredProps_(this.PIGGY_WIDTH, this.PIGGY_HEIGHT) + })); + } + return transition; + }, + + getCenteredProps_: function(w, h) { + var avail = new goog.math.Size(this.props.width, this.props.height); + return { + top: (avail.height - h) / 2, + left: (avail.width - w) / 2, + width: w, + height: h + } + }, + + getImgSize_: function() { + if (this.isVideo_()) { + return new goog.math.Size(this.props.width, this.props.height); + } + var rawSize = new goog.math.Size(this.props.resolvedMeta.image.width, this.props.resolvedMeta.image.height); + var available = new goog.math.Size( + this.props.width - this.IMG_MARGIN * 2, + this.props.height - this.IMG_MARGIN * 2); + if (rawSize.height <= available.height && rawSize.width <= available.width) { + return rawSize; + } + return rawSize.scaleToFit(available); + }, + + getStyle_: function() { + return { + width: this.props.width, + height: this.props.height + } + }, +}); + +cam.ImageDetail.getAspect = function(blobref, searchSession) { + if (!blobref) { + return null; + } + + var rm = searchSession.getResolvedMeta(blobref); + var pm = searchSession.getMeta(blobref); + + if (!pm) { + return null; + } + + if (pm.camliType != 'permanode') 
{ + pm = null; + } + + // We don't handle camliContentImage like BlobItemImage.getHandler does because that only tells us what image to display in the search results. It doesn't actually make the permanode an image or anything. + if (rm && (rm.image || cam.BlobItemVideoContent.isVideo(rm))) { + return { + fragment: 'image', + title: 'Image', + createContent: function(size, backwardPiggy) { + return cam.ImageDetail({ + backwardPiggy: backwardPiggy, + key: 'image', + height: size.height, + permanodeMeta: pm, + resolvedMeta: rm, + width: size.width, + }); + }, + }; + } else { + return null; + } +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/index.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/index.css new file mode 100644 index 00000000..b5ec048f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/index.css @@ -0,0 +1,49 @@ +/* +Copyright 2012 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +@import (less) "prefix-free.css"; + + +/* http://www.paulirish.com/2012/box-sizing-border-box-ftw/ */ +*, *:before, *:after { + .box-sizing(border-box); +} + +body { + margin: 0; + overflow-x: hidden; + overflow-y: scroll; +} + +.cam-index-page { + font: 16px/1.4 normal Arial, sans-serif; +} + +.cam-index-title { + display: inline-block; +} + +.cam-content-wrap { + position: relative; +} + +.cam-unselectable { + .user-select(none); +} + +.cam-index-upload-dialog>* { + vertical-align: middle; +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/index.html b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/index.html new file mode 100644 index 00000000..6c4c47e3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/index.html @@ -0,0 +1,100 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/index.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/index.js new file mode 100644 index 00000000..50a24476 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/index.js @@ -0,0 +1,1084 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// IndexPage is the top level React component class that owns all the other +// components of the web UI. 
+// See the React documentation and in particular +// https://facebook.github.io/react/docs/component-specs.html to learn about +// components. +goog.provide('cam.IndexPage'); + +goog.require('goog.array'); +goog.require('goog.dom'); +goog.require('goog.dom.classlist'); +goog.require('goog.events.EventHandler'); +goog.require('goog.format'); +goog.require('goog.functions'); +goog.require('goog.labs.Promise'); +goog.require('goog.object'); +goog.require('goog.string'); +goog.require('goog.Uri'); + +goog.require('cam.BlobDetail'); +goog.require('cam.BlobItemContainerReact'); +goog.require('cam.BlobItemDemoContent'); +goog.require('cam.BlobItemFoursquareContent'); +goog.require('cam.BlobItemGenericContent'); +goog.require('cam.BlobItemImageContent'); +goog.require('cam.BlobItemTwitterContent'); +goog.require('cam.BlobItemVideoContent'); +goog.require('cam.blobref'); +goog.require('cam.DetailView'); +goog.require('cam.Dialog'); +goog.require('cam.DirectoryDetail'); +goog.require('cam.Header'); +goog.require('cam.Navigator'); +goog.require('cam.PermanodeDetail'); +goog.require('cam.permanodeUtils'); +goog.require('cam.reactUtil'); +goog.require('cam.SearchSession'); +goog.require('cam.ServerConnection'); +goog.require('cam.Sidebar'); +goog.require('cam.TagsControl'); + +cam.IndexPage = React.createClass({ + displayName: 'IndexPage', + + SIDEBAR_OPEN_WIDTH_: 250, + + HEADER_HEIGHT_: 38, + SEARCH_PREFIX_: { + RAW: 'raw' + }, + THUMBNAIL_SIZE_: 200, + + SEARCH_SESSION_CACHE_SIZE_: 3, + + // Note that these are ordered by priority. 
+ BLOB_ITEM_HANDLERS_: [ + cam.BlobItemDemoContent.getHandler, + cam.BlobItemFoursquareContent.getHandler, + cam.BlobItemTwitterContent.getHandler, + cam.BlobItemImageContent.getHandler, + cam.BlobItemVideoContent.getHandler, + cam.BlobItemGenericContent.getHandler + ], + + BLOBREF_PATTERN_: new RegExp('^' + cam.blobref.PATTERN + '$'), + + propTypes: { + availWidth: React.PropTypes.number.isRequired, + availHeight: React.PropTypes.number.isRequired, + config: React.PropTypes.object.isRequired, + eventTarget: React.PropTypes.shape({addEventListener:React.PropTypes.func.isRequired}).isRequired, + history: React.PropTypes.shape({pushState:React.PropTypes.func.isRequired, replaceState:React.PropTypes.func.isRequired, go:React.PropTypes.func.isRequired, state:React.PropTypes.object}).isRequired, + openWindow: React.PropTypes.func.isRequired, + location: React.PropTypes.shape({href:React.PropTypes.string.isRequired, reload:React.PropTypes.func.isRequired}).isRequired, + scrolling: cam.BlobItemContainerReact.originalSpec.propTypes.scrolling, + serverConnection: React.PropTypes.instanceOf(cam.ServerConnection).isRequired, + timer: cam.Header.originalSpec.propTypes.timer, + }, + + // Invoked once right before initial rendering. This is essentially IndexPage's + // constructor. We populate non-React helpers that live for the entire lifetime + // of IndexPage here. 
+ componentWillMount: function() { + this.baseURL_ = null; + this.dragEndTimer_ = 0; + this.navigator_ = null; + this.searchSessionCache_ = []; + this.targetSearchSession_ = null; + this.childSearchSession_ = null; + + this.eh_ = new goog.events.EventHandler(this); + + var newURL = new goog.Uri(this.props.location.href); + this.baseURL_ = newURL.resolve(new goog.Uri(this.props.config.uiRoot)); + + this.navigator_ = new cam.Navigator(this.props.eventTarget, this.props.location, this.props.history); + this.navigator_.onWillNavigate = this.handleWillNavigate_; + this.navigator_.onDidNavigate = this.handleDidNavigate_; + + this.handleWillNavigate_(newURL); + this.handleDidNavigate_(); + }, + + // Invoked right after initial rendering. + componentDidMount: function() { + // TODO(aa): This supports some of the old iframed pages. We can remove it once they are dead. + goog.global.getSearchSession = function() { + return this.childSearchSession_; + }.bind(this); + this.eh_.listen(this.props.eventTarget, 'keypress', this.handleKeyPress_); + this.eh_.listen(this.props.eventTarget, 'keyup', this.handleKeyUp_); + }, + + componentWillUnmount: function() { + this.eh_.dispose(); + this.clearDragTimer_(); + }, + + // Invoked once before everything else on initial rendering. Values are + // subsequently in this.state. We use this to set the initial state and + // also to document what state fields are possible + getInitialState: function() { + return { + backwardPiggy: false, + currentURL: null, + currentSet: '', + dropActive: false, + selection: {}, + serverStatus: null, + + // TODO: This should be calculated by whether selection is empty, and not need separate state. + sidebarVisible: false, + + uploadDialogVisible: false, + totalBytesToUpload: 0, + totalBytesComplete: 0, + }; + }, + + // render() is called by React every time a component is determined to need + // re-rendering. This is typically caused by a call to setState() or a parent + // component re-rendering. 
+ render: function() { + var aspects = this.getAspects_(); + var selectedAspect = goog.array.findIndex(aspects, function(v) { + return v.fragment == this.state.currentURL.getFragment(); + }, this); + + if (selectedAspect == -1) { + selectedAspect = 0; + } + + var contentSize = new goog.math.Size(this.props.availWidth, this.props.availHeight - this.HEADER_HEIGHT_); + return React.DOM.div({onDragEnter:this.handleDragStart_, onDragOver:this.handleDragStart_, onDrop:this.handleDrop_}, + this.getHeader_(aspects, selectedAspect), + React.DOM.div( + { + className: 'cam-content-wrap', + style: { + top: this.HEADER_HEIGHT_, + }, + }, + aspects[selectedAspect] && aspects[selectedAspect].createContent(contentSize, this.state.backwardPiggy) + ), + this.getSidebar_(aspects[selectedAspect]), + this.getUploadDialog_() + ); + }, + + setSelection_: function(selection) { + this.props.history.replaceState(cam.object.extend(this.props.history.state, { + selection: selection, + }), '', this.props.location.href); + + this.setState({selection: selection}); + this.setState({sidebarVisible: !goog.object.isEmpty(selection)}); + }, + + getTargetBlobref_: function(opt_url) { + var url = opt_url || this.state.currentURL; + var suffix = url.getPath().substr(this.baseURL_.getPath().length); + + // TODO(aa): Need to implement something like ref.go that knows about the other hash types. 
+ var match = suffix.match(this.BLOBREF_PATTERN_); + return match && match[0]; + }, + + getAspects_: function() { + var childFrameClickHandler = this.navigator_.navigate.bind(this.navigator_); + var target = this.getTargetBlobref_(); + var getAspect = function(f) { + return f(target, this.targetSearchSession_); + }.bind(this); + + var specificAspects = [ + cam.ImageDetail.getAspect, + cam.DirectoryDetail.getAspect.bind(null, this.baseURL_, childFrameClickHandler), + ].map(getAspect).filter(goog.functions.identity); + + var generalAspects = [ + this.getSearchAspect_.bind(null, specificAspects), + cam.PermanodeDetail.getAspect.bind(null, this.props.serverConnection, this.props.timer), + cam.BlobDetail.getAspect.bind(null, this.getDetailURL_, this.props.serverConnection), + ].map(getAspect).filter(goog.functions.identity); + + return specificAspects.concat(generalAspects); + }, + + getSearchAspect_: function(specificAspects, blobref, targetSearchSession) { + if (blobref) { + var m = targetSearchSession.getMeta(blobref); + if (!m || !m.permanode) { + // We have a target, but it's not a permanode. So don't show the contents view. + // TODO(aa): Maybe we do want to for directories though? + return null; + } + + // If the permanode already has children, we always show the container view. + // Otherwise, show the container view only if there is no more specific type. + var showSearchAspect = false; + if (cam.permanodeUtils.isContainer(m.permanode)) { + showSearchAspect = true; + } else if (!cam.permanodeUtils.getCamliNodeType(m.permanode) && specificAspects.length == 0) { + showSearchAspect = true; + } + + if (!showSearchAspect) { + return null; + } + } + + // This can happen when a user types a raw (JSON) query that is invalid. + if (!this.childSearchSession_) { + return null; + } + + return { + title: blobref ? 'Contents' : 'Search', + fragment: blobref ? 
'contents': 'search', + createContent: this.getBlobItemContainer_.bind(null, this), + }; + }, + + handleDragStart_: function(e) { + this.clearDragTimer_(); + e.preventDefault(); + this.dragEndTimer_ = window.setTimeout(this.handleDragStop_, 2000); + this.setState({ + dropActive: true, + uploadDialogVisible: false, + }); + }, + + handleDragStop_: function() { + this.clearDragTimer_(); + this.setState({dropActive: false}); + }, + + clearDragTimer_: function() { + if (this.dragEndTimer_) { + window.clearTimeout(this.dragEndTimer_); + this.dragEndTimer_ = 0; + } + }, + + onUploadStart_: function(files) { + var numFiles = files.length; + var totalBytes = Array.prototype.reduce.call(files, function(sum, file) { return sum + file.size; }, 0); + + this.setState({ + dropActive: false, + totalBytesToUpload: totalBytes, + totalBytesComplete: 0, + }); + + console.log('Uploading %d files (%d bytes)...', numFiles, totalBytes); + }, + + onUploadProgressUpdate_: function(file) { + var completedBytes = this.state.totalBytesComplete + file.size; + + this.setState({ + totalBytesComplete: completedBytes + }); + + console.log('Uploaded %d of %d bytes', completedBytes, this.state.totalBytesToUpload); + }, + + onUploadComplete_: function() { + console.log('Upload complete!'); + + this.setState({ + totalBytesToUpload: 0, + totalBytesComplete: 0, + }); + }, + + handleDrop_: function(e) { + if (!e.nativeEvent.dataTransfer.files) { + return; + } + + e.preventDefault(); + + var files = e.nativeEvent.dataTransfer.files; + var sc = this.props.serverConnection; + + this.onUploadStart_(files); + + goog.labs.Promise.all( + Array.prototype.map.call(files, function(file) { + return uploadFile(file) + .then(fetchExistingPermanode) + .then(createPermanodeIfNotExists) + .then(nameResults) + .then(createPermanodeAssociations.bind(this)) + .thenCatch(function(e) { + console.error('File upload fall down go boom. 
file: %s, error: %s', file.name, e); + }) + .then(this.onUploadProgressUpdate_.bind(this, file)); + }.bind(this)) + ).thenCatch(function(e) { + console.error('File upload failed with error: %s', e); + }).then(this.onUploadComplete_); + + function uploadFile(file) { + var uploadFile = new goog.labs.Promise(sc.uploadFile.bind(sc, file)); + return goog.labs.Promise.all([uploadFile]); + } + + function fetchExistingPermanode(blobIds) { + var fileRef = blobIds[0]; + var fileUploaded = new goog.labs.Promise.resolve(fileRef); + var getPermanode = new goog.labs.Promise(sc.getPermanodeWithContent.bind(sc, fileRef)); + return goog.labs.Promise.all([fileUploaded, getPermanode]); + } + + function createPermanodeIfNotExists(results) { + var fileRef = results[0]; + var permanode = results[1]; + if (!permanode) { + var fileUploaded = new goog.labs.Promise.resolve(fileRef); + var createPermanode = new goog.labs.Promise(sc.createPermanode.bind(sc)); + return goog.labs.Promise.all([fileUploaded, createPermanode]); + } + // Empty values so the next in chain knows that we're in the "permanode already exists" case. + return goog.labs.Promise.resolve(["", ""]); + } + + // 'readable-ify' the blob references returned from upload/create + function nameResults(blobIds) { + return { + 'fileRef': blobIds[0], + 'permanodeRef': blobIds[1] + }; + } + + function createPermanodeAssociations(refs) { + if (refs.permanodeRef == "") { + // Any value would do, but boolean helps make it clear that we end + // here, by resolving the file upload promise chain. 
+ return goog.labs.Promise.resolve(true); + } + + // associate uploaded file to new permanode + var camliContent = new goog.labs.Promise(sc.newSetAttributeClaim.bind(sc, refs.permanodeRef, 'camliContent', refs.fileRef)); + var promises = [camliContent]; + + // if currently viewing a set, make new permanode a member of the set + var parentPermanodeRef = this.getTargetBlobref_(); + if (parentPermanodeRef) { + var camliMember = new goog.labs.Promise(sc.newAddAttributeClaim.bind(sc, parentPermanodeRef, 'camliMember', refs.permanodeRef)); + promises.push(camliMember); + } + + return goog.labs.Promise.all(promises); + } + }, + + handleWillNavigate_: function(newURL) { + if (!goog.string.startsWith(newURL.toString(), this.baseURL_.toString())) { + return false; + } + + var targetBlobref = this.getTargetBlobref_(newURL); + this.updateTargetSearchSession_(targetBlobref, newURL); + this.updateChildSearchSession_(targetBlobref, newURL); + this.pruneSearchSessionCache_(); + this.setState({ + backwardPiggy: false, + currentURL: newURL, + }); + return true; + }, + + handleDidNavigate_: function() { + var s = this.props.history.state && this.props.history.state.selection; + this.setSelection_(s || {}); + }, + + updateTargetSearchSession_: function(targetBlobref, newURL) { + this.targetSearchSession_ = null; + if (targetBlobref) { + var query = this.queryAsBlob_(targetBlobref); + var parentPermanode = newURL.getParameterValue('p'); + if (parentPermanode) { + query = this.queryFromParentPermanode_(parentPermanode); + } else { + var queryString = newURL.getParameterValue('q'); + if (queryString) { + query = this.queryFromSearchParam_(queryString); + } + } + this.targetSearchSession_ = this.getSearchSession_(targetBlobref, query); + } + }, + + updateChildSearchSession_: function(targetBlobref, newURL) { + var query = ' '; + if (targetBlobref) { + query = this.queryFromParentPermanode_(targetBlobref); + } else { + var queryString = newURL.getParameterValue('q'); + if (queryString) { + 
query = this.queryFromSearchParam_(queryString); + } + } + this.childSearchSession_ = this.getSearchSession_(null, query); + }, + + queryFromSearchParam_: function(queryString) { + // TODO(aa): Remove this when the server can do something like the 'raw' operator. + if (goog.string.startsWith(queryString, this.SEARCH_PREFIX_.RAW + ':')) { + try { + return JSON.parse(queryString.substring(this.SEARCH_PREFIX_.RAW.length + 1)); + } catch (e) { + console.error('Raw search is invalid JSON', e); + return null; + } + } else { + return queryString; + } + }, + + queryFromParentPermanode_: function(blobRef) { + return { + permanode: { + relation: { + relation: 'parent', + any: { blobRefPrefix: blobRef }, + }, + }, + }; + }, + + queryAsBlob_: function(blobRef) { + return { + blobRefPrefix: blobRef, + } + }, + + // Finds an existing cached SearchSession that meets criteria, or creates a new one. + // + // If opt_query is present, the returned query must be exactly equivalent. + // If opt_targetBlobref is present, the returned query must have current results that contain opt_targetBlobref. Otherwise, the returned query must contain the first result. + // + // If only opt_targetBlobref is set, then any query that happens to currently contain that blobref is acceptable to the caller. + getSearchSession_: function(opt_targetBlobref, opt_query) { + // This whole business of reusing search session relies on the assumption that we use the same describe rules for both detail queries and search queries. 
+ var queryString = JSON.stringify(opt_query); + + var cached = goog.array.findIndex(this.searchSessionCache_, function(ss) { + if (opt_targetBlobref) { + if (!ss.getMeta(opt_targetBlobref)) { + return false; + } + if (!opt_query) { + return true; + } + } + + if (JSON.stringify(ss.getQuery()) != queryString) { + return false; + } + + if (!opt_targetBlobref) { + return !ss.getAround(); + } + + // If there's a targetBlobref, we require that it is not at the very edge of the results so that we can implement left/right in detail views. + var targetIndex = goog.array.findIndex(ss.getCurrentResults().blobs, function(b) { + return b.blob == opt_targetBlobref; + }); + return (targetIndex > 0) && (targetIndex < (ss.getCurrentResults().blobs.length - 1)); + }); + + if (cached > -1) { + this.searchSessionCache_.splice(0, 0, this.searchSessionCache_.splice(cached, 1)[0]); + return this.searchSessionCache_[0]; + } + + console.log('Creating new search session for query %s', queryString); + var ss = new cam.SearchSession(this.props.serverConnection, this.baseURL_.clone(), opt_query, opt_targetBlobref); + this.eh_.listen(ss, cam.SearchSession.SEARCH_SESSION_CHANGED, function() { + this.forceUpdate(); + }); + this.eh_.listen(ss, cam.SearchSession.SEARCH_SESSION_STATUS, function(e) { + this.setState({ + serverStatus: e.status, + }); + }); + this.eh_.listen(ss, cam.SearchSession.SEARCH_SESSION_ERROR, function() { + this.forceUpdate(); + }); + ss.loadMoreResults(); + this.searchSessionCache_.splice(0, 0, ss); + return ss; + }, + + pruneSearchSessionCache_: function() { + for (var i = this.SEARCH_SESSION_CACHE_SIZE_; i < this.searchSessionCache_.length; i++) { + this.searchSessionCache_[i].close(); + } + + this.searchSessionCache_.length = Math.min(this.searchSessionCache_.length, this.SEARCH_SESSION_CACHE_SIZE_); + }, + + getHeader_: function(aspects, selectedAspectIndex) { + // We don't show the chooser if there's only one thing to choose from. 
+ if (aspects.length == 1) { + aspects = []; + } + + // TODO(aa): It would be cool to normalize the query and single target case, by supporting searches like is:, that way we can always show something in the searchbox, even when we're not in a listview. + var target = this.getTargetBlobref_(); + var query = ''; + if (target) { + query = 'ref:' + target; + } else { + query = this.state.currentURL.getParameterValue('q') || ''; + } + + return cam.Header( + { + currentSearch: query, + errors: this.getErrors_(), + height: 38, + helpURL: this.baseURL_.resolve(new goog.Uri(this.props.config.helpRoot)), + homeURL: this.baseURL_, + importersURL: this.baseURL_.resolve(new goog.Uri(this.props.config.importerRoot)), + mainControls: aspects.map(function(val, idx) { + return React.DOM.a( + { + key: val.title, + className: React.addons.classSet({ + 'cam-header-main-control-active': idx == selectedAspectIndex, + }), + href: this.state.currentURL.clone().setFragment(val.fragment).toString(), + }, + val.title + ); + }, this), + onUpload: this.handleUpload_, + onNewPermanode: this.handleCreateSetWithSelection_, + onSearch: this.setSearch_, + searchRootsURL: this.getSearchRootsURL_(), + statusURL: this.baseURL_.resolve(new goog.Uri(this.props.config.statusRoot)), + ref: 'header', + timer: this.props.timer, + width: this.props.availWidth, + } + ) + }, + + handleNewPermanode_: function() { + this.props.serverConnection.createPermanode(this.getDetailURL_.bind(this)); + }, + + getSearchRootsURL_: function() { + return this.baseURL_.clone().setParameterValue( + 'q', + this.SEARCH_PREFIX_.RAW + ':' + JSON.stringify({ + permanode: { + attr: 'camliRoot', + numValue: { + min: 1 + } + } + }) + ); + }, + + handleSelectAsCurrentSet_: function() { + this.setState({ + currentSet: goog.object.getAnyKey(this.state.selection), + }); + this.setSelection_({}); + alert('Now, select the items to add to this set and click "Add to picked set" in the sidebar.\n\n' + + 'Sorry this is lame, we\'re working on 
it.'); + }, + + handleAddToSet_: function() { + this.addMembersToSet_(this.state.currentSet, goog.object.getKeys(this.state.selection)); + alert('Done!'); + }, + + handleUpload_: function() { + this.setState({ + uploadDialogVisible: true, + }); + }, + + handleCreateSetWithSelection_: function() { + var selection = goog.object.getKeys(this.state.selection); + this.props.serverConnection.createPermanode(function(permanode) { + this.props.serverConnection.newSetAttributeClaim(permanode, 'title', 'New set', function() { + this.addMembersToSet_(permanode, selection); + }.bind(this)); + }.bind(this)); + }, + + addMembersToSet_: function(permanode, blobrefs) { + var numComplete = -1; + var callback = function() { + if (++numComplete == blobrefs.length) { + this.setSelection_({}); + this.refreshIfNecessary_(); + this.navigator_.navigate(this.getDetailURL_(permanode)); + } + }.bind(this); + + callback(); + + blobrefs.forEach(function(br) { + this.props.serverConnection.newAddAttributeClaim(permanode, 'camliMember', br, callback); + }.bind(this)); + }, + + handleClearSelection_: function() { + this.setSelection_({}); + }, + + handleDeleteSelection_: function() { + var blobrefs = goog.object.getKeys(this.state.selection); + var msg = 'Delete'; + if (blobrefs.length > 1) { + msg += goog.string.subs(' %s items?', blobrefs.length); + } else { + msg += ' item?'; + } + if (!confirm(msg)) { + return null; + } + + var numDeleted = 0; + blobrefs.forEach(function(br) { + this.props.serverConnection.newDeleteClaim(br, function() { + if (++numDeleted == blobrefs.length) { + this.setSelection_({}); + this.refreshIfNecessary_(); + } + }.bind(this)); + }.bind(this)); + }, + + handleOpenWindow_: function(url) { + this.props.openWindow(url); + }, + + handleKeyPress_: function(e) { + if (e.target.tagName == 'INPUT' || e.target.tagName == 'TEXTAREA') { + return; + } + + switch (String.fromCharCode(e.charCode)) { + case '/': { + this.refs['header'].focusSearch(); + e.preventDefault(); + break; 
+ } + + case '|': { + window.__debugConsoleClient = { + getSelectedItems: function() { + return this.state.selection; + }.bind(this), + serverConnection: this.props.serverConnection, + }; + window.open('debug_console.html', 'debugconsole', 'width=400,height=300'); + break; + } + } + }, + + handleKeyUp_: function(e) { + var isEsc = (e.keyCode == 27); + var isRight = (e.keyCode == 39); + var isLeft = (e.keyCode == 37); + + if (isEsc) { + // TODO: This isn't right, it should go back to the context URL if there is one. + this.navigator_.navigate(this.baseURL_); + return; + } + + if (!isRight && !isLeft) { + return; + } + + if (!this.targetSearchSession_) { + return; + } + + var blobs = this.targetSearchSession_.getCurrentResults().blobs; + var target = this.getTargetBlobref_(); + var idx = goog.array.findIndex(blobs, function(item) { + return item.blob == target; + }); + + if (isRight) { + if (idx >= (blobs.length - 1)) { + return; + } + idx++; + } else { + if (idx <= 0) { + return; + } + idx--; + } + + var url = this.getDetailURL_(blobs[idx].blob, this.state.currentURL.getFragment()); + ['q', 'p'].forEach(function(p) { + var v = this.state.currentURL.getParameterValue(p); + if (v) { + url.setParameterValue(p, v); + } + }, this); + this.navigator_.navigate(url); + this.setState({ + backwardPiggy: isLeft, + }); + }, + + handleDetailURL_: function(blobref) { + return this.getChildDetailURL_(blobref); + }, + + getChildDetailURL_: function(blobref, opt_fragment) { + var query = this.state.currentURL.getParameterValue('q'); + var targetBlobref = this.getTargetBlobref_(); + var url = this.getDetailURL_(blobref, opt_fragment); + if (targetBlobref) { + url.setParameterValue('p', targetBlobref); + } else { + url.setParameterValue('q', query || ' '); + } + return url; + }, + + getDetailURL_: function(blobref, opt_fragment) { + var query = this.state.currentURL.getParameterValue('q'); + var targetBlobref = this.getTargetBlobref_(); + return url = 
this.baseURL_.clone().setPath(this.baseURL_.getPath() + blobref).setFragment(opt_fragment || ''); + }, + + setSearch_: function(query) { + var searchURL; + var match = query.match(/^ref:(.+)/); + if (match) { + searchURL = this.getDetailURL_(match[1]); + } else { + searchURL = this.baseURL_.clone().setParameterValue('q', query); + } + this.navigator_.navigate(searchURL); + }, + + getSelectAsCurrentSetItem_: function() { + if (goog.object.getCount(this.state.selection) != 1) { + return null; + } + + var blobref = goog.object.getAnyKey(this.state.selection); + var m = this.childSearchSession_.getMeta(blobref); + if (!m || m.camliType != 'permanode') { + return null; + } + + return React.DOM.button( + { + key:'selectascurrent', + onClick:this.handleSelectAsCurrentSet_ + }, + 'Add items to set' + ); + }, + + getAddToCurrentSetItem_: function() { + if (!this.state.currentSet) { + return null; + } + + return React.DOM.button( + { + key:'addtoset', + onClick:this.handleAddToSet_ + }, + 'Add to picked set' + ); + }, + + getCreateSetWithSelectionItem_: function() { + return React.DOM.button( + { + key:'createsetwithselection', + onClick:this.handleCreateSetWithSelection_ + }, + 'Create set with items' + ); + }, + + getClearSelectionItem_: function() { + return React.DOM.button( + { + key:'clearselection', + onClick:this.handleClearSelection_ + }, + 'Clear selection' + ); + }, + + getDeleteSelectionItem_: function() { + return React.DOM.button( + { + key:'deleteselection', + onClick:this.handleDeleteSelection_ + }, + 'Delete items' + ); + }, + + getViewOriginalSelectionItem_: function() { + if (goog.object.getCount(this.state.selection) != 1) { + return null; + } + + var blobref = goog.object.getAnyKey(this.state.selection); + var rm = this.childSearchSession_.getResolvedMeta(blobref); + if (!rm || !rm.file) { + return null; + } + + var fileName = ''; + if (rm.file.fileName) { + fileName = goog.string.subs('/%s', rm.file.fileName); + } + + var downloadUrl = 
goog.string.subs('%s%s%s', this.props.config.downloadHelper, rm.blobRef, fileName); + return React.DOM.button( + { + key:'viewSelection', + onClick: this.handleOpenWindow_.bind(null, downloadUrl), + }, + 'View original' + ); + }, + + getSidebar_: function(selectedAspect) { + if (selectedAspect) { + if (selectedAspect.fragment == 'search' || selectedAspect.fragment == 'contents') { + var count = goog.object.getCount(this.state.selection); + return cam.Sidebar( { + isExpanded: this.state.sidebarVisible, + header: React.DOM.span( + { + className: 'header', + }, + goog.string.subs('%s selected item%s', count, count > 1 ? 's' : '') + ), + mainControls: [ + { + "displayTitle": "Update tags", + "control": this.getTagsControl_() + } + ].filter(goog.functions.identity), + selectionControls: [ + this.getClearSelectionItem_(), + this.getCreateSetWithSelectionItem_(), + this.getSelectAsCurrentSetItem_(), + this.getAddToCurrentSetItem_(), + this.getDeleteSelectionItem_(), + this.getViewOriginalSelectionItem_(), + ].filter(goog.functions.identity), + selectedItems: this.state.selection + }); + } + } + + return null; + }, + + getTagsControl_: function() { + return cam.TagsControl( + { + selectedItems: this.state.selection, + searchSession: this.childSearchSession_, + serverConnection: this.props.serverConnection + } + ); + }, + + isUploading_: function() { + return this.state.totalBytesToUpload > 0; + }, + + getUploadDialog_: function() { + if (!this.state.uploadDialogVisible && !this.state.dropActive && !this.state.totalBytesToUpload) { + return null; + } + + var piggyWidth = 88; + var piggyHeight = 62; + var borderWidth = 18; + var w = this.props.availWidth * 0.8; + var h = this.props.availHeight * 0.8; + var iconProps = { + key: 'icon', + sheetWidth: 10, + spriteWidth: piggyWidth, + spriteHeight: piggyHeight, + style: { + 'margin-right': 3, + position: 'relative', + display: 'inline-block', + } + }; + + function getIcon() { + if (this.isUploading_()) { + return 
cam.SpritedAnimation(cam.object.extend(iconProps, { + numFrames: 48, + src: 'glitch/npc_piggy__x1_chew_png_1354829433.png', + })); + } else if (this.state.dropActive) { + return cam.SpritedAnimation(cam.object.extend(iconProps, { + loopDelay: 4000, + numFrames: 48, + src: 'glitch/npc_piggy__x1_look_screen_png_1354829434.png', + startFrame: 6, + })); + } else { + return cam.SpritedImage(cam.object.extend(iconProps, { + index: 0, + src: 'glitch/npc_piggy__x1_look_screen_png_1354829434.png', + })); + } + } + + function getText() { + if (this.isUploading_()) { + return goog.string.subs('Uploaded %s (%s%)', + goog.format.numBytesToString(this.state.totalBytesComplete, 2), + getUploadProgressPercent.call(this)); + } else { + return 'Drop files here to upload...'; + } + } + + function getUploadProgressPercent() { + if (!this.state.totalBytesToUpload) { + return 0; + } + + return Math.round(100 * (this.state.totalBytesComplete / this.state.totalBytesToUpload)); + } + + return cam.Dialog( + { + availWidth: this.props.availWidth, + availHeight: this.props.availHeight, + width: w, + height: h, + borderWidth: borderWidth, + onClose: this.state.uploadDialogVisible ? 
this.handleCloseUploadDialog_ : null, + }, + React.DOM.div( + { + className: 'cam-index-upload-dialog', + style: { + 'text-align': 'center', + position: 'relative', + left: -piggyWidth / 2, + top: (h - piggyHeight - borderWidth * 2) / 2, + }, + }, + getIcon.call(this), + getText.call(this) + ) + ); + }, + + handleCloseUploadDialog_: function() { + this.setState({ + uploadDialogVisible: false, + }); + }, + + handleSelectionChange_: function(newSelection) { + this.setSelection_(newSelection); + }, + + getBlobItemContainer_: function() { + var sidebarClosedWidth = this.props.availWidth; + var sidebarOpenWidth = sidebarClosedWidth - this.SIDEBAR_OPEN_WIDTH_; + var scale = sidebarOpenWidth / sidebarClosedWidth; + + return cam.BlobItemContainerReact({ + key: 'blobitemcontainer', + ref: 'blobItemContainer', + availHeight: this.props.availHeight, + availWidth: this.props.availWidth, + detailURL: this.handleDetailURL_, + handlers: this.BLOB_ITEM_HANDLERS_, + history: this.props.history, + onSelectionChange: this.handleSelectionChange_, + scale: scale, + scaleEnabled: this.state.sidebarVisible, + scrolling: this.props.scrolling, + searchSession: this.childSearchSession_, + selection: this.state.selection, + style: this.getBlobItemContainerStyle_(), + thumbnailSize: this.THUMBNAIL_SIZE_, + }); + }, + + getBlobItemContainerStyle_: function() { + return { + left: 0, + overflowY: this.state.dropActive ? 
'hidden' : '', + position: 'absolute', + top: 0, + }; + }, + + getContentWidth_: function() { + return this.props.availWidth; + }, + + refreshIfNecessary_: function() { + if (this.targetSearchSession_) { + this.targetSearchSession_.refreshIfNecessary(); + } + if (this.childSearchSession_) { + this.childSearchSession_.refreshIfNecessary(); + } + }, + + getErrors_: function() { + var errors = (this.state.serverStatus && this.state.serverStatus.errors) || []; + if ((this.targetSearchSession_ && this.targetSearchSession_.hasSocketError()) || + (this.childSearchSession_ && this.childSearchSession_.hasSocketError())) { + errors.push({ + error: 'WebSocket error - click to reload', + onClick: this.props.location.reload.bind(null, this.props.location, true), + }); + } + return errors; + }, +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/js-notes.txt b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/js-notes.txt new file mode 100644 index 00000000..cbf76e2a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/js-notes.txt @@ -0,0 +1,16 @@ +FormData +http://hacks.mozilla.org/2010/05/formdata-interface-coming-to-firefox/ + +window.atob / window.btoa +https://developer.mozilla.org/en/DOM/window.atob +http://demos.hacks.mozilla.org/openweb/imageUploader/js/extends/xhr.js + +File Writer +http://www.w3.org/TR/file-writer-api/ + +File API +http://www.w3.org/TR/2009/WD-FileAPI-20091117/ + +Uint8Array +https://developer.mozilla.org/en/JavaScript_typed_arrays/Uint8Array + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/magnifying_glass.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/magnifying_glass.svg new file mode 100644 index 00000000..a66a72f7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/magnifying_glass.svg @@ -0,0 +1,73 @@ + + + + + + + + + + image/svg+xml + + + + + + + + + + diff --git 
a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/math.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/math.js new file mode 100644 index 00000000..da680b59 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/math.js @@ -0,0 +1,20 @@ +goog.provide('cam.math'); + +goog.require('goog.math.Coordinate'); +goog.require('goog.math.Size'); + +// @param goog.math.Size subject +// @param goog.math.Size frame +// @param =boolean opt_bleed If true, subject will be scaled such that its area is greater or equal to frame. Otherwise, it will be scaled such that its area is less than or equal to frame. +// @return goog.math.Size +cam.math.scaleToFit = function(subject, frame, opt_bleed) { + var s = (!opt_bleed && subject.aspectRatio() > frame.aspectRatio()) || (opt_bleed && subject.aspectRatio() <= frame.aspectRatio()) ? frame.width / subject.width : frame.height / subject.height; + return subject.scale(s); +}; + +// @param goog.math.Size subject +// @param goog.math.Size frame +// @return goog.math.Coordinate the left and top coordinate subject should be positioned at relative to frame to be centered within it. This might be negative if subject is larger than frame. 
+cam.math.center = function(subject, frame) { + return new goog.math.Coordinate((frame.width - subject.width) / 2, (frame.height - subject.height) / 2); +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/mobile.html b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/mobile.html new file mode 100644 index 00000000..f310eae5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/mobile.html @@ -0,0 +1,53 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/mobile_setup.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/mobile_setup.css new file mode 100644 index 00000000..99d422ec --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/mobile_setup.css @@ -0,0 +1,70 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +.mobile-setup-page { + background-color: rgb(51, 51, 51); + color: rgba(255, 255, 255, 1); + font-family: 'Open Sans', sans-serif; + font-size: 19px; + margin: 0 auto; + padding-top: 9px; + width: 456px; +} + +.mobile-setup-page img { + height: 456px; + width: 456px; +} + +.mobile-setup-page label { + display: block; + padding: 8px 0; +} + +.mobile-setup-page input { + background: none; + border: 1px rgba(255, 255, 255, 0); + border-style: solid none; + color: rgba(255, 255, 255, 1); + cursor: default; + font-family: 'Open Sans', sans-serif; + font-size: 19px; + margin: 0; + outline: none; + padding: 0; +} + +.mobile-setup-page .mobile-setup-auto-upload input { + float: right; + margin: 9px; +} + +.mobile-setup-page .mobile-setup-helptext { + display: block; + font-size: 16px; + font-weight: lighter; +} + +.mobile-setup-page .mobile-setup-max-cache-size input[type="text"] { + border-bottom: 1px solid rgba(255, 255, 255, 0.25); + width: 5em; + text-align: right; +} + +.mobile-setup-page input[type="text"] { + border-bottom: 1px solid rgba(255, 255, 255, 0.25); + width: 100%; +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/mobile_setup.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/mobile_setup.js new file mode 100644 index 00000000..b741b651 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/mobile_setup.js @@ -0,0 +1,129 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +goog.provide('cam.MobileSetupView'); + +goog.require('goog.Uri'); + +cam.MobileSetupView = React.createClass({ + displayName: 'MobileSetupView', + + propTypes: { + baseURL: React.PropTypes.object.isRequired, + defaultUsername: React.PropTypes.string.isRequired, + }, + + getInitialState: function() { + var serverURL = this.props.baseURL.clone().setPath('').setQuery(''); + return { + autoUpload: false, + // TODO(wathiede): autopopulate this, not sure how. + certFingerprint: '', + maxCacheSize: 256, + server: serverURL.toString() + }; + }, + + getQRURL_: function() { + // TODO(wathiede): I'm not sure what the Android and iPhone requirements are for registering a URL handler are. If they can't be the same for both platforms, then we'll need this to be conditional based on a checkbox in the form. + var settingsURL = goog.Uri.parse('camli://settings/'); + if (this.state.username != '') { + settingsURL.setParameterValue('username', this.state.username); + } + if (this.state.server != '') { + settingsURL.setParameterValue('server', this.state.server); + } + if (this.state.autoUpload) { + settingsURL.setParameterValue('autoUpload', 1); + } + settingsURL.setParameterValue('maxCacheSize', this.state.maxCacheSize); + if (this.state.certFingerprint != '') { + settingsURL.setParameterValue('certFingerprint', this.state.certFingerprint); + } + + var qrURL = this.props.baseURL.clone(); + qrURL.setPath(qrURL.getPath() + '/qr/').setParameterValue('url', settingsURL.toString()); + return qrURL.toString(); + }, + + handleServerChange_: function(e) { + this.setState({server: e.target.value}); + }, + + handleUsernameChange_: function(e) { + this.setState({username: e.target.value}); + }, + + handleAutoUploadChange_: function(e) { + this.setState({autoUpload: e.target.checked}); + }, + + handleMaxCacheSizeChange_: function(e) { + this.setState({maxCacheSize: e.target.value}); + }, + 
+ handleCertFingerprintChange_: function(e) { + this.setState({certFingerprint: e.target.value}); + }, + + render: function() { + return ( + React.DOM.div({}, + React.DOM.img({src:this.getQRURL_()}), + React.DOM.form({ref:'form', onSubmit:this.handleChange_}, + React.DOM.label({}, 'Camlistore Server:', + React.DOM.input({ + defaultValue: this.state.server, + onChange: this.handleServerChange_, + placeholder: 'e.g. https://foo.example.com or example.com:3179', + type: 'text' + })), + React.DOM.label({}, 'Username:', + React.DOM.input({ + defaultValue: this.props.defaultUsername, + onChange: this.handleUsernameChange_, + placeholder: '', + type: 'text' + })), + React.DOM.label({className: 'mobile-setup-auto-upload'}, + React.DOM.input({ + onChange: this.handleAutoUploadChange_, + type: 'checkbox' + }), + 'Auto-Upload', + React.DOM.span({className: 'mobile-setup-helptext'}, 'Upload SD card files as created')), + // TODO(wathiede): add suboptions to auto-upload? + React.DOM.label({className: 'mobile-setup-max-cache-size'}, + 'Maximum cache size', + React.DOM.input({ + defaultValue: this.state.maxCacheSize, + onChange: this.handleMaxCacheSizeChange_, + type: 'text' + }), + 'MB'), + React.DOM.label({}, 'Self-signed cert fingerprint:', + React.DOM.input({ + onChange: this.handleCertFingerprintChange_, + placeholder: '', + type: 'text' + })) + ))); + }, + + handleChange_: function() { + var u = this.getQRURL_(); + console.log(u); + }, +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/navigator.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/navigator.js new file mode 100644 index 00000000..84ea7823 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/navigator.js @@ -0,0 +1,127 @@ +/* +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.Navigator'); + +goog.require('cam.object'); +goog.require('goog.Uri'); + +// Navigator intercepts various types of browser navigations and gives its client an opportunity to decide whether the navigation should be handled with JavaScript or not. +// Currently, 'click' events on hyperlinks and 'popstate' events are intercepted. Clients can also call navigate() to manually initiate navigation. +// +// @param Window win The window to listen for click and popstate events within to potentially interpret as navigations. +// @param Location location Network navigation will be executed using this location object. +// @param History history PushState navigation will be executed using this history object. +cam.Navigator = function(win, location, history) { + this.win_ = win; + this.location_ = location; + this.history_ = history; + this.handlers_ = []; + + // This is needed so that in handlePopState_, we can differentiate navigating back to this frame from the initial load. + // We can't just initialize to {} because there can already be interesting state (e.g., in the case of the user pressing the refresh button). + history.replaceState(cam.object.extend(history.state), '', location.href); + + this.win_.addEventListener('click', this.handleClick_.bind(this)); + this.win_.addEventListener('popstate', this.handlePopState_.bind(this)); +}; + +cam.Navigator.shouldHandleClick = function(e) { + // We are conservative and only try to handle left clicks that are unmodified. 
+ // For any other kind of click, assume that something fancy (e.g., context menu, open in new tab, etc) is about to happen and let it happen as normal. + if (e.button != 0 || e.altKey || e.ctrlKey || e.metaKey || e.shiftKey) { + return null; + } + + for (var elm = e.target; ; elm = elm.parentElement) { + if (!elm) { + return null; + } + if (elm.nodeName == 'A' && elm.href) { + return elm; + } + } + + throw new Error('Should never get here'); + return null; +}; + +// Client should set this to handle navigation. +// +// This is called before the navigation has actually taken place: location.href will refer to the old URL, not the new one. Also, history.state will refer to previous state. +// +// If client returns true, then Navigator considers the navigation handled locally, and will add an entry to history using pushState(). If this method returns false, Navigator lets the navigation fall through to the browser. +// @param goog.Uri newURL The URL to navigate to. +// @return boolean Whether the navigation was handled locally. +cam.Navigator.prototype.onWillNavigate = function(newURL) {}; + +// Called after a local (pushState) navigation has been performed. At this point, location.href and history.state have been updated. +cam.Navigator.prototype.onDidNavigate = function() {}; + +// Programmatically initiate a navigation to a URL. Useful for triggering navigations from things other than hyperlinks. +// @param goog.Uri url The URL to navigate to. +// @return boolean Whether the navigation was handled locally. +cam.Navigator.prototype.navigate = function(url) { + if (this.dispatchImpl_(url, true)) { + return true; + } + this.location_.href = url.toString(); + return false; +}; + +// Handles navigations initiated via clicking a hyperlink. 
+cam.Navigator.prototype.handleClick_ = function(e) { + var elm = cam.Navigator.shouldHandleClick(e); + if (!elm) { + return; + } + + try { + if (this.dispatchImpl_(new goog.Uri(elm.href), true)) { + e.preventDefault(); + } + } catch (ex) { + // Prevent the navigation so that we can see the error. + e.preventDefault(); + throw ex; + } + // Otherwise, the event continues bubbling and navigation should happen as normal via the browser. +}; + +// Handles navigation via popstate. +cam.Navigator.prototype.handlePopState_ = function(e) { + // WebKit and older Chrome versions will fire a spurious initial popstate event after load. + // We can differentiate this event from ones corresponding to frames we generated ourselves with pushState() or replaceState() because our own frames always have a non-empty state. + // See: http://stackoverflow.com/questions/6421769/popstate-on-pages-load-in-chrome + if (!e.state) { + return; + } + if (!this.dispatchImpl_(new goog.Uri(this.location_.href), false)) { + this.location_.reload(); + } +}; + +cam.Navigator.prototype.dispatchImpl_ = function(url, addState) { + if (this.onWillNavigate(url)) { + if (addState) { + // Pass an empty object rather than null or undefined so that we can filter out spurious initial popstate events in handlePopState_. + this.history_.pushState({}, '', url.toString()); + } + this.onDidNavigate(); + return true; + } + return false; +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/navigator_test.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/navigator_test.js new file mode 100644 index 00000000..e8f35750 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/navigator_test.js @@ -0,0 +1,146 @@ +/* +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.require('goog.Uri'); +goog.require('goog.events.EventTarget'); +var assert = require('assert'); + +goog.require('cam.Navigator'); + +var MockLocation = function() { + goog.base(this); + this.href = ''; + this.reloadCount = 0; +}; +goog.inherits(MockLocation, goog.events.EventTarget); +MockLocation.prototype.reload = function() { + this.reloadCount++; +}; + +var MockHistory = function() { + this.states = [null]; +}; +MockHistory.prototype.pushState = function(a, b, url) { + this.states.push({state:a, url:url}); +}; +MockHistory.prototype.replaceState = function(a, b, url) { + this.states[this.states.length - 1] = {state:a, url:url}; +} + +var Handler = function() { + this.lastURL = null; + this.returnsTrue = false; + this.handle = this.handle.bind(this); +}; +Handler.prototype.handle = function(url) { + this.lastURL = url; + return this.returnsTrue; +}; + +describe('cam.Navigator', function() { + var mockWindow, mockLocation, mockHistory, handler, navigator; + var url = new goog.Uri('http://www.camlistore.org/foobar'); + + beforeEach(function() { + mockWindow = new goog.events.EventTarget(); + mockLocation = new MockLocation(); + mockHistory = new MockHistory(); + handler = new Handler(); + navigator = new cam.Navigator(mockWindow, mockLocation, mockHistory); + navigator.onWillNavigate = handler.handle; + }); + + it ('#constructor - seed initial state', function() { + assert.deepEqual(mockHistory.states, [{state:{}, url:''}]); + }); + + it('#navigate - no handler', function() { + // We should do network navigation. 
+ navigator.onWillNavigate = function(){}; + var handledLocally = navigator.navigate(url); + assert.equal(mockLocation.href, url.toString()); + assert.equal(mockHistory.states.length, 1); + assert.equal(handledLocally, false); + }); + + it('#navigate - handler returns false', function() { + // Both handlers should get called, we should do network navigation. + var handledLocally = navigator.navigate(url); + assert.equal(handler.lastURL, url); + assert.equal(mockLocation.href, url.toString()); + assert.equal(mockHistory.states.length, 1); + assert.equal(handledLocally, false); + }); + + it('#navigate - handler returns true', function() { + // Both handlers should get called, we should do pushState() navigation. + handler.returnsTrue = true; + var handledLocally = navigator.navigate(url); + assert.equal(handler.lastURL, url); + assert.equal(mockLocation.href, ''); + assert.deepEqual(mockHistory.states, [{state:{}, url:''}, {state:{}, url:url.toString()}]); + assert.equal(handledLocally, true); + }); + + it('#handleClick_ - handled', function() { + handler.returnsTrue = true; + var ev = new goog.events.Event('click'); + ev.button = 0; + ev.target = { + nodeName: 'A', + href: url.toString() + }; + mockWindow.dispatchEvent(ev); + assert.equal(mockLocation.href, ''); + assert.deepEqual(mockHistory.states, [{state:{}, url:''}, {state:{}, url:url.toString()}]); + }); + + it('#handleClick_ - not handled', function() { + var ev = new goog.events.Event('click'); + ev.button = 0; + ev.target = { + nodeName: 'A', + href: url.toString() + }; + mockWindow.dispatchEvent(ev); + assert.equal(mockLocation.href, ''); + assert.deepEqual(mockHistory.states, [{state:{}, url:''}]); + assert.equal(ev.defaultPrevented, false); + }); + + it('#handlePopState_ - handled', function() { + handler.returnsTrue = true; + mockWindow.dispatchEvent({type:'popstate', state:{}}); + assert.equal(mockLocation.reloadCount, 0); + assert.deepEqual(mockHistory.states, [{state:{}, url:''}]); + }); + + 
it('#handlePopState_ - not handled', function() { + mockWindow.dispatchEvent({type:'popstate', state:{}}); + assert.equal(mockLocation.reloadCount, 1); + assert.deepEqual(mockHistory.states, [{state:{}, url:''}]); + }); + + it('#handlePopState_ - ignore initial popstate', function() { + // Fire a popstate with no state property. This simulates what happens in buggy browsers onload. This one should be ignored. + mockWindow.dispatchEvent({type:'popstate', state:null}); + assert.equal(mockLocation.reloadCount, 0); + + // Now fire a popstate with a state property it should be handled. + mockWindow.dispatchEvent({type:'popstate', state:{}}); + assert.equal(mockLocation.reloadCount, 1); + }); +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/new_permanode.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/new_permanode.svg new file mode 100644 index 00000000..c1463f73 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/new_permanode.svg @@ -0,0 +1,107 @@ + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/node.png b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/node.png new file mode 100644 index 00000000..8cb6df02 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/node.png differ diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/object.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/object.js new file mode 100644 index 00000000..029f278c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/object.js @@ -0,0 +1,15 @@ +/** + * Object related utilities beyond what exist in Closure. 
+ */ +goog.provide('cam.object'); + +cam.object.extend = function(o, n) { + var obj = {}; + if (o) { + goog.mixin(obj, o); + } + if (n) { + goog.mixin(obj, n); + } + return obj; +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode.css new file mode 100644 index 00000000..d81663cf --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode.css @@ -0,0 +1,51 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +.cam-permanode-page { + font: 16px/1.4 normal Arial, sans-serif; +} +.cam-permanode-nav:before { + content: "["; +} +.cam-permanode-nav:after { + content: "]"; +} +.cam-permanode-del { + text-decoration: underline; + cursor: pointer; + color: darkred; + margin-left: .4em; + font-size: 80%; +} +.cam-permanode-tag-c { + margin-right: .5em; +} +.cam-permanode-tag { + font-style: italic; +} +.cam-permanode-dnd { + border: 2px dashed black; + min-height: 250px; + padding: 10px; +} +.cam-permanode-dnd-item { + margin: 0.25em; + border: 1px solid #888; + padding: 0.25em; +} +.cam-permanode-dnd-over { + background: #eee; +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode.html b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode.html new file mode 100644 index 00000000..61a0e630 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode.html @@ -0,0 +1,103 @@ + + + + + + Permanode + + + + + + + + + + +

    Permanode

    + +

    + Permalink: + + +

    + +
    +

    + Title: + + +

    +
    + +
    +

    + Tags: + + + +

    + +
    +

    + Access: + + + + ... with URL: + + + +

    +
    + +
    + +
    + +
    +
    + +
    + +
    +
    + + +
    +

    + or drag & drop files here +

    +
    
    +	
    + +

    Current object attributes

    +
    
    +
    +	
    +
    +
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode_detail.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode_detail.css
    new file mode 100644
    index 00000000..f41471af
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode_detail.css
    @@ -0,0 +1,82 @@
    +/*
    +Copyright 2014 The Camlistore Authors
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +@import (less) "prefix-free.css";
    +
    +
    +.cam-permanode-detail {
    +	font-family: 'Open Sans', sans-serif;
    +	margin: 1.5em 2em;
    +}
    +
    +.cam-permanode-detail h1 {
    +	font-size: 1.5em;
    +}
    +
    +.cam-permanode-detail table {
    +	border-collapse: collapse;
    +	border-spacing: 0;
    +	width: 100%;
    +}
    +
    +.cam-permanode-detail th {
    +	border-bottom: 1px solid black;
    +	cursor: pointer;
    +	padding: 0.6em 1em 0.4em;
    +	text-align: left;
    +}
    +
    +.cam-permanode-detail th i {
    +	margin-left: 5px;
    +}
    +
    +.cam-permanode-detail td {
    +	border: 1px solid #aaa;
    +	padding: 0.6em 1em 0.4em;
    +	text-align: left;
    +}
    +
    +.cam-permanode-detail tr>*:nth-child(1) {
    +	width: 50%;
    +}
    +
    +.cam-permanode-detail tr>*:nth-child(2) {
    +	width: 50%;
    +}
    +
    +.cam-permanode-detail tr>*:nth-child(3) {
    +	color: #444;
    +	text-align: center;
    +	width: 0;
    +}
    +
    +.cam-permanode-detail-delete-attribute {
    +	cursor: pointer;
    +}
    +
    +.cam-permanode-detail td input[type=text] {
    +	border: none;
    +	font: inherit;
    +	width: 100%;
    +}
    +
    +.cam-permanode-detail-status {
    +	background: #eee;
    +	bottom: 1em;
    +	left: 1em;
    +	padding: 1em;
    +	position: fixed;
    +}
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode_detail.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode_detail.js
    new file mode 100644
    index 00000000..eb07dea7
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode_detail.js
    @@ -0,0 +1,307 @@
    +/*
    +Copyright 2014 The Camlistore Authors
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +goog.provide('cam.PermanodeDetail');
    +
    +goog.require('goog.array');
    +goog.require('goog.labs.Promise');
    +goog.require('goog.object');
    +
    +goog.require('cam.ServerConnection');
    +
    +cam.PermanodeDetail = React.createClass({
    +	displayName: 'PermanodeDetail',
    +
    +	propTypes: {
    +		meta: React.PropTypes.object.isRequired,
    +		serverConnection: React.PropTypes.instanceOf(cam.ServerConnection).isRequired,
    +		timer: React.PropTypes.shape({
    +			setTimeout: React.PropTypes.func.isRequired,
    +		}).isRequired,
    +	},
    +
    +	getInitialState: function() {
    +		return {
    +			newRow: {},
    +			rows: this.getInitialRows_(),
    +			sortBy: 'name',
    +			sortAsc: true,
    +			status: '',
    +		};
    +	},
    +
    +	render: function() {
    +		return React.DOM.div({className: 'cam-permanode-detail'},
    +			React.DOM.h1(null, 'Current attributes'),
    +			this.getAttributesTable_(),
    +			this.getStatus_()
    +		);
    +	},
    +
    +	getStatus_: function() {
    +		if (this.state.status) {
    +			return React.DOM.div(
    +				{className: 'cam-permanode-detail-status'},
    +				this.state.status
    +			);
    +		} else {
    +			return null;
    +		}
    +	},
    +
    +	getInitialRows_: function() {
    +		var rows = [];
    +		for (var name in this.props.meta.permanode.attr) {
    +			var values = this.props.meta.permanode.attr[name];
    +			for (var i = 0; i < values.length; i++) {
    +				rows.push({
    +					'name': name,
    +					'value': values[i],
    +				});
    +			}
    +		}
    +		return rows;
    +	},
    +
    +	getAttributesTable_: function() {
    +		var headerText = function(name, column) {
    +			var children = [name];
    +			if (this.state.sortBy == column) {
    +				children.push(
    +					React.DOM.i({
    +						key: goog.string.subs('%s-sort-icon', name),
    +						className: React.addons.classSet({
    +							'fa': true,
    +							'fa-caret-up': this.state.sortAsc,
    +							'fa-caret-down': !this.state.sortAsc,
    +						}),
    +					})
    +				);
    +			}
    +			return React.DOM.span(null, children);
    +		}.bind(this);
    +
    +		var header = function(content, onclick) {
    +			return React.DOM.th(
    +				{
    +					className: 'cam-unselectable',
    +					onClick: onclick,
    +				},
    +				content
    +			);
    +		};
    +
    +		return React.DOM.table(null,
    +			React.DOM.tbody(null,
    +				React.DOM.tr(
    +					{key: 'header'},
    +					header(headerText('Name', 'name'), this.handleSort_.bind(null, 'name')),
    +					header(headerText('Value', 'value'), this.handleSort_.bind(null, 'value')),
    +					header('')
    +				),
    +				cam.PermanodeDetail.AttributeRow({
    +					className: 'cam-permanode-detail-new-row',
    +					key: 'new',
    +					onBlur: this.handleBlur_,
    +					onChange: this.handleChange_,
    +					row: this.state.newRow,
    +				}),
    +				this.state.rows.map(function(r, i) {
    +					return cam.PermanodeDetail.AttributeRow({
    +						key: i,
    +						onBlur: this.handleBlur_,
    +						onChange: this.handleChange_,
    +						onDelete: this.handleDelete_.bind(null, r),
    +						row: r,
    +					});
    +				}, this)
    +			)
    +		);
    +	},
    +
    +	handleChange_: function(row, column, e) {
    +		row[column] = e.target.value;
    +		this.forceUpdate();
    +	},
    +
    +	handleDelete_: function(row) {
    +		this.setState({
    +			rows: this.state.rows.filter(function(r) { return r != row; }),
    +		}, function() {
    +			this.commitChanges_();
    +		}.bind(this));
    +	},
    +
    +	handleBlur_: function(row) {
    +		if (row == this.state.newRow) {
    +			if (row.name && row.value) {
    +				this.state.rows.splice(0, 0, row);
    +				this.state.newRow = {};
    +				this.forceUpdate();
    +				this.commitChanges_();
    +			}
    +		} else {
    +			this.commitChanges_();
    +		}
    +	},
    +
    +	handleSort_: function(sortBy) {
    +		var sortAsc = true;
    +		if (this.state.sortBy == sortBy) {
    +			sortAsc = !this.state.sortAsc;
    +		}
    +		this.setState({
    +			rows: this.getSortedRows_(sortBy, sortAsc),
    +			sortAsc: sortAsc,
    +			sortBy: sortBy,
    +		});
    +	},
    +
    +	getSortedRows_: function(sortBy, sortAsc) {
    +		var numericSort = function(a, b) {
    +			return parseFloat(a) - parseFloat(b);
    +		}
    +		var stringSort = function(a, b) {
    +			return a.localeCompare(b);
    +		}
    +
    +		var rows = goog.array.clone(this.state.rows);
    +		var sort = rows.some(function(r) {
    +			return isNaN(parseFloat(r[sortBy]));
    +		}) ? stringSort : numericSort;
    +
    +		rows.sort(function(a, b) {
    +			if (!sortAsc) {
    +				var tmp = a;
    +				a = b;
    +				b = tmp;
    +			}
    +			return sort(a[sortBy], b[sortBy]);
    +		});
    +
    +		return rows;
    +	},
    +
    +	getChanges_: function() {
    +		var key = function(r) {
    +			return r.name + ':' + r.value;
    +		};
    +		var before = goog.array.toObject(this.getInitialRows_(), key);
    +		var after = goog.array.toObject(this.state.rows, key);
    +
    +		var adds = goog.object.filter(after, function(v, k) { return !(k in before); });
    +		var deletes = goog.object.filter(before, function(v, k) { return !(k in after); });
    +
    +		return {
    +			adds: goog.object.getValues(adds),
    +			deletes: goog.object.getValues(deletes),
    +		};
    +	},
    +
    +	commitChanges_: function() {
    +		var changes = this.getChanges_();
    +		if (changes.adds.length == 0 && changes.deletes.length == 0) {
    +			return;
    +		}
    +		this.setState({
    +			status: 'Saving...',
    +		});
    +		var promises = changes.adds.map(function(add) {
    +			return new goog.labs.Promise(this.props.serverConnection.newAddAttributeClaim.bind(this.props.serverConnection, this.props.meta.blobRef, add.name, add.value));
    +		}, this).concat(changes.deletes.map(function(del) {
    +			return new goog.labs.Promise(this.props.serverConnection.newDelAttributeClaim.bind(this.props.serverConnection, this.props.meta.blobRef, del.name, del.value));
    +		}, this));
    +		goog.labs.Promise.all(promises).then(function() {
    +			this.props.timer.setTimeout(function() {
    +				this.setState({
    +					status: '',
    +				});
    +			}.bind(this), 500);
    +		}.bind(this));
    +	}
    +});
    +
    +cam.PermanodeDetail.AttributeRow = React.createClass({
    +	displayName: 'AttributeRow',
    +
    +	propTypes: {
    +		className: React.PropTypes.string,
    +		onBlur: React.PropTypes.func,
    +		onDelete: React.PropTypes.func,
    +		onChange: React.PropTypes.func.isRequired,
    +		row: React.PropTypes.object,
    +	},
    +
    +	render: function() {
    +		var deleteButton = function(onDelete) {
    +			if (onDelete) {
    +				return React.DOM.i({
    +					className: 'fa fa-times-circle-o cam-permanode-detail-delete-attribute',
    +					onClick: onDelete,
    +				});
    +			} else {
    +				return null;
    +			}
    +		};
    +
    +		return React.DOM.tr(
    +			{
    +				className: this.props.className,
    +				onBlur: this.props.onBlur && this.props.onBlur.bind(null, this.props.row),
    +			},
    +			React.DOM.td(null,
    +				React.DOM.input({
    +					onChange: this.props.onChange.bind(null, this.props.row, 'name'),
    +					placeholder: this.props.row.name ? '': 'New attribute name',
    +					type: 'text',
    +					value: this.props.row.name || '',
    +				})
    +			),
    +			React.DOM.td(null,
    +				React.DOM.input({
    +					onChange: this.props.onChange.bind(null, this.props.row, 'value'),
    +					placeholder: this.props.row.value ? '' : 'New attribute value',
    +					type: 'text',
    +					value: this.props.row.value || '',
    +				})
    +			),
    +			React.DOM.td(null, deleteButton(this.props.onDelete))
    +		);
    +	},
    +});
    +
    +cam.PermanodeDetail.getAspect = function(serverConnection, timer, blobref, targetSearchSession) {
    +	if (!targetSearchSession) {
    +		return null;
    +	}
    +
    +	var pm = targetSearchSession.getMeta(blobref);
    +	if (!pm || pm.camliType != 'permanode') {
    +		return null;
    +	}
    +
    +	return {
    +		fragment: 'permanode',
    +		title: 'Permanode',
    +		createContent: function(size) {
    +			return cam.PermanodeDetail({
    +				meta: pm,
    +				serverConnection: serverConnection,
    +				timer: timer,
    +			});
    +		},
    +	};
    +};
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode_utils.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode_utils.js
    new file mode 100644
    index 00000000..1bff20b5
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode_utils.js
    @@ -0,0 +1,35 @@
    +/*
    +Copyright 2014 The Camlistore Authors
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +goog.provide('cam.permanodeUtils');
    +
    +goog.require('goog.array');
    +
    +cam.permanodeUtils.getSingleAttr = function(permanode, name) {
    +	var val = permanode.attr[name];
    +	if (val) {
    +		return goog.isArray(val) ? val[0] : val;
    +	}
    +	return null;
    +};
    +
    +cam.permanodeUtils.isContainer = function(permanode) {
    +	return goog.object.some(permanode.attr, function(v, k) { return k == 'camliMember' || goog.string.startsWith(k, 'camliPath:'); });
    +};
    +
    +cam.permanodeUtils.getCamliNodeType = function(permanode) {
    +	return cam.permanodeUtils.getSingleAttr(permanode, 'camliNodeType');
    +};
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode_utils_test.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode_utils_test.js
    new file mode 100644
    index 00000000..3e5745b4
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/permanode_utils_test.js
    @@ -0,0 +1,52 @@
    +/*
    +Copyright 2014 Google Inc.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +     http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +var assert = require('assert');
    +
    +goog.require('cam.permanodeUtils');
    +
    +
    +describe('cam.permanodeUtils', function() {
    +  describe('#getSingleAttr', function() {
    +    it('should return null if attr unknown or empty string', function() {
    +        var pn = {
    +            attr: {
    +                foo: '',
    +            },
    +        };
    +        assert.strictEqual(null, cam.permanodeUtils.getSingleAttr(pn, 'foo'));
    +        assert.strictEqual(null, cam.permanodeUtils.getSingleAttr(pn, 'bar'));
    +    });
    +
    +    it('should return first array val', function() {
    +        var pn = {
    +            attr: {
    +                foo: ['bar', 'baz'],
    +            },
    +        };
    +        assert.equal('bar', cam.permanodeUtils.getSingleAttr(pn, 'foo'));
    +    });
    +
    +    it('should return string val', function() {
    +        var pn = {
    +            attr: {
    +                foo: 'bar',
    +            },
    +        };
    +        assert.equal('bar', cam.permanodeUtils.getSingleAttr(pn, 'foo'));
    +    });
    +  });
    +});
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/prefix-free.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/prefix-free.css
    new file mode 100644
    index 00000000..81024fc7
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/prefix-free.css
    @@ -0,0 +1,389 @@
    +//---------------------------------------------------
    +//  LESS Prefixer
    +//---------------------------------------------------
    +//
    +//  All of the CSS3 fun, none of the prefixes!
    +//
    +//  As a rule, you can use the CSS properties you
    +//  would expect just by adding a '.':
    +//
    +//  box-shadow => .box-shadow(@args)
    +//
    +//  Also, when shorthand is available, arguments are
    +//  not parameterized. Learn CSS, not LESS Prefixer.
    +//
    +//  -------------------------------------------------
    +//  TABLE OF CONTENTS
    +//  (*) denotes a syntax-sugar helper
    +//  -------------------------------------------------
    +//
    +//      .animation(@args)
    +//          .animation-delay(@delay)
    +//          .animation-direction(@direction)
    +//          .animation-duration(@duration)
    +//          .animation-fill-mode(@mode)
    +//          .animation-iteration-count(@count)
    +//          .animation-name(@name)
    +//          .animation-play-state(@state)
    +//          .animation-timing-function(@function)
    +//      .background-size(@args)
    +//      .border-radius(@args)
    +//      .box-shadow(@args)
    +//          .inner-shadow(@args) *
    +//      .box-sizing(@args)
    +//          .border-box() *
    +//          .content-box() *
    +//      .columns(@args)
    +//          .column-count(@count)
    +//          .column-gap(@gap)
    +//          .column-rule(@args)
    +//          .column-width(@width)
    +//      .gradient(@default,@start,@stop) *
    +//          .linear-gradient-top(@default,@color1,@stop1,@color2,@stop2,[@color3,@stop3,@color4,@stop4])*
    +//          .linear-gradient-left(@default,@color1,@stop1,@color2,@stop2,[@color3,@stop3,@color4,@stop4])*
    +//      .opacity(@factor)
    +//      .transform(@args)
    +//          .transform-origin(@args)
    +//          .transform-style(@style)
    +//          .rotate(@deg)
    +//          .scale(@factor)
    +//          .translate(@x,@y)
    +//          .translate3d(@x,@y,@z)
    +//          .translateHardware(@x,@y) *
    +//      .text-shadow(@args)
    +//      .transition(@args)
    +//          .transition-delay(@delay)
    +//          .transition-duration(@duration)
    +//          .transition-property(@property)
    +//          .transition-timing-function(@function)
    +//
    +//
    +//
    +//  Credit to LESS Elements for the motivation and
    +//  to CSS3Please.com for implementation.
    +//
    +//  Copyright (c) 2012 Joel Sutherland
    +//  MIT Licensed:
    +//  http://www.opensource.org/licenses/mit-license.php
    +//
    +//---------------------------------------------------
    +
    +
    +// Animation
    +
    +.animation(@args) {
    +    -webkit-animation: @args;
    +    -moz-animation: @args;
    +    -ms-animation: @args;
    +    -o-animation: @args;
    +    animation: @args;
    +}
    +.animation-delay(@delay) {
    +    -webkit-animation-delay: @delay;
    +    -moz-animation-delay: @delay;
    +    -ms-animation-delay: @delay;
    +    -o-animation-delay: @delay;
    +    animation-delay: @delay;
    +}
    +.animation-direction(@direction) {
    +    -webkit-animation-direction: @direction;
    +    -moz-animation-direction: @direction;
    +    -ms-animation-direction: @direction;
    +    -o-animation-direction: @direction;
    +}
    +.animation-duration(@duration) {
    +    -webkit-animation-duration: @duration;
    +    -moz-animation-duration: @duration;
    +    -ms-animation-duration: @duration;
    +    -o-animation-duration: @duration;
    +}
    +.animation-fill-mode(@mode) {
    +    -webkit-animation-fill-mode: @mode;
    +    -moz-animation-fill-mode: @mode;
    +    -ms-animation-fill-mode: @mode;
    +    -o-animation-fill-mode: @mode;
    +    animation-fill-mode: @mode;
    +}
    +.animation-iteration-count(@count) {
    +    -webkit-animation-iteration-count: @count;
    +    -moz-animation-iteration-count: @count;
    +    -ms-animation-iteration-count: @count;
    +    -o-animation-iteration-count: @count;
    +    animation-iteration-count: @count;
    +}
    +.animation-name(@name) {
    +    -webkit-animation-name: @name;
    +    -moz-animation-name: @name;
    +    -ms-animation-name: @name;
    +    -o-animation-name: @name;
    +    animation-name: @name;
    +}
    +.animation-play-state(@state) {
    +    -webkit-animation-play-state: @state;
    +    -moz-animation-play-state: @state;
    +    -ms-animation-play-state: @state;
    +    -o-animation-play-state: @state;
    +    animation-play-state: @state;
    +}
    +.animation-timing-function(@function) {
    +    -webkit-animation-timing-function: @function;
    +    -moz-animation-timing-function: @function;
    +    -ms-animation-timing-function: @function;
    +    -o-animation-timing-function: @function;
    +    animation-timing-function: @function;
    +}
    +
    +
    +// Background Size
    +
    +.background-size(@args) {
    +    -webkit-background-size: @args;
    +    background-size: @args;
    +}
    +
    +
    +// Border Radius
    +
    +.border-radius(@args) {
    +	-webkit-border-radius: @args;
    +    border-radius: @args;
    +
    +    background-clip: padding-box;
    +}
    +
    +
    +// Box Shadows
    +
    +.box-shadow(@args) {
    +    -webkit-box-shadow: @args;
    +    box-shadow: @args;
    +}
    +.inner-shadow(@args) {
    +    .box-shadow(inset @args);
    +}
    +
    +
    +// Box Sizing
    +
    +.box-sizing(@args) {
    +    -webkit-box-sizing: @args;
    +    -moz-box-sizing: @args;
    +    box-sizing: @args;
    +}
    +.border-box(){
    +    .box-sizing(border-box);
    +}
    +.content-box(){
    +    .box-sizing(content-box);
    +}
    +
    +
    +// Columns
    +
    +.columns(@args) {
    +    -webkit-columns: @args;
    +    -moz-columns: @args;
    +    columns: @args;
    +}
    +.column-count(@count) {
    +    -webkit-column-count: @count;
    +    -moz-column-count: @count;
    +    column-count: @count;
    +}
    +.column-gap(@gap) {
    +    -webkit-column-gap: @gap;
    +    -moz-column-gap: @gap;
    +    column-gap: @gap;
    +}
    +.column-width(@width) {
    +    -webkit-column-width: @width;
    +    -moz-column-width: @width;
    +    column-width: @width;
    +}
    +.column-rule(@args) {
    +    -webkit-column-rule: @args;
    +    -moz-column-rule: @args;
    +    column-rule: @args;
    +}
    +
    +
    +// Gradients
    +
    +.gradient(@default: #F5F5F5, @start: #EEE, @stop: #FFF) {
    +    .linear-gradient-top(@default,@start,0%,@stop,100%);
    +}
    +.linear-gradient-top(@default,@color1,@stop1,@color2,@stop2) {
    +    background-color: @default;
    +    background-image: -webkit-gradient(linear, left top, left bottom, color-stop(@stop1, @color1), color-stop(@stop2 @color2));
    +    background-image: -webkit-linear-gradient(top, @color1 @stop1, @color2 @stop2);
    +    background-image: -moz-linear-gradient(top, @color1 @stop1, @color2 @stop2);
    +    background-image: -ms-linear-gradient(top, @color1 @stop1, @color2 @stop2);
    +    background-image: -o-linear-gradient(top, @color1 @stop1, @color2 @stop2);
    +    background-image: linear-gradient(top, @color1 @stop1, @color2 @stop2);
    +}
    +.linear-gradient-top(@default,@color1,@stop1,@color2,@stop2,@color3,@stop3) {
    +    background-color: @default;
    +    background-image: -webkit-gradient(linear, left top, left bottom, color-stop(@stop1, @color1), color-stop(@stop2 @color2), color-stop(@stop3 @color3));
    +    background-image: -webkit-linear-gradient(top, @color1 @stop1, @color2 @stop2, @color3 @stop3);
    +    background-image: -moz-linear-gradient(top, @color1 @stop1, @color2 @stop2, @color3 @stop3);
    +    background-image: -ms-linear-gradient(top, @color1 @stop1, @color2 @stop2, @color3 @stop3);
    +    background-image: -o-linear-gradient(top, @color1 @stop1, @color2 @stop2, @color3 @stop3);
    +    background-image: linear-gradient(top, @color1 @stop1, @color2 @stop2, @color3 @stop3);
    +}
    +.linear-gradient-top(@default,@color1,@stop1,@color2,@stop2,@color3,@stop3,@color4,@stop4) {
    +    background-color: @default;
    +    background-image: -webkit-gradient(linear, left top, left bottom, color-stop(@stop1, @color1), color-stop(@stop2 @color2), color-stop(@stop3 @color3), color-stop(@stop4 @color4));
    +    background-image: -webkit-linear-gradient(top, @color1 @stop1, @color2 @stop2, @color3 @stop3, @color4 @stop4);
    +    background-image: -moz-linear-gradient(top, @color1 @stop1, @color2 @stop2, @color3 @stop3, @color4 @stop4);
    +    background-image: -ms-linear-gradient(top, @color1 @stop1, @color2 @stop2, @color3 @stop3, @color4 @stop4);
    +    background-image: -o-linear-gradient(top, @color1 @stop1, @color2 @stop2, @color3 @stop3, @color4 @stop4);
    +    background-image: linear-gradient(top, @color1 @stop1, @color2 @stop2, @color3 @stop3, @color4 @stop4);
    +}
    +.linear-gradient-left(@default,@color1,@stop1,@color2,@stop2) {
    +    background-color: @default;
    +    background-image: -webkit-gradient(linear, left top, left top, color-stop(@stop1, @color1), color-stop(@stop2 @color2));
    +    background-image: -webkit-linear-gradient(left, @color1 @stop1, @color2 @stop2);
    +    background-image: -moz-linear-gradient(left, @color1 @stop1, @color2 @stop2);
    +    background-image: -ms-linear-gradient(left, @color1 @stop1, @color2 @stop2);
    +    background-image: -o-linear-gradient(left, @color1 @stop1, @color2 @stop2);
    +    background-image: linear-gradient(left, @color1 @stop1, @color2 @stop2);
    +}
    +.linear-gradient-left(@default,@color1,@stop1,@color2,@stop2,@color3,@stop3) {
    +    background-color: @default;
    +    background-image: -webkit-gradient(linear, left top, left top, color-stop(@stop1, @color1), color-stop(@stop2 @color2), color-stop(@stop3 @color3));
    +    background-image: -webkit-linear-gradient(left, @color1 @stop1, @color2 @stop2, @color3 @stop3);
    +    background-image: -moz-linear-gradient(left, @color1 @stop1, @color2 @stop2, @color3 @stop3);
    +    background-image: -ms-linear-gradient(left, @color1 @stop1, @color2 @stop2, @color3 @stop3);
    +    background-image: -o-linear-gradient(left, @color1 @stop1, @color2 @stop2, @color3 @stop3);
    +    background-image: linear-gradient(left, @color1 @stop1, @color2 @stop2, @color3 @stop3);
    +}
    +.linear-gradient-left(@default,@color1,@stop1,@color2,@stop2,@color3,@stop3,@color4,@stop4) {
    +    background-color: @default;
    +    background-image: -webkit-gradient(linear, left top, left top, color-stop(@stop1, @color1), color-stop(@stop2 @color2), color-stop(@stop3 @color3), color-stop(@stop4 @color4));
    +    background-image: -webkit-linear-gradient(left, @color1 @stop1, @color2 @stop2, @color3 @stop3, @color4 @stop4);
    +    background-image: -moz-linear-gradient(left, @color1 @stop1, @color2 @stop2, @color3 @stop3, @color4 @stop4);
    +    background-image: -ms-linear-gradient(left, @color1 @stop1, @color2 @stop2, @color3 @stop3, @color4 @stop4);
    +    background-image: -o-linear-gradient(left, @color1 @stop1, @color2 @stop2, @color3 @stop3, @color4 @stop4);
    +    background-image: linear-gradient(left, @color1 @stop1, @color2 @stop2, @color3 @stop3, @color4 @stop4);
    +}
    +
    +
    +// Opacity
    +
    +.opacity(@factor) {
    +    @iefactor: @factor*100;
    +    -ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=@{iefactor})";
    +	filter: ~"alpha(opacity=(@{iefactor}))";
    +    opacity: @factor;
    +}
    +
    +
    +// Text Shadow
    +
    +.text-shadow(@args) {
    +    text-shadow: @args;
    +}
    +
    +
    +// Transforms
    +
    +.transform(@args) {
    +    -webkit-transform: @args;
    +    -moz-transform: @args;
    +    -ms-transform: @args;
    +    -o-transform: @args;
    +    transform: @args;
    +}
    +.transform-origin(@args) {
    +    -webkit-transform-origin: @args;
    +    -moz-transform-origin: @args;
    +    -ms-transform-origin: @args;
    +    -o-transform-origin: @args;
    +    transform-origin: @args;
    +}
    +.transform-style(@style) {
    +    -webkit-transform-style: @style;
    +    -moz-transform-style: @style;
    +    -ms-transform-style: @style;
    +    -o-transform-style: @style;
    +    transform-style: @style;
    +}
    +.rotate(@deg:45deg){
    +    .transform(rotate(@deg));
    +}
    +.scale(@factor:.5){
    +    .transform(scale(@factor));
    +}
    +.translate(@x,@y){
    +    .transform(translate(@x,@y));
    +}
    +.translate3d(@x,@y,@z) {
    +    .transform(translate3d(@x,@y,@z));
    +}
    +.translateHardware(@x,@y) {
    +    .translate(@x,@y);
    +    -webkit-transform: translate3d(@x,@y,0);
    +    -moz-transform: translate3d(@x,@y,0);
    +    -o-transform: translate3d(@x,@y,0);
    +    -ms-transform: translate3d(@x,@y,0);
    +    transform: translate3d(@x,@y,0);
    +}
    +
    +
    +// Transitions
    +
    +.transition(@args:200ms) {
    +    -webkit-transition: @args;
    +    -moz-transition: @args;
    +    -o-transition: @args;
    +    -ms-transition: @args;
    +    transition: @args;
    +}
    +
    +/* Added by elsigh */
    +.transition-transform(@args) {
    +    -webkit-transition: -webkit-transform @args;
    +    -moz-transition: -moz-transform @args;
    +    -ms-transition: -ms-transform @args;
    +    -o-transition: -o-transform @args;
    +    transition: transform @args;
    +    }
    +
    +.transition-delay(@delay:0) {
    +    -webkit-transition-delay: @delay;
    +    -moz-transition-delay: @delay;
    +    -o-transition-delay: @delay;
    +    -ms-transition-delay: @delay;
    +    transition-delay: @delay;
    +}
    +.transition-duration(@duration:200ms) {
    +    -webkit-transition-duration: @duration;
    +    -moz-transition-duration: @duration;
    +    -o-transition-duration: @duration;
    +    -ms-transition-duration: @duration;
    +    transition-duration: @duration;
    +}
    +.transition-property(@property:all) {
    +    -webkit-transition-property: @property;
    +    -moz-transition-property: @property;
    +    -o-transition-property: @property;
    +    -ms-transition-property: @property;
    +    transition-property: @property;
    +}
    +.transition-timing-function(@function:ease) {
    +    -webkit-transition-timing-function: @function;
    +    -moz-transition-timing-function: @function;
    +    -o-transition-timing-function: @function;
    +    -ms-transition-timing-function: @function;
    +    transition-timing-function: @function;
    +}
    +
    +/* Added by elsigh */
    +.user-select(@args) {
    +    -webkit-user-select: @args;
    +    -moz-user-select: @args;
    +    -khtml-user-select: @args;
    +    -o-user-select: @args;
    +    -ms-user-select: @args;
    +    user-select: @args;
    +}
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/property_sheet.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/property_sheet.css
    new file mode 100644
    index 00000000..19ebaa4a
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/property_sheet.css
    @@ -0,0 +1,54 @@
    +/*
    +Copyright 2014 The Camlistore Authors
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +@import (less) "prefix-free.css";
    +
    +
    +.cam-property-sheet-container,
    +.cam-property-sheet-container td {
    +	font-size: 15px;
    +}
    +
    +.cam-property-sheet-title {
    +	background: #e5e5e5;
    +	color: #999;
    +	padding: 2px 6px;
    +}
    +
    +.cam-property-sheet-content {
    +	padding: 2px 6px;
    +	margin-bottom: 1em;
    +}
    +
    +.cam-property-sheet-content table {
    +	border-collapse: collapse;
    +	display: block;
    +	margin: -2px -6px;
    +}
    +
    +.cam-property-sheet-content td {
    +	border-bottom: 1px dashed #ccc;
    +	padding: 2px 6px;
    +}
    +
    +.cam-property-sheet-content td:first-child {
    +	border-right: 1px dashed #ccc;
    +	color: #666;
    +}
    +
    +.cam-property-sheet-content td:last-child {
    +	width: 100%;
    +}
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/property_sheet.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/property_sheet.js
    new file mode 100644
    index 00000000..41c60c09
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/property_sheet.js
    @@ -0,0 +1,56 @@
    +/*
    +Copyright 2014 The Camlistore Authors
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +goog.provide('cam.PropertySheet');
    +goog.provide('cam.PropertySheetContainer');
    +
    +goog.require('cam.style.ClassNameBuilder');
    +
    +cam.PropertySheet = React.createClass({
    +	displayName: 'PropertySheet',
    +
    +	propTypes: {
    +		className: React.PropTypes.string,
    +		title: React.PropTypes.string.isRequired,
    +	},
    +
    +	render: function() {
    +		return (
    +			React.DOM.div({className: new cam.style.ClassNameBuilder().add('cam-property-sheet').add(this.props.className).build()}, [
    +				React.DOM.div({className: 'cam-property-sheet-title'}, this.props.title),
    +				React.DOM.div({className: 'cam-property-sheet-content'}, this.props.children),
    +			])
    +		);
    +	},
    +});
    +
    +cam.PropertySheetContainer = React.createClass({
    +	displayName: 'PropertySheetContainer',
    +
    +	propTypes: {
    +		className: React.PropTypes.string,
    +		style: React.PropTypes.object,
    +	},
    +
    +	render: function() {
    +		return React.DOM.div({
    +				className: new cam.style.ClassNameBuilder().add('cam-property-sheet-container').add(this.props.className).build(),
    +				style: this.props.style,
    +			},
    +			this.props.children
    +		);
    +	},
    +});
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/pyramid_throbber.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/pyramid_throbber.css
    new file mode 100644
    index 00000000..1647fb1f
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/pyramid_throbber.css
    @@ -0,0 +1,98 @@
    +/*
    +Copyright 2014 The Camlistore Authors
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +@import (less) "prefix-free.css";
    +
    +
    +.cam-pyramid-throbber {
    +	position: relative;
    +}
    +
    +.cam-pyramid-throbber .lefttop {
    +	position: absolute;
    +	width: 0;
    +	height: 0;
    +	border-bottom: 50px solid rgb(220,220,220);
    +	border-left: 35px solid transparent;
    +	.animation(leftcolors 1.0s infinite ease-in-out);
    +}
    +
    +.cam-pyramid-throbber .leftbottom {
    +	position: absolute;
    +	top: 50px;
    +	width: 0;
    +	height: 0;
    +	border-top: 15px solid rgb(220,220,220);
    +	border-left: 35px solid transparent;
    +	.animation(leftcolors 1.0s infinite ease-in-out);
    +}
    +
    +.cam-pyramid-throbber .righttop {
    +	position: absolute;
    +	left: 35px;
    +	width: 0;
    +	height: 0;
    +	border-bottom: 50px solid rgb(180,180,180);
    +	border-right: 35px solid transparent;
    +	.animation(rightcolors 1.0s infinite ease-in-out);
    +}
    +
    +.cam-pyramid-throbber .rightbottom {
    +	position: absolute;
    +	left: 35px;
    +	top: 50px;
    +	width: 0;
    +	height: 0;
    +	border-top: 15px solid rgb(180,180,180);
    +	border-right: 35px solid transparent;
    +	.animation(rightcolors 1.0s infinite ease-in-out);
    +}
    +
    +@-webkit-keyframes leftcolors {
    +	0% { border-bottom-color: rgb(220,220,220); border-top-color: rgb(220,220,220) }
    +	50% { border-bottom-color: rgb(180,180,180); border-top-color: rgb(180,180,180) }
    +	100% { border-bottom-color: rgb(220,220,220); border-top-color: rgb(220,220,220) }
    +}
    +
    +@-webkit-keyframes rightcolors {
    +	0% { border-bottom-color: rgb(180,180,180); border-top-color: rgb(180,180,180) }
    +	50% { border-bottom-color: rgb(220,220,220); border-top-color: rgb(220,220,220) }
    +	100% { border-bottom-color: rgb(180,180,180); border-top-color: rgb(180,180,180) }
    +}
    +
    +@-moz-keyframes leftcolors {
    +	0% { border-bottom-color: rgb(220,220,220); border-top-color: rgb(220,220,220) }
    +	50% { border-bottom-color: rgb(180,180,180); border-top-color: rgb(180,180,180) }
    +	100% { border-bottom-color: rgb(220,220,220); border-top-color: rgb(220,220,220) }
    +}
    +
    +@-moz-keyframes rightcolors {
    +	0% { border-bottom-color: rgb(180,180,180); border-top-color: rgb(180,180,180) }
    +	50% { border-bottom-color: rgb(220,220,220); border-top-color: rgb(220,220,220) }
    +	100% { border-bottom-color: rgb(180,180,180); border-top-color: rgb(180,180,180) }
    +}
    +
    +@keyframes leftcolors {
    +	0% { border-bottom-color: rgb(220,220,220); border-top-color: rgb(220,220,220) }
    +	50% { border-bottom-color: rgb(180,180,180); border-top-color: rgb(180,180,180) }
    +	100% { border-bottom-color: rgb(220,220,220); border-top-color: rgb(220,220,220) }
    +}
    +
    +@keyframes rightcolors {
    +	0% { border-bottom-color: rgb(180,180,180); border-top-color: rgb(180,180,180) }
    +	50% { border-bottom-color: rgb(220,220,220); border-top-color: rgb(220,220,220) }
    +	100% { border-bottom-color: rgb(180,180,180); border-top-color: rgb(180,180,180) }
    +}
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/pyramid_throbber.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/pyramid_throbber.js
    new file mode 100644
    index 00000000..588f74ea
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/pyramid_throbber.js
    @@ -0,0 +1,46 @@
    +/*
    +Copyright 2014 The Camlistore Authors
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +goog.provide('cam.PyramidThrobber');
    +
    +goog.require('goog.math.Coordinate');
    +goog.require('goog.math.Size');
    +
    +cam.PyramidThrobber = React.createClass({
    +	propTypes: {
    +		pos: React.PropTypes.instanceOf(goog.math.Coordinate),
    +	},
    +
    +	render: function() {
    +		return React.DOM.div({style:this.getStyle_(), className:'cam-pyramid-throbber'},
    +			React.DOM.div({className:'lefttop'}),
    +			React.DOM.div({className:'leftbottom'}),
    +			React.DOM.div({className:'righttop'}),
    +			React.DOM.div({className:'rightbottom'})
    +		);
    +	},
    +
    +	getStyle_: function() {
    +		var result = {};
    +		if (goog.isDef(this.props.pos)) {
    +			result.left = this.props.pos.x;
    +			result.top = this.props.pos.y;
    +		}
    +		return result;
    +	}
    +});
    +
    +cam.PyramidThrobber.SIZE = new goog.math.Size(70, 85);
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/react_util.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/react_util.js
    new file mode 100644
    index 00000000..c61f1da4
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/react_util.js
    @@ -0,0 +1,87 @@
    +/*
    +Copyright 2014 The Camlistore Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +goog.provide('cam.reactUtil');
    +
    +goog.require('goog.string');
    +
    +cam.reactUtil.mapOf = function(validator) {
    +	var validator = function(props, propName, componentName) {
    +		if (!props[propName]) {
    +			return;
    +		}
    +
    +		React.PropTypes.isObject(props, propName, componentName);
    +
    +		for (var child in props[propName]) {
    +			var childName = goog.string.subs('%s[%s]', componentName, child);
    +			validator(props[propName], child, childName);
    +		}
    +	};
    +
    +	validator.isRequired = React.PropTypes.object.isRequired;
    +	return validator;
    +};
    +
    +// Returns the appropriate vendor prefixed style property name. This is figured out by testing the presence of various property names on an actual DOM style object.
    +// The returned property is of the form 'fooBar' (if no prefix is needed), or 'WebkitFooBar' if a prefix is needed, which is the form React expects.
    +// @param {string} prop The property name to find.
    +// @param {CSSStyleDeclaration=} style A style object to test on. This can be any DOM style object, e.g., document.body.style.
    +// @return {?string} The appropriate property name to use, or null if the property is not supported in this environment.
    +cam.reactUtil.getVendorProp = function(prop, opt_testStyle) {
    +	if (!goog.isDef(opt_testStyle)) {
    +		opt_testStyle = document.body.style;
    +	}
    +
    +	if (goog.isDef(opt_testStyle[prop])) {
    +		return prop;
    +	}
    +
    +	var prefixes = ['webkit', 'moz', 'ie'];
    +	for (var i = 0, p; p = prefixes[i]; i++) {
    +		var candidate = p + goog.string.toTitleCase(prop);
    +		if (goog.isDef(opt_testStyle[candidate])) {
    +			// React expects vendor prefixed property names to be TitleCase.
    +			return goog.string.toTitleCase(candidate);
    +		}
    +	}
    +
    +	return null;
    +};
    +
    +// Returns a copy of an object with all properties vendor-prefixed as required by the current ua.
    +// @param {object} o The object to fix.
    +// @param {CSSStyleDeclaration=} style A style object to test on. This can be any DOM style object, e.g., document.body.style.
    +// @return {object} A copy of o with all properties vendor-prefixed as appropriate.
    +cam.reactUtil.getVendorProps = function(o, opt_testStyle) {
    +	var n = {};
    +	for (var p in o) {
    +		n[cam.reactUtil.getVendorProp(p, opt_testStyle)] = o[p];
    +	}
    +	return n;
    +};
    +
    +// Like cam.object.extend(), except that special care is taken to also merge together some known child properties that are part of React specifications.
    +// @param Object parentSpec
    +// @param Object childSpec
    +// @return Object merged spec
    +cam.reactUtil.extend = function(parentSpec, childSpec) {
    +	var result = cam.object.extend(parentSpec, childSpec);
    +	if (childSpec.propTypes) {
    +		result.propTypes = cam.object.extend(parentSpec.propTypes, childSpec.propTypes);
    +	}
    +	return result;
    +}
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe-no-wheel.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe-no-wheel.svg
    new file mode 100755
    index 00000000..2eea3683
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe-no-wheel.svg
    @@ -0,0 +1,23 @@
    +
    +
    +
    +
    +
    +
    +	
    +
    +
    \ No newline at end of file
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe-wheel.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe-wheel.svg
    new file mode 100755
    index 00000000..883aeb2b
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe-wheel.svg
    @@ -0,0 +1,28 @@
    +
    +
    +
    +
    +
    +
    +	
    +		
    +		
    +		
    +		
    +	
    +
    +
    \ No newline at end of file
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe1-16.png b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe1-16.png
    new file mode 100644
    index 00000000..4e958c2d
    Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe1-16.png differ
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe1-32.png b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe1-32.png
    new file mode 100644
    index 00000000..94f6563b
    Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe1-32.png differ
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe1.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe1.svg
    new file mode 100755
    index 00000000..22bdd96e
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/safe1.svg
    @@ -0,0 +1,13 @@
    +
    +
    +
    +
    +	
    +		
    +		
    +		
    +		
    +	
    +	
    +
    +
    \ No newline at end of file
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/search_session.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/search_session.js
    new file mode 100644
    index 00000000..8b311148
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/search_session.js
    @@ -0,0 +1,279 @@
    +/*
    +Copyright 2013 Google Inc.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +goog.provide('cam.SearchSession');
    +
    +goog.require('goog.events.EventTarget');
    +goog.require('goog.Uri');
    +goog.require('goog.Uri.QueryData');
    +goog.require('goog.uri.utils');
    +
    +goog.require('cam.ServerConnection');
    +
    +// A search session is a standing query that notifies you when results change. It caches previous results and handles merging new data as it is received. It does not tell you _what_ changed; clients must reconcile as they see fit.
    +//
    +// TODO(aa): Only deltas should be sent from server to client
    +// TODO(aa): Need some way to avoid the duplicate query when websocket starts. Ideas:
    +// - Initial XHR query can also specify tag. This tag times out if not used rapidly. Send this same tag in socket query.
    +// - Socket assumes that client already has first batch of results (slightly racey though)
    +// - Prefer to use socket on client-side, test whether it works and fall back to XHR if not.
    +cam.SearchSession = function(connection, currentUri, query, opt_aroundBlobref) {
    +	goog.base(this);
    +
    +	this.connection_ = connection;
    +	this.currentUri_ = currentUri;
    +	this.initSocketUri_(currentUri);
    +	this.hasSocketError_ = false;
    +	this.query_ = query;
    +	this.around_ = opt_aroundBlobref;
    +	this.tag_ = 'q' + (this.constructor.instanceCount_++);
    +	this.continuation_ = this.getContinuation_(this.constructor.SEARCH_SESSION_CHANGE_TYPE.NEW);
    +	this.socket_ = null;
    +	this.supportsWebSocket_ = false;
    +	this.isComplete_ = false;
    +
    +	this.resetData_();
    +};
    +goog.inherits(cam.SearchSession, goog.events.EventTarget);
    +
    +// We fire this event when the data changes in any way.
    +cam.SearchSession.SEARCH_SESSION_CHANGED = 'search-session-change';
    +
    +// We fire this event when the search session receives general server status data.
    +cam.SearchSession.SEARCH_SESSION_STATUS = 'search-session-status';
    +
    +// We fire this event when the search session encounters an error.
    +cam.SearchSession.SEARCH_SESSION_ERROR = 'search-session-error';
    +
    +// TODO(aa): This is only used by BlobItemContainer. Once we switch over to BlobItemContainerReact completely, it can be removed.
    +cam.SearchSession.SEARCH_SESSION_CHANGE_TYPE = {
    +	NEW: 1,
    +	APPEND: 2,
    +	UPDATE: 3
    +};
    +
    +cam.SearchSession.PAGE_SIZE_ = 50;
    +
    +cam.SearchSession.instanceCount_ = 0;
    +
    +cam.SearchSession.prototype.getQuery = function() {
    +	return this.query_;
    +};
    +
    +cam.SearchSession.prototype.getAround = function() {
    +	return this.around_;
    +};
    +
    +// Returns all the data we currently have loaded.
    +// It is guaranteed to return the following properties:
    +// blobs // non-null
    +// description
    +// description.meta
    +cam.SearchSession.prototype.getCurrentResults = function() {
    +	return this.data_;
    +};
    +
    +cam.SearchSession.prototype.hasSocketError = function() {
    +	return this.hasSocketError_;
    +};
    +
    +// Loads the next page of data. This is safe to call while a load is in progress; multiple calls for the same page will be collapsed. The SEARCH_SESSION_CHANGED event will be dispatched when the new data is available.
    +cam.SearchSession.prototype.loadMoreResults = function() {
    +	if (!this.continuation_) {
    +		return;
    +	}
    +
    +	var c = this.continuation_;
    +	this.continuation_ = null;
    +	c();
    +};
    +
    +// Returns true if it is known that all data which can be loaded for this query has been.
    +cam.SearchSession.prototype.isComplete = function() {
    +	return this.isComplete_;
    +}
    +
    +cam.SearchSession.prototype.supportsChangeNotifications = function() {
    +	return this.supportsWebSocket_;
    +};
    +
    +cam.SearchSession.prototype.refreshIfNecessary = function() {
    +	if (this.supportsWebSocket_) {
    +		return;
    +	}
    +
    +	this.continuation_ = this.getContinuation_(this.constructor.SEARCH_SESSION_CHANGE_TYPE.UPDATE, null, Math.max(this.data_.blobs.length, this.constructor.PAGE_SIZE_));
    +	this.resetData_();
    +	this.loadMoreResults();
    +};
    +
    +cam.SearchSession.prototype.close = function() {
    +	if (this.socket_) {
    +		this.socket_.onerror = null;
    +		this.socket_.onclose = null;
    +		this.socket_.close();
    +	}
    +};
    +
    +cam.SearchSession.prototype.getMeta = function(blobref) {
    +	return this.data_.description.meta[blobref];
    +};
    +
    +cam.SearchSession.prototype.getResolvedMeta = function(blobref) {
    +	var meta = this.data_.description.meta[blobref];
    +	if (meta && meta.camliType == 'permanode') {
    +		var camliContent = cam.permanodeUtils.getSingleAttr(meta.permanode, 'camliContent');
    +		if (camliContent) {
    +			return this.data_.description.meta[camliContent];
    +		}
    +	}
    +	return meta;
    +};
    +
    +cam.SearchSession.prototype.getTitle = function(blobref) {
    +	var meta = this.getMeta(blobref);
    +	if (meta.camliType == 'permanode') {
    +		var title = cam.permanodeUtils.getSingleAttr(meta.permanode, 'title');
    +		if (title) {
    +			return title;
    +		}
    +	}
    +	var rm = this.getResolvedMeta(blobref);
    +	return (rm && rm.camliType == 'file' && rm.file.fileName) || (rm && rm.camliType == 'directory' && rm.dir.fileName) || '';
    +};
    +
    +cam.SearchSession.prototype.resetData_ = function() {
    +	this.data_ = {
    +		blobs: [],
    +		description: {
    +			meta: {}
    +		}
    +	};
    +};
    +
    +cam.SearchSession.prototype.initSocketUri_ = function(currentUri) {
    +	if (!goog.global.WebSocket) {
    +		return;
    +	}
    +
    +	this.socketUri_ = currentUri;
    +	this.socketUri_.setFragment('');
    +	var config = this.connection_.getConfig();
    +	this.socketUri_.setPath(goog.uri.utils.appendPath(config.searchRoot, 'camli/search/ws'));
    +	this.socketUri_.setQuery(goog.Uri.QueryData.createFromMap({authtoken: config.wsAuthToken || ''}));
    +	if (this.socketUri_.getScheme() == "https") {
    +		this.socketUri_.setScheme("wss");
    +	} else {
    +		this.socketUri_.setScheme("ws");
    +	}
    +};
    +
    +cam.SearchSession.prototype.getContinuation_ = function(changeType, opt_continuationToken, opt_limit) {
    +	return this.connection_.search.bind(this.connection_, this.query_, cam.ServerConnection.DESCRIBE_REQUEST, opt_limit || this.constructor.PAGE_SIZE_, opt_continuationToken,
    +		this.searchDone_.bind(this, changeType));
    +};
    +
    +cam.SearchSession.prototype.searchDone_ = function(changeType, result) {
    +	if (!result) {
    +		result = {};
    +	}
    +	if (!result.blobs) {
    +		result.blobs = [];
    +	}
    +	if (!result.description) {
    +		result.description = {};
    +	}
    +
    +	var changes = false;
    +
    +	if (changeType == this.constructor.SEARCH_SESSION_CHANGE_TYPE.APPEND) {
    +		changes = Boolean(result.blobs.length);
    +		this.data_.blobs = this.data_.blobs.concat(result.blobs);
    +		goog.mixin(this.data_.description.meta, result.description.meta);
    +	} else {
    +		changes = true;
    +		this.data_.blobs = result.blobs;
    +		this.data_.description = result.description;
    +	}
    +
    +	if (result.continue) {
    +		this.continuation_ = this.getContinuation_(this.constructor.SEARCH_SESSION_CHANGE_TYPE.APPEND, result.continue);
    +	} else {
    +		this.continuation_ = null;
    +		this.isComplete_ = true;
    +	}
    +
    +	if (changes) {
    +		this.dispatchEvent({type: this.constructor.SEARCH_SESSION_CHANGED, changeType: changeType});
    +
    +		if (changeType == this.constructor.SEARCH_SESSION_CHANGE_TYPE.NEW || changeType == this.constructor.SEARCH_SESSION_CHANGE_TYPE.APPEND) {
    +			this.startSocketQuery_();
    +		}
    +	}
    +};
    +
    +cam.SearchSession.prototype.handleError_ = function(message) {
    +	this.hasSocketError_ = true;
    +	this.dispatchEvent({type: this.constructor.SEARCH_SESSION_ERROR});
    +};
    +
    +cam.SearchSession.prototype.handleStatus_ = function(data) {
    +	if (data.tag == '_status') {
    +		this.dispatchEvent({
    +			type: this.constructor.SEARCH_SESSION_STATUS,
    +			status: data.status,
    +		});
    +	}
    +};
    +
    +cam.SearchSession.prototype.startSocketQuery_ = function() {
    +	if (!this.socketUri_) {
    +		return;
    +	}
    +
    +	this.close();
    +
    +	var numResults = 0;
    +	if (this.data_ && this.data_.blobs) {
    +		numResults = this.data_.blobs.length;
    +	}
    +	var query = this.connection_.buildQuery(this.query_, cam.ServerConnection.DESCRIBE_REQUEST, Math.max(numResults, this.constructor.PAGE_SIZE_), null, this.around_);
    +
    +	this.socket_ = new WebSocket(this.socketUri_.toString());
    +	this.socket_.onopen = function() {
    +		var message = {
    +			tag: this.tag_,
    +			query: query
    +		};
    +		this.socket_.send(JSON.stringify(message));
    +	}.bind(this);
    +	this.socket_.onclose =
    +	this.socket_.onerror = function(e) {
    +		this.handleError_('WebSocket error - click to reload');
    +	}.bind(this);
    +	this.socket_.onmessage = function(e) {
    +		this.supportsWebSocket_ = true;
    +		this.handleStatus_(JSON.parse(e.data));
    +		// Ignore the first response.
    +		this.socket_.onmessage = function(e) {
    +			var result = JSON.parse(e.data);
    +			this.handleStatus_(result);
    +			if (result.tag == this.tag_) {
    +				this.searchDone_(this.constructor.SEARCH_SESSION_CHANGE_TYPE.UPDATE, result.result);
    +			}
    +		}.bind(this);
    +	}.bind(this);
    +};
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/search_session_test.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/search_session_test.js
    new file mode 100644
    index 00000000..6fecf9ad
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/search_session_test.js
    @@ -0,0 +1,169 @@
    +/*
    +Copyright 2014 The Camlistore Authors
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	 http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +var assert = require('assert');
    +
    +goog.require('cam.SearchSession');
    +
    +
    +function MockServerConnection(response) {
    +	this.response_ = response;
    +}
    +
    +MockServerConnection.prototype.search = function(query, describe, limit, continuationToken, callback) {
    +	setImmediate(callback.bind(null, this.response_));
    +};
    +
    +
    +describe('cam.SearchSession', function() {
    +	var session = null;
    +	var response = {
    +		blobs: [
    +			{'blob': 'a'},
    +			{'blob': 'b'},
    +			{'blob': 'c'},
    +			{'blob': 'd'},
    +			{'blob': 'e'},
    +		],
    +		description: {
    +			meta: {
    +				a: {
    +					blobRef: 'a',
    +					camliType: 'file',
    +					file: {
    +						fileName: 'foo.txt',
    +					},
    +				},
    +				a2: {
    +					blobRef: 'a2',
    +					camliType: 'file',
    +					file: {
    +					},
    +				},
    +				b: {
    +					blobRef: 'b',
    +					camliType: 'permanode',
    +					permanode: {
    +						attr: {
    +							camliContent: ['a'],
    +							title: ['permanode b'],
    +						}
    +					}
    +				},
    +				b2: {
    +					blobRef: 'b2',
    +					camliType: 'permanode',
    +					permanode: {
    +						attr: {
    +							camliContent: ['a'],
    +						}
    +					}
    +				},
    +				c: {
    +					blobRef: 'c',
    +					camliType: 'permanode',
    +					permanode: {
    +						attr: {
    +						},
    +					}
    +				},
    +				d: {
    +					blobRef: 'd',
    +					camliType: 'permanode',
    +					permanode: {
    +						attr: {
    +							camliContent: ['b'],
    +						}
    +					}
    +				},
    +				e: {
    +					blobRef: 'e',
    +					camliType: 'permanode',
    +					permanode: {
    +						attr: {
    +							camliContent: ['_non_existant_'],
    +							title: 'permanode e',
    +						}
    +					}
    +				},
    +			}
    +		}
    +	};
    +
    +	before(function(done) {
    +		var currentUri = null;
    +		var query = null;
    +		session = new cam.SearchSession(new MockServerConnection(response), currentUri, query);
    +		session.addEventListener(cam.SearchSession.SEARCH_SESSION_CHANGED, function() {
    +			assert.equal(response.description.meta.a, session.getResolvedMeta('a'));
    +			done();
    +		});
    +		session.loadMoreResults();
    +	});
    +
    +	describe('#getResolvedMeta', function() {
    +		it('should resolve blobrefs correctly', function() {
    +			// a is not a permanode, so its resolved value is itself.
    +			assert.equal(response.description.meta.a, session.getResolvedMeta('a'));
    +
    +			// b is a permanode that points to a.
    +			assert.equal(response.description.meta.a, session.getResolvedMeta('b'));
    +
    +			// c is a permanode, but has no camliContent, so its resolved value is itself.
    +			assert.equal(response.description.meta.c, session.getResolvedMeta('c'));
    +
    +			// We currently only resolve one level of indirection via permanodes.
    +			assert.equal(response.description.meta.b, session.getResolvedMeta('d'));
    +
    +			// e is a permanode, but its camliContent doesn't exist. This is legitimate and can happen for a variety of reasons (e.g., during sync).
    +			assert.equal(null, session.getResolvedMeta('e'));
    +
    +			// z doesn't exist at all.
    +			assert.equal(null, session.getResolvedMeta('z'));
    +		});
    +	});
    +
    +	describe('#getTitle', function() {
    +		it('should create correct titles', function() {
    +			assert.strictEqual(response.description.meta.a.file.fileName, session.getTitle('a'));
    +			assert.strictEqual('', session.getTitle('a2'));
    +			assert.strictEqual('permanode b', session.getTitle('b'));
    +			assert.strictEqual(response.description.meta.a.file.fileName, session.getTitle('b2'));
    +			assert.strictEqual('permanode e', session.getTitle('e'));
    +		});
    +	});
    +});
    +
    +describe('cam.SearchSession', function() {
    +	var session = null;
    +	var response = {};
    +
    +	before(function() {
    +		var currentUri = null;
    +		var query = null;
    +		session = new cam.SearchSession(new MockServerConnection(response), currentUri, query);
    +	});
    +
    +	describe('new session, no results', function() {
    +		it('should not hit a null', function() {
    +			// resetData_ gives us a safe to use data_ (non null fields).
    +			assert(session.data_.blobs);
    +			assert(session.data_.description);
    +			assert(session.data_.description.meta);
    +		});
    +	});
    +
    +});
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/server_connection.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/server_connection.js
    new file mode 100644
    index 00000000..e8a28d0e
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/server_connection.js
    @@ -0,0 +1,633 @@
    +/*
    +Copyright 2013 Google Inc.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +goog.provide('cam.ServerConnection');
    +
    +goog.require('goog.string');
    +goog.require('goog.net.XhrIo');
    +goog.require('goog.Uri'); // because goog.net.XhrIo forgot to include it.
    +goog.require('goog.debug.ErrorHandler'); // because goog.net.Xhrio forgot to include it.
    +goog.require('goog.uri.utils');
    +
    +goog.require('cam.blob');
    +goog.require('cam.ServerType');
    +goog.require('cam.WorkerMessageRouter');
    +
    +// @fileoverview Connection to the blob server and API for the RPCs it provides. All blob index UI code should use this connection to contact the server.
    +// @param {cam.ServerType.DiscoveryDocument} config Discovery document for the current server.
    +// @param {Function=} opt_sendXhr Function for sending XHRs for testing.
    +// @constructor
    +cam.ServerConnection = function(config, opt_sendXhr) {
    +	this.config_ = config;
    +	this.sendXhr_ = opt_sendXhr || goog.net.XhrIo.send;
    +	this.worker_ = null;
    +};
    +
    +cam.ServerConnection.DESCRIBE_REQUEST = {
    +	// TODO(aa): This is not perfect. The describe request will return some data we don't care about:
    +	// - Properties we don't use
    +	// See: https://camlistore.org/issue/319
    +
    +	depth: 1,
    +	rules: [
    +		{
    +			attrs: ['camliContent', 'camliContentImage']
    +		},
    +		{
    +			ifCamliNodeType: 'foursquare.com:checkin',
    +			attrs: ['foursquareVenuePermanode']
    +		},
    +		{
    +			ifCamliNodeType: 'foursquare.com:venue',
    +			attrs: ['camliPath:photos'],
    +			rules: [
    +				{ attrs: ['camliPath:*'] }
    +			]
    +		}
    +	]
    +};
    +
    +cam.ServerConnection.prototype.getPermanodeWithContent = function(contentRef, success, opt_fail) {
    +	var query = {
    +		permanode: {
    +			attr: "camliContent",
    +			value: contentRef,
    +		},
    +	};
    +	var callback = function(result) {
    +		if (!result || !result.blobs || result.blobs.length == 0) {
    +			success();
    +			return;
    +		}
    +		success(result.blobs[0].blob);
    +	}
    +	this.search(query, null, null, null, callback);
    +};
    +
    +cam.ServerConnection.prototype.getWorker_ = function() {
    +	if (!this.worker_) {
    +		var r = new Date().getTime(); // For cachebusting the worker. Sigh. We need content stamping.
    +		this.worker_ = new cam.WorkerMessageRouter(new Worker('hash_worker.js?r=' + r));
    +	}
    +	return this.worker_;
    +};
    +
    +cam.ServerConnection.prototype.getConfig = function() {
    +	return this.config_;
    +};
    +
    +// @param {string} blobref blobref whose contents we want.
    +// @param {Function} success callback with data.
    +// @param {?Function} opt_fail optional failure calback
    +cam.ServerConnection.prototype.getBlobContents = function(blobref, success, opt_fail) {
    +	var path = goog.uri.utils.appendPath(
    +		this.config_.blobRoot, 'camli/' + blobref
    +	);
    +	this.sendXhr_(path,
    +		goog.bind(this.handleXhrResponseText_, this,
    +			{success: success, fail: opt_fail}
    +		)
    +	);
    +};
    +
    +// @param {goog.events.Event} e Event that triggered this
    +cam.ServerConnection.prototype.handleXhrResponseJson_ = function(callbacks, e) {
    +	var success = callbacks.success
    +	var fail = callbacks.fail
    +	var xhr = e.target;
    +	var error = !xhr.isSuccess();
    +	var result = null;
    +
    +	try {
    +		result = xhr.getResponseJson();
    +	} catch(err) {
    +		result = "Response was not valid JSON: " + xhr.getResponseText();
    +	}
    +
    +	if (error) {
    +		if (fail) {
    +			fail(result.error || result);
    +		} else {
    +			console.log('Failed XHR (JSON) in ServerConnection: ' + result.error || result);
    +		}
    +	} else {
    +		success(result);
    +	}
    +};
    +
    +// @param {Function} success callback with data.
    +// @param {?Function} opt_fail optional failure calback
    +cam.ServerConnection.prototype.discoSignRoot = function(success, opt_fail) {
    +	var path = goog.uri.utils.appendPath(this.config_.jsonSignRoot, '/camli/sig/discovery');
    +	this.sendXhr_(path, goog.bind(this.handleXhrResponseJson_, this, {success: success, fail: opt_fail}));
    +};
    +
    +// @param {function(cam.ServerType.StatusResponse)} success.
    +cam.ServerConnection.prototype.serverStatus = function(success) {
    +	var path = goog.uri.utils.appendPath(this.config_.statusRoot, 'status.json');
    +
    +	this.sendXhr_(path,
    +		goog.bind(this.handleXhrResponseJson_, this, {success: success, fail: function(msg) {
    +			console.log("serverStatus error: " + msg);
    +		}}));
    +};
    +
    +// @param {string} blobref root of the tree
    +// @param {Function} success callback with data.
    +// @param {?Function} opt_fail optional failure calback
    +cam.ServerConnection.prototype.getFileTree = function(blobref, success, opt_fail) {
    +	// TODO(mpl): do it relatively to a discovered root?
    +	var path = "./tree/" + blobref;
    +	this.sendXhr_(path, goog.bind(this.handleXhrResponseJson_, this, {success: success, fail: opt_fail}));
    +};
    +
    +
    +// @param {string} signer permanode must belong to signer.
    +// @param {string} attr searched attribute.
    +// @param {string} value value of the searched attribute.
    +// @param {Function} success.
    +// @param {Function=} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.permanodeOfSignerAttrValue = function(signer, attr, value, success, opt_fail) {
    +	var path = goog.uri.utils.appendPath(this.config_.searchRoot, 'camli/search/signerattrvalue');
    +	path = goog.uri.utils.appendParams(path,
    +		'signer', signer, 'attr', attr, 'value', value
    +	);
    +
    +	this.sendXhr_(
    +		path,
    +		goog.bind(this.handleXhrResponseJson_, this,
    +			{success: success, fail: opt_fail}
    +		)
    +	);
    +};
    +
    +// @param {string|object} query If string, will be sent as 'expression', otherwise will be sent as 'constraint'.
    +// @param {?object} opt_describe The describe property to send for the query
    +cam.ServerConnection.prototype.buildQuery = function(callerQuery, opt_describe, opt_limit, opt_continuationToken, opt_around) {
    +	var query = {
    +		// TODO(mpl): it'd be better to not ask for a sort when none is needed (less work for server),
    +		// e.g. for a plain BlobRefPrefix query.
    +		sort: "-created"
    +	};
    +
    +	if (goog.isString(callerQuery)) {
    +		query.expression = callerQuery;
    +	} else {
    +		query.constraint = callerQuery;
    +	}
    +
    +	if (opt_describe) {
    +		query.describe = opt_describe;
    +	}
    +	if (opt_limit) {
    +		query.limit = opt_limit;
    +	}
    +	if (opt_around) {
    +		query.around = opt_around;
    +	} else if (opt_continuationToken) {
    +		query.continue = opt_continuationToken;
    +	}
    +
    +	return query;
    +}
    +
    +// @param {string|object} query If string, will be sent as 'expression', otherwise will be sent as 'constraint'.
    +// @param {?object} opt_describe The describe property to send for the query
    +cam.ServerConnection.prototype.search = function(query, opt_describe, opt_limit, opt_continuationToken, callback) {
    +	var path = goog.uri.utils.appendPath(this.config_.searchRoot, 'camli/search/query');
    +	this.sendXhr_(path,
    +		goog.bind(this.handleXhrResponseJson_, this, {success: callback}),
    +		"POST", JSON.stringify(this.buildQuery(query, opt_describe, opt_limit, opt_continuationToken)));
    +};
    +
    +// Where is the target accessed via? (paths it's at)
    +// @param {string} signer owner of permanode.
    +// @param {string} target blobref of permanode we want to find paths to
    +// @param {Function} success.
    +// @param {Function=} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.pathsOfSignerTarget = function(target, success, opt_fail) {
    +	var path = goog.uri.utils.appendPath(
    +		this.config_.searchRoot, 'camli/search/signerpaths'
    +	);
    +	path = goog.uri.utils.appendParams(path, 'signer', this.config_.signing.publicKeyBlobRef, 'target', target);
    +	this.sendXhr_(path,
    +		goog.bind(this.handleXhrResponseJson_, this, {success: success, fail: opt_fail}));
    +};
    +
    +// @param {string} permanode Permanode blobref.
    +// @param {Function} success.
    +// @param {Function=} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.permanodeClaims = function(permanode, success, opt_fail) {
    +	var path = goog.uri.utils.appendPath(
    +		this.config_.searchRoot, 'camli/search/claims?permanode=' + permanode
    +	);
    +
    +	this.sendXhr_(
    +		path,
    +		goog.bind(this.handleXhrResponseJson_, this,
    +			{success: success, fail: opt_fail}
    +		)
    +	);
    +};
    +
    +// @param {Object} clearObj Unsigned object.
    +// @param {Function} success Success callback.
    +// @param {?Function} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.sign_ = function(clearObj, success, opt_fail) {
    +	var sigConf = this.config_.signing;
    +	if (!sigConf || !sigConf.publicKeyBlobRef) {
    +		this.failOrLog_(opt_fail, "Missing Camli.config.signing.publicKeyBlobRef");
    +		return;
    +	}
    +
    +	clearObj.camliSigner = sigConf.publicKeyBlobRef;
    +	var camVersion = clearObj.camliVersion;
    +	if (camVersion) {
    +		 delete clearObj.camliVersion;
    +	}
    +	var clearText = JSON.stringify(clearObj, null, "	");
    +	if (camVersion) {
    +		 clearText = "{\"camliVersion\":" + camVersion + ",\n" + clearText.substr("{\n".length);
    +	}
    +
    +	this.sendXhr_(
    +		sigConf.signHandler,
    +		goog.bind(this.handleXhrResponseText_, this,
    +			{success: success, fail: opt_fail}),
    +		"POST",
    +		"json=" + encodeURIComponent(clearText),
    +		{"Content-Type": "application/x-www-form-urlencoded"}
    +	);
    +};
    +
    +// @param {Object} signed Signed JSON blob (string) to verify.
    +// @param {Function} success Success callback.
    +// @param {?Function} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.verify_ = function(signed, success, opt_fail) {
    +	var sigConf = this.config_.signing;
    +	if (!sigConf || !sigConf.publicKeyBlobRef) {
    +		if (opt_fail) {
    +			opt_fail("Missing Camli.config.signing.publicKeyBlobRef");
    +		} else {
    +			console.log("Missing Camli.config.signing.publicKeyBlobRef");
    +		}
    +		return;
    +	}
    +	this.sendXhr_(
    +		sigConf.verifyHandler,
    +		goog.bind(this.handleXhrResponseText_, this,
    +			{success: success, fail: opt_fail}),
    +		"POST",
    +		"sjson=" + encodeURIComponent(signed),
    +		{"Content-Type": "application/x-www-form-urlencoded"}
    +	);
    +};
    +
    +// @param {goog.events.Event} e Event that triggered this
    +cam.ServerConnection.prototype.handleXhrResponseText_ = function(callbacks, e) {
    +	var fail = callbacks.fail;
    +	var xhr = e.target;
    +	var error = !xhr.isSuccess();
    +	var result = null;
    +	if (!error) {
    +		result = xhr.getResponseText();
    +		error = !result;
    +	}
    +	if (error) {
    +		if (fail) {
    +			fail(xhr.getLastError());
    +		} else {
    +			// TODO(bslatkin): Add a default failure event handler to this class.
    +			console.log('Failed XHR (text) in ServerConnection: ' + xhr.getLastError());
    +		}
    +		return;
    +	}
    +	callbacks.success(result);
    +};
    +
    +// @param {string} s String to upload.
    +// @param {Function} success Success callback.
    +// @param {?Function} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.uploadString_ = function(s, success, opt_fail) {
    +	var blobref = cam.blob.refFromString(s);
    +	var parts = [s];
    +	var bb = new Blob(parts);
    +	var fd = new FormData();
    +	fd.append(blobref, bb);
    +
    +	// TODO: hack, hard-coding the upload URL here.
    +	// Change the spec now that App Engine permits 32 MB requests
    +	// and permit a PUT request on the sha1?	Or at least let us
    +	// specify the well-known upload URL?	In cases like this, uploading
    +	// a new permanode, it's silly to even stat.
    +	this.sendXhr_(
    +		this.config_.blobRoot + "camli/upload",
    +		goog.bind(this.handleUploadString_, this,
    +			blobref,
    +			{success: success, fail: opt_fail}
    +		),
    +		"POST",
    +		fd
    +	);
    +};
    +
    +// @param {string} blobref Uploaded blobRef.
    +// @param {goog.events.Event} e Event that triggered this
    +cam.ServerConnection.prototype.handleUploadString_ = function(blobref, callbacks, e) {
    +	this.handleXhrResponseText_({
    +		success: function(resj) {
    +			if (!resj) {
    +				alert("upload failed; no response");
    +				return;
    +			}
    +			var resObj = JSON.parse(resj);
    +			if (!resObj.received || !resObj.received[0] || !resObj.received[0].blobRef) {
    +				alert("upload permanode fail, expected blobRef not in response");
    +				return;
    +			}
    +			if (callbacks.success) {
    +				callbacks.success(blobref);
    +			}
    +		},
    +		fail: callbacks.fail},
    +		e
    +	)
    +};
    +
    +cam.ServerConnection.prototype.failOrLog_ = function(fail, msg) {
    +	if (fail) {
    +		fail(msg);
    +	} else {
    +		console.log(msg);
    +	}
    +};
    +
    +// @param {Function} success Success callback.
    +// @param {?Function} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.createPermanode = function(success, opt_fail) {
    +	var json = {
    +		"camliVersion": 1,
    +		"camliType": "permanode",
    +		"random": ""+Math.random()
    +	};
    +	this.sign_(json,
    +		goog.bind(function(signed) {
    +			this.uploadString_(signed, success, opt_fail)
    +		}, this),
    +		goog.bind(function(msg) {
    +			this.failOrLog_(opt_fail, "create permanode: signing failed: " + msg);
    +		}, this)
    +	);
    +};
    +
    +// @param {string} permanode Permanode to change.
    +// @param {string} claimType What kind of claim: "add-attribute", "set-attribute"...
    +// @param {string} attribute What attribute the claim applies to.
    +// @param {string} value Attribute value.
    +// @param {Function} success Success callback.
    +// @param {?Function} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.changeAttribute_ = function(permanode, claimType, attribute, value, success, opt_fail) {
    +	var json = {
    +		"camliVersion": 1,
    +		"camliType": "claim",
    +		"permaNode": permanode,
    +		"claimType": claimType,
    +		// TODO(mpl): to (im)port.
    +		"claimDate": dateToRfc3339String(new Date()),
    +		"attribute": attribute,
    +		"value": value
    +	};
    +	this.sign_(json,
    +		goog.bind(function(signed) {
    +			this.uploadString_(signed, success, opt_fail)
    +		}, this),
    +		goog.bind(function(msg) {
    +			this.failOrLog_(opt_fail, "change attribute: signing failed: " + msg);
    +		}, this)
    +	);
    +};
    +
    +// @param {string} permanode Permanode to delete.
    +// @param {Function} success Success callback.
    +// @param {?Function} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.newDeleteClaim = function(permanode, success, opt_fail) {
    +	var json = {
    +		"camliVersion": 1,
    +		"camliType": "claim",
    +		"target": permanode,
    +		"claimType": "delete",
    +		"claimDate": dateToRfc3339String(new Date())
    +	};
    +	this.sign_(json,
    +		goog.bind(function(signed) {
    +			this.uploadString_(signed, success, opt_fail)
    +		}, this),
    +		goog.bind(function(msg) {
    +			this.failOrLog_(opt_fail, "delete attribute: signing failed: " + msg);
    +		}, this)
    +	);
    +};
    +
    +// @param {string} permanode Permanode blobref.
    +// @param {string} attribute Name of the attribute to set.
    +// @param {string} value Value to set the attribute to.
    +// @param {function(string)} success Success callback, called with blobref of uploaded file.
    +// @param {?Function} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.newSetAttributeClaim = function(permanode, attribute, value, success, opt_fail) {
    +	this.changeAttribute_(permanode, "set-attribute", attribute, value,
    +		success, opt_fail
    +	);
    +};
    +
    +
    +// @param {string} permanode Permanode blobref.
    +// @param {string} attribute Name of the attribute to add.
    +// @param {string} value Value of the added attribute.
    +// @param {function(string)} success Success callback, called with blobref of uploaded file.
    +// @param {?Function} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.newAddAttributeClaim = function(permanode, attribute, value, success, opt_fail) {
    +	this.changeAttribute_(permanode, "add-attribute", attribute, value,
    +		success, opt_fail
    +	);
    +};
    +
    +// @param {string} permanode Permanode blobref.
    +// @param {string} attribute Name of the attribute to delete.
    +// @param {string} value Value of the attribute to delete.
    +// @param {function(string)} success Success callback, called with blobref of uploaded file.
    +// @param {?Function} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.newDelAttributeClaim = function(permanode, attribute, value, success, opt_fail) {
    +	this.changeAttribute_(permanode, "del-attribute", attribute, value,
    +		success, opt_fail
    +	);
    +};
    +
    +// @param {File} file File to be uploaded.
    +// @param {function(string)} success Success callback, called with blobref of
    +// uploaded file.
    +// @param {?Function} opt_fail Optional fail callback.
    +// @param {?Function} opt_onContentsRef Optional callback to set contents during upload.
    +cam.ServerConnection.prototype.uploadFile = function(file, success, opt_fail, opt_onContentsRef) {
    +	this.getWorker_().sendMessage('ref', file, function(ref) {
    +		if (opt_onContentsRef) {
    +			opt_onContentsRef(ref);
    +		}
    +		this.camliUploadFileHelper_(file, ref, success, opt_fail);
    +	}.bind(this));
    +};
    +
    +// camliUploadFileHelper uploads the provided file with contents blobref contentsBlobRef
    +// and returns a blobref of a file blob.	It does not create any permanodes.
    +// Most callers will use camliUploadFile instead of this helper.
    +//
    +// camliUploadFileHelper only uploads chunks of the file if they don't already exist
    +// on the server. It starts by assuming the file might already exist on the server
    +// and, if so, uses an existing (but re-verified) file schema ref instead.
    +// @param {File} file File to be uploaded.
    +// @param {string} contentsBlobRef Blob ref of file as sha1'd locally.
    +// @param {function(string)} success function(fileBlobRef) of the
    +// server-validated or just-uploaded file schema blob.
    +// @param {?Function} opt_fail Optional fail callback.
    +cam.ServerConnection.prototype.camliUploadFileHelper_ = function(file, contentsBlobRef, success, opt_fail) {
    +	if (!this.config_.uploadHelper) {
    +		this.failOrLog_(opt_fail, "no uploadHelper available");
    +		return;
    +	}
    +
    +	var doUpload = goog.bind(function() {
    +		var fd = new FormData();
    +		fd.append("modtime", dateToRfc3339String(file.lastModifiedDate));
    +		fd.append("ui-upload-file-helper-form", file);
    +		this.sendXhr_(
    +			this.config_.uploadHelper,
    +			goog.bind(this.handleUpload_, this,
    +				file, contentsBlobRef, {success: success, fail: opt_fail}
    +			),
    +			"POST",
    +			fd
    +		);
    +	}, this);
    +
    +	this.findExistingFileSchemas_(
    +		contentsBlobRef,
    +		goog.bind(this.dupCheck_, this,
    +			doUpload, contentsBlobRef, success
    +		),
    +		opt_fail
    +	)
    +}
    +
    +// @param {File} file File to be uploaded.
    +// @param {string} contentsBlobRef Blob ref of file as sha1'd locally.
    +// @param {goog.events.Event} e Event that triggered this
    +cam.ServerConnection.prototype.handleUpload_ = function(file, contentsBlobRef, callbacks, e) {
    +	this.handleXhrResponseText_({
    +		success: goog.bind(function(res) {
    +			var resObj = JSON.parse(res);
    +			if (resObj.got && resObj.got.length == 1 && resObj.got[0].fileref) {
    +				var fileblob = resObj.got[0].fileref;
    +				console.log("uploaded " + contentsBlobRef + " => file blob " + fileblob);
    +				callbacks.success(fileblob);
    +			} else {
    +				this.failOrLog_(callbacks.fail, "failed to upload " + file.name + ": " + contentsBlobRef + ": " + JSON.stringify(res, null, 2));
    +			}
    +		}, this),
    +		fail: callbacks.fail},
    +		e
    +	)
    +};
    +
    +// @param {string} wholeDigestRef file digest.
    +// @param {Function} success callback with data.
    +// @param {?Function} opt_fail optional failure calback
    +cam.ServerConnection.prototype.findExistingFileSchemas_ = function(wholeDigestRef, success, opt_fail) {
    +	var path = goog.uri.utils.appendPath(this.config_.searchRoot, 'camli/search/files');
    +	path = goog.uri.utils.appendParam(path, 'wholedigest', wholeDigestRef);
    +
    +	this.sendXhr_(
    +		path,
    +		goog.bind(this.handleXhrResponseJson_, this,
    +			{success: success, fail: opt_fail}
    +		)
    +	);
    +};
    +
    +// @param {Function} doUpload fun that takes care of uploading.
    +// @param {string} contentsBlobRef Blob ref of file as sha1'd locally.
    +// @param {Function} success Success callback.
    +// @param {Object} res result from the wholedigest search.
    +cam.ServerConnection.prototype.dupCheck_ = function(doUpload, contentsBlobRef, success, res) {
    +	var remain = res.files;
    +	var checkNext = goog.bind(function(files) {
    +		if (files.length == 0) {
    +			doUpload();
    +			return;
    +		}
    +		// TODO: verify filename and other file metadata in the
    +		// file json schema match too, not just the contents
    +		var checkFile = files[0];
    +		console.log("integrity checking the reported dup " + checkFile);
    +
    +		// TODO(mpl): see about passing directly a ref of files maybe instead of a copy?
    +		// just being careful for now.
    +		this.sendXhr_(
    +			this.config_.downloadHelper + checkFile + "/?verifycontents=" + contentsBlobRef,
    +			goog.bind(this.handleVerifycontents_, this,
    +				contentsBlobRef, files.slice(), checkNext, success),
    +			"HEAD"
    +		);
    +	}, this);
    +	checkNext(remain);
    +}
    +
    +// @param {string} contentsBlobRef Blob ref of file as sha1'd locally.
    +// @param {Array.} files files to check.
    +// @param {Function} checkNext fun, recursive call.
    +// @param {Function} success Success callback.
    +// @param {goog.events.Event} e Event that triggered this
    +cam.ServerConnection.prototype.handleVerifycontents_ = function(contentsBlobRef, files, checkNext, success, e) {
    +	var xhr = e.target;
    +	var error = !(xhr.isComplete() && xhr.getStatus() == 200);
    +	var checkFile = files.shift();
    +
    +	if (error) {
    +		console.log("integrity check failed on " + checkFile);
    +		checkNext(files);
    +		return;
    +	}
    +	if (xhr.getResponseHeader("X-Camli-Contents") == contentsBlobRef) {
    +		console.log("integrity check passed on " + checkFile + "; using it.");
    +		success(checkFile);
    +	} else {
    +		checkNext(files);
    +	}
    +};
    +
    +// Format |dateVal| as specified by RFC 3339.
    +function dateToRfc3339String(dateVal) {
    +	// Return a string containing |num| zero-padded to |length| digits.
    +	var pad = function(num, length) {
    +		var numStr = "" + num;
    +		while (numStr.length < length) {
    +			numStr = "0" + numStr;
    +		}
    +		return numStr;
    +	};
    +
    +	return goog.string.subs("%s-%s-%sT%s:%s:%sZ",
    +		dateVal.getUTCFullYear(), pad(dateVal.getUTCMonth() + 1, 2), pad(dateVal.getUTCDate(), 2),
    +		pad(dateVal.getUTCHours(), 2), pad(dateVal.getUTCMinutes(), 2), pad(dateVal.getUTCSeconds(), 2));
    +};
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/server_type.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/server_type.js
    new file mode 100644
    index 00000000..339eb9c9
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/server_type.js
    @@ -0,0 +1,136 @@
    +/**
    + * @fileoverview Helpers and types for JSON objects returned by the server.
    + */
    +goog.provide('cam.ServerType');
    +
    +
    +/**
    + * @typedef {{
    + *   currentPermanode: string,
    + *   name: string,
    + *   prefix: Array.
    + * }}
    + */
    +cam.ServerType.DiscoveryRoot;
    +
    +
    +/**
    + * @typedef {{
    + *   blobRoot: string,
    + *   directoryHelper: string,
    + *   downloadHelper: string,
    + *   helpRoot: string,
    + *   jsonSignRoot: string,
    + *   ownerName: string,
    + *   publishRoots: Array.,
    + *   searchRoot: string,
    + *   statusRoot: string,
    + *   storageGeneration: string,
    + *   storageInitTime: string,
    + *   signing: cam.ServerType.SigningDiscoveryDocument,
    + *   uploadHelper: string
    + * }}
    + */
    +cam.ServerType.DiscoveryDocument;
    +
    +/**
    + * @typedef {{
    + *   publicKey: string,
    + *   publicKeyBlobRef: string,
    + *   publicKeyId: string,
    + *   signHandler: string,
    + *   verifyHandler: string
    + * }}
    + */
    +cam.ServerType.SigningDiscoveryDocument;
    +
    +/**
    + * @typedef {{
    + *   fileName: string,
    + *   mimeType: string,
    + *   size: number
    + * }}
    + */
    +cam.ServerType.IndexerFileMeta;
    +
    +
    +/**
    + * @typedef {{
    + *   title: string,
    + *   camliContent: Array.
    + * }}
    + */
    +cam.ServerType.IndexerPermanodeAttrMeta;
    +
    +
    +/**
    + * @typedef {{
    + *   attr: cam.ServerType.IndexerPermanodeAttrMeta?
    + * }}
    + */
    +cam.ServerType.IndexerPermanodeMeta;
    +
    +
    +/**
    + * @typedef {{
    + *   blobRef: string,
    + *   camliType: string,
    + *   file: cam.ServerType.IndexerFileMeta?,
    + *   mimeType: string,
    + *   permanode: cam.ServerType.IndexerPermanodeMeta?,
    + *   size: number,
    + * }}
    + */
    +cam.ServerType.IndexerMeta;
    +
    +
    +/**
    + * @typedef {Object.}
    + */
    +cam.ServerType.IndexerMetaBag;
    +
    +/**
    + * @typedef {{
    + *   blobref: string,
    + *   modtime: string,
    + *   owner: string
    + * }}
    +*/
    +cam.ServerType.SearchRecentItem;
    +
    +/**
    + * @typedef {{
    + *   recent: Array.,
    + *   meta: cam.ServerType.IndexerMetaBag
    + * }}
    +*/
    +cam.ServerType.SearchRecentResponse;
    +
    +/**
    + * @typedef {{
    + *   permanode: string
    + * }}
    +*/
    +cam.ServerType.SearchWithAttrItem;
    +
    +/**
    + * @typedef {{
    + *   withAttr: Array.,
    + *   meta: cam.ServerType.IndexerMetaBag
    + * }}
    +*/
    +cam.ServerType.SearchWithAttrResponse;
    +
    +/**
    + * @typedef {{
    + *   meta: cam.ServerType.IndexerMetaBag
    + * }}
    +*/
    +cam.ServerType.DescribeResponse;
    +
    +/**
    + * @typedef {{
    + *   version: string,
    + * }}
    +*/
    +cam.ServerType.StatusResponse;
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sidebar.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sidebar.css
    new file mode 100644
    index 00000000..157d9ff6
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sidebar.css
    @@ -0,0 +1,100 @@
    +/*
    +Copyright 2014 The Camlistore Authors.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +@import (less) "prefix-free.css";
    +
    +/* TODO: can the positioning (top: 38px) be pulled from the header.css? */
    +.cam-sidebar {
    +	width: 250px;
    +	height: 100%;
    +	position: fixed;
    +	top: 38px;
    +	right: 0;
    +
    +	background-color: #e6e6e6;
    +	color: #444;
    +
    +	.transform(translate3d(0, 0, 0));
    +	.transition-transform(100ms ease-out);
    +
    +	&.cam-sidebar-hidden {
    +		.transform(translate3d(100%, 0, 0));
    +	}
    +}
    +
    +.cam-sidebar {
    +	padding: 5px 0px;
    +}
    +
    +.cam-sidebar, .cam-sidebar-collapsible-section-header {
    +	> .header {
    +		border-bottom: 1px solid #ccc;
    +		display: inline-block;
    +		width: 100%;
    +		cursor: default;
    +		font-family: 'Open Sans', sans-serif;
    +		font-weight: 100;
    +		font-size: 14px;
    +		line-height: 38px;
    +		padding: 0 28px;
    +		text-align: left;
    +		vertical-align: middle;
    +		white-space: nowrap;
    +	}
    +
    +	> button {
    +		width: 100%;
    +		height: 38px;
    +		cursor: pointer;
    +		background: transparent;
    +		border: none;
    +		font-family: 'Open Sans', sans-serif;
    +		font-weight: 100;
    +		font-size: 14px;
    +		padding: 0 28px;
    +		position: relative;
    +		text-align: left;
    +		white-space: nowrap;
    +
    +		> i {
    +			color: #666;
    +			cursor: pointer;
    +			display: block;
    +			left: 2px;
    +			line-height: 38px;
    +			position: absolute;
    +			text-align: center;
    +			top: 0;
    +			width: 26px;
    +		}
    +
    +		&:focus {
    +			outline: none;
    +		}
    +
    +		&:hover {
    +			background: #d6d6d6;
    +		}
    +
    +		&:active {
    +			outline: none;
    +		}
    +	}
    +}
    +
    +.cam-sidebar-section {
    +	padding: 0 28px;
    +}
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sidebar.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sidebar.js
    new file mode 100644
    index 00000000..4093730b
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sidebar.js
    @@ -0,0 +1,147 @@
    +/*
    +Copyright 2014 The Camlistore Authors
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +goog.provide('cam.Sidebar');
    +
    +goog.require('goog.array');
    +goog.require('goog.object');
    +goog.require('goog.string');
    +
    +goog.require('cam.ServerConnection');
    +
    +cam.Sidebar = React.createClass({
    +	displayName: 'Sidebar',
    +
    +	propTypes: {
    +		isExpanded: React.PropTypes.bool.isRequired,
    +		header: React.PropTypes.renderable,
    +		mainControls: React.PropTypes.arrayOf(
    +			React.PropTypes.shape(
    +				{
    +					displayTitle: React.PropTypes.string.isRequired,
    +					control: React.PropTypes.renderable.isRequired,
    +				}
    +			)
    +		),
    +		selectionControls: React.PropTypes.arrayOf(React.PropTypes.renderable).isRequired,
    +		selectedItems: React.PropTypes.object.isRequired,
    +	},
    +
    +	getInitialState: function() {
    +		return {
    +			openControls: [],	// all controls that are currently 'open'
    +		};
    +	},
    +
    +	render: function() {
    +		return React.DOM.div(
    +			{
    +				className: React.addons.classSet({
    +					'cam-sidebar': true,
    +					'cam-sidebar-hidden': !this.props.isExpanded,
    +				})
    +			},
    +			this.props.header,
    +			this.props.selectionControls,
    +			this.getMainControls_()
    +		);
    +	},
    +
    +	getMainControls_: function() {
    +		return this.props.mainControls.map(
    +			function(c) {
    +				return cam.CollapsibleControl(
    +				{
    +					key: c.displayTitle,
    +					control: c.control,
    +					isOpen: this.isControlOpen_(c.displayTitle),
    +					onToggleOpen: this.handleToggleControlOpen_,
    +					title: c.displayTitle
    +				});
    +			}.bind(this)
    +		);
    +	},
    +
    +	handleToggleControlOpen_: function(displayTitle) {
    +		var currentlyOpen = this.state.openControls;
    +
    +		if(!this.isControlOpen_(displayTitle)) {
    +			currentlyOpen.push(displayTitle);
    +		} else {
    +			goog.array.remove(currentlyOpen, displayTitle);
    +		}
    +
    +		this.setState({openControls : currentlyOpen});
    +	},
    +
    +	isControlOpen_: function(displayTitle) {
    +		return goog.array.contains(this.state.openControls, displayTitle);
    +	}
    +});
    +
    +cam.CollapsibleControl = React.createClass({
    +	displayName: 'CollapsibleControl',
    +
    +	propTypes: {
    +		control: React.PropTypes.renderable.isRequired,
    +		isOpen: React.PropTypes.bool.isRequired,
    +		onToggleOpen: React.PropTypes.func,
    +		title: React.PropTypes.string.isRequired
    +	},
    +
    +	getControl_: function() {
    +		if(!this.props.control || !this.props.isOpen) {
    +			return null;
    +		}
    +
    +		return React.DOM.div(
    +			{
    +				className: 'cam-sidebar-section'
    +			},
    +			this.props.control
    +		);
    +	},
    +
    +	render: function() {
    +		return React.DOM.div(
    +			{
    +				className: 'cam-sidebar-collapsible-section-header'
    +			},
    +			React.DOM.button(
    +				{
    +					onClick: this.handleToggleOpenClick_,
    +				},
    +				React.DOM.i(
    +					{
    +						className: React.addons.classSet({
    +							'fa': true,
    +							'fa-angle-down': this.props.isOpen,
    +							'fa-angle-right': !this.props.isOpen
    +						}),
    +						key: 'toggle-sidebar-section'
    +					}
    +				),
    +				this.props.title
    +			),
    +			this.getControl_()
    +		);
    +	},
    +
    +	handleToggleOpenClick_: function(e) {
    +		e.preventDefault();
    +		this.props.onToggleOpen(this.props.title);
    +	}
    +});
    diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sigdebug.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sigdebug.js
    new file mode 100644
    index 00000000..ce47b1ed
    --- /dev/null
    +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sigdebug.js
    @@ -0,0 +1,128 @@
    +/*
    +Copyright 2011 Google Inc.
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +*/
    +
    +goog.provide('cam.DebugPage');
    +
    +goog.require('goog.dom');
    +goog.require('goog.events.EventType');
    +goog.require('goog.ui.Component');
    +
    +goog.require('cam.ServerConnection');
    +
    +// TODO(mpl): add button on index page (toolbar?) to come here.
    +// @param {cam.ServerType.DiscoveryDocument} config Global config of the current server this page is being rendered for.
    +// @param {goog.dom.DomHelper=} opt_domHelper DOM helper to use.
    +cam.DebugPage = function(config, opt_domHelper) {
    +	goog.base(this, opt_domHelper);
    +
    +	this.config_ = config;
    +	this.sigdisco_ = null;
    +	this.connection_ = new cam.ServerConnection(config);
    +
    +};
    +goog.inherits(cam.DebugPage, goog.ui.Component);
    +
    +cam.DebugPage.prototype.enterDocument = function() {
    +	cam.DebugPage.superClass_.enterDocument.call(this);
    +
    +	// set up listeners
    +	goog.events.listen(goog.dom.getElement('discobtn'),
    +		goog.events.EventType.CLICK,
    +		this.discoRoot_,
    +		false, this);
    +	goog.events.listen(goog.dom.getElement('sigdiscobtn'),
    +		goog.events.EventType.CLICK,
    +		this.discoJsonSignRoot_,
    +		false, this);
    +	goog.events.listen(goog.dom.getElement('addkeyref'),
    +		goog.events.EventType.CLICK,
    +		this.addKeyRef_,
    +		false, this);
    +	goog.events.listen(goog.dom.getElement('sign'),
    +		goog.events.EventType.CLICK,
    +		this.doSign_,
    +		false, this);
    +	goog.events.listen(goog.dom.getElement('verify'),
    +		goog.events.EventType.CLICK,
    +		this.doVerify_,
    +		false, this);
    +};
    +
    +cam.DebugPage.prototype.exitDocument = function() {
    +	cam.DebugPage.superClass_.exitDocument.call(this);
    +};
    +
    +cam.DebugPage.prototype.discoRoot_ = function(e) {
    +	var disco = "
    " + JSON.stringify(this.config_, null, 2) + "
    "; + goog.dom.getElement("discores").innerHTML = disco; +}; + +cam.DebugPage.prototype.discoJsonSignRoot_ = function() { + this.connection_.discoSignRoot( + goog.bind(function(sigdisco) { + this.sigdisco_ = sigdisco; + var disco = "
    " + JSON.stringify(sigdisco, null, 2) + "
    "; + goog.dom.getElement("sigdiscores").innerHTML = disco; + }, this) + ) +}; + +cam.DebugPage.prototype.addKeyRef_ = function() { + if (!this.sigdisco_) { + alert("must do jsonsign discovery first"); + return; + } + var clearta = goog.dom.getElement("clearjson"); + var j; + try { + j = JSON.parse(clearta.value); + } catch (x) { + alert(x); + return + } + j.camliSigner = this.sigdisco_.publicKeyBlobRef; + clearta.value = JSON.stringify(j, null, 2); +} + +cam.DebugPage.prototype.doSign_ = function() { + // We actually do not need sigdisco since sign_ will pull all the needed info from the config_ instead. But I'm leaving the check as the debug check is also a sort of demo. + if (!this.sigdisco_) { + alert("must do jsonsign discovery first"); + return; + } + var clearta = goog.dom.getElement("clearjson"); + var clearObj = JSON.parse(clearta.value); + this.connection_.sign_(clearObj, + function(response) { + goog.dom.getElement("signedjson").value = response; + } + ) +} + +cam.DebugPage.prototype.doVerify_ = function() { + // We actually do not need sigdisco since sign_ will pull all the needed info from the config_ instead. But I'm leaving the check as the debug check is also a sort of demo. + if (!this.sigdisco_) { + alert("must do jsonsign discovery first"); + return; + } + var signedta = goog.dom.getElement("signedjson"); + this.connection_.verify_(signedta.value, + function(response) { + var text = "
    " + response + "
    "; + goog.dom.getElement("verifyinfo").innerHTML = text; + } + ) +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/spinner.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/spinner.css new file mode 100644 index 00000000..3ab94aa4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/spinner.css @@ -0,0 +1,30 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +.cam-spinner { + position: relative; + background-size: 100%; + overflow: hidden; +} + +.cam-spinner>div { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + background-size: 100%; +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/spinner.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/spinner.js new file mode 100644 index 00000000..63a1b4f0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/spinner.js @@ -0,0 +1,90 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.Spinner'); + +goog.require('goog.dom'); +goog.require('goog.events.EventHandler'); +goog.require('goog.style'); +goog.require('goog.math.Coordinate'); +goog.require('goog.math.Size'); +goog.require('goog.ui.Control'); + +goog.require('cam.AnimationLoop'); +goog.require('cam.style'); + +// An indeterminite progress meter using the safe icon. +// @param {goog.dom.DomHelper} domHelper +cam.Spinner = function(domHelper) { + goog.base(this, null, this.dom_); + + this.dom_ = domHelper; + this.eh_ = new goog.events.EventHandler(this); + this.animationLoop_ = new cam.AnimationLoop(this.dom_.getWindow()); + this.currentRotation_ = 0; +}; + +goog.inherits(cam.Spinner, goog.ui.Control); + +cam.Spinner.prototype.backgroundImage = "safe-no-wheel.svg"; + +cam.Spinner.prototype.foregroundImage = "safe-wheel.svg"; + +cam.Spinner.prototype.degreesPerSecond = 500; + +// The origin the safe wheel rotates around, expressed as a fraction of the image's width and height. +cam.Spinner.prototype.wheelRotationOrigin_ = new goog.math.Coordinate(0.37, 0.505); + +cam.Spinner.prototype.createDom = function() { + this.background_ = this.dom_.createDom('div', 'cam-spinner', this.dom_.createDom('div')); + this.foreground_ = this.background_.firstChild; + + cam.style.setURLStyle(this.background_, 'background-image', this.backgroundImage); + cam.style.setURLStyle(this.foreground_, 'background-image', this.foregroundImage); + + // TODO(aa): This will need to be configurable. Not sure how makes sense yet. + var size = new goog.math.Size(75, 75); + goog.style.setSize(this.background_, size); + + // We should be able to set the origin as a percentage directly, but the browsers end up rounding differently, and we get less off-center spinning on the whole if we set this using pixels. 
+ var origin = new goog.math.Coordinate(size.width, size.height); + cam.style.setTransformOrigin( + this.foreground_, + origin.scale(this.wheelRotationOrigin_.x, this.wheelRotationOrigin_.y)); + + this.eh_.listen(this.animationLoop_, cam.AnimationLoop.FRAME_EVENT_TYPE, this.updateRotation_); + + this.decorateInternal(this.background_); +}; + +cam.Spinner.prototype.isRunning = function() { + return this.animationLoop_.isRunning(); +}; + +cam.Spinner.prototype.start = function() { + this.animationLoop_.start(); +}; + +cam.Spinner.prototype.stop = function() { + this.animationLoop_.stop(); +}; + +cam.Spinner.prototype.updateRotation_ = function(e) { + rotation = e.delay / 1000 * this.degreesPerSecond; + this.currentRotation_ += rotation; + this.currentRotation_ %= 360; + cam.style.setRotation(this.foreground_, this.currentRotation_); +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/spinner_test.html b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/spinner_test.html new file mode 100644 index 00000000..c744ef10 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/spinner_test.html @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sprited_animation.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sprited_animation.js new file mode 100644 index 00000000..f95370d7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sprited_animation.js @@ -0,0 +1,70 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.SpritedAnimation'); + +goog.require('cam.SpritedImage'); +goog.require('cam.object'); + +cam.SpritedAnimation = React.createClass({ + propTypes: { + className: React.PropTypes.string, + loopDelay: React.PropTypes.number, + interval: React.PropTypes.number, + numFrames: React.PropTypes.number.isRequired, + sheetWidth: React.PropTypes.number.isRequired, + spriteHeight: React.PropTypes.number.isRequired, + spriteWidth: React.PropTypes.number.isRequired, + src: React.PropTypes.string.isRequired, + startFrame: React.PropTypes.number, + style: React.PropTypes.object, + }, + + getInitialState: function() { + return { + index: this.props.startFrame || 0, + } + }, + + componentDidMount: function(root) { + this.scheduleFrame_(); + }, + + scheduleFrame_: function() { + var interval = function() { + if (goog.isDef(this.props.loopDelay) && this.state.index == (this.props.numFrames - 1)) { + return this.props.loopDelay; + } + if (goog.isDef(this.props.interval)) { + return this.props.interval; + } + return 30; + }; + this.timerId_ = window.setTimeout(function() { + this.setState({ + index: ++this.state.index % this.props.numFrames + }, this.scheduleFrame_); + }.bind(this), interval.call(this)); + }, + + componentWillUnmount: function() { + window.clearInterval(this.timerId_); + }, + + render: function() { + return cam.SpritedImage(cam.object.extend(this.props, {index: this.state.index})); + } +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sprited_image.js 
b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sprited_image.js new file mode 100644 index 00000000..9e7cf703 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/sprited_image.js @@ -0,0 +1,56 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.SpritedImage'); + +goog.require('goog.object'); +goog.require('goog.string'); + +goog.require('cam.object'); +goog.require('cam.reactUtil'); + +cam.SpritedImage = React.createClass({ + propTypes: { + className: React.PropTypes.string, + index: React.PropTypes.number.isRequired, + sheetWidth: React.PropTypes.number.isRequired, + spriteHeight: React.PropTypes.number.isRequired, + spriteWidth: React.PropTypes.number.isRequired, + src: React.PropTypes.string.isRequired, + style: React.PropTypes.object, + }, + + render: function() { + return ( + React.DOM.div({ + className: this.props.className, + style: cam.object.extend(this.props.style, { + height: this.props.spriteHeight, + overflow: 'hidden', + width: this.props.spriteWidth, + }) + }, + React.DOM.img({src: this.props.src, style: this.getImgStyle_()}))); + }, + + getImgStyle_: function() { + var x = this.props.index % this.props.sheetWidth; + var y = Math.floor(this.props.index / this.props.sheetWidth); + return cam.reactUtil.getVendorProps({ + transform: goog.string.subs('translate3d(%spx, %spx, 0)', -x * this.props.spriteWidth, -y * this.props.spriteHeight), + }); + } +}); diff --git 
a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/style.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/style.js new file mode 100644 index 00000000..20064067 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/style.js @@ -0,0 +1,87 @@ +/* +Copyright 2013 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.style'); +goog.provide('cam.style.ClassNameBuilder'); + +goog.require('goog.math.Coordinate'); +goog.require('goog.string'); +goog.require('goog.style'); + +// Returns |url| wrapped in url() so that it can be used as a CSS property value. +// @param {string} url +// @returns {string} +cam.style.getURLValue = function(url) { + return goog.string.subs('url(%s)', url); +}; + +// Sets a style property to a URL value. +// @param {Element} elm +// @param {string} dashedCSSProperty The CSS property to set, formatted with dashes, in the CSS style, not camelCase. +// @param {string} url +cam.style.setURLStyle = function(elm, dashedCSSProperty, url) { + goog.style.setStyle(elm, dashedCSSProperty, cam.style.getURLValue(url)); +}; + +// @param {Element} elm +// @param {goog.math.Coordinate} origin +// @param {string=} opt_unit The CSS units the origin is in. If unspecified, defaults to pixels. 
+cam.style.setTransformOrigin = function(elm, origin, opt_unit) { + var unit = opt_unit || 'px'; + goog.style.setStyle(elm, 'transform-origin', goog.string.subs('%s%s %s%s', origin.x, unit, origin.y, unit)); +}; + +// Note that this currently clears any previous CSS transform. Currently we only +// needs to support rotate(). +// @param {Element} elm +// @param {number} degrees +cam.style.setRotation = function(elm, degrees) { + goog.style.setStyle(elm, 'transform', goog.string.subs('rotate(%sdeg)', degrees)); +}; + + +// Utility to build a space-separated className property. +cam.style.ClassNameBuilder = function() { + this.names_ = {}; +}; + +// Maybe add the specified class. +// @param {?string} name Class to add. If falsey, not added. +// @param {boolean=} yes Whether to add. If unspecified or falsey, not added. +// @return {cam.style.ClassNameBuilder} +cam.style.ClassNameBuilder.prototype.add = function(name, yes) { + if (!name) { + return this; + } + + if (!goog.isDef(yes)) { + yes = true; + } + + if (yes) { + this.names_[name] = true; + } else { + delete this.names_[name]; + } + + return this; +}; + +// Return the space-separated className. +// @return {string} +cam.style.ClassNameBuilder.prototype.build = function() { + return goog.object.getKeys(this.names_).join(' '); +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/tags_control.css b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/tags_control.css new file mode 100644 index 00000000..9f28aca5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/tags_control.css @@ -0,0 +1,114 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +@import (less) "prefix-free.css"; + +@color-button-border: #39463C; +@color-button-partial: #eee; +@color-button-full: #81A18A; +@color-button-hover: #576D5D; + +@control-width: 300px; + +.cam-addtagsinput-form { + margin: 5px 0; + + > input { + width: 100%; + padding: 0; + } + + > div { + margin: 3px; + color: red; + font-size: 12px; + font-style: italic; + } +} + +.cam-edittagscontrol-main { + max-height: 300px; + overflow-y: auto; +} + +.cam-edittagscontrol-button-group { + margin: 3px; + display: inline-block; + + > button { + font-size: 13px; + border: 1px solid @color-button-border; + } + + > button:active:enabled { + position: relative; + top: 1px; + } +} + +.cam-edittagscontrol-button-all-tagged { + background-color: @color-button-full; + color: #fff; + padding: 2px 7px 2px 10px; + margin-right: 0px; + margin-left: 0px; + + border-top-left-radius: 10px; + border-bottom-left-radius: 10px; + border-top-right-radius: 0px; + border-bottom-right-radius: 0px; + + text-shadow: 0px 1px 0px #2f6627; +} + +.cam-edittagscontrol-button-some-tagged { + background-color: @color-button-partial; + padding: 2px 10px; + + margin-right: 0px; + margin-left: 0px; + + cursor: pointer; + + border-top-left-radius: 10px; + border-bottom-left-radius: 10px; + border-top-right-radius: 0px; + border-bottom-right-radius: 0px; + + &:hover:enabled { + background-color: @color-button-full; + color: #ffffff; + } +} + +.cam-edittagscontrol-button-remove-tag { + background-color: @color-button-full; + color: #fff; + margin-left: -1px; + margin-right: 0px; + padding: 2px 10px 2px 7px; + + 
cursor: pointer; + + border-top-left-radius: 0px; + border-bottom-left-radius: 0px; + border-top-right-radius: 10px; + border-bottom-right-radius: 10px; + + &:hover:enabled { + background-color: @color-button-hover; + } +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/tags_control.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/tags_control.js new file mode 100644 index 00000000..e3321c1a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/tags_control.js @@ -0,0 +1,340 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ENHANCEMENTS: +// should the control have a hot-key to launch? +// should the control be draggable within the window? Is there a better strategy for not hiding permanodes you want to select (@see: http://jsfiddle.net/Af9Jt/2/) +// discuss: create type-ahead list of existing tags for add tag input (this will require mods to the supporting service - likely creation of attribute index?) 
+ +goog.provide('cam.TagsControl'); + +goog.require('goog.array'); +goog.require('goog.labs.Promise'); +goog.require('goog.object'); +goog.require('goog.Uri'); + +goog.require('cam.permanodeUtils'); +goog.require('cam.reactUtil'); +goog.require('cam.ServerConnection'); + +cam.TagsControl = React.createClass({ + displayName: 'TagsControl', + + propTypes: { + selectedItems: React.PropTypes.object.isRequired, + searchSession: React.PropTypes.shape({getMeta:React.PropTypes.func.isRequired}), + serverConnection: React.PropTypes.instanceOf(cam.ServerConnection).isRequired, + }, + + doesBlobHaveTag: function(blobref, tag) { + var blobmeta = this.props.searchSession.getMeta(blobref); + + if (blobmeta && blobmeta.camliType == 'permanode') { + var tags = blobmeta.permanode.attr.tag; + + if (tags) { + return goog.array.contains(tags, tag); + } + } + + return false; + }, + + executePromises: function(componentId, promises, callbackSuccess) { + goog.labs.Promise.all(promises).thenCatch(function(e) { + console.error('%s: error executing promises: %s', componentId, e); + alert('The system encountered an error updating tags: ' + e); + }).then(function(results) { + if (results) { + console.log('%s: successfully completed %d of %d promises', componentId, results.length, promises.length); + + if (callbackSuccess) { + callbackSuccess(); + } + } else { + // TODO: I'm not sure this is ever reached, but keep for now and monitor + console.error('%s: results object is empty', componentId); + } + }).then(function() { + console.log('%s: operation complete', componentId); + }); + }, + + render: function() { + var props = this.props; + var blobrefs = goog.object.getKeys(props.selectedItems); + var blobs = blobrefs.map(function(blobref) { + return props.searchSession.getMeta(blobref); + }); + + return React.DOM.div( + { + className: 'cam-tagscontrol-main' + }, + React.DOM.div( + { + className: 'cam-tagscontrol-header' + } + ), + cam.AddTagsInput( + { + blobrefs: blobrefs, + serverConnection: 
this.props.serverConnection, + doesBlobHaveTag: this.doesBlobHaveTag, + executePromises: this.executePromises + } + ), + cam.EditTagsControl( + { + blobs: blobs, + blobrefs: blobrefs, + serverConnection: this.props.serverConnection, + doesBlobHaveTag: this.doesBlobHaveTag, + executePromises: this.executePromises + } + ) + ); + } +}); + +cam.AddTagsInput = React.createClass({ + displayName: 'AddTagsInput', + + PLACEHOLDER: 'Add tag(s) [val1,val2,...]', + + propTypes: { + blobrefs: React.PropTypes.array.isRequired, + serverConnection: React.PropTypes.instanceOf(cam.ServerConnection).isRequired, + doesBlobHaveTag: React.PropTypes.func.isRequired, + executePromises: React.PropTypes.func.isRequired + }, + + getInitialState: function() { + return { + inputValue: null, + statusMessage: null + }; + }, + + componentDidMount: function() { + this.getInputNode().focus(); + }, + + getInputNode: function() { + return this.refs['inputField'].getDOMNode(); + }, + + handleOnSubmit_: function(e) { + e.preventDefault(); + + var inputVal = this.getInputNode().value; + if (goog.string.isEmpty(inputVal)) { + this.setState({statusMessage: 'Please provide at least one tag value'}); + } else { + var tags = inputVal.split(',').map(function(s) { return s.trim(); }); + if (tags.some(function(t) { return !t })) { + this.setState({statusMessage: 'At least one invalid value was supplied'}); + } else { + this.executeAddTags_(tags); + } + } + }, + + handleOnChange_: function(e) { + this.setState({statusMessage: null}); + this.setState({inputValue: e.target.value}); + }, + + handleOnFocus_: function(e) { + this.setState({statusMessage: null}); + }, + + handleAddSuccess_: function() { + this.setState({inputValue: ''}); + }, + + executeAddTags_: function(tags) { + var blobrefs = this.props.blobrefs; + var doesBlobHaveTag = this.props.doesBlobHaveTag; + var sc = this.props.serverConnection; + var promises = []; + + blobrefs.forEach(function(pm) { + tags.forEach(function(tag) { + if 
(!doesBlobHaveTag(pm, tag)) { + promises.push(new goog.labs.Promise(sc.newAddAttributeClaim.bind(sc, pm, 'tag', tag))); + } + }); + }); + + this.props.executePromises('AddTag', promises, this.handleAddSuccess_); + }, + + getStatusMessageItem_: function() { + if (!this.state.statusMessage) { + return null; + } + + return React.DOM.div({}, this.state.statusMessage); + }, + + render: function() { + return React.DOM.form( + { + className: 'cam-addtagsinput-form', + onSubmit: this.handleOnSubmit_, + }, + React.DOM.input( + { + onChange: this.handleOnChange_, + onFocus: this.handleOnFocus_, + placeholder: this.PLACEHOLDER, + ref: 'inputField', + value: this.state.inputValue, + } + ), + this.getStatusMessageItem_() + ); + } +}); + +cam.EditTagsControl = React.createClass({ + displayName: 'EditTagsControl', + + propTypes: { + blobs: React.PropTypes.array.isRequired, + blobrefs: React.PropTypes.array.isRequired, + serverConnection: React.PropTypes.instanceOf(cam.ServerConnection).isRequired, + doesBlobHaveTag: React.PropTypes.func.isRequired, + executePromises: React.PropTypes.func.isRequired + }, + + handleApplyTag_: function(e) { + e.preventDefault(); + var tag = e.target.value; + this.executeApplyTag_(tag); + }, + + handleRemoveTag_: function(e) { + e.preventDefault(); + var tag = e.target.value; + this.executeRemoveTag_(tag); + }, + + executeApplyTag_: function(tag) { + var blobrefs = this.props.blobrefs; + var doesBlobHaveTag = this.props.doesBlobHaveTag; + var sc = this.props.serverConnection; + var promises = []; + + blobrefs.forEach(function(pm) { + if (!doesBlobHaveTag(pm, tag)) { + promises.push(new goog.labs.Promise(sc.newAddAttributeClaim.bind(sc, pm, 'tag', tag))); + } + }); + + this.props.executePromises('ApplyTag', promises); + }, + + executeRemoveTag_: function(tag) { + var blobrefs = this.props.blobrefs; + var doesBlobHaveTag = this.props.doesBlobHaveTag; + var sc = this.props.serverConnection; + var promises = []; + + blobrefs.forEach(function(pm) { + if 
(doesBlobHaveTag(pm, tag)) { + promises.push(new goog.labs.Promise(sc.newDelAttributeClaim.bind(sc, pm, 'tag', tag))); + } + }); + + this.props.executePromises('DeleteTag', promises); + }, + + getApplyTagButton_: function(numBlobs, tag, allTags) { + var totalHits = allTags[tag]; + if (totalHits == numBlobs) { + return React.DOM.button( + { + key: 'apply-tag-' + tag, + className: 'cam-edittagscontrol-button-all-tagged', + disabled: true + }, + tag + ); + } + + return React.DOM.button( + { + key: 'apply-tag-' + tag, + className: 'cam-edittagscontrol-button-some-tagged', + title: 'Apply tag to all selected items', + onClick: this.handleApplyTag_, + value: tag + }, + tag + ); + }, + + render: function() { + var tagControls = []; + var allTags = {}; + + var numBlobs = this.props.blobs.length; + + this.props.blobs.forEach(function(blobmeta) { + if (blobmeta && blobmeta.camliType == 'permanode') { + var tags = blobmeta.permanode.attr.tag; + if (tags) { + tags.forEach(function(tag) { + if (!allTags.hasOwnProperty(tag)) { + allTags[tag] = 0; + } + ++allTags[tag]; + }); + } + } else { + console.log('EditTagsControl: blob not a permanode!'); + } + }); + + goog.object.getKeys(allTags).sort().forEach(function(tag) { + tagControls.push(React.DOM.div( + { + className: 'cam-edittagscontrol-button-group' + }, + this.getApplyTagButton_(numBlobs, tag, allTags), + React.DOM.button( + { + key:'del-tag-' + tag, + title: 'Remove tag from all selected items', + className: 'cam-edittagscontrol-button-remove-tag', + onClick: this.handleRemoveTag_, + value: tag + }, + 'x' + ) + )); + }.bind(this)); + + return React.DOM.div( + { + className: 'cam-edittagscontrol-main', + }, + tagControls + ); + } +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/target.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/target.svg new file mode 100644 index 00000000..1fe9f0a1 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/target.svg @@ -0,0 +1,85 @@ + + + + + + + + + + + + image/svg+xml + + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/thumber.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/thumber.js new file mode 100644 index 00000000..dc683961 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/thumber.js @@ -0,0 +1,65 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.Thumber'); + +goog.require('goog.string'); + +// Utility to efficiently choose thumbnail URLs for use by the UI. +// +// Sizes are bucketized for cache friendliness. Also, the last requested size is remembered, and if the requested size is smaller than the last size, then we continue using the old URL. +cam.Thumber = function(pathname, opt_aspect) { + this.pathname_ = pathname; + this.lastHeight_ = 0; + this.aspect_ = opt_aspect || 1; +}; + +// We originally just used powers of 2, but we need sizes between 200 and 400 all the time in the UI and it seemed wasteful to jump to 512. Having an explicit list will make it easier to tune the buckets more in the future if necessary. 
+cam.Thumber.SIZES = [64, 128, 256, 375, 500, 750, 1000, 1500, 2000]; + +cam.Thumber.fromImageMeta = function(imageMeta) { + return new cam.Thumber(goog.string.subs('thumbnail/%s/%s', imageMeta.blobRef, (imageMeta.file && imageMeta.file.fileName) || imageMeta.blobRef + '.jpg'), + imageMeta.image.width / imageMeta.image.height); +}; + +// @param {number|goog.math.Size} minSize The minimum size of the required thumbnail. If this is a number, it is the minimum height. If it is goog.math.Size, then it is the min size of both dimensions. +cam.Thumber.prototype.getSrc = function(minSize) { + var minWidth, minHeight; + if (typeof minSize == 'number') { + minHeight = minSize; + minWidth = 0; + } else { + minWidth = minSize.width; + minHeight = minSize.height; + } + + this.lastHeight_ = this.getSizeToRequest_(minWidth, minHeight); + return goog.string.subs('%s?mh=%s&tv=%s', this.pathname_, this.lastHeight_, goog.global.CAMLISTORE_CONFIG ? goog.global.CAMLISTORE_CONFIG.thumbVersion : 1); +}; + +cam.Thumber.prototype.getSizeToRequest_ = function(minWidth, minHeight) { + if (this.lastHeight_ >= minHeight && ((this.lastHeight_ * this.aspect_) >= minWidth)) { + return this.lastHeight_; + } + var newHeight; + for (var i = 0; i < cam.Thumber.SIZES.length; i++) { + newHeight = cam.Thumber.SIZES[i]; + if (newHeight >= minHeight && ((newHeight * this.aspect_) >= minWidth)) { + break; + } + } + return newHeight; +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/thumber_test.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/thumber_test.js new file mode 100644 index 00000000..099df549 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/thumber_test.js @@ -0,0 +1,62 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +var assert = require('assert'); + +goog.require('goog.math.Size'); +goog.require('goog.Uri'); + +goog.require('cam.Thumber'); + + +describe('cam.Thumber', function() { + describe('#getSrc', function() { + it('it should bucketize properly', function() { + var thumber = new cam.Thumber('foo.png'); + assert.equal(128, goog.Uri.parse(thumber.getSrc(100)).getParameterValue('mh')); + assert.equal(128, goog.Uri.parse(thumber.getSrc(128)).getParameterValue('mh')); + assert.equal(256, goog.Uri.parse(thumber.getSrc(129)).getParameterValue('mh')); + assert.equal(256, goog.Uri.parse(thumber.getSrc(256)).getParameterValue('mh')); + }); + + it('should max out at a sane size', function() { + var thumber = new cam.Thumber('foo.png'); + var maxSize = cam.Thumber.SIZES[cam.Thumber.SIZES.length - 1]; + assert.equal(maxSize, goog.Uri.parse(thumber.getSrc(1999)).getParameterValue('mh')); + assert.equal(maxSize, goog.Uri.parse(thumber.getSrc(2000)).getParameterValue('mh')); + assert.equal(maxSize, goog.Uri.parse(thumber.getSrc(2001)).getParameterValue('mh')); + }); + + it('should only increase in size, never decrease', function() { + var thumber = new cam.Thumber('foo.png'); + assert.equal(64, goog.Uri.parse(thumber.getSrc(50)).getParameterValue('mh')); + assert.equal(64, goog.Uri.parse(thumber.getSrc(64)).getParameterValue('mh')); + assert.equal(128, goog.Uri.parse(thumber.getSrc(65)).getParameterValue('mh')); + assert.equal(128, goog.Uri.parse(thumber.getSrc(50)).getParameterValue('mh')); + assert.equal(256, goog.Uri.parse(thumber.getSrc(129)).getParameterValue('mh')); + }); + + 
it('should handle Size objects properly', function() { + var thumber = new cam.Thumber('foo.png', 2); + assert.equal(128, goog.Uri.parse(thumber.getSrc(new goog.math.Size(100, 100))).getParameterValue('mh')); + thumber = new cam.Thumber('foo.png', 0.5); + assert.equal(256, goog.Uri.parse(thumber.getSrc(new goog.math.Size(100, 100))).getParameterValue('mh')); + + assert.equal(256, goog.Uri.parse(thumber.getSrc(new goog.math.Size(128, 100))).getParameterValue('mh')); + assert.equal(375, goog.Uri.parse(thumber.getSrc(new goog.math.Size(129, 100))).getParameterValue('mh')); + }); + }); +}); diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/trash.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/trash.svg new file mode 100644 index 00000000..b7d534c3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/trash.svg @@ -0,0 +1,147 @@ + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/twitter-logo.png b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/twitter-logo.png new file mode 100644 index 00000000..1b1562d4 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/twitter-logo.png differ diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/ui_test.go b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/ui_test.go new file mode 100644 index 00000000..2c721d5c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/ui_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2014 The Camlistore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ui + +import ( + "testing" + + "camlistore.org/pkg/misc/closure/jstest" +) + +func TestJS(t *testing.T) { + jstest.TestCwd(t) +} diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/up.svg b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/up.svg new file mode 100644 index 00000000..6d8a0ff4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/up.svg @@ -0,0 +1,82 @@ + + + + + + + + + + + + image/svg+xml + + + + + + + + + diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/worker_message_router.js b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/worker_message_router.js new file mode 100644 index 00000000..f5e06382 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/worker_message_router.js @@ -0,0 +1,101 @@ +/* +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +goog.provide('cam.WorkerMessageRouter'); + +goog.require('goog.string'); + +// Convenience for sending request/response style messages to and from workers. 
+// @param {!Worker} worker The DOM worker to wrap. +// @constructor +cam.WorkerMessageRouter = function(worker) { + this.worker_ = worker; + this.nextMessageId_ = 1; + + // name->handler - See registerHandler() + // @type Object. + this.handlers_ = {}; + + // messageid->callback - See sendMessage() + // @type Object. + this.pendingMessages_ = {}; + + this.worker_.addEventListener('message', this.handleMessage_.bind(this)); +}; + +// Send a message over the worker, optionally expecting a response. +// @param {!string} name The name of the message to send. +// @param {!*} msg The message content +// @param {?function(*)} opt_callback The function to receive the response. +cam.WorkerMessageRouter.prototype.sendMessage = function(name, msg, opt_callback) { + var messageId = 0; + if (opt_callback) { + messageId = this.nextMessageId_++; + this.pendingMessages_[messageId] = opt_callback; + } + this.worker_.postMessage({ + messageId: messageId, + name: name, + message: msg + }); +}; + +// Registers a function to handle a particular named message type. +// @param {!string} name The name of the message type to handle. +// @param {!function(*, function(*))} handler The function to call to return the reply to the client. 
+cam.WorkerMessageRouter.prototype.registerHandler = function(name, handler) { + this.handlers_[name] = handler; +}; + +cam.WorkerMessageRouter.prototype.handleMessage_ = function(e) { + if (!goog.isObject(e.data) || !goog.isDef(e.data.messageId)) { + return; + } + + if (goog.isDef(e.data.name)) { + this.handleRequest_(e.data); + } else { + this.handleReply_(e.data); + } +}; + +cam.WorkerMessageRouter.prototype.handleRequest_ = function(request) { + var handler = this.handlers_[request.name]; + if (!handler) { + throw new Error(goog.string.subs('No registered handler with name: %s', request.name)); + } + + var sendReply = function(reply) { + if (!request.messageId) { + return; + } + this.worker_.postMessage({ + messageId: request.messageId, + message: reply + }); + }.bind(this); + + handler(request.message, sendReply); +}; + +cam.WorkerMessageRouter.prototype.handleReply_ = function(reply) { + var callback = this.pendingMessages_[reply.messageId]; + if (!callback) { + throw new Error('Could not find callback for pending message: %s', reply.messageId); + } + delete this.pendingMessages_[reply.messageId]; + callback(reply.message); +}; diff --git a/vendor/github.com/camlistore/camlistore/server/camlistored/ui/wsdebug.html b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/wsdebug.html new file mode 100644 index 00000000..8cf98359 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/camlistored/ui/wsdebug.html @@ -0,0 +1,74 @@ + + + + + + + + + +

    websocket debug

    + +
    +
    + + + diff --git a/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/README b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/README new file mode 100644 index 00000000..4717393c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/README @@ -0,0 +1,5 @@ +This was an early App Engine blobserver-only implementation in Python. It had no +index, no search, no UI, no crypto, etc. + +The blob server in this directory is no longer actively developed, superseded +by the main Go implementation, which can now run on App Engine. diff --git a/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/app.yaml b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/app.yaml new file mode 100644 index 00000000..570f6dd1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/app.yaml @@ -0,0 +1,27 @@ +application: camlistore +version: 1 +api_version: 1 +runtime: python + +handlers: +- url: /remote_api + script: $PYTHON_LIB/google/appengine/ext/remote_api/handler.py + login: admin + +# Upload completion URL must not be accessible by any users. Only by +# going through Blobstore API upload URL. +- url: /upload_complete + login: admin + script: main.py + +- url: /js + static_dir: ../../clients/js + +- url: /static + static_dir: static + +# off for now: +# secure: always + +- url: .* + script: main.py diff --git a/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/config.py b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/config.py new file mode 100644 index 00000000..7a5a5b6c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/config.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python + +# TODO(bslatkin): Do something with this password. +# Used for Basic Auth over HTTPS. 
+PASSWORD = 'foo' + +MAX_UPLOAD_SIZE = 2 * 1024 * 1024 diff --git a/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/index.yaml b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/index.yaml new file mode 100644 index 00000000..de27a470 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/index.yaml @@ -0,0 +1,9 @@ +# AUTOGENERATED + +# This index.yaml is automatically updated whenever the dev_appserver +# detects that a new type of query is run. If you want to manage the +# index.yaml file manually, remove the above marker line (the line +# saying "# AUTOGENERATED"). If you want to manage some indexes +# manually, move them above the marker line. The index.yaml file is +# automatically uploaded to the admin console when you next deploy +# your application using appcfg.py. diff --git a/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/main.py b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/main.py new file mode 100644 index 00000000..788e1e29 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/main.py @@ -0,0 +1,346 @@ +#!/usr/bin/env python +# +# Camlistore blob server for App Engine. +# +# Derived from Brad's Brackup-gae utility: +# http://github.com/bradfitz/brackup-gae-server +# +# Copyright 2010 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Upload server for camlistore. 
+ +To test: + +# Stat -- 200 response +curl -v \ + -d camliversion=1 \ + http://localhost:8080/camli/stat + +# Upload -- 200 response +curl -v -L \ + -F sha1-126249fd8c18cbb5312a5705746a2af87fba9538=@./test_data.txt \ + + +# Put with bad blob_ref parameter -- 400 response +curl -v -L \ + -F sha1-22a7fdd575f4c3e7caa3a55cc83db8b8a6714f0f=@./test_data.txt \ + + +# Get present -- the blob +curl -v http://localhost:8080/camli/\ +sha1-126249fd8c18cbb5312a5705746a2af87fba9538 + +# Get missing -- 404 +curl -v http://localhost:8080/camli/\ +sha1-22a7fdd575f4c3e7caa3a55cc83db8b8a6714f0f + +# Check present -- 200 with only headers +curl -I http://localhost:8080/camli/\ +sha1-126249fd8c18cbb5312a5705746a2af87fba9538 + +# Check missing -- 404 with empty list response +curl -I http://localhost:8080/camli/\ +sha1-22a7fdd575f4c3e7caa3a55cc83db8b8a6714f0f + +# List -- 200 with list of blobs (just one) +curl -v http://localhost:8080/camli/enumerate-blobs&limit=1 + +# List offset -- 200 with list of no blobs +curl -v http://localhost:8080/camli/enumerate-blobs?after=\ +sha1-126249fd8c18cbb5312a5705746a2af87fba9538 + +""" + +import cgi +import hashlib +import logging +import urllib +import wsgiref.handlers + +from google.appengine.ext import blobstore +from google.appengine.ext import db +from google.appengine.ext import webapp +from google.appengine.ext.webapp import blobstore_handlers + +import config + + +class Blob(db.Model): + """Some content-addressable blob. + + The key is the algorithm, dash, and the lowercase hex digest: + "sha1-f1d2d2f924e986ac86fdf7b36c94bcdf32beec15" + """ + + # The actual bytes. + blob = blobstore.BlobReferenceProperty(indexed=False) + + # Size. (already in the blobinfo, but denormalized for speed) + size = db.IntegerProperty(indexed=False) + + +class HelloHandler(webapp.RequestHandler): + """Present ourselves to the world.""" + + def get(self): + self.response.out.write('Hello! This is an AppEngine Camlistore ' + 'blob server.

    ') + self.response.out.write('js frontend') + + +class ListHandler(webapp.RequestHandler): + """Return chunks that the server has.""" + + def get(self): + after_blob_ref = self.request.get('after') + limit = max(1, min(1000, int(self.request.get('limit') or 1000))) + query = Blob.all().order('__key__') + if after_blob_ref: + query.filter('__key__ >', db.Key.from_path(Blob.kind(), after_blob_ref)) + blob_ref_list = query.fetch(limit) + + self.response.headers['Content-Type'] = 'text/javascript' + out = [ + '{\n' + ' "blobs": [' + ] + if blob_ref_list: + out.extend([ + '\n ', + ',\n '.join( + '{"blobRef": "%s", "size": %d}' % + (b.key().name(), b.size) for b in blob_ref_list), + '\n ', + ]) + if blob_ref_list and len(blob_ref_list) == limit: + out.append( + '],' + '\n "continueAfter": "%s"\n' + '}' % blob_ref_list[-1].key().name()) + else: + out.append( + ']\n' + '}' + ) + self.response.out.write(''.join(out)) + + +class GetHandler(blobstore_handlers.BlobstoreDownloadHandler): + """Gets a blob with the given ref.""" + + def head(self, blob_ref): + self.get(blob_ref) + + def get(self, blob_ref): + blob = Blob.get_by_key_name(blob_ref) + if not blob: + self.error(404) + return + self.send_blob(blob.blob, 'application/octet-stream') + + +class StatHandler(webapp.RequestHandler): + """Handler to return a URL for a script to get an upload URL.""" + + def stat_key(self): + return "stat" + + def get(self): + self.handle() + + def post(self): + self.handle() + + def handle(self): + if self.request.get('camliversion') != '1': + self.response.headers['Content-Type'] = 'text/plain' + self.response.out.write('Bad parameter: "camliversion"') + self.response.set_status(400) + return + + blob_ref_list = [] + for key, value in self.request.params.items(): + if not key.startswith('blob'): + continue + try: + int(key[len('blob'):]) + except ValueError: + logging.exception('Bad parameter: %s', key) + self.response.headers['Content-Type'] = 'text/plain' + 
self.response.out.write('Bad parameter: "%s"' % key) + self.response.set_status(400) + return + else: + blob_ref_list.append(value) + + key_name = self.stat_key() + + self.response.headers['Content-Type'] = 'text/javascript' + out = [ + '{\n' + ' "maxUploadSize": %d,\n' + ' "uploadUrl": "%s",\n' + ' "uploadUrlExpirationSeconds": 600,\n' + ' "%s": [\n' + % (config.MAX_UPLOAD_SIZE, + blobstore.create_upload_url('/upload_complete'), + key_name) + ] + + already_have = db.get([ + db.Key.from_path(Blob.kind(), b) for b in blob_ref_list]) + if already_have: + out.extend([ + '\n ', + ',\n '.join( + '{"blobRef": "%s", "size": %d}' % + (b.key().name(), b.size) for b in already_have if b is not None), + '\n ', + ]) + out.append( + ']\n' + '}' + ) + self.response.out.write(''.join(out)) + + +class PostUploadHandler(StatHandler): + + def stat_key(self): + return "received" + + +class UploadHandler(blobstore_handlers.BlobstoreUploadHandler): + """Handle blobstore post, as forwarded by notification agent.""" + + def compute_blob_ref(self, hash_func, blob_key): + """Computes the blob ref for a blob stored using the given hash function. + + Args: + hash_func: The name of the hash function (sha1, md5) + blob_key: The BlobKey of the App Engine blob containing the blob's data. + + Returns: + A newly computed blob_ref for the data. + """ + hasher = hashlib.new(hash_func) + last_index = 0 + while True: + data = blobstore.fetch_data( + blob_key, last_index, last_index + blobstore.MAX_BLOB_FETCH_SIZE - 1) + if not data: + break + hasher.update(data) + last_index += len(data) + + return '%s-%s' % (hash_func, hasher.hexdigest()) + + def store_blob(self, blob_ref, blob_info, error_messages): + """Store blob information. + + Writes a Blob to the datastore for the uploaded file. + + Args: + blob_ref: The file that was uploaded. + upload_file: List of BlobInfo records representing the uploads. + error_messages: Empty list for storing error messages to report to user. 
+ """ + if not blob_ref.startswith('sha1-'): + error_messages.append('Only sha1 supported for now.') + return + + if len(blob_ref) != (len('sha1-') + 40): + error_messages.append('Bogus blobRef.') + return + + found_blob_ref = self.compute_blob_ref('sha1', blob_info.key()) + if blob_ref != found_blob_ref: + error_messages.append('Found blob ref %s, expected %s' % + (found_blob_ref, blob_ref)) + return + + def txn(): + logging.info('Saving blob "%s" with size %d', blob_ref, blob_info.size) + blob = Blob(key_name=blob_ref, blob=blob_info.key(), size=blob_info.size) + blob.put() + db.run_in_transaction(txn) + + def post(self): + """Do upload post.""" + error_messages = [] + blob_info_dict = {} + + for key, value in self.request.params.items(): + if isinstance(value, cgi.FieldStorage): + if 'blob-key' in value.type_options: + blob_info = blobstore.parse_blob_info(value) + blob_info_dict[value.name] = blob_info + logging.info("got blob: %s" % value.name) + self.store_blob(value.name, blob_info, error_messages) + + if error_messages: + logging.error('Upload errors: %r', error_messages) + blobstore.delete(blob_info_dict.values()) + self.response.set_status(303) + # TODO: fix up this format + self.response.headers.add_header("Location", '/error?%s' % '&'.join( + 'error_message=%s' % urllib.quote(m) for m in error_messages)) + else: + query = ['/nonstandard/upload_complete?camliversion=1'] + query.extend('blob%d=%s' % (i + 1, k) + for i, k in enumerate(blob_info_dict.iterkeys())) + self.response.set_status(303) + self.response.headers.add_header("Location", str('&'.join(query))) + + +class ErrorHandler(webapp.RequestHandler): + """The blob put failed.""" + + def get(self): + self.response.headers['Content-Type'] = 'text/plain' + self.response.out.write('\n'.join(self.request.get_all('error_message'))) + self.response.set_status(400) + + +class DebugUploadForm(webapp.RequestHandler): + def get(self): + self.response.headers['Content-Type'] = 'text/html' + uploadurl = 
blobstore.create_upload_url('/upload_complete') + self.response.out.write('

    ' % uploadurl) + self.response.out.write('') + self.response.out.write('
    ') + + +APP = webapp.WSGIApplication( + [ + ('/', HelloHandler), + ('/debug/upform', DebugUploadForm), + ('/camli/enumerate-blobs', ListHandler), + ('/camli/stat', StatHandler), + ('/camli/([^/]+)', GetHandler), + ('/nonstandard/upload_complete', PostUploadHandler), + ('/upload_complete', UploadHandler), # Admin only. + ('/error', ErrorHandler), + ], + debug=True) + + +def main(): + wsgiref.handlers.CGIHandler().run(APP) + + +if __name__ == '__main__': + main() diff --git a/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/static/style.css b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/static/style.css new file mode 100644 index 00000000..70b786d1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/static/style.css @@ -0,0 +1 @@ +// TODO diff --git a/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/test_data.txt b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/test_data.txt new file mode 100644 index 00000000..a26826a5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/gae-py-blobserver/test_data.txt @@ -0,0 +1 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque at tortor in tellus accumsan euismod. Quisque scelerisque velit vel nisi ornare lacinia. Vivamus viverra eleifend congue. Maecenas dolor magna, rhoncus vitae fermentum id, convallis id. 
diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/.gitignore b/vendor/github.com/camlistore/camlistore/server/sigserver/.gitignore new file mode 100644 index 00000000..426570e3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/.gitignore @@ -0,0 +1,4 @@ +camsigd +sigserver +*.6 +*.8 diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/camsigd.go b/vendor/github.com/camlistore/camlistore/server/sigserver/camsigd.go new file mode 100644 index 00000000..45bd9fd3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/camsigd.go @@ -0,0 +1,83 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// The sigserver is a stand-alone JSON signing and verification server. +// +// TODO(bradfitz): as of 2012-01-10 this is very old and superceded by +// the general server and pkg/serverconfig. We should just make it +// possible to configure a signing-only server with +// serverconfig/genconfig.go. I think we basically already can. Then +// we can delete this. 
+package main + +import ( + "flag" + "fmt" + "log" + "net/http" + + "camlistore.org/pkg/auth" + "camlistore.org/pkg/blob" + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/webserver" +) + +var accessPassword string + +var flagPubKeyDir = flag.String("pubkey-dir", "test/pubkey-blobs", + "Temporary development hack; directory to dig-xxxx.camli public keys.") + +// TODO: for now, the only implementation of the blobref.Fetcher +// interface for fetching public keys is the "local, from disk" +// implementation used for testing. In reality we'd want to be able +// to fetch these from blobservers. +var pubKeyFetcher = blob.NewSimpleDirectoryFetcher(*flagPubKeyDir) + +func handleRoot(conn http.ResponseWriter, req *http.Request) { + fmt.Fprintf(conn, "camsigd") +} + +func handleCamliSig(conn http.ResponseWriter, req *http.Request) { + handler := func(conn http.ResponseWriter, req *http.Request) { + httputil.BadRequestError(conn, "Unsupported path or method.") + } + + switch req.Method { + case "POST": + switch req.URL.Path { + case "/camli/sig/sign": + handler = auth.RequireAuth(handleSign, auth.OpSign) + case "/camli/sig/verify": + handler = handleVerify + } + } + handler(conn, req) +} + +func main() { + flag.Parse() + + mode, err := auth.FromEnv() + if err != nil { + log.Fatal(err) + } + auth.SetMode(mode) + + ws := webserver.New() + ws.HandleFunc("/", handleRoot) + ws.HandleFunc("/camli/sig/", handleCamliSig) + ws.Serve() +} diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/client.pl b/vendor/github.com/camlistore/camlistore/server/sigserver/client.pl new file mode 100755 index 00000000..cce21000 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/client.pl @@ -0,0 +1,50 @@ +#!/usr/bin/perl + +use strict; +use LWP::UserAgent; +use HTTP::Request; +use HTTP::Request::Common; +use Getopt::Long; + +my $keyid = "26F5ABDA"; +my $server = "http://localhost:2856"; +GetOptions("keyid=s" => \$keyid, + "server=s" => \$server) + or 
usage(); + +$server =~ s!/$!!; + +my $file = shift or usage(); +-f $file or usage("$file isn't a file"); + +my $json = do { undef $/; open(my $fh, $file); <$fh> }; + +sub usage { + my $err = shift; + if ($err) { + print STDERR "Error: $err\n"; + } + print STDERR "Usage: client.pl [OPTS] \n"; + print STDERR "Options:\n"; + print STDERR " --keyid=\n"; + print STDERR " --server=http://host:port\n"; + exit(1); +} + +my $req = POST("$server/camli/sig/sign", + "Authorization" => "Basic dGVzdDp0ZXN0", # test:test + Content => { + "json" => $json, + "keyid" => $keyid, + }); + +my $ua = LWP::UserAgent->new; +my $res = $ua->request($req); +unless ($res->is_success) { + die "Failure: " . $res->status_line . ": " . $res->content; +} + +print $res->content; + + + diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/run.sh b/vendor/github.com/camlistore/camlistore/server/sigserver/run.sh new file mode 100755 index 00000000..27a38b25 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/run.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +export CAMLI_PASSWORD=test +make && ./sigserver "$@" diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/sign.go b/vendor/github.com/camlistore/camlistore/server/sigserver/sign.go new file mode 100644 index 00000000..b262c461 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/sign.go @@ -0,0 +1,54 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/jsonsign" + "fmt" + "net/http" +) + +const kMaxJsonLength = 1024 * 1024 + +func handleSign(conn http.ResponseWriter, req *http.Request) { + if !(req.Method == "POST" && req.URL.Path == "/camli/sig/sign") { + httputil.BadRequestError(conn, "Inconfigured handler.") + return + } + + req.ParseForm() + + jsonStr := req.FormValue("json") + if jsonStr == "" { + httputil.BadRequestError(conn, "Missing json parameter") + return + } + if len(jsonStr) > kMaxJsonLength { + httputil.BadRequestError(conn, "json parameter too large") + return + } + + sreq := &jsonsign.SignRequest{UnsignedJSON: jsonStr, Fetcher: pubKeyFetcher} + signedJson, err := sreq.Sign() + if err != nil { + // TODO: some aren't really a "bad request" + httputil.BadRequestError(conn, fmt.Sprintf("%v", err)) + return + } + conn.Write([]byte(signedJson)) +} diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/spec.txt b/vendor/github.com/camlistore/camlistore/server/sigserver/spec.txt new file mode 100644 index 00000000..3381caa6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/spec.txt @@ -0,0 +1,44 @@ +Sign: + +(https) POST /camli/sig/sign +WWW-Authenticate: [user] [b64pass] + + json=[json to sign] + keyid=[GnuPG key id / implementation dependent] + +On good response: + HTTP 200 OK + (signed blob) + +else: (if signing fails) + HTTP 4xx/5xx + + +TODO(bslatkin): Should the sign response be a more specific value, so +we can tell the difference between a temporary server error and a signing +failure? For verification purposes we need that characteristic anyways. 
+ +--- + +Verify: + +(https) POST /camli/sig/verify + + sjson=[signed json to verify] + (proposed) keyarmored=[GnuPG armored key] + +On good response: + HTTP 200 OK + + YES + +else: (if verification fails) + HTTP 200 OK + + + + +Verify will look in the object to find the "camliSigner" key and use that +blobref's contents (assumed to be a public key) to verify the signature on +the object. Configuring the signing server to have the public key blobref +is out of scope. diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/test/00-start.t b/vendor/github.com/camlistore/camlistore/server/sigserver/test/00-start.t new file mode 100644 index 00000000..e1c41d2c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/test/00-start.t @@ -0,0 +1,20 @@ +#!/usr/bin/perl + +use strict; +use Test::More; +use FindBin; +use lib "$FindBin::Bin"; +use CamsigdTest; + +my $server = CamsigdTest::start(); + +ok($server, "Started the server") or BAIL_OUT("can't start the server"); + +my $ua = LWP::UserAgent->new; +my $req = HTTP::Request->new("GET", $server->root . 
"/"); +my $res = $ua->request($req); +ok($res, "got an HTTP response") or done_testing(); +ok($res->is_success, "HTTP response is successful"); + +done_testing(3); + diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/test/10-sign.t b/vendor/github.com/camlistore/camlistore/server/sigserver/test/10-sign.t new file mode 100644 index 00000000..bc430c58 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/test/10-sign.t @@ -0,0 +1,96 @@ +#!/usr/bin/perl + +use strict; +use Test::More; +use FindBin; +use lib "$FindBin::Bin"; +use CamsigdTest; +use JSON::Any; +use HTTP::Request::Common; + +my $server = CamsigdTest::start(); +ok($server, "Started the server") or BAIL_OUT("can't start the server"); + +my $ua = LWP::UserAgent->new; + +use constant CAMLI_SIGNER => "sha1-82e6f3494f698aa498d5906349c0aa0a183d89a6"; + +my $j = JSON::Any->new; +my $json = $j->objToJson({ "camliVersion" => 1, + "camliSigner" => CAMLI_SIGNER, + "foo" => "bar", + }); + +# Sign it. +my $sjson; +{ + my $req = req("sign", { "json" => $json }); + my $res = $ua->request($req); + ok($res, "got an HTTP sig response") or done_testing(); + ok($res->is_success, "HTTP sig response is successful") or done_testing(); + $sjson = $res->content; + print "Got signed: $sjson"; + like($sjson, qr/camliSig/, "contains camliSig substring"); + + my $sobj = $j->jsonToObj($sjson); + is($sobj->{"foo"}, "bar", "key foo is still bar"); + is($sobj->{"camliVersion"}, 1, "key camliVersion is still 1"); + ok(defined $sobj->{"camliSig"}, "has camliSig key"); + ok(defined $sobj->{"camliSigner"}, "has camliSigner key"); + is(scalar keys %$sobj, 4, "total of 3 keys in signed object"); +} + +# Verify it. +{ + my $req = req("verify", { "sjson" => $sjson }); + my $res = $ua->request($req); + ok($res, "got an HTTP verify response") or done_testing(); + ok($res->is_success, "HTTP verify response is successful") or done_testing(); + print "Verify response: " . 
$res->content; + my $vobj = $j->jsonToObj($res->content); + ok(defined($vobj->{'signatureValid'}), "has 'signatureValid' key"); + ok($vobj->{'signatureValid'}, "signature is valid"); + my $vdat = $vobj->{'verifiedData'}; + ok(defined($vdat), "has verified data"); + is($vdat->{'camliSigner'}, CAMLI_SIGNER, "signer matches"); + is($vdat->{'foo'}, "bar") +} + +# Verification that should fail. +{ + my $req = req("verify", { "sjson" => "{}" }); + my $res = $ua->request($req); + ok($res, "got an HTTP verify response") or done_testing(); + ok($res->is_success, "HTTP verify response is successful") or done_testing(); + print "Verify response: " . $res->content; + my $vobj = $j->jsonToObj($res->content); + ok(defined($vobj->{'signatureValid'}), "has 'signatureValid' key"); + is(0, $vobj->{'signatureValid'}, "signature is properly invalid"); + ok(!defined($vobj->{'verifiedData'}), "no verified data key"); + ok(defined($vobj->{'errorMessage'}), "has an error message"); +} + +# Imposter! Verification should fail. +{ + my $eviljson = q{{"camliVersion":1,"camliSigner":"sha1-82e6f3494f698aa498d5906349c0aa0a183d89a6","foo":"evilbar","camliSig":"iQEcBAABAgAGBQJM+tnUAAoJEIUeCLJL7Fq1ruwH/RplOpmrTK51etXUHayRGN0RM0Jxttjwa0pPuiHr7fJifaZo2pvMZOMAttjFEP/HMjvpSVi8P7awBFXXlCTj0CAlexsmCsPEHzITXe3siFzH+XCSmfHNPYYti0apQ2+OcWNnzqWXLiEfP5yRVXxcxoWuxYlnFu+mfw5VdjrJpIa+n3Ys5D4zUPVCSNtF4XV537czqfd9AiSfKCY/aL2NuZykl4WtP3JgYl8btE84EjNLFasQDstcWOvp7rrP6T8hQQotw5/F4SmmFM6ybkWXk/Wkax3XpzW9qL00VqhxHd4JIWaSzSV/WcSQwCoLWc7uXttOWgVtMIhzpjeMlqt1gc0==QYU2"}}; + my $req = req("verify", { "sjson" => $eviljson }); + my $res = $ua->request($req); + ok($res, "got an HTTP verify response") or done_testing(); + ok($res->is_success, "HTTP verify response is successful") or done_testing(); + print "Verify response: " . 
$res->content; + my $vobj = $j->jsonToObj($res->content); + ok(defined($vobj->{'signatureValid'}), "has 'signatureValid' key"); + is(0, $vobj->{'signatureValid'}, "signature is properly invalid"); + ok(!defined($vobj->{'verifiedData'}), "no verified data key"); + ok(defined($vobj->{'errorMessage'}), "has an error message"); + like($vobj->{'errorMessage'}, qr/bad signature: RSA verification error/, "verification error"); +} + +done_testing(29); + +sub req { + my ($method, $post_params) = @_; + return POST($server->root . "/camli/sig/" . $method, + "Authorization" => "Basic dGVzdDp0ZXN0", # test:test + Content => $post_params); +} diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/test/CamsigdTest.pm b/vendor/github.com/camlistore/camlistore/server/sigserver/test/CamsigdTest.pm new file mode 100644 index 00000000..b238898d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/test/CamsigdTest.pm @@ -0,0 +1,78 @@ +#!/usr/bin/perl +# +# Common test library for camsigd (sigserver) + +package CamsigdTest; + +use strict; +use Test::More; +use FindBin; +use LWP::UserAgent; +use HTTP::Request; +use Fcntl; + +our $BINARY = "$FindBin::Bin/../sigserver"; + +sub start { + my ($port_rd, $port_wr, $exit_rd, $exit_wr); + my $flags; + pipe $port_rd, $port_wr; + pipe $exit_rd, $exit_wr; + + $flags = fcntl($port_wr, F_GETFD, 0); + fcntl($port_wr, F_SETFD, $flags & ~FD_CLOEXEC); + $flags = fcntl($exit_rd, F_GETFD, 0); + fcntl($exit_rd, F_SETFD, $flags & ~FD_CLOEXEC); + + $ENV{TESTING_PORT_WRITE_FD} = fileno($port_wr); + $ENV{TESTING_CONTROL_READ_FD} = fileno($exit_rd); + $ENV{CAMLI_PASSWORD} = "test"; + + die "Binary $BINARY doesn't exist\n" unless -x $BINARY; + + my $pid = fork; + die "Failed to fork" unless defined($pid); + if ($pid == 0) { + # child + exec $BINARY, "-listen=:0"; + die "failed to exec: $!\n"; + } + close($exit_rd); # child owns this side + close($port_wr); # child owns this side + + print "Waiting for server to start...\n"; 
+ my $line = <$port_rd>; + close($port_rd); + + # Parse the port line out + chomp $line; + # print "Got port line: $line\n"; + die "Failed to start, no port info." unless $line =~ /:(\d+)$/; + my $port = $1; + + return CamsigdTest::Server->new($pid, $port, $exit_wr); +} + +package CamsigdTest::Server; + +sub new { + my ($class, $pid, $port, $pipe_writer) = @_; + return bless { + pid => $pid, + port => $port, + pipe_writer => $pipe_writer, + }; +} + +sub DESTROY { + my $self = shift; + my $pipe = $self->{pipe_writer}; + syswrite($pipe, "EXIT\n", 5); +} + +sub root { + my $self = shift; + return "http://localhost:$self->{port}"; +} + +1; diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/test/doc.tmp b/vendor/github.com/camlistore/camlistore/server/sigserver/test/doc.tmp new file mode 100644 index 00000000..2dcc98c1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/test/doc.tmp @@ -0,0 +1 @@ +{"camliVersion":1,"foo":"bar" \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/test/pubkey-blobs/sha1-82e6f3494f698aa498d5906349c0aa0a183d89a6.camli b/vendor/github.com/camlistore/camlistore/server/sigserver/test/pubkey-blobs/sha1-82e6f3494f698aa498d5906349c0aa0a183d89a6.camli new file mode 100644 index 00000000..bb94ce58 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/test/pubkey-blobs/sha1-82e6f3494f698aa498d5906349c0aa0a183d89a6.camli @@ -0,0 +1,30 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.10 (GNU/Linux) + +mQENBEzgoVsBCAC/56aEJ9BNIGV9FVP+WzenTAkg12k86YqlwJVAB/VwdMlyXxvi +bCT1RVRfnYxscs14LLfcMWF3zMucw16mLlJCBSLvbZ0jn4h+/8vK5WuAdjw2YzLs +WtBcjWn3lV6tb4RJz5gtD/o1w8VWxwAnAVIWZntKAWmkcChCRgdUeWso76+plxE5 +aRYBJqdT1mctGqNEISd/WYPMgwnWXQsVi3x4z1dYu2tD9uO1dkAff12z1kyZQIBQ +rexKYRRRh9IKAayD4kgS0wdlULjBU98aeEaMz1ckuB46DX3lAYqmmTEL/Rl9cOI0 +Enpn/oOOfYFa5h0AFndZd1blMvruXfdAobjVABEBAAG0JUNhbWxpIFRlc3RlciA8 
+Y2FtbGktdGVzdEBleGFtcGxlLmNvbT6JATgEEwECACIFAkzgoVsCGwMGCwkIBwMC +BhUIAgkKCwQWAgMBAh4BAheAAAoJECkxpnwm9avaHE0IAJ/pMZgiURl3kefrFMAV +7ei0XDfTekZOwDRcZWTVQ/A97phpzO8t78qLYbFeHuq3myNhrlVO9Gyp+2V904rN +dudoHLhpegf5TNeHGmAGHBxcooMPMp0JyIDnUBxtCNGxgWfbKpEDRsQAjkCc7sR0 +H+OegzlEf6JZGzEhV5ohOioTsC1DmJNoQsRz5Kes7sLoAzpQCbCv4yv+1o+mnzgW +9qPJXKxcScc0t2YTvcvpJ7LV8no1OP6vpYqB1A9Pzze6XFBlcXOUKbRKk0fEIV/u +pU3ph1fF7wlyRgA4A3iPwDC4BgVmHYkz9nYPn+7IcT/dDig5SWU+n7WZgGeyv75y +0Ue5AQ0ETOChWwEIALuHxKI+oSH+eeMSXhxcSUXnhp4cUeyvOV7oNPYcmsDclF0Y +7y8NrSPiEZod9vSTEDMq7hd3BG+feCBqjgR4qtmoXguJhWcnJqDBk5iAMuuAph9O +CC8QLACMJPhoxQ0UtDPKlpG4X8kLK1woHd716ulPl2KLjTgd6K4kCGj+CV5Ekn6u +IJj+3IPbYDOwk1l06ksimwQAY4dA1CXOTviH1bVqR6CzuzVPg4hcryWDva1rEO5c +LcOR8Wk/thANFLSNjqX8UgtGXhFZRWxKetFDQiX5f2BKoqTVYvD3pqt+zzyLNFAz +xhMc3cyFfqM8yQdzdEey/DIWtMoDqZCSVMJ63N8AEQEAAYkBHwQYAQIACQUCTOCh +WwIbDAAKCRApMaZ8JvWr2mHACACkco+fAfRK+gmprF2m8E0Bp1frwFH0g4RJVHXQ +BUDbg7OZbWumzD4Br28si6XDVMP6fLOeyD0EHYb6LhAHDkBLqx6e3kKG1mQ8fMIV +O4YMQfskYH2FJqlCtgMnM8N3oslPBTpZedNPSUq7HJh2pKr9GIDi1V+Hgc/qEigE +dj9f2zSSaKZdC4eL73GvlQOh+4XqgaMnMiKfI+/2WlRaJs1KOgKmIp5yHt0qY0ef +y+40BY/z9pMjyUvr/Wwp8KXArw0NAwzp8NUl5fNxRg9XWQWLn6hW8ydR20X3t2ym +iNSWzNQiTT6k7fumOABCoSZsow/AJxQSxqKOJBjgpKjIKCgY +=ru0J +-----END PGP PUBLIC KEY BLOCK----- diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/test/sig.tmp b/vendor/github.com/camlistore/camlistore/server/sigserver/test/sig.tmp new file mode 100644 index 00000000..95538f6b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/test/sig.tmp @@ -0,0 +1,6 @@ +-----BEGIN PGP SIGNATURE----- +Version: GnuPG v1.4.10 (GNU/Linux) + 
+iQEcBAABAgAGBQJM9KDoAAoJECkxpnwm9avabeYH/2+Rm1FjSDKIxUlF+RCvaKWYflJuCtazJTWezud3CL+q2DSWEl8o7z6TUDB15w8nzRlLDEXqqGYPec76eyoyh4R98A2oxmms1nJY1HFXWN4LFUcinOBnM175f5qyiFr0c64sSMaBt21Qkt6Ncecg7NpTyl31Uz3JmlG7SZRm5yL08shbNR0AvTSnwUAwyWiy+v9qwvK3VoAxA2CXgJDTudEjf8MoMna0MmF43hWSdqGkqVao5rJtpru+iMHXkaqrgX24go1PRwVOyz6mJdgkqnYMqGinYAw+w05s09wfpQ/xLEuCCYfehtLGcSPEPkfFD701hgo/9OR1w+hdrrFKSNo= +=Nzxs +-----END PGP SIGNATURE----- diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/test/test-keyring.gpg b/vendor/github.com/camlistore/camlistore/server/sigserver/test/test-keyring.gpg new file mode 100644 index 00000000..3d20ba68 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/server/sigserver/test/test-keyring.gpg differ diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/test/test-keyring2.gpg b/vendor/github.com/camlistore/camlistore/server/sigserver/test/test-keyring2.gpg new file mode 100644 index 00000000..ded7d515 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/server/sigserver/test/test-keyring2.gpg differ diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/test/test-secring.gpg b/vendor/github.com/camlistore/camlistore/server/sigserver/test/test-secring.gpg new file mode 100644 index 00000000..bca3ad03 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/server/sigserver/test/test-secring.gpg differ diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/test/test-secring2.gpg b/vendor/github.com/camlistore/camlistore/server/sigserver/test/test-secring2.gpg new file mode 100644 index 00000000..f4b7ed22 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/server/sigserver/test/test-secring2.gpg differ diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/test/test.json b/vendor/github.com/camlistore/camlistore/server/sigserver/test/test.json new file mode 100644 index 00000000..c9a36957 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/server/sigserver/test/test.json @@ -0,0 +1,5 @@ +{ + "foo": "bar", + "blah": "baz" } + + diff --git a/vendor/github.com/camlistore/camlistore/server/sigserver/verify.go b/vendor/github.com/camlistore/camlistore/server/sigserver/verify.go new file mode 100644 index 00000000..d2d96464 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/sigserver/verify.go @@ -0,0 +1,64 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +/* + + $ gpg --no-default-keyring --keyring=/tmp/foo --import --armor test/pubkey-blobs/sha1-82e6f3494f69 + + $ gpg --no-default-keyring --keyring=/tmp/foo --verify sig.tmp doc.tmp ; echo $? + gpg: Signature made Mon 29 Nov 2010 10:59:52 PM PST using RSA key ID 26F5ABDA + gpg: Good signature from "Camli Tester " + gpg: WARNING: This key is not certified with a trusted signature! + gpg: There is no indication that the signature belongs to the owner. 
+ Primary key fingerprint: FBB8 9AA3 20A2 806F E497 C049 2931 A67C 26F5 ABDA0 + +*/ + +import ( + "camlistore.org/pkg/httputil" + "camlistore.org/pkg/jsonsign" + "net/http" +) + +func handleVerify(conn http.ResponseWriter, req *http.Request) { + if !(req.Method == "POST" && req.URL.Path == "/camli/sig/verify") { + httputil.BadRequestError(conn, "Inconfigured handler.") + return + } + + req.ParseForm() + sjson := req.FormValue("sjson") + if sjson == "" { + httputil.BadRequestError(conn, "Missing sjson parameter.") + return + } + + m := make(map[string]interface{}) + + vreq := jsonsign.NewVerificationRequest(sjson, pubKeyFetcher) + if vreq.Verify() { + m["signatureValid"] = 1 + m["verifiedData"] = vreq.PayloadMap + } else { + m["signatureValid"] = 0 + m["errorMessage"] = vreq.Err.Error() + } + + conn.WriteHeader(http.StatusOK) // no HTTP response code fun, error info in JSON + httputil.ReturnJSON(conn, m) +} diff --git a/vendor/github.com/camlistore/camlistore/server/tester/bs-test.pl b/vendor/github.com/camlistore/camlistore/server/tester/bs-test.pl new file mode 100755 index 00000000..5fad55ef --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/server/tester/bs-test.pl @@ -0,0 +1,430 @@ +#!/usr/bin/perl +# +# Test script to run against a Camli blobserver to test its compliance +# with the spec. 
+ +use strict; +use Getopt::Long; +use LWP; +use Test::More; + +my $user; +my $password; +my $implopt; +GetOptions("user" => \$user, + "password" => \$password, + "impl=s" => \$implopt, + ) or usage(); + +my $impl; +my %args = (user => $user, password => $password); +if ($implopt eq "go") { + $impl = Impl::Go->new(%args); +} elsif ($implopt eq "appengine") { + $impl = Impl::AppEngine->new(%args); +} else { + die "The --impl flag must be 'go' or 'appengine'.\n"; +} + +ok($impl->start, "Server started"); + +$impl->verify_no_blobs; # also tests some of enumerate +$impl->test_stat_and_upload; +$impl->test_upload_corrupt_blob; # blobref digest doesn't match + +# TODO: test multiple uploads in a batch +# TODO: test uploads in serial (using each response's next uploadUrl) +# TODO: test enumerate boundaries +# TODO: interrupt a POST upload in the middle; verify no straggler on +# disk in subsequent GET +# .... +# test auth works on bogus password? (auth still undefined) +# TODO: test stat with both GET and POST (currently just POST) + +done_testing(); + +sub usage { + die "Usage: bs-test.pl [--user= --password=] --impl={go,appengine}\n"; +} + +package Impl; +use HTTP::Request::Common; +use LWP::UserAgent; +use JSON::Any; +use Test::More; +use Digest::SHA1 qw(sha1_hex); +use URI::URL (); +use Data::Dumper; + +sub new { + my ($class, %args) = @_; + return bless \%args, $class; +} + +sub post { + my ($self, $path, $form) = @_; + $path ||= ""; + $form ||= {}; + return POST($self->path($path), + "Authorization" => "Basic dGVzdDp0ZXN0", # test:test + Content => $form); +} + +sub upload_request { + my ($self, $upload_url, $blobref_to_blob_map) = @_; + my @content; + my $n = 0; + foreach my $key (sort keys %$blobref_to_blob_map) { + $n++; + # TODO: the App Engine client refused to work unless the Content-Type + # is set. This should be clarified in the docs (MUST?) and update the + # test suite and Go server accordingly (to fail if not present). 
+ push @content, $key => [ + undef, "filename$n", + "Content-Type" => "application/octet-stream", + Content => $blobref_to_blob_map->{$key}, + ]; + } + + return POST($upload_url, + "Content_Type" => 'form-data', + "Authorization" => "Basic dGVzdDp0ZXN0", # test:test + Content => \@content); +} + +sub get { + my ($self, $path, $form) = @_; + $path ||= ""; + $form ||= {}; + return GET($self->path($path), + "Authorization" => "Basic dGVzdDp0ZXN0", # test:test + %$form); +} + +sub head { + my ($self, $path, $form) = @_; + $path ||= ""; + $form ||= {}; + return HEAD($self->path($path), + "Authorization" => "Basic dGVzdDp0ZXN0", # test:test + %$form); +} + +sub ua { + my $self = shift; + return ($self->{_ua} ||= LWP::UserAgent->new(agent => "camli/blobserver-tester")); +} + +sub root { + my $self= shift; + return $self->{root} or die "No 'root' for $self"; +} + +sub path { + my $self = shift; + my $path = shift || ""; + return $self->root . $path; +} + +sub get_json { + my ($self, $req, $msg, $opts) = @_; + $opts ||= {}; + + my $res = $self->ua->request($req); + ok(defined($res), "got response for HTTP request '$msg'"); + + if ($res->code =~ m!^30[123]$! && $opts->{follow_redirect}) { + my $location = $res->header("Location"); + if ($res->code == "303") { + $req->method("GET"); + } + my $new_uri = URI::URL->new($location, $req->uri)->abs; + diag("Old URI was " . $req->uri); + diag("New is " . $new_uri); + diag("Redirecting HTTP request '$msg' to $location ($new_uri)"); + $req->uri($new_uri); + $res = $self->ua->request($req); + ok(defined($res), "got redirected response for HTTP request '$msg'"); + } + + ok($res->is_success, "successful response for HTTP request '$msg'") + or diag("Status was: " . 
$res->status_line); + my $json = JSON::Any->jsonToObj($res->content); + is("HASH", ref($json), "JSON parsed for HTTP request '$msg'") + or BAIL_OUT("expected JSON response"); + return $json; +} + +sub get_upload_json { + my ($self, $req) = @_; + return $self->get_json($req, "upload", { follow_redirect => 1 }) +} + +sub verify_no_blobs { + my $self = shift; + my $req = $self->get("/camli/enumerate-blobs", { + "after" => "", + "limit" => 10, + }); + my $json = $self->get_json($req, "enumerate empty blobs"); + ok(defined($json->{'blobs'}), "enumerate has a 'blobs' key"); + is("ARRAY", ref($json->{'blobs'}), "enumerate's blobs key is an array"); + is(0, scalar @{$json->{'blobs'}}, "no blobs on server"); +} + +sub test_stat_and_upload { + my $self = shift; + my ($req, $res); + + my $blob = "This is a line.\r\nWith mixed newlines\rFoo\nAnd binary\0data.\0\n\r."; + my $blobref = "sha1-" . sha1_hex($blob); + + # Bogus method. + $req = $self->head("/camli/stat", { + "camliversion" => 1, + "blob1" => $blobref, + }); + $res = $self->ua->request($req); + ok(!$res->is_success, "returns failure for HEAD on /camli/stat"); + + # Correct method, but missing camliVersion. + $req = $self->post("/camli/stat", { + "blob1" => $blobref, + }); + $res = $self->ua->request($req); + ok(!$res->is_success, "returns failure for missing camliVersion param on stat"); + + # Valid pre-upload + $req = $self->post("/camli/stat", { + "camliversion" => 1, + "blob1" => $blobref, + }); + my $jres = $self->get_json($req, "valid stat"); + diag("stat response: " . 
Dumper($jres)); + ok($jres, "valid stat JSON response"); + for my $f (qw(stat maxUploadSize uploadUrl uploadUrlExpirationSeconds)) { + ok(defined($jres->{$f}), "required field '$f' present"); + } + is(scalar(keys %$jres), 4, "Exactly 4 JSON keys returned"); + my $statList = $jres->{stat}; + is(ref($statList), "ARRAY", "stat is an array"); + is(scalar(@$statList), 0, "server doesn't have this blob yet."); + like($jres->{uploadUrlExpirationSeconds}, qr/^\d+$/, "uploadUrlExpirationSeconds is numeric"); + my $upload_url = URI::URL->new($jres->{uploadUrl}, $self->root)->abs; + ok($upload_url, "valid uploadUrl"); + # TODO: test & clarify in spec: are relative URLs allowed in uploadUrl? + # App Engine seems to do it already, and makes it easier, so probably + # best to clarify that they're relative. + + # Do the actual upload + my $upreq = $self->upload_request($upload_url, { + $blobref => $blob, + }); + diag("upload request: " . $upreq->as_string); + my $upres = $self->get_upload_json($upreq); + ok($upres, "Upload was success"); + print STDERR "# upload response: ", Dumper($upres); + + for my $f (qw(uploadUrlExpirationSeconds uploadUrl maxUploadSize received)) { + ok(defined($upres->{$f}), "required upload response field '$f' present"); + } + is(scalar(keys %$upres), 4, "Exactly 4 JSON keys returned"); + + like($upres->{uploadUrlExpirationSeconds}, qr/^\d+$/, "uploadUrlExpirationSeconds is numeric"); + is(ref($upres->{received}), "ARRAY", "'received' is an array") + or BAIL_OUT(); + my $got = $upres->{received}; + is(scalar(@$got), 1, "got one file"); + is($got->[0]{blobRef}, $blobref, "received[0] 'blobRef' matches"); + is($got->[0]{size}, length($blob), "received[0] 'size' matches"); + + # TODO: do a get request, verify that we get it back. +} + +sub test_upload_corrupt_blob { + my $self = shift; + my ($req, $res); + + my $blob = "A blob, pre-corruption."; + my $blobref = "sha1-" . 
sha1_hex($blob); + $blob .= "OIEWUROIEWURLKJDSLKj CORRUPT"; + + $req = $self->post("/camli/stat", { + "camliversion" => 1, + "blob1" => $blobref, + }); + my $jres = $self->get_json($req, "valid stat"); + my $upload_url = URI::URL->new($jres->{uploadUrl}, $self->root)->abs; + # TODO: test & clarify in spec: are relative URLs allowed in uploadUrl? + # App Engine seems to do it already, and makes it easier, so probably + # best to clarify that they're relative. + + # Do the actual upload + my $upreq = $self->upload_request($upload_url, { + $blobref => $blob, + }); + diag("corrupt upload request: " . $upreq->as_string); + my $upres = $self->get_upload_json($upreq); + my $got = $upres->{received}; + is(ref($got), "ARRAY", "corrupt upload returned a 'received' array"); + is(scalar(@$got), 0, "didn't get any files (it was corrupt)"); +} + +package Impl::Go; +use base 'Impl'; +use FindBin; +use LWP::UserAgent; +use HTTP::Request; +use Fcntl; +use File::Temp (); + +sub start { + my $self = shift; + + $self->{_tmpdir_obj} = File::Temp->newdir(); + my $tmpdir = $self->{_tmpdir_obj}->dirname; + + die "Failed to create temporary directory." 
unless -d $tmpdir; + + system("$FindBin::Bin/../../build.pl", "server/go/blobserver") + and die "Failed to build Go blobserver."; + + my $bindir = "$FindBin::Bin/../go/blobserver/"; + my $binary = "$bindir/blobserver"; + + chdir($bindir) or die "filed to chdir to $bindir: $!"; + system("make") and die "failed to run make in $bindir"; + + my ($port_rd, $port_wr, $exit_rd, $exit_wr); + my $flags; + pipe $port_rd, $port_wr; + pipe $exit_rd, $exit_wr; + + $flags = fcntl($port_wr, F_GETFD, 0); + fcntl($port_wr, F_SETFD, $flags & ~FD_CLOEXEC); + $flags = fcntl($exit_rd, F_GETFD, 0); + fcntl($exit_rd, F_SETFD, $flags & ~FD_CLOEXEC); + + $ENV{TESTING_PORT_WRITE_FD} = fileno($port_wr); + $ENV{TESTING_CONTROL_READ_FD} = fileno($exit_rd); + $ENV{CAMLI_PASSWORD} = "test"; + + die "Binary $binary doesn't exist\n" unless -x $binary; + + my $pid = fork; + die "Failed to fork" unless defined($pid); + if ($pid == 0) { + # child + my @args = ($binary, "-listen=:0", "-root=$tmpdir"); + print STDERR "# Running: [@args]\n"; + exec @args; + die "failed to exec: $!\n"; + } + close($exit_rd); # child owns this side + close($port_wr); # child owns this side + + print "Waiting for Go server to start...\n"; + my $line = <$port_rd>; + close($port_rd); + + # Parse the port line out + chomp $line; + # print "Got port line: $line\n"; + die "Failed to start, no port info." unless $line =~ /:(\d+)$/; + $self->{port} = $1; + $self->{root} = "http://localhost:$self->{port}"; + print STDERR "# Running on $self->{root} ...\n"; + + # Keep a reference to this to write "EXIT\n" to in order + # to cleanly shutdown the child camlistored process. + # If we close it, the child also dies, though. 
+ $self->{_exit_wr} = $exit_wr; + return 1; +} + +sub DESTROY { + my $self = shift; + syswrite($self->{_exit_wr}, "EXIT\n"); +} + +package Impl::AppEngine; +use base 'Impl'; +use IO::Socket::INET; +use Time::HiRes (); + +sub start { + my $self = shift; + + my $dev_appserver = `which dev_appserver.py`; + chomp $dev_appserver; + unless ($dev_appserver && -x $dev_appserver) { + $dev_appserver = "$ENV{HOME}/sdk/google_appengine/dev_appserver.py"; + unless (-x $dev_appserver) { + die "No dev_appserver.py in \$PATH nor in \$HOME/sdk/google_appengine/dev_appserver.py\n"; + } + } + + $self->{_tempdir_blobstore_obj} = File::Temp->newdir(); + $self->{_tempdir_datastore_obj} = File::Temp->newdir(); + my $datapath = $self->{_tempdir_blobstore_obj}->dirname . "/datastore-file"; + my $blobdir = $self->{_tempdir_datastore_obj}->dirname; + + my $port; + while (1) { + $port = int(rand(30000) + 1024); + my $sock = IO::Socket::INET->new(Listen => 5, + LocalAddr => '127.0.0.1', + LocalPort => $port, + ReuseAddr => 1, + Proto => 'tcp'); + if ($sock) { + last; + } + } + $self->{port} = $port; + $self->{root} = "http://localhost:$self->{port}"; + + my $pid = fork; + die "Failed to fork" unless defined($pid); + if ($pid == 0) { + my $appdir = "$FindBin::Bin/../appengine/blobserver"; + + # child + my @args = ($dev_appserver, + "--clear_datastore", # kinda redundant as we made a temp dir + "--datastore_path=$datapath", + "--blobstore_path=$blobdir", + "--port=$port", + $appdir); + print STDERR "# Running: [@args]\n"; + exec @args; + die "failed to exec: $!\n"; + } + $self->{pid} = $pid; + + my $last_print = 0; + for (1..15) { + my $now = time(); + if ($now != $last_print) { + print STDERR "# Waiting for appengine app to start...\n"; + $last_print = $now; + } + my $res = $self->ua->request($self->get("/")); + if ($res && $res->is_success) { + print STDERR "# Up."; + last; + } + Time::HiRes::sleep(0.1); + } + return 1; +} + +sub DESTROY { + my $self = shift; + kill 3, $self->{pid} if 
$self->{pid}; +} + +1; + + + diff --git a/vendor/github.com/camlistore/camlistore/third_party/README b/vendor/github.com/camlistore/camlistore/third_party/README new file mode 100644 index 00000000..501e46fc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/README @@ -0,0 +1,10 @@ +External packages which Camlistore depends on. + +These are not under Camlistore copyright/license. See the respective projects +for their copyright & licensing details. + +These are mirrored into Camlistore for hermetic build reasons, as well +as enabling local patching to work with an ever-changing upstream Go +project. (not all projects will follow Go tip as closely) + + diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/.gitattributes b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/.gitattributes new file mode 100644 index 00000000..b65f2a9f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/.gitattributes @@ -0,0 +1,2 @@ +*.go filter=gofmt +*.cgo filter=gofmt diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/.gitignore b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/.gitignore new file mode 100644 index 00000000..2b286ca9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/.gitignore @@ -0,0 +1,8 @@ +*~ +.#* +## the next line needs to start with a backslash to avoid looking like +## a comment +\#*# +.*.swp + +*.test diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/LICENSE new file mode 100644 index 00000000..d369cb82 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/LICENSE @@ -0,0 +1,93 @@ +Copyright (c) 2013, 2014 Tommi Virtanen. +Copyright (c) 2009, 2011, 2012 The Go Authors. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +The following included software components have additional copyright +notices and license terms that may differ from the above. + + +File fuse.go: + +// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c, +// which carries this notice: +// +// The files in this directory are subject to the following license. +// +// The author of this software is Russ Cox. 
+// +// Copyright (c) 2006 Russ Cox +// +// Permission to use, copy, modify, and distribute this software for any +// purpose without fee is hereby granted, provided that this entire notice +// is included in all copies of any software which is or includes a copy +// or modification of this software and in all copies of the supporting +// documentation for such software. +// +// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED +// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY +// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS +// FITNESS FOR ANY PARTICULAR PURPOSE. + + +File fuse_kernel.go: + +// Derived from FUSE's fuse_kernel.h +/* + This file defines the kernel interface of FUSE + Copyright (C) 2001-2007 Miklos Szeredi + + + This -- and only this -- header file may also be distributed under + the terms of the BSD Licence as follows: + + Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. +*/ diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/README.md b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/README.md new file mode 100644 index 00000000..471b2b25 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/README.md @@ -0,0 +1,23 @@ +bazil.org/fuse -- Filesystems in Go +=================================== + +`bazil.org/fuse` is a Go library for writing FUSE userspace +filesystems. + +It is a from-scratch implementation of the kernel-userspace +communication protocol, and does not use the C library from the +project called FUSE. `bazil.org/fuse` embraces Go fully for safety and +ease of programming. + +Here’s how to get going: + + go get bazil.org/fuse + +Website: http://bazil.org/fuse/ + +Github repository: https://github.com/bazillion/fuse + +API docs: http://godoc.org/bazil.org/fuse + +Our thanks to Russ Cox for his fuse library, which this project is +based on. 
diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/debug.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/debug.go new file mode 100644 index 00000000..be9f900d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/debug.go @@ -0,0 +1,21 @@ +package fuse + +import ( + "runtime" +) + +func stack() string { + buf := make([]byte, 1024) + return string(buf[:runtime.Stack(buf, false)]) +} + +func nop(msg interface{}) {} + +// Debug is called to output debug messages, including protocol +// traces. The default behavior is to do nothing. +// +// The messages have human-friendly string representations and are +// safe to marshal to JSON. +// +// Implementations must not retain msg. +var Debug func(msg interface{}) = nop diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/.gitignore b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/.gitignore new file mode 100644 index 00000000..6ebe2d17 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/.gitignore @@ -0,0 +1,4 @@ +/*.seq.svg + +# not ignoring *.seq.png; we want those committed to the repo +# for embedding on Github diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/README.md b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/README.md new file mode 100644 index 00000000..54ed0e59 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/README.md @@ -0,0 +1,6 @@ +# bazil.org/fuse documentation + +See also API docs at http://godoc.org/bazil.org/fuse + +- [The mount sequence](mount-sequence.md) +- [Writing documentation](writing-docs.md) diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-linux-error-init.seq b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-linux-error-init.seq new file mode 
100644 index 00000000..89cf1515 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-linux-error-init.seq @@ -0,0 +1,32 @@ +seqdiag { + app; + fuse [label="bazil.org/fuse"]; + fusermount; + kernel; + mounts; + + app; + fuse [label="bazil.org/fuse"]; + fusermount; + kernel; + mounts; + + app -> fuse [label="Mount"]; + fuse -> fusermount [label="spawn, pass socketpair fd"]; + fusermount -> kernel [label="open /dev/fuse"]; + fusermount -> kernel [label="mount(2)"]; + kernel ->> mounts [label="mount is visible"]; + fusermount <-- kernel [label="mount(2) returns"]; + fuse <<-- fusermount [diagonal, label="exit, receive /dev/fuse fd", leftnote="on Linux, successful exit here\nmeans the mount has happened,\nthough InitRequest might not have yet"]; + app <-- fuse [label="Mount returns\nConn.Ready is already closed"]; + + app -> fuse [label="fs.Serve"]; + fuse => kernel [label="read /dev/fuse fd", note="starts with InitRequest"]; + fuse -> app [label="Init"]; + fuse <-- app [color=red]; + fuse -> kernel [label="write /dev/fuse fd", color=red]; + kernel -> kernel [label="set connection\nstate to error", color=red]; + fuse <-- kernel; + ... conn.MountError == nil, so it is still mounted ... + ... call conn.Close to clean up ... 
+} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-linux-error-init.seq.png b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-linux-error-init.seq.png new file mode 100644 index 00000000..fea214f7 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-linux-error-init.seq.png differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-linux.seq b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-linux.seq new file mode 100644 index 00000000..a1cafc7a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-linux.seq @@ -0,0 +1,41 @@ +seqdiag { + // seqdiag -T svg -o doc/mount-osx.svg doc/mount-osx.seq + app; + fuse [label="bazil.org/fuse"]; + fusermount; + kernel; + mounts; + + app -> fuse [label="Mount"]; + fuse -> fusermount [label="spawn, pass socketpair fd"]; + fusermount -> kernel [label="open /dev/fuse"]; + fusermount -> kernel [label="mount(2)"]; + kernel ->> mounts [label="mount is visible"]; + fusermount <-- kernel [label="mount(2) returns"]; + fuse <<-- fusermount [diagonal, label="exit, receive /dev/fuse fd", leftnote="on Linux, successful exit here\nmeans the mount has happened,\nthough InitRequest might not have yet"]; + app <-- fuse [label="Mount returns\nConn.Ready is already closed", rightnote="InitRequest and StatfsRequest\nmay or may not be seen\nbefore Conn.Ready,\ndepending on platform"]; + + app -> fuse [label="fs.Serve"]; + fuse => kernel [label="read /dev/fuse fd", note="starts with InitRequest"]; + fuse => app [label="FS/Node/Handle methods"]; + fuse => kernel [label="write /dev/fuse fd"]; + ... repeat ... + + ... shutting down ... 
+ app -> fuse [label="Unmount"]; + fuse -> fusermount [label="fusermount -u"]; + fusermount -> kernel; + kernel <<-- mounts; + fusermount <-- kernel; + fuse <<-- fusermount [diagonal]; + app <-- fuse [label="Unmount returns"]; + + // actually triggers before above + fuse <<-- kernel [diagonal, label="/dev/fuse EOF"]; + app <-- fuse [label="fs.Serve returns"]; + + app -> fuse [label="conn.Close"]; + fuse -> kernel [label="close /dev/fuse fd"]; + fuse <-- kernel; + app <-- fuse; +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-linux.seq.png b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-linux.seq.png new file mode 100644 index 00000000..af373dd2 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-linux.seq.png differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-osx-error-init.seq b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-osx-error-init.seq new file mode 100644 index 00000000..3bb2b39a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-osx-error-init.seq @@ -0,0 +1,32 @@ +seqdiag { + app; + fuse [label="bazil.org/fuse"]; + wait [label="callMount\nhelper goroutine"]; + mount_osxfusefs; + kernel; + + app -> fuse [label="Mount"]; + fuse -> kernel [label="open /dev/osxfuseN"]; + fuse -> mount_osxfusefs [label="spawn, pass fd"]; + fuse -> wait [label="goroutine", note="blocks on cmd.Wait"]; + app <-- fuse [label="Mount returns"]; + + mount_osxfusefs -> kernel [label="mount(2)"]; + + app -> fuse [label="fs.Serve"]; + fuse => kernel [label="read /dev/osxfuseN fd", note="starts with InitRequest,\nalso seen before mount exits:\ntwo StatfsRequest calls"]; + fuse -> app [label="Init"]; + fuse <-- app [color=red]; + fuse -> kernel [label="write /dev/osxfuseN fd", color=red]; + fuse <-- kernel; + + mount_osxfusefs <-- kernel 
[label="mount(2) returns", color=red]; + wait <<-- mount_osxfusefs [diagonal, label="exit", color=red]; + app <<-- wait [diagonal, label="mount has failed,\nclose Conn.Ready", color=red]; + + // actually triggers before above + fuse <<-- kernel [diagonal, label="/dev/osxfuseN EOF"]; + app <-- fuse [label="fs.Serve returns"]; + ... conn.MountError != nil, so it was was never mounted ... + ... call conn.Close to clean up ... +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-osx-error-init.seq.png b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-osx-error-init.seq.png new file mode 100644 index 00000000..e96589c1 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-osx-error-init.seq.png differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-osx.seq b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-osx.seq new file mode 100644 index 00000000..c6914a84 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-osx.seq @@ -0,0 +1,45 @@ +seqdiag { + // seqdiag -T svg -o doc/mount-osx.svg doc/mount-osx.seq + app; + fuse [label="bazil.org/fuse"]; + wait [label="callMount\nhelper goroutine"]; + mount_osxfusefs; + kernel; + mounts; + + app -> fuse [label="Mount"]; + fuse -> kernel [label="open /dev/osxfuseN"]; + fuse -> mount_osxfusefs [label="spawn, pass fd"]; + fuse -> wait [label="goroutine", note="blocks on cmd.Wait"]; + app <-- fuse [label="Mount returns"]; + + mount_osxfusefs -> kernel [label="mount(2)"]; + + app -> fuse [label="fs.Serve"]; + fuse => kernel [label="read /dev/osxfuseN fd", note="starts with InitRequest,\nalso seen before mount exits:\ntwo StatfsRequest calls"]; + fuse => app [label="FS/Node/Handle methods"]; + fuse => kernel [label="write /dev/osxfuseN fd"]; + ... repeat ... 
+ + kernel ->> mounts [label="mount is visible"]; + mount_osxfusefs <-- kernel [label="mount(2) returns"]; + wait <<-- mount_osxfusefs [diagonal, label="exit", leftnote="on OS X, successful exit\nhere means we finally know\nthe mount has happened\n(can't trust InitRequest,\nkernel might have timed out\nwaiting for InitResponse)"]; + + app <<-- wait [diagonal, label="mount is ready,\nclose Conn.Ready", rightnote="InitRequest and StatfsRequest\nmay or may not be seen\nbefore Conn.Ready,\ndepending on platform"]; + + ... shutting down ... + app -> fuse [label="Unmount"]; + fuse -> kernel [label="umount(2)"]; + kernel <<-- mounts; + fuse <-- kernel; + app <-- fuse [label="Unmount returns"]; + + // actually triggers before above + fuse <<-- kernel [diagonal, label="/dev/osxfuseN EOF"]; + app <-- fuse [label="fs.Serve returns"]; + + app -> fuse [label="conn.Close"]; + fuse -> kernel [label="close /dev/osxfuseN"]; + fuse <-- kernel; + app <-- fuse; +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-osx.seq.png b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-osx.seq.png new file mode 100644 index 00000000..7e310f91 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-osx.seq.png differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-sequence.md b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-sequence.md new file mode 100644 index 00000000..fb2a5224 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/mount-sequence.md @@ -0,0 +1,30 @@ +# The mount sequence + +FUSE mounting is a little bit tricky. There's a userspace helper tool +that performs the handshake with the kernel, and then steps out of the +way. This helper behaves differently on different platforms, forcing a +more complex API on us. 
+ +## Successful runs + +On Linux, the mount is immediate and file system accesses wait until +the requests are served. + +![Diagram of Linux FUSE mount sequence](mount-linux.seq.png) + +On OS X, the mount becomes visible only after `InitRequest` (and maybe +more) have been served. + +![Diagram of OSXFUSE mount sequence](mount-osx.seq.png) + + +## Errors + +Let's see what happens if `InitRequest` gets an error response. On +Linux, the mountpoint is there but all operations will fail: + +![Diagram of Linux error handling](mount-linux-error-init.seq.png) + +On OS X, the mount never happened: + +![Diagram of OS X error handling](mount-osx-error-init.seq.png) diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/writing-docs.md b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/writing-docs.md new file mode 100644 index 00000000..ab5dbf8c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/doc/writing-docs.md @@ -0,0 +1,16 @@ +# Writing documentation + +## Sequence diagrams + +The sequence diagrams are generated with `seqdiag`: +http://blockdiag.com/en/seqdiag/index.html + +An easy way to work on them is to automatically update the generated +files with https://github.com/cespare/reflex : + + reflex -g 'doc/[^.]*.seq' -- seqdiag -T svg -o '{}.svg' '{}' & + + reflex -g 'doc/[^.]*.seq' -- seqdiag -T png -o '{}.png' '{}' & + +The markdown files refer to PNG images because of Github limitations, +but the SVG is generally more pleasant to view. 
diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/error_darwin.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/error_darwin.go new file mode 100644 index 00000000..adb9789a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/error_darwin.go @@ -0,0 +1,36 @@ +package fuse + +import ( + "syscall" +) + +type getxattrError struct { + error +} + +func (getxattrError) Errno() Errno { + return Errno(syscall.ENOATTR) +} + +// getxattr return value for "extended attribute does not exist" is +// ENOATTR on OS X, and ENODATA on Linux and apparently at least +// NetBSD. There may be a #define ENOATTR too, but the value is +// ENODATA in the actual syscalls. ENOATTR is not in any of the +// standards, ENODATA exists but is only used for STREAMs. +// +// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/getxattr.2.html +// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013090.html +// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013097.html +// http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html +func translateGetxattrError(err Error) Error { + ferr, ok := err.(ErrorNumber) + if !ok { + return err + } + + if ferr.Errno() != ENODATA { + return err + } + + return getxattrError{err} +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/error_std.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/error_std.go new file mode 100644 index 00000000..3cae6c82 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/error_std.go @@ -0,0 +1,7 @@ +// +build !darwin + +package fuse + +func translateGetxattrError(err Error) Error { + return err +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/bench/bench_test.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/bench/bench_test.go new file mode 100644 
index 00000000..f7ca51a4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/bench/bench_test.go @@ -0,0 +1,267 @@ +package bench_test + +import ( + "io" + "io/ioutil" + "os" + "path" + "testing" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" + "camlistore.org/third_party/bazil.org/fuse/fs/fstestutil" +) + +type benchConfig struct { + directIO bool +} + +type benchFS struct { + conf *benchConfig +} + +var _ = fs.FS(benchFS{}) +var _ = fs.FSIniter(benchFS{}) + +func (benchFS) Init(req *fuse.InitRequest, resp *fuse.InitResponse, intr fs.Intr) fuse.Error { + resp.MaxReadahead = 64 * 1024 * 1024 + resp.Flags |= fuse.InitAsyncRead + return nil +} + +func (f benchFS) Root() (fs.Node, fuse.Error) { + return benchDir{conf: f.conf}, nil +} + +type benchDir struct { + conf *benchConfig +} + +var _ = fs.Node(benchDir{}) +var _ = fs.NodeStringLookuper(benchDir{}) +var _ = fs.Handle(benchDir{}) +var _ = fs.HandleReadDirer(benchDir{}) + +func (benchDir) Attr() fuse.Attr { + return fuse.Attr{Inode: 1, Mode: os.ModeDir | 0555} +} + +func (d benchDir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + if name == "bench" { + return benchFile{conf: d.conf}, nil + } + return nil, fuse.ENOENT +} + +func (benchDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { + l := []fuse.Dirent{ + {Inode: 2, Name: "bench", Type: fuse.DT_File}, + } + return l, nil +} + +type benchFile struct { + conf *benchConfig +} + +var _ = fs.Node(benchFile{}) +var _ = fs.NodeOpener(benchFile{}) +var _ = fs.NodeFsyncer(benchFile{}) +var _ = fs.Handle(benchFile{}) +var _ = fs.HandleReader(benchFile{}) +var _ = fs.HandleWriter(benchFile{}) + +func (benchFile) Attr() fuse.Attr { + return fuse.Attr{Inode: 2, Mode: 0644, Size: 9999999999999999} +} + +func (f benchFile) Open(req *fuse.OpenRequest, resp *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) { + if f.conf.directIO { + resp.Flags |= fuse.OpenDirectIO + 
} + // TODO configurable? + resp.Flags |= fuse.OpenKeepCache + return f, nil +} + +func (benchFile) Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr fs.Intr) fuse.Error { + resp.Data = resp.Data[:cap(resp.Data)] + return nil +} + +func (benchFile) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error { + resp.Size = len(req.Data) + return nil +} + +func (benchFile) Fsync(req *fuse.FsyncRequest, intr fs.Intr) fuse.Error { + return nil +} + +func benchmark(b *testing.B, fn func(b *testing.B, mnt string), conf *benchConfig) { + srv := &fs.Server{ + FS: benchFS{ + conf: conf, + }, + } + mnt, err := fstestutil.Mounted(srv) + if err != nil { + b.Fatal(err) + } + defer mnt.Close() + + fn(b, mnt.Dir) +} + +type zero struct{} + +func (zero) Read(p []byte) (n int, err error) { + return len(p), nil +} + +var Zero io.Reader = zero{} + +func doWrites(size int64) func(b *testing.B, mnt string) { + return func(b *testing.B, mnt string) { + p := path.Join(mnt, "bench") + + f, err := os.Create(p) + if err != nil { + b.Fatalf("create: %v", err) + } + defer f.Close() + + b.ResetTimer() + b.SetBytes(size) + + for i := 0; i < b.N; i++ { + _, err = io.CopyN(f, Zero, size) + if err != nil { + b.Fatalf("write: %v", err) + } + } + } +} + +func BenchmarkWrite100(b *testing.B) { + benchmark(b, doWrites(100), &benchConfig{}) +} + +func BenchmarkWrite10MB(b *testing.B) { + benchmark(b, doWrites(10*1024*1024), &benchConfig{}) +} + +func BenchmarkWrite100MB(b *testing.B) { + benchmark(b, doWrites(100*1024*1024), &benchConfig{}) +} + +func BenchmarkDirectWrite100(b *testing.B) { + benchmark(b, doWrites(100), &benchConfig{ + directIO: true, + }) +} + +func BenchmarkDirectWrite10MB(b *testing.B) { + benchmark(b, doWrites(10*1024*1024), &benchConfig{ + directIO: true, + }) +} + +func BenchmarkDirectWrite100MB(b *testing.B) { + benchmark(b, doWrites(100*1024*1024), &benchConfig{ + directIO: true, + }) +} + +func doWritesSync(size int64) func(b *testing.B, mnt 
string) { + return func(b *testing.B, mnt string) { + p := path.Join(mnt, "bench") + + f, err := os.Create(p) + if err != nil { + b.Fatalf("create: %v", err) + } + defer f.Close() + + b.ResetTimer() + b.SetBytes(size) + + for i := 0; i < b.N; i++ { + _, err = io.CopyN(f, Zero, size) + if err != nil { + b.Fatalf("write: %v", err) + } + + if err := f.Sync(); err != nil { + b.Fatalf("sync: %v", err) + } + } + } +} + +func BenchmarkWriteSync100(b *testing.B) { + benchmark(b, doWritesSync(100), &benchConfig{}) +} + +func BenchmarkWriteSync10MB(b *testing.B) { + benchmark(b, doWritesSync(10*1024*1024), &benchConfig{}) +} + +func BenchmarkWriteSync100MB(b *testing.B) { + benchmark(b, doWritesSync(100*1024*1024), &benchConfig{}) +} + +func doReads(size int64) func(b *testing.B, mnt string) { + return func(b *testing.B, mnt string) { + p := path.Join(mnt, "bench") + + f, err := os.Open(p) + if err != nil { + b.Fatalf("close: %v", err) + } + defer f.Close() + + b.ResetTimer() + b.SetBytes(size) + + for i := 0; i < b.N; i++ { + n, err := io.CopyN(ioutil.Discard, f, size) + if err != nil { + b.Fatalf("read: %v", err) + } + if n != size { + b.Errorf("unexpected size: %d != %d", n, size) + } + } + } +} + +func BenchmarkRead100(b *testing.B) { + benchmark(b, doReads(100), &benchConfig{}) +} + +func BenchmarkRead10MB(b *testing.B) { + benchmark(b, doReads(10*1024*1024), &benchConfig{}) +} + +func BenchmarkRead100MB(b *testing.B) { + benchmark(b, doReads(100*1024*1024), &benchConfig{}) +} + +func BenchmarkDirectRead100(b *testing.B) { + benchmark(b, doReads(100), &benchConfig{ + directIO: true, + }) +} + +func BenchmarkDirectRead10MB(b *testing.B) { + benchmark(b, doReads(10*1024*1024), &benchConfig{ + directIO: true, + }) +} + +func BenchmarkDirectRead100MB(b *testing.B) { + benchmark(b, doReads(100*1024*1024), &benchConfig{ + directIO: true, + }) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/bench/doc.go 
b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/bench/doc.go new file mode 100644 index 00000000..f6da48d9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/bench/doc.go @@ -0,0 +1,5 @@ +// Package bench contains benchmarks. +// +// It is kept in a separate package to avoid conflicting with the +// debug-heavy defaults for the actual tests. +package bench diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/debug.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/debug.go new file mode 100644 index 00000000..8014c7ce --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/debug.go @@ -0,0 +1,65 @@ +package fstestutil + +import ( + "flag" + "log" + "strconv" + + "camlistore.org/third_party/bazil.org/fuse" +) + +type flagDebug bool + +var debug flagDebug + +var _ = flag.Value(&debug) + +func (f *flagDebug) IsBoolFlag() bool { + return true +} + +func nop(msg interface{}) {} + +func (f *flagDebug) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + *f = flagDebug(v) + if v { + fuse.Debug = logMsg + } else { + fuse.Debug = nop + } + return nil +} + +func (f *flagDebug) String() string { + return strconv.FormatBool(bool(*f)) +} + +func logMsg(msg interface{}) { + log.Printf("FUSE: %s\n", msg) +} + +func init() { + flag.Var(&debug, "fuse.debug", "log FUSE processing details") +} + +// DebugByDefault changes the default of the `-fuse.debug` flag to +// true. +// +// This package registers a command line flag `-fuse.debug` and when +// run with that flag (and activated inside the tests), logs FUSE +// debug messages. +// +// This is disabled by default, as most callers probably won't care +// about FUSE details. Use DebugByDefault for tests where you'd +// normally be passing `-fuse.debug` all the time anyway. +// +// Call from an init function. 
+func DebugByDefault() { + f := flag.Lookup("fuse.debug") + f.DefValue = "true" + f.Value.Set(f.DefValue) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/mounted.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/mounted.go new file mode 100644 index 00000000..c00d49d9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/mounted.go @@ -0,0 +1,113 @@ +package fstestutil + +import ( + "errors" + "io/ioutil" + "log" + "os" + "testing" + "time" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" +) + +// Mount contains information about the mount for the test to use. +type Mount struct { + // Dir is the temporary directory where the filesystem is mounted. + Dir string + + Conn *fuse.Conn + + // Error will receive the return value of Serve. + Error <-chan error + + done <-chan struct{} + closed bool +} + +// Close unmounts the filesystem and waits for fs.Serve to return. Any +// returned error will be stored in Err. It is safe to call Close +// multiple times. +func (mnt *Mount) Close() { + if mnt.closed { + return + } + mnt.closed = true + for tries := 0; tries < 1000; tries++ { + err := fuse.Unmount(mnt.Dir) + if err != nil { + // TODO do more than log? + log.Printf("unmount error: %v", err) + time.Sleep(10 * time.Millisecond) + continue + } + break + } + <-mnt.done + mnt.Conn.Close() + os.Remove(mnt.Dir) +} + +// Mounted mounts the fuse.Server at a temporary directory. +// +// It also waits until the filesystem is known to be visible (OS X +// workaround). +// +// After successful return, caller must clean up by calling Close. +func Mounted(srv *fs.Server, options ...fuse.MountOption) (*Mount, error) { + dir, err := ioutil.TempDir("", "fusetest") + if err != nil { + return nil, err + } + c, err := fuse.Mount(dir, options...) 
+ if err != nil { + return nil, err + } + + done := make(chan struct{}) + serveErr := make(chan error, 1) + mnt := &Mount{ + Dir: dir, + Conn: c, + Error: serveErr, + done: done, + } + go func() { + defer close(done) + serveErr <- srv.Serve(c) + }() + + select { + case <-mnt.Conn.Ready: + if mnt.Conn.MountError != nil { + return nil, mnt.Conn.MountError + } + return mnt, err + case err = <-mnt.Error: + // Serve quit early + if err != nil { + return nil, err + } + return nil, errors.New("Serve exited early") + } +} + +// MountedT mounts the filesystem at a temporary directory, +// directing its debug log to the testing logger. +// +// See Mounted for usage. +// +// The debug log is not enabled by default. Use `-fuse.debug` or call +// DebugByDefault to enable. +func MountedT(t testing.TB, filesys fs.FS, options ...fuse.MountOption) (*Mount, error) { + srv := &fs.Server{ + FS: filesys, + } + if debug { + srv.Debug = func(msg interface{}) { + t.Logf("FUSE: %s", msg) + } + } + return Mounted(srv, options...) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/mountinfo.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/mountinfo.go new file mode 100644 index 00000000..4e410ebd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/mountinfo.go @@ -0,0 +1,14 @@ +package fstestutil + +// MountInfo describes a mounted file system. +type MountInfo struct { + FSName string + Type string +} + +// GetMountInfo finds information about the mount at mnt. It is +// intended for use by tests only, and only fetches information +// relevant to the current tests.
+func GetMountInfo(mnt string) (*MountInfo, error) { + return getMountInfo(mnt) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/mountinfo_darwin.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/mountinfo_darwin.go new file mode 100644 index 00000000..dc88b0b0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/mountinfo_darwin.go @@ -0,0 +1,41 @@ +package fstestutil + +import ( + "regexp" + "syscall" +) + +// cstr converts a nil-terminated C string into a Go string +func cstr(ca []int8) string { + s := make([]byte, 0, len(ca)) + for _, c := range ca { + if c == 0x00 { + break + } + s = append(s, byte(c)) + } + return string(s) +} + +var re = regexp.MustCompile(`\\(.)`) + +// unescape removes backslash-escaping. The escaped characters are not +// mapped in any way; that is, unescape(`\n` ) == `n`. +func unescape(s string) string { + return re.ReplaceAllString(s, `$1`) +} + +func getMountInfo(mnt string) (*MountInfo, error) { + var st syscall.Statfs_t + err := syscall.Statfs(mnt, &st) + if err != nil { + return nil, err + } + i := &MountInfo{ + // osx getmntent(3) fails to un-escape the data, so we do it.. + // this might lead to double-unescaping in the future. fun. + // TestMountOptionFSNameEvilBackslashDouble checks for that. 
+ FSName: unescape(cstr(st.Mntfromname[:])), + } + return i, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/mountinfo_linux.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/mountinfo_linux.go new file mode 100644 index 00000000..44aff963 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/mountinfo_linux.go @@ -0,0 +1,57 @@ +package fstestutil + +import ( + "errors" + + "camlistore.org/third_party/github.com/artyom/mtab" +) + +// Inventory of mount information parsing packages out there: +// +// https://github.com/cratonica/gomounts +// +// Does it "right" by using getmntent(3), but that needs CGo which +// prevents cross-compiling easily. +// +// https://github.com/antage/mntent +// +// Does not handle escaping at all. +// +// https://github.com/deniswernert/go-fstab +// +// Does not handle escaping at all. Has trivial bugs like +// https://github.com/deniswernert/go-fstab/issues/1 +// +// http://godoc.org/github.com/docker/docker/pkg/mount +// +// Does not handle escaping at all. Part of an overly large source +// tree. +// +// https://github.com/artyom/mtab +// +// Does not split options. Otherwise seems to work. 
+ +func findMount(mnt string) (*mtab.Entry, error) { + mounts, err := mtab.Entries("/proc/mounts") + if err != nil { + return nil, err + } + for _, m := range mounts { + if m.Dir == mnt { + return &m, nil + } + } + return nil, errors.New("mount not found") +} + +func getMountInfo(mnt string) (*MountInfo, error) { + m, err := findMount(mnt) + if err != nil { + return nil, err + } + i := &MountInfo{ + FSName: m.Fsname, + Type: m.Type, + } + return i, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/record/buffer.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/record/buffer.go new file mode 100644 index 00000000..2820459b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/record/buffer.go @@ -0,0 +1,28 @@ +package record + +import ( + "bytes" + "io" + "sync" +) + +// Buffer is like bytes.Buffer but safe to access from multiple +// goroutines. +type Buffer struct { + mu sync.Mutex + buf bytes.Buffer +} + +var _ = io.Writer(&Buffer{}) + +func (b *Buffer) Write(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.buf.Write(p) +} + +func (b *Buffer) Bytes() []byte { + b.mu.Lock() + defer b.mu.Unlock() + return b.buf.Bytes() +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/record/record.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/record/record.go new file mode 100644 index 00000000..96bf8a37 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/record/record.go @@ -0,0 +1,381 @@ +package record + +import ( + "sync" + "sync/atomic" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" +) + +// Writes gathers data from FUSE Write calls. 
+type Writes struct { + buf Buffer +} + +var _ = fs.HandleWriter(&Writes{}) + +func (w *Writes) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error { + n, err := w.buf.Write(req.Data) + resp.Size = n + if err != nil { + // TODO hiding error + return fuse.EIO + } + return nil +} + +func (w *Writes) RecordedWriteData() []byte { + return w.buf.Bytes() +} + +// Counter records number of times a thing has occurred. +type Counter struct { + count uint32 +} + +func (r *Counter) Inc() { + atomic.StoreUint32(&r.count, 1) +} + +func (r *Counter) Count() uint32 { + return atomic.LoadUint32(&r.count) +} + +// MarkRecorder records whether a thing has occurred. +type MarkRecorder struct { + count Counter +} + +func (r *MarkRecorder) Mark() { + r.count.Inc() +} + +func (r *MarkRecorder) Recorded() bool { + return r.count.Count() > 0 +} + +// Flushes notes whether a FUSE Flush call has been seen. +type Flushes struct { + rec MarkRecorder +} + +var _ = fs.HandleFlusher(&Flushes{}) + +func (r *Flushes) Flush(req *fuse.FlushRequest, intr fs.Intr) fuse.Error { + r.rec.Mark() + return nil +} + +func (r *Flushes) RecordedFlush() bool { + return r.rec.Recorded() +} + +type Recorder struct { + mu sync.Mutex + val interface{} +} + +// Record that we've seen value. A nil value is indistinguishable from +// no value recorded. +func (r *Recorder) Record(value interface{}) { + r.mu.Lock() + r.val = value + r.mu.Unlock() +} + +func (r *Recorder) Recorded() interface{} { + r.mu.Lock() + val := r.val + r.mu.Unlock() + return val +} + +type RequestRecorder struct { + rec Recorder +} + +// Record a fuse.Request, after zeroing header fields that are hard to +// reproduce. +// +// Make sure to record a copy, not the original request. 
+func (r *RequestRecorder) RecordRequest(req fuse.Request) { + hdr := req.Hdr() + *hdr = fuse.Header{} + r.rec.Record(req) +} + +func (r *RequestRecorder) Recorded() fuse.Request { + val := r.rec.Recorded() + if val == nil { + return nil + } + return val.(fuse.Request) +} + +// Setattrs records a Setattr request and its fields. +type Setattrs struct { + rec RequestRecorder +} + +var _ = fs.NodeSetattrer(&Setattrs{}) + +func (r *Setattrs) Setattr(req *fuse.SetattrRequest, resp *fuse.SetattrResponse, intr fs.Intr) fuse.Error { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil +} + +func (r *Setattrs) RecordedSetattr() fuse.SetattrRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.SetattrRequest{} + } + return *(val.(*fuse.SetattrRequest)) +} + +// Fsyncs records an Fsync request and its fields. +type Fsyncs struct { + rec RequestRecorder +} + +var _ = fs.NodeFsyncer(&Fsyncs{}) + +func (r *Fsyncs) Fsync(req *fuse.FsyncRequest, intr fs.Intr) fuse.Error { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil +} + +func (r *Fsyncs) RecordedFsync() fuse.FsyncRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.FsyncRequest{} + } + return *(val.(*fuse.FsyncRequest)) +} + +// Mkdirs records a Mkdir request and its fields. +type Mkdirs struct { + rec RequestRecorder +} + +var _ = fs.NodeMkdirer(&Mkdirs{}) + +// Mkdir records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Mkdirs) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil, fuse.EIO +} + +// RecordedMkdir returns information about the Mkdir request. +// If no request was seen, returns a zero value. +func (r *Mkdirs) RecordedMkdir() fuse.MkdirRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.MkdirRequest{} + } + return *(val.(*fuse.MkdirRequest)) +} + +// Symlinks records a Symlink request and its fields. 
+type Symlinks struct { + rec RequestRecorder +} + +var _ = fs.NodeSymlinker(&Symlinks{}) + +// Symlink records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Symlinks) Symlink(req *fuse.SymlinkRequest, intr fs.Intr) (fs.Node, fuse.Error) { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil, fuse.EIO +} + +// RecordedSymlink returns information about the Symlink request. +// If no request was seen, returns a zero value. +func (r *Symlinks) RecordedSymlink() fuse.SymlinkRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.SymlinkRequest{} + } + return *(val.(*fuse.SymlinkRequest)) +} + +// Links records a Link request and its fields. +type Links struct { + rec RequestRecorder +} + +var _ = fs.NodeLinker(&Links{}) + +// Link records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Links) Link(req *fuse.LinkRequest, old fs.Node, intr fs.Intr) (fs.Node, fuse.Error) { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil, fuse.EIO +} + +// RecordedLink returns information about the Link request. +// If no request was seen, returns a zero value. +func (r *Links) RecordedLink() fuse.LinkRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.LinkRequest{} + } + return *(val.(*fuse.LinkRequest)) +} + +// Mknods records a Mknod request and its fields. +type Mknods struct { + rec RequestRecorder +} + +var _ = fs.NodeMknoder(&Mknods{}) + +// Mknod records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Mknods) Mknod(req *fuse.MknodRequest, intr fs.Intr) (fs.Node, fuse.Error) { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil, fuse.EIO +} + +// RecordedMknod returns information about the Mknod request. +// If no request was seen, returns a zero value. 
+func (r *Mknods) RecordedMknod() fuse.MknodRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.MknodRequest{} + } + return *(val.(*fuse.MknodRequest)) +} + +// Opens records a Open request and its fields. +type Opens struct { + rec RequestRecorder +} + +var _ = fs.NodeOpener(&Opens{}) + +// Open records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Opens) Open(req *fuse.OpenRequest, resp *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil, fuse.EIO +} + +// RecordedOpen returns information about the Open request. +// If no request was seen, returns a zero value. +func (r *Opens) RecordedOpen() fuse.OpenRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.OpenRequest{} + } + return *(val.(*fuse.OpenRequest)) +} + +// Getxattrs records a Getxattr request and its fields. +type Getxattrs struct { + rec RequestRecorder +} + +var _ = fs.NodeGetxattrer(&Getxattrs{}) + +// Getxattr records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Getxattrs) Getxattr(req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse, intr fs.Intr) fuse.Error { + tmp := *req + r.rec.RecordRequest(&tmp) + return fuse.ENODATA +} + +// RecordedGetxattr returns information about the Getxattr request. +// If no request was seen, returns a zero value. +func (r *Getxattrs) RecordedGetxattr() fuse.GetxattrRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.GetxattrRequest{} + } + return *(val.(*fuse.GetxattrRequest)) +} + +// Listxattrs records a Listxattr request and its fields. +type Listxattrs struct { + rec RequestRecorder +} + +var _ = fs.NodeListxattrer(&Listxattrs{}) + +// Listxattr records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. 
+func (r *Listxattrs) Listxattr(req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse, intr fs.Intr) fuse.Error { + tmp := *req + r.rec.RecordRequest(&tmp) + return fuse.ENODATA +} + +// RecordedListxattr returns information about the Listxattr request. +// If no request was seen, returns a zero value. +func (r *Listxattrs) RecordedListxattr() fuse.ListxattrRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.ListxattrRequest{} + } + return *(val.(*fuse.ListxattrRequest)) +} + +// Setxattrs records a Setxattr request and its fields. +type Setxattrs struct { + rec RequestRecorder +} + +var _ = fs.NodeSetxattrer(&Setxattrs{}) + +// Setxattr records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Setxattrs) Setxattr(req *fuse.SetxattrRequest, intr fs.Intr) fuse.Error { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil +} + +// RecordedSetxattr returns information about the Setxattr request. +// If no request was seen, returns a zero value. +func (r *Setxattrs) RecordedSetxattr() fuse.SetxattrRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.SetxattrRequest{} + } + return *(val.(*fuse.SetxattrRequest)) +} + +// Removexattrs records a Removexattr request and its fields. +type Removexattrs struct { + rec RequestRecorder +} + +var _ = fs.NodeRemovexattrer(&Removexattrs{}) + +// Removexattr records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Removexattrs) Removexattr(req *fuse.RemovexattrRequest, intr fs.Intr) fuse.Error { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil +} + +// RecordedRemovexattr returns information about the Removexattr request. +// If no request was seen, returns a zero value. 
+func (r *Removexattrs) RecordedRemovexattr() fuse.RemovexattrRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.RemovexattrRequest{} + } + return *(val.(*fuse.RemovexattrRequest)) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/record/wait.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/record/wait.go new file mode 100644 index 00000000..040a91ae --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/record/wait.go @@ -0,0 +1,54 @@ +package record + +import ( + "sync" + "time" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" +) + +type nothing struct{} + +// ReleaseWaiter notes whether a FUSE Release call has been seen. +// +// Releases are not guaranteed to happen synchronously with any client +// call, so they must be waited for. +type ReleaseWaiter struct { + once sync.Once + seen chan nothing +} + +var _ = fs.HandleReleaser(&ReleaseWaiter{}) + +func (r *ReleaseWaiter) init() { + r.once.Do(func() { + r.seen = make(chan nothing, 1) + }) +} + +func (r *ReleaseWaiter) Release(req *fuse.ReleaseRequest, intr fs.Intr) fuse.Error { + r.init() + close(r.seen) + return nil +} + +// WaitForRelease waits for Release to be called. +// +// With zero duration, wait forever. Otherwise, timeout early +// in a more controller way than `-test.timeout`. +// +// Returns whether a Release was seen. Always true if dur==0. 
+func (r *ReleaseWaiter) WaitForRelease(dur time.Duration) bool { + r.init() + var timeout <-chan time.Time + if dur > 0 { + timeout = time.After(dur) + } + select { + case <-r.seen: + return true + case <-timeout: + return false + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/testfs.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/testfs.go new file mode 100644 index 00000000..6081cfe2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/fstestutil/testfs.go @@ -0,0 +1,29 @@ +package fstestutil + +import ( + "os" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" +) + +// SimpleFS is a trivial FS that just implements the Root method. +type SimpleFS struct { + Node fs.Node +} + +var _ = fs.FS(SimpleFS{}) + +func (f SimpleFS) Root() (fs.Node, fuse.Error) { + return f.Node, nil +} + +// File can be embedded in a struct to make it look like a file. +type File struct{} + +func (f File) Attr() fuse.Attr { return fuse.Attr{Mode: 0666} } + +// Dir can be embedded in a struct to make it look like a directory. +type Dir struct{} + +func (f Dir) Attr() fuse.Attr { return fuse.Attr{Mode: os.ModeDir | 0777} } diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/serve.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/serve.go new file mode 100644 index 00000000..b30455fc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/serve.go @@ -0,0 +1,1317 @@ +// FUSE service loop, for servers that wish to use it. 
+ +package fs + +import ( + "encoding/binary" + "fmt" + "hash/fnv" + "io" + "reflect" + "strings" + "sync" + "time" +) + +import ( + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fuseutil" +) + +const ( + attrValidTime = 1 * time.Minute + entryValidTime = 1 * time.Minute +) + +// TODO: FINISH DOCS + +// An Intr is a channel that signals that a request has been interrupted. +// Being able to receive from the channel means the request has been +// interrupted. +type Intr chan struct{} + +func (Intr) String() string { return "fuse.Intr" } + +// An FS is the interface required of a file system. +// +// Other FUSE requests can be handled by implementing methods from the +// FS* interfaces, for example FSIniter. +type FS interface { + // Root is called to obtain the Node for the file system root. + Root() (Node, fuse.Error) +} + +type FSIniter interface { + // Init is called to initialize the FUSE connection. + // It can inspect the request and adjust the response as desired. + // Init must return promptly. + Init(req *fuse.InitRequest, resp *fuse.InitResponse, intr Intr) fuse.Error +} + +type FSStatfser interface { + // Statfs is called to obtain file system metadata. + // It should write that data to resp. + Statfs(req *fuse.StatfsRequest, resp *fuse.StatfsResponse, intr Intr) fuse.Error +} + +type FSDestroyer interface { + // Destroy is called when the file system is shutting down. + // + // Linux only sends this request for block device backed (fuseblk) + // filesystems, to allow them to flush writes to disk before the + // unmount completes. + // + // On normal FUSE filesystems, use Forget of the root Node to + // do actions at unmount time. + Destroy() +} + +type FSInodeGenerator interface { + // GenerateInode is called to pick a dynamic inode number when it + // would otherwise be 0. 
+ // + // Not all filesystems bother tracking inodes, but FUSE requires + // the inode to be set, and fewer duplicates in general makes UNIX + // tools work better. + // + // Operations where the nodes may return 0 inodes include Getattr, + // Setattr and ReadDir. + // + // If FS does not implement FSInodeGenerator, GenerateDynamicInode + // is used. + // + // Implementing this is useful to e.g. constrain the range of + // inode values used for dynamic inodes. + GenerateInode(parentInode uint64, name string) uint64 +} + +// A Node is the interface required of a file or directory. +// See the documentation for type FS for general information +// pertaining to all methods. +// +// Other FUSE requests can be handled by implementing methods from the +// Node* interfaces, for example NodeOpener. +type Node interface { + Attr() fuse.Attr +} + +type NodeGetattrer interface { + // Getattr obtains the standard metadata for the receiver. + // It should store that metadata in resp. + // + // If this method is not implemented, the attributes will be + // generated based on Attr(), with zero values filled in. + Getattr(req *fuse.GetattrRequest, resp *fuse.GetattrResponse, intr Intr) fuse.Error +} + +type NodeSetattrer interface { + // Setattr sets the standard metadata for the receiver. + Setattr(req *fuse.SetattrRequest, resp *fuse.SetattrResponse, intr Intr) fuse.Error +} + +type NodeSymlinker interface { + // Symlink creates a new symbolic link in the receiver, which must be a directory. + // + // TODO is the above true about directories? + Symlink(req *fuse.SymlinkRequest, intr Intr) (Node, fuse.Error) +} + +// This optional request will be called only for symbolic link nodes. +type NodeReadlinker interface { + // Readlink reads a symbolic link. + Readlink(req *fuse.ReadlinkRequest, intr Intr) (string, fuse.Error) +} + +type NodeLinker interface { + // Link creates a new directory entry in the receiver based on an + // existing Node. Receiver must be a directory. 
+ Link(req *fuse.LinkRequest, old Node, intr Intr) (Node, fuse.Error) +} + +type NodeRemover interface { + // Remove removes the entry with the given name from + // the receiver, which must be a directory. The entry to be removed + // may correspond to a file (unlink) or to a directory (rmdir). + Remove(req *fuse.RemoveRequest, intr Intr) fuse.Error +} + +type NodeAccesser interface { + // Access checks whether the calling context has permission for + // the given operations on the receiver. If so, Access should + // return nil. If not, Access should return EPERM. + // + // Note that this call affects the result of the access(2) system + // call but not the open(2) system call. If Access is not + // implemented, the Node behaves as if it always returns nil + // (permission granted), relying on checks in Open instead. + Access(req *fuse.AccessRequest, intr Intr) fuse.Error +} + +type NodeStringLookuper interface { + // Lookup looks up a specific entry in the receiver, + // which must be a directory. Lookup should return a Node + // corresponding to the entry. If the name does not exist in + // the directory, Lookup should return nil, err. + // + // Lookup need not to handle the names "." and "..". + Lookup(name string, intr Intr) (Node, fuse.Error) +} + +type NodeRequestLookuper interface { + // Lookup looks up a specific entry in the receiver. + // See NodeStringLookuper for more. + Lookup(req *fuse.LookupRequest, resp *fuse.LookupResponse, intr Intr) (Node, fuse.Error) +} + +type NodeMkdirer interface { + Mkdir(req *fuse.MkdirRequest, intr Intr) (Node, fuse.Error) +} + +type NodeOpener interface { + // Open opens the receiver. + // XXX note about access. XXX OpenFlags. + // XXX note that the Node may be a file or directory. + Open(req *fuse.OpenRequest, resp *fuse.OpenResponse, intr Intr) (Handle, fuse.Error) +} + +type NodeCreater interface { + // Create creates a new directory entry in the receiver, which + // must be a directory. 
+ Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr Intr) (Node, Handle, fuse.Error) +} + +type NodeForgetter interface { + Forget() +} + +type NodeRenamer interface { + Rename(req *fuse.RenameRequest, newDir Node, intr Intr) fuse.Error +} + +type NodeMknoder interface { + Mknod(req *fuse.MknodRequest, intr Intr) (Node, fuse.Error) +} + +// TODO this should be on Handle not Node +type NodeFsyncer interface { + Fsync(req *fuse.FsyncRequest, intr Intr) fuse.Error +} + +type NodeGetxattrer interface { + // Getxattr gets an extended attribute by the given name from the + // node. + // + // If there is no xattr by that name, returns fuse.ENODATA. This + // will be translated to the platform-specific correct error code + // by the framework. + Getxattr(req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse, intr Intr) fuse.Error +} + +type NodeListxattrer interface { + // Listxattr lists the extended attributes recorded for the node. + Listxattr(req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse, intr Intr) fuse.Error +} + +type NodeSetxattrer interface { + // Setxattr sets an extended attribute with the given name and + // value for the node. + Setxattr(req *fuse.SetxattrRequest, intr Intr) fuse.Error +} + +type NodeRemovexattrer interface { + // Removexattr removes an extended attribute for the name. + // + // If there is no xattr by that name, returns fuse.ENODATA. This + // will be translated to the platform-specific correct error code + // by the framework. 
+ Removexattr(req *fuse.RemovexattrRequest, intr Intr) fuse.Error +} + +var startTime = time.Now() + +func nodeAttr(n Node) (attr fuse.Attr) { + attr = n.Attr() + if attr.Nlink == 0 { + attr.Nlink = 1 + } + if attr.Atime.IsZero() { + attr.Atime = startTime + } + if attr.Mtime.IsZero() { + attr.Mtime = startTime + } + if attr.Ctime.IsZero() { + attr.Ctime = startTime + } + if attr.Crtime.IsZero() { + attr.Crtime = startTime + } + return +} + +// A Handle is the interface required of an opened file or directory. +// See the documentation for type FS for general information +// pertaining to all methods. +// +// Other FUSE requests can be handled by implementing methods from the +// Node* interfaces. The most common to implement are +// HandleReader, HandleReadDirer, and HandleWriter. +// +// TODO implement methods: Getlk, Setlk, Setlkw +type Handle interface { +} + +type HandleFlusher interface { + // Flush is called each time the file or directory is closed. + // Because there can be multiple file descriptors referring to a + // single opened file, Flush can be called multiple times. + Flush(req *fuse.FlushRequest, intr Intr) fuse.Error +} + +type HandleReadAller interface { + ReadAll(intr Intr) ([]byte, fuse.Error) +} + +type HandleReadDirer interface { + ReadDir(intrt Intr) ([]fuse.Dirent, fuse.Error) +} + +type HandleReader interface { + Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr Intr) fuse.Error +} + +type HandleWriter interface { + Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr Intr) fuse.Error +} + +type HandleReleaser interface { + Release(req *fuse.ReleaseRequest, intr Intr) fuse.Error +} + +type Server struct { + FS FS + + // Function to send debug log messages to. If nil, use fuse.Debug. + // Note that changing this or fuse.Debug may not affect existing + // calls to Serve. + // + // See fuse.Debug for the rules that log functions must follow. 
+ Debug func(msg interface{}) +} + +// Serve serves the FUSE connection by making calls to the methods +// of fs and the Nodes and Handles it makes available. It returns only +// when the connection has been closed or an unexpected error occurs. +func (s *Server) Serve(c *fuse.Conn) error { + sc := serveConn{ + fs: s.FS, + debug: s.Debug, + req: map[fuse.RequestID]*serveRequest{}, + dynamicInode: GenerateDynamicInode, + } + if sc.debug == nil { + sc.debug = fuse.Debug + } + if dyn, ok := sc.fs.(FSInodeGenerator); ok { + sc.dynamicInode = dyn.GenerateInode + } + + root, err := sc.fs.Root() + if err != nil { + return fmt.Errorf("cannot obtain root node: %v", err) + } + sc.node = append(sc.node, nil, &serveNode{inode: 1, node: root, refs: 1}) + sc.handle = append(sc.handle, nil) + + for { + req, err := c.ReadRequest() + if err != nil { + if err == io.EOF { + break + } + return err + } + + go sc.serve(req) + } + return nil +} + +// Serve serves a FUSE connection with the default settings. See +// Server.Serve. +func Serve(c *fuse.Conn, fs FS) error { + server := Server{ + FS: fs, + } + return server.Serve(c) +} + +type nothing struct{} + +type serveConn struct { + meta sync.Mutex + fs FS + req map[fuse.RequestID]*serveRequest + node []*serveNode + handle []*serveHandle + freeNode []fuse.NodeID + freeHandle []fuse.HandleID + nodeGen uint64 + debug func(msg interface{}) + dynamicInode func(parent uint64, name string) uint64 +} + +type serveRequest struct { + Request fuse.Request + Intr Intr +} + +type serveNode struct { + inode uint64 + node Node + refs uint64 +} + +func (sn *serveNode) attr() (attr fuse.Attr) { + attr = nodeAttr(sn.node) + if attr.Inode == 0 { + attr.Inode = sn.inode + } + return +} + +type serveHandle struct { + handle Handle + readData []byte + nodeID fuse.NodeID +} + +// NodeRef can be embedded in a Node to recognize the same Node being +// returned from multiple Lookup, Create etc calls. 
+// +// Without this, each Node will get a new NodeID, causing spurious +// cache invalidations, extra lookups and aliasing anomalies. This may +// not matter for a simple, read-only filesystem. +type NodeRef struct { + id fuse.NodeID + generation uint64 +} + +// nodeRef is only ever accessed while holding serveConn.meta +func (n *NodeRef) nodeRef() *NodeRef { + return n +} + +type nodeRef interface { + nodeRef() *NodeRef +} + +func (c *serveConn) saveNode(inode uint64, node Node) (id fuse.NodeID, gen uint64) { + c.meta.Lock() + defer c.meta.Unlock() + + var ref *NodeRef + if nodeRef, ok := node.(nodeRef); ok { + ref = nodeRef.nodeRef() + + if ref.id != 0 { + // dropNode guarantees that NodeRef is zeroed at the same + // time as the NodeID is removed from serveConn.node, as + // guarded by c.meta; this means sn cannot be nil here + sn := c.node[ref.id] + sn.refs++ + return ref.id, ref.generation + } + } + + sn := &serveNode{inode: inode, node: node, refs: 1} + if n := len(c.freeNode); n > 0 { + id = c.freeNode[n-1] + c.freeNode = c.freeNode[:n-1] + c.node[id] = sn + c.nodeGen++ + } else { + id = fuse.NodeID(len(c.node)) + c.node = append(c.node, sn) + } + gen = c.nodeGen + if ref != nil { + ref.id = id + ref.generation = gen + } + return +} + +func (c *serveConn) saveHandle(handle Handle, nodeID fuse.NodeID) (id fuse.HandleID) { + c.meta.Lock() + shandle := &serveHandle{handle: handle, nodeID: nodeID} + if n := len(c.freeHandle); n > 0 { + id = c.freeHandle[n-1] + c.freeHandle = c.freeHandle[:n-1] + c.handle[id] = shandle + } else { + id = fuse.HandleID(len(c.handle)) + c.handle = append(c.handle, shandle) + } + c.meta.Unlock() + return +} + +type nodeRefcountDropBug struct { + N uint64 + Refs uint64 + Node fuse.NodeID +} + +func (n *nodeRefcountDropBug) String() string { + return fmt.Sprintf("bug: trying to drop %d of %d references to %v", n.N, n.Refs, n.Node) +} + +func (c *serveConn) dropNode(id fuse.NodeID, n uint64) (forget bool) { + c.meta.Lock() + defer 
c.meta.Unlock() + snode := c.node[id] + + if snode == nil { + // this should only happen if refcounts kernel<->us disagree + // *and* two ForgetRequests for the same node race each other; + // this indicates a bug somewhere + c.debug(nodeRefcountDropBug{N: n, Node: id}) + + // we may end up triggering Forget twice, but that's better + // than not even once, and that's the best we can do + return true + } + + if n > snode.refs { + c.debug(nodeRefcountDropBug{N: n, Refs: snode.refs, Node: id}) + n = snode.refs + } + + snode.refs -= n + if snode.refs == 0 { + c.node[id] = nil + if nodeRef, ok := snode.node.(nodeRef); ok { + ref := nodeRef.nodeRef() + *ref = NodeRef{} + } + c.freeNode = append(c.freeNode, id) + return true + } + return false +} + +func (c *serveConn) dropHandle(id fuse.HandleID) { + c.meta.Lock() + c.handle[id] = nil + c.freeHandle = append(c.freeHandle, id) + c.meta.Unlock() +} + +type missingHandle struct { + Handle fuse.HandleID + MaxHandle fuse.HandleID +} + +func (m missingHandle) String() string { + return fmt.Sprint("missing handle", m.Handle, m.MaxHandle) +} + +// Returns nil for invalid handles. +func (c *serveConn) getHandle(id fuse.HandleID) (shandle *serveHandle) { + c.meta.Lock() + defer c.meta.Unlock() + if id < fuse.HandleID(len(c.handle)) { + shandle = c.handle[uint(id)] + } + if shandle == nil { + c.debug(missingHandle{ + Handle: id, + MaxHandle: fuse.HandleID(len(c.handle)), + }) + } + return +} + +type request struct { + Op string + Request *fuse.Header + In interface{} `json:",omitempty"` +} + +func (r request) String() string { + return fmt.Sprintf("<- %s", r.In) +} + +type logResponseHeader struct { + ID fuse.RequestID +} + +func (m logResponseHeader) String() string { + return fmt.Sprintf("ID=%#x", m.ID) +} + +type response struct { + Op string + Request logResponseHeader + Out interface{} `json:",omitempty"` + // Errno contains the errno value as a string, for example "EPERM". 
+ Errno string `json:",omitempty"` + // Error may contain a free form error message. + Error string `json:",omitempty"` +} + +func (r response) errstr() string { + s := r.Errno + if r.Error != "" { + // prefix the errno constant to the long form message + s = s + ": " + r.Error + } + return s +} + +func (r response) String() string { + switch { + case r.Errno != "" && r.Out != nil: + return fmt.Sprintf("-> %s error=%s %s", r.Request, r.errstr(), r.Out) + case r.Errno != "": + return fmt.Sprintf("-> %s error=%s", r.Request, r.errstr()) + case r.Out != nil: + // make sure (seemingly) empty values are readable + switch r.Out.(type) { + case string: + return fmt.Sprintf("-> %s %q", r.Request, r.Out) + case []byte: + return fmt.Sprintf("-> %s [% x]", r.Request, r.Out) + default: + return fmt.Sprintf("-> %s %s", r.Request, r.Out) + } + default: + return fmt.Sprintf("-> %s", r.Request) + } +} + +type logMissingNode struct { + MaxNode fuse.NodeID +} + +func opName(req fuse.Request) string { + t := reflect.Indirect(reflect.ValueOf(req)).Type() + s := t.Name() + s = strings.TrimSuffix(s, "Request") + return s +} + +type logLinkRequestOldNodeNotFound struct { + Request *fuse.Header + In *fuse.LinkRequest +} + +func (m *logLinkRequestOldNodeNotFound) String() string { + return fmt.Sprintf("In LinkRequest (request %#x), node %d not found", m.Request.Hdr().ID, m.In.OldNode) +} + +type renameNewDirNodeNotFound struct { + Request *fuse.Header + In *fuse.RenameRequest +} + +func (m *renameNewDirNodeNotFound) String() string { + return fmt.Sprintf("In RenameRequest (request %#x), node %d not found", m.Request.Hdr().ID, m.In.NewDir) +} + +func (c *serveConn) serve(r fuse.Request) { + intr := make(Intr) + req := &serveRequest{Request: r, Intr: intr} + + c.debug(request{ + Op: opName(r), + Request: r.Hdr(), + In: r, + }) + var node Node + var snode *serveNode + c.meta.Lock() + hdr := r.Hdr() + if id := hdr.Node; id != 0 { + if id < fuse.NodeID(len(c.node)) { + snode = c.node[uint(id)] 
+ } + if snode == nil { + c.meta.Unlock() + c.debug(response{ + Op: opName(r), + Request: logResponseHeader{ID: hdr.ID}, + Error: fuse.ESTALE.ErrnoName(), + // this is the only place that sets both Error and + // Out; not sure if i want to do that; might get rid + // of len(c.node) things altogether + Out: logMissingNode{ + MaxNode: fuse.NodeID(len(c.node)), + }, + }) + r.RespondError(fuse.ESTALE) + return + } + node = snode.node + } + if c.req[hdr.ID] != nil { + // This happens with OSXFUSE. Assume it's okay and + // that we'll never see an interrupt for this one. + // Otherwise everything wedges. TODO: Report to OSXFUSE? + // + // TODO this might have been because of missing done() calls + intr = nil + } else { + c.req[hdr.ID] = req + } + c.meta.Unlock() + + // Call this before responding. + // After responding is too late: we might get another request + // with the same ID and be very confused. + done := func(resp interface{}) { + msg := response{ + Op: opName(r), + Request: logResponseHeader{ID: hdr.ID}, + } + if err, ok := resp.(error); ok { + msg.Error = err.Error() + if ferr, ok := err.(fuse.ErrorNumber); ok { + errno := ferr.Errno() + msg.Errno = errno.ErrnoName() + if errno == err { + // it's just a fuse.Errno with no extra detail; + // skip the textual message for log readability + msg.Error = "" + } + } else { + msg.Errno = fuse.DefaultErrno.ErrnoName() + } + } else { + msg.Out = resp + } + c.debug(msg) + + c.meta.Lock() + delete(c.req, hdr.ID) + c.meta.Unlock() + } + + switch r := r.(type) { + default: + // Note: To FUSE, ENOSYS means "this server never implements this request." + // It would be inappropriate to return ENOSYS for other operations in this + // switch that might only be unavailable in some contexts, not all. + done(fuse.ENOSYS) + r.RespondError(fuse.ENOSYS) + + // FS operations. 
+ case *fuse.InitRequest: + s := &fuse.InitResponse{ + MaxWrite: 128 * 1024, + Flags: fuse.InitBigWrites, + } + if fs, ok := c.fs.(FSIniter); ok { + if err := fs.Init(r, s, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } + done(s) + r.Respond(s) + + case *fuse.StatfsRequest: + s := &fuse.StatfsResponse{} + if fs, ok := c.fs.(FSStatfser); ok { + if err := fs.Statfs(r, s, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } + done(s) + r.Respond(s) + + // Node operations. + case *fuse.GetattrRequest: + s := &fuse.GetattrResponse{} + if n, ok := node.(NodeGetattrer); ok { + if err := n.Getattr(r, s, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } else { + s.AttrValid = attrValidTime + s.Attr = snode.attr() + } + done(s) + r.Respond(s) + + case *fuse.SetattrRequest: + s := &fuse.SetattrResponse{} + if n, ok := node.(NodeSetattrer); ok { + if err := n.Setattr(r, s, intr); err != nil { + done(err) + r.RespondError(err) + break + } + done(s) + r.Respond(s) + break + } + + if s.AttrValid == 0 { + s.AttrValid = attrValidTime + } + s.Attr = snode.attr() + done(s) + r.Respond(s) + + case *fuse.SymlinkRequest: + s := &fuse.SymlinkResponse{} + n, ok := node.(NodeSymlinker) + if !ok { + done(fuse.EIO) // XXX or EPERM like Mkdir? + r.RespondError(fuse.EIO) + break + } + n2, err := n.Symlink(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + c.saveLookup(&s.LookupResponse, snode, r.NewName, n2) + done(s) + r.Respond(s) + + case *fuse.ReadlinkRequest: + n, ok := node.(NodeReadlinker) + if !ok { + done(fuse.EIO) /// XXX or EPERM? + r.RespondError(fuse.EIO) + break + } + target, err := n.Readlink(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + done(target) + r.Respond(target) + + case *fuse.LinkRequest: + n, ok := node.(NodeLinker) + if !ok { + done(fuse.EIO) /// XXX or EPERM? 
+ r.RespondError(fuse.EIO) + break + } + c.meta.Lock() + var oldNode *serveNode + if int(r.OldNode) < len(c.node) { + oldNode = c.node[r.OldNode] + } + c.meta.Unlock() + if oldNode == nil { + c.debug(logLinkRequestOldNodeNotFound{ + Request: r.Hdr(), + In: r, + }) + done(fuse.EIO) + r.RespondError(fuse.EIO) + break + } + n2, err := n.Link(r, oldNode.node, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + s := &fuse.LookupResponse{} + c.saveLookup(s, snode, r.NewName, n2) + done(s) + r.Respond(s) + + case *fuse.RemoveRequest: + n, ok := node.(NodeRemover) + if !ok { + done(fuse.EIO) /// XXX or EPERM? + r.RespondError(fuse.EIO) + break + } + err := n.Remove(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + done(nil) + r.Respond() + + case *fuse.AccessRequest: + if n, ok := node.(NodeAccesser); ok { + if err := n.Access(r, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } + done(nil) + r.Respond() + + case *fuse.LookupRequest: + var n2 Node + var err fuse.Error + s := &fuse.LookupResponse{} + if n, ok := node.(NodeStringLookuper); ok { + n2, err = n.Lookup(r.Name, intr) + } else if n, ok := node.(NodeRequestLookuper); ok { + n2, err = n.Lookup(r, s, intr) + } else { + done(fuse.ENOENT) + r.RespondError(fuse.ENOENT) + break + } + if err != nil { + done(err) + r.RespondError(err) + break + } + c.saveLookup(s, snode, r.Name, n2) + done(s) + r.Respond(s) + + case *fuse.MkdirRequest: + s := &fuse.MkdirResponse{} + n, ok := node.(NodeMkdirer) + if !ok { + done(fuse.EPERM) + r.RespondError(fuse.EPERM) + break + } + n2, err := n.Mkdir(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + c.saveLookup(&s.LookupResponse, snode, r.Name, n2) + done(s) + r.Respond(s) + + case *fuse.OpenRequest: + s := &fuse.OpenResponse{} + var h2 Handle + if n, ok := node.(NodeOpener); ok { + hh, err := n.Open(r, s, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + h2 = hh + } else { + h2 
= node + } + s.Handle = c.saveHandle(h2, hdr.Node) + done(s) + r.Respond(s) + + case *fuse.CreateRequest: + n, ok := node.(NodeCreater) + if !ok { + // If we send back ENOSYS, FUSE will try mknod+open. + done(fuse.EPERM) + r.RespondError(fuse.EPERM) + break + } + s := &fuse.CreateResponse{OpenResponse: fuse.OpenResponse{}} + n2, h2, err := n.Create(r, s, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + c.saveLookup(&s.LookupResponse, snode, r.Name, n2) + s.Handle = c.saveHandle(h2, hdr.Node) + done(s) + r.Respond(s) + + case *fuse.GetxattrRequest: + n, ok := node.(NodeGetxattrer) + if !ok { + done(fuse.ENOTSUP) + r.RespondError(fuse.ENOTSUP) + break + } + s := &fuse.GetxattrResponse{} + err := n.Getxattr(r, s, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { + done(fuse.ERANGE) + r.RespondError(fuse.ERANGE) + break + } + done(s) + r.Respond(s) + + case *fuse.ListxattrRequest: + n, ok := node.(NodeListxattrer) + if !ok { + done(fuse.ENOTSUP) + r.RespondError(fuse.ENOTSUP) + break + } + s := &fuse.ListxattrResponse{} + err := n.Listxattr(r, s, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { + done(fuse.ERANGE) + r.RespondError(fuse.ERANGE) + break + } + done(s) + r.Respond(s) + + case *fuse.SetxattrRequest: + n, ok := node.(NodeSetxattrer) + if !ok { + done(fuse.ENOTSUP) + r.RespondError(fuse.ENOTSUP) + break + } + err := n.Setxattr(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + done(nil) + r.Respond() + + case *fuse.RemovexattrRequest: + n, ok := node.(NodeRemovexattrer) + if !ok { + done(fuse.ENOTSUP) + r.RespondError(fuse.ENOTSUP) + break + } + err := n.Removexattr(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + done(nil) + r.Respond() + + case *fuse.ForgetRequest: + forget := c.dropNode(hdr.Node, r.N) + if forget { + n, ok := 
node.(NodeForgetter) + if ok { + n.Forget() + } + } + done(nil) + r.Respond() + + // Handle operations. + case *fuse.ReadRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + done(fuse.ESTALE) + r.RespondError(fuse.ESTALE) + return + } + handle := shandle.handle + + s := &fuse.ReadResponse{Data: make([]byte, 0, r.Size)} + if r.Dir { + if h, ok := handle.(HandleReadDirer); ok { + if shandle.readData == nil { + dirs, err := h.ReadDir(intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + var data []byte + for _, dir := range dirs { + if dir.Inode == 0 { + dir.Inode = c.dynamicInode(snode.inode, dir.Name) + } + data = fuse.AppendDirent(data, dir) + } + shandle.readData = data + } + fuseutil.HandleRead(r, s, shandle.readData) + done(s) + r.Respond(s) + break + } + } else { + if h, ok := handle.(HandleReadAller); ok { + if shandle.readData == nil { + data, err := h.ReadAll(intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + if data == nil { + data = []byte{} + } + shandle.readData = data + } + fuseutil.HandleRead(r, s, shandle.readData) + done(s) + r.Respond(s) + break + } + h, ok := handle.(HandleReader) + if !ok { + fmt.Printf("NO READ FOR %T\n", handle) + done(fuse.EIO) + r.RespondError(fuse.EIO) + break + } + if err := h.Read(r, s, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } + done(s) + r.Respond(s) + + case *fuse.WriteRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + done(fuse.ESTALE) + r.RespondError(fuse.ESTALE) + return + } + + s := &fuse.WriteResponse{} + if h, ok := shandle.handle.(HandleWriter); ok { + if err := h.Write(r, s, intr); err != nil { + done(err) + r.RespondError(err) + break + } + done(s) + r.Respond(s) + break + } + done(fuse.EIO) + r.RespondError(fuse.EIO) + + case *fuse.FlushRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + done(fuse.ESTALE) + r.RespondError(fuse.ESTALE) + return + } + handle := shandle.handle + + if h, ok := 
handle.(HandleFlusher); ok { + if err := h.Flush(r, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } + done(nil) + r.Respond() + + case *fuse.ReleaseRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + done(fuse.ESTALE) + r.RespondError(fuse.ESTALE) + return + } + handle := shandle.handle + + // No matter what, release the handle. + c.dropHandle(r.Handle) + + if h, ok := handle.(HandleReleaser); ok { + if err := h.Release(r, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } + done(nil) + r.Respond() + + case *fuse.DestroyRequest: + if fs, ok := c.fs.(FSDestroyer); ok { + fs.Destroy() + } + done(nil) + r.Respond() + + case *fuse.RenameRequest: + c.meta.Lock() + var newDirNode *serveNode + if int(r.NewDir) < len(c.node) { + newDirNode = c.node[r.NewDir] + } + c.meta.Unlock() + if newDirNode == nil { + c.debug(renameNewDirNodeNotFound{ + Request: r.Hdr(), + In: r, + }) + done(fuse.EIO) + r.RespondError(fuse.EIO) + break + } + n, ok := node.(NodeRenamer) + if !ok { + done(fuse.EIO) // XXX or EPERM like Mkdir? 
+ r.RespondError(fuse.EIO) + break + } + err := n.Rename(r, newDirNode.node, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + done(nil) + r.Respond() + + case *fuse.MknodRequest: + n, ok := node.(NodeMknoder) + if !ok { + done(fuse.EIO) + r.RespondError(fuse.EIO) + break + } + n2, err := n.Mknod(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + s := &fuse.LookupResponse{} + c.saveLookup(s, snode, r.Name, n2) + done(s) + r.Respond(s) + + case *fuse.FsyncRequest: + n, ok := node.(NodeFsyncer) + if !ok { + done(fuse.EIO) + r.RespondError(fuse.EIO) + break + } + err := n.Fsync(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + done(nil) + r.Respond() + + case *fuse.InterruptRequest: + c.meta.Lock() + ireq := c.req[r.IntrID] + if ireq != nil && ireq.Intr != nil { + close(ireq.Intr) + ireq.Intr = nil + } + c.meta.Unlock() + done(nil) + r.Respond() + + /* case *FsyncdirRequest: + done(ENOSYS) + r.RespondError(ENOSYS) + + case *GetlkRequest, *SetlkRequest, *SetlkwRequest: + done(ENOSYS) + r.RespondError(ENOSYS) + + case *BmapRequest: + done(ENOSYS) + r.RespondError(ENOSYS) + + case *SetvolnameRequest, *GetxtimesRequest, *ExchangeRequest: + done(ENOSYS) + r.RespondError(ENOSYS) + */ + } +} + +func (c *serveConn) saveLookup(s *fuse.LookupResponse, snode *serveNode, elem string, n2 Node) { + s.Attr = nodeAttr(n2) + if s.Attr.Inode == 0 { + s.Attr.Inode = c.dynamicInode(snode.inode, elem) + } + + s.Node, s.Generation = c.saveNode(s.Attr.Inode, n2) + if s.EntryValid == 0 { + s.EntryValid = entryValidTime + } + if s.AttrValid == 0 { + s.AttrValid = attrValidTime + } +} + +// DataHandle returns a read-only Handle that satisfies reads +// using the given data. 
+func DataHandle(data []byte) Handle { + return &dataHandle{data} +} + +type dataHandle struct { + data []byte +} + +func (d *dataHandle) ReadAll(intr Intr) ([]byte, fuse.Error) { + return d.data, nil +} + +// GenerateDynamicInode returns a dynamic inode. +// +// The parent inode and current entry name are used as the criteria +// for choosing a pseudorandom inode. This makes it likely the same +// entry will get the same inode on multiple runs. +func GenerateDynamicInode(parent uint64, name string) uint64 { + h := fnv.New64a() + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], parent) + _, _ = h.Write(buf[:]) + _, _ = h.Write([]byte(name)) + var inode uint64 + for { + inode = h.Sum64() + if inode != 0 { + break + } + // there's a tiny probability that result is zero; change the + // input a little and try again + _, _ = h.Write([]byte{'x'}) + } + return inode +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/serve_test.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/serve_test.go new file mode 100644 index 00000000..90f07b21 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/serve_test.go @@ -0,0 +1,1767 @@ +package fs_test + +import ( + "bytes" + "errors" + "flag" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + "syscall" + "testing" + "time" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" + "camlistore.org/third_party/bazil.org/fuse/fs/fstestutil" + "camlistore.org/third_party/bazil.org/fuse/fs/fstestutil/record" + "camlistore.org/third_party/bazil.org/fuse/fuseutil" + "camlistore.org/third_party/bazil.org/fuse/syscallx" +) + +// TO TEST: +// Lookup(*LookupRequest, *LookupResponse) +// Getattr(*GetattrRequest, *GetattrResponse) +// Attr with explicit inode +// Setattr(*SetattrRequest, *SetattrResponse) +// Access(*AccessRequest) +// Open(*OpenRequest, 
*OpenResponse) +// Write(*WriteRequest, *WriteResponse) +// Flush(*FlushRequest, *FlushResponse) + +func init() { + fstestutil.DebugByDefault() +} + +var childMode bool + +func init() { + flag.BoolVar(&childMode, "fuse.internal.childmode", false, "internal use only") +} + +// childCmd prepares a test function to be run in a subprocess, with +// childMode set to true. Caller must still call Run or Start. +// +// Re-using the test executable as the subprocess is useful because +// now test executables can e.g. be cross-compiled, transferred +// between hosts, and run in settings where the whole Go development +// environment is not installed. +func childCmd(testName string) (*exec.Cmd, error) { + // caller may set cwd, so we can't rely on relative paths + executable, err := filepath.Abs(os.Args[0]) + if err != nil { + return nil, err + } + testName = regexp.QuoteMeta(testName) + cmd := exec.Command(executable, "-test.run=^"+testName+"$", "-fuse.internal.childmode") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd, nil +} + +// childMapFS is an FS with one fixed child named "child". +type childMapFS map[string]fs.Node + +var _ = fs.FS(childMapFS{}) +var _ = fs.Node(childMapFS{}) +var _ = fs.NodeStringLookuper(childMapFS{}) + +func (f childMapFS) Attr() fuse.Attr { + return fuse.Attr{Inode: 1, Mode: os.ModeDir | 0777} +} + +func (f childMapFS) Root() (fs.Node, fuse.Error) { + return f, nil +} + +func (f childMapFS) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + child, ok := f[name] + if !ok { + return nil, fuse.ENOENT + } + return child, nil +} + +// symlink can be embedded in a struct to make it look like a symlink. +type symlink struct { + target string +} + +func (f symlink) Attr() fuse.Attr { return fuse.Attr{Mode: os.ModeSymlink | 0666} } + +// fifo can be embedded in a struct to make it look like a named pipe. 
+type fifo struct{} + +func (f fifo) Attr() fuse.Attr { return fuse.Attr{Mode: os.ModeNamedPipe | 0666} } + +type badRootFS struct{} + +func (badRootFS) Root() (fs.Node, fuse.Error) { + // pick a really distinct error, to identify it later + return nil, fuse.Errno(syscall.ENAMETOOLONG) +} + +func TestRootErr(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, badRootFS{}) + if err == nil { + // path for synchronous mounts (linux): started out fine, now + // wait for Serve to cycle through + err = <-mnt.Error + // without this, unmount will keep failing with EBUSY; nudge + // kernel into realizing InitResponse will not happen + mnt.Conn.Close() + mnt.Close() + } + + if err == nil { + t.Fatal("expected an error") + } + // TODO this should not be a textual comparison, Serve hides + // details + if err.Error() != "cannot obtain root node: file name too long" { + t.Errorf("Unexpected error: %v", err) + } +} + +type testStatFS struct{} + +func (f testStatFS) Root() (fs.Node, fuse.Error) { + return f, nil +} + +func (f testStatFS) Attr() fuse.Attr { + return fuse.Attr{Inode: 1, Mode: os.ModeDir | 0777} +} + +func (f testStatFS) Statfs(req *fuse.StatfsRequest, resp *fuse.StatfsResponse, int fs.Intr) fuse.Error { + resp.Blocks = 42 + resp.Files = 13 + return nil +} + +func TestStatfs(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, testStatFS{}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + { + var st syscall.Statfs_t + err = syscall.Statfs(mnt.Dir, &st) + if err != nil { + t.Errorf("Statfs failed: %v", err) + } + t.Logf("Statfs got: %#v", st) + if g, e := st.Blocks, uint64(42); g != e { + t.Errorf("got Blocks = %d; want %d", g, e) + } + if g, e := st.Files, uint64(13); g != e { + t.Errorf("got Files = %d; want %d", g, e) + } + } + + { + var st syscall.Statfs_t + f, err := os.Open(mnt.Dir) + if err != nil { + t.Errorf("Open for fstatfs failed: %v", err) + } + defer f.Close() + err = syscall.Fstatfs(int(f.Fd()), &st) + if err 
!= nil { + t.Errorf("Fstatfs failed: %v", err) + } + t.Logf("Fstatfs got: %#v", st) + if g, e := st.Blocks, uint64(42); g != e { + t.Errorf("got Blocks = %d; want %d", g, e) + } + if g, e := st.Files, uint64(13); g != e { + t.Errorf("got Files = %d; want %d", g, e) + } + } + +} + +// Test Stat of root. + +type root struct{} + +func (f root) Root() (fs.Node, fuse.Error) { + return f, nil +} + +func (root) Attr() fuse.Attr { + return fuse.Attr{Inode: 1, Mode: os.ModeDir | 0555} +} + +func TestStatRoot(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, root{}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + fi, err := os.Stat(mnt.Dir) + if err != nil { + t.Fatalf("root getattr failed with %v", err) + } + mode := fi.Mode() + if (mode & os.ModeType) != os.ModeDir { + t.Errorf("root is not a directory: %#v", fi) + } + if mode.Perm() != 0555 { + t.Errorf("root has weird access mode: %v", mode.Perm()) + } + switch stat := fi.Sys().(type) { + case *syscall.Stat_t: + if stat.Ino != 1 { + t.Errorf("root has wrong inode: %v", stat.Ino) + } + if stat.Nlink != 1 { + t.Errorf("root has wrong link count: %v", stat.Nlink) + } + if stat.Uid != 0 { + t.Errorf("root has wrong uid: %d", stat.Uid) + } + if stat.Gid != 0 { + t.Errorf("root has wrong gid: %d", stat.Gid) + } + } +} + +// Test Read calling ReadAll. 
+ +type readAll struct { + fstestutil.File +} + +const hi = "hello, world" + +func (readAll) Attr() fuse.Attr { + return fuse.Attr{ + Mode: 0666, + Size: uint64(len(hi)), + } +} + +func (readAll) ReadAll(intr fs.Intr) ([]byte, fuse.Error) { + return []byte(hi), nil +} + +func testReadAll(t *testing.T, path string) { + data, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("readAll: %v", err) + } + if string(data) != hi { + t.Errorf("readAll = %q, want %q", data, hi) + } +} + +func TestReadAll(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, childMapFS{"child": readAll{}}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + testReadAll(t, mnt.Dir+"/child") +} + +// Test Read. + +type readWithHandleRead struct { + fstestutil.File +} + +func (readWithHandleRead) Attr() fuse.Attr { + return fuse.Attr{ + Mode: 0666, + Size: uint64(len(hi)), + } +} + +func (readWithHandleRead) Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr fs.Intr) fuse.Error { + fuseutil.HandleRead(req, resp, []byte(hi)) + return nil +} + +func TestReadAllWithHandleRead(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, childMapFS{"child": readWithHandleRead{}}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + testReadAll(t, mnt.Dir+"/child") +} + +// Test Release. + +type release struct { + fstestutil.File + record.ReleaseWaiter +} + +func TestRelease(t *testing.T) { + t.Parallel() + r := &release{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": r}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + f, err := os.Open(mnt.Dir + "/child") + if err != nil { + t.Fatal(err) + } + f.Close() + if !r.WaitForRelease(1 * time.Second) { + t.Error("Close did not Release in time") + } +} + +// Test Write calling basic Write, with an fsync thrown in too. 
+ +type write struct { + fstestutil.File + record.Writes + record.Fsyncs +} + +func TestWrite(t *testing.T) { + t.Parallel() + w := &write{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": w}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + f, err := os.Create(mnt.Dir + "/child") + if err != nil { + t.Fatalf("Create: %v", err) + } + defer f.Close() + n, err := f.Write([]byte(hi)) + if err != nil { + t.Fatalf("Write: %v", err) + } + if n != len(hi) { + t.Fatalf("short write; n=%d; hi=%d", n, len(hi)) + } + + err = syscall.Fsync(int(f.Fd())) + if err != nil { + t.Fatalf("Fsync = %v", err) + } + if w.RecordedFsync() == (fuse.FsyncRequest{}) { + t.Errorf("never received expected fsync call") + } + + err = f.Close() + if err != nil { + t.Fatalf("Close: %v", err) + } + + if got := string(w.RecordedWriteData()); got != hi { + t.Errorf("write = %q, want %q", got, hi) + } +} + +// Test Write of a larger buffer. + +type writeLarge struct { + fstestutil.File + record.Writes +} + +func TestWriteLarge(t *testing.T) { + t.Parallel() + w := &write{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": w}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + f, err := os.Create(mnt.Dir + "/child") + if err != nil { + t.Fatalf("Create: %v", err) + } + defer f.Close() + const one = "xyzzyfoo" + large := bytes.Repeat([]byte(one), 8192) + n, err := f.Write(large) + if err != nil { + t.Fatalf("Write: %v", err) + } + if g, e := n, len(large); g != e { + t.Fatalf("short write: %d != %d", g, e) + } + + err = f.Close() + if err != nil { + t.Fatalf("Close: %v", err) + } + + got := w.RecordedWriteData() + if g, e := len(got), len(large); g != e { + t.Errorf("write wrong length: %d != %d", g, e) + } + if g := strings.Replace(string(got), one, "", -1); g != "" { + t.Errorf("write wrong data: expected repeats of %q, also got %q", one, g) + } +} + +// Test Write calling Setattr+Write+Flush. 
+ +type writeTruncateFlush struct { + fstestutil.File + record.Writes + record.Setattrs + record.Flushes +} + +func TestWriteTruncateFlush(t *testing.T) { + t.Parallel() + w := &writeTruncateFlush{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": w}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = ioutil.WriteFile(mnt.Dir+"/child", []byte(hi), 0666) + if err != nil { + t.Fatalf("WriteFile: %v", err) + } + if w.RecordedSetattr() == (fuse.SetattrRequest{}) { + t.Errorf("writeTruncateFlush expected Setattr") + } + if !w.RecordedFlush() { + t.Errorf("writeTruncateFlush expected Setattr") + } + if got := string(w.RecordedWriteData()); got != hi { + t.Errorf("writeTruncateFlush = %q, want %q", got, hi) + } +} + +// Test Mkdir. + +type mkdir1 struct { + fstestutil.Dir + record.Mkdirs +} + +func (f *mkdir1) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) { + f.Mkdirs.Mkdir(req, intr) + return &mkdir1{}, nil +} + +func TestMkdir(t *testing.T) { + t.Parallel() + f := &mkdir1{} + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + // uniform umask needed to make os.Mkdir's mode into something + // reproducible + defer syscall.Umask(syscall.Umask(0022)) + err = os.Mkdir(mnt.Dir+"/foo", 0771) + if err != nil { + t.Fatalf("mkdir: %v", err) + } + want := fuse.MkdirRequest{Name: "foo", Mode: os.ModeDir | 0751} + if g, e := f.RecordedMkdir(), want; g != e { + t.Errorf("mkdir saw %+v, want %+v", g, e) + } +} + +// Test Create (and fsync) + +type create1file struct { + fstestutil.File + record.Fsyncs +} + +type create1 struct { + fstestutil.Dir + f create1file +} + +func (f *create1) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) { + if req.Name != "foo" { + log.Printf("ERROR create1.Create unexpected name: %q\n", req.Name) + return nil, nil, fuse.EPERM + } + flags := req.Flags + + // OS X does not pass O_TRUNC 
here, Linux does; as this is a + // Create, that's acceptable + flags &^= fuse.OpenTruncate + + if runtime.GOOS == "linux" { + // Linux <3.7 accidentally leaks O_CLOEXEC through to FUSE; + // avoid spurious test failures + flags &^= fuse.OpenFlags(syscall.O_CLOEXEC) + } + + if g, e := flags, fuse.OpenReadWrite|fuse.OpenCreate; g != e { + log.Printf("ERROR create1.Create unexpected flags: %v != %v\n", g, e) + return nil, nil, fuse.EPERM + } + if g, e := req.Mode, os.FileMode(0644); g != e { + log.Printf("ERROR create1.Create unexpected mode: %v != %v\n", g, e) + return nil, nil, fuse.EPERM + } + return &f.f, &f.f, nil +} + +func TestCreate(t *testing.T) { + t.Parallel() + f := &create1{} + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + // uniform umask needed to make os.Create's 0666 into something + // reproducible + defer syscall.Umask(syscall.Umask(0022)) + ff, err := os.Create(mnt.Dir + "/foo") + if err != nil { + t.Fatalf("create1 WriteFile: %v", err) + } + defer ff.Close() + + err = syscall.Fsync(int(ff.Fd())) + if err != nil { + t.Fatalf("Fsync = %v", err) + } + + if f.f.RecordedFsync() == (fuse.FsyncRequest{}) { + t.Errorf("never received expected fsync call") + } + + ff.Close() +} + +// Test Create + Write + Remove + +type create3file struct { + fstestutil.File + record.Writes +} + +type create3 struct { + fstestutil.Dir + f create3file + fooCreated record.MarkRecorder + fooRemoved record.MarkRecorder +} + +func (f *create3) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) { + if req.Name != "foo" { + log.Printf("ERROR create3.Create unexpected name: %q\n", req.Name) + return nil, nil, fuse.EPERM + } + f.fooCreated.Mark() + return &f.f, &f.f, nil +} + +func (f *create3) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + if f.fooCreated.Recorded() && !f.fooRemoved.Recorded() && name == "foo" { + return &f.f, nil + } + 
return nil, fuse.ENOENT +} + +func (f *create3) Remove(r *fuse.RemoveRequest, intr fs.Intr) fuse.Error { + if f.fooCreated.Recorded() && !f.fooRemoved.Recorded() && + r.Name == "foo" && !r.Dir { + f.fooRemoved.Mark() + return nil + } + return fuse.ENOENT +} + +func TestCreateWriteRemove(t *testing.T) { + t.Parallel() + f := &create3{} + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = ioutil.WriteFile(mnt.Dir+"/foo", []byte(hi), 0666) + if err != nil { + t.Fatalf("create3 WriteFile: %v", err) + } + if got := string(f.f.RecordedWriteData()); got != hi { + t.Fatalf("create3 write = %q, want %q", got, hi) + } + + err = os.Remove(mnt.Dir + "/foo") + if err != nil { + t.Fatalf("Remove: %v", err) + } + err = os.Remove(mnt.Dir + "/foo") + if err == nil { + t.Fatalf("second Remove = nil; want some error") + } +} + +// Test symlink + readlink + +// is a Node that is a symlink to target +type symlink1link struct { + symlink + target string +} + +func (f symlink1link) Readlink(*fuse.ReadlinkRequest, fs.Intr) (string, fuse.Error) { + return f.target, nil +} + +type symlink1 struct { + fstestutil.Dir + record.Symlinks +} + +func (f *symlink1) Symlink(req *fuse.SymlinkRequest, intr fs.Intr) (fs.Node, fuse.Error) { + f.Symlinks.Symlink(req, intr) + return symlink1link{target: req.Target}, nil +} + +func TestSymlink(t *testing.T) { + t.Parallel() + f := &symlink1{} + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + const target = "/some-target" + + err = os.Symlink(target, mnt.Dir+"/symlink.file") + if err != nil { + t.Fatalf("os.Symlink: %v", err) + } + + want := fuse.SymlinkRequest{NewName: "symlink.file", Target: target} + if g, e := f.RecordedSymlink(), want; g != e { + t.Errorf("symlink saw %+v, want %+v", g, e) + } + + gotName, err := os.Readlink(mnt.Dir + "/symlink.file") + if err != nil { + t.Fatalf("os.Readlink: %v", err) + } 
+ if gotName != target { + t.Errorf("os.Readlink = %q; want %q", gotName, target) + } +} + +// Test link + +type link1 struct { + fstestutil.Dir + record.Links +} + +func (f *link1) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + if name == "old" { + return fstestutil.File{}, nil + } + return nil, fuse.ENOENT +} + +func (f *link1) Link(r *fuse.LinkRequest, old fs.Node, intr fs.Intr) (fs.Node, fuse.Error) { + f.Links.Link(r, old, intr) + return fstestutil.File{}, nil +} + +func TestLink(t *testing.T) { + t.Parallel() + f := &link1{} + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = os.Link(mnt.Dir+"/old", mnt.Dir+"/new") + if err != nil { + t.Fatalf("Link: %v", err) + } + + got := f.RecordedLink() + want := fuse.LinkRequest{ + NewName: "new", + // unpredictable + OldNode: got.OldNode, + } + if g, e := got, want; g != e { + t.Fatalf("link saw %+v, want %+v", g, e) + } +} + +// Test Rename + +type rename1 struct { + fstestutil.Dir + renamed record.Counter +} + +func (f *rename1) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + if name == "old" { + return fstestutil.File{}, nil + } + return nil, fuse.ENOENT +} + +func (f *rename1) Rename(r *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fuse.Error { + if r.OldName == "old" && r.NewName == "new" && newDir == f { + f.renamed.Inc() + return nil + } + return fuse.EIO +} + +func TestRename(t *testing.T) { + t.Parallel() + f := &rename1{} + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = os.Rename(mnt.Dir+"/old", mnt.Dir+"/new") + if err != nil { + t.Fatalf("Rename: %v", err) + } + if g, e := f.renamed.Count(), uint32(1); g != e { + t.Fatalf("expected rename didn't happen: %d != %d", g, e) + } + err = os.Rename(mnt.Dir+"/old2", mnt.Dir+"/new2") + if err == nil { + t.Fatal("expected error on second Rename; got nil") + } +} + +// Test mknod + +type 
mknod1 struct { + fstestutil.Dir + record.Mknods +} + +func (f *mknod1) Mknod(r *fuse.MknodRequest, intr fs.Intr) (fs.Node, fuse.Error) { + f.Mknods.Mknod(r, intr) + return fifo{}, nil +} + +func TestMknod(t *testing.T) { + t.Parallel() + if os.Getuid() != 0 { + t.Skip("skipping unless root") + } + + f := &mknod1{} + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + defer syscall.Umask(syscall.Umask(0)) + err = syscall.Mknod(mnt.Dir+"/node", syscall.S_IFIFO|0666, 123) + if err != nil { + t.Fatalf("Mknod: %v", err) + } + + want := fuse.MknodRequest{ + Name: "node", + Mode: os.FileMode(os.ModeNamedPipe | 0666), + Rdev: uint32(123), + } + if runtime.GOOS == "linux" { + // Linux fuse doesn't echo back the rdev if the node + // isn't a device (we're using a FIFO here, as that + // bit is portable.) + want.Rdev = 0 + } + if g, e := f.RecordedMknod(), want; g != e { + t.Fatalf("mknod saw %+v, want %+v", g, e) + } +} + +// Test Read served with DataHandle. 
+ +type dataHandleTest struct { + fstestutil.File +} + +func (dataHandleTest) Attr() fuse.Attr { + return fuse.Attr{ + Mode: 0666, + Size: uint64(len(hi)), + } +} + +func (dataHandleTest) Open(*fuse.OpenRequest, *fuse.OpenResponse, fs.Intr) (fs.Handle, fuse.Error) { + return fs.DataHandle([]byte(hi)), nil +} + +func TestDataHandle(t *testing.T) { + t.Parallel() + f := &dataHandleTest{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + data, err := ioutil.ReadFile(mnt.Dir + "/child") + if err != nil { + t.Errorf("readAll: %v", err) + return + } + if string(data) != hi { + t.Errorf("readAll = %q, want %q", data, hi) + } +} + +// Test interrupt + +type interrupt struct { + fstestutil.File + + // strobes to signal we have a read hanging + hanging chan struct{} +} + +func (interrupt) Attr() fuse.Attr { + return fuse.Attr{ + Mode: 0666, + Size: 1, + } +} + +func (it *interrupt) Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr fs.Intr) fuse.Error { + select { + case it.hanging <- struct{}{}: + default: + } + <-intr + return fuse.EINTR +} + +func TestInterrupt(t *testing.T) { + t.Parallel() + f := &interrupt{} + f.hanging = make(chan struct{}, 1) + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + // start a subprocess that can hang until signaled + cmd := exec.Command("cat", mnt.Dir+"/child") + + err = cmd.Start() + if err != nil { + t.Errorf("interrupt: cannot start cat: %v", err) + return + } + + // try to clean up if child is still alive when returning + defer cmd.Process.Kill() + + // wait till we're sure it's hanging in read + <-f.hanging + + err = cmd.Process.Signal(os.Interrupt) + if err != nil { + t.Errorf("interrupt: cannot interrupt cat: %v", err) + return + } + + p, err := cmd.Process.Wait() + if err != nil { + t.Errorf("interrupt: cat bork: %v", err) + return + } + switch ws := p.Sys().(type) { + case 
syscall.WaitStatus: + if ws.CoreDump() { + t.Errorf("interrupt: didn't expect cat to dump core: %v", ws) + } + + if ws.Exited() { + t.Errorf("interrupt: didn't expect cat to exit normally: %v", ws) + } + + if !ws.Signaled() { + t.Errorf("interrupt: expected cat to get a signal: %v", ws) + } else { + if ws.Signal() != os.Interrupt { + t.Errorf("interrupt: cat got wrong signal: %v", ws) + } + } + default: + t.Logf("interrupt: this platform has no test coverage") + } +} + +// Test truncate + +type truncate struct { + fstestutil.File + record.Setattrs +} + +func testTruncate(t *testing.T, toSize int64) { + t.Parallel() + f := &truncate{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = os.Truncate(mnt.Dir+"/child", toSize) + if err != nil { + t.Fatalf("Truncate: %v", err) + } + gotr := f.RecordedSetattr() + if gotr == (fuse.SetattrRequest{}) { + t.Fatalf("no recorded SetattrRequest") + } + if g, e := gotr.Size, uint64(toSize); g != e { + t.Errorf("got Size = %q; want %q", g, e) + } + if g, e := gotr.Valid&^fuse.SetattrLockOwner, fuse.SetattrSize; g != e { + t.Errorf("got Valid = %q; want %q", g, e) + } + t.Logf("Got request: %#v", gotr) +} + +func TestTruncate42(t *testing.T) { + testTruncate(t, 42) +} + +func TestTruncate0(t *testing.T) { + testTruncate(t, 0) +} + +// Test ftruncate + +type ftruncate struct { + fstestutil.File + record.Setattrs +} + +func testFtruncate(t *testing.T, toSize int64) { + t.Parallel() + f := &ftruncate{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + { + fil, err := os.OpenFile(mnt.Dir+"/child", os.O_WRONLY, 0666) + if err != nil { + t.Error(err) + return + } + defer fil.Close() + + err = fil.Truncate(toSize) + if err != nil { + t.Fatalf("Ftruncate: %v", err) + } + } + gotr := f.RecordedSetattr() + if gotr == (fuse.SetattrRequest{}) { + t.Fatalf("no recorded SetattrRequest") + } + if g, e 
:= gotr.Size, uint64(toSize); g != e { + t.Errorf("got Size = %q; want %q", g, e) + } + if g, e := gotr.Valid&^fuse.SetattrLockOwner, fuse.SetattrHandle|fuse.SetattrSize; g != e { + t.Errorf("got Valid = %q; want %q", g, e) + } + t.Logf("Got request: %#v", gotr) +} + +func TestFtruncate42(t *testing.T) { + testFtruncate(t, 42) +} + +func TestFtruncate0(t *testing.T) { + testFtruncate(t, 0) +} + +// Test opening existing file truncates + +type truncateWithOpen struct { + fstestutil.File + record.Setattrs +} + +func TestTruncateWithOpen(t *testing.T) { + t.Parallel() + f := &truncateWithOpen{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + fil, err := os.OpenFile(mnt.Dir+"/child", os.O_WRONLY|os.O_TRUNC, 0666) + if err != nil { + t.Error(err) + return + } + fil.Close() + + gotr := f.RecordedSetattr() + if gotr == (fuse.SetattrRequest{}) { + t.Fatalf("no recorded SetattrRequest") + } + if g, e := gotr.Size, uint64(0); g != e { + t.Errorf("got Size = %q; want %q", g, e) + } + // osxfuse sets SetattrHandle here, linux does not + if g, e := gotr.Valid&^(fuse.SetattrLockOwner|fuse.SetattrHandle), fuse.SetattrSize; g != e { + t.Errorf("got Valid = %q; want %q", g, e) + } + t.Logf("Got request: %#v", gotr) +} + +// Test readdir + +type readdir struct { + fstestutil.Dir +} + +func (d *readdir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { + return []fuse.Dirent{ + {Name: "one", Inode: 11, Type: fuse.DT_Dir}, + {Name: "three", Inode: 13}, + {Name: "two", Inode: 12, Type: fuse.DT_File}, + }, nil +} + +func TestReadDir(t *testing.T) { + t.Parallel() + f := &readdir{} + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + fil, err := os.Open(mnt.Dir) + if err != nil { + t.Error(err) + return + } + defer fil.Close() + + // go Readdir is just Readdirnames + Lstat, there's no point in + // testing that here; we have no consumption API 
for the real + // dirent data + names, err := fil.Readdirnames(100) + if err != nil { + t.Error(err) + return + } + + t.Logf("Got readdir: %q", names) + + if len(names) != 3 || + names[0] != "one" || + names[1] != "three" || + names[2] != "two" { + t.Errorf(`expected 3 entries of "one", "three", "two", got: %q`, names) + return + } +} + +// Test Chmod. + +type chmod struct { + fstestutil.File + record.Setattrs +} + +func (f *chmod) Setattr(req *fuse.SetattrRequest, resp *fuse.SetattrResponse, intr fs.Intr) fuse.Error { + if !req.Valid.Mode() { + log.Printf("setattr not a chmod: %v", req.Valid) + return fuse.EIO + } + f.Setattrs.Setattr(req, resp, intr) + return nil +} + +func TestChmod(t *testing.T) { + t.Parallel() + f := &chmod{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = os.Chmod(mnt.Dir+"/child", 0764) + if err != nil { + t.Errorf("chmod: %v", err) + return + } + got := f.RecordedSetattr() + if g, e := got.Mode, os.FileMode(0764); g != e { + t.Errorf("wrong mode: %v != %v", g, e) + } +} + +// Test open + +type open struct { + fstestutil.File + record.Opens +} + +func (f *open) Open(req *fuse.OpenRequest, resp *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) { + f.Opens.Open(req, resp, intr) + // pick a really distinct error, to identify it later + return nil, fuse.Errno(syscall.ENAMETOOLONG) + +} + +func TestOpen(t *testing.T) { + t.Parallel() + f := &open{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + // node: mode only matters with O_CREATE + fil, err := os.OpenFile(mnt.Dir+"/child", os.O_WRONLY|os.O_APPEND, 0) + if err == nil { + t.Error("Open err == nil, expected ENAMETOOLONG") + fil.Close() + return + } + + switch err2 := err.(type) { + case *os.PathError: + if err2.Err == syscall.ENAMETOOLONG { + break + } + t.Errorf("unexpected inner error: %#v", err2) + default: + t.Errorf("unexpected 
error: %v", err) + } + + want := fuse.OpenRequest{Dir: false, Flags: fuse.OpenWriteOnly | fuse.OpenAppend} + if runtime.GOOS == "darwin" { + // osxfuse does not let O_APPEND through at all + // + // https://code.google.com/p/macfuse/issues/detail?id=233 + // https://code.google.com/p/macfuse/issues/detail?id=132 + // https://code.google.com/p/macfuse/issues/detail?id=133 + want.Flags &^= fuse.OpenAppend + } + got := f.RecordedOpen() + + if runtime.GOOS == "linux" { + // Linux <3.7 accidentally leaks O_CLOEXEC through to FUSE; + // avoid spurious test failures + got.Flags &^= fuse.OpenFlags(syscall.O_CLOEXEC) + } + + if g, e := got, want; g != e { + t.Errorf("open saw %v, want %v", g, e) + return + } +} + +// Test Fsync on a dir + +type fsyncDir struct { + fstestutil.Dir + record.Fsyncs +} + +func TestFsyncDir(t *testing.T) { + t.Parallel() + f := &fsyncDir{} + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + fil, err := os.Open(mnt.Dir) + if err != nil { + t.Errorf("fsyncDir open: %v", err) + return + } + defer fil.Close() + err = fil.Sync() + if err != nil { + t.Errorf("fsyncDir sync: %v", err) + return + } + + got := f.RecordedFsync() + want := fuse.FsyncRequest{ + Flags: 0, + Dir: true, + // unpredictable + Handle: got.Handle, + } + if runtime.GOOS == "darwin" { + // TODO document the meaning of these flags, figure out why + // they differ + want.Flags = 1 + } + if g, e := got, want; g != e { + t.Fatalf("fsyncDir saw %+v, want %+v", g, e) + } +} + +// Test Getxattr + +type getxattr struct { + fstestutil.File + record.Getxattrs +} + +func (f *getxattr) Getxattr(req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse, intr fs.Intr) fuse.Error { + f.Getxattrs.Getxattr(req, resp, intr) + resp.Xattr = []byte("hello, world") + return nil +} + +func TestGetxattr(t *testing.T) { + t.Parallel() + f := &getxattr{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + 
t.Fatal(err) + } + defer mnt.Close() + + buf := make([]byte, 8192) + n, err := syscallx.Getxattr(mnt.Dir+"/child", "not-there", buf) + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + buf = buf[:n] + if g, e := string(buf), "hello, world"; g != e { + t.Errorf("wrong getxattr content: %#v != %#v", g, e) + } + seen := f.RecordedGetxattr() + if g, e := seen.Name, "not-there"; g != e { + t.Errorf("wrong getxattr name: %#v != %#v", g, e) + } +} + +// Test Getxattr that has no space to return value + +type getxattrTooSmall struct { + fstestutil.File +} + +func (f *getxattrTooSmall) Getxattr(req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse, intr fs.Intr) fuse.Error { + resp.Xattr = []byte("hello, world") + return nil +} + +func TestGetxattrTooSmall(t *testing.T) { + t.Parallel() + f := &getxattrTooSmall{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + buf := make([]byte, 3) + _, err = syscallx.Getxattr(mnt.Dir+"/child", "whatever", buf) + if err == nil { + t.Error("Getxattr = nil; want some error") + } + if err != syscall.ERANGE { + t.Errorf("unexpected error: %v", err) + return + } +} + +// Test Getxattr used to probe result size + +type getxattrSize struct { + fstestutil.File +} + +func (f *getxattrSize) Getxattr(req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse, intr fs.Intr) fuse.Error { + resp.Xattr = []byte("hello, world") + return nil +} + +func TestGetxattrSize(t *testing.T) { + t.Parallel() + f := &getxattrSize{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + n, err := syscallx.Getxattr(mnt.Dir+"/child", "whatever", nil) + if err != nil { + t.Errorf("Getxattr unexpected error: %v", err) + return + } + if g, e := n, len("hello, world"); g != e { + t.Errorf("Getxattr incorrect size: %d != %d", g, e) + } +} + +// Test Listxattr + +type listxattr struct { + fstestutil.File + 
record.Listxattrs +} + +func (f *listxattr) Listxattr(req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse, intr fs.Intr) fuse.Error { + f.Listxattrs.Listxattr(req, resp, intr) + resp.Append("one", "two") + return nil +} + +func TestListxattr(t *testing.T) { + t.Parallel() + f := &listxattr{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + buf := make([]byte, 8192) + n, err := syscallx.Listxattr(mnt.Dir+"/child", buf) + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + buf = buf[:n] + if g, e := string(buf), "one\x00two\x00"; g != e { + t.Errorf("wrong listxattr content: %#v != %#v", g, e) + } + + want := fuse.ListxattrRequest{ + Size: 8192, + } + if g, e := f.RecordedListxattr(), want; g != e { + t.Fatalf("listxattr saw %+v, want %+v", g, e) + } +} + +// Test Listxattr that has no space to return value + +type listxattrTooSmall struct { + fstestutil.File +} + +func (f *listxattrTooSmall) Listxattr(req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse, intr fs.Intr) fuse.Error { + resp.Xattr = []byte("one\x00two\x00") + return nil +} + +func TestListxattrTooSmall(t *testing.T) { + t.Parallel() + f := &listxattrTooSmall{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + buf := make([]byte, 3) + _, err = syscallx.Listxattr(mnt.Dir+"/child", buf) + if err == nil { + t.Error("Listxattr = nil; want some error") + } + if err != syscall.ERANGE { + t.Errorf("unexpected error: %v", err) + return + } +} + +// Test Listxattr used to probe result size + +type listxattrSize struct { + fstestutil.File +} + +func (f *listxattrSize) Listxattr(req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse, intr fs.Intr) fuse.Error { + resp.Xattr = []byte("one\x00two\x00") + return nil +} + +func TestListxattrSize(t *testing.T) { + t.Parallel() + f := &listxattrSize{} + mnt, err := fstestutil.MountedT(t, 
childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + n, err := syscallx.Listxattr(mnt.Dir+"/child", nil) + if err != nil { + t.Errorf("Listxattr unexpected error: %v", err) + return + } + if g, e := n, len("one\x00two\x00"); g != e { + t.Errorf("Getxattr incorrect size: %d != %d", g, e) + } +} + +// Test Setxattr + +type setxattr struct { + fstestutil.File + record.Setxattrs +} + +func testSetxattr(t *testing.T, size int) { + const linux_XATTR_NAME_MAX = 64 * 1024 + if size > linux_XATTR_NAME_MAX && runtime.GOOS == "linux" { + t.Skip("large xattrs are not supported by linux") + } + + t.Parallel() + f := &setxattr{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + const g = "hello, world" + greeting := strings.Repeat(g, size/len(g)+1)[:size] + err = syscallx.Setxattr(mnt.Dir+"/child", "greeting", []byte(greeting), 0) + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + + // fuse.SetxattrRequest contains a byte slice and thus cannot be + // directly compared + got := f.RecordedSetxattr() + + if g, e := got.Name, "greeting"; g != e { + t.Errorf("Setxattr incorrect name: %q != %q", g, e) + } + + if g, e := got.Flags, uint32(0); g != e { + t.Errorf("Setxattr incorrect flags: %d != %d", g, e) + } + + if g, e := string(got.Xattr), greeting; g != e { + t.Errorf("Setxattr incorrect data: %q != %q", g, e) + } +} + +func TestSetxattr(t *testing.T) { + testSetxattr(t, 20) +} + +func TestSetxattr64kB(t *testing.T) { + testSetxattr(t, 64*1024) +} + +func TestSetxattr16MB(t *testing.T) { + testSetxattr(t, 16*1024*1024) +} + +// Test Removexattr + +type removexattr struct { + fstestutil.File + record.Removexattrs +} + +func TestRemovexattr(t *testing.T) { + t.Parallel() + f := &removexattr{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = syscallx.Removexattr(mnt.Dir+"/child", 
"greeting") + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + + want := fuse.RemovexattrRequest{Name: "greeting"} + if g, e := f.RecordedRemovexattr(), want; g != e { + t.Errorf("removexattr saw %v, want %v", g, e) + } +} + +// Test default error. + +type defaultErrno struct { + fstestutil.Dir +} + +func (f defaultErrno) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + return nil, errors.New("bork") +} + +func TestDefaultErrno(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{defaultErrno{}}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + _, err = os.Stat(mnt.Dir + "/trigger") + if err == nil { + t.Fatalf("expected error") + } + + switch err2 := err.(type) { + case *os.PathError: + if err2.Err == syscall.EIO { + break + } + t.Errorf("unexpected inner error: Err=%v %#v", err2.Err, err2) + default: + t.Errorf("unexpected error: %v", err) + } +} + +// Test custom error. + +type customErrNode struct { + fstestutil.Dir +} + +type myCustomError struct { + fuse.ErrorNumber +} + +var _ = fuse.ErrorNumber(myCustomError{}) + +func (myCustomError) Error() string { + return "bork" +} + +func (f customErrNode) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + return nil, myCustomError{ + ErrorNumber: fuse.Errno(syscall.ENAMETOOLONG), + } +} + +func TestCustomErrno(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{customErrNode{}}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + _, err = os.Stat(mnt.Dir + "/trigger") + if err == nil { + t.Fatalf("expected error") + } + + switch err2 := err.(type) { + case *os.PathError: + if err2.Err == syscall.ENAMETOOLONG { + break + } + t.Errorf("unexpected inner error: %#v", err2) + default: + t.Errorf("unexpected error: %v", err) + } +} + +// Test Mmap writing + +type inMemoryFile struct { + data []byte +} + +func (f *inMemoryFile) Attr() fuse.Attr { + return fuse.Attr{ + Mode: 0666, + Size: 
uint64(len(f.data)), + } +} + +func (f *inMemoryFile) Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr fs.Intr) fuse.Error { + fuseutil.HandleRead(req, resp, f.data) + return nil +} + +func (f *inMemoryFile) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error { + resp.Size = copy(f.data[req.Offset:], req.Data) + return nil +} + +type mmap struct { + inMemoryFile + // We don't actually care about whether the fsync happened or not; + // this just lets us force the page cache to send the writes to + // FUSE, so we can reliably verify they came through. + record.Fsyncs +} + +func TestMmap(t *testing.T) { + const size = 16 * 4096 + writes := map[int]byte{ + 10: 'a', + 4096: 'b', + 4097: 'c', + size - 4096: 'd', + size - 1: 'z', + } + + // Run the mmap-using parts of the test in a subprocess, to avoid + // an intentional page fault hanging the whole process (because it + // would need to be served by the same process, and there might + // not be a thread free to do that). Merely bumping GOMAXPROCS is + // not enough to prevent the hangs reliably. 
+ if childMode { + f, err := os.Create("child") + if err != nil { + t.Fatalf("Create: %v", err) + } + defer f.Close() + + data, err := syscall.Mmap(int(f.Fd()), 0, size, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) + if err != nil { + t.Fatalf("Mmap: %v", err) + } + + for i, b := range writes { + data[i] = b + } + + if err := syscallx.Msync(data, syscall.MS_SYNC); err != nil { + t.Fatalf("Msync: %v", err) + } + + if err := syscall.Munmap(data); err != nil { + t.Fatalf("Munmap: %v", err) + } + + if err := f.Sync(); err != nil { + t.Fatalf("Fsync = %v", err) + } + + err = f.Close() + if err != nil { + t.Fatalf("Close: %v", err) + } + + return + } + + w := &mmap{} + w.data = make([]byte, size) + mnt, err := fstestutil.MountedT(t, childMapFS{"child": w}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + child, err := childCmd("TestMmap") + if err != nil { + t.Fatal(err) + } + child.Dir = mnt.Dir + if err := child.Run(); err != nil { + t.Fatal(err) + } + + got := w.data + if g, e := len(got), size; g != e { + t.Fatalf("bad write length: %d != %d", g, e) + } + for i, g := range got { + // default '\x00' for writes[i] is good here + if e := writes[i]; g != e { + t.Errorf("wrong byte at offset %d: %q != %q", i, g, e) + } + } +} + +// Test direct Read. 
+ +type directRead struct { + fstestutil.File +} + +// explicitly not defining Attr and setting Size + +func (f directRead) Open(req *fuse.OpenRequest, resp *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) { + // do not allow the kernel to use page cache + resp.Flags |= fuse.OpenDirectIO + return f, nil +} + +func (directRead) Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr fs.Intr) fuse.Error { + fuseutil.HandleRead(req, resp, []byte(hi)) + return nil +} + +func TestDirectRead(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, childMapFS{"child": directRead{}}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + testReadAll(t, mnt.Dir+"/child") +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/tree.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/tree.go new file mode 100644 index 00000000..5a12071e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fs/tree.go @@ -0,0 +1,96 @@ +// FUSE directory tree, for servers that wish to use it with the service loop. + +package fs + +import ( + "os" + pathpkg "path" + "strings" +) + +import ( + "camlistore.org/third_party/bazil.org/fuse" +) + +// A Tree implements a basic read-only directory tree for FUSE. +// The Nodes contained in it may still be writable. +type Tree struct { + tree +} + +func (t *Tree) Root() (Node, fuse.Error) { + return &t.tree, nil +} + +// Add adds the path to the tree, resolving to the given node. +// If path or a prefix of path has already been added to the tree, +// Add panics. +// +// Add is only safe to call before starting to serve requests. 
+func (t *Tree) Add(path string, node Node) { + path = pathpkg.Clean("/" + path)[1:] + elems := strings.Split(path, "/") + dir := Node(&t.tree) + for i, elem := range elems { + dt, ok := dir.(*tree) + if !ok { + panic("fuse: Tree.Add for " + strings.Join(elems[:i], "/") + " and " + path) + } + n := dt.lookup(elem) + if n != nil { + if i+1 == len(elems) { + panic("fuse: Tree.Add for " + path + " conflicts with " + elem) + } + dir = n + } else { + if i+1 == len(elems) { + dt.add(elem, node) + } else { + dir = &tree{} + dt.add(elem, dir) + } + } + } +} + +type treeDir struct { + name string + node Node +} + +type tree struct { + dir []treeDir +} + +func (t *tree) lookup(name string) Node { + for _, d := range t.dir { + if d.name == name { + return d.node + } + } + return nil +} + +func (t *tree) add(name string, n Node) { + t.dir = append(t.dir, treeDir{name, n}) +} + +func (t *tree) Attr() fuse.Attr { + return fuse.Attr{Mode: os.ModeDir | 0555} +} + +func (t *tree) Lookup(name string, intr Intr) (Node, fuse.Error) { + n := t.lookup(name) + if n != nil { + return n, nil + } + return nil, fuse.ENOENT +} + +func (t *tree) ReadDir(intr Intr) ([]fuse.Dirent, fuse.Error) { + var out []fuse.Dirent + for _, d := range t.dir { + out = append(out, fuse.Dirent{Name: d.name}) + } + return out, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse.go new file mode 100644 index 00000000..c6b81793 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse.go @@ -0,0 +1,2043 @@ +// See the file LICENSE for copyright and licensing information. +// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c, +// which carries this notice: +// +// The files in this directory are subject to the following license. +// +// The author of this software is Russ Cox. 
+// +// Copyright (c) 2006 Russ Cox +// +// Permission to use, copy, modify, and distribute this software for any +// purpose without fee is hereby granted, provided that this entire notice +// is included in all copies of any software which is or includes a copy +// or modification of this software and in all copies of the supporting +// documentation for such software. +// +// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED +// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY +// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS +// FITNESS FOR ANY PARTICULAR PURPOSE. + +// Package fuse enables writing FUSE file systems on Linux, OS X, and FreeBSD. +// +// On OS X, it requires OSXFUSE (http://osxfuse.github.com/). +// +// There are two approaches to writing a FUSE file system. The first is to speak +// the low-level message protocol, reading from a Conn using ReadRequest and +// writing using the various Respond methods. This approach is closest to +// the actual interaction with the kernel and can be the simplest one in contexts +// such as protocol translators. +// +// Servers of synthesized file systems tend to share common +// bookkeeping abstracted away by the second approach, which is to +// call fs.Serve to serve the FUSE protocol using an implementation of +// the service methods in the interfaces FS* (file system), Node* (file +// or directory), and Handle* (opened file or directory). +// There are a daunting number of such methods that can be written, +// but few are required. +// The specific methods are described in the documentation for those interfaces. +// +// The hellofs subdirectory contains a simple illustration of the fs.Serve approach. +// +// Service Methods +// +// The required and optional methods for the FS, Node, and Handle interfaces +// have the general form +// +// Op(req *OpRequest, resp *OpResponse, intr Intr) Error +// +// where Op is the name of a FUSE operation. 
Op reads request parameters +// from req and writes results to resp. An operation whose only result is +// the error result omits the resp parameter. Multiple goroutines may call +// service methods simultaneously; the methods being called are responsible +// for appropriate synchronization. +// +// Interrupted Operations +// +// In some file systems, some operations +// may take an undetermined amount of time. For example, a Read waiting for +// a network message or a matching Write might wait indefinitely. If the request +// is cancelled and no longer needed, the package will close intr, a chan struct{}. +// Blocking operations should select on a receive from intr and attempt to +// abort the operation early if the receive succeeds (meaning the channel is closed). +// To indicate that the operation failed because it was aborted, return fuse.EINTR. +// +// If an operation does not block for an indefinite amount of time, the intr parameter +// can be ignored. +// +// Authentication +// +// All requests types embed a Header, meaning that the method can inspect +// req.Pid, req.Uid, and req.Gid as necessary to implement permission checking. +// Alternately, XXX. +// +// Mount Options +// +// Behavior and metadata of the mounted file system can be changed by +// passing MountOption values to Mount. +// +package fuse + +// BUG(rsc): The mount code for FreeBSD has not been written yet. + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "sync" + "syscall" + "time" + "unsafe" +) + +// A Conn represents a connection to a mounted FUSE file system. +type Conn struct { + // Ready is closed when the mount is complete or has failed. + Ready <-chan struct{} + + // MountError stores any error from the mount process. Only valid + // after Ready is closed. + MountError error + + // File handle for kernel communication. Only safe to access if + // rio or wio is held. 
+ dev *os.File + buf []byte + wio sync.Mutex + rio sync.RWMutex +} + +// Mount mounts a new FUSE connection on the named directory +// and returns a connection for reading and writing FUSE messages. +// +// After a successful return, caller must call Close to free +// resources. +// +// Even on successful return, the new mount is not guaranteed to be +// visible until after Conn.Ready is closed. See Conn.MountError for +// possible errors. Incoming requests on Conn must be served to make +// progress. +func Mount(dir string, options ...MountOption) (*Conn, error) { + conf := MountConfig{ + options: make(map[string]string), + } + for _, option := range options { + if err := option(&conf); err != nil { + return nil, err + } + } + + ready := make(chan struct{}, 1) + c := &Conn{ + Ready: ready, + } + f, err := mount(dir, &conf, ready, &c.MountError) + if err != nil { + return nil, err + } + c.dev = f + return c, nil +} + +// A Request represents a single FUSE request received from the kernel. +// Use a type switch to determine the specific kind. +// A request of unrecognized type will have concrete type *Header. +type Request interface { + // Hdr returns the Header associated with this request. + Hdr() *Header + + // RespondError responds to the request with the given error. + RespondError(Error) + + String() string +} + +// A RequestID identifies an active FUSE request. +type RequestID uint64 + +// A NodeID is a number identifying a directory or file. +// It must be unique among IDs returned in LookupResponses +// that have not yet been forgotten by ForgetRequests. +type NodeID uint64 + +// A HandleID is a number identifying an open directory or file. +// It only needs to be unique while the directory or file is open. +type HandleID uint64 + +// The RootID identifies the root directory of a FUSE file system. +const RootID NodeID = rootID + +// A Header describes the basic information sent in every request. 
+type Header struct { + Conn *Conn `json:"-"` // connection this request was received on + ID RequestID // unique ID for request + Node NodeID // file or directory the request is about + Uid uint32 // user ID of process making request + Gid uint32 // group ID of process making request + Pid uint32 // process ID of process making request + + // for returning to reqPool + msg *message +} + +func (h *Header) String() string { + return fmt.Sprintf("ID=%#x Node=%#x Uid=%d Gid=%d Pid=%d", h.ID, h.Node, h.Uid, h.Gid, h.Pid) +} + +func (h *Header) Hdr() *Header { + return h +} + +func (h *Header) noResponse() { + putMessage(h.msg) +} + +func (h *Header) respond(out *outHeader, n uintptr) { + h.Conn.respond(out, n) + putMessage(h.msg) +} + +func (h *Header) respondData(out *outHeader, n uintptr, data []byte) { + h.Conn.respondData(out, n, data) + putMessage(h.msg) +} + +// An Error is a FUSE error. +// +// Errors messages will be visible in the debug log as part of the +// response. +// +// The FUSE interface can only communicate POSIX errno error numbers +// to file system clients, the message is not visible to file system +// clients. The returned error can implement ErrorNumber to control +// the errno returned. Without ErrorNumber, a generic errno (EIO) is +// returned. +type Error error + +// An ErrorNumber is an error with a specific error number. +// +// Operations may return an error value that implements ErrorNumber to +// control what specific error number (errno) to return. +type ErrorNumber interface { + // Errno returns the the error number (errno) for this error. + Errno() Errno +} + +const ( + // ENOSYS indicates that the call is not supported. + ENOSYS = Errno(syscall.ENOSYS) + + // ESTALE is used by Serve to respond to violations of the FUSE protocol. + ESTALE = Errno(syscall.ESTALE) + + ENOENT = Errno(syscall.ENOENT) + EIO = Errno(syscall.EIO) + EPERM = Errno(syscall.EPERM) + + // EINTR indicates request was interrupted by an InterruptRequest. 
+ // See also fs.Intr. + EINTR = Errno(syscall.EINTR) + + ENODATA = Errno(syscall.ENODATA) + ERANGE = Errno(syscall.ERANGE) + ENOTSUP = Errno(syscall.ENOTSUP) + EEXIST = Errno(syscall.EEXIST) +) + +// DefaultErrno is the errno used when error returned does not +// implement ErrorNumber. +const DefaultErrno = EIO + +var errnoNames = map[Errno]string{ + ENOSYS: "ENOSYS", + ESTALE: "ESTALE", + ENOENT: "ENOENT", + EIO: "EIO", + EPERM: "EPERM", + EINTR: "EINTR", + ENODATA: "ENODATA", + EEXIST: "EEXIST", +} + +// Errno implements Error and ErrorNumber using a syscall.Errno. +type Errno syscall.Errno + +var _ = ErrorNumber(Errno(0)) +var _ = Error(Errno(0)) +var _ = error(Errno(0)) + +func (e Errno) Errno() Errno { + return e +} + +func (e Errno) String() string { + return syscall.Errno(e).Error() +} + +func (e Errno) Error() string { + return syscall.Errno(e).Error() +} + +// ErrnoName returns the short non-numeric identifier for this errno. +// For example, "EIO". +func (e Errno) ErrnoName() string { + s := errnoNames[e] + if s == "" { + s = fmt.Sprint(e.Errno()) + } + return s +} + +func (e Errno) MarshalText() ([]byte, error) { + s := e.ErrnoName() + return []byte(s), nil +} + +func (h *Header) RespondError(err Error) { + errno := DefaultErrno + if ferr, ok := err.(ErrorNumber); ok { + errno = ferr.Errno() + } + // FUSE uses negative errors! + // TODO: File bug report against OSXFUSE: positive error causes kernel panic. + out := &outHeader{Error: -int32(errno), Unique: uint64(h.ID)} + h.respond(out, unsafe.Sizeof(*out)) +} + +// Maximum file write size we are prepared to receive from the kernel. +const maxWrite = 16 * 1024 * 1024 + +// All requests read from the kernel, without data, are shorter than +// this. +var maxRequestSize = syscall.Getpagesize() +var bufSize = maxRequestSize + maxWrite + +// reqPool is a pool of messages. +// +// Lifetime of a logical message is from getMessage to putMessage. +// getMessage is called by ReadRequest. 
putMessage is called by +// Conn.ReadRequest, Request.Respond, or Request.RespondError. +// +// Messages in the pool are guaranteed to have conn and off zeroed, +// buf allocated and len==bufSize, and hdr set. +var reqPool = sync.Pool{ + New: allocMessage, +} + +func allocMessage() interface{} { + m := &message{buf: make([]byte, bufSize)} + m.hdr = (*inHeader)(unsafe.Pointer(&m.buf[0])) + return m +} + +func getMessage(c *Conn) *message { + m := reqPool.Get().(*message) + m.conn = c + return m +} + +func putMessage(m *message) { + m.buf = m.buf[:bufSize] + m.conn = nil + m.off = 0 + reqPool.Put(m) +} + +// a message represents the bytes of a single FUSE message +type message struct { + conn *Conn + buf []byte // all bytes + hdr *inHeader // header + off int // offset for reading additional fields +} + +func (m *message) len() uintptr { + return uintptr(len(m.buf) - m.off) +} + +func (m *message) data() unsafe.Pointer { + var p unsafe.Pointer + if m.off < len(m.buf) { + p = unsafe.Pointer(&m.buf[m.off]) + } + return p +} + +func (m *message) bytes() []byte { + return m.buf[m.off:] +} + +func (m *message) Header() Header { + h := m.hdr + return Header{ + Conn: m.conn, + ID: RequestID(h.Unique), + Node: NodeID(h.Nodeid), + Uid: h.Uid, + Gid: h.Gid, + Pid: h.Pid, + + msg: m, + } +} + +// fileMode returns a Go os.FileMode from a Unix mode. 
+func fileMode(unixMode uint32) os.FileMode { + mode := os.FileMode(unixMode & 0777) + switch unixMode & syscall.S_IFMT { + case syscall.S_IFREG: + // nothing + case syscall.S_IFDIR: + mode |= os.ModeDir + case syscall.S_IFCHR: + mode |= os.ModeCharDevice | os.ModeDevice + case syscall.S_IFBLK: + mode |= os.ModeDevice + case syscall.S_IFIFO: + mode |= os.ModeNamedPipe + case syscall.S_IFLNK: + mode |= os.ModeSymlink + case syscall.S_IFSOCK: + mode |= os.ModeSocket + default: + // no idea + mode |= os.ModeDevice + } + if unixMode&syscall.S_ISUID != 0 { + mode |= os.ModeSetuid + } + if unixMode&syscall.S_ISGID != 0 { + mode |= os.ModeSetgid + } + return mode +} + +type noOpcode struct { + Opcode uint32 +} + +func (m noOpcode) String() string { + return fmt.Sprintf("No opcode %v", m.Opcode) +} + +type malformedMessage struct { +} + +func (malformedMessage) String() string { + return "malformed message" +} + +// Close closes the FUSE connection. +func (c *Conn) Close() error { + c.wio.Lock() + defer c.wio.Unlock() + c.rio.Lock() + defer c.rio.Unlock() + return c.dev.Close() +} + +// caller must hold wio or rio +func (c *Conn) fd() int { + return int(c.dev.Fd()) +} + +// ReadRequest returns the next FUSE request from the kernel. +// +// Caller must call either Request.Respond or Request.RespondError in +// a reasonable time. Caller must not retain Request after that call. +func (c *Conn) ReadRequest() (Request, error) { + m := getMessage(c) +loop: + c.rio.RLock() + n, err := syscall.Read(c.fd(), m.buf) + c.rio.RUnlock() + if err == syscall.EINTR { + // OSXFUSE sends EINTR to userspace when a request interrupt + // completed before it got sent to userspace? 
+ goto loop + } + if err != nil && err != syscall.ENODEV { + putMessage(m) + return nil, err + } + if n <= 0 { + putMessage(m) + return nil, io.EOF + } + m.buf = m.buf[:n] + + if n < inHeaderSize { + putMessage(m) + return nil, errors.New("fuse: message too short") + } + + // FreeBSD FUSE sends a short length in the header + // for FUSE_INIT even though the actual read length is correct. + if n == inHeaderSize+initInSize && m.hdr.Opcode == opInit && m.hdr.Len < uint32(n) { + m.hdr.Len = uint32(n) + } + + // OSXFUSE sometimes sends the wrong m.hdr.Len in a FUSE_WRITE message. + if m.hdr.Len < uint32(n) && m.hdr.Len >= uint32(unsafe.Sizeof(writeIn{})) && m.hdr.Opcode == opWrite { + m.hdr.Len = uint32(n) + } + + if m.hdr.Len != uint32(n) { + // prepare error message before returning m to pool + err := fmt.Errorf("fuse: read %d opcode %d but expected %d", n, m.hdr.Opcode, m.hdr.Len) + putMessage(m) + return nil, err + } + + m.off = inHeaderSize + + // Convert to data structures. + // Do not trust kernel to hand us well-formed data. 
+ var req Request + switch m.hdr.Opcode { + default: + Debug(noOpcode{Opcode: m.hdr.Opcode}) + goto unrecognized + + case opLookup: + buf := m.bytes() + n := len(buf) + if n == 0 || buf[n-1] != '\x00' { + goto corrupt + } + req = &LookupRequest{ + Header: m.Header(), + Name: string(buf[:n-1]), + } + + case opForget: + in := (*forgetIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &ForgetRequest{ + Header: m.Header(), + N: in.Nlookup, + } + + case opGetattr: + req = &GetattrRequest{ + Header: m.Header(), + } + + case opSetattr: + in := (*setattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &SetattrRequest{ + Header: m.Header(), + Valid: SetattrValid(in.Valid), + Handle: HandleID(in.Fh), + Size: in.Size, + Atime: time.Unix(int64(in.Atime), int64(in.AtimeNsec)), + Mtime: time.Unix(int64(in.Mtime), int64(in.MtimeNsec)), + Mode: fileMode(in.Mode), + Uid: in.Uid, + Gid: in.Gid, + Bkuptime: in.BkupTime(), + Chgtime: in.Chgtime(), + Flags: in.Flags(), + } + + case opReadlink: + if len(m.bytes()) > 0 { + goto corrupt + } + req = &ReadlinkRequest{ + Header: m.Header(), + } + + case opSymlink: + // m.bytes() is "newName\0target\0" + names := m.bytes() + if len(names) == 0 || names[len(names)-1] != 0 { + goto corrupt + } + i := bytes.IndexByte(names, '\x00') + if i < 0 { + goto corrupt + } + newName, target := names[0:i], names[i+1:len(names)-1] + req = &SymlinkRequest{ + Header: m.Header(), + NewName: string(newName), + Target: string(target), + } + + case opLink: + in := (*linkIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + newName := m.bytes()[unsafe.Sizeof(*in):] + if len(newName) < 2 || newName[len(newName)-1] != 0 { + goto corrupt + } + newName = newName[:len(newName)-1] + req = &LinkRequest{ + Header: m.Header(), + OldNode: NodeID(in.Oldnodeid), + NewName: string(newName), + } + + case opMknod: + in := (*mknodIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + name := 
m.bytes()[unsafe.Sizeof(*in):] + if len(name) < 2 || name[len(name)-1] != '\x00' { + goto corrupt + } + name = name[:len(name)-1] + req = &MknodRequest{ + Header: m.Header(), + Mode: fileMode(in.Mode), + Rdev: in.Rdev, + Name: string(name), + } + + case opMkdir: + in := (*mkdirIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + name := m.bytes()[unsafe.Sizeof(*in):] + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + req = &MkdirRequest{ + Header: m.Header(), + Name: string(name[:i]), + // observed on Linux: mkdirIn.Mode & syscall.S_IFMT == 0, + // and this causes fileMode to go into it's "no idea" + // code branch; enforce type to directory + Mode: fileMode((in.Mode &^ syscall.S_IFMT) | syscall.S_IFDIR), + } + + case opUnlink, opRmdir: + buf := m.bytes() + n := len(buf) + if n == 0 || buf[n-1] != '\x00' { + goto corrupt + } + req = &RemoveRequest{ + Header: m.Header(), + Name: string(buf[:n-1]), + Dir: m.hdr.Opcode == opRmdir, + } + + case opRename: + in := (*renameIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + newDirNodeID := NodeID(in.Newdir) + oldNew := m.bytes()[unsafe.Sizeof(*in):] + // oldNew should be "old\x00new\x00" + if len(oldNew) < 4 { + goto corrupt + } + if oldNew[len(oldNew)-1] != '\x00' { + goto corrupt + } + i := bytes.IndexByte(oldNew, '\x00') + if i < 0 { + goto corrupt + } + oldName, newName := string(oldNew[:i]), string(oldNew[i+1:len(oldNew)-1]) + req = &RenameRequest{ + Header: m.Header(), + NewDir: newDirNodeID, + OldName: oldName, + NewName: newName, + } + + case opOpendir, opOpen: + in := (*openIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &OpenRequest{ + Header: m.Header(), + Dir: m.hdr.Opcode == opOpendir, + Flags: openFlags(in.Flags), + } + + case opRead, opReaddir: + in := (*readIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &ReadRequest{ + Header: m.Header(), + Dir: m.hdr.Opcode == opReaddir, + Handle: 
HandleID(in.Fh), + Offset: int64(in.Offset), + Size: int(in.Size), + } + + case opWrite: + in := (*writeIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + r := &WriteRequest{ + Header: m.Header(), + Handle: HandleID(in.Fh), + Offset: int64(in.Offset), + Flags: WriteFlags(in.WriteFlags), + } + buf := m.bytes()[unsafe.Sizeof(*in):] + if uint32(len(buf)) < in.Size { + goto corrupt + } + r.Data = buf + req = r + + case opStatfs: + req = &StatfsRequest{ + Header: m.Header(), + } + + case opRelease, opReleasedir: + in := (*releaseIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &ReleaseRequest{ + Header: m.Header(), + Dir: m.hdr.Opcode == opReleasedir, + Handle: HandleID(in.Fh), + Flags: openFlags(in.Flags), + ReleaseFlags: ReleaseFlags(in.ReleaseFlags), + LockOwner: in.LockOwner, + } + + case opFsync, opFsyncdir: + in := (*fsyncIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &FsyncRequest{ + Dir: m.hdr.Opcode == opFsyncdir, + Header: m.Header(), + Handle: HandleID(in.Fh), + Flags: in.FsyncFlags, + } + + case opSetxattr: + in := (*setxattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + m.off += int(unsafe.Sizeof(*in)) + name := m.bytes() + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + xattr := name[i+1:] + if uint32(len(xattr)) < in.Size { + goto corrupt + } + xattr = xattr[:in.Size] + req = &SetxattrRequest{ + Header: m.Header(), + Flags: in.Flags, + Position: in.position(), + Name: string(name[:i]), + Xattr: xattr, + } + + case opGetxattr: + in := (*getxattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + name := m.bytes()[unsafe.Sizeof(*in):] + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + req = &GetxattrRequest{ + Header: m.Header(), + Name: string(name[:i]), + Size: in.Size, + Position: in.position(), + } + + case opListxattr: + in := (*getxattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto 
corrupt + } + req = &ListxattrRequest{ + Header: m.Header(), + Size: in.Size, + Position: in.position(), + } + + case opRemovexattr: + buf := m.bytes() + n := len(buf) + if n == 0 || buf[n-1] != '\x00' { + goto corrupt + } + req = &RemovexattrRequest{ + Header: m.Header(), + Name: string(buf[:n-1]), + } + + case opFlush: + in := (*flushIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &FlushRequest{ + Header: m.Header(), + Handle: HandleID(in.Fh), + Flags: in.FlushFlags, + LockOwner: in.LockOwner, + } + + case opInit: + in := (*initIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &InitRequest{ + Header: m.Header(), + Major: in.Major, + Minor: in.Minor, + MaxReadahead: in.MaxReadahead, + Flags: InitFlags(in.Flags), + } + + case opGetlk: + panic("opGetlk") + case opSetlk: + panic("opSetlk") + case opSetlkw: + panic("opSetlkw") + + case opAccess: + in := (*accessIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &AccessRequest{ + Header: m.Header(), + Mask: in.Mask, + } + + case opCreate: + in := (*createIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + name := m.bytes()[unsafe.Sizeof(*in):] + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + req = &CreateRequest{ + Header: m.Header(), + Flags: openFlags(in.Flags), + Mode: fileMode(in.Mode), + Name: string(name[:i]), + } + + case opInterrupt: + in := (*interruptIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &InterruptRequest{ + Header: m.Header(), + IntrID: RequestID(in.Unique), + } + + case opBmap: + panic("opBmap") + + case opDestroy: + req = &DestroyRequest{ + Header: m.Header(), + } + + // OS X + case opSetvolname: + panic("opSetvolname") + case opGetxtimes: + panic("opGetxtimes") + case opExchange: + panic("opExchange") + } + + return req, nil + +corrupt: + Debug(malformedMessage{}) + putMessage(m) + return nil, fmt.Errorf("fuse: malformed message") + 
+unrecognized: + // Unrecognized message. + // Assume higher-level code will send a "no idea what you mean" error. + h := m.Header() + return &h, nil +} + +type bugShortKernelWrite struct { + Written int64 + Length int64 + Error string + Stack string +} + +func (b bugShortKernelWrite) String() string { + return fmt.Sprintf("short kernel write: written=%d/%d error=%q stack=\n%s", b.Written, b.Length, b.Error, b.Stack) +} + +// safe to call even with nil error +func errorString(err error) string { + if err == nil { + return "" + } + return err.Error() +} + +func (c *Conn) respond(out *outHeader, n uintptr) { + c.wio.Lock() + defer c.wio.Unlock() + out.Len = uint32(n) + msg := (*[1 << 30]byte)(unsafe.Pointer(out))[:n] + nn, err := syscall.Write(c.fd(), msg) + if nn != len(msg) || err != nil { + Debug(bugShortKernelWrite{ + Written: int64(nn), + Length: int64(len(msg)), + Error: errorString(err), + Stack: stack(), + }) + } +} + +func (c *Conn) respondData(out *outHeader, n uintptr, data []byte) { + c.wio.Lock() + defer c.wio.Unlock() + // TODO: use writev + out.Len = uint32(n + uintptr(len(data))) + msg := make([]byte, out.Len) + copy(msg, (*[1 << 30]byte)(unsafe.Pointer(out))[:n]) + copy(msg[n:], data) + syscall.Write(c.fd(), msg) +} + +// An InitRequest is the first request sent on a FUSE file system. +type InitRequest struct { + Header `json:"-"` + Major uint32 + Minor uint32 + // Maximum readahead in bytes that the kernel plans to use. + MaxReadahead uint32 + Flags InitFlags +} + +var _ = Request(&InitRequest{}) + +func (r *InitRequest) String() string { + return fmt.Sprintf("Init [%s] %d.%d ra=%d fl=%v", &r.Header, r.Major, r.Minor, r.MaxReadahead, r.Flags) +} + +// An InitResponse is the response to an InitRequest. +type InitResponse struct { + // Maximum readahead in bytes that the kernel can use. Ignored if + // greater than InitRequest.MaxReadahead. + MaxReadahead uint32 + Flags InitFlags + // Maximum size of a single write operation. 
+ // Linux enforces a minimum of 4 KiB. + MaxWrite uint32 +} + +func (r *InitResponse) String() string { + return fmt.Sprintf("Init %+v", *r) +} + +// Respond replies to the request with the given response. +func (r *InitRequest) Respond(resp *InitResponse) { + out := &initOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Major: kernelVersion, + Minor: kernelMinorVersion, + MaxReadahead: resp.MaxReadahead, + Flags: uint32(resp.Flags), + MaxWrite: resp.MaxWrite, + } + // MaxWrite larger than our receive buffer would just lead to + // errors on large writes. + if out.MaxWrite > maxWrite { + out.MaxWrite = maxWrite + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A StatfsRequest requests information about the mounted file system. +type StatfsRequest struct { + Header `json:"-"` +} + +var _ = Request(&StatfsRequest{}) + +func (r *StatfsRequest) String() string { + return fmt.Sprintf("Statfs [%s]\n", &r.Header) +} + +// Respond replies to the request with the given response. +func (r *StatfsRequest) Respond(resp *StatfsResponse) { + out := &statfsOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + St: kstatfs{ + Blocks: resp.Blocks, + Bfree: resp.Bfree, + Bavail: resp.Bavail, + Files: resp.Files, + Bsize: resp.Bsize, + Namelen: resp.Namelen, + Frsize: resp.Frsize, + }, + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A StatfsResponse is the response to a StatfsRequest. +type StatfsResponse struct { + Blocks uint64 // Total data blocks in file system. + Bfree uint64 // Free blocks in file system. + Bavail uint64 // Free blocks in file system if you're not root. + Files uint64 // Total files in file system. + Ffree uint64 // Free files in file system. + Bsize uint32 // Block size + Namelen uint32 // Maximum file name length? + Frsize uint32 // Fragment size, smallest addressable data size in the file system. 
+} + +func (r *StatfsResponse) String() string { + return fmt.Sprintf("Statfs %+v", *r) +} + +// An AccessRequest asks whether the file can be accessed +// for the purpose specified by the mask. +type AccessRequest struct { + Header `json:"-"` + Mask uint32 +} + +var _ = Request(&AccessRequest{}) + +func (r *AccessRequest) String() string { + return fmt.Sprintf("Access [%s] mask=%#x", &r.Header, r.Mask) +} + +// Respond replies to the request indicating that access is allowed. +// To deny access, use RespondError. +func (r *AccessRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.respond(out, unsafe.Sizeof(*out)) +} + +// An Attr is the metadata for a single file or directory. +type Attr struct { + Inode uint64 // inode number + Size uint64 // size in bytes + Blocks uint64 // size in blocks + Atime time.Time // time of last access + Mtime time.Time // time of last modification + Ctime time.Time // time of last inode change + Crtime time.Time // time of creation (OS X only) + Mode os.FileMode // file mode + Nlink uint32 // number of links + Uid uint32 // owner uid + Gid uint32 // group gid + Rdev uint32 // device numbers + Flags uint32 // chflags(2) flags (OS X only) +} + +func unix(t time.Time) (sec uint64, nsec uint32) { + nano := t.UnixNano() + sec = uint64(nano / 1e9) + nsec = uint32(nano % 1e9) + return +} + +func (a *Attr) attr() (out attr) { + out.Ino = a.Inode + out.Size = a.Size + out.Blocks = a.Blocks + out.Atime, out.AtimeNsec = unix(a.Atime) + out.Mtime, out.MtimeNsec = unix(a.Mtime) + out.Ctime, out.CtimeNsec = unix(a.Ctime) + out.SetCrtime(unix(a.Crtime)) + out.Mode = uint32(a.Mode) & 0777 + switch { + default: + out.Mode |= syscall.S_IFREG + case a.Mode&os.ModeDir != 0: + out.Mode |= syscall.S_IFDIR + case a.Mode&os.ModeDevice != 0: + if a.Mode&os.ModeCharDevice != 0 { + out.Mode |= syscall.S_IFCHR + } else { + out.Mode |= syscall.S_IFBLK + } + case a.Mode&os.ModeNamedPipe != 0: + out.Mode |= syscall.S_IFIFO + case 
a.Mode&os.ModeSymlink != 0: + out.Mode |= syscall.S_IFLNK + case a.Mode&os.ModeSocket != 0: + out.Mode |= syscall.S_IFSOCK + } + if a.Mode&os.ModeSetuid != 0 { + out.Mode |= syscall.S_ISUID + } + if a.Mode&os.ModeSetgid != 0 { + out.Mode |= syscall.S_ISGID + } + out.Nlink = a.Nlink + if out.Nlink < 1 { + out.Nlink = 1 + } + out.Uid = a.Uid + out.Gid = a.Gid + out.Rdev = a.Rdev + out.SetFlags(a.Flags) + + return +} + +// A GetattrRequest asks for the metadata for the file denoted by r.Node. +type GetattrRequest struct { + Header `json:"-"` +} + +var _ = Request(&GetattrRequest{}) + +func (r *GetattrRequest) String() string { + return fmt.Sprintf("Getattr [%s]", &r.Header) +} + +// Respond replies to the request with the given response. +func (r *GetattrRequest) Respond(resp *GetattrResponse) { + out := &attrOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A GetattrResponse is the response to a GetattrRequest. +type GetattrResponse struct { + AttrValid time.Duration // how long Attr can be cached + Attr Attr // file attributes +} + +func (r *GetattrResponse) String() string { + return fmt.Sprintf("Getattr %+v", *r) +} + +// A GetxattrRequest asks for the extended attributes associated with r.Node. +type GetxattrRequest struct { + Header `json:"-"` + + // Maximum size to return. + Size uint32 + + // Name of the attribute requested. + Name string + + // Offset within extended attributes. + // + // Only valid for OS X, and then only with the resource fork + // attribute. + Position uint32 +} + +var _ = Request(&GetxattrRequest{}) + +func (r *GetxattrRequest) String() string { + return fmt.Sprintf("Getxattr [%s] %q %d @%d", &r.Header, r.Name, r.Size, r.Position) +} + +// Respond replies to the request with the given response. 
+func (r *GetxattrRequest) Respond(resp *GetxattrResponse) { + if r.Size == 0 { + out := &getxattrOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Size: uint32(len(resp.Xattr)), + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) + } else { + out := &outHeader{Unique: uint64(r.ID)} + r.respondData(out, unsafe.Sizeof(*out), resp.Xattr) + } +} + +func (r *GetxattrRequest) RespondError(err Error) { + err = translateGetxattrError(err) + r.Header.RespondError(err) +} + +// A GetxattrResponse is the response to a GetxattrRequest. +type GetxattrResponse struct { + Xattr []byte +} + +func (r *GetxattrResponse) String() string { + return fmt.Sprintf("Getxattr %x", r.Xattr) +} + +// A ListxattrRequest asks to list the extended attributes associated with r.Node. +type ListxattrRequest struct { + Header `json:"-"` + Size uint32 // maximum size to return + Position uint32 // offset within attribute list +} + +var _ = Request(&ListxattrRequest{}) + +func (r *ListxattrRequest) String() string { + return fmt.Sprintf("Listxattr [%s] %d @%d", &r.Header, r.Size, r.Position) +} + +// Respond replies to the request with the given response. +func (r *ListxattrRequest) Respond(resp *ListxattrResponse) { + if r.Size == 0 { + out := &getxattrOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Size: uint32(len(resp.Xattr)), + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) + } else { + out := &outHeader{Unique: uint64(r.ID)} + r.respondData(out, unsafe.Sizeof(*out), resp.Xattr) + } +} + +// A ListxattrResponse is the response to a ListxattrRequest. +type ListxattrResponse struct { + Xattr []byte +} + +func (r *ListxattrResponse) String() string { + return fmt.Sprintf("Listxattr %x", r.Xattr) +} + +// Append adds an extended attribute name to the response. +func (r *ListxattrResponse) Append(names ...string) { + for _, name := range names { + r.Xattr = append(r.Xattr, name...) 
+ r.Xattr = append(r.Xattr, '\x00') + } +} + +// A RemovexattrRequest asks to remove an extended attribute associated with r.Node. +type RemovexattrRequest struct { + Header `json:"-"` + Name string // name of extended attribute +} + +var _ = Request(&RemovexattrRequest{}) + +func (r *RemovexattrRequest) String() string { + return fmt.Sprintf("Removexattr [%s] %q", &r.Header, r.Name) +} + +// Respond replies to the request, indicating that the attribute was removed. +func (r *RemovexattrRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.respond(out, unsafe.Sizeof(*out)) +} + +func (r *RemovexattrRequest) RespondError(err Error) { + err = translateGetxattrError(err) + r.Header.RespondError(err) +} + +// A SetxattrRequest asks to set an extended attribute associated with a file. +type SetxattrRequest struct { + Header `json:"-"` + + // Flags can make the request fail if attribute does/not already + // exist. Unfortunately, the constants are platform-specific and + // not exposed by Go1.2. Look for XATTR_CREATE, XATTR_REPLACE. + // + // TODO improve this later + // + // TODO XATTR_CREATE and exist -> EEXIST + // + // TODO XATTR_REPLACE and not exist -> ENODATA + Flags uint32 + + // Offset within extended attributes. + // + // Only valid for OS X, and then only with the resource fork + // attribute. + Position uint32 + + Name string + Xattr []byte +} + +var _ = Request(&SetxattrRequest{}) + +func trunc(b []byte, max int) ([]byte, string) { + if len(b) > max { + return b[:max], "..." + } + return b, "" +} + +func (r *SetxattrRequest) String() string { + xattr, tail := trunc(r.Xattr, 16) + return fmt.Sprintf("Setxattr [%s] %q %x%s fl=%v @%#x", &r.Header, r.Name, xattr, tail, r.Flags, r.Position) +} + +// Respond replies to the request, indicating that the extended attribute was set. 
+func (r *SetxattrRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.respond(out, unsafe.Sizeof(*out)) +} + +func (r *SetxattrRequest) RespondError(err Error) { + err = translateGetxattrError(err) + r.Header.RespondError(err) +} + +// A LookupRequest asks to look up the given name in the directory named by r.Node. +type LookupRequest struct { + Header `json:"-"` + Name string +} + +var _ = Request(&LookupRequest{}) + +func (r *LookupRequest) String() string { + return fmt.Sprintf("Lookup [%s] %q", &r.Header, r.Name) +} + +// Respond replies to the request with the given response. +func (r *LookupRequest) Respond(resp *LookupResponse) { + out := &entryOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Nodeid: uint64(resp.Node), + Generation: resp.Generation, + EntryValid: uint64(resp.EntryValid / time.Second), + EntryValidNsec: uint32(resp.EntryValid % time.Second / time.Nanosecond), + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A LookupResponse is the response to a LookupRequest. +type LookupResponse struct { + Node NodeID + Generation uint64 + EntryValid time.Duration + AttrValid time.Duration + Attr Attr +} + +func (r *LookupResponse) String() string { + return fmt.Sprintf("Lookup %+v", *r) +} + +// An OpenRequest asks to open a file or directory +type OpenRequest struct { + Header `json:"-"` + Dir bool // is this Opendir? + Flags OpenFlags +} + +var _ = Request(&OpenRequest{}) + +func (r *OpenRequest) String() string { + return fmt.Sprintf("Open [%s] dir=%v fl=%v", &r.Header, r.Dir, r.Flags) +} + +// Respond replies to the request with the given response. 
+func (r *OpenRequest) Respond(resp *OpenResponse) { + out := &openOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Fh: uint64(resp.Handle), + OpenFlags: uint32(resp.Flags), + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A OpenResponse is the response to a OpenRequest. +type OpenResponse struct { + Handle HandleID + Flags OpenResponseFlags +} + +func (r *OpenResponse) String() string { + return fmt.Sprintf("Open %+v", *r) +} + +// A CreateRequest asks to create and open a file (not a directory). +type CreateRequest struct { + Header `json:"-"` + Name string + Flags OpenFlags + Mode os.FileMode +} + +var _ = Request(&CreateRequest{}) + +func (r *CreateRequest) String() string { + return fmt.Sprintf("Create [%s] %q fl=%v mode=%v", &r.Header, r.Name, r.Flags, r.Mode) +} + +// Respond replies to the request with the given response. +func (r *CreateRequest) Respond(resp *CreateResponse) { + out := &createOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + + Nodeid: uint64(resp.Node), + Generation: resp.Generation, + EntryValid: uint64(resp.EntryValid / time.Second), + EntryValidNsec: uint32(resp.EntryValid % time.Second / time.Nanosecond), + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + + Fh: uint64(resp.Handle), + OpenFlags: uint32(resp.Flags), + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A CreateResponse is the response to a CreateRequest. +// It describes the created node and opened handle. +type CreateResponse struct { + LookupResponse + OpenResponse +} + +func (r *CreateResponse) String() string { + return fmt.Sprintf("Create %+v", *r) +} + +// A MkdirRequest asks to create (but not open) a directory. 
+type MkdirRequest struct { + Header `json:"-"` + Name string + Mode os.FileMode +} + +var _ = Request(&MkdirRequest{}) + +func (r *MkdirRequest) String() string { + return fmt.Sprintf("Mkdir [%s] %q mode=%v", &r.Header, r.Name, r.Mode) +} + +// Respond replies to the request with the given response. +func (r *MkdirRequest) Respond(resp *MkdirResponse) { + out := &entryOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Nodeid: uint64(resp.Node), + Generation: resp.Generation, + EntryValid: uint64(resp.EntryValid / time.Second), + EntryValidNsec: uint32(resp.EntryValid % time.Second / time.Nanosecond), + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A MkdirResponse is the response to a MkdirRequest. +type MkdirResponse struct { + LookupResponse +} + +func (r *MkdirResponse) String() string { + return fmt.Sprintf("Mkdir %+v", *r) +} + +// A ReadRequest asks to read from an open file. +type ReadRequest struct { + Header `json:"-"` + Dir bool // is this Readdir? + Handle HandleID + Offset int64 + Size int +} + +var _ = Request(&ReadRequest{}) + +func (r *ReadRequest) String() string { + return fmt.Sprintf("Read [%s] %#x %d @%#x dir=%v", &r.Header, r.Handle, r.Size, r.Offset, r.Dir) +} + +// Respond replies to the request with the given response. +func (r *ReadRequest) Respond(resp *ReadResponse) { + out := &outHeader{Unique: uint64(r.ID)} + r.respondData(out, unsafe.Sizeof(*out), resp.Data) +} + +// A ReadResponse is the response to a ReadRequest. 
+type ReadResponse struct { + Data []byte +} + +func (r *ReadResponse) String() string { + return fmt.Sprintf("Read %d", len(r.Data)) +} + +type jsonReadResponse struct { + Len uint64 +} + +func (r *ReadResponse) MarshalJSON() ([]byte, error) { + j := jsonReadResponse{ + Len: uint64(len(r.Data)), + } + return json.Marshal(j) +} + +// A ReleaseRequest asks to release (close) an open file handle. +type ReleaseRequest struct { + Header `json:"-"` + Dir bool // is this Releasedir? + Handle HandleID + Flags OpenFlags // flags from OpenRequest + ReleaseFlags ReleaseFlags + LockOwner uint32 +} + +var _ = Request(&ReleaseRequest{}) + +func (r *ReleaseRequest) String() string { + return fmt.Sprintf("Release [%s] %#x fl=%v rfl=%v owner=%#x", &r.Header, r.Handle, r.Flags, r.ReleaseFlags, r.LockOwner) +} + +// Respond replies to the request, indicating that the handle has been released. +func (r *ReleaseRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.respond(out, unsafe.Sizeof(*out)) +} + +// A DestroyRequest is sent by the kernel when unmounting the file system. +// No more requests will be received after this one, but it should still be +// responded to. +type DestroyRequest struct { + Header `json:"-"` +} + +var _ = Request(&DestroyRequest{}) + +func (r *DestroyRequest) String() string { + return fmt.Sprintf("Destroy [%s]", &r.Header) +} + +// Respond replies to the request. +func (r *DestroyRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.respond(out, unsafe.Sizeof(*out)) +} + +// A ForgetRequest is sent by the kernel when forgetting about r.Node +// as returned by r.N lookup requests. +type ForgetRequest struct { + Header `json:"-"` + N uint64 +} + +var _ = Request(&ForgetRequest{}) + +func (r *ForgetRequest) String() string { + return fmt.Sprintf("Forget [%s] %d", &r.Header, r.N) +} + +// Respond replies to the request, indicating that the forgetfulness has been recorded. 
+func (r *ForgetRequest) Respond() { + // Don't reply to forget messages. + r.noResponse() +} + +// A Dirent represents a single directory entry. +type Dirent struct { + // Inode this entry names. + Inode uint64 + + // Type of the entry, for example DT_File. + // + // Setting this is optional. The zero value (DT_Unknown) means + // callers will just need to do a Getattr when the type is + // needed. Providing a type can speed up operations + // significantly. + Type DirentType + + // Name of the entry + Name string +} + +// Type of an entry in a directory listing. +type DirentType uint32 + +const ( + // These don't quite match os.FileMode; especially there's an + // explicit unknown, instead of zero value meaning file. They + // are also not quite syscall.DT_*; nothing says the FUSE + // protocol follows those, and even if they were, we don't + // want each fs to fiddle with syscall. + + // The shift by 12 is hardcoded in the FUSE userspace + // low-level C library, so it's safe here. + + DT_Unknown DirentType = 0 + DT_Socket DirentType = syscall.S_IFSOCK >> 12 + DT_Link DirentType = syscall.S_IFLNK >> 12 + DT_File DirentType = syscall.S_IFREG >> 12 + DT_Block DirentType = syscall.S_IFBLK >> 12 + DT_Dir DirentType = syscall.S_IFDIR >> 12 + DT_Char DirentType = syscall.S_IFCHR >> 12 + DT_FIFO DirentType = syscall.S_IFIFO >> 12 +) + +func (t DirentType) String() string { + switch t { + case DT_Unknown: + return "unknown" + case DT_Socket: + return "socket" + case DT_Link: + return "link" + case DT_File: + return "file" + case DT_Block: + return "block" + case DT_Dir: + return "dir" + case DT_Char: + return "char" + case DT_FIFO: + return "fifo" + } + return "invalid" +} + +// AppendDirent appends the encoded form of a directory entry to data +// and returns the resulting slice. 
+func AppendDirent(data []byte, dir Dirent) []byte { + de := dirent{ + Ino: dir.Inode, + Namelen: uint32(len(dir.Name)), + Type: uint32(dir.Type), + } + de.Off = uint64(len(data) + direntSize + (len(dir.Name)+7)&^7) + data = append(data, (*[direntSize]byte)(unsafe.Pointer(&de))[:]...) + data = append(data, dir.Name...) + n := direntSize + uintptr(len(dir.Name)) + if n%8 != 0 { + var pad [8]byte + data = append(data, pad[:8-n%8]...) + } + return data +} + +// A WriteRequest asks to write to an open file. +type WriteRequest struct { + Header + Handle HandleID + Offset int64 + Data []byte + Flags WriteFlags +} + +var _ = Request(&WriteRequest{}) + +func (r *WriteRequest) String() string { + return fmt.Sprintf("Write [%s] %#x %d @%d fl=%v", &r.Header, r.Handle, len(r.Data), r.Offset, r.Flags) +} + +type jsonWriteRequest struct { + Handle HandleID + Offset int64 + Len uint64 + Flags WriteFlags +} + +func (r *WriteRequest) MarshalJSON() ([]byte, error) { + j := jsonWriteRequest{ + Handle: r.Handle, + Offset: r.Offset, + Len: uint64(len(r.Data)), + Flags: r.Flags, + } + return json.Marshal(j) +} + +// Respond replies to the request with the given response. +func (r *WriteRequest) Respond(resp *WriteResponse) { + out := &writeOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Size: uint32(resp.Size), + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A WriteResponse replies to a write indicating how many bytes were written. +type WriteResponse struct { + Size int +} + +func (r *WriteResponse) String() string { + return fmt.Sprintf("Write %+v", *r) +} + +// A SetattrRequest asks to change one or more attributes associated with a file, +// as indicated by Valid. 
+type SetattrRequest struct { + Header `json:"-"` + Valid SetattrValid + Handle HandleID + Size uint64 + Atime time.Time + Mtime time.Time + Mode os.FileMode + Uid uint32 + Gid uint32 + + // OS X only + Bkuptime time.Time + Chgtime time.Time + Crtime time.Time + Flags uint32 // see chflags(2) +} + +var _ = Request(&SetattrRequest{}) + +func (r *SetattrRequest) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "Setattr [%s]", &r.Header) + if r.Valid.Mode() { + fmt.Fprintf(&buf, " mode=%v", r.Mode) + } + if r.Valid.Uid() { + fmt.Fprintf(&buf, " uid=%d", r.Uid) + } + if r.Valid.Gid() { + fmt.Fprintf(&buf, " gid=%d", r.Gid) + } + if r.Valid.Size() { + fmt.Fprintf(&buf, " size=%d", r.Size) + } + if r.Valid.Atime() { + fmt.Fprintf(&buf, " atime=%v", r.Atime) + } + if r.Valid.AtimeNow() { + fmt.Fprintf(&buf, " atime=now") + } + if r.Valid.Mtime() { + fmt.Fprintf(&buf, " mtime=%v", r.Mtime) + } + if r.Valid.MtimeNow() { + fmt.Fprintf(&buf, " mtime=now") + } + if r.Valid.Handle() { + fmt.Fprintf(&buf, " handle=%#x", r.Handle) + } else { + fmt.Fprintf(&buf, " handle=INVALID-%#x", r.Handle) + } + if r.Valid.LockOwner() { + fmt.Fprintf(&buf, " lockowner") + } + if r.Valid.Crtime() { + fmt.Fprintf(&buf, " crtime=%v", r.Crtime) + } + if r.Valid.Chgtime() { + fmt.Fprintf(&buf, " chgtime=%v", r.Chgtime) + } + if r.Valid.Bkuptime() { + fmt.Fprintf(&buf, " bkuptime=%v", r.Bkuptime) + } + if r.Valid.Flags() { + fmt.Fprintf(&buf, " flags=%#x", r.Flags) + } + return buf.String() +} + +// Respond replies to the request with the given response, +// giving the updated attributes. +func (r *SetattrRequest) Respond(resp *SetattrResponse) { + out := &attrOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A SetattrResponse is the response to a SetattrRequest. 
+type SetattrResponse struct { + AttrValid time.Duration // how long Attr can be cached + Attr Attr // file attributes +} + +func (r *SetattrResponse) String() string { + return fmt.Sprintf("Setattr %+v", *r) +} + +// A FlushRequest asks for the current state of an open file to be flushed +// to storage, as when a file descriptor is being closed. A single opened Handle +// may receive multiple FlushRequests over its lifetime. +type FlushRequest struct { + Header `json:"-"` + Handle HandleID + Flags uint32 + LockOwner uint64 +} + +var _ = Request(&FlushRequest{}) + +func (r *FlushRequest) String() string { + return fmt.Sprintf("Flush [%s] %#x fl=%#x lk=%#x", &r.Header, r.Handle, r.Flags, r.LockOwner) +} + +// Respond replies to the request, indicating that the flush succeeded. +func (r *FlushRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.respond(out, unsafe.Sizeof(*out)) +} + +// A RemoveRequest asks to remove a file or directory from the +// directory r.Node. +type RemoveRequest struct { + Header `json:"-"` + Name string // name of the entry to remove + Dir bool // is this rmdir? +} + +var _ = Request(&RemoveRequest{}) + +func (r *RemoveRequest) String() string { + return fmt.Sprintf("Remove [%s] %q dir=%v", &r.Header, r.Name, r.Dir) +} + +// Respond replies to the request, indicating that the file was removed. +func (r *RemoveRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.respond(out, unsafe.Sizeof(*out)) +} + +// A SymlinkRequest is a request to create a symlink making NewName point to Target. +type SymlinkRequest struct { + Header `json:"-"` + NewName, Target string +} + +var _ = Request(&SymlinkRequest{}) + +func (r *SymlinkRequest) String() string { + return fmt.Sprintf("Symlink [%s] from %q to target %q", &r.Header, r.NewName, r.Target) +} + +// Respond replies to the request, indicating that the symlink was created. 
+func (r *SymlinkRequest) Respond(resp *SymlinkResponse) { + out := &entryOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Nodeid: uint64(resp.Node), + Generation: resp.Generation, + EntryValid: uint64(resp.EntryValid / time.Second), + EntryValidNsec: uint32(resp.EntryValid % time.Second / time.Nanosecond), + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A SymlinkResponse is the response to a SymlinkRequest. +type SymlinkResponse struct { + LookupResponse +} + +// A ReadlinkRequest is a request to read a symlink's target. +type ReadlinkRequest struct { + Header `json:"-"` +} + +var _ = Request(&ReadlinkRequest{}) + +func (r *ReadlinkRequest) String() string { + return fmt.Sprintf("Readlink [%s]", &r.Header) +} + +func (r *ReadlinkRequest) Respond(target string) { + out := &outHeader{Unique: uint64(r.ID)} + r.respondData(out, unsafe.Sizeof(*out), []byte(target)) +} + +// A LinkRequest is a request to create a hard link. +type LinkRequest struct { + Header `json:"-"` + OldNode NodeID + NewName string +} + +var _ = Request(&LinkRequest{}) + +func (r *LinkRequest) String() string { + return fmt.Sprintf("Link [%s] node %d to %q", &r.Header, r.OldNode, r.NewName) +} + +func (r *LinkRequest) Respond(resp *LookupResponse) { + out := &entryOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Nodeid: uint64(resp.Node), + Generation: resp.Generation, + EntryValid: uint64(resp.EntryValid / time.Second), + EntryValidNsec: uint32(resp.EntryValid % time.Second / time.Nanosecond), + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A RenameRequest is a request to rename a file. 
+type RenameRequest struct { + Header `json:"-"` + NewDir NodeID + OldName, NewName string +} + +var _ = Request(&RenameRequest{}) + +func (r *RenameRequest) String() string { + return fmt.Sprintf("Rename [%s] from %q to dirnode %d %q", &r.Header, r.OldName, r.NewDir, r.NewName) +} + +func (r *RenameRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.respond(out, unsafe.Sizeof(*out)) +} + +type MknodRequest struct { + Header `json:"-"` + Name string + Mode os.FileMode + Rdev uint32 +} + +var _ = Request(&MknodRequest{}) + +func (r *MknodRequest) String() string { + return fmt.Sprintf("Mknod [%s] Name %q mode %v rdev %d", &r.Header, r.Name, r.Mode, r.Rdev) +} + +func (r *MknodRequest) Respond(resp *LookupResponse) { + out := &entryOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Nodeid: uint64(resp.Node), + Generation: resp.Generation, + EntryValid: uint64(resp.EntryValid / time.Second), + EntryValidNsec: uint32(resp.EntryValid % time.Second / time.Nanosecond), + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +type FsyncRequest struct { + Header `json:"-"` + Handle HandleID + // TODO bit 1 is datasync, not well documented upstream + Flags uint32 + Dir bool +} + +var _ = Request(&FsyncRequest{}) + +func (r *FsyncRequest) String() string { + return fmt.Sprintf("Fsync [%s] Handle %v Flags %v", &r.Header, r.Handle, r.Flags) +} + +func (r *FsyncRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.respond(out, unsafe.Sizeof(*out)) +} + +// An InterruptRequest is a request to interrupt another pending request. The +// response to that request should return an error status of EINTR. +type InterruptRequest struct { + Header `json:"-"` + IntrID RequestID // ID of the request to be interrupt. 
+} + +var _ = Request(&InterruptRequest{}) + +func (r *InterruptRequest) Respond() { + // nothing to do here + r.noResponse() +} + +func (r *InterruptRequest) String() string { + return fmt.Sprintf("Interrupt [%s] ID %v", &r.Header, r.IntrID) +} + +/*{ + +// A XXXRequest xxx. +type XXXRequest struct { + Header `json:"-"` + xxx +} + +var _ = Request(&XXXRequest{}) + +func (r *XXXRequest) String() string { + return fmt.Sprintf("XXX [%s] xxx", &r.Header) +} + +// Respond replies to the request with the given response. +func (r *XXXRequest) Respond(resp *XXXResponse) { + out := &xxxOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + xxx, + } + r.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A XXXResponse is the response to a XXXRequest. +type XXXResponse struct { + xxx +} + +func (r *XXXResponse) String() string { + return fmt.Sprintf("XXX %+v", *r) +} + + } +*/ diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel.go new file mode 100644 index 00000000..5fba53db --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel.go @@ -0,0 +1,639 @@ +// See the file LICENSE for copyright and licensing information. + +// Derived from FUSE's fuse_kernel.h +/* + This file defines the kernel interface of FUSE + Copyright (C) 2001-2007 Miklos Szeredi + + + This -- and only this -- header file may also be distributed under + the terms of the BSD Licence as follows: + + Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. +*/ + +package fuse + +import ( + "fmt" + "syscall" + "unsafe" +) + +// Version is the FUSE version implemented by the package. +const Version = "7.8" + +const ( + kernelVersion = 7 + kernelMinorVersion = 8 + rootID = 1 +) + +type kstatfs struct { + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Bsize uint32 + Namelen uint32 + Frsize uint32 + Padding uint32 + Spare [6]uint32 +} + +type fileLock struct { + Start uint64 + End uint64 + Type uint32 + Pid uint32 +} + +// The SetattrValid are bit flags describing which fields in the SetattrRequest +// are included in the change. +type SetattrValid uint32 + +const ( + SetattrMode SetattrValid = 1 << 0 + SetattrUid SetattrValid = 1 << 1 + SetattrGid SetattrValid = 1 << 2 + SetattrSize SetattrValid = 1 << 3 + SetattrAtime SetattrValid = 1 << 4 + SetattrMtime SetattrValid = 1 << 5 + SetattrHandle SetattrValid = 1 << 6 + + // Linux only(?) 
+ SetattrAtimeNow SetattrValid = 1 << 7 + SetattrMtimeNow SetattrValid = 1 << 8 + SetattrLockOwner SetattrValid = 1 << 9 // http://www.mail-archive.com/git-commits-head@vger.kernel.org/msg27852.html + + // OS X only + SetattrCrtime SetattrValid = 1 << 28 + SetattrChgtime SetattrValid = 1 << 29 + SetattrBkuptime SetattrValid = 1 << 30 + SetattrFlags SetattrValid = 1 << 31 +) + +func (fl SetattrValid) Mode() bool { return fl&SetattrMode != 0 } +func (fl SetattrValid) Uid() bool { return fl&SetattrUid != 0 } +func (fl SetattrValid) Gid() bool { return fl&SetattrGid != 0 } +func (fl SetattrValid) Size() bool { return fl&SetattrSize != 0 } +func (fl SetattrValid) Atime() bool { return fl&SetattrAtime != 0 } +func (fl SetattrValid) Mtime() bool { return fl&SetattrMtime != 0 } +func (fl SetattrValid) Handle() bool { return fl&SetattrHandle != 0 } +func (fl SetattrValid) AtimeNow() bool { return fl&SetattrAtimeNow != 0 } +func (fl SetattrValid) MtimeNow() bool { return fl&SetattrMtimeNow != 0 } +func (fl SetattrValid) LockOwner() bool { return fl&SetattrLockOwner != 0 } +func (fl SetattrValid) Crtime() bool { return fl&SetattrCrtime != 0 } +func (fl SetattrValid) Chgtime() bool { return fl&SetattrChgtime != 0 } +func (fl SetattrValid) Bkuptime() bool { return fl&SetattrBkuptime != 0 } +func (fl SetattrValid) Flags() bool { return fl&SetattrFlags != 0 } + +func (fl SetattrValid) String() string { + return flagString(uint32(fl), setattrValidNames) +} + +var setattrValidNames = []flagName{ + {uint32(SetattrMode), "SetattrMode"}, + {uint32(SetattrUid), "SetattrUid"}, + {uint32(SetattrGid), "SetattrGid"}, + {uint32(SetattrSize), "SetattrSize"}, + {uint32(SetattrAtime), "SetattrAtime"}, + {uint32(SetattrMtime), "SetattrMtime"}, + {uint32(SetattrHandle), "SetattrHandle"}, + {uint32(SetattrAtimeNow), "SetattrAtimeNow"}, + {uint32(SetattrMtimeNow), "SetattrMtimeNow"}, + {uint32(SetattrLockOwner), "SetattrLockOwner"}, + {uint32(SetattrCrtime), "SetattrCrtime"}, + 
{uint32(SetattrChgtime), "SetattrChgtime"}, + {uint32(SetattrBkuptime), "SetattrBkuptime"}, + {uint32(SetattrFlags), "SetattrFlags"}, +} + +// Flags that can be seen in OpenRequest.Flags. +const ( + // Access modes. These are not 1-bit flags, but alternatives where + // only one can be chosen. See the IsReadOnly etc convenience + // methods. + OpenReadOnly OpenFlags = syscall.O_RDONLY + OpenWriteOnly OpenFlags = syscall.O_WRONLY + OpenReadWrite OpenFlags = syscall.O_RDWR + + OpenAppend OpenFlags = syscall.O_APPEND + OpenCreate OpenFlags = syscall.O_CREAT + OpenExclusive OpenFlags = syscall.O_EXCL + OpenSync OpenFlags = syscall.O_SYNC + OpenTruncate OpenFlags = syscall.O_TRUNC +) + +// OpenAccessModeMask is a bitmask that separates the access mode +// from the other flags in OpenFlags. +const OpenAccessModeMask OpenFlags = syscall.O_ACCMODE + +// OpenFlags are the O_FOO flags passed to open/create/etc calls. For +// example, os.O_WRONLY | os.O_APPEND. +type OpenFlags uint32 + +func (fl OpenFlags) String() string { + // O_RDONLY, O_RWONLY, O_RDWR are not flags + s := accModeName(fl & OpenAccessModeMask) + flags := uint32(fl &^ OpenAccessModeMask) + if flags != 0 { + s = s + "+" + flagString(flags, openFlagNames) + } + return s +} + +// Return true if OpenReadOnly is set. +func (fl OpenFlags) IsReadOnly() bool { + return fl&OpenAccessModeMask == OpenReadOnly +} + +// Return true if OpenWriteOnly is set. +func (fl OpenFlags) IsWriteOnly() bool { + return fl&OpenAccessModeMask == OpenWriteOnly +} + +// Return true if OpenReadWrite is set. 
+func (fl OpenFlags) IsReadWrite() bool { + return fl&OpenAccessModeMask == OpenReadWrite +} + +func accModeName(flags OpenFlags) string { + switch flags { + case OpenReadOnly: + return "OpenReadOnly" + case OpenWriteOnly: + return "OpenWriteOnly" + case OpenReadWrite: + return "OpenReadWrite" + default: + return "" + } +} + +var openFlagNames = []flagName{ + {uint32(OpenCreate), "OpenCreate"}, + {uint32(OpenExclusive), "OpenExclusive"}, + {uint32(OpenTruncate), "OpenTruncate"}, + {uint32(OpenAppend), "OpenAppend"}, + {uint32(OpenSync), "OpenSync"}, +} + +// The OpenResponseFlags are returned in the OpenResponse. +type OpenResponseFlags uint32 + +const ( + OpenDirectIO OpenResponseFlags = 1 << 0 // bypass page cache for this open file + OpenKeepCache OpenResponseFlags = 1 << 1 // don't invalidate the data cache on open + OpenNonSeekable OpenResponseFlags = 1 << 2 // (Linux?) + + OpenPurgeAttr OpenResponseFlags = 1 << 30 // OS X + OpenPurgeUBC OpenResponseFlags = 1 << 31 // OS X +) + +func (fl OpenResponseFlags) String() string { + return flagString(uint32(fl), openResponseFlagNames) +} + +var openResponseFlagNames = []flagName{ + {uint32(OpenDirectIO), "OpenDirectIO"}, + {uint32(OpenKeepCache), "OpenKeepCache"}, + {uint32(OpenPurgeAttr), "OpenPurgeAttr"}, + {uint32(OpenPurgeUBC), "OpenPurgeUBC"}, +} + +// The InitFlags are used in the Init exchange. 
+type InitFlags uint32 + +const ( + InitAsyncRead InitFlags = 1 << 0 + InitPosixLocks InitFlags = 1 << 1 + InitFileOps InitFlags = 1 << 2 + InitAtomicTrunc InitFlags = 1 << 3 + InitExportSupport InitFlags = 1 << 4 + InitBigWrites InitFlags = 1 << 5 + InitDontMask InitFlags = 1 << 6 + InitSpliceWrite InitFlags = 1 << 7 + InitSpliceMove InitFlags = 1 << 8 + InitSpliceRead InitFlags = 1 << 9 + InitFlockLocks InitFlags = 1 << 10 + InitHasIoctlDir InitFlags = 1 << 11 + InitAutoInvalData InitFlags = 1 << 12 + InitDoReaddirplus InitFlags = 1 << 13 + InitReaddirplusAuto InitFlags = 1 << 14 + InitAsyncDIO InitFlags = 1 << 15 + InitWritebackCache InitFlags = 1 << 16 + InitNoOpenSupport InitFlags = 1 << 17 + + InitCaseSensitive InitFlags = 1 << 29 // OS X only + InitVolRename InitFlags = 1 << 30 // OS X only + InitXtimes InitFlags = 1 << 31 // OS X only +) + +type flagName struct { + bit uint32 + name string +} + +var initFlagNames = []flagName{ + {uint32(InitAsyncRead), "InitAsyncRead"}, + {uint32(InitPosixLocks), "InitPosixLocks"}, + {uint32(InitFileOps), "InitFileOps"}, + {uint32(InitAtomicTrunc), "InitAtomicTrunc"}, + {uint32(InitExportSupport), "InitExportSupport"}, + {uint32(InitBigWrites), "InitBigWrites"}, + {uint32(InitDontMask), "InitDontMask"}, + {uint32(InitSpliceWrite), "InitSpliceWrite"}, + {uint32(InitSpliceMove), "InitSpliceMove"}, + {uint32(InitSpliceRead), "InitSpliceRead"}, + {uint32(InitFlockLocks), "InitFlockLocks"}, + {uint32(InitHasIoctlDir), "InitHasIoctlDir"}, + {uint32(InitAutoInvalData), "InitAutoInvalData"}, + {uint32(InitDoReaddirplus), "InitDoReaddirplus"}, + {uint32(InitReaddirplusAuto), "InitReaddirplusAuto"}, + {uint32(InitAsyncDIO), "InitAsyncDIO"}, + {uint32(InitWritebackCache), "InitWritebackCache"}, + {uint32(InitNoOpenSupport), "InitNoOpenSupport"}, + + {uint32(InitCaseSensitive), "InitCaseSensitive"}, + {uint32(InitVolRename), "InitVolRename"}, + {uint32(InitXtimes), "InitXtimes"}, +} + +func (fl InitFlags) String() string { + return 
flagString(uint32(fl), initFlagNames) +} + +func flagString(f uint32, names []flagName) string { + var s string + + if f == 0 { + return "0" + } + + for _, n := range names { + if f&n.bit != 0 { + s += "+" + n.name + f &^= n.bit + } + } + if f != 0 { + s += fmt.Sprintf("%+#x", f) + } + return s[1:] +} + +// The ReleaseFlags are used in the Release exchange. +type ReleaseFlags uint32 + +const ( + ReleaseFlush ReleaseFlags = 1 << 0 +) + +func (fl ReleaseFlags) String() string { + return flagString(uint32(fl), releaseFlagNames) +} + +var releaseFlagNames = []flagName{ + {uint32(ReleaseFlush), "ReleaseFlush"}, +} + +// Opcodes +const ( + opLookup = 1 + opForget = 2 // no reply + opGetattr = 3 + opSetattr = 4 + opReadlink = 5 + opSymlink = 6 + opMknod = 8 + opMkdir = 9 + opUnlink = 10 + opRmdir = 11 + opRename = 12 + opLink = 13 + opOpen = 14 + opRead = 15 + opWrite = 16 + opStatfs = 17 + opRelease = 18 + opFsync = 20 + opSetxattr = 21 + opGetxattr = 22 + opListxattr = 23 + opRemovexattr = 24 + opFlush = 25 + opInit = 26 + opOpendir = 27 + opReaddir = 28 + opReleasedir = 29 + opFsyncdir = 30 + opGetlk = 31 + opSetlk = 32 + opSetlkw = 33 + opAccess = 34 + opCreate = 35 + opInterrupt = 36 + opBmap = 37 + opDestroy = 38 + opIoctl = 39 // Linux? + opPoll = 40 // Linux? 
+ + // OS X + opSetvolname = 61 + opGetxtimes = 62 + opExchange = 63 +) + +type entryOut struct { + outHeader + Nodeid uint64 // Inode ID + Generation uint64 // Inode generation + EntryValid uint64 // Cache timeout for the name + AttrValid uint64 // Cache timeout for the attributes + EntryValidNsec uint32 + AttrValidNsec uint32 + Attr attr +} + +type forgetIn struct { + Nlookup uint64 +} + +type attrOut struct { + outHeader + AttrValid uint64 // Cache timeout for the attributes + AttrValidNsec uint32 + Dummy uint32 + Attr attr +} + +// OS X +type getxtimesOut struct { + outHeader + Bkuptime uint64 + Crtime uint64 + BkuptimeNsec uint32 + CrtimeNsec uint32 +} + +type mknodIn struct { + Mode uint32 + Rdev uint32 + // "filename\x00" follows. +} + +type mkdirIn struct { + Mode uint32 + Padding uint32 + // filename follows +} + +type renameIn struct { + Newdir uint64 + // "oldname\x00newname\x00" follows +} + +// OS X +type exchangeIn struct { + Olddir uint64 + Newdir uint64 + Options uint64 +} + +type linkIn struct { + Oldnodeid uint64 +} + +type setattrInCommon struct { + Valid uint32 + Padding uint32 + Fh uint64 + Size uint64 + LockOwner uint64 // unused on OS X? 
+ Atime uint64 + Mtime uint64 + Unused2 uint64 + AtimeNsec uint32 + MtimeNsec uint32 + Unused3 uint32 + Mode uint32 + Unused4 uint32 + Uid uint32 + Gid uint32 + Unused5 uint32 +} + +type openIn struct { + Flags uint32 + Unused uint32 +} + +type openOut struct { + outHeader + Fh uint64 + OpenFlags uint32 + Padding uint32 +} + +type createIn struct { + Flags uint32 + Mode uint32 +} + +type createOut struct { + outHeader + + Nodeid uint64 // Inode ID + Generation uint64 // Inode generation + EntryValid uint64 // Cache timeout for the name + AttrValid uint64 // Cache timeout for the attributes + EntryValidNsec uint32 + AttrValidNsec uint32 + Attr attr + + Fh uint64 + OpenFlags uint32 + Padding uint32 +} + +type releaseIn struct { + Fh uint64 + Flags uint32 + ReleaseFlags uint32 + LockOwner uint32 +} + +type flushIn struct { + Fh uint64 + FlushFlags uint32 + Padding uint32 + LockOwner uint64 +} + +type readIn struct { + Fh uint64 + Offset uint64 + Size uint32 + Padding uint32 +} + +type writeIn struct { + Fh uint64 + Offset uint64 + Size uint32 + WriteFlags uint32 +} + +type writeOut struct { + outHeader + Size uint32 + Padding uint32 +} + +// The WriteFlags are passed in WriteRequest. 
+type WriteFlags uint32 + +func (fl WriteFlags) String() string { + return flagString(uint32(fl), writeFlagNames) +} + +var writeFlagNames = []flagName{} + +const compatStatfsSize = 48 + +type statfsOut struct { + outHeader + St kstatfs +} + +type fsyncIn struct { + Fh uint64 + FsyncFlags uint32 + Padding uint32 +} + +type setxattrInCommon struct { + Size uint32 + Flags uint32 +} + +func (setxattrInCommon) position() uint32 { + return 0 +} + +type getxattrInCommon struct { + Size uint32 + Padding uint32 +} + +func (getxattrInCommon) position() uint32 { + return 0 +} + +type getxattrOut struct { + outHeader + Size uint32 + Padding uint32 +} + +type lkIn struct { + Fh uint64 + Owner uint64 + Lk fileLock +} + +type lkOut struct { + outHeader + Lk fileLock +} + +type accessIn struct { + Mask uint32 + Padding uint32 +} + +type initIn struct { + Major uint32 + Minor uint32 + MaxReadahead uint32 + Flags uint32 +} + +const initInSize = int(unsafe.Sizeof(initIn{})) + +type initOut struct { + outHeader + Major uint32 + Minor uint32 + MaxReadahead uint32 + Flags uint32 + Unused uint32 + MaxWrite uint32 +} + +type interruptIn struct { + Unique uint64 +} + +type bmapIn struct { + Block uint64 + BlockSize uint32 + Padding uint32 +} + +type bmapOut struct { + outHeader + Block uint64 +} + +type inHeader struct { + Len uint32 + Opcode uint32 + Unique uint64 + Nodeid uint64 + Uid uint32 + Gid uint32 + Pid uint32 + Padding uint32 +} + +const inHeaderSize = int(unsafe.Sizeof(inHeader{})) + +type outHeader struct { + Len uint32 + Error int32 + Unique uint64 +} + +type dirent struct { + Ino uint64 + Off uint64 + Namelen uint32 + Type uint32 + Name [0]byte +} + +const direntSize = 8 + 8 + 4 + 4 diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel_darwin.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel_darwin.go new file mode 100644 index 00000000..4f9347d0 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel_darwin.go @@ -0,0 +1,86 @@ +package fuse + +import ( + "time" +) + +type attr struct { + Ino uint64 + Size uint64 + Blocks uint64 + Atime uint64 + Mtime uint64 + Ctime uint64 + Crtime_ uint64 // OS X only + AtimeNsec uint32 + MtimeNsec uint32 + CtimeNsec uint32 + CrtimeNsec uint32 // OS X only + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Flags_ uint32 // OS X only; see chflags(2) +} + +func (a *attr) SetCrtime(s uint64, ns uint32) { + a.Crtime_, a.CrtimeNsec = s, ns +} + +func (a *attr) SetFlags(f uint32) { + a.Flags_ = f +} + +type setattrIn struct { + setattrInCommon + + // OS X only + Bkuptime_ uint64 + Chgtime_ uint64 + Crtime uint64 + BkuptimeNsec uint32 + ChgtimeNsec uint32 + CrtimeNsec uint32 + Flags_ uint32 // see chflags(2) +} + +func (in *setattrIn) BkupTime() time.Time { + return time.Unix(int64(in.Bkuptime_), int64(in.BkuptimeNsec)) +} + +func (in *setattrIn) Chgtime() time.Time { + return time.Unix(int64(in.Chgtime_), int64(in.ChgtimeNsec)) +} + +func (in *setattrIn) Flags() uint32 { + return in.Flags_ +} + +func openFlags(flags uint32) OpenFlags { + return OpenFlags(flags) +} + +type getxattrIn struct { + getxattrInCommon + + // OS X only + Position uint32 + Padding uint32 +} + +func (g *getxattrIn) position() uint32 { + return g.Position +} + +type setxattrIn struct { + setxattrInCommon + + // OS X only + Position uint32 + Padding uint32 +} + +func (s *setxattrIn) position() uint32 { + return s.Position +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel_linux.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel_linux.go new file mode 100644 index 00000000..6a752457 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel_linux.go @@ -0,0 +1,70 @@ +package fuse + +import "time" + +type attr struct { + Ino uint64 + Size uint64 + 
Blocks uint64 + Atime uint64 + Mtime uint64 + Ctime uint64 + AtimeNsec uint32 + MtimeNsec uint32 + CtimeNsec uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + // Blksize uint32 // Only in protocol 7.9 + // padding_ uint32 // Only in protocol 7.9 +} + +func (a *attr) Crtime() time.Time { + return time.Time{} +} + +func (a *attr) SetCrtime(s uint64, ns uint32) { + // Ignored on Linux. +} + +func (a *attr) SetFlags(f uint32) { + // Ignored on Linux. +} + +type setattrIn struct { + setattrInCommon +} + +func (in *setattrIn) BkupTime() time.Time { + return time.Time{} +} + +func (in *setattrIn) Chgtime() time.Time { + return time.Time{} +} + +func (in *setattrIn) Flags() uint32 { + return 0 +} + +func openFlags(flags uint32) OpenFlags { + // on amd64, the 32-bit O_LARGEFILE flag is always seen; + // on i386, the flag probably depends on the app + // requesting, but in any case should be utterly + // uninteresting to us here; our kernel protocol messages + // are not directly related to the client app's kernel + // API/ABI + flags &^= 0x8000 + + return OpenFlags(flags) +} + +type getxattrIn struct { + getxattrInCommon +} + +type setxattrIn struct { + setxattrInCommon +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel_std.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel_std.go new file mode 100644 index 00000000..074cfd32 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel_std.go @@ -0,0 +1 @@ +package fuse diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel_test.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel_test.go new file mode 100644 index 00000000..9b5727fd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuse_kernel_test.go @@ -0,0 +1,31 @@ +package fuse_test + +import ( + "os" + "testing" + + 
"camlistore.org/third_party/bazil.org/fuse" +) + +func TestOpenFlagsAccmodeMask(t *testing.T) { + var f = fuse.OpenFlags(os.O_RDWR | os.O_SYNC) + if g, e := f&fuse.OpenAccessModeMask, fuse.OpenReadWrite; g != e { + t.Fatalf("OpenAccessModeMask behaves wrong: %v: %o != %o", f, g, e) + } + if f.IsReadOnly() { + t.Fatalf("IsReadOnly is wrong: %v", f) + } + if f.IsWriteOnly() { + t.Fatalf("IsWriteOnly is wrong: %v", f) + } + if !f.IsReadWrite() { + t.Fatalf("IsReadWrite is wrong: %v", f) + } +} + +func TestOpenFlagsString(t *testing.T) { + var f = fuse.OpenFlags(os.O_RDWR | os.O_SYNC | os.O_APPEND) + if g, e := f.String(), "OpenReadWrite+OpenAppend+OpenSync"; g != e { + t.Fatalf("OpenFlags.String: %q != %q", g, e) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuseutil/fuseutil.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuseutil/fuseutil.go new file mode 100644 index 00000000..04c5a8ab --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/fuseutil/fuseutil.go @@ -0,0 +1,20 @@ +package fuseutil + +import ( + "camlistore.org/third_party/bazil.org/fuse" +) + +// HandleRead handles a read request assuming that data is the entire file content. +// It adjusts the amount returned in resp according to req.Offset and req.Size. 
+func HandleRead(req *fuse.ReadRequest, resp *fuse.ReadResponse, data []byte) { + if req.Offset >= int64(len(data)) { + data = nil + } else { + data = data[req.Offset:] + } + if len(data) > req.Size { + data = data[:req.Size] + } + n := copy(resp.Data[:req.Size], data) + resp.Data = resp.Data[:n] +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/hellofs/hello.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/hellofs/hello.go new file mode 100644 index 00000000..c6292241 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/hellofs/hello.go @@ -0,0 +1,95 @@ +// Hellofs implements a simple "hello world" file system. +package main + +import ( + "flag" + "fmt" + "log" + "os" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs" + _ "camlistore.org/third_party/bazil.org/fuse/fs/fstestutil" +) + +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, " %s MOUNTPOINT\n", os.Args[0]) + flag.PrintDefaults() +} + +func main() { + flag.Usage = Usage + flag.Parse() + + if flag.NArg() != 1 { + Usage() + os.Exit(2) + } + mountpoint := flag.Arg(0) + + c, err := fuse.Mount( + mountpoint, + fuse.FSName("helloworld"), + fuse.Subtype("hellofs"), + fuse.LocalVolume(), + fuse.VolumeName("Hello world!"), + ) + if err != nil { + log.Fatal(err) + } + defer c.Close() + + err = fs.Serve(c, FS{}) + if err != nil { + log.Fatal(err) + } + + // check if the mount process has an error to report + <-c.Ready + if err := c.MountError; err != nil { + log.Fatal(err) + } +} + +// FS implements the hello world file system. +type FS struct{} + +func (FS) Root() (fs.Node, fuse.Error) { + return Dir{}, nil +} + +// Dir implements both Node and Handle for the root directory. 
+type Dir struct{} + +func (Dir) Attr() fuse.Attr { + return fuse.Attr{Inode: 1, Mode: os.ModeDir | 0555} +} + +func (Dir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + if name == "hello" { + return File{}, nil + } + return nil, fuse.ENOENT +} + +var dirDirs = []fuse.Dirent{ + {Inode: 2, Name: "hello", Type: fuse.DT_File}, +} + +func (Dir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { + return dirDirs, nil +} + +// File implements both Node and Handle for the hello file. +type File struct{} + +const greeting = "hello, world\n" + +func (File) Attr() fuse.Attr { + return fuse.Attr{Inode: 2, Mode: 0444, Size: uint64(len(greeting))} +} + +func (File) ReadAll(intr fs.Intr) ([]byte, fuse.Error) { + return []byte(greeting), nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/mount_darwin.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/mount_darwin.go new file mode 100644 index 00000000..6253ce82 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/mount_darwin.go @@ -0,0 +1,126 @@ +package fuse + +import ( + "bytes" + "errors" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + "syscall" +) + +var errNoAvail = errors.New("no available fuse devices") + +var errNotLoaded = errors.New("osxfusefs is not loaded") + +func loadOSXFUSE() error { + cmd := exec.Command("/Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs") + cmd.Dir = "/" + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + return err +} + +func openOSXFUSEDev() (*os.File, error) { + var f *os.File + var err error + for i := uint64(0); ; i++ { + path := "/dev/osxfuse" + strconv.FormatUint(i, 10) + f, err = os.OpenFile(path, os.O_RDWR, 0000) + if os.IsNotExist(err) { + if i == 0 { + // not even the first device was found -> fuse is not loaded + return nil, errNotLoaded + } + + // we've run out of kernel-provided devices + return nil, errNoAvail + } + + if err2, ok := 
err.(*os.PathError); ok && err2.Err == syscall.EBUSY { + // try the next one + continue + } + + if err != nil { + return nil, err + } + return f, nil + } +} + +func callMount(dir string, conf *MountConfig, f *os.File, ready chan<- struct{}, errp *error) error { + bin := "/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs" + + for k, v := range conf.options { + if strings.Contains(k, ",") || strings.Contains(v, ",") { + // Silly limitation but the mount helper does not + // understand any escaping. See TestMountOptionCommaError. + return fmt.Errorf("mount options cannot contain commas on OS X: %q=%q", k, v) + } + } + cmd := exec.Command( + bin, + "-o", conf.getOptions(), + // Tell osxfuse-kext how large our buffer is. It must split + // writes larger than this into multiple writes. + // + // OSXFUSE seems to ignore InitResponse.MaxWrite, and uses + // this instead. + "-o", "iosize="+strconv.FormatUint(maxWrite, 10), + // refers to fd passed in cmd.ExtraFiles + "3", + dir, + ) + cmd.ExtraFiles = []*os.File{f} + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "MOUNT_FUSEFS_CALL_BY_LIB=") + // TODO this is used for fs typenames etc, let app influence it + cmd.Env = append(cmd.Env, "MOUNT_FUSEFS_DAEMON_PATH="+bin) + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + + err := cmd.Start() + if err != nil { + return err + } + go func() { + err = cmd.Wait() + if err != nil { + if buf.Len() > 0 { + output := buf.Bytes() + output = bytes.TrimRight(output, "\n") + msg := err.Error() + ": " + string(output) + err = errors.New(msg) + } + } + *errp = err + close(ready) + }() + return err +} + +func mount(dir string, conf *MountConfig, ready chan<- struct{}, errp *error) (*os.File, error) { + f, err := openOSXFUSEDev() + if err == errNotLoaded { + err = loadOSXFUSE() + if err != nil { + return nil, err + } + // try again + f, err = openOSXFUSEDev() + } + if err != nil { + return nil, err + } + err = callMount(dir, conf, f, ready, errp) + if err != nil { + 
f.Close() + return nil, err + } + return f, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/mount_linux.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/mount_linux.go new file mode 100644 index 00000000..0748c0a5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/mount_linux.go @@ -0,0 +1,72 @@ +package fuse + +import ( + "fmt" + "net" + "os" + "os/exec" + "syscall" +) + +func mount(dir string, conf *MountConfig, ready chan<- struct{}, errp *error) (fusefd *os.File, err error) { + // linux mount is never delayed + close(ready) + + fds, err := syscall.Socketpair(syscall.AF_FILE, syscall.SOCK_STREAM, 0) + if err != nil { + return nil, fmt.Errorf("socketpair error: %v", err) + } + defer syscall.Close(fds[0]) + defer syscall.Close(fds[1]) + + cmd := exec.Command( + "fusermount", + "-o", conf.getOptions(), + "--", + dir, + ) + cmd.Env = append(os.Environ(), "_FUSE_COMMFD=3") + + writeFile := os.NewFile(uintptr(fds[0]), "fusermount-child-writes") + defer writeFile.Close() + cmd.ExtraFiles = []*os.File{writeFile} + + out, err := cmd.CombinedOutput() + if len(out) > 0 || err != nil { + return nil, fmt.Errorf("fusermount: %q, %v", out, err) + } + + readFile := os.NewFile(uintptr(fds[1]), "fusermount-parent-reads") + defer readFile.Close() + c, err := net.FileConn(readFile) + if err != nil { + return nil, fmt.Errorf("FileConn from fusermount socket: %v", err) + } + defer c.Close() + + uc, ok := c.(*net.UnixConn) + if !ok { + return nil, fmt.Errorf("unexpected FileConn type; expected UnixConn, got %T", c) + } + + buf := make([]byte, 32) // expect 1 byte + oob := make([]byte, 32) // expect 24 bytes + _, oobn, _, _, err := uc.ReadMsgUnix(buf, oob) + scms, err := syscall.ParseSocketControlMessage(oob[:oobn]) + if err != nil { + return nil, fmt.Errorf("ParseSocketControlMessage: %v", err) + } + if len(scms) != 1 { + return nil, fmt.Errorf("expected 1 SocketControlMessage; got scms = 
%#v", scms) + } + scm := scms[0] + gotFds, err := syscall.ParseUnixRights(&scm) + if err != nil { + return nil, fmt.Errorf("syscall.ParseUnixRights: %v", err) + } + if len(gotFds) != 1 { + return nil, fmt.Errorf("wanted 1 fd; got %#v", gotFds) + } + f := os.NewFile(uintptr(gotFds[0]), "/dev/fuse") + return f, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options.go new file mode 100644 index 00000000..643a9492 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options.go @@ -0,0 +1,100 @@ +package fuse + +import ( + "errors" + "strings" +) + +// MountConfig holds the configuration for a mount operation. +// Use it by passing MountOption values to Mount. +type MountConfig struct { + options map[string]string +} + +func escapeComma(s string) string { + s = strings.Replace(s, `\`, `\\`, -1) + s = strings.Replace(s, `,`, `\,`, -1) + return s +} + +// getOptions makes a string of options suitable for passing to FUSE +// mount flag `-o`. Returns an empty string if no options were set. +// Any platform specific adjustments should happen before the call. +func (m *MountConfig) getOptions() string { + var opts []string + for k, v := range m.options { + k = escapeComma(k) + if v != "" { + k += "=" + escapeComma(v) + } + opts = append(opts, k) + } + return strings.Join(opts, ",") +} + +// MountOption is passed to Mount to change the behavior of the mount. +type MountOption func(*MountConfig) error + +// FSName sets the file system name (also called source) that is +// visible in the list of mounted file systems. +func FSName(name string) MountOption { + return func(conf *MountConfig) error { + conf.options["fsname"] = name + return nil + } +} + +// Subtype sets the subtype of the mount. The main type is always +// `fuse`. The type in a list of mounted file systems will look like +// `fuse.foo`. 
+// +// OS X ignores this option. +func Subtype(fstype string) MountOption { + return func(conf *MountConfig) error { + conf.options["subtype"] = fstype + return nil + } +} + +// LocalVolume sets the volume to be local (instead of network), +// changing the behavior of Finder, Spotlight, and such. +// +// OS X only. Others ignore this option. +func LocalVolume() MountOption { + return localVolume +} + +// VolumeName sets the volume name shown in Finder. +// +// OS X only. Others ignore this option. +func VolumeName(name string) MountOption { + return volumeName(name) +} + +var ErrCannotCombineAllowOtherAndAllowRoot = errors.New("cannot combine AllowOther and AllowRoot") + +// AllowOther allows other users to access the file system. +// +// Only one of AllowOther or AllowRoot can be used. +func AllowOther() MountOption { + return func(conf *MountConfig) error { + if _, ok := conf.options["allow_root"]; ok { + return ErrCannotCombineAllowOtherAndAllowRoot + } + conf.options["allow_other"] = "" + return nil + } +} + +// AllowRoot allows other users to access the file system. +// +// Only one of AllowOther or AllowRoot can be used. 
+func AllowRoot() MountOption { + return func(conf *MountConfig) error { + if _, ok := conf.options["allow_other"]; ok { + return ErrCannotCombineAllowOtherAndAllowRoot + } + conf.options["allow_root"] = "" + return nil + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options_darwin.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options_darwin.go new file mode 100644 index 00000000..15aedbcf --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options_darwin.go @@ -0,0 +1,13 @@ +package fuse + +func localVolume(conf *MountConfig) error { + conf.options["local"] = "" + return nil +} + +func volumeName(name string) MountOption { + return func(conf *MountConfig) error { + conf.options["volname"] = name + return nil + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options_darwin_test.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options_darwin_test.go new file mode 100644 index 00000000..dd264667 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options_darwin_test.go @@ -0,0 +1,27 @@ +package fuse_test + +import ( + "testing" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs/fstestutil" +) + +func TestMountOptionCommaError(t *testing.T) { + t.Parallel() + // this test is not tied to FSName, but needs just some option + // with string content + var name = "FuseTest,Marker" + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, + fuse.FSName(name), + ) + switch { + case err == nil: + mnt.Close() + t.Fatal("expected an error about commas") + case err.Error() == `mount options cannot contain commas on OS X: "fsname"="FuseTest,Marker"`: + // all good + default: + t.Fatalf("expected an error about commas, got: %v", err) + } +} diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options_linux.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options_linux.go new file mode 100644 index 00000000..69dd406b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options_linux.go @@ -0,0 +1,13 @@ +package fuse + +func dummyOption(conf *MountConfig) error { + return nil +} + +func localVolume(conf *MountConfig) error { + return nil +} + +func volumeName(name string) MountOption { + return dummyOption +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options_test.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options_test.go new file mode 100644 index 00000000..aa212931 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/options_test.go @@ -0,0 +1,141 @@ +package fuse_test + +import ( + "runtime" + "testing" + + "camlistore.org/third_party/bazil.org/fuse" + "camlistore.org/third_party/bazil.org/fuse/fs/fstestutil" +) + +func init() { + fstestutil.DebugByDefault() +} + +func TestMountOptionFSName(t *testing.T) { + t.Parallel() + const name = "FuseTestMarker" + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, + fuse.FSName(name), + ) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + info, err := fstestutil.GetMountInfo(mnt.Dir) + if err != nil { + t.Fatal(err) + } + if g, e := info.FSName, name; g != e { + t.Errorf("wrong FSName: %q != %q", g, e) + } +} + +func testMountOptionFSNameEvil(t *testing.T, evil string) { + t.Parallel() + var name = "FuseTest" + evil + "Marker" + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, + fuse.FSName(name), + ) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + info, err := fstestutil.GetMountInfo(mnt.Dir) + if err != nil { + t.Fatal(err) + } + if g, e := info.FSName, name; g != e { + t.Errorf("wrong FSName: %q != %q", g, e) + } +} + 
+func TestMountOptionFSNameEvilComma(t *testing.T) { + if runtime.GOOS == "darwin" { + // see TestMountOptionCommaError for a test that enforces we + // at least give a nice error, instead of corrupting the mount + // options + t.Skip("TODO: OS X gets this wrong, commas in mount options cannot be escaped at all") + } + testMountOptionFSNameEvil(t, ",") +} + +func TestMountOptionFSNameEvilSpace(t *testing.T) { + testMountOptionFSNameEvil(t, " ") +} + +func TestMountOptionFSNameEvilTab(t *testing.T) { + testMountOptionFSNameEvil(t, "\t") +} + +func TestMountOptionFSNameEvilNewline(t *testing.T) { + testMountOptionFSNameEvil(t, "\n") +} + +func TestMountOptionFSNameEvilBackslash(t *testing.T) { + testMountOptionFSNameEvil(t, `\`) +} + +func TestMountOptionFSNameEvilBackslashDouble(t *testing.T) { + // catch double-unescaping, if it were to happen + testMountOptionFSNameEvil(t, `\\`) +} + +func TestMountOptionSubtype(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("OS X does not support Subtype") + } + t.Parallel() + const name = "FuseTestMarker" + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, + fuse.Subtype(name), + ) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + info, err := fstestutil.GetMountInfo(mnt.Dir) + if err != nil { + t.Fatal(err) + } + if g, e := info.Type, "fuse."+name; g != e { + t.Errorf("wrong Subtype: %q != %q", g, e) + } +} + +// TODO test LocalVolume + +// TODO test AllowOther; hard because needs system-level authorization + +func TestMountOptionAllowOtherThenAllowRoot(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, + fuse.AllowOther(), + fuse.AllowRoot(), + ) + if err == nil { + mnt.Close() + } + if g, e := err, fuse.ErrCannotCombineAllowOtherAndAllowRoot; g != e { + t.Fatalf("wrong error: %v != %v", g, e) + } +} + +// TODO test AllowRoot; hard because needs system-level authorization + +func TestMountOptionAllowRootThenAllowOther(t 
*testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, + fuse.AllowRoot(), + fuse.AllowOther(), + ) + if err == nil { + mnt.Close() + } + if g, e := err, fuse.ErrCannotCombineAllowOtherAndAllowRoot; g != e { + t.Fatalf("wrong error: %v != %v", g, e) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/doc.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/doc.go new file mode 100644 index 00000000..8ceee43b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/doc.go @@ -0,0 +1,13 @@ +// Package syscallx provides wrappers that make syscalls on various +// platforms more interoperable. +// +// The API intentionally omits the OS X-specific position and option +// arguments for extended attribute calls. +// +// Not having position means it might not be useful for accessing the +// resource fork. If that's needed by code inside fuse, a function +// with a different name may be added on the side. +// +// Options can be implemented with separate wrappers, in the style of +// Linux getxattr/lgetxattr/fgetxattr. 
+package syscallx diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/generate b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/generate new file mode 100755 index 00000000..476a282b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/generate @@ -0,0 +1,34 @@ +#!/bin/sh +set -e + +mksys="$(go env GOROOT)/src/pkg/syscall/mksyscall.pl" + +fix() { + sed 's,^package syscall$,&x\nimport "syscall",' \ + | gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \ + | gofmt -r='Syscall6 -> syscall.Syscall6' \ + | gofmt -r='Syscall -> syscall.Syscall' \ + | gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \ + | gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \ + | gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \ + | gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \ + | gofmt -r='SYS_MSYNC -> syscall.SYS_MSYNC' +} + +cd "$(dirname "$0")" + +$mksys xattr_darwin.go \ + | fix \ + >xattr_darwin_amd64.go + +$mksys -l32 xattr_darwin.go \ + | fix \ + >xattr_darwin_386.go + +$mksys msync.go \ + | fix \ + >msync_amd64.go + +$mksys -l32 msync.go \ + | fix \ + >msync_386.go diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/msync.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/msync.go new file mode 100644 index 00000000..30737e6d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/msync.go @@ -0,0 +1,9 @@ +package syscallx + +/* This is the source file for msync_*.go, to regenerate run + + ./generate + +*/ + +//sys Msync(b []byte, flags int) (err error) diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/msync_386.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/msync_386.go new file mode 100644 index 00000000..67259942 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/msync_386.go @@ -0,0 +1,24 @@ +// mksyscall.pl -l32 msync.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package syscallx + +import "syscall" + +import "unsafe" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall(syscall.SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/msync_amd64.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/msync_amd64.go new file mode 100644 index 00000000..0bbe1ab8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/msync_amd64.go @@ -0,0 +1,24 @@ +// mksyscall.pl msync.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package syscallx + +import "syscall" + +import "unsafe" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall(syscall.SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/syscallx.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/syscallx.go new file mode 100644 index 00000000..eb099129 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/syscallx.go @@ -0,0 +1,4 @@ +package syscallx + +// make us look more like package syscall, so mksyscall.pl output works +var _zero uintptr diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/syscallx_std.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/syscallx_std.go new file mode 100644 index 00000000..57353a53 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/syscallx_std.go @@ -0,0 +1,26 @@ +// +build !darwin + +package syscallx + +// This file just contains wrappers for platforms that already have +// the right stuff in stdlib. + +import ( + "syscall" +) + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + return syscall.Getxattr(path, attr, dest) +} + +func Listxattr(path string, dest []byte) (sz int, err error) { + return syscall.Listxattr(path, dest) +} + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return syscall.Setxattr(path, attr, data, flags) +} + +func Removexattr(path string, attr string) (err error) { + return syscall.Removexattr(path, attr) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/xattr_darwin.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/xattr_darwin.go new file mode 100644 index 00000000..b00f9020 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/xattr_darwin.go @@ -0,0 +1,38 @@ +package syscallx + +/* This is the source file for syscallx_darwin_*.go, to regenerate run + + ./generate + +*/ + +// cannot use dest []byte here because OS X getxattr really wants a +// NULL to trigger size probing, size==0 is not enough +// +//sys getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var destp *byte + if len(dest) > 0 { + destp = &dest[0] + } + return getxattr(path, attr, destp, len(dest), 0, 0) +} + +//sys listxattr(path string, dest []byte, options int) (sz int, 
err error) + +func Listxattr(path string, dest []byte) (sz int, err error) { + return listxattr(path, dest, 0) +} + +//sys setxattr(path string, attr string, data []byte, position uint32, flags int) (err error) + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return setxattr(path, attr, data, 0, flags) +} + +//sys removexattr(path string, attr string, options int) (err error) + +func Removexattr(path string, attr string) (err error) { + return removexattr(path, attr, 0) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/xattr_darwin_386.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/xattr_darwin_386.go new file mode 100644 index 00000000..ffc357ae --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/xattr_darwin_386.go @@ -0,0 +1,97 @@ +// mksyscall.pl -l32 xattr_darwin.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package syscallx + +import "syscall" + +import "unsafe" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + sz = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func listxattr(path string, dest []byte, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } 
else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0) + sz = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setxattr(path string, attr string, data []byte, position uint32, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(position), uintptr(flags)) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func removexattr(path string, attr string, options int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/xattr_darwin_amd64.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/xattr_darwin_amd64.go new file mode 100644 index 00000000..864c4c1d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/syscallx/xattr_darwin_amd64.go @@ -0,0 +1,97 @@ +// mksyscall.pl xattr_darwin.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package syscallx + +import "syscall" + +import "unsafe" + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + sz = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func listxattr(path string, dest []byte, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0) + sz = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setxattr(path string, attr string, data []byte, position uint32, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(position), uintptr(flags)) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func removexattr(path string, attr string, options int) (err 
error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/unmount.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/unmount.go new file mode 100644 index 00000000..ffe3f155 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/unmount.go @@ -0,0 +1,6 @@ +package fuse + +// Unmount tries to unmount the filesystem mounted at dir. +func Unmount(dir string) error { + return unmount(dir) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/unmount_linux.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/unmount_linux.go new file mode 100644 index 00000000..088f0cfe --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/unmount_linux.go @@ -0,0 +1,21 @@ +package fuse + +import ( + "bytes" + "errors" + "os/exec" +) + +func unmount(dir string) error { + cmd := exec.Command("fusermount", "-u", dir) + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + output = bytes.TrimRight(output, "\n") + msg := err.Error() + ": " + string(output) + err = errors.New(msg) + } + return err + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/unmount_std.go b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/unmount_std.go new file mode 100644 index 00000000..d6efe276 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/bazil.org/fuse/unmount_std.go @@ -0,0 +1,17 @@ +// +build !linux + +package fuse + +import ( + "os" + "syscall" +) + +func unmount(dir string) 
error { + err := syscall.Unmount(dir, 0) + if err != nil { + err = &os.PathError{Op: "unmount", Path: dir, Err: err} + return err + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/AUTHORS b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/AUTHORS new file mode 100644 index 00000000..c8ad75a8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/AUTHORS @@ -0,0 +1,17 @@ +# This is a list of contributors to the Closure Library. + +# Names should be added to this file like so: +# Name or Organization + +Google Inc. +Mohamed Mansour +Bjorn Tipling +SameGoal LLC +Guido Tapia +Andrew Mattie +Ilia Mirkin +Ivan Kozik +Rich Dougherty +Chad Killingsworth +Dan Pupius + diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/LICENSE new file mode 100644 index 00000000..d9a10c0d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/LICENSE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/README b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/README new file mode 100644 index 00000000..7a61255c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/README @@ -0,0 +1,6 @@ +Closure Library is a powerful, low level JavaScript library designed +for building complex and scalable web applications. It is used by many +major Google web applications, such as Gmail and Google Docs. 
+ +For more information about Closure Library, visit: +http://code.google.com/closure/library diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/a11y/aria/aria.js b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/a11y/aria/aria.js new file mode 100644 index 00000000..d9f04e33 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/a11y/aria/aria.js @@ -0,0 +1,364 @@ +// Copyright 2007 The Closure Library Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +/** + * @fileoverview Utilities for adding, removing and setting ARIA roles and + * states as defined by W3C ARIA standard: http://www.w3.org/TR/wai-aria/ + * All modern browsers have some form of ARIA support, so no browser checks are + * performed when adding ARIA to components. + * + */ + +goog.provide('goog.a11y.aria'); + +goog.require('goog.a11y.aria.Role'); +goog.require('goog.a11y.aria.State'); +goog.require('goog.a11y.aria.datatables'); +goog.require('goog.array'); +goog.require('goog.asserts'); +goog.require('goog.dom'); +goog.require('goog.dom.TagName'); +goog.require('goog.object'); + + +/** + * ARIA states/properties prefix. + * @private + */ +goog.a11y.aria.ARIA_PREFIX_ = 'aria-'; + + +/** + * ARIA role attribute. 
+ * @private + */ +goog.a11y.aria.ROLE_ATTRIBUTE_ = 'role'; + + +/** + * A list of tag names for which we don't need to set ARIA role and states + * because they have well supported semantics for screen readers or because + * they don't contain content to be made accessible. + * @private + */ +goog.a11y.aria.TAGS_WITH_ASSUMED_ROLES_ = [ + goog.dom.TagName.A, + goog.dom.TagName.AREA, + goog.dom.TagName.BUTTON, + goog.dom.TagName.HEAD, + goog.dom.TagName.INPUT, + goog.dom.TagName.LINK, + goog.dom.TagName.MENU, + goog.dom.TagName.META, + goog.dom.TagName.OPTGROUP, + goog.dom.TagName.OPTION, + goog.dom.TagName.PROGRESS, + goog.dom.TagName.STYLE, + goog.dom.TagName.SELECT, + goog.dom.TagName.SOURCE, + goog.dom.TagName.TEXTAREA, + goog.dom.TagName.TITLE, + goog.dom.TagName.TRACK +]; + + +/** + * Sets the role of an element. If the roleName is + * empty string or null, the role for the element is removed. + * We encourage clients to call the goog.a11y.aria.removeRole + * method instead of setting null and empty string values. + * Special handling for this case is added to ensure + * backword compatibility with existing code. + * + * @param {!Element} element DOM node to set role of. + * @param {!goog.a11y.aria.Role|string} roleName role name(s). + */ +goog.a11y.aria.setRole = function(element, roleName) { + if (!roleName) { + // Setting the ARIA role to empty string is not allowed + // by the ARIA standard. + goog.a11y.aria.removeRole(element); + } else { + if (goog.asserts.ENABLE_ASSERTS) { + goog.asserts.assert(goog.object.containsValue( + goog.a11y.aria.Role, roleName), 'No such ARIA role ' + roleName); + } + element.setAttribute(goog.a11y.aria.ROLE_ATTRIBUTE_, roleName); + } +}; + + +/** + * Gets role of an element. + * @param {!Element} element DOM element to get role of. + * @return {?goog.a11y.aria.Role} ARIA Role name. 
+ */ +goog.a11y.aria.getRole = function(element) { + var role = element.getAttribute(goog.a11y.aria.ROLE_ATTRIBUTE_); + return /** @type {goog.a11y.aria.Role} */ (role) || null; +}; + + +/** + * Removes role of an element. + * @param {!Element} element DOM element to remove the role from. + */ +goog.a11y.aria.removeRole = function(element) { + element.removeAttribute(goog.a11y.aria.ROLE_ATTRIBUTE_); +}; + + +/** + * Sets the state or property of an element. + * @param {!Element} element DOM node where we set state. + * @param {!(goog.a11y.aria.State|string)} stateName State attribute being set. + * Automatically adds prefix 'aria-' to the state name if the attribute is + * not an extra attribute. + * @param {string|boolean|number|!goog.array.ArrayLike.} value Value + * for the state attribute. + */ +goog.a11y.aria.setState = function(element, stateName, value) { + if (goog.isArrayLike(value)) { + var array = /** @type {!goog.array.ArrayLike.} */ (value); + value = array.join(' '); + } + var attrStateName = goog.a11y.aria.getAriaAttributeName_(stateName); + if (value === '' || value == undefined) { + var defaultValueMap = goog.a11y.aria.datatables.getDefaultValuesMap(); + // Work around for browsers that don't properly support ARIA. + // According to the ARIA W3C standard, user agents should allow + // setting empty value which results in setting the default value + // for the ARIA state if such exists. The exact text from the ARIA W3C + // standard (http://www.w3.org/TR/wai-aria/states_and_properties): + // "When a value is indicated as the default, the user agent + // MUST follow the behavior prescribed by this value when the state or + // property is empty or undefined." + // The defaultValueMap contains the default values for the ARIA states + // and has as a key the goog.a11y.aria.State constant for the state. 
+ if (stateName in defaultValueMap) { + element.setAttribute(attrStateName, defaultValueMap[stateName]); + } else { + element.removeAttribute(attrStateName); + } + } else { + element.setAttribute(attrStateName, value); + } +}; + + +/** + * Remove the state or property for the element. + * @param {!Element} element DOM node where we set state. + * @param {!goog.a11y.aria.State} stateName State name. + */ +goog.a11y.aria.removeState = function(element, stateName) { + element.removeAttribute(goog.a11y.aria.getAriaAttributeName_(stateName)); +}; + + +/** + * Gets value of specified state or property. + * @param {!Element} element DOM node to get state from. + * @param {!goog.a11y.aria.State|string} stateName State name. + * @return {string} Value of the state attribute. + */ +goog.a11y.aria.getState = function(element, stateName) { + // TODO(user): return properly typed value result -- + // boolean, number, string, null. We should be able to chain + // getState(...) and setState(...) methods. + + var attr = + /** @type {string|number|boolean} */ (element.getAttribute( + goog.a11y.aria.getAriaAttributeName_(stateName))); + var isNullOrUndefined = attr == null || attr == undefined; + return isNullOrUndefined ? '' : String(attr); +}; + + +/** + * Returns the activedescendant element for the input element by + * using the activedescendant ARIA property of the given element. + * @param {!Element} element DOM node to get activedescendant + * element for. + * @return {?Element} DOM node of the activedescendant, if found. + */ +goog.a11y.aria.getActiveDescendant = function(element) { + var id = goog.a11y.aria.getState( + element, goog.a11y.aria.State.ACTIVEDESCENDANT); + return goog.dom.getOwnerDocument(element).getElementById(id); +}; + + +/** + * Sets the activedescendant ARIA property value for an element. + * If the activeElement is not null, it should have an id set. + * @param {!Element} element DOM node to set activedescendant ARIA property to. 
+ * @param {?Element} activeElement DOM node being set as activedescendant. + */ +goog.a11y.aria.setActiveDescendant = function(element, activeElement) { + var id = ''; + if (activeElement) { + id = activeElement.id; + goog.asserts.assert(id, 'The active element should have an id.'); + } + + goog.a11y.aria.setState(element, goog.a11y.aria.State.ACTIVEDESCENDANT, id); +}; + + +/** + * Gets the label of the given element. + * @param {!Element} element DOM node to get label from. + * @return {string} label The label. + */ +goog.a11y.aria.getLabel = function(element) { + return goog.a11y.aria.getState(element, goog.a11y.aria.State.LABEL); +}; + + +/** + * Sets the label of the given element. + * @param {!Element} element DOM node to set label to. + * @param {string} label The label to set. + */ +goog.a11y.aria.setLabel = function(element, label) { + goog.a11y.aria.setState(element, goog.a11y.aria.State.LABEL, label); +}; + + +/** + * Asserts that the element has a role set if it's not an HTML element whose + * semantics is well supported by most screen readers. + * Only to be used internally by the ARIA library in goog.a11y.aria.*. + * @param {!Element} element The element to assert an ARIA role set. + * @param {!goog.array.ArrayLike.} allowedRoles The child roles of + * the roles. + */ +goog.a11y.aria.assertRoleIsSetInternalUtil = function(element, allowedRoles) { + if (goog.array.contains(goog.a11y.aria.TAGS_WITH_ASSUMED_ROLES_, + element.tagName)) { + return; + } + var elementRole = /** @type {string}*/ (goog.a11y.aria.getRole(element)); + goog.asserts.assert(elementRole != null, + 'The element ARIA role cannot be null.'); + + goog.asserts.assert(goog.array.contains(allowedRoles, elementRole), + 'Non existing or incorrect role set for element.' + + 'The role set is "' + elementRole + + '". The role should be any of "' + allowedRoles + + '". 
Check the ARIA specification for more details ' + + 'http://www.w3.org/TR/wai-aria/roles.'); +}; + + +/** + * Gets the boolean value of an ARIA state/property. + * @param {!Element} element The element to get the ARIA state for. + * @param {!goog.a11y.aria.State|string} stateName the ARIA state name. + * @return {?boolean} Boolean value for the ARIA state value or null if + * the state value is not 'true', not 'false', or not set. + */ +goog.a11y.aria.getStateBoolean = function(element, stateName) { + var attr = + /** @type {string|boolean} */ (element.getAttribute( + goog.a11y.aria.getAriaAttributeName_(stateName))); + goog.asserts.assert( + goog.isBoolean(attr) || attr == null || attr == 'true' || + attr == 'false'); + if (attr == null) { + return attr; + } + return goog.isBoolean(attr) ? attr : attr == 'true'; +}; + + +/** + * Gets the number value of an ARIA state/property. + * @param {!Element} element The element to get the ARIA state for. + * @param {!goog.a11y.aria.State|string} stateName the ARIA state name. + * @return {?number} Number value for the ARIA state value or null if + * the state value is not a number or not set. + */ +goog.a11y.aria.getStateNumber = function(element, stateName) { + var attr = + /** @type {string|number} */ (element.getAttribute( + goog.a11y.aria.getAriaAttributeName_(stateName))); + goog.asserts.assert((attr == null || !isNaN(Number(attr))) && + !goog.isBoolean(attr)); + return attr == null ? null : Number(attr); +}; + + +/** + * Gets the string value of an ARIA state/property. + * @param {!Element} element The element to get the ARIA state for. + * @param {!goog.a11y.aria.State|string} stateName the ARIA state name. + * @return {?string} String value for the ARIA state value or null if + * the state value is empty string or not set. 
+ */ +goog.a11y.aria.getStateString = function(element, stateName) { + var attr = element.getAttribute( + goog.a11y.aria.getAriaAttributeName_(stateName)); + goog.asserts.assert((attr == null || goog.isString(attr)) && + isNaN(Number(attr)) && attr != 'true' && attr != 'false'); + return attr == null ? null : attr; +}; + + +/** + * Gets array of strings value of the specified state or + * property for the element. + * Only to be used internally by the ARIA library in goog.a11y.aria.*. + * @param {!Element} element DOM node to get state from. + * @param {!goog.a11y.aria.State} stateName State name. + * @return {!goog.array.ArrayLike.} string Array + * value of the state attribute. + */ +goog.a11y.aria.getStringArrayStateInternalUtil = function(element, stateName) { + var attrValue = element.getAttribute( + goog.a11y.aria.getAriaAttributeName_(stateName)); + return goog.a11y.aria.splitStringOnWhitespace_(attrValue); +}; + + +/** + * Splits the input stringValue on whitespace. + * @param {string} stringValue The value of the string to split. + * @return {!goog.array.ArrayLike.} string Array + * value as result of the split. + * @private + */ +goog.a11y.aria.splitStringOnWhitespace_ = function(stringValue) { + return stringValue ? stringValue.split(/\s+/) : []; +}; + + +/** + * Adds the 'aria-' prefix to ariaName. + * @param {string} ariaName ARIA state/property name. + * @private + * @return {string} The ARIA attribute name with added 'aria-' prefix. + * @throws {Error} If no such attribute exists. 
+ */ +goog.a11y.aria.getAriaAttributeName_ = function(ariaName) { + if (goog.asserts.ENABLE_ASSERTS) { + goog.asserts.assert(ariaName, 'ARIA attribute cannot be empty.'); + goog.asserts.assert(goog.object.containsValue( + goog.a11y.aria.State, ariaName), + 'No such ARIA attribute ' + ariaName); + } + return goog.a11y.aria.ARIA_PREFIX_ + ariaName; +}; diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/a11y/aria/attributes.js b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/a11y/aria/attributes.js new file mode 100644 index 00000000..f4e0a3d0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/a11y/aria/attributes.js @@ -0,0 +1,389 @@ +// Copyright 2013 The Closure Library Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +/** + * @fileoverview The file contains generated enumerations for ARIA states + * and properties as defined by W3C ARIA standard: + * http://www.w3.org/TR/wai-aria/. + * + * This is auto-generated code. Do not manually edit! For more details + * about how to edit it via the generator check go/closure-ariagen. 
+ */ + +goog.provide('goog.a11y.aria.AutoCompleteValues'); +goog.provide('goog.a11y.aria.CheckedValues'); +goog.provide('goog.a11y.aria.DropEffectValues'); +goog.provide('goog.a11y.aria.ExpandedValues'); +goog.provide('goog.a11y.aria.GrabbedValues'); +goog.provide('goog.a11y.aria.InvalidValues'); +goog.provide('goog.a11y.aria.LivePriority'); +goog.provide('goog.a11y.aria.OrientationValues'); +goog.provide('goog.a11y.aria.PressedValues'); +goog.provide('goog.a11y.aria.RelevantValues'); +goog.provide('goog.a11y.aria.SelectedValues'); +goog.provide('goog.a11y.aria.SortValues'); +goog.provide('goog.a11y.aria.State'); + + +/** + * ARIA states and properties. + * @enum {string} + */ +goog.a11y.aria.State = { + // ARIA property for setting the currently active descendant of an element, + // for example the selected item in a list box. Value: ID of an element. + ACTIVEDESCENDANT: 'activedescendant', + + // ARIA property that, if true, indicates that all of a changed region should + // be presented, instead of only parts. Value: one of {true, false}. + ATOMIC: 'atomic', + + // ARIA property to specify that input completion is provided. Value: + // one of {'inline', 'list', 'both', 'none'}. + AUTOCOMPLETE: 'autocomplete', + + // ARIA state to indicate that an element and its subtree are being updated. + // Value: one of {true, false}. + BUSY: 'busy', + + // ARIA state for a checked item. Value: one of {'true', 'false', 'mixed', + // undefined}. + CHECKED: 'checked', + + // ARIA property that identifies the element or elements whose contents or + // presence are controlled by this element. + // Value: space-separated IDs of other elements. + CONTROLS: 'controls', + + // ARIA property that identifies the element or elements that describe + // this element. Value: space-separated IDs of other elements. + DESCRIBEDBY: 'describedby', + + // ARIA state for a disabled item. Value: one of {true, false}. 
+ DISABLED: 'disabled', + + // ARIA property that indicates what functions can be performed when a + // dragged object is released on the drop target. Value: one of + // {'copy', 'move', 'link', 'execute', 'popup', 'none'}. + DROPEFFECT: 'dropeffect', + + // ARIA state for setting whether the element like a tree node is expanded. + // Value: one of {true, false, undefined}. + EXPANDED: 'expanded', + + // ARIA property that identifies the next element (or elements) in the + // recommended reading order of content. Value: space-separated ids of + // elements to flow to. + FLOWTO: 'flowto', + + // ARIA state that indicates an element's "grabbed" state in drag-and-drop. + // Value: one of {true, false, undefined}. + GRABBED: 'grabbed', + + // ARIA property indicating whether the element has a popup. + // Value: one of {true, false}. + HASPOPUP: 'haspopup', + + // ARIA state indicating that the element is not visible or perceivable + // to any user. Value: one of {true, false}. + HIDDEN: 'hidden', + + // ARIA state indicating that the entered value does not conform. Value: + // one of {false, true, 'grammar', 'spelling'} + INVALID: 'invalid', + + // ARIA property that provides a label to override any other text, value, or + // contents used to describe this element. Value: string. + LABEL: 'label', + + // ARIA property for setting the element which labels another element. + // Value: space-separated IDs of elements. + LABELLEDBY: 'labelledby', + + // ARIA property for setting the level of an element in the hierarchy. + // Value: integer. + LEVEL: 'level', + + // ARIA property indicating that an element will be updated, and + // describes the types of updates the user agents, assistive technologies, + // and user can expect from the live region. Value: one of {'off', 'polite', + // 'assertive'}. + LIVE: 'live', + + // ARIA property indicating whether a text box can accept multiline input. + // Value: one of {true, false}. 
+ MULTILINE: 'multiline', + + // ARIA property indicating if the user may select more than one item. + // Value: one of {true, false}. + MULTISELECTABLE: 'multiselectable', + + // ARIA property indicating if the element is horizontal or vertical. + // Value: one of {'vertical', 'horizontal'}. + ORIENTATION: 'orientation', + + // ARIA property creating a visual, functional, or contextual parent/child + // relationship when the DOM hierarchy can't be used to represent it. + // Value: Space-separated IDs of elements. + OWNS: 'owns', + + // ARIA property that defines an element's number of position in a list. + // Value: integer. + POSINSET: 'posinset', + + // ARIA state for a pressed item. + // Value: one of {true, false, undefined, 'mixed'}. + PRESSED: 'pressed', + + // ARIA property indicating that an element is not editable. + // Value: one of {true, false}. + READONLY: 'readonly', + + // ARIA property indicating that change notifications within this subtree + // of a live region should be announced. Value: one of {'additions', + // 'removals', 'text', 'all', 'additions text'}. + RELEVANT: 'relevant', + + // ARIA property indicating that user input is required on this element + // before a form may be submitted. Value: one of {true, false}. + REQUIRED: 'required', + + // ARIA state for setting the currently selected item in the list. + // Value: one of {true, false, undefined}. + SELECTED: 'selected', + + // ARIA property defining the number of items in a list. Value: integer. + SETSIZE: 'setsize', + + // ARIA property indicating if items are sorted. Value: one of {'ascending', + // 'descending', 'none', 'other'}. + SORT: 'sort', + + // ARIA property for slider maximum value. Value: number. + VALUEMAX: 'valuemax', + + // ARIA property for slider minimum value. Value: number. + VALUEMIN: 'valuemin', + + // ARIA property for slider active value. Value: number. + VALUENOW: 'valuenow', + + // ARIA property for slider active value represented as text. 
+  // Value: string.
+  VALUETEXT: 'valuetext'
+};
+
+
+/**
+ * ARIA state values for AutoCompleteValues.
+ * @enum {string}
+ */
+goog.a11y.aria.AutoCompleteValues = {
+  // The system provides text after the caret as a suggestion
+  // for how to complete the field.
+  INLINE: 'inline',
+  // A list of choices appears from which the user can choose,
+  // but the edit box retains focus.
+  LIST: 'list',
+  // A list of choices appears and the currently selected suggestion
+  // also appears inline.
+  BOTH: 'both',
+  // No input completion suggestions are provided.
+  NONE: 'none'
+};
+
+
+/**
+ * ARIA state values for DropEffectValues.
+ * @enum {string}
+ */
+goog.a11y.aria.DropEffectValues = {
+  // A duplicate of the source object will be dropped into the target.
+  COPY: 'copy',
+  // The source object will be removed from its current location
+  // and dropped into the target.
+  MOVE: 'move',
+  // A reference or shortcut to the dragged object
+  // will be created in the target object.
+  LINK: 'link',
+  // A function supported by the drop target is
+  // executed, using the drag source as an input.
+  EXECUTE: 'execute',
+  // There is a popup menu or dialog that allows the user to choose
+  // one of the drag operations (copy, move, link, execute) and any other
+  // drag functionality, such as cancel.
+  POPUP: 'popup',
+  // No operation can be performed; effectively
+  // cancels the drag operation if an attempt is made to drop on this object.
+  NONE: 'none'
+};
+
+
+/**
+ * ARIA state values for LivePriority.
+ * @enum {string}
+ */
+goog.a11y.aria.LivePriority = {
+  // Updates to the region will not be presented to the user
+  // unless the assistive technology is currently focused on that region.
+  OFF: 'off',
+  // (Background change) Assistive technologies SHOULD announce
+  // updates at the next graceful opportunity, such as at the end of
+  // speaking the current sentence or when the user pauses typing.
+ POLITE: 'polite', + // This information has the highest priority and assistive + // technologies SHOULD notify the user immediately. + // Because an interruption may disorient users or cause them to not complete + // their current task, authors SHOULD NOT use the assertive value unless the + // interruption is imperative. + ASSERTIVE: 'assertive' +}; + + +/** + * ARIA state values for OrientationValues. + * @enum {string} + */ +goog.a11y.aria.OrientationValues = { + // The element is oriented vertically. + VERTICAL: 'vertical', + // The element is oriented horizontally. + HORIZONTAL: 'horizontal' +}; + + +/** + * ARIA state values for RelevantValues. + * @enum {string} + */ +goog.a11y.aria.RelevantValues = { + // Element nodes are added to the DOM within the live region. + ADDITIONS: 'additions', + // Text or element nodes within the live region are removed from the DOM. + REMOVALS: 'removals', + // Text is added to any DOM descendant nodes of the live region. + TEXT: 'text', + // Equivalent to the combination of all values, "additions removals text". + ALL: 'all' +}; + + +/** + * ARIA state values for SortValues. + * @enum {string} + */ +goog.a11y.aria.SortValues = { + // Items are sorted in ascending order by this column. + ASCENDING: 'ascending', + // Items are sorted in descending order by this column. + DESCENDING: 'descending', + // There is no defined sort applied to the column. + NONE: 'none', + // A sort algorithm other than ascending or descending has been applied. + OTHER: 'other' +}; + + +/** + * ARIA state values for CheckedValues. + * @enum {string} + */ +goog.a11y.aria.CheckedValues = { + // The selectable element is checked. + TRUE: 'true', + // The selectable element is not checked. + FALSE: 'false', + // Indicates a mixed mode value for a tri-state + // checkbox or menuitemcheckbox. + MIXED: 'mixed', + // The element does not support being checked. + UNDEFINED: 'undefined' +}; + + +/** + * ARIA state values for ExpandedValues. 
+ * @enum {string} + */ +goog.a11y.aria.ExpandedValues = { + // The element, or another grouping element it controls, is expanded. + TRUE: 'true', + // The element, or another grouping element it controls, is collapsed. + FALSE: 'false', + // The element, or another grouping element + // it controls, is neither expandable nor collapsible; all its + // child elements are shown or there are no child elements. + UNDEFINED: 'undefined' +}; + + +/** + * ARIA state values for GrabbedValues. + * @enum {string} + */ +goog.a11y.aria.GrabbedValues = { + // Indicates that the element has been "grabbed" for dragging. + TRUE: 'true', + // Indicates that the element supports being dragged. + FALSE: 'false', + // Indicates that the element does not support being dragged. + UNDEFINED: 'undefined' +}; + + +/** + * ARIA state values for InvalidValues. + * @enum {string} + */ +goog.a11y.aria.InvalidValues = { + // There are no detected errors in the value. + FALSE: 'false', + // The value entered by the user has failed validation. + TRUE: 'true', + // A grammatical error was detected. + GRAMMAR: 'grammar', + // A spelling error was detected. + SPELLING: 'spelling' +}; + + +/** + * ARIA state values for PressedValues. + * @enum {string} + */ +goog.a11y.aria.PressedValues = { + // The element is pressed. + TRUE: 'true', + // The element supports being pressed but is not currently pressed. + FALSE: 'false', + // Indicates a mixed mode value for a tri-state toggle button. + MIXED: 'mixed', + // The element does not support being pressed. + UNDEFINED: 'undefined' +}; + + +/** + * ARIA state values for SelectedValues. + * @enum {string} + */ +goog.a11y.aria.SelectedValues = { + // The selectable element is selected. + TRUE: 'true', + // The selectable element is not selected. + FALSE: 'false', + // The element is not selectable. 
+ UNDEFINED: 'undefined' +}; diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/a11y/aria/datatables.js b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/a11y/aria/datatables.js new file mode 100644 index 00000000..c97df208 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/a11y/aria/datatables.js @@ -0,0 +1,68 @@ +// Copyright 2013 The Closure Library Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + + +/** + * @fileoverview The file contains data tables generated from the ARIA + * standard schema http://www.w3.org/TR/wai-aria/. + * + * This is auto-generated code. Do not manually edit! + */ + +goog.provide('goog.a11y.aria.datatables'); + +goog.require('goog.a11y.aria.State'); +goog.require('goog.object'); + + +/** + * A map that contains mapping between an ARIA state and the default value + * for it. Note that not all ARIA states have default values. + * + * @type {Object.} + */ +goog.a11y.aria.DefaultStateValueMap_; + + +/** + * A method that creates a map that contains mapping between an ARIA state and + * the default value for it. Note that not all ARIA states have default values. + * + * @return {Object.} + * The names for each of the notification methods. 
+ */ +goog.a11y.aria.datatables.getDefaultValuesMap = function() { + if (!goog.a11y.aria.DefaultStateValueMap_) { + goog.a11y.aria.DefaultStateValueMap_ = goog.object.create( + goog.a11y.aria.State.ATOMIC, false, + goog.a11y.aria.State.AUTOCOMPLETE, 'none', + goog.a11y.aria.State.DROPEFFECT, 'none', + goog.a11y.aria.State.HASPOPUP, false, + goog.a11y.aria.State.LIVE, 'off', + goog.a11y.aria.State.MULTILINE, false, + goog.a11y.aria.State.MULTISELECTABLE, false, + goog.a11y.aria.State.ORIENTATION, 'vertical', + goog.a11y.aria.State.READONLY, false, + goog.a11y.aria.State.RELEVANT, 'additions text', + goog.a11y.aria.State.REQUIRED, false, + goog.a11y.aria.State.SORT, 'none', + goog.a11y.aria.State.BUSY, false, + goog.a11y.aria.State.DISABLED, false, + goog.a11y.aria.State.HIDDEN, false, + goog.a11y.aria.State.INVALID, 'false'); + } + + return goog.a11y.aria.DefaultStateValueMap_; +}; diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/a11y/aria/roles.js b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/a11y/aria/roles.js new file mode 100644 index 00000000..a282cc2d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/a11y/aria/roles.js @@ -0,0 +1,216 @@ +// Copyright 2013 The Closure Library Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ + +/** + * @fileoverview The file contains generated enumerations for ARIA roles + * as defined by W3C ARIA standard: http://www.w3.org/TR/wai-aria/. + * + * This is auto-generated code. Do not manually edit! For more details + * about how to edit it via the generator check go/closure-ariagen. + */ + +goog.provide('goog.a11y.aria.Role'); + + +/** + * ARIA role values. + * @enum {string} + */ +goog.a11y.aria.Role = { + // ARIA role for an alert element that doesn't need to be explicitly closed. + ALERT: 'alert', + + // ARIA role for an alert dialog element that takes focus and must be closed. + ALERTDIALOG: 'alertdialog', + + // ARIA role for an application that implements its own keyboard navigation. + APPLICATION: 'application', + + // ARIA role for an article. + ARTICLE: 'article', + + // ARIA role for a banner containing mostly site content, not page content. + BANNER: 'banner', + + // ARIA role for a button element. + BUTTON: 'button', + + // ARIA role for a checkbox button element; use with the CHECKED state. + CHECKBOX: 'checkbox', + + // ARIA role for a column header of a table or grid. + COLUMNHEADER: 'columnheader', + + // ARIA role for a combo box element. + COMBOBOX: 'combobox', + + // ARIA role for a supporting section of the document. + COMPLEMENTARY: 'complementary', + + // ARIA role for a large perceivable region that contains information + // about the parent document. + CONTENTINFO: 'contentinfo', + + // ARIA role for a definition of a term or concept. + DEFINITION: 'definition', + + // ARIA role for a dialog, some descendant must take initial focus. + DIALOG: 'dialog', + + // ARIA role for a directory, like a table of contents. + DIRECTORY: 'directory', + + // ARIA role for a part of a page that's a document, not a web application. + DOCUMENT: 'document', + + // ARIA role for a landmark region logically considered one form. + FORM: 'form', + + // ARIA role for an interactive control of tabular data. 
+ GRID: 'grid', + + // ARIA role for a cell in a grid. + GRIDCELL: 'gridcell', + + // ARIA role for a group of related elements like tree item siblings. + GROUP: 'group', + + // ARIA role for a heading element. + HEADING: 'heading', + + // ARIA role for a container of elements that together comprise one image. + IMG: 'img', + + // ARIA role for a link. + LINK: 'link', + + // ARIA role for a list of non-interactive list items. + LIST: 'list', + + // ARIA role for a listbox. + LISTBOX: 'listbox', + + // ARIA role for a list item. + LISTITEM: 'listitem', + + // ARIA role for a live region where new information is added. + LOG: 'log', + + // ARIA landmark role for the main content in a document. Use only once. + MAIN: 'main', + + // ARIA role for a live region of non-essential information that changes. + MARQUEE: 'marquee', + + // ARIA role for a mathematical expression. + MATH: 'math', + + // ARIA role for a popup menu. + MENU: 'menu', + + // ARIA role for a menubar element containing menu elements. + MENUBAR: 'menubar', + + // ARIA role for menu item elements. + MENU_ITEM: 'menuitem', + + // ARIA role for a checkbox box element inside a menu. + MENU_ITEM_CHECKBOX: 'menuitemcheckbox', + + // ARIA role for a radio button element inside a menu. + MENU_ITEM_RADIO: 'menuitemradio', + + // ARIA landmark role for a collection of navigation links. + NAVIGATION: 'navigation', + + // ARIA role for a section ancillary to the main content. + NOTE: 'note', + + // ARIA role for option items that are children of combobox, listbox, menu, + // radiogroup, or tree elements. + OPTION: 'option', + + // ARIA role for ignorable cosmetic elements with no semantic significance. + PRESENTATION: 'presentation', + + // ARIA role for a progress bar element. + PROGRESSBAR: 'progressbar', + + // ARIA role for a radio button element. + RADIO: 'radio', + + // ARIA role for a group of connected radio button elements. + RADIOGROUP: 'radiogroup', + + // ARIA role for an important region of the page. 
+ REGION: 'region', + + // ARIA role for a row of cells in a grid. + ROW: 'row', + + // ARIA role for a group of one or more rows in a grid. + ROWGROUP: 'rowgroup', + + // ARIA role for a row header of a table or grid. + ROWHEADER: 'rowheader', + + // ARIA role for a scrollbar element. + SCROLLBAR: 'scrollbar', + + // ARIA landmark role for a part of the page providing search functionality. + SEARCH: 'search', + + // ARIA role for a menu separator. + SEPARATOR: 'separator', + + // ARIA role for a slider. + SLIDER: 'slider', + + // ARIA role for a spin button. + SPINBUTTON: 'spinbutton', + + // ARIA role for a live region with advisory info less severe than an alert. + STATUS: 'status', + + // ARIA role for a tab button. + TAB: 'tab', + + // ARIA role for a tab bar (i.e. a list of tab buttons). + TAB_LIST: 'tablist', + + // ARIA role for a tab page (i.e. the element holding tab contents). + TAB_PANEL: 'tabpanel', + + // ARIA role for a textbox element. + TEXTBOX: 'textbox', + + // ARIA role for an element displaying elapsed time or time remaining. + TIMER: 'timer', + + // ARIA role for a toolbar element. + TOOLBAR: 'toolbar', + + // ARIA role for a tooltip element. + TOOLTIP: 'tooltip', + + // ARIA role for a tree. + TREE: 'tree', + + // ARIA role for a grid whose rows can be expanded and collapsed like a tree. + TREEGRID: 'treegrid', + + // ARIA role for a tree item that sometimes may be expanded or collapsed. + TREEITEM: 'treeitem' +}; diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/array/array.js b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/array/array.js new file mode 100644 index 00000000..d782cba8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/array/array.js @@ -0,0 +1,1526 @@ +// Copyright 2006 The Closure Library Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Utilities for manipulating arrays. + * + */ + + +goog.provide('goog.array'); +goog.provide('goog.array.ArrayLike'); + +goog.require('goog.asserts'); + + +/** + * @define {boolean} NATIVE_ARRAY_PROTOTYPES indicates whether the code should + * rely on Array.prototype functions, if available. + * + * The Array.prototype functions can be defined by external libraries like + * Prototype and setting this flag to false forces closure to use its own + * goog.array implementation. + * + * If your javascript can be loaded by a third party site and you are wary about + * relying on the prototype functions, specify + * "--define goog.NATIVE_ARRAY_PROTOTYPES=false" to the JSCompiler. + * + * Setting goog.TRUSTED_SITE to false will automatically set + * NATIVE_ARRAY_PROTOTYPES to false. + */ +goog.define('goog.NATIVE_ARRAY_PROTOTYPES', goog.TRUSTED_SITE); + + +/** + * @typedef {Array|NodeList|Arguments|{length: number}} + */ +goog.array.ArrayLike; + + +/** + * Returns the last element in an array without removing it. + * @param {Array.|goog.array.ArrayLike} array The array. + * @return {T} Last item in array. + * @template T + */ +goog.array.peek = function(array) { + return array[array.length - 1]; +}; + + +/** + * Reference to the original {@code Array.prototype}. 
+ * @private + */ +goog.array.ARRAY_PROTOTYPE_ = Array.prototype; + + +// NOTE(arv): Since most of the array functions are generic it allows you to +// pass an array-like object. Strings have a length and are considered array- +// like. However, the 'in' operator does not work on strings so we cannot just +// use the array path even if the browser supports indexing into strings. We +// therefore end up splitting the string. + + +/** + * Returns the index of the first element of an array with a specified value, or + * -1 if the element is not present in the array. + * + * See {@link http://tinyurl.com/developer-mozilla-org-array-indexof} + * + * @param {Array.|goog.array.ArrayLike} arr The array to be searched. + * @param {T} obj The object for which we are searching. + * @param {number=} opt_fromIndex The index at which to start the search. If + * omitted the search starts at index 0. + * @return {number} The index of the first matching array element. + * @template T + */ +goog.array.indexOf = goog.NATIVE_ARRAY_PROTOTYPES && + goog.array.ARRAY_PROTOTYPE_.indexOf ? + function(arr, obj, opt_fromIndex) { + goog.asserts.assert(arr.length != null); + + return goog.array.ARRAY_PROTOTYPE_.indexOf.call(arr, obj, opt_fromIndex); + } : + function(arr, obj, opt_fromIndex) { + var fromIndex = opt_fromIndex == null ? + 0 : (opt_fromIndex < 0 ? + Math.max(0, arr.length + opt_fromIndex) : opt_fromIndex); + + if (goog.isString(arr)) { + // Array.prototype.indexOf uses === so only strings should be found. + if (!goog.isString(obj) || obj.length != 1) { + return -1; + } + return arr.indexOf(obj, fromIndex); + } + + for (var i = fromIndex; i < arr.length; i++) { + if (i in arr && arr[i] === obj) + return i; + } + return -1; + }; + + +/** + * Returns the index of the last element of an array with a specified value, or + * -1 if the element is not present in the array. 
+ * + * See {@link http://tinyurl.com/developer-mozilla-org-array-lastindexof} + * + * @param {!Array.|!goog.array.ArrayLike} arr The array to be searched. + * @param {T} obj The object for which we are searching. + * @param {?number=} opt_fromIndex The index at which to start the search. If + * omitted the search starts at the end of the array. + * @return {number} The index of the last matching array element. + * @template T + */ +goog.array.lastIndexOf = goog.NATIVE_ARRAY_PROTOTYPES && + goog.array.ARRAY_PROTOTYPE_.lastIndexOf ? + function(arr, obj, opt_fromIndex) { + goog.asserts.assert(arr.length != null); + + // Firefox treats undefined and null as 0 in the fromIndex argument which + // leads it to always return -1 + var fromIndex = opt_fromIndex == null ? arr.length - 1 : opt_fromIndex; + return goog.array.ARRAY_PROTOTYPE_.lastIndexOf.call(arr, obj, fromIndex); + } : + function(arr, obj, opt_fromIndex) { + var fromIndex = opt_fromIndex == null ? arr.length - 1 : opt_fromIndex; + + if (fromIndex < 0) { + fromIndex = Math.max(0, arr.length + fromIndex); + } + + if (goog.isString(arr)) { + // Array.prototype.lastIndexOf uses === so only strings should be found. + if (!goog.isString(obj) || obj.length != 1) { + return -1; + } + return arr.lastIndexOf(obj, fromIndex); + } + + for (var i = fromIndex; i >= 0; i--) { + if (i in arr && arr[i] === obj) + return i; + } + return -1; + }; + + +/** + * Calls a function for each element in an array. Skips holes in the array. + * See {@link http://tinyurl.com/developer-mozilla-org-array-foreach} + * + * @param {Array.|goog.array.ArrayLike} arr Array or array like object over + * which to iterate. + * @param {?function(this: S, T, number, ?): ?} f The function to call for every + * element. This function takes 3 arguments (the element, the index and the + * array). The return value is ignored. + * @param {S=} opt_obj The object to be used as the value of 'this' within f. 
+ * @template T,S + */ +goog.array.forEach = goog.NATIVE_ARRAY_PROTOTYPES && + goog.array.ARRAY_PROTOTYPE_.forEach ? + function(arr, f, opt_obj) { + goog.asserts.assert(arr.length != null); + + goog.array.ARRAY_PROTOTYPE_.forEach.call(arr, f, opt_obj); + } : + function(arr, f, opt_obj) { + var l = arr.length; // must be fixed during loop... see docs + var arr2 = goog.isString(arr) ? arr.split('') : arr; + for (var i = 0; i < l; i++) { + if (i in arr2) { + f.call(opt_obj, arr2[i], i, arr); + } + } + }; + + +/** + * Calls a function for each element in an array, starting from the last + * element rather than the first. + * + * @param {Array.|goog.array.ArrayLike} arr Array or array + * like object over which to iterate. + * @param {?function(this: S, T, number, ?): ?} f The function to call for every + * element. This function + * takes 3 arguments (the element, the index and the array). The return + * value is ignored. + * @param {S=} opt_obj The object to be used as the value of 'this' + * within f. + * @template T,S + */ +goog.array.forEachRight = function(arr, f, opt_obj) { + var l = arr.length; // must be fixed during loop... see docs + var arr2 = goog.isString(arr) ? arr.split('') : arr; + for (var i = l - 1; i >= 0; --i) { + if (i in arr2) { + f.call(opt_obj, arr2[i], i, arr); + } + } +}; + + +/** + * Calls a function for each element in an array, and if the function returns + * true adds the element to a new array. + * + * See {@link http://tinyurl.com/developer-mozilla-org-array-filter} + * + * @param {Array.|goog.array.ArrayLike} arr Array or array + * like object over which to iterate. + * @param {?function(this:S, T, number, ?):boolean} f The function to call for + * every element. This function + * takes 3 arguments (the element, the index and the array) and must + * return a Boolean. If the return value is true the element is added to the + * result array. If it is false the element is not included. 
+ * @param {S=} opt_obj The object to be used as the value of 'this' + * within f. + * @return {!Array.} a new array in which only elements that passed the test + * are present. + * @template T,S + */ +goog.array.filter = goog.NATIVE_ARRAY_PROTOTYPES && + goog.array.ARRAY_PROTOTYPE_.filter ? + function(arr, f, opt_obj) { + goog.asserts.assert(arr.length != null); + + return goog.array.ARRAY_PROTOTYPE_.filter.call(arr, f, opt_obj); + } : + function(arr, f, opt_obj) { + var l = arr.length; // must be fixed during loop... see docs + var res = []; + var resLength = 0; + var arr2 = goog.isString(arr) ? arr.split('') : arr; + for (var i = 0; i < l; i++) { + if (i in arr2) { + var val = arr2[i]; // in case f mutates arr2 + if (f.call(opt_obj, val, i, arr)) { + res[resLength++] = val; + } + } + } + return res; + }; + + +/** + * Calls a function for each element in an array and inserts the result into a + * new array. + * + * See {@link http://tinyurl.com/developer-mozilla-org-array-map} + * + * @param {Array.|goog.array.ArrayLike} arr Array or array + * like object over which to iterate. + * @param {?function(this:S, T, number, ?):?} f The function to call for every + * element. This function + * takes 3 arguments (the element, the index and the array) and should + * return something. The result will be inserted into a new array. + * @param {S=} opt_obj The object to be used as the value of 'this' + * within f. + * @return {!Array} a new array with the results from f. + * @template T,S + */ +goog.array.map = goog.NATIVE_ARRAY_PROTOTYPES && + goog.array.ARRAY_PROTOTYPE_.map ? + function(arr, f, opt_obj) { + goog.asserts.assert(arr.length != null); + + return goog.array.ARRAY_PROTOTYPE_.map.call(arr, f, opt_obj); + } : + function(arr, f, opt_obj) { + var l = arr.length; // must be fixed during loop... see docs + var res = new Array(l); + var arr2 = goog.isString(arr) ? 
arr.split('') : arr; + for (var i = 0; i < l; i++) { + if (i in arr2) { + res[i] = f.call(opt_obj, arr2[i], i, arr); + } + } + return res; + }; + + +/** + * Passes every element of an array into a function and accumulates the result. + * + * See {@link http://tinyurl.com/developer-mozilla-org-array-reduce} + * + * For example: + * var a = [1, 2, 3, 4]; + * goog.array.reduce(a, function(r, v, i, arr) {return r + v;}, 0); + * returns 10 + * + * @param {Array.|goog.array.ArrayLike} arr Array or array + * like object over which to iterate. + * @param {?function(this:S, R, T, number, ?) : R} f The function to call for + * every element. This function + * takes 4 arguments (the function's previous result or the initial value, + * the value of the current array element, the current array index, and the + * array itself) + * function(previousValue, currentValue, index, array). + * @param {?} val The initial value to pass into the function on the first call. + * @param {S=} opt_obj The object to be used as the value of 'this' + * within f. + * @return {R} Result of evaluating f repeatedly across the values of the array. + * @template T,S,R + */ +goog.array.reduce = goog.NATIVE_ARRAY_PROTOTYPES && + goog.array.ARRAY_PROTOTYPE_.reduce ? + function(arr, f, val, opt_obj) { + goog.asserts.assert(arr.length != null); + if (opt_obj) { + f = goog.bind(f, opt_obj); + } + return goog.array.ARRAY_PROTOTYPE_.reduce.call(arr, f, val); + } : + function(arr, f, val, opt_obj) { + var rval = val; + goog.array.forEach(arr, function(val, index) { + rval = f.call(opt_obj, rval, val, index, arr); + }); + return rval; + }; + + +/** + * Passes every element of an array into a function and accumulates the result, + * starting from the last element and working towards the first. 
+ * + * See {@link http://tinyurl.com/developer-mozilla-org-array-reduceright} + * + * For example: + * var a = ['a', 'b', 'c']; + * goog.array.reduceRight(a, function(r, v, i, arr) {return r + v;}, ''); + * returns 'cba' + * + * @param {Array.|goog.array.ArrayLike} arr Array or array + * like object over which to iterate. + * @param {?function(this:S, R, T, number, ?) : R} f The function to call for + * every element. This function + * takes 4 arguments (the function's previous result or the initial value, + * the value of the current array element, the current array index, and the + * array itself) + * function(previousValue, currentValue, index, array). + * @param {?} val The initial value to pass into the function on the first call. + * @param {S=} opt_obj The object to be used as the value of 'this' + * within f. + * @return {R} Object returned as a result of evaluating f repeatedly across the + * values of the array. + * @template T,S,R + */ +goog.array.reduceRight = goog.NATIVE_ARRAY_PROTOTYPES && + goog.array.ARRAY_PROTOTYPE_.reduceRight ? + function(arr, f, val, opt_obj) { + goog.asserts.assert(arr.length != null); + if (opt_obj) { + f = goog.bind(f, opt_obj); + } + return goog.array.ARRAY_PROTOTYPE_.reduceRight.call(arr, f, val); + } : + function(arr, f, val, opt_obj) { + var rval = val; + goog.array.forEachRight(arr, function(val, index) { + rval = f.call(opt_obj, rval, val, index, arr); + }); + return rval; + }; + + +/** + * Calls f for each element of an array. If any call returns true, some() + * returns true (without checking the remaining elements). If all calls + * return false, some() returns false. + * + * See {@link http://tinyurl.com/developer-mozilla-org-array-some} + * + * @param {Array.|goog.array.ArrayLike} arr Array or array + * like object over which to iterate. + * @param {?function(this:S, T, number, ?) : boolean} f The function to call for + * for every element. 
This function takes 3 arguments (the element, the + * index and the array) and should return a boolean. + * @param {S=} opt_obj The object to be used as the value of 'this' + * within f. + * @return {boolean} true if any element passes the test. + * @template T,S + */ +goog.array.some = goog.NATIVE_ARRAY_PROTOTYPES && + goog.array.ARRAY_PROTOTYPE_.some ? + function(arr, f, opt_obj) { + goog.asserts.assert(arr.length != null); + + return goog.array.ARRAY_PROTOTYPE_.some.call(arr, f, opt_obj); + } : + function(arr, f, opt_obj) { + var l = arr.length; // must be fixed during loop... see docs + var arr2 = goog.isString(arr) ? arr.split('') : arr; + for (var i = 0; i < l; i++) { + if (i in arr2 && f.call(opt_obj, arr2[i], i, arr)) { + return true; + } + } + return false; + }; + + +/** + * Call f for each element of an array. If all calls return true, every() + * returns true. If any call returns false, every() returns false and + * does not continue to check the remaining elements. + * + * See {@link http://tinyurl.com/developer-mozilla-org-array-every} + * + * @param {Array.|goog.array.ArrayLike} arr Array or array + * like object over which to iterate. + * @param {?function(this:S, T, number, ?) : boolean} f The function to call for + * for every element. This function takes 3 arguments (the element, the + * index and the array) and should return a boolean. + * @param {S=} opt_obj The object to be used as the value of 'this' + * within f. + * @return {boolean} false if any element fails the test. + * @template T,S + */ +goog.array.every = goog.NATIVE_ARRAY_PROTOTYPES && + goog.array.ARRAY_PROTOTYPE_.every ? + function(arr, f, opt_obj) { + goog.asserts.assert(arr.length != null); + + return goog.array.ARRAY_PROTOTYPE_.every.call(arr, f, opt_obj); + } : + function(arr, f, opt_obj) { + var l = arr.length; // must be fixed during loop... see docs + var arr2 = goog.isString(arr) ? 
arr.split('') : arr; + for (var i = 0; i < l; i++) { + if (i in arr2 && !f.call(opt_obj, arr2[i], i, arr)) { + return false; + } + } + return true; + }; + + +/** + * Counts the array elements that fulfill the predicate, i.e. for which the + * callback function returns true. Skips holes in the array. + * + * @param {!(Array.|goog.array.ArrayLike)} arr Array or array like object + * over which to iterate. + * @param {function(this: S, T, number, ?): boolean} f The function to call for + * every element. Takes 3 arguments (the element, the index and the array). + * @param {S=} opt_obj The object to be used as the value of 'this' within f. + * @return {number} The number of the matching elements. + * @template T,S + */ +goog.array.count = function(arr, f, opt_obj) { + var count = 0; + goog.array.forEach(arr, function(element, index, arr) { + if (f.call(opt_obj, element, index, arr)) { + ++count; + } + }, opt_obj); + return count; +}; + + +/** + * Search an array for the first element that satisfies a given condition and + * return that element. + * @param {Array.|goog.array.ArrayLike} arr Array or array + * like object over which to iterate. + * @param {?function(this:S, T, number, ?) : boolean} f The function to call + * for every element. This function takes 3 arguments (the element, the + * index and the array) and should return a boolean. + * @param {S=} opt_obj An optional "this" context for the function. + * @return {T} The first array element that passes the test, or null if no + * element is found. + * @template T,S + */ +goog.array.find = function(arr, f, opt_obj) { + var i = goog.array.findIndex(arr, f, opt_obj); + return i < 0 ? null : goog.isString(arr) ? arr.charAt(i) : arr[i]; +}; + + +/** + * Search an array for the first element that satisfies a given condition and + * return its index. + * @param {Array.|goog.array.ArrayLike} arr Array or array + * like object over which to iterate. + * @param {?function(this:S, T, number, ?) 
: boolean} f The function to call for + * every element. This function + * takes 3 arguments (the element, the index and the array) and should + * return a boolean. + * @param {S=} opt_obj An optional "this" context for the function. + * @return {number} The index of the first array element that passes the test, + * or -1 if no element is found. + * @template T,S + */ +goog.array.findIndex = function(arr, f, opt_obj) { + var l = arr.length; // must be fixed during loop... see docs + var arr2 = goog.isString(arr) ? arr.split('') : arr; + for (var i = 0; i < l; i++) { + if (i in arr2 && f.call(opt_obj, arr2[i], i, arr)) { + return i; + } + } + return -1; +}; + + +/** + * Search an array (in reverse order) for the last element that satisfies a + * given condition and return that element. + * @param {Array.|goog.array.ArrayLike} arr Array or array + * like object over which to iterate. + * @param {?function(this:S, T, number, ?) : boolean} f The function to call + * for every element. This function + * takes 3 arguments (the element, the index and the array) and should + * return a boolean. + * @param {S=} opt_obj An optional "this" context for the function. + * @return {T} The last array element that passes the test, or null if no + * element is found. + * @template T,S + */ +goog.array.findRight = function(arr, f, opt_obj) { + var i = goog.array.findIndexRight(arr, f, opt_obj); + return i < 0 ? null : goog.isString(arr) ? arr.charAt(i) : arr[i]; +}; + + +/** + * Search an array (in reverse order) for the last element that satisfies a + * given condition and return its index. + * @param {Array.|goog.array.ArrayLike} arr Array or array + * like object over which to iterate. + * @param {?function(this:S, T, number, ?) : boolean} f The function to call + * for every element. This function + * takes 3 arguments (the element, the index and the array) and should + * return a boolean. + * @param {Object=} opt_obj An optional "this" context for the function. 
+ * @return {number} The index of the last array element that passes the test, + * or -1 if no element is found. + * @template T,S + */ +goog.array.findIndexRight = function(arr, f, opt_obj) { + var l = arr.length; // must be fixed during loop... see docs + var arr2 = goog.isString(arr) ? arr.split('') : arr; + for (var i = l - 1; i >= 0; i--) { + if (i in arr2 && f.call(opt_obj, arr2[i], i, arr)) { + return i; + } + } + return -1; +}; + + +/** + * Whether the array contains the given object. + * @param {goog.array.ArrayLike} arr The array to test for the presence of the + * element. + * @param {*} obj The object for which to test. + * @return {boolean} true if obj is present. + */ +goog.array.contains = function(arr, obj) { + return goog.array.indexOf(arr, obj) >= 0; +}; + + +/** + * Whether the array is empty. + * @param {goog.array.ArrayLike} arr The array to test. + * @return {boolean} true if empty. + */ +goog.array.isEmpty = function(arr) { + return arr.length == 0; +}; + + +/** + * Clears the array. + * @param {goog.array.ArrayLike} arr Array or array like object to clear. + */ +goog.array.clear = function(arr) { + // For non real arrays we don't have the magic length so we delete the + // indices. + if (!goog.isArray(arr)) { + for (var i = arr.length - 1; i >= 0; i--) { + delete arr[i]; + } + } + arr.length = 0; +}; + + +/** + * Pushes an item into an array, if it's not already in the array. + * @param {Array.} arr Array into which to insert the item. + * @param {T} obj Value to add. + * @template T + */ +goog.array.insert = function(arr, obj) { + if (!goog.array.contains(arr, obj)) { + arr.push(obj); + } +}; + + +/** + * Inserts an object at the given index of the array. + * @param {goog.array.ArrayLike} arr The array to modify. + * @param {*} obj The object to insert. + * @param {number=} opt_i The index at which to insert the object. If omitted, + * treated as 0. A negative index is counted from the end of the array. 
+ */ +goog.array.insertAt = function(arr, obj, opt_i) { + goog.array.splice(arr, opt_i, 0, obj); +}; + + +/** + * Inserts at the given index of the array, all elements of another array. + * @param {goog.array.ArrayLike} arr The array to modify. + * @param {goog.array.ArrayLike} elementsToAdd The array of elements to add. + * @param {number=} opt_i The index at which to insert the object. If omitted, + * treated as 0. A negative index is counted from the end of the array. + */ +goog.array.insertArrayAt = function(arr, elementsToAdd, opt_i) { + goog.partial(goog.array.splice, arr, opt_i, 0).apply(null, elementsToAdd); +}; + + +/** + * Inserts an object into an array before a specified object. + * @param {Array.} arr The array to modify. + * @param {T} obj The object to insert. + * @param {T=} opt_obj2 The object before which obj should be inserted. If obj2 + * is omitted or not found, obj is inserted at the end of the array. + * @template T + */ +goog.array.insertBefore = function(arr, obj, opt_obj2) { + var i; + if (arguments.length == 2 || (i = goog.array.indexOf(arr, opt_obj2)) < 0) { + arr.push(obj); + } else { + goog.array.insertAt(arr, obj, i); + } +}; + + +/** + * Removes the first occurrence of a particular value from an array. + * @param {Array.|goog.array.ArrayLike} arr Array from which to remove + * value. + * @param {T} obj Object to remove. + * @return {boolean} True if an element was removed. + * @template T + */ +goog.array.remove = function(arr, obj) { + var i = goog.array.indexOf(arr, obj); + var rv; + if ((rv = i >= 0)) { + goog.array.removeAt(arr, i); + } + return rv; +}; + + +/** + * Removes from an array the element at index i + * @param {goog.array.ArrayLike} arr Array or array like object from which to + * remove value. + * @param {number} i The index to remove. + * @return {boolean} True if an element was removed. 
+ */ +goog.array.removeAt = function(arr, i) { + goog.asserts.assert(arr.length != null); + + // use generic form of splice + // splice returns the removed items and if successful the length of that + // will be 1 + return goog.array.ARRAY_PROTOTYPE_.splice.call(arr, i, 1).length == 1; +}; + + +/** + * Removes the first value that satisfies the given condition. + * @param {Array.|goog.array.ArrayLike} arr Array or array + * like object over which to iterate. + * @param {?function(this:S, T, number, ?) : boolean} f The function to call + * for every element. This function + * takes 3 arguments (the element, the index and the array) and should + * return a boolean. + * @param {S=} opt_obj An optional "this" context for the function. + * @return {boolean} True if an element was removed. + * @template T,S + */ +goog.array.removeIf = function(arr, f, opt_obj) { + var i = goog.array.findIndex(arr, f, opt_obj); + if (i >= 0) { + goog.array.removeAt(arr, i); + return true; + } + return false; +}; + + +/** + * Returns a new array that is the result of joining the arguments. If arrays + * are passed then their items are added, however, if non-arrays are passed they + * will be added to the return array as is. + * + * Note that ArrayLike objects will be added as is, rather than having their + * items added. + * + * goog.array.concat([1, 2], [3, 4]) -> [1, 2, 3, 4] + * goog.array.concat(0, [1, 2]) -> [0, 1, 2] + * goog.array.concat([1, 2], null) -> [1, 2, null] + * + * There is bug in all current versions of IE (6, 7 and 8) where arrays created + * in an iframe become corrupted soon (not immediately) after the iframe is + * destroyed. This is common if loading data via goog.net.IframeIo, for example. + * This corruption only affects the concat method which will start throwing + * Catastrophic Errors (#-2147418113). + * + * See http://endoflow.com/scratch/corrupted-arrays.html for a test case. 
+ * + * Internally goog.array should use this, so that all methods will continue to + * work on these broken array objects. + * + * @param {...*} var_args Items to concatenate. Arrays will have each item + * added, while primitives and objects will be added as is. + * @return {!Array} The new resultant array. + */ +goog.array.concat = function(var_args) { + return goog.array.ARRAY_PROTOTYPE_.concat.apply( + goog.array.ARRAY_PROTOTYPE_, arguments); +}; + + +/** + * Converts an object to an array. + * @param {goog.array.ArrayLike} object The object to convert to an array. + * @return {!Array} The object converted into an array. If object has a + * length property, every property indexed with a non-negative number + * less than length will be included in the result. If object does not + * have a length property, an empty array will be returned. + */ +goog.array.toArray = function(object) { + var length = object.length; + + // If length is not a number the following it false. This case is kept for + // backwards compatibility since there are callers that pass objects that are + // not array like. + if (length > 0) { + var rv = new Array(length); + for (var i = 0; i < length; i++) { + rv[i] = object[i]; + } + return rv; + } + return []; +}; + + +/** + * Does a shallow copy of an array. + * @param {goog.array.ArrayLike} arr Array or array-like object to clone. + * @return {!Array} Clone of the input array. + */ +goog.array.clone = goog.array.toArray; + + +/** + * Extends an array with another array, element, or "array like" object. + * This function operates 'in-place', it does not create a new Array. + * + * Example: + * var a = []; + * goog.array.extend(a, [0, 1]); + * a; // [0, 1] + * goog.array.extend(a, 2); + * a; // [0, 1, 2] + * + * @param {Array} arr1 The array to modify. + * @param {...*} var_args The elements or arrays of elements to add to arr1. 
+ */ +goog.array.extend = function(arr1, var_args) { + for (var i = 1; i < arguments.length; i++) { + var arr2 = arguments[i]; + // If we have an Array or an Arguments object we can just call push + // directly. + var isArrayLike; + if (goog.isArray(arr2) || + // Detect Arguments. ES5 says that the [[Class]] of an Arguments object + // is "Arguments" but only V8 and JSC/Safari gets this right. We instead + // detect Arguments by checking for array like and presence of "callee". + (isArrayLike = goog.isArrayLike(arr2)) && + // The getter for callee throws an exception in strict mode + // according to section 10.6 in ES5 so check for presence instead. + Object.prototype.hasOwnProperty.call(arr2, 'callee')) { + arr1.push.apply(arr1, arr2); + } else if (isArrayLike) { + // Otherwise loop over arr2 to prevent copying the object. + var len1 = arr1.length; + var len2 = arr2.length; + for (var j = 0; j < len2; j++) { + arr1[len1 + j] = arr2[j]; + } + } else { + arr1.push(arr2); + } + } +}; + + +/** + * Adds or removes elements from an array. This is a generic version of Array + * splice. This means that it might work on other objects similar to arrays, + * such as the arguments object. + * + * @param {Array.|goog.array.ArrayLike} arr The array to modify. + * @param {number|undefined} index The index at which to start changing the + * array. If not defined, treated as 0. + * @param {number} howMany How many elements to remove (0 means no removal. A + * value below 0 is treated as zero and so is any other non number. Numbers + * are floored). + * @param {...T} var_args Optional, additional elements to insert into the + * array. + * @return {!Array.} the removed elements. + * @template T + */ +goog.array.splice = function(arr, index, howMany, var_args) { + goog.asserts.assert(arr.length != null); + + return goog.array.ARRAY_PROTOTYPE_.splice.apply( + arr, goog.array.slice(arguments, 1)); +}; + + +/** + * Returns a new array from a segment of an array. 
This is a generic version of + * Array slice. This means that it might work on other objects similar to + * arrays, such as the arguments object. + * + * @param {Array.|goog.array.ArrayLike} arr The array from + * which to copy a segment. + * @param {number} start The index of the first element to copy. + * @param {number=} opt_end The index after the last element to copy. + * @return {!Array.} A new array containing the specified segment of the + * original array. + * @template T + */ +goog.array.slice = function(arr, start, opt_end) { + goog.asserts.assert(arr.length != null); + + // passing 1 arg to slice is not the same as passing 2 where the second is + // null or undefined (in that case the second argument is treated as 0). + // we could use slice on the arguments object and then use apply instead of + // testing the length + if (arguments.length <= 2) { + return goog.array.ARRAY_PROTOTYPE_.slice.call(arr, start); + } else { + return goog.array.ARRAY_PROTOTYPE_.slice.call(arr, start, opt_end); + } +}; + + +/** + * Removes all duplicates from an array (retaining only the first + * occurrence of each array element). This function modifies the + * array in place and doesn't change the order of the non-duplicate items. + * + * For objects, duplicates are identified as having the same unique ID as + * defined by {@link goog.getUid}. + * + * Alternatively you can specify a custom hash function that returns a unique + * value for each item in the array it should consider unique. + * + * Runtime: N, + * Worstcase space: 2N (no dupes) + * + * @param {Array.|goog.array.ArrayLike} arr The array from which to remove + * duplicates. + * @param {Array=} opt_rv An optional array in which to return the results, + * instead of performing the removal inplace. If specified, the original + * array will remain unchanged. + * @param {function(T):string=} opt_hashFn An optional function to use to + * apply to every item in the array. 
This function should return a unique + * value for each item in the array it should consider unique. + * @template T + */ +goog.array.removeDuplicates = function(arr, opt_rv, opt_hashFn) { + var returnArray = opt_rv || arr; + var defaultHashFn = function(item) { + // Prefix each type with a single character representing the type to + // prevent conflicting keys (e.g. true and 'true'). + return goog.isObject(current) ? 'o' + goog.getUid(current) : + (typeof current).charAt(0) + current; + }; + var hashFn = opt_hashFn || defaultHashFn; + + var seen = {}, cursorInsert = 0, cursorRead = 0; + while (cursorRead < arr.length) { + var current = arr[cursorRead++]; + var key = hashFn(current); + if (!Object.prototype.hasOwnProperty.call(seen, key)) { + seen[key] = true; + returnArray[cursorInsert++] = current; + } + } + returnArray.length = cursorInsert; +}; + + +/** + * Searches the specified array for the specified target using the binary + * search algorithm. If no opt_compareFn is specified, elements are compared + * using goog.array.defaultCompare, which compares the elements + * using the built in < and > operators. This will produce the expected + * behavior for homogeneous arrays of String(s) and Number(s). The array + * specified must be sorted in ascending order (as defined by the + * comparison function). If the array is not sorted, results are undefined. + * If the array contains multiple instances of the specified target value, any + * of these instances may be found. + * + * Runtime: O(log n) + * + * @param {goog.array.ArrayLike} arr The array to be searched. + * @param {*} target The sought value. + * @param {Function=} opt_compareFn Optional comparison function by which the + * array is ordered. Should take 2 arguments to compare, and return a + * negative number, zero, or a positive number depending on whether the + * first argument is less than, equal to, or greater than the second. 
+ * @return {number} Lowest index of the target value if found, otherwise + * (-(insertion point) - 1). The insertion point is where the value should + * be inserted into arr to preserve the sorted property. Return value >= 0 + * iff target is found. + */ +goog.array.binarySearch = function(arr, target, opt_compareFn) { + return goog.array.binarySearch_(arr, + opt_compareFn || goog.array.defaultCompare, false /* isEvaluator */, + target); +}; + + +/** + * Selects an index in the specified array using the binary search algorithm. + * The evaluator receives an element and determines whether the desired index + * is before, at, or after it. The evaluator must be consistent (formally, + * goog.array.map(goog.array.map(arr, evaluator, opt_obj), goog.math.sign) + * must be monotonically non-increasing). + * + * Runtime: O(log n) + * + * @param {goog.array.ArrayLike} arr The array to be searched. + * @param {Function} evaluator Evaluator function that receives 3 arguments + * (the element, the index and the array). Should return a negative number, + * zero, or a positive number depending on whether the desired index is + * before, at, or after the element passed to it. + * @param {Object=} opt_obj The object to be used as the value of 'this' + * within evaluator. + * @return {number} Index of the leftmost element matched by the evaluator, if + * such exists; otherwise (-(insertion point) - 1). The insertion point is + * the index of the first element for which the evaluator returns negative, + * or arr.length if no such element exists. The return value is non-negative + * iff a match is found. + */ +goog.array.binarySelect = function(arr, evaluator, opt_obj) { + return goog.array.binarySearch_(arr, evaluator, true /* isEvaluator */, + undefined /* opt_target */, opt_obj); +}; + + +/** + * Implementation of a binary search algorithm which knows how to use both + * comparison functions and evaluators. 
If an evaluator is provided, will call + * the evaluator with the given optional data object, conforming to the + * interface defined in binarySelect. Otherwise, if a comparison function is + * provided, will call the comparison function against the given data object. + * + * This implementation purposefully does not use goog.bind or goog.partial for + * performance reasons. + * + * Runtime: O(log n) + * + * @param {goog.array.ArrayLike} arr The array to be searched. + * @param {Function} compareFn Either an evaluator or a comparison function, + * as defined by binarySearch and binarySelect above. + * @param {boolean} isEvaluator Whether the function is an evaluator or a + * comparison function. + * @param {*=} opt_target If the function is a comparison function, then this is + * the target to binary search for. + * @param {Object=} opt_selfObj If the function is an evaluator, this is an + * optional this object for the evaluator. + * @return {number} Lowest index of the target value if found, otherwise + * (-(insertion point) - 1). The insertion point is where the value should + * be inserted into arr to preserve the sorted property. Return value >= 0 + * iff target is found. + * @private + */ +goog.array.binarySearch_ = function(arr, compareFn, isEvaluator, opt_target, + opt_selfObj) { + var left = 0; // inclusive + var right = arr.length; // exclusive + var found; + while (left < right) { + var middle = (left + right) >> 1; + var compareResult; + if (isEvaluator) { + compareResult = compareFn.call(opt_selfObj, arr[middle], middle, arr); + } else { + compareResult = compareFn(opt_target, arr[middle]); + } + if (compareResult > 0) { + left = middle + 1; + } else { + right = middle; + // We are looking for the lowest index so we can't return immediately. + found = !compareResult; + } + } + // left is the index if found, or the insertion point otherwise. + // ~left is a shorthand for -left - 1. + return found ? 
left : ~left; +}; + + +/** + * Sorts the specified array into ascending order. If no opt_compareFn is + * specified, elements are compared using + * goog.array.defaultCompare, which compares the elements using + * the built in < and > operators. This will produce the expected behavior + * for homogeneous arrays of String(s) and Number(s), unlike the native sort, + * but will give unpredictable results for heterogenous lists of strings and + * numbers with different numbers of digits. + * + * This sort is not guaranteed to be stable. + * + * Runtime: Same as Array.prototype.sort + * + * @param {Array.} arr The array to be sorted. + * @param {?function(T,T):number=} opt_compareFn Optional comparison + * function by which the + * array is to be ordered. Should take 2 arguments to compare, and return a + * negative number, zero, or a positive number depending on whether the + * first argument is less than, equal to, or greater than the second. + * @template T + */ +goog.array.sort = function(arr, opt_compareFn) { + // TODO(arv): Update type annotation since null is not accepted. + goog.asserts.assert(arr.length != null); + + goog.array.ARRAY_PROTOTYPE_.sort.call( + arr, opt_compareFn || goog.array.defaultCompare); +}; + + +/** + * Sorts the specified array into ascending order in a stable way. If no + * opt_compareFn is specified, elements are compared using + * goog.array.defaultCompare, which compares the elements using + * the built in < and > operators. This will produce the expected behavior + * for homogeneous arrays of String(s) and Number(s). + * + * Runtime: Same as Array.prototype.sort, plus an additional + * O(n) overhead of copying the array twice. + * + * @param {Array.} arr The array to be sorted. + * @param {?function(T, T): number=} opt_compareFn Optional comparison function + * by which the array is to be ordered. 
Should take 2 arguments to compare, + * and return a negative number, zero, or a positive number depending on + * whether the first argument is less than, equal to, or greater than the + * second. + * @template T + */ +goog.array.stableSort = function(arr, opt_compareFn) { + for (var i = 0; i < arr.length; i++) { + arr[i] = {index: i, value: arr[i]}; + } + var valueCompareFn = opt_compareFn || goog.array.defaultCompare; + function stableCompareFn(obj1, obj2) { + return valueCompareFn(obj1.value, obj2.value) || obj1.index - obj2.index; + }; + goog.array.sort(arr, stableCompareFn); + for (var i = 0; i < arr.length; i++) { + arr[i] = arr[i].value; + } +}; + + +/** + * Sorts an array of objects by the specified object key and compare + * function. If no compare function is provided, the key values are + * compared in ascending order using goog.array.defaultCompare. + * This won't work for keys that get renamed by the compiler. So use + * {'foo': 1, 'bar': 2} rather than {foo: 1, bar: 2}. + * @param {Array.} arr An array of objects to sort. + * @param {string} key The object key to sort by. + * @param {Function=} opt_compareFn The function to use to compare key + * values. + */ +goog.array.sortObjectsByKey = function(arr, key, opt_compareFn) { + var compare = opt_compareFn || goog.array.defaultCompare; + goog.array.sort(arr, function(a, b) { + return compare(a[key], b[key]); + }); +}; + + +/** + * Tells if the array is sorted. + * @param {!Array.} arr The array. + * @param {?function(T,T):number=} opt_compareFn Function to compare the + * array elements. + * Should take 2 arguments to compare, and return a negative number, zero, + * or a positive number depending on whether the first argument is less + * than, equal to, or greater than the second. + * @param {boolean=} opt_strict If true no equal elements are allowed. + * @return {boolean} Whether the array is sorted. 
+ * @template T + */ +goog.array.isSorted = function(arr, opt_compareFn, opt_strict) { + var compare = opt_compareFn || goog.array.defaultCompare; + for (var i = 1; i < arr.length; i++) { + var compareResult = compare(arr[i - 1], arr[i]); + if (compareResult > 0 || compareResult == 0 && opt_strict) { + return false; + } + } + return true; +}; + + +/** + * Compares two arrays for equality. Two arrays are considered equal if they + * have the same length and their corresponding elements are equal according to + * the comparison function. + * + * @param {goog.array.ArrayLike} arr1 The first array to compare. + * @param {goog.array.ArrayLike} arr2 The second array to compare. + * @param {Function=} opt_equalsFn Optional comparison function. + * Should take 2 arguments to compare, and return true if the arguments + * are equal. Defaults to {@link goog.array.defaultCompareEquality} which + * compares the elements using the built-in '===' operator. + * @return {boolean} Whether the two arrays are equal. + */ +goog.array.equals = function(arr1, arr2, opt_equalsFn) { + if (!goog.isArrayLike(arr1) || !goog.isArrayLike(arr2) || + arr1.length != arr2.length) { + return false; + } + var l = arr1.length; + var equalsFn = opt_equalsFn || goog.array.defaultCompareEquality; + for (var i = 0; i < l; i++) { + if (!equalsFn(arr1[i], arr2[i])) { + return false; + } + } + return true; +}; + + +/** + * @deprecated Use {@link goog.array.equals}. + * @param {goog.array.ArrayLike} arr1 See {@link goog.array.equals}. + * @param {goog.array.ArrayLike} arr2 See {@link goog.array.equals}. + * @param {Function=} opt_equalsFn See {@link goog.array.equals}. + * @return {boolean} See {@link goog.array.equals}. + */ +goog.array.compare = function(arr1, arr2, opt_equalsFn) { + return goog.array.equals(arr1, arr2, opt_equalsFn); +}; + + +/** + * 3-way array compare function. + * @param {!goog.array.ArrayLike} arr1 The first array to compare. 
+ * @param {!goog.array.ArrayLike} arr2 The second array to compare. + * @param {?function(?, ?): number=} opt_compareFn Optional comparison function + * by which the array is to be ordered. Should take 2 arguments to compare, + * and return a negative number, zero, or a positive number depending on + * whether the first argument is less than, equal to, or greater than the + * second. + * @return {number} Negative number, zero, or a positive number depending on + * whether the first argument is less than, equal to, or greater than the + * second. + */ +goog.array.compare3 = function(arr1, arr2, opt_compareFn) { + var compare = opt_compareFn || goog.array.defaultCompare; + var l = Math.min(arr1.length, arr2.length); + for (var i = 0; i < l; i++) { + var result = compare(arr1[i], arr2[i]); + if (result != 0) { + return result; + } + } + return goog.array.defaultCompare(arr1.length, arr2.length); +}; + + +/** + * Compares its two arguments for order, using the built in < and > + * operators. + * @param {*} a The first object to be compared. + * @param {*} b The second object to be compared. + * @return {number} A negative number, zero, or a positive number as the first + * argument is less than, equal to, or greater than the second. + */ +goog.array.defaultCompare = function(a, b) { + return a > b ? 1 : a < b ? -1 : 0; +}; + + +/** + * Compares its two arguments for equality, using the built in === operator. + * @param {*} a The first object to compare. + * @param {*} b The second object to compare. + * @return {boolean} True if the two arguments are equal, false otherwise. + */ +goog.array.defaultCompareEquality = function(a, b) { + return a === b; +}; + + +/** + * Inserts a value into a sorted array. The array is not modified if the + * value is already present. + * @param {Array.} array The array to modify. + * @param {T} value The object to insert. + * @param {?function(T,T):number=} opt_compareFn Optional comparison function by + * which the + * array is ordered. 
Should take 2 arguments to compare, and return a + * negative number, zero, or a positive number depending on whether the + * first argument is less than, equal to, or greater than the second. + * @return {boolean} True if an element was inserted. + * @template T + */ +goog.array.binaryInsert = function(array, value, opt_compareFn) { + var index = goog.array.binarySearch(array, value, opt_compareFn); + if (index < 0) { + goog.array.insertAt(array, value, -(index + 1)); + return true; + } + return false; +}; + + +/** + * Removes a value from a sorted array. + * @param {Array} array The array to modify. + * @param {*} value The object to remove. + * @param {Function=} opt_compareFn Optional comparison function by which the + * array is ordered. Should take 2 arguments to compare, and return a + * negative number, zero, or a positive number depending on whether the + * first argument is less than, equal to, or greater than the second. + * @return {boolean} True if an element was removed. + */ +goog.array.binaryRemove = function(array, value, opt_compareFn) { + var index = goog.array.binarySearch(array, value, opt_compareFn); + return (index >= 0) ? goog.array.removeAt(array, index) : false; +}; + + +/** + * Splits an array into disjoint buckets according to a splitting function. + * @param {Array.} array The array. + * @param {function(this:S, T,number,Array.):?} sorter Function to call for + * every element. This takes 3 arguments (the element, the index and the + * array) and must return a valid object key (a string, number, etc), or + * undefined, if that object should not be placed in a bucket. + * @param {S=} opt_obj The object to be used as the value of 'this' within + * sorter. + * @return {!Object} An object, with keys being all of the unique return values + * of sorter, and values being arrays containing the items for + * which the splitter returned that key. 
+ * @template T,S + */ +goog.array.bucket = function(array, sorter, opt_obj) { + var buckets = {}; + + for (var i = 0; i < array.length; i++) { + var value = array[i]; + var key = sorter.call(opt_obj, value, i, array); + if (goog.isDef(key)) { + // Push the value to the right bucket, creating it if necessary. + var bucket = buckets[key] || (buckets[key] = []); + bucket.push(value); + } + } + + return buckets; +}; + + +/** + * Creates a new object built from the provided array and the key-generation + * function. + * @param {Array.|goog.array.ArrayLike} arr Array or array like object over + * which to iterate whose elements will be the values in the new object. + * @param {?function(this:S, T, number, ?) : string} keyFunc The function to + * call for every element. This function takes 3 arguments (the element, the + * index and the array) and should return a string that will be used as the + * key for the element in the new object. If the function returns the same + * key for more than one element, the value for that key is + * implementation-defined. + * @param {S=} opt_obj The object to be used as the value of 'this' + * within keyFunc. + * @return {!Object.} The new object. + * @template T,S + */ +goog.array.toObject = function(arr, keyFunc, opt_obj) { + var ret = {}; + goog.array.forEach(arr, function(element, index) { + ret[keyFunc.call(opt_obj, element, index, arr)] = element; + }); + return ret; +}; + + +/** + * Creates a range of numbers in an arithmetic progression. + * + * Range takes 1, 2, or 3 arguments: + *
    + * range(5) is the same as range(0, 5, 1) and produces [0, 1, 2, 3, 4]
    + * range(2, 5) is the same as range(2, 5, 1) and produces [2, 3, 4]
    + * range(-2, -5, -1) produces [-2, -3, -4]
    + * range(-2, -5, 1) produces [], since stepping by 1 wouldn't ever reach -5.
    + * 
    + * + * @param {number} startOrEnd The starting value of the range if an end argument + * is provided. Otherwise, the start value is 0, and this is the end value. + * @param {number=} opt_end The optional end value of the range. + * @param {number=} opt_step The step size between range values. Defaults to 1 + * if opt_step is undefined or 0. + * @return {!Array.} An array of numbers for the requested range. May be + * an empty array if adding the step would not converge toward the end + * value. + */ +goog.array.range = function(startOrEnd, opt_end, opt_step) { + var array = []; + var start = 0; + var end = startOrEnd; + var step = opt_step || 1; + if (opt_end !== undefined) { + start = startOrEnd; + end = opt_end; + } + + if (step * (end - start) < 0) { + // Sign mismatch: start + step will never reach the end value. + return []; + } + + if (step > 0) { + for (var i = start; i < end; i += step) { + array.push(i); + } + } else { + for (var i = start; i > end; i += step) { + array.push(i); + } + } + return array; +}; + + +/** + * Returns an array consisting of the given value repeated N times. + * + * @param {*} value The value to repeat. + * @param {number} n The repeat count. + * @return {!Array} An array with the repeated value. + */ +goog.array.repeat = function(value, n) { + var array = []; + for (var i = 0; i < n; i++) { + array[i] = value; + } + return array; +}; + + +/** + * Returns an array consisting of every argument with all arrays + * expanded in-place recursively. + * + * @param {...*} var_args The values to flatten. + * @return {!Array} An array containing the flattened values. + */ +goog.array.flatten = function(var_args) { + var result = []; + for (var i = 0; i < arguments.length; i++) { + var element = arguments[i]; + if (goog.isArray(element)) { + result.push.apply(result, goog.array.flatten.apply(null, element)); + } else { + result.push(element); + } + } + return result; +}; + + +/** + * Rotates an array in-place. 
After calling this method, the element at + * index i will be the element previously at index (i - n) % + * array.length, for all values of i between 0 and array.length - 1, + * inclusive. + * + * For example, suppose list comprises [t, a, n, k, s]. After invoking + * rotate(array, 1) (or rotate(array, -4)), array will comprise [s, t, a, n, k]. + * + * @param {!Array.} array The array to rotate. + * @param {number} n The amount to rotate. + * @return {!Array.} The array. + * @template T + */ +goog.array.rotate = function(array, n) { + goog.asserts.assert(array.length != null); + + if (array.length) { + n %= array.length; + if (n > 0) { + goog.array.ARRAY_PROTOTYPE_.unshift.apply(array, array.splice(-n, n)); + } else if (n < 0) { + goog.array.ARRAY_PROTOTYPE_.push.apply(array, array.splice(0, -n)); + } + } + return array; +}; + + +/** + * Moves one item of an array to a new position keeping the order of the rest + * of the items. Example use case: keeping a list of JavaScript objects + * synchronized with the corresponding list of DOM elements after one of the + * elements has been dragged to a new position. + * @param {!(Array|Arguments|{length:number})} arr The array to modify. + * @param {number} fromIndex Index of the item to move between 0 and + * {@code arr.length - 1}. + * @param {number} toIndex Target index between 0 and {@code arr.length - 1}. + */ +goog.array.moveItem = function(arr, fromIndex, toIndex) { + goog.asserts.assert(fromIndex >= 0 && fromIndex < arr.length); + goog.asserts.assert(toIndex >= 0 && toIndex < arr.length); + // Remove 1 item at fromIndex. + var removedItems = goog.array.ARRAY_PROTOTYPE_.splice.call(arr, fromIndex, 1); + // Insert the removed item at toIndex. + goog.array.ARRAY_PROTOTYPE_.splice.call(arr, toIndex, 0, removedItems[0]); + // We don't use goog.array.insertAt and goog.array.removeAt, because they're + // significantly slower than splice. 
+}; + + +/** + * Creates a new array for which the element at position i is an array of the + * ith element of the provided arrays. The returned array will only be as long + * as the shortest array provided; additional values are ignored. For example, + * the result of zipping [1, 2] and [3, 4, 5] is [[1,3], [2, 4]]. + * + * This is similar to the zip() function in Python. See {@link + * http://docs.python.org/library/functions.html#zip} + * + * @param {...!goog.array.ArrayLike} var_args Arrays to be combined. + * @return {!Array.} A new array of arrays created from provided arrays. + */ +goog.array.zip = function(var_args) { + if (!arguments.length) { + return []; + } + var result = []; + for (var i = 0; true; i++) { + var value = []; + for (var j = 0; j < arguments.length; j++) { + var arr = arguments[j]; + // If i is larger than the array length, this is the shortest array. + if (i >= arr.length) { + return result; + } + value.push(arr[i]); + } + result.push(value); + } +}; + + +/** + * Shuffles the values in the specified array using the Fisher-Yates in-place + * shuffle (also known as the Knuth Shuffle). By default, calls Math.random() + * and so resets the state of that random number generator. Similarly, may reset + * the state of the any other specified random number generator. + * + * Runtime: O(n) + * + * @param {!Array} arr The array to be shuffled. + * @param {function():number=} opt_randFn Optional random function to use for + * shuffling. + * Takes no arguments, and returns a random number on the interval [0, 1). + * Defaults to Math.random() using JavaScript's built-in Math library. + */ +goog.array.shuffle = function(arr, opt_randFn) { + var randFn = opt_randFn || Math.random; + + for (var i = arr.length - 1; i > 0; i--) { + // Choose a random array index in [0, i] (inclusive with i). 
+ var j = Math.floor(randFn() * (i + 1)); + + var tmp = arr[i]; + arr[i] = arr[j]; + arr[j] = tmp; + } +}; diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/asserts/asserts.js b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/asserts/asserts.js new file mode 100644 index 00000000..c37c8c55 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/asserts/asserts.js @@ -0,0 +1,315 @@ +// Copyright 2008 The Closure Library Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Utilities to check the preconditions, postconditions and + * invariants runtime. + * + * Methods in this package should be given special treatment by the compiler + * for type-inference. For example, goog.asserts.assert(foo) + * will restrict foo to a truthy value. + * + * The compiler has an option to disable asserts. So code like: + * + * var x = goog.asserts.assert(foo()); goog.asserts.assert(bar()); + * + * will be transformed into: + * + * var x = foo(); + * + * The compiler will leave in foo() (because its return value is used), + * but it will remove bar() because it assumes it does not have side-effects. 
+ * + */ + +goog.provide('goog.asserts'); +goog.provide('goog.asserts.AssertionError'); + +goog.require('goog.debug.Error'); +goog.require('goog.dom.NodeType'); +goog.require('goog.string'); + + +/** + * @define {boolean} Whether to strip out asserts or to leave them in. + */ +goog.define('goog.asserts.ENABLE_ASSERTS', goog.DEBUG); + + + +/** + * Error object for failed assertions. + * @param {string} messagePattern The pattern that was used to form message. + * @param {!Array.<*>} messageArgs The items to substitute into the pattern. + * @constructor + * @extends {goog.debug.Error} + * @final + */ +goog.asserts.AssertionError = function(messagePattern, messageArgs) { + messageArgs.unshift(messagePattern); + goog.debug.Error.call(this, goog.string.subs.apply(null, messageArgs)); + // Remove the messagePattern afterwards to avoid permenantly modifying the + // passed in array. + messageArgs.shift(); + + /** + * The message pattern used to format the error message. Error handlers can + * use this to uniquely identify the assertion. + * @type {string} + */ + this.messagePattern = messagePattern; +}; +goog.inherits(goog.asserts.AssertionError, goog.debug.Error); + + +/** @override */ +goog.asserts.AssertionError.prototype.name = 'AssertionError'; + + +/** + * Throws an exception with the given message and "Assertion failed" prefixed + * onto it. + * @param {string} defaultMessage The message to use if givenMessage is empty. + * @param {Array.<*>} defaultArgs The substitution arguments for defaultMessage. + * @param {string|undefined} givenMessage Message supplied by the caller. + * @param {Array.<*>} givenArgs The substitution arguments for givenMessage. + * @throws {goog.asserts.AssertionError} When the value is not a number. 
+ * @private + */ +goog.asserts.doAssertFailure_ = + function(defaultMessage, defaultArgs, givenMessage, givenArgs) { + var message = 'Assertion failed'; + if (givenMessage) { + message += ': ' + givenMessage; + var args = givenArgs; + } else if (defaultMessage) { + message += ': ' + defaultMessage; + args = defaultArgs; + } + // The '' + works around an Opera 10 bug in the unit tests. Without it, + // a stack trace is added to var message above. With this, a stack trace is + // not added until this line (it causes the extra garbage to be added after + // the assertion message instead of in the middle of it). + throw new goog.asserts.AssertionError('' + message, args || []); +}; + + +/** + * Checks if the condition evaluates to true if goog.asserts.ENABLE_ASSERTS is + * true. + * @param {*} condition The condition to check. + * @param {string=} opt_message Error message in case of failure. + * @param {...*} var_args The items to substitute into the failure message. + * @return {*} The value of the condition. + * @throws {goog.asserts.AssertionError} When the condition evaluates to false. + */ +goog.asserts.assert = function(condition, opt_message, var_args) { + if (goog.asserts.ENABLE_ASSERTS && !condition) { + goog.asserts.doAssertFailure_('', null, opt_message, + Array.prototype.slice.call(arguments, 2)); + } + return condition; +}; + + +/** + * Fails if goog.asserts.ENABLE_ASSERTS is true. This function is useful in case + * when we want to add a check in the unreachable area like switch-case + * statement: + * + *
    + *  switch(type) {
    + *    case FOO: doSomething(); break;
    + *    case BAR: doSomethingElse(); break;
    + *    default: goog.asserts.fail('Unrecognized type: ' + type);
    + *      // We have only 2 types - "default:" section is unreachable code.
    + *  }
    + * 
    + * + * @param {string=} opt_message Error message in case of failure. + * @param {...*} var_args The items to substitute into the failure message. + * @throws {goog.asserts.AssertionError} Failure. + */ +goog.asserts.fail = function(opt_message, var_args) { + if (goog.asserts.ENABLE_ASSERTS) { + throw new goog.asserts.AssertionError( + 'Failure' + (opt_message ? ': ' + opt_message : ''), + Array.prototype.slice.call(arguments, 1)); + } +}; + + +/** + * Checks if the value is a number if goog.asserts.ENABLE_ASSERTS is true. + * @param {*} value The value to check. + * @param {string=} opt_message Error message in case of failure. + * @param {...*} var_args The items to substitute into the failure message. + * @return {number} The value, guaranteed to be a number when asserts enabled. + * @throws {goog.asserts.AssertionError} When the value is not a number. + */ +goog.asserts.assertNumber = function(value, opt_message, var_args) { + if (goog.asserts.ENABLE_ASSERTS && !goog.isNumber(value)) { + goog.asserts.doAssertFailure_('Expected number but got %s: %s.', + [goog.typeOf(value), value], opt_message, + Array.prototype.slice.call(arguments, 2)); + } + return /** @type {number} */ (value); +}; + + +/** + * Checks if the value is a string if goog.asserts.ENABLE_ASSERTS is true. + * @param {*} value The value to check. + * @param {string=} opt_message Error message in case of failure. + * @param {...*} var_args The items to substitute into the failure message. + * @return {string} The value, guaranteed to be a string when asserts enabled. + * @throws {goog.asserts.AssertionError} When the value is not a string. 
+ */ +goog.asserts.assertString = function(value, opt_message, var_args) { + if (goog.asserts.ENABLE_ASSERTS && !goog.isString(value)) { + goog.asserts.doAssertFailure_('Expected string but got %s: %s.', + [goog.typeOf(value), value], opt_message, + Array.prototype.slice.call(arguments, 2)); + } + return /** @type {string} */ (value); +}; + + +/** + * Checks if the value is a function if goog.asserts.ENABLE_ASSERTS is true. + * @param {*} value The value to check. + * @param {string=} opt_message Error message in case of failure. + * @param {...*} var_args The items to substitute into the failure message. + * @return {!Function} The value, guaranteed to be a function when asserts + * enabled. + * @throws {goog.asserts.AssertionError} When the value is not a function. + */ +goog.asserts.assertFunction = function(value, opt_message, var_args) { + if (goog.asserts.ENABLE_ASSERTS && !goog.isFunction(value)) { + goog.asserts.doAssertFailure_('Expected function but got %s: %s.', + [goog.typeOf(value), value], opt_message, + Array.prototype.slice.call(arguments, 2)); + } + return /** @type {!Function} */ (value); +}; + + +/** + * Checks if the value is an Object if goog.asserts.ENABLE_ASSERTS is true. + * @param {*} value The value to check. + * @param {string=} opt_message Error message in case of failure. + * @param {...*} var_args The items to substitute into the failure message. + * @return {!Object} The value, guaranteed to be a non-null object. + * @throws {goog.asserts.AssertionError} When the value is not an object. + */ +goog.asserts.assertObject = function(value, opt_message, var_args) { + if (goog.asserts.ENABLE_ASSERTS && !goog.isObject(value)) { + goog.asserts.doAssertFailure_('Expected object but got %s: %s.', + [goog.typeOf(value), value], + opt_message, Array.prototype.slice.call(arguments, 2)); + } + return /** @type {!Object} */ (value); +}; + + +/** + * Checks if the value is an Array if goog.asserts.ENABLE_ASSERTS is true. 
+ * @param {*} value The value to check. + * @param {string=} opt_message Error message in case of failure. + * @param {...*} var_args The items to substitute into the failure message. + * @return {!Array} The value, guaranteed to be a non-null array. + * @throws {goog.asserts.AssertionError} When the value is not an array. + */ +goog.asserts.assertArray = function(value, opt_message, var_args) { + if (goog.asserts.ENABLE_ASSERTS && !goog.isArray(value)) { + goog.asserts.doAssertFailure_('Expected array but got %s: %s.', + [goog.typeOf(value), value], opt_message, + Array.prototype.slice.call(arguments, 2)); + } + return /** @type {!Array} */ (value); +}; + + +/** + * Checks if the value is a boolean if goog.asserts.ENABLE_ASSERTS is true. + * @param {*} value The value to check. + * @param {string=} opt_message Error message in case of failure. + * @param {...*} var_args The items to substitute into the failure message. + * @return {boolean} The value, guaranteed to be a boolean when asserts are + * enabled. + * @throws {goog.asserts.AssertionError} When the value is not a boolean. + */ +goog.asserts.assertBoolean = function(value, opt_message, var_args) { + if (goog.asserts.ENABLE_ASSERTS && !goog.isBoolean(value)) { + goog.asserts.doAssertFailure_('Expected boolean but got %s: %s.', + [goog.typeOf(value), value], opt_message, + Array.prototype.slice.call(arguments, 2)); + } + return /** @type {boolean} */ (value); +}; + + +/** + * Checks if the value is a DOM Element if goog.asserts.ENABLE_ASSERTS is true. + * @param {*} value The value to check. + * @param {string=} opt_message Error message in case of failure. + * @param {...*} var_args The items to substitute into the failure message. + * @return {!Element} The value, likely to be a DOM Element when asserts are + * enabled. + * @throws {goog.asserts.AssertionError} When the value is not a boolean. 
+ */ +goog.asserts.assertElement = function(value, opt_message, var_args) { + if (goog.asserts.ENABLE_ASSERTS && (!goog.isObject(value) || + value.nodeType != goog.dom.NodeType.ELEMENT)) { + goog.asserts.doAssertFailure_('Expected Element but got %s: %s.', + [goog.typeOf(value), value], opt_message, + Array.prototype.slice.call(arguments, 2)); + } + return /** @type {!Element} */ (value); +}; + + +/** + * Checks if the value is an instance of the user-defined type if + * goog.asserts.ENABLE_ASSERTS is true. + * + * The compiler may tighten the type returned by this function. + * + * @param {*} value The value to check. + * @param {function(new: T, ...)} type A user-defined constructor. + * @param {string=} opt_message Error message in case of failure. + * @param {...*} var_args The items to substitute into the failure message. + * @throws {goog.asserts.AssertionError} When the value is not an instance of + * type. + * @return {!T} + * @template T + */ +goog.asserts.assertInstanceof = function(value, type, opt_message, var_args) { + if (goog.asserts.ENABLE_ASSERTS && !(value instanceof type)) { + goog.asserts.doAssertFailure_('instanceof check failed.', null, + opt_message, Array.prototype.slice.call(arguments, 3)); + } + return value; +}; + + +/** + * Checks that no enumerable keys are present in Object.prototype. Such keys + * would break most code that use {@code for (var ... in ...)} loops. 
+ */ +goog.asserts.assertObjectPrototypeIsIntact = function() { + for (var key in Object.prototype) { + goog.asserts.fail(key + ' should not be enumerable in Object.prototype.'); + } +}; diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/async/nexttick.js b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/async/nexttick.js new file mode 100644 index 00000000..47b2c442 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/async/nexttick.js @@ -0,0 +1,176 @@ +// Copyright 2013 The Closure Library Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Provides a function to schedule running a function as soon + * as possible after the current JS execution stops and yields to the event + * loop. + * + */ + +goog.provide('goog.async.nextTick'); + +goog.require('goog.debug.entryPointRegistry'); +goog.require('goog.functions'); + + +/** + * Fires the provided callbacks as soon as possible after the current JS + * execution context. setTimeout(…, 0) always takes at least 5ms for legacy + * reasons. + * @param {function(this:SCOPE)} callback Callback function to fire as soon as + * possible. + * @param {SCOPE=} opt_context Object in whose scope to call the listener. 
+ * @template SCOPE + */ +goog.async.nextTick = function(callback, opt_context) { + var cb = callback; + if (opt_context) { + cb = goog.bind(callback, opt_context); + } + cb = goog.async.nextTick.wrapCallback_(cb); + // Introduced and currently only supported by IE10. + if (goog.isFunction(goog.global.setImmediate)) { + goog.global.setImmediate(cb); + return; + } + // Look for and cache the custom fallback version of setImmediate. + if (!goog.async.nextTick.setImmediate_) { + goog.async.nextTick.setImmediate_ = + goog.async.nextTick.getSetImmediateEmulator_(); + } + goog.async.nextTick.setImmediate_(cb); +}; + + +/** + * Cache for the setImmediate implementation. + * @type {function(function())} + * @private + */ +goog.async.nextTick.setImmediate_; + + +/** + * Determines the best possible implementation to run a function as soon as + * the JS event loop is idle. + * @return {function(function())} The "setImmediate" implementation. + * @private + */ +goog.async.nextTick.getSetImmediateEmulator_ = function() { + // Create a private message channel and use it to postMessage empty messages + // to ourselves. + var Channel = goog.global['MessageChannel']; + // If MessageChannel is not available and we are in a browser, implement + // an iframe based polyfill in browsers that have postMessage and + // document.addEventListener. The latter excludes IE8 because it has a + // synchronous postMessage implementation. + if (typeof Channel === 'undefined' && typeof window !== 'undefined' && + window.postMessage && window.addEventListener) { + /** @constructor */ + Channel = function() { + // Make an empty, invisible iframe. 
+ var iframe = document.createElement('iframe'); + iframe.style.display = 'none'; + iframe.src = ''; + document.documentElement.appendChild(iframe); + var win = iframe.contentWindow; + var doc = win.document; + doc.open(); + doc.write(''); + doc.close(); + var message = 'callImmediate' + Math.random(); + var origin = win.location.protocol + '//' + win.location.host; + var onmessage = goog.bind(function(e) { + // Validate origin and message to make sure that this message was + // intended for us. + if (e.origin != origin && e.data != message) { + return; + } + this['port1'].onmessage(); + }, this); + win.addEventListener('message', onmessage, false); + this['port1'] = {}; + this['port2'] = { + postMessage: function() { + win.postMessage(message, origin); + } + }; + }; + } + if (typeof Channel !== 'undefined') { + var channel = new Channel(); + // Use a fifo linked list to call callbacks in the right order. + var head = {}; + var tail = head; + channel['port1'].onmessage = function() { + head = head.next; + var cb = head.cb; + head.cb = null; + cb(); + }; + return function(cb) { + tail.next = { + cb: cb + }; + tail = tail.next; + channel['port2'].postMessage(0); + }; + } + // Implementation for IE6-8: Script elements fire an asynchronous + // onreadystatechange event when inserted into the DOM. + if (typeof document !== 'undefined' && 'onreadystatechange' in + document.createElement('script')) { + return function(cb) { + var script = document.createElement('script'); + script.onreadystatechange = function() { + // Clean up and call the callback. + script.onreadystatechange = null; + script.parentNode.removeChild(script); + script = null; + cb(); + cb = null; + }; + document.documentElement.appendChild(script); + }; + } + // Fall back to setTimeout with 0. In browsers this creates a delay of 5ms + // or more. 
+ return function(cb) { + goog.global.setTimeout(cb, 0); + }; +}; + + +/** + * Helper function that is overrided to protect callbacks with entry point + * monitor if the application monitors entry points. + * @param {function()} callback Callback function to fire as soon as possible. + * @return {function()} The wrapped callback. + * @private + */ +goog.async.nextTick.wrapCallback_ = goog.functions.identity; + + +// Register the callback function as an entry point, so that it can be +// monitored for exception handling, etc. This has to be done in this file +// since it requires special code to handle all browsers. +goog.debug.entryPointRegistry.register( + /** + * @param {function(!Function): !Function} transformer The transforming + * function. + */ + function(transformer) { + goog.async.nextTick.wrapCallback_ = transformer; + }); diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/async/run.js b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/async/run.js new file mode 100644 index 00000000..d152c3ef --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/async/run.js @@ -0,0 +1,118 @@ +// Copyright 2013 The Closure Library Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +goog.provide('goog.async.run'); +goog.provide('goog.async.throwException'); + +goog.require('goog.async.nextTick'); +goog.require('goog.testing.watchers'); + + +/** + * Throw an item without interrupting the current execution context. For + * example, if processing a group of items in a loop, sometimes it is useful + * to report an error while still allowing the rest of the batch to be + * processed. + * @param {*} exception + */ +goog.async.throwException = function(exception) { + // Each throw needs to be in its own context. + goog.async.nextTick(function() { throw exception; }); +}; + + +/** + * Fires the provided callback just before the current callstack unwinds, or as + * soon as possible after the current JS execution context. + * @param {function(this:THIS)} callback + * @param {THIS=} opt_context Object to use as the "this value" when calling + * the provided function. + * @template THIS + */ +goog.async.run = function(callback, opt_context) { + if (!goog.async.run.workQueueScheduled_) { + // Nothing is currently scheduled, schedule it now. + goog.async.nextTick(goog.async.run.processWorkQueue); + goog.async.run.workQueueScheduled_ = true; + } + + goog.async.run.workQueue_.push( + new goog.async.run.WorkItem_(callback, opt_context)); +}; + + +/** @private {boolean} */ +goog.async.run.workQueueScheduled_ = false; + + +/** @private {!Array.} */ +goog.async.run.workQueue_ = []; + + +if (goog.DEBUG) { + /** + * Reset the event queue. + * @private + */ + goog.async.run.resetQueue_ = function() { + goog.async.run.workQueueScheduled_ = false; + goog.async.run.workQueue_ = []; + }; + + // If there is a clock implemenation in use for testing + // and it is reset, reset the queue. + goog.testing.watchers.watchClockReset(goog.async.run.resetQueue_); +} + + +/** + * Run any pending goog.async.run work items. This function is not intended + * for general use, but for use by entry point handlers to run items ahead of + * goog.async.nextTick. 
+ */ +goog.async.run.processWorkQueue = function() { + // NOTE: additional work queue items may be pushed while processing. + while (goog.async.run.workQueue_.length) { + // Don't let the work queue grow indefinitely. + var workItems = goog.async.run.workQueue_; + goog.async.run.workQueue_ = []; + for (var i = 0; i < workItems.length; i++) { + var workItem = workItems[i]; + try { + workItem.fn.call(workItem.scope); + } catch (e) { + goog.async.throwException(e); + } + } + } + + // There are no more work items, reset the work queue. + goog.async.run.workQueueScheduled_ = false; +}; + + + +/** + * @constructor + * @final + * @struct + * @private + * + * @param {function()} fn + * @param {Object|null|undefined} scope + */ +goog.async.run.WorkItem_ = function(fn, scope) { + /** @const */ this.fn = fn; + /** @const */ this.scope = scope; +}; diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/async/throttle.js b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/async/throttle.js new file mode 100644 index 00000000..346a5b68 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/async/throttle.js @@ -0,0 +1,191 @@ +// Copyright 2007 The Closure Library Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Definition of the goog.async.Throttle class. 
+ * + * @see ../demos/timers.html + */ + +goog.provide('goog.Throttle'); +goog.provide('goog.async.Throttle'); + +goog.require('goog.Disposable'); +goog.require('goog.Timer'); + + + +/** + * Throttle will perform an action that is passed in no more than once + * per interval (specified in milliseconds). If it gets multiple signals + * to perform the action while it is waiting, it will only perform the action + * once at the end of the interval. + * @param {Function} listener Function to callback when the action is triggered. + * @param {number} interval Interval over which to throttle. The handler can + * only be called once per interval. + * @param {Object=} opt_handler Object in whose scope to call the listener. + * @constructor + * @extends {goog.Disposable} + * @final + */ +goog.async.Throttle = function(listener, interval, opt_handler) { + goog.Disposable.call(this); + + /** + * Function to callback + * @type {Function} + * @private + */ + this.listener_ = listener; + + /** + * Interval for the throttle time + * @type {number} + * @private + */ + this.interval_ = interval; + + /** + * "this" context for the listener + * @type {Object|undefined} + * @private + */ + this.handler_ = opt_handler; + + /** + * Cached callback function invoked after the throttle timeout completes + * @type {Function} + * @private + */ + this.callback_ = goog.bind(this.onTimer_, this); +}; +goog.inherits(goog.async.Throttle, goog.Disposable); + + + +/** + * A deprecated alias. + * @deprecated Use goog.async.Throttle instead. + * @constructor + * @final + */ +goog.Throttle = goog.async.Throttle; + + +/** + * Indicates that the action is pending and needs to be fired. + * @type {boolean} + * @private + */ +goog.async.Throttle.prototype.shouldFire_ = false; + + +/** + * Indicates the count of nested pauses currently in effect on the throttle. + * When this count is not zero, fired actions will be postponed until the + * throttle is resumed enough times to drop the pause count to zero. 
+ * @type {number} + * @private + */ +goog.async.Throttle.prototype.pauseCount_ = 0; + + +/** + * Timer for scheduling the next callback + * @type {?number} + * @private + */ +goog.async.Throttle.prototype.timer_ = null; + + +/** + * Notifies the throttle that the action has happened. It will throttle the call + * so that the callback is not called too often according to the interval + * parameter passed to the constructor. + */ +goog.async.Throttle.prototype.fire = function() { + if (!this.timer_ && !this.pauseCount_) { + this.doAction_(); + } else { + this.shouldFire_ = true; + } +}; + + +/** + * Cancels any pending action callback. The throttle can be restarted by + * calling {@link #fire}. + */ +goog.async.Throttle.prototype.stop = function() { + if (this.timer_) { + goog.Timer.clear(this.timer_); + this.timer_ = null; + this.shouldFire_ = false; + } +}; + + +/** + * Pauses the throttle. All pending and future action callbacks will be + * delayed until the throttle is resumed. Pauses can be nested. + */ +goog.async.Throttle.prototype.pause = function() { + this.pauseCount_++; +}; + + +/** + * Resumes the throttle. If doing so drops the pausing count to zero, pending + * action callbacks will be executed as soon as possible, but still no sooner + * than an interval's delay after the previous call. Future action callbacks + * will be executed as normal. 
+ */ +goog.async.Throttle.prototype.resume = function() { + this.pauseCount_--; + if (!this.pauseCount_ && this.shouldFire_ && !this.timer_) { + this.shouldFire_ = false; + this.doAction_(); + } +}; + + +/** @override */ +goog.async.Throttle.prototype.disposeInternal = function() { + goog.async.Throttle.superClass_.disposeInternal.call(this); + this.stop(); +}; + + +/** + * Handler for the timer to fire the throttle + * @private + */ +goog.async.Throttle.prototype.onTimer_ = function() { + this.timer_ = null; + + if (this.shouldFire_ && !this.pauseCount_) { + this.shouldFire_ = false; + this.doAction_(); + } +}; + + +/** + * Calls the callback + * @private + */ +goog.async.Throttle.prototype.doAction_ = function() { + this.timer_ = goog.Timer.callOnce(this.callback_, this.interval_); + this.listener_.call(this.handler_); +}; diff --git a/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/base.js b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/base.js new file mode 100644 index 00000000..66674e65 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/closure/lib/closure/goog/base.js @@ -0,0 +1,1631 @@ +// Copyright 2006 The Closure Library Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Bootstrap for the Google JS Library (Closure). 
+ * + * In uncompiled mode base.js will write out Closure's deps file, unless the + * global CLOSURE_NO_DEPS is set to true. This allows projects to + * include their own deps file(s) from different locations. + * + * + * @provideGoog + */ + + +/** + * @define {boolean} Overridden to true by the compiler when --closure_pass + * or --mark_as_compiled is specified. + */ +var COMPILED = false; + + +/** + * Base namespace for the Closure library. Checks to see goog is already + * defined in the current scope before assigning to prevent clobbering if + * base.js is loaded more than once. + * + * @const + */ +var goog = goog || {}; + + +/** + * Reference to the global context. In most cases this will be 'window'. + */ +goog.global = this; + + +/** + * A hook for overriding the define values in uncompiled mode. + * + * In uncompiled mode, {@code CLOSURE_DEFINES} may be defined before loading + * base.js. If a key is defined in {@code CLOSURE_DEFINES}, {@code goog.define} + * will use the value instead of the default value. This allows flags to be + * overwritten without compilation (this is normally accomplished with the + * compiler's "define" flag). + * + * Example: + *
    + *   var CLOSURE_DEFINES = {'goog.DEBUG': false};
    + * 
    + * + * @type {Object.|undefined} + */ +goog.global.CLOSURE_DEFINES; + + +/** + * Builds an object structure for the provided namespace path, ensuring that + * names that already exist are not overwritten. For example: + * "a.b.c" -> a = {};a.b={};a.b.c={}; + * Used by goog.provide and goog.exportSymbol. + * @param {string} name name of the object that this file defines. + * @param {*=} opt_object the object to expose at the end of the path. + * @param {Object=} opt_objectToExportTo The object to add the path to; default + * is |goog.global|. + * @private + */ +goog.exportPath_ = function(name, opt_object, opt_objectToExportTo) { + var parts = name.split('.'); + var cur = opt_objectToExportTo || goog.global; + + // Internet Explorer exhibits strange behavior when throwing errors from + // methods externed in this manner. See the testExportSymbolExceptions in + // base_test.html for an example. + if (!(parts[0] in cur) && cur.execScript) { + cur.execScript('var ' + parts[0]); + } + + // Certain browsers cannot parse code in the form for((a in b); c;); + // This pattern is produced by the JSCompiler when it collapses the + // statement above into the conditional loop below. To prevent this from + // happening, use a for-loop and reserve the init logic as below. + + // Parentheses added to eliminate strict JS warning in Firefox. + for (var part; parts.length && (part = parts.shift());) { + if (!parts.length && opt_object !== undefined) { + // last part and we have an object; use it + cur[part] = opt_object; + } else if (cur[part]) { + cur = cur[part]; + } else { + cur = cur[part] = {}; + } + } +}; + + +/** + * Defines a named value. In uncompiled mode, the value is retreived from + * CLOSURE_DEFINES if the object is defined and has the property specified, + * and otherwise used the defined defaultValue. When compiled, the default + * can be overridden using compiler command-line options. + * + * @param {string} name The distinguished name to provide. 
+ * @param {string|number|boolean} defaultValue + */ +goog.define = function(name, defaultValue) { + var value = defaultValue; + if (!COMPILED) { + if (goog.global.CLOSURE_DEFINES && Object.prototype.hasOwnProperty.call( + goog.global.CLOSURE_DEFINES, name)) { + value = goog.global.CLOSURE_DEFINES[name]; + } + } + goog.exportPath_(name, value); +}; + + +/** + * @define {boolean} DEBUG is provided as a convenience so that debugging code + * that should not be included in a production js_binary can be easily stripped + * by specifying --define goog.DEBUG=false to the JSCompiler. For example, most + * toString() methods should be declared inside an "if (goog.DEBUG)" conditional + * because they are generally used for debugging purposes and it is difficult + * for the JSCompiler to statically determine whether they are used. + */ +goog.DEBUG = true; + + +/** + * @define {string} LOCALE defines the locale being used for compilation. It is + * used to select locale specific data to be compiled in js binary. BUILD rule + * can specify this value by "--define goog.LOCALE=" as JSCompiler + * option. + * + * Take into account that the locale code format is important. You should use + * the canonical Unicode format with hyphen as a delimiter. Language must be + * lowercase, Language Script - Capitalized, Region - UPPERCASE. + * There are few examples: pt-BR, en, en-US, sr-Latin-BO, zh-Hans-CN. + * + * See more info about locale codes here: + * http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers + * + * For language codes you should use values defined by ISO 693-1. See it here + * http://www.w3.org/WAI/ER/IG/ert/iso639.htm. There is only one exception from + * this rule: the Hebrew language. For legacy reasons the old code (iw) should + * be used instead of the new code (he), see http://wiki/Main/IIISynonyms. + */ +goog.define('goog.LOCALE', 'en'); // default to en + + +/** + * @define {boolean} Whether this code is running on trusted sites. 
+ * + * On untrusted sites, several native functions can be defined or overridden by + * external libraries like Prototype, Datejs, and JQuery and setting this flag + * to false forces closure to use its own implementations when possible. + * + * If your JavaScript can be loaded by a third party site and you are wary about + * relying on non-standard implementations, specify + * "--define goog.TRUSTED_SITE=false" to the JSCompiler. + */ +goog.define('goog.TRUSTED_SITE', true); + + +/** + * Creates object stubs for a namespace. The presence of one or more + * goog.provide() calls indicate that the file defines the given + * objects/namespaces. Build tools also scan for provide/require statements + * to discern dependencies, build dependency files (see deps.js), etc. + * @see goog.require + * @param {string} name Namespace provided by this file in the form + * "goog.package.part". + */ +goog.provide = function(name) { + if (!COMPILED) { + // Ensure that the same namespace isn't provided twice. This is intended + // to teach new developers that 'goog.provide' is effectively a variable + // declaration. And when JSCompiler transforms goog.provide into a real + // variable declaration, the compiled JS should work the same as the raw + // JS--even when the raw JS uses goog.provide incorrectly. + if (goog.isProvided_(name)) { + throw Error('Namespace "' + name + '" already declared.'); + } + delete goog.implicitNamespaces_[name]; + + var namespace = name; + while ((namespace = namespace.substring(0, namespace.lastIndexOf('.')))) { + if (goog.getObjectByName(namespace)) { + break; + } + goog.implicitNamespaces_[namespace] = true; + } + } + + goog.exportPath_(name); +}; + + +/** + * Marks that the current file should only be used for testing, and never for + * live code in production. + * + * In the case of unit tests, the message may optionally be an exact namespace + * for the test (e.g. 'goog.stringTest'). 
The linter will then ignore the extra + * provide (if not explicitly defined in the code). + * + * @param {string=} opt_message Optional message to add to the error that's + * raised when used in production code. + */ +goog.setTestOnly = function(opt_message) { + if (COMPILED && !goog.DEBUG) { + opt_message = opt_message || ''; + throw Error('Importing test-only code into non-debug environment' + + opt_message ? ': ' + opt_message : '.'); + } +}; + + +if (!COMPILED) { + + /** + * Check if the given name has been goog.provided. This will return false for + * names that are available only as implicit namespaces. + * @param {string} name name of the object to look for. + * @return {boolean} Whether the name has been provided. + * @private + */ + goog.isProvided_ = function(name) { + return !goog.implicitNamespaces_[name] && !!goog.getObjectByName(name); + }; + + /** + * Namespaces implicitly defined by goog.provide. For example, + * goog.provide('goog.events.Event') implicitly declares that 'goog' and + * 'goog.events' must be namespaces. + * + * @type {Object} + * @private + */ + goog.implicitNamespaces_ = {}; +} + + +/** + * Returns an object based on its fully qualified external name. If you are + * using a compilation pass that renames property names beware that using this + * function will not find renamed properties. + * + * @param {string} name The fully qualified name. + * @param {Object=} opt_obj The object within which to look; default is + * |goog.global|. + * @return {?} The value (object or primitive) or, if not found, null. + */ +goog.getObjectByName = function(name, opt_obj) { + var parts = name.split('.'); + var cur = opt_obj || goog.global; + for (var part; part = parts.shift(); ) { + if (goog.isDefAndNotNull(cur[part])) { + cur = cur[part]; + } else { + return null; + } + } + return cur; +}; + + +/** + * Globalizes a whole namespace, such as goog or goog.lang. + * + * @param {Object} obj The namespace to globalize. 
+ * @param {Object=} opt_global The object to add the properties to. + * @deprecated Properties may be explicitly exported to the global scope, but + * this should no longer be done in bulk. + */ +goog.globalize = function(obj, opt_global) { + var global = opt_global || goog.global; + for (var x in obj) { + global[x] = obj[x]; + } +}; + + +/** + * Adds a dependency from a file to the files it requires. + * @param {string} relPath The path to the js file. + * @param {Array} provides An array of strings with the names of the objects + * this file provides. + * @param {Array} requires An array of strings with the names of the objects + * this file requires. + */ +goog.addDependency = function(relPath, provides, requires) { + if (goog.DEPENDENCIES_ENABLED) { + var provide, require; + var path = relPath.replace(/\\/g, '/'); + var deps = goog.dependencies_; + for (var i = 0; provide = provides[i]; i++) { + deps.nameToPath[provide] = path; + if (!(path in deps.pathToNames)) { + deps.pathToNames[path] = {}; + } + deps.pathToNames[path][provide] = true; + } + for (var j = 0; require = requires[j]; j++) { + if (!(path in deps.requires)) { + deps.requires[path] = {}; + } + deps.requires[path][require] = true; + } + } +}; + + + + +// NOTE(nnaze): The debug DOM loader was included in base.js as an original way +// to do "debug-mode" development. The dependency system can sometimes be +// confusing, as can the debug DOM loader's asynchronous nature. +// +// With the DOM loader, a call to goog.require() is not blocking -- the script +// will not load until some point after the current script. If a namespace is +// needed at runtime, it needs to be defined in a previous script, or loaded via +// require() with its registered dependencies. +// User-defined namespaces may need their own deps file. See http://go/js_deps, +// http://go/genjsdeps, or, externally, DepsWriter. 
+// http://code.google.com/closure/library/docs/depswriter.html +// +// Because of legacy clients, the DOM loader can't be easily removed from +// base.js. Work is being done to make it disableable or replaceable for +// different environments (DOM-less JavaScript interpreters like Rhino or V8, +// for example). See bootstrap/ for more information. + + +/** + * @define {boolean} Whether to enable the debug loader. + * + * If enabled, a call to goog.require() will attempt to load the namespace by + * appending a script tag to the DOM (if the namespace has been registered). + * + * If disabled, goog.require() will simply assert that the namespace has been + * provided (and depend on the fact that some outside tool correctly ordered + * the script). + */ +goog.define('goog.ENABLE_DEBUG_LOADER', true); + + +/** + * Implements a system for the dynamic resolution of dependencies that works in + * parallel with the BUILD system. Note that all calls to goog.require will be + * stripped by the JSCompiler when the --closure_pass option is used. + * @see goog.provide + * @param {string} name Namespace to include (as was given in goog.provide()) in + * the form "goog.package.part". + */ +goog.require = function(name) { + + // If the object already exists we do not need do do anything. + // TODO(arv): If we start to support require based on file name this has to + // change. + // TODO(arv): If we allow goog.foo.* this has to change. + // TODO(arv): If we implement dynamic load after page load we should probably + // not remove this code for the compiled output. 
+ if (!COMPILED) { + if (goog.isProvided_(name)) { + return; + } + + if (goog.ENABLE_DEBUG_LOADER) { + var path = goog.getPathFromDeps_(name); + if (path) { + goog.included_[path] = true; + goog.writeScripts_(); + return; + } + } + + var errorMessage = 'goog.require could not find: ' + name; + if (goog.global.console) { + goog.global.console['error'](errorMessage); + } + + + throw Error(errorMessage); + + } +}; + + +/** + * Path for included scripts. + * @type {string} + */ +goog.basePath = ''; + + +/** + * A hook for overriding the base path. + * @type {string|undefined} + */ +goog.global.CLOSURE_BASE_PATH; + + +/** + * Whether to write out Closure's deps file. By default, the deps are written. + * @type {boolean|undefined} + */ +goog.global.CLOSURE_NO_DEPS; + + +/** + * A function to import a single script. This is meant to be overridden when + * Closure is being run in non-HTML contexts, such as web workers. It's defined + * in the global scope so that it can be set before base.js is loaded, which + * allows deps.js to be imported properly. + * + * The function is passed the script source, which is a relative URI. It should + * return true if the script was imported, false otherwise. + */ +goog.global.CLOSURE_IMPORT_SCRIPT; + + +/** + * Null function used for default values of callbacks, etc. + * @return {void} Nothing. + */ +goog.nullFunction = function() {}; + + +/** + * The identity function. Returns its first argument. + * + * @param {*=} opt_returnValue The single value that will be returned. + * @param {...*} var_args Optional trailing arguments. These are ignored. + * @return {?} The first argument. We can't know the type -- just pass it along + * without type. + * @deprecated Use goog.functions.identity instead. 
+ */ +goog.identityFunction = function(opt_returnValue, var_args) { + return opt_returnValue; +}; + + +/** + * When defining a class Foo with an abstract method bar(), you can do: + * Foo.prototype.bar = goog.abstractMethod + * + * Now if a subclass of Foo fails to override bar(), an error will be thrown + * when bar() is invoked. + * + * Note: This does not take the name of the function to override as an argument + * because that would make it more difficult to obfuscate our JavaScript code. + * + * @type {!Function} + * @throws {Error} when invoked to indicate the method should be overridden. + */ +goog.abstractMethod = function() { + throw Error('unimplemented abstract method'); +}; + + +/** + * Adds a {@code getInstance} static method that always returns the same + * instance object. + * @param {!Function} ctor The constructor for the class to add the static + * method to. + */ +goog.addSingletonGetter = function(ctor) { + ctor.getInstance = function() { + if (ctor.instance_) { + return ctor.instance_; + } + if (goog.DEBUG) { + // NOTE: JSCompiler can't optimize away Array#push. + goog.instantiatedSingletons_[goog.instantiatedSingletons_.length] = ctor; + } + return ctor.instance_ = new ctor; + }; +}; + + +/** + * All singleton classes that have been instantiated, for testing. Don't read + * it directly, use the {@code goog.testing.singleton} module. The compiler + * removes this variable if unused. + * @type {!Array.} + * @private + */ +goog.instantiatedSingletons_ = []; + + +/** + * True if goog.dependencies_ is available. + * @const {boolean} + */ +goog.DEPENDENCIES_ENABLED = !COMPILED && goog.ENABLE_DEBUG_LOADER; + + +if (goog.DEPENDENCIES_ENABLED) { + /** + * Object used to keep track of urls that have already been added. This record + * allows the prevention of circular dependencies. 
+ * @type {Object} + * @private + */ + goog.included_ = {}; + + + /** + * This object is used to keep track of dependencies and other data that is + * used for loading scripts. + * @private + * @type {Object} + */ + goog.dependencies_ = { + pathToNames: {}, // 1 to many + nameToPath: {}, // 1 to 1 + requires: {}, // 1 to many + // Used when resolving dependencies to prevent us from visiting file twice. + visited: {}, + written: {} // Used to keep track of script files we have written. + }; + + + /** + * Tries to detect whether is in the context of an HTML document. + * @return {boolean} True if it looks like HTML document. + * @private + */ + goog.inHtmlDocument_ = function() { + var doc = goog.global.document; + return typeof doc != 'undefined' && + 'write' in doc; // XULDocument misses write. + }; + + + /** + * Tries to detect the base path of base.js script that bootstraps Closure. + * @private + */ + goog.findBasePath_ = function() { + if (goog.global.CLOSURE_BASE_PATH) { + goog.basePath = goog.global.CLOSURE_BASE_PATH; + return; + } else if (!goog.inHtmlDocument_()) { + return; + } + var doc = goog.global.document; + var scripts = doc.getElementsByTagName('script'); + // Search backwards since the current script is in almost all cases the one + // that has base.js. + for (var i = scripts.length - 1; i >= 0; --i) { + var src = scripts[i].src; + var qmark = src.lastIndexOf('?'); + var l = qmark == -1 ? src.length : qmark; + if (src.substr(l - 7, 7) == 'base.js') { + goog.basePath = src.substr(0, l - 7); + return; + } + } + }; + + + /** + * Imports a script if, and only if, that script hasn't already been imported. + * (Must be called at execution time) + * @param {string} src Script source. 
+ * @private + */ + goog.importScript_ = function(src) { + var importScript = goog.global.CLOSURE_IMPORT_SCRIPT || + goog.writeScriptTag_; + if (!goog.dependencies_.written[src] && importScript(src)) { + goog.dependencies_.written[src] = true; + } + }; + + + /** + * The default implementation of the import function. Writes a script tag to + * import the script. + * + * @param {string} src The script source. + * @return {boolean} True if the script was imported, false otherwise. + * @private + */ + goog.writeScriptTag_ = function(src) { + if (goog.inHtmlDocument_()) { + var doc = goog.global.document; + + // If the user tries to require a new symbol after document load, + // something has gone terribly wrong. Doing a document.write would + // wipe out the page. + if (doc.readyState == 'complete') { + // Certain test frameworks load base.js multiple times, which tries + // to write deps.js each time. If that happens, just fail silently. + // These frameworks wipe the page between each load of base.js, so this + // is OK. + var isDeps = /\bdeps.js$/.test(src); + if (isDeps) { + return false; + } else { + throw Error('Cannot write "' + src + '" after document load'); + } + } + + doc.write( + '" that closes the next token. If + // non-empty, the subsequent call to Next will return a raw or RCDATA text + // token: one that treats "

    " as text instead of an element. + // rawTag's contents are lower-cased. + rawTag string + // textIsRaw is whether the current text token's data is not escaped. + textIsRaw bool + // convertNUL is whether NUL bytes in the current token's data should + // be converted into \ufffd replacement characters. + convertNUL bool + // allowCDATA is whether CDATA sections are allowed in the current context. + allowCDATA bool +} + +// AllowCDATA sets whether or not the tokenizer recognizes as +// the text "foo". The default value is false, which means to recognize it as +// a bogus comment "" instead. +// +// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and +// only if tokenizing foreign content, such as MathML and SVG. However, +// tracking foreign-contentness is difficult to do purely in the tokenizer, +// as opposed to the parser, due to HTML integration points: an element +// can contain a that is foreign-to-SVG but not foreign-to- +// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the +// responsibility of the user of a tokenizer to call AllowCDATA as appropriate. +// In practice, if using the tokenizer without caring whether MathML or SVG +// CDATA is text or comments, such as tokenizing HTML to find all the anchor +// text, it is acceptable to ignore this responsibility. +func (z *Tokenizer) AllowCDATA(allowCDATA bool) { + z.allowCDATA = allowCDATA +} + +// NextIsNotRawText instructs the tokenizer that the next token should not be +// considered as 'raw text'. Some elements, such as script and title elements, +// normally require the next token after the opening tag to be 'raw text' that +// has no child elements. For example, tokenizing "a<b>c</b>d" +// yields a start tag token for "", a text token for "a<b>c</b>d", and +// an end tag token for "". There are no distinct start tag or end tag +// tokens for the "" and "". +// +// This tokenizer implementation will generally look for raw text at the right +// times. 
Strictly speaking, an HTML5 compliant tokenizer should not look for +// raw text if in foreign content: generally needs raw text, but a +// <title> inside an <svg> does not. Another example is that a <textarea> +// generally needs raw text, but a <textarea> is not allowed as an immediate +// child of a <select>; in normal parsing, a <textarea> implies </select>, but +// one cannot close the implicit element when parsing a <select>'s InnerHTML. +// Similarly to AllowCDATA, tracking the correct moment to override raw-text- +// ness is difficult to do purely in the tokenizer, as opposed to the parser. +// For strict compliance with the HTML5 tokenization algorithm, it is the +// responsibility of the user of a tokenizer to call NextIsNotRawText as +// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this +// responsibility for basic usage. +// +// Note that this 'raw text' concept is different from the one offered by the +// Tokenizer.Raw method. +func (z *Tokenizer) NextIsNotRawText() { + z.rawTag = "" +} + +// Err returns the error associated with the most recent ErrorToken token. +// This is typically io.EOF, meaning the end of tokenization. +func (z *Tokenizer) Err() error { + if z.tt != ErrorToken { + return nil + } + return z.err +} + +// readByte returns the next byte from the input stream, doing a buffered read +// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte +// slice that holds all the bytes read so far for the current token. +// It sets z.err if the underlying reader returns an error. +// Pre-condition: z.err == nil. +func (z *Tokenizer) readByte() byte { + if z.raw.end >= len(z.buf) { + // Our buffer is exhausted and we have to read from z.r. Check if the + // previous read resulted in an error. + if z.readErr != nil { + z.err = z.readErr + return 0 + } + // We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. 
If the length + // z.raw.end - z.raw.start is more than half the capacity of z.buf, then we + // allocate a new buffer before the copy. + c := cap(z.buf) + d := z.raw.end - z.raw.start + var buf1 []byte + if 2*d > c { + buf1 = make([]byte, d, 2*c) + } else { + buf1 = z.buf[:d] + } + copy(buf1, z.buf[z.raw.start:z.raw.end]) + if x := z.raw.start; x != 0 { + // Adjust the data/attr spans to refer to the same contents after the copy. + z.data.start -= x + z.data.end -= x + z.pendingAttr[0].start -= x + z.pendingAttr[0].end -= x + z.pendingAttr[1].start -= x + z.pendingAttr[1].end -= x + for i := range z.attr { + z.attr[i][0].start -= x + z.attr[i][0].end -= x + z.attr[i][1].start -= x + z.attr[i][1].end -= x + } + } + z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d] + // Now that we have copied the live bytes to the start of the buffer, + // we read from z.r into the remainder. + var n int + n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)]) + if n == 0 { + z.err = z.readErr + return 0 + } + z.buf = buf1[:d+n] + } + x := z.buf[z.raw.end] + z.raw.end++ + if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf { + z.err = ErrBufferExceeded + return 0 + } + return x +} + +// Buffered returns a slice containing data buffered but not yet tokenized. +func (z *Tokenizer) Buffered() []byte { + return z.buf[z.raw.end:] +} + +// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil). +// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil) +// too many times in succession. +func readAtLeastOneByte(r io.Reader, b []byte) (int, error) { + for i := 0; i < 100; i++ { + n, err := r.Read(b) + if n != 0 || err != nil { + return n, err + } + } + return 0, io.ErrNoProgress +} + +// skipWhiteSpace skips past any white space. +func (z *Tokenizer) skipWhiteSpace() { + if z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + // No-op. 
+ default: + z.raw.end-- + return + } + } +} + +// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and +// is typically something like "script" or "textarea". +func (z *Tokenizer) readRawOrRCDATA() { + if z.rawTag == "script" { + z.readScript() + z.textIsRaw = true + z.rawTag = "" + return + } +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + c = z.readByte() + if z.err != nil { + break loop + } + if c != '/' { + continue loop + } + if z.readRawEndTag() || z.err != nil { + break loop + } + } + z.data.end = z.raw.end + // A textarea's or title's RCDATA can contain escaped entities. + z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title" + z.rawTag = "" +} + +// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag. +// If it succeeds, it backs up the input position to reconsume the tag and +// returns true. Otherwise it returns false. The opening "</" has already been +// consumed. +func (z *Tokenizer) readRawEndTag() bool { + for i := 0; i < len(z.rawTag); i++ { + c := z.readByte() + if z.err != nil { + return false + } + if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') { + z.raw.end-- + return false + } + } + c := z.readByte() + if z.err != nil { + return false + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // The 3 is 2 for the leading "</" plus 1 for the trailing character c. + z.raw.end -= 3 + len(z.rawTag) + return true + } + z.raw.end-- + return false +} + +// readScript reads until the next </script> tag, following the byzantine +// rules for escaping/hiding the closing tag. 
+func (z *Tokenizer) readScript() { + defer func() { + z.data.end = z.raw.end + }() + var c byte + +scriptData: + c = z.readByte() + if z.err != nil { + return + } + if c == '<' { + goto scriptDataLessThanSign + } + goto scriptData + +scriptDataLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '/': + goto scriptDataEndTagOpen + case '!': + goto scriptDataEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptData + +scriptDataEscapeStart: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapeStartDash + } + z.raw.end-- + goto scriptData + +scriptDataEscapeStartDash: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapedDashDash + } + z.raw.end-- + goto scriptData + +scriptDataEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataEscaped + +scriptDataEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataEscapedEndTagOpen + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + goto scriptDataDoubleEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEscapedEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptDataEscaped + +scriptDataDoubleEscapeStart: + z.raw.end-- + for i := 0; i < len("script"); i++ { + c 
= z.readByte() + if z.err != nil { + return + } + if c != "script"[i] && c != "SCRIPT"[i] { + z.raw.end-- + goto scriptDataEscaped + } + } + c = z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + goto scriptDataDoubleEscaped + } + z.raw.end-- + goto scriptDataEscaped + +scriptDataDoubleEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataDoubleEscapeEnd + } + z.raw.end-- + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapeEnd: + if z.readRawEndTag() { + z.raw.end += len("</script>") + goto scriptDataEscaped + } + if z.err != nil { + return + } + goto scriptDataDoubleEscaped +} + +// readComment reads the next comment token starting with "<!--". The opening +// "<!--" has already been consumed. +func (z *Tokenizer) readComment() { + z.data.start = z.raw.end + defer func() { + if z.data.end < z.data.start { + // It's a comment with no data, like <!-->. + z.data.end = z.data.start + } + }() + for dashCount := 2; ; { + c := z.readByte() + if z.err != nil { + // Ignore up to two dashes at EOF. 
+ if dashCount > 2 { + dashCount = 2 + } + z.data.end = z.raw.end - dashCount + return + } + switch c { + case '-': + dashCount++ + continue + case '>': + if dashCount >= 2 { + z.data.end = z.raw.end - len("-->") + return + } + case '!': + if dashCount >= 2 { + c = z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + if c == '>' { + z.data.end = z.raw.end - len("--!>") + return + } + } + } + dashCount = 0 + } +} + +// readUntilCloseAngle reads until the next ">". +func (z *Tokenizer) readUntilCloseAngle() { + z.data.start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + if c == '>' { + z.data.end = z.raw.end - len(">") + return + } + } +} + +// readMarkupDeclaration reads the next token starting with "<!". It might be +// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or +// "<!a bogus comment". The opening "<!" has already been consumed. +func (z *Tokenizer) readMarkupDeclaration() TokenType { + z.data.start = z.raw.end + var c [2]byte + for i := 0; i < 2; i++ { + c[i] = z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return CommentToken + } + } + if c[0] == '-' && c[1] == '-' { + z.readComment() + return CommentToken + } + z.raw.end -= 2 + if z.readDoctype() { + return DoctypeToken + } + if z.allowCDATA && z.readCDATA() { + z.convertNUL = true + return TextToken + } + // It's a bogus comment. + z.readUntilCloseAngle() + return CommentToken +} + +// readDoctype attempts to read a doctype declaration and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readDoctype() bool { + const s = "DOCTYPE" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] && c != s[i]+('a'-'A') { + // Back up to read the fragment of "DOCTYPE" again. 
+ z.raw.end = z.data.start + return false + } + } + if z.skipWhiteSpace(); z.err != nil { + z.data.start = z.raw.end + z.data.end = z.raw.end + return true + } + z.readUntilCloseAngle() + return true +} + +// readCDATA attempts to read a CDATA section and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readCDATA() bool { + const s = "[CDATA[" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] { + // Back up to read the fragment of "[CDATA[" again. + z.raw.end = z.data.start + return false + } + } + z.data.start = z.raw.end + brackets := 0 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return true + } + switch c { + case ']': + brackets++ + case '>': + if brackets >= 2 { + z.data.end = z.raw.end - len("]]>") + return true + } + brackets = 0 + default: + brackets = 0 + } + } +} + +// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end] +// case-insensitively matches any element of ss. +func (z *Tokenizer) startTagIn(ss ...string) bool { +loop: + for _, s := range ss { + if z.data.end-z.data.start != len(s) { + continue loop + } + for i := 0; i < len(s); i++ { + c := z.buf[z.data.start+i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + if c != s[i] { + continue loop + } + } + return true + } + return false +} + +// readStartTag reads the next start tag token. The opening "<a" has already +// been consumed, where 'a' means anything in [A-Za-z]. +func (z *Tokenizer) readStartTag() TokenType { + z.readTag(true) + if z.err != nil { + return ErrorToken + } + // Several tags flag the tokenizer's next token as raw. 
+ c, raw := z.buf[z.data.start], false + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + switch c { + case 'i': + raw = z.startTagIn("iframe") + case 'n': + raw = z.startTagIn("noembed", "noframes", "noscript") + case 'p': + raw = z.startTagIn("plaintext") + case 's': + raw = z.startTagIn("script", "style") + case 't': + raw = z.startTagIn("textarea", "title") + case 'x': + raw = z.startTagIn("xmp") + } + if raw { + z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end])) + } + // Look for a self-closing token like "<br/>". + if z.err == nil && z.buf[z.raw.end-2] == '/' { + return SelfClosingTagToken + } + return StartTagToken +} + +// readTag reads the next tag token and its attributes. If saveAttr, those +// attributes are saved in z.attr, otherwise z.attr is set to an empty slice. +// The opening "<a" or "</a" has already been consumed, where 'a' means anything +// in [A-Za-z]. +func (z *Tokenizer) readTag(saveAttr bool) { + z.attr = z.attr[:0] + z.nAttrReturned = 0 + // Read the tag name and attribute key/value pairs. + z.readTagName() + if z.skipWhiteSpace(); z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil || c == '>' { + break + } + z.raw.end-- + z.readTagAttrKey() + z.readTagAttrVal() + // Save pendingAttr if saveAttr and that attribute has a non-empty key. + if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end { + z.attr = append(z.attr, z.pendingAttr) + } + if z.skipWhiteSpace(); z.err != nil { + break + } + } +} + +// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end) +// is positioned such that the first byte of the tag name (the "d" in "<div") +// has already been consumed. 
+func (z *Tokenizer) readTagName() { + z.data.start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.data.end = z.raw.end - 1 + return + case '/', '>': + z.raw.end-- + z.data.end = z.raw.end + return + } + } +} + +// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>". +// Precondition: z.err == nil. +func (z *Tokenizer) readTagAttrKey() { + z.pendingAttr[0].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[0].end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/': + z.pendingAttr[0].end = z.raw.end - 1 + return + case '=', '>': + z.raw.end-- + z.pendingAttr[0].end = z.raw.end + return + } + } +} + +// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>". +func (z *Tokenizer) readTagAttrVal() { + z.pendingAttr[1].start = z.raw.end + z.pendingAttr[1].end = z.raw.end + if z.skipWhiteSpace(); z.err != nil { + return + } + c := z.readByte() + if z.err != nil { + return + } + if c != '=' { + z.raw.end-- + return + } + if z.skipWhiteSpace(); z.err != nil { + return + } + quote := z.readByte() + if z.err != nil { + return + } + switch quote { + case '>': + z.raw.end-- + return + + case '\'', '"': + z.pendingAttr[1].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + if c == quote { + z.pendingAttr[1].end = z.raw.end - 1 + return + } + } + + default: + z.pendingAttr[1].start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.pendingAttr[1].end = z.raw.end - 1 + return + case '>': + z.raw.end-- + z.pendingAttr[1].end = z.raw.end + return + } + } + } +} + +// Next scans the next token and returns its type. 
+func (z *Tokenizer) Next() TokenType { + z.raw.start = z.raw.end + z.data.start = z.raw.end + z.data.end = z.raw.end + if z.err != nil { + z.tt = ErrorToken + return z.tt + } + if z.rawTag != "" { + if z.rawTag == "plaintext" { + // Read everything up to EOF. + for z.err == nil { + z.readByte() + } + z.data.end = z.raw.end + z.textIsRaw = true + } else { + z.readRawOrRCDATA() + } + if z.data.end > z.data.start { + z.tt = TextToken + z.convertNUL = true + return z.tt + } + } + z.textIsRaw = false + z.convertNUL = false + +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + + // Check if the '<' we have just read is part of a tag, comment + // or doctype. If not, it's part of the accumulated text token. + c = z.readByte() + if z.err != nil { + break loop + } + var tokenType TokenType + switch { + case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z': + tokenType = StartTagToken + case c == '/': + tokenType = EndTagToken + case c == '!' || c == '?': + // We use CommentToken to mean any of "<!--actual comments-->", + // "<!DOCTYPE declarations>" and "<?xml processing instructions?>". + tokenType = CommentToken + default: + continue + } + + // We have a non-text token, but we might have accumulated some text + // before that. If so, we return the text first, and return the non- + // text token on the subsequent call to Next. + if x := z.raw.end - len("<a"); z.raw.start < x { + z.raw.end = x + z.data.end = x + z.tt = TextToken + return z.tt + } + switch tokenType { + case StartTagToken: + z.tt = z.readStartTag() + return z.tt + case EndTagToken: + c = z.readByte() + if z.err != nil { + break loop + } + if c == '>' { + // "</>" does not generate a token at all. Generate an empty comment + // to allow passthrough clients to pick up the data using Raw. + // Reset the tokenizer state and start again. 
+ z.tt = CommentToken + return z.tt + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + z.readTag(false) + if z.err != nil { + z.tt = ErrorToken + } else { + z.tt = EndTagToken + } + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + case CommentToken: + if c == '!' { + z.tt = z.readMarkupDeclaration() + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + } + } + if z.raw.start < z.raw.end { + z.data.end = z.raw.end + z.tt = TextToken + return z.tt + } + z.tt = ErrorToken + return z.tt +} + +// Raw returns the unmodified text of the current token. Calling Next, Token, +// Text, TagName or TagAttr may change the contents of the returned slice. +func (z *Tokenizer) Raw() []byte { + return z.buf[z.raw.start:z.raw.end] +} + +// convertNewlines converts "\r" and "\r\n" in s to "\n". +// The conversion happens in place, but the resulting slice may be shorter. +func convertNewlines(s []byte) []byte { + for i, c := range s { + if c != '\r' { + continue + } + + src := i + 1 + if src >= len(s) || s[src] != '\n' { + s[i] = '\n' + continue + } + + dst := i + for src < len(s) { + if s[src] == '\r' { + if src+1 < len(s) && s[src+1] == '\n' { + src++ + } + s[dst] = '\n' + } else { + s[dst] = s[src] + } + src++ + dst++ + } + return s[:dst] + } + return s +} + +var ( + nul = []byte("\x00") + replacement = []byte("\ufffd") +) + +// Text returns the unescaped text of a text, comment or doctype token. The +// contents of the returned slice may change on the next call to Next. 
+func (z *Tokenizer) Text() []byte { + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + s = convertNewlines(s) + if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) { + s = bytes.Replace(s, nul, replacement, -1) + } + if !z.textIsRaw { + s = unescape(s, false) + } + return s + } + return nil +} + +// TagName returns the lower-cased name of a tag token (the `img` out of +// `<IMG SRC="foo">`) and whether the tag has attributes. +// The contents of the returned slice may change on the next call to Next. +func (z *Tokenizer) TagName() (name []byte, hasAttr bool) { + if z.data.start < z.data.end { + switch z.tt { + case StartTagToken, EndTagToken, SelfClosingTagToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + return lower(s), z.nAttrReturned < len(z.attr) + } + } + return nil, false +} + +// TagAttr returns the lower-cased key and unescaped value of the next unparsed +// attribute for the current tag token and whether there are more attributes. +// The contents of the returned slices may change on the next call to Next. +func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) { + if z.nAttrReturned < len(z.attr) { + switch z.tt { + case StartTagToken, SelfClosingTagToken: + x := z.attr[z.nAttrReturned] + z.nAttrReturned++ + key = z.buf[x[0].start:x[0].end] + val = z.buf[x[1].start:x[1].end] + return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr) + } + } + return nil, nil, false +} + +// Token returns the next Token. The result's Data and Attr values remain valid +// after subsequent Next calls. 
+func (z *Tokenizer) Token() Token { + t := Token{Type: z.tt} + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + t.Data = string(z.Text()) + case StartTagToken, SelfClosingTagToken, EndTagToken: + name, moreAttr := z.TagName() + for moreAttr { + var key, val []byte + key, val, moreAttr = z.TagAttr() + t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)}) + } + if a := atom.Lookup(name); a != 0 { + t.DataAtom, t.Data = a, a.String() + } else { + t.DataAtom, t.Data = 0, string(name) + } + } + return t +} + +// SetMaxBuf sets a limit on the amount of data buffered during tokenization. +// A value of 0 means unlimited. +func (z *Tokenizer) SetMaxBuf(n int) { + z.maxBuf = n +} + +// NewTokenizer returns a new HTML Tokenizer for the given Reader. +// The input is assumed to be UTF-8 encoded. +func NewTokenizer(r io.Reader) *Tokenizer { + return NewTokenizerFragment(r, "") +} + +// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for +// tokenizing an exisitng element's InnerHTML fragment. contextTag is that +// element's tag, such as "div" or "iframe". +// +// For example, how the InnerHTML "a<b" is tokenized depends on whether it is +// for a <p> tag or a <script> tag. +// +// The input is assumed to be UTF-8 encoded. 
+func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer { + z := &Tokenizer{ + r: r, + buf: make([]byte, 0, 4096), + } + if contextTag != "" { + switch s := strings.ToLower(contextTag); s { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp": + z.rawTag = s + } + } + return z +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/go.net/html/token_test.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/go.net/html/token_test.go new file mode 100644 index 00000000..38d80d7f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/go.net/html/token_test.go @@ -0,0 +1,743 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "io" + "io/ioutil" + "reflect" + "runtime" + "strings" + "testing" +) + +type tokenTest struct { + // A short description of the test case. + desc string + // The HTML to parse. + html string + // The string representations of the expected tokens, joined by '$'. + golden string +} + +var tokenTests = []tokenTest{ + { + "empty", + "", + "", + }, + // A single text node. The tokenizer should not break text nodes on whitespace, + // nor should it normalize whitespace within a text node. + { + "text", + "foo bar", + "foo bar", + }, + // An entity. + { + "entity", + "one < two", + "one < two", + }, + // A start, self-closing and end tag. The tokenizer does not care if the start + // and end tokens don't match; that is the job of the parser. + { + "tags", + "<a>b<c/>d</e>", + "<a>$b$<c/>$d$</e>", + }, + // Angle brackets that aren't a tag. 
+ { + "not a tag #0", + "<", + "<", + }, + { + "not a tag #1", + "</", + "</", + }, + { + "not a tag #2", + "</>", + "<!---->", + }, + { + "not a tag #3", + "a</>b", + "a$<!---->$b", + }, + { + "not a tag #4", + "</ >", + "<!-- -->", + }, + { + "not a tag #5", + "</.", + "<!--.-->", + }, + { + "not a tag #6", + "</.>", + "<!--.-->", + }, + { + "not a tag #7", + "a < b", + "a < b", + }, + { + "not a tag #8", + "<.>", + "<.>", + }, + { + "not a tag #9", + "a<<<b>>>c", + "a<<$<b>$>>c", + }, + { + "not a tag #10", + "if x<0 and y < 0 then x*y>0", + "if x<0 and y < 0 then x*y>0", + }, + // EOF in a tag name. + { + "tag name eof #0", + "<a", + "", + }, + { + "tag name eof #1", + "<a ", + "", + }, + { + "tag name eof #2", + "a<b", + "a", + }, + { + "tag name eof #3", + "<a><b", + "<a>", + }, + { + "tag name eof #4", + `<a x`, + ``, + }, + // Some malformed tags that are missing a '>'. + { + "malformed tag #0", + `<p</p>`, + `<p< p="">`, + }, + { + "malformed tag #1", + `<p </p>`, + `<p <="" p="">`, + }, + { + "malformed tag #2", + `<p id`, + ``, + }, + { + "malformed tag #3", + `<p id=`, + ``, + }, + { + "malformed tag #4", + `<p id=>`, + `<p id="">`, + }, + { + "malformed tag #5", + `<p id=0`, + ``, + }, + { + "malformed tag #6", + `<p id=0</p>`, + `<p id="0</p">`, + }, + { + "malformed tag #7", + `<p id="0</p>`, + ``, + }, + { + "malformed tag #8", + `<p id="0"</p>`, + `<p id="0" <="" p="">`, + }, + { + "malformed tag #9", + `<p></p id`, + `<p>`, + }, + // Raw text and RCDATA. 
+ { + "basic raw text", + "<script><a></b></script>", + "<script>$<a></b>$</script>", + }, + { + "unfinished script end tag", + "<SCRIPT>a</SCR", + "<script>$a</SCR", + }, + { + "broken script end tag", + "<SCRIPT>a</SCR ipt>", + "<script>$a</SCR ipt>", + }, + { + "EOF in script end tag", + "<SCRIPT>a</SCRipt", + "<script>$a</SCRipt", + }, + { + "scriptx end tag", + "<SCRIPT>a</SCRiptx", + "<script>$a</SCRiptx", + }, + { + "' ' completes script end tag", + "<SCRIPT>a</SCRipt ", + "<script>$a", + }, + { + "'>' completes script end tag", + "<SCRIPT>a</SCRipt>", + "<script>$a$</script>", + }, + { + "self-closing script end tag", + "<SCRIPT>a</SCRipt/>", + "<script>$a$</script>", + }, + { + "nested script tag", + "<SCRIPT>a</SCRipt<script>", + "<script>$a</SCRipt<script>", + }, + { + "script end tag after unfinished", + "<SCRIPT>a</SCRipt</script>", + "<script>$a</SCRipt$</script>", + }, + { + "script/style mismatched tags", + "<script>a</style>", + "<script>$a</style>", + }, + { + "style element with entity", + "<style>'", + "<style>$&apos;", + }, + { + "textarea with tag", + "<textarea><div></textarea>", + "<textarea>$<div>$</textarea>", + }, + { + "title with tag and entity", + "<title><b>K&R C</b>", + "$<b>K&R C</b>$", + }, + // DOCTYPE tests. + { + "Proper DOCTYPE", + "", + "", + }, + { + "DOCTYPE with no space", + "", + "", + }, + { + "DOCTYPE with two spaces", + "", + "", + }, + { + "looks like DOCTYPE but isn't", + "", + "", + }, + { + "DOCTYPE at EOF", + "", + }, + // XML processing instructions. + { + "XML processing instruction", + "", + "", + }, + // Comments. 
+ { + "comment0", + "abcdef", + "abc$$$$def", + }, + { + "comment1", + "az", + "a$$z", + }, + { + "comment2", + "az", + "a$$z", + }, + { + "comment3", + "az", + "a$$z", + }, + { + "comment4", + "az", + "a$$z", + }, + { + "comment5", + "az", + "a$$z", + }, + { + "comment6", + "az", + "a$$z", + }, + { + "comment7", + "a", + }, + { + "comment8", + "a", + }, + { + "comment9", + "a", + }, + { + "comment10", + "a", + }, + { + "comment11", + "a", + }, + { + "comment12", + "a", + }, + { + "comment13", + "az", + "a$$z", + }, + // An attribute with a backslash. + { + "backslash", + `

    `, + `

    `, + }, + // Entities, tag name and attribute key lower-casing, and whitespace + // normalization within a tag. + { + "tricky", + "

    te<&;xt

    ", + `

    $$te<&;xt$$

    `, + }, + // A nonexistent entity. Tokenizing and converting back to a string should + // escape the "&" to become "&". + { + "noSuchEntity", + `<&alsoDoesntExist;&`, + `$<&alsoDoesntExist;&`, + }, + { + "entity without semicolon", + `¬it;∉`, + `¬it;∉$`, + }, + { + "entity with digits", + "½", + "½", + }, + // Attribute tests: + // http://dev.w3.org/html5/spec/Overview.html#attributes-0 + { + "Empty attribute", + ``, + ``, + }, + { + "Empty attribute, whitespace", + ``, + ``, + }, + { + "Unquoted attribute value", + ``, + ``, + }, + { + "Unquoted attribute value, spaces", + ``, + ``, + }, + { + "Unquoted attribute value, trailing space", + ``, + ``, + }, + { + "Single-quoted attribute value", + ``, + ``, + }, + { + "Single-quoted attribute value, trailing space", + ``, + ``, + }, + { + "Double-quoted attribute value", + ``, + ``, + }, + { + "Attribute name characters", + ``, + ``, + }, + { + "Mixed attributes", + `a

    z`, + `a$

    $z`, + }, + { + "Attributes with a solitary single quote", + `

    `, + `

    $

    `, + }, +} + +func TestTokenizer(t *testing.T) { +loop: + for _, tt := range tokenTests { + z := NewTokenizer(strings.NewReader(tt.html)) + if tt.golden != "" { + for i, s := range strings.Split(tt.golden, "$") { + if z.Next() == ErrorToken { + t.Errorf("%s token %d: want %q got error %v", tt.desc, i, s, z.Err()) + continue loop + } + actual := z.Token().String() + if s != actual { + t.Errorf("%s token %d: want %q got %q", tt.desc, i, s, actual) + continue loop + } + } + } + z.Next() + if z.Err() != io.EOF { + t.Errorf("%s: want EOF got %q", tt.desc, z.Err()) + } + } +} + +func TestMaxBuffer(t *testing.T) { + // Exceeding the maximum buffer size generates ErrBufferExceeded. + z := NewTokenizer(strings.NewReader("<" + strings.Repeat("t", 10))) + z.SetMaxBuf(5) + tt := z.Next() + if got, want := tt, ErrorToken; got != want { + t.Fatalf("token type: got: %v want: %v", got, want) + } + if got, want := z.Err(), ErrBufferExceeded; got != want { + t.Errorf("error type: got: %v want: %v", got, want) + } + if got, want := string(z.Raw()), " 0 { + result.Write(z.Text()) + } + case StartTagToken, EndTagToken: + tn, _ := z.TagName() + if len(tn) == 1 && tn[0] == 'a' { + if tt == StartTagToken { + depth++ + } else { + depth-- + } + } + } + } + u := "14567" + v := string(result.Bytes()) + if u != v { + t.Errorf("TestBufAPI: want %q got %q", u, v) + } +} + +func TestConvertNewlines(t *testing.T) { + testCases := map[string]string{ + "Mac\rDOS\r\nUnix\n": "Mac\nDOS\nUnix\n", + "Unix\nMac\rDOS\r\n": "Unix\nMac\nDOS\n", + "DOS\r\nDOS\r\nDOS\r\n": "DOS\nDOS\nDOS\n", + "": "", + "\n": "\n", + "\n\r": "\n\n", + "\r": "\n", + "\r\n": "\n", + "\r\n\n": "\n\n", + "\r\n\r": "\n\n", + "\r\n\r\n": "\n\n", + "\r\r": "\n\n", + "\r\r\n": "\n\n", + "\r\r\n\n": "\n\n\n", + "\r\r\r\n": "\n\n\n", + "\r \n": "\n \n", + "xyz": "xyz", + } + for in, want := range testCases { + if got := string(convertNewlines([]byte(in))); got != want { + t.Errorf("input %q: got %q, want %q", in, got, want) + } + } 
+} + +func TestReaderEdgeCases(t *testing.T) { + const s = "

    An io.Reader can return (0, nil) or (n, io.EOF).

    " + testCases := []io.Reader{ + &zeroOneByteReader{s: s}, + &eofStringsReader{s: s}, + &stuckReader{}, + } + for i, tc := range testCases { + got := []TokenType{} + z := NewTokenizer(tc) + for { + tt := z.Next() + if tt == ErrorToken { + break + } + got = append(got, tt) + } + if err := z.Err(); err != nil && err != io.EOF { + if err != io.ErrNoProgress { + t.Errorf("i=%d: %v", i, err) + } + continue + } + want := []TokenType{ + StartTagToken, + TextToken, + EndTagToken, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("i=%d: got %v, want %v", i, got, want) + continue + } + } +} + +// zeroOneByteReader is like a strings.Reader that alternates between +// returning 0 bytes and 1 byte at a time. +type zeroOneByteReader struct { + s string + n int +} + +func (r *zeroOneByteReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + if len(r.s) == 0 { + return 0, io.EOF + } + r.n++ + if r.n%2 != 0 { + return 0, nil + } + p[0], r.s = r.s[0], r.s[1:] + return 1, nil +} + +// eofStringsReader is like a strings.Reader but can return an (n, err) where +// n > 0 && err != nil. +type eofStringsReader struct { + s string +} + +func (r *eofStringsReader) Read(p []byte) (int, error) { + n := copy(p, r.s) + r.s = r.s[n:] + if r.s != "" { + return n, nil + } + return n, io.EOF +} + +// stuckReader is an io.Reader that always returns no data and no error. 
+type stuckReader struct{} + +func (*stuckReader) Read(p []byte) (int, error) { + return 0, nil +} + +const ( + rawLevel = iota + lowLevel + highLevel +) + +func benchmarkTokenizer(b *testing.B, level int) { + buf, err := ioutil.ReadFile("testdata/go1.html") + if err != nil { + b.Fatalf("could not read testdata/go1.html: %v", err) + } + b.SetBytes(int64(len(buf))) + runtime.GC() + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + z := NewTokenizer(bytes.NewBuffer(buf)) + for { + tt := z.Next() + if tt == ErrorToken { + if err := z.Err(); err != nil && err != io.EOF { + b.Fatalf("tokenizer error: %v", err) + } + break + } + switch level { + case rawLevel: + // Calling z.Raw just returns the raw bytes of the token. It does + // not unescape < to <, or lower-case tag names and attribute keys. + z.Raw() + case lowLevel: + // Caling z.Text, z.TagName and z.TagAttr returns []byte values + // whose contents may change on the next call to z.Next. + switch tt { + case TextToken, CommentToken, DoctypeToken: + z.Text() + case StartTagToken, SelfClosingTagToken: + _, more := z.TagName() + for more { + _, _, more = z.TagAttr() + } + case EndTagToken: + z.TagName() + } + case highLevel: + // Calling z.Token converts []byte values to strings whose validity + // extend beyond the next call to z.Next. 
+ z.Token() + } + } + } +} + +func BenchmarkRawLevelTokenizer(b *testing.B) { benchmarkTokenizer(b, rawLevel) } +func BenchmarkLowLevelTokenizer(b *testing.B) { benchmarkTokenizer(b, lowLevel) } +func BenchmarkHighLevelTokenizer(b *testing.B) { benchmarkTokenizer(b, highLevel) } diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/.hgtags b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/.hgtags new file mode 100644 index 00000000..a6df8480 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/.hgtags @@ -0,0 +1,2 @@ +379476c9e05c5275356e0a82ca079e61869e9192 release +4ee7c273e92e663ef8dc0c476d395350a586ad75 weekly diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/AUTHORS b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/AUTHORS new file mode 100644 index 00000000..5ad2b581 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/AUTHORS @@ -0,0 +1,11 @@ +# This is the official list of goauth2 authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Google Inc. diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/CONTRIBUTORS b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/CONTRIBUTORS new file mode 100644 index 00000000..2de444d4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the goauth2 repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. 
For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name +# +# An entry with two email addresses specifies that the +# first address should be used in the submit logs and +# that the second address should be recognized as the +# same person when interacting with Rietveld. + +# Please keep the list sorted. + +Andrew Gerrand +Brad Fitzpatrick +Mark-Antoine Ruel +Manu Garg diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/LICENSE new file mode 100644 index 00000000..6765f090 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The goauth2 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/PATENTS b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/PATENTS new file mode 100644 index 00000000..9e871635 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the goauth2 project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. 
diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/lib/codereview/codereview.cfg b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/lib/codereview/codereview.cfg new file mode 100644 index 00000000..93b55c0a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/lib/codereview/codereview.cfg @@ -0,0 +1 @@ +defaultcc: golang-dev@googlegroups.com diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/oauth/oauth.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/oauth/oauth.go new file mode 100644 index 00000000..29dc8d73 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/oauth/oauth.go @@ -0,0 +1,461 @@ +// Copyright 2011 The goauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth supports making OAuth2-authenticated HTTP requests. +// +// Example usage: +// +// // Specify your configuration. (typically as a global variable) +// var config = &oauth.Config{ +// ClientId: YOUR_CLIENT_ID, +// ClientSecret: YOUR_CLIENT_SECRET, +// Scope: "https://www.googleapis.com/auth/buzz", +// AuthURL: "https://accounts.google.com/o/oauth2/auth", +// TokenURL: "https://accounts.google.com/o/oauth2/token", +// RedirectURL: "http://you.example.org/handler", +// } +// +// // A landing page redirects to the OAuth provider to get the auth code. +// func landing(w http.ResponseWriter, r *http.Request) { +// http.Redirect(w, r, config.AuthCodeURL("foo"), http.StatusFound) +// } +// +// // The user will be redirected back to this handler, that takes the +// // "code" query parameter and Exchanges it for an access token. 
+// func handler(w http.ResponseWriter, r *http.Request) { +// t := &oauth.Transport{Config: config} +// t.Exchange(r.FormValue("code")) +// // The Transport now has a valid Token. Create an *http.Client +// // with which we can make authenticated API requests. +// c := t.Client() +// c.Post(...) +// // ... +// // btw, r.FormValue("state") == "foo" +// } +// +package oauth + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "sync" + "time" +) + +// OAuthError is the error type returned by many operations. +// +// In retrospect it should not exist. Don't depend on it. +type OAuthError struct { + prefix string + msg string +} + +func (oe OAuthError) Error() string { + return "OAuthError: " + oe.prefix + ": " + oe.msg +} + +// Cache specifies the methods that implement a Token cache. +type Cache interface { + Token() (*Token, error) + PutToken(*Token) error +} + +// CacheFile implements Cache. Its value is the name of the file in which +// the Token is stored in JSON format. +type CacheFile string + +func (f CacheFile) Token() (*Token, error) { + file, err := os.Open(string(f)) + if err != nil { + return nil, OAuthError{"CacheFile.Token", err.Error()} + } + defer file.Close() + tok := &Token{} + if err := json.NewDecoder(file).Decode(tok); err != nil { + return nil, OAuthError{"CacheFile.Token", err.Error()} + } + return tok, nil +} + +func (f CacheFile) PutToken(tok *Token) error { + file, err := os.OpenFile(string(f), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return OAuthError{"CacheFile.PutToken", err.Error()} + } + if err := json.NewEncoder(file).Encode(tok); err != nil { + file.Close() + return OAuthError{"CacheFile.PutToken", err.Error()} + } + if err := file.Close(); err != nil { + return OAuthError{"CacheFile.PutToken", err.Error()} + } + return nil +} + +// Config is the configuration of an OAuth consumer. 
+type Config struct { + // ClientId is the OAuth client identifier used when communicating with + // the configured OAuth provider. + ClientId string + + // ClientSecret is the OAuth client secret used when communicating with + // the configured OAuth provider. + ClientSecret string + + // Scope identifies the level of access being requested. Multiple scope + // values should be provided as a space-delimited string. + Scope string + + // AuthURL is the URL the user will be directed to in order to grant + // access. + AuthURL string + + // TokenURL is the URL used to retrieve OAuth tokens. + TokenURL string + + // RedirectURL is the URL to which the user will be returned after + // granting (or denying) access. + RedirectURL string + + // TokenCache allows tokens to be cached for subsequent requests. + TokenCache Cache + + // AccessType is an OAuth extension that gets sent as the + // "access_type" field in the URL from AuthCodeURL. + // See https://developers.google.com/accounts/docs/OAuth2WebServer. + // It may be "online" (the default) or "offline". + // If your application needs to refresh access tokens when the + // user is not present at the browser, then use offline. This + // will result in your application obtaining a refresh token + // the first time your application exchanges an authorization + // code for a user. + AccessType string + + // ApprovalPrompt indicates whether the user should be + // re-prompted for consent. If set to "auto" (default) the + // user will be prompted only if they haven't previously + // granted consent and the code can only be exchanged for an + // access token. + // If set to "force" the user will always be prompted, and the + // code can be exchanged for a refresh token. + ApprovalPrompt string +} + +// Token contains an end-user's tokens. +// This is the data you must store to persist authentication. 
+type Token struct { + AccessToken string + RefreshToken string + Expiry time.Time // If zero the token has no (known) expiry time. + + // Extra optionally contains extra metadata from the server + // when updating a token. The only current key that may be + // populated is "id_token". It may be nil and will be + // initialized as needed. + Extra map[string]string +} + +// Expired reports whether the token has expired or is invalid. +func (t *Token) Expired() bool { + if t.AccessToken == "" { + return true + } + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Before(time.Now()) +} + +// Transport implements http.RoundTripper. When configured with a valid +// Config and Token it can be used to make authenticated HTTP requests. +// +// t := &oauth.Transport{config} +// t.Exchange(code) +// // t now contains a valid Token +// r, _, err := t.Client().Get("http://example.org/url/requiring/auth") +// +// It will automatically refresh the Token if it can, +// updating the supplied Token in place. +type Transport struct { + *Config + *Token + + // mu guards modifying the token. + mu sync.Mutex + + // Transport is the HTTP transport to use when making requests. + // It will default to http.DefaultTransport if nil. + // (It should never be an oauth.Transport.) + Transport http.RoundTripper +} + +// Client returns an *http.Client that makes OAuth-authenticated requests. +func (t *Transport) Client() *http.Client { + return &http.Client{Transport: t} +} + +func (t *Transport) transport() http.RoundTripper { + if t.Transport != nil { + return t.Transport + } + return http.DefaultTransport +} + +// AuthCodeURL returns a URL that the end-user should be redirected to, +// so that they may obtain an authorization code. 
+func (c *Config) AuthCodeURL(state string) string { + url_, err := url.Parse(c.AuthURL) + if err != nil { + panic("AuthURL malformed: " + err.Error()) + } + q := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientId}, + "redirect_uri": {c.RedirectURL}, + "scope": {c.Scope}, + "state": {state}, + "access_type": {c.AccessType}, + "approval_prompt": {c.ApprovalPrompt}, + }.Encode() + if url_.RawQuery == "" { + url_.RawQuery = q + } else { + url_.RawQuery += "&" + q + } + return url_.String() +} + +// Exchange takes a code and gets access Token from the remote server. +func (t *Transport) Exchange(code string) (*Token, error) { + if t.Config == nil { + return nil, OAuthError{"Exchange", "no Config supplied"} + } + + // If the transport or the cache already has a token, it is + // passed to `updateToken` to preserve existing refresh token. + tok := t.Token + if tok == nil && t.TokenCache != nil { + tok, _ = t.TokenCache.Token() + } + if tok == nil { + tok = new(Token) + } + err := t.updateToken(tok, url.Values{ + "grant_type": {"authorization_code"}, + "redirect_uri": {t.RedirectURL}, + "scope": {t.Scope}, + "code": {code}, + }) + if err != nil { + return nil, err + } + t.Token = tok + if t.TokenCache != nil { + return tok, t.TokenCache.PutToken(tok) + } + return tok, nil +} + +// RoundTrip executes a single HTTP transaction using the Transport's +// Token as authorization headers. +// +// This method will attempt to renew the Token if it has expired and may return +// an error related to that Token renewal before attempting the client request. +// If the Token cannot be renewed a non-nil os.Error value will be returned. +// If the Token is invalid callers should expect HTTP-level errors, +// as indicated by the Response's StatusCode. 
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + accessToken, err := t.getAccessToken() + if err != nil { + return nil, err + } + // To set the Authorization header, we must make a copy of the Request + // so that we don't modify the Request we were given. + // This is required by the specification of http.RoundTripper. + req = cloneRequest(req) + req.Header.Set("Authorization", "Bearer "+accessToken) + + // Make the HTTP request. + return t.transport().RoundTrip(req) +} + +func (t *Transport) getAccessToken() (string, error) { + t.mu.Lock() + defer t.mu.Unlock() + + if t.Token == nil { + if t.Config == nil { + return "", OAuthError{"RoundTrip", "no Config supplied"} + } + if t.TokenCache == nil { + return "", OAuthError{"RoundTrip", "no Token supplied"} + } + var err error + t.Token, err = t.TokenCache.Token() + if err != nil { + return "", err + } + } + + // Refresh the Token if it has expired. + if t.Expired() { + if err := t.Refresh(); err != nil { + return "", err + } + } + if t.AccessToken == "" { + return "", errors.New("no access token obtained from refresh") + } + return t.AccessToken, nil +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +// Refresh renews the Transport's AccessToken using its RefreshToken. 
+func (t *Transport) Refresh() error { + if t.Token == nil { + return OAuthError{"Refresh", "no existing Token"} + } + if t.RefreshToken == "" { + return OAuthError{"Refresh", "Token expired; no Refresh Token"} + } + if t.Config == nil { + return OAuthError{"Refresh", "no Config supplied"} + } + + err := t.updateToken(t.Token, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {t.RefreshToken}, + }) + if err != nil { + return err + } + if t.TokenCache != nil { + return t.TokenCache.PutToken(t.Token) + } + return nil +} + +// AuthenticateClient gets an access Token using the client_credentials grant +// type. +func (t *Transport) AuthenticateClient() error { + if t.Config == nil { + return OAuthError{"Exchange", "no Config supplied"} + } + if t.Token == nil { + t.Token = &Token{} + } + return t.updateToken(t.Token, url.Values{"grant_type": {"client_credentials"}}) +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client_secret in Authorization header. +// - Dropbox accepts either, but not both. +// - Google only accepts client_secret (not spec compliant?) +func providerAuthHeaderWorks(tokenURL string) bool { + if strings.HasPrefix(tokenURL, "https://accounts.google.com/") { + // Google fails to implement the OAuth2 spec fully? + return false + } + return true +} + +// updateToken mutates both tok and v. 
+func (t *Transport) updateToken(tok *Token, v url.Values) error { + v.Set("client_id", t.ClientId) + bustedAuth := !providerAuthHeaderWorks(t.TokenURL) + if bustedAuth { + v.Set("client_secret", t.ClientSecret) + } + client := &http.Client{Transport: t.transport()} + req, err := http.NewRequest("POST", t.TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(t.ClientId, t.ClientSecret) + } + r, err := client.Do(req) + if err != nil { + return err + } + defer r.Body.Close() + if r.StatusCode != 200 { + return OAuthError{"updateToken", "Unexpected HTTP status " + r.Status} + } + var b struct { + Access string `json:"access_token"` + Refresh string `json:"refresh_token"` + ExpiresIn int64 `json:"expires_in"` // seconds + Id string `json:"id_token"` + } + + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return err + } + + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return err + } + + b.Access = vals.Get("access_token") + b.Refresh = vals.Get("refresh_token") + b.ExpiresIn, _ = strconv.ParseInt(vals.Get("expires_in"), 10, 64) + b.Id = vals.Get("id_token") + default: + if err = json.Unmarshal(body, &b); err != nil { + return fmt.Errorf("got bad response from server: %q", body) + } + } + if b.Access == "" { + return errors.New("received empty access token from authorization server") + } + tok.AccessToken = b.Access + // Don't overwrite `RefreshToken` with an empty value + if b.Refresh != "" { + tok.RefreshToken = b.Refresh + } + if b.ExpiresIn == 0 { + tok.Expiry = time.Time{} + } else { + tok.Expiry = time.Now().Add(time.Duration(b.ExpiresIn) * time.Second) + } + if b.Id != "" { + if tok.Extra == nil { + tok.Extra = make(map[string]string) + } + 
tok.Extra["id_token"] = b.Id + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/oauth/oauth_test.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/oauth/oauth_test.go new file mode 100644 index 00000000..81d4992a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/goauth2/oauth/oauth_test.go @@ -0,0 +1,219 @@ +// Copyright 2011 The goauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth + +import ( + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "runtime" + "testing" + "time" +) + +var requests = []struct { + path, query, auth string // request + contenttype, body string // response +}{ + { + path: "/token", + query: "grant_type=authorization_code&code=c0d3&client_id=cl13nt1d", + contenttype: "application/json", + auth: "Basic Y2wxM250MWQ6czNjcjN0", + body: ` + { + "access_token":"token1", + "refresh_token":"refreshtoken1", + "id_token":"idtoken1", + "expires_in":3600 + } + `, + }, + {path: "/secure", auth: "Bearer token1", body: "first payload"}, + { + path: "/token", + query: "grant_type=refresh_token&refresh_token=refreshtoken1&client_id=cl13nt1d", + contenttype: "application/json", + auth: "Basic Y2wxM250MWQ6czNjcjN0", + body: ` + { + "access_token":"token2", + "refresh_token":"refreshtoken2", + "id_token":"idtoken2", + "expires_in":3600 + } + `, + }, + {path: "/secure", auth: "Bearer token2", body: "second payload"}, + { + path: "/token", + query: "grant_type=refresh_token&refresh_token=refreshtoken2&client_id=cl13nt1d", + contenttype: "application/x-www-form-urlencoded", + body: "access_token=token3&refresh_token=refreshtoken3&id_token=idtoken3&expires_in=3600", + auth: "Basic Y2wxM250MWQ6czNjcjN0", + }, + {path: "/secure", auth: "Bearer token3", body: "third payload"}, + { + path: 
"/token", + query: "grant_type=client_credentials&client_id=cl13nt1d", + contenttype: "application/json", + auth: "Basic Y2wxM250MWQ6czNjcjN0", + body: ` + { + "access_token":"token4", + "expires_in":3600 + } + `, + }, + {path: "/secure", auth: "Bearer token4", body: "fourth payload"}, +} + +func TestOAuth(t *testing.T) { + // Set up test server. + n := 0 + handler := func(w http.ResponseWriter, r *http.Request) { + if n >= len(requests) { + t.Errorf("too many requests: %d", n) + return + } + req := requests[n] + n++ + + // Check request. + if g, w := r.URL.Path, req.path; g != w { + t.Errorf("request[%d] got path %s, want %s", n, g, w) + } + want, _ := url.ParseQuery(req.query) + for k := range want { + if g, w := r.FormValue(k), want.Get(k); g != w { + t.Errorf("query[%s] = %s, want %s", k, g, w) + } + } + if g, w := r.Header.Get("Authorization"), req.auth; w != "" && g != w { + t.Errorf("Authorization: %v, want %v", g, w) + } + + // Send response. + w.Header().Set("Content-Type", req.contenttype) + io.WriteString(w, req.body) + } + server := httptest.NewServer(http.HandlerFunc(handler)) + defer server.Close() + + config := &Config{ + ClientId: "cl13nt1d", + ClientSecret: "s3cr3t", + Scope: "https://example.net/scope", + AuthURL: server.URL + "/auth", + TokenURL: server.URL + "/token", + } + + // TODO(adg): test AuthCodeURL + + transport := &Transport{Config: config} + _, err := transport.Exchange("c0d3") + if err != nil { + t.Fatalf("Exchange: %v", err) + } + checkToken(t, transport.Token, "token1", "refreshtoken1", "idtoken1") + + c := transport.Client() + resp, err := c.Get(server.URL + "/secure") + if err != nil { + t.Fatalf("Get: %v", err) + } + checkBody(t, resp, "first payload") + + // test automatic refresh + transport.Expiry = time.Now().Add(-time.Hour) + resp, err = c.Get(server.URL + "/secure") + if err != nil { + t.Fatalf("Get: %v", err) + } + checkBody(t, resp, "second payload") + checkToken(t, transport.Token, "token2", "refreshtoken2", "idtoken2") 
+ + // refresh one more time, but get URL-encoded token instead of JSON + transport.Expiry = time.Now().Add(-time.Hour) + resp, err = c.Get(server.URL + "/secure") + if err != nil { + t.Fatalf("Get: %v", err) + } + checkBody(t, resp, "third payload") + checkToken(t, transport.Token, "token3", "refreshtoken3", "idtoken3") + + transport.Token = &Token{} + err = transport.AuthenticateClient() + if err != nil { + t.Fatalf("AuthenticateClient: %v", err) + } + checkToken(t, transport.Token, "token4", "", "") + resp, err = c.Get(server.URL + "/secure") + if err != nil { + t.Fatalf("Get: %v", err) + } + checkBody(t, resp, "fourth payload") +} + +func checkToken(t *testing.T, tok *Token, access, refresh, id string) { + if g, w := tok.AccessToken, access; g != w { + t.Errorf("AccessToken = %q, want %q", g, w) + } + if g, w := tok.RefreshToken, refresh; g != w { + t.Errorf("RefreshToken = %q, want %q", g, w) + } + if g, w := tok.Extra["id_token"], id; g != w { + t.Errorf("Extra['id_token'] = %q, want %q", g, w) + } + if tok.Expiry.IsZero() { + t.Errorf("Expiry is zero; want ~1 hour") + } else { + exp := tok.Expiry.Sub(time.Now()) + const slop = 3 * time.Second // time moving during test + if (time.Hour-slop) > exp || exp > time.Hour { + t.Errorf("Expiry = %v, want ~1 hour", exp) + } + } +} + +func checkBody(t *testing.T, r *http.Response, body string) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("reading reponse body: %v, want %q", err, body) + } + if g, w := string(b), body; g != w { + t.Errorf("request body mismatch: got %q, want %q", g, w) + } +} + +func TestCachePermissions(t *testing.T) { + if runtime.GOOS == "windows" { + // Windows doesn't support file mode bits. 
+ return + } + + td, err := ioutil.TempDir("", "oauth-test") + if err != nil { + t.Fatalf("ioutil.TempDir: %v", err) + } + defer os.RemoveAll(td) + tempFile := filepath.Join(td, "cache-file") + + cf := CacheFile(tempFile) + if err := cf.PutToken(new(Token)); err != nil { + t.Fatalf("PutToken: %v", err) + } + fi, err := os.Stat(tempFile) + if err != nil { + t.Fatalf("os.Stat: %v", err) + } + if fi.Mode()&0077 != 0 { + t.Errorf("Created cache file has mode %#o, want non-accessible to group+other", fi.Mode()) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/.hgignore b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/.hgignore new file mode 100644 index 00000000..055f43c9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/.hgignore @@ -0,0 +1,29 @@ +syntax:glob +.DS_Store +.git +.gitignore +*.[568ao] +*.ao +*.so +*.pyc +._* +.nfs.* +[568a].out +*~ +*.orig +*.rej +*.exe +.*.swp +core +*.cgo*.go +*.cgo*.c +_cgo_* +_obj +_test +_testmain.go +build.out +test.out +y.tab.[ch] + +syntax:regexp +^.*/core.[0-9]*$ diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/AUTHORS b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/AUTHORS new file mode 100644 index 00000000..b7f7dc29 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/AUTHORS @@ -0,0 +1,12 @@ +# This is the official list of LevelDB-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Christoph Hack +Google Inc. 
diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/CONTRIBUTORS b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/CONTRIBUTORS new file mode 100644 index 00000000..e3b4f67b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/CONTRIBUTORS @@ -0,0 +1,31 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the LevelDB-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Brad Fitzpatrick +Christoph Hack +Nigel Tao diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/LICENSE new file mode 100644 index 00000000..fec05ce1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The LevelDB-Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/README b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/README new file mode 100644 index 00000000..79a059c0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/README @@ -0,0 +1,11 @@ +This is a LevelDB library for the Go programming language. 
+ +To download and install from source: +$ go get code.google.com/p/leveldb-go/leveldb + +Unless otherwise noted, the LevelDB-Go source files are distributed +under the BSD-style license found in the LICENSE file. + +Contributions should follow the same procedure as for the Go project: +http://golang.org/doc/contribute.html + diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/crc/crc.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/crc/crc.go new file mode 100644 index 00000000..b21aeab4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/crc/crc.go @@ -0,0 +1,35 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package crc implements the checksum algorithm used throughout leveldb. +// +// The algorithm is CRC-32 with Castagnoli's polynomial, followed by a bit +// rotation and an additional delta. The additional processing is to lessen +// the probability of arbitrary key/value data coincidental contains bytes +// that look like a checksum. +// +// To calculate the uint32 checksum of some data: +// var u uint32 = crc.New(data).Value() +// In leveldb, the uint32 value is then stored in little-endian format. 
+package crc + +import ( + "hash/crc32" +) + +var table = crc32.MakeTable(crc32.Castagnoli) + +type CRC uint32 + +func New(b []byte) CRC { + return CRC(0).Update(b) +} + +func (c CRC) Update(b []byte) CRC { + return CRC(crc32.Update(uint32(c), table, b)) +} + +func (c CRC) Value() uint32 { + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/db/comparer.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/db/comparer.go new file mode 100644 index 00000000..358526c4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/db/comparer.go @@ -0,0 +1,86 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package db + +import ( + "bytes" +) + +// Comparer defines a total ordering over the space of []byte keys: a 'less +// than' relationship. +type Comparer interface { + // Compare returns -1, 0, or +1 depending on whether a is 'less than', + // 'equal to' or 'greater than' b. The two arguments can only be 'equal' + // if their contents are exactly equal. Furthermore, the empty slice + // must be 'less than' any non-empty slice. + Compare(a, b []byte) int + + // AppendSeparator appends a sequence of bytes x to dst such that + // a <= x && x < b, where 'less than' is consistent with Compare. + // It returns the enlarged slice, like the built-in append function. + // + // Precondition: either a is 'less than' b, or b is an empty slice. + // In the latter case, empty means 'positive infinity', and appending any + // x such that a <= x will be valid. + // + // An implementation may simply be "return append(dst, a...)" but appending + // fewer bytes will result in smaller tables. 
+ // + // For example, if dst, a and b are the []byte equivalents of the strings + // "aqua", "black" and "blue", then the result may be "aquablb". + // Similarly, if the arguments were "aqua", "green" and "", then the result + // may be "aquah". + AppendSeparator(dst, a, b []byte) []byte +} + +// DefaultComparer is the default implementation of the Comparer interface. +// It uses the natural ordering, consistent with bytes.Compare. +var DefaultComparer Comparer = defCmp{} + +type defCmp struct{} + +func (defCmp) Compare(a, b []byte) int { + return bytes.Compare(a, b) +} + +func (defCmp) AppendSeparator(dst, a, b []byte) []byte { + i, n := SharedPrefixLen(a, b), len(dst) + dst = append(dst, a...) + if len(b) > 0 { + if i == len(a) { + return dst + } + if i == len(b) { + panic("a < b is a precondition, but b is a prefix of a") + } + if a[i] == 0xff || a[i]+1 >= b[i] { + // This isn't optimal, but it matches the C++ Level-DB implementation, and + // it's good enough. For example, if a is "1357" and b is "2", then the + // optimal (i.e. shortest) result is appending "14", but we append "1357". + return dst + } + } + i += n + for ; i < len(dst); i++ { + if dst[i] != 0xff { + dst[i]++ + return dst[:i+1] + } + } + return dst +} + +// SharedPrefixLen returns the largest i such that a[:i] equals b[:i]. +// This function can be useful in implementing the Comparer interface. 
+func SharedPrefixLen(a, b []byte) int { + i, n := 0, len(a) + if n > len(b) { + n = len(b) + } + for i < n && a[i] == b[i] { + i++ + } + return i +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/db/comparer_test.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/db/comparer_test.go new file mode 100644 index 00000000..65ed049c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/db/comparer_test.go @@ -0,0 +1,50 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package db + +import ( + "testing" +) + +func TestDefCmp(t *testing.T) { + testCases := []struct { + a, b, want string + }{ + // Examples from the doc comments. + {"black", "blue", "blb"}, + {"green", "", "h"}, + // Non-empty b values. The C++ Level-DB code calls these separators. + {"", "2", ""}, + {"1", "2", "1"}, + {"1", "29", "1"}, + {"13", "19", "14"}, + {"13", "99", "2"}, + {"135", "19", "14"}, + {"1357", "19", "14"}, + {"1357", "2", "1357"}, + {"13\xff", "14", "13\xff"}, + {"13\xff", "19", "14"}, + {"1\xff\xff", "19", "1\xff\xff"}, + {"1\xff\xff", "2", "1\xff\xff"}, + {"1\xff\xff", "9", "2"}, + // Empty b values. The C++ Level-DB code calls these successors. 
+ {"", "", ""}, + {"1", "", "2"}, + {"11", "", "2"}, + {"11\xff", "", "2"}, + {"1\xff", "", "2"}, + {"1\xff\xff", "", "2"}, + {"\xff", "", "\xff"}, + {"\xff\xff", "", "\xff\xff"}, + {"\xff\xff\xff", "", "\xff\xff\xff"}, + } + for _, tc := range testCases { + const s = "pqrs" + got := string(DefaultComparer.AppendSeparator([]byte(s), []byte(tc.a), []byte(tc.b))) + if got != s+tc.want { + t.Errorf("a, b = %q, %q: got %q, want %q", tc.a, tc.b, got, s+tc.want) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/db/db.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/db/db.go new file mode 100644 index 00000000..e591d77d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/db/db.go @@ -0,0 +1,121 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package db defines the interfaces for a key/value store. +// +// A DB's basic operations (Get, Set, Delete) should be self-explanatory. Get +// and Delete will return ErrNotFound if the requested key is not in the store. +// Callers are free to ignore this error. +// +// A DB also allows for iterating over the key/value pairs in key order. If d +// is a DB, the code below prints all key/value pairs whose keys are 'greater +// than or equal to' k: +// +// iter := d.Find(k) +// for iter.Next() { +// fmt.Printf("key=%q value=%q\n", iter.Key(), iter.Value()) +// } +// return iter.Close() +// +// Other leveldb packages provide implementations of these interfaces. The +// Options struct in this package holds the optional parameters for these +// implementations, including a Comparer to define a 'less than' relationship +// over keys. It is always valid to pass a nil *Options, which means to use +// the default parameter values. 
Any zero field of a non-nil *Options also +// means to use the default value for that parameter. Thus, the code below +// uses a custom Comparer, but the default values for every other parameter: +// +// db := memdb.New(&db.Options{ +// Comparer: myComparer, +// }) +package db + +import ( + "errors" +) + +// ErrNotFound means that a get or delete call did not find the requested key. +var ErrNotFound = errors.New("leveldb/db: not found") + +// Iterator iterates over a DB's key/value pairs in key order. +// +// An iterator must be closed after use, but it is not necessary to read an +// iterator until exhaustion. +// +// An iterator is not necessarily goroutine-safe, but it is safe to use +// multiple iterators concurrently, with each in a dedicated goroutine. +// +// It is also safe to use an iterator concurrently with modifying its +// underlying DB, if that DB permits modification. However, the resultant +// key/value pairs are not guaranteed to be a consistent snapshot of that DB +// at a particular point in time. +type Iterator interface { + // Next moves the iterator to the next key/value pair. + // It returns whether the iterator is exhausted. + Next() bool + + // Key returns the key of the current key/value pair, or nil if done. + // The caller should not modify the contents of the returned slice, and + // its contents may change on the next call to Next. + Key() []byte + + // Value returns the value of the current key/value pair, or nil if done. + // The caller should not modify the contents of the returned slice, and + // its contents may change on the next call to Next. + Value() []byte + + // Close closes the iterator and returns any accumulated error. Exhausting + // all the key/value pairs in a table is not considered to be an error. + // It is valid to call Close multiple times. Other methods should not be + // called after the iterator has been closed. + Close() error +} + +// DB is a key/value store. 
+// +// It is safe to call Get and Find from concurrent goroutines. It is not +// necessarily safe to do so for Set and Delete. +// +// Some implementations may impose additional restrictions. For example: +// - Set calls may need to be in increasing key order. +// - a DB may be read-only or write-only. +type DB interface { + // Get gets the value for the given key. It returns ErrNotFound if the DB + // does not contain the key. + // + // The caller should not modify the contents of the returned slice, but + // it is safe to modify the contents of the argument after Get returns. + Get(key []byte, o *ReadOptions) (value []byte, err error) + + // Set sets the value for the given key. It overwrites any previous value + // for that key; a DB is not a multi-map. + // + // It is safe to modify the contents of the arguments after Set returns. + Set(key, value []byte, o *WriteOptions) error + + // Delete deletes the value for the given key. It returns ErrNotFound if + // the DB does not contain the key. + // + // It is safe to modify the contents of the arguments after Delete returns. + Delete(key []byte, o *WriteOptions) error + + // Find returns an iterator positioned before the first key/value pair + // whose key is 'greater than or equal to' the given key. There may be no + // such pair, in which case the iterator will return false on Next. + // + // Any error encountered will be implicitly returned via the iterator. An + // error-iterator will yield no key/value pairs and closing that iterator + // will return that error. + // + // It is safe to modify the contents of the argument after Find returns. + Find(key []byte, o *ReadOptions) Iterator + + // Close closes the DB. It may or may not close any underlying io.Reader + // or io.Writer, depending on how the DB was created. + // + // It is not safe to close a DB until all outstanding iterators are closed. + // It is valid to call Close multiple times. 
Other methods should not be + // called after the DB has been closed. + Close() error +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/db/options.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/db/options.go new file mode 100644 index 00000000..5018b6ee --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/db/options.go @@ -0,0 +1,132 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package db + +// Compression is the per-block compression algorithm to use. +type Compression int + +const ( + DefaultCompression Compression = iota + NoCompression + SnappyCompression + nCompression +) + +// Options holds the optional parameters for leveldb's DB implementations. +// These options apply to the DB at large; per-query options are defined by +// the ReadOptions and WriteOptions types. +// +// Options are typically passed to a constructor function as a struct literal. +// The GetXxx methods are used inside the DB implementations; they return the +// default parameter value if the *Options receiver is nil or the field value +// is zero. +// +// Read/Write options: +// - Comparer +// Read options: +// - VerifyChecksums +// Write options: +// - BlockRestartInterval +// - BlockSize +// - Compression +type Options struct { + // BlockRestartInterval is the number of keys between restart points + // for delta encoding of keys. + // + // The default value is 16. + BlockRestartInterval int + + // BlockSize is the minimum uncompressed size in bytes of each table block. + // + // The default value is 4096. + BlockSize int + + // Comparer defines a total ordering over the space of []byte keys: a 'less + // than' relationship. 
The same comparison algorithm must be used for reads + // and writes over the lifetime of the DB. + // + // The default value uses the same ordering as bytes.Compare. + Comparer Comparer + + // Compression defines the per-block compression to use. + // + // The default value (DefaultCompression) uses snappy compression. + Compression Compression + + // VerifyChecksums is whether to verify the per-block checksums in a DB. + // + // The default value is false. + VerifyChecksums bool +} + +func (o *Options) GetBlockRestartInterval() int { + if o == nil || o.BlockRestartInterval <= 0 { + return 16 + } + return o.BlockRestartInterval +} + +func (o *Options) GetBlockSize() int { + if o == nil || o.BlockSize <= 0 { + return 4096 + } + return o.BlockSize +} + +func (o *Options) GetComparer() Comparer { + if o == nil || o.Comparer == nil { + return DefaultComparer + } + return o.Comparer +} + +func (o *Options) GetCompression() Compression { + if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { + // Default to SnappyCompression. + return SnappyCompression + } + return o.Compression +} + +func (o *Options) GetVerifyChecksums() bool { + if o == nil { + return false + } + return o.VerifyChecksums +} + +// ReadOptions hold the optional per-query parameters for Get and Find +// operations. +// +// Like Options, a nil *ReadOptions is valid and means to use the default +// values. +type ReadOptions struct { + // No fields so far. +} + +// WriteOptions hold the optional per-query parameters for Set and Delete +// operations. +// +// Like Options, a nil *WriteOptions is valid and means to use the default +// values. +type WriteOptions struct { + // Sync is whether to sync underlying writes from the OS buffer cache + // through to actual disk, if applicable. Setting Sync can result in + // slower writes. + // + // If false, and the machine crashes, then some recent writes may be lost. 
+ // Note that if it is just the process that crashes (and the machine does + // not) then no writes will be lost. + // + // In other words, Sync being false has the same semantics as a write + // system call. Sync being true means write followed by fsync. + // + // The default value is false. + Sync bool +} + +func (o *WriteOptions) GetSync() bool { + return o != nil && o.Sync +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/leveldb.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/leveldb.go new file mode 100644 index 00000000..16c04c2b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/leveldb.go @@ -0,0 +1,18 @@ +// Copyright 2012 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package leveldb provides an ordered key/value store. +// +// BUG: This package is incomplete. +package leveldb + +// This file is a placeholder for listing import dependencies. + +import ( + _ "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/crc" + _ "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/db" + _ "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/memdb" + _ "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/record" + _ "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/table" +) diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/memdb/memdb.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/memdb/memdb.go new file mode 100644 index 00000000..3e41c047 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/memdb/memdb.go @@ -0,0 +1,318 @@ +// Copyright 2011 The LevelDB-Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package memdb provides a memory-backed implementation of the db.DB +// interface. +// +// A MemDB's memory consumption increases monotonically, even if keys are +// deleted or values are updated with shorter slices. Callers of the package +// are responsible for explicitly compacting a MemDB into a separate DB +// (whether in-memory or on-disk) when appropriate. +package memdb + +import ( + "encoding/binary" + "math/rand" + "sync" + + "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/db" +) + +// maxHeight is the maximum height of a MemDB's skiplist. +const maxHeight = 12 + +// A MemDB's skiplist consists of a number of nodes, and each node is +// represented by a variable number of ints: a key-offset, a value-offset, and +// between 1 and maxHeight next nodes. The key-offset and value-offset encode +// the node's key/value pair and are offsets into a MemDB's kvData slice. +// The remaining ints, for the next nodes in the skiplist's linked lists, are +// offsets into a MemDB's nodeData slice. +// +// The fXxx constants represent how to find the Xxx field of a node in the +// nodeData. For example, given an int 30 representing a node, and given +// nodeData[30:36] that looked like [60, 71, 82, 83, 84, 85], then +// nodeData[30 + fKey] = 60 would be the node's key-offset, +// nodeData[30 + fVal] = 71 would be the node's value-offset, and +// nodeData[30 + fNxt + 0] = 82 would be the next node at the height-0 list, +// nodeData[30 + fNxt + 1] = 83 would be the next node at the height-1 list, +// and so on. A node's height is implied by the skiplist construction: a node +// of height x appears in the height-h list iff 0 <= h && h < x. +const ( + fKey = iota + fVal + fNxt +) + +const ( + // zeroNode represents the end of a linked list. + zeroNode = 0 + // headNode represents the start of the linked list. 
It is equal to -fNxt + // so that the next nodes at height-h are at nodeData[h]. + // The head node is an artificial node and has no key or value. + headNode = -fNxt +) + +// A node's key-offset and value-offset fields are offsets into a MemDB's +// kvData slice that stores varint-prefixed strings: the node's key and value. +// A negative offset means a zero-length string, whether explicitly set to +// empty or implicitly set by deletion. +const ( + kvOffsetEmptySlice = -1 + kvOffsetDeletedNode = -2 +) + +// MemDB is a memory-backed implementation of the db.DB interface. +// +// It is safe to call Get, Set, Delete and Find concurrently. +type MemDB struct { + mutex sync.RWMutex + // height is the number of such lists, which can increase over time. + height int + // cmp defines an ordering on keys. + cmp db.Comparer + // kvData is an append-only buffer that holds varint-prefixed strings. + kvData []byte + // nodeData is an append-only buffer that holds a node's fields. + nodeData []int +} + +// MemDB implements the db.DB interface. +var _ db.DB = &MemDB{} + +// load loads a []byte from m.kvData. +func (m *MemDB) load(kvOffset int) (b []byte) { + if kvOffset < 0 { + return nil + } + bLen, n := binary.Uvarint(m.kvData[kvOffset:]) + return m.kvData[kvOffset+n : kvOffset+n+int(bLen)] +} + +// save saves a []byte to m.kvData. +func (m *MemDB) save(b []byte) (kvOffset int) { + if len(b) == 0 { + return kvOffsetEmptySlice + } + kvOffset = len(m.kvData) + var buf [binary.MaxVarintLen64]byte + length := binary.PutUvarint(buf[:], uint64(len(b))) + m.kvData = append(m.kvData, buf[:length]...) + m.kvData = append(m.kvData, b...) + return kvOffset +} + +// findNode returns the first node n whose key is >= the given key (or nil if +// there is no such node) and whether n's key equals key. The search is based +// solely on the contents of a node's key. Whether or not that key was +// previously deleted from the MemDB is not relevant. 
+// +// If prev is non-nil, it also sets the first m.height elements of prev to the +// preceding node at each height. +func (m *MemDB) findNode(key []byte, prev *[maxHeight]int) (n int, exactMatch bool) { + for h, p := m.height-1, headNode; h >= 0; h-- { + // Walk the skiplist at height h until we find either a zero node + // or one whose key is >= the given key. + n = m.nodeData[p+fNxt+h] + for { + if n == zeroNode { + exactMatch = false + break + } + kOff := m.nodeData[n+fKey] + if c := m.cmp.Compare(m.load(kOff), key); c >= 0 { + exactMatch = c == 0 + break + } + p, n = n, m.nodeData[n+fNxt+h] + } + if prev != nil { + (*prev)[h] = p + } + } + return n, exactMatch +} + +// Get implements DB.Get, as documented in the leveldb/db package. +func (m *MemDB) Get(key []byte, o *db.ReadOptions) (value []byte, err error) { + m.mutex.RLock() + defer m.mutex.RUnlock() + n, exactMatch := m.findNode(key, nil) + vOff := m.nodeData[n+fVal] + if !exactMatch || vOff == kvOffsetDeletedNode { + return nil, db.ErrNotFound + } + return m.load(vOff), nil +} + +// Set implements DB.Set, as documented in the leveldb/db package. +func (m *MemDB) Set(key, value []byte, o *db.WriteOptions) error { + m.mutex.Lock() + defer m.mutex.Unlock() + // Find the node, and its predecessors at all heights. + var prev [maxHeight]int + n, exactMatch := m.findNode(key, &prev) + if exactMatch { + m.nodeData[n+fVal] = m.save(value) + return nil + } + // Choose the new node's height, branching with 25% probability. + h := 1 + for h < maxHeight && rand.Intn(4) == 0 { + h++ + } + // Raise the skiplist's height to the node's height, if necessary. + if m.height < h { + for i := m.height; i < h; i++ { + prev[i] = headNode + } + m.height = h + } + // Insert the new node. 
+ var x [fNxt + maxHeight]int + n1 := len(m.nodeData) + x[fKey] = m.save(key) + x[fVal] = m.save(value) + for i := 0; i < h; i++ { + j := prev[i] + fNxt + i + x[fNxt+i] = m.nodeData[j] + m.nodeData[j] = n1 + } + m.nodeData = append(m.nodeData, x[:fNxt+h]...) + return nil +} + +// Delete implements DB.Delete, as documented in the leveldb/db package. +func (m *MemDB) Delete(key []byte, o *db.WriteOptions) error { + m.mutex.Lock() + defer m.mutex.Unlock() + n, exactMatch := m.findNode(key, nil) + if !exactMatch || m.nodeData[n+fVal] == kvOffsetDeletedNode { + return db.ErrNotFound + } + m.nodeData[n+fVal] = kvOffsetDeletedNode + return nil +} + +// Find implements DB.Find, as documented in the leveldb/db package. +func (m *MemDB) Find(key []byte, o *db.ReadOptions) db.Iterator { + m.mutex.RLock() + defer m.mutex.RUnlock() + n, _ := m.findNode(key, nil) + for n != zeroNode && m.nodeData[n+fVal] == kvOffsetDeletedNode { + n = m.nodeData[n+fNxt] + } + t := &iterator{ + m: m, + restartNode: n, + } + t.fill() + // The iterator is positioned at the first node >= key. The iterator API + // requires that the caller the Next first, so we set t.i0 to -1. + t.i0 = -1 + return t +} + +// Close implements DB.Close, as documented in the leveldb/db package. +func (m *MemDB) Close() error { + return nil +} + +// ApproximateMemoryUsage returns the approximate memory usage of the MemDB. +func (m *MemDB) ApproximateMemoryUsage() int { + m.mutex.RLock() + defer m.mutex.RUnlock() + return len(m.kvData) +} + +// New returns a new MemDB. +func New(o *db.Options) *MemDB { + return &MemDB{ + height: 1, + cmp: o.GetComparer(), + kvData: make([]byte, 0, 4096), + // The first maxHeight values of nodeData are the next nodes after the + // head node at each possible height. Their initial value is zeroNode. + nodeData: make([]int, maxHeight, 256), + } +} + +// iterator is a MemDB iterator that buffers upcoming results, so that it does +// not have to acquire the MemDB's mutex on each Next call. 
+type iterator struct { + m *MemDB + // restartNode is the node to start refilling the buffer from. + restartNode int + // i0 is the current iterator position with respect to buf. A value of -1 + // means that the iterator is at the start, end or both of the iteration. + // i1 is the number of buffered entries. + // Invariant: -1 <= i0 && i0 < i1 && i1 <= len(buf). + i0, i1 int + // buf buffers up to 32 key/value pairs. + buf [32][2][]byte +} + +// iterator implements the db.Iterator interface. +var _ db.Iterator = &iterator{} + +// fill fills the iterator's buffer with key/value pairs from the MemDB. +// +// Precondition: t.m.mutex is locked for reading. +func (t *iterator) fill() { + i, n := 0, t.restartNode + for i < len(t.buf) && n != zeroNode { + if t.m.nodeData[n+fVal] != kvOffsetDeletedNode { + t.buf[i][fKey] = t.m.load(t.m.nodeData[n+fKey]) + t.buf[i][fVal] = t.m.load(t.m.nodeData[n+fVal]) + i++ + } + n = t.m.nodeData[n+fNxt] + } + if i == 0 { + // There were no non-deleted nodes on or after t.restartNode. + // The iterator is exhausted. + t.i0 = -1 + } else { + t.i0 = 0 + } + t.i1 = i + t.restartNode = n +} + +// Next implements Iterator.Next, as documented in the leveldb/db package. +func (t *iterator) Next() bool { + t.i0++ + if t.i0 < t.i1 { + return true + } + if t.restartNode == zeroNode { + t.i0 = -1 + t.i1 = 0 + return false + } + t.m.mutex.RLock() + defer t.m.mutex.RUnlock() + t.fill() + return true +} + +// Key implements Iterator.Key, as documented in the leveldb/db package. +func (t *iterator) Key() []byte { + if t.i0 < 0 { + return nil + } + return t.buf[t.i0][fKey] +} + +// Value implements Iterator.Value, as documented in the leveldb/db package. +func (t *iterator) Value() []byte { + if t.i0 < 0 { + return nil + } + return t.buf[t.i0][fVal] +} + +// Close implements Iterator.Close, as documented in the leveldb/db package. 
+func (t *iterator) Close() error { + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/memdb/memdb_test.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/memdb/memdb_test.go new file mode 100644 index 00000000..ba3416e3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/memdb/memdb_test.go @@ -0,0 +1,222 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package memdb + +import ( + "fmt" + "math/rand" + "strconv" + "strings" + "testing" + + "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/db" +) + +// count returns the number of entries in a DB. +func count(d db.DB) (n int) { + x := d.Find(nil, nil) + for x.Next() { + n++ + } + if x.Close() != nil { + return -1 + } + return n +} + +// compact compacts a MemDB. +func compact(m *MemDB) (*MemDB, error) { + n, x := New(nil), m.Find(nil, nil) + for x.Next() { + if err := n.Set(x.Key(), x.Value(), nil); err != nil { + return nil, err + } + } + if err := x.Close(); err != nil { + return nil, err + } + return n, nil +} + +func TestBasic(t *testing.T) { + // Check the empty DB. + m := New(nil) + if got, want := count(m), 0; got != want { + t.Fatalf("0.count: got %v, want %v", got, want) + } + v, err := m.Get([]byte("cherry"), nil) + if string(v) != "" || err != db.ErrNotFound { + t.Fatalf("1.get: got (%q, %v), want (%q, %v)", v, err, "", db.ErrNotFound) + } + // Add some key/value pairs. 
+ m.Set([]byte("cherry"), []byte("red"), nil) + m.Set([]byte("peach"), []byte("yellow"), nil) + m.Set([]byte("grape"), []byte("red"), nil) + m.Set([]byte("grape"), []byte("green"), nil) + m.Set([]byte("plum"), []byte("purple"), nil) + if got, want := count(m), 4; got != want { + t.Fatalf("2.count: got %v, want %v", got, want) + } + // Delete a key twice. + if got, want := m.Delete([]byte("grape"), nil), error(nil); got != want { + t.Fatalf("3.delete: got %v, want %v", got, want) + } + if got, want := m.Delete([]byte("grape"), nil), db.ErrNotFound; got != want { + t.Fatalf("4.delete: got %v, want %v", got, want) + } + if got, want := count(m), 3; got != want { + t.Fatalf("5.count: got %v, want %v", got, want) + } + // Get keys that are and aren't in the DB. + v, err = m.Get([]byte("plum"), nil) + if string(v) != "purple" || err != nil { + t.Fatalf("6.get: got (%q, %v), want (%q, %v)", v, err, "purple", error(nil)) + } + v, err = m.Get([]byte("lychee"), nil) + if string(v) != "" || err != db.ErrNotFound { + t.Fatalf("7.get: got (%q, %v), want (%q, %v)", v, err, "", db.ErrNotFound) + } + // Check an iterator. + s, x := "", m.Find([]byte("mango"), nil) + for x.Next() { + s += fmt.Sprintf("%s/%s.", x.Key(), x.Value()) + } + if want := "peach/yellow.plum/purple."; s != want { + t.Fatalf("8.iter: got %q, want %q", s, want) + } + if err = x.Close(); err != nil { + t.Fatalf("9.close: %v", err) + } + // Check some more sets and deletes. + if got, want := m.Delete([]byte("cherry"), nil), error(nil); got != want { + t.Fatalf("10.delete: got %v, want %v", got, want) + } + if got, want := count(m), 2; got != want { + t.Fatalf("11.count: got %v, want %v", got, want) + } + if err := m.Set([]byte("apricot"), []byte("orange"), nil); err != nil { + t.Fatalf("12.set: %v", err) + } + if got, want := count(m), 3; got != want { + t.Fatalf("13.count: got %v, want %v", got, want) + } + // Clean up. 
+ if err := m.Close(); err != nil { + t.Fatalf("14.close: %v", err) + } +} + +func TestCount(t *testing.T) { + m := New(nil) + for i := 0; i < 200; i++ { + if j := count(m); j != i { + t.Fatalf("count: got %d, want %d", j, i) + } + m.Set([]byte{byte(i)}, nil, nil) + } + if err := m.Close(); err != nil { + t.Fatal(err) + } +} + +func Test1000Entries(t *testing.T) { + // Initialize the DB. + const N = 1000 + m0 := New(nil) + for i := 0; i < N; i++ { + k := []byte(strconv.Itoa(i)) + v := []byte(strings.Repeat("x", i)) + m0.Set(k, v, nil) + } + // Delete one third of the entries, update another third, + // and leave the last third alone. + for i := 0; i < N; i++ { + switch i % 3 { + case 0: + k := []byte(strconv.Itoa(i)) + m0.Delete(k, nil) + case 1: + k := []byte(strconv.Itoa(i)) + v := []byte(strings.Repeat("y", i)) + m0.Set(k, v, nil) + case 2: + // No-op. + } + } + // Check the DB count. + if got, want := count(m0), 666; got != want { + t.Fatalf("count: got %v, want %v", got, want) + } + // Check random-access lookup. + r := rand.New(rand.NewSource(0)) + for i := 0; i < 3*N; i++ { + j := r.Intn(N) + k := []byte(strconv.Itoa(j)) + v, err := m0.Get(k, nil) + var c uint8 + if len(v) != 0 { + c = v[0] + } + switch j % 3 { + case 0: + if err != db.ErrNotFound { + t.Fatalf("get: j=%d, got err=%v, want %v", j, err, db.ErrNotFound) + } + case 1: + if len(v) != j || c != 'y' { + t.Fatalf("get: j=%d, got len(v),c=%d,%c, want %d,%c", j, len(v), c, j, 'y') + } + case 2: + if len(v) != j || c != 'x' { + t.Fatalf("get: j=%d, got len(v),c=%d,%c, want %d,%c", j, len(v), c, j, 'x') + } + } + } + // Check that iterating through the middle of the DB looks OK. + // Keys are in lexicographic order, not numerical order. + // Multiples of 3 are not present. 
+ wants := []string{ + "499", + "5", + "50", + "500", + "502", + "503", + "505", + "506", + "508", + "509", + "511", + } + x := m0.Find([]byte(wants[0]), nil) + for _, want := range wants { + if !x.Next() { + t.Fatalf("iter: next failed, want=%q", want) + } + if got := string(x.Key()); got != want { + t.Fatalf("iter: got %q, want %q", got, want) + } + } + if err := x.Close(); err != nil { + t.Fatalf("close: %v", err) + } + // Check that compaction reduces memory usage by at least one third. + amu0 := m0.ApproximateMemoryUsage() + if amu0 == 0 { + t.Fatalf("compact: memory usage is zero") + } + m1, err := compact(m0) + if err != nil { + t.Fatalf("compact: %v", err) + } + amu1 := m1.ApproximateMemoryUsage() + if ratio := float64(amu1) / float64(amu0); ratio > 0.667 { + t.Fatalf("compact: memory usage before=%d, after=%d, ratio=%f", amu0, amu1, ratio) + } + // Clean up. + if err := m0.Close(); err != nil { + t.Fatalf("close: %v", err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/record/record.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/record/record.go new file mode 100644 index 00000000..cc5fed95 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/record/record.go @@ -0,0 +1,377 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package record reads and writes sequences of records. Each record is a stream +// of bytes that completes before the next record starts. +// +// When reading, call Next to obtain an io.Reader for the next record. Next will +// return io.EOF when there are no more records. It is valid to call Next +// without reading the current record to exhaustion. +// +// When writing, call Next to obtain an io.Writer for the next record. 
Calling +// Next finishes the current record. Call Close to finish the final record. +// +// Optionally, call Flush to finish the current record and flush the underlying +// writer without starting a new record. To start a new record after flushing, +// call Next. +// +// Neither Readers or Writers are safe to use concurrently. +// +// Example code: +// func read(r io.Reader) ([]string, error) { +// var ss []string +// sr := record.NewReader(r) +// for { +// err := sr.Next() +// if err == io.EOF { +// break +// } +// if err != nil { +// return nil, err +// } +// s, err := ioutil.ReadAll(sr) +// if err != nil { +// return nil, err +// } +// ss = append(ss, string(s)) +// } +// return ss, nil +// } +// +// func write(w io.Writer, ss []string) error { +// sw := record.NewWriter(w) +// for _, s := range ss { +// sw.Next() +// if _, err := sw.Write([]byte(s)), err != nil { +// return err +// } +// } +// return sw.Close() +// } +// +// The wire format is that the stream is divided into 32KiB blocks, and each +// block contains a number of tightly packed chunks. Chunks cannot cross block +// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a +// block must be zero. +// +// A record maps to one or more chunks. Each chunk has a 7 byte header (a 4 +// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type) +// followed by a payload. The checksum is over the chunk type and the payload. +// +// There are four chunk types: whether the chunk is the full record, or the +// first, middle or last chunk of a multi-chunk record. A multi-chunk record +// has one first chunk, zero or more middle chunks, and one last chunk. +// +// The wire format allows for limited recovery in the face of data corruption: +// on a format error (such as a checksum mismatch), the reader moves to the +// next block and looks for the next full or first chunk. +package record + +// TODO: implement the recovery algorithm. 
+ +// The C++ Level-DB code calls this the log, but it has been renamed to record +// to avoid clashing with the standard log package, and because it is generally +// useful outside of logging. The C++ code also uses the term "physical record" +// instead of "chunk", but "chunk" is shorter and less confusing. + +import ( + "encoding/binary" + "errors" + "io" + + "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/crc" +) + +// These constants are part of the wire format and should not be changed. +const ( + fullChunkType = 1 + firstChunkType = 2 + middleChunkType = 3 + lastChunkType = 4 +) + +const ( + blockSize = 32 * 1024 + headerSize = 7 +) + +type flusher interface { + Flush() error +} + +// Reader reads records from an underlying io.Reader. +type Reader struct { + // r is the underlying reader. + r io.Reader + // seq is the sequence number of the current record. + seq int + // buf[i:j] is the unread portion of the current chunk's payload. + // The low bound, i, excludes the chunk header. + i, j int + // n is the number of bytes of buf that are valid. Once reading has started, + // only the final block can have n < blockSize. + n int + // started is whether Next has been called at all. + started bool + // last is whether the current chunk is the last chunk of the record. + last bool + // err is any accumulated error. + err error + // buf is the buffer. + buf [blockSize]byte +} + +// NewReader returns a new reader. +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + } +} + +// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the +// next block into the buffer if necessary. 
+func (r *Reader) nextChunk(wantFirst bool) error { + for { + if r.j+headerSize <= r.n { + checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) + length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) + chunkType := int(r.buf[r.j+6]) + + r.i = r.j + headerSize + r.j = r.j + headerSize + int(length) + if r.j > r.n { + return errors.New("leveldb/record: invalid chunk (length overflows block)") + } + if checksum != crc.New(r.buf[r.i-1:r.j]).Value() { + return errors.New("leveldb/record: invalid chunk (checksum mismatch)") + } + if wantFirst { + if chunkType != fullChunkType && chunkType != firstChunkType { + continue + } + } + r.last = chunkType == fullChunkType || chunkType == lastChunkType + return nil + } + if r.n < blockSize && r.started { + if r.j != r.n { + return io.ErrUnexpectedEOF + } + return io.EOF + } + n, err := io.ReadFull(r.r, r.buf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + r.i, r.j, r.n = 0, 0, n + } + panic("unreachable") +} + +// Next returns a reader for the next record. It returns io.EOF if there are no +// more records. The reader returned becomes stale after the next Next call, +// and should no longer be used. +func (r *Reader) Next() (io.Reader, error) { + r.seq++ + if r.err != nil { + return nil, r.err + } + r.i = r.j + r.err = r.nextChunk(true) + if r.err != nil { + return nil, r.err + } + r.started = true + return singleReader{r, r.seq}, nil +} + +type singleReader struct { + r *Reader + seq int +} + +func (x singleReader) Read(p []byte) (int, error) { + r := x.r + if r.seq != x.seq { + return 0, errors.New("leveldb/record: stale reader") + } + if r.err != nil { + return 0, r.err + } + for r.i == r.j { + if r.last { + return 0, io.EOF + } + if r.err = r.nextChunk(false); r.err != nil { + return 0, r.err + } + } + n := copy(p, r.buf[r.i:r.j]) + r.i += n + return n, nil +} + +// Writer writes records to an underlying io.Writer. +type Writer struct { + // w is the underlying writer. 
+ w io.Writer + // seq is the sequence number of the current record. + seq int + // f is w as a flusher. + f flusher + // buf[i:j] is the bytes that will become the current chunk. + // The low bound, i, includes the chunk header. + i, j int + // buf[:written] has already been written to w. + // written is zero unless Flush has been called. + written int + // first is whether the current chunk is the first chunk of the record. + first bool + // pending is whether a chunk is buffered but not yet written. + pending bool + // err is any accumulated error. + err error + // buf is the buffer. + buf [blockSize]byte +} + +// NewWriter returns a new Writer. +func NewWriter(w io.Writer) *Writer { + f, _ := w.(flusher) + return &Writer{ + w: w, + f: f, + } +} + +// fillHeader fills in the header for the pending chunk. +func (w *Writer) fillHeader(last bool) { + if w.i+headerSize > w.j || w.j > blockSize { + panic("leveldb/record: bad writer state") + } + if last { + if w.first { + w.buf[w.i+6] = fullChunkType + } else { + w.buf[w.i+6] = lastChunkType + } + } else { + if w.first { + w.buf[w.i+6] = firstChunkType + } else { + w.buf[w.i+6] = middleChunkType + } + } + binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], crc.New(w.buf[w.i+6:w.j]).Value()) + binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) +} + +// writeBlock writes the buffered block to the underlying writer, and reserves +// space for the next chunk's header. +func (w *Writer) writeBlock() { + _, w.err = w.w.Write(w.buf[w.written:]) + w.i = 0 + w.j = headerSize + w.written = 0 +} + +// writePending finishes the current record and writes the buffer to the +// underlying writer. +func (w *Writer) writePending() { + if w.err != nil { + return + } + if w.pending { + w.fillHeader(true) + w.pending = false + } + _, w.err = w.w.Write(w.buf[w.written:w.j]) + w.written = w.j +} + +// Close finishes the current record and closes the writer. 
+func (w *Writer) Close() error { + w.seq++ + w.writePending() + if w.err != nil { + return w.err + } + w.err = errors.New("leveldb/record: closed Writer") + return nil +} + +// Flush finishes the current record, writes to the underlying writer, and +// flushes it if that writer implements interface{ Flush() error }. +func (w *Writer) Flush() error { + w.seq++ + w.writePending() + if w.err != nil { + return w.err + } + if w.f != nil { + w.err = w.f.Flush() + return w.err + } + return nil +} + +// Next returns a writer for the next record. The writer returned becomes stale +// after the next Close, Flush or Next call, and should no longer be used. +func (w *Writer) Next() (io.Writer, error) { + w.seq++ + if w.err != nil { + return nil, w.err + } + if w.pending { + w.fillHeader(true) + } + w.i = w.j + w.j = w.j + headerSize + // Check if there is room in the block for the header. + if w.j > blockSize { + // Fill in the rest of the block with zeroes. + for k := w.i; k < blockSize; k++ { + w.buf[k] = 0 + } + w.writeBlock() + if w.err != nil { + return nil, w.err + } + } + w.first = true + w.pending = true + return singleWriter{w, w.seq}, nil +} + +type singleWriter struct { + w *Writer + seq int +} + +func (x singleWriter) Write(p []byte) (int, error) { + w := x.w + if w.seq != x.seq { + return 0, errors.New("leveldb/record: stale writer") + } + if w.err != nil { + return 0, w.err + } + n0 := len(p) + for len(p) > 0 { + // Write a block, if it is full. + if w.j == blockSize { + w.fillHeader(false) + w.writeBlock() + if w.err != nil { + return 0, w.err + } + w.first = false + } + // Copy bytes into the buffer. 
+ n := copy(w.buf[w.j:], p) + w.j += n + p = p[n:] + } + return n0, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/record/record_test.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/record/record_test.go new file mode 100644 index 00000000..af034e75 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/record/record_test.go @@ -0,0 +1,314 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package record + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "strings" + "testing" +) + +func short(s string) string { + if len(s) < 64 { + return s + } + return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:]) +} + +// big returns a string of length n, composed of repetitions of partial. 
+func big(partial string, n int) string { + return strings.Repeat(partial, n/len(partial)+1)[:n] +} + +func TestEmpty(t *testing.T) { + buf := new(bytes.Buffer) + r := NewReader(buf) + if _, err := r.Next(); err != io.EOF { + t.Fatalf("got %v, want %v", err, io.EOF) + } +} + +func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) { + buf := new(bytes.Buffer) + + reset() + w := NewWriter(buf) + for { + s, ok := gen() + if !ok { + break + } + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write([]byte(s)); err != nil { + t.Fatal(err) + } + } + if err := w.Close(); err != nil { + t.Fatal(err) + } + + reset() + r := NewReader(buf) + for { + s, ok := gen() + if !ok { + break + } + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + x, err := ioutil.ReadAll(rr) + if err != nil { + t.Fatal(err) + } + if string(x) != s { + t.Fatalf("got %q, want %q", short(string(x)), short(s)) + } + } + if _, err := r.Next(); err != io.EOF { + t.Fatalf("got %v, want %v", err, io.EOF) + } +} + +func testLiterals(t *testing.T, s []string) { + var i int + reset := func() { + i = 0 + } + gen := func() (string, bool) { + if i == len(s) { + return "", false + } + i++ + return s[i-1], true + } + testGenerator(t, reset, gen) +} + +func TestMany(t *testing.T) { + const n = 1e5 + var i int + reset := func() { + i = 0 + } + gen := func() (string, bool) { + if i == n { + return "", false + } + i++ + return fmt.Sprintf("%d.", i-1), true + } + testGenerator(t, reset, gen) +} + +func TestRandom(t *testing.T) { + const n = 1e2 + var ( + i int + r *rand.Rand + ) + reset := func() { + i, r = 0, rand.New(rand.NewSource(0)) + } + gen := func() (string, bool) { + if i == n { + return "", false + } + i++ + return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true + } + testGenerator(t, reset, gen) +} + +func TestBasic(t *testing.T) { + testLiterals(t, []string{ + strings.Repeat("a", 1000), + strings.Repeat("b", 97270), + strings.Repeat("c", 
8000), + }) +} + +func TestBoundary(t *testing.T) { + for i := blockSize - 16; i < blockSize+16; i++ { + s0 := big("abcd", i) + for j := blockSize - 16; j < blockSize+16; j++ { + s1 := big("ABCDE", j) + testLiterals(t, []string{s0, s1}) + testLiterals(t, []string{s0, "", s1}) + testLiterals(t, []string{s0, "x", s1}) + } + } +} + +func TestFlush(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriter(buf) + // Write a couple of records. Everything should still be held + // in the record.Writer buffer, so that buf.Len should be 0. + w0, _ := w.Next() + w0.Write([]byte("0")) + w1, _ := w.Next() + w1.Write([]byte("11")) + if got, want := buf.Len(), 0; got != want { + t.Fatalf("buffer length #0: got %d want %d", got, want) + } + // Flush the record.Writer buffer, which should yield 17 bytes. + // 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes. + if err := w.Flush(); err != nil { + t.Fatal(err) + } + if got, want := buf.Len(), 17; got != want { + t.Fatalf("buffer length #1: got %d want %d", got, want) + } + // Do another write, one that isn't large enough to complete the block. + // The write should not have flowed through to buf. + w2, _ := w.Next() + w2.Write(bytes.Repeat([]byte("2"), 10000)) + if got, want := buf.Len(), 17; got != want { + t.Fatalf("buffer length #2: got %d want %d", got, want) + } + // Flushing should get us up to 10024 bytes written. + // 10024 = 17 + 7 + 10000. + if err := w.Flush(); err != nil { + t.Fatal(err) + } + if got, want := buf.Len(), 10024; got != want { + t.Fatalf("buffer length #3: got %d want %d", got, want) + } + // Do a bigger write, one that completes the current block. + // We should now have 32768 bytes (a complete block), without + // an explicit flush. + w3, _ := w.Next() + w3.Write(bytes.Repeat([]byte("3"), 40000)) + if got, want := buf.Len(), 32768; got != want { + t.Fatalf("buffer length #4: got %d want %d", got, want) + } + // Flushing should get us up to 50038 bytes written. 
+ // 50038 = 10024 + 2*7 + 40000. There are two headers because + // the one record was split into two chunks. + if err := w.Flush(); err != nil { + t.Fatal(err) + } + if got, want := buf.Len(), 50038; got != want { + t.Fatalf("buffer length #5: got %d want %d", got, want) + } + // Check that reading those records give the right lengths. + r := NewReader(buf) + wants := []int64{1, 2, 10000, 40000} + for i, want := range wants { + rr, _ := r.Next() + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #%d: %v", i, err) + } + if n != want { + t.Fatalf("read #%d: got %d bytes want %d", i, n, want) + } + } +} + +func TestNonExhaustiveRead(t *testing.T) { + const n = 100 + buf := new(bytes.Buffer) + p := make([]byte, 10) + rnd := rand.New(rand.NewSource(1)) + + w := NewWriter(buf) + for i := 0; i < n; i++ { + length := len(p) + rnd.Intn(3*blockSize) + s := string(uint8(i)) + "123456789abcdefgh" + ww, _ := w.Next() + ww.Write([]byte(big(s, length))) + } + if err := w.Close(); err != nil { + t.Fatal(err) + } + + r := NewReader(buf) + for i := 0; i < n; i++ { + rr, _ := r.Next() + _, err := io.ReadFull(rr, p) + if err != nil { + t.Fatal(err) + } + want := string(uint8(i)) + "123456789" + if got := string(p); got != want { + t.Fatalf("read #%d: got %q want %q", i, got, want) + } + } +} + +func TestStaleReader(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w0, err := w.Next() + if err != nil { + t.Fatal(err) + } + w0.Write([]byte("0")) + w1, err := w.Next() + if err != nil { + t.Fatal(err) + } + w1.Write([]byte("11")) + if err := w.Close(); err != nil { + t.Fatal(err) + } + + r := NewReader(buf) + r0, err := r.Next() + if err != nil { + t.Fatal(err) + } + r1, err := r.Next() + if err != nil { + t.Fatal(err) + } + p := make([]byte, 1) + if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") { + t.Fatalf("stale read #0: unexpected error: %v", err) + } + if _, err := r1.Read(p); err != nil { + t.Fatalf("fresh 
read #1: got %v want nil error", err) + } + if p[0] != '1' { + t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0]) + } +} + +func TestStaleWriter(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w0, err := w.Next() + if err != nil { + t.Fatal(err) + } + w1, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { + t.Fatalf("stale write #0: unexpected error: %v", err) + } + if _, err := w1.Write([]byte("11")); err != nil { + t.Fatalf("fresh write #1: got %v want nil error", err) + } + if err := w.Flush(); err != nil { + t.Fatalf("flush: %v", err) + } + if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { + t.Fatalf("stale write #1: unexpected error: %v", err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/table/reader.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/table/reader.go new file mode 100644 index 00000000..163a38d6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/table/reader.go @@ -0,0 +1,403 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package table + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "sort" + + "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/crc" + "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/db" + "camlistore.org/third_party/code.google.com/p/snappy-go/snappy" +) + +// blockHandle is the file offset and length of a block. +type blockHandle struct { + offset, length uint64 +} + +// decodeBlockHandle returns the block handle encoded at the start of src, as +// well as the number of bytes it occupies. 
It returns zero if given invalid +// input. +func decodeBlockHandle(src []byte) (blockHandle, int) { + offset, n := binary.Uvarint(src) + length, m := binary.Uvarint(src[n:]) + if n == 0 || m == 0 { + return blockHandle{}, 0 + } + return blockHandle{offset, length}, n + m +} + +func encodeBlockHandle(dst []byte, b blockHandle) int { + n := binary.PutUvarint(dst, b.offset) + m := binary.PutUvarint(dst[n:], b.length) + return n + m +} + +// block is a []byte that holds a sequence of key/value pairs plus an index +// over those pairs. +type block []byte + +// seek returns a blockIter positioned at the first key/value pair whose key is +// >= the given key. If there is no such key, the blockIter returned is done. +func (b block) seek(c db.Comparer, key []byte) (*blockIter, error) { + numRestarts := int(binary.LittleEndian.Uint32(b[len(b)-4:])) + if numRestarts == 0 { + return nil, errors.New("leveldb/table: invalid table (block has no restart points)") + } + n := len(b) - 4*(1+numRestarts) + var offset int + if len(key) > 0 { + // Find the index of the smallest restart point whose key is > the key + // sought; index will be numRestarts if there is no such restart point. + index := sort.Search(numRestarts, func(i int) bool { + o := int(binary.LittleEndian.Uint32(b[n+4*i:])) + // For a restart point, there are 0 bytes shared with the previous key. + // The varint encoding of 0 occupies 1 byte. + o++ + // Decode the key at that restart point, and compare it to the key sought. + v1, n1 := binary.Uvarint(b[o:]) + _, n2 := binary.Uvarint(b[o+n1:]) + m := o + n1 + n2 + s := b[m : m+int(v1)] + return c.Compare(s, key) > 0 + }) + // Since keys are strictly increasing, if index > 0 then the restart + // point at index-1 will be the largest whose key is <= the key sought. + // If index == 0, then all keys in this block are larger than the key + // sought, and offset remains at zero. 
+ if index > 0 { + offset = int(binary.LittleEndian.Uint32(b[n+4*(index-1):])) + } + } + // Initialize the blockIter to the restart point. + i := &blockIter{ + data: b[offset:n], + key: make([]byte, 0, 256), + } + // Iterate from that restart point to somewhere >= the key sought. + for i.Next() && c.Compare(i.key, key) < 0 { + } + if i.err != nil { + return nil, i.err + } + i.soi = !i.eoi + return i, nil +} + +// blockIter is an iterator over a single block of data. +type blockIter struct { + data []byte + key, val []byte + err error + // soi and eoi mark the start and end of iteration. + // Both cannot simultaneously be true. + soi, eoi bool +} + +// blockIter implements the db.Iterator interface. +var _ db.Iterator = (*blockIter)(nil) + +// Next implements Iterator.Next, as documented in the leveldb/db package. +func (i *blockIter) Next() bool { + if i.eoi || i.err != nil { + return false + } + if i.soi { + i.soi = false + return true + } + if len(i.data) == 0 { + i.Close() + return false + } + v0, n0 := binary.Uvarint(i.data) + v1, n1 := binary.Uvarint(i.data[n0:]) + v2, n2 := binary.Uvarint(i.data[n0+n1:]) + n := n0 + n1 + n2 + i.key = append(i.key[:v0], i.data[n:n+int(v1)]...) + i.val = i.data[n+int(v1) : n+int(v1+v2)] + i.data = i.data[n+int(v1+v2):] + return true +} + +// Key implements Iterator.Key, as documented in the leveldb/db package. +func (i *blockIter) Key() []byte { + if i.soi { + return nil + } + return i.key +} + +// Value implements Iterator.Value, as documented in the leveldb/db package. +func (i *blockIter) Value() []byte { + if i.soi { + return nil + } + return i.val +} + +// Close implements Iterator.Close, as documented in the leveldb/db package. +func (i *blockIter) Close() error { + i.key = nil + i.val = nil + i.eoi = true + return i.err +} + +// tableIter is an iterator over an entire table of data. 
It is a two-level +// iterator: to seek for a given key, it first looks in the index for the +// block that contains that key, and then looks inside that block. +type tableIter struct { + reader *Reader + data *blockIter + index *blockIter + err error +} + +// tableIter implements the db.Iterator interface. +var _ db.Iterator = (*tableIter)(nil) + +// nextBlock loads the next block and positions i.data at the first key in that +// block which is >= the given key. If unsuccessful, it sets i.err to any error +// encountered, which may be nil if we have simply exhausted the entire table. +func (i *tableIter) nextBlock(key []byte) bool { + if !i.index.Next() { + i.err = i.index.err + return false + } + // Load the next block. + v := i.index.Value() + h, n := decodeBlockHandle(v) + if n == 0 || n != len(v) { + i.err = errors.New("leveldb/table: corrupt index entry") + return false + } + k, err := i.reader.readBlock(h) + if err != nil { + i.err = err + return false + } + // Look for the key inside that block. + data, err := k.seek(i.reader.comparer, key) + if err != nil { + i.err = err + return false + } + i.data = data + return true +} + +// Next implements Iterator.Next, as documented in the leveldb/db package. +func (i *tableIter) Next() bool { + if i.data == nil { + return false + } + for { + if i.data.Next() { + return true + } + if i.data.err != nil { + i.err = i.data.err + break + } + if !i.nextBlock(nil) { + break + } + } + i.Close() + return false +} + +// Key implements Iterator.Key, as documented in the leveldb/db package. +func (i *tableIter) Key() []byte { + if i.data == nil { + return nil + } + return i.data.Key() +} + +// Value implements Iterator.Value, as documented in the leveldb/db package. +func (i *tableIter) Value() []byte { + if i.data == nil { + return nil + } + return i.data.Value() +} + +// Close implements Iterator.Close, as documented in the leveldb/db package. 
+func (i *tableIter) Close() error { + i.data = nil + return i.err +} + +// Reader is a table reader. It implements the DB interface, as documented +// in the leveldb/db package. +type Reader struct { + file File + err error + index block + comparer db.Comparer + verifyChecksums bool + // TODO: add a (goroutine-safe) LRU block cache. +} + +// Reader implements the db.DB interface. +var _ db.DB = (*Reader)(nil) + +// Close implements DB.Close, as documented in the leveldb/db package. +func (r *Reader) Close() error { + if r.err != nil { + if r.file != nil { + r.file.Close() + r.file = nil + } + return r.err + } + if r.file != nil { + r.err = r.file.Close() + r.file = nil + if r.err != nil { + return r.err + } + } + // Make any future calls to Get, Find or Close return an error. + r.err = errors.New("leveldb/table: reader is closed") + return nil +} + +// Get implements DB.Get, as documented in the leveldb/db package. +func (r *Reader) Get(key []byte, o *db.ReadOptions) (value []byte, err error) { + if r.err != nil { + return nil, r.err + } + i := r.Find(key, o) + if !i.Next() || !bytes.Equal(key, i.Key()) { + err := i.Close() + if err == nil { + err = db.ErrNotFound + } + return nil, err + } + return i.Value(), i.Close() +} + +// Set is provided to implement the DB interface, but returns an error, as a +// Reader cannot write to a table. +func (r *Reader) Set(key, value []byte, o *db.WriteOptions) error { + return errors.New("leveldb/table: cannot Set into a read-only table") +} + +// Delete is provided to implement the DB interface, but returns an error, as a +// Reader cannot write to a table. +func (r *Reader) Delete(key []byte, o *db.WriteOptions) error { + return errors.New("leveldb/table: cannot Delete from a read-only table") +} + +// Find implements DB.Find, as documented in the leveldb/db package. 
+func (r *Reader) Find(key []byte, o *db.ReadOptions) db.Iterator { + if r.err != nil { + return &tableIter{err: r.err} + } + index, err := r.index.seek(r.comparer, key) + if err != nil { + return &tableIter{err: err} + } + i := &tableIter{ + reader: r, + index: index, + } + i.nextBlock(key) + return i +} + +// readBlock reads and decompresses a block from disk into memory. +func (r *Reader) readBlock(bh blockHandle) (block, error) { + b := make([]byte, bh.length+blockTrailerLen) + if _, err := r.file.ReadAt(b, int64(bh.offset)); err != nil { + return nil, err + } + if r.verifyChecksums { + checksum0 := binary.LittleEndian.Uint32(b[bh.length+1:]) + checksum1 := crc.New(b[:bh.length+1]).Value() + if checksum0 != checksum1 { + return nil, errors.New("leveldb/table: invalid table (checksum mismatch)") + } + } + switch b[bh.length] { + case noCompressionBlockType: + return b[:bh.length], nil + case snappyCompressionBlockType: + b, err := snappy.Decode(nil, b[:bh.length]) + if err != nil { + return nil, err + } + return b, nil + } + return nil, fmt.Errorf("leveldb/table: unknown block compression: %d", b[bh.length]) +} + +// TODO(nigeltao): move the File interface to the standard package library? +// Package http already defines something similar. + +// File holds the raw bytes for a table. +type File interface { + io.Closer + io.ReaderAt + io.Writer + Stat() (os.FileInfo, error) +} + +// NewReader returns a new table reader for the file. Closing the reader will +// close the file. 
+func NewReader(f File, o *db.Options) *Reader { + r := &Reader{ + file: f, + comparer: o.GetComparer(), + verifyChecksums: o.GetVerifyChecksums(), + } + if f == nil { + r.err = errors.New("leveldb/table: nil file") + return r + } + stat, err := f.Stat() + if err != nil { + r.err = fmt.Errorf("leveldb/table: invalid table (could not stat file): %v", err) + return r + } + var footer [footerLen]byte + if stat.Size() < int64(len(footer)) { + r.err = errors.New("leveldb/table: invalid table (file size is too small)") + return r + } + _, err = f.ReadAt(footer[:], stat.Size()-int64(len(footer))) + if err != nil && err != io.EOF { + r.err = fmt.Errorf("leveldb/table: invalid table (could not read footer): %v", err) + return r + } + if string(footer[footerLen-len(magic):footerLen]) != magic { + r.err = errors.New("leveldb/table: invalid table (bad magic number)") + return r + } + // Ignore the metaindex. + _, n := decodeBlockHandle(footer[:]) + if n == 0 { + r.err = errors.New("leveldb/table: invalid table (bad metaindex block handle)") + return r + } + // Read the index into memory. + indexBH, n := decodeBlockHandle(footer[n:]) + if n == 0 { + r.err = errors.New("leveldb/table: invalid table (bad index block handle)") + return r + } + r.index, r.err = r.readBlock(indexBH) + return r +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/table/table.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/table/table.go new file mode 100644 index 00000000..0beb6ab3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/table/table.go @@ -0,0 +1,137 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package table implements readers and writers of leveldb tables. 
+ +Tables are either opened for reading or created for writing but not both. + +A reader can create iterators, which yield all key/value pairs whose keys +are 'greater than or equal' to a starting key. There may be multiple key/ +value pairs that have the same key. + +A reader can be used concurrently. Multiple goroutines can call Find +concurrently, and each iterator can run concurrently with other iterators. +However, any particular iterator should not be used concurrently, and +iterators should not be used once a reader is closed. + +A writer writes key/value pairs in increasing key order, and cannot be used +concurrently. A table cannot be read until the writer has finished. + +Readers and writers can be created with various options. Passing a nil +Options pointer is valid and means to use the default values. + +One such option is to define the 'less than' ordering for keys. The default +Comparer uses the natural ordering consistent with bytes.Compare. The same +ordering should be used for reading and writing a table. + +To return the value for a key: + + r := table.NewReader(file, options) + defer r.Close() + return r.Get(key) + +To count the number of entries in a table: + + i, n := r.Find(nil), 0 + for i.Next() { + n++ + } + if err := i.Close(); err != nil { + return 0, err + } + return n, nil + +To write a table with three entries: + + w := table.NewWriter(file, options) + if err := w.Set([]byte("apple"), []byte("red")); err != nil { + w.Close() + return err + } + if err := w.Set([]byte("banana"), []byte("yellow")); err != nil { + w.Close() + return err + } + if err := w.Set([]byte("cherry"), []byte("red")); err != nil { + w.Close() + return err + } + return w.Close() +*/ +package table + +/* +The table file format looks like: + + +[data block 0] +[data block 1] +... +[data block N-1] +[meta block 0] +[meta block 1] +... 
+[meta block K-1] +[metaindex block] +[index block] +[footer] + + +Each block consists of some data and a 5 byte trailer: a 1 byte block type and +a 4 byte checksum of the compressed data. The block type gives the per-block +compression used; each block is compressed independently. The checksum +algorithm is described in the leveldb/crc package. + +The decompressed block data consists of a sequence of key/value entries +followed by a trailer. Each key is encoded as a shared prefix length and a +remainder string. For example, if two adjacent keys are "tweedledee" and +"tweedledum", then the second key would be encoded as {8, "um"}. The shared +prefix length is varint encoded. The remainder string and the value are +encoded as a varint-encoded length followed by the literal contents. To +continue the example, suppose that the key "tweedledum" mapped to the value +"socks". The encoded key/value entry would be: "\x08\x02\x05umsocks". + +Every block has a restart interval I. Every I'th key/value entry in that block +is called a restart point, and shares no key prefix with the previous entry. +Continuing the example above, if the key after "tweedledum" was "two", but was +part of a restart point, then that key would be encoded as {0, "two"} instead +of {2, "o"}. If a block has P restart points, then the block trailer consists +of (P+1)*4 bytes: (P+1) little-endian uint32 values. The first P of these +uint32 values are the block offsets of each restart point. The final uint32 +value is P itself. Thus, when seeking for a particular key, one can use binary +search to find the largest restart point whose key is <= the key sought. + +An index block is a block with N key/value entries. The i'th value is the +encoded block handle of the i'th data block. The i'th key is a separator for +i < N-1, and a successor for i == N-1. The separator between blocks i and i+1 +is a key that is >= every key in block i and is < every key i block i+1. 
The +successor for the final block is a key that is >= every key in block N-1. The +index block restart interval is 1: every entry is a restart point. + +The table footer is exactly 48 bytes long: + - the block handle for the metaindex block, + - the block handle for the index block, + - padding to take the two items above up to 40 bytes, + - an 8-byte magic string. + +A block handle is an offset and a length; the length does not include the 5 +byte trailer. Both numbers are varint-encoded, with no padding between the two +values. The maximum size of an encoded block handle is therefore 20 bytes. +*/ + +const ( + blockTrailerLen = 5 + footerLen = 48 + + magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb" + + // The block type gives the per-block compression format. + // These constants are part of the file format and should not be changed. + // They are different from the db.Compression constants because the latter + // are designed so that the zero value of the db.Compression type means to + // use the default compression (which is snappy). + noCompressionBlockType = 0 + snappyCompressionBlockType = 1 +) diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/table/table_test.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/table/table_test.go new file mode 100644 index 00000000..71925707 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/table/table_test.go @@ -0,0 +1,279 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package table + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "sort" + "strings" + "testing" + "time" + + "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/db" +) + +type memFile []byte + +func (f *memFile) Close() error { + return nil +} + +func (f *memFile) ReadAt(p []byte, off int64) (int, error) { + return copy(p, (*f)[off:]), nil +} + +func (f *memFile) Stat() (os.FileInfo, error) { + return f, nil +} + +func (f *memFile) Write(p []byte) (int, error) { + *f = append(*f, p...) + return len(p), nil +} + +func (f *memFile) Size() int64 { + return int64(len(*f)) +} + +func (f *memFile) Sys() interface{} { + return nil +} + +func (f *memFile) IsDir() bool { + return false +} + +func (f *memFile) ModTime() time.Time { + return time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) +} + +func (f *memFile) Mode() os.FileMode { + return os.FileMode(0755) +} + +func (f *memFile) Name() string { + return "testdata" +} + +var wordCount = map[string]string{} + +func init() { + f, err := os.Open("../../testdata/h.txt") + if err != nil { + panic(err) + } + defer f.Close() + r := bufio.NewReader(f) + for { + s, err := r.ReadBytes('\n') + if err == io.EOF { + break + } + if err != nil { + panic(err) + } + k := strings.TrimSpace(string(s[8:])) + v := strings.TrimSpace(string(s[:8])) + wordCount[k] = v + } + if len(wordCount) != 1710 { + panic(fmt.Sprintf("h.txt entry count: got %d, want %d", len(wordCount), 1710)) + } +} + +func check(f File) error { + r := NewReader(f, &db.Options{ + VerifyChecksums: true, + }) + // Check that each key/value pair in wordCount is also in the table. + for k, v := range wordCount { + // Check using Get. + if v1, err := r.Get([]byte(k), nil); string(v1) != string(v) || err != nil { + return fmt.Errorf("Get %q: got (%q, %v), want (%q, %v)", k, v1, err, v, error(nil)) + } + + // Check using Find. 
+ i := r.Find([]byte(k), nil) + if !i.Next() || string(i.Key()) != k { + return fmt.Errorf("Find %q: key was not in the table", k) + } + if string(i.Value()) != v { + return fmt.Errorf("Find %q: got value %q, want %q", k, i.Value(), v) + } + if err := i.Close(); err != nil { + return err + } + } + + // Check that nonsense words are not in the table. + var nonsenseWords = []string{ + "", + "\x00", + "kwyjibo", + "\xff", + } + for _, s := range nonsenseWords { + // Check using Get. + if _, err := r.Get([]byte(s), nil); err != db.ErrNotFound { + return fmt.Errorf("Get %q: got %v, want ErrNotFound", s, err) + } + + // Check using Find. + i := r.Find([]byte(s), nil) + if i.Next() && s == string(i.Key()) { + return fmt.Errorf("Find %q: unexpectedly found key in the table", s) + } + if err := i.Close(); err != nil { + return err + } + } + + // Check that the number of keys >= a given start key matches the expected number. + var countTests = []struct { + count int + start string + }{ + // cat h.txt | cut -c 9- | wc -l gives 1710. + {1710, ""}, + // cat h.txt | cut -c 9- | grep -v "^[a-b]" | wc -l gives 1522. + {1522, "c"}, + // cat h.txt | cut -c 9- | grep -v "^[a-j]" | wc -l gives 940. + {940, "k"}, + // cat h.txt | cut -c 9- | grep -v "^[a-x]" | wc -l gives 12. + {12, "y"}, + // cat h.txt | cut -c 9- | grep -v "^[a-z]" | wc -l gives 0. + {0, "~"}, + } + for _, ct := range countTests { + n, i := 0, r.Find([]byte(ct.start), nil) + for i.Next() { + n++ + } + if err := i.Close(); err != nil { + return err + } + if n != ct.count { + return fmt.Errorf("count %q: got %d, want %d", ct.start, n, ct.count) + } + } + + return r.Close() +} + +func build(compression db.Compression) (*memFile, error) { + // Create a sorted list of wordCount's keys. + keys := make([]string, len(wordCount)) + i := 0 + for k := range wordCount { + keys[i] = k + i++ + } + sort.Strings(keys) + + // Write the key/value pairs to a new table, in increasing key order. 
+ f := new(memFile) + w := NewWriter(f, &db.Options{ + Compression: compression, + }) + for _, k := range keys { + v := wordCount[k] + if err := w.Set([]byte(k), []byte(v), nil); err != nil { + return nil, err + } + } + if err := w.Close(); err != nil { + return nil, err + } + return f, nil +} + +func TestReader(t *testing.T) { + // Check that we can read a pre-made table. + f, err := os.Open("../../testdata/h.sst") + if err != nil { + t.Fatal(err) + } + err = check(f) + if err != nil { + t.Fatal(err) + } +} + +func TestWriter(t *testing.T) { + // Check that we can read a freshly made table. + f, err := build(db.DefaultCompression) + if err != nil { + t.Fatal(err) + } + err = check(f) + if err != nil { + t.Fatal(err) + } +} + +func TestNoCompressionOutput(t *testing.T) { + // Check that a freshly made NoCompression table is byte-for-byte equal + // to a pre-made table. + a, err := ioutil.ReadFile("../../testdata/h.no-compression.sst") + if err != nil { + t.Fatal(err) + } + b, err := build(db.NoCompression) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(a, []byte(*b)) { + t.Fatal("built table does not match pre-made table") + } +} + +func TestBlockIter(t *testing.T) { + // k is a block that maps three keys "apple", "apricot", "banana" to empty strings. 
+ k := block([]byte("\x00\x05\x00apple\x02\x05\x00ricot\x00\x06\x00banana\x00\x00\x00\x00\x01\x00\x00\x00")) + var testcases = []struct { + index int + key string + }{ + {0, ""}, + {0, "a"}, + {0, "aaaaaaaaaaaaaaa"}, + {0, "app"}, + {0, "apple"}, + {1, "appliance"}, + {1, "apricos"}, + {1, "apricot"}, + {2, "azzzzzzzzzzzzzz"}, + {2, "b"}, + {2, "banan"}, + {2, "banana"}, + {3, "banana\x00"}, + {3, "c"}, + } + for _, tc := range testcases { + i, err := k.seek(db.DefaultComparer, []byte(tc.key)) + if err != nil { + t.Fatal(err) + } + for j, kWant := range []string{"apple", "apricot", "banana"}[tc.index:] { + if !i.Next() { + t.Fatalf("key=%q, index=%d, j=%d: Next got false, want true", tc.key, tc.index, j) + } + if kGot := string(i.Key()); kGot != kWant { + t.Fatalf("key=%q, index=%d, j=%d: got %q, want %q", tc.key, tc.index, j, kGot, kWant) + } + } + if i.Next() { + t.Fatalf("key=%q, index=%d: Next got true, want false", tc.key, tc.index) + } + if err := i.Close(); err != nil { + t.Fatalf("key=%q, index=%d: got err=%v", tc.key, tc.index, err) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/table/writer.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/table/writer.go new file mode 100644 index 00000000..d105991b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/leveldb/table/writer.go @@ -0,0 +1,309 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package table + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/crc" + "camlistore.org/third_party/code.google.com/p/leveldb-go/leveldb/db" + "camlistore.org/third_party/code.google.com/p/snappy-go/snappy" +) + +// indexEntry is a block handle and the length of the separator key. +type indexEntry struct { + bh blockHandle + keyLen int +} + +// Writer is a table writer. It implements the DB interface, as documented +// in the leveldb/db package. +type Writer struct { + writer io.Writer + bufWriter *bufio.Writer + closer io.Closer + err error + // The next four fields are copied from a db.Options. + blockRestartInterval int + blockSize int + cmp db.Comparer + compression db.Compression + // A table is a series of blocks and a block's index entry contains a + // separator key between one block and the next. Thus, a finished block + // cannot be written until the first key in the next block is seen. + // pendingBH is the blockHandle of a finished block that is waiting for + // the next call to Set. If the writer is not in this state, pendingBH + // is zero. + pendingBH blockHandle + // offset is the offset (relative to the table start) of the next block + // to be written. + offset uint64 + // prevKey is a copy of the key most recently passed to Set. + prevKey []byte + // indexKeys and indexEntries hold the separator keys between each block + // and the successor key for the final block. indexKeys contains the key's + // bytes concatenated together. The keyLen field of each indexEntries + // element is the length of the respective separator key. + indexKeys []byte + indexEntries []indexEntry + // The next three fields hold data for the current block: + // - buf is the accumulated uncompressed bytes, + // - nEntries is the number of entries, + // - restarts are the offsets (relative to the block start) of each + // restart point. 
+ buf bytes.Buffer + nEntries int + restarts []uint32 + // compressedBuf is the destination buffer for snappy compression. It is + // re-used over the lifetime of the writer, avoiding the allocation of a + // temporary buffer for each block. + compressedBuf []byte + // tmp is a scratch buffer, large enough to hold either footerLen bytes, + // blockTrailerLen bytes, or (5 * binary.MaxVarintLen64) bytes. + tmp [50]byte +} + +// Writer implements the db.DB interface. +var _ db.DB = (*Writer)(nil) + +// Get is provided to implement the DB interface, but returns an error, as a +// Writer cannot read from a table. +func (w *Writer) Get(key []byte, o *db.ReadOptions) ([]byte, error) { + return nil, errors.New("leveldb/table: cannot Get from a write-only table") +} + +// Delete is provided to implement the DB interface, but returns an error, as a +// Writer can only append key/value pairs. +func (w *Writer) Delete(key []byte, o *db.WriteOptions) error { + return errors.New("leveldb/table: cannot Delete from a table") +} + +// Find is provided to implement the DB interface, but returns an error, as a +// Writer cannot read from a table. +func (w *Writer) Find(key []byte, o *db.ReadOptions) db.Iterator { + return &tableIter{ + err: errors.New("leveldb/table: cannot Find from a write-only table"), + } +} + +// Set implements DB.Set, as documented in the leveldb/db package. For a given +// Writer, the keys passed to Set must be in increasing order. +func (w *Writer) Set(key, value []byte, o *db.WriteOptions) error { + if w.err != nil { + return w.err + } + if w.cmp.Compare(w.prevKey, key) >= 0 { + w.err = fmt.Errorf("leveldb/table: Set called in non-increasing key order: %q, %q", w.prevKey, key) + return w.err + } + w.flushPendingBH(key) + w.append(key, value, w.nEntries%w.blockRestartInterval == 0) + // If the estimated block size is sufficiently large, finish the current block. 
+ if w.buf.Len()+4*(len(w.restarts)+1) >= w.blockSize { + bh, err := w.finishBlock() + if err != nil { + w.err = err + return w.err + } + w.pendingBH = bh + } + return nil +} + +// flushPendingBH adds any pending block handle to the index entries. +func (w *Writer) flushPendingBH(key []byte) { + if w.pendingBH.length == 0 { + // A valid blockHandle must be non-zero. + // In particular, it must have a non-zero length. + return + } + n0 := len(w.indexKeys) + w.indexKeys = w.cmp.AppendSeparator(w.indexKeys, w.prevKey, key) + n1 := len(w.indexKeys) + w.indexEntries = append(w.indexEntries, indexEntry{w.pendingBH, n1 - n0}) + w.pendingBH = blockHandle{} +} + +// append appends a key/value pair, which may also be a restart point. +func (w *Writer) append(key, value []byte, restart bool) { + nShared := 0 + if restart { + w.restarts = append(w.restarts, uint32(w.buf.Len())) + } else { + nShared = db.SharedPrefixLen(w.prevKey, key) + } + w.prevKey = append(w.prevKey[:0], key...) + w.nEntries++ + n := binary.PutUvarint(w.tmp[0:], uint64(nShared)) + n += binary.PutUvarint(w.tmp[n:], uint64(len(key)-nShared)) + n += binary.PutUvarint(w.tmp[n:], uint64(len(value))) + w.buf.Write(w.tmp[:n]) + w.buf.Write(key[nShared:]) + w.buf.Write(value) +} + +// finishBlock finishes the current block and returns its block handle, which is +// its offset and length in the table. +func (w *Writer) finishBlock() (blockHandle, error) { + // Write the restart points to the buffer. + if w.nEntries == 0 { + // Every block must have at least one restart point. + w.restarts = w.restarts[:1] + w.restarts[0] = 0 + } + tmp4 := w.tmp[:4] + for _, x := range w.restarts { + binary.LittleEndian.PutUint32(tmp4, x) + w.buf.Write(tmp4) + } + binary.LittleEndian.PutUint32(tmp4, uint32(len(w.restarts))) + w.buf.Write(tmp4) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. 
+ b := w.buf.Bytes() + w.tmp[0] = noCompressionBlockType + if w.compression == db.SnappyCompression { + compressed, err := snappy.Encode(w.compressedBuf, b) + if err != nil { + return blockHandle{}, err + } + w.compressedBuf = compressed[:cap(compressed)] + if len(compressed) < len(b)-len(b)/8 { + w.tmp[0] = snappyCompressionBlockType + b = compressed + } + } + + // Calculate the checksum. + checksum := crc.New(b).Update(w.tmp[:1]).Value() + binary.LittleEndian.PutUint32(w.tmp[1:5], checksum) + + // Write the bytes to the file. + if _, err := w.writer.Write(b); err != nil { + return blockHandle{}, err + } + if _, err := w.writer.Write(w.tmp[:5]); err != nil { + return blockHandle{}, err + } + bh := blockHandle{w.offset, uint64(len(b))} + w.offset += uint64(len(b)) + blockTrailerLen + + // Reset the per-block state. + w.buf.Reset() + w.nEntries = 0 + w.restarts = w.restarts[:0] + return bh, nil +} + +// Close implements DB.Close, as documented in the leveldb/db package. +func (w *Writer) Close() (err error) { + defer func() { + if w.closer == nil { + return + } + err1 := w.closer.Close() + if err == nil { + err = err1 + } + w.closer = nil + }() + if w.err != nil { + return w.err + } + + // Finish the last data block, or force an empty data block if there + // aren't any data blocks at all. + if w.nEntries > 0 || len(w.indexEntries) == 0 { + bh, err := w.finishBlock() + if err != nil { + w.err = err + return w.err + } + w.pendingBH = bh + w.flushPendingBH(nil) + } + + // Write the (empty) metaindex block. + metaindexBlockHandle, err := w.finishBlock() + if err != nil { + w.err = err + return w.err + } + + // Write the index block. + // writer.append uses w.tmp[:3*binary.MaxVarintLen64]. 
+ i0, tmp := 0, w.tmp[3*binary.MaxVarintLen64:5*binary.MaxVarintLen64] + for _, ie := range w.indexEntries { + n := encodeBlockHandle(tmp, ie.bh) + i1 := i0 + ie.keyLen + w.append(w.indexKeys[i0:i1], tmp[:n], true) + i0 = i1 + } + indexBlockHandle, err := w.finishBlock() + if err != nil { + w.err = err + return w.err + } + + // Write the table footer. + footer := w.tmp[:footerLen] + for i := range footer { + footer[i] = 0 + } + n := encodeBlockHandle(footer, metaindexBlockHandle) + encodeBlockHandle(footer[n:], indexBlockHandle) + copy(footer[footerLen-len(magic):], magic) + if _, err := w.writer.Write(footer); err != nil { + w.err = err + return w.err + } + + // Flush the buffer. + if w.bufWriter != nil { + if err := w.bufWriter.Flush(); err != nil { + w.err = err + return err + } + } + + // Make any future calls to Set or Close return an error. + w.err = errors.New("leveldb/table: writer is closed") + return nil +} + +// NewWriter returns a new table writer for the file. Closing the writer will +// close the file. +func NewWriter(f File, o *db.Options) *Writer { + w := &Writer{ + closer: f, + blockRestartInterval: o.GetBlockRestartInterval(), + blockSize: o.GetBlockSize(), + cmp: o.GetComparer(), + compression: o.GetCompression(), + prevKey: make([]byte, 0, 256), + restarts: make([]uint32, 0, 256), + } + if f == nil { + w.err = errors.New("leveldb/table: nil file") + return w + } + // If f does not have a Flush method, do our own buffering. 
+ type flusher interface { + Flush() error + } + if _, ok := f.(flusher); ok { + w.writer = f + } else { + w.bufWriter = bufio.NewWriter(f) + w.writer = w.bufWriter + } + return w +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/lib/codereview/codereview.cfg b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/lib/codereview/codereview.cfg new file mode 100644 index 00000000..93b55c0a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/lib/codereview/codereview.cfg @@ -0,0 +1 @@ +defaultcc: golang-dev@googlegroups.com diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/000003.log b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/000003.log new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/CURRENT b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/CURRENT new file mode 100644 index 00000000..1a848522 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/CURRENT @@ -0,0 +1 @@ +MANIFEST-000002 diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/LOCK b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/LOCK new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/LOG b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/LOG new file mode 100644 index 00000000..ed125ed2 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/LOG @@ -0,0 +1 @@ +2012/02/03-18:31:28.752463 7ff183bca740 Delete type=3 #1 diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/MANIFEST-000002 b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/MANIFEST-000002 new file mode 100644 index 00000000..dbf594ea Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-1/MANIFEST-000002 differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/000003.log b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/000003.log new file mode 100644 index 00000000..147db71a Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/000003.log differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/CURRENT b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/CURRENT new file mode 100644 index 00000000..1a848522 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/CURRENT @@ -0,0 +1 @@ +MANIFEST-000002 diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/LOCK b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/LOCK new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/LOG b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/LOG 
new file mode 100644 index 00000000..7fd79744 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/LOG @@ -0,0 +1 @@ +2012/02/03-18:32:06.283846 7fa954064740 Delete type=3 #1 diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/MANIFEST-000002 b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/MANIFEST-000002 new file mode 100644 index 00000000..dbf594ea Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-2/MANIFEST-000002 differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/000005.sst b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/000005.sst new file mode 100644 index 00000000..f4a60d28 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/000005.sst differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/000006.log b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/000006.log new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/CURRENT b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/CURRENT new file mode 100644 index 00000000..cacca757 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/CURRENT @@ -0,0 +1 @@ +MANIFEST-000004 diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/LOCK 
b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/LOCK new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/LOG b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/LOG new file mode 100644 index 00000000..9b8ff68d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/LOG @@ -0,0 +1,5 @@ +2012/02/03-18:32:34.790995 7f8f2d339740 Recovering log #3 +2012/02/03-18:32:34.791037 7f8f2d339740 Level-0 table #5: started +2012/02/03-18:32:34.850300 7f8f2d339740 Level-0 table #5: 165 bytes OK +2012/02/03-18:32:34.917482 7f8f2d339740 Delete type=3 #2 +2012/02/03-18:32:34.917520 7f8f2d339740 Delete type=0 #3 diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/LOG.old b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/LOG.old new file mode 100644 index 00000000..de536958 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/LOG.old @@ -0,0 +1 @@ +2012/02/03-18:32:34.790486 7f8f2d339740 Delete type=3 #1 diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/MANIFEST-000004 b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/MANIFEST-000004 new file mode 100644 index 00000000..a1fd797b Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-3/MANIFEST-000004 differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/000005.sst 
b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/000005.sst new file mode 100644 index 00000000..f4a60d28 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/000005.sst differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/000006.log b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/000006.log new file mode 100644 index 00000000..1638b770 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/000006.log differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/CURRENT b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/CURRENT new file mode 100644 index 00000000..cacca757 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/CURRENT @@ -0,0 +1 @@ +MANIFEST-000004 diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/LOCK b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/LOCK new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/LOG b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/LOG new file mode 100644 index 00000000..84474976 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/LOG @@ -0,0 +1,5 @@ +2012/02/03-18:39:40.556778 7f7d66252740 Recovering log #3 +2012/02/03-18:39:40.556810 7f7d66252740 Level-0 table #5: started 
+2012/02/03-18:39:40.614757 7f7d66252740 Level-0 table #5: 165 bytes OK +2012/02/03-18:39:40.715229 7f7d66252740 Delete type=3 #2 +2012/02/03-18:39:40.715271 7f7d66252740 Delete type=0 #3 diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/LOG.old b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/LOG.old new file mode 100644 index 00000000..5ac81afe --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/LOG.old @@ -0,0 +1 @@ +2012/02/03-18:39:40.556281 7f7d66252740 Delete type=3 #1 diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/MANIFEST-000004 b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/MANIFEST-000004 new file mode 100644 index 00000000..a1fd797b Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/db-stage-4/MANIFEST-000004 differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/h.no-compression.sst b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/h.no-compression.sst new file mode 100644 index 00000000..f9c8f092 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/h.no-compression.sst differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/h.sst b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/h.sst new file mode 100644 index 00000000..2e7a9d60 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/h.sst differ diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/h.txt b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/h.txt new file mode 100644 index 00000000..ed8f750a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/h.txt @@ -0,0 +1,1710 @@ + 97 a + 2 aboard + 2 about + 1 above + 1 abroad + 1 absurd + 1 abused + 1 accord + 1 account + 1 achievements + 1 acquaint + 5 act + 1 action + 1 actions + 1 addition + 1 address + 4 adieu + 1 admiration + 1 adoption + 1 adulterate + 1 advantage + 1 advice + 2 affair + 3 affection + 1 after + 1 afternoon + 13 again + 5 against + 2 ah + 5 air + 1 airs + 1 alas + 36 all + 1 alleys + 1 allow + 4 almost + 4 alone + 2 along + 1 already + 1 always + 9 am + 1 amazed + 1 ambiguous + 1 ambitious + 13 an + 227 and + 1 angel + 1 angels + 1 anger + 1 angry + 1 another + 4 answer + 1 antic + 6 any + 1 apparel + 2 apparition + 4 appear + 1 appears + 1 appetite + 1 approve + 1 apt + 21 are + 2 arm + 2 armed + 1 armour + 2 arms + 1 arrant + 6 art + 1 artery + 1 article + 1 articles + 56 as + 1 aside + 1 asking + 1 assail + 1 assistant + 2 assume + 18 at + 1 attendants + 1 attent + 1 attribute + 1 audience + 2 aught + 1 auspicious + 1 avoid + 1 avouch + 1 awake + 7 away + 2 awhile + 7 ay + 1 baby + 1 back + 1 baked + 1 bark + 1 barr + 1 base + 1 baser + 1 bawds + 42 be + 5 bear + 1 beard + 1 bearers + 1 bears + 2 beast + 1 beating + 1 beauty + 1 beaver + 2 beckons + 4 bed + 4 been + 1 beetles + 1 befitted + 6 before + 1 beg + 1 beguile + 1 behold + 1 behoves + 4 being + 1 belief + 6 believe + 1 bell + 2 bend + 5 beneath + 1 benefit + 30 bernardo + 2 beseech + 1 besmirch + 5 best + 1 beteem + 1 bethought + 2 better + 2 between + 2 beware + 1 beyond + 2 bid + 2 bird + 3 birth + 1 bites + 1 bitter + 1 black + 1 blast + 1 blastments + 1 blasts + 1 blazes + 1 blazon + 3 blessing + 7 blood + 1 blossoms + 1 blows + 1 
bodes + 5 body + 1 bonds + 1 bones + 1 book + 1 books + 2 born + 1 borrower + 1 borrowing + 1 bosom + 3 both + 2 bound + 1 bounteous + 1 bow + 2 boy + 2 brain + 1 bray + 1 brazen + 1 breach + 3 break + 1 breaking + 1 breath + 1 breathing + 1 brief + 1 bring + 1 brokers + 6 brother + 1 brow + 1 bruit + 1 bulk + 1 buried + 2 burns + 1 burnt + 2 burst + 4 business + 58 but + 1 buttons + 1 buy + 31 by + 4 call + 1 calumnious + 2 came + 5 can + 1 canker + 2 cannon + 3 cannot + 1 canon + 1 canonized + 2 canst + 1 cap + 1 carefully + 1 carriage + 1 carrying + 1 carve + 3 cast + 2 castle + 1 catch + 1 cautel + 1 caution + 1 celebrated + 1 celestial + 1 cellarage + 2 censure + 1 cerements + 1 certain + 1 chances + 1 change + 1 character + 3 charge + 1 chariest + 1 charitable + 1 charm + 1 chaste + 1 cheer + 2 chief + 1 chiefest + 2 choice + 1 choose + 1 circumscribed + 2 circumstance + 1 clad + 8 claudius + 1 clearly + 1 clepe + 1 cliff + 1 climatures + 1 cloak + 2 clouds + 5 cock + 2 cold + 1 coldly + 1 colleagued + 1 colour + 1 combat + 1 combated + 1 combined + 17 come + 7 comes + 1 comest + 1 comfort + 1 coming + 1 command + 1 commandment + 2 commend + 1 commendable + 4 common + 1 compact + 1 competent + 1 complete + 1 complexion + 1 compulsatory + 1 comrade + 1 conceal + 1 condolement + 1 confess + 1 confine + 1 confined + 1 conqueror + 3 consent + 1 constantly + 1 contagious + 1 contracted + 1 contrive + 1 conveniently + 1 convoy + 1 copied + 4 cornelius + 1 coronation + 1 corruption + 2 corse + 1 costly + 1 couch + 2 could + 2 countenance + 1 country + 1 countrymen + 1 couple + 2 course + 1 courses + 1 court + 1 courteous + 1 courtier + 2 cousin + 1 covenant + 1 crack + 1 credent + 1 crescent + 2 crew + 1 cried + 1 cries + 1 crimes + 1 cross + 1 crowing + 2 crown + 1 crows + 1 crust + 1 curd + 2 cursed + 3 custom + 1 customary + 1 cut + 54 d + 1 daily + 1 dalliance + 1 damn + 2 damned + 3 dane + 1 danger + 1 dared + 1 dares + 2 daughter + 1 dawning + 8 day + 1 days + 
7 dead + 4 dear + 2 dearest + 1 dearly + 6 death + 1 decline + 1 deed + 1 deeds + 1 deep + 1 defeated + 1 defect + 1 defend + 1 dejected + 1 delated + 1 delight + 2 deliver + 1 demonstrated + 13 denmark + 1 denote + 1 depart + 1 depends + 1 deprive + 1 design + 6 desire + 1 desperate + 1 desperation + 3 dew + 1 dews + 1 dexterity + 14 did + 1 didst + 1 die + 1 died + 1 diet + 1 dignity + 1 direct + 1 dirge + 1 disappointed + 1 disasters + 1 disclosed + 1 discourse + 1 discretion + 1 disjoint + 2 dispatch + 3 disposition + 1 distilled + 1 distilment + 1 distracted + 1 divide + 36 do + 3 does + 1 dole + 3 done + 1 doom + 1 doomsday + 7 doth + 2 double + 4 doubt + 1 doubtful + 7 down + 1 drains + 1 dram + 1 draughts + 1 draw + 1 draws + 1 dread + 1 dreaded + 2 dreadful + 1 dream + 1 dreamt + 1 drink + 1 drinks + 1 dropping + 1 droppings + 1 drum + 1 drunkards + 1 dull + 1 duller + 1 dulls + 2 dumb + 1 dust + 1 duties + 7 duty + 1 dwelling + 1 dye + 1 e + 5 each + 2 eager + 1 eale + 5 ear + 3 ears + 9 earth + 1 earthly + 2 ease + 1 east + 1 eastward + 1 eclipse + 1 edge + 2 effect + 1 eleven + 4 else + 2 elsinore + 1 embark + 1 empire + 1 emulate + 2 en + 1 encounter + 1 encumber + 1 end + 1 enemy + 1 enmity + 1 enough + 12 enter + 1 enterprise + 1 entertainment + 1 entrance + 1 entreated + 1 entreatments + 1 equal + 5 er + 4 ere + 1 ergrowth + 1 ermaster + 1 erring + 1 eruption + 1 erwhelm + 1 esteem + 1 et + 1 eternal + 1 eternity + 8 even + 1 events + 6 ever + 1 everlasting + 3 every + 1 exactly + 1 excellent + 8 exeunt + 6 exit + 2 express + 1 extinct + 1 extorted + 1 extravagant + 6 eye + 7 eyes + 2 face + 1 faded + 1 fail + 3 fair + 1 fairy + 4 faith + 1 falling + 1 false + 1 familiar + 1 fancy + 2 fantasy + 2 far + 2 fare + 8 farewell + 3 fashion + 2 fast + 1 fat + 2 fate + 1 fates + 28 father + 1 fathers + 1 fathoms + 4 fault + 2 favour + 9 fear + 1 fearful + 1 fed + 1 fee + 2 fell + 2 fellow + 3 few + 4 fie + 1 fierce + 3 figure + 1 filial + 2 find + 1 fingers 
+ 4 fire + 1 fires + 1 first + 2 fit + 1 fits + 1 fitting + 2 fix + 1 flames + 1 flat + 2 flesh + 1 flood + 1 flourish + 1 flushing + 1 foe + 9 follow + 1 follows + 1 fond + 1 food + 1 fool + 1 fools + 1 foot + 45 for + 1 forbid + 1 forced + 1 foreign + 1 foreknowing + 1 foresaid + 1 forfeit + 1 forged + 1 forget + 4 form + 2 forms + 3 forth + 1 fortified + 6 fortinbras + 1 forts + 1 fortune + 1 forward + 1 fought + 6 foul + 1 frailty + 1 frame + 3 france + 10 francisco + 1 free + 1 freely + 1 freeze + 1 fretful + 3 friend + 1 friending + 5 friends + 21 from + 1 frown + 1 frowningly + 1 fruitful + 2 full + 3 funeral + 1 furnish + 4 further + 1 gaged + 2 gainst + 1 gait + 1 galled + 1 galls + 1 gape + 1 garbage + 1 garden + 1 gates + 1 gaudy + 1 general + 1 generous + 1 gentle + 5 gentlemen + 4 gertrude + 1 get + 26 ghost + 1 gibber + 3 gifts + 1 gins + 1 girl + 13 give + 4 given + 3 giving + 2 glad + 1 glimpses + 1 globe + 1 glow + 15 go + 1 goblin + 8 god + 3 goes + 1 going + 4 gone + 20 good + 1 goodly + 6 grace + 1 graces + 2 gracious + 1 grapple + 1 grave + 1 graves + 1 great + 1 greatness + 2 green + 1 greeting + 3 grief + 1 grizzled + 2 gross + 3 ground + 2 grow + 1 grown + 2 grows + 1 guard + 2 guilty + 1 ha + 2 habit + 13 had + 1 hail + 1 hair + 1 hallow + 100 hamlet + 5 hand + 4 hands + 2 hang + 1 hap + 1 happily + 1 harbingers + 2 hard + 1 hardy + 1 harrow + 1 harrows + 3 has + 4 hast + 7 haste + 1 hatch + 15 hath + 31 have + 1 havior + 34 he + 6 head + 1 headed + 1 headshake + 3 health + 9 hear + 4 heard + 1 hearing + 2 hears + 1 hearsed + 10 heart + 3 heartily + 1 hearts + 1 heat + 21 heaven + 1 heavens + 1 heavy + 1 hebenon + 1 height + 1 held + 3 hell + 2 help + 8 her + 1 heraldry + 1 hercules + 11 here + 1 hereafter + 3 herein + 1 hic + 1 hideous + 1 hies + 2 high + 1 higher + 1 hill + 2 hillo + 21 him + 3 himself + 57 his + 1 hither + 1 hitherto + 5 ho + 9 hold + 1 holding + 2 holds + 1 holla + 1 holy + 2 honest + 5 honour + 1 honourable + 1 hoops + 
85 horatio + 4 horrible + 1 horridly + 1 host + 1 hot + 6 hour + 2 house + 7 how + 1 howsoever + 1 humbly + 1 hundred + 1 husbandry + 1 hyperion + 124 i + 1 ice + 22 if + 1 ignorance + 1 ii + 1 iii + 1 illume + 1 illusion + 1 image + 1 imagination + 1 immediate + 1 imminent + 1 immortal + 3 impart + 1 impartment + 1 impatient + 1 imperfections + 1 imperial + 1 impious + 1 implements + 1 implorators + 1 importing + 1 importuned + 1 importunity + 1 impotent + 1 impress + 118 in + 1 incest + 2 incestuous + 1 incorrect + 1 increase + 8 indeed + 1 infants + 1 infinite + 1 influence + 1 inform + 1 inheritance + 1 inky + 2 instant + 1 instrumental + 1 intent + 1 intents + 5 into + 1 inurn + 1 investments + 1 invites + 1 invulnerable + 1 inward + 62 is + 1 issue + 126 it + 1 its + 9 itself + 1 iv + 1 jaws + 1 jelly + 1 jocund + 2 joint + 1 jointress + 1 joy + 1 judgment + 1 juice + 1 julius + 1 jump + 3 keep + 1 keeps + 1 kept + 1 kettle + 1 key + 1 kin + 1 kind + 23 king + 1 kingdom + 1 knave + 1 knew + 1 knotted + 17 know + 2 known + 1 knows + 1 labourer + 1 laboursome + 1 lack + 1 lacks + 16 laertes + 2 land + 3 lands + 1 larger + 3 last + 1 lasting + 3 late + 2 law + 1 lawless + 1 lay + 1 lazar + 1 lead + 2 least + 8 leave + 1 leavens + 1 left + 1 leisure + 1 lend + 1 lender + 1 lends + 1 length + 1 leperous + 2 less + 1 lesson + 23 let + 1 lethe + 1 lets + 1 levies + 1 lewdness + 1 libertine + 1 lids + 1 liegemen + 1 lies + 7 life + 1 lifted + 1 light + 1 lightest + 23 like + 1 link + 1 lion + 1 lips + 1 liquid + 6 list + 1 lists + 3 little + 3 live + 1 livery + 1 lives + 18 ll + 1 lo + 1 loan + 1 loathsome + 1 lock + 1 locks + 1 lodge + 1 lofty + 4 long + 3 longer + 10 look + 2 looks + 1 loose + 60 lord + 1 lords + 1 lordship + 2 lose + 1 loses + 1 loss + 5 lost + 1 loud + 8 love + 5 loves + 2 loving + 2 lust + 1 luxury + 2 m + 4 madam + 8 made + 1 madness + 1 maid + 1 maiden + 2 main + 1 majestical + 1 majesty + 8 make + 2 makes + 2 making + 1 malicious + 11 man + 1 
manner + 1 manners + 1 mantle + 2 many + 1 marble + 46 marcellus + 2 march + 2 mark + 3 marriage + 2 married + 1 marrow + 3 marry + 1 mart + 1 martial + 1 marvel + 1 matin + 1 matter + 19 may + 47 me + 2 mean + 2 means + 1 meats + 1 meditation + 3 meet + 1 meeting + 1 melt + 5 memory + 3 men + 2 mercy + 1 mere + 1 merely + 1 message + 1 met + 2 methinks + 1 methought + 1 mettle + 1 middle + 7 might + 1 mightiest + 1 milk + 6 mind + 6 mine + 1 ministers + 1 minute + 1 minutes + 1 mirth + 1 mock + 1 mockery + 1 moderate + 1 moiety + 1 moist + 2 mole + 1 moment + 3 month + 1 months + 1 moods + 2 moon + 19 more + 3 morn + 3 morning + 28 most + 1 mote + 5 mother + 1 motion + 2 motive + 1 mourn + 1 mourning + 1 mouse + 1 mouth + 1 moved + 9 much + 3 murder + 14 must + 126 my + 4 myself + 2 name + 1 nations + 2 native + 2 natural + 13 nature + 7 nay + 1 ne + 3 near + 1 necessaries + 1 need + 1 needful + 1 needs + 1 neither + 1 nemean + 1 nephew + 1 neptune + 1 nerve + 6 never + 1 new + 2 news + 22 night + 1 nighted + 1 nightly + 3 nights + 1 niobe + 1 nipping + 28 no + 1 nobility + 5 noble + 2 none + 14 nor + 5 norway + 80 not + 2 note + 2 nothing + 19 now + 30 o + 1 oath + 3 obey + 1 object + 1 obligation + 1 obsequious + 1 observance + 1 observant + 1 observation + 1 obstinate + 1 occasion + 1 odd + 176 of + 6 off + 2 offence + 1 offend + 1 offended + 2 offer + 7 oft + 4 old + 1 omen + 25 on + 8 once + 6 one + 1 oped + 1 open + 15 ophelia + 1 opinion + 1 opposed + 1 opposition + 1 oppress + 28 or + 2 orchard + 1 ordnance + 1 origin + 4 other + 45 our + 2 ourself + 1 ourselves + 8 out + 6 own + 1 ownself + 4 pale + 1 pales + 1 palm + 1 palmy + 1 pardon + 1 parle + 1 parley + 6 part + 6 particular + 1 partisan + 1 passeth + 1 passing + 1 past + 1 pastors + 1 path + 1 patrick + 1 pay + 1 pe + 2 peace + 1 peevish + 2 perchance + 1 perform + 1 perfume + 1 perhaps + 1 perilous + 1 permanent + 1 pernicious + 1 persever + 1 person + 1 personal + 1 persons + 1 perturbed + 1 
pester + 1 petition + 1 petty + 1 philosophy + 3 phrase + 1 piece + 1 pin + 1 pioner + 1 pious + 1 pith + 1 pity + 3 place + 1 plain + 1 planets + 5 platform + 1 plausive + 2 play + 1 please + 1 pledge + 2 point + 1 polacks + 1 pole + 13 polonius + 1 ponderous + 1 pooh + 9 poor + 1 porches + 1 porpentine + 1 portentous + 1 possess + 1 posset + 3 post + 1 pour + 3 power + 7 pray + 1 prayers + 1 preceding + 1 precepts + 1 precurse + 1 preparations + 1 presence + 1 present + 1 pressures + 1 prey + 2 prick + 1 pride + 1 primrose + 1 primy + 1 prince + 1 prison + 1 private + 1 privy + 1 probation + 1 process + 1 proclaims + 2 prodigal + 1 prologue + 1 promise + 1 pronouncing + 1 prophetic + 1 proportions + 1 propose + 1 puff + 1 pure + 1 purged + 1 purpose + 1 purse + 1 pursuest + 2 put + 1 puts + 1 quarrel + 7 queen + 2 question + 1 questionable + 1 quicksilver + 1 quiet + 1 quietly + 1 quills + 1 radiant + 2 rank + 1 rankly + 1 rate + 1 ratified + 2 re + 1 reaches + 1 rear + 5 reason + 1 rebels + 1 reckless + 1 reckoning + 1 recks + 1 records + 1 recover + 1 red + 1 rede + 1 reels + 1 relief + 1 relieved + 1 remain + 6 remember + 1 remembrance + 1 remove + 1 removed + 1 render + 1 reply + 1 report + 1 request + 1 requite + 1 reserve + 1 resolutes + 1 resolve + 2 rest + 1 retrograde + 2 return + 1 reveal + 1 revel + 3 revenge + 1 revisit + 1 rhenish + 1 rich + 1 rid + 3 right + 1 rise + 1 rivals + 1 river + 1 roar + 1 romage + 1 roman + 1 rome + 2 room + 1 roots + 1 rotten + 1 roughly + 2 rouse + 2 royal + 1 ruled + 1 running + 1 russet + 39 s + 1 sable + 2 safety + 3 said + 1 sail + 1 saint + 1 salt + 5 same + 1 sanctified + 1 sate + 1 satyr + 1 saviour + 6 saw + 1 saws + 11 say + 1 saying + 3 says + 1 scale + 1 scandal + 1 scanter + 1 scapes + 1 scarcely + 5 scene + 1 scent + 1 scholar + 1 scholars + 1 school + 2 scope + 3 sea + 2 seal + 4 season + 1 seat + 1 second + 1 secrecy + 1 secret + 1 secrets + 2 secure + 1 seduce + 7 see + 1 seed + 1 seeing + 1 seek + 2 seem 
+ 1 seeming + 3 seems + 8 seen + 1 seized + 1 select + 1 self + 1 sense + 1 sensible + 1 sent + 1 sepulchre + 1 serious + 2 serpent + 1 servant + 1 servants + 1 service + 4 set + 2 shake + 22 shall + 1 shalt + 1 shame + 1 shameful + 2 shape + 1 shapes + 1 shark + 6 she + 1 sheeted + 1 sheets + 1 shift + 1 shipwrights + 1 shoes + 2 shot + 6 should + 1 shoulder + 1 shouldst + 6 show + 2 shows + 1 shrewdly + 1 shrill + 1 shrunk + 2 sick + 1 side + 3 sight + 1 silence + 1 silver + 1 simple + 1 sin + 1 since + 1 sinews + 1 singeth + 3 sir + 1 sirs + 3 sister + 4 sit + 2 sits + 1 skirts + 1 slander + 1 slaughter + 1 slay + 1 sledded + 1 sleep + 3 sleeping + 2 slow + 2 smile + 1 smiles + 2 smiling + 1 smooth + 1 smote + 48 so + 1 soe + 2 soft + 2 soil + 1 soldier + 1 soldiers + 2 solemn + 1 solid + 13 some + 3 something + 1 sometime + 1 sometimes + 1 somewhat + 3 son + 1 songs + 1 sore + 3 sorrow + 1 sorry + 1 sort + 8 soul + 1 souls + 2 sound + 1 sounding + 1 source + 1 sovereignty + 27 speak + 1 speaking + 1 speech + 1 speed + 1 spend + 1 spheres + 8 spirit + 1 spirits + 1 spite + 1 spoke + 2 spring + 1 springes + 1 squeak + 4 st + 1 stale + 1 stalk + 1 stalks + 1 stamp + 5 stand + 1 stands + 3 star + 2 stars + 1 start + 1 started + 8 state + 1 stately + 1 station + 7 stay + 2 steel + 1 steep + 1 sterling + 1 stiffly + 8 still + 2 sting + 2 stir + 1 stirring + 1 stole + 1 stomach + 2 stood + 1 stop + 1 story + 6 strange + 1 stranger + 1 streets + 1 strict + 2 strike + 1 strokes + 1 strong + 2 struck + 1 stubbornness + 1 student + 1 stung + 3 subject + 1 substance + 10 such + 1 sudden + 1 suit + 3 suits + 1 sulphurous + 1 summit + 1 summons + 2 sun + 1 sunday + 1 suppliance + 1 supposal + 1 suppress + 1 sure + 1 surprised + 1 surrender + 1 survivor + 1 suspiration + 1 sustain + 1 swaggering + 10 swear + 1 sweaty + 1 sweep + 2 sweet + 2 swift + 1 swinish + 5 sword + 2 sworn + 18 t + 1 ta + 1 table + 2 tables + 1 taint + 10 take + 1 taken + 3 takes + 1 tale + 1 talk + 1 
task + 1 tax + 2 teach + 2 tears + 9 tell + 1 temple + 1 tempt + 1 tenable + 1 tenantless + 1 tend + 2 tender + 3 tenders + 2 term + 2 terms + 1 tether + 1 tetter + 15 than + 2 thanks + 83 that + 1 thaw + 237 the + 23 thee + 10 their + 10 them + 1 theme + 15 then + 18 there + 4 therefore + 1 thereto + 13 these + 1 thews + 14 they + 1 thin + 3 thine + 6 thing + 3 things + 16 think + 1 thinking + 1 third + 67 this + 1 thorns + 1 thorny + 7 those + 28 thou + 10 though + 2 thought + 4 thoughts + 1 thrice + 2 thrift + 1 throat + 2 throne + 3 through + 1 throw + 1 thunder + 9 thus + 36 thy + 1 thyself + 4 till + 10 time + 1 times + 22 tis + 192 to + 1 toe + 7 together + 1 toils + 2 told + 4 tongue + 9 too + 1 top + 1 tormenting + 3 touching + 4 toward + 1 toy + 1 toys + 1 traduced + 1 tragedy + 1 trains + 1 traitorous + 1 trappings + 1 treads + 2 treasure + 1 tremble + 1 tried + 1 trifling + 1 triumph + 1 trivial + 1 trouble + 1 troubles + 2 truant + 5 true + 1 truepenny + 1 truly + 2 trumpet + 1 trumpets + 1 truncheon + 1 truster + 2 truth + 2 tush + 3 twelve + 1 twere + 2 twice + 2 twill + 1 twixt + 5 two + 1 ubique + 1 unanel + 5 uncle + 1 undergo + 1 understand + 2 understanding + 1 uneffectual + 1 unfledged + 3 unfold + 1 unforced + 1 unfortified + 1 ungracious + 1 unhand + 1 unholy + 1 unhousel + 1 unimproved + 1 unmanly + 1 unmask + 1 unmaster + 1 unmix + 2 unnatural + 1 unprevailing + 1 unprofitable + 1 unproportioned + 1 unrighteous + 1 unschool + 1 unsifted + 4 unto + 1 unvalued + 1 unweeded + 10 up + 1 uphoarded + 18 upon + 19 us + 1 use + 1 uses + 1 usurp + 1 v + 1 vailed + 1 vain + 2 valiant + 1 vanish + 1 vanquisher + 1 vast + 9 very + 1 vial + 1 vicious + 1 vigour + 1 vile + 5 villain + 2 violence + 1 violet + 3 virtue + 1 virtues + 1 virtuous + 1 visage + 1 vision + 2 visit + 5 voice + 4 voltimand + 1 volume + 1 vow + 3 vows + 2 vulgar + 1 wake + 6 walk + 1 walks + 1 wants + 1 war + 2 warlike + 1 warning + 1 warrant + 1 wars + 1 wary + 17 was + 1 wassail 
+ 12 watch + 1 watchman + 3 waves + 2 waxes + 2 way + 1 ways + 34 we + 1 weak + 1 wears + 1 weary + 1 wedding + 1 weed + 1 week + 2 weigh + 1 weighing + 3 welcome + 14 well + 1 went + 3 were + 1 west + 1 westward + 1 wharf + 42 what + 1 whatsoever + 8 when + 1 whence + 9 where + 1 wherefore + 4 wherein + 2 whereof + 1 whether + 16 which + 2 while + 1 whiles + 1 whilst + 1 whirling + 1 whisper + 8 who + 3 whole + 2 wholesome + 8 whose + 13 why + 3 wicked + 1 wide + 1 wife + 1 wild + 25 will + 1 willing + 1 willingly + 1 wilt + 2 wind + 2 winds + 1 windy + 1 wings + 1 wipe + 1 wisdom + 1 wisdoms + 1 wisest + 1 wishes + 2 wit + 1 witch + 1 witchcraft + 65 with + 2 withal + 11 within + 3 without + 1 witness + 4 wittenberg + 3 woe + 2 woman + 1 womb + 1 won + 1 wonder + 1 wonderful + 1 wondrous + 1 wont + 1 woodcocks + 3 word + 2 words + 1 wore + 2 work + 3 world + 1 worm + 1 worth + 1 worthy + 14 would + 3 wouldst + 1 wretch + 2 writ + 1 writing + 1 wrong + 1 wrung + 1 yea + 4 yes + 1 yesternight + 7 yet + 1 yielding + 1 yon + 1 yond + 110 you + 6 young + 49 your + 7 yourself + 5 youth diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/hamlet-act-1.txt b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/hamlet-act-1.txt new file mode 100644 index 00000000..2491678e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/hamlet-act-1.txt @@ -0,0 +1,1234 @@ +The Tragedy of Hamlet, Prince of Denmark + +ACT I + +SCENE I. Elsinore. A platform before the castle. + +FRANCISCO at his post. Enter to him BERNARDO +BERNARDO +Who's there? +FRANCISCO +Nay, answer me: stand, and unfold yourself. +BERNARDO +Long live the king! +FRANCISCO +Bernardo? +BERNARDO +He. +FRANCISCO +You come most carefully upon your hour. +BERNARDO +'Tis now struck twelve; get thee to bed, Francisco. 
+FRANCISCO +For this relief much thanks: 'tis bitter cold, +And I am sick at heart. +BERNARDO +Have you had quiet guard? +FRANCISCO +Not a mouse stirring. +BERNARDO +Well, good night. +If you do meet Horatio and Marcellus, +The rivals of my watch, bid them make haste. +FRANCISCO +I think I hear them. Stand, ho! Who's there? +Enter HORATIO and MARCELLUS + +HORATIO +Friends to this ground. +MARCELLUS +And liegemen to the Dane. +FRANCISCO +Give you good night. +MARCELLUS +O, farewell, honest soldier: +Who hath relieved you? +FRANCISCO +Bernardo has my place. +Give you good night. +Exit + +MARCELLUS +Holla! Bernardo! +BERNARDO +Say, +What, is Horatio there? +HORATIO +A piece of him. +BERNARDO +Welcome, Horatio: welcome, good Marcellus. +MARCELLUS +What, has this thing appear'd again to-night? +BERNARDO +I have seen nothing. +MARCELLUS +Horatio says 'tis but our fantasy, +And will not let belief take hold of him +Touching this dreaded sight, twice seen of us: +Therefore I have entreated him along +With us to watch the minutes of this night; +That if again this apparition come, +He may approve our eyes and speak to it. +HORATIO +Tush, tush, 'twill not appear. +BERNARDO +Sit down awhile; +And let us once again assail your ears, +That are so fortified against our story +What we have two nights seen. +HORATIO +Well, sit we down, +And let us hear Bernardo speak of this. +BERNARDO +Last night of all, +When yond same star that's westward from the pole +Had made his course to illume that part of heaven +Where now it burns, Marcellus and myself, +The bell then beating one,-- +Enter Ghost + +MARCELLUS +Peace, break thee off; look, where it comes again! +BERNARDO +In the same figure, like the king that's dead. +MARCELLUS +Thou art a scholar; speak to it, Horatio. +BERNARDO +Looks it not like the king? mark it, Horatio. +HORATIO +Most like: it harrows me with fear and wonder. +BERNARDO +It would be spoke to. +MARCELLUS +Question it, Horatio. 
+HORATIO +What art thou that usurp'st this time of night, +Together with that fair and warlike form +In which the majesty of buried Denmark +Did sometimes march? by heaven I charge thee, speak! +MARCELLUS +It is offended. +BERNARDO +See, it stalks away! +HORATIO +Stay! speak, speak! I charge thee, speak! +Exit Ghost + +MARCELLUS +'Tis gone, and will not answer. +BERNARDO +How now, Horatio! you tremble and look pale: +Is not this something more than fantasy? +What think you on't? +HORATIO +Before my God, I might not this believe +Without the sensible and true avouch +Of mine own eyes. +MARCELLUS +Is it not like the king? +HORATIO +As thou art to thyself: +Such was the very armour he had on +When he the ambitious Norway combated; +So frown'd he once, when, in an angry parle, +He smote the sledded Polacks on the ice. +'Tis strange. +MARCELLUS +Thus twice before, and jump at this dead hour, +With martial stalk hath he gone by our watch. +HORATIO +In what particular thought to work I know not; +But in the gross and scope of my opinion, +This bodes some strange eruption to our state. +MARCELLUS +Good now, sit down, and tell me, he that knows, +Why this same strict and most observant watch +So nightly toils the subject of the land, +And why such daily cast of brazen cannon, +And foreign mart for implements of war; +Why such impress of shipwrights, whose sore task +Does not divide the Sunday from the week; +What might be toward, that this sweaty haste +Doth make the night joint-labourer with the day: +Who is't that can inform me? +HORATIO +That can I; +At least, the whisper goes so. 
Our last king, +Whose image even but now appear'd to us, +Was, as you know, by Fortinbras of Norway, +Thereto prick'd on by a most emulate pride, +Dared to the combat; in which our valiant Hamlet-- +For so this side of our known world esteem'd him-- +Did slay this Fortinbras; who by a seal'd compact, +Well ratified by law and heraldry, +Did forfeit, with his life, all those his lands +Which he stood seized of, to the conqueror: +Against the which, a moiety competent +Was gaged by our king; which had return'd +To the inheritance of Fortinbras, +Had he been vanquisher; as, by the same covenant, +And carriage of the article design'd, +His fell to Hamlet. Now, sir, young Fortinbras, +Of unimproved mettle hot and full, +Hath in the skirts of Norway here and there +Shark'd up a list of lawless resolutes, +For food and diet, to some enterprise +That hath a stomach in't; which is no other-- +As it doth well appear unto our state-- +But to recover of us, by strong hand +And terms compulsatory, those foresaid lands +So by his father lost: and this, I take it, +Is the main motive of our preparations, +The source of this our watch and the chief head +Of this post-haste and romage in the land. +BERNARDO +I think it be no other but e'en so: +Well may it sort that this portentous figure +Comes armed through our watch; so like the king +That was and is the question of these wars. +HORATIO +A mote it is to trouble the mind's eye. 
+In the most high and palmy state of Rome, +A little ere the mightiest Julius fell, +The graves stood tenantless and the sheeted dead +Did squeak and gibber in the Roman streets: +As stars with trains of fire and dews of blood, +Disasters in the sun; and the moist star +Upon whose influence Neptune's empire stands +Was sick almost to doomsday with eclipse: +And even the like precurse of fierce events, +As harbingers preceding still the fates +And prologue to the omen coming on, +Have heaven and earth together demonstrated +Unto our climatures and countrymen.-- +But soft, behold! lo, where it comes again! +Re-enter Ghost + +I'll cross it, though it blast me. Stay, illusion! +If thou hast any sound, or use of voice, +Speak to me: +If there be any good thing to be done, +That may to thee do ease and grace to me, +Speak to me: +Cock crows + +If thou art privy to thy country's fate, +Which, happily, foreknowing may avoid, O, speak! +Or if thou hast uphoarded in thy life +Extorted treasure in the womb of earth, +For which, they say, you spirits oft walk in death, +Speak of it: stay, and speak! Stop it, Marcellus. +MARCELLUS +Shall I strike at it with my partisan? +HORATIO +Do, if it will not stand. +BERNARDO +'Tis here! +HORATIO +'Tis here! +MARCELLUS +'Tis gone! +Exit Ghost + +We do it wrong, being so majestical, +To offer it the show of violence; +For it is, as the air, invulnerable, +And our vain blows malicious mockery. +BERNARDO +It was about to speak, when the cock crew. +HORATIO +And then it started like a guilty thing +Upon a fearful summons. I have heard, +The cock, that is the trumpet to the morn, +Doth with his lofty and shrill-sounding throat +Awake the god of day; and, at his warning, +Whether in sea or fire, in earth or air, +The extravagant and erring spirit hies +To his confine: and of the truth herein +This present object made probation. +MARCELLUS +It faded on the crowing of the cock. 
+Some say that ever 'gainst that season comes +Wherein our Saviour's birth is celebrated, +The bird of dawning singeth all night long: +And then, they say, no spirit dares stir abroad; +The nights are wholesome; then no planets strike, +No fairy takes, nor witch hath power to charm, +So hallow'd and so gracious is the time. +HORATIO +So have I heard and do in part believe it. +But, look, the morn, in russet mantle clad, +Walks o'er the dew of yon high eastward hill: +Break we our watch up; and by my advice, +Let us impart what we have seen to-night +Unto young Hamlet; for, upon my life, +This spirit, dumb to us, will speak to him. +Do you consent we shall acquaint him with it, +As needful in our loves, fitting our duty? +MARCELLUS +Let's do't, I pray; and I this morning know +Where we shall find him most conveniently. +Exeunt + +SCENE II. A room of state in the castle. + +Enter KING CLAUDIUS, QUEEN GERTRUDE, HAMLET, POLONIUS, LAERTES, VOLTIMAND, CORNELIUS, Lords, and Attendants +KING CLAUDIUS +Though yet of Hamlet our dear brother's death +The memory be green, and that it us befitted +To bear our hearts in grief and our whole kingdom +To be contracted in one brow of woe, +Yet so far hath discretion fought with nature +That we with wisest sorrow think on him, +Together with remembrance of ourselves. +Therefore our sometime sister, now our queen, +The imperial jointress to this warlike state, +Have we, as 'twere with a defeated joy,-- +With an auspicious and a dropping eye, +With mirth in funeral and with dirge in marriage, +In equal scale weighing delight and dole,-- +Taken to wife: nor have we herein barr'd +Your better wisdoms, which have freely gone +With this affair along. For all, our thanks. 
+Now follows, that you know, young Fortinbras, +Holding a weak supposal of our worth, +Or thinking by our late dear brother's death +Our state to be disjoint and out of frame, +Colleagued with the dream of his advantage, +He hath not fail'd to pester us with message, +Importing the surrender of those lands +Lost by his father, with all bonds of law, +To our most valiant brother. So much for him. +Now for ourself and for this time of meeting: +Thus much the business is: we have here writ +To Norway, uncle of young Fortinbras,-- +Who, impotent and bed-rid, scarcely hears +Of this his nephew's purpose,--to suppress +His further gait herein; in that the levies, +The lists and full proportions, are all made +Out of his subject: and we here dispatch +You, good Cornelius, and you, Voltimand, +For bearers of this greeting to old Norway; +Giving to you no further personal power +To business with the king, more than the scope +Of these delated articles allow. +Farewell, and let your haste commend your duty. +CORNELIUS VOLTIMAND +In that and all things will we show our duty. +KING CLAUDIUS +We doubt it nothing: heartily farewell. +Exeunt VOLTIMAND and CORNELIUS + +And now, Laertes, what's the news with you? +You told us of some suit; what is't, Laertes? +You cannot speak of reason to the Dane, +And loose your voice: what wouldst thou beg, Laertes, +That shall not be my offer, not thy asking? +The head is not more native to the heart, +The hand more instrumental to the mouth, +Than is the throne of Denmark to thy father. +What wouldst thou have, Laertes? +LAERTES +My dread lord, +Your leave and favour to return to France; +From whence though willingly I came to Denmark, +To show my duty in your coronation, +Yet now, I must confess, that duty done, +My thoughts and wishes bend again toward France +And bow them to your gracious leave and pardon. +KING CLAUDIUS +Have you your father's leave? What says Polonius? 
+LORD POLONIUS +He hath, my lord, wrung from me my slow leave +By laboursome petition, and at last +Upon his will I seal'd my hard consent: +I do beseech you, give him leave to go. +KING CLAUDIUS +Take thy fair hour, Laertes; time be thine, +And thy best graces spend it at thy will! +But now, my cousin Hamlet, and my son,-- +HAMLET +[Aside] A little more than kin, and less than kind. +KING CLAUDIUS +How is it that the clouds still hang on you? +HAMLET +Not so, my lord; I am too much i' the sun. +QUEEN GERTRUDE +Good Hamlet, cast thy nighted colour off, +And let thine eye look like a friend on Denmark. +Do not for ever with thy vailed lids +Seek for thy noble father in the dust: +Thou know'st 'tis common; all that lives must die, +Passing through nature to eternity. +HAMLET +Ay, madam, it is common. +QUEEN GERTRUDE +If it be, +Why seems it so particular with thee? +HAMLET +Seems, madam! nay it is; I know not 'seems.' +'Tis not alone my inky cloak, good mother, +Nor customary suits of solemn black, +Nor windy suspiration of forced breath, +No, nor the fruitful river in the eye, +Nor the dejected 'havior of the visage, +Together with all forms, moods, shapes of grief, +That can denote me truly: these indeed seem, +For they are actions that a man might play: +But I have that within which passeth show; +These but the trappings and the suits of woe. 
+KING CLAUDIUS +'Tis sweet and commendable in your nature, Hamlet, +To give these mourning duties to your father: +But, you must know, your father lost a father; +That father lost, lost his, and the survivor bound +In filial obligation for some term +To do obsequious sorrow: but to persever +In obstinate condolement is a course +Of impious stubbornness; 'tis unmanly grief; +It shows a will most incorrect to heaven, +A heart unfortified, a mind impatient, +An understanding simple and unschool'd: +For what we know must be and is as common +As any the most vulgar thing to sense, +Why should we in our peevish opposition +Take it to heart? Fie! 'tis a fault to heaven, +A fault against the dead, a fault to nature, +To reason most absurd: whose common theme +Is death of fathers, and who still hath cried, +From the first corse till he that died to-day, +'This must be so.' We pray you, throw to earth +This unprevailing woe, and think of us +As of a father: for let the world take note, +You are the most immediate to our throne; +And with no less nobility of love +Than that which dearest father bears his son, +Do I impart toward you. For your intent +In going back to school in Wittenberg, +It is most retrograde to our desire: +And we beseech you, bend you to remain +Here, in the cheer and comfort of our eye, +Our chiefest courtier, cousin, and our son. +QUEEN GERTRUDE +Let not thy mother lose her prayers, Hamlet: +I pray thee, stay with us; go not to Wittenberg. +HAMLET +I shall in all my best obey you, madam. +KING CLAUDIUS +Why, 'tis a loving and a fair reply: +Be as ourself in Denmark. Madam, come; +This gentle and unforced accord of Hamlet +Sits smiling to my heart: in grace whereof, +No jocund health that Denmark drinks to-day, +But the great cannon to the clouds shall tell, +And the king's rouse the heavens all bruit again, +Re-speaking earthly thunder. Come away. 
+Exeunt all but HAMLET + +HAMLET +O, that this too too solid flesh would melt +Thaw and resolve itself into a dew! +Or that the Everlasting had not fix'd +His canon 'gainst self-slaughter! O God! God! +How weary, stale, flat and unprofitable, +Seem to me all the uses of this world! +Fie on't! ah fie! 'tis an unweeded garden, +That grows to seed; things rank and gross in nature +Possess it merely. That it should come to this! +But two months dead: nay, not so much, not two: +So excellent a king; that was, to this, +Hyperion to a satyr; so loving to my mother +That he might not beteem the winds of heaven +Visit her face too roughly. Heaven and earth! +Must I remember? why, she would hang on him, +As if increase of appetite had grown +By what it fed on: and yet, within a month-- +Let me not think on't--Frailty, thy name is woman!-- +A little month, or ere those shoes were old +With which she follow'd my poor father's body, +Like Niobe, all tears:--why she, even she-- +O, God! a beast, that wants discourse of reason, +Would have mourn'd longer--married with my uncle, +My father's brother, but no more like my father +Than I to Hercules: within a month: +Ere yet the salt of most unrighteous tears +Had left the flushing in her galled eyes, +She married. O, most wicked speed, to post +With such dexterity to incestuous sheets! +It is not nor it cannot come to good: +But break, my heart; for I must hold my tongue. +Enter HORATIO, MARCELLUS, and BERNARDO + +HORATIO +Hail to your lordship! +HAMLET +I am glad to see you well: +Horatio,--or I do forget myself. +HORATIO +The same, my lord, and your poor servant ever. +HAMLET +Sir, my good friend; I'll change that name with you: +And what make you from Wittenberg, Horatio? Marcellus? +MARCELLUS +My good lord-- +HAMLET +I am very glad to see you. Good even, sir. +But what, in faith, make you from Wittenberg? +HORATIO +A truant disposition, good my lord. 
+HAMLET +I would not hear your enemy say so, +Nor shall you do mine ear that violence, +To make it truster of your own report +Against yourself: I know you are no truant. +But what is your affair in Elsinore? +We'll teach you to drink deep ere you depart. +HORATIO +My lord, I came to see your father's funeral. +HAMLET +I pray thee, do not mock me, fellow-student; +I think it was to see my mother's wedding. +HORATIO +Indeed, my lord, it follow'd hard upon. +HAMLET +Thrift, thrift, Horatio! the funeral baked meats +Did coldly furnish forth the marriage tables. +Would I had met my dearest foe in heaven +Or ever I had seen that day, Horatio! +My father!--methinks I see my father. +HORATIO +Where, my lord? +HAMLET +In my mind's eye, Horatio. +HORATIO +I saw him once; he was a goodly king. +HAMLET +He was a man, take him for all in all, +I shall not look upon his like again. +HORATIO +My lord, I think I saw him yesternight. +HAMLET +Saw? who? +HORATIO +My lord, the king your father. +HAMLET +The king my father! +HORATIO +Season your admiration for awhile +With an attent ear, till I may deliver, +Upon the witness of these gentlemen, +This marvel to you. +HAMLET +For God's love, let me hear. +HORATIO +Two nights together had these gentlemen, +Marcellus and Bernardo, on their watch, +In the dead vast and middle of the night, +Been thus encounter'd. A figure like your father, +Armed at point exactly, cap-a-pe, +Appears before them, and with solemn march +Goes slow and stately by them: thrice he walk'd +By their oppress'd and fear-surprised eyes, +Within his truncheon's length; whilst they, distilled +Almost to jelly with the act of fear, +Stand dumb and speak not to him. This to me +In dreadful secrecy impart they did; +And I with them the third night kept the watch; +Where, as they had deliver'd, both in time, +Form of the thing, each word made true and good, +The apparition comes: I knew your father; +These hands are not more like. +HAMLET +But where was this? 
+MARCELLUS +My lord, upon the platform where we watch'd. +HAMLET +Did you not speak to it? +HORATIO +My lord, I did; +But answer made it none: yet once methought +It lifted up its head and did address +Itself to motion, like as it would speak; +But even then the morning cock crew loud, +And at the sound it shrunk in haste away, +And vanish'd from our sight. +HAMLET +'Tis very strange. +HORATIO +As I do live, my honour'd lord, 'tis true; +And we did think it writ down in our duty +To let you know of it. +HAMLET +Indeed, indeed, sirs, but this troubles me. +Hold you the watch to-night? +MARCELLUS BERNARDO +We do, my lord. +HAMLET +Arm'd, say you? +MARCELLUS BERNARDO +Arm'd, my lord. +HAMLET +From top to toe? +MARCELLUS BERNARDO +My lord, from head to foot. +HAMLET +Then saw you not his face? +HORATIO +O, yes, my lord; he wore his beaver up. +HAMLET +What, look'd he frowningly? +HORATIO +A countenance more in sorrow than in anger. +HAMLET +Pale or red? +HORATIO +Nay, very pale. +HAMLET +And fix'd his eyes upon you? +HORATIO +Most constantly. +HAMLET +I would I had been there. +HORATIO +It would have much amazed you. +HAMLET +Very like, very like. Stay'd it long? +HORATIO +While one with moderate haste might tell a hundred. +MARCELLUS BERNARDO +Longer, longer. +HORATIO +Not when I saw't. +HAMLET +His beard was grizzled--no? +HORATIO +It was, as I have seen it in his life, +A sable silver'd. +HAMLET +I will watch to-night; +Perchance 'twill walk again. +HORATIO +I warrant it will. +HAMLET +If it assume my noble father's person, +I'll speak to it, though hell itself should gape +And bid me hold my peace. I pray you all, +If you have hitherto conceal'd this sight, +Let it be tenable in your silence still; +And whatsoever else shall hap to-night, +Give it an understanding, but no tongue: +I will requite your loves. So, fare you well: +Upon the platform, 'twixt eleven and twelve, +I'll visit you. +All +Our duty to your honour. +HAMLET +Your loves, as mine to you: farewell. 
+Exeunt all but HAMLET + +My father's spirit in arms! all is not well; +I doubt some foul play: would the night were come! +Till then sit still, my soul: foul deeds will rise, +Though all the earth o'erwhelm them, to men's eyes. +Exit + +SCENE III. A room in Polonius' house. + +Enter LAERTES and OPHELIA +LAERTES +My necessaries are embark'd: farewell: +And, sister, as the winds give benefit +And convoy is assistant, do not sleep, +But let me hear from you. +OPHELIA +Do you doubt that? +LAERTES +For Hamlet and the trifling of his favour, +Hold it a fashion and a toy in blood, +A violet in the youth of primy nature, +Forward, not permanent, sweet, not lasting, +The perfume and suppliance of a minute; No more. +OPHELIA +No more but so? +LAERTES +Think it no more; +For nature, crescent, does not grow alone +In thews and bulk, but, as this temple waxes, +The inward service of the mind and soul +Grows wide withal. Perhaps he loves you now, +And now no soil nor cautel doth besmirch +The virtue of his will: but you must fear, +His greatness weigh'd, his will is not his own; +For he himself is subject to his birth: +He may not, as unvalued persons do, +Carve for himself; for on his choice depends +The safety and health of this whole state; +And therefore must his choice be circumscribed +Unto the voice and yielding of that body +Whereof he is the head. Then if he says he loves you, +It fits your wisdom so far to believe it +As he in his particular act and place +May give his saying deed; which is no further +Than the main voice of Denmark goes withal. +Then weigh what loss your honour may sustain, +If with too credent ear you list his songs, +Or lose your heart, or your chaste treasure open +To his unmaster'd importunity. +Fear it, Ophelia, fear it, my dear sister, +And keep you in the rear of your affection, +Out of the shot and danger of desire. 
+The chariest maid is prodigal enough, +If she unmask her beauty to the moon: +Virtue itself 'scapes not calumnious strokes: +The canker galls the infants of the spring, +Too oft before their buttons be disclosed, +And in the morn and liquid dew of youth +Contagious blastments are most imminent. +Be wary then; best safety lies in fear: +Youth to itself rebels, though none else near. +OPHELIA +I shall the effect of this good lesson keep, +As watchman to my heart. But, good my brother, +Do not, as some ungracious pastors do, +Show me the steep and thorny way to heaven; +Whiles, like a puff'd and reckless libertine, +Himself the primrose path of dalliance treads, +And recks not his own rede. +LAERTES +O, fear me not. +I stay too long: but here my father comes. +Enter POLONIUS + +A double blessing is a double grace, +Occasion smiles upon a second leave. +LORD POLONIUS +Yet here, Laertes! aboard, aboard, for shame! +The wind sits in the shoulder of your sail, +And you are stay'd for. There; my blessing with thee! +And these few precepts in thy memory +See thou character. Give thy thoughts no tongue, +Nor any unproportioned thought his act. +Be thou familiar, but by no means vulgar. +Those friends thou hast, and their adoption tried, +Grapple them to thy soul with hoops of steel; +But do not dull thy palm with entertainment +Of each new-hatch'd, unfledged comrade. Beware +Of entrance to a quarrel, but being in, +Bear't that the opposed may beware of thee. +Give every man thy ear, but few thy voice; +Take each man's censure, but reserve thy judgment. +Costly thy habit as thy purse can buy, +But not express'd in fancy; rich, not gaudy; +For the apparel oft proclaims the man, +And they in France of the best rank and station +Are of a most select and generous chief in that. +Neither a borrower nor a lender be; +For loan oft loses both itself and friend, +And borrowing dulls the edge of husbandry. 
+This above all: to thine ownself be true, +And it must follow, as the night the day, +Thou canst not then be false to any man. +Farewell: my blessing season this in thee! +LAERTES +Most humbly do I take my leave, my lord. +LORD POLONIUS +The time invites you; go; your servants tend. +LAERTES +Farewell, Ophelia; and remember well +What I have said to you. +OPHELIA +'Tis in my memory lock'd, +And you yourself shall keep the key of it. +LAERTES +Farewell. +Exit + +LORD POLONIUS +What is't, Ophelia, be hath said to you? +OPHELIA +So please you, something touching the Lord Hamlet. +LORD POLONIUS +Marry, well bethought: +'Tis told me, he hath very oft of late +Given private time to you; and you yourself +Have of your audience been most free and bounteous: +If it be so, as so 'tis put on me, +And that in way of caution, I must tell you, +You do not understand yourself so clearly +As it behoves my daughter and your honour. +What is between you? give me up the truth. +OPHELIA +He hath, my lord, of late made many tenders +Of his affection to me. +LORD POLONIUS +Affection! pooh! you speak like a green girl, +Unsifted in such perilous circumstance. +Do you believe his tenders, as you call them? +OPHELIA +I do not know, my lord, what I should think. +LORD POLONIUS +Marry, I'll teach you: think yourself a baby; +That you have ta'en these tenders for true pay, +Which are not sterling. Tender yourself more dearly; +Or--not to crack the wind of the poor phrase, +Running it thus--you'll tender me a fool. +OPHELIA +My lord, he hath importuned me with love +In honourable fashion. +LORD POLONIUS +Ay, fashion you may call it; go to, go to. +OPHELIA +And hath given countenance to his speech, my lord, +With almost all the holy vows of heaven. +LORD POLONIUS +Ay, springes to catch woodcocks. 
I do know, +When the blood burns, how prodigal the soul +Lends the tongue vows: these blazes, daughter, +Giving more light than heat, extinct in both, +Even in their promise, as it is a-making, +You must not take for fire. From this time +Be somewhat scanter of your maiden presence; +Set your entreatments at a higher rate +Than a command to parley. For Lord Hamlet, +Believe so much in him, that he is young +And with a larger tether may he walk +Than may be given you: in few, Ophelia, +Do not believe his vows; for they are brokers, +Not of that dye which their investments show, +But mere implorators of unholy suits, +Breathing like sanctified and pious bawds, +The better to beguile. This is for all: +I would not, in plain terms, from this time forth, +Have you so slander any moment leisure, +As to give words or talk with the Lord Hamlet. +Look to't, I charge you: come your ways. +OPHELIA +I shall obey, my lord. +Exeunt + +SCENE IV. The platform. + +Enter HAMLET, HORATIO, and MARCELLUS +HAMLET +The air bites shrewdly; it is very cold. +HORATIO +It is a nipping and an eager air. +HAMLET +What hour now? +HORATIO +I think it lacks of twelve. +HAMLET +No, it is struck. +HORATIO +Indeed? I heard it not: then it draws near the season +Wherein the spirit held his wont to walk. +A flourish of trumpets, and ordnance shot off, within + +What does this mean, my lord? +HAMLET +The king doth wake to-night and takes his rouse, +Keeps wassail, and the swaggering up-spring reels; +And, as he drains his draughts of Rhenish down, +The kettle-drum and trumpet thus bray out +The triumph of his pledge. +HORATIO +Is it a custom? +HAMLET +Ay, marry, is't: +But to my mind, though I am native here +And to the manner born, it is a custom +More honour'd in the breach than the observance. 
+This heavy-headed revel east and west +Makes us traduced and tax'd of other nations: +They clepe us drunkards, and with swinish phrase +Soil our addition; and indeed it takes +From our achievements, though perform'd at height, +The pith and marrow of our attribute. +So, oft it chances in particular men, +That for some vicious mole of nature in them, +As, in their birth--wherein they are not guilty, +Since nature cannot choose his origin-- +By the o'ergrowth of some complexion, +Oft breaking down the pales and forts of reason, +Or by some habit that too much o'er-leavens +The form of plausive manners, that these men, +Carrying, I say, the stamp of one defect, +Being nature's livery, or fortune's star,-- +Their virtues else--be they as pure as grace, +As infinite as man may undergo-- +Shall in the general censure take corruption +From that particular fault: the dram of eale +Doth all the noble substance of a doubt +To his own scandal. +HORATIO +Look, my lord, it comes! +Enter Ghost + +HAMLET +Angels and ministers of grace defend us! +Be thou a spirit of health or goblin damn'd, +Bring with thee airs from heaven or blasts from hell, +Be thy intents wicked or charitable, +Thou comest in such a questionable shape +That I will speak to thee: I'll call thee Hamlet, +King, father, royal Dane: O, answer me! +Let me not burst in ignorance; but tell +Why thy canonized bones, hearsed in death, +Have burst their cerements; why the sepulchre, +Wherein we saw thee quietly inurn'd, +Hath oped his ponderous and marble jaws, +To cast thee up again. What may this mean, +That thou, dead corse, again in complete steel +Revisit'st thus the glimpses of the moon, +Making night hideous; and we fools of nature +So horridly to shake our disposition +With thoughts beyond the reaches of our souls? +Say, why is this? wherefore? what should we do? +Ghost beckons HAMLET + +HORATIO +It beckons you to go away with it, +As if it some impartment did desire +To you alone. 
+MARCELLUS +Look, with what courteous action +It waves you to a more removed ground: +But do not go with it. +HORATIO +No, by no means. +HAMLET +It will not speak; then I will follow it. +HORATIO +Do not, my lord. +HAMLET +Why, what should be the fear? +I do not set my life in a pin's fee; +And for my soul, what can it do to that, +Being a thing immortal as itself? +It waves me forth again: I'll follow it. +HORATIO +What if it tempt you toward the flood, my lord, +Or to the dreadful summit of the cliff +That beetles o'er his base into the sea, +And there assume some other horrible form, +Which might deprive your sovereignty of reason +And draw you into madness? think of it: +The very place puts toys of desperation, +Without more motive, into every brain +That looks so many fathoms to the sea +And hears it roar beneath. +HAMLET +It waves me still. +Go on; I'll follow thee. +MARCELLUS +You shall not go, my lord. +HAMLET +Hold off your hands. +HORATIO +Be ruled; you shall not go. +HAMLET +My fate cries out, +And makes each petty artery in this body +As hardy as the Nemean lion's nerve. +Still am I call'd. Unhand me, gentlemen. +By heaven, I'll make a ghost of him that lets me! +I say, away! Go on; I'll follow thee. +Exeunt Ghost and HAMLET + +HORATIO +He waxes desperate with imagination. +MARCELLUS +Let's follow; 'tis not fit thus to obey him. +HORATIO +Have after. To what issue will this come? +MARCELLUS +Something is rotten in the state of Denmark. +HORATIO +Heaven will direct it. +MARCELLUS +Nay, let's follow him. +Exeunt + +SCENE V. Another part of the platform. + +Enter GHOST and HAMLET +HAMLET +Where wilt thou lead me? speak; I'll go no further. +Ghost +Mark me. +HAMLET +I will. +Ghost +My hour is almost come, +When I to sulphurous and tormenting flames +Must render up myself. +HAMLET +Alas, poor ghost! +Ghost +Pity me not, but lend thy serious hearing +To what I shall unfold. +HAMLET +Speak; I am bound to hear. 
+Ghost +So art thou to revenge, when thou shalt hear. +HAMLET +What? +Ghost +I am thy father's spirit, +Doom'd for a certain term to walk the night, +And for the day confined to fast in fires, +Till the foul crimes done in my days of nature +Are burnt and purged away. But that I am forbid +To tell the secrets of my prison-house, +I could a tale unfold whose lightest word +Would harrow up thy soul, freeze thy young blood, +Make thy two eyes, like stars, start from their spheres, +Thy knotted and combined locks to part +And each particular hair to stand on end, +Like quills upon the fretful porpentine: +But this eternal blazon must not be +To ears of flesh and blood. List, list, O, list! +If thou didst ever thy dear father love-- +HAMLET +O God! +Ghost +Revenge his foul and most unnatural murder. +HAMLET +Murder! +Ghost +Murder most foul, as in the best it is; +But this most foul, strange and unnatural. +HAMLET +Haste me to know't, that I, with wings as swift +As meditation or the thoughts of love, +May sweep to my revenge. +Ghost +I find thee apt; +And duller shouldst thou be than the fat weed +That roots itself in ease on Lethe wharf, +Wouldst thou not stir in this. Now, Hamlet, hear: +'Tis given out that, sleeping in my orchard, +A serpent stung me; so the whole ear of Denmark +Is by a forged process of my death +Rankly abused: but know, thou noble youth, +The serpent that did sting thy father's life +Now wears his crown. +HAMLET +O my prophetic soul! My uncle! +Ghost +Ay, that incestuous, that adulterate beast, +With witchcraft of his wit, with traitorous gifts,-- +O wicked wit and gifts, that have the power +So to seduce!--won to his shameful lust +The will of my most seeming-virtuous queen: +O Hamlet, what a falling-off was there! +From me, whose love was of that dignity +That it went hand in hand even with the vow +I made to her in marriage, and to decline +Upon a wretch whose natural gifts were poor +To those of mine! 
+But virtue, as it never will be moved, +Though lewdness court it in a shape of heaven, +So lust, though to a radiant angel link'd, +Will sate itself in a celestial bed, +And prey on garbage. +But, soft! methinks I scent the morning air; +Brief let me be. Sleeping within my orchard, +My custom always of the afternoon, +Upon my secure hour thy uncle stole, +With juice of cursed hebenon in a vial, +And in the porches of my ears did pour +The leperous distilment; whose effect +Holds such an enmity with blood of man +That swift as quicksilver it courses through +The natural gates and alleys of the body, +And with a sudden vigour doth posset +And curd, like eager droppings into milk, +The thin and wholesome blood: so did it mine; +And a most instant tetter bark'd about, +Most lazar-like, with vile and loathsome crust, +All my smooth body. +Thus was I, sleeping, by a brother's hand +Of life, of crown, of queen, at once dispatch'd: +Cut off even in the blossoms of my sin, +Unhousel'd, disappointed, unanel'd, +No reckoning made, but sent to my account +With all my imperfections on my head: +O, horrible! O, horrible! most horrible! +If thou hast nature in thee, bear it not; +Let not the royal bed of Denmark be +A couch for luxury and damned incest. +But, howsoever thou pursuest this act, +Taint not thy mind, nor let thy soul contrive +Against thy mother aught: leave her to heaven +And to those thorns that in her bosom lodge, +To prick and sting her. Fare thee well at once! +The glow-worm shows the matin to be near, +And 'gins to pale his uneffectual fire: +Adieu, adieu! Hamlet, remember me. +Exit + +HAMLET +O all you host of heaven! O earth! what else? +And shall I couple hell? O, fie! Hold, hold, my heart; +And you, my sinews, grow not instant old, +But bear me stiffly up. Remember thee! +Ay, thou poor ghost, while memory holds a seat +In this distracted globe. Remember thee! 
+Yea, from the table of my memory +I'll wipe away all trivial fond records, +All saws of books, all forms, all pressures past, +That youth and observation copied there; +And thy commandment all alone shall live +Within the book and volume of my brain, +Unmix'd with baser matter: yes, by heaven! +O most pernicious woman! +O villain, villain, smiling, damned villain! +My tables,--meet it is I set it down, +That one may smile, and smile, and be a villain; +At least I'm sure it may be so in Denmark: +Writing + +So, uncle, there you are. Now to my word; +It is 'Adieu, adieu! remember me.' +I have sworn 't. +MARCELLUS HORATIO +[Within] My lord, my lord,-- +MARCELLUS +[Within] Lord Hamlet,-- +HORATIO +[Within] Heaven secure him! +HAMLET +So be it! +HORATIO +[Within] Hillo, ho, ho, my lord! +HAMLET +Hillo, ho, ho, boy! come, bird, come. +Enter HORATIO and MARCELLUS + +MARCELLUS +How is't, my noble lord? +HORATIO +What news, my lord? +HAMLET +O, wonderful! +HORATIO +Good my lord, tell it. +HAMLET +No; you'll reveal it. +HORATIO +Not I, my lord, by heaven. +MARCELLUS +Nor I, my lord. +HAMLET +How say you, then; would heart of man once think it? +But you'll be secret? +HORATIO MARCELLUS +Ay, by heaven, my lord. +HAMLET +There's ne'er a villain dwelling in all Denmark +But he's an arrant knave. +HORATIO +There needs no ghost, my lord, come from the grave +To tell us this. +HAMLET +Why, right; you are i' the right; +And so, without more circumstance at all, +I hold it fit that we shake hands and part: +You, as your business and desire shall point you; +For every man has business and desire, +Such as it is; and for mine own poor part, +Look you, I'll go pray. +HORATIO +These are but wild and whirling words, my lord. +HAMLET +I'm sorry they offend you, heartily; +Yes, 'faith heartily. +HORATIO +There's no offence, my lord. +HAMLET +Yes, by Saint Patrick, but there is, Horatio, +And much offence too. 
Touching this vision here, +It is an honest ghost, that let me tell you: +For your desire to know what is between us, +O'ermaster 't as you may. And now, good friends, +As you are friends, scholars and soldiers, +Give me one poor request. +HORATIO +What is't, my lord? we will. +HAMLET +Never make known what you have seen to-night. +HORATIO MARCELLUS +My lord, we will not. +HAMLET +Nay, but swear't. +HORATIO +In faith, +My lord, not I. +MARCELLUS +Nor I, my lord, in faith. +HAMLET +Upon my sword. +MARCELLUS +We have sworn, my lord, already. +HAMLET +Indeed, upon my sword, indeed. +Ghost +[Beneath] Swear. +HAMLET +Ah, ha, boy! say'st thou so? art thou there, +truepenny? +Come on--you hear this fellow in the cellarage-- +Consent to swear. +HORATIO +Propose the oath, my lord. +HAMLET +Never to speak of this that you have seen, +Swear by my sword. +Ghost +[Beneath] Swear. +HAMLET +Hic et ubique? then we'll shift our ground. +Come hither, gentlemen, +And lay your hands again upon my sword: +Never to speak of this that you have heard, +Swear by my sword. +Ghost +[Beneath] Swear. +HAMLET +Well said, old mole! canst work i' the earth so fast? +A worthy pioner! Once more remove, good friends. +HORATIO +O day and night, but this is wondrous strange! +HAMLET +And therefore as a stranger give it welcome. +There are more things in heaven and earth, Horatio, +Than are dreamt of in your philosophy. But come; +Here, as before, never, so help you mercy, +How strange or odd soe'er I bear myself, +As I perchance hereafter shall think meet +To put an antic disposition on, +That you, at such times seeing me, never shall, +With arms encumber'd thus, or this headshake, +Or by pronouncing of some doubtful phrase, +As 'Well, well, we know,' or 'We could, an if we would,' +Or 'If we list to speak,' or 'There be, an if they might,' +Or such ambiguous giving out, to note +That you know aught of me: this not to do, +So grace and mercy at your most need help you, Swear. +Ghost +[Beneath] Swear. 
+HAMLET +Rest, rest, perturbed spirit! +They swear + +So, gentlemen, +With all my love I do commend me to you: +And what so poor a man as Hamlet is +May do, to express his love and friending to you, +God willing, shall not lack. Let us go in together; +And still your fingers on your lips, I pray. +The time is out of joint: O cursed spite, +That ever I was born to set it right! +Nay, come, let's go together. +Exeunt diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/make-db.cc b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/make-db.cc new file mode 100644 index 00000000..20ef4f7a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/make-db.cc @@ -0,0 +1,116 @@ +// Copyright 2012 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This program creates a leveldb db at /tmp/db. + +#include + +#include "leveldb/db.h" + +static const char* dbname = "/tmp/db"; + +// The program consists of up to 4 stages. If stage is in the range [1, 4], +// the program will exit after the stage'th stage. +// 1. create an empty DB. +// 2. add some key/value pairs. +// 3. close and re-open the DB, which forces a compaction. +// 4. add some more key/value pairs. 
+static const int stage = 4; + +int main(int argc, char** argv) { + leveldb::Status status; + leveldb::Options o; + leveldb::WriteOptions wo; + leveldb::DB* db; + + o.create_if_missing = true; + o.error_if_exists = true; + + if (stage < 1) { + return 0; + } + cout << "Stage 1" << endl; + + status = leveldb::DB::Open(o, dbname, &db); + if (!status.ok()) { + cerr << "DB::Open " << status.ToString() << endl; + return 1; + } + + if (stage < 2) { + return 0; + } + cout << "Stage 2" << endl; + + status = db->Put(wo, "foo", "one"); + if (!status.ok()) { + cerr << "DB::Put " << status.ToString() << endl; + return 1; + } + + status = db->Put(wo, "bar", "two"); + if (!status.ok()) { + cerr << "DB::Put " << status.ToString() << endl; + return 1; + } + + status = db->Put(wo, "baz", "three"); + if (!status.ok()) { + cerr << "DB::Put " << status.ToString() << endl; + return 1; + } + + status = db->Put(wo, "foo", "four"); + if (!status.ok()) { + cerr << "DB::Put " << status.ToString() << endl; + return 1; + } + + status = db->Delete(wo, "bar"); + if (!status.ok()) { + cerr << "DB::Delete " << status.ToString() << endl; + return 1; + } + + if (stage < 3) { + return 0; + } + cout << "Stage 3" << endl; + + delete db; + db = NULL; + o.create_if_missing = false; + o.error_if_exists = false; + + status = leveldb::DB::Open(o, dbname, &db); + if (!status.ok()) { + cerr << "DB::Open " << status.ToString() << endl; + return 1; + } + + if (stage < 4) { + return 0; + } + cout << "Stage 4" << endl; + + status = db->Put(wo, "foo", "five"); + if (!status.ok()) { + cerr << "DB::Put " << status.ToString() << endl; + return 1; + } + + status = db->Put(wo, "quux", "six"); + if (!status.ok()) { + cerr << "DB::Put " << status.ToString() << endl; + return 1; + } + + status = db->Delete(wo, "baz"); + if (!status.ok()) { + cerr << "DB::Delete " << status.ToString() << endl; + return 1; + } + + return 0; +} diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/make-table.cc b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/make-table.cc new file mode 100644 index 00000000..1f6b3f47 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/leveldb-go/testdata/make-table.cc @@ -0,0 +1,106 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This program adds N lines from infile to a leveldb table at outfile. +// The h.txt infile was generated via: +// cat hamlet-act-1.txt | tr '[:upper:]' '[:lower:]' | grep -o -E '\w+' | sort | uniq -c > infile + +#include +#include +#include + +#include "leveldb/env.h" +#include "leveldb/table.h" +#include "leveldb/table_builder.h" + +const int N = 1000000; +const char* infile = "h.txt"; +const char* outfile = "h.sst"; + +int write() { + leveldb::Status status; + + leveldb::WritableFile* wf; + status = leveldb::Env::Default()->NewWritableFile(outfile, &wf); + if (!status.ok()) { + cerr << "Env::NewWritableFile: " << status.ToString() << endl; + return 1; + } + + leveldb::Options o; + // o.compression = leveldb::kNoCompression; + leveldb::TableBuilder* tb = new leveldb::TableBuilder(o, wf); + ifstream in(infile); + string s; + for (int i = 0; i < N && getline(in, s); i++) { + string key(s, 8); + string val(s, 0, 7); + val = val.substr(1 + val.rfind(' ')); + tb->Add(key.c_str(), val.c_str()); + } + + status = tb->Finish(); + if (!status.ok()) { + cerr << "TableBuilder::Finish: " << status.ToString() << endl; + return 1; + } + + status = wf->Close(); + if (!status.ok()) { + cerr << "WritableFile::Close: " << status.ToString() << endl; + return 1; + } + + cout << "wrote " << tb->NumEntries() << " entries" << endl; + delete tb; + delete wf; + return 0; +} + +int read() { + leveldb::Status status; + + 
leveldb::RandomAccessFile* raf; + status = leveldb::Env::Default()->NewRandomAccessFile(outfile, &raf); + if (!status.ok()) { + cerr << "Env::NewRandomAccessFile: " << status.ToString() << endl; + return 1; + } + + uint64_t file_size; + status = leveldb::Env::Default()->GetFileSize(outfile, &file_size); + if (!status.ok()) { + cerr << "Env::GetFileSize: " << status.ToString() << endl; + return 1; + } + + leveldb::Options o; + leveldb::Table* t; + status = leveldb::Table::Open(o, raf, file_size, &t); + if (!status.ok()) { + cerr << "Table::Open: " << status.ToString() << endl; + return 1; + } + + leveldb::ReadOptions ro; + leveldb::Iterator* i = t->NewIterator(ro); + uint64_t n = 0; + for (i->SeekToFirst(); i->Valid(); i->Next()) { + n++; + } + + cout << "read " << n << " entries" << endl; + delete i; + delete t; + delete raf; + return 0; +} + +int main(int argc, char** argv) { + int ret = write(); + if (ret != 0) { + return ret; + } + return read(); +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/gf256/blog_test.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/gf256/blog_test.go new file mode 100644 index 00000000..4fbf7ecb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/gf256/blog_test.go @@ -0,0 +1,85 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains a straightforward implementation of +// Reed-Solomon encoding, along with a benchmark. +// It goes with http://research.swtch.com/field. +// +// For an optimized implementation, see gf256.go. + +package gf256 + +import ( + "bytes" + "fmt" + "testing" +) + +// BlogECC writes to check the error correcting code bytes +// for data using the given Reed-Solomon parameters. 
+func BlogECC(rs *RSEncoder, m []byte, check []byte) { + if len(check) < rs.c { + panic("gf256: invalid check byte length") + } + if rs.c == 0 { + return + } + + // The check bytes are the remainder after dividing + // data padded with c zeros by the generator polynomial. + + // p = data padded with c zeros. + var p []byte + n := len(m) + rs.c + if len(rs.p) >= n { + p = rs.p + } else { + p = make([]byte, n) + } + copy(p, m) + for i := len(m); i < len(p); i++ { + p[i] = 0 + } + + gen := rs.gen + + // Divide p by gen, leaving the remainder in p[len(data):]. + // p[0] is the most significant term in p, and + // gen[0] is the most significant term in the generator. + for i := 0; i < len(m); i++ { + k := f.Mul(p[i], f.Inv(gen[0])) // k = pi / g0 + // p -= k·g + for j, g := range gen { + p[i+j] = f.Add(p[i+j], f.Mul(k, g)) + } + } + + copy(check, p[len(m):]) + rs.p = p +} + +func BenchmarkBlogECC(b *testing.B) { + data := []byte{0x10, 0x20, 0x0c, 0x56, 0x61, 0x80, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0x10, 0x20, 0x0c, 0x56, 0x61, 0x80, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11} + check := []byte{0x29, 0x41, 0xb3, 0x93, 0x8, 0xe8, 0xa3, 0xe7, 0x63, 0x8f} + out := make([]byte, len(check)) + rs := NewRSEncoder(f, len(check)) + for i := 0; i < b.N; i++ { + BlogECC(rs, data, out) + } + b.SetBytes(int64(len(data))) + if !bytes.Equal(out, check) { + fmt.Printf("have %#v want %#v\n", out, check) + } +} + +func TestBlogECC(t *testing.T) { + data := []byte{0x10, 0x20, 0x0c, 0x56, 0x61, 0x80, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11} + check := []byte{0xa5, 0x24, 0xd4, 0xc1, 0xed, 0x36, 0xc7, 0x87, 0x2c, 0x55} + out := make([]byte, len(check)) + rs := NewRSEncoder(f, len(check)) + BlogECC(rs, data, out) + if !bytes.Equal(out, check) { + t.Errorf("have %x want %x", out, check) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/gf256/gf256.go 
b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/gf256/gf256.go new file mode 100644 index 00000000..feab9187 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/gf256/gf256.go @@ -0,0 +1,241 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gf256 implements arithmetic over the Galois Field GF(256). +package gf256 + +import "strconv" + +// A Field represents an instance of GF(256) defined by a specific polynomial. +type Field struct { + log [256]byte // log[0] is unused + exp [510]byte +} + +// NewField returns a new field corresponding to the polynomial poly +// and generator α. The Reed-Solomon encoding in QR codes uses +// polynomial 0x11d with generator 2. +// +// The choice of generator α only affects the Exp and Log operations. +func NewField(poly, α int) *Field { + if poly < 0x100 || poly >= 0x200 || reducible(poly) { + panic("gf256: invalid polynomial: " + strconv.Itoa(poly)) + } + + var f Field + x := 1 + for i := 0; i < 255; i++ { + if x == 1 && i != 0 { + panic("gf256: invalid generator " + strconv.Itoa(α) + + " for polynomial " + strconv.Itoa(poly)) + } + f.exp[i] = byte(x) + f.exp[i+255] = byte(x) + f.log[x] = byte(i) + x = mul(x, α, poly) + } + f.log[0] = 255 + for i := 0; i < 255; i++ { + if f.log[f.exp[i]] != byte(i) { + panic("bad log") + } + if f.log[f.exp[i+255]] != byte(i) { + panic("bad log") + } + } + for i := 1; i < 256; i++ { + if f.exp[f.log[i]] != byte(i) { + panic("bad log") + } + } + + return &f +} + +// nbit returns the number of significant in p. +func nbit(p int) uint { + n := uint(0) + for ; p > 0; p >>= 1 { + n++ + } + return n +} + +// polyDiv divides the polynomial p by q and returns the remainder. 
+func polyDiv(p, q int) int { + np := nbit(p) + nq := nbit(q) + for ; np >= nq; np-- { + if p&(1<<(np-1)) != 0 { + p ^= q << (np - nq) + } + } + return p +} + +// mul returns the product x*y mod poly, a GF(256) multiplication. +func mul(x, y, poly int) int { + z := 0 + for x > 0 { + if x&1 != 0 { + z ^= y + } + x >>= 1 + y <<= 1 + if y&0x100 != 0 { + y ^= poly + } + } + return z +} + +// reducible reports whether p is reducible. +func reducible(p int) bool { + // Multiplying n-bit * n-bit produces (2n-1)-bit, + // so if p is reducible, one of its factors must be + // of np/2+1 bits or fewer. + np := nbit(p) + for q := 2; q < 1<<(np/2+1); q++ { + if polyDiv(p, q) == 0 { + return true + } + } + return false +} + +// Add returns the sum of x and y in the field. +func (f *Field) Add(x, y byte) byte { + return x ^ y +} + +// Exp returns the the base-α exponential of e in the field. +// If e < 0, Exp returns 0. +func (f *Field) Exp(e int) byte { + if e < 0 { + return 0 + } + return f.exp[e%255] +} + +// Log returns the base-α logarithm of x in the field. +// If x == 0, Log returns -1. +func (f *Field) Log(x byte) int { + if x == 0 { + return -1 + } + return int(f.log[x]) +} + +// Inv returns the multiplicative inverse of x in the field. +// If x == 0, Inv returns 0. +func (f *Field) Inv(x byte) byte { + if x == 0 { + return 0 + } + return f.exp[255-f.log[x]] +} + +// Mul returns the product of x and y in the field. +func (f *Field) Mul(x, y byte) byte { + if x == 0 || y == 0 { + return 0 + } + return f.exp[int(f.log[x])+int(f.log[y])] +} + +// An RSEncoder implements Reed-Solomon encoding +// over a given field using a given number of error correction bytes. +type RSEncoder struct { + f *Field + c int + gen []byte + lgen []byte + p []byte +} + +func (f *Field) gen(e int) (gen, lgen []byte) { + // p = 1 + p := make([]byte, e+1) + p[e] = 1 + + for i := 0; i < e; i++ { + // p *= (x + Exp(i)) + // p[j] = p[j]*Exp(i) + p[j+1]. 
+ c := f.Exp(i) + for j := 0; j < e; j++ { + p[j] = f.Mul(p[j], c) ^ p[j+1] + } + p[e] = f.Mul(p[e], c) + } + + // lp = log p. + lp := make([]byte, e+1) + for i, c := range p { + if c == 0 { + lp[i] = 255 + } else { + lp[i] = byte(f.Log(c)) + } + } + + return p, lp +} + +// NewRSEncoder returns a new Reed-Solomon encoder +// over the given field and number of error correction bytes. +func NewRSEncoder(f *Field, c int) *RSEncoder { + gen, lgen := f.gen(c) + return &RSEncoder{f: f, c: c, gen: gen, lgen: lgen} +} + +// ECC writes to check the error correcting code bytes +// for data using the given Reed-Solomon parameters. +func (rs *RSEncoder) ECC(data []byte, check []byte) { + if len(check) < rs.c { + panic("gf256: invalid check byte length") + } + if rs.c == 0 { + return + } + + // The check bytes are the remainder after dividing + // data padded with c zeros by the generator polynomial. + + // p = data padded with c zeros. + var p []byte + n := len(data) + rs.c + if len(rs.p) >= n { + p = rs.p + } else { + p = make([]byte, n) + } + copy(p, data) + for i := len(data); i < len(p); i++ { + p[i] = 0 + } + + // Divide p by gen, leaving the remainder in p[len(data):]. + // p[0] is the most significant term in p, and + // gen[0] is the most significant term in the generator, + // which is always 1. + // To avoid repeated work, we store various values as + // lv, not v, where lv = log[v]. 
+ f := rs.f + lgen := rs.lgen[1:] + for i := 0; i < len(data); i++ { + c := p[i] + if c == 0 { + continue + } + q := p[i+1:] + exp := f.exp[f.log[c]:] + for j, lg := range lgen { + if lg != 255 { // lgen uses 255 for log 0 + q[j] ^= exp[lg] + } + } + } + copy(check, p[len(data):]) + rs.p = p +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/gf256/gf256_test.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/gf256/gf256_test.go new file mode 100644 index 00000000..f77fa7d6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/gf256/gf256_test.go @@ -0,0 +1,194 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gf256 + +import ( + "bytes" + "fmt" + "testing" +) + +var f = NewField(0x11d, 2) // x^8 + x^4 + x^3 + x^2 + 1 + +func TestBasic(t *testing.T) { + if f.Exp(0) != 1 || f.Exp(1) != 2 || f.Exp(255) != 1 { + panic("bad Exp") + } +} + +func TestECC(t *testing.T) { + data := []byte{0x10, 0x20, 0x0c, 0x56, 0x61, 0x80, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11} + check := []byte{0xa5, 0x24, 0xd4, 0xc1, 0xed, 0x36, 0xc7, 0x87, 0x2c, 0x55} + out := make([]byte, len(check)) + rs := NewRSEncoder(f, len(check)) + rs.ECC(data, out) + if !bytes.Equal(out, check) { + t.Errorf("have %x want %x", out, check) + } +} + +func TestLinear(t *testing.T) { + d1 := []byte{0x00, 0x00} + c1 := []byte{0x00, 0x00} + out := make([]byte, len(c1)) + rs := NewRSEncoder(f, len(c1)) + if rs.ECC(d1, out); !bytes.Equal(out, c1) { + t.Errorf("ECBytes(%x, %d) = %x, want 0", d1, len(c1), out) + } + d2 := []byte{0x00, 0x01} + c2 := make([]byte, 2) + rs.ECC(d2, c2) + d3 := []byte{0x00, 0x02} + c3 := make([]byte, 2) + rs.ECC(d3, c3) + cx := make([]byte, 2) + for i := range cx { + cx[i] = c2[i] ^ c3[i] + } + d4 := []byte{0x00, 0x03} + c4 := make([]byte, 2) + 
rs.ECC(d4, c4) + if !bytes.Equal(cx, c4) { + t.Errorf("ECBytes(%x, 2) = %x\nECBytes(%x, 2) = %x\nxor = %x\nECBytes(%x, 2) = %x", + d2, c2, d3, c3, cx, d4, c4) + } +} + +func TestGaussJordan(t *testing.T) { + rs := NewRSEncoder(f, 2) + m := make([][]byte, 16) + for i := range m { + m[i] = make([]byte, 4) + m[i][i/8] = 1 << uint(i%8) + rs.ECC(m[i][:2], m[i][2:]) + } + if false { + fmt.Printf("---\n") + for _, row := range m { + fmt.Printf("%x\n", row) + } + } + b := []uint{0, 1, 2, 3, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25, 26, 27} + for i := 0; i < 16; i++ { + bi := b[i] + if m[i][bi/8]&(1<<(7-bi%8)) == 0 { + for j := i + 1; ; j++ { + if j >= len(m) { + t.Errorf("lost track for %d", bi) + break + } + if m[j][bi/8]&(1<<(7-bi%8)) != 0 { + m[i], m[j] = m[j], m[i] + break + } + } + } + for j := i + 1; j < len(m); j++ { + if m[j][bi/8]&(1<<(7-bi%8)) != 0 { + for k := range m[j] { + m[j][k] ^= m[i][k] + } + } + } + } + if false { + fmt.Printf("---\n") + for _, row := range m { + fmt.Printf("%x\n", row) + } + } + for i := 15; i >= 0; i-- { + bi := b[i] + for j := i - 1; j >= 0; j-- { + if m[j][bi/8]&(1<<(7-bi%8)) != 0 { + for k := range m[j] { + m[j][k] ^= m[i][k] + } + } + } + } + if false { + fmt.Printf("---\n") + for _, row := range m { + fmt.Printf("%x", row) + out := make([]byte, 2) + if rs.ECC(row[:2], out); !bytes.Equal(out, row[2:]) { + fmt.Printf(" - want %x", out) + } + fmt.Printf("\n") + } + } +} + +func BenchmarkECC(b *testing.B) { + data := []byte{0x10, 0x20, 0x0c, 0x56, 0x61, 0x80, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0x10, 0x20, 0x0c, 0x56, 0x61, 0x80, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11} + check := []byte{0x29, 0x41, 0xb3, 0x93, 0x8, 0xe8, 0xa3, 0xe7, 0x63, 0x8f} + out := make([]byte, len(check)) + rs := NewRSEncoder(f, len(check)) + for i := 0; i < b.N; i++ { + rs.ECC(data, out) + } + b.SetBytes(int64(len(data))) + if !bytes.Equal(out, check) { + fmt.Printf("have %#v want %#v\n", out, check) + } +} + +func 
TestGen(t *testing.T) { + for i := 0; i < 256; i++ { + _, lg := f.gen(i) + if lg[0] != 0 { + t.Errorf("#%d: %x", i, lg) + } + } +} + +func TestReducible(t *testing.T) { + var count = []int{1, 2, 3, 6, 9, 18, 30, 56, 99, 186} // oeis.org/A1037 + for i, want := range count { + n := 0 + for p := 1 << uint(i+2); p < 1< 0 { + n := nbit + if n > 8 { + n = 8 + } + if b.nbit%8 == 0 { + b.b = append(b.b, 0) + } else { + m := -b.nbit & 7 + if n > m { + n = m + } + } + b.nbit += n + sh := uint(nbit - n) + b.b[len(b.b)-1] |= uint8(v >> sh << uint(-b.nbit&7)) + v -= v >> sh << sh + nbit -= n + } +} + +// Num is the encoding for numeric data. +// The only valid characters are the decimal digits 0 through 9. +type Num string + +func (s Num) String() string { + return fmt.Sprintf("Num(%#q)", string(s)) +} + +func (s Num) Check() error { + for _, c := range s { + if c < '0' || '9' < c { + return fmt.Errorf("non-numeric string %#q", string(s)) + } + } + return nil +} + +var numLen = [3]int{10, 12, 14} + +func (s Num) Bits(v Version) int { + return 4 + numLen[v.sizeClass()] + (10*len(s)+2)/3 +} + +func (s Num) Encode(b *Bits, v Version) { + b.Write(1, 4) + b.Write(uint(len(s)), numLen[v.sizeClass()]) + var i int + for i = 0; i+3 <= len(s); i += 3 { + w := uint(s[i]-'0')*100 + uint(s[i+1]-'0')*10 + uint(s[i+2]-'0') + b.Write(w, 10) + } + switch len(s) - i { + case 1: + w := uint(s[i] - '0') + b.Write(w, 4) + case 2: + w := uint(s[i]-'0')*10 + uint(s[i+1]-'0') + b.Write(w, 7) + } +} + +// Alpha is the encoding for alphanumeric data. +// The valid characters are 0-9A-Z$%*+-./: and space. 
+type Alpha string + +const alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:" + +func (s Alpha) String() string { + return fmt.Sprintf("Alpha(%#q)", string(s)) +} + +func (s Alpha) Check() error { + for _, c := range s { + if strings.IndexRune(alphabet, c) < 0 { + return fmt.Errorf("non-alphanumeric string %#q", string(s)) + } + } + return nil +} + +var alphaLen = [3]int{9, 11, 13} + +func (s Alpha) Bits(v Version) int { + return 4 + alphaLen[v.sizeClass()] + (11*len(s)+1)/2 +} + +func (s Alpha) Encode(b *Bits, v Version) { + b.Write(2, 4) + b.Write(uint(len(s)), alphaLen[v.sizeClass()]) + var i int + for i = 0; i+2 <= len(s); i += 2 { + w := uint(strings.IndexRune(alphabet, rune(s[i])))*45 + + uint(strings.IndexRune(alphabet, rune(s[i+1]))) + b.Write(w, 11) + } + + if i < len(s) { + w := uint(strings.IndexRune(alphabet, rune(s[i]))) + b.Write(w, 6) + } +} + +// String is the encoding for 8-bit data. All bytes are valid. +type String string + +func (s String) String() string { + return fmt.Sprintf("String(%#q)", string(s)) +} + +func (s String) Check() error { + return nil +} + +var stringLen = [3]int{8, 16, 16} + +func (s String) Bits(v Version) int { + return 4 + stringLen[v.sizeClass()] + 8*len(s) +} + +func (s String) Encode(b *Bits, v Version) { + b.Write(4, 4) + b.Write(uint(len(s)), stringLen[v.sizeClass()]) + for i := 0; i < len(s); i++ { + b.Write(uint(s[i]), 8) + } +} + +// A Pixel describes a single pixel in a QR code. 
+type Pixel uint32 + +const ( + Black Pixel = 1 << iota + Invert +) + +func (p Pixel) Offset() uint { + return uint(p >> 6) +} + +func OffsetPixel(o uint) Pixel { + return Pixel(o << 6) +} + +func (r PixelRole) Pixel() Pixel { + return Pixel(r << 2) +} + +func (p Pixel) Role() PixelRole { + return PixelRole(p>>2) & 15 +} + +func (p Pixel) String() string { + s := p.Role().String() + if p&Black != 0 { + s += "+black" + } + if p&Invert != 0 { + s += "+invert" + } + s += "+" + strconv.FormatUint(uint64(p.Offset()), 10) + return s +} + +// A PixelRole describes the role of a QR pixel. +type PixelRole uint32 + +const ( + _ PixelRole = iota + Position // position squares (large) + Alignment // alignment squares (small) + Timing // timing strip between position squares + Format // format metadata + PVersion // version pattern + Unused // unused pixel + Data // data bit + Check // error correction check bit + Extra +) + +var roles = []string{ + "", + "position", + "alignment", + "timing", + "format", + "pversion", + "unused", + "data", + "check", + "extra", +} + +func (r PixelRole) String() string { + if Position <= r && r <= Check { + return roles[r] + } + return strconv.Itoa(int(r)) +} + +// A Level represents a QR error correction level. +// From least to most tolerant of errors, they are L, M, Q, H. +type Level int + +const ( + L Level = iota + M + Q + H +) + +func (l Level) String() string { + if L <= l && l <= H { + return "LMQH"[l : l+1] + } + return strconv.Itoa(int(l)) +} + +// A Code is a square pixel grid. 
+type Code struct { + Bitmap []byte // 1 is black, 0 is white + Size int // number of pixels on a side + Stride int // number of bytes per row +} + +func (c *Code) Black(x, y int) bool { + return 0 <= x && x < c.Size && 0 <= y && y < c.Size && + c.Bitmap[y*c.Stride+x/8]&(1<= pad { + break + } + b.Write(0x11, 8) + } + } +} + +func (b *Bits) AddCheckBytes(v Version, l Level) { + nd := v.DataBytes(l) + if b.nbit < nd*8 { + b.Pad(nd*8 - b.nbit) + } + if b.nbit != nd*8 { + panic("qr: too much data") + } + + dat := b.Bytes() + vt := &vtab[v] + lev := &vt.level[l] + db := nd / lev.nblock + extra := nd % lev.nblock + chk := make([]byte, lev.check) + rs := gf256.NewRSEncoder(Field, lev.check) + for i := 0; i < lev.nblock; i++ { + if i == lev.nblock-extra { + db++ + } + rs.ECC(dat[:db], chk) + b.Append(chk) + dat = dat[db:] + } + + if len(b.Bytes()) != vt.bytes { + panic("qr: internal error") + } +} + +func (p *Plan) Encode(text ...Encoding) (*Code, error) { + var b Bits + for _, t := range text { + if err := t.Check(); err != nil { + return nil, err + } + t.Encode(&b, p.Version) + } + if b.Bits() > p.DataBytes*8 { + return nil, fmt.Errorf("cannot encode %d bits into %d-bit code", b.Bits(), p.DataBytes*8) + } + b.AddCheckBytes(p.Version, p.Level) + bytes := b.Bytes() + + // Now we have the checksum bytes and the data bytes. + // Construct the actual code. + c := &Code{Size: len(p.Pixel), Stride: (len(p.Pixel) + 7) &^ 7} + c.Bitmap = make([]byte, c.Stride*c.Size) + crow := c.Bitmap + for _, row := range p.Pixel { + for x, pix := range row { + switch pix.Role() { + case Data, Check: + o := pix.Offset() + if bytes[o/8]&(1< 40 { + return nil, fmt.Errorf("invalid QR version %d", int(v)) + } + siz := 17 + int(v)*4 + m := grid(siz) + p.Pixel = m + + // Timing markers (overwritten by boxes). + const ti = 6 // timing is in row/column 6 (counting from 0) + for i := range m { + p := Timing.Pixel() + if i&1 == 0 { + p |= Black + } + m[i][ti] = p + m[ti][i] = p + } + + // Position boxes. 
+ posBox(m, 0, 0) + posBox(m, siz-7, 0) + posBox(m, 0, siz-7) + + // Alignment boxes. + info := &vtab[v] + for x := 4; x+5 < siz; { + for y := 4; y+5 < siz; { + // don't overwrite timing markers + if (x < 7 && y < 7) || (x < 7 && y+5 >= siz-7) || (x+5 >= siz-7 && y < 7) { + } else { + alignBox(m, x, y) + } + if y == 4 { + y = info.apos + } else { + y += info.astride + } + } + if x == 4 { + x = info.apos + } else { + x += info.astride + } + } + + // Version pattern. + pat := vtab[v].pattern + if pat != 0 { + v := pat + for x := 0; x < 6; x++ { + for y := 0; y < 3; y++ { + p := PVersion.Pixel() + if v&1 != 0 { + p |= Black + } + m[siz-11+y][x] = p + m[x][siz-11+y] = p + v >>= 1 + } + } + } + + // One lonely black pixel + m[siz-8][8] = Unused.Pixel() | Black + + return p, nil +} + +// fplan adds the format pixels +func fplan(l Level, m Mask, p *Plan) error { + // Format pixels. + fb := uint32(l^1) << 13 // level: L=01, M=00, Q=11, H=10 + fb |= uint32(m) << 10 // mask + const formatPoly = 0x537 + rem := fb + for i := 14; i >= 10; i-- { + if rem&(1<>i)&1 == 1 { + pix |= Black + } + if (invert>>i)&1 == 1 { + pix ^= Invert | Black + } + // top left + switch { + case i < 6: + p.Pixel[i][8] = pix + case i < 8: + p.Pixel[i+1][8] = pix + case i < 9: + p.Pixel[8][7] = pix + default: + p.Pixel[8][14-i] = pix + } + // bottom right + switch { + case i < 8: + p.Pixel[8][siz-1-int(i)] = pix + default: + p.Pixel[siz-1-int(14-i)][8] = pix + } + } + return nil +} + +// lplan edits a version-only Plan to add information +// about the error correction levels. +func lplan(v Version, l Level, p *Plan) error { + p.Level = l + + nblock := vtab[v].level[l].nblock + ne := vtab[v].level[l].check + nde := (vtab[v].bytes - ne*nblock) / nblock + extra := (vtab[v].bytes - ne*nblock) % nblock + dataBits := (nde*nblock + extra) * 8 + checkBits := ne * nblock * 8 + + p.DataBytes = vtab[v].bytes - ne*nblock + p.CheckBytes = ne * nblock + p.Blocks = nblock + + // Make data + checksum pixels. 
+ data := make([]Pixel, dataBits) + for i := range data { + data[i] = Data.Pixel() | OffsetPixel(uint(i)) + } + check := make([]Pixel, checkBits) + for i := range check { + check[i] = Check.Pixel() | OffsetPixel(uint(i+dataBits)) + } + + // Split into blocks. + dataList := make([][]Pixel, nblock) + checkList := make([][]Pixel, nblock) + for i := 0; i < nblock; i++ { + // The last few blocks have an extra data byte (8 pixels). + nd := nde + if i >= nblock-extra { + nd++ + } + dataList[i], data = data[0:nd*8], data[nd*8:] + checkList[i], check = check[0:ne*8], check[ne*8:] + } + if len(data) != 0 || len(check) != 0 { + panic("data/check math") + } + + // Build up bit sequence, taking first byte of each block, + // then second byte, and so on. Then checksums. + bits := make([]Pixel, dataBits+checkBits) + dst := bits + for i := 0; i < nde+1; i++ { + for _, b := range dataList { + if i*8 < len(b) { + copy(dst, b[i*8:(i+1)*8]) + dst = dst[8:] + } + } + } + for i := 0; i < ne; i++ { + for _, b := range checkList { + if i*8 < len(b) { + copy(dst, b[i*8:(i+1)*8]) + dst = dst[8:] + } + } + } + if len(dst) != 0 { + panic("dst math") + } + + // Sweep up pair of columns, + // then down, assigning to right then left pixel. + // Repeat. + // See Figure 2 of http://www.pclviewer.com/rs2/qrtopology.htm + siz := len(p.Pixel) + rem := make([]Pixel, 7) + for i := range rem { + rem[i] = Extra.Pixel() + } + src := append(bits, rem...) 
+ for x := siz; x > 0; { + for y := siz - 1; y >= 0; y-- { + if p.Pixel[y][x-1].Role() == 0 { + p.Pixel[y][x-1], src = src[0], src[1:] + } + if p.Pixel[y][x-2].Role() == 0 { + p.Pixel[y][x-2], src = src[0], src[1:] + } + } + x -= 2 + if x == 7 { // vertical timing strip + x-- + } + for y := 0; y < siz; y++ { + if p.Pixel[y][x-1].Role() == 0 { + p.Pixel[y][x-1], src = src[0], src[1:] + } + if p.Pixel[y][x-2].Role() == 0 { + p.Pixel[y][x-2], src = src[0], src[1:] + } + } + x -= 2 + } + return nil +} + +// mplan edits a version+level-only Plan to add the mask. +func mplan(m Mask, p *Plan) error { + p.Mask = m + for y, row := range p.Pixel { + for x, pix := range row { + if r := pix.Role(); (r == Data || r == Check || r == Extra) && p.Mask.Invert(y, x) { + row[x] ^= Black | Invert + } + } + } + return nil +} + +// posBox draws a position (large) box at upper left x, y. +func posBox(m [][]Pixel, x, y int) { + pos := Position.Pixel() + // box + for dy := 0; dy < 7; dy++ { + for dx := 0; dx < 7; dx++ { + p := pos + if dx == 0 || dx == 6 || dy == 0 || dy == 6 || 2 <= dx && dx <= 4 && 2 <= dy && dy <= 4 { + p |= Black + } + m[y+dy][x+dx] = p + } + } + // white border + for dy := -1; dy < 8; dy++ { + if 0 <= y+dy && y+dy < len(m) { + if x > 0 { + m[y+dy][x-1] = pos + } + if x+7 < len(m) { + m[y+dy][x+7] = pos + } + } + } + for dx := -1; dx < 8; dx++ { + if 0 <= x+dx && x+dx < len(m) { + if y > 0 { + m[y-1][x+dx] = pos + } + if y+7 < len(m) { + m[y+7][x+dx] = pos + } + } + } +} + +// alignBox draw an alignment (small) box at upper left x, y. 
+func alignBox(m [][]Pixel, x, y int) { + // box + align := Alignment.Pixel() + for dy := 0; dy < 5; dy++ { + for dx := 0; dx < 5; dx++ { + p := align + if dx == 0 || dx == 4 || dy == 0 || dy == 4 || dx == 2 && dy == 2 { + p |= Black + } + m[y+dy][x+dx] = p + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/coding/qr_test.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/coding/qr_test.go new file mode 100644 index 00000000..d667a8bb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/coding/qr_test.go @@ -0,0 +1,133 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package coding + +import ( + "bytes" + "testing" + + "camlistore.org/third_party/code.google.com/p/rsc/gf256" + "camlistore.org/third_party/code.google.com/p/rsc/qr/libqrencode" +) + +func test(t *testing.T, v Version, l Level, text ...Encoding) bool { + s := "" + ty := libqrencode.EightBit + switch x := text[0].(type) { + case String: + s = string(x) + case Alpha: + s = string(x) + ty = libqrencode.Alphanumeric + case Num: + s = string(x) + ty = libqrencode.Numeric + } + key, err := libqrencode.Encode(libqrencode.Version(v), libqrencode.Level(l), ty, s) + if err != nil { + t.Errorf("libqrencode.Encode(%v, %v, %d, %#q): %v", v, l, ty, s, err) + return false + } + mask := (^key.Pixel[8][2]&1)<<2 | (key.Pixel[8][3]&1)<<1 | (^key.Pixel[8][4] & 1) + p, err := NewPlan(v, l, Mask(mask)) + if err != nil { + t.Errorf("NewPlan(%v, L, %d): %v", v, err, mask) + return false + } + if len(p.Pixel) != len(key.Pixel) { + t.Errorf("%v: NewPlan uses %dx%d, libqrencode uses %dx%d", v, len(p.Pixel), len(p.Pixel), len(key.Pixel), len(key.Pixel)) + return false + } + c, err := p.Encode(text...) 
+ if err != nil { + t.Errorf("Encode: %v", err) + return false + } + badpix := 0 +Pixel: + for y, prow := range p.Pixel { + for x, pix := range prow { + pix &^= Black + if c.Black(x, y) { + pix |= Black + } + + keypix := key.Pixel[y][x] + want := Pixel(0) + switch { + case keypix&libqrencode.Finder != 0: + want = Position.Pixel() + case keypix&libqrencode.Alignment != 0: + want = Alignment.Pixel() + case keypix&libqrencode.Timing != 0: + want = Timing.Pixel() + case keypix&libqrencode.Format != 0: + want = Format.Pixel() + want |= OffsetPixel(pix.Offset()) // sic + want |= pix & Invert + case keypix&libqrencode.PVersion != 0: + want = PVersion.Pixel() + case keypix&libqrencode.DataECC != 0: + if pix.Role() == Check || pix.Role() == Extra { + want = pix.Role().Pixel() + } else { + want = Data.Pixel() + } + want |= OffsetPixel(pix.Offset()) + want |= pix & Invert + default: + want = Unused.Pixel() + } + if keypix&libqrencode.Black != 0 { + want |= Black + } + if pix != want { + t.Errorf("%v/%v: Pixel[%d][%d] = %v, want %v %#x", v, mask, y, x, pix, want, keypix) + if badpix++; badpix >= 100 { + t.Errorf("stopping after %d bad pixels", badpix) + break Pixel + } + } + } + } + return badpix == 0 +} + +var input = []Encoding{ + String("hello"), + Num("1"), + Num("12"), + Num("123"), + Alpha("AB"), + Alpha("ABC"), +} + +func TestVersion(t *testing.T) { + badvers := 0 +Version: + for v := Version(1); v <= 40; v++ { + for l := L; l <= H; l++ { + for _, in := range input { + if !test(t, v, l, in) { + if badvers++; badvers >= 10 { + t.Errorf("stopping after %d bad versions", badvers) + break Version + } + } + } + } + } +} + +func TestEncode(t *testing.T) { + data := []byte{0x10, 0x20, 0x0c, 0x56, 0x61, 0x80, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11, 0xec, 0x11} + check := []byte{0xa5, 0x24, 0xd4, 0xc1, 0xed, 0x36, 0xc7, 0x87, 0x2c, 0x55} + rs := gf256.NewRSEncoder(Field, len(check)) + out := make([]byte, len(check)) + rs.ECC(data, out) + if !bytes.Equal(out, check) { + 
t.Errorf("have %x want %x", out, check) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/png.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/png.go new file mode 100644 index 00000000..db49d057 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/png.go @@ -0,0 +1,400 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package qr + +// PNG writer for QR codes. + +import ( + "bytes" + "encoding/binary" + "hash" + "hash/crc32" +) + +// PNG returns a PNG image displaying the code. +// +// PNG uses a custom encoder tailored to QR codes. +// Its compressed size is about 2x away from optimal, +// but it runs about 20x faster than calling png.Encode +// on c.Image(). +func (c *Code) PNG() []byte { + var p pngWriter + return p.encode(c) +} + +type pngWriter struct { + tmp [16]byte + wctmp [4]byte + buf bytes.Buffer + zlib bitWriter + crc hash.Hash32 +} + +var pngHeader = []byte("\x89PNG\r\n\x1a\n") + +func (w *pngWriter) encode(c *Code) []byte { + scale := c.Scale + siz := c.Size + + w.buf.Reset() + + // Header + w.buf.Write(pngHeader) + + // Header block + binary.BigEndian.PutUint32(w.tmp[0:4], uint32((siz+8)*scale)) + binary.BigEndian.PutUint32(w.tmp[4:8], uint32((siz+8)*scale)) + w.tmp[8] = 1 // 1-bit + w.tmp[9] = 0 // gray + w.tmp[10] = 0 + w.tmp[11] = 0 + w.tmp[12] = 0 + w.writeChunk("IHDR", w.tmp[:13]) + + // Comment + w.writeChunk("tEXt", comment) + + // Data + w.zlib.writeCode(c) + w.writeChunk("IDAT", w.zlib.bytes.Bytes()) + + // End + w.writeChunk("IEND", nil) + + return w.buf.Bytes() +} + +var comment = []byte("Software\x00QR-PNG http://qr.swtch.com/") + +func (w *pngWriter) writeChunk(name string, data []byte) { + if w.crc == nil { + w.crc = crc32.NewIEEE() + } + binary.BigEndian.PutUint32(w.wctmp[0:4], uint32(len(data))) + 
w.buf.Write(w.wctmp[0:4]) + w.crc.Reset() + copy(w.wctmp[0:4], name) + w.buf.Write(w.wctmp[0:4]) + w.crc.Write(w.wctmp[0:4]) + w.buf.Write(data) + w.crc.Write(data) + crc := w.crc.Sum32() + binary.BigEndian.PutUint32(w.wctmp[0:4], crc) + w.buf.Write(w.wctmp[0:4]) +} + +func (b *bitWriter) writeCode(c *Code) { + const ftNone = 0 + + b.adler32.Reset() + b.bytes.Reset() + b.nbit = 0 + + scale := c.Scale + siz := c.Size + + // zlib header + b.tmp[0] = 0x78 + b.tmp[1] = 0 + b.tmp[1] += uint8(31 - (uint16(b.tmp[0])<<8+uint16(b.tmp[1]))%31) + b.bytes.Write(b.tmp[0:2]) + + // Start flate block. + b.writeBits(1, 1, false) // final block + b.writeBits(1, 2, false) // compressed, fixed Huffman tables + + // White border. + // First row. + b.byte(ftNone) + n := (scale*(siz+8) + 7) / 8 + b.byte(255) + b.repeat(n-1, 1) + // 4*scale rows total. + b.repeat((4*scale-1)*(1+n), 1+n) + + for i := 0; i < 4*scale; i++ { + b.adler32.WriteNByte(ftNone, 1) + b.adler32.WriteNByte(255, n) + } + + row := make([]byte, 1+n) + for y := 0; y < siz; y++ { + row[0] = ftNone + j := 1 + var z uint8 + nz := 0 + for x := -4; x < siz+4; x++ { + // Raw data. + for i := 0; i < scale; i++ { + z <<= 1 + if !c.Black(x, y) { + z |= 1 + } + if nz++; nz == 8 { + row[j] = z + j++ + nz = 0 + } + } + } + if j < len(row) { + row[j] = z + } + for _, z := range row { + b.byte(z) + } + + // Scale-1 copies. + b.repeat((scale-1)*(1+n), 1+n) + + b.adler32.WriteN(row, scale) + } + + // White border. + // First row. + b.byte(ftNone) + b.byte(255) + b.repeat(n-1, 1) + // 4*scale rows total. + b.repeat((4*scale-1)*(1+n), 1+n) + + for i := 0; i < 4*scale; i++ { + b.adler32.WriteNByte(ftNone, 1) + b.adler32.WriteNByte(255, n) + } + + // End of block. + b.hcode(256) + b.flushBits() + + // adler32 + binary.BigEndian.PutUint32(b.tmp[0:], b.adler32.Sum32()) + b.bytes.Write(b.tmp[0:4]) +} + +// A bitWriter is a write buffer for bit-oriented data like deflate. 
+type bitWriter struct { + bytes bytes.Buffer + bit uint32 + nbit uint + + tmp [4]byte + adler32 adigest +} + +func (b *bitWriter) writeBits(bit uint32, nbit uint, rev bool) { + // reverse, for huffman codes + if rev { + br := uint32(0) + for i := uint(0); i < nbit; i++ { + br |= ((bit >> i) & 1) << (nbit - 1 - i) + } + bit = br + } + b.bit |= bit << b.nbit + b.nbit += nbit + for b.nbit >= 8 { + b.bytes.WriteByte(byte(b.bit)) + b.bit >>= 8 + b.nbit -= 8 + } +} + +func (b *bitWriter) flushBits() { + if b.nbit > 0 { + b.bytes.WriteByte(byte(b.bit)) + b.nbit = 0 + b.bit = 0 + } +} + +func (b *bitWriter) hcode(v int) { + /* + Lit Value Bits Codes + --------- ---- ----- + 0 - 143 8 00110000 through + 10111111 + 144 - 255 9 110010000 through + 111111111 + 256 - 279 7 0000000 through + 0010111 + 280 - 287 8 11000000 through + 11000111 + */ + switch { + case v <= 143: + b.writeBits(uint32(v)+0x30, 8, true) + case v <= 255: + b.writeBits(uint32(v-144)+0x190, 9, true) + case v <= 279: + b.writeBits(uint32(v-256)+0, 7, true) + case v <= 287: + b.writeBits(uint32(v-280)+0xc0, 8, true) + default: + panic("invalid hcode") + } +} + +func (b *bitWriter) byte(x byte) { + b.hcode(int(x)) +} + +func (b *bitWriter) codex(c int, val int, nx uint) { + b.hcode(c + val>>nx) + b.writeBits(uint32(val)&(1<= 258+3; n -= 258 { + b.repeat1(258, d) + } + if n > 258 { + // 258 < n < 258+3 + b.repeat1(10, d) + b.repeat1(n-10, d) + return + } + if n < 3 { + panic("invalid flate repeat") + } + b.repeat1(n, d) +} + +func (b *bitWriter) repeat1(n, d int) { + /* + Extra Extra Extra + Code Bits Length(s) Code Bits Lengths Code Bits Length(s) + ---- ---- ------ ---- ---- ------- ---- ---- ------- + 257 0 3 267 1 15,16 277 4 67-82 + 258 0 4 268 1 17,18 278 4 83-98 + 259 0 5 269 2 19-22 279 4 99-114 + 260 0 6 270 2 23-26 280 4 115-130 + 261 0 7 271 2 27-30 281 5 131-162 + 262 0 8 272 2 31-34 282 5 163-194 + 263 0 9 273 3 35-42 283 5 195-226 + 264 0 10 274 3 43-50 284 5 227-257 + 265 1 11,12 275 3 51-58 285 
0 258 + 266 1 13,14 276 3 59-66 + */ + switch { + case n <= 10: + b.codex(257, n-3, 0) + case n <= 18: + b.codex(265, n-11, 1) + case n <= 34: + b.codex(269, n-19, 2) + case n <= 66: + b.codex(273, n-35, 3) + case n <= 130: + b.codex(277, n-67, 4) + case n <= 257: + b.codex(281, n-131, 5) + case n == 258: + b.hcode(285) + default: + panic("invalid repeat length") + } + + /* + Extra Extra Extra + Code Bits Dist Code Bits Dist Code Bits Distance + ---- ---- ---- ---- ---- ------ ---- ---- -------- + 0 0 1 10 4 33-48 20 9 1025-1536 + 1 0 2 11 4 49-64 21 9 1537-2048 + 2 0 3 12 5 65-96 22 10 2049-3072 + 3 0 4 13 5 97-128 23 10 3073-4096 + 4 1 5,6 14 6 129-192 24 11 4097-6144 + 5 1 7,8 15 6 193-256 25 11 6145-8192 + 6 2 9-12 16 7 257-384 26 12 8193-12288 + 7 2 13-16 17 7 385-512 27 12 12289-16384 + 8 3 17-24 18 8 513-768 28 13 16385-24576 + 9 3 25-32 19 8 769-1024 29 13 24577-32768 + */ + if d <= 4 { + b.writeBits(uint32(d-1), 5, true) + } else if d <= 32768 { + nbit := uint(16) + for d <= 1<<(nbit-1) { + nbit-- + } + v := uint32(d - 1) + v &^= 1 << (nbit - 1) // top bit is implicit + code := uint32(2*nbit - 2) // second bit is low bit of code + code |= v >> (nbit - 2) + v &^= 1 << (nbit - 2) + b.writeBits(code, 5, true) + // rest of bits follow + b.writeBits(uint32(v), nbit-2, false) + } else { + panic("invalid repeat distance") + } +} + +func (b *bitWriter) run(v byte, n int) { + if n == 0 { + return + } + b.byte(v) + if n-1 < 3 { + for i := 0; i < n-1; i++ { + b.byte(v) + } + } else { + b.repeat(n-1, 1) + } +} + +type adigest struct { + a, b uint32 +} + +func (d *adigest) Reset() { d.a, d.b = 1, 0 } + +const amod = 65521 + +func aupdate(a, b uint32, pi byte, n int) (aa, bb uint32) { + // TODO(rsc): 6g doesn't do magic multiplies for b %= amod, + // only for b = b%amod. 
+ + // invariant: a, b < amod + if pi == 0 { + b += uint32(n%amod) * a + b = b % amod + return a, b + } + + // n times: + // a += pi + // b += a + // is same as + // b += n*a + n*(n+1)/2*pi + // a += n*pi + m := uint32(n) + b += (m % amod) * a + b = b % amod + b += (m * (m + 1) / 2) % amod * uint32(pi) + b = b % amod + a += (m % amod) * uint32(pi) + a = a % amod + return a, b +} + +func afinish(a, b uint32) uint32 { + return b<<16 | a +} + +func (d *adigest) WriteN(p []byte, n int) { + for i := 0; i < n; i++ { + for _, pi := range p { + d.a, d.b = aupdate(d.a, d.b, pi, 1) + } + } +} + +func (d *adigest) WriteNByte(pi byte, n int) { + d.a, d.b = aupdate(d.a, d.b, pi, n) +} + +func (d *adigest) Sum32() uint32 { return afinish(d.a, d.b) } diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/png_test.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/png_test.go new file mode 100644 index 00000000..27a62292 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/png_test.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package qr + +import ( + "bytes" + "image" + "image/color" + "image/png" + "io/ioutil" + "testing" +) + +func TestPNG(t *testing.T) { + c, err := Encode("hello, world", L) + if err != nil { + t.Fatal(err) + } + pngdat := c.PNG() + if true { + ioutil.WriteFile("x.png", pngdat, 0666) + } + m, err := png.Decode(bytes.NewBuffer(pngdat)) + if err != nil { + t.Fatal(err) + } + gm := m.(*image.Gray) + + scale := c.Scale + siz := c.Size + nbad := 0 + for y := 0; y < scale*(8+siz); y++ { + for x := 0; x < scale*(8+siz); x++ { + v := byte(255) + if c.Black(x/scale-4, y/scale-4) { + v = 0 + } + if gv := gm.At(x, y).(color.Gray).Y; gv != v { + t.Errorf("%d,%d = %d, want %d", x, y, gv, v) + if nbad++; nbad >= 20 { + t.Fatalf("too many bad pixels") + } + } + } + } +} + +func BenchmarkPNG(b *testing.B) { + c, err := Encode("0123456789012345678901234567890123456789", L) + if err != nil { + panic(err) + } + var bytes []byte + for i := 0; i < b.N; i++ { + bytes = c.PNG() + } + b.SetBytes(int64(len(bytes))) +} + +func BenchmarkImagePNG(b *testing.B) { + c, err := Encode("0123456789012345678901234567890123456789", L) + if err != nil { + panic(err) + } + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + png.Encode(&buf, c.Image()) + } + b.SetBytes(int64(buf.Len())) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/qr.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/qr.go new file mode 100644 index 00000000..3de4a1a4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/qr.go @@ -0,0 +1,116 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package qr encodes QR codes. 
+*/ +package qr + +import ( + "errors" + "image" + "image/color" + + "camlistore.org/third_party/code.google.com/p/rsc/qr/coding" +) + +// A Level denotes a QR error correction level. +// From least to most tolerant of errors, they are L, M, Q, H. +type Level int + +const ( + L Level = iota // 20% redundant + M // 38% redundant + Q // 55% redundant + H // 65% redundant +) + +// Encode returns an encoding of text at the given error correction level. +func Encode(text string, level Level) (*Code, error) { + // Pick data encoding, smallest first. + // We could split the string and use different encodings + // but that seems like overkill for now. + var enc coding.Encoding + switch { + case coding.Num(text).Check() == nil: + enc = coding.Num(text) + case coding.Alpha(text).Check() == nil: + enc = coding.Alpha(text) + default: + enc = coding.String(text) + } + + // Pick size. + l := coding.Level(level) + var v coding.Version + for v = coding.MinVersion; ; v++ { + if v > coding.MaxVersion { + return nil, errors.New("text too long to encode as QR") + } + if enc.Bits(v) <= v.DataBytes(l)*8 { + break + } + } + + // Build and execute plan. + p, err := coding.NewPlan(v, l, 0) + if err != nil { + return nil, err + } + cc, err := p.Encode(enc) + if err != nil { + return nil, err + } + + // TODO: Pick appropriate mask. + + return &Code{cc.Bitmap, cc.Size, cc.Stride, 8}, nil +} + +// A Code is a square pixel grid. +// It implements image.Image and direct PNG encoding. +type Code struct { + Bitmap []byte // 1 is black, 0 is white + Size int // number of pixels on a side + Stride int // number of bytes per row + Scale int // number of image pixels per QR pixel +} + +// Black returns true if the pixel at (x,y) is black. 
+func (c *Code) Black(x, y int) bool { + return 0 <= x && x < c.Size && 0 <= y && y < c.Size && + c.Bitmap[y*c.Stride+x/8]&(1<> 24), byte(rgba >> 16), byte(rgba >> 8), byte(rgba)} + draw.Draw(c, r, u, image.ZP, draw.Src) + } + } + + if csize != 0 { + if font == "" { + font = "data/luxisr.ttf" + } + ctxt := fs.NewContext(req) + dat, _, err := ctxt.Read(font) + if err != nil { + panic(err) + } + tfont, err := freetype.ParseFont(dat) + if err != nil { + panic(err) + } + ft := freetype.NewContext() + ft.SetDst(c) + ft.SetDPI(100) + ft.SetFont(tfont) + ft.SetFontSize(float64(pt)) + ft.SetSrc(image.NewUniform(color.Black)) + ft.SetClip(image.Rect(0, 0, 0, 0)) + wid, err := ft.DrawString(caption, freetype.Pt(0, 0)) + if err != nil { + panic(err) + } + p := freetype.Pt(d, d+3*pt/2) + p.X -= wid.X + p.X /= 2 + ft.SetClip(c.Bounds()) + ft.DrawString(caption, p) + } + + return c +} + +func makeFrame(req *http.Request, font string, pt, vers, l, scale, dots int) image.Image { + lev := coding.Level(l) + p, err := coding.NewPlan(coding.Version(vers), lev, 0) + if err != nil { + panic(err) + } + + nd := p.DataBytes / p.Blocks + nc := p.CheckBytes / p.Blocks + extra := p.DataBytes - nd*p.Blocks + + cap := fmt.Sprintf("QR v%d, %s", vers, lev) + if dots > 0 { + cap = fmt.Sprintf("QR v%d order, from bottom right", vers) + } + m := makeImage(req, cap, font, pt, len(p.Pixel), 0, scale, func(x, y int) uint32 { + pix := p.Pixel[y][x] + switch pix.Role() { + case coding.Data: + if dots > 0 { + return 0xffffffff + } + off := int(pix.Offset() / 8) + nd := nd + var i int + for i = 0; i < p.Blocks; i++ { + if i == extra { + nd++ + } + if off < nd { + break + } + off -= nd + } + return blockColors[i%len(blockColors)] + case coding.Check: + if dots > 0 { + return 0xffffffff + } + i := (int(pix.Offset()/8) - p.DataBytes) / nc + return dark(blockColors[i%len(blockColors)]) + } + if pix&coding.Black != 0 { + return 0x000000ff + } + return 0xffffffff + }) + + if dots > 0 { + b := m.Bounds() + for y 
:= 0; y <= len(p.Pixel); y++ { + for x := 0; x < b.Dx(); x++ { + m.SetRGBA(x, y*scale-(y/len(p.Pixel)), color.RGBA{127, 127, 127, 255}) + } + } + for x := 0; x <= len(p.Pixel); x++ { + for y := 0; y < b.Dx(); y++ { + m.SetRGBA(x*scale-(x/len(p.Pixel)), y, color.RGBA{127, 127, 127, 255}) + } + } + order := make([]image.Point, (p.DataBytes+p.CheckBytes)*8+1) + for y, row := range p.Pixel { + for x, pix := range row { + if r := pix.Role(); r != coding.Data && r != coding.Check { + continue + } + // draw.Draw(m, m.Bounds().Add(image.Pt(x*scale, y*scale)), dot, image.ZP, draw.Over) + order[pix.Offset()] = image.Point{x*scale + scale/2, y*scale + scale/2} + } + } + + for mode := 0; mode < 2; mode++ { + for i, p := range order { + q := order[i+1] + if q.X == 0 { + break + } + line(m, p, q, mode) + } + } + } + return m +} + +func line(m *image.RGBA, p, q image.Point, mode int) { + x := 0 + y := 0 + dx := q.X - p.X + dy := q.Y - p.Y + xsign := +1 + ysign := +1 + if dx < 0 { + xsign = -1 + dx = -dx + } + if dy < 0 { + ysign = -1 + dy = -dy + } + pt := func() { + switch mode { + case 0: + for dx := -2; dx <= 2; dx++ { + for dy := -2; dy <= 2; dy++ { + if dy*dx <= -4 || dy*dx >= 4 { + continue + } + m.SetRGBA(p.X+x*xsign+dx, p.Y+y*ysign+dy, color.RGBA{255, 192, 192, 255}) + } + } + + case 1: + m.SetRGBA(p.X+x*xsign, p.Y+y*ysign, color.RGBA{128, 0, 0, 255}) + } + } + if dx > dy { + for x < dx || y < dy { + pt() + x++ + if float64(x)*float64(dy)/float64(dx)-float64(y) > 0.5 { + y++ + } + } + } else { + for x < dx || y < dy { + pt() + y++ + if float64(y)*float64(dx)/float64(dy)-float64(x) > 0.5 { + x++ + } + } + } + pt() +} + +func pngEncode(c image.Image) []byte { + var b bytes.Buffer + png.Encode(&b, c) + return b.Bytes() +} + +// Frame handles a request for a single QR frame. 
+func Frame(w http.ResponseWriter, req *http.Request) { + arg := func(s string) int { x, _ := strconv.Atoi(req.FormValue(s)); return x } + v := arg("v") + scale := arg("scale") + if scale == 0 { + scale = 8 + } + + w.Header().Set("Cache-Control", "public, max-age=3600") + w.Write(pngEncode(makeFrame(req, req.FormValue("font"), arg("pt"), v, arg("l"), scale, arg("dots")))) +} + +// Frames handles a request for multiple QR frames. +func Frames(w http.ResponseWriter, req *http.Request) { + vs := strings.Split(req.FormValue("v"), ",") + + arg := func(s string) int { x, _ := strconv.Atoi(req.FormValue(s)); return x } + scale := arg("scale") + if scale == 0 { + scale = 8 + } + font := req.FormValue("font") + pt := arg("pt") + dots := arg("dots") + + var images []image.Image + l := arg("l") + for _, v := range vs { + l := l + if i := strings.Index(v, "."); i >= 0 { + l, _ = strconv.Atoi(v[i+1:]) + v = v[:i] + } + vv, _ := strconv.Atoi(v) + images = append(images, makeFrame(req, font, pt, vv, l, scale, dots)) + } + + b := images[len(images)-1].Bounds() + + dx := arg("dx") + if dx == 0 { + dx = b.Dx() + } + x, y := 0, 0 + xmax := 0 + sep := arg("sep") + if sep == 0 { + sep = 10 + } + var points []image.Point + for i, m := range images { + if x > 0 { + x += sep + } + if x > 0 && x+m.Bounds().Dx() > dx { + y += sep + images[i-1].Bounds().Dy() + x = 0 + } + points = append(points, image.Point{x, y}) + x += m.Bounds().Dx() + if x > xmax { + xmax = x + } + + } + + c := image.NewRGBA(image.Rect(0, 0, xmax, y+b.Dy())) + for i, m := range images { + draw.Draw(c, c.Bounds().Add(points[i]), m, image.ZP, draw.Src) + } + + w.Header().Set("Cache-Control", "public, max-age=3600") + w.Write(pngEncode(c)) +} + +// Mask handles a request for a single QR mask. 
+func Mask(w http.ResponseWriter, req *http.Request) { + arg := func(s string) int { x, _ := strconv.Atoi(req.FormValue(s)); return x } + v := arg("v") + m := arg("m") + scale := arg("scale") + if scale == 0 { + scale = 8 + } + + w.Header().Set("Cache-Control", "public, max-age=3600") + w.Write(pngEncode(makeMask(req, req.FormValue("font"), arg("pt"), v, m, scale))) +} + +// Masks handles a request for multiple QR masks. +func Masks(w http.ResponseWriter, req *http.Request) { + arg := func(s string) int { x, _ := strconv.Atoi(req.FormValue(s)); return x } + v := arg("v") + scale := arg("scale") + if scale == 0 { + scale = 8 + } + font := req.FormValue("font") + pt := arg("pt") + var mm []image.Image + for m := 0; m < 8; m++ { + mm = append(mm, makeMask(req, font, pt, v, m, scale)) + } + dx := mm[0].Bounds().Dx() + dy := mm[0].Bounds().Dy() + + sep := arg("sep") + if sep == 0 { + sep = 10 + } + c := image.NewRGBA(image.Rect(0, 0, (dx+sep)*4-sep, (dy+sep)*2-sep)) + for m := 0; m < 8; m++ { + x := (m % 4) * (dx + sep) + y := (m / 4) * (dy + sep) + draw.Draw(c, c.Bounds().Add(image.Pt(x, y)), mm[m], image.ZP, draw.Src) + } + + w.Header().Set("Cache-Control", "public, max-age=3600") + w.Write(pngEncode(c)) +} + +var maskName = []string{ + "(x+y) % 2", + "y % 2", + "x % 3", + "(x+y) % 3", + "(y/2 + x/3) % 2", + "xy%2 + xy%3", + "(xy%2 + xy%3) % 2", + "(xy%3 + (x+y)%2) % 2", +} + +func makeMask(req *http.Request, font string, pt int, vers, mask, scale int) image.Image { + p, err := coding.NewPlan(coding.Version(vers), coding.L, coding.Mask(mask)) + if err != nil { + panic(err) + } + m := makeImage(req, maskName[mask], font, pt, len(p.Pixel), 0, scale, func(x, y int) uint32 { + pix := p.Pixel[y][x] + switch pix.Role() { + case coding.Data, coding.Check: + if pix&coding.Invert != 0 { + return 0x000000ff + } + } + return 0xffffffff + }) + return m +} + +var blockColors = []uint32{ + 0x7777ffff, + 0xffff77ff, + 0xff7777ff, + 0x77ffffff, + 0x1e90ffff, + 0xffffe0ff, + 
0x8b6969ff, + 0x77ff77ff, + 0x9b30ffff, + 0x00bfffff, + 0x90e890ff, + 0xfff68fff, + 0xffec8bff, + 0xffa07aff, + 0xffa54fff, + 0xeee8aaff, + 0x98fb98ff, + 0xbfbfbfff, + 0x54ff9fff, + 0xffaeb9ff, + 0xb23aeeff, + 0xbbffffff, + 0x7fffd4ff, + 0xff7a7aff, + 0x00007fff, +} + +func dark(x uint32) uint32 { + r, g, b, a := byte(x>>24), byte(x>>16), byte(x>>8), byte(x) + r = r/2 + r/4 + g = g/2 + g/4 + b = b/2 + b/4 + return uint32(r)<<24 | uint32(g)<<16 | uint32(b)<<8 | uint32(a) +} + +func clamp(x int) byte { + if x < 0 { + return 0 + } + if x > 255 { + return 255 + } + return byte(x) +} + +func max(x, y int) int { + if x > y { + return x + } + return y +} + +// Arrow handles a request for an arrow pointing in a given direction. +func Arrow(w http.ResponseWriter, req *http.Request) { + arg := func(s string) int { x, _ := strconv.Atoi(req.FormValue(s)); return x } + dir := arg("dir") + size := arg("size") + if size == 0 { + size = 50 + } + del := size / 10 + + m := image.NewRGBA(image.Rect(0, 0, size, size)) + + if dir == 4 { + draw.Draw(m, m.Bounds(), image.Black, image.ZP, draw.Src) + draw.Draw(m, image.Rect(5, 5, size-5, size-5), image.White, image.ZP, draw.Src) + } + + pt := func(x, y int, c color.RGBA) { + switch dir { + case 0: + m.SetRGBA(x, y, c) + case 1: + m.SetRGBA(y, size-1-x, c) + case 2: + m.SetRGBA(size-1-x, size-1-y, c) + case 3: + m.SetRGBA(size-1-y, x, c) + } + } + + for y := 0; y < size/2; y++ { + for x := 0; x < del && x < y; x++ { + pt(x, y, color.RGBA{0, 0, 0, 255}) + } + for x := del; x < y-del; x++ { + pt(x, y, color.RGBA{128, 128, 255, 255}) + } + for x := max(y-del, 0); x <= y; x++ { + pt(x, y, color.RGBA{0, 0, 0, 255}) + } + } + for y := size / 2; y < size; y++ { + for x := 0; x < del && x < size-1-y; x++ { + pt(x, y, color.RGBA{0, 0, 0, 255}) + } + for x := del; x < size-1-y-del; x++ { + pt(x, y, color.RGBA{128, 128, 192, 255}) + } + for x := max(size-1-y-del, 0); x <= size-1-y; x++ { + pt(x, y, color.RGBA{0, 0, 0, 255}) + } + } + + 
w.Header().Set("Cache-Control", "public, max-age=3600") + w.Write(pngEncode(m)) +} + +// Encode encodes a string using the given version, level, and mask. +func Encode(w http.ResponseWriter, req *http.Request) { + val := func(s string) int { + v, _ := strconv.Atoi(req.FormValue(s)) + return v + } + + l := coding.Level(val("l")) + v := coding.Version(val("v")) + enc := coding.String(req.FormValue("t")) + m := coding.Mask(val("m")) + + p, err := coding.NewPlan(v, l, m) + if err != nil { + panic(err) + } + cc, err := p.Encode(enc) + if err != nil { + panic(err) + } + + c := &qr.Code{Bitmap: cc.Bitmap, Size: cc.Size, Stride: cc.Stride, Scale: 8} + w.Header().Set("Content-Type", "image/png") + w.Header().Set("Cache-Control", "public, max-age=3600") + w.Write(c.PNG()) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/web/play.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/web/play.go new file mode 100644 index 00000000..68cd0078 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/web/play.go @@ -0,0 +1,1118 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +QR data layout + +qr/ + upload/ + id.png + id.fix + flag/ + id + +*/ +// TODO: Random seed taken from GET for caching, repeatability. +// TODO: Flag for abuse button + some kind of dashboard. +// TODO: +1 button on web page? permalink? +// TODO: Flag for abuse button on permalinks too? +// TODO: Make the page prettier. +// TODO: Cache headers. 
+ +package web + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/json" + "fmt" + "html/template" + "image" + "image/color" + _ "image/gif" + "image/png" + "io" + "math/rand" + "net/http" + "net/url" + "os" + "sort" + "strconv" + "strings" + "time" + + "camlistore.org/third_party/code.google.com/p/rsc/appfs/fs" + "camlistore.org/third_party/code.google.com/p/rsc/gf256" + "camlistore.org/third_party/code.google.com/p/rsc/qr" + "camlistore.org/third_party/code.google.com/p/rsc/qr/coding" + "camlistore.org/third_party/code.google.com/p/rsc/qr/web/resize" + _ "camlistore.org/third_party/go/pkg/image/jpeg" +) + +func runTemplate(c *fs.Context, w http.ResponseWriter, name string, data interface{}) { + t := template.New("main") + + main, _, err := c.Read(name) + if err != nil { + panic(err) + } + style, _, _ := c.Read("style.html") + main = append(main, style...) + _, err = t.Parse(string(main)) + if err != nil { + panic(err) + } + + var buf bytes.Buffer + if err := t.Execute(&buf, &data); err != nil { + panic(err) + } + w.Write(buf.Bytes()) +} + +func isImgName(s string) bool { + if len(s) != 32 { + return false + } + for i := 0; i < len(s); i++ { + if '0' <= s[i] && s[i] <= '9' || 'a' <= s[i] && s[i] <= 'f' { + continue + } + return false + } + return true +} + +func isTagName(s string) bool { + if len(s) != 16 { + return false + } + for i := 0; i < len(s); i++ { + if '0' <= s[i] && s[i] <= '9' || 'a' <= s[i] && s[i] <= 'f' { + continue + } + return false + } + return true +} + +// Draw is the handler for drawing a QR code. 
+func Draw(w http.ResponseWriter, req *http.Request) { + ctxt := fs.NewContext(req) + + url := req.FormValue("url") + if url == "" { + url = "http://swtch.com/qr" + } + if req.FormValue("upload") == "1" { + upload(w, req, url) + return + } + + t0 := time.Now() + img := req.FormValue("i") + if !isImgName(img) { + img = "pjw" + } + if req.FormValue("show") == "png" { + i := loadSize(ctxt, img, 48) + var buf bytes.Buffer + png.Encode(&buf, i) + w.Write(buf.Bytes()) + return + } + if req.FormValue("flag") == "1" { + flag(w, req, img, ctxt) + return + } + if req.FormValue("x") == "" { + var data = struct { + Name string + URL string + }{ + Name: img, + URL: url, + } + runTemplate(ctxt, w, "qr/main.html", &data) + return + } + + arg := func(s string) int { x, _ := strconv.Atoi(req.FormValue(s)); return x } + targ := makeTarg(ctxt, img, 17+4*arg("v")+arg("z")) + + m := &Image{ + Name: img, + Dx: arg("x"), + Dy: arg("y"), + URL: req.FormValue("u"), + Version: arg("v"), + Mask: arg("m"), + RandControl: arg("r") > 0, + Dither: arg("i") > 0, + OnlyDataBits: arg("d") > 0, + SaveControl: arg("c") > 0, + Scale: arg("scale"), + Target: targ, + Seed: int64(arg("s")), + Rotation: arg("o"), + Size: arg("z"), + } + if m.Version > 8 { + m.Version = 8 + } + + if m.Scale == 0 { + if arg("l") > 1 { + m.Scale = 8 + } else { + m.Scale = 4 + } + } + if m.Version >= 12 && m.Scale >= 4 { + m.Scale /= 2 + } + + if arg("l") == 1 { + data, err := json.Marshal(m) + if err != nil { + panic(err) + } + h := md5.New() + h.Write(data) + tag := fmt.Sprintf("%x", h.Sum(nil))[:16] + if err := ctxt.Write("qrsave/"+tag, data); err != nil { + panic(err) + } + http.Redirect(w, req, "/qr/show/"+tag, http.StatusTemporaryRedirect) + return + } + + if err := m.Encode(req); err != nil { + fmt.Fprintf(w, "%s\n", err) + return + } + + var dat []byte + switch { + case m.SaveControl: + dat = m.Control + default: + dat = m.Code.PNG() + } + + if arg("l") > 0 { + w.Header().Set("Content-Type", "image/png") + 
w.Write(dat) + return + } + + w.Header().Set("Content-Type", "text/html; charset=utf-8") + fmt.Fprint(w, "

    ") + fmt.Fprintf(w, "
    \n", m.Link()) + fmt.Fprintf(w, "
    \n") + fmt.Fprintf(w, "
    %v
    \n", time.Now().Sub(t0)) +} + +func (m *Image) Small() bool { + return 8*(17+4*int(m.Version)) < 512 +} + +func (m *Image) Link() string { + s := fmt.Sprint + b := func(v bool) string { + if v { + return "1" + } + return "0" + } + val := url.Values{ + "i": {m.Name}, + "x": {s(m.Dx)}, + "y": {s(m.Dy)}, + "z": {s(m.Size)}, + "u": {m.URL}, + "v": {s(m.Version)}, + "m": {s(m.Mask)}, + "r": {b(m.RandControl)}, + "t": {b(m.Dither)}, + "d": {b(m.OnlyDataBits)}, + "c": {b(m.SaveControl)}, + "s": {s(m.Seed)}, + } + return "/qr/draw?" + val.Encode() +} + +// Show is the handler for showing a stored QR code. +func Show(w http.ResponseWriter, req *http.Request) { + ctxt := fs.NewContext(req) + tag := req.URL.Path[len("/qr/show/"):] + png := strings.HasSuffix(tag, ".png") + if png { + tag = tag[:len(tag)-len(".png")] + } + if !isTagName(tag) { + fmt.Fprintf(w, "Sorry, QR code not found\n") + return + } + if req.FormValue("flag") == "1" { + flag(w, req, tag, ctxt) + return + } + data, _, err := ctxt.Read("qrsave/" + tag) + if err != nil { + fmt.Fprintf(w, "Sorry, QR code not found.\n") + return + } + + var m Image + if err := json.Unmarshal(data, &m); err != nil { + panic(err) + } + m.Tag = tag + + switch req.FormValue("size") { + case "big": + m.Scale *= 2 + case "small": + m.Scale /= 2 + } + + if png { + if err := m.Encode(req); err != nil { + panic(err) + return + } + w.Header().Set("Cache-Control", "public, max-age=3600") + w.Write(m.Code.PNG()) + return + } + + w.Header().Set("Cache-Control", "public, max-age=300") + runTemplate(ctxt, w, "qr/permalink.html", &m) +} + +func upload(w http.ResponseWriter, req *http.Request, link string) { + // Upload of a new image. + // Copied from Moustachio demo. + f, _, err := req.FormFile("image") + if err != nil { + fmt.Fprintf(w, "You need to select an image to upload.\n") + return + } + defer f.Close() + + i, _, err := image.Decode(f) + if err != nil { + panic(err) + } + + // Convert image to 128x128 gray+alpha. 
+ b := i.Bounds() + const max = 128 + // If it's gigantic, it's more efficient to downsample first + // and then resize; resizing will smooth out the roughness. + var i1 *image.RGBA + if b.Dx() > 4*max || b.Dy() > 4*max { + w, h := 2*max, 2*max + if b.Dx() > b.Dy() { + h = b.Dy() * h / b.Dx() + } else { + w = b.Dx() * w / b.Dy() + } + i1 = resize.Resample(i, b, w, h) + } else { + // "Resample" to same size, just to convert to RGBA. + i1 = resize.Resample(i, b, b.Dx(), b.Dy()) + } + b = i1.Bounds() + + // Encode to PNG. + dx, dy := 128, 128 + if b.Dx() > b.Dy() { + dy = b.Dy() * dx / b.Dx() + } else { + dx = b.Dx() * dy / b.Dy() + } + i128 := resize.ResizeRGBA(i1, i1.Bounds(), dx, dy) + + var buf bytes.Buffer + if err := png.Encode(&buf, i128); err != nil { + panic(err) + } + + h := md5.New() + h.Write(buf.Bytes()) + tag := fmt.Sprintf("%x", h.Sum(nil))[:32] + + ctxt := fs.NewContext(req) + if err := ctxt.Write("qr/upload/"+tag+".png", buf.Bytes()); err != nil { + panic(err) + } + + // Redirect with new image tag. + // Redirect to draw with new image tag. + http.Redirect(w, req, req.URL.Path+"?"+url.Values{"i": {tag}, "url": {link}}.Encode(), 302) +} + +func flag(w http.ResponseWriter, req *http.Request, img string, ctxt *fs.Context) { + if !isImgName(img) && !isTagName(img) { + fmt.Fprintf(w, "Invalid image.\n") + return + } + data, _, _ := ctxt.Read("qr/flag/" + img) + data = append(data, '!') + ctxt.Write("qr/flag/"+img, data) + + fmt.Fprintf(w, "Thank you. 
The image has been reported.\n") +} + +func loadSize(ctxt *fs.Context, name string, max int) *image.RGBA { + data, _, err := ctxt.Read("qr/upload/" + name + ".png") + if err != nil { + panic(err) + } + i, _, err := image.Decode(bytes.NewBuffer(data)) + if err != nil { + panic(err) + } + b := i.Bounds() + dx, dy := max, max + if b.Dx() > b.Dy() { + dy = b.Dy() * dx / b.Dx() + } else { + dx = b.Dx() * dy / b.Dy() + } + var irgba *image.RGBA + switch i := i.(type) { + case *image.RGBA: + irgba = resize.ResizeRGBA(i, i.Bounds(), dx, dy) + case *image.NRGBA: + irgba = resize.ResizeNRGBA(i, i.Bounds(), dx, dy) + } + return irgba +} + +func makeTarg(ctxt *fs.Context, name string, max int) [][]int { + i := loadSize(ctxt, name, max) + b := i.Bounds() + dx, dy := b.Dx(), b.Dy() + targ := make([][]int, dy) + arr := make([]int, dx*dy) + for y := 0; y < dy; y++ { + targ[y], arr = arr[:dx], arr[dx:] + row := targ[y] + for x := 0; x < dx; x++ { + p := i.Pix[y*i.Stride+4*x:] + r, g, b, a := p[0], p[1], p[2], p[3] + if a == 0 { + row[x] = -1 + } else { + row[x] = int((299*uint32(r) + 587*uint32(g) + 114*uint32(b) + 500) / 1000) + } + } + } + return targ +} + +type Image struct { + Name string + Target [][]int + Dx int + Dy int + URL string + Tag string + Version int + Mask int + Scale int + Rotation int + Size int + + // RandControl says to pick the pixels randomly. + RandControl bool + Seed int64 + + // Dither says to dither instead of using threshold pixel layout. + Dither bool + + // OnlyDataBits says to use only data bits, not check bits. + OnlyDataBits bool + + // Code is the final QR code. + Code *qr.Code + + // Control is a PNG showing the pixels that we controlled. + // Pixels we don't control are grayed out. 
+ SaveControl bool + Control []byte +} + +type Pixinfo struct { + X int + Y int + Pix coding.Pixel + Targ byte + DTarg int + Contrast int + HardZero bool + Block *BitBlock + Bit uint +} + +type Pixorder struct { + Off int + Priority int +} + +type byPriority []Pixorder + +func (x byPriority) Len() int { return len(x) } +func (x byPriority) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byPriority) Less(i, j int) bool { return x[i].Priority > x[j].Priority } + +func (m *Image) target(x, y int) (targ byte, contrast int) { + tx := x + m.Dx + ty := y + m.Dy + if ty < 0 || ty >= len(m.Target) || tx < 0 || tx >= len(m.Target[ty]) { + return 255, -1 + } + + v0 := m.Target[ty][tx] + if v0 < 0 { + return 255, -1 + } + targ = byte(v0) + + n := 0 + sum := 0 + sumsq := 0 + const del = 5 + for dy := -del; dy <= del; dy++ { + for dx := -del; dx <= del; dx++ { + if 0 <= ty+dy && ty+dy < len(m.Target) && 0 <= tx+dx && tx+dx < len(m.Target[ty+dy]) { + v := m.Target[ty+dy][tx+dx] + sum += v + sumsq += v * v + n++ + } + } + } + + avg := sum / n + contrast = sumsq/n - avg*avg + return +} + +func (m *Image) rotate(p *coding.Plan, rot int) { + if rot == 0 { + return + } + + N := len(p.Pixel) + pix := make([][]coding.Pixel, N) + apix := make([]coding.Pixel, N*N) + for i := range pix { + pix[i], apix = apix[:N], apix[N:] + } + + switch rot { + case 0: + // ok + case 1: + for y := 0; y < N; y++ { + for x := 0; x < N; x++ { + pix[y][x] = p.Pixel[x][N-1-y] + } + } + case 2: + for y := 0; y < N; y++ { + for x := 0; x < N; x++ { + pix[y][x] = p.Pixel[N-1-y][N-1-x] + } + } + case 3: + for y := 0; y < N; y++ { + for x := 0; x < N; x++ { + pix[y][x] = p.Pixel[N-1-x][y] + } + } + } + + p.Pixel = pix +} + +func (m *Image) Encode(req *http.Request) error { + p, err := coding.NewPlan(coding.Version(m.Version), coding.L, coding.Mask(m.Mask)) + if err != nil { + return err + } + + m.rotate(p, m.Rotation) + + rand := rand.New(rand.NewSource(m.Seed)) + + // QR parameters. 
+ nd := p.DataBytes / p.Blocks + nc := p.CheckBytes / p.Blocks + extra := p.DataBytes - nd*p.Blocks + rs := gf256.NewRSEncoder(coding.Field, nc) + + // Build information about pixels, indexed by data/check bit number. + pixByOff := make([]Pixinfo, (p.DataBytes+p.CheckBytes)*8) + expect := make([][]bool, len(p.Pixel)) + for y, row := range p.Pixel { + expect[y] = make([]bool, len(row)) + for x, pix := range row { + targ, contrast := m.target(x, y) + if m.RandControl && contrast >= 0 { + contrast = rand.Intn(128) + 64*((x+y)%2) + 64*((x+y)%3%2) + } + expect[y][x] = pix&coding.Black != 0 + if r := pix.Role(); r == coding.Data || r == coding.Check { + pixByOff[pix.Offset()] = Pixinfo{X: x, Y: y, Pix: pix, Targ: targ, Contrast: contrast} + } + } + } + +Again: + // Count fixed initial data bits, prepare template URL. + url := m.URL + "#" + var b coding.Bits + coding.String(url).Encode(&b, p.Version) + coding.Num("").Encode(&b, p.Version) + bbit := b.Bits() + dbit := p.DataBytes*8 - bbit + if dbit < 0 { + return fmt.Errorf("cannot encode URL into available bits") + } + num := make([]byte, dbit/10*3) + for i := range num { + num[i] = '0' + } + b.Pad(dbit) + b.Reset() + coding.String(url).Encode(&b, p.Version) + coding.Num(num).Encode(&b, p.Version) + b.AddCheckBytes(p.Version, p.Level) + data := b.Bytes() + + doff := 0 // data offset + coff := 0 // checksum offset + mbit := bbit + dbit/10*10 + + // Choose pixels. + bitblocks := make([]*BitBlock, p.Blocks) + for blocknum := 0; blocknum < p.Blocks; blocknum++ { + if blocknum == p.Blocks-extra { + nd++ + } + + bdata := data[doff/8 : doff/8+nd] + cdata := data[p.DataBytes+coff/8 : p.DataBytes+coff/8+nc] + bb := newBlock(nd, nc, rs, bdata, cdata) + bitblocks[blocknum] = bb + + // Determine which bits in this block we can try to edit. 
+ lo, hi := 0, nd*8 + if lo < bbit-doff { + lo = bbit - doff + if lo > hi { + lo = hi + } + } + if hi > mbit-doff { + hi = mbit - doff + if hi < lo { + hi = lo + } + } + + // Preserve [0, lo) and [hi, nd*8). + for i := 0; i < lo; i++ { + if !bb.canSet(uint(i), (bdata[i/8]>>uint(7-i&7))&1) { + return fmt.Errorf("cannot preserve required bits") + } + } + for i := hi; i < nd*8; i++ { + if !bb.canSet(uint(i), (bdata[i/8]>>uint(7-i&7))&1) { + return fmt.Errorf("cannot preserve required bits") + } + } + + // Can edit [lo, hi) and checksum bits to hit target. + // Determine which ones to try first. + order := make([]Pixorder, (hi-lo)+nc*8) + for i := lo; i < hi; i++ { + order[i-lo].Off = doff + i + } + for i := 0; i < nc*8; i++ { + order[hi-lo+i].Off = p.DataBytes*8 + coff + i + } + if m.OnlyDataBits { + order = order[:hi-lo] + } + for i := range order { + po := &order[i] + po.Priority = pixByOff[po.Off].Contrast<<8 | rand.Intn(256) + } + sort.Sort(byPriority(order)) + + const mark = false + for i := range order { + po := &order[i] + pinfo := &pixByOff[po.Off] + bval := pinfo.Targ + if bval < 128 { + bval = 1 + } else { + bval = 0 + } + pix := pinfo.Pix + if pix&coding.Invert != 0 { + bval ^= 1 + } + if pinfo.HardZero { + bval = 0 + } + + var bi int + if pix.Role() == coding.Data { + bi = po.Off - doff + } else { + bi = po.Off - p.DataBytes*8 - coff + nd*8 + } + if bb.canSet(uint(bi), bval) { + pinfo.Block = bb + pinfo.Bit = uint(bi) + if mark { + p.Pixel[pinfo.Y][pinfo.X] = coding.Black + } + } else { + if pinfo.HardZero { + panic("hard zero") + } + if mark { + p.Pixel[pinfo.Y][pinfo.X] = 0 + } + } + } + bb.copyOut() + + const cheat = false + for i := 0; i < nd*8; i++ { + pinfo := &pixByOff[doff+i] + pix := p.Pixel[pinfo.Y][pinfo.X] + if bb.B[i/8]&(1<= 128 { + // want white + pval = 0 + v = 255 + } + + bval := pval // bit value + if pix&coding.Invert != 0 { + bval ^= 1 + } + if pinfo.HardZero && bval != 0 { + bval ^= 1 + pval ^= 1 + v ^= 255 + } + + // Set pixel value as 
we want it. + pinfo.Block.reset(pinfo.Bit, bval) + + _, _ = x, y + + err := targ - v + if x+1 < len(row) { + addDither(pixByOff, row[x+1], err*7/16) + } + if false && y+1 < len(p.Pixel) { + if x > 0 { + addDither(pixByOff, p.Pixel[y+1][x-1], err*3/16) + } + addDither(pixByOff, p.Pixel[y+1][x], err*5/16) + if x+1 < len(row) { + addDither(pixByOff, p.Pixel[y+1][x+1], err*1/16) + } + } + } + } + + for _, bb := range bitblocks { + bb.copyOut() + } + } + + noops := 0 + // Copy numbers back out. + for i := 0; i < dbit/10; i++ { + // Pull out 10 bits. + v := 0 + for j := 0; j < 10; j++ { + bi := uint(bbit + 10*i + j) + v <<= 1 + v |= int((data[bi/8] >> (7 - bi&7)) & 1) + } + // Turn into 3 digits. + if v >= 1000 { + // Oops - too many 1 bits. + // We know the 512, 256, 128, 64, 32 bits are all set. + // Pick one at random to clear. This will break some + // checksum bits, but so be it. + println("oops", i, v) + pinfo := &pixByOff[bbit+10*i+3] // TODO random + pinfo.Contrast = 1e9 >> 8 + pinfo.HardZero = true + noops++ + } + num[i*3+0] = byte(v/100 + '0') + num[i*3+1] = byte(v/10%10 + '0') + num[i*3+2] = byte(v%10 + '0') + } + if noops > 0 { + goto Again + } + + var b1 coding.Bits + coding.String(url).Encode(&b1, p.Version) + coding.Num(num).Encode(&b1, p.Version) + b1.AddCheckBytes(p.Version, p.Level) + if !bytes.Equal(b.Bytes(), b1.Bytes()) { + fmt.Printf("mismatch\n%d %x\n%d %x\n", len(b.Bytes()), b.Bytes(), len(b1.Bytes()), b1.Bytes()) + panic("byte mismatch") + } + + cc, err := p.Encode(coding.String(url), coding.Num(num)) + if err != nil { + return err + } + + if !m.Dither { + for y, row := range expect { + for x, pix := range row { + if cc.Black(x, y) != pix { + println("mismatch", x, y, p.Pixel[y][x].String()) + } + } + } + } + + m.Code = &qr.Code{Bitmap: cc.Bitmap, Size: cc.Size, Stride: cc.Stride, Scale: m.Scale} + + if m.SaveControl { + m.Control = pngEncode(makeImage(req, "", "", 0, cc.Size, 4, m.Scale, func(x, y int) (rgba uint32) { + pix := p.Pixel[y][x] + if 
pix.Role() == coding.Data || pix.Role() == coding.Check { + pinfo := &pixByOff[pix.Offset()] + if pinfo.Block != nil { + if cc.Black(x, y) { + return 0x000000ff + } + return 0xffffffff + } + } + if cc.Black(x, y) { + return 0x3f3f3fff + } + return 0xbfbfbfff + })) + } + + return nil +} + +func addDither(pixByOff []Pixinfo, pix coding.Pixel, err int) { + if pix.Role() != coding.Data && pix.Role() != coding.Check { + return + } + pinfo := &pixByOff[pix.Offset()] + println("add", pinfo.X, pinfo.Y, pinfo.DTarg, err) + pinfo.DTarg += err +} + +func readTarget(name string) ([][]int, error) { + f, err := os.Open(name) + if err != nil { + return nil, err + } + m, err := png.Decode(f) + if err != nil { + return nil, fmt.Errorf("decode %s: %v", name, err) + } + rect := m.Bounds() + target := make([][]int, rect.Dy()) + for i := range target { + target[i] = make([]int, rect.Dx()) + } + for y, row := range target { + for x := range row { + a := int(color.RGBAModel.Convert(m.At(x, y)).(color.RGBA).A) + t := int(color.GrayModel.Convert(m.At(x, y)).(color.Gray).Y) + if a == 0 { + t = -1 + } + row[x] = t + } + } + return target, nil +} + +type BitBlock struct { + DataBytes int + CheckBytes int + B []byte + M [][]byte + Tmp []byte + RS *gf256.RSEncoder + bdata []byte + cdata []byte +} + +func newBlock(nd, nc int, rs *gf256.RSEncoder, dat, cdata []byte) *BitBlock { + b := &BitBlock{ + DataBytes: nd, + CheckBytes: nc, + B: make([]byte, nd+nc), + Tmp: make([]byte, nc), + RS: rs, + bdata: dat, + cdata: cdata, + } + copy(b.B, dat) + rs.ECC(b.B[:nd], b.B[nd:]) + b.check() + if !bytes.Equal(b.Tmp, cdata) { + panic("cdata") + } + + b.M = make([][]byte, nd*8) + for i := range b.M { + row := make([]byte, nd+nc) + b.M[i] = row + for j := range row { + row[j] = 0 + } + row[i/8] = 1 << (7 - uint(i%8)) + rs.ECC(row[:nd], row[nd:]) + } + return b +} + +func (b *BitBlock) check() { + b.RS.ECC(b.B[:b.DataBytes], b.Tmp) + if !bytes.Equal(b.B[b.DataBytes:], b.Tmp) { + fmt.Printf("ecc 
mismatch\n%x\n%x\n", b.B[b.DataBytes:], b.Tmp) + panic("mismatch") + } +} + +func (b *BitBlock) reset(bi uint, bval byte) { + if (b.B[bi/8]>>(7-bi&7))&1 == bval { + // already has desired bit + return + } + // rows that have already been set + m := b.M[len(b.M):cap(b.M)] + for _, row := range m { + if row[bi/8]&(1<<(7-bi&7)) != 0 { + // Found it. + for j, v := range row { + b.B[j] ^= v + } + return + } + } + panic("reset of unset bit") +} + +func (b *BitBlock) canSet(bi uint, bval byte) bool { + found := false + m := b.M + for j, row := range m { + if row[bi/8]&(1<<(7-bi&7)) == 0 { + continue + } + if !found { + found = true + if j != 0 { + m[0], m[j] = m[j], m[0] + } + continue + } + for k := range row { + row[k] ^= m[0][k] + } + } + if !found { + return false + } + + targ := m[0] + + // Subtract from saved-away rows too. + for _, row := range m[len(m):cap(m)] { + if row[bi/8]&(1<<(7-bi&7)) == 0 { + continue + } + for k := range row { + row[k] ^= targ[k] + } + } + + // Found a row with bit #bi == 1 and cut that bit from all the others. + // Apply to data and remove from m. 
+ if (b.B[bi/8]>>(7-bi&7))&1 != bval { + for j, v := range targ { + b.B[j] ^= v + } + } + b.check() + n := len(m) - 1 + m[0], m[n] = m[n], m[0] + b.M = m[:n] + + for _, row := range b.M { + if row[bi/8]&(1<<(7-bi&7)) != 0 { + panic("did not reduce") + } + } + + return true +} + +func (b *BitBlock) copyOut() { + b.check() + copy(b.bdata, b.B[:b.DataBytes]) + copy(b.cdata, b.B[b.DataBytes:]) +} + +func showtable(w http.ResponseWriter, b *BitBlock, gray func(int) bool) { + nd := b.DataBytes + nc := b.CheckBytes + + fmt.Fprintf(w, "\n") + line := func() { + fmt.Fprintf(w, "\n") + for i := 0; i < (nd+nc)*8; i++ { + fmt.Fprintf(w, "> uint(7-i&7) & 1 + if gray(i) { + fmt.Fprintf(w, " class='gray'") + } + fmt.Fprintf(w, ">") + if v == 1 { + fmt.Fprintf(w, "1") + } + } + line() + } + + m := b.M[len(b.M):cap(b.M)] + for i := len(m) - 1; i >= 0; i-- { + dorow(m[i]) + } + m = b.M + for _, row := range b.M { + dorow(row) + } + + fmt.Fprintf(w, "
    \n", (nd+nc)*8) + } + line() + dorow := func(row []byte) { + fmt.Fprintf(w, "
    \n") +} + +func BitsTable(w http.ResponseWriter, req *http.Request) { + nd := 2 + nc := 2 + fmt.Fprintf(w, ` + + `) + rs := gf256.NewRSEncoder(coding.Field, nc) + dat := make([]byte, nd+nc) + b := newBlock(nd, nc, rs, dat[:nd], dat[nd:]) + for i := 0; i < nd*8; i++ { + b.canSet(uint(i), 0) + } + showtable(w, b, func(i int) bool { return i < nd*8 }) + + b = newBlock(nd, nc, rs, dat[:nd], dat[nd:]) + for j := 0; j < (nd+nc)*8; j += 2 { + b.canSet(uint(j), 0) + } + showtable(w, b, func(i int) bool { return i%2 == 0 }) + +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/web/resize/resize.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/web/resize/resize.go new file mode 100644 index 00000000..02c8b004 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/rsc/qr/web/resize/resize.go @@ -0,0 +1,152 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package resize + +import ( + "image" + "image/color" +) + +// average convert the sums to averages and returns the result. +func average(sum []uint64, w, h int, n uint64) *image.RGBA { + ret := image.NewRGBA(image.Rect(0, 0, w, h)) + for y := 0; y < h; y++ { + for x := 0; x < w; x++ { + index := 4 * (y*w + x) + pix := ret.Pix[y*ret.Stride+x*4:] + pix[0] = uint8(sum[index+0] / n) + pix[1] = uint8(sum[index+1] / n) + pix[2] = uint8(sum[index+2] / n) + pix[3] = uint8(sum[index+3] / n) + } + } + return ret +} + +// ResizeRGBA returns a scaled copy of the RGBA image slice r of m. +// The returned image has width w and height h. +func ResizeRGBA(m *image.RGBA, r image.Rectangle, w, h int) *image.RGBA { + ww, hh := uint64(w), uint64(h) + dx, dy := uint64(r.Dx()), uint64(r.Dy()) + // See comment in Resize. 
+ n, sum := dx*dy, make([]uint64, 4*w*h) + for y := r.Min.Y; y < r.Max.Y; y++ { + pix := m.Pix[(y-r.Min.Y)*m.Stride:] + for x := r.Min.X; x < r.Max.X; x++ { + // Get the source pixel. + p := pix[(x-r.Min.X)*4:] + r64 := uint64(p[0]) + g64 := uint64(p[1]) + b64 := uint64(p[2]) + a64 := uint64(p[3]) + // Spread the source pixel over 1 or more destination rows. + py := uint64(y) * hh + for remy := hh; remy > 0; { + qy := dy - (py % dy) + if qy > remy { + qy = remy + } + // Spread the source pixel over 1 or more destination columns. + px := uint64(x) * ww + index := 4 * ((py/dy)*ww + (px / dx)) + for remx := ww; remx > 0; { + qx := dx - (px % dx) + if qx > remx { + qx = remx + } + qxy := qx * qy + sum[index+0] += r64 * qxy + sum[index+1] += g64 * qxy + sum[index+2] += b64 * qxy + sum[index+3] += a64 * qxy + index += 4 + px += qx + remx -= qx + } + py += qy + remy -= qy + } + } + } + return average(sum, w, h, n) +} + +// ResizeNRGBA returns a scaled copy of the RGBA image slice r of m. +// The returned image has width w and height h. +func ResizeNRGBA(m *image.NRGBA, r image.Rectangle, w, h int) *image.RGBA { + ww, hh := uint64(w), uint64(h) + dx, dy := uint64(r.Dx()), uint64(r.Dy()) + // See comment in Resize. + n, sum := dx*dy, make([]uint64, 4*w*h) + for y := r.Min.Y; y < r.Max.Y; y++ { + pix := m.Pix[(y-r.Min.Y)*m.Stride:] + for x := r.Min.X; x < r.Max.X; x++ { + // Get the source pixel. + p := pix[(x-r.Min.X)*4:] + r64 := uint64(p[0]) + g64 := uint64(p[1]) + b64 := uint64(p[2]) + a64 := uint64(p[3]) + r64 = (r64 * a64) / 255 + g64 = (g64 * a64) / 255 + b64 = (b64 * a64) / 255 + // Spread the source pixel over 1 or more destination rows. + py := uint64(y) * hh + for remy := hh; remy > 0; { + qy := dy - (py % dy) + if qy > remy { + qy = remy + } + // Spread the source pixel over 1 or more destination columns. 
+ px := uint64(x) * ww + index := 4 * ((py/dy)*ww + (px / dx)) + for remx := ww; remx > 0; { + qx := dx - (px % dx) + if qx > remx { + qx = remx + } + qxy := qx * qy + sum[index+0] += r64 * qxy + sum[index+1] += g64 * qxy + sum[index+2] += b64 * qxy + sum[index+3] += a64 * qxy + index += 4 + px += qx + remx -= qx + } + py += qy + remy -= qy + } + } + } + return average(sum, w, h, n) +} + +// Resample returns a resampled copy of the image slice r of m. +// The returned image has width w and height h. +func Resample(m image.Image, r image.Rectangle, w, h int) *image.RGBA { + if w < 0 || h < 0 { + return nil + } + if w == 0 || h == 0 || r.Dx() <= 0 || r.Dy() <= 0 { + return image.NewRGBA(image.Rect(0, 0, w, h)) + } + curw, curh := r.Dx(), r.Dy() + img := image.NewRGBA(image.Rect(0, 0, w, h)) + for y := 0; y < h; y++ { + for x := 0; x < w; x++ { + // Get a source pixel. + subx := x * curw / w + suby := y * curh / h + r32, g32, b32, a32 := m.At(subx, suby).RGBA() + r := uint8(r32 >> 8) + g := uint8(g32 >> 8) + b := uint8(b32 >> 8) + a := uint8(a32 >> 8) + img.SetRGBA(x, y, color.RGBA{r, g, b, a}) + } + } + return img +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/.hgignore b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/.hgignore new file mode 100644 index 00000000..055f43c9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/.hgignore @@ -0,0 +1,29 @@ +syntax:glob +.DS_Store +.git +.gitignore +*.[568ao] +*.ao +*.so +*.pyc +._* +.nfs.* +[568a].out +*~ +*.orig +*.rej +*.exe +.*.swp +core +*.cgo*.go +*.cgo*.c +_cgo_* +_obj +_test +_testmain.go +build.out +test.out +y.tab.[ch] + +syntax:regexp +^.*/core.[0-9]*$ diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/AUTHORS b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/AUTHORS new file mode 100644 index 00000000..6a87d7ce 
--- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/AUTHORS @@ -0,0 +1,11 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Google Inc. diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/CONTRIBUTORS b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/CONTRIBUTORS new file mode 100644 index 00000000..17b42543 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/CONTRIBUTORS @@ -0,0 +1,32 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. 
+ +Kai Backman +Nigel Tao +Rob Pike +Russ Cox diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/LICENSE new file mode 100644 index 00000000..6050c10f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/README b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/README new file mode 100644 index 00000000..3cf8be1f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/README @@ -0,0 +1,11 @@ +This is a Snappy library for the Go programming language. + +To download and install from source: +$ go get code.google.com/p/snappy-go/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + +Contributions should follow the same procedure as for the Go project: +http://golang.org/doc/contribute.html + diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/lib/codereview/codereview.cfg b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/lib/codereview/codereview.cfg new file mode 100644 index 00000000..93b55c0a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/lib/codereview/codereview.cfg @@ -0,0 +1 @@ +defaultcc: golang-dev@googlegroups.com diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/snappy/decode.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/snappy/decode.go new file mode 100644 index 00000000..d169beab --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/snappy/decode.go @@ -0,0 +1,121 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" +) + +// ErrCorrupt reports that the input is invalid. +var ErrCorrupt = errors.New("snappy: corrupt input") + +// DecodedLen returns the length of the decoded block. 
+func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n == 0 { + return 0, 0, ErrCorrupt + } + if uint64(int(v)) != v { + return 0, 0, errors.New("snappy: decoded block is too large") + } + return int(v), n, nil +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if len(dst) < dLen { + dst = make([]byte, dLen) + } + + var d, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint(src[s] >> 2) + switch { + case x < 60: + s += 1 + case x == 60: + s += 2 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-1]) + case x == 61: + s += 3 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-2]) | uint(src[s-1])<<8 + case x == 62: + s += 4 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 + case x == 63: + s += 5 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 + } + length = int(x + 1) + if length <= 0 { + return nil, errors.New("snappy: unsupported literal length") + } + if length > len(dst)-d || length > len(src)-s { + return nil, ErrCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if s > len(src) { + return nil, ErrCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) + + case tagCopy2: 
+ s += 3 + if s > len(src) { + return nil, ErrCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(src[s-2]) | int(src[s-1])<<8 + + case tagCopy4: + return nil, errors.New("snappy: unsupported COPY_4 tag") + } + + end := d + length + if offset > d || end > len(dst) { + return nil, ErrCorrupt + } + for ; d < end; d++ { + dst[d] = dst[d-offset] + } + } + return dst, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/snappy/encode.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/snappy/encode.go new file mode 100644 index 00000000..a403ab96 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/snappy/encode.go @@ -0,0 +1,178 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" +) + +// We limit how far copy back-references can go, the same as the C++ code. +const maxOffset = 1 << 15 + +// equal4 returns whether b[i:i+4] equals b[j:j+4]. +func equal4(b []byte, i, j int) bool { + return b[i] == b[j] && + b[i+1] == b[j+1] && + b[i+2] == b[j+2] && + b[i+3] == b[j+3] +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
+func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + case n < 1<<16: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + case n < 1<<24: + dst[0] = 62<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + dst[3] = uint8(n >> 16) + i = 4 + case int64(n) < 1<<32: + dst[0] = 63<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + dst[3] = uint8(n >> 16) + dst[4] = uint8(n >> 24) + i = 5 + default: + panic("snappy: source buffer is too long") + } + if copy(dst[i:], lit) != len(lit) { + panic("snappy: destination buffer is too short") + } + return i + len(lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +func emitCopy(dst []byte, offset, length int) int { + i := 0 + for length > 0 { + x := length - 4 + if 0 <= x && x < 1<<3 && offset < 1<<11 { + dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + i += 2 + break + } + + x = length + if x > 1<<6 { + x = 1 << 6 + } + dst[i+0] = uint8(x-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= x + } + return i +} + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil dst. +func Encode(dst, src []byte) ([]byte, error) { + if n := MaxEncodedLen(len(src)); len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + // Return early if src is short. + if len(src) <= 4 { + d += emitLiteral(dst[d:], src) + return dst[:d], nil + } + + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. 
+ const maxTableSize = 1 << 14 + shift, tableSize := uint(32-8), 1<<8 + for tableSize < maxTableSize && tableSize < len(src) { + shift-- + tableSize *= 2 + } + var table [maxTableSize]int + for i := 0; i < tableSize; i++ { + table[i] = -1 + } + + // Iterate over the source bytes. + var ( + s int // The iterator position. + t int // The last position with the same hash as s. + lit int // The start position of any pending literal bytes. + ) + for s+3 < len(src) { + // Update the hash table. + h := uint32(src[s]) | uint32(src[s+1])<<8 | uint32(src[s+2])<<16 | uint32(src[s+3])<<24 + h = (h * 0x1e35a7bd) >> shift + t, table[h] = table[h], s + // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. + if t < 0 || s-t >= maxOffset || !equal4(src, t, s) { + s++ + continue + } + // Otherwise, we have a match. First, emit any pending literal bytes. + if lit != s { + d += emitLiteral(dst[d:], src[lit:s]) + } + // Extend the match to be as long as possible. + s0 := s + s, t = s+4, t+4 + for s < len(src) && src[s] == src[t] { + s++ + t++ + } + // Emit the copied bytes. + d += emitCopy(dst[d:], s-t, s-s0) + lit = s + } + + // Emit any final pending literal bytes and return. + if lit != len(src) { + d += emitLiteral(dst[d:], src[lit:]) + } + return dst[:d], nil +} + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +func MaxEncodedLen(srcLen int) int { + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. 
Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + return 32 + srcLen + srcLen/6 +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/snappy/snappy.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/snappy/snappy.go new file mode 100644 index 00000000..2f1b790d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/snappy/snappy.go @@ -0,0 +1,38 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the snappy block-based compression format. +// It aims for very high speeds and reasonable compression. +// +// The C++ snappy implementation is at http://code.google.com/p/snappy/ +package snappy + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. 
+ +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer supported. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/snappy/snappy_test.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/snappy/snappy_test.go new file mode 100644 index 00000000..13ee5c29 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/snappy-go/snappy/snappy_test.go @@ -0,0 +1,117 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package snappy + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "strings" + "testing" +) + +func roundtrip(b []byte) error { + e, err := Encode(nil, b) + if err != nil { + return fmt.Errorf("encoding error: %v", err) + } + d, err := Decode(nil, e) + if err != nil { + return fmt.Errorf("decoding error: %v", err) + } + if !bytes.Equal(b, d) { + return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) + } + return nil +} + +func TestSmallCopy(t *testing.T) { + for i := 0; i < 32; i++ { + s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" + if err := roundtrip([]byte(s)); err != nil { + t.Fatalf("i=%d: %v", i, err) + } + } +} + +func TestSmallRand(t *testing.T) { + rand.Seed(27354294) + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i, _ := range b { + b[i] = uint8(rand.Uint32()) + } + if err := roundtrip(b); err != nil { + t.Fatal(err) + } + } +} + +func TestSmallRegular(t *testing.T) { + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i, _ := range b { + b[i] = uint8(i%10 + 'a') + } + if err := roundtrip(b); err != nil { + t.Fatal(err) + } + } +} + +func benchWords(b *testing.B, n int, decode bool) { + b.StopTimer() + + // Make src, a []byte of length n containing copies of the words file. + words, err := ioutil.ReadFile("/usr/share/dict/words") + if err != nil { + panic(err) + } + if len(words) == 0 { + panic("/usr/share/dict/words has zero length") + } + src := make([]byte, n) + for x := src; len(x) > 0; { + n := copy(x, words) + x = x[n:] + } + + // If benchmarking decoding, encode the src. + if decode { + src, err = Encode(nil, src) + if err != nil { + panic(err) + } + } + b.SetBytes(int64(len(src))) + + // Allocate a sufficiently large dst buffer. + var dst []byte + if decode { + dst = make([]byte, n) + } else { + dst = make([]byte, MaxEncodedLen(n)) + } + + // Run the loop. 
+ b.StartTimer() + for i := 0; i < b.N; i++ { + if decode { + Decode(dst, src) + } else { + Encode(dst, src) + } + } +} + +func BenchmarkDecodeWords1e3(b *testing.B) { benchWords(b, 1e3, true) } +func BenchmarkDecodeWords1e4(b *testing.B) { benchWords(b, 1e4, true) } +func BenchmarkDecodeWords1e5(b *testing.B) { benchWords(b, 1e5, true) } +func BenchmarkDecodeWords1e6(b *testing.B) { benchWords(b, 1e6, true) } +func BenchmarkEncodeWords1e3(b *testing.B) { benchWords(b, 1e3, false) } +func BenchmarkEncodeWords1e4(b *testing.B) { benchWords(b, 1e4, false) } +func BenchmarkEncodeWords1e5(b *testing.B) { benchWords(b, 1e5, false) } +func BenchmarkEncodeWords1e6(b *testing.B) { benchWords(b, 1e6, false) } diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/xsrftoken/COPYING b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/xsrftoken/COPYING new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/xsrftoken/COPYING @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/xsrftoken/xsrf.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/xsrftoken/xsrf.go new file mode 100644 index 00000000..bb0b5cce --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/xsrftoken/xsrf.go @@ -0,0 +1,94 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package xsrftoken provides methods for generating and validating secure XSRF tokens. +package xsrftoken + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "fmt" + "strconv" + "strings" + "time" +) + +// The duration that XSRF tokens are valid. +// It is exported so clients may set cookie timeouts that match generated tokens. 
+const Timeout = 24 * time.Hour + +// clean sanitizes a string for inclusion in a token by replacing all ":"s. +func clean(s string) string { + return strings.Replace(s, ":", "_", -1) +} + +// Generate returns a URL-safe secure XSRF token that expires in 24 hours. +// +// key is a secret key for your application. +// userID is a unique identifier for the user. +// actionID is the action the user is taking (e.g. POSTing to a particular path). +func Generate(key, userID, actionID string) string { + return generateAtTime(key, userID, actionID, time.Now()) +} + +// generateAtTime is like Generate, but returns a token that expires 24 hours from now. +func generateAtTime(key, userID, actionID string, now time.Time) string { + h := hmac.New(sha1.New, []byte(key)) + fmt.Fprintf(h, "%s:%s:%d", clean(userID), clean(actionID), now.UnixNano()) + tok := fmt.Sprintf("%s:%d", h.Sum(nil), now.UnixNano()) + return base64.URLEncoding.EncodeToString([]byte(tok)) +} + +// Valid returns true if token is a valid, unexpired token returned by Generate. +func Valid(token, key, userID, actionID string) bool { + return validAtTime(token, key, userID, actionID, time.Now()) +} + +// validAtTime is like Valid, but it uses now to check if the token is expired. +func validAtTime(token, key, userID, actionID string, now time.Time) bool { + // Decode the token. + data, err := base64.URLEncoding.DecodeString(token) + if err != nil { + return false + } + + // Extract the issue time of the token. + sep := bytes.LastIndex(data, []byte{':'}) + if sep < 0 { + return false + } + nanos, err := strconv.ParseInt(string(data[sep+1:]), 10, 64) + if err != nil { + return false + } + issueTime := time.Unix(0, nanos) + + // Check that the token is not expired. + if now.Sub(issueTime) >= Timeout { + return false + } + + // Check that the token is not from the future. + // Allow 1 minute grace period in case the token is being verified on a + // machine whose clock is behind the machine that issued the token. 
+ if issueTime.After(now.Add(1 * time.Minute)) { + return false + } + + // Check that the token matches the expected value. + expected := generateAtTime(key, userID, actionID, issueTime) + return token == expected +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/xsrftoken/xsrf_test.go b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/xsrftoken/xsrf_test.go new file mode 100644 index 00000000..6c3f71e5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/code.google.com/p/xsrftoken/xsrf_test.go @@ -0,0 +1,92 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package xsrftoken + +import ( + "encoding/base64" + "testing" + "time" +) + +const ( + key = "quay" + userID = "12345678" + actionID = "POST /form" +) + +var ( + now = time.Now() + oneMinuteFromNow = now.Add(1 * time.Minute) +) + +func TestValidToken(t *testing.T) { + tok := generateAtTime(key, userID, actionID, now) + if !validAtTime(tok, key, userID, actionID, oneMinuteFromNow) { + t.Error("One second later: Expected token to be valid") + } + if !validAtTime(tok, key, userID, actionID, now.Add(Timeout-1*time.Nanosecond)) { + t.Error("Just before timeout: Expected token to be valid") + } + if !validAtTime(tok, key, userID, actionID, now.Add(-1*time.Minute)) { + t.Error("One minute in the past: Expected token to be valid") + } +} + +// TestSeparatorReplacement tests that separators are being correctly substituted +func TestSeparatorReplacement(t *testing.T) { + tok := generateAtTime("foo:bar", "baz", "wah", now) + tok2 := generateAtTime("foo", "bar:baz", "wah", now) + if tok == tok2 { + t.Errorf("Expected generated tokens to be different") + } +} + +func TestInvalidToken(t *testing.T) { + invalidTokenTests := []struct { + name, key, userID, actionID string + t time.Time + }{ + {"Bad key", "foobar", userID, actionID, oneMinuteFromNow}, + {"Bad userID", key, "foobar", actionID, oneMinuteFromNow}, + {"Bad actionID", key, userID, "foobar", oneMinuteFromNow}, + {"Expired", key, userID, actionID, now.Add(Timeout)}, + {"More than 1 minute from the future", key, userID, actionID, now.Add(-1*time.Nanosecond - 1*time.Minute)}, + } + + tok := generateAtTime(key, userID, actionID, now) + for _, itt := range invalidTokenTests { + if validAtTime(tok, itt.key, itt.userID, itt.actionID, itt.t) { + t.Errorf("%v: Expected token to be invalid", itt.name) + } + } +} + +// TestValidateBadData primarily tests that no unexpected panics are triggered +// during parsing +func TestValidateBadData(t *testing.T) { + badDataTests := []struct { + name, tok string + }{ + {"Invalid Base64", 
"ASDab24(@)$*=="}, + {"No delimiter", base64.URLEncoding.EncodeToString([]byte("foobar12345678"))}, + {"Invalid time", base64.URLEncoding.EncodeToString([]byte("foobar:foobar"))}, + } + + for _, bdt := range badDataTests { + if validAtTime(bdt.tok, key, userID, actionID, oneMinuteFromNow) { + t.Errorf("%v: Expected token to be invalid", bdt.name) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/fontawesome/LICENSE.txt b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/LICENSE.txt new file mode 100644 index 00000000..1066d2f8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/LICENSE.txt @@ -0,0 +1,4 @@ +fonts/*: SIL OFL 1.1 (http://scripts.sil.org/OFL) +css/*: MIT (http://opensource.org/licenses/mit-license.html) + +Details at http://fortawesome.github.io/Font-Awesome/license/ diff --git a/vendor/github.com/camlistore/camlistore/third_party/fontawesome/VERSION.txt b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/VERSION.txt new file mode 100644 index 00000000..c4e41f94 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/VERSION.txt @@ -0,0 +1 @@ +4.0.3 diff --git a/vendor/github.com/camlistore/camlistore/third_party/fontawesome/css/font-awesome.css b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/css/font-awesome.css new file mode 100644 index 00000000..048cff97 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/css/font-awesome.css @@ -0,0 +1,1338 @@ +/*! 
+ * Font Awesome 4.0.3 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */ +/* FONT PATH + * -------------------------- */ +@font-face { + font-family: 'FontAwesome'; + src: url('../fonts/fontawesome-webfont.eot?v=4.0.3'); + src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.0.3') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff?v=4.0.3') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.0.3') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.0.3#fontawesomeregular') format('svg'); + font-weight: normal; + font-style: normal; +} +.fa { + display: inline-block; + font-family: FontAwesome; + font-style: normal; + font-weight: normal; + line-height: 1; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} +/* makes the font 33% larger relative to the icon container */ +.fa-lg { + font-size: 1.3333333333333333em; + line-height: 0.75em; + vertical-align: -15%; +} +.fa-2x { + font-size: 2em; +} +.fa-3x { + font-size: 3em; +} +.fa-4x { + font-size: 4em; +} +.fa-5x { + font-size: 5em; +} +.fa-fw { + width: 1.2857142857142858em; + text-align: center; +} +.fa-ul { + padding-left: 0; + margin-left: 2.142857142857143em; + list-style-type: none; +} +.fa-ul > li { + position: relative; +} +.fa-li { + position: absolute; + left: -2.142857142857143em; + width: 2.142857142857143em; + top: 0.14285714285714285em; + text-align: center; +} +.fa-li.fa-lg { + left: -1.8571428571428572em; +} +.fa-border { + padding: .2em .25em .15em; + border: solid 0.08em #eeeeee; + border-radius: .1em; +} +.pull-right { + float: right; +} +.pull-left { + float: left; +} +.fa.pull-left { + margin-right: .3em; +} +.fa.pull-right { + margin-left: .3em; +} +.fa-spin { + -webkit-animation: spin 2s infinite linear; + -moz-animation: spin 2s infinite linear; + -o-animation: spin 2s infinite linear; + animation: spin 2s infinite linear; +} +@-moz-keyframes 
spin { + 0% { + -moz-transform: rotate(0deg); + } + 100% { + -moz-transform: rotate(359deg); + } +} +@-webkit-keyframes spin { + 0% { + -webkit-transform: rotate(0deg); + } + 100% { + -webkit-transform: rotate(359deg); + } +} +@-o-keyframes spin { + 0% { + -o-transform: rotate(0deg); + } + 100% { + -o-transform: rotate(359deg); + } +} +@-ms-keyframes spin { + 0% { + -ms-transform: rotate(0deg); + } + 100% { + -ms-transform: rotate(359deg); + } +} +@keyframes spin { + 0% { + transform: rotate(0deg); + } + 100% { + transform: rotate(359deg); + } +} +.fa-rotate-90 { + filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1); + -webkit-transform: rotate(90deg); + -moz-transform: rotate(90deg); + -ms-transform: rotate(90deg); + -o-transform: rotate(90deg); + transform: rotate(90deg); +} +.fa-rotate-180 { + filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2); + -webkit-transform: rotate(180deg); + -moz-transform: rotate(180deg); + -ms-transform: rotate(180deg); + -o-transform: rotate(180deg); + transform: rotate(180deg); +} +.fa-rotate-270 { + filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3); + -webkit-transform: rotate(270deg); + -moz-transform: rotate(270deg); + -ms-transform: rotate(270deg); + -o-transform: rotate(270deg); + transform: rotate(270deg); +} +.fa-flip-horizontal { + filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1); + -webkit-transform: scale(-1, 1); + -moz-transform: scale(-1, 1); + -ms-transform: scale(-1, 1); + -o-transform: scale(-1, 1); + transform: scale(-1, 1); +} +.fa-flip-vertical { + filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1); + -webkit-transform: scale(1, -1); + -moz-transform: scale(1, -1); + -ms-transform: scale(1, -1); + -o-transform: scale(1, -1); + transform: scale(1, -1); +} +.fa-stack { + position: relative; + display: inline-block; + width: 2em; + height: 2em; + line-height: 2em; + vertical-align: middle; +} +.fa-stack-1x, +.fa-stack-2x { + 
position: absolute; + left: 0; + width: 100%; + text-align: center; +} +.fa-stack-1x { + line-height: inherit; +} +.fa-stack-2x { + font-size: 2em; +} +.fa-inverse { + color: #ffffff; +} +/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen + readers do not read off random characters that represent icons */ +.fa-glass:before { + content: "\f000"; +} +.fa-music:before { + content: "\f001"; +} +.fa-search:before { + content: "\f002"; +} +.fa-envelope-o:before { + content: "\f003"; +} +.fa-heart:before { + content: "\f004"; +} +.fa-star:before { + content: "\f005"; +} +.fa-star-o:before { + content: "\f006"; +} +.fa-user:before { + content: "\f007"; +} +.fa-film:before { + content: "\f008"; +} +.fa-th-large:before { + content: "\f009"; +} +.fa-th:before { + content: "\f00a"; +} +.fa-th-list:before { + content: "\f00b"; +} +.fa-check:before { + content: "\f00c"; +} +.fa-times:before { + content: "\f00d"; +} +.fa-search-plus:before { + content: "\f00e"; +} +.fa-search-minus:before { + content: "\f010"; +} +.fa-power-off:before { + content: "\f011"; +} +.fa-signal:before { + content: "\f012"; +} +.fa-gear:before, +.fa-cog:before { + content: "\f013"; +} +.fa-trash-o:before { + content: "\f014"; +} +.fa-home:before { + content: "\f015"; +} +.fa-file-o:before { + content: "\f016"; +} +.fa-clock-o:before { + content: "\f017"; +} +.fa-road:before { + content: "\f018"; +} +.fa-download:before { + content: "\f019"; +} +.fa-arrow-circle-o-down:before { + content: "\f01a"; +} +.fa-arrow-circle-o-up:before { + content: "\f01b"; +} +.fa-inbox:before { + content: "\f01c"; +} +.fa-play-circle-o:before { + content: "\f01d"; +} +.fa-rotate-right:before, +.fa-repeat:before { + content: "\f01e"; +} +.fa-refresh:before { + content: "\f021"; +} +.fa-list-alt:before { + content: "\f022"; +} +.fa-lock:before { + content: "\f023"; +} +.fa-flag:before { + content: "\f024"; +} +.fa-headphones:before { + content: "\f025"; +} +.fa-volume-off:before { + content: "\f026"; +} 
+.fa-volume-down:before { + content: "\f027"; +} +.fa-volume-up:before { + content: "\f028"; +} +.fa-qrcode:before { + content: "\f029"; +} +.fa-barcode:before { + content: "\f02a"; +} +.fa-tag:before { + content: "\f02b"; +} +.fa-tags:before { + content: "\f02c"; +} +.fa-book:before { + content: "\f02d"; +} +.fa-bookmark:before { + content: "\f02e"; +} +.fa-print:before { + content: "\f02f"; +} +.fa-camera:before { + content: "\f030"; +} +.fa-font:before { + content: "\f031"; +} +.fa-bold:before { + content: "\f032"; +} +.fa-italic:before { + content: "\f033"; +} +.fa-text-height:before { + content: "\f034"; +} +.fa-text-width:before { + content: "\f035"; +} +.fa-align-left:before { + content: "\f036"; +} +.fa-align-center:before { + content: "\f037"; +} +.fa-align-right:before { + content: "\f038"; +} +.fa-align-justify:before { + content: "\f039"; +} +.fa-list:before { + content: "\f03a"; +} +.fa-dedent:before, +.fa-outdent:before { + content: "\f03b"; +} +.fa-indent:before { + content: "\f03c"; +} +.fa-video-camera:before { + content: "\f03d"; +} +.fa-picture-o:before { + content: "\f03e"; +} +.fa-pencil:before { + content: "\f040"; +} +.fa-map-marker:before { + content: "\f041"; +} +.fa-adjust:before { + content: "\f042"; +} +.fa-tint:before { + content: "\f043"; +} +.fa-edit:before, +.fa-pencil-square-o:before { + content: "\f044"; +} +.fa-share-square-o:before { + content: "\f045"; +} +.fa-check-square-o:before { + content: "\f046"; +} +.fa-arrows:before { + content: "\f047"; +} +.fa-step-backward:before { + content: "\f048"; +} +.fa-fast-backward:before { + content: "\f049"; +} +.fa-backward:before { + content: "\f04a"; +} +.fa-play:before { + content: "\f04b"; +} +.fa-pause:before { + content: "\f04c"; +} +.fa-stop:before { + content: "\f04d"; +} +.fa-forward:before { + content: "\f04e"; +} +.fa-fast-forward:before { + content: "\f050"; +} +.fa-step-forward:before { + content: "\f051"; +} +.fa-eject:before { + content: "\f052"; +} +.fa-chevron-left:before 
{ + content: "\f053"; +} +.fa-chevron-right:before { + content: "\f054"; +} +.fa-plus-circle:before { + content: "\f055"; +} +.fa-minus-circle:before { + content: "\f056"; +} +.fa-times-circle:before { + content: "\f057"; +} +.fa-check-circle:before { + content: "\f058"; +} +.fa-question-circle:before { + content: "\f059"; +} +.fa-info-circle:before { + content: "\f05a"; +} +.fa-crosshairs:before { + content: "\f05b"; +} +.fa-times-circle-o:before { + content: "\f05c"; +} +.fa-check-circle-o:before { + content: "\f05d"; +} +.fa-ban:before { + content: "\f05e"; +} +.fa-arrow-left:before { + content: "\f060"; +} +.fa-arrow-right:before { + content: "\f061"; +} +.fa-arrow-up:before { + content: "\f062"; +} +.fa-arrow-down:before { + content: "\f063"; +} +.fa-mail-forward:before, +.fa-share:before { + content: "\f064"; +} +.fa-expand:before { + content: "\f065"; +} +.fa-compress:before { + content: "\f066"; +} +.fa-plus:before { + content: "\f067"; +} +.fa-minus:before { + content: "\f068"; +} +.fa-asterisk:before { + content: "\f069"; +} +.fa-exclamation-circle:before { + content: "\f06a"; +} +.fa-gift:before { + content: "\f06b"; +} +.fa-leaf:before { + content: "\f06c"; +} +.fa-fire:before { + content: "\f06d"; +} +.fa-eye:before { + content: "\f06e"; +} +.fa-eye-slash:before { + content: "\f070"; +} +.fa-warning:before, +.fa-exclamation-triangle:before { + content: "\f071"; +} +.fa-plane:before { + content: "\f072"; +} +.fa-calendar:before { + content: "\f073"; +} +.fa-random:before { + content: "\f074"; +} +.fa-comment:before { + content: "\f075"; +} +.fa-magnet:before { + content: "\f076"; +} +.fa-chevron-up:before { + content: "\f077"; +} +.fa-chevron-down:before { + content: "\f078"; +} +.fa-retweet:before { + content: "\f079"; +} +.fa-shopping-cart:before { + content: "\f07a"; +} +.fa-folder:before { + content: "\f07b"; +} +.fa-folder-open:before { + content: "\f07c"; +} +.fa-arrows-v:before { + content: "\f07d"; +} +.fa-arrows-h:before { + content: "\f07e"; 
+} +.fa-bar-chart-o:before { + content: "\f080"; +} +.fa-twitter-square:before { + content: "\f081"; +} +.fa-facebook-square:before { + content: "\f082"; +} +.fa-camera-retro:before { + content: "\f083"; +} +.fa-key:before { + content: "\f084"; +} +.fa-gears:before, +.fa-cogs:before { + content: "\f085"; +} +.fa-comments:before { + content: "\f086"; +} +.fa-thumbs-o-up:before { + content: "\f087"; +} +.fa-thumbs-o-down:before { + content: "\f088"; +} +.fa-star-half:before { + content: "\f089"; +} +.fa-heart-o:before { + content: "\f08a"; +} +.fa-sign-out:before { + content: "\f08b"; +} +.fa-linkedin-square:before { + content: "\f08c"; +} +.fa-thumb-tack:before { + content: "\f08d"; +} +.fa-external-link:before { + content: "\f08e"; +} +.fa-sign-in:before { + content: "\f090"; +} +.fa-trophy:before { + content: "\f091"; +} +.fa-github-square:before { + content: "\f092"; +} +.fa-upload:before { + content: "\f093"; +} +.fa-lemon-o:before { + content: "\f094"; +} +.fa-phone:before { + content: "\f095"; +} +.fa-square-o:before { + content: "\f096"; +} +.fa-bookmark-o:before { + content: "\f097"; +} +.fa-phone-square:before { + content: "\f098"; +} +.fa-twitter:before { + content: "\f099"; +} +.fa-facebook:before { + content: "\f09a"; +} +.fa-github:before { + content: "\f09b"; +} +.fa-unlock:before { + content: "\f09c"; +} +.fa-credit-card:before { + content: "\f09d"; +} +.fa-rss:before { + content: "\f09e"; +} +.fa-hdd-o:before { + content: "\f0a0"; +} +.fa-bullhorn:before { + content: "\f0a1"; +} +.fa-bell:before { + content: "\f0f3"; +} +.fa-certificate:before { + content: "\f0a3"; +} +.fa-hand-o-right:before { + content: "\f0a4"; +} +.fa-hand-o-left:before { + content: "\f0a5"; +} +.fa-hand-o-up:before { + content: "\f0a6"; +} +.fa-hand-o-down:before { + content: "\f0a7"; +} +.fa-arrow-circle-left:before { + content: "\f0a8"; +} +.fa-arrow-circle-right:before { + content: "\f0a9"; +} +.fa-arrow-circle-up:before { + content: "\f0aa"; +} +.fa-arrow-circle-down:before 
{ + content: "\f0ab"; +} +.fa-globe:before { + content: "\f0ac"; +} +.fa-wrench:before { + content: "\f0ad"; +} +.fa-tasks:before { + content: "\f0ae"; +} +.fa-filter:before { + content: "\f0b0"; +} +.fa-briefcase:before { + content: "\f0b1"; +} +.fa-arrows-alt:before { + content: "\f0b2"; +} +.fa-group:before, +.fa-users:before { + content: "\f0c0"; +} +.fa-chain:before, +.fa-link:before { + content: "\f0c1"; +} +.fa-cloud:before { + content: "\f0c2"; +} +.fa-flask:before { + content: "\f0c3"; +} +.fa-cut:before, +.fa-scissors:before { + content: "\f0c4"; +} +.fa-copy:before, +.fa-files-o:before { + content: "\f0c5"; +} +.fa-paperclip:before { + content: "\f0c6"; +} +.fa-save:before, +.fa-floppy-o:before { + content: "\f0c7"; +} +.fa-square:before { + content: "\f0c8"; +} +.fa-bars:before { + content: "\f0c9"; +} +.fa-list-ul:before { + content: "\f0ca"; +} +.fa-list-ol:before { + content: "\f0cb"; +} +.fa-strikethrough:before { + content: "\f0cc"; +} +.fa-underline:before { + content: "\f0cd"; +} +.fa-table:before { + content: "\f0ce"; +} +.fa-magic:before { + content: "\f0d0"; +} +.fa-truck:before { + content: "\f0d1"; +} +.fa-pinterest:before { + content: "\f0d2"; +} +.fa-pinterest-square:before { + content: "\f0d3"; +} +.fa-google-plus-square:before { + content: "\f0d4"; +} +.fa-google-plus:before { + content: "\f0d5"; +} +.fa-money:before { + content: "\f0d6"; +} +.fa-caret-down:before { + content: "\f0d7"; +} +.fa-caret-up:before { + content: "\f0d8"; +} +.fa-caret-left:before { + content: "\f0d9"; +} +.fa-caret-right:before { + content: "\f0da"; +} +.fa-columns:before { + content: "\f0db"; +} +.fa-unsorted:before, +.fa-sort:before { + content: "\f0dc"; +} +.fa-sort-down:before, +.fa-sort-asc:before { + content: "\f0dd"; +} +.fa-sort-up:before, +.fa-sort-desc:before { + content: "\f0de"; +} +.fa-envelope:before { + content: "\f0e0"; +} +.fa-linkedin:before { + content: "\f0e1"; +} +.fa-rotate-left:before, +.fa-undo:before { + content: "\f0e2"; +} 
+.fa-legal:before, +.fa-gavel:before { + content: "\f0e3"; +} +.fa-dashboard:before, +.fa-tachometer:before { + content: "\f0e4"; +} +.fa-comment-o:before { + content: "\f0e5"; +} +.fa-comments-o:before { + content: "\f0e6"; +} +.fa-flash:before, +.fa-bolt:before { + content: "\f0e7"; +} +.fa-sitemap:before { + content: "\f0e8"; +} +.fa-umbrella:before { + content: "\f0e9"; +} +.fa-paste:before, +.fa-clipboard:before { + content: "\f0ea"; +} +.fa-lightbulb-o:before { + content: "\f0eb"; +} +.fa-exchange:before { + content: "\f0ec"; +} +.fa-cloud-download:before { + content: "\f0ed"; +} +.fa-cloud-upload:before { + content: "\f0ee"; +} +.fa-user-md:before { + content: "\f0f0"; +} +.fa-stethoscope:before { + content: "\f0f1"; +} +.fa-suitcase:before { + content: "\f0f2"; +} +.fa-bell-o:before { + content: "\f0a2"; +} +.fa-coffee:before { + content: "\f0f4"; +} +.fa-cutlery:before { + content: "\f0f5"; +} +.fa-file-text-o:before { + content: "\f0f6"; +} +.fa-building-o:before { + content: "\f0f7"; +} +.fa-hospital-o:before { + content: "\f0f8"; +} +.fa-ambulance:before { + content: "\f0f9"; +} +.fa-medkit:before { + content: "\f0fa"; +} +.fa-fighter-jet:before { + content: "\f0fb"; +} +.fa-beer:before { + content: "\f0fc"; +} +.fa-h-square:before { + content: "\f0fd"; +} +.fa-plus-square:before { + content: "\f0fe"; +} +.fa-angle-double-left:before { + content: "\f100"; +} +.fa-angle-double-right:before { + content: "\f101"; +} +.fa-angle-double-up:before { + content: "\f102"; +} +.fa-angle-double-down:before { + content: "\f103"; +} +.fa-angle-left:before { + content: "\f104"; +} +.fa-angle-right:before { + content: "\f105"; +} +.fa-angle-up:before { + content: "\f106"; +} +.fa-angle-down:before { + content: "\f107"; +} +.fa-desktop:before { + content: "\f108"; +} +.fa-laptop:before { + content: "\f109"; +} +.fa-tablet:before { + content: "\f10a"; +} +.fa-mobile-phone:before, +.fa-mobile:before { + content: "\f10b"; +} +.fa-circle-o:before { + content: "\f10c"; +} 
+.fa-quote-left:before { + content: "\f10d"; +} +.fa-quote-right:before { + content: "\f10e"; +} +.fa-spinner:before { + content: "\f110"; +} +.fa-circle:before { + content: "\f111"; +} +.fa-mail-reply:before, +.fa-reply:before { + content: "\f112"; +} +.fa-github-alt:before { + content: "\f113"; +} +.fa-folder-o:before { + content: "\f114"; +} +.fa-folder-open-o:before { + content: "\f115"; +} +.fa-smile-o:before { + content: "\f118"; +} +.fa-frown-o:before { + content: "\f119"; +} +.fa-meh-o:before { + content: "\f11a"; +} +.fa-gamepad:before { + content: "\f11b"; +} +.fa-keyboard-o:before { + content: "\f11c"; +} +.fa-flag-o:before { + content: "\f11d"; +} +.fa-flag-checkered:before { + content: "\f11e"; +} +.fa-terminal:before { + content: "\f120"; +} +.fa-code:before { + content: "\f121"; +} +.fa-reply-all:before { + content: "\f122"; +} +.fa-mail-reply-all:before { + content: "\f122"; +} +.fa-star-half-empty:before, +.fa-star-half-full:before, +.fa-star-half-o:before { + content: "\f123"; +} +.fa-location-arrow:before { + content: "\f124"; +} +.fa-crop:before { + content: "\f125"; +} +.fa-code-fork:before { + content: "\f126"; +} +.fa-unlink:before, +.fa-chain-broken:before { + content: "\f127"; +} +.fa-question:before { + content: "\f128"; +} +.fa-info:before { + content: "\f129"; +} +.fa-exclamation:before { + content: "\f12a"; +} +.fa-superscript:before { + content: "\f12b"; +} +.fa-subscript:before { + content: "\f12c"; +} +.fa-eraser:before { + content: "\f12d"; +} +.fa-puzzle-piece:before { + content: "\f12e"; +} +.fa-microphone:before { + content: "\f130"; +} +.fa-microphone-slash:before { + content: "\f131"; +} +.fa-shield:before { + content: "\f132"; +} +.fa-calendar-o:before { + content: "\f133"; +} +.fa-fire-extinguisher:before { + content: "\f134"; +} +.fa-rocket:before { + content: "\f135"; +} +.fa-maxcdn:before { + content: "\f136"; +} +.fa-chevron-circle-left:before { + content: "\f137"; +} +.fa-chevron-circle-right:before { + content: "\f138"; 
+} +.fa-chevron-circle-up:before { + content: "\f139"; +} +.fa-chevron-circle-down:before { + content: "\f13a"; +} +.fa-html5:before { + content: "\f13b"; +} +.fa-css3:before { + content: "\f13c"; +} +.fa-anchor:before { + content: "\f13d"; +} +.fa-unlock-alt:before { + content: "\f13e"; +} +.fa-bullseye:before { + content: "\f140"; +} +.fa-ellipsis-h:before { + content: "\f141"; +} +.fa-ellipsis-v:before { + content: "\f142"; +} +.fa-rss-square:before { + content: "\f143"; +} +.fa-play-circle:before { + content: "\f144"; +} +.fa-ticket:before { + content: "\f145"; +} +.fa-minus-square:before { + content: "\f146"; +} +.fa-minus-square-o:before { + content: "\f147"; +} +.fa-level-up:before { + content: "\f148"; +} +.fa-level-down:before { + content: "\f149"; +} +.fa-check-square:before { + content: "\f14a"; +} +.fa-pencil-square:before { + content: "\f14b"; +} +.fa-external-link-square:before { + content: "\f14c"; +} +.fa-share-square:before { + content: "\f14d"; +} +.fa-compass:before { + content: "\f14e"; +} +.fa-toggle-down:before, +.fa-caret-square-o-down:before { + content: "\f150"; +} +.fa-toggle-up:before, +.fa-caret-square-o-up:before { + content: "\f151"; +} +.fa-toggle-right:before, +.fa-caret-square-o-right:before { + content: "\f152"; +} +.fa-euro:before, +.fa-eur:before { + content: "\f153"; +} +.fa-gbp:before { + content: "\f154"; +} +.fa-dollar:before, +.fa-usd:before { + content: "\f155"; +} +.fa-rupee:before, +.fa-inr:before { + content: "\f156"; +} +.fa-cny:before, +.fa-rmb:before, +.fa-yen:before, +.fa-jpy:before { + content: "\f157"; +} +.fa-ruble:before, +.fa-rouble:before, +.fa-rub:before { + content: "\f158"; +} +.fa-won:before, +.fa-krw:before { + content: "\f159"; +} +.fa-bitcoin:before, +.fa-btc:before { + content: "\f15a"; +} +.fa-file:before { + content: "\f15b"; +} +.fa-file-text:before { + content: "\f15c"; +} +.fa-sort-alpha-asc:before { + content: "\f15d"; +} +.fa-sort-alpha-desc:before { + content: "\f15e"; +} 
+.fa-sort-amount-asc:before { + content: "\f160"; +} +.fa-sort-amount-desc:before { + content: "\f161"; +} +.fa-sort-numeric-asc:before { + content: "\f162"; +} +.fa-sort-numeric-desc:before { + content: "\f163"; +} +.fa-thumbs-up:before { + content: "\f164"; +} +.fa-thumbs-down:before { + content: "\f165"; +} +.fa-youtube-square:before { + content: "\f166"; +} +.fa-youtube:before { + content: "\f167"; +} +.fa-xing:before { + content: "\f168"; +} +.fa-xing-square:before { + content: "\f169"; +} +.fa-youtube-play:before { + content: "\f16a"; +} +.fa-dropbox:before { + content: "\f16b"; +} +.fa-stack-overflow:before { + content: "\f16c"; +} +.fa-instagram:before { + content: "\f16d"; +} +.fa-flickr:before { + content: "\f16e"; +} +.fa-adn:before { + content: "\f170"; +} +.fa-bitbucket:before { + content: "\f171"; +} +.fa-bitbucket-square:before { + content: "\f172"; +} +.fa-tumblr:before { + content: "\f173"; +} +.fa-tumblr-square:before { + content: "\f174"; +} +.fa-long-arrow-down:before { + content: "\f175"; +} +.fa-long-arrow-up:before { + content: "\f176"; +} +.fa-long-arrow-left:before { + content: "\f177"; +} +.fa-long-arrow-right:before { + content: "\f178"; +} +.fa-apple:before { + content: "\f179"; +} +.fa-windows:before { + content: "\f17a"; +} +.fa-android:before { + content: "\f17b"; +} +.fa-linux:before { + content: "\f17c"; +} +.fa-dribbble:before { + content: "\f17d"; +} +.fa-skype:before { + content: "\f17e"; +} +.fa-foursquare:before { + content: "\f180"; +} +.fa-trello:before { + content: "\f181"; +} +.fa-female:before { + content: "\f182"; +} +.fa-male:before { + content: "\f183"; +} +.fa-gittip:before { + content: "\f184"; +} +.fa-sun-o:before { + content: "\f185"; +} +.fa-moon-o:before { + content: "\f186"; +} +.fa-archive:before { + content: "\f187"; +} +.fa-bug:before { + content: "\f188"; +} +.fa-vk:before { + content: "\f189"; +} +.fa-weibo:before { + content: "\f18a"; +} +.fa-renren:before { + content: "\f18b"; +} +.fa-pagelines:before { + 
content: "\f18c"; +} +.fa-stack-exchange:before { + content: "\f18d"; +} +.fa-arrow-circle-o-right:before { + content: "\f18e"; +} +.fa-arrow-circle-o-left:before { + content: "\f190"; +} +.fa-toggle-left:before, +.fa-caret-square-o-left:before { + content: "\f191"; +} +.fa-dot-circle-o:before { + content: "\f192"; +} +.fa-wheelchair:before { + content: "\f193"; +} +.fa-vimeo-square:before { + content: "\f194"; +} +.fa-turkish-lira:before, +.fa-try:before { + content: "\f195"; +} +.fa-plus-square-o:before { + content: "\f196"; +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/fontawesome/css/font-awesome.min.css b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/css/font-awesome.min.css new file mode 100644 index 00000000..449d6ac5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/css/font-awesome.min.css @@ -0,0 +1,4 @@ +/*! + * Font Awesome 4.0.3 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:'FontAwesome';src:url('../fonts/fontawesome-webfont.eot?v=4.0.3');src:url('../fonts/fontawesome-webfont.eot?#iefix&v=4.0.3') format('embedded-opentype'),url('../fonts/fontawesome-webfont.woff?v=4.0.3') format('woff'),url('../fonts/fontawesome-webfont.ttf?v=4.0.3') format('truetype'),url('../fonts/fontawesome-webfont.svg?v=4.0.3#fontawesomeregular') 
format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.3333333333333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.2857142857142858em;text-align:center}.fa-ul{padding-left:0;margin-left:2.142857142857143em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.142857142857143em;width:2.142857142857143em;top:.14285714285714285em;text-align:center}.fa-li.fa-lg{left:-1.8571428571428572em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:spin 2s infinite linear;-moz-animation:spin 2s infinite linear;-o-animation:spin 2s infinite linear;animation:spin 2s infinite linear}@-moz-keyframes spin{0%{-moz-transform:rotate(0deg)}100%{-moz-transform:rotate(359deg)}}@-webkit-keyframes spin{0%{-webkit-transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg)}}@-o-keyframes spin{0%{-o-transform:rotate(0deg)}100%{-o-transform:rotate(359deg)}}@-ms-keyframes spin{0%{-ms-transform:rotate(0deg)}100%{-ms-transform:rotate(359deg)}}@keyframes 
spin{0%{transform:rotate(0deg)}100%{transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0,mirror=1);-webkit-transform:scale(-1,1);-moz-transform:scale(-1,1);-ms-transform:scale(-1,1);-o-transform:scale(-1,1);transform:scale(-1,1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2,mirror=1);-webkit-transform:scale(1,-1);-moz-transform:scale(1,-1);-ms-transform:scale(1,-1);-o-transform:scale(1,-1);transform:scale(1,-1)}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-
off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{
content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:
before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-rss:before{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{conte
nt:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-asc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-desc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\
f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-reply-all:before{content:"\f122"}.fa-mail-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calenda
r-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.f
a-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"} \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fileembed.go 
b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fileembed.go new file mode 100644 index 00000000..197cbff9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fileembed.go @@ -0,0 +1,30 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package fontawesome provides access to the Font Awesome font library and +embeds them into the Go binary when compiled with the genfileembed +tool. + +See http://fortawesome.github.io/Font-Awesome/ + +#fileembed pattern .*\.(css|eot|svg|ttf|woff|otf)$ +*/ +package fontawesome + +import "camlistore.org/pkg/fileembed" + +var Files = &fileembed.Files{} diff --git a/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/FontAwesome.otf b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/FontAwesome.otf new file mode 100644 index 00000000..8b0f54e4 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/FontAwesome.otf differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/fontawesome-webfont.eot b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/fontawesome-webfont.eot new file mode 100755 index 00000000..7c79c6a6 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/fontawesome-webfont.eot differ diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/fontawesome-webfont.svg b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/fontawesome-webfont.svg new file mode 100755 index 00000000..45fdf338 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/fontawesome-webfont.svg @@ -0,0 +1,414 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/fontawesome-webfont.ttf b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/fontawesome-webfont.ttf new file mode 100755 index 00000000..e89738de Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/fontawesome-webfont.ttf differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/fontawesome-webfont.woff b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/fontawesome-webfont.woff new file mode 100755 index 00000000..8c1748aa Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/fontawesome/fonts/fontawesome-webfont.woff differ diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/github.com/bradfitz/gomemcache/memcache/memcache.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/bradfitz/gomemcache/memcache/memcache.go new file mode 100644 index 00000000..2a85881a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/bradfitz/gomemcache/memcache/memcache.go @@ -0,0 +1,579 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package memcache provides a client for the memcached cache server. +package memcache + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + + "strconv" + "strings" + "sync" + "time" +) + +// Similar to: +// http://code.google.com/appengine/docs/go/memcache/reference.html + +var ( + // ErrCacheMiss means that a Get failed because the item wasn't present. + ErrCacheMiss = errors.New("memcache: cache miss") + + // ErrCASConflict means that a CompareAndSwap call failed due to the + // cached value being modified between the Get and the CompareAndSwap. + // If the cached value was simply evicted rather than replaced, + // ErrNotStored will be returned instead. + ErrCASConflict = errors.New("memcache: compare-and-swap conflict") + + // ErrNotStored means that a conditional write operation (i.e. Add or + // CompareAndSwap) failed because the condition was not satisfied. + ErrNotStored = errors.New("memcache: item not stored") + + // ErrServer means that a server error occurred. 
+ ErrServerError = errors.New("memcache: server error") + + // ErrNoStats means that no statistics were available. + ErrNoStats = errors.New("memcache: no statistics available") + + // ErrMalformedKey is returned when an invalid key is used. + // Keys must be at maximum 250 bytes long, ASCII, and not + // contain whitespace or control characters. + ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters") + + // ErrNoServers is returned when no servers are configured or available. + ErrNoServers = errors.New("memcache: no servers configured or available") +) + +// DefaultTimeout is the default socket read/write timeout. +const DefaultTimeout = time.Duration(100) * time.Millisecond + +const ( + buffered = 8 // arbitrary buffered channel size, for readability + maxIdleConnsPerAddr = 2 // TODO(bradfitz): make this configurable? +) + +// resumableError returns true if err is only a protocol-level cache error. +// This is used to determine whether or not a server connection should +// be re-used or not. If an error occurs, by default we don't reuse the +// connection, unless it was just a cache error. +func resumableError(err error) bool { + switch err { + case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey: + return true + } + return false +} + +func legalKey(key string) bool { + if len(key) > 250 { + return false + } + for i := 0; i < len(key); i++ { + if key[i] <= ' ' || key[i] > 0x7e { + return false + } + } + return true +} + +var ( + crlf = []byte("\r\n") + resultStored = []byte("STORED\r\n") + resultNotStored = []byte("NOT_STORED\r\n") + resultExists = []byte("EXISTS\r\n") + resultNotFound = []byte("NOT_FOUND\r\n") + resultDeleted = []byte("DELETED\r\n") + resultEnd = []byte("END\r\n") + + resultClientErrorPrefix = []byte("CLIENT_ERROR ") +) + +// New returns a memcache client using the provided server(s) +// with equal weight. If a server is listed multiple times, +// it gets a proportional amount of weight. 
+func New(server ...string) *Client { + ss := new(ServerList) + ss.SetServers(server...) + return NewFromSelector(ss) +} + +// NewFromSelector returns a new Client using the provided ServerSelector. +func NewFromSelector(ss ServerSelector) *Client { + return &Client{selector: ss} +} + +// Client is a memcache client. +// It is safe for unlocked use by multiple concurrent goroutines. +type Client struct { + // Timeout specifies the socket read/write timeout. + // If zero, DefaultTimeout is used. + Timeout time.Duration + + selector ServerSelector + + lk sync.Mutex + freeconn map[net.Addr][]*conn +} + +// Item is an item to be got or stored in a memcached server. +type Item struct { + // Key is the Item's key (250 bytes maximum). + Key string + + // Value is the Item's value. + Value []byte + + // Object is the Item's value for use with a Codec. + Object interface{} + + // Flags are server-opaque flags whose semantics are entirely + // up to the app. + Flags uint32 + + // Expiration is the cache expiration time, in seconds: either a relative + // time from now (up to 1 month), or an absolute Unix epoch time. + // Zero means the Item has no expiration time. + Expiration int32 + + // Compare and swap ID. + casid uint64 +} + +// conn is a connection to a server. +type conn struct { + nc net.Conn + rw *bufio.ReadWriter + addr net.Addr + c *Client +} + +// release returns this connection back to the client's free pool +func (cn *conn) release() { + cn.c.putFreeConn(cn.addr, cn) +} + +func (cn *conn) extendDeadline() { + cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout())) +} + +// condRelease releases this connection if the error pointed to by err +// is is nil (not an error) or is only a protocol level error (e.g. a +// cache miss). The purpose is to not recycle TCP connections that +// are bad. 
+func (cn *conn) condRelease(err *error) { + if *err == nil || resumableError(*err) { + cn.release() + } +} + +func (c *Client) putFreeConn(addr net.Addr, cn *conn) { + c.lk.Lock() + defer c.lk.Unlock() + if c.freeconn == nil { + c.freeconn = make(map[net.Addr][]*conn) + } + freelist := c.freeconn[addr] + if len(freelist) >= maxIdleConnsPerAddr { + cn.nc.Close() + return + } + c.freeconn[addr] = append(freelist, cn) +} + +func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) { + c.lk.Lock() + defer c.lk.Unlock() + if c.freeconn == nil { + return nil, false + } + freelist, ok := c.freeconn[addr] + if !ok || len(freelist) == 0 { + return nil, false + } + cn = freelist[len(freelist)-1] + c.freeconn[addr] = freelist[:len(freelist)-1] + return cn, true +} + +func (c *Client) netTimeout() time.Duration { + if c.Timeout != 0 { + return c.Timeout + } + return DefaultTimeout +} + +// ConnectTimeoutError is the error type used when it takes +// too long to connect to the desired host. This level of +// detail can generally be ignored. +type ConnectTimeoutError struct { + Addr net.Addr +} + +func (cte *ConnectTimeoutError) Error() string { + return "memcache: connect timeout to " + cte.Addr.String() +} + +func (c *Client) dial(addr net.Addr) (net.Conn, error) { + type connError struct { + cn net.Conn + err error + } + ch := make(chan connError) + go func() { + nc, err := net.Dial(addr.Network(), addr.String()) + ch <- connError{nc, err} + }() + select { + case ce := <-ch: + return ce.cn, ce.err + case <-time.After(c.netTimeout()): + // Too slow. Fall through. 
+ } + // Close the conn if it does end up finally coming in + go func() { + ce := <-ch + if ce.err == nil { + ce.cn.Close() + } + }() + return nil, &ConnectTimeoutError{addr} +} + +func (c *Client) getConn(addr net.Addr) (*conn, error) { + cn, ok := c.getFreeConn(addr) + if ok { + cn.extendDeadline() + return cn, nil + } + nc, err := c.dial(addr) + if err != nil { + return nil, err + } + cn = &conn{ + nc: nc, + addr: addr, + rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)), + c: c, + } + cn.extendDeadline() + return cn, nil +} + +func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error { + addr, err := c.selector.PickServer(item.Key) + if err != nil { + return err + } + cn, err := c.getConn(addr) + if err != nil { + return err + } + defer cn.condRelease(&err) + if err := fn(c, cn.rw, item); err != nil { + return err + } + return nil +} + +// Get gets the item for the given key. ErrCacheMiss is returned for a +// memcache cache miss. The key must be at most 250 bytes in length. 
+func (c *Client) Get(key string) (item *Item, err error) { + err = c.withKeyAddr(key, func(addr net.Addr) error { + return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it }) + }) + if err == nil && item == nil { + err = ErrCacheMiss + } + return +} + +func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) { + if !legalKey(key) { + return ErrMalformedKey + } + addr, err := c.selector.PickServer(key) + if err != nil { + return err + } + return fn(addr) +} + +func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) { + cn, err := c.getConn(addr) + if err != nil { + return err + } + defer cn.condRelease(&err) + return fn(cn.rw) +} + +func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error { + return c.withKeyAddr(key, func(addr net.Addr) error { + return c.withAddrRw(addr, fn) + }) +} + +func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error { + return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { + if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil { + return err + } + if err := rw.Flush(); err != nil { + return err + } + if err := parseGetResponse(rw.Reader, cb); err != nil { + return err + } + return nil + }) +} + +// GetMulti is a batch version of Get. The returned map from keys to +// items may have fewer elements than the input slice, due to memcache +// cache misses. Each key must be at most 250 bytes in length. +// If no error is returned, the returned map will also be non-nil. 
+func (c *Client) GetMulti(keys []string) (map[string]*Item, error) { + var lk sync.Mutex + m := make(map[string]*Item) + addItemToMap := func(it *Item) { + lk.Lock() + defer lk.Unlock() + m[it.Key] = it + } + + keyMap := make(map[net.Addr][]string) + for _, key := range keys { + if !legalKey(key) { + return nil, ErrMalformedKey + } + addr, err := c.selector.PickServer(key) + if err != nil { + return nil, err + } + keyMap[addr] = append(keyMap[addr], key) + } + + ch := make(chan error, buffered) + for addr, keys := range keyMap { + go func(addr net.Addr, keys []string) { + ch <- c.getFromAddr(addr, keys, addItemToMap) + }(addr, keys) + } + + var err error + for _ = range keyMap { + if ge := <-ch; ge != nil { + err = ge + } + } + return m, err +} + +// parseGetResponse reads a GET response from r and calls cb for each +// read and allocated Item +func parseGetResponse(r *bufio.Reader, cb func(*Item)) error { + for { + line, err := r.ReadSlice('\n') + if err != nil { + return err + } + if bytes.Equal(line, resultEnd) { + return nil + } + it := new(Item) + var size int + n, err := fmt.Sscanf(string(line), "VALUE %s %d %d %d\r\n", + &it.Key, &it.Flags, &size, &it.casid) + if err != nil { + return err + } + if n != 4 { + return fmt.Errorf("memcache: unexpected line in get response: %q", string(line)) + } + it.Value, err = ioutil.ReadAll(io.LimitReader(r, int64(size)+2)) + if err != nil { + return err + } + if !bytes.HasSuffix(it.Value, crlf) { + return fmt.Errorf("memcache: corrupt get result read") + } + it.Value = it.Value[:size] + cb(it) + } + panic("unreached") +} + +// Set writes the given item, unconditionally. +func (c *Client) Set(item *Item) error { + return c.onItem(item, (*Client).set) +} + +func (c *Client) set(rw *bufio.ReadWriter, item *Item) error { + return c.populateOne(rw, "set", item) +} + +// Add writes the given item, if no value already exists for its +// key. ErrNotStored is returned if that condition is not met. 
+func (c *Client) Add(item *Item) error { + return c.onItem(item, (*Client).add) +} + +func (c *Client) add(rw *bufio.ReadWriter, item *Item) error { + return c.populateOne(rw, "add", item) +} + +// CompareAndSwap writes the given item that was previously returned +// by Get, if the value was neither modified or evicted between the +// Get and the CompareAndSwap calls. The item's Key should not change +// between calls but all other item fields may differ. ErrCASConflict +// is returned if the value was modified in between the +// calls. ErrNotStored is returned if the value was evicted in between +// the calls. +func (c *Client) CompareAndSwap(item *Item) error { + return c.onItem(item, (*Client).cas) +} + +func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error { + return c.populateOne(rw, "cas", item) +} + +func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error { + if !legalKey(item.Key) { + return ErrMalformedKey + } + var err error + if verb == "cas" { + _, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n", + verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid) + } else { + _, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n", + verb, item.Key, item.Flags, item.Expiration, len(item.Value)) + } + if err != nil { + return err + } + if _, err = rw.Write(item.Value); err != nil { + return err + } + if _, err := rw.Write(crlf); err != nil { + return err + } + if err := rw.Flush(); err != nil { + return err + } + line, err := rw.ReadSlice('\n') + if err != nil { + return err + } + switch { + case bytes.Equal(line, resultStored): + return nil + case bytes.Equal(line, resultNotStored): + return ErrNotStored + case bytes.Equal(line, resultExists): + return ErrCASConflict + case bytes.Equal(line, resultNotFound): + return ErrCacheMiss + } + return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line)) +} + +func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) { + 
_, err := fmt.Fprintf(rw, format, args...) + if err != nil { + return nil, err + } + if err := rw.Flush(); err != nil { + return nil, err + } + line, err := rw.ReadSlice('\n') + return line, err +} + +func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error { + line, err := writeReadLine(rw, format, args...) + if err != nil { + return err + } + switch { + case bytes.Equal(line, expect): + return nil + case bytes.Equal(line, resultNotStored): + return ErrNotStored + case bytes.Equal(line, resultExists): + return ErrCASConflict + case bytes.Equal(line, resultNotFound): + return ErrCacheMiss + } + return fmt.Errorf("memcache: unexpected response line: %q", string(line)) +} + +// Delete deletes the item with the provided key. The error ErrCacheMiss is +// returned if the item didn't already exist in the cache. +func (c *Client) Delete(key string) error { + return c.withKeyRw(key, func(rw *bufio.ReadWriter) error { + return writeExpectf(rw, resultDeleted, "delete %s\r\n", key) + }) +} + +// Increment atomically increments key by delta. The return value is +// the new value after being incremented or an error. If the value +// didn't exist in memcached the error is ErrCacheMiss. The value in +// memcached must be an decimal number, or an error will be returned. +// On 64-bit overflow, the new value wraps around. +func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) { + return c.incrDecr("incr", key, delta) +} + +// Decrement atomically decrements key by delta. The return value is +// the new value after being decremented or an error. If the value +// didn't exist in memcached the error is ErrCacheMiss. The value in +// memcached must be an decimal number, or an error will be returned. +// On underflow, the new value is capped at zero and does not wrap +// around. 
+func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) { + return c.incrDecr("decr", key, delta) +} + +func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) { + var val uint64 + err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error { + line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta) + if err != nil { + return err + } + switch { + case bytes.Equal(line, resultNotFound): + return ErrCacheMiss + case bytes.HasPrefix(line, resultClientErrorPrefix): + errMsg := line[len(resultClientErrorPrefix) : len(line)-2] + return errors.New("memcache: client error: " + string(errMsg)) + } + val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64) + if err != nil { + return err + } + return nil + }) + return val, err +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/bradfitz/gomemcache/memcache/memcache_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/bradfitz/gomemcache/memcache/memcache_test.go new file mode 100644 index 00000000..e4b9d3c0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/bradfitz/gomemcache/memcache/memcache_test.go @@ -0,0 +1,164 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package memcache provides a client for the memcached cache server. 
+package memcache + +import ( + "fmt" + "net" + "os" + "os/exec" + "strings" + "testing" + "time" +) + +const testServer = "localhost:11211" + +func setup(t *testing.T) bool { + c, err := net.Dial("tcp", testServer) + if err != nil { + t.Logf("skipping test; no server running at %s", testServer) + return false + } + c.Write([]byte("flush_all\r\n")) + c.Close() + return true +} + +func TestLocalhost(t *testing.T) { + if !setup(t) { + return + } + testWithClient(t, New(testServer)) +} + +// Run the memcached binary as a child process and connect to its unix socket. +func TestUnixSocket(t *testing.T) { + sock := fmt.Sprintf("/tmp/test-gomemcache-%d.sock", os.Getpid()) + cmd := exec.Command("memcached", "-s", sock) + if err := cmd.Start(); err != nil { + t.Logf("skipping test; couldn't find memcached") + return + } + defer cmd.Wait() + defer cmd.Process.Kill() + + // Wait a bit for the socket to appear. + for i := 0; i < 10; i++ { + if _, err := os.Stat(sock); err == nil { + break + } + time.Sleep(time.Duration(25*i) * time.Millisecond) + } + + testWithClient(t, New(sock)) +} + +func testWithClient(t *testing.T, c *Client) { + checkErr := func(err error, format string, args ...interface{}) { + if err != nil { + t.Fatalf(format, args...) 
+ } + } + + mustSet := func(it *Item) { + if err := c.Set(it); err != nil { + t.Fatalf("failed to Set %#v: %v", *it, err) + } + } + + // Set + foo := &Item{Key: "foo", Value: []byte("fooval"), Flags: 123} + err := c.Set(foo) + checkErr(err, "first set(foo): %v", err) + err = c.Set(foo) + checkErr(err, "second set(foo): %v", err) + + // Get + it, err := c.Get("foo") + checkErr(err, "get(foo): %v", err) + if it.Key != "foo" { + t.Errorf("get(foo) Key = %q, want foo", it.Key) + } + if string(it.Value) != "fooval" { + t.Errorf("get(foo) Value = %q, want fooval", string(it.Value)) + } + if it.Flags != 123 { + t.Errorf("get(foo) Flags = %v, want 123", it.Flags) + } + + // Add + bar := &Item{Key: "bar", Value: []byte("barval")} + err = c.Add(bar) + checkErr(err, "first add(foo): %v", err) + if err := c.Add(bar); err != ErrNotStored { + t.Fatalf("second add(foo) want ErrNotStored, got %v", err) + } + + // GetMulti + m, err := c.GetMulti([]string{"foo", "bar"}) + checkErr(err, "GetMulti: %v", err) + if g, e := len(m), 2; g != e { + t.Errorf("GetMulti: got len(map) = %d, want = %d", g, e) + } + if _, ok := m["foo"]; !ok { + t.Fatalf("GetMulti: didn't get key 'foo'") + } + if _, ok := m["bar"]; !ok { + t.Fatalf("GetMulti: didn't get key 'bar'") + } + if g, e := string(m["foo"].Value), "fooval"; g != e { + t.Errorf("GetMulti: foo: got %q, want %q", g, e) + } + if g, e := string(m["bar"].Value), "barval"; g != e { + t.Errorf("GetMulti: bar: got %q, want %q", g, e) + } + + // Delete + err = c.Delete("foo") + checkErr(err, "Delete: %v", err) + it, err = c.Get("foo") + if err != ErrCacheMiss { + t.Errorf("post-Delete want ErrCacheMiss, got %v", err) + } + + // Incr/Decr + mustSet(&Item{Key: "num", Value: []byte("42")}) + n, err := c.Increment("num", 8) + checkErr(err, "Increment num + 8: %v", err) + if n != 50 { + t.Fatalf("Increment num + 8: want=50, got=%d", n) + } + n, err = c.Decrement("num", 49) + checkErr(err, "Decrement: %v", err) + if n != 1 { + t.Fatalf("Decrement 49: 
want=1, got=%d", n) + } + err = c.Delete("num") + checkErr(err, "delete num: %v", err) + n, err = c.Increment("num", 1) + if err != ErrCacheMiss { + t.Fatalf("increment post-delete: want ErrCacheMiss, got %v", err) + } + mustSet(&Item{Key: "num", Value: []byte("not-numeric")}) + n, err = c.Increment("num", 1) + if err == nil || !strings.Contains(err.Error(), "client error") { + t.Fatalf("increment non-number: want client error, got %v", err) + } + +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/bradfitz/gomemcache/memcache/selector.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/bradfitz/gomemcache/memcache/selector.go new file mode 100644 index 00000000..3893cc9e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/bradfitz/gomemcache/memcache/selector.go @@ -0,0 +1,84 @@ +/* +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package memcache + +import ( + "hash/crc32" + "net" + "strings" + "sync" +) + +// ServerSelector is the interface that selects a memcache server +// as a function of the item's key. +// +// All ServerSelector implementations must be threadsafe. +type ServerSelector interface { + // PickServer returns the server address that a given item + // should be shared onto. + PickServer(key string) (net.Addr, error) +} + +// ServerList is a simple ServerSelector. Its zero value is usable. 
+type ServerList struct { + lk sync.RWMutex + addrs []net.Addr +} + +// SetServers changes a ServerList's set of servers at runtime and is +// threadsafe. +// +// Each server is given equal weight. A server is given more weight +// if it's listed multiple times. +// +// SetServers returns an error if any of the server names fail to +// resolve. No attempt is made to connect to the server. If any error +// is returned, no changes are made to the ServerList. +func (ss *ServerList) SetServers(servers ...string) error { + naddr := make([]net.Addr, len(servers)) + for i, server := range servers { + if strings.Contains(server, "/") { + addr, err := net.ResolveUnixAddr("unix", server) + if err != nil { + return err + } + naddr[i] = addr + } else { + tcpaddr, err := net.ResolveTCPAddr("tcp", server) + if err != nil { + return err + } + naddr[i] = tcpaddr + } + } + + ss.lk.Lock() + defer ss.lk.Unlock() + ss.addrs = naddr + return nil +} + +func (ss *ServerList) PickServer(key string) (net.Addr, error) { + ss.lk.RLock() + defer ss.lk.RUnlock() + if len(ss.addrs) == 0 { + return nil, ErrNoServers + } + // TODO-GO: remove this copy + cs := crc32.ChecksumIEEE([]byte(key)) + return ss.addrs[cs%uint32(len(ss.addrs))], nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/bradfitz/latlong/latlong.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/bradfitz/latlong/latlong.go new file mode 100644 index 00000000..ae5d55a3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/bradfitz/latlong/latlong.go @@ -0,0 +1,253 @@ +/* +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package latlong maps from a latitude and longitude to a timezone. +// +// It uses the data from http://efele.net/maps/tz/world/ compressed down +// to an internal form optimized for low memory overhead and fast lookups +// at the expense of perfect accuracy when close to borders. The data files +// are compiled in to this package and do not require explicit loading. +package latlong + +import ( + "bufio" + "compress/gzip" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "sort" + "strings" + "sync" +) + +// Populated by z_gen_tables.go: +var ( + degPixels = -1 + zoomLevels [6]*zoomLevel + uniqueLeavesPacked string + leaf []zoneLooker +) + +// LookupZoneName returns the timezone name at the given latitude and +// longitude. The returned name is either the empty string (if not +// found) or a name suitable for passing to time.LoadLocation. For +// example, "America/New_York". 
+func LookupZoneName(lat, long float64) string { + x := int((long + 180) * float64(degPixels)) + y := int((90 - lat) * float64(degPixels)) + if x < 0 { + x = 0 + } else if x >= 360*degPixels { + x = 360*degPixels - 1 + } + if y < 0 { + y = 0 + } else if y >= 180*degPixels { + y = 180*degPixels - 1 + } + return lookupPixel(x, y) +} + +func lookupPixel(x, y int) string { + if degPixels == -1 { + return "tables not generated yet" + } + unpackOnce.Do(unpackTables) + + for level := 5; level >= 0; level-- { + shift := 3 + uint8(level) + xt := uint16(x >> shift) + yt := uint16(y >> shift) + tk := newTileKey(uint8(level), xt, yt) + zone, ok := zoomLevels[level].LookupZone(x, y, tk) + if ok { + return zone + } + } + return "" +} + +var unpackOnce sync.Once + +func unpackTables() { + for _, zl := range zoomLevels { + zr, err := gzip.NewReader( + base64.NewDecoder(base64.StdEncoding, + strings.NewReader(zl.gzipData))) + check(err) + slurp, err := ioutil.ReadAll(zr) + check(err) + if len(slurp)%6 != 0 { + panic("bogus encoded tileLooker length") + } + zl.tiles = make([]tileLooker, len(slurp)/6) + for i := range zl.tiles { + idx := i * 6 + zl.tiles[i] = tileLooker{ + tileKey(binary.BigEndian.Uint32(slurp[idx : idx+4])), + binary.BigEndian.Uint16(slurp[idx+4 : idx+6]), + } + } + } + + zr, err := gzip.NewReader( + base64.NewDecoder(base64.StdEncoding, + strings.NewReader(uniqueLeavesPacked))) + check(err) + br := bufio.NewReader(zr) + var buf [128]byte + for i := range leaf { + t, err := br.ReadByte() + check(err) + switch t { + default: + panic("unknown leaf type: " + fmt.Sprintf("%q", t)) + case 'S': // static zone + v, err := br.ReadBytes(0) // null-terminated + check(err) + leaf[i] = staticZone(string(v[:len(v)-1])) + case '2': // two-timezone 1bpp bitmap (pass.bitmapPixmapBytes) + _, err := io.ReadFull(br, buf[:12]) + check(err) + t := oneBitTile{ + idx: [2]uint16{ + binary.BigEndian.Uint16(buf[0:2]), + binary.BigEndian.Uint16(buf[2:4]), + }, + } + bits := 
binary.BigEndian.Uint64(buf[4:12]) + for y := range t.rows { + for x := 0; x < 8; x++ { + if bits&(1<> 28) +} + +func (v tileKey) x() uint16 { + return uint16(v & (1<<14 - 1)) +} + +func (v tileKey) y() uint16 { + return uint16((v >> 14) & (1<<14 - 1)) +} + +type tileLooker struct { + tile tileKey + idx uint16 // index into leaf +} + +type zoomLevel struct { + gzipData string // compressed [tilekey][uint16_idx], repeated + tiles []tileLooker // lazily populated +} + +func (zl *zoomLevel) LookupZone(x, y int, tk tileKey) (zone string, ok bool) { + pos := sort.Search(len(zl.tiles), func(i int) bool { + return zl.tiles[i].tile >= tk + }) + if pos >= len(zl.tiles) { + return + } + tl := zl.tiles[pos] + if tl.tile != tk { + return + } + return leaf[tl.idx].LookupZone(x, y, tk) +} + +// A oneBitTile represents a fully opaque 8x8 grid tile that only has +// two colors. The idx represents the indexes of the two colors (the palette) +// table, and the rows are the bits. +type oneBitTile struct { + idx [2]uint16 // bit 0 and bit 1's index into []leaf + rows [8]uint8 // [y], then 1<, ADDR: or +// named ports. +// Listeners are always TCP. +func Listen(addr string) (net.Listener, error) { + a := &Addr{s: addr} + return a.Listen() +} + +// Usage returns a descriptive usage message for a flag given the name +// of thing being addressed. +func Usage(name string) string { + if name == "" { + name = "Listen address" + } + if !strings.HasSuffix(name, " address") { + name += " address" + } + return name + "; may be port, :port, ip:port, FD:, or ADDR: to use named runsit ports" +} + +// Addr is a flag variable. Use like: +// +// var webPort listen.Addr +// flag.Var(&webPort, "web_addr", listen.Usage("Web server address")) +// flag.Parse() +// webListener, err := webPort.Listen() +type Addr struct { + s string + ln net.Listener + err error +} + +func (a *Addr) String() string { + return a.s +} + +// Set implements the flag.Value interface. 
+func (a *Addr) Set(v string) error { + a.s = v + + // Try the requested port by runsit port name first. + fd, ok, err := namedPort(v) + if err != nil { + return err + } + if ok { + return a.listenOnFD(fd) + } + + if strings.HasPrefix(v, "FD:") { + fdStr := v[len("FD:"):] + fd, err := strconv.ParseUint(fdStr, 10, 32) + if err != nil { + return fmt.Errorf("invalid file descriptor %q: %v", fdStr, err) + } + return a.listenOnFD(uintptr(fd)) + } + + ipPort := v + if isPort(v) { + ipPort = ":" + v + } + + _, _, err = net.SplitHostPort(ipPort) + if err != nil { + return fmt.Errorf("invalid PORT or IP:PORT %q: %v", v, err) + } + a.ln, err = net.Listen("tcp", ipPort) + return err +} + +func isPort(s string) bool { + _, err := strconv.ParseUint(s, 10, 16) + return err == nil +} + +func (a *Addr) listenOnFD(fd uintptr) (err error) { + f := os.NewFile(fd, fmt.Sprintf("fd #%d from runsit parent", fd)) + a.ln, err = net.FileListener(f) + return +} + +func namedPort(name string) (fd uintptr, ok bool, err error) { + s := os.Getenv("RUNSIT_PORTFD_" + name) + if s == "" { + return + } + u64, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return + } + return uintptr(u64), true, nil +} + +var _ flag.Value = (*Addr)(nil) + +// Listen returns the address's TCP listener. +func (a *Addr) Listen() (net.Listener, error) { + // Start the listener now, if there's a default + // and nothing's called Set yet. 
+ if a.err == nil && a.ln == nil && a.s != "" { + if err := a.Set(a.s); err != nil { + return nil, err + } + } + if a.err != nil { + return nil, a.err + } + if a.ln != nil { + return a.ln, nil + } + return nil, errors.New("listen: no error or listener") +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/COPYING b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/COPYING new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/COPYING @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/README.txt b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/README.txt new file mode 100644 index 00000000..a9eeb33d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/README.txt @@ -0,0 +1,3 @@ +File locking library. + +See http://godoc.org/github.com/camlistore/lock diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock.go new file mode 100644 index 00000000..6268527b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock.go @@ -0,0 +1,158 @@ +/* +Copyright 2013 The Go Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lock + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" +) + +// Lock locks the given file, creating the file if necessary. If the +// file already exists, it must have zero size or an error is returned. +// The lock is an exclusive lock (a write lock), but locked files +// should neither be read from nor written to. Such files should have +// zero size and only exist to co-ordinate ownership across processes. +// +// A nil Closer is returned if an error occurred. Otherwise, close that +// Closer to release the lock. +// +// On Linux, FreeBSD and OSX, a lock has the same semantics as fcntl(2)'s +// advisory locks. In particular, closing any other file descriptor for the +// same file will release the lock prematurely. +// +// Attempting to lock a file that is already locked by the current process +// has undefined behavior. +// +// On other operating systems, lock will fallback to using the presence and +// content of a file named name + '.lock' to implement locking behavior. +func Lock(name string) (io.Closer, error) { + return lockFn(name) +} + +var lockFn = lockPortable + +// Portable version not using fcntl. Doesn't handle crashes as gracefully, +// since it can leave stale lock files. +// TODO: write pid of owner to lock file and on race see if pid is +// still alive? 
+func lockPortable(name string) (io.Closer, error) { + absName, err := filepath.Abs(name) + if err != nil { + return nil, fmt.Errorf("can't Lock file %q: can't find abs path: %v", name, err) + } + fi, err := os.Stat(absName) + if err == nil && fi.Size() > 0 { + if isStaleLock(absName) { + os.Remove(absName) + } else { + return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name) + } + } + f, err := os.OpenFile(absName, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_EXCL, 0666) + if err != nil { + return nil, fmt.Errorf("failed to create lock file %s %v", absName, err) + } + if err := json.NewEncoder(f).Encode(&pidLockMeta{OwnerPID: os.Getpid()}); err != nil { + return nil, err + } + return &lockCloser{f: f, abs: absName}, nil +} + +type pidLockMeta struct { + OwnerPID int +} + +func isStaleLock(path string) bool { + f, err := os.Open(path) + if err != nil { + return false + } + defer f.Close() + var meta pidLockMeta + if json.NewDecoder(f).Decode(&meta) != nil { + return false + } + if meta.OwnerPID == 0 { + return false + } + p, err := os.FindProcess(meta.OwnerPID) + if err != nil { + // e.g. on Windows + return true + } + // On unix, os.FindProcess always is true, so we have to send + // it a signal to see if it's alive. + if signalZero != nil { + if p.Signal(signalZero) != nil { + return true + } + } + return false +} + +var signalZero os.Signal // nil or set by lock_sigzero.go + +type lockCloser struct { + f *os.File + abs string + once sync.Once + err error +} + +func (lc *lockCloser) Close() error { + lc.once.Do(lc.close) + return lc.err +} + +func (lc *lockCloser) close() { + if err := lc.f.Close(); err != nil { + lc.err = err + } + if err := os.Remove(lc.abs); err != nil { + lc.err = err + } +} + +var ( + lockmu sync.Mutex + locked = map[string]bool{} // abs path -> true +) + +// unlocker is used by the darwin and linux implementations with fcntl +// advisory locks. 
+type unlocker struct { + f *os.File + abs string +} + +func (u *unlocker) Close() error { + lockmu.Lock() + // Remove is not necessary but it's nice for us to clean up. + // If we do do this, though, it needs to be before the + // u.f.Close below. + os.Remove(u.abs) + if err := u.f.Close(); err != nil { + return err + } + delete(locked, u.abs) + lockmu.Unlock() + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_appengine.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_appengine.go new file mode 100644 index 00000000..ab4cad6a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_appengine.go @@ -0,0 +1,32 @@ +// +build appengine + +/* +Copyright 2013 The Go Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lock + +import ( + "errors" + "io" +) + +func init() { + lockFn = lockAppEngine +} + +func lockAppEngine(name string) (io.Closer, error) { + return nil, errors.New("Lock not available on App Engine") +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_darwin_amd64.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_darwin_amd64.go new file mode 100644 index 00000000..9fea51fe --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_darwin_amd64.go @@ -0,0 +1,80 @@ +// +build darwin,amd64 +// +build !appengine + +/* +Copyright 2013 The Go Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lock + +import ( + "fmt" + "io" + "os" + "path/filepath" + "syscall" + "unsafe" +) + +func init() { + lockFn = lockFcntl +} + +func lockFcntl(name string) (io.Closer, error) { + abs, err := filepath.Abs(name) + if err != nil { + return nil, err + } + lockmu.Lock() + if locked[abs] { + lockmu.Unlock() + return nil, fmt.Errorf("file %q already locked", abs) + } + locked[abs] = true + lockmu.Unlock() + + fi, err := os.Stat(name) + if err == nil && fi.Size() > 0 { + return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name) + } + + f, err := os.Create(name) + if err != nil { + return nil, fmt.Errorf("Lock Create of %s (abs: %s) failed: %v", name, abs, err) + } + + // This type matches C's "struct flock" defined in /usr/include/sys/fcntl.h. + // TODO: move this into the standard syscall package. + k := struct { + Start uint64 // sizeof(off_t): 8 + Len uint64 // sizeof(off_t): 8 + Pid uint32 // sizeof(pid_t): 4 + Type uint16 // sizeof(short): 2 + Whence uint16 // sizeof(short): 2 + }{ + Type: syscall.F_WRLCK, + Whence: uint16(os.SEEK_SET), + Start: 0, + Len: 0, // 0 means to lock the entire file. + Pid: uint32(os.Getpid()), + } + + _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k))) + if errno != 0 { + f.Close() + return nil, errno + } + return &unlocker{f, abs}, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_freebsd.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_freebsd.go new file mode 100644 index 00000000..d3835d62 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_freebsd.go @@ -0,0 +1,79 @@ +/* +Copyright 2013 The Go Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lock + +import ( + "fmt" + "io" + "os" + "path/filepath" + "syscall" + "unsafe" +) + +func init() { + lockFn = lockFcntl +} + +func lockFcntl(name string) (io.Closer, error) { + abs, err := filepath.Abs(name) + if err != nil { + return nil, err + } + lockmu.Lock() + if locked[abs] { + lockmu.Unlock() + return nil, fmt.Errorf("file %q already locked", abs) + } + locked[abs] = true + lockmu.Unlock() + + fi, err := os.Stat(name) + if err == nil && fi.Size() > 0 { + return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name) + } + + f, err := os.Create(name) + if err != nil { + return nil, err + } + + // This type matches C's "struct flock" defined in /usr/include/fcntl.h. + // TODO: move this into the standard syscall package. + k := struct { + Start int64 /* off_t starting offset */ + Len int64 /* off_t len = 0 means until end of file */ + Pid int32 /* pid_t lock owner */ + Type int16 /* short lock type: read/write, etc. */ + Whence int16 /* short type of l_start */ + Sysid int32 /* int remote system id or zero for local */ + }{ + Start: 0, + Len: 0, // 0 means to lock the entire file. 
+ Pid: int32(os.Getpid()), + Type: syscall.F_WRLCK, + Whence: int16(os.SEEK_SET), + Sysid: 0, + } + + _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k))) + if errno != 0 { + f.Close() + return nil, errno + } + return &unlocker{f, abs}, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_linux_amd64.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_linux_amd64.go new file mode 100644 index 00000000..3a7eb00a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_linux_amd64.go @@ -0,0 +1,80 @@ +// +build linux,amd64 +// +build !appengine + +/* +Copyright 2013 The Go Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lock + +import ( + "fmt" + "io" + "os" + "path/filepath" + "syscall" + "unsafe" +) + +func init() { + lockFn = lockFcntl +} + +func lockFcntl(name string) (io.Closer, error) { + abs, err := filepath.Abs(name) + if err != nil { + return nil, err + } + lockmu.Lock() + if locked[abs] { + lockmu.Unlock() + return nil, fmt.Errorf("file %q already locked", abs) + } + locked[abs] = true + lockmu.Unlock() + + fi, err := os.Stat(name) + if err == nil && fi.Size() > 0 { + return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name) + } + + f, err := os.Create(name) + if err != nil { + return nil, err + } + + // This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h. + // TODO: move this into the standard syscall package. + k := struct { + Type uint32 + Whence uint32 + Start uint64 + Len uint64 + Pid uint32 + }{ + Type: syscall.F_WRLCK, + Whence: uint32(os.SEEK_SET), + Start: 0, + Len: 0, // 0 means to lock the entire file. + Pid: uint32(os.Getpid()), + } + + _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k))) + if errno != 0 { + f.Close() + return nil, errno + } + return &unlocker{f, abs}, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_linux_arm.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_linux_arm.go new file mode 100644 index 00000000..c2a0a102 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_linux_arm.go @@ -0,0 +1,81 @@ +// +build linux,arm +// +build !appengine + +/* +Copyright 2013 The Go Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lock + +import ( + "fmt" + "io" + "os" + "path/filepath" + "syscall" + "unsafe" +) + +func init() { + lockFn = lockFcntl +} + +func lockFcntl(name string) (io.Closer, error) { + abs, err := filepath.Abs(name) + if err != nil { + return nil, err + } + lockmu.Lock() + if locked[abs] { + lockmu.Unlock() + return nil, fmt.Errorf("file %q already locked", abs) + } + locked[abs] = true + lockmu.Unlock() + + fi, err := os.Stat(name) + if err == nil && fi.Size() > 0 { + return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name) + } + + f, err := os.Create(name) + if err != nil { + return nil, err + } + + // This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h. + // TODO: move this into the standard syscall package. + k := struct { + Type uint16 + Whence uint16 + Start uint32 + Len uint32 + Pid uint32 + }{ + Type: syscall.F_WRLCK, + Whence: uint16(os.SEEK_SET), + Start: 0, + Len: 0, // 0 means to lock the entire file. + Pid: uint32(os.Getpid()), + } + + const F_SETLK = 6 // actual value. 
syscall package is wrong: golang.org/issue/7059 + _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(F_SETLK), uintptr(unsafe.Pointer(&k))) + if errno != 0 { + f.Close() + return nil, errno + } + return &unlocker{f, abs}, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_sigzero.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_sigzero.go new file mode 100644 index 00000000..fd3ba2db --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_sigzero.go @@ -0,0 +1,26 @@ +// +build !appengine +// +build linux darwin freebsd openbsd netbsd dragonfly + +/* +Copyright 2013 The Go Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lock + +import "syscall" + +func init() { + signalZero = syscall.Signal(0) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_test.go new file mode 100644 index 00000000..518d2f02 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/camlistore/lock/lock_test.go @@ -0,0 +1,131 @@ +/* +Copyright 2013 The Go Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lock + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "strconv" + "testing" +) + +func TestLock(t *testing.T) { + testLock(t, false) +} + +func TestLockPortable(t *testing.T) { + testLock(t, true) +} + +func TestLockInChild(t *testing.T) { + f := os.Getenv("TEST_LOCK_FILE") + if f == "" { + // not child + return + } + lock := Lock + if v, _ := strconv.ParseBool(os.Getenv("TEST_LOCK_PORTABLE")); v { + lock = lockPortable + } + + lk, err := lock(f) + if err != nil { + log.Fatalf("Lock failed: %v", err) + } + + if v, _ := strconv.ParseBool(os.Getenv("TEST_LOCK_CRASH")); v { + // Simulate a crash, or at least not unlocking the + // lock. We still exit 0 just to simplify the parent + // process exec code. 
+ os.Exit(0) + } + lk.Close() +} + +func testLock(t *testing.T, portable bool) { + lock := Lock + if portable { + lock = lockPortable + } + + td, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + path := filepath.Join(td, "foo.lock") + + childLock := func(crash bool) error { + cmd := exec.Command(os.Args[0], "-test.run=LockInChild$") + cmd.Env = []string{"TEST_LOCK_FILE=" + path} + if portable { + cmd.Env = append(cmd.Env, "TEST_LOCK_PORTABLE=1") + } + if crash { + cmd.Env = append(cmd.Env, "TEST_LOCK_CRASH=1") + } + out, err := cmd.CombinedOutput() + t.Logf("Child output: %q (err %v)", out, err) + if err != nil { + return fmt.Errorf("Child Process lock of %s failed: %v %s", path, err, out) + } + return nil + } + + t.Logf("Locking in crashing child...") + if err := childLock(true); err != nil { + t.Fatalf("first lock in child process: %v", err) + } + + t.Logf("Locking+unlocking in child...") + if err := childLock(false); err != nil { + t.Fatalf("lock in child process after crashing child: %v", err) + } + + t.Logf("Locking in parent...") + lk1, err := lock(path) + if err != nil { + t.Fatal(err) + } + + t.Logf("Again in parent...") + _, err = lock(path) + if err == nil { + t.Fatal("expected second lock to fail") + } + + t.Logf("Locking in child...") + if childLock(false) == nil { + t.Fatalf("expected lock in child process to fail") + } + + t.Logf("Unlocking lock in parent") + if err := lk1.Close(); err != nil { + t.Fatal(err) + } + + lk3, err := lock(path) + if err != nil { + t.Fatal(err) + } + lk3.Close() +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/bufs/Makefile b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/bufs/Makefile new file mode 100644 index 00000000..0bf557a7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/bufs/Makefile @@ -0,0 +1,30 @@ +# Copyright 2013 The Go Authors. All rights reserved. 
+# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +all: clean + go fmt + go test -i + go test + go build + go vet + go install + make todo + +todo: + @grep -n ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alnum:]] *.go || true + @grep -n TODO *.go || true + @grep -n FIXME *.go || true + @grep -n BUG *.go || true + +clean: + rm -f bufs.test mem.out *~ + +demo: + go test -bench . -benchmem + go test -c + ./bufs.test -test.v -test.run Foo -test.memprofile mem.out \ + -test.memprofilerate 1 + go tool pprof bufs.test mem.out --alloc_space --nodefraction 0.0001 \ + --edgefraction 0 -web + @echo "Note: Foo vs FooBufs allocated memory is in hundreds of MBs vs 8 kB." diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/bufs/README.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/bufs/README.md new file mode 100644 index 00000000..3f3d56f1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/bufs/README.md @@ -0,0 +1,8 @@ +bufs +==== + +Package bufs implements a simple buffer cache. + + installation: go get github.com/cznic/bufs + +documentation: http://godoc.org/github.com/cznic/bufs diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/bufs/bufs.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/bufs/bufs.go new file mode 100644 index 00000000..f4e0eee2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/bufs/bufs.go @@ -0,0 +1,391 @@ +// Copyright 2014 The bufs Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bufs implements a simple buffer cache. +// +// The intended use scheme is like: +// +// type Foo struct { +// buffers bufs.Buffers +// ... +// } +// +// // Bar can call Qux, but not the other way around (in this example). 
+// const maxFooDepth = 2 +// +// func NewFoo() *Foo { +// return &Foo{buffers: bufs.New(maxFooDepth), ...} +// } +// +// func (f *Foo) Bar(n int) { +// buf := f.buffers.Alloc(n) // needed locally for computation and/or I/O +// defer f.buffers.Free() +// ... +// f.Qux(whatever) +// } +// +// func (f *Foo) Qux(n int) { +// buf := f.buffers.Alloc(n) // needed locally for computation and/or I/O +// defer f.buffers.Free() +// ... +// } +// +// The whole idea behind 'bufs' is that when calling e.g. Foo.Bar N times, then +// normally, without using 'bufs', there will be 2*N (in this example) []byte +// buffers allocated. While using 'bufs', only 2 buffers (in this example) +// will ever be created. For large N it can be a substantial difference. +// +// It's not a good idea to use Buffers to cache too big buffers. The cost of +// having a cached buffer is that the buffer is naturally not eligible for +// garbage collection. Of course, that holds only while the Foo instance is +// reachable, in the above example. +// +// The buffer count limit is intentionally "hard" (read panicking), although +// configurable in New(). The rationale is to prevent recursive calls, using +// Alloc, to cause excessive, "static" memory consumption. Tune the limit +// carefully or do not use Buffers from within [mutually] recursive functions +// where the nesting depth is not realistically bounded to some rather small +// number. +// +// Buffers cannot guarantee improvements to you program performance. There may +// be a gain in case where they fit well. Firm grasp on what your code is +// actually doing, when and in what order is essential to proper use of +// Buffers. It's _highly_ recommended to first do profiling and memory +// profiling before even thinking about using 'bufs'. The real world example, +// and cause for this package, was a first correct, yet no optimizations done +// version of a program; producing few MB of useful data while allocating 20+GB +// of memory. 
Of course the garbage collector properly kicked in, yet the +// memory abuse caused ~80+% of run time to be spent memory management. The +// program _was_ expected to be slow in its still development phase, but the +// bottleneck was guessed to be in I/O. Actually the hard disk was waiting for +// the billions bytes being allocated and zeroed. Garbage collect on low +// memory, rinse and repeat. +// +// In the provided tests, TestFoo and TestFooBufs do the same simulated work, +// except the later uses Buffers while the former does not. Suggested test runs +// which show the differences: +// +// $ go test -bench . -benchmem +// +// or +// +// $ go test -c +// $ ./bufs.test -test.v -test.run Foo -test.memprofile mem.out -test.memprofilerate 1 +// $ go tool pprof bufs.test mem.out --alloc_space --nodefraction 0.0001 --edgefraction 0 -web +// $ # Note: Foo vs FooBufs allocated memory is in hundreds of MBs vs 8 kB. +// +// or +// +// $ make demo # same as all of the above +// +// +// NOTE: Alloc/Free calls must be properly nested in the same way as in for +// example BeginTransaction/EndTransaction pairs. If your code can panic then +// the pairing should be enforced by deferred calls. +// +// NOTE: Buffers objects do not allocate any space until requested by Alloc, +// the mechanism works on demand only. +// +// FAQ: Why the 'bufs' package name? +// +// Package name 'bufs' was intentionally chosen instead of the perhaps more +// conventional 'buf'. There are already too many 'buf' named things in the +// code out there and that'll be a source of a lot of trouble. It's a bit +// similar situation as in the case of package "strings" (not "string"). +package bufs + +import ( + "errors" + "sort" + "sync" +) + +// Buffers type represents a buffer ([]byte) cache. +// +// NOTE: Do not modify Buffers directly, use only its methods. Do not create +// additional values (copies) of Buffers, that'll break its functionality. 
Use +// a pointer instead to refer to a single instance from different +// places/scopes. +type Buffers [][]byte + +// New returns a newly created instance of Buffers with a maximum capacity of n +// buffers. +// +// NOTE: 'bufs.New(n)' is the same as 'make(bufs.Buffers, n)'. +func New(n int) Buffers { + return make(Buffers, n) +} + +// Alloc will return a buffer such that len(r) == n. It will firstly try to +// find an existing and unused buffer of big enough size. Only when there is no +// such, then one of the buffer slots is reallocated to a bigger size. +// +// It's okay to use append with buffers returned by Alloc. But it can cause +// allocation in that case and will again be producing load for the garbage +// collector. The best use of Alloc is for I/O buffers where the needed size of +// the buffer is figured out at some point of the code path in a 'final size' +// sense. Another real world example are compression/decompression buffers. +// +// NOTE: The buffer returned by Alloc _is not_ zeroed. That's okay for e.g. +// passing a buffer to io.Reader. If you need a zeroed buffer use Calloc. +// +// NOTE: Buffers returned from Alloc _must not_ be exposed/returned to your +// clients. Those buffers are intended to be used strictly internally, within +// the methods of some "object". +// +// NOTE: Alloc will panic if there are no buffers (buffer slots) left. +func (p *Buffers) Alloc(n int) (r []byte) { + b := *p + if len(b) == 0 { + panic(errors.New("Buffers.Alloc: out of buffers")) + } + + biggest, best, biggestI, bestI := -1, -1, -1, -1 + for i, v := range b { + //ln := len(v) + // The above was correct, buts it's just confusing. It worked + // because not the buffers, but slices of them are returned in + // the 'if best >= n' code path. 
+ ln := cap(v) + + if ln >= biggest { + biggest, biggestI = ln, i + } + + if ln >= n && (bestI < 0 || best > ln) { + best, bestI = ln, i + if ln == n { + break + } + } + } + + last := len(b) - 1 + if best >= n { + r = b[bestI] + b[last], b[bestI] = b[bestI], b[last] + *p = b[:last] + return r[:n] + } + + r = make([]byte, n, overCommit(n)) + b[biggestI] = r + b[last], b[biggestI] = b[biggestI], b[last] + *p = b[:last] + return +} + +// Calloc will acquire a buffer using Alloc and then clears it to zeros. The +// zeroing goes up to n, not cap(r). +func (p *Buffers) Calloc(n int) (r []byte) { + r = p.Alloc(n) + for i := range r { + r[i] = 0 + } + return +} + +// Free makes the lastly allocated by Alloc buffer free (available) again for +// Alloc. +// +// NOTE: Improper Free invocations, like in the sequence {New, Alloc, Free, +// Free}, will panic. +func (p *Buffers) Free() { + b := *p + b = b[:len(b)+1] + *p = b +} + +// Stats reports memory consumed by Buffers, without accounting for some +// (smallish) additional overhead. +func (p *Buffers) Stats() (bytes int) { + b := *p + b = b[:cap(b)] + for _, v := range b { + bytes += cap(v) + } + return +} + +// Cache caches buffers ([]byte). A zero value of Cache is ready for use. +// +// NOTE: Do not modify a Cache directly, use only its methods. Do not create +// additional values (copies) of a Cache, that'll break its functionality. Use +// a pointer instead to refer to a single instance from different +// places/scopes. +type Cache [][]byte + +// Get returns a buffer ([]byte) of length n. If no such buffer is cached then +// a biggest cached buffer is resized to have length n and returned. If there +// are no cached items at all, Get returns a newly allocated buffer. +// +// In other words the cache policy is: +// +// - If the cache is empty, the buffer must be newly created and returned. +// Cache remains empty. +// +// - If a buffer of sufficient size is found in the cache, remove it from the +// cache and return it. 
+// +// - Otherwise the cache is non empty, but no cached buffer is big enough. +// Enlarge the biggest cached buffer, remove it from the cache and return it. +// This provide cached buffers size adjustment based on demand. +// +// In short, if the cache is not empty, Get guarantees to make it always one +// item less. This rules prevent uncontrolled cache grow in some scenarios. +// The older policy was not preventing that. Another advantage is better cached +// buffers sizes "auto tuning", although not in every possible use case. +// +// NOTE: The buffer returned by Get _is not guaranteed_ to be zeroed. That's +// okay for e.g. passing a buffer to io.Reader. If you need a zeroed buffer +// use Cget. +func (c *Cache) Get(n int) []byte { + r, _ := c.get(n) + return r +} + +func (c *Cache) get(n int) (r []byte, isZeroed bool) { + s := *c + lens := len(s) + if lens == 0 { + r, isZeroed = make([]byte, n, overCommit(n)), true + return + } + + i := sort.Search(lens, func(x int) bool { return len(s[x]) >= n }) + if i == lens { + i-- + s[i] = make([]byte, n, overCommit(n)) + } + r = s[i][:n] + copy(s[i:], s[i+1:]) + s[lens-1] = nil + s = s[:lens-1] + *c = s + return r, false +} + +// Cget will acquire a buffer using Get and then clears it to zeros. The +// zeroing goes up to n, not cap(r). +func (c *Cache) Cget(n int) (r []byte) { + r, ok := c.get(n) + if ok { + return + } + + for i := range r { + r[i] = 0 + } + return +} + +// Put caches b for possible later reuse (via Get). No other references to b's +// backing array may exist. Otherwise a big mess is sooner or later inevitable. +func (c *Cache) Put(b []byte) { + b = b[:cap(b)] + lenb := len(b) + if lenb == 0 { + return + } + + s := *c + lens := len(s) + i := sort.Search(lens, func(x int) bool { return len(s[x]) >= lenb }) + s = append(s, nil) + copy(s[i+1:], s[i:]) + s[i] = b + *c = s + return +} + +// Stats reports memory consumed by a Cache, without accounting for some +// (smallish) additional overhead. 
'n' is the number of cached buffers, bytes +// is their combined capacity. +func (c Cache) Stats() (n, bytes int) { + n = len(c) + for _, v := range c { + bytes += cap(v) + } + return +} + +// CCache is a Cache which is safe for concurrent use by multiple goroutines. +type CCache struct { + c Cache + mu sync.Mutex +} + +// Get returns a buffer ([]byte) of length n. If no such buffer is cached then +// a biggest cached buffer is resized to have length n and returned. If there +// are no cached items at all, Get returns a newly allocated buffer. +// +// In other words the cache policy is: +// +// - If the cache is empty, the buffer must be newly created and returned. +// Cache remains empty. +// +// - If a buffer of sufficient size is found in the cache, remove it from the +// cache and return it. +// +// - Otherwise the cache is non empty, but no cached buffer is big enough. +// Enlarge the biggest cached buffer, remove it from the cache and return it. +// This provide cached buffers size adjustment based on demand. +// +// In short, if the cache is not empty, Get guarantees to make it always one +// item less. This rules prevent uncontrolled cache grow in some scenarios. +// The older policy was not preventing that. Another advantage is better cached +// buffers sizes "auto tuning", although not in every possible use case. +// +// NOTE: The buffer returned by Get _is not guaranteed_ to be zeroed. That's +// okay for e.g. passing a buffer to io.Reader. If you need a zeroed buffer +// use Cget. +func (c *CCache) Get(n int) []byte { + c.mu.Lock() + r, _ := c.c.get(n) + c.mu.Unlock() + return r +} + +// Cget will acquire a buffer using Get and then clears it to zeros. The +// zeroing goes up to n, not cap(r). +func (c *CCache) Cget(n int) (r []byte) { + c.mu.Lock() + r = c.c.Cget(n) + c.mu.Unlock() + return +} + +// Put caches b for possible later reuse (via Get). No other references to b's +// backing array may exist. 
Otherwise a big mess is sooner or later inevitable. +func (c *CCache) Put(b []byte) { + c.mu.Lock() + c.c.Put(b) + c.mu.Unlock() +} + +// Stats reports memory consumed by a Cache, without accounting for some +// (smallish) additional overhead. 'n' is the number of cached buffers, bytes +// is their combined capacity. +func (c *CCache) Stats() (n, bytes int) { + c.mu.Lock() + n, bytes = c.c.Stats() + c.mu.Unlock() + return +} + +// GCache is a ready to use global instance of a CCache. +var GCache CCache + +func overCommit(n int) int { + switch { + case n < 8: + return 8 + case n < 1e5: + return 2 * n + case n < 1e6: + return 3 * n / 2 + default: + return n + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/bufs/bufs_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/bufs/bufs_test.go new file mode 100644 index 00000000..62400b00 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/bufs/bufs_test.go @@ -0,0 +1,174 @@ +// Copyright 2014 The bufs Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bufs + +import ( + "fmt" + "path" + "runtime" + "testing" +) + +var dbg = func(s string, va ...interface{}) { + _, fn, fl, _ := runtime.Caller(1) + fmt.Printf("%s:%d: ", path.Base(fn), fl) + fmt.Printf(s, va...) 
+ fmt.Println() +} + +func Test0(t *testing.T) { + b := New(0) + defer func() { + recover() + }() + + b.Alloc(1) + t.Fatal("unexpected success") +} + +func Test1(t *testing.T) { + b := New(1) + expected := false + defer func() { + if e := recover(); e != nil && !expected { + t.Fatal(fmt.Errorf("%v", e)) + } + }() + + b.Alloc(1) + expected = true + b.Alloc(1) + t.Fatal("unexpected success") +} + +func Test2(t *testing.T) { + b := New(1) + expected := false + defer func() { + if e := recover(); e != nil && !expected { + t.Fatal(fmt.Errorf("%v", e)) + } + }() + + b.Alloc(1) + b.Free() + b.Alloc(1) + expected = true + b.Alloc(1) + t.Fatal("unexpected success") +} + +func Test3(t *testing.T) { + b := New(1) + expected := false + defer func() { + if e := recover(); e != nil && !expected { + t.Fatal(fmt.Errorf("%v", e)) + } + }() + + b.Alloc(1) + b.Free() + expected = true + b.Free() + t.Fatal("unexpected success") +} + +const ( + N = 1e5 + bufSize = 1 << 12 +) + +type Foo struct { + result []byte +} + +func NewFoo() *Foo { + return &Foo{} +} + +func (f *Foo) Bar(n int) { + buf := make([]byte, n) + sum := 0 + for _, v := range buf { + sum += int(v) + } + f.result = append(f.result, byte(sum)) + f.Qux(n) +} + +func (f *Foo) Qux(n int) { + buf := make([]byte, n) + sum := 0 + for _, v := range buf { + sum += int(v) + } + f.result = append(f.result, byte(sum)) +} + +type FooBufs struct { + buffers Buffers + result []byte +} + +const maxFooDepth = 2 + +func NewFooBufs() *FooBufs { + return &FooBufs{buffers: New(maxFooDepth)} +} + +func (f *FooBufs) Bar(n int) { + buf := f.buffers.Alloc(n) + defer f.buffers.Free() + + sum := 0 + for _, v := range buf { + sum += int(v) + } + f.result = append(f.result, byte(sum)) + f.Qux(n) +} + +func (f *FooBufs) Qux(n int) { + buf := f.buffers.Alloc(n) + defer f.buffers.Free() + + sum := 0 + for _, v := range buf { + sum += int(v) + } + f.result = append(f.result, byte(sum)) +} + +func TestFoo(t *testing.T) { + foo := NewFoo() + for i := 0; i 
< N; i++ { + foo.Bar(bufSize) + } +} + +func TestFooBufs(t *testing.T) { + foo := NewFooBufs() + for i := 0; i < N; i++ { + foo.Bar(bufSize) + } + t.Log("buffers.Stats()", foo.buffers.Stats()) +} + +func BenchmarkFoo(b *testing.B) { + b.SetBytes(2 * bufSize) + foo := NewFoo() + for i := 0; i < b.N; i++ { + foo.Bar(bufSize) + } +} + +func BenchmarkFooBufs(b *testing.B) { + b.SetBytes(2 * bufSize) + foo := NewFooBufs() + for i := 0; i < b.N; i++ { + foo.Bar(bufSize) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/README.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/README.md new file mode 100644 index 00000000..1607f2e8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/README.md @@ -0,0 +1,10 @@ +exp +=== + +This repository holds experimental packages. + +Warning: Packages here are experimental and unreliable. Some may one day be +promoted to their own repository or they may be modified arbitrarily or even +disappear altogether. + +In short, code in this repository is not subject to any compatibility promise. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/LICENSE new file mode 100644 index 00000000..65d761bc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/Makefile b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/Makefile new file mode 100644 index 00000000..0b06ff5e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/Makefile @@ -0,0 +1,23 @@ +.PHONY: all editor todo clean bench + +all: editor + go build + go vet + go install + make todo + +editor: + go fmt + go test -i + go test + +todo: + @grep -n ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alnum:]] *.go || true + @grep -n TODO *.go || true + @grep -n BUG *.go || true + +clean: + rm -f *~ cov cov.html test.db + +bench: + go test -run NONE -bench B diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/README.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/README.md new file mode 100644 index 00000000..36518837 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/README.md @@ -0,0 +1,9 @@ +dbm +=== + +Package dbm implements a simple database engine, a hybrid of a hierarchical +and/or a key-value one. + +Installation: $ go get github.com/cznic/exp/dbm + +Documentation: [godoc.org/github.com/cznic/exp/dbm](http://godoc.org/github.com/cznic/exp/dbm) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/all_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/all_test.go new file mode 100644 index 00000000..3c4d0d68 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/all_test.go @@ -0,0 +1,2901 @@ +// Copyright 2014 The dbm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dbm + +//DONE 2012-04-24 15:56 go test -race -cpu 4 -bench . +//DONE 2012-04-24 16:05 go test -race -cpu 4 -bench . 
-xact +//DONE 2012-04-24 16:25 go test -race -cpu 4 -bench . -wall + +import ( + "bytes" + "encoding/hex" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "sync/atomic" + "testing" + "time" + + "camlistore.org/third_party/github.com/cznic/exp/lldb" + "camlistore.org/third_party/github.com/cznic/fileutil" +) + +var ( + oNoZip = flag.Bool("nozip", false, "disable compression") + oACIDEnableWAL = flag.Bool("wal", false, "enable WAL") + oACIDEnableXACT = flag.Bool("xact", false, "enable structural transactions") + oACIDGrace = flag.Duration("grace", time.Second, "Grace period for -wal") + oBench = flag.Bool("tbench", false, "enable (long) TestBench* tests") +) + +// Bench knobs. +const ( + fileTestChunkSize = 32e3 + fileTotalSize = 10e6 +) + +func init() { + flag.Parse() + compress = !*oNoZip + if *oACIDEnableXACT { + o.ACID = ACIDTransactions + } + if *oACIDEnableWAL { + o.ACID = ACIDFull + o.GracePeriod = *oACIDGrace + } +} + +func dbg(s string, va ...interface{}) { + _, fn, fl, _ := runtime.Caller(1) + fmt.Printf("%s:%d: ", path.Base(fn), fl) + fmt.Printf(s, va...) 
+ fmt.Println() +} + +func use(...interface{}) {} + +var o = &Options{} + +func temp() (dir, name string) { + dir, err := ioutil.TempDir("", "test-dbm-") + if err != nil { + panic(err) + } + + name = filepath.Join(dir, "test.db") + return +} + +func Test0(t *testing.T) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } + + if db, err = Open(dbname, o); err != nil { + t.Error(err) + return + } + + if _, err = db.root(); err != nil { + t.Error(err) + return + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } + + if db, err = Open(dbname, o); err != nil { + t.Error(err) + return + } + + if _, err = db.root(); err != nil { + t.Error(err) + return + } + + var tr *lldb.BTree + if tr, err = db.acache.getTree(db, arraysPrefix, "Test0", false, aCacheSize); err != nil { + t.Error(err) + return + } + + if tr != nil { + t.Error(tr) + return + } + + if err = db.filer.BeginUpdate(); err != nil { + t.Error(tr) + return + } + + if tr, err = db.acache.getTree(db, arraysPrefix, "Test0", true, aCacheSize); err != nil { + t.Error(err) + return + } + + if err = db.filer.EndUpdate(); err != nil { + t.Error(tr) + return + } + + if tr == nil { + t.Error(tr) + return + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } + + if db, err = Open(dbname, o); err != nil { + t.Error(err) + return + } + + if err = db.filer.BeginUpdate(); err != nil { + t.Error(tr) + return + } + + if tr, err = db.acache.getTree(db, arraysPrefix, "Test0", true, aCacheSize); err != nil { + t.Error(err) + return + } + + if err = db.filer.EndUpdate(); err != nil { + t.Error(tr) + return + } + + if tr == nil { + t.Error(tr) + return + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } +} + +func TestSet0(t *testing.T) { + N := 4000 + if *oACIDEnableWAL { + N = 4000 + } + + dir, dbname := temp() + defer os.RemoveAll(dir) + + rng := 
rand.New(rand.NewSource(42)) + ref := map[int]int{} + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < N; i++ { + k, v := rng.Int(), rng.Int() + ref[k] = v + if err := db.Set(v, "TestSet0", k); err != nil { + t.Fatal(err) + } + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } + + if db, err = Open(dbname, o); err != nil { + t.Error(err) + return + } + + for k, v := range ref { + val, err := db.Get("TestSet0", k) + if err != nil { + t.Error(err) + return + } + + switch x := val.(type) { + case int64: + if g, e := x, int64(v); g != e { + t.Error(g, e) + return + } + default: + t.Errorf("%T != int64", x) + } + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } +} + +func TestDocEx(t *testing.T) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + var g, e interface{} + + dump := func(name string, clear bool) { + array, err := db.Array(name) + if err != nil { + t.Fatal(err) + } + + s, err := dump(array.tree) + if err != nil { + t.Fatal(err) + } + + t.Logf("\nDump of %q\n%s", name, s) + + if clear { + if err = array.Clear(); err != nil { + t.Fatal(err) + } + } + + } + + db.Set(3, "Stock", "slip dress", 4, "blue", "floral") + + g, _ = db.Get("Stock", "slip dress", 4, "blue", "floral") // → 3 + if e = int64(3); g != e { + t.Error(g, e) + return + } + + dump("Stock", true) + + stock, _ := db.Array("Stock") + stock.Set(3, "slip dress", 4, "blue", "floral") + + g, _ = db.Get("Stock", "slip dress", 4, "blue", "floral") // → 3 + if e = int64(3); g != e { + t.Error(g, e) + return + } + + g, _ = stock.Get("slip dress", 4, "blue", "floral") // → 3 + if e = int64(3); g != e { + t.Error(g, e) + return + } + + dump("Stock", true) + + blueDress, _ := db.Array("Stock", "slip dress", 4, "blue") + blueDress.Set(3, "floral") + + g, _ = db.Get("Stock", "slip dress", 4, "blue", "floral") // → 3 + if e = int64(3); g != e { + t.Error(g, e) + 
return + } + + g, _ = blueDress.Get("floral") // → 3 + if e = int64(3); g != e { + t.Error(g, e) + return + } + + dump("Stock", true) + + parts := []struct{ num, qty, price int }{ + {100001, 2, 300}, + {100004, 5, 600}, + } + invoiceNum := 314159 + customer := "Google" + when := time.Now().UnixNano() + + invoice, _ := db.Array("Invoice") + invoice.Set(when, invoiceNum, "Date") + invoice.Set(customer, invoiceNum, "Customer") + invoice.Set(len(parts), invoiceNum, "Items") // # of Items in the invoice + for i, part := range parts { + invoice.Set(part.num, invoiceNum, "Items", i, "Part") + invoice.Set(part.qty, invoiceNum, "Items", i, "Quantity") + invoice.Set(part.price, invoiceNum, "Items", i, "Price") + } + + g, _ = db.Get("Invoice", invoiceNum, "Customer") // → customer + if e = customer; g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = db.Get("Invoice", invoiceNum, "Date") // → time.Then().UnixName + if e = when; g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = invoice.Get(invoiceNum, "Customer") // → customer + if e = customer; g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = invoice.Get(invoiceNum, "Date") // → time.Then().UnixName + if e = when; g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = invoice.Get(invoiceNum, "Items") // → len(parts) + if e = int64(len(parts)); g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + for i, part := range parts { + g, _ = invoice.Get(invoiceNum, "Items", i, "Part") // → part[0].part + if e = int64(part.num); g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = invoice.Get(invoiceNum, "Items", i, "Quantity") // → part[0].qty + if e = int64(part.qty); g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = invoice.Get(invoiceNum, "Items", i, "Price") // → part[0].price + if e = int64(part.price); g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + } + + dump("Invoice", true) + + invoice, _ = db.Array("Invoice", invoiceNum) + 
invoice.Set(when, "Date") + invoice.Set(customer, "Customer") + items, _ := invoice.Array("Items") + items.Set(len(parts)) // # of Items in the invoice + for i, part := range parts { + items.Set(part.num, i, "Part") + items.Set(part.qty, i, "Quantity") + items.Set(part.price, i, "Price") + } + + g, _ = db.Get("Invoice", invoiceNum, "Customer") // → customer + if e = customer; g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = db.Get("Invoice", invoiceNum, "Date") // → time.Then().UnixName + if e = when; g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = invoice.Get("Customer") // → customer + if e = customer; g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = invoice.Get("Date") // → time.Then().UnixName + if e = when; g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = items.Get() // → len(parts) + if e = int64(len(parts)); g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + for i, part := range parts { + g, _ = items.Get(i, "Part") // → parts[i].part + if e = int64(part.num); g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = items.Get(i, "Quantity") // → part[0].qty + if e = int64(part.qty); g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = items.Get(i, "Price") // → part[0].price + if e = int64(part.price); g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + } + + dump("Invoice", true) + + invoice, _ = db.Array("Invoice", invoiceNum) + invoice.Set(when, "Date") + invoice.Set(customer, "Customer") + items, _ = invoice.Array("Items") + items.Set(len(parts)) // # of Items in the invoice + for i, part := range parts { + items.Set([]interface{}{part.num, part.qty, part.price}, i) + } + + dump("Invoice", false) + + g, _ = db.Get("Invoice", invoiceNum, "Customer") // → customer + if e = customer; g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = db.Get("Invoice", invoiceNum, "Date") // → time.Then().UnixName + if e = when; g != e { + t.Errorf("|%#v| |%#v|", g, 
e) + return + } + + g, _ = invoice.Get("Customer") // → customer + if e = customer; g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = invoice.Get("Date") // → time.Then().UnixName + if e = when; g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + g, _ = items.Get() // → len(parts) + if e = int64(len(parts)); g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + for i, part := range parts { + g, _ = items.Get(i) // → []interface{parts[i].num, parts[0].qty, parts[i].price} + gg, ok := g.([]interface{}) + if !ok || len(gg) != 3 { + t.Error(g) + return + } + + if g, e = gg[0], int64(part.num); g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + if g, e = gg[1], int64(part.qty); g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + + if g, e = gg[2], int64(part.price); g != e { + t.Errorf("|%#v| |%#v|", g, e) + return + } + } + + dump("Invoice", true) + + if err = db.Close(); err != nil { + t.Error(err) + return + } +} + +func dump(t *lldb.BTree) (r string, err error) { + var b bytes.Buffer + if err = t.Dump(&b); err != nil { + if err = noEof(err); err != nil { + return "", err + } + } + + return fmt.Sprintf("IsMem: %t\n%s", t.IsMem(), b.String()), nil +} + +func strings2D(s string) (r [][]interface{}) { + for _, v := range strings.Split(s, "|") { + r = append(r, strings1D(v)) + } + return +} + +func strings1D(s string) (r []interface{}) { + for _, v := range strings.Split(s, ",") { + if v != "" { + r = append(r, v) + } + } + return +} + +func TestSlice0(t *testing.T) { + table := []struct{ prefix, keys, from, to, exp string }{ + // Slice.from == nil && Slice.to == nil + {"", "", "", "", ""}, // 0 + {"", "a", "", "", "a"}, // 1 + {"", "a|b", "", "", "a|b"}, // 2 + {"", "d|c", "", "", "c|d"}, // 3 + {"", "a|a,b|a,c|b", "", "", "a|a,b|a,c|b"}, // 4 + + // Slice.from == nil && Slice.to != nil + {"", "", "", "a", ""}, // 5 + {"", "m", "", "a", ""}, // 6 + {"", "m", "", "m", "m"}, // 7 + {"", "m", "", "z", "m"}, // 8 + {"", "k|p", "", "a", 
""}, // 9 + {"", "k|p", "", "j", ""}, // 10 + {"", "k|p", "", "k", "k"}, // 11 + {"", "k|p", "", "l", "k"}, // 12 + {"", "k|p", "", "o", "k"}, // 13 + {"", "k|p", "", "p", "k|p"}, // 14 + {"", "k|p", "", "q", "k|p"}, // 15 + {"", "k|m|o", "", "j", ""}, // 16 + {"", "k|m|o", "", "k", "k"}, // 17 + {"", "k|m|o", "", "l", "k"}, // 18 + {"", "k|m|o", "", "m", "k|m"}, // 19 + {"", "k|m|o", "", "n", "k|m"}, // 20 + {"", "k|m|o", "", "o", "k|m|o"}, // 21 + {"", "k|m|o", "", "p", "k|m|o"}, // 22 + {"", "k|k,m|k,o|p", "", "j", ""}, // 23 + {"", "k|k,m|k,o|p", "", "k", "k"}, // 24 + {"", "k|k,m|k,o|p", "", "k,l", "k"}, // 25 + {"", "k|k,m|k,o|p", "", "k,m", "k|k,m"}, // 26 + {"", "k|k,m|k,o|p", "", "k,n", "k|k,m"}, // 27 + {"", "k|k,m|k,o|p", "", "k,o", "k|k,m|k,o"}, // 28 + {"", "k|k,m|k,o|p", "", "k,z", "k|k,m|k,o"}, // 29 + {"", "k|k,m|k,o|p", "", "o", "k|k,m|k,o"}, // 30 + {"", "k|k,m|k,o|p", "", "p", "k|k,m|k,o|p"}, // 31 + {"", "k|k,m|k,o|p", "", "q", "k|k,m|k,o|p"}, // 32 + + // Slice.from != nil && Slice.to == nil + {"", "", "m", "", ""}, // 33 + {"", "a", "0", "", "a"}, // 34 + {"", "a", "a", "", "a"}, // 35 + {"", "a", "b", "", ""}, // 36 + {"", "a|c", "0", "", "a|c"}, // 37 + {"", "a|c", "a", "", "a|c"}, // 38 + {"", "a|c", "b", "", "c"}, // 39 + {"", "a|c", "c", "", "c"}, // 40 + {"", "a|c", "d", "", ""}, // 41 + {"", "k|k,m|k,o|p", "j", "", "k|k,m|k,o|p"}, // 42 + {"", "k|k,m|k,o|p", "k", "", "k|k,m|k,o|p"}, // 43 + {"", "k|k,m|k,o|p", "k,l", "", "k,m|k,o|p"}, // 44 + {"", "k|k,m|k,o|p", "k,m", "", "k,m|k,o|p"}, // 45 + {"", "k|k,m|k,o|p", "k,n", "", "k,o|p"}, // 46 + {"", "k|k,m|k,o|p", "k,z", "", "p"}, // 47 + {"", "k|k,m|k,o|p", "o", "", "p"}, // 48 + {"", "k|k,m|k,o|p", "p", "", "p"}, // 49 + {"", "k|k,m|k,o|p", "q", "", ""}, // 50 + + // Slice.from != nil && Slice.to != nil + {"", "", "m", "p", ""}, + + {"", "b|d|e", "a", "a", ""}, + {"", "b|d|e", "a", "b", "b"}, + {"", "b|d|e", "a", "c", "b"}, + {"", "b|d|e", "a", "d", "b|d"}, + {"", "b|d|e", "a", "e", 
"b|d|e"}, + {"", "b|d|e", "a", "f", "b|d|e"}, + + {"", "b|d|e", "b", "a", ""}, + {"", "b|d|e", "b", "b", "b"}, + {"", "b|d|e", "b", "c", "b"}, + {"", "b|d|e", "b", "d", "b|d"}, + {"", "b|d|e", "b", "e", "b|d|e"}, + {"", "b|d|e", "b", "f", "b|d|e"}, + + {"", "b|d|e", "c", "a", ""}, + {"", "b|d|e", "c", "b", ""}, + {"", "b|d|e", "c", "c", ""}, + {"", "b|d|e", "c", "d", "d"}, + {"", "b|d|e", "c", "e", "d|e"}, + {"", "b|d|e", "c", "f", "d|e"}, + + {"", "b|d|e", "d", "a", ""}, + {"", "b|d|e", "d", "b", ""}, + {"", "b|d|e", "d", "c", ""}, + {"", "b|d|e", "d", "d", "d"}, + {"", "b|d|e", "d", "e", "d|e"}, + {"", "b|d|e", "d", "f", "d|e"}, + + {"", "b|d|e", "d", "a", ""}, + {"", "b|d|e", "d", "b", ""}, + {"", "b|d|e", "d", "c", ""}, + {"", "b|d|e", "d", "d", "d"}, + {"", "b|d|e", "d", "e", "d|e"}, + {"", "b|d|e", "d", "f", "d|e"}, + + {"", "b|d|e", "e", "a", ""}, + {"", "b|d|e", "e", "b", ""}, + {"", "b|d|e", "e", "c", ""}, + {"", "b|d|e", "e", "d", ""}, + {"", "b|d|e", "e", "e", "e"}, + {"", "b|d|e", "e", "f", "e"}, + + {"", "b|d|e", "f", "a", ""}, + {"", "b|d|e", "f", "b", ""}, + {"", "b|d|e", "f", "c", ""}, + {"", "b|d|e", "f", "d", ""}, + {"", "b|d|e", "f", "e", ""}, + {"", "b|d|e", "f", "f", ""}, + + // more levels + {"", "b|d,f|h,j|l", "a", "a", ""}, + {"", "b|d,f|h,j|l", "a", "z", "b|d,f|h,j|l"}, + {"", "b|d,f|h,j|l", "c", "k", "d,f|h,j"}, + + // w/ prefix + {"B", "", "M", "P", ""}, + {"B", "", "A", "Z", ""}, + + {"B", "D|E", "", "", "D|E"}, + {"B", "D|E", "", "A", ""}, + {"B", "D|E", "", "B", ""}, + {"B", "D|E", "", "C", ""}, + {"B", "D|E", "", "D", "D"}, + {"B", "D|E", "", "E", "D|E"}, + {"B", "D|E", "", "F", "D|E"}, + + {"B", "D|E", "A", "", "D|E"}, + {"B", "D|E", "A", "A", ""}, + {"B", "D|E", "A", "B", ""}, + {"B", "D|E", "A", "C", ""}, + {"B", "D|E", "A", "D", "D"}, + {"B", "D|E", "A", "E", "D|E"}, + {"B", "D|E", "A", "F", "D|E"}, + + {"B", "D|E", "B", "", "D|E"}, + {"B", "D|E", "B", "A", ""}, + {"B", "D|E", "B", "B", ""}, + {"B", "D|E", "B", "C", ""}, + {"B", 
"D|E", "B", "D", "D"}, + {"B", "D|E", "B", "E", "D|E"}, + {"B", "D|E", "B", "F", "D|E"}, + + {"B", "D|E", "C", "", "D|E"}, + {"B", "D|E", "C", "A", ""}, + {"B", "D|E", "C", "B", ""}, + {"B", "D|E", "C", "C", ""}, + {"B", "D|E", "C", "D", "D"}, + {"B", "D|E", "C", "E", "D|E"}, + {"B", "D|E", "C", "F", "D|E"}, + + {"B", "D|E", "D", "", "D|E"}, + {"B", "D|E", "D", "A", ""}, + {"B", "D|E", "D", "B", ""}, + {"B", "D|E", "D", "C", ""}, + {"B", "D|E", "D", "D", "D"}, + {"B", "D|E", "D", "E", "D|E"}, + {"B", "D|E", "D", "F", "D|E"}, + + {"B", "D|E", "E", "", "E"}, + {"B", "D|E", "E", "A", ""}, + {"B", "D|E", "E", "B", ""}, + {"B", "D|E", "E", "C", ""}, + {"B", "D|E", "E", "D", ""}, + {"B", "D|E", "E", "E", "E"}, + {"B", "D|E", "E", "F", "E"}, + + {"B", "D|E", "F", "", ""}, + {"B", "D|E", "F", "A", ""}, + {"B", "D|E", "F", "B", ""}, + {"B", "D|E", "F", "C", ""}, + {"B", "D|E", "F", "D", ""}, + {"B", "D|E", "F", "E", ""}, + {"B", "D|E", "F", "F", ""}, + } + + for i, test := range table { + prefix := strings1D(test.prefix) + keys := strings2D(test.keys) + from := strings1D(test.from) + to := strings1D(test.to) + exp := test.exp + + a0, _ := MemArray() + + a, err := a0.Array(prefix...) 
+ if err != nil { + t.Fatal(err) + } + + if test.prefix != "" { + a0.Set(-1, "@") + a0.Set(-1, "Z") + } + for i, v := range keys { + if err = a.Set(i, v...); err != nil { + t.Error(err) + return + } + } + d, err := dump(a.tree) + if err != nil { + t.Fatal(err) + } + + t.Logf("%d: %q, %q, dump:\n%s", i, test.prefix, test.keys, d) + + s, err := a.Slice(from, to) + if err != nil { + t.Fatal(err) + } + + var ga []string + + if err := s.Do(func(k, v []interface{}) (more bool, err error) { + a := []string{} + for _, v := range k { + a = append(a, v.(string)) + } + ga = append(ga, strings.Join(a, ",")) + return true, nil + }); err != nil { + if !fileutil.IsEOF(err) { + t.Fatal(i, err) + } + } + + g := strings.Join(ga, "|") + t.Logf("%q", g) + if g != exp { + t.Fatalf("%d\n%s\n%s", i, g, exp) + } + } +} + +func TestSlice1(t *testing.T) { + f := func(s, val []interface{}) (k, v string) { + if len(s) != 1 || len(val) != 1 { + t.Fatal(s, val) + } + + k, ok := s[0].(string) + if !ok { + t.Fatal(s) + } + + v, ok = val[0].(string) + if !ok { + t.Fatal(val) + } + + return + } + + a0, err := MemArray() + if err != nil { + t.Fatal(err) + } + + a, err := a0.Array("b") + if err != nil { + t.Fatal(err) + } + + a.Set("1", "d") + a.Set("2", "f") + + d, err := dump(a0.tree) + if err != nil { + t.Fatal(err) + } + + t.Logf("\n%s", d) + + s, err := a.Slice(nil, nil) + if err != nil { + t.Fatal(err) + } + + state := 0 + err = s.Do(func(s, val []interface{}) (bool, error) { + k, v := f(s, val) + switch state { + case 0: + if k != "d" || v != "1" { + t.Error(s, val) + return false, nil + } + + a.Set("3", k) + state++ + case 1: + if k != "f" || v != "2" { + t.Error(s, val) + return false, nil + } + + a.Set("4", k) + state++ + default: + t.Error(state) + return false, nil + } + return true, nil + }) + + if err != nil { + t.Fatal(err) + } + + if g, e := state, 2; g != e { + t.Fatal(state) + } + + d, err = dump(a0.tree) + if err != nil { + t.Fatal(err) + } + + t.Logf("\n%s", d) + + v3, err := 
a0.Get("b", "d") + if err != nil { + t.Fatal(err) + } + + if g, e := v3, interface{}("3"); g != e { + t.Fatal(g, e) + } + + v4, err := a0.Get("b", "f") + if err != nil { + t.Fatal(err) + } + + if g, e := v4, interface{}("4"); g != e { + t.Fatal(g, e) + } +} + +func TestClear(t *testing.T) { + table := []struct{ prefix, keys, subscripts, exp string }{ + {"", "", "", ""}, + + {"", "b", "", ""}, + {"", "b", "a", "b"}, + {"", "b", "b", ""}, + {"", "b", "c", "b"}, + + {"", "b|d|f", "", ""}, + {"", "b|d|f", "a", "b|d|f"}, + {"", "b|d|f", "b", "d|f"}, + {"", "b|d|f", "c", "b|d|f"}, + {"", "b|d|f", "d", "b|f"}, + {"", "b|d|f", "e", "b|d|f"}, + {"", "b|d|f", "f", "b|d"}, + {"", "b|d|f", "g", "b|d|f"}, + + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "", ""}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "a", "b,d|b,f|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "b", "d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "b,c", "b,d|b,f|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "b,d", "b,f|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "b,e", "b,d|b,f|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "b,f", "b,d|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "c", "b,d|b,f|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "d", "b,d|b,f|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "d,e", "b,d|b,f|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "d,f", "b,d|b,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "d,g", "b,d|b,f|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "d,h", "b,d|b,f|d,f|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "d,i", "b,d|b,f|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "e", "b,d|b,f|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "f", "b,d|b,f|d,f|d,h"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "f,g", "b,d|b,f|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "f,h", "b,d|b,f|d,f|d,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "f,i", "b,d|b,f|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "f,j", "b,d|b,f|d,f|d,h|f,h"}, + 
{"", "b,d|b,f|d,f|d,h|f,h|f,j", "f,k", "b,d|b,f|d,f|d,h|f,h|f,j"}, + {"", "b,d|b,f|d,f|d,h|f,h|f,j", "g", "b,d|b,f|d,f|d,h|f,h|f,j"}, + + {"b", "", "", ""}, + {"b", "d", "c", "b,d"}, + {"b", "d", "d", ""}, + {"b", "d", "e", "b,d"}, + + {"b", "d|f", "", ""}, + {"b", "d|f", "c", "b,d|b,f"}, + {"b", "d|f", "d", "b,f"}, + {"b", "d|f", "e", "b,d|b,f"}, + {"b", "d|f", "f", "b,d"}, + {"b", "d|f", "g", "b,d|b,f"}, + } + + for i, test := range table { + prefix := strings1D(test.prefix) + keys := strings2D(test.keys) + subscripts := strings1D(test.subscripts) + exp := test.exp + + a0, err := MemArray() + if err != nil { + t.Fatal(err) + } + + a, err := a0.Array(prefix...) + if err != nil { + t.Fatal(err) + } + + for i, v := range keys { + a.Set(i, v...) + } + d, err := dump(a.tree) + if err != nil { + t.Fatal(err) + } + + t.Logf("before Clear(%v)\n%s", subscripts, d) + + err = a.Clear(subscripts...) + if err != nil { + t.Fatal(err) + } + + d, err = dump(a.tree) + if err != nil { + t.Fatal(err) + } + + t.Logf(" after Clear(%v)\n%s", subscripts, d) + + s, err := a0.Slice(nil, nil) + if err != nil { + t.Fatal(err) + } + + var ga []string + + if err := s.Do(func(k, v []interface{}) (more bool, err error) { + a := []string{} + for _, v := range k { + a = append(a, v.(string)) + } + ga = append(ga, strings.Join(a, ",")) + return true, nil + }); err != nil { + t.Fatal(err) + } + + g := strings.Join(ga, "|") + t.Log(g) + if g != exp { + t.Fatalf("i %d\ng: %s\ne: %s", i, g, exp) + } + } +} + +func BenchmarkClear(b *testing.B) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, &Options{}) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + a, err := db.Array("test") + if err != nil { + b.Error(err) + return + } + + ref := map[int]struct{}{} + for i := 0; i < b.N; i++ { + ref[i] = struct{}{} + } + for i := range ref { + a.Set(i, i) + } + if err := db.Close(); err != nil { + b.Fatal(err) + return + } + + db2, err := Open(dbname, o) + if err != 
nil { + b.Error(err) + return + } + + defer db2.Close() + + a, err = db2.Array("test") + if err != nil { + b.Error(err) + return + } + + runtime.GC() + b.ResetTimer() + a.Clear() + b.StopTimer() +} + +func BenchmarkDelete(b *testing.B) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, &Options{}) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + a, err := db.Array("test") + if err != nil { + b.Error(err) + return + } + + ref := map[int]struct{}{} + for i := 0; i < b.N; i++ { + ref[i] = struct{}{} + } + for i := range ref { + a.Set(i, i) + } + ref = map[int]struct{}{} + for i := 0; i < b.N; i++ { + ref[i] = struct{}{} + } + if err := db.Close(); err != nil { + b.Error(err) + return + } + + db2, err := Open(dbname, o) + if err != nil { + b.Error(err) + return + } + + defer db2.Close() + + a, err = db2.Array("test") + if err != nil { + b.Error(err) + return + } + + runtime.GC() + b.ResetTimer() + for i := range ref { + a.Delete(i) + } + b.StopTimer() +} + +func BenchmarkGet(b *testing.B) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, &Options{}) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + a, err := db.Array("test") + if err != nil { + b.Error(err) + return + } + + ref := map[int]struct{}{} + for i := 0; i < b.N; i++ { + ref[i] = struct{}{} + } + ref = map[int]struct{}{} + for i := 0; i < b.N; i++ { + ref[i] = struct{}{} + } + if err := db.Close(); err != nil { + b.Error(err) + return + } + + db2, err := Open(dbname, o) + if err != nil { + b.Error(err) + return + } + + defer db2.Close() + + a, err = db2.Array("test") + if err != nil { + b.Error(err) + return + } + + runtime.GC() + b.ResetTimer() + for i := range ref { + a.Get(i) + } + b.StopTimer() +} + +func BenchmarkSet(b *testing.B) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + a, err := db.Array("test") + if err != nil { 
+ b.Error(err) + return + } + + ref := map[int]struct{}{} + for i := 0; i < b.N; i++ { + ref[i] = struct{}{} + } + runtime.GC() + b.ResetTimer() + for i := range ref { + a.Set(i, i) + } + b.StopTimer() +} + +func BenchmarkDo(b *testing.B) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, &Options{}) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + a, err := db.Array("test") + if err != nil { + b.Error(err) + return + } + + ref := map[int]struct{}{} + for i := 0; i < b.N; i++ { + ref[i] = struct{}{} + } + for i := range ref { + a.Set(i, i) + } + if err := db.Close(); err != nil { + b.Error(err) + return + } + + db2, err := Open(dbname, o) + if err != nil { + b.Error(err) + return + } + + a, err = db2.Array("test") + if err != nil { + b.Error(err) + return + } + + s, err := a.Slice(nil, nil) + if err != nil { + b.Error(err) + return + } + + runtime.GC() + b.ResetTimer() + s.Do(func(subscripts, value []interface{}) (bool, error) { + return true, nil + }) + b.StopTimer() +} + +func TestRemoveArray0(t *testing.T) { + const aname = "TestRemoveArray0" + + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + err = db.Set(42, aname, 1, 2) + if err != nil { + t.Error(err) + return + } + + _, err = db.Get(aname, 1, 2) + if err != nil { + t.Error(err) + return + } + + err = db.RemoveArray(aname) + if err != nil { + t.Error(err) + return + } + + if err = db.enter(); err != nil { + t.Error(err) + return + } + + tr, err := db.acache.getTree(db, arraysPrefix, aname, false, aCacheSize) + if err != nil { + db.leave(&err) + t.Error(err) + return + } + + if err = db.leave(&err); err != nil { + t.Error(err) + return + } + + if tr != nil { + t.Error(tr) + return + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } + + if db, err = Open(dbname, o); err != nil { + t.Error(err) + return + } + + for { + <-time.After(time.Second) + if 
atomic.LoadInt32(&activeVictors) == 0 { + break + } + } + + if err := db.BeginUpdate(); err != nil { + t.Error(err) + return + } + + err = db.alloc.Verify( + lldb.NewMemFiler(), + func(err error) bool { + t.Error(err) + return true + }, + nil, + ) + + if err != nil { + t.Error(err) + return + } + + if err := db.EndUpdate(); err != nil { + t.Error(err) + return + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } +} + +func (db *DB) dumpAll(w io.Writer, msg string) { + fmt.Fprintln(w, msg) + root, err := db.root() + if err != nil { + fmt.Fprintln(w, "\nerror: ", err) + return + } + + fmt.Fprintln(w, "====\nroot\n====") + if err = root.tree.Dump(w); err != nil { + fmt.Fprintln(w, "\nerror: ", err) + return + } + + s, err := root.Slice(nil, nil) + if err != nil { + fmt.Fprintln(w, "\nerror: ", err) + return + } + + if err = s.Do(func(subscripts, value []interface{}) (bool, error) { + v, err := root.get(subscripts...) + if err != nil { + fmt.Fprintln(w, "\nerror: ", err) + return false, nil + } + + h := v.(int64) + t, err := lldb.OpenBTree(db.alloc, collate, h) + if err != nil { + fmt.Fprintln(w, "\nerror: ", err) + return false, err + } + + fmt.Fprintf(w, "----\n%#v @ %d\n----\n", subscripts[1], h) + if err = t.Dump(w); err != nil { + fmt.Fprintln(w, "\nerror: ", err) + return false, err + } + + return true, nil + }); err != nil { + fmt.Fprintln(w, "\nerror: ", err) + return + } +} + +func TestRemoveFile0(t *testing.T) { + const fname = "TestRemoveFile0" + + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + f, err := db.File(fname) + if err != nil { + t.Error(err) + return + } + + n, err := f.WriteAt([]byte{42}, 314) + if n != 1 || err != nil { + t.Error(err) + return + } + + files, err := db.Files() + if err != nil { + t.Error(err) + return + } + + v, err := files.Get(fname) + if v == nil || err != nil { + t.Error(err, v) + return + } + + err = db.RemoveFile(fname) + if err != 
nil { + t.Error(err) + return + } + + v, err = files.Get(fname) + if v != nil || err != nil { + t.Errorf("%#v %#v", err, v) + return + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } +} + +func TestRemove1(t *testing.T) { + const ( + aname = "TestRemove1" + N = 100 + ) + + compress = false // Test may correctly fail w/ compression. + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + sz0, err := db.Size() + if err != nil { + t.Error(err) + return + } + + for i := 0; i < N; i++ { + if err = db.Set(fmt.Sprintf("V%06d", i), aname, fmt.Sprintf("K%06d", i)); err != nil { + t.Error(err) + return + } + } + sz1, err := db.Size() + if err != nil { + t.Error(err) + return + } + + err = db.RemoveArray(aname) + if err != nil { + t.Error(err) + return + } + + err = db.Close() + if err != nil { + t.Error(err) + return + } + + fi, err := os.Stat(dbname) + if err != nil { + t.Error(err) + return + } + + sz2 := fi.Size() + + if db, err = Open(dbname, o); err != nil { + t.Error(err) + return + } + + for i := 0; i < N/2+1; i++ { + runtime.Gosched() + } + sz3, err := db.Size() + if err != nil { + t.Error(err) + return + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } + + if db, err = Open(dbname, o); err != nil { + t.Error(err) + return + } + + for i := 0; i < 2*N; i++ { + runtime.Gosched() + } + sz4, err := db.Size() + if err != nil { + t.Error(err) + return + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } + + t.Log(sz0) + t.Log(sz1) + t.Log(sz2) + t.Log(sz3) + t.Log(sz4) + + // Unstable + // if !(sz4 < sz3) { + // t.Error(sz3, sz4) + // } +} + +func enumStrKeys(a Array) (k []string, err error) { + s, err := a.Slice(nil, nil) + if err != nil { + return + } + + return k, s.Do(func(subscripts, value []interface{}) (bool, error) { + if len(subscripts) != 1 { + return false, (fmt.Errorf("internal error: %#v", subscripts)) + } + + v, ok := 
subscripts[0].(string) + if !ok { + return false, (fmt.Errorf("internal error: %T %#v", subscripts, subscripts)) + } + + k = append(k, v) + return true, nil + }) +} + +func TestArrays(t *testing.T) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + a, err := db.Arrays() + if err != nil { + t.Error(err) + return + } + + names, err := enumStrKeys(a) + if err != nil { + t.Error(err) + return + } + + if g, e := len(names), 0; g != e { + t.Error(g, e) + return + } + + if err = db.Set(nil, "foo"); err != nil { + t.Error(err) + return + } + + names, err = enumStrKeys(a) + if err != nil { + t.Error(err) + return + } + + if g, e := len(names), 1; g != e { + t.Error(g, e) + return + } + + if g, e := names[0], "foo"; g != e { + t.Error(g, e) + return + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } + +} + +func TestFiles(t *testing.T) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + a, err := db.Files() + if err != nil { + t.Error(err) + return + } + + names, err := enumStrKeys(a) + if err != nil { + t.Error(err) + return + } + + if g, e := len(names), 0; g != e { + t.Error(g, e) + return + } + + f, err := db.File("foo") + if err != nil { + t.Error(err) + return + } + + if n, err := f.WriteAt([]byte{42}, 0); n != 1 { + t.Error(err) + return + } + + names, err = enumStrKeys(a) + if err != nil { + t.Error(err) + return + } + + if g, e := len(names), 1; g != e { + t.Error(g, e) + return + } + + if g, e := names[0], "foo"; g != e { + t.Error(g, e) + return + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } + +} + +func TestInc0(t *testing.T) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + db.Set(10, "TestInc", "ten") + db.Set(nil, "TestInc", "nil") + 
db.Set("string", "TestInc", "string") + + a, err := db.Array("TestInc") + if err != nil { + t.Fatal(err) + } + + d, err := dump(a.tree) + if err != nil { + t.Fatal(err) + } + + t.Logf("\n%s", d) + + n, err := db.Inc(1, "TestInc", "nonexisting") + if err != nil || n != 1 { + t.Error(n, err) + return + } + + n, err = db.Inc(2, "TestInc", "ten") + if err != nil || n != 12 { + t.Error(n, err) + return + } + + n, err = db.Inc(3, "TestInc", "nil") + if err != nil || n != 3 { + t.Error(n, err) + return + } + + n, err = db.Inc(4, "TestInc", "string") + if err != nil || n != 4 { + t.Error(n, err) + return + } + + d, err = dump(a.tree) + if err != nil { + t.Fatal(err) + } + + t.Logf("\n%s", d) +} + +func TestInc1(t *testing.T) { + const ( + M = 3 + ) + N := 1000 + if *oACIDEnableWAL { + N = 20 + } + + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + runtime.GOMAXPROCS(M) + c := make(chan int64, M) + for i := 0; i < M; i++ { + go func() { + sum := int64(0) + for i := 0; i < N; i++ { + n, err := db.Inc(1, "TestInc1", "Invoice", 314159, "Items") + if err != nil { + t.Error(err) + break + } + + sum += n + } + c <- sum + }() + } + total := int64(0) + for i := 0; i < M; i++ { + select { + case <-time.After(time.Second * 20): + t.Error("timeouted") + return + case v := <-c: + total += v + } + } + + nn := int64(M * N) + if g, e := total, int64((nn*nn+nn)/2); g != e { + t.Error(g, e) + return + } +} + +func BenchmarkInc(b *testing.B) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + a, err := db.Array("test") + if err != nil { + b.Error(err) + return + } + + runtime.GC() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Inc(279, 314) + } + b.StopTimer() +} + +func TestFile0(t *testing.T) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + 
t.Fatal(err) + } + + defer db.Close() + + a, err := db.Files() + if err != nil { + t.Error(err) + return + } + + names, err := enumStrKeys(a) + if err != nil { + t.Error(err) + return + } + + if g, e := len(names), 0; g != e { + t.Error(g, e) + return + } + + f, err := db.File("foo") + if err != nil { + t.Error(err) + return + } + + if _, err = f.WriteAt([]byte("ABCDEF"), 4096); err != nil { + t.Error(err) + return + } + + names, err = enumStrKeys(a) + if err != nil { + t.Error(err) + return + } + + if g, e := len(names), 1; g != e { + t.Error(g, e) + return + } + + if g, e := names[0], "foo"; g != e { + t.Error(g, e) + return + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } +} + +func TestFileTruncate0(t *testing.T) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + f, err := db.File("TestFileTruncate") + if err != nil { + t.Error(err) + return + } + + fsz := func() int64 { + n, err := f.Size() + if err != nil { + t.Fatal(err) + } + return n + } + + // Check Truncate works. + sz := int64(1e6) + if err := f.Truncate(sz); err != nil { + t.Error(err) + return + } + + if g, e := fsz(), sz; g != e { + t.Error(g, e) + return + } + + sz *= 2 + if err := f.Truncate(sz); err != nil { + t.Error(err) + return + } + + if g, e := fsz(), sz; g != e { + t.Error(g, e) + return + } + + sz = 0 + if err := f.Truncate(sz); err != nil { + t.Error(err) + return + } + + if g, e := fsz(), sz; g != e { + t.Error(g, e) + return + } + + // Check Truncate(-1) doesn't work. 
+ sz = -1 + if err := f.Truncate(sz); err == nil { + t.Error(err) + return + } + + d, err := dump(f.tree) + if err != nil { + t.Fatal(err) + } + + t.Logf("\n%s", d) +} + +func TestFileReadAtWriteAt(t *testing.T) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + f, err := db.File("TestFileReadAtWriteAt") + if err != nil { + t.Error(err) + return + } + + fsz := func() int64 { + n, err := f.Size() + if err != nil { + t.Fatal(err) + } + return n + } + + const ( + N = 1 << 16 + M = 200 + ) + + s := make([]byte, N) + e := make([]byte, N) + rnd := rand.New(rand.NewSource(42)) + for i := range e { + s[i] = byte(rnd.Intn(256)) + } + n2 := 0 + for i := 0; i < M; i++ { + var from, to int + for { + from = rnd.Intn(N) + to = rnd.Intn(N) + if from != to { + break + } + } + if from > to { + from, to = to, from + } + for i := range s[from:to] { + s[from+i] = byte(rnd.Intn(256)) + } + copy(e[from:to], s[from:to]) + if to > n2 { + n2 = to + } + n, err := f.WriteAt(s[from:to], int64(from)) + if err != nil { + t.Error(err) + return + } + + if g, e := n, to-from; g != e { + t.Error(g, e) + return + } + } + + if g, e := fsz(), int64(n2); g != e { + t.Error(g, e) + return + } + + b := make([]byte, n2) + for i := 0; i <= M; i++ { + from := rnd.Intn(n2) + to := rnd.Intn(n2) + if from > to { + from, to = to, from + } + if i == M { + from, to = 0, n2 + } + n, err := f.ReadAt(b[from:to], int64(from)) + if err != nil && (!fileutil.IsEOF(err) && n != 0) { + t.Error(fsz(), from, to, err) + return + } + + if g, e := n, to-from; g != e { + t.Error(g, e) + return + } + + if g, e := b[from:to], e[from:to]; !bytes.Equal(g, e) { + t.Errorf( + "i %d from %d to %d len(g) %d len(e) %d\n---- got ----\n%s\n---- exp ----\n%s", + i, from, to, len(g), len(e), hex.Dump(g), hex.Dump(e), + ) + return + } + } + + mf := f + buf := &bytes.Buffer{} + if _, err := mf.WriteTo(buf); err != nil { + t.Error(err) + return + } 
+ + if g, e := buf.Bytes(), e[:n2]; !bytes.Equal(g, e) { + t.Errorf("\nlen %d\n%s\nlen %d\n%s", len(g), hex.Dump(g), len(e), hex.Dump(e)) + return + } + + if err := mf.Truncate(0); err != nil { + t.Error(err) + return + } + + if _, err := mf.ReadFrom(buf); err != nil { + t.Error(err) + return + } + + roundTrip := make([]byte, n2) + if n, err := mf.ReadAt(roundTrip, 0); err != nil && n == 0 { + t.Error(err) + return + } + + if g, e := roundTrip, e[:n2]; !bytes.Equal(g, e) { + t.Errorf("\nlen %d\n%s\nlen %d\n%s", len(g), hex.Dump(g), len(e), hex.Dump(e)) + return + } +} + +func TestFileReadAtHole(t *testing.T) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + f, err := db.File("TestFileReadAtHole") + if err != nil { + t.Error(err) + return + } + + n, err := f.WriteAt([]byte{1}, 40000) + if err != nil { + t.Error(err) + return + } + + if n != 1 { + t.Error(n) + return + } + + n, err = f.ReadAt(make([]byte, 1000), 20000) + if err != nil { + t.Error(err) + return + } + + if n != 1000 { + t.Error(n) + return + } +} + +func BenchmarkFileWrSeq(b *testing.B) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + buf := make([]byte, fileTestChunkSize) + for i := range buf { + buf[i] = byte(rand.Int()) + } + b.SetBytes(fileTestChunkSize) + f, err := db.File("BenchmarkMemFilerWrSeq") + if err != nil { + b.Error(err) + return + } + + runtime.GC() + b.ResetTimer() + var ofs int64 + for i := 0; i < b.N; i++ { + _, err := f.WriteAt(buf, ofs) + if err != nil { + b.Fatal(err) + } + + ofs = (ofs + fileTestChunkSize) % fileTotalSize + } + b.StopTimer() +} + +func BenchmarkFileRdSeq(b *testing.B) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, &Options{}) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + buf := make([]byte, fileTestChunkSize) + for i := 
range buf { + buf[i] = byte(rand.Int()) + } + b.SetBytes(fileTestChunkSize) + f, err := db.File("BenchmarkFileRdSeq") + if err != nil { + b.Error(err) + return + } + + var ofs int64 + for i := 0; i < b.N; i++ { + _, err := f.WriteAt(buf, ofs) + if err != nil { + b.Fatal(err) + } + + ofs = (ofs + fileTestChunkSize) % fileTotalSize + } + if err := db.Close(); err != nil { + b.Fatal(err) + return + } + + db2, err := Open(dbname, o) + if err != nil { + b.Error(err) + return + } + + defer db2.Close() + + f, err = db2.File("BenchmarkFileRdSeq") + if err != nil { + b.Error(err) + return + } + + runtime.GC() + b.ResetTimer() + ofs = 0 + for i := 0; i < b.N; i++ { + n, err := f.ReadAt(buf, ofs) + if err != nil && n == 0 { + b.Fatal(err) + } + + ofs = (ofs + fileTestChunkSize) % fileTotalSize + } + b.StopTimer() +} + +func TestBits0(t *testing.T) { + const ( + M = 1024 + ) + + N := 100 + if *oACIDEnableWAL { + N = 50 + } + + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + f, err := db.File("TestBits0") + if err != nil { + t.Error(err) + return + } + + b := f.Bits() + ref := map[uint64]bool{} + + rng := rand.New(rand.NewSource(42)) + for i := 0; i < N; i++ { + bit := uint64(rng.Int63()) + run := uint64(rng.Intn(M)) + if rng.Int()&1 == 1 { + run = 1 + } + op := rng.Intn(3) + + switch op { + case opOn: + if err = b.On(bit, run); err != nil { + t.Error(err) + return + } + for i := bit; i < bit+run; i++ { + ref[i] = true + } + case opOff: + if err = b.Off(bit, run); err != nil { + t.Error(err) + return + } + for i := bit; i < bit+run; i++ { + ref[i] = false + } + case opCpl: + if err = b.Cpl(bit, run); err != nil { + t.Error(err) + return + } + for i := bit; i < bit+run; i++ { + ref[i] = !ref[i] + } + } + + } + + for bit, v := range ref { + gv, err := b.Get(bit) + if err != nil { + t.Error(err) + return + } + + if gv != v { + d, err := dump(f.tree) + if err != nil { + t.Log(err) + } + 
t.Logf("\n%s", d) + t.Errorf("%#x %t %t", bit, gv, v) + return + } + } +} + +func benchmarkBitsOn(b *testing.B, n uint64) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + f, err := db.File("TestBits0") + if err != nil { + b.Error(err) + return + } + + bits := f.Bits() + + rng := rand.New(rand.NewSource(42)) + a := make([]uint64, 1024*1024) + for i := range a { + a[i] = uint64(rng.Int63()) + } + + b.SetBytes(int64(n) / 8) + runtime.GC() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bits.On(a[i&0xfffff], n) + } + + b.StopTimer() +} + +func BenchmarkBitsOn16(b *testing.B) { + benchmarkBitsOn(b, 16) +} + +func BenchmarkBitsOn1024(b *testing.B) { + benchmarkBitsOn(b, 1024) +} + +func BenchmarkBitsOn65536(b *testing.B) { + benchmarkBitsOn(b, 65536) +} + +func BenchmarkBitsGetSeq(b *testing.B) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + f, err := db.File("TestBitsGetSeq") + if err != nil { + b.Error(err) + return + } + + rng := rand.New(rand.NewSource(42)) + buf := make([]byte, 1024*1024) + for i := range buf { + buf[i] = byte(rng.Int63()) + } + + if _, err := f.WriteAt(buf, 0); err != nil { + b.Fatal(err) + } + + bits := f.Bits() + runtime.GC() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bits.Get(uint64(i) & 0x7fffff) + } + b.StopTimer() +} + +func BenchmarkBitsGetRnd(b *testing.B) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + f, err := db.File("TestBitsGetRnd") + if err != nil { + b.Error(err) + return + } + + rng := rand.New(rand.NewSource(42)) + buf := make([]byte, 1024*1024) + for i := range buf { + buf[i] = byte(rng.Int63()) + } + + if _, err := f.WriteAt(buf, 0); err != nil { + b.Fatal(err) + } + + bits := f.Bits() + + a := make([]uint64, 1024*1024) + for i := 
range a { + a[i] = uint64(rng.Int63() & 0x7fffff) + } + + runtime.GC() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bits.Get(a[i&0xfffff]) + } + b.StopTimer() +} + +func TestTmpDirRemoval(t *testing.T) { + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + names := []string{"b", "/b", "/b/", "tmp", "/tmp", "/tmp/", "/tmp/foo", "z", "/z", "/z/"} + + for i, name := range names { + if err := db.Set(i, name, 1, 2, 3); err != nil { + t.Error(err) + return + } + } + + for i, name := range names { + + f, err := db.File(name) + if err != nil { + t.Error(err) + return + } + + if _, err := f.WriteAt([]byte{byte(i)}, int64(i)); err != nil { + t.Error(err) + return + } + } + + if err = db.Close(); err != nil { + t.Fatal(err) + } + + db, err = Open(dbname, o) + if err != nil { + t.Fatal(err) + } + + ref := map[string]bool{} + for _, name := range names { + ref[name] = true + } + + aa, err := db.Arrays() + if err != nil { + t.Error(err) + return + } + + s, err := aa.Slice(nil, nil) + if err := s.Do(func(subscripts, value []interface{}) (bool, error) { + k := subscripts[0].(string) + delete(ref, k) + return true, nil + }); err != nil { + t.Error(err) + return + } + + if len(ref) == 0 { + t.Error(0) + return + } + + for k := range ref { + if !strings.HasPrefix(k, "/tmp/") { + t.Error(k) + return + } + } + + ref = map[string]bool{} + for _, name := range names { + ref[name] = true + } + + ff, err := db.Files() + if err != nil { + t.Error(err) + return + } + + s, err = ff.Slice(nil, nil) + if err := s.Do(func(subscripts, value []interface{}) (bool, error) { + k := subscripts[0].(string) + delete(ref, k) + return true, nil + }); err != nil { + t.Error(err) + return + } + + if len(ref) == 0 { + t.Error(0) + return + } + + for k := range ref { + if !strings.HasPrefix(k, "/tmp/") { + t.Error(k) + return + } + } + +} + +/* + +2013-04-25 +========== + +(15:54) 
jnml@fsc-r550:~/src/github.com/cznic/exp/dbm$ . bench +++ go test -v -run Bench -keep -tbench -cpu 4 +=== RUN TestBenchArraySetGet-4 +--- PASS: TestBenchArraySetGet-4 (114.30 seconds) + all_test.go:2820: WR: 51580 ops in 6.000e+01 s, 8.597e+02 ops/s, 1.163e-03 s/op + all_test.go:2869: RD: 51580 ops in 5.425e+01 s, 9.508e+02 ops/s, 1.052e-03 s/op +PASS +ok github.com/cznic/exp/dbm 114.311s +++ go test -v -run Bench -keep -tbench -cpu 4 -xact +=== RUN TestBenchArraySetGet-4 +--- PASS: TestBenchArraySetGet-4 (112.85 seconds) + all_test.go:2820: WR: 46338 ops in 6.000e+01 s, 7.723e+02 ops/s, 1.295e-03 s/op + all_test.go:2869: RD: 46338 ops in 5.279e+01 s, 8.778e+02 ops/s, 1.139e-03 s/op +PASS +ok github.com/cznic/exp/dbm 112.859s +++ go test -v -run Bench -keep -tbench -cpu 4 -wal -grace 0ms +=== RUN TestBenchArraySetGet-4 +--- PASS: TestBenchArraySetGet-4 (60.38 seconds) + all_test.go:2820: WR: 602 ops in 6.009e+01 s, 1.002e+01 ops/s, 9.982e-02 s/op, max WAL size 7056 + all_test.go:2869: RD: 602 ops in 1.244e-01 s, 4.838e+03 ops/s, 2.067e-04 s/op, max WAL size 0 +PASS +ok github.com/cznic/exp/dbm 60.396s +++ go test -v -run Bench -keep -tbench -cpu 4 -wal -grace 1ms +=== RUN TestBenchArraySetGet-4 +--- PASS: TestBenchArraySetGet-4 (94.13 seconds) + all_test.go:2820: WR: 33664 ops in 6.003e+01 s, 5.608e+02 ops/s, 1.783e-03 s/op, max WAL size 37328 + all_test.go:2869: RD: 33664 ops in 3.380e+01 s, 9.961e+02 ops/s, 1.004e-03 s/op, max WAL size 0 +PASS +ok github.com/cznic/exp/dbm 94.140s +++ go test -v -run Bench -keep -tbench -cpu 4 -wal -grace 10ms +=== RUN TestBenchArraySetGet-4 +--- PASS: TestBenchArraySetGet-4 (99.36 seconds) + all_test.go:2820: WR: 37880 ops in 6.000e+01 s, 6.313e+02 ops/s, 1.584e-03 s/op, max WAL size 48224 + all_test.go:2869: RD: 37880 ops in 3.916e+01 s, 9.673e+02 ops/s, 1.034e-03 s/op, max WAL size 0 +PASS +ok github.com/cznic/exp/dbm 99.372s +++ go test -v -run Bench -keep -tbench -cpu 4 -wal -grace 100ms +=== RUN TestBenchArraySetGet-4 +--- 
PASS: TestBenchArraySetGet-4 (100.20 seconds) + all_test.go:2820: WR: 38464 ops in 6.018e+01 s, 6.392e+02 ops/s, 1.564e-03 s/op, max WAL size 46928 + all_test.go:2869: RD: 38464 ops in 3.981e+01 s, 9.661e+02 ops/s, 1.035e-03 s/op, max WAL size 0 +PASS +ok github.com/cznic/exp/dbm 100.213s +++ go test -v -run Bench -keep -tbench -cpu 4 -wal -grace 1s +=== RUN TestBenchArraySetGet-4 +--- PASS: TestBenchArraySetGet-4 (108.00 seconds) + all_test.go:2820: WR: 44508 ops in 6.000e+01 s, 7.418e+02 ops/s, 1.348e-03 s/op, max WAL size 57264 + all_test.go:2869: RD: 44508 ops in 4.781e+01 s, 9.310e+02 ops/s, 1.074e-03 s/op, max WAL size 0 +PASS +ok github.com/cznic/exp/dbm 108.016s +++ go test -v -run Bench -keep -tbench -cpu 4 -wal -grace 10s +=== RUN TestBenchArraySetGet-4 +--- PASS: TestBenchArraySetGet-4 (111.36 seconds) + all_test.go:2820: WR: 47565 ops in 6.000e+01 s, 7.927e+02 ops/s, 1.261e-03 s/op, max WAL size 162992 + all_test.go:2869: RD: 47565 ops in 5.113e+01 s, 9.302e+02 ops/s, 1.075e-03 s/op, max WAL size 0 +PASS +ok github.com/cznic/exp/dbm 111.376s +(16:08) jnml@fsc-r550:~/src/github.com/cznic/exp/dbm$ + +2013-05-09 +========== + +# 16:05 +jnml@fsc-r630:~/src/github.com/cznic/exp/dbm$ go test -tbench -run TestBench -v -wal -xact +=== RUN TestBenchArraySetGet +--- PASS: TestBenchArraySetGet (106.22 seconds) + all_test.go:2759: WR: 27178 ops in 6.000e+01 s, 4.530e+02 ops/s, 2.208e-03 s/op, max WAL size 35120 + all_test.go:2808: RD: 27178 ops in 4.600e+01 s, 5.909e+02 ops/s, 1.692e-03 s/op, max WAL size 0 +PASS +ok github.com/cznic/exp/dbm 106.234s +jnml@fsc-r630:~/src/github.com/cznic/exp/dbm$ + +*/ +func TestBenchArraySetGet(t *testing.T) { + if !*oBench { + t.Log("Must be enabled by -tbench") + return + } + + dir, dbname := temp() + defer os.RemoveAll(dir) + + db, err := Create(dbname, o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + a, err := db.Array("test") + if err != nil { + t.Error(err) + return + } + + c := time.After(time.Minute) + t0 
:= time.Now() + var maxSet int64 +loop: + for i := 0; ; { + select { + case <-c: + maxSet = int64(i - 1) + ftot := float64(time.Since(t0)) / float64(time.Second) + s := "" + if af, ok := db.filer.(*lldb.ACIDFiler0); ok { + s = fmt.Sprintf(", max WAL size %d", af.PeakWALSize()) + } + t.Logf("WR: %d ops in %8.3e s, %8.3e ops/s, %8.3e s/op%s", i, ftot, float64(i)/ftot, ftot/float64(i), s) + break loop + default: + } + + if err = a.Set(i^0x55555555, i); err != nil { + t.Error(err) + return + } + + i++ + } + + if err = db.Close(); err != nil { + t.Error(err) + return + } + + if db, err = Open(dbname, o); err != nil { + t.Error(err) + return + } + + a, err = db.Array("test") + if err != nil { + t.Error(err) + return + } + + t0 = time.Now() + for i := int64(0); i <= maxSet; i++ { + v, err := a.Get(i) + if err != nil { + t.Error(err) + return + } + + if g, e := v, int64(i^0x55555555); g != e { + t.Errorf("i %d: %T(%v) %T(%v)", i, g, g, e, e) + return + } + } + + ftot := float64(time.Since(t0)) / float64(time.Second) + i := maxSet + 1 + s := "" + if af, ok := db.filer.(*lldb.ACIDFiler0); ok { + s = fmt.Sprintf(", max WAL size %d", af.PeakWALSize()) + } + t.Logf("RD: %d ops in %8.3e s, %8.3e ops/s, %8.3e s/op%s", i, ftot, float64(i)/ftot, ftot/float64(i), s) +} + +func TestLocking(t *testing.T) { + db, err := CreateTemp("", "test-dbm-", ".db", &Options{}) + if err != nil { + t.Fatal(err) + } + + defer func() { + if db != nil { + n := db.Name() + db.Close() + os.Remove(n) + } + }() + + // Must fail on lock or file exists + if _, err = Create(db.Name(), &Options{}); err == nil { + t.Error(err) + return + } + + t.Log(err) + // Must fail on lock + if _, err = Open(db.Name(), &Options{}); err == nil { + t.Error(err) + return + } + + t.Log(err) + n := db.Name() + if err = db.Close(); err != nil { + t.Error(err) + return + } + + // Must fail on DB file exists + if _, err = Create(n, &Options{}); err == nil { + t.Error(err) + return + } + + t.Log(err) + // Must succeed + if db, err 
= Open(n, &Options{}); err != nil { + t.Error(err) + return + } +} + +func TestBug20130712(t *testing.T) { + db, err := CreateMem(&Options{}) + if err != nil { + t.Fatal(err) + } + + a, err := db.Array("t") + if err != nil { + t.Fatal(err) + } + + if err := a.Set(nil, 1, 2); err != nil { + t.Fatal(err) + } + + if err := a.Set(nil, 171, 1); err != nil { + t.Fatal(err) + } + + a, err = a.Array(1) + if err != nil { + t.Fatal(err) + } + + s, err := a.Slice(nil, nil) + if err != nil { + t.Fatal(err) + } + + if err := s.Do(func(subscripts, value []interface{}) (bool, error) { + t.Log(subscripts, value) + return true, nil + }); err != nil { + t.Fatal(err) + } +} + +func TestCreateWithEmptyWAL(t *testing.T) { + dir, err := ioutil.TempDir("", "dbm-test-create") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(dir) + dbName := filepath.Join(dir, "test.db") + var o Options + walName := o.walName(dbName, "") + wal, err := os.Create(walName) + if err != nil { + t.Error(err) + return + } + + wal.Close() + defer os.Remove(walName) + + db, err := Create(dbName, &Options{}) + if err != nil { + t.Error(err) + return + } + + if err = db.Set("val", "subscript"); err != nil { + t.Error(err) + } + db.Close() +} + +func TestCreateWithNonEmptyWAL(t *testing.T) { + dir, err := ioutil.TempDir("", "dbm-test-create") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(dir) + dbName := filepath.Join(dir, "test.db") + var o Options + walName := o.walName(dbName, "") + wal, err := os.Create(walName) + if err != nil { + t.Error(err) + return + } + + if n, err := wal.Write([]byte{0}); n != 1 || err != nil { + t.Error(n, err) + return + } + + wal.Close() + defer os.Remove(walName) + + if _, err = Create(dbName, &Options{ACID: ACIDFull}); err == nil { + t.Error("Unexpected success") + return + } +} + +func TestIsMem(t *testing.T) { + db, err := CreateTemp("", "dbm-test", ".tmp", &Options{}) + if err != nil { + t.Fatal(err) + } + + defer func() { + nm := db.Name() + db.Close() + 
os.Remove(nm) + }() + + if g, e := db.IsMem(), false; g != e { + t.Error(g, e) + return + } + + db2, err := CreateMem(&Options{}) + if err != nil { + t.Fatal(err) + } + + if g, e := db2.IsMem(), true; g != e { + t.Error(g, e) + return + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/array.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/array.go new file mode 100644 index 00000000..a7bb08e8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/array.go @@ -0,0 +1,547 @@ +// Copyright 2014 The dbm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dbm + +import ( + "fmt" + "io" + + "camlistore.org/third_party/github.com/cznic/exp/lldb" +) + +// Array is a reference to a subtree of an array. +type Array struct { + db *DB + tree *lldb.BTree + prefix []byte + name string + namespace byte +} + +// MemArray returns an Array associated with a subtree of an anonymous array, +// determined by subscripts. MemArrays are resource limited as they are +// completely held in memory and are not automatically persisted. +func MemArray(subscripts ...interface{}) (a Array, err error) { + a.db = &DB{} + if a, err = a.Array(subscripts...); err != nil { + return a, err + } + + a.tree = lldb.NewBTree(collate) + return +} + +func bpack(a []byte) []byte { + if cap(a) > len(a) { + return append([]byte(nil), a...) + } + + return a +} + +// Named trees (arrays) can get removed, but references to them (Arrays) may +// outlive that. db.bkl locked is assumed. ok => a.tree != nil && err == nil. 
+func (a *Array) validate(canCreate bool) (ok bool, err error) { + if a.tree != nil && (a.tree.Handle() == 1 || a.tree.IsMem()) { + return true, nil + } + + switch a.namespace { + case arraysPrefix: + a.tree, err = a.db.acache.getTree(a.db, arraysPrefix, a.name, canCreate, aCacheSize) + case filesPrefix: + a.tree, err = a.db.fcache.getTree(a.db, filesPrefix, a.name, canCreate, fCacheSize) + case systemPrefix: + a.tree, err = a.db.scache.getTree(a.db, systemPrefix, a.name, canCreate, sCacheSize) + default: + panic("internal error") + } + + switch { + case a.tree == nil && err == nil: + return false, nil + case a.tree == nil && err != nil: + return false, err + case a.tree != nil && err == nil: + return true, nil + //case a.tree != nil && err != nil: + } + panic("internal error") +} + +// Array returns an object associated with a subtree of array 'a', determined +// by subscripts. +func (a *Array) Array(subscripts ...interface{}) (r Array, err error) { + if err = a.db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + a.db.leave(&err) + }() + + return a.array(subscripts...) +} + +func (a *Array) array(subscripts ...interface{}) (r Array, err error) { + r = *a + prefix, err := lldb.EncodeScalars(subscripts...) + if err != nil { + return + } + + r.prefix = append(bpack(r.prefix), prefix...) 
+ return +} + +func (a *Array) bset(val, key []byte) (err error) { + err = a.tree.Set(append(a.prefix, key...), val) + return +} + +func (a *Array) binc(delta int64, key []byte) (r int64, err error) { + _, _, err = a.tree.Put( + nil, //TODO buffers + append(a.prefix, key...), + func(key []byte, old []byte) (new []byte, write bool, err error) { + write = true + if len(old) != 0 { + decoded, err := lldb.DecodeScalars(old) + switch { + case err != nil: + // nop + case len(decoded) != 1: + // nop + default: + r, _ = decoded[0].(int64) + } + } + r += delta + new, err = lldb.EncodeScalars(r) + return + }, + ) + return +} + +func (a *Array) bget(key []byte) (value []byte, err error) { + return a.tree.Get(nil, append(a.prefix, key...)) +} + +func (a *Array) bdelete(key []byte) (err error) { + return a.tree.Delete(append(a.prefix, key...)) +} + +// Set sets the value at subscripts in subtree 'a'. Any previous value, if +// existed, is overwritten by the new one. +func (a *Array) Set(value interface{}, subscripts ...interface{}) (err error) { + if err = a.db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + a.db.leave(&err) + }() + + if t := a.tree; t != nil && !t.IsMem() && a.tree.Handle() == 1 { + return &lldb.ErrPERM{Src: "dbm.Array.Set"} + } + + if ok, err := a.validate(true); !ok { + return err + } + + return a.set(value, subscripts...) +} + +func (a *Array) set(value interface{}, subscripts ...interface{}) (err error) { + val, err := encVal(value) + if err != nil { + return + } + + key, err := lldb.EncodeScalars(subscripts...) + if err != nil { + return + } + + return a.bset(val, key) +} + +// Inc atomically increments the value at subscripts by delta and returns the +// new value. If the value doesn't exists before calling Inc or if the value is +// not an integer then the value is considered to be zero. 
+func (a *Array) Inc(delta int64, subscripts ...interface{}) (val int64, err error) { + if err = a.db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + a.db.leave(&err) + }() + + if t := a.tree; t != nil && !t.IsMem() && a.tree.Handle() == 1 { + return 0, &lldb.ErrPERM{Src: "dbm.Array.Inc"} + } + + if ok, err := a.validate(true); !ok { + return 0, err + } + + return a.inc(delta, subscripts...) +} + +func (a *Array) inc(delta int64, subscripts ...interface{}) (val int64, err error) { + key, err := lldb.EncodeScalars(subscripts...) + if err != nil { + return + } + + return a.binc(delta, key) +} + +// Get returns the value at subscripts in subtree 'a', or nil if no such value +// exists. +func (a *Array) Get(subscripts ...interface{}) (value interface{}, err error) { + if err = a.db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + a.db.leave(&err) + }() + + if ok, e := a.validate(false); !ok || err != nil { + err = e + return + } + + value, err = a.get(subscripts...) + if value == nil { + return + } + + if t := a.tree; t != nil && !t.IsMem() && t.Handle() == 1 { + value = 0 + } + return +} + +func (a *Array) get(subscripts ...interface{}) (value interface{}, err error) { + key, err := lldb.EncodeScalars(subscripts...) + if err != nil { + return + } + + val, err := a.bget(key) + if err != nil { + return + } + + if val == nil { + return + } + + va, err := lldb.DecodeScalars(val) + if err != nil { + return nil, err + } + + value = va + if len(va) == 1 { + value = va[0] + } + return +} + +// Delete deletes the value at subscripts in array. 
+func (a *Array) Delete(subscripts ...interface{}) (err error) { + if err = a.db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + a.db.leave(&err) + }() + + if t := a.tree; t != nil && !t.IsMem() && a.tree.Handle() == 1 { + return &lldb.ErrPERM{Src: "dbm.Array.Delete"} + } + + if ok, err := a.validate(false); !ok { + return err + } + + return a.delete(subscripts...) +} + +func (a *Array) delete(subscripts ...interface{}) (err error) { + key, err := lldb.EncodeScalars(subscripts...) + if err != nil { + return + } + + return a.bdelete(key) +} + +// Clear empties the subtree at subscripts in 'a'. +func (a *Array) Clear(subscripts ...interface{}) (err error) { + //TODO optimize for clear "everything" + + if err = a.db.enter(); err != nil { + return + } + + doLeave := true + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + if doLeave { + a.db.leave(&err) + } + }() + + if t := a.tree; t != nil && !t.IsMem() && a.tree.Handle() == 1 { + return &lldb.ErrPERM{Src: "dbm.Array.Clear"} + } + + if ok, err := a.validate(false); !ok { + return err + } + + doLeave = false + if a.db.leave(&err) != nil { + return + } + + a0 := *a + a0.prefix = nil + + prefix, err := lldb.DecodeScalars(a.prefix) + if err != nil { + panic("internal error") + } + + subscripts = append(prefix, subscripts...) + n := len(subscripts) + + bSubscripts, err := lldb.EncodeScalars(subscripts...) + if err != nil { + panic("internal error") + } + + s, err := a0.Slice(nil, nil) + if err != nil { + return + } + + return s.Do(func(actualSubscripts, value []interface{}) (more bool, err error) { + if len(actualSubscripts) < n { + return + } + + common := actualSubscripts[:n] + bcommon, err := lldb.EncodeScalars(common...) + if err != nil { + panic("internal error") + } + + switch collate(bcommon, bSubscripts) { + case -1: + return true, nil + case 0: + return true, a0.Delete(actualSubscripts...) 
+ } + // case 1: + return false, nil + }) +} + +// Slice returns a new Slice from Array, with a subscripts range of [from, to]. +// If from is nil it works as 'from lowest existing key'. If to is nil it +// works as 'to highest existing key'. +func (a *Array) Slice(from, to []interface{}) (s *Slice, err error) { + if err = a.db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + a.db.leave(&err) + }() + + prefix, err := lldb.DecodeScalars(a.prefix) + if err != nil { + return + } + + return &Slice{ + a: a, + prefix: prefix, + from: from, + to: to, + }, nil +} + +// Dump outputs a human readable dump of a to w. Intended use is only for +// examples or debugging. Some type information is lost in the rendering, for +// example a float value '17.' and an integer value '17' may both output as +// '17'. +// +// Note: Dump will lock the database until finished. +func (a *Array) Dump(w io.Writer) (err error) { + if err = a.db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + a.db.leave(&err) + }() + + return a.tree.Dump(w) +} + +func (a *Array) Tree() (tr *lldb.BTree, err error) { + _, err = a.validate(false) + if err != nil { + return + } + + return a.tree, nil +} + +// Enumerator returns a "raw" enumerator of the whole array. It's initially +// positioned on the first (asc is true) or last (asc is false) +// subscripts/value pair in the array. +// +// This method is safe for concurrent use by multiple goroutines. 
+func (a *Array) Enumerator(asc bool) (en *Enumerator, err error) { + if err = a.db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + switch x := e.(type) { + case error: + err = x + default: + err = fmt.Errorf("%v", e) + } + } + a.db.leave(&err) + }() + + var e Enumerator + switch asc { + case true: + e.en, err = a.tree.SeekFirst() + default: + e.en, err = a.tree.SeekLast() + } + if err != nil { + return + } + + e.db = a.db + return &e, nil +} + +// Enumerator provides visiting all K/V pairs in a DB/range. +type Enumerator struct { + db *DB + en *lldb.BTreeEnumerator +} + +// Next returns the currently enumerated raw KV pair, if it exists and moves to +// the next KV in the key collation order. If there is no KV pair to return, +// err == io.EOF is returned. +// +// This method is safe for concurrent use by multiple goroutines. +func (e *Enumerator) Next() (key, value []interface{}, err error) { + if err = e.db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + switch x := e.(type) { + case error: + err = x + default: + err = fmt.Errorf("%v", e) + } + } + e.db.leave(&err) + }() + + k, v, err := e.en.Next() + if err != nil { + return + } + + if key, err = lldb.DecodeScalars(k); err != nil { + return + } + + value, err = lldb.DecodeScalars(v) + return +} + +// Prev returns the currently enumerated raw KV pair, if it exists and moves to +// the previous KV in the key collation order. If there is no KV pair to +// return, err == io.EOF is returned. +// +// This method is safe for concurrent use by multiple goroutines. 
+func (e *Enumerator) Prev() (key, value []interface{}, err error) { + if err = e.db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + switch x := e.(type) { + case error: + err = x + default: + err = fmt.Errorf("%v", e) + } + } + e.db.leave(&err) + }() + + k, v, err := e.en.Prev() + if err != nil { + return + } + + if key, err = lldb.DecodeScalars(k); err != nil { + return + } + + value, err = lldb.DecodeScalars(v) + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/bench b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/bench new file mode 100644 index 00000000..7b01b770 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/bench @@ -0,0 +1,9 @@ +set -x +go test -v -run Bench -tbench -cpu 4 $OPTS +go test -v -run Bench -tbench -cpu 4 -xact $OPTS +go test -v -run Bench -tbench -cpu 4 -wal -grace 0ms $OPTS +go test -v -run Bench -tbench -cpu 4 -wal -grace 1ms $OPTS +go test -v -run Bench -tbench -cpu 4 -wal -grace 10ms $OPTS +go test -v -run Bench -tbench -cpu 4 -wal -grace 100ms $OPTS +go test -v -run Bench -tbench -cpu 4 -wal -grace 1s $OPTS +go test -v -run Bench -tbench -cpu 4 -wal -grace 10s $OPTS diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/bits.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/bits.go new file mode 100644 index 00000000..aa51957f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/bits.go @@ -0,0 +1,306 @@ +// Copyright 2014 The dbm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package dbm + +import ( + "sync" + + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +const ( + opOn = iota + opOff + opCpl +) + +const ( + bitCacheBits = 11 + bitCacheSize = 1 << bitCacheBits + bitCacheMask = bitCacheSize - 1 +) + +/* + +bitCacheBits: 8 +BenchmarkBitsGetSeq 20000000 124 ns/op +BenchmarkBitsGetRnd 10000 146852 ns/op + +bitCacheBits: 9 +BenchmarkBitsGetSeq 20000000 99.9 ns/op +BenchmarkBitsGetRnd 10000 146174 ns/op + +bitCacheBits: 10 +BenchmarkBitsGetSeq 20000000 88.3 ns/op +BenchmarkBitsGetRnd 10000 148670 ns/op + +bitCacheBits: 11 +BenchmarkBitsGetSeq 20000000 80.9 ns/op +BenchmarkBitsGetRnd 10000 146512 ns/op + +bitCacheBits: 12 +BenchmarkBitsGetSeq 20000000 80.9 ns/op +BenchmarkBitsGetRnd 10000 146713 ns/op + +bitCacheBits: 13 +BenchmarkBitsGetSeq 20000000 79.4 ns/op +BenchmarkBitsGetRnd 10000 146347 ns/op + +bitCacheBits: 14 +BenchmarkBitsGetSeq 20000000 79.0 ns/op +BenchmarkBitsGetRnd 10000 146128 ns/op + +bitCacheBits: 15 +BenchmarkBitsGetSeq 20000000 78.2 ns/op +BenchmarkBitsGetRnd 10000 146194 ns/op + +bitCacheBits: 16 +BenchmarkBitsGetSeq 20000000 78.0 ns/op +BenchmarkBitsGetRnd 10000 144808 ns/op + +*/ + +var ( + byteMask = [8][8]byte{ // [from][to] + [8]uint8{0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff}, + [8]uint8{0x00, 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e, 0xfe}, + [8]uint8{0x00, 0x00, 0x04, 0x0c, 0x1c, 0x3c, 0x7c, 0xfc}, + [8]uint8{0x00, 0x00, 0x00, 0x08, 0x18, 0x38, 0x78, 0xf8}, + [8]uint8{0x00, 0x00, 0x00, 0x00, 0x10, 0x30, 0x70, 0xf0}, + [8]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x60, 0xe0}, + [8]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xc0}, + [8]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80}, + } + + bitMask = [8]byte{0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80} + + onePage [pgSize]byte +) + +func init() { + for i := range onePage { + onePage[i] = 0xff + } +} + +// Bits is a File with a bit-manipulation set of methods. It can be useful as +// e.g. a bitmap index[1]. 
+// +// Mutating or reading single bits in a disk file is not a fast operation. Bits +// include a memory cache improving sequential scan/access by Get. The cache is +// coherent with writes/updates but _is not_ coherent with other Bits instances +// of the same underlying File. It is thus recommended to share a single *Bits +// instance between all writers and readers of the same bit file. Concurrent +// overlapping updates are safe, but the order of their execution is +// unspecified and they may even interleave. Coordination in the dbm client is +// needed in such case. +// +// [1]: http://en.wikipedia.org/wiki/Bitmap_index +type Bits struct { + f *File + rwmu sync.RWMutex + page int64 + cache [bitCacheSize]byte +} + +func (b *Bits) pageBytes(pgI int64, pgFrom, pgTo, op int) (err error) { + f := b.f + a := (*Array)(f) + switch op { + case opOn: + if pgFrom == 0 && pgTo == pgSize*8-1 { + return a.Set(onePage[:], pgI) + } + + _, err = f.writeAt(onePage[pgFrom:pgTo+1], pgI*pgSize+int64(pgFrom), true) + return + case opOff: + if pgFrom == 0 && pgTo == pgSize*8-1 { + return a.Delete(pgI) + } + + _, err = f.writeAt(zeroPage[pgFrom:pgTo+1], pgI*pgSize+int64(pgFrom), true) + return + } + + // case opCpl: + var buf [pgSize]byte + var n int + if n, err = f.readAt(buf[:], pgSize, true); n != pgSize { + return + } + + for i, v := range buf[pgFrom : pgTo+1] { + buf[i] = ^v + } + if buf == zeroPage { + return a.Delete(pgI) + } + + _, err = f.writeAt(buf[:], pgI*pgSize+int64(pgFrom), true) + return +} + +func (b *Bits) pageByte(off int64, fromBit, toBit, op int) (err error) { + f := b.f + var buf [1]byte + if _, err = f.readAt(buf[:], off, true); err != nil { + return + } + + switch op { + case opOn: + buf[0] |= byteMask[fromBit][toBit] + case opOff: + buf[0] &^= byteMask[fromBit][toBit] + case opCpl: + buf[0] ^= byteMask[fromBit][toBit] + } + _, err = f.writeAt(buf[:], off, true) + return +} + +func (b *Bits) pageBits(pgI int64, fromBit, toBit, op int) (err error) { + pgFrom, 
pgTo := fromBit>>3, toBit>>3 + from, to := fromBit&7, toBit&7 + switch { + case from == 0 && to == 7: + return b.pageBytes(pgI, pgFrom, pgTo, op) + case from == 0 && to != 7: + switch pgTo - pgFrom { + case 0: + return b.pageByte(pgI*pgSize+int64(pgFrom), from, to, op) + case 1: + if err = b.pageByte(pgI*pgSize+int64(pgFrom), from, 7, op); err != nil { + return + } + + return b.pageByte(pgI*pgSize+int64(pgTo), 0, to, op) + default: + if err = b.pageByte(pgI*pgSize+int64(pgFrom), from, 7, op); err != nil { + return + } + + if err = b.pageBytes(pgI, pgFrom+1, pgTo-1, op); err != nil { + return + } + + return b.pageByte(pgI*pgSize+int64(pgTo), 0, to, op) + } + case from != 0 && to == 7: + switch pgTo - pgFrom { + case 0: + return b.pageByte(pgI*pgSize+int64(pgFrom), from, 7, op) + case 1: + if err = b.pageByte(pgI*pgSize+int64(pgFrom), from, 7, op); err != nil { + return + } + + return b.pageByte(pgI*pgSize+int64(pgTo), 0, 7, op) + default: + if err = b.pageByte(pgI*pgSize+int64(pgFrom), from, 7, op); err != nil { + return + } + + if err = b.pageBytes(pgI, pgFrom+1, pgTo-1, op); err != nil { + return + } + + return b.pageByte(pgI*pgSize+int64(pgTo), 0, 7, op) + } + } + // case from != 0 && to != 7: + switch pgTo - pgFrom { + case 0: + return b.pageByte(pgI*pgSize+int64(pgFrom), from, to, op) + case 1: + if err = b.pageByte(pgI*pgSize+int64(pgFrom), from, 7, op); err != nil { + return + } + + return b.pageByte(pgI*pgSize+int64(pgTo), 0, to, op) + default: + if err = b.pageByte(pgI*pgSize+int64(pgFrom), from, 7, op); err != nil { + return + } + + if err = b.pageBytes(pgI, pgFrom+1, pgTo-1, op); err != nil { + return + } + + return b.pageByte(pgI*pgSize+int64(pgTo), 0, to, op) + } +} + +func (b *Bits) ops(fromBit, toBit uint64, op int) (err error) { + const ( + bitsPerPage = pgSize * 8 + bitsPerPageMask = bitsPerPage - 1 + ) + + b.page = -1 + rem := toBit - fromBit + 1 + pgI := int64(fromBit >> (pgBits + 3)) + for rem != 0 { + pgFrom := fromBit & bitsPerPageMask + pgTo 
:= mathutil.MinUint64(bitsPerPage-1, pgFrom+rem-1) + n := pgTo - pgFrom + 1 + if err = b.pageBits(pgI, int(pgFrom), int(pgTo), op); err != nil { + return + } + + pgI++ + rem -= n + fromBit += n + } + return +} + +// On sets run bits starting from bit. +func (b *Bits) On(bit, run uint64) (err error) { + if run == 0 { + return + } + + return b.ops(bit, bit+run-1, opOn) +} + +// Off resets run bits starting from bit. +func (b *Bits) Off(bit, run uint64) (err error) { + if run == 0 { + return + } + + return b.ops(bit, bit+run-1, opOff) +} + +// Cpl complements run bits starting from bit. +func (b *Bits) Cpl(bit, run uint64) (err error) { + if run == 0 { + return + } + + return b.ops(bit, bit+run-1, opCpl) +} + +// Get returns the value at bit. +func (b *Bits) Get(bit uint64) (val bool, err error) { + f := b.f + byte_ := bit >> 3 + pg := int64(byte_ >> bitCacheBits) + b.rwmu.Lock() + if pg != b.page { + if _, err = f.readAt(b.cache[:], pg*bitCacheSize, true); err != nil { + b.rwmu.Unlock() + b.page = -1 + return + } + b.page = pg + } + + val = b.cache[byte_&bitCacheMask]&bitMask[bit&7] != 0 + b.rwmu.Unlock() + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/db_bench/Makefile b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/db_bench/Makefile new file mode 100644 index 00000000..00393708 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/db_bench/Makefile @@ -0,0 +1,20 @@ +all: + go fmt + go test -i + go test -timeout 1h + go build + go vet + make todo + +todo: + @grep -n ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* *.go || true + @grep -n TODO *.go || true + @grep -n BUG *.go || true + @grep -n println *.go || true + +clean: + rm -f *~ cov cov.html bad-dump good-dump lldb.test old.txt new.txt \ + test-acidfiler0-* + +gocov: + gocov test $(COV) | gocov-html > cov.html diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/db_bench/main.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/db_bench/main.go new file mode 100644 index 00000000..f9137fbe --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/db_bench/main.go @@ -0,0 +1,195 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +From: https://code.google.com/p/leveldb/ + +Performance + +Here is a performance report (with explanations) from the run of the included +db_bench program. The results are somewhat noisy, but should be enough to get a +ballpark performance estimate. + +Setup + +We use a database with a million entries. Each entry has a 16 byte key, and a +100 byte value. Values used by the benchmark compress to about half their +original size. + + LevelDB: version 1.1 + Date: Sun May 1 12:11:26 2011 + CPU: 4 x Intel(R) Core(TM)2 Quad CPU Q6600 @ 2.40GHz + CPUCache: 4096 KB + Keys: 16 bytes each + Values: 100 bytes each (50 bytes after compression) + Entries: 1000000 + Raw Size: 110.6 MB (estimated) + File Size: 62.9 MB (estimated) + +Write performance + +The "fill" benchmarks create a brand new database, in either sequential, or +random order. The "fillsync" benchmark flushes data from the operating system +to the disk after every operation; the other write operations leave the data +sitting in the operating system buffer cache for a while. The "overwrite" +benchmark does random writes that update existing keys in the database. + + fillseq : 1.765 micros/op; 62.7 MB/s + fillsync : 268.409 micros/op; 0.4 MB/s (10000 ops) + fillrandom : 2.460 micros/op; 45.0 MB/s + overwrite : 2.380 micros/op; 46.5 MB/s + +Each "op" above corresponds to a write of a single key/value pair. I.e., a +random write benchmark goes at approximately 400,000 writes per second. 
+ +Each "fillsync" operation costs much less (0.3 millisecond) than a disk seek +(typically 10 milliseconds). We suspect that this is because the hard disk +itself is buffering the update in its memory and responding before the data has +been written to the platter. This may or may not be safe based on whether or +not the hard disk has enough power to save its memory in the event of a power +failure. + +Read performance + +We list the performance of reading sequentially in both the forward and reverse +direction, and also the performance of a random lookup. Note that the database +created by the benchmark is quite small. Therefore the report characterizes the +performance of leveldb when the working set fits in memory. The cost of reading +a piece of data that is not present in the operating system buffer cache will +be dominated by the one or two disk seeks needed to fetch the data from disk. +Write performance will be mostly unaffected by whether or not the working set +fits in memory. + + readrandom : 16.677 micros/op; (approximately 60,000 reads per second) + readseq : 0.476 micros/op; 232.3 MB/s + readreverse : 0.724 micros/op; 152.9 MB/s + +LevelDB compacts its underlying storage data in the background to improve read +performance. The results listed above were done immediately after a lot of +random writes. The results after compactions (which are usually triggered +automatically) are better. + + readrandom : 11.602 micros/op; (approximately 85,000 reads per second) + readseq : 0.423 micros/op; 261.8 MB/s + readreverse : 0.663 micros/op; 166.9 MB/s + +Some of the high cost of reads comes from repeated decompression of blocks read +from disk. 
If we supply enough cache to the leveldb so it can hold the +uncompressed blocks in memory, the read performance improves again: + + readrandom : 9.775 micros/op; (approximately 100,000 reads per second before compaction) + readrandom : 5.215 micros/op; (approximately 190,000 reads per second after compaction) + +*/ + +/* + +Executing leveldb's db_bench on local machine: + +(10:49) jnml@fsc-r550:~/src/code.google.com/p/leveldb$ ./db_bench +LevelDB: version 1.10 +Date: Fri May 17 10:49:37 2013 +CPU: 4 * Intel(R) Xeon(R) CPU X5450 @ 3.00GHz +CPUCache: 6144 KB +Keys: 16 bytes each +Values: 100 bytes each (50 bytes after compression) +Entries: 1000000 +RawSize: 110.6 MB (estimated) +FileSize: 62.9 MB (estimated) +------------------------------------------------ +fillseq : 5.334 micros/op; 20.7 MB/s +fillsync : 41386.875 micros/op; 0.0 MB/s (1000 ops) +fillrandom : 9.583 micros/op; 11.5 MB/s +overwrite : 15.441 micros/op; 7.2 MB/s +readrandom : 12.136 micros/op; (1000000 of 1000000 found) +readrandom : 8.612 micros/op; (1000000 of 1000000 found) +readseq : 0.303 micros/op; 365.1 MB/s +readreverse : 0.560 micros/op; 197.5 MB/s +compact : 2394003.000 micros/op; +readrandom : 6.504 micros/op; (1000000 of 1000000 found) +readseq : 0.271 micros/op; 407.5 MB/s +readreverse : 0.515 micros/op; 214.7 MB/s +fill100K : 4793.916 micros/op; 19.9 MB/s (1000 ops) +crc32c : 3.709 micros/op; 1053.2 MB/s (4K per op) +snappycomp : 9.545 micros/op; 409.3 MB/s (output: 55.1%) +snappyuncomp : 1.506 micros/op; 2593.9 MB/s +acquireload : 0.349 micros/op; (each op is 1000 loads) +(10:51) jnml@fsc-r550:~/src/code.google.com/p/leveldb$ + +*/ + +package main + +import ( + "fmt" + "log" + "os" + "time" + + "camlistore.org/third_party/github.com/cznic/exp/dbm" +) + +const ( + N = 1e6 +) + +var value100 = []byte("Here is a performance report (with explanatioaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + +func main() { + log.SetFlags(log.Lshortfile | log.Ltime) + fmt.Printf( + `dbm: 
version exp +Keys: 16 bytes each +Values: 100 bytes each (50 bytes after compression) +Entries: 1000000 +RawSize: 110.6 MB (estimated) +FileSize: 62.9 MB (estimated) +------------------------------------------------ +`) + fillseq(dbm.ACIDNone) + fillseq(dbm.ACIDTransactions) + fillseq(dbm.ACIDFull) +} + +func fillseq(acid int) { + dbname := os.Args[0] + ".db" + db, err := dbm.Create(dbname, &dbm.Options{ACID: acid, GracePeriod: time.Second}) + if err != nil { + log.Fatal(err) + } + + defer func() { + os.Remove(dbname) + }() + + a, err := db.Array("") + if err != nil { + log.Println(err) + return + } + + t0 := time.Now() + for i := 0; i < N; i++ { + if err = a.Set(value100, i); err != nil { + log.Println(err) + return + } + } + if err := db.Close(); err != nil { + log.Println(err) + return + } + + d := time.Since(t0) + fi, err := os.Stat(dbname) + if err != nil { + log.Println(err) + return + } + + secs := float64(d/time.Nanosecond) / float64(time.Second) + sz := fi.Size() + fmt.Printf("fillseq :%19v/op;%7.1f MB/s (%g secs, %d bytes)\n", d/N, float64(sz)/secs/1e6, secs, sz) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/db_bench/main_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/db_bench/main_test.go new file mode 100644 index 00000000..ed83a74f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/db_bench/main_test.go @@ -0,0 +1,26 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "camlistore.org/third_party/github.com/cznic/zappy" + "testing" +) + +func Test(t *testing.T) { + + if n := len(value100); n != 100 { + t.Fatal(n) + } + + c, err := zappy.Encode(nil, value100) + if err != nil { + t.Fatal(err) + } + + if n := len(c); n != 50 { + t.Fatal(n) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/dbm.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/dbm.go new file mode 100644 index 00000000..22c55687 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/dbm.go @@ -0,0 +1,1151 @@ +// Copyright 2014 The dbm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dbm + +//DONE +Top level Sync? Optional? (Measure it) +// Too slow. Added db.Sync() instead. + +//DONE user defined collating +// - on DB create (sets the default) +// - per Array? (probably a MUST HAVE feature) +//---- +// After Go will support Unicode locale collating. But that would have +// to bee a too different API then. (package udbm?) 
+ +import ( + "fmt" + "os" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "camlistore.org/third_party/github.com/cznic/exp/lldb" + "camlistore.org/third_party/github.com/cznic/fileutil" +) + +const ( + aCacheSize = 500 + fCacheSize = 500 + sCacheSize = 50 + + rname = "2remove" // Array shredder queue + arraysPrefix = 'A' + filesPrefix = 'F' + systemPrefix = 'S' + + magic = "\x60\xdb\xf1\x1e" +) + +// Test hooks +var ( + compress = true // Dev hook + activeVictors int32 +) + +const ( + stDisabled = iota // stDisabled must be zero + stIdle + stCollecting + stIdleArmed + stCollectingArmed + stCollectingTriggered + stEndUpdateFailed +) + +func init() { + if stDisabled != 0 { + panic("stDisabled != 0") + } +} + +type DB struct { + _root *Array // Root directory, do not access directly + acache treeCache // Arrays cache + acidNest int // Grace period nesting level + acidState int // Grace period FSM state. + acidTimer *time.Timer // Grace period timer + alloc *lldb.Allocator // The machinery. Wraps filer + bkl sync.Mutex // Big Kernel Lock + closeMu sync.Mutex // Close() coordination + closed chan bool + emptySize int64 // Any header size including FLT. + f *os.File // Underlying file. Potentially nil (if filer is lldb.MemFiler) + fcache treeCache // Files cache + filer lldb.Filer // Wraps f + gracePeriod time.Duration // WAL grace period + isMem bool // No signal capture + lastCommitErr error + lock *os.File // The DB file lock + removing map[int64]bool // BTrees being removed + removingMu sync.Mutex // Remove() coordination + scache treeCache // System arrays cache + stop chan int // Remove() coordination + wg sync.WaitGroup // Remove() coordination + xact bool // Updates are made within automatic structural transactions +} + +// Create creates the named DB file mode 0666 (before umask). The file must not +// already exist. If successful, methods on the returned DB can be used for +// I/O; the associated file descriptor has mode os.O_RDWR. 
If there is an +// error, it will be of type *os.PathError. +// +// For the meaning of opts please see documentation of Options. +func Create(name string, opts *Options) (db *DB, err error) { + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) + if err != nil { + return + } + + return create(f, lldb.NewSimpleFileFiler(f), opts, false) +} + +func create(f *os.File, filer lldb.Filer, opts *Options, isMem bool) (db *DB, err error) { + defer func() { + lock := opts.lock + if err != nil && lock != nil { + n := lock.Name() + lock.Close() + os.Remove(n) + db = nil + } + }() + + if err = opts.check(filer.Name(), true, !isMem); err != nil { + return + } + + b := [16]byte{byte(magic[0]), byte(magic[1]), byte(magic[2]), byte(magic[3]), 0x00} // ver 0x00 + if n, err := filer.WriteAt(b[:], 0); n != 16 { + return nil, &os.PathError{Op: "dbm.Create.WriteAt", Path: filer.Name(), Err: err} + } + + db = &DB{emptySize: 128, f: f, lock: opts.lock, closed: make(chan bool)} + + if filer, err = opts.acidFiler(db, filer); err != nil { + return nil, err + } + + db.filer = filer + if err = filer.BeginUpdate(); err != nil { + return + } + + defer func() { + if e := filer.EndUpdate(); e != nil { + if err == nil { + err = e + } + } + }() + + if db.alloc, err = lldb.NewAllocator(lldb.NewInnerFiler(filer, 16), &lldb.Options{}); err != nil { + return nil, &os.PathError{Op: "dbm.Create", Path: filer.Name(), Err: err} + } + + db.alloc.Compress = compress + db.isMem = isMem + return db, db.boot() +} + +// CreateMem creates an in-memory DB not backed by a disk file. Memory DBs are +// resource limited as they are completely held in memory and are not +// automatically persisted. +// +// For the meaning of opts please see documentation of Options. 
+func CreateMem(opts *Options) (db *DB, err error) { + f := lldb.NewMemFiler() + if opts.ACID == ACIDFull { + opts.ACID = ACIDTransactions + } + return create(nil, f, opts, true) +} + +// CreateTemp creates a new temporary DB in the directory dir with a basename +// beginning with prefix and name ending in suffix. If dir is the empty string, +// CreateTemp uses the default directory for temporary files (see os.TempDir). +// Multiple programs calling CreateTemp simultaneously will not choose the same +// file name for the DB. The caller can use Name() to find the pathname of the +// DB file. It is the caller's responsibility to remove the file when no longer +// needed. +// +// For the meaning of opts please see documentation of Options. +func CreateTemp(dir, prefix, suffix string, opts *Options) (db *DB, err error) { + f, err := fileutil.TempFile(dir, prefix, suffix) + if err != nil { + return + } + + return create(f, lldb.NewSimpleFileFiler(f), opts, false) +} + +// Open opens the named DB file for reading/writing. If successful, methods on +// the returned DB can be used for I/O; the associated file descriptor has mode +// os.O_RDWR. If there is an error, it will be of type *os.PathError. +// +// For the meaning of opts please see documentation of Options. 
+func Open(name string, opts *Options) (db *DB, err error) { + defer func() { + lock := opts.lock + if err != nil && lock != nil { + n := lock.Name() + lock.Close() + os.Remove(n) + db = nil + } + if err != nil { + if db != nil { + db.Close() + db = nil + } + } + }() + + if err = opts.check(name, false, true); err != nil { + return + } + + f, err := os.OpenFile(name, os.O_RDWR, 0666) + if err != nil { + return + } + + filer := lldb.Filer(lldb.NewSimpleFileFiler(f)) + sz, err := filer.Size() + if err != nil { + return + } + + if sz%16 != 0 { + return nil, &os.PathError{Op: "dbm.Open:", Path: name, Err: fmt.Errorf("file size %d(%#x) is not 0 (mod 16)", sz, sz)} + } + + var b [16]byte + if n, err := filer.ReadAt(b[:], 0); n != 16 || err != nil { + return nil, &os.PathError{Op: "dbm.Open.ReadAt", Path: name, Err: err} + } + + var h header + if err = h.rd(b[:]); err != nil { + return nil, &os.PathError{Op: "dbm.Open:validate header", Path: name, Err: err} + } + + db = &DB{f: f, lock: opts.lock, closed: make(chan bool)} + if filer, err = opts.acidFiler(db, filer); err != nil { + return nil, err + } + + db.filer = filer + switch h.ver { + default: + return nil, &os.PathError{Op: "dbm.Open", Path: name, Err: fmt.Errorf("unknown dbm file format version %#x", h.ver)} + case 0x00: + return open00(name, db) + } + +} + +// Close closes the DB, rendering it unusable for I/O. It returns an error, if +// any. Failing to call Close before exiting a program can render the DB +// unusable or, in case of using WAL/2PC, the last committed transaction may +// get lost. +// +// Close is idempotent. 
+func (db *DB) Close() (err error) { + if err = db.enter(); err != nil { + return + } + + doLeave := true + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + if doLeave { + db.leave(&err) + } + }() + + db.closeMu.Lock() + defer db.closeMu.Unlock() + + select { + case _ = <-db.closed: + return + default: + } + + defer close(db.closed) + + if db.acidTimer != nil { + db.acidTimer.Stop() + } + + var e error + for db.acidNest > 0 { + db.acidNest-- + if err := db.filer.EndUpdate(); err != nil { + e = err + } + } + err = e + + doLeave = false + e = db.leave(&err) + if err = db.close(); err == nil { + err = e + } + + if lock := db.lock; lock != nil { + n := lock.Name() + e1 := lock.Close() + db.lock = nil + e2 := os.Remove(n) + if err == nil { + err = e1 + } + if err == nil { + err = e2 + } + } + return +} + +func (db *DB) close() (err error) { + if db.stop != nil { + close(db.stop) + db.wg.Wait() + db.stop = nil + } + + if db.f == nil { // lldb.MemFiler + return + } + + err = db.filer.Sync() + if err2 := db.filer.Close(); err2 != nil && err == nil { + err = err2 + } + return +} + +func (db *DB) root() (r *Array, err error) { + if r = db._root; r != nil { + return + } + + sz, err := db.filer.Size() + if err != nil { + return + } + + switch { + case sz < db.emptySize: + panic(fmt.Errorf("internal error: %d", sz)) + case sz == db.emptySize: + tree, h, err := lldb.CreateBTree(db.alloc, collate) + if err != nil { + return nil, err + } + + if h != 1 { + panic("internal error") + } + + r = &Array{db, tree, nil, "", 0} + db._root = r + return r, nil + default: + tree, err := lldb.OpenBTree(db.alloc, collate, 1) + if err != nil { + return nil, err + } + + r = &Array{db, tree, nil, "", 0} + db._root = r + return r, nil + } +} + +// Array returns an Array associated with a subtree of array, determined by +// subscripts. 
+func (db *DB) Array(array string, subscripts ...interface{}) (a Array, err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.array_(false, array, subscripts...) +} + +func (db *DB) array_(canCreate bool, array string, subscripts ...interface{}) (a Array, err error) { + a.db = db + if a, err = a.array(subscripts...); err != nil { + return + } + a.tree, err = db.acache.getTree(db, arraysPrefix, array, canCreate, aCacheSize) + a.name = array + a.namespace = arraysPrefix + return +} + +func (db *DB) sysArray(canCreate bool, array string) (a Array, err error) { + a.db = db + a.tree, err = db.scache.getTree(db, systemPrefix, array, canCreate, sCacheSize) + a.name = array + a.namespace = systemPrefix + return a, err +} + +func (db *DB) fileArray(canCreate bool, name string) (f File, err error) { + var a Array + a.db = db + a.tree, err = db.fcache.getTree(db, filesPrefix, name, canCreate, fCacheSize) + a.name = name + a.namespace = filesPrefix + return File(a), err +} + +// Set sets the value at subscripts in array. Any previous value, if existed, +// is overwritten by the new one. +func (db *DB) Set(value interface{}, array string, subscripts ...interface{}) (err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + a, err := db.array_(true, array, subscripts...) + if err != nil { + return + } + + return a.set(value) +} + +// Get returns the value at subscripts in array, or nil if no such value +// exists. +func (db *DB) Get(array string, subscripts ...interface{}) (value interface{}, err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + a, err := db.array_(false, array, subscripts...) 
+ if a.tree == nil || err != nil { + return + } + + return a.get() +} + +// Slice returns a new Slice of array, with a subscripts range of [from, to]. +// If from is nil it works as 'from lowest existing key'. If to is nil it +// works as 'to highest existing key'. +func (db *DB) Slice(array string, subscripts, from, to []interface{}) (s *Slice, err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + a, err := db.array_(false, array, subscripts...) + if a.tree == nil || err != nil { + return + } + + return a.Slice(from, to) +} + +// Delete deletes the value at subscripts in array. +func (db *DB) Delete(array string, subscripts ...interface{}) (err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + a, err := db.array_(false, array, subscripts...) + if a.tree == nil || err != nil { + return + } + + return a.delete(subscripts...) +} + +// Clear empties the subtree at subscripts in array. +func (db *DB) Clear(array string, subscripts ...interface{}) (err error) { + if err = db.enter(); err != nil { + return + } + + doLeave := true + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + if doLeave { + db.leave(&err) + } + }() + + a, err := db.array_(false, array, subscripts...) + if a.tree == nil || err != nil { + return + } + + doLeave = false + e := db.leave(&err) + if err = a.Clear(); err == nil { + err = e + } + return +} + +// Name returns the name of the DB file. +func (db *DB) Name() string { + return db.filer.Name() +} + +// Size returns the size of the DB file. 
+func (db *DB) Size() (sz int64, err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.filer.Size() +} + +func (db *DB) setRemoving(h int64, flag bool) (r bool) { + db.removingMu.Lock() + defer db.removingMu.Unlock() + + if db.removing == nil { + db.removing = map[int64]bool{h: flag} + return + } + + r = db.removing[h] + switch flag { + case true: + db.removing[h] = flag + case false: + delete(db.removing, h) + } + return +} + +// RemoveArray removes array from the DB. +func (db *DB) RemoveArray(array string) (err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.removeArray(arraysPrefix, array) +} + +// RemoveFile removes file from the DB. +func (db *DB) RemoveFile(file string) (err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.removeArray(filesPrefix, file) +} + +func (db *DB) removeArray(prefix int, array string) (err error) { + if db.stop == nil { + db.stop = make(chan int) + } + + t, err := db.acache.getTree(db, prefix, array, false, aCacheSize) + if t == nil || err != nil { + return + } + + h := t.Handle() + if db.setRemoving(h, true) { + return + } + + delete(db.acache, array) + + root, err := db.root() + if err != nil { + return + } + + removes, err := db.sysArray(true, rname) + if err != nil { + return + } + + if err = removes.set(nil, h); err != nil { + return + } + + if err = root.delete(prefix, array); err != nil { + return + } + + db.wg.Add(1) + go db.victor(removes, h) + + return +} + +func (db *DB) boot() (err error) { + const tmp = "/tmp/" + + aa, err := db.Arrays() + if err != nil { + return + } + + s, err := aa.Slice([]interface{}{tmp}, nil) + if err = noEof(err); 
err != nil { + return + } + + s.Do(func(subscripts, value []interface{}) (r bool, err error) { + k := subscripts[0].(string) + if !strings.HasPrefix(k, tmp) { + return false, nil + } + + return true, db.RemoveArray(k) + + }) + + ff, err := db.Files() + if err != nil { + return + } + + s, err = ff.Slice([]interface{}{tmp}, nil) + if err = noEof(err); err != nil { + return + } + + s.Do(func(subscripts, value []interface{}) (r bool, err error) { + k := subscripts[0].(string) + if !strings.HasPrefix(k, tmp) { + return false, nil + } + + return true, db.RemoveFile(k) + + }) + + removes, err := db.sysArray(false, rname) + if removes.tree == nil || err != nil { + return + } + + s, err = removes.Slice(nil, nil) + if err = noEof(err); err != nil { + return + } + + var a []int64 + s.Do(func(subscripts, value []interface{}) (r bool, err error) { + r = true + switch { + case len(subscripts) == 1: + h, ok := subscripts[0].(int64) + if ok { + a = append(a, h) + return + } + + fallthrough + default: + err = removes.Delete(subscripts) + return + } + }) + + if db.stop == nil { + db.stop = make(chan int) + } + + for _, h := range a { + if db.setRemoving(h, true) { + continue + } + + db.wg.Add(1) + go db.victor(removes, h) + } + return +} + +func (db *DB) victor(removes Array, h int64) { + atomic.AddInt32(&activeVictors, 1) + var err error + var finished bool + defer func() { + if finished { + func() { + db.enter() + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + lldb.RemoveBTree(db.alloc, h) + removes.delete(h) + db.setRemoving(h, false) + }() + } + db.wg.Done() + atomic.AddInt32(&activeVictors, -1) + }() + + db.enter() + + doLeave := true + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + if doLeave { + db.leave(&err) + } + }() + + t, err := lldb.OpenBTree(db.alloc, collate, h) + if err != nil { + finished = true + return + } + + doLeave = false + if db.leave(&err) != nil { + return + } + + 
for { + runtime.Gosched() + select { + case _, ok := <-db.stop: + if !ok { + return + } + default: + } + + db.enter() + doLeave = true + if finished, err = t.DeleteAny(); finished || err != nil { + return + } + + doLeave = false + if db.leave(&err) != nil { + return + } + } +} + +// Arrays returns a read-only meta array which registers other arrays by name +// as its keys. The associated values are meaningless but non-nil if the value +// exists. +func (db *DB) Arrays() (a Array, err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + p, err := db.root() + if err != nil { + return a, err + } + + return p.array(arraysPrefix) +} + +// Files returns a read-only meta array which registers all Files in the DB by +// name as its keys. The associated values are meaningless but non-nil if the +// value exists. +func (db *DB) Files() (a Array, err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + p, err := db.root() + if err != nil { + return a, err + } + + return p.array(filesPrefix) +} + +func (db *DB) enter() (err error) { + db.bkl.Lock() + switch db.acidState { + default: + panic("internal error") + case stDisabled: + // nop + case stIdle: + if err = db.filer.BeginUpdate(); err != nil { + return + } + + db.acidNest = 1 + db.acidTimer = time.AfterFunc(db.gracePeriod, db.timeout) + db.acidState = stCollecting + case stCollecting: + db.acidNest++ + case stIdleArmed: + db.acidNest = 1 + db.acidState = stCollectingArmed + case stCollectingArmed: + db.acidNest++ + case stCollectingTriggered: + db.acidNest++ + case stEndUpdateFailed: + return db.leave(&err) + } + + if db.xact { + err = db.filer.BeginUpdate() + } + return +} + +func (db *DB) leave(err *error) error { + switch db.acidState { + default: + panic("internal error") + case stDisabled: + 
// nop + case stIdle: + panic("internal error") + case stCollecting: + db.acidNest-- + if db.acidNest == 0 { + db.acidState = stIdleArmed + } + case stIdleArmed: + panic("internal error") + case stCollectingArmed: + db.acidNest-- + if db.acidNest == 0 { + db.acidState = stIdleArmed + } + case stCollectingTriggered: + db.acidNest-- + if db.acidNest == 0 { + if e := db.filer.EndUpdate(); e != nil && err == nil { + *err = e + } + db.acidState = stIdle + } + case stEndUpdateFailed: + db.bkl.Unlock() + return fmt.Errorf("Last transaction commit failed: %v", db.lastCommitErr) + } + + if db.xact { + switch { + case *err != nil: + db.filer.Rollback() // return the original, input error + default: + *err = db.filer.EndUpdate() + if *err != nil { + db.acidState = stEndUpdateFailed + db.lastCommitErr = *err + } + } + } + db.bkl.Unlock() + return *err +} + +func (db *DB) timeout() { + db.bkl.Lock() + defer db.bkl.Unlock() + + select { + case _ = <-db.closed: + return + default: + } + + switch db.acidState { + default: + panic("internal error") + case stIdle: + panic("internal error") + case stCollecting: + db.acidState = stCollectingTriggered + case stIdleArmed: + if err := db.filer.EndUpdate(); err != nil { // If EndUpdate fails, no WAL was written (automatic Rollback) + db.acidState = stEndUpdateFailed + db.lastCommitErr = err + return + } + + db.acidState = stIdle + case stCollectingArmed: + db.acidState = stCollectingTriggered + case stCollectingTriggered: + panic("internal error") + } +} + +// Sync commits the current contents of the DB file to stable storage. +// Typically, this means flushing the file system's in-memory copy of recently +// written data to disk. +// +// NOTE: There's no good reason to invoke Sync if db uses 2PC/WAL (see +// Options.ACID). 
+func (db *DB) Sync() (err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.filer.Sync() +} + +// File returns a File associated with name. +func (db *DB) File(name string) (f File, err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + f, err = db.fileArray(false, name) + if err != nil { + panic(fmt.Errorf("internal error: \"%v\"", err)) + } + + return +} + +// Inc atomically increments the value at subscripts of array by delta and +// returns the new value. If the value doesn't exists before calling Inc or if +// the value is not an integer then the value is considered to be zero. +func (db *DB) Inc(delta int64, array string, subscripts ...interface{}) (val int64, err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + a, err := db.array_(true, array, subscripts...) + if err != nil { + return + } + + return a.inc(delta) +} + +// BeginUpdate increments a "nesting" counter (initially zero). Every +// call to BeginUpdate must be eventually "balanced" by exactly one of +// EndUpdate or Rollback. Calls to BeginUpdate may nest. +func (db *DB) BeginUpdate() (err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.filer.BeginUpdate() +} + +// EndUpdate decrements the "nesting" counter. If it's zero after that then +// assume the "storage" has reached structural integrity (after a batch of +// partial updates). Invocation of an unbalanced EndUpdate is an error. 
+func (db *DB) EndUpdate() (err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.filer.EndUpdate() +} + +// Rollback cancels and undoes the innermost pending update level (if +// transactions are eanbled). Rollback decrements the "nesting" counter. +// Invocation of an unbalanced Rollback is an error. +func (db *DB) Rollback() (err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.filer.Rollback() +} + +// Verify attempts to find any structural errors in DB wrt the organization of +// it as defined by lldb.Allocator. 'bitmap' is a scratch pad for necessary +// bookkeeping and will grow to at most to DB size/128 (0,78%). Any problems +// found are reported to 'log' except non verify related errors like disk read +// fails etc. If 'log' returns false or the error doesn't allow to (reliably) +// continue, the verification process is stopped and an error is returned from +// the Verify function. Passing a nil log works like providing a log function +// always returning false. Any non-structural errors, like for instance Filer +// read errors, are NOT reported to 'log', but returned as the Verify's return +// value, because Verify cannot proceed in such cases. Verify returns nil only +// if it fully completed verifying DB without detecting any error. +// +// It is recommended to limit the number reported problems by returning false +// from 'log' after reaching some limit. Huge and corrupted DB can produce an +// overwhelming error report dataset. +// +// The verifying process will scan the whole DB at least 3 times (a trade +// between processing space and time consumed). It doesn't read the content of +// free blocks above the head/tail info bytes. 
If the 3rd phase detects lost +// free space, then a 4th scan (a faster one) is performed to precisely report +// all of them. +// +// Statistics are returned via 'stats' if non nil. The statistics are valid +// only if Verify succeeded, ie. it didn't reported anything to log and it +// returned a nil error. +func (db *DB) Verify(log func(error) bool, stats *lldb.AllocStats) (err error) { + bitmapf, err := fileutil.TempFile(".", "verifier", ".tmp") + if err != nil { + return + } + + defer func() { + tn := bitmapf.Name() + bitmapf.Close() + os.Remove(tn) + }() + + bitmap := lldb.NewSimpleFileFiler(bitmapf) + + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.alloc.Verify(bitmap, log, stats) +} + +// PeakWALSize reports the maximum size WAL has ever used. +func (db *DB) PeakWALSize() int64 { + af, ok := db.filer.(*lldb.ACIDFiler0) + if !ok { + return 0 + } + + return af.PeakWALSize() +} + +// IsMem reports whether db is backed by memory only. +func (db *DB) IsMem() bool { + return db.isMem +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/doc.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/doc.go new file mode 100644 index 00000000..89107e32 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/doc.go @@ -0,0 +1,303 @@ +// Copyright 2014 The dbm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Package dbm (experimental/WIP) implements a simple database engine, a hybrid of +a hierarchical[1] and/or a key-value one[2]. + +A dbm database stores arbitrary data in named multidimensional arrays and/or +named flat Files. It aims more for small DB footprint rather than for access +speed. 
Dbm was written for a project running on an embedded ARM Linux system. + +Experimental release notes + +This is an experimental release. However, it is now nearly feature complete. + +Key collating respecting client supplied locale is not yet implemented. Planned +when exp/locale materializes. Because of this, the dbm API doesn't yet allow +to really define other than default collating of keys. At least some sort +of client defined collating will be incorporated after Go 1.1 release. + +No serious attempts to profile and/or improve performance were made (TODO). + + WARNING: THE DBM API IS SUBJECT TO CHANGE. + WARNING: THE DBM FILE FORMAT IS SUBJECT TO CHANGE. + WARNING: NOT READY FOR USE IN PRODUCTION. + +Targeted use cases + +ATM using disk based dbm DBs with 2PC/WAL/recovery enabled is supposed to be +safe (modulo any unknown bugs). + +Concurrent access + +All of the dbm API is (intended to be) safe for concurrent use by multiple +goroutines. However, data races stemming from, for example, one goroutine +seeing a value in a tree and another deleting it before the first one gets back +to process it, must be handled outside of dbm. Still any CRUD operations, as +in this date race example, are atomic and safe per se and will not corrupt the +database structural integrity. Non coordinated updates of a DB may corrupt its +semantic and/or schema integrity, though. Failed DB updates performed not +within a structural transaction may corrupt the DB. + +Also please note that passing racy arguments to an otherwise concurrent safe +API makes that API act racy as well. + +Scalars + +Keys and values of an Array are multi-valued and every value must be a +"scalar". 
Types called "scalar" are: + + nil (the typeless one) + bool + all integral types: [u]int8, [u]int16, [u]int32, [u]int, [u]int64 + all floating point types: float32, float64 + all complex types: complex64, complex128 + []byte (64kB max) + string (64kb max) + +Collating + +Values in an Array are always ordered in the collating order of the respective +keys. For details about the collating order please see lldb.Collate. There's a +plan for a mechanism respecting user-supplied locale applied to string +collating, but the required API differences call for a whole different package +perhaps emerging in the future. + +Multidimensional sparse arrays + +A multidimensional array can have many subscripts. Each subscript must be one +of the bellow types: + + nil (typeless) + bool + int int8 int16 int32 int64 + uint byte uint8 uint16 uint32 uint64 + float32 float64 + complex64 complex128 + []byte + string + +The "outer" ordering is: nil, bool, number, []byte, string. IOW, nil is +"smaller" than anything else except other nil, numbers collate before []byte, +[]byte collate before strings, etc. + +By using single item subscripts the multidimensional array "degrades" to a +plain key-value map. As the arrays are named, both models can coexist in the +same database. Dbm arrays are modeled after those of MUMPS[3], so the acronym +is for DB/M instead of Data Base Manager[4]. For a more detailed discussion of +multidimensional arrays please see [5]. Some examples from the same source +rewritten and/or modified for dbm. Note: Error values and error checking is not +present in the bellow examples. 
+ +This is a MUMPS statement + ^Stock("slip dress", 4, "blue", "floral") = 3 + +This is its dbm equivalent + db.Set(3, "Stock", "slip dress", 4, "blue", "floral") + +Dump of "Stock" + "slip dress", 4, "blue", "floral" → 3 + ---- + db.Get("Stock", "slip dress", 4, "blue", "floral") → 3 + +Or for the same effect: + stock := db.Array("Stock") + stock.Set(3, "slip dress", 4, "blue", "floral") + +Dump of "Stock" + "slip dress", 4, "blue", "floral" → 3 + ---- + db.Get("Stock", "slip dress", 4, "blue", "floral") → 3 + stock.Get("slip dress", 4, "blue", "floral") → 3 + +Or + blueDress := db.Array("Stock", "slip dress", 4, "blue") + blueDress.Set(3, "floral") + +Dump of "Stock" + "slip dress", 4, "blue", "floral" → 3 + ---- + db.Get("Stock", "slip dress", 4, "blue", "floral") → 3 + blueDress.Get("floral") → 3 + +Similarly: + invoiceNum := 314159 + customer := "Google" + when := time.Now().UnixNano() + parts := []struct{ num, qty, price int }{ + {100001, 2, 300}, + {100004, 5, 600}, + } + + invoice := db.Array("Invoice") + invoice.Set(when, invoiceNum, "Date") + invoice.Set(customer, invoiceNum, "Customer") + invoice.Set(len(parts), invoiceNum, "Items") // # of Items in the invoice + for i, part := range parts { + invoice.Set(part.num, invoiceNum, "Items", i, "Part") + invoice.Set(part.qty, invoiceNum, "Items", i, "Quantity") + invoice.Set(part.price, invoiceNum, "Items", i, "Price") + } + +Dump of "Invoice" + 314159, "Customer" → "Google" + 314159, "Date" → 1363864307518685049 + 314159, "Items" → 2 + 314159, "Items", 0, "Part" → 100001 + 314159, "Items", 0, "Price" → 300 + 314159, "Items", 0, "Quantity" → 2 + 314159, "Items", 1, "Part" → 100004 + 314159, "Items", 1, "Price" → 600 + 314159, "Items", 1, "Quantity" → 5 + ---- + db.Get("Invoice", invoiceNum, "Customer") → customer + db.Get("Invoice", invoiceNum, "Date") → when + ... 
+ invoice.Get(invoiceNum, "Customer") → customer + invoice.Get(invoiceNum, "Date") → time.Then().UnixName + invoice.Get(invoiceNum, "Items") → len(parts) + invoice.Get(invoiceNum, "Items", 0, "Part") → parts[0].part + invoice.Get(invoiceNum, "Items", 0, "Quantity") → parts[0].qty + invoice.Get(invoiceNum, "Items", 0, "Price") → parts[0].price + invoice.Get(invoiceNum, "Items", 1, "Part") → parts[1].part + ... + +Or for the same effect + invoice := db.Array("Invoice", invoiceNum) + invoice.Set(when, "Date") + invoice.Set(customer, "Customer") + items := invoice.Array("Items") + items.Set(len(parts)) // # of Items in the invoice + for i, part := range parts { + items.Set(part.num, i, "Part") + items.Set(part.qty, i, "Quantity") + items.Set(part.price, i, "Price") + } + +Dump of "Invoice" + 314159, "Customer" → "Google" + 314159, "Date" → 1363865032036475263 + 314159, "Items" → 2 + 314159, "Items", 0, "Part" → 100001 + 314159, "Items", 0, "Price" → 300 + 314159, "Items", 0, "Quantity" → 2 + 314159, "Items", 1, "Part" → 100004 + 314159, "Items", 1, "Price" → 600 + 314159, "Items", 1, "Quantity" → 5 + ---- + db.Get("Invoice", invoiceNum, "Customer") → customer + ... + invoice.Get("Customer") → customer + invoice.Get("Date") → time.Then().UnixName + items.Get() → len(parts) + items.Get(0, "Part") → parts[0].part + items.Get(0, "Quantity") → parts[0].qty + items.Get(0, "Price") → parts[0].price + items.Get(1, "Part") → parts[1].part + ... + +Values are not limited to a single item. 
The DB "schema" used above can be +changed to use a "record" for the invoice item details: + invoice := db.Array("Invoice", invoiceNum) + invoice.Set(when, "Date") + invoice.Set(customer, "Customer") + items := invoice.Array("Items") + items.Set(len(parts)) // # of Items in the invoice + for i, part := range parts { + items.Set([]interface{}{part.num, part.qty, part.price}, i) + } + +Dump of "Invoice" + 314159, "Customer" → "Google" + 314159, "Date" → 1363865958506983228 + 314159, "Items" → 2 + 314159, "Items", 0 → []interface{100001, 2, 300} + 314159, "Items", 1 → []interface{100004, 5, 600} + ---- + items.Get() → len(parts) + items.Get(0) → []interface{parts[0].num, parts[0].qty, parts[O].price} + items.Get(1) → []interface{parts[1].num, parts[1].qty, parts[1].price} + ... + + +Naming issues + +Array and File names can by any string value, including en empty string or a +non UTF-8 string. Names are limited in size to approximately 64 kB. For +compatibility with future dbm versions and/or with other dbm based products, it +is recommended to use only array names which are a valid and exported[6] Go +identifier or rooted names. + +Rooted names + +Rooted name is a pathname beginning in a slash ('/'). The base name of such +path should be (by recommendation) again a valid and exported Go identifier. + +Name spaces + +Arrays namespace and Files namespace are disjoint. Entities in any namespace +having a rooted name with prefix '/tmp/' are removed from the DB automatically +on Open. + +Access denied errors + +Attemtps to mutate Arrays or Files or any other forbidden action return +lldb.ErrPERM. 
+ +ACID Finite State Machine + +For Options.ACID == ACIDFull and GracePeriod != 0 the state transition table +for transaction collecting is: + + +------------+-----------------+---------------+-----------------+ + |\ Event | | | | + | \--------\ | enter | leave | timeout | + | State \| | | | + +------------+-----------------+---------------+-----------------+ + | idle | BeginUpdate | panic | panic | + | | nest = 1 | | | + | | start timer | | | + | | S = collecting | | | + +------------+-----------------+---------------+-----------------+ + | collecting | nest++ | nest-- | S = collecting- | + | | | if nest == 0 | triggered | + | | | S = idle- | | + | | | armed | | + +------------+-----------------+---------------+-----------------+ + | idle- | nest = 1 | panic | EndUpdate | + | aremd | S = collecting- | | S = idle | + | | armed | | | + +------------+-----------------+---------------+-----------------+ + | collecting-| nest++ | nest-- | S = collecting- | + | armed | | if nest == 0 | triggered | + | | | S = idle- | | + | | | armed | | + +------------+-----------------+---------------+-----------------+ + | collecting-| nest++ | nest-- | panic | + | triggered | | if nest == 0 | | + | | | EndUpdate | | + | | | S = idle | | + +------------+-----------------+---------------+-----------------+ + + 'enter': Invoking any DB state mutating operation. + 'leave': Returning from any DB state mutating operation. + +NOTE: The collecting "interval" can be modified by invoking db.BeginUpdate and +db.EndUpdate. + +References + +Links fom the above godocs. 
+ + [1]: http://en.wikipedia.org/wiki/Hierarchical_database_model + [2]: http://en.wikipedia.org/wiki/NoSQL#Key.E2.80.93value_store + [3]: http://en.wikipedia.org/wiki/MUMPS + [4]: http://en.wikipedia.org/wiki/Dbm + [5]: http://www.intersystems.com/cache/technology/techguide/cache_tech-guide_02.html + [6]: http://golang.org/pkg/go/ast/#IsExported + +*/ +package dbm diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/etc.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/etc.go new file mode 100644 index 00000000..42b1d147 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/etc.go @@ -0,0 +1,156 @@ +// Copyright 2014 The dbm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dbm + +import ( + "bytes" + "fmt" + + "camlistore.org/third_party/github.com/cznic/exp/lldb" + "camlistore.org/third_party/github.com/cznic/fileutil" + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +type header struct { + magic []byte + ver byte + reserved []byte +} + +func (h *header) rd(b []byte) error { + if len(b) != 16 { + panic("internal error") + } + + if h.magic = b[:4]; bytes.Compare(h.magic, []byte(magic)) != 0 { + return fmt.Errorf("Unknown file format") + } + + b = b[4:] + h.ver = b[0] + h.reserved = b[1:] + return nil +} + +// Get a 7B int64 from b +func b2h(b []byte) (h int64) { + for _, v := range b[:7] { + h = h<<8 | int64(v) + } + return +} + +// Put a 7B int64 into b +func h2b(b []byte, h int64) []byte { + for i := range b[:7] { + b[i], h = byte(h>>48), h<<8 + } + return b +} + +func collate(a, b []byte) (r int) { + da, err := lldb.DecodeScalars(a) + if err != nil { + panic(err) + } + + db, err := lldb.DecodeScalars(b) + if err != nil { + panic(err) + } + + r, err = lldb.Collate(da, db, nil) + if err != nil { + panic(err) + } + + return +} + +type treeCache 
map[string]*lldb.BTree + +func (t *treeCache) get() (r map[string]*lldb.BTree) { + if r = *t; r != nil { + return + } + + *t = map[string]*lldb.BTree{} + return *t +} + +func (t *treeCache) getTree(db *DB, prefix int, name string, canCreate bool, cacheSize int) (r *lldb.BTree, err error) { + m := t.get() + r, ok := m[name] + if ok { + return + } + + root, err := db.root() + if err != nil { + return + } + + val, err := root.get(prefix, name) + if err != nil { + return + } + + switch x := val.(type) { + case nil: + if !canCreate { + return + } + + var h int64 + r, h, err = lldb.CreateBTree(db.alloc, collate) + if err != nil { + return nil, err + } + + if err = root.set(h, prefix, name); err != nil { + return nil, err + } + case int64: + if r, err = lldb.OpenBTree(db.alloc, collate, x); err != nil { + return nil, err + } + default: + return nil, &lldb.ErrINVAL{Src: "corrupted root directory value for", Val: fmt.Sprintf("%q, %q", prefix, name)} + } + + if len(m) > cacheSize { + i, j, n := 0, cacheSize/2, mathutil.Min(cacheSize/20, 10) + loop: + for k := range m { + if i++; i >= j { + delete(m, k) + if n == 0 { + break loop + } + + n-- + } + } + } + + m[name] = r + return +} + +func encVal(val interface{}) (r []byte, err error) { + switch x := val.(type) { + case []interface{}: + return lldb.EncodeScalars(x...) + default: + return lldb.EncodeScalars(x) + } +} + +func noEof(e error) (err error) { + if !fileutil.IsEOF(e) { + err = e + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/file.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/file.go new file mode 100644 index 00000000..a52c4903 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/file.go @@ -0,0 +1,452 @@ +// Copyright 2014 The dbm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package dbm + +import ( + "bytes" + "fmt" + "io" + + "camlistore.org/third_party/github.com/cznic/exp/lldb" + "camlistore.org/third_party/github.com/cznic/fileutil" + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +/* + +File benchmarks +--------------- +pgBits: 10 +BenchmarkFileWrSeq 100 63036813 ns/op 0.51 MB/s +BenchmarkFileRdSeq 100 26363452 ns/op 1.21 MB/s + +pgBits: 11 +BenchmarkFileWrSeq 100 26437121 ns/op 1.21 MB/s +BenchmarkFileRdSeq 200 13490639 ns/op 2.37 MB/s + +pgBits: 12 +BenchmarkFileWrSeq 200 17363191 ns/op 1.84 MB/s +BenchmarkFileRdSeq 500 8960257 ns/op 3.57 MB/s + +pgBits: 13 +BenchmarkFileWrSeq 500 10005011 ns/op 3.20 MB/s +BenchmarkFileRdSeq 1000 3328100 ns/op 9.62 MB/s + +pgBits: 14 +BenchmarkFileWrSeq 500 6414419 ns/op 4.99 MB/s +BenchmarkFileRdSeq 1000 1877981 ns/op 17.04 MB/s + +pgBits: 15 +BenchmarkFileWrSeq 500 4991456 ns/op 6.41 MB/s +BenchmarkFileRdSeq 1000 1144174 ns/op 27.97 MB/s + +pgBits: 16 +BenchmarkFileWrSeq 500 5019710 ns/op 6.37 MB/s +BenchmarkFileRdSeq 2000 1003166 ns/op 31.90 MB/s + +Bits benchmarks +--------------- +pgBits: 7 +BenchmarkBitsOn16 2000 3598167 ns/op 0.56 kB/s +BenchmarkBitsOn1024 1000 7736769 ns/op 16.54 kB/s +BenchmarkBitsOn65536 100 123242143 ns/op 66.47 kB/s + +pgBits: 8 +BenchmarkBitsOn16 2000 3735512 ns/op 0.54 kB/s +BenchmarkBitsOn1024 1000 5131015 ns/op 24.95 kB/s +BenchmarkBitsOn65536 100 50443447 ns/op 162.40 kB/s + +pgBits: 9 +BenchmarkBitsOn16 1000 2681974 ns/op 0.75 kB/s +BenchmarkBitsOn1024 2000 5708185 ns/op 22.42 kB/s +BenchmarkBitsOn65536 100 25916396 ns/op 316.09 kB/s + +pgBits: 10 +BenchmarkBitsOn16 2000 3931464 ns/op 0.51 kB/s +BenchmarkBitsOn1024 2000 4757425 ns/op 26.91 kB/s +BenchmarkBitsOn65536 200 14795335 ns/op 553.69 kB/s + +pgBits: 11 +BenchmarkBitsOn16 2000 3917548 ns/op 0.51 kB/s +BenchmarkBitsOn1024 2000 4294720 ns/op 29.80 kB/s +BenchmarkBitsOn65536 500 12468406 ns/op 657.02 kB/s + +pgBits: 12 +BenchmarkBitsOn16 1000 2883289 ns/op 0.69 kB/s +BenchmarkBitsOn1024 1000 
3094400 ns/op 41.37 kB/s +BenchmarkBitsOn65536 500 8869794 ns/op 923.58 kB/s + +pgBits: 13 +BenchmarkBitsOn16 1000 3216570 ns/op 0.62 kB/s +BenchmarkBitsOn1024 1000 3329923 ns/op 38.44 kB/s +BenchmarkBitsOn65536 500 7135497 ns/op 1148.06 kB/s + +pgBits: 14 +BenchmarkBitsOn16 1000 3883990 ns/op 0.51 kB/s +BenchmarkBitsOn1024 1000 3828543 ns/op 33.43 kB/s +BenchmarkBitsOn65536 500 5282395 ns/op 1550.81 kB/s + +pgBits: 15 +BenchmarkBitsOn16 500 4054525 ns/op 0.49 kB/s +BenchmarkBitsOn1024 500 4126241 ns/op 31.02 kB/s +BenchmarkBitsOn65536 500 5782308 ns/op 1416.74 kB/s + +pgBits: 16 +BenchmarkBitsOn16 500 6809287 ns/op 0.29 kB/s +BenchmarkBitsOn1024 500 6941766 ns/op 18.44 kB/s +BenchmarkBitsOn65536 500 8043347 ns/op 1018.48 kB/s +*/ + +const ( + fSize = "size" + + pgBits = 16 + pgSize = 1 << pgBits + pgMask = pgSize - 1 +) + +var ( + bfSize = []byte(fSize) +) + +// File is a database blob with a file-like API. Values in Arrays are limited +// in size to about 64kB. To put a larger value into an Array, the value can be +// written to a File and the path stored in the Array instead of the too big +// value. +type File Array + +// As os.File.Name(). +func (f *File) Name() string { + return f.name +} + +// As os.File.FileInfo().Size(). +func (f *File) Size() (sz int64, err error) { + if err = f.db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + f.db.leave(&err) + }() + + if ok, err := (*Array)(f).validate(false); !ok { + return 0, err + } + + return f.size() +} + +func (f *File) size() (sz int64, err error) { + a := (*Array)(f) + v, err := a.get(fSize) + if err != nil { + return + } + + switch x := v.(type) { + case int64: + return x, nil + case nil: + return + } + + return 0, &lldb.ErrINVAL{Src: "dbm.File.Size", Val: v} +} + +// PunchHole deallocates space inside a "file" in the byte range starting at +// off and continuing for size bytes. 
The Filer size (as reported by `Size()` +// does not change when hole punching, even when puching the end of a file off. +func (f *File) PunchHole(off, size int64) (err error) { + if off < 0 { + return &lldb.ErrINVAL{Src: f.Name() + ":PunchHole off", Val: off} + } + + fsize, err := f.Size() + if err != nil { + return + } + + if size < 0 || off+size > fsize { + return &lldb.ErrINVAL{Src: f.Name() + ":PunchHole size", Val: size} + } + + first := off >> pgBits + if off&pgMask != 0 { + first++ + } + off += size - 1 + last := off >> pgBits + if off&pgMask != 0 { + last-- + } + if limit := fsize >> pgBits; last > limit { + last = limit + } + for pg := first; pg <= last; pg++ { + if err = (*Array)(f).Delete(pg); err != nil { + return + } + } + return +} + +var zeroPage [pgSize]byte + +// As os.File.ReadAt. +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + return f.readAt(b, off, false) +} + +func (f *File) readAt(b []byte, off int64, bits bool) (n int, err error) { + var fsize int64 + if !bits { + fsize, err = f.Size() + if err != nil { + return + } + } + + avail := fsize - off + pgI := off >> pgBits + pgO := int(off & pgMask) + rem := len(b) + if !bits && int64(rem) >= avail { + rem = int(avail) + err = io.EOF + } + for rem != 0 && (bits || avail > 0) { + v, err := (*Array)(f).Get(pgI) + if err != nil { + return n, err + } + + pg, _ := v.([]byte) + if len(pg) == 0 { + pg = zeroPage[:] + } + + nc := copy(b[:mathutil.Min(rem, pgSize)], pg[pgO:]) + pgI++ + pgO = 0 + rem -= nc + n += nc + b = b[nc:] + } + return +} + +// As os.File.WriteAt(). 
+func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + return f.writeAt(b, off, false) +} + +func (f *File) writeAt(b []byte, off int64, bits bool) (n int, err error) { + var fsize int64 + a := (*Array)(f) + if !bits { + fsize, err = f.Size() + if err != nil { + return + } + } + + pgI := off >> pgBits + pgO := int(off & pgMask) + rem := len(b) + var nc int + for rem != 0 { + if pgO == 0 && rem >= pgSize && bytes.Equal(b[:pgSize], zeroPage[:]) { + if err = a.Delete(pgI); err != nil { + return + } + + nc = pgSize + n += nc + } else { + v, err := a.Get(pgI) + if err != nil { + return n, err + } + + pg, _ := v.([]byte) + if len(pg) == 0 { + pg = make([]byte, pgSize) + } + + nc = copy(pg[pgO:], b) + n += nc + if err = a.Set(pg, pgI); err != nil { + return n, err + } + + } + pgI++ + pgO = 0 + rem -= nc + b = b[nc:] + } + if !bits { + if newSize := mathutil.MaxInt64(fsize, off+int64(n)); newSize != fsize { + return n, a.Set(newSize, fSize) + } + } + + return +} + +// As os.File.Truncate(). +func (f *File) Truncate(size int64) (err error) { + if err = f.db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + + a := (*Array)(f) + switch { + case size < 0: + if f.db.leave(&err); err != nil { + return + } + + return &lldb.ErrINVAL{Src: "dbm.File.Truncate size", Val: size} + case size == 0: + if f.db.leave(&err); err != nil { + return + } + + return a.Clear() + } + + if f.db.leave(&err) != nil { + return + } + + first := size >> pgBits + if size&pgMask != 0 { + first++ + } + + fsize, err := f.Size() + if err != nil { + return + } + + last := fsize >> pgBits + if fsize&pgMask != 0 { + last++ + } + for ; first < last; first++ { + if err = a.Delete(first); err != nil { + return + } + } + + return a.Set(size, fSize) +} + +// ReadFrom is a helper to populate File's content from r. 'n' reports the +// number of bytes read from 'r'. 
+func (f *File) ReadFrom(r io.Reader) (n int64, err error) { + if err = f.Truncate(0); err != nil { + return + } + + var ( + b [pgSize]byte + rn int + off int64 + ) + + var rerr error + for rerr == nil { + if rn, rerr = r.Read(b[:]); rn != 0 { + f.WriteAt(b[:rn], off) + off += int64(rn) + n += int64(rn) + } + } + if !fileutil.IsEOF(rerr) { + err = rerr + } + return +} + +// WriteTo is a helper to copy/persist File's content to w. If w is also +// an io.WriterAt then WriteTo may attempt to _not_ write any big, for some +// value of big, runs of zeros, i.e. it will attempt to punch holes, where +// possible, in `w` if that happens to be a freshly created or to zero length +// truncated OS file. 'n' reports the number of bytes written to 'w'. +func (f *File) WriteTo(w io.Writer) (n int64, err error) { + var ( + b [pgSize]byte + wn, rn int + off int64 + rerr error + ) + + if wa, ok := w.(io.WriterAt); ok { + fsize, err := f.Size() + if err != nil { + return n, err + } + + lastPgI := fsize >> pgBits + for pgI := int64(0); pgI <= lastPgI; pgI++ { + sz := pgSize + if pgI == lastPgI { + sz = int(fsize & pgMask) + } + v, err := (*Array)(f).Get(pgI) + if err != nil { + return n, err + } + + pg, _ := v.([]byte) + if len(pg) != 0 { + wn, err = wa.WriteAt(pg[:sz], off) + if err != nil { + return n, err + } + + n += int64(wn) + off += int64(sz) + if wn != sz { + return n, io.ErrShortWrite + } + } + } + return n, err + } + + var werr error + for rerr == nil { + if rn, rerr = f.ReadAt(b[:], off); rn != 0 { + off += int64(rn) + if wn, werr = w.Write(b[:rn]); werr != nil { + return n, werr + } + + n += int64(wn) + } + } + if !fileutil.IsEOF(rerr) { + err = rerr + } + return +} + +// Bits return a bitmap index backed by f. 
+func (f *File) Bits() *Bits { + return &Bits{f: f, page: -1} +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/http.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/http.go new file mode 100644 index 00000000..eefe69e5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/http.go @@ -0,0 +1,121 @@ +// Copyright 2014 The dbm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// http VFS support + +package dbm + +import ( + "fmt" + "net/http" + "os" + "path" + "path/filepath" + "time" +) + +// HttpDir returns an object implementing http.FileSystem using the DB file +// system restricted to a specific directory tree. +// +// 'root' must be an absolute path beginning with '/'. +func (db *DB) HttpDir(root string) http.FileSystem { + dir := filepath.Clean(root) + if dir == "." { + dir = "/" + } + if dir[0] != '/' { + return &httpFileSystem{err: fmt.Errorf("HttpDir: invalid root %q", dir)} + } + + return &httpFileSystem{db: db, root: dir} +} + +type httpFileSystem struct { + db *DB + err error + root string +} + +// Implements http.FileSystem +func (fs *httpFileSystem) Open(name string) (r http.File, err error) { + if err = fs.err; err != nil { + return + } + + var f File + if f, err = fs.db.File(filepath.Join(fs.root, path.Clean("/"+name))); err != nil { + return + } + + return &httpFile{f: f}, nil +} + +type httpFile struct { + closed bool + f File + fp int64 +} + +// Implements http.File +func (f *httpFile) Close() error { + f.closed = true + return nil +} + +// Implements http.File +func (f *httpFile) Stat() (os.FileInfo, error) { + return f, nil +} + +// Implements http.File +func (f *httpFile) Readdir(count int) ([]os.FileInfo, error) { + panic("TODO") +} + +// Implements http.File +func (f *httpFile) Read(b []byte) (n int, err error) { + n, err = f.f.ReadAt(b, 
f.fp) + f.fp += int64(n) + return +} + +// Implements http.File +func (f *httpFile) Seek(offset int64, whence int) (int64, error) { + panic("TODO") +} + +// Implements os.FileInfo +func (f *httpFile) Name() string { + return f.f.Name() +} + +// Implements os.FileInfo +func (f *httpFile) Size() int64 { + sz, err := f.f.Size() + if err != nil { + return 0 + } + + return sz +} + +// Implements os.FileInfo +func (f *httpFile) Mode() os.FileMode { + panic("TODO") +} + +// Implements os.FileInfo +func (f *httpFile) ModTime() time.Time { + return time.Now() +} + +// Implements os.FileInfo +func (f *httpFile) IsDir() bool { + return false +} + +// Implements os.FileInfo +func (f *httpFile) Sys() interface{} { + panic("TODO") +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/options.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/options.go new file mode 100644 index 00000000..43e3399f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/options.go @@ -0,0 +1,206 @@ +// Copyright 2014 The dbm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dbm + +import ( + "crypto/sha1" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "time" + + "camlistore.org/third_party/github.com/cznic/exp/lldb" +) + +const ( + // BeginUpdate/EndUpdate/Rollback will be no-ops. All operations + // updating a DB will be written immediately including partial updates + // during operation's progress. If any update fails, the DB can become + // unusable. The same applies to DB crashes and/or any other non clean + // DB shutdown. + ACIDNone = iota + + // Enable transactions. BeginUpdate/EndUpdate/Rollback will be + // effective. All operations on the DB will be automatically performed + // within a transaction. 
Operations will thus either succeed completely + // or have no effect at all - they will be rollbacked in case of any + // error. If any update fails the DB will not be corrupted. DB crashes + // and/or any other non clean DB shutdown may still render the DB + // unusable. + ACIDTransactions + + // Enable durability. Same as ACIDTransactions plus enables 2PC and + // WAL. Updates to the DB will be first made permanent in a WAL and + // only after that reflected in the DB. A DB will automatically recover + // from crashes and/or any other non clean DB shutdown. Only last + // uncommited transaction (transaction in progress ATM of a crash) can + // get lost. + // + // NOTE: Options.GracePeriod may extend the span of a single + // transaction to a batch of multiple transactions. + // + // NOTE2: Non zero GracePeriod requires GOMAXPROCS > 1 to work. Dbm + // checks GOMAXPROCS in such case and if the value is 1 it + // automatically sets GOMAXPROCS = 2. + ACIDFull +) + +// Options are passed to the DB create/open functions to amend the behavior of +// those functions. The compatibility promise is the same as of struct types in +// the Go standard library - introducing changes can be made only by adding new +// exported fields, which is backward compatible as long as client code uses +// field names to assign values of imported struct types literals. +type Options struct { + // See the ACID* constants documentation. + ACID int + + // The write ahead log pathname. Applicable iff ACID == ACIDFull. May + // be left empty in which case an unspecified pathname will be chosen, + // which is computed from the DB name and which will be in the same + // directory as the DB. Moving or renaming the DB while it is shut down + // will break it's connection to the automatically computed name. + // Moving both the files (the DB and the WAL) into another directory + // with no renaming is safe. + // + // On opening an existing DB the WAL file must exist if it should be + // used. 
If it is of zero size then a clean shutdown of the DB is + // assumed, otherwise an automatic DB recovery is performed. + // + // On creating a new DB the WAL file must not exist or it must be + // empty. It's not safe to write to a non empty WAL file as it may + // contain unprocessed DB recovery data. + WAL string + + // Time to collect transactions before committing them into the WAL. + // Applicable iff ACID == ACIDFull. All updates are held in memory + // during the grace period so it should not be more than few seconds at + // most. + // + // Recommended value for GracePeriod is 1 second. + // + // NOTE: Using small GracePeriod values will make DB updates very slow. + // Zero GracePeriod will make every single update a separate 2PC/WAL + // transaction. Values smaller than about 100-200 milliseconds + // (particularly for mechanical, rotational HDs) are not recommended + // and they may not be always honored. + GracePeriod time.Duration + wal *os.File + lock *os.File +} + +func (o *Options) check(dbname string, new, lock bool) (err error) { + var lname string + if lock { + lname = o.lockName(dbname) + if o.lock, err = os.OpenFile(lname, os.O_CREATE|os.O_EXCL|os.O_RDONLY, 0666); err != nil { + if os.IsExist(err) { + err = fmt.Errorf("cannot access DB %q: lock file %q exists", dbname, lname) + } + return + } + } + + switch o.ACID { + default: + return fmt.Errorf("Unsupported Options.ACID: %d", o.ACID) + case ACIDNone, ACIDTransactions: + case ACIDFull: + o.WAL = o.walName(dbname, o.WAL) + if lname == o.WAL { + panic("internal error") + } + + switch new { + case true: + if o.wal, err = os.OpenFile(o.WAL, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666); err != nil { + if os.IsExist(err) { + fi, e := os.Stat(o.WAL) + if e != nil { + return e + } + + if sz := fi.Size(); sz != 0 { + return fmt.Errorf("cannot create DB %q: non empty WAL file %q (size %d) exists", dbname, o.WAL, sz) + } + + o.wal, err = os.OpenFile(o.WAL, os.O_RDWR, 0666) + } + return + } + case false: + if 
o.wal, err = os.OpenFile(o.WAL, os.O_RDWR, 0666); err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("cannot open DB %q: WAL file %q doesn't exist", dbname, o.WAL) + } + return + } + } + } + + return +} + +func (o *Options) lockName(dbname string) (r string) { + base := filepath.Base(filepath.Clean(dbname)) + "lockfile" + h := sha1.New() + io.WriteString(h, base) + return filepath.Join(filepath.Dir(dbname), fmt.Sprintf(".%x", h.Sum(nil))) +} + +func (o *Options) walName(dbname, wal string) (r string) { + if wal != "" { + return filepath.Clean(wal) + } + + base := filepath.Base(filepath.Clean(dbname)) + h := sha1.New() + io.WriteString(h, base) + return filepath.Join(filepath.Dir(dbname), fmt.Sprintf(".%x", h.Sum(nil))) +} + +func (o *Options) acidFiler(db *DB, f lldb.Filer) (r lldb.Filer, err error) { + switch o.ACID { + default: + panic("internal error") + case ACIDNone: + r = f + case ACIDTransactions: + var rf *lldb.RollbackFiler + if rf, err = lldb.NewRollbackFiler( + f, + func(sz int64) error { + return f.Truncate(sz) + }, + f, + ); err != nil { + return + } + + db.xact = true + r = rf + case ACIDFull: + if r, err = lldb.NewACIDFiler(f, o.wal); err != nil { + return + } + + db.acidState = stIdle + db.gracePeriod = o.GracePeriod + db.xact = true + if o.GracePeriod == 0 { + db.acidState = stDisabled + break + } + + // Ensure GOMAXPROCS > 1, required for ACID FSM + if n := runtime.GOMAXPROCS(0); n > 1 { + return + } + + runtime.GOMAXPROCS(2) + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/slice.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/slice.go new file mode 100644 index 00000000..e16089fb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/slice.go @@ -0,0 +1,290 @@ +// Copyright 2014 The dbm Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Functions analogous to package "os". + +package dbm + +import ( + "camlistore.org/third_party/github.com/cznic/exp/lldb" +) + +// Slice represents a slice of an Array. +type Slice struct { + a *Array + prefix []interface{} + from, to []interface{} +} + +// Do calls f for every subscripts-value pair in s in ascending collation order +// of the subscripts. Do returns non nil error for general errors (eg. file +// read error). If f returns false or a non nil error then Do terminates and +// returns the value of error from f. +// +// Note: f can get called with a subscripts-value pair which actually may no +// longer exist - if some other goroutine introduces such data race. +// Coordination required to avoid this situation, if applicable/desirable, must +// be provided by the client of dbm. +func (s *Slice) Do(f func(subscripts, value []interface{}) (bool, error)) (err error) { + var ( + db = s.a.db + noVal bool + ) + + if err = db.enter(); err != nil { + return + } + + doLeave := true + defer func() { + if doLeave { + db.leave(&err) + } + }() + + ok, err := s.a.validate(false) + if !ok { + return err + } + + tree := s.a.tree + if !tree.IsMem() && tree.Handle() == 1 { + noVal = true + } + + switch { + case s.from == nil && s.to == nil: + bprefix, err := lldb.EncodeScalars(s.prefix...) 
+ if err != nil { + return err + } + + enum, _, err := tree.Seek(bprefix) + if err != nil { + return noEof(err) + } + + for { + bk, bv, err := enum.Next() + if err != nil { + return noEof(err) + } + + k, err := lldb.DecodeScalars(bk) + if err != nil { + return noEof(err) + } + + if n := len(s.prefix); n != 0 { + if len(k) < len(s.prefix) { + return nil + } + + c, err := lldb.Collate(k[:n], s.prefix, nil) + if err != nil { + return err + } + + if c > 0 { + return nil + } + } + + v, err := lldb.DecodeScalars(bv) + if err != nil { + return err + } + + doLeave = false + if db.leave(&err) != nil { + return err + } + + if noVal && v != nil { + v = []interface{}{0} + } + if more, err := f(k[len(s.prefix):], v); !more || err != nil { + return noEof(err) + } + + if err = db.enter(); err != nil { + return err + } + + doLeave = true + } + case s.from == nil && s.to != nil: + bprefix, err := lldb.EncodeScalars(s.prefix...) + if err != nil { + return err + } + + enum, _, err := tree.Seek(bprefix) + if err != nil { + return noEof(err) + } + + to := append(append([]interface{}(nil), s.prefix...), s.to...) + for { + bk, bv, err := enum.Next() + if err != nil { + return noEof(err) + } + + k, err := lldb.DecodeScalars(bk) + if err != nil { + return err + } + + c, err := lldb.Collate(k, to, nil) + if err != nil { + return err + } + + if c > 0 { + return err + } + + v, err := lldb.DecodeScalars(bv) + if err != nil { + return noEof(err) + } + + doLeave = false + if db.leave(&err) != nil { + return err + } + + if noVal && v != nil { + v = []interface{}{0} + } + if more, err := f(k[len(s.prefix):], v); !more || err != nil { + return noEof(err) + } + + if err = db.enter(); err != nil { + return err + } + + doLeave = true + } + case s.from != nil && s.to == nil: + bprefix, err := lldb.EncodeScalars(append(s.prefix, s.from...)...) 
+ if err != nil { + return err + } + + enum, _, err := tree.Seek(bprefix) + if err != nil { + return noEof(err) + } + + for { + bk, bv, err := enum.Next() + if err != nil { + return noEof(err) + } + + k, err := lldb.DecodeScalars(bk) + if err != nil { + return noEof(err) + } + + if n := len(s.prefix); n != 0 { + if len(k) < len(s.prefix) { + return nil + } + + c, err := lldb.Collate(k[:n], s.prefix, nil) + if err != nil { + return err + } + + if c > 0 { + return nil + } + } + + v, err := lldb.DecodeScalars(bv) + if err != nil { + return err + } + + doLeave = false + if db.leave(&err) != nil { + return err + } + + if noVal && v != nil { + v = []interface{}{0} + } + if more, err := f(k[len(s.prefix):], v); !more || err != nil { + return noEof(err) + } + + if err = db.enter(); err != nil { + return err + } + + doLeave = true + } + case s.from != nil && s.to != nil: + bprefix, err := lldb.EncodeScalars(append(s.prefix, s.from...)...) + if err != nil { + return err + } + + enum, _, err := tree.Seek(bprefix) + if err != nil { + return noEof(err) + } + + to := append(append([]interface{}(nil), s.prefix...), s.to...) 
+ for { + bk, bv, err := enum.Next() + if err != nil { + return noEof(err) + } + + k, err := lldb.DecodeScalars(bk) + if err != nil { + return noEof(err) + } + + c, err := lldb.Collate(k, to, nil) + if err != nil { + return err + } + + if c > 0 { + return err + } + + v, err := lldb.DecodeScalars(bv) + if err != nil { + return err + } + + doLeave = false + if db.leave(&err) != nil { + return err + } + + if noVal && v != nil { + v = []interface{}{0} + } + if more, err := f(k[len(s.prefix):], v); !more || err != nil { + return noEof(err) + } + + if err = db.enter(); err != nil { + return err + } + + doLeave = true + } + default: + panic("slice.go: internal error") + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/v0.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/v0.go new file mode 100644 index 00000000..24c09607 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/dbm/v0.go @@ -0,0 +1,23 @@ +// Copyright 2014 The dbm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dbm + +import ( + "os" + + "camlistore.org/third_party/github.com/cznic/exp/lldb" +) + +func open00(name string, in *DB) (db *DB, err error) { + db = in + if db.alloc, err = lldb.NewAllocator(lldb.NewInnerFiler(db.filer, 16), &lldb.Options{}); err != nil { + return nil, &os.PathError{Op: "dbm.Open", Path: name, Err: err} + } + + db.alloc.Compress = compress + db.emptySize = 128 + + return db, db.boot() +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/2pc.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/2pc.go new file mode 100644 index 00000000..a01c3a03 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/2pc.go @@ -0,0 +1,324 @@ +// Copyright 2014 The lldb Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Two Phase Commit & Structural ACID + +package lldb + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + "os" + + "camlistore.org/third_party/github.com/cznic/fileutil" + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +var _ Filer = &ACIDFiler0{} // Ensure ACIDFiler0 is a Filer + +type acidWrite struct { + b []byte + off int64 +} + +type acidWriter0 ACIDFiler0 + +func (a *acidWriter0) WriteAt(b []byte, off int64) (n int, err error) { + f := (*ACIDFiler0)(a) + if f.bwal == nil { // new epoch + f.data = f.data[:0] + f.bwal = bufio.NewWriter(f.wal) + if err = a.writePacket([]interface{}{wpt00Header, walTypeACIDFiler0, ""}); err != nil { + return + } + } + + if err = a.writePacket([]interface{}{wpt00WriteData, b, off}); err != nil { + return + } + + f.data = append(f.data, acidWrite{b, off}) + return len(b), nil +} + +func (a *acidWriter0) writePacket(items []interface{}) (err error) { + f := (*ACIDFiler0)(a) + b, err := EncodeScalars(items...) + if err != nil { + return + } + + var b4 [4]byte + binary.BigEndian.PutUint32(b4[:], uint32(len(b))) + if _, err = f.bwal.Write(b4[:]); err != nil { + return + } + + if _, err = f.bwal.Write(b); err != nil { + return + } + + if m := (4 + len(b)) % 16; m != 0 { + var pad [15]byte + _, err = f.bwal.Write(pad[:16-m]) + } + return +} + +// WAL Packet Tags +const ( + wpt00Header = iota + wpt00WriteData + wpt00Checkpoint +) + +const ( + walTypeACIDFiler0 = iota +) + +// ACIDFiler0 is a very simple, synchronous implementation of 2PC. It uses a +// single write ahead log file to provide the structural atomicity +// (BeginUpdate/EndUpdate/Rollback) and durability (DB can be recovered from +// WAL if a crash occurred). +// +// ACIDFiler0 is a Filer. +// +// NOTE: Durable synchronous 2PC involves three fsyncs in this implementation +// (WAL, DB, zero truncated WAL). 
Where possible, it's recommended to collect +// transactions for, say one second before performing the two phase commit as +// the typical performance for rotational hard disks is about few tens of +// fsyncs per second atmost. For an example of such collective transaction +// approach please see the colecting FSM STT in Dbm's documentation[1]. +// +// [1]: http://godoc.org/github.com/cznic/exp/dbm +type ACIDFiler0 struct { + *RollbackFiler + wal *os.File + bwal *bufio.Writer + data []acidWrite + testHook bool // keeps WAL untruncated (once) + peakWal int64 // tracks WAL maximum used size + peakBitFilerPages int // track maximum transaction memory +} + +// NewACIDFiler0 returns a newly created ACIDFiler0 with WAL in wal. +// +// If the WAL is zero sized then a previous clean shutdown of db is taken for +// granted and no recovery procedure is taken. +// +// If the WAL is of non zero size then it is checked for having a +// commited/fully finished transaction not yet been reflected in db. If such +// transaction exists it's committed to db. If the recovery process finishes +// successfully, the WAL is truncated to zero size and fsync'ed prior to return +// from NewACIDFiler0. 
+func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) { + fi, err := wal.Stat() + if err != nil { + return + } + + r = &ACIDFiler0{wal: wal} + + if fi.Size() != 0 { + if err = r.recoverDb(db); err != nil { + return + } + } + + acidWriter := (*acidWriter0)(r) + + if r.RollbackFiler, err = NewRollbackFiler( + db, + func(sz int64) (err error) { + // Checkpoint + if err = acidWriter.writePacket([]interface{}{wpt00Checkpoint, sz}); err != nil { + return + } + + if err = r.bwal.Flush(); err != nil { + return + } + + r.bwal = nil + + if err = r.wal.Sync(); err != nil { + return + } + + wfi, err := r.wal.Stat() + switch err != nil { + case true: + // unexpected, but ignored + case false: + r.peakWal = mathutil.MaxInt64(wfi.Size(), r.peakWal) + } + + // Phase 1 commit complete + + for _, v := range r.data { + if _, err := db.WriteAt(v.b, v.off); err != nil { + return err + } + } + + if err = db.Truncate(sz); err != nil { + return + } + + if err = db.Sync(); err != nil { + return + } + + // Phase 2 commit complete + + if !r.testHook { + if err = r.wal.Truncate(0); err != nil { + return + } + + if _, err = r.wal.Seek(0, 0); err != nil { + return + } + } + + r.testHook = false + return r.wal.Sync() + + }, + acidWriter, + ); err != nil { + return + } + + return r, nil +} + +// PeakWALSize reports the maximum size WAL has ever used. 
+func (a ACIDFiler0) PeakWALSize() int64 { + return a.peakWal +} + +func (a *ACIDFiler0) readPacket(f *bufio.Reader) (items []interface{}, err error) { + var b4 [4]byte + n, err := io.ReadAtLeast(f, b4[:], 4) + if n != 4 { + return + } + + ln := int(binary.BigEndian.Uint32(b4[:])) + m := (4 + ln) % 16 + padd := (16 - m) % 16 + b := make([]byte, ln+padd) + if n, err = io.ReadAtLeast(f, b, len(b)); n != len(b) { + return + } + + return DecodeScalars(b[:ln]) +} + +func (a *ACIDFiler0) recoverDb(db Filer) (err error) { + fi, err := a.wal.Stat() + if err != nil { + return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: err} + } + + if sz := fi.Size(); sz%16 != 0 { + return &ErrILSEQ{Type: ErrFileSize, Name: a.wal.Name(), Arg: sz} + } + + f := bufio.NewReader(a.wal) + items, err := a.readPacket(f) + if err != nil { + return + } + + if len(items) != 3 || items[0] != int64(wpt00Header) || items[1] != int64(walTypeACIDFiler0) { + return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid packet items %#v", items)} + } + + tr := NewBTree(nil) + + for { + items, err = a.readPacket(f) + if err != nil { + return + } + + if len(items) < 2 { + return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("too few packet items %#v", items)} + } + + switch items[0] { + case int64(wpt00WriteData): + if len(items) != 3 { + return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid data packet items %#v", items)} + } + + b, off := items[1].([]byte), items[2].(int64) + var key [8]byte + binary.BigEndian.PutUint64(key[:], uint64(off)) + if err = tr.Set(key[:], b); err != nil { + return + } + case int64(wpt00Checkpoint): + var b1 [1]byte + if n, err := f.Read(b1[:]); n != 0 || err == nil { + return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("checkpoint n %d, err %v", n, err)} + } + + if len(items) != 2 { + return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: 
fmt.Sprintf("checkpoint packet invalid items %#v", items)} + } + + sz := items[1].(int64) + enum, err := tr.seekFirst() + if err != nil { + return err + } + + for { + k, v, err := enum.current() + if err != nil { + if fileutil.IsEOF(err) { + break + } + + return err + } + + if _, err = db.WriteAt(v, int64(binary.BigEndian.Uint64(k))); err != nil { + return err + } + + if err = enum.next(); err != nil { + if fileutil.IsEOF(err) { + break + } + + return err + } + } + + if err = db.Truncate(sz); err != nil { + return err + } + + if err = db.Sync(); err != nil { + return err + } + + // Recovery complete + + if err = a.wal.Truncate(0); err != nil { + return err + } + + return a.wal.Sync() + default: + return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("packet tag %v", items[0])} + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/2pc_docs.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/2pc_docs.go new file mode 100644 index 00000000..02c993b8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/2pc_docs.go @@ -0,0 +1,44 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Anatomy of a WAL file + +WAL file + A sequence of packets + +WAL packet, parts in slice notation + [0:4], 4 bytes: N uint32 // network byte order + [4:4+N], N bytes: payload []byte // gb encoded scalars + +Packets, including the 4 byte 'size' prefix, MUST BE padded to size == 0 (mod +16). The values of the padding bytes MUST BE zero. + +Encoded scalars first item is a packet type number (packet tag). The meaning of +any other item(s) of the payload depends on the packet tag. + +Packet definitions + + {wpt00Header int, typ int, s string} + typ: Must be zero (ACIDFiler0 file). + s: Any comment string, empty string is okay. 
+ + This packet must be present only once - as the first packet of + a WAL file. + + {wpt00WriteData int, b []byte, off int64} + Write data (WriteAt(b, off)). + + {wpt00Checkpoint int, sz int64} + Checkpoint (Truncate(sz)). + + This packet must be present only once - as the last packet of + a WAL file. + +*/ + +package lldb + +//TODO optimize bitfiler/wal/2pc data above final size diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/2pc_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/2pc_test.go new file mode 100644 index 00000000..61d0f4f1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/2pc_test.go @@ -0,0 +1,285 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Two Phase Commit & Structural ACID + +package lldb + +import ( + "bytes" + "encoding/binary" + "io/ioutil" + "math/rand" + "os" + "testing" + + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +var _ Filer = &truncFiler{} + +type truncFiler struct { + f Filer + fake *MemFiler + totalWritten int // Including silently dropped + realWritten int + limit int // -1: unlimited, n: silently stop writing after limit bytes +} + +func NewTruncFiler(f Filer, limit int) *truncFiler { + return &truncFiler{f: f, fake: NewMemFiler(), limit: limit} +} + +func (f *truncFiler) BeginUpdate() error { panic("internal error") } +func (f *truncFiler) Close() error { return f.f.Close() } +func (f *truncFiler) EndUpdate() error { panic("internal error") } +func (f *truncFiler) Name() string { return f.f.Name() } +func (f *truncFiler) PunchHole(off, sz int64) error { panic("internal error") } +func (f *truncFiler) ReadAt(b []byte, off int64) (int, error) { return f.fake.ReadAt(b, off) } +func (f *truncFiler) Rollback() error { panic("internal error") } +func (f *truncFiler) 
Size() (int64, error) { return f.fake.Size() } +func (f *truncFiler) Sync() error { return f.f.Sync() } + +func (f *truncFiler) Truncate(sz int64) error { + f.fake.Truncate(sz) + return f.f.Truncate(sz) +} + +func (f *truncFiler) WriteAt(b []byte, off int64) (n int, err error) { + rq := len(b) + n = f.totalWritten + if lim := f.limit; lim >= 0 && n+rq > lim { + over := n + rq - lim + rq -= over + rq = mathutil.Max(rq, 0) + } + + if n, err = f.fake.WriteAt(b, off); err != nil { + return + } + + f.totalWritten += n + if rq != 0 { + n, err := f.f.WriteAt(b[:rq], off) + if err != nil { + return n, err + } + f.realWritten += n + } + return +} + +// Verify memory BTrees don't have maxRq limits. +func TestACID0MemBTreeCaps(t *testing.T) { + rng := rand.New(rand.NewSource(42)) + tr := NewBTree(nil) + b := make([]byte, 2*maxRq) + for i := range b { + b[i] = byte(rng.Int()) + } + + if err := tr.Set(nil, b); err != nil { + t.Fatal(len(b), err) + } + + g, err := tr.Get(nil, nil) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(g, b) { + t.Fatal("data mismatach") + } +} + +func TestACIDFiler0(t *testing.T) { + const SZ = 1 << 17 + + // Phase 1: Create a DB, fill with it with data. 
+ + wal, err := ioutil.TempFile("", "test-acidfiler0-wal-") + if err != nil { + t.Fatal(err) + } + + if !*oKeep { + defer os.Remove(wal.Name()) + } + + db, err := ioutil.TempFile("", "test-acidfiler0-db-") + if err != nil { + t.Fatal(err) + } + + dbName := db.Name() + if !*oKeep { + defer os.Remove(db.Name()) + } + + realFiler := NewSimpleFileFiler(db) + truncFiler := NewTruncFiler(realFiler, -1) + acidFiler, err := NewACIDFiler(truncFiler, wal) + if err != nil { + t.Error(err) + return + } + + if err = acidFiler.BeginUpdate(); err != nil { + t.Error(err) + return + } + + a, err := NewAllocator(acidFiler, &Options{}) + if err != nil { + t.Error(err) + return + } + + a.Compress = true + + tr, h, err := CreateBTree(a, nil) + if h != 1 || err != nil { + t.Error(h, err) + return + } + + rng := rand.New(rand.NewSource(42)) + var key, val [8]byte + ref := map[int64]int64{} + + for { + sz, err := acidFiler.Size() + if err != nil { + t.Error(err) + return + } + + if sz > SZ { + break + } + + k, v := rng.Int63(), rng.Int63() + ref[k] = v + binary.BigEndian.PutUint64(key[:], uint64(k)) + binary.BigEndian.PutUint64(val[:], uint64(v)) + if err := tr.Set(key[:], val[:]); err != nil { + t.Error(err) + return + } + } + + acidFiler.testHook = true // keep WAL + + if err := acidFiler.EndUpdate(); err != nil { + t.Error(err) + return + } + + if err := acidFiler.Close(); err != nil { + t.Error(err) + return + } + + if err := wal.Sync(); err != nil { + t.Error(err) + return + } + + if _, err = wal.Seek(0, 0); err != nil { + t.Error(err) + return + } + + // Phase 2: Reopen and verify structure and data. 
+ db, err = os.OpenFile(dbName, os.O_RDWR, 0666) + if err != nil { + t.Error(err) + return + } + + filer := NewSimpleFileFiler(db) + a, err = NewAllocator(filer, &Options{}) + if err != nil { + t.Error(err) + return + } + + if err = a.Verify(NewMemFiler(), nil, nil); err != nil { + t.Error(err) + return + } + + tr, err = OpenBTree(a, nil, 1) + for k, v := range ref { + binary.BigEndian.PutUint64(key[:], uint64(k)) + binary.BigEndian.PutUint64(val[:], uint64(v)) + var b []byte + b, err = tr.Get(b, key[:]) + if err != nil || b == nil || !bytes.Equal(b, val[:]) { + t.Error(err, b, val[:]) + return + } + } + + okImage, err := ioutil.ReadFile(dbName) + if err != nil { + t.Error(err) + return + } + + // Phase 3: Simulate a crash + sz, err := filer.Size() + if err != nil { + t.Error(err) + return + } + + sz /= 2 + if err := db.Truncate(sz); err != nil { + t.Error(err) + return + } + + z := make([]byte, sz/3) + n, err := db.WriteAt(z, sz/3) + if n != len(z) { + t.Error(n, err) + return + } + + if err := db.Sync(); err != nil { + t.Error(err) + return + } + + // Phase 4: Open the corrupted DB + filer = NewSimpleFileFiler(db) + acidFiler, err = NewACIDFiler(filer, wal) + if err != nil { + t.Error(err) + return + } + + if err = acidFiler.Sync(); err != nil { + t.Error(err) + return + } + + if err = acidFiler.Close(); err != nil { + t.Error(err) + return + } + + // Phase 5: Verify DB was recovered. + newImage, err := ioutil.ReadFile(dbName) + if err != nil { + t.Error(err) + return + } + + if !bytes.Equal(okImage, newImage) { + t.Error(err) + return + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/LICENSE new file mode 100644 index 00000000..65d761bc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/Makefile b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/Makefile new file mode 100644 index 00000000..a0e7aca5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/Makefile @@ -0,0 +1,34 @@ +.PHONY: all editor clean cover nuke + +grep=--include=*.go + +all: editor + go build + go vet + go install + make todo + +clean: + go clean + rm -f *~ cov cov.html bad-dump good-dump lldb.test old.txt new.txt \ + test-acidfiler0-* _test.db _wal + +cover: + t=$(shell tempfile) ; go test -coverprofile $$t && go tool cover -html $$t && unlink $$t + +editor: + go fmt + go test -i + go test -timeout 1h + +nuke: clean + go clean -i + +todo: + @grep -nr $(grep) BUG * || true + @grep -nr $(grep) LATER * || true + @grep -nr $(grep) MAYBE * || true + @grep -nr $(grep) TODO * || true + @grep -nr $(grep) FIXME * || true + @grep -nr $(grep) ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* * || true + @grep -nr $(grep) println * || true diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/README.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/README.md new file mode 100644 index 00000000..470bf541 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/README.md @@ -0,0 +1,8 @@ +lldb +==== + +Package lldb (WIP) implements a low level database engine. 
+ +Installation: $ go get github.com/cznic/exp/lldb + +Documentation: [godoc.org/github.com/cznic/exp/lldb](http://godoc.org/github.com/cznic/exp/lldb) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/all_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/all_test.go new file mode 100644 index 00000000..451302de --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/all_test.go @@ -0,0 +1,43 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lldb + +import ( + "encoding/hex" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "time" +) + +const ( + testDbName = "_test.db" + walName = "_wal" +) + +func now() time.Time { return time.Now() } + +func hdump(b []byte) string { + return hex.Dump(b) +} + +func die() { + os.Exit(1) +} + +func stack() string { + buf := make([]byte, 1<<16) + return string(buf[:runtime.Stack(buf, false)]) +} + +func temp() (dir, name string) { + dir, err := ioutil.TempDir("", "test-lldb-") + if err != nil { + panic(err) + } + + return dir, filepath.Join(dir, "test.tmp") +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/btree.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/btree.go new file mode 100644 index 00000000..dc8fab08 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/btree.go @@ -0,0 +1,2276 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lldb + +import ( + "bytes" + "errors" + "fmt" + "io" + "sort" + "strings" + + "camlistore.org/third_party/github.com/cznic/bufs" + "camlistore.org/third_party/github.com/cznic/fileutil" + "camlistore.org/third_party/github.com/cznic/sortutil" +) + +const ( + kData = 256 // [1, 512] + kIndex = 256 // [2, 2048] + kKV = 19 // Size of the key/value field in btreeDataPage + kSz = kKV - 1 - 7 // Content prefix size + kH = kKV - 7 // Content field offset for handle + tagBTreeDataPage = 1 + tagBTreeIndexPage = 0 +) + +// BTree is a B+tree[1][2], i.e. a variant which speeds up +// enumeration/iteration of the BTree. According to its origin it can be +// volatile (backed only by memory) or non-volatile (backed by a non-volatile +// Allocator). +// +// The specific implementation of BTrees in this package are B+trees with +// delayed split/concatenation (discussed in e.g. [3]). +// +// Note: No BTree methods returns io.EOF for physical Filer reads/writes. The +// io.EOF is returned only by bTreeEnumerator methods to indicate "no more K-V +// pair". +// +// [1]: http://en.wikipedia.org/wiki/B+tree +// [2]: http://zgking.com:8080/home/donghui/publications/books/dshandbook_BTree.pdf +// [3]: http://people.cs.aau.dk/~simas/aalg06/UbiquitBtree.pdf +type BTree struct { + store btreeStore + root btree + collate func(a, b []byte) int + serial uint64 +} + +// NewBTree returns a new, memory-only BTree. +func NewBTree(collate func(a, b []byte) int) *BTree { + store := newMemBTreeStore() + root, err := newBTree(store) + if err != nil { // should not happen + panic(err.Error()) + } + + return &BTree{store, root, collate, 0} +} + +// IsMem reports if t is a memory only BTree. +func (t *BTree) IsMem() (r bool) { + _, r = t.store.(*memBTreeStore) + return +} + +// Clear empties the tree. 
+func (t *BTree) Clear() (err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + t.serial++ + return t.root.clear(t.store) +} + +// Delete deletes key and its associated value from the tree. +func (t *BTree) Delete(key []byte) (err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + t.serial++ + _, err = t.root.extract(t.store, nil, t.collate, key) + return +} + +// DeleteAny deletes one key and its associated value from the tree. If the +// tree is empty on return then empty is true. +func (t *BTree) DeleteAny() (empty bool, err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + t.serial++ + return t.root.deleteAny(t.store) +} + +func elem(v interface{}) string { + switch x := v.(type) { + default: + panic("internal error") + case nil: + return "nil" + case bool: + if x { + return "true" + } + + return "false" + case int64: + return fmt.Sprint(x) + case uint64: + return fmt.Sprint(x) + case float64: + s := fmt.Sprintf("%g", x) + if !strings.Contains(s, ".") { + s += "." + } + return s + case complex128: + s := fmt.Sprint(x) + return s[1 : len(s)-1] + case []byte: + return fmt.Sprintf("[]byte{% 02x}", x) + case string: + return fmt.Sprintf("%q", x) + } +} + +// Dump outputs a human readable dump of t to w. It is usable iff t keys and +// values are encoded scalars (see EncodeScalars). Intended use is only for +// examples or debugging. Some type information is lost in the rendering, for +// example a float value '17.' and an integer value '17' may both output as +// '17'. 
+func (t *BTree) Dump(w io.Writer) (err error) { + enum, err := t.seekFirst() + if err != nil { + return + } + + for { + bkey, bval, err := enum.current() + if err != nil { + return err + } + + key, err := DecodeScalars(bkey) + if err != nil { + return err + } + + val, err := DecodeScalars(bval) + if err != nil { + return err + } + + kk := []string{} + if key == nil { + kk = []string{"null"} + } + for _, v := range key { + kk = append(kk, elem(v)) + } + vv := []string{} + if val == nil { + vv = []string{"null"} + } + for _, v := range val { + vv = append(vv, elem(v)) + } + skey := strings.Join(kk, ", ") + sval := strings.Join(vv, ", ") + if len(vv) > 1 { + sval = fmt.Sprintf("[]interface{%s}", sval) + } + if _, err = fmt.Fprintf(w, "%s → %s\n", skey, sval); err != nil { + return err + } + + err = enum.next() + if err != nil { + if fileutil.IsEOF(err) { + err = nil + break + } + + return err + } + } + return +} + +// Extract is a combination of Get and Delete. If the key exists in the tree, +// it is returned (like Get) and also deleted from a tree in a more efficient +// way which doesn't walk it twice. The returned slice may be a sub-slice of +// buf if buf was large enough to hold the entire content. Otherwise, a newly +// allocated slice will be returned. It is valid to pass a nil buf. +func (t *BTree) Extract(buf, key []byte) (value []byte, err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + t.serial++ + return t.root.extract(t.store, buf, t.collate, key) +} + +// First returns the first KV pair of the tree, if it exists. Otherwise key == nil +// and value == nil. 
+func (t *BTree) First() (key, value []byte, err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + var p btreeDataPage + if _, p, err = t.root.first(t.store); err != nil || p == nil { + return + } + + if key, err = p.key(t.store, 0); err != nil { + return + } + + value, err = p.value(t.store, 0) + return +} + +// Get returns the value associated with key, or nil if no such value exists. +// The returned slice may be a sub-slice of buf if buf was large enough to hold +// the entire content. Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil buf. +func (t *BTree) Get(buf, key []byte) (value []byte, err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + buffer := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(buffer) + if buffer, err = t.root.get(t.store, buffer, t.collate, key); buffer == nil || err != nil { + return + } + + value = need(len(buffer), buf) + copy(value, buffer) + return +} + +// Handle reports t's handle. +func (t *BTree) Handle() int64 { + return int64(t.root) +} + +// Last returns the last KV pair of the tree, if it exists. Otherwise key == nil +// and value == nil. +func (t *BTree) Last() (key, value []byte, err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + var p btreeDataPage + if _, p, err = t.root.last(t.store); err != nil || p == nil { + return + } + + index := p.len() - 1 + if key, err = p.key(t.store, index); err != nil { + return + } + + value, err = p.value(t.store, index) + return +} + +// Put combines Get and Set in a more efficient way where the tree is walked +// only once. The upd(ater) receives the current (key, old-value), if that +// exists or (key, nil) otherwise. 
It can then return a (new-value, true, nil) +// to create or overwrite the existing value in the KV pair, or (whatever, +// false, nil) if it decides not to create or not to update the value of the KV +// pair. +// +// tree.Set(k, v) +// +// conceptually equals +// +// tree.Put(k, func(k, v []byte){ return v, true }([]byte, bool)) +// +// modulo the differing return values. +// +// The returned slice may be a sub-slice of buf if buf was large enough to hold +// the entire content. Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil buf. +func (t *BTree) Put(buf, key []byte, upd func(key, old []byte) (new []byte, write bool, err error)) (old []byte, written bool, err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + t.serial++ + return t.root.put2(buf, t.store, t.collate, key, upd) +} + +// Seek returns an Enumerator with "position" or an error of any. Normally the +// position is on a KV pair such that key >= KV.key. Then hit is key == KV.key. +// The position is possibly "after" the last KV pair, but that is not an error. +func (t *BTree) Seek(key []byte) (enum *BTreeEnumerator, hit bool, err error) { + enum0, hit, err := t.seek(key) + if err != nil { + return + } + + enum = &BTreeEnumerator{ + enum: enum0, + firstHit: hit, + key: append([]byte(nil), key...), + } + return +} + +func (t *BTree) seek(key []byte) (enum *bTreeEnumerator, hit bool, err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + r := &bTreeEnumerator{t: t, collate: t.collate, serial: t.serial} + if r.p, r.index, hit, err = t.root.seek(t.store, r.collate, key); err != nil { + return + } + + enum = r + return +} + +// IndexSeek returns an Enumerator with "position" or an error of any. Normally +// the position is on a KV pair such that key >= KV.key. Then hit is key == +// KV.key. The position is possibly "after" the last KV pair, but that is not +// an error. 
The collate function originally passed to CreateBTree is used for +// enumerating the tree but a custom collate function c is used for IndexSeek. +func (t *BTree) IndexSeek(key []byte, c func(a, b []byte) int) (enum *BTreeEnumerator, hit bool, err error) { //TODO +test + enum0, hit, err := t.indexSeek(key, c) + if err != nil { + return + } + + enum = &BTreeEnumerator{ + enum: enum0, + firstHit: hit, + key: append([]byte(nil), key...), + } + return +} + +func (t *BTree) indexSeek(key []byte, c func(a, b []byte) int) (enum *bTreeEnumerator, hit bool, err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + r := &bTreeEnumerator{t: t, collate: t.collate, serial: t.serial} + if r.p, r.index, hit, err = t.root.seek(t.store, c, key); err != nil { + return + } + + enum = r + return +} + +// seekFirst returns an enumerator positioned on the first KV pair in the tree, +// if any. For an empty tree, err == io.EOF is returend. +func (t *BTree) SeekFirst() (enum *BTreeEnumerator, err error) { + enum0, err := t.seekFirst() + if err != nil { + return + } + + var key []byte + if key, _, err = enum0.current(); err != nil { + return + } + + enum = &BTreeEnumerator{ + enum: enum0, + firstHit: true, + key: append([]byte(nil), key...), + } + return +} + +func (t *BTree) seekFirst() (enum *bTreeEnumerator, err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + var p btreeDataPage + if _, p, err = t.root.first(t.store); err == nil && p == nil { + err = io.EOF + } + if err != nil { + return + } + + return &bTreeEnumerator{t: t, collate: t.collate, p: p, index: 0, serial: t.serial}, nil +} + +// seekLast returns an enumerator positioned on the last KV pair in the tree, +// if any. For an empty tree, err == io.EOF is returend. 
+func (t *BTree) SeekLast() (enum *BTreeEnumerator, err error) { + enum0, err := t.seekLast() + if err != nil { + return + } + + var key []byte + if key, _, err = enum0.current(); err != nil { + return + } + + enum = &BTreeEnumerator{ + enum: enum0, + firstHit: true, + key: append([]byte(nil), key...), + } + return +} + +func (t *BTree) seekLast() (enum *bTreeEnumerator, err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + var p btreeDataPage + if _, p, err = t.root.last(t.store); err == nil && p == nil { + err = io.EOF + } + if err != nil { + return + } + + return &bTreeEnumerator{t: t, collate: t.collate, p: p, index: p.len() - 1, serial: t.serial}, nil +} + +// Set sets the value associated with key. Any previous value, if existed, is +// overwritten by the new one. +func (t *BTree) Set(key, value []byte) (err error) { + if t == nil { + err = errors.New("BTree method invoked on nil receiver") + return + } + + t.serial++ + dst := bufs.GCache.Get(maxBuf) + _, err = t.root.put(dst, t.store, t.collate, key, value, true) + bufs.GCache.Put(dst) + return +} + +// bTreeEnumerator is a closure of a BTree and a position. It is returned from +// BTree.seek. +// +// NOTE: bTreeEnumerator cannot be used after its BTree was mutated after the +// bTreeEnumerator was acquired from any of the seek, seekFirst, seekLast +// methods. +type bTreeEnumerator struct { + t *BTree + collate func(a, b []byte) int + p btreeDataPage + index int + serial uint64 +} + +// Current returns the KV pair the enumerator is currently positioned on. If +// the position is before the first KV pair in the tree or after the last KV +// pair in the tree then err == io.EOF is returned. +// +// If the enumerator has been invalidated by updating the tree, ErrINVAL is +// returned. 
+func (e *bTreeEnumerator) current() (key, value []byte, err error) { + if e == nil { + err = errors.New("bTreeEnumerator method invoked on nil receiver") + return + } + + if e.serial != e.t.serial { + err = &ErrINVAL{Src: "bTreeEnumerator invalidated by updating the tree"} + return + } + + if e.p == nil || e.index == e.p.len() { + return nil, nil, io.EOF + } + + if key, err = e.p.key(e.t.store, e.index); err != nil { + return + } + + value, err = e.p.value(e.t.store, e.index) + return +} + +// Next attempts to position the enumerator onto the next KV pair wrt the +// current position. If there is no "next" KV pair, io.EOF is returned. +// +// If the enumerator has been invalidated by updating the tree, ErrINVAL is +// returned. +func (e *bTreeEnumerator) next() (err error) { + if e == nil { + err = errors.New("bTreeEnumerator method invoked on nil receiver") + return + } + + if e.serial != e.t.serial { + err = &ErrINVAL{Src: "bTreeEnumerator invalidated by updating the tree"} + return + } + + if e.p == nil { + return io.EOF + } + + switch { + case e.index < e.p.len()-1: + e.index++ + default: + ph := e.p.next() + if ph == 0 { + err = io.EOF + break + } + + if e.p, err = e.t.store.Get(e.p, ph); err != nil { + e.p = nil + return + } + e.index = 0 + } + return +} + +// Prev attempts to position the enumerator onto the previous KV pair wrt the +// current position. If there is no "previous" KV pair, io.EOF is returned. +// +// If the enumerator has been invalidated by updating the tree, ErrINVAL is +// returned. 
+func (e *bTreeEnumerator) prev() (err error) { + if e == nil { + err = errors.New("bTreeEnumerator method invoked on nil receiver") + return + } + + if e.serial != e.t.serial { + err = &ErrINVAL{Src: "bTreeEnumerator invalidated by updating the tree"} + return + } + + if e.p == nil { + return io.EOF + } + + switch { + case e.index > 0: + e.index-- + default: + ph := e.p.prev() + if ph == 0 { + err = io.EOF + break + } + + if e.p, err = e.t.store.Get(e.p, ph); err != nil { + e.p = nil + return + } + e.index = e.p.len() - 1 + } + return +} + +// BTreeEnumerator captures the state of enumerating a tree. It is returned +// from the Seek* methods. The enumerator is aware of any mutations made to +// the tree in the process of enumerating it and automatically resumes the +// enumeration. +type BTreeEnumerator struct { + enum *bTreeEnumerator + err error + key []byte + firstHit bool +} + +// Next returns the currently enumerated KV pair, if it exists and moves to the +// next KV in the key collation order. If there is no KV pair to return, err == +// io.EOF is returned. +func (e *BTreeEnumerator) Next() (key, value []byte, err error) { + if err = e.err; err != nil { + return + } + + canRetry := true +retry: + if key, value, err = e.enum.current(); err != nil { + if _, ok := err.(*ErrINVAL); !ok || !canRetry { + e.err = err + return + } + + canRetry = false + var hit bool + if e.enum, hit, err = e.enum.t.seek(e.key); err != nil { + e.err = err + return + } + + if !e.firstHit && hit { + err = e.enum.next() + if err != nil { + e.err = err + return + } + } + + goto retry + } + + e.firstHit = false + e.key = append([]byte(nil), key...) + e.err = e.enum.next() + return +} + +// Prev returns the currently enumerated KV pair, if it exists and moves to the +// previous KV in the key collation order. If there is no KV pair to return, +// err == io.EOF is returned. 
+func (e *BTreeEnumerator) Prev() (key, value []byte, err error) { + if err = e.err; err != nil { + return + } + + canRetry := true +retry: + if key, value, err = e.enum.current(); err != nil { + if _, ok := err.(*ErrINVAL); !ok || !canRetry { + e.err = err + return + } + + canRetry = false + var hit bool + if e.enum, hit, err = e.enum.t.seek(e.key); err != nil { + e.err = err + return + } + + if !e.firstHit && hit { + err = e.enum.prev() + if err != nil { + e.err = err + return + } + } + + goto retry + } + + e.firstHit = false + e.key = append([]byte(nil), key...) + e.err = e.enum.prev() + return +} + +// CreateBTree creates a new BTree in store. It returns the tree, its (freshly +// assigned) handle (for OpenBTree or RemoveBTree) or an error, if any. +func CreateBTree(store *Allocator, collate func(a, b []byte) int) (bt *BTree, handle int64, err error) { + r := &BTree{store: store, collate: collate} + if r.root, err = newBTree(store); err != nil { + return + } + + return r, int64(r.root), nil +} + +// OpenBTree opens a store's BTree using handle. It returns the tree or an +// error, if any. The same tree may be opened more than once, but operations on +// the separate instances should not ever overlap or void the other instances. +// However, the intended API usage is to open the same tree handle only once +// (handled by some upper layer "dispatcher"). +func OpenBTree(store *Allocator, collate func(a, b []byte) int, handle int64) (bt *BTree, err error) { + r := &BTree{store: store, root: btree(handle), collate: collate} + b := bufs.GCache.Get(7) + defer bufs.GCache.Put(b) + if b, err = store.Get(b, handle); err != nil { + return + } + + if len(b) != 7 { + return nil, &ErrILSEQ{Off: h2off(handle), More: "btree.go:671"} + } + + return r, nil +} + +// RemoveBTree removes tree, represented by handle from store. Empty trees are +// cheap, each uses only few bytes of the store. 
If there's a chance that a +// tree will eventually get reused (non empty again), it's recommended to +// not/never remove it. One advantage of such approach is a stable handle of +// such tree. +func RemoveBTree(store *Allocator, handle int64) (err error) { + tree, err := OpenBTree(store, nil, handle) + if err != nil { + return + } + + if err = tree.Clear(); err != nil { + return + } + + return store.Free(handle) +} + +type btreeStore interface { + Alloc(b []byte) (handle int64, err error) + Free(handle int64) (err error) + Get(dst []byte, handle int64) (b []byte, err error) + Realloc(handle int64, b []byte) (err error) +} + +// Read only zero bytes +var zeros [2 * kKV]byte + +func init() { + if kData < 1 || kData > 512 { + panic(fmt.Errorf("kData %d: out of limits", kData)) + } + + if kIndex < 2 || kIndex > 2048 { + panic(fmt.Errorf("kIndex %d: out of limits", kIndex)) + } + + if kKV < 8 || kKV > 23 { + panic(fmt.Errorf("kKV %d: out of limits", kKV)) + } + + if n := len(zeros); n < 15 { + panic(fmt.Errorf("not enough zeros: %d", n)) + } +} + +type memBTreeStore struct { + h int64 + m map[int64][]byte +} + +func newMemBTreeStore() *memBTreeStore { + return &memBTreeStore{h: 0, m: map[int64][]byte{}} +} + +func (s *memBTreeStore) String() string { + var a sortutil.Int64Slice + for k := range s.m { + a = append(a, k) + } + sort.Sort(a) + var sa []string + for _, k := range a { + sa = append(sa, fmt.Sprintf("%#x:|% x|", k, s.m[k])) + } + return strings.Join(sa, "\n") +} + +func (s *memBTreeStore) Alloc(b []byte) (handle int64, err error) { + s.h++ + handle = s.h + s.m[handle] = bpack(b) + return +} + +func (s *memBTreeStore) Free(handle int64) (err error) { + if _, ok := s.m[handle]; !ok { + return &ErrILSEQ{Type: ErrOther, Off: h2off(handle), More: "btree.go:754"} + } + + delete(s.m, handle) + return +} + +func (s *memBTreeStore) Get(dst []byte, handle int64) (b []byte, err error) { + r, ok := s.m[handle] + if !ok { + return nil, &ErrILSEQ{Type: ErrOther, Off: 
h2off(handle), More: "btree.go:764"} + } + + b = need(len(r), dst) + copy(b, r) + return +} + +func (s *memBTreeStore) Realloc(handle int64, b []byte) (err error) { + if _, ok := s.m[handle]; !ok { + return &ErrILSEQ{Type: ErrOther, Off: h2off(handle), More: "btree.go:774"} + } + + s.m[handle] = bpack(b) + return +} + +/* + +0...0 (1 bytes): +Flag + + 0 + +---+ + | 0 | + +---+ + +0 indicates an index page + +1...count*14-1 +"array" of items, 14 bytes each. Count of items in kIndex-1..2*kIndex+2 + + Count = (len(raw) - 8) / 14 + + 0..6 7..13 + +-------+----------+ + | Child | DataPage | + +-------+----------+ + + Child == handle of a child index page + DataPage == handle of a data page + +Offsets into the raw []byte: +Child[X] == 1+14*X +DataPage[X] == 8+14*X + +*/ +type btreeIndexPage []byte + +func newBTreeIndexPage(leftmostChild int64) (p btreeIndexPage) { + p = bufs.GCache.Get(1 + (kIndex+1)*2*7)[:8] + p[0] = tagBTreeIndexPage + h2b(p[1:], leftmostChild) + return +} + +func (p btreeIndexPage) len() int { + return (len(p) - 8) / 14 +} + +func (p btreeIndexPage) child(index int) int64 { + return b2h(p[1+14*index:]) +} + +func (p btreeIndexPage) setChild(index int, dp int64) { + h2b(p[1+14*index:], dp) +} + +func (p btreeIndexPage) dataPage(index int) int64 { + return b2h(p[8+14*index:]) +} + +func (p btreeIndexPage) setDataPage(index int, dp int64) { + h2b(p[8+14*index:], dp) +} + +func (q btreeIndexPage) insert(index int) btreeIndexPage { + switch len0 := q.len(); { + case index < len0: + has := len(q) + need := has + 14 + switch { + case cap(q) >= need: + q = q[:need] + default: + q = append(q, zeros[:14]...) + } + copy(q[8+14*(index+1):8+14*(index+1)+2*(len0-index)*7], q[8+14*index:]) + case index == len0: + has := len(q) + need := has + 14 + switch { + case cap(q) >= need: + q = q[:need] + default: + q = append(q, zeros[:14]...) 
+ } + } + return q +} + +func (p btreeIndexPage) insert3(index int, dataPage, child int64) btreeIndexPage { + p = p.insert(index) + p.setDataPage(index, dataPage) + p.setChild(index+1, child) + return p +} + +func (p btreeIndexPage) cmp(a btreeStore, c func(a, b []byte) int, keyA []byte, keyBIndex int) (int, error) { + b := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(b) + dp, err := a.Get(b, p.dataPage(keyBIndex)) + if err != nil { + return 0, err + } + + return btreeDataPage(dp).cmp(a, c, keyA, 0) +} + +func (q btreeIndexPage) setLen(n int) btreeIndexPage { + q = q[:cap(q)] + need := 8 + 14*n + if need < len(q) { + return q[:need] + } + return append(q, make([]byte, need-len(q))...) +} + +func (p btreeIndexPage) split(a btreeStore, root btree, ph *int64, parent int64, parentIndex int, index *int) (btreeIndexPage, error) { + right := newBTreeIndexPage(0) + canRecycle := true + defer func() { + if canRecycle { + bufs.GCache.Put(right) + } + }() + right = right.setLen(kIndex) + copy(right[1:1+(2*kIndex+1)*7], p[1+14*(kIndex+1):]) + p = p.setLen(kIndex) + if err := a.Realloc(*ph, p); err != nil { + return nil, err + } + + rh, err := a.Alloc(right) + if err != nil { + return nil, err + } + + if parentIndex >= 0 { + var pp btreeIndexPage = bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(pp) + if pp, err = a.Get(pp, parent); err != nil { + return nil, err + } + pp = pp.insert3(parentIndex, p.dataPage(kIndex), rh) + if err = a.Realloc(parent, pp); err != nil { + return nil, err + } + + } else { + nr := newBTreeIndexPage(*ph) + defer bufs.GCache.Put(nr) + nr = nr.insert3(0, p.dataPage(kIndex), rh) + nrh, err := a.Alloc(nr) + if err != nil { + return nil, err + } + + if err = a.Realloc(int64(root), h2b(make([]byte, 7), nrh)); err != nil { + return nil, err + } + } + if *index > kIndex { + p = right + canRecycle = false + *ph = rh + *index -= kIndex + 1 + } + return p, nil +} + +// p is dirty on return +func (p btreeIndexPage) extract(index int) btreeIndexPage { + n := 
p.len() - 1 + if index < n { + sz := (n-index)*14 + 7 + copy(p[1+14*index:1+14*index+sz], p[1+14*(index+1):]) + } + return p.setLen(n) +} + +// must persist all changes made +func (p btreeIndexPage) underflow(a btreeStore, root, iroot, parent int64, ph *int64, parentIndex int, index *int) (btreeIndexPage, error) { + lh, rh, err := checkSiblings(a, parent, parentIndex) + if err != nil { + return nil, err + } + + var left btreeIndexPage = bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(left) + + if lh != 0 { + if left, err = a.Get(left, lh); err != nil { + return nil, err + } + + if lc := btreeIndexPage(left).len(); lc > kIndex { + var pp = bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(pp) + if pp, err = a.Get(pp, parent); err != nil { + return nil, err + } + + pc := p.len() + p = p.setLen(pc + 1) + di, si, sz := 1+1*14, 1+0*14, (2*pc+1)*7 + copy(p[di:di+sz], p[si:]) + p.setChild(0, btreeIndexPage(left).child(lc)) + p.setDataPage(0, btreeIndexPage(pp).dataPage(parentIndex-1)) + *index++ + btreeIndexPage(pp).setDataPage(parentIndex-1, btreeIndexPage(left).dataPage(lc-1)) + left = left.setLen(lc - 1) + if err = a.Realloc(parent, pp); err != nil { + return nil, err + } + + if err = a.Realloc(*ph, p); err != nil { + return nil, err + } + + return p, a.Realloc(lh, left) + } + } + + if rh != 0 { + right := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(right) + if right, err = a.Get(right, rh); err != nil { + return nil, err + } + + if rc := btreeIndexPage(right).len(); rc > kIndex { + pp := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(pp) + if pp, err = a.Get(pp, parent); err != nil { + return nil, err + } + + pc := p.len() + p = p.setLen(pc + 1) + p.setDataPage(pc, btreeIndexPage(pp).dataPage(parentIndex)) + pc++ + p.setChild(pc, btreeIndexPage(right).child(0)) + btreeIndexPage(pp).setDataPage(parentIndex, btreeIndexPage(right).dataPage(0)) + di, si, sz := 1+0*14, 1+1*14, (2*rc+1)*7 + copy(right[di:di+sz], right[si:]) + right = btreeIndexPage(right).setLen(rc - 1) 
+ if err = a.Realloc(parent, pp); err != nil { + return nil, err + } + + if err = a.Realloc(*ph, p); err != nil { + return nil, err + } + + return p, a.Realloc(rh, right) + } + } + + if lh != 0 { + *index += left.len() + 1 + if left, err = left.concat(a, root, iroot, parent, lh, *ph, parentIndex-1); err != nil { + return p, err + } + + p, *ph = left, lh + return p, nil + } + + return p.concat(a, root, iroot, parent, *ph, rh, parentIndex) +} + +// must persist all changes made +func (p btreeIndexPage) concat(a btreeStore, root, iroot, parent, ph, rh int64, parentIndex int) (btreeIndexPage, error) { + pp := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(pp) + pp, err := a.Get(pp, parent) + if err != nil { + return nil, err + } + + right := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(right) + if right, err = a.Get(right, rh); err != nil { + return nil, err + } + + pc := p.len() + rc := btreeIndexPage(right).len() + p = p.setLen(pc + rc + 1) + p.setDataPage(pc, btreeIndexPage(pp).dataPage(parentIndex)) + di, si, sz := 1+14*(pc+1), 1+0*14, (2*rc+1)*7 + copy(p[di:di+sz], right[si:]) + if err := a.Realloc(ph, p); err != nil { + return nil, err + } + + if err := a.Free(rh); err != nil { + return nil, err + } + + if pc := btreeIndexPage(pp).len(); pc > 1 { + if parentIndex < pc-1 { + di, si, sz := 8+parentIndex*14, 8+(parentIndex+1)*14, 2*(pc-1-parentIndex)*7 + copy(pp[di:si+sz], pp[si:]) + } + pp = btreeIndexPage(pp).setLen(pc - 1) + return p, a.Realloc(parent, pp) + } + + if err := a.Free(iroot); err != nil { + return nil, err + } + + b7 := bufs.GCache.Get(7) + defer bufs.GCache.Put(b7) + return p, a.Realloc(root, h2b(b7[:7], ph)) +} + +/* + +0...0 (1 bytes): +Flag + + 0 + +---+ + | 1 | + +---+ + +1 indicates a data page + +1...14 (14 bytes) + + 1..7 8..14 + +------+------+ + | Prev | Next | + +------+------+ + + Prev, Next == Handles of the data pages doubly linked list + + Count = (len(raw) - 15) / (2*kKV) + +15...count*2*kKV-1 +"array" of items, 2*kKV bytes each. 
Count of items in kData-1..2*kData + +Item + 0..kKV-1 kKV..2*kKV-1 + +----------+--------------+ + | Key | Value | + +----------+--------------+ + +Key/Value encoding + +Length 0...kKV-1 + + 0 1...N N+1...kKV-1 + +---+---------+-------------+ + | N | Data | Padding | + +---+---------+-------------+ + + N == content length + Data == Key or Value content + Padding == MUST be zero bytes + +Length >= kKV + + 0 1...kkV-8 kKV-7...kkV-1 + +------+-----------+--------------+ + | 0xFF | Data | H | + +------+-----------+--------------+ + + Data == Key or Value content, first kKV-7 bytes + H == Handle to THE REST of the content, w/o the first bytes in Data. + +Offsets into the raw []byte: +Key[X] == 15+2*kKV*X +Value[X] == 15+kKV+2*kKV*X +*/ +type btreeDataPage []byte + +func newBTreeDataPage() (p btreeDataPage) { + p = bufs.GCache.Cget(1 + 2*7 + (kData+1)*2*kKV)[:1+2*7] + p[0] = tagBTreeDataPage + return +} + +func newBTreeDataPageAlloc(a btreeStore) (p btreeDataPage, h int64, err error) { + p = newBTreeDataPage() + h, err = a.Alloc(p) + return +} + +func (p btreeDataPage) len() int { + return (len(p) - 15) / (2 * kKV) +} + +func (q btreeDataPage) setLen(n int) btreeDataPage { + q = q[:cap(q)] + need := 15 + 2*kKV*n + if need < len(q) { + return q[:need] + } + return append(q, make([]byte, need-len(q))...) +} + +func (p btreeDataPage) prev() int64 { + return b2h(p[1:]) +} + +func (p btreeDataPage) next() int64 { + return b2h(p[8:]) +} + +func (p btreeDataPage) setPrev(h int64) { + h2b(p[1:], h) +} + +func (p btreeDataPage) setNext(h int64) { + h2b(p[8:], h) +} + +func (q btreeDataPage) insert(index int) btreeDataPage { + switch len0 := q.len(); { + case index < len0: + has := len(q) + need := has + 2*kKV + switch { + case cap(q) >= need: + q = q[:need] + default: + q = append(q, zeros[:2*kKV]...) 
+ } + q.copy(q, index+1, index, len0-index) + return q + case index == len0: + has := len(q) + need := has + 2*kKV + switch { + case cap(q) >= need: + return q[:need] + default: + return append(q, zeros[:2*kKV]...) + } + } + panic("internal error") +} + +func (p btreeDataPage) contentField(off int) (b []byte, h int64) { + p = p[off:] + switch n := int(p[0]); { + case n >= kKV: // content has a handle + b = append([]byte(nil), p[1:1+kSz]...) + h = b2h(p[kH:]) + default: // content is embedded + b, h = append([]byte(nil), p[1:1+n]...), 0 + } + return +} + +func (p btreeDataPage) content(a btreeStore, off int) (b []byte, err error) { + b, h := p.contentField(off) + if h == 0 { + return + } + + // content has a handle + b2, err := a.Get(nil, h) //TODO buffers: Later, not a public API + if err != nil { + return nil, err + } + + return append(b, b2...), nil +} + +func (p btreeDataPage) setContent(a btreeStore, off int, b []byte) (err error) { + p = p[off:] + switch { + case p[0] >= kKV: // existing content has a handle + switch n := len(b); { + case n < kKV: + p[0] = byte(n) + if err = a.Free(b2h(p[kH:])); err != nil { + return + } + copy(p[1:], b) + default: + // reuse handle + copy(p[1:1+kSz], b) + return a.Realloc(b2h(p[kH:]), b[kSz:]) + } + default: // existing content is embedded + switch n := len(b); { + case n < kKV: + p[0] = byte(n) + copy(p[1:], b) + default: + p[0] = 0xff + copy(p[1:1+kSz], b) + h, err := a.Alloc(b[kSz:]) + if err != nil { + return err + } + + h2b(p[kH:], h) + } + } + return +} + +func (p btreeDataPage) keyField(index int) (b []byte, h int64) { + return p.contentField(15 + 2*kKV*index) +} + +func (p btreeDataPage) key(a btreeStore, index int) (b []byte, err error) { + return p.content(a, 15+2*kKV*index) +} + +func (p btreeDataPage) valueField(index int) (b []byte, h int64) { + return p.contentField(15 + kKV + 2*kKV*index) +} + +func (p btreeDataPage) value(a btreeStore, index int) (b []byte, err error) { + return p.content(a, 
15+kKV+2*kKV*index) +} + +func (p btreeDataPage) valueCopy(a btreeStore, index int) (b []byte, err error) { + if b, err = p.content(a, 15+kKV+2*kKV*index); err != nil { + return + } + + return append([]byte(nil), b...), nil +} + +func (p btreeDataPage) setKey(a btreeStore, index int, key []byte) (err error) { + return p.setContent(a, 15+2*kKV*index, key) +} + +func (p btreeDataPage) setValue(a btreeStore, index int, value []byte) (err error) { + return p.setContent(a, 15+kKV+2*kKV*index, value) +} + +func (p btreeDataPage) cmp(a btreeStore, c func(a, b []byte) int, keyA []byte, keyBIndex int) (y int, err error) { + var keyB []byte + if keyB, err = p.content(a, 15+2*kKV*keyBIndex); err != nil { + return + } + + return c(keyA, keyB), nil +} + +func (p btreeDataPage) copy(src btreeDataPage, di, si, n int) { + do, so := 15+2*kKV*di, 15+2*kKV*si + copy(p[do:do+2*kKV*n], src[so:]) +} + +// {p,left} dirty on exit +func (p btreeDataPage) moveLeft(left btreeDataPage, n int) (btreeDataPage, btreeDataPage) { + nl, np := left.len(), p.len() + left = left.setLen(nl + n) + left.copy(p, nl, 0, n) + p.copy(p, 0, n, np-n) + return p.setLen(np - n), left +} + +func (p btreeDataPage) moveRight(right btreeDataPage, n int) (btreeDataPage, btreeDataPage) { + nr, np := right.len(), p.len() + right = right.setLen(nr + n) + right.copy(right, n, 0, nr) + right.copy(p, 0, np-n, n) + return p.setLen(np - n), right +} + +func (p btreeDataPage) insertItem(a btreeStore, index int, key, value []byte) (btreeDataPage, error) { + p = p.insert(index) + di, sz := 15+2*kKV*index, 2*kKV + copy(p[di:di+sz], zeros[:sz]) + if err := p.setKey(a, index, key); err != nil { + return nil, err + } + return p, p.setValue(a, index, value) +} + +func (p btreeDataPage) split(a btreeStore, root, ph, parent int64, parentIndex, index int, key, value []byte) (btreeDataPage, error) { + right, rh, err := newBTreeDataPageAlloc(a) + // fails defer bufs.GCache.Put(right) + if err != nil { + return nil, err + } + + if next := 
p.next(); next != 0 { + right.setNext(p.next()) + nxh := right.next() + nx := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(nx) + if nx, err = a.Get(nx, nxh); err != nil { + return nil, err + } + + btreeDataPage(nx).setPrev(rh) + if err = a.Realloc(nxh, nx); err != nil { + return nil, err + } + } + + p.setNext(rh) + right.setPrev(ph) + right = right.setLen(kData) + right.copy(p, 0, kData, kData) + p = p.setLen(kData) + + if parentIndex >= 0 { + var pp btreeIndexPage = bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(pp) + if pp, err = a.Get(pp, parent); err != nil { + return nil, err + } + + pp = pp.insert3(parentIndex, rh, rh) + if err = a.Realloc(parent, pp); err != nil { + return nil, err + } + + } else { + nr := newBTreeIndexPage(ph) + defer bufs.GCache.Put(nr) + nr = nr.insert3(0, rh, rh) + nrh, err := a.Alloc(nr) + if err != nil { + return nil, err + } + + if err = a.Realloc(root, h2b(make([]byte, 7), nrh)); err != nil { + return nil, err + } + + } + if index > kData { + if right, err = right.insertItem(a, index-kData, key, value); err != nil { + return nil, err + } + } else { + if p, err = p.insertItem(a, index, key, value); err != nil { + return nil, err + } + } + if err = a.Realloc(ph, p); err != nil { + return nil, err + } + + return p, a.Realloc(rh, right) +} + +func (p btreeDataPage) overflow(a btreeStore, root, ph, parent int64, parentIndex, index int, key, value []byte) (btreeDataPage, error) { + leftH, rightH, err := checkSiblings(a, parent, parentIndex) + if err != nil { + return nil, err + } + + if leftH != 0 { + left := btreeDataPage(bufs.GCache.Get(maxBuf)) + defer bufs.GCache.Put(left) + if left, err = a.Get(left, leftH); err != nil { + return nil, err + } + + if left.len() < 2*kData { + + p, left = p.moveLeft(left, 1) + if err = a.Realloc(leftH, left); err != nil { + return nil, err + } + + if p, err = p.insertItem(a, index-1, key, value); err != nil { + return nil, err + } + + return p, a.Realloc(ph, p) + } + } + + if rightH != 0 { + right := 
btreeDataPage(bufs.GCache.Get(maxBuf)) + defer bufs.GCache.Put(right) + if right, err = a.Get(right, rightH); err != nil { + return nil, err + } + + if right.len() < 2*kData { + if index < 2*kData { + p, right = p.moveRight(right, 1) + if err = a.Realloc(rightH, right); err != nil { + return nil, err + } + + if p, err = p.insertItem(a, index, key, value); err != nil { + return nil, err + } + + return p, a.Realloc(ph, p) + } else { + if right, err = right.insertItem(a, 0, key, value); err != nil { + return nil, err + } + + return p, a.Realloc(rightH, right) + } + } + } + return p.split(a, root, ph, parent, parentIndex, index, key, value) +} + +func (p btreeDataPage) swap(a btreeStore, di int, value []byte, canOverwrite bool) (oldValue []byte, err error) { + if oldValue, err = p.value(a, di); err != nil { + return + } + + if !canOverwrite { + return + } + + oldValue = append([]byte(nil), oldValue...) + err = p.setValue(a, di, value) + return +} + +type btreePage []byte + +func (p btreePage) isIndex() bool { + return p[0] == tagBTreeIndexPage +} + +func (p btreePage) len() int { + if p.isIndex() { + return btreeIndexPage(p).len() + } + + return btreeDataPage(p).len() +} + +func (p btreePage) find(a btreeStore, c func(a, b []byte) int, key []byte) (index int, ok bool, err error) { + l := 0 + h := p.len() - 1 + isIndex := p.isIndex() + if c == nil { + c = bytes.Compare + } + for l <= h { + index = (l + h) >> 1 + var cmp int + if isIndex { + if cmp, err = btreeIndexPage(p).cmp(a, c, key, index); err != nil { + return + } + } else { + if cmp, err = btreeDataPage(p).cmp(a, c, key, index); err != nil { + return + } + } + switch ok = cmp == 0; { + case cmp > 0: + l = index + 1 + case ok: + return + default: + h = index - 1 + } + } + return l, false, nil +} + +// p is dirty after extract! 
+func (p btreeDataPage) extract(a btreeStore, index int) (btreeDataPage, []byte, error) { + value, err := p.valueCopy(a, index) + if err != nil { + return nil, nil, err + } + + if _, h := p.keyField(index); h != 0 { + if err = a.Free(h); err != nil { + return nil, nil, err + } + } + + if _, h := p.valueField(index); h != 0 { + if err = a.Free(h); err != nil { + return nil, nil, err + } + } + + n := p.len() - 1 + if index < n { + p.copy(p, index, index+1, n-index) + } + return p.setLen(n), value, nil +} + +func checkSiblings(a btreeStore, parent int64, parentIndex int) (left, right int64, err error) { + if parentIndex >= 0 { + var p btreeIndexPage = bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(p) + if p, err = a.Get(p, parent); err != nil { + return + } + + if parentIndex > 0 { + left = p.child(parentIndex - 1) + } + if parentIndex < p.len() { + right = p.child(parentIndex + 1) + } + } + return +} + +// underflow must persist all changes made. +func (p btreeDataPage) underflow(a btreeStore, root, iroot, parent, ph int64, parentIndex int) (err error) { + lh, rh, err := checkSiblings(a, parent, parentIndex) + if err != nil { + return err + } + + if lh != 0 { + left := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(left) + if left, err = a.Get(left, lh); err != nil { + return err + } + + if btreeDataPage(left).len()+p.len() >= 2*kData { + left, p = btreeDataPage(left).moveRight(p, 1) + if err = a.Realloc(lh, left); err != nil { + return err + } + + return a.Realloc(ph, p) + } + } + + if rh != 0 { + right := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(right) + if right, err = a.Get(right, rh); err != nil { + return err + } + + if p.len()+btreeDataPage(right).len() > 2*kData { + right, p = btreeDataPage(right).moveLeft(p, 1) + if err = a.Realloc(rh, right); err != nil { + return err + } + + return a.Realloc(ph, p) + } + } + + if lh != 0 { + left := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(left) + if left, err = a.Get(left, lh); err != nil { + return err + 
} + + if err = a.Realloc(ph, p); err != nil { + return err + } + + return btreeDataPage(left).concat(a, root, iroot, parent, lh, ph, parentIndex-1) + } + + return p.concat(a, root, iroot, parent, ph, rh, parentIndex) +} + +// concat must persist all changes made. +func (p btreeDataPage) concat(a btreeStore, root, iroot, parent, ph, rh int64, parentIndex int) (err error) { + right := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(right) + if right, err = a.Get(right, rh); err != nil { + return err + } + + right, p = btreeDataPage(right).moveLeft(p, btreeDataPage(right).len()) + nxh := btreeDataPage(right).next() + if nxh != 0 { + nx := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(nx) + if nx, err = a.Get(nx, nxh); err != nil { + return err + } + + btreeDataPage(nx).setPrev(ph) + if err = a.Realloc(nxh, nx); err != nil { + return err + } + } + p.setNext(nxh) + if err = a.Free(rh); err != nil { + return err + } + + pp := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(pp) + if pp, err = a.Get(pp, parent); err != nil { + return err + } + + if btreeIndexPage(pp).len() > 1 { + pp = btreeIndexPage(pp).extract(parentIndex) + btreeIndexPage(pp).setChild(parentIndex, ph) + if err = a.Realloc(parent, pp); err != nil { + return err + } + + return a.Realloc(ph, p) + } + + if err = a.Free(iroot); err != nil { + return err + } + + if err = a.Realloc(ph, p); err != nil { + return err + } + + var b7 [7]byte + return a.Realloc(root, h2b(b7[:], ph)) +} + +// external "root" is stable and contains the real root. 
+type btree int64 + +func newBTree(a btreeStore) (btree, error) { + r, err := a.Alloc(zeros[:7]) + return btree(r), err +} + +func (root btree) String(a btreeStore) string { + r := bufs.GCache.Get(16) + defer bufs.GCache.Put(r) + r, err := a.Get(r, int64(root)) + if err != nil { + panic(err) + } + + iroot := b2h(r) + m := map[int64]bool{int64(root): true} + + s := []string{fmt.Sprintf("tree %#x -> %#x\n====", root, iroot)} + if iroot == 0 { + return s[0] + } + + var f func(int64, string) + f = func(h int64, ind string) { + if m[h] { + return + } + + m[h] = true + var b btreePage = bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(b) + var err error + if b, err = a.Get(b, h); err != nil { + panic(err) + } + + s = append(s, fmt.Sprintf("%s@%#x", ind, h)) + switch b.isIndex() { + case true: + da := []int64{} + b := btreeIndexPage(b) + for i := 0; i < b.len(); i++ { + c, d := b.child(i), b.dataPage(i) + s = append(s, fmt.Sprintf("%schild[%d] %#x dataPage[%d] %#x", ind, i, c, i, d)) + da = append(da, c) + da = append(da, d) + } + i := b.len() + c := b.child(i) + s = append(s, fmt.Sprintf("%schild[%d] %#x", ind, i, c)) + for _, c := range da { + f(c, ind+" ") + } + f(c, ind+" ") + case false: + b := btreeDataPage(b) + s = append(s, fmt.Sprintf("%sprev %#x next %#x", ind, b.prev(), b.next())) + for i := 0; i < b.len(); i++ { + k, err := b.key(a, i) + if err != nil { + panic(err) + } + + v, err := b.value(a, i) + if err != nil { + panic(err) + } + + s = append(s, fmt.Sprintf("%sK[%d]|% x| V[%d]|% x|", ind, i, k, i, v)) + } + } + } + + f(int64(iroot), "") + return strings.Join(s, "\n") +} + +func (root btree) put(dst []byte, a btreeStore, c func(a, b []byte) int, key, value []byte, canOverwrite bool) (prev []byte, err error) { + prev, _, err = root.put2(dst, a, c, key, func(key, old []byte) (new []byte, write bool, err error) { + new, write = value, true + return + }) + return +} + +func (root btree) put2(dst []byte, a btreeStore, c func(a, b []byte) int, key []byte, upd 
func(key, old []byte) (new []byte, write bool, err error)) (old []byte, written bool, err error) { + var r, value []byte + if r, err = a.Get(dst, int64(root)); err != nil { + return + } + + iroot := b2h(r) + var h int64 + if iroot == 0 { + p := newBTreeDataPage() + defer bufs.GCache.Put(p) + if value, written, err = upd(key, nil); err != nil || !written { + return + } + + if p, err = p.insertItem(a, 0, key, value); err != nil { + return + } + + h, err = a.Alloc(p) + if err != nil { + return nil, true, err + } + + err = a.Realloc(int64(root), h2b(r, h)[:7]) + return + } + + parentIndex := -1 + var parent int64 + ph := iroot + + p := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(p) + + for { + if p, err = a.Get(p[:cap(p)], ph); err != nil { + return + } + + var index int + var ok bool + + if index, ok, err = btreePage(p).find(a, c, key); err != nil { + return + } + + switch { + case ok: // Key found + if btreePage(p).isIndex() { + ph = btreeIndexPage(p).dataPage(index) + if p, err = a.Get(p, ph); err != nil { + return + } + + if old, err = btreeDataPage(p).valueCopy(a, 0); err != nil { + return + } + + if value, written, err = upd(key, old); err != nil || !written { + return + } + + if _, err = btreeDataPage(p).swap(a, 0, value, true); err != nil { + return + } + + err = a.Realloc(ph, p) + return + } + + if old, err = btreeDataPage(p).valueCopy(a, index); err != nil { + return + } + + if value, written, err = upd(key, old); err != nil || !written { + return + } + + if _, err = btreeDataPage(p).swap(a, index, value, true); err != nil { + return + } + + err = a.Realloc(ph, p) + return + case btreePage(p).isIndex(): + if btreePage(p).len() > 2*kIndex { + if p, err = btreeIndexPage(p).split(a, root, &ph, parent, parentIndex, &index); err != nil { + return + } + } + parentIndex = index + parent = ph + ph = btreeIndexPage(p).child(index) + default: + if value, written, err = upd(key, nil); err != nil || !written { + return + } + + if btreePage(p).len() < 2*kData { // 
page is not full + if p, err = btreeDataPage(p).insertItem(a, index, key, value); err != nil { + return + } + + err = a.Realloc(ph, p) + return + } + + // page is full + p, err = btreeDataPage(p).overflow(a, int64(root), ph, parent, parentIndex, index, key, value) + return + } + } +} + +//TODO actually use 'dst' to return 'value' +func (root btree) get(a btreeStore, dst []byte, c func(a, b []byte) int, key []byte) (b []byte, err error) { + var r []byte + if r, err = a.Get(dst, int64(root)); err != nil { + return + } + + iroot := b2h(r) + if iroot == 0 { + return + } + + ph := iroot + + for { + var p btreePage + if p, err = a.Get(p, ph); err != nil { + return + } + + var index int + var ok bool + if index, ok, err = p.find(a, c, key); err != nil { + return + } + + switch { + case ok: + if p.isIndex() { + dh := btreeIndexPage(p).dataPage(index) + dp, err := a.Get(dst, dh) + if err != nil { + return nil, err + } + + return btreeDataPage(dp).value(a, 0) + } + + return btreeDataPage(p).value(a, index) + case p.isIndex(): + ph = btreeIndexPage(p).child(index) + default: + return + } + } +} + +//TODO actually use 'dst' to return 'value' +func (root btree) extract(a btreeStore, dst []byte, c func(a, b []byte) int, key []byte) (value []byte, err error) { + var r []byte + if r, err = a.Get(dst, int64(root)); err != nil { + return + } + + iroot := b2h(r) + if iroot == 0 { + return + } + + ph := iroot + parentIndex := -1 + var parent int64 + + p := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(p) + + for { + if p, err = a.Get(p[:cap(p)], ph); err != nil { + return + } + + var index int + var ok bool + if index, ok, err = btreePage(p).find(a, c, key); err != nil { + return + } + + if ok { + if btreePage(p).isIndex() { + dph := btreeIndexPage(p).dataPage(index) + dp, err := a.Get(dst, dph) + if err != nil { + return nil, err + } + + if btreeDataPage(dp).len() > kData { + if dp, value, err = btreeDataPage(dp).extract(a, 0); err != nil { + return nil, err + } + + return value, 
a.Realloc(dph, dp) + } + + if btreeIndexPage(p).len() < kIndex && ph != iroot { + var err error + if p, err = btreeIndexPage(p).underflow(a, int64(root), iroot, parent, &ph, parentIndex, &index); err != nil { + return nil, err + } + } + parentIndex = index + 1 + parent = ph + ph = btreeIndexPage(p).child(parentIndex) + continue + } + + p, value, err = btreeDataPage(p).extract(a, index) + if btreePage(p).len() >= kData { + err = a.Realloc(ph, p) + return + } + + if ph != iroot { + err = btreeDataPage(p).underflow(a, int64(root), iroot, parent, ph, parentIndex) + return + } + + if btreePage(p).len() == 0 { + if err = a.Free(ph); err != nil { + return + } + + err = a.Realloc(int64(root), zeros[:7]) + return + } + err = a.Realloc(ph, p) + return + } + + if !btreePage(p).isIndex() { + return + } + + if btreePage(p).len() < kIndex && ph != iroot { + if p, err = btreeIndexPage(p).underflow(a, int64(root), iroot, parent, &ph, parentIndex, &index); err != nil { + return nil, err + } + } + parentIndex = index + parent = ph + ph = btreeIndexPage(p).child(index) + } +} + +func (root btree) deleteAny(a btreeStore) (bool, error) { + r := bufs.GCache.Get(7) + defer bufs.GCache.Put(r) + var err error + if r, err = a.Get(r, int64(root)); err != nil { + return false, err + } + + iroot := b2h(r) + if iroot == 0 { + return true, nil + } + + ph := iroot + parentIndex := -1 + var parent int64 + p := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(p) + + for { + if p, err = a.Get(p, ph); err != nil { + return false, err + } + + index := btreePage(p).len() / 2 + if btreePage(p).isIndex() { + dph := btreeIndexPage(p).dataPage(index) + dp := bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(dp) + if dp, err = a.Get(dp, dph); err != nil { + return false, err + } + + if btreeDataPage(dp).len() > kData { + if dp, _, err = btreeDataPage(dp).extract(a, 0); err != nil { + return false, err + } + + return false, a.Realloc(dph, dp) + } + + if btreeIndexPage(p).len() < kIndex && ph != iroot { + if p, 
err = btreeIndexPage(p).underflow(a, int64(root), iroot, parent, &ph, parentIndex, &index); err != nil { + return false, err + } + } + parentIndex = index + 1 + parent = ph + ph = btreeIndexPage(p).child(parentIndex) + continue + } + + p, _, err = btreeDataPage(p).extract(a, index) + if btreePage(p).len() >= kData { + err = a.Realloc(ph, p) + return false, err + } + + if ph != iroot { + err = btreeDataPage(p).underflow(a, int64(root), iroot, parent, ph, parentIndex) + return false, err + } + + if btreePage(p).len() == 0 { + if err = a.Free(ph); err != nil { + return true, err + } + + return true, a.Realloc(int64(root), zeros[:7]) + } + + return false, a.Realloc(ph, p) + } +} + +func (root btree) first(a btreeStore) (ph int64, p btreeDataPage, err error) { + r := bufs.GCache.Get(7) + defer bufs.GCache.Put(r) + if r, err = a.Get(r, int64(root)); err != nil { + return + } + + for ph = b2h(r); ph != 0; ph = btreeIndexPage(p).child(0) { + if p, err = a.Get(p, ph); err != nil { + return + } + + if !btreePage(p).isIndex() { + break + } + } + + return +} + +func (root btree) last(a btreeStore) (ph int64, p btreeDataPage, err error) { + r := bufs.GCache.Get(7) + defer bufs.GCache.Put(r) + if r, err = a.Get(r, int64(root)); err != nil { + return + } + + for ph = b2h(r); ph != 0; ph = btreeIndexPage(p).child(btreeIndexPage(p).len()) { + if p, err = a.Get(p, ph); err != nil { + return + } + + if !btreePage(p).isIndex() { + break + } + } + + return +} + +// key >= p[index].key +func (root btree) seek(a btreeStore, c func(a, b []byte) int, key []byte) (p btreeDataPage, index int, equal bool, err error) { + r := bufs.GCache.Get(7) + defer bufs.GCache.Put(r) + if r, err = a.Get(r, int64(root)); err != nil { + return + } + + for ph := b2h(r); ph != 0; ph = btreeIndexPage(p).child(index) { + if p, err = a.Get(p, ph); err != nil { + break + } + + if index, equal, err = btreePage(p).find(a, c, key); err != nil { + break + } + + if equal { + if !btreePage(p).isIndex() { + break + } + + 
p, err = a.Get(p, btreeIndexPage(p).dataPage(index)) + index = 0 + break + } + + if !btreePage(p).isIndex() { + break + } + } + return +} + +func (root btree) clear(a btreeStore) (err error) { + r := bufs.GCache.Get(7) + defer bufs.GCache.Put(r) + if r, err = a.Get(r, int64(root)); err != nil { + return + } + + iroot := b2h(r) + if iroot == 0 { + return + } + + if err = root.clear2(a, iroot); err != nil { + return + } + + var b [7]byte + return a.Realloc(int64(root), b[:]) +} + +func (root btree) clear2(a btreeStore, ph int64) (err error) { + var p = bufs.GCache.Get(maxBuf) + defer bufs.GCache.Put(p) + if p, err = a.Get(p, ph); err != nil { + return + } + + switch btreePage(p).isIndex() { + case true: + ip := btreeIndexPage(p) + for i := 0; i <= ip.len(); i++ { + root.clear2(a, ip.child(i)) + + } + case false: + dp := btreeDataPage(p) + for i := 0; i < dp.len(); i++ { + if err = dp.setKey(a, i, nil); err != nil { + return + } + + if err = dp.setValue(a, i, nil); err != nil { + return + } + } + } + return a.Free(ph) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/btree_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/btree_test.go new file mode 100644 index 00000000..d6fb6409 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/btree_test.go @@ -0,0 +1,1887 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lldb + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "flag" + "fmt" + "math" + "math/rand" + "os" + "runtime" + "testing" + + "camlistore.org/third_party/github.com/cznic/fileutil" + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +var ( + testFrom = flag.Uint("from", 0, "test I [-from, -N)") + noGrow = flag.Bool("noGrow", false, "check only embeded keys/values") +) + +func verifyPageLinks(a btreeStore, tree btree, n int) (err error) { + var p btreeDataPage + var ph int64 + if ph, p, err = tree.first(a); err != nil { + return + } + + if n == 0 { + if ph != 0 || p != nil { + return fmt.Errorf("first() should returned nil page") + } + + ph2, p2, err := tree.last(a) + if err != nil { + return err + } + + if ph2 != 0 || p2 != nil { + return fmt.Errorf("last() should returned nil page") + } + + } + + n0 := n + var prev int64 + var lastKey []byte + for ph != 0 { + if p, err = a.Get(nil, ph); err != nil { + return + } + + if g, e := p.prev(), prev; g != e { + return fmt.Errorf("broken L-R DLL chain: p %d p.prev %d, exp %d", ph, g, e) + } + + for i := 0; i < p.len(); i++ { + key, err := p.key(a, i) + if err != nil { + return err + } + + if key == nil { + return fmt.Errorf("nil key") + } + + if lastKey != nil && !(bytes.Compare(lastKey, key) < 0) { + return fmt.Errorf("L-R key ordering broken") + } + + lastKey = key + n-- + } + + prev, ph = ph, p.next() + } + + if n != 0 { + return fmt.Errorf("# of keys off by %d (L-R)", n) + } + + n = n0 + if ph, p, err = tree.last(a); err != nil { + return + } + + lastKey = nil + var next int64 + + for ph != 0 { + if p, err = a.Get(nil, ph); err != nil { + return + } + + if g, e := p.next(), next; g != e { + return fmt.Errorf("broken R-L DLL chain") + } + + for i := p.len() - 1; i >= 0; i-- { + key, err := p.key(a, i) + if err != nil { + return err + } + + if key == nil { + return fmt.Errorf("nil key") + } + + if lastKey != nil && !(bytes.Compare(key, lastKey) < 0) { + return fmt.Errorf("R-L key 
ordering broken") + } + + lastKey = key + n-- + } + + next, ph = ph, p.prev() + } + + if n != 0 { + return fmt.Errorf("# of keys off by %d (R-L)", n) + } + + return +} + +func testBTreePut1(t *testing.T, nf func() btreeStore, grow, from, to, xor int64) (tree btree) { + if *noGrow { + grow = 0 + } + + a := nf() + if a == nil { + t.Fatal(a) + } + + var err error + tree, err = newBTree(a) + if err != nil { + t.Fatal(err) + } + + if err := verifyPageLinks(a, tree, 0); err != nil { + t.Fatal(err) + } + + // Write and read back + var k, v, prevValue [7]byte + + n := 0 + for i := from; i < to; i++ { + h2b(k[:], 0x0100000000+i^xor) + h2b(v[:], 0x0200000000+i^xor) + kk := append(make([]byte, grow*i), k[:]...) + vv := append(make([]byte, grow*i), v[:]...) + prev, err := tree.put(nil, a, nil, kk, vv, true) + if err != nil || len(prev) != 0 { + t.Fatal(i, prev, err) + } + + var buf []byte + if buf, err = tree.get(a, nil, nil, kk); err != nil { + t.Fatal(err) + } + + if !bytes.Equal(buf, vv) { + t.Fatalf("\nK %sG %sE %s%s", hex.Dump(kk), hex.Dump(buf), hex.Dump(vv), tree.String(a)) + } + + n++ + } + + if err := verifyPageLinks(a, tree, n); err != nil { + t.Fatalf("%s\n%s", err, tree.String(a)) + } + + // Overwrite, read and extract + for i := from; i < to; i++ { + h2b(k[:], 0x0100000000+i^xor) + h2b(prevValue[:], 0x0200000000+i^xor) + h2b(v[:], 0x0300000000+i^xor) + kk := append(make([]byte, grow*i), k[:]...) + vv := append(make([]byte, grow*i), v[:]...) + expPrev := append(make([]byte, grow*i), prevValue[:]...) 
+ gotPrev, err := tree.put(nil, a, nil, kk, vv, true) + if err != nil { + t.Fatal(i, err) + } + + if !bytes.Equal(gotPrev, expPrev) { + t.Fatalf("\nK %sG %sE %s%s", hex.Dump(kk), hex.Dump(gotPrev), hex.Dump(expPrev), tree.String(a)) + } + + var buf []byte + if buf, err = tree.get(a, nil, nil, kk); err != nil { + t.Fatal(err) + } + + if !bytes.Equal(buf, vv) { + t.Fatalf("\n%s%s%s%s", hex.Dump(kk), hex.Dump(buf), hex.Dump(vv), tree.String(a)) + } + + buf = nil + if buf, err = tree.extract(a, nil, nil, kk); err != nil { + t.Fatal(err) + } + + if !bytes.Equal(buf, vv) { + t.Fatalf("i %d, from [%d, %d)\nK %sG %sE %s%s", i, from, to, hex.Dump(kk), hex.Dump(buf), hex.Dump(vv), tree.String(a)) + } + + buf = nil + if buf, err = tree.get(a, nil, nil, kk); err != nil { + t.Fatal(err) + } + + if buf != nil { + t.Fatalf("\nK %sB %s%s", hex.Dump(kk), hex.Dump(buf), tree.String(a)) + } + + buf = nil + if buf, err = tree.extract(a, nil, nil, kk); err != nil { + t.Fatal(err) + } + + if buf != nil { + t.Fatalf("\n%s\n%s%s", hex.Dump(kk), hex.Dump(buf), tree.String(a)) + } + + n-- + if err := verifyPageLinks(a, tree, n); err != nil { + t.Fatalf("%s\n%s", err, tree.String(a)) + } + } + + return +} + +var xors = [...]int64{0, 0xffffffff, 0x55555555, 0xaaaaaaaa} + +func TestBTreePutGetExtract(t *testing.T) { + N := int64(3 * kData) + from := int64(*testFrom) + + for grow := 0; grow < 2; grow++ { + for _, x := range xors { + var s *memBTreeStore + tree := testBTreePut1(t, func() btreeStore { s = newMemBTreeStore(); return s }, int64(grow), from, N, x) + if err := verifyPageLinks(s, tree, 0); err != nil { + t.Fatal(err) + } + + if g, e := len(s.m), 1; g != e { + t.Fatalf("leak(s) %d %d\n%s", g, e, s) + } + } + } +} + +func testBTreePut2(t *testing.T, nf func() btreeStore, grow, n int) (tree btree) { + if *noGrow { + grow = 0 + } + rng, err := mathutil.NewFC32(math.MinInt32, math.MaxInt32, true) + if err != nil { + t.Fatal(err) + } + + a := nf() + if a == nil { + t.Fatal(a) + } + + tree, 
err = newBTree(a) + if err != nil { + t.Fatal(err) + } + + var k, v [7]byte + for i := 0; i < n; i++ { + ik, iv := int64(rng.Next()), int64(rng.Next()) + h2b(k[:], ik) + h2b(v[:], iv) + kk := append(make([]byte, grow*i), k[:]...) + vv := append(make([]byte, grow*i), v[:]...) + prev, err := tree.put(nil, a, nil, kk, vv, true) + if err != nil || len(prev) != 0 { + t.Fatal(i, prev, err) + } + + var buf []byte + if buf, err = tree.get(a, nil, nil, kk); err != nil { + t.Fatal(err) + } + + if !bytes.Equal(buf, vv) { + t.Fatalf("\n%s%s%s%s", hex.Dump(kk), hex.Dump(buf), hex.Dump(vv), tree.String(a)) + } + } + + if err := verifyPageLinks(a, tree, n); err != nil { + t.Fatalf("%s\n%s\n", err, tree.String(a)) + } + + rng.Seek(0) + for i := 0; i < n; i++ { + ik, iv := int64(rng.Next()), int64(rng.Next()) + h2b(k[:], ik) + h2b(v[:], iv) + kk := append(make([]byte, grow*i), k[:]...) + vv := append(make([]byte, grow*i), v[:]...) + var buf []byte + buf, err := tree.extract(a, nil, nil, kk) + if err != nil { + t.Fatal(i, err) + } + + if !bytes.Equal(buf, vv) { + t.Fatalf("\n%s\n%s\n%s\n%s", hex.Dump(kk), hex.Dump(buf), hex.Dump(vv), tree.String(a)) + } + + if err := verifyPageLinks(a, tree, n-i-1); err != nil { + t.Fatalf("%s\n%s", err, tree.String(a)) + } + } + + return +} + +func TestBTreePutGetExtractRnd(t *testing.T) { + N := *testN + + for grow := 0; grow < 2; grow++ { + var s *memBTreeStore + tree := testBTreePut2(t, func() btreeStore { s = newMemBTreeStore(); return s }, grow, N) + if err := verifyPageLinks(s, tree, 0); err != nil { + t.Fatal(err) + } + + if g, e := len(s.m), 1; g != e { + t.Fatalf("leak(s) %d %d\n%s", g, e, s) + } + } +} + +func benchmarkBTreePut(b *testing.B, v []byte) { + b.StopTimer() + rng := rand.New(rand.NewSource(42)) + ka := make([][7]byte, b.N) + for _, v := range ka { + h2b(v[:], int64(rng.Int63())) + } + a := newMemBTreeStore() + tree, err := newBTree(a) + if err != nil { + b.Fatal(err) + } + + runtime.GC() + b.StartTimer() + for _, k := range ka 
{ + tree.put(nil, a, bytes.Compare, k[:], v, true) + } +} + +func BenchmarkBTreePut1(b *testing.B) { + v := make([]byte, 1) + benchmarkBTreePut(b, v) +} + +func BenchmarkBTreePut8(b *testing.B) { + v := make([]byte, 8) + benchmarkBTreePut(b, v) +} + +func BenchmarkBTreePut16(b *testing.B) { + v := make([]byte, 16) + benchmarkBTreePut(b, v) +} + +func BenchmarkBTreePut32(b *testing.B) { + v := make([]byte, 32) + benchmarkBTreePut(b, v) +} + +func benchmarkBTreeGet(b *testing.B, v []byte) { + b.StopTimer() + rng := rand.New(rand.NewSource(42)) + ka := make([][7]byte, b.N) + for _, v := range ka { + h2b(v[:], int64(rng.Int63())) + } + a := newMemBTreeStore() + tree, err := newBTree(a) + if err != nil { + b.Fatal(err) + } + + for _, k := range ka { + tree.put(nil, a, bytes.Compare, k[:], v, true) + } + buf := make([]byte, len(v)) + runtime.GC() + b.StartTimer() + for _, k := range ka { + tree.get(a, buf, bytes.Compare, k[:]) + } +} + +func BenchmarkBTreeGet1(b *testing.B) { + v := make([]byte, 1) + benchmarkBTreeGet(b, v) +} + +func BenchmarkBTreeGet8(b *testing.B) { + v := make([]byte, 8) + benchmarkBTreeGet(b, v) +} + +func BenchmarkBTreeGet16(b *testing.B) { + v := make([]byte, 16) + benchmarkBTreeGet(b, v) +} + +func BenchmarkBTreeGet32(b *testing.B) { + v := make([]byte, 32) + benchmarkBTreeGet(b, v) +} + +func TestbTreeSeek(t *testing.T) { + N := int64(*testN) + + tree := NewBTree(nil) + + // Fill + for i := int64(1); i <= N; i++ { + tree.Set(enc8(10*i), enc8(10*i+1)) + } + + // Check + a, root := tree.store, tree.root + for i := int64(1); i <= N; i++ { + // Exact match + lowKey := enc8(10*i - 1) + key := enc8(10 * i) + highKey := enc8(10*i + 1) + p, index, eq, err := root.seek(a, nil, key) + if err != nil { + t.Fatal(err) + } + + if !eq { + t.Fatal(i) + } + + if btreePage(p).isIndex() { + t.Fatal(i, "need btreeDataPage") + } + + dp := btreeDataPage(p) + n := dp.len() + if n < 0 || n > 2*kData { + t.Fatal(i, n) + } + + if index < 0 || index >= n { + 
t.Fatal(index) + } + + g, err := dp.key(a, index) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(g, key) { + t.Fatal(i) + } + + g, err = dp.value(a, index) + if err != nil { + t.Fatal(err) + } + + value := enc8(10*i + 1) + if !bytes.Equal(g, value) { + t.Fatal(i) + } + + // Nonexistent "low" key. Search for 9 should return the key 10. + p, index, eq, err = root.seek(a, nil, lowKey) + if err != nil { + t.Fatal(err) + } + + if eq { + t.Fatal(i) + } + + if btreePage(p).isIndex() { + t.Fatal(i, "need btreeDataPage") + } + + dp = btreeDataPage(p) + n = dp.len() + if n < 0 || n > 2*kData { + t.Fatal(i, n) + } + + if index < 0 || index > n { + t.Fatal(index, n) + } + + if index == n { + ph := dp.next() + index = 0 + if dp, err = a.Get(p, ph); err != nil { + t.Fatal(err) + } + } + + g, err = dp.key(a, index) + if err != nil { + t.Fatal(err) + } + + expKey := key + if !bytes.Equal(g, expKey) { + fmt.Println(root.String(a)) + //t.Fatalf("%d low|% x| g|% x| e|% x|", i, lowKey, g, expKey) + } + + g, err = dp.value(a, index) + if err != nil { + t.Fatal(err) + } + + value = enc8(10*i + 1) + if !bytes.Equal(g, value) { + t.Fatal(i) + } + + // Nonexistent "high" key. Search for 11 should return the key 20. 
+ p, index, eq, err = root.seek(a, nil, highKey) + if err != nil { + t.Fatal(err) + } + + if eq { + t.Fatal(i) + } + + if btreePage(p).isIndex() { + t.Fatal(i, "need btreeDataPage") + } + + dp = btreeDataPage(p) + n = dp.len() + if n < 0 || n > 2*kData { + t.Fatal(i, n) + } + + if index < 0 || index > n { + t.Fatal(index, n) + } + + if index == n { + ph := dp.next() + if i == N { + if ph != 0 { + t.Fatal(ph) + } + + continue + } + + index = 0 + if dp, err = a.Get(p, ph); err != nil { + t.Fatal(err) + } + } + + g, err = dp.key(a, index) + if err != nil { + t.Fatal(err) + } + + expKey = enc8(10 * (i + 1)) + if !bytes.Equal(g, expKey) { + //fmt.Println(root.String(a)) + t.Fatalf("%d low|% x| g|% x| e|% x|", i, lowKey, g, expKey) + } + + g, err = dp.value(a, index) + if err != nil { + t.Fatal(err) + } + + value = enc8(10*(i+1) + 1) + if !bytes.Equal(g, value) { + t.Fatal(i) + } + + } +} + +func enc8(n int64) []byte { + var b [8]byte + h2b(b[:], n) + return b[:] +} + +func dec8(b []byte) (int64, error) { + if len(b) != 0 { + return 0, fmt.Errorf("dec8: len != 8 but %d", len(b)) + } + + return b2h(b), nil +} + +func TestbTreeNext(t *testing.T) { + N := int64(*testN) + + tree := NewBTree(nil) + enum, _, err := tree.seek(enc8(0)) + if err != nil { + t.Fatal(err) + } + + if _, _, err = enum.current(); !fileutil.IsEOF(err) { + t.Fatal(err) + } + + if err = enum.next(); !fileutil.IsEOF(err) { + t.Fatal(err) + } + + if err = enum.prev(); !fileutil.IsEOF(err) { + t.Fatal(err) + } + + // Fill + for i := int64(1); i <= N; i++ { + tree.Set(enc8(10*i), enc8(10*i+1)) + } + + var eq bool + + enum, eq, err = tree.seek(enc8(0)) + if err != nil { + t.Fatal(err) + } + + if eq { + t.Fatal(eq) + } + + // index: 0 + if _, _, err = enum.current(); err != nil { + t.Fatal(err) + } + + if err = enum.next(); N > 1 && err != nil { + t.Fatal(err) + } + + enum, eq, err = tree.seek(enc8(N * 10)) + if err != nil { + t.Fatal(err) + } + + if !eq { + t.Fatal(eq) + } + + // index: N-1 + if _, _, err = 
enum.current(); err != nil { + t.Fatal(err) + } + + if err = enum.next(); N > 1 && !fileutil.IsEOF(err) { + t.Fatal(err) + } + + enum, eq, err = tree.seek(enc8(N*10 + 1)) + if err != nil { + t.Fatal(err) + } + + if eq { + t.Fatal(eq) + } + + // index: N + if _, _, err = enum.current(); !fileutil.IsEOF(err) { + t.Fatal(err) + } + + if err = enum.next(); N > 1 && !fileutil.IsEOF(err) { + t.Fatal(err) + } + + enum, _, err = tree.seek(enc8(0)) + if err != nil { + t.Fatal(err) + } + + for i := int64(1); i <= N; i++ { + expKey, expValue := enc8(10*i), enc8(10*i+1) + k, v, err := enum.current() + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(k, expKey) { + t.Fatal(i) + } + + if !bytes.Equal(v, expValue) { + t.Fatal(i) + } + + switch { + case i == N: + if err := enum.next(); !fileutil.IsEOF(err) { + t.Fatal(err) + } + default: + if err := enum.next(); err != nil { + t.Fatal(err) + } + } + } +} + +func TestbTreePrev(t *testing.T) { + N := int64(*testN) + + tree := NewBTree(nil) + enum, _, err := tree.seek(enc8(0)) + if err != nil { + t.Fatal(err) + } + + if _, _, err = enum.current(); !fileutil.IsEOF(err) { + t.Fatal(err) + } + + if err = enum.next(); !fileutil.IsEOF(err) { + t.Fatal(err) + } + + if err = enum.prev(); !fileutil.IsEOF(err) { + t.Fatal(err) + } + + // Fill + for i := int64(1); i <= N; i++ { + tree.Set(enc8(10*i), enc8(10*i+1)) + } + + var eq bool + + enum, eq, err = tree.seek(enc8(0)) + if err != nil { + t.Fatal(err) + } + + if eq { + t.Fatal(eq) + } + + // index: 0 + if _, _, err = enum.current(); err != nil { + t.Fatal(err) + } + + if err = enum.prev(); !fileutil.IsEOF(err) { + t.Fatal(err) + } + + enum, eq, err = tree.seek(enc8(N * 10)) + if err != nil { + t.Fatal(err) + } + + if !eq { + t.Fatal(eq) + } + + // index: N-1 + if _, _, err = enum.current(); err != nil { + t.Fatal(err) + } + + if err = enum.prev(); N > 1 && err != nil { + t.Fatal(err) + } + + enum, eq, err = tree.seek(enc8(N*10 + 1)) + if err != nil { + t.Fatal(err) + } + + if eq { + 
t.Fatal(eq) + } + + // index: N + if _, _, err = enum.current(); !fileutil.IsEOF(err) { + t.Fatal(err) + } + + if err = enum.prev(); err != nil { + t.Fatal(err) + } + + enum, _, err = tree.seek(enc8(N * 10)) + if err != nil { + t.Fatal(err) + } + + for i := N; i >= 1; i-- { + expKey, expValue := enc8(10*i), enc8(10*i+1) + k, v, err := enum.current() + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(k, expKey) { + t.Fatalf("%d k|% x| expK|% x| %s\n", i, k, expKey, tree.root.String(tree.store)) + } + + if !bytes.Equal(v, expValue) { + t.Fatal(i) + } + + switch { + case i == 1: + if err := enum.prev(); !fileutil.IsEOF(err) { + t.Fatal(err) + } + default: + if err := enum.prev(); err != nil { + t.Fatal(i, err) + } + } + } +} + +func TestBTreeClear(t *testing.T) { + N := int64(*testN) + + var err error + var p []byte + for n := int64(0); n <= N; n = n*3/2 + 1 { + tree := NewBTree(nil) + for i := int64(0); i < n; i++ { + k := append(make([]byte, kKV), enc8(10*i+1)...) + v := append(make([]byte, kKV+1), enc8(10*i+2)...) + if err = tree.Set(k, v); err != nil { + t.Fatal(err) + } + } + + if err = tree.Clear(); err != nil { + t.Fatal(err) + } + + if g, e := len(tree.store.(*memBTreeStore).m), 1; g != e { + t.Fatalf("%v %v %v\n%s", n, g, e, tree.store.(*memBTreeStore).String()) + } + + if p, err = tree.store.Get(p, 1); err != nil { + t.Fatal(err) + } + + if g, e := p, zeros[:7]; len(g) != 0 && !bytes.Equal(g, e) { + t.Fatalf("|% x| |% x|", g, e) + } + } +} + +func TestBTreeRemove(t *testing.T) { + N := int64(*testN) + + for n := int64(0); n <= N; n = n*3/2 + 1 { + f := NewMemFiler() + store, err := NewAllocator(f, &Options{}) + if err != nil { + t.Fatal(err) + } + + sz0, err := f.Size() + if err != nil { + t.Fatal(err) + } + + tree, handle, err := CreateBTree(store, nil) + if err != nil { + t.Fatal(err) + } + + for i := int64(0); i < n; i++ { + k := append(make([]byte, kKV), enc8(10*i+1)...) + v := append(make([]byte, kKV+1), enc8(10*i+2)...) 
+ if err = tree.Set(k, v); err != nil { + t.Fatal(err) + } + } + + if err = RemoveBTree(store, handle); err != nil { + t.Fatal(err) + } + + sz, err := f.Size() + if err != nil { + t.Fatal(err) + } + + if g, e := sz-sz0, int64(0); g != e { + t.Fatal(g, e) + } + } +} + +func collate(a, b []byte) (r int) { + da, err := DecodeScalars(a) + if err != nil { + panic(err) + } + + db, err := DecodeScalars(b) + if err != nil { + panic(err) + } + + r, err = Collate(da, db, nil) + if err != nil { + panic(err) + } + + return +} + +func TestBTreeCollatingBug(t *testing.T) { + tree := NewBTree(collate) + + date, err := EncodeScalars("Date") + if err != nil { + t.Fatal(err) + } + + customer, err := EncodeScalars("Customer") + if err != nil { + t.Fatal(err) + } + + if g, e := collate(customer, date), -1; g != e { + t.Fatal(g, e) + } + + if g, e := collate(date, customer), 1; g != e { + t.Fatal(g, e) + } + + err = tree.Set(date, nil) + if err != nil { + t.Fatal(err) + } + + err = tree.Set(customer, nil) + if err != nil { + t.Fatal(err) + } + + var b bytes.Buffer + tree.Dump(&b) + t.Logf("\n%s", b.String()) + + key, _, err := tree.First() + if err != nil { + t.Fatal(err) + } + + if g, e := key, customer; !bytes.Equal(g, e) { + t.Fatal(g, e) + } + +} + +func TestExtract(t *testing.T) { // Test of the exported wrapper only, .extract tested elsewhere + bt := NewBTree(nil) + bt.Set([]byte("a"), []byte("b")) + bt.Set([]byte("c"), []byte("d")) + bt.Set([]byte("e"), []byte("f")) + + if v, err := bt.Get(nil, []byte("a")); string(v) != "b" || err != nil { + t.Fatal(v, err) + } + + if v, err := bt.Get(nil, []byte("c")); string(v) != "d" || err != nil { + t.Fatal(v, err) + } + + if v, err := bt.Get(nil, []byte("e")); string(v) != "f" || err != nil { + t.Fatal(v, err) + } + + if v, err := bt.Extract(nil, []byte("c")); string(v) != "d" || err != nil { + t.Fatal(v, err) + } + + if v, err := bt.Get(nil, []byte("a")); string(v) != "b" || err != nil { + t.Fatal(v, err) + } + + if v, err := bt.Get(nil, 
[]byte("c")); v != nil || err != nil { + t.Fatal(v, err) + } + + if v, err := bt.Get(nil, []byte("e")); string(v) != "f" || err != nil { + t.Fatal(v, err) + } +} + +func TestFirst(t *testing.T) { + bt := NewBTree(nil) + + if k, v, err := bt.First(); k != nil || v != nil || err != nil { + t.Fatal(k, v, err) + } + + bt.Set([]byte("a"), []byte("b")) + bt.Set([]byte("c"), []byte("d")) + + if k, v, err := bt.First(); string(k) != "a" || string(v) != "b" || err != nil { + t.Fatal(k, v, err) + } + + if err := bt.Delete([]byte("a")); err != nil { + t.Fatal(err) + } + + if k, v, err := bt.First(); string(k) != "c" || string(v) != "d" || err != nil { + t.Fatal(k, v, err) + } + + if err := bt.Delete([]byte("c")); err != nil { + t.Fatal(err) + } + + if k, v, err := bt.First(); k != nil || v != nil || err != nil { + t.Fatal(k, v, err) + } +} + +func TestLast(t *testing.T) { + bt := NewBTree(nil) + + if k, v, err := bt.First(); k != nil || v != nil || err != nil { + t.Fatal(k, v, err) + } + + bt.Set([]byte("a"), []byte("b")) + bt.Set([]byte("c"), []byte("d")) + + if k, v, err := bt.Last(); string(k) != "c" || string(v) != "d" || err != nil { + t.Fatal(k, v, err) + } + + if err := bt.Delete([]byte("c")); err != nil { + t.Fatal(err) + } + + if k, v, err := bt.First(); string(k) != "a" || string(v) != "b" || err != nil { + t.Fatal(k, v, err) + } + + if err := bt.Delete([]byte("a")); err != nil { + t.Fatal(err) + } + + if k, v, err := bt.First(); k != nil || v != nil || err != nil { + t.Fatal(k, v, err) + } +} + +func TestseekFirst(t *testing.T) { + bt := NewBTree(nil) + + enum, err := bt.seekFirst() + if !fileutil.IsEOF(err) { + t.Fatal(err) + } + + bt.Set([]byte("c"), []byte("d")) + enum, err = bt.seekFirst() + if err != nil { + t.Fatal(err) + } + + err = enum.prev() + if !fileutil.IsEOF(err) { + t.Fatal(err) + } + + err = enum.next() + if !fileutil.IsEOF(err) { + t.Fatal(err) + } + + k, v, err := enum.current() + if err != nil { + t.Fatal(err) + } + + if string(k) != "c" || 
string(v) != "d" { + t.Fatal(k, v) + } + + bt.Set([]byte("a"), []byte("b")) + enum, err = bt.seekFirst() + if err != nil { + t.Fatal(err) + } + + err = enum.prev() + if !fileutil.IsEOF(err) { + t.Fatal(err) + } + + k, v, err = enum.current() + if err != nil { + t.Fatal(err) + } + + if string(k) != "a" || string(v) != "b" { + t.Fatal(k, v) + } + + err = enum.next() + if err != nil { + t.Fatal(err) + } + + k, v, err = enum.current() + if err != nil { + t.Fatal(err) + } + + if string(k) != "c" || string(v) != "d" { + t.Fatal(k, v) + } +} + +func TestseekLast(t *testing.T) { + bt := NewBTree(nil) + + enum, err := bt.seekFirst() + if !fileutil.IsEOF(err) { + t.Fatal(err) + } + + bt.Set([]byte("a"), []byte("b")) + enum, err = bt.seekFirst() + if err != nil { + t.Fatal(err) + } + + err = enum.prev() + if !fileutil.IsEOF(err) { + t.Fatal(err) + } + + err = enum.next() + if !fileutil.IsEOF(err) { + t.Fatal(err) + } + + k, v, err := enum.current() + if err != nil { + t.Fatal(err) + } + + if string(k) != "a" || string(v) != "b" { + t.Fatal(k, v) + } + + bt.Set([]byte("c"), []byte("d")) + enum, err = bt.seekLast() + if err != nil { + t.Fatal(err) + } + + err = enum.next() + if !fileutil.IsEOF(err) { + t.Fatal(err) + } + + k, v, err = enum.current() + if err != nil { + t.Fatal(err) + } + + if string(k) != "c" || string(v) != "d" { + t.Fatal(k, v) + } + + err = enum.prev() + if err != nil { + t.Fatal(err) + } + + k, v, err = enum.current() + if err != nil { + t.Fatal(err) + } + + if string(k) != "a" || string(v) != "b" { + t.Fatal(k, v) + } +} + +func TestDeleteAny(t *testing.T) { + const N = 1e4 + rng := rand.New(rand.NewSource(42)) + ref := map[uint32]bool{} + tr := NewBTree(nil) + data := []byte{42} + var key [4]byte + for i := 0; i < N; i++ { + k := uint32(rng.Int()) + binary.LittleEndian.PutUint32(key[:], k) + if err := tr.Set(key[:], data); err != nil { + t.Fatal(err) + } + + ref[k] = true + } + + for i := len(ref); i != 0; i-- { + empty, err := tr.DeleteAny() + if err != 
nil { + t.Fatal(err) + } + + if empty && i != 1 { + t.Fatal(i) + } + } +} + +func benchmarkBTreeSetFiler(b *testing.B, f Filer, sz int) { + if err := f.BeginUpdate(); err != nil { + b.Error(err) + return + } + + a, err := NewAllocator(f, &Options{}) + if err != nil { + b.Error(err) + return + } + + tr, _, err := CreateBTree(a, nil) + if err != nil { + f.EndUpdate() + b.Error(err) + return + } + + if err = f.EndUpdate(); err != nil { + b.Error(err) + return + } + + keys := make([][8]byte, b.N) + for i := range keys { + binary.BigEndian.PutUint64(keys[i][:], uint64(i)) + } + v := make([]byte, sz) + runtime.GC() + b.ResetTimer() + for _, k := range keys { + if err = f.BeginUpdate(); err != nil { + b.Error(err) + return + } + + if err := tr.Set(k[:], v); err != nil { + f.EndUpdate() + b.Error(err) + return + } + + if err = f.EndUpdate(); err != nil { + b.Error(err) + return + } + } +} + +func benchmarkBTreeSetMemFiler(b *testing.B, sz int) { + f := NewMemFiler() + benchmarkBTreeSetFiler(b, f, sz) +} + +func BenchmarkBTreeSetMemFiler0(b *testing.B) { + benchmarkBTreeSetMemFiler(b, 0) +} + +func BenchmarkBTreeSetMemFiler1e1(b *testing.B) { + benchmarkBTreeSetMemFiler(b, 1e1) +} + +func BenchmarkBTreeSetMemFiler1e2(b *testing.B) { + benchmarkBTreeSetMemFiler(b, 1e2) +} + +func BenchmarkBTreeSetMemFiler1e3(b *testing.B) { + benchmarkBTreeSetMemFiler(b, 1e3) +} + +func benchmarkBTreeSetSimpleFileFiler(b *testing.B, sz int) { + dir, testDbName := temp() + defer os.RemoveAll(dir) + + f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer f.Close() + + benchmarkBTreeSetFiler(b, NewSimpleFileFiler(f), sz) +} + +func BenchmarkBTreeSetSimpleFileFiler0(b *testing.B) { + benchmarkBTreeSetSimpleFileFiler(b, 0) +} + +func BenchmarkBTreeSetSimpleFileFiler1e1(b *testing.B) { + benchmarkBTreeSetSimpleFileFiler(b, 1e1) +} + +func BenchmarkBTreeSetSimpleFileFiler1e2(b *testing.B) { + benchmarkBTreeSetSimpleFileFiler(b, 1e2) 
+} + +func BenchmarkBTreeSetSimpleFileFiler1e3(b *testing.B) { + benchmarkBTreeSetSimpleFileFiler(b, 1e3) +} + +func benchmarkBTreeSetRollbackFiler(b *testing.B, sz int) { + dir, testDbName := temp() + defer os.RemoveAll(dir) + + f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer f.Close() + + g := NewSimpleFileFiler(f) + var filer *RollbackFiler + if filer, err = NewRollbackFiler( + g, + func(sz int64) error { + if err = g.Truncate(sz); err != nil { + return err + } + + return g.Sync() + }, + g, + ); err != nil { + b.Error(err) + return + } + + benchmarkBTreeSetFiler(b, filer, sz) +} + +func BenchmarkBTreeSetRollbackFiler0(b *testing.B) { + benchmarkBTreeSetRollbackFiler(b, 0) +} + +func BenchmarkBTreeSetRollbackFiler1e1(b *testing.B) { + benchmarkBTreeSetRollbackFiler(b, 1e1) +} + +func BenchmarkBTreeSetRollbackFiler1e2(b *testing.B) { + benchmarkBTreeSetRollbackFiler(b, 1e2) +} + +func BenchmarkBTreeSetRollbackFiler1e3(b *testing.B) { + benchmarkBTreeSetRollbackFiler(b, 1e3) +} + +func benchmarkBTreeSetACIDFiler(b *testing.B, sz int) { + dir, testDbName := temp() + defer os.RemoveAll(dir) + + f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer f.Close() + + wal, err := os.OpenFile(testDbName+".wal", os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer wal.Close() + + filer, err := NewACIDFiler(NewSimpleFileFiler(f), wal) + if err != nil { + b.Error(err) + return + } + + benchmarkBTreeSetFiler(b, filer, sz) +} + +func BenchmarkBTreeSetACIDFiler0(b *testing.B) { + benchmarkBTreeSetACIDFiler(b, 0) +} + +func BenchmarkBTreeSetACIDFiler1e1(b *testing.B) { + benchmarkBTreeSetACIDFiler(b, 1e1) +} + +func BenchmarkBTreeSetACIDFiler1e2(b *testing.B) { + benchmarkBTreeSetACIDFiler(b, 1e2) +} + +func BenchmarkBTreeSetACIDFiler1e3(b *testing.B) { + benchmarkBTreeSetACIDFiler(b, 1e3) +} + +func 
testbTreeEnumeratorInvalidating(t *testing.T, mutate func(b *BTree) error) { + b := NewBTree(nil) + if err := b.Set([]byte{1}, []byte{2}); err != nil { + t.Fatal(err) + } + + if err := b.Set([]byte{3}, []byte{4}); err != nil { + t.Fatal(err) + } + + e, err := b.seekFirst() + if err != nil { + t.Fatal(err) + } + + if _, _, err = e.current(); err != nil { + t.Fatal(err) + } + + if err = e.next(); err != nil { + t.Fatal(err) + } + + if err = e.prev(); err != nil { + t.Fatal(err) + } + + if err = mutate(b); err != nil { + t.Fatal(err) + } + + if _, _, err = e.current(); err == nil { + t.Fatal(err) + } + + if _, ok := err.(*ErrINVAL); !ok { + t.Fatalf("%T", err) + } + + err = e.next() + if err == nil { + t.Fatal(err) + } + + if _, ok := err.(*ErrINVAL); !ok { + t.Fatalf("%T", err) + } + + err = e.prev() + if err == nil { + t.Fatal(err) + } + + if _, ok := err.(*ErrINVAL); !ok { + t.Fatalf("%T", err) + } + +} + +func TestBTreeEnumeratorInvalidating(t *testing.T) { + testbTreeEnumeratorInvalidating(t, func(b *BTree) error { return b.Clear() }) + testbTreeEnumeratorInvalidating(t, func(b *BTree) error { return b.Delete([]byte{1}) }) + testbTreeEnumeratorInvalidating(t, func(b *BTree) error { _, err := b.DeleteAny(); return err }) + testbTreeEnumeratorInvalidating(t, func(b *BTree) error { _, err := b.Extract(nil, []byte{1}); return err }) + testbTreeEnumeratorInvalidating(t, func(b *BTree) error { + _, _, err := b.Put( + nil, + []byte{1}, + func(k, o []byte) ([]byte, bool, error) { return nil, false, nil }, + ) + return err + }) + testbTreeEnumeratorInvalidating(t, func(b *BTree) error { return b.Set([]byte{4}, []byte{5}) }) +} + +func n2b(n int) []byte { + var b [8]byte + binary.BigEndian.PutUint64(b[:], uint64(n)) + return b[:] +} + +func b2n(b []byte) int { + if len(b) != 8 { + return mathutil.MinInt + } + + return int(binary.BigEndian.Uint64(b)) +} + +func TestBTreeSeekNext(t *testing.T) { + // seeking within 3 keys: 10, 20, 30 + table := []struct { + k int + hit bool 
+ keys []int + }{ + {5, false, []int{10, 20, 30}}, + {10, true, []int{10, 20, 30}}, + {15, false, []int{20, 30}}, + {20, true, []int{20, 30}}, + {25, false, []int{30}}, + {30, true, []int{30}}, + {35, false, []int{}}, + } + + for i, test := range table { + up := test.keys + db := NewBTree(nil) + + if err := db.Set(n2b(10), n2b(100)); err != nil { + t.Fatal(i, err) + } + + if err := db.Set(n2b(20), n2b(200)); err != nil { + t.Fatal(i, err) + } + + if err := db.Set(n2b(30), n2b(300)); err != nil { + t.Fatal(i, err) + } + + for brokenSerial := 0; brokenSerial < 16; brokenSerial++ { + en, hit, err := db.Seek(n2b(test.k)) + if err != nil { + t.Fatal(err) + } + + if g, e := hit, test.hit; g != e { + t.Fatal(i, g, e) + } + + j := 0 + for { + if brokenSerial&(1<= len(up) { + t.Fatal(i, j, brokenSerial) + } + + if g, e := b2n(k), up[j]; g != e { + t.Fatal(i, j, brokenSerial, g, e) + } + + if g, e := len(v), 8; g != e { + t.Fatal(i, g, e) + } + + if g, e := b2n(v), 10*up[j]; g != e { + t.Fatal(i, g, e) + } + + j++ + + } + + if g, e := j, len(up); g != e { + t.Fatal(i, j, g, e) + } + } + + } +} + +func TestBTreeSeekPrev(t *testing.T) { + // seeking within 3 keys: 10, 20, 30 + table := []struct { + k int + hit bool + keys []int + }{ + {5, false, []int{10}}, + {10, true, []int{10}}, + {15, false, []int{20, 10}}, + {20, true, []int{20, 10}}, + {25, false, []int{30, 20, 10}}, + {30, true, []int{30, 20, 10}}, + {35, false, []int{}}, + } + + for i, test := range table { + down := test.keys + db := NewBTree(nil) + if err := db.Set(n2b(10), n2b(100)); err != nil { + t.Fatal(i, err) + } + + if err := db.Set(n2b(20), n2b(200)); err != nil { + t.Fatal(i, err) + } + + if err := db.Set(n2b(30), n2b(300)); err != nil { + t.Fatal(i, err) + } + + for brokenSerial := 0; brokenSerial < 16; brokenSerial++ { + en, hit, err := db.Seek(n2b(test.k)) + if err != nil { + t.Fatal(err) + } + + if g, e := hit, test.hit; g != e { + t.Fatal(i, g, e) + } + + j := 0 + for { + if brokenSerial&(1<= len(down) 
{ + t.Fatal(i, j, brokenSerial) + } + + if g, e := b2n(k), down[j]; g != e { + t.Fatal(i, j, brokenSerial, g, e) + } + + if g, e := len(v), 8; g != e { + t.Fatal(i, g, e) + } + + if g, e := b2n(v), 10*down[j]; g != e { + t.Fatal(i, g, e) + } + + j++ + + } + + if g, e := j, len(down); g != e { + t.Fatal(i, j, g, e) + } + } + + } +} + +func TestBTreeSeekFirst(t *testing.T) { + db := NewBTree(nil) + en, err := db.SeekFirst() + if err == nil { + t.Fatal(err) + } + + if err := db.Set(n2b(100), n2b(1000)); err != nil { + t.Fatal(err) + } + + if en, err = db.SeekFirst(); err != nil { + t.Fatal(err) + } + + k, v, err := en.Next() + if err != nil { + t.Fatal(err) + } + + if g, e := b2n(k), 100; g != e { + t.Fatal(g, e) + } + + if g, e := b2n(v), 1000; g != e { + t.Fatal(g, e) + } + + if err := db.Set(n2b(110), n2b(1100)); err != nil { + t.Fatal(err) + } + + if en, err = db.SeekFirst(); err != nil { + t.Fatal(err) + } + + if k, v, err = en.Next(); err != nil { + t.Fatal(err) + } + + if g, e := b2n(k), 100; g != e { + t.Fatal(g, e) + } + + if g, e := b2n(v), 1000; g != e { + t.Fatal(g, e) + } + + if err := db.Set(n2b(90), n2b(900)); err != nil { + t.Fatal(err) + } + + if en, err = db.SeekFirst(); err != nil { + t.Fatal(err) + } + + if k, v, err = en.Next(); err != nil { + t.Fatal(err) + } + + if g, e := b2n(k), 90; g != e { + t.Fatal(g, e) + } + + if g, e := b2n(v), 900; g != e { + t.Fatal(g, e) + } + +} + +func TestBTreeSeekLast(t *testing.T) { + db := NewBTree(nil) + en, err := db.SeekLast() + if err == nil { + t.Fatal(err) + } + + if err := db.Set(n2b(100), n2b(1000)); err != nil { + t.Fatal(err) + } + + if en, err = db.SeekLast(); err != nil { + t.Fatal(err) + } + + k, v, err := en.Next() + if err != nil { + t.Fatal(err) + } + + if g, e := b2n(k), 100; g != e { + t.Fatal(g, e) + } + + if g, e := b2n(v), 1000; g != e { + t.Fatal(g, e) + } + + if err := db.Set(n2b(90), n2b(900)); err != nil { + t.Fatal(err) + } + + if en, err = db.SeekLast(); err != nil { + t.Fatal(err) + } 
+ + if k, v, err = en.Next(); err != nil { + t.Fatal(err) + } + + if g, e := b2n(k), 100; g != e { + t.Fatal(g, e) + } + + if g, e := b2n(v), 1000; g != e { + t.Fatal(g, e) + } + + if err := db.Set(n2b(110), n2b(1100)); err != nil { + t.Fatal(err) + } + + if en, err = db.SeekLast(); err != nil { + t.Fatal(err) + } + + if k, v, err = en.Next(); err != nil { + t.Fatal(err) + } + + if g, e := b2n(k), 110; g != e { + t.Fatal(g, e) + } + + if g, e := b2n(v), 1100; g != e { + t.Fatal(g, e) + } + +} + +// https://camlistore.org/issue/216 +func TestBug216(t *testing.T) { + const S = 2*kKV + 2 // 2*kKV+1 ok + const N = 300000 + rng, err := mathutil.NewFC32(math.MinInt32, math.MaxInt32, true) + if err != nil { + t.Fatal(err) + } + k := make([]byte, S/2) + v := make([]byte, S-S/2) + tr := NewBTree(nil) + for i := 0; i < N; i++ { + for i := range k { + k[i] = byte(rng.Next()) + } + for i := range v { + v[i] = byte(rng.Next()) + } + + if err := tr.Set(h2b(k, int64(i)), h2b(v, int64(i))); err != nil { + t.Fatal(i, err) + } + + if (i+1)%10000 == 0 { + //dbg("%v", i+1) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/db_bench/Makefile b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/db_bench/Makefile new file mode 100644 index 00000000..8c6048c7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/db_bench/Makefile @@ -0,0 +1,21 @@ +all: + go fmt + go test -i + go test + go build + go vet + make todo + +todo: + @grep -n ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* *.go || true + @grep -n TODO *.go || true + @grep -n BUG *.go || true + @grep -n println *.go || true + +clean: + @go clean + rm -f *~ cov cov.html bad-dump good-dump lldb.test old.txt new.txt \ + test-acidfiler0-* db_bench.test + +gocov: + gocov test $(COV) | gocov-html > cov.html diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/db_bench/main.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/db_bench/main.go new file mode 100644 index 00000000..40c26a66 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/db_bench/main.go @@ -0,0 +1,237 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +From: https://code.google.com/p/leveldb/ + +Performance + +Here is a performance report (with explanations) from the run of the included +db_bench program. The results are somewhat noisy, but should be enough to get a +ballpark performance estimate. + +Setup + +We use a database with a million entries. Each entry has a 16 byte key, and a +100 byte value. Values used by the benchmark compress to about half their +original size. + + LevelDB: version 1.1 + Date: Sun May 1 12:11:26 2011 + CPU: 4 x Intel(R) Core(TM)2 Quad CPU Q6600 @ 2.40GHz + CPUCache: 4096 KB + Keys: 16 bytes each + Values: 100 bytes each (50 bytes after compression) + Entries: 1000000 + Raw Size: 110.6 MB (estimated) + File Size: 62.9 MB (estimated) + +Write performance + +The "fill" benchmarks create a brand new database, in either sequential, or +random order. The "fillsync" benchmark flushes data from the operating system +to the disk after every operation; the other write operations leave the data +sitting in the operating system buffer cache for a while. The "overwrite" +benchmark does random writes that update existing keys in the database. + + fillseq : 1.765 micros/op; 62.7 MB/s + fillsync : 268.409 micros/op; 0.4 MB/s (10000 ops) + fillrandom : 2.460 micros/op; 45.0 MB/s + overwrite : 2.380 micros/op; 46.5 MB/s + +Each "op" above corresponds to a write of a single key/value pair. 
I.e., a +random write benchmark goes at approximately 400,000 writes per second. + +Each "fillsync" operation costs much less (0.3 millisecond) than a disk seek +(typically 10 milliseconds). We suspect that this is because the hard disk +itself is buffering the update in its memory and responding before the data has +been written to the platter. This may or may not be safe based on whether or +not the hard disk has enough power to save its memory in the event of a power +failure. + +Read performance + +We list the performance of reading sequentially in both the forward and reverse +direction, and also the performance of a random lookup. Note that the database +created by the benchmark is quite small. Therefore the report characterizes the +performance of leveldb when the working set fits in memory. The cost of reading +a piece of data that is not present in the operating system buffer cache will +be dominated by the one or two disk seeks needed to fetch the data from disk. +Write performance will be mostly unaffected by whether or not the working set +fits in memory. + + readrandom : 16.677 micros/op; (approximately 60,000 reads per second) + readseq : 0.476 micros/op; 232.3 MB/s + readreverse : 0.724 micros/op; 152.9 MB/s + +LevelDB compacts its underlying storage data in the background to improve read +performance. The results listed above were done immediately after a lot of +random writes. The results after compactions (which are usually triggered +automatically) are better. + + readrandom : 11.602 micros/op; (approximately 85,000 reads per second) + readseq : 0.423 micros/op; 261.8 MB/s + readreverse : 0.663 micros/op; 166.9 MB/s + +Some of the high cost of reads comes from repeated decompression of blocks read +from disk. 
If we supply enough cache to the leveldb so it can hold the +uncompressed blocks in memory, the read performance improves again: + + readrandom : 9.775 micros/op; (approximately 100,000 reads per second before compaction) + readrandom : 5.215 micros/op; (approximately 190,000 reads per second after compaction) + +*/ + +/* + +Executing leveldb's db_bench on local machine: + +(10:49) jnml@fsc-r550:~/src/code.google.com/p/leveldb$ ./db_bench +LevelDB: version 1.10 +Date: Fri May 17 10:49:37 2013 +CPU: 4 * Intel(R) Xeon(R) CPU X5450 @ 3.00GHz +CPUCache: 6144 KB +Keys: 16 bytes each +Values: 100 bytes each (50 bytes after compression) +Entries: 1000000 +RawSize: 110.6 MB (estimated) +FileSize: 62.9 MB (estimated) +------------------------------------------------ +fillseq : 5.334 micros/op; 20.7 MB/s +fillsync : 41386.875 micros/op; 0.0 MB/s (1000 ops) +fillrandom : 9.583 micros/op; 11.5 MB/s +overwrite : 15.441 micros/op; 7.2 MB/s +readrandom : 12.136 micros/op; (1000000 of 1000000 found) +readrandom : 8.612 micros/op; (1000000 of 1000000 found) +readseq : 0.303 micros/op; 365.1 MB/s +readreverse : 0.560 micros/op; 197.5 MB/s +compact : 2394003.000 micros/op; +readrandom : 6.504 micros/op; (1000000 of 1000000 found) +readseq : 0.271 micros/op; 407.5 MB/s +readreverse : 0.515 micros/op; 214.7 MB/s +fill100K : 4793.916 micros/op; 19.9 MB/s (1000 ops) +crc32c : 3.709 micros/op; 1053.2 MB/s (4K per op) +snappycomp : 9.545 micros/op; 409.3 MB/s (output: 55.1%) +snappyuncomp : 1.506 micros/op; 2593.9 MB/s +acquireload : 0.349 micros/op; (each op is 1000 loads) +(10:51) jnml@fsc-r550:~/src/code.google.com/p/leveldb$ + +*/ + +package main + +import ( + "encoding/binary" + "flag" + "fmt" + "log" + "os" + "runtime" + "runtime/debug" + "runtime/pprof" + "time" + + "camlistore.org/third_party/github.com/cznic/bufs" + "camlistore.org/third_party/github.com/cznic/exp/lldb" +) + +const ( + N = 1e6 +) + +var ( + value100 = []byte("Here is a performance report (with 
explanatioaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + + memprofile = flag.String("memprofile", "", "write memory profile to this file") +) + +func main() { + flag.Parse() + log.SetFlags(log.Lshortfile | log.Ltime) + fmt.Printf( + `lldb: version exp +Keys: 16 bytes each +Values: 100 bytes each (50 bytes after compression) +Entries: 1000000 +RawSize: 110.6 MB (estimated) +FileSize: 62.9 MB (estimated) +------------------------------------------------ +`) + if *memprofile != "" { + runtime.MemProfileRate = 1 + } + fillseq() + if *memprofile != "" { + f, err := os.Create(*memprofile) + if err != nil { + log.Fatal(err) + } + pprof.WriteHeapProfile(f) + f.Close() + return + } + +} + +func fillseq() { + dbname := os.Args[0] + ".db" + f, err := os.OpenFile(dbname, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666) + if err != nil { + log.Fatal(err) + } + + defer func() { + f.Close() + os.Remove(f.Name()) + }() + + filer := lldb.NewSimpleFileFiler(f) + a, err := lldb.NewAllocator(filer, &lldb.Options{}) + if err != nil { + log.Println(err) + return + } + + a.Compress = true + b, _, err := lldb.CreateBTree(a, nil) + if err != nil { + log.Println(err) + return + } + + var keys [N][16]byte + for i := range keys { + binary.BigEndian.PutUint32(keys[i][:], uint32(i)) + } + + debug.FreeOSMemory() + t0 := time.Now() + for _, key := range keys { + if err = b.Set(key[:], value100); err != nil { + log.Println(err) + return + } + } + if err := filer.Sync(); err != nil { + log.Println(err) + return + } + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + + d := time.Since(t0) + fi, err := f.Stat() + if err != nil { + log.Println(err) + return + } + + secs := float64(d/time.Nanosecond) / float64(time.Second) + sz := fi.Size() + fmt.Printf("fillseq :%19v/op;%7.1f MB/s (%g secs, %d bytes)\n", d/N, float64(sz)/secs/1e6, secs, sz) + nn, bytes := bufs.GCache.Stats() + fmt.Printf("%d %d\n", nn, bytes) + fmt.Printf("%+v\n", ms) +} diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/db_bench/main_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/db_bench/main_test.go new file mode 100644 index 00000000..8e8bdffb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/db_bench/main_test.go @@ -0,0 +1,126 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "encoding/binary" + "io/ioutil" + "os" + "runtime" + "testing" + + "camlistore.org/third_party/github.com/cznic/bufs" + "camlistore.org/third_party/github.com/cznic/exp/lldb" + "camlistore.org/third_party/github.com/cznic/zappy" +) + +func Test(t *testing.T) { + + if n := len(value100); n != 100 { + t.Fatal(n) + } + + c, err := zappy.Encode(nil, value100) + if err != nil { + t.Fatal(err) + } + + if n := len(c); n != 50 { + t.Fatal(n) + } +} + +func TestProf(t *testing.T) { + dbname := os.Args[0] + ".db" + f, err := os.OpenFile(dbname, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666) + if err != nil { + t.Fatal(err) + } + + defer func() { + f.Close() + os.Remove(f.Name()) + }() + + filer := lldb.NewSimpleFileFiler(f) // file + //filer := lldb.NewMemFiler() // mem + a, err := lldb.NewAllocator(filer, &lldb.Options{}) + if err != nil { + t.Error(err) + return + } + + a.Compress = true + b, _, err := lldb.CreateBTree(a, nil) + if err != nil { + t.Error(err) + return + } + + var key [16]byte + for i := uint32(0); int(i) < 1e6; i++ { + binary.BigEndian.PutUint32(key[:], i) + if err = b.Set(key[:], value100); err != nil { + t.Error(err) + return + } + } + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + bufsU, bufsT, bytesU, bytesT, h, m := a.CacheStats() + const p = 100.0 + t.Logf( + "cache: buffers %d/%d(%.1f%%), bytes %d/%d(%.1f%%), hits %d(%.1f%%), misses %d(%.1f%%)", + bufsU, bufsT, 
p*float64(bufsU)/float64(bufsT), + bytesU, bytesT, p*float64(bytesU)/float64(bytesT), + h, p*float64(h)/float64(h+m), + m, p*float64(m)/float64(h+m), + ) + nn, bts := bufs.GCache.Stats() + t.Logf("bufs.GCache.Stats() {%d, %d}", nn, bts) + t.Logf("%+v\n", ms) +} + +func BenchmarkMem(b *testing.B) { + f, err := ioutil.TempFile("", "") + if err != nil { + b.Fatal(err) + } + + defer func() { + f.Close() + os.Remove(f.Name()) + }() + + filer := lldb.NewSimpleFileFiler(f) + a, err := lldb.NewAllocator(filer, &lldb.Options{}) + if err != nil { + b.Error(err) + return + } + + a.Compress = true + + t, _, err := lldb.CreateBTree(a, nil) + if err != nil { + b.Error(err) + return + } + + b.ResetTimer() + var key [16]byte + for i := uint32(0); int(i) < b.N; i++ { + binary.BigEndian.PutUint32(key[:], i) + if err = t.Set(key[:], value100); err != nil { + b.Error(err) + return + } + } + + if err := filer.Sync(); err != nil { + b.Error(err) + return + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/errors.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/errors.go new file mode 100644 index 00000000..7dffe7f1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/errors.go @@ -0,0 +1,170 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Some errors returned by this package. +// +// Note that this package can return more errors than declared here, for +// example io.EOF from Filer.ReadAt(). + +package lldb + +import ( + "fmt" +) + +// ErrDecodeScalars is possibly returned from DecodeScalars +type ErrDecodeScalars struct { + B []byte // Data being decoded + I int // offending offset +} + +// Error implements the built in error type. 
+func (e *ErrDecodeScalars) Error() string { + return fmt.Sprintf("DecodeScalars: corrupted data @ %d/%d", e.I, len(e.B)) +} + +// ErrINVAL reports invalid values passed as parameters, for example negative +// offsets where only non-negative ones are allowed or read from the DB. +type ErrINVAL struct { + Src string + Val interface{} +} + +// Error implements the built in error type. +func (e *ErrINVAL) Error() string { + return fmt.Sprintf("%s: %+v", e.Src, e.Val) +} + +// ErrPERM is for example reported when a Filer is closed while BeginUpdate(s) +// are not balanced with EndUpdate(s)/Rollback(s) or when EndUpdate or Rollback +// is invoked which is not paired with a BeginUpdate. +type ErrPERM struct { + Src string +} + +// Error implements the built in error type. +func (e *ErrPERM) Error() string { + return fmt.Sprintf("%s: Operation not permitted", string(e.Src)) +} + +// ErrTag represents an ErrILSEQ kind. +type ErrType int + +// ErrILSEQ types +const ( + ErrOther ErrType = iota + + ErrAdjacentFree // Adjacent free blocks (.Off and .Arg) + ErrDecompress // Used compressed block: corrupted compression + ErrExpFreeTag // Expected a free block tag, got .Arg + ErrExpUsedTag // Expected a used block tag, got .Arg + ErrFLT // Free block is invalid or referenced multiple times + ErrFLTLoad // FLT truncated to .Off, need size >= .Arg + ErrFLTSize // Free block size (.Arg) doesn't belong to its list min size: .Arg2 + ErrFileSize // File .Name size (.Arg) != 0 (mod 16) + ErrFreeChaining // Free block, .prev.next doesn't point back to this block + ErrFreeTailBlock // Last block is free + ErrHead // Head of a free block list has non zero Prev (.Arg) + ErrInvalidRelocTarget // Reloc doesn't target (.Arg) a short or long used block + ErrInvalidWAL // Corrupted write ahead log. 
.Name: file name, .More: more + ErrLongFreeBlkTooLong // Long free block spans beyond EOF, size .Arg + ErrLongFreeBlkTooShort // Long free block must have at least 2 atoms, got only .Arg + ErrLongFreeNextBeyondEOF // Long free block .Next (.Arg) spans beyond EOF + ErrLongFreePrevBeyondEOF // Long free block .Prev (.Arg) spans beyond EOF + ErrLongFreeTailTag // Expected a long free block tail tag, got .Arg + ErrLostFreeBlock // Free block is not in any FLT list + ErrNullReloc // Used reloc block with nil target + ErrRelocBeyondEOF // Used reloc points (.Arg) beyond EOF + ErrShortFreeTailTag // Expected a short free block tail tag, got .Arg + ErrSmall // Request for a free block (.Arg) returned a too small one (.Arg2) at .Off + ErrTailTag // Block at .Off has invalid tail CC (compression code) tag, got .Arg + ErrUnexpReloc // Unexpected reloc block referred to from reloc block .Arg + ErrVerifyPadding // Used block has nonzero padding + ErrVerifyTailSize // Long free block size .Arg but tail size .Arg2 + ErrVerifyUsedSpan // Used block size (.Arg) spans beyond EOF +) + +// ErrILSEQ reports a corrupted file format. Details in fields according to Type. +type ErrILSEQ struct { + Type ErrType + Off int64 + Arg int64 + Arg2 int64 + Arg3 int64 + Name string + More interface{} +} + +// Error implements the built in error type. 
+func (e *ErrILSEQ) Error() string { + switch e.Type { + case ErrAdjacentFree: + return fmt.Sprintf("Adjacent free blocks at offset %#x and %#x", e.Off, e.Arg) + case ErrDecompress: + return fmt.Sprintf("Compressed block at offset %#x: Corrupted compressed content", e.Off) + case ErrExpFreeTag: + return fmt.Sprintf("Block at offset %#x: Expected a free block tag, got %#2x", e.Off, e.Arg) + case ErrExpUsedTag: + return fmt.Sprintf("Block at ofset %#x: Expected a used block tag, got %#2x", e.Off, e.Arg) + case ErrFLT: + return fmt.Sprintf("Free block at offset %#x is invalid or referenced multiple times", e.Off) + case ErrFLTLoad: + return fmt.Sprintf("FLT truncated to size %d, expected at least %d", e.Off, e.Arg) + case ErrFLTSize: + return fmt.Sprintf("Free block at offset %#x has size (%#x) should be at least (%#x)", e.Off, e.Arg, e.Arg2) + case ErrFileSize: + return fmt.Sprintf("File %q size (%#x) != 0 (mod 16)", e.Name, e.Arg) + case ErrFreeChaining: + return fmt.Sprintf("Free block at offset %#x: .prev.next doesn point back here.", e.Off) + case ErrFreeTailBlock: + return fmt.Sprintf("Free block at offset %#x: Cannot be last file block", e.Off) + case ErrHead: + return fmt.Sprintf("Block at offset %#x: Head of free block list has non zero .prev %#x", e.Off, e.Arg) + case ErrInvalidRelocTarget: + return fmt.Sprintf("Used reloc block at offset %#x: Target (%#x) is not a short or long used block", e.Off, e.Arg) + case ErrInvalidWAL: + return fmt.Sprintf("Corrupted write ahead log file: %q %v", e.Name, e.More) + case ErrLongFreeBlkTooLong: + return fmt.Sprintf("Long free block at offset %#x: Size (%#x) beyond EOF", e.Off, e.Arg) + case ErrLongFreeBlkTooShort: + return fmt.Sprintf("Long free block at offset %#x: Size (%#x) too small", e.Off, e.Arg) + case ErrLongFreeNextBeyondEOF: + return fmt.Sprintf("Long free block at offset %#x: Next (%#x) points beyond EOF", e.Off, e.Arg) + case ErrLongFreePrevBeyondEOF: + return fmt.Sprintf("Long free block at offset %#x: Prev 
(%#x) points beyond EOF", e.Off, e.Arg) + case ErrLongFreeTailTag: + return fmt.Sprintf("Block at offset %#x: Expected long free tail tag, got %#2x", e.Off, e.Arg) + case ErrLostFreeBlock: + return fmt.Sprintf("Free block at offset %#x: not in any FLT list", e.Off) + case ErrNullReloc: + return fmt.Sprintf("Used reloc block at offset %#x: Nil target", e.Off) + case ErrRelocBeyondEOF: + return fmt.Sprintf("Used reloc block at offset %#x: Link (%#x) points beyond EOF", e.Off, e.Arg) + case ErrShortFreeTailTag: + return fmt.Sprintf("Block at offset %#x: Expected short free tail tag, got %#2x", e.Off, e.Arg) + case ErrSmall: + return fmt.Sprintf("Request for of free block of size %d returned a too small (%d) one at offset %#x", e.Arg, e.Arg2, e.Off) + case ErrTailTag: + return fmt.Sprintf("Block at offset %#x: Invalid tail CC tag, got %#2x", e.Off, e.Arg) + case ErrUnexpReloc: + return fmt.Sprintf("Block at offset %#x: Unexpected reloc block. Referred to from reloc block at offset %#x", e.Off, e.Arg) + case ErrVerifyPadding: + return fmt.Sprintf("Used block at offset %#x: Nonzero padding", e.Off) + case ErrVerifyTailSize: + return fmt.Sprintf("Long free block at offset %#x: Size %#x, but tail size %#x", e.Off, e.Arg, e.Arg2) + case ErrVerifyUsedSpan: + return fmt.Sprintf("Used block at offset %#x: Size %#x spans beyond EOF", e.Off, e.Arg) + } + + more := "" + if e.More != nil { + more = fmt.Sprintf(", %v", e.More) + } + off := "" + if e.Off != 0 { + off = fmt.Sprintf(", off: %#x", e.Off) + } + + return fmt.Sprintf("Error%s%s", off, more) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/falloc.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/falloc.go new file mode 100644 index 00000000..71cfed0e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/falloc.go @@ -0,0 +1,1970 @@ +// Copyright 2014 The lldb Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The storage space management. + +package lldb + +import ( + "bytes" + "errors" + "fmt" + "io" + "sort" + "strings" + + "camlistore.org/third_party/github.com/cznic/bufs" + "camlistore.org/third_party/github.com/cznic/mathutil" + "camlistore.org/third_party/github.com/cznic/zappy" +) + +const ( + maxBuf = maxRq + 20 // bufs,Buffers.Alloc +) + +// Options are passed to the NewAllocator to amend some configuration. The +// compatibility promise is the same as of struct types in the Go standard +// library - introducing changes can be made only by adding new exported +// fields, which is backward compatible as long as client code uses field names +// to assign values of imported struct types literals. +// +// NOTE: No options are currently defined. +type Options struct{} + +// AllocStats record statistics about a Filer. It can be optionally filled by +// Allocator.Verify, if successful. +type AllocStats struct { + Handles int64 // total valid handles in use + Compression int64 // number of compressed blocks + TotalAtoms int64 // total number of atoms == AllocAtoms + FreeAtoms + AllocBytes int64 // bytes allocated (after decompression, if/where used) + AllocAtoms int64 // atoms allocated/used, including relocation atoms + Relocations int64 // number of relocated used blocks + FreeAtoms int64 // atoms unused + AllocMap map[int64]int64 // allocated block size in atoms -> count of such blocks + FreeMap map[int64]int64 // free block size in atoms -> count of such blocks +} + +/* + +Allocator implements "raw" storage space management (allocation and +deallocation) for a low level of a DB engine. The storage is an abstraction +provided by a Filer. + +The terms MUST or MUST NOT, if/where used in the documentation of Allocator, +written in all caps as seen here, are a requirement for any possible +alternative implementations aiming for compatibility with this one. 
+ +Filer file + +A Filer file, or simply 'file', is a linear, contiguous sequence of blocks. +Blocks may be either free (currently unused) or allocated (currently used). +Some blocks may eventually become virtual in a sense as they may not be +realized in the storage (sparse files). + +Free Lists Table + +File starts with a FLT. This table records heads of 14 doubly linked free +lists. The zero based index (I) vs minimal size of free blocks in that list, +except the last one which registers free blocks of size 4112+ atoms: + + MinSize == 2^I + + For example 0 -> 1, 1 -> 2, ... 12 -> 4096. + +Each entry in the FLT is 8 bytes in netwtork order, MSB MUST be zero, ie. the +slot value is effectively only 7 bytes. The value is the handle of the head of +the respective doubly linked free list. The FLT size is 14*8 == 112(0x70) +bytes. If the free blocks list for any particular size is empty, the respective +FLT slot is zero. Sizes of free blocks in one list MUST NOT overlap with sizes +of free lists in other list. For example, even though a free block of size 2 +technically is of minimal size >= 1, it MUST NOT be put to the list for slot 0 +(minimal size 1), but in slot 1( minimal size 2). + + slot 0: sizes [1, 2) + slot 1: sizes [2, 4) + slot 2: sizes [4, 8) + ... + slot 11: sizes [2048, 4096) + slot 12: sizes [4096, 4112) + slot 13: sizes [4112, inf) + +The last FLT slot collects all free blocks bigger than its minimal size. That +still respects the 'no overlap' invariant. + +File blocks + +A block is a linear, contiguous sequence of atoms. The first and last atoms of +a block provide information about, for example, whether the block is free or +used, what is the size of the block, etc. Details are discussed elsewhere. The +first block of a file starts immediately after FLT, ie. at file offset +112(0x70). + +Block atoms + +An atom is a fixed size piece of a block (and thus of a file too); it is 16 +bytes long. 
A consequence is that for a valid file: + + filesize == 0 (mod 16) + +The first atom of the first block is considered to be atom #1. + +Block handles + +A handle is an integer referring to a block. The reference is the number of the +atom the block starts with. Put in other way: + + handle == offset/16 - 6 + offset == 16 * (handle + 6) + +`offset` is the offset of the first byte of the block, measured in bytes +- as in fseek(3). Handle has type `int64`, but only the lower 7 bytes may be +nonzero while referring to a block, both in code as well as when persisted in +the the file's internal bookkeeping structures - see 'Block types' bellow. So a +handle is effectively only `uint56`. This also means that the maximum usable +size of a file is 2^56 atoms. That is 2^60 bytes == 1 exabyte (10^18 bytes). + +Nil handles + +A handle with numeric value of '0' refers to no block. + +Zero padding + +A padding is used to round-up a block size to be a whole number of atoms. Any +padding, if present, MUST be all zero bytes. Note that the size of padding is +in [0, 15]. + +Content wiping + +When a block is deallocated, its data content is not wiped as the added +overhead may be substantial while not necessarily needed. Client code should +however overwrite the content of any block having sensitive data with eg. zeros +(good compression) - before deallocating the block. + +Block tags + +Every block is tagged in its first byte (a head tag) and last byte (tail tag). +Block types are: + + 1. Short content used block (head tags 0x00-0xFB) + 2. Long content used block (head tag 0xFC) + 3. Relocated used block (head tag 0xFD) + 4. Short, single atom, free block (head tag 0xFE) + 5. Long free block (head tag 0xFF) + +Note: Relocated used block, 3. above (head tag 0xFD) MUST NOT refer to blocks +other then 1. or 2. above (head tags 0x00-0xFC). + +Content blocks + +Used blocks (head tags 0x00-0xFC) tail tag distinguish used/unused block and if +content is compressed or not. 
+ +Content compression + +The tail flag of an used block is one of + + CC == 0 // Content is not compressed. + CC == 1 // Content is in zappy compression format. + +If compression of written content is enabled, there are two cases: If +compressed size < original size then the compressed content should be written +if it will save at least one atom of the block. If compressed size >= original +size then the compressed content should not be used. + +It's recommended to use compression. For example the BTrees implementation +assumes compression is used. Using compression may cause a slowdown in some +cases while it may as well cause a speedup. + +Short content block + +Short content block carries content of length between N == 0(0x00) and N == +251(0xFB) bytes. + + |<-first atom start ... last atom end->| + +---++-- ... --+-- ... --++------+ + | 0 || 1... | 0x*...0x*E || 0x*F | + +---++-- ... --+-- ... --++------+ + | N || content | padding || CC | + +---++-- ... --+-- ... --++------+ + + A == (N+1)/16 + 1 // The number of atoms in the block [1, 16] + padding == 15 - (N+1)%16 // Length of the zero padding + +Long content block + +Long content block carries content of length between N == 252(0xFC) and N == +65787(0x100FB) bytes. + + |<-first atom start ... last atom end->| + +------++------+-- ... --+-- ... --++------+ + | 0 || 1..2 | 3... | 0x*...0x*E || 0x*F | + +------++------+-- ... --+-- ... --++------+ + | 0xFC || M | content | padding || CC | + +------++------+-- ... --+-- ... --++------+ + + A == (N+3)/16 + 1 // The number of atoms in the block [16, 4112] + M == N % 0x10000 // Stored as 2 bytes in network byte order + padding == 15 - (N+3)%16 // Length of the zero padding + +Relocated used block + +Relocated block allows to permanently assign a handle to some content and +resize the content anytime afterwards without having to update all the possible +existing references; the handle can be constant while the content size may be +dynamic. 
When relocating a block, any space left by the original block content, +above this single atom block, MUST be reclaimed. + +Relocations MUST point only to a used short or long block == blocks with tags +0x00...0xFC. + + +------++------+---------++----+ + | 0 || 1..7 | 8...14 || 15 | + +------++------+---------++----+ + | 0xFD || H | padding || 0 | + +------++------+---------++----+ + +H is the handle of the relocated block in network byte order. + +Free blocks + +Free blocks are the result of space deallocation. Free blocks are organized in +one or more doubly linked lists, abstracted by the FLT interface. Free blocks +MUST be "registered" by putting them in such list. Allocator MUST reuse a big +enough free block, if such exists, before growing the file size. When a free +block is created by deallocation or reallocation it MUST be joined with any +adjacently existing free blocks before "registering". If the resulting free +block is now a last block of a file, the free block MUST be discarded and the +file size MUST be truncated accordingly instead. Put differently, there MUST +NOT ever be a free block at the file end. + +A single free atom + +Is an unused block of size 1 atom. + + +------++------+--------++------+ + | 0 || 1..7 | 8...14 || 15 | + +------++------+--------++------+ + | 0xFE || P | N || 0xFE | + +------++------+--------++------+ + +P and N, stored in network byte order, are the previous and next free block +handles in the doubly linked list to which this free block belongs. + +A long unused block + +Is an unused block of size > 1 atom. + + +------++------+-------+---------+- ... -+----------++------+ + | 0 || 1..7 | 8..14 | 15...21 | | Z-7..Z-1 || Z | + +------++------+-------+---------+- ... -+----------++------+ + | 0xFF || S | P | N | Leak | S || 0xFF | + +------++------+-------+---------+- ... -+----------++------+ + + Z == 16 * S - 1 + +S is the size of this unused block in atoms. 
P and N are the previous and next +free block handles in the doubly linked list to which this free block belongs. +Leak contains any data the block had before deallocating this block. See also +the subtitle 'Content wiping' above. S, P and N are stored in network byte +order. Large free blocks may trigger a consideration of file hole punching of +the Leak field - for some value of 'large'. + +Note: Allocator methods vs CRUD[1]: + + Alloc [C]reate + Get [R]ead + Realloc [U]pdate + Free [D]elete + +Note: No Allocator method returns io.EOF. + + [1]: http://en.wikipedia.org/wiki/Create,_read,_update_and_delete + +*/ +type Allocator struct { + f Filer + flt flt + Compress bool // enables content compression + cache cache + m map[int64]*node + lru lst + expHit int64 + expMiss int64 + cacheSz int + hit uint16 + miss uint16 +} + +// NewAllocator returns a new Allocator. To open an existing file, pass its +// Filer. To create a "new" file, pass a Filer which file is of zero size. +func NewAllocator(f Filer, opts *Options) (a *Allocator, err error) { + if opts == nil { // Enforce *Options is always passed + return nil, errors.New("NewAllocator: nil opts passed") + } + + a = &Allocator{ + f: f, + cacheSz: 10, + } + + a.cinit() + switch x := f.(type) { + case *RollbackFiler: + x.afterRollback = func() error { + a.cinit() + return a.flt.load(a.f, 0) + } + case *ACIDFiler0: + x.RollbackFiler.afterRollback = func() error { + a.cinit() + return a.flt.load(a.f, 0) + } + } + + sz, err := f.Size() + if err != nil { + return + } + + a.flt.init() + if sz == 0 { + var b [fltSz]byte + if err = a.f.BeginUpdate(); err != nil { + return + } + + if _, err = f.WriteAt(b[:], 0); err != nil { + a.f.Rollback() + return + } + + return a, a.f.EndUpdate() + } + + return a, a.flt.load(f, 0) +} + +// CacheStats reports cache statistics. +// +//TODO return a struct perhaps. 
+func (a *Allocator) CacheStats() (buffersUsed, buffersTotal int, bytesUsed, bytesTotal, hits, misses int64) { + buffersUsed = len(a.m) + buffersTotal = buffersUsed + len(a.cache) + bytesUsed = a.lru.size() + bytesTotal = bytesUsed + a.cache.size() + hits = a.expHit + misses = a.expMiss + return +} + +func (a *Allocator) cinit() { + for h, n := range a.m { + a.cache.put(a.lru.remove(n)) + delete(a.m, h) + } + if a.m == nil { + a.m = map[int64]*node{} + } +} + +func (a *Allocator) cadd(b []byte, h int64) { + if len(a.m) < a.cacheSz { + n := a.cache.get(len(b)) + n.h = h + copy(n.b, b) + a.m[h] = a.lru.pushFront(n) + return + } + + // cache full + delete(a.m, a.cache.put(a.lru.removeBack()).h) + n := a.cache.get(len(b)) + n.h = h + copy(n.b, b) + a.m[h] = a.lru.pushFront(n) + return +} + +func (a *Allocator) cfree(h int64) { + n, ok := a.m[h] + if !ok { // must have been evicted + return + } + + a.cache.put(a.lru.remove(n)) + delete(a.m, h) +} + +// Alloc allocates storage space for b and returns the handle of the new block +// with content set to b or an error, if any. The returned handle is valid only +// while the block is used - until the block is deallocated. No two valid +// handles share the same value within the same Filer, but any value of a +// handle not referring to any used block may become valid any time as a result +// of Alloc. +// +// Invoking Alloc on an empty Allocator is guaranteed to return handle with +// value 1. The intended use of content of handle 1 is a root "directory" of +// other data held by an Allocator. +// +// Passing handles not obtained initially from Alloc or not anymore valid to +// any other Allocator methods can result in an irreparably corrupted database. 
+func (a *Allocator) Alloc(b []byte) (handle int64, err error) { + buf := bufs.GCache.Get(zappy.MaxEncodedLen(len(b))) + defer bufs.GCache.Put(buf) + buf, _, cc, err := a.makeUsedBlock(buf, b) + if err != nil { + return + } + + if handle, err = a.alloc(buf, cc); err == nil { + a.cadd(b, handle) + } + return +} + +func (a *Allocator) alloc(b []byte, cc byte) (h int64, err error) { + rqAtoms := n2atoms(len(b)) + if h = a.flt.find(rqAtoms); h == 0 { // must grow + var sz int64 + if sz, err = a.f.Size(); err != nil { + return + } + + h = off2h(sz) + err = a.writeUsedBlock(h, cc, b) + return + } + + // Handle is the first item of a free blocks list. + tag, s, prev, next, err := a.nfo(h) + if err != nil { + return + } + + if tag != tagFreeShort && tag != tagFreeLong { + err = &ErrILSEQ{Type: ErrExpFreeTag, Off: h2off(h), Arg: int64(tag)} + return + } + + if prev != 0 { + err = &ErrILSEQ{Type: ErrHead, Off: h2off(h), Arg: prev} + return + } + + if s < int64(rqAtoms) { + err = &ErrILSEQ{Type: ErrSmall, Arg: int64(rqAtoms), Arg2: s, Off: h2off(h)} + return + } + + if err = a.unlink(h, s, prev, next); err != nil { + return + } + + if s > int64(rqAtoms) { + freeH := h + int64(rqAtoms) + freeAtoms := s - int64(rqAtoms) + if err = a.link(freeH, freeAtoms); err != nil { + return + } + } + return h, a.writeUsedBlock(h, cc, b) +} + +// Free deallocates the block referred to by handle or returns an error, if +// any. +// +// After Free succeeds, handle is invalid and must not be used. +// +// Handle must have been obtained initially from Alloc and must be still valid, +// otherwise a database may get irreparably corrupted. 
+func (a *Allocator) Free(handle int64) (err error) { + if handle <= 0 || handle > maxHandle { + return &ErrINVAL{"Allocator.Free: handle out of limits", handle} + } + + a.cfree(handle) + return a.free(handle, 0, true) +} + +func (a *Allocator) free(h, from int64, acceptRelocs bool) (err error) { + tag, atoms, _, n, err := a.nfo(h) + if err != nil { + return + } + + switch tag { + default: + // nop + case tagUsedLong: + // nop + case tagUsedRelocated: + if !acceptRelocs { + return &ErrILSEQ{Type: ErrUnexpReloc, Off: h2off(h), Arg: h2off(from)} + } + + if err = a.free(n, h, false); err != nil { + return + } + case tagFreeShort, tagFreeLong: + return &ErrINVAL{"Allocator.Free: attempt to free a free block at off", h2off(h)} + } + + return a.free2(h, atoms) +} + +func (a *Allocator) free2(h, atoms int64) (err error) { + sz, err := a.f.Size() + if err != nil { + return + } + + ltag, latoms, lp, ln, err := a.leftNfo(h) + if err != nil { + return + } + + if ltag != tagFreeShort && ltag != tagFreeLong { + latoms = 0 + } + + var rtag byte + var ratoms, rp, rn int64 + + isTail := h2off(h)+atoms*16 == sz + if !isTail { + if rtag, ratoms, rp, rn, err = a.nfo(h + atoms); err != nil { + return + } + } + + if rtag != tagFreeShort && rtag != tagFreeLong { + ratoms = 0 + } + + switch { + case latoms == 0 && ratoms == 0: + // -> isolated <- + if isTail { // cut tail + return a.f.Truncate(h2off(h)) + } + + return a.link(h, atoms) + case latoms == 0 && ratoms != 0: + // right join -> + if err = a.unlink(h+atoms, ratoms, rp, rn); err != nil { + return + } + + return a.link(h, atoms+ratoms) + case latoms != 0 && ratoms == 0: + // <- left join + if err = a.unlink(h-latoms, latoms, lp, ln); err != nil { + return + } + + if isTail { + return a.f.Truncate(h2off(h - latoms)) + } + + return a.link(h-latoms, latoms+atoms) + } + + // case latoms != 0 && ratoms != 0: + // <- middle join -> + lh, rh := h-latoms, h+atoms + if err = a.unlink(lh, latoms, lp, ln); err != nil { + return + } + + // 
Prev unlink may have invalidated rp or rn + if _, _, rp, rn, err = a.nfo(rh); err != nil { + return + } + + if err = a.unlink(rh, ratoms, rp, rn); err != nil { + return + } + + return a.link(h-latoms, latoms+atoms+ratoms) +} + +// Add a free block h to the appropriate free list +func (a *Allocator) link(h, atoms int64) (err error) { + if err = a.makeFree(h, atoms, 0, a.flt.head(atoms)); err != nil { + return + } + + return a.flt.setHead(h, atoms, a.f) +} + +// Remove free block h from the free list +func (a *Allocator) unlink(h, atoms, p, n int64) (err error) { + switch { + case p == 0 && n == 0: + // single item list, must be head + return a.flt.setHead(0, atoms, a.f) + case p == 0 && n != 0: + // head of list (has next item[s]) + if err = a.prev(n, 0); err != nil { + return + } + + // new head + return a.flt.setHead(n, atoms, a.f) + case p != 0 && n == 0: + // last item in list + return a.next(p, 0) + } + // case p != 0 && n != 0: + // intermediate item in a list + if err = a.next(p, n); err != nil { + return + } + + return a.prev(n, p) +} + +//TODO remove ? +// Return len(slice) == n, reuse src if possible. +func need(n int, src []byte) []byte { + if cap(src) < n { + bufs.GCache.Put(src) + return bufs.GCache.Get(n) + } + + return src[:n] +} + +// Get returns the data content of a block referred to by handle or an error if +// any. The returned slice may be a sub-slice of buf if buf was large enough +// to hold the entire content. Otherwise, a newly allocated slice will be +// returned. It is valid to pass a nil buf. +// +// If the content was stored using compression then it is transparently +// returned decompressed. +// +// Handle must have been obtained initially from Alloc and must be still valid, +// otherwise invalid data may be returned without detecting the error. 
+func (a *Allocator) Get(buf []byte, handle int64) (b []byte, err error) { + buf = buf[:cap(buf)] + if n, ok := a.m[handle]; ok { + a.lru.moveToFront(n) + b = need(len(n.b), buf) + copy(b, n.b) + a.expHit++ + a.hit++ + return + } + + a.expMiss++ + a.miss++ + if a.miss > 10 && len(a.m) < 500 { + if 100*a.hit/a.miss < 95 { + a.cacheSz++ + } + a.hit, a.miss = 0, 0 + } + defer func(h int64) { + if err == nil { + a.cadd(b, h) + } + }(handle) + + first := bufs.GCache.Get(16) + defer bufs.GCache.Put(first) + relocated := false + relocSrc := handle +reloc: + if handle <= 0 || handle > maxHandle { + return nil, &ErrINVAL{"Allocator.Get: handle out of limits", handle} + } + + off := h2off(handle) + if err = a.read(first, off); err != nil { + return + } + + switch tag := first[0]; tag { + default: + dlen := int(tag) + atoms := n2atoms(dlen) + switch atoms { + case 1: + switch tag := first[15]; tag { + default: + return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)} + case tagNotCompressed: + b = need(dlen, buf) + copy(b, first[1:]) + return + case tagCompressed: + return zappy.Decode(buf, first[1:dlen+1]) + } + default: + cc := bufs.GCache.Get(1) + defer bufs.GCache.Put(cc) + dlen := int(tag) + atoms := n2atoms(dlen) + tailOff := off + 16*int64(atoms) - 1 + if err = a.read(cc, tailOff); err != nil { + return + } + + switch tag := cc[0]; tag { + default: + return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)} + case tagNotCompressed: + b = need(dlen, buf) + off += 1 + if err = a.read(b, off); err != nil { + b = buf[:0] + } + return + case tagCompressed: + zbuf := bufs.GCache.Get(dlen) + defer bufs.GCache.Put(zbuf) + off += 1 + if err = a.read(zbuf, off); err != nil { + return buf[:0], err + } + + return zappy.Decode(buf, zbuf) + } + } + case 0: + return buf[:0], nil + case tagUsedLong: + cc := bufs.GCache.Get(1) + defer bufs.GCache.Put(cc) + dlen := m2n(int(first[1])<<8 | int(first[2])) + atoms := n2atoms(dlen) + tailOff := off + 16*int64(atoms) - 1 + 
if err = a.read(cc, tailOff); err != nil { + return + } + + switch tag := cc[0]; tag { + default: + return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)} + case tagNotCompressed: + b = need(dlen, buf) + off += 3 + if err = a.read(b, off); err != nil { + b = buf[:0] + } + return + case tagCompressed: + zbuf := bufs.GCache.Get(dlen) + defer bufs.GCache.Put(zbuf) + off += 3 + if err = a.read(zbuf, off); err != nil { + return buf[:0], err + } + + return zappy.Decode(buf, zbuf) + } + case tagFreeShort, tagFreeLong: + return nil, &ErrILSEQ{Type: ErrExpUsedTag, Off: off, Arg: int64(tag)} + case tagUsedRelocated: + if relocated { + return nil, &ErrILSEQ{Type: ErrUnexpReloc, Off: off, Arg: relocSrc} + } + + handle = b2h(first[1:]) + relocated = true + goto reloc + } +} + +var reallocTestHook bool + +// Realloc sets the content of a block referred to by handle or returns an +// error, if any. +// +// Handle must have been obtained initially from Alloc and must be still valid, +// otherwise a database may get irreparably corrupted. 
+func (a *Allocator) Realloc(handle int64, b []byte) (err error) { + if handle <= 0 || handle > maxHandle { + return &ErrINVAL{"Realloc: handle out of limits", handle} + } + + a.cfree(handle) + if err = a.realloc(handle, b); err != nil { + return + } + + if reallocTestHook { + if err = cacheAudit(a.m, &a.lru); err != nil { + return + } + } + + a.cadd(b, handle) + return +} + +func (a *Allocator) realloc(handle int64, b []byte) (err error) { + var dlen, needAtoms0 int + + b8 := bufs.GCache.Get(8) + defer bufs.GCache.Put(b8) + dst := bufs.GCache.Get(zappy.MaxEncodedLen(len(b))) + defer bufs.GCache.Put(dst) + b, needAtoms0, cc, err := a.makeUsedBlock(dst, b) + if err != nil { + return + } + + needAtoms := int64(needAtoms0) + off := h2off(handle) + if err = a.read(b8[:], off); err != nil { + return + } + + switch tag := b8[0]; tag { + default: + dlen = int(b8[0]) + case tagUsedLong: + dlen = m2n(int(b8[1])<<8 | int(b8[2])) + case tagUsedRelocated: + if err = a.free(b2h(b8[1:]), handle, false); err != nil { + return err + } + + dlen = 0 + case tagFreeShort, tagFreeLong: + return &ErrINVAL{"Allocator.Realloc: invalid handle", handle} + } + + atoms := int64(n2atoms(dlen)) +retry: + switch { + case needAtoms < atoms: + // in place shrink + if err = a.writeUsedBlock(handle, cc, b); err != nil { + return + } + + fh, fa := handle+needAtoms, atoms-needAtoms + sz, err := a.f.Size() + if err != nil { + return err + } + + if h2off(fh)+16*fa == sz { + return a.f.Truncate(h2off(fh)) + } + + return a.free2(fh, fa) + case needAtoms == atoms: + // in place replace + return a.writeUsedBlock(handle, cc, b) + } + + // case needAtoms > atoms: + // in place extend or relocate + var sz int64 + if sz, err = a.f.Size(); err != nil { + return + } + + off = h2off(handle) + switch { + case off+atoms*16 == sz: + // relocating tail block - shortcut + return a.writeUsedBlock(handle, cc, b) + default: + if off+atoms*16 < sz { + // handle is not a tail block, check right neighbour + rh := handle + 
atoms + rtag, ratoms, p, n, e := a.nfo(rh) + if e != nil { + return e + } + + if rtag == tagFreeShort || rtag == tagFreeLong { + // Right neighbour is a free block + if needAtoms <= atoms+ratoms { + // can expand in place + if err = a.unlink(rh, ratoms, p, n); err != nil { + return + } + + atoms += ratoms + goto retry + + } + } + } + } + + if atoms > 1 { + if err = a.realloc(handle, nil); err != nil { + return + } + } + + var newH int64 + if newH, err = a.alloc(b, cc); err != nil { + return err + } + + rb := bufs.GCache.Cget(16) + defer bufs.GCache.Put(rb) + rb[0] = tagUsedRelocated + h2b(rb[1:], newH) + if err = a.writeAt(rb[:], h2off(handle)); err != nil { + return + } + + return a.writeUsedBlock(newH, cc, b) +} + +func (a *Allocator) writeAt(b []byte, off int64) (err error) { + var n int + if n, err = a.f.WriteAt(b, off); err != nil { + return + } + + if n != len(b) { + err = io.ErrShortWrite + } + return +} + +func (a *Allocator) write(off int64, b ...[]byte) (err error) { + rq := 0 + for _, part := range b { + rq += len(part) + } + buf := bufs.GCache.Get(rq) + defer bufs.GCache.Put(buf) + buf = buf[:0] + for _, part := range b { + buf = append(buf, part...) + } + return a.writeAt(buf, off) +} + +func (a *Allocator) read(b []byte, off int64) (err error) { + var rn int + if rn, err = a.f.ReadAt(b, off); rn != len(b) { + return &ErrILSEQ{Type: ErrOther, Off: off, More: err} + } + + return nil +} + +// nfo returns h's tag. If it's a free block then return also (s)ize (in +// atoms), (p)rev and (n)ext. If it's a used block then only (s)ize is returned +// (again in atoms). If it's a used relocate block then (n)ext is set to the +// relocation target handle. 
+func (a *Allocator) nfo(h int64) (tag byte, s, p, n int64, err error) { + off := h2off(h) + rq := int64(22) + sz, err := a.f.Size() + if err != nil { + return + } + + if off+rq >= sz { + if rq = sz - off; rq < 15 { + err = io.ErrUnexpectedEOF + return + } + } + + buf := bufs.GCache.Get(22) + defer bufs.GCache.Put(buf) + if err = a.read(buf[:rq], off); err != nil { + return + } + + switch tag = buf[0]; tag { + default: + s = int64(n2atoms(int(tag))) + case tagUsedLong: + s = int64(n2atoms(m2n(int(buf[1])<<8 | int(buf[2])))) + case tagFreeLong: + if rq < 22 { + err = io.ErrUnexpectedEOF + return + } + + s, p, n = b2h(buf[1:]), b2h(buf[8:]), b2h(buf[15:]) + case tagUsedRelocated: + s, n = 1, b2h(buf[1:]) + case tagFreeShort: + s, p, n = 1, b2h(buf[1:]), b2h(buf[8:]) + } + return +} + +// leftNfo returns nfo for h's left neighbor if h > 1 and the left neighbor is +// a free block. Otherwise all zero values are returned instead. +func (a *Allocator) leftNfo(h int64) (tag byte, s, p, n int64, err error) { + if !(h > 1) { + return + } + + buf := bufs.GCache.Get(8) + defer bufs.GCache.Put(buf) + off := h2off(h) + if err = a.read(buf[:], off-8); err != nil { + return + } + + switch tag := buf[7]; tag { + case tagFreeShort: + return a.nfo(h - 1) + case tagFreeLong: + return a.nfo(h - b2h(buf[:])) + } + return +} + +// Set h.prev = p +func (a *Allocator) prev(h, p int64) (err error) { + b := bufs.GCache.Get(7) + defer bufs.GCache.Put(b) + off := h2off(h) + if err = a.read(b[:1], off); err != nil { + return + } + + switch tag := b[0]; tag { + default: + return &ErrILSEQ{Type: ErrExpFreeTag, Off: off, Arg: int64(tag)} + case tagFreeShort: + off += 1 + case tagFreeLong: + off += 8 + } + return a.writeAt(h2b(b[:7], p), off) +} + +// Set h.next = n +func (a *Allocator) next(h, n int64) (err error) { + b := bufs.GCache.Get(7) + defer bufs.GCache.Put(b) + off := h2off(h) + if err = a.read(b[:1], off); err != nil { + return + } + + switch tag := b[0]; tag { + default: + return 
&ErrILSEQ{Type: ErrExpFreeTag, Off: off, Arg: int64(tag)} + case tagFreeShort: + off += 8 + case tagFreeLong: + off += 15 + } + return a.writeAt(h2b(b[:7], n), off) +} + +// Make the filer image @h a free block. +func (a *Allocator) makeFree(h, atoms, prev, next int64) (err error) { + buf := bufs.GCache.Get(22) + defer bufs.GCache.Put(buf) + switch { + case atoms == 1: + buf[0], buf[15] = tagFreeShort, tagFreeShort + h2b(buf[1:], prev) + h2b(buf[8:], next) + if err = a.write(h2off(h), buf[:16]); err != nil { + return + } + default: + + buf[0] = tagFreeLong + h2b(buf[1:], atoms) + h2b(buf[8:], prev) + h2b(buf[15:], next) + if err = a.write(h2off(h), buf[:22]); err != nil { + return + } + + h2b(buf[:], atoms) + buf[7] = tagFreeLong + if err = a.write(h2off(h+atoms)-8, buf[:8]); err != nil { + return + } + } + if prev != 0 { + if err = a.next(prev, h); err != nil { + return + } + } + + if next != 0 { + err = a.prev(next, h) + } + return +} + +func (a *Allocator) makeUsedBlock(dst []byte, b []byte) (w []byte, rqAtoms int, cc byte, err error) { + cc = tagNotCompressed + w = b + + var n int + if n = len(b); n > maxRq { + return nil, 0, 0, &ErrINVAL{"Allocator.makeUsedBlock: content size out of limits", n} + } + + rqAtoms = n2atoms(n) + if a.Compress && n > 14 { // attempt compression + if dst, err = zappy.Encode(dst, b); err != nil { + return + } + + n2 := len(dst) + if rqAtoms2 := n2atoms(n2); rqAtoms2 < rqAtoms { // compression saved at least a single atom + w, n, rqAtoms, cc = dst, n2, rqAtoms2, tagCompressed + } + } + return +} + +func (a *Allocator) writeUsedBlock(h int64, cc byte, b []byte) (err error) { + n := len(b) + rq := n2atoms(n) << 4 + buf := bufs.GCache.Get(rq) + defer bufs.GCache.Put(buf) + switch n <= maxShort { + case true: + buf[0] = byte(n) + copy(buf[1:], b) + case false: + m := n2m(n) + buf[0], buf[1], buf[2] = tagUsedLong, byte(m>>8), byte(m) + copy(buf[3:], b) + } + if p := n2padding(n); p != 0 { + copy(buf[rq-1-p:], zeros[:]) + } + buf[rq-1] = cc 
+ return a.writeAt(buf, h2off(h)) +} + +func (a *Allocator) verifyUnused(h, totalAtoms int64, tag byte, log func(error) bool, fast bool) (atoms, prev, next int64, err error) { + switch tag { + default: + panic("internal error") + case tagFreeShort: + var b [16]byte + off := h2off(h) + if err = a.read(b[:], off); err != nil { + return + } + + if b[15] != tagFreeShort { + err = &ErrILSEQ{Type: ErrShortFreeTailTag, Off: off, Arg: int64(b[15])} + log(err) + return + } + + atoms, prev, next = 1, b2h(b[1:]), b2h(b[8:]) + case tagFreeLong: + var b [22]byte + off := h2off(h) + if err = a.read(b[:], off); err != nil { + return + } + + atoms, prev, next = b2h(b[1:]), b2h(b[8:]), b2h(b[15:]) + if fast { + return + } + + if atoms < 2 { + err = &ErrILSEQ{Type: ErrLongFreeBlkTooShort, Off: off, Arg: int64(atoms)} + break + } + + if h+atoms-1 > totalAtoms { + err = &ErrILSEQ{Type: ErrLongFreeBlkTooLong, Off: off, Arg: atoms} + break + } + + if prev > totalAtoms { + err = &ErrILSEQ{Type: ErrLongFreePrevBeyondEOF, Off: off, Arg: next} + break + } + + if next > totalAtoms { + err = &ErrILSEQ{Type: ErrLongFreeNextBeyondEOF, Off: off, Arg: next} + break + } + + toff := h2off(h+atoms) - 8 + if err = a.read(b[:8], toff); err != nil { + return + } + + if b[7] != tag { + err = &ErrILSEQ{Type: ErrLongFreeTailTag, Off: off, Arg: int64(b[7])} + break + } + + if s2 := b2h(b[:]); s2 != atoms { + err = &ErrILSEQ{Type: ErrVerifyTailSize, Off: off, Arg: atoms, Arg2: s2} + break + } + + } + if err != nil { + log(err) + } + return +} + +func (a *Allocator) verifyUsed(h, totalAtoms int64, tag byte, buf, ubuf []byte, log func(error) bool, fast bool) (compressed bool, dlen int, atoms, link int64, err error) { + var ( + padding int + doff int64 + padZeros [15]byte + tailBuf [16]byte + ) + + switch tag { + default: // Short used + dlen = int(tag) + atoms = int64((dlen+1)/16) + 1 + padding = 15 - (dlen+1)%16 + doff = h2off(h) + 1 + case tagUsedLong: + off := h2off(h) + 1 + var b2 [2]byte + if err = 
a.read(b2[:], off); err != nil { + return + } + + dlen = m2n(int(b2[0])<<8 | int(b2[1])) + atoms = int64((dlen+3)/16) + 1 + padding = 15 - (dlen+3)%16 + doff = h2off(h) + 3 + case tagUsedRelocated: + dlen = 7 + atoms = 1 + padding = 7 + doff = h2off(h) + 1 + case tagFreeShort, tagFreeLong: + panic("internal error") + } + + if fast { + if tag == tagUsedRelocated { + dlen = 0 + if err = a.read(buf[:7], doff); err != nil { + return + } + + link = b2h(buf) + } + + return false, dlen, atoms, link, nil + } + + if ok := h+atoms-1 <= totalAtoms; !ok { // invalid last block + err = &ErrILSEQ{Type: ErrVerifyUsedSpan, Off: h2off(h), Arg: atoms} + log(err) + return + } + + tailsz := 1 + padding + off := h2off(h) + 16*atoms - int64(tailsz) + if err = a.read(tailBuf[:tailsz], off); err != nil { + return false, 0, 0, 0, err + } + + if ok := bytes.Equal(padZeros[:padding], tailBuf[:padding]); !ok { + err = &ErrILSEQ{Type: ErrVerifyPadding, Off: h2off(h)} + log(err) + return + } + + var cc byte + switch cc = tailBuf[padding]; cc { + default: + err = &ErrILSEQ{Type: ErrTailTag, Off: h2off(h)} + log(err) + return + case tagCompressed: + compressed = true + if tag == tagUsedRelocated { + err = &ErrILSEQ{Type: ErrTailTag, Off: h2off(h)} + log(err) + return + } + + fallthrough + case tagNotCompressed: + if err = a.read(buf[:dlen], doff); err != nil { + return false, 0, 0, 0, err + } + } + + if cc == tagCompressed { + if ubuf, err = zappy.Decode(ubuf, buf[:dlen]); err != nil || len(ubuf) > maxRq { + err = &ErrILSEQ{Type: ErrDecompress, Off: h2off(h)} + log(err) + return + } + + dlen = len(ubuf) + } + + if tag == tagUsedRelocated { + link = b2h(buf) + if link == 0 { + err = &ErrILSEQ{Type: ErrNullReloc, Off: h2off(h)} + log(err) + return + } + + if link > totalAtoms { // invalid last block + err = &ErrILSEQ{Type: ErrRelocBeyondEOF, Off: h2off(h), Arg: link} + log(err) + return + } + } + + return +} + +var nolog = func(error) bool { return false } + +// Verify attempts to find any 
structural errors in a Filer wrt the
+// organization of it as defined by Allocator. 'bitmap' is a scratch pad for
+// necessary bookkeeping and will grow to at most Allocator's
+// Filer.Size()/128 (0.78%). Any problems found are reported to 'log' except
+// non-verify-related errors like disk read fails etc. If 'log' returns false
+// or the error doesn't allow to (reliably) continue, the verification process
+// is stopped and an error is returned from the Verify function. Passing a nil
+// log works like providing a log function always returning false. Any
+// non-structural errors, like for instance Filer read errors, are NOT reported
+// to 'log', but returned as the Verify's return value, because Verify cannot
+// proceed in such cases. Verify returns nil only if it fully completed
+// verifying Allocator's Filer without detecting any error.
+//
+// It is recommended to limit the number of reported problems by returning false
+// from 'log' after reaching some limit. A huge and corrupted DB can produce an
+// overwhelming error report dataset.
+//
+// The verifying process will scan the whole DB at least 3 times (a trade
+// between processing space and time consumed). It doesn't read the content of
+// free blocks above the head/tail info bytes. If the 3rd phase detects lost
+// free space, then a 4th scan (a faster one) is performed to precisely report
+// all of them.
+//
+// If the DB/Filer to be verified is reasonably small, i.e. if its
+// size/128 can comfortably fit within process's free memory, then it is
+// recommended to consider using a MemFiler for the bit map.
+//
+// Statistics are returned via 'stats' if non-nil. The statistics are valid
+// only if Verify succeeded, i.e. it didn't report anything to log and it
+// returned a nil error.
+func (a *Allocator) Verify(bitmap Filer, log func(error) bool, stats *AllocStats) (err error) { + if log == nil { + log = nolog + } + + n, err := bitmap.Size() + if err != nil { + return + } + + if n != 0 { + return &ErrINVAL{"Allocator.Verify: bit map initial size non zero (%d)", n} + } + + var bits int64 + bitMask := [8]byte{1, 2, 4, 8, 16, 32, 64, 128} + byteBuf := []byte{0} + + //DONE + // +performance, this implementation is hopefully correct but _very_ + // naive, probably good as a prototype only. Use maybe a MemFiler + // "cache" etc. + // ---- + // Turns out the OS caching is as effective as it can probably get. + bit := func(on bool, h int64) (wasOn bool, err error) { + m := bitMask[h&7] + off := h >> 3 + var v byte + sz, err := bitmap.Size() + if err != nil { + return + } + + if off < sz { + if n, err := bitmap.ReadAt(byteBuf, off); n != 1 { + return false, &ErrILSEQ{Type: ErrOther, Off: off, More: fmt.Errorf("Allocator.Verify - reading bitmap: %s", err)} + } + + v = byteBuf[0] + } + switch wasOn = v&m != 0; on { + case true: + if !wasOn { + v |= m + bits++ + } + case false: + if wasOn { + v ^= m + bits-- + } + } + byteBuf[0] = v + if n, err := bitmap.WriteAt(byteBuf, off); n != 1 || err != nil { + return false, &ErrILSEQ{Type: ErrOther, Off: off, More: fmt.Errorf("Allocator.Verify - writing bitmap: %s", err)} + } + + return + } + + // Phase 1 - sequentially scan a.f to reliably determine block + // boundaries. Set a bit for every block start. 
+ var ( + buf, ubuf [maxRq]byte + prevH, h, atoms int64 + wasOn bool + tag byte + st = AllocStats{ + AllocMap: map[int64]int64{}, + FreeMap: map[int64]int64{}, + } + dlen int + ) + + fsz, err := a.f.Size() + if err != nil { + return + } + + ok := fsz%16 == 0 + totalAtoms := (fsz - fltSz) / atomLen + if !ok { + err = &ErrILSEQ{Type: ErrFileSize, Name: a.f.Name(), Arg: fsz} + log(err) + return + } + + st.TotalAtoms = totalAtoms + prevTag := -1 + lastH := int64(-1) + + for h = 1; h <= totalAtoms; h += atoms { + prevH = h // For checking last block == used + + off := h2off(h) + if err = a.read(buf[:1], off); err != nil { + return + } + + switch tag = buf[0]; tag { + default: // Short used + fallthrough + case tagUsedLong, tagUsedRelocated: + var compressed bool + if compressed, dlen, atoms, _, err = a.verifyUsed(h, totalAtoms, tag, buf[:], ubuf[:], log, false); err != nil { + return + } + + if compressed { + st.Compression++ + } + st.AllocAtoms += atoms + switch { + case tag == tagUsedRelocated: + st.AllocMap[1]++ + st.Relocations++ + default: + st.AllocMap[atoms]++ + st.AllocBytes += int64(dlen) + st.Handles++ + } + case tagFreeShort, tagFreeLong: + if prevTag == tagFreeShort || prevTag == tagFreeLong { + err = &ErrILSEQ{Type: ErrAdjacentFree, Off: h2off(lastH), Arg: off} + log(err) + return + } + + if atoms, _, _, err = a.verifyUnused(h, totalAtoms, tag, log, false); err != nil { + return + } + + st.FreeMap[atoms]++ + st.FreeAtoms += atoms + } + + if wasOn, err = bit(true, h); err != nil { + return + } + + if wasOn { + panic("internal error") + } + + prevTag = int(tag) + lastH = h + } + + if totalAtoms != 0 && (tag == tagFreeShort || tag == tagFreeLong) { + err = &ErrILSEQ{Type: ErrFreeTailBlock, Off: h2off(prevH)} + log(err) + return + } + + // Phase 2 - check used blocks, turn off the map bit for every used + // block. 
+ for h = 1; h <= totalAtoms; h += atoms { + off := h2off(h) + if err = a.read(buf[:1], off); err != nil { + return + } + + var link int64 + switch tag = buf[0]; tag { + default: // Short used + fallthrough + case tagUsedLong, tagUsedRelocated: + if _, _, atoms, link, err = a.verifyUsed(h, totalAtoms, tag, buf[:], ubuf[:], log, true); err != nil { + return + } + case tagFreeShort, tagFreeLong: + if atoms, _, _, err = a.verifyUnused(h, totalAtoms, tag, log, true); err != nil { + return + } + } + + turnoff := true + switch tag { + case tagUsedRelocated: + if err = a.read(buf[:1], h2off(link)); err != nil { + return + } + + switch linkedTag := buf[0]; linkedTag { + case tagFreeShort, tagFreeLong, tagUsedRelocated: + err = &ErrILSEQ{Type: ErrInvalidRelocTarget, Off: off, Arg: link} + log(err) + return + } + + case tagFreeShort, tagFreeLong: + turnoff = false + } + + if !turnoff { + continue + } + + if wasOn, err = bit(false, h); err != nil { + return + } + + if !wasOn { + panic("internal error") + } + + } + + // Phase 3 - using the flt check heads link to proper free blocks. For + // every free block, walk the list, verify the {next, prev} links and + // turn the respective map bit off. After processing all free lists, + // the map bits count should be zero. Otherwise there are "lost" free + // blocks. 
+ + var prev, next, fprev, fnext int64 + rep := a.flt + + for _, list := range rep { + prev, next = 0, list.head + for ; next != 0; prev, next = next, fnext { + if wasOn, err = bit(false, next); err != nil { + return + } + + if !wasOn { + err = &ErrILSEQ{Type: ErrFLT, Off: h2off(next), Arg: h} + log(err) + return + } + + off := h2off(next) + if err = a.read(buf[:1], off); err != nil { + return + } + + switch tag = buf[0]; tag { + default: + panic("internal error") + case tagFreeShort, tagFreeLong: + if atoms, fprev, fnext, err = a.verifyUnused(next, totalAtoms, tag, log, true); err != nil { + return + } + + if min := list.minSize; atoms < min { + err = &ErrILSEQ{Type: ErrFLTSize, Off: h2off(next), Arg: atoms, Arg2: min} + log(err) + return + } + + if fprev != prev { + err = &ErrILSEQ{Type: ErrFreeChaining, Off: h2off(next)} + log(err) + return + } + } + } + + } + + if bits == 0 { // Verify succeeded + if stats != nil { + *stats = st + } + return + } + + // Phase 4 - if after phase 3 there are lost free blocks, report all of + // them to 'log' + for i := range ubuf { // setup zeros for compares + ubuf[i] = 0 + } + + var off, lh int64 + rem, err := bitmap.Size() + if err != nil { + return err + } + + for rem != 0 { + rq := int(mathutil.MinInt64(64*1024, rem)) + var n int + if n, err = bitmap.ReadAt(buf[:rq], off); n != rq { + return &ErrILSEQ{Type: ErrOther, Off: off, More: fmt.Errorf("bitmap ReadAt(size %d, off %#x): %s", rq, off, err)} + } + + if !bytes.Equal(buf[:rq], ubuf[:rq]) { + for d, v := range buf[:rq] { + if v != 0 { + for i, m := range bitMask { + if v&m != 0 { + lh = 8*(off+int64(d)) + int64(i) + err = &ErrILSEQ{Type: ErrLostFreeBlock, Off: h2off(lh)} + log(err) + return + } + } + } + } + } + + off += int64(rq) + rem -= int64(rq) + } + + return +} + +type fltSlot struct { + head int64 + minSize int64 +} + +func (f fltSlot) String() string { + return fmt.Sprintf("head %#x, minSize %#x\n", f.head, f.minSize) +} + +type flt [14]fltSlot + +func (f *flt) 
init() { + sz := 1 + for i := range *f { + f[i].minSize, f[i].head = int64(sz), 0 + sz <<= 1 + } + f[13].minSize = 4112 +} + +func (f *flt) load(fi Filer, off int64) (err error) { + b := bufs.GCache.Get(fltSz) + defer bufs.GCache.Put(b) + if _, err = fi.ReadAt(b[:], off); err != nil { + return + } + + for i := range *f { + off := 8*i + 1 + f[i].head = b2h(b[off:]) + } + return +} + +func (f *flt) find(rq int) (h int64) { + switch { + case rq < 1: + panic(rq) + case rq >= maxFLTRq: + h, f[13].head = f[13].head, 0 + return + default: + g := f[mathutil.Log2Uint16(uint16(rq)):] + for i := range g { + p := &g[i] + if rq <= int(p.minSize) { + if h = p.head; h != 0 { + p.head = 0 + return + } + } + } + return + } +} + +func (f *flt) head(atoms int64) (h int64) { + switch { + case atoms < 1: + panic(atoms) + case atoms >= maxFLTRq: + return f[13].head + default: + lg := mathutil.Log2Uint16(uint16(atoms)) + g := f[lg:] + for i := range g { + if atoms < g[i+1].minSize { + return g[i].head + } + } + panic("internal error") + } +} + +func (f *flt) setHead(h, atoms int64, fi Filer) (err error) { + switch { + case atoms < 1: + panic(atoms) + case atoms >= maxFLTRq: + b := bufs.GCache.Get(7) + defer bufs.GCache.Put(b) + if _, err = fi.WriteAt(h2b(b[:], h), 8*13+1); err != nil { + return + } + + f[13].head = h + return + default: + lg := mathutil.Log2Uint16(uint16(atoms)) + g := f[lg:] + for i := range f { + if atoms < g[i+1].minSize { + b := bufs.GCache.Get(7) + defer bufs.GCache.Put(b) + if _, err = fi.WriteAt(h2b(b[:], h), 8*int64(i+lg)+1); err != nil { + return + } + + g[i].head = h + return + } + } + panic("internal error") + } +} + +func (f *flt) String() string { + a := []string{} + for i, v := range *f { + a = append(a, fmt.Sprintf("[%2d] %s", i, v)) + } + return strings.Join(a, "") +} + +type node struct { + b []byte + h int64 + prev, next *node +} + +type cache []*node + +func (c *cache) get(n int) *node { + r, _ := c.get2(n) + return r +} + +func (c *cache) get2(n int) 
(r *node, isZeroed bool) { + s := *c + lens := len(s) + if lens == 0 { + return &node{b: make([]byte, n, mathutil.Min(2*n, maxBuf))}, true + } + + i := sort.Search(lens, func(x int) bool { return len(s[x].b) >= n }) + if i == lens { + i-- + s[i].b, isZeroed = make([]byte, n, mathutil.Min(2*n, maxBuf)), true + } + + r = s[i] + r.b = r.b[:n] + copy(s[i:], s[i+1:]) + s = s[:lens-1] + *c = s + return +} + +func (c *cache) cget(n int) (r *node) { + r, ok := c.get2(n) + if ok { + return + } + + for i := range r.b { + r.b[i] = 0 + } + return +} + +func (c *cache) size() (sz int64) { + for _, n := range *c { + sz += int64(cap(n.b)) + } + return +} + +func (c *cache) put(n *node) *node { + s := *c + n.b = n.b[:cap(n.b)] + lenb := len(n.b) + lens := len(s) + i := sort.Search(lens, func(x int) bool { return len(s[x].b) >= lenb }) + s = append(s, nil) + copy(s[i+1:], s[i:]) + s[i] = n + *c = s + return n +} + +type lst struct { + front, back *node +} + +func (l *lst) pushFront(n *node) *node { + if l.front == nil { + l.front, l.back, n.prev, n.next = n, n, nil, nil + return n + } + + n.prev, n.next, l.front.prev, l.front = nil, l.front, n, n + return n +} + +func (l *lst) remove(n *node) *node { + if n.prev == nil { + l.front = n.next + } else { + n.prev.next = n.next + } + if n.next == nil { + l.back = n.prev + } else { + n.next.prev = n.prev + } + n.prev, n.next = nil, nil + return n +} + +func (l *lst) removeBack() *node { + return l.remove(l.back) +} + +func (l *lst) moveToFront(n *node) *node { + return l.pushFront(l.remove(n)) +} + +func (l *lst) size() (sz int64) { + for n := l.front; n != nil; n = n.next { + sz += int64(cap(n.b)) + } + return +} + +func cacheAudit(m map[int64]*node, l *lst) (err error) { + cnt := 0 + for h, n := range m { + if g, e := n.h, h; g != e { + return fmt.Errorf("cacheAudit: invalid node handle %d != %d", g, e) + } + + if cnt, err = l.audit(n, true); err != nil { + return + } + } + + if g, e := cnt, len(m); g != e { + return 
fmt.Errorf("cacheAudit: invalid cache size %d != %d", g, e) + } + + return +} + +func (l *lst) audit(n *node, onList bool) (cnt int, err error) { + if !onList && (n.prev != nil || n.next != nil) { + return -1, fmt.Errorf("lst.audit: free node with non nil linkage") + } + + if l.front == nil && l.back != nil || l.back == nil && l.front != nil { + return -1, fmt.Errorf("lst.audit: one of .front/.back is nil while the other is non nil") + } + + if l.front == l.back && l.front != nil { + x := l.front + if x.prev != nil || x.next != nil { + return -1, fmt.Errorf("lst.audit: single node has non nil linkage") + } + + if onList && x != n { + return -1, fmt.Errorf("lst.audit: single node is alien") + } + } + + seen := false + var prev *node + x := l.front + for x != nil { + cnt++ + if x.prev != prev { + return -1, fmt.Errorf("lst.audit: broken .prev linkage") + } + + if x == n { + seen = true + } + + prev = x + x = x.next + } + + if prev != l.back { + return -1, fmt.Errorf("lst.audit: broken .back linkage") + } + + if onList && !seen { + return -1, fmt.Errorf("lst.audit: node missing in list") + } + + if !onList && seen { + return -1, fmt.Errorf("lst.audit: node should not be on the list") + } + + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/falloc_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/falloc_test.go new file mode 100644 index 00000000..1297528a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/falloc_test.go @@ -0,0 +1,1833 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lldb + +import ( + "bytes" + "encoding/hex" + "flag" + "fmt" + "math/rand" + "os" + "runtime" + "sort" + "strings" + "testing" + "time" + + "camlistore.org/third_party/github.com/cznic/bufs" + "camlistore.org/third_party/github.com/cznic/sortutil" + "camlistore.org/third_party/github.com/cznic/zappy" +) + +var ( + allocRndTestLimit = flag.Uint("lim", 2*maxShort, "Allocator rnd test initial blocks size limit") + allocRndTestHardLimit = flag.Uint("hlim", 0, "Allocator rnd test initial blocks size hard limit") + testN = flag.Int("N", 128, "Allocator rnd test block count") + allocRndDump = flag.Bool("dump", false, "Produce dump files on TestAllocatorRnd crash") + oKeep = flag.Bool("keep", false, "do not delete testing DB/WAL (where applicable)") +) + +func init() { + reallocTestHook = true +} + +func mfBytes(f Filer) []byte { + var b bytes.Buffer + if _, err := f.(*MemFiler).WriteTo(&b); err != nil { + panic(err) + } + + return b.Bytes() +} + +// Paranoid Allocator, automatically verifies whenever possible. 
+type pAllocator struct { + *Allocator + errors []error + logger func(error) bool + lastKnownGood *MemFiler + lastKnownGoodFLT flt + lastOp string + stats AllocStats +} + +func newPAllocator(f Filer) (*pAllocator, error) { + a, err := NewAllocator(f, &Options{}) + if err != nil { + return nil, err + } + + r := &pAllocator{Allocator: a, lastKnownGood: NewMemFiler()} + r.logger = func(err error) bool { + r.errors = append(r.errors, err) + return len(r.errors) < 100 + } + + return r, nil +} + +func (a *pAllocator) err() error { + var n int + if n = len(a.errors); n == 0 { + return nil + } + + s := make([]string, n) + for i, e := range a.errors { + s[i] = e.Error() + } + return fmt.Errorf("\n%s", strings.Join(s, "\n")) +} + +func (a *pAllocator) preMortem(s string) { + var e error + if e := a.lastKnownGood.Truncate(0); e != nil { + panic(e) + } + b := mfBytes(a.Allocator.f) + if _, e = a.lastKnownGood.WriteAt(b, 0); e != nil { + return + } + a.lastKnownGoodFLT = a.flt + a.lastOp = s +} + +func (a *pAllocator) Alloc(b []byte) (handle int64, err error) { + if *allocRndDump { + a.preMortem("") + defer func() { a.lastOp = fmt.Sprintf("Alloc(%d bytes): h %#x", len(b), handle) }() + } + + if handle, err = a.Allocator.Alloc(b); err != nil { + return + } + + if err = a.Allocator.Verify(NewMemFiler(), a.logger, &a.stats); err != nil { + err = fmt.Errorf("'%s': %v", err, a.err()) + return + } + + err = a.err() + return +} + +func (a *pAllocator) Free(handle int64) (err error) { + if *allocRndDump { + a.preMortem(fmt.Sprintf("Free(h %#x)", handle)) + } + + if err = a.Allocator.Free(handle); err != nil { + return + } + + if err = a.Allocator.Verify(NewMemFiler(), a.logger, &a.stats); err != nil { + err = fmt.Errorf("'%s': %v", err, a.err()) + return + } + + err = a.err() + return +} + +func (a *pAllocator) Realloc(handle int64, b []byte) (err error) { + if *allocRndDump { + a.preMortem(fmt.Sprintf("Realloc(h %#x, %d bytes)", handle, len(b))) + } + + if err = 
a.Allocator.Realloc(handle, b); err != nil { + return + } + + if err = cacheAudit(a.Allocator.m, &a.Allocator.lru); err != nil { + return + } + + if err = a.Allocator.Verify(NewMemFiler(), a.logger, &a.stats); err != nil { + err = fmt.Errorf("'%s': %v", err, a.err()) + return + } + + err = a.err() + return +} + +func dump(a *pAllocator, t *testing.T) { + m := a.f.(*MemFiler) + sz, err := m.Size() + if err != nil { + t.Fatal(err) + } + + t.Logf("MemFiler.Size() == %d(%#x)", sz, sz) + if !*allocRndDump { + return + } + + fn := "good-dump" + f, err := os.Create(fn) + if err != nil { + t.Fatal(err) + } + + defer f.Close() + sz, err = a.lastKnownGood.WriteTo(f) + if err != nil { + t.Error(err) + return + } + + t.Logf("%d(%#x) writen to %q", sz, sz, fn) + + fn = "bad-dump" + g, err := os.Create(fn) + if err != nil { + t.Fatal(err) + } + + defer g.Close() + sz, err = m.WriteTo(g) + if err != nil { + t.Error(err) + return + } + + t.Logf("%d(%#x) writen to %q", sz, sz, fn) + + t.Log("Last known good FLT") + for _, slot := range a.lastKnownGoodFLT { + if h := slot.head; h != 0 { + t.Logf("min %d head %#x off %#x", slot.minSize, h, h2off(h)) + } + } + + t.Log("Current FLT") + r := a.flt + for _, slot := range r { + if h := slot.head; h != 0 { + t.Logf("min %d head %#x off %#x", slot.minSize, h, h2off(h)) + } + } + t.Logf("Last op: %q", a.lastOp) +} + +func init() { + if *testN <= 0 { + *testN = 1 + } +} + +func TestVerify0(t *testing.T) { + // All must fail + tab := []string{ + + // 0: Reloc, links beyond EOF + "" + + "fd 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00", + // 1: Reloc, links beyond EOF + "" + + "fd 00 00 00 00 00 00 03 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 2: Reloc, broken target + "" + + "fd 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 3: Free block at file tail + "" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fe", + // 4: Free block at file tail + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 
00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fe", + // 5: Reloc, invalid target 0xfe + "" + + "fd 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 6: Reloc, invalid target 0xfd + "" + + "fd 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "fd 00 00 00 00 00 00 01 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 7: Lost free block @ 0x00 + "" + + "fe 00 00 00 00 00 00 02 00 00 00 00 00 00 00 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 8: Lost free block @ 0x10 + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 02 00 00 00 00 00 00 00 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 9: Invalid padding + "" + + "00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 10: Invalid padding + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 00", + // 11: Invalid padding + "" + + "01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 12: Invalid padding + "" + + "01 00 00 00 00 00 00 00 00 00 00 00 00 00 01 00", + // 13: Invalid padding + "" + + "0d 00 00 00 00 00 00 00 00 00 00 00 00 00 01 00", + // 14: Invalid CC (tail tag) + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 02", + // 15: Invalid CC (tail tag) + "" + + "fd 00 00 00 00 00 00 02 00 00 00 00 00 00 00 01", + // 16: Cannot decompress + "" + + "0e 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01", + // 17: Invalid reloc target + "" + + "fd 00 00 00 00 00 00 03 00 00 00 00 00 00 00 00" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 18: Invalid tail tag @1 + "" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 19: Invalid size @1 + "" + + "ff 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "00 00 00 00 
00 00 00 00 00 00 00 00 00 00 00 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 20: Invalid size @1 + "" + + "ff 00 00 00 00 00 00 01 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 21: Invalid size @1 + "" + + "ff 00 00 00 00 00 00 04 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 22: Invalid .next @1 + "" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 04 00 00 00 00 00 00 00 00 02 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 23: Invalid .prev @1 + "" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 04 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 24: Invalid tail tag @1 + "" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 25: Invalid tail size @1 + "" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 26: Invalid tail size @1 + "" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 27: Invalid tail size @1 + "" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 03 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f + } + + for i, test := range tab { + errors := []error{} + + f := NewMemFiler() + b := s2b(test) + b = append(make([]byte, fltSz), b...) 
+ n := len(b) + if n == 0 { + t.Fatal(n) + } + + if m, err := f.ReadFrom(bytes.NewBuffer(b)); m != int64(n) || err != nil { + t.Fatal(m, err) + } + + sz, err := f.Size() + if err != nil { + t.Fatal(err) + } + + if g, e := sz, int64(n); g != e { + t.Fatal(g, e) + } + + a, err := newPAllocator(f) + if err != nil { + t.Fatal(err) + } + + err = a.Verify( + NewMemFiler(), + func(err error) bool { + if err == nil { + t.Fatal("nil error") + } + errors = append(errors, err) + return false + }, + nil, + ) + if err == nil { + t.Fatal(i, "unexpected success") + } + + t.Log(i, err, errors) + } +} + +func TestVerify1(t *testing.T) { + f := NewMemFiler() + bitmap := NewMemFiler() + if n, err := bitmap.WriteAt([]byte{0}, 0); n != 1 || err != nil { + t.Fatal(n, err) + } + + a, err := newPAllocator(f) + if err != nil { + t.Fatal(err) + } + + if err := a.Verify( + bitmap, + func(error) bool { + panic("intrnal error") + }, + nil, + ); err == nil { + t.Fatal("unexpected success") + } +} + +func repDump(a flt) string { + b := []string{} + for _, v := range a { + if h := v.head; h != 0 { + b = append(b, fmt.Sprintf("min:%d, h:%d", v.minSize, h)) + } + } + return strings.Join(b, ";") +} + +func TestVerify2(t *testing.T) { + // All must fail for the fixed (see bellow) FLT.Report() + tab := []string{ + + // 0: FLT broken linkage (missing free blocks @2,4) + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 1: FLT broken linkage (missing free block @4) + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 2: bad size @4 + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // // 3: bad 
size @4 + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 04 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "ff 00 00 00 00 00 00 01 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // // 4: bad .next @6 from @2 + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 06 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // // 5: bad .prev @7 + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 07 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // // 6: bad .next @7 + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 07 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 02 00 00 00 00 00 00 07 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // // 7: bad .next @5 + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 07 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 
00 00 00 00 00 00 00 00 02 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 02 00 00 00 00 00 00 01 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // // 8: bad chaining + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 07 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 02 00 00 00 00 00 00 01 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // // 9: lost free block @8 + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 0f fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "ff 00 00 00 00 00 00 02 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 ff" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + + "fe 00 00 00 00 00 00 02 00 00 00 00 00 00 00 fe" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + // 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f + } + + for i, test := range tab { + errors := []error{} + + f := NewMemFiler() + b := s2b(test) + b = append(make([]byte, fltSz), b...) 
+ n := len(b) + if n == 0 { + t.Fatal(n) + } + + if m, err := f.ReadFrom(bytes.NewBuffer(b)); m != int64(n) || err != nil { + t.Fatal(m, err) + } + + sz, err := f.Size() + if err != nil { + t.Fatal(err) + } + + if g, e := sz, int64(n); g != e { + t.Fatal(g, e) + } + + a, err := newPAllocator(f) + if err != nil { + t.Fatal(err) + } + + a.flt.setHead(2, 1, a.f) + a.flt.setHead(4, 2, a.f) + err = a.Verify( + NewMemFiler(), + func(err error) bool { + if err == nil { + t.Fatal("nil error") + } + t.Log(i, "logged: ", err) + errors = append(errors, err) + return true + }, + nil, + ) + if err == nil { + t.Fatal(i, "unexpected success") + } + + t.Log(i, err, errors) + } +} + +// Allocation in an empty DB. +func TestAllocatorAlloc0(t *testing.T) { + tab := []struct { + h int64 + b, f, fc string + }{ + {1, // len 0 + "" + + "", + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + "" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00"}, + {1, // len 1 + "" + + "42", + "" + + "01 42 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + "" + + "01 42 00 00 00 00 00 00 00 00 00 00 00 00 00 00"}, + {1, // max single atom, not compressible + "" + + "01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e", + "" + + "0e 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 00", + "" + + "0e 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 00"}, + {1, // max single atom, compressible, but not eligible for it + "" + + "01 02 03 04 05 06 07 08 99 01 02 03 04 05", + "" + + "0e 01 02 03 04 05 06 07 08 99 01 02 03 04 05 00", + "" + + "0e 01 02 03 04 05 06 07 08 99 01 02 03 04 05 00"}, + {1, // > 1 atom, not compressible + "" + + "01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f", + "" + + "0f 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + "" + + "0f 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00"}, + {1, // > 1 atom, compressible + "" + + "01 02 03 04 05 06 07 08 99 01 02 03 04 05 06 07" + + "08", + "" + + "11 01 02 
03 04 05 06 07 08 99 01 02 03 04 05 06" + + "07 08 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + "" + + "0e 11 12 01 02 03 04 05 06 07 08 99 01 0d 09 01"}, + {1, // longest short + "" + + "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "10 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "20 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "30 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "40 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "50 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "60 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "70 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "80 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "90 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "a0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "b0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "c0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "d0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "e0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "f0 01 02 03 04 05 06 07 08 09 0a", + "" + + "" + + "fb 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f 10 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f 20 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f 30 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f 40 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f 50 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f 60 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f 70 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f 80 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f 90 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f a0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f b0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f c0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f d0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f e0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e" + + "0f f0 01 02 03 04 05 06 07 08 09 0a 00 00 00 00", + "" + + "" + + "4e fb 01 20 00 01 02 03 04 05 06 07 08 09 0a 0b" + + "0c 0d 0e 0f 10 1d 
10 00 20 1d 10 00 30 1d 10 00" + + "40 1d 10 00 50 1d 10 00 60 1d 10 00 70 1d 10 00" + + "80 1d 10 00 90 1d 10 00 a0 1d 10 00 b0 1d 10 00" + + "c0 1d 10 00 d0 1d 10 00 e0 1d 10 00 f0 13 10 01"}, + + {1, // shortest long + "" + + "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "10 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "20 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "30 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "40 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "50 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "60 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "70 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "80 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "90 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "a0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "b0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "c0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "d0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "e0 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" + + "f0 01 02 03 04 05 06 07 08 09 0a 0b", + "" + + "" + + "fc 00 fc 00 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f 10 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f 20 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f 30 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f 40 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f 50 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f 60 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f 70 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f 80 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f 90 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f a0 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f b0 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f c0 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f d0 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f e0 01 02 03 04 05 06 07 08 09 0a 0b 0c" + + "0d 0e 0f f0 01 02 03 04 05 06 07 08 09 0a 0b 00", + "" + + "" + + "4e fc 01 20 00 01 02 03 04 
05 06 07 08 09 0a 0b" + + "0c 0d 0e 0f 10 1d 10 00 20 1d 10 00 30 1d 10 00" + + "40 1d 10 00 50 1d 10 00 60 1d 10 00 70 1d 10 00" + + "80 1d 10 00 90 1d 10 00 a0 1d 10 00 b0 1d 10 00" + + "c0 1d 10 00 d0 1d 10 00 e0 1d 10 00 f0 15 10 01"}, + } + + for i, test := range tab { + f := func(compress bool, e []byte) { + f := NewMemFiler() + a, err := newPAllocator(f) + if err != nil { + t.Fatal(err) + } + + a.Compress = compress + h, err := a.Alloc(s2b(test.b)) + if err != nil { + t.Fatalf("%d %#v\n%s", i, err, hex.Dump(mfBytes(f))) + } + + if g, e := h, test.h; g != e { + t.Fatal(i, g, e) + } + + g := mfBytes(f) + if g = g[fltSz:]; !bytes.Equal(g, e) { + t.Fatalf("\ni: %d compress: %t\ng:\n%se:\n%s", i, compress, hex.Dump(g), hex.Dump(e)) + } + } + f(false, s2b(test.f)) + f(true, s2b(test.fc)) + } +} + +func TestAllocatorMakeUsedBlock(t *testing.T) { + f := NewMemFiler() + a, err := NewAllocator(f, &Options{}) + if err != nil { + t.Fatal(err) + } + + dst := bufs.GCache.Get(zappy.MaxEncodedLen(maxRq + 1)) + defer bufs.GCache.Put(dst) + if _, _, _, err := a.makeUsedBlock(dst, make([]byte, maxRq)); err != nil { + t.Fatal(err) + } + + if _, _, _, err := a.makeUsedBlock(dst, make([]byte, maxRq+1)); err == nil { + t.Fatal("unexpected success") + } +} + +func stableRef(m map[int64][]byte) (r []struct { + h int64 + b []byte +}) { + a := make(sortutil.Int64Slice, 0, len(m)) + for k := range m { + a = append(a, k) + } + sort.Sort(a) + for _, v := range a { + r = append(r, struct { + h int64 + b []byte + }{v, m[v]}) + } + return +} + +func TestAllocatorRnd(t *testing.T) { + N := *testN + + for cc := 0; cc < 2; cc++ { + rng := rand.New(rand.NewSource(42)) + f := NewMemFiler() + a, err := newPAllocator(f) + if err != nil { + t.Fatal(err) + } + + balance := 0 + + bad := func() bool { + if a.Compress { + return false + } + + actual := a.stats.TotalAtoms - a.stats.FreeAtoms - a.stats.Relocations + if int64(balance) != actual { + t.Logf("balance: %d, actual %d\n%#v", balance, actual, 
a.stats) + return true + } + + return false + } + + if cc != 0 { + a.Compress = true + } + ref := map[int64][]byte{} + + for pass := 0; pass < 2; pass++ { + + // A) Alloc N blocks + for i := 0; i < N; i++ { + rq := rng.Int31n(int32(*allocRndTestLimit)) + if rq%127 == 0 { + rq = 3 * maxRq / 4 + } + if rq%11 == 0 { + rq %= 23 + } + if hl := *allocRndTestHardLimit; hl != 0 { + rq = rq % int32(hl) + } + b := make([]byte, rq) + for j := range b { + b[j] = byte(rng.Int()) + } + if rq > 300 { + for i := 100; i < 200; i++ { + b[i] = 'A' // give compression a chance + } + } + + balance += n2atoms(len(b)) + h, err := a.Alloc(b) + if err != nil || bad() { + dump(a, t) + t.Fatalf( + "A) N %d, kind %d, pass %d, i:%d, len(b):%d(%#x), err %v", + N, 0, pass, i, len(b), len(b), err, + ) + } + + ref[h] = b + } + + var rb []byte + + // B) Check them back + for h, wb := range ref { + if rb, err = a.Get(rb, h); err != nil { + dump(a, t) + t.Fatal("B)", err) + } + + if !bytes.Equal(rb, wb) { + dump(a, t) + t.Fatalf("B) h %d", h) + } + } + + nf := 0 + // C) Free every third block + for _, v := range stableRef(ref) { + h, b := v.h, v.b + if rng.Int()%3 != 0 { + continue + } + + balance -= n2atoms(len(b)) + if err = a.Free(h); err != nil || bad() { + dump(a, t) + t.Fatal(err) + } + + delete(ref, h) + nf++ + } + + // D) Check them back + for h, wb := range ref { + if rb, err = a.Get(rb, h); err != nil { + dump(a, t) + t.Fatal("D)", err) + } + + if !bytes.Equal(rb, wb) { + dump(a, t) + t.Fatalf("D) h %d", h) + } + } + + // E) Resize every block remaining + for _, v := range stableRef(ref) { + h, wb := v.h, append([]byte(nil), v.b...) + len0 := len(wb) + switch rng.Int() & 1 { + case 0: + wb = wb[:len(wb)*3/4] + case 1: + wb = append(wb, wb...) 
+ } + if len(wb) > maxRq { + wb = wb[:maxRq] + } + + for j := range wb { + wb[j] = byte(rng.Int()) + } + if len(wb) > 300 { + for i := 100; i < 200; i++ { + wb[i] = 'D' // give compression a chance + } + } + a0, a1 := n2atoms(len0), n2atoms(len(wb)) + balance = balance - a0 + a1 + if err := a.Realloc(h, wb); err != nil || bad() { + dump(a, t) + t.Fatalf( + "D) h:%#x, len(b):%#4x, len(wb): %#x, err %v", + h, len0, len(wb), err, + ) + } + + if err = cacheAudit(a.m, &a.lru); err != nil { + t.Fatal(err) + } + + ref[h] = wb + } + + // F) Check them back + for h, wb := range ref { + if rb, err = a.Get(rb, h); err != nil { + dump(a, t) + t.Fatal("E)", err) + } + + if !bytes.Equal(rb, wb) { + dump(a, t) + t.Fatalf("E) h %d", h) + } + } + } + + if cc == 0 { + sz, err := f.Size() + if err != nil { + t.Fatal(err) + } + + t.Logf( + "kind %d, AllocAtoms %7d, AllocBytes %7d, FreeAtoms %7d, Relocations %7d, TotalAtoms %7d, f.Size %7d, space eff %.2f%%", + 0, a.stats.AllocAtoms, a.stats.AllocBytes, a.stats.FreeAtoms, a.stats.Relocations, a.stats.TotalAtoms, sz, 100*float64(a.stats.AllocBytes)/float64(sz), + ) + } + // Free everything + for h, b := range ref { + balance -= n2atoms(len(b)) + if err = a.Free(h); err != nil || bad() { + dump(a, t) + t.Fatal(err) + } + } + + sz, err := a.f.Size() + if err != nil { + t.Fatal(err) + } + + if g, e := sz, int64(fltSz); g != e { + dump(a, t) + t.Fatal(g, e) + } + } +} + +func TestRollbackAllocator(t *testing.T) { + f := NewMemFiler() + var r *RollbackFiler + r, err := NewRollbackFiler(f, + func(sz int64) (err error) { + if err = f.Truncate(sz); err != nil { + return err + } + + return f.Sync() + }, + f, + ) + if err != nil { + t.Fatal(err) + } + + if err := r.BeginUpdate(); err != nil { // BeginUpdate 0->1 + t.Fatal(err) + } + + a, err := NewAllocator(r, &Options{}) + if err != nil { + t.Fatal(err) + } + + h, err := a.Alloc(nil) + if err != nil { + t.Fatal(err) + } + + if h != 1 { + t.Fatal(h) + } + + // | 1 | + + h, err = a.Alloc(nil) + if 
err != nil { + t.Fatal(err) + } + + if h != 2 { + t.Fatal(h) + } + + // | 1 | 2 | + h, err = a.Alloc(nil) + if err != nil { + t.Fatal(err) + } + + if h != 3 { + t.Fatal(h) + } + + // | 1 | 2 | 3 | + if err = a.Free(2); err != nil { + t.Fatal(err) + } + + // | 1 | free | 3 | + if err := r.BeginUpdate(); err != nil { // BeginUpdate 1->2 + t.Fatal(err) + } + + h, err = a.Alloc(nil) + if err != nil { + t.Fatal(err) + } + + if h != 2 { + t.Fatal(h) + } + + // | 1 | 2 | 3 | + if err := r.Rollback(); err != nil { // Rollback 2->1 + t.Fatal(err) + } + + // | 1 | free | 3 | + h, err = a.Alloc(nil) + if err != nil { + t.Fatal(err) + } + + if h != 2 { + t.Fatal(h) + } + + // | 1 | 2 | 3 | + if err := a.Verify(NewMemFiler(), nil, nil); err != nil { + t.Fatal(err) + } +} + +func benchmarkAllocatorAlloc(b *testing.B, f Filer, sz int) { + if err := f.BeginUpdate(); err != nil { + b.Error(err) + return + } + + a, err := NewAllocator(f, &Options{}) + if err != nil { + b.Error(err) + return + } + + if err = f.EndUpdate(); err != nil { + b.Error(err) + return + } + + v := make([]byte, sz) + runtime.GC() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err = f.BeginUpdate(); err != nil { + b.Error(err) + return + } + + if h, err := a.Alloc(v); h <= 0 || err != nil { + f.EndUpdate() + b.Error(h, err) + return + } + + if err = f.EndUpdate(); err != nil { + b.Error(err) + return + } + } +} + +func benchmarkAllocatorAllocMemFiler(b *testing.B, sz int) { + f := NewMemFiler() + benchmarkAllocatorAlloc(b, f, sz) +} + +func BenchmarkAllocatorAllocMemFiler1e0(b *testing.B) { + benchmarkAllocatorAllocMemFiler(b, 0) +} + +func BenchmarkAllocatorAllocMemFiler1e1(b *testing.B) { + benchmarkAllocatorAllocMemFiler(b, 1e1) +} + +func BenchmarkAllocatorAllocMemFiler1e2(b *testing.B) { + benchmarkAllocatorAllocMemFiler(b, 1e2) +} + +func BenchmarkAllocatorAllocMemFiler1e3(b *testing.B) { + benchmarkAllocatorAllocMemFiler(b, 1e3) +} + +func benchmarkAllocatorAllocSimpleFileFiler(b *testing.B, sz int) 
{ + dir, testDbName := temp() + defer os.RemoveAll(dir) + + f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer f.Close() + + benchmarkAllocatorAlloc(b, NewSimpleFileFiler(f), sz) +} + +func BenchmarkAllocatorAllocSimpleFileFiler0(b *testing.B) { + benchmarkAllocatorAllocSimpleFileFiler(b, 0) +} + +func BenchmarkAllocatorAllocSimpleFileFiler1e1(b *testing.B) { + benchmarkAllocatorAllocSimpleFileFiler(b, 1e1) +} + +func BenchmarkAllocatorAllocSimpleFileFiler1e2(b *testing.B) { + benchmarkAllocatorAllocSimpleFileFiler(b, 1e2) +} + +func BenchmarkAllocatorAllocSimpleFileFiler1e3(b *testing.B) { + benchmarkAllocatorAllocSimpleFileFiler(b, 1e3) +} + +func benchmarkAllocatorAllocRollbackFiler(b *testing.B, sz int) { + dir, testDbName := temp() + defer os.RemoveAll(dir) + + f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer f.Close() + + g := NewSimpleFileFiler(f) + var filer *RollbackFiler + if filer, err = NewRollbackFiler( + g, + func(sz int64) error { + if err = g.Truncate(sz); err != nil { + return err + } + + return g.Sync() + }, + g, + ); err != nil { + b.Error(err) + return + } + + benchmarkAllocatorAlloc(b, filer, sz) +} + +func BenchmarkAllocatorAllocRollbackFiler0(b *testing.B) { + benchmarkAllocatorAllocRollbackFiler(b, 0) +} + +func BenchmarkAllocatorAllocRollbackFiler1e1(b *testing.B) { + benchmarkAllocatorAllocRollbackFiler(b, 1e1) +} + +func BenchmarkAllocatorAllocRollbackFiler1e2(b *testing.B) { + benchmarkAllocatorAllocRollbackFiler(b, 1e2) +} + +func BenchmarkAllocatorAllocRollbackFiler1e3(b *testing.B) { + benchmarkAllocatorAllocRollbackFiler(b, 1e3) +} + +func benchmarkAllocatorAllocACIDFiler(b *testing.B, sz int) { + dir, testDbName := temp() + defer os.RemoveAll(dir) + + f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer f.Close() + + wal, err := 
os.OpenFile(testDbName+".wal", os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer wal.Close() + + filer, err := NewACIDFiler(NewSimpleFileFiler(f), wal) + if err != nil { + b.Error(err) + return + } + + benchmarkAllocatorAlloc(b, filer, sz) +} + +func BenchmarkAllocatorAllocACIDFiler0(b *testing.B) { + benchmarkAllocatorAllocACIDFiler(b, 0) +} + +func BenchmarkAllocatorAllocACIDFiler1e1(b *testing.B) { + benchmarkAllocatorAllocACIDFiler(b, 1e1) +} + +func BenchmarkAllocatorAllocACIDFiler1e2(b *testing.B) { + benchmarkAllocatorAllocACIDFiler(b, 1e2) +} + +func BenchmarkAllocatorAllocACIDFiler1e3(b *testing.B) { + benchmarkAllocatorAllocACIDFiler(b, 1e3) +} + +func benchmarkAllocatorRndFree(b *testing.B, f Filer, sz int) { + if err := f.BeginUpdate(); err != nil { + b.Error(err) + return + } + + a, err := NewAllocator(f, &Options{}) + if err != nil { + b.Error(err) + return + } + + if err = f.EndUpdate(); err != nil { + b.Error(err) + return + } + + v := make([]byte, sz) + ref := map[int64]struct{}{} + for i := 0; i < b.N; i++ { + if err = f.BeginUpdate(); err != nil { + b.Error(err) + return + } + + h, err := a.Alloc(v) + if h <= 0 || err != nil { + f.EndUpdate() + b.Error(h, err) + return + } + + ref[h] = struct{}{} + + if err = f.EndUpdate(); err != nil { + b.Error(err) + return + } + } + runtime.GC() + b.ResetTimer() + for h := range ref { + if err = f.BeginUpdate(); err != nil { + b.Error(err) + return + } + + if err = a.Free(h); err != nil { + f.EndUpdate() + b.Error(h, err) + return + } + + if err = f.EndUpdate(); err != nil { + b.Error(err) + return + } + } +} + +func benchmarkAllocatorRndFreeMemFiler(b *testing.B, sz int) { + f := NewMemFiler() + benchmarkAllocatorRndFree(b, f, sz) +} + +func BenchmarkAllocatorRndFreeMemFiler0(b *testing.B) { + benchmarkAllocatorRndFreeMemFiler(b, 0) +} + +func BenchmarkAllocatorRndFreeMemFiler1e1(b *testing.B) { + benchmarkAllocatorRndFreeMemFiler(b, 1e1) +} + +func 
BenchmarkAllocatorRndFreeMemFiler1e2(b *testing.B) { + benchmarkAllocatorRndFreeMemFiler(b, 1e2) +} + +func BenchmarkAllocatorRndFreeMemFiler1e3(b *testing.B) { + benchmarkAllocatorRndFreeMemFiler(b, 1e3) +} + +func benchmarkAllocatorRndFreeSimpleFileFiler(b *testing.B, sz int) { + dir, testDbName := temp() + defer os.RemoveAll(dir) + + f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer f.Close() + + benchmarkAllocatorRndFree(b, NewSimpleFileFiler(f), sz) +} + +func BenchmarkAllocatorRndFreeSimpleFileFiler0(b *testing.B) { + benchmarkAllocatorRndFreeSimpleFileFiler(b, 0) +} + +func BenchmarkAllocatorRndFreeSimpleFileFiler1e1(b *testing.B) { + benchmarkAllocatorRndFreeSimpleFileFiler(b, 1e1) +} + +func BenchmarkAllocatorRndFreeSimpleFileFiler1e2(b *testing.B) { + benchmarkAllocatorRndFreeSimpleFileFiler(b, 1e2) +} + +func BenchmarkAllocatorRndFreeSimpleFileFiler1e3(b *testing.B) { + benchmarkAllocatorRndFreeSimpleFileFiler(b, 1e3) +} + +func benchmarkAllocatorRndFreeRollbackFiler(b *testing.B, sz int) { + dir, testDbName := temp() + defer os.RemoveAll(dir) + + f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer f.Close() + + g := NewSimpleFileFiler(f) + var filer *RollbackFiler + if filer, err = NewRollbackFiler( + g, + func(sz int64) error { + if err = g.Truncate(sz); err != nil { + return err + } + + return g.Sync() + }, + g, + ); err != nil { + b.Error(err) + return + } + + benchmarkAllocatorRndFree(b, filer, sz) +} + +func BenchmarkAllocatorRndFreeRollbackFiler0(b *testing.B) { + benchmarkAllocatorRndFreeRollbackFiler(b, 0) +} + +func BenchmarkAllocatorRndFreeRollbackFiler1e1(b *testing.B) { + benchmarkAllocatorRndFreeRollbackFiler(b, 1e1) +} + +func BenchmarkAllocatorRndFreeRollbackFiler1e2(b *testing.B) { + benchmarkAllocatorRndFreeRollbackFiler(b, 1e2) +} + +func BenchmarkAllocatorRndFreeRollbackFiler1e3(b *testing.B) { + 
benchmarkAllocatorRndFreeRollbackFiler(b, 1e3) +} + +func benchmarkAllocatorRndFreeACIDFiler(b *testing.B, sz int) { + dir, testDbName := temp() + defer os.RemoveAll(dir) + + f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer f.Close() + + wal, err := os.OpenFile(testDbName+".wal", os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer wal.Close() + + filer, err := NewACIDFiler(NewSimpleFileFiler(f), wal) + if err != nil { + b.Error(err) + return + } + + benchmarkAllocatorRndFree(b, filer, sz) +} + +func BenchmarkAllocatorRndFreeACIDFiler0(b *testing.B) { + benchmarkAllocatorRndFreeACIDFiler(b, 0) +} + +func BenchmarkAllocatorRndFreeACIDFiler1e1(b *testing.B) { + benchmarkAllocatorRndFreeACIDFiler(b, 1e1) +} + +func BenchmarkAllocatorRndFreeACIDFiler1e2(b *testing.B) { + benchmarkAllocatorRndFreeACIDFiler(b, 1e2) +} + +func BenchmarkAllocatorRndFreeACIDFiler1e3(b *testing.B) { + benchmarkAllocatorRndFreeACIDFiler(b, 1e3) +} + +func benchmarkAllocatorRndGet(b *testing.B, f Filer, sz int) { + if err := f.BeginUpdate(); err != nil { + b.Error(err) + return + } + + a, err := NewAllocator(f, &Options{}) + if err != nil { + b.Error(err) + return + } + + if err = f.EndUpdate(); err != nil { + b.Error(err) + return + } + + v := make([]byte, sz) + ref := map[int64]struct{}{} + if err = f.BeginUpdate(); err != nil { + b.Error(err) + return + } + + for i := 0; i < b.N; i++ { + h, err := a.Alloc(v) + if h <= 0 || err != nil { + f.EndUpdate() + b.Error(h, err) + return + } + + ref[h] = struct{}{} + + } + if err = f.EndUpdate(); err != nil { + b.Error(err) + return + } + + runtime.GC() + b.ResetTimer() + for h := range ref { + if err = f.BeginUpdate(); err != nil { + b.Error(err) + return + } + + if _, err = a.Get(v, h); err != nil { + f.EndUpdate() + b.Error(h, err) + return + } + + if err = f.EndUpdate(); err != nil { + b.Error(err) + return + } + } +} + +func 
benchmarkAllocatorRndGetMemFiler(b *testing.B, sz int) { + f := NewMemFiler() + benchmarkAllocatorRndGet(b, f, sz) +} + +func BenchmarkAllocatorRndGetMemFiler0(b *testing.B) { + benchmarkAllocatorRndGetMemFiler(b, 0) +} + +func BenchmarkAllocatorRndGetMemFiler1e1(b *testing.B) { + benchmarkAllocatorRndGetMemFiler(b, 1e1) +} + +func BenchmarkAllocatorRndGetMemFiler1e2(b *testing.B) { + benchmarkAllocatorRndGetMemFiler(b, 1e2) +} + +func BenchmarkAllocatorRndGetMemFiler1e3(b *testing.B) { + benchmarkAllocatorRndGetMemFiler(b, 1e3) +} + +func benchmarkAllocatorRndGetSimpleFileFiler(b *testing.B, sz int) { + os.Remove(testDbName) + <-time.After(5 * time.Second) + f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer func() { + f.Close() + os.Remove(testDbName) + }() + + benchmarkAllocatorRndGet(b, NewSimpleFileFiler(f), sz) +} + +func BenchmarkAllocatorRndGetSimpleFileFiler0(b *testing.B) { + benchmarkAllocatorRndGetSimpleFileFiler(b, 0) +} + +func BenchmarkAllocatorRndGetSimpleFileFiler1e1(b *testing.B) { + benchmarkAllocatorRndGetSimpleFileFiler(b, 1e1) +} + +func BenchmarkAllocatorRndGetSimpleFileFiler1e2(b *testing.B) { + benchmarkAllocatorRndGetSimpleFileFiler(b, 1e2) +} + +func BenchmarkAllocatorRndGetSimpleFileFiler1e3(b *testing.B) { + benchmarkAllocatorRndGetSimpleFileFiler(b, 1e3) +} + +func benchmarkAllocatorRndGetRollbackFiler(b *testing.B, sz int) { + os.Remove(testDbName) + f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer func() { + f.Close() + os.Remove(testDbName) + }() + + g := NewSimpleFileFiler(f) + var filer *RollbackFiler + if filer, err = NewRollbackFiler( + g, + func(sz int64) error { + if err = g.Truncate(sz); err != nil { + return err + } + + return g.Sync() + }, + g, + ); err != nil { + b.Error(err) + return + } + + benchmarkAllocatorRndGet(b, filer, sz) +} + +func BenchmarkAllocatorRndGetRollbackFiler0(b 
*testing.B) { + benchmarkAllocatorRndGetRollbackFiler(b, 0) +} + +func BenchmarkAllocatorRndGetRollbackFiler1e1(b *testing.B) { + benchmarkAllocatorRndGetRollbackFiler(b, 1e1) +} + +func BenchmarkAllocatorRndGetRollbackFiler1e2(b *testing.B) { + benchmarkAllocatorRndGetRollbackFiler(b, 1e2) +} + +func BenchmarkAllocatorRndGetRollbackFiler1e3(b *testing.B) { + benchmarkAllocatorRndGetRollbackFiler(b, 1e3) +} + +func benchmarkAllocatorRndGetACIDFiler(b *testing.B, sz int) { + os.Remove(testDbName) + os.Remove(walName) + f, err := os.OpenFile(testDbName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer func() { + f.Close() + os.Remove(testDbName) + }() + + wal, err := os.OpenFile(walName, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600) + if err != nil { + b.Fatal(err) + } + + defer func() { + wal.Close() + os.Remove(walName) + }() + + filer, err := NewACIDFiler(NewSimpleFileFiler(f), wal) + if err != nil { + b.Error(err) + return + } + + benchmarkAllocatorRndGet(b, filer, sz) +} + +func BenchmarkAllocatorRndGetACIDFiler0(b *testing.B) { + benchmarkAllocatorRndGetACIDFiler(b, 0) +} + +func BenchmarkAllocatorRndGetACIDFiler1e1(b *testing.B) { + benchmarkAllocatorRndGetACIDFiler(b, 1e1) +} + +func BenchmarkAllocatorRndGetACIDFiler1e2(b *testing.B) { + benchmarkAllocatorRndGetACIDFiler(b, 1e2) +} + +func BenchmarkAllocatorRndGetACIDFiler1e3(b *testing.B) { + benchmarkAllocatorRndGetACIDFiler(b, 1e3) +} + +func TestFltFind(t *testing.T) { + var f flt + + f.init() + if h := f.find(1); h != 0 { + t.Fatal(h) + } + + // [0] + f.init() + f[0].head = 1 + if h := f.find(1); h != 1 || f[0].head != 0 { + t.Fatal(h) + } + + f.init() + f[0].head = 1 + if h := f.find(2); h != 0 || f[0].head == 0 { + t.Fatal(h) + } + + // [1] + f.init() + f[1].head = 1 + if h := f.find(1); h != 1 || f[1].head != 0 { + t.Fatal("\n", f, h) + } + + f.init() + f[1].head = 1 + if h := f.find(2); h != 1 || f[1].head != 0 { + t.Fatal(f, h) + } + + f.init() + f[1].head = 1 + if h := 
f.find(3); h != 0 || f[1].head == 0 { + t.Fatal(h) + } + + // [2] + f.init() + f[2].head = 1 + if h := f.find(1); h != 1 || f[2].head != 0 { + t.Fatal(h) + } + + f.init() + f[2].head = 1 + if h := f.find(2); h != 1 || f[2].head != 0 { + t.Fatal(h) + } + + f.init() + f[2].head = 1 + if h := f.find(3); h != 1 || f[2].head != 0 { + t.Fatal(h) + } + + f.init() + f[2].head = 1 + if h := f.find(4); h != 1 || f[2].head != 0 { + t.Fatal(h) + } + + f.init() + f[2].head = 1 + if h := f.find(5); h != 0 || f[2].head == 0 { + t.Fatal(h) + } +} + +func TestFltHead(t *testing.T) { + var f flt + f.init() + if h := f.head(1); h != 0 { + t.Fatal(h) + } + + // [0] + f.init() + f[0].head = 1 + if h := f.head(1); h != 1 { + t.Fatal(h) + } + + f.init() + f[0].head = 1 + if h := f.head(2); h != 0 { + t.Fatal(h) + } + + // [1] + f.init() + f[1].head = 1 + if h := f.head(1); h != 0 { + t.Fatal(h) + } + + f.init() + f[1].head = 1 + if h := f.head(2); h != 1 { + t.Fatal(h) + } + + f.init() + f[1].head = 1 + if h := f.head(3); h != 1 { + t.Fatal(h) + } + + f.init() + f[1].head = 1 + if h := f.head(4); h != 0 { + t.Fatal(h) + } + + // [2] + f.init() + f[2].head = 1 + if h := f.head(1); h != 0 { + t.Fatal(h) + } + + f.init() + f[2].head = 1 + if h := f.head(2); h != 0 { + t.Fatal(h) + } + + f.init() + f[2].head = 1 + if h := f.head(3); h != 0 { + t.Fatal(h) + } + + f.init() + f[2].head = 1 + if h := f.head(4); h != 1 { + t.Fatal(h) + } + + f.init() + f[2].head = 1 + if h := f.head(5); h != 1 { + t.Fatal(h) + } + + f.init() + f[2].head = 1 + if h := f.head(6); h != 1 { + t.Fatal(h) + } + + f.init() + f[2].head = 1 + if h := f.head(7); h != 1 { + t.Fatal(h) + } + + f.init() + f[2].head = 1 + if h := f.head(8); h != 0 { + t.Fatal(h) + } + +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/filer.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/filer.go new file mode 100644 index 00000000..0f601453 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/filer.go @@ -0,0 +1,192 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// An abstraction of file like (persistent) storage with optional (abstracted) +// support for structural integrity. + +package lldb + +import ( + "fmt" + + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +func doubleTrouble(first, second error) error { + return fmt.Errorf("%q. Additionally, while attempting to recover (rollback): %q", first, second) +} + +// A Filer is a []byte-like model of a file or similar entity. It may +// optionally implement support for structural transaction safety. In contrast +// to a file stream, a Filer is not sequentially accessible. ReadAt and WriteAt +// are always "addressed" by an offset and are assumed to perform atomically. +// A Filer is not safe for concurrent access, it's designed for consumption by +// the other objects in package, which should use a Filer from one goroutine +// only or via a mutex. BeginUpdate, EndUpdate and Rollback must be either all +// implemented by a Filer for structural integrity - or they should be all +// no-ops; where/if that requirement is relaxed. +// +// If a Filer wraps another Filer implementation, it usually invokes the same +// methods on the "inner" one, after some possible argument translations etc. +// If a Filer implements the structural transactions handling methods +// (BeginUpdate, EndUpdate and Rollback) as no-ops _and_ wraps another Filer: +// it then still MUST invoke those methods on the inner Filer. This is +// important for the case where a RollbackFiler exists somewhere down the +// chain. It's also important for an Allocator - to know when it must +// invalidate its FLT cache. +type Filer interface { + // BeginUpdate increments the "nesting" counter (initially zero). 
Every + // call to BeginUpdate must be eventually "balanced" by exactly one of + // EndUpdate or Rollback. Calls to BeginUpdate may nest. + BeginUpdate() error + + // Analogous to os.File.Close(). + Close() error + + // EndUpdate decrements the "nesting" counter. If it's zero after that + // then assume the "storage" has reached structural integrity (after a + // batch of partial updates). If a Filer implements some support for + // that (write ahead log, journal, etc.) then the appropriate actions + // are to be taken for nesting == 0. Invocation of an unbalanced + // EndUpdate is an error. + EndUpdate() error + + // Analogous to os.File.Name(). + Name() string + + // PunchHole deallocates space inside a "file" in the byte range + // starting at off and continuing for size bytes. The actual hole + // created by PunchHole may be smaller than requested. The Filer size + // (as reported by `Size()` does not change when hole punching, even + // when punching the end of a file off. In contrast to the Linux + // implementation of FALLOC_FL_PUNCH_HOLE in `fallocate`(2); a Filer is + // free not only to ignore `PunchHole()` (implement it as a nop), but + // additionally no guarantees about the content of the hole, when + // eventually read back, are required, i.e. any data, not only zeros, + // can be read from the "hole", including just anything what was left + // there - with all of the possible security problems. + PunchHole(off, size int64) error + + // As os.File.ReadAt. Note: `off` is an absolute "file pointer" + // address and cannot be negative even when a Filer is a InnerFiler. + ReadAt(b []byte, off int64) (n int, err error) + + // Rollback cancels and undoes the innermost pending update level. + // Rollback decrements the "nesting" counter. If a Filer implements + // some support for keeping structural integrity (write ahead log, + // journal, etc.) then the appropriate actions are to be taken. + // Invocation of an unbalanced Rollback is an error. 
+ Rollback() error + + // Analogous to os.File.FileInfo().Size(). + Size() (int64, error) + + // Analogous to os.Sync(). + Sync() (err error) + + // Analogous to os.File.Truncate(). + Truncate(size int64) error + + // Analogous to os.File.WriteAt(). Note: `off` is an absolute "file + // pointer" address and cannot be negative even when a Filer is a + // InnerFiler. + WriteAt(b []byte, off int64) (n int, err error) +} + +var _ Filer = &InnerFiler{} // Ensure InnerFiler is a Filer. + +// A InnerFiler is a Filer with added addressing/size translation. +type InnerFiler struct { + outer Filer + off int64 +} + +// NewInnerFiler returns a new InnerFiler wrapped by `outer` in a way which +// adds `off` to every access. +// +// For example, considering: +// +// inner := NewInnerFiler(outer, 10) +// +// then +// +// inner.WriteAt([]byte{42}, 4) +// +// translates to +// +// outer.WriteAt([]byte{42}, 14) +// +// But an attempt to emulate +// +// outer.WriteAt([]byte{17}, 9) +// +// by +// +// inner.WriteAt([]byte{17}, -1) +// +// will fail as the `off` parameter can never be < 0. Also note that +// +// inner.Size() == outer.Size() - off, +// +// i.e. `inner` pretends no `outer` exists. Finally, after e.g. +// +// inner.Truncate(7) +// outer.Size() == 17 +// +// will be true. +func NewInnerFiler(outer Filer, off int64) *InnerFiler { return &InnerFiler{outer, off} } + +// BeginUpdate implements Filer. +func (f *InnerFiler) BeginUpdate() error { return f.outer.BeginUpdate() } + +// Close implements Filer. +func (f *InnerFiler) Close() (err error) { return f.outer.Close() } + +// EndUpdate implements Filer. +func (f *InnerFiler) EndUpdate() error { return f.outer.EndUpdate() } + +// Name implements Filer. +func (f *InnerFiler) Name() string { return f.outer.Name() } + +// PunchHole implements Filer. `off`, `size` must be >= 0. +func (f *InnerFiler) PunchHole(off, size int64) error { return f.outer.PunchHole(f.off+off, size) } + +// ReadAt implements Filer. `off` must be >= 0. 
+func (f *InnerFiler) ReadAt(b []byte, off int64) (n int, err error) { + if off < 0 { + return 0, &ErrINVAL{f.outer.Name() + ":ReadAt invalid off", off} + } + + return f.outer.ReadAt(b, f.off+off) +} + +// Rollback implements Filer. +func (f *InnerFiler) Rollback() error { return f.outer.Rollback() } + +// Size implements Filer. +func (f *InnerFiler) Size() (int64, error) { + sz, err := f.outer.Size() + if err != nil { + return 0, err + } + + return mathutil.MaxInt64(sz-f.off, 0), nil +} + +// Sync() implements Filer. +func (f *InnerFiler) Sync() (err error) { + return f.outer.Sync() +} + +// Truncate implements Filer. +func (f *InnerFiler) Truncate(size int64) error { return f.outer.Truncate(size + f.off) } + +// WriteAt implements Filer. `off` must be >= 0. +func (f *InnerFiler) WriteAt(b []byte, off int64) (n int, err error) { + if off < 0 { + return 0, &ErrINVAL{f.outer.Name() + ":WriteAt invalid off", off} + } + + return f.outer.WriteAt(b, f.off+off) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/filer_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/filer_test.go new file mode 100644 index 00000000..d455b26b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/filer_test.go @@ -0,0 +1,764 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lldb + +import ( + "bytes" + "encoding/hex" + "io/ioutil" + "math/rand" + "os" + "runtime" + "testing" + + "camlistore.org/third_party/github.com/cznic/fileutil" +) + +// Bench knobs. 
+const ( + filerTestChunkSize = 32e3 + filerTotalSize = 10e6 +) + +type newFunc func() Filer + +type testFileFiler struct { + Filer +} + +func (t *testFileFiler) Close() (err error) { + n := t.Name() + err = t.Filer.Close() + if errDel := os.Remove(n); errDel != nil && err == nil { + err = errDel + } + return +} + +var ( + newFileFiler = func() Filer { + file, err := ioutil.TempFile("", "lldb-test-file") + if err != nil { + panic(err) + } + + return &testFileFiler{NewSimpleFileFiler(file)} + } + + newOSFileFiler = func() Filer { + file, err := ioutil.TempFile("", "lldb-test-osfile") + if err != nil { + panic(err) + } + + return &testFileFiler{NewOSFiler(file)} + } + + newMemFiler = func() Filer { + return NewMemFiler() + } + + nwBitFiler = func() Filer { + f, err := newBitFiler(NewMemFiler()) + if err != nil { + panic(err) + } + + return f + } + + newRollbackFiler = func() Filer { + f := NewMemFiler() + + var r Filer + + checkpoint := func(sz int64) (err error) { + return f.Truncate(sz) + } + + r, err := NewRollbackFiler(f, checkpoint, f) + if err != nil { + panic(err) + } + + return r + } +) + +func TestFilerNesting(t *testing.T) { + testFilerNesting(t, newFileFiler) + testFilerNesting(t, newOSFileFiler) + testFilerNesting(t, newMemFiler) + testFilerNesting(t, newRollbackFiler) +} + +func testFilerNesting(t *testing.T, nf newFunc) { + // Check {Create, Close} works. + f := nf() + t.Log(f.Name()) + if err := f.Close(); err != nil { + t.Fatal(err) + } + + // Check {Create, EndUpdate} doesn't work. + f = nf() + t.Log(f.Name()) + if err := f.EndUpdate(); err == nil { + f.Close() + t.Fatal("unexpected success") + } + + if err := f.Close(); err != nil { + t.Fatal(err) + } + + // Check {Create, BeginUpdate, Close} doesn't work. + f = nf() + t.Log(f.Name()) + f.BeginUpdate() + + if err := f.Close(); err == nil { + t.Fatal("unexpected success") + } + + // Check {Create, BeginUpdate, EndUpdate, Close} works. 
+ f = nf() + t.Log(f.Name()) + f.BeginUpdate() + if err := f.EndUpdate(); err != nil { + f.Close() + t.Fatal(err) + } + + if err := f.Close(); err != nil { + t.Fatal(err) + } +} + +func TestFilerTruncate(t *testing.T) { + testFilerTruncate(t, newFileFiler) + testFilerTruncate(t, newOSFileFiler) + testFilerTruncate(t, newMemFiler) + testFilerTruncate(t, nwBitFiler) + testFilerTruncate(t, newRollbackFiler) +} + +func testFilerTruncate(t *testing.T, nf newFunc) { + f := nf() + t.Log(f.Name()) + defer func() { + if err := f.Close(); err != nil { + t.Error(err) + } + }() + + if _, ok := f.(*RollbackFiler); ok { + if err := f.BeginUpdate(); err != nil { + t.Fatal(err) + } + + defer func() { + if err := f.EndUpdate(); err != nil { + t.Error(err) + } + }() + } + + // Check Truncate works. + sz := int64(1e6) + if err := f.Truncate(sz); err != nil { + t.Error(err) + return + } + + fsz, err := f.Size() + if err != nil { + t.Error(err) + return + } + + if g, e := fsz, sz; g != e { + t.Error(g, e) + return + } + + sz *= 2 + if err := f.Truncate(sz); err != nil { + t.Error(err) + return + } + + fsz, err = f.Size() + if err != nil { + t.Error(err) + return + } + + if g, e := fsz, sz; g != e { + t.Error(g, e) + return + } + + sz = 0 + if err := f.Truncate(sz); err != nil { + t.Error(err) + return + } + + fsz, err = f.Size() + if err != nil { + t.Error(err) + return + } + + if g, e := fsz, sz; g != e { + t.Error(g, e) + return + } + + // Check Truncate(-1) doesn't work. 
+ sz = -1 + if err := f.Truncate(sz); err == nil { + t.Error(err) + return + } + +} + +func TestFilerReadAtWriteAt(t *testing.T) { + testFilerReadAtWriteAt(t, newFileFiler) + testFilerReadAtWriteAt(t, newOSFileFiler) + testFilerReadAtWriteAt(t, newMemFiler) + testFilerReadAtWriteAt(t, nwBitFiler) + testFilerReadAtWriteAt(t, newRollbackFiler) +} + +func testFilerReadAtWriteAt(t *testing.T, nf newFunc) { + f := nf() + t.Log(f.Name()) + defer func() { + if err := f.Close(); err != nil { + t.Error(err) + } + }() + + if _, ok := f.(*RollbackFiler); ok { + if err := f.BeginUpdate(); err != nil { + t.Fatal(err) + } + + defer func() { + if err := f.EndUpdate(); err != nil { + t.Error(err) + } + }() + } + + const ( + N = 1 << 16 + M = 2e2 + ) + + s := make([]byte, N) + e := make([]byte, N) + rnd := rand.New(rand.NewSource(42)) + for i := range e { + s[i] = byte(rnd.Intn(256)) + } + n2 := 0 + for i := 0; i < M; i++ { + var from, to int + for { + from = rnd.Intn(N) + to = rnd.Intn(N) + if from != to { + break + } + } + if from > to { + from, to = to, from + } + for i := range s[from:to] { + s[from+i] = byte(rnd.Intn(256)) + } + copy(e[from:to], s[from:to]) + if to > n2 { + n2 = to + } + n, err := f.WriteAt(s[from:to], int64(from)) + if err != nil { + t.Error(err) + return + } + + if g, e := n, to-from; g != e { + t.Error(g, e) + return + } + } + + fsz, err := f.Size() + if err != nil { + t.Error(err) + return + } + + if g, e := fsz, int64(n2); g != e { + t.Error(g, e) + return + } + + b := make([]byte, n2) + for i := 0; i <= M; i++ { + from := rnd.Intn(n2) + to := rnd.Intn(n2) + if from > to { + from, to = to, from + } + if i == M { + from, to = 0, n2 + } + n, err := f.ReadAt(b[from:to], int64(from)) + if err != nil && (!fileutil.IsEOF(err) && n != 0) { + fsz, err = f.Size() + if err != nil { + t.Error(err) + return + } + + t.Error(fsz, from, to, err) + return + } + + if g, e := n, to-from; g != e { + t.Error(g, e) + return + } + + if g, e := b[from:to], e[from:to]; 
!bytes.Equal(g, e) { + if x, ok := f.(*MemFiler); ok { + for i := int64(0); i <= 3; i++ { + t.Logf("pg %d\n----\n%s", i, hex.Dump(x.m[i][:])) + } + } + t.Errorf( + "i %d from %d to %d len(g) %d len(e) %d\n---- got ----\n%s\n---- exp ----\n%s", + i, from, to, len(g), len(e), hex.Dump(g), hex.Dump(e), + ) + return + } + } + + mf, ok := f.(*MemFiler) + if !ok { + return + } + + buf := &bytes.Buffer{} + if _, err := mf.WriteTo(buf); err != nil { + t.Error(err) + return + } + + if g, e := buf.Bytes(), e[:n2]; !bytes.Equal(g, e) { + t.Errorf("\nlen %d\n%s\nlen %d\n%s", len(g), hex.Dump(g), len(e), hex.Dump(e)) + return + } + + if err := mf.Truncate(0); err != nil { + t.Error(err) + return + } + + if _, err := mf.ReadFrom(buf); err != nil { + t.Error(err) + return + } + + roundTrip := make([]byte, n2) + if n, err := mf.ReadAt(roundTrip, 0); err != nil && n == 0 { + t.Error(err) + return + } + + if g, e := roundTrip, e[:n2]; !bytes.Equal(g, e) { + t.Errorf("\nlen %d\n%s\nlen %d\n%s", len(g), hex.Dump(g), len(e), hex.Dump(e)) + return + } +} + +func TestInnerFiler(t *testing.T) { + testInnerFiler(t, newFileFiler) + testInnerFiler(t, newOSFileFiler) + testInnerFiler(t, newMemFiler) + testInnerFiler(t, nwBitFiler) + testInnerFiler(t, newRollbackFiler) +} + +func testInnerFiler(t *testing.T, nf newFunc) { + const ( + HDR_SIZE = 42 + LONG_OFF = 3330 + ) + outer := nf() + t.Log(outer.Name()) + inner := NewInnerFiler(outer, HDR_SIZE) + defer func() { + if err := outer.Close(); err != nil { + t.Error(err) + } + }() + + if _, ok := outer.(*RollbackFiler); ok { + if err := outer.BeginUpdate(); err != nil { + t.Fatal(err) + } + + defer func() { + if err := outer.EndUpdate(); err != nil { + t.Error(err) + } + }() + } + + b := []byte{2, 5, 11} + n, err := inner.WriteAt(b, -1) + if err == nil { + t.Error("unexpected success") + return + } + + n, err = inner.ReadAt(make([]byte, 10), -1) + if err == nil { + t.Error("unexpected success") + return + } + + n, err = inner.WriteAt(b, 0) + if 
err != nil { + t.Error(err) + return + } + + if g, e := n, len(b); g != e { + t.Error(g, e) + return + } + + osz, err := outer.Size() + if err != nil { + t.Error(err) + return + } + + if g, e := osz, int64(HDR_SIZE+3); g != e { + t.Error(g, e) + return + } + + isz, err := inner.Size() + if err != nil { + t.Error(err) + return + } + + if g, e := isz, int64(3); g != e { + t.Error(g, e) + return + } + + rbuf := make([]byte, 3) + if n, err = outer.ReadAt(rbuf, 0); err != nil && n == 0 { + t.Error(err) + return + } + + if g, e := n, len(rbuf); g != e { + t.Error(g, e) + return + } + + if g, e := rbuf, make([]byte, 3); !bytes.Equal(g, e) { + t.Error(g, e) + } + + rbuf = make([]byte, 3) + if n, err = outer.ReadAt(rbuf, HDR_SIZE); err != nil && n == 0 { + t.Error(err) + return + } + + if g, e := n, len(rbuf); g != e { + t.Error(g, e) + return + } + + if g, e := rbuf, []byte{2, 5, 11}; !bytes.Equal(g, e) { + t.Error(g, e) + } + + rbuf = make([]byte, 3) + if n, err = inner.ReadAt(rbuf, 0); err != nil && n == 0 { + t.Error(err) + return + } + + if g, e := n, len(rbuf); g != e { + t.Error(g, e) + return + } + + if g, e := rbuf, []byte{2, 5, 11}; !bytes.Equal(g, e) { + t.Error(g, e) + } + + b = []byte{22, 55, 111} + if n, err = inner.WriteAt(b, LONG_OFF); err != nil { + t.Error(err) + return + } + + if g, e := n, len(b); g != e { + t.Error(g, e) + return + } + + osz, err = outer.Size() + if err != nil { + t.Error(err) + return + } + + if g, e := osz, int64(HDR_SIZE+LONG_OFF+3); g != e { + t.Error(g, e) + return + } + + isz, err = inner.Size() + if err != nil { + t.Error(err) + return + } + + if g, e := isz, int64(LONG_OFF+3); g != e { + t.Error(g, e) + return + } + + rbuf = make([]byte, 3) + if n, err = outer.ReadAt(rbuf, HDR_SIZE+LONG_OFF); err != nil && n == 0 { + t.Error(err) + return + } + + if g, e := n, len(rbuf); g != e { + t.Error(g, e) + return + } + + if g, e := rbuf, []byte{22, 55, 111}; !bytes.Equal(g, e) { + t.Error(g, e) + } + + rbuf = make([]byte, 3) + if n, err 
= inner.ReadAt(rbuf, LONG_OFF); err != nil && n == 0 { + t.Error(err) + return + } + + if g, e := n, len(rbuf); g != e { + t.Error(g, e) + return + } + + if g, e := rbuf, []byte{22, 55, 111}; !bytes.Equal(g, e) { + t.Error(g, e) + return + } + + if err = inner.Truncate(1); err != nil { + t.Error(err) + return + } + + isz, err = inner.Size() + if err != nil { + t.Error(err) + return + } + + if g, e := isz, int64(1); g != e { + t.Error(g, e) + return + } + + osz, err = outer.Size() + if err != nil { + t.Error(err) + return + } + + if g, e := osz, int64(HDR_SIZE+1); g != e { + t.Error(g, e) + return + } +} + +func TestFileReadAtHole(t *testing.T) { + testFileReadAtHole(t, newFileFiler) + testFileReadAtHole(t, newOSFileFiler) + testFileReadAtHole(t, newMemFiler) + testFileReadAtHole(t, nwBitFiler) + testFileReadAtHole(t, newRollbackFiler) +} + +func testFileReadAtHole(t *testing.T, nf newFunc) { + f := nf() + t.Log(f.Name()) + defer func() { + if err := f.Close(); err != nil { + t.Error(err) + } + }() + + if _, ok := f.(*RollbackFiler); ok { + if err := f.BeginUpdate(); err != nil { + t.Fatal(err) + } + + defer func() { + if err := f.EndUpdate(); err != nil { + t.Error(err) + } + }() + } + + n, err := f.WriteAt([]byte{1}, 40000) + if err != nil { + t.Error(err) + return + } + + if n != 1 { + t.Error(n) + return + } + + n, err = f.ReadAt(make([]byte, 1000), 20000) + if err != nil { + t.Error(err) + return + } + + if n != 1000 { + t.Error(n) + return + } +} + +func BenchmarkMemFilerWrSeq(b *testing.B) { + b.StopTimer() + buf := make([]byte, filerTestChunkSize) + for i := range buf { + buf[i] = byte(rand.Int()) + } + f := newMemFiler() + runtime.GC() + b.StartTimer() + var ofs int64 + for i := 0; i < b.N; i++ { + _, err := f.WriteAt(buf, ofs) + if err != nil { + b.Fatal(err) + } + + ofs = (ofs + filerTestChunkSize) % filerTotalSize + } +} + +func BenchmarkMemFilerRdSeq(b *testing.B) { + b.StopTimer() + buf := make([]byte, filerTestChunkSize) + for i := range buf { + 
buf[i] = byte(rand.Int()) + } + f := newMemFiler() + var ofs int64 + for i := 0; i < b.N; i++ { + _, err := f.WriteAt(buf, ofs) + if err != nil { + b.Fatal(err) + } + + ofs = (ofs + filerTestChunkSize) % filerTotalSize + } + runtime.GC() + b.StartTimer() + ofs = 0 + for i := 0; i < b.N; i++ { + n, err := f.ReadAt(buf, ofs) + if err != nil && n == 0 { + b.Fatal(err) + } + + ofs = (ofs + filerTestChunkSize) % filerTotalSize + } +} + +func BenchmarkMemFilerWrRand(b *testing.B) { + b.StopTimer() + rng := rand.New(rand.NewSource(42)) + f := newMemFiler() + var bytes int64 + + var ofs, runs []int + for i := 0; i < b.N; i++ { + ofs = append(ofs, rng.Intn(1<<31-1)) + runs = append(runs, rng.Intn(1<<31-1)%(2*pgSize)) + } + data := make([]byte, 2*pgSize) + for i := range data { + data[i] = byte(rng.Int()) + } + + runtime.GC() + b.StartTimer() + for i, v := range ofs { + n := runs[i] + bytes += int64(n) + f.WriteAt(data[:n], int64(v)) + } + b.StopTimer() +} + +func BenchmarkMemFilerRdRand(b *testing.B) { + b.StopTimer() + rng := rand.New(rand.NewSource(42)) + f := newMemFiler() + var bytes int64 + + var ofs, runs []int + for i := 0; i < b.N; i++ { + ofs = append(ofs, rng.Intn(1<<31-1)) + runs = append(runs, rng.Intn(1<<31-1)%(2*pgSize)) + } + data := make([]byte, 2*pgSize) + for i := range data { + data[i] = byte(rng.Int()) + } + + for i, v := range ofs { + n := runs[i] + bytes += int64(n) + f.WriteAt(data[:n], int64(v)) + } + + runtime.GC() + b.StartTimer() + for _, v := range ofs { + f.ReadAt(data, int64(v)) + } + b.StopTimer() +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/gb.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/gb.go new file mode 100644 index 00000000..34ec1e21 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/gb.go @@ -0,0 +1,812 @@ +// Copyright 2014 The lldb Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Utilities to encode/decode and collate Go predeclared scalar types (and the +// typeless nil and []byte). The encoding format is a variation of the one +// used by the "encoding/gob" package. + +package lldb + +import ( + "bytes" + "fmt" + "math" + + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +const ( + gbNull = iota // 0x00 + gbFalse // 0x01 + gbTrue // 0x02 + gbFloat0 // 0x03 + gbFloat1 // 0x04 + gbFloat2 // 0x05 + gbFloat3 // 0x06 + gbFloat4 // 0x07 + gbFloat5 // 0x08 + gbFloat6 // 0x09 + gbFloat7 // 0x0a + gbFloat8 // 0x0b + gbComplex0 // 0x0c + gbComplex1 // 0x0d + gbComplex2 // 0x0e + gbComplex3 // 0x0f + gbComplex4 // 0x10 + gbComplex5 // 0x11 + gbComplex6 // 0x12 + gbComplex7 // 0x13 + gbComplex8 // 0x14 + gbBytes00 // 0x15 + gbBytes01 // 0x16 + gbBytes02 // 0x17 + gbBytes03 // 0x18 + gbBytes04 // 0x19 + gbBytes05 // 0x1a + gbBytes06 // 0x1b + gbBytes07 // 0x1c + gbBytes08 // 0x1d + gbBytes09 // 0x1e + gbBytes10 // 0x1f + gbBytes11 // 0x20 + gbBytes12 // 0x21 + gbBytes13 // 0x22 + gbBytes14 // 0x23 + gbBytes15 // 0x24 + gbBytes16 // 0x25 + gbBytes17 // Ox26 + gbBytes1 // 0x27 + gbBytes2 // 0x28: Offset by one to allow 64kB sized []byte. 
+ gbString00 // 0x29 + gbString01 // 0x2a + gbString02 // 0x2b + gbString03 // 0x2c + gbString04 // 0x2d + gbString05 // 0x2e + gbString06 // 0x2f + gbString07 // 0x30 + gbString08 // 0x31 + gbString09 // 0x32 + gbString10 // 0x33 + gbString11 // 0x34 + gbString12 // 0x35 + gbString13 // 0x36 + gbString14 // 0x37 + gbString15 // 0x38 + gbString16 // 0x39 + gbString17 // 0x3a + gbString1 // 0x3b + gbString2 // 0x3c + gbUintP1 // 0x3d + gbUintP2 // 0x3e + gbUintP3 // 0x3f + gbUintP4 // 0x40 + gbUintP5 // 0x41 + gbUintP6 // 0x42 + gbUintP7 // 0x43 + gbUintP8 // 0x44 + gbIntM8 // 0x45 + gbIntM7 // 0x46 + gbIntM6 // 0x47 + gbIntM5 // 0x48 + gbIntM4 // 0x49 + gbIntM3 // 0x4a + gbIntM2 // 0x4b + gbIntM1 // 0x4c + gbIntP1 // 0x4d + gbIntP2 // 0x4e + gbIntP3 // 0x4f + gbIntP4 // 0x50 + gbIntP5 // 0x51 + gbIntP6 // 0x52 + gbIntP7 // 0x53 + gbIntP8 // 0x54 + gbInt0 // 0x55 + + gbIntMax = 255 - gbInt0 // 0xff == 170 +) + +// EncodeScalars encodes a vector of predeclared scalar type values to a +// []byte, making it suitable to store it as a "record" in a DB or to use it as +// a key of a BTree. +func EncodeScalars(scalars ...interface{}) (b []byte, err error) { + for _, scalar := range scalars { + switch x := scalar.(type) { + default: + return nil, &ErrINVAL{"EncodeScalars: unsupported type", fmt.Sprintf("%T in `%#v`", x, scalars)} + + case nil: + b = append(b, gbNull) + + case bool: + switch x { + case false: + b = append(b, gbFalse) + case true: + b = append(b, gbTrue) + } + + case float32: + encFloat(float64(x), &b) + case float64: + encFloat(x, &b) + + case complex64: + encComplex(complex128(x), &b) + case complex128: + encComplex(x, &b) + + case string: + n := len(x) + if n <= 17 { + b = append(b, byte(gbString00+n)) + b = append(b, []byte(x)...) 
+ break + } + + if n > 65535 { + return nil, fmt.Errorf("EncodeScalars: cannot encode string of length %d (limit 65536)", n) + } + + pref := byte(gbString1) + if n > 255 { + pref++ + } + b = append(b, pref) + encUint0(uint64(n), &b) + b = append(b, []byte(x)...) + + case int8: + encInt(int64(x), &b) + case int16: + encInt(int64(x), &b) + case int32: + encInt(int64(x), &b) + case int64: + encInt(x, &b) + case int: + encInt(int64(x), &b) + + case uint8: + encUint(uint64(x), &b) + case uint16: + encUint(uint64(x), &b) + case uint32: + encUint(uint64(x), &b) + case uint64: + encUint(x, &b) + case uint: + encUint(uint64(x), &b) + case []byte: + n := len(x) + if n <= 17 { + b = append(b, byte(gbBytes00+n)) + b = append(b, []byte(x)...) + break + } + + if n > 655356 { + return nil, fmt.Errorf("EncodeScalars: cannot encode []byte of length %d (limit 65536)", n) + } + + pref := byte(gbBytes1) + if n > 255 { + pref++ + } + b = append(b, pref) + if n <= 255 { + b = append(b, byte(n)) + } else { + n-- + b = append(b, byte(n>>8), byte(n)) + } + b = append(b, x...) + } + } + return +} + +func encComplex(f complex128, b *[]byte) { + encFloatPrefix(gbComplex0, real(f), b) + encFloatPrefix(gbComplex0, imag(f), b) +} + +func encFloatPrefix(prefix byte, f float64, b *[]byte) { + u := math.Float64bits(f) + var n uint64 + for i := 0; i < 8; i++ { + n <<= 8 + n |= u & 0xFF + u >>= 8 + } + bits := mathutil.BitLenUint64(n) + if bits == 0 { + *b = append(*b, prefix) + return + } + + // 0 1 2 3 4 5 6 7 8 9 + // . 
1 1 1 1 1 1 1 1 2 + encUintPrefix(prefix+1+byte((bits-1)>>3), n, b) +} + +func encFloat(f float64, b *[]byte) { + encFloatPrefix(gbFloat0, f, b) +} + +func encUint0(n uint64, b *[]byte) { + switch { + case n <= 0xff: + *b = append(*b, byte(n)) + case n <= 0xffff: + *b = append(*b, byte(n>>8), byte(n)) + case n <= 0xffffff: + *b = append(*b, byte(n>>16), byte(n>>8), byte(n)) + case n <= 0xffffffff: + *b = append(*b, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n <= 0xffffffffff: + *b = append(*b, byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n <= 0xffffffffffff: + *b = append(*b, byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n <= 0xffffffffffffff: + *b = append(*b, byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n <= math.MaxUint64: + *b = append(*b, byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + } +} + +func encUintPrefix(prefix byte, n uint64, b *[]byte) { + *b = append(*b, prefix) + encUint0(n, b) +} + +func encUint(n uint64, b *[]byte) { + bits := mathutil.Max(1, mathutil.BitLenUint64(n)) + encUintPrefix(gbUintP1+byte((bits-1)>>3), n, b) +} + +func encInt(n int64, b *[]byte) { + switch { + case n < -0x100000000000000: + *b = append(*b, byte(gbIntM8), byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n < -0x1000000000000: + *b = append(*b, byte(gbIntM7), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n < -0x10000000000: + *b = append(*b, byte(gbIntM6), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n < -0x100000000: + *b = append(*b, byte(gbIntM5), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n < -0x1000000: + *b = append(*b, byte(gbIntM4), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n < -0x10000: + *b = append(*b, byte(gbIntM3), 
byte(n>>16), byte(n>>8), byte(n)) + case n < -0x100: + *b = append(*b, byte(gbIntM2), byte(n>>8), byte(n)) + case n < 0: + *b = append(*b, byte(gbIntM1), byte(n)) + case n <= gbIntMax: + *b = append(*b, byte(gbInt0+n)) + case n <= 0xff: + *b = append(*b, gbIntP1, byte(n)) + case n <= 0xffff: + *b = append(*b, gbIntP2, byte(n>>8), byte(n)) + case n <= 0xffffff: + *b = append(*b, gbIntP3, byte(n>>16), byte(n>>8), byte(n)) + case n <= 0xffffffff: + *b = append(*b, gbIntP4, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n <= 0xffffffffff: + *b = append(*b, gbIntP5, byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n <= 0xffffffffffff: + *b = append(*b, gbIntP6, byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n <= 0xffffffffffffff: + *b = append(*b, gbIntP7, byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + case n <= 0x7fffffffffffffff: + *b = append(*b, gbIntP8, byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + } +} + +func decodeFloat(b []byte) float64 { + var u uint64 + for i, v := range b { + u |= uint64(v) << uint((i+8-len(b))*8) + } + return math.Float64frombits(u) +} + +// DecodeScalars decodes a []byte produced by EncodeScalars. 
+func DecodeScalars(b []byte) (scalars []interface{}, err error) { + b0 := b + for len(b) != 0 { + switch tag := b[0]; tag { + //default: + //return nil, fmt.Errorf("tag %d(%#x) not supported", b[0], b[0]) + case gbNull: + scalars = append(scalars, nil) + b = b[1:] + case gbFalse: + scalars = append(scalars, false) + b = b[1:] + case gbTrue: + scalars = append(scalars, true) + b = b[1:] + case gbFloat0: + scalars = append(scalars, 0.0) + b = b[1:] + case gbFloat1, gbFloat2, gbFloat3, gbFloat4, gbFloat5, gbFloat6, gbFloat7, gbFloat8: + n := 1 + int(tag) - gbFloat0 + if len(b) < n-1 { + goto corrupted + } + + scalars = append(scalars, decodeFloat(b[1:n])) + b = b[n:] + case gbComplex0, gbComplex1, gbComplex2, gbComplex3, gbComplex4, gbComplex5, gbComplex6, gbComplex7, gbComplex8: + n := 1 + int(tag) - gbComplex0 + if len(b) < n-1 { + goto corrupted + } + + re := decodeFloat(b[1:n]) + b = b[n:] + + if len(b) == 0 { + goto corrupted + } + + tag = b[0] + if tag < gbComplex0 || tag > gbComplex8 { + goto corrupted + } + + n = 1 + int(tag) - gbComplex0 + if len(b) < n-1 { + goto corrupted + } + + scalars = append(scalars, complex(re, decodeFloat(b[1:n]))) + b = b[n:] + case gbBytes00, gbBytes01, gbBytes02, gbBytes03, gbBytes04, + gbBytes05, gbBytes06, gbBytes07, gbBytes08, gbBytes09, + gbBytes10, gbBytes11, gbBytes12, gbBytes13, gbBytes14, + gbBytes15, gbBytes16, gbBytes17: + n := int(tag - gbBytes00) + if len(b) < n+1 { + goto corrupted + } + + scalars = append(scalars, append([]byte(nil), b[1:n+1]...)) + b = b[n+1:] + case gbBytes1: + if len(b) < 2 { + goto corrupted + } + + n := int(b[1]) + b = b[2:] + if len(b) < n { + goto corrupted + } + + scalars = append(scalars, append([]byte(nil), b[:n]...)) + b = b[n:] + case gbBytes2: + if len(b) < 3 { + goto corrupted + } + + n := int(b[1])<<8 | int(b[2]) + 1 + b = b[3:] + if len(b) < n { + goto corrupted + } + + scalars = append(scalars, append([]byte(nil), b[:n]...)) + b = b[n:] + case gbString00, gbString01, gbString02, 
gbString03, gbString04, + gbString05, gbString06, gbString07, gbString08, gbString09, + gbString10, gbString11, gbString12, gbString13, gbString14, + gbString15, gbString16, gbString17: + n := int(tag - gbString00) + if len(b) < n+1 { + goto corrupted + } + + scalars = append(scalars, string(b[1:n+1])) + b = b[n+1:] + case gbString1: + if len(b) < 2 { + goto corrupted + } + + n := int(b[1]) + b = b[2:] + if len(b) < n { + goto corrupted + } + + scalars = append(scalars, string(b[:n])) + b = b[n:] + case gbString2: + if len(b) < 3 { + goto corrupted + } + + n := int(b[1])<<8 | int(b[2]) + b = b[3:] + if len(b) < n { + goto corrupted + } + + scalars = append(scalars, string(b[:n])) + b = b[n:] + case gbUintP1, gbUintP2, gbUintP3, gbUintP4, gbUintP5, gbUintP6, gbUintP7, gbUintP8: + b = b[1:] + n := 1 + int(tag) - gbUintP1 + if len(b) < n { + goto corrupted + } + + var u uint64 + for _, v := range b[:n] { + u = u<<8 | uint64(v) + } + scalars = append(scalars, u) + b = b[n:] + case gbIntM8, gbIntM7, gbIntM6, gbIntM5, gbIntM4, gbIntM3, gbIntM2, gbIntM1: + b = b[1:] + n := 8 - (int(tag) - gbIntM8) + if len(b) < n { + goto corrupted + } + u := uint64(math.MaxUint64) + for _, v := range b[:n] { + u = u<<8 | uint64(v) + } + scalars = append(scalars, int64(u)) + b = b[n:] + case gbIntP1, gbIntP2, gbIntP3, gbIntP4, gbIntP5, gbIntP6, gbIntP7, gbIntP8: + b = b[1:] + n := 1 + int(tag) - gbIntP1 + if len(b) < n { + goto corrupted + } + + i := int64(0) + for _, v := range b[:n] { + i = i<<8 | int64(v) + } + scalars = append(scalars, i) + b = b[n:] + default: + scalars = append(scalars, int64(b[0])-gbInt0) + b = b[1:] + } + } + return append([]interface{}(nil), scalars...), nil + +corrupted: + return nil, &ErrDecodeScalars{append([]byte(nil), b0...), len(b0) - len(b)} +} + +func collateComplex(x, y complex128) int { + switch rx, ry := real(x), real(y); { + case rx < ry: + return -1 + case rx == ry: + switch ix, iy := imag(x), imag(y); { + case ix < iy: + return -1 + case ix == iy: + 
return 0 + case ix > iy: + return 1 + } + } + //case rx > ry: + return 1 +} + +func collateFloat(x, y float64) int { + switch { + case x < y: + return -1 + case x == y: + return 0 + } + //case x > y: + return 1 +} + +func collateInt(x, y int64) int { + switch { + case x < y: + return -1 + case x == y: + return 0 + } + //case x > y: + return 1 +} + +func collateUint(x, y uint64) int { + switch { + case x < y: + return -1 + case x == y: + return 0 + } + //case x > y: + return 1 +} + +func collateIntUint(x int64, y uint64) int { + if y > math.MaxInt64 { + return -1 + } + + return collateInt(x, int64(y)) +} + +func collateUintInt(x uint64, y int64) int { + return -collateIntUint(y, x) +} + +func collateType(i interface{}) (r interface{}, err error) { + switch x := i.(type) { + default: + return nil, fmt.Errorf("invalid collate type %T", x) + case nil: + return i, nil + case bool: + return i, nil + case int8: + return int64(x), nil + case int16: + return int64(x), nil + case int32: + return int64(x), nil + case int64: + return i, nil + case int: + return int64(x), nil + case uint8: + return uint64(x), nil + case uint16: + return uint64(x), nil + case uint32: + return uint64(x), nil + case uint64: + return i, nil + case uint: + return uint64(x), nil + case float32: + return float64(x), nil + case float64: + return i, nil + case complex64: + return complex128(x), nil + case complex128: + return i, nil + case []byte: + return i, nil + case string: + return i, nil + } +} + +// Collate collates two arrays of Go predeclared scalar types (and the typeless +// nil or []byte). If any other type appears in x or y, Collate will return a +// non nil error. String items are collated using strCollate or lexically +// byte-wise (as when using Go comparison operators) when strCollate is nil. +// []byte items are collated using bytes.Compare. 
+// +// Collate returns: +// +// -1 if x < y +// 0 if x == y +// +1 if x > y +// +// The same value as defined above must be returned from strCollate. +// +// The "outer" ordering is: nil, bool, number, []byte, string. IOW, nil is +// "smaller" than anything else except other nil, numbers collate before +// []byte, []byte collate before strings, etc. +// +// Integers and real numbers collate as expected in math. However, complex +// numbers are not ordered in Go. Here the ordering is defined: Complex numbers +// are in comparison considered first only by their real part. Iff the result +// is equality then the imaginary part is used to determine the ordering. In +// this "second order" comparing, integers and real numbers are considered as +// complex numbers with a zero imaginary part. +func Collate(x, y []interface{}, strCollate func(string, string) int) (r int, err error) { + nx, ny := len(x), len(y) + + switch { + case nx == 0 && ny != 0: + return -1, nil + case nx == 0 && ny == 0: + return 0, nil + case nx != 0 && ny == 0: + return 1, nil + } + + r = 1 + if nx > ny { + x, y, r = y, x, -r + } + + var c int + for i, xi0 := range x { + yi0 := y[i] + xi, err := collateType(xi0) + if err != nil { + return 0, err + } + + yi, err := collateType(yi0) + if err != nil { + return 0, err + } + + switch x := xi.(type) { + default: + panic(fmt.Errorf("internal error: %T", x)) + + case nil: + switch yi.(type) { + case nil: + // nop + default: + return -r, nil + } + + case bool: + switch y := yi.(type) { + case nil: + return r, nil + case bool: + switch { + case !x && y: + return -r, nil + case x == y: + // nop + case x && !y: + return r, nil + } + default: + return -r, nil + } + + case int64: + switch y := yi.(type) { + case nil, bool: + return r, nil + case int64: + c = collateInt(x, y) + case uint64: + c = collateIntUint(x, y) + case float64: + c = collateFloat(float64(x), y) + case complex128: + c = collateComplex(complex(float64(x), 0), y) + case []byte: + return -r, nil 
+ case string: + return -r, nil + } + + if c != 0 { + return c * r, nil + } + + case uint64: + switch y := yi.(type) { + case nil, bool: + return r, nil + case int64: + c = collateUintInt(x, y) + case uint64: + c = collateUint(x, y) + case float64: + c = collateFloat(float64(x), y) + case complex128: + c = collateComplex(complex(float64(x), 0), y) + case []byte: + return -r, nil + case string: + return -r, nil + } + + if c != 0 { + return c * r, nil + } + + case float64: + switch y := yi.(type) { + case nil, bool: + return r, nil + case int64: + c = collateFloat(x, float64(y)) + case uint64: + c = collateFloat(x, float64(y)) + case float64: + c = collateFloat(x, y) + case complex128: + c = collateComplex(complex(x, 0), y) + case []byte: + return -r, nil + case string: + return -r, nil + } + + if c != 0 { + return c * r, nil + } + + case complex128: + switch y := yi.(type) { + case nil, bool: + return r, nil + case int64: + c = collateComplex(x, complex(float64(y), 0)) + case uint64: + c = collateComplex(x, complex(float64(y), 0)) + case float64: + c = collateComplex(x, complex(y, 0)) + case complex128: + c = collateComplex(x, y) + case []byte: + return -r, nil + case string: + return -r, nil + } + + if c != 0 { + return c * r, nil + } + + case []byte: + switch y := yi.(type) { + case nil, bool, int64, uint64, float64, complex128: + return r, nil + case []byte: + c = bytes.Compare(x, y) + case string: + return -r, nil + } + + if c != 0 { + return c * r, nil + } + + case string: + switch y := yi.(type) { + case nil, bool, int64, uint64, float64, complex128: + return r, nil + case []byte: + return r, nil + case string: + switch { + case strCollate != nil: + c = strCollate(x, y) + case x < y: + return -r, nil + case x == y: + c = 0 + case x > y: + return r, nil + } + } + + if c != 0 { + return c * r, nil + } + } + } + + if nx == ny { + return 0, nil + } + + return -r, nil +} diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/gb_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/gb_test.go new file mode 100644 index 00000000..a9923ca5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/gb_test.go @@ -0,0 +1,364 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Utilities to encode/decode and collate Go predeclared scalar types. The +// encoding format reused the one used by the "encoding/gob" package. + +package lldb + +import ( + "bytes" + "math" + "testing" +) + +const s256 = "" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + + "0123456789abcdef" + +func TestEncodeDecodeScalars(t *testing.T) { + table := []struct{ v, exp interface{} }{ + {nil, "00"}, + {false, "01"}, + {true, "02"}, + {math.Float64frombits(0), []byte{gbFloat0}}, + {17., []byte{gbFloat2, 0x31, 0x40}}, + {math.Float64frombits(0x4031320000000000), []byte{gbFloat3, 0x32, 0x31, 0x40}}, + {math.Float64frombits(0x4031323300000000), []byte{gbFloat4, 0x33, 0x32, 0x31, 0x40}}, + {math.Float64frombits(0x4031323334000000), []byte{gbFloat5, 0x34, 0x33, 0x32, 0x31, 0x40}}, + {math.Float64frombits(0x4031323334350000), []byte{gbFloat6, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}}, + {math.Float64frombits(0x4031323334353600), []byte{gbFloat7, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}}, + {math.Float64frombits(0x4031323334353637), []byte{gbFloat8, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}}, + {0 + 0i, []byte{gbComplex0, gbComplex0}}, + {17 + 17i, []byte{gbComplex2, 
0x31, 0x40, gbComplex2, 0x31, 0x40}}, + {complex(math.Float64frombits(0x4041420000000000), math.Float64frombits(0x4031320000000000)), []byte{gbComplex3, 0x42, 0x41, 0x40, gbComplex3, 0x32, 0x31, 0x40}}, + {complex(math.Float64frombits(0x4041424300000000), math.Float64frombits(0x4031323300000000)), []byte{gbComplex4, 0x43, 0x42, 0x41, 0x40, gbComplex4, 0x33, 0x32, 0x31, 0x40}}, + {complex(math.Float64frombits(0x4041424344000000), math.Float64frombits(0x4031323334000000)), []byte{gbComplex5, 0x44, 0x43, 0x42, 0x41, 0x40, gbComplex5, 0x34, 0x33, 0x32, 0x31, 0x40}}, + {complex(math.Float64frombits(0x4041424344450000), math.Float64frombits(0x4031323334350000)), []byte{gbComplex6, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, gbComplex6, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}}, + {complex(math.Float64frombits(0x4041424344454600), math.Float64frombits(0x4031323334353600)), []byte{gbComplex7, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, gbComplex7, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}}, + {complex(math.Float64frombits(0x4041424344454647), math.Float64frombits(0x4031323334353637)), []byte{gbComplex8, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, gbComplex8, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}}, + {[]byte(""), []byte{gbBytes00}}, + {[]byte("f"), []byte{gbBytes01, 'f'}}, + {[]byte("fo"), []byte{gbBytes02, 'f', 'o'}}, + {[]byte("0123456789abcdefx"), []byte{gbBytes17, '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'x'}}, + {[]byte("0123456789abcdefxy"), []byte{gbBytes1, 18, '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'x', 'y'}}, + {[]byte(s256[:255]), append([]byte{gbBytes1, 0xff}, []byte(s256[:255])...)}, + {[]byte(s256), append([]byte{gbBytes2, 0x00, 0xff}, []byte(s256)...)}, + {"", []byte{gbString00}}, + {"f", []byte{gbString01, 'f'}}, + {"fo", []byte{gbString02, 'f', 'o'}}, + {"0123456789abcdefx", []byte{gbString17, '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'x'}}, + 
{"0123456789abcdefxy", []byte{gbString1, 18, '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'x', 'y'}}, + {s256[:255], append([]byte{gbString1, 0xff}, []byte(s256[:255])...)}, + {s256, append([]byte{gbString2, 0x01, 0x00}, []byte(s256)...)}, + {uint64(0xff), []byte{gbUintP1, 255}}, + {uint64(0xffff), []byte{gbUintP2, 255, 255}}, + {uint64(0xffffff), []byte{gbUintP3, 255, 255, 255}}, + {uint64(0xffffffff), []byte{gbUintP4, 255, 255, 255, 255}}, + {uint64(0xffffffffff), []byte{gbUintP5, 255, 255, 255, 255, 255}}, + {uint64(0xffffffffffff), []byte{gbUintP6, 255, 255, 255, 255, 255, 255}}, + {uint64(0xffffffffffffff), []byte{gbUintP7, 255, 255, 255, 255, 255, 255, 255}}, + {uint64(0xffffffffffffffff), []byte{gbUintP8, 255, 255, 255, 255, 255, 255, 255, 255}}, + {int64(math.MinInt64), []byte{gbIntM8, 128, 0, 0, 0, 0, 0, 0, 0}}, + {-int64(0x100000000000000), []byte{gbIntM7, 0, 0, 0, 0, 0, 0, 0}}, + {-int64(0x1000000000000), []byte{gbIntM6, 0, 0, 0, 0, 0, 0}}, + {-int64(0x10000000000), []byte{gbIntM5, 0, 0, 0, 0, 0}}, + {-int64(0x100000000), []byte{gbIntM4, 0, 0, 0, 0}}, + {-int64(0x1000000), []byte{gbIntM3, 0, 0, 0}}, + {-int64(0x10000), []byte{gbIntM2, 0, 0}}, + {-int64(0x100), []byte{gbIntM1, 0}}, + {-int64(0xff), []byte{gbIntM1, 1}}, + {-int64(1), []byte{gbIntM1, 255}}, + {int64(gbIntMax + 1), []byte{gbIntP1, gbIntMax + 1}}, + {int64(0xff), []byte{gbIntP1, 255}}, + {int64(0xffff), []byte{gbIntP2, 255, 255}}, + {int64(0xffffff), []byte{gbIntP3, 255, 255, 255}}, + {int64(0xffffffff), []byte{gbIntP4, 255, 255, 255, 255}}, + {int64(0xffffffffff), []byte{gbIntP5, 255, 255, 255, 255, 255}}, + {int64(0xffffffffffff), []byte{gbIntP6, 255, 255, 255, 255, 255, 255}}, + {int64(0xffffffffffffff), []byte{gbIntP7, 255, 255, 255, 255, 255, 255, 255}}, + {int64(0x7fffffffffffffff), []byte{gbIntP8, 127, 255, 255, 255, 255, 255, 255, 255}}, + {int64(0), []byte{0 + gbInt0}}, + {int64(1), []byte{1 + gbInt0}}, + {int64(2), []byte{2 + gbInt0}}, + 
{int64(gbIntMax - 2), "fd"}, + {int64(gbIntMax - 1), "fe"}, + {int64(gbIntMax), "ff"}, + } + + for i, v := range table { + g, err := EncodeScalars(v.v) + if err != nil { + t.Fatal(i, err) + } + + var e []byte + switch x := v.exp.(type) { + case string: + e = s2b(x) + case []byte: + e = x + } + + if !bytes.Equal(g, e) { + t.Fatalf("%d %v\n|% 02x|\n|% 02x|", i, v.v, g, e) + } + + t.Logf("%#v |% 02x|", v.v, g) + + dec, err := DecodeScalars(g) + if err != nil { + t.Fatal(err) + } + + if g, e := len(dec), 1; g != e { + t.Fatalf("%d %d %#v", g, e, dec) + } + + if g, ok := dec[0].([]byte); ok { + if e := v.v.([]byte); !bytes.Equal(g, e) { + t.Fatal(g, e) + } + + continue + } + + if g, e := dec[0], v.v; g != e { + t.Fatal(g, e) + } + } +} + +func strcmp(a, b string) (r int) { + if a < b { + return -1 + } + + if a == b { + return 0 + } + + return 1 +} + +func TestCollateScalars(t *testing.T) { + // all cases must return -1 + table := []struct{ x, y []interface{} }{ + {[]interface{}{}, []interface{}{1}}, + {[]interface{}{1}, []interface{}{2}}, + {[]interface{}{1, 2}, []interface{}{2, 3}}, + + {[]interface{}{nil}, []interface{}{nil, true}}, + {[]interface{}{nil}, []interface{}{false}}, + {[]interface{}{nil}, []interface{}{nil, 1}}, + {[]interface{}{nil}, []interface{}{1}}, + {[]interface{}{nil}, []interface{}{nil, uint(1)}}, + {[]interface{}{nil}, []interface{}{uint(1)}}, + {[]interface{}{nil}, []interface{}{nil, 3.14}}, + {[]interface{}{nil}, []interface{}{3.14}}, + {[]interface{}{nil}, []interface{}{nil, 3.14 + 1i}}, + {[]interface{}{nil}, []interface{}{3.14 + 1i}}, + {[]interface{}{nil}, []interface{}{nil, []byte("foo")}}, + {[]interface{}{nil}, []interface{}{[]byte("foo")}}, + {[]interface{}{nil}, []interface{}{nil, "foo"}}, + {[]interface{}{nil}, []interface{}{"foo"}}, + + {[]interface{}{false}, []interface{}{false, false}}, + {[]interface{}{false}, []interface{}{false, true}}, + {[]interface{}{false}, []interface{}{true}}, + {[]interface{}{false}, []interface{}{false, 
1}}, + {[]interface{}{false}, []interface{}{1}}, + {[]interface{}{false}, []interface{}{false, uint(1)}}, + {[]interface{}{false}, []interface{}{uint(1)}}, + {[]interface{}{false}, []interface{}{false, 1.5}}, + {[]interface{}{false}, []interface{}{1.5}}, + {[]interface{}{false}, []interface{}{false, 1.5 + 3i}}, + {[]interface{}{false}, []interface{}{1.5 + 3i}}, + {[]interface{}{false}, []interface{}{false, []byte("foo")}}, + {[]interface{}{false}, []interface{}{[]byte("foo")}}, + {[]interface{}{false}, []interface{}{false, "foo"}}, + {[]interface{}{false}, []interface{}{"foo"}}, + + {[]interface{}{1}, []interface{}{1, 2}}, + {[]interface{}{1}, []interface{}{1, 1}}, + {[]interface{}{1}, []interface{}{1, uint(2)}}, + {[]interface{}{1}, []interface{}{uint(2)}}, + {[]interface{}{1}, []interface{}{1, 1.1}}, + {[]interface{}{1}, []interface{}{1.1}}, + {[]interface{}{1}, []interface{}{1, 1.1 + 2i}}, + {[]interface{}{1}, []interface{}{1.1 + 2i}}, + {[]interface{}{1}, []interface{}{1, []byte("foo")}}, + {[]interface{}{1}, []interface{}{[]byte("foo")}}, + {[]interface{}{1}, []interface{}{1, "foo"}}, + {[]interface{}{1}, []interface{}{"foo"}}, + + {[]interface{}{uint(1)}, []interface{}{uint(1), uint(1)}}, + {[]interface{}{uint(1)}, []interface{}{uint(2)}}, + {[]interface{}{uint(1)}, []interface{}{uint(1), 2.}}, + {[]interface{}{uint(1)}, []interface{}{2.}}, + {[]interface{}{uint(1)}, []interface{}{uint(1), 2. + 0i}}, + {[]interface{}{uint(1)}, []interface{}{2. 
+ 0i}}, + {[]interface{}{uint(1)}, []interface{}{uint(1), []byte("foo")}}, + {[]interface{}{uint(1)}, []interface{}{[]byte("foo")}}, + {[]interface{}{uint(1)}, []interface{}{uint(1), "foo"}}, + {[]interface{}{uint(1)}, []interface{}{"foo"}}, + + {[]interface{}{1.}, []interface{}{1., 1}}, + {[]interface{}{1.}, []interface{}{2}}, + {[]interface{}{1.}, []interface{}{1., uint(1)}}, + {[]interface{}{1.}, []interface{}{uint(2)}}, + {[]interface{}{1.}, []interface{}{1., 1.}}, + {[]interface{}{1.}, []interface{}{1.1}}, + {[]interface{}{1.}, []interface{}{1., []byte("foo")}}, + {[]interface{}{1.}, []interface{}{[]byte("foo")}}, + {[]interface{}{1.}, []interface{}{1., "foo"}}, + {[]interface{}{1.}, []interface{}{"foo"}}, + + {[]interface{}{1 + 2i}, []interface{}{1 + 2i, 1}}, + {[]interface{}{1 + 2i}, []interface{}{2}}, + {[]interface{}{1 + 2i}, []interface{}{1 + 2i, uint(1)}}, + {[]interface{}{1 + 2i}, []interface{}{uint(2)}}, + {[]interface{}{1 + 2i}, []interface{}{1 + 2i, 1.1}}, + {[]interface{}{1 + 2i}, []interface{}{1.1}}, + {[]interface{}{1 + 2i}, []interface{}{1 + 2i, []byte("foo")}}, + {[]interface{}{1 + 2i}, []interface{}{[]byte("foo")}}, + {[]interface{}{1 + 2i}, []interface{}{1 + 2i, "foo"}}, + {[]interface{}{1 + 2i}, []interface{}{"foo"}}, + + {[]interface{}{[]byte("bar")}, []interface{}{[]byte("bar"), []byte("bar")}}, + {[]interface{}{[]byte("bar")}, []interface{}{[]byte("foo")}}, + {[]interface{}{[]byte("bar")}, []interface{}{[]byte("c")}}, + {[]interface{}{[]byte("bar")}, []interface{}{[]byte("bas")}}, + {[]interface{}{[]byte("bar")}, []interface{}{[]byte("bara")}}, + + {[]interface{}{[]byte("bar")}, []interface{}{"bap"}}, + {[]interface{}{[]byte("bar")}, []interface{}{"bar"}}, + {[]interface{}{[]byte("bar")}, []interface{}{"bas"}}, + + {[]interface{}{"bar"}, []interface{}{"bar", "bar"}}, + {[]interface{}{"bar"}, []interface{}{"foo"}}, + {[]interface{}{"bar"}, []interface{}{"c"}}, + {[]interface{}{"bar"}, []interface{}{"bas"}}, + {[]interface{}{"bar"}, 
[]interface{}{"bara"}}, + + {[]interface{}{1 + 2i}, []interface{}{1 + 3i}}, + {[]interface{}{int64(math.MaxInt64)}, []interface{}{uint64(math.MaxInt64 + 1)}}, + {[]interface{}{int8(1)}, []interface{}{int16(2)}}, + {[]interface{}{int32(1)}, []interface{}{uint8(2)}}, + {[]interface{}{uint16(1)}, []interface{}{uint32(2)}}, + {[]interface{}{float32(1)}, []interface{}{complex(float32(2), 0)}}, + + // resolved bugs + {[]interface{}{"Customer"}, []interface{}{"Date"}}, + {[]interface{}{"Customer"}, []interface{}{"Items", 1, "Quantity"}}, + } + + more := []interface{}{42, nil, 1, uint(2), 3.0, 4 + 5i, "..."} + + collate := func(x, y []interface{}, strCollate func(string, string) int) (r int) { + var err error + r, err = Collate(x, y, strCollate) + if err != nil { + t.Fatal(err) + } + + return + } + + for _, scf := range []func(string, string) int{nil, strcmp} { + for _, prefix := range more { + for i, test := range table { + var x, y []interface{} + if prefix != 42 { + x = append(x, prefix) + y = append(y, prefix) + } + x = append(x, test.x...) + y = append(y, test.y...) 
+ + // cmp(x, y) == -1 + if g, e := collate(x, y, scf), -1; g != e { + t.Fatal(i, g, e, x, y) + } + + // cmp(y, x) == 1 + if g, e := collate(y, x, scf), 1; g != e { + t.Fatal(i, g, e, y, x) + } + + src := x + for ix := len(src) - 1; ix > 0; ix-- { + if g, e := collate(src[:ix], src[:ix], scf), 0; g != e { + t.Fatal(ix, g, e) + } + + if g, e := collate(src[:ix], src, scf), -1; g != e { + t.Fatal(ix, g, e) + } + + } + + src = y + for ix := len(src) - 1; ix > 0; ix-- { + if g, e := collate(src[:ix], src[:ix], scf), 0; g != e { + t.Fatal(ix, g, e) + } + + if g, e := collate(src[:ix], src, scf), -1; g != e { + t.Fatal(ix, g, e) + } + + } + } + } + } +} + +func TestEncodingBug(t *testing.T) { + bits := uint64(0) + for i := 0; i <= 64; i++ { + encoded, err := EncodeScalars(math.Float64frombits(bits)) + if err != nil { + t.Fatal(err) + } + + t.Logf("bits %016x, enc |% x|", bits, encoded) + decoded, err := DecodeScalars(encoded) + if err != nil { + t.Fatal(err) + } + + if g, e := len(decoded), 1; g != e { + t.Fatal(g, e) + } + + f, ok := decoded[0].(float64) + if !ok { + t.Fatal(err) + } + + if g, e := math.Float64bits(f), bits; g != e { + t.Fatal(err) + } + + t.Log(f) + + bits >>= 1 + bits |= 1 << 63 + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/lldb.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/lldb.go new file mode 100644 index 00000000..8f77ec8a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/lldb.go @@ -0,0 +1,155 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lldb (WIP) implements a low level database engine. The database +// model used could be considered a specific implementation of some small(est) +// intersection of models listed in [1]. 
As a settled term is lacking, it'll be +// called here a 'Virtual memory model' (VMM). +// +// Experimental release notes +// +// This is an experimental release. Don't open a DB from two applications or +// two instances of an application - it will get corrupted (no file locking is +// implemented and this task is delegated to lldb's clients). +// +// WARNING: THE LLDB API IS SUBJECT TO CHANGE. +// +// Filers +// +// A Filer is an abstraction of storage. A Filer may be a part of some process' +// virtual address space, an OS file, a networked, remote file etc. Persistence +// of the storage is optional, opaque to VMM and it is specific to a concrete +// Filer implementation. +// +// Space management +// +// Mechanism to allocate, reallocate (resize), deallocate (and later reclaim +// the unused) contiguous parts of a Filer, called blocks. Blocks are +// identified and referred to by a handle, an int64. +// +// BTrees +// +// In addition to the VMM like services, lldb provides volatile and +// non-volatile BTrees. Keys and values of a BTree are limited in size to 64kB +// each (a bit more actually). Support for larger keys/values, if desired, can +// be built atop a BTree to certain limits. +// +// Handles vs pointers +// +// A handle is the abstracted storage counterpart of a memory address. There +// is one fundamental difference, though. Resizing a block never results in a +// change to the handle which refers to the resized block, so a handle is more +// akin to an unique numeric id/key. Yet it shares one property of pointers - +// handles can be associated again with blocks after the original handle block +// was deallocated. In other words, a handle uniqueness domain is the state of +// the database and is not something comparable to e.g. an ever growing +// numbering sequence. +// +// Also, as with memory pointers, dangling handles can be created and blocks +// overwritten when such handles are used. 
Using a zero handle to refer to a +// block will not panic; however, the resulting error is effectively the same +// exceptional situation as dereferencing a nil pointer. +// +// Blocks +// +// Allocated/used blocks, are limited in size to only a little bit more than +// 64kB. Bigger semantic entities/structures must be built in lldb's client +// code. The content of a block has no semantics attached, it's only a fully +// opaque `[]byte`. +// +// Scalars +// +// Use of "scalars" applies to EncodeScalars, DecodeScalars and Collate. Those +// first two "to bytes" and "from bytes" functions are suggested for handling +// multi-valued Allocator content items and/or keys/values of BTrees (using +// Collate for keys). Types called "scalar" are: +// +// nil (the typeless one) +// bool +// all integral types: [u]int8, [u]int16, [u]int32, [u]int, [u]int64 +// all floating point types: float32, float64 +// all complex types: complex64, complex128 +// []byte (64kB max) +// string (64kb max) +// +// Specific implementations +// +// Included are concrete implementations of some of the VMM interfaces included +// to ease serving simple client code or for testing and possibly as an +// example. More details in the documentation of such implementations. +// +// [1]: http://en.wikipedia.org/wiki/Database_model +package lldb + +const ( + fltSz = 0x70 // size of the FLT + maxShort = 251 + maxRq = 65787 + maxFLTRq = 4112 + maxHandle = 1<<56 - 1 + atomLen = 16 + tagUsedLong = 0xfc + tagUsedRelocated = 0xfd + tagFreeShort = 0xfe + tagFreeLong = 0xff + tagNotCompressed = 0 + tagCompressed = 1 +) + +// Content size n -> blocksize in atoms. +func n2atoms(n int) int { + if n > maxShort { + n += 2 + } + return (n+1)/16 + 1 +} + +// Content size n -> number of padding zeros. 
+func n2padding(n int) int { + if n > maxShort { + n += 2 + } + return 15 - (n+1)&15 +} + +// Handle <-> offset +func h2off(h int64) int64 { return (h + 6) * 16 } +func off2h(off int64) int64 { return off/16 - 6 } + +// Get a 7B int64 from b +func b2h(b []byte) (h int64) { + for _, v := range b[:7] { + h = h<<8 | int64(v) + } + return +} + +// Put a 7B int64 into b +func h2b(b []byte, h int64) []byte { + for i := range b[:7] { + b[i], h = byte(h>>48), h<<8 + } + return b +} + +// Content length N (must be in [252, 65787]) to long used block M field. +func n2m(n int) (m int) { + return n % 0x10000 +} + +// Long used block M (must be in [0, 65535]) field to content length N. +func m2n(m int) (n int) { + if m <= maxShort { + m += 0x10000 + } + return m +} + +func bpack(a []byte) []byte { + if cap(a) > len(a) { + return append([]byte(nil), a...) + } + + return a +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/lldb_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/lldb_test.go new file mode 100644 index 00000000..408125a5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/lldb_test.go @@ -0,0 +1,217 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lldb + +import ( + "bytes" + "encoding/hex" + "fmt" + "path" + "runtime" + "strings" + "testing" +) + +var dbg = func(s string, va ...interface{}) { + _, fn, fl, _ := runtime.Caller(1) + fmt.Printf("%s:%d: ", path.Base(fn), fl) + fmt.Printf(s+"\n", va...) 
+} + +func use(...interface{}) {} + +func TestN2Atoms(t *testing.T) { + tab := []struct{ n, a int }{ + {0, 1}, + {1, 1}, + {2, 1}, + {3, 1}, + {4, 1}, + {5, 1}, + {6, 1}, + {7, 1}, + {8, 1}, + {9, 1}, + {10, 1}, + {11, 1}, + {12, 1}, + {13, 1}, + {14, 1}, + + {15, 2}, + {16, 2}, + {17, 2}, + {18, 2}, + {19, 2}, + {20, 2}, + {21, 2}, + {22, 2}, + {23, 2}, + {24, 2}, + {25, 2}, + {26, 2}, + {27, 2}, + {28, 2}, + {29, 2}, + {30, 2}, + + {31, 3}, + + {252, 16}, + {253, 17}, + {254, 17}, + {255, 17}, + {256, 17}, + {257, 17}, + {258, 17}, + {259, 17}, + {260, 17}, + {261, 17}, + {262, 17}, + {263, 17}, + {264, 17}, + {265, 17}, + {266, 17}, + {267, 17}, + {268, 17}, + {269, 18}, + {65532, 4096}, + {65533, 4097}, + {65787, 4112}, + } + + for i, test := range tab { + if g, e := n2atoms(test.n), test.a; g != e { + t.Errorf("(%d) %d %d %d", i, test.n, g, e) + } + } +} + +func TestN2Padding(t *testing.T) { + tab := []struct{ n, p int }{ + {0, 14}, + {1, 13}, + {2, 12}, + {3, 11}, + {4, 10}, + {5, 9}, + {6, 8}, + {7, 7}, + {8, 6}, + {9, 5}, + {10, 4}, + {11, 3}, + {12, 2}, + {13, 1}, + {14, 0}, + + {15, 15}, + {16, 14}, + {17, 13}, + {18, 12}, + {19, 11}, + {20, 10}, + {21, 9}, + {22, 8}, + {23, 7}, + {24, 6}, + {25, 5}, + {26, 4}, + {27, 3}, + {28, 2}, + {29, 1}, + {30, 0}, + + {31, 15}, + + {252, 0}, + {253, 15}, + {254, 14}, + {255, 13}, + {256, 12}, + {257, 11}, + {258, 10}, + {259, 9}, + {260, 8}, + {261, 7}, + {262, 6}, + {263, 5}, + {264, 4}, + {265, 3}, + {266, 2}, + {267, 1}, + {268, 0}, + {269, 15}, + } + + for i, test := range tab { + if g, e := n2padding(test.n), test.p; g != e { + t.Errorf("(%d) %d %d %d", i, test.n, g, e) + } + } +} + +func TestH2Off(t *testing.T) { + tab := []struct{ h, off int64 }{ + {-1, fltSz - 32}, + {0, fltSz - 16}, + {1, fltSz + 0}, + {2, fltSz + 16}, + {3, fltSz + 32}, + } + + for i, test := range tab { + if g, e := h2off(test.h), test.off; g != e { + t.Error("h2off", i, g, e) + } + if g, e := off2h(test.off), test.h; g != e { + 
t.Error("off2h", i, g, e) + } + } +} + +func TestB2H(t *testing.T) { + tab := []struct { + b []byte + h int64 + }{ + {[]byte{0, 0, 0, 0, 0, 0, 0}, 0}, + {[]byte{0, 0, 0, 0, 0, 0, 1}, 1}, + {[]byte{0, 0, 0, 0, 0, 0, 1, 2}, 1}, + {[]byte{0, 0, 0, 0, 0, 0x32, 0x10}, 0x3210}, + {[]byte{0, 0, 0, 0, 0x54, 0x32, 0x10}, 0x543210}, + {[]byte{0, 0, 0, 0x76, 0x54, 0x32, 0x10}, 0x76543210}, + {[]byte{0, 0, 0x98, 0x76, 0x54, 0x32, 0x10}, 0x9876543210}, + {[]byte{0, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}, 0xba9876543210}, + {[]byte{0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}, 0xdcba9876543210}, + } + + for i, test := range tab { + if g, e := b2h(test.b), test.h; g != e { + t.Errorf("b2h: %d %#8x %#8x", i, g, e) + } + var g [7]byte + h2b(g[:], test.h) + if e := test.b; !bytes.Equal(g[:], e[:7]) { + t.Errorf("b2h: %d g: % 0x e: % 0x", i, g, e) + } + } +} + +func s2b(s string) []byte { + if s == "" { + return nil + } + + s = strings.Replace(s, " ", "", -1) + if n := len(s) & 1; n != 0 { + panic(n) + } + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/memfiler.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/memfiler.go new file mode 100644 index 00000000..fe8c4da3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/memfiler.go @@ -0,0 +1,344 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// A memory-only implementation of Filer. 
+ +/* + +pgBits: 8 +BenchmarkMemFilerWrSeq 100000 19430 ns/op 1646.93 MB/s +BenchmarkMemFilerRdSeq 100000 17390 ns/op 1840.13 MB/s +BenchmarkMemFilerWrRand 1000000 1903 ns/op 133.94 MB/s +BenchmarkMemFilerRdRand 1000000 1153 ns/op 221.16 MB/s + +pgBits: 9 +BenchmarkMemFilerWrSeq 100000 16195 ns/op 1975.80 MB/s +BenchmarkMemFilerRdSeq 200000 13011 ns/op 2459.39 MB/s +BenchmarkMemFilerWrRand 1000000 2248 ns/op 227.28 MB/s +BenchmarkMemFilerRdRand 1000000 1177 ns/op 433.94 MB/s + +pgBits: 10 +BenchmarkMemFilerWrSeq 100000 16169 ns/op 1979.04 MB/s +BenchmarkMemFilerRdSeq 200000 12673 ns/op 2524.91 MB/s +BenchmarkMemFilerWrRand 1000000 5550 ns/op 184.30 MB/s +BenchmarkMemFilerRdRand 1000000 1699 ns/op 601.79 MB/s + +pgBits: 11 +BenchmarkMemFilerWrSeq 100000 13449 ns/op 2379.31 MB/s +BenchmarkMemFilerRdSeq 200000 12058 ns/op 2653.80 MB/s +BenchmarkMemFilerWrRand 500000 4335 ns/op 471.47 MB/s +BenchmarkMemFilerRdRand 1000000 2843 ns/op 719.47 MB/s + +pgBits: 12 +BenchmarkMemFilerWrSeq 200000 11976 ns/op 2672.00 MB/s +BenchmarkMemFilerRdSeq 200000 12255 ns/op 2611.06 MB/s +BenchmarkMemFilerWrRand 200000 8058 ns/op 507.14 MB/s +BenchmarkMemFilerRdRand 500000 4365 ns/op 936.15 MB/s + +pgBits: 13 +BenchmarkMemFilerWrSeq 200000 10852 ns/op 2948.69 MB/s +BenchmarkMemFilerRdSeq 200000 11561 ns/op 2767.77 MB/s +BenchmarkMemFilerWrRand 200000 9748 ns/op 840.15 MB/s +BenchmarkMemFilerRdRand 500000 7236 ns/op 1131.59 MB/s + +pgBits: 14 +BenchmarkMemFilerWrSeq 200000 10328 ns/op 3098.12 MB/s +BenchmarkMemFilerRdSeq 200000 11292 ns/op 2833.66 MB/s +BenchmarkMemFilerWrRand 100000 16768 ns/op 978.75 MB/s +BenchmarkMemFilerRdRand 200000 13033 ns/op 1258.43 MB/s + +pgBits: 15 +BenchmarkMemFilerWrSeq 200000 10309 ns/op 3103.93 MB/s +BenchmarkMemFilerRdSeq 200000 11126 ns/op 2876.12 MB/s +BenchmarkMemFilerWrRand 50000 31985 ns/op 1021.74 MB/s +BenchmarkMemFilerRdRand 100000 25217 ns/op 1297.65 MB/s + +pgBits: 16 +BenchmarkMemFilerWrSeq 200000 10324 ns/op 3099.45 MB/s +BenchmarkMemFilerRdSeq 
200000 11201 ns/op 2856.80 MB/s +BenchmarkMemFilerWrRand 20000 55226 ns/op 1184.76 MB/s +BenchmarkMemFilerRdRand 50000 48316 ns/op 1355.16 MB/s + +pgBits: 17 +BenchmarkMemFilerWrSeq 200000 10377 ns/op 3083.53 MB/s +BenchmarkMemFilerRdSeq 200000 11018 ns/op 2904.18 MB/s +BenchmarkMemFilerWrRand 10000 143425 ns/op 913.12 MB/s +BenchmarkMemFilerRdRand 20000 95267 ns/op 1376.99 MB/s + +pgBits: 18 +BenchmarkMemFilerWrSeq 200000 10312 ns/op 3102.96 MB/s +BenchmarkMemFilerRdSeq 200000 11069 ns/op 2890.84 MB/s +BenchmarkMemFilerWrRand 5000 280910 ns/op 934.14 MB/s +BenchmarkMemFilerRdRand 10000 188500 ns/op 1388.17 MB/s + +*/ + +package lldb + +import ( + "bytes" + "fmt" + "io" + + "camlistore.org/third_party/github.com/cznic/fileutil" + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +const ( + pgBits = 16 + pgSize = 1 << pgBits + pgMask = pgSize - 1 +) + +var _ Filer = &MemFiler{} // Ensure MemFiler is a Filer. + +type memFilerMap map[int64]*[pgSize]byte + +// MemFiler is a memory backed Filer. It implements BeginUpdate, EndUpdate and +// Rollback as no-ops. MemFiler is not automatically persistent, but it has +// ReadFrom and WriteTo methods. +type MemFiler struct { + m memFilerMap + nest int + size int64 +} + +// NewMemFiler returns a new MemFiler. +func NewMemFiler() *MemFiler { + return &MemFiler{m: memFilerMap{}} +} + +// BeginUpdate implements Filer. +func (f *MemFiler) BeginUpdate() error { + f.nest++ + return nil +} + +// Close implements Filer. +func (f *MemFiler) Close() (err error) { + if f.nest != 0 { + return &ErrPERM{(f.Name() + ":Close")} + } + + return +} + +// EndUpdate implements Filer. +func (f *MemFiler) EndUpdate() (err error) { + if f.nest == 0 { + return &ErrPERM{(f.Name() + ": EndUpdate")} + } + + f.nest-- + return +} + +// Name implements Filer. +func (f *MemFiler) Name() string { + return fmt.Sprintf("%p.memfiler", f) +} + +// PunchHole implements Filer. 
+func (f *MemFiler) PunchHole(off, size int64) (err error) { + if off < 0 { + return &ErrINVAL{f.Name() + ": PunchHole off", off} + } + + if size < 0 || off+size > f.size { + return &ErrINVAL{f.Name() + ": PunchHole size", size} + } + + first := off >> pgBits + if off&pgMask != 0 { + first++ + } + off += size - 1 + last := off >> pgBits + if off&pgMask != 0 { + last-- + } + if limit := f.size >> pgBits; last > limit { + last = limit + } + for pg := first; pg <= last; pg++ { + delete(f.m, pg) + } + return +} + +var zeroPage [pgSize]byte + +// ReadAt implements Filer. +func (f *MemFiler) ReadAt(b []byte, off int64) (n int, err error) { + avail := f.size - off + pgI := off >> pgBits + pgO := int(off & pgMask) + rem := len(b) + if int64(rem) >= avail { + rem = int(avail) + err = io.EOF + } + for rem != 0 && avail > 0 { + pg := f.m[pgI] + if pg == nil { + pg = &zeroPage + } + nc := copy(b[:mathutil.Min(rem, pgSize)], pg[pgO:]) + pgI++ + pgO = 0 + rem -= nc + n += nc + b = b[nc:] + } + return +} + +// ReadFrom is a helper to populate MemFiler's content from r. 'n' reports the +// number of bytes read from 'r'. +func (f *MemFiler) ReadFrom(r io.Reader) (n int64, err error) { + if err = f.Truncate(0); err != nil { + return + } + + var ( + b [pgSize]byte + rn int + off int64 + ) + + var rerr error + for rerr == nil { + if rn, rerr = r.Read(b[:]); rn != 0 { + f.WriteAt(b[:rn], off) + off += int64(rn) + n += int64(rn) + } + } + if !fileutil.IsEOF(rerr) { + err = rerr + } + return +} + +// Rollback implements Filer. +func (f *MemFiler) Rollback() (err error) { return } + +// Size implements Filer. +func (f *MemFiler) Size() (int64, error) { + return f.size, nil +} + +// Sync implements Filer. +func (f *MemFiler) Sync() error { + return nil +} + +// Truncate implements Filer. 
+func (f *MemFiler) Truncate(size int64) (err error) { + switch { + case size < 0: + return &ErrINVAL{"Truncate size", size} + case size == 0: + f.m = memFilerMap{} + f.size = 0 + return + } + + first := size >> pgBits + if size&pgMask != 0 { + first++ + } + last := f.size >> pgBits + if f.size&pgMask != 0 { + last++ + } + for ; first < last; first++ { + delete(f.m, first) + } + + f.size = size + return +} + +// WriteAt implements Filer. +func (f *MemFiler) WriteAt(b []byte, off int64) (n int, err error) { + pgI := off >> pgBits + pgO := int(off & pgMask) + n = len(b) + rem := n + var nc int + for rem != 0 { + if pgO == 0 && rem >= pgSize && bytes.Equal(b[:pgSize], zeroPage[:]) { + delete(f.m, pgI) + nc = pgSize + } else { + pg := f.m[pgI] + if pg == nil { + pg = new([pgSize]byte) + f.m[pgI] = pg + } + nc = copy((*pg)[pgO:], b) + } + pgI++ + pgO = 0 + rem -= nc + b = b[nc:] + } + f.size = mathutil.MaxInt64(f.size, off+int64(n)) + return +} + +// WriteTo is a helper to copy/persist MemFiler's content to w. If w is also +// an io.WriterAt then WriteTo may attempt to _not_ write any big, for some +// value of big, runs of zeros, i.e. it will attempt to punch holes, where +// possible, in `w` if that happens to be a freshly created or to zero length +// truncated OS file. 'n' reports the number of bytes written to 'w'. 
+func (f *MemFiler) WriteTo(w io.Writer) (n int64, err error) { + var ( + b [pgSize]byte + wn, rn int + off int64 + rerr error + ) + + if wa, ok := w.(io.WriterAt); ok { + lastPgI := f.size >> pgBits + for pgI := int64(0); pgI <= lastPgI; pgI++ { + sz := pgSize + if pgI == lastPgI { + sz = int(f.size & pgMask) + } + pg := f.m[pgI] + if pg != nil { + wn, err = wa.WriteAt(pg[:sz], off) + if err != nil { + return + } + + n += int64(wn) + off += int64(sz) + if wn != sz { + return n, io.ErrShortWrite + } + } + } + return + } + + var werr error + for rerr == nil { + if rn, rerr = f.ReadAt(b[:], off); rn != 0 { + off += int64(rn) + if wn, werr = w.Write(b[:rn]); werr != nil { + return n, werr + } + + n += int64(wn) + } + } + if !fileutil.IsEOF(rerr) { + err = rerr + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/memfiler_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/memfiler_test.go new file mode 100644 index 00000000..319f4bbb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/memfiler_test.go @@ -0,0 +1,132 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lldb + +import ( + "bytes" + "math/rand" + "testing" +) + +// Test automatic page releasing (hole punching) of zero pages +func TestMemFilerWriteAt(t *testing.T) { + f := NewMemFiler() + + // Add page index 0 + if _, err := f.WriteAt([]byte{1}, 0); err != nil { + t.Fatal(err) + } + + if g, e := len(f.m), 1; g != e { + t.Fatal(g, e) + } + + // Add page index 1 + if _, err := f.WriteAt([]byte{2}, pgSize); err != nil { + t.Fatal(err) + } + + if g, e := len(f.m), 2; g != e { + t.Fatal(g, e) + } + + // Add page index 2 + if _, err := f.WriteAt([]byte{3}, 2*pgSize); err != nil { + t.Fatal(err) + } + + if g, e := len(f.m), 3; g != e { + t.Fatal(g, e) + } + + // Remove page index 1 + if _, err := f.WriteAt(make([]byte, 2*pgSize), pgSize/2); err != nil { + t.Fatal(err) + } + + if g, e := len(f.m), 2; g != e { + t.Logf("%#v", f.m) + t.Fatal(g, e) + } + + if err := f.Truncate(1); err != nil { + t.Fatal(err) + } + + if g, e := len(f.m), 1; g != e { + t.Logf("%#v", f.m) + t.Fatal(g, e) + } + + if err := f.Truncate(0); err != nil { + t.Fatal(err) + } + + if g, e := len(f.m), 0; g != e { + t.Logf("%#v", f.m) + t.Fatal(g, e) + } +} + +func TestMemFilerWriteTo(t *testing.T) { + const max = 1e5 + var b [max]byte + rng := rand.New(rand.NewSource(42)) + for sz := 0; sz < 1e5; sz += 2053 { + for i := range b[:sz] { + b[i] = byte(rng.Int()) + } + f := NewMemFiler() + if n, err := f.WriteAt(b[:sz], 0); n != sz || err != nil { + t.Fatal(n, err) + } + + var buf bytes.Buffer + if n, err := f.WriteTo(&buf); n != int64(sz) || err != nil { + t.Fatal(n, err) + } + + if !bytes.Equal(b[:sz], buf.Bytes()) { + t.Fatal("content differs") + } + } +} + +func TestMemFilerReadFromWriteTo(t *testing.T) { + const ( + sz = 1e2 * pgSize + hole = 1e1 * pgSize + ) + rng := rand.New(rand.NewSource(42)) + data := make([]byte, sz) + for i := range data { + data[i] = byte(rng.Int()) + } + f := NewMemFiler() + buf := bytes.NewBuffer(data) + if n, err := f.ReadFrom(buf); n != int64(len(data)) || err != 
nil { + t.Fatal(n, err) + } + + buf = bytes.NewBuffer(nil) + if n, err := f.WriteTo(buf); n != int64(len(data)) || err != nil { + t.Fatal(n, err) + } + + rd := buf.Bytes() + if !bytes.Equal(data, rd) { + t.Fatal("corrupted data") + } + + n0 := len(f.m) + data = make([]byte, hole) + f.WriteAt(data, sz/2) + n := len(f.m) + t.Log(n0, n) + d := n0 - n + if d*pgSize < hole-2 || d*pgSize > hole { + t.Fatal(n0, n, d) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/osfiler.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/osfiler.go new file mode 100644 index 00000000..76b3d264 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/osfiler.go @@ -0,0 +1,130 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lldb + +import ( + "io" + "os" + + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +var _ Filer = (*OSFiler)(nil) + +// OSFile is an os.File like minimal set of methods allowing to construct a +// Filer. +type OSFile interface { + Name() string + Stat() (fi os.FileInfo, err error) + Sync() (err error) + Truncate(size int64) (err error) + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt +} + +// OSFiler is like a SimpleFileFiler but based on an OSFile. +type OSFiler struct { + f OSFile + nest int + size int64 // not set if < 0 +} + +// NewOSFiler returns a Filer from an OSFile. This Filer is like the +// SimpleFileFiler, it does not implement the transaction related methods. +func NewOSFiler(f OSFile) (r *OSFiler) { + return &OSFiler{ + f: f, + size: -1, + } +} + +// BeginUpdate implements Filer. +func (f *OSFiler) BeginUpdate() (err error) { + f.nest++ + return nil +} + +// Close implements Filer. 
+func (f *OSFiler) Close() (err error) { + if f.nest != 0 { + return &ErrPERM{(f.Name() + ":Close")} + } + + return f.f.Close() +} + +// EndUpdate implements Filer. +func (f *OSFiler) EndUpdate() (err error) { + if f.nest == 0 { + return &ErrPERM{(f.Name() + ":EndUpdate")} + } + + f.nest-- + return +} + +// Name implements Filer. +func (f *OSFiler) Name() string { + return f.f.Name() +} + +// PunchHole implements Filer. +func (f *OSFiler) PunchHole(off, size int64) (err error) { + return +} + +// ReadAt implements Filer. +func (f *OSFiler) ReadAt(b []byte, off int64) (n int, err error) { + return f.f.ReadAt(b, off) +} + +// Rollback implements Filer. +func (f *OSFiler) Rollback() (err error) { return } + +// Size implements Filer. +func (f *OSFiler) Size() (n int64, err error) { + if f.size < 0 { // boot + fi, err := f.f.Stat() + if err != nil { + return 0, err + } + + f.size = fi.Size() + } + return f.size, nil +} + +// Sync implements Filer. +func (f *OSFiler) Sync() (err error) { + return f.f.Sync() +} + +// Truncate implements Filer. +func (f *OSFiler) Truncate(size int64) (err error) { + if size < 0 { + return &ErrINVAL{"Truncate size", size} + } + + f.size = size + return f.f.Truncate(size) +} + +// WriteAt implements Filer. +func (f *OSFiler) WriteAt(b []byte, off int64) (n int, err error) { + if f.size < 0 { // boot + fi, err := os.Stat(f.f.Name()) + if err != nil { + return 0, err + } + + f.size = fi.Size() + } + f.size = mathutil.MaxInt64(f.size, int64(len(b))+off) + return f.f.WriteAt(b, off) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/simplefilefiler.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/simplefilefiler.go new file mode 100644 index 00000000..e01f5544 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/simplefilefiler.go @@ -0,0 +1,123 @@ +// Copyright 2014 The lldb Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// A basic os.File backed Filer. + +package lldb + +import ( + "os" + + "camlistore.org/third_party/github.com/cznic/fileutil" + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +var _ Filer = &SimpleFileFiler{} // Ensure SimpleFileFiler is a Filer. + +// SimpleFileFiler is an os.File backed Filer intended for use where structural +// consistency can be reached by other means (SimpleFileFiler is for example +// wrapped in eg. an RollbackFiler or ACIDFiler0) or where persistence is not +// required (temporary/working data sets). +// +// SimpleFileFiler is the most simple os.File backed Filer implementation as it +// does not really implement BeginUpdate and EndUpdate/Rollback in any way +// which would protect the structural integrity of data. If misused e.g. as a +// real database storage w/o other measures, it can easily cause data loss +// when, for example, a power outage occurs or the updating process terminates +// abruptly. +type SimpleFileFiler struct { + file *os.File + nest int + size int64 // not set if < 0 +} + +// NewSimpleFileFiler returns a new SimpleFileFiler. +func NewSimpleFileFiler(f *os.File) *SimpleFileFiler { + return &SimpleFileFiler{file: f, size: -1} +} + +// BeginUpdate implements Filer. +func (f *SimpleFileFiler) BeginUpdate() error { + f.nest++ + return nil +} + +// Close implements Filer. +func (f *SimpleFileFiler) Close() (err error) { + if f.nest != 0 { + return &ErrPERM{(f.Name() + ":Close")} + } + + return f.file.Close() +} + +// EndUpdate implements Filer. +func (f *SimpleFileFiler) EndUpdate() (err error) { + if f.nest == 0 { + return &ErrPERM{(f.Name() + ":EndUpdate")} + } + + f.nest-- + return +} + +// Name implements Filer. +func (f *SimpleFileFiler) Name() string { + return f.file.Name() +} + +// PunchHole implements Filer. 
+func (f *SimpleFileFiler) PunchHole(off, size int64) (err error) { + return fileutil.PunchHole(f.file, off, size) +} + +// ReadAt implements Filer. +func (f *SimpleFileFiler) ReadAt(b []byte, off int64) (n int, err error) { + return f.file.ReadAt(b, off) +} + +// Rollback implements Filer. +func (f *SimpleFileFiler) Rollback() (err error) { return } + +// Size implements Filer. +func (f *SimpleFileFiler) Size() (int64, error) { + if f.size < 0 { // boot + fi, err := os.Stat(f.file.Name()) + if err != nil { + return 0, err + } + + f.size = fi.Size() + } + return f.size, nil +} + +// Sync implements Filer. +func (f *SimpleFileFiler) Sync() error { + return f.file.Sync() +} + +// Truncate implements Filer. +func (f *SimpleFileFiler) Truncate(size int64) (err error) { + if size < 0 { + return &ErrINVAL{"Truncate size", size} + } + + f.size = size + return f.file.Truncate(size) +} + +// WriteAt implements Filer. +func (f *SimpleFileFiler) WriteAt(b []byte, off int64) (n int, err error) { + if f.size < 0 { // boot + fi, err := os.Stat(f.file.Name()) + if err != nil { + return 0, err + } + + f.size = fi.Size() + } + f.size = mathutil.MaxInt64(f.size, int64(len(b))+off) + return f.file.WriteAt(b, off) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/xact.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/xact.go new file mode 100644 index 00000000..e953f386 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/xact.go @@ -0,0 +1,629 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Structural transactions. + +package lldb + +//DONE+ TransactionalMemoryFiler +// ---- +// Use NewRollbackFiler(myMemFiler, ...) 
+ +/* + +bfBits: 3 +BenchmarkRollbackFiler 20000000 102 ns/op 9.73 MB/s + +bfBits: 4 +BenchmarkRollbackFiler 50000000 55.7 ns/op 17.95 MB/s + +bfBits: 5 +BenchmarkRollbackFiler 100000000 32.2 ns/op 31.06 MB/s + +bfBits: 6 +BenchmarkRollbackFiler 100000000 20.6 ns/op 48.46 MB/s + +bfBits: 7 +BenchmarkRollbackFiler 100000000 15.1 ns/op 66.12 MB/s + +bfBits: 8 +BenchmarkRollbackFiler 100000000 10.5 ns/op 95.66 MB/s + +bfBits: 9 +BenchmarkRollbackFiler 200000000 8.02 ns/op 124.74 MB/s + +bfBits: 10 +BenchmarkRollbackFiler 200000000 9.25 ns/op 108.09 MB/s + +bfBits: 11 +BenchmarkRollbackFiler 100000000 11.7 ns/op 85.47 MB/s + +bfBits: 12 +BenchmarkRollbackFiler 100000000 17.2 ns/op 57.99 MB/s + +bfBits: 13 +BenchmarkRollbackFiler 100000000 32.7 ns/op 30.58 MB/s + +bfBits: 14 +BenchmarkRollbackFiler 50000000 39.6 ns/op 25.27 MB/s + +*/ + +import ( + "fmt" + "io" + "sync" + + "camlistore.org/third_party/github.com/cznic/fileutil" + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +var ( + _ Filer = &bitFiler{} // Ensure bitFiler is a Filer. 
+ _ Filer = &RollbackFiler{} // ditto +) + +const ( + bfBits = 9 + bfSize = 1 << bfBits + bfMask = bfSize - 1 +) + +var ( + bitmask = [8]byte{1, 2, 4, 8, 16, 32, 64, 128} + bitZeroPage bitPage + allDirtyFlags [bfSize >> 3]byte +) + +func init() { + for i := range allDirtyFlags { + allDirtyFlags[i] = 0xff + } +} + +type ( + bitPage struct { + prev, next *bitPage + data [bfSize]byte + flags [bfSize >> 3]byte + dirty bool + } + + bitFilerMap map[int64]*bitPage + + bitFiler struct { + parent Filer + m bitFilerMap + size int64 + } +) + +func newBitFiler(parent Filer) (f *bitFiler, err error) { + sz, err := parent.Size() + if err != nil { + return + } + + return &bitFiler{parent: parent, m: bitFilerMap{}, size: sz}, nil +} + +func (f *bitFiler) BeginUpdate() error { panic("internal error") } +func (f *bitFiler) EndUpdate() error { panic("internal error") } +func (f *bitFiler) Rollback() error { panic("internal error") } +func (f *bitFiler) Sync() error { panic("internal error") } + +func (f *bitFiler) Close() (err error) { return } +func (f *bitFiler) Name() string { return fmt.Sprintf("%p.bitfiler", f) } +func (f *bitFiler) Size() (int64, error) { return f.size, nil } + +func (f *bitFiler) PunchHole(off, size int64) (err error) { + first := off >> bfBits + if off&bfMask != 0 { + first++ + } + off += size - 1 + last := off >> bfBits + if off&bfMask != 0 { + last-- + } + if limit := f.size >> bfBits; last > limit { + last = limit + } + for pgI := first; pgI <= last; pgI++ { + pg := &bitPage{} + pg.flags = allDirtyFlags + f.m[pgI] = pg + } + return +} + +func (f *bitFiler) ReadAt(b []byte, off int64) (n int, err error) { + avail := f.size - off + pgI := off >> bfBits + pgO := int(off & bfMask) + rem := len(b) + if int64(rem) >= avail { + rem = int(avail) + err = io.EOF + } + for rem != 0 && avail > 0 { + pg := f.m[pgI] + if pg == nil { + pg = &bitPage{} + if f.parent != nil { + _, err = f.parent.ReadAt(pg.data[:], off&^bfMask) + if err != nil && !fileutil.IsEOF(err) { + 
return + } + + err = nil + } + f.m[pgI] = pg + } + nc := copy(b[:mathutil.Min(rem, bfSize)], pg.data[pgO:]) + pgI++ + pgO = 0 + rem -= nc + n += nc + b = b[nc:] + off += int64(nc) + } + return +} + +func (f *bitFiler) Truncate(size int64) (err error) { + switch { + case size < 0: + return &ErrINVAL{"Truncate size", size} + case size == 0: + f.m = bitFilerMap{} + f.size = 0 + return + } + + first := size >> bfBits + if size&bfMask != 0 { + first++ + } + last := f.size >> bfBits + if f.size&bfMask != 0 { + last++ + } + for ; first < last; first++ { + delete(f.m, first) + } + + f.size = size + return +} + +func (f *bitFiler) WriteAt(b []byte, off int64) (n int, err error) { + off0 := off + pgI := off >> bfBits + pgO := int(off & bfMask) + n = len(b) + rem := n + var nc int + for rem != 0 { + pg := f.m[pgI] + if pg == nil { + pg = &bitPage{} + if f.parent != nil { + _, err = f.parent.ReadAt(pg.data[:], off&^bfMask) + if err != nil && !fileutil.IsEOF(err) { + return + } + + err = nil + } + f.m[pgI] = pg + } + nc = copy(pg.data[pgO:], b) + pgI++ + pg.dirty = true + for i := pgO; i < pgO+nc; i++ { + pg.flags[i>>3] |= bitmask[i&7] + } + pgO = 0 + rem -= nc + b = b[nc:] + off += int64(nc) + } + f.size = mathutil.MaxInt64(f.size, off0+int64(n)) + return +} + +func (f *bitFiler) link() { + for pgI, pg := range f.m { + nx, ok := f.m[pgI+1] + if !ok || !nx.dirty { + continue + } + + nx.prev, pg.next = pg, nx + } +} + +func (f *bitFiler) dumpDirty(w io.WriterAt) (nwr int, err error) { + f.link() + for pgI, pg := range f.m { + if !pg.dirty { + continue + } + + for pg.prev != nil && pg.prev.dirty { + pg = pg.prev + pgI-- + } + + for pg != nil && pg.dirty { + last := false + var off int64 + first := -1 + for i := 0; i < bfSize; i++ { + flag := pg.flags[i>>3]&bitmask[i&7] != 0 + switch { + case flag && !last: // Leading edge detected + off = pgI<= 0 { + i := bfSize + n, err := w.WriteAt(pg.data[first:i], off) + if n != i-first { + return 0, err + } + + nwr++ + } + + pg.dirty = false 
+ pg = pg.next + pgI++ + } + } + return +} + +// RollbackFiler is a Filer implementing structural transaction handling. +// Structural transactions should be small and short lived because all non +// committed data are held in memory until committed or discarded by a +// Rollback. +// +// While using RollbackFiler, every intended update of the wrapped Filer, by +// WriteAt, Truncate or PunchHole, _must_ be made within a transaction. +// Attempts to do it outside of a transaction will return ErrPERM. OTOH, +// invoking ReadAt outside of a transaction is not a problem. +// +// No nested transactions: All updates within a transaction are held in memory. +// On a matching EndUpdate the updates held in memory are actually written to +// the wrapped Filer. +// +// Nested transactions: Correct data will be seen from RollbackFiler when any +// level of a nested transaction is rolled back. The actual writing to the +// wrapped Filer happens only when the outer most transaction nesting level is +// closed. +// +// Invoking Rollback is an alternative to EndUpdate. It discards all changes +// made at the current transaction level and returns the "state" (possibly not +// yet persisted) of the Filer to what it was before the corresponding +// BeginUpdate. +// +// During an open transaction, all reads (using ReadAt) are "dirty" reads, +// seeing the uncommitted changes made to the Filer's data. +// +// Lldb databases should be based upon a RollbackFiler. +// +// With a wrapped MemFiler one gets transactional memory. With, for example a +// wrapped disk based SimpleFileFiler it protects against at least some HW +// errors - if Rollback is properly invoked on such failures and/or if there's +// some WAL or 2PC or whatever other safe mechanism based recovery procedure +// used by the client. +// +// The "real" writes to the wrapped Filer (or WAL instead) go through the +// writerAt supplied to NewRollbackFiler. 
+// +// List of functions/methods which are recommended to be wrapped in a +// BeginUpdate/EndUpdate structural transaction: +// +// Allocator.Alloc +// Allocator.Free +// Allocator.Realloc +// +// CreateBTree +// RemoveBTree +// BTree.Clear +// BTree.Delete +// BTree.DeleteAny +// BTree.Clear +// BTree.Extract +// BTree.Get (it can mutate the DB) +// BTree.Put +// BTree.Set +// +// NOTE: RollbackFiler is a generic solution intended to wrap Filers provided +// by this package which do not implement any of the transactional methods. +// RollbackFiler thus _does not_ invoke any of the transactional methods of its +// wrapped Filer. +// +// RollbackFiler is safe for concurrent use by multiple goroutines. +type RollbackFiler struct { + mu sync.RWMutex + inCallback bool + inCallbackMu sync.RWMutex + bitFiler *bitFiler + checkpoint func(int64) error + closed bool + f Filer + parent Filer + tlevel int // transaction nesting level, 0 == not in transaction + writerAt io.WriterAt + + // afterRollback, if not nil, is called after performing Rollback + // without errors. + afterRollback func() error +} + +// NewRollbackFiler returns a RollbackFiler wrapping f. +// +// The checkpoint parameter +// +// The checkpoint function is called after closing (by EndUpdate) the upper +// most level open transaction if all calls of writerAt were successful and the +// DB (or eg. a WAL) is thus now in a consistent state (virtually, in the ideal +// world with no write caches, no HW failures, no process crashes, ...). +// +// NOTE: In, for example, a 2PC it is necessary to reflect also the sz +// parameter as the new file size (as in the parameter to Truncate). All +// changes were successfully written already by writerAt before invoking +// checkpoint. +// +// The writerAt parameter +// +// The writerAt interface is used to commit the updates of the wrapped Filer. 
+// If any invocation of writerAt fails then a non nil error will be returned +// from EndUpdate and checkpoint will _not_ be called. Neither is it necessary to +// call Rollback. The rule of thumb: The [structural] transaction [level] is +// closed by invoking exactly once one of EndUpdate _or_ Rollback. +// +// It is presumed that writerAt uses WAL or 2PC or whatever other safe +// mechanism to physically commit the updates. +// +// Updates performed by invocations of writerAt are byte-precise, but not +// necessarily maximum possible length precise. IOW, for example an update +// crossing page boundaries may be performed by more than one writerAt +// invocation. No offset sorting is performed. This may change if it proves +// to be a problem. Such change would be considered backward compatible. +// +// NOTE: Using RollbackFiler, but failing to ever invoke a matching "closing" +// EndUpdate after an "opening" BeginUpdate means neither writerAt nor +// checkpoint will ever get called - with all the possible data loss +// consequences. +func NewRollbackFiler(f Filer, checkpoint func(sz int64) error, writerAt io.WriterAt) (r *RollbackFiler, err error) { + if f == nil || checkpoint == nil || writerAt == nil { + return nil, &ErrINVAL{Src: "lldb.NewRollbackFiler, nil argument"} + } + + return &RollbackFiler{ + checkpoint: checkpoint, + f: f, + writerAt: writerAt, + }, nil +} + +// Implements Filer. +func (r *RollbackFiler) BeginUpdate() (err error) { + r.mu.Lock() + defer r.mu.Unlock() + + parent := r.f + if r.tlevel != 0 { + parent = r.bitFiler + } + r.bitFiler, err = newBitFiler(parent) + if err != nil { + return + } + + r.tlevel++ + return +} + +// Implements Filer. +// +// Close will return an error if not invoked at nesting level 0. However, to +// allow emergency closing from eg. a signal handler; if Close is invoked +// within an open transaction(s), it rolls back any non committed open +// transactions and performs the Close operation. 
+// +// IOW: Regardless of the transaction nesting level the Close is always +// performed but any uncommitted transaction data are lost. +func (r *RollbackFiler) Close() (err error) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.closed { + return &ErrPERM{r.f.Name() + ": Already closed"} + } + + r.closed = true + if err = r.f.Close(); err != nil { + return + } + + if r.tlevel != 0 { + err = &ErrPERM{r.f.Name() + ": Close inside an open transaction"} + } + + return +} + +// Implements Filer. +func (r *RollbackFiler) EndUpdate() (err error) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.tlevel == 0 { + return &ErrPERM{r.f.Name() + " : EndUpdate outside of a transaction"} + } + + sz, err := r.size() // Cannot call .Size() -> deadlock + if err != nil { + return + } + + r.tlevel-- + bf := r.bitFiler + parent := bf.parent + w := r.writerAt + if r.tlevel != 0 { + w = parent + } + nwr, err := bf.dumpDirty(w) + if err != nil { + return + } + + switch { + case r.tlevel == 0: + r.bitFiler = nil + if nwr == 0 { + return + } + + return r.checkpoint(sz) + default: + r.bitFiler = parent.(*bitFiler) + sz, _ := bf.Size() // bitFiler.Size() never returns err != nil + return parent.Truncate(sz) + } +} + +// Implements Filer. +func (r *RollbackFiler) Name() string { + r.mu.RLock() + defer r.mu.RUnlock() + + return r.f.Name() +} + +// Implements Filer. +func (r *RollbackFiler) PunchHole(off, size int64) error { + r.mu.Lock() + defer r.mu.Unlock() + + if r.tlevel == 0 { + return &ErrPERM{r.f.Name() + ": PunchHole outside of a transaction"} + } + + if off < 0 { + return &ErrINVAL{r.f.Name() + ": PunchHole off", off} + } + + if size < 0 || off+size > r.bitFiler.size { + return &ErrINVAL{r.f.Name() + ": PunchHole size", size} + } + + return r.bitFiler.PunchHole(off, size) +} + +// Implements Filer. 
+func (r *RollbackFiler) ReadAt(b []byte, off int64) (n int, err error) { + r.inCallbackMu.RLock() + defer r.inCallbackMu.RUnlock() + if !r.inCallback { + r.mu.RLock() + defer r.mu.RUnlock() + } + if r.tlevel == 0 { + return r.f.ReadAt(b, off) + } + + return r.bitFiler.ReadAt(b, off) +} + +// Implements Filer. +func (r *RollbackFiler) Rollback() (err error) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.tlevel == 0 { + return &ErrPERM{r.f.Name() + ": Rollback outside of a transaction"} + } + + if r.tlevel > 1 { + r.bitFiler = r.bitFiler.parent.(*bitFiler) + } + r.tlevel-- + if f := r.afterRollback; f != nil { + r.inCallbackMu.Lock() + r.inCallback = true + r.inCallbackMu.Unlock() + defer func() { + r.inCallbackMu.Lock() + r.inCallback = false + r.inCallbackMu.Unlock() + }() + return f() + } + return +} + +func (r *RollbackFiler) size() (sz int64, err error) { + if r.tlevel == 0 { + return r.f.Size() + } + + return r.bitFiler.Size() +} + +// Implements Filer. +func (r *RollbackFiler) Size() (sz int64, err error) { + r.mu.Lock() + defer r.mu.Unlock() + + return r.size() +} + +// Implements Filer. +func (r *RollbackFiler) Sync() error { + r.mu.Lock() + defer r.mu.Unlock() + + return r.f.Sync() +} + +// Implements Filer. +func (r *RollbackFiler) Truncate(size int64) error { + r.mu.Lock() + defer r.mu.Unlock() + + if r.tlevel == 0 { + return &ErrPERM{r.f.Name() + ": Truncate outside of a transaction"} + } + + return r.bitFiler.Truncate(size) +} + +// Implements Filer. 
+func (r *RollbackFiler) WriteAt(b []byte, off int64) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.tlevel == 0 { + return 0, &ErrPERM{r.f.Name() + ": WriteAt outside of a transaction"} + } + + return r.bitFiler.WriteAt(b, off) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/xact_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/xact_test.go new file mode 100644 index 00000000..4e3f3f0b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/exp/lldb/xact_test.go @@ -0,0 +1,400 @@ +// Copyright 2014 The lldb Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lldb + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "math/rand" + "testing" + + "camlistore.org/third_party/github.com/cznic/fileutil" + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +func (f *bitFiler) dump(w io.Writer) { + fmt.Fprintf(w, "bitFiler @ %p, size: %d(%#x)\n", f, f.size, f.size) + for k, v := range f.m { + fmt.Fprintf(w, "bitPage @ %p: pgI %d(%#x): %#v\n", v, k, k, *v) + } +} + +func filerBytes(f Filer) []byte { + sz, err := f.Size() + if err != nil { + panic(err) + } + + b := make([]byte, int(sz)) + n, err := f.ReadAt(b, 0) + if n != len(b) { + panic(fmt.Errorf("sz %d n %d err %v", sz, n, err)) + } + + return b +} + +func cmpFilerBytes(t *testing.T, fg, fe Filer) { + g, e := filerBytes(fg), filerBytes(fe) + if !bytes.Equal(g, e) { + t.Fatalf("Filer content doesn't match: got\n%sexp:\n%s", hex.Dump(g), hex.Dump(e)) + } +} + +func TestRollbackFiler0(t *testing.T) { + var r *RollbackFiler + f, g := NewMemFiler(), NewMemFiler() + + checkpoint := func(sz int64) (err error) { + return f.Truncate(sz) + } + + r, err := NewRollbackFiler(f, checkpoint, f) + if err != nil { + t.Fatal(err) + } + + if err = r.BeginUpdate(); err != nil { + t.Fatal(err) + } + + 
if err = r.EndUpdate(); err != nil { + t.Fatal(err) + } + + cmpFilerBytes(t, f, g) +} + +func TestRollbackFiler1(t *testing.T) { + const ( + N = 1e6 + O = 1234 + ) + + var r *RollbackFiler + f, g := NewMemFiler(), NewMemFiler() + + checkpoint := func(sz int64) (err error) { + return f.Truncate(sz) + } + + r, err := NewRollbackFiler(f, checkpoint, f) + if err != nil { + t.Fatal(err) + } + + if err = r.BeginUpdate(); err != nil { + t.Fatal(err) + } + + rng := rand.New(rand.NewSource(42)) + b := make([]byte, N) + for i := range b { + b[i] = byte(rng.Int()) + } + + if _, err = g.WriteAt(b, O); err != nil { + t.Fatal(err) + } + + if _, err = r.WriteAt(b, O); err != nil { + t.Fatal(err) + } + + b = filerBytes(f) + if n := len(b); n != 0 { + t.Fatal(n) + } + + if err = r.EndUpdate(); err != nil { + t.Fatal(err) + } + + cmpFilerBytes(t, f, g) +} + +func TestRollbackFiler2(t *testing.T) { + const ( + N = 1e6 + O = 1234 + ) + + var r *RollbackFiler + f, g := NewMemFiler(), NewMemFiler() + + checkpoint := func(sz int64) (err error) { + return f.Truncate(sz) + } + + r, err := NewRollbackFiler(f, checkpoint, f) + if err != nil { + t.Fatal(err) + } + + if err = r.BeginUpdate(); err != nil { + t.Fatal(err) + } + + rng := rand.New(rand.NewSource(42)) + b := make([]byte, N) + for i := range b { + b[i] = byte(rng.Int()) + } + + if _, err = r.WriteAt(b, O); err != nil { + t.Fatal(err) + } + + b = filerBytes(f) + if n := len(b); n != 0 { + t.Fatal(n) + } + + if err = r.Rollback(); err != nil { + t.Fatal(err) + } + + cmpFilerBytes(t, f, g) +} + +func rndBytes(rng *rand.Rand, n int) []byte { + r := make([]byte, n) + for i := range r { + r[i] = byte(rng.Int()) + } + return r +} + +func TestRollbackFiler3(t *testing.T) { + var r *RollbackFiler + f := NewMemFiler() + + checkpoint := func(sz int64) (err error) { + return f.Truncate(sz) + } + + r, err := NewRollbackFiler(f, checkpoint, f) + if err != nil { + t.Fatal(err) + } + + n, err := r.ReadAt([]byte{0}, 0) + if n != 0 || 
!fileutil.IsEOF(err) { + t.Fatal(n, err) + } + + n, err = r.ReadAt([]byte{0}, 1e6) + if n != 0 || !fileutil.IsEOF(err) { + t.Fatal(n, err) + } + + if err = r.BeginUpdate(); err != nil { // BeginUpdate: 0 -> 1 + t.Fatal(err) + } + + rng := rand.New(rand.NewSource(42)) + + buf := rndBytes(rng, 100) + if n, err := r.WriteAt(buf, 1e6); n != 100 || err != nil { + t.Fatal(err) + } + + buf = make([]byte, 100) + if n, err := r.ReadAt(buf, 1e6-200); n != 100 || err != nil { + t.Fatal(err) + } + + for i, v := range buf { + if v != 0 { + t.Fatal(i, v) + } + } + + if err := r.Truncate(1e5); err != nil { + t.Fatal(err) + } + + if err = r.BeginUpdate(); err != nil { // BeginUpdate: 1 -> 2 + t.Fatal(err) + } + + if n, err := r.ReadAt(buf, 1e6); n != 0 || err == nil { + t.Fatal(n, err) + } + + if err := r.Truncate(2e6); err != nil { + t.Fatal(err) + } + + if err = r.BeginUpdate(); err != nil { // BeginUpdate: 2 -> 3 + t.Fatal(err) + } + + if n, err := r.ReadAt(buf, 1e6); n == 0 || err != nil { + t.Fatal(n, err) + } + + for i, v := range buf { + if v != 0 { + t.Fatal(i, v) + } + } +} + +func TestRollbackFiler4(t *testing.T) { + const ( + maxSize = 1e6 + maxChange = maxSize/100 + 4 + maxChanges = 10 + maxNest = 3 + ) + + var r *RollbackFiler + f := NewMemFiler() + + checkpoint := func(sz int64) (err error) { + return f.Truncate(sz) + } + + r, err := NewRollbackFiler(f, checkpoint, f) + if err != nil { + t.Fatal(err) + } + + rng := rand.New(rand.NewSource(42)) + + ref := make([]byte, 2*maxSize) + for i := range ref { + ref[i] = byte(rng.Int()) + } + + var finalSize int + + var fn func(int, int, []byte) (int, []byte) + fn = func(nest, inSize int, in []byte) (outSize int, out []byte) { + defer func() { + for i := outSize; i < len(out); i++ { + out[i] = 0 + } + finalSize = mathutil.Max(finalSize, outSize) + }() + + out = make([]byte, len(in), 2*maxSize) + copy(out, in) + if err := r.BeginUpdate(); err != nil { + t.Fatal(err) + } + + for i := 0; i < maxChanges; i++ { + changeLen := 
rng.Intn(maxChange) + 4 + changeOff := rng.Intn(maxSize * 3 / 2) + b := make([]byte, changeLen) + for i := range b { + b[i] = byte(rng.Int()) + } + if n, err := r.WriteAt(b, int64(changeOff)); n != len(b) || err != nil { + t.Fatal(n, len(b), err) + } + } + + if err := r.Rollback(); err != nil { + t.Fatal(err) + } + + if err := r.BeginUpdate(); err != nil { + t.Fatal(err) + } + + for i := 0; i < maxChanges; i++ { + changeLen := rng.Intn(maxChange) + 4 + changeOff := rng.Intn(maxSize * 3 / 2) + b := make([]byte, changeLen) + for i := range b { + b[i] = byte(rng.Int()) + } + if n, err := r.WriteAt(b, int64(changeOff)); n != len(b) || err != nil { + t.Fatal(n, len(b), err) + } + copy(out[changeOff:], b) + copy(ref[changeOff:], b) + } + + newSize := rng.Intn(maxSize*3/2) + 4 + if nest == maxNest { + if err := r.EndUpdate(); err != nil { + t.Fatal(err) + } + + return newSize, out + } + + outSize, out = fn(nest+1, newSize, out) + if err := r.EndUpdate(); err != nil { + t.Fatal(err) + } + + return + } + + sz, result := fn(0, maxSize, ref) + if g, e := sz, finalSize; g != e { + t.Fatal(err) + } + + g, e := result[:sz], ref[:sz] + if !bytes.Equal(g, e) { + if len(g) == len(e) { + x := make([]byte, len(g)) + for i := range x { + if g[i] != e[i] { + x[i] = 'X' + } + } + //t.Logf("Data diff\n%s", hex.Dump(x)) + } + //t.Fatalf("Data don't match: got\n%sexp:\n%s", hex.Dump(g), hex.Dump(e)) + t.Fatalf("Data don't match") + } +} + +func BenchmarkRollbackFiler(b *testing.B) { + rng := rand.New(rand.NewSource(42)) + type t struct { + off int64 + b []byte + } + a := []t{} + for rem := b.N; rem > 0; { + off := rng.Int63() + n := mathutil.Min(rng.Intn(1e3)+1, rem) + a = append(a, t{off, rndBytes(rng, n)}) + rem -= n + } + + var r *RollbackFiler + f := NewMemFiler() + + checkpoint := func(sz int64) (err error) { + return f.Truncate(sz) + } + + r, err := NewRollbackFiler(f, checkpoint, f) + if err != nil { + b.Fatal(err) + } + + if err := r.BeginUpdate(); err != nil { + b.Fatal(err) + } + 
+ b.ResetTimer() + for _, v := range a { + if _, err := r.WriteAt(v.b, v.off); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/AUTHORS b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/AUTHORS new file mode 100644 index 00000000..288ea0b1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/AUTHORS @@ -0,0 +1,14 @@ +# This file lists authors for copyright purposes. This file is distinct from +# the CONTRIBUTORS files. See the latter for an explanation. +# +# Names should be added to this file as: +# Name or Organization +# +# The email address is not required for organizations. +# +# Please keep the list sorted. + +CZ.NIC z.s.p.o. +Jan Mercl <0xjnml@gmail.com> +Aaron Bieber + diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/CONTRIBUTORS b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/CONTRIBUTORS new file mode 100644 index 00000000..223b21a3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/CONTRIBUTORS @@ -0,0 +1,14 @@ +# This file lists people who contributed code to this repository. The AUTHORS +# file lists the copyright holders; this file lists people. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Bill Thiede +Gary Burd +Jan Mercl <0xjnml@gmail.com> +Nick Owens +Tamás Gulácsi +Aaron Bieber diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/LICENSE new file mode 100644 index 00000000..50bbdd24 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014 The fileutil Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the names of the authors nor the names of the +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/Makefile b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/Makefile new file mode 100644 index 00000000..5849cc20 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/Makefile @@ -0,0 +1,27 @@ +# Copyright (c) 2014 The fileutil authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +.PHONY: all clean editor todo + +all: editor + go vet + golint . + go install + make todo + +editor: + go fmt + go test -i + go test + go build + +todo: + @grep -n ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* *.go || true + @grep -n TODO *.go || true + @grep -n BUG *.go || true + @grep -n println *.go || true + +clean: + @go clean + rm -f y.output diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/README b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/README new file mode 100644 index 00000000..f43d5f00 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/README @@ -0,0 +1,16 @@ +This is a goinstall-able mirror of modified code already published at: +http://git.nic.cz/redmine/projects/gofileutil/repository + +Packages in this repository: + +Install: $go get github.com/cznic/fileutil +Godocs: http://godoc.org/github.com/cznic/fileutil + +Install: $go get github.com/cznic/fileutil/storage +Godocs: http://godoc.org/github.com/cznic/fileutil/storage + +Install: $go get github.com/cznic/fileutil/falloc +Godocs: http://godoc.org/github.com/cznic/fileutil/falloc + +Install: $go get github.com/cznic/fileutil/hdb +Godocs: http://godoc.org/github.com/cznic/fileutil/hdb diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/all_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/all_test.go new file mode 100644 index 00000000..21a8fa94 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/all_test.go @@ -0,0 +1,39 @@ +// Copyright (c) 2014 The fileutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package fileutil + +import ( + "os" + "path/filepath" + "strings" + "testing" +) + +func TestTempFile(t *testing.T) { + f, err := TempFile("", "abc", "mno.xyz") + if err != nil { + t.Fatal(err) + } + + n := f.Name() + t.Log(n) + defer func() { + f.Close() + os.Remove(n) + }() + + base := filepath.Base(n) + if base == "abcmno.xyz" { + t.Fatal(base) + } + + if !strings.HasPrefix(base, "abc") { + t.Fatal(base) + } + + if !strings.HasSuffix(base, "mno.xyz") { + t.Fatal(base) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/LICENSE new file mode 100644 index 00000000..1e92e33d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of CZ.NIC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/README b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/README new file mode 100644 index 00000000..23313fc5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/README @@ -0,0 +1,5 @@ +This is a goinstall-able mirror of modified code already published at: +https://git.nic.cz/redmine/projects/gofileutil/repository/show/falloc + +Install: $go get github.com/cznic/fileutil/falloc +Godocs: http://gopkgdoc.appspot.com/pkg/github.com/cznic/fileutil/falloc diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/all_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/all_test.go new file mode 100644 index 00000000..034d5087 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/all_test.go @@ -0,0 +1,3105 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// blame: jnml, labs.nic.cz + +package falloc + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io/ioutil" + "log" + "math" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "camlistore.org/third_party/github.com/cznic/fileutil" + "camlistore.org/third_party/github.com/cznic/fileutil/storage" + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +var ( + blockFlag = flag.Uint("block", 256, "block size for some of the dev tests") + cacheTotalFlag = flag.Int64("cachemax", 1<<25, "cache total bytes") + cachedFlag = flag.Bool("cache", false, "enable caching store") + devFlag = flag.Bool("dev", false, "enable dev tests") + dropFlag = flag.Bool("drop", false, "drop system file cache for some of the dev tests before measurement") + fadviseFlag = flag.Bool("fadvise", false, "hint kernel about random file access") + nFlag = flag.Int("n", 1, "parameter for some of the dev tests") + probeFlag = flag.Bool("probe", false, "report store probe statistics") + optGo = flag.Int("go", 3, "GOMAXPROCS") +) + +func init() { + flag.Parse() + runtime.GOMAXPROCS(*optGo) +} + +func temp() (dir, name string) { + dir, err := ioutil.TempDir("", "test-falloc-") + if err != nil { + panic(err) + } + + name = filepath.Join(dir, "test.db") + return dir, name +} + +type balancedAcid struct { + storage.Accessor + nesting int +} + +func newBalancedAcid(store storage.Accessor) storage.Accessor { + return &balancedAcid{store, 0} +} + +func (b *balancedAcid) BeginUpdate() error { + if b.nesting < 0 { + return errors.New("BeginUpdate with nesting < 0") + } + + b.nesting++ + return nil +} + +func (b *balancedAcid) EndUpdate() error { + if b.nesting <= 0 { + return errors.New("EndUpdate with nesting <= 0") + } + + b.nesting-- + return nil +} + +func (b *balancedAcid) Close() error { + if b.nesting != 1 { + return fmt.Errorf("before Close(): nesting %d %p", b.nesting, b) + } + + if err := b.Accessor.Close(); err != nil { + return err + } + + if b.nesting != 1 { + return 
fmt.Errorf("after Close(): nesting %d", b.nesting) + } + + return nil +} + +func fopen(fn string) (f *File, err error) { + var store storage.Accessor + if store, err = storage.OpenFile(fn, os.O_RDWR, 0666); err != nil { + return + } + + var advise func(int64, int, bool) + if *fadviseFlag { + file := store.(*storage.FileAccessor).File + if err = fileutil.Fadvise(file, 0, 0, fileutil.POSIX_FADV_RANDOM); err != nil { + return + } + advise = func(off int64, len int, write bool) { + if err = fileutil.Fadvise(file, off, off+int64(len), fileutil.POSIX_FADV_DONTNEED); err != nil { + log.Fatal("advisor advise err", err) + } + } + } + + var prob *storage.Probe + if *probeFlag { + prob = storage.NewProbe(store, nil) + store = prob + } + if *cachedFlag { + if store, err = storage.NewCache(store, *cacheTotalFlag, advise); err != nil { + return + } + + if *probeFlag { + store = storage.NewProbe(store, prob) + } + } + f, err = Open(newBalancedAcid(store)) + return +} + +func fcreate(fn string) (f *File, err error) { + var store storage.Accessor + if store, err = storage.OpenFile(fn, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666); err != nil { + return + } + + var advise func(int64, int, bool) + if *fadviseFlag { + file := store.(*storage.FileAccessor).File + if err = fileutil.Fadvise(file, 0, 0, fileutil.POSIX_FADV_RANDOM); err != nil { + return + } + advise = func(off int64, len int, write bool) { + if err = fileutil.Fadvise(file, off, off+int64(len), fileutil.POSIX_FADV_DONTNEED); err != nil { + log.Fatal("advisor advise err", err) + } + } + } + + var prob *storage.Probe + if *probeFlag { + prob = storage.NewProbe(store, nil) + store = prob + } + if *cachedFlag { + if store, err = storage.NewCache(store, *cacheTotalFlag, advise); err != nil { + return + } + + if *probeFlag { + store = storage.NewProbe(store, prob) + } + } + f, err = New(newBalancedAcid(store)) + return +} + +func probed(t *testing.T, f *File) { + if f == nil { + return + } + + dump := func(p *storage.Probe) { + 
t.Logf("OpsRd %d OpsWr %d BytesRd %d(avg %.1f) BytesWr %d(avg %.1f) SectorsRd %d(%d, +%d, x%.2f) SectorsWr %d(%d, +%d, x%.2f)", + p.OpsRd, p.OpsWr, + p.BytesRd, float64(p.BytesRd)/float64(p.OpsRd), + p.BytesWr, float64(p.BytesWr)/float64(p.OpsWr), + p.SectorsRd, + p.SectorsRd<<9, + p.SectorsRd<<9-p.BytesRd, + float64(p.SectorsRd<<9)/float64(p.BytesRd), + p.SectorsWr, + p.SectorsWr<<9, + p.SectorsWr<<9-p.BytesWr, + float64(p.SectorsWr<<9)/float64(p.BytesWr), + ) + } + + if ph, ok := f.Accessor().(*storage.Probe); ok { + dump(ph) + if c, ok := ph.Accessor.(*storage.Cache); ok { + if pl, ok := c.Accessor().(*storage.Probe); ok { + dump(pl) + } + } + } +} + +func (f *File) audit() (usedblocks, totalblocks int64, err error) { + defer func() { + if e := recover(); e != nil { + err = e.(error) + } + }() + + fi, err := f.f.Stat() + if err != nil { + panic(err) + } + + freemap := map[int64]int64{} + fp := int64(0) + buf := make([]byte, 22) + freeblocks := int64(0) + + // linear scan + for fp < fi.Size() { + totalblocks++ + typ, size := f.getInfo(fp >> 4) + f.read(buf[:1], fp+size<<4-1) + last := buf[0] + switch { + default: + panic("internal error") + case typ == 0: + if last != 0 { + panic(fmt.Errorf("@%#x used empty, last @%#x: %#x != 0", fp, fp+size<<4-1, last)) + } + case typ >= 0x1 && typ <= 0xed: + if last >= 0xfe { + panic(fmt.Errorf("@%#x used short, last @%#x: %#x > 0xfe", fp, fp+size<<4-1, last)) + } + case typ >= 0xee && typ <= 0xfb: + if last > 1 { + panic(fmt.Errorf("@%#x used esc short, last @%#x: %#x > 1", fp, fp+size<<4-1, last)) + } + case typ == 0xfc: + f.read(buf[:2], fp+1) + switch n := int(buf[0])<<8 + int(buf[1]); { + default: + panic(fmt.Errorf("@%#x used long, illegal content length %#x < 0xee(238)", fp, n)) + case n >= 0xee && n <= 0xf0f0: + if last >= 0xfe { + panic(fmt.Errorf("@%#x used long, last @%#x: %#x > 0xfe", fp, fp+size<<4-1, last)) + } + case n >= 0xf0f1 && n <= 0xffff: + if last > 1 { + panic(fmt.Errorf("@%#x used esc long, last @%#x: 
%#x > 1", fp, fp+size<<4-1, last)) + } + } + case typ == 0xfd: + if last != 0 { + panic(fmt.Errorf("@%#x reloc, last @%#x: %#x != 0", fp, fp+size<<4-1, last)) + } + + var target int64 + f.read(buf[:7], fp+1) + (*Handle)(&target).Get(buf) + if target >= f.atoms { + panic(fmt.Errorf("@%#x illegal reloc, target %#x > f.atoms(%#x)", fp, target, f.atoms)) + } + + ttyp, _ := f.getInfo(target) + if ttyp >= 0xfe { + panic(fmt.Errorf("@%#x reloc, points to unused @%#x", fp, target)) + } + + if ttyp == 0xfd { + panic(fmt.Errorf("@%#x reloc, points to reloc @%#x", fp, target)) + } + case typ == 0xfe: + if size < 2 { + panic(fmt.Errorf("@%#x illegal free block, atoms %d < 2", fp, size)) + } + + if fp>>4 < f.canfree { + panic(fmt.Errorf("@%#x illegal free block @ < f.canfree", fp)) + } + + f.read(buf[:22], fp) + var prev, next, sz int64 + (*Handle)(&prev).Get(buf[1:]) + (*Handle)(&next).Get(buf[8:]) + f.checkPrevNext(fp, prev, next) + f.read(buf[:7], fp+size<<4-8) + (*Handle)(&sz).Get(buf) + if sz != size { + panic(fmt.Errorf("@%#x mismatch size, %d != %d", fp, sz, size)) + } + + if last != 0xfe { + panic(fmt.Errorf("@%#x free atom, last @%#x: %#x != 0xff", fp, fp+size<<4-1, last)) + } + freemap[fp>>4] = size + freeblocks++ + case typ == 0xff: + f.read(buf[:14], fp+1) + var prev, next int64 + (*Handle)(&prev).Get(buf) + (*Handle)(&next).Get(buf[7:]) + f.checkPrevNext(fp, prev, next) + if last != 0xff { + panic(fmt.Errorf("@%#x free atom, last @%#x: %#x != 0xff", fp, fp+size<<4-1, last)) + } + freemap[fp>>4] = size + freeblocks++ + } + fp += size << 4 + } + usedblocks = totalblocks - freeblocks + + // check free table + for size := len(f.freetab) - 1; size > 0; size-- { + var prev, next, fprev int64 + this := f.freetab[size] + for this != 0 { + sz, ok := freemap[this] + if !ok { + panic(fmt.Errorf("bad freetab[%d] item @%#x", size, this)) + } + + delete(freemap, this) + + if sz < int64(size) { + panic(fmt.Errorf("bad freetab[%d] item size @%#x %d", size, this, sz)) + } + + if sz 
== 1 { + f.read(buf[:15], this<<4) + (*Handle)(&fprev).Get(buf[1:]) + if fprev != prev { + panic(fmt.Errorf("bad fprev %#x, exp %#x", fprev, prev)) + } + + (*Handle)(&next).Get(buf[8:]) + } else { + f.read(buf, this<<4) + (*Handle)(&fprev).Get(buf[1:]) + if fprev != prev { + panic(fmt.Errorf("bad fprev %#x, exp %#x", fprev, prev)) + } + var fsz int64 + (*Handle)(&fsz).Get(buf[15:]) + if fsz != sz { + panic(fmt.Errorf("bad fsz %d @%#x, exp %#x", fsz, this<<4, sz)) + } + + (*Handle)(&next).Get(buf[8:]) + } + + prev, this = this, next + } + } + + if n := len(freemap); n != 0 { + for h, s := range freemap { + panic(fmt.Errorf("%d lost free blocks in freemap, e.g. %d free atoms @%#x", n, s, h)) + } + } + + return + +} + +func (f *File) checkPrevNext(fp, prev, next int64) { + if prev != 0 && prev < f.canfree { + panic(fmt.Errorf("@%#x illegal free atom, prev %#x < f.canfree(%#x)", fp, prev, f.canfree)) + } + + if prev >= f.atoms { + panic(fmt.Errorf("@%#x illegal free atom, prev %#x > f.atoms", fp, prev)) + } + + if next != 0 && next < f.canfree { + panic(fmt.Errorf("@%#x illegal free atom, next %#x < f.canfree(%#x)", fp, next, f.canfree)) + } + + if next >= f.atoms { + panic(fmt.Errorf("@%#x illegal free atom, next %#x > f.atoms", fp, next)) + } +} + +func reaudit(t *testing.T, f *File, fn string) (of *File) { + var err error + if _, _, err := f.audit(); err != nil { + t.Fatal(err) + } + + if err := f.Close(); err != nil { + t.Fatal(err) + } + + f = nil + runtime.GC() + if of, err = fopen(fn); err != nil { + t.Fatal(err) + } + + if _, _, err := of.audit(); err != nil { + t.Fatal(err) + } + + return +} + +func TestCreate(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + err := os.Remove(name) + if err != nil { + t.Fatal(err) + } + }() + + f.Accessor().Sync() + probed(t, f) + if err = f.Close(); err != nil { + t.Log(f.f.(*balancedAcid).nesting) + t.Fatal(err) + } + + b, err 
:= ioutil.ReadFile(name) + if err != nil { + t.Fatal(err) + } + + x := b[:16] + if !bytes.Equal(x, hdr) { + t.Fatalf("\n% x\n% x", x, hdr) + } + + x = b[16:32] + if !bytes.Equal(x, empty) { + t.Fatalf("\n% x\n% x", x, hdr) + } +} + +func TestOpen(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + probed(t, f) + ec := f.Close() + er := os.Remove(name) + if ec != nil { + t.Fatal(ec) + } + + if er != nil { + t.Fatal(er) + } + }() + + if err := f.Close(); err != nil { + t.Fatal(err) + } + + if f, err = fopen(name); err != nil { + t.Fatal(err) + } + + for i, p := range f.freetab { + if p != 0 { + t.Fatal(i+1, p) + } + } +} + +func alloc(f *File, b []byte) (y int64) { + if h, err := f.Alloc(b); err != nil { + panic(err) + } else { + y = int64(h) + } + return +} + +func realloc(f *File, atom int64, b []byte, keepHandle bool) (y int64) { + if h, err := f.Realloc(Handle(atom), b, keepHandle); err != nil { + panic(err) + } else { + y = int64(h) + } + return +} + +func testContentEncodingDecoding(t *testing.T, min, max int) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + ec := f.Close() + er := os.Remove(name) + if ec != nil { + t.Fatal(ec) + } + + if er != nil { + t.Fatal(er) + } + }() + + b := make([]byte, max) + r, err := mathutil.NewFC32(math.MinInt32, math.MaxInt32, true) + if err != nil { + t.Fatal(err) + } + + blocks := int64(3) + a := make([]int64, 0, 4*(max-min+1)) + for cl := min; cl <= max; cl++ { + src := b[:cl] + for i := range src { + b[i] = byte(r.Next()) + } + a = append(a, alloc(f, src)) + blocks++ + if cl == 0 { + continue + } + + for i := range src { + b[i] = byte(r.Next()) + } + src[cl-1] = 0xfd + a = append(a, alloc(f, src)) + blocks++ + for i := range src { + b[i] = byte(r.Next()) + } + src[cl-1] = 0xfe + a = append(a, alloc(f, src)) + blocks++ + for i := range src { + 
b[i] = byte(r.Next()) + } + src[cl-1] = 0xff + a = append(a, alloc(f, src)) + blocks++ + } + + f.Accessor().Sync() + probed(t, f) + if err := f.Close(); err != nil { + t.Fatal(err) + } + + f = nil + runtime.GC() + if f, err = fopen(name); err != nil { + t.Fatal(err) + } + + r.Seek(0) + ai := 0 + for cl := min; cl <= max; cl++ { + h := a[ai] + ai++ + src := b[:cl] + for i := range src { + b[i] = byte(r.Next()) + } + got, _ := f.readUsed(h) + if !bytes.Equal(src, got) { + t.Fatalf("cl %d atom %#x\nexp % x\ngot % x", cl, h, src, got) + } + if cl == 0 { + continue + } + + for i := range src { + b[i] = byte(r.Next()) + } + src[cl-1] = 0xfd + h = a[ai] + ai++ + got, _ = f.readUsed(h) + if !bytes.Equal(src, got) { + t.Fatalf("cl %d atom %#x\nexp % x\ngot % x", cl, h, src, got) + } + + for i := range src { + b[i] = byte(r.Next()) + } + src[cl-1] = 0xfe + h = a[ai] + ai++ + got, _ = f.readUsed(h) + if !bytes.Equal(src, got) { + t.Fatalf("cl %d atom %#x\nexp % x\ngot % x", cl, h, src, got) + } + + for i := range src { + b[i] = byte(r.Next()) + } + src[cl-1] = 0xff + h = a[ai] + ai++ + got, _ = f.readUsed(h) + if !bytes.Equal(src, got) { + t.Fatalf("cl %d atom %#x\nexp % x\ngot % x", cl, h, src, got) + } + } + + auditblocks, _, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if auditblocks != blocks { + t.Fatal(auditblocks, blocks) + } + + if f = reaudit(t, f, name); err != nil { + t.Fatal(err) + } +} + +func TestContentEncodingDecoding(t *testing.T) { + testContentEncodingDecoding(t, 0, 1024) + testContentEncodingDecoding(t, 61680-17, 61680) +} + +type freeItem struct { + size int64 + head int64 +} + +func (f *File) reportFree() (report []freeItem) { + for size, head := range f.freetab { + if size != 0 && head != 0 { + report = append(report, freeItem{int64(size), head}) + } + } + return +} + +func free(f *File, h int64) { + if err := f.Free(Handle(h)); err != nil { + panic(err) + } +} + +func testFreeTail(t *testing.T, b []byte) { + dir, name := temp() + defer 
os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + ec := f.Close() + er := os.Remove(name) + if ec != nil { + t.Fatal(ec) + } + + if er != nil { + t.Fatal(er) + } + }() + + fs0 := f.atoms + used0, total0, err := f.audit() + if err != nil { + panic(err) + } + + if used0 != total0 { + t.Fatal(used0, total0) + } + + handle := alloc(f, b) + free(f, handle) + if fs1 := f.atoms; fs1 != fs0 { + t.Fatal(fs1, fs0) + } + + if rep := f.reportFree(); len(rep) != 0 { + t.Fatal(rep) + } + + if err := f.Close(); err != nil { + t.Fatal(err) + } + + f = nil + runtime.GC() + if f, err = fopen(name); err != nil { + t.Fatal(err) + } + + used, total, err := f.audit() + if err != nil { + panic(err) + } + + if used != used0 { + t.Fatal(used, used0) + } + + if total != total0 { + t.Fatal(total, total0) + } +} + +func TestFreeTail(t *testing.T) { + b := make([]byte, 61680) + for n := 0; n <= 253+16; n++ { + data := b[:n] + testFreeTail(t, data) + if n == 0 { + continue + } + + data[n-1] = 0xff + testFreeTail(t, data) + data[n-1] = 0 + } + + for n := 61680 - 16; n <= 61680; n++ { + data := b[:n] + testFreeTail(t, data) + data[n-1] = 0xff + testFreeTail(t, data) + data[n-1] = 0 + } +} + +func testFreeTail2(t *testing.T, b []byte) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + ec := f.Close() + er := os.Remove(name) + if ec != nil { + t.Fatal(ec) + } + + if er != nil { + t.Fatal(er) + } + }() + + fs0 := f.atoms + used0, total0, err := f.audit() + if err != nil { + panic(err) + } + + if used0 != total0 { + t.Fatal(used0, total0) + } + + handle := alloc(f, b) + handle2 := alloc(f, b) + free(f, handle) + free(f, handle2) + if fs1 := f.atoms; fs1 != fs0 { + t.Fatal(fs1, fs0) + } + + if rep := f.reportFree(); len(rep) != 0 { + t.Fatal(rep) + } + + if err := f.Close(); err != nil { + t.Fatal(err) + } + + f = nil + runtime.GC() + if f, err = fopen(name); err 
!= nil { + t.Fatal(err) + } + + used, total, err := f.audit() + if err != nil { + panic(err) + } + + if used != used0 { + t.Fatal(used, used0) + } + + if total != total0 { + t.Fatal(total, total0) + } +} + +func TestFreeTail2(t *testing.T) { + b := make([]byte, 61680) + for n := 0; n <= 253+16; n++ { + data := b[:n] + testFreeTail2(t, data) + if n == 0 { + continue + } + + data[n-1] = 0xff + testFreeTail2(t, data) + data[n-1] = 0 + } + + for n := 61680 - 16; n <= 61680; n++ { + data := b[:n] + testFreeTail2(t, data) + data[n-1] = 0xff + testFreeTail2(t, data) + data[n-1] = 0 + } +} + +func testFreeIsolated(t *testing.T, b []byte) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + ec := f.Close() + er := os.Remove(name) + if ec != nil { + t.Fatal(ec) + } + + if er != nil { + t.Fatal(er) + } + }() + + rqAtoms := rq2Atoms(len(b)) + left := alloc(f, nil) + handle := alloc(f, b) + right := alloc(f, nil) + + fs0 := f.atoms + used0, total0, err := f.audit() + if err != nil { + panic(err) + } + + if used0 != total0 { + t.Fatal(used0, total0) + } + + free(f, handle) + if fs1 := f.atoms; fs1 != fs0 { + t.Fatal(fs1, fs0) + } + + rep := f.reportFree() + if len(rep) != 1 { + t.Fatal(rep) + } + + if x := rep[0]; x.size != rqAtoms || x.head != handle { + t.Fatal(x) + } + + used, total, err := f.audit() + if err != nil { + panic(err) + } + + if n, free := f.getSize(left); n != 1 || free { + t.Fatal(n, free) + } + + if n, free := f.getSize(right); n != 1 || free { + t.Fatal(n, free) + } + + if used != used0-1 { + t.Fatal(used, used0) + } + + if total != total0 { + t.Fatal(total, total0) + } + + if free := total - used; free != 1 { + t.Fatal(free) + } + + // verify persisted file correct + if err := f.Close(); err != nil { + t.Fatal(err) + } + + f = nil + runtime.GC() + if f, err = fopen(name); err != nil { + t.Fatal(err) + } + + if fs1 := f.atoms; fs1 != fs0 { + t.Fatal(fs1, fs0) + } + + rep = 
f.reportFree() + if len(rep) != 1 { + t.Fatal(rep) + } + + if x := rep[0]; x.size != rqAtoms || x.head != handle { + t.Fatal(x) + } + + used, total, err = f.audit() + if err != nil { + panic(err) + } + + if n, free := f.getSize(left); n != 1 || free { + t.Fatal(n, free) + } + + if n, free := f.getSize(right); n != 1 || free { + t.Fatal(n, free) + } + + if used != used0-1 { + t.Fatal(used, used0) + } + + if total != total0 { + t.Fatal(total, total0) + } + + if free := total - used; free != 1 { + t.Fatal(free) + } + +} + +func TestFreeIsolated(t *testing.T) { + b := make([]byte, 61680) + for n := 0; n <= 253+16; n++ { + data := b[:n] + testFreeIsolated(t, data) + } + + for n := 61680 - 16; n <= 61680; n++ { + data := b[:n] + testFreeIsolated(t, data) + } +} + +func testFreeBlockList(t *testing.T, a, b int) { + var h [2]int64 + + t.Log(a, b) + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + alloc(f, nil) + h[0] = alloc(f, nil) + alloc(f, nil) + h[1] = alloc(f, nil) + alloc(f, nil) + + if err := f.Close(); err != nil { + t.Fatal(err) + } + + f = nil + runtime.GC() + if f, err = fopen(name); err != nil { + t.Fatal(err) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if used-used0 != 5 || total-total0 != 5 || used != total { + t.Fatal(used0, total0, used, total) + } + + free(f, h[a]) + free(f, h[b]) + + used, total, err = f.audit() + if err != nil { + t.Fatal(err) + } + + if used-used0 != 3 || total-total0 != 5 || total-used != 2 { + t.Fatal(used0, total0, used, total) + } + + if err := f.Close(); err != nil { + t.Fatal(err) + } + + f = nil + runtime.GC() + if f, err = fopen(name); err != nil { + t.Fatal(err) + } + + used, total, err = f.audit() + if err != nil 
{ + t.Fatal(err) + } + + if used-used0 != 3 || total-total0 != 5 || total-used != 2 { + t.Fatal(used0, total0, used, total) + } +} + +func TestFreeBlockList(t *testing.T) { + testFreeBlockList(t, 0, 1) + testFreeBlockList(t, 1, 0) +} + +func testFreeBlockList2(t *testing.T, a, b, c int) { + var h [3]int64 + + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + alloc(f, nil) + h[0] = alloc(f, nil) + alloc(f, nil) + h[1] = alloc(f, nil) + alloc(f, nil) + h[2] = alloc(f, nil) + alloc(f, nil) + + if err := f.Close(); err != nil { + t.Fatal(err) + } + + f = nil + runtime.GC() + if f, err = fopen(name); err != nil { + t.Fatal(err) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if used-used0 != 7 || total-total0 != 7 || used != total { + t.Fatal(used0, total0, used, total) + } + + free(f, h[a]) + free(f, h[b]) + free(f, h[c]) + + used, total, err = f.audit() + if err != nil { + t.Fatal(err) + } + + if used-used0 != 4 || total-total0 != 7 || total-used != 3 { + t.Fatal(used0, total0, used, total) + } + + if err := f.Close(); err != nil { + t.Fatal(err) + } + + f = nil + runtime.GC() + if f, err = fopen(name); err != nil { + t.Fatal(err) + } + + used, total, err = f.audit() + if err != nil { + t.Fatal(err) + } + + if used-used0 != 4 || total-total0 != 7 || total-used != 3 { + t.Fatal(used0, total0, used, total) + } +} + +func TestFreeBlockList2(t *testing.T) { + testFreeBlockList2(t, 0, 1, 2) + testFreeBlockList2(t, 0, 2, 1) + testFreeBlockList2(t, 1, 0, 2) + testFreeBlockList2(t, 1, 2, 0) + testFreeBlockList2(t, 2, 0, 1) + testFreeBlockList2(t, 2, 1, 0) +} + +var crng *mathutil.FC32 + +func init() { + var err error + if crng, err = mathutil.NewFC32(0, math.MaxInt32, 
true); err != nil { + panic(err) + } +} + +func content(b []byte, h int64) (c []byte) { + crng.Seed(h) + crng.Seek(0) + c = b[:crng.Next()%61681] + for i := range c { + c[i] = byte(crng.Next()) + } + return +} + +func testFreeBlockList3(t *testing.T, n, mod int) { + rng, err := mathutil.NewFC32(0, n-1, true) + if err != nil { + t.Fatal(err) + } + + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + ha := make([]int64, n) + b := make([]byte, 61680) + for i := range ha { + h := f.atoms + ha[i] = h + c := content(b, h) + if alloc(f, c) != h { + t.Fatal(h) + } + } + f = reaudit(t, f, name) + del := map[int64]bool{} + for _ = range ha { + i := rng.Next() + if i%mod != 0 { + h := ha[i] + free(f, h) + del[h] = true + } + } + f = reaudit(t, f, name) + for _, h := range ha { + if !del[h] { + exp := content(b, h) + got, _ := f.readUsed(h) + if !bytes.Equal(exp, got) { + t.Fatal(len(got), len(exp)) + } + } + } +} + +func TestFreeBlockList3(t *testing.T) { + testFreeBlockList3(t, 111, 1) + testFreeBlockList3(t, 151, 2) + testFreeBlockList3(t, 170, 3) + testFreeBlockList3(t, 170, 4) +} + +func TestRealloc1(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + h10 := alloc(f, nil) + h20 := alloc(f, nil) + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + exp := c[:15] + if handle := realloc(f, h10, exp, false); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); 
len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 0 || diftotal != 0 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRealloc1Keep(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + h10 := alloc(f, nil) + h20 := alloc(f, nil) + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + exp := c[:15] + if handle := realloc(f, h10, exp, true); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 0 || diftotal != 0 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRealloc2(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + h10 
:= alloc(f, c[:31]) + h20 := alloc(f, nil) + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + exp := c[:15] + if handle := realloc(f, h10, exp, false); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 0 || diftotal != 1 || free != 1 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRealloc2Keep(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + h10 := alloc(f, c[:31]) + h20 := alloc(f, nil) + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + exp := c[:15] + if handle := realloc(f, h10, exp, true); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 0 || diftotal != 1 || free != 1 { + t.Fatal(difused, diftotal, free) + } +} + 
+func TestRealloc3(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + h10 := alloc(f, nil) + h20 := alloc(f, nil) + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + exp := c[:31] + var handle int64 + if handle = realloc(f, h10, exp, false); handle == h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 0 || diftotal != 1 || free != 1 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRealloc3Keep(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + h10 := alloc(f, nil) + h20 := alloc(f, nil) + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + exp := c[:31] + var handle int64 + if handle = realloc(f, h10, exp, true); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ 
:= f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 1 || diftotal != 1 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRealloc4Keep(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + h10 := alloc(f, c[:31]) + h20 := alloc(f, nil) + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + exp := c[:47] + var handle int64 + if handle = realloc(f, h10, exp, true); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 1 || diftotal != 2 || free != 1 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRealloc5(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + h10 := alloc(f, nil) + h15 := alloc(f, nil) + h20 := 
alloc(f, nil) + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + free(f, h15) + exp := c[:31] + var handle int64 + if handle = realloc(f, h10, exp, false); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != -1 || diftotal != -1 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRealloc5Keep(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + h10 := alloc(f, nil) + h15 := alloc(f, nil) + h20 := alloc(f, nil) + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + free(f, h15) + exp := c[:31] + var handle int64 + if handle = realloc(f, h10, exp, true); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != -1 || 
diftotal != -1 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRealloc6(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + h10 := alloc(f, nil) + h15 := alloc(f, c[:31]) + h20 := alloc(f, nil) + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + free(f, h15) + exp := c[:31] + var handle int64 + if handle = realloc(f, h10, exp, false); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != -1 || diftotal != 0 || free != 1 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRealloc6Keep(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + h10 := alloc(f, nil) + h15 := alloc(f, c[:31]) + h20 := alloc(f, nil) + + used0, total0, err := f.audit() + if err != nil { + t.Fatal(err) + } + + free(f, h15) + exp := c[:31] + var handle int64 + if handle = realloc(f, h10, exp, true); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(handle); !bytes.Equal(got, 
exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(handle); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != -1 || diftotal != 0 || free != 1 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRelocRealloc1(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + + h10 := alloc(f, nil) + h20 := alloc(f, nil) + var handle int64 + if handle = realloc(f, h10, b[:31], true); handle != h10 { + t.Fatal(handle, h10) + } + + used0, total0, err := f.audit() // c+3, c+3 + if err != nil { + t.Fatal(err) + } + + c := content(b, 10) + exp := c[:15] + if handle = realloc(f, h10, exp, false); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() // c+2, c+2 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != -1 || diftotal != -1 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRelocRealloc1Keep(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + 
t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + + h10 := alloc(f, nil) + h20 := alloc(f, nil) + var handle int64 + if handle = realloc(f, h10, b[:31], true); handle != h10 { + t.Fatal(handle, h10) + } + + used0, total0, err := f.audit() // c+3, c+3 + if err != nil { + t.Fatal(err) + } + + c := content(b, 10) + exp := c[:15] + if handle = realloc(f, h10, exp, true); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + if got, _ := f.readUsed(h20); len(got) != 0 { + t.Fatal(len(got), 0) + } + + used, total, err := f.audit() // c+2, c+2 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != -1 || diftotal != -1 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRelocRealloc2(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + + h10 := alloc(f, nil) + h20 := alloc(f, nil) + var handle int64 + if handle = realloc(f, h10, b[:31], true); handle != h10 { + t.Fatal(handle, h10) + } + + free(f, h20) + + used0, total0, err := f.audit() // c+2, c+3 + if err != nil { + t.Fatal(err) + } + + c := content(b, 10) + exp := c[:31] + if handle = realloc(f, h10, exp, false); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + f = reaudit(t, f, 
name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + used, total, err := f.audit() // c+1, c+1 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != -1 || diftotal != -2 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRelocRealloc2Keep(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + + h10 := alloc(f, nil) + h20 := alloc(f, nil) + var handle int64 + if handle = realloc(f, h10, b[:31], true); handle != h10 { + t.Fatal(handle, h10) + } + + free(f, h20) + + used0, total0, err := f.audit() // c+2, c+3 + if err != nil { + t.Fatal(err) + } + + c := content(b, 10) + exp := c[:31] + if handle = realloc(f, h10, exp, true); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + used, total, err := f.audit() // c+1, c+1 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != -1 || diftotal != -2 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRelocRealloc3(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + + h10 := alloc(f, nil) + h20 := alloc(f, b[:31]) + var handle int64 + if handle = realloc(f, h10, b[:31], true); handle != h10 { + t.Fatal(handle, h10) 
+ } + + free(f, h20) + + used0, total0, err := f.audit() // c+2, c+3 + if err != nil { + t.Fatal(err) + } + + c := content(b, 10) + exp := c[:31] + if handle = realloc(f, h10, exp, false); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + used, total, err := f.audit() // c+1, c+1 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != -1 || diftotal != -2 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRelocRealloc3Keep(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + + h10 := alloc(f, nil) + h20 := alloc(f, b[:31]) + var handle int64 + if handle = realloc(f, h10, b[:31], true); handle != h10 { + t.Fatal(handle, h10) + } + + free(f, h20) + + used0, total0, err := f.audit() // c+2, c+3 + if err != nil { + t.Fatal(err) + } + + c := content(b, 10) + exp := c[:31] + if handle = realloc(f, h10, exp, true); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + used, total, err := f.audit() // c+1, c+1 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != -1 || diftotal != -2 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRelocRealloc4(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil 
{ + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + + h10 := alloc(f, nil) + _ = alloc(f, nil) + var handle int64 + if handle = realloc(f, h10, b[:47], true); handle != h10 { + t.Fatal(handle, h10) + } + + _ = alloc(f, nil) + + if handle = realloc(f, h10, b[:31], true); handle != h10 { + t.Fatal(handle, h10) + } + + used0, total0, err := f.audit() // c+4, c+5 + if err != nil { + t.Fatal(err) + } + + c := content(b, 10) + exp := c[:47] + if handle = realloc(f, h10, exp, false); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + used, total, err := f.audit() // c+4, c+4 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 0 || diftotal != -1 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRelocRealloc4Keep(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + + h10 := alloc(f, nil) + _ = alloc(f, nil) + var handle int64 + if handle = realloc(f, h10, b[:47], true); handle != h10 { + t.Fatal(handle, h10) + } + + _ = alloc(f, nil) + + if handle = realloc(f, h10, b[:31], true); handle != h10 { + t.Fatal(handle, h10) + } + + used0, total0, err := f.audit() // c+4, c+5 + if err != nil { + t.Fatal(err) + } + + c := content(b, 10) + exp := c[:47] + if handle = realloc(f, h10, exp, true); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + 
t.Fatal(len(got), len(exp)) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + used, total, err := f.audit() // c+4, c+4 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 0 || diftotal != -1 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRelocRealloc5(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + + h10 := alloc(f, nil) + _ = alloc(f, nil) + var handle int64 + if handle = realloc(f, h10, b[:31], true); handle != h10 { + t.Fatal(handle, h10) + } + + _ = alloc(f, nil) + + used0, total0, err := f.audit() // c+4, c+4 + if err != nil { + t.Fatal(err) + } + + c := content(b, 10) + exp := c[:47] + if handle = realloc(f, h10, exp, false); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + used, total, err := f.audit() // c+4, c+5 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 0 || diftotal != 1 || free != 1 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRelocRealloc5Keep(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + + h10 := alloc(f, nil) + _ = alloc(f, nil) + var handle int64 + if handle = realloc(f, h10, 
b[:31], true); handle != h10 { + t.Fatal(handle, h10) + } + + _ = alloc(f, nil) + + used0, total0, err := f.audit() // c+4, c+4 + if err != nil { + t.Fatal(err) + } + + c := content(b, 10) + exp := c[:47] + if handle = realloc(f, h10, exp, true); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + used, total, err := f.audit() // c+4, c+5 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 0 || diftotal != 1 || free != 1 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRelocRealloc6(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + + h10 := alloc(f, b[:31]) + h20 := alloc(f, nil) + _ = alloc(f, nil) + free(f, h20) + + used0, total0, err := f.audit() // c+2, c+3 + if err != nil { + t.Fatal(err) + } + + c := content(b, 10) + exp := c[:15] + if handle := realloc(f, h10, exp, false); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + used, total, err := f.audit() // c+2, c+3 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 0 || diftotal != 0 || free != 1 { + t.Fatal(difused, diftotal, free) + } +} + +func TestRelocRealloc6Keep(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer 
func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + + h10 := alloc(f, b[:31]) + h20 := alloc(f, nil) + _ = alloc(f, nil) + free(f, h20) + + used0, total0, err := f.audit() // c+2, c+3 + if err != nil { + t.Fatal(err) + } + + c := content(b, 10) + exp := c[:15] + if handle := realloc(f, h10, exp, true); handle != h10 { + t.Fatal(handle, h10) + } + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, exp) { + t.Fatal(len(got), len(exp)) + } + + used, total, err := f.audit() // c+2, c+3 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 0 || diftotal != 0 || free != 1 { + t.Fatal(difused, diftotal, free) + } +} + +func TestFreespaceReuse(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + c10 := c[0 : 0+15] + c20 := c[16:63] + c50 := c[64 : 64+15] + h10 := alloc(f, c10) + h201 := alloc(f, nil) + h202 := alloc(f, nil) + h203 := alloc(f, nil) + h50 := alloc(f, c50) + free(f, h201) + free(f, h202) + free(f, h203) + used0, total0, err := f.audit() // c+2, c+3 + if err != nil { + t.Fatal(err) + } + + h20 := alloc(f, c20) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, c10) { + t.Fatal() + } + + if got, _ := f.readUsed(h20); !bytes.Equal(got, c20) { + t.Fatal() + } + + if got, _ := f.readUsed(h50); !bytes.Equal(got, c50) { + t.Fatal() + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, c10) { + t.Fatal() + } + + if got, _ := f.readUsed(h20); !bytes.Equal(got, c20) { + 
t.Fatal() + } + + if got, _ := f.readUsed(h50); !bytes.Equal(got, c50) { + t.Fatal() + } + + used, total, err := f.audit() // c+3, c+3 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 1 || diftotal != 0 || free != 0 { + t.Fatal(difused, diftotal, free) + } +} + +func TestFreespaceReuse2(t *testing.T) { + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + c := content(b, 10) + + c10 := c[0 : 0+15] + c20 := c[16:47] + c50 := c[64 : 64+15] + h10 := alloc(f, c10) + h201 := alloc(f, nil) + h202 := alloc(f, nil) + h203 := alloc(f, nil) + h50 := alloc(f, c50) + free(f, h201) + free(f, h202) + free(f, h203) + used0, total0, err := f.audit() // c+2, c+3 + if err != nil { + t.Fatal(err) + } + + h20 := alloc(f, c20) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, c10) { + t.Fatal() + } + + if got, _ := f.readUsed(h20); !bytes.Equal(got, c20) { + t.Fatal() + } + + if got, _ := f.readUsed(h50); !bytes.Equal(got, c50) { + t.Fatal() + } + + f = reaudit(t, f, name) + + if got, _ := f.readUsed(h10); !bytes.Equal(got, c10) { + t.Fatal() + } + + if got, _ := f.readUsed(h20); !bytes.Equal(got, c20) { + t.Fatal() + } + + if got, _ := f.readUsed(h50); !bytes.Equal(got, c50) { + t.Fatal() + } + + used, total, err := f.audit() // c+3, c+4 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != 1 || diftotal != 1 || free != 1 { + t.Fatal(difused, diftotal, free) + } +} + +func testBug1(t *testing.T, swap bool) { + // Free lists table item for size 3856 points to list of free blocks + // NOT of size 3856 but at least 3856. 
+ + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + _ = alloc(f, nil) + b := make([]byte, 61680) + f1 := alloc(f, b) + f2 := alloc(f, b) + _ = alloc(f, nil) + + used0, total0, err := f.audit() // c+4, c+4 + if err != nil { + t.Fatal(err) + } + + if swap { + f1, f2 = f2, f1 + } + free(f, f1) + free(f, f2) + _ = alloc(f, nil) + + f = reaudit(t, f, name) + + used, total, err := f.audit() // c+3, c+4 + if err != nil { + t.Fatal(err) + } + + if difused, diftotal, free := used-used0, total-total0, total-used; difused != -1 || diftotal != 0 || free != 1 { + t.Fatal(difused, diftotal, free) + } +} + +func TestBug1(t *testing.T) { + testBug1(t, false) + testBug1(t, true) +} + +func TestMix(t *testing.T) { + if testing.Short() { + t.Log("skipped") + return + } + + const ( + n = 1 << 10 + ) + + if testing.Short() { + t.Log("skipped") + return + } + + t.Log(n) + dir, name := temp() + defer os.RemoveAll(dir) + + f, err := fcreate(name) + if err != nil { + t.Fatal(err) + } + + defer func() { + if f != nil { + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + f = nil + runtime.GC() + os.Remove(name) + }() + + b := make([]byte, 61680) + rng, err := mathutil.NewFC32(0, n-1, true) + if err != nil { + t.Fatal(err) + } + + ha := make([]int64, n) + payload := 0 + + t0 := time.Now() + // Alloc n block with upper half of content + for _ = range ha { + r := rng.Next() + c := content(b, int64(r)) + c = c[len(c)/2:] + ha[r] = alloc(f, c) + payload += len(c) + } + dt := float64(time.Now().Sub(t0)) / 1e9 + t.Logf("write time A %.3g", dt) + + // verify + f = reaudit(t, f, name) + t.Logf("size A %d for %d bytes (fill factor %3.1f%%)", f.atoms<<4, payload, 100*float64(payload)/float64(f.atoms<<4)) + t0 = time.Now() + for _ = range ha { + r := rng.Next() + c := content(b, 
int64(r)) + c = c[len(c)/2:] + if got, _ := f.readUsed(ha[r]); !bytes.Equal(got, c) { + t.Fatal() + } + } + dt = float64(time.Now().Sub(t0)) / 1e9 + t.Logf("read time A %.3g", dt) + // free half of the blocks + t0 = time.Now() + for i := 0; i < n/2; i++ { + free(f, ha[i]) + ha[i] = 0 + } + dt = float64(time.Now().Sub(t0)) / 1e9 + t.Logf("free time A %.3g", dt) + + // verify + f = reaudit(t, f, name) + t.Logf("size B %d (freeing half of the blocks)", f.atoms<<4) + t0 = time.Now() + for _ = range ha { + r := rng.Next() + h := ha[r] + if h == 0 { + continue + } + + c := content(b, int64(r)) + c = c[len(c)/2:] + if got, _ := f.readUsed(h); !bytes.Equal(got, c) { + t.Fatal() + } + } + dt = float64(time.Now().Sub(t0)) / 1e9 + t.Logf("read time B %.3g", dt) + + // reloc extend + t0 = time.Now() + for _ = range ha { + r := rng.Next() + h := ha[r] + if h == 0 { + continue + } + + c := content(b, int64(r)) + //f = reaudit(t, f, name) + if h2 := realloc(f, h, c, true); h2 != h { + t.Fatal() + } + } + dt = float64(time.Now().Sub(t0)) / 1e9 + t.Logf("realoc time B %.3g", dt) + + // verify + f = reaudit(t, f, name) + t.Logf("size C %d for %d bytes (reallocated all used blocks to double size, fill factor %3.1f%%", f.atoms<<4, payload, 100*float64(payload)/float64(f.atoms<<4)) + + t0 = time.Now() + for _ = range ha { + r := rng.Next() + h := ha[r] + if h == 0 { + continue + } + + c := content(b, int64(r)) + if got, _ := f.readUsed(ha[r]); !bytes.Equal(got, c) { + t.Fatal() + } + } + dt = float64(time.Now().Sub(t0)) / 1e9 + t.Logf("read time C %.3g", dt) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/docs.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/docs.go new file mode 100644 index 00000000..21772fcd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/docs.go @@ -0,0 +1,251 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +/* + +WIP: Package falloc provides allocation/deallocation of space within a +file/store (WIP, unstable API). + +Overall structure: + File == n blocks. + Block == n atoms. + Atom == 16 bytes. + +x6..x0 == least significant 7 bytes of a 64 bit integer, highest (7th) byte is +0 and is not stored in the file. + +Block first byte + +Aka block type tag. + +------------------------------------------------------------------------------ + +0xFF: Free atom (free block of size 1). + +------++---------++---------++------+ + | 0 || 1...7 || 8...14 || 15 | + +------++---------++---------++------+ + | 0xFF || p6...p0 || n6...n0 || 0xFF | + +------++---------++---------++------+ + +Link to the previous free block (atom addressed) is p6...p0, next dtto in +n6...n0. Doubly linked lists of "compatible" free blocks allows for free space +reclaiming and merging. "Compatible" == of size at least some K. Heads of all +such lists are organized per K or intervals of Ks elsewhere. + +------------------------------------------------------------------------------ + +0xFE: Free block, size == s6...s0 atoms. + +------++---------++---------++---------++-- + | +0 || 1...7 || 8...14 || 15...21 || 22...16*size-1 + +------++---------++---------++---------++-- + | 0xFE || p6...p0 || n6...n0 || s6...s0 || ... + +------++---------++---------++---------++-- + +Prev and next links as in the 0xFF first byte case. End of this block - see +"Block last byte": 0xFE bellow. Data between == undefined. + +------------------------------------------------------------------------------ + +0xFD: Relocated block. 
+ +------++---------++-----------++------+ + | 0 || 1...7 || 8...14 || 15 | + +------++---------++-----------++------+ + | 0xFD || r6...r0 || undefined || 0x00 | // == used block + +------++---------++-----------++------+ + +Relocation link is r6..r0 == atom address. Relocations MUST NOT chain and MUST +point to a "content" block, i.e. one with the first byte in 0x00...0xFC. + +Relocated block allows to permanently assign a handle/file pointer ("atom" +address) to some content and resize the content anytime afterwards w/o having +to update all the possible existing references to the original handle. + +------------------------------------------------------------------------------ + +0xFC: Used long block. + +------++---------++--------------------++---------+---+ + | 0 || 1...2 || 3...N+2 || | | + +------++---------++--------------------++---------+---+ + | 0xFC || n1...n0 || N bytes of content || padding | Z | + +------++---------++--------------------++---------+---+ + +This block type is used for content of length in N == 238...61680 bytes. N is +encoded as a 2 byte unsigned integer n1..n0 in network byte order. Values +bellow 238 are reserved, those content lengths are to be carried by the +0x00..0xFB block types. + + 1. n in 0x00EE...0xF0F0 is used for content under the same rules + as in the 0x01..0xED type. + + 2. If the last byte of the content is not the last byte of an atom then + the last byte of the block is 0x00. + + 3. If the last byte of the content IS the last byte of an atom: + + 3.1 If the last byte of content is in 0x00..0xFD then everything is OK. + + 3.2 If the last byte of content is 0xFE or 0xFF then the escape + via n > 0xF0F0 MUST be used AND the block's last byte is 0x00 or 0x01, + meaning value 0xFE and 0xFF respectively. + + 4. n in 0xF0F1...0xFFFF is like the escaped 0xEE..0xFB block. + N == 13 + 16(n - 0xF0F1). + +Discussion of the padding and Z fields - see the 0x01..0xED block type. 
+ +------------------------------------------------------------------------------ + +0xEE...0xFB: Used escaped short block. + +---++----------------------++---+ + | 0 || 1...N-1 || | + +---++----------------------++---+ + | X || N-1 bytes of content || Z | + +---++----------------------++---+ + +N == 15 + 16(X - 0xEE). Z is the content last byte encoded as follows. + +case Z == 0x00: The last byte of content is 0xFE + +case Z == 0x01: The last byte of content is 0xFF + +------------------------------------------------------------------------------ + +0x01...0xED: Used short block. + +---++--------------------++---------+---+ + | 0 || 1...N || | | + +---++--------------------++---------+---+ + | N || N bytes of content || padding | Z | + +---++--------------------++---------+---+ + +This block type is used for content of length in 1...237 bytes. The value of +the "padding" field, if of non zero length, is undefined. + +If the last byte of content is the last byte of an atom (== its file byte +offset & 0xF == 0xF) then such last byte MUST be in 0x00...0xFD. + +If the last byte of content is the last byte of an atom AND the last byte of +content is 0xFE or 0xFF then the short escape block type (0xEE...0xFB) MUST be +used. + +If the last byte of content is not the last byte of an atom, then the last byte +of such block, i.e. the Z field, which is also a last byte of some atom, MUST +be 0x00 (i.e. the used block marker). Other "tail" values are reserved. + +------------------------------------------------------------------------------ + +0x00: Used empty block. + +------++-----------++------+ + | 0 || 1...14 || 15 | + +------++-----------++------+ + | 0x00 || undefined || 0x00 | // == used block, other "tail" values reserved. + +------++-----------++------+ + +All of the rules for 0x01..0xED applies. Depicted only for its different +semantics (e.g. an allocated [existing] string but with length of zero). 
+ +============================================================================== + +Block last byte + +------------------------------------------------------------------------------ + +0xFF: Free atom. Layout - see "Block first byte": FF. + +------------------------------------------------------------------------------ + +0xFE: Free block, size n atoms. Preceding 7 bytes == size (s6...s0) of the free +block in atoms, network byte order + --++---------++------+ + || -8...-2 || -1 | + --++---------++------+ + ... || s6...s0 || 0xFE | <- block's last byte + --++---------++------+ + +Layout at start of this block - see "Block first byte": FE. + +------------------------------------------------------------------------------ + +0x00...0xFD: Used (non free) block. + +============================================================================== + +Free lists table + +The free lists table content is stored in the standard layout of a used block. + +A table item is a 7 byte size field followed by a 7 byte atom address field +(both in network byte order), thus every item is 14 contiguous bytes. The +item's address field is pointing to a free block. The size field determines +the minimal size (in atoms) of free blocks on that list. + +The free list table is n above items, thus the content has 14n bytes. Note that +the largest block content is 61680 bytes and as there are 14 bytes per table +item, so the table is limited to at most 4405 entries. + +Items in the table do not have to be sorted according to their size field values. + +No two items can have the same value of the size field. + +When freeing blocks, the block MUST be linked into an item list with the +highest possible size field, which is less or equal to the number of atoms in +the new free block. + +When freeing a block, the block MUST be first merged with any adjacent free +blocks (thus possibly creating a bigger free block) using information derived +from the adjacent blocks first and last bytes. 
Such merged free blocks MUST be +removed from their original doubly linked lists. Afterwards the new bigger free +block is put to the free list table in the appropriate item. + +Items with address field == 0 are legal. Such item is a placeholder for a empty +list of free blocks of the item's size. + +Items with size field == 0 are legal. Such item is a placeholder, used e.g. to +avoid further reallocations/redirecting of the free lists table. + +The largest possible allocation request (for content length 61680 bytes) is +0xF10 (3856) atoms. All free blocks of this or bigger size are presumably put +into a single table item with the size 3856. It may be useful to additionally +have a free lists table item which links free blocks of some bigger size (say +1M+) and then use the OS sparse file support (if present) to save the physical +space used by such free blocks. + +Smaller (<3856 atoms) free blocks can be organized exactly (every distinct size +has its table item) or the sizes can run using other schema like e.g. "1, 2, +4, 8, ..." (powers of 2) or "1, 2, 3, 5, 8, 13, ..." (the Fibonacci sequence) +or they may be fine tuned to a specific usage pattern. + +============================================================================== + +Header + +The first block of a file (atom address == file offset == 0) is the file header. +The header block has the standard layout of a used short non escaped block. + +Special conditions apply: The header block and its content MUST be like this: + + +------+---------+---------+------+ + | 0 | 1...7 | 8...14 | 15 | + +------+---------+---------+------+ + | 0x0F | m6...m0 | f6...f0 | FLTT | + +------+---------+---------+------+ + +m6..m0 is a "magic" value 0xF1C1A1FE51B1E. + +f6...f0 is the atom address of the free lists table (discussed elsewhere). +If f6...f0 == 0x00 the there is no free lists table (yet). + +FLTT describes the type of the Free List Table. 
Currently defined values: + +------------------------------------------------------------------------------ + +FLTT == 0: Free List Table is fixed at atom address 2. It has a fixed size for 3856 entries +for free list of size 1..3855 atoms and the last is for the list of free block >= 3856 atoms. +*/ +package falloc + +const ( + INVALID_HANDLE = Handle(-1) +) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/error.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/error.go new file mode 100644 index 00000000..dad3d29e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/error.go @@ -0,0 +1,130 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package falloc + +import "fmt" + +// EBadRequest is an error produced for invalid operation, e.g. for data of more than maximum allowed. +type EBadRequest struct { + Name string + Size int +} + +func (e *EBadRequest) Error() string { + return fmt.Sprintf("%s: size %d", e.Name, e.Size) +} + +// EClose is a file/store close error. +type EClose struct { + Name string + Err error +} + +func (e *EClose) Error() string { + return fmt.Sprintf("%sx: %s", e.Name, e.Err) +} + +// ECorrupted is a file/store format error. +type ECorrupted struct { + Name string + Ofs int64 +} + +func (e *ECorrupted) Error() string { + return fmt.Sprintf("%s: corrupted data @%#x", e.Name, e.Ofs) +} + +// ECreate is a file/store create error. +type ECreate struct { + Name string + Err error +} + +func (e *ECreate) Error() string { + return fmt.Sprintf("%s: %s", e.Name, e.Err) +} + +// EFreeList is a file/store format error. 
+type EFreeList struct { + Name string + Size int64 + Block int64 +} + +func (e *EFreeList) Error() string { + return fmt.Sprintf("%s: invalid free list item, size %#x, block %#x", e.Name, e.Size, e.Block) +} + +// EHandle is an error type reported for invalid Handles. +type EHandle struct { + Name string + Handle Handle +} + +func (e EHandle) Error() string { + return fmt.Sprintf("%s: invalid handle %#x", e.Name, e.Handle) +} + +// EHeader is a file/store format error. +type EHeader struct { + Name string + Header []byte + Expected []byte +} + +func (e *EHeader) Error() string { + return fmt.Sprintf("%s: invalid header, got [% x], expected [% x]", e.Name, e.Header, e.Expected) +} + +// ENullHandle is a file/store access error via a null handle. +type ENullHandle string + +func (e ENullHandle) Error() string { + return fmt.Sprintf("%s: access via null handle", e) +} + +// EOpen is a file/store open error. +type EOpen struct { + Name string + Err error +} + +func (e *EOpen) Error() string { + return fmt.Sprintf("%s: %s", e.Name, e.Err) +} + +// ERead is a file/store read error. +type ERead struct { + Name string + Ofs int64 + Err error +} + +func (e *ERead) Error() string { + return fmt.Sprintf("%s, %#x: %s", e.Name, e.Ofs, e.Err) +} + +// ESize is a file/store size error. +type ESize struct { + Name string + Size int64 +} + +func (e *ESize) Error() string { + return fmt.Sprintf("%s: invalid size %#x(%d), size %%16 != 0", e.Name, e.Size, e.Size) +} + +// EWrite is a file/store write error. 
+type EWrite struct { + Name string + Ofs int64 + Err error +} + +func (e *EWrite) Error() string { + return fmt.Sprintf("%s, %#x: %s", e.Name, e.Ofs, e.Err) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/falloc.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/falloc.go new file mode 100644 index 00000000..e70cdd44 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/falloc.go @@ -0,0 +1,676 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +/* + +This is an mostly (WIP) conforming implementation of the "specs" in docs.go. + +The main incompletness is support for only one kind of FTL, though this table kind is still per "specs". + +*/ + +package falloc + +import ( + "bytes" + "camlistore.org/third_party/github.com/cznic/fileutil/storage" + "sync" +) + +// Handle is a reference to a block in a file/store. +// Handle is an uint56 wrapped in an in64, i.e. the most significant byte must be always zero. +type Handle int64 + +// Put puts the 7 least significant bytes of h into b. The MSB of h should be zero. +func (h Handle) Put(b []byte) { + for ofs := 6; ofs >= 0; ofs-- { + b[ofs] = byte(h) + h >>= 8 + } +} + +// Get gets the 7 least significant bytes of h from b. The MSB of h is zeroed. +func (h *Handle) Get(b []byte) { + var x Handle + for ofs := 0; ofs <= 6; ofs++ { + x = x<<8 | Handle(b[ofs]) + } + *h = x +} + +// File is a file/store with space allocation/deallocation support. +type File struct { + f storage.Accessor + atoms int64 // current file size in atom units + canfree int64 // only blocks >= canfree can be subject to Free() + freetab [3857]int64 // freetab[0] is unused, freetab[1] is size 1 ptr, freetab[2] is size 2 ptr, ... 
+ rwm sync.RWMutex +} + +func (f *File) read(b []byte, off int64) { + if n, err := f.f.ReadAt(b, off); n != len(b) { + panic(&ERead{f.f.Name(), off, err}) + } +} + +func (f *File) write(b []byte, off int64) { + if n, err := f.f.WriteAt(b, off); n != len(b) { + panic(&EWrite{f.f.Name(), off, err}) + } +} + +var ( // R/O + hdr = []byte{0x0f, 0xf1, 0xc1, 0xa1, 0xfe, 0xa5, 0x1b, 0x1e, 0, 0, 0, 0, 0, 0, 2, 0} // free lists table @2 + empty = make([]byte, 16) + zero = []byte{0} + zero7 = make([]byte, 7) +) + +// New returns a new File backed by store or an error if any. +// Any existing data in store are discarded. +func New(store storage.Accessor) (f *File, err error) { + f = &File{f: store} + return f, storage.Mutate(store, func() (err error) { + if err = f.f.Truncate(0); err != nil { + return &ECreate{f.f.Name(), err} + } + + if _, err = f.Alloc(hdr[1:]); err != nil { //TODO internal panicking versions of the exported fns. + return + } + + if _, err = f.Alloc(nil); err != nil { // (empty) root @1 + return + } + + b := make([]byte, 3856*14) + for i := 1; i <= 3856; i++ { + Handle(i).Put(b[(i-1)*14:]) + } + if _, err = f.Alloc(b); err != nil { + return + } + + f.canfree = f.atoms + return + }) +} + +// Open returns a new File backed by store or an error if any. +// Store already has to be in a valid format. 
+func Open(store storage.Accessor) (f *File, err error) { + defer func() { + if e := recover(); e != nil { + f = nil + err = e.(error) + } + }() + + fi, err := store.Stat() + if err != nil { + panic(&EOpen{store.Name(), err}) + } + + fs := fi.Size() + if fs&0xf != 0 { + panic(&ESize{store.Name(), fi.Size()}) + } + + f = &File{f: store, atoms: fs >> 4} + b := make([]byte, len(hdr)) + f.read(b, 0) + if !bytes.Equal(b, hdr) { + panic(&EHeader{store.Name(), b, append([]byte{}, hdr...)}) + } + + var atoms int64 + b, atoms = f.readUsed(2) + f.canfree = atoms + 2 + ofs := 0 + var size, p Handle + for ofs < len(b) { + size.Get(b[ofs:]) + ofs += 7 + p.Get(b[ofs:]) + ofs += 7 + if sz, pp := int64(size), int64(p); size == 0 || size > 3856 || (pp != 0 && pp < f.canfree) || pp<<4 > fs-16 { + panic(&EFreeList{store.Name(), sz, pp}) + } + + f.freetab[size] = int64(p) + } + return +} + +// Accessor returns the File's underlying Accessor. +func (f *File) Accessor() storage.Accessor { + return f.f +} + +// Close closes f and returns an error if any. +func (f *File) Close() (err error) { + return storage.Mutate(f.Accessor(), func() (err error) { + if err = f.f.Close(); err != nil { + err = &EClose{f.f.Name(), err} + } + return + }) +} + +// Root returns the handle of the DB root (top level directory, ...). 
+func (f *File) Root() Handle { + return 1 +} + +func (f *File) readUsed(atom int64) (content []byte, atoms int64) { + b, redirected := make([]byte, 7), false +redir: + ofs := atom << 4 + f.read(b[:1], ofs) + switch pre := b[0]; { + default: + panic(&ECorrupted{f.f.Name(), ofs}) + case pre == 0x00: // Empty block + case pre >= 1 && pre <= 237: // Short + content = make([]byte, pre) + f.read(content, ofs+1) + case pre >= 0xee && pre <= 0xfb: // Short esc + content = make([]byte, 15+16*(pre-0xee)) + f.read(content, ofs+1) + content[len(content)-1] += 0xfe + case pre == 0xfc: // Long + f.read(b[:2], ofs+1) + n := int(b[0])<<8 + int(b[1]) + switch { + default: + panic(&ECorrupted{f.f.Name(), ofs + 1}) + case n >= 238 && n <= 61680: // Long non esc + content = make([]byte, n) + f.read(content, ofs+3) + case n >= 61681: // Long esc + content = make([]byte, 13+16*(n-0xf0f1)) + f.read(content, ofs+3) + content[len(content)-1] += 0xfe + } + case pre == 0xfd: // redir + if redirected { + panic(&ECorrupted{f.f.Name(), ofs}) + } + + f.read(b[:7], ofs+1) + (*Handle)(&atom).Get(b) + redirected = true + goto redir + } + return content, rq2Atoms(len(content)) +} + +func (f *File) writeUsed(b []byte, atom int64) { + n := len(b) + switch ofs, atoms, endmark := atom<<4, rq2Atoms(n), true; { + default: + panic("internal error") + case n == 0: + f.write(empty, ofs) + case n <= 237: + if (n+1)&0xf == 0 { // content end == atom end + if v := b[n-1]; v >= 0xfe { // escape + pre := []byte{byte((16*0xee + n - 15) >> 4)} + f.write(pre, ofs) + f.write(b[:n-1], ofs+1) + f.write([]byte{v - 0xfe}, ofs+atoms<<4-1) + return + } + endmark = false + } + // non esacpe + pre := []byte{byte(n)} + f.write(pre, ofs) + f.write(b, ofs+1) + if endmark { + f.write(zero, ofs+atoms<<4-1) // last block byte <- used block + } + case n > 237 && n <= 61680: + if (n+3)&0xf == 0 { // content end == atom end + if v := b[n-1]; v >= 0xfe { // escape + x := (16*0xf0f1 + n - 13) >> 4 + pre := []byte{0xFC, byte(x >> 8), 
byte(x)} + f.write(pre, ofs) + f.write(b[:n-1], ofs+3) + f.write([]byte{v - 0xfe}, ofs+atoms<<4-1) + return + } + endmark = false + } + // non esacpe + pre := []byte{0xfc, byte(n >> 8), byte(n)} + f.write(pre, ofs) + f.write(b, ofs+3) + if endmark { + f.write(zero, ofs+atoms<<4-1) // last block byte <- used block + } + } +} + +func rq2Atoms(rqbytes int) (rqatoms int64) { + if rqbytes > 237 { + rqbytes += 2 + } + return int64(rqbytes>>4 + 1) +} + +func (f *File) extend(b []byte) (handle int64) { + handle = f.atoms + f.writeUsed(b, handle) + f.atoms += rq2Atoms(len(b)) + return +} + +// Alloc stores b in a newly allocated space and returns its handle and an error if any. +func (f *File) Alloc(b []byte) (handle Handle, err error) { + err = storage.Mutate(f.Accessor(), func() (err error) { + rqAtoms := rq2Atoms(len(b)) + if rqAtoms > 3856 { + return &EBadRequest{f.f.Name(), len(b)} + } + + for foundsize, foundp := range f.freetab[rqAtoms:] { + if foundp != 0 { + // this works only for the current unique sizes list (except the last item!) + size := int64(foundsize) + rqAtoms + handle = Handle(foundp) + if size == 3856 { + buf := make([]byte, 7) + f.read(buf, int64(handle)<<4+15) + (*Handle)(&size).Get(buf) + } + f.delFree(int64(handle), size) + if rqAtoms < size { + f.addFree(int64(handle)+rqAtoms, size-rqAtoms) + } + f.writeUsed(b, int64(handle)) + return + } + } + + handle = Handle(f.extend(b)) + return + }) + return +} + +// checkLeft returns the atom size of a free bleck left adjacent to block @atom. +// If that block is not free the returned size is 0. +func (f *File) checkLeft(atom int64) (size int64) { + if atom <= f.canfree { + return + } + + b := make([]byte, 7) + fp := atom << 4 + f.read(b[:1], fp-1) + switch last := b[0]; { + case last <= 0xfd: + // used block + case last == 0xfe: + f.read(b, fp-8) + (*Handle)(&size).Get(b) + case last == 0xff: + size = 1 + } + return +} + +// getInfo returns the block @atom type and size. 
+func (f *File) getInfo(atom int64) (pref byte, size int64) { + b := make([]byte, 7) + fp := atom << 4 + f.read(b[:1], fp) + switch pref = b[0]; { + case pref == 0: // Empty used + size = 1 + case pref >= 1 && pref <= 237: // Short + size = rq2Atoms(int(pref)) + case pref >= 0xee && pref <= 0xfb: // Short esc + size = rq2Atoms(15 + 16*int(pref-0xee)) + case pref == 0xfc: // Long + f.read(b[:2], fp+1) + n := int(b[0])<<8 + int(b[1]) + switch { + default: + panic(&ECorrupted{f.f.Name(), fp + 1}) + case n >= 238 && n <= 61680: // Long non esc + size = rq2Atoms(n) + case n >= 61681: // Long esc + size = rq2Atoms(13 + 16*(n-0xf0f1)) + } + case pref == 0xfd: // reloc + size = 1 + case pref == 0xfe: + f.read(b, fp+15) + (*Handle)(&size).Get(b) + case pref == 0xff: + size = 1 + } + return +} + +// getSize returns the atom size of the block @atom and wheter it is free. +func (f *File) getSize(atom int64) (size int64, isFree bool) { + var typ byte + typ, size = f.getInfo(atom) + isFree = typ >= 0xfe + return +} + +// checkRight returns the atom size of a free bleck right adjacent to block @atom,atoms. +// If that block is not free the returned size is 0. 
+func (f *File) checkRight(atom, atoms int64) (size int64) { + if atom+atoms >= f.atoms { + return + } + + if sz, free := f.getSize(atom + atoms); free { + size = sz + } + return +} + +// delFree removes the atoms@atom free block from the free block list +func (f *File) delFree(atom, atoms int64) { + b := make([]byte, 15) + size := int(atoms) + if n := len(f.freetab); atoms >= int64(n) { + size = n - 1 + } + fp := atom << 4 + f.read(b[1:], fp+1) + var prev, next Handle + prev.Get(b[1:]) + next.Get(b[8:]) + + switch { + case prev == 0 && next != 0: + next.Put(b) + f.write(b[:7], int64(32+3+7+(size-1)*14)) + f.write(zero7, int64(next)<<4+1) + f.freetab[size] = int64(next) + case prev != 0 && next == 0: + f.write(zero7, int64(prev)<<4+8) + case prev != 0 && next != 0: + prev.Put(b) + f.write(b[:7], int64(next)<<4+1) + next.Put(b) + f.write(b[:7], int64(prev)<<4+8) + default: // prev == 0 && next == 0: + f.write(zero7, int64(32+3+7+(size-1)*14)) + f.freetab[size] = 0 + } +} + +// addFree adds atoms@atom to the free block lists and marks it free. +func (f *File) addFree(atom, atoms int64) { + b := make([]byte, 7) + size := int(atoms) + if n := len(f.freetab); atoms >= int64(n) { + size = n - 1 + } + head := f.freetab[size] + if head == 0 { // empty list + f.makeFree(0, atom, atoms, 0) + Handle(atom).Put(b) + f.write(b, int64(32+3+7+(size-1)*14)) + f.freetab[size] = atom + return + } + + Handle(atom).Put(b) + f.write(b, head<<4+1) // head.prev = atom + f.makeFree(0, atom, atoms, head) // atom.next = head + f.write(b, int64(32+3+7+(size-1)*14)) + f.freetab[size] = atom +} + +// makeFree sets up the content of a free block atoms@atom, fills the prev and next links. 
+func (f *File) makeFree(prev, atom, atoms, next int64) { + b := make([]byte, 23) + fp := atom << 4 + if atoms == 1 { + b[0] = 0xff + Handle(prev).Put(b[1:]) + Handle(next).Put(b[8:]) + b[15] = 0xff + f.write(b[:16], fp) + return + } + + b[0] = 0xfe + Handle(prev).Put(b[1:]) + Handle(next).Put(b[8:]) + Handle(atoms).Put(b[15:]) + f.write(b[:22], fp) + b[22] = 0xfe + f.write(b[15:], fp+atoms<<4-8) +} + +// Read reads and return the data associated with handle and an error if any. +// Passing an invalid handle to Read may return invalid data without error. +// It's like getting garbage via passing an invalid pointer to C.memcopy(). +func (f *File) Read(handle Handle) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + b = nil + err = e.(error) + } + }() + + switch handle { + case 0: + panic(ENullHandle(f.f.Name())) + case 2: + panic(&EHandle{f.f.Name(), handle}) + default: + b, _ = f.readUsed(int64(handle)) + } + return +} + +// Free frees space associated with handle and returns an error if any. Passing an invalid +// handle to Free or reusing handle afterwards will probably corrupt the database or provide +// invalid data on Read. It's like corrupting memory via passing an invalid pointer to C.free() +// or reusing that pointer. 
+func (f *File) Free(handle Handle) (err error) { + return storage.Mutate(f.Accessor(), func() (err error) { + atom := int64(handle) + atoms, isFree := f.getSize(atom) + if isFree || atom < f.canfree { + return &EHandle{f.f.Name(), handle} + } + + leftFree, rightFree := f.checkLeft(atom), f.checkRight(atom, atoms) + switch { + case leftFree != 0 && rightFree != 0: + f.delFree(atom-leftFree, leftFree) + f.delFree(atom+atoms, rightFree) + f.addFree(atom-leftFree, leftFree+atoms+rightFree) + case leftFree != 0 && rightFree == 0: + f.delFree(atom-leftFree, leftFree) + if atom+atoms == f.atoms { // the left free neighbour and this block together are an empy tail + f.atoms = atom - leftFree + f.f.Truncate(f.atoms << 4) + return + } + + f.addFree(atom-leftFree, leftFree+atoms) + case leftFree == 0 && rightFree != 0: + f.delFree(atom+atoms, rightFree) + f.addFree(atom, atoms+rightFree) + default: // leftFree == 0 && rightFree == 0 + if atom+atoms < f.atoms { // isolated inner block + f.addFree(atom, atoms) + return + } + + f.f.Truncate(atom << 4) // isolated tail block, shrink file + f.atoms = atom + } + return + }) +} + +// Realloc reallocates space associted with handle to acomodate b, returns the newhandle +// newly associated with b and an error if any. If keepHandle == true then Realloc guarantees +// newhandle == handle even if the new data are larger then the previous content associated +// with handle. If !keepHandle && newhandle != handle then reusing handle will probably corrupt +// the database. +// The above effects are like corrupting memory/data via passing an invalid pointer to C.realloc(). 
+func (f *File) Realloc(handle Handle, b []byte, keepHandle bool) (newhandle Handle, err error) { + err = storage.Mutate(f.Accessor(), func() (err error) { + switch handle { + case 0, 2: + return &EHandle{f.f.Name(), handle} + case 1: + keepHandle = true + } + newhandle = handle + atom, newatoms := int64(handle), rq2Atoms(len(b)) + if newatoms > 3856 { + return &EBadRequest{f.f.Name(), len(b)} + } + + typ, oldatoms := f.getInfo(atom) + switch { + default: + return &ECorrupted{f.f.Name(), atom << 4} + case typ <= 0xfc: // non relocated used block + switch { + case newatoms == oldatoms: // in place replace + f.writeUsed(b, atom) + case newatoms < oldatoms: // in place shrink + rightFree := f.checkRight(atom, oldatoms) + if rightFree > 0 { // right join + f.delFree(atom+oldatoms, rightFree) + } + f.addFree(atom+newatoms, oldatoms+rightFree-newatoms) + f.writeUsed(b, atom) + case newatoms > oldatoms: + if rightFree := f.checkRight(atom, oldatoms); rightFree > 0 && newatoms <= oldatoms+rightFree { + f.delFree(atom+oldatoms, rightFree) + if newatoms < oldatoms+rightFree { + f.addFree(atom+newatoms, oldatoms+rightFree-newatoms) + } + f.writeUsed(b, atom) + return + } + + if !keepHandle { + f.Free(Handle(atom)) + newhandle, err = f.Alloc(b) + return + } + + // reloc + newatom, e := f.Alloc(b) + if e != nil { + return e + } + + buf := make([]byte, 16) + buf[0] = 0xfd + Handle(newatom).Put(buf[1:]) + f.Realloc(Handle(atom), buf[1:], true) + f.write(buf[:1], atom<<4) + } + case typ == 0xfd: // reloc + var target Handle + buf := make([]byte, 7) + f.read(buf, atom<<4+1) + target.Get(buf) + switch { + case newatoms == 1: + f.writeUsed(b, atom) + f.Free(target) + default: + if rightFree := f.checkRight(atom, 1); rightFree > 0 && newatoms <= 1+rightFree { + f.delFree(atom+1, rightFree) + if newatoms < 1+rightFree { + f.addFree(atom+newatoms, 1+rightFree-newatoms) + } + f.writeUsed(b, atom) + f.Free(target) + return + } + + newtarget, e := f.Realloc(Handle(target), b, false) + if e 
!= nil { + return e + } + + if newtarget != target { + Handle(newtarget).Put(buf) + f.write(buf, atom<<4+1) + } + } + } + return + }) + return +} + +// Lock locks f for writing. If the lock is already locked for reading or writing, +// Lock blocks until the lock is available. To ensure that the lock eventually becomes available, +// a blocked Lock call excludes new readers from acquiring the lock. +func (f *File) Lock() { + f.rwm.Lock() +} + +// RLock locks f for reading. If the lock is already locked for writing or there is a writer +// already waiting to release the lock, RLock blocks until the writer has released the lock. +func (f *File) RLock() { + f.rwm.RLock() +} + +// Unlock unlocks f for writing. It is a run-time error if f is not locked for writing on entry to Unlock. +// +// As with Mutexes, a locked RWMutex is not associated with a particular goroutine. +// One goroutine may RLock (Lock) f and then arrange for another goroutine to RUnlock (Unlock) it. +func (f *File) Unlock() { + f.rwm.Unlock() +} + +// RUnlock undoes a single RLock call; it does not affect other simultaneous readers. +// It is a run-time error if f is not locked for reading on entry to RUnlock. +func (f *File) RUnlock() { + f.rwm.RUnlock() +} + +// LockedAlloc wraps Alloc in a Lock/Unlock pair. +func (f *File) LockedAlloc(b []byte) (handle Handle, err error) { + f.Lock() + defer f.Unlock() + return f.Alloc(b) +} + +// LockedFree wraps Free in a Lock/Unlock pair. +func (f *File) LockedFree(handle Handle) (err error) { + f.Lock() + defer f.Unlock() + return f.Free(handle) +} + +// LockedRead wraps Read in a RLock/RUnlock pair. +func (f *File) LockedRead(handle Handle) (b []byte, err error) { + f.RLock() + defer f.RUnlock() + return f.Read(handle) +} + +// LockedRealloc wraps Realloc in a Lock/Unlock pair. 
+func (f *File) LockedRealloc(handle Handle, b []byte, keepHandle bool) (newhandle Handle, err error) { + f.Lock() + defer f.Unlock() + return f.Realloc(handle, b, keepHandle) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/test_deps.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/test_deps.go new file mode 100644 index 00000000..4437a8b3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/falloc/test_deps.go @@ -0,0 +1,15 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package falloc + +// Pull test dependencies too. +// Enables easy 'go test X' after 'go get X' +import ( + _ "camlistore.org/third_party/github.com/cznic/fileutil" + _ "camlistore.org/third_party/github.com/cznic/fileutil/storage" + _ "camlistore.org/third_party/github.com/cznic/mathutil" +) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil.go new file mode 100644 index 00000000..2f0f7ab1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil.go @@ -0,0 +1,223 @@ +// Copyright (c) 2014 The fileutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fileutil collects some file utility functions. +package fileutil + +import ( + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strconv" + "sync" + "time" +) + +// GoMFile is a concurrent access safe version of MFile. +type GoMFile struct { + mfile *MFile + mutex sync.Mutex +} + +// NewGoMFile return a newly created GoMFile. 
+func NewGoMFile(fname string, flag int, perm os.FileMode, delta_ns int64) (m *GoMFile, err error) { + m = &GoMFile{} + if m.mfile, err = NewMFile(fname, flag, perm, delta_ns); err != nil { + m = nil + } + return +} + +func (m *GoMFile) File() (file *os.File, err error) { + m.mutex.Lock() + defer m.mutex.Unlock() + return m.mfile.File() +} + +func (m *GoMFile) SetChanged() { + m.mutex.Lock() + defer m.mutex.Unlock() + m.mfile.SetChanged() +} + +func (m *GoMFile) SetHandler(h MFileHandler) { + m.mutex.Lock() + defer m.mutex.Unlock() + m.mfile.SetHandler(h) +} + +// MFileHandler resolves modifications of File. +// Possible File context is expected to be a part of the handler's closure. +type MFileHandler func(*os.File) error + +// MFile represents an os.File with a guard/handler on change/modification. +// Example use case is an app with a configuration file which can be modified at any time +// and have to be reloaded in such event prior to performing something configurable by that +// file. The checks are made only on access to the MFile file by +// File() and a time threshold/hysteresis value can be chosen on creating a new MFile. +type MFile struct { + file *os.File + handler MFileHandler + t0 int64 + delta int64 + ctime int64 +} + +// NewMFile returns a newly created MFile or Error if any. +// The fname, flag and perm parameters have the same meaning as in os.Open. +// For meaning of the delta_ns parameter please see the (m *MFile) File() docs. 
+func NewMFile(fname string, flag int, perm os.FileMode, delta_ns int64) (m *MFile, err error) { + m = &MFile{} + m.t0 = time.Now().UnixNano() + if m.file, err = os.OpenFile(fname, flag, perm); err != nil { + return + } + + var fi os.FileInfo + if fi, err = m.file.Stat(); err != nil { + return + } + + m.ctime = fi.ModTime().UnixNano() + m.delta = delta_ns + runtime.SetFinalizer(m, func(m *MFile) { + m.file.Close() + }) + return +} + +// SetChanged forces next File() to unconditionally handle modification of the wrapped os.File. +func (m *MFile) SetChanged() { + m.ctime = -1 +} + +// SetHandler sets a function to be invoked when modification of MFile is to be processed. +func (m *MFile) SetHandler(h MFileHandler) { + m.handler = h +} + +// File returns an os.File from MFile. If time elapsed between the last invocation of this function +// and now is at least delta_ns ns (a parameter of NewMFile) then the file is checked for +// change/modification. For delta_ns == 0 the modification is checked w/o getting os.Time(). +// If a change is detected a handler is invoked on the MFile file. +// Any of these steps can produce an Error. If that happens the function returns nil, Error. +func (m *MFile) File() (file *os.File, err error) { + var now int64 + + mustCheck := m.delta == 0 + if !mustCheck { + now = time.Now().UnixNano() + mustCheck = now-m.t0 > m.delta + } + + if mustCheck { // check interval reached + var fi os.FileInfo + if fi, err = m.file.Stat(); err != nil { + return + } + + if fi.ModTime().UnixNano() != m.ctime { // modification detected + if m.handler == nil { + return nil, fmt.Errorf("no handler set for modified file %q", m.file.Name()) + } + if err = m.handler(m.file); err != nil { + return + } + + m.ctime = fi.ModTime().UnixNano() + } + m.t0 = now + } + + return m.file, nil +} + +// Read reads buf from r. It will either fill the full buf or fail. 
+// It wraps the functionality of an io.Reader which may return less bytes than requested, +// but may block if not all data are ready for the io.Reader. +func Read(r io.Reader, buf []byte) (err error) { + have := 0 + remain := len(buf) + got := 0 + for remain > 0 { + if got, err = r.Read(buf[have:]); err != nil { + return + } + + remain -= got + have += got + } + return +} + +// "os" and/or "syscall" extensions + +// FadviseAdvice is used by Fadvise. +type FadviseAdvice int + +// FAdviseAdvice values. +const ( + // $ grep FADV /usr/include/bits/fcntl.h + POSIX_FADV_NORMAL FadviseAdvice = iota // No further special treatment. + POSIX_FADV_RANDOM // Expect random page references. + POSIX_FADV_SEQUENTIAL // Expect sequential page references. + POSIX_FADV_WILLNEED // Will need these pages. + POSIX_FADV_DONTNEED // Don't need these pages. + POSIX_FADV_NOREUSE // Data will be accessed once. +) + +// TempFile creates a new temporary file in the directory dir with a name +// ending with suffix, basename starting with prefix, opens the file for +// reading and writing, and returns the resulting *os.File. If dir is the +// empty string, TempFile uses the default directory for temporary files (see +// os.TempDir). Multiple programs calling TempFile simultaneously will not +// choose the same file. The caller can use f.Name() to find the pathname of +// the file. It is the caller's responsibility to remove the file when no +// longer needed. +// +// NOTE: This function differs from ioutil.TempFile. +func TempFile(dir, prefix, suffix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextInfix()+suffix) + f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + rand = reseed() + } + continue + } + break + } + return +} + +// Random number state. 
+// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextInfix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_arm.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_arm.go new file mode 100644 index 00000000..9410d1bb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_arm.go @@ -0,0 +1,25 @@ +// Copyright (c) 2014 The fileutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fileutil + +import ( + "io" + "os" +) + +// PunchHole deallocates space inside a file in the byte range starting at +// offset and continuing for len bytes. Not supported on ARM. +func PunchHole(f *os.File, off, len int64) error { + return nil +} + +// Fadvise predeclares an access pattern for file data. See also 'man 2 +// posix_fadvise'. Not supported on ARM. +func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error { + return nil +} + +// IsEOF reports whether err is an EOF condition. 
+func IsEOF(err error) bool { return err == io.EOF } diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_darwin.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_darwin.go new file mode 100644 index 00000000..a19723fc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_darwin.go @@ -0,0 +1,25 @@ +// Copyright (c) 2014 The fileutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fileutil + +import ( + "io" + "os" +) + +// PunchHole deallocates space inside a file in the byte range starting at +// offset and continuing for len bytes. Not supported on OSX. +func PunchHole(f *os.File, off, len int64) error { + return nil +} + +// Fadvise predeclares an access pattern for file data. See also 'man 2 +// posix_fadvise'. Not supported on OSX. +func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error { + return nil +} + +// IsEOF reports whether err is an EOF condition. +func IsEOF(err error) bool { return err == io.EOF } diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_freebsd.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_freebsd.go new file mode 100644 index 00000000..0865093e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_freebsd.go @@ -0,0 +1,25 @@ +// Copyright (c) 2014 The fileutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fileutil + +import ( + "io" + "os" +) + +// PunchHole deallocates space inside a file in the byte range starting at +// offset and continuing for len bytes. Unimplemented on FreeBSD. 
+func PunchHole(f *os.File, off, len int64) error { + return nil +} + +// Fadvise predeclares an access pattern for file data. See also 'man 2 +// posix_fadvise'. Unimplemented on FreeBSD. +func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error { + return nil +} + +// IsEOF reports whether err is an EOF condition. +func IsEOF(err error) bool { return err == io.EOF } diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_linux.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_linux.go new file mode 100644 index 00000000..8babfc55 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_linux.go @@ -0,0 +1,96 @@ +// Copyright (c) 2014 The fileutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !arm + +package fileutil + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "strconv" + "syscall" +) + +func n(s []byte) byte { + for i, c := range s { + if c < '0' || c > '9' { + s = s[:i] + break + } + } + v, _ := strconv.Atoi(string(s)) + return byte(v) +} + +func init() { + b, err := ioutil.ReadFile("/proc/sys/kernel/osrelease") + if err != nil { + panic(err) + } + + tokens := bytes.Split(b, []byte(".")) + if len(tokens) > 3 { + tokens = tokens[:3] + } + switch len(tokens) { + case 3: + // Supported since kernel 2.6.38 + if bytes.Compare([]byte{n(tokens[0]), n(tokens[1]), n(tokens[2])}, []byte{2, 6, 38}) < 0 { + puncher = func(*os.File, int64, int64) error { return nil } + } + case 2: + if bytes.Compare([]byte{n(tokens[0]), n(tokens[1])}, []byte{2, 7}) < 0 { + puncher = func(*os.File, int64, int64) error { return nil } + } + default: + puncher = func(*os.File, int64, int64) error { return nil } + } +} + +var puncher = func(f *os.File, off, len int64) error { + const ( + /* + /usr/include/linux$ grep FL_ falloc.h + */ + 
_FALLOC_FL_KEEP_SIZE = 0x01 // default is extend size + _FALLOC_FL_PUNCH_HOLE = 0x02 // de-allocates range + ) + + _, _, errno := syscall.Syscall6( + syscall.SYS_FALLOCATE, + uintptr(f.Fd()), + uintptr(_FALLOC_FL_KEEP_SIZE|_FALLOC_FL_PUNCH_HOLE), + uintptr(off), + uintptr(len), + 0, 0) + if errno != 0 { + return os.NewSyscallError("SYS_FALLOCATE", errno) + } + return nil +} + +// PunchHole deallocates space inside a file in the byte range starting at +// offset and continuing for len bytes. No-op for kernels < 2.6.38 (or < 2.7). +func PunchHole(f *os.File, off, len int64) error { + return puncher(f, off, len) +} + +// Fadvise predeclares an access pattern for file data. See also 'man 2 +// posix_fadvise'. +func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error { + _, _, errno := syscall.Syscall6( + syscall.SYS_FADVISE64, + uintptr(f.Fd()), + uintptr(off), + uintptr(len), + uintptr(advice), + 0, 0) + return os.NewSyscallError("SYS_FADVISE64", errno) +} + +// IsEOF reports whether err is an EOF condition. +func IsEOF(err error) bool { return err == io.EOF } diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_openbsd.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_openbsd.go new file mode 100644 index 00000000..428171bd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_openbsd.go @@ -0,0 +1,25 @@ +// Copyright (c) 2014 The fileutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fileutil + +import ( + "io" + "os" +) + +// PunchHole deallocates space inside a file in the byte range starting at +// offset and continuing for len bytes. Similar to FreeBSD, this is +// unimplemented. +func PunchHole(f *os.File, off, len int64) error { + return nil +} + +// Unimplemented on OpenBSD. 
+func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error { + return nil +} + +// IsEOF reports whether err is an EOF condition. +func IsEOF(err error) bool { return err == io.EOF } diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_plan9.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_plan9.go new file mode 100644 index 00000000..a2db64e2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_plan9.go @@ -0,0 +1,25 @@ +// Copyright (c) 2014 The fileutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fileutil + +import ( + "io" + "os" +) + +// PunchHole deallocates space inside a file in the byte range starting at +// offset and continuing for len bytes. Unimplemented on Plan 9. +func PunchHole(f *os.File, off, len int64) error { + return nil +} + +// Fadvise predeclares an access pattern for file data. See also 'man 2 +// posix_fadvise'. Unimplemented on Plan 9. +func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error { + return nil +} + +// IsEOF reports whether err is an EOF condition. +func IsEOF(err error) bool { return err == io.EOF } diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_solaris.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_solaris.go new file mode 100644 index 00000000..61dfcde3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_solaris.go @@ -0,0 +1,27 @@ +// Copyright (c) 2013 jnml. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.3 + +package fileutil + +import ( + "io" + "os" +) + +// PunchHole deallocates space inside a file in the byte range starting at +// offset and continuing for len bytes. Not supported on Solaris. +func PunchHole(f *os.File, off, len int64) error { + return nil +} + +// Fadvise predeclares an access pattern for file data. See also 'man 2 +// posix_fadvise'. Not supported on Solaris. +func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error { + return nil +} + +// IsEOF reports whether err is an EOF condition. +func IsEOF(err error) bool { return err == io.EOF } diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_windows.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_windows.go new file mode 100644 index 00000000..3a81f2fc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/fileutil_windows.go @@ -0,0 +1,183 @@ +// Copyright (c) 2014 The fileutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fileutil + +import ( + "io" + "os" + "sync" + "syscall" + "unsafe" +) + +// PunchHole deallocates space inside a file in the byte range starting at +// offset and continuing for len bytes. Not supported on Windows. +func PunchHole(f *os.File, off, len int64) error { + return puncher(f, off, len) +} + +// Fadvise predeclares an access pattern for file data. See also 'man 2 +// posix_fadvise'. Not supported on Windows. +func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error { + return nil +} + +// IsEOF reports whether err is an EOF condition. 
+func IsEOF(err error) bool { + if err == io.EOF { + return true + } + + // http://social.technet.microsoft.com/Forums/windowsserver/en-US/1a16311b-c625-46cf-830b-6a26af488435/how-to-solve-error-38-0x26-errorhandleeof-using-fsctlgetretrievalpointers + x, ok := err.(*os.PathError) + return ok && x.Op == "read" && x.Err.(syscall.Errno) == 0x26 +} + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procDeviceIOControl = modkernel32.NewProc("DeviceIoControl") + + sparseFilesMu sync.Mutex + sparseFiles map[uintptr]struct{} +) + +func init() { + // sparseFiles is an fd set for already "sparsed" files - according to + // msdn.microsoft.com/en-us/library/windows/desktop/aa364225(v=vs.85).aspx + // the file handles are unique per process. + sparseFiles = make(map[uintptr]struct{}) +} + +// puncHoleWindows punches a hole into the given file starting at offset, +// measuring "size" bytes +// (http://msdn.microsoft.com/en-us/library/windows/desktop/aa364597%28v=vs.85%29.aspx) +func puncher(file *os.File, offset, size int64) error { + if err := ensureFileSparse(file); err != nil { + return err + } + + // http://msdn.microsoft.com/en-us/library/windows/desktop/aa364411%28v=vs.85%29.aspx + // typedef struct _FILE_ZERO_DATA_INFORMATION { + // LARGE_INTEGER FileOffset; + // LARGE_INTEGER BeyondFinalZero; + //} FILE_ZERO_DATA_INFORMATION, *PFILE_ZERO_DATA_INFORMATION; + type fileZeroDataInformation struct { + FileOffset, BeyondFinalZero int64 + } + + lpInBuffer := fileZeroDataInformation{ + FileOffset: offset, + BeyondFinalZero: offset + size} + return deviceIOControl(false, file.Fd(), uintptr(unsafe.Pointer(&lpInBuffer)), 16) +} + +// // http://msdn.microsoft.com/en-us/library/windows/desktop/cc948908%28v=vs.85%29.aspx +// type fileSetSparseBuffer struct { +// SetSparse bool +// } + +func ensureFileSparse(file *os.File) (err error) { + fd := file.Fd() + sparseFilesMu.Lock() + if _, ok := sparseFiles[fd]; ok { + sparseFilesMu.Unlock() + return nil + } + + if err = 
deviceIOControl(true, fd, 0, 0); err == nil { + sparseFiles[fd] = struct{}{} + } + sparseFilesMu.Unlock() + return err +} + +func deviceIOControl(setSparse bool, fd, inBuf, inBufLen uintptr) (err error) { + const ( + //http://source.winehq.org/source/include/winnt.h#L4605 + file_read_data = 1 + file_write_data = 2 + + // METHOD_BUFFERED 0 + method_buffered = 0 + // FILE_ANY_ACCESS 0 + file_any_access = 0 + // FILE_DEVICE_FILE_SYSTEM 0x00000009 + file_device_file_system = 0x00000009 + // FILE_SPECIAL_ACCESS (FILE_ANY_ACCESS) + file_special_access = file_any_access + file_read_access = file_read_data + file_write_access = file_write_data + + // http://source.winehq.org/source/include/winioctl.h + // #define CTL_CODE ( DeviceType, + // Function, + // Method, + // Access ) + // ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method) + + // FSCTL_SET_COMPRESSION CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 16, METHOD_BUFFERED, FILE_READ_DATA | FILE_WRITE_DATA) + fsctl_set_compression = (file_device_file_system << 16) | ((file_read_access | file_write_access) << 14) | (16 << 2) | method_buffered + // FSCTL_SET_SPARSE CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 49, METHOD_BUFFERED, FILE_SPECIAL_ACCESS) + fsctl_set_sparse = (file_device_file_system << 16) | (file_special_access << 14) | (49 << 2) | method_buffered + // FSCTL_SET_ZERO_DATA CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 50, METHOD_BUFFERED, FILE_WRITE_DATA) + fsctl_set_zero_data = (file_device_file_system << 16) | (file_write_data << 14) | (50 << 2) | method_buffered + ) + retPtr := uintptr(unsafe.Pointer(&(make([]byte, 8)[0]))) + var r1 uintptr + var e1 syscall.Errno + if setSparse { + // BOOL + // WINAPI + // DeviceIoControl( (HANDLE) hDevice, // handle to a file + // FSCTL_SET_SPARSE, // dwIoControlCode + // (PFILE_SET_SPARSE_BUFFER) lpInBuffer, // input buffer + // (DWORD) nInBufferSize, // size of input buffer + // NULL, // lpOutBuffer + // 0, // nOutBufferSize + // (LPDWORD) lpBytesReturned, // number of bytes returned + 
// (LPOVERLAPPED) lpOverlapped ); // OVERLAPPED structure + r1, _, e1 = syscall.Syscall9(procDeviceIOControl.Addr(), 8, + fd, + uintptr(fsctl_set_sparse), + // If the lpInBuffer parameter is NULL, the operation will behave the same as if the SetSparse member of the FILE_SET_SPARSE_BUFFER structure were TRUE. In other words, the operation sets the file to a sparse file. + 0, // uintptr(unsafe.Pointer(&lpInBuffer)), + 0, // 1, + 0, + 0, + retPtr, + 0, + 0) + } else { + // BOOL + // WINAPI + // DeviceIoControl( (HANDLE) hDevice, // handle to a file + // FSCTL_SET_ZERO_DATA, // dwIoControlCode + // (LPVOID) lpInBuffer, // input buffer + // (DWORD) nInBufferSize, // size of input buffer + // NULL, // lpOutBuffer + // 0, // nOutBufferSize + // (LPDWORD) lpBytesReturned, // number of bytes returned + // (LPOVERLAPPED) lpOverlapped ); // OVERLAPPED structure + r1, _, e1 = syscall.Syscall9(procDeviceIOControl.Addr(), 8, + fd, + uintptr(fsctl_set_zero_data), + inBuf, + inBufLen, + 0, + 0, + retPtr, + 0, + 0) + } + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return err +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/LICENSE new file mode 100644 index 00000000..1e92e33d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of CZ.NIC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/README b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/README new file mode 100644 index 00000000..05ce89f9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/README @@ -0,0 +1,5 @@ +This is a goinstall-able mirror of modified code already published at: +https://git.nic.cz/redmine/projects/gofileutil/repository/show/hdb + +Install: $go get github.com/cznic/fileutil/hdb +Godocs: http://gopkgdoc.appspot.com/pkg/github.com/cznic/fileutil/hdb diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/all_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/all_test.go new file mode 100644 index 00000000..a980587f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/all_test.go @@ -0,0 +1,15 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package hdb + +import ( + "testing" +) + +func TestPlaceholder(t *testing.T) { + t.Log("TODO") //TODO +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/hdb.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/hdb.go new file mode 100644 index 00000000..220fd94c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/hdb.go @@ -0,0 +1,153 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// blame: jnml, labs.nic.cz + +/* +WIP: Package hdb provides a "handle"/value DB like store, but actually it's +closer to the model of a process's virtual memory and its alloc, free and move +methods. + +The hdb package is a thin layer around falloc.File providing stable-only +handles and the basic synchronizing primitives. The central functionality of +hdb are the New, Set, Get and Delete methods of Store. + +Conceptual analogy: + New alloc(sizeof(content)), return new "memory" pointer (a handle). + + Get memmove() from "memory" "pointed to" by handle to the result content. + Note: Handle "knows" the size of its content. + + Set memmove() from content to "memory" pointed to by handle. + In contrast to real memory, the new content may have different + size than the previously stored one w/o additional handling + and the "pointer" handle remains the same. + + Delete free() the "memory" "pointed to" by handle. +*/ +package hdb + +import ( + "camlistore.org/third_party/github.com/cznic/fileutil/falloc" + "camlistore.org/third_party/github.com/cznic/fileutil/storage" +) + +type Store struct { + f *falloc.File +} + +// New returns a newly created Store backed by accessor, discarding its conents if any. +// If successful, methods on the returned Store can be used for I/O. +// It returns the Store and an error, if any. +func New(accessor storage.Accessor) (store *Store, err error) { + s := &Store{} + if s.f, err = falloc.New(accessor); err == nil { + store = s + } + return +} + +// Open opens the Store from accessor. +// If successful, methods on the returned Store can be used for data exchange. +// It returns the Store and an error, if any. +func Open(accessor storage.Accessor) (store *Store, err error) { + s := &Store{} + if s.f, err = falloc.Open(accessor); err == nil { + store = s + } + return +} + +// Close closes the store. Further access to the store has undefined behavior and may panic. +// It returns an error, if any. 
+func (s *Store) Close() (err error) { + defer func() { + s.f = nil + }() + + return s.f.Close() +} + +// Delete deletes the data associated with handle. +// It returns an error if any. +func (s *Store) Delete(handle falloc.Handle) (err error) { + return s.f.Free(handle) +} + +// Get gets the data associated with handle. +// It returns the data and an error, if any. +func (s *Store) Get(handle falloc.Handle) (b []byte, err error) { + return s.f.Read(handle) +} + +// New associates data with a new handle. +// It returns the handle and an error, if any. +func (s *Store) New(b []byte) (handle falloc.Handle, err error) { + return s.f.Alloc(b) +} + +// Set associates data with an existing handle. +// It returns an error, if any. +func (s *Store) Set(handle falloc.Handle, b []byte) (err error) { + _, err = s.f.Realloc(handle, b, true) + return +} + +// Root returns the handle of the DB root (top level directory, ...). +func (s *Store) Root() falloc.Handle { + return s.f.Root() +} + +// File returns the underlying falloc.File of 's'. +func (s *Store) File() *falloc.File { + return s.f +} + +// Lock locks 's' for writing. If the lock is already locked for reading or writing, +// Lock blocks until the lock is available. To ensure that the lock eventually becomes available, +// a blocked Lock call excludes new readers from acquiring the lock. +func (s *Store) Lock() { + s.f.Lock() +} + +// RLock locks 's' for reading. If the lock is already locked for writing or there is a writer +// already waiting to release the lock, RLock blocks until the writer has released the lock. +func (s *Store) RLock() { + s.f.RLock() +} + +// Unlock unlocks 's' for writing. It's a run-time error if 's' is not locked for writing on entry to Unlock. +// +// As with Mutexes, a locked RWMutex is not associated with a particular goroutine. +// One goroutine may RLock (Lock) 's' and then arrange for another goroutine to RUnlock (Unlock) it. 
+func (s *Store) Unlock() { + s.f.Unlock() +} + +// RUnlock undoes a single RLock call; it does not affect other simultaneous readers. +// It's a run-time error if 's' is not locked for reading on entry to RUnlock. +func (s *Store) RUnlock() { + s.f.RUnlock() +} + +// LockedNew wraps New in a Lock/Unlock pair. +func (s *Store) LockedNew(b []byte) (handle falloc.Handle, err error) { + return s.f.LockedAlloc(b) +} + +// LockedDelete wraps Delete in a Lock/Unlock pair. +func (s *Store) LockedDelete(handle falloc.Handle) (err error) { + return s.f.LockedFree(handle) +} + +// LockedGet wraps Get in a RLock/RUnlock pair. +func (s *Store) LockedGet(handle falloc.Handle) (b []byte, err error) { + return s.f.LockedRead(handle) +} + +// LockedSet wraps Set in a Lock/Unlock pair. +func (s *Store) LockedSet(handle falloc.Handle, b []byte) (err error) { + _, err = s.f.Realloc(handle, b, true) + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/test_deps.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/test_deps.go new file mode 100644 index 00000000..3164f63a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/hdb/test_deps.go @@ -0,0 +1,13 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package hdb + +// Pull test dependencies too. 
+// Enables easy 'go test X' after 'go get X' +import ( +// nothing yet +) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/punch_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/punch_test.go new file mode 100644 index 00000000..766f6f4b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/punch_test.go @@ -0,0 +1,55 @@ +// Copyright (c) 2014 The fileutil authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fileutil + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestPunch(t *testing.T) { + file, err := ioutil.TempFile("", "punchhole-") + if err != nil { + t.Error(err) + } + defer os.Remove(file.Name()) + defer file.Close() + buf := make([]byte, 10<<20) + for i := range buf { + buf[i] = byte(1 + (i+1)&0xfe) + } + if _, err = file.Write(buf); err != nil { + t.Errorf("error writing to the temp file: %v", err) + t.FailNow() + } + if err = file.Sync(); err != nil { + t.Logf("error syncing %q: %v", file.Name(), err) + } + for i, j := range []int{1, 31, 1 << 10} { + if err = PunchHole(file, int64(j), int64(j)); err != nil { + t.Errorf("%d. error punching at %d, size %d: %v", i, j, j, err) + continue + } + // read back, with 1-1 bytes overlaid + n, err := file.ReadAt(buf[:j+2], int64(j-1)) + if err != nil { + t.Errorf("%d. error reading file: %v", i, err) + continue + } + buf = buf[:n] + if buf[0] == 0 { + t.Errorf("%d. file at %d has been overwritten with 0!", i, j-1) + } + if buf[n-1] == 0 { + t.Errorf("%d. file at %d has been overwritten with 0!", i, j-1+n) + } + for k, v := range buf[1 : n-1] { + if v != 0 { + t.Errorf("%d. 
error reading file at %d got %d, want 0.", i, k, v) + } + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/LICENSE new file mode 100644 index 00000000..1e92e33d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of CZ.NIC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/README b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/README new file mode 100644 index 00000000..2a400fce --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/README @@ -0,0 +1,5 @@ +This is a goinstall-able mirror of modified code already published at: +https://git.nic.cz/redmine/projects/gofileutil/repository/show/storage + +Install: $go get github.com/cznic/fileutil/storage +Godocs: http://gopkgdoc.appspot.com/pkg/github.com/cznic/fileutil/storage diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/all_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/all_test.go new file mode 100644 index 00000000..8947a0a7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/all_test.go @@ -0,0 +1,22 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package storage + +import ( + "flag" + "runtime" +) + +var ( + devFlag = flag.Bool("dev", false, "enable dev tests") + goFlag = flag.Int("go", 1, "GOMAXPROCS") +) + +func init() { + flag.Parse() + runtime.GOMAXPROCS(*goFlag) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/cache.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/cache.go new file mode 100644 index 00000000..3a6115a7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/cache.go @@ -0,0 +1,322 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package storage + +import ( + "container/list" + "io" + "math" + "os" + "sync" + "sync/atomic" +) + +type cachepage struct { + b [512]byte + dirty bool + lru *list.Element + pi int64 + valid int // page content is b[:valid] +} + +func (p *cachepage) wr(b []byte, off int) (wasDirty bool) { + copy(p.b[off:], b) + if n := off + len(b); n > p.valid { + p.valid = n + } + wasDirty = p.dirty + p.dirty = true + return +} + +func (c *Cache) rd(off int64, read bool) (p *cachepage, ok bool) { + c.Rq++ + pi := off >> 9 + if p, ok = c.m[pi]; ok { + c.lru.MoveToBack(p.lru) + return + } + + if !read { + return + } + + fp := off &^ 511 + if fp >= c.size { + return + } + + rq := 512 + if fp+512 > c.size { + rq = int(c.size - fp) + } + p = &cachepage{pi: pi, valid: rq} + p.lru = c.lru.PushBack(p) + if n, err := c.f.ReadAt(p.b[:p.valid], fp); n != rq { + panic(err) + } + + c.Load++ + if c.advise != nil { + c.advise(fp, 512, false) + } + c.m[pi], ok = p, true + return +} + +func (c *Cache) wr(off int64) (p *cachepage) { + var ok bool + if p, ok = c.rd(off, false); ok { + return + } + + pi := off >> 9 + p = &cachepage{pi: pi} + p.lru = c.lru.PushBack(p) + c.m[pi] = p + return +} + +// Cache provides caching support for another store Accessor. +type Cache struct { + advise func(int64, int, bool) + clean chan bool + cleaning int32 + close chan bool + f Accessor + fi *FileInfo + lock sync.Mutex + lru *list.List + m map[int64]*cachepage + maxpages int + size int64 + sync chan bool + wlist *list.List + write chan bool + writing int32 + Rq int64 // Pages requested from cache + Load int64 // Pages loaded (cache miss) + Purge int64 // Pages purged + Top int // "High water" pages +} + +// Implementation of Accessor. +func (c *Cache) BeginUpdate() error { return nil } + +// Implementation of Accessor. 
+func (c *Cache) EndUpdate() error { return nil } + +// NewCache creates a caching Accessor from store with total of maxcache bytes. +// NewCache returns the new Cache, implementing Accessor or an error if any. +// +// The LRU mechanism is used, so the cache tries to keep often accessed pages cached. +// +func NewCache(store Accessor, maxcache int64, advise func(int64, int, bool)) (c *Cache, err error) { + var fi os.FileInfo + if fi, err = store.Stat(); err != nil { + return + } + + x := maxcache >> 9 + if x > math.MaxInt32/2 { + x = math.MaxInt32 / 2 + } + c = &Cache{ + advise: advise, + clean: make(chan bool, 1), + close: make(chan bool), + f: store, + lru: list.New(), // front == oldest used, back == last recently used + m: make(map[int64]*cachepage), + maxpages: int(x), + size: fi.Size(), + sync: make(chan bool), + wlist: list.New(), + write: make(chan bool, 1), + } + c.fi = NewFileInfo(fi, c) + go c.writer() + go c.cleaner(int((int64(c.maxpages) * 95) / 100)) // hysteresis + return +} + +func (c *Cache) Accessor() Accessor { + return c.f +} + +func (c *Cache) Close() (err error) { + close(c.write) + <-c.close + close(c.clean) + <-c.close + return c.f.Close() +} + +func (c *Cache) Name() (s string) { + return c.f.Name() +} + +func (c *Cache) ReadAt(b []byte, off int64) (n int, err error) { + po := int(off) & 0x1ff + bp := 0 + rem := len(b) + m := 0 + for rem != 0 { + c.lock.Lock() // X1+ + p, ok := c.rd(off, true) + if !ok { + c.lock.Unlock() // X1- + return -1, io.EOF + } + + rq := rem + if po+rq > 512 { + rq = 512 - po + } + if n := copy(b[bp:bp+rq], p.b[po:p.valid]); n != rq { + c.lock.Unlock() // X1- + return -1, io.EOF + } + + m = len(c.m) + c.lock.Unlock() // X1- + po = 0 + bp += rq + off += int64(rq) + rem -= rq + n += rq + } + if m > c.maxpages && atomic.CompareAndSwapInt32(&c.cleaning, 0, 1) { + if m > c.Top { + c.Top = m + } + c.clean <- true + } + return +} + +func (c *Cache) Stat() (fi os.FileInfo, err error) { + c.lock.Lock() + defer 
c.lock.Unlock() + return c.fi, nil +} + +func (c *Cache) Sync() (err error) { + c.write <- false + <-c.sync + return +} + +func (c *Cache) Truncate(size int64) (err error) { + c.Sync() //TODO improve (discard pages, the writer goroutine should also be aware, ...) + c.lock.Lock() + defer c.lock.Unlock() + c.size = size + return c.f.Truncate(size) +} + +func (c *Cache) WriteAt(b []byte, off int64) (n int, err error) { + po := int(off) & 0x1ff + bp := 0 + rem := len(b) + m := 0 + for rem != 0 { + c.lock.Lock() // X+ + p := c.wr(off) + rq := rem + if po+rq > 512 { + rq = 512 - po + } + if wasDirty := p.wr(b[bp:bp+rq], po); !wasDirty { + c.wlist.PushBack(p) + } + m = len(c.m) + po = 0 + bp += rq + off += int64(rq) + if off > c.size { + c.size = off + } + c.lock.Unlock() // X- + rem -= rq + n += rq + } + if atomic.CompareAndSwapInt32(&c.writing, 0, 1) { + c.write <- true + } + if m > c.maxpages && atomic.CompareAndSwapInt32(&c.cleaning, 0, 1) { + if m > c.Top { + c.Top = m + } + c.clean <- true + } + return +} + +func (c *Cache) writer() { + for ok := true; ok; { + var wr bool + var off int64 + wr, ok = <-c.write + for { + c.lock.Lock() // X1+ + item := c.wlist.Front() + if item == nil { + c.lock.Unlock() // X1- + break + } + + p := item.Value.(*cachepage) + off = p.pi << 9 + if n, err := c.f.WriteAt(p.b[:p.valid], off); n != p.valid { + c.lock.Unlock() // X1- + panic("TODO Cache.writer errchan") //TODO +errchan + panic(err) + } + + p.dirty = false + c.wlist.Remove(item) + if c.advise != nil { + c.advise(off, 512, true) + } + c.lock.Unlock() // X1- + } + switch { + case wr: + atomic.AddInt32(&c.writing, -1) + case ok: + c.sync <- true + } + } + c.close <- true +} + +func (c *Cache) cleaner(limit int) { + for _ = range c.clean { + var item *list.Element + for { + c.lock.Lock() // X1+ + if len(c.m) < limit { + c.lock.Unlock() // X1- + break + } + + if item == nil { + item = c.lru.Front() + } + if p := item.Value.(*cachepage); !p.dirty { + delete(c.m, p.pi) + 
c.lru.Remove(item) + c.Purge++ + } + item = item.Next() + c.lock.Unlock() // X1- + } + atomic.AddInt32(&c.cleaning, -1) + } + c.close <- true +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/cache_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/cache_test.go new file mode 100644 index 00000000..8dbad088 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/cache_test.go @@ -0,0 +1,83 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package storage + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func newfile(t *testing.T) (string, string, Accessor) { + dir, err := ioutil.TempDir("", "test-storage-") + if err != nil { + panic(err) + } + + name := filepath.Join(dir, "test.tmp") + f, err := NewFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) + if err != nil { + t.Fatal("newfile", err) + } + + return dir, name, f +} + +func readfile(t *testing.T, name string) (b []byte) { + var err error + if b, err = ioutil.ReadFile(name); err != nil { + t.Fatal("readfile") + } + + return +} + +func newcache(t *testing.T) (dir, name string, c *Cache) { + dir, name, f := newfile(t) + var err error + if c, err = NewCache(f, 1<<20, nil); err != nil { + t.Fatal("newCache", err) + } + + return +} + +func TestCache0(t *testing.T) { + dir, name, c := newcache(t) + defer os.RemoveAll(dir) + + if err := c.Close(); err != nil { + t.Fatal(10, err) + } + + if b := readfile(t, name); len(b) != 0 { + t.Fatal(20, len(b), 0) + } +} + +func TestCache1(t *testing.T) { + dir, name, c := newcache(t) + defer os.RemoveAll(dir) + + if n, err := c.WriteAt([]byte{0xa5}, 0); n != 1 { + t.Fatal(20, n, err) + } + + if err := c.Close(); err != nil { + t.Fatal(10, err) + } + + b := 
readfile(t, name) + if len(b) != 1 { + t.Fatal(30, len(b), 1) + } + + if b[0] != 0xa5 { + t.Fatal(40, b[0], 0xa5) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/dev_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/dev_test.go new file mode 100644 index 00000000..7287a27a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/dev_test.go @@ -0,0 +1,18 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package storage + +import ( + "testing" +) + +func TestDevNothing(t *testing.T) { + if !*devFlag { + t.Log("not enabled") + return + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/file.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/file.go new file mode 100644 index 00000000..94feda5e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/file.go @@ -0,0 +1,50 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package storage + +import ( + "os" +) + +// FileAccessor is the concrete type returned by NewFile and OpenFile. +type FileAccessor struct { + *os.File +} + +// Implementation of Accessor. +func (f *FileAccessor) BeginUpdate() error { return nil } + +// Implementation of Accessor. +func (f *FileAccessor) EndUpdate() error { return nil } + +// NewFile returns an Accessor backed by an os.File named name, It opens the +// named file with specified flag (os.O_RDWR etc.) and perm, (0666 etc.) if +// applicable. 
If successful, methods on the returned Accessor can be used for +// I/O. It returns the Accessor and an Error, if any. +// +// NOTE: The returned Accessor implements BeginUpdate and EndUpdate as a no op. +func NewFile(name string, flag int, perm os.FileMode) (store Accessor, err error) { + var f FileAccessor + if f.File, err = os.OpenFile(name, flag, perm); err == nil { + store = &f + } + return +} + +// OpenFile returns an Accessor backed by an existing os.File named name, It +// opens the named file with specified flag (os.O_RDWR etc.) and perm, (0666 +// etc.) if applicable. If successful, methods on the returned Accessor can be +// used for I/O. It returns the Accessor and an Error, if any. +// +// NOTE: The returned Accessor implements BeginUpdate and EndUpdate as a no op. +func OpenFile(name string, flag int, perm os.FileMode) (store Accessor, err error) { + var f FileAccessor + if f.File, err = os.OpenFile(name, flag, perm); err == nil { + store = &f + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/mem.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/mem.go new file mode 100644 index 00000000..7cda0b66 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/mem.go @@ -0,0 +1,161 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package storage + +import ( + "errors" + "fmt" + "io/ioutil" + "math" + "os" +) + +//TODO -> exported type w/ exported fields +type memaccessor struct { + f *os.File + fi *FileInfo + b []byte +} + +// Implementation of Accessor. +func (m *memaccessor) BeginUpdate() error { return nil } + +// Implementation of Accessor. 
+func (f *memaccessor) EndUpdate() error { return nil } + +// NewMem returns a new Accessor backed by an os.File. The returned Accessor +// keeps all of the store content in memory. The memory and file images are +// synced only by Sync and Close. Recomended for small amounts of data only +// and content which may be lost on process kill/crash. NewMem return the +// Accessor or an error of any. +// +// NOTE: The returned Accessor implements BeginUpdate and EndUpdate as a no op. +func NewMem(f *os.File) (store Accessor, err error) { + a := &memaccessor{f: f} + if err = f.Truncate(0); err != nil { + return + } + + var fi os.FileInfo + if fi, err = a.f.Stat(); err != nil { + return + } + + a.fi = NewFileInfo(fi, a) + store = a + return +} + +// OpenMem return a new Accessor backed by an os.File. The store content is +// loaded from f. The returned Accessor keeps all of the store content in +// memory. The memory and file images are synced only Sync and Close. +// Recomended for small amounts of data only and content which may be lost on +// process kill/crash. OpenMem return the Accessor or an error of any. +// +// NOTE: The returned Accessor implements BeginUpdate and EndUpdate as a no op. +func OpenMem(f *os.File) (store Accessor, err error) { + a := &memaccessor{f: f} + if a.b, err = ioutil.ReadAll(a.f); err != nil { + a.f.Close() + return + } + + var fi os.FileInfo + if fi, err = a.f.Stat(); err != nil { + a.f.Close() + return + } + + a.fi = NewFileInfo(fi, a) + store = a + return +} + +// Close implements Accessor. Specifically it synchronizes the memory and file images. 
+func (a *memaccessor) Close() (err error) { + defer func() { + a.b = nil + if a.f != nil { + if e := a.f.Close(); e != nil && err == nil { + err = e + } + } + a.f = nil + }() + + return a.Sync() +} + +func (a *memaccessor) Name() string { + return a.f.Name() +} + +func (a *memaccessor) ReadAt(b []byte, off int64) (n int, err error) { + if off < 0 || off > math.MaxInt32 { + return -1, fmt.Errorf("ReadAt: illegal offset %#x", off) + } + + rq, fp := len(b), int(off) + if fp+rq > len(a.b) { + return -1, fmt.Errorf("ReadAt: illegal rq %#x @ offset %#x, len %#x", rq, fp, len(a.b)) + } + + copy(b, a.b[fp:]) + return +} + +func (a *memaccessor) Stat() (fi os.FileInfo, err error) { + i := a.fi + i.FSize = int64(len(a.b)) + fi = i + return +} + +// Sync implements Accessor. Specifically it synchronizes the memory and file images. +func (a *memaccessor) Sync() (err error) { + var n int + if n, err = a.f.WriteAt(a.b, 0); n != len(a.b) { + return + } + + return a.f.Truncate(int64(len(a.b))) +} + +func (a *memaccessor) Truncate(size int64) (err error) { + defer func() { + if e := recover(); e != nil { + err = e.(error) + } + }() + + if size > math.MaxInt32 { + panic(errors.New("truncate: illegal size")) + } + + a.b = a.b[:int(size)] + return +} + +func (a *memaccessor) WriteAt(b []byte, off int64) (n int, err error) { + if off < 0 || off > math.MaxInt32 { + return -1, errors.New("WriteAt: illegal offset") + } + + rq, fp, size := len(b), int(off), len(a.b) + if need := rq + fp; need > size { + if need <= cap(a.b) { + a.b = a.b[:need] + } else { + nb := make([]byte, need, 2*need) + copy(nb, a.b) + a.b = nb + } + } + + copy(a.b[int(off):], b) + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/mem_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/mem_test.go new file mode 100644 index 00000000..921948c6 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/mem_test.go @@ -0,0 +1,15 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package storage + +import ( + "testing" +) + +func Test(t *testing.T) { + t.Log("TODO placeholder") //TODO +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/probe.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/probe.go new file mode 100644 index 00000000..53b146a6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/probe.go @@ -0,0 +1,74 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package storage + +import "sync/atomic" + +// Probe collects usage statistics of the embeded Accessor. +// Probe itself IS an Accessor. +type Probe struct { + Accessor + Chain *Probe + OpsRd int64 + OpsWr int64 + BytesRd int64 + BytesWr int64 + SectorsRd int64 // Assuming 512 byte sector size + SectorsWr int64 +} + +// NewProbe returns a newly created probe which embedes the src Accessor. +// The retuned *Probe satisfies Accessor. if chain != nil then Reset() +// is cascaded down the chained Probes. +func NewProbe(src Accessor, chain *Probe) *Probe { + return &Probe{Accessor: src, Chain: chain} +} + +func reset(n *int64) { + atomic.AddInt64(n, -atomic.AddInt64(n, 0)) +} + +// Reset zeroes the collected statistics of p. 
+func (p *Probe) Reset() { + if p.Chain != nil { + p.Chain.Reset() + } + reset(&p.OpsRd) + reset(&p.OpsWr) + reset(&p.BytesRd) + reset(&p.BytesWr) + reset(&p.SectorsRd) + reset(&p.SectorsWr) +} + +func (p *Probe) ReadAt(b []byte, off int64) (n int, err error) { + n, err = p.Accessor.ReadAt(b, off) + atomic.AddInt64(&p.OpsRd, 1) + atomic.AddInt64(&p.BytesRd, int64(n)) + if n <= 0 { + return + } + + sectorFirst := off >> 9 + sectorLast := (off + int64(n) - 1) >> 9 + atomic.AddInt64(&p.SectorsRd, sectorLast-sectorFirst+1) + return +} + +func (p *Probe) WriteAt(b []byte, off int64) (n int, err error) { + n, err = p.Accessor.WriteAt(b, off) + atomic.AddInt64(&p.OpsWr, 1) + atomic.AddInt64(&p.BytesWr, int64(n)) + if n <= 0 { + return + } + + sectorFirst := off >> 9 + sectorLast := (off + int64(n) - 1) >> 9 + atomic.AddInt64(&p.SectorsWr, sectorLast-sectorFirst+1) + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/probe_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/probe_test.go new file mode 100644 index 00000000..00eca8ee --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/probe_test.go @@ -0,0 +1,86 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// blame: jnml, labs.nic.cz + +package storage + +import ( + "os" + "testing" +) + +func (p *Probe) assert(t *testing.T, msg int, opsRd, opsWr, bytesRd, bytesWr, sectorsRd, sectorsWr int64) { + if n := p.OpsRd; n != opsRd { + t.Fatal(msg, n, opsRd) + } + + if n := p.OpsWr; n != opsWr { + t.Fatal(msg+1, n, opsWr) + } + + if n := p.BytesRd; n != bytesRd { + t.Fatal(msg+2, n, bytesRd) + } + + if n := p.BytesWr; n != bytesWr { + t.Fatal(msg+3, n, bytesWr) + } + + if n := p.SectorsRd; n != sectorsRd { + t.Fatal(msg+4, n, sectorsRd) + } + + if n := p.SectorsWr; n != sectorsWr { + t.Fatal(msg+5, n, sectorsWr) + } +} + +func TestProbe(t *testing.T) { + return //TODO disabled due to atomic.AddInt64 failing on W32 + const fn = "test.tmp" + + store, err := NewFile(fn, os.O_CREATE|os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + t.Fatal(10, err) + } + + defer func() { + ec := store.Close() + er := os.Remove(fn) + if ec != nil { + t.Fatal(10000, ec) + } + if er != nil { + t.Fatal(10001, er) + } + }() + + probe := NewProbe(store, nil) + if n, err := probe.WriteAt([]byte{1}, 0); n != 1 { + t.Fatal(20, err) + } + + probe.assert(t, 30, 0, 1, 0, 1, 0, 1) + b := []byte{0} + if n, err := probe.ReadAt(b, 0); n != 1 { + t.Fatal(40, err) + } + + if n := b[0]; n != 1 { + t.Fatal(50, n, 1) + } + + probe.assert(t, 60, 1, 1, 1, 1, 1, 1) + if n, err := probe.WriteAt([]byte{2, 3}, 510); n != 2 { + t.Fatal(70, err) + } + + probe.assert(t, 80, 1, 2, 1, 3, 1, 2) + if n, err := probe.WriteAt([]byte{2, 3}, 511); n != 2 { + t.Fatal(90, err) + } + + probe.assert(t, 100, 1, 3, 1, 5, 1, 4) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/storage.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/storage.go new file mode 100644 index 00000000..4956053a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/storage.go @@ -0,0 +1,141 @@ +// Copyright (c) 2011 
CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +// WIP: Package storage defines and implements storage providers and store accessors. +package storage + +import ( + "os" + "sync" + "time" +) + +// FileInfo is a type implementing os.FileInfo which has setable fields, like +// the older os.FileInfo used to have. It is used wehere e.g. the Size is +// needed to be faked (encapsulated/memory only file, file cache, etc.). +type FileInfo struct { + FName string // base name of the file + FSize int64 // length in bytes + FMode os.FileMode // file mode bits + FModTime time.Time // modification time + FIsDir bool // abbreviation for Mode().IsDir() + sys interface{} // underlying data source (can be nil) +} + +// NewFileInfo creates FileInfo from os.FileInfo fi. +func NewFileInfo(fi os.FileInfo, sys interface{}) *FileInfo { + return &FileInfo{fi.Name(), fi.Size(), fi.Mode(), fi.ModTime(), fi.IsDir(), sys} +} + +// Implementation of os.FileInfo +func (fi *FileInfo) Name() string { + return fi.FName +} + +// Implementation of os.FileInfo +func (fi *FileInfo) Size() int64 { + return fi.FSize +} + +// Implementation of os.FileInfo +func (fi *FileInfo) Mode() os.FileMode { + return fi.FMode +} + +// Implementation of os.FileInfo +func (fi *FileInfo) ModTime() time.Time { + return fi.FModTime +} + +// Implementation of os.FileInfo +func (fi *FileInfo) IsDir() bool { + return fi.FIsDir +} + +func (fi *FileInfo) Sys() interface{} { + return fi.sys +} + +// Accessor provides I/O methods to access a store. +type Accessor interface { + + // Close closes the store, rendering it unusable for I/O. It returns an + // error, if any. + Close() error + + // Name returns the name of the file as presented to Open. + Name() string + + // ReadAt reads len(b) bytes from the store starting at byte offset off. + // It returns the number of bytes read and the error, if any. 
+ // EOF is signaled by a zero count with err set to os.EOF. + // ReadAt always returns a non-nil Error when n != len(b). + ReadAt(b []byte, off int64) (n int, err error) + + // Stat returns the FileInfo structure describing the store. It returns + // the os.FileInfo and an error, if any. + Stat() (fi os.FileInfo, err error) + + // Sync commits the current contents of the store to stable storage. + // Typically, this means flushing the file system's in-memory copy of + // recently written data to disk. + Sync() (err error) + + // Truncate changes the size of the store. It does not change the I/O + // offset. + Truncate(size int64) error + + // WriteAt writes len(b) bytes to the store starting at byte offset off. + // It returns the number of bytes written and an error, if any. + // WriteAt returns a non-nil Error when n != len(b). + WriteAt(b []byte, off int64) (n int, err error) + + // Before every [structural] change of a store the BeginUpdate is to be + // called and paired with EndUpdate after the change makes the store's + // state consistent again. Invocations of BeginUpdate may nest. On + // invoking the last non nested EndUpdate an implicit "commit" should + // be performed by the store/provider. The concrete mechanism is + // unspecified. It could be for example a write-ahead log. Stores may + // implement BeginUpdate and EndUpdate as a (documented) no op. + BeginUpdate() error + EndUpdate() error +} + +// Mutate is a helper/wrapper for executing f in between a.BeginUpdate and +// a.EndUpdate. Any parameters and/or return values except an error should be +// captured by a function literal passed as f. The returned err is either nil +// or the first non nil error returned from the sequence of execution: +// BeginUpdate, [f,] EndUpdate. The pair BeginUpdate/EndUpdate *is* invoked +// always regardles of any possible errors produced. Mutate doesn't handle +// panic, it should be used only with a function [literal] which doesn't panic. 
+// Otherwise the pairing of BeginUpdate/EndUpdate is not guaranteed. +// +// NOTE: If BeginUpdate, which is invoked before f, returns a non-nil error, +// then f is not invoked at all (but EndUpdate still is). +func Mutate(a Accessor, f func() error) (err error) { + defer func() { + if e := a.EndUpdate(); e != nil && err == nil { + err = e + } + }() + + if err = a.BeginUpdate(); err != nil { + return + } + + return f() +} + +// LockedMutate wraps Mutate in yet another layer consisting of a +// l.Lock/l.Unlock pair. All other limitations apply as in Mutate, e.g. no +// panics are allowed to happen - otherwise no guarantees can be made about +// Unlock matching the Lock. +func LockedMutate(a Accessor, l sync.Locker, f func() error) (err error) { + l.Lock() + defer l.Unlock() + + return Mutate(a, f) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/test_deps.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/test_deps.go new file mode 100644 index 00000000..92ac44a5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/storage/test_deps.go @@ -0,0 +1,13 @@ +// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package storage + +// Pull test dependencies too. +// Enables easy 'go test X' after 'go get X' +import ( +// nothing yet +) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/test_deps.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/test_deps.go new file mode 100644 index 00000000..eec608ab --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/fileutil/test_deps.go @@ -0,0 +1,13 @@ +// Copyright (c) 2014 The fileutil Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// blame: jnml, labs.nic.cz + +package fileutil + +// Pull test dependencies too. +// Enables easy 'go test X' after 'go get X' +import ( +// nothing yet +) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/LICENSE new file mode 100644 index 00000000..65d761bc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/Makefile b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/Makefile new file mode 100644 index 00000000..b04fc722 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/Makefile @@ -0,0 +1,28 @@ +.PHONY: all editor todo clean nuke + +grep=--include=*.go --include=*.run --include=*.y + +all: editor + go build + go vet + go install + make todo + +clean: + go clean + rm -f *~ _testdata/temp* + +editor: + go fmt + go test -i + go test + +nuke: clean + go clean -i +todo: + @grep -nr $(grep) ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* * || true + @grep -nrw $(grep) BUG * || true + @grep -nrw $(grep) LATER * || true + @grep -nrw $(grep) MAYBE * || true + @grep -nrw $(grep) TODO * || true + @grep -nrw $(grep) println * || true diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/README.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/README.md new file mode 100644 index 00000000..a22116d6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/README.md @@ -0,0 +1,10 @@ +kv +== + +Package kv implements a simple and easy to use persistent key/value (KV) store. 
+ +Installation + + $ go get github.com/cznic/kv + +Documentation: [godoc.org/github.com/cznic/kv](http://godoc.org/github.com/cznic/kv) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/_testdata/.2196ad2c3cbc669595720f0cfb6f0dd888bc64bc b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/_testdata/.2196ad2c3cbc669595720f0cfb6f0dd888bc64bc new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/_testdata/open.db b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/_testdata/open.db new file mode 100644 index 00000000..dd626dfe Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/_testdata/open.db differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/all_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/all_test.go new file mode 100644 index 00000000..38f70213 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/all_test.go @@ -0,0 +1,1910 @@ +// Copyright 2014 The kv Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package kv + +import ( + "encoding/binary" + "flag" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "path" + "path/filepath" + "runtime" + "runtime/debug" + "strings" + "sync" + "testing" + + "camlistore.org/third_party/github.com/cznic/fileutil" + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +const sz0 = 144 // size of an empty KV DB + +var ( + oDB = flag.String("db", "", "DB to use in BenchmarkEnumerateDB") + oKeep = flag.Bool("keep", false, "do not delete test DB (some tests)") +) + +func dbg(s string, va ...interface{}) { + if s == "" { + s = strings.Repeat("%v ", len(va)) + } + _, fn, fl, _ := runtime.Caller(1) + fmt.Printf("%s:%d: ", path.Base(fn), fl) + fmt.Printf(s, va...) + fmt.Println() +} + +func opts() *Options { + return &Options{ + noClone: true, + VerifyDbBeforeOpen: true, + VerifyDbAfterOpen: true, + VerifyDbBeforeClose: true, + VerifyDbAfterClose: true, + } +} + +func temp() (dir, name string) { + dir, err := ioutil.TempDir("", "test-kv-") + if err != nil { + panic(err) + } + + return dir, filepath.Join(dir, "test.tmp") +} + +func TestCreate(t *testing.T) { + o := opts() + + dir, name := temp() + defer os.RemoveAll(dir) + + db, err := Create(name, o) + if err != nil { + t.Fatal(err) + } + + defer func() { + if err := db.Close(); err != nil { + t.Error(err) + } + }() + + if _, err = Create(name, opts()); err == nil { + t.Error("unexpected success") + return + } + + if _, err = Open(name, opts()); err == nil { + t.Error("unexpected success") + return + } +} + +func TestCreateMem(t *testing.T) { + db, err := CreateMem(opts()) + if err != nil { + t.Fatal(err) + } + + if err = db.Close(); err != nil { + t.Fatal(err) + } +} + +func TestCreateTemp(t *testing.T) { + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + defer func() { + if err := db.Close(); err != nil { + t.Error(err) + } + }() +} + +func cp(dest, src string) { + b, err := 
ioutil.ReadFile(src) + if err != nil { + panic(err) + } + + f, err := os.Create(dest) + if err != nil { + panic(err) + } + + n, err := f.Write(b) + if n != len(b) { + panic(n) + } + + if err != nil { + panic(err) + } +} + +func TestOpen(t *testing.T) { + dir, _ := temp() + defer os.RemoveAll(dir) + + name := filepath.Join(dir, "open.db") + cp(name, "_testdata/open.db") + cp(filepath.Join(dir, ".2196ad2c3cbc669595720f0cfb6f0dd888bc64bc"), "_testdata/.2196ad2c3cbc669595720f0cfb6f0dd888bc64bc") + db, err := Open(name, opts()) + if err != nil { + t.Fatal(err) + } + + if err = db.Close(); err != nil { + t.Fatal(err) + } +} + +func TestClose(t *testing.T) { + o := opts() + + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + defer func() { + db.Close() + }() + + go db.Close() + if err := db.Close(); err != nil { + t.Error(err) + return + } + + if err := db.Close(); err != nil { + t.Error(err) + return + } +} + +func TestName(t *testing.T) { + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + if n := db.Name(); n == "" || + !strings.Contains(n, dir) || + !strings.HasPrefix(filepath.Base(n), "temp") || + !strings.HasSuffix(filepath.Base(n), ".db") || + path.Base(n) == "temp.db" { + t.Error(n) + } +} + +func TestSize(t *testing.T) { + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + sz, err := db.Size() + if err != nil { + t.Error(err) + return + } + + if sz != sz0 { + t.Error(sz, sz0) + } +} + +func TestVerify(t *testing.T) { + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + t.Log(db.Name(), o._WAL) + if err := db.Verify(nil, nil); err != nil 
{ + t.Error(err) + } +} + +//DONE xacts test +// ---- tested in lldb extensively + +func n2b(n int) []byte { + var b [8]byte + binary.BigEndian.PutUint64(b[:], uint64(n)) + return b[:] +} + +func b2n(b []byte) int { + if len(b) != 8 { + return mathutil.MinInt + } + + return int(binary.BigEndian.Uint64(b)) +} + +func fc() *mathutil.FC32 { + r, err := mathutil.NewFC32(0, math.MaxInt32, false) + if err != nil { + panic(err) + } + + return r +} + +func TestDelete(t *testing.T) { + const ( + n = 500 + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + dbname := db.Name() + defer db.Close() + + rng := fc() + var keys []int + for i := 0; i < n*m; i++ { + k, v := rng.Next(), rng.Next() + keys = append(keys, k) + if err := db.Set(n2b(k), n2b(v)); err != nil { + t.Error(err) + return + } + } + + c := make(chan int) + var wg sync.WaitGroup + x := 0 + for i := 0; i < m; i++ { + wg.Add(1) + go func(start int) { + defer wg.Done() + <-c + for _, k := range keys[start : start+n] { + if err := db.Delete(n2b(k)); err != nil { + t.Error(err) + return + } + } + }(x) + x += n + } + close(c) + wg.Wait() + + if err := db.Close(); err != nil { + t.Error(err) + return + } + + fi, err := os.Stat(dbname) + if err != nil { + t.Error(err) + return + } + + if sz := fi.Size(); sz != sz0 { + t.Error(sz, sz0) + } +} + +func BenchmarkDelete16(b *testing.B) { + const ( + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + rng := fc() + var keys []int + for i := 0; i < b.N; i++ { + k, v := rng.Next(), rng.Next() + keys = append(keys, k) + if err := db.Set(n2b(k), n2b(v)); err != nil { + b.Error(err) + return + } + } + + c := make(chan int) + var wg 
sync.WaitGroup + x := 0 + for i := 0; i < m; i++ { + wg.Add(1) + go func(start int) { + defer wg.Done() + <-c + for _, k := range keys[start : start+b.N/m] { + db.Delete(n2b(k)) + } + }(x) + x += b.N / m + } + b.ResetTimer() + close(c) + wg.Wait() + b.StopTimer() +} + +func TestExtract(t *testing.T) { + const ( + n = 500 + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + dbname := db.Name() + defer db.Close() + + rng := fc() + var keys, vals []int + for i := 0; i < n*m; i++ { + k, v := rng.Next(), rng.Next() + keys = append(keys, k) + vals = append(vals, v) + if err := db.Set(n2b(k), n2b(v)); err != nil { + t.Error(err) + return + } + } + + c := make(chan int) + var wg sync.WaitGroup + x := 0 + for i := 0; i < m; i++ { + wg.Add(1) + go func(start int) { + defer wg.Done() + <-c + for i, k := range keys[start : start+n] { + v, err := db.Extract(nil, n2b(k)) + if err != nil { + t.Error(err) + return + } + + if g, e := len(v), 8; g != e { + t.Error(err) + return + } + + if g, e := b2n(v), vals[start+i]; g != e { + t.Errorf("index %#x, key %#x, got %#x, want %#x", i, k, g, e) + return + } + } + }(x) + x += n + } + close(c) + wg.Wait() + + if err := db.Close(); err != nil { + t.Error(err) + return + } + + fi, err := os.Stat(dbname) + if err != nil { + t.Error(err) + return + } + + if sz := fi.Size(); sz != sz0 { + t.Error(sz, sz0) + } +} + +func BenchmarkExtract16(b *testing.B) { + const ( + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + rng := fc() + var keys, vals []int + for i := 0; i < b.N; i++ { + k, v := rng.Next(), rng.Next() + keys = append(keys, k) + vals = append(vals, v) + if err := db.Set(n2b(k), n2b(v)); 
err != nil { + b.Error(err) + return + } + } + + c := make(chan int) + var wg sync.WaitGroup + x := 0 + for i := 0; i < m; i++ { + wg.Add(1) + go func(start int) { + defer wg.Done() + buf := make([]byte, 8) + <-c + for _, k := range keys[start : start+b.N/m] { + db.Extract(buf, n2b(k)) + } + }(x) + x += b.N / m + } + b.ResetTimer() + close(c) + wg.Wait() + b.StopTimer() +} + +func TestFirst(t *testing.T) { + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + k, v, err := db.First() + if err != nil { + t.Error(err) + return + } + + if k != nil { + t.Error(k) + return + } + + if v != nil { + t.Error(v) + return + } + + if err := db.Set(n2b(10), n2b(100)); err != nil { + t.Error(err) + return + } + + k, v, err = db.First() + if err != nil { + t.Error(err) + return + } + + if len(k) != 8 { + t.Error(k) + return + } + + if g, e := b2n(k), 10; g != e { + t.Error(g, e) + return + } + + if len(v) != 8 { + t.Error(v) + return + } + + if g, e := b2n(v), 100; g != e { + t.Error(g, e) + return + } + + if err := db.Set(n2b(20), n2b(200)); err != nil { + t.Error(err) + return + } + + k, v, err = db.First() + if err != nil { + t.Error(err) + return + } + + if len(k) != 8 { + t.Error(k) + return + } + + if g, e := b2n(k), 10; g != e { + t.Error(g, e) + return + } + + if len(v) != 8 { + t.Error(v) + return + } + + if g, e := b2n(v), 100; g != e { + t.Error(g, e) + return + } + + if err := db.Set(n2b(5), n2b(50)); err != nil { + t.Error(err) + return + } + + k, v, err = db.First() + if err != nil { + t.Error(err) + return + } + + if len(k) != 8 { + t.Error(k) + return + } + + if g, e := b2n(k), 5; g != e { + t.Error(g, e) + return + } + + if len(v) != 8 { + t.Error(v) + return + } + + if g, e := b2n(v), 50; g != e { + t.Error(g, e) + return + } + +} + +func BenchmarkFirst16(b *testing.B) { + const n = 5000 + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o 
:= opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + b.Fatal(err) + } + + defer func() { + db.Close() + os.Remove(o._WAL) + }() + + rng := fc() + for i := 0; i < n; i++ { + if err := db.Set(n2b(rng.Next()), n2b(rng.Next())); err != nil { + b.Fatal(err) + } + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + db.First() + } + b.StopTimer() +} + +func TestGet(t *testing.T) { + const ( + n = 800 + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + defer func() { + db.Close() + os.Remove(o._WAL) + }() + + rng := fc() + var keys, vals []int + for i := 0; i < n*m; i++ { + k, v := rng.Next(), rng.Next() + keys = append(keys, k) + vals = append(vals, v) + if err := db.Set(n2b(k), n2b(v)); err != nil { + t.Error(err) + return + } + } + + c := make(chan int) + var wg sync.WaitGroup + x := 0 + for i := 0; i < m; i++ { + wg.Add(1) + go func(start int) { + defer wg.Done() + buf := make([]byte, 8) + <-c + for i, k := range keys[start : start+n] { + v, err := db.Get(buf, n2b(k)) + if err != nil { + t.Error(err) + return + } + + if g, e := len(v), 8; g != e { + t.Error(err) + return + } + + if g, e := b2n(v), vals[start+i]; g != e { + t.Errorf("index %#x, key %#x, got %#x, want %#x", i, k, g, e) + return + } + } + }(x) + x += n + } + close(c) + wg.Wait() +} + +func BenchmarkGet16(b *testing.B) { + const ( + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + rng := fc() + var keys, vals []int + for i := 0; i < b.N; i++ { + k, v := rng.Next(), rng.Next() + keys = append(keys, k) + vals = append(vals, v) + if err := db.Set(n2b(k), n2b(v)); err != nil { + b.Error(err) 
+ return + } + } + + c := make(chan int) + var wg sync.WaitGroup + x := 0 + for i := 0; i < m; i++ { + wg.Add(1) + go func(start int) { + defer wg.Done() + buf := make([]byte, 8) + <-c + for _, k := range keys[start : start+b.N/m] { + db.Get(buf, n2b(k)) + } + }(x) + x += b.N / m + } + b.ResetTimer() + close(c) + wg.Wait() + b.StopTimer() +} + +func TestInc(t *testing.T) { + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + defer func() { + db.Close() + }() + + v, err := db.Inc(nil, 1) + if err != nil { + t.Error(err) + return + } + + if g, e := v, int64(1); g != e { + t.Error(g, e) + return + } + + v, err = db.Inc(nil, 2) + if err != nil { + t.Error(err) + return + } + + if g, e := v, int64(3); g != e { + t.Error(g, e) + return + } + + if err := db.Set(nil, nil); err != nil { + t.Error(err) + return + } + + v, err = db.Inc(nil, 4) + if err != nil { + t.Error(err) + return + } + + if g, e := v, int64(4); g != e { + t.Error(g, e) + return + } + + if err := db.Set(nil, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}); err != nil { + t.Error(err) + return + } + + v, err = db.Inc(nil, 5) + if err != nil { + t.Error(err) + return + } + + if g, e := v, int64(5); g != e { + t.Error(g, e) + return + } + +} + +func TestInc2(t *testing.T) { + const ( + n = 10000 + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + defer func() { + db.Close() + }() + + c := make(chan int) + var wg sync.WaitGroup + sum := 0 + for i := 0; i < m; i++ { + wg.Add(1) + go func(n, delta int) { + defer wg.Done() + <-c + for i := 0; i < n; i++ { + if _, err := db.Inc(nil, int64(delta)); err != nil { + t.Error(err) + return + } + } + }(n, i) + sum += n * i + } + close(c) + wg.Wait() + v, err := 
db.Get(nil, nil) + if err != nil { + t.Error(err) + return + } + + if n := len(v); n != 8 { + t.Error(n, 8) + return + } + + if g, e := b2n(v), sum; g != e { + t.Errorf("%#x %#x", g, e) + } +} + +func BenchmarkInc(b *testing.B) { + const ( + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + c := make(chan int) + var wg sync.WaitGroup + for i := 0; i < m; i++ { + wg.Add(1) + go func(n, delta int) { + defer wg.Done() + <-c + for i := 0; i < b.N/m; i++ { + db.Inc(nil, int64(delta)) + } + }(3*i, 5*i) + } + b.ResetTimer() + close(c) + wg.Wait() + b.StopTimer() +} + +func TestLast(t *testing.T) { + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + defer func() { + db.Close() + }() + + k, v, err := db.Last() + if err != nil { + t.Error(err) + return + } + + if k != nil { + t.Error(k) + return + } + + if v != nil { + t.Error(v) + return + } + + if err := db.Set(n2b(10), n2b(100)); err != nil { + t.Error(err) + return + } + + k, v, err = db.Last() + if err != nil { + t.Error(err) + return + } + + if len(k) != 8 { + t.Error(k) + return + } + + if g, e := b2n(k), 10; g != e { + t.Error(g, e) + return + } + + if len(v) != 8 { + t.Error(v) + return + } + + if g, e := b2n(v), 100; g != e { + t.Error(g, e) + return + } + + if err := db.Set(n2b(5), n2b(50)); err != nil { + t.Error(err) + return + } + + k, v, err = db.Last() + if err != nil { + t.Error(err) + return + } + + if len(k) != 8 { + t.Error(k) + return + } + + if g, e := b2n(k), 10; g != e { + t.Error(g, e) + return + } + + if len(v) != 8 { + t.Error(v) + return + } + + if g, e := b2n(v), 100; g != e { + t.Error(g, e) + return + } + + if err := db.Set(n2b(20), n2b(200)); err != nil { + t.Error(err) + return + } + + k, v, err = db.Last() + if 
err != nil { + t.Error(err) + return + } + + if len(k) != 8 { + t.Error(k) + return + } + + if g, e := b2n(k), 20; g != e { + t.Error(g, e) + return + } + + if len(v) != 8 { + t.Error(v) + return + } + + if g, e := b2n(v), 200; g != e { + t.Error(g, e) + return + } + +} + +func BenchmarkLast16(b *testing.B) { + const n = 5000 + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + db, err := CreateTemp("_testdata", "temp", ".db", o) + if err != nil { + b.Fatal(err) + } + + dbname := db.Name() + defer func(n string) { + db.Close() + os.Remove(n) + os.Remove(o._WAL) + }(dbname) + + rng := fc() + for i := 0; i < n; i++ { + if err := db.Set(n2b(rng.Next()), n2b(rng.Next())); err != nil { + b.Fatal(err) + } + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + db.Last() + } + b.StopTimer() +} + +func TestPut(t *testing.T) { + const ( + n = 800 + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + rng := fc() + var keys, vals []int + for i := 0; i < n*m; i++ { + k, v := rng.Next(), rng.Next() + keys = append(keys, k) + vals = append(vals, v) + } + + c := make(chan int) + var wg sync.WaitGroup + x := 0 + for i := 0; i < m; i++ { + wg.Add(1) + go func(start int) { + defer wg.Done() + buf := make([]byte, 8) + <-c + for i, k := range keys[start : start+n] { + if _, _, err := db.Put(buf, n2b(k), func(key, old []byte) (new []byte, write bool, err error) { + return n2b(vals[start+i]), true, nil + }); err != nil { + t.Error(err) + return + } + } + }(x) + x += n + } + close(c) + wg.Wait() + buf := make([]byte, 8) + for i, k := range keys { + v, err := db.Get(buf, n2b(k)) + if err != nil { + t.Error(err) + return + } + + if g, e := len(v), 8; g != e { + t.Error(g, e) + } + + if g, e := b2n(v), vals[i]; g != e { + t.Error(g, e) + return + } + } +} + +func BenchmarkPut16(b 
*testing.B) { + const ( + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + rng := fc() + var keys, vals []int + for i := 0; i < b.N; i++ { + k, v := rng.Next(), rng.Next() + keys = append(keys, k) + vals = append(vals, v) + } + + c := make(chan int) + var wg sync.WaitGroup + x := 0 + for i := 0; i < m; i++ { + wg.Add(1) + go func(start int) { + defer wg.Done() + buf := make([]byte, 8) + <-c + for _, k := range keys[start : start+b.N/m] { + db.Put(buf, n2b(k), func(key, old []byte) (new []byte, write bool, err error) { + return buf, true, nil + }) + } + }(x) + x += b.N / m + } + b.ResetTimer() + close(c) + wg.Wait() + b.StopTimer() +} + +func TestSet(t *testing.T) { + const ( + n = 800 + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + t.Fatal(err) + } + + defer db.Close() + + rng := fc() + var keys, vals []int + for i := 0; i < n*m; i++ { + k, v := rng.Next(), rng.Next() + keys = append(keys, k) + vals = append(vals, v) + } + + c := make(chan int) + var wg sync.WaitGroup + x := 0 + for i := 0; i < m; i++ { + wg.Add(1) + go func(start int) { + defer wg.Done() + <-c + for i, k := range keys[start : start+n] { + if err := db.Set(n2b(k), n2b(vals[start+i])); err != nil { + t.Error(err) + return + } + } + }(x) + x += n + } + close(c) + wg.Wait() + buf := make([]byte, 8) + for i, k := range keys { + v, err := db.Get(buf, n2b(k)) + if err != nil { + t.Error(err) + return + } + + if g, e := len(v), 8; g != e { + t.Error(g, e) + } + + if g, e := b2n(v), vals[i]; g != e { + t.Error(g, e) + return + } + } +} + +func BenchmarkSet16(b *testing.B) { + const ( + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + 
dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + rng := fc() + var keys, vals []int + for i := 0; i < b.N; i++ { + k, v := rng.Next(), rng.Next() + keys = append(keys, k) + vals = append(vals, v) + } + + c := make(chan int) + var wg sync.WaitGroup + x := 0 + for i := 0; i < m; i++ { + wg.Add(1) + go func(start int) { + defer wg.Done() + buf := make([]byte, 8) + <-c + for _, k := range keys[start : start+b.N/m] { + db.Set(n2b(k), buf) + } + }(x) + x += b.N / m + } + b.ResetTimer() + close(c) + wg.Wait() + b.StopTimer() +} + +func TestSeekNext(t *testing.T) { + // seeking within 3 keys: 10, 20, 30 + table := []struct { + k int + hit bool + keys []int + }{ + {5, false, []int{10, 20, 30}}, + {10, true, []int{10, 20, 30}}, + {15, false, []int{20, 30}}, + {20, true, []int{20, 30}}, + {25, false, []int{30}}, + {30, true, []int{30}}, + {35, false, []int{}}, + } + + for i, test := range table { + up := test.keys + db, err := CreateMem(opts()) + if err != nil { + t.Fatal(i, err) + } + + if err := db.Set(n2b(10), n2b(100)); err != nil { + t.Fatal(i, err) + } + + if err := db.Set(n2b(20), n2b(200)); err != nil { + t.Fatal(i, err) + } + + if err := db.Set(n2b(30), n2b(300)); err != nil { + t.Fatal(i, err) + } + + for brokenSerial := 0; brokenSerial < 16; brokenSerial++ { + en, hit, err := db.Seek(n2b(test.k)) + if err != nil { + t.Fatal(err) + } + + if g, e := hit, test.hit; g != e { + t.Fatal(i, g, e) + } + + j := 0 + for { + if brokenSerial&(1<= len(up) { + t.Fatal(i, j, brokenSerial) + } + + if g, e := b2n(k), up[j]; g != e { + t.Fatal(i, j, brokenSerial, g, e) + } + + if g, e := len(v), 8; g != e { + t.Fatal(i, g, e) + } + + if g, e := b2n(v), 10*up[j]; g != e { + t.Fatal(i, g, e) + } + + j++ + + } + + if g, e := j, len(up); g != e { + t.Fatal(i, j, g, e) + } + } + + } +} + +func TestSeekPrev(t *testing.T) { + // seeking within 3 keys: 10, 20, 30 + table := []struct { + 
k int + hit bool + keys []int + }{ + {5, false, []int{10}}, + {10, true, []int{10}}, + {15, false, []int{20, 10}}, + {20, true, []int{20, 10}}, + {25, false, []int{30, 20, 10}}, + {30, true, []int{30, 20, 10}}, + {35, false, []int{}}, + } + + for i, test := range table { + down := test.keys + db, err := CreateMem(opts()) + if err != nil { + t.Fatal(i, err) + } + + if err := db.Set(n2b(10), n2b(100)); err != nil { + t.Fatal(i, err) + } + + if err := db.Set(n2b(20), n2b(200)); err != nil { + t.Fatal(i, err) + } + + if err := db.Set(n2b(30), n2b(300)); err != nil { + t.Fatal(i, err) + } + + for brokenSerial := 0; brokenSerial < 16; brokenSerial++ { + en, hit, err := db.Seek(n2b(test.k)) + if err != nil { + t.Fatal(err) + } + + if g, e := hit, test.hit; g != e { + t.Fatal(i, g, e) + } + + j := 0 + for { + if brokenSerial&(1<= len(down) { + t.Fatal(i, j, brokenSerial) + } + + if g, e := b2n(k), down[j]; g != e { + t.Fatal(i, j, brokenSerial, g, e) + } + + if g, e := len(v), 8; g != e { + t.Fatal(i, g, e) + } + + if g, e := b2n(v), 10*down[j]; g != e { + t.Fatal(i, g, e) + } + + j++ + + } + + if g, e := j, len(down); g != e { + t.Fatal(i, j, g, e) + } + } + + } +} + +func BenchmarkSeek(b *testing.B) { + const ( + m = 4 + ) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + rng := fc() + var keys, vals []int + for i := 0; i < b.N; i++ { + k, v := rng.Next(), rng.Next() + keys = append(keys, k) + vals = append(vals, v) + if err := db.Set(n2b(k), n2b(v)); err != nil { + b.Error(err) + return + } + } + + c := make(chan int) + var wg sync.WaitGroup + x := 0 + for i := 0; i < m; i++ { + wg.Add(1) + go func(start int) { + defer wg.Done() + <-c + for _, k := range keys[start : start+b.N/m] { + db.Seek(n2b(k)) + } + }(x) + x += b.N / m + } + b.ResetTimer() + close(c) + wg.Wait() + b.StopTimer() +} + +func 
BenchmarkNext1e3(b *testing.B) { + const N = int(1e3) + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + o := opts() + dir, _ := temp() + defer os.RemoveAll(dir) + + db, err := CreateTemp(dir, "temp", ".db", o) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + + for i := 0; i < N; i++ { + if err := db.Set(n2b(i), n2b(17*i)); err != nil { + b.Error(err) + return + } + } + + b.ResetTimer() + b.StopTimer() + var n int + for i := 0; i < b.N; i++ { + en, err := db.SeekFirst() + if err != nil { + b.Error(err) + return + } + + b.StartTimer() + for n = 0; ; n++ { + if _, _, err := en.Next(); err != nil { + break + } + } + b.StopTimer() + if g, e := n, N; g != e { + b.Error(g, e) + return + } + } + b.StopTimer() +} + +func TestSeekFirst(t *testing.T) { + db, err := CreateMem(opts()) + if err != nil { + t.Fatal(err) + } + + en, err := db.SeekFirst() + if err == nil { + t.Fatal(err) + } + + if err := db.Set(n2b(100), n2b(1000)); err != nil { + t.Fatal(err) + } + + if en, err = db.SeekFirst(); err != nil { + t.Fatal(err) + } + + k, v, err := en.Next() + if err != nil { + t.Fatal(err) + } + + if g, e := b2n(k), 100; g != e { + t.Fatal(g, e) + } + + if g, e := b2n(v), 1000; g != e { + t.Fatal(g, e) + } + + if err := db.Set(n2b(110), n2b(1100)); err != nil { + t.Fatal(err) + } + + if en, err = db.SeekFirst(); err != nil { + t.Fatal(err) + } + + if k, v, err = en.Next(); err != nil { + t.Fatal(err) + } + + if g, e := b2n(k), 100; g != e { + t.Fatal(g, e) + } + + if g, e := b2n(v), 1000; g != e { + t.Fatal(g, e) + } + + if err := db.Set(n2b(90), n2b(900)); err != nil { + t.Fatal(err) + } + + if en, err = db.SeekFirst(); err != nil { + t.Fatal(err) + } + + if k, v, err = en.Next(); err != nil { + t.Fatal(err) + } + + if g, e := b2n(k), 90; g != e { + t.Fatal(g, e) + } + + if g, e := b2n(v), 900; g != e { + t.Fatal(g, e) + } + +} + +func TestSeekLast(t *testing.T) { + db, err := CreateMem(opts()) + if err != nil { + t.Fatal(err) + } + + en, err := db.SeekLast() + 
if err == nil { + t.Fatal(err) + } + + if err := db.Set(n2b(100), n2b(1000)); err != nil { + t.Fatal(err) + } + + if en, err = db.SeekLast(); err != nil { + t.Fatal(err) + } + + k, v, err := en.Next() + if err != nil { + t.Fatal(err) + } + + if g, e := b2n(k), 100; g != e { + t.Fatal(g, e) + } + + if g, e := b2n(v), 1000; g != e { + t.Fatal(g, e) + } + + if err := db.Set(n2b(90), n2b(900)); err != nil { + t.Fatal(err) + } + + if en, err = db.SeekLast(); err != nil { + t.Fatal(err) + } + + if k, v, err = en.Next(); err != nil { + t.Fatal(err) + } + + if g, e := b2n(k), 100; g != e { + t.Fatal(g, e) + } + + if g, e := b2n(v), 1000; g != e { + t.Fatal(g, e) + } + + if err := db.Set(n2b(110), n2b(1100)); err != nil { + t.Fatal(err) + } + + if en, err = db.SeekLast(); err != nil { + t.Fatal(err) + } + + if k, v, err = en.Next(); err != nil { + t.Fatal(err) + } + + if g, e := b2n(k), 110; g != e { + t.Fatal(g, e) + } + + if g, e := b2n(v), 1100; g != e { + t.Fatal(g, e) + } + +} + +func TestWALName(t *testing.T) { + db, err := CreateTemp("", "kv-wal-name", ".test", opts()) + if err != nil { + t.Fatal(err) + } + + defer func(n, wn string) { + if _, err := os.Stat(n); err != nil { + t.Error(err) + } else { + if err := os.Remove(n); err != nil { + t.Error(err) + } + } + if _, err := os.Stat(wn); err != nil { + t.Error(err) + } else { + if err := os.Remove(wn); err != nil { + t.Error(err) + } + } + t.Logf("%q\n%q", n, wn) + + }(db.Name(), db.WALName()) + + if err := db.Close(); err != nil { + t.Error(err) + return + } + + if n := db.WALName(); n != "" { + t.Error(n) + } +} + +func TestCreateWithEmptyWAL(t *testing.T) { + dir, err := ioutil.TempDir("", "kv-test-create") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(dir) + dbName := filepath.Join(dir, "test.db") + var o Options + walName := o.walName(dbName, "") + wal, err := os.Create(walName) + if err != nil { + t.Error(err) + return + } + + wal.Close() + defer os.Remove(walName) + + db, err := Create(dbName, 
&Options{}) + if err != nil { + t.Error(err) + return + } + + if err = db.Set([]byte("foo"), []byte("bar")); err != nil { + t.Error(err) + } + db.Close() +} + +func TestCreateWithNonEmptyWAL(t *testing.T) { + dir, err := ioutil.TempDir("", "kv-test-create") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(dir) + dbName := filepath.Join(dir, "test.db") + var o Options + walName := o.walName(dbName, "") + wal, err := os.Create(walName) + if err != nil { + t.Error(err) + return + } + + if n, err := wal.Write([]byte{0}); n != 1 || err != nil { + t.Error(n, err) + return + } + + wal.Close() + defer os.Remove(walName) + + if _, err = Create(dbName, &Options{}); err == nil { + t.Error("Unexpected success") + return + } +} + +func BenchmarkEnumerateDB(b *testing.B) { + g := runtime.GOMAXPROCS(0) + defer runtime.GOMAXPROCS(g) + var db *DB + var err error + switch nm := *oDB; { + case nm != "": + db, err = Open(nm, &Options{ + VerifyDbBeforeOpen: true, + VerifyDbAfterOpen: true, + VerifyDbBeforeClose: true, + VerifyDbAfterClose: true, + }) + if err != nil { + b.Fatal(err) + } + + defer db.Close() + default: + db, err = CreateMem(&Options{}) + if err != nil { + b.Fatal(err) + } + + for i := 0; i < 1e3; i++ { + if err := db.Set(n2b(i), n2b(i)); err != nil { + b.Fatal(err) + } + } + } + + var n int + debug.FreeOSMemory() + b.ResetTimer() + for i := 0; i < b.N; i++ { + n = 0 + en, err := db.SeekFirst() + if err != nil { + b.Fatal(err) + } + + for { + _, _, err := en.Next() + if err != nil { + if err == io.EOF { + break + } + + b.Fatal(err) + } + + n++ + } + } + b.StopTimer() +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/doc.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/doc.go new file mode 100644 index 00000000..8157be8b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/doc.go @@ -0,0 +1,82 @@ +// Copyright 2014 The kv Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Package kv implements a simple and easy to use persistent key/value (KV) store. + +The stored KV pairs are sorted in the key collation order defined by an user +supplied 'compare' function (passed as a field in Options). + +Keys and Values Limits + +Keys, as well as the values associated with them, are opaque []bytes. Maximum +size of a "native" key or value is 65787 bytes. Larger keys or values have to +be composed of the "native" ones in client code. + +Database limits + +The maximum DB size kv can handle is 2^60 bytes (1 exabyte). See also [4]: +"Block handles". + +ACID and transactional properties + +Transactions are resource limited. All changes made by a transaction are held +in memory until the top level transaction is committed. ACID[1] implementation +notes/details follows. + +Atomicity + +A successfully committed transaction appears (by its effects on the database) +to be indivisible ("atomic") iff the transaction is performed in isolation. An +aborted (via RollBack) transaction appears like it never happened under the +same limitation. + +Atomic updates to the DB, via functions like Set, Inc, etc., are performed in +their own automatic transaction. If the partial progress of any such function +fails at any point, the automatic transaction is canceled via Rollback before +returning from the function. A non nil error is returned in that case. + +Consistency + +All reads, including those made from any other concurrent non isolated +transaction(s), performed during a not yet committed transaction, are dirty +reads, i.e. the data returned are consistent with the in-progress state of the +open transaction, or all of the open transactions. Obviously, conflicts, data +races and inconsistent states can happen, but iff non isolated transactions are +performed. 
+ +Performing a Rollback at a nested transaction level properly returns the +transaction state (and data read from the DB) to what it was before the +respective BeginTransaction. + +Isolation + +Transactions of the atomic updating functions (Set, Put, Delete ...) are always +isolated. Transactions controlled by BeginTransaction/Commit/RollBack, are +isolated iff their execution is serialized. + +Durability + +Transactions are committed using the two phase commit protocol(2PC)[2] and a +write ahead log(WAL)[3]. DB recovery after a crash is performed automatically +using data from the WAL. Last transaction data, either of an in progress +transaction or a transaction being committed at the moment of the crash, can get +lost. + +No protection from non readable files, files corrupted by other processes or by +memory faults or other HW problems, is provided. Always properly backup your DB +data file(s). + +Links + +Referenced from above: + + [1]: http://en.wikipedia.org/wiki/ACID + [2]: http://en.wikipedia.org/wiki/2PC + [3]: http://en.wikipedia.org/wiki/Write_ahead_logging + [4]: http://godoc.org/github.com/cznic/exp/lldb#Allocator + +*/ +package kv diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/etc.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/etc.go new file mode 100644 index 00000000..9ba70725 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/etc.go @@ -0,0 +1,56 @@ +// Copyright 2014 The kv Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package kv + +import ( + "bytes" + "fmt" + + "camlistore.org/third_party/github.com/cznic/fileutil" +) + +type header struct { + magic []byte + ver byte + reserved []byte +} + +func (h *header) rd(b []byte) error { + if len(b) != 16 { + panic("internal error") + } + + if h.magic = b[:4]; bytes.Compare(h.magic, []byte(magic)) != 0 { + return fmt.Errorf("Unknown file format") + } + + b = b[4:] + h.ver = b[0] + h.reserved = b[1:] + return nil +} + +// Get a 7B int64 from b +func b2h(b []byte) (h int64) { + for _, v := range b[:7] { + h = h<<8 | int64(v) + } + return +} + +// Put a 7B int64 into b +func h2b(b []byte, h int64) []byte { + for i := range b[:7] { + b[i], h = byte(h>>48), h<<8 + } + return b +} + +func noEof(e error) (err error) { + if !fileutil.IsEOF(e) { + err = e + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/kv.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/kv.go new file mode 100644 index 00000000..7ff90bb1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/kv.go @@ -0,0 +1,831 @@ +// Copyright 2014 The kv Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package kv + +import ( + "encoding/binary" + "fmt" + "io" + "os" + "sync" + "time" + + "camlistore.org/third_party/github.com/cznic/bufs" + "camlistore.org/third_party/github.com/cznic/exp/lldb" + "camlistore.org/third_party/github.com/cznic/fileutil" +) + +const ( + magic = "\x60\xdbKV" +) + +const ( + stDisabled = iota // stDisabled must be zero + stIdle + stCollecting + stIdleArmed + stCollectingArmed + stCollectingTriggered + stEndUpdateFailed +) + +func init() { + if stDisabled != 0 { + panic("stDisabled != 0") + } +} + +// DB represents the database (the KV store). +type DB struct { + acidNest int // Grace period nesting level + acidState int // Grace period FSM state. 
+ acidTimer *time.Timer // Grace period timer + alloc *lldb.Allocator // The machinery. Wraps filer + bkl sync.Mutex // Big Kernel Lock + buffers bufs.Cache + closeMu sync.Mutex // Close() coordination + closed bool // it was + f *os.File // Underlying file. Potentially nil (if filer is lldb.MemFiler) + filer lldb.Filer // Wraps f + gracePeriod time.Duration // WAL grace period + isMem bool // No signal capture + lastCommitErr error // from failed EndUpdate + lock io.Closer // The DB file lock + opts *Options + root *lldb.BTree // The KV layer + wal *os.File // WAL if any +} + +// Create creates the named DB file mode 0666 (before umask). The file must not +// already exist. If successful, methods on the returned DB can be used for +// I/O; the associated file descriptor has mode os.O_RDWR. If there is an +// error, it will be of type *os.PathError. +// +// For the meaning of opts please see documentation of Options. +func Create(name string, opts *Options) (db *DB, err error) { + opts = opts.clone() + opts._ACID = _ACIDFull + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) + if err != nil { + return + } + + return create(f, lldb.NewSimpleFileFiler(f), opts, false) +} + +func create(f *os.File, filer lldb.Filer, opts *Options, isMem bool) (db *DB, err error) { + defer func() { + if db != nil { + db.opts = opts + } + }() + defer func() { + lock := opts.lock + if err != nil && lock != nil { + lock.Close() + db = nil + } + }() + + if err = opts.check(filer.Name(), true, !isMem); err != nil { + return + } + + b := [16]byte{byte(magic[0]), byte(magic[1]), byte(magic[2]), byte(magic[3]), 0x00} // ver 0x00 + if n, err := filer.WriteAt(b[:], 0); n != 16 { + return nil, &os.PathError{Op: "kv.create.WriteAt", Path: filer.Name(), Err: err} + } + + db = &DB{f: f, lock: opts.lock} + + if filer, err = opts.acidFiler(db, filer); err != nil { + return nil, err + } + + db.filer = filer + if err = filer.BeginUpdate(); err != nil { + return + } + + defer func() { + 
if e := filer.EndUpdate(); e != nil { + if err == nil { + err = e + } + } + }() + + if db.alloc, err = lldb.NewAllocator(lldb.NewInnerFiler(filer, 16), &lldb.Options{}); err != nil { + return nil, &os.PathError{Op: "kv.create", Path: filer.Name(), Err: err} + } + + db.alloc.Compress = true + db.isMem = isMem + var h int64 + if db.root, h, err = lldb.CreateBTree(db.alloc, opts.Compare); err != nil { + return + } + + if h != 1 { + panic("internal error") + } + + db.wal = opts.wal + return +} + +// CreateMem creates a new instance of an in-memory DB not backed by a disk +// file. Memory DBs are resource limited as they are completely held in memory +// and are not automatically persisted. +// +// For the meaning of opts please see documentation of Options. +func CreateMem(opts *Options) (db *DB, err error) { + opts = opts.clone() + opts._ACID = _ACIDTransactions + f := lldb.NewMemFiler() + return create(nil, f, opts, true) +} + +// CreateTemp creates a new temporary DB in the directory dir with a basename +// beginning with prefix and name ending in suffix. If dir is the empty string, +// CreateTemp uses the default directory for temporary files (see os.TempDir). +// Multiple programs calling CreateTemp simultaneously will not choose the same +// file name for the DB. The caller can use Name() to find the pathname of the +// DB file. It is the caller's responsibility to remove the file when no longer +// needed. +// +// For the meaning of opts please see documentation of Options. +func CreateTemp(dir, prefix, suffix string, opts *Options) (db *DB, err error) { + opts = opts.clone() + opts._ACID = _ACIDFull + f, err := fileutil.TempFile(dir, prefix, suffix) + if err != nil { + return + } + + return create(f, lldb.NewSimpleFileFiler(f), opts, false) +} + +// Open opens the named DB file for reading/writing. If successful, methods on +// the returned DB can be used for I/O; the associated file descriptor has mode +// os.O_RDWR. 
If there is an error, it will be of type *os.PathError. +// +// Note: While a DB is opened, it is locked and cannot be simultaneously opened +// again. +// +// For the meaning of opts please see documentation of Options. +func Open(name string, opts *Options) (db *DB, err error) { + opts = opts.clone() + opts._ACID = _ACIDFull + defer func() { + if db != nil { + db.opts = opts + } + }() + defer func() { + lock := opts.lock + if err != nil && lock != nil { + lock.Close() + db = nil + } + if err != nil { + if db != nil { + db.Close() + db = nil + } + } + }() + + if err = opts.check(name, false, true); err != nil { + return + } + + f, err := os.OpenFile(name, os.O_RDWR, 0666) + if err != nil { + return + } + + filer := lldb.Filer(lldb.NewSimpleFileFiler(f)) + sz, err := filer.Size() + if err != nil { + return + } + + if sz%16 != 0 { + return nil, &os.PathError{Op: "kv.Open:", Path: name, Err: fmt.Errorf("file size %d(%#x) is not 0 (mod 16)", sz, sz)} + } + + var b [16]byte + if n, err := filer.ReadAt(b[:], 0); n != 16 || err != nil { + return nil, &os.PathError{Op: "kv.Open.ReadAt", Path: name, Err: err} + } + + var h header + if err = h.rd(b[:]); err != nil { + return nil, &os.PathError{Op: "kv.Open:validate header", Path: name, Err: err} + } + + db = &DB{f: f, lock: opts.lock} + if filer, err = opts.acidFiler(db, filer); err != nil { + return nil, err + } + + db.filer = filer + switch h.ver { + default: + return nil, &os.PathError{Op: "kv.Open", Path: name, Err: fmt.Errorf("unknown/unsupported kv file format version %#x", h.ver)} + case 0x00: + if _, err = open00(name, db); err != nil { + return nil, err + } + } + + db.root, err = lldb.OpenBTree(db.alloc, opts.Compare, 1) + db.wal = opts.wal + if opts.VerifyDbAfterOpen { + err = verifyAllocator(db.alloc) + } + return +} + +// Close closes the DB, rendering it unusable for I/O. It returns an error, if +// any. Failing to call Close before exiting a program can lose the last open +// or being committed transaction. 
+// +// Successful Close is idempotent. +func (db *DB) Close() (err error) { + db.closeMu.Lock() + defer db.closeMu.Unlock() + if db.closed { + return + } + + db.closed = true + + if err = db.enter(); err != nil { + return + } + + doLeave := true + defer func() { + db.wal = nil + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + if doLeave { + db.leave(&err) + } + }() + + if db.acidTimer != nil { + db.acidTimer.Stop() + } + + var e error + for db.acidNest > 0 { + db.acidNest-- + if e = db.filer.EndUpdate(); err == nil { + err = e + } + } + + doLeave = false + if e = db.leave(&err); err == nil { + err = e + } + if db.opts.VerifyDbBeforeClose { + if e = verifyAllocator(db.alloc); err == nil { + err = e + } + } + if e = db.close(); err == nil { + err = e + } + if lock := db.lock; lock != nil { + if e = lock.Close(); err == nil { + err = e + } + } + if wal := db.wal; wal != nil { + e = wal.Close() + db.wal = nil + if err == nil { + err = e + } + } + return +} + +func (db *DB) close() (err error) { + // We are safe to close due to locked db.closeMu, but not safe aginst + // any other goroutine concurrently calling other exported db methods, + // causing a race[0] in the db.enter() mechanism. So we must lock + // db.bkl. + // + // [0]: https://github.com/cznic/kv/issues/17#issuecomment-31960658 + db.bkl.Lock() + defer db.bkl.Unlock() + + if db.f == nil { // lldb.MemFiler + return + } + + err = db.filer.Sync() + if e := db.filer.Close(); err == nil { + err = e + } + if db.opts.VerifyDbAfterClose { + if e := verifyDbFile(db.Name()); err == nil { + err = e + } + } + return +} + +// Name returns the name of the DB file. +func (db *DB) Name() string { + return db.filer.Name() +} + +// Size returns the size of the DB file. 
+func (db *DB) Size() (sz int64, err error) { + db.bkl.Lock() + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.bkl.Unlock() + }() + + return db.filer.Size() +} + +func (db *DB) enter() (err error) { + db.bkl.Lock() + switch db.acidState { + default: + panic("internal error") + case stDisabled: + // nop + case stIdle: + if err = db.filer.BeginUpdate(); err != nil { + return + } + + db.acidNest = 1 + db.acidTimer = time.AfterFunc(db.gracePeriod, db.timeout) + db.acidState = stCollecting + case stCollecting: + db.acidNest++ + case stIdleArmed: + db.acidNest = 1 + db.acidState = stCollectingArmed + case stCollectingArmed: + db.acidNest++ + case stCollectingTriggered: + db.acidNest++ + case stEndUpdateFailed: + return db.leave(&err) + } + + err = db.filer.BeginUpdate() + return +} + +func (db *DB) leave(err *error) error { + switch db.acidState { + default: + panic("internal error") + case stDisabled: + // nop + case stCollecting: + db.acidNest-- + if db.acidNest == 0 { + db.acidState = stIdleArmed + } + case stCollectingArmed: + db.acidNest-- + if db.acidNest == 0 { + db.acidState = stIdleArmed + } + case stCollectingTriggered: + db.acidNest-- + if db.acidNest == 0 { + if e := db.filer.EndUpdate(); e != nil && err == nil { + *err = e + } + db.acidState = stIdle + } + case stEndUpdateFailed: + db.bkl.Unlock() + return fmt.Errorf("Last transaction commit failed: %v", db.lastCommitErr) + } + + switch { + case *err != nil: + db.filer.Rollback() // return the original, input error + default: + *err = db.filer.EndUpdate() + if *err != nil { + db.acidState = stEndUpdateFailed + db.lastCommitErr = *err + } + } + db.bkl.Unlock() + return *err +} + +func (db *DB) timeout() { + db.closeMu.Lock() + defer db.closeMu.Unlock() + if db.closed { + return + } + + db.bkl.Lock() + defer db.bkl.Unlock() + + switch db.acidState { + default: + panic("internal error") + case stIdle: + panic("internal error") + case stCollecting: + db.acidState = 
stCollectingTriggered + case stIdleArmed: + if err := db.filer.EndUpdate(); err != nil { // If EndUpdate fails, no WAL was written (automatic Rollback) + db.acidState = stEndUpdateFailed + db.lastCommitErr = err + return + } + + db.acidState = stIdle + case stCollectingArmed: + db.acidState = stCollectingTriggered + case stCollectingTriggered: + panic("internal error") + } +} + +// BeginTransaction starts a new transaction. Every call to BeginTransaction +// must be eventually "balanced" by exactly one call to Commit or Rollback (but +// not both). Calls to BeginTransaction may nest. +// +// BeginTransaction is atomic and it is safe for concurrent use by multiple +// goroutines (if/when that makes sense). +func (db *DB) BeginTransaction() (err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.filer.BeginUpdate() +} + +// Commit commits the current transaction. If the transaction is the top level +// one, then all of the changes made within the transaction are atomically made +// persistent in the DB. Invocation of an unbalanced Commit is an error. +// +// Commit is atomic and it is safe for concurrent use by multiple goroutines +// (if/when that makes sense). +func (db *DB) Commit() (err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.filer.EndUpdate() +} + +// Rollback cancels and undoes the innermost transaction level. If the +// transaction is the top level one, then no of the changes made within the +// transactions are persisted. Invocation of an unbalanced Rollback is an +// error. +// +// Rollback is atomic and it is safe for concurrent use by multiple goroutines +// (if/when that makes sense). 
+func (db *DB) Rollback() (err error) { + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.filer.Rollback() +} + +// Verify attempts to find any structural errors in DB wrt the organization of +// it as defined by lldb.Allocator. Any problems found are reported to 'log' +// except non verify related errors like disk read fails etc. If 'log' returns +// false or the error doesn't allow to (reliably) continue, the verification +// process is stopped and an error is returned from the Verify function. +// Passing a nil log works like providing a log function always returning +// false. Any non-structural errors, like for instance Filer read errors, are +// NOT reported to 'log', but returned as the Verify's return value, because +// Verify cannot proceed in such cases. Verify returns nil only if it fully +// completed verifying DB without detecting any error. +// +// It is recommended to limit the number reported problems by returning false +// from 'log' after reaching some limit. Huge and corrupted DB can produce an +// overwhelming error report dataset. +// +// The verifying process will scan the whole DB at least 3 times (a trade +// between processing space and time consumed). It doesn't read the content of +// free blocks above the head/tail info bytes. If the 3rd phase detects lost +// free space, then a 4th scan (a faster one) is performed to precisely report +// all of them. +// +// Statistics are returned via 'stats' if non nil. The statistics are valid +// only if Verify succeeded, ie. it didn't reported anything to log and it +// returned a nil error. 
+func (db *DB) Verify(log func(error) bool, stats *lldb.AllocStats) (err error) { + bitmapf, err := fileutil.TempFile("", "verifier", ".tmp") + if err != nil { + return + } + + defer func() { + tn := bitmapf.Name() + bitmapf.Close() + os.Remove(tn) + }() + + bitmap := lldb.NewSimpleFileFiler(bitmapf) + + if err = db.enter(); err != nil { + return + } + + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + db.leave(&err) + }() + + return db.alloc.Verify(bitmap, log, stats) +} + +// Delete deletes key and its associated value from the DB. +// +// Delete is atomic and it is safe for concurrent use by multiple goroutines. +func (db *DB) Delete(key []byte) (err error) { + if err = db.enter(); err != nil { + return + } + + err = db.root.Delete(key) + return db.leave(&err) +} + +// Extract is a combination of Get and Delete. If the key exists in the DB, it +// is returned (like Get) and also deleted from the DB in a more efficient way +// which doesn't search for the key twice. The returned slice may be a +// sub-slice of buf if buf was large enough to hold the entire content. +// Otherwise, a newly allocated slice will be returned. It is valid to pass a +// nil buf. +// +// Extract is atomic and it is safe for concurrent use by multiple goroutines. +func (db *DB) Extract(buf, key []byte) (value []byte, err error) { + if err = db.enter(); err != nil { + return + } + + value, err = db.root.Extract(buf, key) + db.leave(&err) + return +} + +// First returns the first KV pair in the DB, if it exists. Otherwise key == +// nil and value == nil. +// +// First is atomic and it is safe for concurrent use by multiple goroutines. +func (db *DB) First() (key, value []byte, err error) { + db.bkl.Lock() + defer db.bkl.Unlock() + return db.root.First() +} + +// Get returns the value associated with key, or nil if no such value exists. +// The returned slice may be a sub-slice of buf if buf was large enough to hold +// the entire content. 
Otherwise, a newly allocated slice will be returned. It +// is valid to pass a nil buf. +// +// Get is atomic and it is safe for concurrent use by multiple goroutines. +func (db *DB) Get(buf, key []byte) (value []byte, err error) { + db.bkl.Lock() + defer db.bkl.Unlock() + return db.root.Get(buf, key) +} + +// Last returns the last KV pair of the DB, if it exists. Otherwise key == +// nil and value == nil. +// +// Last is atomic and it is safe for concurrent use by multiple goroutines. +func (db *DB) Last() (key, value []byte, err error) { + db.bkl.Lock() + defer db.bkl.Unlock() + return db.root.Last() +} + +// Put combines Get and Set in a more efficient way where the DB is searched +// for the key only once. The upd(ater) receives the current (key, old-value), +// if that exists or (key, nil) otherwise. It can then return a (new-value, +// true, nil) to create or overwrite the existing value in the KV pair, or +// (whatever, false, nil) if it decides not to create or not to update the +// value of the KV pair. +// +// db.Set(k, v) +// +// conceptually equals +// +// db.Put(k, func(k, v []byte){ return v, true }([]byte, bool)) +// +// modulo the differing return values. +// +// The returned slice may be a sub-slice of buf if buf was large enough to hold +// the entire content. Otherwise, a newly allocated slice will be returned. It +// is valid to pass a nil buf. +// +// Put is atomic and it is safe for concurrent use by multiple goroutines. +func (db *DB) Put(buf, key []byte, upd func(key, old []byte) (new []byte, write bool, err error)) (old []byte, written bool, err error) { + if err = db.enter(); err != nil { + return + } + + old, written, err = db.root.Put(buf, key, upd) + db.leave(&err) + return +} + +// Seek returns an enumerator positioned on the first key/value pair whose key +// is 'greater than or equal to' the given key. There may be no such pair, in +// which case the Next,Prev methods of the returned enumerator will always +// return io.EOF. 
+// +// Seek is atomic and it is safe for concurrent use by multiple goroutines. +func (db *DB) Seek(key []byte) (enum *Enumerator, hit bool, err error) { + db.bkl.Lock() + defer db.bkl.Unlock() + enum0, hit, err := db.root.Seek(key) + if err != nil { + return + } + + enum = &Enumerator{ + db: db, + enum: enum0, + } + return +} + +// SeekFirst returns an enumerator positioned on the first KV pair in the DB, +// if any. For an empty DB, err == io.EOF is returned. +// +// SeekFirst is atomic and it is safe for concurrent use by multiple +// goroutines. +func (db *DB) SeekFirst() (enum *Enumerator, err error) { + db.bkl.Lock() + defer db.bkl.Unlock() + enum0, err := db.root.SeekFirst() + if err != nil { + return + } + + enum = &Enumerator{ + db: db, + enum: enum0, + } + return +} + +// SeekLast returns an enumerator positioned on the last KV pair in the DB, +// if any. For an empty DB, err == io.EOF is returned. +// +// SeekLast is atomic and it is safe for concurrent use by multiple +// goroutines. +func (db *DB) SeekLast() (enum *Enumerator, err error) { + db.bkl.Lock() + defer db.bkl.Unlock() + enum0, err := db.root.SeekLast() + if err != nil { + return + } + + enum = &Enumerator{ + db: db, + enum: enum0, + } + return +} + +// Set sets the value associated with key. Any previous value, if existed, is +// overwritten by the new one. +// +// Set is atomic and it is safe for concurrent use by multiple goroutines. +func (db *DB) Set(key, value []byte) (err error) { + if err = db.enter(); err != nil { + return + } + + err = db.root.Set(key, value) + db.leave(&err) + return +} + +// Enumerator captures the state of enumerating a DB. It is returned from the +// Seek* methods. Multiple enumerations may be in progress simultaneously. The +// enumerator is aware of any mutations made to the tree in the process of +// enumerating it and automatically resumes the enumeration. +// +// Multiple concurrently executing enumerations may be in progress. 
+type Enumerator struct { + db *DB + enum *lldb.BTreeEnumerator +} + +// Next returns the currently enumerated KV pair, if it exists and moves to the +// next KV in the key collation order. If there is no KV pair to return, err == +// io.EOF is returned. +// +// Next is atomic and it is safe for concurrent use by multiple goroutines. +func (e *Enumerator) Next() (key, value []byte, err error) { + e.db.bkl.Lock() + defer e.db.bkl.Unlock() + return e.enum.Next() +} + +// Prev returns the currently enumerated KV pair, if it exists and moves to the +// previous KV in the key collation order. If there is no KV pair to return, +// err == io.EOF is returned. +// +// Prev is atomic and it is safe for concurrent use by multiple goroutines. +func (e *Enumerator) Prev() (key, value []byte, err error) { + e.db.bkl.Lock() + defer e.db.bkl.Unlock() + return e.enum.Prev() +} + +// Inc atomically increments the value associated with key by delta and +// returns the new value. If the value doesn't exists before calling Inc or if +// the value is not an [8]byte, the value is considered to be zero before peforming Inc. +// +// Inc is atomic and it is safe for concurrent use by multiple goroutines. +func (db *DB) Inc(key []byte, delta int64) (val int64, err error) { + if err = db.enter(); err != nil { + return + } + + defer db.leave(&err) + + buf := db.buffers.Get(8) + defer db.buffers.Put(buf) + _, _, err = db.root.Put( + buf, + key, + func(key []byte, old []byte) (new []byte, write bool, err error) { + write = true + if len(old) == 8 { + val = int64(binary.BigEndian.Uint64(old)) + } else { + old = make([]byte, 8) + val = 0 + } + val += delta + binary.BigEndian.PutUint64(old, uint64(val)) + new = old + return + }, + ) + + return +} + +// WALName returns the name of the WAL file in use or an empty string for memory +// or closed databases. 
+func (db *DB) WALName() string { + if f := db.wal; f != nil { + return f.Name() + } + + return "" +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/lock.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/lock.go new file mode 100644 index 00000000..183523f9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/lock.go @@ -0,0 +1,58 @@ +// Copyright 2014 The kv Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package kv + +import ( + "crypto/sha1" + "fmt" + "io" + "os" + "path/filepath" + "sync" +) + +func lockName(dbname string) string { + base := filepath.Base(filepath.Clean(dbname)) + "lockfile" + h := sha1.New() + io.WriteString(h, base) + return filepath.Join(filepath.Dir(dbname), fmt.Sprintf(".%x", h.Sum(nil))) +} + +func defaultLocker(dbname string) (io.Closer, error) { + lname := lockName(dbname) + abs, err := filepath.Abs(lname) + if err != nil { + return nil, err + } + f, err := os.OpenFile(abs, os.O_CREATE|os.O_EXCL|os.O_RDONLY, 0666) + if os.IsExist(err) { + return nil, fmt.Errorf("cannot access DB %q: lock file %q exists", dbname, abs) + } + if err != nil { + return nil, err + } + return &lockCloser{f: f, abs: abs}, nil +} + +type lockCloser struct { + f *os.File + abs string + once sync.Once + err error +} + +func (lc *lockCloser) Close() error { + lc.once.Do(lc.close) + return lc.err +} + +func (lc *lockCloser) close() { + if err := lc.f.Close(); err != nil { + lc.err = err + } + if err := os.Remove(lc.abs); err != nil { + lc.err = err + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/options.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/options.go new file mode 100644 index 00000000..fcc9bbd4 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/options.go @@ -0,0 +1,253 @@ +// Copyright 2014 The kv Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package kv + +import ( + "crypto/sha1" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "time" + + "camlistore.org/third_party/github.com/cznic/exp/lldb" +) + +const ( + // BeginUpdate/EndUpdate/Rollback will be no-ops. All operations + // updating a DB will be written immediately including partial updates + // during operation's progress. If any update fails, the DB can become + // unusable. The same applies to DB crashes and/or any other non clean + // DB shutdown. + _ACIDNone = iota + + // Enable transactions. BeginUpdate/EndUpdate/Rollback will be + // effective. All operations on the DB will be automatically performed + // within a transaction. Operations will thus either succeed completely + // or have no effect at all - they will be rollbacked in case of any + // error. If any update fails the DB will not be corrupted. DB crashes + // and/or any other non clean DB shutdown may still render the DB + // unusable. + _ACIDTransactions + + // Enable durability. Same as ACIDTransactions plus enables 2PC and + // WAL. Updates to the DB will be first made permanent in a WAL and + // only after that reflected in the DB. A DB will automatically recover + // from crashes and/or any other non clean DB shutdown. Only last + // uncommited transaction (transaction in progress ATM of a crash) can + // get lost. + // + // NOTE: Options.GracePeriod may extend the span of a single + // transaction to a batch of multiple transactions. + // + // NOTE2: Non zero GracePeriod requires GOMAXPROCS > 1 to work. Dbm + // checks GOMAXPROCS in such case and if the value is 1 it + // automatically sets GOMAXPROCS = 2. 
+ _ACIDFull +) + +// Options are passed to the DB create/open functions to amend the behavior of +// those functions. The compatibility promise is the same as of struct types in +// the Go standard library - introducing changes can be made only by adding new +// exported fields, which is backward compatible as long as client code uses +// field names to assign values of imported struct types literals. +type Options struct { + // Compare compares x and y. Compare may be nil, then bytes.Compare is + // used instead. + // + // Compare returns: + // + // -1 if x < y + // 0 if x == y + // +1 if x > y + Compare func(x, y []byte) int + + // Locker specifies a function to lock a named file. + // On success it returns an io.Closer to release the lock. + // If nil, a default implementation is used. + Locker func(name string) (io.Closer, error) + + // See the ACID* constants documentation. + _ACID int + + // The write ahead log pathname. Applicable iff ACID == ACIDFull. May + // be left empty in which case an unspecified pathname will be chosen, + // which is computed from the DB name and which will be in the same + // directory as the DB. Moving or renaming the DB while it is shut down + // will break it's connection to the automatically computed name. + // Moving both the files (the DB and the WAL) into another directory + // with no renaming is safe. + // + // On opening an existing DB the WAL file must exist if it should be + // used. If it is of zero size then a clean shutdown of the DB is + // assumed, otherwise an automatic DB recovery is performed. + // + // On creating a new DB the WAL file must not exist or it must be + // empty. It's not safe to write to a non empty WAL file as it may + // contain unprocessed DB recovery data. + _WAL string + + // Time to collect transactions before committing them into the WAL. + // Applicable iff ACID == ACIDFull. All updates are held in memory + // during the grace period so it should not be more than few seconds at + // most. 
+ // + // Recommended value for GracePeriod is 1 second. + // + // NOTE: Using small GracePeriod values will make DB updates very slow. + // Zero GracePeriod will make every single update a separate 2PC/WAL + // transaction. Values smaller than about 100-200 milliseconds + // (particularly for mechanical, rotational HDs) are not recommended + // and they may not be always honored. + _GracePeriod time.Duration + wal *os.File + lock io.Closer + + noClone bool // test hook + + // VerifyDbBeforeOpen turns on structural verification of the DB before + // it is opened. This verification may legitimately fail if the DB + // crashed and a yet-to-be-processed non empty WAL file exists. + VerifyDbBeforeOpen bool + + // VerifyDbAfterOpen turns on structural verification of the DB after + // it is opened and possibly recovered from WAL. + VerifyDbAfterOpen bool + + // VerifyDbBeforeClose turns on structural verification of the DB + // before it is closed. + VerifyDbBeforeClose bool + + // VerifyDbAfterClose turns on structural verification of the DB after + // it is closed. + VerifyDbAfterClose bool + + // Turns on verification of every single mutation of the DB. Before any + // such mutation a snapshot of the DB is created and the specific + // mutation operation and parameters are recorded. After the mutation + // the whole DB is verified. If the verification fails the last known + // good state (the snapshot discussed above) and the corrupted state + // are "core" dumped to a well known location (TBD). 
+ // + //MAYBE ParanoidUpdates bool +} + +func (o *Options) locker(dbname string) (io.Closer, error) { + if o == nil || o.Locker == nil { + return defaultLocker(dbname) + } + return o.Locker(dbname) +} + +func (o *Options) clone() *Options { + if o.noClone { + return o + } + + r := &Options{} + *r = *o + return r +} + +func (o *Options) check(dbname string, new, lock bool) (err error) { + if lock { + if o.lock, err = o.locker(dbname); err != nil { + return + } + } + + if o.VerifyDbBeforeOpen && !new { + if err = verifyDbFile(dbname); err != nil { + return + } + } + + switch o._ACID { + default: + panic("internal error") + case _ACIDTransactions: + case _ACIDFull: + o._GracePeriod = time.Second + o._WAL = o.walName(dbname, o._WAL) + + switch new { + case true: + if o.wal, err = os.OpenFile(o._WAL, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666); err != nil { + if os.IsExist(err) { + fi, e := os.Stat(o._WAL) + if e != nil { + return e + } + + if sz := fi.Size(); sz != 0 { + return fmt.Errorf("cannot create DB %q: non empty WAL file %q (size %d) exists", dbname, o._WAL, sz) + } + + o.wal, err = os.OpenFile(o._WAL, os.O_RDWR, 0666) + } + return + } + case false: + if o.wal, err = os.OpenFile(o._WAL, os.O_RDWR, 0666); err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("cannot open DB %q: WAL file %q doesn't exist", dbname, o._WAL) + } + return + } + } + } + + return +} + +func (o *Options) walName(dbname, wal string) (r string) { + if wal != "" { + return filepath.Clean(wal) + } + + base := filepath.Base(filepath.Clean(dbname)) + h := sha1.New() + io.WriteString(h, base) + return filepath.Join(filepath.Dir(dbname), fmt.Sprintf(".%x", h.Sum(nil))) +} + +func (o *Options) acidFiler(db *DB, f lldb.Filer) (r lldb.Filer, err error) { + switch o._ACID { + default: + panic("internal error") + case _ACIDTransactions: + var rf *lldb.RollbackFiler + if rf, err = lldb.NewRollbackFiler( + f, + func(sz int64) error { + return f.Truncate(sz) + }, + f, + ); err != nil { + return + } + + 
r = rf + case _ACIDFull: + if r, err = lldb.NewACIDFiler(f, o.wal); err != nil { + return + } + + db.acidState = stIdle + db.gracePeriod = o._GracePeriod + if o._GracePeriod == 0 { + panic("internal error") + } + + // Ensure GOMAXPROCS > 1, required for ACID FSM + if n := runtime.GOMAXPROCS(0); n > 1 { + return + } + + runtime.GOMAXPROCS(2) + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/v0.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/v0.go new file mode 100644 index 00000000..25798da3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/v0.go @@ -0,0 +1,21 @@ +// Copyright 2014 The kv Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package kv + +import ( + "os" + + "camlistore.org/third_party/github.com/cznic/exp/lldb" +) + +func open00(name string, in *DB) (db *DB, err error) { + db = in + if db.alloc, err = lldb.NewAllocator(lldb.NewInnerFiler(db.filer, 16), &lldb.Options{}); err != nil { + return nil, &os.PathError{Op: "kv.open00", Path: name, Err: err} + } + + db.alloc.Compress = true + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/verify.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/verify.go new file mode 100644 index 00000000..41e47d5f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/kv/verify.go @@ -0,0 +1,81 @@ +// Copyright 2014 The kv Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package kv + +import ( + "io" + "io/ioutil" + "os" + + "camlistore.org/third_party/github.com/cznic/exp/lldb" +) + +func verifyAllocator(a *lldb.Allocator) error { + bits, err := ioutil.TempFile("", "kv-verify-") + if err != nil { + return err + } + + defer func() { + nm := bits.Name() + bits.Close() + os.Remove(nm) + }() + + var lerr error + if err = a.Verify( + lldb.NewSimpleFileFiler(bits), + func(err error) bool { + lerr = err + return false + }, + nil, + ); err != nil { + return err + } + + if lerr != nil { + return lerr + } + + t, err := lldb.OpenBTree(a, nil, 1) + if err != nil { + return err + } + + e, err := t.SeekFirst() + if err != nil { + if err == io.EOF { + err = nil + } + return err + } + + for { + _, _, err := e.Next() + if err != nil { + if err == io.EOF { + err = nil + } + return err + } + } +} + +func verifyDbFile(fn string) error { + f, err := os.Open(fn) // O_RDONLY + if err != nil { + return err + } + + defer f.Close() + + a, err := lldb.NewAllocator(lldb.NewInnerFiler(lldb.NewSimpleFileFiler(f), 16), &lldb.Options{}) + if err != nil { + return err + } + + return verifyAllocator(a) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/GO-LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/GO-LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/GO-LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/LICENSE new file mode 100644 index 00000000..1e92e33d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of CZ.NIC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/README b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/README new file mode 100644 index 00000000..dd468334 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/README @@ -0,0 +1,10 @@ +This is a goinstall-able mirror of modified code already published at: +http://git.nic.cz/redmine/projects/gornd/repository + +Packages in this repository: + +Install: $ go get github.com/cznic/mathutil +Godocs: http://gopkgdoc.appspot.com/pkg/github.com/cznic/mathutil + +Install: $ go get github.com/cznic/mathutil/mersenne +Godocs: http://gopkgdoc.appspot.com/pkg/github.com/cznic/mathutil/mersenne diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/all_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/all_test.go new file mode 100644 index 00000000..8ea413ca --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/all_test.go @@ -0,0 +1,3485 @@ +// Copyright (c) 2014 The mathutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mathutil + +import ( + "math" + "math/big" + "math/rand" + "runtime" + "sort" + "sync" + "testing" +) + +func r32() *FC32 { + r, err := NewFC32(math.MinInt32, math.MaxInt32, true) + if err != nil { + panic(err) + } + + return r +} + +var ( + r64lo = big.NewInt(math.MinInt64) + r64hi = big.NewInt(math.MaxInt64) + _3 = big.NewInt(3) + MinIntM1 = MinInt + MaxIntP1 = MaxInt + MaxUintP1 uint = MaxUint +) + +func init() { + MinIntM1-- + MaxIntP1++ + MaxUintP1++ +} + +func r64() *FCBig { + r, err := NewFCBig(r64lo, r64hi, true) + if err != nil { + panic(err) + } + + return r +} + +func benchmark1eN(b *testing.B, r *FC32) { + b.StartTimer() + for i := 0; i < b.N; i++ { + r.Next() + } +} + +func BenchmarkFC1e3(b *testing.B) { + b.StopTimer() + r, _ := NewFC32(0, 1e3, false) + benchmark1eN(b, r) +} + +func BenchmarkFC1e6(b *testing.B) { + b.StopTimer() + r, _ := NewFC32(0, 1e6, false) + benchmark1eN(b, r) +} + +func BenchmarkFC1e9(b *testing.B) { + b.StopTimer() + r, _ := NewFC32(0, 1e9, false) + benchmark1eN(b, r) +} + +func Test0(t *testing.T) { + const N = 10000 + for n := 1; n < N; n++ { + lo, hi := 0, n-1 + period := int64(hi) - int64(lo) + 1 + r, err := NewFC32(lo, hi, false) + if err != nil { + t.Fatal(err) + } + if r.Cycle()-period > period { + t.Fatalf("Cycle exceeds 2 * period") + } + } + for n := 1; n < N; n++ { + lo, hi := 0, n-1 + period := int64(hi) - int64(lo) + 1 + r, err := NewFC32(lo, hi, true) + if err != nil { + t.Fatal(err) + } + if r.Cycle()-2*period > period { + t.Fatalf("Cycle exceeds 3 * period") + } + } +} + +func Test1(t *testing.T) { + const ( + N = 360 + S = 3 + ) + for hq := 0; hq <= 1; hq++ { + for n := 1; n < N; n++ { + for seed := 0; seed < S; seed++ { + lo, hi := -n, 2*n + period := int64(hi - lo + 1) + r, err := NewFC32(lo, hi, hq == 1) + if err != nil { + t.Fatal(err) + } + r.Seed(int64(seed)) + m := map[int]bool{} + v := make([]int, period, period) + p := make([]int64, period, period) + for i := lo; i <= hi; i++ { + x := 
r.Next() + p[i-lo] = r.Pos() + if x < lo || x > hi { + t.Fatal("t1.0") + } + if m[x] { + t.Fatal("t1.1") + } + m[x] = true + v[i-lo] = x + } + for i := lo; i <= hi; i++ { + x := r.Next() + if x < lo || x > hi { + t.Fatal("t1.2") + } + if !m[x] { + t.Fatal("t1.3") + } + if x != v[i-lo] { + t.Fatal("t1.4") + } + if r.Pos() != p[i-lo] { + t.Fatal("t1.5") + } + m[x] = false + } + for i := lo; i <= hi; i++ { + r.Seek(p[i-lo] + 1) + x := r.Prev() + if x < lo || x > hi { + t.Fatal("t1.6") + } + if x != v[i-lo] { + t.Fatal("t1.7") + } + } + } + } + } +} + +func Test2(t *testing.T) { + const ( + N = 370 + S = 3 + ) + for hq := 0; hq <= 1; hq++ { + for n := 1; n < N; n++ { + for seed := 0; seed < S; seed++ { + lo, hi := -n, 2*n + period := int64(hi - lo + 1) + r, err := NewFC32(lo, hi, hq == 1) + if err != nil { + t.Fatal(err) + } + r.Seed(int64(seed)) + m := map[int]bool{} + v := make([]int, period, period) + p := make([]int64, period, period) + for i := lo; i <= hi; i++ { + x := r.Prev() + p[i-lo] = r.Pos() + if x < lo || x > hi { + t.Fatal("t2.0") + } + if m[x] { + t.Fatal("t2.1") + } + m[x] = true + v[i-lo] = x + } + for i := lo; i <= hi; i++ { + x := r.Prev() + if x < lo || x > hi { + t.Fatal("t2.2") + } + if !m[x] { + t.Fatal("t2.3") + } + if x != v[i-lo] { + t.Fatal("t2.4") + } + if r.Pos() != p[i-lo] { + t.Fatal("t2.5") + } + m[x] = false + } + for i := lo; i <= hi; i++ { + s := p[i-lo] - 1 + if s < 0 { + s = r.Cycle() - 1 + } + r.Seek(s) + x := r.Next() + if x < lo || x > hi { + t.Fatal("t2.6") + } + if x != v[i-lo] { + t.Fatal("t2.7") + } + } + } + } + } +} + +func benchmarkBig1eN(b *testing.B, r *FCBig) { + b.StartTimer() + for i := 0; i < b.N; i++ { + r.Next() + } +} + +func BenchmarkFCBig1e3(b *testing.B) { + b.StopTimer() + hi := big.NewInt(0).SetInt64(1e3) + r, _ := NewFCBig(big0, hi, false) + benchmarkBig1eN(b, r) +} + +func BenchmarkFCBig1e6(b *testing.B) { + b.StopTimer() + hi := big.NewInt(0).SetInt64(1e6) + r, _ := NewFCBig(big0, hi, false) + 
benchmarkBig1eN(b, r) +} + +func BenchmarkFCBig1e9(b *testing.B) { + b.StopTimer() + hi := big.NewInt(0).SetInt64(1e9) + r, _ := NewFCBig(big0, hi, false) + benchmarkBig1eN(b, r) +} + +func BenchmarkFCBig1e12(b *testing.B) { + b.StopTimer() + hi := big.NewInt(0).SetInt64(1e12) + r, _ := NewFCBig(big0, hi, false) + benchmarkBig1eN(b, r) +} + +func BenchmarkFCBig1e15(b *testing.B) { + b.StopTimer() + hi := big.NewInt(0).SetInt64(1e15) + r, _ := NewFCBig(big0, hi, false) + benchmarkBig1eN(b, r) +} + +func BenchmarkFCBig1e18(b *testing.B) { + b.StopTimer() + hi := big.NewInt(0).SetInt64(1e18) + r, _ := NewFCBig(big0, hi, false) + benchmarkBig1eN(b, r) +} + +var ( + big0 = big.NewInt(0) +) + +func TestBig0(t *testing.T) { + const N = 7400 + lo := big.NewInt(0) + hi := big.NewInt(0) + period := big.NewInt(0) + c := big.NewInt(0) + for n := int64(1); n < N; n++ { + hi.SetInt64(n - 1) + period.Set(hi) + period.Sub(period, lo) + period.Add(period, _1) + r, err := NewFCBig(lo, hi, false) + if err != nil { + t.Fatal(err) + } + if r.cycle.Cmp(period) < 0 { + t.Fatalf("Period exceeds cycle") + } + c.Set(r.Cycle()) + c.Sub(c, period) + if c.Cmp(period) > 0 { + t.Fatalf("Cycle exceeds 2 * period") + } + } + for n := int64(1); n < N; n++ { + hi.SetInt64(n - 1) + period.Set(hi) + period.Sub(period, lo) + period.Add(period, _1) + r, err := NewFCBig(lo, hi, true) + if err != nil { + t.Fatal(err) + } + if r.cycle.Cmp(period) < 0 { + t.Fatalf("Period exceeds cycle") + } + c.Set(r.Cycle()) + c.Sub(c, period) + c.Sub(c, period) + if c.Cmp(period) > 0 { + t.Fatalf("Cycle exceeds 3 * period") + } + } +} + +func TestBig1(t *testing.T) { + const ( + N = 120 + S = 3 + ) + lo := big.NewInt(0) + hi := big.NewInt(0) + seek := big.NewInt(0) + for hq := 0; hq <= 1; hq++ { + for n := int64(1); n < N; n++ { + for seed := 0; seed < S; seed++ { + lo64 := -n + hi64 := 2 * n + lo.SetInt64(lo64) + hi.SetInt64(hi64) + period := hi64 - lo64 + 1 + r, err := NewFCBig(lo, hi, hq == 1) + if err != nil { + 
t.Fatal(err) + } + r.Seed(int64(seed)) + m := map[int64]bool{} + v := make([]int64, period, period) + p := make([]int64, period, period) + for i := lo64; i <= hi64; i++ { + x := r.Next().Int64() + p[i-lo64] = r.Pos().Int64() + if x < lo64 || x > hi64 { + t.Fatal("tb1.0") + } + if m[x] { + t.Fatal("tb1.1") + } + m[x] = true + v[i-lo64] = x + } + for i := lo64; i <= hi64; i++ { + x := r.Next().Int64() + if x < lo64 || x > hi64 { + t.Fatal("tb1.2") + } + if !m[x] { + t.Fatal("tb1.3") + } + if x != v[i-lo64] { + t.Fatal("tb1.4") + } + if r.Pos().Int64() != p[i-lo64] { + t.Fatal("tb1.5") + } + m[x] = false + } + for i := lo64; i <= hi64; i++ { + r.Seek(seek.SetInt64(p[i-lo64] + 1)) + x := r.Prev().Int64() + if x < lo64 || x > hi64 { + t.Fatal("tb1.6") + } + if x != v[i-lo64] { + t.Fatal("tb1.7") + } + } + } + } + } +} + +func TestBig2(t *testing.T) { + const ( + N = 120 + S = 3 + ) + lo := big.NewInt(0) + hi := big.NewInt(0) + seek := big.NewInt(0) + for hq := 0; hq <= 1; hq++ { + for n := int64(1); n < N; n++ { + for seed := 0; seed < S; seed++ { + lo64, hi64 := -n, 2*n + lo.SetInt64(lo64) + hi.SetInt64(hi64) + period := hi64 - lo64 + 1 + r, err := NewFCBig(lo, hi, hq == 1) + if err != nil { + t.Fatal(err) + } + r.Seed(int64(seed)) + m := map[int64]bool{} + v := make([]int64, period, period) + p := make([]int64, period, period) + for i := lo64; i <= hi64; i++ { + x := r.Prev().Int64() + p[i-lo64] = r.Pos().Int64() + if x < lo64 || x > hi64 { + t.Fatal("tb2.0") + } + if m[x] { + t.Fatal("tb2.1") + } + m[x] = true + v[i-lo64] = x + } + for i := lo64; i <= hi64; i++ { + x := r.Prev().Int64() + if x < lo64 || x > hi64 { + t.Fatal("tb2.2") + } + if !m[x] { + t.Fatal("tb2.3") + } + if x != v[i-lo64] { + t.Fatal("tb2.4") + } + if r.Pos().Int64() != p[i-lo64] { + t.Fatal("tb2.5") + } + m[x] = false + } + for i := lo64; i <= hi64; i++ { + s := p[i-lo64] - 1 + if s < 0 { + s = r.Cycle().Int64() - 1 + } + r.Seek(seek.SetInt64(s)) + x := r.Next().Int64() + if x < lo64 || x > hi64 
{ + t.Fatal("tb2.6") + } + if x != v[i-lo64] { + t.Fatal("tb2.7") + } + } + } + } + } +} + +func TestPermutations(t *testing.T) { + data := sort.IntSlice{3, 2, 1} + check := [][]int{ + {1, 2, 3}, + {1, 3, 2}, + {2, 1, 3}, + {2, 3, 1}, + {3, 1, 2}, + {3, 2, 1}, + } + i := 0 + for PermutationFirst(data); ; i++ { + if i >= len(check) { + t.Fatalf("too much permutations generated: %d > %d", i+1, len(check)) + } + + for j, v := range check[i] { + got := data[j] + if got != v { + t.Fatalf("permutation %d:\ndata: %v\ncheck: %v\nexpected data[%d] == %d, got %d", i, data, check[i], j, v, got) + } + } + + if !PermutationNext(data) { + if i != len(check)-1 { + t.Fatal("permutations generated", i, "expected", len(check)) + } + break + } + } +} + +func TestIsPrime(t *testing.T) { + const p4M = 283146 // # of primes < 4e6 + n := 0 + for i := uint32(0); i <= 4e6; i++ { + if IsPrime(i) { + n++ + } + } + t.Log(n) + if n != p4M { + t.Fatal(n) + } +} + +func BenchmarkIsPrime(b *testing.B) { + b.StopTimer() + n := make([]uint32, b.N) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < b.N; i++ { + n[i] = rng.Uint32() + } + b.StartTimer() + for _, n := range n { + IsPrime(n) + } +} + +func BenchmarkNextPrime(b *testing.B) { + b.StopTimer() + n := make([]uint32, b.N) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < b.N; i++ { + n[i] = rng.Uint32() + } + b.StartTimer() + for _, n := range n { + NextPrime(n) + } +} + +func BenchmarkIsPrimeUint64(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + a := make([]uint64, N) + r := r64() + for i := range a { + a[i] = uint64(r.Next().Int64()) + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + IsPrimeUint64(a[i&(N-1)]) + } +} + +func BenchmarkNextPrimeUint64(b *testing.B) { + b.StopTimer() + n := make([]uint64, b.N) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < b.N; i++ { + n[i] = uint64(rng.Int63()) + if i&1 == 0 { + n[i] ^= 1 << 63 + } + } + b.StartTimer() + for _, n := range n { + NextPrimeUint64(n) + } +} + 
+func TestNextPrime(t *testing.T) { + const p4M = 283146 // # of primes < 4e6 + n := 0 + var p uint32 + for { + p, _ = NextPrime(p) + if p >= 4e6 { + break + } + n++ + } + t.Log(n) + if n != p4M { + t.Fatal(n) + } +} + +func TestNextPrime2(t *testing.T) { + type data struct { + x uint32 + y uint32 + ok bool + } + tests := []data{ + {0, 2, true}, + {1, 2, true}, + {2, 3, true}, + {3, 5, true}, + {math.MaxUint32, 0, false}, + {math.MaxUint32 - 1, 0, false}, + {math.MaxUint32 - 2, 0, false}, + {math.MaxUint32 - 3, 0, false}, + {math.MaxUint32 - 4, 0, false}, + {math.MaxUint32 - 5, math.MaxUint32 - 4, true}, + } + + for _, test := range tests { + y, ok := NextPrime(test.x) + if ok != test.ok || ok && y != test.y { + t.Fatalf("x %d, got y %d ok %t, expected y %d ok %t", test.x, y, ok, test.y, test.ok) + } + } +} + +func TestNextPrimeUint64(t *testing.T) { + const ( + lo = 2000000000000000000 + hi = 2000000000000100000 + k = 2346 // PrimePi(hi)-PrimePi(lo) + ) + n := 0 + p := uint64(lo) - 1 + var ok bool + for { + p0 := p + p, ok = NextPrimeUint64(p) + if !ok { + t.Fatal(p0) + } + + if p > hi { + break + } + + n++ + } + if n != k { + t.Fatal(n, k) + } +} + +func TestISqrt(t *testing.T) { + for n := int64(0); n < 5e6; n++ { + x := int64(ISqrt(uint32(n))) + if x2 := x * x; x2 > n { + t.Fatalf("got ISqrt(%d) == %d, too big", n, x) + } + if x2 := x*x + 2*x + 1; x2 < n { + t.Fatalf("got ISqrt(%d) == %d, too low", n, x) + } + } + for n := int64(math.MaxUint32); n > math.MaxUint32-5e6; n-- { + x := int64(ISqrt(uint32(n))) + if x2 := x * x; x2 > n { + t.Fatalf("got ISqrt(%d) == %d, too big", n, x) + } + if x2 := x*x + 2*x + 1; x2 < n { + t.Fatalf("got ISqrt(%d) == %d, too low", n, x) + } + } + rng := rand.New(rand.NewSource(1)) + for i := 0; i < 5e6; i++ { + n := int64(rng.Uint32()) + x := int64(ISqrt(uint32(n))) + if x2 := x * x; x2 > n { + t.Fatalf("got ISqrt(%d) == %d, too big", n, x) + } + if x2 := x*x + 2*x + 1; x2 < n { + t.Fatalf("got ISqrt(%d) == %d, too low", n, x) + } 
+ } +} + +func TestSqrtUint64(t *testing.T) { + for n := uint64(0); n < 2e6; n++ { + x := SqrtUint64(n) + if x > math.MaxUint32 { + t.Fatalf("got Sqrt(%d) == %d, too big", n, x) + } + if x2 := x * x; x2 > n { + t.Fatalf("got Sqrt(%d) == %d, too big", n, x) + } + if x2 := x*x + 2*x + 1; x2 < n { + t.Fatalf("got Sqrt(%d) == %d, too low", n, x) + } + } + const H = uint64(18446744056529682436) + for n := H; n > H-2e6; n-- { + x := SqrtUint64(n) + if x > math.MaxUint32 { + t.Fatalf("got Sqrt(%d) == %d, too big", n, x) + } + if x2 := x * x; x2 > n { + t.Fatalf("got Sqrt(%d) == %d, too big", n, x) + } + if x2 := x*x + 2*x + 1; x2 < n { + t.Fatalf("got Sqrt(%d) == %d, too low", n, x) + } + } + rng := rand.New(rand.NewSource(1)) + for i := 0; i < 2e6; i++ { + n := uint64(rng.Uint32())<<31 | uint64(rng.Uint32()) + x := SqrtUint64(n) + if x2 := x * x; x2 > n { + t.Fatalf("got Sqrt(%d) == %d, too big", n, x) + } + if x2 := x*x + 2*x + 1; x2 < n { + t.Fatalf("got Sqrt(%d) == %d, too low", n, x) + } + } +} + +func TestSqrtBig(t *testing.T) { + const N = 3e4 + var n, lim, x2 big.Int + lim.SetInt64(N) + for n.Cmp(&lim) != 0 { + x := SqrtBig(&n) + x2.Mul(x, x) + if x.Cmp(&n) > 0 { + t.Fatalf("got sqrt(%s) == %s, too big", &n, x) + } + x2.Add(&x2, x) + x2.Add(&x2, x) + x2.Add(&x2, _1) + if x2.Cmp(&n) < 0 { + t.Fatalf("got sqrt(%s) == %s, too low", &n, x) + } + n.Add(&n, _1) + } + rng := rand.New(rand.NewSource(1)) + var h big.Int + h.SetBit(&h, 1e3, 1) + for i := 0; i < N; i++ { + n.Rand(rng, &h) + x := SqrtBig(&n) + x2.Mul(x, x) + if x.Cmp(&n) > 0 { + t.Fatalf("got sqrt(%s) == %s, too big", &n, x) + } + x2.Add(&x2, x) + x2.Add(&x2, x) + x2.Add(&x2, _1) + if x2.Cmp(&n) < 0 { + t.Fatalf("got sqrt(%s) == %s, too low", &n, x) + } + } +} + +func TestFactorInt(t *testing.T) { + chk := func(n uint64, f []FactorTerm) bool { + if n < 2 { + return len(f) == 0 + } + + for i := 1; i < len(f); i++ { // verify ordering + if t, u := f[i-1], f[i]; t.Prime >= u.Prime { + return false + } + } + + x 
:= uint64(1) + for _, v := range f { + if p := v.Prime; p < 0 || !IsPrime(uint32(v.Prime)) { + return false + } + + for i := uint32(0); i < v.Power; i++ { + x *= uint64(v.Prime) + if x > math.MaxUint32 { + return false + } + } + } + return x == n + } + + for n := uint64(0); n < 3e5; n++ { + f := FactorInt(uint32(n)) + if !chk(n, f) { + t.Fatalf("bad FactorInt(%d): %v", n, f) + } + } + for n := uint64(math.MaxUint32); n > math.MaxUint32-12e4; n-- { + f := FactorInt(uint32(n)) + if !chk(n, f) { + t.Fatalf("bad FactorInt(%d): %v", n, f) + } + } + rng := rand.New(rand.NewSource(1)) + for i := 0; i < 13e4; i++ { + n := rng.Uint32() + f := FactorInt(n) + if !chk(uint64(n), f) { + t.Fatalf("bad FactorInt(%d): %v", n, f) + } + } +} + +func TestFactorIntB(t *testing.T) { + const N = 3e5 // must be < math.MaxInt32 + factors := make([][]FactorTerm, N+1) + // set up the divisors + for prime := uint32(2); prime <= N; prime, _ = NextPrime(prime) { + for n := int(prime); n <= N; n += int(prime) { + factors[n] = append(factors[n], FactorTerm{prime, 0}) + } + } + // set up the powers + for n := 2; n <= N; n++ { + f := factors[n] + m := uint32(n) + for i, v := range f { + for m%v.Prime == 0 { + m /= v.Prime + v.Power++ + } + f[i] = v + } + factors[n] = f + } + // check equal + for n, e := range factors { + g := FactorInt(uint32(n)) + if len(e) != len(g) { + t.Fatal(n, "len", g, "!=", e) + } + + for i, ev := range e { + gv := g[i] + if ev.Prime != gv.Prime { + t.Fatal(n, "prime", gv, ev) + } + + if ev.Power != gv.Power { + t.Fatal(n, "power", gv, ev) + } + } + } +} + +func BenchmarkISqrt(b *testing.B) { + b.StopTimer() + n := make([]uint32, b.N) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < b.N; i++ { + n[i] = rng.Uint32() + } + b.StartTimer() + for _, n := range n { + ISqrt(n) + } +} + +func BenchmarkSqrtUint64(b *testing.B) { + b.StopTimer() + n := make([]uint64, b.N) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < b.N; i++ { + n[i] = uint64(rng.Uint32())<<32 | 
uint64(rng.Uint32()) + } + b.StartTimer() + for _, n := range n { + SqrtUint64(n) + } +} + +func benchmarkSqrtBig(b *testing.B, bits int) { + b.StopTimer() + n := make([]*big.Int, b.N) + rng := rand.New(rand.NewSource(1)) + var nn, h big.Int + h.SetBit(&h, bits, 1) + for i := 0; i < b.N; i++ { + n[i] = nn.Rand(rng, &h) + } + runtime.GC() + b.StartTimer() + for _, n := range n { + SqrtBig(n) + } +} + +func BenchmarkSqrtBig2e1e1(b *testing.B) { + benchmarkSqrtBig(b, 1e1) +} + +func BenchmarkSqrtBig2e1e2(b *testing.B) { + benchmarkSqrtBig(b, 1e2) +} + +func BenchmarkSqrtBig2e1e3(b *testing.B) { + benchmarkSqrtBig(b, 1e3) +} + +func BenchmarkSqrtBig2e1e4(b *testing.B) { + benchmarkSqrtBig(b, 1e4) +} + +func BenchmarkSqrtBig2e1e5(b *testing.B) { + benchmarkSqrtBig(b, 1e5) +} + +func BenchmarkFactorInt(b *testing.B) { + b.StopTimer() + n := make([]uint32, b.N) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < b.N; i++ { + n[i] = rng.Uint32() + } + b.StartTimer() + for _, n := range n { + FactorInt(n) + } +} + +func TestIsPrimeUint16(t *testing.T) { + for n := 0; n <= math.MaxUint16; n++ { + if IsPrimeUint16(uint16(n)) != IsPrime(uint32(n)) { + t.Fatal(n) + } + } +} + +func BenchmarkIsPrimeUint16(b *testing.B) { + b.StopTimer() + n := make([]uint16, b.N) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < b.N; i++ { + n[i] = uint16(rng.Uint32()) + } + b.StartTimer() + for _, n := range n { + IsPrimeUint16(n) + } +} + +func TestNextPrimeUint16(t *testing.T) { + for n := 0; n <= math.MaxUint16; n++ { + p, ok := NextPrimeUint16(uint16(n)) + p2, ok2 := NextPrime(uint32(n)) + switch { + case ok: + if !ok2 || uint32(p) != p2 { + t.Fatal(n, p, ok) + } + case !ok && ok2: + if p2 < 65536 { + t.Fatal(n, p, ok) + } + } + } +} + +func BenchmarkNextPrimeUint16(b *testing.B) { + b.StopTimer() + n := make([]uint16, b.N) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < b.N; i++ { + n[i] = uint16(rng.Uint32()) + } + b.StartTimer() + for _, n := range n { + NextPrimeUint16(n) 
+ } +} + +/* + +From: http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan + +Counting bits set, Brian Kernighan's way + +unsigned int v; // count the number of bits set in v +unsigned int c; // c accumulates the total bits set in v +for (c = 0; v; c++) +{ + v &= v - 1; // clear the least significant bit set +} + +Brian Kernighan's method goes through as many iterations as there are set bits. +So if we have a 32-bit word with only the high bit set, then it will only go +once through the loop. + +Published in 1988, the C Programming Language 2nd Ed. (by Brian W. Kernighan +and Dennis M. Ritchie) mentions this in exercise 2-9. On April 19, 2006 Don +Knuth pointed out to me that this method "was first published by Peter Wegner +in CACM 3 (1960), 322. (Also discovered independently by Derrick Lehmer and +published in 1964 in a book edited by Beckenbach.)" +*/ +func bcnt(v uint64) (c int) { + for ; v != 0; c++ { + v &= v - 1 + } + return +} + +func TestPopCount(t *testing.T) { + const N = 4e5 + maxUint64 := big.NewInt(0) + maxUint64.SetBit(maxUint64, 64, 1) + maxUint64.Sub(maxUint64, big.NewInt(1)) + rng := r64() + for i := 0; i < N; i++ { + n := uint64(rng.Next().Int64()) + if g, e := PopCountByte(byte(n)), bcnt(uint64(byte(n))); g != e { + t.Fatal(n, g, e) + } + + if g, e := PopCountUint16(uint16(n)), bcnt(uint64(uint16(n))); g != e { + t.Fatal(n, g, e) + } + + if g, e := PopCountUint32(uint32(n)), bcnt(uint64(uint32(n))); g != e { + t.Fatal(n, g, e) + } + + if g, e := PopCount(int(n)), bcnt(uint64(uint(n))); g != e { + t.Fatal(n, g, e) + } + + if g, e := PopCountUint(uint(n)), bcnt(uint64(uint(n))); g != e { + t.Fatal(n, g, e) + } + + if g, e := PopCountUint64(n), bcnt(n); g != e { + t.Fatal(n, g, e) + } + + if g, e := PopCountUintptr(uintptr(n)), bcnt(uint64(uintptr(n))); g != e { + t.Fatal(n, g, e) + } + } +} + +var gcds = []struct{ a, b, gcd uint64 }{ + {8, 12, 4}, + {12, 18, 6}, + {42, 56, 14}, + {54, 24, 6}, + {252, 105, 21}, + {1989, 867, 
51}, + {1071, 462, 21}, + {2 * 3 * 5 * 7 * 11, 5 * 7 * 11 * 13 * 17, 5 * 7 * 11}, + {2 * 3 * 5 * 7 * 7 * 11, 5 * 7 * 7 * 11 * 13 * 17, 5 * 7 * 7 * 11}, + {2 * 3 * 5 * 7 * 7 * 11, 5 * 7 * 7 * 13 * 17, 5 * 7 * 7}, + {2 * 3 * 5 * 7 * 11, 13 * 17 * 19, 1}, +} + +func TestGCD(t *testing.T) { + for i, v := range gcds { + if v.a <= math.MaxUint16 && v.b <= math.MaxUint16 { + if g, e := uint64(GCDUint16(uint16(v.a), uint16(v.b))), v.gcd; g != e { + t.Errorf("%d: got gcd(%d, %d) %d, exp %d", i, v.a, v.b, g, e) + } + if g, e := uint64(GCDUint16(uint16(v.b), uint16(v.a))), v.gcd; g != e { + t.Errorf("%d: got gcd(%d, %d) %d, exp %d", i, v.b, v.a, g, e) + } + } + if v.a <= math.MaxUint32 && v.b <= math.MaxUint32 { + if g, e := uint64(GCDUint32(uint32(v.a), uint32(v.b))), v.gcd; g != e { + t.Errorf("%d: got gcd(%d, %d) %d, exp %d", i, v.a, v.b, g, e) + } + if g, e := uint64(GCDUint32(uint32(v.b), uint32(v.a))), v.gcd; g != e { + t.Errorf("%d: got gcd(%d, %d) %d, exp %d", i, v.b, v.a, g, e) + } + } + if g, e := GCDUint64(v.a, v.b), v.gcd; g != e { + t.Errorf("%d: got gcd(%d, %d) %d, exp %d", i, v.a, v.b, g, e) + } + if g, e := GCDUint64(v.b, v.a), v.gcd; g != e { + t.Errorf("%d: got gcd(%d, %d) %d, exp %d", i, v.b, v.a, g, e) + } + } +} + +func lg2(n uint64) (lg int) { + if n == 0 { + return -1 + } + + for n >>= 1; n != 0; n >>= 1 { + lg++ + } + return +} + +func TestLog2(t *testing.T) { + if g, e := Log2Byte(0), -1; g != e { + t.Error(g, e) + } + if g, e := Log2Uint16(0), -1; g != e { + t.Error(g, e) + } + if g, e := Log2Uint32(0), -1; g != e { + t.Error(g, e) + } + if g, e := Log2Uint64(0), -1; g != e { + t.Error(g, e) + } + const N = 1e6 + rng := r64() + for i := 0; i < N; i++ { + n := uint64(rng.Next().Int64()) + if g, e := Log2Uint64(n), lg2(n); g != e { + t.Fatalf("%b %d %d", n, g, e) + } + if g, e := Log2Uint32(uint32(n)), lg2(n&0xffffffff); g != e { + t.Fatalf("%b %d %d", n, g, e) + } + if g, e := Log2Uint16(uint16(n)), lg2(n&0xffff); g != e { + t.Fatalf("%b %d %d", n, g, 
e) + } + if g, e := Log2Byte(byte(n)), lg2(n&0xff); g != e { + t.Fatalf("%b %d %d", n, g, e) + } + } +} + +func TestBitLen(t *testing.T) { + if g, e := BitLenByte(0), 0; g != e { + t.Error(g, e) + } + if g, e := BitLenUint16(0), 0; g != e { + t.Error(g, e) + } + if g, e := BitLenUint32(0), 0; g != e { + t.Error(g, e) + } + if g, e := BitLenUint64(0), 0; g != e { + t.Error(g, e) + } + if g, e := BitLenUintptr(0), 0; g != e { + t.Error(g, e) + } + const N = 1e6 + rng := r64() + for i := 0; i < N; i++ { + n := uint64(rng.Next().Int64()) + if g, e := BitLenUintptr(uintptr(n)), lg2(uint64(uintptr(n)))+1; g != e { + t.Fatalf("%b %d %d", n, g, e) + } + if g, e := BitLenUint64(n), lg2(n)+1; g != e { + t.Fatalf("%b %d %d", n, g, e) + } + if g, e := BitLenUint32(uint32(n)), lg2(n&0xffffffff)+1; g != e { + t.Fatalf("%b %d %d", n, g, e) + } + if g, e := BitLen(int(n)), lg2(uint64(uint(n)))+1; g != e { + t.Fatalf("%b %d %d", n, g, e) + } + if g, e := BitLenUint(uint(n)), lg2(uint64(uint(n)))+1; g != e { + t.Fatalf("%b %d %d", n, g, e) + } + if g, e := BitLenUint16(uint16(n)), lg2(n&0xffff)+1; g != e { + t.Fatalf("%b %d %d", n, g, e) + } + if g, e := BitLenByte(byte(n)), lg2(n&0xff)+1; g != e { + t.Fatalf("%b %d %d", n, g, e) + } + } +} + +func BenchmarkGCDByte(b *testing.B) { + const N = 1 << 16 + type t byte + type u struct{ a, b t } + b.StopTimer() + rng := r32() + a := make([]u, N) + for i := range a { + a[i] = u{t(rng.Next()), t(rng.Next())} + } + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + GCDByte(byte(v.a), byte(v.b)) + } +} + +func BenchmarkGCDUint16(b *testing.B) { + const N = 1 << 16 + type t uint16 + type u struct{ a, b t } + b.StopTimer() + rng := r32() + a := make([]u, N) + for i := range a { + a[i] = u{t(rng.Next()), t(rng.Next())} + } + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + GCDUint16(uint16(v.a), uint16(v.b)) + } +} + +func BenchmarkGCDUint32(b *testing.B) { + const N = 1 << 16 + type t uint32 + type u struct{ a, b t } 
+ b.StopTimer() + rng := r32() + a := make([]u, N) + for i := range a { + a[i] = u{t(rng.Next()), t(rng.Next())} + } + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + GCDUint32(uint32(v.a), uint32(v.b)) + } +} + +func BenchmarkGCDUint64(b *testing.B) { + const N = 1 << 16 + type t uint64 + type u struct{ a, b t } + b.StopTimer() + rng := r64() + a := make([]u, N) + for i := range a { + a[i] = u{t(rng.Next().Int64()), t(rng.Next().Int64())} + } + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + GCDUint64(uint64(v.a), uint64(v.b)) + } +} + +func BenchmarkLog2Byte(b *testing.B) { + const N = 1 << 16 + type t byte + b.StopTimer() + rng := r32() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + Log2Byte(byte(a[i&(N-1)])) + } +} + +func BenchmarkLog2Uint16(b *testing.B) { + const N = 1 << 16 + type t uint16 + b.StopTimer() + rng := r32() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + Log2Uint16(uint16(a[i&(N-1)])) + } +} + +func BenchmarkLog2Uint32(b *testing.B) { + const N = 1 << 16 + type t uint32 + b.StopTimer() + rng := r32() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + Log2Uint32(uint32(a[i&(N-1)])) + } +} + +func BenchmarkLog2Uint64(b *testing.B) { + const N = 1 << 16 + type t uint64 + b.StopTimer() + rng := r64() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next().Int64()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + Log2Uint64(uint64(a[i&(N-1)])) + } +} +func BenchmarkBitLenByte(b *testing.B) { + const N = 1 << 16 + type t byte + b.StopTimer() + rng := r32() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + BitLenByte(byte(a[i&(N-1)])) + } +} + +func BenchmarkBitLenUint16(b *testing.B) { + const N = 1 << 16 + type t uint16 + b.StopTimer() + rng := 
r32() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + BitLenUint16(uint16(a[i&(N-1)])) + } +} + +func BenchmarkBitLenUint32(b *testing.B) { + const N = 1 << 16 + type t uint32 + b.StopTimer() + rng := r32() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + BitLenUint32(uint32(a[i&(N-1)])) + } +} + +func BenchmarkBitLen(b *testing.B) { + const N = 1 << 16 + type t int + b.StopTimer() + rng := r64() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next().Int64()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + BitLen(int(a[i&(N-1)])) + } +} + +func BenchmarkBitLenUint(b *testing.B) { + const N = 1 << 16 + type t uint + b.StopTimer() + rng := r64() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next().Int64()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + BitLenUint(uint(a[i&(N-1)])) + } +} + +func BenchmarkBitLenUintptr(b *testing.B) { + const N = 1 << 16 + type t uintptr + b.StopTimer() + rng := r64() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next().Int64()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + BitLenUintptr(uintptr(a[i&(N-1)])) + } +} + +func BenchmarkBitLenUint64(b *testing.B) { + const N = 1 << 16 + type t uint64 + b.StopTimer() + rng := r64() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next().Int64()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + BitLenUint64(uint64(a[i&(N-1)])) + } +} + +func BenchmarkPopCountByte(b *testing.B) { + const N = 1 << 16 + type t byte + b.StopTimer() + rng := r32() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + PopCountByte(byte(a[i&(N-1)])) + } +} + +func BenchmarkPopCountUint16(b *testing.B) { + const N = 1 << 16 + type t uint16 + b.StopTimer() + rng := r32() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next()) + } + b.StartTimer() + for i := 0; i < 
b.N; i++ { + PopCountUint16(uint16(a[i&(N-1)])) + } +} + +func BenchmarkPopCountUint32(b *testing.B) { + const N = 1 << 16 + type t uint32 + b.StopTimer() + rng := r32() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + PopCountUint32(uint32(a[i&(N-1)])) + } +} + +func BenchmarkPopCount(b *testing.B) { + const N = 1 << 16 + type t int + b.StopTimer() + rng := r64() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next().Int64()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + PopCount(int(a[i&(N-1)])) + } +} + +func BenchmarkPopCountUint(b *testing.B) { + const N = 1 << 16 + type t uint + b.StopTimer() + rng := r64() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next().Int64()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + PopCountUint(uint(a[i&(N-1)])) + } +} + +func BenchmarkPopCountUintptr(b *testing.B) { + const N = 1 << 16 + type t uintptr + b.StopTimer() + rng := r64() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next().Int64()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + PopCountUintptr(uintptr(a[i&(N-1)])) + } +} + +func BenchmarkPopCountUint64(b *testing.B) { + const N = 1 << 16 + type t uint64 + b.StopTimer() + rng := r64() + a := make([]t, N) + for i := range a { + a[i] = t(rng.Next().Int64()) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + PopCountUint64(uint64(a[i&(N-1)])) + } +} + +func TestUintptrBits(t *testing.T) { + switch g := UintptrBits(); g { + case 32, 64: + // ok + t.Log(g) + default: + t.Fatalf("got %d, expected 32 or 64", g) + } +} + +func BenchmarkUintptrBits(b *testing.B) { + for i := 0; i < b.N; i++ { + UintptrBits() + } +} + +func TestModPowByte(t *testing.T) { + data := []struct{ b, e, m, r byte }{ + {0, 1, 1, 0}, + {0, 2, 1, 0}, + {0, 3, 1, 0}, + + {1, 0, 1, 0}, + {1, 1, 1, 0}, + {1, 2, 1, 0}, + {1, 3, 1, 0}, + + {2, 0, 1, 0}, + {2, 1, 1, 0}, + {2, 2, 1, 0}, + {2, 3, 1, 0}, + + {2, 11, 23, 1}, // 23|M11 + {2, 11, 89, 
1}, // 89|M11 + {2, 23, 47, 1}, // 47|M23 + {5, 3, 13, 8}, + } + + for _, v := range data { + if g, e := ModPowByte(v.b, v.e, v.m), v.r; g != e { + t.Errorf("b %d e %d m %d: got %d, exp %d", v.b, v.e, v.m, g, e) + } + } +} + +func TestModPowUint16(t *testing.T) { + data := []struct{ b, e, m, r uint16 }{ + {0, 1, 1, 0}, + {0, 2, 1, 0}, + {0, 3, 1, 0}, + + {1, 0, 1, 0}, + {1, 1, 1, 0}, + {1, 2, 1, 0}, + {1, 3, 1, 0}, + + {2, 0, 1, 0}, + {2, 1, 1, 0}, + {2, 2, 1, 0}, + {2, 3, 1, 0}, + + {2, 11, 23, 1}, // 23|M11 + {2, 11, 89, 1}, // 89|M11 + {2, 23, 47, 1}, // 47|M23 + {2, 929, 13007, 1}, // 13007|M929 + {4, 13, 497, 445}, + {5, 3, 13, 8}, + } + + for _, v := range data { + if g, e := ModPowUint16(v.b, v.e, v.m), v.r; g != e { + t.Errorf("b %d e %d m %d: got %d, exp %d", v.b, v.e, v.m, g, e) + } + } +} + +func TestModPowUint32(t *testing.T) { + data := []struct{ b, e, m, r uint32 }{ + {0, 1, 1, 0}, + {0, 2, 1, 0}, + {0, 3, 1, 0}, + + {1, 0, 1, 0}, + {1, 1, 1, 0}, + {1, 2, 1, 0}, + {1, 3, 1, 0}, + + {2, 0, 1, 0}, + {2, 1, 1, 0}, + {2, 2, 1, 0}, + {2, 3, 1, 0}, + + {2, 23, 47, 1}, // 47|M23 + {2, 67, 193707721, 1}, // 193707721|M67 + {2, 929, 13007, 1}, // 13007|M929 + {4, 13, 497, 445}, + {5, 3, 13, 8}, + {2, 500471, 264248689, 1}, + {2, 1000249, 112027889, 1}, + {2, 2000633, 252079759, 1}, + {2, 3000743, 222054983, 1}, + {2, 4000741, 1920355681, 1}, + {2, 5000551, 330036367, 1}, + {2, 6000479, 1020081431, 1}, + {2, 7000619, 840074281, 1}, + {2, 8000401, 624031279, 1}, + {2, 9000743, 378031207, 1}, + {2, 10000961, 380036519, 1}, + {2, 20000723, 40001447, 1}, + } + + for _, v := range data { + if g, e := ModPowUint32(v.b, v.e, v.m), v.r; g != e { + t.Errorf("b %d e %d m %d: got %d, exp %d", v.b, v.e, v.m, g, e) + } + } +} + +func TestModPowUint64(t *testing.T) { + data := []struct{ b, e, m, r uint64 }{ + {0, 1, 1, 0}, + {0, 2, 1, 0}, + {0, 3, 1, 0}, + + {1, 0, 1, 0}, + {1, 1, 1, 0}, + {1, 2, 1, 0}, + {1, 3, 1, 0}, + + {2, 0, 1, 0}, + {2, 1, 1, 0}, + {2, 2, 1, 0}, + {2, 
3, 1, 0}, + + {2, 23, 47, 1}, // 47|M23 + {2, 67, 193707721, 1}, // 193707721|M67 + {2, 929, 13007, 1}, // 13007|M929 + {4, 13, 497, 445}, + {5, 3, 13, 8}, + {2, 500471, 264248689, 1}, // m|Me ... + {2, 1000249, 112027889, 1}, + {2, 2000633, 252079759, 1}, + {2, 3000743, 222054983, 1}, + {2, 4000741, 1920355681, 1}, + {2, 5000551, 330036367, 1}, + {2, 6000479, 1020081431, 1}, + {2, 7000619, 840074281, 1}, + {2, 8000401, 624031279, 1}, + {2, 9000743, 378031207, 1}, + {2, 10000961, 380036519, 1}, + {2, 20000723, 40001447, 1}, + {2, 1000099, 1872347344039, 1}, + + {9223372036854775919, 9223372036854776030, 9223372036854776141, 7865333882915297658}, + } + + for _, v := range data { + if g, e := ModPowUint64(v.b, v.e, v.m), v.r; g != e { + t.Errorf("b %d e %d m %d: got %d, exp %d", v.b, v.e, v.m, g, e) + } + } +} + +func TestModPowBigInt(t *testing.T) { + data := []struct { + b, e int64 + m interface{} + r int64 + }{ + {0, 1, 1, 0}, + {0, 2, 1, 0}, + {0, 3, 1, 0}, + + {1, 0, 1, 0}, + {1, 1, 1, 0}, + {1, 2, 1, 0}, + {1, 3, 1, 0}, + + {2, 0, 1, 0}, + {2, 1, 1, 0}, + {2, 2, 1, 0}, + {2, 3, 1, 0}, + + {2, 23, 47, 1}, // 47|M23 + {2, 67, 193707721, 1}, // 193707721|M67 + {2, 929, 13007, 1}, // 13007|M929 + {4, 13, 497, 445}, + {5, 3, 13, 8}, + {2, 500471, 264248689, 1}, // m|Me ... 
+ {2, 1000249, 112027889, 1}, + {2, 2000633, 252079759, 1}, + {2, 3000743, 222054983, 1}, + {2, 4000741, 1920355681, 1}, + {2, 5000551, 330036367, 1}, + {2, 6000479, 1020081431, 1}, + {2, 7000619, 840074281, 1}, + {2, 8000401, 624031279, 1}, + {2, 9000743, 378031207, 1}, + {2, 10000961, 380036519, 1}, + {2, 20000723, 40001447, 1}, + {2, 100279, "11502865265922183403581252152383", 1}, + + {2, 7293457, "533975545077050000610542659519277030089249998649", 1}, + } + + for _, v := range data { + var m big.Int + switch x := v.m.(type) { + case int: + m.SetInt64(int64(x)) + case string: + m.SetString(x, 10) + } + b, e, r := big.NewInt(v.b), big.NewInt(v.e), big.NewInt(v.r) + if g, e := ModPowBigInt(b, e, &m), r; g.Cmp(e) != 0 { + t.Errorf("b %s e %s m %v: got %s, exp %s", b, e, m, g, e) + } + } + + s := func(n string) *big.Int { + i, ok := big.NewInt(0).SetString(n, 10) + if !ok { + t.Fatal(ok) + } + + return i + } + + if g, e := ModPowBigInt( + s("36893488147419103343"), s("36893488147419103454"), s("36893488147419103565")), s("34853007610367449339"); g.Cmp(e) != 0 { + t.Fatal(g, e) + } +} + +func BenchmarkModPowByte(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct{ b, e, m byte } + a := make([]t, N) + r := r32() + for i := range a { + a[i] = t{ + byte(r.Next() | 2), + byte(r.Next() | 2), + byte(r.Next() | 2), + } + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + ModPowByte(v.b, v.e, v.m) + } +} + +func BenchmarkModPowUint16(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct{ b, e, m uint16 } + a := make([]t, N) + r := r32() + for i := range a { + a[i] = t{ + uint16(r.Next() | 2), + uint16(r.Next() | 2), + uint16(r.Next() | 2), + } + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + ModPowUint16(v.b, v.e, v.m) + } +} + +func BenchmarkModPowUint32(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct{ b, e, m uint32 } + a := make([]t, N) + r := r32() + for 
i := range a { + a[i] = t{ + uint32(r.Next() | 2), + uint32(r.Next() | 2), + uint32(r.Next() | 2), + } + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + ModPowUint32(v.b, v.e, v.m) + } +} + +func BenchmarkModPowUint64(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct{ b, e, m uint64 } + a := make([]t, N) + r := r64() + for i := range a { + a[i] = t{ + uint64(r.Next().Int64() | 2), + uint64(r.Next().Int64() | 2), + uint64(r.Next().Int64() | 2), + } + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + ModPowUint64(v.b, v.e, v.m) + } +} + +func BenchmarkModPowBigInt(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct{ b, e, m *big.Int } + a := make([]t, N) + mx := big.NewInt(math.MaxInt64) + mx.Mul(mx, mx) + r, err := NewFCBig(big.NewInt(2), mx, true) + if err != nil { + b.Fatal(err) + } + for i := range a { + a[i] = t{ + r.Next(), + r.Next(), + r.Next(), + } + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + ModPowBigInt(v.b, v.e, v.m) + } +} + +func TestAdd128(t *testing.T) { + const N = 1e5 + r := r64() + var mm big.Int + for i := 0; i < N; i++ { + a, b := uint64(r.Next().Int64()), uint64(r.Next().Int64()) + aa, bb := big.NewInt(0).SetUint64(a), big.NewInt(0).SetUint64(b) + mhi, mlo := AddUint128_64(a, b) + m := big.NewInt(0).SetUint64(mhi) + m.Lsh(m, 64) + m.Add(m, big.NewInt(0).SetUint64(mlo)) + mm.Add(aa, bb) + if m.Cmp(&mm) != 0 { + t.Fatalf("%d\na %40d\nb %40d\ng %40s %032x\ne %40s %032x", i, a, b, m, m, &mm, &mm) + } + } +} + +func TestMul128(t *testing.T) { + const N = 1e5 + r := r64() + var mm big.Int + f := func(a, b uint64) { + aa, bb := big.NewInt(0).SetUint64(a), big.NewInt(0).SetUint64(b) + mhi, mlo := MulUint128_64(a, b) + m := big.NewInt(0).SetUint64(mhi) + m.Lsh(m, 64) + m.Add(m, big.NewInt(0).SetUint64(mlo)) + mm.Mul(aa, bb) + if m.Cmp(&mm) != 0 { + t.Fatalf("\na %40d\nb %40d\ng %40s %032x\ne %40s %032x", a, b, m, 
m, &mm, &mm) + } + } + for i := 0; i < N; i++ { + f(uint64(r.Next().Int64()), uint64(r.Next().Int64())) + } + for x := 0; x <= 1<<9; x++ { + for y := 0; y <= 1<<9; y++ { + f(math.MaxUint64-uint64(x), math.MaxUint64-uint64(y)) + } + } +} + +func BenchmarkMul128(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct{ a, b uint64 } + a := make([]t, N) + r := r64() + for i := range a { + a[i] = t{ + uint64(r.Next().Int64()), + uint64(r.Next().Int64()), + } + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + MulUint128_64(v.a, v.b) + } +} + +func BenchmarkMul128Big(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct{ a, b *big.Int } + a := make([]t, N) + r := r64() + for i := range a { + a[i] = t{ + big.NewInt(r.Next().Int64()), + big.NewInt(r.Next().Int64()), + } + } + var x big.Int + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + x.Mul(v.a, v.b) + } +} + +func TestIsPrimeUint64(t *testing.T) { + f := func(lo, hi uint64, exp int) { + got := 0 + for n := lo; n <= hi; { + if IsPrimeUint64(n) { + got++ + } + n0 := n + n++ + if n < n0 { + break + } + } + if got != exp { + t.Fatal(lo, hi, got, exp) + } + } + + // lo, hi, PrimePi(hi)-PrimePi(lo) + f(0, 1e4, 1229) + f(1e5, 1e5+1e4, 861) + f(1e6, 1e6+1e4, 753) + f(1e7, 1e7+1e4, 614) + f(1e8, 1e8+1e4, 551) + f(1e9, 1e9+1e4, 487) + f(1e10, 1e10+1e4, 406) + f(1e11, 1e11+1e4, 394) + f(1e12, 1e12+1e4, 335) + f(1e13, 1e13+1e4, 354) + f(1e14, 1e14+1e4, 304) + f(1e15, 1e15+1e4, 263) + f(1e16, 1e16+1e4, 270) + f(1e17, 1e17+1e4, 265) + f(1e18, 1e18+1e4, 241) + f(1e19, 1e19+1e4, 255) + f(1<<64-1e4, 1<<64-1, 218) +} + +func TestProbablyPrimeUint32(t *testing.T) { + f := func(n, firstFail uint32, primes []uint32) { + for ; n <= firstFail; n += 2 { + prp := true + for _, a := range primes { + if !ProbablyPrimeUint32(n, a) { + prp = false + break + } + } + if prp != IsPrime(n) && n != firstFail { + t.Fatal(n) + } + } + } + if !ProbablyPrimeUint32(5, 
2) { + t.Fatal(false) + } + if !ProbablyPrimeUint32(7, 2) { + t.Fatal(false) + } + if ProbablyPrimeUint32(9, 2) { + t.Fatal(true) + } + if !ProbablyPrimeUint32(11, 2) { + t.Fatal(false) + } + // http://oeis.org/A014233 + f(5, 2047, []uint32{2}) + f(2047, 1373653, []uint32{2, 3}) + f(1373653, 25326001, []uint32{2, 3, 5}) +} + +func BenchmarkProbablyPrimeUint32(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct{ n, a uint32 } + data := make([]t, N) + r := r32() + for i := range data { + n := uint32(r.Next()) | 1 + if n <= 3 { + n += 5 + } + a := uint32(r.Next()) + if a <= 1 { + a += 2 + } + data[i] = t{n, a} + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := data[i&(N-1)] + ProbablyPrimeUint32(v.n, v.a) + } +} + +func TestProbablyPrimeUint64_32(t *testing.T) { + f := func(n, firstFail uint64, primes []uint32) { + for ; n <= firstFail; n += 2 { + prp := true + for _, a := range primes { + if !ProbablyPrimeUint64_32(n, a) { + prp = false + break + } + } + if prp != IsPrimeUint64(n) && n != firstFail { + t.Fatal(n) + } + } + } + if !ProbablyPrimeUint64_32(5, 2) { + t.Fatal(false) + } + if !ProbablyPrimeUint64_32(7, 2) { + t.Fatal(false) + } + if ProbablyPrimeUint64_32(9, 2) { + t.Fatal(true) + } + if !ProbablyPrimeUint64_32(11, 2) { + t.Fatal(false) + } + // http://oeis.org/A014233 + f(5, 2047, []uint32{2}) + f(2047, 1373653, []uint32{2, 3}) +} + +func BenchmarkProbablyPrimeUint64_32(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct { + n uint64 + a uint32 + } + data := make([]t, N) + r := r32() + r2 := r64() + for i := range data { + var n uint64 + for n <= 3 { + n = uint64(r2.Next().Int64()) | 1 + } + var a uint32 + for a <= 1 { + a = uint32(r.Next()) + } + data[i] = t{n, a} + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := data[i&(N-1)] + ProbablyPrimeUint64_32(v.n, v.a) + } +} + +func TestProbablyPrimeBigInt_32(t *testing.T) { + f := func(n0, firstFail0 uint64, primes []uint32) { + n, 
firstFail := big.NewInt(0).SetUint64(n0), big.NewInt(0).SetUint64(firstFail0) + for ; n.Cmp(firstFail) <= 0; n.Add(n, _2) { + prp := true + for _, a := range primes { + if !ProbablyPrimeBigInt_32(n, a) { + prp = false + break + } + } + if prp != IsPrimeUint64(n0) && n0 != firstFail0 { + t.Fatal(n) + } + n0 += 2 + } + } + if !ProbablyPrimeBigInt_32(big.NewInt(5), 2) { + t.Fatal(false) + } + if !ProbablyPrimeBigInt_32(big.NewInt(7), 2) { + t.Fatal(false) + } + if ProbablyPrimeBigInt_32(big.NewInt(9), 2) { + t.Fatal(true) + } + if !ProbablyPrimeBigInt_32(big.NewInt(11), 2) { + t.Fatal(false) + } + // http://oeis.org/A014233 + f(5, 2047, []uint32{2}) + f(2047, 1373653, []uint32{2, 3}) +} + +func BenchmarkProbablyPrimeBigInt_32(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct { + n *big.Int + a uint32 + } + data := make([]t, N) + r := r32() + r2 := r64() + for i := range data { + var n uint64 + for n <= 3 { + n = uint64(r2.Next().Int64()) | 1 + } + var a uint32 + for a <= 1 { + a = uint32(r.Next()) + } + data[i] = t{big.NewInt(0).SetUint64(n), a} + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := data[i&(N-1)] + ProbablyPrimeBigInt_32(v.n, v.a) + } +} + +func TestProbablyPrimeBigInt(t *testing.T) { + f := func(n0, firstFail0 uint64, primes []uint32) { + n, firstFail := big.NewInt(0).SetUint64(n0), big.NewInt(0).SetUint64(firstFail0) + for ; n.Cmp(firstFail) <= 0; n.Add(n, _2) { + prp := true + var a big.Int + for _, a0 := range primes { + a.SetInt64(int64(a0)) + if !ProbablyPrimeBigInt(n, &a) { + prp = false + break + } + } + if prp != IsPrimeUint64(n0) && n0 != firstFail0 { + t.Fatal(n) + } + n0 += 2 + } + } + if !ProbablyPrimeBigInt(big.NewInt(5), _2) { + t.Fatal(false) + } + if !ProbablyPrimeBigInt(big.NewInt(7), _2) { + t.Fatal(false) + } + if ProbablyPrimeBigInt(big.NewInt(9), _2) { + t.Fatal(true) + } + if !ProbablyPrimeBigInt(big.NewInt(11), _2) { + t.Fatal(false) + } + // http://oeis.org/A014233 + f(5, 2047, []uint32{2}) + 
f(2047, 1373653, []uint32{2, 3}) +} + +var once2059 sync.Once + +func BenchmarkProbablyPrimeBigInt64(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + once2059.Do(func() { b.Log("64 bit n, 64 bit a\n") }) + type t struct { + n, a *big.Int + } + data := make([]t, N) + r := r64() + for i := range data { + var n uint64 + for n <= 3 { + n = uint64(r.Next().Int64()) | 1 + } + var a uint64 + for a <= 1 { + a = uint64(r.Next().Int64()) + } + data[i] = t{big.NewInt(0).SetUint64(n), big.NewInt(0).SetUint64(uint64(a))} + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := data[i&(N-1)] + ProbablyPrimeBigInt(v.n, v.a) + } +} + +var once2090 sync.Once + +func BenchmarkProbablyPrimeBigInt128(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + once2090.Do(func() { b.Log("128 bit n, 128 bit a\n") }) + type t struct { + n, a *big.Int + } + data := make([]t, N) + r := r64() + for i := range data { + n := big.NewInt(0).SetUint64(uint64(r.Next().Int64())) + n.Lsh(n, 64) + n.Add(n, big.NewInt(0).SetUint64(uint64(r.Next().Int64())|1)) + a := big.NewInt(0).SetUint64(uint64(r.Next().Int64())) + a.Lsh(a, 64) + a.Add(a, big.NewInt(0).SetUint64(uint64(r.Next().Int64()))) + data[i] = t{n, a} + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := data[i&(N-1)] + ProbablyPrimeBigInt(v.n, v.a) + } +} + +func TestQCmpUint32(t *testing.T) { + const N = 6e4 + r := r32() + var x, y big.Rat + for i := 0; i < N; i++ { + a, b, c, d := uint32(r.Next()), uint32(r.Next()), uint32(r.Next()), uint32(r.Next()) + x.SetFrac64(int64(a), int64(b)) + y.SetFrac64(int64(c), int64(d)) + if g, e := QCmpUint32(a, b, c, d), x.Cmp(&y); g != e { + t.Fatal(a, b, c, d, g, e) + } + } +} + +func TestQScaleUint32(t *testing.T) { + const N = 4e4 + r := r32() + var x, y big.Rat + var a uint64 + var b, c, d uint32 + for i := 0; i < N; i++ { + for { + b, c, d = uint32(r.Next()), uint32(r.Next()), uint32(r.Next()) + a = QScaleUint32(b, c, d) + if a <= math.MaxInt64 { + break + } + } + 
x.SetFrac64(int64(a), int64(b)) + y.SetFrac64(int64(c), int64(d)) + if g := x.Cmp(&y); g < 0 { + t.Fatal(a, b, c, d, g, "expexted 1 or 0") + } + + if a != 0 { + x.SetFrac64(int64(a-1), int64(b)) + if g := x.Cmp(&y); g > 0 { + t.Fatal(a, b, c, d, g, "expected -1 or 0") + } + } + } +} + +var smalls = []uint32{2, 3, 5, 7, 11, 13, 17, 19, 23, 29} + +func isPrimorialProduct(t FactorTerms, maxp uint32) bool { + if len(t) == 0 { + return false + } + + pmax := uint32(32) + for i, v := range t { + if v.Prime != smalls[i] || v.Power > pmax || v.Power > maxp { + return false + } + pmax = v.Power + } + return true +} + +func TestPrimorialProductsUint32(t *testing.T) { + r := PrimorialProductsUint32(2*3*5*7*11*13*17*19+1, math.MaxUint32, 1) + if len(r) != 1 { + t.Fatal(len(r)) + } + + if r[0] != 2*3*5*7*11*13*17*19*23 { + t.Fatal(r[0]) + } + + r = PrimorialProductsUint32(0, math.MaxUint32, math.MaxUint32) + if g, e := len(r), 1679; g != e { + t.Fatal(g, e) + } + + m := map[uint32]struct{}{} + for _, v := range r { + if _, ok := m[v]; ok { + t.Fatal(v) + } + + m[v] = struct{}{} + } + + for lo := uint32(0); lo < 5e4; lo += 1e3 { + hi := 1e2 * lo + for max := uint32(0); max <= 32; max++ { + m := map[uint32]struct{}{} + for i, v := range PrimorialProductsUint32(lo, hi, max) { + f := FactorInt(v) + if v < lo || v > hi { + t.Fatal(lo, hi, max, v) + } + + if _, ok := m[v]; ok { + t.Fatal(i, lo, hi, max, v, f) + } + + m[v] = struct{}{} + if !isPrimorialProduct(f, max) { + t.Fatal(i, v) + } + + for _, v := range f { + if v.Power > max { + t.Fatal(i, v, f) + } + } + } + } + } +} + +func BenchmarkPrimorialProductsUint32(b *testing.B) { + for i := 0; i < b.N; i++ { + PrimorialProductsUint32(0, math.MaxUint32, math.MaxUint32) + } +} + +func powerizeUint32BigInt(b uint32, n *big.Int) (e uint32, p *big.Int) { + p = big.NewInt(1) + bb := big.NewInt(int64(b)) + for p.Cmp(n) < 0 { + p.Mul(p, bb) + e++ + } + return +} + +func TestPowerizeUint32BigInt(t *testing.T) { + var data = []struct{ b, n, 
e, p int }{ + {0, 10, 0, -1}, + {1, 10, 0, -1}, + {2, -1, 0, -1}, + {2, 0, 0, 1}, + {2, 1, 0, 1}, + {2, 2, 1, 2}, + {2, 3, 2, 4}, + {3, 0, 0, 1}, + {3, 1, 0, 1}, + {3, 2, 1, 3}, + {3, 3, 1, 3}, + {3, 4, 2, 9}, + {3, 8, 2, 9}, + {3, 9, 2, 9}, + {3, 10, 3, 27}, + {3, 80, 4, 81}, + } + + var n big.Int + for _, v := range data { + b := v.b + n.SetInt64(int64(v.n)) + e, p := PowerizeUint32BigInt(uint32(b), &n) + if e != uint32(v.e) { + t.Fatal(b, &n, e, p, v.e, v.p) + } + + if v.p < 0 { + if p != nil { + t.Fatal(b, &n, e, p, v.e, v.p) + } + continue + } + + if p.Int64() != int64(v.p) { + t.Fatal(b, &n, e, p, v.e, v.p) + } + } + const N = 1e5 + var nn big.Int + for _, base := range []uint32{2, 3, 15, 17} { + for n := 0; n <= N; n++ { + nn.SetInt64(int64(n)) + ge, gp := PowerizeUint32BigInt(base, &nn) + ee, ep := powerizeUint32BigInt(base, &nn) + if ge != ee || gp.Cmp(ep) != 0 { + t.Fatal(base, n, ge, gp, ee, ep) + } + + gp.Div(gp, big.NewInt(int64(base))) + if gp.Sign() > 0 && gp.Cmp(&nn) >= 0 { + t.Fatal(gp.Sign(), gp.Cmp(&nn)) + } + } + } +} + +func benchmarkPowerizeUint32BigInt(b *testing.B, base uint32, exp int) { + b.StopTimer() + var n big.Int + n.SetBit(&n, exp, 1) + b.StartTimer() + for i := 0; i < b.N; i++ { + PowerizeUint32BigInt(base, &n) + } +} + +func BenchmarkPowerizeUint32BigInt_2_2e1e1(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 2, 1e1) +} + +func BenchmarkPowerizeUint32BigInt_2_2e1e2(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 2, 1e2) +} + +func BenchmarkPowerizeUint32BigInt_2_2e1e3(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 2, 1e3) +} + +func BenchmarkPowerizeUint32BigInt_2_2e1e4(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 2, 1e4) +} + +func BenchmarkPowerizeUint32BigInt_2_2e1e5(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 2, 1e5) +} + +func BenchmarkPowerizeUint32BigInt_2_2e1e6(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 2, 1e6) +} + +func BenchmarkPowerizeUint32BigInt_2_2e1e7(b *testing.B) { + 
benchmarkPowerizeUint32BigInt(b, 2, 1e7) +} + +func BenchmarkPowerizeUint32BigInt_3_2e1e1(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 3, 1e1) +} + +func BenchmarkPowerizeUint32BigInt_3_2e1e2(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 3, 1e2) +} + +func BenchmarkPowerizeUint32BigInt_3_2e1e3(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 3, 1e3) +} + +func BenchmarkPowerizeUint32BigInt_3_2e1e4(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 3, 1e4) +} + +func BenchmarkPowerizeUint32BigInt_3_2e1e5(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 3, 1e5) +} + +func BenchmarkPowerizeUint32BigInt_3_2e1e6(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 3, 1e6) +} + +func BenchmarkPowerizeUint32BigInt_15_2e1e1(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 15, 1e1) +} + +func BenchmarkPowerizeUint32BigInt_15_2e1e2(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 15, 1e2) +} + +func BenchmarkPowerizeUint32BigInt_15_2e1e3(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 15, 1e3) +} + +func BenchmarkPowerizeUint32BigInt_15_2e1e4(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 15, 1e4) +} + +func BenchmarkPowerizeUint32BigInt_15_2e1e5(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 15, 1e5) +} + +func BenchmarkPowerizeUint32BigInt_15_2e1e6(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 15, 1e6) +} + +func BenchmarkPowerizeUint32BigInt_17_2e1e1(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 17, 1e1) +} + +func BenchmarkPowerizeUint32BigInt_17_2e1e2(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 17, 1e2) +} + +func BenchmarkPowerizeUint32BigInt_17_2e1e3(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 17, 1e3) +} + +func BenchmarkPowerizeUint32BigInt_17_2e1e4(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 17, 1e4) +} + +func BenchmarkPowerizeUint32BigInt_17_2e1e5(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 17, 1e5) +} + +func BenchmarkPowerizeUint32BigInt_17_2e1e6(b *testing.B) { + benchmarkPowerizeUint32BigInt(b, 
17, 1e6) +} + +func TestPowerizeBigInt(t *testing.T) { + var data = []struct{ b, n, e, p int }{ + {0, 10, 0, -1}, + {1, 10, 0, -1}, + {2, -1, 0, -1}, + {2, 0, 0, 1}, + {2, 1, 0, 1}, + {2, 2, 1, 2}, + {2, 3, 2, 4}, + {3, 0, 0, 1}, + {3, 1, 0, 1}, + {3, 2, 1, 3}, + {3, 3, 1, 3}, + {3, 4, 2, 9}, + {3, 8, 2, 9}, + {3, 9, 2, 9}, + {3, 10, 3, 27}, + {3, 80, 4, 81}, + } + + var b, n big.Int + for _, v := range data { + b.SetInt64(int64(v.b)) + n.SetInt64(int64(v.n)) + e, p := PowerizeBigInt(&b, &n) + if e != uint32(v.e) { + t.Fatal(&b, &n, e, p, v.e, v.p) + } + + if v.p < 0 { + if p != nil { + t.Fatal(&b, &n, e, p, v.e, v.p) + } + continue + } + + if p.Int64() != int64(v.p) { + t.Fatal(&b, &n, e, p, v.e, v.p) + } + } + const N = 1e5 + var nn big.Int + for _, base := range []uint32{2, 3, 15, 17} { + b.SetInt64(int64(base)) + for n := 0; n <= N; n++ { + nn.SetInt64(int64(n)) + ge, gp := PowerizeBigInt(&b, &nn) + ee, ep := powerizeUint32BigInt(base, &nn) + if ge != ee || gp.Cmp(ep) != 0 { + t.Fatal(base, n, ge, gp, ee, ep) + } + + gp.Div(gp, &b) + if gp.Sign() > 0 && gp.Cmp(&nn) >= 0 { + t.Fatal(gp.Sign(), gp.Cmp(&nn)) + } + } + } +} + +func benchmarkPowerizeBigInt(b *testing.B, base uint32, exp int) { + b.StopTimer() + var bb, n big.Int + n.SetBit(&n, exp, 1) + bb.SetInt64(int64(base)) + b.StartTimer() + for i := 0; i < b.N; i++ { + PowerizeBigInt(&bb, &n) + } +} + +func BenchmarkPowerizeBigInt_2_2e1e1(b *testing.B) { + benchmarkPowerizeBigInt(b, 2, 1e1) +} + +func BenchmarkPowerizeBigInt_2_2e1e2(b *testing.B) { + benchmarkPowerizeBigInt(b, 2, 1e2) +} + +func BenchmarkPowerizeBigInt_2_2e1e3(b *testing.B) { + benchmarkPowerizeBigInt(b, 2, 1e3) +} + +func BenchmarkPowerizeBigInt_2_2e1e4(b *testing.B) { + benchmarkPowerizeBigInt(b, 2, 1e4) +} + +func BenchmarkPowerizeBigInt_2_2e1e5(b *testing.B) { + benchmarkPowerizeBigInt(b, 2, 1e5) +} + +func BenchmarkPowerizeBigInt_2_2e1e6(b *testing.B) { + benchmarkPowerizeBigInt(b, 2, 1e6) +} + +func BenchmarkPowerizeBigInt_2_2e1e7(b 
*testing.B) { + benchmarkPowerizeBigInt(b, 2, 1e7) +} + +func BenchmarkPowerizeBigInt_3_2e1e1(b *testing.B) { + benchmarkPowerizeBigInt(b, 3, 1e1) +} + +func BenchmarkPowerizeBigInt_3_2e1e2(b *testing.B) { + benchmarkPowerizeBigInt(b, 3, 1e2) +} + +func BenchmarkPowerizeBigInt_3_2e1e3(b *testing.B) { + benchmarkPowerizeBigInt(b, 3, 1e3) +} + +func BenchmarkPowerizeBigInt_3_2e1e4(b *testing.B) { + benchmarkPowerizeBigInt(b, 3, 1e4) +} + +func BenchmarkPowerizeBigInt_3_2e1e5(b *testing.B) { + benchmarkPowerizeBigInt(b, 3, 1e5) +} + +func BenchmarkPowerizeBigInt_3_2e1e6(b *testing.B) { + benchmarkPowerizeBigInt(b, 3, 1e6) +} + +func BenchmarkPowerizeBigInt_15_2e1e1(b *testing.B) { + benchmarkPowerizeBigInt(b, 15, 1e1) +} + +func BenchmarkPowerizeBigInt_15_2e1e2(b *testing.B) { + benchmarkPowerizeBigInt(b, 15, 1e2) +} + +func BenchmarkPowerizeBigInt_15_2e1e3(b *testing.B) { + benchmarkPowerizeBigInt(b, 15, 1e3) +} + +func BenchmarkPowerizeBigInt_15_2e1e4(b *testing.B) { + benchmarkPowerizeBigInt(b, 15, 1e4) +} + +func BenchmarkPowerizeBigInt_15_2e1e5(b *testing.B) { + benchmarkPowerizeBigInt(b, 15, 1e5) +} + +func BenchmarkPowerizeBigInt_15_2e1e6(b *testing.B) { + benchmarkPowerizeBigInt(b, 15, 1e6) +} + +func BenchmarkPowerizeBigInt_17_2e1e1(b *testing.B) { + benchmarkPowerizeBigInt(b, 17, 1e1) +} + +func BenchmarkPowerizeBigInt_17_2e1e2(b *testing.B) { + benchmarkPowerizeBigInt(b, 17, 1e2) +} + +func BenchmarkPowerizeBigInt_17_2e1e3(b *testing.B) { + benchmarkPowerizeBigInt(b, 17, 1e3) +} + +func BenchmarkPowerizeBigInt_17_2e1e4(b *testing.B) { + benchmarkPowerizeBigInt(b, 17, 1e4) +} + +func BenchmarkPowerizeBigInt_17_2e1e5(b *testing.B) { + benchmarkPowerizeBigInt(b, 17, 1e5) +} + +func BenchmarkPowerizeBigInt_17_2e1e6(b *testing.B) { + benchmarkPowerizeBigInt(b, 17, 1e6) +} + +func TestEnvelope(t *testing.T) { + const prec = 1e-3 + type check struct { + approx Approximation + x, y float64 + } + data := []struct { + points []float64 + checks []check + }{ + { + 
[]float64{0, 1}, + []check{ + {Linear, 0, 0}, + {Linear, 0.25, 0.25}, + {Linear, 0.5, 0.5}, + {Linear, 0.75, 0.75}, + {Linear, 0.9999, 1}, + }, + }, + { + []float64{-1, 0}, + []check{ + {Linear, 0, -1}, + {Linear, 0.25, -0.75}, + {Linear, 0.5, -0.5}, + {Linear, 0.75, -0.25}, + {Linear, 0.9999, 0}, + }, + }, + { + []float64{-1, 1}, + []check{ + {Linear, 0, -1}, + {Linear, 0.25, -0.5}, + {Linear, 0.5, 0}, + {Linear, 0.75, 0.5}, + {Linear, 0.9999, 1}, + }, + }, + { + []float64{-1, 1, -2}, + []check{ + {Linear, 0, -1}, + {Linear, 0.25, 0}, + {Linear, 0.5, 1}, + {Linear, 0.75, -0.5}, + {Linear, 0.9, -1.4}, + {Linear, 0.9999, -2}, + }, + }, + { + []float64{-1, 1}, + []check{ + {Sinusoidal, 0, -1}, + {Sinusoidal, 0.25, -math.Sqrt2 / 2}, + {Sinusoidal, 0.5, 0}, + {Sinusoidal, 0.75, math.Sqrt2 / 2}, + {Sinusoidal, 0.9999, 1}, + }, + }, + { + []float64{-1, 1, -2}, + []check{ + {Sinusoidal, 0, -1}, + {Sinusoidal, 1. / 8, -math.Sqrt2 / 2}, + {Sinusoidal, 2. / 8, 0}, + {Sinusoidal, 3. / 8, math.Sqrt2 / 2}, + {Sinusoidal, 4. / 8, 1}, + {Sinusoidal, 5. / 8, (3*math.Sqrt2 - 2) / 4}, + {Sinusoidal, 6. / 8, -0.5}, + {Sinusoidal, 7. / 8, (-3*math.Sqrt2 - 2) / 4}, + {Sinusoidal, 0.9999, -2}, + }, + }, + } + for i, suite := range data { + for j, test := range suite.checks { + e, g := test.y, Envelope(test.x, suite.points, test.approx) + d := math.Abs(e - g) + if d > prec { + t.Errorf( + "i %d, j %d, x %v, e %v, g %v, d %v, prec %v", + i, j, test.x, e, g, d, prec, + ) + } + } + } +} + +func TestMaxInt(t *testing.T) { + n := int64(MaxInt) + if n != math.MaxInt32 && n != math.MaxInt64 { + t.Error(n) + } + + t.Logf("64 bit ints: %t, MaxInt: %d", n == math.MaxInt64, n) +} + +func TestMinInt(t *testing.T) { + n := int64(MinInt) + if n != math.MinInt32 && n != math.MinInt64 { + t.Error(n) + } + + t.Logf("64 bit ints: %t. 
MinInt: %d", n == math.MinInt64, n) +} + +func TestMaxUint(t *testing.T) { + n := uint64(MaxUint) + if n != math.MaxUint32 && n != math.MaxUint64 { + t.Error(n) + } + + t.Logf("64 bit uints: %t. MaxUint: %d", n == math.MaxUint64, n) +} + +func TestMax(t *testing.T) { + tests := []struct{ a, b, e int }{ + {MinInt, MinIntM1, MaxInt}, + {MinIntM1, MinInt, MaxInt}, + {MinIntM1, MinIntM1, MaxInt}, + + {MinInt, MinInt, MinInt}, + {MinInt + 1, MinInt, MinInt + 1}, + {MinInt, MinInt + 1, MinInt + 1}, + + {-1, -1, -1}, + {-1, 0, 0}, + {-1, 1, 1}, + + {0, -1, 0}, + {0, 0, 0}, + {0, 1, 1}, + + {1, -1, 1}, + {1, 0, 1}, + {1, 1, 1}, + + {MaxInt, MaxInt, MaxInt}, + {MaxInt - 1, MaxInt, MaxInt}, + {MaxInt, MaxInt - 1, MaxInt}, + + {MaxIntP1, MaxInt, MaxInt}, + {MaxInt, MaxIntP1, MaxInt}, + {MaxIntP1, MaxIntP1, MinInt}, + } + + for _, test := range tests { + if g, e := Max(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMin(t *testing.T) { + tests := []struct{ a, b, e int }{ + {MinIntM1, MinInt, MinInt}, + {MinInt, MinIntM1, MinInt}, + {MinIntM1, MinIntM1, MaxInt}, + + {MinInt, MinInt, MinInt}, + {MinInt + 1, MinInt, MinInt}, + {MinInt, MinInt + 1, MinInt}, + + {-1, -1, -1}, + {-1, 0, -1}, + {-1, 1, -1}, + + {0, -1, -1}, + {0, 0, 0}, + {0, 1, 0}, + + {1, -1, -1}, + {1, 0, 0}, + {1, 1, 1}, + + {MaxInt, MaxInt, MaxInt}, + {MaxInt - 1, MaxInt, MaxInt - 1}, + {MaxInt, MaxInt - 1, MaxInt - 1}, + + {MaxIntP1, MaxInt, MinInt}, + {MaxInt, MaxIntP1, MinInt}, + {MaxIntP1, MaxIntP1, MinInt}, + } + + for _, test := range tests { + if g, e := Min(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestUMax(t *testing.T) { + tests := []struct{ a, b, e uint }{ + {0, 0, 0}, + {0, 1, 1}, + {1, 0, 1}, + + {10, 10, 10}, + {10, 11, 11}, + {11, 10, 11}, + {11, 11, 11}, + + {MaxUint, MaxUint, MaxUint}, + {MaxUint, MaxUint - 1, MaxUint}, + {MaxUint - 1, MaxUint, MaxUint}, + {MaxUint - 1, MaxUint - 1, MaxUint - 1}, + + {MaxUint, 
MaxUintP1, MaxUint}, + {MaxUintP1, MaxUint, MaxUint}, + {MaxUintP1, MaxUintP1, 0}, + } + + for _, test := range tests { + if g, e := UMax(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestUMin(t *testing.T) { + tests := []struct{ a, b, e uint }{ + {0, 0, 0}, + {0, 1, 0}, + {1, 0, 0}, + + {10, 10, 10}, + {10, 11, 10}, + {11, 10, 10}, + {11, 11, 11}, + + {MaxUint, MaxUint, MaxUint}, + {MaxUint, MaxUint - 1, MaxUint - 1}, + {MaxUint - 1, MaxUint, MaxUint - 1}, + {MaxUint - 1, MaxUint - 1, MaxUint - 1}, + + {MaxUint, MaxUintP1, 0}, + {MaxUintP1, MaxUint, 0}, + {MaxUintP1, MaxUintP1, 0}, + } + + for _, test := range tests { + if g, e := UMin(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMaxByte(t *testing.T) { + tests := []struct{ a, b, e byte }{ + {0, 0, 0}, + {0, 1, 1}, + {1, 0, 1}, + + {10, 10, 10}, + {10, 11, 11}, + {11, 10, 11}, + {11, 11, 11}, + + {math.MaxUint8, math.MaxUint8, math.MaxUint8}, + {math.MaxUint8, math.MaxUint8 - 1, math.MaxUint8}, + {math.MaxUint8 - 1, math.MaxUint8, math.MaxUint8}, + {math.MaxUint8 - 1, math.MaxUint8 - 1, math.MaxUint8 - 1}, + } + + for _, test := range tests { + if g, e := MaxByte(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMinByte(t *testing.T) { + tests := []struct{ a, b, e byte }{ + {0, 0, 0}, + {0, 1, 0}, + {1, 0, 0}, + + {10, 10, 10}, + {10, 11, 10}, + {11, 10, 10}, + {11, 11, 11}, + + {math.MaxUint8, math.MaxUint8, math.MaxUint8}, + {math.MaxUint8, math.MaxUint8 - 1, math.MaxUint8 - 1}, + {math.MaxUint8 - 1, math.MaxUint8, math.MaxUint8 - 1}, + {math.MaxUint8 - 1, math.MaxUint8 - 1, math.MaxUint8 - 1}, + } + + for _, test := range tests { + if g, e := MinByte(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMaxUint16(t *testing.T) { + tests := []struct{ a, b, e uint16 }{ + {0, 0, 0}, + {0, 1, 1}, + {1, 0, 1}, + + {10, 10, 10}, + {10, 11, 11}, + {11, 10, 11}, + 
{11, 11, 11}, + + {math.MaxUint16, math.MaxUint16, math.MaxUint16}, + {math.MaxUint16, math.MaxUint16 - 1, math.MaxUint16}, + {math.MaxUint16 - 1, math.MaxUint16, math.MaxUint16}, + {math.MaxUint16 - 1, math.MaxUint16 - 1, math.MaxUint16 - 1}, + } + + for _, test := range tests { + if g, e := MaxUint16(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMinUint16(t *testing.T) { + tests := []struct{ a, b, e uint16 }{ + {0, 0, 0}, + {0, 1, 0}, + {1, 0, 0}, + + {10, 10, 10}, + {10, 11, 10}, + {11, 10, 10}, + {11, 11, 11}, + + {math.MaxUint16, math.MaxUint16, math.MaxUint16}, + {math.MaxUint16, math.MaxUint16 - 1, math.MaxUint16 - 1}, + {math.MaxUint16 - 1, math.MaxUint16, math.MaxUint16 - 1}, + {math.MaxUint16 - 1, math.MaxUint16 - 1, math.MaxUint16 - 1}, + } + + for _, test := range tests { + if g, e := MinUint16(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMaxUint32(t *testing.T) { + tests := []struct{ a, b, e uint32 }{ + {0, 0, 0}, + {0, 1, 1}, + {1, 0, 1}, + + {10, 10, 10}, + {10, 11, 11}, + {11, 10, 11}, + {11, 11, 11}, + + {math.MaxUint32, math.MaxUint32, math.MaxUint32}, + {math.MaxUint32, math.MaxUint32 - 1, math.MaxUint32}, + {math.MaxUint32 - 1, math.MaxUint32, math.MaxUint32}, + {math.MaxUint32 - 1, math.MaxUint32 - 1, math.MaxUint32 - 1}, + } + + for _, test := range tests { + if g, e := MaxUint32(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMinUint32(t *testing.T) { + tests := []struct{ a, b, e uint32 }{ + {0, 0, 0}, + {0, 1, 0}, + {1, 0, 0}, + + {10, 10, 10}, + {10, 11, 10}, + {11, 10, 10}, + {11, 11, 11}, + + {math.MaxUint32, math.MaxUint32, math.MaxUint32}, + {math.MaxUint32, math.MaxUint32 - 1, math.MaxUint32 - 1}, + {math.MaxUint32 - 1, math.MaxUint32, math.MaxUint32 - 1}, + {math.MaxUint32 - 1, math.MaxUint32 - 1, math.MaxUint32 - 1}, + } + + for _, test := range tests { + if g, e := MinUint32(test.a, test.b), test.e; g != e 
{ + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMaxUint64(t *testing.T) { + tests := []struct{ a, b, e uint64 }{ + {0, 0, 0}, + {0, 1, 1}, + {1, 0, 1}, + + {10, 10, 10}, + {10, 11, 11}, + {11, 10, 11}, + {11, 11, 11}, + + {math.MaxUint64, math.MaxUint64, math.MaxUint64}, + {math.MaxUint64, math.MaxUint64 - 1, math.MaxUint64}, + {math.MaxUint64 - 1, math.MaxUint64, math.MaxUint64}, + {math.MaxUint64 - 1, math.MaxUint64 - 1, math.MaxUint64 - 1}, + } + + for _, test := range tests { + if g, e := MaxUint64(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMinUint64(t *testing.T) { + tests := []struct{ a, b, e uint64 }{ + {0, 0, 0}, + {0, 1, 0}, + {1, 0, 0}, + + {10, 10, 10}, + {10, 11, 10}, + {11, 10, 10}, + {11, 11, 11}, + + {math.MaxUint64, math.MaxUint64, math.MaxUint64}, + {math.MaxUint64, math.MaxUint64 - 1, math.MaxUint64 - 1}, + {math.MaxUint64 - 1, math.MaxUint64, math.MaxUint64 - 1}, + {math.MaxUint64 - 1, math.MaxUint64 - 1, math.MaxUint64 - 1}, + } + + for _, test := range tests { + if g, e := MinUint64(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMaxInt8(t *testing.T) { + tests := []struct{ a, b, e int8 }{ + {math.MinInt8, math.MinInt8, math.MinInt8}, + {math.MinInt8 + 1, math.MinInt8, math.MinInt8 + 1}, + {math.MinInt8, math.MinInt8 + 1, math.MinInt8 + 1}, + + {-1, -1, -1}, + {-1, 0, 0}, + {-1, 1, 1}, + + {0, -1, 0}, + {0, 0, 0}, + {0, 1, 1}, + + {1, -1, 1}, + {1, 0, 1}, + {1, 1, 1}, + + {math.MaxInt8, math.MaxInt8, math.MaxInt8}, + {math.MaxInt8 - 1, math.MaxInt8, math.MaxInt8}, + {math.MaxInt8, math.MaxInt8 - 1, math.MaxInt8}, + } + + for _, test := range tests { + if g, e := MaxInt8(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMinInt8(t *testing.T) { + tests := []struct{ a, b, e int8 }{ + {math.MinInt8, math.MinInt8, math.MinInt8}, + {math.MinInt8 + 1, math.MinInt8, math.MinInt8}, + {math.MinInt8, math.MinInt8 + 1, 
math.MinInt8}, + + {-1, -1, -1}, + {-1, 0, -1}, + {-1, 1, -1}, + + {0, -1, -1}, + {0, 0, 0}, + {0, 1, 0}, + + {1, -1, -1}, + {1, 0, 0}, + {1, 1, 1}, + + {math.MaxInt8, math.MaxInt8, math.MaxInt8}, + {math.MaxInt8 - 1, math.MaxInt8, math.MaxInt8 - 1}, + {math.MaxInt8, math.MaxInt8 - 1, math.MaxInt8 - 1}, + } + + for _, test := range tests { + if g, e := MinInt8(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMaxInt16(t *testing.T) { + tests := []struct{ a, b, e int16 }{ + {math.MinInt16, math.MinInt16, math.MinInt16}, + {math.MinInt16 + 1, math.MinInt16, math.MinInt16 + 1}, + {math.MinInt16, math.MinInt16 + 1, math.MinInt16 + 1}, + + {-1, -1, -1}, + {-1, 0, 0}, + {-1, 1, 1}, + + {0, -1, 0}, + {0, 0, 0}, + {0, 1, 1}, + + {1, -1, 1}, + {1, 0, 1}, + {1, 1, 1}, + + {math.MaxInt16, math.MaxInt16, math.MaxInt16}, + {math.MaxInt16 - 1, math.MaxInt16, math.MaxInt16}, + {math.MaxInt16, math.MaxInt16 - 1, math.MaxInt16}, + } + + for _, test := range tests { + if g, e := MaxInt16(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMinInt16(t *testing.T) { + tests := []struct{ a, b, e int16 }{ + {math.MinInt16, math.MinInt16, math.MinInt16}, + {math.MinInt16 + 1, math.MinInt16, math.MinInt16}, + {math.MinInt16, math.MinInt16 + 1, math.MinInt16}, + + {-1, -1, -1}, + {-1, 0, -1}, + {-1, 1, -1}, + + {0, -1, -1}, + {0, 0, 0}, + {0, 1, 0}, + + {1, -1, -1}, + {1, 0, 0}, + {1, 1, 1}, + + {math.MaxInt16, math.MaxInt16, math.MaxInt16}, + {math.MaxInt16 - 1, math.MaxInt16, math.MaxInt16 - 1}, + {math.MaxInt16, math.MaxInt16 - 1, math.MaxInt16 - 1}, + } + + for _, test := range tests { + if g, e := MinInt16(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMaxInt32(t *testing.T) { + tests := []struct{ a, b, e int32 }{ + {math.MinInt32, math.MinInt32, math.MinInt32}, + {math.MinInt32 + 1, math.MinInt32, math.MinInt32 + 1}, + {math.MinInt32, math.MinInt32 + 1, math.MinInt32 + 
1}, + + {-1, -1, -1}, + {-1, 0, 0}, + {-1, 1, 1}, + + {0, -1, 0}, + {0, 0, 0}, + {0, 1, 1}, + + {1, -1, 1}, + {1, 0, 1}, + {1, 1, 1}, + + {math.MaxInt32, math.MaxInt32, math.MaxInt32}, + {math.MaxInt32 - 1, math.MaxInt32, math.MaxInt32}, + {math.MaxInt32, math.MaxInt32 - 1, math.MaxInt32}, + } + + for _, test := range tests { + if g, e := MaxInt32(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMinInt32(t *testing.T) { + tests := []struct{ a, b, e int32 }{ + {math.MinInt32, math.MinInt32, math.MinInt32}, + {math.MinInt32 + 1, math.MinInt32, math.MinInt32}, + {math.MinInt32, math.MinInt32 + 1, math.MinInt32}, + + {-1, -1, -1}, + {-1, 0, -1}, + {-1, 1, -1}, + + {0, -1, -1}, + {0, 0, 0}, + {0, 1, 0}, + + {1, -1, -1}, + {1, 0, 0}, + {1, 1, 1}, + + {math.MaxInt32, math.MaxInt32, math.MaxInt32}, + {math.MaxInt32 - 1, math.MaxInt32, math.MaxInt32 - 1}, + {math.MaxInt32, math.MaxInt32 - 1, math.MaxInt32 - 1}, + } + + for _, test := range tests { + if g, e := MinInt32(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMaxInt64(t *testing.T) { + tests := []struct{ a, b, e int64 }{ + {math.MinInt64, math.MinInt64, math.MinInt64}, + {math.MinInt64 + 1, math.MinInt64, math.MinInt64 + 1}, + {math.MinInt64, math.MinInt64 + 1, math.MinInt64 + 1}, + + {-1, -1, -1}, + {-1, 0, 0}, + {-1, 1, 1}, + + {0, -1, 0}, + {0, 0, 0}, + {0, 1, 1}, + + {1, -1, 1}, + {1, 0, 1}, + {1, 1, 1}, + + {math.MaxInt64, math.MaxInt64, math.MaxInt64}, + {math.MaxInt64 - 1, math.MaxInt64, math.MaxInt64}, + {math.MaxInt64, math.MaxInt64 - 1, math.MaxInt64}, + } + + for _, test := range tests { + if g, e := MaxInt64(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestMinInt64(t *testing.T) { + tests := []struct{ a, b, e int64 }{ + {math.MinInt64, math.MinInt64, math.MinInt64}, + {math.MinInt64 + 1, math.MinInt64, math.MinInt64}, + {math.MinInt64, math.MinInt64 + 1, math.MinInt64}, + + {-1, -1, 
-1}, + {-1, 0, -1}, + {-1, 1, -1}, + + {0, -1, -1}, + {0, 0, 0}, + {0, 1, 0}, + + {1, -1, -1}, + {1, 0, 0}, + {1, 1, 1}, + + {math.MaxInt64, math.MaxInt64, math.MaxInt64}, + {math.MaxInt64 - 1, math.MaxInt64, math.MaxInt64 - 1}, + {math.MaxInt64, math.MaxInt64 - 1, math.MaxInt64 - 1}, + } + + for _, test := range tests { + if g, e := MinInt64(test.a, test.b), test.e; g != e { + t.Fatal(test.a, test.b, g, e) + } + } +} + +func TestPopCountBigInt(t *testing.T) { + const N = 1e4 + rng := rand.New(rand.NewSource(42)) + lim := big.NewInt(0) + lim.SetBit(lim, 1e3, 1) + z := big.NewInt(0) + m1 := big.NewInt(-1) + for i := 0; i < N; i++ { + z.Rand(rng, lim) + g := PopCountBigInt(z) + e := 0 + for bit := 0; bit < z.BitLen(); bit++ { + if z.Bit(bit) != 0 { + e++ + } + } + if g != e { + t.Fatal(g, e) + } + + z.Mul(z, m1) + if g := PopCountBigInt(z); g != e { + t.Fatal(g, e) + } + } +} + +func benchmarkPopCountBigInt(b *testing.B, bits int) { + lim := big.NewInt(0) + lim.SetBit(lim, bits, 1) + z := big.NewInt(0) + z.Rand(rand.New(rand.NewSource(42)), lim) + b.ResetTimer() + for i := 0; i < b.N; i++ { + PopCountBigInt(z) + } +} + +func BenchmarkPopCountBigInt1e1(b *testing.B) { + benchmarkPopCountBigInt(b, 1e1) +} + +func BenchmarkPopCountBigInt1e2(b *testing.B) { + benchmarkPopCountBigInt(b, 1e2) +} + +func BenchmarkPopCountBigInt1e3(b *testing.B) { + benchmarkPopCountBigInt(b, 1e3) +} + +func BenchmarkPopCountBigIbnt1e4(b *testing.B) { + benchmarkPopCountBigInt(b, 1e4) +} + +func BenchmarkPopCountBigInt1e5(b *testing.B) { + benchmarkPopCountBigInt(b, 1e5) +} + +func BenchmarkPopCountBigInt1e6(b *testing.B) { + benchmarkPopCountBigInt(b, 1e6) +} + +func TestToBase(t *testing.T) { + x := ToBase(big.NewInt(0), 42) + e := []int{0} + if g, e := len(x), len(e); g != e { + t.Fatal(g, e) + } + + for i, g := range x { + if e := e[i]; g != e { + t.Fatal(i, g, e) + } + } + + x = ToBase(big.NewInt(2047), 22) + e = []int{1, 5, 4} + if g, e := len(x), len(e); g != e { + t.Fatal(g, e) + } + 
+ for i, g := range x { + if e := e[i]; g != e { + t.Fatal(i, g, e) + } + } + + x = ToBase(big.NewInt(-2047), 22) + e = []int{-1, -5, -4} + if g, e := len(x), len(e); g != e { + t.Fatal(g, e) + } + + for i, g := range x { + if e := e[i]; g != e { + t.Fatal(i, g, e) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/bits.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/bits.go new file mode 100644 index 00000000..6eaa4e30 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/bits.go @@ -0,0 +1,207 @@ +// Copyright (c) 2014 The mathutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mathutil + +import ( + "math/big" +) + +// BitLenByte returns the bit width of the non zero part of n. +func BitLenByte(n byte) int { + return log2[n] + 1 +} + +// BitLenUint16 returns the bit width of the non zero part of n. +func BitLenUint16(n uint16) int { + if b := n >> 8; b != 0 { + return log2[b] + 8 + 1 + } + + return log2[n] + 1 +} + +// BitLenUint32 returns the bit width of the non zero part of n. +func BitLenUint32(n uint32) int { + if b := n >> 24; b != 0 { + return log2[b] + 24 + 1 + } + + if b := n >> 16; b != 0 { + return log2[b] + 16 + 1 + } + + if b := n >> 8; b != 0 { + return log2[b] + 8 + 1 + } + + return log2[n] + 1 +} + +// BitLen returns the bit width of the non zero part of n. +func BitLen(n int) int { // Should handle correctly [future] 64 bit Go ints + if IntBits == 64 { + return BitLenUint64(uint64(n)) + } + + if b := byte(n >> 24); b != 0 { + return log2[b] + 24 + 1 + } + + if b := byte(n >> 16); b != 0 { + return log2[b] + 16 + 1 + } + + if b := byte(n >> 8); b != 0 { + return log2[b] + 8 + 1 + } + + return log2[byte(n)] + 1 +} + +// BitLenUint returns the bit width of the non zero part of n. 
+func BitLenUint(n uint) int { // Should handle correctly [future] 64 bit Go uints + if IntBits == 64 { + return BitLenUint64(uint64(n)) + } + + if b := n >> 24; b != 0 { + return log2[b] + 24 + 1 + } + + if b := n >> 16; b != 0 { + return log2[b] + 16 + 1 + } + + if b := n >> 8; b != 0 { + return log2[b] + 8 + 1 + } + + return log2[n] + 1 +} + +// BitLenUint64 returns the bit width of the non zero part of n. +func BitLenUint64(n uint64) int { + if b := n >> 56; b != 0 { + return log2[b] + 56 + 1 + } + + if b := n >> 48; b != 0 { + return log2[b] + 48 + 1 + } + + if b := n >> 40; b != 0 { + return log2[b] + 40 + 1 + } + + if b := n >> 32; b != 0 { + return log2[b] + 32 + 1 + } + + if b := n >> 24; b != 0 { + return log2[b] + 24 + 1 + } + + if b := n >> 16; b != 0 { + return log2[b] + 16 + 1 + } + + if b := n >> 8; b != 0 { + return log2[b] + 8 + 1 + } + + return log2[n] + 1 +} + +// BitLenUintptr returns the bit width of the non zero part of n. +func BitLenUintptr(n uintptr) int { + if b := n >> 56; b != 0 { + return log2[b] + 56 + 1 + } + + if b := n >> 48; b != 0 { + return log2[b] + 48 + 1 + } + + if b := n >> 40; b != 0 { + return log2[b] + 40 + 1 + } + + if b := n >> 32; b != 0 { + return log2[b] + 32 + 1 + } + + if b := n >> 24; b != 0 { + return log2[b] + 24 + 1 + } + + if b := n >> 16; b != 0 { + return log2[b] + 16 + 1 + } + + if b := n >> 8; b != 0 { + return log2[b] + 8 + 1 + } + + return log2[n] + 1 +} + +// PopCountByte returns population count of n (number of bits set in n). +func PopCountByte(n byte) int { + return int(popcnt[byte(n)]) +} + +// PopCountUint16 returns population count of n (number of bits set in n). +func PopCountUint16(n uint16) int { + return int(popcnt[byte(n>>8)]) + int(popcnt[byte(n)]) +} + +// PopCountUint32 returns population count of n (number of bits set in n). 
+func PopCountUint32(n uint32) int { + return int(popcnt[byte(n>>24)]) + int(popcnt[byte(n>>16)]) + + int(popcnt[byte(n>>8)]) + int(popcnt[byte(n)]) +} + +// PopCount returns population count of n (number of bits set in n). +func PopCount(n int) int { // Should handle correctly [future] 64 bit Go ints + if IntBits == 64 { + return PopCountUint64(uint64(n)) + } + + return PopCountUint32(uint32(n)) +} + +// PopCountUint returns population count of n (number of bits set in n). +func PopCountUint(n uint) int { // Should handle correctly [future] 64 bit Go uints + if IntBits == 64 { + return PopCountUint64(uint64(n)) + } + + return PopCountUint32(uint32(n)) +} + +// PopCountUintptr returns population count of n (number of bits set in n). +func PopCountUintptr(n uintptr) int { + if UintPtrBits == 64 { + return PopCountUint64(uint64(n)) + } + + return PopCountUint32(uint32(n)) +} + +// PopCountUint64 returns population count of n (number of bits set in n). +func PopCountUint64(n uint64) int { + return int(popcnt[byte(n>>56)]) + int(popcnt[byte(n>>48)]) + + int(popcnt[byte(n>>40)]) + int(popcnt[byte(n>>32)]) + + int(popcnt[byte(n>>24)]) + int(popcnt[byte(n>>16)]) + + int(popcnt[byte(n>>8)]) + int(popcnt[byte(n)]) +} + +// PopCountBigInt returns population count of |n| (number of bits set in |n|). +func PopCountBigInt(n *big.Int) (r int) { + for _, v := range n.Bits() { + r += PopCountUintptr(uintptr(v)) + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/envelope.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/envelope.go new file mode 100644 index 00000000..ff8e6012 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/envelope.go @@ -0,0 +1,46 @@ +// Copyright (c) 2014 The mathutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mathutil + +import ( + "math" +) + +// Approximation type determines approximation methods used by e.g. Envelope. +type Approximation int + +// Specific approximation method tags +const ( + _ Approximation = iota + Linear // As named + Sinusoidal // Smooth for all derivations +) + +// Envelope is an utility for defining simple curves using a small (usually) +// set of data points. Envelope returns a value defined by x, points and +// approximation. The value of x must be in [0,1) otherwise the result is +// undefined or the function may panic. Points are interpreted as dividing the +// [0,1) interval in len(points)-1 sections, so len(points) must be > 1 or the +// function may panic. According to the left and right points closing/adjacent +// to the section the resulting value is interpolated using the chosen +// approximation method. Unsupported values of approximation are silently +// interpreted as 'Linear'. +func Envelope(x float64, points []float64, approximation Approximation) float64 { + step := 1 / float64(len(points)-1) + fslot := math.Floor(x / step) + mod := x - fslot*step + slot := int(fslot) + l, r := points[slot], points[slot+1] + rmod := mod / step + switch approximation { + case Sinusoidal: + k := (math.Sin(math.Pi*(rmod-0.5)) + 1) / 2 + return l + (r-l)*k + case Linear: + fallthrough + default: + return l + (r-l)*rmod + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/ff/main.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/ff/main.go new file mode 100644 index 00000000..71a3cf9a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/ff/main.go @@ -0,0 +1,83 @@ +// Copyright (c) jnml. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// Factor Finder - searches for Mersenne number factors of one specific special +// form. +package main + +import ( + "flag" + "fmt" + "math/big" + "runtime" + "time" + + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +const ( + pp = 1 + pp2 = 10 +) + +var ( + _1 = big.NewInt(1) + _2 = big.NewInt(2) +) + +func main() { + runtime.GOMAXPROCS(2) + oClass := flag.Uint64("c", 2, `factor "class" number`) + oDuration := flag.Duration("d", time.Second, "duration to spend on one class") + flag.Parse() + class := *oClass + for class&1 != 0 { + class >>= 1 + } + class = mathutil.MaxUint64(class, 2) + + for { + c := time.After(*oDuration) + factor := big.NewInt(0) + factor.SetUint64(class) + exp := big.NewInt(0) + oneClass: + for { + select { + case <-c: + break oneClass + default: + } + + exp.Set(factor) + factor.Lsh(factor, 1) + factor.Add(factor, _1) + if !factor.ProbablyPrime(pp) { + continue + } + + if !exp.ProbablyPrime(pp) { + continue + } + + if mathutil.ModPowBigInt(_2, exp, factor).Cmp(_1) != 0 { + continue + } + + if !factor.ProbablyPrime(pp2) { + continue + } + + if !exp.ProbablyPrime(pp2) { + continue + } + + fmt.Printf("%d: %s | M%s (%d bits)\n", class, factor, exp, factor.BitLen()) + } + + class += 2 + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mathutil.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mathutil.go new file mode 100644 index 00000000..e8f3f562 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mathutil.go @@ -0,0 +1,829 @@ +// Copyright (c) 2014 The mathutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mathutil provides utilities supplementing the standard 'math' and +// 'math/rand' packages. 
+// +// Compatibility issues +// +// 2013-12-13: The following functions have been REMOVED +// +// func Uint64ToBigInt(n uint64) *big.Int +// func Uint64FromBigInt(n *big.Int) (uint64, bool) +// +// 2013-05-13: The following functions are now DEPRECATED +// +// func Uint64ToBigInt(n uint64) *big.Int +// func Uint64FromBigInt(n *big.Int) (uint64, bool) +// +// These functions will be REMOVED with Go release 1.1+1. +// +// 2013-01-21: The following functions have been REMOVED +// +// func MaxInt() int +// func MinInt() int +// func MaxUint() uint +// func UintPtrBits() int +// +// They are now replaced by untyped constants +// +// MaxInt +// MinInt +// MaxUint +// UintPtrBits +// +// Additionally one more untyped constant was added +// +// IntBits +// +// This change breaks any existing code depending on the above removed +// functions. They should have not been published in the first place, that was +// unfortunate. Instead, defining such architecture and/or implementation +// specific integer limits and bit widths as untyped constants improves +// performance and allows for static dead code elimination if it depends on +// these values. Thanks to minux for pointing it out in the mail list +// (https://groups.google.com/d/msg/golang-nuts/tlPpLW6aJw8/NT3mpToH-a4J). +// +// 2012-12-12: The following functions will be DEPRECATED with Go release +// 1.0.3+1 and REMOVED with Go release 1.0.3+2, b/c of +// http://code.google.com/p/go/source/detail?r=954a79ee3ea8 +// +// func Uint64ToBigInt(n uint64) *big.Int +// func Uint64FromBigInt(n *big.Int) (uint64, bool) +package mathutil + +import ( + "math" + "math/big" +) + +// Architecture and/or implementation specific integer limits and bit widths. 
+const ( + MaxInt = 1<<(IntBits-1) - 1 + MinInt = -MaxInt - 1 + MaxUint = 1<>32&1 + ^uint(0)>>16&1 + ^uint(0)>>8&1 + 3) + UintPtrBits = 1 << (^uintptr(0)>>32&1 + ^uintptr(0)>>16&1 + ^uintptr(0)>>8&1 + 3) +) + +var ( + _1 = big.NewInt(1) + _2 = big.NewInt(2) +) + +// GCDByte returns the greatest common divisor of a and b. Based on: +// http://en.wikipedia.org/wiki/Euclidean_algorithm#Implementations +func GCDByte(a, b byte) byte { + for b != 0 { + a, b = b, a%b + } + return a +} + +// GCDUint16 returns the greatest common divisor of a and b. +func GCDUint16(a, b uint16) uint16 { + for b != 0 { + a, b = b, a%b + } + return a +} + +// GCD returns the greatest common divisor of a and b. +func GCDUint32(a, b uint32) uint32 { + for b != 0 { + a, b = b, a%b + } + return a +} + +// GCD64 returns the greatest common divisor of a and b. +func GCDUint64(a, b uint64) uint64 { + for b != 0 { + a, b = b, a%b + } + return a +} + +// ISqrt returns floor(sqrt(n)). Typical run time is few hundreds of ns. +func ISqrt(n uint32) (x uint32) { + if n == 0 { + return + } + + if n >= math.MaxUint16*math.MaxUint16 { + return math.MaxUint16 + } + + var px, nx uint32 + for x = n; ; px, x = x, nx { + nx = (x + n/x) / 2 + if nx == x || nx == px { + break + } + } + return +} + +// SqrtUint64 returns floor(sqrt(n)). Typical run time is about 0.5 µs. +func SqrtUint64(n uint64) (x uint64) { + if n == 0 { + return + } + + if n >= math.MaxUint32*math.MaxUint32 { + return math.MaxUint32 + } + + var px, nx uint64 + for x = n; ; px, x = x, nx { + nx = (x + n/x) / 2 + if nx == x || nx == px { + break + } + } + return +} + +// SqrtBig returns floor(sqrt(n)). It panics on n < 0. 
+func SqrtBig(n *big.Int) (x *big.Int) { + switch n.Sign() { + case -1: + panic(-1) + case 0: + return big.NewInt(0) + } + + var px, nx big.Int + x = big.NewInt(0) + x.SetBit(x, n.BitLen()/2+1, 1) + for { + nx.Rsh(nx.Add(x, nx.Div(n, x)), 1) + if nx.Cmp(x) == 0 || nx.Cmp(&px) == 0 { + break + } + px.Set(x) + x.Set(&nx) + } + return +} + +// Log2Byte returns log base 2 of n. It's the same as index of the highest +// bit set in n. For n == 0 -1 is returned. +func Log2Byte(n byte) int { + return log2[n] +} + +// Log2Uint16 returns log base 2 of n. It's the same as index of the highest +// bit set in n. For n == 0 -1 is returned. +func Log2Uint16(n uint16) int { + if b := n >> 8; b != 0 { + return log2[b] + 8 + } + + return log2[n] +} + +// Log2Uint32 returns log base 2 of n. It's the same as index of the highest +// bit set in n. For n == 0 -1 is returned. +func Log2Uint32(n uint32) int { + if b := n >> 24; b != 0 { + return log2[b] + 24 + } + + if b := n >> 16; b != 0 { + return log2[b] + 16 + } + + if b := n >> 8; b != 0 { + return log2[b] + 8 + } + + return log2[n] +} + +// Log2Uint64 returns log base 2 of n. It's the same as index of the highest +// bit set in n. For n == 0 -1 is returned. +func Log2Uint64(n uint64) int { + if b := n >> 56; b != 0 { + return log2[b] + 56 + } + + if b := n >> 48; b != 0 { + return log2[b] + 48 + } + + if b := n >> 40; b != 0 { + return log2[b] + 40 + } + + if b := n >> 32; b != 0 { + return log2[b] + 32 + } + + if b := n >> 24; b != 0 { + return log2[b] + 24 + } + + if b := n >> 16; b != 0 { + return log2[b] + 16 + } + + if b := n >> 8; b != 0 { + return log2[b] + 8 + } + + return log2[n] +} + +// ModPowByte computes (b^e)%m. It panics for m == 0 || b == e == 0. 
+// +// See also: http://en.wikipedia.org/wiki/Modular_exponentiation#Right-to-left_binary_method +func ModPowByte(b, e, m byte) byte { + if b == 0 && e == 0 { + panic(0) + } + + if m == 1 { + return 0 + } + + r := uint16(1) + for b, m := uint16(b), uint16(m); e > 0; b, e = b*b%m, e>>1 { + if e&1 == 1 { + r = r * b % m + } + } + return byte(r) +} + +// ModPowByte computes (b^e)%m. It panics for m == 0 || b == e == 0. +func ModPowUint16(b, e, m uint16) uint16 { + if b == 0 && e == 0 { + panic(0) + } + + if m == 1 { + return 0 + } + + r := uint32(1) + for b, m := uint32(b), uint32(m); e > 0; b, e = b*b%m, e>>1 { + if e&1 == 1 { + r = r * b % m + } + } + return uint16(r) +} + +// ModPowUint32 computes (b^e)%m. It panics for m == 0 || b == e == 0. +func ModPowUint32(b, e, m uint32) uint32 { + if b == 0 && e == 0 { + panic(0) + } + + if m == 1 { + return 0 + } + + r := uint64(1) + for b, m := uint64(b), uint64(m); e > 0; b, e = b*b%m, e>>1 { + if e&1 == 1 { + r = r * b % m + } + } + return uint32(r) +} + +// ModPowUint64 computes (b^e)%m. It panics for m == 0 || b == e == 0. +func ModPowUint64(b, e, m uint64) (r uint64) { + if b == 0 && e == 0 { + panic(0) + } + + if m == 1 { + return 0 + } + + return modPowBigInt(big.NewInt(0).SetUint64(b), big.NewInt(0).SetUint64(e), big.NewInt(0).SetUint64(m)).Uint64() +} + +func modPowBigInt(b, e, m *big.Int) (r *big.Int) { + r = big.NewInt(1) + for i, n := 0, e.BitLen(); i < n; i++ { + if e.Bit(i) != 0 { + r.Mod(r.Mul(r, b), m) + } + b.Mod(b.Mul(b, b), m) + } + return +} + +// ModPowBigInt computes (b^e)%m. Returns nil for e < 0. It panics for m == 0 || b == e == 0. 
+func ModPowBigInt(b, e, m *big.Int) (r *big.Int) { + if b.Sign() == 0 && e.Sign() == 0 { + panic(0) + } + + if m.Cmp(_1) == 0 { + return big.NewInt(0) + } + + if e.Sign() < 0 { + return + } + + return modPowBigInt(big.NewInt(0).Set(b), big.NewInt(0).Set(e), m) +} + +var uint64ToBigIntDelta big.Int + +func init() { + uint64ToBigIntDelta.SetBit(&uint64ToBigIntDelta, 63, 1) +} + +var uintptrBits int + +func init() { + x := uint64(math.MaxUint64) + uintptrBits = BitLenUintptr(uintptr(x)) +} + +// UintptrBits returns the bit width of an uintptr at the executing machine. +func UintptrBits() int { + return uintptrBits +} + +// AddUint128_64 returns the uint128 sum of uint64 a and b. +func AddUint128_64(a, b uint64) (hi uint64, lo uint64) { + lo = a + b + if lo < a { + hi = 1 + } + return +} + +// MulUint128_64 returns the uint128 bit product of uint64 a and b. +func MulUint128_64(a, b uint64) (hi, lo uint64) { + /* + 2^(2 W) ahi bhi + 2^W alo bhi + 2^W ahi blo + alo blo + + FEDCBA98 76543210 FEDCBA98 76543210 + ---- alo*blo ---- + ---- alo*bhi ---- + ---- ahi*blo ---- + ---- ahi*bhi ---- + */ + const w = 32 + const m = 1<>w, b>>w, a&m, b&m + lo = alo * blo + mid1 := alo * bhi + mid2 := ahi * blo + c1, lo := AddUint128_64(lo, mid1<>w+mid2>>w+uint64(c1+c2)) + return +} + +// PowerizeBigInt returns (e, p) such that e is the smallest number for which p +// == b^e is greater or equal n. For n < 0 or b < 2 (0, nil) is returned. +// +// NOTE: Run time for large values of n (above about 2^1e6 ~= 1e300000) can be +// significant and/or unacceptabe. For any smaller values of n the function +// typically performs in sub second time. For "small" values of n (cca bellow +// 2^1e3 ~= 1e300) the same can be easily below 10 µs. +// +// A special (and trivial) case of b == 2 is handled separately and performs +// much faster. 
+func PowerizeBigInt(b, n *big.Int) (e uint32, p *big.Int) { + switch { + case b.Cmp(_2) < 0 || n.Sign() < 0: + return + case n.Sign() == 0 || n.Cmp(_1) == 0: + return 0, big.NewInt(1) + case b.Cmp(_2) == 0: + p = big.NewInt(0) + e = uint32(n.BitLen() - 1) + p.SetBit(p, int(e), 1) + if p.Cmp(n) < 0 { + p.Mul(p, _2) + e++ + } + return + } + + bw := b.BitLen() + nw := n.BitLen() + p = big.NewInt(1) + var bb, r big.Int + for { + switch p.Cmp(n) { + case -1: + x := uint32((nw - p.BitLen()) / bw) + if x == 0 { + x = 1 + } + e += x + switch x { + case 1: + p.Mul(p, b) + default: + r.Set(_1) + bb.Set(b) + e := x + for { + if e&1 != 0 { + r.Mul(&r, &bb) + } + if e >>= 1; e == 0 { + break + } + + bb.Mul(&bb, &bb) + } + p.Mul(p, &r) + } + case 0, 1: + return + } + } +} + +// PowerizeUint32BigInt returns (e, p) such that e is the smallest number for +// which p == b^e is greater or equal n. For n < 0 or b < 2 (0, nil) is +// returned. +// +// More info: see PowerizeBigInt. +func PowerizeUint32BigInt(b uint32, n *big.Int) (e uint32, p *big.Int) { + switch { + case b < 2 || n.Sign() < 0: + return + case n.Sign() == 0 || n.Cmp(_1) == 0: + return 0, big.NewInt(1) + case b == 2: + p = big.NewInt(0) + e = uint32(n.BitLen() - 1) + p.SetBit(p, int(e), 1) + if p.Cmp(n) < 0 { + p.Mul(p, _2) + e++ + } + return + } + + var bb big.Int + bb.SetInt64(int64(b)) + return PowerizeBigInt(&bb, n) +} + +/* +ProbablyPrimeUint32 returns true if n is prime or n is a pseudoprime to base a. +It implements the Miller-Rabin primality test for one specific value of 'a' and +k == 1. 
+ +Wrt pseudocode shown at +http://en.wikipedia.org/wiki/Miller-Rabin_primality_test#Algorithm_and_running_time + + Input: n > 3, an odd integer to be tested for primality; + Input: k, a parameter that determines the accuracy of the test + Output: composite if n is composite, otherwise probably prime + write n − 1 as 2^s·d with d odd by factoring powers of 2 from n − 1 + LOOP: repeat k times: + pick a random integer a in the range [2, n − 2] + x ← a^d mod n + if x = 1 or x = n − 1 then do next LOOP + for r = 1 .. s − 1 + x ← x^2 mod n + if x = 1 then return composite + if x = n − 1 then do next LOOP + return composite + return probably prime + +... this function behaves like passing 1 for 'k' and additionaly a +fixed/non-random 'a'. Otherwise it's the same algorithm. + +See also: http://mathworld.wolfram.com/Rabin-MillerStrongPseudoprimeTest.html +*/ +func ProbablyPrimeUint32(n, a uint32) bool { + d, s := n-1, 0 + for ; d&1 == 0; d, s = d>>1, s+1 { + } + x := uint64(ModPowUint32(a, d, n)) + if x == 1 || uint32(x) == n-1 { + return true + } + + for ; s > 1; s-- { + if x = x * x % uint64(n); x == 1 { + return false + } + + if uint32(x) == n-1 { + return true + } + } + return false +} + +// ProbablyPrimeUint64_32 returns true if n is prime or n is a pseudoprime to +// base a. It implements the Miller-Rabin primality test for one specific value +// of 'a' and k == 1. See also ProbablyPrimeUint32. +func ProbablyPrimeUint64_32(n uint64, a uint32) bool { + d, s := n-1, 0 + for ; d&1 == 0; d, s = d>>1, s+1 { + } + x := ModPowUint64(uint64(a), d, n) + if x == 1 || x == n-1 { + return true + } + + bx, bn := big.NewInt(0).SetUint64(x), big.NewInt(0).SetUint64(n) + for ; s > 1; s-- { + if x = bx.Mod(bx.Mul(bx, bx), bn).Uint64(); x == 1 { + return false + } + + if x == n-1 { + return true + } + } + return false +} + +// ProbablyPrimeBigInt_32 returns true if n is prime or n is a pseudoprime to +// base a. 
It implements the Miller-Rabin primality test for one specific value +// of 'a' and k == 1. See also ProbablyPrimeUint32. +func ProbablyPrimeBigInt_32(n *big.Int, a uint32) bool { + var d big.Int + d.Set(n) + d.Sub(&d, _1) // d <- n-1 + s := 0 + for ; d.Bit(s) == 0; s++ { + } + nMinus1 := big.NewInt(0).Set(&d) + d.Rsh(&d, uint(s)) + + x := ModPowBigInt(big.NewInt(int64(a)), &d, n) + if x.Cmp(_1) == 0 || x.Cmp(nMinus1) == 0 { + return true + } + + for ; s > 1; s-- { + if x = x.Mod(x.Mul(x, x), n); x.Cmp(_1) == 0 { + return false + } + + if x.Cmp(nMinus1) == 0 { + return true + } + } + return false +} + +// ProbablyPrimeBigInt returns true if n is prime or n is a pseudoprime to base +// a. It implements the Miller-Rabin primality test for one specific value of +// 'a' and k == 1. See also ProbablyPrimeUint32. +func ProbablyPrimeBigInt(n, a *big.Int) bool { + var d big.Int + d.Set(n) + d.Sub(&d, _1) // d <- n-1 + s := 0 + for ; d.Bit(s) == 0; s++ { + } + nMinus1 := big.NewInt(0).Set(&d) + d.Rsh(&d, uint(s)) + + x := ModPowBigInt(a, &d, n) + if x.Cmp(_1) == 0 || x.Cmp(nMinus1) == 0 { + return true + } + + for ; s > 1; s-- { + if x = x.Mod(x.Mul(x, x), n); x.Cmp(_1) == 0 { + return false + } + + if x.Cmp(nMinus1) == 0 { + return true + } + } + return false +} + +// Max returns the larger of a and b. +func Max(a, b int) int { + if a > b { + return a + } + + return b +} + +// Min returns the smaller of a and b. +func Min(a, b int) int { + if a < b { + return a + } + + return b +} + +// UMax returns the larger of a and b. +func UMax(a, b uint) uint { + if a > b { + return a + } + + return b +} + +// UMin returns the smaller of a and b. +func UMin(a, b uint) uint { + if a < b { + return a + } + + return b +} + +// MaxByte returns the larger of a and b. +func MaxByte(a, b byte) byte { + if a > b { + return a + } + + return b +} + +// MinByte returns the smaller of a and b. 
+func MinByte(a, b byte) byte { + if a < b { + return a + } + + return b +} + +// MaxInt8 returns the larger of a and b. +func MaxInt8(a, b int8) int8 { + if a > b { + return a + } + + return b +} + +// MinInt8 returns the smaller of a and b. +func MinInt8(a, b int8) int8 { + if a < b { + return a + } + + return b +} + +// MaxUint16 returns the larger of a and b. +func MaxUint16(a, b uint16) uint16 { + if a > b { + return a + } + + return b +} + +// MinUint16 returns the smaller of a and b. +func MinUint16(a, b uint16) uint16 { + if a < b { + return a + } + + return b +} + +// MaxInt16 returns the larger of a and b. +func MaxInt16(a, b int16) int16 { + if a > b { + return a + } + + return b +} + +// MinInt16 returns the smaller of a and b. +func MinInt16(a, b int16) int16 { + if a < b { + return a + } + + return b +} + +// MaxUint32 returns the larger of a and b. +func MaxUint32(a, b uint32) uint32 { + if a > b { + return a + } + + return b +} + +// MinUint32 returns the smaller of a and b. +func MinUint32(a, b uint32) uint32 { + if a < b { + return a + } + + return b +} + +// MaxInt32 returns the larger of a and b. +func MaxInt32(a, b int32) int32 { + if a > b { + return a + } + + return b +} + +// MinInt32 returns the smaller of a and b. +func MinInt32(a, b int32) int32 { + if a < b { + return a + } + + return b +} + +// MaxUint64 returns the larger of a and b. +func MaxUint64(a, b uint64) uint64 { + if a > b { + return a + } + + return b +} + +// MinUint64 returns the smaller of a and b. +func MinUint64(a, b uint64) uint64 { + if a < b { + return a + } + + return b +} + +// MaxInt64 returns the larger of a and b. +func MaxInt64(a, b int64) int64 { + if a > b { + return a + } + + return b +} + +// MinInt64 returns the smaller of a and b. +func MinInt64(a, b int64) int64 { + if a < b { + return a + } + + return b +} + +// ToBase produces n in base b. 
For example +// +// ToBase(2047, 22) -> [1, 5, 4] +// +// 1 * 22^0 1 +// 5 * 22^1 110 +// 4 * 22^2 1936 +// ---- +// 2047 +// +// ToBase panics for bases < 2. +func ToBase(n *big.Int, b int) []int { + var nn big.Int + nn.Set(n) + if b < 2 { + panic("invalid base") + } + + k := 1 + switch nn.Sign() { + case -1: + nn.Neg(&nn) + k = -1 + case 0: + return []int{0} + } + + bb := big.NewInt(int64(b)) + var r []int + rem := big.NewInt(0) + for nn.Sign() != 0 { + nn.QuoRem(&nn, bb, rem) + r = append(r, k*int(rem.Int64())) + } + return r +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mersenne/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mersenne/LICENSE new file mode 100644 index 00000000..7150ce3e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mersenne/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 jnml. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of jnml nor the names of his +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mersenne/README b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mersenne/README new file mode 100644 index 00000000..afa401c7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mersenne/README @@ -0,0 +1,2 @@ +Install: $ go get github.com/cznic/mathutil/mersenne +Godocs: http://gopkgdoc.appspot.com/pkg/github.com/cznic/mathutil/mersenne diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mersenne/all_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mersenne/all_test.go new file mode 100644 index 00000000..fb0e51c4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mersenne/all_test.go @@ -0,0 +1,938 @@ +// Copyright (c) 2011 jnml. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mersenne + +import ( + "camlistore.org/third_party/github.com/cznic/mathutil" + "math" + "math/big" + "math/rand" + "runtime" + "sync" + "testing" +) + +func r32() *mathutil.FC32 { + r, err := mathutil.NewFC32(math.MinInt32, math.MaxInt32, true) + if err != nil { + panic(err) + } + + return r +} + +var ( + r64lo = big.NewInt(math.MinInt64) + r64hi = big.NewInt(math.MaxInt64) +) + +func r64() *mathutil.FCBig { + r, err := mathutil.NewFCBig(r64lo, r64hi, true) + if err != nil { + panic(err) + } + + return r +} + +func TestNew(t *testing.T) { + const N = 1e4 + data := []struct{ n, m uint32 }{ + {0, 0}, + {1, 1}, + {2, 3}, + {3, 7}, + {4, 15}, + {5, 31}, + {6, 63}, + {7, 127}, + {8, 255}, + {9, 511}, + {10, 1023}, + {11, 2047}, + {12, 4095}, + {13, 8191}, + {14, 16383}, + {15, 32767}, + {16, 65535}, + {17, 131071}, + } + + e := big.NewInt(0) + for _, v := range data { + g := New(v.n) + e.SetInt64(int64(v.m)) + if g.Cmp(e) != 0 { + t.Errorf("%d: got %s, exp %s", v.n, g, e) + } + } + + r := r32() + for i := 0; i < N; i++ { + exp := uint32(r.Next()) % 1e6 + g := New(exp) + b0 := g.BitLen() + g.Add(g, _1) + b1 := g.BitLen() + if b1-b0 != 1 { + t.Fatal(i, exp, b1, b0) + } + } +} + +func benchmarkNew(b *testing.B, max uint32) { + const N = 1 << 16 + b.StopTimer() + a := make([]uint32, N) + r := r32() + for i := range a { + a[i] = uint32(r.Next()) % max + } + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + New(a[i&(N-1)]) + } +} + +func BenchmarkNew_1e1(b *testing.B) { + benchmarkNew(b, 1e1) +} + +func BenchmarkNew_1e2(b *testing.B) { + benchmarkNew(b, 1e2) +} + +func BenchmarkNew_1e3(b *testing.B) { + benchmarkNew(b, 1e3) +} + +func BenchmarkNew_1e4(b *testing.B) { + benchmarkNew(b, 1e4) +} + +func BenchmarkNew_1e5(b *testing.B) { + benchmarkNew(b, 1e5) +} + +func BenchmarkNew_1e6(b *testing.B) { + benchmarkNew(b, 1e6) +} + +func BenchmarkNew_1e7(b *testing.B) { + benchmarkNew(b, 1e7) +} + +func BenchmarkNew_1e8(b *testing.B) { + benchmarkNew(b, 1e8) 
+} + +func TestHasFactorUint32(t *testing.T) { + data := []struct { + d, e uint32 + r bool + }{ + {0, 42, false}, + {1, 24, true}, + {2, 22, false}, + {3, 2, true}, + {3, 3, false}, + {3, 4, true}, + {3, 5, false}, + {3, 6, true}, + {5, 4, true}, + {5, 5, false}, + {5, 6, false}, + {5, 7, false}, + {5, 8, true}, + {5, 9, false}, + {5, 10, false}, + {5, 11, false}, + {5, 12, true}, + {7, 3, true}, + {7, 6, true}, + {7, 9, true}, + {9, 6, true}, + {9, 12, true}, + {9, 18, true}, + {11, 10, true}, + {23, 11, true}, + {89, 11, true}, + {47, 23, true}, + {193707721, 67, true}, + {13007, 929, true}, + {264248689, 500471, true}, + {112027889, 1000249, true}, + {252079759, 2000633, true}, + {222054983, 3000743, true}, + {1920355681, 4000741, true}, + {330036367, 5000551, true}, + {1020081431, 6000479, true}, + {840074281, 7000619, true}, + {624031279, 8000401, true}, + {378031207, 9000743, true}, + {380036519, 10000961, true}, + {40001447, 20000723, true}, + } + + for _, v := range data { + if g, e := HasFactorUint32(v.d, v.e), v.r; g != e { + t.Errorf("d %d e %d: got %t, exp %t", v.d, v.e, g, e) + } + } +} + +func TestHasFactorUint64(t *testing.T) { + data := []struct { + d uint64 + e uint32 + r bool + }{ + {0, 42, false}, + {1, 24, true}, + {2, 22, false}, + {3, 2, true}, + {3, 3, false}, + {3, 4, true}, + {3, 5, false}, + {3, 6, true}, + {5, 4, true}, + {5, 5, false}, + {5, 6, false}, + {5, 7, false}, + {5, 8, true}, + {5, 9, false}, + {5, 10, false}, + {5, 11, false}, + {5, 12, true}, + {7, 3, true}, + {7, 6, true}, + {7, 9, true}, + {9, 6, true}, + {9, 12, true}, + {9, 18, true}, + {11, 10, true}, + {23, 11, true}, + {89, 11, true}, + {47, 23, true}, + {193707721, 67, true}, + {13007, 929, true}, + {264248689, 500471, true}, + {112027889, 1000249, true}, + {252079759, 2000633, true}, + {222054983, 3000743, true}, + {1920355681, 4000741, true}, + {330036367, 5000551, true}, + {1020081431, 6000479, true}, + {840074281, 7000619, true}, + {624031279, 8000401, true}, + 
{378031207, 9000743, true}, + {380036519, 10000961, true}, + {40001447, 20000723, true}, + {1872347344039, 1000099, true}, + } + + for _, v := range data { + if g, e := HasFactorUint64(v.d, v.e), v.r; g != e { + t.Errorf("d %d e %d: got %t, exp %t", v.d, v.e, g, e) + } + } +} + +func TestHasFactorBigInt(t *testing.T) { + data := []struct { + d interface{} + e uint32 + r bool + }{ + {0, 42, false}, + {1, 24, true}, + {2, 22, false}, + {3, 2, true}, + {3, 3, false}, + {3, 4, true}, + {3, 5, false}, + {3, 6, true}, + {5, 4, true}, + {5, 5, false}, + {5, 6, false}, + {5, 7, false}, + {5, 8, true}, + {5, 9, false}, + {5, 10, false}, + {5, 11, false}, + {5, 12, true}, + {7, 3, true}, + {7, 6, true}, + {7, 9, true}, + {9, 6, true}, + {9, 12, true}, + {9, 18, true}, + {11, 10, true}, + {23, 11, true}, + {89, 11, true}, + {47, 23, true}, + {193707721, 67, true}, + {13007, 929, true}, + {264248689, 500471, true}, + {112027889, 1000249, true}, + {252079759, 2000633, true}, + {222054983, 3000743, true}, + {1920355681, 4000741, true}, + {330036367, 5000551, true}, + {1020081431, 6000479, true}, + {840074281, 7000619, true}, + {624031279, 8000401, true}, + {378031207, 9000743, true}, + {380036519, 10000961, true}, + {40001447, 20000723, true}, + {"1872347344039", 1000099, true}, + {"11502865265922183403581252152383", 100279, true}, + {"533975545077050000610542659519277030089249998649", 7293457, true}, + } + + var d big.Int + for _, v := range data { + bigInt(&d, v.d) + if g, e := HasFactorBigInt(&d, v.e), v.r; g != e { + t.Errorf("d %s e %d: got %t, exp %t", &d, v.e, g, e) + } + } +} + +var once309 sync.Once + +func BenchmarkHasFactorUint32Rnd(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct{ d, e uint32 } + a := make([]t, N) + r := r32() + for i := range a { + a[i] = t{ + uint32(r.Next()) | 1, + uint32(r.Next()), + } + } + once309.Do(func() { b.Log("Random 32 bit factor, random 32 bit exponent\n") }) + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ 
{ + v := a[i&(N-1)] + HasFactorUint32(v.d, v.e) + } +} + +var once332 sync.Once + +func BenchmarkHasFactorUint64Rnd(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct { + d uint64 + e uint32 + } + a := make([]t, N) + r := r64() + for i := range a { + a[i] = t{ + uint64(r.Next().Int64()) | 1, + uint32(r.Next().Int64()), + } + } + once332.Do(func() { b.Log("Random 64 bit factor, random 32 bit exponent\n") }) + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + HasFactorUint64(v.d, v.e) + } +} + +var once358 sync.Once + +func BenchmarkHasFactorBigIntRnd_128b(b *testing.B) { + const N = 1 << 16 + b.StopTimer() + type t struct { + d *big.Int + e uint32 + } + a := make([]t, N) + r, err := mathutil.NewFCBig(_1, New(128), true) + if err != nil { + b.Fatal(err) + } + r2 := r32() + for i := range a { + dd := r.Next() + a[i] = t{ + dd.SetBit(dd, 0, 1), + uint32(r2.Next()), + } + } + once358.Do(func() { b.Log("Random 128 bit factor, random 32 bit exponent\n") }) + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + v := a[i&(N-1)] + HasFactorBigInt(v.d, v.e) + } +} + +var ( + f104b, _ = big.NewInt(0).SetString( // 104 bit factor of M100279 + "11502865265922183403581252152383", + 10, + ) + f137b, _ = big.NewInt(0).SetString( // 137 bit factor of M7293457 + "533975545077050000610542659519277030089249998649", + 10, + ) +) + +var once396 sync.Once + +func BenchmarkHasFactorBigInt_104b(b *testing.B) { + b.StopTimer() + once396.Do(func() { b.Log("Verify a 104 bit factor of M100279 (16.6 bit exponent)\n") }) + runtime.GC() + var r bool + b.StartTimer() + for i := 0; i < b.N; i++ { + r = HasFactorBigInt(f104b, 100279) + } + if !r { + b.Fatal() + } +} + +var once412 sync.Once + +func BenchmarkHasFactorBigIntMod104b(b *testing.B) { + b.StopTimer() + once412.Do(func() { b.Log("Verify a 104 bit factor of M100279 (16.6 bit exponent) using big.Int.Mod\n") }) + runtime.GC() + m := New(100279) + var x big.Int + b.StartTimer() + for i := 0; 
i < b.N; i++ { + x.Mod(m, f104b) + } + if x.Cmp(_0) != 0 { + b.Fatal() + } +} + +var once429 sync.Once + +func BenchmarkHasFactorBigInt_137b(b *testing.B) { + b.StopTimer() + once429.Do(func() { b.Log("Verify a 137 bit factor of M7293457 (22.8 bit exponent)\n") }) + runtime.GC() + var r bool + b.StartTimer() + for i := 0; i < b.N; i++ { + r = HasFactorBigInt(f137b, 7293457) + } + if !r { + b.Fatal() + } +} + +var once445 sync.Once + +func BenchmarkHasFactorBigIntMod137b(b *testing.B) { + b.StopTimer() + once445.Do(func() { b.Log("Verify a 137 bit factor of M7293457 (22.8 bit exponent) using big.Int.Mod\n") }) + runtime.GC() + m := New(7293457) + var x big.Int + b.StartTimer() + for i := 0; i < b.N; i++ { + x.Mod(m, f137b) + } + if x.Cmp(_0) != 0 { + b.Fatal() + } +} + +func bigInt(b *big.Int, v interface{}) { + switch v := v.(type) { + case int: + b.SetInt64(int64(v)) + case string: + if _, ok := b.SetString(v, 10); !ok { + panic("bigInt: bad decimal string") + } + default: + panic("bigInt: bad v.(type)") + } +} + +func TestFromFactorBigInt(t *testing.T) { + data := []struct { + d interface{} + n uint32 + }{ + {0, 0}, + {1, 1}, + {2, 0}, + {3, 2}, + {4, 0}, + {5, 4}, + {7, 3}, + {9, 6}, + {11, 10}, + {23, 11}, + {89, 11}, + {"7432339208719", 101}, + {"198582684439", 1009}, + {"20649907789079", 1009}, + {"21624641697047", 1009}, + {"30850253615723594284324529", 1009}, + {"1134327302421596486779379019599", 1009}, + {35311753, 10009}, + {"104272300687", 10009}, + {"10409374085465521", 10009}, + {"890928517778601397463", 10009}, + {6400193, 100003}, + } + + f := func(d *big.Int, max, e uint32) { + if g := FromFactorBigInt(d, max); g != e { + t.Fatalf("%s %d %d %d", d, max, g, e) + } + } + + var d big.Int + for _, v := range data { + bigInt(&d, v.d) + switch { + case v.n > 0: + f(&d, v.n-1, 0) + default: // v.n == 0 + f(&d, 100, 0) + } + f(&d, v.n, v.n) + } +} + +var f20b = big.NewInt(200000447) // 20 bit factor of M100000223 + +func benchmarkFromFactorBigInt(b 
*testing.B, f *big.Int, max uint32) { + var n uint32 + for i := 0; i < b.N; i++ { + n = FromFactorBigInt(f, max) + } + if n != 0 { + b.Fatal(n) + } +} + +func BenchmarkFromFactorBigInt20b_1e1(b *testing.B) { + benchmarkFromFactorBigInt(b, f20b, 1e1) +} + +func BenchmarkFromFactorBigInt20b_1e2(b *testing.B) { + benchmarkFromFactorBigInt(b, f20b, 1e2) +} + +func BenchmarkFromFactorBigInt20b_1e3(b *testing.B) { + benchmarkFromFactorBigInt(b, f20b, 1e3) +} + +func BenchmarkFromFactorBigInt20b_1e4(b *testing.B) { + benchmarkFromFactorBigInt(b, f20b, 1e4) +} + +func BenchmarkFromFactorBigInt20b_1e5(b *testing.B) { + benchmarkFromFactorBigInt(b, f20b, 1e5) +} + +func BenchmarkFromFactorBigInt20b_1e6(b *testing.B) { + benchmarkFromFactorBigInt(b, f20b, 1e6) +} + +func BenchmarkFromFactorBigInt137b_1e1(b *testing.B) { + benchmarkFromFactorBigInt(b, f137b, 1e1) +} + +func BenchmarkFromFactorBigInt137b_1e2(b *testing.B) { + benchmarkFromFactorBigInt(b, f137b, 1e2) +} + +func BenchmarkFromFactorBigInt137b_1e3(b *testing.B) { + benchmarkFromFactorBigInt(b, f137b, 1e3) +} + +func BenchmarkFromFactorBigInt137b_1e4(b *testing.B) { + benchmarkFromFactorBigInt(b, f137b, 1e4) +} + +func BenchmarkFromFactorBigInt137b_1e5(b *testing.B) { + benchmarkFromFactorBigInt(b, f137b, 1e5) +} + +func BenchmarkFromFactorBigInt137b_1e6(b *testing.B) { + benchmarkFromFactorBigInt(b, f137b, 1e6) +} +func TestMod(t *testing.T) { + const N = 1e4 + data := []struct { + mod, n int64 + exp uint32 + }{ + {0, 0x00, 3}, + {1, 0x01, 3}, + {3, 0x03, 3}, + {0, 0x07, 3}, + {1, 0x0f, 3}, + {3, 0x1f, 3}, + {0, 0x3f, 3}, + {1, 0x7f, 3}, + {3, 0xff, 3}, + {0, 0x1ff, 3}, + } + + var mod, n big.Int + for _, v := range data { + n.SetInt64(v.n) + p := Mod(&mod, &n, v.exp) + if p != &mod { + t.Fatal() + } + + if g, e := mod.Int64(), v.mod; g != e { + t.Fatal(v.n, v.exp, g, e) + } + } + + f := func(in int64, exp uint32) { + n.SetInt64(in) + mod.Mod(&n, New(exp)) + e := mod.Int64() + Mod(&mod, &n, exp) + g := mod.Int64() 
+ if g != e { + t.Fatal(in, exp, g, e) + } + } + + r32, _ := mathutil.NewFC32(1, 1e6, true) + r64, _ := mathutil.NewFCBig(_0, mathutil.Uint64ToBigInt(math.MaxInt64), true) + for i := 0; i < N; i++ { + f(r64.Next().Int64(), uint32(r32.Next())) + } +} + +func benchmarkMod(b *testing.B, w, exp uint32) { + b.StopTimer() + var n, mod big.Int + n.Rand(rand.New(rand.NewSource(1)), New(w)) + n.SetBit(&n, int(w), 1) + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + Mod(&mod, &n, exp) + } +} + +func benchmarkModBig(b *testing.B, w, exp uint32) { + b.StopTimer() + var n, mod big.Int + n.Rand(rand.New(rand.NewSource(1)), New(w)) + n.SetBit(&n, int(w), 1) + runtime.GC() + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + mod.Mod(&n, New(exp)) + } +} + +func BenchmarkMod_1e2(b *testing.B) { + benchmarkMod(b, 1e2+2, 1e2) +} + +func BenchmarkModBig_1e2(b *testing.B) { + benchmarkModBig(b, 1e2+2, 1e2) +} + +func BenchmarkMod_1e3(b *testing.B) { + benchmarkMod(b, 1e3+2, 1e3) +} + +func BenchmarkModBig_1e3(b *testing.B) { + benchmarkModBig(b, 1e3+2, 1e3) +} + +func BenchmarkMod_1e4(b *testing.B) { + benchmarkMod(b, 1e4+2, 1e4) +} + +func BenchmarkModBig_1e4(b *testing.B) { + benchmarkModBig(b, 1e4+2, 1e4) +} + +func BenchmarkMod_1e5(b *testing.B) { + benchmarkMod(b, 1e5+2, 1e5) +} + +func BenchmarkModBig_1e5(b *testing.B) { + benchmarkModBig(b, 1e5+2, 1e5) +} + +func BenchmarkMod_1e6(b *testing.B) { + benchmarkMod(b, 1e6+2, 1e6) +} + +func BenchmarkModBig_1e6(b *testing.B) { + benchmarkModBig(b, 1e6+2, 1e6) +} + +func BenchmarkMod_1e7(b *testing.B) { + benchmarkMod(b, 1e7+2, 1e7) +} + +func BenchmarkModBig_1e7(b *testing.B) { + benchmarkModBig(b, 1e7+2, 1e7) +} + +func BenchmarkMod_1e8(b *testing.B) { + benchmarkMod(b, 1e8+2, 1e8) +} + +func BenchmarkModBig_1e8(b *testing.B) { + benchmarkModBig(b, 1e8+2, 1e8) +} + +func BenchmarkMod_5e8(b *testing.B) { + benchmarkMod(b, 5e8+2, 5e8) +} + +func BenchmarkModBig_5e8(b *testing.B) { + benchmarkModBig(b, 5e8+2, 5e8) 
+} + +func TestModPow(t *testing.T) { + const N = 2e2 + data := []struct{ b, e, m, r uint32 }{ + {0, 1, 1, 0}, + {0, 2, 1, 0}, + {0, 3, 1, 0}, + + {1, 0, 1, 0}, + {1, 1, 1, 0}, + {1, 2, 1, 0}, + {1, 3, 1, 0}, + + {2, 0, 1, 0}, + {2, 1, 1, 0}, + {2, 2, 1, 0}, + {2, 3, 1, 0}, + + {2, 3, 4, 8}, + {2, 3, 5, 4}, + {2, 4, 3, 1}, + {3, 3, 3, 3}, + {3, 4, 5, 30}, + } + + f := func(b, e, m uint32, expect *big.Int) { + got := ModPow(b, e, m) + if got.Cmp(expect) != 0 { + t.Fatal(b, e, m, got, expect) + } + } + + var r big.Int + for _, v := range data { + r.SetInt64(int64(v.r)) + f(v.b, v.e, v.m, &r) + } + + rg, _ := mathutil.NewFC32(2, 1<<10, true) + var bb big.Int + for i := 0; i < N; i++ { + b, e, m := uint32(rg.Next()), uint32(rg.Next()), uint32(rg.Next()) + bb.SetInt64(int64(b)) + f(b, e, m, mathutil.ModPowBigInt(&bb, New(e), New(m))) + } +} + +func benchmarkModPow2(b *testing.B, e, m uint32) { + b.StopTimer() + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + ModPow2(e, m) + } +} + +func benchmarkModPow(b *testing.B, base, e, m uint32) { + b.StopTimer() + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + ModPow(base, e, m) + } +} + +func benchmarkModPowBig(b *testing.B, base, e, m uint32) { + b.StopTimer() + bb := big.NewInt(int64(base)) + ee := New(e) + mm := New(m) + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + mathutil.ModPowBigInt(bb, ee, mm) + } +} + +func BenchmarkModPow2_1e2(b *testing.B) { + benchmarkModPow2(b, 1e2, 1e2+1) +} + +func BenchmarkModPow_2_1e2(b *testing.B) { + benchmarkModPow(b, 2, 1e2, 1e2+1) +} + +func BenchmarkModPowB_2_1e2(b *testing.B) { + benchmarkModPowBig(b, 2, 1e2, 1e2+1) +} + +func BenchmarkModPow_3_1e2(b *testing.B) { + benchmarkModPow(b, 3, 1e2, 1e2+1) +} + +func BenchmarkModPowB_3_1e2(b *testing.B) { + benchmarkModPowBig(b, 3, 1e2, 1e2+1) +} + +// ---- + +func BenchmarkModPow2_1e3(b *testing.B) { + benchmarkModPow2(b, 1e3, 1e3+1) +} + +func BenchmarkModPow_2_1e3(b *testing.B) { + benchmarkModPow(b, 
2, 1e3, 1e3+1) +} + +func BenchmarkModPowB_2_1e3(b *testing.B) { + benchmarkModPowBig(b, 2, 1e3, 1e3+1) +} + +func BenchmarkModPow_3_1e3(b *testing.B) { + benchmarkModPow(b, 3, 1e3, 1e3+1) +} + +func BenchmarkModPowB_3_1e3(b *testing.B) { + benchmarkModPowBig(b, 3, 1e3, 1e3+1) +} + +// ---- + +func BenchmarkModPow2_1e4(b *testing.B) { + benchmarkModPow2(b, 1e4, 1e4+1) +} + +func BenchmarkModPow_2_1e4(b *testing.B) { + benchmarkModPow(b, 2, 1e4, 1e4+1) +} + +func BenchmarkModPowB_2_1e4(b *testing.B) { + benchmarkModPowBig(b, 2, 1e4, 1e4+1) +} + +func BenchmarkModPow_3_1e4(b *testing.B) { + benchmarkModPow(b, 3, 1e4, 1e4+1) +} + +func BenchmarkModPowB_3_1e4(b *testing.B) { + benchmarkModPowBig(b, 3, 1e4, 1e4+1) +} + +// ---- + +func BenchmarkModPow2_1e5(b *testing.B) { + benchmarkModPow2(b, 1e5, 1e5+1) +} + +func BenchmarkModPow2_1e6(b *testing.B) { + benchmarkModPow2(b, 1e6, 1e6+1) +} + +func BenchmarkModPow2_1e7(b *testing.B) { + benchmarkModPow2(b, 1e7, 1e7+1) +} + +func BenchmarkModPow2_1e8(b *testing.B) { + benchmarkModPow2(b, 1e8, 1e8+1) +} + +func BenchmarkModPow2_1e9(b *testing.B) { + benchmarkModPow2(b, 1e9, 1e9+1) +} + +func TestModPow2(t *testing.T) { + const N = 1e3 + data := []struct{ e, m uint32 }{ + // e == 0 -> x == 0 + {0, 2}, + {0, 3}, + {0, 4}, + + {1, 2}, + {1, 3}, + {1, 4}, + {1, 5}, + + {2, 2}, + {2, 3}, + {2, 4}, + {2, 5}, + + {3, 2}, + {3, 3}, + {3, 4}, + {3, 5}, + {3, 6}, + {3, 7}, + {3, 8}, + {3, 9}, + + {4, 2}, + {4, 3}, + {4, 4}, + {4, 5}, + {4, 6}, + {4, 7}, + {4, 8}, + {4, 9}, + } + + var got big.Int + f := func(e, m uint32) { + x := ModPow2(e, m) + exp := ModPow(2, e, m) + got.SetInt64(0) + got.SetBit(&got, int(x), 1) + if got.Cmp(exp) != 0 { + t.Fatalf("\ne %d, m %d\ng: %s\ne: %s", e, m, &got, exp) + } + } + + for _, v := range data { + f(v.e, v.m) + } + + rg, _ := mathutil.NewFC32(2, 1<<10, true) + for i := 0; i < N; i++ { + f(uint32(rg.Next()), uint32(rg.Next())) + } +} diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mersenne/mersenne.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mersenne/mersenne.go new file mode 100644 index 00000000..c8b377dc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/mersenne/mersenne.go @@ -0,0 +1,288 @@ +// Copyright (c) 2011 jnml. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mersenne collects utilities related to Mersenne numbers[1] and/or some +of their properties. + +Exponent + +In this documentatoin the term 'exponent' refers to 'n' of a Mersenne number Mn +equal to 2^n-1. This package supports only uint32 sized exponents. New() +currently supports exponents only up to math.MaxInt32 (31 bits, up to 256 MB +required to represent such Mn in memory as a big.Int). + +Links + +Referenced from above: + [1] http://en.wikipedia.org/wiki/Mersenne_number +*/ +package mersenne + +import ( + "camlistore.org/third_party/github.com/cznic/mathutil" + "math" + "math/big" +) + +var ( + _0 = big.NewInt(0) + _1 = big.NewInt(1) + _2 = big.NewInt(2) +) + +// Knowns list the exponent of currently (March 2012) known Mersenne primes +// exponents in order. See also: http://oeis.org/A000043 for a partial list. 
+var Knowns = []uint32{ + 2, // #1 + 3, // #2 + 5, // #3 + 7, // #4 + 13, // #5 + 17, // #6 + 19, // #7 + 31, // #8 + 61, // #9 + 89, // #10 + + 107, // #11 + 127, // #12 + 521, // #13 + 607, // #14 + 1279, // #15 + 2203, // #16 + 2281, // #17 + 3217, // #18 + 4253, // #19 + 4423, // #20 + + 9689, // #21 + 9941, // #22 + 11213, // #23 + 19937, // #24 + 21701, // #25 + 23209, // #26 + 44497, // #27 + 86243, // #28 + 110503, // #29 + 132049, // #30 + + 216091, // #31 + 756839, // #32 + 859433, // #33 + 1257787, // #34 + 1398269, // #35 + 2976221, // #36 + 3021377, // #37 + 6972593, // #38 + 13466917, // #39 + 20996011, // #40 + + 24036583, // #41 + 25964951, // #42 + 30402457, // #43 + 32582657, // #44 + 37156667, // #45 + 42643801, // #46 + 43112609, // #47 + 57885161, // #48 +} + +// Known maps the exponent of known Mersenne primes its ordinal number/rank. +// Ranks > 41 are currently provisional. +var Known map[uint32]int + +func init() { + Known = map[uint32]int{} + for i, v := range Knowns { + Known[v] = i + 1 + } +} + +// New returns Mn == 2^n-1 for n <= math.MaxInt32 or nil otherwise. +func New(n uint32) (m *big.Int) { + if n > math.MaxInt32 { + return + } + + m = big.NewInt(0) + return m.Sub(m.SetBit(m, int(n), 1), _1) +} + +// HasFactorUint32 returns true if d | Mn. Typical run time for a 32 bit factor +// and a 32 bit exponent is < 1 µs. +func HasFactorUint32(d, n uint32) bool { + return d == 1 || d&1 != 0 && mathutil.ModPowUint32(2, n, d) == 1 +} + +// HasFactorUint64 returns true if d | Mn. Typical run time for a 64 bit factor +// and a 32 bit exponent is < 30 µs. +func HasFactorUint64(d uint64, n uint32) bool { + return d == 1 || d&1 != 0 && mathutil.ModPowUint64(2, uint64(n), d) == 1 +} + +// HasFactorBigInt returns true if d | Mn, d > 0. Typical run time for a 128 +// bit factor and a 32 bit exponent is < 75 µs. 
+func HasFactorBigInt(d *big.Int, n uint32) bool { + return d.Cmp(_1) == 0 || d.Sign() > 0 && d.Bit(0) == 1 && + mathutil.ModPowBigInt(_2, big.NewInt(int64(n)), d).Cmp(_1) == 0 +} + +/* +FromFactorBigInt returns n such that d | Mn if n <= max and d is odd. In other +cases zero is returned. + +It is conjectured that every odd d ∊ N divides infinitely many Mersenne numbers. +The returned n should be the exponent of smallest such Mn. + +NOTE: The computation of n from a given d performs roughly in O(n). It is +thus highly recomended to use the 'max' argument to limit the "searched" +exponent upper bound as appropriate. Otherwise the computation can take a long +time as a large factor can be a divisor of a Mn with exponent above the uint32 +limits. + +The FromFactorBigInt function is a modification of the original Will +Edgington's "reverse method", discussed here: +http://tech.groups.yahoo.com/group/primenumbers/message/15061 +*/ +func FromFactorBigInt(d *big.Int, max uint32) (n uint32) { + if d.Bit(0) == 0 { + return + } + + var m big.Int + for n < max { + m.Add(&m, d) + i := 0 + for ; m.Bit(i) == 1; i++ { + if n == math.MaxUint32 { + return 0 + } + + n++ + } + m.Rsh(&m, uint(i)) + if m.Sign() == 0 { + if n > max { + n = 0 + } + return + } + } + return 0 +} + +// Mod sets mod to n % Mexp and returns mod. It panics for exp == 0 || exp >= +// math.MaxInt32 || n < 0. +func Mod(mod, n *big.Int, exp uint32) *big.Int { + if exp == 0 || exp >= math.MaxInt32 || n.Sign() < 0 { + panic(0) + } + + m := New(exp) + mod.Set(n) + var x big.Int + for mod.BitLen() > int(exp) { + x.Set(mod) + x.Rsh(&x, uint(exp)) + mod.And(mod, m) + mod.Add(mod, &x) + } + if mod.BitLen() == int(exp) && mod.Cmp(m) == 0 { + mod.SetInt64(0) + } + return mod +} + +// ModPow2 returns x such that 2^Me % Mm == 2^x. It panics for m < 2. Typical +// run time is < 1 µs. Use instead of ModPow(2, e, m) wherever possible. 
+func ModPow2(e, m uint32) (x uint32) { + /* + m < 2 -> panic + e == 0 -> x == 0 + e == 1 -> x == 1 + + 2^M1 % M2 == 2^1 % 3 == 2^1 10 // 2^1, 3, 5, 7 ... +2k + 2^M1 % M3 == 2^1 % 7 == 2^1 010 // 2^1, 4, 7, ... +3k + 2^M1 % M4 == 2^1 % 15 == 2^1 0010 // 2^1, 5, 9, 13... +4k + 2^M1 % M5 == 2^1 % 31 == 2^1 00010 // 2^1, 6, 11, 16... +5k + + 2^M2 % M2 == 2^3 % 3 == 2^1 10.. // 2^3, 5, 7, 9, 11, ... +2k + 2^M2 % M3 == 2^3 % 7 == 2^0 001... // 2^3, 6, 9, 12, 15, ... +3k + 2^M2 % M4 == 2^3 % 15 == 2^3 1000 // 2^3, 7, 11, 15, 19, ... +4k + 2^M2 % M5 == 2^3 % 31 == 2^3 01000 // 2^3, 8, 13, 18, 23, ... +5k + + 2^M3 % M2 == 2^7 % 3 == 2^1 10..--.. // 2^3, 5, 7... +2k + 2^M3 % M3 == 2^7 % 7 == 2^1 010...--- // 2^1, 4, 7... +3k + 2^M3 % M4 == 2^7 % 15 == 2^3 1000.... // +4k + 2^M3 % M5 == 2^7 % 31 == 2^2 00100..... // +5k + 2^M3 % M6 == 2^7 % 63 == 2^1 000010...... // +6k + 2^M3 % M7 == 2^7 % 127 == 2^0 0000001....... + 2^M3 % M8 == 2^7 % 255 == 2^7 10000000 + 2^M3 % M9 == 2^7 % 511 == 2^7 010000000 + + 2^M4 % M2 == 2^15 % 3 == 2^1 10..--..--..--.. + 2^M4 % M3 == 2^15 % 7 == 2^0 1...---...---... + 2^M4 % M4 == 2^15 % 15 == 2^3 1000....----.... + 2^M4 % M5 == 2^15 % 31 == 2^0 1.....-----..... + 2^M4 % M6 == 2^15 % 63 == 2^3 1000......------ + 2^M4 % M7 == 2^15 % 127 == 2^1 10.......------- + 2^M4 % M8 == 2^15 % 255 == 2^7 10000000........ + 2^M4 % M9 == 2^15 % 511 == 2^6 1000000......... + */ + switch { + case m < 2: + panic(0) + case e < 2: + return e + } + + if x = mathutil.ModPowUint32(2, e, m); x == 0 { + return m - 1 + } + + return x - 1 +} + +// ModPow returns b^Me % Mm. Run time grows quickly with 'e' and/or 'm' when b +// != 2 (then ModPow2 is used). 
+func ModPow(b, e, m uint32) (r *big.Int) { + if m == 1 { + return big.NewInt(0) + } + + if b == 2 { + x := ModPow2(e, m) + r = big.NewInt(0) + r.SetBit(r, int(x), 1) + return + } + + bb := big.NewInt(int64(b)) + r = big.NewInt(1) + for ; e != 0; e-- { + r.Mul(r, bb) + Mod(r, r, m) + bb.Mul(bb, bb) + Mod(bb, bb, m) + } + return +} + +// ProbablyPrime returns true if Mn is prime or is a pseudoprime to base a. +// Note: Every Mp, prime p, is a prime or is a pseudoprime to base 2, actually +// to every base 2^i, i ∊ [1, p). In contrast - it is conjectured (w/o any +// known counterexamples) that no composite Mp, prime p, is a pseudoprime to +// base 3. +func ProbablyPrime(n, a uint32) bool { + //TODO +test, +bench + if a == 2 { + return ModPow2(n-1, n) == 0 + } + + nMinus1 := New(n) + nMinus1.Sub(nMinus1, _1) + x := ModPow(a, n-1, n) + return x.Cmp(_1) == 0 || x.Cmp(nMinus1) == 0 +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/nist-sts-2-1-1-report b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/nist-sts-2-1-1-report new file mode 100644 index 00000000..20e686c6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/nist-sts-2-1-1-report @@ -0,0 +1,267 @@ +$ ./example -max 100000000 > rnd.dat +$ ./assess 1000000 + G E N E R A T O R S E L E C T I O N + ______________________________________ + + [0] Input File [1] Linear Congruential + [2] Quadratic Congruential I [3] Quadratic Congruential II + [4] Cubic Congruential [5] XOR + [6] Modular Exponentiation [7] Blum-Blum-Shub + [8] Micali-Schnorr [9] G Using SHA-1 + + Enter Choice: 0 + + + User Prescribed Input File: rnd.dat + + S T A T I S T I C A L T E S T S + _________________________________ + + [01] Frequency [02] Block Frequency + [03] Cumulative Sums [04] Runs + [05] Longest Run of Ones [06] Rank + [07] Discrete Fourier Transform [08] Nonperiodic Template Matchings + [09] Overlapping Template 
Matchings [10] Universal Statistical + [11] Approximate Entropy [12] Random Excursions + [13] Random Excursions Variant [14] Serial + [15] Linear Complexity + + INSTRUCTIONS + Enter 0 if you DO NOT want to apply all of the + statistical tests to each sequence and 1 if you DO. + + Enter Choice: 1 + + P a r a m e t e r A d j u s t m e n t s + ----------------------------------------- + [1] Block Frequency Test - block length(M): 128 + [2] NonOverlapping Template Test - block length(m): 9 + [3] Overlapping Template Test - block length(m): 9 + [4] Approximate Entropy Test - block length(m): 10 + [5] Serial Test - block length(m): 16 + [6] Linear Complexity Test - block length(M): 500 + + Select Test (0 to continue): 0 + + How many bitstreams? 200 + + Input File Format: + [0] ASCII - A sequence of ASCII 0's and 1's + [1] Binary - Each byte in data file contains 8 bits of data + + Select input mode: 1 + + Statistical Testing In Progress......... + + Statistical Testing Complete!!!!!!!!!!!! + +$ cat experiments/AlgorithmTesting/finalAnalysisReport.txt +------------------------------------------------------------------------------ +RESULTS FOR THE UNIFORMITY OF P-VALUES AND THE PROPORTION OF PASSING SEQUENCES +------------------------------------------------------------------------------ + generator is +------------------------------------------------------------------------------ + C1 C2 C3 C4 C5 C6 C7 C8 C9 C10 P-VALUE PROPORTION STATISTICAL TEST +------------------------------------------------------------------------------ + 28 22 17 19 15 8 24 23 19 25 0.093720 198/200 Frequency + 20 18 24 14 18 17 16 28 21 24 0.504219 199/200 BlockFrequency + 25 22 17 24 19 21 22 15 16 19 0.825505 197/200 CumulativeSums + 27 17 16 22 14 26 14 25 19 20 0.304126 199/200 CumulativeSums + 22 19 14 23 22 22 13 28 13 24 0.224821 199/200 Runs + 20 24 18 21 15 13 22 23 24 20 0.719747 197/200 LongestRun + 22 26 18 22 26 15 17 22 20 12 0.410055 199/200 Rank + 25 22 26 22 20 16 20 20 16 13 
0.585209 195/200 FFT + 22 11 15 26 33 24 21 13 14 21 0.013102 197/200 NonOverlappingTemplate + 17 11 16 27 19 24 19 20 28 19 0.219006 200/200 NonOverlappingTemplate + 23 27 24 15 21 11 18 27 15 19 0.162606 197/200 NonOverlappingTemplate + 21 18 13 20 19 23 20 17 26 23 0.749884 197/200 NonOverlappingTemplate + 24 22 24 24 24 21 13 15 17 16 0.494392 196/200 NonOverlappingTemplate + 24 16 23 15 23 18 25 16 18 22 0.699313 199/200 NonOverlappingTemplate + 19 23 21 16 27 18 17 20 18 21 0.859637 198/200 NonOverlappingTemplate + 12 20 16 19 26 14 30 20 24 19 0.141256 198/200 NonOverlappingTemplate + 18 21 17 21 20 14 25 19 24 21 0.859637 198/200 NonOverlappingTemplate + 24 25 21 18 23 15 23 17 16 18 0.749884 199/200 NonOverlappingTemplate + 20 22 22 18 16 22 28 16 14 22 0.574903 198/200 NonOverlappingTemplate + 18 23 22 17 24 25 19 16 23 13 0.626709 199/200 NonOverlappingTemplate + 17 22 14 19 21 21 18 19 24 25 0.842937 198/200 NonOverlappingTemplate + 18 17 26 21 22 15 22 18 21 20 0.883171 197/200 NonOverlappingTemplate + 19 25 16 32 15 19 20 18 16 20 0.236810 199/200 NonOverlappingTemplate + 19 18 15 21 24 22 18 21 20 22 0.964295 200/200 NonOverlappingTemplate + 21 14 17 23 26 19 20 22 20 18 0.834308 196/200 NonOverlappingTemplate + 15 21 17 27 26 23 21 17 24 9 0.129620 198/200 NonOverlappingTemplate + 25 17 19 19 18 22 21 22 21 16 0.951205 196/200 NonOverlappingTemplate + 20 19 24 21 19 24 16 18 17 22 0.946308 197/200 NonOverlappingTemplate + 27 16 19 18 23 19 22 17 22 17 0.807412 197/200 NonOverlappingTemplate + 14 18 21 23 23 20 14 22 20 25 0.719747 198/200 NonOverlappingTemplate + 18 22 19 12 24 25 25 22 18 15 0.474986 198/200 NonOverlappingTemplate + 21 18 23 17 19 18 28 19 20 17 0.825505 198/200 NonOverlappingTemplate + 20 19 15 16 27 20 26 17 20 20 0.657933 198/200 NonOverlappingTemplate + 17 25 21 21 11 19 22 16 27 21 0.401199 198/200 NonOverlappingTemplate + 19 16 15 18 24 19 25 25 19 20 0.769527 199/200 NonOverlappingTemplate + 18 20 20 26 20 12 24 25 19 16 
0.524101 198/200 NonOverlappingTemplate + 14 16 18 23 21 21 19 19 28 21 0.668321 197/200 NonOverlappingTemplate + 21 20 23 25 21 22 19 17 14 18 0.875539 197/200 NonOverlappingTemplate + 14 16 29 22 23 13 20 29 17 17 0.099513 197/200 NonOverlappingTemplate + 14 19 27 19 17 23 18 24 20 19 0.709558 199/200 NonOverlappingTemplate + 18 15 21 19 27 22 21 23 17 17 0.779188 198/200 NonOverlappingTemplate + 13 23 13 22 22 23 22 21 21 20 0.689019 199/200 NonOverlappingTemplate + 17 14 26 26 16 21 30 15 21 14 0.096578 199/200 NonOverlappingTemplate + 18 21 24 23 21 13 23 23 19 15 0.719747 197/200 NonOverlappingTemplate + 19 21 14 32 20 15 16 18 24 21 0.202268 199/200 NonOverlappingTemplate + 27 22 20 21 21 14 15 22 14 24 0.474986 196/200 NonOverlappingTemplate + 31 12 25 11 21 18 19 16 24 23 0.050305 197/200 NonOverlappingTemplate + 17 26 20 22 15 27 22 19 12 20 0.383827 199/200 NonOverlappingTemplate + 15 22 14 14 31 15 27 18 23 21 0.078086 194/200 NonOverlappingTemplate + 19 19 14 15 24 21 25 21 20 22 0.788728 197/200 NonOverlappingTemplate + 20 21 19 22 25 18 13 24 28 10 0.153763 195/200 NonOverlappingTemplate + 23 17 21 25 21 20 13 30 14 16 0.196920 196/200 NonOverlappingTemplate + 17 31 17 22 16 15 28 23 11 20 0.050305 197/200 NonOverlappingTemplate + 15 21 26 27 15 18 19 21 18 20 0.605916 198/200 NonOverlappingTemplate + 23 18 15 14 20 21 20 20 20 29 0.554420 200/200 NonOverlappingTemplate + 22 19 19 18 19 17 22 21 31 12 0.311542 199/200 NonOverlappingTemplate + 16 22 23 21 19 19 18 24 21 17 0.960198 197/200 NonOverlappingTemplate + 21 21 17 20 16 23 25 22 18 17 0.917870 200/200 NonOverlappingTemplate + 27 17 17 16 21 20 22 18 21 21 0.859637 197/200 NonOverlappingTemplate + 18 24 15 27 18 21 18 16 24 19 0.657933 199/200 NonOverlappingTemplate + 13 16 21 21 15 25 18 22 29 20 0.326749 198/200 NonOverlappingTemplate + 18 17 23 23 15 19 26 30 11 18 0.125927 198/200 NonOverlappingTemplate + 30 21 18 22 17 21 15 17 21 18 0.544254 195/200 NonOverlappingTemplate + 12 18 19 24 
16 24 18 24 28 17 0.311542 199/200 NonOverlappingTemplate + 20 15 23 15 18 30 23 18 17 21 0.410055 196/200 NonOverlappingTemplate + 15 18 23 16 29 21 22 16 19 21 0.544254 200/200 NonOverlappingTemplate + 18 16 27 13 21 22 22 21 16 24 0.534146 199/200 NonOverlappingTemplate + 20 25 18 21 16 21 17 28 21 13 0.484646 200/200 NonOverlappingTemplate + 23 22 13 22 14 20 26 18 19 23 0.574903 197/200 NonOverlappingTemplate + 21 24 25 13 19 22 18 13 24 21 0.504219 199/200 NonOverlappingTemplate + 19 13 18 25 22 15 23 28 19 18 0.410055 195/200 NonOverlappingTemplate + 20 15 27 22 26 26 14 13 21 16 0.181557 198/200 NonOverlappingTemplate + 18 18 19 23 18 20 19 21 24 20 0.991468 200/200 NonOverlappingTemplate + 18 23 17 14 20 25 22 22 22 17 0.816537 198/200 NonOverlappingTemplate + 26 15 15 11 23 21 21 16 36 16 0.005557 196/200 NonOverlappingTemplate + 27 13 21 23 21 16 19 20 16 24 0.544254 198/200 NonOverlappingTemplate + 16 15 32 17 20 23 22 19 20 16 0.262249 200/200 NonOverlappingTemplate + 26 19 24 13 24 16 18 18 13 29 0.137282 199/200 NonOverlappingTemplate + 15 18 14 27 32 21 15 20 19 19 0.112047 198/200 NonOverlappingTemplate + 22 23 22 18 20 23 19 22 16 15 0.924076 196/200 NonOverlappingTemplate + 18 17 21 22 14 17 22 24 20 25 0.798139 199/200 NonOverlappingTemplate + 15 17 19 24 21 23 17 25 23 16 0.739918 196/200 NonOverlappingTemplate + 22 11 15 26 32 25 21 13 14 21 0.017305 197/200 NonOverlappingTemplate + 22 16 19 23 22 21 21 19 17 20 0.985788 200/200 NonOverlappingTemplate + 22 28 18 24 14 20 23 21 20 10 0.230755 198/200 NonOverlappingTemplate + 14 13 22 28 14 28 17 22 23 19 0.129620 197/200 NonOverlappingTemplate + 22 16 22 20 21 21 16 19 18 25 0.935716 198/200 NonOverlappingTemplate + 15 20 23 17 19 22 21 23 18 22 0.951205 200/200 NonOverlappingTemplate + 20 24 21 19 17 19 19 24 15 22 0.930026 198/200 NonOverlappingTemplate + 18 21 15 21 17 28 24 22 20 14 0.534146 200/200 NonOverlappingTemplate + 19 15 19 19 20 20 15 25 23 25 0.779188 198/200 
NonOverlappingTemplate + 17 24 25 16 15 21 18 19 23 22 0.788728 198/200 NonOverlappingTemplate + 15 20 18 25 24 15 21 31 18 13 0.141256 200/200 NonOverlappingTemplate + 24 17 19 20 18 21 15 22 24 20 0.924076 196/200 NonOverlappingTemplate + 23 18 17 21 17 28 23 21 18 14 0.605916 197/200 NonOverlappingTemplate + 21 19 22 23 16 17 20 21 22 19 0.985788 200/200 NonOverlappingTemplate + 27 17 21 27 24 15 15 17 15 22 0.304126 199/200 NonOverlappingTemplate + 25 28 20 24 13 14 16 22 19 19 0.304126 197/200 NonOverlappingTemplate + 27 16 14 24 22 18 24 20 18 17 0.564639 196/200 NonOverlappingTemplate + 18 18 24 19 19 19 26 11 27 19 0.375313 195/200 NonOverlappingTemplate + 20 15 29 19 26 16 21 11 18 25 0.141256 197/200 NonOverlappingTemplate + 19 14 21 25 11 23 22 25 26 14 0.176657 199/200 NonOverlappingTemplate + 18 23 20 17 19 18 29 22 26 8 0.102526 199/200 NonOverlappingTemplate + 22 17 18 16 18 20 19 19 25 26 0.834308 198/200 NonOverlappingTemplate + 25 18 14 16 16 24 18 18 30 21 0.268917 198/200 NonOverlappingTemplate + 24 21 23 13 12 22 20 23 20 22 0.554420 196/200 NonOverlappingTemplate + 18 21 21 30 22 17 19 14 18 20 0.534146 197/200 NonOverlappingTemplate + 25 20 22 21 15 18 17 20 17 25 0.825505 199/200 NonOverlappingTemplate + 18 21 22 21 18 20 26 16 20 18 0.941144 197/200 NonOverlappingTemplate + 23 18 22 25 12 16 17 19 26 22 0.474986 198/200 NonOverlappingTemplate + 22 18 29 23 19 23 17 17 15 17 0.534146 198/200 NonOverlappingTemplate + 19 21 17 26 18 15 22 26 15 21 0.626709 197/200 NonOverlappingTemplate + 16 20 20 23 18 21 18 18 25 21 0.955835 199/200 NonOverlappingTemplate + 23 21 20 21 22 10 15 27 15 26 0.186566 198/200 NonOverlappingTemplate + 18 26 20 26 26 18 17 17 20 12 0.358641 198/200 NonOverlappingTemplate + 24 20 21 18 24 12 19 27 14 21 0.401199 195/200 NonOverlappingTemplate + 16 25 15 21 24 18 18 25 22 16 0.657933 199/200 NonOverlappingTemplate + 24 14 17 26 15 17 17 25 21 24 0.428095 200/200 NonOverlappingTemplate + 22 24 11 20 22 24 19 18 12 28 
0.176657 196/200 NonOverlappingTemplate + 27 16 27 18 27 14 13 16 21 21 0.141256 197/200 NonOverlappingTemplate + 23 25 20 18 23 17 15 23 19 17 0.834308 196/200 NonOverlappingTemplate + 19 21 20 27 16 16 18 25 16 22 0.678686 199/200 NonOverlappingTemplate + 25 22 21 19 15 19 22 19 25 13 0.657933 197/200 NonOverlappingTemplate + 19 28 21 25 20 12 18 13 29 15 0.073417 198/200 NonOverlappingTemplate + 20 24 21 19 21 15 17 24 20 19 0.941144 198/200 NonOverlappingTemplate + 18 29 23 17 24 19 17 18 16 19 0.585209 200/200 NonOverlappingTemplate + 18 28 18 16 25 21 18 20 14 22 0.544254 198/200 NonOverlappingTemplate + 22 19 23 22 22 21 21 26 12 12 0.401199 199/200 NonOverlappingTemplate + 22 15 25 16 21 27 14 22 21 17 0.484646 199/200 NonOverlappingTemplate + 18 25 20 23 30 17 13 22 18 14 0.213309 200/200 NonOverlappingTemplate + 20 23 21 21 23 29 16 13 16 18 0.410055 199/200 NonOverlappingTemplate + 21 19 16 22 31 18 20 17 18 18 0.514124 198/200 NonOverlappingTemplate + 26 22 12 14 23 17 21 24 21 20 0.455937 197/200 NonOverlappingTemplate + 21 17 18 17 14 32 21 26 18 16 0.162606 197/200 NonOverlappingTemplate + 22 24 22 23 11 15 17 18 29 19 0.230755 198/200 NonOverlappingTemplate + 19 27 20 19 23 15 24 15 21 17 0.657933 198/200 NonOverlappingTemplate + 20 25 16 10 24 13 23 21 21 27 0.149495 200/200 NonOverlappingTemplate + 19 21 21 27 17 17 19 21 21 17 0.904708 200/200 NonOverlappingTemplate + 18 23 15 19 24 21 23 21 13 23 0.719747 198/200 NonOverlappingTemplate + 26 16 28 19 19 18 17 17 16 24 0.474986 199/200 NonOverlappingTemplate + 24 32 17 18 20 13 18 18 19 21 0.236810 195/200 NonOverlappingTemplate + 26 25 18 17 12 19 20 23 21 19 0.585209 196/200 NonOverlappingTemplate + 18 26 25 12 18 16 24 19 18 24 0.410055 199/200 NonOverlappingTemplate + 27 21 22 27 21 14 18 14 23 13 0.219006 197/200 NonOverlappingTemplate + 18 23 24 16 19 21 16 26 20 17 0.798139 199/200 NonOverlappingTemplate + 19 30 15 27 14 19 24 11 22 19 0.073417 198/200 NonOverlappingTemplate + 20 23 22 20 
22 15 22 21 18 17 0.964295 198/200 NonOverlappingTemplate + 22 31 16 26 13 19 17 22 24 10 0.037566 197/200 NonOverlappingTemplate + 18 24 22 14 23 19 16 18 19 27 0.637119 197/200 NonOverlappingTemplate + 19 20 21 22 21 18 19 22 20 18 0.999438 198/200 NonOverlappingTemplate + 27 15 21 18 28 18 15 23 18 17 0.375313 195/200 NonOverlappingTemplate + 26 23 20 20 23 19 20 23 14 12 0.514124 199/200 NonOverlappingTemplate + 18 19 11 15 21 24 20 26 23 23 0.428095 198/200 NonOverlappingTemplate + 19 16 21 25 19 21 15 24 24 16 0.749884 197/200 NonOverlappingTemplate + 17 26 23 18 20 26 23 14 18 15 0.494392 198/200 NonOverlappingTemplate + 15 17 19 24 21 23 17 25 23 16 0.739918 196/200 NonOverlappingTemplate + 26 19 20 20 24 22 22 13 14 20 0.605916 198/200 OverlappingTemplate + 29 24 17 21 18 13 18 21 17 22 0.446556 196/200 Universal + 22 18 22 20 20 21 22 21 18 16 0.992952 198/200 ApproximateEntropy + 14 8 13 9 11 13 13 8 7 10 0.719747 106/106 RandomExcursions + 13 18 9 7 12 12 9 6 12 8 0.236810 104/106 RandomExcursions + 11 15 10 7 11 14 9 6 12 11 0.595549 106/106 RandomExcursions + 15 7 12 12 9 11 16 8 10 6 0.350485 106/106 RandomExcursions + 10 10 12 16 10 12 10 7 13 6 0.554420 106/106 RandomExcursions + 8 7 12 10 11 16 11 13 10 8 0.657933 106/106 RandomExcursions + 9 6 12 12 14 9 11 13 10 10 0.816537 104/106 RandomExcursions + 10 10 7 12 11 9 10 13 14 10 0.911413 105/106 RandomExcursions + 8 8 12 9 10 5 13 12 17 12 0.319084 104/106 RandomExcursionsVariant + 5 11 10 11 7 11 10 15 11 15 0.455937 104/106 RandomExcursionsVariant + 6 12 11 8 12 12 12 13 13 7 0.699313 104/106 RandomExcursionsVariant + 14 10 11 6 12 9 8 12 11 13 0.779188 104/106 RandomExcursionsVariant + 12 12 10 7 17 6 6 12 13 11 0.262249 103/106 RandomExcursionsVariant + 13 8 14 13 7 6 6 13 15 11 0.249284 102/106 RandomExcursionsVariant + 12 12 12 13 7 9 6 13 12 10 0.739918 105/106 RandomExcursionsVariant + 13 15 12 8 9 10 6 9 14 10 0.574903 106/106 RandomExcursionsVariant + 10 15 9 12 14 10 8 11 7 10 0.739918 
105/106 RandomExcursionsVariant + 13 12 8 11 12 11 9 10 11 9 0.978072 103/106 RandomExcursionsVariant + 10 13 12 12 8 13 8 9 14 7 0.739918 104/106 RandomExcursionsVariant + 12 10 10 14 7 8 7 13 14 11 0.657933 106/106 RandomExcursionsVariant + 10 13 10 10 13 10 12 6 10 12 0.897763 106/106 RandomExcursionsVariant + 9 12 15 8 13 8 12 8 11 10 0.779188 106/106 RandomExcursionsVariant + 9 13 15 10 10 10 8 14 6 11 0.616305 106/106 RandomExcursionsVariant + 7 17 9 12 9 11 10 16 4 11 0.129620 106/106 RandomExcursionsVariant + 10 9 10 15 7 12 7 8 12 16 0.419021 106/106 RandomExcursionsVariant + 9 12 11 8 8 9 15 12 9 13 0.798139 106/106 RandomExcursionsVariant + 17 34 11 22 22 17 19 20 13 25 0.026057 199/200 Serial + 22 20 16 22 20 18 20 18 23 21 0.989786 199/200 Serial + 12 33 25 29 21 11 21 15 14 19 0.003996 199/200 LinearComplexity + + +- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +The minimum pass rate for each statistical test with the exception of the +random excursion (variant) test is approximately = 193 for a +sample size = 200 binary sequences. + +The minimum pass rate for the random excursion (variant) test +is approximately = 101 for a sample size = 106 binary sequences. + +For further guidelines construct a probability table using the MAPLE program +provided in the addendum section of the documentation. +- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +$ diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/permute.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/permute.go new file mode 100644 index 00000000..bf828b69 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/permute.go @@ -0,0 +1,39 @@ +// Copyright (c) 2014 The mathutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mathutil + +import ( + "sort" +) + +// Generate the first permutation of data. +func PermutationFirst(data sort.Interface) { + sort.Sort(data) +} + +// Generate the next permutation of data if possible and return true. +// Return false if there is no more permutation left. +// Based on the algorithm described here: +// http://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order +func PermutationNext(data sort.Interface) bool { + var k, l int + for k = data.Len() - 2; ; k-- { // 1. + if k < 0 { + return false + } + + if data.Less(k, k+1) { + break + } + } + for l = data.Len() - 1; !data.Less(k, l); l-- { // 2. + } + data.Swap(k, l) // 3. + for i, j := k+1, data.Len()-1; i < j; i++ { // 4. + data.Swap(i, j) + j-- + } + return true +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/primes.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/primes.go new file mode 100644 index 00000000..2c82eb03 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/primes.go @@ -0,0 +1,342 @@ +// Copyright (c) 2014 The mathutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mathutil + +import ( + "math" +) + +// IsPrimeUint16 returns true if n is prime. Typical run time is few ns. +func IsPrimeUint16(n uint16) bool { + return n > 0 && primes16[n-1] == 1 +} + +// NextPrimeUint16 returns first prime > n and true if successful or an +// undefined value and false if there is no next prime in the uint16 limits. +// Typical run time is few ns. +func NextPrimeUint16(n uint16) (p uint16, ok bool) { + return n + uint16(primes16[n]), n < 65521 +} + +// IsPrime returns true if n is prime. Typical run time is about 100 ns. 
+// +//TODO rename to IsPrimeUint32 +func IsPrime(n uint32) bool { + switch { + case n&1 == 0: + return n == 2 + case n%3 == 0: + return n == 3 + case n%5 == 0: + return n == 5 + case n%7 == 0: + return n == 7 + case n%11 == 0: + return n == 11 + case n%13 == 0: + return n == 13 + case n%17 == 0: + return n == 17 + case n%19 == 0: + return n == 19 + case n%23 == 0: + return n == 23 + case n%29 == 0: + return n == 29 + case n%31 == 0: + return n == 31 + case n%37 == 0: + return n == 37 + case n%41 == 0: + return n == 41 + case n%43 == 0: + return n == 43 + case n%47 == 0: + return n == 47 + case n%53 == 0: + return n == 53 // Benchmarked optimum + case n < 65536: + // use table data + return IsPrimeUint16(uint16(n)) + default: + mod := ModPowUint32(2, (n+1)/2, n) + if mod != 2 && mod != n-2 { + return false + } + blk := &lohi[n>>24] + lo, hi := blk.lo, blk.hi + for lo <= hi { + index := (lo + hi) >> 1 + liar := liars[index] + switch { + case n > liar: + lo = index + 1 + case n < liar: + hi = index - 1 + default: + return false + } + } + return true + } +} + +// IsPrimeUint64 returns true if n is prime. Typical run time is few tens of µs. 
+// +// SPRP bases: http://miller-rabin.appspot.com +func IsPrimeUint64(n uint64) bool { + switch { + case n%2 == 0: + return n == 2 + case n%3 == 0: + return n == 3 + case n%5 == 0: + return n == 5 + case n%7 == 0: + return n == 7 + case n%11 == 0: + return n == 11 + case n%13 == 0: + return n == 13 + case n%17 == 0: + return n == 17 + case n%19 == 0: + return n == 19 + case n%23 == 0: + return n == 23 + case n%29 == 0: + return n == 29 + case n%31 == 0: + return n == 31 + case n%37 == 0: + return n == 37 + case n%41 == 0: + return n == 41 + case n%43 == 0: + return n == 43 + case n%47 == 0: + return n == 47 + case n%53 == 0: + return n == 53 + case n%59 == 0: + return n == 59 + case n%61 == 0: + return n == 61 + case n%67 == 0: + return n == 67 + case n%71 == 0: + return n == 71 + case n%73 == 0: + return n == 73 + case n%79 == 0: + return n == 79 + case n%83 == 0: + return n == 83 + case n%89 == 0: + return n == 89 // Benchmarked optimum + case n <= math.MaxUint16: + return IsPrimeUint16(uint16(n)) + case n <= math.MaxUint32: + return ProbablyPrimeUint32(uint32(n), 11000544) && + ProbablyPrimeUint32(uint32(n), 31481107) + case n < 105936894253: + return ProbablyPrimeUint64_32(n, 2) && + ProbablyPrimeUint64_32(n, 1005905886) && + ProbablyPrimeUint64_32(n, 1340600841) + case n < 31858317218647: + return ProbablyPrimeUint64_32(n, 2) && + ProbablyPrimeUint64_32(n, 642735) && + ProbablyPrimeUint64_32(n, 553174392) && + ProbablyPrimeUint64_32(n, 3046413974) + case n < 3071837692357849: + return ProbablyPrimeUint64_32(n, 2) && + ProbablyPrimeUint64_32(n, 75088) && + ProbablyPrimeUint64_32(n, 642735) && + ProbablyPrimeUint64_32(n, 203659041) && + ProbablyPrimeUint64_32(n, 3613982119) + default: + return ProbablyPrimeUint64_32(n, 2) && + ProbablyPrimeUint64_32(n, 325) && + ProbablyPrimeUint64_32(n, 9375) && + ProbablyPrimeUint64_32(n, 28178) && + ProbablyPrimeUint64_32(n, 450775) && + ProbablyPrimeUint64_32(n, 9780504) && + ProbablyPrimeUint64_32(n, 1795265022) + } +} + 
+// NextPrime returns first prime > n and true if successful or an undefined value and false if there +// is no next prime in the uint32 limits. Typical run time is about 2 µs. +// +//TODO rename to NextPrimeUint32 +func NextPrime(n uint32) (p uint32, ok bool) { + switch { + case n < 65521: + p16, _ := NextPrimeUint16(uint16(n)) + return uint32(p16), true + case n >= math.MaxUint32-4: + return + } + + n++ + var d0, d uint32 + switch mod := n % 6; mod { + case 0: + d0, d = 1, 4 + case 1: + d = 4 + case 2, 3, 4: + d0, d = 5-mod, 2 + case 5: + d = 2 + } + + p = n + d0 + if p < n { // overflow + return + } + + for { + if IsPrime(p) { + return p, true + } + + p0 := p + p += d + if p < p0 { // overflow + break + } + + d ^= 6 + } + return +} + +// NextPrimeUint64 returns first prime > n and true if successful or an undefined value and false if there +// is no next prime in the uint64 limits. Typical run time is in hundreds of µs. +func NextPrimeUint64(n uint64) (p uint64, ok bool) { + switch { + case n < 65521: + p16, _ := NextPrimeUint16(uint16(n)) + return uint64(p16), true + case n >= 18446744073709551557: // last uint64 prime + return + } + + n++ + var d0, d uint64 + switch mod := n % 6; mod { + case 0: + d0, d = 1, 4 + case 1: + d = 4 + case 2, 3, 4: + d0, d = 5-mod, 2 + case 5: + d = 2 + } + + p = n + d0 + if p < n { // overflow + return + } + + for { + if ok = IsPrimeUint64(p); ok { + break + } + + p0 := p + p += d + if p < p0 { // overflow + break + } + + d ^= 6 + } + return +} + +// FactorTerm is one term of an integer factorization. +type FactorTerm struct { + Prime uint32 // The divisor + Power uint32 // Term == Prime^Power +} + +// FactorTerms represent a factorization of an integer +type FactorTerms []FactorTerm + +// FactorInt returns prime factorization of n > 1 or nil otherwise. +// Resulting factors are ordered by Prime. Typical run time is few µs. 
+func FactorInt(n uint32) (f FactorTerms) { + switch { + case n < 2: + return + case IsPrime(n): + return []FactorTerm{{n, 1}} + } + + f, w := make([]FactorTerm, 9), 0 + prime16 := uint16(0) + for { + var ok bool + if prime16, ok = NextPrimeUint16(prime16); !ok { + break + } + + prime := uint32(prime16) + if prime*prime > n { + break + } + + power := uint32(0) + for n%prime == 0 { + n /= prime + power++ + } + if power != 0 { + f[w] = FactorTerm{prime, power} + w++ + } + if n == 1 { + break + } + } + if n != 1 { + f[w] = FactorTerm{n, 1} + w++ + } + return f[:w] +} + +// PrimorialProductsUint32 returns a slice of numbers in [lo, hi] which are a +// product of max 'max' primorials. The slice is not sorted. +// +// See also: http://en.wikipedia.org/wiki/Primorial +func PrimorialProductsUint32(lo, hi, max uint32) (r []uint32) { + lo64, hi64 := int64(lo), int64(hi) + if max > 31 { // N/A + max = 31 + } + + var f func(int64, int64, uint32) + f = func(n, p int64, emax uint32) { + e := uint32(1) + for n <= hi64 && e <= emax { + n *= p + if n >= lo64 && n <= hi64 { + r = append(r, uint32(n)) + } + if n < hi64 { + p, _ := NextPrime(uint32(p)) + f(n, int64(p), e) + } + e++ + } + } + + f(1, 2, max) + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/rat.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/rat.go new file mode 100644 index 00000000..91b1c6fb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/rat.go @@ -0,0 +1,27 @@ +// Copyright (c) 2014 The mathutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mathutil + +// QCmpUint32 compares a/b and c/d and returns: +// +// -1 if a/b < c/d +// 0 if a/b == c/d +// +1 if a/b > c/d +// +func QCmpUint32(a, b, c, d uint32) int { + switch x, y := uint64(a)*uint64(d), uint64(b)*uint64(c); { + case x < y: + return -1 + case x == y: + return 0 + default: // x > y + return 1 + } +} + +// QScaleUint32 returns a such that a/b >= c/d. +func QScaleUint32(b, c, d uint32) (a uint64) { + return 1 + (uint64(b)*uint64(c))/uint64(d) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/rnd.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/rnd.go new file mode 100644 index 00000000..dc2058b6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/rnd.go @@ -0,0 +1,383 @@ +// Copyright (c) 2014 The mathutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mathutil + +import ( + "fmt" + "math" + "math/big" +) + +// FC32 is a full cycle PRNG covering the 32 bit signed integer range. +// In contrast to full cycle generators shown at e.g. http://en.wikipedia.org/wiki/Full_cycle, +// this code doesn't produce values at constant delta (mod cycle length). +// The 32 bit limit is per this implementation, the algorithm used has no intrinsic limit on the cycle size. +// Properties include: +// - Adjustable limits on creation (hi, lo). +// - Positionable/randomly accessible (Pos, Seek). +// - Repeatable (deterministic). +// - Can run forward or backward (Next, Prev). +// - For a billion numbers cycle the Next/Prev PRN can be produced in cca 100-150ns. +// That's like 5-10 times slower compared to PRNs generated using the (non FC) rand package. 
+type FC32 struct { + cycle int64 // On average: 3 * delta / 2, (HQ: 2 * delta) + delta int64 // hi - lo + factors [][]int64 // This trades some space for hopefully a bit of speed (multiple adding vs multiplying). + lo int + mods []int // pos % set + pos int64 // Within cycle. + primes []int64 // Ordered. ∏ primes == cycle. + set []int64 // Reordered primes (magnitude order bases) according to seed. +} + +// NewFC32 returns a newly created FC32 adjusted for the closed interval [lo, hi] or an Error if any. +// If hq == true then trade some generation time for improved (pseudo)randomness. +func NewFC32(lo, hi int, hq bool) (r *FC32, err error) { + if lo > hi { + return nil, fmt.Errorf("invalid range %d > %d", lo, hi) + } + + delta := int64(hi) - int64(lo) + if delta > math.MaxUint32 { + return nil, fmt.Errorf("range out of int32 limits %d, %d", lo, hi) + } + + // Find the primorial covering whole delta + n, set, p := int64(1), []int64{}, uint32(2) + if hq { + p++ + } + for { + set = append(set, int64(p)) + n *= int64(p) + if n > delta { + break + } + p, _ = NextPrime(p) + } + + // Adjust the set so n ∊ [delta, 2 * delta] (HQ: [delta, 3 * delta]) + // while keeping the cardinality of the set (correlates with the statistic "randomness quality") + // at max, i.e. discard atmost one member. + i := -1 // no candidate prime + if n > 2*(delta+1) { + for j, p := range set { + q := n / p + if q < delta+1 { + break + } + + i = j // mark the highest candidate prime set index + } + } + if i >= 0 { // shrink the inner cycle + n = n / set[i] + set = delete(set, i) + } + r = &FC32{ + cycle: n, + delta: delta, + factors: make([][]int64, len(set)), + lo: lo, + mods: make([]int, len(set)), + primes: set, + } + r.Seed(1) // the default seed should be always non zero + return +} + +// Cycle reports the length of the inner FCPRNG cycle. +// Cycle is atmost the double (HQ: triple) of the generator period (hi - lo + 1). 
+func (r *FC32) Cycle() int64 { + return r.cycle +} + +// Next returns the first PRN after Pos. +func (r *FC32) Next() int { + return r.step(1) +} + +// Pos reports the current position within the inner cycle. +func (r *FC32) Pos() int64 { + return r.pos +} + +// Prev return the first PRN before Pos. +func (r *FC32) Prev() int { + return r.step(-1) +} + +// Seed uses the provided seed value to initialize the generator to a deterministic state. +// A zero seed produces a "canonical" generator with worse randomness than for most non zero seeds. +// Still, the FC property holds for any seed value. +func (r *FC32) Seed(seed int64) { + u := uint64(seed) + r.set = mix(r.primes, &u) + n := int64(1) + for i, p := range r.set { + k := make([]int64, p) + v := int64(0) + for j := range k { + k[j] = v + v += n + } + n *= p + r.factors[i] = mix(k, &u) + } +} + +// Seek sets Pos to |pos| % Cycle. +func (r *FC32) Seek(pos int64) { //vet:ignore + if pos < 0 { + pos = -pos + } + pos %= r.cycle + r.pos = pos + for i, p := range r.set { + r.mods[i] = int(pos % p) + } +} + +func (r *FC32) step(dir int) int { + for { // avg loops per step: 3/2 (HQ: 2) + y := int64(0) + pos := r.pos + pos += int64(dir) + switch { + case pos < 0: + pos = r.cycle - 1 + case pos >= r.cycle: + pos = 0 + } + r.pos = pos + for i, mod := range r.mods { + mod += dir + p := int(r.set[i]) + switch { + case mod < 0: + mod = p - 1 + case mod >= p: + mod = 0 + } + r.mods[i] = mod + y += r.factors[i][mod] + } + if y <= r.delta { + return int(y) + r.lo + } + } +} + +func delete(set []int64, i int) (y []int64) { + for j, v := range set { + if j != i { + y = append(y, v) + } + } + return +} + +func mix(set []int64, seed *uint64) (y []int64) { + for len(set) != 0 { + *seed = rol(*seed) + i := int(*seed % uint64(len(set))) + y = append(y, set[i]) + set = delete(set, i) + } + return +} + +func rol(u uint64) (y uint64) { + y = u << 1 + if int64(u) < 0 { + y |= 1 + } + return +} + +// FCBig is a full cycle PRNG covering 
ranges outside of the int32 limits. +// For more info see the FC32 docs. +// Next/Prev PRN on a 1e15 cycle can be produced in about 2 µsec. +type FCBig struct { + cycle *big.Int // On average: 3 * delta / 2, (HQ: 2 * delta) + delta *big.Int // hi - lo + factors [][]*big.Int // This trades some space for hopefully a bit of speed (multiple adding vs multiplying). + lo *big.Int + mods []int // pos % set + pos *big.Int // Within cycle. + primes []int64 // Ordered. ∏ primes == cycle. + set []int64 // Reordered primes (magnitude order bases) according to seed. +} + +// NewFCBig returns a newly created FCBig adjusted for the closed interval [lo, hi] or an Error if any. +// If hq == true then trade some generation time for improved (pseudo)randomness. +func NewFCBig(lo, hi *big.Int, hq bool) (r *FCBig, err error) { + if lo.Cmp(hi) > 0 { + return nil, fmt.Errorf("invalid range %d > %d", lo, hi) + } + + delta := big.NewInt(0) + delta.Add(delta, hi).Sub(delta, lo) + + // Find the primorial covering whole delta + n, set, pp, p := big.NewInt(1), []int64{}, big.NewInt(0), uint32(2) + if hq { + p++ + } + for { + set = append(set, int64(p)) + pp.SetInt64(int64(p)) + n.Mul(n, pp) + if n.Cmp(delta) > 0 { + break + } + p, _ = NextPrime(p) + } + + // Adjust the set so n ∊ [delta, 2 * delta] (HQ: [delta, 3 * delta]) + // while keeping the cardinality of the set (correlates with the statistic "randomness quality") + // at max, i.e. discard atmost one member. 
+ dd1 := big.NewInt(1) + dd1.Add(dd1, delta) + dd2 := big.NewInt(0) + dd2.Lsh(dd1, 1) + i := -1 // no candidate prime + if n.Cmp(dd2) > 0 { + q := big.NewInt(0) + for j, p := range set { + pp.SetInt64(p) + q.Set(n) + q.Div(q, pp) + if q.Cmp(dd1) < 0 { + break + } + + i = j // mark the highest candidate prime set index + } + } + if i >= 0 { // shrink the inner cycle + pp.SetInt64(set[i]) + n.Div(n, pp) + set = delete(set, i) + } + r = &FCBig{ + cycle: n, + delta: delta, + factors: make([][]*big.Int, len(set)), + lo: lo, + mods: make([]int, len(set)), + pos: big.NewInt(0), + primes: set, + } + r.Seed(1) // the default seed should be always non zero + return +} + +// Cycle reports the length of the inner FCPRNG cycle. +// Cycle is atmost the double (HQ: triple) of the generator period (hi - lo + 1). +func (r *FCBig) Cycle() *big.Int { + return r.cycle +} + +// Next returns the first PRN after Pos. +func (r *FCBig) Next() *big.Int { + return r.step(1) +} + +// Pos reports the current position within the inner cycle. +func (r *FCBig) Pos() *big.Int { + return r.pos +} + +// Prev return the first PRN before Pos. +func (r *FCBig) Prev() *big.Int { + return r.step(-1) +} + +// Seed uses the provided seed value to initialize the generator to a deterministic state. +// A zero seed produces a "canonical" generator with worse randomness than for most non zero seeds. +// Still, the FC property holds for any seed value. +func (r *FCBig) Seed(seed int64) { + u := uint64(seed) + r.set = mix(r.primes, &u) + n := big.NewInt(1) + v := big.NewInt(0) + pp := big.NewInt(0) + for i, p := range r.set { + k := make([]*big.Int, p) + v.SetInt64(0) + for j := range k { + k[j] = big.NewInt(0) + k[j].Set(v) + v.Add(v, n) + } + pp.SetInt64(p) + n.Mul(n, pp) + r.factors[i] = mixBig(k, &u) + } +} + +// Seek sets Pos to |pos| % Cycle. 
+func (r *FCBig) Seek(pos *big.Int) { + r.pos.Set(pos) + r.pos.Abs(r.pos) + r.pos.Mod(r.pos, r.cycle) + mod := big.NewInt(0) + pp := big.NewInt(0) + for i, p := range r.set { + pp.SetInt64(p) + r.mods[i] = int(mod.Mod(r.pos, pp).Int64()) + } +} + +func (r *FCBig) step(dir int) (y *big.Int) { + y = big.NewInt(0) + d := big.NewInt(int64(dir)) + for { // avg loops per step: 3/2 (HQ: 2) + r.pos.Add(r.pos, d) + switch { + case r.pos.Sign() < 0: + r.pos.Add(r.pos, r.cycle) + case r.pos.Cmp(r.cycle) >= 0: + r.pos.SetInt64(0) + } + for i, mod := range r.mods { + mod += dir + p := int(r.set[i]) + switch { + case mod < 0: + mod = p - 1 + case mod >= p: + mod = 0 + } + r.mods[i] = mod + y.Add(y, r.factors[i][mod]) + } + if y.Cmp(r.delta) <= 0 { + y.Add(y, r.lo) + return + } + y.SetInt64(0) + } +} + +func deleteBig(set []*big.Int, i int) (y []*big.Int) { + for j, v := range set { + if j != i { + y = append(y, v) + } + } + return +} + +func mixBig(set []*big.Int, seed *uint64) (y []*big.Int) { + for len(set) != 0 { + *seed = rol(*seed) + i := int(*seed % uint64(len(set))) + y = append(y, set[i]) + set = deleteBig(set, i) + } + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/tables.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/tables.go new file mode 100644 index 00000000..f32952c0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/tables.go @@ -0,0 +1,6995 @@ +// Copyright (c) 2014 The mathutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// "Static" data + +package mathutil + +var ( + // Set bits count in a byte + popcnt = [256]byte{ + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, // 0 + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, // 1 + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, // 2 + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, // 3 + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, // 4 + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, // 5 + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, // 6 + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, // 7 + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, // 8 + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, // 9 + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, // 10 + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, // 11 + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, // 12 + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, // 13 + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, // 14 + 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8, // 15 + } + + // Highest set bit index in a byte + log2 = [256]int{ + -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 0 + + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // 1 + + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, // 2 + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, // 3 + + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, // 4 + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, // 5 + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, // 6 + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, // 7 + + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // 8 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // 9 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // 10 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // 11 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // 12 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // 13 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // 14 + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // 15 + } + + // "Predivisors": 2-53 + liars = [3660]uint32{ 
+ 31621, 42799, 49141, 49981, 65077, 65281, 80581, 83333, 88357, 90751, + 104653, 130561, 164737, 188057, 194221, 196093, 215749, 219781, 220729, 253241, + 256999, 271951, 280601, 282133, 357761, 390937, 458989, 486737, 489997, 514447, + 580337, 587861, 611701, 647089, 653333, 657901, 665281, 665333, 688213, 710533, + 721801, 722261, 738541, 741751, 742813, 745889, 769757, 818201, 838861, 873181, + 877099, 916327, 976873, 983401, 1016801, 1018921, 1053761, 1064053, 1073021, 1082401, + 1109461, 1132657, 1145257, 1168513, 1194649, 1207361, 1251949, 1252697, 1302451, 1325843, + 1357441, 1373653, 1397419, 1441091, 1493857, 1507963, 1509709, 1530787, 1584133, 1678541, + 1690501, 1730977, 1735841, 1811573, 1876393, 1969417, 1987021, 2004403, 2081713, 2163001, + 2181961, 2205967, 2261953, 2264369, 2269093, 2284453, 2304167, 2387797, 2487941, 2510569, + 2670361, 2746477, 2748023, 2757241, 2811271, 2909197, 2944261, 2976487, 3048841, 3090091, + 3116107, 3125281, 3225601, 3363121, 3375041, 3400013, 3413533, 3429037, 3539101, 3542533, + 3567481, 3568661, 3605429, 3656449, 3763801, 3828001, 3898129, 3911197, 3985921, 4072729, + 4181921, 4188889, 4209661, 4360621, 4469471, 4480477, 4513841, 4835209, 4863127, 4869313, + 4877641, 4922413, 5016191, 5044033, 5095177, 5173169, 5173601, 5176153, 5256091, 5271841, + 5284333, 5351537, 5489641, 5590621, 5672041, 5919187, 6027193, 6118141, 6140161, 6159301, + 6189121, 6226193, 6233977, 6236257, 6278533, 6334351, 6368689, 6386993, 6631549, 6658669, + 6779137, 6787327, 6836233, 6952037, 6955541, 6998881, 7017193, 7232321, 7306261, 7306561, + 7429117, 7462001, 7674967, 7725901, 7759937, 7820201, 7883731, 8036033, 8095447, 8239477, + 8384513, 8534233, 8725753, 8727391, 8902741, 9006401, 9056501, 9073513, 9131401, 9345541, + 9371251, 9439201, 9480461, 9533701, 9564169, 9567673, 9588151, 9591661, 9729301, 9774181, + 9863461, 10024561, 10084177, 10323769, 10331141, 10386241, 10425511, 10610063, 10700761, 10712857, + 10763653, 10974881, 
11081459, 11115037, 11335501, 11541307, 11585293, 11592397, 11777599, 12032021, + 12096613, 12263131, 12322133, 12327121, 12599233, 12854437, 13057787, 13338371, 13446253, 13500313, + 13635289, 13694761, 13747361, 13773061, 14026897, 14154337, 14179537, 14324473, 14469841, 14671801, + 14676481, 14709241, 14794081, 14796289, 14865121, 15101893, 15139199, 15162941, 15188557, 15220951, + 15247621, 15479777, 15525241, 15603391, 15621409, 15700301, 15802681, 15976747, 15978007, 16070429, + 16132321, 16149169, 16153633, 16324001, 16349477, 16360381, 16705021, 16773121, 16822081, 16843009, + 16853077, 16879501, 16973393, 17098369, 17116837, 17134043, 17208601, 17236801, 17327773, 17375249, + 17405537, 17585969, 17870561, 18067501, 18073817, 18366937, 18443701, 18454921, 18535177, 18653353, + 18740971, 19328653, 19384289, 19404139, 19471033, 19607561, 20261251, 20417311, 20647621, 20968501, + 21042001, 21303343, 21306157, 21359521, 21397381, 21400481, 21623659, 21654533, 22075579, 22087477, + 22369621, 22591301, 22669501, 22711873, 22849481, 22953673, 23247901, 23382529, 23464033, 23577497, + 23634181, 23734901, 23828017, 23872213, 23963869, 24214051, 24356377, 25080101, 25150501, 25276421, + 25326001, 25457833, 25629913, 25696133, 25768261, 25909453, 26280073, 26377921, 26821601, 26840269, + 26877421, 26886817, 27108397, 27118601, 27219697, 27271151, 27279409, 27331921, 27380831, 27392041, + 27409541, 27491237, 27509653, 27664033, 27798461, 27808463, 28325881, 28527049, 28572961, 29111881, + 29214541, 29581501, 30022129, 30090817, 30185569, 30219757, 30295141, 30338593, 30388753, 30418957, + 30576151, 30662497, 30740417, 30881551, 30894307, 31040833, 31166803, 31436123, 31735621, 31759121, + 32091781, 32095057, 32168117, 32285041, 32497921, 32676481, 33146717, 33298337, 33600533, 33627301, + 33704101, 33872593, 34003061, 34043101, 34124641, 34540801, 34856167, 34944001, 35576599, 35703361, + 35820937, 35851037, 36291193, 36307981, 36861901, 36919681, 36974341, 37109467, 
37376509, 37439201, + 37964809, 37988497, 38010307, 38046817, 38118763, 38210323, 39465091, 39512773, 39655153, 39684157, + 40165093, 40238797, 40315441, 40361197, 40629601, 40782589, 40827473, 40987201, 41121433, 41568101, + 41604109, 41642681, 41662297, 41840809, 42009217, 42485119, 42623017, 42984589, 43224397, 43363601, + 43661257, 44070841, 44314129, 44465221, 44482901, 45100177, 45175201, 45219329, 45414433, 45819541, + 45879941, 46094401, 46325029, 46386589, 46469809, 46517857, 46679761, 46860001, 47220367, 47903701, + 47918581, 48064021, 48191653, 48269761, 48316969, 48400753, 48448661, 48551161, 48563089, 49075417, + 49303801, 49411801, 49459801, 50155733, 50201089, 50443201, 50523661, 51030601, 51129781, 51302353, + 51500521, 52072021, 52119289, 52204237, 53283169, 53399449, 53656021, 53675623, 53695721, 53711113, + 54029741, 54449431, 55109401, 55176097, 55318957, 55729957, 56052361, 56420033, 56479897, 56810137, + 57762433, 58003213, 58422409, 58449847, 58509977, 58679941, 58755877, 59631211, 59840537, 59913157, + 59953741, 60155201, 60352921, 60547831, 60566431, 60581401, 60696661, 60738257, 60957361, 61201009, + 61219789, 61377109, 61832377, 62756641, 63001801, 63002501, 63065281, 63167743, 63318169, 63328469, + 63346999, 63388033, 64148717, 64605041, 64735897, 65144501, 65254393, 65301013, 65350801, 65359477, + 66096253, 67194401, 67642513, 67928221, 68102641, 68154001, 68165761, 68512867, 68621701, 68839597, + 69030901, 69128641, 69176647, 69228967, 69231061, 69485281, 69612061, 69885649, 70149631, 70463489, + 70593931, 70728121, 71079661, 71734417, 72498253, 72543547, 73562833, 73645001, 74411131, 74927161, + 75140137, 75565873, 76725091, 76745101, 77533123, 77648941, 77812153, 77817979, 78939089, 79398901, + 79411201, 79417801, 79464533, 79786523, 80142761, 80146909, 80375707, 80556337, 80687881, 80891009, + 81433591, 81954133, 82273201, 82506439, 82870517, 82929001, 83083001, 83103329, 83204801, 84164033, + 84350561, 84421081, 84487457, 84998503, 
85328717, 85519337, 85823401, 86027329, 86438857, 86530621, + 86999837, 87499651, 87694261, 88256449, 88368853, 88661861, 89308771, 89784581, 90270613, 90278161, + 90341197, 90665789, 90698401, 91433281, 91659283, 92438581, 92625121, 93431521, 93541537, 93571633, + 93643201, 93677761, 93926197, 94316401, 94502701, 95451361, 95452781, 96135601, 96618397, 96791881, + 96888641, 96895441, 96904081, 96925921, 97255801, 97496449, 97796953, 97863529, 97924217, 99036001, + 99115297, 99486889, 99789673, 99898801, 100463443, 100618933, 100943201, 101152133, 101218921, 101270251, + 101276579, 101649241, 102004421, 102678031, 102690677, 102690901, 103301633, 104078857, 104524421, 104988673, + 105305443, 105919633, 106485121, 106622353, 106743073, 107360641, 107543333, 108596953, 109231229, 109437751, + 109541461, 109879837, 110135821, 110139499, 110312773, 110413333, 110717861, 111370141, 111654401, 112032001, + 112402981, 112828801, 113589601, 113605201, 113730481, 113892589, 114305441, 114329881, 114701341, 114842677, + 114910489, 115039081, 115174681, 115497901, 115804501, 115873801, 116090081, 116321617, 116617289, 116682721, + 116696161, 116998669, 117987841, 118466401, 118901521, 119092801, 119204809, 119261113, 119327041, 119558011, + 119743537, 119940853, 120296677, 120517021, 120838609, 121062001, 121374241, 121472359, 121609489, 122166307, + 122396737, 122941981, 123481777, 123671671, 123877081, 123987793, 124145473, 124630273, 124818601, 125284141, + 125686241, 125848577, 126132553, 127050067, 128079409, 128124151, 128396921, 128468957, 128665319, 128987429, + 129205781, 129256273, 129357061, 129461617, 129524669, 130556329, 130693393, 130944133, 131023201, 131567929, + 131938561, 132332201, 132338881, 132440521, 132575071, 133216381, 133302781, 133467517, 133800661, 134696801, + 134767153, 134868029, 135263269, 135296053, 135308881, 135945853, 135969401, 136043641, 136661201, 136722433, + 137415821, 137763037, 138030721, 138403981, 138828821, 139295701, 139487041, 
140197051, 142525333, 142922413, + 143106133, 143168581, 145348529, 146156617, 146272901, 146659801, 146843929, 146884393, 147028001, 147287141, + 148109473, 148171769, 148910653, 149389633, 150379693, 150960239, 150988753, 151533377, 151589881, 152716537, + 152922001, 152991841, 153369061, 153589801, 153754873, 153928133, 154287451, 154513633, 154944533, 155203361, + 156114061, 156532799, 157069189, 157368661, 157405249, 157725829, 158068153, 158192317, 158397247, 158496911, + 158544401, 158895281, 160348189, 160378861, 160491329, 160587841, 160672201, 160730389, 161184013, 161216021, + 161289649, 161304001, 161423377, 162026869, 162067441, 162690481, 162771337, 162776041, 163442551, 163954561, + 164111281, 165061909, 165224321, 165938653, 166082309, 166339057, 166406561, 166827943, 167579497, 167582377, + 167692141, 167881121, 168566501, 169655641, 170640961, 170782921, 170856533, 171454321, 172116181, 172436713, + 172947529, 173401621, 174479729, 176030977, 176597821, 176609441, 176977921, 177167233, 177254533, 177693521, + 177927641, 177951973, 178837201, 178956971, 179083601, 179285137, 179820257, 180115489, 180497633, 180703451, + 181285001, 181285537, 181542601, 181647497, 182383111, 183677341, 184411567, 185653333, 186183469, 186393481, + 186983521, 187050529, 187667969, 187761241, 188516329, 188985961, 189714193, 189738361, 189941761, 190212181, + 190382161, 190913297, 191233813, 191648161, 191981609, 192346153, 192857761, 193330237, 193638337, 193949641, + 194556451, 196035001, 196049701, 196231393, 198982759, 199674721, 200143351, 200753281, 201261061, 202130197, + 202156813, 202538857, 203505697, 204280501, 204582457, 204766381, 205057561, 206304961, 206453509, 206504033, + 206529737, 207008569, 207030541, 207132481, 207477001, 207618781, 208051201, 208969223, 209246701, 209404369, + 209990881, 210592873, 210842113, 213035761, 214038533, 214110541, 214852609, 214858717, 215436241, 216821881, + 217123069, 217875571, 218603617, 218642029, 218947121, 
219621781, 220531501, 220883521, 221368153, 221415781, + 221884001, 222010721, 222630193, 223449463, 223625851, 223782263, 224074369, 224136013, 224769241, 224957893, + 225853633, 226359547, 226450297, 227132641, 227444101, 227475481, 228652201, 228842209, 228988033, 229589413, + 230357761, 231383461, 231405701, 231927781, 232114433, 232460821, 232771501, 233110081, 234869009, 235426913, + 235928071, 237791143, 238001653, 238833421, 240068041, 240371713, 240694513, 240785047, 241505377, 242067841, + 242650717, 242860069, 243583201, 243955141, 244883981, 245006623, 245950561, 246099317, 246282511, 246434761, + 246658441, 247318957, 247321301, 247416101, 249582481, 250436033, 250958401, 250988173, 251528401, 251663837, + 251855893, 252853921, 253610281, 253893397, 255416897, 256831433, 257590661, 258020473, 258043229, 258234401, + 258944401, 259763093, 259765747, 260156101, 260518801, 260736341, 260963389, 261186001, 261703417, 262979501, + 263428181, 264269449, 264384469, 265020001, 265584133, 265735969, 265836161, 266790481, 266925601, 270525737, + 271272569, 271763467, 271826629, 271950829, 273361789, 273480637, 274701913, 274810241, 274919401, 275283401, + 275619961, 276018913, 276131137, 276542401, 276638321, 277787141, 278943061, 279377281, 280885153, 282253141, + 282471853, 282769771, 283900961, 284166877, 284301751, 284736091, 284834299, 285820501, 286316801, 287160301, + 287449091, 287715121, 288099001, 288117721, 288735277, 290643601, 290706781, 290953921, 291088513, 291461633, + 292153681, 292290181, 292433321, 292902481, 293346637, 293847721, 293938261, 295419097, 295743017, 297624961, + 297798961, 298212601, 299367877, 299736181, 301413001, 302635351, 304080001, 307629401, 307694323, 307972801, + 308483209, 309666361, 310474249, 310978027, 311177213, 311411629, 311655829, 311671361, 312408113, 312614021, + 314184487, 315034513, 315351521, 317137969, 317365933, 317641171, 317796119, 319053281, 319374577, 319440769, + 319726177, 320326003, 321324589, 
321850849, 322469701, 322941881, 324477697, 325028089, 325352101, 325546873, + 326266051, 326405713, 326469137, 326628721, 326694301, 326695141, 327073601, 327093409, 327398009, 328302901, + 329153653, 329769721, 330198331, 330759617, 331658081, 331934989, 337135501, 337420679, 337665901, 337783981, + 338125537, 338458807, 338914369, 339195097, 339492169, 339794641, 341958121, 341994131, 343017529, 343052833, + 344201441, 344255551, 344776301, 346080391, 348989101, 349752913, 350031973, 350244577, 351058753, 351177769, + 352802803, 352932337, 353815801, 353932801, 354062809, 356604421, 356836819, 357348601, 357872971, 358416577, + 359394751, 359727073, 360145633, 360375181, 360787771, 361307521, 361312337, 362569201, 363170837, 363430637, + 364550761, 365077373, 365231401, 366487201, 366532321, 366652201, 367559501, 367632301, 368016949, 368476501, + 369667561, 371011801, 371611153, 372167101, 373012777, 373533617, 373669453, 373906513, 374346361, 374988661, + 376957153, 377192353, 377334497, 377458849, 377806687, 377869031, 378792649, 379732501, 380137633, 382304161, + 384100001, 385175113, 385319089, 387072661, 388695301, 390609941, 390612221, 391014937, 392679737, 393611653, + 394723177, 396864469, 399156661, 399302581, 399647221, 400385701, 400557109, 401100881, 403095967, 403293313, + 405739681, 405782623, 407737201, 407889161, 409302001, 409458241, 410613809, 410680357, 411618241, 411851389, + 412836689, 413138881, 413429801, 413778817, 414216461, 414368641, 415200361, 415204501, 415476343, 416964241, + 417767201, 417779909, 418044563, 418226581, 418616161, 418617281, 418667401, 419184481, 420607441, 421942951, + 422429041, 422928101, 423384001, 423465001, 424175761, 424411501, 424431541, 425967301, 426174101, 426219649, + 426770437, 426783811, 427294141, 428180191, 428758201, 429135841, 429509837, 430046857, 430381921, 430646401, + 430733701, 432227449, 434042801, 435016187, 435358657, 435993301, 436465501, 437247841, 437462101, 437597101, + 437866087, 
439309261, 441354497, 441650591, 441758461, 442050577, 442181291, 442543553, 444660421, 445429693, + 446414621, 446619617, 449501761, 450807481, 450866021, 450872573, 452990401, 453366029, 453967739, 454745773, + 455198563, 457274161, 457320533, 459785089, 460251733, 460585861, 461151121, 461272267, 461329601, 462587329, + 462639409, 462701513, 464012033, 464955857, 465505633, 466290949, 466758181, 467100937, 468410113, 468950021, + 470120257, 470268137, 470644021, 471535373, 471664513, 472814413, 473581057, 474892741, 474970501, 474983881, + 475723849, 478614067, 479962009, 480668347, 481153501, 481239361, 482488393, 482824669, 482921297, 483006889, + 483029821, 483945601, 484200289, 486063001, 486902929, 487896601, 488104681, 488169289, 488585521, 488656981, + 489994201, 490950461, 491738801, 493108481, 494288677, 495909871, 496109729, 496560349, 497148599, 497285713, + 498662561, 498706651, 498905189, 500747293, 501172241, 501472333, 502686713, 504870241, 505473263, 505532773, + 505798213, 506349421, 507142567, 507323521, 508606771, 509302873, 509551201, 510925609, 511098521, 511215521, + 511611673, 512330281, 514738981, 516045197, 516259657, 516764063, 517662001, 518216201, 518548801, 521501473, + 522390109, 522758233, 523756711, 526067821, 526359289, 526686889, 528013333, 528043753, 528220117, 530630701, + 531095029, 531681281, 532126801, 532758241, 532800133, 533429881, 534782293, 535252867, 535428577, 535517581, + 536003333, 536114197, 536342419, 536870911, 540207097, 540621181, 540654409, 540680141, 542497201, 542536457, + 544861633, 545550433, 545622401, 546102481, 546117301, 546322201, 548080513, 548989561, 549308761, 550132741, + 550230409, 550635373, 550853137, 551313001, 552573793, 553027201, 554487121, 554599051, 554964001, 555321007, + 555465601, 556001377, 556069849, 556095433, 556114609, 557165209, 558235109, 558900821, 558977761, 561448487, + 562367821, 563298061, 563947141, 564298489, 564689381, 565664761, 565707061, 567358513, 567596401, 
568902001, + 568967221, 569332177, 569495809, 570941881, 572123521, 572228929, 572430769, 572567353, 572936869, 573817861, + 573862021, 574998841, 575326033, 576724219, 577210181, 577352641, 577613261, 579606301, 579956653, 581618143, + 582389641, 582799951, 585261637, 586706821, 587343541, 588049001, 591242653, 591822001, 592467451, 592468777, + 593682169, 593728489, 595405201, 595590841, 597537361, 597717121, 599135767, 599945293, 600893921, 601606487, + 602379181, 604584221, 605454917, 605961049, 606872449, 607148653, 607750681, 608421637, 608917753, 609361567, + 609813781, 611097401, 611374453, 611770513, 611812321, 611817421, 612006253, 613849601, 614742241, 615361183, + 615760133, 615895897, 616280897, 617087701, 619239457, 619365121, 619480601, 620169409, 620544961, 620755537, + 621769669, 622137601, 623735953, 624303241, 624732421, 625060801, 625482001, 626717471, 627886657, 628868467, + 629134081, 630496621, 630622753, 630811513, 631767943, 631974613, 633289807, 635155291, 635291077, 635319361, + 636287653, 636337073, 636936697, 638502913, 638837761, 639305921, 639807781, 640650931, 640977373, 643036321, + 643316461, 643552909, 644004817, 644453633, 644457551, 644731357, 644900257, 645556481, 648056449, 648328801, + 651011329, 651064681, 651151801, 651514753, 652469641, 653235841, 653260633, 655264369, 657732349, 659526601, + 659846021, 660095641, 660754117, 661122881, 661207177, 662134201, 663760681, 665462081, 668498321, 670976641, + 670987021, 671716921, 672103001, 672108193, 673778827, 675260477, 676359391, 678481693, 680983817, 681019921, + 681124207, 681303241, 682528687, 683316001, 683362681, 684350833, 686059921, 687741401, 689537441, 690035713, + 690562601, 691131349, 692535637, 693456521, 694116893, 696042901, 696321949, 696998251, 697821857, 698192041, + 698819711, 702683101, 705303457, 705351583, 706728377, 707691601, 709409993, 710382401, 710617861, 710721001, + 714490481, 717096641, 717653129, 717831211, 720767521, 722955773, 724160251, 
724969087, 725508241, 731276521, + 732805681, 734166217, 736668013, 739444021, 739576801, 740988151, 741182401, 741214237, 742017181, 742550401, + 744500641, 745493761, 745745461, 746331041, 747406801, 748638001, 749172821, 749640161, 750632137, 751226401, + 751705597, 752186593, 753233717, 753574537, 753594001, 754020361, 754874257, 756205633, 756271909, 756980137, + 758581651, 758687581, 758901701, 759252367, 759266621, 759638881, 762699649, 763907741, 764033999, 764240611, + 765378241, 766303693, 766823797, 770201221, 770909107, 770937931, 771043201, 771337891, 772495777, 773131927, + 773807401, 775368901, 775896181, 776443769, 777218989, 781471001, 782823281, 784450393, 784777393, 784783477, + 784966297, 787085857, 787209277, 788046901, 788931361, 789082001, 790453049, 791118043, 792144161, 792145729, + 794201333, 794399041, 794937601, 795064909, 796072003, 796200901, 796560703, 797418997, 797834017, 799162561, + 799630753, 799898833, 799916101, 801093011, 801227269, 801866647, 804978721, 805505957, 805771501, 807115753, + 807218413, 808214161, 809790881, 810023881, 810455101, 811110301, 811478533, 811607777, 811730923, 815430533, + 815796413, 816024161, 816215401, 816549121, 817832329, 818401321, 819466201, 819743233, 822018961, 822531841, + 824389441, 826004467, 829512001, 830664451, 831933901, 832048447, 832127489, 832169857, 833610751, 837766217, + 839268139, 839280691, 839908217, 840749761, 841217653, 841660961, 842785841, 842824981, 842960981, 843161887, + 844545271, 845376533, 846961321, 848090377, 848755969, 849548671, 852432769, 854094781, 854868257, 855734401, + 857100421, 857902861, 858687103, 859096477, 860334301, 862082677, 862678081, 863196181, 863609113, 863984881, + 865242841, 867022747, 867110501, 867638201, 868088341, 868111597, 868691401, 870985223, 871157233, 871195561, + 871908481, 876850801, 877542481, 878492941, 878940833, 879995689, 880870513, 880922657, 883276549, 884304037, + 884952001, 886180429, 887795221, 888868441, 892740853, 
893692819, 894264337, 896901461, 897087361, 897283213, + 899019353, 900736411, 901848301, 902566501, 903108821, 903390643, 905040953, 907378669, 907670501, 907711561, + 908005249, 910202509, 910867481, 911484421, 914348737, 914906539, 920375821, 920696653, 921858631, 922845241, + 923437213, 926756881, 927106561, 927877001, 929159941, 930530701, 932148253, 933729421, 935794081, 936421141, + 937675393, 938376181, 939947009, 940123801, 941056273, 941734657, 943271569, 944832533, 946034057, 946787377, + 947878081, 949317217, 949697233, 952893881, 954924013, 957600541, 957631249, 958131157, 958735681, 960269377, + 960946321, 962442001, 962489557, 962523169, 964412837, 965501857, 967266451, 967287751, 967790401, 968283247, + 968413217, 968751241, 969528337, 970586713, 971975071, 974113601, 974471243, 974774401, 975576281, 976396961, + 977483449, 979363153, 980056507, 980725201, 981484561, 983456377, 984133441, 984252001, 985052881, 985075681, + 987842101, 994133479, 995586373, 995650921, 997836841, 998489017, 998590601, 998596741, 998724481, 999828727, + 1002261781, 1003062061, 1005402133, 1005833971, 1006800829, 1008777001, 1008839999, 1009025263, 1009140161, 1011319501, + 1011333061, 1011570457, 1011909271, 1012438391, 1013833153, 1015339441, 1015626151, 1017748057, 1020515761, 1021281301, + 1022336611, 1024041853, 1024123501, 1024605121, 1025035129, 1026738161, 1027744453, 1028494429, 1034252929, 1034958601, + 1040234231, 1049584313, 1050102901, 1050535501, 1054999441, 1055009117, 1056121453, 1057426651, 1063212481, 1065508321, + 1065602281, 1066972301, 1069388497, 1070639389, 1070941987, 1071512749, 1071643249, 1072898711, 1073159281, 1073288581, + 1073484823, 1075100041, 1077133397, 1078467589, 1081798061, 1082472553, 1084241341, 1084444481, 1090858081, 1093150081, + 1093352833, 1093526353, 1094042321, 1097416321, 1098743563, 1100624857, 1101623381, 1101673501, 1102573501, 1102750013, + 1104194521, 1105038871, 1106529761, 1106580817, 1106595493, 1107138961, 
1108135381, 1109304913, 1110582947, 1111205873, + 1111939201, 1112671603, 1114277221, 1116379301, 1117202557, 1117785881, 1117828001, 1117890019, 1119412321, 1120076281, + 1120981021, 1121176981, 1123406047, 1123625501, 1123727617, 1124396521, 1125038377, 1127040769, 1130933429, 1134367777, + 1138289041, 1138607233, 1139137057, 1140573601, 1142466151, 1147434289, 1148578201, 1150229761, 1151670001, 1153164097, + 1153440289, 1154343961, 1154691409, 1154987209, 1155939709, 1156761911, 1156993373, 1157839381, 1159421509, 1160844821, + 1163098249, 1163227759, 1164218641, 1165717129, 1166475601, 1166598217, 1168221121, 1168256953, 1168492417, 1173229201, + 1173545533, 1174300093, 1180970407, 1181566219, 1183338241, 1184554801, 1186325981, 1187235193, 1191153937, 1191216133, + 1192314817, 1192412033, 1192903531, 1193229577, 1193557093, 1195524181, 1196852273, 1198650961, 1198880261, 1200456577, + 1200778753, 1202142061, 1204205449, 1205606533, 1205772499, 1209998077, 1210393801, 1210562701, 1210653541, 1213619761, + 1217181061, 1217823517, 1217924159, 1219816261, 1219858921, 1220114377, 1221127013, 1222861271, 1223531677, 1223941657, + 1225128829, 1226230297, 1226855293, 1227220801, 1229491063, 1229751667, 1230446653, 1231362793, 1232445677, 1234125721, + 1234646533, 1235188597, 1235864033, 1236313501, 1236442421, 1238825569, 1242171349, 1242858317, 1249166881, 1249785941, + 1250656621, 1252236421, 1254277909, 1255665613, 1257102001, 1258903981, 1260332137, 1263293281, 1264145401, 1265477791, + 1266003461, 1266273793, 1266425101, 1267345081, 1269295201, 1269835201, 1270193401, 1270489621, 1270667353, 1272558739, + 1272866167, 1282447477, 1282568741, 1285636801, 1286298133, 1286298263, 1296613501, 1297443913, 1299072721, 1299784141, + 1299963601, 1301509249, 1301926081, 1302745481, 1306836001, 1307004641, 1307520469, 1307823661, 1308758533, 1308998741, + 1309723213, 1309983901, 1310329567, 1311255661, 1311616153, 1312332001, 1312573123, 1313396221, 1315858381, 1316169541, 
+ 1318126321, 1318717531, 1319978701, 1319992181, 1320793813, 1321058213, 1323668917, 1325172421, 1325329297, 1328256247, + 1329174601, 1329431689, 1331973329, 1341010577, 1341926401, 1343575381, 1344597577, 1344975721, 1345514101, 1345523401, + 1347387361, 1348964401, 1350685001, 1351126261, 1352453257, 1353051517, 1356241321, 1356328121, 1357459183, 1362463807, + 1362515701, 1362742561, 1365662917, 1366587661, 1366608377, 1368769681, 1371908137, 1372681861, 1375322101, 1376799577, + 1378646179, 1379464633, 1382453333, 1383283129, 1385656829, 1386705433, 1388972353, 1389353941, 1389975149, 1391890033, + 1393851553, 1394640941, 1394746081, 1394942473, 1397357851, 1398883201, 1400859847, 1401840833, 1404008369, 1404253369, + 1406826241, 1406851249, 1409372779, 1413803197, 1414154827, 1414529533, 1415969101, 1417986901, 1421475031, 1424503849, + 1425860101, 1426319563, 1426534201, 1427771089, 1428966001, 1432354901, 1435091377, 1438648993, 1440231941, 1440922891, + 1441139641, 1441678411, 1442945689, 1443388481, 1443742273, 1446298309, 1446434677, 1446818651, 1448921633, 1451635201, + 1454282449, 1454445413, 1456527461, 1457378449, 1461307717, 1463065501, 1463178817, 1463992661, 1464568381, 1465908193, + 1465945417, 1468540477, 1468824787, 1469059481, 1469960377, 1470080501, 1470650851, 1471628401, 1472221921, 1473580001, + 1477289941, 1481626513, 1482274513, 1482876673, 1483873861, 1483918801, 1485061471, 1486564301, 1493114149, 1495190699, + 1497221281, 1497965713, 1499971457, 1499989177, 1500142001, 1501165097, 1502171117, 1502403121, 1503240559, 1503705601, + 1504139521, 1504832033, 1507746241, 1509156013, 1510870241, 1511558533, 1515175087, 1515785041, 1517039371, 1518014689, + 1518290707, 1520190341, 1521221473, 1522302121, 1526732803, 1529648231, 1529819971, 1530495289, 1532419099, 1532569681, + 1532755369, 1533343261, 1534063081, 1535020133, 1536112001, 1536251047, 1536883357, 1537433899, 1537641691, 1538012449, + 1539583921, 1539804001, 1540454761, 
1540550413, 1541047813, 1541849761, 1541955409, 1544145121, 1545019813, 1545177581, + 1546106773, 1546340401, 1546508057, 1547140841, 1547543161, 1547712601, 1550924873, 1554270481, 1557118081, 1560312001, + 1560620041, 1561800833, 1565893201, 1566594551, 1567830241, 1568916311, 1574362441, 1574601601, 1577983489, 1578009401, + 1580449201, 1581576641, 1581714481, 1582783777, 1583230241, 1583658649, 1586436193, 1587650401, 1590394313, 1593706201, + 1595647351, 1595887921, 1598197201, 1602517949, 1603765021, 1603810561, 1603994701, 1609916491, 1609935913, 1612121473, + 1614508267, 1617795181, 1617921667, 1619447741, 1620646177, 1627103521, 1627898401, 1628692201, 1630062253, 1630307617, + 1631314609, 1632286673, 1632513601, 1633044241, 1636185601, 1637434657, 1637436457, 1637930893, 1638294661, 1639351981, + 1639846391, 1641971701, 1642814653, 1644637051, 1645413001, 1647225529, 1648076041, 1649430889, 1650265549, 1650682153, + 1654940509, 1655660761, 1656229921, 1656280033, 1656917377, 1659009601, 1661202113, 1668037621, 1668926629, 1669893661, + 1671603667, 1671714241, 1672125131, 1674091141, 1674658133, 1675978193, 1678274581, 1679130641, 1680901381, 1683174533, + 1685433413, 1686001861, 1687248001, 1691745821, 1692605041, 1694128129, 1695158921, 1696893101, 1698707377, 1699279441, + 1700250049, 1709909293, 1710753001, 1712392321, 1714322377, 1716160321, 1716714793, 1716774481, 1718013133, 1718088301, + 1719197621, 1721061497, 1721986313, 1722007169, 1722685777, 1725675451, 1726372441, 1731048937, 1731995497, 1732924001, + 1734059291, 1734285601, 1735071913, 1736481601, 1738687469, 1740214841, 1742288881, 1742815621, 1743166441, 1744605097, + 1746692641, 1746721681, 1749124829, 1750412161, 1754818561, 1757148121, 1760014561, 1766984389, 1767234613, 1769091241, + 1769267761, 1770236893, 1771303801, 1772267281, 1773582977, 1776439261, 1776820033, 1779649381, 1779892577, 1784306273, + 1784638309, 1785843547, 1786005521, 1787934881, 1790023861, 1791426787, 1792442737, 
1792588813, 1794814103, 1801558201, + 1801774081, 1802510669, 1803768091, 1804906517, 1805947313, 1809888967, 1816408273, 1817067169, 1819829749, 1820306953, + 1821514633, 1828682101, 1828887061, 1831258601, 1835114401, 1837156049, 1837599769, 1839568981, 1841034961, 1841099261, + 1841479501, 1844028961, 1846171781, 1847811673, 1849964117, 1850233897, 1850598961, 1852496761, 1853926777, 1854084649, + 1854940231, 1856689453, 1857221281, 1858098497, 1858197961, 1860373241, 1861026133, 1861880689, 1862880401, 1866409861, + 1867906721, 1868682241, 1871987041, 1872937057, 1873177693, 1874634721, 1874849929, 1878691753, 1879111697, 1879623157, + 1879775501, 1883509633, 1883785681, 1885915841, 1894909141, 1894955311, 1897700113, 1899081757, 1899525601, 1900687381, + 1903447841, 1904658913, 1905958891, 1908088001, 1909566073, 1910134309, 1911197947, 1912950241, 1914303841, 1915391521, + 1916987593, 1917397637, 1920301951, 1921309633, 1922092567, 1922687293, 1923224689, 1923311317, 1923845801, 1924201501, + 1925042737, 1928903971, 1929862849, 1930403333, 1930447501, 1930534453, 1930915169, 1934350351, 1938264241, 1940048881, + 1943951041, 1944125633, 1945042181, 1950987193, 1952513369, 1952968753, 1957705177, 1959659857, 1960708261, 1963149553, + 1965007601, 1968002149, 1970065681, 1974474049, 1977257441, 1982123893, 1982826961, 1988071801, 1988713189, 1988835713, + 1988965861, 1989192277, 1991063449, 1995784961, 1995830761, 1996231189, 1996339649, 1997844157, 1998780001, 1999053601, + 1999111801, 1999743661, 2004299641, 2007646961, 2013554869, 2013834961, 2016481477, 2017021333, 2017509601, 2019564769, + 2021392369, 2021884343, 2027675701, 2028279793, 2028631361, 2028812399, 2029830409, 2030600833, 2036224321, 2043173273, + 2049293401, 2050617713, 2052149221, 2054711381, 2055634561, 2057267941, 2057835781, 2058072041, 2059739221, 2062612033, + 2068867841, 2070739441, 2072624761, 2076192007, 2081039297, 2081551753, 2082146617, 2083034113, 2083997441, 2085453649, + 
2085882661, 2086645009, 2093300401, 2095627153, 2096046457, 2097317377, 2100292841, 2101470541, 2101744837, 2104994449, + 2106147457, 2107148761, 2114643217, 2115769633, 2115986557, 2116483027, 2116541221, 2117031263, 2117555641, 2118621097, + 2120096161, 2123601751, 2124078653, 2124691213, 2127197489, 2128104001, 2129304997, 2130134533, 2131004737, 2131811501, + 2140699681, 2140771609, 2141340833, 2144961253, 2147418113, 2147429509, 2152627801, 2154446641, 2155416251, 2156151313, + 2164282177, 2168431201, 2170282969, 2172155819, 2173499329, 2173540951, 2173579801, 2175126601, 2175406201, 2175646177, + 2177374321, 2177645557, 2178082901, 2178939221, 2180221201, 2182281601, 2182802689, 2185362233, 2187717761, 2193980881, + 2199617701, 2200115713, 2201924341, 2202101761, 2202205897, 2203649197, 2203856497, 2206095589, 2210578759, 2213431729, + 2216960929, 2217879901, 2219072017, 2224252801, 2229468697, 2231332357, 2233031701, 2240507821, 2241880033, 2241982009, + 2244932281, 2245519981, 2246762899, 2248354153, 2251732033, 2254314241, 2254757077, 2256197761, 2256748777, 2256751837, + 2262861901, 2269307587, 2274584089, 2283289681, 2284416181, 2289251669, 2289624793, 2290316377, 2290910257, 2291205461, + 2292068143, 2295209281, 2296995121, 2299190401, 2300628601, 2300795353, 2301745249, 2304120001, 2308966661, 2309241601, + 2309405617, 2311558021, 2311575001, 2315137261, 2320527613, 2323147201, 2324867399, 2329584217, 2330569541, 2331181621, + 2335341601, 2338157597, 2338728001, 2340460487, 2345907961, 2347597981, 2352371251, 2354453561, 2355230749, 2355320101, + 2355622721, 2355649921, 2355735089, 2358534361, 2360261989, 2370771181, 2370928337, 2371350101, 2372976563, 2374232977, + 2375415841, 2377166401, 2378309041, 2381782597, 2382678101, 2383164577, 2385574201, 2389072321, 2389544977, 2393708761, + 2394311233, 2398393661, 2404912501, 2411128441, 2412172153, 2412675721, 2413973071, 2422296241, 2423401681, 2425249601, + 2428648967, 2428870753, 2428986913, 2429407961, 
2430697513, 2431136401, 2431144801, 2432761633, 2432860273, 2433791593, + 2434964321, 2434974433, 2435091221, 2436691321, 2437907779, 2438778413, 2442050353, 2442454561, 2443708961, 2444950561, + 2448039497, 2448374689, 2453473049, 2454285751, 2456536681, 2457846161, 2463713281, 2471205361, 2473120961, 2473189441, + 2473823353, 2474308069, 2474676949, 2476283239, 2477814193, 2478643907, 2480147521, 2480343553, 2482435981, 2482682131, + 2484408301, 2486017249, 2488420801, 2488591117, 2492480233, 2494660033, 2494984321, 2495834329, 2499327041, 2501012599, + 2501771329, 2502525637, 2504008609, 2506529257, 2506733189, 2507121037, 2508178843, 2513230891, 2516684801, 2519297089, + 2525070241, 2526566041, 2528291341, 2529410281, 2529827821, 2529854713, 2530351561, 2532630787, 2533465661, 2533797017, + 2535516173, 2537105761, 2539406281, 2539736257, 2540469901, 2541660367, 2542479481, 2544590161, 2545934077, 2548051801, + 2550139253, 2550780277, 2551365769, 2552418761, 2553272929, 2555391481, 2561945401, 2564536201, 2565186137, 2570087521, + 2571180247, 2575060949, 2575737361, 2577345541, 2582092189, 2582246701, 2582952769, 2583322381, 2584460701, 2588054401, + 2588582089, 2590663681, 2593065721, 2595276353, 2597289241, 2597294701, 2598933481, 2600611861, 2602343521, 2602378721, + 2604465013, 2604803701, 2611122229, 2611461529, 2613382201, 2614688801, 2616180821, 2617563031, 2621080741, 2621977627, + 2622993661, 2624549929, 2625903601, 2626783921, 2627284987, 2630643401, 2632605049, 2634284801, 2634804481, 2634820813, + 2638067881, 2639099233, 2642159809, 2642582251, 2646751249, 2646790033, 2648662777, 2649907201, 2650820329, 2651507713, + 2654716321, 2656494271, 2658630913, 2658696301, 2659265701, 2668095181, 2668469431, 2670972949, 2672605657, 2672651521, + 2676053333, 2677147201, 2677821121, 2678785621, 2681041843, 2682823681, 2683742491, 2684284441, 2687655169, 2688124001, + 2689427281, 2690408533, 2690867401, 2693739751, 2695115473, 2700818017, 2700891839, 2701878941, 
2704957909, 2706863833, + 2707661501, 2716157989, 2716275007, 2717428033, 2719319513, 2721666817, 2721721939, 2723859001, 2725357249, 2733156029, + 2736316301, 2738184697, 2740336561, 2744329909, 2746021741, 2753333227, 2753538001, 2759392633, 2765323397, 2766006253, + 2767672189, 2769080161, 2769602333, 2774295577, 2777887297, 2778304273, 2779477741, 2781117721, 2781226477, 2786028337, + 2787998641, 2789218909, 2800352011, 2805762961, 2809635901, 2812672981, 2814748201, 2823570433, 2824256377, 2824804693, + 2824854913, 2828205397, 2832384133, 2832743713, 2837697773, 2837917633, 2840634109, 2840871041, 2841190381, 2847894377, + 2848466281, 2848722131, 2855046421, 2855071801, 2855512909, 2862066481, 2865483601, 2866005139, 2866527841, 2870377309, + 2871536561, 2872527733, 2872948321, 2874382853, 2877769501, 2881429741, 2882370481, 2885594497, 2887955533, 2890316801, + 2890414873, 2892426029, 2894667781, 2895004927, 2899294889, 2903776129, 2915953633, 2916247819, 2918295451, 2920691161, + 2923042141, 2924158001, 2929062533, 2929106753, 2930831641, 2931708097, 2932327549, 2936227603, 2936958181, 2941174897, + 2941343633, 2944555681, 2944677961, 2945208001, 2945549881, 2951136343, 2956724317, 2957320351, 2965700233, 2967053953, + 2968206601, 2974506841, 2975377429, 2976930001, 2978766341, 2980689601, 2986025677, 2987414977, 2990152901, 2993462713, + 2993495041, 2994098281, 2994415201, 2998202353, 2998919873, 3000688381, 3001561441, 3002647829, 3004443679, 3009628301, + 3011421841, 3014101261, 3015502181, 3016957381, 3017444761, 3018147217, 3018576689, 3019916461, 3025350343, 3026575553, + 3028586471, 3030393901, 3033332641, 3034402681, 3034817209, 3035375047, 3036079729, 3037295801, 3037781251, 3038880473, + 3039681457, 3041984353, 3042630533, 3048159841, 3050190163, 3056100623, 3056160929, 3057886591, 3058670677, 3059397793, + 3063685633, 3065998717, 3076505209, 3077122133, 3079496551, 3082054697, 3082068013, 3083053387, 3083537689, 3083884651, + 3088408429, 
3089013313, 3091019777, 3094763851, 3099670657, 3103800701, 3112974481, 3114125071, 3115667521, 3120445697, + 3122287981, 3129914881, 3133899409, 3135040133, 3143282221, 3145410761, 3150972917, 3156599161, 3156643141, 3157579861, + 3163106953, 3166504273, 3167442721, 3170262409, 3172658653, 3175204531, 3175255717, 3178375201, 3181356263, 3181391641, + 3182606857, 3182655361, 3182891401, 3185472001, 3187035113, 3187421077, 3187939921, 3196397821, 3196431829, 3197565001, + 3197632441, 3197911001, 3197911741, 3199164901, 3205663921, 3207297773, 3208902491, 3212465437, 3215031751, 3217412881, + 3219808411, 3221580281, 3222693421, 3224143441, 3225081473, 3227082823, 3227209057, 3229131137, 3233558021, 3237992101, + 3242533897, 3248236309, 3250348417, 3250700737, 3252148621, 3257334541, 3258647809, 3258892801, 3261114601, 3263097641, + 3263568901, 3263626957, 3264820001, 3265122451, 3267417677, 3268506541, 3268841941, 3270933121, 3271999249, 3272030401, + 3272702497, 3274264033, 3275671969, 3276075709, 3277047649, 3278640289, 3280067129, 3282974857, 3287174129, 3288757249, + 3295362727, 3296403601, 3299246833, 3302322241, 3304307341, 3305829073, 3306686659, 3306957593, 3310858777, 3312489577, + 3312536569, 3313196881, 3315139717, 3320669437, 3323308501, 3323590463, 3323829169, 3328354801, 3332800021, 3334350781, + 3340214413, 3342005633, 3344191241, 3346172189, 3347908801, 3349218881, 3350993969, 3352091557, 3355382857, 3355953001, + 3357417181, 3359737921, 3360511981, 3369139201, 3371024521, 3371452921, 3371693063, 3372667121, 3373086601, 3381052177, + 3381901921, 3385842877, 3386603221, 3387014401, 3387487351, 3389030261, 3395091311, 3399205591, 3399890413, 3402234749, + 3407609221, 3407772817, 3407952169, 3408135121, 3409339393, 3411250081, 3411574801, 3411829693, 3412575097, 3415379701, + 3415832137, 3417522841, 3420143941, 3421845001, 3423222757, 3423580481, 3427050673, 3428133103, 3429457921, 3429982081, + 3430804297, 3432695921, 3432997537, 3433458073, 3434575327, 
3435973837, 3440195713, 3443704261, 3449768513, 3450717901, + 3453900913, 3458257741, 3461861761, 3463907761, 3464236901, 3466158361, 3470716657, 3474335437, 3480174001, 3482161261, + 3485747521, 3489958697, 3491763493, 3492178873, 3492883081, 3493262761, 3497607433, 3504132113, 3512030497, 3512291021, + 3512369857, 3513604657, 3516565057, 3519318721, 3524086333, 3525088961, 3529119361, 3529864391, 3532687201, 3533662129, + 3533856913, 3538213381, 3542303047, 3543203333, 3548378341, 3549286001, 3549988261, 3552158521, 3553567057, 3557646401, + 3562963973, 3563340457, 3566428301, 3574891757, 3582711841, 3583249921, 3583604161, 3584800801, 3586833253, 3587553971, + 3589937261, 3590409439, 3593276353, 3594110081, 3596491907, 3596815169, 3598772761, 3602006101, 3605151241, 3611571121, + 3612298321, 3612825221, 3614770573, 3616574081, 3620631169, 3628526287, 3630596257, 3631828481, 3632452741, 3635993089, + 3649116277, 3649965281, 3650158849, 3651572609, 3656355841, 3658730893, 3662387977, 3662503093, 3663084541, 3668926801, + 3669587533, 3672754633, 3677180797, 3679657997, 3682471321, 3685647701, 3685775741, 3692307161, 3695628133, 3697278427, + 3700801861, 3705582073, 3705623281, 3708123301, 3708905341, 3709626961, 3712887289, 3713287801, 3713448769, 3718226401, + 3721486081, 3723410161, 3723699373, 3725016749, 3727828501, 3729097633, 3733761739, 3736293461, 3745192001, 3746101189, + 3749383681, 3751554581, 3751782737, 3754680403, 3756668401, 3759781369, 3760622689, 3760896133, 3762110881, 3767640601, + 3773061337, 3774337201, 3784123501, 3787491457, 3798040471, 3798626833, 3799111681, 3800084401, 3805699501, 3807112123, + 3807308269, 3807749821, 3809018947, 3813919453, 3817561777, 3817706621, 3821233121, 3827035237, 3832807681, 3833208961, + 3842941741, 3846174151, 3846532801, 3847106803, 3850058689, 3852800033, 3863326897, 3865604023, 3867183937, 3874471147, + 3874523017, 3875096893, 3875965417, 3886515361, 3886643801, 3887423437, 3887635753, 3891892421, 3891919417, 
3894053311, + 3896079281, 3897241129, 3897869201, 3898906129, 3900327241, 3903711841, 3905533721, 3905876501, 3907577521, 3907752241, + 3912174421, 3914880337, 3914923211, 3915467341, 3915604421, 3915921241, 3918227437, 3922321561, 3926912669, 3929293061, + 3934940833, 3935864017, 3936123601, 3945165841, 3947233201, 3947383201, 3953408801, 3953949421, 3955572001, 3958597301, + 3958930441, 3959578801, 3960728641, 3962037061, 3966350203, 3967343161, 3971095301, 3973556837, 3979485931, 3982017601, + 3987528793, 3987960913, 3991124341, 3992697997, 3997536427, 4005660961, 4007365741, 4011996871, 4015548769, 4017684529, + 4018283501, 4020144133, 4026822577, 4027012021, 4027518961, 4028465873, 4028771849, 4031223841, 4034969401, 4034993269, + 4035498409, 4036395581, 4042538497, 4044601751, 4044884689, 4048493983, 4053267217, 4054039841, 4057195309, 4058433931, + 4059776533, 4060942381, 4061009971, 4064633821, 4067039461, 4067887501, 4068671881, 4071644893, 4075241633, 4075721921, + 4076009857, 4079665633, 4079682361, 4083376067, 4085074909, 4088147617, 4088838913, 4092929149, 4098258707, 4099180801, + 4100934241, 4103745689, 4105691393, 4108970251, 4109461709, 4109711581, 4110320663, 4113013141, 4115891893, 4117058221, + 4117447441, 4121286907, 4127050621, 4129914673, 4133928761, 4135847101, 4136916001, 4137262541, 4138838401, 4139015987, + 4150174393, 4155375349, 4157008813, 4162880401, 4166032873, 4183664101, 4185636781, 4186561633, 4187360341, 4191864013, + 4192060699, 4195843037, 4196323561, 4204344601, 4206006229, 4206295433, 4212105409, 4215885697, 4218900001, 4220122321, + 4232966251, 4234224601, 4237212061, 4243744201, 4244022301, 4244663651, 4247990917, 4250920459, 4251904273, 4255695013, + 4257003353, 4261352869, 4271267333, 4275011401, 4277526901, 4278305651, 4282867213, 4285148981, 4293088801, 4294901761, + } + + primes16 = [65536]byte{ + 2, 1, 1, 2, 1, 2, 1, 4, 3, 2, // 0-9 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 10-19 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 20-29 + 1, 
6, 5, 4, 3, 2, 1, 4, 3, 2, // 30-39 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 40-49 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 50-59 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 60-69 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 70-79 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 80-89 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 90-99 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 100-109 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 110-119 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 120-129 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 130-139 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 140-149 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 150-159 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 160-169 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 170-179 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 180-189 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 190-199 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 200-209 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 210-219 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 220-229 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 230-239 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 240-249 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 250-259 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 260-269 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 270-279 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 280-289 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 290-299 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 300-309 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 310-319 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 320-329 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 330-339 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 340-349 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 350-359 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 360-369 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 370-379 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 380-389 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 390-399 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 400-409 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 410-419 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 420-429 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 430-439 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 440-449 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 450-459 + 1, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 460-469 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 470-479 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 480-489 + 1, 8, 
7, 6, 5, 4, 3, 2, 1, 4, // 490-499 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 500-509 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 510-519 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 520-529 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 530-539 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 540-549 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 550-559 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 560-569 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 570-579 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 580-589 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 590-599 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 600-609 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 610-619 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 620-629 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 630-639 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 640-649 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 650-659 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 660-669 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 670-679 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 680-689 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 690-699 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 700-709 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 710-719 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 720-729 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 730-739 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 740-749 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 750-759 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 760-769 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 770-779 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 780-789 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 790-799 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 800-809 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 810-819 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 820-829 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 830-839 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 840-849 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 850-859 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 860-869 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 870-879 + 1, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 880-889 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 890-899 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 900-909 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 910-919 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 920-929 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 930-939 + 1, 6, 5, 4, 3, 
2, 1, 6, 5, 4, // 940-949 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 950-959 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 960-969 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 970-979 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 980-989 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 990-999 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 1000-1009 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 1010-1019 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1020-1029 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 1030-1039 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 1040-1049 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1050-1059 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 1060-1069 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 1070-1079 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 1080-1089 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 1090-1099 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 1100-1109 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 1110-1119 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 1120-1129 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 1130-1139 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1140-1149 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 1150-1159 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 1160-1169 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1170-1179 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 1180-1189 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 1190-1199 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 1200-1209 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 1210-1219 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 1220-1229 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 1230-1239 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 1240-1249 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 1250-1259 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 1260-1269 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 1270-1279 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 1280-1289 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 1290-1299 + 1, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 1300-1309 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 1310-1319 + 1, 6, 5, 4, 3, 2, 1, 34, 33, 32, // 1320-1329 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 1330-1339 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 1340-1349 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1350-1359 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 1360-1369 + 3, 2, 1, 8, 
7, 6, 5, 4, 3, 2, // 1370-1379 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 1380-1389 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 1390-1399 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 1400-1409 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 1410-1419 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 1420-1429 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 1430-1439 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 1440-1449 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 1450-1459 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1460-1469 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1470-1479 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 1480-1489 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 1490-1499 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1500-1509 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 1510-1519 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 1520-1529 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 1530-1539 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 1540-1549 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 1550-1559 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 1560-1569 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 1570-1579 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 1580-1589 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 1590-1599 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 1600-1609 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 1610-1619 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 1620-1629 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 1630-1639 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 1640-1649 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 1650-1659 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 24, // 1660-1669 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 1670-1679 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 1680-1689 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 1690-1699 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 1700-1709 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1710-1719 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 1720-1729 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 1730-1739 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 1740-1749 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 1750-1759 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 1760-1769 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 1770-1779 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 1780-1789 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1790-1799 + 
1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1800-1809 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 1810-1819 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 1820-1829 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 1830-1839 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 1840-1849 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1850-1859 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 1860-1869 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 1870-1879 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 1880-1889 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1890-1899 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 1900-1909 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 1910-1919 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 1920-1929 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 1930-1939 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 1940-1949 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 1950-1959 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 1960-1969 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 1970-1979 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 1980-1989 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 1990-1999 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 2000-2009 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 2010-2019 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 2020-2029 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 2030-2039 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 2040-2049 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 2050-2059 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 2060-2069 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 2070-2079 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 2080-2089 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 2090-2099 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 2100-2109 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 2110-2119 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 2120-2129 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 2130-2139 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 2140-2149 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 2150-2159 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 2160-2169 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 2170-2179 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 2180-2189 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 2190-2199 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 2200-2209 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 2210-2219 + 1, 16, 15, 
14, 13, 12, 11, 10, 9, 8, // 2220-2229 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 2230-2239 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 2240-2249 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 2250-2259 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 2260-2269 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 2270-2279 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 2280-2289 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 2290-2299 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 2300-2309 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 2310-2319 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 2320-2329 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 2330-2339 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 2340-2349 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 2350-2359 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 2360-2369 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 2370-2379 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 2380-2389 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 2390-2399 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 2400-2409 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 2410-2419 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 2420-2429 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 2430-2439 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 2440-2449 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 2450-2459 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 2460-2469 + 3, 2, 1, 4, 3, 2, 1, 26, 25, 24, // 2470-2479 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 2480-2489 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 2490-2499 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 2500-2509 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 2510-2519 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 2520-2529 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 2530-2539 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 2540-2549 + 1, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 2550-2559 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 2560-2569 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 2570-2579 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 2580-2589 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 2590-2599 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 2600-2609 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 2610-2619 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 2620-2629 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 2630-2639 + 7, 6, 5, 4, 3, 
2, 1, 10, 9, 8, // 2640-2649 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 2650-2659 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 2660-2669 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 2670-2679 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 2680-2689 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 2690-2699 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 2700-2709 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 2710-2719 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 2720-2729 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 2730-2739 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 2740-2749 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 2750-2759 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 2760-2769 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 2770-2779 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 2780-2789 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 2790-2799 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 2800-2809 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 2810-2819 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 2820-2829 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 2830-2839 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 2840-2849 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 2850-2859 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 2860-2869 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 2870-2879 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 2880-2889 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 2890-2899 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 2900-2909 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 2910-2919 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 2920-2929 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 2930-2939 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 2940-2949 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 2950-2959 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 2960-2969 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 2970-2979 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 2980-2989 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 2990-2999 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3000-3009 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 3010-3019 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 3020-3029 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 3030-3039 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 3040-3049 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3050-3059 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 3060-3069 + 9, 8, 
7, 6, 5, 4, 3, 2, 1, 4, // 3070-3079 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 20, // 3080-3089 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 3090-3099 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 3100-3109 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 3110-3119 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 3120-3129 + 7, 6, 5, 4, 3, 2, 1, 26, 25, 24, // 3130-3139 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 3140-3149 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 3150-3159 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 3160-3169 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3170-3179 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 3180-3189 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 3190-3199 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 3200-3209 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 3210-3219 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 3220-3229 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 3230-3239 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3240-3249 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 3250-3259 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3260-3269 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 3270-3279 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 3280-3289 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 3290-3299 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 3300-3309 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 3310-3319 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 3320-3329 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 3330-3339 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 3340-3349 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 3350-3359 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3360-3369 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 3370-3379 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 3380-3389 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 3390-3399 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 3400-3409 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 3410-3419 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 3420-3429 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 3430-3439 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 3440-3449 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 3450-3459 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 22, // 3460-3469 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 3470-3479 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
// 3480-3489 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 3490-3499 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3500-3509 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 3510-3519 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 3520-3529 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 3530-3539 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 3540-3549 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 3550-3559 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3560-3569 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3570-3579 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 3580-3589 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 3590-3599 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 3600-3609 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 3610-3619 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 3620-3629 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 3630-3639 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 3640-3649 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 3650-3659 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3660-3669 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 3670-3679 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3680-3689 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 3690-3699 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 3700-3709 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 3710-3719 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 3720-3729 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 3730-3739 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 3740-3749 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3750-3759 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 3760-3769 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 3770-3779 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 3780-3789 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 3790-3799 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 3800-3809 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3810-3819 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 3820-3829 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 3830-3839 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 3840-3849 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 3850-3859 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 3860-3869 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 3870-3879 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 3880-3889 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 3890-3899 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 3900-3909 + 1, 6, 5, 4, 3, 
2, 1, 2, 1, 4, // 3910-3919 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 3920-3929 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 3930-3939 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 3940-3949 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 3950-3959 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 3960-3969 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 3970-3979 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 3980-3989 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 3990-3999 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 4000-4009 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 4010-4019 + 1, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 4020-4029 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 4030-4039 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 4040-4049 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 4050-4059 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 4060-4069 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 4070-4079 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4080-4089 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 4090-4099 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4100-4109 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 4110-4119 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 4120-4129 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 4130-4139 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 4140-4149 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 18, // 4150-4159 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 4160-4169 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 4170-4179 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 4180-4189 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4190-4199 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4200-4209 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 4210-4219 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 4220-4229 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4230-4239 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 4240-4249 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 4250-4259 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4260-4269 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 4270-4279 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 4280-4289 + 7, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 4290-4299 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 4300-4309 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 4310-4319 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 4320-4329 + 7, 
6, 5, 4, 3, 2, 1, 2, 1, 10, // 4330-4339 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 4340-4349 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 4350-4359 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 4360-4369 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 4370-4379 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4380-4389 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 4390-4399 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 4400-4409 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4410-4419 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 4420-4429 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4430-4439 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 4440-4449 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 4450-4459 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 4460-4469 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4470-4479 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 4480-4489 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 4490-4499 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 4500-4509 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 4510-4519 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 4520-4529 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 4530-4539 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 4540-4549 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4550-4559 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 4560-4569 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 4570-4579 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 4580-4589 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 4590-4599 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 4600-4609 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4610-4619 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 4620-4629 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 4630-4639 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 4640-4649 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 4650-4659 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 4660-4669 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 4670-4679 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4680-4689 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 4690-4699 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 4700-4709 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4710-4719 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 4720-4729 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 4730-4739 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4740-4749 + 1, 8, 
7, 6, 5, 4, 3, 2, 1, 24, // 4750-4759 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 4760-4769 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 4770-4779 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 4780-4789 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 4790-4799 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 4800-4809 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 4810-4819 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4820-4829 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 4830-4839 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 4840-4849 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4850-4859 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4860-4869 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 4870-4879 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 4880-4889 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 4890-4899 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 4900-4909 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 4910-4919 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 4920-4929 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 4930-4939 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 4940-4949 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 4950-4959 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 4960-4969 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 4970-4979 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 4980-4989 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 4990-4999 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 5000-5009 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 5010-5019 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 5020-5029 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 5030-5039 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 5040-5049 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 5050-5059 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 5060-5069 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 5070-5079 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 5080-5089 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 5090-5099 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 5100-5109 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 28, // 5110-5119 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 5120-5129 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 5130-5139 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 5140-5149 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 5150-5159 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 5160-5169 + 1, 8, 
7, 6, 5, 4, 3, 2, 1, 10, // 5170-5179 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 5180-5189 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 5190-5199 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 5200-5209 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 5210-5219 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 5220-5229 + 1, 2, 1, 4, 3, 2, 1, 24, 23, 22, // 5230-5239 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 5240-5249 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 5250-5259 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 5260-5269 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 5270-5279 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 5280-5289 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 5290-5299 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 5300-5309 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 5310-5319 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 5320-5329 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 5330-5339 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 5340-5349 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 5350-5359 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 5360-5369 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 5370-5379 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 5380-5389 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 5390-5399 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 5400-5409 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 5410-5419 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 5420-5429 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 5430-5439 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 5440-5449 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 5450-5459 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 5460-5469 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 5470-5479 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 5480-5489 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 5490-5499 + 1, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 5500-5509 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 5510-5519 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 5520-5529 + 1, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 5530-5539 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 5540-5549 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 5550-5559 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 5560-5569 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 5570-5579 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 5580-5589 + 
1, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 5590-5599 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 5600-5609 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 5610-5619 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 5620-5629 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 5630-5639 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 5640-5649 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 5650-5659 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 5660-5669 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 5670-5679 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 5680-5689 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 5690-5699 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 5700-5709 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 5710-5719 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 5720-5729 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 5730-5739 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 30, // 5740-5749 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 5750-5759 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 5760-5769 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 5770-5779 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 5780-5789 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 5790-5799 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 5800-5809 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 5810-5819 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 5820-5829 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 5830-5839 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 5840-5849 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 5850-5859 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 5860-5869 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 5870-5879 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 5880-5889 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 5890-5899 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 5900-5909 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 5910-5919 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 5920-5929 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 5930-5939 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 5940-5949 + 3, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 5950-5959 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 5960-5969 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 5970-5979 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 5980-5989 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 5990-5999 + 7, 6, 5, 4, 3, 2, 1, 4, 
3, 2, // 6000-6009 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 6010-6019 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 6020-6029 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 6030-6039 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 6040-6049 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 6050-6059 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 6060-6069 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 6070-6079 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 6080-6089 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 6090-6099 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 6100-6109 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 6110-6119 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 6120-6129 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 6130-6139 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 6140-6149 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 6150-6159 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 6160-6169 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 6170-6179 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 6180-6189 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 6190-6199 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 6200-6209 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 6210-6219 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 6220-6229 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 6230-6239 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 6240-6249 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 6250-6259 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 6260-6269 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 6270-6279 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 6280-6289 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 6290-6299 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 6300-6309 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 6310-6319 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 6320-6329 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 6330-6339 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 6340-6349 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 6350-6359 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 6360-6369 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 6370-6379 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 6380-6389 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 6390-6399 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 6400-6409 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 6410-6419 + 1, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 6420-6429 + 19, 18, 17, 
16, 15, 14, 13, 12, 11, 10, // 6430-6439 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 6440-6449 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 6450-6459 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 6460-6469 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 6470-6479 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 6480-6489 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 6490-6499 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 6500-6509 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 6510-6519 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 6520-6529 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 6530-6539 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 6540-6549 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 6550-6559 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 6560-6569 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 6570-6579 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 6580-6589 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 6590-6599 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 6600-6609 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 6610-6619 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 6620-6629 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 6630-6639 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 6640-6649 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 6650-6659 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 6660-6669 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 6670-6679 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 6680-6689 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 6690-6699 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 6700-6709 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 6710-6719 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 6720-6729 + 3, 2, 1, 4, 3, 2, 1, 24, 23, 22, // 6730-6739 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 6740-6749 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 6750-6759 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 6760-6769 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 6770-6779 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 6780-6789 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 6790-6799 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 6800-6809 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 6810-6819 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 6820-6829 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 6830-6839 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 
6840-6849 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 6850-6859 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 6860-6869 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 6870-6879 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 6880-6889 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 6890-6899 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 6900-6909 + 1, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 6910-6919 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 6920-6929 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 6930-6939 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 6940-6949 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 6950-6959 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 6960-6969 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 6970-6979 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 6980-6989 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 6990-6999 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 7000-7009 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 7010-7019 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 7020-7029 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 7030-7039 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 7040-7049 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 7050-7059 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 7060-7069 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 7070-7079 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 7080-7089 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 7090-7099 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 7100-7109 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 7110-7119 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 7120-7129 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 7130-7139 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 7140-7149 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 7150-7159 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 7160-7169 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 7170-7179 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 7180-7189 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 7190-7199 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 7200-7209 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 7210-7219 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 7220-7229 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 7230-7239 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 7240-7249 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 7250-7259 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 
// 7260-7269 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 7270-7279 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 7280-7289 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 7290-7299 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 7300-7309 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 7310-7319 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 7320-7329 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 7330-7339 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 7340-7349 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 7350-7359 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 7360-7369 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 7370-7379 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 7380-7389 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 7390-7399 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 7400-7409 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 7410-7419 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 7420-7429 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 7430-7439 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 7440-7449 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 18, // 7450-7459 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 7460-7469 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 7470-7479 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 7480-7489 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 7490-7499 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 7500-7509 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 7510-7519 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 7520-7529 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 7530-7539 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 7540-7549 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 7550-7559 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 7560-7569 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 7570-7579 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 7580-7589 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 7590-7599 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 7600-7609 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 7610-7619 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 7620-7629 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 7630-7639 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 20, // 7640-7649 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 7650-7659 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 7660-7669 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 7670-7679 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 
2, // 7680-7689 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 7690-7699 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 7700-7709 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 7710-7719 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 7720-7729 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 7730-7739 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 7740-7749 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 30, // 7750-7759 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 7760-7769 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 7770-7779 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 7780-7789 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 7790-7799 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 7800-7809 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 7810-7819 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 7820-7829 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 7830-7839 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 7840-7849 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 7850-7859 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 7860-7869 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 7870-7879 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 7880-7889 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 7890-7899 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 7900-7909 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 7910-7919 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 7920-7929 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 7930-7939 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 7940-7949 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 7950-7959 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 7960-7969 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 7970-7979 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 7980-7989 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 7990-7999 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 8000-8009 + 1, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 8010-8019 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 8020-8029 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 8030-8039 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 8040-8049 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 8050-8059 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 8060-8069 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 8070-8079 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 8080-8089 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 8090-8099 + 1, 10, 
9, 8, 7, 6, 5, 4, 3, 2, // 8100-8109 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 8110-8119 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 8120-8129 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 8130-8139 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 8140-8149 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 8150-8159 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 8160-8169 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 8170-8179 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 8180-8189 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 8190-8199 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 8200-8209 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 8210-8219 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 8220-8229 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 8230-8239 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 8240-8249 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 8250-8259 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 8260-8269 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 8270-8279 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 8280-8289 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 8290-8299 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 8300-8309 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 8310-8319 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 8320-8329 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 8330-8339 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 8340-8349 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 8350-8359 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 8360-8369 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 8370-8379 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 30, // 8380-8389 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 8390-8399 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 8400-8409 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 8410-8419 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 8420-8429 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 8430-8439 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 8440-8449 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 8450-8459 + 1, 6, 5, 4, 3, 2, 1, 34, 33, 32, // 8460-8469 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 8470-8479 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 8480-8489 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 8490-8499 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 8500-8509 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 
2, // 8510-8519 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 8520-8529 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 8530-8539 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 8540-8549 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 8550-8559 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 8560-8569 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 8570-8579 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 8580-8589 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 8590-8599 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 8600-8609 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 8610-8619 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 8620-8629 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 8630-8639 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 8640-8649 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 8650-8659 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 8660-8669 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 8670-8679 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 8680-8689 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 8690-8699 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 8700-8709 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 8710-8719 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 8720-8729 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 8730-8739 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 8740-8749 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 8750-8759 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 8760-8769 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 8770-8779 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 8780-8789 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 8790-8799 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 8800-8809 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 8810-8819 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 8820-8829 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 8830-8839 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 8840-8849 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 8850-8859 + 1, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 8860-8869 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 8870-8879 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 8880-8889 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 8890-8899 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 8900-8909 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 8910-8919 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 8920-8929 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 
8930-8939 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 8940-8949 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 8950-8959 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 8960-8969 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 8970-8979 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 8980-8989 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 8990-8999 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 9000-9009 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 9010-9019 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 9020-9029 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9030-9039 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 9040-9049 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 9050-9059 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 9060-9069 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 9070-9079 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9080-9089 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 9090-9099 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 9100-9109 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 9110-9119 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 9120-9129 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 9130-9139 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9140-9149 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 9150-9159 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 9160-9169 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 9170-9179 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 9180-9189 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 9190-9199 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 9200-9209 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9210-9219 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 9220-9229 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 9230-9239 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 9240-9249 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 9250-9259 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 9260-9269 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 9270-9279 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 9280-9289 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 9290-9299 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9300-9309 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 9310-9319 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 9320-9329 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 9330-9339 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 9340-9349 + 21, 20, 19, 18, 17, 16, 15, 
14, 13, 12, // 9350-9359 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9360-9369 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 9370-9379 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9380-9389 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 9390-9399 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 9400-9409 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 9410-9419 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9420-9429 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 22, // 9430-9439 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 9440-9449 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9450-9459 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 9460-9469 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 9470-9479 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9480-9489 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 9490-9499 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9500-9509 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9510-9519 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 9520-9529 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 9530-9539 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 9540-9549 + 1, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 9550-9559 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 9560-9569 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 9570-9579 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 9580-9589 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9590-9599 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 9600-9609 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 9610-9619 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 9620-9629 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 9630-9639 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 9640-9649 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9650-9659 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 9660-9669 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 9670-9679 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 9680-9689 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 9690-9699 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 9700-9709 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 9710-9719 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 9720-9729 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 9730-9739 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 9740-9749 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 9750-9759 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 9760-9769 + 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, // 9770-9779 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 9780-9789 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 9790-9799 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 9800-9809 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 9810-9819 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 9820-9829 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 9830-9839 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9840-9849 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 9850-9859 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9860-9869 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 9870-9879 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 9880-9889 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9890-9899 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 9900-9909 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 9910-9919 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 9920-9929 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 9930-9939 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 9940-9949 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 9950-9959 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 9960-9969 + 3, 2, 1, 34, 33, 32, 31, 30, 29, 28, // 9970-9979 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 9980-9989 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 9990-9999 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 28, // 10000-10009 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 10010-10019 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 10020-10029 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 10030-10039 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 10040-10049 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 10050-10059 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 10060-10069 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 10070-10079 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 10080-10089 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 10090-10099 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 10100-10109 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 10110-10119 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 10120-10129 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 10130-10139 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 10140-10149 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 10150-10159 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 10160-10169 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 10170-10179 + 1, 12, 11, 10, 9, 8, 
7, 6, 5, 4, // 10180-10189 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 10190-10199 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 10200-10209 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 10210-10219 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 10220-10229 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 10230-10239 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 10240-10249 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 10250-10259 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 10260-10269 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 10270-10279 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 10280-10289 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 10290-10299 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 10300-10309 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 10310-10319 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 10320-10329 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 10330-10339 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 10340-10349 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 10350-10359 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 10360-10369 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 10370-10379 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 10380-10389 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 28, // 10390-10399 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 10400-10409 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 10410-10419 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 10420-10429 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 10430-10439 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 10440-10449 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 10450-10459 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 10460-10469 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 10470-10479 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 10480-10489 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 10490-10499 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 10500-10509 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 10510-10519 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 10520-10529 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 10530-10539 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 10540-10549 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 10550-10559 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 10560-10569 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 10570-10579 + 
9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 10580-10589 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 10590-10599 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 10600-10609 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 10610-10619 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 10620-10629 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 10630-10639 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 10640-10649 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 10650-10659 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 10660-10669 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 10670-10679 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 10680-10689 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 10690-10699 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 10700-10709 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 10710-10719 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 10720-10729 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 10730-10739 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 10740-10749 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 10750-10759 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 10760-10769 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 10770-10779 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 10780-10789 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 32, // 10790-10799 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 10800-10809 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 10810-10819 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 10820-10829 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 10830-10839 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 10840-10849 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 10850-10859 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 10860-10869 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 10870-10879 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 10880-10889 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 10890-10899 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 28, // 10900-10909 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 10910-10919 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 10920-10929 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 10930-10939 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 10940-10949 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 10950-10959 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 10960-10969 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 10970-10979 + 7, 6, 5, 4, 
3, 2, 1, 6, 5, 4, // 10980-10989 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 10990-10999 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 11000-11009 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 11010-11019 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 11020-11029 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 11030-11039 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 11040-11049 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 11050-11059 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 11060-11069 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 11070-11079 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 11080-11089 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 11090-11099 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 11100-11109 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 11110-11119 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 11120-11129 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 11130-11139 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 11140-11149 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 11150-11159 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 11160-11169 + 1, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 11170-11179 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 11180-11189 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 11190-11199 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 11200-11209 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 11210-11219 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 11220-11229 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 11230-11239 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 11240-11249 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 11250-11259 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 11260-11269 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 11270-11279 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 11280-11289 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 11290-11299 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 11300-11309 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 11310-11319 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 11320-11329 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 11330-11339 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 11340-11349 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 11350-11359 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 11360-11369 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 11370-11379 + 3, 2, 
1, 10, 9, 8, 7, 6, 5, 4, // 11380-11389 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 11390-11399 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 11400-11409 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 11410-11419 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 11420-11429 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 11430-11439 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 11440-11449 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 11450-11459 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 11460-11469 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 11470-11479 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 11480-11489 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 11490-11499 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 11500-11509 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 11510-11519 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 11520-11529 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 11530-11539 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 11540-11549 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 11550-11559 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 11560-11569 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 11570-11579 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 11580-11589 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 11590-11599 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 11600-11609 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 11610-11619 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 11620-11629 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 11630-11639 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 11640-11649 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 11650-11659 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 11660-11669 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 11670-11679 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 11680-11689 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 11690-11699 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 11700-11709 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 11710-11719 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 11720-11729 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 11730-11739 + 3, 2, 1, 34, 33, 32, 31, 30, 29, 28, // 11740-11749 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 11750-11759 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 11760-11769 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 
4, // 11770-11779 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 11780-11789 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 11790-11799 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 11800-11809 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 11810-11819 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 11820-11829 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 24, // 11830-11839 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 11840-11849 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 11850-11859 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 11860-11869 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 11870-11879 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 11880-11889 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 11890-11899 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 11900-11909 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 11910-11919 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 11920-11929 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 11930-11939 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 11940-11949 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 11950-11959 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 11960-11969 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 11970-11979 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 11980-11989 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 11990-11999 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 12000-12009 + 1, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 12010-12019 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 12020-12029 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 12030-12039 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 12040-12049 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 12050-12059 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12060-12069 + 1, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 12070-12079 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 12080-12089 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 12090-12099 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 12100-12109 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 24, // 12110-12119 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 12120-12129 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 12130-12139 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 12140-12149 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 12150-12159 + 1, 2, 1, 34, 33, 32, 31, 30, 29, 28, // 12160-12169 + 27, 26, 25, 24, 23, 22, 21, 
20, 19, 18, // 12170-12179 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 12180-12189 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 12190-12199 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 12200-12209 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 12210-12219 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 12220-12229 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 12230-12239 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12240-12249 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 12250-12259 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 12260-12269 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 12270-12279 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 12280-12289 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12290-12299 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 12300-12309 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 12310-12319 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 12320-12329 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 12330-12339 + 3, 2, 1, 4, 3, 2, 1, 26, 25, 24, // 12340-12349 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 12350-12359 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 12360-12369 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 12370-12379 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12380-12389 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12390-12399 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 12400-12409 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 12410-12419 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 12420-12429 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 12430-12439 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12440-12449 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 12450-12459 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 12460-12469 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 12470-12479 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 12480-12489 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 12490-12499 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 12500-12509 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 12510-12519 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 12520-12529 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 12530-12539 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 12540-12549 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 12550-12559 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 12560-12569 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 12570-12579 + 3, 
2, 1, 6, 5, 4, 3, 2, 1, 12, // 12580-12589 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12590-12599 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12600-12609 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 12610-12619 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 12620-12629 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 12630-12639 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 12640-12649 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 12650-12659 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12660-12669 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 12670-12679 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 12680-12689 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 12690-12699 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 12700-12709 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 12710-12719 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 12720-12729 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 12730-12739 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 12740-12749 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 12750-12759 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 12760-12769 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12770-12779 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12780-12789 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 12790-12799 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 12800-12809 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12810-12819 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 12820-12829 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12830-12839 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 12840-12849 + 3, 2, 1, 36, 35, 34, 33, 32, 31, 30, // 12850-12859 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 12860-12869 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 12870-12879 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 12880-12889 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 12890-12899 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 12900-12909 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 12910-12919 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 12920-12929 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12930-12939 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 12940-12949 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 12950-12959 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 12960-12969 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 12970-12979 + 3, 2, 1, 18, 17, 16, 
15, 14, 13, 12, // 12980-12989 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 12990-12999 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 24, // 13000-13009 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 13010-13019 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 13020-13029 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 13030-13039 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 13040-13049 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 13050-13059 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 13060-13069 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 13070-13079 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 13080-13089 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 13090-13099 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 13100-13109 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13110-13119 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 13120-13129 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 13130-13139 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 13140-13149 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 13150-13159 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 13160-13169 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 13170-13179 + 3, 2, 1, 4, 3, 2, 1, 30, 29, 28, // 13180-13189 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 13190-13199 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 13200-13209 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 13210-13219 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 13220-13229 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13230-13239 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 13240-13249 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 13250-13259 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 13260-13269 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 13270-13279 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13280-13289 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 13290-13299 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 13300-13309 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 13310-13319 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 13320-13329 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 28, // 13330-13339 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 13340-13349 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 13350-13359 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 13360-13369 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13370-13379 + 1, 
16, 15, 14, 13, 12, 11, 10, 9, 8, // 13380-13389 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 13390-13399 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13400-13409 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 13410-13419 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 13420-13429 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13430-13439 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13440-13449 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 13450-13459 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 13460-13469 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 13470-13479 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 13480-13489 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 13490-13499 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 13500-13509 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 13510-13519 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 13520-13529 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 13530-13539 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 13540-13549 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 13550-13559 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 13560-13569 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 13570-13579 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13580-13589 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 13590-13599 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 13600-13609 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 13610-13619 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 13620-13629 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 13630-13639 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, // 13640-13649 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 13650-13659 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 13660-13669 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 13670-13679 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 13680-13689 + 1, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 13690-13699 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 13700-13709 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13710-13719 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 13720-13729 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 13730-13739 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13740-13749 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 13750-13759 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 13760-13769 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13770-13779 + 1, 8, 7, 6, 5, 
4, 3, 2, 1, 10, // 13780-13789 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 13790-13799 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 13800-13809 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 13810-13819 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 13820-13829 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13830-13839 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 13840-13849 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 13850-13859 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 13860-13869 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 13870-13879 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 13880-13889 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13890-13899 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 13900-13909 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 13910-13919 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 13920-13929 + 1, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 13930-13939 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 13940-13949 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 13950-13959 + 3, 2, 1, 4, 3, 2, 1, 30, 29, 28, // 13960-13969 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 13970-13979 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 13980-13989 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 13990-13999 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 14000-14009 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 14010-14019 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 14020-14029 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 14030-14039 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 14040-14049 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 14050-14059 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 14060-14069 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 14070-14079 + 1, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 14080-14089 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 14090-14099 + 7, 6, 5, 4, 3, 2, 1, 36, 35, 34, // 14100-14109 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 14110-14119 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 14120-14129 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 14130-14139 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 14140-14149 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 14150-14159 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 14160-14169 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, 
// 14170-14179 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 14180-14189 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 14190-14199 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 14200-14209 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 14210-14219 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 14220-14229 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 14230-14239 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 14240-14249 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 14250-14259 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 14260-14269 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 14270-14279 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 14280-14289 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 14290-14299 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 14300-14309 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 14310-14319 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 14320-14329 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 14330-14339 + 1, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 14340-14349 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 14350-14359 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 14360-14369 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 14370-14379 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 14380-14389 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 14390-14399 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 14400-14409 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 14410-14419 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 14420-14429 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 14430-14439 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 14440-14449 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 14450-14459 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 14460-14469 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 14470-14479 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 14480-14489 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 14490-14499 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 14500-14509 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 14510-14519 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 14520-14529 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 14530-14539 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 14540-14549 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 14550-14559 + 1, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 14560-14569 + 21, 20, 19, 18, 17, 
16, 15, 14, 13, 12, // 14570-14579 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 14580-14589 + 1, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 14590-14599 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 14600-14609 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 14610-14619 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 14620-14629 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 14630-14639 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 14640-14649 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 14650-14659 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 14660-14669 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 14670-14679 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 14680-14689 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 14690-14699 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 14700-14709 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 14710-14719 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 14720-14729 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 14730-14739 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 14740-14749 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 14750-14759 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 14760-14769 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 14770-14779 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 14780-14789 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 14790-14799 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 14800-14809 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 14810-14819 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 14820-14829 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 14830-14839 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 14840-14849 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 14850-14859 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 14860-14869 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 14870-14879 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 14880-14889 + 1, 6, 5, 4, 3, 2, 1, 26, 25, 24, // 14890-14899 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 14900-14909 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 14910-14919 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 14920-14929 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 14930-14939 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 14940-14949 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 14950-14959 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 14960-14969 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 
14970-14979 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 14980-14989 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 14990-14999 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 15000-15009 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 15010-15019 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15020-15029 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 15030-15039 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 15040-15049 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 15050-15059 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 15060-15069 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 15070-15079 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 15080-15089 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15090-15099 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 15100-15109 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15110-15119 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15120-15129 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 15130-15139 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 15140-15149 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15150-15159 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 15160-15169 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 15170-15179 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 15180-15189 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 15190-15199 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 15200-15209 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 15210-15219 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 15220-15229 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 15230-15239 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 15240-15249 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 15250-15259 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 15260-15269 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 15270-15279 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 15280-15289 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 15290-15299 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 15300-15309 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 15310-15319 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 15320-15329 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 15330-15339 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 15340-15349 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 15350-15359 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 15360-15369 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 15370-15379 + 3, 2, 1, 
8, 7, 6, 5, 4, 3, 2, // 15380-15389 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15390-15399 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 15400-15409 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 15410-15419 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 15420-15429 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 15430-15439 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 15440-15449 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15450-15459 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 15460-15469 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 15470-15479 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 15480-15489 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 15490-15499 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15500-15509 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 15510-15519 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 15520-15529 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15530-15539 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15540-15549 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 15550-15559 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 15560-15569 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15570-15579 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 15580-15589 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15590-15599 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 15600-15609 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 15610-15619 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 15620-15629 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15630-15639 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 15640-15649 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15650-15659 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 15660-15669 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 15670-15679 + 3, 2, 1, 44, 43, 42, 41, 40, 39, 38, // 15680-15689 + 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 15690-15699 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 15700-15709 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 15710-15719 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 15720-15729 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 15730-15739 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 15740-15749 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15750-15759 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 15760-15769 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 15770-15779 + 7, 6, 5, 4, 3, 2, 
1, 4, 3, 2, // 15780-15789 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 15790-15799 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 15800-15809 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 15810-15819 + 3, 2, 1, 36, 35, 34, 33, 32, 31, 30, // 15820-15829 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 15830-15839 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 15840-15849 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 15850-15859 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 15860-15869 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 15870-15879 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 15880-15889 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15890-15899 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 15900-15909 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 15910-15919 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 15920-15929 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 15930-15939 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 15940-15949 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 15950-15959 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15960-15969 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 15970-15979 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15980-15989 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 15990-15999 + 1, 6, 5, 4, 3, 2, 1, 26, 25, 24, // 16000-16009 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 16010-16019 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 16020-16029 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 16030-16039 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 16040-16049 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 16050-16059 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 16060-16069 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 16070-16079 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 16080-16089 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 16090-16099 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 16100-16109 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 16110-16119 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 16120-16129 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 16130-16139 + 1, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 16140-16149 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 16150-16159 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 16160-16169 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 
16170-16179 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 16180-16189 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 16190-16199 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 16200-16209 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 16210-16219 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 16220-16229 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 16230-16239 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 16240-16249 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 16250-16259 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 16260-16269 + 3, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 16270-16279 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 16280-16289 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 16290-16299 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 16300-16309 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 16310-16319 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 16320-16329 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 16330-16339 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 16340-16349 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 16350-16359 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 16360-16369 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 16370-16379 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 16380-16389 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 16390-16399 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 16400-16409 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 16410-16419 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 16420-16429 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 16430-16439 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 16440-16449 + 1, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 16450-16459 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 16460-16469 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 16470-16479 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 16480-16489 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 16490-16499 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 16500-16509 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 16510-16519 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 16520-16529 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 16530-16539 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 16540-16549 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 16550-16559 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 16560-16569 + 3, 2, 1, 30, 
29, 28, 27, 26, 25, 24, // 16570-16579 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 16580-16589 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 16590-16599 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 16600-16609 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 16610-16619 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 16620-16629 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 16630-16639 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 16640-16649 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 16650-16659 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 16660-16669 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 16670-16679 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 16680-16689 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 16690-16699 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 16700-16709 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 16710-16719 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 16720-16729 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 16730-16739 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 16740-16749 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 16750-16759 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 16760-16769 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 16770-16779 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 16780-16789 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 16790-16799 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 16800-16809 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 16810-16819 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 16820-16829 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 16830-16839 + 3, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 16840-16849 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 16850-16859 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 16860-16869 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 16870-16879 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 16880-16889 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 16890-16899 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 16900-16909 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 16910-16919 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 16920-16929 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 16930-16939 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 16940-16949 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 16950-16959 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 
10, // 16960-16969 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 16970-16979 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 16980-16989 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 16990-16999 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17000-17009 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17010-17019 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 17020-17029 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 17030-17039 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 17040-17049 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 17050-17059 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 17060-17069 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 17070-17079 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 17080-17089 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 17090-17099 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 17100-17109 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 17110-17119 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 17120-17129 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 17130-17139 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 17140-17149 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 17150-17159 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 17160-17169 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 17170-17179 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 17180-17189 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 17190-17199 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 22, // 17200-17209 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 17210-17219 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17220-17229 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 17230-17239 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 17240-17249 + 7, 6, 5, 4, 3, 2, 1, 34, 33, 32, // 17250-17259 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 17260-17269 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 17270-17279 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17280-17289 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 17290-17299 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 17300-17309 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 17310-17319 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 17320-17329 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 17330-17339 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17340-17349 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 17350-17359 + 17, 16, 15, 14, 13, 12, 
11, 10, 9, 8, // 17360-17369 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 17370-17379 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 17380-17389 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 17390-17399 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 17400-17409 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 17410-17419 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17420-17429 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 17430-17439 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 17440-17449 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 17450-17459 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 17460-17469 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 17470-17479 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 17480-17489 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 17490-17499 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 17500-17509 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, // 17510-17519 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 17520-17529 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 17530-17539 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17540-17549 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 17550-17559 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 17560-17569 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 17570-17579 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 17580-17589 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 17590-17599 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 17600-17609 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 17610-17619 + 3, 2, 1, 4, 3, 2, 1, 30, 29, 28, // 17620-17629 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 17630-17639 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 17640-17649 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 17650-17659 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 17660-17669 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17670-17679 + 1, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 17680-17689 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 17690-17699 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 17700-17709 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 17710-17719 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 17720-17729 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 17730-17739 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 17740-17749 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17750-17759 + 1, 22, 21, 20, 19, 18, 
17, 16, 15, 14, // 17760-17769 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 17770-17779 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 17780-17789 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 17790-17799 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 17800-17809 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 17810-17819 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 17820-17829 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 17830-17839 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17840-17849 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 17850-17859 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 17860-17869 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17870-17879 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17880-17889 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 17890-17899 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 17900-17909 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17910-17919 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 17920-17929 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 17930-17939 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 17940-17949 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 17950-17959 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 17960-17969 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 17970-17979 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 24, // 17980-17989 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 17990-17999 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 18000-18009 + 3, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 18010-18019 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 18020-18029 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18030-18039 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 18040-18049 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 18050-18059 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 18060-18069 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 18070-18079 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 18080-18089 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 18090-18099 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 18100-18109 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 18110-18119 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 18120-18129 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 18130-18139 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 20, // 18140-18149 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 18150-18159 + 9, 8, 
7, 6, 5, 4, 3, 2, 1, 12, // 18160-18169 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18170-18179 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18180-18189 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 18190-18199 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18200-18209 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 18210-18219 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 18220-18229 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 18230-18239 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18240-18249 + 1, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 18250-18259 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 18260-18269 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 18270-18279 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 18280-18289 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18290-18299 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 18300-18309 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 18310-18319 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 18320-18329 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18330-18339 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 18340-18349 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 18350-18359 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 18360-18369 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 18370-18379 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 18380-18389 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 18390-18399 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 18400-18409 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 18410-18419 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 18420-18429 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 18430-18439 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 18440-18449 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 18450-18459 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 18460-18469 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18470-18479 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 18480-18489 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 18490-18499 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 18500-18509 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 18510-18519 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 18520-18529 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 18530-18539 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 18540-18549 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 18550-18559 + 23, 22, 21, 20, 19, 
18, 17, 16, 15, 14, // 18560-18569 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 18570-18579 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 18580-18589 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 18590-18599 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 18600-18609 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 18610-18619 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 18620-18629 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 18630-18639 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 18640-18649 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18650-18659 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18660-18669 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 18670-18679 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18680-18689 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18690-18699 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 18700-18709 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 18710-18719 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18720-18729 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 18730-18739 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 18740-18749 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 18750-18759 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 18760-18769 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 18770-18779 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 18780-18789 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 18790-18799 + 3, 2, 1, 36, 35, 34, 33, 32, 31, 30, // 18800-18809 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 18810-18819 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 18820-18829 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, // 18830-18839 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 18840-18849 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 18850-18859 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 30, // 18860-18869 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 18870-18879 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 18880-18889 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 18890-18899 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18900-18909 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 28, // 18910-18919 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 18920-18929 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 18930-18939 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 18940-18949 + 9, 8, 7, 6, 5, 4, 3, 
2, 1, 14, // 18950-18959 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 18960-18969 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 18970-18979 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 18980-18989 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 18990-18999 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 19000-19009 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 19010-19019 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19020-19029 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 19030-19039 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19040-19049 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 19050-19059 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 19060-19069 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 19070-19079 + 1, 6, 5, 4, 3, 2, 1, 34, 33, 32, // 19080-19089 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 19090-19099 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 19100-19109 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19110-19119 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 19120-19129 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 19130-19139 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 19140-19149 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 19150-19159 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 19160-19169 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19170-19179 + 1, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 19180-19189 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 19190-19199 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 19200-19209 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 19210-19219 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19220-19229 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 19230-19239 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 19240-19249 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 19250-19259 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 19260-19269 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 19270-19279 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 19280-19289 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19290-19299 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 19300-19309 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 19310-19319 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 19320-19329 + 3, 2, 1, 40, 39, 38, 37, 36, 35, 34, // 19330-19339 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 
19340-19349 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 19350-19359 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 19360-19369 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 19370-19379 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 19380-19389 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 19390-19399 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 19400-19409 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 19410-19419 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 19420-19429 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 19430-19439 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 19440-19449 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 19450-19459 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 19460-19469 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 19470-19479 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 19480-19489 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19490-19499 + 1, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 19500-19509 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 19510-19519 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19520-19529 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19530-19539 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 19540-19549 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 19550-19559 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19560-19569 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 19570-19579 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 19580-19589 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 19590-19599 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 52, // 19600-19609 + 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, // 19610-19619 + 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 19620-19629 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 19630-19639 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 19640-19649 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19650-19659 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 19660-19669 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19670-19679 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 19680-19689 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 19690-19699 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 19700-19709 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 19710-19719 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 19720-19729 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 19730-19739 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 
19740-19749 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 19750-19759 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 19760-19769 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 19770-19779 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 19780-19789 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 19790-19799 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 19800-19809 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 19810-19819 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 19820-19829 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19830-19839 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 19840-19849 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 19850-19859 + 1, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 19860-19869 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 19870-19879 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 19880-19889 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 19890-19899 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 19900-19909 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 19910-19919 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 19920-19929 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 19930-19939 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 19940-19949 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19950-19959 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 19960-19969 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 19970-19979 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 19980-19989 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 19990-19999 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20000-20009 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20010-20019 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 20020-20029 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 20030-20039 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 20040-20049 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 20050-20059 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 20060-20069 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 20070-20079 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 20080-20089 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20090-20099 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 20100-20109 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 20110-20119 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 20120-20129 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 20130-20139 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 20140-20149 + 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, // 20150-20159 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 20160-20169 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 20170-20179 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 20180-20189 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20190-20199 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 20200-20209 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 20210-20219 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20220-20229 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 20230-20239 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 20240-20249 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20250-20259 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 20260-20269 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 20270-20279 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 20280-20289 + 7, 6, 5, 4, 3, 2, 1, 26, 25, 24, // 20290-20299 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 20300-20309 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 20310-20319 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 20320-20329 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 20330-20339 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 20340-20349 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 20350-20359 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, // 20360-20369 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 20370-20379 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 20380-20389 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 20390-20399 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 20400-20409 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 20410-20419 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20420-20429 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20430-20439 + 1, 2, 1, 34, 33, 32, 31, 30, 29, 28, // 20440-20449 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 20450-20459 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 20460-20469 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 20470-20479 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 20480-20489 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 20490-20499 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 20500-20509 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20510-20519 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 20520-20529 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 20530-20539 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 
20540-20549 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 20550-20559 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 20560-20569 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 20570-20579 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 20580-20589 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 20590-20599 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20600-20609 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 20610-20619 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 20620-20629 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 20630-20639 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 20640-20649 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 20650-20659 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 20660-20669 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20670-20679 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 20680-20689 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 20690-20699 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 20700-20709 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 20710-20719 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20720-20729 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 20730-20739 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 20740-20749 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 20750-20759 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20760-20769 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 20770-20779 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 20780-20789 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 20790-20799 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 40, // 20800-20809 + 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, // 20810-20819 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 20820-20829 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 20830-20839 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 20840-20849 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 20850-20859 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 20860-20869 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 20870-20879 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 20880-20889 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 20890-20899 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 20900-20909 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20910-20919 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 20920-20929 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 20930-20939 + 7, 6, 5, 
4, 3, 2, 1, 12, 11, 10, // 20940-20949 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 20950-20959 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 20960-20969 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20970-20979 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 20980-20989 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 20990-20999 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21000-21009 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 21010-21019 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 21020-21029 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 21030-21039 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 21040-21049 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 21050-21059 + 1, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 21060-21069 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 21070-21079 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 21080-21089 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21090-21099 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 21100-21109 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21110-21119 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 21120-21129 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 21130-21139 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 21140-21149 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 21150-21159 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 21160-21169 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 21170-21179 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 21180-21189 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 21190-21199 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21200-21209 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21210-21219 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 21220-21229 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 21230-21239 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 21240-21249 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 21250-21259 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 21260-21269 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 21270-21279 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 21280-21289 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 21290-21299 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 21300-21309 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 21310-21319 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 21320-21329 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 
21330-21339 + 1, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 21340-21349 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 21350-21359 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 21360-21369 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 21370-21379 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 21380-21389 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 21390-21399 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 21400-21409 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 21410-21419 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 21420-21429 + 3, 2, 1, 34, 33, 32, 31, 30, 29, 28, // 21430-21439 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 21440-21449 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 21450-21459 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 21460-21469 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21470-21479 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 21480-21489 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 21490-21499 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 21500-21509 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 21510-21519 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 28, // 21520-21529 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 21530-21539 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 21540-21549 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 21550-21559 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 21560-21569 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 21570-21579 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 21580-21589 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 21590-21599 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21600-21609 + 1, 2, 1, 4, 3, 2, 1, 30, 29, 28, // 21610-21619 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 21620-21629 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 21630-21639 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 21640-21649 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21650-21659 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 21660-21669 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 21670-21679 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 21680-21689 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21690-21699 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 21700-21709 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 21710-21719 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 21720-21729 + 7, 6, 5, 4, 3, 2, 1, 
2, 1, 12, // 21730-21739 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21740-21749 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 21750-21759 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 21760-21769 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 21770-21779 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 21780-21789 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 21790-21799 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 21800-21809 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 21810-21819 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 21820-21829 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 21830-21839 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21840-21849 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 21850-21859 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 21860-21869 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21870-21879 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 21880-21889 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 21890-21899 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21900-21909 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 21910-21919 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 21920-21929 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 21930-21939 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 21940-21949 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21950-21959 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 21960-21969 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 21970-21979 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 21980-21989 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 21990-21999 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 22000-22009 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 22010-22019 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 22020-22029 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 22030-22039 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22040-22049 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 22050-22059 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 22060-22069 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 22070-22079 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22080-22089 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 22090-22099 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 22100-22109 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 22110-22119 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 22120-22129 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 
22130-22139 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 22140-22149 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 22150-22159 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22160-22169 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 22170-22179 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 22180-22189 + 3, 2, 1, 36, 35, 34, 33, 32, 31, 30, // 22190-22199 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 22200-22209 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 22210-22219 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 22220-22229 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 22230-22239 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 22240-22249 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 22250-22259 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22260-22269 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 22270-22279 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 22280-22289 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 22290-22299 + 3, 2, 1, 4, 3, 2, 1, 36, 35, 34, // 22300-22309 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 22310-22319 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 22320-22329 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 22330-22339 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 22340-22349 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 22350-22359 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 22360-22369 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22370-22379 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22380-22389 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 22390-22399 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 22400-22409 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 22410-22419 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 22420-22429 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 22430-22439 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 22440-22449 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 22450-22459 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 22460-22469 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22470-22479 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 22480-22489 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22490-22499 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22500-22509 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 22510-22519 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22520-22529 + 1, 10, 
9, 8, 7, 6, 5, 4, 3, 2, // 22530-22539 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 22540-22549 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 22550-22559 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 22560-22569 + 1, 2, 1, 40, 39, 38, 37, 36, 35, 34, // 22570-22579 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 22580-22589 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 22590-22599 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 22600-22609 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 22610-22619 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 22620-22629 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 22630-22639 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 22640-22649 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 22650-22659 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 22660-22669 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 22670-22679 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22680-22689 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 22690-22699 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 22700-22709 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 22710-22719 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 22720-22729 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 22730-22739 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22740-22749 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 22750-22759 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 22760-22769 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 22770-22779 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 22780-22789 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 22790-22799 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 22800-22809 + 1, 6, 5, 4, 3, 2, 1, 36, 35, 34, // 22810-22819 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 22820-22829 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 22830-22839 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 22840-22849 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 22850-22859 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22860-22869 + 1, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 22870-22879 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 22880-22889 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22890-22899 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 22900-22909 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22910-22919 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 
22920-22929 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 22930-22939 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 22940-22949 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 22950-22959 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 22960-22969 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 22970-22979 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 22980-22989 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 22990-22999 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 23000-23009 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 23010-23019 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 23020-23029 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 23030-23039 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 23040-23049 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 23050-23059 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 23060-23069 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 23070-23079 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 23080-23089 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 23090-23099 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 23100-23109 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 23110-23119 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 23120-23129 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 23130-23139 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 23140-23149 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 23150-23159 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 23160-23169 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 23170-23179 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 23180-23189 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 23190-23199 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 23200-23209 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 23210-23219 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 23220-23229 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 23230-23239 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 23240-23249 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 23250-23259 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 23260-23269 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 23270-23279 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 23280-23289 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 23290-23299 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 23300-23309 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 23310-23319 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 23320-23329 + 
3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 23330-23339 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 23340-23349 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 23350-23359 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 23360-23369 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 23370-23379 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 23380-23389 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 23390-23399 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 23400-23409 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 23410-23419 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 23420-23429 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 23430-23439 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 23440-23449 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 23450-23459 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 23460-23469 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 23470-23479 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 23480-23489 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 23490-23499 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 23500-23509 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 23510-23519 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 23520-23529 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 23530-23539 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 23540-23549 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 23550-23559 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 23560-23569 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 23570-23579 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 23580-23589 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 23590-23599 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 23600-23609 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 23610-23619 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 23620-23629 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 23630-23639 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 23640-23649 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 23650-23659 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 23660-23669 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 23670-23679 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 30, // 23680-23689 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 23690-23699 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 23700-23709 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 23710-23719 + 21, 20, 19, 18, 
17, 16, 15, 14, 13, 12, // 23720-23729 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 23730-23739 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 23740-23749 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 23750-23759 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 23760-23769 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 23770-23779 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 23780-23789 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 23790-23799 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 23800-23809 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 23810-23819 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 23820-23829 + 1, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 23830-23839 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 23840-23849 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 23850-23859 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 23860-23869 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 23870-23879 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 23880-23889 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 23890-23899 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 23900-23909 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 23910-23919 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 28, // 23920-23929 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 23930-23939 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 23940-23949 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 23950-23959 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 23960-23969 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 23970-23979 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 23980-23989 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 23990-23999 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 24000-24009 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 24010-24019 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 24020-24029 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 24030-24039 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 24040-24049 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 24050-24059 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 24060-24069 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 24070-24079 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 24080-24089 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 24090-24099 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 24100-24109 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 24110-24119 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 24120-24129 + 
3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 24130-24139 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 24140-24149 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 24150-24159 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 24160-24169 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 24170-24179 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 24180-24189 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 24190-24199 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 24200-24209 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 24210-24219 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 24220-24229 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 24230-24239 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 24240-24249 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 24250-24259 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 24260-24269 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 24270-24279 + 1, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 24280-24289 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 24290-24299 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 24300-24309 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 24310-24319 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 24320-24329 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 24330-24339 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 24340-24349 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 24350-24359 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 24360-24369 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 24370-24379 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 24380-24389 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 24390-24399 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 24400-24409 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 24410-24419 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 24420-24429 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 24430-24439 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 24440-24449 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 24450-24459 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 24460-24469 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 24470-24479 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 24480-24489 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 24490-24499 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 24500-24509 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 24510-24519 + 7, 6, 5, 4, 3, 2, 
1, 6, 5, 4, // 24520-24529 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 24530-24539 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 24540-24549 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 24550-24559 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 24560-24569 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 24570-24579 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 24580-24589 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 24590-24599 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 24600-24609 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 24610-24619 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 24620-24629 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 24630-24639 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 24640-24649 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 24650-24659 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 24660-24669 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 24670-24679 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 24680-24689 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 24690-24699 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 24700-24709 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 24710-24719 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 24720-24729 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 24730-24739 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 24740-24749 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 24750-24759 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 24760-24769 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 24770-24779 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 24780-24789 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 24790-24799 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 24800-24809 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 24810-24819 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 24820-24829 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 24830-24839 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 24840-24849 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 24850-24859 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 24860-24869 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 24870-24879 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 24880-24889 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 24890-24899 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 24900-24909 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 24910-24919 + 
3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 24920-24929 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 24930-24939 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 24940-24949 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 24950-24959 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 24960-24969 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 24970-24979 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 24980-24989 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 24990-24999 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 25000-25009 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 25010-25019 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 25020-25029 + 1, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 25030-25039 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 25040-25049 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 25050-25059 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 25060-25069 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 25070-25079 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 25080-25089 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 25090-25099 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 25100-25109 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 25110-25119 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 25120-25129 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 25130-25139 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 25140-25149 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 25150-25159 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 25160-25169 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 25170-25179 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 30, // 25180-25189 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 25190-25199 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 25200-25209 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 25210-25219 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 25220-25229 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 25230-25239 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 25240-25249 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 25250-25259 + 1, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 25260-25269 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 25270-25279 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 25280-25289 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 25290-25299 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 25300-25309 + 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, // 25310-25319 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 25320-25329 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 25330-25339 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 25340-25349 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 25350-25359 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 25360-25369 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 25370-25379 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 25380-25389 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 25390-25399 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 25400-25409 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 25410-25419 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 25420-25429 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 25430-25439 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 25440-25449 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 25450-25459 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 25460-25469 + 1, 52, 51, 50, 49, 48, 47, 46, 45, 44, // 25470-25479 + 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 25480-25489 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 25490-25499 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 25500-25509 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 25510-25519 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 25520-25529 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 25530-25539 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 25540-25549 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 25550-25559 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 25560-25569 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 25570-25579 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 25580-25589 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 25590-25599 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 25600-25609 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 25610-25619 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 25620-25629 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 25630-25639 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 25640-25649 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 25650-25659 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 25660-25669 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 25670-25679 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 25680-25689 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 25690-25699 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 25700-25709 + 7, 6, 5, 4, 
3, 2, 1, 16, 15, 14, // 25710-25719 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 25720-25729 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 25730-25739 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 25740-25749 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 25750-25759 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 25760-25769 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 25770-25779 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 25780-25789 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 25790-25799 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 25800-25809 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 25810-25819 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 25820-25829 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 25830-25839 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 18, // 25840-25849 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 25850-25859 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 25860-25869 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 25870-25879 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 25880-25889 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 25890-25899 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 25900-25909 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 25910-25919 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 25920-25929 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 25930-25939 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 25940-25949 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 25950-25959 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 25960-25969 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 25970-25979 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 25980-25989 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 25990-25999 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 26000-26009 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 26010-26019 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 26020-26029 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 26030-26039 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 26040-26049 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 26050-26059 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 26060-26069 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 26070-26079 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 26080-26089 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 26090-26099 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 26100-26109 + 1, 
2, 1, 6, 5, 4, 3, 2, 1, 22, // 26110-26119 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 26120-26129 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 26130-26139 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 26140-26149 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 26150-26159 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 26160-26169 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 26170-26179 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 26180-26189 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 26190-26199 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 26200-26209 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 26210-26219 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 26220-26229 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 26230-26239 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 26240-26249 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 26250-26259 + 1, 2, 1, 4, 3, 2, 1, 26, 25, 24, // 26260-26269 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 26270-26279 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 26280-26289 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 26290-26299 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 26300-26309 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 26310-26319 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 26320-26329 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 26330-26339 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 26340-26349 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 26350-26359 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 26360-26369 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 26370-26379 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 26380-26389 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 26390-26399 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 26400-26409 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 26410-26419 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 26420-26429 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 26430-26439 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 26440-26449 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, // 26450-26459 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 26460-26469 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 26470-26479 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 26480-26489 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 26490-26499 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 26500-26509 + 3, 2, 1, 26, 25, 24, 23, 
22, 21, 20, // 26510-26519 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 26520-26529 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 26530-26539 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 26540-26549 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 26550-26559 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 26560-26569 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 26570-26579 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 26580-26589 + 1, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 26590-26599 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 26600-26609 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 26610-26619 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 26620-26629 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 26630-26639 + 1, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 26640-26649 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 26650-26659 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 26660-26669 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 26670-26679 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 26680-26689 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 26690-26699 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 26700-26709 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 26710-26719 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 26720-26729 + 1, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 26730-26739 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 26740-26749 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 26750-26759 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 26760-26769 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 26770-26779 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 26780-26789 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 26790-26799 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 26800-26809 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 26810-26819 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 26820-26829 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 26830-26839 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 26840-26849 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 26850-26859 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 26860-26869 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 26870-26879 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 26880-26889 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 26890-26899 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 26900-26909 + 11, 10, 9, 
8, 7, 6, 5, 4, 3, 2, // 26910-26919 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 26920-26929 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 26930-26939 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 26940-26949 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 26950-26959 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 26960-26969 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 26970-26979 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 26980-26989 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 26990-26999 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27000-27009 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 27010-27019 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27020-27029 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 27030-27039 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 27040-27049 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 27050-27059 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 27060-27069 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 27070-27079 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27080-27089 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 27090-27099 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 18, // 27100-27109 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 27110-27119 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 27120-27129 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 27130-27139 + 3, 2, 1, 36, 35, 34, 33, 32, 31, 30, // 27140-27149 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 27150-27159 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 27160-27169 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 27170-27179 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27180-27189 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 27190-27199 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27200-27209 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 27210-27219 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 27220-27229 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 27230-27239 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 27240-27249 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 27250-27259 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27260-27269 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 27270-27279 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 27280-27289 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 30, // 27290-27299 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 
20, // 27300-27309 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 27310-27319 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 27320-27329 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 27330-27339 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 27340-27349 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27350-27359 + 1, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 27360-27369 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 27370-27379 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 27380-27389 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 27390-27399 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 18, // 27400-27409 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 27410-27419 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 27420-27429 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 27430-27439 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 27440-27449 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 27450-27459 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 27460-27469 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 27470-27479 + 1, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 27480-27489 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 27490-27499 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 27500-27509 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 27510-27519 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 27520-27529 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 27530-27539 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27540-27549 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 27550-27559 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 27560-27569 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27570-27579 + 1, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 27580-27589 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 27590-27599 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27600-27609 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 27610-27619 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27620-27629 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 27630-27639 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 27640-27649 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 27650-27659 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 27660-27669 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 27670-27679 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 27680-27689 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 
2, // 27690-27699 + 1, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 27700-27709 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 27710-27719 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 27720-27729 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 27730-27739 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 27740-27749 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 27750-27759 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 27760-27769 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 27770-27779 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27780-27789 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 27790-27799 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 27800-27809 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 27810-27819 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 27820-27829 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 27830-27839 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 27840-27849 + 1, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 27850-27859 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 27860-27869 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 27870-27879 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 27880-27889 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 27890-27899 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 27900-27909 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 27910-27919 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 27920-27929 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 27930-27939 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 27940-27949 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 27950-27959 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 27960-27969 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 27970-27979 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 27980-27989 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 27990-27999 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 28000-28009 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 28010-28019 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 28020-28029 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 28030-28039 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28040-28049 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 28050-28059 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 28060-28069 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28070-28079 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 28080-28089 + 7, 6, 5, 4, 3, 2, 1, 2, 
1, 10, // 28090-28099 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 28100-28109 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 28110-28119 + 3, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 28120-28129 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 28130-28139 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28140-28149 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 28150-28159 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 28160-28169 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28170-28179 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 28180-28189 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28190-28199 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28200-28209 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 28210-28219 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 48, // 28220-28229 + 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, // 28230-28239 + 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 28240-28249 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 28250-28259 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 28260-28269 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 28270-28279 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 28280-28289 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 28290-28299 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 28300-28309 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 30, // 28310-28319 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 28320-28329 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 28330-28339 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 28340-28349 + 1, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 28350-28359 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 28360-28369 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 28370-28379 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 28380-28389 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 28390-28399 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 28400-28409 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 28410-28419 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 28420-28429 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 28430-28439 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 28440-28449 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 28450-28459 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 28460-28469 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 28470-28479 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 
4, // 28480-28489 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 28490-28499 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 28500-28509 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 28510-28519 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 28520-28529 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 28530-28539 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 28540-28549 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 28550-28559 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28560-28569 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 28570-28579 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28580-28589 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 28590-28599 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 28600-28609 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 28610-28619 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 28620-28629 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 28630-28639 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 28640-28649 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 28650-28659 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 28660-28669 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 28670-28679 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 28680-28689 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 28690-28699 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 28700-28709 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 28710-28719 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 28720-28729 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 28730-28739 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28740-28749 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 28750-28759 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28760-28769 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 28770-28779 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 28780-28789 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 28790-28799 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 28800-28809 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 28810-28819 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 28820-28829 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 28830-28839 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 28840-28849 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 28850-28859 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 28860-28869 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 28870-28879 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 28880-28889 
+ 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28890-28899 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 28900-28909 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28910-28919 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 28920-28929 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 28930-28939 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 28940-28949 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 28950-28959 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 28960-28969 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 30, // 28970-28979 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 28980-28989 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 28990-28999 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 29000-29009 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 29010-29019 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 29020-29029 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 29030-29039 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 29040-29049 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 29050-29059 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 29060-29069 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 29070-29079 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 29080-29089 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 29090-29099 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 29100-29109 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29110-29119 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 29120-29129 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 29130-29139 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 29140-29149 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 29150-29159 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 29160-29169 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 29170-29179 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 29180-29189 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 29190-29199 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 29200-29209 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 29210-29219 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 29220-29229 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29230-29239 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 29240-29249 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 29250-29259 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 29260-29269 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 29270-29279 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 
29280-29289 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 29290-29299 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 29300-29309 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 29310-29319 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 29320-29329 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 29330-29339 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 29340-29349 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29350-29359 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 29360-29369 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29370-29379 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 29380-29389 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 29390-29399 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 29400-29409 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29410-29419 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 29420-29429 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 29430-29439 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 29440-29449 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 29450-29459 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29460-29469 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 29470-29479 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 29480-29489 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 29490-29499 + 1, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 29500-29509 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 29510-29519 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 29520-29529 + 1, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 29530-29539 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 29540-29549 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 29550-29559 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 29560-29569 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 29570-29579 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 29580-29589 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 29590-29599 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 29600-29609 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 29610-29619 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 29620-29629 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 29630-29639 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 29640-29649 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29650-29659 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 29660-29669 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29670-29679 + 3, 2, 1, 34, 33, 32, 31, 30, 
29, 28, // 29680-29689 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 29690-29699 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 29700-29709 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 29710-29719 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 29720-29729 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 29730-29739 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29740-29749 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 29750-29759 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 29760-29769 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 29770-29779 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 29780-29789 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29790-29799 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 29800-29809 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 29810-29819 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29820-29829 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 29830-29839 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 29840-29849 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29850-29859 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 29860-29869 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 29870-29879 + 1, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 29880-29889 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 29890-29899 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 29900-29909 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 29910-29919 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 29920-29929 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 29930-29939 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 29940-29949 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 29950-29959 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 29960-29969 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 29970-29979 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 29980-29989 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 29990-29999 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30000-30009 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 30010-30019 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 30020-30029 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 30030-30039 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 30040-30049 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 30050-30059 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30060-30069 + 1, 18, 17, 16, 15, 14, 
13, 12, 11, 10, // 30070-30079 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 30080-30089 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 30090-30099 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 30100-30109 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 30110-30119 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 30120-30129 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 22, // 30130-30139 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 30140-30149 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30150-30159 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 30160-30169 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30170-30179 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 30180-30189 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 30190-30199 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 30200-30209 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 30210-30219 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 30220-30229 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30230-30239 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 30240-30249 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 30250-30259 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 30260-30269 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 30270-30279 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 30280-30289 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 30290-30299 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 30300-30309 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 30310-30319 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 30320-30329 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30330-30339 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 30340-30349 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 30350-30359 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 30360-30369 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 30370-30379 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 30380-30389 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 30390-30399 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 30400-30409 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 30410-30419 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 30420-30429 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 30430-30439 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 30440-30449 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 30450-30459 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 30460-30469 + 21, 20, 
19, 18, 17, 16, 15, 14, 13, 12, // 30470-30479 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30480-30489 + 1, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 30490-30499 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 30500-30509 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 30510-30519 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 30520-30529 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 30530-30539 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 30540-30549 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 18, // 30550-30559 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 30560-30569 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 30570-30579 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 30580-30589 + 3, 2, 1, 38, 37, 36, 35, 34, 33, 32, // 30590-30599 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 30600-30609 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 30610-30619 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30620-30629 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 30630-30639 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 30640-30649 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30650-30659 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30660-30669 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 30670-30679 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 30680-30689 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 30690-30699 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 30700-30709 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 30710-30719 + 7, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 30720-30729 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 30730-30739 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 30740-30749 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 30750-30759 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 30760-30769 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 30770-30779 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 30780-30789 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 30790-30799 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 30800-30809 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 30810-30819 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 30820-30829 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 30830-30839 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30840-30849 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 30850-30859 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 30860-30869 + 1, 
10, 9, 8, 7, 6, 5, 4, 3, 2, // 30870-30879 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 30880-30889 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 30890-30899 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30900-30909 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 30910-30919 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30920-30929 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 30930-30939 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 30940-30949 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 30950-30959 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 30960-30969 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 30970-30979 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 30980-30989 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 30990-30999 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 31000-31009 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 31010-31019 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 31020-31029 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 31030-31039 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 31040-31049 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 31050-31059 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 31060-31069 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 31070-31079 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 31080-31089 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 31090-31099 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 31100-31109 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 31110-31119 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 31120-31129 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 31130-31139 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 31140-31149 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 31150-31159 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 31160-31169 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 31170-31179 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 31180-31189 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 31190-31199 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 31200-31209 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 31210-31219 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 31220-31229 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 31230-31239 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 31240-31249 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 31250-31259 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 
31260-31269 + 1, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 31270-31279 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 31280-31289 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 31290-31299 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 31300-31309 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 31310-31319 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 31320-31329 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 31330-31339 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 31340-31349 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 31350-31359 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 31360-31369 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 31370-31379 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 31380-31389 + 1, 2, 1, 4, 3, 2, 1, 72, 71, 70, // 31390-31399 + 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, // 31400-31409 + 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, // 31410-31419 + 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, // 31420-31429 + 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, // 31430-31439 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 31440-31449 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 31450-31459 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 31460-31469 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 31470-31479 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 31480-31489 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 31490-31499 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 31500-31509 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 31510-31519 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 31520-31529 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 31530-31539 + 1, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 31540-31549 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 31550-31559 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 31560-31569 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 31570-31579 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 31580-31589 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 31590-31599 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 31600-31609 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 31610-31619 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 31620-31629 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 31630-31639 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 31640-31649 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, 
// 31650-31659 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 31660-31669 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 31670-31679 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 31680-31689 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 31690-31699 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 31700-31709 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 31710-31719 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 31720-31729 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 31730-31739 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 31740-31749 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 31750-31759 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 31760-31769 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 31770-31779 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 31780-31789 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 31790-31799 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 31800-31809 + 7, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 31810-31819 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 31820-31829 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 31830-31839 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 31840-31849 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 31850-31859 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 31860-31869 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 31870-31879 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 31880-31889 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 31890-31899 + 7, 6, 5, 4, 3, 2, 1, 50, 49, 48, // 31900-31909 + 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, // 31910-31919 + 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 31920-31929 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 31930-31939 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 31940-31949 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 31950-31959 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 31960-31969 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 31970-31979 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 31980-31989 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 31990-31999 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 32000-32009 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 32010-32019 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 32020-32029 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 32030-32039 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
// 32040-32049 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 32050-32059 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 32060-32069 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 32070-32079 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 32080-32089 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 32090-32099 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 32100-32109 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 32110-32119 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 32120-32129 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32130-32139 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 32140-32149 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 32150-32159 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 32160-32169 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 32170-32179 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 32180-32189 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 32190-32199 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 32200-32209 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 32210-32219 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 32220-32229 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 32230-32239 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32240-32249 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 32250-32259 + 1, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 32260-32269 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 32270-32279 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 32280-32289 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 32290-32299 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 32300-32309 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32310-32319 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 32320-32329 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32330-32339 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 32340-32349 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 32350-32359 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 32360-32369 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 32370-32379 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 32380-32389 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32390-32399 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32400-32409 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 32410-32419 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 32420-32429 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32430-32439 + 1, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 
32440-32449 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 32450-32459 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 32460-32469 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 32470-32479 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32480-32489 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 32490-32499 + 3, 2, 1, 4, 3, 2, 1, 24, 23, 22, // 32500-32509 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 32510-32519 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32520-32529 + 1, 2, 1, 4, 3, 2, 1, 24, 23, 22, // 32530-32539 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 32540-32549 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32550-32559 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 32560-32569 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 32570-32579 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 32580-32589 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 32590-32599 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 32600-32609 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32610-32619 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 32620-32629 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 32630-32639 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 32640-32649 + 3, 2, 1, 34, 33, 32, 31, 30, 29, 28, // 32650-32659 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 32660-32669 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 32670-32679 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 32680-32689 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 32690-32699 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 32700-32709 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 30, // 32710-32719 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 32720-32729 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 32730-32739 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 32740-32749 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 32750-32759 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32760-32769 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 32770-32779 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 32780-32789 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 32790-32799 + 1, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 32800-32809 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 32810-32819 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 32820-32829 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 32830-32839 + 3, 2, 1, 
26, 25, 24, 23, 22, 21, 20, // 32840-32849 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 32850-32859 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 32860-32869 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 32870-32879 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 32880-32889 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 32890-32899 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 32900-32909 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 32910-32919 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 32920-32929 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 32930-32939 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 32940-32949 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 32950-32959 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 32960-32969 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 32970-32979 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 32980-32989 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 32990-32999 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 33000-33009 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 33010-33019 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 33020-33029 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 33030-33039 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 33040-33049 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 33050-33059 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 33060-33069 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 33070-33079 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 33080-33089 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 33090-33099 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 33100-33109 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 30, // 33110-33119 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 33120-33129 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 33130-33139 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 33140-33149 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 33150-33159 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 33160-33169 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 33170-33179 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 33180-33189 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 33190-33199 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 33200-33209 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 33210-33219 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 33220-33229 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 
33230-33239 + 7, 6, 5, 4, 3, 2, 1, 40, 39, 38, // 33240-33249 + 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 33250-33259 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 33260-33269 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 33270-33279 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 33280-33289 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 33290-33299 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 33300-33309 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 33310-33319 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 33320-33329 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 33330-33339 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 33340-33349 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 33350-33359 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 33360-33369 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 33370-33379 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 33380-33389 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 33390-33399 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 33400-33409 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 33410-33419 + 7, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 33420-33429 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 33430-33439 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 33440-33449 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 33450-33459 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 33460-33469 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 33470-33479 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 33480-33489 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 33490-33499 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 33500-33509 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 33510-33519 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 33520-33529 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 33530-33539 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 33540-33549 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 33550-33559 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 33560-33569 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 33570-33579 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 33580-33589 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 33590-33599 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 33600-33609 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 33610-33619 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 33620-33629 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 
33630-33639 + 1, 6, 5, 4, 3, 2, 1, 32, 31, 30, // 33640-33649 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 33650-33659 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 33660-33669 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 33670-33679 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 33680-33689 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 33690-33699 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 33700-33709 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 33710-33719 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 33720-33729 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 33730-33739 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 33740-33749 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 33750-33759 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 33760-33769 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 33770-33779 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 33780-33789 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 33790-33799 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 33800-33809 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 33810-33819 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 33820-33829 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 33830-33839 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 33840-33849 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 33850-33859 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 33860-33869 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 33870-33879 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 33880-33889 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 33890-33899 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 33900-33909 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 33910-33919 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 33920-33929 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 33930-33939 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 33940-33949 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 33950-33959 + 1, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 33960-33969 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 33970-33979 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 33980-33989 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 33990-33999 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 34000-34009 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 34010-34019 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34020-34029 + 
1, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 34030-34039 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 34040-34049 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 34050-34059 + 1, 62, 61, 60, 59, 58, 57, 56, 55, 54, // 34060-34069 + 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, // 34070-34079 + 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 34080-34089 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 34090-34099 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 34100-34109 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 34110-34119 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 34120-34129 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34130-34139 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 34140-34149 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 34150-34159 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34160-34169 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 34170-34179 + 3, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 34180-34189 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 34190-34199 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34200-34209 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 34210-34219 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34220-34229 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 34230-34239 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 34240-34249 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 34250-34259 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 34260-34269 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 34270-34279 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 34280-34289 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 34290-34299 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 34300-34309 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 34310-34319 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 34320-34329 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 34330-34339 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34340-34349 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34350-34359 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 34360-34369 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34370-34379 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 34380-34389 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 34390-34399 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 34400-34409 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34410-34419 + 1, 8, 7, 6, 5, 4, 
3, 2, 1, 10, // 34420-34429 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 34430-34439 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 34440-34449 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 34450-34459 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 34460-34469 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 34470-34479 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 34480-34489 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 34490-34499 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34500-34509 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 34510-34519 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 34520-34529 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 34530-34539 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 34, // 34540-34549 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 34550-34559 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 34560-34569 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 34570-34579 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 34580-34589 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 34590-34599 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 34600-34609 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 34610-34619 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34620-34629 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 34630-34639 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 34640-34649 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 34650-34659 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 34660-34669 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 34670-34679 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 34680-34689 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 34690-34699 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 34700-34709 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34710-34719 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 34720-34729 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 34730-34739 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 34740-34749 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 34750-34759 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 34760-34769 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34770-34779 + 1, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 34780-34789 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 34790-34799 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 34800-34809 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 34810-34819 + 21, 20, 19, 
18, 17, 16, 15, 14, 13, 12, // 34820-34829 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34830-34839 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 22, // 34840-34849 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 34850-34859 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34860-34869 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 34870-34879 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 34880-34889 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 34890-34899 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 34900-34909 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 20, // 34910-34919 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 34920-34929 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 34930-34939 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 34940-34949 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34950-34959 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 34960-34969 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 34970-34979 + 1, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 34980-34989 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 34990-34999 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 35000-35009 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 35010-35019 + 3, 2, 1, 4, 3, 2, 1, 24, 23, 22, // 35020-35029 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 35030-35039 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35040-35049 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 35050-35059 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 35060-35069 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35070-35079 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 35080-35089 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 35090-35099 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 35100-35109 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 35110-35119 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 35120-35129 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35130-35139 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 35140-35149 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 35150-35159 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35160-35169 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 35170-35179 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 35180-35189 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35190-35199 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 35200-35209 + 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, // 35210-35219 + 1, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 35220-35229 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 35230-35239 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35240-35249 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 35250-35259 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 35260-35269 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 35270-35279 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35280-35289 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 35290-35299 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35300-35309 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 35310-35319 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 35320-35329 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 35330-35339 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 35340-35349 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 35350-35359 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 35360-35369 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35370-35379 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 35380-35389 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 35390-35399 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 35400-35409 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 35410-35419 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 35420-35429 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 35430-35439 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 35440-35449 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35450-35459 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 35460-35469 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 35470-35479 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35480-35489 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 35490-35499 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 35500-35509 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35510-35519 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 35520-35529 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 35530-35539 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 35540-35549 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 35550-35559 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 35560-35569 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 35570-35579 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35580-35589 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 35590-35599 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 35600-35609 + 7, 6, 5, 4, 3, 2, 
1, 54, 53, 52, // 35610-35619 + 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, // 35620-35629 + 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 35630-35639 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 35640-35649 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 35650-35659 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35660-35669 + 1, 6, 5, 4, 3, 2, 1, 52, 51, 50, // 35670-35679 + 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, // 35680-35689 + 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, // 35690-35699 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 35700-35709 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 35710-35719 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 35720-35729 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 35730-35739 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 35740-35749 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 35750-35759 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35760-35769 + 1, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 35770-35779 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 35780-35789 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 35790-35799 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 35800-35809 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 35810-35819 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35820-35829 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 35830-35839 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35840-35849 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 35850-35859 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 35860-35869 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 35870-35879 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 35880-35889 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 35890-35899 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35900-35909 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 35910-35919 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 35920-35929 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 35930-35939 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 35940-35949 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 35950-35959 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 35960-35969 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 35970-35979 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 35980-35989 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 35990-35999 + 7, 6, 5, 4, 3, 2, 1, 
4, 3, 2, // 36000-36009 + 1, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 36010-36019 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 36020-36029 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 36030-36039 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 36040-36049 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36050-36059 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 36060-36069 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 36070-36079 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 36080-36089 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 36090-36099 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 36100-36109 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 36110-36119 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36120-36129 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 36130-36139 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36140-36149 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36150-36159 + 1, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 36160-36169 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 36170-36179 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 36180-36189 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 36190-36199 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 36200-36209 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 36210-36219 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 36220-36229 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36230-36239 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36240-36249 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 36250-36259 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 36260-36269 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 36270-36279 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 36280-36289 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 36290-36299 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 36300-36309 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 36310-36319 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 36320-36329 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36330-36339 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 36340-36349 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 36350-36359 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 36360-36369 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 36370-36379 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 44, // 36380-36389 + 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 36390-36399 + 33, 32, 
31, 30, 29, 28, 27, 26, 25, 24, // 36400-36409 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 36410-36419 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 36420-36429 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 36430-36439 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36440-36449 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 36450-36459 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 36460-36469 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 36470-36479 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 36480-36489 + 3, 2, 1, 4, 3, 2, 1, 26, 25, 24, // 36490-36499 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 36500-36509 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 36510-36519 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 36520-36529 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36530-36539 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36540-36549 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 36550-36559 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 36560-36569 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 36570-36579 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 36580-36589 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 36590-36599 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 36600-36609 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 36610-36619 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 36620-36629 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 36630-36639 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 36640-36649 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 36650-36659 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36660-36669 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 36670-36679 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 36680-36689 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 36690-36699 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 36700-36709 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 36710-36719 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 36720-36729 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 36730-36739 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 36740-36749 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36750-36759 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 36760-36769 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 36770-36779 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 36780-36789 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 36790-36799 + 9, 8, 7, 6, 
5, 4, 3, 2, 1, 12, // 36800-36809 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36810-36819 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 36820-36829 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 36830-36839 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 36840-36849 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 36850-36859 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 36860-36869 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 36870-36879 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 36880-36889 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 36890-36899 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 36900-36909 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 36910-36919 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 36920-36929 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 36930-36939 + 3, 2, 1, 4, 3, 2, 1, 26, 25, 24, // 36940-36949 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 36950-36959 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 36960-36969 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 36970-36979 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 36980-36989 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 36990-36999 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 37000-37009 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 37010-37019 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 37020-37029 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 37030-37039 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 37040-37049 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 37050-37059 + 1, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 37060-37069 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 37070-37079 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 37080-37089 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 37090-37099 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 37100-37109 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 37110-37119 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 37120-37129 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, // 37130-37139 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 37140-37149 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 37150-37159 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 37160-37169 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 37170-37179 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 37180-37189 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 37190-37199 + 1, 16, 15, 14, 
13, 12, 11, 10, 9, 8, // 37200-37209 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 37210-37219 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 37220-37229 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 37230-37239 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 37240-37249 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 37250-37259 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 37260-37269 + 3, 2, 1, 4, 3, 2, 1, 30, 29, 28, // 37270-37279 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 37280-37289 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 37290-37299 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 37300-37309 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 37310-37319 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 37320-37329 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 18, // 37330-37339 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 37340-37349 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 37350-37359 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 37360-37369 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 37370-37379 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 37380-37389 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 37390-37399 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 37400-37409 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 37410-37419 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 37420-37429 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 37430-37439 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 37440-37449 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 37450-37459 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 37460-37469 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 37470-37479 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 37480-37489 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 37490-37499 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 37500-37509 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 37510-37519 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 37520-37529 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 37530-37539 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 37540-37549 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 37550-37559 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 37560-37569 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 37570-37579 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 37580-37589 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 37590-37599 + 7, 6, 
5, 4, 3, 2, 1, 12, 11, 10, // 37600-37609 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 37610-37619 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 37620-37629 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 37630-37639 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 37640-37649 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 37650-37659 + 3, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 37660-37669 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 37670-37679 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 37680-37689 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 37690-37699 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 37700-37709 + 7, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 37710-37719 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 37720-37729 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 37730-37739 + 7, 6, 5, 4, 3, 2, 1, 34, 33, 32, // 37740-37749 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 37750-37759 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 37760-37769 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 37770-37779 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 37780-37789 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 37790-37799 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 37800-37809 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 37810-37819 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 37820-37829 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 37830-37839 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 37840-37849 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 37850-37859 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 37860-37869 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 37870-37879 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 37880-37889 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 37890-37899 + 7, 6, 5, 4, 3, 2, 1, 44, 43, 42, // 37900-37909 + 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 37910-37919 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 37920-37929 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 37930-37939 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 37940-37949 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 37950-37959 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 37960-37969 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 37970-37979 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 37980-37989 + 1, 2, 1, 4, 3, 
2, 1, 14, 13, 12, // 37990-37999 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38000-38009 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 38010-38019 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 38020-38029 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 38030-38039 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 38040-38049 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 38050-38059 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 38060-38069 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 38070-38079 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 38080-38089 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 38090-38099 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 38100-38109 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 30, // 38110-38119 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 38120-38129 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 38130-38139 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 38140-38149 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 38150-38159 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 38160-38169 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 38170-38179 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 38180-38189 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 38190-38199 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 38200-38209 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 38210-38219 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38220-38229 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 38230-38239 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 38240-38249 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38250-38259 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 38260-38269 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 38270-38279 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 38280-38289 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 38290-38299 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 38300-38309 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 38310-38319 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 38320-38329 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 38330-38339 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38340-38349 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 38350-38359 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38360-38369 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 38370-38379 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 
38380-38389 + 3, 2, 1, 38, 37, 36, 35, 34, 33, 32, // 38390-38399 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 38400-38409 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 38410-38419 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38420-38429 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 38430-38439 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 38440-38449 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 38450-38459 + 1, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 38460-38469 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 38470-38479 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 38480-38489 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38490-38499 + 1, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 38500-38509 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 38510-38519 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 38520-38529 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 38530-38539 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 38540-38549 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 38550-38559 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 24, // 38560-38569 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 38570-38579 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 38580-38589 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 38590-38599 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 38600-38609 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 38610-38619 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 38620-38629 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 38630-38639 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38640-38649 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 38650-38659 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 38660-38669 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 38670-38679 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 38680-38689 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 38690-38699 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 38700-38709 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 38710-38719 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 38720-38729 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 38730-38739 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 18, // 38740-38749 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 38750-38759 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 38760-38769 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 
38770-38779 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 38780-38789 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 38790-38799 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 38800-38809 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38810-38819 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 38820-38829 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 38830-38839 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38840-38849 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38850-38859 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 38860-38869 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 38870-38879 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38880-38889 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 38890-38899 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 38900-38909 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 38910-38919 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 38920-38929 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 38930-38939 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 38940-38949 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 38950-38959 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 38960-38969 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 38970-38979 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 38980-38989 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 38990-38999 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 39000-39009 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 39010-39019 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 39020-39029 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39030-39039 + 1, 2, 1, 4, 3, 2, 1, 32, 31, 30, // 39040-39049 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 39050-39059 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 39060-39069 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 39070-39079 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 39080-39089 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 39090-39099 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 39100-39109 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 39110-39119 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 39120-39129 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 39130-39139 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 39140-39149 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 39150-39159 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 39160-39169 + 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, // 39170-39179 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39180-39189 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 39190-39199 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 39200-39209 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 39210-39219 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 39220-39229 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 39230-39239 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39240-39249 + 1, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 39250-39259 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 39260-39269 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 39270-39279 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 39280-39289 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 39290-39299 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 39300-39309 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 39310-39319 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 39320-39329 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39330-39339 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 39340-39349 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 39350-39359 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 39360-39369 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 39370-39379 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 39380-39389 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 39390-39399 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 39400-39409 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, // 39410-39419 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 39420-39429 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 39430-39439 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 39440-39449 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39450-39459 + 1, 38, 37, 36, 35, 34, 33, 32, 31, 30, // 39460-39469 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 39470-39479 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 39480-39489 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 39490-39499 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 39500-39509 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39510-39519 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 39520-39529 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39530-39539 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39540-39549 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 39550-39559 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 39560-39569 + 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, // 39570-39579 + 1, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 39580-39589 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 39590-39599 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 39600-39609 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 39610-39619 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 39620-39629 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 39630-39639 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 39640-39649 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 39650-39659 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 39660-39669 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 39670-39679 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 39680-39689 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 39690-39699 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 39700-39709 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 39710-39719 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 39720-39729 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 39730-39739 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 39740-39749 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39750-39759 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 39760-39769 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 39770-39779 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39780-39789 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 39790-39799 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 39800-39809 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39810-39819 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 39820-39829 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 39830-39839 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 39840-39849 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 39850-39859 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 39860-39869 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 39870-39879 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 39880-39889 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39890-39899 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 39900-39909 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 39910-39919 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 39920-39929 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 39930-39939 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 39940-39949 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 39950-39959 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 39960-39969 + 1, 8, 7, 
6, 5, 4, 3, 2, 1, 4, // 39970-39979 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 20, // 39980-39989 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 39990-39999 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 40000-40009 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 40010-40019 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 40020-40029 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 24, // 40030-40039 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 40040-40049 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 40050-40059 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 40060-40069 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 40070-40079 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 40080-40089 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 40090-40099 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 40100-40109 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 40110-40119 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 22, // 40120-40129 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 40130-40139 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 40140-40149 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 40150-40159 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 40160-40169 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 40170-40179 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 40180-40189 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 40190-40199 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 40200-40209 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 40210-40219 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 40220-40229 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 40230-40239 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 40240-40249 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 40250-40259 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 40260-40269 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 40270-40279 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 54, // 40280-40289 + 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, // 40290-40299 + 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 40300-40309 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 40310-40319 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 40320-40329 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 40330-40339 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 40340-40349 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 40350-40359 + 1, 26, 25, 24, 23, 
22, 21, 20, 19, 18, // 40360-40369 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 40370-40379 + 7, 6, 5, 4, 3, 2, 1, 36, 35, 34, // 40380-40389 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 40390-40399 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 40400-40409 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 40410-40419 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 40420-40429 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 40430-40439 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 40440-40449 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 40450-40459 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 40460-40469 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 40470-40479 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 40480-40489 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 40490-40499 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 40500-40509 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 40510-40519 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 40520-40529 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 40530-40539 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 40540-40549 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 40550-40559 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 40560-40569 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 40570-40579 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 40580-40589 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 40590-40599 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 40600-40609 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 40610-40619 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 40620-40629 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 54, // 40630-40639 + 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, // 40640-40649 + 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 40650-40659 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 40660-40669 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 40670-40679 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 40680-40689 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 40690-40699 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 30, // 40700-40709 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 40710-40719 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 40720-40729 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 40730-40739 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 40740-40749 + 1, 8, 7, 6, 
5, 4, 3, 2, 1, 4, // 40750-40759 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 40760-40769 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 40770-40779 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 40780-40789 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 40790-40799 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 40800-40809 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 40810-40819 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 40820-40829 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 40830-40839 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 40840-40849 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 40850-40859 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 40860-40869 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 40870-40879 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 40880-40889 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 40890-40899 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 40900-40909 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 40910-40919 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 40920-40929 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 40930-40939 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 40940-40949 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 40950-40959 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 40960-40969 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 40970-40979 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 40980-40989 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 40990-40999 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41000-41009 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 41010-41019 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 41020-41029 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 41030-41039 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 41040-41049 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 41050-41059 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 41060-41069 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 41070-41079 + 1, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 41080-41089 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 41090-41099 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 41100-41109 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 41110-41119 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41120-41129 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41130-41139 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 41140-41149 + 11, 10, 9, 
8, 7, 6, 5, 4, 3, 2, // 41150-41159 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 41160-41169 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 41170-41179 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 41180-41189 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41190-41199 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 41200-41209 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 41210-41219 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 41220-41229 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 41230-41239 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 41240-41249 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 41250-41259 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 41260-41269 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41270-41279 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 41280-41289 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 34, // 41290-41299 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 41300-41309 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 41310-41319 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 41320-41329 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 41330-41339 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41340-41349 + 1, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 41350-41359 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 41360-41369 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41370-41379 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 41380-41389 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 41390-41399 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41400-41409 + 1, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 41410-41419 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 41420-41429 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 41430-41439 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 41440-41449 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 41450-41459 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 41460-41469 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 41470-41479 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41480-41489 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 41490-41499 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 41500-41509 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 41510-41519 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 41520-41529 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 41530-41539 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 30, // 41540-41549 + 29, 
28, 27, 26, 25, 24, 23, 22, 21, 20, // 41550-41559 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 41560-41569 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 41570-41579 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 41580-41589 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 41590-41599 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 41600-41609 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 41610-41619 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 41620-41629 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41630-41639 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 41640-41649 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 41650-41659 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 41660-41669 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41670-41679 + 1, 6, 5, 4, 3, 2, 1, 32, 31, 30, // 41680-41689 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 41690-41699 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 41700-41709 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 41710-41719 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 41720-41729 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 41730-41739 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 41740-41749 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 41750-41759 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41760-41769 + 1, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 41770-41779 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 41780-41789 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41790-41799 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 41800-41809 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 41810-41819 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 41820-41829 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 41830-41839 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 41840-41849 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 41850-41859 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 41860-41869 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 41870-41879 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 41880-41889 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 41890-41899 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 41900-41909 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 41910-41919 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 41920-41929 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41930-41939 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 
41940-41949 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 41950-41959 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 41960-41969 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 41970-41979 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 41980-41989 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 41990-41999 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 42000-42009 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 42010-42019 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 42020-42029 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 42030-42039 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 42040-42049 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 42050-42059 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 42060-42069 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 42070-42079 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 42080-42089 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 42090-42099 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 42100-42109 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 42110-42119 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 42120-42129 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 42130-42139 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 42140-42149 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 42150-42159 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 42160-42169 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 42170-42179 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 42180-42189 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 42190-42199 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 42200-42209 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 42210-42219 + 1, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 42220-42229 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 42230-42239 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 42240-42249 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 42250-42259 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 42260-42269 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 42270-42279 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 42280-42289 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 42290-42299 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 42300-42309 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 42310-42319 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 42320-42329 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 42330-42339 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 
42340-42349 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 42350-42359 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 42360-42369 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 42370-42379 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 42380-42389 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 42390-42399 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 24, // 42400-42409 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 42410-42419 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 42420-42429 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 42430-42439 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 42440-42449 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 42450-42459 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 42460-42469 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 42470-42479 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 42480-42489 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 42490-42499 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 42500-42509 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 42510-42519 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 42520-42529 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 42530-42539 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 42540-42549 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 42550-42559 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 42560-42569 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 42570-42579 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 42580-42589 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 42590-42599 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 42600-42609 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 42610-42619 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 42620-42629 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 42630-42639 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 42640-42649 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 42650-42659 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 42660-42669 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 42670-42679 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 42680-42689 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 42690-42699 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 42700-42709 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 42710-42719 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 42720-42729 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 42730-42739 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 
42740-42749 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 42750-42759 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 42760-42769 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 42770-42779 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 42780-42789 + 3, 2, 1, 4, 3, 2, 1, 24, 23, 22, // 42790-42799 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 42800-42809 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 42810-42819 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 42820-42829 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 42830-42839 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 42840-42849 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 42850-42859 + 3, 2, 1, 36, 35, 34, 33, 32, 31, 30, // 42860-42869 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 42870-42879 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 42880-42889 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 42890-42899 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 42900-42909 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 42910-42919 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 42920-42929 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 42930-42939 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 42940-42949 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 42950-42959 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 42960-42969 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 42970-42979 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 42980-42989 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 42990-42999 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 43000-43009 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 43010-43019 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 43020-43029 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 43030-43039 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 43040-43049 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 43050-43059 + 3, 2, 1, 4, 3, 2, 1, 26, 25, 24, // 43060-43069 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 43070-43079 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 43080-43089 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 43090-43099 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 43100-43109 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 43110-43119 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 43120-43129 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 43130-43139 + 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, // 43140-43149 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 43150-43159 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 43160-43169 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 43170-43179 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 43180-43189 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43190-43199 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 43200-43209 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 43210-43219 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 43220-43229 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 43230-43239 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 43240-43249 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43250-43259 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43260-43269 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 43270-43279 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 43280-43289 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 43290-43299 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 43300-43309 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 43310-43319 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43320-43329 + 1, 60, 59, 58, 57, 56, 55, 54, 53, 52, // 43330-43339 + 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, // 43340-43349 + 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 43350-43359 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 43360-43369 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 43370-43379 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43380-43389 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 43390-43399 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 43400-43409 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 43410-43419 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 43420-43429 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43430-43439 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43440-43449 + 1, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 43450-43459 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 43460-43469 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43470-43479 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 43480-43489 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 43490-43499 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 43500-43509 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 43510-43519 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 43520-43529 + 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, // 43530-43539 + 1, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 43540-43549 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 43550-43559 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 43560-43569 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 43570-43579 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43580-43589 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 43590-43599 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 43600-43609 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 43610-43619 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 43620-43629 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 43630-43639 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 43640-43649 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43650-43659 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 43660-43669 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 43670-43679 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43680-43689 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 43690-43699 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43700-43709 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 43710-43719 + 1, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 43720-43729 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 43730-43739 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 43740-43749 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 43750-43759 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 43760-43769 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 43770-43779 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 43780-43789 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 43790-43799 + 1, 52, 51, 50, 49, 48, 47, 46, 45, 44, // 43800-43809 + 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 43810-43819 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 43820-43829 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 43830-43839 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 43840-43849 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 43850-43859 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 43860-43869 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 43870-43879 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 43880-43889 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 43890-43899 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 43900-43909 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 43910-43919 + 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, // 43920-43929 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 43930-43939 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 43940-43949 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 43950-43959 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 43960-43969 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 43970-43979 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 43980-43989 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 43990-43999 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 44000-44009 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 44010-44019 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 44020-44029 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44030-44039 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 44040-44049 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 44050-44059 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44060-44069 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 44070-44079 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 44080-44089 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44090-44099 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44100-44109 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 44110-44119 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 44120-44129 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 44130-44139 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 44140-44149 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 44150-44159 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44160-44169 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 44170-44179 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 44180-44189 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44190-44199 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 44200-44209 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44210-44219 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 44220-44229 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 44230-44239 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 44240-44249 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 44250-44259 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 44260-44269 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 44270-44279 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 44280-44289 + 3, 2, 1, 58, 57, 56, 55, 54, 53, 52, // 44290-44299 + 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, // 44300-44309 + 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 44310-44319 + 
31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 44320-44329 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 44330-44339 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44340-44349 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 44350-44359 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44360-44369 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44370-44379 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 28, // 44380-44389 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 44390-44399 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 44400-44409 + 7, 6, 5, 4, 3, 2, 1, 32, 31, 30, // 44410-44419 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 44420-44429 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 44430-44439 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 44440-44449 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 44450-44459 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 44460-44469 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 44470-44479 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 44480-44489 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 44490-44499 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 44500-44509 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 44510-44519 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44520-44529 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 44530-44539 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 44540-44549 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 44550-44559 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 44560-44569 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 44570-44579 + 7, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 44580-44589 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 44590-44599 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 44600-44609 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 44610-44619 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 44620-44629 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 44630-44639 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 44640-44649 + 1, 6, 5, 4, 3, 2, 1, 26, 25, 24, // 44650-44659 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 44660-44669 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 44670-44679 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 44680-44689 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 44690-44699 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44700-44709 + 1, 18, 17, 16, 
15, 14, 13, 12, 11, 10, // 44710-44719 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 44720-44729 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44730-44739 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 44740-44749 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 44750-44759 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 44760-44769 + 1, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 44770-44779 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 44780-44789 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 44790-44799 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 44800-44809 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, // 44810-44819 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 44820-44829 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 44830-44839 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 44840-44849 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 44850-44859 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 44860-44869 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 44870-44879 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 44880-44889 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 44890-44899 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 44900-44909 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 44910-44919 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 44920-44929 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 44930-44939 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 44940-44949 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 44950-44959 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 44960-44969 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 44970-44979 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 44980-44989 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 44990-44999 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 45000-45009 + 3, 2, 1, 40, 39, 38, 37, 36, 35, 34, // 45010-45019 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 45020-45029 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 45030-45039 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 45040-45049 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 45050-45059 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 45060-45069 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 45070-45079 + 3, 2, 1, 36, 35, 34, 33, 32, 31, 30, // 45080-45089 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 45090-45099 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 
45100-45109 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 45110-45119 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 45120-45129 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 45130-45139 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 45140-45149 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 45150-45159 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 45160-45169 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 45170-45179 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 45180-45189 + 1, 6, 5, 4, 3, 2, 1, 36, 35, 34, // 45190-45199 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 45200-45209 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 45210-45219 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 45220-45229 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 45230-45239 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 45240-45249 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 45250-45259 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 45260-45269 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 45270-45279 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 45280-45289 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 45290-45299 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 45300-45309 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 45310-45319 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 45320-45329 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 45330-45339 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 45340-45349 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 45350-45359 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 45360-45369 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 45370-45379 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 45380-45389 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 45390-45399 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 45400-45409 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 45410-45419 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 45420-45429 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 42, // 45430-45439 + 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 45440-45449 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 45450-45459 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 45460-45469 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 45470-45479 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 45480-45489 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 45490-45499 + 3, 2, 1, 20, 
19, 18, 17, 16, 15, 14, // 45500-45509 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 45510-45519 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 45520-45529 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 45530-45539 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 45540-45549 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 45550-45559 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 45560-45569 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 45570-45579 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 45580-45589 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 45590-45599 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 45600-45609 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 45610-45619 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 45620-45629 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 45630-45639 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 45640-45649 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 45650-45659 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 45660-45669 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 45670-45679 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 45680-45689 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 45690-45699 + 7, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 45700-45709 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 45710-45719 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 45720-45729 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 45730-45739 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 45740-45749 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 45750-45759 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 45760-45769 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 38, // 45770-45779 + 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 45780-45789 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 45790-45799 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 45800-45809 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 45810-45819 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 45820-45829 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 45830-45839 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 45840-45849 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 45850-45859 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 45860-45869 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 45870-45879 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 45880-45889 + 3, 2, 1, 50, 49, 48, 47, 46, 45, 44, // 
45890-45899 + 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 45900-45909 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 45910-45919 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 45920-45929 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 45930-45939 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 45940-45949 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 45950-45959 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 45960-45969 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 45970-45979 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 32, // 45980-45989 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 45990-45999 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 46000-46009 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46010-46019 + 1, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 46020-46029 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 46030-46039 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 46040-46049 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46050-46059 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 46060-46069 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 46070-46079 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46080-46089 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 46090-46099 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 46100-46109 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 46110-46119 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 46120-46129 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 46130-46139 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 46140-46149 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 46150-46159 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46160-46169 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46170-46179 + 1, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 46180-46189 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, // 46190-46199 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 46200-46209 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 46210-46219 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 46220-46229 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 46230-46239 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 46240-46249 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46250-46259 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46260-46269 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 46270-46279 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 
46280-46289 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46290-46299 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 18, // 46300-46309 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 46310-46319 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 46320-46329 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 46330-46339 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 46340-46349 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 46350-46359 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 46360-46369 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46370-46379 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 46380-46389 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 46390-46399 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46400-46409 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 46410-46419 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 46420-46429 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 46430-46439 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 46440-46449 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 46450-46459 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46460-46469 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 46470-46479 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 46480-46489 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 46490-46499 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 46500-46509 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 46510-46519 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 46520-46529 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 46530-46539 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 46540-46549 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 46550-46559 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 46560-46569 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 46570-46579 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 46580-46589 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46590-46599 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 46600-46609 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 46610-46619 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 46620-46629 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 46630-46639 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 46640-46649 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 46650-46659 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 46660-46669 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 46670-46679 + 1, 6, 5, 4, 3, 2, 1, 
4, 3, 2, // 46680-46689 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 46690-46699 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 46700-46709 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 46710-46719 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 46720-46729 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 46730-46739 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 46740-46749 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 46750-46759 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 46760-46769 + 1, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 46770-46779 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 46780-46789 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 46790-46799 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 46800-46809 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 46810-46819 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 46820-46829 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 46830-46839 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 46840-46849 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 46850-46859 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 46860-46869 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 46870-46879 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 46880-46889 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 46890-46899 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 46900-46909 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 46910-46919 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 46920-46929 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 46930-46939 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 46940-46949 + 7, 6, 5, 4, 3, 2, 1, 36, 35, 34, // 46950-46959 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 46960-46969 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 46970-46979 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 46980-46989 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 46990-46999 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 47000-47009 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 47010-47019 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 47020-47029 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47030-47039 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47040-47049 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 28, // 47050-47059 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 47060-47069 + 17, 16, 15, 14, 
13, 12, 11, 10, 9, 8, // 47070-47079 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 47080-47089 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 47090-47099 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47100-47109 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 47110-47119 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 47120-47129 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 47130-47139 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 47140-47149 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47150-47159 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 47160-47169 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 47170-47179 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 47180-47189 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 47190-47199 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 47200-47209 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47210-47219 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 47220-47229 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 47230-47239 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47240-47249 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 47250-47259 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 47260-47269 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 47270-47279 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 47280-47289 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 47290-47299 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 47300-47309 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 47310-47319 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 47320-47329 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 47330-47339 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47340-47349 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 47350-47359 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 47360-47369 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47370-47379 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 18, // 47380-47389 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 47390-47399 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 47400-47409 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 47410-47419 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47420-47429 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47430-47439 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 47440-47449 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 32, // 47450-47459 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 47460-47469 + 
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 47470-47479 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47480-47489 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 47490-47499 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 47500-47509 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 47510-47519 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 47520-47529 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 47530-47539 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 47540-47549 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 47550-47559 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 47560-47569 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47570-47579 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47580-47589 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 47590-47599 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 47600-47609 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 47610-47619 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 47620-47629 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 47630-47639 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 47640-47649 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 22, // 47650-47659 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 47660-47669 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47670-47679 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 47680-47689 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 47690-47699 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47700-47709 + 1, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 47710-47719 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 47720-47729 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 47730-47739 + 1, 2, 1, 34, 33, 32, 31, 30, 29, 28, // 47740-47749 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 47750-47759 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 47760-47769 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 47770-47779 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47780-47789 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 47790-47799 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 47800-47809 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 47810-47819 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 47820-47829 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 47830-47839 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 47840-47849 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 47850-47859 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 47860-47869 
+ 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 47870-47879 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 47880-47889 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 47890-47899 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 47900-47909 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 47910-47919 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 47920-47929 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 47930-47939 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 47940-47949 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 47950-47959 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 47960-47969 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 47970-47979 + 1, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 47980-47989 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 47990-47999 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 48000-48009 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 48010-48019 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 20, // 48020-48029 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 48030-48039 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 48040-48049 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 48050-48059 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 48060-48069 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 48070-48079 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 48080-48089 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 48090-48099 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 48100-48109 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 48110-48119 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 48120-48129 + 1, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 48130-48139 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 48140-48149 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 48150-48159 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 48160-48169 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 48170-48179 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 48180-48189 + 3, 2, 1, 4, 3, 2, 1, 24, 23, 22, // 48190-48199 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 48200-48209 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 48210-48219 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 48220-48229 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 48230-48239 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 48240-48249 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 48250-48259 + 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, // 48260-48269 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 48270-48279 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 48280-48289 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 48290-48299 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 48300-48309 + 1, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 48310-48319 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 48320-48329 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 48330-48339 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 48340-48349 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 48350-48359 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 48360-48369 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 48370-48379 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 48380-48389 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 48390-48399 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 48400-48409 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 48410-48419 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 48420-48429 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 48430-48439 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 48440-48449 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 48450-48459 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 48460-48469 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 48470-48479 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 48480-48489 + 1, 6, 5, 4, 3, 2, 1, 26, 25, 24, // 48490-48499 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 48500-48509 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 48510-48519 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 48520-48529 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 48530-48539 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 48540-48549 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 48550-48559 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 48560-48569 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 48570-48579 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 48580-48589 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 48590-48599 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 48600-48609 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 48610-48619 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 48620-48629 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 48630-48639 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 48640-48649 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 48650-48659 
+ 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 48660-48669 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 52, // 48670-48679 + 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, // 48680-48689 + 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 48690-48699 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 48700-48709 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 48710-48719 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 48720-48729 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 48730-48739 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 48740-48749 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 48750-48759 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 48760-48769 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 48770-48779 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 48780-48789 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 48790-48799 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 48800-48809 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 48810-48819 + 1, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 48820-48829 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 48830-48839 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 48840-48849 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 48850-48859 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 48860-48869 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 48870-48879 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 48880-48889 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 48890-48899 + 7, 6, 5, 4, 3, 2, 1, 40, 39, 38, // 48900-48909 + 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 48910-48919 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 48920-48929 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 48930-48939 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 48940-48949 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 48950-48959 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 48960-48969 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 48970-48979 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 48980-48989 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 48990-48999 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 49000-49009 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 49010-49019 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 49020-49029 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 49030-49039 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 49040-49049 + 7, 6, 5, 4, 
3, 2, 1, 12, 11, 10, // 49050-49059 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 49060-49069 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 49070-49079 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 49080-49089 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 49090-49099 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 49100-49109 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 49110-49119 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 49120-49129 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 49130-49139 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 49140-49149 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 49150-49159 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 49160-49169 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 49170-49179 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 49180-49189 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 49190-49199 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 49200-49209 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 49210-49219 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 49220-49229 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 49230-49239 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 49240-49249 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 49250-49259 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 49260-49269 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 18, // 49270-49279 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 49280-49289 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 49290-49299 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 49300-49309 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 49310-49319 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 49320-49329 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 24, // 49330-49339 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 49340-49349 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 49350-49359 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 22, // 49360-49369 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 49370-49379 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 49380-49389 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 49390-49399 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 49400-49409 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 49410-49419 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 49420-49429 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 49430-49439 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
// 49440-49449 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 49450-49459 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 49460-49469 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 49470-49479 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 49480-49489 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 49490-49499 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 49500-49509 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 49510-49519 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 49520-49529 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 49530-49539 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 49540-49549 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 38, // 49550-49559 + 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 49560-49569 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 49570-49579 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 49580-49589 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 49590-49599 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 49600-49609 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 49610-49619 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 49620-49629 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 24, // 49630-49639 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 49640-49649 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 49650-49659 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 49660-49669 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 49670-49679 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 49680-49689 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 49690-49699 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 49700-49709 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 49710-49719 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 49720-49729 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 49730-49739 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 49740-49749 + 7, 6, 5, 4, 3, 2, 1, 26, 25, 24, // 49750-49759 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 49760-49769 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 49770-49779 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 49780-49789 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 49790-49799 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 49800-49809 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 49810-49819 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 49820-49829 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 49830-49839 + 3, 2, 1, 10, 9, 
8, 7, 6, 5, 4, // 49840-49849 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 49850-49859 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 49860-49869 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 49870-49879 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 49880-49889 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 49890-49899 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 49900-49909 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 49910-49919 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 49920-49929 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 49930-49939 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 49940-49949 + 7, 6, 5, 4, 3, 2, 1, 34, 33, 32, // 49950-49959 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 49960-49969 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 49970-49979 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 49980-49989 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 49990-49999 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 50000-50009 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50010-50019 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 50020-50029 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 50030-50039 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 50040-50049 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 50050-50059 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 50060-50069 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 50070-50079 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 50080-50089 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 50090-50099 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50100-50109 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 50110-50119 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 50120-50129 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 50130-50139 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 50140-50149 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 50150-50159 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 50160-50169 + 7, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 50170-50179 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 50180-50189 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 50190-50199 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 50200-50209 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50210-50219 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 50220-50229 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 
50230-50239 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 50240-50249 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50250-50259 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 50260-50269 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 50270-50279 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 50280-50289 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 50290-50299 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50300-50309 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50310-50319 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 50320-50329 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 50330-50339 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 50340-50349 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 50350-50359 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 50360-50369 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 50370-50379 + 3, 2, 1, 4, 3, 2, 1, 24, 23, 22, // 50380-50389 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 50390-50399 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50400-50409 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 50410-50419 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 50420-50429 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50430-50439 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 50440-50449 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 50450-50459 + 1, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 50460-50469 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 50470-50479 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 50480-50489 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 50490-50499 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 50500-50509 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 50510-50519 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 50520-50529 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 50530-50539 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 50540-50549 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 50550-50559 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 50560-50569 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50570-50579 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 50580-50589 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 28, // 50590-50599 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 50600-50609 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 50610-50619 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 
50620-50629 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 50630-50639 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 50640-50649 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 50650-50659 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50660-50669 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 50670-50679 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 50680-50689 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 50690-50699 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 50700-50709 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 50710-50719 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 50720-50729 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50730-50739 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 50740-50749 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 50750-50759 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 50760-50769 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 50770-50779 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 32, // 50780-50789 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 50790-50799 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 50800-50809 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50810-50819 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 50820-50829 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 50830-50839 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 50840-50849 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 50850-50859 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 50860-50869 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 50870-50879 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50880-50889 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 50890-50899 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 50900-50909 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 50910-50919 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 50920-50929 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 50930-50939 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 50940-50949 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 50950-50959 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 50960-50969 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 50970-50979 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 50980-50989 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 50990-50999 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 51000-51009 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 
51010-51019 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 51020-51029 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 51030-51039 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 51040-51049 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 51050-51059 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 51060-51069 + 1, 38, 37, 36, 35, 34, 33, 32, 31, 30, // 51070-51079 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 51080-51089 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 51090-51099 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 51100-51109 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 51110-51119 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 51120-51129 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 51130-51139 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 51140-51149 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 51150-51159 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 51160-51169 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 51170-51179 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 51180-51189 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 51190-51199 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 51200-51209 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 51210-51219 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 51220-51229 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 51230-51239 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 51240-51249 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 51250-51259 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 51260-51269 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 51270-51279 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 51280-51289 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 51290-51299 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 51300-51309 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 51310-51319 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 51320-51329 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 51330-51339 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 12, // 51340-51349 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 51350-51359 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 51360-51369 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 51370-51379 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 51380-51389 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 51390-51399 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 
51400-51409 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 51410-51419 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 51420-51429 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 51430-51439 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 51440-51449 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 51450-51459 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 51460-51469 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 51470-51479 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 51480-51489 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 51490-51499 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 51500-51509 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 51510-51519 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 51520-51529 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 51530-51539 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 51540-51549 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 51550-51559 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 51560-51569 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 51570-51579 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 51580-51589 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 51590-51599 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 51600-51609 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 51610-51619 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 51620-51629 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 51630-51639 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 51640-51649 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 51650-51659 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 51660-51669 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 51670-51679 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 51680-51689 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 51690-51699 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 51700-51709 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 51710-51719 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 51720-51729 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 51730-51739 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 51740-51749 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 51750-51759 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 18, // 51760-51769 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 51770-51779 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 51780-51789 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 51790-51799 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 
51800-51809 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 51810-51819 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 51820-51829 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 51830-51839 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 51840-51849 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 51850-51859 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 51860-51869 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 51870-51879 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 51880-51889 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 51890-51899 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 51900-51909 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 51910-51919 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 51920-51929 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 51930-51939 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 51940-51949 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 51950-51959 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 51960-51969 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 51970-51979 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 51980-51989 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 51990-51999 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 52000-52009 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52010-52019 + 1, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 52020-52029 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 52030-52039 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52040-52049 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 52050-52059 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 52060-52069 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52070-52079 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 52080-52089 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 52090-52099 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 52100-52109 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52110-52119 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 52120-52129 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 52130-52139 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 52140-52149 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 52150-52159 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 52160-52169 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 52170-52179 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 52180-52189 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52190-52199 + 1, 22, 21, 20, 19, 18, 17, 16, 
15, 14, // 52200-52209 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 52210-52219 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 52220-52229 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 52230-52239 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 52240-52249 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 52250-52259 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 52260-52269 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 52270-52279 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 52280-52289 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52290-52299 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 52300-52309 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 52310-52319 + 1, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 52320-52329 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 52330-52339 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 52340-52349 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52350-52359 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 52360-52369 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 52370-52379 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 52380-52389 + 1, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 52390-52399 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 52400-52409 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 52410-52419 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 52420-52429 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 52430-52439 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 52440-52449 + 3, 2, 1, 4, 3, 2, 1, 32, 31, 30, // 52450-52459 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 52460-52469 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 52470-52479 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 52480-52489 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52490-52499 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52500-52509 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 52510-52519 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 52520-52529 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52530-52539 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 52540-52549 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 52550-52559 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 52560-52569 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 52570-52579 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 52580-52589 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 
52590-52599 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 52600-52609 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 52610-52619 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 52620-52629 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 28, // 52630-52639 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 52640-52649 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 52650-52659 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 52660-52669 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 52670-52679 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52680-52689 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 52690-52699 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 52700-52709 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52710-52719 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 52720-52729 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 52730-52739 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 52740-52749 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 52750-52759 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 52760-52769 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 52770-52779 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 52780-52789 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 52790-52799 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 52800-52809 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 52810-52819 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 52820-52829 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 52830-52839 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 52840-52849 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 52850-52859 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 52860-52869 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 52870-52879 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 52880-52889 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52890-52899 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 52900-52909 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 52910-52919 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 52920-52929 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 52930-52939 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 52940-52949 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 52950-52959 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 52960-52969 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 52970-52979 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 52980-52989 + 9, 8, 7, 6, 5, 
4, 3, 2, 1, 4, // 52990-52999 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 53000-53009 + 7, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 53010-53019 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 53020-53029 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 53030-53039 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 53040-53049 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 53050-53059 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 53060-53069 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 53070-53079 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 53080-53089 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 53090-53099 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 53100-53109 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 53110-53119 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 53120-53129 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 53130-53139 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 53140-53149 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 53150-53159 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 53160-53169 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 53170-53179 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 53180-53189 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 53190-53199 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 53200-53209 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 53210-53219 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 53220-53229 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 28, // 53230-53239 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 53240-53249 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 53250-53259 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 53260-53269 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 53270-53279 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 53280-53289 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 53290-53299 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 53300-53309 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 53310-53319 + 3, 2, 1, 4, 3, 2, 1, 26, 25, 24, // 53320-53329 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 53330-53339 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 53340-53349 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 53350-53359 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 53360-53369 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 53370-53379 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 
12, // 53380-53389 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 53390-53399 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 53400-53409 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 53410-53419 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 53420-53429 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 53430-53439 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 53440-53449 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 53450-53459 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 53460-53469 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 53470-53479 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 53480-53489 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 53490-53499 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 53500-53509 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 53510-53519 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 53520-53529 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 53530-53539 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 53540-53549 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 53550-53559 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 53560-53569 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 53570-53579 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 53580-53589 + 1, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 53590-53599 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 53600-53609 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 53610-53619 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 53620-53629 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 53630-53639 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 53640-53649 + 3, 2, 1, 4, 3, 2, 1, 24, 23, 22, // 53650-53659 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 53660-53669 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 53670-53679 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 53680-53689 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 53690-53699 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 53700-53709 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 53710-53719 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 53720-53729 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 53730-53739 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 53740-53749 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 53750-53759 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 53760-53769 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 
53770-53779 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 53780-53789 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 53790-53799 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 53800-53809 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 53810-53819 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 53820-53829 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 53830-53839 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 53840-53849 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 53850-53859 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 53860-53869 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 53870-53879 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 53880-53889 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 18, // 53890-53899 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 53900-53909 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 53910-53919 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 53920-53929 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 53930-53939 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 53940-53949 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 28, // 53950-53959 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 53960-53969 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 53970-53979 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 53980-53989 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 53990-53999 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54000-54009 + 1, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 54010-54019 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 54020-54029 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 54030-54039 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 54040-54049 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 54050-54059 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 54060-54069 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 54070-54079 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 54080-54089 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54090-54099 + 1, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 54100-54109 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54110-54119 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 54120-54129 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 54130-54139 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54140-54149 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 54150-54159 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 54160-54169 + 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, // 54170-54179 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 54180-54189 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 54190-54199 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 54200-54209 + 7, 6, 5, 4, 3, 2, 1, 34, 33, 32, // 54210-54219 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 54220-54229 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 54230-54239 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54240-54249 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 54250-54259 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 54260-54269 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 54270-54279 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 54280-54289 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 54290-54299 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54300-54309 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 54310-54319 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 54320-54329 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 54330-54339 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 54340-54349 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54350-54359 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 54360-54369 + 1, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 54370-54379 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 54380-54389 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54390-54399 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 54400-54409 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 54410-54419 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 54420-54429 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 54430-54439 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 20, // 54440-54449 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 54450-54459 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 54460-54469 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 54470-54479 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 54480-54489 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 54490-54499 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 54500-54509 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 54510-54519 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 54520-54529 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 54530-54539 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 54540-54549 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 54550-54559 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 
54560-54569 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 54570-54579 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 54580-54589 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54590-54599 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 54600-54609 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 54610-54619 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 54620-54629 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 54630-54639 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 54640-54649 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 54650-54659 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 54660-54669 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 30, // 54670-54679 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 54680-54689 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 54690-54699 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 54700-54709 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 54710-54719 + 1, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 54720-54729 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 54730-54739 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54740-54749 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 54750-54759 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 54760-54769 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 54770-54779 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 54780-54789 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 30, // 54790-54799 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 54800-54809 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 54810-54819 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 54820-54829 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 54830-54839 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54840-54849 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 54850-54859 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 54860-54869 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 54870-54879 + 1, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 54880-54889 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 54890-54899 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 54900-54909 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 54910-54919 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 54920-54929 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54930-54939 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 54940-54949 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 54950-54959 + 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 54960-54969 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 54970-54979 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 54980-54989 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 54990-54999 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 55000-55009 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55010-55019 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 55020-55029 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 55030-55039 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 55040-55049 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 55050-55059 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 55060-55069 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 24, // 55070-55079 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 55080-55089 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 55090-55099 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 55100-55109 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 55110-55119 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 55120-55129 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 55130-55139 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 55140-55149 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 55150-55159 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 55160-55169 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 55170-55179 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 55180-55189 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55190-55199 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 55200-55209 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 55210-55219 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 55220-55229 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 55230-55239 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 55240-55249 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 32, // 55250-55259 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 55260-55269 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 55270-55279 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55280-55289 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 55290-55299 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 55300-55309 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 55310-55319 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55320-55329 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 55330-55339 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 55340-55349 + 1, 22, 21, 20, 19, 
18, 17, 16, 15, 14, // 55350-55359 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 55360-55369 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 55370-55379 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 55380-55389 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 55390-55399 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55400-55409 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 55410-55419 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 55420-55429 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 55430-55439 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 55440-55449 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 55450-55459 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 55460-55469 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 55470-55479 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 55480-55489 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55490-55499 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55500-55509 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 55510-55519 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 55520-55529 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55530-55539 + 1, 6, 5, 4, 3, 2, 1, 32, 31, 30, // 55540-55549 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 55550-55559 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 55560-55569 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 55570-55579 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 55580-55589 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 55590-55599 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 55600-55609 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 55610-55619 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55620-55629 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 55630-55639 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 55640-55649 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55650-55659 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 55660-55669 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 55670-55679 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55680-55689 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 55690-55699 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55700-55709 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 55710-55719 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 55720-55729 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 55730-55739 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 
55740-55749 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 55750-55759 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 55760-55769 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 55770-55779 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 55780-55789 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 55790-55799 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 55800-55809 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 55810-55819 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 55820-55829 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 55830-55839 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 55840-55849 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 55850-55859 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55860-55869 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 55870-55879 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 55880-55889 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 55890-55899 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 55900-55909 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 55910-55919 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 55920-55929 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 55930-55939 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 55940-55949 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 55950-55959 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 55960-55969 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 55970-55979 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 55980-55989 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 55990-55999 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 30, // 56000-56009 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 56010-56019 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 56020-56029 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 56030-56039 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 56040-56049 + 3, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 56050-56059 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 56060-56069 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56070-56079 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 56080-56089 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 56090-56099 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 56100-56109 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 56110-56119 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 56120-56129 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 56130-56139 + 9, 8, 7, 6, 5, 4, 
3, 2, 1, 18, // 56140-56149 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 56150-56159 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 56160-56169 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 56170-56179 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 56180-56189 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 56190-56199 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 28, // 56200-56209 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 56210-56219 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 56220-56229 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 56230-56239 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 56240-56249 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 56250-56259 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 30, // 56260-56269 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 56270-56279 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 56280-56289 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 56290-56299 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56300-56309 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 56310-56319 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 56320-56329 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 56330-56339 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 56340-56349 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 56350-56359 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 56360-56369 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 56370-56379 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 56380-56389 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 56390-56399 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 56400-56409 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 56410-56419 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56420-56429 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 56430-56439 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 56440-56449 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 56450-56459 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 56460-56469 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 56470-56479 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 56480-56489 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56490-56499 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 56500-56509 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 56510-56519 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 56520-56529 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 56530-56539 + 3, 2, 1, 26, 
25, 24, 23, 22, 21, 20, // 56540-56549 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 56550-56559 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 56560-56569 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 56570-56579 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56580-56589 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 56590-56599 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56600-56609 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 56610-56619 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 56620-56629 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 56630-56639 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 56640-56649 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 56650-56659 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 56660-56669 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56670-56679 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 56680-56689 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56690-56699 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56700-56709 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 56710-56719 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56720-56729 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 56730-56739 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 56740-56749 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 56750-56759 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 56760-56769 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 56770-56779 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 56780-56789 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 56790-56799 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 56800-56809 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 56810-56819 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 56820-56829 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 56830-56839 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 56840-56849 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 56850-56859 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 56860-56869 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 56870-56879 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56880-56889 + 1, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 56890-56899 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 56900-56909 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56910-56919 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 56920-56929 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 
56930-56939 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 56940-56949 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 56950-56959 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 56960-56969 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 56970-56979 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 56980-56989 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 38, // 56990-56999 + 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 57000-57009 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 57010-57019 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 57020-57029 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 57030-57039 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 57040-57049 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 57050-57059 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 57060-57069 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 57070-57079 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 57080-57089 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 57090-57099 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 57100-57109 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 57110-57119 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 57120-57129 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 57130-57139 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 57140-57149 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 57150-57159 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 57160-57169 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 57170-57179 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 57180-57189 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 57190-57199 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 57200-57209 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 57210-57219 + 1, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 57220-57229 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 57230-57239 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 57240-57249 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 57250-57259 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 57260-57269 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 57270-57279 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 57280-57289 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 57290-57299 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 57300-57309 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 57310-57319 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 57320-57329 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, 
// 57330-57339 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 18, // 57340-57349 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 57350-57359 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 57360-57369 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 57370-57379 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 57380-57389 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 57390-57399 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 57400-57409 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 57410-57419 + 7, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 57420-57429 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 57430-57439 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 57440-57449 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 57450-57459 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 57460-57469 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 57470-57479 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 57480-57489 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 57490-57499 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 57500-57509 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 57510-57519 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 28, // 57520-57529 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 57530-57539 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 57540-57549 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 57550-57559 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 57560-57569 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 57570-57579 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 57580-57589 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 57590-57599 + 1, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 57600-57609 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 57610-57619 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 57620-57629 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 57630-57639 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 57640-57649 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 57650-57659 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 57660-57669 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 57670-57679 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 57680-57689 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 57690-57699 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 57700-57709 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 57710-57719 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 57720-57729 + 1, 6, 5, 
4, 3, 2, 1, 14, 13, 12, // 57730-57739 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 57740-57749 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 57750-57759 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 57760-57769 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 57770-57779 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 57780-57789 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 57790-57799 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 20, // 57800-57809 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 57810-57819 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 57820-57829 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 57830-57839 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 57840-57849 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 57850-57859 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 57860-57869 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 57870-57879 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 57880-57889 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 57890-57899 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 57900-57909 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 57910-57919 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 57920-57929 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 57930-57939 + 3, 2, 1, 4, 3, 2, 1, 26, 25, 24, // 57940-57949 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 57950-57959 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 57960-57969 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 57970-57979 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 57980-57989 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 57990-57999 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 58000-58009 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 58010-58019 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 58020-58029 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 58030-58039 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 58040-58049 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 58050-58059 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 58060-58069 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 58070-58079 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 58080-58089 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 58090-58099 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 58100-58109 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 58110-58119 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 
58120-58129 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 58130-58139 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 58140-58149 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 58150-58159 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 58160-58169 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 58170-58179 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 58180-58189 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 58190-58199 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 58200-58209 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 58210-58219 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 58220-58229 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 58230-58239 + 3, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 58240-58249 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 58250-58259 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 58260-58269 + 1, 38, 37, 36, 35, 34, 33, 32, 31, 30, // 58270-58279 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 58280-58289 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 58290-58299 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 58300-58309 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 58310-58319 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 58320-58329 + 7, 6, 5, 4, 3, 2, 1, 26, 25, 24, // 58330-58339 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 58340-58349 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 58350-58359 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 10, // 58360-58369 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 58370-58379 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 58380-58389 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 58390-58399 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 58400-58409 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 58410-58419 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 58420-58429 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 58430-58439 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 58440-58449 + 1, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 58450-58459 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 58460-58469 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 58470-58479 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 58480-58489 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 58490-58499 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 58500-58509 + 1, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 58510-58519 
+ 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 58520-58529 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 58530-58539 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 58540-58549 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 58550-58559 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 58560-58569 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 58570-58579 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 58580-58589 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 58590-58599 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 58600-58609 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 58610-58619 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 58620-58629 + 1, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 58630-58639 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 58640-58649 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 58650-58659 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 58660-58669 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 58670-58679 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 58680-58689 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 58690-58699 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 58700-58709 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 58710-58719 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 58720-58729 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 58730-58739 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 58740-58749 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 58750-58759 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 58760-58769 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 58770-58779 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 42, // 58780-58789 + 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 58790-58799 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 58800-58809 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 58810-58819 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 58820-58829 + 1, 58, 57, 56, 55, 54, 53, 52, 51, 50, // 58830-58839 + 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, // 58840-58849 + 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, // 58850-58859 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 58860-58869 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 58870-58879 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 58880-58889 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 58890-58899 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 
58900-58909 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 58910-58919 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 58920-58929 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 58930-58939 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 58940-58949 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 58950-58959 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 58960-58969 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 58970-58979 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 58980-58989 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 58990-58999 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 59000-59009 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59010-59019 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 59020-59029 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 59030-59039 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59040-59049 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 59050-59059 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 59060-59069 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 59070-59079 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 59080-59089 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 59090-59099 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 59100-59109 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 59110-59119 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 59120-59129 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59130-59139 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 59140-59149 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 59150-59159 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 59160-59169 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 59170-59179 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 59180-59189 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 59190-59199 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 59200-59209 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 59210-59219 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 59220-59229 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 59230-59239 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 59240-59249 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 59250-59259 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 59260-59269 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 59270-59279 + 1, 52, 51, 50, 49, 48, 47, 46, 45, 44, // 59280-59289 + 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, // 59290-59299 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 
59300-59309 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 59310-59319 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 59320-59329 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 59330-59339 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59340-59349 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 59350-59359 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 59360-59369 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 59370-59379 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 59380-59389 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 59390-59399 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 59400-59409 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 59410-59419 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 59420-59429 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59430-59439 + 1, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 59440-59449 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 59450-59459 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 59460-59469 + 1, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 59470-59479 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 59480-59489 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 59490-59499 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 59500-59509 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 59510-59519 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 59520-59529 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 59530-59539 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 59540-59549 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 59550-59559 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 59560-59569 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59570-59579 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 59580-59589 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 59590-59599 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59600-59609 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 59610-59619 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 22, // 59620-59629 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 59630-59639 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59640-59649 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 59650-59659 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 59660-59669 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 59670-59679 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 59680-59689 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 59690-59699 + 7, 6, 5, 4, 3, 2, 1, 
16, 15, 14, // 59700-59709 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 59710-59719 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 59720-59729 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 59730-59739 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 59740-59749 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 59750-59759 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59760-59769 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 59770-59779 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59780-59789 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 59790-59799 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 59800-59809 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 59810-59819 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 59820-59829 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 59830-59839 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 59840-59849 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 59850-59859 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 59860-59869 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 59870-59879 + 7, 6, 5, 4, 3, 2, 1, 34, 33, 32, // 59880-59889 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 59890-59899 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 59900-59909 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59910-59919 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 59920-59929 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 59930-59939 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59940-59949 + 1, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 59950-59959 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59960-59969 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 59970-59979 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 59980-59989 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 59990-59999 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 60000-60009 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 60010-60019 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 60020-60029 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 60030-60039 + 1, 36, 35, 34, 33, 32, 31, 30, 29, 28, // 60040-60049 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 60050-60059 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 60060-60069 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 60070-60079 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 60080-60089 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 
2, // 60090-60099 + 1, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 60100-60109 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 60110-60119 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 60120-60129 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 60130-60139 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 60140-60149 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 60150-60159 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 40, // 60160-60169 + 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, // 60170-60179 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 60180-60189 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 60190-60199 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 60200-60209 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 60210-60219 + 3, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 60220-60229 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 60230-60239 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 60240-60249 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 60250-60259 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 60260-60269 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 60270-60279 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 60280-60289 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 60290-60299 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 60300-60309 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 60310-60319 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 60320-60329 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 60330-60339 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 60340-60349 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 60350-60359 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 60360-60369 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 60370-60379 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 60380-60389 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 60390-60399 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 60400-60409 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 60410-60419 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 60420-60429 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 60430-60439 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 60440-60449 + 7, 6, 5, 4, 3, 2, 1, 36, 35, 34, // 60450-60459 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 60460-60469 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 60470-60479 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 
60480-60489 + 3, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 60490-60499 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 60500-60509 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 60510-60519 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 60520-60529 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 50, // 60530-60539 + 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, // 60540-60549 + 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, // 60550-60559 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 60560-60569 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 60570-60579 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 60580-60589 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 60590-60599 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 60600-60609 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 60610-60619 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 60620-60629 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 60630-60639 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 60640-60649 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 60650-60659 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 60660-60669 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 60670-60679 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 60680-60689 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 60690-60699 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 60700-60709 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 60710-60719 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 60720-60729 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 60730-60739 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 60740-60749 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 60750-60759 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 60760-60769 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 60770-60779 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 60780-60789 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 60790-60799 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 60800-60809 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 60810-60819 + 1, 38, 37, 36, 35, 34, 33, 32, 31, 30, // 60820-60829 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 60830-60839 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 60840-60849 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 60850-60859 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 60860-60869 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 60870-60879 + 7, 6, 5, 
4, 3, 2, 1, 2, 1, 10, // 60880-60889 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 60890-60899 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 60900-60909 + 3, 2, 1, 4, 3, 2, 1, 2, 1, 4, // 60910-60919 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 60920-60929 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 60930-60939 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 60940-60949 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 60950-60959 + 1, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 60960-60969 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 60970-60979 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 60980-60989 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 60990-60999 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 61000-61009 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 61010-61019 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 61020-61029 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 61030-61039 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 61040-61049 + 1, 6, 5, 4, 3, 2, 1, 34, 33, 32, // 61050-61059 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 61060-61069 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 61070-61079 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 61080-61089 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 22, // 61090-61099 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 61100-61109 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 61110-61119 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 61120-61129 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 61130-61139 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 61140-61149 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 61150-61159 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 42, // 61160-61169 + 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, // 61170-61179 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 61180-61189 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 61190-61199 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 61200-61209 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 61210-61219 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 61220-61229 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 61230-61239 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 61240-61249 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 61250-61259 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 61260-61269 + 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, // 61270-61279 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 61280-61289 + 1, 6, 5, 4, 3, 2, 1, 34, 33, 32, // 61290-61299 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 61300-61309 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 61310-61319 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 61320-61329 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 61330-61339 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 61340-61349 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 61350-61359 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 61360-61369 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 61370-61379 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 61380-61389 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 61390-61399 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 8, // 61400-61409 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 61410-61419 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 61420-61429 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 61430-61439 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 61440-61449 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 61450-61459 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 61460-61469 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 61470-61479 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 61480-61489 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 61490-61499 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 61500-61509 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 24, // 61510-61519 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 61520-61529 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 61530-61539 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 61540-61549 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 2, // 61550-61559 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 61560-61569 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 61570-61579 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 61580-61589 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 61590-61599 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 61600-61609 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 61610-61619 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 61620-61629 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 61630-61639 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 61640-61649 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 61650-61659 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 
61660-61669 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 61670-61679 + 1, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 61680-61689 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 61690-61699 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 61700-61709 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 61710-61719 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 61720-61729 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 61730-61739 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 61740-61749 + 1, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 61750-61759 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 61760-61769 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 61770-61779 + 1, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 61780-61789 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 61790-61799 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 61800-61809 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 61810-61819 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 61820-61829 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 61830-61839 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 61840-61849 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 61850-61859 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 61860-61869 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 30, // 61870-61879 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 61880-61889 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 61890-61899 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 61900-61909 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 61910-61919 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 61920-61929 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 61930-61939 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 61940-61949 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 61950-61959 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 61960-61969 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 61970-61979 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 61980-61989 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 61990-61999 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 62000-62009 + 1, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 62010-62019 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 62020-62029 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 62030-62039 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 62040-62049 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 62050-62059 + 11, 10, 
9, 8, 7, 6, 5, 4, 3, 2, // 62060-62069 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 62070-62079 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 62080-62089 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, // 62090-62099 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 62100-62109 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 62110-62119 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 62120-62129 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 62130-62139 + 1, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 62140-62149 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 62150-62159 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 62160-62169 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 62170-62179 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 62180-62189 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 62190-62199 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 62200-62209 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 62210-62219 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 62220-62229 + 3, 2, 1, 40, 39, 38, 37, 36, 35, 34, // 62230-62239 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 62240-62249 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 62250-62259 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 62260-62269 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 62270-62279 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 62280-62289 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 62290-62299 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 62300-62309 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 62310-62319 + 3, 2, 1, 4, 3, 2, 1, 20, 19, 18, // 62320-62329 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 62330-62339 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 62340-62349 + 1, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 62350-62359 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 62360-62369 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 62370-62379 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 62380-62389 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 62390-62399 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 62400-62409 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 62410-62419 + 3, 2, 1, 36, 35, 34, 33, 32, 31, 30, // 62420-62429 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 62430-62439 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 
62440-62449 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 62450-62459 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 62460-62469 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 62470-62479 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 62480-62489 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 62490-62499 + 1, 6, 5, 4, 3, 2, 1, 26, 25, 24, // 62500-62509 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 62510-62519 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 62520-62529 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 62530-62539 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 62540-62549 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 62550-62559 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 62560-62569 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 62570-62579 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 62580-62589 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 62590-62599 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 62600-62609 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 62610-62619 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 62620-62629 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 62630-62639 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 62640-62649 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 24, // 62650-62659 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 62660-62669 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 62670-62679 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 62680-62689 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 62690-62699 + 1, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 62700-62709 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 62710-62719 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 62720-62729 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 62730-62739 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 62740-62749 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 62750-62759 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 62760-62769 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 62770-62779 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 62780-62789 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 62790-62799 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 62800-62809 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 62810-62819 + 7, 6, 5, 4, 3, 2, 1, 24, 23, 22, // 62820-62829 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 62830-62839 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
// 62840-62849 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 62850-62859 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 62860-62869 + 3, 2, 1, 24, 23, 22, 21, 20, 19, 18, // 62870-62879 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 62880-62889 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 62890-62899 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 62900-62909 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 62910-62919 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 62920-62929 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 30, // 62930-62939 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 62940-62949 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 62950-62959 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 62960-62969 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 62970-62979 + 1, 2, 1, 4, 3, 2, 1, 2, 1, 40, // 62980-62989 + 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, // 62990-62999 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 63000-63009 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 63010-63019 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 63020-63029 + 1, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 63030-63039 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 63040-63049 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 63050-63059 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 63060-63069 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 18, // 63070-63079 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 63080-63089 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 63090-63099 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 63100-63109 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 63110-63119 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 63120-63129 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 63130-63139 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 30, // 63140-63149 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 63150-63159 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 63160-63169 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 63170-63179 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 63180-63189 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 63190-63199 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 63200-63209 + 1, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 63210-63219 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 63220-63229 + 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, // 63230-63239 + 1, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 63240-63249 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 63250-63259 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 63260-63269 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 63270-63279 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 63280-63289 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, // 63290-63299 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 63300-63309 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 63310-63319 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 63320-63329 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 63330-63339 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 63340-63349 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 63350-63359 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 63360-63369 + 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 63370-63379 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 63380-63389 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 63390-63399 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 63400-63409 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 63410-63419 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 63420-63429 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 63430-63439 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 63440-63449 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 63450-63459 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 63460-63469 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 63470-63479 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 63480-63489 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 22, // 63490-63499 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 63500-63509 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 63510-63519 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 63520-63529 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 63530-63539 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 63540-63549 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 63550-63559 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 63560-63569 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 63570-63579 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 63580-63589 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 63590-63599 + 1, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 63600-63609 + 1, 6, 5, 4, 3, 2, 1, 12, 11, 10, // 63610-63619 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 63620-63629 + 17, 16, 15, 14, 13, 12, 11, 10, 
9, 8, // 63630-63639 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 10, // 63640-63649 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 63650-63659 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 63660-63669 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 63670-63679 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 63680-63689 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 63690-63699 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 63700-63709 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 63710-63719 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 63720-63729 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 63730-63739 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 63740-63749 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 63750-63759 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 63760-63769 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 63770-63779 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 63780-63789 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 63790-63799 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 63800-63809 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 63810-63819 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 63820-63829 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 63830-63839 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 63840-63849 + 3, 2, 1, 4, 3, 2, 1, 6, 5, 4, // 63850-63859 + 3, 2, 1, 38, 37, 36, 35, 34, 33, 32, // 63860-63869 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 63870-63879 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 63880-63889 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 63890-63899 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 63900-63909 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 63910-63919 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 20, // 63920-63929 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 63930-63939 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 28, // 63940-63949 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 63950-63959 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 63960-63969 + 7, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 63970-63979 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 63980-63989 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 63990-63999 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 64000-64009 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 14, // 64010-64019 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 64020-64029 + 3, 2, 1, 4, 3, 2, 
1, 26, 25, 24, // 64030-64039 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 64040-64049 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 64050-64059 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 64060-64069 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64070-64079 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64080-64089 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 64090-64099 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 64100-64109 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 64110-64119 + 3, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 64120-64129 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 64130-64139 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64140-64149 + 1, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 64150-64159 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64160-64169 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 64170-64179 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 28, // 64180-64189 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 64190-64199 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 64200-64209 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 64210-64219 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 64220-64229 + 1, 6, 5, 4, 3, 2, 1, 34, 33, 32, // 64230-64239 + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, // 64240-64249 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 64250-64259 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64260-64269 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 64270-64279 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 64280-64289 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64290-64299 + 1, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 64300-64309 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 64310-64319 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 64320-64329 + 3, 2, 1, 40, 39, 38, 37, 36, 35, 34, // 64330-64339 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 64340-64349 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 64350-64359 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 64360-64369 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 64370-64379 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 64380-64389 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 64390-64399 + 3, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 64400-64409 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 64410-64419 + 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 64420-64429 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 64430-64439 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64440-64449 + 1, 2, 1, 30, 29, 28, 27, 26, 25, 24, // 64450-64459 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 64460-64469 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 64470-64479 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 10, // 64480-64489 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 64490-64499 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 64500-64509 + 3, 2, 1, 40, 39, 38, 37, 36, 35, 34, // 64510-64519 + 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, // 64520-64529 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 64530-64539 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 64540-64549 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 64550-64559 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 64560-64569 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 64570-64579 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64580-64589 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64590-64599 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 64600-64609 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 64610-64619 + 1, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 64620-64629 + 3, 2, 1, 28, 27, 26, 25, 24, 23, 22, // 64630-64639 + 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, // 64640-64649 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64650-64659 + 1, 2, 1, 4, 3, 2, 1, 12, 11, 10, // 64660-64669 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 64670-64679 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 64680-64689 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 64690-64699 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 8, // 64700-64709 + 7, 6, 5, 4, 3, 2, 1, 30, 29, 28, // 64710-64719 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 64720-64729 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 64730-64739 + 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, // 64740-64749 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 64750-64759 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 64760-64769 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64770-64779 + 1, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 64780-64789 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 64790-64799 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64800-64809 + 1, 6, 5, 
4, 3, 2, 1, 32, 31, 30, // 64810-64819 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 64820-64829 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 64830-64839 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 64840-64849 + 3, 2, 1, 18, 17, 16, 15, 14, 13, 12, // 64850-64859 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64860-64869 + 1, 6, 5, 4, 3, 2, 1, 2, 1, 12, // 64870-64879 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64880-64889 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64890-64899 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 64900-64909 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 64910-64919 + 1, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 64920-64929 + 7, 6, 5, 4, 3, 2, 1, 14, 13, 12, // 64930-64939 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 64940-64949 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 64950-64959 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 28, // 64960-64969 + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, // 64970-64979 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 64980-64989 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 64990-64999 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 65000-65009 + 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 65010-65019 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 4, // 65020-65029 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 65030-65039 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 65040-65049 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 65050-65059 + 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, // 65060-65069 + 1, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 65070-65079 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, // 65080-65089 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 65090-65099 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 65100-65109 + 1, 8, 7, 6, 5, 4, 3, 2, 1, 4, // 65110-65119 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 12, // 65120-65129 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 65130-65139 + 1, 6, 5, 4, 3, 2, 1, 20, 19, 18, // 65140-65149 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 65150-65159 + 7, 6, 5, 4, 3, 2, 1, 4, 3, 2, // 65160-65169 + 1, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 65170-65179 + 3, 2, 1, 20, 19, 18, 17, 16, 15, 14, // 65180-65189 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 65190-65199 + 3, 2, 1, 10, 9, 8, 7, 6, 5, 4, // 
65200-65209 + 3, 2, 1, 26, 25, 24, 23, 22, 21, 20, // 65210-65219 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 65220-65229 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 65230-65239 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 65240-65249 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 65250-65259 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 18, // 65260-65269 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 65270-65279 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 65280-65289 + 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, // 65290-65299 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, // 65300-65309 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 65310-65319 + 3, 2, 1, 4, 3, 2, 1, 26, 25, 24, // 65320-65329 + 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, // 65330-65339 + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 65340-65349 + 3, 2, 1, 4, 3, 2, 1, 14, 13, 12, // 65350-65359 + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 65360-65369 + 1, 10, 9, 8, 7, 6, 5, 4, 3, 2, // 65370-65379 + 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, // 65380-65389 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 65390-65399 + 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, // 65400-65409 + 3, 2, 1, 6, 5, 4, 3, 2, 1, 4, // 65410-65419 + 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, // 65420-65429 + 7, 6, 5, 4, 3, 2, 1, 10, 9, 8, // 65430-65439 + 7, 6, 5, 4, 3, 2, 1, 2, 1, 30, // 65440-65449 + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, // 65450-65459 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 65460-65469 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 18, // 65470-65479 + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, // 65480-65489 + 7, 6, 5, 4, 3, 2, 1, 22, 21, 20, // 65490-65499 + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, // 65500-65509 + 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, // 65510-65519 + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 65520-65529 + 0, 0, 0, 0, 0, 0, + } + + lohi [256]struct{ lo, hi int } +) + +func init() { + for i, v := range liars { + blk := v >> 24 + x := &lohi[blk] + if x.lo == 0 || i < x.lo { + x.lo = i + } + if i > x.hi { + x.hi = i + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/test_deps.go 
b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/test_deps.go new file mode 100644 index 00000000..40054dca --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/mathutil/test_deps.go @@ -0,0 +1,11 @@ +// Copyright (c) 2014 The mathutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mathutil + +// Pull test dependencies too. +// Enables easy 'go test X' after 'go get X' +import ( +// nothing yet +) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/sortutil/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/sortutil/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/sortutil/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/sortutil/README b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/sortutil/README new file mode 100644 index 00000000..557d3ced --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/sortutil/README @@ -0,0 +1,4 @@ +Packages in this repository: + +Install: $ go get github.com/cznic/sortutil +Godocs: http://gopkgdoc.appspot.com/pkg/github.com/cznic/sortutil diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/sortutil/all_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/sortutil/all_test.go new file mode 100644 index 00000000..c8bb3c10 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/sortutil/all_test.go @@ -0,0 +1,360 @@ +// Copyright 2014 The sortutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sortutil + +import ( + "fmt" + "math" + "path" + "runtime" + "sort" + "strings" + "testing" + + "camlistore.org/third_party/github.com/cznic/mathutil" +) + +func dbg(s string, va ...interface{}) { + if s == "" { + s = strings.Repeat("%v ", len(va)) + } + _, fn, fl, _ := runtime.Caller(1) + fmt.Printf("dbg %s:%d: ", path.Base(fn), fl) + fmt.Printf(s, va...) 
+ fmt.Println() +} + +func caller(s string, va ...interface{}) { + _, fn, fl, _ := runtime.Caller(2) + fmt.Printf("caller: %s:%d: ", path.Base(fn), fl) + fmt.Printf(s, va...) + fmt.Println() + _, fn, fl, _ = runtime.Caller(1) + fmt.Printf("\tcallee: %s:%d: ", path.Base(fn), fl) + fmt.Println() +} + +func use(...interface{}) {} + +func TestByteSlice(t *testing.T) { + const N = 1e4 + s := make(ByteSlice, N) + for i := range s { + s[i] = byte(i) ^ 0x55 + } + s.Sort() + if !sort.IsSorted(s) { + t.Fatal(false) + } +} + +func TestSearchBytes(t *testing.T) { + const N = 1e1 + s := make(ByteSlice, N) + for i := range s { + s[i] = byte(2 * i) + } + if g, e := SearchBytes(s, 12), 6; g != e { + t.Fatal(g, e) + } +} + +func TestFloat32Slice(t *testing.T) { + const N = 1e4 + s := make(Float32Slice, N) + for i := range s { + s[i] = float32(i ^ 0x55aa55aa) + } + s.Sort() + if !sort.IsSorted(s) { + t.Fatal(false) + } +} + +func TestSearchFloat32s(t *testing.T) { + const N = 1e4 + s := make(Float32Slice, N) + for i := range s { + s[i] = float32(2 * i) + } + if g, e := SearchFloat32s(s, 12), 6; g != e { + t.Fatal(g, e) + } +} + +func TestInt8Slice(t *testing.T) { + const N = 1e4 + s := make(Int8Slice, N) + for i := range s { + s[i] = int8(i) ^ 0x55 + } + s.Sort() + if !sort.IsSorted(s) { + t.Fatal(false) + } +} + +func TestSearchInt8s(t *testing.T) { + const N = 1e1 + s := make(Int8Slice, N) + for i := range s { + s[i] = int8(2 * i) + } + if g, e := SearchInt8s(s, 12), 6; g != e { + t.Fatal(g, e) + } +} + +func TestInt16Slice(t *testing.T) { + const N = 1e4 + s := make(Int16Slice, N) + for i := range s { + s[i] = int16(i) ^ 0x55aa + } + s.Sort() + if !sort.IsSorted(s) { + t.Fatal(false) + } +} + +func TestSearchInt16s(t *testing.T) { + const N = 1e4 + s := make(Int16Slice, N) + for i := range s { + s[i] = int16(2 * i) + } + if g, e := SearchInt16s(s, 12), 6; g != e { + t.Fatal(g, e) + } +} + +func TestInt32Slice(t *testing.T) { + const N = 1e4 + s := make(Int32Slice, N) + for i := 
range s { + s[i] = int32(i) ^ 0x55aa55aa + } + s.Sort() + if !sort.IsSorted(s) { + t.Fatal(false) + } +} + +func TestSearchInt32s(t *testing.T) { + const N = 1e4 + s := make(Int32Slice, N) + for i := range s { + s[i] = int32(2 * i) + } + if g, e := SearchInt32s(s, 12), 6; g != e { + t.Fatal(g, e) + } +} + +func TestInt64Slice(t *testing.T) { + const N = 1e4 + s := make(Int64Slice, N) + for i := range s { + s[i] = int64(i) ^ 0x55aa55aa55aa55aa + } + s.Sort() + if !sort.IsSorted(s) { + t.Fatal(false) + } +} + +func TestSearchInt64s(t *testing.T) { + const N = 1e4 + s := make(Int64Slice, N) + for i := range s { + s[i] = int64(2 * i) + } + if g, e := SearchInt64s(s, 12), 6; g != e { + t.Fatal(g, e) + } +} + +func TestUintSlice(t *testing.T) { + const N = 1e4 + s := make(UintSlice, N) + for i := range s { + s[i] = uint(i) ^ 0x55aa55aa + } + s.Sort() + if !sort.IsSorted(s) { + t.Fatal(false) + } +} + +func TestSearchUints(t *testing.T) { + const N = 1e4 + s := make(UintSlice, N) + for i := range s { + s[i] = uint(2 * i) + } + if g, e := SearchUints(s, 12), 6; g != e { + t.Fatal(g, e) + } +} + +func TestUint16Slice(t *testing.T) { + const N = 1e4 + s := make(Uint16Slice, N) + for i := range s { + s[i] = uint16(i) ^ 0x55aa + } + s.Sort() + if !sort.IsSorted(s) { + t.Fatal(false) + } +} + +func TestSearchUint16s(t *testing.T) { + const N = 1e4 + s := make(Uint16Slice, N) + for i := range s { + s[i] = uint16(2 * i) + } + if g, e := SearchUint16s(s, 12), 6; g != e { + t.Fatal(g, e) + } +} + +func TestUint32Slice(t *testing.T) { + const N = 1e4 + s := make(Uint32Slice, N) + for i := range s { + s[i] = uint32(i) ^ 0x55aa55aa + } + s.Sort() + if !sort.IsSorted(s) { + t.Fatal(false) + } +} + +func TestSearchUint32s(t *testing.T) { + const N = 1e4 + s := make(Uint32Slice, N) + for i := range s { + s[i] = uint32(2 * i) + } + if g, e := SearchUint32s(s, 12), 6; g != e { + t.Fatal(g, e) + } +} + +func TestUint64Slice(t *testing.T) { + const N = 1e4 + s := make(Uint64Slice, N) + for i 
:= range s { + s[i] = uint64(i) ^ 0x55aa55aa55aa55aa + } + s.Sort() + if !sort.IsSorted(s) { + t.Fatal(false) + } +} + +func TestSearchUint64s(t *testing.T) { + const N = 1e4 + s := make(Uint64Slice, N) + for i := range s { + s[i] = uint64(2 * i) + } + if g, e := SearchUint64s(s, 12), 6; g != e { + t.Fatal(g, e) + } +} + +func TestRuneSlice(t *testing.T) { + const N = 1e4 + s := make(RuneSlice, N) + for i := range s { + s[i] = rune(i ^ 0x55aa55aa) + } + s.Sort() + if !sort.IsSorted(s) { + t.Fatal(false) + } +} + +func TestSearchRunes(t *testing.T) { + const N = 1e4 + s := make(RuneSlice, N) + for i := range s { + s[i] = rune(2 * i) + } + if g, e := SearchRunes(s, rune('\x0c')), 6; g != e { + t.Fatal(g, e) + } +} + +func dedupe(a []int) (r []int) { + a = append([]int(nil), a...) + if len(a) < 2 { + return a + } + + sort.Ints(a) + if a[0] < 0 { + panic("internal error") + } + + last := -1 + for _, v := range a { + if v != last { + r = append(r, v) + last = v + } + } + return r +} + +func TestDedup(t *testing.T) { + a := []int{} + n := Dedupe(sort.IntSlice(a)) + if g, e := n, 0; g != e { + t.Fatal(g, e) + } + + if g, e := len(a), 0; g != e { + t.Fatal(g, e) + } + + for c := 1; c <= 7; c++ { + in := make([]int, c) + lim := int(mathutil.ModPowUint32(uint32(c), uint32(c), math.MaxUint32)) + for n := 0; n < lim; n++ { + m := n + for i := range in { + in[i] = m % c + m /= c + } + in0 := append([]int(nil), in...) 
+ out0 := dedupe(in) + n := Dedupe(sort.IntSlice(in)) + if g, e := n, len(out0); g != e { + t.Fatalf("n %d, exp %d, in0 %v, in %v, out0 %v", g, e, in0, in, out0) + } + + for i, v := range out0 { + if g, e := in[i], v; g != e { + t.Fatalf("n %d, in0 %v, in %v, out0 %v", n, in0, in, out0) + } + } + } + } +} + +func ExampleDedupe() { + a := []int{4, 1, 2, 1, 3, 4, 2} + fmt.Println(a[:Dedupe(sort.IntSlice(a))]) + + b := []string{"foo", "bar", "baz", "bar", "foo", "qux", "qux"} + fmt.Println(b[:Dedupe(sort.StringSlice(b))]) + // Output: + // [1 2 3 4] + // [bar baz foo qux] +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/sortutil/sortutil.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/sortutil/sortutil.go new file mode 100644 index 00000000..c0ced542 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/sortutil/sortutil.go @@ -0,0 +1,227 @@ +// Copyright 2014 The sortutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sortutil provides utilities supplementing the standard 'sort' package. +package sortutil + +import "sort" + +// ByteSlice attaches the methods of sort.Interface to []byte, sorting in increasing order. +type ByteSlice []byte + +func (s ByteSlice) Len() int { return len(s) } +func (s ByteSlice) Less(i, j int) bool { return s[i] < s[j] } +func (s ByteSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Sort is a convenience method. +func (s ByteSlice) Sort() { + sort.Sort(s) +} + +// SearchBytes searches for x in a sorted slice of bytes and returns the index +// as specified by sort.Search. The slice must be sorted in ascending order. +func SearchBytes(a []byte, x byte) int { + return sort.Search(len(a), func(i int) bool { return a[i] >= x }) +} + +// Float32Slice attaches the methods of sort.Interface to []float32, sorting in increasing order. 
+type Float32Slice []float32 + +func (s Float32Slice) Len() int { return len(s) } +func (s Float32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s Float32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Sort is a convenience method. +func (s Float32Slice) Sort() { + sort.Sort(s) +} + +// SearchFloat32s searches for x in a sorted slice of float32 and returns the index +// as specified by sort.Search. The slice must be sorted in ascending order. +func SearchFloat32s(a []float32, x float32) int { + return sort.Search(len(a), func(i int) bool { return a[i] >= x }) +} + +// Int8Slice attaches the methods of sort.Interface to []int8, sorting in increasing order. +type Int8Slice []int8 + +func (s Int8Slice) Len() int { return len(s) } +func (s Int8Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s Int8Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Sort is a convenience method. +func (s Int8Slice) Sort() { + sort.Sort(s) +} + +// SearchInt8s searches for x in a sorted slice of int8 and returns the index +// as specified by sort.Search. The slice must be sorted in ascending order. +func SearchInt8s(a []int8, x int8) int { + return sort.Search(len(a), func(i int) bool { return a[i] >= x }) +} + +// Int16Slice attaches the methods of sort.Interface to []int16, sorting in increasing order. +type Int16Slice []int16 + +func (s Int16Slice) Len() int { return len(s) } +func (s Int16Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s Int16Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Sort is a convenience method. +func (s Int16Slice) Sort() { + sort.Sort(s) +} + +// SearchInt16s searches for x in a sorted slice of int16 and returns the index +// as specified by sort.Search. The slice must be sorted in ascending order. +func SearchInt16s(a []int16, x int16) int { + return sort.Search(len(a), func(i int) bool { return a[i] >= x }) +} + +// Int32Slice attaches the methods of sort.Interface to []int32, sorting in increasing order. 
+type Int32Slice []int32 + +func (s Int32Slice) Len() int { return len(s) } +func (s Int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s Int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Sort is a convenience method. +func (s Int32Slice) Sort() { + sort.Sort(s) +} + +// SearchInt32s searches for x in a sorted slice of int32 and returns the index +// as specified by sort.Search. The slice must be sorted in ascending order. +func SearchInt32s(a []int32, x int32) int { + return sort.Search(len(a), func(i int) bool { return a[i] >= x }) +} + +// Int64Slice attaches the methods of sort.Interface to []int64, sorting in increasing order. +type Int64Slice []int64 + +func (s Int64Slice) Len() int { return len(s) } +func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s Int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Sort is a convenience method. +func (s Int64Slice) Sort() { + sort.Sort(s) +} + +// SearchInt64s searches for x in a sorted slice of int64 and returns the index +// as specified by sort.Search. The slice must be sorted in ascending order. +func SearchInt64s(a []int64, x int64) int { + return sort.Search(len(a), func(i int) bool { return a[i] >= x }) +} + +// UintSlice attaches the methods of sort.Interface to []uint, sorting in increasing order. +type UintSlice []uint + +func (s UintSlice) Len() int { return len(s) } +func (s UintSlice) Less(i, j int) bool { return s[i] < s[j] } +func (s UintSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Sort is a convenience method. +func (s UintSlice) Sort() { + sort.Sort(s) +} + +// SearchUints searches for x in a sorted slice of uints and returns the index +// as specified by sort.Search. The slice must be sorted in ascending order. +func SearchUints(a []uint, x uint) int { + return sort.Search(len(a), func(i int) bool { return a[i] >= x }) +} + +// Uint16Slice attaches the methods of sort.Interface to []uint16, sorting in increasing order. 
+type Uint16Slice []uint16 + +func (s Uint16Slice) Len() int { return len(s) } +func (s Uint16Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s Uint16Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Sort is a convenience method. +func (s Uint16Slice) Sort() { + sort.Sort(s) +} + +// SearchUint16s searches for x in a sorted slice of uint16 and returns the index +// as specified by sort.Search. The slice must be sorted in ascending order. +func SearchUint16s(a []uint16, x uint16) int { + return sort.Search(len(a), func(i int) bool { return a[i] >= x }) +} + +// Uint32Slice attaches the methods of sort.Interface to []uint32, sorting in increasing order. +type Uint32Slice []uint32 + +func (s Uint32Slice) Len() int { return len(s) } +func (s Uint32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s Uint32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Sort is a convenience method. +func (s Uint32Slice) Sort() { + sort.Sort(s) +} + +// SearchUint32s searches for x in a sorted slice of uint32 and returns the index +// as specified by sort.Search. The slice must be sorted in ascending order. +func SearchUint32s(a []uint32, x uint32) int { + return sort.Search(len(a), func(i int) bool { return a[i] >= x }) +} + +// Uint64Slice attaches the methods of sort.Interface to []uint64, sorting in increasing order. +type Uint64Slice []uint64 + +func (s Uint64Slice) Len() int { return len(s) } +func (s Uint64Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s Uint64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Sort is a convenience method. +func (s Uint64Slice) Sort() { + sort.Sort(s) +} + +// SearchUint64s searches for x in a sorted slice of uint64 and returns the index +// as specified by sort.Search. The slice must be sorted in ascending order. 
+func SearchUint64s(a []uint64, x uint64) int { + return sort.Search(len(a), func(i int) bool { return a[i] >= x }) +} + +// RuneSlice attaches the methods of sort.Interface to []rune, sorting in increasing order. +type RuneSlice []rune + +func (s RuneSlice) Len() int { return len(s) } +func (s RuneSlice) Less(i, j int) bool { return s[i] < s[j] } +func (s RuneSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Sort is a convenience method. +func (s RuneSlice) Sort() { + sort.Sort(s) +} + +// SearchRunes searches for x in a sorted slice of uint64 and returns the index +// as specified by sort.Search. The slice must be sorted in ascending order. +func SearchRunes(a []rune, x rune) int { + return sort.Search(len(a), func(i int) bool { return a[i] >= x }) +} + +// Dedupe returns n, the number of distinct elements in data. The resulting +// elements are sorted in elements [0, n) or data[:n] for a slice. +func Dedupe(data sort.Interface) (n int) { + if n = data.Len(); n < 2 { + return n + } + + sort.Sort(data) + a, b := 0, 1 + for b < n { + if data.Less(a, b) { + a++ + if a != b { + data.Swap(a, b) + } + } + b++ + } + return a + 1 +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/LICENSE new file mode 100644 index 00000000..65d761bc --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/Makefile b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/Makefile new file mode 100644 index 00000000..8ec4e9d6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/Makefile @@ -0,0 +1,22 @@ +all: + go fmt + go test -i + go test + go install + go vet + make todo + +todo: + @grep -n ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alnum:]] *.go || true + @grep -n TODO *.go || true + @grep -n BUG *.go || true + @grep -n println *.go || true + +clean: + rm -f *~ cov cov.html + +gocov: + gocov test $(COV) | gocov-html > cov.html + +bench: + go test -run NONE -bench B diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/README.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/README.md new file mode 100644 index 00000000..6fc6f68a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/README.md @@ -0,0 +1,9 @@ +zappy +===== + +Package zappy implements the zappy block-based compression format. It aims for +a combination of good speed and reasonable compression. + +Installation: $ go get github.com/cznic/zappy + +Documentation: [godoc.org/github.com/cznic/zappy](http://godoc.org/github.com/cznic/zappy) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/SNAPPY-GO-LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/SNAPPY-GO-LICENSE new file mode 100644 index 00000000..6050c10f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/SNAPPY-GO-LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/all_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/all_test.go new file mode 100644 index 00000000..0a66e784 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/all_test.go @@ -0,0 +1,378 @@ +// Copyright 2014 The zappy Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the SNAPPY-GO-LICENSE file. + +package zappy + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "testing" + + "camlistore.org/third_party/code.google.com/p/snappy-go/snappy" +) + +var dbg = func(s string, va ...interface{}) { + _, fn, fl, _ := runtime.Caller(1) + fmt.Printf("%s:%d: ", path.Base(fn), fl) + fmt.Printf(s, va...) + fmt.Println() +} + +func use(...interface{}) {} + +var ( + download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") + pureGo = flag.String("purego", "", "verify 'purego' build tag functionality for value `false` or `true`") +) + +func roundtrip(b, ebuf, dbuf []byte) error { + e, err := Encode(ebuf, b) + if err != nil { + return fmt.Errorf("encoding error: %v", err) + } + d, err := Decode(dbuf, e) + if err != nil { + return fmt.Errorf("decoding error: %v", err) + } + if !bytes.Equal(b, d) { + return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) + } + return nil +} + +func TestEmpty(t *testing.T) { + if err := roundtrip(nil, nil, nil); err != nil { + t.Fatal(err) + } +} + +func TestSmallCopy(t *testing.T) { + for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for i := 0; i < 32; i++ { + s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" + if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { + t.Fatalf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) + } + } + } + } +} + +func TestSmallRand(t *testing.T) { + rand.Seed(27354294) + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(rand.Uint32()) + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + 
+func TestSmallRegular(t *testing.T) { + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(i%10 + 'a') + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(n, err) + } + } +} + +func benchDecode(b *testing.B, src []byte) { + encoded, err := Encode(nil, src) + if err != nil { + b.Fatal(err) + } + // Bandwidth is in amount of uncompressed data. + b.SetBytes(int64(len(src))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Decode(src, encoded) + } +} + +func benchEncode(b *testing.B, src []byte) { + // Bandwidth is in amount of uncompressed data. + b.SetBytes(int64(len(src))) + dst := make([]byte, MaxEncodedLen(len(src))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Encode(dst, src) + } +} + +func readFile(b *testing.B, filename string) []byte { + src, err := ioutil.ReadFile(filename) + if err != nil { + b.Fatalf("failed reading %s: %s", filename, err) + } + if len(src) == 0 { + b.Fatalf("%s has zero length", filename) + } + return src +} + +func readFile2(t *testing.T, filename string) []byte { + src, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatalf("failed reading %s: %s", filename, err) + } + if len(src) == 0 { + t.Fatalf("%s has zero length", filename) + } + return src +} + +// expand returns a slice of length n containing repeated copies of src. +func expand(src []byte, n int) []byte { + dst := make([]byte, n) + for x := dst; len(x) > 0; { + i := copy(x, src) + x = x[i:] + } + return dst +} + +func benchWords(b *testing.B, n int, decode bool) { + // Note: the file is OS-language dependent so the resulting values are not + // directly comparable for non-US-English OS installations. 
+ data := expand(readFile(b, "/usr/share/dict/words"), n) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } +func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } +func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } +func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } +func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } +func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } +func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } +func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } + +// testFiles' values are copied directly from +// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc. +// The label field is unused in zappy. +var testFiles = []struct { + label string + filename string +}{ + {"html", "html"}, + {"urls", "urls.10K"}, + {"jpg", "house.jpg"}, + {"pdf", "mapreduce-osdi-1.pdf"}, + {"html4", "html_x_4"}, + {"cp", "cp.html"}, + {"c", "fields.c"}, + {"lsp", "grammar.lsp"}, + {"xls", "kennedy.xls"}, + {"txt1", "alice29.txt"}, + {"txt2", "asyoulik.txt"}, + {"txt3", "lcet10.txt"}, + {"txt4", "plrabn12.txt"}, + {"bin", "ptt5"}, + {"sum", "sum"}, + {"man", "xargs.1"}, + {"pb", "geo.protodata"}, + {"gaviota", "kppkn.gtb"}, +} + +// The test data files are present at this canonical URL. 
+const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/" + +func downloadTestdata(basename string) (errRet error) { + filename := filepath.Join("testdata", basename) + f, err := os.Create(filename) + if err != nil { + return fmt.Errorf("failed to create %s: %s", filename, err) + } + + defer f.Close() + defer func() { + if errRet != nil { + os.Remove(filename) + } + }() + resp, err := http.Get(baseURL + basename) + if err != nil { + return fmt.Errorf("failed to download %s: %s", baseURL+basename, err) + } + defer resp.Body.Close() + _, err = io.Copy(f, resp.Body) + if err != nil { + return fmt.Errorf("failed to write %s: %s", filename, err) + } + return nil +} + +func benchFile(b *testing.B, n int, decode bool) { + filename := filepath.Join("testdata", testFiles[n].filename) + if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 { + if !*download { + b.Fatal("test data not found; skipping benchmark without the -download flag") + } + // Download the official snappy C++ implementation reference test data + // files for benchmarking. + if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) { + b.Fatalf("failed to create testdata: %s", err) + } + for _, tf := range testFiles { + if err := downloadTestdata(tf.filename); err != nil { + b.Fatalf("failed to download testdata: %s", err) + } + } + } + data := readFile(b, filename) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +// Naming convention is kept similar to what snappy's C++ implementation uses. 
+func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } +func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } +func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } +func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } +func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } +func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } +func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } +func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } +func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } +func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } +func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } +func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } +func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) } +func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) } +func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) } +func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) } +func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) } +func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) } +func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } +func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } +func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } +func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } +func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } +func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } +func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } +func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } +func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } +func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } +func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } +func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } +func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) } +func 
Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) } +func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) } +func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) } +func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) } +func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) } + +func TestCmp(t *testing.T) { + var ts, tz, to int + for i := 0; i <= 17; i++ { + filename := filepath.Join("testdata", testFiles[i].filename) + if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 { + if !*download { + t.Fatal("test data not found; skipping test without the -download flag") + } + // Download the official snappy C++ implementation reference test data + // files for benchmarking. + if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) { + t.Fatalf("failed to create testdata: %s", err) + } + for _, tf := range testFiles { + if err := downloadTestdata(tf.filename); err != nil { + t.Fatalf("failed to download testdata: %s", err) + } + } + } + data := readFile2(t, filename) + orig := len(data) + to += orig + senc, err := snappy.Encode(nil, data) + if err != nil { + t.Fatal(err) + } + + ns := len(senc) + zenc, err := Encode(nil, data) + if err != nil { + t.Fatal(err) + } + + nz := len(zenc) + t.Logf("%35s: snappy %7d, zappy %7d, %.3f, orig %7d", filename, ns, nz, float64(nz)/float64(ns), orig) + ts += ns + tz += nz + } + t.Logf("%35s: snappy %7d, zappy %7d, %.3f, orig %7d", "TOTAL", ts, tz, float64(tz)/float64(ts), to) +} + +func TestBitIndex(t *testing.T) { + rng := rand.New(rand.NewSource(42)) + for n := 16; n <= 1<<16; n <<= 1 { + data := make([]byte, n) + for i := 0; i < n/1000+1; i++ { + data[rng.Int()%n] = 1 + } + + senc, err := snappy.Encode(nil, data) + if err != nil { + t.Fatal(err) + } + + ns := len(senc) + zenc, err := Encode(nil, data) + if err != nil { + t.Fatal(err) + } + + nz := len(zenc) + t.Logf("Sparse bit index %7d B: snappy %7d, zappy %7d, %.3f", n, ns, nz, float64(nz)/float64(ns)) + } 
+} + +func TestPureGo(t *testing.T) { + var purego bool + switch s := *pureGo; s { + case "false": + // nop + case "true": + purego = true + default: + t.Log("Not performed: %q", s) + } + + if g, e := puregoDecode(), purego; g != e { + t.Fatal("Decode", g, e) + } + + if g, e := puregoEncode(), purego; g != e { + t.Fatal("Encode", g, e) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/decode.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/decode.go new file mode 100644 index 00000000..867c8dee --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/decode.go @@ -0,0 +1,38 @@ +// Copyright 2014 The zappy Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the SNAPPY-GO-LICENSE file. + +package zappy + +import ( + "encoding/binary" + "errors" +) + +// ErrCorrupt reports that the input is invalid. +var ErrCorrupt = errors.New("zappy: corrupt input") + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. 
+func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n == 0 { + return 0, 0, ErrCorrupt + } + + if uint64(int(v)) != v { + return 0, 0, errors.New("zappy: decoded block is too large") + } + + return int(v), n, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/decode_cgo.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/decode_cgo.go new file mode 100644 index 00000000..993c9924 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/decode_cgo.go @@ -0,0 +1,121 @@ +// Copyright 2014 The zappy Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the SNAPPY-GO-LICENSE file. + +// +build cgo,!purego + +package zappy + +/* + +#include +#include + +// supports only uint32 encoded values +int uvarint(unsigned int* n, uint8_t* src, int len) { + int r = 0; + unsigned int v = 0; + unsigned int s = 0; + while ((len-- != 0) && (++r <= 5)) { + uint8_t b = *src++; + v = v | ((b&0x7f)<>1; + if ((u&1) != 0) + x = ~x; + *n = x; + return i; +} + +int decode(int s, int len_src, uint8_t* src, int len_dst, uint8_t* dst) { + int d = 0; + int length; + while (s < len_src) { + int n, i = varint(&n, src+s, len_src-s); + if (i <= 0) { + return -1; + } + + s += i; + if (n >= 0) { + length = n+1; + if ((length > len_dst-d) || (length > len_src-s)) + return -1; + + memcpy(dst+d, src+s, length); + d += length; + s += length; + continue; + } + + + length = -n; + int offset; + i = uvarint((unsigned int*)(&offset), src+s, len_src-s); + if (i <= 0) + return -1; + + s += i; + if (s > len_src) + return -1; + + int end = d+length; + if ((offset > d) || (end > len_dst)) + return -1; + + for( ; d < end; d++) + 
*(dst+d) = *(dst+d-offset); + } + return d; +} + +*/ +import "C" + +func puregoDecode() bool { return false } + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of buf if buf was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil buf. +func Decode(buf, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + + if dLen == 0 { + if len(src) == 1 { + return nil, nil + } + + return nil, ErrCorrupt + } + + if len(buf) < dLen { + buf = make([]byte, dLen) + } + + d := int(C.decode(C.int(s), C.int(len(src)), (*C.uint8_t)(&src[0]), C.int(len(buf)), (*C.uint8_t)(&buf[0]))) + if d != dLen { + return nil, ErrCorrupt + } + + return buf[:d], nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/decode_nocgo.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/decode_nocgo.go new file mode 100644 index 00000000..75b782f8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/decode_nocgo.go @@ -0,0 +1,89 @@ +// Copyright 2014 The zappy Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the SNAPPY-GO-LICENSE file. + +// +build !cgo purego + +package zappy + +import ( + "encoding/binary" +) + +func puregoDecode() bool { return true } + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of buf if buf was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil buf. 
+func Decode(buf, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + + if dLen == 0 { + if len(src) == 1 { + return nil, nil + } + + return nil, ErrCorrupt + } + + if len(buf) < dLen { + buf = make([]byte, dLen) + } + + var d, offset, length int + for s < len(src) { + n, i := binary.Varint(src[s:]) + if i <= 0 { + return nil, ErrCorrupt + } + + s += i + if n >= 0 { + length = int(n + 1) + if length > len(buf)-d || length > len(src)-s { + return nil, ErrCorrupt + } + + copy(buf[d:], src[s:s+length]) + d += length + s += length + continue + } + + length = int(-n) + off64, i := binary.Uvarint(src[s:]) + if i <= 0 { + return nil, ErrCorrupt + } + + offset = int(off64) + s += i + if s > len(src) { + return nil, ErrCorrupt + } + + end := d + length + if offset > d || end > len(buf) { + return nil, ErrCorrupt + } + + for s, v := range buf[d-offset : end-offset] { + buf[d+s] = v + } + d = end + + } + if d != dLen { + return nil, ErrCorrupt + } + + return buf[:d], nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/encode.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/encode.go new file mode 100644 index 00000000..27ceba03 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/encode.go @@ -0,0 +1,37 @@ +// Copyright 2014 The zappy Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the SNAPPY-GO-LICENSE file. + +package zappy + +import ( + "encoding/binary" +) + +// We limit how far copy back-references can go, the same as the snappy C++ +// code. +const maxOffset = 1 << 20 + +// emitCopy writes a copy chunk and returns the number of bytes written. 
+func emitCopy(dst []byte, offset, length int) (n int) { + n = binary.PutVarint(dst, int64(-length)) + n += binary.PutUvarint(dst[n:], uint64(offset)) + return +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst, lit []byte) (n int) { + n = binary.PutVarint(dst, int64(len(lit)-1)) + n += copy(dst[n:], lit) + return +} + +// MaxEncodedLen returns the maximum length of a zappy block, given its +// uncompressed length. +func MaxEncodedLen(srcLen int) int { + return 10 + srcLen +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/encode_cgo.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/encode_cgo.go new file mode 100644 index 00000000..6c343f58 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/encode_cgo.go @@ -0,0 +1,140 @@ +// Copyright 2014 The zappy Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the SNAPPY-GO-LICENSE file. 
+ +// +build cgo,!purego + +package zappy + +/* + +#include +#include + +#define MAXOFFSET 1<<20 + +int putUvarint(uint8_t* buf, unsigned int x) { + int i = 1; + for (; x >= 0x80; i++) { + *buf++ = x|0x80; + x >>= 7; + } + *buf = x; + return i; +} + +int putVarint(uint8_t* buf, int x) { + unsigned int ux = x << 1; + if (x < 0) + ux = ~ux; + return putUvarint(buf, ux); +} + +int emitLiteral(uint8_t* dst, uint8_t* lit, int len_lit) { + int n = putVarint(dst, len_lit-1); + memcpy(dst+n, lit, len_lit); + return n+len_lit; +} + +int emitCopy(uint8_t* dst, int off, int len) { + int n = putVarint(dst, -len); + return n+putUvarint(dst+n, (unsigned int)off); +} + +int encode(int d, uint8_t* dst, uint8_t* src, int len_src) { + int table[1<<12]; + int s = 0; + int t = 0; + int lit = 0; + int lim = 0; + memset(table, 0, sizeof(table)); + for (lim = len_src-3; s < lim; ) { + // Update the hash table. + uint32_t b0 = src[s]; + uint32_t b1 = src[s+1]; + uint32_t b2 = src[s+2]; + uint32_t b3 = src[s+3]; + uint32_t h = b0 | (b1<<8) | (b2<<16) | (b3<<24); + uint32_t i; +more: + i = (h*0x1e35a7bd)>>20; + t = table[i]; + table[i] = s; + // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. + if ((t == 0) || (s-t >= MAXOFFSET) || (b0 != src[t]) || (b1 != src[t+1]) || (b2 != src[t+2]) || (b3 != src[t+3])) { + s++; + if (s >= lim) + break; + + b0 = b1; + b1 = b2; + b2 = b3; + b3 = src[s+3]; + h = (h>>8) | ((b3)<<24); + goto more; + } + + // Otherwise, we have a match. First, emit any pending literal bytes. + if (lit != s) { + d += emitLiteral(dst+d, src+lit, s-lit); + } + // Extend the match to be as long as possible. + int s0 = s; + s += 4; + t += 4; + while ((s < len_src) && (src[s] == src[t])) { + s++; + t++; + } + d += emitCopy(dst+d, s-t, s-s0); + lit = s; + } + // Emit any final pending literal bytes and return. 
+ if (lit != len_src) { + d += emitLiteral(dst+d, src+lit, len_src-lit); + } + return d; +} + +*/ +import "C" + +import ( + "encoding/binary" + "fmt" + "math" +) + +func puregoEncode() bool { return false } + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of buf if buf was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil buf. +func Encode(buf, src []byte) ([]byte, error) { + if n := MaxEncodedLen(len(src)); len(buf) < n { + buf = make([]byte, n) + } + + if len(src) > math.MaxInt32 { + return nil, fmt.Errorf("zappy.Encode: too long data: %d bytes", len(src)) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(buf, uint64(len(src))) + + // Return early if src is short. + if len(src) <= 4 { + if len(src) != 0 { + d += emitLiteral(buf[d:], src) + } + return buf[:d], nil + } + + d = int(C.encode(C.int(d), (*C.uint8_t)(&buf[0]), (*C.uint8_t)(&src[0]), C.int(len(src)))) + return buf[:d], nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/encode_nocgo.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/encode_nocgo.go new file mode 100644 index 00000000..1d1df443 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/encode_nocgo.go @@ -0,0 +1,92 @@ +// Copyright 2014 The zappy Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the SNAPPY-GO-LICENSE file. + +// +build !cgo purego + +package zappy + +import ( + "encoding/binary" + "fmt" + "math" +) + +func puregoEncode() bool { return true } + +// Encode returns the encoded form of src. 
The returned slice may be a sub- +// slice of buf if buf was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil buf. +func Encode(buf, src []byte) ([]byte, error) { + if n := MaxEncodedLen(len(src)); len(buf) < n { + buf = make([]byte, n) + } + + if len(src) > math.MaxInt32 { + return nil, fmt.Errorf("zappy.Encode: too long data: %d bytes", len(src)) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(buf, uint64(len(src))) + + // Return early if src is short. + if len(src) <= 4 { + if len(src) != 0 { + d += emitLiteral(buf[d:], src) + } + return buf[:d], nil + } + + // Iterate over the source bytes. + var ( + table [1 << 12]int // Hash table + s int // The iterator position. + t int // The last position with the same hash as s. + lit int // The start position of any pending literal bytes. + ) + for lim := len(src) - 3; s < lim; { + // Update the hash table. + b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] + h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 + more: + p := &table[(h*0x1e35a7bd)>>20] + t, *p = *p, s + // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. + if t == 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { + s++ + if s >= lim { + break + } + + b0, b1, b2, b3 = b1, b2, b3, src[s+3] + h = h>>8 | uint32(b3)<<24 + goto more + } + + // Otherwise, we have a match. First, emit any pending literal bytes. + if lit != s { + d += emitLiteral(buf[d:], src[lit:s]) + } + // Extend the match to be as long as possible. + s0 := s + s, t = s+4, t+4 + for s < len(src) && src[s] == src[t] { + s++ + t++ + } + // Emit the copied bytes. + d += emitCopy(buf[d:], s-t, s-s0) + lit = s + } + + // Emit any final pending literal bytes and return. 
+ if lit != len(src) { + d += emitLiteral(buf[d:], src[lit:]) + } + return buf[:d], nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/zappy.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/zappy.go new file mode 100644 index 00000000..760df34a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/cznic/zappy/zappy.go @@ -0,0 +1,241 @@ +// Copyright 2014 The zappy Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the SNAPPY-GO-LICENSE file. + +/* +Package zappy implements the zappy block-based compression format. It aims for +a combination of good speed and reasonable compression. + +Zappy is a format incompatible, API compatible fork of snappy-go[1]. The C++ +snappy implementation is at [2]. + +Reasons for the fork + +The snappy compression is pretty good. Yet it has one problem built into its +format definition[3] - the maximum length of a copy "instruction" is 64 bytes. +For some specific usage patterns with long runs of repeated data, it turns out +the compression is suboptimal. For example a 1:1000 "sparseness" 64kB bit index +with only few set bits is compressed to about 3kB (about 1000 of 64B copy, 3 +byte "instructions"). + +Format description + +Zappy uses much less complicated format than snappy. Each encoded block begins +with the uvarint-encoded[4] length of the decoded data, followed by a sequence +of chunks. Chunks begin and end on byte boundaries. The chunk starts with a +varint encoded number N: + + N >= 0: N+1 literal bytes follow. + + N < 0: copy -N bytes, starting at offset M (in the following uvarint). 
+ +Performance issues + +Compression rate is roughly the same as of snappy for the reference data set: + + testdata/html: snappy 23320, zappy 22943, 0.984, orig 102400 + testdata/urls.10K: snappy 334437, zappy 355163, 1.062, orig 702087 + testdata/house.jpg: snappy 126711, zappy 126694, 1.000, orig 126958 + testdata/mapreduce-osdi-1.pdf: snappy 77227, zappy 77646, 1.005, orig 94330 + testdata/html_x_4: snappy 92350, zappy 22956, 0.249, orig 409600 + testdata/cp.html: snappy 11938, zappy 12961, 1.086, orig 24603 + testdata/fields.c: snappy 4825, zappy 5395, 1.118, orig 11150 + testdata/grammar.lsp: snappy 1814, zappy 1933, 1.066, orig 3721 + testdata/kennedy.xls: snappy 423518, zappy 440597, 1.040, orig 1029744 + testdata/alice29.txt: snappy 89550, zappy 104016, 1.162, orig 152089 + testdata/asyoulik.txt: snappy 79583, zappy 91345, 1.148, orig 125179 + testdata/lcet10.txt: snappy 238761, zappy 275488, 1.154, orig 426754 + testdata/plrabn12.txt: snappy 324567, zappy 376885, 1.161, orig 481861 + testdata/ptt5: snappy 96350, zappy 91465, 0.949, orig 513216 + testdata/sum: snappy 18927, zappy 20015, 1.057, orig 38240 + testdata/xargs.1: snappy 2532, zappy 2793, 1.103, orig 4227 + testdata/geo.protodata: snappy 23362, zappy 20759, 0.889, orig 118588 + testdata/kppkn.gtb: snappy 73962, zappy 87200, 1.179, orig 184320 + TOTAL: snappy 2043734, zappy 2136254, 1.045, orig 4549067 + +Zappy has better RLE handling (1/1000+1 non zero bytes in each index): + + Sparse bit index 16 B: snappy 9, zappy 9, 1.000 + Sparse bit index 32 B: snappy 10, zappy 10, 1.000 + Sparse bit index 64 B: snappy 11, zappy 10, 0.909 + Sparse bit index 128 B: snappy 16, zappy 14, 0.875 + Sparse bit index 256 B: snappy 22, zappy 14, 0.636 + Sparse bit index 512 B: snappy 36, zappy 16, 0.444 + Sparse bit index 1024 B: snappy 57, zappy 18, 0.316 + Sparse bit index 2048 B: snappy 111, zappy 32, 0.288 + Sparse bit index 4096 B: snappy 210, zappy 31, 0.148 + Sparse bit index 8192 B: snappy 419, zappy 75, 0.179 
+ Sparse bit index 16384 B: snappy 821, zappy 138, 0.168 + Sparse bit index 32768 B: snappy 1627, zappy 232, 0.143 + Sparse bit index 65536 B: snappy 3243, zappy 451, 0.139 + +When compiled with CGO_ENABLED=1, zappy is now faster than snappy-go. +Old=snappy-go, new=zappy: + + benchmark old MB/s new MB/s speedup + BenchmarkWordsDecode1e3 148.98 189.04 1.27x + BenchmarkWordsDecode1e4 150.29 182.51 1.21x + BenchmarkWordsDecode1e5 145.79 182.95 1.25x + BenchmarkWordsDecode1e6 167.43 187.69 1.12x + BenchmarkWordsEncode1e3 47.11 145.69 3.09x + BenchmarkWordsEncode1e4 81.47 136.50 1.68x + BenchmarkWordsEncode1e5 78.86 127.93 1.62x + BenchmarkWordsEncode1e6 96.81 142.95 1.48x + Benchmark_UFlat0 316.87 463.19 1.46x + Benchmark_UFlat1 231.56 350.32 1.51x + Benchmark_UFlat2 3656.68 8258.39 2.26x + Benchmark_UFlat3 892.56 1270.09 1.42x + Benchmark_UFlat4 315.84 959.08 3.04x + Benchmark_UFlat5 211.70 301.55 1.42x + Benchmark_UFlat6 211.59 258.29 1.22x + Benchmark_UFlat7 209.80 272.21 1.30x + Benchmark_UFlat8 254.59 301.70 1.19x + Benchmark_UFlat9 163.39 192.66 1.18x + Benchmark_UFlat10 155.46 189.70 1.22x + Benchmark_UFlat11 170.11 198.95 1.17x + Benchmark_UFlat12 148.32 178.78 1.21x + Benchmark_UFlat13 359.25 579.99 1.61x + Benchmark_UFlat14 197.27 291.33 1.48x + Benchmark_UFlat15 185.75 248.07 1.34x + Benchmark_UFlat16 362.74 582.66 1.61x + Benchmark_UFlat17 222.95 240.01 1.08x + Benchmark_ZFlat0 188.66 311.89 1.65x + Benchmark_ZFlat1 101.46 201.34 1.98x + Benchmark_ZFlat2 93.62 244.50 2.61x + Benchmark_ZFlat3 102.79 243.34 2.37x + Benchmark_ZFlat4 191.64 625.32 3.26x + Benchmark_ZFlat5 103.09 169.39 1.64x + Benchmark_ZFlat6 110.35 182.57 1.65x + Benchmark_ZFlat7 89.56 190.53 2.13x + Benchmark_ZFlat8 154.05 235.68 1.53x + Benchmark_ZFlat9 87.58 133.51 1.52x + Benchmark_ZFlat10 82.08 127.51 1.55x + Benchmark_ZFlat11 91.36 138.91 1.52x + Benchmark_ZFlat12 79.24 123.02 1.55x + Benchmark_ZFlat13 217.04 374.26 1.72x + Benchmark_ZFlat14 100.33 168.03 1.67x + Benchmark_ZFlat15 80.79 
160.46 1.99x + Benchmark_ZFlat16 213.32 375.79 1.76x + Benchmark_ZFlat17 135.37 197.13 1.46x + +The package builds with CGO_ENABLED=0 as well, but the performance is worse. + + + $ CGO_ENABLED=0 go test -test.run=NONE -test.bench=. > old.benchcmp + $ CGO_ENABLED=1 go test -test.run=NONE -test.bench=. > new.benchcmp + $ benchcmp old.benchcmp new.benchcmp + benchmark old ns/op new ns/op delta + BenchmarkWordsDecode1e3 9735 5288 -45.68% + BenchmarkWordsDecode1e4 100229 55369 -44.76% + BenchmarkWordsDecode1e5 1037611 546420 -47.34% + BenchmarkWordsDecode1e6 9559352 5335307 -44.19% + BenchmarkWordsEncode1e3 16206 6629 -59.10% + BenchmarkWordsEncode1e4 140283 73161 -47.85% + BenchmarkWordsEncode1e5 1476657 781756 -47.06% + BenchmarkWordsEncode1e6 12702229 6997656 -44.91% + Benchmark_UFlat0 397307 221198 -44.33% + Benchmark_UFlat1 3890483 2008341 -48.38% + Benchmark_UFlat2 35810 15398 -57.00% + Benchmark_UFlat3 140850 74194 -47.32% + Benchmark_UFlat4 814575 426783 -47.61% + Benchmark_UFlat5 156995 81473 -48.10% + Benchmark_UFlat6 77645 43161 -44.41% + Benchmark_UFlat7 25415 13579 -46.57% + Benchmark_UFlat8 6372440 3412916 -46.44% + Benchmark_UFlat9 1453679 789956 -45.66% + Benchmark_UFlat10 1243146 660747 -46.85% + Benchmark_UFlat11 3903493 2146334 -45.02% + Benchmark_UFlat12 5106250 2696144 -47.20% + Benchmark_UFlat13 1641394 884969 -46.08% + Benchmark_UFlat14 262206 131174 -49.97% + Benchmark_UFlat15 32325 17047 -47.26% + Benchmark_UFlat16 366991 204877 -44.17% + Benchmark_UFlat17 1343988 770907 -42.64% + Benchmark_ZFlat0 579954 329812 -43.13% + Benchmark_ZFlat1 6564692 3504867 -46.61% + Benchmark_ZFlat2 902029 513700 -43.05% + Benchmark_ZFlat3 678722 384312 -43.38% + Benchmark_ZFlat4 1197389 654361 -45.35% + Benchmark_ZFlat5 262677 144939 -44.82% + Benchmark_ZFlat6 111249 60876 -45.28% + Benchmark_ZFlat7 39024 19420 -50.24% + Benchmark_ZFlat8 8046106 4387928 -45.47% + Benchmark_ZFlat9 2043167 1143139 -44.05% + Benchmark_ZFlat10 1781604 980528 -44.96% + 
Benchmark_ZFlat11 5478647 3078585 -43.81% + Benchmark_ZFlat12 7245995 3929863 -45.77% + Benchmark_ZFlat13 2432529 1371606 -43.61% + Benchmark_ZFlat14 420315 227494 -45.88% + Benchmark_ZFlat15 52378 26564 -49.28% + Benchmark_ZFlat16 567047 316196 -44.24% + Benchmark_ZFlat17 1630820 937310 -42.53% + + benchmark old MB/s new MB/s speedup + BenchmarkWordsDecode1e3 102.71 189.08 1.84x + BenchmarkWordsDecode1e4 99.77 180.60 1.81x + BenchmarkWordsDecode1e5 96.38 183.01 1.90x + BenchmarkWordsDecode1e6 104.61 187.43 1.79x + BenchmarkWordsEncode1e3 61.70 150.85 2.44x + BenchmarkWordsEncode1e4 71.28 136.68 1.92x + BenchmarkWordsEncode1e5 67.72 127.92 1.89x + BenchmarkWordsEncode1e6 78.73 142.90 1.82x + Benchmark_UFlat0 257.73 462.93 1.80x + Benchmark_UFlat1 180.46 349.59 1.94x + Benchmark_UFlat2 3545.30 8244.61 2.33x + Benchmark_UFlat3 669.72 1271.39 1.90x + Benchmark_UFlat4 502.84 959.74 1.91x + Benchmark_UFlat5 156.71 301.98 1.93x + Benchmark_UFlat6 143.60 258.33 1.80x + Benchmark_UFlat7 146.41 274.01 1.87x + Benchmark_UFlat8 161.59 301.72 1.87x + Benchmark_UFlat9 104.62 192.53 1.84x + Benchmark_UFlat10 100.70 189.45 1.88x + Benchmark_UFlat11 109.33 198.83 1.82x + Benchmark_UFlat12 94.37 178.72 1.89x + Benchmark_UFlat13 312.67 579.93 1.85x + Benchmark_UFlat14 145.84 291.52 2.00x + Benchmark_UFlat15 130.77 247.95 1.90x + Benchmark_UFlat16 323.14 578.82 1.79x + Benchmark_UFlat17 137.14 239.09 1.74x + Benchmark_ZFlat0 176.57 310.48 1.76x + Benchmark_ZFlat1 106.95 200.32 1.87x + Benchmark_ZFlat2 140.75 247.14 1.76x + Benchmark_ZFlat3 138.98 245.45 1.77x + Benchmark_ZFlat4 342.08 625.95 1.83x + Benchmark_ZFlat5 93.66 169.75 1.81x + Benchmark_ZFlat6 100.23 183.16 1.83x + Benchmark_ZFlat7 95.35 191.60 2.01x + Benchmark_ZFlat8 127.98 234.68 1.83x + Benchmark_ZFlat9 74.44 133.04 1.79x + Benchmark_ZFlat10 70.26 127.66 1.82x + Benchmark_ZFlat11 77.89 138.62 1.78x + Benchmark_ZFlat12 66.50 122.62 1.84x + Benchmark_ZFlat13 210.98 374.17 1.77x + Benchmark_ZFlat14 90.98 168.09 1.85x + 
Benchmark_ZFlat15 80.70 159.12 1.97x + Benchmark_ZFlat16 209.13 375.04 1.79x + Benchmark_ZFlat17 113.02 196.65 1.74x + $ + +Build tags + +If a constraint 'purego' appears in the build constraints [5] then a pure Go +version is built regardless of the $CGO_ENABLED value. + + $ touch zappy.go ; go install -tags purego github.com/cznic/zappy # for example + +Information sources + +... referenced from the above documentation. + + [1]: http://code.google.com/p/snappy-go/ + [2]: http://code.google.com/p/snappy/ + [3]: http://code.google.com/p/snappy/source/browse/trunk/format_description.txt + [4]: http://golang.org/pkg/encoding/binary/ + [5]: http://golang.org/pkg/go/build/#hdr-Build_Constraints +*/ +package zappy diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 00000000..2a7cfd2b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2012-2013 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/README.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/README.md new file mode 100644 index 00000000..9624dcc4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/README.md @@ -0,0 +1,140 @@ +go-spew +======= + +[![Build Status](https://travis-ci.org/davecgh/go-spew.png?branch=master)] +(https://travis-ci.org/davecgh/go-spew) + +Go-spew implements a deep pretty printer for Go data structures to aid in +debugging. A comprehensive suite of tests with 100% test coverage is provided +to ensure proper functionality. See `test_coverage.txt` for the gocov coverage +report. Go-spew is licensed under the liberal ISC license, so it may be used in +open source or commercial projects. + +If you're interested in reading about how this package came to life and some +of the challenges involved in providing a deep pretty printer, there is a blog +post about it +[here](https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/). + +## Documentation + +[![GoDoc](https://godoc.org/github.com/davecgh/go-spew/spew?status.png)] +(http://godoc.org/github.com/davecgh/go-spew/spew) + +Full `go doc` style documentation for the project can be viewed online without +installing this package by using the excellent GoDoc site here: +http://godoc.org/github.com/davecgh/go-spew/spew + +You can also view the documentation locally once the package is installed with +the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to +http://localhost:6060/pkg/github.com/davecgh/go-spew/spew + +## Installation + +```bash +$ go get github.com/davecgh/go-spew/spew +``` + +## Quick Start + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + +```Go +spew.Dump(myVar1, myVar2, ...) +spew.Fdump(someWriter, myVar1, myVar2, ...) 
+str := spew.Sdump(myVar1, myVar2, ...) +``` + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most +compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types +and pointer addresses): + +```Go +spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +``` + +## Sample Dump Output + +``` +(main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) { + (string) "one": (bool) true + } +} +([]uint8) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| +} +``` + +## Sample Formatter Output + +Double pointer to a uint8: +``` + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 +``` + +Pointer to circular struct with a uint8 field and a pointer to itself: +``` + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} +``` + +## Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available via the +spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. 
+ +``` +* Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + +* MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + +* DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + +* DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + +* ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + +* SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + are supported with other types sorted according to the + reflect.Value.String() output which guarantees display stability. + Natural map order is used by default. +``` + +## License + +Go-spew is licensed under the liberal ISC License. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/cov_report.sh new file mode 100644 index 00000000..9579497e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/cov_report.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# This script uses gocov to generate a test coverage report. +# The gocov tool my be obtained with the following command: +# go get github.com/axw/gocov/gocov +# +# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. + +# Check for gocov. +if ! type gocov >/dev/null 2>&1; then + echo >&2 "This script requires the gocov tool." 
+ echo >&2 "You may obtain it with the following command:" + echo >&2 "go get github.com/axw/gocov/gocov" + exit 1 +fi + +# Only run the cgo tests if gcc is installed. +if type gcc >/dev/null 2>&1; then + (cd spew && gocov test -tags testcgo | gocov report) +else + (cd spew && gocov test | gocov report) +fi diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 00000000..bc26e7e4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" + "reflect" + "sort" + "strconv" + "unsafe" +) + +// offsetPtr, offsetScalar, and offsetFlag are the offsets for the internal +// reflect.Value fields. +var offsetPtr, offsetScalar, offsetFlag uintptr + +// reflectValueOld mirrors the struct layout of the reflect package Value type +// before golang commit ecccf07e7f9d. 
+var reflectValueOld struct { + typ unsafe.Pointer + val unsafe.Pointer + flag uintptr +} + +// reflectValueNew mirrors the struct layout of the reflect package Value type +// after golang commit ecccf07e7f9d. +var reflectValueNew struct { + typ unsafe.Pointer + ptr unsafe.Pointer + scalar uintptr + flag uintptr +} + +func init() { + // Older versions of reflect.Value stored small integers directly in the + // ptr field (which is named val in the older versions). Newer versions + // added a new field named scalar for this purpose which unfortuantely + // comes before the flag field. Further the new field is before the + // flag field, so the offset of the flag field is different as well. + // This code constructs a new reflect.Value from a known small integer + // and checks if the val field within it matches. When it matches, the + // old style reflect.Value is being used. Otherwise it's the new style. + v := 0xf00 + vv := reflect.ValueOf(v) + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + + unsafe.Offsetof(reflectValueOld.val)) + + // Assume the old style by default. + offsetPtr = unsafe.Offsetof(reflectValueOld.val) + offsetScalar = 0 + offsetFlag = unsafe.Offsetof(reflectValueOld.flag) + + // Use the new style offsets if the ptr field doesn't match the value + // since it must be in the new scalar field. + if int(*(*uintptr)(upv)) != v { + offsetPtr = unsafe.Offsetof(reflectValueNew.ptr) + offsetScalar = unsafe.Offsetof(reflectValueNew.scalar) + offsetFlag = unsafe.Offsetof(reflectValueNew.flag) + } +} + +// flagIndir indicates whether the value field of a reflect.Value is the actual +// data or a pointer to the data. +const flagIndir = 1 << 1 + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. 
It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetScalar) + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. 
+var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. 
We use unsafe to bypass these restrictions + // since this package does not mutate the values. + if !v.CanInterface() { + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + var viface interface{} + if !cs.DisablePointerMethods { + if !v.CanAddr() { + v = unsafeReflectValue(v) + } + viface = v.Addr().Interface() + } else { + if v.CanAddr() { + v = v.Addr() + } + viface = v.Interface() + } + + // Is it an error or Stringer? + switch iface := viface.(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. 
+func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. 
+func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + switch s.values[i].Kind() { + case reflect.Bool: + return !s.values[i].Bool() && s.values[j].Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return s.values[i].Int() < s.values[j].Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return s.values[i].Uint() < s.values[j].Uint() + case reflect.Float32, reflect.Float64: + return s.values[i].Float() < s.values[j].Float() + case reflect.String: + return s.values[i].String() < s.values[j].String() + case reflect.Uintptr: + return s.values[i].Uint() < s.values[j].Uint() + } + return s.values[i].String() < s.values[j].String() +} + +// sortValues is a generic sort function for native types: int, uint, bool, +// string and uintptr. Other inputs are sorted according to their +// Value.String() value to ensure display stability. +func sortValues(values []reflect.Value) { + if len(values) == 0 { + return + } + sort.Sort(&valuesSorter{values}) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/common_test.go new file mode 100644 index 00000000..c5cbec6c --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/common_test.go @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "camlistore.org/third_party/github.com/davecgh/go-spew/spew" + "fmt" + "reflect" + "testing" +) + +// custom type to test Stinger interface on non-pointer receiver. +type stringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with non-pointer receivers. +func (s stringer) String() string { + return "stringer " + string(s) +} + +// custom type to test Stinger interface on pointer receiver. +type pstringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with only pointer receivers. +func (s *pstringer) String() string { + return "stringer " + string(*s) +} + +// xref1 and xref2 are cross referencing structs for testing circular reference +// detection. +type xref1 struct { + ps2 *xref2 +} +type xref2 struct { + ps1 *xref1 +} + +// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular +// reference for testing detection. +type indirCir1 struct { + ps2 *indirCir2 +} +type indirCir2 struct { + ps3 *indirCir3 +} +type indirCir3 struct { + ps1 *indirCir1 +} + +// embed is used to test embedded structures. +type embed struct { + a string +} + +// embedwrap is used to test embedded structures. 
+type embedwrap struct { + *embed + e *embed +} + +// panicer is used to intentionally cause a panic for testing spew properly +// handles them +type panicer int + +func (p panicer) String() string { + panic("test panic") +} + +// customError is used to test custom error interface invocation. +type customError int + +func (e customError) Error() string { + return fmt.Sprintf("error: %d", int(e)) +} + +// stringizeWants converts a slice of wanted test output into a format suitable +// for a test error message. +func stringizeWants(wants []string) string { + s := "" + for i, want := range wants { + if i > 0 { + s += fmt.Sprintf("want%d: %s", i+1, want) + } else { + s += "want: " + want + } + } + return s +} + +// testFailed returns whether or not a test failed by checking if the result +// of the test is in the slice of wanted strings. +func testFailed(result string, wants []string) bool { + for _, want := range wants { + if result == want { + return false + } + } + return true +} + +// TestSortValues ensures the sort functionality for relect.Value based sorting +// works as intended. +func TestSortValues(t *testing.T) { + getInterfaces := func(values []reflect.Value) []interface{} { + interfaces := []interface{}{} + for _, v := range values { + interfaces = append(interfaces, v.Interface()) + } + return interfaces + } + + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + embedA := v(embed{"a"}) + embedB := v(embed{"b"}) + embedC := v(embed{"c"}) + tests := []struct { + input []reflect.Value + expected []reflect.Value + }{ + // No values. + { + []reflect.Value{}, + []reflect.Value{}, + }, + // Bools. + { + []reflect.Value{v(false), v(true), v(false)}, + []reflect.Value{v(false), v(false), v(true)}, + }, + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Uints. + { + []reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))}, + []reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))}, + }, + // Floats. 
+ { + []reflect.Value{v(2.0), v(1.0), v(3.0)}, + []reflect.Value{v(1.0), v(2.0), v(3.0)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // Uintptrs. + { + []reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))}, + []reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))}, + }, + // Invalid. + { + []reflect.Value{embedB, embedA, embedC}, + []reflect.Value{embedB, embedA, embedC}, + }, + } + for _, test := range tests { + spew.SortValues(test.input) + // reflect.DeepEqual cannot really make sense of reflect.Value, + // probably because of all the pointer tricks. For instance, + // v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{} + // instead. + input := getInterfaces(test.input) + expected := getInterfaces(test.expected) + if !reflect.DeepEqual(input, expected) { + t.Errorf("Sort mismatch:\n %v != %v", input, expected) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 00000000..e516675d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. 
+ DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. + DisablePointerMethods bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) are supported + // with other types sorted according to the reflect.Value.String() output + // which guarantees display stability. + SortKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. 
See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) 
+} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. 
+// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 00000000..2a0fb608 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. 
+ +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) 
+ +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. 
Note that + only native types (bool, int, uint, floats, uintptr and string) + are supported with other types sorted according to the + reflect.Value.String() output which guarantees display stability. + Natural map order is used by default. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) { + (string) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter the implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. 
In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. 
+*/ +package spew diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 00000000..3d573067 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,474 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. 
+ // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. 
+ nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. 
+ case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type back + // into a byte slice. However, the reflect package won't give + // us an interface on certain things like unexported struct + // fields in order to enforce visibility rules. We use unsafe + // to bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be type + // asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. 
It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. 
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) 
+} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/dump_test.go new file mode 100644 index 00000000..f683adb5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/dump_test.go @@ -0,0 +1,912 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. 
+ +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Array containing bytes +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Slice containing bytes +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +*/ + +package spew_test + +import ( + "bytes" + "camlistore.org/third_party/github.com/davecgh/go-spew/spew" + "fmt" + "testing" + "unsafe" +) + +// dumpTest is used to describe a test to be perfomed against the Dump method. +type dumpTest struct { + in interface{} + wants []string +} + +// dumpTests houses all of the tests to be performed against the Dump method. 
+var dumpTests = make([]dumpTest, 0) + +// addDumpTest is a helper method to append the passed input and desired result +// to dumpTests +func addDumpTest(in interface{}, wants ...string) { + test := dumpTest{in, wants} + dumpTests = append(dumpTests, test) +} + +func addIntDumpTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max int16. + v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max int64. + v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max int. 
+ v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addUintDumpTests() { + // Max uint8. + v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max uint16. + v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max uint32. + v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max uint64. + v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max uint. 
+ v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addBoolDumpTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Boolean false. + v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFloatDumpTests() { + // Standard float32. + v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard float64. + v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addComplexDumpTests() { + // Standard complex64. 
+ v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard complex128. + v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addArrayDumpTests() { + // Array containing standard ints. + v := [3]int{1, 2, 3} + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int" + vs := "{\n (" + vt + ") 1,\n (" + vt + ") 2,\n (" + vt + ") 3\n}" + addDumpTest(v, "([3]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[3]"+vt+")()\n") + + // Array containing type with custom formatter on pointer receiver only. + v2 := [3]pstringer{"1", "2", "3"} + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2s := "{\n (" + v2t + ") stringer 1,\n (" + v2t + ") stringer 2,\n (" + + v2t + ") stringer 3\n}" + addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*[3]"+v2t+")()\n") + + // Array containing interfaces. 
+ v3 := [3]interface{}{"one", int(2), uint(3)} + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "{\n (" + v3t2 + ") \"one\",\n (" + v3t3 + ") 2,\n (" + v3t4 + + ") 3\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Array containing bytes. + v4 := [34]byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + nv4 := (*[34]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[34]uint8" + v4s := "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addSliceDumpTests() { + // Slice containing standard float32 values. + v := []float32{3.14, 6.28, 12.56} + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "{\n (" + vt + ") 3.14,\n (" + vt + ") 6.28,\n (" + vt + ") 12.56\n}" + addDumpTest(v, "([]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[]"+vt+")()\n") + + // Slice containing type with custom formatter on pointer receiver only. 
+ v2 := []pstringer{"1", "2", "3"} + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2s := "{\n (" + v2t + ") stringer 1,\n (" + v2t + ") stringer 2,\n (" + + v2t + ") stringer 3\n}" + addDumpTest(v2, "([]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*[]"+v2t+")()\n") + + // Slice containing interfaces. + v3 := []interface{}{"one", int(2), uint(3), nil} + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "{\n (" + v3t2 + ") \"one\",\n (" + v3t3 + ") 2,\n (" + v3t4 + + ") 3,\n (" + v3t5 + ") \n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Slice containing bytes. + v4 := []byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + nv4 := (*[]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]uint8" + v4s := "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Nil slice. 
+ v5 := []int(nil) + nv5 := (*[]int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "[]int" + v5s := "" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addStringDumpTests() { + // Standard string. + v := "test" + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "\"test\"" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addInterfaceDumpTests() { + // Nil interface. + var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Sub-interface. + v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addMapDumpTests() { + // Map with string keys and int vals. 
+ v := map[string]int{"one": 1, "two": 2} + nv := (*map[string]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "map[string]int" + vt1 := "string" + vt2 := "int" + vs := "{\n (" + vt1 + ") \"one\": (" + vt2 + ") 1,\n (" + vt1 + + ") \"two\": (" + vt2 + ") 2\n}" + vs2 := "{\n (" + vt1 + ") \"two\": (" + vt2 + ") 2,\n (" + vt1 + + ") \"one\": (" + vt2 + ") 1\n}" + addDumpTest(v, "("+vt+") "+vs+"\n", "("+vt+") "+vs2+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n", + "(*"+vt+")("+vAddr+")("+vs2+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n", + "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Map with custom formatter type on pointer receiver only keys and vals. + v2 := map[pstringer]pstringer{"one": "1"} + nv2 := (*map[pstringer]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "map[spew_test.pstringer]spew_test.pstringer" + v2t1 := "spew_test.pstringer" + v2t2 := "spew_test.pstringer" + v2s := "{\n (" + v2t1 + ") stringer one: (" + v2t2 + ") stringer 1\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Map with interface keys and values. + v3 := map[interface{}]interface{}{"one": 1} + nv3 := (*map[interface{}]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "map[interface {}]interface {}" + v3t1 := "string" + v3t2 := "int" + v3s := "{\n (" + v3t1 + ") \"one\": (" + v3t2 + ") 1\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Map with nil interface value. 
+ v4 := map[string]interface{}{"nil": nil} + nv4 := (*map[string]interface{})(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "map[string]interface {}" + v4t1 := "string" + v4t2 := "interface {}" + v4s := "{\n (" + v4t1 + ") \"nil\": (" + v4t2 + ") \n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addStructDumpTests() { + // Struct with primitives. + type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Struct that contains another struct. + type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" + + v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. 
+ type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{\n s: (" + v3t2 + ") stringer test,\n S: (" + v3t2 + + ") stringer test2\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Struct that contains embedded struct and field to same struct. + e := embed{"embedstr"} + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 + + ") \"embedstr\"\n }),\n e: (*" + v4t2 + ")(" + eAddr + ")({\n" + + " a: (" + v4t3 + ") \"embedstr\"\n })\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addUintptrDumpTests() { + // Null pointer. + v := uintptr(0) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + + // Address of real variable. 
+ i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + nv2 := (*uintptr)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addUnsafePointerDumpTests() { + // Null pointer. + v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Address of real variable. + i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addChanDumpTests() { + // Nil channel. + var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Real channel. 
+ v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFuncDumpTests() { + // Function with no params and no returns. + v := addIntDumpTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Function with param and no returns. + v2 := TestDump + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Function with multiple params and multiple returns. + var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") +} + +func addCircularDumpTests() { + // Struct that is circular through self referencing. 
+ type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" + + vAddr + ")()\n })\n}" + vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")()\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n") + + // Structs that are circular through cross referencing. + v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + + ")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr + + ")()\n })\n })\n}" + v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + + ")(" + v2Addr + ")()\n })\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n") + + // Structs that are indirectly circular. 
+ v3 := indirCir1{nil} + tic2 := indirCir2{nil} + tic3 := indirCir3{&v3} + tic2.ps3 = &tic3 + v3.ps2 = &tic2 + pv3 := &v3 + tic2Addr := fmt.Sprintf("%p", &tic2) + tic3Addr := fmt.Sprintf("%p", &tic3) + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.indirCir1" + v3t2 := "spew_test.indirCir2" + v3t3 := "spew_test.indirCir3" + v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + + ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + + ")({\n ps2: (*" + v3t2 + ")(" + tic2Addr + + ")()\n })\n })\n })\n}" + v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + + ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + + ")()\n })\n })\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n") +} + +func addPanicDumpTests() { + // Type that panics in its Stringer interface. + v := panicer(127) + nv := (*panicer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.panicer" + vs := "(PANIC=test panic)127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addErrorDumpTests() { + // Type that has a custom Error interface. + v := customError(127) + nv := (*customError)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.customError" + vs := "error: 127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +// TestDump executes all of the tests described by dumpTests. +func TestDump(t *testing.T) { + // Setup tests. 
+ addIntDumpTests() + addUintDumpTests() + addBoolDumpTests() + addFloatDumpTests() + addComplexDumpTests() + addArrayDumpTests() + addSliceDumpTests() + addStringDumpTests() + addInterfaceDumpTests() + addMapDumpTests() + addStructDumpTests() + addUintptrDumpTests() + addUnsafePointerDumpTests() + addChanDumpTests() + addFuncDumpTests() + addCircularDumpTests() + addPanicDumpTests() + addErrorDumpTests() + addCgoDumpTests() + + t.Logf("Running %d tests", len(dumpTests)) + for i, test := range dumpTests { + buf := new(bytes.Buffer) + spew.Fdump(buf, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants)) + continue + } + } +} + +func TestDumpSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := `(map[int]string) { +(int) 1: (string) "1", +(int) 2: (string) "2", +(int) 3: (string) "3" +} +` + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/dumpcgo_test.go new file mode 100644 index 00000000..1e40950e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/dumpcgo_test.go @@ -0,0 +1,82 @@ +// Copyright (c) 2013 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when both cgo is supported and "-tags testcgo" is added to the go test +// command line. This means the cgo tests are only added (and hence run) when +// specifially requested. This configuration is used because spew itself +// does not require cgo to run even though it does handle certain cgo types +// specially. Rather than forcing all clients to require cgo and an external +// C compiler just to run the tests, this scheme makes them optional. +// +build cgo,testcgo + +package spew_test + +import ( + "camlistore.org/third_party/github.com/davecgh/go-spew/spew/testdata" + "fmt" +) + +func addCgoDumpTests() { + // C char pointer. + v := testdata.GetCgoCharPointer() + nv := testdata.GetCgoNullCharPointer() + pv := &v + vcAddr := fmt.Sprintf("%p", v) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "*testdata._Ctype_char" + vs := "116" + addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n") + addDumpTest(nv, "("+vt+")()\n") + + // C char array. + v2 := testdata.GetCgoCharArray() + v2t := "[6]testdata._Ctype_char" + v2s := "{\n 00000000 74 65 73 74 32 00 " + + " |test2.|\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + + // C unsigned char array. + v3 := testdata.GetCgoUnsignedCharArray() + v3t := "[6]testdata._Ctype_unsignedchar" + v3s := "{\n 00000000 74 65 73 74 33 00 " + + " |test3.|\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + + // C signed char array. 
+ v4 := testdata.GetCgoSignedCharArray() + v4t := "[6]testdata._Ctype_schar" + v4t2 := "testdata._Ctype_schar" + v4s := "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 + + ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 + + ") 0\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + + // C uint8_t array. + v5 := testdata.GetCgoUint8tArray() + v5t := "[6]testdata._Ctype_uint8_t" + v5s := "{\n 00000000 74 65 73 74 35 00 " + + " |test5.|\n}" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + + // C typedefed unsigned char array. + v6 := testdata.GetCgoTypdefedUnsignedCharArray() + v6t := "[6]testdata._Ctype_custom_uchar_t" + v6s := "{\n 00000000 74 65 73 74 36 00 " + + " |test6.|\n}" + addDumpTest(v6, "("+v6t+") "+v6s+"\n") +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/dumpnocgo_test.go new file mode 100644 index 00000000..52a0971f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/dumpnocgo_test.go @@ -0,0 +1,26 @@ +// Copyright (c) 2013 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +// NOTE: Due to the following build constraints, this file will only be compiled +// when either cgo is not supported or "-tags testcgo" is not added to the go +// test command line. This file intentionally does not setup any cgo tests in +// this scenario. +// +build !cgo !testcgo + +package spew_test + +func addCgoDumpTests() { + // Don't add any tests for cgo since this file is only compiled when + // there should not be any cgo tests. +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/example_test.go new file mode 100644 index 00000000..9bd7a970 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/example_test.go @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package spew_test + +import ( + "camlistore.org/third_party/github.com/davecgh/go-spew/spew" + "fmt" +) + +type Flag int + +const ( + flagOne Flag = iota + flagTwo +) + +var flagStrings = map[Flag]string{ + flagOne: "flagOne", + flagTwo: "flagTwo", +} + +func (f Flag) String() string { + if s, ok := flagStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown flag (%d)", int(f)) +} + +type Bar struct { + flag Flag + data uintptr +} + +type Foo struct { + unexportedField Bar + ExportedField map[interface{}]interface{} +} + +// This example demonstrates how to use Dump to dump variables to stdout. +func ExampleDump() { + // The following package level declarations are assumed for this example: + /* + type Flag int + + const ( + flagOne Flag = iota + flagTwo + ) + + var flagStrings = map[Flag]string{ + flagOne: "flagOne", + flagTwo: "flagTwo", + } + + func (f Flag) String() string { + if s, ok := flagStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown flag (%d)", int(f)) + } + + type Bar struct { + flag Flag + data uintptr + } + + type Foo struct { + unexportedField Bar + ExportedField map[interface{}]interface{} + } + */ + + // Setup some sample data structures for the example. + bar := Bar{Flag(flagTwo), uintptr(0)} + s1 := Foo{bar, map[interface{}]interface{}{"one": true}} + f := Flag(5) + b := []byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + + // Dump! + spew.Dump(s1, f, b) + + // Output: + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // flag: (spew_test.Flag) flagTwo, + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) { + // (string) "one": (bool) true + // } + // } + // (spew_test.Flag) Unknown flag (5) + // ([]uint8) { + // 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... 
| + // 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + // 00000020 31 32 |12| + // } + // +} + +// This example demonstrates how to use Printf to display a variable with a +// format string and inline formatting. +func ExamplePrintf() { + // Create a double pointer to a uint 8. + ui8 := uint8(5) + pui8 := &ui8 + ppui8 := &pui8 + + // Create a circular data type. + type circular struct { + ui8 uint8 + c *circular + } + c := circular{ui8: 1} + c.c = &c + + // Print! + spew.Printf("ppui8: %v\n", ppui8) + spew.Printf("circular: %v\n", c) + + // Output: + // ppui8: <**>5 + // circular: {1 <*>{1 <*>}} +} + +// This example demonstrates how to use a ConfigState. +func ExampleConfigState() { + // Modify the indent level of the ConfigState only. The global + // configuration is not modified. + scs := spew.ConfigState{Indent: "\t"} + + // Output using the ConfigState instance. + v := map[string]int{"one": 1} + scs.Printf("v: %v\n", v) + scs.Dump(v) + + // Output: + // v: map[one:1] + // (map[string]int) { + // (string) "one": (int) 1 + // } +} + +// This example demonstrates how to use ConfigState.Dump to dump variables to +// stdout +func ExampleConfigState_Dump() { + // See the top-level Dump example for details on the types used in this + // example. + + // Create two ConfigState instances with different indentation. + scs := spew.ConfigState{Indent: "\t"} + scs2 := spew.ConfigState{Indent: " "} + + // Setup some sample data structures for the example. + bar := Bar{Flag(flagTwo), uintptr(0)} + s1 := Foo{bar, map[interface{}]interface{}{"one": true}} + + // Dump using the ConfigState instances. 
+ scs.Dump(s1) + scs2.Dump(s1) + + // Output: + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // flag: (spew_test.Flag) flagTwo, + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) { + // (string) "one": (bool) true + // } + // } + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // flag: (spew_test.Flag) flagTwo, + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) { + // (string) "one": (bool) true + // } + // } + // +} + +// This example demonstrates how to use ConfigState.Printf to display a variable +// with a format string and inline formatting. +func ExampleConfigState_Printf() { + // See the top-level Dump example for details on the types used in this + // example. + + // Create two ConfigState instances and modify the method handling of the + // first ConfigState only. + scs := spew.NewDefaultConfig() + scs2 := spew.NewDefaultConfig() + scs.DisableMethods = true + + // Alternatively + // scs := spew.ConfigState{Indent: " ", DisableMethods: true} + // scs2 := spew.ConfigState{Indent: " "} + + // This is of type Flag which implements a Stringer and has raw value 1. + f := flagTwo + + // Dump using the ConfigState instances. + scs.Printf("f: %v\n", f) + scs2.Printf("f: %v\n", f) + + // Output: + // f: 1 + // f: flagTwo +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 00000000..b6b1fb0d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,413 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. +func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. 
+func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. 
+ nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. + if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. 
+ if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. 
We should never get here since pointers have already + // been handled above. + + case reflect.Map: + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. 
+ if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/format_test.go new file mode 100644 index 00000000..3cbe669b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/format_test.go @@ -0,0 +1,1483 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. 
+ +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +- Type that has a custom Error interface +- %x passthrough with uint +- %#x passthrough with uint +- %f passthrough with precision +- %f passthrough with width and precision +- %d passthrough with width +- %q passthrough with string +*/ + +package spew_test + +import ( + "bytes" + "camlistore.org/third_party/github.com/davecgh/go-spew/spew" + "fmt" + "testing" + "unsafe" +) + +// formatterTest is used to describe a test to be perfomed against NewFormatter. 
+type formatterTest struct { + format string + in interface{} + wants []string +} + +// formatterTests houses all of the tests to be performed against NewFormatter. +var formatterTests = make([]formatterTest, 0) + +// addFormatterTest is a helper method to append the passed input and desired +// result to formatterTests. +func addFormatterTest(format string, in interface{}, wants ...string) { + test := formatterTest{format, in, wants} + formatterTests = append(formatterTests, test) +} + +func addIntFormatterTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max int16. 
+ v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max int64. 
+ v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max int. + v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"") +} + +func addUintFormatterTests() { + // Max uint8. 
+ v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max uint16. + v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max uint32. 
+ v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max uint64. + v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max uint. 
+ v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") +} + +func addBoolFormatterTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Boolean false. 
+ v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFloatFormatterTests() { + // Standard float32. + v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard float64. 
+ v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addComplexFormatterTests() { + // Standard complex64. + v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard complex128. 
+ v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addArrayFormatterTests() { + // Array containing standard ints. 
+ v := [3]int{1, 2, 3} + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[3]int" + vs := "[1 2 3]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Array containing type with custom formatter on pointer receiver only. + v2 := [3]pstringer{"1", "2", "3"} + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[3]spew_test.pstringer" + v2s := "[stringer 1 stringer 2 stringer 3]" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Array 
containing interfaces. + v3 := [3]interface{}{"one", int(2), uint(3)} + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "[one 2 3]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addSliceFormatterTests() { + // Slice containing standard float32 values. 
+ v := []float32{3.14, 6.28, 12.56} + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[]float32" + vs := "[3.14 6.28 12.56]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Slice containing type with custom formatter on pointer receiver only. + v2 := []pstringer{"1", "2", "3"} + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[]spew_test.pstringer" + v2s := "[stringer 1 stringer 2 stringer 3]" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, 
"(*"+v2t+")"+"") + + // Slice containing interfaces. + v3 := []interface{}{"one", int(2), uint(3), nil} + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "[one 2 3 ]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 + + ")]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Nil slice. 
+ var v4 []int + nv4 := (*[]int)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]int" + v4s := "" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStringFormatterTests() { + // Standard string. + v := "test" + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "test" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addInterfaceFormatterTests() { + // Nil interface. 
+ var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Sub-interface. + v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addMapFormatterTests() { + // Map with string keys and int vals. 
+ v := map[string]int{"one": 1, "two": 2} + nv := (*map[string]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "map[string]int" + vs := "map[one:1 two:2]" + vs2 := "map[two:2 one:1]" + addFormatterTest("%v", v, vs, vs2) + addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs, + "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs, + "(*"+vt+")("+vAddr+")"+vs2) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs, + "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Map with custom formatter type on pointer receiver only keys and vals. 
+ v2 := map[pstringer]pstringer{"one": "1"} + nv2 := (*map[pstringer]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "map[spew_test.pstringer]spew_test.pstringer" + v2s := "map[stringer one:stringer 1]" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Map with interface keys and values. 
+ v3 := map[interface{}]interface{}{"one": 1} + nv3 := (*map[interface{}]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "map[interface {}]interface {}" + v3t1 := "string" + v3t2 := "int" + v3s := "map[one:1]" + v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Map with nil interface value + v4 := map[string]interface{}{"nil": nil} + nv4 := (*map[string]interface{})(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "map[string]interface {}" + v4t1 := "interface {}" + v4s := "map[nil:]" + v4s2 := "map[nil:(" + v4t1 + ")]" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s2) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", 
v4, "("+v4t+")"+v4s2) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStructFormatterTests() { + // Struct with primitives. + type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{127 255}" + vs2 := "{a:127 b:255}" + vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs3) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs3) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs3) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Struct that contains another struct. 
+ type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{{127 255} true}" + v2s2 := "{s1:{a:127 b:255} b:true}" + v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" + + v2t5 + ")true}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s2) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. 
+ type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{stringer test stringer test2}" + v3s2 := "{s:stringer test S:stringer test2}" + v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s2) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Struct that contains embedded struct and field to same struct. 
+ e := embed{"embedstr"} + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{<*>{embedstr} <*>{embedstr}}" + v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr + + "){a:embedstr}}" + v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 + + "){a:(" + v4t3 + ")embedstr}}" + v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + + ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s2) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s3) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addUintptrFormatterTests() { + // Null pointer. 
+ v := uintptr(0) + nv := (*uintptr)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addUnsafePointerFormatterTests() { + // Null pointer. 
+ v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. + i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addChanFormatterTests() { + // Nil channel. 
+ var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFuncFormatterTests() { + // Function with no params and no returns. 
+ v := addIntFormatterTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Function with param and no returns. + v2 := TestFormatter + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Function with multiple params and multiple returns. 
+ var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addCircularFormatterTests() { + // Struct that is circular through self referencing. 
+ type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{<*>{<*>}}" + vs2 := "{<*>}" + vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")}}" + vs4 := "{c:<*>(" + vAddr + ")}" + vs5 := "{c:(*" + vt + "){c:(*" + vt + ")}}" + vs6 := "{c:(*" + vt + ")}" + vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr + + ")}}" + vs8 := "{c:(*" + vt + ")(" + vAddr + ")}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs2) + addFormatterTest("%+v", v, vs3) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4) + addFormatterTest("%#v", v, "("+vt+")"+vs5) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs6) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6) + addFormatterTest("%#+v", v, "("+vt+")"+vs7) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8) + + // Structs that are circular through cross referencing. 
+ v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{<*>{<*>{<*>}}}" + v2s2 := "{<*>{<*>}}" + v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" + + ts2Addr + ")}}}" + v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")}}" + v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 + + ")}}}" + v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")}}" + v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr + + ")}}}" + v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + ")}}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s2) + addFormatterTest("%v", &pv2, "<**>"+v2s2) + addFormatterTest("%+v", v2, v2s3) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s5) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8) + + // Structs that are indirectly circular. 
+ v3 := indirCir1{nil} + tic2 := indirCir2{nil} + tic3 := indirCir3{&v3} + tic2.ps3 = &tic3 + v3.ps2 = &tic2 + pv3 := &v3 + tic2Addr := fmt.Sprintf("%p", &tic2) + tic3Addr := fmt.Sprintf("%p", &tic3) + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.indirCir1" + v3t2 := "spew_test.indirCir2" + v3t3 := "spew_test.indirCir3" + v3s := "{<*>{<*>{<*>{<*>}}}}" + v3s2 := "{<*>{<*>{<*>}}}" + v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + + v3Addr + "){ps2:<*>(" + tic2Addr + ")}}}}" + v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + + v3Addr + ")}}}" + v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + + "){ps2:(*" + v3t2 + ")}}}}" + v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + + ")}}}" + v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + + tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 + + ")(" + tic2Addr + ")}}}}" + v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + + tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")}}}" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s2) + addFormatterTest("%v", &pv3, "<**>"+v3s2) + addFormatterTest("%+v", v3, v3s3) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4) + addFormatterTest("%#v", v3, "("+v3t+")"+v3s5) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6) + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8) +} + +func addPanicFormatterTests() { + // Type that panics in its Stringer interface. 
+ v := panicer(127) + nv := (*panicer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.panicer" + vs := "(PANIC=test panic)127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addErrorFormatterTests() { + // Type that has a custom Error interface. + v := customError(127) + nv := (*customError)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.customError" + vs := "error: 127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addPassthroughFormatterTests() { + // %x passthrough with uint. 
+ v := uint(4294967295) + pv := &v + vAddr := fmt.Sprintf("%x", pv) + pvAddr := fmt.Sprintf("%x", &pv) + vs := "ffffffff" + addFormatterTest("%x", v, vs) + addFormatterTest("%x", pv, vAddr) + addFormatterTest("%x", &pv, pvAddr) + + // %#x passthrough with uint. + v2 := int(2147483647) + pv2 := &v2 + v2Addr := fmt.Sprintf("%#x", pv2) + pv2Addr := fmt.Sprintf("%#x", &pv2) + v2s := "0x7fffffff" + addFormatterTest("%#x", v2, v2s) + addFormatterTest("%#x", pv2, v2Addr) + addFormatterTest("%#x", &pv2, pv2Addr) + + // %f passthrough with precision. + addFormatterTest("%.2f", 3.1415, "3.14") + addFormatterTest("%.3f", 3.1415, "3.142") + addFormatterTest("%.4f", 3.1415, "3.1415") + + // %f passthrough with width and precision. + addFormatterTest("%5.2f", 3.1415, " 3.14") + addFormatterTest("%6.3f", 3.1415, " 3.142") + addFormatterTest("%7.4f", 3.1415, " 3.1415") + + // %d passthrough with width. + addFormatterTest("%3d", 127, "127") + addFormatterTest("%4d", 127, " 127") + addFormatterTest("%5d", 127, " 127") + + // %q passthrough with string. + addFormatterTest("%q", "test", "\"test\"") +} + +// TestFormatter executes all of the tests described by formatterTests. +func TestFormatter(t *testing.T) { + // Setup tests. 
+ addIntFormatterTests() + addUintFormatterTests() + addBoolFormatterTests() + addFloatFormatterTests() + addComplexFormatterTests() + addArrayFormatterTests() + addSliceFormatterTests() + addStringFormatterTests() + addInterfaceFormatterTests() + addMapFormatterTests() + addStructFormatterTests() + addUintptrFormatterTests() + addUnsafePointerFormatterTests() + addChanFormatterTests() + addFuncFormatterTests() + addCircularFormatterTests() + addPanicFormatterTests() + addErrorFormatterTests() + addPassthroughFormatterTests() + + t.Logf("Running %d tests", len(formatterTests)) + for i, test := range formatterTests { + buf := new(bytes.Buffer) + spew.Fprintf(buf, test.format, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s, + stringizeWants(test.wants)) + continue + } + } +} + +func TestPrintSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "map[1:1 2:2 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/internal_test.go new file mode 100644 index 00000000..faac638a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/internal_test.go @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +This test file is part of the spew package rather than than the spew_test +package because it needs access to internals to properly test certain cases +which are not possible via the public interface since they should never happen. +*/ + +package spew + +import ( + "bytes" + "reflect" + "testing" + "unsafe" +) + +// dummyFmtState implements a fake fmt.State to use for testing invalid +// reflect.Value handling. This is necessary because the fmt package catches +// invalid values before invoking the formatter on them. +type dummyFmtState struct { + bytes.Buffer +} + +func (dfs *dummyFmtState) Flag(f int) bool { + if f == int('+') { + return true + } + return false +} + +func (dfs *dummyFmtState) Precision() (int, bool) { + return 0, false +} + +func (dfs *dummyFmtState) Width() (int, bool) { + return 0, false +} + +// TestInvalidReflectValue ensures the dump and formatter code handles an +// invalid reflect value properly. This needs access to internal state since it +// should never happen in real code and therefore can't be tested via the public +// API. +func TestInvalidReflectValue(t *testing.T) { + i := 1 + + // Dump invalid reflect value. + v := new(reflect.Value) + buf := new(bytes.Buffer) + d := dumpState{w: buf, cs: &Config} + d.dump(*v) + s := buf.String() + want := "" + if s != want { + t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want) + } + i++ + + // Formatter invalid reflect value. 
+ buf2 := new(dummyFmtState) + f := formatState{value: *v, cs: &Config, fs: buf2} + f.format(*v) + s = buf2.String() + want = "" + if s != want { + t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want) + } +} + +// flagRO, flagKindShift and flagKindWidth indicate various bit flags that the +// reflect package uses internally to track kind and state information. +const flagRO = 1 << 0 +const flagKindShift = 4 +const flagKindWidth = 5 + +// changeKind uses unsafe to intentionally change the kind of a reflect.Value to +// the maximum kind value which does not exist. This is needed to test the +// fallback code which punts to the standard fmt library for new types that +// might get added to the language. +func changeKind(v *reflect.Value, readOnly bool) { + rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag)) + *rvf = *rvf | ((1< + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. 
See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/spew_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/spew_test.go new file mode 100644 index 00000000..e75d0165 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/spew_test.go @@ -0,0 +1,308 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "bytes" + "camlistore.org/third_party/github.com/davecgh/go-spew/spew" + "fmt" + "io/ioutil" + "os" + "testing" +) + +// spewFunc is used to identify which public function of the spew package or +// ConfigState a test applies to. +type spewFunc int + +const ( + fCSFdump spewFunc = iota + fCSFprint + fCSFprintf + fCSFprintln + fCSPrint + fCSPrintln + fCSSdump + fCSSprint + fCSSprintf + fCSSprintln + fCSErrorf + fCSNewFormatter + fErrorf + fFprint + fFprintln + fPrint + fPrintln + fSdump + fSprint + fSprintf + fSprintln +) + +// Map of spewFunc values to names for pretty printing. +var spewFuncStrings = map[spewFunc]string{ + fCSFdump: "ConfigState.Fdump", + fCSFprint: "ConfigState.Fprint", + fCSFprintf: "ConfigState.Fprintf", + fCSFprintln: "ConfigState.Fprintln", + fCSSdump: "ConfigState.Sdump", + fCSPrint: "ConfigState.Print", + fCSPrintln: "ConfigState.Println", + fCSSprint: "ConfigState.Sprint", + fCSSprintf: "ConfigState.Sprintf", + fCSSprintln: "ConfigState.Sprintln", + fCSErrorf: "ConfigState.Errorf", + fCSNewFormatter: "ConfigState.NewFormatter", + fErrorf: "spew.Errorf", + fFprint: "spew.Fprint", + fFprintln: "spew.Fprintln", + fPrint: "spew.Print", + fPrintln: "spew.Println", + fSdump: "spew.Sdump", + fSprint: "spew.Sprint", + fSprintf: "spew.Sprintf", + fSprintln: "spew.Sprintln", +} + +func (f spewFunc) String() string { + if s, ok := spewFuncStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown spewFunc (%d)", int(f)) +} + +// spewTest is used to describe a test to be performed against the public +// functions of the spew package or ConfigState. 
+type spewTest struct { + cs *spew.ConfigState + f spewFunc + format string + in interface{} + want string +} + +// spewTests houses the tests to be performed against the public functions of +// the spew package and ConfigState. +// +// These tests are only intended to ensure the public functions are exercised +// and are intentionally not exhaustive of types. The exhaustive type +// tests are handled in the dump and format tests. +var spewTests []spewTest + +// redirStdout is a helper function to return the standard output from f as a +// byte slice. +func redirStdout(f func()) ([]byte, error) { + tempFile, err := ioutil.TempFile("", "ss-test") + if err != nil { + return nil, err + } + fileName := tempFile.Name() + defer os.Remove(fileName) // Ignore error + + origStdout := os.Stdout + os.Stdout = tempFile + f() + os.Stdout = origStdout + tempFile.Close() + + return ioutil.ReadFile(fileName) +} + +func initSpewTests() { + // Config states with various settings. + scsDefault := spew.NewDefaultConfig() + scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true} + scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true} + scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1} + scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true} + + // Variables for tests on types which implement Stringer interface with and + // without a pointer receiver. + ts := stringer("test") + tps := pstringer("test") + + // depthTester is used to test max depth handling for structs, array, slices + // and maps. + type depthTester struct { + ic indirCir1 + arr [1]string + slice []string + m map[string]int + } + dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"}, + map[string]int{"one": 1}} + + // Variable for tests on types which implement error interface. 
+ te := customError(10) + + spewTests = []spewTest{ + {scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"}, + {scsDefault, fCSFprint, "", int16(32767), "32767"}, + {scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"}, + {scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"}, + {scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"}, + {scsDefault, fCSPrintln, "", uint8(255), "255\n"}, + {scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"}, + {scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"}, + {scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"}, + {scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"}, + {scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"}, + {scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"}, + {scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"}, + {scsDefault, fFprint, "", float32(3.14), "3.14"}, + {scsDefault, fFprintln, "", float64(6.28), "6.28\n"}, + {scsDefault, fPrint, "", true, "true"}, + {scsDefault, fPrintln, "", false, "false\n"}, + {scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"}, + {scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"}, + {scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"}, + {scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"}, + {scsNoMethods, fCSFprint, "", ts, "test"}, + {scsNoMethods, fCSFprint, "", &ts, "<*>test"}, + {scsNoMethods, fCSFprint, "", tps, "test"}, + {scsNoMethods, fCSFprint, "", &tps, "<*>test"}, + {scsNoPmethods, fCSFprint, "", ts, "stringer test"}, + {scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"}, + {scsNoPmethods, fCSFprint, "", tps, "test"}, + {scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"}, + {scsMaxDepth, fCSFprint, "", dt, "{{} [] [] map[]}"}, + {scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" + + " ic: (spew_test.indirCir1) {\n \n },\n" + + " arr: ([1]string) {\n \n },\n" + + " slice: 
([]string) {\n \n },\n" + + " m: (map[string]int) {\n \n }\n}\n"}, + {scsContinue, fCSFprint, "", ts, "(stringer test) test"}, + {scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " + + "(stringer test) \"test\"\n"}, + {scsContinue, fCSFprint, "", te, "(error: 10) 10"}, + {scsContinue, fCSFdump, "", te, "(spew_test.customError) " + + "(error: 10) 10\n"}, + } +} + +// TestSpew executes all of the tests described by spewTests. +func TestSpew(t *testing.T) { + initSpewTests() + + t.Logf("Running %d tests", len(spewTests)) + for i, test := range spewTests { + buf := new(bytes.Buffer) + switch test.f { + case fCSFdump: + test.cs.Fdump(buf, test.in) + + case fCSFprint: + test.cs.Fprint(buf, test.in) + + case fCSFprintf: + test.cs.Fprintf(buf, test.format, test.in) + + case fCSFprintln: + test.cs.Fprintln(buf, test.in) + + case fCSPrint: + b, err := redirStdout(func() { test.cs.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSPrintln: + b, err := redirStdout(func() { test.cs.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSSdump: + str := test.cs.Sdump(test.in) + buf.WriteString(str) + + case fCSSprint: + str := test.cs.Sprint(test.in) + buf.WriteString(str) + + case fCSSprintf: + str := test.cs.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fCSSprintln: + str := test.cs.Sprintln(test.in) + buf.WriteString(str) + + case fCSErrorf: + err := test.cs.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fCSNewFormatter: + fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in)) + + case fErrorf: + err := spew.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fFprint: + spew.Fprint(buf, test.in) + + case fFprintln: + spew.Fprintln(buf, test.in) + + case fPrint: + b, err := redirStdout(func() { spew.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue 
+ } + buf.Write(b) + + case fPrintln: + b, err := redirStdout(func() { spew.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fSdump: + str := spew.Sdump(test.in) + buf.WriteString(str) + + case fSprint: + str := spew.Sprint(test.in) + buf.WriteString(str) + + case fSprintf: + str := spew.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fSprintln: + str := spew.Sprintln(test.in) + buf.WriteString(str) + + default: + t.Errorf("%v #%d unrecognized function", test.f, i) + continue + } + s := buf.String() + if test.want != s { + t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want) + continue + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go new file mode 100644 index 00000000..2e984523 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go @@ -0,0 +1,81 @@ +// Copyright (c) 2013 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +// NOTE: Due to the following build constraints, this file will only be compiled +// when both cgo is supported and "-tags testcgo" is added to the go test +// command line. This code should really only be in the dumpcgo_test.go file, +// but unfortunately Go will not allow cgo in test files, so this is a +// workaround to allow cgo types to be tested. This configuration is used +// because spew itself does not require cgo to run even though it does handle +// certain cgo types specially. Rather than forcing all clients to require cgo +// and an external C compiler just to run the tests, this scheme makes them +// optional. +// +build cgo,testcgo + +package testdata + +/* +#include +typedef unsigned char custom_uchar_t; + +char *ncp = 0; +char *cp = "test"; +char ca[6] = {'t', 'e', 's', 't', '2', '\0'}; +unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'}; +signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'}; +uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'}; +custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'}; +*/ +import "C" + +// GetCgoNullCharPointer returns a null char pointer via cgo. This is only +// used for tests. +func GetCgoNullCharPointer() interface{} { + return C.ncp +} + +// GetCgoCharPointer returns a char pointer via cgo. This is only used for +// tests. +func GetCgoCharPointer() interface{} { + return C.cp +} + +// GetCgoCharArray returns a char array via cgo. This is only used for tests. +func GetCgoCharArray() interface{} { + return C.ca +} + +// GetCgoUnsignedCharArray returns an unsigned char array via cgo. This is only +// used for tests. +func GetCgoUnsignedCharArray() interface{} { + return C.uca +} + +// GetCgoSignedCharArray returns a signed char array via cgo. This is only used +// for tests. +func GetCgoSignedCharArray() interface{} { + return C.sca +} + +// GetCgoUint8tArray returns a uint8_t array via cgo. This is only used for +// tests. 
+func GetCgoUint8tArray() interface{} { + return C.ui8ta +} + +// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via +// cgo. This is only used for tests. +func GetCgoTypdefedUnsignedCharArray() interface{} { + return C.tuca +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/test_coverage.txt new file mode 100644 index 00000000..2cd087a2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/davecgh/go-spew/test_coverage.txt @@ -0,0 +1,61 @@ + +github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88) +github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82) +github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39) +github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30) +github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18) +github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13) +github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12) +github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11) +github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11) +github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10) +github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8) +github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7) +github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5) +github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4) +github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4) 
+github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3) +github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1) 
+github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1) +github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505) + diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/.gitignore b/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/.gitignore new file mode 100644 index 00000000..e45d6ac9 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/.gitignore @@ -0,0 +1,5 @@ +_testmain.go +*.6 +*.a +6.out +example diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/README.markdown b/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/README.markdown new file mode 100644 index 00000000..def00909 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/README.markdown @@ -0,0 +1,20 @@ +# Go-OAuth + +Go-OAuth is a [Go](http://golang.org/) client for the OAuth 1.0, OAuth 1.0a and RFC 5849. 
+ +## Installation + +Use the [go tool](http://weekly.golang.org/cmd/go/) to install Go-OAuth: + + go get github.com/garyburd/go-oauth/oauth + +##License + +Go-OAuth is available under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html). + +## Documentation + +- [Reference](http://godoc.org/github.com/garyburd/go-oauth/oauth) +- [Twitter Example](http://github.com/garyburd/go-oauth/tree/master/examples/twitter) +- [Dropbox Example](http://github.com/garyburd/go-oauth/tree/master/examples/dropbox) +- [Netflix Example](http://github.com/garyburd/go-oauth/tree/master/examples/netflix) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/oauth/examples_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/oauth/examples_test.go new file mode 100644 index 00000000..e6b91d76 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/oauth/examples_test.go @@ -0,0 +1,54 @@ +// Copyright 2013 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package oauth_test + +import ( + "camlistore.org/third_party/github.com/garyburd/go-oauth/oauth" + "net/http" + "net/url" + "strings" +) + +// This example shows how to sign a request when the URL Opaque field is used. +// See the note at http://golang.org/pkg/net/url/#URL for information on the +// use of the URL Opaque field. 
+func ExampleClient_AuthorizationHeader(client *oauth.Client, credentials *oauth.Credentials) error { + form := url.Values{"maxResults": {"100"}} + + // The last element of path contains a "/". + path := "/document/encoding%2gizp" + + // Create the request with the temporary path "/". + req, err := http.NewRequest("GET", "http://api.example.com/", strings.NewReader(form.Encode())) + if err != nil { + return err + } + + // Overwrite the temporary path with the actual request path. + req.URL.Opaque = path + + // Sign the request. + req.Header.Set("Authorization", client.AuthorizationHeader(credentials, "GET", req.URL, form)) + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + // process the response + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/oauth/oauth.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/oauth/oauth.go new file mode 100644 index 00000000..3bd0bc9a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/oauth/oauth.go @@ -0,0 +1,442 @@ +// Copyright 2010 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package oauth is consumer interface for OAuth 1.0, OAuth 1.0a and RFC 5849. 
+// +// Redirection-based Authorization +// +// This section outlines how to use the oauth package in redirection-based +// authorization (http://tools.ietf.org/html/rfc5849#section-2). +// +// Step 1: Create a Client using credentials and URIs provided by the server. +// The Client can be initialized once at application startup and stored in a +// package-level variable. +// +// Step 2: Request temporary credentials using the Client +// RequestTemporaryCredentials method. The callbackURL parameter is the URL of +// the callback handler in step 4. Save the returned credential secret so that +// it can be later found using credential token as a key. The secret can be +// stored in a database keyed by the token. Another option is to store the +// token and secret in session storage or a cookie. +// +// Step 3: Redirect the user to URL returned from AuthorizationURL method. The +// AuthorizationURL method uses the temporary credentials from step 2 and other +// parameters as specified by the server. +// +// Step 4: The server redirects back to the callback URL specified in step 2 +// with the temporary token and a verifier. Use the temporary token to find the +// temporary secret saved in step 2. Using the temporary token, temporary +// secret and verifier, request token credentials using the client RequestToken +// method. Save the returned credentials for later use in the application. +// +// Signing Requests +// +// The Client type has two low-level methods for signing requests, SignForm and +// AuthorizationHeader. +// +// The SignForm method adds an OAuth signature to a form. The application makes +// an authenticated request by encoding the modified form to the query string +// or request body. +// +// The AuthorizationHeader method returns an Authorization header value with +// the OAuth signature. The application makes an authenticated request by +// adding the Authorization header to the request. 
The AuthorizationHeader +// method is the only way to correctly sign a request if the application sets +// the URL Opaque field when making a request. +// +// The Get and Post methods sign and invoke a request using the supplied +// net/http Client. These methods are easy to use, but not as flexible as +// constructing a request using one of the low-level methods. +package oauth + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +// noscape[b] is true if b should not be escaped per section 3.6 of the RFC. +var noEscape = [256]bool{ + 'A': true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, + 'a': true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, + '0': true, true, true, true, true, true, true, true, true, true, + '-': true, + '.': true, + '_': true, + '~': true, +} + +// encode encodes string per section 3.6 of the RFC. If double is true, then +// the encoding is applied twice. +func encode(s string, double bool) []byte { + // Compute size of result. + m := 3 + if double { + m = 5 + } + n := 0 + for i := 0; i < len(s); i++ { + if noEscape[s[i]] { + n += 1 + } else { + n += m + } + } + + p := make([]byte, n) + + // Encode it. 
+ j := 0 + for i := 0; i < len(s); i++ { + b := s[i] + if noEscape[b] { + p[j] = b + j += 1 + } else if double { + p[j] = '%' + p[j+1] = '2' + p[j+2] = '5' + p[j+3] = "0123456789ABCDEF"[b>>4] + p[j+4] = "0123456789ABCDEF"[b&15] + j += 5 + } else { + p[j] = '%' + p[j+1] = "0123456789ABCDEF"[b>>4] + p[j+2] = "0123456789ABCDEF"[b&15] + j += 3 + } + } + return p +} + +type keyValue struct{ key, value []byte } + +type byKeyValue []keyValue + +func (p byKeyValue) Len() int { return len(p) } +func (p byKeyValue) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p byKeyValue) Less(i, j int) bool { + sgn := bytes.Compare(p[i].key, p[j].key) + if sgn == 0 { + sgn = bytes.Compare(p[i].value, p[j].value) + } + return sgn < 0 +} + +func (p byKeyValue) appendValues(values url.Values) byKeyValue { + for k, vs := range values { + k := encode(k, true) + for _, v := range vs { + v := encode(v, true) + p = append(p, keyValue{k, v}) + } + } + return p +} + +// writeBaseString writes method, url, and params to w using the OAuth signature +// base string computation described in section 3.4.1 of the RFC. +func writeBaseString(w io.Writer, method string, u *url.URL, form url.Values, oauthParams map[string]string) { + // Method + w.Write(encode(strings.ToUpper(method), false)) + w.Write([]byte{'&'}) + + // URL + scheme := strings.ToLower(u.Scheme) + host := strings.ToLower(u.Host) + + uNoQuery := *u + uNoQuery.RawQuery = "" + path := uNoQuery.RequestURI() + + switch { + case scheme == "http" && strings.HasSuffix(host, ":80"): + host = host[:len(host)-len(":80")] + case scheme == "https" && strings.HasSuffix(host, ":443"): + host = host[:len(host)-len(":443")] + } + + w.Write(encode(scheme, false)) + w.Write(encode("://", false)) + w.Write(encode(host, false)) + w.Write(encode(path, false)) + w.Write([]byte{'&'}) + + // Create sorted slice of encoded parameters. Parameter keys and values are + // double encoded in a single step. 
This is safe because double encoding + // does not change the sort order. + queryParams := u.Query() + p := make(byKeyValue, 0, len(form)+len(queryParams)+len(oauthParams)) + p = p.appendValues(form) + p = p.appendValues(queryParams) + for k, v := range oauthParams { + p = append(p, keyValue{encode(k, true), encode(v, true)}) + } + sort.Sort(p) + + // Write the parameters. + encodedAmp := encode("&", false) + encodedEqual := encode("=", false) + sep := false + for _, kv := range p { + if sep { + w.Write(encodedAmp) + } else { + sep = true + } + w.Write(kv.key) + w.Write(encodedEqual) + w.Write(kv.value) + } +} + +var ( + nonceLock sync.Mutex + nonceCounter uint64 +) + +// nonce returns a unique string. +func nonce() string { + nonceLock.Lock() + defer nonceLock.Unlock() + if nonceCounter == 0 { + binary.Read(rand.Reader, binary.BigEndian, &nonceCounter) + } + result := strconv.FormatUint(nonceCounter, 16) + nonceCounter += 1 + return result +} + +// oauthParams returns the OAuth request parameters for the given credentials, +// method, URL and application params. See +// http://tools.ietf.org/html/rfc5849#section-3.4 for more information about +// signatures. 
+func oauthParams(clientCredentials *Credentials, credentials *Credentials, method string, u *url.URL, form url.Values) map[string]string { + oauthParams := map[string]string{ + "oauth_consumer_key": clientCredentials.Token, + "oauth_signature_method": "HMAC-SHA1", + "oauth_timestamp": strconv.FormatInt(time.Now().Unix(), 10), + "oauth_version": "1.0", + "oauth_nonce": nonce(), + } + if credentials != nil { + oauthParams["oauth_token"] = credentials.Token + } + if testingNonce != "" { + oauthParams["oauth_nonce"] = testingNonce + } + if testingTimestamp != "" { + oauthParams["oauth_timestamp"] = testingTimestamp + } + + var key bytes.Buffer + key.Write(encode(clientCredentials.Secret, false)) + key.WriteByte('&') + if credentials != nil { + key.Write(encode(credentials.Secret, false)) + } + + h := hmac.New(sha1.New, key.Bytes()) + writeBaseString(h, method, u, form, oauthParams) + sum := h.Sum(nil) + + encodedSum := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) + base64.StdEncoding.Encode(encodedSum, sum) + + oauthParams["oauth_signature"] = string(encodedSum) + return oauthParams +} + +// Client represents an OAuth client. +type Client struct { + Credentials Credentials + TemporaryCredentialRequestURI string // Also known as request token URL. + ResourceOwnerAuthorizationURI string // Also known as authorization URL. + TokenRequestURI string // Also known as access token URL. +} + +// Credentials represents client, temporary and token credentials. +type Credentials struct { + Token string // Also known as consumer key or access token. + Secret string // Also known as consumer secret or access token secret. +} + +var ( + testingTimestamp string + testingNonce string +) + +// SignForm adds an OAuth signature to form. The urlStr argument must not +// include a query string. 
+// +// See http://tools.ietf.org/html/rfc5849#section-3.5.2 for +// information about transmitting OAuth parameters in a request body and +// http://tools.ietf.org/html/rfc5849#section-3.5.2 for information about +// transmitting OAuth parameters in a query string. +func (c *Client) SignForm(credentials *Credentials, method, urlStr string, form url.Values) error { + u, err := url.Parse(urlStr) + switch { + case err != nil: + return err + case u.RawQuery != "": + return errors.New("oauth: urlStr argument to SignForm must not include a query string") + } + for k, v := range oauthParams(&c.Credentials, credentials, method, u, form) { + form.Set(k, v) + } + return nil +} + +// SignParam is deprecated. Use SignForm instead. +func (c *Client) SignParam(credentials *Credentials, method, urlStr string, params url.Values) { + u, _ := url.Parse(urlStr) + u.RawQuery = "" + for k, v := range oauthParams(&c.Credentials, credentials, method, u, params) { + params.Set(k, v) + } +} + +// AuthorizationHeader returns the HTTP authorization header value for given +// method, URL and parameters. +// +// See http://tools.ietf.org/html/rfc5849#section-3.5.1 for information about +// transmitting OAuth parameters in an HTTP request header. 
+func (c *Client) AuthorizationHeader(credentials *Credentials, method string, u *url.URL, params url.Values) string { + p := oauthParams(&c.Credentials, credentials, method, u, params) + var buf bytes.Buffer + buf.WriteString(`OAuth oauth_consumer_key="`) + buf.Write(encode(p["oauth_consumer_key"], false)) + buf.WriteString(`", oauth_nonce="`) + buf.Write(encode(p["oauth_nonce"], false)) + buf.WriteString(`", oauth_signature="`) + buf.Write(encode(p["oauth_signature"], false)) + buf.WriteString(`", oauth_signature_method="HMAC-SHA1", oauth_timestamp="`) + buf.Write(encode(p["oauth_timestamp"], false)) + if t, ok := p["oauth_token"]; ok { + buf.WriteString(`", oauth_token="`) + buf.Write(encode(t, false)) + } + buf.WriteString(`", oauth_version="1.0"`) + return buf.String() +} + +// Get issues a GET to the specified URL with form added as a query string. +func (c *Client) Get(client *http.Client, credentials *Credentials, urlStr string, form url.Values) (*http.Response, error) { + req, err := http.NewRequest("GET", urlStr, nil) + if err != nil { + return nil, err + } + if req.URL.RawQuery != "" { + return nil, errors.New("oauth: url must not contain a query string") + } + req.Header.Set("Authorization", c.AuthorizationHeader(credentials, "GET", req.URL, form)) + req.URL.RawQuery = form.Encode() + return client.Do(req) +} + +// Post issues a POST with the specified form. 
+func (c *Client) Post(client *http.Client, credentials *Credentials, urlStr string, form url.Values) (*http.Response, error) { + req, err := http.NewRequest("POST", urlStr, strings.NewReader(form.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Authorization", c.AuthorizationHeader(credentials, "POST", req.URL, form)) + return client.Do(req) +} + +func (c *Client) request(client *http.Client, credentials *Credentials, urlStr string, params url.Values) (*Credentials, url.Values, error) { + c.SignParam(credentials, "POST", urlStr, params) + resp, err := client.PostForm(urlStr, params) + if err != nil { + return nil, nil, err + } + p, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, nil, err + } + if resp.StatusCode != 200 && resp.StatusCode != 201 { + return nil, nil, fmt.Errorf("OAuth server status %d, %s", resp.StatusCode, string(p)) + } + m, err := url.ParseQuery(string(p)) + if err != nil { + return nil, nil, err + } + tokens := m["oauth_token"] + if len(tokens) == 0 || tokens[0] == "" { + return nil, nil, errors.New("oauth: token missing from server result") + } + secrets := m["oauth_token_secret"] + if len(secrets) == 0 { // allow "" as a valid secret. + return nil, nil, errors.New("oauth: secret mssing from server result") + } + return &Credentials{Token: tokens[0], Secret: secrets[0]}, m, nil +} + +// RequestTemporaryCredentials requests temporary credentials from the server. +// See http://tools.ietf.org/html/rfc5849#section-2.1 for information about +// temporary credentials. 
+func (c *Client) RequestTemporaryCredentials(client *http.Client, callbackURL string, additionalParams url.Values) (*Credentials, error) { + params := make(url.Values) + for k, vs := range additionalParams { + params[k] = vs + } + if callbackURL != "" { + params.Set("oauth_callback", callbackURL) + } + credentials, _, err := c.request(client, nil, c.TemporaryCredentialRequestURI, params) + return credentials, err +} + +// RequestToken requests token credentials from the server. See +// http://tools.ietf.org/html/rfc5849#section-2.3 for information about token +// credentials. +func (c *Client) RequestToken(client *http.Client, temporaryCredentials *Credentials, verifier string) (*Credentials, url.Values, error) { + params := make(url.Values) + if verifier != "" { + params.Set("oauth_verifier", verifier) + } + credentials, vals, err := c.request(client, temporaryCredentials, c.TokenRequestURI, params) + if err != nil { + return nil, nil, err + } + return credentials, vals, nil +} + +// AuthorizationURL returns the URL for resource owner authorization. See +// http://tools.ietf.org/html/rfc5849#section-2.2 for information about +// resource owner authorization. +func (c *Client) AuthorizationURL(temporaryCredentials *Credentials, additionalParams url.Values) string { + params := make(url.Values) + for k, vs := range additionalParams { + params[k] = vs + } + params.Set("oauth_token", temporaryCredentials.Token) + return c.ResourceOwnerAuthorizationURI + "?" 
+ params.Encode() +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/oauth/oauth_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/oauth/oauth_test.go new file mode 100644 index 00000000..8849f480 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/garyburd/go-oauth/oauth/oauth_test.go @@ -0,0 +1,172 @@ +// Copyright 2010 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package oauth + +import ( + "bytes" + "net/url" + "testing" +) + +func parseURL(urlStr string) *url.URL { + u, _ := url.Parse(urlStr) + return u +} + +var oauthTests = []struct { + method string + url *url.URL + appParams url.Values + nonce string + timestamp string + + clientCredentials Credentials + credentials Credentials + + base string + header string +}{ + { + // Simple example from Twitter OAuth tool + "GET", + parseURL("https://api.twitter.com/1/"), + url.Values{"page": {"10"}}, + "8067e8abc6bdca2006818132445c8f4c", + "1355795903", + Credentials{"kMViZR2MHk2mM7hUNVw9A", "56Fgl58yOfqXOhHXX0ybvOmSnPQFvR2miYmm30A"}, + Credentials{"10212-JJ3Zc1A49qSMgdcAO2GMOpW9l7A348ESmhjmOBOU", "yF75mvq4LZMHj9O0DXwoC3ZxUnN1ptvieThYuOAYM"}, + `GET&https%3A%2F%2Fapi.twitter.com%2F1%2F&oauth_consumer_key%3DkMViZR2MHk2mM7hUNVw9A%26oauth_nonce%3D8067e8abc6bdca2006818132445c8f4c%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1355795903%26oauth_token%3D10212-JJ3Zc1A49qSMgdcAO2GMOpW9l7A348ESmhjmOBOU%26oauth_version%3D1.0%26page%3D10`, + `OAuth oauth_consumer_key="kMViZR2MHk2mM7hUNVw9A", oauth_nonce="8067e8abc6bdca2006818132445c8f4c", oauth_signature="o5cx1ggJrY9ognZuVVeUwglKV8U%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1355795903", oauth_token="10212-JJ3Zc1A49qSMgdcAO2GMOpW9l7A348ESmhjmOBOU", oauth_version="1.0"`, + }, + { + // Test case and port insensitivity. 
+ "GeT", + parseURL("https://apI.twItter.com:443/1/"), + url.Values{"page": {"10"}}, + "8067e8abc6bdca2006818132445c8f4c", + "1355795903", + Credentials{"kMViZR2MHk2mM7hUNVw9A", "56Fgl58yOfqXOhHXX0ybvOmSnPQFvR2miYmm30A"}, + Credentials{"10212-JJ3Zc1A49qSMgdcAO2GMOpW9l7A348ESmhjmOBOU", "yF75mvq4LZMHj9O0DXwoC3ZxUnN1ptvieThYuOAYM"}, + `GET&https%3A%2F%2Fapi.twitter.com%2F1%2F&oauth_consumer_key%3DkMViZR2MHk2mM7hUNVw9A%26oauth_nonce%3D8067e8abc6bdca2006818132445c8f4c%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1355795903%26oauth_token%3D10212-JJ3Zc1A49qSMgdcAO2GMOpW9l7A348ESmhjmOBOU%26oauth_version%3D1.0%26page%3D10`, + `OAuth oauth_consumer_key="kMViZR2MHk2mM7hUNVw9A", oauth_nonce="8067e8abc6bdca2006818132445c8f4c", oauth_signature="o5cx1ggJrY9ognZuVVeUwglKV8U%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1355795903", oauth_token="10212-JJ3Zc1A49qSMgdcAO2GMOpW9l7A348ESmhjmOBOU", oauth_version="1.0"`, + }, + { + // Example generated using the Netflix OAuth tool. + "GET", + parseURL("http://api-public.netflix.com/catalog/titles"), + url.Values{"term": {"Dark Knight"}, "count": {"2"}}, + "1234", + "1355850443", + Credentials{"apiKey001", "sharedSecret002"}, + Credentials{"accessToken003", "accessSecret004"}, + `GET&http%3A%2F%2Fapi-public.netflix.com%2Fcatalog%2Ftitles&count%3D2%26oauth_consumer_key%3DapiKey001%26oauth_nonce%3D1234%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1355850443%26oauth_token%3DaccessToken003%26oauth_version%3D1.0%26term%3DDark%2520Knight`, + `OAuth oauth_consumer_key="apiKey001", oauth_nonce="1234", oauth_signature="0JAoaqt6oz6TJx8N%2B06XmhPjcOs%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1355850443", oauth_token="accessToken003", oauth_version="1.0"`, + }, + { + // Test special characters in form values. 
+ "GET", + parseURL("http://PHOTOS.example.net:8001/Photos"), + url.Values{"photo size": {"300%"}, "title": {"Back of $100 Dollars Bill"}}, + "kllo~9940~pd9333jh", + "1191242096", + Credentials{"dpf43f3++p+#2l4k3l03", "secret01"}, + Credentials{"nnch734d(0)0sl2jdk", "secret02"}, + "GET&http%3A%2F%2Fphotos.example.net%3A8001%2FPhotos&oauth_consumer_key%3Ddpf43f3%252B%252Bp%252B%25232l4k3l03%26oauth_nonce%3Dkllo~9940~pd9333jh%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1191242096%26oauth_token%3Dnnch734d%25280%25290sl2jdk%26oauth_version%3D1.0%26photo%2520size%3D300%2525%26title%3DBack%2520of%2520%2524100%2520Dollars%2520Bill", + `OAuth oauth_consumer_key="dpf43f3%2B%2Bp%2B%232l4k3l03", oauth_nonce="kllo~9940~pd9333jh", oauth_signature="n1UAoQy2PoIYizZUiWvkdCxM3P0%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1191242096", oauth_token="nnch734d%280%290sl2jdk", oauth_version="1.0"`, + }, + { + // Test special characters in path, multiple values for same key in form. + "GET", + parseURL("http://EXAMPLE.COM:80/Space%20Craft"), + url.Values{"name": {"value", "value"}}, + "Ix4U1Ei3RFL", + "1327384901", + Credentials{"abcd", "efgh"}, + Credentials{"ijkl", "mnop"}, + "GET&http%3A%2F%2Fexample.com%2FSpace%2520Craft&name%3Dvalue%26name%3Dvalue%26oauth_consumer_key%3Dabcd%26oauth_nonce%3DIx4U1Ei3RFL%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1327384901%26oauth_token%3Dijkl%26oauth_version%3D1.0", + `OAuth oauth_consumer_key="abcd", oauth_nonce="Ix4U1Ei3RFL", oauth_signature="TZZ5u7qQorLnmKs%2Biqunb8gqkh4%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1327384901", oauth_token="ijkl", oauth_version="1.0"`, + }, + { + // Test with query string in URL. 
+ "GET", + parseURL("http://EXAMPLE.COM:80/Space%20Craft?name=value"), + url.Values{"name": {"value"}}, + "Ix4U1Ei3RFL", + "1327384901", + Credentials{"abcd", "efgh"}, + Credentials{"ijkl", "mnop"}, + "GET&http%3A%2F%2Fexample.com%2FSpace%2520Craft&name%3Dvalue%26name%3Dvalue%26oauth_consumer_key%3Dabcd%26oauth_nonce%3DIx4U1Ei3RFL%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1327384901%26oauth_token%3Dijkl%26oauth_version%3D1.0", + `OAuth oauth_consumer_key="abcd", oauth_nonce="Ix4U1Ei3RFL", oauth_signature="TZZ5u7qQorLnmKs%2Biqunb8gqkh4%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1327384901", oauth_token="ijkl", oauth_version="1.0"`, + }, + { + // Test "/" in form value. + "POST", + parseURL("https://stream.twitter.com/1.1/statuses/filter.json"), + url.Values{"track": {"example.com/abcd"}}, + "bf2cb6d611e59f99103238fc9a3bb8d8", + "1362434376", + Credentials{"consumer_key", "consumer_secret"}, + Credentials{"token", "secret"}, + "POST&https%3A%2F%2Fstream.twitter.com%2F1.1%2Fstatuses%2Ffilter.json&oauth_consumer_key%3Dconsumer_key%26oauth_nonce%3Dbf2cb6d611e59f99103238fc9a3bb8d8%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1362434376%26oauth_token%3Dtoken%26oauth_version%3D1.0%26track%3Dexample.com%252Fabcd", + `OAuth oauth_consumer_key="consumer_key", oauth_nonce="bf2cb6d611e59f99103238fc9a3bb8d8", oauth_signature="LcxylEOnNdgoKSJi7jX07mxcvfM%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1362434376", oauth_token="token", oauth_version="1.0"`, + }, + { + // Test "/" in query string + "POST", + parseURL("https://stream.twitter.com/1.1/statuses/filter.json?track=example.com/query"), + url.Values{}, + "884275759fbab914654b50ae643c563a", + "1362435218", + Credentials{"consumer_key", "consumer_secret"}, + Credentials{"token", "secret"}, + 
"POST&https%3A%2F%2Fstream.twitter.com%2F1.1%2Fstatuses%2Ffilter.json&oauth_consumer_key%3Dconsumer_key%26oauth_nonce%3D884275759fbab914654b50ae643c563a%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1362435218%26oauth_token%3Dtoken%26oauth_version%3D1.0%26track%3Dexample.com%252Fquery", + `OAuth oauth_consumer_key="consumer_key", oauth_nonce="884275759fbab914654b50ae643c563a", oauth_signature="OAldqvRrKDXRGZ9BqSi2CqeVH0g%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1362435218", oauth_token="token", oauth_version="1.0"`, + }, +} + +func TestBaseString(t *testing.T) { + for _, ot := range oauthTests { + oauthParams := map[string]string{ + "oauth_consumer_key": ot.clientCredentials.Token, + "oauth_nonce": ot.nonce, + "oauth_timestamp": ot.timestamp, + "oauth_token": ot.credentials.Token, + "oauth_signature_method": "HMAC-SHA1", + "oauth_version": "1.0", + } + var buf bytes.Buffer + writeBaseString(&buf, ot.method, ot.url, ot.appParams, oauthParams) + base := buf.String() + if base != ot.base { + t.Errorf("base string for %s %s\n = %q,\n want %q", ot.method, ot.url, base, ot.base) + } + } +} + +func TestAuthorizationHeader(t *testing.T) { + defer func() { + testingNonce = "" + testingTimestamp = "" + }() + for _, ot := range oauthTests { + c := Client{Credentials: ot.clientCredentials} + testingNonce = ot.nonce + testingTimestamp = ot.timestamp + header := c.AuthorizationHeader(&ot.credentials, ot.method, ot.url, ot.appParams) + if header != ot.header { + t.Errorf("authorization header for %s %s\ngot: %s\nwant: %s", ot.method, ot.url, header, ot.header) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/AUTHORS new file mode 100644 index 00000000..90380641 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/AUTHORS @@ -0,0 +1,28 @@ +# This is the 
official list of Go-MySQL-Driver authors for copyright purposes. + +# If you are submitting a patch, please add your name or the name of the +# organization which holds the copyright to this list in alphabetical order. + +# Names should be added to this file as +# Name +# The email address is not required for organizations. +# Please keep the list sorted. + + +# Individual Persons + +Arne Hormann +Carlos Nieto +Hanno Braun +James Harr +Julien Schmidt +Leonardo YongUk Kim +Lucas Liu +Luke Scott +Michael Woolnough +Nicola Peduzzi +Xiaobing Jiang + +# Organizations + +Google Inc. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/CHANGELOG.md new file mode 100644 index 00000000..617e92e7 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -0,0 +1,40 @@ +## Version 1.1 (2013-11-02) + +Changes: + + - Go-MySQL-Driver now requires Go 1.1 + - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore + - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors + - `byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` + - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'. + - Use the IO buffer also for writing. 
This results in zero allocations (by the driver) for most queries + - Optimized the buffer for reading + - stmt.Query now caches column metadata + - New Logo + - Changed the copyright header to include all contributors + - Improved the LOAD INFILE documentation + - The driver struct is now exported to make the driver directly accessible + - Refactored the driver tests + - Added more benchmarks and moved all to a separate file + - Other small refactoring + +New Features: + + - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure + - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs + - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used + +Bugfixes: + + - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification + - Convert to DB timezone when inserting `time.Time` + - Splitted packets (more than 16MB) are now merged correctly + - Fixed false positive `io.EOF` errors when the data was fully read + - Avoid panics on reuse of closed connections + - Fixed empty string producing false nil values + - Fixed sign byte for positive TIME fields + + +## Version 1.0 (2013-05-14) + +Initial Release diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/CONTRIBUTING.md new file mode 100644 index 00000000..f87c1982 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/CONTRIBUTING.md @@ -0,0 +1,40 @@ +# Contributing Guidelines + +## Reporting Issues + +Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently 
closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). + +Please provide the following minimum information: +* Your Go-MySQL-Driver version (or git SHA) +* Your Go version (run `go version` in your console) +* A detailed issue description +* Error Log if present +* If possible, a short example + + +## Contributing Code + +By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file. +Don't forget to add yourself to the AUTHORS file. + +### Pull Requests Checklist + +Please check the following points before submitting your pull request: +- [x] Code compiles correctly +- [x] Created tests, if possible +- [x] All tests pass +- [x] Extended the README / documentation, if necessary +- [x] Added yourself to the AUTHORS file + +### Code Review + +Everyone is invited to review and comment on pull requests. +If it looks fine to you, comment with "LGTM" (Looks good to me). + +If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes. + +Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". + +## Development Ideas + +If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/LICENSE new file mode 100644 index 00000000..14e2f777 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. 
"Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. 
Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. 
* +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. 
+Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. 
If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/README.md new file mode 100644 index 00000000..3edd5f59 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/README.md @@ -0,0 +1,312 @@ +# Go-MySQL-Driver + +A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package + +![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin") + +**Version 1.1** (November 02, 2013) + +--------------------------------------- + * [Features](#features) + * [Requirements](#requirements) + * [Installation](#installation) + * [Usage](#usage) + * [DSN (Data Source Name)](#dsn-data-source-name) + * [Password](#password) + * [Protocol](#protocol) + * [Address](#address) + * [Parameters](#parameters) + * [Examples](#examples) + * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) + * [time.Time support](#timetime-support) + * [Unicode support](#unicode-support) + * [Testing / Development](#testing--development) + * [License](#license) + +--------------------------------------- + +## Features + * Lightweight and 
[fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance") + * Native Go implementation. No C-bindings, just pure Go + * Connections over TCP/IPv4, TCP/IPv6 or Unix domain sockets + * Automatic handling of broken connections + * Automatic Connection Pooling *(by database/sql package)* + * Supports queries larger than 16MB + * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support. + * Intelligent `LONG DATA` handling in prepared statements + * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support + * Optional `time.Time` parsing + +## Requirements + * Go 1.1 or higher (use [v1.0](https://github.com/go-sql-driver/mysql/tags) for Go 1.0.x) + * MySQL (Version 4.1 or higher), MariaDB or Percona Server + +--------------------------------------- + +## Installation +Simple install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell: +```bash +$ go get github.com/go-sql-driver/mysql +``` +Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`. + +## Usage +_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then. + +Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`: +```go +import "database/sql" +import _ "github.com/go-sql-driver/mysql" + +db, err := sql.Open("mysql", "user:password@/dbname") +``` + +[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples"). + + +### DSN (Data Source Name) + +The Data Source Name has a common format, like e.g. 
[PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets): +``` +[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] +``` + +A DSN in its fullest form: +``` +username:password@protocol(address)/dbname?param=value +``` + +Except for the databasename, all values are optional. So the minimal DSN is: +``` +/dbname +``` + +If you do not want to preselect a database, leave `dbname` empty: +``` +/ +``` +This has the same effect as an empty DSN string: +``` + +``` + +#### Password +Passwords can consist of any character. Escaping is **not** necessary. + +#### Protocol +See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available. +In general you should use an Unix domain socket if available and TCP otherwise for best performance. + +#### Address +For TCP and UDP networks, addresses have the form `host:port`. +If `host` is a literal IPv6 address, it must be enclosed in square brackets. +The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form. + +For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`. + +#### Parameters +*Parameters are case-sensitive!* + +##### `allowAllFiles` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files. +[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html) + +##### `allowOldPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` +`allowAllFiles=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. 
See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords). + +##### `charset` + +``` +Type: string +Valid Values: +Default: none +``` + +Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). + + +##### `clientFoundRows` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed. + + +##### `loc` + +``` +Type: string +Valid Values: +Default: UTC +``` + +Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details. + +Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`. + + +##### `parseTime` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string` + + +##### `strict` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`strict=true` enables strict mode. MySQL warnings are treated as errors. + + +##### `timeout` + +``` +Type: decimal number +Default: OS default +``` + +*Driver* side connection timeout. The value must be a string of decimal numbers, each with optional fraction and a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. 
To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout). + + +##### `tls` + +``` +Type: bool / string +Valid Values: true, false, skip-verify, +Default: false +``` + +`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). + + +##### System Variables + +All other parameters are interpreted as system variables: + * `autocommit`: `"SET autocommit="` + * `time_zone`: `"SET time_zone="` + * [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation="` + * `param`: `"SET ="` + +*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!* + +#### Examples +``` +user@unix(/path/to/socket)/dbname +``` + +``` +root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local +``` + +``` +user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true +``` + +TCP via IPv6: +``` +user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s +``` + +TCP on a remote host, e.g. Amazon RDS: +``` +id:password@tcp(your-amazonaws-uri.com:3306)/dbname +``` + +TCP using default port (3306) on localhost: +``` +user:password@tcp/dbname&charset=utf8mb4,utf8&sys_var=esc%40ped +``` + +Use the default protocol (tcp) and host (localhost:3306): +``` +user:password@/dbname +``` + +No Database preselected: +``` +user:password@/ +``` + +### `LOAD DATA LOCAL INFILE` support +For this feature you need direct access to the package. 
Therefore you must change the import path (no `_`): +```go +import "github.com/go-sql-driver/mysql" +``` + +Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)). + +To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. + +See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details. + + +### `time.Time` support +The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your programm. + +However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter. + +**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes). + +Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`. + + +### Unicode support +Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default. 
Adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN is not necessary anymore in most cases. + +See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support. + + +## Testing / Development +To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details. + +Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated. +If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls). + +See the [Contributing Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CHANGELOG.md) for details. + +--------------------------------------- + +## License +Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +Mozilla summarizes the license scope as follows: +> MPL: The copyleft applies to any files containing MPLed code. + + +That means: + * You can **use** the **unchanged** source code both in private as also commercial + * You **needn't publish** the source code of your library as long the files licensed under the MPL 2.0 are **unchanged** + * You **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0) + +Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license. 
+ +You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") + diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/benchmark_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/benchmark_test.go new file mode 100644 index 00000000..d72a4183 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/benchmark_test.go @@ -0,0 +1,208 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "database/sql" + "strings" + "sync" + "sync/atomic" + "testing" +) + +type TB testing.B + +func (tb *TB) check(err error) { + if err != nil { + tb.Fatal(err) + } +} + +func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB { + tb.check(err) + return db +} + +func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows { + tb.check(err) + return rows +} + +func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt { + tb.check(err) + return stmt +} + +func initDB(b *testing.B, queries ...string) *sql.DB { + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + for _, query := range queries { + if _, err := db.Exec(query); err != nil { + b.Fatalf("Error on %q: %v", query, err) + } + } + return db +} + +const concurrencyLevel = 10 + +func BenchmarkQuery(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := initDB(b, + "DROP TABLE IF EXISTS foo", + "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))", + `INSERT INTO foo VALUES (1, "one")`, + `INSERT INTO foo VALUES (2, "two")`, + ) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?")) + defer stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + var got string + tb.check(stmt.QueryRow(1).Scan(&got)) + if got != "one" { + b.Errorf("query = %q; want one", got) + wg.Done() + return + } + } + }() + } +} + +func BenchmarkExec(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := tb.checkDB(sql.Open("mysql", dsn)) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("DO 1")) + defer stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; 
i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + if _, err := stmt.Exec(); err != nil { + b.Fatal(err.Error()) + } + } + }() + } +} + +// data, but no db writes +var roundtripSample []byte + +func initRoundtripBenchmarks() ([]byte, int, int) { + if roundtripSample == nil { + roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024)) + } + return roundtripSample, 16, len(roundtripSample) +} + +func BenchmarkRoundtripTxt(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + sampleString := string(sample) + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + b.StartTimer() + var result string + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sampleString[0:length] + rows := tb.checkRows(db.Query(`SELECT "` + test + `"`)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if result != test { + rows.Close() + b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkRoundtripBin(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + stmt := tb.checkStmt(db.Prepare("SELECT ?")) + defer stmt.Close() + b.StartTimer() + var result sql.RawBytes + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sample[0:length] + rows := tb.checkRows(stmt.Query(test)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if !bytes.Equal(result, test) { + rows.Close() + b.Errorf("mismatch") + } + rows.Close() + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/buffer.go 
b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/buffer.go new file mode 100644 index 00000000..f7424f75 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/buffer.go @@ -0,0 +1,126 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import "io" + +const defaultBufSize = 4096 + +// A buffer which is used for both reading and writing. +// This is possible since communication on each connection is synchronous. +// In other words, we can't write and read simultaneously on the same connection. +// The buffer is similar to bufio.Reader / Writer but zero-copy-ish +// Also highly optimized for this particular use case. +type buffer struct { + buf []byte + rd io.Reader + idx int + length int +} + +func newBuffer(rd io.Reader) *buffer { + var b [defaultBufSize]byte + return &buffer{ + buf: b[:], + rd: rd, + } +} + +// fill reads into the buffer until at least _need_ bytes are in it +func (b *buffer) fill(need int) error { + // move existing data to the beginning + if b.length > 0 && b.idx > 0 { + copy(b.buf[0:b.length], b.buf[b.idx:]) + } + + // grow buffer if necessary + // TODO: let the buffer shrink again at some point + // Maybe keep the org buf slice and swap back? 
+ if need > len(b.buf) { + // Round up to the next multiple of the default size + newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) + copy(newBuf, b.buf) + b.buf = newBuf + } + + b.idx = 0 + + for { + n, err := b.rd.Read(b.buf[b.length:]) + b.length += n + + if err == nil { + if b.length < need { + continue + } + return nil + } + if b.length >= need && err == io.EOF { + return nil + } + return err + } +} + +// returns next N bytes from buffer. +// The returned slice is only guaranteed to be valid until the next read +func (b *buffer) readNext(need int) ([]byte, error) { + if b.length < need { + // refill + if err := b.fill(need); err != nil { + return nil, err + } + } + + offset := b.idx + b.idx += need + b.length -= need + return b.buf[offset:b.idx], nil +} + +// returns a buffer with the requested size. +// If possible, a slice from the existing buffer is returned. +// Otherwise a bigger buffer is made. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeBuffer(length int) []byte { + if b.length > 0 { + return nil + } + + // test (cheap) general case first + if length <= defaultBufSize || length <= cap(b.buf) { + return b.buf[:length] + } + + if length < maxPacketSize { + b.buf = make([]byte, length) + return b.buf + } + return make([]byte, length) +} + +// shortcut which can be used if the requested buffer is guaranteed to be +// smaller than defaultBufSize +// Only one buffer (total) can be used at a time. +func (b *buffer) takeSmallBuffer(length int) []byte { + if b.length == 0 { + return b.buf[:length] + } + return nil +} + +// takeCompleteBuffer returns the complete existing buffer. +// This can be used if the necessary buffer size is unknown. +// Only one buffer (total) can be used at a time. 
+func (b *buffer) takeCompleteBuffer() []byte { + if b.length == 0 { + return b.buf + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/connection.go new file mode 100644 index 00000000..5544139f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/connection.go @@ -0,0 +1,261 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/tls" + "database/sql/driver" + "errors" + "net" + "strings" + "time" +) + +type mysqlConn struct { + buf *buffer + netConn net.Conn + affectedRows uint64 + insertId uint64 + cfg *config + maxPacketAllowed int + maxWriteSize int + flags clientFlag + sequence uint8 + parseTime bool + strict bool +} + +type config struct { + user string + passwd string + net string + addr string + dbname string + params map[string]string + loc *time.Location + timeout time.Duration + tls *tls.Config + allowAllFiles bool + allowOldPasswords bool + clientFoundRows bool +} + +// Handles parameters set in DSN +func (mc *mysqlConn) handleParams() (err error) { + for param, val := range mc.cfg.params { + switch param { + // Charset + case "charset": + charsets := strings.Split(val, ",") + for i := range charsets { + // ignore errors here - a charset may not exist + err = mc.exec("SET NAMES " + charsets[i]) + if err == nil { + break + } + } + if err != nil { + return + } + + // time.Time parsing + case "parseTime": + var isBool bool + mc.parseTime, isBool = readBool(val) + if !isBool { + return errors.New("Invalid Bool value: 
" + val) + } + + // Strict mode + case "strict": + var isBool bool + mc.strict, isBool = readBool(val) + if !isBool { + return errors.New("Invalid Bool value: " + val) + } + + // Compression + case "compress": + err = errors.New("Compression not implemented yet") + return + + // System Vars + default: + err = mc.exec("SET " + param + "=" + val + "") + if err != nil { + return + } + } + } + + return +} + +func (mc *mysqlConn) Begin() (driver.Tx, error) { + if mc.netConn == nil { + errLog.Print(errInvalidConn) + return nil, driver.ErrBadConn + } + err := mc.exec("START TRANSACTION") + if err == nil { + return &mysqlTx{mc}, err + } + + return nil, err +} + +func (mc *mysqlConn) Close() (err error) { + // Makes Close idempotent + if mc.netConn != nil { + mc.writeCommandPacket(comQuit) + mc.netConn.Close() + mc.netConn = nil + } + + mc.cfg = nil + mc.buf = nil + + return +} + +func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { + if mc.netConn == nil { + errLog.Print(errInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := mc.writeCommandPacketStr(comStmtPrepare, query) + if err != nil { + return nil, err + } + + stmt := &mysqlStmt{ + mc: mc, + } + + // Read Result + columnCount, err := stmt.readPrepareResultPacket() + if err == nil { + if stmt.paramCount > 0 { + if err = mc.readUntilEOF(); err != nil { + return nil, err + } + } + + if columnCount > 0 { + err = mc.readUntilEOF() + } + } + + return stmt, err +} + +func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { + if mc.netConn == nil { + errLog.Print(errInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) == 0 { // no args, fastpath + mc.affectedRows = 0 + mc.insertId = 0 + + err := mc.exec(query) + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, err + } + return nil, err + } + + // with args, must use prepared stmt + return nil, driver.ErrSkip + +} + +// Internal 
function to execute commands +func (mc *mysqlConn) exec(query string) error { + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err != nil { + return err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil && resLen > 0 { + if err = mc.readUntilEOF(); err != nil { + return err + } + + err = mc.readUntilEOF() + } + + return err +} + +func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { + if mc.netConn == nil { + errLog.Print(errInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) == 0 { // no args, fastpath + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err == nil { + // Read Result + var resLen int + resLen, err = mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen > 0 { + // Columns + rows.columns, err = mc.readColumns(resLen) + } + return rows, err + } + } + return nil, err + } + + // with args, must use prepared stmt + return nil, driver.ErrSkip +} + +// Gets the value of the given MySQL System Variable +// The returned byte slice is only valid until the next read +func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { + // Send command + if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { + return nil, err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen > 0 { + // Columns + if err := mc.readUntilEOF(); err != nil { + return nil, err + } + } + + dest := make([]driver.Value, resLen) + if err = rows.readRow(dest); err == nil { + return dest[0].([]byte), mc.readUntilEOF() + } + } + return nil, err +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/const.go new file mode 100644 index 00000000..eb014d57 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/const.go @@ -0,0 +1,142 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +const ( + minProtocolVersion byte = 10 + maxPacketSize = 1<<24 - 1 + timeFormat = "2006-01-02 15:04:05" +) + +// MySQL constants documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +const ( + iOK byte = 0x00 + iLocalInFile byte = 0xfb + iEOF byte = 0xfe + iERR byte = 0xff +) + +type clientFlag uint32 + +const ( + clientLongPassword clientFlag = 1 << iota + clientFoundRows + clientLongFlag + clientConnectWithDB + clientNoSchema + clientCompress + clientODBC + clientLocalFiles + clientIgnoreSpace + clientProtocol41 + clientInteractive + clientSSL + clientIgnoreSIGPIPE + clientTransactions + clientReserved + clientSecureConn + clientMultiStatements + clientMultiResults +) + +const ( + comQuit byte = iota + 1 + comInitDB + comQuery + comFieldList + comCreateDB + comDropDB + comRefresh + comShutdown + comStatistics + comProcessInfo + comConnect + comProcessKill + comDebug + comPing + comTime + comDelayedInsert + comChangeUser + comBinlogDump + comTableDump + comConnectOut + comRegiserSlave + comStmtPrepare + comStmtExecute + comStmtSendLongData + comStmtClose + comStmtReset + comSetOption + comStmtFetch +) + +const ( + fieldTypeDecimal byte = iota + fieldTypeTiny + fieldTypeShort + fieldTypeLong + fieldTypeFloat + fieldTypeDouble + fieldTypeNULL + fieldTypeTimestamp + fieldTypeLongLong + fieldTypeInt24 + fieldTypeDate + fieldTypeTime + fieldTypeDateTime + fieldTypeYear + fieldTypeNewDate + fieldTypeVarChar + fieldTypeBit +) +const ( + fieldTypeNewDecimal byte = 
iota + 0xf6 + fieldTypeEnum + fieldTypeSet + fieldTypeTinyBLOB + fieldTypeMediumBLOB + fieldTypeLongBLOB + fieldTypeBLOB + fieldTypeVarString + fieldTypeString + fieldTypeGeometry +) + +type fieldFlag uint16 + +const ( + flagNotNULL fieldFlag = 1 << iota + flagPriKey + flagUniqueKey + flagMultipleKey + flagBLOB + flagUnsigned + flagZeroFill + flagBinary + flagEnum + flagAutoIncrement + flagTimestamp + flagSet + flagUnknown1 + flagUnknown2 + flagUnknown3 + flagUnknown4 +) + +const ( + collation_ascii_general_ci byte = 11 + collation_utf8_general_ci byte = 33 + collation_utf8mb4_general_ci byte = 45 + collation_utf8mb4_bin byte = 46 + collation_latin1_general_ci byte = 48 + collation_binary byte = 63 + collation_utf8mb4_unicode_ci byte = 224 +) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/driver.go new file mode 100644 index 00000000..53d2b68a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/driver.go @@ -0,0 +1,109 @@ +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// The driver should be used via the database/sql package: +// +// import "database/sql" +// import _ "camlistore.org/third_party/github.com/go-sql-driver/mysql" +// +// db, err := sql.Open("mysql", "user:password@/dbname") +// +// See https://github.com/go-sql-driver/mysql#usage for details +package mysql + +import ( + "database/sql" + "database/sql/driver" + "net" +) + +// This struct is exported to make the driver directly accessible. +// In general the driver is used via the database/sql package. 
+type MySQLDriver struct{} + +// Open new Connection. +// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how +// the DSN string is formated +func (d *MySQLDriver) Open(dsn string) (driver.Conn, error) { + var err error + + // New mysqlConn + mc := &mysqlConn{ + maxPacketAllowed: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + } + mc.cfg, err = parseDSN(dsn) + if err != nil { + return nil, err + } + + // Connect to Server + nd := net.Dialer{Timeout: mc.cfg.timeout} + mc.netConn, err = nd.Dial(mc.cfg.net, mc.cfg.addr) + if err != nil { + return nil, err + } + mc.buf = newBuffer(mc.netConn) + + // Reading Handshake Initialization Packet + cipher, err := mc.readInitPacket() + if err != nil { + mc.Close() + return nil, err + } + + // Send Client Authentication Packet + if err = mc.writeAuthPacket(cipher); err != nil { + mc.Close() + return nil, err + } + + // Read Result Packet + err = mc.readResultOK() + if err != nil { + // Retry with old authentication method, if allowed + if mc.cfg.allowOldPasswords && err == errOldPassword { + if err = mc.writeOldAuthPacket(cipher); err != nil { + mc.Close() + return nil, err + } + if err = mc.readResultOK(); err != nil { + mc.Close() + return nil, err + } + } else { + mc.Close() + return nil, err + } + + } + + // Get max allowed packet size + maxap, err := mc.getSystemVar("max_allowed_packet") + if err != nil { + mc.Close() + return nil, err + } + mc.maxPacketAllowed = stringToInt(maxap) - 1 + if mc.maxPacketAllowed < maxPacketSize { + mc.maxWriteSize = mc.maxPacketAllowed + } + + // Handle DSN Params + err = mc.handleParams() + if err != nil { + mc.Close() + return nil, err + } + + return mc, nil +} + +func init() { + sql.Register("mysql", &MySQLDriver{}) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/driver_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/driver_test.go new file mode 100644 index 
00000000..854eec2d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/driver_test.go @@ -0,0 +1,1259 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/tls" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "testing" + "time" +) + +var ( + dsn string + netAddr string + available bool +) + +var ( + tDate = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC) + sDate = "2012-06-14" + tDateTime = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC) + sDateTime = "2011-11-20 21:27:37" + tDate0 = time.Time{} + sDate0 = "0000-00-00" + sDateTime0 = "0000-00-00 00:00:00" +) + +// See https://github.com/go-sql-driver/mysql/wiki/Testing +func init() { + env := func(key, defaultValue string) string { + if value := os.Getenv(key); value != "" { + return value + } + return defaultValue + } + user := env("MYSQL_TEST_USER", "root") + pass := env("MYSQL_TEST_PASS", "") + prot := env("MYSQL_TEST_PROT", "tcp") + addr := env("MYSQL_TEST_ADDR", "localhost:3306") + dbname := env("MYSQL_TEST_DBNAME", "gotest") + netAddr = fmt.Sprintf("%s(%s)", prot, addr) + dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s&strict=true", user, pass, netAddr, dbname) + c, err := net.Dial(prot, addr) + if err == nil { + available = true + c.Close() + } +} + +type DBTest struct { + *testing.T + db *sql.DB +} + +func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + db, err := sql.Open("mysql", dsn) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + defer db.Close() + + 
db.Exec("DROP TABLE IF EXISTS test") + + dbt := &DBTest{t, db} + for _, test := range tests { + test(dbt) + dbt.db.Exec("DROP TABLE IF EXISTS test") + } +} + +func (dbt *DBTest) fail(method, query string, err error) { + if len(query) > 300 { + query = "[query too large to print]" + } + dbt.Fatalf("Error on %s %s: %s", method, query, err.Error()) +} + +func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) { + res, err := dbt.db.Exec(query, args...) + if err != nil { + dbt.fail("Exec", query, err) + } + return res +} + +func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) { + rows, err := dbt.db.Query(query, args...) + if err != nil { + dbt.fail("Query", query, err) + } + return rows +} + +func TestCRUD(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // Create Table + dbt.mustExec("CREATE TABLE test (value BOOL)") + + // Test for unexpected data + var out bool + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + dbt.Error("unexpected data in empty table") + } + + // Create Data + res := dbt.mustExec("INSERT INTO test VALUES (1)") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("Expected 1 affected row, got %d", count) + } + + id, err := res.LastInsertId() + if err != nil { + dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error()) + } + if id != 0 { + dbt.Fatalf("Expected InsertID 0, got %d", id) + } + + // Read + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if true != out { + dbt.Errorf("true != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Update + res = dbt.mustExec("UPDATE test SET value = ? 
WHERE value = ?", false, true) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("Expected 1 affected row, got %d", count) + } + + // Check Update + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if false != out { + dbt.Errorf("false != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Delete + res = dbt.mustExec("DELETE FROM test WHERE value = ?", false) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("Expected 1 affected row, got %d", count) + } + + // Check for unexpected rows + res = dbt.mustExec("DELETE FROM test") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 0 { + dbt.Fatalf("Expected 0 affected row, got %d", count) + } + }) +} + +func TestInt(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"} + in := int64(42) + var out int64 + var rows *sql.Rows + + // SIGNED + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // UNSIGNED ZEROFILL + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s ZEROFILL: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s 
ZEROFILL: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + in := float32(42.23) + var out float32 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (?)", in) + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %g != %g", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestString(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"} + in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย" + var out string + var rows *sql.Rows + + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %s != %s", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // BLOB + dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8") + + id := 2 + in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " + + "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. 
" + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet." + dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in) + + err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out) + if err != nil { + dbt.Fatalf("Error on BLOB-Query: %s", err.Error()) + } else if out != in { + dbt.Errorf("BLOB: %s != %s", in, out) + } + }) +} + +func TestDateTime(t *testing.T) { + type testmode struct { + selectSuffix string + args []interface{} + } + type timetest struct { + in interface{} + sOut string + tOut time.Time + tIsZero bool + } + type tester func(dbt *DBTest, rows *sql.Rows, + test *timetest, sqltype, resulttype, mode string) + type setup struct { + vartype string + dsnSuffix string + test tester + } + var ( + modes = map[string]*testmode{ + "text": &testmode{}, + "binary": &testmode{" WHERE 1 = ?", []interface{}{1}}, + } + timetests = map[string][]*timetest{ + "DATE": { + {sDate, sDate, tDate, false}, + {sDate0, sDate0, tDate0, true}, + {tDate, sDate, tDate, false}, + {tDate0, sDate0, tDate0, true}, + }, + "DATETIME": { + {sDateTime, sDateTime, tDateTime, false}, + {sDateTime0, sDateTime0, tDate0, true}, + {tDateTime, sDateTime, tDateTime, false}, + {tDate0, sDateTime0, tDate0, true}, + }, + } + setups = []*setup{ + {"string", "&parseTime=false", func( + dbt *DBTest, rows *sql.Rows, test *timetest, sqltype, resulttype, mode string) { + var sOut string + if err := rows.Scan(&sOut); err != nil { + dbt.Errorf("%s (%s %s): %s", sqltype, resulttype, mode, err.Error()) + } else if test.sOut != sOut { + dbt.Errorf("%s (%s %s): %s != %s", sqltype, resulttype, mode, test.sOut, sOut) + } + }}, + {"time.Time", "&parseTime=true", func( + dbt *DBTest, rows *sql.Rows, test *timetest, sqltype, resulttype, mode string) { + var tOut time.Time + if err := rows.Scan(&tOut); err != nil { + dbt.Errorf("%s (%s %s): %s", sqltype, resulttype, mode, err.Error()) + } else if test.tOut != tOut || test.tIsZero != tOut.IsZero() { + dbt.Errorf("%s (%s 
%s): %s [%t] != %s [%t]", sqltype, resulttype, mode, test.tOut, test.tIsZero, tOut, tOut.IsZero()) + } + }}, + } + ) + + var s *setup + testTime := func(dbt *DBTest) { + var rows *sql.Rows + for sqltype, tests := range timetests { + dbt.mustExec("CREATE TABLE test (value " + sqltype + ")") + for _, test := range tests { + for mode, q := range modes { + dbt.mustExec("TRUNCATE test") + dbt.mustExec("INSERT INTO test VALUES (?)", test.in) + rows = dbt.mustQuery("SELECT value FROM test"+q.selectSuffix, q.args...) + if rows.Next() { + s.test(dbt, rows, test, sqltype, s.vartype, mode) + } else { + if err := rows.Err(); err != nil { + dbt.Errorf("%s (%s %s): %s", + sqltype, s.vartype, mode, err.Error()) + } else { + dbt.Errorf("%s (%s %s): no data", + sqltype, s.vartype, mode) + } + } + } + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + } + + timeDsn := dsn + "&sql_mode=ALLOW_INVALID_DATES" + for _, v := range setups { + s = v + runTests(t, timeDsn+s.dsnSuffix, testTime) + } +} + +func TestNULL(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + nullStmt, err := dbt.db.Prepare("SELECT NULL") + if err != nil { + dbt.Fatal(err) + } + defer nullStmt.Close() + + nonNullStmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + defer nonNullStmt.Close() + + // NullBool + var nb sql.NullBool + // Invalid + if err = nullStmt.QueryRow().Scan(&nb); err != nil { + dbt.Fatal(err) + } + if nb.Valid { + dbt.Error("Valid NullBool which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&nb); err != nil { + dbt.Fatal(err) + } + if !nb.Valid { + dbt.Error("Invalid NullBool which should be valid") + } else if nb.Bool != true { + dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool) + } + + // NullFloat64 + var nf sql.NullFloat64 + // Invalid + if err = nullStmt.QueryRow().Scan(&nf); err != nil { + dbt.Fatal(err) + } + if nf.Valid { + dbt.Error("Valid NullFloat64 which should be invalid") + } + // Valid + if err = 
nonNullStmt.QueryRow().Scan(&nf); err != nil { + dbt.Fatal(err) + } + if !nf.Valid { + dbt.Error("Invalid NullFloat64 which should be valid") + } else if nf.Float64 != float64(1) { + dbt.Errorf("Unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64) + } + + // NullInt64 + var ni sql.NullInt64 + // Invalid + if err = nullStmt.QueryRow().Scan(&ni); err != nil { + dbt.Fatal(err) + } + if ni.Valid { + dbt.Error("Valid NullInt64 which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&ni); err != nil { + dbt.Fatal(err) + } + if !ni.Valid { + dbt.Error("Invalid NullInt64 which should be valid") + } else if ni.Int64 != int64(1) { + dbt.Errorf("Unexpected NullInt64 value: %d (should be 1)", ni.Int64) + } + + // NullString + var ns sql.NullString + // Invalid + if err = nullStmt.QueryRow().Scan(&ns); err != nil { + dbt.Fatal(err) + } + if ns.Valid { + dbt.Error("Valid NullString which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&ns); err != nil { + dbt.Fatal(err) + } + if !ns.Valid { + dbt.Error("Invalid NullString which should be valid") + } else if ns.String != `1` { + dbt.Error("Unexpected NullString value:" + ns.String + " (should be `1`)") + } + + // nil-bytes + var b []byte + // Read nil + if err = nullStmt.QueryRow().Scan(&b); err != nil { + dbt.Fatal(err) + } + if b != nil { + dbt.Error("Non-nil []byte wich should be nil") + } + // Read non-nil + if err = nonNullStmt.QueryRow().Scan(&b); err != nil { + dbt.Fatal(err) + } + if b == nil { + dbt.Error("Nil []byte wich should be non-nil") + } + // Insert nil + b = nil + success := false + if err = dbt.db.QueryRow("SELECT ? 
IS NULL", b).Scan(&success); err != nil { + dbt.Fatal(err) + } + if !success { + dbt.Error("Inserting []byte(nil) as NULL failed") + } + // Check input==output with input==nil + b = nil + if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { + dbt.Fatal(err) + } + if b != nil { + dbt.Error("Non-nil echo from nil input") + } + // Check input==output with input!=nil + b = []byte("") + if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { + dbt.Fatal(err) + } + if b == nil { + dbt.Error("nil echo from non-nil input") + } + + // Insert NULL + dbt.mustExec("CREATE TABLE test (dummmy1 int, value int, dummy2 int)") + + dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2) + + var out interface{} + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + rows.Scan(&out) + if out != nil { + dbt.Errorf("%v != nil", out) + } + } else { + dbt.Error("no data") + } + }) +} + +func TestLongData(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + var maxAllowedPacketSize int + err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize) + if err != nil { + dbt.Fatal(err) + } + maxAllowedPacketSize-- + + // don't get too ambitious + if maxAllowedPacketSize > 1<<25 { + maxAllowedPacketSize = 1 << 25 + } + + dbt.mustExec("CREATE TABLE test (value LONGBLOB)") + + in := strings.Repeat(`a`, maxAllowedPacketSize+1) + var out string + var rows *sql.Rows + + // Long text data + const nonDataQueryLen = 28 // length query w/o value + inS := in[:maxAllowedPacketSize-nonDataQueryLen] + dbt.mustExec("INSERT INTO test VALUES('" + inS + "')") + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if inS != out { + dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out)) + } + if rows.Next() { + dbt.Error("LONGBLOB: unexpexted row") + } + } else { + dbt.Fatalf("LONGBLOB: no data") + } + + // Empty table + dbt.mustExec("TRUNCATE TABLE test") + + // Long binary data + dbt.mustExec("INSERT 
INTO test VALUES(?)", in) + rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1) + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out)) + } + if rows.Next() { + dbt.Error("LONGBLOB: unexpexted row") + } + } else { + if err = rows.Err(); err != nil { + dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error()) + } else { + dbt.Fatal("LONGBLOB: no data (err: )") + } + } + }) +} + +func TestLoadData(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + verifyLoadDataResult := func() { + rows, err := dbt.db.Query("SELECT * FROM test") + if err != nil { + dbt.Fatal(err.Error()) + } + + i := 0 + values := [4]string{ + "a string", + "a string containing a \t", + "a string containing a \n", + "a string containing both \t\n", + } + + var id int + var value string + + for rows.Next() { + i++ + err = rows.Scan(&id, &value) + if err != nil { + dbt.Fatal(err.Error()) + } + if i != id { + dbt.Fatalf("%d != %d", i, id) + } + if values[i-1] != value { + dbt.Fatalf("%s != %s", values[i-1], value) + } + } + err = rows.Err() + if err != nil { + dbt.Fatal(err.Error()) + } + + if i != 4 { + dbt.Fatalf("Rows count mismatch. 
Got %d, want 4", i) + } + } + file, err := ioutil.TempFile("", "gotest") + defer os.Remove(file.Name()) + if err != nil { + dbt.Fatal(err) + } + file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n") + file.Close() + + dbt.db.Exec("DROP TABLE IF EXISTS test") + dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8") + + // Local File + RegisterLocalFile(file.Name()) + dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE '%q' INTO TABLE test", file.Name())) + verifyLoadDataResult() + // negative test + _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test") + if err == nil { + dbt.Fatal("Load non-existent file didn't fail") + } else if err.Error() != "Local File 'doesnotexist' is not registered. Use the DSN parameter 'allowAllFiles=true' to allow all files" { + dbt.Fatal(err.Error()) + } + + // Empty table + dbt.mustExec("TRUNCATE TABLE test") + + // Reader + RegisterReaderHandler("test", func() io.Reader { + file, err = os.Open(file.Name()) + if err != nil { + dbt.Fatal(err) + } + return file + }) + dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test") + verifyLoadDataResult() + // negative test + _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test") + if err == nil { + dbt.Fatal("Load non-existent Reader didn't fail") + } else if err.Error() != "Reader 'doesnotexist' is not registered" { + dbt.Fatal(err.Error()) + } + }) +} + +func TestFoundRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") + dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") + + res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + 
dbt.Fatalf("Expected 2 affected rows, got %d", count) + } + res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 affected rows, got %d", count) + } + }) + runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") + dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") + + res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 matched rows, got %d", count) + } + res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 3 { + dbt.Fatalf("Expected 3 matched rows, got %d", count) + } + }) +} + +func TestStrict(t *testing.T) { + // ALLOW_INVALID_DATES to get rid of stricter modes - we want to test for warnings, not errors + relaxedDsn := dsn + "&sql_mode=ALLOW_INVALID_DATES" + runTests(t, relaxedDsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (a TINYINT NOT NULL, b CHAR(4))") + + var queries = [...]struct { + in string + codes []string + }{ + {"DROP TABLE IF EXISTS no_such_table", []string{"1051"}}, + {"INSERT INTO test VALUES(10,'mysql'),(NULL,'test'),(300,'Open Source')", []string{"1265", "1048", "1264", "1265"}}, + } + var err error + + var checkWarnings = func(err error, mode string, idx int) { + if err == nil { + dbt.Errorf("Expected STRICT error on query [%s] %s", mode, queries[idx].in) + } + + if warnings, ok := err.(MySQLWarnings); ok { + var codes = make([]string, len(warnings)) + for i := range warnings { + codes[i] = warnings[i].Code + } + if len(codes) != 
len(queries[idx].codes) { + dbt.Errorf("Unexpected STRICT error count on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + } + + for i := range warnings { + if codes[i] != queries[idx].codes[i] { + dbt.Errorf("Unexpected STRICT error codes on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + return + } + } + + } else { + dbt.Errorf("Unexpected error on query [%s] %s: %s", mode, queries[idx].in, err.Error()) + } + } + + // text protocol + for i := range queries { + _, err = dbt.db.Exec(queries[i].in) + checkWarnings(err, "text", i) + } + + var stmt *sql.Stmt + + // binary protocol + for i := range queries { + stmt, err = dbt.db.Prepare(queries[i].in) + if err != nil { + dbt.Errorf("Error on preparing query %s: %s", queries[i].in, err.Error()) + } + + _, err = stmt.Exec() + checkWarnings(err, "binary", i) + + err = stmt.Close() + if err != nil { + dbt.Errorf("Error on closing stmt for query %s: %s", queries[i].in, err.Error()) + } + } + }) +} + +func TestTLS(t *testing.T) { + tlsTest := func(dbt *DBTest) { + if err := dbt.db.Ping(); err != nil { + if err == errNoTLS { + dbt.Skip("Server does not support TLS") + } else { + dbt.Fatalf("Error on Ping: %s", err.Error()) + } + } + + rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'") + + var variable, value *sql.RawBytes + for rows.Next() { + if err := rows.Scan(&variable, &value); err != nil { + dbt.Fatal(err.Error()) + } + + if value == nil { + dbt.Fatal("No Cipher") + } + } + } + + runTests(t, dsn+"&tls=skip-verify", tlsTest) + + // Verify that registering / using a custom cfg works + RegisterTLSConfig("custom-skip-verify", &tls.Config{ + InsecureSkipVerify: true, + }) + runTests(t, dsn+"&tls=custom-skip-verify", tlsTest) +} + +func TestReuseClosedConnection(t *testing.T) { + // this test does not use sql.database, it uses the driver directly + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + md := &MySQLDriver{} + 
conn, err := md.Open(dsn) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + stmt, err := conn.Prepare("DO 1") + if err != nil { + t.Fatalf("Error preparing statement: %s", err.Error()) + } + _, err = stmt.Exec(nil) + if err != nil { + t.Fatalf("Error executing statement: %s", err.Error()) + } + err = conn.Close() + if err != nil { + t.Fatalf("Error closing connection: %s", err.Error()) + } + + defer func() { + if err := recover(); err != nil { + t.Errorf("Panic after reusing a closed connection: %v", err) + } + }() + _, err = stmt.Exec(nil) + if err != nil && err != driver.ErrBadConn { + t.Errorf("Unexpected error '%s', expected '%s'", + err.Error(), driver.ErrBadConn.Error()) + } +} + +func TestCharset(t *testing.T) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + mustSetCharset := func(charsetParam, expected string) { + runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) { + rows := dbt.mustQuery("SELECT @@character_set_connection") + defer rows.Close() + + if !rows.Next() { + dbt.Fatalf("Error getting connection charset: %s", rows.Err()) + } + + var got string + rows.Scan(&got) + + if got != expected { + dbt.Fatalf("Expected connection charset %s but got %s", expected, got) + } + }) + } + + // non utf8 test + mustSetCharset("charset=ascii", "ascii") + + // when the first charset is invalid, use the second + mustSetCharset("charset=none,utf8", "utf8") + + // when the first charset is valid, use it + mustSetCharset("charset=ascii,utf8", "ascii") + mustSetCharset("charset=utf8,ascii", "utf8") +} + +func TestFailingCharset(t *testing.T) { + runTests(t, dsn+"&charset=none", func(dbt *DBTest) { + // run query to really establish connection... 
+ _, err := dbt.db.Exec("SELECT 1") + if err == nil { + dbt.db.Close() + t.Fatalf("Connection must not succeed without a valid charset") + } + }) +} + +func TestRawBytesResultExceedsBuffer(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // defaultBufSize from buffer.go + expected := strings.Repeat("abc", defaultBufSize) + + rows := dbt.mustQuery("SELECT '" + expected + "'") + defer rows.Close() + if !rows.Next() { + dbt.Error("expected result, got none") + } + var result sql.RawBytes + rows.Scan(&result) + if expected != string(result) { + dbt.Error("result did not match expected value") + } + }) +} + +func TestTimezoneConversion(t *testing.T) { + zones := []string{"UTC", "US/Central", "US/Pacific", "Local"} + + // Regression test for timezone handling + tzTest := func(dbt *DBTest) { + + // Create table + dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)") + + // Insert local time into database (should be converted) + usCentral, _ := time.LoadLocation("US/Central") + now := time.Now().In(usCentral) + dbt.mustExec("INSERT INTO test VALUE (?)", now) + + // Retrieve time from DB + rows := dbt.mustQuery("SELECT ts FROM test") + if !rows.Next() { + dbt.Fatal("Didn't get any rows out") + } + + var nowDB time.Time + err := rows.Scan(&nowDB) + if err != nil { + dbt.Fatal("Err", err) + } + + // Check that dates match + if now.Unix() != nowDB.Unix() { + dbt.Errorf("Times don't match.\n") + dbt.Errorf(" Now(%v)=%v\n", usCentral, now) + dbt.Errorf(" Now(UTC)=%v\n", nowDB) + } + } + + for _, tz := range zones { + runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest) + } +} + +// This tests for https://github.com/go-sql-driver/mysql/pull/139 +// +// An extra (invisible) nil byte was being added to the beginning of positive +// time strings. 
+func TestTimeSign(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + var sTimes = []struct { + value string + fieldType string + }{ + {"12:34:56", "TIME"}, + {"-12:34:56", "TIME"}, + // As described in http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html + // they *should* work, but only in 5.6+. + // { "12:34:56.789", "TIME(3)" }, + // { "-12:34:56.789", "TIME(3)" }, + } + + for _, sTime := range sTimes { + dbt.db.Exec("DROP TABLE IF EXISTS test") + dbt.mustExec("CREATE TABLE test (id INT, time_field " + sTime.fieldType + ")") + dbt.mustExec("INSERT INTO test (id, time_field) VALUES(1, '" + sTime.value + "')") + rows := dbt.mustQuery("SELECT time_field FROM test WHERE id = ?", 1) + if rows.Next() { + var oTime string + rows.Scan(&oTime) + if oTime != sTime.value { + dbt.Errorf(`time values differ: got %q, expected %q.`, oTime, sTime.value) + } + } else { + dbt.Error("expecting at least one row.") + } + } + }) +} + +// Special cases + +func TestRowsClose(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + rows, err := dbt.db.Query("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + err = rows.Close() + if err != nil { + dbt.Fatal(err) + } + + if rows.Next() { + dbt.Fatal("Unexpected row after rows.Close()") + } + + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + }) +} + +// dangling statements +// http://code.google.com/p/go/issues/detail?id=3865 +func TestCloseStmtBeforeRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + rows, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows.Close() + + err = stmt.Close() + if err != nil { + dbt.Fatal(err) + } + + if !rows.Next() { + dbt.Fatal("Getting row failed") + } else { + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + + var out bool + err = rows.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != true { + 
dbt.Errorf("true != %t", out) + } + } + }) +} + +// It is valid to have multiple Rows for the same Stmt +// http://code.google.com/p/go/issues/detail?id=3734 +func TestStmtMultiRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0") + if err != nil { + dbt.Fatal(err) + } + + rows1, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows1.Close() + + rows2, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows2.Close() + + var out bool + + // 1 + if !rows1.Next() { + dbt.Fatal("1st rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + if !rows2.Next() { + dbt.Fatal("1st rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + // 2 + if !rows1.Next() { + dbt.Fatal("2nd rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows1.Next() { + dbt.Fatal("Unexpected row on rows1") + } + err = rows1.Close() + if err != nil { + dbt.Fatal(err) + } + } + + if !rows2.Next() { + dbt.Fatal("2nd rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows2.Next() { + dbt.Fatal("Unexpected row on rows2") + } + err = rows2.Close() + if err != nil { + dbt.Fatal(err) + } + 
} + }) +} + +func TestConcurrent(t *testing.T) { + if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled { + t.Skip("MYSQL_TEST_CONCURRENT env var not set") + } + + runTests(t, dsn, func(dbt *DBTest) { + var max int + err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max) + if err != nil { + dbt.Fatalf("%s", err.Error()) + } + dbt.Logf("Testing up to %d concurrent connections \r\n", max) + canStop := false + c := make(chan struct{}, max) + for i := 0; i < max; i++ { + go func(id int) { + tx, err := dbt.db.Begin() + if err != nil { + canStop = true + if err.Error() == "Error 1040: Too many connections" { + max-- + return + } else { + dbt.Fatalf("Error on Con %d: %s", id, err.Error()) + } + } + c <- struct{}{} + for !canStop { + _, err = tx.Exec("SELECT 1") + if err != nil { + canStop = true + dbt.Fatalf("Error on Con %d: %s", id, err.Error()) + } + } + err = tx.Commit() + if err != nil { + canStop = true + dbt.Fatalf("Error on Con %d: %s", id, err.Error()) + } + }(i) + } + for i := 0; i < max; i++ { + <-c + } + canStop = true + + dbt.Logf("Reached %d concurrent connections \r\n", max) + }) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/errors.go new file mode 100644 index 00000000..1f4b8c0a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/errors.go @@ -0,0 +1,105 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "database/sql/driver" + "errors" + "fmt" + "io" +) + +var ( + errInvalidConn = errors.New("Invalid Connection") + errMalformPkt = errors.New("Malformed Packet") + errNoTLS = errors.New("TLS encryption requested but server does not support TLS") + errOldPassword = errors.New("This server only supports the insecure old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") + errOldProtocol = errors.New("MySQL-Server does not support required Protocol 41+") + errPktSync = errors.New("Commands out of sync. You can't run this command now") + errPktSyncMul = errors.New("Commands out of sync. Did you run multiple statements at once?") + errPktTooLarge = errors.New("Packet for query is too large. You can change this value on the server by adjusting the 'max_allowed_packet' variable.") +) + +// error type which represents a single MySQL error +type MySQLError struct { + Number uint16 + Message string +} + +func (me *MySQLError) Error() string { + return fmt.Sprintf("Error %d: %s", me.Number, me.Message) +} + +// error type which represents a group of one or more MySQL warnings +type MySQLWarnings []mysqlWarning + +func (mws MySQLWarnings) Error() string { + var msg string + for i, warning := range mws { + if i > 0 { + msg += "\r\n" + } + msg += fmt.Sprintf("%s %s: %s", warning.Level, warning.Code, warning.Message) + } + return msg +} + +// error type which represents a single MySQL warning +type mysqlWarning struct { + Level string + Code string + Message string +} + +func (mc *mysqlConn) getWarnings() (err error) { + rows, err := mc.Query("SHOW WARNINGS", []driver.Value{}) + if err != nil { + return + } + + var warnings = MySQLWarnings{} + var values = make([]driver.Value, 3) + + var warning mysqlWarning + var raw []byte + var ok bool + + for { + err = rows.Next(values) + switch err { + case nil: + warning = mysqlWarning{} + + if 
raw, ok = values[0].([]byte); ok { + warning.Level = string(raw) + } else { + warning.Level = fmt.Sprintf("%s", values[0]) + } + if raw, ok = values[1].([]byte); ok { + warning.Code = string(raw) + } else { + warning.Code = fmt.Sprintf("%s", values[1]) + } + if raw, ok = values[2].([]byte); ok { + warning.Message = string(raw) + } else { + warning.Message = fmt.Sprintf("%s", values[0]) + } + + warnings = append(warnings, warning) + + case io.EOF: + return warnings + + default: + rows.Close() + return + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/infile.go new file mode 100644 index 00000000..518946d0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/infile.go @@ -0,0 +1,152 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "fmt" + "io" + "os" + "strings" +) + +var ( + fileRegister map[string]bool + readerRegister map[string]func() io.Reader +) + +func init() { + fileRegister = make(map[string]bool) + readerRegister = make(map[string]func() io.Reader) +} + +// RegisterLocalFile adds the given file to the file whitelist, +// so that it can be used by "LOAD DATA LOCAL INFILE ". +// Alternatively you can allow the use of all local files with +// the DSN parameter 'allowAllFiles=true' +// +// filePath := "/home/gopher/data.csv" +// mysql.RegisterLocalFile(filePath) +// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") +// if err != nil { +// ... 
+// +func RegisterLocalFile(filePath string) { + fileRegister[strings.Trim(filePath, `"`)] = true +} + +// DeregisterLocalFile removes the given filepath from the whitelist. +func DeregisterLocalFile(filePath string) { + delete(fileRegister, strings.Trim(filePath, `"`)) +} + +// RegisterReaderHandler registers a handler function which is used +// to receive a io.Reader. +// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::". +// If the handler returns a io.ReadCloser Close() is called when the +// request is finished. +// +// mysql.RegisterReaderHandler("data", func() io.Reader { +// var csvReader io.Reader // Some Reader that returns CSV data +// ... // Open Reader here +// return csvReader +// }) +// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterReaderHandler(name string, handler func() io.Reader) { + readerRegister[name] = handler +} + +// DeregisterReaderHandler removes the ReaderHandler function with +// the given name from the registry. +func DeregisterReaderHandler(name string) { + delete(readerRegister, name) +} + +func (mc *mysqlConn) handleInFileRequest(name string) (err error) { + var rdr io.Reader + data := make([]byte, 4+mc.maxWriteSize) + + if strings.HasPrefix(name, "Reader::") { // io.Reader + name = name[8:] + handler, inMap := readerRegister[name] + if handler != nil { + rdr = handler() + } + if rdr == nil { + if !inMap { + err = fmt.Errorf("Reader '%s' is not registered", name) + } else { + err = fmt.Errorf("Reader '%s' is ", name) + } + } + } else { // File + name = strings.Trim(name, `"`) + if mc.cfg.allowAllFiles || fileRegister[name] { + rdr, err = os.Open(name) + } else { + err = fmt.Errorf("Local File '%s' is not registered. 
Use the DSN parameter 'allowAllFiles=true' to allow all files", name) + } + } + + if rdc, ok := rdr.(io.ReadCloser); ok { + defer func() { + if err == nil { + err = rdc.Close() + } else { + rdc.Close() + } + }() + } + + // send content packets + var ioErr error + if err == nil { + var n int + for err == nil && ioErr == nil { + n, err = rdr.Read(data[4:]) + if n > 0 { + data[0] = byte(n) + data[1] = byte(n >> 8) + data[2] = byte(n >> 16) + data[3] = mc.sequence + ioErr = mc.writePacket(data[:4+n]) + } + } + if err == io.EOF { + err = nil + } + if ioErr != nil { + errLog.Print(ioErr.Error()) + return driver.ErrBadConn + } + } + + // send empty packet (termination) + ioErr = mc.writePacket([]byte{ + 0x00, + 0x00, + 0x00, + mc.sequence, + }) + if ioErr != nil { + errLog.Print(ioErr.Error()) + return driver.ErrBadConn + } + + // read OK packet + if err == nil { + return mc.readResultOK() + } else { + mc.readPacket() + } + return err +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/packets.go new file mode 100644 index 00000000..8ee32cae --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/packets.go @@ -0,0 +1,1209 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "fmt" + "io" + "math" + "time" +) + +// Packets documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +// Read packet to buffer 'data' +func (mc *mysqlConn) readPacket() ([]byte, error) { + // Read packet header + data, err := mc.buf.readNext(4) + if err != nil { + errLog.Print(err.Error()) + mc.Close() + return nil, driver.ErrBadConn + } + + // Packet Length [24 bit] + pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) + + if pktLen < 1 { + errLog.Print(errMalformPkt.Error()) + mc.Close() + return nil, driver.ErrBadConn + } + + // Check Packet Sync [8 bit] + if data[3] != mc.sequence { + if data[3] > mc.sequence { + return nil, errPktSyncMul + } else { + return nil, errPktSync + } + } + mc.sequence++ + + // Read packet body [pktLen bytes] + if data, err = mc.buf.readNext(pktLen); err == nil { + if pktLen < maxPacketSize { + return data, nil + } + + // Make a copy since data becomes invalid with the next read + buf := make([]byte, len(data)) + copy(buf, data) + + // More data + data, err = mc.readPacket() + if err == nil { + return append(buf, data...), nil + } + } + + // err case + mc.Close() + errLog.Print(err.Error()) + return nil, driver.ErrBadConn +} + +// Write packet buffer 'data' +// The packet header must be already included +func (mc *mysqlConn) writePacket(data []byte) error { + if len(data)-4 <= mc.maxWriteSize { // Can send data at once + // Write packet + n, err := mc.netConn.Write(data) + if err == nil && n == len(data) { + mc.sequence++ + return nil + } + + // Handle error + if err == nil { // n != len(data) + errLog.Print(errMalformPkt.Error()) + } else { + errLog.Print(err.Error()) + } + return driver.ErrBadConn + } + + // Must split packet + return mc.splitPacket(data) +} + +func (mc *mysqlConn) splitPacket(data []byte) error { + pktLen := len(data) - 4 + + if pktLen > mc.maxPacketAllowed { + 
return errPktTooLarge + } + + for pktLen >= maxPacketSize { + data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + data[3] = mc.sequence + + // Write packet + n, err := mc.netConn.Write(data[:4+maxPacketSize]) + if err == nil && n == 4+maxPacketSize { + mc.sequence++ + data = data[maxPacketSize:] + pktLen -= maxPacketSize + continue + } + + // Handle error + if err == nil { // n != len(data) + errLog.Print(errMalformPkt.Error()) + } else { + errLog.Print(err.Error()) + } + return driver.ErrBadConn + } + + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + data[3] = mc.sequence + return mc.writePacket(data) +} + +/****************************************************************************** +* Initialisation Process * +******************************************************************************/ + +// Handshake Initialization Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake +func (mc *mysqlConn) readInitPacket() ([]byte, error) { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + if data[0] == iERR { + return nil, mc.handleErrorPacket(data) + } + + // protocol version [1 byte] + if data[0] < minProtocolVersion { + return nil, fmt.Errorf( + "Unsupported MySQL Protocol Version %d. 
Protocol Version %d or higher is required", + data[0], + minProtocolVersion, + ) + } + + // server version [null terminated string] + // connection id [4 bytes] + pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 + + // first part of the password cipher [8 bytes] + cipher := data[pos : pos+8] + + // (filler) always 0x00 [1 byte] + pos += 8 + 1 + + // capability flags (lower 2 bytes) [2 bytes] + mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + if mc.flags&clientProtocol41 == 0 { + return nil, errOldProtocol + } + if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { + return nil, errNoTLS + } + pos += 2 + + if len(data) > pos { + // character set [1 byte] + // status flags [2 bytes] + // capability flags (upper 2 bytes) [2 bytes] + // length of auth-plugin-data [1 byte] + // reserved (all [00]) [10 bytes] + pos += 1 + 2 + 2 + 1 + 10 + + // second part of the password cipher [12? bytes] + // The documentation is ambiguous about the length. + // The official Python library uses the fixed length 12 + // which is not documented but seems to work. + cipher = append(cipher, data[pos:pos+12]...) 
+ + // TODO: Verify string termination + // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) + // \NUL otherwise + // + //if data[len(data)-1] == 0 { + // return + //} + //return errMalformPkt + } + + return cipher, nil +} + +// Client Authentication Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse +func (mc *mysqlConn) writeAuthPacket(cipher []byte) error { + // Adjust client flags based on server support + clientFlags := clientProtocol41 | + clientSecureConn | + clientLongPassword | + clientTransactions | + clientLocalFiles | + mc.flags&clientLongFlag + + if mc.cfg.clientFoundRows { + clientFlags |= clientFoundRows + } + + // To enable TLS / SSL + if mc.cfg.tls != nil { + clientFlags |= clientSSL + } + + // User Password + scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.passwd)) + + pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.user) + 1 + 1 + len(scrambleBuff) + + // To specify a db name + if n := len(mc.cfg.dbname); n > 0 { + clientFlags |= clientConnectWithDB + pktLen += n + 1 + } + + // Calculate packet length and get buffer with that size + data := mc.buf.takeSmallBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print("Busy buffer") + return driver.ErrBadConn + } + + // ClientFlags [32 bit] + data[4] = byte(clientFlags) + data[5] = byte(clientFlags >> 8) + data[6] = byte(clientFlags >> 16) + data[7] = byte(clientFlags >> 24) + + // MaxPacketSize [32 bit] (none) + data[8] = 0x00 + data[9] = 0x00 + data[10] = 0x00 + data[11] = 0x00 + + // Charset [1 byte] + data[12] = collation_utf8_general_ci + + // SSL Connection Request Packet + // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest + if mc.cfg.tls != nil { + // Packet header [24bit length + 1 byte sequence] + data[0] = byte((4 + 4 + 1 + 23)) + data[1] = byte((4 + 4 + 1 + 23) >> 8) + data[2] = byte((4 + 4 + 1 + 23) >> 16) + data[3] = mc.sequence + + // Send TLS / SSL request packet + if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { + return err + } + + // Switch to TLS + tlsConn := tls.Client(mc.netConn, mc.cfg.tls) + if err := tlsConn.Handshake(); err != nil { + return err + } + mc.netConn = tlsConn + mc.buf.rd = tlsConn + } + + // Add the packet header [24bit length + 1 byte sequence] + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + data[3] = mc.sequence + + // Filler [23 bytes] (all 0x00) + pos := 13 + 23 + + // User [null terminated string] + if len(mc.cfg.user) > 0 { + pos += copy(data[pos:], mc.cfg.user) + } + data[pos] = 0x00 + pos++ + + // ScrambleBuffer [length encoded integer] + data[pos] = byte(len(scrambleBuff)) + pos += 1 + copy(data[pos+1:], scrambleBuff) + + // Databasename [null terminated string] + if len(mc.cfg.dbname) > 0 { + pos += copy(data[pos:], mc.cfg.dbname) + data[pos] = 0x00 + } + + // Send Auth packet + return mc.writePacket(data) +} + +// Client old authentication packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error { + // User 
password + scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.passwd)) + + // Calculate the packet lenght and add a tailing 0 + pktLen := len(scrambleBuff) + 1 + data := mc.buf.takeSmallBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print("Busy buffer") + return driver.ErrBadConn + } + + // Add the packet header [24bit length + 1 byte sequence] + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + data[3] = mc.sequence + + // Add the scrambled password [null terminated string] + copy(data[4:], scrambleBuff) + + return mc.writePacket(data) +} + +/****************************************************************************** +* Command Packets * +******************************************************************************/ + +func (mc *mysqlConn) writeCommandPacket(command byte) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print("Busy buffer") + return driver.ErrBadConn + } + + // Add the packet header [24bit length + 1 byte sequence] + data[0] = 0x01 // 1 byte long + data[1] = 0x00 + data[2] = 0x00 + data[3] = 0x00 // new command, sequence id is always 0 + + // Add command byte + data[4] = command + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { + // Reset Packet Sequence + mc.sequence = 0 + + pktLen := 1 + len(arg) + data := mc.buf.takeBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print("Busy buffer") + return driver.ErrBadConn + } + + // Add the packet header [24bit length + 1 byte sequence] + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + data[3] = 0x00 // new command, sequence id is always 0 + + // Add command byte + data[4] = command + + // Add arg + copy(data[5:], arg) + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1 + 4) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print("Busy buffer") + return driver.ErrBadConn + } + + // Add the packet header [24bit length + 1 byte sequence] + data[0] = 0x05 // 5 bytes long + data[1] = 0x00 + data[2] = 0x00 + data[3] = 0x00 // new command, sequence id is always 0 + + // Add command byte + data[4] = command + + // Add arg [32 bit] + data[5] = byte(arg) + data[6] = byte(arg >> 8) + data[7] = byte(arg >> 16) + data[8] = byte(arg >> 24) + + // Send CMD packet + return mc.writePacket(data) +} + +/****************************************************************************** +* Result Packets * +******************************************************************************/ + +// Returns error if Packet is not an 'Result OK'-Packet +func (mc *mysqlConn) readResultOK() error { + data, err := mc.readPacket() + if err == nil { + // packet indicator + switch data[0] { + + case iOK: + return mc.handleOkPacket(data) + + case iEOF: + // someone is using old_passwords + return errOldPassword + + default: // Error otherwise + return mc.handleErrorPacket(data) + } + } + return err +} + +// Result Set Header Packet +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { + data, err := 
mc.readPacket() + if err == nil { + switch data[0] { + + case iOK: + return 0, mc.handleOkPacket(data) + + case iERR: + return 0, mc.handleErrorPacket(data) + + case iLocalInFile: + return 0, mc.handleInFileRequest(string(data[1:])) + } + + // column count + num, _, n := readLengthEncodedInteger(data) + if n-len(data) == 0 { + return int(num), nil + } + + return 0, errMalformPkt + } + return 0, err +} + +// Error Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet +func (mc *mysqlConn) handleErrorPacket(data []byte) error { + if data[0] != iERR { + return errMalformPkt + } + + // 0xff [1 byte] + + // Error Number [16 bit uint] + errno := binary.LittleEndian.Uint16(data[1:3]) + + pos := 3 + + // SQL State [optional: # + 5bytes string] + if data[3] == 0x23 { + //sqlstate := string(data[4 : 4+5]) + pos = 9 + } + + // Error Message [string] + return &MySQLError{ + Number: errno, + Message: string(data[pos:]), + } +} + +// Ok Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet +func (mc *mysqlConn) handleOkPacket(data []byte) error { + var n, m int + + // 0x00 [1 byte] + + // Affected rows [Length Coded Binary] + mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) + + // Insert id [Length Coded Binary] + mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) + + // server_status [2 bytes] + + // warning count [2 bytes] + if !mc.strict { + return nil + } else { + pos := 1 + n + m + 2 + if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 { + return mc.getWarnings() + } + return nil + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 +func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { + columns := make([]mysqlField, count) + + for i := 0; ; i++ { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + // EOF Packet 
+ if data[0] == iEOF && (len(data) == 5 || len(data) == 1) { + if i == count { + return columns, nil + } + return nil, fmt.Errorf("ColumnsCount mismatch n:%d len:%d", count, len(columns)) + } + + // Catalog + pos, err := skipLengthEnodedString(data) + if err != nil { + return nil, err + } + + // Database [len coded string] + n, err := skipLengthEnodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Table [len coded string] + n, err = skipLengthEnodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Original table [len coded string] + n, err = skipLengthEnodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Name [len coded string] + name, _, n, err := readLengthEnodedString(data[pos:]) + if err != nil { + return nil, err + } + columns[i].name = string(name) + pos += n + + // Original name [len coded string] + n, err = skipLengthEnodedString(data[pos:]) + if err != nil { + return nil, err + } + + // Filler [1 byte] + // Charset [16 bit uint] + // Length [32 bit uint] + pos += n + 1 + 2 + 4 + + // Field type [byte] + columns[i].fieldType = data[pos] + pos++ + + // Flags [16 bit uint] + columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + //pos += 2 + + // Decimals [8 bit uint] + //pos++ + + // Default value [len coded binary] + //if pos < len(data) { + // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) + //} + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow +func (rows *textRows) readRow(dest []driver.Value) error { + mc := rows.mc + + data, err := mc.readPacket() + if err != nil { + return err + } + + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + return io.EOF + } + + // RowSet Packet + var n int + var isNull bool + pos := 0 + + for i := range dest { + // Read bytes and convert to string + dest[i], isNull, n, err 
= readLengthEnodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + if !mc.parseTime { + continue + } else { + switch rows.columns[i].fieldType { + case fieldTypeTimestamp, fieldTypeDateTime, + fieldTypeDate, fieldTypeNewDate: + dest[i], err = parseDateTime( + string(dest[i].([]byte)), + mc.cfg.loc, + ) + if err == nil { + continue + } + default: + continue + } + } + + } else { + dest[i] = nil + continue + } + } + return err // err != nil + } + + return nil +} + +// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read +func (mc *mysqlConn) readUntilEOF() error { + for { + data, err := mc.readPacket() + + // No Err and no EOF Packet + if err == nil && data[0] != iEOF { + continue + } + return err // Err or EOF + } +} + +/****************************************************************************** +* Prepared Statements * +******************************************************************************/ + +// Prepare Result Packets +// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html +func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { + data, err := stmt.mc.readPacket() + if err == nil { + // packet indicator [1 byte] + if data[0] != iOK { + return 0, stmt.mc.handleErrorPacket(data) + } + + // statement id [4 bytes] + stmt.id = binary.LittleEndian.Uint32(data[1:5]) + + // Column count [16 bit uint] + columnCount := binary.LittleEndian.Uint16(data[5:7]) + + // Param count [16 bit uint] + stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) + + // Reserved [8 bit] + + // Warning count [16 bit uint] + if !stmt.mc.strict { + return columnCount, nil + } else { + // Check for warnings count > 0, only available in MySQL > 4.1 + if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 { + return columnCount, stmt.mc.getWarnings() + } + return columnCount, nil + } + } + return 0, err +} + +// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html +func (stmt 
*mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { + maxLen := stmt.mc.maxPacketAllowed - 1 + pktLen := maxLen + + // After the header (bytes 0-3) follows before the data: + // 1 byte command + // 4 bytes stmtID + // 2 bytes paramID + const dataOffset = 1 + 4 + 2 + + // Can not use the write buffer since + // a) the buffer is too small + // b) it is in use + data := make([]byte, 4+1+4+2+len(arg)) + + copy(data[4+dataOffset:], arg) + + for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { + if dataOffset+argLen < maxLen { + pktLen = dataOffset + argLen + } + + // Add the packet header [24bit length + 1 byte sequence] + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + data[3] = 0x00 // mc.sequence + + // Add command byte [1 byte] + data[4] = comStmtSendLongData + + // Add stmtID [32 bit] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // Add paramID [16 bit] + data[9] = byte(paramID) + data[10] = byte(paramID >> 8) + + // Send CMD packet + err := stmt.mc.writePacket(data[:4+pktLen]) + if err == nil { + data = data[pktLen-dataOffset:] + continue + } + return err + + } + + // Reset Packet Sequence + stmt.mc.sequence = 0 + return nil +} + +// Execute Prepared Statement +// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html +func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { + if len(args) != stmt.paramCount { + return fmt.Errorf( + "Arguments count mismatch (Got: %d Has: %d)", + len(args), + stmt.paramCount, + ) + } + + mc := stmt.mc + + // Reset packet-sequence + mc.sequence = 0 + + var data []byte + + if len(args) == 0 { + const pktLen = 1 + 4 + 1 + 4 + data = mc.buf.takeBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print("Busy buffer") + return driver.ErrBadConn + } + + // packet header [4 bytes] + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + data[3] = 0x00 // new command, sequence id is always 0 + } else { + data = mc.buf.takeCompleteBuffer() + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print("Busy buffer") + return driver.ErrBadConn + } + + // header (bytes 0-3) is added after we know the packet size + } + + // command [1 byte] + data[4] = comStmtExecute + + // statement_id [4 bytes] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] + data[9] = 0x00 + + // iteration_count (uint32(1)) [4 bytes] + data[10] = 0x01 + data[11] = 0x00 + data[12] = 0x00 + data[13] = 0x00 + + if len(args) > 0 { + // NULL-bitmap [(len(args)+7)/8 bytes] + nullMask := uint64(0) + + pos := 4 + 1 + 4 + 1 + 4 + ((len(args) + 7) >> 3) + + // newParameterBoundFlag 1 [1 byte] + data[pos] = 0x01 + pos++ + + // type of each parameter [len(args)*2 bytes] + paramTypes := data[pos:] + pos += (len(args) << 1) + + // value of each parameter [n bytes] + paramValues := data[pos:pos] + valuesCap := cap(paramValues) + + for i := range args { + // build NULL-bitmap + if args[i] == nil { + nullMask |= 1 << uint(i) + paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i+1] = 0x00 + continue + } + + // cache types and values + switch v := args[i].(type) { + case int64: + paramTypes[i+i] = fieldTypeLongLong + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(uint64(v))..., + ) + } + + case float64: + paramTypes[i+i] = fieldTypeDouble + 
paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + math.Float64bits(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(math.Float64bits(v))..., + ) + } + + case bool: + paramTypes[i+i] = fieldTypeTiny + paramTypes[i+i+1] = 0x00 + + if v { + paramValues = append(paramValues, 0x01) + } else { + paramValues = append(paramValues, 0x00) + } + + case []byte: + // Common case (non-nil value) first + if v != nil { + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, v); err != nil { + return err + } + } + continue + } + + // Handle []byte(nil) as a NULL value + nullMask |= 1 << uint(i) + paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i+1] = 0x00 + + case string: + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { + return err + } + } + + case time.Time: + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + var val []byte + if v.IsZero() { + val = []byte("0000-00-00") + } else { + val = []byte(v.In(mc.cfg.loc).Format(timeFormat)) + } + + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(val)), + ) + paramValues = append(paramValues, val...) 
+ + default: + return fmt.Errorf("Can't convert type: %T", args[i]) + } + } + + // Check if param values exceeded the available buffer + // In that case we must build the data packet with the new values buffer + if valuesCap != cap(paramValues) { + data = append(data[:pos], paramValues...) + mc.buf.buf = data + } + + pos += len(paramValues) + data = data[:pos] + + pktLen := pos - 4 + + // packet header [4 bytes] + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + data[3] = mc.sequence + + // Convert nullMask to bytes + for i, max := 0, (stmt.paramCount+7)>>3; i < max; i++ { + data[i+14] = byte(nullMask >> uint(i<<3)) + } + } + + return mc.writePacket(data) +} + +// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html +func (rows *binaryRows) readRow(dest []driver.Value) error { + data, err := rows.mc.readPacket() + if err != nil { + return err + } + + // packet indicator [1 byte] + if data[0] != iOK { + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + return io.EOF + } + + // Error otherwise + return rows.mc.handleErrorPacket(data) + } + + // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] + pos := 1 + (len(dest)+7+2)>>3 + nullMask := data[1:pos] + + for i := range dest { + // Field is NULL + // (byte >> bit-pos) % 2 == 1 + if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { + dest[i] = nil + continue + } + + // Convert to byte-coded string + switch rows.columns[i].fieldType { + case fieldTypeNULL: + dest[i] = nil + continue + + // Numeric Types + case fieldTypeTiny: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(data[pos]) + } else { + dest[i] = int64(int8(data[pos])) + } + pos++ + continue + + case fieldTypeShort, fieldTypeYear: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) + } else { + dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) + } + pos += 2 + continue + + case fieldTypeInt24, 
fieldTypeLong: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) + } else { + dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) + } + pos += 4 + continue + + case fieldTypeLongLong: + if rows.columns[i].flags&flagUnsigned != 0 { + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + if val > math.MaxInt64 { + dest[i] = uint64ToString(val) + } else { + dest[i] = int64(val) + } + } else { + dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + } + pos += 8 + continue + + case fieldTypeFloat: + dest[i] = float64(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))) + pos += 4 + continue + + case fieldTypeDouble: + dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) + pos += 8 + continue + + // Length coded Binary Strings + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry: + var isNull bool + var n int + dest[i], isNull, n, err = readLengthEnodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + continue + } else { + dest[i] = nil + continue + } + } + return err + + // Date YYYY-MM-DD + case fieldTypeDate, fieldTypeNewDate: + num, isNull, n := readLengthEncodedInteger(data[pos:]) + pos += n + + if isNull { + dest[i] = nil + continue + } + + if rows.mc.parseTime { + dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.loc) + } else { + dest[i], err = formatBinaryDate(num, data[pos:]) + } + + if err == nil { + pos += int(num) + continue + } else { + return err + } + + // Time [-][H]HH:MM:SS[.fractal] + case fieldTypeTime: + num, isNull, n := readLengthEncodedInteger(data[pos:]) + pos += n + + if num == 0 { + if isNull { + dest[i] = nil + continue + } else { + dest[i] = []byte("00:00:00") + continue + } + } + + var sign string 
+ if data[pos] == 1 { + sign = "-" + } + + switch num { + case 8: + dest[i] = []byte(fmt.Sprintf( + sign+"%02d:%02d:%02d", + uint16(data[pos+1])*24+uint16(data[pos+5]), + data[pos+6], + data[pos+7], + )) + pos += 8 + continue + case 12: + dest[i] = []byte(fmt.Sprintf( + sign+"%02d:%02d:%02d.%06d", + uint16(data[pos+1])*24+uint16(data[pos+5]), + data[pos+6], + data[pos+7], + binary.LittleEndian.Uint32(data[pos+8:pos+12]), + )) + pos += 12 + continue + default: + return fmt.Errorf("Invalid TIME-packet length %d", num) + } + + // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] + case fieldTypeTimestamp, fieldTypeDateTime: + num, isNull, n := readLengthEncodedInteger(data[pos:]) + + pos += n + + if isNull { + dest[i] = nil + continue + } + + if rows.mc.parseTime { + dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.loc) + } else { + dest[i], err = formatBinaryDateTime(num, data[pos:]) + } + + if err == nil { + pos += int(num) + continue + } else { + return err + } + + // Please report if this happens! + default: + return fmt.Errorf("Unknown FieldType %d", rows.columns[i].fieldType) + } + } + + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/result.go new file mode 100644 index 00000000..c6438d03 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/result.go @@ -0,0 +1,22 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +type mysqlResult struct { + affectedRows int64 + insertId int64 +} + +func (res *mysqlResult) LastInsertId() (int64, error) { + return res.insertId, nil +} + +func (res *mysqlResult) RowsAffected() (int64, error) { + return res.affectedRows, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/rows.go new file mode 100644 index 00000000..073d3e71 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/rows.go @@ -0,0 +1,86 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "io" +) + +type mysqlField struct { + fieldType byte + flags fieldFlag + name string +} + +type mysqlRows struct { + mc *mysqlConn + columns []mysqlField +} + +type binaryRows struct { + mysqlRows +} + +type textRows struct { + mysqlRows +} + +func (rows *mysqlRows) Columns() []string { + columns := make([]string, len(rows.columns)) + for i := range columns { + columns[i] = rows.columns[i].name + } + return columns +} + +func (rows *mysqlRows) Close() error { + mc := rows.mc + if mc == nil { + return nil + } + if mc.netConn == nil { + return errInvalidConn + } + + // Remove unread packets from stream + err := mc.readUntilEOF() + rows.mc = nil + return err +} + +func (rows *binaryRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if mc.netConn == nil { + return errInvalidConn + } + + // Fetch next row from stream + if err := rows.readRow(dest); err != io.EOF { + return err + } + rows.mc = nil + } + return io.EOF +} + 
+func (rows *textRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if mc.netConn == nil { + return errInvalidConn + } + + // Fetch next row from stream + if err := rows.readRow(dest); err != io.EOF { + return err + } + rows.mc = nil + } + return io.EOF +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/statement.go new file mode 100644 index 00000000..bccce5a8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/statement.go @@ -0,0 +1,112 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "database/sql/driver" +) + +type mysqlStmt struct { + mc *mysqlConn + id uint32 + paramCount int + columns []mysqlField // cached from the first query +} + +func (stmt *mysqlStmt) Close() error { + if stmt.mc == nil || stmt.mc.netConn == nil { + errLog.Print(errInvalidConn) + return driver.ErrBadConn + } + + err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) + stmt.mc = nil + return err +} + +func (stmt *mysqlStmt) NumInput() int { + return stmt.paramCount +} + +func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { + if stmt.mc.netConn == nil { + errLog.Print(errInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, err + } + + mc := stmt.mc + + mc.affectedRows = 0 + mc.insertId = 0 + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + if resLen > 0 { + // Columns + err = mc.readUntilEOF() + if err != nil { + return nil, err + } + + // Rows + err = mc.readUntilEOF() + } + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, nil + } + } + + return nil, err +} + +func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { + if stmt.mc.netConn == nil { + errLog.Print(errInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, err + } + + mc := stmt.mc + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return nil, err + } + + rows := new(binaryRows) + rows.mc = mc + + if resLen > 0 { + // Columns + // If not cached, read them and cache them + if stmt.columns == nil { + rows.columns, err = mc.readColumns(resLen) + stmt.columns = rows.columns + } else { + rows.columns = stmt.columns + err = mc.readUntilEOF() + } + } + + return rows, err +} diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/transaction.go new file mode 100644 index 00000000..4cac59f3 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/transaction.go @@ -0,0 +1,31 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlTx struct { + mc *mysqlConn +} + +func (tx *mysqlTx) Commit() (err error) { + if tx.mc == nil || tx.mc.netConn == nil { + return errInvalidConn + } + err = tx.mc.exec("COMMIT") + tx.mc = nil + return +} + +func (tx *mysqlTx) Rollback() (err error) { + if tx.mc == nil || tx.mc.netConn == nil { + return errInvalidConn + } + err = tx.mc.exec("ROLLBACK") + tx.mc = nil + return +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/utils.go new file mode 100644 index 00000000..916ebefe --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/utils.go @@ -0,0 +1,681 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "crypto/sha1" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "net/url" + "os" + "strings" + "time" +) + +var ( + errLog *log.Logger // Error Logger + tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs + + errInvalidDSNUnescaped = errors.New("Invalid DSN: Did you forget to escape a param value?") + errInvalidDSNAddr = errors.New("Invalid DSN: Network Address not terminated (missing closing brace)") +) + +func init() { + errLog = log.New(os.Stderr, "[MySQL] ", log.Ldate|log.Ltime|log.Lshortfile) + tlsConfigRegister = make(map[string]*tls.Config) +} + +// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. +// Use the key as a value in the DSN where tls=value. +// +// rootCertPool := x509.NewCertPool() +// pem, err := ioutil.ReadFile("/path/ca-cert.pem") +// if err != nil { +// log.Fatal(err) +// } +// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { +// log.Fatal("Failed to append PEM.") +// } +// clientCert := make([]tls.Certificate, 0, 1) +// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") +// if err != nil { +// log.Fatal(err) +// } +// clientCert = append(clientCert, certs) +// mysql.RegisterTLSConfig("custom", &tls.Config{ +// RootCAs: rootCertPool, +// Certificates: clientCert, +// }) +// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") +// +func RegisterTLSConfig(key string, config *tls.Config) error { + if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { + return fmt.Errorf("Key '%s' is reserved", key) + } + + tlsConfigRegister[key] = config + return nil +} + +// DeregisterTLSConfig removes the tls.Config associated with key. 
+func DeregisterTLSConfig(key string) { + delete(tlsConfigRegister, key) +} + +// parseDSN parses the DSN string to a config +func parseDSN(dsn string) (cfg *config, err error) { + cfg = new(config) + + // TODO: use strings.IndexByte when we can depend on Go 1.2 + + // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] + // Find the last '/' (since the password or the net addr might contain a '/') + for i := len(dsn) - 1; i >= 0; i-- { + if dsn[i] == '/' { + var j, k int + + // left part is empty if i <= 0 + if i > 0 { + // [username[:password]@][protocol[(address)]] + // Find the last '@' in dsn[:i] + for j = i; j >= 0; j-- { + if dsn[j] == '@' { + // username[:password] + // Find the first ':' in dsn[:j] + for k = 0; k < j; k++ { + if dsn[k] == ':' { + cfg.passwd = dsn[k+1 : j] + break + } + } + cfg.user = dsn[:k] + + break + } + } + + // [protocol[(address)]] + // Find the first '(' in dsn[j+1:i] + for k = j + 1; k < i; k++ { + if dsn[k] == '(' { + // dsn[i-1] must be == ')' if an adress is specified + if dsn[i-1] != ')' { + if strings.ContainsRune(dsn[k+1:i], ')') { + return nil, errInvalidDSNUnescaped + } + return nil, errInvalidDSNAddr + } + cfg.addr = dsn[k+1 : i-1] + break + } + } + cfg.net = dsn[j+1 : k] + } + + // dbname[?param1=value1&...¶mN=valueN] + // Find the first '?' in dsn[i+1:] + for j = i + 1; j < len(dsn); j++ { + if dsn[j] == '?' 
{ + if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { + return + } + break + } + } + cfg.dbname = dsn[i+1 : j] + + break + } + } + + // Set default network if empty + if cfg.net == "" { + cfg.net = "tcp" + } + + // Set default adress if empty + if cfg.addr == "" { + switch cfg.net { + case "tcp": + cfg.addr = "127.0.0.1:3306" + case "unix": + cfg.addr = "/tmp/mysql.sock" + default: + return nil, errors.New("Default addr for network '" + cfg.net + "' unknown") + } + + } + + // Set default location if empty + if cfg.loc == nil { + cfg.loc = time.UTC + } + + return +} + +// parseDSNParams parses the DSN "query string" +// Values must be url.QueryEscape'ed +func parseDSNParams(cfg *config, params string) (err error) { + for _, v := range strings.Split(params, "&") { + param := strings.SplitN(v, "=", 2) + if len(param) != 2 { + continue + } + + // cfg params + switch value := param[1]; param[0] { + + // Disable INFILE whitelist / enable all files + case "allowAllFiles": + var isBool bool + cfg.allowAllFiles, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Switch "rowsAffected" mode + case "clientFoundRows": + var isBool bool + cfg.clientFoundRows, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Use old authentication mode (pre MySQL 4.1) + case "allowOldPasswords": + var isBool bool + cfg.allowOldPasswords, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Time Location + case "loc": + if value, err = url.QueryUnescape(value); err != nil { + return + } + cfg.loc, err = time.LoadLocation(value) + if err != nil { + return + } + + // Dial Timeout + case "timeout": + cfg.timeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // TLS-Encryption + case "tls": + boolValue, isBool := readBool(value) + if isBool { + if boolValue { + cfg.tls = &tls.Config{} + } + } else { + if 
strings.ToLower(value) == "skip-verify" { + cfg.tls = &tls.Config{InsecureSkipVerify: true} + } else if tlsConfig, ok := tlsConfigRegister[value]; ok { + cfg.tls = tlsConfig + } else { + return fmt.Errorf("Invalid value / unknown config name: %s", value) + } + } + + default: + // lazy init + if cfg.params == nil { + cfg.params = make(map[string]string) + } + + if cfg.params[param[0]], err = url.QueryUnescape(value); err != nil { + return + } + } + } + + return +} + +// Returns the bool value of the input. +// The 2nd return value indicates if the input was a valid bool value +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} + +/****************************************************************************** +* Authentication * +******************************************************************************/ + +// Encrypt password using 4.1+ method +func scramblePassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA1(password) + crypt := sha1.New() + crypt.Write(password) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) + // inner Hash + crypt.Reset() + crypt.Write(stage1) + hash := crypt.Sum(nil) + + // outer Hash + crypt.Reset() + crypt.Write(scramble) + crypt.Write(hash) + scramble = crypt.Sum(nil) + + // token = scrambleHash XOR stage1Hash + for i := range scramble { + scramble[i] ^= stage1[i] + } + return scramble +} + +// Encrypt password using pre 4.1 (old password) method +// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c +type myRnd struct { + seed1, seed2 uint32 +} + +const myRndMaxVal = 0x3FFFFFFF + +// Pseudo random number generator +func newMyRnd(seed1, seed2 uint32) *myRnd { + return &myRnd{ + seed1: seed1 % myRndMaxVal, + seed2: seed2 % myRndMaxVal, + } +} + +// 
Tested to be equivalent to MariaDB's floating point variant +// http://play.golang.org/p/QHvhd4qved +// http://play.golang.org/p/RG0q4ElWDx +func (r *myRnd) NextByte() byte { + r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal + r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal + + return byte(uint64(r.seed1) * 31 / myRndMaxVal) +} + +// Generate binary hash from byte string using insecure pre 4.1 method +func pwHash(password []byte) (result [2]uint32) { + var add uint32 = 7 + var tmp uint32 + + result[0] = 1345345333 + result[1] = 0x12345671 + + for _, c := range password { + // skip spaces and tabs in password + if c == ' ' || c == '\t' { + continue + } + + tmp = uint32(c) + result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) + result[1] += (result[1] << 8) ^ result[0] + add += tmp + } + + // Remove sign bit (1<<31)-1) + result[0] &= 0x7FFFFFFF + result[1] &= 0x7FFFFFFF + + return +} + +// Encrypt password using insecure pre 4.1 method +func scrambleOldPassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + scramble = scramble[:8] + + hashPw := pwHash(password) + hashSc := pwHash(scramble) + + r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) + + var out [8]byte + for i := range out { + out[i] = r.NextByte() + 64 + } + + mask := r.NextByte() + for i := range out { + out[i] ^= mask + } + + return out[:] +} + +/****************************************************************************** +* Time related utils * +******************************************************************************/ + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... 
+// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +// The value type must be time.Time or string / []byte (formatted time-string), +// otherwise Scan fails. +func (nt *NullTime) Scan(value interface{}) (err error) { + if value == nil { + nt.Time, nt.Valid = time.Time{}, false + return + } + + switch v := value.(type) { + case time.Time: + nt.Time, nt.Valid = v, true + return + case []byte: + nt.Time, err = parseDateTime(string(v), time.UTC) + nt.Valid = (err == nil) + return + case string: + nt.Time, err = parseDateTime(v, time.UTC) + nt.Valid = (err == nil) + return + } + + nt.Valid = false + return fmt.Errorf("Can't convert %T to time.Time", value) +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { + switch len(str) { + case 10: // YYYY-MM-DD + if str == "0000-00-00" { + return + } + t, err = time.Parse(timeFormat[:10], str) + case 19: // YYYY-MM-DD HH:MM:SS + if str == "0000-00-00 00:00:00" { + return + } + t, err = time.Parse(timeFormat, str) + default: + err = fmt.Errorf("Invalid Time-String: %s", str) + return + } + + // Adjust location + if err == nil && loc != time.UTC { + y, mo, d := t.Date() + h, mi, s := t.Clock() + t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil + } + + return +} + +func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { + switch num { + case 0: + return time.Time{}, nil + case 4: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + 0, 0, 0, 0, + loc, + ), nil + case 7: + return time.Date( + 
int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + 0, + loc, + ), nil + case 11: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds + loc, + ), nil + } + return nil, fmt.Errorf("Invalid DATETIME-packet length %d", num) +} + +func formatBinaryDate(num uint64, data []byte) (driver.Value, error) { + switch num { + case 0: + return []byte("0000-00-00"), nil + case 4: + return []byte(fmt.Sprintf( + "%04d-%02d-%02d", + binary.LittleEndian.Uint16(data[:2]), + data[2], + data[3], + )), nil + } + return nil, fmt.Errorf("Invalid DATE-packet length %d", num) +} + +func formatBinaryDateTime(num uint64, data []byte) (driver.Value, error) { + switch num { + case 0: + return []byte("0000-00-00 00:00:00"), nil + case 4: + return []byte(fmt.Sprintf( + "%04d-%02d-%02d 00:00:00", + binary.LittleEndian.Uint16(data[:2]), + data[2], + data[3], + )), nil + case 7: + return []byte(fmt.Sprintf( + "%04d-%02d-%02d %02d:%02d:%02d", + binary.LittleEndian.Uint16(data[:2]), + data[2], + data[3], + data[4], + data[5], + data[6], + )), nil + case 11: + return []byte(fmt.Sprintf( + "%04d-%02d-%02d %02d:%02d:%02d.%06d", + binary.LittleEndian.Uint16(data[:2]), + data[2], + data[3], + data[4], + data[5], + data[6], + binary.LittleEndian.Uint32(data[7:11]), + )), nil + } + return nil, fmt.Errorf("Invalid DATETIME-packet length %d", num) +} + +/****************************************************************************** +* Convert from and to bytes * +******************************************************************************/ + +func uint64ToBytes(n uint64) []byte { + return []byte{ + byte(n), + byte(n >> 8), + byte(n >> 16), + byte(n >> 24), + 
byte(n >> 32), + byte(n >> 40), + byte(n >> 48), + byte(n >> 56), + } +} + +func uint64ToString(n uint64) []byte { + var a [20]byte + i := 20 + + // U+0030 = 0 + // ... + // U+0039 = 9 + + var q uint64 + for n >= 10 { + i-- + q = n / 10 + a[i] = uint8(n-q*10) + 0x30 + n = q + } + + i-- + a[i] = uint8(n) + 0x30 + + return a[i:] +} + +// treats string value as unsigned integer representation +func stringToInt(b []byte) int { + val := 0 + for i := range b { + val *= 10 + val += int(b[i] - 0x30) + } + return val +} + +// returns the string read as a bytes slice, wheter the value is NULL, +// the number of bytes read and an error, in case the string is longer than +// the input slice +func readLengthEnodedString(b []byte) ([]byte, bool, int, error) { + // Get length + num, isNull, n := readLengthEncodedInteger(b) + if num < 1 { + return b[n:n], isNull, n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return b[n-int(num) : n], false, n, nil + } + return nil, false, n, io.EOF +} + +// returns the number of bytes skipped and an error, in case the string is +// longer than the input slice +func skipLengthEnodedString(b []byte) (int, error) { + // Get length + num, _, n := readLengthEncodedInteger(b) + if num < 1 { + return n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return n, nil + } + return n, io.EOF +} + +// returns the number read, whether the value is NULL and the number of bytes read +func readLengthEncodedInteger(b []byte) (uint64, bool, int) { + switch b[0] { + + // 251: NULL + case 0xfb: + return 0, true, 1 + + // 252: value of following 2 + case 0xfc: + return uint64(b[1]) | uint64(b[2])<<8, false, 3 + + // 253: value of following 3 + case 0xfd: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 + + // 254: value of following 8 + case 0xfe: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | + uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | + uint64(b[7])<<48 | 
uint64(b[8])<<54, + false, 9 + } + + // 0-250: value of first byte + return uint64(b[0]), false, 1 +} + +// encodes a uint64 value and appends it to the given bytes slice +func appendLengthEncodedInteger(b []byte, n uint64) []byte { + switch { + case n <= 250: + return append(b, byte(n)) + + case n <= 0xffff: + return append(b, 0xfc, byte(n), byte(n>>8)) + + case n <= 0xffffff: + return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) + } + return b +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/utils_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/utils_test.go new file mode 100644 index 00000000..d19c9273 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/go-sql-driver/mysql/utils_test.go @@ -0,0 +1,124 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "fmt" + "testing" + "time" +) + +var testDSNs = []struct { + in string + out string + loc *time.Location +}{ + {"username:password@protocol(address)/dbname?param=value", "&{user:username passwd:password net:protocol addr:address dbname:dbname params:map[param:value] loc:%p timeout:0 tls: allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC}, + {"user@unix(/path/to/socket)/dbname?charset=utf8", "&{user:user passwd: net:unix addr:/path/to/socket dbname:dbname params:map[charset:utf8] loc:%p timeout:0 tls: allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC}, + {"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8] loc:%p timeout:0 tls: allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC}, + {"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8mb4,utf8] loc:%p timeout:0 tls: allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC}, + {"user:password@/dbname?loc=UTC&timeout=30s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE", "&{user:user passwd:password net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p timeout:30000000000 tls: allowAllFiles:true allowOldPasswords:true clientFoundRows:true}", time.UTC}, + {"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local", "&{user:user passwd:p@ss(word) net:tcp addr:[de:ad:be:ef::ca:fe]:80 dbname:dbname params:map[] loc:%p timeout:0 tls: allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.Local}, + {"/dbname", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p timeout:0 tls: allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC}, + {"@/", "&{user: passwd: net:tcp 
addr:127.0.0.1:3306 dbname: params:map[] loc:%p timeout:0 tls: allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC}, + {"/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p timeout:0 tls: allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC}, + {"", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p timeout:0 tls: allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC}, + {"user:p@/ssword@/", "&{user:user passwd:p@/ssword net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p timeout:0 tls: allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC}, + {"unix/?arg=%2Fsome%2Fpath.ext", "&{user: passwd: net:unix addr:/tmp/mysql.sock dbname: params:map[arg:/some/path.ext] loc:%p timeout:0 tls: allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC}, +} + +func TestDSNParser(t *testing.T) { + var cfg *config + var err error + var res string + + for i, tst := range testDSNs { + cfg, err = parseDSN(tst.in) + if err != nil { + t.Error(err.Error()) + } + + // pointer not static + cfg.tls = nil + + res = fmt.Sprintf("%+v", cfg) + if res != fmt.Sprintf(tst.out, tst.loc) { + t.Errorf("%d. parseDSN(%q) => %q, want %q", i, tst.in, res, fmt.Sprintf(tst.out, tst.loc)) + } + } +} + +func TestDSNParserInvalid(t *testing.T) { + var invalidDSNs = []string{ + "@net(addr/", // no closing brace + "@tcp(/", // no closing brace + "tcp(/", // no closing brace + "(/", // no closing brace + "net(addr)//", // unescaped + //"/dbname?arg=/some/unescaped/path", + } + + for i, tst := range invalidDSNs { + if _, err := parseDSN(tst); err == nil { + t.Errorf("invalid DSN #%d. 
(%s) didn't error!", i, tst) + } + } +} + +func BenchmarkParseDSN(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, tst := range testDSNs { + if _, err := parseDSN(tst.in); err != nil { + b.Error(err.Error()) + } + } + } +} + +func TestScanNullTime(t *testing.T) { + var scanTests = []struct { + in interface{} + error bool + valid bool + time time.Time + }{ + {tDate, false, true, tDate}, + {sDate, false, true, tDate}, + {[]byte(sDate), false, true, tDate}, + {tDateTime, false, true, tDateTime}, + {sDateTime, false, true, tDateTime}, + {[]byte(sDateTime), false, true, tDateTime}, + {tDate0, false, true, tDate0}, + {sDate0, false, true, tDate0}, + {[]byte(sDate0), false, true, tDate0}, + {sDateTime0, false, true, tDate0}, + {[]byte(sDateTime0), false, true, tDate0}, + {"", true, false, tDate0}, + {"1234", true, false, tDate0}, + {0, true, false, tDate0}, + } + + var nt = NullTime{} + var err error + + for _, tst := range scanTests { + err = nt.Scan(tst.in) + if (err != nil) != tst.error { + t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil)) + } + if nt.Valid != tst.valid { + t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid) + } + if nt.Time != tst.time { + t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. 
+ +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/README b/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/README new file mode 100644 index 00000000..5f9c1148 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/README @@ -0,0 +1,44 @@ +glog +==== + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package + http://code.google.com/p/google-glog + +By binding methods to booleans it is possible to use the log package +without paying the expense of evaluating the arguments to the log. +Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. 
+ +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. + + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/glog.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/glog.go new file mode 100644 index 00000000..dbe1d65e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/glog.go @@ -0,0 +1,1033 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. +// This package provides several flags that modify this behavior. +// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. 
+// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package glog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. +func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? 
+ if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. 
+func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. 
+func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `*?[]\`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. +// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. 
+func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported +func (t *traceLocation) Get() interface{} { + return nil +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +// Syntax: -log_backtrace_at=gopherflakes.go:234 +// Note that unlike vmodule the file extension is included here. +func (t *traceLocation) Set(value string) error { + if value == "" { + // Unset. + t.line = 0 + t.file = "" + } + fields := strings.Split(value, ":") + if len(fields) != 2 { + return errTraceSyntax + } + file, line := fields[0], fields[1] + if !strings.Contains(file, ".") { + return errTraceSyntax + } + v, err := strconv.Atoi(line) + if err != nil { + return errTraceSyntax + } + if v <= 0 { + return errors.New("negative or zero value for level") + } + logging.mu.Lock() + defer logging.mu.Unlock() + t.line = v + t.file = file + return nil +} + +// flushSyncWriter is the interface satisfied by logging destinations. 
+type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer +} + +func init() { + flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") + flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flag.Var(&logging.verbosity, "v", "log level for V logs") + flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + // Default stderrThreshold is ERROR. + logging.stderrThreshold = errorLog + + logging.setVState(0, nil, false) + go logging.flushDaemon() +} + +// Flush flushes all pending log I/O. +func Flush() { + logging.lockAndFlushAll() +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + // Level flag. Handled atomically. + stderrThreshold severity // The -stderrthreshold flag. + + // freeList is a list of byte buffers, maintained under freeListMu. + freeList *buffer + // freeListMu maintains the free list. It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + freeListMu sync.Mutex + + // mu protects the remaining elements of this structure and is + // used to synchronize logging. + mu sync.Mutex + // file holds writer for each of the log types. 
+ file [numSeverity]flushSyncWriter + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using sync.LoadInt32, but is only modified under mu. + filterLength int32 + // traceLocation is the state of the -log_backtrace_at flag. + traceLocation traceLocation + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity Level // V logging level, the value of the -v flag/ +} + +// buffer holds a byte Buffer for reuse. The zero value is ready for use. +type buffer struct { + bytes.Buffer + tmp [64]byte // temporary byte array for creating headers. + next *buffer +} + +var logging loggingT + +// setVState sets a consistent state for V logging. +// l.mu is held. +func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + logging.verbosity.set(0) + // Ditto for filter length. + logging.filterLength = 0 + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + logging.vmodule.filter = filter + logging.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&logging.filterLength, int32(len(filter))) + logging.verbosity.set(verbosity) +} + +// getBuffer returns a new, ready-to-use buffer. 
+func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. + +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header. + +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity) *buffer { + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + now := timeNow() + _, file, line, ok := runtime.Caller(3) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. 
+ _, month, day := now.Date() + hour, minute, second := now.Clock() + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000) + buf.tmp[21] = ' ' + buf.nDigits(5, 22, pid) // TODO: should be TID + buf.tmp[27] = ' ' + buf.Write(buf.tmp[:28]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats a zero-prefixed n-digit integer at buf.tmp[i]. +func (buf *buffer) nDigits(n, i, d int) { + for j := n - 1; j >= 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf := l.header(s) + fmt.Fprintln(buf, args...) + l.output(s, buf) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + buf := l.header(s) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf := l.header(s) + fmt.Fprintf(buf, format, args...) 
+ if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf) +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer) { + l.mu.Lock() + if l.traceLocation.isSet() { + _, file, line, ok := runtime.Caller(3) // It's always the same number of frames to the user's call (same as header). + if ok && l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if l.toStderr { + os.Stderr.Write(data) + } else { + if l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + if s == fatalLog { + // Make sure we see the trace for the current goroutine on standard error. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. 
This is needed because the hooks invoked +// by Flush may deadlock when glog.Fatal is called from a hook that holds +// a lock. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. 
+type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. 
+ for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + } + if err := sb.rotateFile(now); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 30 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for _ = range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. 
+// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// if glog.V(2) { glog.Info("log this") } +// or +// glog.V(2).Info("log this") +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. If the level in the call to +// V is at least the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. +func V(level Level) Verbose { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. + if logging.verbosity.get() >= level { + return Verbose(true) + } + + // It's off globally but it vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2, logging.pcs[:]) == 0 { + return Verbose(false) + } + v, ok := logging.vmap[logging.pcs[0]] + if !ok { + v = logging.setV(logging.pcs[0]) + } + return Verbose(v >= level) + } + return Verbose(false) +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...interface{}) { + if v { + logging.print(infoLog, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. 
+func (v Verbose) Infoln(args ...interface{}) { + if v { + logging.println(infoLog, args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v { + logging.printf(infoLog, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...interface{}) { + logging.print(infoLog, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Infoln(args ...interface{}) { + logging.println(infoLog, args...) +} + +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...interface{}) { + logging.printf(infoLog, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...interface{}) { + logging.print(warningLog, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Warningln(args ...interface{}) { + logging.println(warningLog, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Warningf(format string, args ...interface{}) { + logging.printf(warningLog, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(errorLog, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. 
+func Errorln(args ...interface{}) { + logging.println(errorLog, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/glog_file.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/glog_file.go new file mode 100644 index 00000000..65075d28 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/glog_file.go @@ -0,0 +1,124 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package glog + +import ( + "errors" + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +// If non-empty, overrides the choice of directory in which to write logs. +// See createLogDirs for the full list of possible destinations. +var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + +func createLogDirs() { + if *logDir != "" { + logDirs = append(logDirs, *logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. 
+func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. +func create(tag string, t time.Time) (f *os.File, filename string, err error) { + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := os.Create(fname) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/glog_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/glog_test.go new file mode 100644 index 00000000..e4cac5a5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/golang/glog/glog_test.go @@ -0,0 +1,333 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package glog + +import ( + "bytes" + "fmt" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +// Test that shortHostname works as advertised. +func TestShortHostname(t *testing.T) { + for hostname, expect := range map[string]string{ + "": "", + "host": "host", + "host.google.com": "host", + } { + if got := shortHostname(hostname); expect != got { + t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got) + } + } +} + +// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter. +type flushBuffer struct { + bytes.Buffer +} + +func (f *flushBuffer) Flush() error { + return nil +} + +func (f *flushBuffer) Sync() error { + return nil +} + +// swap sets the log writers and returns the old array. +func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) { + l.mu.Lock() + defer l.mu.Unlock() + old = l.file + for i, w := range writers { + logging.file[i] = w + } + return +} + +// newBuffers sets the log writers to all new byte buffers and returns the old array. +func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter { + return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)}) +} + +// contents returns the specified log value as a string. +func contents(s severity) string { + return logging.file[s].(*flushBuffer).String() +} + +// contains reports whether the string is contained in the log. 
+func contains(s severity, str string, t *testing.T) bool { + return strings.Contains(contents(s), str) +} + +// setFlags configures the logging flags how the test expects them. +func setFlags() { + logging.toStderr = false +} + +// Test that Info works as advertised. +func TestInfo(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + Info("test") + if !contains(infoLog, "I", t) { + t.Errorf("Info has wrong character: %q", contents(infoLog)) + } + if !contains(infoLog, "test", t) { + t.Error("Info failed") + } +} + +// Test that the header has the correct format. +func TestHeader(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + defer func(previous func() time.Time) { timeNow = previous }(timeNow) + timeNow = func() time.Time { + return time.Date(2006, 1, 2, 15, 4, 5, .678901e9, time.Local) + } + Info("test") + var line, pid int + n, err := fmt.Sscanf(contents(infoLog), "I0102 15:04:05.678901 %d glog_test.go:%d] test\n", &pid, &line) + if n != 2 || err != nil { + t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) + } +} + +// Test that an Error log goes to Warning and Info. +// Even in the Info log, the source character will be E, so the data should +// all be identical. +func TestError(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + Error("test") + if !contains(errorLog, "E", t) { + t.Errorf("Error has wrong character: %q", contents(errorLog)) + } + if !contains(errorLog, "test", t) { + t.Error("Error failed") + } + str := contents(errorLog) + if !contains(warningLog, str, t) { + t.Error("Warning failed") + } + if !contains(infoLog, str, t) { + t.Error("Info failed") + } +} + +// Test that a Warning log goes to Info. +// Even in the Info log, the source character will be W, so the data should +// all be identical. 
+func TestWarning(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + Warning("test") + if !contains(warningLog, "W", t) { + t.Errorf("Warning has wrong character: %q", contents(warningLog)) + } + if !contains(warningLog, "test", t) { + t.Error("Warning failed") + } + str := contents(warningLog) + if !contains(infoLog, str, t) { + t.Error("Info failed") + } +} + +// Test that a V log goes to Info. +func TestV(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + logging.verbosity.Set("2") + defer logging.verbosity.Set("0") + V(2).Info("test") + if !contains(infoLog, "I", t) { + t.Errorf("Info has wrong character: %q", contents(infoLog)) + } + if !contains(infoLog, "test", t) { + t.Error("Info failed") + } +} + +// Test that a vmodule enables a log in this file. +func TestVmoduleOn(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + logging.vmodule.Set("glog_test=2") + defer logging.vmodule.Set("") + if !V(1) { + t.Error("V not enabled for 1") + } + if !V(2) { + t.Error("V not enabled for 2") + } + if V(3) { + t.Error("V enabled for 3") + } + V(2).Info("test") + if !contains(infoLog, "I", t) { + t.Errorf("Info has wrong character: %q", contents(infoLog)) + } + if !contains(infoLog, "test", t) { + t.Error("Info failed") + } +} + +// Test that a vmodule of another file does not enable a log in this file. +func TestVmoduleOff(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + logging.vmodule.Set("notthisfile=2") + defer logging.vmodule.Set("") + for i := 1; i <= 3; i++ { + if V(Level(i)) { + t.Errorf("V enabled for %d", i) + } + } + V(2).Info("test") + if contents(infoLog) != "" { + t.Error("V logged incorrectly") + } +} + +// vGlobs are patterns that match/don't match this file at V=2. +var vGlobs = map[string]bool{ + // Easy to test the numeric match here. + "glog_test=1": false, // If -vmodule sets V to 1, V(2) will fail. 
+ "glog_test=2": true, + "glog_test=3": true, // If -vmodule sets V to 1, V(3) will succeed. + // These all use 2 and check the patterns. All are true. + "*=2": true, + "?l*=2": true, + "????_*=2": true, + "??[mno]?_*t=2": true, + // These all use 2 and check the patterns. All are false. + "*x=2": false, + "m*=2": false, + "??_*=2": false, + "?[abc]?_*t=2": false, +} + +// Test that vmodule globbing works as advertised. +func testVmoduleGlob(pat string, match bool, t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + defer logging.vmodule.Set("") + logging.vmodule.Set(pat) + if V(2) != Verbose(match) { + t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match) + } +} + +// Test that a vmodule globbing works as advertised. +func TestVmoduleGlob(t *testing.T) { + for glob, match := range vGlobs { + testVmoduleGlob(glob, match, t) + } +} + +func TestRollover(t *testing.T) { + setFlags() + var err error + defer func(previous func(error)) { logExitFunc = previous }(logExitFunc) + logExitFunc = func(e error) { + err = e + } + defer func(previous uint64) { MaxSize = previous }(MaxSize) + MaxSize = 512 + + Info("x") // Be sure we have a file. + info, ok := logging.file[infoLog].(*syncBuffer) + if !ok { + t.Fatal("info wasn't created") + } + if err != nil { + t.Fatalf("info has initial error: %v", err) + } + fname0 := info.file.Name() + Info(strings.Repeat("x", int(MaxSize))) // force a rollover + if err != nil { + t.Fatalf("info has error after big write: %v", err) + } + + // Make sure the next log file gets a file name with a different + // time stamp. + // + // TODO: determine whether we need to support subsecond log + // rotation. C++ does not appear to handle this case (nor does it + // handle Daylight Savings Time properly). 
+ time.Sleep(1 * time.Second) + + Info("x") // create a new file + if err != nil { + t.Fatalf("error after rotation: %v", err) + } + fname1 := info.file.Name() + if fname0 == fname1 { + t.Errorf("info.f.Name did not change: %v", fname0) + } + if info.nbytes >= MaxSize { + t.Errorf("file size was not reset: %d", info.nbytes) + } +} + +func TestLogBacktraceAt(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + // The peculiar style of this code simplifies line counting and maintenance of the + // tracing block below. + var infoLine string + setTraceLocation := func(file string, line int, ok bool, delta int) { + if !ok { + t.Fatal("could not get file:line") + } + _, file = filepath.Split(file) + infoLine = fmt.Sprintf("%s:%d", file, line+delta) + err := logging.traceLocation.Set(infoLine) + if err != nil { + t.Fatal("error setting log_backtrace_at: ", err) + } + } + { + // Start of tracing block. These lines know about each other's relative position. + _, file, line, ok := runtime.Caller(0) + setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls. + Info("we want a stack trace here") + } + numAppearances := strings.Count(contents(infoLog), infoLine) + if numAppearances < 2 { + // Need 2 appearances, one in the log header and one in the trace: + // log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here + // ... + // github.com/glog/glog_test.go:280 (0x41ba91) + // ... + // We could be more precise but that would require knowing the details + // of the traceback format, which may not be dependable. 
+ t.Fatal("got no trace back; log is ", contents(infoLog)) + } +} + +func BenchmarkHeader(b *testing.B) { + for i := 0; i < b.N; i++ { + logging.putBuffer(logging.header(infoLog)) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/LICENSE new file mode 100644 index 00000000..09e5be61 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2013, Gorilla web toolkit +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/README.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/README.md new file mode 100644 index 00000000..627e2060 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/README.md @@ -0,0 +1,49 @@ +# Gorilla WebSocket + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + +### Documentation + +* [Reference](http://godoc.org/github.com/gorilla/websocket) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn WebSockets Test +Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). + +### Gorilla WebSocket compared with other packages + + + + + + + + + + + + + + + +
    gorillago.net
    Protocol supportRFC 6455RFC 6455
    Limit size of received messageYesNo
    Send pings and receive pongsYesNo
    Send close messageYesNo
    Read message using io.ReaderYesNo, see note
    Write message using io.WriteCloserYesNo, see note
    Encode, decode JSON messageYesYes
    + +Note: The go.net io.Reader and io.Writer operate across WebSocket message +boundaries. Read returns when the input buffer is full or a message boundary is +encountered, Each call to Write sends a message. The Gorilla io.Reader and +io.WriteCloser operate on a single WebSocket message. + diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/client.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/client.go new file mode 100644 index 00000000..be874443 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/client.go @@ -0,0 +1,69 @@ +// Copyright 2013 Gary Burd. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "errors" + "net" + "net/http" + "net/url" + "strings" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Set-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. 
+func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + acceptKey := computeAcceptKey(challengeKey) + + c = newConn(netConn, false, readBufSize, writeBufSize) + p := c.writeBuf[:0] + p = append(p, "GET "...) + p = append(p, u.RequestURI()...) + p = append(p, " HTTP/1.1\r\nHost: "...) + p = append(p, u.Host...) + p = append(p, "\r\nUpgrade: websocket\r\nConnection: upgrade\r\nSec-WebSocket-Version: 13\r\nSec-WebSocket-Key: "...) + p = append(p, challengeKey...) + p = append(p, "\r\n"...) + for k, vs := range requestHeader { + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + p = append(p, v...) + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + if _, err := netConn.Write(p); err != nil { + return nil, nil, err + } + + resp, err := http.ReadResponse(c.br, &http.Request{Method: "GET", URL: u}) + if err != nil { + return nil, nil, err + } + if resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != acceptKey { + return nil, resp, ErrBadHandshake + } + return c, resp, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/client_server_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/client_server_test.go new file mode 100644 index 00000000..07d55400 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/client_server_test.go @@ -0,0 +1,114 @@ +// Copyright 2013 Gary Burd. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket_test + +import ( + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "camlistore.org/third_party/github.com/gorilla/websocket" +) + +type wsHandler struct { + *testing.T +} + +func (t wsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + t.Logf("bad method: %s", r.Method) + return + } + if r.Header.Get("Origin") != "http://"+r.Host { + http.Error(w, "Origin not allowed", 403) + t.Logf("bad origin: %s", r.Header.Get("Origin")) + return + } + ws, err := websocket.Upgrade(w, r, http.Header{"Set-Cookie": {"sessionID=1234"}}, 1024, 1024) + if _, ok := err.(websocket.HandshakeError); ok { + t.Logf("bad handshake: %v", err) + http.Error(w, "Not a websocket handshake", 400) + return + } else if err != nil { + t.Logf("upgrade error: %v", err) + return + } + defer ws.Close() + for { + op, r, err := ws.NextReader() + if err != nil { + if err != io.EOF { + t.Logf("NextReader: %v", err) + } + return + } + if op == websocket.PongMessage { + continue + } + w, err := ws.NextWriter(op) + if err != nil { + t.Logf("NextWriter: %v", err) + return + } + if _, err = io.Copy(w, r); err != nil { + t.Logf("Copy: %v", err) + return + } + if err := w.Close(); err != nil { + t.Logf("Close: %v", err) + return + } + } +} + +func TestClientServer(t *testing.T) { + s := httptest.NewServer(wsHandler{t}) + defer s.Close() + u, _ := url.Parse(s.URL) + c, err := net.Dial("tcp", u.Host) + if err != nil { + t.Fatalf("Dial: %v", err) + } + ws, resp, err := websocket.NewClient(c, u, http.Header{"Origin": {s.URL}}, 1024, 1024) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + defer ws.Close() + + var sessionID string + for _, c := range resp.Cookies() { + if c.Name == "sessionID" { + sessionID = c.Value + } + } + if sessionID != "1234" { + t.Error("Set-Cookie not received from the server.") + } + + w, _ := ws.NextWriter(websocket.TextMessage) + 
io.WriteString(w, "HELLO") + w.Close() + ws.SetReadDeadline(time.Now().Add(1 * time.Second)) + op, r, err := ws.NextReader() + if err != nil { + t.Fatalf("NextReader: %v", err) + } + if op != websocket.TextMessage { + t.Fatalf("op=%d, want %d", op, websocket.TextMessage) + } + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("ReadAll: %v", err) + } + if string(b) != "HELLO" { + t.Fatalf("message=%s, want %s", b, "HELLO") + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/conn.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/conn.go new file mode 100644 index 00000000..e37970d1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/conn.go @@ -0,0 +1,759 @@ +// Copyright 2013 Gary Burd. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "time" +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. 
Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +var ( + continuationFrame = 0 + noFrame = -1 +) + +var ( + ErrCloseSent = errors.New("websocket: close sent") + ErrReadLimit = errors.New("websocket: read limit exceeded") +) + +var ( + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteTimeout = errors.New("websocket: write timeout") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +const ( + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + finalBit = 1 << 7 + maskBit = 1 << 7 + writeWait = time.Second +) + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos += 1 + } + return pos & 3 +} + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 32)} +} + +// Conn represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + + // Write fields + mu chan bool // used as mutex to protect write to conn and closeSent + closeSent bool // true if close message was sent + + // Message writer fields. + writeErr error + writeBuf []byte // frame is constructed in this buffer. + writePos int // end of data in writeBuf. + writeFrameType int // type of the current frame. + writeSeq int // incremented to invalidate message writers. 
+ writeDeadline time.Time + + // Read fields + readErr error + br *bufio.Reader + readRemaining int64 // bytes remaining in current frame. + readFinal bool // true the current message has more frames. + readSeq int // incremented to invalidate message readers. + readLength int64 // Message size. + readLimit int64 // Maximum message size. + readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error +} + +func newConn(conn net.Conn, isServer bool, readBufSize, writeBufSize int) *Conn { + mu := make(chan bool, 1) + mu <- true + + c := &Conn{ + isServer: isServer, + br: bufio.NewReaderSize(conn, readBufSize), + conn: conn, + mu: mu, + readFinal: true, + writeBuf: make([]byte, writeBufSize+maxFrameHeaderSize), + writeFrameType: noFrame, + writePos: maxFrameHeaderSize, + } + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// Close closes the underlying network connection without sending or waiting for a close frame. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { + <-c.mu + defer func() { c.mu <- true }() + + if c.closeSent { + return ErrCloseSent + } else if frameType == CloseMessage { + c.closeSent = true + } + + c.conn.SetWriteDeadline(deadline) + for _, buf := range bufs { + if len(buf) > 0 { + n, err := c.conn.Write(buf) + if n != len(buf) { + // Close on partial write. + c.conn.Close() + } + if err != nil { + return err + } + } + } + return nil +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. 
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := time.Hour * 1000 + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- true }() + + if c.closeSent { + return ErrCloseSent + } else if messageType == CloseMessage { + c.closeSent = true + } + + c.conn.SetWriteDeadline(deadline) + n, err := c.conn.Write(buf) + if n != 0 && n != len(buf) { + c.conn.Close() + } + return err +} + +// NextWriter returns a writer for the next message to send. The writer's +// Close method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +// +// The NextWriter method and the writers returned from the method cannot be +// accessed by more than one goroutine at a time. 
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + if c.writeErr != nil { + return nil, c.writeErr + } + + if c.writeFrameType != noFrame { + if err := c.flushFrame(true, nil); err != nil { + return nil, err + } + } + + if !isControl(messageType) && !isData(messageType) { + return nil, errBadWriteOpCode + } + + c.writeFrameType = messageType + return messageWriter{c, c.writeSeq}, nil +} + +func (c *Conn) flushFrame(final bool, extra []byte) error { + length := c.writePos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(c.writeFrameType) && + (!final || length > maxControlFramePayloadSize) { + c.writeSeq += 1 + c.writeFrameType = noFrame + c.writePos = maxFrameHeaderSize + return errInvalidControlFrame + } + + b0 := byte(c.writeFrameType) + if final { + b0 |= finalBit + } + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:c.writePos]) + if len(extra) > 0 { + c.writeErr = errors.New("websocket: internal error, extra used in client mode") + return c.writeErr + } + } + + // Write the buffers to the connection. + c.writeErr = c.write(c.writeFrameType, c.writeDeadline, c.writeBuf[framePos:c.writePos], extra) + + // Setup for next frame. 
+ c.writePos = maxFrameHeaderSize + c.writeFrameType = continuationFrame + if final { + c.writeSeq += 1 + c.writeFrameType = noFrame + } + return c.writeErr +} + +type messageWriter struct { + c *Conn + seq int +} + +func (w messageWriter) err() error { + c := w.c + if c.writeSeq != w.seq { + return errWriteClosed + } + if c.writeErr != nil { + return c.writeErr + } + return nil +} + +func (w messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.c.writePos + if n <= 0 { + if err := w.c.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.c.writePos + } + if n > max { + n = max + } + return n, nil +} + +func (w messageWriter) write(final bool, p []byte) (int, error) { + if err := w.err(); err != nil { + return 0, err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. + err := w.c.flushFrame(final, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.c.writePos:], p[:n]) + w.c.writePos += n + p = p[n:] + } + return nn, nil +} + +func (w messageWriter) Write(p []byte) (int, error) { + return w.write(false, p) +} + +func (w messageWriter) WriteString(p string) (int, error) { + if err := w.err(); err != nil { + return 0, err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.c.writePos:], p[:n]) + w.c.writePos += n + p = p[n:] + } + return nn, nil +} + +func (w messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if err := w.err(); err != nil { + return 0, err + } + for { + if w.c.writePos == len(w.c.writeBuf) { + err = w.c.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.c.writePos:]) + w.c.writePos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, 
err +} + +func (w messageWriter) Close() error { + if err := w.err(); err != nil { + return err + } + return w.c.flushFrame(true, nil) +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + wr, err := c.NextWriter(messageType) + if err != nil { + return err + } + w := wr.(messageWriter) + if _, err := w.write(true, data); err != nil { + return err + } + if c.writeSeq == w.seq { + if err := c.flushFrame(true, nil); err != nil { + return err + } + } + return nil +} + +// SetWriteDeadline sets the deadline for future calls to NextWriter and the +// io.WriteCloser returned from NextWriter. If the deadline is reached, the +// call will fail with a timeout instead of blocking. A zero value for t means +// Write will not time out. Even if Write times out, it may return n > 0, +// indicating that some of the data was successfully written. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. 
+ + var b [8]byte + if err := c.read(b[:2]); err != nil { + return noFrame, err + } + + final := b[0]&finalBit != 0 + frameType := int(b[0] & 0xf) + reserved := int((b[0] >> 4) & 0x7) + mask := b[1]&maskBit != 0 + c.readRemaining = int64(b[1] & 0x7f) + + if reserved != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits " + strconv.Itoa(reserved)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length. + + switch c.readRemaining { + case 126: + if err := c.read(b[:2]); err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint16(b[:2])) + case 127: + if err := c.read(b[:8]); err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint64(b[:8])) + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + if err := c.read(c.readMaskKey[:]); err != nil { + return noFrame, err + } + } + + // 5. For text and binary messages, enforce read limit and return. 
+ + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + payload := make([]byte, c.readRemaining) + c.readRemaining = 0 + if err := c.read(payload); err != nil { + return noFrame, err + } + maskBytes(c.readMaskKey, 0, payload) + + // 7. Process control frame payload. + + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + c.WriteControl(CloseMessage, []byte{}, time.Now().Add(writeWait)) + if len(payload) < 2 { + return noFrame, io.EOF + } + closeCode := binary.BigEndian.Uint16(payload) + switch closeCode { + case CloseNormalClosure, CloseGoingAway: + return noFrame, io.EOF + default: + return noFrame, errors.New("websocket: close " + + strconv.Itoa(int(closeCode)) + " " + + string(payload[2:])) + } + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +func (c *Conn) read(buf []byte) error { + var err error + for len(buf) > 0 && err == nil { + var nn int + nn, err = c.br.Read(buf) + buf = buf[nn:] + } + if err == io.EOF { + if len(buf) == 0 { + err = nil + } else { + err = io.ErrUnexpectedEOF + } + } + return err +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. 
NextReader discards +// the previous message if the application has not already consumed it. +// +// The NextReader method and the readers returned from the method cannot be +// accessed by more than one goroutine at a time. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + + c.readSeq += 1 + c.readLength = 0 + + for c.readErr == nil { + var frameType int + frameType, c.readErr = c.advanceFrame() + if frameType == TextMessage || frameType == BinaryMessage { + return frameType, messageReader{c, c.readSeq}, nil + } + } + return noFrame, nil, c.readErr +} + +type messageReader struct { + c *Conn + seq int +} + +func (r messageReader) Read(b []byte) (n int, err error) { + + if r.seq != r.c.readSeq { + return 0, io.EOF + } + + for r.c.readErr == nil { + + if r.c.readRemaining > 0 { + if int64(len(b)) > r.c.readRemaining { + b = b[:r.c.readRemaining] + } + r.c.readErr = r.c.read(b) + r.c.readMaskPos = maskBytes(r.c.readMaskKey, r.c.readMaskPos, b) + r.c.readRemaining -= int64(len(b)) + return len(b), r.c.readErr + } + + if r.c.readFinal { + r.c.readSeq += 1 + return 0, io.EOF + } + + var frameType int + frameType, r.c.readErr = r.c.advanceFrame() + + if frameType == TextMessage || frameType == BinaryMessage { + r.c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + return 0, r.c.readErr +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the deadline for future calls to NextReader and the +// io.Reader returned from NextReader. If the deadline is reached, the call +// will fail with a timeout instead of blocking. 
A zero value for t means that +// the methods will not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size for a message read from the peer. If a +// message exceeds the limit, the connection sends a close frame to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The default ping handler sends a pong to the peer. +func (c *Conn) SetPingHandler(h func(string) error) { + if h == nil { + h = func(message string) error { + c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + return nil + } + } + c.handlePing = h +} + +// SetPongHandler sets then handler for pong messages received from the peer. +// The default pong handler does nothing. +func (c *Conn) SetPongHandler(h func(string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// SetPongHandler sets the handler for +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +func FormatCloseMessage(closeCode int, text string) []byte { + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/conn_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/conn_test.go new file mode 100644 index 00000000..3a06d343 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/conn_test.go @@ -0,0 +1,140 @@ +// Copyright 2013 Gary Burd. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" + "testing" + "testing/iotest" + "time" +) + +type fakeNetConn struct { + io.Reader + io.Writer +} + +func (c fakeNetConn) Close() error { return nil } +func (c fakeNetConn) LocalAddr() net.Addr { return nil } +func (c fakeNetConn) RemoteAddr() net.Addr { return nil } +func (c fakeNetConn) SetDeadline(t time.Time) error { return nil } +func (c fakeNetConn) SetReadDeadline(t time.Time) error { return nil } +func (c fakeNetConn) SetWriteDeadline(t time.Time) error { return nil } + +func TestFraming(t *testing.T) { + frameSizes := []int{0, 1, 2, 124, 125, 126, 127, 128, 129, 65534, 65535, 65536, 65537} + var readChunkers = []struct { + name string + f func(io.Reader) io.Reader + }{ + {"half", iotest.HalfReader}, + {"one", iotest.OneByteReader}, + {"asis", func(r io.Reader) io.Reader { return r }}, + } + + writeBuf := make([]byte, 65537) + for i := range writeBuf { + writeBuf[i] = byte(i) + } + + for _, isServer := range []bool{true, false} { + for _, chunker := range readChunkers { + + var connBuf bytes.Buffer + wc := newConn(fakeNetConn{Reader: nil, Writer: &connBuf}, isServer, 1024, 1024) + rc := newConn(fakeNetConn{Reader: chunker.f(&connBuf), Writer: nil}, !isServer, 1024, 1024) + + for _, n := range frameSizes { + for _, iocopy := range []bool{true, false} { + name := fmt.Sprintf("s:%v, r:%s, n:%d c:%v", isServer, chunker.name, n, iocopy) + + w, err := wc.NextWriter(TextMessage) + if err != nil { + t.Errorf("%s: wc.NextWriter() returned %v", name, err) + continue + } + var nn int + if iocopy { + var n64 int64 + n64, err = io.Copy(w, bytes.NewReader(writeBuf[:n])) + nn = int(n64) + } else { + nn, err = w.Write(writeBuf[:n]) + } + if err != nil || nn != n { + t.Errorf("%s: w.Write(writeBuf[:n]) returned %d, %v", name, nn, err) + continue + } + err = w.Close() + if err != nil { + t.Errorf("%s: w.Close() returned %v", name, err) + continue + } + + opCode, r, err := rc.NextReader() + if 
err != nil || opCode != TextMessage { + t.Errorf("%s: NextReader() returned %d, r, %v", name, opCode, err) + continue + } + rbuf, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("%s: ReadFull() returned rbuf, %v", name, err) + continue + } + + if len(rbuf) != n { + t.Errorf("%s: len(rbuf) is %d, want %d", name, len(rbuf), n) + continue + } + + for i, b := range rbuf { + if byte(i) != b { + t.Errorf("%s: bad byte at offset %d", name, i) + break + } + } + } + } + } + } +} + +func TestReadLimit(t *testing.T) { + + const readLimit = 512 + message := make([]byte, readLimit+1) + + var b1, b2 bytes.Buffer + wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, readLimit-2) + rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024) + rc.SetReadLimit(readLimit) + + // Send message at the limit with interleaved pong. + w, _ := wc.NextWriter(BinaryMessage) + w.Write(message[:readLimit-1]) + wc.WriteControl(PongMessage, []byte("this is a pong"), time.Now().Add(10*time.Second)) + w.Write(message[:1]) + w.Close() + + // Send message larger than the limit. + wc.WriteMessage(BinaryMessage, message[:readLimit+1]) + + op, _, err := rc.NextReader() + if op != BinaryMessage || err != nil { + t.Fatalf("1: NextReader() returned %d, %v", op, err) + } + op, r, err := rc.NextReader() + if op != BinaryMessage || err != nil { + t.Fatalf("2: NextReader() returned %d, %v", op, err) + } + _, err = io.Copy(ioutil.Discard, r) + if err != ErrReadLimit { + t.Fatalf("io.Copy() returned %v", err) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/doc.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/doc.go new file mode 100644 index 00000000..88aaef69 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/doc.go @@ -0,0 +1,103 @@ +// Copyright 2013 Gary Burd. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrade function from an HTTP request handler to get a pointer to a +// Conn: +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := websocket.Upgrade(w, r.Header, nil, 1024, 1024) +// if _, ok := err.(websocket.HandshakeError); ok { +// http.Error(w, "Not a websocket handshake", 400) +// return +// } else if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection WriteMessage and ReadMessages methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// return +// } +// if _, err := conn.WriteMessaage(messageType, p); err != nil { +// return err +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. 
This snippet +// snippet shows how to echo messages using the NextWriter and NextReader +// methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. +// +// Connections handle received ping and pong messages by invoking a callback +// function set with SetPingHandler and SetPongHandler methods. These callback +// functions can be invoked from the ReadMessage method, the NextReader method +// or from a call to the data message reader returned from NextReader. +// +// Connections handle received close messages by returning an error from the +// ReadMessage method, the NextReader method or from a call to the data message +// reader returned from NextReader. 
+// +// Concurrency +// +// A Conn supports a single concurrent caller to the write methods (NextWriter, +// SetWriteDeadline, WriteMessage) and a single concurrent caller to the read +// methods (NextReader, SetReadDeadline, ReadMessage). The Close and +// WriteControl methods can be called concurrently with all other methods. +// +package websocket diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/json.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/json.go new file mode 100644 index 00000000..01723b27 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/json.go @@ -0,0 +1,49 @@ +// Copyright 2013 Gary Burd. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" +) + +// DEPRECATED: use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v to the connection. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// DEPRECATED: use c.WriteJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Marshal function for details +// about the conversion of JSON to a Go value. 
+func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + return json.NewDecoder(r).Decode(v) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/json_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/json_test.go new file mode 100644 index 00000000..f1b7e51d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/json_test.go @@ -0,0 +1,63 @@ +// Copyright 2013 Gary Burd. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "reflect" + "testing" +) + +func TestJSON(t *testing.T) { + var buf bytes.Buffer + c := fakeNetConn{&buf, &buf} + wc := newConn(c, true, 1024, 1024) + rc := newConn(c, false, 1024, 1024) + + var actual, expect struct { + A int + B string + } + expect.A = 1 + expect.B = "hello" + + if err := wc.WriteJSON(&expect); err != nil { + t.Fatal("write", err) + } + + if err := rc.ReadJSON(&actual); err != nil { + t.Fatal("read", err) + } + + if !reflect.DeepEqual(&actual, &expect) { + t.Fatal("equal", actual, expect) + } +} + +func TestDeprecatedJSON(t *testing.T) { + var buf bytes.Buffer + c := fakeNetConn{&buf, &buf} + wc := newConn(c, true, 1024, 1024) + rc := newConn(c, false, 1024, 1024) + + var actual, expect struct { + A int + B string + } + expect.A = 1 + expect.B = "hello" + + if err := WriteJSON(wc, &expect); err != nil { + t.Fatal("write", err) + } + + if err := ReadJSON(rc, &actual); err != nil { + t.Fatal("read", err) + } + + if !reflect.DeepEqual(&actual, &expect) { + t.Fatal("equal", actual, expect) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/server.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/server.go new file mode 100644 index 
00000000..ebc43dd1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/server.go @@ -0,0 +1,128 @@ +// Copyright 2013 Gary Burd. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "net" + "net/http" + "strings" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The application is responsible for checking the request origin before +// calling Upgrade. An example implementation of the same origin policy is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", 403) +// return +// } +// +// If the endpoint supports WebSocket subprotocols, then the application is +// responsible for selecting a subprotocol that is acceptable to the client and +// echoing that value back to the client. Use the Subprotocols function to get +// the list of protocols specified by the client. Use the +// Sec-Websocket-Protocol response header to echo the selected protocol back to +// the client. +// +// Appilcations can set cookies by adding a Set-Cookie header to the response +// header. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. 
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + + if values := r.Header["Sec-Websocket-Version"]; len(values) == 0 || values[0] != "13" { + return nil, HandshakeError{"websocket: version != 13"} + } + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return nil, HandshakeError{"websocket: connection header != upgrade"} + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return nil, HandshakeError{"websocket: upgrade != websocket"} + } + + var challengeKey string + values := r.Header["Sec-Websocket-Key"] + if len(values) == 0 || values[0] == "" { + return nil, HandshakeError{"websocket: key missing or blank"} + } + challengeKey = values[0] + + var ( + netConn net.Conn + br *bufio.Reader + err error + ) + + h, ok := w.(http.Hijacker) + if !ok { + return nil, errors.New("websocket: response does not implement http.Hijacker") + } + var rw *bufio.ReadWriter + netConn, rw, err = h.Hijack() + br = rw.Reader + + if br.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + c := newConn(netConn, true, readBufSize, writeBufSize) + + p := c.writeBuf[:0] + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + for k, vs := range responseHeader { + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + + return c, nil +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. 
+func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/server_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/server_test.go new file mode 100644 index 00000000..709ee04d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/server_test.go @@ -0,0 +1,33 @@ +// Copyright 2013 Gary Burd. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "net/http" + "reflect" + "testing" +) + +var subprotocolTests = []struct { + h string + protocols []string +}{ + {"", nil}, + {"foo", []string{"foo"}}, + {"foo,bar", []string{"foo", "bar"}}, + {"foo, bar", []string{"foo", "bar"}}, + {" foo, bar", []string{"foo", "bar"}}, + {" foo, bar ", []string{"foo", "bar"}}, +} + +func TestSubprotocols(t *testing.T) { + for _, st := range subprotocolTests { + r := http.Request{Header: http.Header{"Sec-Websocket-Protocol": {st.h}}} + protocols := Subprotocols(&r) + if !reflect.DeepEqual(st.protocols, protocols) { + t.Errorf("SubProtocols(%q) returned %#v, want %#v", st.h, protocols, st.protocols) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/util.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/util.go new file mode 100644 index 00000000..a9cfbf97 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/gorilla/websocket/util.go @@ -0,0 +1,44 @@ +// Copyright 2013 Gary Burd. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" +) + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains token. +func tokenListContainsValue(header http.Header, name string, value string) bool { + for _, v := range header[name] { + for _, s := range strings.Split(v, ",") { + if strings.EqualFold(value, strings.TrimSpace(s)) { + return true + } + } + } + return false +} + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/.gitignore b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/.gitignore new file mode 100644 index 00000000..e4e5f6c8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/.gitignore @@ -0,0 +1 @@ +*~ \ No newline at end of file diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/LICENSE b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + 
+1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/README.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/README.md new file mode 100644 index 00000000..6970b71e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/README.md @@ -0,0 +1,40 @@ +gotaglib +======== + +Apache-licensed audio tag decoding library written in pure +go. Designed to mirror the structure of +[taglib](http://taglib.github.io/) without being a direct port. + +## tl;dr +``` +go get github.com/hjfreyer/gotaglib +``` +``` +import "github.com/hjfreyer/gotaglib" +... 
+func main() { + f, err := os.Open("song.mp3") + tag, err := gotaglib.Decode(f) + fmt.Print(tag.Title()) +} +``` +## Features +Currently has basic read support for id3v2.3 and id3v2.4. No writing +support yet. + +## Goals +* Pure go. +* Not necessarily feature complete, but future compatible. +* Good interfaces. +* Handle errors properly (don't panic). + +## Why didn't you just use… ? +There are many other Go projects which do tag parsing, but all the +ones I found violate at least one of the goals above. + +## Why don't you support… ? +Probably no reason other than it hasn't happened yet. If you need a +particular format, or an additional feature, or you've found a file +which gotaglib *should* parse but doesn't, please create an +[issue](https://github.com/hjfreyer/gotaglib/issues), or better yet, +send a patch. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v23.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v23.go new file mode 100644 index 00000000..39cbf7a5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v23.go @@ -0,0 +1,308 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package id3 + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +func id3v23Err(format string, args ...interface{}) error { + return &ErrFormat{ + Format: "ID3 version 2.3", + Err: fmt.Errorf(format, args...), + } +} + +type Id3v23Tag struct { + Header Id3v23Header + ExtendedHeader Id3v23ExtendedHeader + Frames map[string][]*Id3v23Frame +} + +func getSimpleId3v23TextFrame(frames []*Id3v23Frame) string { + if len(frames) == 0 { + return "" + } + fields, err := GetId3v23TextIdentificationFrame(frames[0]) + if err != nil { + return "" + } + return strings.Join(fields, " ") +} + +func (t *Id3v23Tag) Title() string { + return getSimpleId3v23TextFrame(t.Frames["TIT2"]) +} + +func (t *Id3v23Tag) Artist() string { + return getSimpleId3v23TextFrame(t.Frames["TPE1"]) +} + +func (t *Id3v23Tag) Album() string { + return getSimpleId3v23TextFrame(t.Frames["TALB"]) +} + +func (t *Id3v23Tag) Comment() string { + return "" +} + +func (t *Id3v23Tag) Genre() string { + return getSimpleId3v23TextFrame(t.Frames["TCON"]) +} + +func (t *Id3v23Tag) Year() time.Time { + yearStr := getSimpleId3v23TextFrame(t.Frames["TYER"]) + if len(yearStr) != 4 { + return time.Time{} + } + + yearInt, err := strconv.Atoi(yearStr) + if err != nil { + return time.Time{} + } + + return time.Date(yearInt, time.January, 1, 0, 0, 0, 0, time.UTC) +} + +func (t *Id3v23Tag) Track() uint32 { + track, err := parseLeadingInt(getSimpleId3v23TextFrame(t.Frames["TRCK"])) + if err != nil { + return 0 + } + return uint32(track) +} + +func (t *Id3v23Tag) Disc() uint32 { + disc, err := parseLeadingInt(getSimpleId3v23TextFrame(t.Frames["TPOS"])) + if err != nil { + return 0 + } + return uint32(disc) +} + +func (t *Id3v23Tag) CustomFrames() map[string]string { + info := make(map[string]string) + for _, frame := range t.Frames["TXXX"] { + // See http://id3.org/id3v2.3.0#User_defined_text_information_frame. + // TXXX frames contain NUL-separated descriptions and values. 
+ parts, err := GetId3v23TextIdentificationFrame(frame) + if err == nil && len(parts) == 2 { + info[parts[0]] = parts[1] + } + } + return info +} + +func (t *Id3v23Tag) TagSize() uint32 { + return 10 + t.Header.Size +} + +type Id3v23Header struct { + MinorVersion byte + Flags Id3v23HeaderFlags + Size uint32 +} + +type Id3v23HeaderFlags struct { + Unsynchronization bool + ExtendedHeader bool + ExperimentalIndicator bool +} + +type Id3v23ExtendedHeader struct { + Size uint32 + Flags Id3v23ExtendedHeaderFlags + PaddingSize uint32 +} + +type Id3v23ExtendedHeaderFlags struct { + CrcDataPresent bool +} + +type Id3v23Frame struct { + Header Id3v23FrameHeader + Content []byte +} + +type Id3v23FrameHeader struct { + Id string + Size uint32 + Flags Id3v23FrameHeaderFlags +} + +type Id3v23FrameHeaderFlags struct { + TagAlterPreservation bool + FileAlterPreservation bool + ReadOnly bool + + Compression bool + Encryption bool + GroupingIdentity bool +} + +func Decode23(r io.ReaderAt) (*Id3v23Tag, error) { + headerBytes := make([]byte, 10) + if _, err := r.ReadAt(headerBytes, 0); err != nil { + return nil, err + } + + header, err := parseId3v23Header(headerBytes) + if err != nil { + return nil, err + } + + br := bufio.NewReader(io.NewSectionReader(r, 10, int64(header.Size))) + + var extendedHeader Id3v23ExtendedHeader + if header.Flags.ExtendedHeader { + var err error + if extendedHeader, err = parseId3v23ExtendedHeader(br); err != nil { + return nil, err + } + } + + result := &Id3v23Tag{ + Header: header, + ExtendedHeader: extendedHeader, + Frames: make(map[string][]*Id3v23Frame), + } + + var totalSize uint32 + totalSize += extendedHeader.Size + + for totalSize < header.Size { + hasFrame, err := hasId3v23Frame(br) + if err != nil { + return nil, err + } + + if !hasFrame { + break + } + + frame, err := parseId3v23Frame(br) + if err != nil { + return nil, err + } + + // 10 bytes for the frame header, and the body. 
+ totalSize += 10 + frame.Header.Size + + result.Frames[frame.Header.Id] = append(result.Frames[frame.Header.Id], frame) + } + return result, nil +} + +func parseId3v23Header(headerBytes []byte) (result Id3v23Header, err error) { + if !bytes.Equal(headerBytes[0:4], []byte{'I', 'D', '3', 3}) { + err = id3v23Err("invalid magic numbers") + return + } + + result.MinorVersion = headerBytes[4] + + flags := headerBytes[5] + + result.Flags.Unsynchronization = (flags & (1 << 7)) != 0 + result.Flags.ExtendedHeader = (flags & (1 << 6)) != 0 + result.Flags.ExperimentalIndicator = (flags & (1 << 5)) != 0 + + result.Size = uint32(parseBase128Int(headerBytes[6:10])) + return +} + +func parseId3v23ExtendedHeader(br *bufio.Reader) (result Id3v23ExtendedHeader, err error) { + sizeBytes := make([]byte, 4) + if _, err = io.ReadFull(br, sizeBytes); err != nil { + return + } + + result.Size = uint32(parseBase128Int(sizeBytes)) + + headerBytes := make([]byte, result.Size) + if _, err = io.ReadFull(br, headerBytes); err != nil { + return + } + + // Store the flags bytes and the size of the padding. + flags, paddingSize, headerBytes := headerBytes[0:2], headerBytes[2:6], headerBytes[6:] + + result.Flags.CrcDataPresent = (flags[0] & (1 << 7)) != 0 + + result.PaddingSize = uint32(parseBase128Int(paddingSize)) + // Don't do anything with the rest of the extended header for now. 
+ + return +} + +func hasId3v23Frame(br *bufio.Reader) (bool, error) { + data, err := br.Peek(4) + if err != nil { + return false, err + } + + for _, c := range data { + if (c < 'A' || 'Z' < c) && (c < '0' || '9' < c) { + return false, nil + } + } + return true, nil +} + +func parseId3v23Frame(br *bufio.Reader) (*Id3v23Frame, error) { + header, err := parseId3v23FrameHeader(br) + if err != nil { + return nil, err + } + + content := make([]byte, header.Size) + if _, err := io.ReadFull(br, content); err != nil { + return nil, err + } + + return &Id3v23Frame{ + Header: header, + Content: content, + }, nil +} + +func parseId3v23FrameHeader(br *bufio.Reader) (result Id3v23FrameHeader, err error) { + headerBytes := make([]byte, 10) + if _, err = io.ReadFull(br, headerBytes); err != nil { + return + } + + idBytes, sizeBytes, flags := headerBytes[0:4], headerBytes[4:8], headerBytes[8:10] + result.Id = string(idBytes) + + // Read the size as 4 base128 bytes. + result.Size = uint32(parseBase128Int(sizeBytes)) + + result.Flags.TagAlterPreservation = (flags[0] & (1 << 7)) != 0 + result.Flags.FileAlterPreservation = (flags[0] & (1 << 6)) != 0 + result.Flags.ReadOnly = (flags[0] & (1 << 5)) != 0 + + result.Flags.Compression = (flags[1] & (1 << 7)) != 0 + result.Flags.Encryption = (flags[1] & (1 << 6)) != 0 + result.Flags.GroupingIdentity = (flags[1] & (1 << 5)) != 0 + + return result, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v23frames.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v23frames.go new file mode 100644 index 00000000..5e6f8b1e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v23frames.go @@ -0,0 +1,19 @@ +// Copyright 2013 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package id3 + +func GetId3v23TextIdentificationFrame(frame *Id3v23Frame) ([]string, error) { + return getTextIdentificationFrame(frame.Content) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v24.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v24.go new file mode 100644 index 00000000..2bd0334d --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v24.go @@ -0,0 +1,317 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package id3 + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +type Id3v24Tag struct { + Header Id3v24Header + ExtendedHeader Id3v24ExtendedHeader + Frames map[string][]*Id3v24Frame +} + +func id3v24Err(format string, args ...interface{}) error { + return &ErrFormat{ + Format: "ID3 version 2.4", + Err: fmt.Errorf(format, args...), + } +} + +func getSimpleId3v24TextFrame(frames []*Id3v24Frame) string { + if len(frames) == 0 { + return "" + } + fields, err := GetId3v24TextIdentificationFrame(frames[0]) + if err != nil { + return "" + } + return strings.Join(fields, " ") +} + +func (t *Id3v24Tag) Title() string { + return getSimpleId3v24TextFrame(t.Frames["TIT2"]) +} + +func (t *Id3v24Tag) Artist() string { + return getSimpleId3v24TextFrame(t.Frames["TPE1"]) +} + +func (t *Id3v24Tag) Album() string { + return getSimpleId3v24TextFrame(t.Frames["TALB"]) +} + +func (t *Id3v24Tag) Comment() string { + return "" +} + +func (t *Id3v24Tag) Genre() string { + return getSimpleId3v24TextFrame(t.Frames["TCON"]) +} + +func (t *Id3v24Tag) Year() time.Time { + yearStr := getSimpleId3v24TextFrame(t.Frames["TDRC"]) + if len(yearStr) < 4 { + return time.Time{} + } + + yearInt, err := strconv.Atoi(yearStr[0:4]) + if err != nil { + return time.Time{} + } + + return time.Date(yearInt, time.January, 1, 0, 0, 0, 0, time.UTC) +} + +func (t *Id3v24Tag) Track() uint32 { + track, err := parseLeadingInt(getSimpleId3v24TextFrame(t.Frames["TRCK"])) + if err != nil { + return 0 + } + return uint32(track) +} + +func (t *Id3v24Tag) Disc() uint32 { + disc, err := parseLeadingInt(getSimpleId3v24TextFrame(t.Frames["TPOS"])) + if err != nil { + return 0 + } + return uint32(disc) +} + +func (t *Id3v24Tag) CustomFrames() map[string]string { + info := make(map[string]string) + for _, frame := range t.Frames["TXXX"] { + // See "4.2.6. User defined text information frame" at + // http://id3.org/id3v2.4.0-frames. 
TXXX frames contain + // NUL-separated descriptions and values. + parts, err := GetId3v24TextIdentificationFrame(frame) + if err == nil && len(parts) == 2 { + info[parts[0]] = parts[1] + } + } + return info +} + +func (t *Id3v24Tag) TagSize() uint32 { + return 10 + t.Header.Size +} + +type Id3v24Header struct { + MinorVersion byte + Flags Id3v24HeaderFlags + Size uint32 +} + +type Id3v24HeaderFlags struct { + Unsynchronization bool + ExtendedHeader bool + ExperimentalIndicator bool + FooterPresent bool +} + +type Id3v24ExtendedHeader struct { + Size uint32 + Flags Id3v24ExtendedHeaderFlags +} + +type Id3v24ExtendedHeaderFlags struct { + Update bool + CrcDataPresent bool + TagRestrictions bool +} + +type Id3v24Frame struct { + Header Id3v24FrameHeader + Content []byte +} + +type Id3v24FrameHeader struct { + Id string + Size uint32 + Flags Id3v24FrameHeaderFlags +} + +type Id3v24FrameHeaderFlags struct { + TagAlterPreservation bool + FileAlterPreservation bool + ReadOnly bool + + GroupingIdentity bool + Compression bool + Encryption bool + Unsynchronization bool + DataLengthIndicator bool +} + +func Decode24(r io.ReaderAt) (*Id3v24Tag, error) { + headerBytes := make([]byte, 10) + if _, err := r.ReadAt(headerBytes, 0); err != nil { + return nil, err + } + + header, err := parseId3v24Header(headerBytes) + if err != nil { + return nil, err + } + + br := bufio.NewReader(io.NewSectionReader(r, 10, int64(header.Size))) + + var extendedHeader Id3v24ExtendedHeader + if header.Flags.ExtendedHeader { + var err error + if extendedHeader, err = parseId3v24ExtendedHeader(br); err != nil { + return nil, err + } + } + + result := &Id3v24Tag{ + Header: header, + ExtendedHeader: extendedHeader, + Frames: make(map[string][]*Id3v24Frame), + } + + var totalSize uint32 + totalSize += extendedHeader.Size + + for totalSize < header.Size { + hasFrame, err := hasId3v24Frame(br) + if err != nil { + return nil, err + } + + if !hasFrame { + break + } + + frame, err := parseId3v24Frame(br) + if 
err != nil { + return nil, err + } + + // 10 bytes for the frame header, and the body. + totalSize += 10 + frame.Header.Size + + result.Frames[frame.Header.Id] = append(result.Frames[frame.Header.Id], frame) + } + return result, nil +} + +func parseId3v24Header(headerBytes []byte) (result Id3v24Header, err error) { + if !bytes.Equal(headerBytes[0:4], []byte{'I', 'D', '3', 4}) { + err = id3v24Err("invalid magic numbers") + return + } + + result.MinorVersion = headerBytes[4] + + flags := headerBytes[5] + + result.Flags.Unsynchronization = (flags & (1 << 7)) != 0 + result.Flags.ExtendedHeader = (flags & (1 << 6)) != 0 + result.Flags.ExperimentalIndicator = (flags & (1 << 5)) != 0 + result.Flags.FooterPresent = (flags & (1 << 4)) != 0 + + result.Size = uint32(parseBase128Int(headerBytes[6:10])) + return +} + +func parseId3v24ExtendedHeader(br *bufio.Reader) (result Id3v24ExtendedHeader, err error) { + sizeBytes, err := br.Peek(4) + if err != nil { + return + } + + result.Size = uint32(parseBase128Int(sizeBytes)) + + headerBytes := make([]byte, result.Size) + if _, err = io.ReadFull(br, headerBytes); err != nil { + return + } + + // Discard size and number of flags bytes, and store flags. + _, _, flags, headerBytes := headerBytes[:4], headerBytes[4], headerBytes[5], headerBytes[5:] + + result.Flags.Update = (flags & (1 << 6)) != 0 + result.Flags.CrcDataPresent = (flags & (1 << 5)) != 0 + result.Flags.TagRestrictions = (flags & (1 << 4)) != 0 + + // Don't do anything with the rest of the extended header for now. 
+ + return +} + +func hasId3v24Frame(br *bufio.Reader) (bool, error) { + data, err := br.Peek(4) + if err != nil { + return false, err + } + + for _, c := range data { + if (c < 'A' || 'Z' < c) && (c < '0' || '9' < c) { + return false, nil + } + } + return true, nil +} + +func parseId3v24Frame(br *bufio.Reader) (*Id3v24Frame, error) { + header, err := parseId3v24FrameHeader(br) + if err != nil { + return nil, err + } + + content := make([]byte, header.Size) + if _, err := io.ReadFull(br, content); err != nil { + return nil, err + } + + return &Id3v24Frame{ + Header: header, + Content: content, + }, nil +} + +func parseId3v24FrameHeader(br *bufio.Reader) (result Id3v24FrameHeader, err error) { + headerBytes := make([]byte, 10) + if _, err = io.ReadFull(br, headerBytes); err != nil { + return + } + + idBytes, sizeBytes, flags := headerBytes[0:4], headerBytes[4:8], headerBytes[8:10] + result.Id = string(idBytes) + + // Read the size as 4 base128 bytes. + result.Size = uint32(parseBase128Int(sizeBytes)) + + result.Flags.TagAlterPreservation = (flags[0] & (1 << 6)) != 0 + result.Flags.FileAlterPreservation = (flags[0] & (1 << 5)) != 0 + result.Flags.ReadOnly = (flags[0] & (1 << 4)) != 0 + + result.Flags.GroupingIdentity = (flags[1] & (1 << 6)) != 0 + result.Flags.Compression = (flags[1] & (1 << 3)) != 0 + result.Flags.Encryption = (flags[1] & (1 << 2)) != 0 + result.Flags.Unsynchronization = (flags[1] & (1 << 1)) != 0 + result.Flags.DataLengthIndicator = (flags[1] & (1 << 0)) != 0 + + return result, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v24_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v24_test.go new file mode 100644 index 00000000..432d26a0 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v24_test.go @@ -0,0 +1,42 @@ +// Copyright 2013 Google Inc. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package id3 + +// import ( +// "os" +// "testing" +// ) + +// func TestParse(t *testing.T) { +// f, err := os.Open("/Users/hjfreyer/Downloads/01 Astronaut.mp3") +// if err != nil { +// t.Errorf("%v", err) +// } + +// p, err := ParseTag(f) +// if err != nil { +// t.Errorf("%v", err) +// } +// t.Logf("%+v", p.(*Id3v23Tag)) +// t.Logf("%+v", *p.(*Id3v23Tag).Frames["TALB"]) + +// s, err := p.Title() +// if err != nil { +// t.Errorf("%v", err) +// } + +// t.Log(s) +// t.Fail() +// } diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v24frames.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v24frames.go new file mode 100644 index 00000000..1bb20e82 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/id3v24frames.go @@ -0,0 +1,19 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package id3 + +func GetId3v24TextIdentificationFrame(frame *Id3v24Frame) ([]string, error) { + return getTextIdentificationFrame(frame.Content) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/util.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/util.go new file mode 100644 index 00000000..a8a6a648 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/id3/util.go @@ -0,0 +1,117 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package id3 + +import ( + "encoding/binary" + "fmt" + "strconv" + "strings" + "unicode/utf16" +) + +type ErrFormat struct { + Format string + Err error +} + +func (e ErrFormat) Error() string { + return fmt.Sprintf("gotaglib: error parsing format %q: %v", e.Format, e.Err) +} + +func parseBase128Int(bytes []byte) uint64 { + var result uint64 + for _, b := range bytes { + result = result << 7 + result |= uint64(b) + } + return result +} + +func parseLeadingInt(s string) (int, error) { + var intEnd int + for intEnd < len(s) && '0' <= s[intEnd] && s[intEnd] <= '9' { + intEnd++ + } + return strconv.Atoi(s[0:intEnd]) +} + +func getTextIdentificationFrame(content []byte) ([]string, error) { + normalized, err := parseText(content) + if err != nil { + return nil, err + } + return strings.Split(normalized, string([]byte{0})), nil +} + +// Parses a string from frame data. The first byte represents the encoding: +// 0x01 ISO-8859-1 +// 0x02 UTF-16 w/ BOM +// 0x03 UTF-16BE w/o BOM +// 0x04 UTF-8 +// +// Refer to section 4 of http://id3.org/id3v2.4.0-structure +func parseText(strBytes []byte) (string, error) { + encoding, strBytes := strBytes[0], strBytes[1:] + + switch encoding { + case 0: // ISO-8859-1 text. + return parseIso8859(strBytes), nil + + case 1: // UTF-16 with BOM. + return parseUtf16WithBOM(strBytes) + + case 2: // UTF-16BE without BOM. + return parseUtf16(strBytes, binary.BigEndian) + + case 3: // UTF-8 text. 
+ return parseUtf8(strBytes) + + default: + return "", id3v24Err("invalid encoding byte %x", encoding) + } +} + +func parseIso8859(strBytes []byte) string { + runes := make([]rune, len(strBytes)) + for i, b := range strBytes { + runes[i] = rune(b) + } + return string(runes) +} + +func parseUtf16WithBOM(strBytes []byte) (string, error) { + if strBytes[0] == 0xFE && strBytes[1] == 0xFF { + return parseUtf16(strBytes[2:], binary.BigEndian) + } + if strBytes[0] == 0xFF && strBytes[1] == 0xFE { + return parseUtf16(strBytes[2:], binary.LittleEndian) + } + return "", id3v24Err("invalid byte order marker %x %x", strBytes[0], strBytes[1]) +} + +func parseUtf16(strBytes []byte, bo binary.ByteOrder) (string, error) { + shorts := make([]uint16, 0, len(strBytes)/2) + for i := 0; i < len(strBytes); i += 2 { + short := bo.Uint16(strBytes[i : i+2]) + shorts = append(shorts, short) + } + + return string(utf16.Decode(shorts)), nil +} + +func parseUtf8(strBytes []byte) (string, error) { + return string(strBytes), nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/taglib.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/taglib.go new file mode 100644 index 00000000..d02aed87 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/taglib.go @@ -0,0 +1,76 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package taglib provides utilities for parsing audio tags in +// various formats. +package taglib + +import ( + "bytes" + "errors" + "io" + "time" + + "camlistore.org/third_party/github.com/hjfreyer/taglib-go/taglib/id3" +) + +var ( + ErrUnrecognizedFormat = errors.New("taglib: format not recognized") +) + +// GenericTag is implemented by all the tag types in this project. It +// gives an incomplete view of the information in each tag type, but +// is good enough for most purposes. +type GenericTag interface { + Title() string + Artist() string + Album() string + Comment() string + Genre() string + Year() time.Time + Track() uint32 + Disc() uint32 + + // CustomFrames returns non-standard, user-defined frames as a map from + // descriptions (e.g. "PERFORMER", "MusicBrainz Album Id", etc.) to + // values. + CustomFrames() map[string]string + + // TagSize returns the total size of the tag's header and frames, + // i.e. the position at which audio data starts. + TagSize() uint32 +} + +// Decode reads r and determines which tag format the data is in, if +// any, and calls the decoding function for that format. size +// indicates the total number of bytes accessible through r. 
+func Decode(r io.ReaderAt, size int64) (GenericTag, error) { + magic := make([]byte, 4) + if _, err := r.ReadAt(magic, 0); err != nil { + return nil, err + } + + if !bytes.Equal(magic[:3], []byte("ID3")) { + return nil, ErrUnrecognizedFormat + } + + switch magic[3] { + case 3: + return id3.Decode23(r) + case 4: + return id3.Decode24(r) + default: + return nil, ErrUnrecognizedFormat + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/taglib_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/taglib_test.go new file mode 100644 index 00000000..4bd4fb79 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/taglib_test.go @@ -0,0 +1,55 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package taglib + +import ( + "fmt" + "os" + _ "testing" +) + +func ExampleDecode() { + f, err := os.Open("testdata/test24.mp3") + if err != nil { + panic(err) + } + fi, err := f.Stat() + if err != nil { + panic(err) + } + tag, err := Decode(f, fi.Size()) + if err != nil { + panic(err) + } + + fmt.Println("Title:", tag.Title()) + fmt.Println("Artist:", tag.Artist()) + fmt.Println("Album:", tag.Album()) + fmt.Println("Genre:", tag.Genre()) + fmt.Println("Year:", tag.Year()) + fmt.Println("Disc:", tag.Disc()) + fmt.Println("Track:", tag.Track()) + fmt.Println("Performer:", tag.CustomFrames()["PERFORMER"]) + + // Output: + // Title: Test Name + // Artist: Test Artist + // Album: Test Album + // Genre: Classical + // Year: 2008-01-01 00:00:00 +0000 UTC + // Disc: 3 + // Track: 7 + // Performer: Somebody +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/testdata/test24.mp3 b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/testdata/test24.mp3 new file mode 100644 index 00000000..4830b22e Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/third_party/github.com/hjfreyer/taglib-go/taglib/testdata/test24.mp3 differ diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/LICENSE.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/LICENSE.md new file mode 100644 index 00000000..5773904a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/LICENSE.md @@ -0,0 +1,8 @@ +Copyright (c) 2011-2013, 'pq' Contributors +Portions Copyright (C) 2011 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the 
Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/README.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/README.md new file mode 100644 index 00000000..57641033 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/README.md @@ -0,0 +1,103 @@ +# pq - A pure Go postgres driver for Go's database/sql package + +## Install + + go get github.com/lib/pq + +## Docs + + + +## Use + + package main + + import ( + _ "github.com/lib/pq" + "database/sql" + ) + + func main() { + db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=verify-full") + // ... + } + +**Connection String Parameters** + +These are a subset of the libpq connection parameters. In addition, a +number of the [environment +variables](http://www.postgresql.org/docs/9.1/static/libpq-envars.html) +supported by libpq are also supported. Just like libpq, these have +lower precedence than explicitly provided connection parameters. + +See http://www.postgresql.org/docs/9.1/static/libpq-connect.html. + +* `dbname` - The name of the database to connect to +* `user` - The user to sign in as +* `password` - The user's password +* `host` - The host to connect to. 
Values that start with `/` are for unix domain sockets. (default is `localhost`) +* `port` - The port to bind to. (default is `5432`) +* `sslmode` - Whether or not to use SSL (default is `require`, this is not the default for libpq) + Valid values are: + * `disable` - No SSL + * `require` - Always SSL (skip verification) + * `verify-full` - Always SSL (require verification) + +See http://golang.org/pkg/database/sql to learn how to use with `pq` through the `database/sql` package. + +## Tests + +`go test` is used for testing. A running PostgreSQL server is +required, with the ability to log in. The default database to connect +to test with is "pqgotest," but it can be overridden using environment +variables. + +Example: + + PGHOST=/var/run/postgresql go test pq + +## Features + +* SSL +* Handles bad connections for `database/sql` +* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) +* Scan binary blobs correctly (i.e. `bytea`) +* pq.ParseURL for converting urls to connection strings for sql.Open. +* Many libpq compatible environment variables +* Unix socket support + +## Future / Things you can help with + +* Notifications: `LISTEN`/`NOTIFY` +* `hstore` sugar (i.e. handling hstore in `rows.Scan`) + +## Thank you (alphabetical) + +Some of these contributors are from the original library `bmizerany/pq.go` whose +code still exists in here. 
+ +* Andy Balholm (andybalholm) +* Ben Berkert (benburkert) +* Bill Mill (llimllib) +* Bjørn Madsen (aeons) +* Blake Gentry (bgentry) +* Brad Fitzpatrick (bradfitz) +* Chris Walsh (cwds) +* Daniel Farina (fdr) +* Everyone at The Go Team +* Ewan Chou (coocood) +* Federico Romero (federomero) +* Gary Burd (garyburd) +* Heroku (heroku) +* Jason McVetta (jmcvetta) +* Joakim Sernbrant (serbaut) +* John Gallagher (jgallagher) +* Kamil Kisiel (kisielk) +* Keith Rarick (kr) +* Maciek Sakrejda (deafbybeheading) +* Marc Brinkmann (mbr) +* Martin Olsen (martinolsen) +* Mike Lewis (mikelikespie) +* Ryan Smith (ryandotsmith) +* Samuel Stauffer (samuel) +* notedit (notedit) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/buf.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/buf.go new file mode 100644 index 00000000..1a480aff --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/buf.go @@ -0,0 +1,81 @@ +package pq + +import ( + "bytes" + "camlistore.org/third_party/github.com/lib/pq/oid" + "encoding/binary" +) + +type readBuf []byte + +func (b *readBuf) int32() (n int) { + n = int(int32(binary.BigEndian.Uint32(*b))) + *b = (*b)[4:] + return +} + +func (b *readBuf) oid() (n oid.Oid) { + n = oid.Oid(binary.BigEndian.Uint32(*b)) + *b = (*b)[4:] + return +} + +func (b *readBuf) int16() (n int) { + n = int(binary.BigEndian.Uint16(*b)) + *b = (*b)[2:] + return +} + +var stringTerm = []byte{0} + +func (b *readBuf) string() string { + i := bytes.Index(*b, stringTerm) + if i < 0 { + errorf("invalid message format; expected string terminator") + } + s := (*b)[:i] + *b = (*b)[i+1:] + return string(s) +} + +func (b *readBuf) next(n int) (v []byte) { + v = (*b)[:n] + *b = (*b)[n:] + return +} + +func (b *readBuf) byte() byte { + return b.next(1)[0] +} + +type writeBuf []byte + +func newWriteBuf(c byte) *writeBuf { + b := make(writeBuf, 5) + b[0] = c + return &b +} + +func (b *writeBuf) int32(n 
int) { + x := make([]byte, 4) + binary.BigEndian.PutUint32(x, uint32(n)) + *b = append(*b, x...) +} + +func (b *writeBuf) int16(n int) { + x := make([]byte, 2) + binary.BigEndian.PutUint16(x, uint16(n)) + *b = append(*b, x...) +} + +func (b *writeBuf) string(s string) { + *b = append(*b, (s + "\000")...) +} + +func (b *writeBuf) byte(c byte) { + *b = append(*b, c) +} + +func (b *writeBuf) bytes(v []byte) { + *b = append(*b, v...) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/conn.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/conn.go new file mode 100644 index 00000000..8fb2d140 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/conn.go @@ -0,0 +1,684 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "bufio" + "camlistore.org/third_party/github.com/lib/pq/oid" + "crypto/md5" + "crypto/tls" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "os" + "path" + "strconv" + "strings" +) + +var ( + ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") + ErrNotSupported = errors.New("pq: invalid command") +) + +type drv struct{} + +func (d *drv) Open(name string) (driver.Conn, error) { + return Open(name) +} + +func init() { + sql.Register("postgres", &drv{}) +} + +type conn struct { + c net.Conn + buf *bufio.Reader + namei int +} + +func Open(name string) (_ driver.Conn, err error) { + defer errRecover(&err) + defer errRecoverWithPGReason(&err) + + o := make(Values) + + // A number of defaults are applied here, in this order: + // + // * Very low precedence defaults applied in every situation + // * Environment variables + // * Explicitly passed connection information + o.Set("host", "localhost") + o.Set("port", "5432") + + for k, v := range parseEnviron(os.Environ()) { + o.Set(k, v) + } + + parseOpts(name, o) + + // If a user is not provided by any 
other means, the last + // resort is to use the current operating system provided user + // name. + if o.Get("user") == "" { + u, err := userCurrent() + if err != nil { + return nil, err + } else { + o.Set("user", u) + } + } + + c, err := net.Dial(network(o)) + if err != nil { + return nil, err + } + + cn := &conn{c: c} + cn.ssl(o) + cn.buf = bufio.NewReader(cn.c) + cn.startup(o) + return cn, nil +} + +func network(o Values) (string, string) { + host := o.Get("host") + + if strings.HasPrefix(host, "/") { + sockPath := path.Join(host, ".s.PGSQL."+o.Get("port")) + return "unix", sockPath + } + + return "tcp", host + ":" + o.Get("port") +} + +type Values map[string]string + +func (vs Values) Set(k, v string) { + vs[k] = v +} + +func (vs Values) Get(k string) (v string) { + v, _ = vs[k] + return +} + +func parseOpts(name string, o Values) { + if len(name) == 0 { + return + } + + name = strings.TrimSpace(name) + + ps := strings.Split(name, " ") + for _, p := range ps { + kv := strings.Split(p, "=") + if len(kv) < 2 { + errorf("invalid option: %q", p) + } + o.Set(kv[0], kv[1]) + } +} + +func (cn *conn) Begin() (driver.Tx, error) { + _, err := cn.Exec("BEGIN", nil) + if err != nil { + return nil, err + } + return cn, err +} + +func (cn *conn) Commit() error { + _, err := cn.Exec("COMMIT", nil) + return err +} + +func (cn *conn) Rollback() error { + _, err := cn.Exec("ROLLBACK", nil) + return err +} + +func (cn *conn) gname() string { + cn.namei++ + return strconv.FormatInt(int64(cn.namei), 10) +} + +func (cn *conn) simpleQuery(q string) (res driver.Result, err error) { + defer errRecover(&err) + + b := newWriteBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C': + res = parseComplete(r.string()) + case 'Z': + // done + return + case 'E': + err = parseError(r) + case 'T', 'N', 'S', 'D': + // ignore + default: + errorf("unknown response for simple query: %q", t) + } + } + panic("not reached") +} + +func (cn *conn) prepareTo(q, stmtName 
string) (_ driver.Stmt, err error) { + defer errRecover(&err) + + st := &stmt{cn: cn, name: stmtName, query: q} + + b := newWriteBuf('P') + b.string(st.name) + b.string(q) + b.int16(0) + cn.send(b) + + b = newWriteBuf('D') + b.byte('S') + b.string(st.name) + cn.send(b) + + cn.send(newWriteBuf('S')) + + for { + t, r := cn.recv1() + switch t { + case '1', '2', 'N': + case 't': + st.nparams = int(r.int16()) + st.paramTyps = make([]oid.Oid, st.nparams, st.nparams) + + for i := 0; i < st.nparams; i += 1 { + st.paramTyps[i] = r.oid() + } + case 'T': + n := r.int16() + st.cols = make([]string, n) + st.rowTyps = make([]oid.Oid, n) + for i := range st.cols { + st.cols[i] = r.string() + r.next(6) + st.rowTyps[i] = r.oid() + r.next(8) + } + case 'n': + // no data + case 'Z': + return st, err + case 'E': + err = parseError(r) + case 'C': + // command complete + return st, err + default: + errorf("unexpected describe rows response: %q", t) + } + } + + panic("not reached") +} + +func (cn *conn) Prepare(q string) (driver.Stmt, error) { + return cn.prepareTo(q, cn.gname()) +} + +func (cn *conn) Close() (err error) { + defer errRecover(&err) + cn.send(newWriteBuf('X')) + + return cn.c.Close() +} + +// Implement the optional "Execer" interface for one-shot queries +func (cn *conn) Exec(query string, args []driver.Value) (_ driver.Result, err error) { + defer errRecover(&err) + + // Check to see if we can use the "simpleQuery" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + return cn.simpleQuery(query) + } + + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. 
+ st, err := cn.prepareTo(query, "") + if err != nil { + panic(err) + } + + r, err := st.Exec(args) + if err != nil { + panic(err) + } + + return r, err +} + +// Assumes len(*m) is > 5 +func (cn *conn) send(m *writeBuf) { + b := (*m)[1:] + binary.BigEndian.PutUint32(b, uint32(len(b))) + + if (*m)[0] == 0 { + *m = b + } + + _, err := cn.c.Write(*m) + if err != nil { + panic(err) + } +} + +func (cn *conn) recv() (t byte, r *readBuf) { + for { + t, r = cn.recv1() + switch t { + case 'E': + panic(parseError(r)) + case 'N': + // ignore + default: + return + } + } + + panic("not reached") +} + +func (cn *conn) recv1() (byte, *readBuf) { + x := make([]byte, 5) + _, err := io.ReadFull(cn.buf, x) + if err != nil { + panic(err) + } + + b := readBuf(x[1:]) + y := make([]byte, b.int32()-4) + _, err = io.ReadFull(cn.buf, y) + if err != nil { + panic(err) + } + + return x[0], (*readBuf)(&y) +} + +func (cn *conn) ssl(o Values) { + tlsConf := tls.Config{} + switch mode := o.Get("sslmode"); mode { + case "require", "": + tlsConf.InsecureSkipVerify = true + case "verify-full": + // fall out + case "disable": + return + default: + errorf(`unsupported sslmode %q; only "require" (default), "verify-full", and "disable" supported`, mode) + } + + w := newWriteBuf(0) + w.int32(80877103) + cn.send(w) + + b := make([]byte, 1) + _, err := io.ReadFull(cn.c, b) + if err != nil { + panic(err) + } + + if b[0] != 'S' { + panic(ErrSSLNotSupported) + } + + cn.c = tls.Client(cn.c, &tlsConf) +} + +func (cn *conn) startup(o Values) { + w := newWriteBuf(0) + w.int32(196608) + w.string("user") + w.string(o.Get("user")) + w.string("database") + w.string(o.Get("dbname")) + w.string("") + cn.send(w) + + for { + t, r := cn.recv() + switch t { + case 'K', 'S': + case 'R': + cn.auth(r, o) + case 'Z': + return + default: + errorf("unknown response for startup: %q", t) + } + } +} + +func (cn *conn) auth(r *readBuf, o Values) { + switch code := r.int32(); code { + case 0: + // OK + case 3: + w := newWriteBuf('p') 
+ w.string(o.Get("password")) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 5: + s := string(r.next(4)) + w := newWriteBuf('p') + w.string("md5" + md5s(md5s(o.Get("password")+o.Get("user"))+s)) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + default: + errorf("unknown authentication response: %d", code) + } +} + +type stmt struct { + cn *conn + name string + query string + cols []string + nparams int + rowTyps []oid.Oid + paramTyps []oid.Oid + closed bool +} + +func (st *stmt) Close() (err error) { + if st.closed { + return nil + } + + defer errRecover(&err) + + w := newWriteBuf('C') + w.byte('S') + w.string(st.name) + st.cn.send(w) + + st.cn.send(newWriteBuf('S')) + + t, _ := st.cn.recv() + if t != '3' { + errorf("unexpected close response: %q", t) + } + st.closed = true + + t, _ = st.cn.recv() + if t != 'Z' { + errorf("expected ready for query, but got: %q", t) + } + + return nil +} + +func (st *stmt) Query(v []driver.Value) (_ driver.Rows, err error) { + defer errRecover(&err) + st.exec(v) + return &rows{st: st}, nil +} + +func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { + defer errRecover(&err) + + if len(v) == 0 { + return st.cn.simpleQuery(st.query) + } + st.exec(v) + + for { + t, r := st.cn.recv1() + switch t { + case 'E': + err = parseError(r) + case 'C': + res = parseComplete(r.string()) + case 'Z': + // done + return + case 'T', 'N', 'S', 'D': + // Ignore + default: + errorf("unknown exec response: %q", t) + } + } + + panic("not reached") +} + +func (st *stmt) exec(v []driver.Value) { + w := newWriteBuf('B') + w.string("") + w.string(st.name) + w.int16(0) + w.int16(len(v)) + for i, x := range v { + if x == nil { + w.int32(-1) + } else { + b := encode(x, 
st.paramTyps[i]) + w.int32(len(b)) + w.bytes(b) + } + } + w.int16(0) + st.cn.send(w) + + w = newWriteBuf('E') + w.string("") + w.int32(0) + st.cn.send(w) + + st.cn.send(newWriteBuf('S')) + + var err error + for { + t, r := st.cn.recv1() + switch t { + case 'E': + err = parseError(r) + case '2': + if err != nil { + panic(err) + } + return + case 'Z': + if err != nil { + panic(err) + } + return + case 'N': + // ignore + default: + errorf("unexpected bind response: %q", t) + } + } +} + +func (st *stmt) NumInput() int { + return st.nparams +} + +type result int64 + +func (i result) RowsAffected() (int64, error) { + return int64(i), nil +} + +func (i result) LastInsertId() (int64, error) { + return 0, ErrNotSupported +} + +func parseComplete(s string) driver.Result { + parts := strings.Split(s, " ") + n, _ := strconv.ParseInt(parts[len(parts)-1], 10, 64) + return result(n) +} + +type rows struct { + st *stmt + done bool +} + +func (rs *rows) Close() error { + for { + err := rs.Next(nil) + switch err { + case nil: + case io.EOF: + return nil + default: + return err + } + } + panic("not reached") +} + +func (rs *rows) Columns() []string { + return rs.st.cols +} + +func (rs *rows) Next(dest []driver.Value) (err error) { + if rs.done { + return io.EOF + } + + defer errRecover(&err) + + for { + t, r := rs.st.cn.recv1() + switch t { + case 'E': + err = parseError(r) + case 'C', 'S', 'N': + continue + case 'Z': + rs.done = true + if err != nil { + return err + } + return io.EOF + case 'D': + n := r.int16() + for i := 0; i < len(dest) && i < n; i++ { + l := r.int32() + if l == -1 { + dest[i] = nil + continue + } + dest[i] = decode(r.next(l), rs.st.rowTyps[i]) + } + return + default: + errorf("unexpected message after execute: %q", t) + } + } + + panic("not reached") +} + +func md5s(s string) string { + h := md5.New() + h.Write([]byte(s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +// parseEnviron tries to mimic some of libpq's environment handling +// +// To ease testing, it 
does not directly reference os.Environ, but is +// designed to accept its output. +// +// Environment-set connection information is intended to have a higher +// precedence than a library default but lower than any explicitly +// passed information (such as in the URL or connection string). +func parseEnviron(env []string) (out map[string]string) { + out = make(map[string]string) + + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + + accrue := func(keyname string) { + out[keyname] = parts[1] + } + + // The order of these is the same as is seen in the + // PostgreSQL 9.1 manual, with omissions briefly + // noted. + switch parts[0] { + case "PGHOST": + accrue("host") + case "PGHOSTADDR": + accrue("hostaddr") + case "PGPORT": + accrue("port") + case "PGDATABASE": + accrue("dbname") + case "PGUSER": + accrue("user") + case "PGPASSWORD": + accrue("password") + // skip PGPASSFILE, PGSERVICE, PGSERVICEFILE, + // PGREALM + case "PGOPTIONS": + accrue("options") + case "PGAPPNAME": + accrue("application_name") + case "PGSSLMODE": + accrue("sslmode") + case "PGREQUIRESSL": + accrue("requiressl") + case "PGSSLCERT": + accrue("sslcert") + case "PGSSLKEY": + accrue("sslkey") + case "PGSSLROOTCERT": + accrue("sslrootcert") + case "PGSSLCRL": + accrue("sslcrl") + case "PGREQUIREPEER": + accrue("requirepeer") + case "PGKRBSRVNAME": + accrue("krbsrvname") + case "PGGSSLIB": + accrue("gsslib") + case "PGCONNECT_TIMEOUT": + accrue("connect_timeout") + case "PGCLIENTENCODING": + accrue("client_encoding") + // skip PGDATESTYLE, PGTZ, PGGEQO, PGSYSCONFDIR, + // PGLOCALEDIR + } + } + + return out +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/conn_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/conn_test.go new file mode 100644 index 00000000..461ffc90 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/conn_test.go @@ -0,0 +1,528 @@ +package pq + +import ( + 
"database/sql" + "database/sql/driver" + "io" + "os" + "reflect" + "testing" + "time" +) + +type Fatalistic interface { + Fatal(args ...interface{}) +} + +func openTestConn(t Fatalistic) *sql.DB { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + conn, err := sql.Open("postgres", "") + if err != nil { + t.Fatal(err) + } + + return conn +} + +func TestExec(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("CREATE TEMP TABLE temp (a int)") + if err != nil { + t.Fatal(err) + } + + r, err := db.Exec("INSERT INTO temp VALUES (1)") + if err != nil { + t.Fatal(err) + } + + if n, _ := r.RowsAffected(); n != 1 { + t.Fatalf("expected 1 row affected, not %d", n) + } + + r, err = db.Exec("INSERT INTO temp VALUES ($1), ($2), ($3)", 1, 2, 3) + if err != nil { + t.Fatal(err) + } + + if n, _ := r.RowsAffected(); n != 3 { + t.Fatalf("expected 3 rows affected, not %d", n) + } + + r, err = db.Exec("SELECT g FROM generate_series(1, 2) g") + if err != nil { + t.Fatal(err) + } + if n, _ := r.RowsAffected(); n != 2 { + t.Fatalf("expected 2 rows affected, not %d", n) + } + + r, err = db.Exec("SELECT g FROM generate_series(1, $1) g", 3) + if err != nil { + t.Fatal(err) + } + if n, _ := r.RowsAffected(); n != 3 { + t.Fatalf("expected 3 rows affected, not %d", n) + } + +} + +func TestStatment(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + st, err := db.Prepare("SELECT 1") + if err != nil { + t.Fatal(err) + } + + st1, err := db.Prepare("SELECT 2") + if err != nil { + t.Fatal(err) + } + + r, err := st.Query() + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if !r.Next() { + t.Fatal("expected row") + } + + var i int + err = r.Scan(&i) + if err != nil { + t.Fatal(err) + } + + if i != 1 { + t.Fatalf("expected 1, got %d", i) + } + + // st1 + + r1, err := st1.Query() + if err != nil { + 
t.Fatal(err) + } + defer r1.Close() + + if !r1.Next() { + if r.Err() != nil { + t.Fatal(r1.Err()) + } + t.Fatal("expected row") + } + + err = r1.Scan(&i) + if err != nil { + t.Fatal(err) + } + + if i != 2 { + t.Fatalf("expected 2, got %d", i) + } +} + +func TestRowsCloseBeforeDone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + r, err := db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + + err = r.Close() + if err != nil { + t.Fatal(err) + } + + if r.Next() { + t.Fatal("unexpected row") + } + + if r.Err() != nil { + t.Fatal(r.Err()) + } +} + +func TestEncodeDecode(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + q := ` + SELECT + '\x000102'::bytea, + 'foobar'::text, + NULL::integer, + '2000-1-1 01:02:03.04-7'::timestamptz, + 0::boolean, + 123, + 3.14::float8 + WHERE + '\x000102'::bytea = $1 + AND 'foobar'::text = $2 + AND $3::integer is NULL + ` + // AND '2000-1-1 12:00:00.000000-7'::timestamp = $3 + + exp1 := []byte{0, 1, 2} + exp2 := "foobar" + + r, err := db.Query(q, exp1, exp2, nil) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if !r.Next() { + if r.Err() != nil { + t.Fatal(r.Err()) + } + t.Fatal("expected row") + } + + var got1 []byte + var got2 string + var got3 = sql.NullInt64{Valid: true} + var got4 time.Time + var got5, got6, got7 interface{} + + err = r.Scan(&got1, &got2, &got3, &got4, &got5, &got6, &got7) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(exp1, got1) { + t.Errorf("expected %q byte: %q", exp1, got1) + } + + if !reflect.DeepEqual(exp2, got2) { + t.Errorf("expected %q byte: %q", exp2, got2) + } + + if got3.Valid { + t.Fatal("expected invalid") + } + + if got4.Year() != 2000 { + t.Fatal("wrong year") + } + + if got5 != false { + t.Fatalf("expected false, got %q", got5) + } + + if got6 != int64(123) { + t.Fatalf("expected 123, got %d", got6) + } + + if got7 != float64(3.14) { + t.Fatalf("expected 3.14, got %f", got7) + } +} + +func TestNoData(t *testing.T) { + db := 
openTestConn(t) + defer db.Close() + + st, err := db.Prepare("SELECT 1 WHERE true = false") + if err != nil { + t.Fatal(err) + } + defer st.Close() + + r, err := st.Query() + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if r.Next() { + if r.Err() != nil { + t.Fatal(r.Err()) + } + t.Fatal("unexpected row") + } +} + +func TestPGError(t *testing.T) { + // Don't use the normal connection setup, this is intended to + // blow up in the startup packet from a non-existent user. + db, err := sql.Open("postgres", "user=thisuserreallydoesntexist") + if err != nil { + t.Fatal(err) + } + defer db.Close() + + _, err = db.Begin() + if err == nil { + t.Fatal("expected error") + } + + if err, ok := err.(PGError); !ok { + t.Fatalf("expected a PGError, got: %v", err) + } +} + +func TestBadConn(t *testing.T) { + var err error + + func() { + defer errRecover(&err) + panic(io.EOF) + }() + + if err != driver.ErrBadConn { + t.Fatalf("expected driver.ErrBadConn, got: %#v", err) + } + + func() { + defer errRecover(&err) + e := &pgError{c: make(map[byte]string)} + e.c['S'] = Efatal + panic(e) + }() + + if err != driver.ErrBadConn { + t.Fatalf("expected driver.ErrBadConn, got: %#v", err) + } +} + +func TestErrorOnExec(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + sql := "DO $$BEGIN RAISE unique_violation USING MESSAGE='foo'; END; $$;" + _, err := db.Exec(sql) + _, ok := err.(PGError) + if !ok { + t.Fatalf("expected PGError, was: %#v", err) + } + + _, err = db.Exec("SELECT 1 WHERE true = false") // returns no rows + if err != nil { + t.Fatal(err) + } +} + +func TestErrorOnQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + sql := "DO $$BEGIN RAISE unique_violation USING MESSAGE='foo'; END; $$;" + r, err := db.Query(sql) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if r.Next() { + t.Fatal("unexpected row, want error") + } + + _, ok := r.Err().(PGError) + if !ok { + t.Fatalf("expected PGError, was: %#v", r.Err()) + } + + r, err = 
db.Query("SELECT 1 WHERE true = false") // returns no rows + if err != nil { + t.Fatal(err) + } + + if r.Next() { + t.Fatal("unexpected row") + } +} + +func TestBindError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("create temp table test (i integer)") + if err != nil { + t.Fatal(err) + } + + _, err = db.Query("select * from test where i=$1", "hhh") + if err == nil { + t.Fatal("expected an error") + } + + // Should not get error here + r, err := db.Query("select * from test where i=$1", 1) + if err != nil { + t.Fatal(err) + } + defer r.Close() +} + +func TestParseEnviron(t *testing.T) { + expected := map[string]string{"dbname": "hello", "user": "goodbye"} + results := parseEnviron([]string{"PGDATABASE=hello", "PGUSER=goodbye"}) + if !reflect.DeepEqual(expected, results) { + t.Fatalf("Expected: %#v Got: %#v", expected, results) + } +} + +func TestExecerInterface(t *testing.T) { + // Gin up a straw man private struct just for the type check + cn := &conn{c: nil} + var cni interface{} = cn + + _, ok := cni.(driver.Execer) + if !ok { + t.Fatal("Driver doesn't implement Execer") + } +} + +func TestNullAfterNonNull(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + r, err := db.Query("SELECT 9::integer UNION SELECT NULL::integer") + if err != nil { + t.Fatal(err) + } + + var n sql.NullInt64 + + if !r.Next() { + if r.Err() != nil { + t.Fatal(err) + } + t.Fatal("expected row") + } + + if err := r.Scan(&n); err != nil { + t.Fatal(err) + } + + if n.Int64 != 9 { + t.Fatalf("expected 2, not %d", n.Int64) + } + + if !r.Next() { + if r.Err() != nil { + t.Fatal(err) + } + t.Fatal("expected row") + } + + if err := r.Scan(&n); err != nil { + t.Fatal(err) + } + + if n.Valid { + t.Fatal("expected n to be invalid") + } + + if n.Int64 != 0 { + t.Fatalf("expected n to 2, not %d", n.Int64) + } +} + +// Stress test the performance of parsing results from the wire. 
+func BenchmarkResultParsing(b *testing.B) { + b.StopTimer() + + db := openTestConn(b) + defer db.Close() + _, err := db.Exec("BEGIN") + if err != nil { + b.Fatal(err) + } + + b.StartTimer() + for i := 0; i < b.N; i++ { + res, err := db.Query("SELECT generate_series(1, 50000)") + if err != nil { + b.Fatal(err) + } + res.Close() + } +} + +func Test64BitErrorChecking(t *testing.T) { + defer func() { + if err := recover(); err != nil { + t.Fatal("panic due to 0xFFFFFFFF != -1 " + + "when int is 64 bits") + } + }() + + db := openTestConn(t) + defer db.Close() + + r, err := db.Query(`SELECT * +FROM (VALUES (0::integer, NULL::text), (1, 'test string')) AS t;`) + + if err != nil { + t.Fatal(err) + } + + defer r.Close() + + for r.Next() { + } +} + +// Open transaction, issue INSERT query inside transaction, rollback +// transaction, issue SELECT query to same db used to create the tx. No rows +// should be returned. +func TestRollback(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("CREATE TEMP TABLE temp (a int)") + if err != nil { + t.Fatal(err) + } + sqlInsert := "INSERT INTO temp VALUES (1)" + sqlSelect := "SELECT * FROM temp" + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + _, err = tx.Query(sqlInsert) + if err != nil { + t.Fatal(err) + } + err = tx.Rollback() + if err != nil { + t.Fatal(err) + } + r, err := db.Query(sqlSelect) + if err != nil { + t.Fatal(err) + } + // Next() returns false if query returned no rows. 
+ if r.Next() { + t.Fatal("Transaction rollback failed") + } +} + +func TestConnTrailingSpace(t *testing.T) { + o := make(Values) + expected := Values{"dbname": "hello", "user": "goodbye"} + parseOpts("dbname=hello user=goodbye ", o) + if !reflect.DeepEqual(expected, o) { + t.Fatalf("Expected: %#v Got: %#v", expected, o) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/encode.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/encode.go new file mode 100644 index 00000000..4be75cc2 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/encode.go @@ -0,0 +1,122 @@ +package pq + +import ( + "camlistore.org/third_party/github.com/lib/pq/oid" + "database/sql/driver" + "encoding/hex" + "fmt" + "strconv" + "time" +) + +func encode(x interface{}, pgtypOid oid.Oid) []byte { + switch v := x.(type) { + case int64: + return []byte(fmt.Sprintf("%d", v)) + case float32, float64: + return []byte(fmt.Sprintf("%f", v)) + case []byte: + if pgtypOid == oid.T_bytea { + return []byte(fmt.Sprintf("\\x%x", v)) + } + + return v + case string: + if pgtypOid == oid.T_bytea { + return []byte(fmt.Sprintf("\\x%x", v)) + } + + return []byte(v) + case bool: + return []byte(fmt.Sprintf("%t", v)) + case time.Time: + return []byte(v.Format(time.RFC3339Nano)) + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func decode(s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + s = s[2:] // trim off "\\x" + d := make([]byte, hex.DecodedLen(len(s))) + _, err := hex.Decode(d, s) + if err != nil { + errorf("%s", err) + } + return d + case oid.T_timestamptz: + return mustParse("2006-01-02 15:04:05-07", typ, s) + case oid.T_timestamp: + return mustParse("2006-01-02 15:04:05", typ, s) + case oid.T_time: + return mustParse("15:04:05", typ, s) + case oid.T_timetz: + return mustParse("15:04:05-07", typ, s) + case oid.T_date: + return mustParse("2006-01-02", 
typ, s) + case oid.T_bool: + return s[0] == 't' + case oid.T_int8, oid.T_int2, oid.T_int4: + i, err := strconv.ParseInt(string(s), 10, 64) + if err != nil { + errorf("%s", err) + } + return i + case oid.T_float4, oid.T_float8: + bits := 64 + if typ == oid.T_float4 { + bits = 32 + } + f, err := strconv.ParseFloat(string(s), bits) + if err != nil { + errorf("%s", err) + } + return f + } + + return s +} + +func mustParse(f string, typ oid.Oid, s []byte) time.Time { + str := string(s) + + // Special case until time.Parse bug is fixed: + // http://code.google.com/p/go/issues/detail?id=3487 + if str[len(str)-2] == '.' { + str += "0" + } + + // check for a 30-minute-offset timezone + if (typ == oid.T_timestamptz || typ == oid.T_timetz) && + str[len(str)-3] == ':' { + f += ":00" + } + t, err := time.Parse(f, str) + if err != nil { + errorf("decode: %s", err) + } + return t +} + +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. 
+func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/encode_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/encode_test.go new file mode 100644 index 00000000..ac18ff64 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/encode_test.go @@ -0,0 +1,164 @@ +package pq + +import ( + "fmt" + "testing" + "time" +) + +func TestScanTimestamp(t *testing.T) { + var nt NullTime + tn := time.Now() + (&nt).Scan(tn) + if !nt.Valid { + t.Errorf("Expected Valid=false") + } + if nt.Time != tn { + t.Errorf("Time value mismatch") + } +} + +func TestScanNilTimestamp(t *testing.T) { + var nt NullTime + (&nt).Scan(nil) + if nt.Valid { + t.Errorf("Expected Valid=false") + } +} + +func TestTimestampWithTimeZone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer tx.Rollback() + + _, err = tx.Exec("create temp table test (t timestamp with time zone)") + if err != nil { + t.Fatal(err) + } + + // try several different locations, all included in Go's zoneinfo.zip + for _, locName := range []string{ + "UTC", + "America/Chicago", + "America/New_York", + "Australia/Darwin", + "Australia/Perth", + } { + loc, err := time.LoadLocation(locName) + if err != nil { + t.Logf("Could not load time zone %s - skipping", locName) + continue + } + + // Postgres timestamps have a resolution of 1 microsecond, so don't + // use the full range of the Nanosecond argument + refTime := time.Date(2012, 11, 6, 10, 23, 42, 123456000, loc) + _, err = tx.Exec("insert into test(t) values($1)", refTime) + if err != nil { + t.Fatal(err) + } + + for _, pgTimeZone := range []string{"US/Eastern", "Australia/Darwin"} { + // Switch Postgres's timezone to test different output timestamp formats + _, err = tx.Exec(fmt.Sprintf("set time zone 
'%s'", pgTimeZone)) + if err != nil { + t.Fatal(err) + } + + var gotTime time.Time + row := tx.QueryRow("select t from test") + err = row.Scan(&gotTime) + if err != nil { + t.Fatal(err) + } + + if !refTime.Equal(gotTime) { + t.Errorf("timestamps not equal: %s != %s", refTime, gotTime) + } + } + + _, err = tx.Exec("delete from test") + if err != nil { + t.Fatal(err) + } + } +} + +func TestTimestampWithOutTimezone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + test := func(ts, pgts string) { + r, err := db.Query("SELECT $1::timestamp", pgts) + if err != nil { + t.Fatalf("Could not run query: %v", err) + } + + n := r.Next() + + if n != true { + t.Fatal("Expected at least one row") + } + + var result time.Time + err = r.Scan(&result) + if err != nil { + t.Fatalf("Did not expect error scanning row: %v", err) + } + + expected, err := time.Parse(time.RFC3339, ts) + if err != nil { + t.Fatalf("Could not parse test time literal: %v", err) + } + + if !result.Equal(expected) { + t.Fatalf("Expected time to match %v: got mismatch %v", + expected, result) + } + + n = r.Next() + if n != false { + t.Fatal("Expected only one row") + } + } + + test("2000-01-01T00:00:00Z", "2000-01-01T00:00:00") + + // Test higher precision time + test("2013-01-04T20:14:58.80033Z", "2013-01-04 20:14:58.80033") +} + +func TestStringWithNul(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + hello0world := string("hello\x00world") + _, err := db.Query("SELECT $1::text", &hello0world) + if err == nil { + t.Fatal("Postgres accepts a string with nul in it; " + + "injection attacks may be plausible") + } +} + +func TestByteToText(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := []byte("hello world") + row := db.QueryRow("SELECT $1::text", b) + + var result []byte + err := row.Scan(&result) + if err != nil { + t.Fatal(err) + } + + if string(result) != string(b) { + t.Fatalf("expected %v but got %v", b, result) + } +} diff --git 
a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/error.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/error.go new file mode 100644 index 00000000..9384ab3e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/error.go @@ -0,0 +1,108 @@ +package pq + +import ( + "database/sql/driver" + "fmt" + "io" + "net" + "runtime" +) + +const ( + Efatal = "FATAL" + Epanic = "PANIC" + Ewarning = "WARNING" + Enotice = "NOTICE" + Edebug = "DEBUG" + Einfo = "INFO" + Elog = "LOG" +) + +type Error error + +type PGError interface { + Error() string + Fatal() bool + Get(k byte) (v string) +} +type pgError struct { + c map[byte]string +} + +func parseError(r *readBuf) *pgError { + err := &pgError{make(map[byte]string)} + for t := r.byte(); t != 0; t = r.byte() { + err.c[t] = r.string() + } + return err +} + +func (err *pgError) Get(k byte) (v string) { + v, _ = err.c[k] + return +} + +func (err *pgError) Fatal() bool { + return err.Get('S') == Efatal +} + +func (err *pgError) Error() string { + var s string + for k, v := range err.c { + s += fmt.Sprintf(" %c:%q", k, v) + } + return "pq: " + s[1:] +} + +func errorf(s string, args ...interface{}) { + panic(Error(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)))) +} + +type SimplePGError struct { + pgError +} + +func (err *SimplePGError) Error() string { + return "pq: " + err.Get('M') +} + +func errRecoverWithPGReason(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case *pgError: + // Return a SimplePGError in place + *err = &SimplePGError{*v} + default: + // Otherwise re-panic + panic(e) + } +} + +func errRecover(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case runtime.Error: + panic(v) + case *pgError: + if v.Fatal() { + *err = driver.ErrBadConn + } else { + *err = v + } + case *net.OpError: + *err = driver.ErrBadConn + case error: + if v == io.EOF || v.(error).Error() == 
"remote error: handshake failure" { + *err = driver.ErrBadConn + } else { + *err = v + } + + default: + panic(fmt.Sprintf("unknown error: %#v", e)) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/oid/types.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/oid/types.go new file mode 100644 index 00000000..7fd2ce2e --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/oid/types.go @@ -0,0 +1,169 @@ +package oid + +// Generated via massaging this catalog query: +// +// SELECT 'T_' || typname || ' = ' || oid +// FROM pg_type WHERE oid < 10000 +// ORDER BY oid; +// +// This should probably be done one per release. Postgres does not +// re-appropriate the system OID space below 10000 as a general rule. + +type Oid uint32 + +const ( + T_bool Oid = 16 + T_bytea = 17 + T_char = 18 + T_name = 19 + T_int8 = 20 + T_int2 = 21 + T_int2vector = 22 + T_int4 = 23 + T_regproc = 24 + T_text = 25 + T_oid = 26 + T_tid = 27 + T_xid = 28 + T_cid = 29 + T_oidvector = 30 + T_pg_type = 71 + T_pg_attribute = 75 + T_pg_proc = 81 + T_pg_class = 83 + T_json = 114 + T_xml = 142 + T__xml = 143 + T_pg_node_tree = 194 + T__json = 199 + T_smgr = 210 + T_point = 600 + T_lseg = 601 + T_path = 602 + T_box = 603 + T_polygon = 604 + T_line = 628 + T__line = 629 + T_cidr = 650 + T__cidr = 651 + T_float4 = 700 + T_float8 = 701 + T_abstime = 702 + T_reltime = 703 + T_tinterval = 704 + T_unknown = 705 + T_circle = 718 + T__circle = 719 + T_money = 790 + T__money = 791 + T_macaddr = 829 + T_inet = 869 + T__bool = 1000 + T__bytea = 1001 + T__char = 1002 + T__name = 1003 + T__int2 = 1005 + T__int2vector = 1006 + T__int4 = 1007 + T__regproc = 1008 + T__text = 1009 + T__tid = 1010 + T__xid = 1011 + T__cid = 1012 + T__oidvector = 1013 + T__bpchar = 1014 + T__varchar = 1015 + T__int8 = 1016 + T__point = 1017 + T__lseg = 1018 + T__path = 1019 + T__box = 1020 + T__float4 = 1021 + T__float8 = 1022 + T__abstime = 1023 
+ T__reltime = 1024 + T__tinterval = 1025 + T__polygon = 1027 + T__oid = 1028 + T_aclitem = 1033 + T__aclitem = 1034 + T__macaddr = 1040 + T__inet = 1041 + T_bpchar = 1042 + T_varchar = 1043 + T_date = 1082 + T_time = 1083 + T_timestamp = 1114 + T__timestamp = 1115 + T__date = 1182 + T__time = 1183 + T_timestamptz = 1184 + T__timestamptz = 1185 + T_interval = 1186 + T__interval = 1187 + T__numeric = 1231 + T_pg_database = 1248 + T__cstring = 1263 + T_timetz = 1266 + T__timetz = 1270 + T_bit = 1560 + T__bit = 1561 + T_varbit = 1562 + T__varbit = 1563 + T_numeric = 1700 + T_refcursor = 1790 + T__refcursor = 2201 + T_regprocedure = 2202 + T_regoper = 2203 + T_regoperator = 2204 + T_regclass = 2205 + T_regtype = 2206 + T__regprocedure = 2207 + T__regoper = 2208 + T__regoperator = 2209 + T__regclass = 2210 + T__regtype = 2211 + T_record = 2249 + T_cstring = 2275 + T_any = 2276 + T_anyarray = 2277 + T_void = 2278 + T_trigger = 2279 + T_language_handler = 2280 + T_internal = 2281 + T_opaque = 2282 + T_anyelement = 2283 + T__record = 2287 + T_anynonarray = 2776 + T_pg_authid = 2842 + T_pg_auth_members = 2843 + T__txid_snapshot = 2949 + T_uuid = 2950 + T__uuid = 2951 + T_txid_snapshot = 2970 + T_fdw_handler = 3115 + T_anyenum = 3500 + T_tsvector = 3614 + T_tsquery = 3615 + T_gtsvector = 3642 + T__tsvector = 3643 + T__gtsvector = 3644 + T__tsquery = 3645 + T_regconfig = 3734 + T__regconfig = 3735 + T_regdictionary = 3769 + T__regdictionary = 3770 + T_anyrange = 3831 + T_int4range = 3904 + T__int4range = 3905 + T_numrange = 3906 + T__numrange = 3907 + T_tsrange = 3908 + T__tsrange = 3909 + T_tstzrange = 3910 + T__tstzrange = 3911 + T_daterange = 3912 + T__daterange = 3913 + T_int8range = 3926 + T__int8range = 3927 +) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/url.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/url.go new file mode 100644 index 00000000..4e32cea8 --- /dev/null +++ 
b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/url.go @@ -0,0 +1,68 @@ +package pq + +import ( + "fmt" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL converts url to a connection string for driver.Open. +// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"="+v) + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + i := strings.Index(u.Host, ":") + if i < 0 { + accrue("host", u.Host) + } else { + accrue("host", u.Host[:i]) + accrue("port", u.Host[i+1:]) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k, _ := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/url_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/url_test.go new file mode 100644 index 00000000..fce97900 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/url_test.go @@ -0,0 +1,54 @@ +package pq + +import ( + "testing" +) + +func TestSimpleParseURL(t *testing.T) { + expected := "host=hostname.remote" + str, err := ParseURL("postgres://hostname.remote") + if err != nil { + t.Fatal(err) + } + + if str != expected { + t.Fatalf("unexpected result 
from ParseURL:\n+ %v\n- %v", str, expected) + } +} + +func TestFullParseURL(t *testing.T) { + expected := "dbname=database host=hostname.remote password=secret port=1234 user=username" + str, err := ParseURL("postgres://username:secret@hostname.remote:1234/database") + if err != nil { + t.Fatal(err) + } + + if str != expected { + t.Fatalf("unexpected result from ParseURL:\n+ %s\n- %s", str, expected) + } +} + +func TestInvalidProtocolParseURL(t *testing.T) { + _, err := ParseURL("http://hostname.remote") + switch err { + case nil: + t.Fatal("Expected an error from parsing invalid protocol") + default: + msg := "invalid connection protocol: http" + if err.Error() != msg { + t.Fatalf("Unexpected error message:\n+ %s\n- %s", + err.Error(), msg) + } + } +} + +func TestMinimalURL(t *testing.T) { + cs, err := ParseURL("postgres://") + if err != nil { + t.Fatal(err) + } + + if cs != "" { + t.Fatalf("expected blank connection string, got: %q", cs) + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/user_posix.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/user_posix.go new file mode 100644 index 00000000..a7adf397 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/user_posix.go @@ -0,0 +1,15 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. 
+ +// +build darwin freebsd linux netbsd openbsd solaris + +package pq + +import "os/user" + +func userCurrent() (string, error) { + u, err := user.Current() + if err != nil { + return "", err + } + return u.Username, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/user_windows.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/user_windows.go new file mode 100644 index 00000000..a7593ffb --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/lib/pq/user_windows.go @@ -0,0 +1,27 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "path/filepath" + "syscall" +) + +// Perform Windows user name lookup identically to libpq. +// +// The PostgreSQL code makes use of the legacy Win32 function +// GetUserName, and that function has not been imported into stock Go. +// GetUserNameEx is available though, the difference being that a +// wider range of names are available. To get the output to be the +// same as GetUserName, only the base (or last) component of the +// result is returned. +func userCurrent() (string, error) { + pw_name := make([]uint16, 128) + pwname_size := uint32(len(pw_name)) - 1 + err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) + if err != nil { + return "", err + } + s := syscall.UTF16ToString(pw_name) + u := filepath.Base(s) + return u, nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/mattn/go-sqlite3/README.mkd b/vendor/github.com/camlistore/camlistore/third_party/github.com/mattn/go-sqlite3/README.mkd new file mode 100644 index 00000000..0d16a638 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/mattn/go-sqlite3/README.mkd @@ -0,0 +1,22 @@ +go-sqlite3 +========== + +DESCRIPTION +----------- + +sqlite3 driver for go that using database/sql + +INSTALLATION +------------ + +It require `pkg-config`. 
And need to be possible to get information with `pkg-config --cflags --libs sqlite3`. +If you are using Windows, you can get pkg-config from below. + +http://ftp.gnome.org/pub/gnome/binaries/win32/dependencies/ + +Go does not support static linkage for external C library. So you should build sqlite3 with shared library. If it run on windows, it need dll. + +LICENSE +------- + +MIT: http://mattn.mit-license.org/2012 diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/mattn/go-sqlite3/sqlite3.go new file mode 100644 index 00000000..0d1dfeed --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/mattn/go-sqlite3/sqlite3.go @@ -0,0 +1,400 @@ +// Camlistore-specific addition: +// +build with_sqlite + +package sqlite + +/* +#include +#include +#include + +static int +_sqlite3_open_v2(const char *filename, sqlite3 **ppDb, int flags, const char *zVfs) { +#ifdef SQLITE_OPEN_URI + return sqlite3_open_v2(filename, ppDb, flags | SQLITE_OPEN_URI, zVfs); +#else + return sqlite3_open_v2(filename, ppDb, flags, zVfs); +#endif +} + +static int +_sqlite3_bind_text(sqlite3_stmt *stmt, int n, char *p, int np) { + return sqlite3_bind_text(stmt, n, p, np, SQLITE_TRANSIENT); +} + +static int +_sqlite3_bind_blob(sqlite3_stmt *stmt, int n, void *p, int np) { + return sqlite3_bind_blob(stmt, n, p, np, SQLITE_TRANSIENT); +} + +#include +#include + +static long +_sqlite3_last_insert_rowid(sqlite3* db) { + return (long) sqlite3_last_insert_rowid(db); +} + +static long +_sqlite3_changes(sqlite3* db) { + return (long) sqlite3_changes(db); +} + +*/ +import "C" +import ( + "database/sql" + "database/sql/driver" + "errors" + "io" + "strings" + "time" + "unsafe" +) + +// Timestamp formats understood by both this module and SQLite. +// The first format in the slice will be used when saving time values +// into the database. 
When parsing a string from a timestamp or +// datetime column, the formats are tried in order. +var SQLiteTimestampFormats = []string{ + "2006-01-02 15:04:05.999999999", + "2006-01-02T15:04:05.999999999", + "2006-01-02 15:04:05", + "2006-01-02T15:04:05", + "2006-01-02 15:04", + "2006-01-02T15:04", + "2006-01-02", +} + +func init() { + sql.Register("sqlite3", &SQLiteDriver{}) +} + +// Driver struct. +type SQLiteDriver struct { +} + +// Conn struct. +type SQLiteConn struct { + db *C.sqlite3 +} + +// Tx struct. +type SQLiteTx struct { + c *SQLiteConn +} + +// Stmt struct. +type SQLiteStmt struct { + c *SQLiteConn + s *C.sqlite3_stmt + t string + closed bool +} + +// Result struct. +type SQLiteResult struct { + s *SQLiteStmt +} + +// Rows struct. +type SQLiteRows struct { + s *SQLiteStmt + nc int + cols []string + decltype []string +} + +// Commit transaction. +func (tx *SQLiteTx) Commit() error { + if err := tx.c.exec("COMMIT"); err != nil { + return err + } + return nil +} + +// Rollback transaction. +func (tx *SQLiteTx) Rollback() error { + if err := tx.c.exec("ROLLBACK"); err != nil { + return err + } + return nil +} + +func (c *SQLiteConn) exec(cmd string) error { + pcmd := C.CString(cmd) + defer C.free(unsafe.Pointer(pcmd)) + rv := C.sqlite3_exec(c.db, pcmd, nil, nil, nil) + if rv != C.SQLITE_OK { + return errors.New(C.GoString(C.sqlite3_errmsg(c.db))) + } + return nil +} + +// Begin transaction. +func (c *SQLiteConn) Begin() (driver.Tx, error) { + if err := c.exec("BEGIN"); err != nil { + return nil, err + } + return &SQLiteTx{c}, nil +} + +// Open database and return a new connection. +// You can specify DSN string with URI filename. 
+// test.db +// file:test.db?cache=shared&mode=memory +// :memory: +// file::memory: +func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) { + if C.sqlite3_threadsafe() == 0 { + return nil, errors.New("sqlite library was not compiled for thread-safe operation") + } + + var db *C.sqlite3 + name := C.CString(dsn) + defer C.free(unsafe.Pointer(name)) + rv := C._sqlite3_open_v2(name, &db, + C.SQLITE_OPEN_FULLMUTEX| + C.SQLITE_OPEN_READWRITE| + C.SQLITE_OPEN_CREATE, + nil) + if rv != 0 { + return nil, errors.New(C.GoString(C.sqlite3_errmsg(db))) + } + if db == nil { + return nil, errors.New("sqlite succeeded without returning a database") + } + + rv = C.sqlite3_busy_timeout(db, 5000) + if rv != C.SQLITE_OK { + return nil, errors.New(C.GoString(C.sqlite3_errmsg(db))) + } + + return &SQLiteConn{db}, nil +} + +// Close the connection. +func (c *SQLiteConn) Close() error { + s := C.sqlite3_next_stmt(c.db, nil) + for s != nil { + C.sqlite3_finalize(s) + s = C.sqlite3_next_stmt(c.db, nil) + } + rv := C.sqlite3_close(c.db) + if rv != C.SQLITE_OK { + return errors.New("error while closing sqlite database connection") + } + c.db = nil + return nil +} + +// Prepare query string. Return a new statement. +func (c *SQLiteConn) Prepare(query string) (driver.Stmt, error) { + pquery := C.CString(query) + defer C.free(unsafe.Pointer(pquery)) + var s *C.sqlite3_stmt + var perror *C.char + rv := C.sqlite3_prepare_v2(c.db, pquery, -1, &s, &perror) + if rv != C.SQLITE_OK { + return nil, errors.New(C.GoString(C.sqlite3_errmsg(c.db))) + } + var t string + if perror != nil && C.strlen(perror) > 0 { + t = C.GoString(perror) + } + return &SQLiteStmt{c: c, s: s, t: t}, nil +} + +// Close the statement. 
+func (s *SQLiteStmt) Close() error { + if s.closed { + return nil + } + s.closed = true + if s.c == nil || s.c.db == nil { + return errors.New("sqlite statement with already closed database connection") + } + rv := C.sqlite3_finalize(s.s) + if rv != C.SQLITE_OK { + return errors.New(C.GoString(C.sqlite3_errmsg(s.c.db))) + } + return nil +} + +// Return a number of parameters. +func (s *SQLiteStmt) NumInput() int { + return int(C.sqlite3_bind_parameter_count(s.s)) +} + +func (s *SQLiteStmt) bind(args []driver.Value) error { + rv := C.sqlite3_reset(s.s) + if rv != C.SQLITE_ROW && rv != C.SQLITE_OK && rv != C.SQLITE_DONE { + return errors.New(C.GoString(C.sqlite3_errmsg(s.c.db))) + } + + for i, v := range args { + n := C.int(i + 1) + switch v := v.(type) { + case nil: + rv = C.sqlite3_bind_null(s.s, n) + case string: + if len(v) == 0 { + b := []byte{0} + rv = C._sqlite3_bind_text(s.s, n, (*C.char)(unsafe.Pointer(&b[0])), C.int(0)) + } else { + b := []byte(v) + rv = C._sqlite3_bind_text(s.s, n, (*C.char)(unsafe.Pointer(&b[0])), C.int(len(b))) + } + case int: + rv = C.sqlite3_bind_int(s.s, n, C.int(v)) + case int64: + rv = C.sqlite3_bind_int64(s.s, n, C.sqlite3_int64(v)) + case byte: + rv = C.sqlite3_bind_int(s.s, n, C.int(v)) + case bool: + if bool(v) { + rv = C.sqlite3_bind_int(s.s, n, 1) + } else { + rv = C.sqlite3_bind_int(s.s, n, 0) + } + case float32: + rv = C.sqlite3_bind_double(s.s, n, C.double(v)) + case float64: + rv = C.sqlite3_bind_double(s.s, n, C.double(v)) + case []byte: + var p *byte + if len(v) > 0 { + p = &v[0] + } + rv = C._sqlite3_bind_blob(s.s, n, unsafe.Pointer(p), C.int(len(v))) + case time.Time: + b := []byte(v.UTC().Format(SQLiteTimestampFormats[0])) + rv = C._sqlite3_bind_text(s.s, n, (*C.char)(unsafe.Pointer(&b[0])), C.int(len(b))) + } + if rv != C.SQLITE_OK { + return errors.New(C.GoString(C.sqlite3_errmsg(s.c.db))) + } + } + return nil +} + +// Query the statment with arguments. Return records. 
+func (s *SQLiteStmt) Query(args []driver.Value) (driver.Rows, error) { + if err := s.bind(args); err != nil { + return nil, err + } + return &SQLiteRows{s, int(C.sqlite3_column_count(s.s)), nil, nil}, nil +} + +// Return last inserted ID. +func (r *SQLiteResult) LastInsertId() (int64, error) { + return int64(C._sqlite3_last_insert_rowid(r.s.c.db)), nil +} + +// Return how many rows affected. +func (r *SQLiteResult) RowsAffected() (int64, error) { + return int64(C._sqlite3_changes(r.s.c.db)), nil +} + +// Execute the statement with arguments. Return result object. +func (s *SQLiteStmt) Exec(args []driver.Value) (driver.Result, error) { + if err := s.bind(args); err != nil { + return nil, err + } + rv := C.sqlite3_step(s.s) + if rv != C.SQLITE_ROW && rv != C.SQLITE_OK && rv != C.SQLITE_DONE { + return nil, errors.New(C.GoString(C.sqlite3_errmsg(s.c.db))) + } + return &SQLiteResult{s}, nil +} + +// Close the rows. +func (rc *SQLiteRows) Close() error { + rv := C.sqlite3_reset(rc.s.s) + if rv != C.SQLITE_OK { + return errors.New(C.GoString(C.sqlite3_errmsg(rc.s.c.db))) + } + return nil +} + +// Return column names. +func (rc *SQLiteRows) Columns() []string { + if rc.nc != len(rc.cols) { + rc.cols = make([]string, rc.nc) + for i := 0; i < rc.nc; i++ { + rc.cols[i] = C.GoString(C.sqlite3_column_name(rc.s.s, C.int(i))) + } + } + return rc.cols +} + +// Move cursor to next. 
+func (rc *SQLiteRows) Next(dest []driver.Value) error { + rv := C.sqlite3_step(rc.s.s) + if rv == C.SQLITE_DONE { + return io.EOF + } + if rv != C.SQLITE_ROW { + return errors.New(C.GoString(C.sqlite3_errmsg(rc.s.c.db))) + } + + if rc.decltype == nil { + rc.decltype = make([]string, rc.nc) + for i := 0; i < rc.nc; i++ { + rc.decltype[i] = strings.ToLower(C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i)))) + } + } + + for i := range dest { + switch C.sqlite3_column_type(rc.s.s, C.int(i)) { + case C.SQLITE_INTEGER: + val := int64(C.sqlite3_column_int64(rc.s.s, C.int(i))) + switch rc.decltype[i] { + case "timestamp", "datetime": + dest[i] = time.Unix(val, 0) + case "boolean": + dest[i] = val > 0 + default: + dest[i] = val + } + case C.SQLITE_FLOAT: + dest[i] = float64(C.sqlite3_column_double(rc.s.s, C.int(i))) + case C.SQLITE_BLOB: + n := int(C.sqlite3_column_bytes(rc.s.s, C.int(i))) + p := C.sqlite3_column_blob(rc.s.s, C.int(i)) + switch dest[i].(type) { + case sql.RawBytes: + dest[i] = (*[1 << 30]byte)(unsafe.Pointer(p))[0:n] + default: + slice := make([]byte, n) + copy(slice[:], (*[1 << 30]byte)(unsafe.Pointer(p))[0:n]) + dest[i] = slice + } + case C.SQLITE_NULL: + dest[i] = nil + case C.SQLITE_TEXT: + var err error + s := C.GoString((*C.char)(unsafe.Pointer(C.sqlite3_column_text(rc.s.s, C.int(i))))) + + switch rc.decltype[i] { + case "timestamp", "datetime": + for _, format := range SQLiteTimestampFormats { + if dest[i], err = time.Parse(format, s); err == nil { + break + } + } + if err != nil { + // The column is a time value, so return the zero time on parse failure. + dest[i] = time.Time{} + } + default: + // NOTE(bradfitz): local hack, without internet access. I imagine + // this has been fixed upstream properly. 
(the database/sql/driver + // docs say that you can't return strings here) + dest[i] = []byte(s) + } + + } + } + return nil +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/mattn/go-sqlite3/sqlite3_other.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/mattn/go-sqlite3/sqlite3_other.go new file mode 100644 index 00000000..aa865d48 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/mattn/go-sqlite3/sqlite3_other.go @@ -0,0 +1,9 @@ +// Camlistore-specific addition: +// +build with_sqlite,!windows + +package sqlite + +/* +#cgo pkg-config: sqlite3 +*/ +import "C" diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/mattn/go-sqlite3/sqlite3_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/mattn/go-sqlite3/sqlite3_test.go new file mode 100644 index 00000000..3ea55925 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/mattn/go-sqlite3/sqlite3_test.go @@ -0,0 +1,409 @@ +package sqlite + +import ( + "database/sql" + "os" + "testing" + "time" +) + +func TestOpen(t *testing.T) { + db, err := sql.Open("sqlite3", "./foo.db") + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer os.Remove("./foo.db") + defer db.Close() + + _, err = db.Exec("drop table foo") + _, err = db.Exec("create table foo (id integer)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + if stat, err := os.Stat("./foo.db"); err != nil || stat.IsDir() { + t.Error("Failed to create ./foo.db") + } +} + +func TestInsert(t *testing.T) { + db, err := sql.Open("sqlite3", "./foo.db") + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer os.Remove("./foo.db") + defer db.Close() + + _, err = db.Exec("drop table foo") + _, err = db.Exec("create table foo (id integer)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + res, err := db.Exec("insert into foo(id) values(123)") + if err != nil { 
+ t.Fatal("Failed to insert record:", err) + } + affected, _ := res.RowsAffected() + if affected != 1 { + t.Fatalf("Expected %d for affected rows, but %d:", 1, affected) + } + + rows, err := db.Query("select id from foo") + if err != nil { + t.Fatal("Failed to select records:", err) + } + defer rows.Close() + + rows.Next() + + var result int + rows.Scan(&result) + if result != 123 { + t.Errorf("Fetched %q; expected %q", 123, result) + } +} + +func TestUpdate(t *testing.T) { + db, err := sql.Open("sqlite3", "./foo.db") + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer os.Remove("./foo.db") + defer db.Close() + + _, err = db.Exec("drop table foo") + _, err = db.Exec("create table foo (id integer)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + res, err := db.Exec("insert into foo(id) values(123)") + if err != nil { + t.Fatal("Failed to insert record:", err) + } + expected, err := res.LastInsertId() + if err != nil { + t.Fatal("Failed to get LastInsertId:", err) + } + affected, _ := res.RowsAffected() + if err != nil { + t.Fatal("Failed to get RowsAffected:", err) + } + if affected != 1 { + t.Fatalf("Expected %d for affected rows, but %d:", 1, affected) + } + + res, err = db.Exec("update foo set id = 234") + if err != nil { + t.Fatal("Failed to update record:", err) + } + lastId, err := res.LastInsertId() + if err != nil { + t.Fatal("Failed to get LastInsertId:", err) + } + if expected != lastId { + t.Errorf("Expected %q for last Id, but %q:", expected, lastId) + } + affected, _ = res.RowsAffected() + if err != nil { + t.Fatal("Failed to get RowsAffected:", err) + } + if affected != 1 { + t.Fatalf("Expected %d for affected rows, but %d:", 1, affected) + } + + rows, err := db.Query("select id from foo") + if err != nil { + t.Fatal("Failed to select records:", err) + } + defer rows.Close() + + rows.Next() + + var result int + rows.Scan(&result) + if result != 234 { + t.Errorf("Fetched %q; expected %q", 234, result) + } +} + 
+func TestDelete(t *testing.T) { + db, err := sql.Open("sqlite3", "./foo.db") + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer os.Remove("./foo.db") + defer db.Close() + + _, err = db.Exec("drop table foo") + _, err = db.Exec("create table foo (id integer)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + res, err := db.Exec("insert into foo(id) values(123)") + if err != nil { + t.Fatal("Failed to insert record:", err) + } + expected, err := res.LastInsertId() + if err != nil { + t.Fatal("Failed to get LastInsertId:", err) + } + affected, err := res.RowsAffected() + if err != nil { + t.Fatal("Failed to get RowsAffected:", err) + } + if affected != 1 { + t.Errorf("Expected %d for cout of affected rows, but %q:", 1, affected) + } + + res, err = db.Exec("delete from foo where id = 123") + if err != nil { + t.Fatal("Failed to delete record:", err) + } + lastId, err := res.LastInsertId() + if err != nil { + t.Fatal("Failed to get LastInsertId:", err) + } + if expected != lastId { + t.Errorf("Expected %q for last Id, but %q:", expected, lastId) + } + affected, err = res.RowsAffected() + if err != nil { + t.Fatal("Failed to get RowsAffected:", err) + } + if affected != 1 { + t.Errorf("Expected %d for cout of affected rows, but %q:", 1, affected) + } + + rows, err := db.Query("select id from foo") + if err != nil { + t.Fatal("Failed to select records:", err) + } + defer rows.Close() + + if rows.Next() { + t.Error("Fetched row but expected not rows") + } +} + +func TestBooleanRoundtrip(t *testing.T) { + db, err := sql.Open("sqlite3", "./foo.db") + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer os.Remove("./foo.db") + defer db.Close() + + _, err = db.Exec("DROP TABLE foo") + _, err = db.Exec("CREATE TABLE foo(id INTEGER, value BOOL)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + _, err = db.Exec("INSERT INTO foo(id, value) VALUES(1, ?)", true) + if err != nil { + t.Fatal("Failed to 
insert true value:", err) + } + + _, err = db.Exec("INSERT INTO foo(id, value) VALUES(2, ?)", false) + if err != nil { + t.Fatal("Failed to insert false value:", err) + } + + rows, err := db.Query("SELECT id, value FROM foo") + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + defer rows.Close() + + for rows.Next() { + var id int + var value bool + + if err := rows.Scan(&id, &value); err != nil { + t.Error("Unable to scan results:", err) + continue + } + + if id == 1 && !value { + t.Error("Value for id 1 should be true, not false") + + } else if id == 2 && value { + t.Error("Value for id 2 should be false, not true") + } + } +} + +func TestTimestamp(t *testing.T) { + db, err := sql.Open("sqlite3", "./foo.db") + if err != nil { + t.Fatal("Failed to open database:", err) + } + defer os.Remove("./foo.db") + defer db.Close() + + _, err = db.Exec("DROP TABLE foo") + _, err = db.Exec("CREATE TABLE foo(id INTEGER, ts timeSTAMP, dt DATETIME)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + timestamp1 := time.Date(2012, time.April, 6, 22, 50, 0, 0, time.UTC) + timestamp2 := time.Date(2006, time.January, 2, 15, 4, 5, 123456789, time.UTC) + timestamp3 := time.Date(2012, time.November, 4, 0, 0, 0, 0, time.UTC) + tests := []struct { + value interface{} + expected time.Time + }{ + {"nonsense", time.Time{}}, + {"0000-00-00 00:00:00", time.Time{}}, + {timestamp1, timestamp1}, + {timestamp1.Unix(), timestamp1}, + {timestamp1.In(time.FixedZone("TEST", -7*3600)), timestamp1}, + {timestamp1.Format("2006-01-02 15:04:05.000"), timestamp1}, + {timestamp1.Format("2006-01-02T15:04:05.000"), timestamp1}, + {timestamp1.Format("2006-01-02 15:04:05"), timestamp1}, + {timestamp1.Format("2006-01-02T15:04:05"), timestamp1}, + {timestamp2, timestamp2}, + {"2006-01-02 15:04:05.123456789", timestamp2}, + {"2006-01-02T15:04:05.123456789", timestamp2}, + {"2012-11-04", timestamp3}, + {"2012-11-04 00:00", timestamp3}, + {"2012-11-04 00:00:00", timestamp3}, + 
{"2012-11-04 00:00:00.000", timestamp3}, + {"2012-11-04T00:00", timestamp3}, + {"2012-11-04T00:00:00", timestamp3}, + {"2012-11-04T00:00:00.000", timestamp3}, + } + for i := range tests { + _, err = db.Exec("INSERT INTO foo(id, ts, dt) VALUES(?, ?, ?)", i, tests[i].value, tests[i].value) + if err != nil { + t.Fatal("Failed to insert timestamp:", err) + } + } + + rows, err := db.Query("SELECT id, ts, dt FROM foo ORDER BY id ASC") + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + defer rows.Close() + + seen := 0 + for rows.Next() { + var id int + var ts, dt time.Time + + if err := rows.Scan(&id, &ts, &dt); err != nil { + t.Error("Unable to scan results:", err) + continue + } + if id < 0 || id >= len(tests) { + t.Error("Bad row id: ", id) + continue + } + seen++ + if !tests[id].expected.Equal(ts) { + t.Errorf("Timestamp value for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected, dt) + } + if !tests[id].expected.Equal(dt) { + t.Errorf("Datetime value for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected, dt) + } + } + + if seen != len(tests) { + t.Errorf("Expected to see %d rows", len(tests)) + } +} + +func TestBoolean(t *testing.T) { + db, err := sql.Open("sqlite3", "./foo.db") + if err != nil { + t.Fatal("Failed to open database:", err) + } + + defer os.Remove("./foo.db") + defer db.Close() + + _, err = db.Exec("CREATE TABLE foo(id INTEGER, fbool BOOLEAN)") + if err != nil { + t.Fatal("Failed to create table:", err) + } + + bool1 := true + _, err = db.Exec("INSERT INTO foo(id, fbool) VALUES(1, ?)", bool1) + if err != nil { + t.Fatal("Failed to insert boolean:", err) + } + + bool2 := false + _, err = db.Exec("INSERT INTO foo(id, fbool) VALUES(2, ?)", bool2) + if err != nil { + t.Fatal("Failed to insert boolean:", err) + } + + bool3 := "nonsense" + _, err = db.Exec("INSERT INTO foo(id, fbool) VALUES(3, ?)", bool3) + if err != nil { + t.Fatal("Failed to insert nonsense:", err) + } + + rows, err := 
db.Query("SELECT id, fbool FROM foo where fbool = ?", bool1) + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + counter := 0 + + var id int + var fbool bool + + for rows.Next() { + if err := rows.Scan(&id, &fbool); err != nil { + t.Fatal("Unable to scan results:", err) + } + counter++ + } + + if counter != 1 { + t.Fatalf("Expected 1 row but %v", counter) + } + + if id != 1 && fbool != true { + t.Fatalf("Value for id 1 should be %v, not %v", bool1, fbool) + } + + rows, err = db.Query("SELECT id, fbool FROM foo where fbool = ?", bool2) + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + + counter = 0 + + for rows.Next() { + if err := rows.Scan(&id, &fbool); err != nil { + t.Fatal("Unable to scan results:", err) + } + counter++ + } + + if counter != 1 { + t.Fatalf("Expected 1 row but %v", counter) + } + + if id != 2 && fbool != false { + t.Fatalf("Value for id 2 should be %v, not %v", bool2, fbool) + } + + // make sure "nonsense" triggered an error + rows, err = db.Query("SELECT id, fbool FROM foo where id=?;", 3) + if err != nil { + t.Fatal("Unable to query foo table:", err) + } + + rows.Next() + err = rows.Scan(&id, &fbool) + if err == nil { + t.Error("Expected error from \"nonsense\" bool") + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/buffer.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/buffer.go new file mode 100644 index 00000000..acc4b9c6 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/buffer.go @@ -0,0 +1,69 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cr2 + +import "io" + +// buffer buffers an io.Reader to satisfy io.ReaderAt. +type buffer struct { + r io.Reader + buf []byte +} + +// fill reads data from b.r until the buffer contains at least end bytes. 
+func (b *buffer) fill(end int) error { + m := len(b.buf) + if end > m { + if end > cap(b.buf) { + newcap := 1024 + for newcap < end { + newcap *= 2 + } + newbuf := make([]byte, end, newcap) + copy(newbuf, b.buf) + b.buf = newbuf + } else { + b.buf = b.buf[:end] + } + if n, err := io.ReadFull(b.r, b.buf[m:end]); err != nil { + end = m + n + b.buf = b.buf[:end] + return err + } + } + return nil +} + +func (b *buffer) ReadAt(p []byte, off int64) (int, error) { + o := int(off) + end := o + len(p) + if int64(end) != off+int64(len(p)) { + return 0, io.ErrUnexpectedEOF + } + + err := b.fill(end) + return copy(p, b.buf[o:end]), err +} + +// Slice returns a slice of the underlying buffer. The slice contains +// n bytes starting at offset off. +func (b *buffer) Slice(off, n int) ([]byte, error) { + end := off + n + if err := b.fill(end); err != nil { + return nil, err + } + return b.buf[off:end], nil +} + +// newReaderAt converts an io.Reader into an io.ReaderAt. +func newReaderAt(r io.Reader) io.ReaderAt { + if ra, ok := r.(io.ReaderAt); ok { + return ra + } + return &buffer{ + r: r, + buf: make([]byte, 0, 1024), + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/buffer_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/buffer_test.go new file mode 100644 index 00000000..ce735a24 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/buffer_test.go @@ -0,0 +1,36 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cr2 + +import ( + "io" + "strings" + "testing" +) + +var readAtTests = []struct { + n int + off int64 + s string + err error +}{ + {2, 0, "ab", nil}, + {6, 0, "abcdef", nil}, + {3, 3, "def", nil}, + {3, 5, "f", io.EOF}, + {3, 6, "", io.EOF}, +} + +func TestReadAt(t *testing.T) { + r := newReaderAt(strings.NewReader("abcdef")) + b := make([]byte, 10) + for _, test := range readAtTests { + n, err := r.ReadAt(b[:test.n], test.off) + s := string(b[:n]) + if s != test.s || err != test.err { + t.Errorf("buffer.ReadAt(<%v bytes>, %v): got %v, %q; want %v, %q", test.n, test.off, err, s, test.err, test.s) + } + } +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/consts.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/consts.go new file mode 100644 index 00000000..d538b4a4 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/consts.go @@ -0,0 +1,113 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cr2 + +// A tiff image file contains one or more images. The metadata +// of each image is contained in an Image File Directory (IFD), +// which contains entries of 12 bytes each and is described +// on page 14-16 of the specification. An IFD entry consists of +// +// - a tag, which describes the signification of the entry, +// - the data type and length of the entry, +// - the data itself or a pointer to it if it is more than 4 bytes. +// +// The presence of a length means that each IFD is effectively an array. + +const ( + leHeader = "\x49\x49\x2a\x00\x10\x00\x00\x00\x43\x52\x02" + ifdLen = 12 // Length of an IFD entry in bytes. +) + +// Data types (p. 14-16 of the spec). +const ( + dtByte = 1 + dtASCII = 2 + dtShort = 3 + dtLong = 4 + dtRational = 5 +) + +// The length of one instance of each data type in bytes. 
+var lengths = [...]uint32{0, 1, 1, 2, 4, 8} + +// Tags (see p. 28-41 of the spec). +const ( + tImageWidth = 256 + tImageLength = 257 + tBitsPerSample = 258 + tCompression = 259 + tPhotometricInterpretation = 262 + + tStripOffsets = 273 + tSamplesPerPixel = 277 + tRowsPerStrip = 278 + tStripByteCounts = 279 + + tTileWidth = 322 + tTileLength = 323 + tTileOffsets = 324 + tTileByteCounts = 325 + + tXResolution = 282 + tYResolution = 283 + tResolutionUnit = 296 + + tPredictor = 317 + tColorMap = 320 + tExtraSamples = 338 + tSampleFormat = 339 +) + +// Compression types (defined in various places in the spec and supplements). +const ( + cNone = 1 + cCCITT = 2 + cG3 = 3 // Group 3 Fax. + cG4 = 4 // Group 4 Fax. + cLZW = 5 + cJPEGOld = 6 // Superseded by cJPEG. + cJPEG = 7 + cDeflate = 8 // zlib compression. + cPackBits = 32773 + cDeflateOld = 32946 // Superseded by cDeflate. +) + +// Photometric interpretation values (see p. 37 of the spec). +const ( + pWhiteIsZero = 0 + pBlackIsZero = 1 + pRGB = 2 + pPaletted = 3 + pTransMask = 4 // transparency mask + pCMYK = 5 + pYCbCr = 6 + pCIELab = 8 +) + +// Values for the tPredictor tag (page 64-65 of the spec). +const ( + prNone = 1 + prHorizontal = 2 +) + +// Values for the tResolutionUnit tag (page 18). +const ( + resNone = 1 + resPerInch = 2 // Dots per inch. + resPerCM = 3 // Dots per centimeter. +) + +// imageMode represents the mode of the image. +type imageMode int + +const ( + mBilevel imageMode = iota + mPaletted + mGray + mGrayInvert + mRGB + mRGBA + mNRGBA +) diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/reader.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/reader.go new file mode 100644 index 00000000..0d664078 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/reader.go @@ -0,0 +1,339 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cr2 implements rudimentary support for reading Canon Camera Raw 2 +// (CR2) files. +// +// CR2 is a bastardized TIFF file with a JPEG file inside it (yeah, thanks Canon). +// This package is a stripped back version of code.google.com/p/go.image/tiff. +// +// Known limitations: +// +// Because TIFF files and CR2 files share the same first few bytes, the image +// package's file type detection will fail to recognize a cr2 if the tiff +// reader is also imported. +package cr2 + +import ( + "encoding/binary" + "image" + "image/color" + "io" + + "camlistore.org/third_party/go/pkg/image/jpeg" +) + +// A FormatError reports that the input is not a valid TIFF image. +type FormatError string + +func (e FormatError) Error() string { + return "cr2: invalid format: " + string(e) +} + +// An UnsupportedError reports that the input uses a valid but +// unimplemented feature. +type UnsupportedError string + +func (e UnsupportedError) Error() string { + return "cr2: unsupported feature: " + string(e) +} + +// An InternalError reports that an internal error was encountered. +type InternalError string + +func (e InternalError) Error() string { + return "cr2: internal error: " + string(e) +} + +type decoder struct { + r io.ReaderAt + byteOrder binary.ByteOrder + config image.Config + mode imageMode + bpp uint + features map[int][]uint + palette []color.Color + + buf []byte + off int // Current offset in buf. + v uint32 // Buffer value for reading with arbitrary bit depths. + nbits uint // Remaining number of bits in v. +} + +// firstVal returns the first uint of the features entry with the given tag, +// or 0 if the tag does not exist. 
+func (d *decoder) firstVal(tag int) uint { + f := d.features[tag] + if len(f) == 0 { + return 0 + } + return f[0] +} + +// ifdUint decodes the IFD entry in p, which must be of the Byte, Short +// or Long type, and returns the decoded uint values. +func (d *decoder) ifdUint(p []byte) (u []uint, err error) { + var raw []byte + datatype := d.byteOrder.Uint16(p[2:4]) + count := d.byteOrder.Uint32(p[4:8]) + if datalen := lengths[datatype] * count; datalen > 4 { + // The IFD contains a pointer to the real value. + raw = make([]byte, datalen) + _, err = d.r.ReadAt(raw, int64(d.byteOrder.Uint32(p[8:12]))) + } else { + raw = p[8 : 8+datalen] + } + if err != nil { + return nil, err + } + + u = make([]uint, count) + switch datatype { + case dtByte: + for i := uint32(0); i < count; i++ { + u[i] = uint(raw[i]) + } + case dtShort: + for i := uint32(0); i < count; i++ { + u[i] = uint(d.byteOrder.Uint16(raw[2*i : 2*(i+1)])) + } + case dtLong: + for i := uint32(0); i < count; i++ { + u[i] = uint(d.byteOrder.Uint32(raw[4*i : 4*(i+1)])) + } + default: + return nil, UnsupportedError("data type") + } + return u, nil +} + +// parseIFD decides whether the the IFD entry in p is "interesting" and +// stows away the data in the decoder. 
func (d *decoder) parseIFD(p []byte) error {
	tag := d.byteOrder.Uint16(p[0:2])
	switch tag {
	// Tags whose raw uint values are stored in d.features for later use by
	// newDecoder and NewReader; everything else is silently skipped.
	case tBitsPerSample,
		tExtraSamples,
		tPhotometricInterpretation,
		tCompression,
		tPredictor,
		tStripOffsets,
		tStripByteCounts,
		tRowsPerStrip,
		tTileWidth,
		tTileLength,
		tTileOffsets,
		tTileByteCounts,
		tImageLength,
		tImageWidth:
		val, err := d.ifdUint(p)
		if err != nil {
			return err
		}
		d.features[int(tag)] = val
	case tColorMap:
		val, err := d.ifdUint(p)
		if err != nil {
			return err
		}
		// The ColorMap is stored as three planes: all red values, then all
		// green, then all blue.
		numcolors := len(val) / 3
		if len(val)%3 != 0 || numcolors <= 0 || numcolors > 256 {
			return FormatError("bad ColorMap length")
		}
		d.palette = make([]color.Color, numcolors)
		for i := 0; i < numcolors; i++ {
			d.palette[i] = color.RGBA64{
				uint16(val[i]),
				uint16(val[i+numcolors]),
				uint16(val[i+2*numcolors]),
				0xffff,
			}
		}
	case tSampleFormat:
		// Page 27 of the spec: If the SampleFormat is present and
		// the value is not 1 [= unsigned integer data], a Baseline
		// TIFF reader that cannot handle the SampleFormat value
		// must terminate the import process gracefully.
		val, err := d.ifdUint(p)
		if err != nil {
			return err
		}
		for _, v := range val {
			if v != 1 {
				return UnsupportedError("sample format")
			}
		}
	}
	return nil
}

// readBits reads n bits from the internal buffer starting at the current offset.
func (d *decoder) readBits(n uint) uint32 {
	// Top up the bit buffer v one byte at a time until it holds n bits.
	for d.nbits < n {
		d.v <<= 8
		d.v |= uint32(d.buf[d.off])
		d.off++
		d.nbits += 8
	}
	// Return the top n bits and clear them from the buffer.
	d.nbits -= n
	rv := d.v >> d.nbits
	d.v &^= rv << d.nbits
	return rv
}

// flushBits discards the unread bits in the buffer used by readBits.
// It is used at the end of a line.
func (d *decoder) flushBits() {
	d.v = 0
	d.nbits = 0
}

// minInt returns the smaller of x or y.
+func minInt(a, b int) int { + if a <= b { + return a + } + return b +} + +func newDecoder(r io.Reader) (*decoder, error) { + d := &decoder{ + r: newReaderAt(r), + features: make(map[int][]uint), + } + + p := make([]byte, len(leHeader)) + if _, err := d.r.ReadAt(p, 0); err != nil { + return nil, err + } + if string(p[0:len(leHeader)]) != leHeader { + return nil, FormatError("malformed header") + } + d.byteOrder = binary.LittleEndian + + ifdOffset := int64(d.byteOrder.Uint32(p[4:8])) + + // The first two bytes contain the number of entries (12 bytes each). + if _, err := d.r.ReadAt(p[0:2], ifdOffset); err != nil { + return nil, err + } + numItems := int(d.byteOrder.Uint16(p[0:2])) + + // All IFD entries are read in one chunk. + p = make([]byte, ifdLen*numItems) + if _, err := d.r.ReadAt(p, ifdOffset+2); err != nil { + return nil, err + } + + for i := 0; i < len(p); i += ifdLen { + if err := d.parseIFD(p[i : i+ifdLen]); err != nil { + return nil, err + } + } + + d.config.Width = int(d.firstVal(tImageWidth)) + d.config.Height = int(d.firstVal(tImageLength)) + + if _, ok := d.features[tBitsPerSample]; !ok { + return nil, FormatError("BitsPerSample tag missing") + } + d.bpp = d.firstVal(tBitsPerSample) + + // Determine the image mode. + switch d.firstVal(tPhotometricInterpretation) { + case pRGB: + for _, b := range d.features[tBitsPerSample] { + if b != 8 { + return nil, UnsupportedError("non-8-bit RGB image") + } + } + d.config.ColorModel = color.RGBAModel + // RGB images normally have 3 samples per pixel. + // If there are more, ExtraSamples (p. 31-32 of the spec) + // gives their meaning (usually an alpha channel). + // + // This implementation does not support extra samples + // of an unspecified type. 
+ switch len(d.features[tBitsPerSample]) { + case 3: + d.mode = mRGB + case 4: + switch d.firstVal(tExtraSamples) { + case 1: + d.mode = mRGBA + case 2: + d.mode = mNRGBA + d.config.ColorModel = color.NRGBAModel + default: + return nil, FormatError("wrong number of samples for RGB") + } + default: + return nil, FormatError("wrong number of samples for RGB") + } + case pPaletted: + d.mode = mPaletted + d.config.ColorModel = color.Palette(d.palette) + case pWhiteIsZero: + d.mode = mGrayInvert + if d.bpp == 16 { + d.config.ColorModel = color.Gray16Model + } else { + d.config.ColorModel = color.GrayModel + } + case pBlackIsZero: + d.mode = mGray + if d.bpp == 16 { + d.config.ColorModel = color.Gray16Model + } else { + d.config.ColorModel = color.GrayModel + } + default: + return nil, UnsupportedError("color model") + } + + return d, nil +} + +// DecodeConfig returns the color model and dimensions of a TIFF image without +// decoding the entire image. +func DecodeConfig(r io.Reader) (image.Config, error) { + d, err := newDecoder(r) + if err != nil { + return image.Config{}, err + } + return d.config, nil +} + +// NewReader returns an io.Reader to the JPEG thumbnail embedded in the CR2 +// image in r. This allows access to the raw bytes of the JPEG thumbnail +// without the need to decompress it first. +func NewReader(r io.Reader) (io.Reader, error) { + d, err := newDecoder(r) + if err != nil { + return nil, err + } + offset := int64(d.features[tStripOffsets][0]) + n := int64(d.features[tStripByteCounts][0]) + switch d.firstVal(tCompression) { + case cJPEG, cJPEGOld: + default: + return nil, UnsupportedError("compression") + } + + return io.NewSectionReader(d.r, offset, n), nil +} + +// Decode reads a CR2 image from r and returns the embedded JPEG thumbnail as +// an image.Image. 
func Decode(r io.Reader) (image.Image, error) {
	r, err := NewReader(r)
	if err != nil {
		return nil, err
	}
	// The section reader from NewReader covers exactly the embedded JPEG
	// strip, so a plain JPEG decode yields the thumbnail.
	return jpeg.Decode(r)
}

func init() {
	// leHeader is the little-endian TIFF magic, which CR2 shares; see the
	// package comment about clashing with an imported tiff decoder.
	image.RegisterFormat("cr2", leHeader, Decode, DecodeConfig)
}
diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/reader_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/reader_test.go
new file mode 100644
index 00000000..8f2d43ba
--- /dev/null
+++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/reader_test.go
@@ -0,0 +1,172 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cr2

import (
	"crypto/sha1"
	"encoding/hex"
	"errors"
	"fmt"
	"image"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"
)

// sample describes one reference CR2 file, downloaded on demand into
// testdata/ and verified by size and SHA-1 before use (see samples_test.go
// for the list).
type sample struct {
	File           string // Local filename relative to testdata/.
	OriginalURL    string // Canonical URL for test image.
	MirrorURL      string // Preferred URL for image.
	RawW, RawH     int    // CR2 image width & height.
	ThumbW, ThumbH int    // Embedded thumbnail width & height.
	Checksum       string // SHA-1 for file in hex.
	Filesize       int64  // Filesize in bytes.
+} + +func (s sample) testPath() string { + return filepath.Join("testdata", s.File) +} + +func TestDecode(t *testing.T) { + for _, s := range samples { + f, err := openSampleFile(t, s) + if err != nil { + t.Fatal(err) + } + defer f.Close() + m, kind, err := image.Decode(f) + if err != nil { + t.Fatal(err) + } + if kind != "cr2" { + t.Fatal("unexpected kind:", kind) + } + r := m.Bounds() + if r.Dx() != s.ThumbW { + t.Error("width = %v, want %v", r.Dx(), s.ThumbW) + } + if r.Dy() != s.ThumbH { + t.Error("height = %v, want %v", r.Dy(), s.ThumbH) + } + } +} + +func verify(fn string, s sample) error { + st, err := os.Stat(fn) + if err != nil { + return err + } + if st.Size() != s.Filesize { + return fmt.Errorf("Size mismatch, expected %d got %d", s.Filesize, + st.Size()) + } + h := sha1.New() + r, err := os.Open(fn) + if err != nil { + return err + } + defer r.Close() + _, err = io.Copy(h, r) + if err != nil { + return err + } + checksum := hex.EncodeToString(h.Sum(nil)) + if checksum != s.Checksum { + return fmt.Errorf("Checksum mismatch, expected %s got %s", + s.Checksum, checksum) + } + return nil +} + +func dl(url string, s sample) error { + // We use fmt.Print* in this function to show progress while downloading. + // The tests can potentially take very long to setup while downloading + // testdata/, so we provide some indication things are working. 
+ fmt.Println(url) + r, err := http.Get(url) + if err != nil { + return err + } + defer r.Body.Close() + fn := s.testPath() + f, err := os.Create(fn) + if err != nil { + return err + } + + const ( + chunkSize = 10 << 10 + width = 50 + ) + total, bLast := int64(0), int64(0) + tLast := time.Now() + for { + var n int64 + n, err = io.CopyN(f, r.Body, chunkSize) + total += n + bLast += n + if time.Since(tLast) > (300 * time.Millisecond) { + kbps := float64(bLast) / time.Since(tLast).Seconds() / 1024 + frac := int(total * width / s.Filesize) + fmt.Printf("\rDownloaded: %s>%s| %.2f Kb/s", strings.Repeat("=", frac), + strings.Repeat(" ", width-frac), kbps) + tLast = time.Now() + bLast = 0 + } + + if err != nil { + break + } + } + fmt.Println() + if err != io.EOF { + f.Close() + os.Remove(fn) + return err + } + + return verify(fn, s) +} + +func openSampleFile(t *testing.T, s sample) (io.ReadCloser, error) { + fn := s.testPath() + err := verify(fn, s) + // Already downloaded. + if err == nil { + return os.Open(fn) + } + + if !os.IsNotExist(err) { + t.Log(fn, "corrupt, redownloading:", err) + } + + t.Log("Fetching sample file", s.File) + fi, err := os.Stat("testdata") + if err == nil && !fi.IsDir() { + return nil, errors.New("testdata is not a directory") + } + if os.IsNotExist(err) { + err = os.Mkdir("testdata", 0777) + } + if err != nil { + return nil, err + } + + err = dl(s.MirrorURL, s) + if err != nil { + // Mirror download can fail, we'll fallback to canonical location. + t.Log(err) + err = dl(s.OriginalURL, s) + if err != nil { + return nil, err + } + } + + return os.Open(fn) +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/samples_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/samples_test.go new file mode 100644 index 00000000..09bbf22a --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/nf/cr2/samples_test.go @@ -0,0 +1,240 @@ +// Copyright 2013 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cr2 + +// Fetch the sample files via HTTP so we don't have to check them in to git. +// There is about ~280MB of files listed below. +var mirrorHost = "https://googledrive.com/host/0B7S33WfKVjF0dTZhMGxVaUhvcnc/" +var samples = []sample{ + sample{ + File: "sample.cr2", + OriginalURL: "http://nf.wh3rd.net/img/sample.cr2", + MirrorURL: mirrorHost + "sample.cr2", + ThumbW: 5184, + ThumbH: 3456, + Filesize: 25881839, + Checksum: "bfcff428b7700926f03393aff8fe3bc84046d5e3", + }, + sample{ + File: "RAW_CANON_G10.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/g10/RAW_CANON_G10.CR2", + MirrorURL: mirrorHost + "RAW_CANON_G10.CR2", + RawW: 4416, + RawH: 3312, + ThumbW: 1600, + ThumbH: 1200, + Filesize: 20092969, + Checksum: "69ae7a8d5cee96906bf05f324a11a86209a43aed", + }, + sample{ + File: "RAW_CANON_400D_ARGB.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/400d/RAW_CANON_400D_ARGB.CR2", + MirrorURL: mirrorHost + "RAW_CANON_400D_ARGB.CR2", + RawW: 3888, + RawH: 2592, + ThumbW: 1936, + ThumbH: 1288, + Filesize: 10916213, + Checksum: "965319c524de701eeaa11d7c0a78692e704169d7", + }, + sample{ + File: "RAW_CANON_1DMARK3.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/1dm3/RAW_CANON_1DMARK3.CR2", + MirrorURL: mirrorHost + "RAW_CANON_1DMARK3.CR2", + RawW: 3888, + RawH: 2592, + ThumbW: 1936, + ThumbH: 1288, + Filesize: 14590280, + Checksum: "8dd54604e50343f84e3006ae57f7d9988e9ccdb8", + }, + sample{ + File: "RAW_CANON_20D.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/20d/RAW_CANON_20D.CR2", + MirrorURL: mirrorHost + "RAW_CANON_20D.CR2", + RawW: 3504, + RawH: 2336, + ThumbW: 1536, + ThumbH: 1024, + Filesize: 7193541, + Checksum: "f8fbe4175ddd309e5dd0bb1821dd519c221029b8", + }, + sample{ + File: "RAW_CANON_1DM2N.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/1dm2n/RAW_CANON_1DM2N.CR2", + MirrorURL: 
mirrorHost + "RAW_CANON_1DM2N.CR2", + RawW: 3504, + RawH: 2336, + ThumbW: 1728, + ThumbH: 1152, + Filesize: 7316992, + Checksum: "46a50c8d592362cfaddac8640648b29134f20ec9", + }, + sample{ + File: "RAW_CANON_30D.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/30d/RAW_CANON_30D.CR2", + MirrorURL: mirrorHost + "RAW_CANON_30D.CR2", + RawW: 3504, + RawH: 2336, + ThumbW: 1728, + ThumbH: 1152, + Filesize: 7699286, + Checksum: "f3f3f47eae3e300548c5ac86594ee2db1d70db03", + }, + sample{ + File: "RAW_CANON_1DSM2.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/1dsm2/RAW_CANON_1DSM2.CR2", + MirrorURL: mirrorHost + "RAW_CANON_1DSM2.CR2", + RawW: 4992, + RawH: 3328, + ThumbW: 1536, + ThumbH: 1024, + Filesize: 14914910, + Checksum: "b86061ea8fac318db674a804886c1977ec07023a", + }, + sample{ + File: "RAW_CANON_5D_ARGB.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/5d/RAW_CANON_5D_ARGB.CR2", + MirrorURL: mirrorHost + "RAW_CANON_5D_ARGB.CR2", + RawW: 4368, + RawH: 2912, + ThumbW: 2496, + ThumbH: 1664, + Filesize: 11138246, + Checksum: "0fcd2eb8c629899e3b53e1f4c45d344439da4780", + }, + sample{ + File: "RAW_CANON_1DM2.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/1dm2/RAW_CANON_1DM2.CR2", + MirrorURL: mirrorHost + "RAW_CANON_1DM2.CR2", + RawW: 3504, + RawH: 2336, + ThumbW: 1536, + ThumbH: 1024, + Filesize: 6953301, + Checksum: "22f5589e376e1030d21e63ae00ca09e537092e74", + }, + sample{ + File: "RAW_CANON_350D.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/350d/RAW_CANON_350D.CR2", + MirrorURL: mirrorHost + "RAW_CANON_350D.CR2", + RawW: 3456, + RawH: 2304, + ThumbW: 1536, + ThumbH: 1024, + Filesize: 6717063, + Checksum: "1d6203e45d3928a0299044ad0b8538be785e76b9", + }, + sample{ + File: "RAW_CANON_G9.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/g9/RAW_CANON_G9.CR2", + MirrorURL: mirrorHost + "RAW_CANON_G9.CR2", + RawW: 4000, + RawH: 3000, + ThumbW: 1600, + ThumbH: 1200, + Filesize: 13952063, + Checksum: 
"25a6d4c65932c66cd100233f3fba233d9f77e5e1", + }, + sample{ + File: "RAW_CANON_1DSM3.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/1dsm3/RAW_CANON_1DSM3.CR2", + MirrorURL: mirrorHost + "RAW_CANON_1DSM3.CR2", + RawW: 5616, + RawH: 3744, + ThumbW: 2784, + ThumbH: 1856, + Filesize: 20859768, + Checksum: "f9b78010aae4db394a6eb42985f9851104ec7810", + }, + sample{ + File: "RAW_CANON_450D.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/450d/RAW_CANON_450D.CR2", + MirrorURL: mirrorHost + "RAW_CANON_450D.CR2", + RawW: 4272, + RawH: 2848, + ThumbW: 2256, + ThumbH: 1504, + Filesize: 17710643, + Checksum: "85da365a765e0a3161d58c806541e2462e8fd538", + }, + sample{ + File: "RAW_CANON_40D_RAW_V105.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/40d/RAW_CANON_40D_RAW_V105.CR2", + MirrorURL: mirrorHost + "RAW_CANON_40D_RAW_V105.CR2", + RawW: 3888, + RawH: 2592, + ThumbW: 1936, + ThumbH: 1288, + Filesize: 12399795, + Checksum: "bf9e939aa38cd93014a659dc4dcdc017d37d534f", + }, + sample{ + File: "RAW_CANON_40D_SRAW_V103.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/40d/RAW_CANON_40D_SRAW_V103.CR2", + MirrorURL: mirrorHost + "RAW_CANON_40D_SRAW_V103.CR2", + RawW: 1936, + RawH: 1288, + ThumbW: 1936, + ThumbH: 1288, + Filesize: 6800865, + Checksum: "a44b1a4476f1cf0f3ca2e20b5b66d07945ececb1", + }, + sample{ + File: "RAW_CANON_40D_RAW_V103.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/40d/RAW_CANON_40D_RAW_V103.CR2", + MirrorURL: mirrorHost + "RAW_CANON_40D_RAW_V103.CR2", + RawW: 3888, + RawH: 2592, + ThumbW: 1936, + ThumbH: 1288, + Filesize: 11330256, + Checksum: "9c2d4885123f2b5551b1646c0d182ff51bde7ff7", + }, + sample{ + File: "RAW_CANON_40D_RAW_V104.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/40d/RAW_CANON_40D_RAW_V104.CR2", + MirrorURL: mirrorHost + "RAW_CANON_40D_RAW_V104.CR2", + RawW: 3888, + RawH: 2592, + ThumbW: 1936, + ThumbH: 1288, + Filesize: 11468332, + Checksum: "5429b38df6f8de09d5b379e7dbd1ae4cc26d6664", + 
}, + sample{ + File: "RAW_CANON_40D_RAW_V336643C.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/40d/RAW_CANON_40D_RAW_V336643C.CR2", + MirrorURL: mirrorHost + "RAW_CANON_40D_RAW_V336643C.CR2", + RawW: 3888, + RawH: 2592, + ThumbW: 1936, + ThumbH: 1288, + Filesize: 13227614, + Checksum: "30d24fa9a5773745c6a2dc90e4e55c4a858b1abd", + }, + sample{ + File: "RAW_CANON_50D.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/50d/RAW_CANON_50D.CR2", + MirrorURL: mirrorHost + "RAW_CANON_50D.CR2", + RawW: 4752, + RawH: 3168, + ThumbW: 4752, + ThumbH: 3168, + Filesize: 18935626, + Checksum: "5ec4b5b5dc828a197a70303337838801917a02a1", + }, + sample{ + File: "RAW_CANON_5DMARK2_PREPROD.CR2", + OriginalURL: "http://www.rawsamples.ch/raws/canon/5dm2/RAW_CANON_5DMARK2_PREPROD.CR2", + MirrorURL: mirrorHost + "RAW_CANON_5DMARK2_PREPROD.CR2", + RawW: 5616, + RawH: 3744, + ThumbW: 5616, + ThumbH: 3744, + Filesize: 26374329, + Checksum: "f7a5ea5e0ca970fa4861689db35bf5d266e04fa1", + }, +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/LICENSE.txt new file mode 100644 index 00000000..2885af36 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. 
Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/README.md b/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/README.md new file mode 100644 index 00000000..52e3b25b --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/README.md @@ -0,0 +1,246 @@ +Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday) +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. + +HTML output is currently supported, along with Smartypants +extensions. An experimental LaTeX output engine is also included. 
+ +It started as a translation from C of [Sundown][3]. + + +Installation +------------ + +Blackfriday is compatible with Go 1. If you are using an older +release of Go, consider using v1.1 of blackfriday, which was based +on the last stable release of Go prior to Go 1. You can find it as a +tagged commit on github. + +With Go 1 and git installed: + + go get github.com/russross/blackfriday + +will download, compile, and install the package into your `$GOPATH` +directory hierarchy. Alternatively, you can achieve the same if you +import it into a project: + + import "github.com/russross/blackfriday" + +and `go get` without parameters. + +Usage +----- + +For basic usage, it is as simple as getting your input into a byte +slice and calling: + + output := blackfriday.MarkdownBasic(input) + +This renders it with no extensions enabled. To get a more useful +feature set, use this instead: + + output := blackfriday.MarkdownCommon(input) + +### Sanitize untrusted content + +Blackfriday itself does nothing to protect against malicious content. If you are +dealing with user-supplied markdown, we recommend running blackfriday's output +through HTML sanitizer such as +[Bluemonday](https://github.com/microcosm-cc/bluemonday). + +Here's an example of simple usage of blackfriday together with bluemonday: + +``` go +import ( + "github.com/microcosm-cc/bluemonday" + "github.com/russross/blackfriday" +) + +// ... +unsafe := blackfriday.MarkdownCommon(input) +html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) +``` + +### Custom options + +If you want to customize the set of options, first get a renderer +(currently either the HTML or LaTeX output engines), then use it to +call the more general `Markdown` function. For examples, see the +implementations of `MarkdownBasic` and `MarkdownCommon` in +`markdown.go`. + +You can also check out `blackfriday-tool` for a more complete example +of how to use it. 
Download and install it using: + + go get github.com/russross/blackfriday-tool + +This is a simple command-line tool that allows you to process a +markdown file using a standalone program. You can also browse the +source directly on github if you are just looking for some example +code: + +* + +Note that if you have not already done so, installing +`blackfriday-tool` will be sufficient to download and install +blackfriday in addition to the tool itself. The tool binary will be +installed in `$GOPATH/bin`. This is a statically-linked binary that +can be copied to wherever you need it without worrying about +dependencies and library versions. + + +Features +-------- + +All features of Sundown are supported, including: + +* **Compatibility**. The Markdown v1.0.3 test suite passes with + the `--tidy` option. Without `--tidy`, the differences are + mostly in whitespace and entity escaping, where blackfriday is + more consistent and cleaner. + +* **Common extensions**, including table support, fenced code + blocks, autolinks, strikethroughs, non-strict emphasis, etc. + +* **Safety**. Blackfriday is paranoid when parsing, making it safe + to feed untrusted user input without fear of bad things + happening. The test suite stress tests this and there are no + known inputs that make it crash. If you find one, please let me + know and send me the input that does it. + + NOTE: "safety" in this context means *runtime safety only*. In order to + protect yourself agains JavaScript injection in untrusted content, see + [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). + +* **Fast processing**. It is fast enough to render on-demand in + most web applications without having to cache the output. + +* **Thread safety**. You can run multiple parsers in different + goroutines without ill effect. There is no dependence on global + shared state. + +* **Minimal dependencies**. Blackfriday only depends on standard + library packages in Go. 
The source code is pretty + self-contained, so it is easy to add to any project, including + Google App Engine projects. + +* **Standards compliant**. Output successfully validates using the + W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. + + +Extensions +---------- + +In addition to the standard markdown syntax, this package +implements the following extensions: + +* **Intra-word emphasis supression**. The `_` character is + commonly used inside words when discussing code, so having + markdown interpret it as an emphasis command is usually the + wrong thing. Blackfriday lets you treat all emphasis markers as + normal characters when they occur inside a word. + +* **Tables**. Tables can be created by drawing them in the input + using a simple syntax: + + ``` + Name | Age + --------|------ + Bob | 27 + Alice | 23 + ``` + +* **Fenced code blocks**. In addition to the normal 4-space + indentation to mark code blocks, you can explicitly mark them + and supply a language (to make syntax highlighting simple). Just + mark it like this: + + ``` go + func getTrue() bool { + return true + } + ``` + + You can use 3 or more backticks to mark the beginning of the + block, and the same number to mark the end of the block. + +* **Autolinking**. Blackfriday can find URLs that have not been + explicitly marked as links and turn them into links. + +* **Strikethrough**. Use two tildes (`~~`) to mark text that + should be crossed out. + +* **Hard line breaks**. With this extension enabled (it is off by + default in the `MarkdownBasic` and `MarkdownCommon` convenience + functions), newlines in the input translate into line breaks in + the output. + +* **Smart quotes**. Smartypants-style punctuation substitution is + supported, turning normal double- and single-quote marks into + curly quotes, etc. + +* **LaTeX-style dash parsing** is an additional option, where `--` + is translated into `–`, and `---` is translated into + `—`. 
This differs from most smartypants processors, which + turn a single hyphen into an ndash and a double hyphen into an + mdash. + +* **Smart fractions**, where anything that looks like a fraction + is translated into suitable HTML (instead of just a few special + cases like most smartypant processors). For example, `4/5` + becomes `45`, which renders as + 45. + + +Other renderers +--------------- + +Blackfriday is structured to allow alternative rendering engines. Here +are a few of note: + +* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): + provides a GitHub Flavored Markdown renderer with fenced code block + highlighting, clickable header anchor links. + + It's not customizable, and its goal is to produce HTML output + equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), + except the rendering is performed locally. + +* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, + but for markdown. + +* LaTeX output: renders output as LaTeX. This is currently part of the + main Blackfriday repository, but may be split into its own project + in the future. If you are interested in owning and maintaining the + LaTeX output component, please be in touch. + + It renders some basic documents, but is only experimental at this + point. In particular, it does not do any inline escaping, so input + that happens to look like LaTeX code will be passed through without + modification. + + +Todo +---- + +* More unit testing +* Improve unicode support. It does not understand all unicode + rules (about what constitutes a letter, a punctuation symbol, + etc.), so it may fail to detect word boundaries correctly in + some instances. It is safe on all utf-8 input. 
+ + +License +------- + +[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) + + + [1]: http://daringfireball.net/projects/markdown/ "Markdown" + [2]: http://golang.org/ "Go Language" + [3]: https://github.com/vmg/sundown "Sundown" diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/block.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/block.go new file mode 100644 index 00000000..8435f655 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/block.go @@ -0,0 +1,1389 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + + "camlistore.org/third_party/github.com/shurcooL/sanitized_anchor_name" +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *parser) block(out *bytes.Buffer, data []byte) { + if len(data) == 0 || data[len(data)-1] != '\n' { + panic("block input is missing terminating newline") + } + + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed header: + // + // # Header 1 + // ## Header 2 + // ... + // ###### Header 6 + if p.isPrefixHeader(data) { + data = data[p.prefixHeader(out, data):] + continue + } + + // block of preformatted HTML: + // + //
    + // ... + //
    + if data[0] == '<' { + if i := p.html(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.flags&EXTENSION_TITLEBLOCK != 0 { + if data[0] == '%' { + if i := p.titleBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(out, data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCode(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.r.HRule(out) + var i int + for i = 0; data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(out, data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.flags&EXTENSION_TABLES != 0 { + if i := p.table(out, data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(out, data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_ORDERED):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_DEFINITION):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headers, too + data = data[p.paragraph(out, data):] + } + + p.nesting-- +} + +func (p *parser) isPrefixHeader(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.flags&EXTENSION_SPACE_HEADERS != 0 { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + if data[level] != ' ' { + return false + } + } + return true +} + +func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.flags&EXTENSION_HEADER_IDS != 0 { + j, k := 0, 0 + // find start/end of header id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract header id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = sanitized_anchor_name.Create(string(data[i:end])) + } + work := func() bool { + p.inline(out, data[i:end]) + return true + } + p.r.Header(out, work, level, id) + } + return skip +} + +func (p *parser) isUnderlinedHeader(data []byte) int { + // test of level 1 header + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = 
skipChar(data, i, ' ') + if data[i] == '\n' { + return 1 + } else { + return 0 + } + } + + // test of level 2 header + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 2 + } else { + return 0 + } + } + + return 0 +} + +func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + p.r.TitleBlock(out, data) + + return len(data) +} + +func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(out, data, doRender); size > 0 { + return size + } + + // check for an
    tag + if size := p.htmlHr(out, data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + + return i +} + +// HTML comment, lax form +func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '<' || data[1] != '!' 
|| data[2] != '-' || data[3] != '-' { + return 0 + } + + i := 5 + + // scan for an end-of-comment marker, across lines if necessary + for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { + i++ + } + i++ + + // no end-of-comment marker + if i >= len(data) { + return 0 + } + + // needs to end with a blank line + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + return size + } + + return 0 +} + +// HR, which is the only self-closing block tag considered +func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
    tag after all; at least not a valid one + return 0 + } + + i := 3 + for data[i] != '>' && data[i] != '\n' { + i++ + } + + if data[i] == '>' { + i++ + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + return size + } + } + + return 0 +} + +func (p *parser) htmlFindTag(data []byte) (string, bool) { + i := 0 + for isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if blockTags[key] { + return key, true + } + return "", false +} + +func (p *parser) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (p *parser) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + return i + 1 +} + +func (p *parser) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker 
string) (skip int, marker string) { + i, size := 0, 0 + skip = 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + if i >= len(data) { + return + } + + // check for the marker characters: ~ or ` + if data[i] != '~' && data[i] != '`' { + return + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + if i >= len(data) { + return + } + + // the marker char must occur at least 3 times + if size < 3 { + return + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return + } + + if syntax != nil { + syn := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + return + } + + syntaxStart := i + + if data[i] == '{' { + i++ + syntaxStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + syn++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return + } + + // strip all whitespace at the beginning and the end + // of the {} block + for syn > 0 && isspace(data[syntaxStart]) { + syntaxStart++ + syn-- + } + + for syn > 0 && isspace(data[syntaxStart+syn-1]) { + syn-- + } + + i++ + } else { + for i < len(data) && !isspace(data[i]) { + syn++ + i++ + } + } + + language := string(data[syntaxStart : syntaxStart+syn]) + *syntax = &language + } + + i = skipChar(data, i, ' ') + if i >= len(data) || data[i] != '\n' { + return + } + + skip = i + 1 + return +} + +func (p *parser) fencedCode(out *bytes.Buffer, data []byte, doRender bool) int { + var lang *string + beg, marker := p.isFencedCode(data, &lang, "") + if beg == 0 || beg >= len(data) { + return 0 + } + + var work bytes.Buffer + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + fenceEnd, _ := p.isFencedCode(data[beg:], nil, marker) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, 
'\n') + 1 + + // did we reach the end of the buffer without a closing marker? + if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + syntax := "" + if lang != nil { + syntax = *lang + } + + if doRender { + p.r.BlockCode(out, work.Bytes(), syntax) + } + + return beg +} + +func (p *parser) table(out *bytes.Buffer, data []byte) int { + var header bytes.Buffer + i, columns := p.tableHeader(&header, data) + if i == 0 { + return 0 + } + + var body bytes.Buffer + + for i < len(data) { + pipes, rowStart := 0, i + for ; data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + i++ + p.tableRow(&body, data[rowStart:i], columns, false) + } + + p.r.Table(out, header.Bytes(), body.Bytes(), columns) + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { + i := 0 + colCount := 1 + for i = 0; data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + header := data[:i+1] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]int, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / 
*:?-+:? *|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_LEFT + dashes++ + } + for data[i] == '-' { + i++ + dashes++ + } + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_RIGHT + dashes++ + } + for data[i] == ' ' { + i++ + } + + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.tableRow(out, header, columns, true) + size = i + 1 + return +} + +func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { + i, col := 0, 0 + var rowWork bytes.Buffer + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for data[i] == ' ' { + i++ + } + + cellStart := i + + for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && data[cellEnd-1] == ' ' { + cellEnd-- + } + + var cellWork bytes.Buffer + p.inline(&cellWork, data[cellStart:cellEnd]) + + if header { + p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) + } else { + p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) + } + } + + // pad it out with empty columns to get the right number + 
for ; col < len(columns); col++ { + if header { + p.r.TableHeaderCell(&rowWork, nil, columns[col]) + } else { + p.r.TableCell(&rowWork, nil, columns[col]) + } + } + + // silently ignore rows with too many cells + + p.r.TableRow(out, rowWork.Bytes()) +} + +// returns blockquote prefix length +func (p *parser) quotePrefix(data []byte) int { + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + if data[i] == '>' { + if data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// parse a blockquote fragment +func (p *parser) quote(out *bytes.Buffer, data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + for data[end] != '\n' { + end++ + } + end++ + + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.isEmpty(data[beg:]) > 0 && + (end >= len(data) || + (p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0)) { + // blockquote ends with at least one blank line + // followed by something without a blockquote prefix + break + } + + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + + var cooked bytes.Buffer + p.block(&cooked, raw.Bytes()) + p.r.BlockQuote(out, cooked.Bytes()) + return end +} + +// returns prefix length for block code +func (p *parser) codePrefix(data []byte) int { + if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *parser) code(out *bytes.Buffer, data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for data[i] != '\n' { + i++ + } + i++ + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffeu + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := 
work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + p.r.BlockCode(out, work.Bytes(), "") + + return i +} + +// returns unordered list item prefix +func (p *parser) uliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // need a *, +, or - followed by a space + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *parser) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for data[i] >= '0' && data[i] <= '9' { + i++ + } + + // we need >= 1 digits followed by a dot and a space + if start == i || data[i] != '.' || data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *parser) dliPrefix(data []byte) int { + i := 0 + + // need a : followed by a spaces + if data[i] != ':' || data[i+1] != ' ' { + return 0 + } + for data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int { + i := 0 + flags |= LIST_ITEM_BEGINNING_OF_LIST + work := func() bool { + for i < len(data) { + skip := p.listItem(out, data[i:], &flags) + i += skip + + if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 { + break + } + flags &= ^LIST_ITEM_BEGINNING_OF_LIST + } + return true + } + + p.r.List(out, work, flags) + return i +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { + // keep track of the indentation of the first line + itemIndent := 0 + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^LIST_TYPE_TERM + } + } + if i == 0 { + // if in defnition list, set term flag and continue + if *flags&LIST_TYPE_DEFINITION != 0 { + *flags |= LIST_TYPE_TERM + } else { + return 0 + } + } + + // skip leading whitespace on first line + for data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + line = i + continue + } + + // calculate the indentation + indent := 0 + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + } + + chunk := data[line+indent : i] + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + if containsBlankLine { + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + // to be a nested list, it must be indented more + // if not, it is the next item in the same list + if indent <= itemIndent { + break gatherlines + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix header? 
+ case p.isPrefixHeader(chunk): + // if the header is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { + // is the next item still a part of this list? + next := i + for data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= LIST_ITEM_END_OF_LIST + } + } else { + *flags |= LIST_ITEM_END_OF_LIST + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + raw.WriteByte('\n') + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + // if this line was preceeded by one or more blanks, + // re-introduce the blank into the buffer + if containsBlankLine { + containsBlankLine = false + raw.WriteByte('\n') + + } + + // add the line into the working buffer without prefix + raw.Write(data[line+indent : i]) + + line = i + } + + rawBytes := raw.Bytes() + + // render the contents of the list item + var cooked bytes.Buffer + if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.block(&cooked, rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + p.inline(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.inline(&cooked, rawBytes) + } + } + + // render the actual list item + cookedBytes := cooked.Bytes() + parsedEnd := len(cookedBytes) + + // strip trailing 
newlines + for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' { + parsedEnd-- + } + p.r.ListItem(out, cookedBytes[:parsedEnd], *flags) + + return line +} + +// render a single paragraph that has already been parsed out +func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + // trim trailing newline + end := len(data) - 1 + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + work := func() bool { + p.inline(out, data[beg:end]) + return true + } + p.r.Paragraph(out, work) +} + +func (p *parser) paragraph(out *bytes.Buffer, data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? 
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + p.renderParagraph(out, data[:i]) + return i + n + } + + // an underline under some text marks a header, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeader(current); level > 0 { + // render the paragraph + p.renderParagraph(out, data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + // render the header + // this ugly double closure avoids forcing variables onto the heap + work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { + return func() bool { + pp.inline(o, d) + return true + } + }(out, p, data[prev:eol]) + + id := "" + if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = sanitized_anchor_name.Create(string(data[prev:eol])) + } + + p.r.Header(out, work, level, id) + + // find the end of the underline + for data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + if data[i] == '<' && p.html(out, current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a prefixed header or a horizontal rule after this, paragraph is over + if p.isPrefixHeader(current) || p.isHRule(current) { + p.renderParagraph(out, data[:i]) + return i + } + + // if there's a definition list item, prev line is a definition term + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(current) != 0 { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + // if there's a list after this, paragraph is over + if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + 
p.codePrefix(current) != 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + for data[i] != '\n' { + i++ + } + i++ + } + + p.renderParagraph(out, data[:i]) + return i +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/block_test.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/block_test.go new file mode 100644 index 00000000..f52506fd --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/block_test.go @@ -0,0 +1,1407 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Unit tests for block parsing +// + +package blackfriday + +import ( + "testing" +) + +func runMarkdownBlockWithRenderer(input string, extensions int, renderer Renderer) string { + return string(Markdown([]byte(input), renderer, extensions)) +} + +func runMarkdownBlock(input string, extensions int) string { + htmlFlags := 0 + htmlFlags |= HTML_USE_XHTML + + renderer := HtmlRenderer(htmlFlags, "", "") + + return runMarkdownBlockWithRenderer(input, extensions, renderer) +} + +func runnerWithRendererParameters(parameters HtmlRendererParameters) func(string, int) string { + return func(input string, extensions int) string { + htmlFlags := 0 + htmlFlags |= HTML_USE_XHTML + + renderer := HtmlRendererWithParameters(htmlFlags, "", "", parameters) + + return runMarkdownBlockWithRenderer(input, extensions, renderer) + } +} + +func doTestsBlock(t *testing.T, tests []string, extensions int) { + doTestsBlockWithRunner(t, tests, extensions, runMarkdownBlock) +} + +func doTestsBlockWithRunner(t *testing.T, tests []string, extensions int, runner func(string, int) string) { + // catch and report panics + var candidate string + defer func() { + 
if err := recover(); err != nil { + t.Errorf("\npanic while processing [%#v]: %s\n", candidate, err) + } + }() + + for i := 0; i+1 < len(tests); i += 2 { + input := tests[i] + candidate = input + expected := tests[i+1] + actual := runner(candidate, extensions) + if actual != expected { + t.Errorf("\nInput [%#v]\nExpected[%#v]\nActual [%#v]", + candidate, expected, actual) + } + + // now test every substring to stress test bounds checking + if !testing.Short() { + for start := 0; start < len(input); start++ { + for end := start + 1; end <= len(input); end++ { + candidate = input[start:end] + _ = runMarkdownBlock(candidate, extensions) + } + } + } + } +} + +func TestPrefixHeaderNoExtensions(t *testing.T) { + var tests = []string{ + "# Header 1\n", + "

    Header 1

    \n", + + "## Header 2\n", + "

    Header 2

    \n", + + "### Header 3\n", + "

    Header 3

    \n", + + "#### Header 4\n", + "

    Header 4

    \n", + + "##### Header 5\n", + "
    Header 5
    \n", + + "###### Header 6\n", + "
    Header 6
    \n", + + "####### Header 7\n", + "
    # Header 7
    \n", + + "#Header 1\n", + "

    Header 1

    \n", + + "##Header 2\n", + "

    Header 2

    \n", + + "###Header 3\n", + "

    Header 3

    \n", + + "####Header 4\n", + "

    Header 4

    \n", + + "#####Header 5\n", + "
    Header 5
    \n", + + "######Header 6\n", + "
    Header 6
    \n", + + "#######Header 7\n", + "
    #Header 7
    \n", + + "Hello\n# Header 1\nGoodbye\n", + "

    Hello

    \n\n

    Header 1

    \n\n

    Goodbye

    \n", + + "* List\n# Header\n* List\n", + "
      \n
    • List

      \n\n

      Header

    • \n\n
    • List

    • \n
    \n", + + "* List\n#Header\n* List\n", + "
      \n
    • List

      \n\n

      Header

    • \n\n
    • List

    • \n
    \n", + + "* List\n * Nested list\n # Nested header\n", + "
      \n
    • List

      \n\n
        \n
      • Nested list

        \n\n" + + "

        Nested header

      • \n
    • \n
    \n", + + "#Header 1 \\#\n", + "

    Header 1 #

    \n", + + "#Header 1 \\# foo\n", + "

    Header 1 # foo

    \n", + + "#Header 1 #\\##\n", + "

    Header 1 ##

    \n", + } + doTestsBlock(t, tests, 0) +} + +func TestPrefixHeaderSpaceExtension(t *testing.T) { + var tests = []string{ + "# Header 1\n", + "

    Header 1

    \n", + + "## Header 2\n", + "

    Header 2

    \n", + + "### Header 3\n", + "

    Header 3

    \n", + + "#### Header 4\n", + "

    Header 4

    \n", + + "##### Header 5\n", + "
    Header 5
    \n", + + "###### Header 6\n", + "
    Header 6
    \n", + + "####### Header 7\n", + "

    ####### Header 7

    \n", + + "#Header 1\n", + "

    #Header 1

    \n", + + "##Header 2\n", + "

    ##Header 2

    \n", + + "###Header 3\n", + "

    ###Header 3

    \n", + + "####Header 4\n", + "

    ####Header 4

    \n", + + "#####Header 5\n", + "

    #####Header 5

    \n", + + "######Header 6\n", + "

    ######Header 6

    \n", + + "#######Header 7\n", + "

    #######Header 7

    \n", + + "Hello\n# Header 1\nGoodbye\n", + "

    Hello

    \n\n

    Header 1

    \n\n

    Goodbye

    \n", + + "* List\n# Header\n* List\n", + "
      \n
    • List

      \n\n

      Header

    • \n\n
    • List

    • \n
    \n", + + "* List\n#Header\n* List\n", + "
      \n
    • List\n#Header
    • \n
    • List
    • \n
    \n", + + "* List\n * Nested list\n # Nested header\n", + "
      \n
    • List

      \n\n
        \n
      • Nested list

        \n\n" + + "

        Nested header

      • \n
    • \n
    \n", + } + doTestsBlock(t, tests, EXTENSION_SPACE_HEADERS) +} + +func TestPrefixHeaderIdExtension(t *testing.T) { + var tests = []string{ + "# Header 1 {#someid}\n", + "

    Header 1

    \n", + + "# Header 1 {#someid} \n", + "

    Header 1

    \n", + + "# Header 1 {#someid}\n", + "

    Header 1

    \n", + + "# Header 1 {#someid\n", + "

    Header 1 {#someid

    \n", + + "# Header 1 {#someid\n", + "

    Header 1 {#someid

    \n", + + "# Header 1 {#someid}}\n", + "

    Header 1

    \n\n

    }

    \n", + + "## Header 2 {#someid}\n", + "

    Header 2

    \n", + + "### Header 3 {#someid}\n", + "

    Header 3

    \n", + + "#### Header 4 {#someid}\n", + "

    Header 4

    \n", + + "##### Header 5 {#someid}\n", + "
    Header 5
    \n", + + "###### Header 6 {#someid}\n", + "
    Header 6
    \n", + + "####### Header 7 {#someid}\n", + "
    # Header 7
    \n", + + "# Header 1 # {#someid}\n", + "

    Header 1

    \n", + + "## Header 2 ## {#someid}\n", + "

    Header 2

    \n", + + "Hello\n# Header 1\nGoodbye\n", + "

    Hello

    \n\n

    Header 1

    \n\n

    Goodbye

    \n", + + "* List\n# Header {#someid}\n* List\n", + "
      \n
    • List

      \n\n

      Header

    • \n\n
    • List

    • \n
    \n", + + "* List\n#Header {#someid}\n* List\n", + "
      \n
    • List

      \n\n

      Header

    • \n\n
    • List

    • \n
    \n", + + "* List\n * Nested list\n # Nested header {#someid}\n", + "
      \n
    • List

      \n\n
        \n
      • Nested list

        \n\n" + + "

        Nested header

      • \n
    • \n
    \n", + } + doTestsBlock(t, tests, EXTENSION_HEADER_IDS) +} + +func TestPrefixHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) { + var tests = []string{ + "# header 1 {#someid}\n", + "

    header 1

    \n", + + "## header 2 {#someid}\n", + "

    header 2

    \n", + + "### header 3 {#someid}\n", + "

    header 3

    \n", + + "#### header 4 {#someid}\n", + "

    header 4

    \n", + + "##### header 5 {#someid}\n", + "
    header 5
    \n", + + "###### header 6 {#someid}\n", + "
    header 6
    \n", + + "####### header 7 {#someid}\n", + "
    # header 7
    \n", + + "# header 1 # {#someid}\n", + "

    header 1

    \n", + + "## header 2 ## {#someid}\n", + "

    header 2

    \n", + + "* List\n# Header {#someid}\n* List\n", + "
      \n
    • List

      \n\n

      Header

    • \n\n
    • List

    • \n
    \n", + + "* List\n#Header {#someid}\n* List\n", + "
      \n
    • List

      \n\n

      Header

    • \n\n
    • List

    • \n
    \n", + + "* List\n * Nested list\n # Nested header {#someid}\n", + "
      \n
    • List

      \n\n
        \n
      • Nested list

        \n\n" + + "

        Nested header

      • \n
    • \n
    \n", + } + + parameters := HtmlRendererParameters{ + HeaderIDPrefix: "PRE:", + HeaderIDSuffix: ":POST", + } + + doTestsBlockWithRunner(t, tests, EXTENSION_HEADER_IDS, runnerWithRendererParameters(parameters)) +} + +func TestPrefixAutoHeaderIdExtension(t *testing.T) { + var tests = []string{ + "# Header 1\n", + "

    Header 1

    \n", + + "# Header 1 \n", + "

    Header 1

    \n", + + "## Header 2\n", + "

    Header 2

    \n", + + "### Header 3\n", + "

    Header 3

    \n", + + "#### Header 4\n", + "

    Header 4

    \n", + + "##### Header 5\n", + "
    Header 5
    \n", + + "###### Header 6\n", + "
    Header 6
    \n", + + "####### Header 7\n", + "
    # Header 7
    \n", + + "Hello\n# Header 1\nGoodbye\n", + "

    Hello

    \n\n

    Header 1

    \n\n

    Goodbye

    \n", + + "* List\n# Header\n* List\n", + "
      \n
    • List

      \n\n

      Header

    • \n\n
    • List

    • \n
    \n", + + "* List\n#Header\n* List\n", + "
      \n
    • List

      \n\n

      Header

    • \n\n
    • List

    • \n
    \n", + + "* List\n * Nested list\n # Nested header\n", + "
      \n
    • List

      \n\n
        \n
      • Nested list

        \n\n" + + "

        Nested header

      • \n
    • \n
    \n", + + "# Header\n\n# Header\n", + "

    Header

    \n\n

    Header

    \n", + + "# Header 1\n\n# Header 1", + "

    Header 1

    \n\n

    Header 1

    \n", + + "# Header\n\n# Header 1\n\n# Header\n\n# Header", + "

    Header

    \n\n

    Header 1

    \n\n

    Header

    \n\n

    Header

    \n", + } + doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS) +} + +func TestPrefixAutoHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) { + var tests = []string{ + "# Header 1\n", + "

    Header 1

    \n", + + "# Header 1 \n", + "

    Header 1

    \n", + + "## Header 2\n", + "

    Header 2

    \n", + + "### Header 3\n", + "

    Header 3

    \n", + + "#### Header 4\n", + "

    Header 4

    \n", + + "##### Header 5\n", + "
    Header 5
    \n", + + "###### Header 6\n", + "
    Header 6
    \n", + + "####### Header 7\n", + "
    # Header 7
    \n", + + "Hello\n# Header 1\nGoodbye\n", + "

    Hello

    \n\n

    Header 1

    \n\n

    Goodbye

    \n", + + "* List\n# Header\n* List\n", + "
      \n
    • List

      \n\n

      Header

    • \n\n
    • List

    • \n
    \n", + + "* List\n#Header\n* List\n", + "
      \n
    • List

      \n\n

      Header

    • \n\n
    • List

    • \n
    \n", + + "* List\n * Nested list\n # Nested header\n", + "
      \n
    • List

      \n\n
        \n
      • Nested list

        \n\n" + + "

        Nested header

      • \n
    • \n
    \n", + + "# Header\n\n# Header\n", + "

    Header

    \n\n

    Header

    \n", + + "# Header 1\n\n# Header 1", + "

    Header 1

    \n\n

    Header 1

    \n", + + "# Header\n\n# Header 1\n\n# Header\n\n# Header", + "

    Header

    \n\n

    Header 1

    \n\n

    Header

    \n\n

    Header

    \n", + } + + parameters := HtmlRendererParameters{ + HeaderIDPrefix: "PRE:", + HeaderIDSuffix: ":POST", + } + + doTestsBlockWithRunner(t, tests, EXTENSION_AUTO_HEADER_IDS, runnerWithRendererParameters(parameters)) +} + +func TestPrefixMultipleHeaderExtensions(t *testing.T) { + var tests = []string{ + "# Header\n\n# Header {#header}\n\n# Header 1", + "

    Header

    \n\n

    Header

    \n\n

    Header 1

    \n", + } + doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS|EXTENSION_HEADER_IDS) +} + +func TestUnderlineHeaders(t *testing.T) { + var tests = []string{ + "Header 1\n========\n", + "

    Header 1

    \n", + + "Header 2\n--------\n", + "

    Header 2

    \n", + + "A\n=\n", + "

    A

    \n", + + "B\n-\n", + "

    B

    \n", + + "Paragraph\nHeader\n=\n", + "

    Paragraph

    \n\n

    Header

    \n", + + "Header\n===\nParagraph\n", + "

    Header

    \n\n

    Paragraph

    \n", + + "Header\n===\nAnother header\n---\n", + "

    Header

    \n\n

    Another header

    \n", + + " Header\n======\n", + "

    Header

    \n", + + " Code\n========\n", + "
    Code\n
    \n\n

    ========

    \n", + + "Header with *inline*\n=====\n", + "

    Header with inline

    \n", + + "* List\n * Sublist\n Not a header\n ------\n", + "
      \n
    • List\n\n
        \n
      • Sublist\nNot a header\n------
      • \n
    • \n
    \n", + + "Paragraph\n\n\n\n\nHeader\n===\n", + "

    Paragraph

    \n\n

    Header

    \n", + + "Trailing space \n==== \n\n", + "

    Trailing space

    \n", + + "Trailing spaces\n==== \n\n", + "

    Trailing spaces

    \n", + + "Double underline\n=====\n=====\n", + "

    Double underline

    \n\n

    =====

    \n", + } + doTestsBlock(t, tests, 0) +} + +func TestUnderlineHeadersAutoIDs(t *testing.T) { + var tests = []string{ + "Header 1\n========\n", + "

    Header 1

    \n", + + "Header 2\n--------\n", + "

    Header 2

    \n", + + "A\n=\n", + "

    A

    \n", + + "B\n-\n", + "

    B

    \n", + + "Paragraph\nHeader\n=\n", + "

    Paragraph

    \n\n

    Header

    \n", + + "Header\n===\nParagraph\n", + "

    Header

    \n\n

    Paragraph

    \n", + + "Header\n===\nAnother header\n---\n", + "

    Header

    \n\n

    Another header

    \n", + + " Header\n======\n", + "

    Header

    \n", + + "Header with *inline*\n=====\n", + "

    Header with inline

    \n", + + "Paragraph\n\n\n\n\nHeader\n===\n", + "

    Paragraph

    \n\n

    Header

    \n", + + "Trailing space \n==== \n\n", + "

    Trailing space

    \n", + + "Trailing spaces\n==== \n\n", + "

    Trailing spaces

    \n", + + "Double underline\n=====\n=====\n", + "

    Double underline

    \n\n

    =====

    \n", + + "Header\n======\n\nHeader\n======\n", + "

    Header

    \n\n

    Header

    \n", + + "Header 1\n========\n\nHeader 1\n========\n", + "

    Header 1

    \n\n

    Header 1

    \n", + } + doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS) +} + +func TestHorizontalRule(t *testing.T) { + var tests = []string{ + "-\n", + "

    -

    \n", + + "--\n", + "

    --

    \n", + + "---\n", + "
    \n", + + "----\n", + "
    \n", + + "*\n", + "

    *

    \n", + + "**\n", + "

    **

    \n", + + "***\n", + "
    \n", + + "****\n", + "
    \n", + + "_\n", + "

    _

    \n", + + "__\n", + "

    __

    \n", + + "___\n", + "
    \n", + + "____\n", + "
    \n", + + "-*-\n", + "

    -*-

    \n", + + "- - -\n", + "
    \n", + + "* * *\n", + "
    \n", + + "_ _ _\n", + "
    \n", + + "-----*\n", + "

    -----*

    \n", + + " ------ \n", + "
    \n", + + "Hello\n***\n", + "

    Hello

    \n\n
    \n", + + "---\n***\n___\n", + "
    \n\n
    \n\n
    \n", + } + doTestsBlock(t, tests, 0) +} + +func TestUnorderedList(t *testing.T) { + var tests = []string{ + "* Hello\n", + "
      \n
    • Hello
    • \n
    \n", + + "* Yin\n* Yang\n", + "
      \n
    • Yin
    • \n
    • Yang
    • \n
    \n", + + "* Ting\n* Bong\n* Goo\n", + "
      \n
    • Ting
    • \n
    • Bong
    • \n
    • Goo
    • \n
    \n", + + "* Yin\n\n* Yang\n", + "
      \n
    • Yin

    • \n\n
    • Yang

    • \n
    \n", + + "* Ting\n\n* Bong\n* Goo\n", + "
      \n
    • Ting

    • \n\n
    • Bong

    • \n\n
    • Goo

    • \n
    \n", + + "+ Hello\n", + "
      \n
    • Hello
    • \n
    \n", + + "+ Yin\n+ Yang\n", + "
      \n
    • Yin
    • \n
    • Yang
    • \n
    \n", + + "+ Ting\n+ Bong\n+ Goo\n", + "
      \n
    • Ting
    • \n
    • Bong
    • \n
    • Goo
    • \n
    \n", + + "+ Yin\n\n+ Yang\n", + "
      \n
    • Yin

    • \n\n
    • Yang

    • \n
    \n", + + "+ Ting\n\n+ Bong\n+ Goo\n", + "
      \n
    • Ting

    • \n\n
    • Bong

    • \n\n
    • Goo

    • \n
    \n", + + "- Hello\n", + "
      \n
    • Hello
    • \n
    \n", + + "- Yin\n- Yang\n", + "
      \n
    • Yin
    • \n
    • Yang
    • \n
    \n", + + "- Ting\n- Bong\n- Goo\n", + "
      \n
    • Ting
    • \n
    • Bong
    • \n
    • Goo
    • \n
    \n", + + "- Yin\n\n- Yang\n", + "
      \n
    • Yin

    • \n\n
    • Yang

    • \n
    \n", + + "- Ting\n\n- Bong\n- Goo\n", + "
      \n
    • Ting

    • \n\n
    • Bong

    • \n\n
    • Goo

    • \n
    \n", + + "*Hello\n", + "

    *Hello

    \n", + + "* Hello \n", + "
      \n
    • Hello
    • \n
    \n", + + "* Hello \n Next line \n", + "
      \n
    • Hello\nNext line
    • \n
    \n", + + "Paragraph\n* No linebreak\n", + "

    Paragraph\n* No linebreak

    \n", + + "Paragraph\n\n* Linebreak\n", + "

    Paragraph

    \n\n
      \n
    • Linebreak
    • \n
    \n", + + "* List\n * Nested list\n", + "
      \n
    • List\n\n
        \n
      • Nested list
      • \n
    • \n
    \n", + + "* List\n\n * Nested list\n", + "
      \n
    • List

      \n\n
        \n
      • Nested list
      • \n
    • \n
    \n", + + "* List\n Second line\n\n + Nested\n", + "
      \n
    • List\nSecond line

      \n\n
        \n
      • Nested
      • \n
    • \n
    \n", + + "* List\n + Nested\n\n Continued\n", + "
      \n
    • List

      \n\n
        \n
      • Nested
      • \n
      \n\n

      Continued

    • \n
    \n", + + "* List\n * shallow indent\n", + "
      \n
    • List\n\n
        \n
      • shallow indent
      • \n
    • \n
    \n", + + "* List\n" + + " * shallow indent\n" + + " * part of second list\n" + + " * still second\n" + + " * almost there\n" + + " * third level\n", + "
      \n" + + "
    • List\n\n" + + "
        \n" + + "
      • shallow indent
      • \n" + + "
      • part of second list
      • \n" + + "
      • still second
      • \n" + + "
      • almost there\n\n" + + "
          \n" + + "
        • third level
        • \n" + + "
      • \n" + + "
    • \n" + + "
    \n", + + "* List\n extra indent, same paragraph\n", + "
      \n
    • List\n extra indent, same paragraph
    • \n
    \n", + + "* List\n\n code block\n", + "
      \n
    • List

      \n\n
      code block\n
    • \n
    \n", + + "* List\n\n code block with spaces\n", + "
      \n
    • List

      \n\n
        code block with spaces\n
    • \n
    \n", + + "* List\n\n * sublist\n\n normal text\n\n * another sublist\n", + "
      \n
    • List

      \n\n
        \n
      • sublist
      • \n
      \n\n

      normal text

      \n\n
        \n
      • another sublist
      • \n
    • \n
    \n", + } + doTestsBlock(t, tests, 0) +} + +func TestOrderedList(t *testing.T) { + var tests = []string{ + "1. Hello\n", + "
      \n
    1. Hello
    2. \n
    \n", + + "1. Yin\n2. Yang\n", + "
      \n
    1. Yin
    2. \n
    3. Yang
    4. \n
    \n", + + "1. Ting\n2. Bong\n3. Goo\n", + "
      \n
    1. Ting
    2. \n
    3. Bong
    4. \n
    5. Goo
    6. \n
    \n", + + "1. Yin\n\n2. Yang\n", + "
      \n
    1. Yin

    2. \n\n
    3. Yang

    4. \n
    \n", + + "1. Ting\n\n2. Bong\n3. Goo\n", + "
      \n
    1. Ting

    2. \n\n
    3. Bong

    4. \n\n
    5. Goo

    6. \n
    \n", + + "1 Hello\n", + "

    1 Hello

    \n", + + "1.Hello\n", + "

    1.Hello

    \n", + + "1. Hello \n", + "
      \n
    1. Hello
    2. \n
    \n", + + "1. Hello \n Next line \n", + "
      \n
    1. Hello\nNext line
    2. \n
    \n", + + "Paragraph\n1. No linebreak\n", + "

    Paragraph\n1. No linebreak

    \n", + + "Paragraph\n\n1. Linebreak\n", + "

    Paragraph

    \n\n
      \n
    1. Linebreak
    2. \n
    \n", + + "1. List\n 1. Nested list\n", + "
      \n
    1. List\n\n
        \n
      1. Nested list
      2. \n
    2. \n
    \n", + + "1. List\n\n 1. Nested list\n", + "
      \n
    1. List

      \n\n
        \n
      1. Nested list
      2. \n
    2. \n
    \n", + + "1. List\n Second line\n\n 1. Nested\n", + "
      \n
    1. List\nSecond line

      \n\n
        \n
      1. Nested
      2. \n
    2. \n
    \n", + + "1. List\n 1. Nested\n\n Continued\n", + "
      \n
    1. List

      \n\n
        \n
      1. Nested
      2. \n
      \n\n

      Continued

    2. \n
    \n", + + "1. List\n 1. shallow indent\n", + "
      \n
    1. List\n\n
        \n
      1. shallow indent
      2. \n
    2. \n
    \n", + + "1. List\n" + + " 1. shallow indent\n" + + " 2. part of second list\n" + + " 3. still second\n" + + " 4. almost there\n" + + " 1. third level\n", + "
      \n" + + "
    1. List\n\n" + + "
        \n" + + "
      1. shallow indent
      2. \n" + + "
      3. part of second list
      4. \n" + + "
      5. still second
      6. \n" + + "
      7. almost there\n\n" + + "
          \n" + + "
        1. third level
        2. \n" + + "
      8. \n" + + "
    2. \n" + + "
    \n", + + "1. List\n extra indent, same paragraph\n", + "
      \n
    1. List\n extra indent, same paragraph
    2. \n
    \n", + + "1. List\n\n code block\n", + "
      \n
    1. List

      \n\n
      code block\n
    2. \n
    \n", + + "1. List\n\n code block with spaces\n", + "
      \n
    1. List

      \n\n
        code block with spaces\n
    2. \n
    \n", + + "1. List\n * Mixted list\n", + "
      \n
    1. List\n\n
        \n
      • Mixted list
      • \n
    2. \n
    \n", + + "1. List\n * Mixed list\n", + "
      \n
    1. List\n\n
        \n
      • Mixed list
      • \n
    2. \n
    \n", + + "* Start with unordered\n 1. Ordered\n", + "
      \n
    • Start with unordered\n\n
        \n
      1. Ordered
      2. \n
    • \n
    \n", + + "* Start with unordered\n 1. Ordered\n", + "
      \n
    • Start with unordered\n\n
        \n
      1. Ordered
      2. \n
    • \n
    \n", + + "1. numbers\n1. are ignored\n", + "
      \n
    1. numbers
    2. \n
    3. are ignored
    4. \n
    \n", + } + doTestsBlock(t, tests, 0) +} + +func TestDefinitionList(t *testing.T) { + var tests = []string{ + "Term 1\n: Definition a\n", + "
    \n
    Term 1
    \n
    Definition a
    \n
    \n", + + "Term 1\n: Definition a \n", + "
    \n
    Term 1
    \n
    Definition a
    \n
    \n", + + "Term 1\n: Definition a\n: Definition b\n", + "
    \n
    Term 1
    \n
    Definition a
    \n
    Definition b
    \n
    \n", + + "Term 1\n: Definition a\n\nTerm 2\n: Definition b\n", + "
    \n" + + "
    Term 1
    \n" + + "
    Definition a
    \n" + + "
    Term 2
    \n" + + "
    Definition b
    \n" + + "
    \n", + + "Term 1\n: Definition a\n\nTerm 2\n: Definition b\n\nTerm 3\n: Definition c\n", + "
    \n" + + "
    Term 1
    \n" + + "
    Definition a
    \n" + + "
    Term 2
    \n" + + "
    Definition b
    \n" + + "
    Term 3
    \n" + + "
    Definition c
    \n" + + "
    \n", + + "Term 1\n: Definition a\n: Definition b\n\nTerm 2\n: Definition c\n", + "
    \n" + + "
    Term 1
    \n" + + "
    Definition a
    \n" + + "
    Definition b
    \n" + + "
    Term 2
    \n" + + "
    Definition c
    \n" + + "
    \n", + + "Term 1\n\n: Definition a\n\nTerm 2\n\n: Definition b\n", + "
    \n" + + "
    Term 1
    \n" + + "

    Definition a

    \n" + + "
    Term 2
    \n" + + "

    Definition b

    \n" + + "
    \n", + + "Term 1\n\n: Definition a\n\n: Definition b\n\nTerm 2\n\n: Definition c\n", + "
    \n" + + "
    Term 1
    \n" + + "

    Definition a

    \n" + + "

    Definition b

    \n" + + "
    Term 2
    \n" + + "

    Definition c

    \n" + + "
    \n", + + "Term 1\n: Definition a\nNext line\n", + "
    \n
    Term 1
    \n
    Definition a\nNext line
    \n
    \n", + + "Term 1\n: Definition a\n Next line\n", + "
    \n
    Term 1
    \n
    Definition a\nNext line
    \n
    \n", + + "Term 1\n: Definition a \n Next line \n", + "
    \n
    Term 1
    \n
    Definition a\nNext line
    \n
    \n", + + "Term 1\n: Definition a\nNext line\n\nTerm 2\n: Definition b", + "
    \n" + + "
    Term 1
    \n" + + "
    Definition a\nNext line
    \n" + + "
    Term 2
    \n" + + "
    Definition b
    \n" + + "
    \n", + + "Term 1\n: Definition a\n", + "
    \n
    Term 1
    \n
    Definition a
    \n
    \n", + + "Term 1\n:Definition a\n", + "

    Term 1\n:Definition a

    \n", + + "Term 1\n\n: Definition a\n\nTerm 2\n\n: Definition b\n\nText 1", + "
    \n" + + "
    Term 1
    \n" + + "

    Definition a

    \n" + + "
    Term 2
    \n" + + "

    Definition b

    \n" + + "
    \n" + + "\n

    Text 1

    \n", + + "Term 1\n\n: Definition a\n\nText 1\n\nTerm 2\n\n: Definition b\n\nText 2", + "
    \n" + + "
    Term 1
    \n" + + "

    Definition a

    \n" + + "
    \n" + + "\n

    Text 1

    \n" + + "\n
    \n" + + "
    Term 2
    \n" + + "

    Definition b

    \n" + + "
    \n" + + "\n

    Text 2

    \n", + } + doTestsBlock(t, tests, EXTENSION_DEFINITION_LISTS) +} + +func TestPreformattedHtml(t *testing.T) { + var tests = []string{ + "
    \n", + "
    \n", + + "
    \n
    \n", + "
    \n
    \n", + + "
    \n
    \nParagraph\n", + "

    \n
    \nParagraph

    \n", + + "
    \n
    \n", + "
    \n
    \n", + + "
    \nAnything here\n
    \n", + "
    \nAnything here\n
    \n", + + "
    \n Anything here\n
    \n", + "
    \n Anything here\n
    \n", + + "
    \nAnything here\n
    \n", + "
    \nAnything here\n
    \n", + + "
    \nThis is *not* &proceessed\n
    \n", + "
    \nThis is *not* &proceessed\n
    \n", + + "\n Something\n\n", + "

    \n Something\n

    \n", + + "
    \n Something here\n\n", + "

    \n Something here\n

    \n", + + "Paragraph\n
    \nHere? >&<\n
    \n", + "

    Paragraph\n

    \nHere? >&<\n

    \n", + + "Paragraph\n\n
    \nHow about here? >&<\n
    \n", + "

    Paragraph

    \n\n
    \nHow about here? >&<\n
    \n", + + "Paragraph\n
    \nHere? >&<\n
    \nAnd here?\n", + "

    Paragraph\n

    \nHere? >&<\n
    \nAnd here?

    \n", + + "Paragraph\n\n
    \nHow about here? >&<\n
    \nAnd here?\n", + "

    Paragraph

    \n\n

    \nHow about here? >&<\n
    \nAnd here?

    \n", + + "Paragraph\n
    \nHere? >&<\n
    \n\nAnd here?\n", + "

    Paragraph\n

    \nHere? >&<\n

    \n\n

    And here?

    \n", + + "Paragraph\n\n
    \nHow about here? >&<\n
    \n\nAnd here?\n", + "

    Paragraph

    \n\n
    \nHow about here? >&<\n
    \n\n

    And here?

    \n", + } + doTestsBlock(t, tests, 0) +} + +func TestPreformattedHtmlLax(t *testing.T) { + var tests = []string{ + "Paragraph\n
    \nHere? >&<\n
    \n", + "

    Paragraph

    \n\n
    \nHere? >&<\n
    \n", + + "Paragraph\n\n
    \nHow about here? >&<\n
    \n", + "

    Paragraph

    \n\n
    \nHow about here? >&<\n
    \n", + + "Paragraph\n
    \nHere? >&<\n
    \nAnd here?\n", + "

    Paragraph

    \n\n
    \nHere? >&<\n
    \n\n

    And here?

    \n", + + "Paragraph\n\n
    \nHow about here? >&<\n
    \nAnd here?\n", + "

    Paragraph

    \n\n
    \nHow about here? >&<\n
    \n\n

    And here?

    \n", + + "Paragraph\n
    \nHere? >&<\n
    \n\nAnd here?\n", + "

    Paragraph

    \n\n
    \nHere? >&<\n
    \n\n

    And here?

    \n", + + "Paragraph\n\n
    \nHow about here? >&<\n
    \n\nAnd here?\n", + "

    Paragraph

    \n\n
    \nHow about here? >&<\n
    \n\n

    And here?

    \n", + } + doTestsBlock(t, tests, EXTENSION_LAX_HTML_BLOCKS) +} + +func TestFencedCodeBlock(t *testing.T) { + var tests = []string{ + "``` go\nfunc foo() bool {\n\treturn true;\n}\n```\n", + "
    func foo() bool {\n\treturn true;\n}\n
    \n", + + "``` c\n/* special & char < > \" escaping */\n```\n", + "
    /* special & char < > " escaping */\n
    \n", + + "``` c\nno *inline* processing ~~of text~~\n```\n", + "
    no *inline* processing ~~of text~~\n
    \n", + + "```\nNo language\n```\n", + "
    No language\n
    \n", + + "``` {ocaml}\nlanguage in braces\n```\n", + "
    language in braces\n
    \n", + + "``` {ocaml} \nwith extra whitespace\n```\n", + "
    with extra whitespace\n
    \n", + + "```{ ocaml }\nwith extra whitespace\n```\n", + "
    with extra whitespace\n
    \n", + + "~ ~~ java\nWith whitespace\n~~~\n", + "

    ~ ~~ java\nWith whitespace\n~~~

    \n", + + "~~\nonly two\n~~\n", + "

    ~~\nonly two\n~~

    \n", + + "```` python\nextra\n````\n", + "
    extra\n
    \n", + + "~~~ perl\nthree to start, four to end\n~~~~\n", + "

    ~~~ perl\nthree to start, four to end\n~~~~

    \n", + + "~~~~ perl\nfour to start, three to end\n~~~\n", + "

    ~~~~ perl\nfour to start, three to end\n~~~

    \n", + + "~~~ bash\ntildes\n~~~\n", + "
    tildes\n
    \n", + + "``` lisp\nno ending\n", + "

    ``` lisp\nno ending

    \n", + + "~~~ lisp\nend with language\n~~~ lisp\n", + "

    ~~~ lisp\nend with language\n~~~ lisp

    \n", + + "```\nmismatched begin and end\n~~~\n", + "

    ```\nmismatched begin and end\n~~~

    \n", + + "~~~\nmismatched begin and end\n```\n", + "

    ~~~\nmismatched begin and end\n```

    \n", + + " ``` oz\nleading spaces\n```\n", + "
    leading spaces\n
    \n", + + " ``` oz\nleading spaces\n ```\n", + "
    leading spaces\n
    \n", + + " ``` oz\nleading spaces\n ```\n", + "
    leading spaces\n
    \n", + + "``` oz\nleading spaces\n ```\n", + "
    leading spaces\n
    \n", + + " ``` oz\nleading spaces\n ```\n", + "
    ``` oz\n
    \n\n

    leading spaces\n ```

    \n", + + "Bla bla\n\n``` oz\ncode blocks breakup paragraphs\n```\n\nBla Bla\n", + "

    Bla bla

    \n\n
    code blocks breakup paragraphs\n
    \n\n

    Bla Bla

    \n", + + "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nAnd some text after a fenced code block", + "

    Some text before a fenced code block

    \n\n
    code blocks breakup paragraphs\n
    \n\n

    And some text after a fenced code block

    \n", + + "`", + "

    `

    \n", + + "Bla bla\n\n``` oz\ncode blocks breakup paragraphs\n```\n\nBla Bla\n\n``` oz\nmultiple code blocks work okay\n```\n\nBla Bla\n", + "

    Bla bla

    \n\n
    code blocks breakup paragraphs\n
    \n\n

    Bla Bla

    \n\n
    multiple code blocks work okay\n
    \n\n

    Bla Bla

    \n", + + "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nSome text in between\n``` oz\nmultiple code blocks work okay\n```\nAnd some text after a fenced code block", + "

    Some text before a fenced code block

    \n\n
    code blocks breakup paragraphs\n
    \n\n

    Some text in between

    \n\n
    multiple code blocks work okay\n
    \n\n

    And some text after a fenced code block

    \n", + } + doTestsBlock(t, tests, EXTENSION_FENCED_CODE) +} + +func TestTable(t *testing.T) { + var tests = []string{ + "a | b\n---|---\nc | d\n", + "\n\n\n\n\n\n\n\n" + + "\n\n\n\n\n\n
    ab
    cd
    \n", + + "a | b\n---|--\nc | d\n", + "

    a | b\n---|--\nc | d

    \n", + + "|a|b|c|d|\n|----|----|----|---|\n|e|f|g|h|\n", + "\n\n\n\n\n\n\n\n\n\n" + + "\n\n\n\n\n\n\n\n
    abcd
    efgh
    \n", + + "*a*|__b__|[c](C)|d\n---|---|---|---\ne|f|g|h\n", + "\n\n\n\n\n\n\n\n\n\n" + + "\n\n\n\n\n\n\n\n
    abcd
    efgh
    \n", + + "a|b|c\n---|---|---\nd|e|f\ng|h\ni|j|k|l|m\nn|o|p\n", + "\n\n\n\n\n\n\n\n\n" + + "\n\n\n\n\n\n\n" + + "\n\n\n\n\n\n" + + "\n\n\n\n\n\n" + + "\n\n\n\n\n\n
    abc
    def
    gh
    ijk
    nop
    \n", + + "a|b|c\n---|---|---\n*d*|__e__|f\n", + "\n\n\n\n\n\n\n\n\n" + + "\n\n\n\n\n\n\n
    abc
    def
    \n", + + "a|b|c|d\n:--|--:|:-:|---\ne|f|g|h\n", + "\n\n\n\n\n" + + "\n\n\n\n\n" + + "\n\n\n\n" + + "\n\n\n\n
    abcd
    efgh
    \n", + + "a|b|c\n---|---|---\n", + "\n\n\n\n\n\n\n\n\n\n\n
    abc
    \n", + + "a| b|c | d | e\n---|---|---|---|---\nf| g|h | i |j\n", + "\n\n\n\n\n\n\n\n\n\n\n" + + "\n\n\n\n\n\n\n\n\n
    abcde
    fghij
    \n", + + "a|b\\|c|d\n---|---|---\nf|g\\|h|i\n", + "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
    ab|cd
    fg|hi
    \n", + } + doTestsBlock(t, tests, EXTENSION_TABLES) +} + +func TestUnorderedListWith_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) { + var tests = []string{ + "* Hello\n", + "
      \n
    • Hello
    • \n
    \n", + + "* Yin\n* Yang\n", + "
      \n
    • Yin
    • \n
    • Yang
    • \n
    \n", + + "* Ting\n* Bong\n* Goo\n", + "
      \n
    • Ting
    • \n
    • Bong
    • \n
    • Goo
    • \n
    \n", + + "* Yin\n\n* Yang\n", + "
      \n
    • Yin

    • \n\n
    • Yang

    • \n
    \n", + + "* Ting\n\n* Bong\n* Goo\n", + "
      \n
    • Ting

    • \n\n
    • Bong

    • \n\n
    • Goo

    • \n
    \n", + + "+ Hello\n", + "
      \n
    • Hello
    • \n
    \n", + + "+ Yin\n+ Yang\n", + "
      \n
    • Yin
    • \n
    • Yang
    • \n
    \n", + + "+ Ting\n+ Bong\n+ Goo\n", + "
      \n
    • Ting
    • \n
    • Bong
    • \n
    • Goo
    • \n
    \n", + + "+ Yin\n\n+ Yang\n", + "
      \n
    • Yin

    • \n\n
    • Yang

    • \n
    \n", + + "+ Ting\n\n+ Bong\n+ Goo\n", + "
      \n
    • Ting

    • \n\n
    • Bong

    • \n\n
    • Goo

    • \n
    \n", + + "- Hello\n", + "
      \n
    • Hello
    • \n
    \n", + + "- Yin\n- Yang\n", + "
      \n
    • Yin
    • \n
    • Yang
    • \n
    \n", + + "- Ting\n- Bong\n- Goo\n", + "
      \n
    • Ting
    • \n
    • Bong
    • \n
    • Goo
    • \n
    \n", + + "- Yin\n\n- Yang\n", + "
      \n
    • Yin

    • \n\n
    • Yang

    • \n
    \n", + + "- Ting\n\n- Bong\n- Goo\n", + "
      \n
    • Ting

    • \n\n
    • Bong

    • \n\n
    • Goo

    • \n
    \n", + + "*Hello\n", + "

    *Hello

    \n", + + "* Hello \n", + "
      \n
    • Hello
    • \n
    \n", + + "* Hello \n Next line \n", + "
      \n
    • Hello\nNext line
    • \n
    \n", + + "Paragraph\n* No linebreak\n", + "

    Paragraph

    \n\n
      \n
    • No linebreak
    • \n
    \n", + + "Paragraph\n\n* Linebreak\n", + "

    Paragraph

    \n\n
      \n
    • Linebreak
    • \n
    \n", + + "* List\n * Nested list\n", + "
      \n
    • List\n\n
        \n
      • Nested list
      • \n
    • \n
    \n", + + "* List\n\n * Nested list\n", + "
      \n
    • List

      \n\n
        \n
      • Nested list
      • \n
    • \n
    \n", + + "* List\n Second line\n\n + Nested\n", + "
      \n
    • List\nSecond line

      \n\n
        \n
      • Nested
      • \n
    • \n
    \n", + + "* List\n + Nested\n\n Continued\n", + "
      \n
    • List

      \n\n
        \n
      • Nested
      • \n
      \n\n

      Continued

    • \n
    \n", + + "* List\n * shallow indent\n", + "
      \n
    • List\n\n
        \n
      • shallow indent
      • \n
    • \n
    \n", + + "* List\n" + + " * shallow indent\n" + + " * part of second list\n" + + " * still second\n" + + " * almost there\n" + + " * third level\n", + "
      \n" + + "
    • List\n\n" + + "
        \n" + + "
      • shallow indent
      • \n" + + "
      • part of second list
      • \n" + + "
      • still second
      • \n" + + "
      • almost there\n\n" + + "
          \n" + + "
        • third level
        • \n" + + "
      • \n" + + "
    • \n" + + "
    \n", + + "* List\n extra indent, same paragraph\n", + "
      \n
    • List\n extra indent, same paragraph
    • \n
    \n", + + "* List\n\n code block\n", + "
      \n
    • List

      \n\n
      code block\n
    • \n
    \n", + + "* List\n\n code block with spaces\n", + "
      \n
    • List

      \n\n
        code block with spaces\n
    • \n
    \n", + + "* List\n\n * sublist\n\n normal text\n\n * another sublist\n", + "
      \n
    • List

      \n\n
        \n
      • sublist
      • \n
      \n\n

      normal text

      \n\n
        \n
      • another sublist
      • \n
    • \n
    \n", + } + doTestsBlock(t, tests, EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK) +} + +func TestOrderedList_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) { + var tests = []string{ + "1. Hello\n", + "
      \n
    1. Hello
    2. \n
    \n", + + "1. Yin\n2. Yang\n", + "
      \n
    1. Yin
    2. \n
    3. Yang
    4. \n
    \n", + + "1. Ting\n2. Bong\n3. Goo\n", + "
      \n
    1. Ting
    2. \n
    3. Bong
    4. \n
    5. Goo
    6. \n
    \n", + + "1. Yin\n\n2. Yang\n", + "
      \n
    1. Yin

    2. \n\n
    3. Yang

    4. \n
    \n", + + "1. Ting\n\n2. Bong\n3. Goo\n", + "
      \n
    1. Ting

    2. \n\n
    3. Bong

    4. \n\n
    5. Goo

    6. \n
    \n", + + "1 Hello\n", + "

    1 Hello

    \n", + + "1.Hello\n", + "

    1.Hello

    \n", + + "1. Hello \n", + "
      \n
    1. Hello
    2. \n
    \n", + + "1. Hello \n Next line \n", + "
      \n
    1. Hello\nNext line
    2. \n
    \n", + + "Paragraph\n1. No linebreak\n", + "

    Paragraph

    \n\n
      \n
    1. No linebreak
    2. \n
    \n", + + "Paragraph\n\n1. Linebreak\n", + "

    Paragraph

    \n\n
      \n
    1. Linebreak
    2. \n
    \n", + + "1. List\n 1. Nested list\n", + "
      \n
    1. List\n\n
        \n
      1. Nested list
      2. \n
    2. \n
    \n", + + "1. List\n\n 1. Nested list\n", + "
      \n
    1. List

      \n\n
        \n
      1. Nested list
      2. \n
    2. \n
    \n", + + "1. List\n Second line\n\n 1. Nested\n", + "
      \n
    1. List\nSecond line

      \n\n
        \n
      1. Nested
      2. \n
    2. \n
    \n", + + "1. List\n 1. Nested\n\n Continued\n", + "
      \n
    1. List

      \n\n
        \n
      1. Nested
      2. \n
      \n\n

      Continued

    2. \n
    \n", + + "1. List\n 1. shallow indent\n", + "
      \n
    1. List\n\n
        \n
      1. shallow indent
      2. \n
    2. \n
    \n", + + "1. List\n" + + " 1. shallow indent\n" + + " 2. part of second list\n" + + " 3. still second\n" + + " 4. almost there\n" + + " 1. third level\n", + "
      \n" + + "
    1. List\n\n" + + "
        \n" + + "
      1. shallow indent
      2. \n" + + "
      3. part of second list
      4. \n" + + "
      5. still second
      6. \n" + + "
      7. almost there\n\n" + + "
          \n" + + "
        1. third level
        2. \n" + + "
      8. \n" + + "
    2. \n" + + "
    \n", + + "1. List\n extra indent, same paragraph\n", + "
      \n
    1. List\n extra indent, same paragraph
    2. \n
    \n", + + "1. List\n\n code block\n", + "
      \n
    1. List

      \n\n
      code block\n
    2. \n
    \n", + + "1. List\n\n code block with spaces\n", + "
      \n
    1. List

      \n\n
        code block with spaces\n
    2. \n
    \n", + + "1. List\n * Mixted list\n", + "
      \n
    1. List\n\n
        \n
      • Mixted list
      • \n
    2. \n
    \n", + + "1. List\n * Mixed list\n", + "
      \n
    1. List\n\n
        \n
      • Mixed list
      • \n
    2. \n
    \n", + + "* Start with unordered\n 1. Ordered\n", + "
      \n
    • Start with unordered\n\n
        \n
      1. Ordered
      2. \n
    • \n
    \n", + + "* Start with unordered\n 1. Ordered\n", + "
      \n
    • Start with unordered\n\n
        \n
      1. Ordered
      2. \n
    • \n
    \n", + + "1. numbers\n1. are ignored\n", + "
      \n
    1. numbers
    2. \n
    3. are ignored
    4. \n
    \n", + } + doTestsBlock(t, tests, EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK) +} + +func TestFencedCodeBlock_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) { + var tests = []string{ + "``` go\nfunc foo() bool {\n\treturn true;\n}\n```\n", + "
    func foo() bool {\n\treturn true;\n}\n
    \n", + + "``` c\n/* special & char < > \" escaping */\n```\n", + "
    /* special & char < > " escaping */\n
    \n", + + "``` c\nno *inline* processing ~~of text~~\n```\n", + "
    no *inline* processing ~~of text~~\n
    \n", + + "```\nNo language\n```\n", + "
    No language\n
    \n", + + "``` {ocaml}\nlanguage in braces\n```\n", + "
    language in braces\n
    \n", + + "``` {ocaml} \nwith extra whitespace\n```\n", + "
    with extra whitespace\n
    \n", + + "```{ ocaml }\nwith extra whitespace\n```\n", + "
    with extra whitespace\n
    \n", + + "~ ~~ java\nWith whitespace\n~~~\n", + "

    ~ ~~ java\nWith whitespace\n~~~

    \n", + + "~~\nonly two\n~~\n", + "

    ~~\nonly two\n~~

    \n", + + "```` python\nextra\n````\n", + "
    extra\n
    \n", + + "~~~ perl\nthree to start, four to end\n~~~~\n", + "

    ~~~ perl\nthree to start, four to end\n~~~~

    \n", + + "~~~~ perl\nfour to start, three to end\n~~~\n", + "

    ~~~~ perl\nfour to start, three to end\n~~~

    \n", + + "~~~ bash\ntildes\n~~~\n", + "
    tildes\n
    \n", + + "``` lisp\nno ending\n", + "

    ``` lisp\nno ending

    \n", + + "~~~ lisp\nend with language\n~~~ lisp\n", + "

    ~~~ lisp\nend with language\n~~~ lisp

    \n", + + "```\nmismatched begin and end\n~~~\n", + "

    ```\nmismatched begin and end\n~~~

    \n", + + "~~~\nmismatched begin and end\n```\n", + "

    ~~~\nmismatched begin and end\n```

    \n", + + " ``` oz\nleading spaces\n```\n", + "
    leading spaces\n
    \n", + + " ``` oz\nleading spaces\n ```\n", + "
    leading spaces\n
    \n", + + " ``` oz\nleading spaces\n ```\n", + "
    leading spaces\n
    \n", + + "``` oz\nleading spaces\n ```\n", + "
    leading spaces\n
    \n", + + " ``` oz\nleading spaces\n ```\n", + "
    ``` oz\n
    \n\n

    leading spaces

    \n\n
    ```\n
    \n", + } + doTestsBlock(t, tests, EXTENSION_FENCED_CODE|EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK) +} + +func TestTitleBlock_EXTENSION_TITLEBLOCK(t *testing.T) { + var tests = []string{ + "% Some title\n" + + "% Another title line\n" + + "% Yep, more here too\n", + "

    " + + "Some title\n" + + "Another title line\n" + + "Yep, more here too\n" + + "

    ", + } + + doTestsBlock(t, tests, EXTENSION_TITLEBLOCK) + +} diff --git a/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/html.go b/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/html.go new file mode 100644 index 00000000..264aae58 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/third_party/github.com/russross/blackfriday/html.go @@ -0,0 +1,948 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// HTML rendering backend +// +// + +package blackfriday + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" +) + +// Html renderer configuration options. +const ( + HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks + HTML_SKIP_STYLE // skip embedded + + + +
    + + + +
    +

    Camlistore is:

    +
      +
    • a way to store, sync, share, model and back up content
    • +
    • a work in progress
    • +
    • Open Source (Apache licensed)
    • +
    • an acronym for "Content-Addressable Multi-Layer Indexed Storage", hinting that Camlistore is about: +
        +
      • content-addressable storage
      • +
      • separate interoperable parts (storage, sync, sharing, + modeling), with well-defined protocols and roles
      • +
    • +
    • your "home directory for the web"
    • +
    • pro-JSON (yet aggressively format agnostic)
    • +
    • pro-GnuPG (for signing claims)
    • +
    • pro-paranoia and privacy
    • +
    • ambitious, but ...
    • +
    • simple!
    • +
    • programning language-agnostic (parts and different implementations in Go, Python, Java, Perl, Bash, ... the language doesn't matter.)
    • +
    • neither "Cloud" nor "Local". happily both.
    • +
    • a "20% project" from a few Google employees, but not Google-centric nor endorsed by Google (other than them letting us open source our side project)
    • +
    • not yet ready for users
    • +
    • ready for developers (at least those without strong color preferences)
    • +
    + +

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam +elementum auctor faucibus. Morbi nec nulla a magna semper ultricies ut +ut neque. Maecenas eros sapien, commodo sit amet auctor in, ultricies +quis enim. Aliquam vel nisi lectus, at consectetur sapien. Donec in +nulla ac tellus tempus auctor imperdiet vitae velit. Morbi tempor, +tortor sit amet gravida facilisis, nulla ipsum ultrices ipsum, in +volutpat sapien felis sit amet quam. Proin sagittis consequat nisi non +dignissim. Aliquam eget erat arcu, id hendrerit quam. Nam eget nisl et +risus volutpat consequat. Nulla ac blandit massa. Morbi sed dui +mauris. Nulla risus mi, sodales et volutpat non, convallis ut +odio. Quisque convallis metus sit amet enim molestie ut convallis odio +posuere.

    + +
    + +
    +Got a question? +
    + +
    +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam elementum auctor faucibus. Morbi nec nulla a magna semper ultricies ut ut neque. Maecenas eros sapien, commodo sit amet auctor in, ultricies quis enim. Aliquam vel nisi lectus, at consectetur sapien. +
    + +
    +Website design inspired by git and memcached. +
    + + + diff --git a/vendor/github.com/camlistore/camlistore/website/static/piggy.gif b/vendor/github.com/camlistore/camlistore/website/static/piggy.gif new file mode 100644 index 00000000..cbf2f7b2 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/website/static/piggy.gif differ diff --git a/vendor/github.com/camlistore/camlistore/website/static/robots.txt b/vendor/github.com/camlistore/camlistore/website/static/robots.txt new file mode 100644 index 00000000..488e4b3f --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/website/static/robots.txt @@ -0,0 +1,3 @@ +# *sigh* well-known URLs. (plug: http://tools.ietf.org/html/rfc5785) +User-agent: * +Disallow: /code/ diff --git a/vendor/github.com/camlistore/camlistore/website/static/ss/8RmuLuw.jpg b/vendor/github.com/camlistore/camlistore/website/static/ss/8RmuLuw.jpg new file mode 100644 index 00000000..60b087fe Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/website/static/ss/8RmuLuw.jpg differ diff --git a/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/README.slides b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/README.slides new file mode 100644 index 00000000..06ee69b5 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/README.slides @@ -0,0 +1,4 @@ +Slide template from: + +http://code.google.com/p/io-2011-slides/ + diff --git a/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/arch.png b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/arch.png new file mode 100644 index 00000000..d061cabb Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/arch.png differ diff --git a/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/blobjects.png 
b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/blobjects.png new file mode 100644 index 00000000..7ede0917 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/blobjects.png differ diff --git a/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/diagrams.odp b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/diagrams.odp new file mode 100644 index 00000000..629da123 Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/diagrams.odp differ diff --git a/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/fsbackup.png b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/fsbackup.png new file mode 100644 index 00000000..8c6d240c Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/fsbackup.png differ diff --git a/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/images/colorbar.png b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/images/colorbar.png new file mode 100755 index 00000000..189af7ae Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/images/colorbar.png differ diff --git a/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/index.html b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/index.html new file mode 100755 index 00000000..403ac3f8 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/index.html @@ -0,0 +1,747 @@ + + + + + + + Camlistore + + + + + + + + + +
    + + + +
    +

    + Camlistore +

    +

    + Brad Fitzpatrick +
    + 2011-05-07 +

    + +

    (use arrow keys or PgUp/PgDown to move slides)

    +
    + + +
    +

    + Who am I? +

    +
      +
    • + Brad Fitzpatrick <brad@danga.com> +
    • +
    • Perl Hacker since 1994
    • +
    • Projects: + + + + + + +
      Danga / 6A (Perl)Google
      +
      LiveJournal
      +
      memcached
      +
      Perlbal
      +
      MogileFS
      +
      OpenID
      +
      +
      Social Graph API (XFN / FOAF)
      +
      WebFinger
      +
      PubSubHubbub
      +
      Android
      +
      Go
      +
      +
      * decentralized social
      +
    • +
    +
    + +
    +

    + But why am I in Brazil? +

    +
      +
    • + "Hey, want to come speak at a Perl conference in Brazil?" +
    • +
    • "Yes, totally, but... I don't write much Perl these days. :-("
    • +
    • "You could speak on memcached."
    • +
    • "But that's an old topic, no?"
    • +
    • "You have any new project you're excited about?"
    • + +
    +
    + + +
    +

    + Camlistore! +

    +
    + +
    +

    + Camlistore +

    +
      +
    • New open source project
    • +
    • Almost a year old
    • +
    • Still in development
    • +
    • Starting to be useful :-)
    • +
    • Hard to easily describe...
    • +
    + +
    + + Camlistore is a way to store, sync, share, model and back up content + +
    + camlistore.org +
    +
    + +
    +

    + Motivation +

    +
      +
    • I've written too many Content Management Systems +
        +
      • blogs, comments, photos, emails, backups, scanned paperwork, ...
      • +
      • is a scanned photo a scan, a photo, or a blog post? who cares.
      • +
      • write one CMS to rule them all
      • +
      • ... or at least a good framework for higher-level CMSes
      • +
      +
    • +
    +
    + + +
    +

    + Motivation (cont) +

    +
      +
    • I still want to help solve the Decentralized Social Network Problem +
        +
      • protocols, not companies
      • +
      • gmail, hotmail: hosted versions of SMTP, IMAP
      • +
      • ... but I can run my own SMTP/IMAP server if I want.
      • +
      • ... or change my SMTP/IMAP provider
      • +
      +
    • +
    +
    + +
    +

    + Motivation (cont) +

    +
      +
    • I wanted something conceptually simple.
    • +
    • HTTP interfaces, not language-specific
    • +
    • I use lots of machines; don't want to think about sync or conflicts.
    • +
    • Data archaeology: should be easy and obvious to + reconstruct in 20 or 100 years
    • +
    +
    + +
    +

    + The Product +

    +
      +
    • one private dumping ground to store anything
    • +
    • backups, filesystems, objects, photos, likes, bookmarks, shares, my website, ...
    • +
    • live backup my phone
    • +
    • live replicate / sync my dumping group between my house & laptop & Amazon & Google
    • +
    • web UI (ala gmail, docs.google.com, etc) or FUSE filesystem
    • +
    • Easy for end-users; powerful for dorks
    • +
    +
    + +
    +

    + Security Model +

    +
      +
    • your private repo, for life
    • +
    • everything private by default
    • +
    • grant access to specific objects/trees with friends or the world
    • +
    • web UI or CLI tools let you share
    • +
    +
    + +
    +

    + So what's with the silly name? +

    +
    + +
    +

    + Camlistore +

    +
      +
    • Content-
    • +
    • Addressable
    • +
    • Multi-
    • +
    • Layer-
    • +
    • Indexed
    • +
    • Storage
    • +
    +
    + +
    +

    + Content-Addressable +

    +
      +
    • At the core, everything is stored & addressed by its digest (e.g. SHA1, MD5, etc)
    • +
    • e.g. "sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33" for the blob "foo"
    • +
    • Great properties: +
        +
      • no versions of content: change it changes the new digest too
      • +
      • no versions: no sync conflicts
      • +
      • no versions: perfect caching (have it or don't)
      • +
      +
    • +
    +
    + +
    +

    + Multi-Layer, Indexed +

    +
      +
    • Unix philosophy: small pieces with well-defined interfaces that can be chained or composed
    • +
    • Camlistore pieces include: +
        +
      • Blob storage: memory, disk, S3, Google, MySQL index, etc
      • +
      • Schema
      • +
      • Signing
      • +
      • Replication
      • +
      • Indexing: (e.g. replicate from disk to MySQL index)
      • +
      • Search
      • +
      • HTML UI
      • +
      +
    • + +
    +
    + +
    +

    Logically

    + +
    + +
    +

    In reality

    +
      +
    • End-users: use a hosted version
    • +
    • Dorks: single server binary with all the logical pieces
    • +
    +
    + +
    +

    + From the bottom up... +

    +
    + +
    +

    + Blob Server +

    +
    + +
    +

    Blob Server: how dumb it is

    +
      +
    • "Blob" == zero or more bytes. no meta-data
    • +
    • private operations, to owner of data only:
    • +
        +
      • get(blobref) → blob
      • +
      • stat(blobref+) → [(blobref, size), ...]
      • +
      • put(blobref, blob)
      • +
      • enumerate(..) → [(blobref, size)...] (sorted by blobref)
      • +
      +
    • no public (non-owner) access
    • +
    • HTTP interface: GET /camli/sha1-xxxxxxx HTTP/1.1
    • +
    • delete(blobref) is disabled by default, privileged op for GC or replication queues only
    • +
    +
    + +
    +

    Blob Server: seriously, no metadata

    +
      +
    • no filenames
    • +
    • no "mime types"
    • +
    • no "{create,mod,access} time"
    • +
    • size is implicit
    • +
    • blob: just some bytes
    • +
    • metadata? layers above.
    • +
    +
    + +
    +

    + Uh, what can you do with that? +

    +
    + +
    +

    Uh, what can you do with that?

    +
      +
    • with just a blob server?
    • +
    • not much
    • +
    • but let's start with an easy example...
    • +
    +
    + +
    +

    + Filesystem Backups +

    +
    + +
    +

    Filesystem Backups

    +
      +
    • previous project: brackup +
        +
      • good: Perl, slide/dice/encrypt S3 backup, content-addressed, good iterative backups
      • +
      • bad: large (several MB) "backup manifest" text files +
      +
    • + +
    • fossil/venti, git, etc: directories content-addressed by content of their children, hash trees, etc
    • +
    • git: "tree objects", "commmit objects", etc
    • +
    • Camlistore: "schema blobs"
    • +
    +
    + +
    +

    Schema: how to model your content

    +
      +
    • Camlistore defines one possible schema
    • +
    • but blobserver doesn't know about it all
    • +
    • tools generate schema,
    • +
    • indexer + search understand the schema.
    • +
    +
    + +
    +

    Schema Blobs

    +
      +
    • so if all blobs are just dumb blobs of bytes with no metadata,
    • +
    • how do you store metadata?
    • +
    • as blobs themselves!
    • +
    +
    + +
    +

    Minimal Schema Blob

    + +
    +
    {
    +  "camliVersion": 1,
    +  "camliType": "whatever"
    +}
    +
    +

    Whitespace doesn't matter. Just must be valid JSON in its +entirety. Use whatever JSON libraries you've got.

    + +

    That one is named
    sha1-19e851fe3eb3d1f3d9d1cefe9f92c6f3c7d754f6

    +

    or perhaps: sha512-2c6746aba012337aaf113fd63c24d994a0703d33eb5d6ed58859e45dc4e02dcf
    dae5c4d46c5c757fb85d5aff342245fe4edb780c028a6f3c994c1295236c931e

    + +
    + + + +
    +

    Schema blob; type "file"

    +
    {"camliVersion": 1,
    + "camliType": "file",
    + "fileName": "foo.dat",
    + "unixPermission": "0644",
    + ...,
    + "size": 6000133,
    + "contentParts": [
    +   {"blobRef": "sha1-...dead", "size": 111},
    +   {"blobRef": "sha1-...beef", "size": 5000000, "offset": 492 },
    +   {"size": 1000000},
    +   {"blobRef": "digalg-blobref", "size": 22},
    +  ]
    +}
    +
    + +
    +

    Schema blob; type "directory"

    +
    {"camliVersion": 1,
    + "camliType": "directory",
    + "fileName": "foodir",
    + "unixPermission": "0755",
    +  ...,
    + "entries": "sha1-c3764bc2138338d5e2936def18ff8cc9cda38455"
    +}
    +
    + +
    +

    Schema blob; type "static-set"

    +
    {"camliVersion": 1,
    + "camliType": "static-set",
    +"members": [
    +     "sha1-xxxxxxxxxxxx",
    +     "sha1-xxxxxxxxxxxx",
    +     "sha1-xxxxxxxxxxxx",
    +     "sha1-xxxxxxxxxxxx",
    +     "sha1-xxxxxxxxxxxx",
    +     "sha1-xxxxxxxxxxxx",
    +  ]
    +}
    +
    + +
    +

    + Backup a directory... +

    +
    $ camput --file $HOME
    +sha1-8659a52f726588dc44d38dfb22d84a4da2902fed
    + +

    (like git/hg/fossil, that identifier represents everything down.)

    + +

    Iterative backups are cheap, easy identifier to share, etc

    + +

    But how will you remember that identifier? (later)

    +
    + +
    +

    + But what about mutable data? +

    +
      +
    • immutable data is easy to represent & reference
    • +
        +
      • sha1-8659a52f726588dc44d38dfb22d84a4da2902fed is an immutable snapshot
      • +
      +
    • how to represent mutable data in an immutable, content-addressed world?
    • +
    • how to share a reference to a mutable object when changing an object mutates its name?
    • +
    +
    + +
    +

    + Objects & "Permanodes" +

    +
    + +
    +

    + Terminology +

    +
      +
    • blob: just dumb, immutable series of bytes
    • +
    • schema blob: a blob that's a valid JSON object w/ camliVersion & camliType
    • +
    • signed schema blob aka "claim": a schema blob with an embedded OpenPGP signature
    • +
    • object: something mutable. represented as an anchor "permanode" + a set of mutations (claims)
    • +
    • permanode: a stable reference. an anchor. just a signed schema blob, but of almost no content...
    • +
    +
    + +
    +

    + Permanode +

    +
    $ camput --permanode
    +sha1-ea799271abfbf85d8e22e4577f15f704c8349026
    +
    +$ camget sha1-ea799271abfbf85d8e22e4577f15f704c8349026
    +{"camliVersion": 1,
    +  "camliSigner": "sha1-c4da9d771661563a27704b91b67989e7ea1e50b8",
    +  "camliType": "permanode",
    +  "random": "oj)r}$Wa/[J|XQThNdhE"
    +,"camliSig":"iQEcBAABAgAGBQJNRxceAAoJEGjzeDN/6vt8ihIH/Aov7FRIq4dODAPWGDwqL
    +1X9Ko2ZtSSO1lwHxCQVdCMquDtAdI3387fDlEG/ALoT/LhmtXQgYTt8QqDxVdu
    +EK1or6/jqo3RMQ8tTgZ+rW2cj9f3Q/dg7el0Ngoq03hyYXdo3whxCH2x0jajSt4RCc
    +gdXN6XmLlOgD/LVQEJ303Du1OhCvKX1A40BIdwe1zxBc5zkLmoa8rClAlHdqwo
    +gxYFY4cwFm+jJM5YhSPemNrDe8W7KT6r0oA7SVfOan1NbIQUel65xwIZBD0ah
    +CXBx6WXvfId6AdiahnbZiBup1fWSzxeeW7Y2/RQwv5IZ8UgfBqRHvnxcbNmScrzl
    +p3V3ZoY"}
    + +
    + +
    +

    + Backup a directory... +

    +
    $ camput --file $HOME
    +sha1-8659a52f726588dc44d38dfb22d84a4da2902fed
    +$ camput --permanode --file $HOME
    +sha1-ea799271abfbf85d8e22e4577f15f704c8349026
    +$ camput --permanode --name="Brad's home directory" --file $HOME
    +sha1-ea799271abfbf85d8e22e4577f15f704c8349026
    +
      +
    • all the file data blobs, file/dir schema blobs,
    • +
    • a new permanode, owned by you
    • +
    • a mutation: permanode's content attribute == directory root
    • +
    • a mutation: permanode's name attribute == "Brad's home directory"
    • +
    +
    + +
    +

    +
    + +
    +

    Aside: Garbage Collection

    +
      +
    • Permanodes are (optionally) GC roots,
    • +
    • or anything signed by you.
    • +
    • If not a blob isn't reachable by a signed root, can be deleted.
    • +
    • If you want to keep a plain "dumb" blob, you should create a "keep" claim for it, or a permanode.
    • +
    +
    + +
    +

    + Modeling non-filesystem objects +

    +
    + +
    +

    Example: a photo gallery

    +
      +
    • Photos are objects
    • +
    • Galleries (sets) are objects
    • +
    • Photos are members of galleries
    • +
    • Photos & galleries have attributes (single-valued: "title", multi-valued: "tag")
    • +
    • Photos might be updated over time: +
        +
      • EXIF GPS updated, cropping, white balance
      • +
      • don't want to break links!
      • +
      +
    • +
    +
    + +
    +

    +
    + +
    +

    + How to make sense of that? +

    +
    + +
    +

    + Indexing & Search +

    +
    + +
    +

    + Indexing: summary +

    +

    For each blob, build an index of: +

      +
    • directed graph of inter-blob references
    • +
    • (permanode, time) => resolved attributes
    • +
    • (permanode, time) => set memberships
    • +
    • etc...
    • +
    +
    + +
    +

    + Indexing & Replication +

    +
      +
    • indexing is real-time, no polling
    • +
    • MySQL index speaks the blob server protocol
    • +
    • just replicate to the index (MySQL, etc) just like other blob servers (Amazon S3, etc)
    • +
    +
    +
    + +
    +

    + Replication Implementation +

    +
      +
    • cold bootstrap: enumerate() (sorted) all blobs from src and dst, copy all blobs that dst doesn't have. +
    • more efficient: use multiple machines, starting at sha1-0*, sha1-1*, sha1-2*, ... etc
    • +
    • once in-sync, for each (src, dst) replication pair, keep a src_to_dst_QUEUE namespace on src,
    • +
    • all new blobs to src also go into src_to_dst_QUEUE (refcount, hardlink, etc)
    • +
    • real-time watch src_to_dst_QUEUE & replicate & delete from the queue. or re-enumerate just the queue. +
    +
    + +
    +

    + Search +

    +
      +
    • Permanodes created by $who, sorted by date desc, type "photo", tagged "funny"
    • +
    • My recent backups with attribute "hostname" == "camlistore.org", +
    • All friends' galleries in which this photo appears,
    • +
    • etc...
    • +
    +

    ...similar to your email, or docs.google.com. "My stuff" or "My bookmarks".

    +
    + +
    +

    + Privacy Model +

    +
      +
    • all your blobs & objects & searches are private
    • +
    • nothing is public by default
    • +
    +
    + +
    +

    + What if you want to share with friends, or globally publish something? +

    +
    + +
    +

    + Sharing & Share Blobs +

    +

    the act of sharing involves creating a new share claim, just another blob, signed.

    +

    here is: sha1-071fda36c1bd9e4595ed16ab5e2a46d44491f708:

    +
    {"camliVersion": 1,
    +  "authType": "haveref",
    +  "camliSigner": "sha1-f019d17dd308eebbd49fd94536eb67214c2f0587",
    +  "camliType": "share",
    +  "target": "sha1-0e5e60f367cc8156ae48198c496b2b2ebdf5313d",
    +  "transitive": true
    +,"camliSig":"iQEcBAABAgAGBQJNQJGuAAoJEIUeCLJL7Fq1EuAIAL/nGoX8caGaANnam0bcIQT7C61wXMRW4qCCaFW+w67ys5z4ztfnTPKwL9ErzMF8Hd32Xe/bVcF6ZL38x/axqI7ehxN8lneKGQNoEdZDA9i752aAr0fkAba6eDehoOj9F4XxOzk3iVrq445jEXtu/+twamHV3UfRozWK1ZQb57dM+cRff47M/Y6VIBRSgW2BrABjuBs8G6PiKxycgh1mb+RL8f9KG+HB/yFuK37YJqZ0zU2OTRp6ELiOgTxbeg99koV9Duy4f4mQgxQgli46077Sv/ujzIeVbmdFL3OenGEzQnyKG0fhf8fa5WkED0XfH7zibAHLiSq3O7x11Q0406U==ANug"}
    + Target w/ ?via= parameter: sha1-0e5e60f?via=sha1-071fda & next hop +
    + +
    +

    + Sharing Details & Implementation +

    +
      +
    • blobserver is private-only. the frontend mediates access to the world, checks authentication, or lack thereof.
    • +
    • all non-owner requests must present a share blob's blobref as an access token
    • +
    • that share blob dictates: +
        +
      • what sort of authenticatation is required (or "haveref" for none, like a secret link)
      • +
      • which blob(s) are granted access (the "transitive" option)
      • +
      +
    • +
    • requests for a blob must include the path to get there, from the share root
    • +
    +
    + +
    +

    + What can be shared +

    +
      +
    • Share a single blob, +
    • Share a subtree, +
    • Share a search query and its results' reachable blobs
    • +
    • ... give out [world, girlfriend] access to all pictures you take on your phone, in real-time
    • +
    +
    + +
    +

    + Project Status +

    +
    + +
    +

    + Project Status +

    +
      +
    • Blobstore, Go (any OS), can store on disk, s3, mysqlindex +
    • Blobstore, Python (App Engine only) can store on Google +
    • Perl tests for two blob stores +
    • Android uploader (Java) +
    • Bunch of Go libraries / command-line tools: sync, put, get +
    • FUSE filesystem (read-only, currently) +
    • Search: basics working. more queries looks easy now. +
    • Simple, self-contained everything binary (blob storage, sharing, search, index, frontend) for early adopters: ~95% +
    • Web UI / JavaScript APIs: in progress +
    +
    + +
    +

    + In Review +

    +
      +
    • You own all your blobs; everything is private by default.
    • +
    • Mutable objects are made of mutation claim blobs.
    • +
    • Sync is trivial: either you have it or you don't
    • +
    • Some blobs are signed
    • +
    • Indexing & search to find your blobs / roots
    • +
    • To share you must create a declaration of sharing ...
    • +
    • ... and the system will only allow access if such claims exist.
    • +
    • Decentralized, but hostable. You can run your own server (with no central + company or point of control), but you can also let somebody else do it for + you, like email.
    • +
    +
    + + + +
    + + + diff --git a/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/prettify.js b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/prettify.js new file mode 100644 index 00000000..43816692 --- /dev/null +++ b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/prettify.js @@ -0,0 +1,1391 @@ +// Copyright (C) 2006 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +/** + * @fileoverview + * some functions for browser-side pretty printing of code contained in html. + * + *

    + * For a fairly comprehensive set of languages see the + * README + * file that came with this source. At a minimum, the lexer should work on a + * number of languages including C and friends, Java, Python, Bash, SQL, HTML, + * XML, CSS, Javascript, and Makefiles. It works passably on Ruby, PHP and Awk + * and a subset of Perl, but, because of commenting conventions, doesn't work on + * Smalltalk, Lisp-like, or CAML-like languages without an explicit lang class. + *

    + * Usage:

      + *
    1. include this source file in an html page via + * {@code } + *
    2. define style rules. See the example page for examples. + *
    3. mark the {@code
      } and {@code } tags in your source with
      + *    {@code class=prettyprint.}
      + *    You can also use the (html deprecated) {@code } tag, but the pretty
      + *    printer needs to do more substantial DOM manipulations to support that, so
      + *    some css styles may not be preserved.
      + * </ol>
      + * That's it.  I wanted to keep the API as simple as possible, so there's no
      + * need to specify which language the code is in, but if you wish, you can add
      + * another class to the {@code <pre>} or {@code <code>} element to specify the
      + * language, as in {@code <pre class="prettyprint lang-java">}.  Any class that
      + * starts with "lang-" followed by a file extension, specifies the file type.
      + * See the "lang-*.js" files in this directory for code that implements
      + * per-language file handlers.
      + * <p>
      + * Change log:<br>
      + * cbeust, 2006/08/22
      + * <blockquote>
      + *   Java annotations (start with "@") are now captured as literals ("lit")
      + * </blockquote>
      + * @requires console
      + */
      +
      +// JSLint declarations
      +/*global console, document, navigator, setTimeout, window */
      +
      +/**
      + * Split {@code prettyPrint} into multiple timeouts so as not to interfere with
      + * UI events.
      + * If set to {@code false}, {@code prettyPrint()} is synchronous.
      + */
      +window['PR_SHOULD_USE_CONTINUATION'] = true;
      +
      +/** the number of characters between tab columns */
      +window['PR_TAB_WIDTH'] = 8;
      +
      +/** Contains functions for creating and registering new language handlers.
      +  * @type {Object}
      +  */
      +window['PR']
      +
      +/** Pretty print a chunk of code.
      +  *
      +  * @param {string} sourceCodeHtml code as html
      +  * @return {string} code as html, but prettier
      +  */
      +  = window['prettyPrintOne']
      +/** Find all the {@code <pre>} and {@code <code>} tags in the DOM with
      +  * {@code class=prettyprint} and prettify them.
      +  * @param {Function?} opt_whenDone if specified, called when the last entry
      +  *     has been finished.
      +  */
      +  = window['prettyPrint'] = void 0;
      +
      +
      +(function () {
+  // Keyword lists for various languages.
+  // Each list is a single space-separated string (note the trailing spaces)
+  // so that supersets can be built by plain string concatenation below.
+  var FLOW_CONTROL_KEYWORDS =
+      "break continue do else for if return while ";
+  var C_KEYWORDS = FLOW_CONTROL_KEYWORDS + "auto case char const default " +
+      "double enum extern float goto int long register short signed sizeof " +
+      "static struct switch typedef union unsigned void volatile ";
+  var COMMON_KEYWORDS = C_KEYWORDS + "catch class delete false import " +
+      "new operator private protected public this throw true try typeof ";
+  var CPP_KEYWORDS = COMMON_KEYWORDS + "alignof align_union asm axiom bool " +
+      "concept concept_map const_cast constexpr decltype " +
+      "dynamic_cast explicit export friend inline late_check " +
+      "mutable namespace nullptr reinterpret_cast static_assert static_cast " +
+      "template typeid typename using virtual wchar_t where ";
+  var JAVA_KEYWORDS = COMMON_KEYWORDS +
+      "abstract boolean byte extends final finally implements import " +
+      "instanceof null native package strictfp super synchronized throws " +
+      "transient ";
+  var CSHARP_KEYWORDS = JAVA_KEYWORDS +
+      "as base by checked decimal delegate descending dynamic event " +
+      "fixed foreach from group implicit in interface internal into is lock " +
+      "object out override orderby params partial readonly ref sbyte sealed " +
+      "stackalloc string select uint ulong unchecked unsafe ushort var ";
+  var COFFEE_KEYWORDS = "all and by catch class else extends false finally " +
+      "for if in is isnt loop new no not null of off on or return super then " +
+      "true try unless until when while yes ";
+  var JSCRIPT_KEYWORDS = COMMON_KEYWORDS +
+      "debugger eval export function get null set undefined var with " +
+      "Infinity NaN ";
+  var PERL_KEYWORDS = "caller delete die do dump elsif eval exit foreach for " +
+      "goto if import last local my next no our print package redo require " +
+      "sub undef unless until use wantarray while BEGIN END ";
+  var PYTHON_KEYWORDS = FLOW_CONTROL_KEYWORDS + "and as assert class def del " +
+      "elif except exec finally from global import in is lambda " +
+      "nonlocal not or pass print raise try with yield " +
+      "False True None ";
+  var RUBY_KEYWORDS = FLOW_CONTROL_KEYWORDS + "alias and begin case class def" +
+      " defined elsif end ensure false in module next nil not or redo rescue " +
+      "retry self super then true undef unless until when yield BEGIN END ";
+  var SH_KEYWORDS = FLOW_CONTROL_KEYWORDS + "case done elif esac eval fi " +
+      "function in local set then until ";
+  // Union of all keyword sets; used by the default (language-unknown) lexer.
+  var ALL_KEYWORDS = (
+      CPP_KEYWORDS + CSHARP_KEYWORDS + JSCRIPT_KEYWORDS + PERL_KEYWORDS +
+      PYTHON_KEYWORDS + RUBY_KEYWORDS + SH_KEYWORDS);
      +
+  // token style names.  correspond to css classes
+  /** token style for a string literal */
+  var PR_STRING = 'str';
+  /** token style for a keyword */
+  var PR_KEYWORD = 'kwd';
+  /** token style for a comment */
+  var PR_COMMENT = 'com';
+  /** token style for a type */
+  var PR_TYPE = 'typ';
+  /** token style for a literal value.  e.g. 1, null, true. */
+  var PR_LITERAL = 'lit';
+  /** token style for a punctuation string. */
+  var PR_PUNCTUATION = 'pun';
+  /** token style for plain text. */
+  var PR_PLAIN = 'pln';
+
+  /** token style for an sgml tag. */
+  var PR_TAG = 'tag';
+  /** token style for a markup declaration such as a DOCTYPE. */
+  var PR_DECLARATION = 'dec';
+  /** token style for embedded source. */
+  var PR_SOURCE = 'src';
+  /** token style for an sgml attribute name. */
+  var PR_ATTRIB_NAME = 'atn';
+  /** token style for an sgml attribute value. */
+  var PR_ATTRIB_VALUE = 'atv';
+
+  /**
+   * A class that indicates a section of markup that is not code, e.g. to allow
+   * embedding of line numbers within code listings.
+   */
+  var PR_NOCODE = 'nocode';
      +
+  /** A set of tokens that can precede a regular expression literal in
+    * javascript.
+    * http://www.mozilla.org/js/language/js20/rationale/syntax.html has the full
+    * list, but I've removed ones that might be problematic when seen in
+    * languages that don't support regular expression literals.
+    *
+    * <p>Specifically, I've removed any keywords that can't precede a regexp
+    * literal in a syntactically legal javascript program, and I've removed the
+    * "in" keyword since it's not a keyword in many languages, and might be used
+    * as a count of inches.
+    *
+    * <p>The link above does not accurately describe EcmaScript rules since
+    * it fails to distinguish between (a=++/b/i) and (a++/b/i) but it works
+    * very well in practice.
+    *
+    * @private
+    */
+  var REGEXP_PRECEDER_PATTERN = function () {
+      var preceders = [
+          "!", "!=", "!==", "#", "%", "%=", "&", "&&", "&&=",
+          "&=", "(", "*", "*=", /* "+", */ "+=", ",", /* "-", */ "-=",
+          "->", /*".", "..", "...", handled below */ "/", "/=", ":", "::", ";",
+          "<", "<<", "<<=", "<=", "=", "==", "===", ">",
+          ">=", ">>", ">>=", ">>>", ">>>=", "?", "@", "[",
+          "^", "^=", "^^", "^^=", "{", "|", "|=", "||",
+          "||=", "~" /* handles =~ and !~ */,
+          "break", "case", "continue", "delete",
+          "do", "else", "finally", "instanceof",
+          "return", "throw", "try", "typeof"
+          ];
+      // A bare [+-] sign also counts as a preceder since "+" and "-" were
+      // elided from the list above; '^^' is the doubled anchor marker.
+      var pattern = '(?:^^|[+-]';
+      // Escape regex metacharacters; word chars and =<>:& are safe as-is.
+      for (var i = 0; i < preceders.length; ++i) {
+        pattern += '|' + preceders[i].replace(/([^=<>:&a-z])/g, '\\$1');
+      }
+      pattern += ')\\s*';  // matches at end, and matches empty string
+      return pattern;
+      // CAVEAT: this does not properly handle the case where a regular
+      // expression immediately follows another since a regular expression may
+      // have flags for case-sensitivity and the like.  Having regexp tokens
+      // adjacent is not valid in any language I'm aware of, so I'm punting.
+      // TODO: maybe style special characters inside a regexp as punctuation.
+    }();
      +
      +  
+  /**
+   * Given a group of {@link RegExp}s, returns a {@code RegExp} that globally
+   * matches the union of the sets of strings matched by the input RegExp.
+   * Since it matches globally, if the input strings have a start-of-input
+   * anchor (/^.../), it is ignored for the purposes of unioning.
+   * @param {Array.<RegExp>} regexs non multiline, non-global regexs.
+   * @return {RegExp} a global regex.
+   */
+  function combinePrefixPatterns(regexs) {
+    var capturedGroupIndex = 0;
+  
+    // Decide whether the combined regex can simply carry the 'i' flag, or
+    // whether a case-sensitive pattern containing letters forces per-pattern
+    // case folding instead.
+    var needToFoldCase = false;
+    var ignoreCase = false;
+    for (var i = 0, n = regexs.length; i < n; ++i) {
+      var regex = regexs[i];
+      if (regex.ignoreCase) {
+        ignoreCase = true;
+      } else if (/[a-z]/i.test(regex.source.replace(
+                     /\\u[0-9a-f]{4}|\\x[0-9a-f]{2}|\\[^ux]/gi, ''))) {
+        needToFoldCase = true;
+        ignoreCase = false;
+        break;
+      }
+    }
+  
+    // Decodes one charset part (escape sequence or literal character) to its
+    // character code.
+    function decodeEscape(charsetPart) {
+      if (charsetPart.charAt(0) !== '\\') { return charsetPart.charCodeAt(0); }
+      switch (charsetPart.charAt(1)) {
+        case 'b': return 8;
+        case 't': return 9;
+        case 'n': return 0xa;
+        case 'v': return 0xb;
+        case 'f': return 0xc;
+        case 'r': return 0xd;
+        case 'u': case 'x':
+          return parseInt(charsetPart.substring(2), 16)
+              || charsetPart.charCodeAt(1);
+        case '0': case '1': case '2': case '3': case '4':
+        case '5': case '6': case '7':
+          return parseInt(charsetPart.substring(1), 8);
+        default: return charsetPart.charCodeAt(1);
+      }
+    }
+  
+    // Encodes a character code as a literal or escape safe inside a charset.
+    function encodeEscape(charCode) {
+      if (charCode < 0x20) {
+        return (charCode < 0x10 ? '\\x0' : '\\x') + charCode.toString(16);
+      }
+      var ch = String.fromCharCode(charCode);
+      if (ch === '\\' || ch === '-' || ch === '[' || ch === ']') {
+        ch = '\\' + ch;
+      }
+      return ch;
+    }
+  
+    // Rewrites a [...] charset so it matches both cases of any letters it
+    // covers, consolidating overlapping ranges along the way.
+    function caseFoldCharset(charSet) {
+      var charsetParts = charSet.substring(1, charSet.length - 1).match(
+          new RegExp(
+              '\\\\u[0-9A-Fa-f]{4}'
+              + '|\\\\x[0-9A-Fa-f]{2}'
+              + '|\\\\[0-3][0-7]{0,2}'
+              + '|\\\\[0-7]{1,2}'
+              + '|\\\\[\\s\\S]'
+              + '|-'
+              + '|[^-\\\\]',
+              'g'));
+      var groups = [];
+      var ranges = [];
+      var inverse = charsetParts[0] === '^';
+      for (var i = inverse ? 1 : 0, n = charsetParts.length; i < n; ++i) {
+        var p = charsetParts[i];
+        switch (p) {
+          case '\\B': case '\\b':
+          case '\\D': case '\\d':
+          case '\\S': case '\\s':
+          case '\\W': case '\\w':
+            groups.push(p);
+            continue;
+        }
+        var start = decodeEscape(p);
+        var end;
+        if (i + 2 < n && '-' === charsetParts[i + 1]) {
+          end = decodeEscape(charsetParts[i + 2]);
+          i += 2;
+        } else {
+          end = start;
+        }
+        ranges.push([start, end]);
+        // If the range might intersect letters, then expand it.
+        if (!(end < 65 || start > 122)) {
+          if (!(end < 65 || start > 90)) {
+            ranges.push([Math.max(65, start) | 32, Math.min(end, 90) | 32]);
+          }
+          if (!(end < 97 || start > 122)) {
+            ranges.push([Math.max(97, start) & ~32, Math.min(end, 122) & ~32]);
+          }
+        }
+      }
+  
+      // [[1, 10], [3, 4], [8, 12], [14, 14], [16, 16], [17, 17]]
+      // -> [[1, 12], [14, 14], [16, 17]]
+      ranges.sort(function (a, b) { return (a[0] - b[0]) || (b[1]  - a[1]); });
+      var consolidatedRanges = [];
+      var lastRange = [NaN, NaN];
+      for (var i = 0; i < ranges.length; ++i) {
+        var range = ranges[i];
+        if (range[0] <= lastRange[1] + 1) {
+          lastRange[1] = Math.max(lastRange[1], range[1]);
+        } else {
+          consolidatedRanges.push(lastRange = range);
+        }
+      }
+  
+      var out = ['['];
+      if (inverse) { out.push('^'); }
+      out.push.apply(out, groups);
+      for (var i = 0; i < consolidatedRanges.length; ++i) {
+        var range = consolidatedRanges[i];
+        out.push(encodeEscape(range[0]));
+        if (range[1] > range[0]) {
+          if (range[1] + 1 > range[0]) { out.push('-'); }
+          out.push(encodeEscape(range[1]));
+        }
+      }
+      out.push(']');
+      return out.join('');
+    }
+  
+    function allowAnywhereFoldCaseAndRenumberGroups(regex) {
+      // Split into character sets, escape sequences, punctuation strings
+      // like ('(', '(?:', ')', '^'), and runs of characters that do not
+      // include any of the above.
+      var parts = regex.source.match(
+          new RegExp(
+              '(?:'
+              + '\\[(?:[^\\x5C\\x5D]|\\\\[\\s\\S])*\\]'  // a character set
+              + '|\\\\u[A-Fa-f0-9]{4}'  // a unicode escape
+              + '|\\\\x[A-Fa-f0-9]{2}'  // a hex escape
+              + '|\\\\[0-9]+'  // a back-reference or octal escape
+              + '|\\\\[^ux0-9]'  // other escape sequence
+              + '|\\(\\?[:!=]'  // start of a non-capturing group
+              + '|[\\(\\)\\^]'  // start/end of a group, or line start
+              + '|[^\\x5B\\x5C\\(\\)\\^]+'  // run of other characters
+              + ')',
+              'g'));
+      var n = parts.length;
+  
+      // Maps captured group numbers to the number they will occupy in
+      // the output or to -1 if that has not been determined, or to
+      // undefined if they need not be capturing in the output.
+      var capturedGroups = [];
+  
+      // Walk over and identify back references to build the capturedGroups
+      // mapping.
+      for (var i = 0, groupIndex = 0; i < n; ++i) {
+        var p = parts[i];
+        if (p === '(') {
+          // groups are 1-indexed, so max group index is count of '('
+          ++groupIndex;
+        } else if ('\\' === p.charAt(0)) {
+          var decimalValue = +p.substring(1);
+          if (decimalValue && decimalValue <= groupIndex) {
+            capturedGroups[decimalValue] = -1;
+          }
+        }
+      }
+  
+      // Renumber groups and reduce capturing groups to non-capturing groups
+      // where possible.
+      for (var i = 1; i < capturedGroups.length; ++i) {
+        if (-1 === capturedGroups[i]) {
+          capturedGroups[i] = ++capturedGroupIndex;
+        }
+      }
+      for (var i = 0, groupIndex = 0; i < n; ++i) {
+        var p = parts[i];
+        if (p === '(') {
+          ++groupIndex;
+          if (capturedGroups[groupIndex] === undefined) {
+            parts[i] = '(?:';
+          }
+        } else if ('\\' === p.charAt(0)) {
+          var decimalValue = +p.substring(1);
+          if (decimalValue && decimalValue <= groupIndex) {
+            parts[i] = '\\' + capturedGroups[groupIndex];
+          }
+        }
+      }
+  
+      // Remove any prefix anchors so that the output will match anywhere.
+      // ^^ really does mean an anchored match though.
+      for (var i = 0, groupIndex = 0; i < n; ++i) {
+        if ('^' === parts[i] && '^' !== parts[i + 1]) { parts[i] = ''; }
+      }
+  
+      // Expand letters to groups to handle mixing of case-sensitive and
+      // case-insensitive patterns if necessary.
+      if (regex.ignoreCase && needToFoldCase) {
+        for (var i = 0; i < n; ++i) {
+          var p = parts[i];
+          var ch0 = p.charAt(0);
+          if (p.length >= 2 && ch0 === '[') {
+            parts[i] = caseFoldCharset(p);
+          } else if (ch0 !== '\\') {
+            // TODO: handle letters in numeric escapes.
+            parts[i] = p.replace(
+                /[a-zA-Z]/g,
+                function (ch) {
+                  var cc = ch.charCodeAt(0);
+                  return '[' + String.fromCharCode(cc & ~32, cc | 32) + ']';
+                });
+          }
+        }
+      }
+  
+      return parts.join('');
+    }
+  
+    // Wrap each rewritten pattern in a non-capturing group and union them.
+    var rewritten = [];
+    for (var i = 0, n = regexs.length; i < n; ++i) {
+      var regex = regexs[i];
+      if (regex.global || regex.multiline) { throw new Error('' + regex); }
+      rewritten.push(
+          '(?:' + allowAnywhereFoldCaseAndRenumberGroups(regex) + ')');
+    }
+  
+    return new RegExp(rewritten.join('|'), ignoreCase ? 'gi' : 'g');
+  }
      +
      +
+  /**
+   * Split markup into a string of source code and an array mapping ranges in
+   * that string to the text nodes in which they appear.
+   *
+   * <p>
+   * The HTML DOM structure:</p>
+   * <pre>
+   * (Element   "p"
+   *   (Element "b"
+   *     (Text  "print "))       ; #1
+   *   (Text    "'Hello '")      ; #2
+   *   (Element "br")            ; #3
+   *   (Text    "  + 'World';")) ; #4
+   * </pre>
+   * <p>
+   * corresponds to the HTML
+   * {@code <p><b>print </b>'Hello '<br>  + 'World';</p>}.</p>
+   *
+   * <p>
+   * It will produce the output:</p>
+   * <pre>
+   * {
+   *   source: "print 'Hello '\n  + 'World';",
+   *   //                 1         2
+   *   //       012345678901234 5678901234567
+   *   spans: [0, #1, 6, #2, 14, #3, 15, #4]
+   * }
+   * </pre>
+   * <p>
+   * where #1 is a reference to the {@code "print "} text node above, and so
+   * on for the other text nodes.
+   * </p>
+   *
+   * <p>
+   * The {@code} spans array is an array of pairs.  Even elements are the start
+   * indices of substrings, and odd elements are the text nodes (or BR elements)
+   * that contain the text for those substrings.
+   * Substrings continue until the next index or the end of the source.
+   * </p>
+   *
+   * @param {Node} node an HTML DOM subtree containing source-code.
+   * @return {Object} source code and the text nodes in which they occur.
+   */
+  function extractSourceSpans(node) {
+    // Elements with this class are skipped entirely (e.g. line-number gutters).
+    var nocode = /(?:^|\s)nocode(?:\s|$)/;
+  
+    var chunks = [];
+    var length = 0;
+    var spans = [];
+    var k = 0;
+  
+    // Detect whether CSS preserves whitespace in this node: matches
+    // white-space values beginning with 'pre' (pre, pre-wrap, pre-line).
+    var whitespace;
+    if (node.currentStyle) {
+      whitespace = node.currentStyle.whiteSpace;
+    } else if (window.getComputedStyle) {
+      whitespace = document.defaultView.getComputedStyle(node, null)
+          .getPropertyValue('white-space');
+    }
+    var isPreformatted = whitespace && 'pre' === whitespace.substring(0, 3);
+  
+    // Walk the DOM in document order, accumulating text chunks and the node
+    // each chunk came from; BR and LI elements contribute newlines.
+    function walk(node) {
+      switch (node.nodeType) {
+        case 1:  // Element
+          if (nocode.test(node.className)) { return; }
+          for (var child = node.firstChild; child; child = child.nextSibling) {
+            walk(child);
+          }
+          var nodeName = node.nodeName;
+          if ('BR' === nodeName || 'LI' === nodeName) {
+            chunks[k] = '\n';
+            spans[k << 1] = length++;
+            spans[(k++ << 1) | 1] = node;
+          }
+          break;
+        case 3: case 4:  // Text
+          var text = node.nodeValue;
+          if (text.length) {
+            if (!isPreformatted) {
+              text = text.replace(/[ \t\r\n]+/g, ' ');
+            } else {
+              text = text.replace(/\r\n?/g, '\n');  // Normalize newlines.
+            }
+            // TODO: handle tabs here?
+            chunks[k] = text;
+            spans[k << 1] = length;
+            length += text.length;
+            spans[(k++ << 1) | 1] = node;
+          }
+          break;
+      }
+    }
+  
+    walk(node);
+  
+    return {
+      source: chunks.join('').replace(/\n$/, ''),
+      spans: spans
+    };
+  }
      +
      +
+  /**
+   * Apply the given language handler to sourceCode and add the resulting
+   * decorations to out.
+   * @param {number} basePos the index of sourceCode within the chunk of source
+   *    whose decorations are already present on out.
+   * @param {string} sourceCode the snippet of source to decorate; no-op when
+   *    empty.
+   * @param {function (Object)} langHandler lexer that fills job.decorations.
+   * @param {Array} out decoration array that receives the new entries.
+   */
+  function appendDecorations(basePos, sourceCode, langHandler, out) {
+    if (!sourceCode) { return; }
+    var job = {
+      source: sourceCode,
+      basePos: basePos
+    };
+    langHandler(job);
+    out.push.apply(out, job.decorations);
+  }
      +
+  /** Given triples of [style, pattern, context] returns a lexing function,
+    * The lexing function interprets the patterns to find token boundaries and
+    * returns a decoration list of the form
+    * [index_0, style_0, index_1, style_1, ..., index_n, style_n]
+    * where index_n is an index into the sourceCode, and style_n is a style
+    * constant like PR_PLAIN.  index_n-1 <= index_n, and style_n-1 applies to
+    * all characters in sourceCode[index_n-1:index_n].
+    *
+    * The stylePatterns is a list whose elements have the form
+    * [style : string, pattern : RegExp, DEPRECATED, shortcut : string].
+    *
+    * Style is a style constant like PR_PLAIN, or can be a string of the
+    * form 'lang-FOO', where FOO is a language extension describing the
+    * language of the portion of the token in $1 after pattern executes.
+    * E.g., if style is 'lang-lisp', and group 1 contains the text
+    * '(hello (world))', then that portion of the token will be passed to the
+    * registered lisp handler for formatting.
+    * The text before and after group 1 will be restyled using this decorator
+    * so decorators should take care that this doesn't result in infinite
+    * recursion.  For example, the HTML lexer rule for SCRIPT elements looks
+    * something like ['lang-js', /<[s]cript>(.+?)<\/script>/].  This may match
+    * '<script>foo()<\/script>', which would cause the current decorator to
+    * be called with '<script>' which would not match the same rule since
+    * group 1 must not be empty, so it would be instead styled as PR_TAG by
+    * the generic tag rule.  The handler registered for the 'js' extension would
+    * then be called with 'foo()', and finally, the current decorator would
+    * be called with '<\/script>' which would not match the original rule and
+    * so the generic tag rule would identify it as a tag.
+    *
+    * Pattern must only match prefixes, and if it matches a prefix, then that
+    * match is considered a token with the same style.
+    *
+    * Context is applied to the last non-whitespace, non-comment token
+    * recognized.
+    *
+    * Shortcut is an optional string of characters, any of which, if the first
+    * character, guarantee that this pattern and only this pattern matches.
+    *
+    * @param {Array} shortcutStylePatterns patterns that always start with
+    *   a known character.  Must have a shortcut string.
+    * @param {Array} fallthroughStylePatterns patterns that will be tried in
+    *   order if the shortcut ones fail.  May have shortcuts.
+    *
+    * @return {function (Object)} a
+    *   function that takes source code and returns a list of decorations.
+    */
+  function createSimpleLexer(shortcutStylePatterns, fallthroughStylePatterns) {
+    var shortcuts = {};
+    var tokenizer;
+    // Build the first-character shortcut table and combine every distinct
+    // pattern (plus a catch-all for any character) into one global tokenizer.
+    (function () {
+      var allPatterns = shortcutStylePatterns.concat(fallthroughStylePatterns);
+      var allRegexs = [];
+      var regexKeys = {};
+      for (var i = 0, n = allPatterns.length; i < n; ++i) {
+        var patternParts = allPatterns[i];
+        var shortcutChars = patternParts[3];
+        if (shortcutChars) {
+          for (var c = shortcutChars.length; --c >= 0;) {
+            shortcuts[shortcutChars.charAt(c)] = patternParts;
+          }
+        }
+        var regex = patternParts[1];
+        var k = '' + regex;
+        if (!regexKeys.hasOwnProperty(k)) {
+          allRegexs.push(regex);
+          regexKeys[k] = null;
+        }
+      }
+      allRegexs.push(/[\0-\uffff]/);
+      tokenizer = combinePrefixPatterns(allRegexs);
+    })();
+
+    var nPatterns = fallthroughStylePatterns.length;
+    var notWs = /\S/;
+
+    /**
+     * Lexes job.source and produces an output array job.decorations of style
+     * classes preceded by the position at which they start in job.source in
+     * order.
+     *
+     * @param {Object} job an object like {@code
+     *    source: {string} sourceText plain text,
+     *    basePos: {int} position of job.source in the larger chunk of
+     *        sourceCode.
+     * }
+     */
+    var decorate = function (job) {
+      var sourceCode = job.source, basePos = job.basePos;
+      /** Even entries are positions in source in ascending order.  Odd entries
+        * are style markers (e.g., PR_COMMENT) that run from that position until
+        * the end.
+        * @type {Array.<number|string>}
+        */
+      var decorations = [basePos, PR_PLAIN];
+      var pos = 0;  // index into sourceCode
+      var tokens = sourceCode.match(tokenizer) || [];
+      // Cache of token text -> style; only populated for non-embedded styles.
+      var styleCache = {};
+
+      for (var ti = 0, nTokens = tokens.length; ti < nTokens; ++ti) {
+        var token = tokens[ti];
+        var style = styleCache[token];
+        var match = void 0;
+
+        var isEmbedded;
+        if (typeof style === 'string') {
+          isEmbedded = false;
+        } else {
+          // Try the single pattern keyed by the token's first character,
+          // then fall back to trying each fallthrough pattern in order.
+          var patternParts = shortcuts[token.charAt(0)];
+          if (patternParts) {
+            match = token.match(patternParts[1]);
+            style = patternParts[0];
+          } else {
+            for (var i = 0; i < nPatterns; ++i) {
+              patternParts = fallthroughStylePatterns[i];
+              match = token.match(patternParts[1]);
+              if (match) {
+                style = patternParts[0];
+                break;
+              }
+            }
+
+            if (!match) {  // make sure that we make progress
+              style = PR_PLAIN;
+            }
+          }
+
+          isEmbedded = style.length >= 5 && 'lang-' === style.substring(0, 5);
+          if (isEmbedded && !(match && typeof match[1] === 'string')) {
+            isEmbedded = false;
+            style = PR_SOURCE;
+          }
+
+          if (!isEmbedded) { styleCache[token] = style; }
+        }
+
+        var tokenStart = pos;
+        pos += token.length;
+
+        if (!isEmbedded) {
+          decorations.push(basePos + tokenStart, style);
+        } else {  // Treat group 1 as an embedded block of source code.
+          var embeddedSource = match[1];
+          var embeddedSourceStart = token.indexOf(embeddedSource);
+          var embeddedSourceEnd = embeddedSourceStart + embeddedSource.length;
+          if (match[2]) {
+            // If embeddedSource can be blank, then it would match at the
+            // beginning which would cause us to infinitely recurse on the
+            // entire token, so we catch the right context in match[2].
+            embeddedSourceEnd = token.length - match[2].length;
+            embeddedSourceStart = embeddedSourceEnd - embeddedSource.length;
+          }
+          var lang = style.substring(5);
+          // Decorate the left of the embedded source
+          appendDecorations(
+              basePos + tokenStart,
+              token.substring(0, embeddedSourceStart),
+              decorate, decorations);
+          // Decorate the embedded source
+          appendDecorations(
+              basePos + tokenStart + embeddedSourceStart,
+              embeddedSource,
+              langHandlerForExtension(lang, embeddedSource),
+              decorations);
+          // Decorate the right of the embedded section
+          appendDecorations(
+              basePos + tokenStart + embeddedSourceEnd,
+              token.substring(embeddedSourceEnd),
+              decorate, decorations);
+        }
+      }
+      job.decorations = decorations;
+    };
+    return decorate;
+  }
      +
+  /** returns a function that produces a list of decorations from source text.
+    *
+    * This code treats ", ', and ` as string delimiters, and \ as a string
+    * escape.  It does not recognize perl's qq() style strings.
+    * It has no special handling for double delimiter escapes as in basic, or
+    * the tripled delimiters used in python, but should work on those regardless
+    * although in those cases a single string literal may be broken up into
+    * multiple adjacent string literals.
+    *
+    * It recognizes C, C++, and shell style comments.
+    *
+    * @param {Object} options a set of optional parameters.
+    * @return {function (Object)} a function that examines the source code
+    *     in the input job and builds the decoration list.
+    */
+  function sourceDecorator(options) {
+    var shortcutStylePatterns = [], fallthroughStylePatterns = [];
+    if (options['tripleQuotedStrings']) {
+      // '''multi-line-string''', 'single-line-string', and double-quoted
+      shortcutStylePatterns.push(
+          [PR_STRING,  /^(?:\'\'\'(?:[^\'\\]|\\[\s\S]|\'{1,2}(?=[^\']))*(?:\'\'\'|$)|\"\"\"(?:[^\"\\]|\\[\s\S]|\"{1,2}(?=[^\"]))*(?:\"\"\"|$)|\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$))/,
+           null, '\'"']);
+    } else if (options['multiLineStrings']) {
+      // 'multi-line-string', "multi-line-string"
+      shortcutStylePatterns.push(
+          [PR_STRING,  /^(?:\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$)|\`(?:[^\\\`]|\\[\s\S])*(?:\`|$))/,
+           null, '\'"`']);
+    } else {
+      // 'single-line-string', "single-line-string"
+      shortcutStylePatterns.push(
+          [PR_STRING,
+           /^(?:\'(?:[^\\\'\r\n]|\\.)*(?:\'|$)|\"(?:[^\\\"\r\n]|\\.)*(?:\"|$))/,
+           null, '"\'']);
+    }
+    if (options['verbatimStrings']) {
+      // verbatim-string-literal production from the C# grammar.  See issue 93.
+      fallthroughStylePatterns.push(
+          [PR_STRING, /^@\"(?:[^\"]|\"\")*(?:\"|$)/, null]);
+    }
+    var hc = options['hashComments'];
+    if (hc) {
+      if (options['cStyleComments']) {
+        if (hc > 1) {  // multiline hash comments
+          shortcutStylePatterns.push(
+              [PR_COMMENT, /^#(?:##(?:[^#]|#(?!##))*(?:###|$)|.*)/, null, '#']);
+        } else {
+          // Stop C preprocessor declarations at an unclosed open comment
+          shortcutStylePatterns.push(
+              [PR_COMMENT, /^#(?:(?:define|elif|else|endif|error|ifdef|include|ifndef|line|pragma|undef|warning)\b|[^\r\n]*)/,
+               null, '#']);
+        }
+        // Angle-bracket include targets (e.g. <stdio.h>) are styled as strings.
+        fallthroughStylePatterns.push(
+            [PR_STRING,
+             /^<(?:(?:(?:\.\.\/)*|\/?)(?:[\w-]+(?:\/[\w-]+)+)?[\w-]+\.h|[a-z]\w*)>/,
+             null]);
+      } else {
+        shortcutStylePatterns.push([PR_COMMENT, /^#[^\r\n]*/, null, '#']);
+      }
+    }
+    if (options['cStyleComments']) {
+      fallthroughStylePatterns.push([PR_COMMENT, /^\/\/[^\r\n]*/, null]);
+      fallthroughStylePatterns.push(
+          [PR_COMMENT, /^\/\*[\s\S]*?(?:\*\/|$)/, null]);
+    }
+    if (options['regexLiterals']) {
+      var REGEX_LITERAL = (
+          // A regular expression literal starts with a slash that is
+          // not followed by * or / so that it is not confused with
+          // comments.
+          '/(?=[^/*])'
+          // and then contains any number of raw characters,
+          + '(?:[^/\\x5B\\x5C]'
+          // escape sequences (\x5C),
+          +    '|\\x5C[\\s\\S]'
+          // or non-nesting character sets (\x5B\x5D);
+          +    '|\\x5B(?:[^\\x5C\\x5D]|\\x5C[\\s\\S])*(?:\\x5D|$))+'
+          // finally closed by a /.
+          + '/');
+      fallthroughStylePatterns.push(
+          ['lang-regex',
+           new RegExp('^' + REGEXP_PRECEDER_PATTERN + '(' + REGEX_LITERAL + ')')
+           ]);
+    }
+
+    var keywords = options['keywords'].replace(/^\s+|\s+$/g, '');
+    if (keywords.length) {
+      fallthroughStylePatterns.push(
+          [PR_KEYWORD,
+           new RegExp('^(?:' + keywords.replace(/\s+/g, '|') + ')\\b'), null]);
+    }
+
+    shortcutStylePatterns.push([PR_PLAIN,       /^\s+/, null, ' \r\n\t\xA0']);
+    fallthroughStylePatterns.push(
+        // TODO(mikesamuel): recognize non-latin letters and numerals in idents
+        [PR_LITERAL,     /^@[a-z_$][a-z_$@0-9]*/i, null],
+        [PR_TYPE,        /^@?[A-Z]+[a-z][A-Za-z_$@0-9]*/, null],
+        [PR_PLAIN,       /^[a-z_$][a-z_$@0-9]*/i, null],
+        [PR_LITERAL,
+         new RegExp(
+             '^(?:'
+             // A hex number
+             + '0x[a-f0-9]+'
+             // or an octal or decimal number,
+             // NOTE(review): '\\.\\d\\+' matches a literal '+' after the
+             // digit; upstream likely intended '\\.\\d+'.  Kept byte-identical
+             // to the vendored upstream source.
+             + '|(?:\\d(?:_\\d+)*\\d*(?:\\.\\d*)?|\\.\\d\\+)'
+             // possibly in scientific notation
+             + '(?:e[+\\-]?\\d+)?'
+             + ')'
+             // with an optional modifier like UL for unsigned long
+             + '[a-z]*', 'i'),
+         null, '0123456789'],
+        // Don't treat escaped quotes in bash as starting strings.  See issue 144.
+        [PR_PLAIN,       /^\\[\s\S]?/, null],
+        [PR_PUNCTUATION, /^.[^\s\w\.$@\'\"\`\/\#\\]*/, null]);
+
+    return createSimpleLexer(shortcutStylePatterns, fallthroughStylePatterns);
+  }
      +
+  // Fallback decorator used for 'default-code': a permissive lexer with the
+  // union of all keyword sets and every comment/string/regex option enabled.
+  var decorateSource = sourceDecorator({
+        'keywords': ALL_KEYWORDS,
+        'hashComments': true,
+        'cStyleComments': true,
+        'multiLineStrings': true,
+        'regexLiterals': true
+      });
      +
+  /**
+   * Given a DOM subtree, wraps it in a list, and puts each line into its own
+   * list item.
+   *
+   * @param {Node} node modified in place.  Its content is pulled into an
+   *     HTMLOListElement, and each line is moved into a separate list item.
+   *     This requires cloning elements, so the input might not have unique
+   *     IDs after numbering.
+   * @param {number=} opt_startLineNum if an integer, the number to assign to
+   *     the first line (via the LI 'value' attribute); otherwise numbering
+   *     starts at 1.
+   */
+  function numberLines(node, opt_startLineNum) {
+    var nocode = /(?:^|\s)nocode(?:\s|$)/;
+    var lineBreak = /\r\n?|\n/;
+  
+    var document = node.ownerDocument;
+  
+    // Read the computed white-space style; currentStyle is the legacy IE
+    // path, getComputedStyle the standard one.
+    var whitespace;
+    if (node.currentStyle) {
+      whitespace = node.currentStyle.whiteSpace;
+    } else if (window.getComputedStyle) {
+      whitespace = document.defaultView.getComputedStyle(node, null)
+          .getPropertyValue('white-space');
+    }
+    // If it's preformatted, then we need to split lines on line breaks
+    // in addition to <BR>s.
+    var isPreformatted = whitespace && 'pre' === whitespace.substring(0, 3);
+  
+    // Move all of node's children into a single LI; walk/breakAfter below
+    // carve that LI into one LI per line.
+    var li = document.createElement('LI');
+    while (node.firstChild) {
+      li.appendChild(node.firstChild);
+    }
+    // An array of lines.  We split below, so this is initialized to one
+    // un-split line.
+    var listItems = [li];
+  
+    // Depth-first walk that ends the current line at each <BR> and, when
+    // preformatted, at the first line break found inside a text node.
+    function walk(node) {
+      switch (node.nodeType) {
+        case 1:  // Element
+          if (nocode.test(node.className)) { break; }
+          if ('BR' === node.nodeName) {
+            breakAfter(node);
+            // Discard the <BR> since it is now flush against a </LI>.
+            if (node.parentNode) {
+              node.parentNode.removeChild(node);
+            }
+          } else {
+            for (var child = node.firstChild; child; child = child.nextSibling) {
+              walk(child);
+            }
+          }
+          break;
+        case 3: case 4:  // Text
+          if (isPreformatted) {
+            var text = node.nodeValue;
+            var match = text.match(lineBreak);
+            if (match) {
+              var firstLine = text.substring(0, match.index);
+              node.nodeValue = firstLine;
+              // Push the remainder into a fresh sibling text node; the outer
+              // loop reaches it again via the listItems it lands in.
+              var tail = text.substring(match.index + match[0].length);
+              if (tail) {
+                var parent = node.parentNode;
+                parent.insertBefore(
+                    document.createTextNode(tail), node.nextSibling);
+              }
+              breakAfter(node);
+              if (!firstLine) {
+                // Don't leave blank text nodes in the DOM.
+                node.parentNode.removeChild(node);
+              }
+            }
+          }
+          break;
+      }
+    }
+  
+    // Split a line after the given node.
+    function breakAfter(lineEndNode) {
+      // If there's nothing to the right, then we can skip ending the line
+      // here, and move root-wards since splitting just before an end-tag
+      // would require us to create a bunch of empty copies.
+      while (!lineEndNode.nextSibling) {
+        lineEndNode = lineEndNode.parentNode;
+        if (!lineEndNode) { return; }
+      }
+  
+      function breakLeftOf(limit, copy) {
+        // Clone shallowly if this node needs to be on both sides of the break.
+        var rightSide = copy ? limit.cloneNode(false) : limit;
+        var parent = limit.parentNode;
+        if (parent) {
+          // We clone the parent chain.
+          // This helps us resurrect important styling elements that cross lines.
+          // E.g. in <i>Foo<br>Bar</i>
+          // should be rewritten to <li><i>Foo</i></li><li><i>Bar</i></li>.
+          var parentClone = breakLeftOf(parent, 1);
+          // Move the clone and everything to the right of the original
+          // onto the cloned parent.
+          var next = limit.nextSibling;
+          parentClone.appendChild(rightSide);
+          for (var sibling = next; sibling; sibling = next) {
+            next = sibling.nextSibling;
+            parentClone.appendChild(sibling);
+          }
+        }
+        return rightSide;
+      }
+  
+      var copiedListItem = breakLeftOf(lineEndNode.nextSibling, 0);
+  
+      // Walk the parent chain until we reach an unattached LI.
+      for (var parent;
+           // Check nodeType since IE invents document fragments.
+           (parent = copiedListItem.parentNode) && parent.nodeType === 1;) {
+        copiedListItem = parent;
+      }
+      // Put it on the list of lines for later processing.
+      listItems.push(copiedListItem);
+    }
+  
+    // Split lines while there are lines left to split.
+    for (var i = 0;  // Number of lines that have been split so far.
+         i < listItems.length;  // length updated by breakAfter calls.
+         ++i) {
+      walk(listItems[i]);
+    }
+  
+    // Make sure numeric indices show correctly.
+    if (opt_startLineNum === (opt_startLineNum|0)) {
+      listItems[0].setAttribute('value', opt_startLineNum);
+    }
+  
+    var ol = document.createElement('OL');
+    ol.className = 'linenums';
+    var offset = Math.max(0, ((opt_startLineNum - 1 /* zero index */)) | 0) || 0;
+    for (var i = 0, n = listItems.length; i < n; ++i) {
+      li = listItems[i];
+      // Stick a class on the LIs so that stylesheets can
+      // color odd/even rows, or any other row pattern that
+      // is co-prime with 10.
+      li.className = 'L' + ((i + offset) % 10);
+      if (!li.firstChild) {
+        // Keep empty lines visible by giving them a non-breaking space.
+        li.appendChild(document.createTextNode('\xA0'));
+      }
+      ol.appendChild(li);
+    }
+  
+    node.appendChild(ol);
+  }
      +
+  /**
+   * Breaks {@code job.source} around style boundaries in {@code job.decorations}
+   * and modifies {@code job.sourceNode} in place.
+   * @param {Object} job like <pre>{
+   *    source: {string} source as plain text,
+   *    spans: {Array.<number|Node>} alternating span start indices into source
+   *       and the text node or element (e.g. {@code <BR>}) corresponding to that
+   *       span.
+   *    decorations: {Array.<number|string} an array of style classes preceded
+   *       by the position at which they start in job.source in order
+   * }</pre>
+   * @private
+   */
+  function recombineTagsAndDecorations(job) {
+    var isIE = /\bMSIE\b/.test(navigator.userAgent);
+    var newlineRe = /\n/g;
+  
+    var source = job.source;
+    var sourceLength = source.length;
+    // Index into source after the last code-unit recombined.
+    var sourceIndex = 0;
+  
+    var spans = job.spans;
+    var nSpans = spans.length;
+    // Index into spans after the last span which ends at or before sourceIndex.
+    var spanIndex = 0;
+  
+    var decorations = job.decorations;
+    var nDecorations = decorations.length;
+    // Index into decorations after the last decoration which ends at or before sourceIndex.
+    var decorationIndex = 0;
+  
+    // Simplify decorations: compact the array in place, dropping zero-length
+    // entries and merging adjacent runs that share a style.
+    var decPos = 0;
+    for (var i = 0; i < nDecorations;) {
+      // Skip over any zero-length decorations.
+      var startPos = decorations[i];
+      var start = i;
+      while (start + 2 < nDecorations && decorations[start + 2] === startPos) {
+        start += 2;
+      }
+      // Conflate all adjacent decorations that use the same style.
+      var startDec = decorations[start + 1];
+      var end = start + 2;
+      while (end + 2 <= nDecorations
+             && (decorations[end + 1] === startDec
+                 || decorations[end] === decorations[end + 2])) {
+        end += 2;
+      }
+      decorations[decPos++] = startPos;
+      decorations[decPos++] = startDec;
+      i = end;
+    }
+  
+    // Strip any zero-length decoration at the end.
+    if (decPos && decorations[decPos - 2] === sourceLength) { decPos -= 2; }
+    nDecorations = decorations.length = decPos;
+  
+    // Two-pointer merge over spans and decorations: each iteration styles the
+    // source up to whichever boundary (span end or decoration end) comes first.
+    var decoration = null;
+    while (spanIndex < nSpans) {
+      var spanStart = spans[spanIndex];
+      var spanEnd = spans[spanIndex + 2] || sourceLength;
+  
+      var decStart = decorations[decorationIndex];
+      var decEnd = decorations[decorationIndex + 2] || sourceLength;
+  
+      var end = Math.min(spanEnd, decEnd);
+  
+      var textNode = spans[spanIndex + 1];
+      if (textNode.nodeType !== 1) {  // Don't muck with <BR>s or <LI>s
+        var styledText = source.substring(sourceIndex, end);
+        // This may seem bizarre, and it is.  Emitting LF on IE causes the
+        // code to display with spaces instead of line breaks.
+        // Emitting Windows standard issue linebreaks (CRLF) causes a blank
+        // space to appear at the beginning of every line but the first.
+        // Emitting an old Mac OS 9 line separator makes everything spiffy.
+        if (isIE) { styledText = styledText.replace(newlineRe, '\r'); }
+        textNode.nodeValue = styledText;
+        var document = textNode.ownerDocument;
+        var span = document.createElement('SPAN');
+        span.className = decorations[decorationIndex + 1];
+        var parentNode = textNode.parentNode;
+        parentNode.replaceChild(span, textNode);
+        span.appendChild(textNode);
+        if (sourceIndex < spanEnd) {  // Split off a text node.
+          spans[spanIndex + 1] = textNode
+              // TODO: Possibly optimize by using '' if there's no flicker.
+              = document.createTextNode(source.substring(end, spanEnd));
+          parentNode.insertBefore(textNode, span.nextSibling);
+        }
+      }
+  
+      sourceIndex = end;
+  
+      // Advance whichever pointer(s) we consumed up to.
+      if (sourceIndex >= spanEnd) {
+        spanIndex += 2;
+      }
+      if (sourceIndex >= decEnd) {
+        decorationIndex += 2;
+      }
+    }
+  }
      +
      +
+  /** Maps language-specific file extensions (plus the pseudo-extensions
+      'default-code' and 'default-markup') to handlers. */
+  var langHandlerRegistry = {};
      +  /** Register a language handler for the given file extensions.
      +    * @param {function (Object)} handler a function from source code to a list
      +    *      of decorations.  Takes a single argument job which describes the
      +    *      state of the computation.   The single parameter has the form
      +    *      {@code {
      +    *        source: {string} as plain text.
      +    *        decorations: {Array.<number|string>} an array of style classes
      +    *                     preceded by the position at which they start in
      +    *                     job.source in order.
      +    *                     The language handler should assigned this field.
      +    *        basePos: {int} the position of source in the larger source chunk.
      +    *                 All positions in the output decorations array are relative
      +    *                 to the larger source chunk.
      +    *      } }
      +    * @param {Array.<string>} fileExtensions
      +    */
      +  function registerLangHandler(handler, fileExtensions) {
      +    for (var i = fileExtensions.length; --i >= 0;) {
      +      var ext = fileExtensions[i];
      +      if (!langHandlerRegistry.hasOwnProperty(ext)) {
      +        langHandlerRegistry[ext] = handler;
      +      } else if ('console' in window) {
      +        console['warn']('cannot override language handler %s', ext);
      +      }
      +    }
      +  }
      +  function langHandlerForExtension(extension, source) {
      +    if (!(extension && langHandlerRegistry.hasOwnProperty(extension))) {
      +      // Treat it as markup if the first non whitespace character is a < and
      +      // the last non-whitespace character is a >.
      +      extension = /^\s*</.test(source)
      +          ? 'default-markup'
      +          : 'default-code';
      +    }
      +    return langHandlerRegistry[extension];
      +  }
      +  registerLangHandler(decorateSource, ['default-code']);
      +  registerLangHandler(
      +      createSimpleLexer(
      +          [],
      +          [
      +           [PR_PLAIN,       /^[^<?]+/],
      +           [PR_DECLARATION, /^<!\w[^>]*(?:>|$)/],
      +           [PR_COMMENT,     /^<\!--[\s\S]*?(?:-\->|$)/],
      +           // Unescaped content in an unknown language
      +           ['lang-',        /^<\?([\s\S]+?)(?:\?>|$)/],
      +           ['lang-',        /^<%([\s\S]+?)(?:%>|$)/],
      +           [PR_PUNCTUATION, /^(?:<[%?]|[%?]>)/],
      +           ['lang-',        /^<xmp\b[^>]*>([\s\S]+?)<\/xmp\b[^>]*>/i],
      +           // Unescaped content in javascript.  (Or possibly vbscript).
      +           ['lang-js',      /^<script\b[^>]*>([\s\S]*?)(<\/script\b[^>]*>)/i],
      +           // Contains unescaped stylesheet content
      +           ['lang-css',     /^<style\b[^>]*>([\s\S]*?)(<\/style\b[^>]*>)/i],
      +           ['lang-in.tag',  /^(<\/?[a-z][^<>]*>)/i]
      +          ]),
      +      ['default-markup', 'htm', 'html', 'mxml', 'xhtml', 'xml', 'xsl']);
      +  registerLangHandler(
      +      createSimpleLexer(
      +          [
      +           [PR_PLAIN,        /^[\s]+/, null, ' \t\r\n'],
      +           [PR_ATTRIB_VALUE, /^(?:\"[^\"]*\"?|\'[^\']*\'?)/, null, '\"\'']
      +           ],
      +          [
      +           [PR_TAG,          /^^<\/?[a-z](?:[\w.:-]*\w)?|\/?>$/i],
      +           [PR_ATTRIB_NAME,  /^(?!style[\s=]|on)[a-z](?:[\w:-]*\w)?/i],
      +           ['lang-uq.val',   /^=\s*([^>\'\"\s]*(?:[^>\'\"\s\/]|\/(?=\s)))/],
      +           [PR_PUNCTUATION,  /^[=<>\/]+/],
      +           ['lang-js',       /^on\w+\s*=\s*\"([^\"]+)\"/i],
      +           ['lang-js',       /^on\w+\s*=\s*\'([^\']+)\'/i],
      +           ['lang-js',       /^on\w+\s*=\s*([^\"\'>\s]+)/i],
      +           ['lang-css',      /^style\s*=\s*\"([^\"]+)\"/i],
      +           ['lang-css',      /^style\s*=\s*\'([^\']+)\'/i],
      +           ['lang-css',      /^style\s*=\s*([^\"\'>\s]+)/i]
      +           ]),
      +      ['in.tag']);
      +  registerLangHandler(
      +      createSimpleLexer([], [[PR_ATTRIB_VALUE, /^[\s\S]+/]]), ['uq.val']);
      +  registerLangHandler(sourceDecorator({
      +          'keywords': CPP_KEYWORDS,
      +          'hashComments': true,
      +          'cStyleComments': true
      +        }), ['c', 'cc', 'cpp', 'cxx', 'cyc', 'm']);
      +  registerLangHandler(sourceDecorator({
      +          'keywords': 'null true false'
      +        }), ['json']);
      +  registerLangHandler(sourceDecorator({
      +          'keywords': CSHARP_KEYWORDS,
      +          'hashComments': true,
      +          'cStyleComments': true,
      +          'verbatimStrings': true
      +        }), ['cs']);
      +  registerLangHandler(sourceDecorator({
      +          'keywords': JAVA_KEYWORDS,
      +          'cStyleComments': true
      +        }), ['java']);
      +  registerLangHandler(sourceDecorator({
      +          'keywords': SH_KEYWORDS,
      +          'hashComments': true,
      +          'multiLineStrings': true
      +        }), ['bsh', 'csh', 'sh']);
      +  registerLangHandler(sourceDecorator({
      +          'keywords': PYTHON_KEYWORDS,
      +          'hashComments': true,
      +          'multiLineStrings': true,
      +          'tripleQuotedStrings': true
      +        }), ['cv', 'py']);
      +  registerLangHandler(sourceDecorator({
      +          'keywords': PERL_KEYWORDS,
      +          'hashComments': true,
      +          'multiLineStrings': true,
      +          'regexLiterals': true
      +        }), ['perl', 'pl', 'pm']);
      +  registerLangHandler(sourceDecorator({
      +          'keywords': RUBY_KEYWORDS,
      +          'hashComments': true,
      +          'multiLineStrings': true,
      +          'regexLiterals': true
      +        }), ['rb']);
      +  registerLangHandler(sourceDecorator({
      +          'keywords': JSCRIPT_KEYWORDS,
      +          'cStyleComments': true,
      +          'regexLiterals': true
      +        }), ['js']);
      +  registerLangHandler(sourceDecorator({
      +          'keywords': COFFEE_KEYWORDS,
      +          'hashComments': 3,  // ### style block comments
      +          'cStyleComments': true,
      +          'multilineStrings': true,
      +          'tripleQuotedStrings': true,
      +          'regexLiterals': true
      +        }), ['coffee']);
      +  registerLangHandler(createSimpleLexer([], [[PR_STRING, /^[\s\S]+/]]), ['regex']);
      +
+  /**
+   * Highlights job.sourceNode in place: extracts its plain text and span map,
+   * runs the language handler chosen for job.langExtension, then splices the
+   * styled spans back into the DOM.  Any exception is logged (when a console
+   * exists) rather than propagated, so a bad handler can't break the page.
+   * @param {Object} job with sourceNode and langExtension fields.
+   */
+  function applyDecorator(job) {
+    var opt_langExtension = job.langExtension;
+
+    try {
+      // Extract tags, and convert the source code to plain text.
+      var sourceAndSpans = extractSourceSpans(job.sourceNode);
+      /** Plain text. @type {string} */
+      var source = sourceAndSpans.source;
+      job.source = source;
+      job.spans = sourceAndSpans.spans;
+      job.basePos = 0;
+
+      // Apply the appropriate language handler
+      langHandlerForExtension(opt_langExtension, source)(job);
+
+      // Integrate the decorations and tags back into the source code,
+      // modifying the sourceNode in place.
+      recombineTagsAndDecorations(job);
+    } catch (e) {
+      if ('console' in window) {
+        console['log'](e && e['stack'] ? e['stack'] : e);
+      }
+    }
+  }
      +
      +  /**
      +   * @param sourceCodeHtml {string} The HTML to pretty print.
      +   * @param opt_langExtension {string} The language name to use.
      +   *     Typically, a filename extension like 'cpp' or 'java'.
      +   * @param opt_numberLines {number|boolean} True to number lines,
      +   *     or the 1-indexed number of the first line in sourceCodeHtml.
      +   */
      +  function prettyPrintOne(sourceCodeHtml, opt_langExtension, opt_numberLines) {
      +    var container = document.createElement('PRE');
      +    // This could cause images to load and onload listeners to fire.
      +    // E.g. <img onerror="alert(1337)" src="nosuchimage.png">.
      +    // We assume that the inner HTML is from a trusted source.
      +    container.innerHTML = sourceCodeHtml;
      +    if (opt_numberLines) {
      +      numberLines(container, opt_numberLines);
      +    }
      +
      +    var job = {
      +      langExtension: opt_langExtension,
      +      numberLines: opt_numberLines,
      +      sourceNode: container
      +    };
      +    applyDecorator(job);
      +    return container.innerHTML;
      +  }
      +
+  /**
+   * Finds all <pre>, <code>, and <xmp> elements with a 'prettyprint' class
+   * and highlights them in place.  Work is chunked into 250ms continuations
+   * (when PR_SHOULD_USE_CONTINUATION is set) so large pages stay responsive.
+   * @param {Function=} opt_whenDone called once every element is processed.
+   */
+  function prettyPrint(opt_whenDone) {
+    function byTagName(tn) { return document.getElementsByTagName(tn); }
+    // fetch a list of nodes to rewrite
+    var codeSegments = [byTagName('pre'), byTagName('code'), byTagName('xmp')];
+    var elements = [];
+    for (var i = 0; i < codeSegments.length; ++i) {
+      for (var j = 0, n = codeSegments[i].length; j < n; ++j) {
+        elements.push(codeSegments[i][j]);
+      }
+    }
+    codeSegments = null;
+
+    // Date.now shim for environments that lack it.
+    var clock = Date;
+    if (!clock['now']) {
+      clock = { 'now': function () { return (new Date).getTime(); } };
+    }
+
+    // The loop is broken into a series of continuations to make sure that we
+    // don't make the browser unresponsive when rewriting a large page.
+    var k = 0;
+    var prettyPrintingJob;
+
+    function doWork() {
+      var endTime = (window['PR_SHOULD_USE_CONTINUATION'] ?
+                     clock.now() + 250 /* ms */ :
+                     Infinity);
+      for (; k < elements.length && clock.now() < endTime; k++) {
+        var cs = elements[k];
+        if (cs.className && cs.className.indexOf('prettyprint') >= 0) {
+          // If the classes includes a language extensions, use it.
+          // Language extensions can be specified like
+          //     <pre class="prettyprint lang-cpp">
+          // the language extension "cpp" is used to find a language handler as
+          // passed to PR.registerLangHandler.
+          var langExtension = cs.className.match(/\blang-(\w+)\b/);
+          if (langExtension) { langExtension = langExtension[1]; }
+
+          // make sure this is not nested in an already prettified element
+          var nested = false;
+          for (var p = cs.parentNode; p; p = p.parentNode) {
+            if ((p.tagName === 'pre' || p.tagName === 'code' ||
+                 p.tagName === 'xmp') &&
+                p.className && p.className.indexOf('prettyprint') >= 0) {
+              nested = true;
+              break;
+            }
+          }
+          if (!nested) {
+            // Look for a class like linenums or linenums:<n> where <n> is the
+            // 1-indexed number of the first line.
+            var lineNums = cs.className.match(/\blinenums\b(?::(\d+))?/);
+            lineNums = lineNums
+                  ? lineNums[1] && lineNums[1].length ? +lineNums[1] : true
+                  : false;
+            if (lineNums) { numberLines(cs, lineNums); }
+
+            // do the pretty printing
+            prettyPrintingJob = {
+              langExtension: langExtension,
+              sourceNode: cs,
+              numberLines: lineNums
+            };
+            applyDecorator(prettyPrintingJob);
+          }
+        }
+      }
+      if (k < elements.length) {
+        // finish up in a continuation
+        setTimeout(doWork, 250);
+      } else if (opt_whenDone) {
+        opt_whenDone();
+      }
+    }
+
+    doWork();
+  }
      +
+  // Public API.  Exported via bracket/string syntax so the names survive
+  // aggressive minification.
+  window['prettyPrintOne'] = prettyPrintOne;
+  window['prettyPrint'] = prettyPrint;
+  window['PR'] = {
+        'createSimpleLexer': createSimpleLexer,
+        'registerLangHandler': registerLangHandler,
+        'sourceDecorator': sourceDecorator,
+        'PR_ATTRIB_NAME': PR_ATTRIB_NAME,
+        'PR_ATTRIB_VALUE': PR_ATTRIB_VALUE,
+        'PR_COMMENT': PR_COMMENT,
+        'PR_DECLARATION': PR_DECLARATION,
+        'PR_KEYWORD': PR_KEYWORD,
+        'PR_LITERAL': PR_LITERAL,
+        'PR_NOCODE': PR_NOCODE,
+        'PR_PLAIN': PR_PLAIN,
+        'PR_PUNCTUATION': PR_PUNCTUATION,
+        'PR_SOURCE': PR_SOURCE,
+        'PR_STRING': PR_STRING,
+        'PR_TAG': PR_TAG,
+        'PR_TYPE': PR_TYPE
+      };
      +})();
      diff --git a/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/repl.png b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/repl.png
      new file mode 100644
      index 00000000..81ed94e2
      Binary files /dev/null and b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/repl.png differ
      diff --git a/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/slides.js b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/slides.js
      new file mode 100644
      index 00000000..7f8ce9bf
      --- /dev/null
      +++ b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/slides.js
      @@ -0,0 +1,607 @@
      +/*
      +  Google I/O 2011 HTML slides template
      +
      +  Authors: Luke Mahé (code)
      +           Marcin Wichary (code and design)
      +           Dominic Mazzoni (browser compatibility)
      +           Charles Chen (ChromeVox support)
      +
      +  URL: http://code.google.com/p/io-2011-slides/
      +*/
      +
+//var PERMANENT_URL_PREFIX = 'http://io-2011-slides.googlecode.com/svn/trunk/';
+// Base path for slide assets (switched from the hosted SVN URL to local).
+var PERMANENT_URL_PREFIX = './';
+
+// Positional classes assigned to slides relative to the current one.
+var SLIDE_CLASSES = ['far-past', 'past', 'current', 'next', 'far-next'];
+
+// Touch-swipe sensitivity threshold; presumably pixels — TODO confirm
+// against the touch handlers (not visible in this chunk).
+var PM_TOUCH_SENSITIVITY = 15;
+
+// Index of the slide currently shown; set elsewhere before use.
+var curSlide;
      +
      +/* ---------------------------------------------------------------------- */
      +/* classList polyfill by Eli Grey 
      + * (http://purl.eligrey.com/github/classList.js/blob/master/classList.js) */
      +
      +if (typeof document !== "undefined" && !("classList" in document.createElement("a"))) {
      +
      +(function (view) {
      +
      +var
      +    classListProp = "classList"
      +  , protoProp = "prototype"
      +  , elemCtrProto = (view.HTMLElement || view.Element)[protoProp]
      +  , objCtr = Object
      +    strTrim = String[protoProp].trim || function () {
      +    return this.replace(/^\s+|\s+$/g, "");
      +  }
      +  , arrIndexOf = Array[protoProp].indexOf || function (item) {
      +    for (var i = 0, len = this.length; i < len; i++) {
      +      if (i in this && this[i] === item) {
      +        return i;
      +      }
      +    }
      +    return -1;
      +  }
      +  // Vendors: please allow content code to instantiate DOMExceptions
      +  , DOMEx = function (type, message) {
      +    this.name = type;
      +    this.code = DOMException[type];
      +    this.message = message;
      +  }
      +  , checkTokenAndGetIndex = function (classList, token) {
      +    if (token === "") {
      +      throw new DOMEx(
      +          "SYNTAX_ERR"
      +        , "An invalid or illegal string was specified"
      +      );
      +    }
      +    if (/\s/.test(token)) {
      +      throw new DOMEx(
      +          "INVALID_CHARACTER_ERR"
      +        , "String contains an invalid character"
      +      );
      +    }
      +    return arrIndexOf.call(classList, token);
      +  }
      +  , ClassList = function (elem) {
      +    var
      +        trimmedClasses = strTrim.call(elem.className)
      +      , classes = trimmedClasses ? trimmedClasses.split(/\s+/) : []
      +    ;
      +    for (var i = 0, len = classes.length; i < len; i++) {
      +      this.push(classes[i]);
      +    }
      +    this._updateClassName = function () {
      +      elem.className = this.toString();
      +    };
      +  }
      +  , classListProto = ClassList[protoProp] = []
      +  , classListGetter = function () {
      +    return new ClassList(this);
      +  }
      +;
      +// Most DOMException implementations don't allow calling DOMException's toString()
      +// on non-DOMExceptions. Error's toString() is sufficient here.
      +DOMEx[protoProp] = Error[protoProp];
      +classListProto.item = function (i) {
      +  return this[i] || null;
      +};
      +classListProto.contains = function (token) {
      +  token += "";
      +  return checkTokenAndGetIndex(this, token) !== -1;
      +};
      +classListProto.add = function (token) {
      +  token += "";
      +  if (checkTokenAndGetIndex(this, token) === -1) {
      +    this.push(token);
      +    this._updateClassName();
      +  }
      +};
      +classListProto.remove = function (token) {
      +  token += "";
      +  var index = checkTokenAndGetIndex(this, token);
      +  if (index !== -1) {
      +    this.splice(index, 1);
      +    this._updateClassName();
      +  }
      +};
      +classListProto.toggle = function (token) {
      +  token += "";
      +  if (checkTokenAndGetIndex(this, token) === -1) {
      +    this.add(token);
      +  } else {
      +    this.remove(token);
      +  }
      +};
      +classListProto.toString = function () {
      +  return this.join(" ");
      +};
      +
      +if (objCtr.defineProperty) {
      +  var classListPropDesc = {
      +      get: classListGetter
      +    , enumerable: true
      +    , configurable: true
      +  };
      +  try {
      +    objCtr.defineProperty(elemCtrProto, classListProp, classListPropDesc);
      +  } catch (ex) { // IE 8 doesn't support enumerable:true
      +    if (ex.number === -0x7FF5EC54) {
      +      classListPropDesc.enumerable = false;
      +      objCtr.defineProperty(elemCtrProto, classListProp, classListPropDesc);
      +    }
      +  }
      +} else if (objCtr[protoProp].__defineGetter__) {
      +  elemCtrProto.__defineGetter__(classListProp, classListGetter);
      +}
      +
      +}(self));
      +
      +}
      +/* ---------------------------------------------------------------------- */
      +
      +/* Slide movement */
      +
      +function getSlideEl(no) {
      +  if ((no < 0) || (no >= slideEls.length)) { 
      +    return null;
      +  } else {
      +    return slideEls[no];
      +  }
      +};
      +
      +function updateSlideClass(slideNo, className) {
      +  var el = getSlideEl(slideNo);
      +  
      +  if (!el) {
      +    return;
      +  }
      +  
      +  if (className) {
      +    el.classList.add(className);
      +  }
      +    
      +  for (var i in SLIDE_CLASSES) {
      +    if (className != SLIDE_CLASSES[i]) {
      +      el.classList.remove(SLIDE_CLASSES[i]);
      +    }
      +  }
      +};
      +
      +function updateSlides() {
      +  for (var i = 0; i < slideEls.length; i++) {
      +    switch (i) {
      +      case curSlide - 2:
      +        updateSlideClass(i, 'far-past');
      +        break;
      +      case curSlide - 1:
      +        updateSlideClass(i, 'past');
      +        break;
      +      case curSlide: 
      +        updateSlideClass(i, 'current');
      +        break;
      +      case curSlide + 1:
      +        updateSlideClass(i, 'next');      
      +        break;
      +      case curSlide + 2:
      +        updateSlideClass(i, 'far-next');      
      +        break;
      +      default:
      +        updateSlideClass(i);
      +        break;
      +    }
      +  }
      +
      +  triggerLeaveEvent(curSlide - 1);
      +  triggerEnterEvent(curSlide);
      +
      +  window.setTimeout(function() {
      +    // Hide after the slide
      +    disableSlideFrames(curSlide - 2);
      +  }, 301);
      +
      +  enableSlideFrames(curSlide - 1);
      +  enableSlideFrames(curSlide + 2);
      +  
      +  if (isChromeVoxActive()) {
      +    speakAndSyncToNode(slideEls[curSlide]);
      +  }  
      +
      +  updateHash();
      +};
      +
      +function buildNextItem() {
      +  var toBuild  = slideEls[curSlide].querySelectorAll('.to-build');
      +
      +  if (!toBuild.length) {
      +    return false;
      +  }
      +
      +  toBuild[0].classList.remove('to-build', '');
      +
      +  if (isChromeVoxActive()) {
      +    speakAndSyncToNode(toBuild[0]);
      +  }
      +
      +  return true;
      +};
      +
      +function prevSlide() {
      +  if (curSlide > 0) {
      +    curSlide--;
      +
      +    updateSlides();
      +  }
      +};
      +
      +function nextSlide() {
      +  if (buildNextItem()) {
      +    return;
      +  }
      +
      +  if (curSlide < slideEls.length - 1) {
      +    curSlide++;
      +
      +    updateSlides();
      +  }
      +};
      +
      +/* Slide events */
      +
      +function triggerEnterEvent(no) {
      +  var el = getSlideEl(no);
      +  if (!el) {
      +    return;
      +  }
      +
      +  var onEnter = el.getAttribute('onslideenter');
      +  if (onEnter) {
      +    new Function(onEnter).call(el);
      +  }
      +
      +  var evt = document.createEvent('Event');
      +  evt.initEvent('slideenter', true, true);
      +  evt.slideNumber = no + 1; // Make it readable
      +
      +  el.dispatchEvent(evt);
      +};
      +
      +function triggerLeaveEvent(no) {
      +  var el = getSlideEl(no);
      +  if (!el) {
      +    return;
      +  }
      +
      +  var onLeave = el.getAttribute('onslideleave');
      +  if (onLeave) {
      +    new Function(onLeave).call(el);
      +  }
      +
      +  var evt = document.createEvent('Event');
      +  evt.initEvent('slideleave', true, true);
      +  evt.slideNumber = no + 1; // Make it readable
      +  
      +  el.dispatchEvent(evt);
      +};
      +
      +/* Touch events */
      +
      +function handleTouchStart(event) {
      +  if (event.touches.length == 1) {
      +    touchDX = 0;
      +    touchDY = 0;
      +
      +    touchStartX = event.touches[0].pageX;
      +    touchStartY = event.touches[0].pageY;
      +
      +    document.body.addEventListener('touchmove', handleTouchMove, true);
      +    document.body.addEventListener('touchend', handleTouchEnd, true);
      +  }
      +};
      +
      +function handleTouchMove(event) {
      +  if (event.touches.length > 1) {
      +    cancelTouch();
      +  } else {
      +    touchDX = event.touches[0].pageX - touchStartX;
      +    touchDY = event.touches[0].pageY - touchStartY;
      +  }
      +};
      +
      +function handleTouchEnd(event) {
      +  var dx = Math.abs(touchDX);
      +  var dy = Math.abs(touchDY);
      +
      +  if ((dx > PM_TOUCH_SENSITIVITY) && (dy < (dx * 2 / 3))) {
      +    if (touchDX > 0) {
      +      prevSlide();
      +    } else {
      +      nextSlide();
      +    }
      +  }
      +  
      +  cancelTouch();
      +};
      +
      +function cancelTouch() {
      +  document.body.removeEventListener('touchmove', handleTouchMove, true);
      +  document.body.removeEventListener('touchend', handleTouchEnd, true);  
      +};
      +
      +/* Preloading frames */
      +
      +function disableSlideFrames(no) {
      +  var el = getSlideEl(no);
      +  if (!el) {
      +    return;
      +  }
      +
      +  var frames = el.getElementsByTagName('iframe');
      +  for (var i = 0, frame; frame = frames[i]; i++) {
      +    disableFrame(frame);
      +  }
      +};
      +
      +function enableSlideFrames(no) {
      +  var el = getSlideEl(no);
      +  if (!el) {
      +    return;
      +  }
      +
      +  var frames = el.getElementsByTagName('iframe');
      +  for (var i = 0, frame; frame = frames[i]; i++) {
      +    enableFrame(frame);
      +  }
      +};
      +
      +function disableFrame(frame) {
      +  frame.src = 'about:blank';
      +};
      +
      +function enableFrame(frame) {
      +  var src = frame._src;
      +
      +  if (frame.src != src && src != 'about:blank') {
      +    frame.src = src;
      +  }
      +};
      +
      +function setupFrames() {
      +  var frames = document.querySelectorAll('iframe');
      +  for (var i = 0, frame; frame = frames[i]; i++) {
      +    frame._src = frame.src;
      +    disableFrame(frame);
      +  }
      +  
      +  enableSlideFrames(curSlide);
      +  enableSlideFrames(curSlide + 1);
      +  enableSlideFrames(curSlide + 2);  
      +};
      +
      +function setupInteraction() {
      +  /* Clicking and tapping */
      +  
      +  var el = document.createElement('div');
      +  el.className = 'slide-area';
      +  el.id = 'prev-slide-area';  
      +  el.addEventListener('click', prevSlide, false);
      +  document.querySelector('section.slides').appendChild(el);
      +
      +  var el = document.createElement('div');
      +  el.className = 'slide-area';
      +  el.id = 'next-slide-area';  
      +  el.addEventListener('click', nextSlide, false);
      +  document.querySelector('section.slides').appendChild(el);  
      +  
      +  /* Swiping */
      +  
      +  document.body.addEventListener('touchstart', handleTouchStart, false);
      +}
      +
      +/* ChromeVox support */
      +
      +function isChromeVoxActive() {
      +  if (typeof(cvox) == 'undefined') {
      +    return false;
      +  } else {
      +    return true;
      +  }
      +};
      +
      +function speakAndSyncToNode(node) {
      +  if (!isChromeVoxActive()) {
      +    return;
      +  }
      +  
      +  cvox.ChromeVox.navigationManager.switchToStrategy(
      +      cvox.ChromeVoxNavigationManager.STRATEGIES.LINEARDOM, 0, true);  
      +  cvox.ChromeVox.navigationManager.syncToNode(node);
      +  cvox.ChromeVoxUserCommands.finishNavCommand('');
      +  var target = node;
      +  while (target.firstChild) {
      +    target = target.firstChild;
      +  }
      +  cvox.ChromeVox.navigationManager.syncToNode(target);
      +};
      +
      +function speakNextItem() {
      +  if (!isChromeVoxActive()) {
      +    return;
      +  }
      +  
      +  cvox.ChromeVox.navigationManager.switchToStrategy(
      +      cvox.ChromeVoxNavigationManager.STRATEGIES.LINEARDOM, 0, true);
      +  cvox.ChromeVox.navigationManager.next(true);
      +  if (!cvox.DomUtil.isDescendantOfNode(
      +      cvox.ChromeVox.navigationManager.getCurrentNode(), slideEls[curSlide])){
      +    var target = slideEls[curSlide];
      +    while (target.firstChild) {
      +      target = target.firstChild;
      +    }
      +    cvox.ChromeVox.navigationManager.syncToNode(target);
      +    cvox.ChromeVox.navigationManager.next(true);
      +  }
      +  cvox.ChromeVoxUserCommands.finishNavCommand('');
      +};
      +
      +function speakPrevItem() {
      +  if (!isChromeVoxActive()) {
      +    return;
      +  }
      +  
      +  cvox.ChromeVox.navigationManager.switchToStrategy(
      +      cvox.ChromeVoxNavigationManager.STRATEGIES.LINEARDOM, 0, true);
      +  cvox.ChromeVox.navigationManager.previous(true);
      +  if (!cvox.DomUtil.isDescendantOfNode(
      +      cvox.ChromeVox.navigationManager.getCurrentNode(), slideEls[curSlide])){
      +    var target = slideEls[curSlide];
      +    while (target.lastChild){
      +      target = target.lastChild;
      +    }
      +    cvox.ChromeVox.navigationManager.syncToNode(target);
      +    cvox.ChromeVox.navigationManager.previous(true);
      +  }
      +  cvox.ChromeVoxUserCommands.finishNavCommand('');
      +};
      +
      +/* Hash functions */
      +
      +function getCurSlideFromHash() {
      +  var slideNo = parseInt(location.hash.substr(1));
      +
      +  if (slideNo) {
      +    curSlide = slideNo - 1;
      +  } else {
      +    curSlide = 0;
      +  }
      +};
      +
      +function updateHash() {
      +  location.replace('#' + (curSlide + 1));
      +};
      +
      +/* Event listeners */
      +
      +function handleBodyKeyDown(event) {
      +  switch (event.keyCode) {
      +    case 39: // right arrow
      +    case 13: // Enter
      +    case 32: // space
      +    case 34: // PgDn
      +      nextSlide();
      +      event.preventDefault();
      +      break;
      +
      +    case 37: // left arrow
      +    case 8: // Backspace
      +    case 33: // PgUp
      +      prevSlide();
      +      event.preventDefault();
      +      break;
      +
      +    case 40: // down arrow
      +      if (isChromeVoxActive()) {
      +        speakNextItem();
      +      } else {
      +        nextSlide();
      +      }
      +      event.preventDefault();
      +      break;
      +
      +    case 38: // up arrow
      +      if (isChromeVoxActive()) {
      +        speakPrevItem();
      +      } else {
      +        prevSlide();
      +      }
      +      event.preventDefault();
      +      break;
      +  }
      +};
      +
      +function addEventListeners() {
      +  document.addEventListener('keydown', handleBodyKeyDown, false);  
      +};
      +
      +/* Initialization */
      +
      +function addPrettify() {
      +  var els = document.querySelectorAll('pre');
      +  for (var i = 0, el; el = els[i]; i++) {
      +    if (!el.classList.contains('noprettyprint')) {
      +      el.classList.add('prettyprint');
      +    }
      +  }
      +  
      +  var el = document.createElement('script');
      +  el.type = 'text/javascript';
      +  el.src = PERMANENT_URL_PREFIX + 'prettify.js';
      +  el.onload = function() {
      +    prettyPrint();
      +  }
      +  document.body.appendChild(el);
      +};
      +
      +function addFontStyle() {
      +  var el = document.createElement('link');
      +  el.rel = 'stylesheet';
      +  el.type = 'text/css';
      +//  el.href = 'http://fonts.googleapis.com/css?family=' +
      +//            'Open+Sans:regular,semibold,italic,italicsemibold|Droid+Sans+Mono';
      +
      +  document.body.appendChild(el);
      +};
      +
      +function addGeneralStyle() {
      +  var el = document.createElement('link');
      +  el.rel = 'stylesheet';
      +  el.type = 'text/css';
      +  el.href = PERMANENT_URL_PREFIX + 'styles.css';
      +  document.body.appendChild(el);
      +  
      +  var el = document.createElement('meta');
      +  el.name = 'viewport';
      +  el.content = 'width=1100,height=750';
      +  document.querySelector('head').appendChild(el);
      +  
      +  var el = document.createElement('meta');
      +  el.name = 'apple-mobile-web-app-capable';
      +  el.content = 'yes';
      +  document.querySelector('head').appendChild(el);
      +};
      +
      +function makeBuildLists() {
      +  for (var i = curSlide, slide; slide = slideEls[i]; i++) {
      +    var items = slide.querySelectorAll('.build > *');
      +    for (var j = 0, item; item = items[j]; j++) {
      +      if (item.classList) {
      +        item.classList.add('to-build');
      +      }
      +    }
      +  }
      +};
      +
      +function handleDomLoaded() {
      +  slideEls = document.querySelectorAll('section.slides > article');
      +
      +  addFontStyle();
      +  addGeneralStyle();
      +  addPrettify();
      +  addEventListeners();
      +
      +  updateSlides();
      +
      +  setupInteraction();
      +  setupFrames();
      +  makeBuildLists();
      +
      +  document.body.classList.add('loaded');
      +};
      +
      +function initialize() {
      +  getCurSlideFromHash();
      +
      +  document.addEventListener('DOMContentLoaded', handleDomLoaded, false);
      +}
      +
      +initialize();
      diff --git a/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/styles.css b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/styles.css
      new file mode 100755
      index 00000000..e6faeb11
      --- /dev/null
      +++ b/vendor/github.com/camlistore/camlistore/website/talks/2011-05-07-Camlistore-Sao-Paolo/styles.css
      @@ -0,0 +1,600 @@
      +/*
      +  Google I/O 2011 HTML slides template
      +
      +  Authors: Luke Mahé (code)
      +           Marcin Wichary (code and design)
      +           Dominic Mazzoni (browser compatibility)
      +           Charles Chen (ChromeVox support)
      +
      +  URL: http://code.google.com/p/io-2011-slides/
      +*/
      +
      +/* Framework */
      +
      +html {
      +  height: 100%;
      +}
      +
      +body {
      +  margin: 0;
      +  padding: 0;
      +
      +  display: block !important;
      +
      +  height: 100%;
      +  min-height: 740px;
      +  
      +  overflow-x: hidden;
      +  overflow-y: auto;
      +
      +  background: rgb(215, 215, 215);
      +  background: -o-radial-gradient(rgb(240, 240, 240), rgb(190, 190, 190));
      +  background: -moz-radial-gradient(rgb(240, 240, 240), rgb(190, 190, 190));
      +  background: -webkit-radial-gradient(rgb(240, 240, 240), rgb(190, 190, 190));
      +  background: -webkit-gradient(radial, 50% 50%, 0, 50% 50%, 500, from(rgb(240, 240, 240)), to(rgb(190, 190, 190)));
      +
      +  -webkit-font-smoothing: antialiased;
      +}
      +
      +.slides {
      +  width: 100%;
      +  height: 100%;
      +  left: 0;
      +  top: 0;
      +  
      +  position: absolute;
      +
      +  -webkit-transform: translate3d(0, 0, 0);
      +}
      +
      +.slides > article {
      +  display: block;
      +
      +  position: absolute;
      +  overflow: hidden;
      +
      +  width: 900px;
      +  height: 700px;
      +
      +  left: 50%;
      +  top: 50%;
      +
      +  margin-left: -450px;
      +  margin-top: -350px;
      +  
      +  padding: 40px 60px;
      +
      +  box-sizing: border-box;
      +  -o-box-sizing: border-box;
      +  -moz-box-sizing: border-box;
      +  -webkit-box-sizing: border-box;
      +
      +  border-radius: 10px;
      +  -o-border-radius: 10px;
      +  -moz-border-radius: 10px;
      +  -webkit-border-radius: 10px;
      +
      +  background-color: white;
      +
      +  box-shadow: 0 2px 6px rgba(0, 0, 0, .1);
      +  border: 1px solid rgba(0, 0, 0, .3);
      +
      +  transition: transform .3s ease-out;
      +  -o-transition: -o-transform .3s ease-out;
      +  -moz-transition: -moz-transform .3s ease-out;
      +  -webkit-transition: -webkit-transform .3s ease-out;
      +}
      +.slides.layout-widescreen > article {
      +  margin-left: -550px;
      +  width: 1100px;
      +}
      +.slides.layout-faux-widescreen > article {
      +  margin-left: -550px;
      +  width: 1100px;
      +  
      +  padding: 40px 160px;
      +}
      +
      +.slides > article:not(.nobackground):not(.biglogo) {
      +  background: url(images/colorbar.png) 0 600px repeat-x;
      +
      +  background-size: 100%, 225px;  
      +
      +  background-color: white;  
      +}
      +.slides.layout-widescreen > article:not(.nobackground):not(.biglogo),
      +.slides.layout-faux-widescreen > article:not(.nobackground):not(.biglogo) {
      +  background-position-x: 0, 840px;
      +}
      +
      +/* Clickable/tappable areas */
      +
      +.slide-area {
      +  z-index: 1000;
      +
      +  position: absolute;
      +  left: 0;
      +  top: 0;
      +  width: 150px;
      +  height: 700px;  
      +
      +  left: 50%;
      +  top: 50%;
      +
      +  cursor: pointer;  
      +  margin-top: -350px;  
      +  
      +  tap-highlight-color: transparent;
      +  -o-tap-highlight-color: transparent;
      +  -moz-tap-highlight-color: transparent;
      +  -webkit-tap-highlight-color: transparent;
      +}
      +#prev-slide-area {
      +  margin-left: -550px;
      +}
      +#next-slide-area {
      +  margin-left: 400px;
      +}
      +.slides.layout-widescreen #prev-slide-area,
      +.slides.layout-faux-widescreen #prev-slide-area {
      +  margin-left: -650px;
      +}
      +.slides.layout-widescreen #next-slide-area,
      +.slides.layout-faux-widescreen #next-slide-area {
      +  margin-left: 500px;
      +}
      +
      +/* Slide styles */
      +
      +article.biglogo {
      +  background: white url(images/googleio-logo.png) 50% 50% no-repeat;
      +
      +  background-size: 600px;
      +}
      +
      +/* Slides */
      +
      +.slides > article {
      +  display: none;
      +}
      +.slides > article.far-past {
      +  display: block;
      +  transform: translate(-2040px);
      +  -o-transform: translate(-2040px);
      +  -moz-transform: translate(-2040px);
      +  -webkit-transform: translate3d(-2040px, 0, 0);
      +}
      +.slides > article.past {
      +  display: block;
      +  transform: translate(-1020px);
      +  -o-transform: translate(-1020px);
      +  -moz-transform: translate(-1020px);
      +  -webkit-transform: translate3d(-1020px, 0, 0);
      +}
      +.slides > article.current {
      +  display: block;
      +  transform: translate(0);
      +  -o-transform: translate(0);
      +  -moz-transform: translate(0);
      +  -webkit-transform: translate3d(0, 0, 0);
      +}
      +.slides > article.next {
      +  display: block;
      +  transform: translate(1020px);
      +  -o-transform: translate(1020px);
      +  -moz-transform: translate(1020px);
      +  -webkit-transform: translate3d(1020px, 0, 0);
      +}
      +.slides > article.far-next {
      +  display: block;
      +  transform: translate(2040px);
      +  -o-transform: translate(2040px);
      +  -moz-transform: translate(2040px);
      +  -webkit-transform: translate3d(2040px, 0, 0);
      +}
      +
      +.slides.layout-widescreen > article.far-past,
      +.slides.layout-faux-widescreen > article.far-past {
      +  display: block;
      +  transform: translate(-2260px);
      +  -o-transform: translate(-2260px);
      +  -moz-transform: translate(-2260px);
      +  -webkit-transform: translate3d(-2260px, 0, 0);
      +}
      +.slides.layout-widescreen > article.past,
      +.slides.layout-faux-widescreen > article.past {
      +  display: block;
      +  transform: translate(-1130px);
      +  -o-transform: translate(-1130px);
      +  -moz-transform: translate(-1130px);
      +  -webkit-transform: translate3d(-1130px, 0, 0);
      +}
      +.slides.layout-widescreen > article.current,
      +.slides.layout-faux-widescreen > article.current {
      +  display: block;
      +  transform: translate(0);
      +  -o-transform: translate(0);
      +  -moz-transform: translate(0);
      +  -webkit-transform: translate3d(0, 0, 0);
      +}
      +.slides.layout-widescreen > article.next,
      +.slides.layout-faux-widescreen > article.next {
      +  display: block;
      +  transform: translate(1130px);
      +  -o-transform: translate(1130px);
      +  -moz-transform: translate(1130px);
      +  -webkit-transform: translate3d(1130px, 0, 0);
      +}
      +.slides.layout-widescreen > article.far-next,
      +.slides.layout-faux-widescreen > article.far-next {
      +  display: block;
      +  transform: translate(2260px);
      +  -o-transform: translate(2260px);
      +  -moz-transform: translate(2260px);
      +  -webkit-transform: translate3d(2260px, 0, 0);
      +}
      +
      +/* Styles for slides */
      +
      +.slides > article {
      +  font-family: 'Open Sans', Arial, sans-serif;
      +
      +  color: rgb(102, 102, 102);
      +  text-shadow: 0 1px 1px rgba(0, 0, 0, .1);
      +
      +  font-size: 30px;
      +  line-height: 36px;
      +
      +  letter-spacing: -1px;
      +}
      +
      +b {
      +  font-weight: 600;
      +}
      +
      +.blue {
      +  color: rgb(0, 102, 204);
      +}
      +.yellow {
      +  color: rgb(255, 211, 25);
      +}
      +.green {
      +  color: rgb(0, 138, 53);
      +}
      +.red {
      +  color: rgb(255, 0, 0);
      +}
      +.black {
      +  color: black;
      +}
      +.white {
      +  color: white;
      +}
      +
      +a {
      +  color: rgb(0, 102, 204);
      +}
      +a:visited {
      +  color: rgba(0, 102, 204, .75);
      +}
      +a:hover {
      +  color: black;
      +}
      +
      +p {
      +  margin: 0;
      +  padding: 0;
      +
      +  margin-top: 20px;
      +}
      +p:first-child {
      +  margin-top: 0;
      +}
      +
      +h1 {
      +  font-size: 60px;
      +  line-height: 60px;
      +
      +  padding: 0;
      +  margin: 0;
      +  margin-top: 200px;
      +  padding-right: 40px;
      +
      +  font-weight: 600;
      +
      +  letter-spacing: -3px;
      +
      +  color: rgb(51, 51, 51);
      +}
      +
      +h2 {
      +  font-size: 45px;
      +  line-height: 45px;
      +
      +  position: absolute;
      +  bottom: 150px;
      +
      +  padding: 0;
      +  margin: 0;
      +  padding-right: 40px;
      +
      +  font-weight: 600;
      +
      +  letter-spacing: -2px;
      +
      +  color: rgb(51, 51, 51);
      +}
      +
      +h3 {
      +  font-size: 30px;
      +  line-height: 36px;
      +
      +  padding: 0;
      +  margin: 0;
      +  padding-right: 40px;
      +
      +  font-weight: 600;
      +
      +  letter-spacing: -1px;
      +
      +  color: rgb(51, 51, 51);
      +}
      +
      +article.fill h3 {
      +  background: rgba(255, 255, 255, .75);
      +  padding-top: .2em;
      +  padding-bottom: .3em;
      +  margin-top: -.2em;
      +  margin-left: -60px;
      +  padding-left: 60px;
      +  margin-right: -60px;
      +  padding-right: 60px;
      +}
      +
      +ul {
      +  list-style: none;
      +  margin: 0;
      +  padding: 0;
      +
      +  margin-top: 40px;
      +
      +  margin-left: .75em;
      +}
      +ul:first-child {
      +  margin-top: 0;
      +}
      +ul ul {
      +  margin-top: .5em;
      +}
      +li {
      +  padding: 0;
      +  margin: 0;
      +
      +  margin-bottom: .5em;
      +}
      +li::before {
      +  content: '·';
      +
      +  width: .75em;
      +  margin-left: -.75em;
      +
      +  position: absolute;
      +}
      +
      +pre {
      +  font-family: 'Droid Sans Mono', 'Courier New', monospace;
      +
      +  font-size: 20px;
      +  line-height: 28px;
      +  padding: 5px 10px;
      +  
      +  letter-spacing: -1px;
      +
      +  margin-top: 40px;
      +  margin-bottom: 40px;
      +
      +  color: black;
      +  background: rgb(240, 240, 240);
      +  border: 1px solid rgb(224, 224, 224);
      +  box-shadow: inset 0 2px 6px rgba(0, 0, 0, .1);
      +  
      +  overflow: hidden;
      +}
      +
      +code {
      +  font-size: 95%;
      +  font-family: 'Droid Sans Mono', 'Courier New', monospace;
      +
      +  color: black;
      +}
      +
      +iframe {
      +  width: 100%;
      +
      +  height: 620px;
      +
      +  background: white;
      +  border: 1px solid rgb(192, 192, 192);
      +  margin: -1px;
      +  /*box-shadow: inset 0 2px 6px rgba(0, 0, 0, .1);*/
      +}
      +
      +h3 + iframe {
      +  margin-top: 40px;
      +  height: 540px;
      +}
      +
      +article.fill iframe {
      +  position: absolute;
      +  left: 0;
      +  top: 0;
      +  width: 100%;
      +  height: 100%;
      +
      +  border: 0;
      +  margin: 0;
      +
      +  border-radius: 10px;
      +  -o-border-radius: 10px;
      +  -moz-border-radius: 10px;
      +  -webkit-border-radius: 10px;
      +
      +  z-index: -1;
      +}
      +
      +article.fill img {
      +  position: absolute;
      +  left: 0;
      +  top: 0;
      +  min-width: 100%;
      +  min-height: 100%;
      +
      +  border-radius: 10px;
      +  -o-border-radius: 10px;
      +  -moz-border-radius: 10px;
      +  -webkit-border-radius: 10px;
      +
      +  z-index: -1;
      +}
      +img.centered {
      +  margin: 0 auto;
      +  display: block;
      +}
      +
      +table {
      +  width: 100%;
      +  border-collapse: collapse;
      +  margin-top: 40px;
      +}
      +th {
      +  font-weight: 600;
      +  text-align: left;
      +}
      +td,
      +th {
      +  border: 1px solid rgb(224, 224, 224);
      +  padding: 5px 10px;
      +  vertical-align: top;
      +}
      +
      +.source {
      +  position: absolute;
      +  left: 60px;
      +  top: 644px;
      +  padding-right: 175px;
      +  
      +  font-size: 15px;
      +  letter-spacing: 0;  
      +  line-height: 18px;
      +}
      +
      +q {
      +  display: block;
      +  font-size: 60px;
      +  line-height: 72px;
      +  
      +  margin-left: 20px;
      +  
      +  margin-top: 100px;
      +  margin-right: 150px;    
      +}
      +q::before {
      +  content: '“';
      +  
      +  position: absolute;
      +  display: inline-block;
      +  margin-left: -2.1em;
      +  width: 2em;
      +  text-align: right;
      +  
      +  font-size: 90px;
      +  color: rgb(192, 192, 192);
      +}
      +q::after {
      +  content: '”';
      +
      +  position: absolute;  
      +  margin-left: .1em;
      +
      +  font-size: 90px;
      +  color: rgb(192, 192, 192);  
      +}
      +div.author {
      +  text-align: right;  
      +  font-size: 40px;
      +  
      +  margin-top: 20px;
      +  margin-right: 150px;    
      +}
      +div.author::before {
      +  content: '—';
      +}
      +
      +/* Size variants */
      +
      +article.smaller p,
      +article.smaller ul {
      +  font-size: 20px;
      +  line-height: 24px;
      +  letter-spacing: 0;
      +}
      +article.smaller table {
      +  font-size: 20px;
      +  line-height: 24px;
      +  letter-spacing: 0;
      +}
      +article.smaller pre {
      +  font-size: 15px;
      +  line-height: 20px;
      +  letter-spacing: 0;
      +}
      +article.smaller q {
      +  font-size: 40px;
      +  line-height: 48px;
      +}
      +article.smaller q::before,
      +article.smaller q::after {
      +  font-size: 60px;
      +}
      +
      +/* Builds */
      +
      +.build > * {
      +  transition: opacity 0.5s ease-in-out 0.2s;
      +  -o-transition: opacity 0.5s ease-in-out 0.2s;
      +  -moz-transition: opacity 0.5s ease-in-out 0.2s;
      +  -webkit-transition: opacity 0.5s ease-in-out 0.2s;
      +}
      +
      +.to-build {
      +  opacity: 0;
      +}
      +
      +/* Pretty print */
      +
      +.prettyprint .str, /* string content */
      +.prettyprint .atv { /* a markup attribute value */
      +  color: rgb(0, 138, 53); 
      +}  
      +.prettyprint .kwd, /* a keyword */
      +.prettyprint .tag { /* a markup tag name */
      +  color: rgb(0, 102, 204);
      +}
      +.prettyprint .com { /* a comment */
      +  color: rgb(127, 127, 127); 
      +  font-style: italic; 
      +}  
      +.prettyprint .lit { /* a literal value */
      +  color: rgb(127, 0, 0);
      +}  
      +.prettyprint .pun, /* punctuation, lisp open bracket, lisp close bracket */
      +.prettyprint .opn, 
      +.prettyprint .clo { 
      +  color: rgb(127, 127, 127); 
      +}
      +.prettyprint .typ, /* a type name */
      +.prettyprint .atn, /* a markup attribute name */ 
      +.prettyprint .dec, 
      +.prettyprint .var { /* a declaration; a variable name */
      +  color: rgb(127, 0, 127);
      +}  
      diff --git a/vendor/github.com/camlistore/camlistore/website/test.cgi b/vendor/github.com/camlistore/camlistore/website/test.cgi
      new file mode 100755
      index 00000000..6cf3a417
      --- /dev/null
      +++ b/vendor/github.com/camlistore/camlistore/website/test.cgi
      @@ -0,0 +1,30 @@
      +#!/usr/bin/perl
      +#
      +
      +use strict;
      +print "Content-Type: text/html\n\n";
      +
      +print "<html><head><title>dump output</title></head><body>\n";
      +
      +if ($ENV{'REQUEST_METHOD'} eq "GET") {
      +    my $in = $ENV{'QUERY_STRING'};
      +    print "<h2>REQUEST_METHOD was GET</h2><pre>\n";
      +    print "Stdin= [$in]\n";
      +    print "</pre>\n";
      +} elsif ($ENV{'REQUEST_METHOD'} eq "POST") {
      +    my $in;
      +    sysread(STDIN, $in, $ENV{'CONTENT_LENGTH'});
      +    print "<h2>REQUEST_METHOD was POST</h2><pre>\n";
      +    print "Stdin= [$in]\n";
      +    print "</pre>\n";
      +} 
      +
      +print "<h2>Environment variables</h2><pre>\n";
      +foreach my $key (sort(keys(%ENV))){
      +    print "<B>$key</B>", " "x(23-length($key)), "= $ENV{$key}\n";
      +}
      +
      +print "</pre>\n";
      +
      +print "</body></html>\n";
      +exit 0;
      diff --git a/vendor/github.com/camlistore/camlistore/website/tmpl/camlierror.html b/vendor/github.com/camlistore/camlistore/website/tmpl/camlierror.html
      new file mode 100644
      index 00000000..792b2636
      --- /dev/null
      +++ b/vendor/github.com/camlistore/camlistore/website/tmpl/camlierror.html
      @@ -0,0 +1,3 @@
      +<p>
      +<span class="alert" style="font-size:120%">{{.Code}}: {{.Description}}</span>
      +</p>
      diff --git a/vendor/github.com/camlistore/camlistore/website/tmpl/contrib.html b/vendor/github.com/camlistore/camlistore/website/tmpl/contrib.html
      new file mode 100644
      index 00000000..990895cd
      --- /dev/null
      +++ b/vendor/github.com/camlistore/camlistore/website/tmpl/contrib.html
      @@ -0,0 +1,10 @@
      +<h1>Contributors</h1>
      +
      +<p>Camlistore contributors include:</p>
      +
      +<ul>
      +{{range .}}<li>{{if .URL}}<a href="{{.URL}}">{{index .Names 0}}</a>{{else}}{{index .Names 0}}{{end}}{{if .Role}}:  <i>{{.Role}}</i>{{end}}</li>
      +{{end}}
      +</ul>
      +
      +<p>Want to help?  See <a href="/docs/contributing">contributing</a>.</p>
      diff --git a/vendor/github.com/camlistore/camlistore/website/tmpl/error.html b/vendor/github.com/camlistore/camlistore/website/tmpl/error.html
      new file mode 100644
      index 00000000..15487a6b
      --- /dev/null
      +++ b/vendor/github.com/camlistore/camlistore/website/tmpl/error.html
      @@ -0,0 +1,3 @@
      +<p>
      +<span class="alert" style="font-size:120%">{{.}}</span>
      +</p>
      diff --git a/vendor/github.com/camlistore/camlistore/website/tmpl/githeader.html b/vendor/github.com/camlistore/camlistore/website/tmpl/githeader.html
      new file mode 100644
      index 00000000..eebb2779
      --- /dev/null
      +++ b/vendor/github.com/camlistore/camlistore/website/tmpl/githeader.html
      @@ -0,0 +1,27 @@
      +<link rel="stylesheet" href="/static/all.css" type="text/css" media="all" charset="utf-8" />
      +
      +<div id='header'>
      +<a href="/"><img width='788' height='161' src='/static/camli-header.jpg' title="Camlistore" border='0' /></a>
      +</div>
      +
      +<center><div style="display:block; max-width: 788px;">
      +<div class='bar'><div class='hatecss'>
      +  <a href='/'>About</a>
      +  <a href='/docs/'>Docs</a>
      +  <a href='/code/'>Code</a>
      +  <a href='/contributors'>Who</a>
      +  <a href='/lists'>Lists</a>
      +  <a href='https://github.com/camlistore/camlistore/issues'>Bugs</a>
      +</div></div>
      +</div>
      +
      +<div style="font-size: 120%">
      +<code>
      +$ git clone https://camlistore.googlesource.com/camlistore
      +</code>
      +</div>
      +<div style="margin: 1em 0">
      +  Want to <a href="/docs/contributing">contribute</a>?
      +</div>
      +
      +</center>
      diff --git a/vendor/github.com/camlistore/camlistore/website/tmpl/package.html b/vendor/github.com/camlistore/camlistore/website/tmpl/package.html
      new file mode 100644
      index 00000000..349f7186
      --- /dev/null
      +++ b/vendor/github.com/camlistore/camlistore/website/tmpl/package.html
      @@ -0,0 +1,212 @@
      +<!--
      +	Copyright 2009 The Go Authors. All rights reserved.
      +	Use of this source code is governed by a BSD-style
      +	license that can be found in the LICENSE file.
      +-->
      +<!--
      +	Note: Static (i.e., not template-generated) href and id
      +	attributes start with "pkg-" to make it impossible for
      +	them to conflict with generated attributes (some of which
      +	correspond to Go identifiers).
      +-->
      +{{with .PDoc}}
      +	{{if $.IsPkg}}
      +		<div id="short-nav">
      +			<dl>
      +			<dd><code>import "{{html .ImportPath}}"</code></dd>
      +			</dl>
      +			<dl>
      +			<dd><a href="#pkg-overview" class="overviewLink">Overview</a></dd>
      +			<dd><a href="#pkg-index">Index</a></dd>
      +			{{if $.Examples}}
      +				<dd><a href="#pkg-examples">Examples</a></dd>
      +			{{end}}
      +			{{if $.PList}}
      +				<dd><a href="#pkg-other-packages">Other packages</a></dd>
      +			{{end}}
      +			{{if $.Dirs}}
      +				<dd><a href="#pkg-subdirectories">Subdirectories</a></dd>
      +			{{end}}
      +			</dl>
      +		</div>
      +		<!-- The package's Name is printed as title by the top-level template -->
      +		<div id="pkg-overview" class="toggleVisible">
      +			<div class="collapsed">
      +				<h2 class="toggleButton" title="Click to show Overview section">Overview ▹</h2>
      +			</div>
      +			<div class="expanded">
      +				<h2 class="toggleButton" title="Click to hide Overview section">Overview ▾</h2>
      +				{{comment_html .Doc}}
      +			</div>
      +		</div>
      +		{{example_html "" $.Examples $.FSet}}
      +	
      +		<h2 id="pkg-index">Index</h2>
      +		<!-- Table of contents for API; must be named manual-nav to turn off auto nav. -->
      +		<div id="manual-nav">
      +			<dl>
      +			{{if .Consts}}
      +				<dd><a href="#pkg-constants">Constants</a></dd>
      +			{{end}}
      +			{{if .Vars}}
      +				<dd><a href="#pkg-variables">Variables</a></dd>
      +			{{end}}
      +			{{range .Funcs}}
      +				{{$name_html := html .Name}}
      +				<dd><a href="#{{$name_html}}">{{node_html .Decl $.FSet}}</a></dd>
      +			{{end}}
      +			{{range .Types}}
      +				{{$tname_html := html .Name}}
      +				<dd><a href="#{{$tname_html}}">type {{$tname_html}}</a></dd>
      +				{{range .Funcs}}
      +					{{$name_html := html .Name}}
      +					<dd>&nbsp; &nbsp; <a href="#{{$name_html}}">{{node_html .Decl $.FSet}}</a></dd>
      +				{{end}}
      +				{{range .Methods}}
      +					{{$name_html := html .Name}}
      +					<dd>&nbsp; &nbsp; <a href="#{{$tname_html}}.{{$name_html}}">{{node_html .Decl $.FSet}}</a></dd>
      +				{{end}}
      +			{{end}}
      +			{{if .Bugs}}
      +				<dd><a href="#pkg-bugs">Bugs</a></dd>
      +			{{end}}
      +		</dl>
      +
      +		{{if $.Examples}}
      +			<h4 id="pkg-examples">Examples</h4>
      +			<dl>
      +			{{range $.Examples}}
      +			<dd><a class="exampleLink" href="#example_{{.Name}}">{{example_name .Name}}</a></dd>
      +			{{end}}
      +			</dl>
      +		{{end}}
      +
      +		{{with .Filenames}}
      +			<h4>Package files</h4>
      +			<p>
      +			<span style="font-size:90%">
      +			{{range .}}
      +				<a href="{{.|srcLink|html}}">{{.|filename|html}}</a>
      +			{{end}}
      +			</span>
      +			</p>
      +		{{end}}
      +	
      +		{{with .Consts}}
      +			<h2 id="pkg-constants">Constants</h2>
      +			{{range .}}
      +				<pre>{{node_html .Decl $.FSet}}</pre>
      +				{{comment_html .Doc}}
      +			{{end}}
      +		{{end}}
      +		{{with .Vars}}
      +			<h2 id="pkg-variables">Variables</h2>
      +			{{range .}}
      +				<pre>{{node_html .Decl $.FSet}}</pre>
      +				{{comment_html .Doc}}
      +			{{end}}
      +		{{end}}
      +		{{range .Funcs}}
      +			{{/* Name is a string - no need for FSet */}}
      +			{{$name_html := html .Name}}
      +			<h2 id="{{$name_html}}">func <a href="{{posLink_url .Decl $.FSet}}">{{$name_html}}</a></h2>
      +			<pre>{{node_html .Decl $.FSet}}</pre>
      +			{{comment_html .Doc}}
      +			{{example_html .Name $.Examples $.FSet}}
      +		{{end}}
      +		{{range .Types}}
      +			{{$tname := .Name}}
      +			{{$tname_html := html .Name}}
      +			<h2 id="{{$tname_html}}">type <a href="{{posLink_url .Decl $.FSet}}">{{$tname_html}}</a></h2>
      +			<pre>{{node_html .Decl $.FSet}}</pre>
      +			{{comment_html .Doc}}
      +
      +			{{range .Consts}}
      +				<pre>{{node_html .Decl $.FSet}}</pre>
      +				{{comment_html .Doc}}
      +			{{end}}
      +
      +			{{range .Vars}}
      +				<pre>{{node_html .Decl $.FSet}}</pre>
      +				{{comment_html .Doc}}
      +			{{end}}
      +
      +			{{example_html $tname $.Examples $.FSet}}
      +
      +			{{range .Funcs}}
      +				{{$name_html := html .Name}}
      +				<h3 id="{{$name_html}}">func <a href="{{posLink_url .Decl $.FSet}}">{{$name_html}}</a></h3>
      +				<pre>{{node_html .Decl $.FSet}}</pre>
      +				{{comment_html .Doc}}
      +				{{example_html .Name $.Examples $.FSet}}
      +			{{end}}
      +
      +			{{range .Methods}}
      +				{{$name_html := html .Name}}
      +				<h3 id="{{$tname_html}}.{{$name_html}}">func ({{html .Recv}}) <a href="{{posLink_url .Decl $.FSet}}">{{$name_html}}</a></h3>
      +				<pre>{{node_html .Decl $.FSet}}</pre>
      +				{{comment_html .Doc}}
      +				{{$name := printf "%s_%s" $tname .Name}}
      +				{{example_html $name $.Examples $.FSet}}
      +			{{end}}
      +		{{end}}
      +		</div>
      +	{{else}}  {{/* not a package; is a command */}}
      +		{{comment_html .Doc}}
      +	{{end}}
      +
      +	{{with .Bugs}}
      +		<h2 id="pkg-bugs">Bugs</h2>
      +		{{range .}}
      +		{{comment_html .}}
      +		{{end}}
      +	{{end}}
      +{{end}}
      +
      +{{with .PAst}}
      +	<pre>{{node_html . $.FSet}}</pre>
      +{{end}}
      +
      +{{with .PList}}
      +	<h2 id="pkg-other-packages">Other packages</h2>
      +	<p>
      +	{{/* PList entries are strings - no need for FSet */}}
      +	{{range .}}
      +	<a href="?p={{urlquery .}}">{{html .}}</a><br />
      +	{{end}}
      +	</p>
      +{{end}}
      +
      +{{with .Dirs}}
      +	{{/* DirList entries are numbers and strings - no need for FSet */}}
      +	<h2 id="pkg-subdirectories">Subdirectories</h2>
      +	<table class="dir">
      +	<tr>
      +	<th>Name</th>
      +	<th>&nbsp;&nbsp;&nbsp;&nbsp;</th>
      +	<th style="text-align: left; width: auto">Synopsis</th>
      +	</tr>
      +	{{if not $.DirFlat}}
      +		<tr>
      +		<td><a href="..">..</a></td>
      +		</tr>
      +	{{end}}
      +	{{range .List}}
      +		{{if $.DirFlat}}
      +			{{if .HasPkg}}
      +				<tr>
      +				<td class="name"><a href="{{html .Path}}/">{{html .Path}}</a></td>
      +				<td>&nbsp;&nbsp;&nbsp;&nbsp;</td>
      +				<td style="width: auto">{{html .Synopsis}}</td>
      +				</tr>
      +			{{end}}
      +		{{else}}
      +			<tr>
      +			<td class="name">{{repeat `&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;` .Depth}}<a href="{{html .Path}}/">{{html .Name}}</a></td>
      +			<td>&nbsp;&nbsp;&nbsp;&nbsp;</td>
      +			<td style="width: auto">{{html .Synopsis}}</td>
      +			</tr>
      +		{{end}}
      +	{{end}}
      +	</table>
      +{{end}}
      diff --git a/vendor/github.com/camlistore/camlistore/website/tmpl/page.html b/vendor/github.com/camlistore/camlistore/website/tmpl/page.html
      new file mode 100644
      index 00000000..904c9e46
      --- /dev/null
      +++ b/vendor/github.com/camlistore/camlistore/website/tmpl/page.html
      @@ -0,0 +1,58 @@
      +{{define "page"}}
      +<html>
      +	{{template "header" .}}
      +	<body>
      +	{{template "banner" .}}
      +	{{template "toplinks" .}}
      +	<div class='content'>
      +		<!-- Content is HTML-escaped elsewhere -->
      +		{{.Content}}
      +	</div>
      +	{{template "footer" .}}
      +	</body>
      +</html>
      +{{end}}
      +
      +{{define "header"}}
      +<html>
      +<head>
      +<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
      +<meta name="go-import" content="camlistore.org git https://camlistore.googlesource.com/camlistore">
      +{{with $x := .Title}}
      +  <title>{{$x}} - Camlistore</title>
      +{{else}}
      +  <title>Camlistore</title>
      +{{end}}
      +<!-- TODO(mpl): figure out why Index does not toggle (like Overview) -->
      +<link rel="stylesheet" href="/static/all.css" type="text/css" media="all" charset="utf-8">
      +<link href='//fonts.googleapis.com/css?family=Source+Sans+Pro:400,700' rel='stylesheet' type='text/css'>
      +<script type="text/javascript" src="/static/all-async.js" async="true"></script>
      +<script type="text/javascript" src="/static/godocs.js"></script>
      +</head>
      +{{end}}
      +
      +{{define "banner"}}
      +<div id='header'>
      +<a href="/"><img width='788' height='161' src='/static/camli-header.jpg' title="Camlistore" border='0' /></a>
      +</div>
      +{{end}}
      +
      +{{define "toplinks"}}
      +<div class='bar'><div class='hatecss'>
      +  <a href='/'>Home</a>
      +  <a href='/download'>Download</a>
      +  <a href='/docs/'>Docs</a>
      +  <a href='/code'>Code</a>
      +  <a href='/community'>Community</a>
      +</div></div>
      +{{end}}
      +
      +{{define "footer"}}
      +<div class='bar'><div class='hatecss'>
      +</div></div>
      +<div id='props'>
      +Website layout inspired by <a href="http://git-scm.com/">git</a> and <a href="http://memcached.org/">memcached</a>,<br />
      +design done by <a href="http://upallday.com/">up all day</a> creative solutions.<br />
      +Content by <a href="/contributors">the authors</a>.
      +</div>
      +{{end}}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/.gitignore b/vendor/github.com/coreos/coreos-cloudinit/.gitignore
      new file mode 100644
      index 00000000..874ddb58
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/.gitignore
      @@ -0,0 +1,4 @@
      +*.swp
      +bin/
      +coverage/
      +gopath/
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/.travis.yml b/vendor/github.com/coreos/coreos-cloudinit/.travis.yml
      new file mode 100644
      index 00000000..07494db3
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/.travis.yml
      @@ -0,0 +1,12 @@
      +language: go
      +sudo: false
      +matrix:
      +  include:
      +    - go: 1.4
      +      install:
      +        - go get golang.org/x/tools/cmd/cover
      +        - go get golang.org/x/tools/cmd/vet
      +    - go: 1.5
      +
      +script:
      + - ./test
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/CONTRIBUTING.md b/vendor/github.com/coreos/coreos-cloudinit/CONTRIBUTING.md
      new file mode 100644
      index 00000000..7ad3b312
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/CONTRIBUTING.md
      @@ -0,0 +1,68 @@
      +# How to Contribute
      +
      +CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
      +GitHub pull requests.  This document outlines some of the conventions on
      +development workflow, commit message formatting, contact points and other
      +resources to make it easier to get your contribution accepted.
      +
      +# Certificate of Origin
      +
      +By contributing to this project you agree to the Developer Certificate of
      +Origin (DCO). This document was created by the Linux Kernel community and is a
      +simple statement that you, as a contributor, have the legal right to make the
      +contribution. See the [DCO](DCO) file for details.
      +
      +# Email and Chat
      +
      +The project currently uses the general CoreOS email list and IRC channel:
      +- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
      +- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
      +
      +## Getting Started
      +
      +- Fork the repository on GitHub
      +- Read the [README](README.md) for build and test instructions
      +- Play with the project, submit bugs, submit patches!
      +
      +## Contribution Flow
      +
      +This is a rough outline of what a contributor's workflow looks like:
      +
      +- Create a topic branch from where you want to base your work (usually master).
      +- Make commits of logical units.
      +- Make sure your commit messages are in the proper format (see below).
      +- Push your changes to a topic branch in your fork of the repository.
      +- Make sure the tests pass, and add any new tests as appropriate.
      +- Submit a pull request to the original repository.
      +
      +Thanks for your contributions!
      +
      +### Format of the Commit Message
      +
      +We follow a rough convention for commit messages that is designed to answer two
      +questions: what changed and why. The subject line should feature the what and
      +the body of the commit should describe the why.
      +
      +```
      +environment: write new keys in consistent order
      +
      +Go 1.3 randomizes the ordering of keys when iterating over a map.
      +Sort the keys to make this ordering consistent.
      +
      +Fixes #38
      +```
      +
      +The format can be described more formally as follows:
      +
      +```
      +<subsystem>: <what changed>
      +<BLANK LINE>
      +<why this change was made>
      +<BLANK LINE>
      +<footer>
      +```
      +
      +The first line is the subject and should be no longer than 70 characters, the
      +second line is always blank, and other lines should be wrapped at 80 characters.
      +This allows the message to be easier to read on GitHub as well as in various
      +git tools.
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/DCO b/vendor/github.com/coreos/coreos-cloudinit/DCO
      new file mode 100644
      index 00000000..716561d5
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/DCO
      @@ -0,0 +1,36 @@
      +Developer Certificate of Origin
      +Version 1.1
      +
      +Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
      +660 York Street, Suite 102,
      +San Francisco, CA 94110 USA
      +
      +Everyone is permitted to copy and distribute verbatim copies of this
      +license document, but changing it is not allowed.
      +
      +
      +Developer's Certificate of Origin 1.1
      +
      +By making a contribution to this project, I certify that:
      +
      +(a) The contribution was created in whole or in part by me and I
      +    have the right to submit it under the open source license
      +    indicated in the file; or
      +
      +(b) The contribution is based upon previous work that, to the best
      +    of my knowledge, is covered under an appropriate open source
      +    license and I have the right under that license to submit that
      +    work with modifications, whether created in whole or in part
      +    by me, under the same open source license (unless I am
      +    permitted to submit under a different license), as indicated
      +    in the file; or
      +
      +(c) The contribution was provided directly to me by some other
      +    person who certified (a), (b) or (c) and I have not modified
      +    it.
      +
      +(d) I understand and agree that this project and the contribution
      +    are public and that a record of the contribution (including all
      +    personal information I submit with it, including my sign-off) is
      +    maintained indefinitely and may be redistributed consistent with
      +    this project or the open source license(s) involved.
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Documentation/cloud-config-deprecated.md b/vendor/github.com/coreos/coreos-cloudinit/Documentation/cloud-config-deprecated.md
      new file mode 100644
      index 00000000..8c802593
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/Documentation/cloud-config-deprecated.md
      @@ -0,0 +1,38 @@
      +# Deprecated Cloud-Config Features
      +
      +## Retrieving SSH Authorized Keys
      +
      +### From a GitHub User
      +
      +Using the `coreos-ssh-import-github` field, we can import public SSH keys from a GitHub user to use as authorized keys to a server.
      +
      +```yaml
      +#cloud-config
      +
      +users:
      +  - name: elroy
      +    coreos-ssh-import-github: elroy
      +```
      +
      +### From an HTTP Endpoint
      +
      +We can also pull public SSH keys from any HTTP endpoint which matches [GitHub's API response format](https://developer.github.com/v3/users/keys/#list-public-keys-for-a-user).
      +For example, if you have an installation of GitHub Enterprise, you can provide a complete URL with an authentication token:
      +
      +```yaml
      +#cloud-config
      +
      +users:
      +  - name: elroy
      +    coreos-ssh-import-url: https://github-enterprise.example.com/api/v3/users/elroy/keys?access_token=<TOKEN>
      +```
      +
      +You can also specify any URL whose response matches the JSON format for public keys:
      +
      +```yaml
      +#cloud-config
      +
      +users:
      +  - name: elroy
      +    coreos-ssh-import-url: https://example.com/public-keys
      +```
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Documentation/cloud-config-locations.md b/vendor/github.com/coreos/coreos-cloudinit/Documentation/cloud-config-locations.md
      new file mode 100644
      index 00000000..48d0f771
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/Documentation/cloud-config-locations.md
      @@ -0,0 +1,26 @@
      +# Cloud-Config Locations
      +
      +On every boot, coreos-cloudinit looks for a config file to configure your host. Here is a list of locations which are used by the Cloud-Config utility, depending on your CoreOS platform:
      +
      +| Location | Description |
      +| --- | --- |
      +| `/media/configvirtfs/openstack/latest/user_data` | `/media/configvirtfs` mount point with [config-2](/os/docs/latest/config-drive.html#contents-and-format) label. It should contain a `openstack/latest/user_data` relative path. Usually used by cloud providers or in VM installations. |
      +| `/media/configdrive/openstack/latest/user_data` | FAT or ISO9660 filesystem with [config-2](/os/docs/latest/config-drive.html#qemu-virtfs) label and `/media/configdrive/` mount point. It should also contain a `openstack/latest/user_data` relative path. Usually used in installations which are configured by USB Flash sticks or CDROM media. |
      +| Kernel command line: `cloud-config-url=http://example.com/user_data`. | You can find this string using this command `cat /proc/cmdline`. Usually used in [PXE](/os/docs/latest/booting-with-pxe.html) or [iPXE](/os/docs/latest/booting-with-ipxe.html) boots. |
      +| `/var/lib/coreos-install/user_data` | When you install CoreOS manually using the [coreos-install](/os/docs/latest/installing-to-disk.html) tool. Usually used in bare metal installations. |
      +| `/usr/share/oem/cloud-config.yml` | Path for OEM images. |
      +| `/var/lib/coreos-vagrant/vagrantfile-user-data`| Vagrant OEM scripts automatically store Cloud-Config into this path. |
      +| `/var/lib/waagent/CustomData`| Azure platform uses OEM path for first Cloud-Config initialization and then `/var/lib/waagent/CustomData` to apply your settings. |
      +| `http://169.254.169.254/metadata/v1/user-data` `http://169.254.169.254/2009-04-04/user-data` `https://metadata.packet.net/userdata`|DigitalOcean, EC2 and Packet cloud providers correspondingly use these URLs to download Cloud-Config.|
      +| `/usr/share/oem/bin/vmtoolsd --cmd "info-get guestinfo.coreos.config.data"` | Cloud-Config provided by [VMware Guestinfo][VMware Guestinfo] |
      +| `/usr/share/oem/bin/vmtoolsd --cmd "info-get guestinfo.coreos.config.url"` | Cloud-Config URL provided by [VMware Guestinfo][VMware Guestinfo] |
      +
      +[VMware Guestinfo]: vmware-guestinfo.md
      +
      +You can also run the `coreos-cloudinit` tool manually and provide a path to your custom Cloud-Config file:
      +
      +```sh
      +sudo coreos-cloudinit --from-file=/home/core/cloud-config.yaml
      +```
      +
      +This command will apply your custom cloud-config.
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Documentation/cloud-config-oem.md b/vendor/github.com/coreos/coreos-cloudinit/Documentation/cloud-config-oem.md
      new file mode 100644
      index 00000000..d8cc67d1
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/Documentation/cloud-config-oem.md
      @@ -0,0 +1,37 @@
      +## OEM configuration
      +
      +The `coreos.oem.*` parameters follow the [os-release spec][os-release], but have been repurposed as a way for coreos-cloudinit to know about the OEM partition on this machine. Customizing this section is only needed when generating a new OEM of CoreOS from the SDK. The fields include:
      +
      +- **id**: Lowercase string identifying the OEM
      +- **name**: Human-friendly string representing the OEM
      +- **version-id**: Lowercase string identifying the version of the OEM
      +- **home-url**: Link to the homepage of the provider or OEM
      +- **bug-report-url**: Link to a place to file bug reports about this OEM
      +
      +coreos-cloudinit renders these fields to `/etc/oem-release`.
      +If no **id** field is provided, coreos-cloudinit will ignore this section.
      +
      +For example, the following cloud-config document...
      +
      +```yaml
      +#cloud-config
      +coreos:
      +  oem:
      +    id: "rackspace"
      +    name: "Rackspace Cloud Servers"
      +    version-id: "168.0.0"
      +    home-url: "https://www.rackspace.com/cloud/servers/"
      +    bug-report-url: "https://github.com/coreos/coreos-overlay"
      +```
      +
      +...would be rendered to the following `/etc/oem-release`:
      +
      +```yaml
      +ID=rackspace
      +NAME="Rackspace Cloud Servers"
      +VERSION_ID=168.0.0
      +HOME_URL="https://www.rackspace.com/cloud/servers/"
      +BUG_REPORT_URL="https://github.com/coreos/coreos-overlay"
      +```
      +
      +[os-release]: http://www.freedesktop.org/software/systemd/man/os-release.html
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Documentation/cloud-config.md b/vendor/github.com/coreos/coreos-cloudinit/Documentation/cloud-config.md
      new file mode 100644
      index 00000000..83cb564b
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/Documentation/cloud-config.md
      @@ -0,0 +1,476 @@
      +# Using Cloud-Config
      +
      +CoreOS allows you to declaratively customize various OS-level items, such as network configuration, user accounts, and systemd units. This document describes the full list of items we can configure. The `coreos-cloudinit` program uses these files as it configures the OS after startup or during runtime.
      +
      +Your cloud-config is processed during each boot. Invalid cloud-config won't be processed but will be logged in the journal. You can validate your cloud-config with the [CoreOS online validator](https://coreos.com/validate/) or by running `coreos-cloudinit -validate`.  In addition to these two validation methods you can debug `coreos-cloudinit` system output through the `journalctl` tool:
      +
      +```sh
      +journalctl _EXE=/usr/bin/coreos-cloudinit
      +```
      +
      +It will show `coreos-cloudinit` run output which was triggered by system boot.
      +
      +## Configuration File
      +
      +The file used by this system initialization program is called a "cloud-config" file. It is inspired by the [cloud-init][cloud-init] project's [cloud-config][cloud-config] file, which is "the defacto multi-distribution package that handles early initialization of a cloud instance" ([cloud-init docs][cloud-init-docs]). Because the cloud-init project includes tools which aren't used by CoreOS, only the relevant subset of its configuration items will be implemented in our cloud-config file. In addition to those, we added a few CoreOS-specific items, such as etcd configuration, OEM definition, and systemd units.
      +
      +We've designed our implementation to allow the same cloud-config file to work across all of our supported platforms.
      +
      +[cloud-init]: https://launchpad.net/cloud-init
      +[cloud-init-docs]: http://cloudinit.readthedocs.org/en/latest/index.html
      +[cloud-config]: http://cloudinit.readthedocs.org/en/latest/topics/format.html#cloud-config-data
      +
      +### File Format
      +
      +The cloud-config file uses the [YAML][yaml] file format, which uses whitespace and new-lines to delimit lists, associative arrays, and values.
      +
+A cloud-config file must contain a header: either `#cloud-config` for processing as cloud-config (suggested) or `#!` for processing as a shell script (advanced). If the cloud-config has the `#cloud-config` header, it should be followed by an associative array which has zero or more of the following keys:
      +
      +- `coreos`
      +- `ssh_authorized_keys`
      +- `hostname`
      +- `users`
      +- `write_files`
      +- `manage_etc_hosts`
      +
      +The expected values for these keys are defined in the rest of this document.
      +
+If the cloud-config header starts with `#!`, then coreos-cloudinit will recognize it as a shell script, which is interpreted by bash and run as a transient systemd service.
      +
      +[yaml]: https://en.wikipedia.org/wiki/YAML
      +
      +### Providing Cloud-Config with Config-Drive
      +
+CoreOS tries to conform to each platform's native method to provide user data. Each cloud provider tends to be unique, but this complexity has been abstracted by CoreOS. You can view each platform's instructions on their documentation pages. The most universal way to provide cloud-config is [via config-drive](https://github.com/coreos/coreos-cloudinit/blob/master/Documentation/config-drive.md), which attaches a read-only device containing your cloud-config file to the machine.
      +
      +## Configuration Parameters
      +
      +### coreos
      +
      +#### etcd (deprecated. see etcd2)
      +
      +The `coreos.etcd.*` parameters will be translated to a partial systemd unit acting as an etcd configuration file.
      +If the platform environment supports the templating feature of coreos-cloudinit it is possible to automate etcd configuration with the `$private_ipv4` and `$public_ipv4` fields. For example, the following cloud-config document...
      +
      +```yaml
      +#cloud-config
      +
      +coreos:
      +  etcd:
      +    name: "node001"
      +    # generate a new token for each unique cluster from https://discovery.etcd.io/new
      +    discovery: "https://discovery.etcd.io/<token>"
      +    # multi-region and multi-cloud deployments need to use $public_ipv4
      +    addr: "$public_ipv4:4001"
      +    peer-addr: "$private_ipv4:7001"
      +```
      +
      +...will generate a systemd unit drop-in for etcd.service with the following contents:
      +
      +```yaml
      +[Service]
      +Environment="ETCD_NAME=node001"
      +Environment="ETCD_DISCOVERY=https://discovery.etcd.io/<token>"
      +Environment="ETCD_ADDR=203.0.113.29:4001"
      +Environment="ETCD_PEER_ADDR=192.0.2.13:7001"
      +```
      +
      +For more information about the available configuration parameters, see the [etcd documentation][etcd-config].
      +
      +_Note: The `$private_ipv4` and `$public_ipv4` substitution variables referenced in other documents are only supported on Amazon EC2, Google Compute Engine, OpenStack, Rackspace, DigitalOcean, and Vagrant._
      +
      +[etcd-config]: https://github.com/coreos/etcd/blob/release-0.4/Documentation/configuration.md
      +
      +#### etcd2
      +
      +The `coreos.etcd2.*` parameters will be translated to a partial systemd unit acting as an etcd configuration file.
      +If the platform environment supports the templating feature of coreos-cloudinit it is possible to automate etcd configuration with the `$private_ipv4` and `$public_ipv4` fields. When generating a [discovery token](https://discovery.etcd.io/new?size=3), set the `size` parameter, since etcd uses this to determine if all members have joined the cluster. After the cluster is bootstrapped, it can grow or shrink from this configured size.
      +
      +For example, the following cloud-config document...
      +
      +```yaml
      +#cloud-config
      +
      +coreos:
      +  etcd2:
      +    # generate a new token for each unique cluster from https://discovery.etcd.io/new?size=3
      +    discovery: "https://discovery.etcd.io/<token>"
      +    # multi-region and multi-cloud deployments need to use $public_ipv4
      +    advertise-client-urls: "http://$public_ipv4:2379"
      +    initial-advertise-peer-urls: "http://$private_ipv4:2380"
      +    # listen on both the official ports and the legacy ports
      +    # legacy ports can be omitted if your application doesn't depend on them
      +    listen-client-urls: "http://0.0.0.0:2379,http://0.0.0.0:4001"
      +    listen-peer-urls: "http://$private_ipv4:2380,http://$private_ipv4:7001"
      +```
      +
      +...will generate a systemd unit drop-in for etcd2.service with the following contents:
      +
      +```yaml
      +[Service]
      +Environment="ETCD_DISCOVERY=https://discovery.etcd.io/<token>"
      +Environment="ETCD_ADVERTISE_CLIENT_URLS=http://203.0.113.29:2379"
      +Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://192.0.2.13:2380"
      +Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
      +Environment="ETCD_LISTEN_PEER_URLS=http://192.0.2.13:2380,http://192.0.2.13:7001"
      +```
      +
      +For more information about the available configuration parameters, see the [etcd2 documentation][etcd2-config].
      +
      +_Note: The `$private_ipv4` and `$public_ipv4` substitution variables referenced in other documents are only supported on Amazon EC2, Google Compute Engine, OpenStack, Rackspace, DigitalOcean, and Vagrant._
      +
      +[etcd2-config]: https://github.com/coreos/etcd/blob/master/Documentation/configuration.md
      +
      +#### fleet
      +
      +The `coreos.fleet.*` parameters work very similarly to `coreos.etcd2.*`, and allow for the configuration of fleet through environment variables. For example, the following cloud-config document...
      +
      +```yaml
      +#cloud-config
      +
      +coreos:
      +  fleet:
      +      public-ip: "$public_ipv4"
      +      metadata: "region=us-west"
      +```
      +
      +...will generate a systemd unit drop-in like this:
      +
+```ini
      +[Service]
      +Environment="FLEET_PUBLIC_IP=203.0.113.29"
      +Environment="FLEET_METADATA=region=us-west"
      +```
      +
      +List of fleet configuration parameters:
      +
+- **agent_ttl**: An Agent will be considered dead if it fails to communicate with the Registry for longer than this amount of time
      +- **engine_reconcile_interval**: Interval in seconds at which the engine should reconcile the cluster schedule in etcd
      +- **etcd_cafile**: Path to CA file used for TLS communication with etcd
      +- **etcd_certfile**: Provide TLS configuration when SSL certificate authentication is enabled in etcd endpoints
      +- **etcd_keyfile**: Path to private key file used for TLS communication with etcd
      +- **etcd_key_prefix**: etcd prefix path to be used for fleet keys
      +- **etcd_request_timeout**: Amount of time in seconds to allow a single etcd request before considering it failed
      +- **etcd_servers**: Comma separated list of etcd endpoints
+- **metadata**: Comma separated key/value pairs that are published with the local machine to the fleet registry
      +- **public_ip**: IP accessible by other nodes for inter-host communication
      +- **verbosity**: Enable debug logging by setting this to an integer value greater than zero
      +
      +For more information on fleet configuration, see the [fleet documentation][fleet-config].
      +
      +[fleet-config]: https://github.com/coreos/fleet/blob/master/Documentation/deployment-and-configuration.md#configuration
      +
      +#### flannel
      +
      +The `coreos.flannel.*` parameters also work very similarly to `coreos.etcd2.*`
      +and `coreos.fleet.*`. They can be used to set environment variables for
      +flanneld. For example, the following cloud-config...
      +
      +```yaml
      +#cloud-config
      +
      +coreos:
      +  flannel:
      +      etcd_prefix: "/coreos.com/network2"
      +```
      +
      +...will generate a systemd unit drop-in like so:
      +
      +```
      +[Service]
      +Environment="FLANNELD_ETCD_PREFIX=/coreos.com/network2"
      +```
      +
      +List of flannel configuration parameters:
      +
      +- **etcd_endpoints**: Comma separated list of etcd endpoints
      +- **etcd_cafile**: Path to CA file used for TLS communication with etcd
      +- **etcd_certfile**: Path to certificate file used for TLS communication with etcd
      +- **etcd_keyfile**: Path to private key file used for TLS communication with etcd
      +- **etcd_prefix**: etcd prefix path to be used for flannel keys
      +- **ip_masq**: Install IP masquerade rules for traffic outside of flannel subnet
      +- **subnet_file**: Path to flannel subnet file to write out
      +- **interface**: Interface (name or IP) that should be used for inter-host communication
      +- **public_ip**: IP accessible by other nodes for inter-host communication
      +
      +For more information on flannel configuration, see the [flannel documentation][flannel-readme].
      +
      +[flannel-readme]: https://github.com/coreos/flannel/blob/master/README.md
      +
      +#### locksmith
      +
      +The `coreos.locksmith.*` parameters can be used to set environment variables
      +for locksmith. For example, the following cloud-config...
      +
      +```yaml
      +#cloud-config
      +
      +coreos:
      +  locksmith:
      +      endpoint: "http://example.com:2379"
      +```
      +
      +...will generate a systemd unit drop-in like so:
      +
      +```
      +[Service]
      +Environment="LOCKSMITHD_ENDPOINT=http://example.com:2379"
      +```
      +
      +List of locksmith configuration parameters:
      +
      +- **endpoint**: Comma separated list of etcd endpoints
      +- **etcd_cafile**: Path to CA file used for TLS communication with etcd
      +- **etcd_certfile**: Path to certificate file used for TLS communication with etcd
      +- **etcd_keyfile**: Path to private key file used for TLS communication with etcd
      +
      +For the complete list of locksmith configuration parameters, see the [locksmith documentation][locksmith-readme].
      +
      +[locksmith-readme]: https://github.com/coreos/locksmith/blob/master/README.md
      +
      +#### update
      +
      +The `coreos.update.*` parameters manipulate settings related to how CoreOS instances are updated.
      +
      +These fields will be written out to and replace `/etc/coreos/update.conf`. If only one of the parameters is given it will only overwrite the given field. 
      +The `reboot-strategy` parameter also affects the behaviour of [locksmith](https://github.com/coreos/locksmith). 
      +
      +- **reboot-strategy**: One of "reboot", "etcd-lock", "best-effort" or "off" for controlling when reboots are issued after an update is performed.
      +  - _reboot_: Reboot immediately after an update is applied.
      +  - _etcd-lock_: Reboot after first taking a distributed lock in etcd, this guarantees that only one host will reboot concurrently and that the cluster will remain available during the update.
+  - _best-effort_: If etcd is running, "etcd-lock", otherwise simply "reboot".
+  - _off_: Disable rebooting after updates are applied (not recommended).
      +- **server**: The location of the [CoreUpdate][coreupdate] server which will be queried for updates. Also known as the [omaha][omaha-docs] server endpoint.
+- **group**: Signifies the channel which should be used for automatic updates. This value defaults to the version of the image initially downloaded. (one of "master", "alpha", "beta", "stable")
      +
      +[coreupdate]: https://coreos.com/products/coreupdate
      +[omaha-docs]: https://coreos.com/docs/coreupdate/custom-apps/coreupdate-protocol/
      +
      +*Note: cloudinit will only manipulate the locksmith unit file in the systemd runtime directory (`/run/systemd/system/locksmithd.service`). If any manual modifications are made to an overriding unit configuration file (e.g. `/etc/systemd/system/locksmithd.service`), cloudinit will no longer be able to control the locksmith service unit.*
      +
      +##### Example
      +
      +```yaml
      +#cloud-config
      +coreos:
      +  update:
      +    reboot-strategy: "etcd-lock"
      +```
      +
      +#### units
      +
      +The `coreos.units.*` parameters define a list of arbitrary systemd units to start after booting. This feature is intended to help you start essential services required to mount storage and configure networking in order to join the CoreOS cluster. It is not intended to be a Chef/Puppet replacement.
      +
      +Each item is an object with the following fields:
      +
      +- **name**: String representing unit's name. Required.
      +- **runtime**: Boolean indicating whether or not to persist the unit across reboots. This is analogous to the `--runtime` argument to `systemctl enable`. The default value is false.
      +- **enable**: Boolean indicating whether or not to handle the [Install] section of the unit file. This is similar to running `systemctl enable <name>`. The default value is false.
      +- **content**: Plaintext string representing entire unit file. If no value is provided, the unit is assumed to exist already.
      +- **command**: Command to execute on unit: start, stop, reload, restart, try-restart, reload-or-restart, reload-or-try-restart. The default behavior is to not execute any commands.
      +- **mask**: Whether to mask the unit file by symlinking it to `/dev/null` (analogous to `systemctl mask <name>`). Note that unlike `systemctl mask`, **this will destructively remove any existing unit file** located at `/etc/systemd/system/<unit>`, to ensure that the mask succeeds. The default value is false.
      +- **drop-ins**: A list of unit drop-ins with the following fields:
      +  - **name**: String representing unit's name. Required.
      +  - **content**: Plaintext string representing entire file. Required.
      +
      +
      +**NOTE:** The command field is ignored for all network, netdev, and link units. The systemd-networkd.service unit will be restarted in their place.
      +
      +##### Examples
      +
      +Write a unit to disk, automatically starting it.
      +
      +```yaml
      +#cloud-config
      +
      +coreos:
      +  units:
      +    - name: "docker-redis.service"
      +      command: "start"
      +      content: |
      +        [Unit]
      +        Description=Redis container
      +        Author=Me
      +        After=docker.service
      +
      +        [Service]
      +        Restart=always
      +        ExecStart=/usr/bin/docker start -a redis_server
      +        ExecStop=/usr/bin/docker stop -t 2 redis_server
      +```
      +
      +Add the DOCKER_OPTS environment variable to docker.service.
      +
      +```yaml
      +#cloud-config
      +
      +coreos:
      +  units:
      +    - name: "docker.service"
      +      drop-ins:
      +        - name: "50-insecure-registry.conf"
      +          content: |
      +            [Service]
      +            Environment=DOCKER_OPTS='--insecure-registry="10.0.1.0/24"'
      +```
      +
      +Start the built-in `etcd2` and `fleet` services:
      +
      +```yaml
      +#cloud-config
      +
      +coreos:
      +  units:
      +    - name: "etcd2.service"
      +      command: "start"
      +    - name: "fleet.service"
      +      command: "start"
      +```
      +
      +### ssh_authorized_keys
      +
      +The `ssh_authorized_keys` parameter adds public SSH keys which will be authorized for the `core` user.
      +
      +The keys will be named "coreos-cloudinit" by default.
      +Override this by using the `--ssh-key-name` flag when calling `coreos-cloudinit`.
      +
      +```yaml
      +#cloud-config
      +
      +ssh_authorized_keys:
      +  - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0g+ZTxC7weoIJLUafOgrm+h..."
      +```
      +
      +### hostname
      +
      +The `hostname` parameter defines the system's hostname.
      +This is the local part of a fully-qualified domain name (i.e. `foo` in `foo.example.com`).
      +
      +```yaml
      +#cloud-config
      +
      +hostname: "coreos1"
      +```
      +
      +### users
      +
      +The `users` parameter adds or modifies the specified list of users. Each user is an object which consists of the following fields. Each field is optional and of type string unless otherwise noted.
      +All but the `passwd` and `ssh-authorized-keys` fields will be ignored if the user already exists.
      +
      +- **name**: Required. Login name of user
      +- **gecos**: GECOS comment of user
      +- **passwd**: Hash of the password to use for this user
      +- **homedir**: User's home directory. Defaults to /home/\<name\>
      +- **no-create-home**: Boolean. Skip home directory creation.
      +- **primary-group**: Default group for the user. Defaults to a new group created named after the user.
      +- **groups**: Add user to these additional groups
      +- **no-user-group**: Boolean. Skip default group creation.
      +- **ssh-authorized-keys**: List of public SSH keys to authorize for this user
      +- **coreos-ssh-import-github** [DEPRECATED]: Authorize SSH keys from GitHub user
      +- **coreos-ssh-import-github-users** [DEPRECATED]: Authorize SSH keys from a list of GitHub users
      +- **coreos-ssh-import-url** [DEPRECATED]: Authorize SSH keys imported from a url endpoint.
      +- **system**: Create the user as a system user. No home directory will be created.
      +- **no-log-init**: Boolean. Skip initialization of lastlog and faillog databases.
      +- **shell**: User's login shell.
      +
      +The following fields are not yet implemented:
      +
      +- **inactive**: Deactivate the user upon creation
      +- **lock-passwd**: Boolean. Disable password login for user
      +- **sudo**: Entry to add to /etc/sudoers for user. By default, no sudo access is authorized.
      +- **selinux-user**: Corresponding SELinux user
      +- **ssh-import-id**: Import SSH keys by ID from Launchpad.
      +
      +```yaml
      +#cloud-config
      +
      +users:
      +  - name: "elroy"
      +    passwd: "$6$5s2u6/jR$un0AvWnqilcgaNB3Mkxd5yYv6mTlWfOoCYHZmfi3LDKVltj.E8XNKEcwWm..."
      +    groups:
      +      - "sudo"
      +      - "docker"
      +    ssh-authorized-keys:
      +      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0g+ZTxC7weoIJLUafOgrm+h..."
      +```
      +
      +#### Generating a password hash
      +
      +If you choose to use a password instead of an SSH key, generating a safe hash is extremely important to the security of your system. Simplified hashes like md5crypt are trivial to crack on modern GPU hardware. Here are a few ways to generate secure hashes:
      +
      +```
      +# On Debian/Ubuntu (via the package "whois")
      +mkpasswd --method=SHA-512 --rounds=4096
      +
+# OpenSSL (note: this will only make md5crypt.  While better than plaintext it should not be considered fully secure)
      +openssl passwd -1
      +
      +# Python (change password and salt values)
      +python -c "import crypt, getpass, pwd; print crypt.crypt('password', '\$6\$SALT\$')"
      +
      +# Perl (change password and salt values)
      +perl -e 'print crypt("password","\$6\$SALT\$") . "\n"'
      +```
      +
      +Using a higher number of rounds will help create more secure passwords, but given enough time, password hashes can be reversed.  On most RPM based distributions there is a tool called mkpasswd available in the `expect` package, but this does not handle "rounds" nor advanced hashing algorithms. 
      +
      +### write_files
      +
      +The `write_files` directive defines a set of files to create on the local filesystem.
      +Each item in the list may have the following keys:
      +
      +- **path**: Absolute location on disk where contents should be written
      +- **content**: Data to write at the provided `path`
      +- **permissions**: Integer representing file permissions, typically in octal notation (i.e. 0644)
      +- **owner**: User and group that should own the file written to disk. This is equivalent to the `<user>:<group>` argument to `chown <user>:<group> <path>`.
      +- **encoding**: Optional. The encoding of the data in content. If not specified this defaults to the yaml document encoding (usually utf-8). Supported encoding types are:
      +    - **b64, base64**: Base64 encoded content
      +    - **gz, gzip**: gzip encoded content, for use with the !!binary tag
      +    - **gz+b64, gz+base64, gzip+b64, gzip+base64**: Base64 encoded gzip content
      +
      +
      +```yaml
      +#cloud-config
      +write_files:
      +  - path: "/etc/resolv.conf"
      +    permissions: "0644"
      +    owner: "root"
      +    content: |
      +      nameserver 8.8.8.8
      +  - path: "/etc/motd"
      +    permissions: "0644"
      +    owner: "root"
      +    content: |
      +      Good news, everyone!
      +  - path: "/tmp/like_this"
      +    permissions: "0644"
      +    owner: "root"
      +    encoding: "gzip"
      +    content: !!binary |
      +      H4sIAKgdh1QAAwtITM5WyK1USMqvUCjPLMlQSMssS1VIya9KzVPIySwszS9SyCpNLwYARQFQ5CcAAAA=
      +  - path: "/tmp/or_like_this"
      +    permissions: "0644"
      +    owner: "root"
      +    encoding: "gzip+base64"
      +    content: |
      +      H4sIAKgdh1QAAwtITM5WyK1USMqvUCjPLMlQSMssS1VIya9KzVPIySwszS9SyCpNLwYARQFQ5CcAAAA=
      +  - path: "/tmp/todolist"
      +    permissions: "0644"
      +    owner: "root"
      +    encoding: "base64"
      +    content: |
      +      UGFjayBteSBib3ggd2l0aCBmaXZlIGRvemVuIGxpcXVvciBqdWdz
      +```
      +
      +### manage_etc_hosts
      +
      +The `manage_etc_hosts` parameter configures the contents of the `/etc/hosts` file, which is used for local name resolution.
      +Currently, the only supported value is "localhost" which will cause your system's hostname
      +to resolve to "127.0.0.1".  This is helpful when the host does not have DNS
      +infrastructure in place to resolve its own hostname, for example, when using Vagrant.
      +
      +```yaml
      +#cloud-config
      +
      +manage_etc_hosts: "localhost"
      +```
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Documentation/config-drive.md b/vendor/github.com/coreos/coreos-cloudinit/Documentation/config-drive.md
      new file mode 100644
      index 00000000..e83a51c0
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/Documentation/config-drive.md
      @@ -0,0 +1,40 @@
      +# Distribution via Config Drive
      +
      +CoreOS supports providing configuration data via [config drive][config-drive]
      +disk images. Currently only providing a single script or cloud config file is
      +supported.
      +
      +[config-drive]: http://docs.openstack.org/user-guide/cli_config_drive.html
      +
      +## Contents and Format
      +
      +The image should be a single FAT or ISO9660 file system with the label
      +`config-2` and the configuration data should be located at
      +`openstack/latest/user_data`.
      +
      +For example, to wrap up a config named `user_data` in a config drive image:
      +
      +```sh
      +mkdir -p /tmp/new-drive/openstack/latest
      +cp user_data /tmp/new-drive/openstack/latest/user_data
      +mkisofs -R -V config-2 -o configdrive.iso /tmp/new-drive
      +rm -r /tmp/new-drive
      +```
      +
      +If on OS X, replace the `mkisofs` invocation with:
      +
      +```sh
      +hdiutil makehybrid -iso -joliet -default-volume-name config-2 -o configdrive.iso /tmp/new-drive
      +```
      +
      +## QEMU virtfs
      +
      +One exception to the above, when using QEMU it is possible to skip creating an
      +image and use a plain directory containing the same contents:
      +
      +```sh
      +qemu-system-x86_64 \
      +    -fsdev local,id=conf,security_model=none,readonly,path=/tmp/new-drive \
      +    -device virtio-9p-pci,fsdev=conf,mount_tag=config-2 \
      +    [usual qemu options here...]
      +```
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Documentation/debian-interfaces.md b/vendor/github.com/coreos/coreos-cloudinit/Documentation/debian-interfaces.md
      new file mode 100644
      index 00000000..d3c64a19
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/Documentation/debian-interfaces.md
      @@ -0,0 +1,27 @@
+# Debian Interfaces
      +**WARNING**: This option is EXPERIMENTAL and may change or be removed at any
      +point.  
      +There is basic support for converting from a Debian network configuration to
      +networkd unit files. The -convert-netconf=debian option is used to activate
      +this feature.
      +
+## convert-netconf
      +Default: ""  
      +Read the network config provided in cloud-drive and translate it from the
      +specified format into networkd unit files (requires the -from-configdrive
      +flag). Currently only supports "debian" which provides support for a small
+subset of the
+[Debian network configuration](https://wiki.debian.org/NetworkConfiguration). These options include:
      +
      +- interface config methods
      +	- static
      +		- address/netmask
      +		- gateway
      +		- hwaddress
      +		- dns-nameservers
      +	- dhcp
      +		- hwaddress
      +	- manual
      +	- loopback
      +- vlan_raw_device
      +- bond-slaves
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Documentation/vmware-guestinfo.md b/vendor/github.com/coreos/coreos-cloudinit/Documentation/vmware-guestinfo.md
      new file mode 100644
      index 00000000..c0089113
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/Documentation/vmware-guestinfo.md
      @@ -0,0 +1,35 @@
      +# VMWare Guestinfo Interface
      +
      +## Cloud-Config VMWare Guestinfo Variables
      +
      +coreos-cloudinit accepts configuration from the VMware RPC API's *guestinfo*
      +facility. This datasource can be enabled with the `--from-vmware-guestinfo`
      +flag to coreos-cloudinit.
      +
      +The following guestinfo variables are recognized and processed by cloudinit
      +when passed from the hypervisor to the virtual machine at boot time. Note that
      +property names are prefixed with `guestinfo.` in the VMX, e.g., `guestinfo.hostname`.
      +
      +|            guestinfo variable             |              type               |
      +|:--------------------------------------|:--------------------------------|
      +| `hostname`                            | `hostname`                      |
      +| `interface.<n>.name`                  | `string`                        |
      +| `interface.<n>.mac`                   | `MAC address`                   |
      +| `interface.<n>.dhcp`                  | `{"yes", "no"}`                 |
      +| `interface.<n>.role`                  | `{"public", "private"}`         |
      +| `interface.<n>.ip.<m>.address`        | `CIDR IP address`               |
      +| `interface.<n>.route.<l>.gateway`     | `IP address`                    |
      +| `interface.<n>.route.<l>.destination` | `CIDR IP address`               |
      +| `dns.server.<x>`                      | `IP address`                    |
      +| `coreos.config.data`                  | `string`                        |
      +| `coreos.config.data.encoding`         | `{"", "base64", "gzip+base64"}` |
      +| `coreos.config.url`                   | `URL`                           |
      +
      +Note: "n", "m", "l", and "x" are 0-indexed, incrementing integers. The
      +identifier for an `interface` does not correspond to anything outside of this
      +configuration; it serves only to distinguish between multiple `interface`s.
      +
      +The guide to [booting on VMWare][bootvmware] is the starting point for more
      +information about configuring and running CoreOS on VMWare.
      +
      +[bootvmware]: https://github.com/coreos/docs/blob/master/os/booting-on-vmware.md
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/Godeps.json b/vendor/github.com/coreos/coreos-cloudinit/Godeps/Godeps.json
      new file mode 100644
      index 00000000..77c04271
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/Godeps/Godeps.json
      @@ -0,0 +1,46 @@
      +{
      +	"ImportPath": "github.com/coreos/coreos-cloudinit",
      +	"GoVersion": "go1.3.3",
      +	"Packages": [
      +		"./..."
      +	],
      +	"Deps": [
      +		{
      +			"ImportPath": "github.com/cloudsigma/cepgo",
      +			"Rev": "1bfc4895bf5c4d3b599f3f6ee142299488c8739b"
      +		},
      +		{
      +			"ImportPath": "github.com/coreos/go-systemd/dbus",
      +			"Rev": "4fbc5060a317b142e6c7bfbedb65596d5f0ab99b"
      +		},
      +		{
      +			"ImportPath": "github.com/coreos/yaml",
      +			"Rev": "6b16a5714269b2f70720a45406b1babd947a17ef"
      +		},
      +		{
      +			"ImportPath": "github.com/dotcloud/docker/pkg/netlink",
      +			"Comment": "v0.11.1-359-g55d41c3e21e1",
      +			"Rev": "55d41c3e21e1593b944c06196ffb2ac57ab7f653"
      +		},
      +		{
      +			"ImportPath": "github.com/guelfey/go.dbus",
      +			"Rev": "f6a3a2366cc39b8479cadc499d3c735fb10fbdda"
      +		},
      +		{
      +			"ImportPath": "github.com/tarm/goserial",
      +			"Rev": "cdabc8d44e8e84f58f18074ae44337e1f2f375b9"
      +		},
      +		{
      +			"ImportPath": "github.com/sigma/vmw-guestinfo",
      +			"Rev": "95dd4126d6e8b4ef1970b3f3fe2e8cdd470d2903"
      +		},
      +		{
      +			"ImportPath": "github.com/sigma/vmw-ovflib",
      +			"Rev": "56b4f44581cac03d17d8270158bdfd0942ffe790"
      +		},
      +		{
      +			"ImportPath": "github.com/sigma/bdoor",
      +			"Rev": "babf2a4017b020d4ce04e8167076186e82645dd1"
      +		}
      +	]
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/Readme b/vendor/github.com/coreos/coreos-cloudinit/Godeps/Readme
      new file mode 100644
      index 00000000..4cdaa53d
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/Godeps/Readme
      @@ -0,0 +1,5 @@
      +This directory tree is generated automatically by godep.
      +
      +Please do not edit.
      +
      +See https://github.com/tools/godep for more information.
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/LICENSE b/vendor/github.com/coreos/coreos-cloudinit/LICENSE
      new file mode 100644
      index 00000000..e06d2081
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/LICENSE
      @@ -0,0 +1,202 @@
      +Apache License
      +                           Version 2.0, January 2004
      +                        http://www.apache.org/licenses/
      +
      +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
      +
      +   1. Definitions.
      +
      +      "License" shall mean the terms and conditions for use, reproduction,
      +      and distribution as defined by Sections 1 through 9 of this document.
      +
      +      "Licensor" shall mean the copyright owner or entity authorized by
      +      the copyright owner that is granting the License.
      +
      +      "Legal Entity" shall mean the union of the acting entity and all
      +      other entities that control, are controlled by, or are under common
      +      control with that entity. For the purposes of this definition,
      +      "control" means (i) the power, direct or indirect, to cause the
      +      direction or management of such entity, whether by contract or
      +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      +      outstanding shares, or (iii) beneficial ownership of such entity.
      +
      +      "You" (or "Your") shall mean an individual or Legal Entity
      +      exercising permissions granted by this License.
      +
      +      "Source" form shall mean the preferred form for making modifications,
      +      including but not limited to software source code, documentation
      +      source, and configuration files.
      +
      +      "Object" form shall mean any form resulting from mechanical
      +      transformation or translation of a Source form, including but
      +      not limited to compiled object code, generated documentation,
      +      and conversions to other media types.
      +
      +      "Work" shall mean the work of authorship, whether in Source or
      +      Object form, made available under the License, as indicated by a
      +      copyright notice that is included in or attached to the work
      +      (an example is provided in the Appendix below).
      +
      +      "Derivative Works" shall mean any work, whether in Source or Object
      +      form, that is based on (or derived from) the Work and for which the
      +      editorial revisions, annotations, elaborations, or other modifications
      +      represent, as a whole, an original work of authorship. For the purposes
      +      of this License, Derivative Works shall not include works that remain
      +      separable from, or merely link (or bind by name) to the interfaces of,
      +      the Work and Derivative Works thereof.
      +
      +      "Contribution" shall mean any work of authorship, including
      +      the original version of the Work and any modifications or additions
      +      to that Work or Derivative Works thereof, that is intentionally
      +      submitted to Licensor for inclusion in the Work by the copyright owner
      +      or by an individual or Legal Entity authorized to submit on behalf of
      +      the copyright owner. For the purposes of this definition, "submitted"
      +      means any form of electronic, verbal, or written communication sent
      +      to the Licensor or its representatives, including but not limited to
      +      communication on electronic mailing lists, source code control systems,
      +      and issue tracking systems that are managed by, or on behalf of, the
      +      Licensor for the purpose of discussing and improving the Work, but
      +      excluding communication that is conspicuously marked or otherwise
      +      designated in writing by the copyright owner as "Not a Contribution."
      +
      +      "Contributor" shall mean Licensor and any individual or Legal Entity
      +      on behalf of whom a Contribution has been received by Licensor and
      +      subsequently incorporated within the Work.
      +
      +   2. Grant of Copyright License. Subject to the terms and conditions of
      +      this License, each Contributor hereby grants to You a perpetual,
      +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      +      copyright license to reproduce, prepare Derivative Works of,
      +      publicly display, publicly perform, sublicense, and distribute the
      +      Work and such Derivative Works in Source or Object form.
      +
      +   3. Grant of Patent License. Subject to the terms and conditions of
      +      this License, each Contributor hereby grants to You a perpetual,
      +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      +      (except as stated in this section) patent license to make, have made,
      +      use, offer to sell, sell, import, and otherwise transfer the Work,
      +      where such license applies only to those patent claims licensable
      +      by such Contributor that are necessarily infringed by their
      +      Contribution(s) alone or by combination of their Contribution(s)
      +      with the Work to which such Contribution(s) was submitted. If You
      +      institute patent litigation against any entity (including a
      +      cross-claim or counterclaim in a lawsuit) alleging that the Work
      +      or a Contribution incorporated within the Work constitutes direct
      +      or contributory patent infringement, then any patent licenses
      +      granted to You under this License for that Work shall terminate
      +      as of the date such litigation is filed.
      +
      +   4. Redistribution. You may reproduce and distribute copies of the
      +      Work or Derivative Works thereof in any medium, with or without
      +      modifications, and in Source or Object form, provided that You
      +      meet the following conditions:
      +
      +      (a) You must give any other recipients of the Work or
      +          Derivative Works a copy of this License; and
      +
      +      (b) You must cause any modified files to carry prominent notices
      +          stating that You changed the files; and
      +
      +      (c) You must retain, in the Source form of any Derivative Works
      +          that You distribute, all copyright, patent, trademark, and
      +          attribution notices from the Source form of the Work,
      +          excluding those notices that do not pertain to any part of
      +          the Derivative Works; and
      +
      +      (d) If the Work includes a "NOTICE" text file as part of its
      +          distribution, then any Derivative Works that You distribute must
      +          include a readable copy of the attribution notices contained
      +          within such NOTICE file, excluding those notices that do not
      +          pertain to any part of the Derivative Works, in at least one
      +          of the following places: within a NOTICE text file distributed
      +          as part of the Derivative Works; within the Source form or
      +          documentation, if provided along with the Derivative Works; or,
      +          within a display generated by the Derivative Works, if and
      +          wherever such third-party notices normally appear. The contents
      +          of the NOTICE file are for informational purposes only and
      +          do not modify the License. You may add Your own attribution
      +          notices within Derivative Works that You distribute, alongside
      +          or as an addendum to the NOTICE text from the Work, provided
      +          that such additional attribution notices cannot be construed
      +          as modifying the License.
      +
      +      You may add Your own copyright statement to Your modifications and
      +      may provide additional or different license terms and conditions
      +      for use, reproduction, or distribution of Your modifications, or
      +      for any such Derivative Works as a whole, provided Your use,
      +      reproduction, and distribution of the Work otherwise complies with
      +      the conditions stated in this License.
      +
      +   5. Submission of Contributions. Unless You explicitly state otherwise,
      +      any Contribution intentionally submitted for inclusion in the Work
      +      by You to the Licensor shall be under the terms and conditions of
      +      this License, without any additional terms or conditions.
      +      Notwithstanding the above, nothing herein shall supersede or modify
      +      the terms of any separate license agreement you may have executed
      +      with Licensor regarding such Contributions.
      +
      +   6. Trademarks. This License does not grant permission to use the trade
      +      names, trademarks, service marks, or product names of the Licensor,
      +      except as required for reasonable and customary use in describing the
      +      origin of the Work and reproducing the content of the NOTICE file.
      +
      +   7. Disclaimer of Warranty. Unless required by applicable law or
      +      agreed to in writing, Licensor provides the Work (and each
      +      Contributor provides its Contributions) on an "AS IS" BASIS,
      +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      +      implied, including, without limitation, any warranties or conditions
      +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      +      PARTICULAR PURPOSE. You are solely responsible for determining the
      +      appropriateness of using or redistributing the Work and assume any
      +      risks associated with Your exercise of permissions under this License.
      +
      +   8. Limitation of Liability. In no event and under no legal theory,
      +      whether in tort (including negligence), contract, or otherwise,
      +      unless required by applicable law (such as deliberate and grossly
      +      negligent acts) or agreed to in writing, shall any Contributor be
      +      liable to You for damages, including any direct, indirect, special,
      +      incidental, or consequential damages of any character arising as a
      +      result of this License or out of the use or inability to use the
      +      Work (including but not limited to damages for loss of goodwill,
      +      work stoppage, computer failure or malfunction, or any and all
      +      other commercial damages or losses), even if such Contributor
      +      has been advised of the possibility of such damages.
      +
      +   9. Accepting Warranty or Additional Liability. While redistributing
      +      the Work or Derivative Works thereof, You may choose to offer,
      +      and charge a fee for, acceptance of support, warranty, indemnity,
      +      or other liability obligations and/or rights consistent with this
      +      License. However, in accepting such obligations, You may act only
      +      on Your own behalf and on Your sole responsibility, not on behalf
      +      of any other Contributor, and only if You agree to indemnify,
      +      defend, and hold each Contributor harmless for any liability
      +      incurred by, or claims asserted against, such Contributor by reason
      +      of your accepting any such warranty or additional liability.
      +
      +   END OF TERMS AND CONDITIONS
      +
      +   APPENDIX: How to apply the Apache License to your work.
      +
      +      To apply the Apache License to your work, attach the following
      +      boilerplate notice, with the fields enclosed by brackets "{}"
      +      replaced with your own identifying information. (Don't include
      +      the brackets!)  The text should be enclosed in the appropriate
      +      comment syntax for the file format. We also recommend that a
      +      file or class name and description of purpose be included on the
      +      same "printed page" as the copyright notice for easier
      +      identification within third-party archives.
      +
      +   Copyright {yyyy} {name of copyright owner}
      +
      +   Licensed under the Apache License, Version 2.0 (the "License");
      +   you may not use this file except in compliance with the License.
      +   You may obtain a copy of the License at
      +
      +       http://www.apache.org/licenses/LICENSE-2.0
      +
      +   Unless required by applicable law or agreed to in writing, software
      +   distributed under the License is distributed on an "AS IS" BASIS,
      +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +   See the License for the specific language governing permissions and
      +   limitations under the License.
      +
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/MAINTAINERS b/vendor/github.com/coreos/coreos-cloudinit/MAINTAINERS
      new file mode 100644
      index 00000000..a4d60bbf
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/MAINTAINERS
      @@ -0,0 +1,2 @@
      +Alex Crawford <alex.crawford@coreos.com> (@crawford)
      +Jonathan Boulle <jonathan.boulle@coreos.com> (@jonboulle)
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/NOTICE b/vendor/github.com/coreos/coreos-cloudinit/NOTICE
      new file mode 100644
      index 00000000..b39ddfa5
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/NOTICE
      @@ -0,0 +1,5 @@
      +CoreOS Project
      +Copyright 2014 CoreOS, Inc
      +
      +This product includes software developed at CoreOS, Inc.
      +(http://www.coreos.com/).
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/README.md b/vendor/github.com/coreos/coreos-cloudinit/README.md
      new file mode 100644
      index 00000000..9c4f3141
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/README.md
      @@ -0,0 +1,86 @@
      +# coreos-cloudinit [![Build Status](https://travis-ci.org/coreos/coreos-cloudinit.png?branch=master)](https://travis-ci.org/coreos/coreos-cloudinit)
      +
      +coreos-cloudinit enables a user to customize CoreOS machines by providing either a cloud-config document or an executable script through user-data.
      +
      +## Configuration with cloud-config
      +
      +A subset of the [official cloud-config spec][official-cloud-config] is implemented by coreos-cloudinit.
      +Additionally, several [CoreOS-specific options][custom-cloud-config] have been implemented to support interacting with unit files, bootstrapping etcd clusters, and more.
      +All supported cloud-config parameters are [documented here][all-cloud-config]. 
      +
      +[official-cloud-config]: http://cloudinit.readthedocs.org/en/latest/topics/format.html#cloud-config-data
      +[custom-cloud-config]: https://github.com/coreos/coreos-cloudinit/blob/master/Documentation/cloud-config.md#coreos-parameters
      +[all-cloud-config]: https://github.com/coreos/coreos-cloudinit/tree/master/Documentation/cloud-config.md
      +
      +The following is an example cloud-config document:
      +
      +```
      +#cloud-config
      +
      +coreos:
      +    units:
      +      - name: etcd.service
      +        command: start
      +
      +users:
      +  - name: core
      +    passwd: $1$allJZawX$00S5T756I5PGdQga5qhqv1
      +
      +write_files:
      +  - path: /etc/resolv.conf
      +    content: |
      +        nameserver 192.0.2.2
      +        nameserver 192.0.2.3
      +```
      +
      +## Executing a Script
      +
      +coreos-cloudinit supports executing user-data as a script instead of parsing it as a cloud-config document.
      +Make sure the first line of your user-data is a shebang and coreos-cloudinit will attempt to execute it:
      +
      +```
      +#!/bin/bash
      +
      +echo 'Hello, world!'
      +```
      +
      +## user-data Field Substitution
      +
      +coreos-cloudinit will replace the following set of tokens in your user-data with system-generated values.
      +
      +| Token         | Description |
      +| ------------- | ----------- |
      +| $public_ipv4  | Public IPv4 address of machine |
      +| $private_ipv4 | Private IPv4 address of machine |
      +
      +These values are determined by CoreOS based on the given provider on which your machine is running.
      +Read more about provider-specific functionality in the [CoreOS OEM documentation][oem-doc].
      +
      +[oem-doc]: https://coreos.com/docs/sdk-distributors/distributors/notes-for-distributors/
      +
      +For example, submitting the following user-data...
      +
      +```
      +#cloud-config
      +coreos:
      +    etcd:
      +        addr: $public_ipv4:4001
      +        peer-addr: $private_ipv4:7001
      +```
      +
      +...will result in this cloud-config document being executed:
      +
      +```
      +#cloud-config
      +coreos:
      +    etcd:
      +        addr: 203.0.113.29:4001
      +        peer-addr: 192.0.2.13:7001
      +```
      +
      +## Bugs
      +
      +Please use the [CoreOS issue tracker][bugs] to report all bugs, issues, and feature requests.
      +
      +[bugs]: https://github.com/coreos/bugs/issues/new?labels=component/cloud-init
      +
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/build b/vendor/github.com/coreos/coreos-cloudinit/build
      new file mode 100755
      index 00000000..36865afc
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/build
      @@ -0,0 +1,17 @@
      +#!/bin/bash -e
      +
      +NAME="coreos-cloudinit"
      +ORG_PATH="github.com/coreos"
      +REPO_PATH="${ORG_PATH}/${NAME}"
      +VERSION=$(git describe --dirty --tags)
      +GLDFLAGS="-X main.version \"${VERSION}\""
      +
      +if [ ! -h gopath/src/${REPO_PATH} ]; then
      +	mkdir -p gopath/src/${ORG_PATH}
      +	ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255
      +fi
      +
      +export GOBIN=${PWD}/bin
      +export GOPATH=${PWD}/gopath
      +
      +go build -ldflags "${GLDFLAGS}" -o ${GOBIN}/${NAME} ${REPO_PATH}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/config/config.go b/vendor/github.com/coreos/coreos-cloudinit/config/config.go
      index 94dfe51f..ede380b4 100644
      --- a/vendor/github.com/coreos/coreos-cloudinit/config/config.go
      +++ b/vendor/github.com/coreos/coreos-cloudinit/config/config.go
      @@ -21,7 +21,7 @@ import (
       	"strings"
       	"unicode"
       
      -	"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml"
      +	"github.com/coreos/yaml"
       )
       
       // CloudConfig encapsulates the entire cloud-config configuration file and maps
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/config/validate/validate.go b/vendor/github.com/coreos/coreos-cloudinit/config/validate/validate.go
      index d8bd1e5d..cf668cf7 100644
      --- a/vendor/github.com/coreos/coreos-cloudinit/config/validate/validate.go
      +++ b/vendor/github.com/coreos/coreos-cloudinit/config/validate/validate.go
      @@ -23,7 +23,7 @@ import (
       
       	"github.com/coreos/coreos-cloudinit/config"
       
      -	"github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml"
      +	"github.com/coreos/yaml"
       )
       
       var (
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/coreos-cloudinit.go b/vendor/github.com/coreos/coreos-cloudinit/coreos-cloudinit.go
      new file mode 100644
      index 00000000..76b3d63b
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/coreos-cloudinit.go
      @@ -0,0 +1,428 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package main
      +
      +import (
      +	"bytes"
      +	"compress/gzip"
      +	"flag"
      +	"fmt"
      +	"io/ioutil"
      +	"log"
      +	"os"
      +	"runtime"
      +	"sync"
      +	"time"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +	"github.com/coreos/coreos-cloudinit/config/validate"
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/datasource/configdrive"
      +	"github.com/coreos/coreos-cloudinit/datasource/file"
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma"
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean"
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata/ec2"
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata/packet"
      +	"github.com/coreos/coreos-cloudinit/datasource/proc_cmdline"
      +	"github.com/coreos/coreos-cloudinit/datasource/url"
      +	"github.com/coreos/coreos-cloudinit/datasource/vmware"
      +	"github.com/coreos/coreos-cloudinit/datasource/waagent"
      +	"github.com/coreos/coreos-cloudinit/initialize"
      +	"github.com/coreos/coreos-cloudinit/network"
      +	"github.com/coreos/coreos-cloudinit/pkg"
      +	"github.com/coreos/coreos-cloudinit/system"
      +)
      +
      +const (
      +	datasourceInterval    = 100 * time.Millisecond
      +	datasourceMaxInterval = 30 * time.Second
      +	datasourceTimeout     = 5 * time.Minute
      +)
      +
      +var (
      +	flags = struct {
      +		printVersion  bool
      +		ignoreFailure bool
      +		sources       struct {
      +			file                        string
      +			configDrive                 string
      +			waagent                     string
      +			metadataService             bool
      +			ec2MetadataService          string
      +			cloudSigmaMetadataService   bool
      +			digitalOceanMetadataService string
      +			packetMetadataService       string
      +			url                         string
      +			procCmdLine                 bool
      +			vmware                      bool
      +			ovfEnv                      string
      +		}
      +		convertNetconf string
      +		workspace      string
      +		sshKeyName     string
      +		oem            string
      +		validate       bool
      +	}{}
      +	version = "was not built properly"
      +)
      +
      +func init() {
      +	flag.BoolVar(&flags.printVersion, "version", false, "Print the version and exit")
      +	flag.BoolVar(&flags.ignoreFailure, "ignore-failure", false, "Exits with 0 status in the event of malformed input from user-data")
      +	flag.StringVar(&flags.sources.file, "from-file", "", "Read user-data from provided file")
      +	flag.StringVar(&flags.sources.configDrive, "from-configdrive", "", "Read data from provided cloud-drive directory")
      +	flag.StringVar(&flags.sources.waagent, "from-waagent", "", "Read data from provided waagent directory")
      +	flag.BoolVar(&flags.sources.metadataService, "from-metadata-service", false, "[DEPRECATED - Use -from-ec2-metadata] Download data from metadata service")
      +	flag.StringVar(&flags.sources.ec2MetadataService, "from-ec2-metadata", "", "Download EC2 data from the provided url")
      +	flag.BoolVar(&flags.sources.cloudSigmaMetadataService, "from-cloudsigma-metadata", false, "Download data from CloudSigma server context")
      +	flag.StringVar(&flags.sources.digitalOceanMetadataService, "from-digitalocean-metadata", "", "Download DigitalOcean data from the provided url")
      +	flag.StringVar(&flags.sources.packetMetadataService, "from-packet-metadata", "", "Download Packet data from metadata service")
      +	flag.StringVar(&flags.sources.url, "from-url", "", "Download user-data from provided url")
      +	flag.BoolVar(&flags.sources.procCmdLine, "from-proc-cmdline", false, fmt.Sprintf("Parse %s for '%s=<url>', using the cloud-config served by an HTTP GET to <url>", proc_cmdline.ProcCmdlineLocation, proc_cmdline.ProcCmdlineCloudConfigFlag))
      +	flag.BoolVar(&flags.sources.vmware, "from-vmware-guestinfo", false, "Read data from VMware guestinfo")
      +	flag.StringVar(&flags.sources.ovfEnv, "from-vmware-ovf-env", "", "Read data from OVF Environment")
      +	flag.StringVar(&flags.oem, "oem", "", "Use the settings specific to the provided OEM")
      +	flag.StringVar(&flags.convertNetconf, "convert-netconf", "", "Read the network config provided in cloud-drive and translate it from the specified format into networkd unit files")
      +	flag.StringVar(&flags.workspace, "workspace", "/var/lib/coreos-cloudinit", "Base directory coreos-cloudinit should use to store data")
      +	flag.StringVar(&flags.sshKeyName, "ssh-key-name", initialize.DefaultSSHKeyName, "Add SSH keys to the system with the given name")
      +	flag.BoolVar(&flags.validate, "validate", false, "[EXPERIMENTAL] Validate the user-data but do not apply it to the system")
      +}
      +
      +type oemConfig map[string]string
      +
      +var (
      +	oemConfigs = map[string]oemConfig{
      +		"digitalocean": oemConfig{
      +			"from-digitalocean-metadata": "http://169.254.169.254/",
      +			"convert-netconf":            "digitalocean",
      +		},
      +		"ec2-compat": oemConfig{
      +			"from-ec2-metadata": "http://169.254.169.254/",
      +			"from-configdrive":  "/media/configdrive",
      +		},
      +		"rackspace-onmetal": oemConfig{
      +			"from-configdrive": "/media/configdrive",
      +			"convert-netconf":  "debian",
      +		},
      +		"azure": oemConfig{
      +			"from-waagent": "/var/lib/waagent",
      +		},
      +		"cloudsigma": oemConfig{
      +			"from-cloudsigma-metadata": "true",
      +		},
      +		"packet": oemConfig{
      +			"from-packet-metadata": "https://metadata.packet.net/",
      +		},
      +		"vmware": oemConfig{
      +			"from-vmware-guestinfo": "true",
      +			"convert-netconf":       "vmware",
      +		},
      +	}
      +)
      +
      +func main() {
      +	failure := false
      +
      +	// Conservative Go 1.5 upgrade strategy:
      +	// keep GOMAXPROCS' default at 1 for now.
      +	if os.Getenv("GOMAXPROCS") == "" {
      +		runtime.GOMAXPROCS(1)
      +	}
      +
      +	flag.Parse()
      +
      +	if c, ok := oemConfigs[flags.oem]; ok {
      +		for k, v := range c {
      +			flag.Set(k, v)
      +		}
      +	} else if flags.oem != "" {
      +		oems := make([]string, 0, len(oemConfigs))
      +		for k := range oemConfigs {
      +			oems = append(oems, k)
      +		}
      +		fmt.Printf("Invalid option to -oem: %q. Supported options: %q\n", flags.oem, oems)
      +		os.Exit(2)
      +	}
      +
      +	if flags.printVersion == true {
      +		fmt.Printf("coreos-cloudinit %s\n", version)
      +		os.Exit(0)
      +	}
      +
      +	switch flags.convertNetconf {
      +	case "":
      +	case "debian":
      +	case "digitalocean":
      +	case "packet":
      +	case "vmware":
      +	default:
      +		fmt.Printf("Invalid option to -convert-netconf: '%s'. Supported options: 'debian, digitalocean, packet, vmware'\n", flags.convertNetconf)
      +		os.Exit(2)
      +	}
      +
      +	dss := getDatasources()
      +	if len(dss) == 0 {
      +		fmt.Println("Provide at least one of --from-file, --from-configdrive, --from-ec2-metadata, --from-cloudsigma-metadata, --from-packet-metadata, --from-digitalocean-metadata, --from-vmware-guestinfo, --from-waagent, --from-url or --from-proc-cmdline")
      +		os.Exit(2)
      +	}
      +
      +	ds := selectDatasource(dss)
      +	if ds == nil {
      +		log.Println("No datasources available in time")
      +		os.Exit(1)
      +	}
      +
      +	log.Printf("Fetching user-data from datasource of type %q\n", ds.Type())
      +	userdataBytes, err := ds.FetchUserdata()
      +	if err != nil {
      +		log.Printf("Failed fetching user-data from datasource: %v. Continuing...\n", err)
      +		failure = true
      +	}
      +	userdataBytes, err = decompressIfGzip(userdataBytes)
      +	if err != nil {
      +		log.Printf("Failed decompressing user-data from datasource: %v. Continuing...\n", err)
      +		failure = true
      +	}
      +
      +	if report, err := validate.Validate(userdataBytes); err == nil {
      +		ret := 0
      +		for _, e := range report.Entries() {
      +			log.Println(e)
      +			ret = 1
      +		}
      +		if flags.validate {
      +			os.Exit(ret)
      +		}
      +	} else {
      +		log.Printf("Failed while validating user_data (%q)\n", err)
      +		if flags.validate {
      +			os.Exit(1)
      +		}
      +	}
      +
      +	log.Printf("Fetching meta-data from datasource of type %q\n", ds.Type())
      +	metadata, err := ds.FetchMetadata()
      +	if err != nil {
      +		log.Printf("Failed fetching meta-data from datasource: %v\n", err)
      +		os.Exit(1)
      +	}
      +
      +	// Apply environment to user-data
      +	env := initialize.NewEnvironment("/", ds.ConfigRoot(), flags.workspace, flags.sshKeyName, metadata)
      +	userdata := env.Apply(string(userdataBytes))
      +
      +	var ccu *config.CloudConfig
      +	var script *config.Script
      +	switch ud, err := initialize.ParseUserData(userdata); err {
      +	case initialize.ErrIgnitionConfig:
      +		fmt.Printf("Detected an Ignition config. Exiting...")
      +		os.Exit(0)
      +	case nil:
      +		switch t := ud.(type) {
      +		case *config.CloudConfig:
      +			ccu = t
      +		case *config.Script:
      +			script = t
      +		}
      +	default:
      +		fmt.Printf("Failed to parse user-data: %v\nContinuing...\n", err)
      +		failure = true
      +	}
      +
      +	log.Println("Merging cloud-config from meta-data and user-data")
      +	cc := mergeConfigs(ccu, metadata)
      +
      +	var ifaces []network.InterfaceGenerator
      +	if flags.convertNetconf != "" {
      +		var err error
      +		switch flags.convertNetconf {
      +		case "debian":
      +			ifaces, err = network.ProcessDebianNetconf(metadata.NetworkConfig.([]byte))
      +		case "digitalocean":
      +			ifaces, err = network.ProcessDigitalOceanNetconf(metadata.NetworkConfig.(digitalocean.Metadata))
      +		case "packet":
      +			ifaces, err = network.ProcessPacketNetconf(metadata.NetworkConfig.(packet.NetworkData))
      +		case "vmware":
      +			ifaces, err = network.ProcessVMwareNetconf(metadata.NetworkConfig.(map[string]string))
      +		default:
      +			err = fmt.Errorf("Unsupported network config format %q", flags.convertNetconf)
      +		}
      +		if err != nil {
      +			log.Printf("Failed to generate interfaces: %v\n", err)
      +			os.Exit(1)
      +		}
      +	}
      +
      +	if err = initialize.Apply(cc, ifaces, env); err != nil {
      +		log.Printf("Failed to apply cloud-config: %v\n", err)
      +		os.Exit(1)
      +	}
      +
      +	if script != nil {
      +		if err = runScript(*script, env); err != nil {
      +			log.Printf("Failed to run script: %v\n", err)
      +			os.Exit(1)
      +		}
      +	}
      +
      +	if failure && !flags.ignoreFailure {
      +		os.Exit(1)
      +	}
      +}
      +
      +// mergeConfigs merges certain options from md (meta-data from the datasource)
      +// onto cc (a CloudConfig derived from user-data), if they are not already set
      +// on cc (i.e. user-data always takes precedence)
      +func mergeConfigs(cc *config.CloudConfig, md datasource.Metadata) (out config.CloudConfig) {
      +	if cc != nil {
      +		out = *cc
      +	}
      +
      +	if md.Hostname != "" {
      +		if out.Hostname != "" {
      +			log.Printf("Warning: user-data hostname (%s) overrides metadata hostname (%s)\n", out.Hostname, md.Hostname)
      +		} else {
      +			out.Hostname = md.Hostname
      +		}
      +	}
      +	for _, key := range md.SSHPublicKeys {
      +		out.SSHAuthorizedKeys = append(out.SSHAuthorizedKeys, key)
      +	}
      +	return
      +}
      +
      +// getDatasources creates a slice of possible Datasources for cloudinit based
      +// on the different source command-line flags.
      +func getDatasources() []datasource.Datasource {
      +	dss := make([]datasource.Datasource, 0, 5)
      +	if flags.sources.file != "" {
      +		dss = append(dss, file.NewDatasource(flags.sources.file))
      +	}
      +	if flags.sources.url != "" {
      +		dss = append(dss, url.NewDatasource(flags.sources.url))
      +	}
      +	if flags.sources.configDrive != "" {
      +		dss = append(dss, configdrive.NewDatasource(flags.sources.configDrive))
      +	}
      +	if flags.sources.metadataService {
      +		dss = append(dss, ec2.NewDatasource(ec2.DefaultAddress))
      +	}
      +	if flags.sources.ec2MetadataService != "" {
      +		dss = append(dss, ec2.NewDatasource(flags.sources.ec2MetadataService))
      +	}
      +	if flags.sources.cloudSigmaMetadataService {
      +		dss = append(dss, cloudsigma.NewServerContextService())
      +	}
      +	if flags.sources.digitalOceanMetadataService != "" {
      +		dss = append(dss, digitalocean.NewDatasource(flags.sources.digitalOceanMetadataService))
      +	}
      +	if flags.sources.waagent != "" {
      +		dss = append(dss, waagent.NewDatasource(flags.sources.waagent))
      +	}
      +	if flags.sources.packetMetadataService != "" {
      +		dss = append(dss, packet.NewDatasource(flags.sources.packetMetadataService))
      +	}
      +	if flags.sources.procCmdLine {
      +		dss = append(dss, proc_cmdline.NewDatasource())
      +	}
      +	if flags.sources.vmware {
      +		dss = append(dss, vmware.NewDatasource(""))
      +	}
      +	if flags.sources.ovfEnv != "" {
      +		dss = append(dss, vmware.NewDatasource(flags.sources.ovfEnv))
      +	}
      +	return dss
      +}
      +
      +// selectDatasource attempts to choose a valid Datasource to use based on its
      +// current availability. The first Datasource to report to be available is
      +// returned. Datasources will be retried if possible if they are not
      +// immediately available. If all Datasources are permanently unavailable or
      +// datasourceTimeout is reached before one becomes available, nil is returned.
      +func selectDatasource(sources []datasource.Datasource) datasource.Datasource {
      +	ds := make(chan datasource.Datasource)
      +	stop := make(chan struct{})
      +	var wg sync.WaitGroup
      +
      +	for _, s := range sources {
      +		wg.Add(1)
      +		go func(s datasource.Datasource) {
      +			defer wg.Done()
      +
      +			duration := datasourceInterval
      +			for {
      +				log.Printf("Checking availability of %q\n", s.Type())
      +				if s.IsAvailable() {
      +					ds <- s
      +					return
      +				} else if !s.AvailabilityChanges() {
      +					return
      +				}
      +				select {
      +				case <-stop:
      +					return
      +				case <-time.After(duration):
      +					duration = pkg.ExpBackoff(duration, datasourceMaxInterval)
      +				}
      +			}
      +		}(s)
      +	}
      +
      +	done := make(chan struct{})
      +	go func() {
      +		wg.Wait()
      +		close(done)
      +	}()
      +
      +	var s datasource.Datasource
      +	select {
      +	case s = <-ds:
      +	case <-done:
      +	case <-time.After(datasourceTimeout):
      +	}
      +
      +	close(stop)
      +	return s
      +}
      +
      +// TODO(jonboulle): this should probably be refactored and moved into a different module
      +func runScript(script config.Script, env *initialize.Environment) error {
      +	err := initialize.PrepWorkspace(env.Workspace())
      +	if err != nil {
      +		log.Printf("Failed preparing workspace: %v\n", err)
      +		return err
      +	}
      +	path, err := initialize.PersistScriptInWorkspace(script, env.Workspace())
      +	if err == nil {
      +		var name string
      +		name, err = system.ExecuteScript(path)
      +		initialize.PersistUnitNameInWorkspace(name, env.Workspace())
      +	}
      +	return err
      +}
      +
      +const gzipMagicBytes = "\x1f\x8b"
      +
      +func decompressIfGzip(userdataBytes []byte) ([]byte, error) {
      +	if !bytes.HasPrefix(userdataBytes, []byte(gzipMagicBytes)) {
      +		return userdataBytes, nil
      +	}
      +	gzr, err := gzip.NewReader(bytes.NewReader(userdataBytes))
      +	if err != nil {
      +		return nil, err
      +	}
      +	defer gzr.Close()
      +	return ioutil.ReadAll(gzr)
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/coreos-cloudinit_test.go b/vendor/github.com/coreos/coreos-cloudinit/coreos-cloudinit_test.go
      new file mode 100644
      index 00000000..cd5c6da4
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/coreos-cloudinit_test.go
      @@ -0,0 +1,147 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package main
      +
      +import (
      +	"bytes"
      +	"encoding/base64"
      +	"errors"
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +)
      +
      +func TestMergeConfigs(t *testing.T) {
      +	tests := []struct {
      +		cc *config.CloudConfig
      +		md datasource.Metadata
      +
      +		out config.CloudConfig
      +	}{
      +		{
      +			// If md is empty and cc is nil, result should be empty
      +			out: config.CloudConfig{},
      +		},
      +		{
      +			// If md and cc are empty, result should be empty
      +			cc:  &config.CloudConfig{},
      +			out: config.CloudConfig{},
      +		},
      +		{
      +			// If cc is empty, cc should be returned unchanged
      +			cc:  &config.CloudConfig{SSHAuthorizedKeys: []string{"abc", "def"}, Hostname: "cc-host"},
      +			out: config.CloudConfig{SSHAuthorizedKeys: []string{"abc", "def"}, Hostname: "cc-host"},
      +		},
      +		{
      +			// If cc is empty, cc should be returned unchanged(overridden)
      +			cc:  &config.CloudConfig{},
      +			md:  datasource.Metadata{Hostname: "md-host", SSHPublicKeys: map[string]string{"key": "ghi"}},
      +			out: config.CloudConfig{SSHAuthorizedKeys: []string{"ghi"}, Hostname: "md-host"},
      +		},
      +		{
      +			// If cc is nil, cc should be returned unchanged(overridden)
      +			md:  datasource.Metadata{Hostname: "md-host", SSHPublicKeys: map[string]string{"key": "ghi"}},
      +			out: config.CloudConfig{SSHAuthorizedKeys: []string{"ghi"}, Hostname: "md-host"},
      +		},
      +		{
      +			// user-data should override completely in the case of conflicts
      +			cc:  &config.CloudConfig{SSHAuthorizedKeys: []string{"abc", "def"}, Hostname: "cc-host"},
      +			md:  datasource.Metadata{Hostname: "md-host"},
      +			out: config.CloudConfig{SSHAuthorizedKeys: []string{"abc", "def"}, Hostname: "cc-host"},
      +		},
      +		{
      +			// Mixed merge should succeed
      +			cc:  &config.CloudConfig{SSHAuthorizedKeys: []string{"abc", "def"}, Hostname: "cc-host"},
      +			md:  datasource.Metadata{Hostname: "md-host", SSHPublicKeys: map[string]string{"key": "ghi"}},
      +			out: config.CloudConfig{SSHAuthorizedKeys: []string{"abc", "def", "ghi"}, Hostname: "cc-host"},
      +		},
      +		{
      +			// Completely non-conflicting merge should be fine
      +			cc:  &config.CloudConfig{Hostname: "cc-host"},
      +			md:  datasource.Metadata{SSHPublicKeys: map[string]string{"zaphod": "beeblebrox"}},
      +			out: config.CloudConfig{Hostname: "cc-host", SSHAuthorizedKeys: []string{"beeblebrox"}},
      +		},
      +		{
      +			// Non-mergeable settings in user-data should not be affected
      +			cc:  &config.CloudConfig{Hostname: "cc-host", ManageEtcHosts: config.EtcHosts("lolz")},
      +			md:  datasource.Metadata{Hostname: "md-host"},
      +			out: config.CloudConfig{Hostname: "cc-host", ManageEtcHosts: config.EtcHosts("lolz")},
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		out := mergeConfigs(tt.cc, tt.md)
      +		if !reflect.DeepEqual(tt.out, out) {
      +			t.Errorf("bad config (%d): want %#v, got %#v", i, tt.out, out)
      +		}
      +	}
      +}
      +
      +func mustDecode(in string) []byte {
      +	out, err := base64.StdEncoding.DecodeString(in)
      +	if err != nil {
      +		panic(err)
      +	}
      +	return out
      +}
      +
      +func TestDecompressIfGzip(t *testing.T) {
      +	tests := []struct {
      +		in []byte
      +
      +		out []byte
      +		err error
      +	}{
      +		{
      +			in: nil,
      +
      +			out: nil,
      +			err: nil,
      +		},
      +		{
      +			in: []byte{},
      +
      +			out: []byte{},
      +			err: nil,
      +		},
      +		{
      +			in: mustDecode("H4sIAJWV/VUAA1NOzskvTdFNzs9Ly0wHABt6mQENAAAA"),
      +
      +			out: []byte("#cloud-config"),
      +			err: nil,
      +		},
      +		{
      +			in: []byte("#cloud-config"),
      +
      +			out: []byte("#cloud-config"),
      +			err: nil,
      +		},
      +		{
      +			in: mustDecode("H4sCORRUPT=="),
      +
      +			out: nil,
      +			err: errors.New("any error"),
      +		},
      +	}
      +	for i, tt := range tests {
      +		out, err := decompressIfGzip(tt.in)
      +		if !bytes.Equal(out, tt.out) || (tt.err != nil && err == nil) {
      +			t.Errorf("bad gzip (%d): want (%s, %#v), got (%s, %#v)", i, string(tt.out), tt.err, string(out), err)
      +		}
      +	}
      +
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/cover b/vendor/github.com/coreos/coreos-cloudinit/cover
      new file mode 100755
      index 00000000..d8db5731
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/cover
      @@ -0,0 +1,27 @@
      +#!/bin/bash -e
      +#
      +# Generate coverage HTML for a package
      +# e.g. PKG=./initialize ./cover
      +#
      +
      +if [ -z "$PKG" ]; then
      +	echo "cover only works with a single package, sorry"
      +	exit 255
      +fi
      +
      +COVEROUT="coverage"
      +
      +if ! [ -d "$COVEROUT" ]; then
      +	mkdir "$COVEROUT"
      +fi
      +
      +# strip out slashes and dots
      +COVERPKG=${PKG//\//}
      +COVERPKG=${COVERPKG//./}
      +
      +# generate arg for "go test"
      +export COVER="-coverprofile ${COVEROUT}/${COVERPKG}.out"
      +
      +source ./test
      +
      +go tool cover -html=${COVEROUT}/${COVERPKG}.out
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/configdrive/configdrive.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/configdrive/configdrive.go
      new file mode 100644
      index 00000000..6c65bfcc
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/configdrive/configdrive.go
      @@ -0,0 +1,102 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package configdrive
      +
      +import (
      +	"encoding/json"
      +	"io/ioutil"
      +	"log"
      +	"os"
      +	"path"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +)
      +
      +const (
      +	openstackApiVersion = "latest"
      +)
      +
      +type configDrive struct {
      +	root     string
      +	readFile func(filename string) ([]byte, error)
      +}
      +
      +func NewDatasource(root string) *configDrive {
      +	return &configDrive{root, ioutil.ReadFile}
      +}
      +
      +func (cd *configDrive) IsAvailable() bool {
      +	_, err := os.Stat(cd.root)
      +	return !os.IsNotExist(err)
      +}
      +
      +func (cd *configDrive) AvailabilityChanges() bool {
      +	return true
      +}
      +
      +func (cd *configDrive) ConfigRoot() string {
      +	return cd.openstackRoot()
      +}
      +
      +func (cd *configDrive) FetchMetadata() (metadata datasource.Metadata, err error) {
      +	var data []byte
      +	var m struct {
      +		SSHAuthorizedKeyMap map[string]string `json:"public_keys"`
      +		Hostname            string            `json:"hostname"`
      +		NetworkConfig       struct {
      +			ContentPath string `json:"content_path"`
      +		} `json:"network_config"`
      +	}
      +
      +	if data, err = cd.tryReadFile(path.Join(cd.openstackVersionRoot(), "meta_data.json")); err != nil || len(data) == 0 {
      +		return
      +	}
      +	if err = json.Unmarshal([]byte(data), &m); err != nil {
      +		return
      +	}
      +
      +	metadata.SSHPublicKeys = m.SSHAuthorizedKeyMap
      +	metadata.Hostname = m.Hostname
      +	if m.NetworkConfig.ContentPath != "" {
      +		metadata.NetworkConfig, err = cd.tryReadFile(path.Join(cd.openstackRoot(), m.NetworkConfig.ContentPath))
      +	}
      +
      +	return
      +}
      +
      +func (cd *configDrive) FetchUserdata() ([]byte, error) {
      +	return cd.tryReadFile(path.Join(cd.openstackVersionRoot(), "user_data"))
      +}
      +
      +func (cd *configDrive) Type() string {
      +	return "cloud-drive"
      +}
      +
      +func (cd *configDrive) openstackRoot() string {
      +	return path.Join(cd.root, "openstack")
      +}
      +
      +func (cd *configDrive) openstackVersionRoot() string {
      +	return path.Join(cd.openstackRoot(), openstackApiVersion)
      +}
      +
      +func (cd *configDrive) tryReadFile(filename string) ([]byte, error) {
      +	log.Printf("Attempting to read from %q\n", filename)
      +	data, err := cd.readFile(filename)
      +	if os.IsNotExist(err) {
      +		err = nil
      +	}
      +	return data, err
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/configdrive/configdrive_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/configdrive/configdrive_test.go
      new file mode 100644
      index 00000000..e40a8fd5
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/configdrive/configdrive_test.go
      @@ -0,0 +1,145 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package configdrive
      +
      +import (
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/datasource/test"
      +)
      +
      +func TestFetchMetadata(t *testing.T) {
      +	for _, tt := range []struct {
      +		root  string
      +		files test.MockFilesystem
      +
      +		metadata datasource.Metadata
      +	}{
      +		{
      +			root:  "/",
      +			files: test.NewMockFilesystem(test.File{Path: "/openstack/latest/meta_data.json", Contents: ""}),
      +		},
      +		{
      +			root:  "/",
      +			files: test.NewMockFilesystem(test.File{Path: "/openstack/latest/meta_data.json", Contents: `{"ignore": "me"}`}),
      +		},
      +		{
      +			root:     "/",
      +			files:    test.NewMockFilesystem(test.File{Path: "/openstack/latest/meta_data.json", Contents: `{"hostname": "host"}`}),
      +			metadata: datasource.Metadata{Hostname: "host"},
      +		},
      +		{
      +			root: "/media/configdrive",
      +			files: test.NewMockFilesystem(test.File{Path: "/media/configdrive/openstack/latest/meta_data.json", Contents: `{"hostname": "host", "network_config": {"content_path": "config_file.json"}, "public_keys":{"1": "key1", "2": "key2"}}`},
      +				test.File{Path: "/media/configdrive/openstack/config_file.json", Contents: "make it work"},
      +			),
      +			metadata: datasource.Metadata{
      +				Hostname:      "host",
      +				NetworkConfig: []byte("make it work"),
      +				SSHPublicKeys: map[string]string{
      +					"1": "key1",
      +					"2": "key2",
      +				},
      +			},
      +		},
      +	} {
      +		cd := configDrive{tt.root, tt.files.ReadFile}
      +		metadata, err := cd.FetchMetadata()
      +		if err != nil {
      +			t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err)
      +		}
      +		if !reflect.DeepEqual(tt.metadata, metadata) {
      +			t.Fatalf("bad metadata for %+v: want %#v, got %#v", tt, tt.metadata, metadata)
      +		}
      +	}
      +}
      +
      +func TestFetchUserdata(t *testing.T) {
      +	for _, tt := range []struct {
      +		root  string
      +		files test.MockFilesystem
      +
      +		userdata string
      +	}{
      +		{
      +			"/",
      +			test.NewMockFilesystem(),
      +			"",
      +		},
      +		{
      +			"/",
      +			test.NewMockFilesystem(test.File{Path: "/openstack/latest/user_data", Contents: "userdata"}),
      +			"userdata",
      +		},
      +		{
      +			"/media/configdrive",
      +			test.NewMockFilesystem(test.File{Path: "/media/configdrive/openstack/latest/user_data", Contents: "userdata"}),
      +			"userdata",
      +		},
      +	} {
      +		cd := configDrive{tt.root, tt.files.ReadFile}
      +		userdata, err := cd.FetchUserdata()
      +		if err != nil {
      +			t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err)
      +		}
      +		if string(userdata) != tt.userdata {
      +			t.Fatalf("bad userdata for %+v: want %q, got %q", tt, tt.userdata, userdata)
      +		}
      +	}
      +}
      +
      +func TestConfigRoot(t *testing.T) {
      +	for _, tt := range []struct {
      +		root       string
      +		configRoot string
      +	}{
      +		{
      +			"/",
      +			"/openstack",
      +		},
      +		{
      +			"/media/configdrive",
      +			"/media/configdrive/openstack",
      +		},
      +	} {
      +		cd := configDrive{tt.root, nil}
      +		if configRoot := cd.ConfigRoot(); configRoot != tt.configRoot {
      +			t.Fatalf("bad config root for %q: want %q, got %q", tt, tt.configRoot, configRoot)
      +		}
      +	}
      +}
      +
      +func TestNewDatasource(t *testing.T) {
      +	for _, tt := range []struct {
      +		root       string
      +		expectRoot string
      +	}{
      +		{
      +			root:       "",
      +			expectRoot: "",
      +		},
      +		{
      +			root:       "/media/configdrive",
      +			expectRoot: "/media/configdrive",
      +		},
      +	} {
      +		service := NewDatasource(tt.root)
      +		if service.root != tt.expectRoot {
      +			t.Fatalf("bad root (%q): want %q, got %q", tt.root, tt.expectRoot, service.root)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/datasource.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/datasource.go
      new file mode 100644
      index 00000000..e459303c
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/datasource.go
      @@ -0,0 +1,38 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package datasource
      +
      +import (
      +	"net"
      +)
      +
      +type Datasource interface {
      +	IsAvailable() bool
      +	AvailabilityChanges() bool
      +	ConfigRoot() string
      +	FetchMetadata() (Metadata, error)
      +	FetchUserdata() ([]byte, error)
      +	Type() string
      +}
      +
      +type Metadata struct {
      +	PublicIPv4    net.IP
      +	PublicIPv6    net.IP
      +	PrivateIPv4   net.IP
      +	PrivateIPv6   net.IP
      +	Hostname      string
      +	SSHPublicKeys map[string]string
      +	NetworkConfig interface{}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/file/file.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/file/file.go
      new file mode 100644
      index 00000000..2700b460
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/file/file.go
      @@ -0,0 +1,55 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package file
      +
      +import (
      +	"io/ioutil"
      +	"os"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +)
      +
      +type localFile struct {
      +	path string
      +}
      +
      +func NewDatasource(path string) *localFile {
      +	return &localFile{path}
      +}
      +
      +func (f *localFile) IsAvailable() bool {
      +	_, err := os.Stat(f.path)
      +	return !os.IsNotExist(err)
      +}
      +
      +func (f *localFile) AvailabilityChanges() bool {
      +	return true
      +}
      +
      +func (f *localFile) ConfigRoot() string {
      +	return ""
      +}
      +
      +func (f *localFile) FetchMetadata() (datasource.Metadata, error) {
      +	return datasource.Metadata{}, nil
      +}
      +
      +func (f *localFile) FetchUserdata() ([]byte, error) {
      +	return ioutil.ReadFile(f.path)
      +}
      +
      +func (f *localFile) Type() string {
      +	return "local-file"
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma/server_context.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma/server_context.go
      new file mode 100644
      index 00000000..0c3924cc
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma/server_context.go
      @@ -0,0 +1,197 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package cloudsigma
      +
      +import (
      +	"bytes"
      +	"encoding/base64"
      +	"encoding/json"
      +	"errors"
      +	"io/ioutil"
      +	"net"
      +	"os"
      +	"strings"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +
      +	"github.com/cloudsigma/cepgo"
      +)
      +
      +const (
      +	userDataFieldName = "cloudinit-user-data"
      +)
      +
      +type serverContextService struct {
      +	client interface {
      +		All() (interface{}, error)
      +		Key(string) (interface{}, error)
      +		Meta() (map[string]string, error)
      +		FetchRaw(string) ([]byte, error)
      +	}
      +}
      +
      +func NewServerContextService() *serverContextService {
      +	return &serverContextService{
      +		client: cepgo.NewCepgo(),
      +	}
      +}
      +
      +func (_ *serverContextService) IsAvailable() bool {
      +	productNameFile, err := os.Open("/sys/class/dmi/id/product_name")
      +	if err != nil {
      +		return false
      +	}
      +	productName := make([]byte, 10)
      +	_, err = productNameFile.Read(productName)
      +
      +	return err == nil && string(productName) == "CloudSigma" && hasDHCPLeases()
      +}
      +
      +func (_ *serverContextService) AvailabilityChanges() bool {
      +	return true
      +}
      +
      +func (_ *serverContextService) ConfigRoot() string {
      +	return ""
      +}
      +
      +func (_ *serverContextService) Type() string {
      +	return "server-context"
      +}
      +
      +func (scs *serverContextService) FetchMetadata() (metadata datasource.Metadata, err error) {
      +	var (
      +		inputMetadata struct {
      +			Name string            `json:"name"`
      +			UUID string            `json:"uuid"`
      +			Meta map[string]string `json:"meta"`
      +			Nics []struct {
      +				Mac      string `json:"mac"`
      +				IPv4Conf struct {
      +					InterfaceType string `json:"interface_type"`
      +					IP            struct {
      +						UUID string `json:"uuid"`
      +					} `json:"ip"`
      +				} `json:"ip_v4_conf"`
      +				VLAN struct {
      +					UUID string `json:"uuid"`
      +				} `json:"vlan"`
      +			} `json:"nics"`
      +		}
      +		rawMetadata []byte
      +	)
      +
      +	if rawMetadata, err = scs.client.FetchRaw(""); err != nil {
      +		return
      +	}
      +
      +	if err = json.Unmarshal(rawMetadata, &inputMetadata); err != nil {
      +		return
      +	}
      +
      +	if inputMetadata.Name != "" {
      +		metadata.Hostname = inputMetadata.Name
      +	} else {
      +		metadata.Hostname = inputMetadata.UUID
      +	}
      +
      +	metadata.SSHPublicKeys = map[string]string{}
      +	// CloudSigma uses an empty string, rather than no string,
      +	// to represent the lack of a SSH key
      +	if key, _ := inputMetadata.Meta["ssh_public_key"]; len(key) > 0 {
      +		splitted := strings.Split(key, " ")
      +		metadata.SSHPublicKeys[splitted[len(splitted)-1]] = key
      +	}
      +
      +	for _, nic := range inputMetadata.Nics {
      +		if nic.IPv4Conf.IP.UUID != "" {
      +			metadata.PublicIPv4 = net.ParseIP(nic.IPv4Conf.IP.UUID)
      +		}
      +		if nic.VLAN.UUID != "" {
      +			if localIP, err := scs.findLocalIP(nic.Mac); err == nil {
      +				metadata.PrivateIPv4 = localIP
      +			}
      +		}
      +	}
      +
      +	return
      +}
      +
      +func (scs *serverContextService) FetchUserdata() ([]byte, error) {
      +	metadata, err := scs.client.Meta()
      +	if err != nil {
      +		return []byte{}, err
      +	}
      +
      +	userData, ok := metadata[userDataFieldName]
      +	if ok && isBase64Encoded(userDataFieldName, metadata) {
      +		if decodedUserData, err := base64.StdEncoding.DecodeString(userData); err == nil {
      +			return decodedUserData, nil
      +		} else {
      +			return []byte{}, nil
      +		}
      +	}
      +
      +	return []byte(userData), nil
      +}
      +
      +func (scs *serverContextService) findLocalIP(mac string) (net.IP, error) {
      +	ifaces, err := net.Interfaces()
      +	if err != nil {
      +		return nil, err
      +	}
      +	ifaceMac, err := net.ParseMAC(mac)
      +	if err != nil {
      +		return nil, err
      +	}
      +	for _, iface := range ifaces {
      +		if !bytes.Equal(iface.HardwareAddr, ifaceMac) {
      +			continue
      +		}
      +		addrs, err := iface.Addrs()
      +		if err != nil {
      +			continue
      +		}
      +
      +		for _, addr := range addrs {
      +			switch ip := addr.(type) {
      +			case *net.IPNet:
      +				if ip.IP.To4() != nil {
      +					return ip.IP.To4(), nil
      +				}
      +			}
      +		}
      +	}
      +	return nil, errors.New("Local IP not found")
      +}
      +
      +func isBase64Encoded(field string, userdata map[string]string) bool {
      +	base64Fields, ok := userdata["base64_fields"]
      +	if !ok {
      +		return false
      +	}
      +
      +	for _, base64Field := range strings.Split(base64Fields, ",") {
      +		if field == base64Field {
      +			return true
      +		}
      +	}
      +	return false
      +}
      +
      +func hasDHCPLeases() bool {
      +	files, err := ioutil.ReadDir("/run/systemd/netif/leases/")
      +	return err == nil && len(files) > 0
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma/server_context_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma/server_context_test.go
      new file mode 100644
      index 00000000..4f29d7f2
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma/server_context_test.go
      @@ -0,0 +1,200 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package cloudsigma
      +
      +import (
      +	"net"
      +	"reflect"
      +	"testing"
      +)
      +
      +type fakeCepgoClient struct {
      +	raw  []byte
      +	meta map[string]string
      +	keys map[string]interface{}
      +	err  error
      +}
      +
      +func (f *fakeCepgoClient) All() (interface{}, error) {
      +	return f.keys, f.err
      +}
      +
      +func (f *fakeCepgoClient) Key(key string) (interface{}, error) {
      +	return f.keys[key], f.err
      +}
      +
      +func (f *fakeCepgoClient) Meta() (map[string]string, error) {
      +	return f.meta, f.err
      +}
      +
      +func (f *fakeCepgoClient) FetchRaw(key string) ([]byte, error) {
      +	return f.raw, f.err
      +}
      +
      +func TestServerContextWithEmptyPublicSSHKey(t *testing.T) {
      +	client := new(fakeCepgoClient)
      +	scs := NewServerContextService()
      +	scs.client = client
      +	client.raw = []byte(`{
      +		"meta": {
      +			"base64_fields": "cloudinit-user-data",
      +			"cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
      +			"ssh_public_key": ""
      +		}
      +	}`)
      +	metadata, err := scs.FetchMetadata()
      +	if err != nil {
      +		t.Error(err.Error())
      +	}
      +
      +	if len(metadata.SSHPublicKeys) != 0 {
      +		t.Error("There should be no Public SSH Keys provided")
      +	}
      +}
      +
      +func TestServerContextFetchMetadata(t *testing.T) {
      +	client := new(fakeCepgoClient)
      +	scs := NewServerContextService()
      +	scs.client = client
      +	client.raw = []byte(`{
      +		"context": true,
      +		"cpu": 4000,
      +		"cpu_model": null,
      +		"cpus_instead_of_cores": false,
      +		"enable_numa": false,
      +		"grantees": [],
      +		"hv_relaxed": false,
      +		"hv_tsc": false,
      +		"jobs": [],
      +		"mem": 4294967296,
      +		"meta": {
      +			"base64_fields": "cloudinit-user-data",
      +			"cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
      +			"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe"
      +		},
      +		"name": "coreos",
      +		"nics": [
      +			{
      +				"boot_order": null,
      +				"ip_v4_conf": {
      +					"conf": "dhcp",
      +					"ip": {
      +						"gateway": "31.171.244.1",
      +						"meta": {},
      +						"nameservers": [
      +							"178.22.66.167",
      +							"178.22.71.56",
      +							"8.8.8.8"
      +						],
      +						"netmask": 22,
      +						"tags": [],
      +						"uuid": "31.171.251.74"
      +					}
      +				},
      +				"ip_v6_conf": null,
      +				"mac": "22:3d:09:6b:90:f3",
      +				"model": "virtio",
      +				"vlan": null
      +			},
      +			{
      +				"boot_order": null,
      +				"ip_v4_conf": null,
      +				"ip_v6_conf": null,
      +				"mac": "22:ae:4a:fb:8f:31",
      +				"model": "virtio",
      +				"vlan": {
      +					"meta": {
      +						"description": "",
      +						"name": "CoreOS"
      +					},
      +					"tags": [],
      +					"uuid": "5dec030e-25b8-4621-a5a4-a3302c9d9619"
      +				}
      +			}
      +		],
      +		"smp": 2,
      +		"status": "running",
      +		"uuid": "20a0059b-041e-4d0c-bcc6-9b2852de48b3"
      +	}`)
      +
      +	metadata, err := scs.FetchMetadata()
      +	if err != nil {
      +		t.Error(err.Error())
      +	}
      +
      +	if metadata.Hostname != "coreos" {
      +		t.Errorf("Hostname is not 'coreos' but %s instead", metadata.Hostname)
      +	}
      +
      +	if metadata.SSHPublicKeys["john@doe"] != "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe" {
      +		t.Error("Public SSH Keys are not being read properly")
      +	}
      +
      +	if !metadata.PublicIPv4.Equal(net.ParseIP("31.171.251.74")) {
      +		t.Errorf("Public IP is not 31.171.251.74 but %s instead", metadata.PublicIPv4)
      +	}
      +}
      +
      +func TestServerContextFetchUserdata(t *testing.T) {
      +	client := new(fakeCepgoClient)
      +	scs := NewServerContextService()
      +	scs.client = client
      +	userdataSets := []struct {
      +		in  map[string]string
      +		err bool
      +		out []byte
      +	}{
      +		{map[string]string{
      +			"base64_fields":       "cloudinit-user-data",
      +			"cloudinit-user-data": "aG9zdG5hbWU6IGNvcmVvc190ZXN0",
      +		}, false, []byte("hostname: coreos_test")},
      +		{map[string]string{
      +			"cloudinit-user-data": "#cloud-config\\nhostname: coreos1",
      +		}, false, []byte("#cloud-config\\nhostname: coreos1")},
      +		{map[string]string{}, false, []byte{}},
      +	}
      +
      +	for i, set := range userdataSets {
      +		client.meta = set.in
      +		got, err := scs.FetchUserdata()
      +		if (err != nil) != set.err {
      +			t.Errorf("case %d: bad error state (got %t, want %t)", i, err != nil, set.err)
      +		}
      +
      +		if !reflect.DeepEqual(got, set.out) {
      +			t.Errorf("case %d: got %s, want %s", i, got, set.out)
      +		}
      +	}
      +}
      +
      +func TestServerContextDecodingBase64UserData(t *testing.T) {
      +	base64Sets := []struct {
      +		in  string
      +		out bool
      +	}{
      +		{"cloudinit-user-data,foo,bar", true},
      +		{"bar,cloudinit-user-data,foo,bar", true},
      +		{"cloudinit-user-data", true},
      +		{"", false},
      +		{"foo", false},
      +	}
      +
      +	for _, set := range base64Sets {
      +		userdata := map[string]string{"base64_fields": set.in}
      +		if isBase64Encoded("cloudinit-user-data", userdata) != set.out {
      +			t.Errorf("isBase64Encoded(cloudinit-user-data, %s) should be %t", userdata, set.out)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata.go
      new file mode 100644
      index 00000000..fa6c605c
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata.go
      @@ -0,0 +1,111 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package digitalocean
      +
      +import (
      +	"encoding/json"
      +	"net"
      +	"strconv"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata"
      +)
      +
+// Endpoint constants for the DigitalOcean metadata service: the
+// link-local service address, the API version, and the derived
+// user-data and JSON metadata paths.
+const (
+	DefaultAddress = "http://169.254.169.254/"
+	apiVersion     = "metadata/v1"
+	userdataUrl    = apiVersion + "/user-data"
+	metadataPath   = apiVersion + ".json"
+)
+
+// Address is a single IP address entry from the metadata JSON
+// (ip_address/netmask/cidr/gateway).
+type Address struct {
+	IPAddress string `json:"ip_address"`
+	Netmask   string `json:"netmask"`
+	Cidr      int    `json:"cidr"`
+	Gateway   string `json:"gateway"`
+}
+
+// Interface is one network interface, with optional IPv4, IPv6 and
+// anchor-IPv4 addresses (nil when absent from the JSON).
+type Interface struct {
+	IPv4       *Address `json:"ipv4"`
+	IPv6       *Address `json:"ipv6"`
+	AnchorIPv4 *Address `json:"anchor_ipv4"`
+	MAC        string   `json:"mac"`
+	Type       string   `json:"type"`
+}
+
+// Interfaces groups interfaces by public/private visibility.
+type Interfaces struct {
+	Public  []Interface `json:"public"`
+	Private []Interface `json:"private"`
+}
+
+// DNS holds the nameserver list from the metadata JSON.
+type DNS struct {
+	Nameservers []string `json:"nameservers"`
+}
+
+// Metadata mirrors the top-level DigitalOcean metadata/v1.json
+// document (a subset of its fields).
+type Metadata struct {
+	Hostname   string     `json:"hostname"`
+	Interfaces Interfaces `json:"interfaces"`
+	PublicKeys []string   `json:"public_keys"`
+	DNS        DNS        `json:"dns"`
+}
      +
+// metadataService wraps the generic metadata.MetadataService with
+// DigitalOcean-specific fetching and parsing.
+type metadataService struct {
+	metadata.MetadataService
+}
+
+// NewDatasource returns a DigitalOcean datasource rooted at root
+// (typically DefaultAddress).
+func NewDatasource(root string) *metadataService {
+	return &metadataService{MetadataService: metadata.NewDatasource(root, apiVersion, userdataUrl, metadataPath)}
+}
      +
+// FetchMetadata downloads and parses the metadata JSON, mapping it
+// onto the datasource-neutral Metadata struct. An empty response body
+// yields a zero Metadata with a nil error.
+func (ms *metadataService) FetchMetadata() (metadata datasource.Metadata, err error) {
+	var data []byte
+	var m Metadata
+
+	if data, err = ms.FetchData(ms.MetadataUrl()); err != nil || len(data) == 0 {
+		return
+	}
+	if err = json.Unmarshal(data, &m); err != nil {
+		return
+	}
+
+	// Only the first public and first private interface are consulted
+	// for the top-level IP fields; the full set remains reachable via
+	// NetworkConfig below.
+	if len(m.Interfaces.Public) > 0 {
+		if m.Interfaces.Public[0].IPv4 != nil {
+			metadata.PublicIPv4 = net.ParseIP(m.Interfaces.Public[0].IPv4.IPAddress)
+		}
+		if m.Interfaces.Public[0].IPv6 != nil {
+			metadata.PublicIPv6 = net.ParseIP(m.Interfaces.Public[0].IPv6.IPAddress)
+		}
+	}
+	if len(m.Interfaces.Private) > 0 {
+		if m.Interfaces.Private[0].IPv4 != nil {
+			metadata.PrivateIPv4 = net.ParseIP(m.Interfaces.Private[0].IPv4.IPAddress)
+		}
+		if m.Interfaces.Private[0].IPv6 != nil {
+			metadata.PrivateIPv6 = net.ParseIP(m.Interfaces.Private[0].IPv6.IPAddress)
+		}
+	}
+	metadata.Hostname = m.Hostname
+	// SSH keys carry no names in this JSON, so key the map by list index.
+	metadata.SSHPublicKeys = map[string]string{}
+	for i, key := range m.PublicKeys {
+		metadata.SSHPublicKeys[strconv.Itoa(i)] = key
+	}
+	// Preserve the full parsed document for consumers of NetworkConfig.
+	metadata.NetworkConfig = m
+
+	return
+}
      +
+// Type identifies this datasource. NOTE(review): value receiver here
+// vs pointer receiver on FetchMetadata — inconsistent but harmless.
+func (ms metadataService) Type() string {
+	return "digitalocean-metadata-service"
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata_test.go
      new file mode 100644
      index 00000000..fd8a0a0c
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean/metadata_test.go
      @@ -0,0 +1,143 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package digitalocean
      +
      +import (
      +	"fmt"
      +	"net"
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata"
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata/test"
      +	"github.com/coreos/coreos-cloudinit/pkg"
      +)
      +
+// TestType verifies the datasource's type string.
+func TestType(t *testing.T) {
+	want := "digitalocean-metadata-service"
+	if kind := (metadataService{}).Type(); kind != want {
+		t.Fatalf("bad type: want %q, got %q", want, kind)
+	}
+}
      +
+// TestFetchMetadata table-tests FetchMetadata against a stub HTTP
+// client: malformed JSON (unmarshal error), a well-formed document
+// (fields mapped into datasource.Metadata), and a client timeout
+// (error passed through). Errors are compared by message via Error.
+func TestFetchMetadata(t *testing.T) {
+	for _, tt := range []struct {
+		root         string
+		metadataPath string
+		resources    map[string]string
+		expect       datasource.Metadata
+		clientErr    error
+		expectErr    error
+	}{
+		{
+			root:         "/",
+			metadataPath: "v1.json",
+			resources: map[string]string{
+				"/v1.json": "bad",
+			},
+			expectErr: fmt.Errorf("invalid character 'b' looking for beginning of value"),
+		},
+		{
+			root:         "/",
+			metadataPath: "v1.json",
+			resources: map[string]string{
+				"/v1.json": `{
+  "droplet_id": 1,
+  "user_data": "hello",
+  "vendor_data": "hello",
+  "public_keys": [
+    "publickey1",
+    "publickey2"
+  ],
+  "region": "nyc2",
+  "interfaces": {
+    "public": [
+      {
+        "ipv4": {
+          "ip_address": "192.168.1.2",
+          "netmask": "255.255.255.0",
+          "gateway": "192.168.1.1"
+        },
+        "ipv6": {
+          "ip_address": "fe00::",
+          "cidr": 126,
+          "gateway": "fe00::"
+        },
+        "mac": "ab:cd:ef:gh:ij",
+        "type": "public"
+      }
+    ]
+  }
+}`,
+			},
+			expect: datasource.Metadata{
+				PublicIPv4: net.ParseIP("192.168.1.2"),
+				PublicIPv6: net.ParseIP("fe00::"),
+				SSHPublicKeys: map[string]string{
+					"0": "publickey1",
+					"1": "publickey2",
+				},
+				NetworkConfig: Metadata{
+					Interfaces: Interfaces{
+						Public: []Interface{
+							Interface{
+								IPv4: &Address{
+									IPAddress: "192.168.1.2",
+									Netmask:   "255.255.255.0",
+									Gateway:   "192.168.1.1",
+								},
+								IPv6: &Address{
+									IPAddress: "fe00::",
+									Cidr:      126,
+									Gateway:   "fe00::",
+								},
+								MAC:  "ab:cd:ef:gh:ij",
+								Type: "public",
+							},
+						},
+					},
+					PublicKeys: []string{"publickey1", "publickey2"},
+				},
+			},
+		},
+		{
+			clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
+			expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
+		},
+	} {
+		service := &metadataService{
+			MetadataService: metadata.MetadataService{
+				Root:         tt.root,
+				Client:       &test.HttpClient{Resources: tt.resources, Err: tt.clientErr},
+				MetadataPath: tt.metadataPath,
+			},
+		}
+		metadata, err := service.FetchMetadata()
+		if Error(err) != Error(tt.expectErr) {
+			t.Fatalf("bad error (%q): want %q, got %q", tt.resources, tt.expectErr, err)
+		}
+		if !reflect.DeepEqual(tt.expect, metadata) {
+			t.Fatalf("bad fetch (%q): want %#q, got %#q", tt.resources, tt.expect, metadata)
+		}
+	}
+}
      +
+// Error renders an error as a string, mapping nil to "", so expected
+// and actual errors can be compared by message.
+func Error(err error) string {
+	if err != nil {
+		return err.Error()
+	}
+	return ""
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata.go
      new file mode 100644
      index 00000000..141fdc94
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata.go
      @@ -0,0 +1,115 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package ec2
      +
      +import (
      +	"bufio"
      +	"bytes"
      +	"fmt"
      +	"log"
      +	"net"
      +	"strings"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata"
      +	"github.com/coreos/coreos-cloudinit/pkg"
      +)
      +
+// Endpoint constants for the EC2-compatible metadata service
+// (API version 2009-04-04, line-oriented meta-data tree).
+const (
+	DefaultAddress = "http://169.254.169.254/"
+	apiVersion     = "2009-04-04/"
+	userdataPath   = apiVersion + "user-data"
+	metadataPath   = apiVersion + "meta-data"
+)
+
+// metadataService wraps the generic metadata.MetadataService with
+// EC2-specific attribute traversal.
+type metadataService struct {
+	metadata.MetadataService
+}
+
+// NewDatasource returns an EC2 datasource rooted at root (typically
+// DefaultAddress).
+func NewDatasource(root string) *metadataService {
+	return &metadataService{metadata.NewDatasource(root, apiVersion, userdataPath, metadataPath)}
+}
      +
+// FetchMetadata walks the EC2 meta-data tree, collecting SSH public
+// keys, the hostname, and the local/public IPv4 addresses. Attributes
+// that are absent (pkg.ErrNotFound) are skipped; any other fetch error
+// aborts and is returned.
+func (ms metadataService) FetchMetadata() (datasource.Metadata, error) {
+	metadata := datasource.Metadata{}
+
+	if keynames, err := ms.fetchAttributes(fmt.Sprintf("%s/public-keys", ms.MetadataUrl())); err == nil {
+		// Each public-keys listing line has the form "<index>=<name>";
+		// keyIDs maps name -> index for the per-key openssh-key fetch.
+		keyIDs := make(map[string]string)
+		for _, keyname := range keynames {
+			tokens := strings.SplitN(keyname, "=", 2)
+			if len(tokens) != 2 {
+				return metadata, fmt.Errorf("malformed public key: %q", keyname)
+			}
+			keyIDs[tokens[1]] = tokens[0]
+		}
+
+		metadata.SSHPublicKeys = map[string]string{}
+		for name, id := range keyIDs {
+			sshkey, err := ms.fetchAttribute(fmt.Sprintf("%s/public-keys/%s/openssh-key", ms.MetadataUrl(), id))
+			if err != nil {
+				return metadata, err
+			}
+			metadata.SSHPublicKeys[name] = sshkey
+			log.Printf("Found SSH key for %q\n", name)
+		}
+	} else if _, ok := err.(pkg.ErrNotFound); !ok {
+		return metadata, err
+	}
+
+	if hostname, err := ms.fetchAttribute(fmt.Sprintf("%s/hostname", ms.MetadataUrl())); err == nil {
+		// The hostname attribute may carry extra space-separated
+		// domains; keep only the first token.
+		metadata.Hostname = strings.Split(hostname, " ")[0]
+	} else if _, ok := err.(pkg.ErrNotFound); !ok {
+		return metadata, err
+	}
+
+	if localAddr, err := ms.fetchAttribute(fmt.Sprintf("%s/local-ipv4", ms.MetadataUrl())); err == nil {
+		metadata.PrivateIPv4 = net.ParseIP(localAddr)
+	} else if _, ok := err.(pkg.ErrNotFound); !ok {
+		return metadata, err
+	}
+
+	if publicAddr, err := ms.fetchAttribute(fmt.Sprintf("%s/public-ipv4", ms.MetadataUrl())); err == nil {
+		metadata.PublicIPv4 = net.ParseIP(publicAddr)
+	} else if _, ok := err.(pkg.ErrNotFound); !ok {
+		return metadata, err
+	}
+
+	return metadata, nil
+}
      +
+// Type identifies this datasource.
+func (ms metadataService) Type() string {
+	return "ec2-metadata-service"
+}
      +
+// fetchAttributes GETs url and returns its body split into lines
+// (the EC2 listing format). A scanner error, if any, is returned
+// alongside whatever lines were read.
+func (ms metadataService) fetchAttributes(url string) ([]string, error) {
+	resp, err := ms.FetchData(url)
+	if err != nil {
+		return nil, err
+	}
+	scanner := bufio.NewScanner(bytes.NewBuffer(resp))
+	data := make([]string, 0)
+	for scanner.Scan() {
+		data = append(data, scanner.Text())
+	}
+	return data, scanner.Err()
+}
      +
+// fetchAttribute returns the first line of the attribute at url.
+// A successful fetch of an empty body returns "" with a nil error.
+func (ms metadataService) fetchAttribute(url string) (string, error) {
+	if attrs, err := ms.fetchAttributes(url); err == nil && len(attrs) > 0 {
+		return attrs[0], nil
+	} else {
+		return "", err
+	}
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata_test.go
      new file mode 100644
      index 00000000..ba463c28
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/ec2/metadata_test.go
      @@ -0,0 +1,222 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package ec2
      +
      +import (
      +	"fmt"
      +	"net"
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata"
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata/test"
      +	"github.com/coreos/coreos-cloudinit/pkg"
      +)
      +
+// TestType verifies the datasource's type string.
+func TestType(t *testing.T) {
+	want := "ec2-metadata-service"
+	if kind := (metadataService{}).Type(); kind != want {
+		t.Fatalf("bad type: want %q, got %q", want, kind)
+	}
+}
      +
+// TestFetchAttributes checks line-splitting of listing responses over
+// a stub resource tree, plus error pass-through from the client.
+func TestFetchAttributes(t *testing.T) {
+	for _, s := range []struct {
+		resources map[string]string
+		err       error
+		tests     []struct {
+			path string
+			val  []string
+		}
+	}{
+		{
+			resources: map[string]string{
+				"/":      "a\nb\nc/",
+				"/c/":    "d\ne/",
+				"/c/e/":  "f",
+				"/a":     "1",
+				"/b":     "2",
+				"/c/d":   "3",
+				"/c/e/f": "4",
+			},
+			tests: []struct {
+				path string
+				val  []string
+			}{
+				{"/", []string{"a", "b", "c/"}},
+				{"/b", []string{"2"}},
+				{"/c/d", []string{"3"}},
+				{"/c/e/", []string{"f"}},
+			},
+		},
+		{
+			err: fmt.Errorf("test error"),
+			tests: []struct {
+				path string
+				val  []string
+			}{
+				{"", nil},
+			},
+		},
+	} {
+		service := metadataService{metadata.MetadataService{
+			Client: &test.HttpClient{Resources: s.resources, Err: s.err},
+		}}
+		for _, tt := range s.tests {
+			attrs, err := service.fetchAttributes(tt.path)
+			if err != s.err {
+				t.Fatalf("bad error for %q (%q): want %q, got %q", tt.path, s.resources, s.err, err)
+			}
+			if !reflect.DeepEqual(attrs, tt.val) {
+				t.Fatalf("bad fetch for %q (%q): want %q, got %q", tt.path, s.resources, tt.val, attrs)
+			}
+		}
+	}
+}
      +
+// TestFetchAttribute checks single-value attribute reads (first line
+// of the response) and error pass-through from the client.
+func TestFetchAttribute(t *testing.T) {
+	for _, s := range []struct {
+		resources map[string]string
+		err       error
+		tests     []struct {
+			path string
+			val  string
+		}
+	}{
+		{
+			resources: map[string]string{
+				"/":      "a\nb\nc/",
+				"/c/":    "d\ne/",
+				"/c/e/":  "f",
+				"/a":     "1",
+				"/b":     "2",
+				"/c/d":   "3",
+				"/c/e/f": "4",
+			},
+			tests: []struct {
+				path string
+				val  string
+			}{
+				{"/a", "1"},
+				{"/b", "2"},
+				{"/c/d", "3"},
+				{"/c/e/f", "4"},
+			},
+		},
+		{
+			err: fmt.Errorf("test error"),
+			tests: []struct {
+				path string
+				val  string
+			}{
+				{"", ""},
+			},
+		},
+	} {
+		service := metadataService{metadata.MetadataService{
+			Client: &test.HttpClient{Resources: s.resources, Err: s.err},
+		}}
+		for _, tt := range s.tests {
+			attr, err := service.fetchAttribute(tt.path)
+			if err != s.err {
+				t.Fatalf("bad error for %q (%q): want %q, got %q", tt.path, s.resources, s.err, err)
+			}
+			if attr != tt.val {
+				t.Fatalf("bad fetch for %q (%q): want %q, got %q", tt.path, s.resources, tt.val, attr)
+			}
+		}
+	}
+}
      +
+// TestFetchMetadata table-tests the full EC2 walk: malformed
+// public-keys listing, a complete tree, hostname with extra domains
+// (only the first token is kept), and client-timeout pass-through.
+func TestFetchMetadata(t *testing.T) {
+	for _, tt := range []struct {
+		root         string
+		metadataPath string
+		resources    map[string]string
+		expect       datasource.Metadata
+		clientErr    error
+		expectErr    error
+	}{
+		{
+			root:         "/",
+			metadataPath: "2009-04-04/meta-data",
+			resources: map[string]string{
+				"/2009-04-04/meta-data/public-keys": "bad\n",
+			},
+			expectErr: fmt.Errorf("malformed public key: \"bad\""),
+		},
+		{
+			root:         "/",
+			metadataPath: "2009-04-04/meta-data",
+			resources: map[string]string{
+				"/2009-04-04/meta-data/hostname":                  "host",
+				"/2009-04-04/meta-data/local-ipv4":                "1.2.3.4",
+				"/2009-04-04/meta-data/public-ipv4":               "5.6.7.8",
+				"/2009-04-04/meta-data/public-keys":               "0=test1\n",
+				"/2009-04-04/meta-data/public-keys/0":             "openssh-key",
+				"/2009-04-04/meta-data/public-keys/0/openssh-key": "key",
+			},
+			expect: datasource.Metadata{
+				Hostname:      "host",
+				PrivateIPv4:   net.ParseIP("1.2.3.4"),
+				PublicIPv4:    net.ParseIP("5.6.7.8"),
+				SSHPublicKeys: map[string]string{"test1": "key"},
+			},
+		},
+		{
+			root:         "/",
+			metadataPath: "2009-04-04/meta-data",
+			resources: map[string]string{
+				"/2009-04-04/meta-data/hostname":                  "host domain another_domain",
+				"/2009-04-04/meta-data/local-ipv4":                "1.2.3.4",
+				"/2009-04-04/meta-data/public-ipv4":               "5.6.7.8",
+				"/2009-04-04/meta-data/public-keys":               "0=test1\n",
+				"/2009-04-04/meta-data/public-keys/0":             "openssh-key",
+				"/2009-04-04/meta-data/public-keys/0/openssh-key": "key",
+			},
+			expect: datasource.Metadata{
+				Hostname:      "host",
+				PrivateIPv4:   net.ParseIP("1.2.3.4"),
+				PublicIPv4:    net.ParseIP("5.6.7.8"),
+				SSHPublicKeys: map[string]string{"test1": "key"},
+			},
+		},
+		{
+			clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
+			expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
+		},
+	} {
+		service := &metadataService{metadata.MetadataService{
+			Root:         tt.root,
+			Client:       &test.HttpClient{Resources: tt.resources, Err: tt.clientErr},
+			MetadataPath: tt.metadataPath,
+		}}
+		metadata, err := service.FetchMetadata()
+		if Error(err) != Error(tt.expectErr) {
+			t.Fatalf("bad error (%q): want %q, got %q", tt.resources, tt.expectErr, err)
+		}
+		if !reflect.DeepEqual(tt.expect, metadata) {
+			t.Fatalf("bad fetch (%q): want %#v, got %#v", tt.resources, tt.expect, metadata)
+		}
+	}
+}
      +
+// Error renders an error as a string, mapping nil to "", so expected
+// and actual errors can be compared by message.
+func Error(err error) string {
+	if err != nil {
+		return err.Error()
+	}
+	return ""
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata.go
      new file mode 100644
      index 00000000..2baa4f17
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata.go
      @@ -0,0 +1,71 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package metadata
      +
      +import (
      +	"strings"
      +
      +	"github.com/coreos/coreos-cloudinit/pkg"
      +)
      +
+// MetadataService is the shared base for HTTP metadata datasources:
+// a root URL plus API-relative user-data and metadata paths, fetched
+// through Client.
+type MetadataService struct {
+	Root         string
+	Client       pkg.Getter
+	ApiVersion   string
+	UserdataPath string
+	MetadataPath string
+}
+
+// NewDatasource builds a MetadataService with a default HTTP client,
+// normalizing root to end with a trailing slash.
+func NewDatasource(root, apiVersion, userdataPath, metadataPath string) MetadataService {
+	if !strings.HasSuffix(root, "/") {
+		root += "/"
+	}
+	return MetadataService{root, pkg.NewHttpClient(), apiVersion, userdataPath, metadataPath}
+}
      +
+// IsAvailable probes Root+ApiVersion with a single Get and reports
+// whether the service answered without error.
+func (ms MetadataService) IsAvailable() bool {
+	_, err := ms.Client.Get(ms.Root + ms.ApiVersion)
+	return (err == nil)
+}
+
+// AvailabilityChanges always reports true: availability may change
+// over time, so callers can re-poll.
+func (ms MetadataService) AvailabilityChanges() bool {
+	return true
+}
+
+// ConfigRoot returns the configured root URL.
+func (ms MetadataService) ConfigRoot() string {
+	return ms.Root
+}
+
+// FetchUserdata retrieves the raw user-data document.
+func (ms MetadataService) FetchUserdata() ([]byte, error) {
+	return ms.FetchData(ms.UserdataUrl())
+}
+
+// FetchData GETs url with retries. A pkg.ErrNotFound is mapped to an
+// empty payload with a nil error (missing data is not fatal); other
+// errors are returned as-is.
+func (ms MetadataService) FetchData(url string) ([]byte, error) {
+	if data, err := ms.Client.GetRetry(url); err == nil {
+		return data, err
+	} else if _, ok := err.(pkg.ErrNotFound); ok {
+		return []byte{}, nil
+	} else {
+		return data, err
+	}
+}
+
+// MetadataUrl returns the absolute metadata URL.
+func (ms MetadataService) MetadataUrl() string {
+	return (ms.Root + ms.MetadataPath)
+}
+
+// UserdataUrl returns the absolute user-data URL.
+func (ms MetadataService) UserdataUrl() string {
+	return (ms.Root + ms.UserdataPath)
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata_test.go
      new file mode 100644
      index 00000000..9d6b4b29
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata_test.go
      @@ -0,0 +1,185 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package metadata
      +
      +import (
      +	"bytes"
      +	"fmt"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata/test"
      +	"github.com/coreos/coreos-cloudinit/pkg"
      +)
      +
+// TestAvailabilityChanges verifies the constant true result.
+func TestAvailabilityChanges(t *testing.T) {
+	want := true
+	if ac := (MetadataService{}).AvailabilityChanges(); ac != want {
+		t.Fatalf("bad AvailabilityChanges: want %t, got %t", want, ac)
+	}
+}
      +
+// TestIsAvailable checks the availability probe against a stub client
+// with and without the versioned resource present.
+func TestIsAvailable(t *testing.T) {
+	for _, tt := range []struct {
+		root       string
+		apiVersion string
+		resources  map[string]string
+		expect     bool
+	}{
+		{
+			root:       "/",
+			apiVersion: "2009-04-04",
+			resources: map[string]string{
+				"/2009-04-04": "",
+			},
+			expect: true,
+		},
+		{
+			root:      "/",
+			resources: map[string]string{},
+			expect:    false,
+		},
+	} {
+		service := &MetadataService{
+			Root:       tt.root,
+			Client:     &test.HttpClient{Resources: tt.resources, Err: nil},
+			ApiVersion: tt.apiVersion,
+		}
+		if a := service.IsAvailable(); a != tt.expect {
+			t.Fatalf("bad isAvailable (%q): want %t, got %t", tt.resources, tt.expect, a)
+		}
+	}
+}
      +
+// TestFetchUserdata covers the three FetchData outcomes through
+// FetchUserdata: success, not-found mapped to empty bytes with nil
+// error, and timeout passed through.
+func TestFetchUserdata(t *testing.T) {
+	for _, tt := range []struct {
+		root         string
+		userdataPath string
+		resources    map[string]string
+		userdata     []byte
+		clientErr    error
+		expectErr    error
+	}{
+		{
+			root:         "/",
+			userdataPath: "2009-04-04/user-data",
+			resources: map[string]string{
+				"/2009-04-04/user-data": "hello",
+			},
+			userdata: []byte("hello"),
+		},
+		{
+			root:      "/",
+			clientErr: pkg.ErrNotFound{Err: fmt.Errorf("test not found error")},
+			userdata:  []byte{},
+		},
+		{
+			root:      "/",
+			clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test timeout error")},
+			expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test timeout error")},
+		},
+	} {
+		service := &MetadataService{
+			Root:         tt.root,
+			Client:       &test.HttpClient{Resources: tt.resources, Err: tt.clientErr},
+			UserdataPath: tt.userdataPath,
+		}
+		data, err := service.FetchUserdata()
+		if Error(err) != Error(tt.expectErr) {
+			t.Fatalf("bad error (%q): want %q, got %q", tt.resources, tt.expectErr, err)
+		}
+		if !bytes.Equal(data, tt.userdata) {
+			t.Fatalf("bad userdata (%q): want %q, got %q", tt.resources, tt.userdata, data)
+		}
+	}
+}
      +
+// TestUrls checks URL composition (Root + path) for both relative and
+// absolute roots.
+func TestUrls(t *testing.T) {
+	for _, tt := range []struct {
+		root         string
+		userdataPath string
+		metadataPath string
+		expectRoot   string
+		userdata     string
+		metadata     string
+	}{
+		{
+			root:         "/",
+			userdataPath: "2009-04-04/user-data",
+			metadataPath: "2009-04-04/meta-data",
+			expectRoot:   "/",
+			userdata:     "/2009-04-04/user-data",
+			metadata:     "/2009-04-04/meta-data",
+		},
+		{
+			root:         "http://169.254.169.254/",
+			userdataPath: "2009-04-04/user-data",
+			metadataPath: "2009-04-04/meta-data",
+			expectRoot:   "http://169.254.169.254/",
+			userdata:     "http://169.254.169.254/2009-04-04/user-data",
+			metadata:     "http://169.254.169.254/2009-04-04/meta-data",
+		},
+	} {
+		service := &MetadataService{
+			Root:         tt.root,
+			UserdataPath: tt.userdataPath,
+			MetadataPath: tt.metadataPath,
+		}
+		if url := service.UserdataUrl(); url != tt.userdata {
+			t.Fatalf("bad url (%q): want %q, got %q", tt.root, tt.userdata, url)
+		}
+		if url := service.MetadataUrl(); url != tt.metadata {
+			t.Fatalf("bad url (%q): want %q, got %q", tt.root, tt.metadata, url)
+		}
+		if url := service.ConfigRoot(); url != tt.expectRoot {
+			t.Fatalf("bad url (%q): want %q, got %q", tt.root, tt.expectRoot, url)
+		}
+	}
+}
      +
+// TestNewDatasource verifies trailing-slash normalization of root.
+func TestNewDatasource(t *testing.T) {
+	for _, tt := range []struct {
+		root       string
+		expectRoot string
+	}{
+		{
+			root:       "",
+			expectRoot: "/",
+		},
+		{
+			root:       "/",
+			expectRoot: "/",
+		},
+		{
+			root:       "http://169.254.169.254",
+			expectRoot: "http://169.254.169.254/",
+		},
+		{
+			root:       "http://169.254.169.254/",
+			expectRoot: "http://169.254.169.254/",
+		},
+	} {
+		service := NewDatasource(tt.root, "", "", "")
+		if service.Root != tt.expectRoot {
+			t.Fatalf("bad root (%q): want %q, got %q", tt.root, tt.expectRoot, service.Root)
+		}
+	}
+}
      +
+// Error renders an error as a string, mapping nil to "", so expected
+// and actual errors can be compared by message.
+func Error(err error) string {
+	if err != nil {
+		return err.Error()
+	}
+	return ""
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/packet/metadata.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/packet/metadata.go
      new file mode 100644
      index 00000000..3c7d499d
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/packet/metadata.go
      @@ -0,0 +1,106 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package packet
      +
      +import (
      +	"encoding/json"
      +	"net"
      +	"strconv"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata"
      +)
      +
+// Endpoint constants for the Packet metadata service. The API carries
+// no version path component, so apiVersion is empty.
+const (
+	DefaultAddress = "https://metadata.packet.net/"
+	apiVersion     = ""
+	userdataUrl    = "userdata"
+	metadataPath   = "metadata"
+)
+
+// Netblock is one address assignment, including its address family
+// (4 for IPv4, otherwise treated as IPv6 by FetchMetadata) and
+// whether it is public.
+type Netblock struct {
+	Address       net.IP `json:"address"`
+	Cidr          int    `json:"cidr"`
+	Netmask       net.IP `json:"netmask"`
+	Gateway       net.IP `json:"gateway"`
+	AddressFamily int    `json:"address_family"`
+	Public        bool   `json:"public"`
+}
+
+// Nic names a physical interface and its MAC address.
+type Nic struct {
+	Name string `json:"name"`
+	Mac  string `json:"mac"`
+}
+
+// NetworkData is the "network" section of the metadata document:
+// interfaces, address netblocks, and DNS servers.
+type NetworkData struct {
+	Interfaces []Nic      `json:"interfaces"`
+	Netblocks  []Netblock `json:"addresses"`
+	DNS        []net.IP   `json:"dns"`
+}
+
+// Metadata mirrors the fields consumed from
+// https://metadata.packet.net/metadata; more can be added later.
+type Metadata struct {
+	Hostname    string      `json:"hostname"`
+	SSHKeys     []string    `json:"ssh_keys"`
+	NetworkData NetworkData `json:"network"`
+}
      +
+// metadataService wraps the generic metadata.MetadataService with
+// Packet-specific fetching and parsing.
+type metadataService struct {
+	metadata.MetadataService
+}
+
+// NewDatasource returns a Packet datasource rooted at root (typically
+// DefaultAddress).
+func NewDatasource(root string) *metadataService {
+	return &metadataService{MetadataService: metadata.NewDatasource(root, apiVersion, userdataUrl, metadataPath)}
+}
      +
+// FetchMetadata downloads and parses the metadata JSON, mapping the
+// netblocks onto the datasource-neutral IP fields. An empty response
+// body yields a zero Metadata with a nil error.
+func (ms *metadataService) FetchMetadata() (metadata datasource.Metadata, err error) {
+	var data []byte
+	var m Metadata
+
+	if data, err = ms.FetchData(ms.MetadataUrl()); err != nil || len(data) == 0 {
+		return
+	}
+
+	if err = json.Unmarshal(data, &m); err != nil {
+		return
+	}
+
+	// Later netblocks of the same class overwrite earlier ones.
+	if len(m.NetworkData.Netblocks) > 0 {
+		for _, Netblock := range m.NetworkData.Netblocks {
+			if Netblock.AddressFamily == 4 {
+				if Netblock.Public == true {
+					metadata.PublicIPv4 = Netblock.Address
+				} else {
+					metadata.PrivateIPv4 = Netblock.Address
+				}
+			} else {
+				// NOTE(review): non-IPv4 netblocks land in PublicIPv6
+				// regardless of their Public flag, so a private IPv6
+				// address would be misclassified here.
+				metadata.PublicIPv6 = Netblock.Address
+			}
+		}
+	}
+	metadata.Hostname = m.Hostname
+	// SSH keys carry no names in this JSON, so key the map by list index.
+	metadata.SSHPublicKeys = map[string]string{}
+	for i, key := range m.SSHKeys {
+		metadata.SSHPublicKeys[strconv.Itoa(i)] = key
+	}
+
+	// Preserve the parsed network section for consumers of NetworkConfig.
+	metadata.NetworkConfig = m.NetworkData
+
+	return
+}
      +
      +func (ms metadataService) Type() string {
      +	return "packet-metadata-service"
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/test/test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/test/test.go
      new file mode 100644
      index 00000000..3be08f3b
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/test/test.go
      @@ -0,0 +1,41 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package test
      +
      +import (
      +	"fmt"
      +
      +	"github.com/coreos/coreos-cloudinit/pkg"
      +)
      +
      +type HttpClient struct {
      +	Resources map[string]string
      +	Err       error
      +}
      +
      +func (t *HttpClient) GetRetry(url string) ([]byte, error) {
      +	if t.Err != nil {
      +		return nil, t.Err
      +	}
      +	if val, ok := t.Resources[url]; ok {
      +		return []byte(val), nil
      +	} else {
      +		return nil, pkg.ErrNotFound{fmt.Errorf("not found: %q", url)}
      +	}
      +}
      +
      +func (t *HttpClient) Get(url string) ([]byte, error) {
      +	return t.GetRetry(url)
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/proc_cmdline/proc_cmdline.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/proc_cmdline/proc_cmdline.go
      new file mode 100644
      index 00000000..8423b251
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/proc_cmdline/proc_cmdline.go
      @@ -0,0 +1,110 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package proc_cmdline
      +
      +import (
      +	"errors"
      +	"io/ioutil"
      +	"log"
      +	"strings"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/pkg"
      +)
      +
      +const (
      +	ProcCmdlineLocation        = "/proc/cmdline"
      +	ProcCmdlineCloudConfigFlag = "cloud-config-url"
      +)
      +
      +type procCmdline struct {
      +	Location string
      +}
      +
      +func NewDatasource() *procCmdline {
      +	return &procCmdline{Location: ProcCmdlineLocation}
      +}
      +
      +func (c *procCmdline) IsAvailable() bool {
      +	contents, err := ioutil.ReadFile(c.Location)
      +	if err != nil {
      +		return false
      +	}
      +
      +	cmdline := strings.TrimSpace(string(contents))
      +	_, err = findCloudConfigURL(cmdline)
      +	return (err == nil)
      +}
      +
      +func (c *procCmdline) AvailabilityChanges() bool {
      +	return false
      +}
      +
      +func (c *procCmdline) ConfigRoot() string {
      +	return ""
      +}
      +
      +func (c *procCmdline) FetchMetadata() (datasource.Metadata, error) {
      +	return datasource.Metadata{}, nil
      +}
      +
      +func (c *procCmdline) FetchUserdata() ([]byte, error) {
      +	contents, err := ioutil.ReadFile(c.Location)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	cmdline := strings.TrimSpace(string(contents))
      +	url, err := findCloudConfigURL(cmdline)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	client := pkg.NewHttpClient()
      +	cfg, err := client.GetRetry(url)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return cfg, nil
      +}
      +
      +func (c *procCmdline) Type() string {
      +	return "proc-cmdline"
      +}
      +
      +func findCloudConfigURL(input string) (url string, err error) {
      +	err = errors.New("cloud-config-url not found")
      +	for _, token := range strings.Split(input, " ") {
      +		parts := strings.SplitN(token, "=", 2)
      +
      +		key := parts[0]
      +		key = strings.Replace(key, "_", "-", -1)
      +
      +		if key != "cloud-config-url" {
      +			continue
      +		}
      +
      +		if len(parts) != 2 {
      +			log.Printf("Found cloud-config-url in /proc/cmdline with no value, ignoring.")
      +			continue
      +		}
      +
      +		url = parts[1]
      +		err = nil
      +	}
      +
      +	return
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/proc_cmdline/proc_cmdline_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/proc_cmdline/proc_cmdline_test.go
      new file mode 100644
      index 00000000..a0245812
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/proc_cmdline/proc_cmdline_test.go
      @@ -0,0 +1,102 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package proc_cmdline
      +
      +import (
      +	"fmt"
      +	"io/ioutil"
      +	"net/http"
      +	"net/http/httptest"
      +	"os"
      +	"testing"
      +)
      +
      +func TestParseCmdlineCloudConfigFound(t *testing.T) {
      +	tests := []struct {
      +		input  string
      +		expect string
      +	}{
      +		{
      +			"cloud-config-url=example.com",
      +			"example.com",
      +		},
      +		{
      +			"cloud_config_url=example.com",
      +			"example.com",
      +		},
      +		{
      +			"cloud-config-url cloud-config-url=example.com",
      +			"example.com",
      +		},
      +		{
      +			"cloud-config-url= cloud-config-url=example.com",
      +			"example.com",
      +		},
      +		{
      +			"cloud-config-url=one.example.com cloud-config-url=two.example.com",
      +			"two.example.com",
      +		},
      +		{
      +			"foo=bar cloud-config-url=example.com ping=pong",
      +			"example.com",
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		output, err := findCloudConfigURL(tt.input)
      +		if output != tt.expect {
      +			t.Errorf("Test case %d failed: %s != %s", i, output, tt.expect)
      +		}
      +		if err != nil {
      +			t.Errorf("Test case %d produced error: %v", i, err)
      +		}
      +	}
      +}
      +
      +func TestProcCmdlineAndFetchConfig(t *testing.T) {
      +
      +	var (
      +		ProcCmdlineTmpl    = "foo=bar cloud-config-url=%s/config\n"
      +		CloudConfigContent = "#cloud-config\n"
      +	)
      +
      +	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
      +		if r.Method == "GET" && r.RequestURI == "/config" {
      +			fmt.Fprint(w, CloudConfigContent)
      +		}
      +	}))
      +	defer ts.Close()
      +
      +	file, err := ioutil.TempFile(os.TempDir(), "test_proc_cmdline")
      +	defer os.Remove(file.Name())
      +	if err != nil {
      +		t.Errorf("Test produced error: %v", err)
      +	}
      +	_, err = file.Write([]byte(fmt.Sprintf(ProcCmdlineTmpl, ts.URL)))
      +	if err != nil {
      +		t.Errorf("Test produced error: %v", err)
      +	}
      +
      +	p := NewDatasource()
      +	p.Location = file.Name()
      +	cfg, err := p.FetchUserdata()
      +	if err != nil {
      +		t.Errorf("Test produced error: %v", err)
      +	}
      +
      +	if string(cfg) != CloudConfigContent {
      +		t.Errorf("Test failed, response body: %s != %s", cfg, CloudConfigContent)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/test/filesystem.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/test/filesystem.go
      new file mode 100644
      index 00000000..95d6fb27
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/test/filesystem.go
      @@ -0,0 +1,57 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package test
      +
      +import (
      +	"fmt"
      +	"os"
      +	"path"
      +)
      +
      +type MockFilesystem map[string]File
      +
      +type File struct {
      +	Path      string
      +	Contents  string
      +	Directory bool
      +}
      +
      +func (m MockFilesystem) ReadFile(filename string) ([]byte, error) {
      +	if f, ok := m[path.Clean(filename)]; ok {
      +		if f.Directory {
      +			return nil, fmt.Errorf("read %s: is a directory", filename)
      +		}
      +		return []byte(f.Contents), nil
      +	}
      +	return nil, os.ErrNotExist
      +}
      +
      +func NewMockFilesystem(files ...File) MockFilesystem {
      +	fs := MockFilesystem{}
      +	for _, file := range files {
      +		fs[file.Path] = file
      +
      +		// Create the directories leading up to the file
      +		p := path.Dir(file.Path)
      +		for p != "/" && p != "." {
      +			if f, ok := fs[p]; ok && !f.Directory {
      +				panic(fmt.Sprintf("%q already exists and is not a directory (%#v)", p, f))
      +			}
      +			fs[p] = File{Path: p, Directory: true}
      +			p = path.Dir(p)
      +		}
      +	}
      +	return fs
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/test/filesystem_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/test/filesystem_test.go
      new file mode 100644
      index 00000000..547c51f1
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/test/filesystem_test.go
      @@ -0,0 +1,115 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package test
      +
      +import (
      +	"errors"
      +	"os"
      +	"reflect"
      +	"testing"
      +)
      +
      +func TestReadFile(t *testing.T) {
      +	tests := []struct {
      +		filesystem MockFilesystem
      +
      +		filename string
      +		contents string
      +		err      error
      +	}{
      +		{
      +			filename: "dne",
      +			err:      os.ErrNotExist,
      +		},
      +		{
      +			filesystem: MockFilesystem{
      +				"exists": File{Contents: "hi"},
      +			},
      +			filename: "exists",
      +			contents: "hi",
      +		},
      +		{
      +			filesystem: MockFilesystem{
      +				"dir": File{Directory: true},
      +			},
      +			filename: "dir",
      +			err:      errors.New("read dir: is a directory"),
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		contents, err := tt.filesystem.ReadFile(tt.filename)
      +		if tt.contents != string(contents) {
      +			t.Errorf("bad contents (test %d): want %q, got %q", i, tt.contents, string(contents))
      +		}
      +		if !reflect.DeepEqual(tt.err, err) {
      +			t.Errorf("bad error (test %d): want %v, got %v", i, tt.err, err)
      +		}
      +	}
      +}
      +
      +func TestNewMockFilesystem(t *testing.T) {
      +	tests := []struct {
      +		files []File
      +
      +		filesystem MockFilesystem
      +	}{
      +		{
      +			filesystem: MockFilesystem{},
      +		},
      +		{
      +			files: []File{File{Path: "file"}},
      +			filesystem: MockFilesystem{
      +				"file": File{Path: "file"},
      +			},
      +		},
      +		{
      +			files: []File{File{Path: "/file"}},
      +			filesystem: MockFilesystem{
      +				"/file": File{Path: "/file"},
      +			},
      +		},
      +		{
      +			files: []File{File{Path: "/dir/file"}},
      +			filesystem: MockFilesystem{
      +				"/dir":      File{Path: "/dir", Directory: true},
      +				"/dir/file": File{Path: "/dir/file"},
      +			},
      +		},
      +		{
      +			files: []File{File{Path: "/dir/dir/file"}},
      +			filesystem: MockFilesystem{
      +				"/dir":          File{Path: "/dir", Directory: true},
      +				"/dir/dir":      File{Path: "/dir/dir", Directory: true},
      +				"/dir/dir/file": File{Path: "/dir/dir/file"},
      +			},
      +		},
      +		{
      +			files: []File{File{Path: "/dir/dir/dir", Directory: true}},
      +			filesystem: MockFilesystem{
      +				"/dir":         File{Path: "/dir", Directory: true},
      +				"/dir/dir":     File{Path: "/dir/dir", Directory: true},
      +				"/dir/dir/dir": File{Path: "/dir/dir/dir", Directory: true},
      +			},
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		filesystem := NewMockFilesystem(tt.files...)
      +		if !reflect.DeepEqual(tt.filesystem, filesystem) {
      +			t.Errorf("bad filesystem (test %d): want %#v, got %#v", i, tt.filesystem, filesystem)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/url/url.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/url/url.go
      new file mode 100644
      index 00000000..8a9c5416
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/url/url.go
      @@ -0,0 +1,55 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package url
      +
      +import (
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/pkg"
      +)
      +
      +type remoteFile struct {
      +	url string
      +}
      +
      +func NewDatasource(url string) *remoteFile {
      +	return &remoteFile{url}
      +}
      +
      +func (f *remoteFile) IsAvailable() bool {
      +	client := pkg.NewHttpClient()
      +	_, err := client.Get(f.url)
      +	return (err == nil)
      +}
      +
      +func (f *remoteFile) AvailabilityChanges() bool {
      +	return true
      +}
      +
      +func (f *remoteFile) ConfigRoot() string {
      +	return ""
      +}
      +
      +func (f *remoteFile) FetchMetadata() (datasource.Metadata, error) {
      +	return datasource.Metadata{}, nil
      +}
      +
      +func (f *remoteFile) FetchUserdata() ([]byte, error) {
      +	client := pkg.NewHttpClient()
      +	return client.GetRetry(f.url)
      +}
      +
      +func (f *remoteFile) Type() string {
      +	return "url"
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/vmware/vmware.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/vmware/vmware.go
      new file mode 100644
      index 00000000..815dbc58
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/vmware/vmware.go
      @@ -0,0 +1,235 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package vmware
      +
      +import (
      +	"fmt"
      +	"io/ioutil"
      +	"log"
      +	"net"
      +	"os"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/pkg"
      +
      +	"github.com/sigma/vmw-guestinfo/rpcvmx"
      +	"github.com/sigma/vmw-guestinfo/vmcheck"
      +	"github.com/sigma/vmw-ovflib"
      +)
      +
      +type readConfigFunction func(key string) (string, error)
      +type urlDownloadFunction func(url string) ([]byte, error)
      +
      +type vmware struct {
      +	ovfFileName string
      +	readConfig  readConfigFunction
      +	urlDownload urlDownloadFunction
      +}
      +
      +type ovfWrapper struct {
      +	env *ovf.OvfEnvironment
      +}
      +
      +func (ovf ovfWrapper) readConfig(key string) (string, error) {
      +	return ovf.env.Properties["guestinfo."+key], nil
      +}
      +
      +func NewDatasource(fileName string) *vmware {
      +	getOvfReadConfig := func(ovfEnv []byte) readConfigFunction {
      +		env := &ovf.OvfEnvironment{}
      +		if len(ovfEnv) != 0 {
      +			env = ovf.ReadEnvironment(ovfEnv)
      +		}
      +
      +		wrapper := ovfWrapper{env}
      +		return wrapper.readConfig
      +	}
      +
      +	// read from provided ovf environment document (typically /media/ovfenv/ovf-env.xml)
      +	if fileName != "" {
      +		log.Printf("Using OVF environment from %s\n", fileName)
      +		ovfEnv, err := ioutil.ReadFile(fileName)
      +		if err != nil {
      +			ovfEnv = make([]byte, 0)
      +		}
      +		return &vmware{
      +			ovfFileName: fileName,
      +			readConfig:  getOvfReadConfig(ovfEnv),
      +			urlDownload: urlDownload,
      +		}
      +	}
      +
      +	// try to read ovf environment from VMware tools
      +	data, err := readConfig("ovfenv")
      +	if err == nil && data != "" {
      +		log.Printf("Using OVF environment from guestinfo\n")
      +		return &vmware{
      +			readConfig:  getOvfReadConfig([]byte(data)),
      +			urlDownload: urlDownload,
      +		}
      +	}
      +
      +	// if everything fails, fallback to directly reading variables from the backdoor
      +	log.Printf("Using guestinfo variables\n")
      +	return &vmware{
      +		readConfig:  readConfig,
      +		urlDownload: urlDownload,
      +	}
      +}
      +
      +func (v vmware) IsAvailable() bool {
      +	if v.ovfFileName != "" {
      +		_, err := os.Stat(v.ovfFileName)
      +		return !os.IsNotExist(err)
      +	}
      +	return vmcheck.IsVirtualWorld()
      +}
      +
      +func (v vmware) AvailabilityChanges() bool {
      +	return false
      +}
      +
      +func (v vmware) ConfigRoot() string {
      +	return "/"
      +}
      +
      +func (v vmware) FetchMetadata() (metadata datasource.Metadata, err error) {
      +	metadata.Hostname, _ = v.readConfig("hostname")
      +
      +	netconf := map[string]string{}
      +	saveConfig := func(key string, args ...interface{}) string {
      +		key = fmt.Sprintf(key, args...)
      +		val, _ := v.readConfig(key)
      +		if val != "" {
      +			netconf[key] = val
      +		}
      +		return val
      +	}
      +
      +	for i := 0; ; i++ {
      +		if nameserver := saveConfig("dns.server.%d", i); nameserver == "" {
      +			break
      +		}
      +	}
      +
      +	found := true
      +	for i := 0; found; i++ {
      +		found = false
      +
      +		found = (saveConfig("interface.%d.name", i) != "") || found
      +		found = (saveConfig("interface.%d.mac", i) != "") || found
      +		found = (saveConfig("interface.%d.dhcp", i) != "") || found
      +
      +		role, _ := v.readConfig(fmt.Sprintf("interface.%d.role", i))
      +		for a := 0; ; a++ {
      +			address := saveConfig("interface.%d.ip.%d.address", i, a)
      +			if address == "" {
      +				break
      +			} else {
      +				found = true
      +			}
      +
      +			ip, _, err := net.ParseCIDR(address)
      +			if err != nil {
      +				return metadata, err
      +			}
      +
      +			switch role {
      +			case "public":
      +				if ip.To4() != nil {
      +					metadata.PublicIPv4 = ip
      +				} else {
      +					metadata.PublicIPv6 = ip
      +				}
      +			case "private":
      +				if ip.To4() != nil {
      +					metadata.PrivateIPv4 = ip
      +				} else {
      +					metadata.PrivateIPv6 = ip
      +				}
      +			case "":
      +			default:
      +				return metadata, fmt.Errorf("unrecognized role: %q", role)
      +			}
      +		}
      +
      +		for r := 0; ; r++ {
      +			gateway := saveConfig("interface.%d.route.%d.gateway", i, r)
      +			destination := saveConfig("interface.%d.route.%d.destination", i, r)
      +
      +			if gateway == "" && destination == "" {
      +				break
      +			} else {
      +				found = true
      +			}
      +		}
      +	}
      +	metadata.NetworkConfig = netconf
      +
      +	return
      +}
      +
      +func (v vmware) FetchUserdata() ([]byte, error) {
      +	encoding, err := v.readConfig("coreos.config.data.encoding")
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	data, err := v.readConfig("coreos.config.data")
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	// Try to fallback to url if no explicit data
      +	if data == "" {
      +		url, err := v.readConfig("coreos.config.url")
      +		if err != nil {
      +			return nil, err
      +		}
      +
      +		if url != "" {
      +			rawData, err := v.urlDownload(url)
      +			if err != nil {
      +				return nil, err
      +			}
      +			data = string(rawData)
      +		}
      +	}
      +
      +	if encoding != "" {
      +		return config.DecodeContent(data, encoding)
      +	}
      +	return []byte(data), nil
      +}
      +
      +func (v vmware) Type() string {
      +	return "vmware"
      +}
      +
      +func urlDownload(url string) ([]byte, error) {
      +	client := pkg.NewHttpClient()
      +	return client.GetRetry(url)
      +}
      +
      +func readConfig(key string) (string, error) {
      +	data, err := rpcvmx.NewConfig().String(key, "")
      +	if err == nil {
      +		log.Printf("Read from %q: %q\n", key, data)
      +	} else {
      +		log.Printf("Failed to read from %q: %v\n", key, err)
      +	}
      +	return data, err
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/vmware/vmware_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/vmware/vmware_test.go
      new file mode 100644
      index 00000000..9ab645a7
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/vmware/vmware_test.go
      @@ -0,0 +1,298 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package vmware
      +
      +import (
      +	"errors"
      +	"io/ioutil"
      +	"net"
      +	"os"
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +)
      +
      +type MockHypervisor map[string]string
      +
      +func (h MockHypervisor) ReadConfig(key string) (string, error) {
      +	return h[key], nil
      +}
      +
      +var fakeDownloader urlDownloadFunction = func(url string) ([]byte, error) {
      +	mapping := map[string]struct {
      +		data []byte
      +		err  error
      +	}{
      +		"http://good.example.com": {[]byte("test config"), nil},
      +		"http://bad.example.com":  {nil, errors.New("Not found")},
      +	}
      +	val := mapping[url]
      +	return val.data, val.err
      +}
      +
      +func TestFetchMetadata(t *testing.T) {
      +	tests := []struct {
      +		variables MockHypervisor
      +
      +		metadata datasource.Metadata
      +		err      error
      +	}{
      +		{
      +			variables: map[string]string{
      +				"interface.0.mac":  "test mac",
      +				"interface.0.dhcp": "yes",
      +			},
      +			metadata: datasource.Metadata{
      +				NetworkConfig: map[string]string{
      +					"interface.0.mac":  "test mac",
      +					"interface.0.dhcp": "yes",
      +				},
      +			},
      +		},
      +		{
      +			variables: map[string]string{
      +				"interface.0.name": "test name",
      +				"interface.0.dhcp": "yes",
      +			},
      +			metadata: datasource.Metadata{
      +				NetworkConfig: map[string]string{
      +					"interface.0.name": "test name",
      +					"interface.0.dhcp": "yes",
      +				},
      +			},
      +		},
      +		{
      +			variables: map[string]string{
      +				"hostname":                        "test host",
      +				"interface.0.mac":                 "test mac",
      +				"interface.0.role":                "private",
      +				"interface.0.ip.0.address":        "fe00::100/64",
      +				"interface.0.route.0.gateway":     "fe00::1",
      +				"interface.0.route.0.destination": "::",
      +			},
      +			metadata: datasource.Metadata{
      +				Hostname:    "test host",
      +				PrivateIPv6: net.ParseIP("fe00::100"),
      +				NetworkConfig: map[string]string{
      +					"interface.0.mac":                 "test mac",
      +					"interface.0.ip.0.address":        "fe00::100/64",
      +					"interface.0.route.0.gateway":     "fe00::1",
      +					"interface.0.route.0.destination": "::",
      +				},
      +			},
      +		},
      +		{
      +			variables: map[string]string{
      +				"hostname":                        "test host",
      +				"interface.0.name":                "test name",
      +				"interface.0.role":                "public",
      +				"interface.0.ip.0.address":        "10.0.0.100/24",
      +				"interface.0.ip.1.address":        "10.0.0.101/24",
      +				"interface.0.route.0.gateway":     "10.0.0.1",
      +				"interface.0.route.0.destination": "0.0.0.0",
      +				"interface.1.mac":                 "test mac",
      +				"interface.1.role":                "private",
      +				"interface.1.route.0.gateway":     "10.0.0.2",
      +				"interface.1.route.0.destination": "0.0.0.0",
      +				"interface.1.ip.0.address":        "10.0.0.102/24",
      +			},
      +			metadata: datasource.Metadata{
      +				Hostname:    "test host",
      +				PublicIPv4:  net.ParseIP("10.0.0.101"),
      +				PrivateIPv4: net.ParseIP("10.0.0.102"),
      +				NetworkConfig: map[string]string{
      +					"interface.0.name":                "test name",
      +					"interface.0.ip.0.address":        "10.0.0.100/24",
      +					"interface.0.ip.1.address":        "10.0.0.101/24",
      +					"interface.0.route.0.gateway":     "10.0.0.1",
      +					"interface.0.route.0.destination": "0.0.0.0",
      +					"interface.1.mac":                 "test mac",
      +					"interface.1.route.0.gateway":     "10.0.0.2",
      +					"interface.1.route.0.destination": "0.0.0.0",
      +					"interface.1.ip.0.address":        "10.0.0.102/24",
      +				},
      +			},
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		v := vmware{readConfig: tt.variables.ReadConfig}
      +		metadata, err := v.FetchMetadata()
      +		if !reflect.DeepEqual(tt.err, err) {
      +			t.Errorf("bad error (#%d): want %v, got %v", i, tt.err, err)
      +		}
      +		if !reflect.DeepEqual(tt.metadata, metadata) {
      +			t.Errorf("bad metadata (#%d): want %#v, got %#v", i, tt.metadata, metadata)
      +		}
      +	}
      +}
      +
      +func TestFetchUserdata(t *testing.T) {
      +	tests := []struct {
      +		variables MockHypervisor
      +
      +		userdata string
      +		err      error
      +	}{
      +		{},
      +		{
      +			variables: map[string]string{"coreos.config.data": "test config"},
      +			userdata:  "test config",
      +		},
      +		{
      +			variables: map[string]string{
      +				"coreos.config.data.encoding": "",
      +				"coreos.config.data":          "test config",
      +			},
      +			userdata: "test config",
      +		},
      +		{
      +			variables: map[string]string{
      +				"coreos.config.data.encoding": "base64",
      +				"coreos.config.data":          "dGVzdCBjb25maWc=",
      +			},
      +			userdata: "test config",
      +		},
      +		{
      +			variables: map[string]string{
      +				"coreos.config.data.encoding": "gzip+base64",
      +				"coreos.config.data":          "H4sIABaoWlUAAytJLS5RSM7PS8tMBwCQiHNZCwAAAA==",
      +			},
      +			userdata: "test config",
      +		},
      +		{
      +			variables: map[string]string{
      +				"coreos.config.data.encoding": "test encoding",
      +			},
      +			err: errors.New(`Unsupported encoding "test encoding"`),
      +		},
      +		{
      +			variables: map[string]string{
      +				"coreos.config.url": "http://good.example.com",
      +			},
      +			userdata: "test config",
      +		},
      +		{
      +			variables: map[string]string{
      +				"coreos.config.url": "http://bad.example.com",
      +			},
      +			err: errors.New("Not found"),
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		v := vmware{
      +			readConfig:  tt.variables.ReadConfig,
      +			urlDownload: fakeDownloader,
      +		}
      +		userdata, err := v.FetchUserdata()
      +		if !reflect.DeepEqual(tt.err, err) {
      +			t.Errorf("bad error (#%d): want %v, got %v", i, tt.err, err)
      +		}
      +		if tt.userdata != string(userdata) {
      +			t.Errorf("bad userdata (#%d): want %q, got %q", i, tt.userdata, userdata)
      +		}
      +	}
      +}
      +
      +func TestFetchUserdataError(t *testing.T) {
      +	testErr := errors.New("test error")
      +	_, err := vmware{readConfig: func(_ string) (string, error) { return "", testErr }}.FetchUserdata()
      +
      +	if testErr != err {
      +		t.Errorf("bad error: want %v, got %v", testErr, err)
      +	}
      +}
      +
      +func TestOvfTransport(t *testing.T) {
      +	tests := []struct {
      +		document string
      +
      +		metadata datasource.Metadata
      +		userdata []byte
      +	}{
      +		{
      +			document: `<?xml version="1.0" encoding="UTF-8"?>
      +<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
      +     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
      +     xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
      +     oe:id="CoreOS-vmw">
      +   <PlatformSection>
      +      <Kind>VMware ESXi</Kind>
      +      <Version>5.5.0</Version>
      +      <Vendor>VMware, Inc.</Vendor>
      +      <Locale>en</Locale>
      +   </PlatformSection>
      +   <PropertySection>
      +      <Property oe:key="foo" oe:value="42"/>
      +      <Property oe:key="guestinfo.coreos.config.url" oe:value="http://good.example.com"/>
      +      <Property oe:key="guestinfo.hostname" oe:value="test host"/>
      +      <Property oe:key="guestinfo.interface.0.name" oe:value="test name"/>
      +      <Property oe:key="guestinfo.interface.0.role" oe:value="public"/>
      +      <Property oe:key="guestinfo.interface.0.ip.0.address" oe:value="10.0.0.100/24"/>
      +      <Property oe:key="guestinfo.interface.0.ip.1.address" oe:value="10.0.0.101/24"/>
      +      <Property oe:key="guestinfo.interface.0.route.0.gateway" oe:value="10.0.0.1"/>
      +      <Property oe:key="guestinfo.interface.0.route.0.destination" oe:value="0.0.0.0"/>
      +      <Property oe:key="guestinfo.interface.1.mac" oe:value="test mac"/>
      +      <Property oe:key="guestinfo.interface.1.role" oe:value="private"/>
      +      <Property oe:key="guestinfo.interface.1.route.0.gateway" oe:value="10.0.0.2"/>
      +      <Property oe:key="guestinfo.interface.1.route.0.destination" oe:value="0.0.0.0"/>
      +      <Property oe:key="guestinfo.interface.1.ip.0.address" oe:value="10.0.0.102/24"/>
      +   </PropertySection>
      +</Environment>`,
      +			metadata: datasource.Metadata{
      +				Hostname:    "test host",
      +				PublicIPv4:  net.ParseIP("10.0.0.101"),
      +				PrivateIPv4: net.ParseIP("10.0.0.102"),
      +				NetworkConfig: map[string]string{
      +					"interface.0.name":                "test name",
      +					"interface.0.ip.0.address":        "10.0.0.100/24",
      +					"interface.0.ip.1.address":        "10.0.0.101/24",
      +					"interface.0.route.0.gateway":     "10.0.0.1",
      +					"interface.0.route.0.destination": "0.0.0.0",
      +					"interface.1.mac":                 "test mac",
      +					"interface.1.route.0.gateway":     "10.0.0.2",
      +					"interface.1.route.0.destination": "0.0.0.0",
      +					"interface.1.ip.0.address":        "10.0.0.102/24",
      +				},
      +			},
      +			userdata: []byte("test config"),
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		file, err := ioutil.TempFile(os.TempDir(), "ovf")
      +		if err != nil {
      +			t.Errorf("error creating ovf file (#%d)", i)
      +		}
      +		defer os.Remove(file.Name())
      +
      +		file.WriteString(tt.document)
      +		v := NewDatasource(file.Name())
      +		v.urlDownload = fakeDownloader
      +
      +		metadata, err := v.FetchMetadata()
      +		userdata, err := v.FetchUserdata()
      +
      +		if !reflect.DeepEqual(tt.metadata, metadata) {
      +			t.Errorf("bad metadata (#%d): want %#v, got %#v", i, tt.metadata, metadata)
      +		}
      +		if !reflect.DeepEqual(tt.userdata, userdata) {
      +			t.Errorf("bad userdata (#%d): want %#v, got %#v", i, tt.userdata, userdata)
      +		}
      +	}
      +
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/waagent/waagent.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/waagent/waagent.go
      new file mode 100644
      index 00000000..c70a60f4
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/waagent/waagent.go
      @@ -0,0 +1,117 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package waagent
      +
      +import (
      +	"encoding/xml"
      +	"io/ioutil"
      +	"log"
      +	"net"
      +	"os"
      +	"path"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +)
      +
      +type waagent struct {
      +	root     string
      +	readFile func(filename string) ([]byte, error)
      +}
      +
      +func NewDatasource(root string) *waagent {
      +	return &waagent{root, ioutil.ReadFile}
      +}
      +
      +func (a *waagent) IsAvailable() bool {
      +	_, err := os.Stat(path.Join(a.root, "provisioned"))
      +	return !os.IsNotExist(err)
      +}
      +
      +func (a *waagent) AvailabilityChanges() bool {
      +	return true
      +}
      +
      +func (a *waagent) ConfigRoot() string {
      +	return a.root
      +}
      +
      +func (a *waagent) FetchMetadata() (metadata datasource.Metadata, err error) {
      +	var metadataBytes []byte
      +	if metadataBytes, err = a.tryReadFile(path.Join(a.root, "SharedConfig.xml")); err != nil {
      +		return
      +	}
      +	if len(metadataBytes) == 0 {
      +		return
      +	}
      +
      +	type Instance struct {
      +		Id             string `xml:"id,attr"`
      +		Address        string `xml:"address,attr"`
      +		InputEndpoints struct {
      +			Endpoints []struct {
      +				LoadBalancedPublicAddress string `xml:"loadBalancedPublicAddress,attr"`
      +			} `xml:"Endpoint"`
      +		}
      +	}
      +
      +	type SharedConfig struct {
      +		Incarnation struct {
      +			Instance string `xml:"instance,attr"`
      +		}
      +		Instances struct {
      +			Instances []Instance `xml:"Instance"`
      +		}
      +	}
      +
      +	var m SharedConfig
      +	if err = xml.Unmarshal(metadataBytes, &m); err != nil {
      +		return
      +	}
      +
      +	var instance Instance
      +	for _, i := range m.Instances.Instances {
      +		if i.Id == m.Incarnation.Instance {
      +			instance = i
      +			break
      +		}
      +	}
      +
      +	metadata.PrivateIPv4 = net.ParseIP(instance.Address)
      +	for _, e := range instance.InputEndpoints.Endpoints {
      +		host, _, err := net.SplitHostPort(e.LoadBalancedPublicAddress)
      +		if err == nil {
      +			metadata.PublicIPv4 = net.ParseIP(host)
      +			break
      +		}
      +	}
      +	return
      +}
      +
      +func (a *waagent) FetchUserdata() ([]byte, error) {
      +	return a.tryReadFile(path.Join(a.root, "CustomData"))
      +}
      +
      +func (a *waagent) Type() string {
      +	return "waagent"
      +}
      +
      +func (a *waagent) tryReadFile(filename string) ([]byte, error) {
      +	log.Printf("Attempting to read from %q\n", filename)
      +	data, err := a.readFile(filename)
      +	if os.IsNotExist(err) {
      +		err = nil
      +	}
      +	return data, err
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/datasource/waagent/waagent_test.go b/vendor/github.com/coreos/coreos-cloudinit/datasource/waagent/waagent_test.go
      new file mode 100644
      index 00000000..f60a4c2b
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/datasource/waagent/waagent_test.go
      @@ -0,0 +1,166 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package waagent
      +
      +import (
      +	"net"
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/datasource/test"
      +)
      +
      +func TestFetchMetadata(t *testing.T) {
      +	for _, tt := range []struct {
      +		root     string
      +		files    test.MockFilesystem
      +		metadata datasource.Metadata
      +	}{
      +		{
      +			root:  "/",
      +			files: test.NewMockFilesystem(),
      +		},
      +		{
      +			root:  "/",
      +			files: test.NewMockFilesystem(test.File{Path: "/SharedConfig.xml", Contents: ""}),
      +		},
      +		{
      +			root:  "/var/lib/waagent",
      +			files: test.NewMockFilesystem(test.File{Path: "/var/lib/waagent/SharedConfig.xml", Contents: ""}),
      +		},
      +		{
      +			root: "/var/lib/waagent",
      +			files: test.NewMockFilesystem(test.File{Path: "/var/lib/waagent/SharedConfig.xml", Contents: `<?xml version="1.0" encoding="utf-8"?>
      +<SharedConfig version="1.0.0.0" goalStateIncarnation="1">
      +  <Deployment name="c8f9e4c9c18948e1bebf57c5685da756" guid="{1d10394f-c741-4a1a-a6bb-278f213c5a5e}" incarnation="0" isNonCancellableTopologyChangeEnabled="false">
      +    <Service name="core-test-1" guid="{00000000-0000-0000-0000-000000000000}" />
      +    <ServiceInstance name="c8f9e4c9c18948e1bebf57c5685da756.0" guid="{1e202e9a-8ffe-4915-b6ef-4118c9628fda}" />
      +  </Deployment>
      +  <Incarnation number="1" instance="core-test-1" guid="{8767eb4b-b445-4783-b1f5-6c0beaf41ea0}" />
      +  <Role guid="{53ecc81e-257f-fbc9-a53a-8cf1a0a122b4}" name="core-test-1" settleTimeSeconds="0" />
      +  <LoadBalancerSettings timeoutSeconds="0" waitLoadBalancerProbeCount="8">
      +    <Probes>
      +      <Probe name="D41D8CD98F00B204E9800998ECF8427E" />
      +      <Probe name="C9DEC1518E1158748FA4B6081A8266DD" />
      +    </Probes>
      +  </LoadBalancerSettings>
      +  <OutputEndpoints>
      +    <Endpoint name="core-test-1:openInternalEndpoint" type="SFS">
      +      <Target instance="core-test-1" endpoint="openInternalEndpoint" />
      +    </Endpoint>
      +  </OutputEndpoints>
      +  <Instances>
      +    <Instance id="core-test-1" address="100.73.202.64">
      +      <FaultDomains randomId="0" updateId="0" updateCount="0" />
      +      <InputEndpoints>
      +        <Endpoint name="openInternalEndpoint" address="100.73.202.64" protocol="any" isPublic="false" enableDirectServerReturn="false" isDirectAddress="false" disableStealthMode="false">
      +          <LocalPorts>
      +            <LocalPortSelfManaged />
      +          </LocalPorts>
      +        </Endpoint>
      +        <Endpoint name="ssh" address="100.73.202.64:22" protocol="tcp" hostName="core-test-1ContractContract" isPublic="true" loadBalancedPublicAddress="191.239.39.77:22" enableDirectServerReturn="false" isDirectAddress="false" disableStealthMode="false">
      +          <LocalPorts>
      +            <LocalPortRange from="22" to="22" />
      +          </LocalPorts>
      +        </Endpoint>
      +      </InputEndpoints>
      +    </Instance>
      +  </Instances>
      +</SharedConfig>`}),
      +			metadata: datasource.Metadata{
      +				PrivateIPv4: net.ParseIP("100.73.202.64"),
      +				PublicIPv4:  net.ParseIP("191.239.39.77"),
      +			},
      +		},
      +	} {
      +		a := waagent{tt.root, tt.files.ReadFile}
      +		metadata, err := a.FetchMetadata()
      +		if err != nil {
      +			t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err)
      +		}
      +		if !reflect.DeepEqual(tt.metadata, metadata) {
      +			t.Fatalf("bad metadata for %+v: want %#v, got %#v", tt, tt.metadata, metadata)
      +		}
      +	}
      +}
      +
      +func TestFetchUserdata(t *testing.T) {
      +	for _, tt := range []struct {
      +		root  string
      +		files test.MockFilesystem
      +	}{
      +		{
      +			"/",
      +			test.NewMockFilesystem(),
      +		},
      +		{
      +			"/",
      +			test.NewMockFilesystem(test.File{Path: "/CustomData", Contents: ""}),
      +		},
      +		{
      +			"/var/lib/waagent/",
      +			test.NewMockFilesystem(test.File{Path: "/var/lib/waagent/CustomData", Contents: ""}),
      +		},
      +	} {
      +		a := waagent{tt.root, tt.files.ReadFile}
      +		_, err := a.FetchUserdata()
      +		if err != nil {
      +			t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err)
      +		}
      +	}
      +}
      +
      +func TestConfigRoot(t *testing.T) {
      +	for _, tt := range []struct {
      +		root       string
      +		configRoot string
      +	}{
      +		{
      +			"/",
      +			"/",
      +		},
      +		{
      +			"/var/lib/waagent",
      +			"/var/lib/waagent",
      +		},
      +	} {
      +		a := waagent{tt.root, nil}
      +		if configRoot := a.ConfigRoot(); configRoot != tt.configRoot {
      +			t.Fatalf("bad config root for %q: want %q, got %q", tt, tt.configRoot, configRoot)
      +		}
      +	}
      +}
      +
      +func TestNewDatasource(t *testing.T) {
      +	for _, tt := range []struct {
      +		root       string
      +		expectRoot string
      +	}{
      +		{
      +			root:       "",
      +			expectRoot: "",
      +		},
      +		{
      +			root:       "/var/lib/waagent",
      +			expectRoot: "/var/lib/waagent",
      +		},
      +	} {
      +		service := NewDatasource(tt.root)
      +		if service.root != tt.expectRoot {
      +			t.Fatalf("bad root (%q): want %q, got %q", tt.root, tt.expectRoot, service.root)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/config.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/config.go
      new file mode 100644
      index 00000000..94a47fa3
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/initialize/config.go
      @@ -0,0 +1,294 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package initialize
      +
      +import (
      +	"errors"
      +	"fmt"
      +	"log"
      +	"path"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +	"github.com/coreos/coreos-cloudinit/network"
      +	"github.com/coreos/coreos-cloudinit/system"
      +)
      +
      +// CloudConfigFile represents a CoreOS specific configuration option that can generate
      +// an associated system.File to be written to disk
      +type CloudConfigFile interface {
      +	// File should either return (*system.File, error), or (nil, nil) if nothing
      +	// needs to be done for this configuration option.
      +	File() (*system.File, error)
      +}
      +
      +// CloudConfigUnit represents a CoreOS specific configuration option that can generate
      +// associated system.Units to be created/enabled appropriately
      +type CloudConfigUnit interface {
      +	Units() []system.Unit
      +}
      +
      +// Apply renders a CloudConfig to an Environment. This can involve things like
      +// configuring the hostname, adding new users, writing various configuration
      +// files to disk, and manipulating systemd services.
      +func Apply(cfg config.CloudConfig, ifaces []network.InterfaceGenerator, env *Environment) error {
      +	if cfg.Hostname != "" {
      +		if err := system.SetHostname(cfg.Hostname); err != nil {
      +			return err
      +		}
      +		log.Printf("Set hostname to %s", cfg.Hostname)
      +	}
      +
      +	for _, user := range cfg.Users {
      +		if user.Name == "" {
      +			log.Printf("User object has no 'name' field, skipping")
      +			continue
      +		}
      +
      +		if system.UserExists(&user) {
      +			log.Printf("User '%s' exists, ignoring creation-time fields", user.Name)
      +			if user.PasswordHash != "" {
      +				log.Printf("Setting '%s' user's password", user.Name)
      +				if err := system.SetUserPassword(user.Name, user.PasswordHash); err != nil {
      +					log.Printf("Failed setting '%s' user's password: %v", user.Name, err)
      +					return err
      +				}
      +			}
      +		} else {
      +			log.Printf("Creating user '%s'", user.Name)
      +			if err := system.CreateUser(&user); err != nil {
      +				log.Printf("Failed creating user '%s': %v", user.Name, err)
      +				return err
      +			}
      +		}
      +
      +		if len(user.SSHAuthorizedKeys) > 0 {
      +			log.Printf("Authorizing %d SSH keys for user '%s'", len(user.SSHAuthorizedKeys), user.Name)
      +			if err := system.AuthorizeSSHKeys(user.Name, env.SSHKeyName(), user.SSHAuthorizedKeys); err != nil {
      +				return err
      +			}
      +		}
      +		if user.SSHImportGithubUser != "" {
      +			log.Printf("Authorizing github user %s SSH keys for CoreOS user '%s'", user.SSHImportGithubUser, user.Name)
      +			if err := SSHImportGithubUser(user.Name, user.SSHImportGithubUser); err != nil {
      +				return err
      +			}
      +		}
      +		for _, u := range user.SSHImportGithubUsers {
      +			log.Printf("Authorizing github user %s SSH keys for CoreOS user '%s'", u, user.Name)
      +			if err := SSHImportGithubUser(user.Name, u); err != nil {
      +				return err
      +			}
      +		}
      +		if user.SSHImportURL != "" {
      +			log.Printf("Authorizing SSH keys for CoreOS user '%s' from '%s'", user.Name, user.SSHImportURL)
      +			if err := SSHImportKeysFromURL(user.Name, user.SSHImportURL); err != nil {
      +				return err
      +			}
      +		}
      +	}
      +
      +	if len(cfg.SSHAuthorizedKeys) > 0 {
      +		err := system.AuthorizeSSHKeys("core", env.SSHKeyName(), cfg.SSHAuthorizedKeys)
      +		if err == nil {
      +			log.Printf("Authorized SSH keys for core user")
      +		} else {
      +			return err
      +		}
      +	}
      +
      +	var writeFiles []system.File
      +	for _, file := range cfg.WriteFiles {
      +		writeFiles = append(writeFiles, system.File{File: file})
      +	}
      +
      +	for _, ccf := range []CloudConfigFile{
      +		system.OEM{OEM: cfg.CoreOS.OEM},
      +		system.Update{Update: cfg.CoreOS.Update, ReadConfig: system.DefaultReadConfig},
      +		system.EtcHosts{EtcHosts: cfg.ManageEtcHosts},
      +		system.Flannel{Flannel: cfg.CoreOS.Flannel},
      +	} {
      +		f, err := ccf.File()
      +		if err != nil {
      +			return err
      +		}
      +		if f != nil {
      +			writeFiles = append(writeFiles, *f)
      +		}
      +	}
      +
      +	var units []system.Unit
      +	for _, u := range cfg.CoreOS.Units {
      +		units = append(units, system.Unit{Unit: u})
      +	}
      +
      +	for _, ccu := range []CloudConfigUnit{
      +		system.Etcd{Etcd: cfg.CoreOS.Etcd},
      +		system.Etcd2{Etcd2: cfg.CoreOS.Etcd2},
      +		system.Fleet{Fleet: cfg.CoreOS.Fleet},
      +		system.Locksmith{Locksmith: cfg.CoreOS.Locksmith},
      +		system.Update{Update: cfg.CoreOS.Update, ReadConfig: system.DefaultReadConfig},
      +	} {
      +		units = append(units, ccu.Units()...)
      +	}
      +
      +	wroteEnvironment := false
      +	for _, file := range writeFiles {
      +		fullPath, err := system.WriteFile(&file, env.Root())
      +		if err != nil {
      +			return err
      +		}
      +		if path.Clean(file.Path) == "/etc/environment" {
      +			wroteEnvironment = true
      +		}
      +		log.Printf("Wrote file %s to filesystem", fullPath)
      +	}
      +
      +	if !wroteEnvironment {
      +		ef := env.DefaultEnvironmentFile()
      +		if ef != nil {
      +			err := system.WriteEnvFile(ef, env.Root())
      +			if err != nil {
      +				return err
      +			}
      +			log.Printf("Updated /etc/environment")
      +		}
      +	}
      +
      +	if len(ifaces) > 0 {
      +		units = append(units, createNetworkingUnits(ifaces)...)
      +		if err := system.RestartNetwork(ifaces); err != nil {
      +			return err
      +		}
      +	}
      +
      +	um := system.NewUnitManager(env.Root())
      +	return processUnits(units, env.Root(), um)
      +}
      +
      +func createNetworkingUnits(interfaces []network.InterfaceGenerator) (units []system.Unit) {
      +	appendNewUnit := func(units []system.Unit, name, content string) []system.Unit {
      +		if content == "" {
      +			return units
      +		}
      +		return append(units, system.Unit{Unit: config.Unit{
      +			Name:    name,
      +			Runtime: true,
      +			Content: content,
      +		}})
      +	}
      +	for _, i := range interfaces {
      +		units = appendNewUnit(units, fmt.Sprintf("%s.netdev", i.Filename()), i.Netdev())
      +		units = appendNewUnit(units, fmt.Sprintf("%s.link", i.Filename()), i.Link())
      +		units = appendNewUnit(units, fmt.Sprintf("%s.network", i.Filename()), i.Network())
      +	}
      +	return units
      +}
      +
      +// processUnits takes a set of Units and applies them to the given root using
      +// the given UnitManager. This can involve things like writing unit files to
      +// disk, masking/unmasking units, or invoking systemd
      +// commands against units. It returns any error encountered.
      +func processUnits(units []system.Unit, root string, um system.UnitManager) error {
      +	type action struct {
      +		unit    system.Unit
      +		command string
      +	}
      +	actions := make([]action, 0, len(units))
      +	reload := false
      +	restartNetworkd := false
      +	for _, unit := range units {
      +		if unit.Name == "" {
      +			log.Printf("Skipping unit without name")
      +			continue
      +		}
      +
      +		if unit.Content != "" {
      +			log.Printf("Writing unit %q to filesystem", unit.Name)
      +			if err := um.PlaceUnit(unit); err != nil {
      +				return err
      +			}
      +			log.Printf("Wrote unit %q", unit.Name)
      +			reload = true
      +		}
      +
      +		for _, dropin := range unit.DropIns {
      +			if dropin.Name != "" && dropin.Content != "" {
      +				log.Printf("Writing drop-in unit %q to filesystem", dropin.Name)
      +				if err := um.PlaceUnitDropIn(unit, dropin); err != nil {
      +					return err
      +				}
      +				log.Printf("Wrote drop-in unit %q", dropin.Name)
      +				reload = true
      +			}
      +		}
      +
      +		if unit.Mask {
      +			log.Printf("Masking unit file %q", unit.Name)
      +			if err := um.MaskUnit(unit); err != nil {
      +				return err
      +			}
      +		} else if unit.Runtime {
      +			log.Printf("Ensuring runtime unit file %q is unmasked", unit.Name)
      +			if err := um.UnmaskUnit(unit); err != nil {
      +				return err
      +			}
      +		}
      +
      +		if unit.Enable {
      +			if unit.Group() != "network" {
      +				log.Printf("Enabling unit file %q", unit.Name)
      +				if err := um.EnableUnitFile(unit); err != nil {
      +					return err
      +				}
      +				log.Printf("Enabled unit %q", unit.Name)
      +			} else {
      +				log.Printf("Skipping enable for network-like unit %q", unit.Name)
      +			}
      +		}
      +
      +		if unit.Group() == "network" {
      +			restartNetworkd = true
      +		} else if unit.Command != "" {
      +			actions = append(actions, action{unit, unit.Command})
      +		}
      +	}
      +
      +	if reload {
      +		if err := um.DaemonReload(); err != nil {
      +			return errors.New(fmt.Sprintf("failed systemd daemon-reload: %s", err))
      +		}
      +	}
      +
      +	if restartNetworkd {
      +		log.Printf("Restarting systemd-networkd")
      +		networkd := system.Unit{Unit: config.Unit{Name: "systemd-networkd.service"}}
      +		res, err := um.RunUnitCommand(networkd, "restart")
      +		if err != nil {
      +			return err
      +		}
      +		log.Printf("Restarted systemd-networkd (%s)", res)
      +	}
      +
      +	for _, action := range actions {
      +		log.Printf("Calling unit command %q on %q'", action.command, action.unit.Name)
      +		res, err := um.RunUnitCommand(action.unit, action.command)
      +		if err != nil {
      +			return err
      +		}
      +		log.Printf("Result of %q on %q: %s", action.command, action.unit.Name, res)
      +	}
      +
      +	return nil
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/config_test.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/config_test.go
      new file mode 100644
      index 00000000..33be737e
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/initialize/config_test.go
      @@ -0,0 +1,299 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package initialize
      +
      +import (
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +	"github.com/coreos/coreos-cloudinit/network"
      +	"github.com/coreos/coreos-cloudinit/system"
      +)
      +
      +type TestUnitManager struct {
      +	placed   []string
      +	enabled  []string
      +	masked   []string
      +	unmasked []string
      +	commands []UnitAction
      +	reload   bool
      +}
      +
      +type UnitAction struct {
      +	unit    string
      +	command string
      +}
      +
      +func (tum *TestUnitManager) PlaceUnit(u system.Unit) error {
      +	tum.placed = append(tum.placed, u.Name)
      +	return nil
      +}
      +func (tum *TestUnitManager) PlaceUnitDropIn(u system.Unit, d config.UnitDropIn) error {
      +	tum.placed = append(tum.placed, u.Name+".d/"+d.Name)
      +	return nil
      +}
      +func (tum *TestUnitManager) EnableUnitFile(u system.Unit) error {
      +	tum.enabled = append(tum.enabled, u.Name)
      +	return nil
      +}
      +func (tum *TestUnitManager) RunUnitCommand(u system.Unit, c string) (string, error) {
      +	tum.commands = append(tum.commands, UnitAction{u.Name, c})
      +	return "", nil
      +}
      +func (tum *TestUnitManager) DaemonReload() error {
      +	tum.reload = true
      +	return nil
      +}
      +func (tum *TestUnitManager) MaskUnit(u system.Unit) error {
      +	tum.masked = append(tum.masked, u.Name)
      +	return nil
      +}
      +func (tum *TestUnitManager) UnmaskUnit(u system.Unit) error {
      +	tum.unmasked = append(tum.unmasked, u.Name)
      +	return nil
      +}
      +
      +type mockInterface struct {
      +	name           string
      +	filename       string
      +	netdev         string
      +	link           string
      +	network        string
      +	kind           string
      +	modprobeParams string
      +}
      +
      +func (i mockInterface) Name() string {
      +	return i.name
      +}
      +
      +func (i mockInterface) Filename() string {
      +	return i.filename
      +}
      +
      +func (i mockInterface) Netdev() string {
      +	return i.netdev
      +}
      +
      +func (i mockInterface) Link() string {
      +	return i.link
      +}
      +
      +func (i mockInterface) Network() string {
      +	return i.network
      +}
      +
      +func (i mockInterface) Type() string {
      +	return i.kind
      +}
      +
      +func (i mockInterface) ModprobeParams() string {
      +	return i.modprobeParams
      +}
      +
      +func TestCreateNetworkingUnits(t *testing.T) {
      +	for _, tt := range []struct {
      +		interfaces []network.InterfaceGenerator
      +		expect     []system.Unit
      +	}{
      +		{nil, nil},
      +		{
      +			[]network.InterfaceGenerator{
      +				network.InterfaceGenerator(mockInterface{filename: "test"}),
      +			},
      +			nil,
      +		},
      +		{
      +			[]network.InterfaceGenerator{
      +				network.InterfaceGenerator(mockInterface{filename: "test1", netdev: "test netdev"}),
      +				network.InterfaceGenerator(mockInterface{filename: "test2", link: "test link"}),
      +				network.InterfaceGenerator(mockInterface{filename: "test3", network: "test network"}),
      +			},
      +			[]system.Unit{
      +				system.Unit{Unit: config.Unit{Name: "test1.netdev", Runtime: true, Content: "test netdev"}},
      +				system.Unit{Unit: config.Unit{Name: "test2.link", Runtime: true, Content: "test link"}},
      +				system.Unit{Unit: config.Unit{Name: "test3.network", Runtime: true, Content: "test network"}},
      +			},
      +		},
      +		{
      +			[]network.InterfaceGenerator{
      +				network.InterfaceGenerator(mockInterface{filename: "test", netdev: "test netdev", link: "test link", network: "test network"}),
      +			},
      +			[]system.Unit{
      +				system.Unit{Unit: config.Unit{Name: "test.netdev", Runtime: true, Content: "test netdev"}},
      +				system.Unit{Unit: config.Unit{Name: "test.link", Runtime: true, Content: "test link"}},
      +				system.Unit{Unit: config.Unit{Name: "test.network", Runtime: true, Content: "test network"}},
      +			},
      +		},
      +	} {
      +		units := createNetworkingUnits(tt.interfaces)
      +		if !reflect.DeepEqual(tt.expect, units) {
      +			t.Errorf("bad units (%+v): want %#v, got %#v", tt.interfaces, tt.expect, units)
      +		}
      +	}
      +}
      +
      +func TestProcessUnits(t *testing.T) {
      +	tests := []struct {
      +		units []system.Unit
      +
      +		result TestUnitManager
      +	}{
      +		{
      +			units: []system.Unit{
      +				system.Unit{Unit: config.Unit{
      +					Name: "foo",
      +					Mask: true,
      +				}},
      +			},
      +			result: TestUnitManager{
      +				masked: []string{"foo"},
      +			},
      +		},
      +		{
      +			units: []system.Unit{
      +				system.Unit{Unit: config.Unit{
      +					Name:    "baz.service",
      +					Content: "[Service]\nExecStart=/bin/baz",
      +					Command: "start",
      +				}},
      +				system.Unit{Unit: config.Unit{
      +					Name:    "foo.network",
      +					Content: "[Network]\nFoo=true",
      +				}},
      +				system.Unit{Unit: config.Unit{
      +					Name:    "bar.network",
      +					Content: "[Network]\nBar=true",
      +				}},
      +			},
      +			result: TestUnitManager{
      +				placed: []string{"baz.service", "foo.network", "bar.network"},
      +				commands: []UnitAction{
      +					UnitAction{"systemd-networkd.service", "restart"},
      +					UnitAction{"baz.service", "start"},
      +				},
      +				reload: true,
      +			},
      +		},
      +		{
      +			units: []system.Unit{
      +				system.Unit{Unit: config.Unit{
      +					Name:    "baz.service",
      +					Content: "[Service]\nExecStart=/bin/true",
      +				}},
      +			},
      +			result: TestUnitManager{
      +				placed: []string{"baz.service"},
      +				reload: true,
      +			},
      +		},
      +		{
      +			units: []system.Unit{
      +				system.Unit{Unit: config.Unit{
      +					Name:    "locksmithd.service",
      +					Runtime: true,
      +				}},
      +			},
      +			result: TestUnitManager{
      +				unmasked: []string{"locksmithd.service"},
      +			},
      +		},
      +		{
      +			units: []system.Unit{
      +				system.Unit{Unit: config.Unit{
      +					Name:   "woof",
      +					Enable: true,
      +				}},
      +			},
      +			result: TestUnitManager{
      +				enabled: []string{"woof"},
      +			},
      +		},
      +		{
      +			units: []system.Unit{
      +				system.Unit{Unit: config.Unit{
      +					Name:    "hi.service",
      +					Runtime: true,
      +					Content: "[Service]\nExecStart=/bin/echo hi",
      +					DropIns: []config.UnitDropIn{
      +						{
      +							Name:    "lo.conf",
      +							Content: "[Service]\nExecStart=/bin/echo lo",
      +						},
      +						{
      +							Name:    "bye.conf",
      +							Content: "[Service]\nExecStart=/bin/echo bye",
      +						},
      +					},
      +				}},
      +			},
      +			result: TestUnitManager{
      +				placed:   []string{"hi.service", "hi.service.d/lo.conf", "hi.service.d/bye.conf"},
      +				unmasked: []string{"hi.service"},
      +				reload:   true,
      +			},
      +		},
      +		{
      +			units: []system.Unit{
      +				system.Unit{Unit: config.Unit{
      +					DropIns: []config.UnitDropIn{
      +						{
      +							Name:    "lo.conf",
      +							Content: "[Service]\nExecStart=/bin/echo lo",
      +						},
      +					},
      +				}},
      +			},
      +			result: TestUnitManager{},
      +		},
      +		{
      +			units: []system.Unit{
      +				system.Unit{Unit: config.Unit{
      +					Name: "hi.service",
      +					DropIns: []config.UnitDropIn{
      +						{
      +							Content: "[Service]\nExecStart=/bin/echo lo",
      +						},
      +					},
      +				}},
      +			},
      +			result: TestUnitManager{},
      +		},
      +		{
      +			units: []system.Unit{
      +				system.Unit{Unit: config.Unit{
      +					Name: "hi.service",
      +					DropIns: []config.UnitDropIn{
      +						{
      +							Name: "lo.conf",
      +						},
      +					},
      +				}},
      +			},
      +			result: TestUnitManager{},
      +		},
      +	}
      +
      +	for _, tt := range tests {
      +		tum := &TestUnitManager{}
      +		if err := processUnits(tt.units, "", tum); err != nil {
      +			t.Errorf("bad error (%+v): want nil, got %s", tt.units, err)
      +		}
      +		if !reflect.DeepEqual(tt.result, *tum) {
      +			t.Errorf("bad result (%+v): want %+v, got %+v", tt.units, tt.result, tum)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/env.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/env.go
      new file mode 100644
      index 00000000..f90cc932
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/initialize/env.go
      @@ -0,0 +1,116 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package initialize
      +
      +import (
      +	"net"
      +	"os"
      +	"path"
      +	"regexp"
      +	"strings"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/system"
      +)
      +
      +const DefaultSSHKeyName = "coreos-cloudinit"
      +
      +type Environment struct {
      +	root          string
      +	configRoot    string
      +	workspace     string
      +	sshKeyName    string
      +	substitutions map[string]string
      +}
      +
      +// TODO(jonboulle): this is getting unwieldy, should be able to simplify the interface somehow
      +func NewEnvironment(root, configRoot, workspace, sshKeyName string, metadata datasource.Metadata) *Environment {
      +	firstNonNull := func(ip net.IP, env string) string {
      +		if ip == nil {
      +			return env
      +		}
      +		return ip.String()
      +	}
      +	substitutions := map[string]string{
      +		"$public_ipv4":  firstNonNull(metadata.PublicIPv4, os.Getenv("COREOS_PUBLIC_IPV4")),
      +		"$private_ipv4": firstNonNull(metadata.PrivateIPv4, os.Getenv("COREOS_PRIVATE_IPV4")),
      +		"$public_ipv6":  firstNonNull(metadata.PublicIPv6, os.Getenv("COREOS_PUBLIC_IPV6")),
      +		"$private_ipv6": firstNonNull(metadata.PrivateIPv6, os.Getenv("COREOS_PRIVATE_IPV6")),
      +	}
      +	return &Environment{root, configRoot, workspace, sshKeyName, substitutions}
      +}
      +
      +func (e *Environment) Workspace() string {
      +	return path.Join(e.root, e.workspace)
      +}
      +
      +func (e *Environment) Root() string {
      +	return e.root
      +}
      +
      +func (e *Environment) ConfigRoot() string {
      +	return e.configRoot
      +}
      +
      +func (e *Environment) SSHKeyName() string {
      +	return e.sshKeyName
      +}
      +
      +func (e *Environment) SetSSHKeyName(name string) {
      +	e.sshKeyName = name
      +}
      +
      +// Apply goes through the map of substitutions and replaces all instances of
      +// the keys with their respective values. It supports escaping substitutions
      +// with a leading '\'.
      +func (e *Environment) Apply(data string) string {
      +	for key, val := range e.substitutions {
      +		matchKey := strings.Replace(key, `$`, `\$`, -1)
      +		replKey := strings.Replace(key, `$`, `$$`, -1)
      +
      +		// "key" -> "val"
      +		data = regexp.MustCompile(`([^\\]|^)`+matchKey).ReplaceAllString(data, `${1}`+val)
      +		// "\key" -> "key"
      +		data = regexp.MustCompile(`\\`+matchKey).ReplaceAllString(data, replKey)
      +	}
      +	return data
      +}
      +
      +func (e *Environment) DefaultEnvironmentFile() *system.EnvFile {
      +	ef := system.EnvFile{
      +		File: &system.File{File: config.File{
      +			Path: "/etc/environment",
      +		}},
      +		Vars: map[string]string{},
      +	}
      +	if ip, ok := e.substitutions["$public_ipv4"]; ok && len(ip) > 0 {
      +		ef.Vars["COREOS_PUBLIC_IPV4"] = ip
      +	}
      +	if ip, ok := e.substitutions["$private_ipv4"]; ok && len(ip) > 0 {
      +		ef.Vars["COREOS_PRIVATE_IPV4"] = ip
      +	}
      +	if ip, ok := e.substitutions["$public_ipv6"]; ok && len(ip) > 0 {
      +		ef.Vars["COREOS_PUBLIC_IPV6"] = ip
      +	}
      +	if ip, ok := e.substitutions["$private_ipv6"]; ok && len(ip) > 0 {
      +		ef.Vars["COREOS_PRIVATE_IPV6"] = ip
      +	}
      +	if len(ef.Vars) == 0 {
      +		return nil
      +	} else {
      +		return &ef
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/env_test.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/env_test.go
      new file mode 100644
      index 00000000..abb770cf
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/initialize/env_test.go
      @@ -0,0 +1,148 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package initialize
      +
      +import (
      +	"io/ioutil"
      +	"net"
      +	"os"
      +	"path"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource"
      +	"github.com/coreos/coreos-cloudinit/system"
      +)
      +
      +func TestEnvironmentApply(t *testing.T) {
      +	os.Setenv("COREOS_PUBLIC_IPV4", "1.2.3.4")
      +	os.Setenv("COREOS_PRIVATE_IPV4", "5.6.7.8")
      +	os.Setenv("COREOS_PUBLIC_IPV6", "1234::")
      +	os.Setenv("COREOS_PRIVATE_IPV6", "5678::")
      +	for _, tt := range []struct {
      +		metadata datasource.Metadata
      +		input    string
      +		out      string
      +	}{
      +		{
      +			// Substituting both values directly should always take precedence
      +			// over environment variables
      +			datasource.Metadata{
      +				PublicIPv4:  net.ParseIP("192.0.2.3"),
      +				PrivateIPv4: net.ParseIP("192.0.2.203"),
      +				PublicIPv6:  net.ParseIP("fe00:1234::"),
      +				PrivateIPv6: net.ParseIP("fe00:5678::"),
      +			},
      +			`[Service]
      +ExecStart=/usr/bin/echo "$public_ipv4 $public_ipv6"
      +ExecStop=/usr/bin/echo $private_ipv4 $private_ipv6
      +ExecStop=/usr/bin/echo $unknown`,
      +			`[Service]
      +ExecStart=/usr/bin/echo "192.0.2.3 fe00:1234::"
      +ExecStop=/usr/bin/echo 192.0.2.203 fe00:5678::
      +ExecStop=/usr/bin/echo $unknown`,
      +		},
      +		{
      +			// Substituting one value directly while falling back with the other
      +			datasource.Metadata{
      +				PrivateIPv4: net.ParseIP("127.0.0.1"),
      +			},
      +			"$private_ipv4\n$public_ipv4",
      +			"127.0.0.1\n1.2.3.4",
      +		},
      +		{
      +			// Falling back to environment variables for both values
      +			datasource.Metadata{},
      +			"$private_ipv4\n$public_ipv4",
      +			"5.6.7.8\n1.2.3.4",
      +		},
      +		{
      +			// No substitutions
      +			datasource.Metadata{},
      +			"$private_ipv4\nfoobar",
      +			"5.6.7.8\nfoobar",
      +		},
      +		{
      +			// Escaping substitutions
      +			datasource.Metadata{
      +				PrivateIPv4: net.ParseIP("127.0.0.1"),
      +			},
      +			`\$private_ipv4
      +$private_ipv4
      +addr: \$private_ipv4
      +\\$private_ipv4`,
      +			`$private_ipv4
      +127.0.0.1
      +addr: $private_ipv4
      +\$private_ipv4`,
      +		},
      +		{
      +			// No substitutions with escaping
      +			datasource.Metadata{},
      +			"\\$test\n$test",
      +			"\\$test\n$test",
      +		},
      +	} {
      +
      +		env := NewEnvironment("./", "./", "./", "", tt.metadata)
      +		got := env.Apply(tt.input)
      +		if got != tt.out {
      +			t.Fatalf("Environment incorrectly applied.\ngot:\n%s\nwant:\n%s", got, tt.out)
      +		}
      +	}
      +}
      +
      +func TestEnvironmentFile(t *testing.T) {
      +	metadata := datasource.Metadata{
      +		PublicIPv4:  net.ParseIP("1.2.3.4"),
      +		PrivateIPv4: net.ParseIP("5.6.7.8"),
      +		PublicIPv6:  net.ParseIP("1234::"),
      +		PrivateIPv6: net.ParseIP("5678::"),
      +	}
      +	expect := "COREOS_PRIVATE_IPV4=5.6.7.8\nCOREOS_PRIVATE_IPV6=5678::\nCOREOS_PUBLIC_IPV4=1.2.3.4\nCOREOS_PUBLIC_IPV6=1234::\n"
      +
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	env := NewEnvironment("./", "./", "./", "", metadata)
      +	ef := env.DefaultEnvironmentFile()
      +	err = system.WriteEnvFile(ef, dir)
      +	if err != nil {
      +		t.Fatalf("WriteEnvFile failed: %v", err)
      +	}
      +
      +	fullPath := path.Join(dir, "etc", "environment")
      +	contents, err := ioutil.ReadFile(fullPath)
      +	if err != nil {
      +		t.Fatalf("Unable to read expected file: %v", err)
      +	}
      +
      +	if string(contents) != expect {
      +		t.Fatalf("File has incorrect contents: %q", contents)
      +	}
      +}
      +
      +func TestEnvironmentFileNil(t *testing.T) {
      +	os.Clearenv()
      +	metadata := datasource.Metadata{}
      +
      +	env := NewEnvironment("./", "./", "./", "", metadata)
      +	ef := env.DefaultEnvironmentFile()
      +	if ef != nil {
      +		t.Fatalf("Environment file not nil: %v", ef)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/github.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/github.go
      new file mode 100644
      index 00000000..2f7755fe
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/initialize/github.go
      @@ -0,0 +1,32 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package initialize
      +
      +import (
      +	"fmt"
      +
      +	"github.com/coreos/coreos-cloudinit/system"
      +)
      +
      +func SSHImportGithubUser(system_user string, github_user string) error {
      +	url := fmt.Sprintf("https://api.github.com/users/%s/keys", github_user)
      +	keys, err := fetchUserKeys(url)
      +	if err != nil {
      +		return err
      +	}
      +
      +	key_name := fmt.Sprintf("github-%s", github_user)
      +	return system.AuthorizeSSHKeys(system_user, key_name, keys)
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/ssh_keys.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/ssh_keys.go
      new file mode 100644
      index 00000000..17b0c4a9
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/initialize/ssh_keys.go
      @@ -0,0 +1,57 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package initialize
      +
      +import (
      +	"encoding/json"
      +	"fmt"
      +
      +	"github.com/coreos/coreos-cloudinit/pkg"
      +	"github.com/coreos/coreos-cloudinit/system"
      +)
      +
      +type UserKey struct {
      +	ID  int    `json:"id,omitempty"`
      +	Key string `json:"key"`
      +}
      +
      +func SSHImportKeysFromURL(system_user string, url string) error {
      +	keys, err := fetchUserKeys(url)
      +	if err != nil {
      +		return err
      +	}
      +
      +	key_name := fmt.Sprintf("coreos-cloudinit-%s", system_user)
      +	return system.AuthorizeSSHKeys(system_user, key_name, keys)
      +}
      +
      +func fetchUserKeys(url string) ([]string, error) {
      +	client := pkg.NewHttpClient()
      +	data, err := client.GetRetry(url)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	var userKeys []UserKey
      +	err = json.Unmarshal(data, &userKeys)
      +	if err != nil {
      +		return nil, err
      +	}
      +	keys := make([]string, 0)
      +	for _, key := range userKeys {
      +		keys = append(keys, key.Key)
      +	}
      +	return keys, err
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/ssh_keys_test.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/ssh_keys_test.go
      new file mode 100644
      index 00000000..86395797
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/initialize/ssh_keys_test.go
      @@ -0,0 +1,56 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package initialize
      +
      +import (
      +	"fmt"
      +	"net/http"
      +	"net/http/httptest"
      +	"testing"
      +)
      +
      +func TestCloudConfigUsersUrlMarshal(t *testing.T) {
      +	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
      +		gh_res := `
      +[
      +  {
      +    "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIHAu822ggSkIHrJYvhmBceOSVjuflfQm8RbMMDNVe9relQfuPbN+nxGGTCKzPLebeOcX+Wwi77TPXWwK3BZMglfXxhABlFPsuMb63Tqp94pBYsJdx/iFj9iGo6pKoM1k8ubOcqsUnq+BR9895zRbE7MjdwkGo67+QhCEwvkwAnNAAAAFQCuddVqXLCubzqnWmeHLQE+2GFfHwAAAIBnlXW5h15ndVuwi0htF4oodVSB1KwnTWcuBK+aE1zRs76yvRb0Ws+oifumThDwB/Tec6FQuAfRKfy6piChZqsu5KvL98I+2t5yyi1td+kMvdTnVL2lW44etDKseOcozmknCOmh4Dqvhl/2MwrDAhlPaN08EEq9h3w3mXtNLWH64QAAAIBAzDOKr17llngaKIdDXh+LtXKh87+zfjlTA36/9r2uF2kYE5uApDtu9sPCkt7+YBQt7R8prADPckwAiXwVdk0xijIOpLDBmoydQJJRQ+zTMxvpQmUr/1kUOv0zb+lB657CgvN0vVTmP2swPeMvgntt3C4vw7Ab+O+MS9peOAJbbQ=="
      +  },
      +  {
      +    "key": "ssh-dss AAAAB3NzaC1kc3MAAACBANxpzIbTzKTeBRaOIdUxwwGwvDasTfU/PonhbNIuhYjc+xFGvBRTumox2F+luVAKKs4WdvA4nJXaY1OFi6DZftk5Bp4E2JaSzp8ulAzHsMexDdv6LGHGEJj/qdHAL1vHk2K89PpwRFSRZI8XRBLjvkr4ZgBKLG5ZILXPJEPP2j3lAAAAFQCtxoTnV8wy0c4grcGrQ+1sCsD7WQAAAIAqZsW2GviMe1RQrbZT0xAZmI64XRPrnLsoLxycHWlS7r6uUln2c6Ae2MB/YF0d4Kd1XZii9GHj7rrypqEo7MW8uSabhu70nmu1J8m2O3Dsr+4oJLeat9vwPsJV92IKO0jQwjKnAOHOiB9JKGeCw+NfXfogbti9/q38Q6XcS+SI5wAAAIEA1803Y2h+tOOpZXAsNIwl9mRfExWzLQ3L7knwJdznQu/6SW1H/1oyoYLebuk187Qj2UFI5qQ6AZNc49DvohWx0Cg6ABcyubNyoaCjZKWIdxVnItHWNbLe//+tyTu0I2eQwJOORsEPK5gMpf599C7wXQ//DzZOWbTWiHEX52gCTmk="
      +  },
      +  {
      +    "id": 5224438,
      +    "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAPKRWdKhzGZuLAJL6M1eM51hWViMqNBC2C6lm2OqGRYLuIf1GJ391widUuSf4wQqnkR22Q9PCmAZ19XCf11wBRMnuw9I/Z3Bt5bXfc+dzFBCmHYGJ6wNSv++H9jxyMb+usmsenWOFZGNO2jN0wrJ4ay8Yt0bwtRU+VCXpuRLszMzAAAAFQDZUIuPjcfK5HLgnwZ/J3lvtvlUjQAAAIEApIkAwLuCQV5j3U6DmI/Y6oELqSUR2purFm8jo8jePFfe1t+ghikgD254/JXlhDCVgY0NLXcak+coJfGCTT23quJ7I5xdpTn/OZO2Q6Woum/bijFC/UWwQbLz0R2nU3DoHv5v6XHQZxuIG4Fsxa91S+vWjZFtI7RuYlBCZA//ANMAAACBAJO0FojzkX6IeaWLqrgu9GTkFwGFazZ+LPH5JOWPoPn1hQKuR32Uf6qNcBZcIjY7SF0P7HF5rLQd6zKZzHqqQQ92MV555NEwjsnJglYU8CaaZsfYooaGPgA1YN7RhTSAuDmUW5Hyfj5BH4NTtrzrvJxIhDoQLf31Fasjw00r4R0O"
      +  }
      +]
      +`
      +		fmt.Fprintln(w, gh_res)
      +	}))
      +	defer ts.Close()
      +
      +	keys, err := fetchUserKeys(ts.URL)
      +	if err != nil {
      +		t.Fatalf("Encountered unexpected error: %v", err)
      +	}
      +	expected := "ssh-dss AAAAB3NzaC1kc3MAAACBAIHAu822ggSkIHrJYvhmBceOSVjuflfQm8RbMMDNVe9relQfuPbN+nxGGTCKzPLebeOcX+Wwi77TPXWwK3BZMglfXxhABlFPsuMb63Tqp94pBYsJdx/iFj9iGo6pKoM1k8ubOcqsUnq+BR9895zRbE7MjdwkGo67+QhCEwvkwAnNAAAAFQCuddVqXLCubzqnWmeHLQE+2GFfHwAAAIBnlXW5h15ndVuwi0htF4oodVSB1KwnTWcuBK+aE1zRs76yvRb0Ws+oifumThDwB/Tec6FQuAfRKfy6piChZqsu5KvL98I+2t5yyi1td+kMvdTnVL2lW44etDKseOcozmknCOmh4Dqvhl/2MwrDAhlPaN08EEq9h3w3mXtNLWH64QAAAIBAzDOKr17llngaKIdDXh+LtXKh87+zfjlTA36/9r2uF2kYE5uApDtu9sPCkt7+YBQt7R8prADPckwAiXwVdk0xijIOpLDBmoydQJJRQ+zTMxvpQmUr/1kUOv0zb+lB657CgvN0vVTmP2swPeMvgntt3C4vw7Ab+O+MS9peOAJbbQ=="
      +	if keys[0] != expected {
      +		t.Fatalf("expected %s, got %s", expected, keys[0])
      +	}
      +	expected = "ssh-dss AAAAB3NzaC1kc3MAAACBAPKRWdKhzGZuLAJL6M1eM51hWViMqNBC2C6lm2OqGRYLuIf1GJ391widUuSf4wQqnkR22Q9PCmAZ19XCf11wBRMnuw9I/Z3Bt5bXfc+dzFBCmHYGJ6wNSv++H9jxyMb+usmsenWOFZGNO2jN0wrJ4ay8Yt0bwtRU+VCXpuRLszMzAAAAFQDZUIuPjcfK5HLgnwZ/J3lvtvlUjQAAAIEApIkAwLuCQV5j3U6DmI/Y6oELqSUR2purFm8jo8jePFfe1t+ghikgD254/JXlhDCVgY0NLXcak+coJfGCTT23quJ7I5xdpTn/OZO2Q6Woum/bijFC/UWwQbLz0R2nU3DoHv5v6XHQZxuIG4Fsxa91S+vWjZFtI7RuYlBCZA//ANMAAACBAJO0FojzkX6IeaWLqrgu9GTkFwGFazZ+LPH5JOWPoPn1hQKuR32Uf6qNcBZcIjY7SF0P7HF5rLQd6zKZzHqqQQ92MV555NEwjsnJglYU8CaaZsfYooaGPgA1YN7RhTSAuDmUW5Hyfj5BH4NTtrzrvJxIhDoQLf31Fasjw00r4R0O"
      +	if keys[2] != expected {
      +		t.Fatalf("expected %s, got %s", expected, keys[2])
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data.go
      new file mode 100644
      index 00000000..c728d64e
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data.go
      @@ -0,0 +1,45 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package initialize
      +
      +import (
      +	"errors"
      +	"log"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +var (
      +	ErrIgnitionConfig = errors.New("not a config (found Ignition)")
      +)
      +
      +func ParseUserData(contents string) (interface{}, error) {
      +	if len(contents) == 0 {
      +		return nil, nil
      +	}
      +
      +	switch {
      +	case config.IsScript(contents):
      +		log.Printf("Parsing user-data as script")
      +		return config.NewScript(contents)
      +	case config.IsCloudConfig(contents):
      +		log.Printf("Parsing user-data as cloud-config")
      +		return config.NewCloudConfig(contents)
      +	case config.IsIgnitionConfig(contents):
      +		return nil, ErrIgnitionConfig
      +	default:
      +		return nil, errors.New("Unrecognized user-data format")
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data_test.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data_test.go
      new file mode 100644
      index 00000000..7a5acc98
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/initialize/user_data_test.go
      @@ -0,0 +1,74 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package initialize
      +
      +import (
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +func TestParseHeaderCRLF(t *testing.T) {
      +	configs := []string{
      +		"#cloud-config\nfoo: bar",
      +		"#cloud-config\r\nfoo: bar",
      +	}
      +
      +	for i, config := range configs {
      +		_, err := ParseUserData(config)
      +		if err != nil {
      +			t.Errorf("Failed parsing config %d: %v", i, err)
      +		}
      +	}
      +
      +	scripts := []string{
      +		"#!bin/bash\necho foo",
      +		"#!bin/bash\r\necho foo",
      +	}
      +
      +	for i, script := range scripts {
      +		_, err := ParseUserData(script)
      +		if err != nil {
      +			t.Errorf("Failed parsing script %d: %v", i, err)
      +		}
      +	}
      +}
      +
      +func TestParseConfigCRLF(t *testing.T) {
      +	contents := "#cloud-config \r\nhostname: foo\r\nssh_authorized_keys:\r\n  - foobar\r\n"
      +	ud, err := ParseUserData(contents)
      +	if err != nil {
      +		t.Fatalf("Failed parsing config: %v", err)
      +	}
      +
      +	cfg := ud.(*config.CloudConfig)
      +
      +	if cfg.Hostname != "foo" {
      +		t.Error("Failed parsing hostname from config")
      +	}
      +
      +	if len(cfg.SSHAuthorizedKeys) != 1 {
      +		t.Error("Parsed incorrect number of SSH keys")
      +	}
      +}
      +
      +func TestParseConfigEmpty(t *testing.T) {
      +	i, e := ParseUserData(``)
      +	if i != nil {
      +		t.Error("ParseUserData of empty string returned non-nil unexpectedly")
      +	} else if e != nil {
      +		t.Error("ParseUserData of empty string returned error unexpectedly")
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/workspace.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/workspace.go
      new file mode 100644
      index 00000000..540dcf41
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/initialize/workspace.go
      @@ -0,0 +1,66 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package initialize
      +
      +import (
      +	"io/ioutil"
      +	"path"
      +	"strings"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +	"github.com/coreos/coreos-cloudinit/system"
      +)
      +
      +func PrepWorkspace(workspace string) error {
      +	if err := system.EnsureDirectoryExists(workspace); err != nil {
      +		return err
      +	}
      +
      +	scripts := path.Join(workspace, "scripts")
      +	if err := system.EnsureDirectoryExists(scripts); err != nil {
      +		return err
      +	}
      +
      +	return nil
      +}
      +
      +func PersistScriptInWorkspace(script config.Script, workspace string) (string, error) {
      +	scriptsPath := path.Join(workspace, "scripts")
      +	tmp, err := ioutil.TempFile(scriptsPath, "")
      +	if err != nil {
      +		return "", err
      +	}
      +	tmp.Close()
      +
      +	relpath := strings.TrimPrefix(tmp.Name(), workspace)
      +
      +	file := system.File{File: config.File{
      +		Path:               relpath,
      +		RawFilePermissions: "0744",
      +		Content:            string(script),
      +	}}
      +
      +	return system.WriteFile(&file, workspace)
      +}
      +
      +func PersistUnitNameInWorkspace(name string, workspace string) error {
      +	file := system.File{File: config.File{
      +		Path:               path.Join("scripts", "unit-name"),
      +		RawFilePermissions: "0644",
      +		Content:            name,
      +	}}
      +	_, err := system.WriteFile(&file, workspace)
      +	return err
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/debian.go b/vendor/github.com/coreos/coreos-cloudinit/network/debian.go
      new file mode 100644
      index 00000000..91646cbd
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/debian.go
      @@ -0,0 +1,63 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package network
      +
      +import (
      +	"log"
      +	"strings"
      +)
      +
      +func ProcessDebianNetconf(config []byte) ([]InterfaceGenerator, error) {
      +	log.Println("Processing Debian network config")
      +	lines := formatConfig(string(config))
      +	stanzas, err := parseStanzas(lines)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	interfaces := make([]*stanzaInterface, 0, len(stanzas))
      +	for _, stanza := range stanzas {
      +		switch s := stanza.(type) {
      +		case *stanzaInterface:
      +			interfaces = append(interfaces, s)
      +		}
      +	}
      +	log.Printf("Parsed %d network interfaces\n", len(interfaces))
      +
      +	log.Println("Processed Debian network config")
      +	return buildInterfaces(interfaces), nil
      +}
      +
      +func formatConfig(config string) []string {
      +	lines := []string{}
      +	config = strings.Replace(config, "\\\n", "", -1)
      +	for config != "" {
      +		split := strings.SplitN(config, "\n", 2)
      +		line := strings.TrimSpace(split[0])
      +
      +		if len(split) == 2 {
      +			config = split[1]
      +		} else {
      +			config = ""
      +		}
      +
      +		if strings.HasPrefix(line, "#") || line == "" {
      +			continue
      +		}
      +
      +		lines = append(lines, line)
      +	}
      +	return lines
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/debian_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/debian_test.go
      new file mode 100644
      index 00000000..da9e281d
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/debian_test.go
      @@ -0,0 +1,56 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package network
      +
      +import (
      +	"testing"
      +)
      +
+// TestFormatConfigs verifies that formatConfig joins "\"-continued lines,
+// strips comments and blank lines, and yields the expected line counts.
+func TestFormatConfigs(t *testing.T) {
+	for in, n := range map[string]int{
+		"":                                                    0,
+		"line1\\\nis long":                                    1,
+		"#comment":                                            0,
+		"#comment\\\ncomment":                                 0,
+		"  #comment \\\n comment\nline 1\nline 2\\\n is long": 2,
+	} {
+		lines := formatConfig(in)
+		if len(lines) != n {
+			t.Fatalf("bad number of lines for config %q: got %d, want %d", in, len(lines), n)
+		}
+	}
+}
      +
+// TestProcessDebianNetconf exercises end-to-end parsing of small Debian
+// configs, checking both the failure state and the number of generated
+// interfaces. A table entry with n == -1 skips the interface-count check.
+func TestProcessDebianNetconf(t *testing.T) {
+	for _, tt := range []struct {
+		in   string
+		fail bool
+		n    int
+	}{
+		{"", false, 0},
+		{"iface", true, -1},
+		{"auto eth1\nauto eth2", false, 0},
+		{"iface eth1 inet manual", false, 1},
+	} {
+		interfaces, err := ProcessDebianNetconf([]byte(tt.in))
+		failed := err != nil
+		if tt.fail != failed {
+			t.Fatalf("bad failure state for %q: got %t, want %t", tt.in, failed, tt.fail)
+		}
+		if tt.n != -1 && tt.n != len(interfaces) {
+			t.Fatalf("bad number of interfaces for %q: got %d, want %q", tt.in, len(interfaces), tt.n)
+		}
+	}
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean.go b/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean.go
      new file mode 100644
      index 00000000..78759a10
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean.go
      @@ -0,0 +1,169 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package network
      +
      +import (
      +	"fmt"
      +	"log"
      +	"net"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean"
      +)
      +
+// ProcessDigitalOceanNetconf converts DigitalOcean metadata into
+// InterfaceGenerators: nameservers are parsed from the DNS section and
+// attached to the parsed public/private interfaces. Returns an error on
+// the first unparseable nameserver or interface field.
+func ProcessDigitalOceanNetconf(config digitalocean.Metadata) ([]InterfaceGenerator, error) {
+	log.Println("Processing DigitalOcean network config")
+
+	log.Println("Parsing nameservers")
+	nameservers, err := parseNameservers(config.DNS)
+	if err != nil {
+		return nil, err
+	}
+	log.Printf("Parsed %d nameservers\n", len(nameservers))
+
+	log.Println("Parsing interfaces")
+	generators, err := parseInterfaces(config.Interfaces, nameservers)
+	if err != nil {
+		return nil, err
+	}
+	log.Printf("Parsed %d network interfaces\n", len(generators))
+
+	log.Println("Processed DigitalOcean network config")
+	return generators, nil
+}
      +
+// parseNameservers parses each nameserver string in the DNS metadata into
+// a net.IP. Returns an error naming the first string that fails to parse.
+// An empty config yields an empty (non-nil) slice.
+func parseNameservers(config digitalocean.DNS) ([]net.IP, error) {
+	nameservers := make([]net.IP, 0, len(config.Nameservers))
+	for _, ns := range config.Nameservers {
+		if ip := net.ParseIP(ns); ip == nil {
+			return nil, fmt.Errorf("could not parse %q as nameserver IP address", ns)
+		} else {
+			nameservers = append(nameservers, ip)
+		}
+	}
+	return nameservers, nil
+}
      +
+// parseInterfaces builds physical interface generators for all public and
+// private interfaces in the metadata. Public interfaces receive the given
+// nameservers and default routes; private interfaces get neither (empty
+// nameserver list, useRoute=false).
+func parseInterfaces(config digitalocean.Interfaces, nameservers []net.IP) ([]InterfaceGenerator, error) {
+	generators := make([]InterfaceGenerator, 0, len(config.Public)+len(config.Private))
+	for _, iface := range config.Public {
+		if generator, err := parseInterface(iface, nameservers, true); err == nil {
+			generators = append(generators, &physicalInterface{*generator})
+		} else {
+			return nil, err
+		}
+	}
+	for _, iface := range config.Private {
+		if generator, err := parseInterface(iface, []net.IP{}, false); err == nil {
+			generators = append(generators, &physicalInterface{*generator})
+		} else {
+			return nil, err
+		}
+	}
+	return generators, nil
+}
      +
+// parseInterface converts a single DigitalOcean interface description into
+// a logicalInterface with static addressing. Each present address family
+// (IPv4, IPv6, anchor IPv4) contributes one address; when useRoute is true
+// a default route is also added per family, which requires the family's
+// gateway to parse (the anchor route is deliberately gateway-less).
+// Returns an error naming the first field that fails to parse.
+func parseInterface(iface digitalocean.Interface, nameservers []net.IP, useRoute bool) (*logicalInterface, error) {
+	routes := make([]route, 0)
+	addresses := make([]net.IPNet, 0)
+	if iface.IPv4 != nil {
+		var ip, mask, gateway net.IP
+		if ip = net.ParseIP(iface.IPv4.IPAddress); ip == nil {
+			return nil, fmt.Errorf("could not parse %q as IPv4 address", iface.IPv4.IPAddress)
+		}
+		// The netmask arrives in dotted-quad form ("255.255.0.0") and is
+		// converted directly to an IPMask.
+		if mask = net.ParseIP(iface.IPv4.Netmask); mask == nil {
+			return nil, fmt.Errorf("could not parse %q as IPv4 mask", iface.IPv4.Netmask)
+		}
+		addresses = append(addresses, net.IPNet{
+			IP:   ip,
+			Mask: net.IPMask(mask),
+		})
+
+		if useRoute {
+			// Gateway is only validated when a default route is wanted.
+			if gateway = net.ParseIP(iface.IPv4.Gateway); gateway == nil {
+				return nil, fmt.Errorf("could not parse %q as IPv4 gateway", iface.IPv4.Gateway)
+			}
+			routes = append(routes, route{
+				destination: net.IPNet{
+					IP:   net.IPv4zero,
+					Mask: net.IPMask(net.IPv4zero),
+				},
+				gateway: gateway,
+			})
+		}
+	}
+	if iface.IPv6 != nil {
+		var ip, gateway net.IP
+		if ip = net.ParseIP(iface.IPv6.IPAddress); ip == nil {
+			return nil, fmt.Errorf("could not parse %q as IPv6 address", iface.IPv6.IPAddress)
+		}
+		// IPv6 uses a prefix length (Cidr) rather than a dotted mask.
+		addresses = append(addresses, net.IPNet{
+			IP:   ip,
+			Mask: net.CIDRMask(iface.IPv6.Cidr, net.IPv6len*8),
+		})
+
+		if useRoute {
+			if gateway = net.ParseIP(iface.IPv6.Gateway); gateway == nil {
+				return nil, fmt.Errorf("could not parse %q as IPv6 gateway", iface.IPv6.Gateway)
+			}
+			routes = append(routes, route{
+				destination: net.IPNet{
+					IP:   net.IPv6zero,
+					Mask: net.IPMask(net.IPv6zero),
+				},
+				gateway: gateway,
+			})
+		}
+	}
+	if iface.AnchorIPv4 != nil {
+		var ip, mask net.IP
+		if ip = net.ParseIP(iface.AnchorIPv4.IPAddress); ip == nil {
+			return nil, fmt.Errorf("could not parse %q as anchor IPv4 address", iface.AnchorIPv4.IPAddress)
+		}
+		if mask = net.ParseIP(iface.AnchorIPv4.Netmask); mask == nil {
+			return nil, fmt.Errorf("could not parse %q as anchor IPv4 mask", iface.AnchorIPv4.Netmask)
+		}
+		addresses = append(addresses, net.IPNet{
+			IP:   ip,
+			Mask: net.IPMask(mask),
+		})
+
+		if useRoute {
+			// NOTE(review): the anchor default route intentionally has no
+			// gateway (zero-value net.IP) — presumably resolved on-link.
+			routes = append(routes, route{
+				destination: net.IPNet{
+					IP:   net.IPv4zero,
+					Mask: net.IPMask(net.IPv4zero),
+				},
+			})
+		}
+	}
+
+	hwaddr, err := net.ParseMAC(iface.MAC)
+	if err != nil {
+		return nil, err
+	}
+
+	// Normalize nil to an empty slice so consumers can range safely.
+	if nameservers == nil {
+		nameservers = []net.IP{}
+	}
+
+	return &logicalInterface{
+		hwaddr: hwaddr,
+		config: configMethodStatic{
+			addresses:   addresses,
+			nameservers: nameservers,
+			routes:      routes,
+		},
+	}, nil
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean_test.go
      new file mode 100644
      index 00000000..fc4fc6d5
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean_test.go
      @@ -0,0 +1,481 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package network
      +
      +import (
      +	"errors"
      +	"net"
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean"
      +)
      +
+// TestParseNameservers covers the empty, valid, and unparseable nameserver
+// cases, comparing both the returned error text and the parsed IPs.
+func TestParseNameservers(t *testing.T) {
+	for _, tt := range []struct {
+		dns digitalocean.DNS
+		nss []net.IP
+		err error
+	}{
+		{
+			dns: digitalocean.DNS{},
+			nss: []net.IP{},
+		},
+		{
+			dns: digitalocean.DNS{Nameservers: []string{"1.2.3.4"}},
+			nss: []net.IP{net.ParseIP("1.2.3.4")},
+		},
+		{
+			dns: digitalocean.DNS{Nameservers: []string{"bad"}},
+			err: errors.New("could not parse \"bad\" as nameserver IP address"),
+		},
+	} {
+		nss, err := parseNameservers(tt.dns)
+		if !errorsEqual(tt.err, err) {
+			t.Fatalf("bad error (%+v): want %q, got %q", tt.dns, tt.err, err)
+		}
+		if !reflect.DeepEqual(tt.nss, nss) {
+			t.Fatalf("bad nameservers (%+v): want %#v, got %#v", tt.dns, tt.nss, nss)
+		}
+	}
+}
      +
+// mkInvalidMAC returns the error net.ParseMAC produces for the input "bad",
+// in the format used by the running Go version (the error representation
+// changed in Go 1.5). isGo15 is defined elsewhere in this package.
+func mkInvalidMAC() error {
+	if isGo15 {
+		return &net.AddrError{Err: "invalid MAC address", Addr: "bad"}
+	} else {
+		return errors.New("invalid MAC address: bad")
+	}
+}
      +
+// TestParseInterface is a table test over parseInterface covering: bad MAC,
+// bare interfaces, nameserver attachment, each address family's parse
+// failures (address/mask/gateway), gateway handling with and without
+// useRoute, and the anchor-IPv4 gateway-less default route.
+func TestParseInterface(t *testing.T) {
+	for _, tt := range []struct {
+		cfg      digitalocean.Interface
+		nss      []net.IP
+		useRoute bool
+		iface    *logicalInterface
+		err      error
+	}{
+		{
+			cfg: digitalocean.Interface{
+				MAC: "bad",
+			},
+			err: mkInvalidMAC(),
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+			},
+			nss: []net.IP{},
+			iface: &logicalInterface{
+				hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}),
+				config: configMethodStatic{
+					addresses:   []net.IPNet{},
+					nameservers: []net.IP{},
+					routes:      []route{},
+				},
+			},
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+			},
+			useRoute: true,
+			nss:      []net.IP{net.ParseIP("1.2.3.4")},
+			iface: &logicalInterface{
+				hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}),
+				config: configMethodStatic{
+					addresses:   []net.IPNet{},
+					nameservers: []net.IP{net.ParseIP("1.2.3.4")},
+					routes:      []route{},
+				},
+			},
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+				IPv4: &digitalocean.Address{
+					IPAddress: "bad",
+					Netmask:   "255.255.0.0",
+				},
+			},
+			nss: []net.IP{},
+			err: errors.New("could not parse \"bad\" as IPv4 address"),
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+				IPv4: &digitalocean.Address{
+					IPAddress: "1.2.3.4",
+					Netmask:   "bad",
+				},
+			},
+			nss: []net.IP{},
+			err: errors.New("could not parse \"bad\" as IPv4 mask"),
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+				IPv4: &digitalocean.Address{
+					IPAddress: "1.2.3.4",
+					Netmask:   "255.255.0.0",
+					Gateway:   "ignoreme",
+				},
+			},
+			nss: []net.IP{},
+			iface: &logicalInterface{
+				hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}),
+				config: configMethodStatic{
+					addresses: []net.IPNet{net.IPNet{
+						IP:   net.ParseIP("1.2.3.4"),
+						Mask: net.IPMask(net.ParseIP("255.255.0.0")),
+					}},
+					nameservers: []net.IP{},
+					routes:      []route{},
+				},
+			},
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+				IPv4: &digitalocean.Address{
+					IPAddress: "1.2.3.4",
+					Netmask:   "255.255.0.0",
+					Gateway:   "bad",
+				},
+			},
+			useRoute: true,
+			nss:      []net.IP{},
+			err:      errors.New("could not parse \"bad\" as IPv4 gateway"),
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+				IPv4: &digitalocean.Address{
+					IPAddress: "1.2.3.4",
+					Netmask:   "255.255.0.0",
+					Gateway:   "5.6.7.8",
+				},
+			},
+			useRoute: true,
+			nss:      []net.IP{},
+			iface: &logicalInterface{
+				hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}),
+				config: configMethodStatic{
+					addresses: []net.IPNet{net.IPNet{
+						IP:   net.ParseIP("1.2.3.4"),
+						Mask: net.IPMask(net.ParseIP("255.255.0.0")),
+					}},
+					nameservers: []net.IP{},
+					routes: []route{route{
+						net.IPNet{IP: net.IPv4zero, Mask: net.IPMask(net.IPv4zero)},
+						net.ParseIP("5.6.7.8"),
+					}},
+				},
+			},
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+				IPv6: &digitalocean.Address{
+					IPAddress: "bad",
+					Cidr:      16,
+				},
+			},
+			nss: []net.IP{},
+			err: errors.New("could not parse \"bad\" as IPv6 address"),
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+				IPv6: &digitalocean.Address{
+					IPAddress: "fe00::",
+					Cidr:      16,
+					Gateway:   "ignoreme",
+				},
+			},
+			nss: []net.IP{},
+			iface: &logicalInterface{
+				hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}),
+				config: configMethodStatic{
+					addresses: []net.IPNet{net.IPNet{
+						IP:   net.ParseIP("fe00::"),
+						Mask: net.IPMask(net.ParseIP("ffff::")),
+					}},
+					nameservers: []net.IP{},
+					routes:      []route{},
+				},
+			},
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+				IPv6: &digitalocean.Address{
+					IPAddress: "fe00::",
+					Cidr:      16,
+					Gateway:   "bad",
+				},
+			},
+			useRoute: true,
+			nss:      []net.IP{},
+			err:      errors.New("could not parse \"bad\" as IPv6 gateway"),
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+				IPv6: &digitalocean.Address{
+					IPAddress: "fe00::",
+					Cidr:      16,
+					Gateway:   "fe00:1234::",
+				},
+			},
+			useRoute: true,
+			nss:      []net.IP{},
+			iface: &logicalInterface{
+				hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}),
+				config: configMethodStatic{
+					addresses: []net.IPNet{net.IPNet{
+						IP:   net.ParseIP("fe00::"),
+						Mask: net.IPMask(net.ParseIP("ffff::")),
+					}},
+					nameservers: []net.IP{},
+					routes: []route{route{
+						net.IPNet{IP: net.IPv6zero, Mask: net.IPMask(net.IPv6zero)},
+						net.ParseIP("fe00:1234::"),
+					}},
+				},
+			},
+		},
+
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+				AnchorIPv4: &digitalocean.Address{
+					IPAddress: "bad",
+					Netmask:   "255.255.0.0",
+				},
+			},
+			nss: []net.IP{},
+			err: errors.New("could not parse \"bad\" as anchor IPv4 address"),
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+				AnchorIPv4: &digitalocean.Address{
+					IPAddress: "1.2.3.4",
+					Netmask:   "bad",
+				},
+			},
+			nss: []net.IP{},
+			err: errors.New("could not parse \"bad\" as anchor IPv4 mask"),
+		},
+		{
+			cfg: digitalocean.Interface{
+				MAC: "01:23:45:67:89:AB",
+				IPv4: &digitalocean.Address{
+					IPAddress: "1.2.3.4",
+					Netmask:   "255.255.0.0",
+					Gateway:   "5.6.7.8",
+				},
+				AnchorIPv4: &digitalocean.Address{
+					IPAddress: "7.8.9.10",
+					Netmask:   "255.255.0.0",
+				},
+			},
+			useRoute: true,
+			nss:      []net.IP{},
+			iface: &logicalInterface{
+				hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}),
+				config: configMethodStatic{
+					addresses: []net.IPNet{
+						{
+							IP:   net.ParseIP("1.2.3.4"),
+							Mask: net.IPMask(net.ParseIP("255.255.0.0")),
+						},
+						{
+							IP:   net.ParseIP("7.8.9.10"),
+							Mask: net.IPMask(net.ParseIP("255.255.0.0")),
+						},
+					},
+					nameservers: []net.IP{},
+					routes: []route{
+						{
+							destination: net.IPNet{IP: net.IPv4zero, Mask: net.IPMask(net.IPv4zero)},
+							gateway:     net.ParseIP("5.6.7.8"),
+						},
+						{
+							destination: net.IPNet{IP: net.IPv4zero, Mask: net.IPMask(net.IPv4zero)},
+						},
+					},
+				},
+			},
+		},
+	} {
+		iface, err := parseInterface(tt.cfg, tt.nss, tt.useRoute)
+		if !errorsEqual(tt.err, err) {
+			t.Fatalf("bad error (%+v): want %q, got %q", tt.cfg, tt.err, err)
+		}
+		if !reflect.DeepEqual(tt.iface, iface) {
+			t.Fatalf("bad interface (%+v): want %#v, got %#v", tt.cfg, tt.iface, iface)
+		}
+	}
+}
      +
+// TestParseInterfaces checks that public interfaces receive the supplied
+// nameservers while private interfaces do not, and that an invalid MAC in
+// either list propagates as an error.
+func TestParseInterfaces(t *testing.T) {
+	for _, tt := range []struct {
+		cfg    digitalocean.Interfaces
+		nss    []net.IP
+		ifaces []InterfaceGenerator
+		err    error
+	}{
+		{
+			ifaces: []InterfaceGenerator{},
+		},
+		{
+			cfg: digitalocean.Interfaces{
+				Public: []digitalocean.Interface{{MAC: "01:23:45:67:89:AB"}},
+			},
+			ifaces: []InterfaceGenerator{
+				&physicalInterface{logicalInterface{
+					hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}),
+					config: configMethodStatic{
+						addresses:   []net.IPNet{},
+						nameservers: []net.IP{},
+						routes:      []route{},
+					},
+				}},
+			},
+		},
+		{
+			cfg: digitalocean.Interfaces{
+				Private: []digitalocean.Interface{{MAC: "01:23:45:67:89:AB"}},
+			},
+			ifaces: []InterfaceGenerator{
+				&physicalInterface{logicalInterface{
+					hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}),
+					config: configMethodStatic{
+						addresses:   []net.IPNet{},
+						nameservers: []net.IP{},
+						routes:      []route{},
+					},
+				}},
+			},
+		},
+		{
+			cfg: digitalocean.Interfaces{
+				Public: []digitalocean.Interface{{MAC: "01:23:45:67:89:AB"}},
+			},
+			nss: []net.IP{net.ParseIP("1.2.3.4")},
+			ifaces: []InterfaceGenerator{
+				&physicalInterface{logicalInterface{
+					hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}),
+					config: configMethodStatic{
+						addresses:   []net.IPNet{},
+						nameservers: []net.IP{net.ParseIP("1.2.3.4")},
+						routes:      []route{},
+					},
+				}},
+			},
+		},
+		{
+			cfg: digitalocean.Interfaces{
+				Private: []digitalocean.Interface{{MAC: "01:23:45:67:89:AB"}},
+			},
+			nss: []net.IP{net.ParseIP("1.2.3.4")},
+			ifaces: []InterfaceGenerator{
+				&physicalInterface{logicalInterface{
+					hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}),
+					config: configMethodStatic{
+						addresses:   []net.IPNet{},
+						nameservers: []net.IP{},
+						routes:      []route{},
+					},
+				}},
+			},
+		},
+		{
+			cfg: digitalocean.Interfaces{
+				Public: []digitalocean.Interface{{MAC: "bad"}},
+			},
+			err: mkInvalidMAC(),
+		},
+		{
+			cfg: digitalocean.Interfaces{
+				Private: []digitalocean.Interface{{MAC: "bad"}},
+			},
+			err: mkInvalidMAC(),
+		},
+	} {
+		ifaces, err := parseInterfaces(tt.cfg, tt.nss)
+		if !errorsEqual(tt.err, err) {
+			t.Fatalf("bad error (%+v): want %q, got %q", tt.cfg, tt.err, err)
+		}
+		if !reflect.DeepEqual(tt.ifaces, ifaces) {
+			t.Fatalf("bad interfaces (%+v): want %#v, got %#v", tt.cfg, tt.ifaces, ifaces)
+		}
+	}
+}
      +
+// TestProcessDigitalOceanNetconf verifies error propagation from both the
+// nameserver and interface parsing stages, plus the empty-metadata case.
+func TestProcessDigitalOceanNetconf(t *testing.T) {
+	for _, tt := range []struct {
+		cfg    digitalocean.Metadata
+		ifaces []InterfaceGenerator
+		err    error
+	}{
+		{
+			cfg: digitalocean.Metadata{
+				DNS: digitalocean.DNS{
+					Nameservers: []string{"bad"},
+				},
+			},
+			err: errors.New("could not parse \"bad\" as nameserver IP address"),
+		},
+		{
+			cfg: digitalocean.Metadata{
+				Interfaces: digitalocean.Interfaces{
+					Public: []digitalocean.Interface{
+						digitalocean.Interface{
+							IPv4: &digitalocean.Address{
+								IPAddress: "bad",
+							},
+						},
+					},
+				},
+			},
+			err: errors.New("could not parse \"bad\" as IPv4 address"),
+		},
+		{
+			ifaces: []InterfaceGenerator{},
+		},
+	} {
+		ifaces, err := ProcessDigitalOceanNetconf(tt.cfg)
+		if !errorsEqual(tt.err, err) {
+			t.Fatalf("bad error (%q): want %q, got %q", tt.cfg, tt.err, err)
+		}
+		if !reflect.DeepEqual(tt.ifaces, ifaces) {
+			t.Fatalf("bad interfaces (%q): want %#v, got %#v", tt.cfg, tt.ifaces, ifaces)
+		}
+	}
+}
      +
+// errorsEqual reports whether two errors are equivalent for test purposes:
+// both nil, or both non-nil with identical Error() strings.
+func errorsEqual(a, b error) bool {
+	if a == nil && b == nil {
+		return true
+	}
+	if (a != nil && b == nil) || (a == nil && b != nil) {
+		return false
+	}
+	return (a.Error() == b.Error())
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/interface.go b/vendor/github.com/coreos/coreos-cloudinit/network/interface.go
      new file mode 100644
      index 00000000..73a83cbc
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/interface.go
      @@ -0,0 +1,340 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package network
      +
      +import (
      +	"fmt"
      +	"net"
      +	"sort"
      +	"strconv"
      +	"strings"
      +)
      +
+// InterfaceGenerator produces the textual contents of the systemd-networkd
+// unit files (.netdev, .link, .network) describing one network interface,
+// along with its name, unit filename, type, and kernel modprobe parameters.
+type InterfaceGenerator interface {
+	Name() string
+	Filename() string
+	Netdev() string
+	Link() string
+	Network() string
+	Type() string
+	ModprobeParams() string
+}
+
+// networkInterface extends InterfaceGenerator with the parent/child
+// bookkeeping used internally while wiring up bonds and VLANs.
+type networkInterface interface {
+	InterfaceGenerator
+	Children() []networkInterface
+	setConfigDepth(int)
+}
      +
+// logicalInterface is the shared base for all interface kinds. It holds the
+// identifying name and/or MAC, the configuration method, any dependent
+// child interfaces (VLANs, bonds), and the depth used to order unit files.
+type logicalInterface struct {
+	name        string
+	hwaddr      net.HardwareAddr
+	config      configMethod
+	children    []networkInterface
+	configDepth int
+}
+
+// Name returns the interface name (may be empty if only a MAC is known).
+func (i *logicalInterface) Name() string {
+	return i.name
+}
+
+// Network renders the systemd .network unit: a [Match] section (by name
+// and/or MAC), [Network] entries for child VLANs/bonds and DNS servers,
+// then [Address]/[Route] sections for static config or DHCP=true for DHCP.
+func (i *logicalInterface) Network() string {
+	config := fmt.Sprintln("[Match]")
+	if i.name != "" {
+		config += fmt.Sprintf("Name=%s\n", i.name)
+	}
+	if i.hwaddr != nil {
+		config += fmt.Sprintf("MACAddress=%s\n", i.hwaddr)
+	}
+	config += "\n[Network]\n"
+
+	for _, child := range i.children {
+		switch iface := child.(type) {
+		case *vlanInterface:
+			config += fmt.Sprintf("VLAN=%s\n", iface.name)
+		case *bondInterface:
+			config += fmt.Sprintf("Bond=%s\n", iface.name)
+		}
+	}
+
+	switch conf := i.config.(type) {
+	case configMethodStatic:
+		for _, nameserver := range conf.nameservers {
+			config += fmt.Sprintf("DNS=%s\n", nameserver)
+		}
+		for _, addr := range conf.addresses {
+			config += fmt.Sprintf("\n[Address]\nAddress=%s\n", addr.String())
+		}
+		for _, route := range conf.routes {
+			config += fmt.Sprintf("\n[Route]\nDestination=%s\nGateway=%s\n", route.destination.String(), route.gateway)
+		}
+	case configMethodDHCP:
+		config += "DHCP=true\n"
+	}
+
+	return config
+}
+
+// Link returns an empty .link unit; the base type generates none.
+func (i *logicalInterface) Link() string {
+	return ""
+}
+
+// Netdev returns an empty .netdev unit; virtual types override this.
+func (i *logicalInterface) Netdev() string {
+	return ""
+}
+
+// Filename builds the unit filename as "<depth>-<name or MAC>", with the
+// zero-padded hex depth ensuring parents sort (and apply) before children.
+func (i *logicalInterface) Filename() string {
+	name := i.name
+	if name == "" {
+		name = i.hwaddr.String()
+	}
+	return fmt.Sprintf("%02x-%s", i.configDepth, name)
+}
+
+// Children returns the interfaces that depend on this one.
+func (i *logicalInterface) Children() []networkInterface {
+	return i.children
+}
+
+// ModprobeParams returns no module parameters for the base type.
+func (i *logicalInterface) ModprobeParams() string {
+	return ""
+}
+
+// setConfigDepth records this interface's depth in the dependency tree.
+func (i *logicalInterface) setConfigDepth(depth int) {
+	i.configDepth = depth
+}
      +
+// physicalInterface represents a real (hardware) NIC; it inherits all unit
+// generation from logicalInterface.
+type physicalInterface struct {
+	logicalInterface
+}
+
+// Type identifies this generator as a physical interface.
+func (p *physicalInterface) Type() string {
+	return "physical"
+}
      +
+// bondInterface represents a bonded interface with its slave interface
+// names and bonding driver options.
+type bondInterface struct {
+	logicalInterface
+	slaves  []string
+	options map[string]string
+}
+
+// Netdev renders the .netdev unit declaring the bond device and its [Bond]
+// options (emitted in sorted-key order for deterministic output).
+func (b *bondInterface) Netdev() string {
+	config := fmt.Sprintf("[NetDev]\nKind=bond\nName=%s\n", b.name)
+	if b.hwaddr != nil {
+		config += fmt.Sprintf("MACAddress=%s\n", b.hwaddr.String())
+	}
+
+	config += fmt.Sprintf("\n[Bond]\n")
+	for _, name := range sortedKeys(b.options) {
+		config += fmt.Sprintf("%s=%s\n", name, b.options[name])
+	}
+
+	return config
+}
+
+// Type identifies this generator as a bond interface.
+func (b *bondInterface) Type() string {
+	return "bond"
+}
+
+// ModprobeParams renders the bond options as space-separated "k=v" pairs
+// suitable for passing to modprobe for the bonding module.
+func (b *bondInterface) ModprobeParams() string {
+	params := ""
+	for _, name := range sortedKeys(b.options) {
+		params += fmt.Sprintf("%s=%s ", name, b.options[name])
+	}
+	params = strings.TrimSuffix(params, " ")
+	return params
+}
      +
+// vlanInterface represents an 802.1q VLAN with its numeric id and the
+// underlying (raw) device it is layered on.
+type vlanInterface struct {
+	logicalInterface
+	id        int
+	rawDevice string
+}
+
+// Netdev renders the .netdev unit declaring the VLAN device, including an
+// optional MACAddress override taken from the config method's hwaddress.
+func (v *vlanInterface) Netdev() string {
+	config := fmt.Sprintf("[NetDev]\nKind=vlan\nName=%s\n", v.name)
+	switch c := v.config.(type) {
+	case configMethodStatic:
+		if c.hwaddress != nil {
+			config += fmt.Sprintf("MACAddress=%s\n", c.hwaddress)
+		}
+	case configMethodDHCP:
+		if c.hwaddress != nil {
+			config += fmt.Sprintf("MACAddress=%s\n", c.hwaddress)
+		}
+	}
+	config += fmt.Sprintf("\n[VLAN]\nId=%d\n", v.id)
+	return config
+}
+
+// Type identifies this generator as a VLAN interface.
+func (v *vlanInterface) Type() string {
+	return "vlan"
+}
      +
+// buildInterfaces turns parsed interface stanzas into a deterministic,
+// name-sorted list of InterfaceGenerators: it creates the interface map
+// (implicitly adding bond slaves and VLAN raw devices), links parents to
+// children, and assigns config depths for unit-file ordering.
+func buildInterfaces(stanzas []*stanzaInterface) []InterfaceGenerator {
+	interfaceMap := createInterfaces(stanzas)
+	linkAncestors(interfaceMap)
+	markConfigDepths(interfaceMap)
+
+	interfaces := make([]InterfaceGenerator, 0, len(interfaceMap))
+	for _, name := range sortedInterfaces(interfaceMap) {
+		interfaces = append(interfaces, interfaceMap[name])
+	}
+
+	return interfaces
+}
      +
+// createInterfaces builds a name-keyed map of networkInterfaces from the
+// parsed stanzas. Bond slaves and VLAN raw devices that have no stanza of
+// their own are synthesized as manually-configured physical interfaces;
+// loopback stanzas are skipped entirely.
+func createInterfaces(stanzas []*stanzaInterface) map[string]networkInterface {
+	interfaceMap := make(map[string]networkInterface)
+	for _, iface := range stanzas {
+		switch iface.kind {
+		case interfaceBond:
+			// Only the first value of each recognized bond-* option is kept.
+			bondOptions := make(map[string]string)
+			for _, k := range []string{"mode", "miimon", "lacp-rate"} {
+				if v, ok := iface.options["bond-"+k]; ok && len(v) > 0 {
+					bondOptions[k] = v[0]
+				}
+			}
+			interfaceMap[iface.name] = &bondInterface{
+				logicalInterface{
+					name:     iface.name,
+					config:   iface.configMethod,
+					children: []networkInterface{},
+				},
+				iface.options["bond-slaves"],
+				bondOptions,
+			}
+			// Ensure every slave exists in the map, defaulting to a
+			// manual physical interface.
+			for _, slave := range iface.options["bond-slaves"] {
+				if _, ok := interfaceMap[slave]; !ok {
+					interfaceMap[slave] = &physicalInterface{
+						logicalInterface{
+							name:     slave,
+							config:   configMethodManual{},
+							children: []networkInterface{},
+						},
+					}
+				}
+			}
+
+		case interfacePhysical:
+			// Loopback interfaces need no generated units.
+			if _, ok := iface.configMethod.(configMethodLoopback); ok {
+				continue
+			}
+			interfaceMap[iface.name] = &physicalInterface{
+				logicalInterface{
+					name:     iface.name,
+					config:   iface.configMethod,
+					children: []networkInterface{},
+				},
+			}
+
+		case interfaceVLAN:
+			var rawDevice string
+			// NOTE(review): indexes options["id"][0] without a length check
+			// (panics if absent) and discards the Atoi error (id stays 0) —
+			// presumably the stanza parser guarantees "id"; confirm upstream.
+			id, _ := strconv.Atoi(iface.options["id"][0])
+			if device := iface.options["raw_device"]; len(device) == 1 {
+				rawDevice = device[0]
+				if _, ok := interfaceMap[rawDevice]; !ok {
+					interfaceMap[rawDevice] = &physicalInterface{
+						logicalInterface{
+							name:     rawDevice,
+							config:   configMethodManual{},
+							children: []networkInterface{},
+						},
+					}
+				}
+			}
+			interfaceMap[iface.name] = &vlanInterface{
+				logicalInterface{
+					name:     iface.name,
+					config:   iface.configMethod,
+					children: []networkInterface{},
+				},
+				id,
+				rawDevice,
+			}
+		}
+	}
+	return interfaceMap
+}
      +
      +func linkAncestors(interfaceMap map[string]networkInterface) {
      +	for _, name := range sortedInterfaces(interfaceMap) {
      +		iface := interfaceMap[name]
      +		switch i := iface.(type) {
      +		case *vlanInterface:
      +			if parent, ok := interfaceMap[i.rawDevice]; ok {
      +				switch p := parent.(type) {
      +				case *physicalInterface:
      +					p.children = append(p.children, iface)
      +				case *bondInterface:
      +					p.children = append(p.children, iface)
      +				}
      +			}
      +		case *bondInterface:
      +			for _, slave := range i.slaves {
      +				if parent, ok := interfaceMap[slave]; ok {
      +					switch p := parent.(type) {
      +					case *physicalInterface:
      +						p.children = append(p.children, iface)
      +					case *bondInterface:
      +						p.children = append(p.children, iface)
      +					}
      +				}
      +			}
      +		}
      +	}
      +}
      +
      +func markConfigDepths(interfaceMap map[string]networkInterface) {
      +	rootInterfaceMap := make(map[string]networkInterface)
      +	for k, v := range interfaceMap {
      +		rootInterfaceMap[k] = v
      +	}
      +
      +	for _, iface := range interfaceMap {
      +		for _, child := range iface.Children() {
      +			delete(rootInterfaceMap, child.Name())
      +		}
      +	}
      +	for _, iface := range rootInterfaceMap {
      +		setDepth(iface)
      +	}
      +}
      +
      +func setDepth(iface networkInterface) int {
      +	maxDepth := 0
      +	for _, child := range iface.Children() {
      +		if depth := setDepth(child); depth > maxDepth {
      +			maxDepth = depth
      +		}
      +	}
      +	iface.setConfigDepth(maxDepth)
      +	return (maxDepth + 1)
      +}
      +
      +func sortedKeys(m map[string]string) (keys []string) {
      +	for key := range m {
      +		keys = append(keys, key)
      +	}
      +	sort.Strings(keys)
      +	return
      +}
      +
      +func sortedInterfaces(m map[string]networkInterface) (keys []string) {
      +	for key := range m {
      +		keys = append(keys, key)
      +	}
      +	sort.Strings(keys)
      +	return
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/interface_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/interface_test.go
      new file mode 100644
      index 00000000..5cafc939
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/interface_test.go
      @@ -0,0 +1,368 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package network
      +
      +import (
      +	"net"
      +	"reflect"
      +	"testing"
      +)
      +
      +func TestInterfaceGenerators(t *testing.T) {
      +	for _, tt := range []struct {
      +		name    string
      +		netdev  string
      +		link    string
      +		network string
      +		kind    string
      +		iface   InterfaceGenerator
      +	}{
      +		{
      +			name:    "",
      +			network: "[Match]\nMACAddress=00:01:02:03:04:05\n\n[Network]\n",
      +			kind:    "physical",
      +			iface: &physicalInterface{logicalInterface{
      +				hwaddr: net.HardwareAddr([]byte{0, 1, 2, 3, 4, 5}),
      +			}},
      +		},
      +		{
      +			name:    "testname",
      +			network: "[Match]\nName=testname\n\n[Network]\nBond=testbond1\nVLAN=testvlan1\nVLAN=testvlan2\n",
      +			kind:    "physical",
      +			iface: &physicalInterface{logicalInterface{
      +				name: "testname",
      +				children: []networkInterface{
      +					&bondInterface{logicalInterface: logicalInterface{name: "testbond1"}},
      +					&vlanInterface{logicalInterface: logicalInterface{name: "testvlan1"}, id: 1},
      +					&vlanInterface{logicalInterface: logicalInterface{name: "testvlan2"}, id: 1},
      +				},
      +			}},
      +		},
      +		{
      +			name:    "testname",
      +			netdev:  "[NetDev]\nKind=bond\nName=testname\n\n[Bond]\n",
      +			network: "[Match]\nName=testname\n\n[Network]\nBond=testbond1\nVLAN=testvlan1\nVLAN=testvlan2\nDHCP=true\n",
      +			kind:    "bond",
      +			iface: &bondInterface{logicalInterface: logicalInterface{
      +				name:   "testname",
      +				config: configMethodDHCP{},
      +				children: []networkInterface{
      +					&bondInterface{logicalInterface: logicalInterface{name: "testbond1"}},
      +					&vlanInterface{logicalInterface: logicalInterface{name: "testvlan1"}, id: 1},
      +					&vlanInterface{logicalInterface: logicalInterface{name: "testvlan2"}, id: 1},
      +				},
      +			}},
      +		},
      +		{
      +			name:    "testname",
      +			netdev:  "[NetDev]\nKind=vlan\nName=testname\n\n[VLAN]\nId=1\n",
      +			network: "[Match]\nName=testname\n\n[Network]\n",
      +			kind:    "vlan",
      +			iface:   &vlanInterface{logicalInterface{name: "testname"}, 1, ""},
      +		},
      +		{
      +			name:    "testname",
      +			netdev:  "[NetDev]\nKind=vlan\nName=testname\nMACAddress=00:01:02:03:04:05\n\n[VLAN]\nId=1\n",
      +			network: "[Match]\nName=testname\n\n[Network]\n",
      +			kind:    "vlan",
      +			iface:   &vlanInterface{logicalInterface{name: "testname", config: configMethodStatic{hwaddress: net.HardwareAddr([]byte{0, 1, 2, 3, 4, 5})}}, 1, ""},
      +		},
      +		{
      +			name:    "testname",
      +			netdev:  "[NetDev]\nKind=vlan\nName=testname\nMACAddress=00:01:02:03:04:05\n\n[VLAN]\nId=1\n",
      +			network: "[Match]\nName=testname\n\n[Network]\nDHCP=true\n",
      +			kind:    "vlan",
      +			iface:   &vlanInterface{logicalInterface{name: "testname", config: configMethodDHCP{hwaddress: net.HardwareAddr([]byte{0, 1, 2, 3, 4, 5})}}, 1, ""},
      +		},
      +		{
      +			name:    "testname",
      +			netdev:  "[NetDev]\nKind=vlan\nName=testname\n\n[VLAN]\nId=0\n",
      +			network: "[Match]\nName=testname\n\n[Network]\nDNS=8.8.8.8\n\n[Address]\nAddress=192.168.1.100/24\n\n[Route]\nDestination=0.0.0.0/0\nGateway=1.2.3.4\n",
      +			kind:    "vlan",
      +			iface: &vlanInterface{logicalInterface: logicalInterface{
      +				name: "testname",
      +				config: configMethodStatic{
      +					addresses:   []net.IPNet{{IP: []byte{192, 168, 1, 100}, Mask: []byte{255, 255, 255, 0}}},
      +					nameservers: []net.IP{[]byte{8, 8, 8, 8}},
      +					routes:      []route{route{destination: net.IPNet{IP: []byte{0, 0, 0, 0}, Mask: []byte{0, 0, 0, 0}}, gateway: []byte{1, 2, 3, 4}}},
      +				},
      +			}},
      +		},
      +	} {
      +		if name := tt.iface.Name(); name != tt.name {
      +			t.Fatalf("bad name (%q): want %q, got %q", tt.iface, tt.name, name)
      +		}
      +		if netdev := tt.iface.Netdev(); netdev != tt.netdev {
      +			t.Fatalf("bad netdev (%q): want %q, got %q", tt.iface, tt.netdev, netdev)
      +		}
      +		if link := tt.iface.Link(); link != tt.link {
      +			t.Fatalf("bad link (%q): want %q, got %q", tt.iface, tt.link, link)
      +		}
      +		if network := tt.iface.Network(); network != tt.network {
      +			t.Fatalf("bad network (%q): want %q, got %q", tt.iface, tt.network, network)
      +		}
      +		if kind := tt.iface.Type(); kind != tt.kind {
      +			t.Fatalf("bad type (%q): want %q, got %q", tt.iface, tt.kind, kind)
      +		}
      +	}
      +}
      +
      +func TestModprobeParams(t *testing.T) {
      +	for _, tt := range []struct {
      +		i InterfaceGenerator
      +		p string
      +	}{
      +		{
      +			i: &physicalInterface{},
      +			p: "",
      +		},
      +		{
      +			i: &vlanInterface{},
      +			p: "",
      +		},
      +		{
      +			i: &bondInterface{
      +				logicalInterface{},
      +				nil,
      +				map[string]string{
      +					"a": "1",
      +					"b": "2",
      +				},
      +			},
      +			p: "a=1 b=2",
      +		},
      +	} {
      +		if p := tt.i.ModprobeParams(); p != tt.p {
      +			t.Fatalf("bad params (%q): got %s, want %s", tt.i, p, tt.p)
      +		}
      +	}
      +}
      +
      +func TestBuildInterfacesLo(t *testing.T) {
      +	stanzas := []*stanzaInterface{
      +		&stanzaInterface{
      +			name:         "lo",
      +			kind:         interfacePhysical,
      +			auto:         false,
      +			configMethod: configMethodLoopback{},
      +			options:      map[string][]string{},
      +		},
      +	}
      +	interfaces := buildInterfaces(stanzas)
      +	if len(interfaces) != 0 {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestBuildInterfacesBlindBond(t *testing.T) {
      +	stanzas := []*stanzaInterface{
      +		{
      +			name:         "bond0",
      +			kind:         interfaceBond,
      +			auto:         false,
      +			configMethod: configMethodManual{},
      +			options: map[string][]string{
      +				"bond-slaves": []string{"eth0"},
      +			},
      +		},
      +	}
      +	interfaces := buildInterfaces(stanzas)
      +	bond0 := &bondInterface{
      +		logicalInterface{
      +			name:        "bond0",
      +			config:      configMethodManual{},
      +			children:    []networkInterface{},
      +			configDepth: 0,
      +		},
      +		[]string{"eth0"},
      +		map[string]string{},
      +	}
      +	eth0 := &physicalInterface{
      +		logicalInterface{
      +			name:        "eth0",
      +			config:      configMethodManual{},
      +			children:    []networkInterface{bond0},
      +			configDepth: 1,
      +		},
      +	}
      +	expect := []InterfaceGenerator{bond0, eth0}
      +	if !reflect.DeepEqual(interfaces, expect) {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestBuildInterfacesBlindVLAN(t *testing.T) {
      +	stanzas := []*stanzaInterface{
      +		{
      +			name:         "vlan0",
      +			kind:         interfaceVLAN,
      +			auto:         false,
      +			configMethod: configMethodManual{},
      +			options: map[string][]string{
      +				"id":         []string{"0"},
      +				"raw_device": []string{"eth0"},
      +			},
      +		},
      +	}
      +	interfaces := buildInterfaces(stanzas)
      +	vlan0 := &vlanInterface{
      +		logicalInterface{
      +			name:        "vlan0",
      +			config:      configMethodManual{},
      +			children:    []networkInterface{},
      +			configDepth: 0,
      +		},
      +		0,
      +		"eth0",
      +	}
      +	eth0 := &physicalInterface{
      +		logicalInterface{
      +			name:        "eth0",
      +			config:      configMethodManual{},
      +			children:    []networkInterface{vlan0},
      +			configDepth: 1,
      +		},
      +	}
      +	expect := []InterfaceGenerator{eth0, vlan0}
      +	if !reflect.DeepEqual(interfaces, expect) {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestBuildInterfaces(t *testing.T) {
      +	stanzas := []*stanzaInterface{
      +		&stanzaInterface{
      +			name:         "eth0",
      +			kind:         interfacePhysical,
      +			auto:         false,
      +			configMethod: configMethodManual{},
      +			options:      map[string][]string{},
      +		},
      +		&stanzaInterface{
      +			name:         "bond0",
      +			kind:         interfaceBond,
      +			auto:         false,
      +			configMethod: configMethodManual{},
      +			options: map[string][]string{
      +				"bond-slaves": []string{"eth0"},
      +				"bond-mode":   []string{"4"},
      +				"bond-miimon": []string{"100"},
      +			},
      +		},
      +		&stanzaInterface{
      +			name:         "bond1",
      +			kind:         interfaceBond,
      +			auto:         false,
      +			configMethod: configMethodManual{},
      +			options: map[string][]string{
      +				"bond-slaves": []string{"bond0"},
      +			},
      +		},
      +		&stanzaInterface{
      +			name:         "vlan0",
      +			kind:         interfaceVLAN,
      +			auto:         false,
      +			configMethod: configMethodManual{},
      +			options: map[string][]string{
      +				"id":         []string{"0"},
      +				"raw_device": []string{"eth0"},
      +			},
      +		},
      +		&stanzaInterface{
      +			name:         "vlan1",
      +			kind:         interfaceVLAN,
      +			auto:         false,
      +			configMethod: configMethodManual{},
      +			options: map[string][]string{
      +				"id":         []string{"1"},
      +				"raw_device": []string{"bond0"},
      +			},
      +		},
      +	}
      +	interfaces := buildInterfaces(stanzas)
      +	vlan1 := &vlanInterface{
      +		logicalInterface{
      +			name:        "vlan1",
      +			config:      configMethodManual{},
      +			children:    []networkInterface{},
      +			configDepth: 0,
      +		},
      +		1,
      +		"bond0",
      +	}
      +	vlan0 := &vlanInterface{
      +		logicalInterface{
      +			name:        "vlan0",
      +			config:      configMethodManual{},
      +			children:    []networkInterface{},
      +			configDepth: 0,
      +		},
      +		0,
      +		"eth0",
      +	}
      +	bond1 := &bondInterface{
      +		logicalInterface{
      +			name:        "bond1",
      +			config:      configMethodManual{},
      +			children:    []networkInterface{},
      +			configDepth: 0,
      +		},
      +		[]string{"bond0"},
      +		map[string]string{},
      +	}
      +	bond0 := &bondInterface{
      +		logicalInterface{
      +			name:        "bond0",
      +			config:      configMethodManual{},
      +			children:    []networkInterface{bond1, vlan1},
      +			configDepth: 1,
      +		},
      +		[]string{"eth0"},
      +		map[string]string{
      +			"mode":   "4",
      +			"miimon": "100",
      +		},
      +	}
      +	eth0 := &physicalInterface{
      +		logicalInterface{
      +			name:        "eth0",
      +			config:      configMethodManual{},
      +			children:    []networkInterface{bond0, vlan0},
      +			configDepth: 2,
      +		},
      +	}
      +	expect := []InterfaceGenerator{bond0, bond1, eth0, vlan0, vlan1}
      +	if !reflect.DeepEqual(interfaces, expect) {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestFilename(t *testing.T) {
      +	for _, tt := range []struct {
      +		i logicalInterface
      +		f string
      +	}{
      +		{logicalInterface{name: "iface", configDepth: 0}, "00-iface"},
      +		{logicalInterface{name: "iface", configDepth: 9}, "09-iface"},
      +		{logicalInterface{name: "iface", configDepth: 10}, "0a-iface"},
      +		{logicalInterface{name: "iface", configDepth: 53}, "35-iface"},
      +		{logicalInterface{hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), configDepth: 1}, "01-01:23:45:67:89:ab"},
      +		{logicalInterface{name: "iface", hwaddr: net.HardwareAddr([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}), configDepth: 1}, "01-iface"},
      +	} {
      +		if tt.i.Filename() != tt.f {
      +			t.Fatalf("bad filename (%q): got %q, want %q", tt.i, tt.i.Filename(), tt.f)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/is_go15_false_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/is_go15_false_test.go
      new file mode 100644
      index 00000000..85d5f0db
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/is_go15_false_test.go
      @@ -0,0 +1,5 @@
      +// +build !go1.5
      +
      +package network
      +
      +const isGo15 = false
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/is_go15_true_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/is_go15_true_test.go
      new file mode 100644
      index 00000000..953836de
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/is_go15_true_test.go
      @@ -0,0 +1,5 @@
      +// +build go1.5
      +
      +package network
      +
      +const isGo15 = true
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/packet.go b/vendor/github.com/coreos/coreos-cloudinit/network/packet.go
      new file mode 100644
      index 00000000..a2834ff9
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/packet.go
      @@ -0,0 +1,115 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package network
      +
      +import (
      +	"net"
      +
      +	"github.com/coreos/coreos-cloudinit/datasource/metadata/packet"
      +)
      +
      +func ProcessPacketNetconf(netdata packet.NetworkData) ([]InterfaceGenerator, error) {
      +	var nameservers []net.IP
      +	if netdata.DNS != nil {
      +		nameservers = netdata.DNS
      +	} else {
      +		nameservers = append(nameservers, net.ParseIP("8.8.8.8"), net.ParseIP("8.8.4.4"))
      +	}
      +
      +	generators, err := parseNetwork(netdata, nameservers)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return generators, nil
      +}
      +
      +func parseNetwork(netdata packet.NetworkData, nameservers []net.IP) ([]InterfaceGenerator, error) {
      +	var interfaces []InterfaceGenerator
      +	var addresses []net.IPNet
      +	var routes []route
      +	for _, netblock := range netdata.Netblocks {
      +		addresses = append(addresses, net.IPNet{
      +			IP:   netblock.Address,
      +			Mask: net.IPMask(netblock.Netmask),
      +		})
      +		if netblock.Public == false {
      +			routes = append(routes, route{
      +				destination: net.IPNet{
      +					IP:   net.IPv4(10, 0, 0, 0),
      +					Mask: net.IPv4Mask(255, 0, 0, 0),
      +				},
      +				gateway: netblock.Gateway,
      +			})
      +		} else {
      +			if netblock.AddressFamily == 4 {
      +				routes = append(routes, route{
      +					destination: net.IPNet{
      +						IP:   net.IPv4zero,
      +						Mask: net.IPMask(net.IPv4zero),
      +					},
      +					gateway: netblock.Gateway,
      +				})
      +			} else {
      +				routes = append(routes, route{
      +					destination: net.IPNet{
      +						IP:   net.IPv6zero,
      +						Mask: net.IPMask(net.IPv6zero),
      +					},
      +					gateway: netblock.Gateway,
      +				})
      +			}
      +		}
      +	}
      +
      +	bond := bondInterface{
      +		logicalInterface: logicalInterface{
      +			name: "bond0",
      +			config: configMethodStatic{
      +				addresses:   addresses,
      +				nameservers: nameservers,
      +				routes:      routes,
      +			},
      +		},
      +		options: map[string]string{
      +			"Mode":             "802.3ad",
      +			"LACPTransmitRate": "fast",
      +			"MIIMonitorSec":    ".2",
      +			"UpDelaySec":       ".2",
      +			"DownDelaySec":     ".2",
      +		},
      +	}
      +
      +	bond.hwaddr, _ = net.ParseMAC(netdata.Interfaces[0].Mac)
      +
      +	for index, iface := range netdata.Interfaces {
      +		bond.slaves = append(bond.slaves, iface.Name)
      +
      +		interfaces = append(interfaces, &physicalInterface{
      +			logicalInterface: logicalInterface{
      +				name: iface.Name,
      +				config: configMethodStatic{
      +					nameservers: nameservers,
      +				},
      +				children:    []networkInterface{&bond},
      +				configDepth: index,
      +			},
      +		})
      +	}
      +
      +	interfaces = append(interfaces, &bond)
      +
      +	return interfaces, nil
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/stanza.go b/vendor/github.com/coreos/coreos-cloudinit/network/stanza.go
      new file mode 100644
      index 00000000..88bca813
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/stanza.go
      @@ -0,0 +1,340 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package network
      +
      +import (
      +	"fmt"
      +	"net"
      +	"strconv"
      +	"strings"
      +)
      +
      +type stanza interface{}
      +
      +type stanzaAuto struct {
      +	interfaces []string
      +}
      +
      +type stanzaInterface struct {
      +	name         string
      +	kind         interfaceKind
      +	auto         bool
      +	configMethod configMethod
      +	options      map[string][]string
      +}
      +
      +type interfaceKind int
      +
      +const (
      +	interfaceBond = interfaceKind(iota)
      +	interfacePhysical
      +	interfaceVLAN
      +)
      +
      +type route struct {
      +	destination net.IPNet
      +	gateway     net.IP
      +}
      +
      +type configMethod interface{}
      +
      +type configMethodStatic struct {
      +	addresses   []net.IPNet
      +	nameservers []net.IP
      +	routes      []route
      +	hwaddress   net.HardwareAddr
      +}
      +
      +type configMethodLoopback struct{}
      +
      +type configMethodManual struct{}
      +
      +type configMethodDHCP struct {
      +	hwaddress net.HardwareAddr
      +}
      +
      +func parseStanzas(lines []string) (stanzas []stanza, err error) {
      +	rawStanzas, err := splitStanzas(lines)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	stanzas = make([]stanza, 0, len(rawStanzas))
      +	for _, rawStanza := range rawStanzas {
      +		if stanza, err := parseStanza(rawStanza); err == nil {
      +			stanzas = append(stanzas, stanza)
      +		} else {
      +			return nil, err
      +		}
      +	}
      +
      +	autos := make([]string, 0)
      +	interfaceMap := make(map[string]*stanzaInterface)
      +	for _, stanza := range stanzas {
      +		switch c := stanza.(type) {
      +		case *stanzaAuto:
      +			autos = append(autos, c.interfaces...)
      +		case *stanzaInterface:
      +			interfaceMap[c.name] = c
      +		}
      +	}
      +
      +	// Apply the auto attribute
      +	for _, auto := range autos {
      +		if iface, ok := interfaceMap[auto]; ok {
      +			iface.auto = true
      +		}
      +	}
      +
      +	return stanzas, nil
      +}
      +
      +func splitStanzas(lines []string) ([][]string, error) {
      +	var curStanza []string
      +	stanzas := make([][]string, 0)
      +	for _, line := range lines {
      +		if isStanzaStart(line) {
      +			if curStanza != nil {
      +				stanzas = append(stanzas, curStanza)
      +			}
      +			curStanza = []string{line}
      +		} else if curStanza != nil {
      +			curStanza = append(curStanza, line)
      +		} else {
      +			return nil, fmt.Errorf("missing stanza start %q", line)
      +		}
      +	}
      +
      +	if curStanza != nil {
      +		stanzas = append(stanzas, curStanza)
      +	}
      +
      +	return stanzas, nil
      +}
      +
      +func isStanzaStart(line string) bool {
      +	switch strings.Split(line, " ")[0] {
      +	case "auto":
      +		fallthrough
      +	case "iface":
      +		fallthrough
      +	case "mapping":
      +		return true
      +	}
      +
      +	if strings.HasPrefix(line, "allow-") {
      +		return true
      +	}
      +
      +	return false
      +}
      +
      +func parseStanza(rawStanza []string) (stanza, error) {
      +	if len(rawStanza) == 0 {
      +		panic("empty stanza")
      +	}
      +	tokens := strings.Fields(rawStanza[0])
      +	if len(tokens) < 2 {
      +		return nil, fmt.Errorf("malformed stanza start %q", rawStanza[0])
      +	}
      +
      +	kind := tokens[0]
      +	attributes := tokens[1:]
      +
      +	switch kind {
      +	case "auto":
      +		return parseAutoStanza(attributes, rawStanza[1:])
      +	case "iface":
      +		return parseInterfaceStanza(attributes, rawStanza[1:])
      +	default:
      +		return nil, fmt.Errorf("unknown stanza %q", kind)
      +	}
      +}
      +
      +func parseAutoStanza(attributes []string, options []string) (*stanzaAuto, error) {
      +	return &stanzaAuto{interfaces: attributes}, nil
      +}
      +
      +func parseInterfaceStanza(attributes []string, options []string) (*stanzaInterface, error) {
      +	if len(attributes) != 3 {
      +		return nil, fmt.Errorf("incorrect number of attributes")
      +	}
      +
      +	iface := attributes[0]
      +	confMethod := attributes[2]
      +
      +	optionMap := make(map[string][]string, 0)
      +	for _, option := range options {
      +		if strings.HasPrefix(option, "post-up") {
      +			tokens := strings.SplitAfterN(option, " ", 2)
      +			if len(tokens) != 2 {
      +				continue
      +			}
      +			if v, ok := optionMap["post-up"]; ok {
      +				optionMap["post-up"] = append(v, tokens[1])
      +			} else {
      +				optionMap["post-up"] = []string{tokens[1]}
      +			}
      +		} else if strings.HasPrefix(option, "pre-down") {
      +			tokens := strings.SplitAfterN(option, " ", 2)
      +			if len(tokens) != 2 {
      +				continue
      +			}
      +			if v, ok := optionMap["pre-down"]; ok {
      +				optionMap["pre-down"] = append(v, tokens[1])
      +			} else {
      +				optionMap["pre-down"] = []string{tokens[1]}
      +			}
      +		} else {
      +			tokens := strings.Fields(option)
      +			optionMap[tokens[0]] = tokens[1:]
      +		}
      +	}
      +
      +	var conf configMethod
      +	switch confMethod {
      +	case "static":
      +		config := configMethodStatic{
      +			addresses:   make([]net.IPNet, 1),
      +			routes:      make([]route, 0),
      +			nameservers: make([]net.IP, 0),
      +		}
      +		if addresses, ok := optionMap["address"]; ok {
      +			if len(addresses) == 1 {
      +				config.addresses[0].IP = net.ParseIP(addresses[0])
      +			}
      +		}
      +		if netmasks, ok := optionMap["netmask"]; ok {
      +			if len(netmasks) == 1 {
      +				config.addresses[0].Mask = net.IPMask(net.ParseIP(netmasks[0]).To4())
      +			}
      +		}
      +		if config.addresses[0].IP == nil || config.addresses[0].Mask == nil {
      +			return nil, fmt.Errorf("malformed static network config for %q", iface)
      +		}
      +		if gateways, ok := optionMap["gateway"]; ok {
      +			if len(gateways) == 1 {
      +				config.routes = append(config.routes, route{
      +					destination: net.IPNet{
      +						IP:   net.IPv4(0, 0, 0, 0),
      +						Mask: net.IPv4Mask(0, 0, 0, 0),
      +					},
      +					gateway: net.ParseIP(gateways[0]),
      +				})
      +			}
      +		}
      +		if hwaddress, err := parseHwaddress(optionMap, iface); err == nil {
      +			config.hwaddress = hwaddress
      +		} else {
      +			return nil, err
      +		}
      +		for _, nameserver := range optionMap["dns-nameservers"] {
      +			config.nameservers = append(config.nameservers, net.ParseIP(nameserver))
      +		}
      +		for _, postup := range optionMap["post-up"] {
      +			if strings.HasPrefix(postup, "route add") {
      +				route := route{}
      +				fields := strings.Fields(postup)
      +				for i, field := range fields[:len(fields)-1] {
      +					switch field {
      +					case "-net":
      +						if _, dst, err := net.ParseCIDR(fields[i+1]); err == nil {
      +							route.destination = *dst
      +						} else {
      +							route.destination.IP = net.ParseIP(fields[i+1])
      +						}
      +					case "netmask":
      +						route.destination.Mask = net.IPMask(net.ParseIP(fields[i+1]).To4())
      +					case "gw":
      +						route.gateway = net.ParseIP(fields[i+1])
      +					}
      +				}
      +				if route.destination.IP != nil && route.destination.Mask != nil && route.gateway != nil {
      +					config.routes = append(config.routes, route)
      +				}
      +			}
      +		}
      +		conf = config
      +	case "loopback":
      +		conf = configMethodLoopback{}
      +	case "manual":
      +		conf = configMethodManual{}
      +	case "dhcp":
      +		config := configMethodDHCP{}
      +		if hwaddress, err := parseHwaddress(optionMap, iface); err == nil {
      +			config.hwaddress = hwaddress
      +		} else {
      +			return nil, err
      +		}
      +		conf = config
      +	default:
      +		return nil, fmt.Errorf("invalid config method %q", confMethod)
      +	}
      +
      +	if _, ok := optionMap["vlan_raw_device"]; ok {
      +		return parseVLANStanza(iface, conf, attributes, optionMap)
      +	}
      +
      +	if strings.Contains(iface, ".") {
      +		return parseVLANStanza(iface, conf, attributes, optionMap)
      +	}
      +
      +	if _, ok := optionMap["bond-slaves"]; ok {
      +		return parseBondStanza(iface, conf, attributes, optionMap)
      +	}
      +
      +	return parsePhysicalStanza(iface, conf, attributes, optionMap)
      +}
      +
      +func parseHwaddress(options map[string][]string, iface string) (net.HardwareAddr, error) {
      +	if hwaddress, ok := options["hwaddress"]; ok && len(hwaddress) == 2 {
      +		switch hwaddress[0] {
      +		case "ether":
      +			if address, err := net.ParseMAC(hwaddress[1]); err == nil {
      +				return address, nil
      +			}
      +			return nil, fmt.Errorf("malformed hwaddress option for %q", iface)
      +		}
      +	}
      +	return nil, nil
      +}
      +
      +func parseBondStanza(iface string, conf configMethod, attributes []string, options map[string][]string) (*stanzaInterface, error) {
      +	return &stanzaInterface{name: iface, kind: interfaceBond, configMethod: conf, options: options}, nil
      +}
      +
      +func parsePhysicalStanza(iface string, conf configMethod, attributes []string, options map[string][]string) (*stanzaInterface, error) {
      +	return &stanzaInterface{name: iface, kind: interfacePhysical, configMethod: conf, options: options}, nil
      +}
      +
      +func parseVLANStanza(iface string, conf configMethod, attributes []string, options map[string][]string) (*stanzaInterface, error) {
      +	var id string
      +	if strings.Contains(iface, ".") {
      +		tokens := strings.Split(iface, ".")
      +		id = tokens[len(tokens)-1]
      +	} else if strings.HasPrefix(iface, "vlan") {
      +		id = strings.TrimPrefix(iface, "vlan")
      +	} else {
      +		return nil, fmt.Errorf("malformed vlan name %q", iface)
      +	}
      +
      +	if _, err := strconv.Atoi(id); err != nil {
      +		return nil, fmt.Errorf("malformed vlan name %q", iface)
      +	}
      +	options["id"] = []string{id}
      +	options["raw_device"] = options["vlan_raw_device"]
      +
      +	return &stanzaInterface{name: iface, kind: interfaceVLAN, configMethod: conf, options: options}, nil
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/stanza_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/stanza_test.go
      new file mode 100644
      index 00000000..9f476380
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/stanza_test.go
      @@ -0,0 +1,582 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package network
      +
      +import (
      +	"net"
      +	"reflect"
      +	"strings"
      +	"testing"
      +)
      +
      +func TestSplitStanzasNoParent(t *testing.T) {
      +	in := []string{"test"}
      +	e := "missing stanza start"
      +	_, err := splitStanzas(in)
      +	if err == nil || !strings.HasPrefix(err.Error(), e) {
      +		t.Fatalf("bad error for splitStanzas(%q): got %q, want %q", in, err, e)
      +	}
      +}
      +
      +func TestBadParseStanzas(t *testing.T) {
      +	for in, e := range map[string]string{
      +		"":                 "missing stanza start",
      +		"iface":            "malformed stanza start",
      +		"allow-?? unknown": "unknown stanza",
      +	} {
      +		_, err := parseStanzas([]string{in})
      +		if err == nil || !strings.HasPrefix(err.Error(), e) {
      +			t.Fatalf("bad error for parseStanzas(%q): got %q, want %q", in, err, e)
      +		}
      +
      +	}
      +}
      +
      +func TestBadParseInterfaceStanza(t *testing.T) {
      +	for _, tt := range []struct {
      +		in   []string
      +		opts []string
      +		e    string
      +	}{
      +		{[]string{}, nil, "incorrect number of attributes"},
      +		{[]string{"eth", "inet", "invalid"}, nil, "invalid config method"},
      +		{[]string{"eth", "inet", "static"}, []string{"address 192.168.1.100"}, "malformed static network config"},
      +		{[]string{"eth", "inet", "static"}, []string{"netmask 255.255.255.0"}, "malformed static network config"},
      +		{[]string{"eth", "inet", "static"}, []string{"address invalid", "netmask 255.255.255.0"}, "malformed static network config"},
      +		{[]string{"eth", "inet", "static"}, []string{"address 192.168.1.100", "netmask invalid"}, "malformed static network config"},
      +		{[]string{"eth", "inet", "static"}, []string{"address 192.168.1.100", "netmask 255.255.255.0", "hwaddress ether NotAnAddress"}, "malformed hwaddress option"},
      +		{[]string{"eth", "inet", "dhcp"}, []string{"hwaddress ether NotAnAddress"}, "malformed hwaddress option"},
      +	} {
      +		_, err := parseInterfaceStanza(tt.in, tt.opts)
      +		if err == nil || !strings.HasPrefix(err.Error(), tt.e) {
      +			t.Fatalf("bad error parsing interface stanza %q: got %q, want %q", tt.in, err.Error(), tt.e)
      +		}
      +	}
      +}
      +
      +func TestBadParseVLANStanzas(t *testing.T) {
      +	conf := configMethodManual{}
      +	options := map[string][]string{}
      +	for _, in := range []string{"myvlan", "eth.vlan"} {
      +		_, err := parseVLANStanza(in, conf, nil, options)
      +		if err == nil || !strings.HasPrefix(err.Error(), "malformed vlan name") {
      +			t.Fatalf("did not error on bad vlan %q", in)
      +		}
      +	}
      +}
      +
      +func TestSplitStanzas(t *testing.T) {
      +	expect := [][]string{
      +		{"auto lo"},
      +		{"iface eth1", "option: 1"},
      +		{"mapping"},
      +		{"allow-"},
      +	}
      +	lines := make([]string, 0, 5)
      +	for _, stanza := range expect {
      +		for _, line := range stanza {
      +			lines = append(lines, line)
      +		}
      +	}
      +
      +	stanzas, err := splitStanzas(lines)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	for i, stanza := range stanzas {
      +		if len(stanza) != len(expect[i]) {
      +			t.FailNow()
      +		}
      +		for j, line := range stanza {
      +			if line != expect[i][j] {
      +				t.FailNow()
      +			}
      +		}
      +	}
      +}
      +
      +func TestParseStanzaNil(t *testing.T) {
      +	defer func() {
      +		if r := recover(); r == nil {
      +			t.Fatal("parseStanza(nil) did not panic")
      +		}
      +	}()
      +	parseStanza(nil)
      +}
      +
      +func TestParseStanzaSuccess(t *testing.T) {
      +	for _, in := range []string{
      +		"auto a",
      +		"iface a inet manual",
      +	} {
      +		if _, err := parseStanza([]string{in}); err != nil {
      +			t.Fatalf("unexpected error parsing stanza %q: %s", in, err)
      +		}
      +	}
      +}
      +
      +func TestParseAutoStanza(t *testing.T) {
      +	interfaces := []string{"test", "attribute"}
      +	stanza, err := parseAutoStanza(interfaces, nil)
      +	if err != nil {
      +		t.Fatalf("unexpected error parsing auto stanza %q: %s", interfaces, err)
      +	}
      +	if !reflect.DeepEqual(stanza.interfaces, interfaces) {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseBondStanzaNoSlaves(t *testing.T) {
      +	bond, err := parseBondStanza("", nil, nil, map[string][]string{})
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if bond.options["bond-slaves"] != nil {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseBondStanza(t *testing.T) {
      +	conf := configMethodManual{}
      +	options := map[string][]string{
      +		"bond-slaves": []string{"1", "2"},
      +	}
      +	bond, err := parseBondStanza("test", conf, nil, options)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if bond.name != "test" {
      +		t.FailNow()
      +	}
      +	if bond.kind != interfaceBond {
      +		t.FailNow()
      +	}
      +	if bond.configMethod != conf {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParsePhysicalStanza(t *testing.T) {
      +	conf := configMethodManual{}
      +	options := map[string][]string{
      +		"a": []string{"1", "2"},
      +		"b": []string{"1"},
      +	}
      +	physical, err := parsePhysicalStanza("test", conf, nil, options)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if physical.name != "test" {
      +		t.FailNow()
      +	}
      +	if physical.kind != interfacePhysical {
      +		t.FailNow()
      +	}
      +	if physical.configMethod != conf {
      +		t.FailNow()
      +	}
      +	if !reflect.DeepEqual(physical.options, options) {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseVLANStanzas(t *testing.T) {
      +	conf := configMethodManual{}
      +	options := map[string][]string{}
      +	for _, in := range []string{"vlan25", "eth.25"} {
      +		vlan, err := parseVLANStanza(in, conf, nil, options)
      +		if err != nil {
      +			t.Fatalf("unexpected error from parseVLANStanza(%q): %s", in, err)
      +		}
      +		if !reflect.DeepEqual(vlan.options["id"], []string{"25"}) {
      +			t.FailNow()
      +		}
      +	}
      +}
      +
      +func TestParseInterfaceStanzaStaticAddress(t *testing.T) {
      +	options := []string{"address 192.168.1.100", "netmask 255.255.255.0"}
      +	expect := []net.IPNet{
      +		{
      +			IP:   net.IPv4(192, 168, 1, 100),
      +			Mask: net.IPv4Mask(255, 255, 255, 0),
      +		},
      +	}
      +
      +	iface, err := parseInterfaceStanza([]string{"eth", "inet", "static"}, options)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	static, ok := iface.configMethod.(configMethodStatic)
      +	if !ok {
      +		t.FailNow()
      +	}
      +	if !reflect.DeepEqual(static.addresses, expect) {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseInterfaceStanzaStaticGateway(t *testing.T) {
      +	options := []string{"address 192.168.1.100", "netmask 255.255.255.0", "gateway 192.168.1.1"}
      +	expect := []route{
      +		{
      +			destination: net.IPNet{
      +				IP:   net.IPv4(0, 0, 0, 0),
      +				Mask: net.IPv4Mask(0, 0, 0, 0),
      +			},
      +			gateway: net.IPv4(192, 168, 1, 1),
      +		},
      +	}
      +
      +	iface, err := parseInterfaceStanza([]string{"eth", "inet", "static"}, options)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	static, ok := iface.configMethod.(configMethodStatic)
      +	if !ok {
      +		t.FailNow()
      +	}
      +	if !reflect.DeepEqual(static.routes, expect) {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseInterfaceStanzaStaticDNS(t *testing.T) {
      +	options := []string{"address 192.168.1.100", "netmask 255.255.255.0", "dns-nameservers 192.168.1.10 192.168.1.11 192.168.1.12"}
      +	expect := []net.IP{
      +		net.IPv4(192, 168, 1, 10),
      +		net.IPv4(192, 168, 1, 11),
      +		net.IPv4(192, 168, 1, 12),
      +	}
      +	iface, err := parseInterfaceStanza([]string{"eth", "inet", "static"}, options)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	static, ok := iface.configMethod.(configMethodStatic)
      +	if !ok {
      +		t.FailNow()
      +	}
      +	if !reflect.DeepEqual(static.nameservers, expect) {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestBadParseInterfaceStanzasStaticPostUp(t *testing.T) {
      +	for _, in := range []string{
      +		"post-up invalid",
      +		"post-up route add",
      +		"post-up route add -net",
      +		"post-up route add gw",
      +		"post-up route add netmask",
      +		"gateway",
      +		"gateway 192.168.1.1 192.168.1.2",
      +	} {
      +		options := []string{"address 192.168.1.100", "netmask 255.255.255.0", in}
      +		iface, err := parseInterfaceStanza([]string{"eth", "inet", "static"}, options)
      +		if err != nil {
      +			t.Fatalf("parseInterfaceStanza with options %s got unexpected error", options)
      +		}
      +		static, ok := iface.configMethod.(configMethodStatic)
      +		if !ok {
      +			t.Fatalf("parseInterfaceStanza with options %s did not return configMethodStatic", options)
      +		}
      +		if len(static.routes) != 0 {
      +			t.Fatalf("parseInterfaceStanza with options %s did not return zero-length static routes", options)
      +		}
      +	}
      +}
      +
      +func TestParseInterfaceStanzaStaticPostUp(t *testing.T) {
      +	for _, tt := range []struct {
      +		options []string
      +		expect  []route
      +	}{
      +		{
      +			options: []string{
      +				"address 192.168.1.100",
      +				"netmask 255.255.255.0",
      +				"post-up route add gw 192.168.1.1 -net 192.168.1.0 netmask 255.255.255.0",
      +			},
      +			expect: []route{
      +				{
      +					destination: net.IPNet{
      +						IP:   net.IPv4(192, 168, 1, 0),
      +						Mask: net.IPv4Mask(255, 255, 255, 0),
      +					},
      +					gateway: net.IPv4(192, 168, 1, 1),
      +				},
      +			},
      +		},
      +		{
      +			options: []string{
      +				"address 192.168.1.100",
      +				"netmask 255.255.255.0",
      +				"post-up route add gw 192.168.1.1 -net 192.168.1.0/24 || true",
      +			},
      +			expect: []route{
      +				{
      +					destination: func() net.IPNet {
      +						if _, net, err := net.ParseCIDR("192.168.1.0/24"); err == nil {
      +							return *net
      +						} else {
      +							panic(err)
      +						}
      +					}(),
      +					gateway: net.IPv4(192, 168, 1, 1),
      +				},
      +			},
      +		},
      +	} {
      +		iface, err := parseInterfaceStanza([]string{"eth", "inet", "static"}, tt.options)
      +		if err != nil {
      +			t.Fatalf("bad error (%+v): want nil, got %s\n", tt, err)
      +		}
      +		static, ok := iface.configMethod.(configMethodStatic)
      +		if !ok {
      +			t.Fatalf("bad config method (%+v): want configMethodStatic, got %T\n", tt, iface.configMethod)
      +		}
      +		if !reflect.DeepEqual(static.routes, tt.expect) {
      +			t.Fatalf("bad routes (%+v): want %#v, got %#v\n", tt, tt.expect, static.routes)
      +		}
      +	}
      +}
      +
      +func TestParseInterfaceStanzaLoopback(t *testing.T) {
      +	iface, err := parseInterfaceStanza([]string{"eth", "inet", "loopback"}, nil)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if _, ok := iface.configMethod.(configMethodLoopback); !ok {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseInterfaceStanzaManual(t *testing.T) {
      +	iface, err := parseInterfaceStanza([]string{"eth", "inet", "manual"}, nil)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if _, ok := iface.configMethod.(configMethodManual); !ok {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseInterfaceStanzaDHCP(t *testing.T) {
      +	iface, err := parseInterfaceStanza([]string{"eth", "inet", "dhcp"}, nil)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if _, ok := iface.configMethod.(configMethodDHCP); !ok {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseInterfaceStanzaPostUpOption(t *testing.T) {
      +	options := []string{
      +		"post-up",
      +		"post-up 1 2",
      +		"post-up 3 4",
      +	}
      +	iface, err := parseInterfaceStanza([]string{"eth", "inet", "manual"}, options)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if !reflect.DeepEqual(iface.options["post-up"], []string{"1 2", "3 4"}) {
      +		t.Log(iface.options["post-up"])
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseInterfaceStanzaPreDownOption(t *testing.T) {
      +	options := []string{
      +		"pre-down",
      +		"pre-down 3",
      +		"pre-down 4",
      +	}
      +	iface, err := parseInterfaceStanza([]string{"eth", "inet", "manual"}, options)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if !reflect.DeepEqual(iface.options["pre-down"], []string{"3", "4"}) {
      +		t.Log(iface.options["pre-down"])
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseInterfaceStanzaEmptyOption(t *testing.T) {
      +	options := []string{
      +		"test",
      +	}
      +	iface, err := parseInterfaceStanza([]string{"eth", "inet", "manual"}, options)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if !reflect.DeepEqual(iface.options["test"], []string{}) {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseInterfaceStanzaOptions(t *testing.T) {
      +	options := []string{
      +		"test1 1",
      +		"test2 2 3",
      +		"test1 5 6",
      +	}
      +	iface, err := parseInterfaceStanza([]string{"eth", "inet", "manual"}, options)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if !reflect.DeepEqual(iface.options["test1"], []string{"5", "6"}) {
      +		t.Log(iface.options["test1"])
      +		t.FailNow()
      +	}
      +	if !reflect.DeepEqual(iface.options["test2"], []string{"2", "3"}) {
      +		t.Log(iface.options["test2"])
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseInterfaceStanzaHwaddress(t *testing.T) {
      +	for _, tt := range []struct {
      +		attr []string
      +		opt  []string
      +		hw   net.HardwareAddr
      +	}{
      +		{
      +			[]string{"mybond", "inet", "dhcp"},
      +			[]string{},
      +			nil,
      +		},
      +		{
      +			[]string{"mybond", "inet", "dhcp"},
      +			[]string{"hwaddress ether 00:01:02:03:04:05"},
      +			net.HardwareAddr([]byte{0, 1, 2, 3, 4, 5}),
      +		},
      +		{
      +			[]string{"mybond", "inet", "static"},
      +			[]string{"hwaddress ether 00:01:02:03:04:05", "address 192.168.1.100", "netmask 255.255.255.0"},
      +			net.HardwareAddr([]byte{0, 1, 2, 3, 4, 5}),
      +		},
      +	} {
      +		iface, err := parseInterfaceStanza(tt.attr, tt.opt)
      +		if err != nil {
      +			t.Fatalf("error in parseInterfaceStanza (%q, %q): %q", tt.attr, tt.opt, err)
      +		}
      +		switch c := iface.configMethod.(type) {
      +		case configMethodStatic:
      +			if !reflect.DeepEqual(c.hwaddress, tt.hw) {
      +				t.Fatalf("bad hwaddress (%q, %q): got %q, want %q", tt.attr, tt.opt, c.hwaddress, tt.hw)
      +			}
      +		case configMethodDHCP:
      +			if !reflect.DeepEqual(c.hwaddress, tt.hw) {
      +				t.Fatalf("bad hwaddress (%q, %q): got %q, want %q", tt.attr, tt.opt, c.hwaddress, tt.hw)
      +			}
      +		}
      +	}
      +}
      +
      +func TestParseInterfaceStanzaBond(t *testing.T) {
      +	iface, err := parseInterfaceStanza([]string{"mybond", "inet", "manual"}, []string{"bond-slaves eth"})
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if iface.kind != interfaceBond {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseInterfaceStanzaVLANName(t *testing.T) {
      +	iface, err := parseInterfaceStanza([]string{"eth0.1", "inet", "manual"}, nil)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if iface.kind != interfaceVLAN {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseInterfaceStanzaVLANOption(t *testing.T) {
      +	iface, err := parseInterfaceStanza([]string{"vlan1", "inet", "manual"}, []string{"vlan_raw_device eth"})
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if iface.kind != interfaceVLAN {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseStanzasNone(t *testing.T) {
      +	stanzas, err := parseStanzas(nil)
      +	if err != nil {
      +		t.FailNow()
      +	}
      +	if len(stanzas) != 0 {
      +		t.FailNow()
      +	}
      +}
      +
      +func TestParseStanzas(t *testing.T) {
      +	lines := []string{
      +		"auto lo",
      +		"iface lo inet loopback",
      +		"iface eth1 inet manual",
      +		"iface eth2 inet manual",
      +		"iface eth3 inet manual",
      +		"auto eth1 eth3",
      +	}
      +	expect := []stanza{
      +		&stanzaAuto{
      +			interfaces: []string{"lo"},
      +		},
      +		&stanzaInterface{
      +			name:         "lo",
      +			kind:         interfacePhysical,
      +			auto:         true,
      +			configMethod: configMethodLoopback{},
      +			options:      map[string][]string{},
      +		},
      +		&stanzaInterface{
      +			name:         "eth1",
      +			kind:         interfacePhysical,
      +			auto:         true,
      +			configMethod: configMethodManual{},
      +			options:      map[string][]string{},
      +		},
      +		&stanzaInterface{
      +			name:         "eth2",
      +			kind:         interfacePhysical,
      +			auto:         false,
      +			configMethod: configMethodManual{},
      +			options:      map[string][]string{},
      +		},
      +		&stanzaInterface{
      +			name:         "eth3",
      +			kind:         interfacePhysical,
      +			auto:         true,
      +			configMethod: configMethodManual{},
      +			options:      map[string][]string{},
      +		},
      +		&stanzaAuto{
      +			interfaces: []string{"eth1", "eth3"},
      +		},
      +	}
      +	stanzas, err := parseStanzas(lines)
      +	if err != err {
      +		t.FailNow()
      +	}
      +	if !reflect.DeepEqual(stanzas, expect) {
      +		t.FailNow()
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/vmware.go b/vendor/github.com/coreos/coreos-cloudinit/network/vmware.go
      new file mode 100644
      index 00000000..230be42a
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/vmware.go
      @@ -0,0 +1,174 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package network
      +
      +import (
      +	"fmt"
      +	"log"
      +	"net"
      +)
      +
      +func ProcessVMwareNetconf(config map[string]string) ([]InterfaceGenerator, error) {
      +	log.Println("Processing VMware network config")
      +
      +	log.Println("Parsing nameservers")
      +	var nameservers []net.IP
      +	for i := 0; ; i++ {
      +		if ipStr, ok := config[fmt.Sprintf("dns.server.%d", i)]; ok {
      +			if ip := net.ParseIP(ipStr); ip != nil {
      +				nameservers = append(nameservers, ip)
      +			} else {
      +				return nil, fmt.Errorf("invalid nameserver: %q", ipStr)
      +			}
      +		} else {
      +			break
      +		}
      +	}
      +	log.Printf("Parsed %d nameservers", len(nameservers))
      +
      +	var interfaces []InterfaceGenerator
      +	for i := 0; ; i++ {
      +		var addresses []net.IPNet
      +		var routes []route
      +		var err error
      +		var dhcp bool
      +		iface := &physicalInterface{}
      +
      +		log.Printf("Proccessing interface %d", i)
      +
      +		log.Println("Processing DHCP")
      +		if dhcp, err = processDHCPConfig(config, fmt.Sprintf("interface.%d.", i)); err != nil {
      +			return nil, err
      +		}
      +
      +		log.Println("Processing addresses")
      +		if as, err := processAddressConfig(config, fmt.Sprintf("interface.%d.", i)); err == nil {
      +			addresses = append(addresses, as...)
      +		} else {
      +			return nil, err
      +		}
      +
      +		log.Println("Processing routes")
      +		if rs, err := processRouteConfig(config, fmt.Sprintf("interface.%d.", i)); err == nil {
      +			routes = append(routes, rs...)
      +		} else {
      +			return nil, err
      +		}
      +
      +		if mac, ok := config[fmt.Sprintf("interface.%d.mac", i)]; ok {
      +			log.Printf("Parsing interface %d MAC address: %q", i, mac)
      +			if hwaddr, err := net.ParseMAC(mac); err == nil {
      +				iface.hwaddr = hwaddr
      +			} else {
      +				return nil, fmt.Errorf("error while parsing MAC address: %v", err)
      +			}
      +		}
      +
      +		if name, ok := config[fmt.Sprintf("interface.%d.name", i)]; ok {
      +			log.Printf("Parsing interface %d name: %q", i, name)
      +			iface.name = name
      +		}
      +
      +		if len(addresses) > 0 || len(routes) > 0 {
      +			iface.config = configMethodStatic{
      +				hwaddress:   iface.hwaddr,
      +				addresses:   addresses,
      +				nameservers: nameservers,
      +				routes:      routes,
      +			}
      +		} else if dhcp {
      +			iface.config = configMethodDHCP{
      +				hwaddress: iface.hwaddr,
      +			}
      +		} else {
      +			break
      +		}
      +
      +		interfaces = append(interfaces, iface)
      +	}
      +
      +	return interfaces, nil
      +}
      +
      +func processAddressConfig(config map[string]string, prefix string) (addresses []net.IPNet, err error) {
      +	for a := 0; ; a++ {
      +		prefix := fmt.Sprintf("%sip.%d.", prefix, a)
      +
      +		addressStr, ok := config[prefix+"address"]
      +		if !ok {
      +			break
      +		}
      +
      +		ip, network, err := net.ParseCIDR(addressStr)
      +		if err != nil {
      +			return nil, fmt.Errorf("invalid address: %q", addressStr)
      +		}
      +		addresses = append(addresses, net.IPNet{
      +			IP:   ip,
      +			Mask: network.Mask,
      +		})
      +	}
      +
      +	return
      +}
      +
      +func processRouteConfig(config map[string]string, prefix string) (routes []route, err error) {
      +	for r := 0; ; r++ {
      +		prefix := fmt.Sprintf("%sroute.%d.", prefix, r)
      +
      +		gatewayStr, gok := config[prefix+"gateway"]
      +		destinationStr, dok := config[prefix+"destination"]
      +		if gok && !dok {
      +			return nil, fmt.Errorf("missing destination key")
      +		} else if !gok && dok {
      +			return nil, fmt.Errorf("missing gateway key")
      +		} else if !gok && !dok {
      +			break
      +		}
      +
      +		gateway := net.ParseIP(gatewayStr)
      +		if gateway == nil {
      +			return nil, fmt.Errorf("invalid gateway: %q", gatewayStr)
      +		}
      +
      +		_, destination, err := net.ParseCIDR(destinationStr)
      +		if err != nil {
      +			return nil, err
      +		}
      +
      +		routes = append(routes, route{
      +			destination: *destination,
      +			gateway:     gateway,
      +		})
      +	}
      +
      +	return
      +}
      +
      +func processDHCPConfig(config map[string]string, prefix string) (dhcp bool, err error) {
      +	dhcpStr, ok := config[prefix+"dhcp"]
      +	if !ok {
      +		return false, nil
      +	}
      +
      +	switch dhcpStr {
      +	case "yes":
      +		return true, nil
      +	case "no":
      +		return false, nil
      +	default:
      +		return false, fmt.Errorf("invalid DHCP option: %q", dhcpStr)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/vmware_test.go b/vendor/github.com/coreos/coreos-cloudinit/network/vmware_test.go
      new file mode 100644
      index 00000000..010568c9
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/network/vmware_test.go
      @@ -0,0 +1,361 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package network
      +
      +import (
      +	"errors"
      +	"net"
      +	"reflect"
      +	"testing"
      +)
      +
      +func mustParseMac(mac net.HardwareAddr, err error) net.HardwareAddr {
      +	if err != nil {
      +		panic(err)
      +	}
      +	return mac
      +}
      +
      +func TestProcessVMwareNetconf(t *testing.T) {
      +	tests := []struct {
      +		config map[string]string
      +
      +		interfaces []InterfaceGenerator
      +		err        error
      +	}{
      +		{},
      +		{
      +			config: map[string]string{
      +				"interface.0.dhcp": "yes",
      +			},
      +			interfaces: []InterfaceGenerator{
      +				&physicalInterface{logicalInterface{
      +					config: configMethodDHCP{},
      +				}},
      +			},
      +		},
      +		{
      +			config: map[string]string{
      +				"interface.0.mac":  "00:11:22:33:44:55",
      +				"interface.0.dhcp": "yes",
      +			},
      +			interfaces: []InterfaceGenerator{
      +				&physicalInterface{logicalInterface{
      +					hwaddr: mustParseMac(net.ParseMAC("00:11:22:33:44:55")),
      +					config: configMethodDHCP{hwaddress: mustParseMac(net.ParseMAC("00:11:22:33:44:55"))},
      +				}},
      +			},
      +		},
      +		{
      +			config: map[string]string{
      +				"interface.0.name": "eth0",
      +				"interface.0.dhcp": "yes",
      +			},
      +			interfaces: []InterfaceGenerator{
      +				&physicalInterface{logicalInterface{
      +					name:   "eth0",
      +					config: configMethodDHCP{},
      +				}},
      +			},
      +		},
      +		{
      +			config: map[string]string{
      +				"interface.0.mac":                 "00:11:22:33:44:55",
      +				"interface.0.ip.0.address":        "10.0.0.100/24",
      +				"interface.0.route.0.gateway":     "10.0.0.1",
      +				"interface.0.route.0.destination": "0.0.0.0/0",
      +			},
      +			interfaces: []InterfaceGenerator{
      +				&physicalInterface{logicalInterface{
      +					hwaddr: mustParseMac(net.ParseMAC("00:11:22:33:44:55")),
      +					config: configMethodStatic{
      +						hwaddress: mustParseMac(net.ParseMAC("00:11:22:33:44:55")),
      +						addresses: []net.IPNet{net.IPNet{IP: net.ParseIP("10.0.0.100"), Mask: net.CIDRMask(24, net.IPv4len*8)}},
      +						// I realize how upset you must be that I am shoving an IPMask into an IP. This is because net.IPv4zero is
      +						// actually a magic IPv6 address which ruins our equality check. What's that? Just use IP::Equal()? I'd rather
      +						// DeepEqual just handle that for me, but until Go gets operator overloading, we are stuck with this.
      +						routes: []route{route{
      +							destination: net.IPNet{IP: net.IP(net.CIDRMask(0, net.IPv4len*8)), Mask: net.CIDRMask(0, net.IPv4len*8)},
      +							gateway:     net.ParseIP("10.0.0.1")},
      +						},
      +					},
      +				}},
      +			},
      +		},
      +		{
      +			config: map[string]string{
      +				"dns.server.0":                    "1.2.3.4",
      +				"dns.server.1":                    "5.6.7.8",
      +				"interface.0.mac":                 "00:11:22:33:44:55",
      +				"interface.0.ip.0.address":        "10.0.0.100/24",
      +				"interface.0.ip.1.address":        "10.0.0.101/24",
      +				"interface.0.route.0.gateway":     "10.0.0.1",
      +				"interface.0.route.0.destination": "0.0.0.0/0",
      +				"interface.1.name":                "eth0",
      +				"interface.1.ip.0.address":        "10.0.1.100/24",
      +				"interface.1.route.0.gateway":     "10.0.1.1",
      +				"interface.1.route.0.destination": "0.0.0.0/0",
      +				"interface.2.dhcp":                "yes",
      +				"interface.2.mac":                 "00:11:22:33:44:77",
      +			},
      +			interfaces: []InterfaceGenerator{
      +				&physicalInterface{logicalInterface{
      +					hwaddr: mustParseMac(net.ParseMAC("00:11:22:33:44:55")),
      +					config: configMethodStatic{
      +						hwaddress: mustParseMac(net.ParseMAC("00:11:22:33:44:55")),
      +						addresses: []net.IPNet{
      +							net.IPNet{IP: net.ParseIP("10.0.0.100"), Mask: net.CIDRMask(24, net.IPv4len*8)},
      +							net.IPNet{IP: net.ParseIP("10.0.0.101"), Mask: net.CIDRMask(24, net.IPv4len*8)},
      +						},
      +						routes: []route{route{
      +							destination: net.IPNet{IP: net.IP(net.CIDRMask(0, net.IPv4len*8)), Mask: net.CIDRMask(0, net.IPv4len*8)},
      +							gateway:     net.ParseIP("10.0.0.1")},
      +						},
      +						nameservers: []net.IP{net.ParseIP("1.2.3.4"), net.ParseIP("5.6.7.8")},
      +					},
      +				}},
      +				&physicalInterface{logicalInterface{
      +					name: "eth0",
      +					config: configMethodStatic{
      +						addresses: []net.IPNet{net.IPNet{IP: net.ParseIP("10.0.1.100"), Mask: net.CIDRMask(24, net.IPv4len*8)}},
      +						routes: []route{route{
      +							destination: net.IPNet{IP: net.IP(net.CIDRMask(0, net.IPv4len*8)), Mask: net.CIDRMask(0, net.IPv4len*8)},
      +							gateway:     net.ParseIP("10.0.1.1")},
      +						},
      +						nameservers: []net.IP{net.ParseIP("1.2.3.4"), net.ParseIP("5.6.7.8")},
      +					},
      +				}},
      +				&physicalInterface{logicalInterface{
      +					hwaddr: mustParseMac(net.ParseMAC("00:11:22:33:44:77")),
      +					config: configMethodDHCP{hwaddress: mustParseMac(net.ParseMAC("00:11:22:33:44:77"))},
      +				}},
      +			},
      +		},
      +		{
      +			config: map[string]string{"dns.server.0": "test dns"},
      +			err:    errors.New(`invalid nameserver: "test dns"`),
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		interfaces, err := ProcessVMwareNetconf(tt.config)
      +		if !reflect.DeepEqual(tt.err, err) {
      +			t.Errorf("bad error (#%d): want %v, got %v", i, tt.err, err)
      +		}
      +		if !reflect.DeepEqual(tt.interfaces, interfaces) {
      +			t.Errorf("bad interfaces (#%d): want %#v, got %#v", i, tt.interfaces, interfaces)
      +			for _, iface := range tt.interfaces {
      +				t.Logf("  want: %#v", iface)
      +			}
      +			for _, iface := range interfaces {
      +				t.Logf("  got:  %#v", iface)
      +			}
      +		}
      +	}
      +}
      +
+// TestProcessAddressConfig table-tests processAddressConfig: an empty
+// config, static IPv4 and IPv6 "ip.N.address" entries (with and without a
+// key prefix), and an unparseable address that must produce an error.
+func TestProcessAddressConfig(t *testing.T) {
+	tests := []struct {
+		config map[string]string
+		prefix string
+
+		addresses []net.IPNet
+		err       error
+	}{
+		{},
+
+		// static - ipv4
+		{
+			config: map[string]string{
+				"ip.0.address": "10.0.0.100/24",
+			},
+
+			addresses: []net.IPNet{{IP: net.ParseIP("10.0.0.100"), Mask: net.CIDRMask(24, net.IPv4len*8)}},
+		},
+		{
+			config: map[string]string{
+				"this.is.a.prefix.ip.0.address": "10.0.0.100/24",
+			},
+			prefix: "this.is.a.prefix.",
+
+			addresses: []net.IPNet{{IP: net.ParseIP("10.0.0.100"), Mask: net.CIDRMask(24, net.IPv4len*8)}},
+		},
+		{
+			config: map[string]string{
+				"ip.0.address": "10.0.0.100/24",
+				"ip.1.address": "10.0.0.101/24",
+				"ip.2.address": "10.0.0.102/24",
+			},
+
+			addresses: []net.IPNet{
+				{IP: net.ParseIP("10.0.0.100"), Mask: net.CIDRMask(24, net.IPv4len*8)},
+				{IP: net.ParseIP("10.0.0.101"), Mask: net.CIDRMask(24, net.IPv4len*8)},
+				{IP: net.ParseIP("10.0.0.102"), Mask: net.CIDRMask(24, net.IPv4len*8)},
+			},
+		},
+
+		// static - ipv6
+		{
+			config: map[string]string{
+				"ip.0.address": "fe00::100/64",
+			},
+
+			addresses: []net.IPNet{{IP: net.ParseIP("fe00::100"), Mask: net.IPMask(net.CIDRMask(64, net.IPv6len*8))}},
+		},
+		{
+			config: map[string]string{
+				"ip.0.address": "fe00::100/64",
+				"ip.1.address": "fe00::101/64",
+				"ip.2.address": "fe00::102/64",
+			},
+
+			addresses: []net.IPNet{
+				{IP: net.ParseIP("fe00::100"), Mask: net.CIDRMask(64, net.IPv6len*8)},
+				{IP: net.ParseIP("fe00::101"), Mask: net.CIDRMask(64, net.IPv6len*8)},
+				{IP: net.ParseIP("fe00::102"), Mask: net.CIDRMask(64, net.IPv6len*8)},
+			},
+		},
+
+		// invalid
+		{
+			config: map[string]string{
+				"ip.0.address": "test address",
+			},
+
+			err: errors.New(`invalid address: "test address"`),
+		},
+	}
+
+	for i, tt := range tests {
+		addresses, err := processAddressConfig(tt.config, tt.prefix)
+		if !reflect.DeepEqual(tt.err, err) {
+			t.Errorf("bad error (#%d): want %v, got %v", i, tt.err, err)
+		}
+		// On error the returned addresses are unspecified and not compared.
+		if err != nil {
+			continue
+		}
+
+		if !reflect.DeepEqual(tt.addresses, addresses) {
+			t.Errorf("bad addresses (#%d): want %#v, got %#v", i, tt.addresses, addresses)
+		}
+	}
+}
      +
+// TestProcessRouteConfig table-tests processRouteConfig: empty config,
+// IPv4/IPv6 "route.N.gateway"/"route.N.destination" pairs (with and
+// without a key prefix), an invalid gateway, and an invalid CIDR
+// destination (expected to surface the raw *net.ParseError).
+func TestProcessRouteConfig(t *testing.T) {
+	tests := []struct {
+		config map[string]string
+		prefix string
+
+		routes []route
+		err    error
+	}{
+		{},
+
+		{
+			config: map[string]string{
+				"route.0.gateway":     "10.0.0.1",
+				"route.0.destination": "0.0.0.0/0",
+			},
+
+			routes: []route{{destination: net.IPNet{IP: net.IP(net.CIDRMask(0, net.IPv4len*8)), Mask: net.CIDRMask(0, net.IPv4len*8)}, gateway: net.ParseIP("10.0.0.1")}},
+		},
+		{
+			config: map[string]string{
+				"this.is.a.prefix.route.0.gateway":     "10.0.0.1",
+				"this.is.a.prefix.route.0.destination": "0.0.0.0/0",
+			},
+			prefix: "this.is.a.prefix.",
+
+			routes: []route{{destination: net.IPNet{IP: net.IP(net.CIDRMask(0, net.IPv4len*8)), Mask: net.CIDRMask(0, net.IPv4len*8)}, gateway: net.ParseIP("10.0.0.1")}},
+		},
+		{
+			config: map[string]string{
+				"route.0.gateway":     "fe00::1",
+				"route.0.destination": "::/0",
+			},
+
+			routes: []route{{destination: net.IPNet{IP: net.IPv6zero, Mask: net.IPMask(net.IPv6zero)}, gateway: net.ParseIP("fe00::1")}},
+		},
+
+		// invalid
+		{
+			config: map[string]string{
+				"route.0.gateway":     "test gateway",
+				"route.0.destination": "0.0.0.0/0",
+			},
+
+			err: errors.New(`invalid gateway: "test gateway"`),
+		},
+		{
+			config: map[string]string{
+				"route.0.gateway":     "10.0.0.1",
+				"route.0.destination": "test destination",
+			},
+
+			err: &net.ParseError{Type: "CIDR address", Text: "test destination"},
+		},
+	}
+
+	for i, tt := range tests {
+		routes, err := processRouteConfig(tt.config, tt.prefix)
+		if !reflect.DeepEqual(tt.err, err) {
+			t.Errorf("bad error (#%d): want %v, got %v", i, tt.err, err)
+		}
+		// On error the returned routes are unspecified and not compared.
+		if err != nil {
+			continue
+		}
+
+		if !reflect.DeepEqual(tt.routes, routes) {
+			t.Errorf("bad routes (#%d): want %#v, got %#v", i, tt.routes, routes)
+		}
+	}
+}
      +
+// TestProcessDHCPConfig table-tests processDHCPConfig: absent key (false),
+// prefixed keys, explicit "yes"/"no" values, and a value that is neither,
+// which must produce an error.
+func TestProcessDHCPConfig(t *testing.T) {
+	tests := []struct {
+		config map[string]string
+		prefix string
+
+		dhcp bool
+		err  error
+	}{
+		{},
+
+		// prefix
+		{config: map[string]string{"this.is.a.prefix.mac": ""}, prefix: "this.is.a.prefix.", dhcp: false},
+		{config: map[string]string{"this.is.a.prefix.dhcp": "yes"}, prefix: "this.is.a.prefix.", dhcp: true},
+
+		// dhcp
+		{config: map[string]string{"dhcp": "yes"}, dhcp: true},
+		{config: map[string]string{"dhcp": "no"}, dhcp: false},
+
+		// invalid
+		{config: map[string]string{"dhcp": "blah"}, err: errors.New(`invalid DHCP option: "blah"`)},
+	}
+
+	for i, tt := range tests {
+		dhcp, err := processDHCPConfig(tt.config, tt.prefix)
+		if !reflect.DeepEqual(tt.err, err) {
+			t.Errorf("bad error (#%d): want %v, got %v", i, tt.err, err)
+		}
+		// On error the returned flag is unspecified and not compared.
+		if err != nil {
+			continue
+		}
+
+		if tt.dhcp != dhcp {
+			t.Errorf("bad dhcp (#%d): want %v, got %v", i, tt.dhcp, dhcp)
+		}
+	}
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client.go b/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client.go
      new file mode 100644
      index 00000000..c4fb8032
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client.go
      @@ -0,0 +1,155 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package pkg
      +
      +import (
      +	"errors"
      +	"fmt"
      +	"io/ioutil"
      +	"log"
      +	"net/http"
      +	neturl "net/url"
      +	"strings"
      +	"time"
      +)
      +
+// HTTP status-code classes, compared against resp.StatusCode / 100 in Get.
+const (
+	HTTP_2xx = 2
+	HTTP_4xx = 4
+)
+
+// Err is the error embedded by the typed wrappers below so that callers
+// can type-switch on the wrapper while still calling Error() through it.
+type Err error
+
+// ErrTimeout is returned by GetRetry when MaxRetries is exhausted.
+type ErrTimeout struct {
+	Err
+}
+
+// ErrNotFound is returned by Get for 4xx responses.
+type ErrNotFound struct {
+	Err
+}
+
+// ErrInvalid is returned by GetRetry for empty or non-HTTP(S) URLs.
+type ErrInvalid struct {
+	Err
+}
+
+// ErrServer is returned by Get for status codes outside the 2xx/4xx
+// classes (e.g. 5xx).
+type ErrServer struct {
+	Err
+}
+
+// ErrNetwork is returned by Get when the underlying HTTP request itself
+// fails before a response is received.
+type ErrNetwork struct {
+	Err
+}
+
+// HttpClient wraps an *http.Client with retry/backoff configuration used
+// by GetRetry.
+type HttpClient struct {
+	// Initial backoff duration. Defaults to 50 milliseconds
+	InitialBackoff time.Duration
+
+	// Maximum exp backoff duration. Defaults to 5 seconds
+	MaxBackoff time.Duration
+
+	// Maximum number of connection retries. Defaults to 15
+	MaxRetries int
+
+	// Whether or not to skip TLS verification. Defaults to false
+	SkipTLS bool
+
+	client *http.Client
+}
+
+// Getter abstracts single-shot and retrying URL fetches; *HttpClient
+// satisfies it.
+type Getter interface {
+	Get(string) ([]byte, error)
+	GetRetry(string) ([]byte, error)
+}
      +
+// NewHttpClient returns an HttpClient with the documented field defaults
+// and a 10-second per-request timeout on the underlying http.Client.
+// NOTE(review): SkipTLS is initialized but not applied to the client's
+// transport anywhere in this file — confirm it is honored elsewhere.
+func NewHttpClient() *HttpClient {
+	hc := &HttpClient{
+		InitialBackoff: 50 * time.Millisecond,
+		MaxBackoff:     time.Second * 5,
+		MaxRetries:     15,
+		SkipTLS:        false,
+		client: &http.Client{
+			Timeout: 10 * time.Second,
+		},
+	}
+
+	return hc
+}
      +
+// ExpBackoff doubles interval, clamping the result at max.
+func ExpBackoff(interval, max time.Duration) time.Duration {
+	interval = interval * 2
+	if interval > max {
+		interval = max
+	}
+	return interval
+}
      +
+// GetRetry fetches a given URL with support for exponential backoff and maximum retries
+//
+// It rejects empty URLs and URLs whose scheme does not begin with "http",
+// then calls Get up to MaxRetries times, sleeping with exponential backoff
+// between attempts. Only network and server errors are retried; success,
+// 4xx, and every other outcome return immediately.
+func (h *HttpClient) GetRetry(rawurl string) ([]byte, error) {
+	if rawurl == "" {
+		return nil, ErrInvalid{errors.New("URL is empty. Skipping.")}
+	}
+
+	url, err := neturl.Parse(rawurl)
+	if err != nil {
+		return nil, ErrInvalid{err}
+	}
+
+	// Unfortunately, url.Parse is too generic to throw errors if a URL does not
+	// have a valid HTTP scheme. So, we have to do this extra validation
+	if !strings.HasPrefix(url.Scheme, "http") {
+		return nil, ErrInvalid{fmt.Errorf("URL %s does not have a valid HTTP scheme. Skipping.", rawurl)}
+	}
+
+	dataURL := url.String()
+
+	duration := h.InitialBackoff
+	for retry := 1; retry <= h.MaxRetries; retry++ {
+		log.Printf("Fetching data from %s. Attempt #%d", dataURL, retry)
+
+		data, err := h.Get(dataURL)
+		switch err.(type) {
+		// NOTE(review): log.Printf is handed err.Error() as its format
+		// string; a '%' in the error text would be misrendered (go vet
+		// flags this). Also, the ErrNotFound case is identical to default
+		// and serves only to document that 4xx does not retry.
+		case ErrNetwork:
+			log.Printf(err.Error())
+		case ErrServer:
+			log.Printf(err.Error())
+		case ErrNotFound:
+			return data, err
+		default:
+			return data, err
+		}
+
+		duration = ExpBackoff(duration, h.MaxBackoff)
+		log.Printf("Sleeping for %v...", duration)
+		time.Sleep(duration)
+	}
+
+	return nil, ErrTimeout{fmt.Errorf("Unable to fetch data. Maximum retries reached: %d", h.MaxRetries)}
+}
      +
+// Get performs a single HTTP GET of dataURL and classifies the outcome:
+// a 2xx status returns the full response body, 4xx returns ErrNotFound,
+// any other status returns ErrServer, and a transport-level failure
+// returns ErrNetwork.
+func (h *HttpClient) Get(dataURL string) ([]byte, error) {
+	if resp, err := h.client.Get(dataURL); err == nil {
+		defer resp.Body.Close()
+		switch resp.StatusCode / 100 {
+		case HTTP_2xx:
+			return ioutil.ReadAll(resp.Body)
+		case HTTP_4xx:
+			return nil, ErrNotFound{fmt.Errorf("Not found. HTTP status code: %d", resp.StatusCode)}
+		default:
+			return nil, ErrServer{fmt.Errorf("Server error. HTTP status code: %d", resp.StatusCode)}
+		}
+	} else {
+		return nil, ErrNetwork{fmt.Errorf("Unable to fetch data: %s", err.Error())}
+	}
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client_test.go b/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client_test.go
      new file mode 100644
      index 00000000..d581d94e
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/pkg/http_client_test.go
      @@ -0,0 +1,154 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package pkg
      +
      +import (
      +	"fmt"
      +	"io"
      +	"math"
      +	"net/http"
      +	"net/http/httptest"
      +	"testing"
      +	"time"
      +)
      +
+// TestExpBackoff doubles a duration math.MaxUint16 times and checks the
+// result never goes negative (i.e. no overflow) and never exceeds the cap.
+func TestExpBackoff(t *testing.T) {
+	duration := time.Millisecond
+	max := time.Hour
+	for i := 0; i < math.MaxUint16; i++ {
+		duration = ExpBackoff(duration, max)
+		if duration < 0 {
+			t.Fatalf("duration too small: %v %v", duration, i)
+		}
+		if duration > max {
+			t.Fatalf("duration too large: %v %v", duration, i)
+		}
+	}
+}
      +
+// Test exponential backoff and that it continues retrying if a 5xx response is
+// received
+//
+// Each case serves 500s for the first tt.count requests and a success body
+// afterwards; GetRetry must keep retrying until it gets the 2xx.
+func TestGetURLExpBackOff(t *testing.T) {
+	var expBackoffTests = []struct {
+		count int
+		body  string
+	}{
+		{0, "number of attempts: 0"},
+		{1, "number of attempts: 1"},
+		{2, "number of attempts: 2"},
+	}
+	client := NewHttpClient()
+
+	for i, tt := range expBackoffTests {
+		mux := http.NewServeMux()
+		count := 0
+		mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+			if count == tt.count {
+				io.WriteString(w, fmt.Sprintf("number of attempts: %d", count))
+				return
+			}
+			count++
+			http.Error(w, "", 500)
+		})
+		ts := httptest.NewServer(mux)
+		// NOTE(review): defer inside a loop — every server stays open until
+		// the test function returns rather than closing per iteration.
+		defer ts.Close()
+
+		data, err := client.GetRetry(ts.URL)
+		if err != nil {
+			t.Errorf("Test case %d produced error: %v", i, err)
+		}
+
+		if count != tt.count {
+			t.Errorf("Test case %d failed: %d != %d", i, count, tt.count)
+		}
+
+		if string(data) != tt.body {
+			t.Errorf("Test case %d failed: %s != %s", i, tt.body, data)
+		}
+	}
+}
      +
+// Test that it stops retrying if a 4xx response comes back
+func TestGetURL4xx(t *testing.T) {
+	client := NewHttpClient()
+	retries := 0
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		retries++
+		http.Error(w, "", 404)
+	}))
+	defer ts.Close()
+
+	_, err := client.GetRetry(ts.URL)
+	if err == nil {
+		// NOTE(review): err is nil inside this branch, so the err.Error()
+		// call below would panic if the expectation ever failed.
+		t.Errorf("Incorrect result\ngot:  %s\nwant: %s", err.Error(), "Not found. HTTP status code: 404")
+	}
+
+	// Exactly one request should have reached the server: 404 must not retry.
+	if retries > 1 {
+		t.Errorf("Number of retries:\n%d\nExpected number of retries:\n%d", retries, 1)
+	}
+}
      +
+// Test that it fetches and returns user-data just fine
+//
+// The server echoes the cloudcfg fixture verbatim; GetRetry must return
+// the identical bytes with no error on the first attempt.
+func TestGetURL2xx(t *testing.T) {
+	var cloudcfg = `
+#cloud-config
+coreos: 
+	oem:
+	    id: test
+	    name: CoreOS.box for Test
+	    version-id: %VERSION_ID%+%BUILD_ID%
+	    home-url: https://github.com/coreos/coreos-cloudinit
+	    bug-report-url: https://github.com/coreos/coreos-cloudinit
+	update:
+		reboot-strategy: best-effort
+`
+
+	client := NewHttpClient()
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprint(w, cloudcfg)
+	}))
+	defer ts.Close()
+
+	data, err := client.GetRetry(ts.URL)
+	if err != nil {
+		t.Errorf("Incorrect result\ngot:  %v\nwant: %v", err, nil)
+	}
+
+	if string(data) != cloudcfg {
+		t.Errorf("Incorrect result\ngot:  %s\nwant: %s", string(data), cloudcfg)
+	}
+}
      +
+// Test attempt to fetching using malformed URL
+//
+// Each case must fail with the exact ErrInvalid message GetRetry produces
+// for missing/non-HTTP schemes or an empty URL.
+func TestGetMalformedURL(t *testing.T) {
+	client := NewHttpClient()
+
+	var tests = []struct {
+		url  string
+		want string
+	}{
+		{"boo", "URL boo does not have a valid HTTP scheme. Skipping."},
+		{"mailto://boo", "URL mailto://boo does not have a valid HTTP scheme. Skipping."},
+		{"ftp://boo", "URL ftp://boo does not have a valid HTTP scheme. Skipping."},
+		{"", "URL is empty. Skipping."},
+	}
+
+	for _, test := range tests {
+		_, err := client.GetRetry(test.url)
+		if err == nil || err.Error() != test.want {
+			t.Errorf("Incorrect result\ngot:  %v\nwant: %v", err, test.want)
+		}
+	}
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/env.go b/vendor/github.com/coreos/coreos-cloudinit/system/env.go
      new file mode 100644
      index 00000000..60702b6d
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/env.go
      @@ -0,0 +1,52 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"fmt"
      +	"reflect"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
+// serviceContents generates the contents for a drop-in unit given the config.
+// The argument must be a struct from the 'config' package.
+// Returns "" when no fields are set, so callers can skip writing an empty
+// drop-in; otherwise emits a [Service] section with one quoted
+// Environment= line per variable.
+func serviceContents(e interface{}) string {
+	vars := getEnvVars(e)
+	if len(vars) == 0 {
+		return ""
+	}
+
+	out := "[Service]\n"
+	for _, v := range vars {
+		out += fmt.Sprintf("Environment=\"%s\"\n", v)
+	}
+	return out
+}
      +
+// getEnvVars reflects over struct e and returns "KEY=value" strings for
+// every field whose value is non-zero (as decided by config.IsZero),
+// taking KEY from the field's `env` struct tag.
+func getEnvVars(e interface{}) []string {
+	et := reflect.TypeOf(e)
+	ev := reflect.ValueOf(e)
+
+	vars := []string{}
+	for i := 0; i < et.NumField(); i++ {
+		if val := ev.Field(i).Interface(); !config.IsZero(val) {
+			key := et.Field(i).Tag.Get("env")
+			vars = append(vars, fmt.Sprintf("%s=%v", key, val))
+		}
+	}
+
+	return vars
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/env_file.go b/vendor/github.com/coreos/coreos-cloudinit/system/env_file.go
      new file mode 100644
      index 00000000..6b2c926d
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/env_file.go
      @@ -0,0 +1,114 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"bytes"
      +	"fmt"
      +	"io/ioutil"
      +	"os"
      +	"path"
      +	"regexp"
      +	"sort"
      +)
      +
+// EnvFile describes an env-style `KEY=value` file to be merged and written
+// by WriteEnvFile; it embeds *File for path and write mechanics.
+type EnvFile struct {
+	Vars map[string]string
+	// mask File.Content, it shouldn't be used.
+	Content interface{} `json:"-" yaml:"-"`
+	*File
+}
+
+// only allow sh compatible identifiers
+var validKey = regexp.MustCompile(`^[a-zA-Z0-9_]+$`)
+
+// match each line, optionally capturing valid identifiers, discarding dos line endings
+// (submatch 1 is the line without its EOL; submatch 2 is the KEY, if any)
+var lineLexer = regexp.MustCompile(`(?m)^((?:([a-zA-Z0-9_]+)=)?.*?)\r?\n`)
      +
+// mergeEnvContents: Update the existing file contents with new values,
+// preserving variable ordering and all content this code doesn't understand.
+// All new values are appended to the bottom of the old, sorted by key.
+// Note: pending is consumed — keys already present in old are deleted from
+// it as they are rewritten in place.
+func mergeEnvContents(old []byte, pending map[string]string) []byte {
+	var buf bytes.Buffer
+	var match [][]byte
+
+	// it is awkward for the regex to handle a missing newline gracefully
+	if len(old) != 0 && !bytes.HasSuffix(old, []byte{'\n'}) {
+		old = append(old, byte('\n'))
+	}
+
+	// Rewrite known KEY= lines with their pending values; copy everything
+	// else (comments, blanks, malformed lines) through unchanged, with
+	// line endings normalized to '\n'.
+	for _, match = range lineLexer.FindAllSubmatch(old, -1) {
+		key := string(match[2])
+		if value, ok := pending[key]; ok {
+			fmt.Fprintf(&buf, "%s=%s\n", key, value)
+			delete(pending, key)
+		} else {
+			fmt.Fprintf(&buf, "%s\n", match[1])
+		}
+	}
+
+	// Append whatever was not already present, in sorted key order.
+	for _, key := range keys(pending) {
+		value := pending[key]
+		fmt.Fprintf(&buf, "%s=%s\n", key, value)
+	}
+
+	return buf.Bytes()
+}
      +
+// WriteEnvFile updates an existing env `KEY=value` formated file with
+// new values provided in EnvFile.Vars; File.Content is ignored.
+// Existing ordering and any unknown formatting such as comments are
+// preserved. If no changes are required the file is untouched.
+func WriteEnvFile(ef *EnvFile, root string) error {
+	// validate new keys, mergeEnvContents uses pending to track writes
+	pending := make(map[string]string, len(ef.Vars))
+	for key, value := range ef.Vars {
+		if !validKey.MatchString(key) {
+			return fmt.Errorf("Invalid name %q for %s", key, ef.Path)
+		}
+		pending[key] = value
+	}
+
+	// Nothing to merge: leave the file exactly as it is.
+	if len(pending) == 0 {
+		return nil
+	}
+
+	// A missing file is treated as empty; any other read error is fatal.
+	oldContent, err := ioutil.ReadFile(path.Join(root, ef.Path))
+	if err != nil {
+		if os.IsNotExist(err) {
+			oldContent = []byte{}
+		} else {
+			return err
+		}
+	}
+
+	// Skip the write entirely when the merge is a byte-for-byte no-op.
+	newContent := mergeEnvContents(oldContent, pending)
+	if bytes.Equal(oldContent, newContent) {
+		return nil
+	}
+
+	ef.File.Content = string(newContent)
+	_, err = WriteFile(ef.File, root)
+	return err
+}
      +
+// keys returns the keys of a map in sorted order
+// (used by mergeEnvContents to append new entries deterministically).
+func keys(m map[string]string) (s []string) {
+	for k, _ := range m {
+		s = append(s, k)
+	}
+	sort.Strings(s)
+	return
+}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/env_file_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/env_file_test.go
      new file mode 100644
      index 00000000..5ec03640
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/env_file_test.go
      @@ -0,0 +1,442 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"io/ioutil"
      +	"os"
      +	"path"
      +	"strings"
      +	"syscall"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
+// Fixture file bodies: base* are pre-existing contents (Unix, Unix without
+// a trailing newline, and DOS/CRLF); expect* are the merged results.
+const (
+	base          = "# a file\nFOO=base\n\nBAR= hi there\n"
+	baseNoNewline = "# a file\nFOO=base\n\nBAR= hi there"
+	baseDos       = "# a file\r\nFOO=base\r\n\r\nBAR= hi there\r\n"
+	expectUpdate  = "# a file\nFOO=test\n\nBAR= hi there\nNEW=a value\n"
+	expectCreate  = "FOO=test\nNEW=a value\n"
+)
+
+// Var maps assigned to EnvFile.Vars by the tests below: an update plus a
+// new key, a value identical to what base holds, an empty map, and a key
+// that fails validKey validation.
+var (
+	valueUpdate = map[string]string{
+		"FOO": "test",
+		"NEW": "a value",
+	}
+	valueNoop = map[string]string{
+		"FOO": "base",
+	}
+	valueEmpty   = map[string]string{}
+	valueInvalid = map[string]string{
+		"FOO-X": "test",
+	}
+)
      +
+// TestWriteEnvFileUpdate: an existing FOO is rewritten in place, NEW is
+// appended, and the file is replaced (the inode changes).
+// NOTE(review): the ioutil.WriteFile error is ignored here and in the
+// sibling tests.
+func TestWriteEnvFileUpdate(t *testing.T) {
+	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
+	if err != nil {
+		t.Fatalf("Unable to create tempdir: %v", err)
+	}
+	defer os.RemoveAll(dir)
+
+	name := "foo.conf"
+	fullPath := path.Join(dir, name)
+	ioutil.WriteFile(fullPath, []byte(base), 0644)
+
+	oldStat, err := os.Stat(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to stat file: %v", err)
+	}
+
+	ef := EnvFile{
+		File: &File{config.File{
+			Path: name,
+		}},
+		Vars: valueUpdate,
+	}
+
+	err = WriteEnvFile(&ef, dir)
+	if err != nil {
+		t.Fatalf("WriteFile failed: %v", err)
+	}
+
+	contents, err := ioutil.ReadFile(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to read expected file: %v", err)
+	}
+
+	if string(contents) != expectUpdate {
+		t.Fatalf("File has incorrect contents: %q", contents)
+	}
+
+	newStat, err := os.Stat(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to stat file: %v", err)
+	}
+
+	// A differing inode shows the file was replaced, not edited in place.
+	if oldStat.Sys().(*syscall.Stat_t).Ino == newStat.Sys().(*syscall.Stat_t).Ino {
+		t.Fatalf("File was not replaced: %s", fullPath)
+	}
+}
      +
+// TestWriteEnvFileUpdateNoNewline: like TestWriteEnvFileUpdate, but the
+// pre-existing file lacks a trailing newline, which the merge must
+// tolerate while producing the same expectUpdate result.
+func TestWriteEnvFileUpdateNoNewline(t *testing.T) {
+	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
+	if err != nil {
+		t.Fatalf("Unable to create tempdir: %v", err)
+	}
+	defer os.RemoveAll(dir)
+
+	name := "foo.conf"
+	fullPath := path.Join(dir, name)
+	ioutil.WriteFile(fullPath, []byte(baseNoNewline), 0644)
+
+	oldStat, err := os.Stat(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to stat file: %v", err)
+	}
+
+	ef := EnvFile{
+		File: &File{config.File{
+			Path: name,
+		}},
+		Vars: valueUpdate,
+	}
+
+	err = WriteEnvFile(&ef, dir)
+	if err != nil {
+		t.Fatalf("WriteFile failed: %v", err)
+	}
+
+	contents, err := ioutil.ReadFile(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to read expected file: %v", err)
+	}
+
+	if string(contents) != expectUpdate {
+		t.Fatalf("File has incorrect contents: %q", contents)
+	}
+
+	newStat, err := os.Stat(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to stat file: %v", err)
+	}
+
+	// A differing inode shows the file was replaced, not edited in place.
+	if oldStat.Sys().(*syscall.Stat_t).Ino == newStat.Sys().(*syscall.Stat_t).Ino {
+		t.Fatalf("File was not replaced: %s", fullPath)
+	}
+}
      +
+// TestWriteEnvFileCreate: writing to a path that does not exist yet must
+// create the file containing only the merged vars, in sorted key order.
+func TestWriteEnvFileCreate(t *testing.T) {
+	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
+	if err != nil {
+		t.Fatalf("Unable to create tempdir: %v", err)
+	}
+	defer os.RemoveAll(dir)
+
+	name := "foo.conf"
+	fullPath := path.Join(dir, name)
+
+	ef := EnvFile{
+		File: &File{config.File{
+			Path: name,
+		}},
+		Vars: valueUpdate,
+	}
+
+	err = WriteEnvFile(&ef, dir)
+	if err != nil {
+		t.Fatalf("WriteFile failed: %v", err)
+	}
+
+	contents, err := ioutil.ReadFile(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to read expected file: %v", err)
+	}
+
+	if string(contents) != expectCreate {
+		t.Fatalf("File has incorrect contents: %q", contents)
+	}
+}
      +
+// TestWriteEnvFileNoop: the supplied value already matches the file, so
+// the file must be left completely untouched (same contents, same inode).
+func TestWriteEnvFileNoop(t *testing.T) {
+	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
+	if err != nil {
+		t.Fatalf("Unable to create tempdir: %v", err)
+	}
+	defer os.RemoveAll(dir)
+
+	name := "foo.conf"
+	fullPath := path.Join(dir, name)
+	ioutil.WriteFile(fullPath, []byte(base), 0644)
+
+	oldStat, err := os.Stat(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to stat file: %v", err)
+	}
+
+	ef := EnvFile{
+		File: &File{config.File{
+			Path: name,
+		}},
+		Vars: valueNoop,
+	}
+
+	err = WriteEnvFile(&ef, dir)
+	if err != nil {
+		t.Fatalf("WriteFile failed: %v", err)
+	}
+
+	contents, err := ioutil.ReadFile(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to read expected file: %v", err)
+	}
+
+	if string(contents) != base {
+		t.Fatalf("File has incorrect contents: %q", contents)
+	}
+
+	newStat, err := os.Stat(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to stat file: %v", err)
+	}
+
+	// An unchanged inode shows no replacement write happened.
+	if oldStat.Sys().(*syscall.Stat_t).Ino != newStat.Sys().(*syscall.Stat_t).Ino {
+		t.Fatalf("File was replaced: %s", fullPath)
+	}
+}
      +
+// TestWriteEnvFileUpdateDos: DOS (CRLF) input is rewritten with the
+// updated values in Unix form, and the file is replaced (inode changes).
+func TestWriteEnvFileUpdateDos(t *testing.T) {
+	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
+	if err != nil {
+		t.Fatalf("Unable to create tempdir: %v", err)
+	}
+	defer os.RemoveAll(dir)
+
+	name := "foo.conf"
+	fullPath := path.Join(dir, name)
+	ioutil.WriteFile(fullPath, []byte(baseDos), 0644)
+
+	oldStat, err := os.Stat(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to stat file: %v", err)
+	}
+
+	ef := EnvFile{
+		File: &File{config.File{
+			Path: name,
+		}},
+		Vars: valueUpdate,
+	}
+
+	err = WriteEnvFile(&ef, dir)
+	if err != nil {
+		t.Fatalf("WriteFile failed: %v", err)
+	}
+
+	contents, err := ioutil.ReadFile(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to read expected file: %v", err)
+	}
+
+	if string(contents) != expectUpdate {
+		t.Fatalf("File has incorrect contents: %q", contents)
+	}
+
+	newStat, err := os.Stat(fullPath)
+	if err != nil {
+		t.Fatalf("Unable to stat file: %v", err)
+	}
+
+	// A differing inode shows the file was replaced, not edited in place.
+	if oldStat.Sys().(*syscall.Stat_t).Ino == newStat.Sys().(*syscall.Stat_t).Ino {
+		t.Fatalf("File was not replaced: %s", fullPath)
+	}
+}
      +
      +// A middle ground noop, values are unchanged but we did have a value.
      +// Seems reasonable to rewrite the file in Unix format anyway.
      +func TestWriteEnvFileDos2Unix(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	name := "foo.conf"
      +	fullPath := path.Join(dir, name)
      +	ioutil.WriteFile(fullPath, []byte(baseDos), 0644)
      +
      +	oldStat, err := os.Stat(fullPath)
      +	if err != nil {
      +		t.Fatalf("Unable to stat file: %v", err)
      +	}
      +
      +	ef := EnvFile{
      +		File: &File{config.File{
      +			Path: name,
      +		}},
      +		Vars: valueNoop,
      +	}
      +
      +	err = WriteEnvFile(&ef, dir)
      +	if err != nil {
      +		t.Fatalf("WriteFile failed: %v", err)
      +	}
      +
      +	contents, err := ioutil.ReadFile(fullPath)
      +	if err != nil {
      +		t.Fatalf("Unable to read expected file: %v", err)
      +	}
      +
      +	if string(contents) != base {
      +		t.Fatalf("File has incorrect contents: %q", contents)
      +	}
      +
      +	newStat, err := os.Stat(fullPath)
      +	if err != nil {
      +		t.Fatalf("Unable to stat file: %v", err)
      +	}
      +
      +	if oldStat.Sys().(*syscall.Stat_t).Ino == newStat.Sys().(*syscall.Stat_t).Ino {
      +		t.Fatalf("File was not replaced: %s", fullPath)
      +	}
      +}
      +
      +// If it really is a noop (structure is empty) don't even do dos2unix
      +func TestWriteEnvFileEmpty(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	name := "foo.conf"
      +	fullPath := path.Join(dir, name)
      +	ioutil.WriteFile(fullPath, []byte(baseDos), 0644)
      +
      +	oldStat, err := os.Stat(fullPath)
      +	if err != nil {
      +		t.Fatalf("Unable to stat file: %v", err)
      +	}
      +
      +	ef := EnvFile{
      +		File: &File{config.File{
      +			Path: name,
      +		}},
      +		Vars: valueEmpty,
      +	}
      +
      +	err = WriteEnvFile(&ef, dir)
      +	if err != nil {
      +		t.Fatalf("WriteFile failed: %v", err)
      +	}
      +
      +	contents, err := ioutil.ReadFile(fullPath)
      +	if err != nil {
      +		t.Fatalf("Unable to read expected file: %v", err)
      +	}
      +
      +	if string(contents) != baseDos {
      +		t.Fatalf("File has incorrect contents: %q", contents)
      +	}
      +
      +	newStat, err := os.Stat(fullPath)
      +	if err != nil {
      +		t.Fatalf("Unable to stat file: %v", err)
      +	}
      +
      +	if oldStat.Sys().(*syscall.Stat_t).Ino != newStat.Sys().(*syscall.Stat_t).Ino {
      +		t.Fatalf("File was replaced: %s", fullPath)
      +	}
      +}
      +
      +// no point in creating empty files
      +func TestWriteEnvFileEmptyNoCreate(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	name := "foo.conf"
      +	fullPath := path.Join(dir, name)
      +
      +	ef := EnvFile{
      +		File: &File{config.File{
      +			Path: name,
      +		}},
      +		Vars: valueEmpty,
      +	}
      +
      +	err = WriteEnvFile(&ef, dir)
      +	if err != nil {
      +		t.Fatalf("WriteFile failed: %v", err)
      +	}
      +
      +	contents, err := ioutil.ReadFile(fullPath)
      +	if err == nil {
      +		t.Fatalf("File has incorrect contents: %q", contents)
      +	} else if !os.IsNotExist(err) {
      +		t.Fatalf("Unexpected error while reading file: %v", err)
      +	}
      +}
      +
      +func TestWriteEnvFilePermFailure(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	name := "foo.conf"
      +	fullPath := path.Join(dir, name)
      +	ioutil.WriteFile(fullPath, []byte(base), 0000)
      +
      +	ef := EnvFile{
      +		File: &File{config.File{
      +			Path: name,
      +		}},
      +		Vars: valueUpdate,
      +	}
      +
      +	err = WriteEnvFile(&ef, dir)
      +	if !os.IsPermission(err) {
      +		t.Fatalf("Not a pemission denied error: %v", err)
      +	}
      +}
      +
      +func TestWriteEnvFileNameFailure(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	name := "foo.conf"
      +
      +	ef := EnvFile{
      +		File: &File{config.File{
      +			Path: name,
      +		}},
      +		Vars: valueInvalid,
      +	}
      +
      +	err = WriteEnvFile(&ef, dir)
      +	if err == nil || !strings.HasPrefix(err.Error(), "Invalid name") {
      +		t.Fatalf("Not an invalid name error: %v", err)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/env_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/env_test.go
      new file mode 100644
      index 00000000..9d62aed5
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/env_test.go
      @@ -0,0 +1,69 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"testing"
      +)
      +
      +func TestServiceContents(t *testing.T) {
      +	tests := []struct {
      +		Config   interface{}
      +		Contents string
      +	}{
      +		{
      +			struct{}{},
      +			"",
      +		},
      +		{
      +			struct {
      +				A string  `env:"A"`
      +				B int     `env:"B"`
      +				C bool    `env:"C"`
      +				D float64 `env:"D"`
      +			}{
      +				"hi", 1, true, 0.12345,
      +			},
      +			`[Service]
      +Environment="A=hi"
      +Environment="B=1"
      +Environment="C=true"
      +Environment="D=0.12345"
      +`,
      +		},
      +		{
      +			struct {
      +				A float64 `env:"A"`
      +				B float64 `env:"B"`
      +				C float64 `env:"C"`
      +				D float64 `env:"D"`
      +			}{
      +				0.000001, 1, 0.9999999, 0.1,
      +			},
      +			`[Service]
      +Environment="A=1e-06"
      +Environment="B=1"
      +Environment="C=0.9999999"
      +Environment="D=0.1"
      +`,
      +		},
      +	}
      +
      +	for _, tt := range tests {
      +		if c := serviceContents(tt.Config); c != tt.Contents {
      +			t.Errorf("bad contents (%+v): want %q, got %q", tt, tt.Contents, c)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/etc_hosts.go b/vendor/github.com/coreos/coreos-cloudinit/system/etc_hosts.go
      new file mode 100644
      index 00000000..7208c161
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/etc_hosts.go
      @@ -0,0 +1,62 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"errors"
      +	"fmt"
      +	"os"
      +	"path"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +const DefaultIpv4Address = "127.0.0.1"
      +
      +type EtcHosts struct {
      +	config.EtcHosts
      +}
      +
      +func (eh EtcHosts) generateEtcHosts() (out string, err error) {
      +	if eh.EtcHosts != "localhost" {
      +		return "", errors.New("Invalid option to manage_etc_hosts")
      +	}
      +
      +	// use the operating system hostname
      +	hostname, err := os.Hostname()
      +	if err != nil {
      +		return "", err
      +	}
      +
      +	return fmt.Sprintf("%s %s\n", DefaultIpv4Address, hostname), nil
      +
      +}
      +
      +func (eh EtcHosts) File() (*File, error) {
      +	if eh.EtcHosts == "" {
      +		return nil, nil
      +	}
      +
      +	etcHosts, err := eh.generateEtcHosts()
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return &File{config.File{
      +		Path:               path.Join("etc", "hosts"),
      +		RawFilePermissions: "0644",
      +		Content:            etcHosts,
      +	}}, nil
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/etc_hosts_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/etc_hosts_test.go
      new file mode 100644
      index 00000000..e5efd7c4
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/etc_hosts_test.go
      @@ -0,0 +1,60 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"fmt"
      +	"os"
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +func TestEtcdHostsFile(t *testing.T) {
      +	hostname, err := os.Hostname()
      +	if err != nil {
      +		panic(err)
      +	}
      +
      +	for _, tt := range []struct {
      +		config config.EtcHosts
      +		file   *File
      +		err    error
      +	}{
      +		{
      +			"invalid",
      +			nil,
      +			fmt.Errorf("Invalid option to manage_etc_hosts"),
      +		},
      +		{
      +			"localhost",
      +			&File{config.File{
      +				Content:            fmt.Sprintf("127.0.0.1 %s\n", hostname),
      +				Path:               "etc/hosts",
      +				RawFilePermissions: "0644",
      +			}},
      +			nil,
      +		},
      +	} {
      +		file, err := EtcHosts{tt.config}.File()
      +		if !reflect.DeepEqual(tt.err, err) {
      +			t.Errorf("bad error (%q): want %q, got %q", tt.config, tt.err, err)
      +		}
      +		if !reflect.DeepEqual(tt.file, file) {
      +			t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.file, file)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/etcd.go b/vendor/github.com/coreos/coreos-cloudinit/system/etcd.go
      new file mode 100644
      index 00000000..0c7faf4a
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/etcd.go
      @@ -0,0 +1,37 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +// Etcd is a top-level structure which embeds its underlying configuration,
      +// config.Etcd, and provides the system-specific Unit().
      +type Etcd struct {
      +	config.Etcd
      +}
      +
      +// Units creates a Unit file drop-in for etcd, using any configured options.
      +func (ee Etcd) Units() []Unit {
      +	return []Unit{{config.Unit{
      +		Name:    "etcd.service",
      +		Runtime: true,
      +		DropIns: []config.UnitDropIn{{
      +			Name:    "20-cloudinit.conf",
      +			Content: serviceContents(ee.Etcd),
      +		}},
      +	}}}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/etcd2.go b/vendor/github.com/coreos/coreos-cloudinit/system/etcd2.go
      new file mode 100644
      index 00000000..9b243e0f
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/etcd2.go
      @@ -0,0 +1,37 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +// Etcd2 is a top-level structure which embeds its underlying configuration,
      +// config.Etcd2, and provides the system-specific Unit().
      +type Etcd2 struct {
      +	config.Etcd2
      +}
      +
      +// Units creates a Unit file drop-in for etcd, using any configured options.
      +func (ee Etcd2) Units() []Unit {
      +	return []Unit{{config.Unit{
      +		Name:    "etcd2.service",
      +		Runtime: true,
      +		DropIns: []config.UnitDropIn{{
      +			Name:    "20-cloudinit.conf",
      +			Content: serviceContents(ee.Etcd2),
      +		}},
      +	}}}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/etcd_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/etcd_test.go
      new file mode 100644
      index 00000000..5fc17f02
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/etcd_test.go
      @@ -0,0 +1,79 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +func TestEtcdUnits(t *testing.T) {
      +	for _, tt := range []struct {
      +		config config.Etcd
      +		units  []Unit
      +	}{
      +		{
      +			config.Etcd{},
      +			[]Unit{{config.Unit{
      +				Name:    "etcd.service",
      +				Runtime: true,
      +				DropIns: []config.UnitDropIn{{Name: "20-cloudinit.conf"}},
      +			}}},
      +		},
      +		{
      +			config.Etcd{
      +				Discovery:    "http://disco.example.com/foobar",
      +				PeerBindAddr: "127.0.0.1:7002",
      +			},
      +			[]Unit{{config.Unit{
      +				Name:    "etcd.service",
      +				Runtime: true,
      +				DropIns: []config.UnitDropIn{{
      +					Name: "20-cloudinit.conf",
      +					Content: `[Service]
      +Environment="ETCD_DISCOVERY=http://disco.example.com/foobar"
      +Environment="ETCD_PEER_BIND_ADDR=127.0.0.1:7002"
      +`,
      +				}},
      +			}}},
      +		},
      +		{
      +			config.Etcd{
      +				Name:         "node001",
      +				Discovery:    "http://disco.example.com/foobar",
      +				PeerBindAddr: "127.0.0.1:7002",
      +			},
      +			[]Unit{{config.Unit{
      +				Name:    "etcd.service",
      +				Runtime: true,
      +				DropIns: []config.UnitDropIn{{
      +					Name: "20-cloudinit.conf",
      +					Content: `[Service]
      +Environment="ETCD_DISCOVERY=http://disco.example.com/foobar"
      +Environment="ETCD_NAME=node001"
      +Environment="ETCD_PEER_BIND_ADDR=127.0.0.1:7002"
      +`,
      +				}},
      +			}}},
      +		},
      +	} {
      +		units := Etcd{tt.config}.Units()
      +		if !reflect.DeepEqual(tt.units, units) {
      +			t.Errorf("bad units (%+v): want %#v, got %#v", tt.config, tt.units, units)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/file.go b/vendor/github.com/coreos/coreos-cloudinit/system/file.go
      new file mode 100644
      index 00000000..50b1466e
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/file.go
      @@ -0,0 +1,116 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"fmt"
      +	"io/ioutil"
      +	"log"
      +	"os"
      +	"os/exec"
      +	"path"
      +	"strconv"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +// File is a top-level structure which embeds its underlying configuration,
      +// config.File, and provides the system-specific Permissions().
      +type File struct {
      +	config.File
      +}
      +
      +func (f *File) Permissions() (os.FileMode, error) {
      +	if f.RawFilePermissions == "" {
      +		return os.FileMode(0644), nil
      +	}
      +
      +	// Parse string representation of file mode as integer
      +	perm, err := strconv.ParseInt(f.RawFilePermissions, 8, 32)
      +	if err != nil {
      +		return 0, fmt.Errorf("Unable to parse file permissions %q as integer", f.RawFilePermissions)
      +	}
      +	return os.FileMode(perm), nil
      +}
      +
      +func WriteFile(f *File, root string) (string, error) {
      +	fullpath := path.Join(root, f.Path)
      +	dir := path.Dir(fullpath)
      +	log.Printf("Writing file to %q", fullpath)
      +
      +	content, err := config.DecodeContent(f.Content, f.Encoding)
      +
      +	if err != nil {
      +		return "", fmt.Errorf("Unable to decode %s (%v)", f.Path, err)
      +	}
      +
      +	if err := EnsureDirectoryExists(dir); err != nil {
      +		return "", err
      +	}
      +
      +	perm, err := f.Permissions()
      +	if err != nil {
      +		return "", err
      +	}
      +
      +	var tmp *os.File
      +	// Create a temporary file in the same directory to ensure it's on the same filesystem
      +	if tmp, err = ioutil.TempFile(dir, "cloudinit-temp"); err != nil {
      +		return "", err
      +	}
      +
      +	if err := ioutil.WriteFile(tmp.Name(), content, perm); err != nil {
      +		return "", err
      +	}
      +
      +	if err := tmp.Close(); err != nil {
      +		return "", err
      +	}
      +
      +	// Ensure the permissions are as requested (since WriteFile can be affected by sticky bit)
      +	if err := os.Chmod(tmp.Name(), perm); err != nil {
      +		return "", err
      +	}
      +
      +	if f.Owner != "" {
      +		// We shell out since we don't have a way to look up unix groups natively
      +		cmd := exec.Command("chown", f.Owner, tmp.Name())
      +		if err := cmd.Run(); err != nil {
      +			return "", err
      +		}
      +	}
      +
      +	if err := os.Rename(tmp.Name(), fullpath); err != nil {
      +		return "", err
      +	}
      +
      +	log.Printf("Wrote file to %q", fullpath)
      +	return fullpath, nil
      +}
      +
      +func EnsureDirectoryExists(dir string) error {
      +	info, err := os.Stat(dir)
      +	if err == nil {
      +		if !info.IsDir() {
      +			return fmt.Errorf("%s is not a directory", dir)
      +		}
      +	} else {
      +		err = os.MkdirAll(dir, 0755)
      +		if err != nil {
      +			return err
      +		}
      +	}
      +	return nil
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/file_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/file_test.go
      new file mode 100644
      index 00000000..f68ec2fa
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/file_test.go
      @@ -0,0 +1,253 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"io/ioutil"
      +	"os"
      +	"path"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +func TestWriteFileUnencodedContent(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	fn := "foo"
      +	fullPath := path.Join(dir, fn)
      +
      +	wf := File{config.File{
      +		Path:               fn,
      +		Content:            "bar",
      +		RawFilePermissions: "0644",
      +	}}
      +
      +	path, err := WriteFile(&wf, dir)
      +	if err != nil {
      +		t.Fatalf("Processing of WriteFile failed: %v", err)
      +	} else if path != fullPath {
      +		t.Fatalf("WriteFile returned bad path: want %s, got %s", fullPath, path)
      +	}
      +
      +	fi, err := os.Stat(fullPath)
      +	if err != nil {
      +		t.Fatalf("Unable to stat file: %v", err)
      +	}
      +
      +	if fi.Mode() != os.FileMode(0644) {
      +		t.Errorf("File has incorrect mode: %v", fi.Mode())
      +	}
      +
      +	contents, err := ioutil.ReadFile(fullPath)
      +	if err != nil {
      +		t.Fatalf("Unable to read expected file: %v", err)
      +	}
      +
      +	if string(contents) != "bar" {
      +		t.Fatalf("File has incorrect contents")
      +	}
      +}
      +
      +func TestWriteFileInvalidPermission(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	wf := File{config.File{
      +		Path:               path.Join(dir, "tmp", "foo"),
      +		Content:            "bar",
      +		RawFilePermissions: "pants",
      +	}}
      +
      +	if _, err := WriteFile(&wf, dir); err == nil {
      +		t.Fatalf("Expected error to be raised when writing file with invalid permission")
      +	}
      +}
      +
      +func TestDecimalFilePermissions(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	fn := "foo"
      +	fullPath := path.Join(dir, fn)
      +
      +	wf := File{config.File{
      +		Path:               fn,
      +		RawFilePermissions: "744",
      +	}}
      +
      +	path, err := WriteFile(&wf, dir)
      +	if err != nil {
      +		t.Fatalf("Processing of WriteFile failed: %v", err)
      +	} else if path != fullPath {
      +		t.Fatalf("WriteFile returned bad path: want %s, got %s", fullPath, path)
      +	}
      +
      +	fi, err := os.Stat(fullPath)
      +	if err != nil {
      +		t.Fatalf("Unable to stat file: %v", err)
      +	}
      +
      +	if fi.Mode() != os.FileMode(0744) {
      +		t.Errorf("File has incorrect mode: %v", fi.Mode())
      +	}
      +}
      +
      +func TestWriteFilePermissions(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	fn := "foo"
      +	fullPath := path.Join(dir, fn)
      +
      +	wf := File{config.File{
      +		Path:               fn,
      +		RawFilePermissions: "0755",
      +	}}
      +
      +	path, err := WriteFile(&wf, dir)
      +	if err != nil {
      +		t.Fatalf("Processing of WriteFile failed: %v", err)
      +	} else if path != fullPath {
      +		t.Fatalf("WriteFile returned bad path: want %s, got %s", fullPath, path)
      +	}
      +
      +	fi, err := os.Stat(fullPath)
      +	if err != nil {
      +		t.Fatalf("Unable to stat file: %v", err)
      +	}
      +
      +	if fi.Mode() != os.FileMode(0755) {
      +		t.Errorf("File has incorrect mode: %v", fi.Mode())
      +	}
      +}
      +
      +func TestWriteFileEncodedContent(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	//all of these decode to "bar"
      +	content_tests := map[string]string{
      +		"base64":      "YmFy",
      +		"b64":         "YmFy",
      +		"gz":          "\x1f\x8b\x08\x08w\x14\x87T\x02\xffok\x00KJ,\x02\x00\xaa\x8c\xffv\x03\x00\x00\x00",
      +		"gzip":        "\x1f\x8b\x08\x08w\x14\x87T\x02\xffok\x00KJ,\x02\x00\xaa\x8c\xffv\x03\x00\x00\x00",
      +		"gz+base64":   "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
      +		"gzip+base64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
      +		"gz+b64":      "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
      +		"gzip+b64":    "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
      +	}
      +
      +	for encoding, content := range content_tests {
      +		fullPath := path.Join(dir, encoding)
      +
      +		wf := File{config.File{
      +			Path:               encoding,
      +			Encoding:           encoding,
      +			Content:            content,
      +			RawFilePermissions: "0644",
      +		}}
      +
      +		path, err := WriteFile(&wf, dir)
      +		if err != nil {
      +			t.Fatalf("Processing of WriteFile failed: %v", err)
      +		} else if path != fullPath {
      +			t.Fatalf("WriteFile returned bad path: want %s, got %s", fullPath, path)
      +		}
      +
      +		fi, err := os.Stat(fullPath)
      +		if err != nil {
      +			t.Fatalf("Unable to stat file: %v", err)
      +		}
      +
      +		if fi.Mode() != os.FileMode(0644) {
      +			t.Errorf("File has incorrect mode: %v", fi.Mode())
      +		}
      +
      +		contents, err := ioutil.ReadFile(fullPath)
      +		if err != nil {
      +			t.Fatalf("Unable to read expected file: %v", err)
      +		}
      +
      +		if string(contents) != "bar" {
      +			t.Fatalf("File has incorrect contents: '%s'", contents)
      +		}
      +	}
      +}
      +
      +func TestWriteFileInvalidEncodedContent(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	content_encodings := []string{
      +		"base64",
      +		"b64",
      +		"gz",
      +		"gzip",
      +		"gz+base64",
      +		"gzip+base64",
      +		"gz+b64",
      +		"gzip+b64",
      +	}
      +
      +	for _, encoding := range content_encodings {
      +		wf := File{config.File{
      +			Path:     path.Join(dir, "tmp", "foo"),
      +			Content:  "@&*#%invalid data*@&^#*&",
      +			Encoding: encoding,
      +		}}
      +
      +		if _, err := WriteFile(&wf, dir); err == nil {
      +			t.Fatalf("Expected error to be raised when writing file with encoding")
      +		}
      +	}
      +}
      +
      +func TestWriteFileUnknownEncodedContent(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	wf := File{config.File{
      +		Path:     path.Join(dir, "tmp", "foo"),
      +		Content:  "",
      +		Encoding: "no-such-encoding",
      +	}}
      +
      +	if _, err := WriteFile(&wf, dir); err == nil {
      +		t.Fatalf("Expected error to be raised when writing file with encoding")
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/flannel.go b/vendor/github.com/coreos/coreos-cloudinit/system/flannel.go
      new file mode 100644
      index 00000000..7442414c
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/flannel.go
      @@ -0,0 +1,44 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"path"
      +	"strings"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
+// Flannel is a top-level structure which embeds its underlying configuration,
+// config.Flannel, and provides the system-specific File().
      +type Flannel struct {
      +	config.Flannel
      +}
      +
      +func (fl Flannel) envVars() string {
      +	return strings.Join(getEnvVars(fl.Flannel), "\n")
      +}
      +
      +func (fl Flannel) File() (*File, error) {
      +	vars := fl.envVars()
      +	if vars == "" {
      +		return nil, nil
      +	}
      +	return &File{config.File{
      +		Path:               path.Join("run", "flannel", "options.env"),
      +		RawFilePermissions: "0644",
      +		Content:            vars,
      +	}}, nil
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/flannel_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/flannel_test.go
      new file mode 100644
      index 00000000..7bc9f7f6
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/flannel_test.go
      @@ -0,0 +1,76 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +func TestFlannelEnvVars(t *testing.T) {
      +	for _, tt := range []struct {
      +		config   config.Flannel
      +		contents string
      +	}{
      +		{
      +			config.Flannel{},
      +			"",
      +		},
      +		{
      +			config.Flannel{
      +				EtcdEndpoints: "http://12.34.56.78:4001",
      +				EtcdPrefix:    "/coreos.com/network/tenant1",
      +			},
      +			`FLANNELD_ETCD_ENDPOINTS=http://12.34.56.78:4001
      +FLANNELD_ETCD_PREFIX=/coreos.com/network/tenant1`,
      +		},
      +	} {
      +		out := Flannel{tt.config}.envVars()
      +		if out != tt.contents {
      +			t.Errorf("bad contents (%+v): want %q, got %q", tt, tt.contents, out)
      +		}
      +	}
      +}
      +
      +func TestFlannelFile(t *testing.T) {
      +	for _, tt := range []struct {
      +		config config.Flannel
      +		file   *File
      +	}{
      +		{
      +			config.Flannel{},
      +			nil,
      +		},
      +		{
      +			config.Flannel{
      +				EtcdEndpoints: "http://12.34.56.78:4001",
      +				EtcdPrefix:    "/coreos.com/network/tenant1",
      +			},
      +			&File{config.File{
      +				Path:               "run/flannel/options.env",
      +				RawFilePermissions: "0644",
      +				Content: `FLANNELD_ETCD_ENDPOINTS=http://12.34.56.78:4001
      +FLANNELD_ETCD_PREFIX=/coreos.com/network/tenant1`,
      +			}},
      +		},
      +	} {
      +		file, _ := Flannel{tt.config}.File()
      +		if !reflect.DeepEqual(tt.file, file) {
      +			t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.file, file)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/fleet.go b/vendor/github.com/coreos/coreos-cloudinit/system/fleet.go
      new file mode 100644
      index 00000000..2d12d819
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/fleet.go
      @@ -0,0 +1,38 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +// Fleet is a top-level structure which embeds its underlying configuration,
+// config.Fleet, and provides the system-specific Units().
      +type Fleet struct {
      +	config.Fleet
      +}
      +
      +// Units generates a Unit file drop-in for fleet, if any fleet options were
      +// configured in cloud-config
      +func (fe Fleet) Units() []Unit {
      +	return []Unit{{config.Unit{
      +		Name:    "fleet.service",
      +		Runtime: true,
      +		DropIns: []config.UnitDropIn{{
      +			Name:    "20-cloudinit.conf",
      +			Content: serviceContents(fe.Fleet),
      +		}},
      +	}}}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/fleet_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/fleet_test.go
      new file mode 100644
      index 00000000..dfe7c3f5
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/fleet_test.go
      @@ -0,0 +1,58 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +func TestFleetUnits(t *testing.T) {
      +	for _, tt := range []struct {
      +		config config.Fleet
      +		units  []Unit
      +	}{
      +		{
      +			config.Fleet{},
      +			[]Unit{{config.Unit{
      +				Name:    "fleet.service",
      +				Runtime: true,
      +				DropIns: []config.UnitDropIn{{Name: "20-cloudinit.conf"}},
      +			}}},
      +		},
      +		{
      +			config.Fleet{
      +				PublicIP: "12.34.56.78",
      +			},
      +			[]Unit{{config.Unit{
      +				Name:    "fleet.service",
      +				Runtime: true,
      +				DropIns: []config.UnitDropIn{{
      +					Name: "20-cloudinit.conf",
      +					Content: `[Service]
      +Environment="FLEET_PUBLIC_IP=12.34.56.78"
      +`,
      +				}},
      +			}}},
      +		},
      +	} {
      +		units := Fleet{tt.config}.Units()
      +		if !reflect.DeepEqual(units, tt.units) {
      +			t.Errorf("bad units (%+v): want %#v, got %#v", tt.config, tt.units, units)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/locksmith.go b/vendor/github.com/coreos/coreos-cloudinit/system/locksmith.go
      new file mode 100644
      index 00000000..6095e5f7
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/locksmith.go
      @@ -0,0 +1,37 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
+// Locksmith is a top-level structure which embeds its underlying configuration,
+// config.Locksmith, and provides the system-specific Units().
      +type Locksmith struct {
      +	config.Locksmith
      +}
      +
+// Units creates a Unit file drop-in for locksmithd, using any configured options.
      +func (ee Locksmith) Units() []Unit {
      +	return []Unit{{config.Unit{
      +		Name:    "locksmithd.service",
      +		Runtime: true,
      +		DropIns: []config.UnitDropIn{{
      +			Name:    "20-cloudinit.conf",
      +			Content: serviceContents(ee.Locksmith),
      +		}},
      +	}}}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/locksmith_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/locksmith_test.go
      new file mode 100644
      index 00000000..6d7d9887
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/locksmith_test.go
      @@ -0,0 +1,58 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +func TestLocksmithUnits(t *testing.T) {
      +	for _, tt := range []struct {
      +		config config.Locksmith
      +		units  []Unit
      +	}{
      +		{
      +			config.Locksmith{},
      +			[]Unit{{config.Unit{
      +				Name:    "locksmithd.service",
      +				Runtime: true,
      +				DropIns: []config.UnitDropIn{{Name: "20-cloudinit.conf"}},
      +			}}},
      +		},
      +		{
      +			config.Locksmith{
      +				Endpoint: "12.34.56.78:4001",
      +			},
      +			[]Unit{{config.Unit{
      +				Name:    "locksmithd.service",
      +				Runtime: true,
      +				DropIns: []config.UnitDropIn{{
      +					Name: "20-cloudinit.conf",
      +					Content: `[Service]
      +Environment="LOCKSMITHD_ENDPOINT=12.34.56.78:4001"
      +`,
      +				}},
      +			}}},
      +		},
      +	} {
      +		units := Locksmith{tt.config}.Units()
      +		if !reflect.DeepEqual(units, tt.units) {
      +			t.Errorf("bad units (%+v): want %#v, got %#v", tt.config, tt.units, units)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/networkd.go b/vendor/github.com/coreos/coreos-cloudinit/system/networkd.go
      new file mode 100644
      index 00000000..ed9c1a5a
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/networkd.go
      @@ -0,0 +1,95 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"log"
      +	"net"
      +	"os/exec"
      +	"strings"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +	"github.com/coreos/coreos-cloudinit/network"
      +
      +	"github.com/dotcloud/docker/pkg/netlink"
      +)
      +
      +func RestartNetwork(interfaces []network.InterfaceGenerator) (err error) {
      +	defer func() {
      +		if e := restartNetworkd(); e != nil {
      +			err = e
      +		}
      +	}()
      +
      +	if err = downNetworkInterfaces(interfaces); err != nil {
      +		return
      +	}
      +
      +	if err = maybeProbe8012q(interfaces); err != nil {
      +		return
      +	}
      +	return maybeProbeBonding(interfaces)
      +}
      +
      +func downNetworkInterfaces(interfaces []network.InterfaceGenerator) error {
      +	sysInterfaceMap := make(map[string]*net.Interface)
      +	if systemInterfaces, err := net.Interfaces(); err == nil {
      +		for _, iface := range systemInterfaces {
      +			iface := iface
      +			sysInterfaceMap[iface.Name] = &iface
      +		}
      +	} else {
      +		return err
      +	}
      +
      +	for _, iface := range interfaces {
      +		if systemInterface, ok := sysInterfaceMap[iface.Name()]; ok {
      +			log.Printf("Taking down interface %q\n", systemInterface.Name)
      +			if err := netlink.NetworkLinkDown(systemInterface); err != nil {
      +				log.Printf("Error while downing interface %q (%s). Continuing...\n", systemInterface.Name, err)
      +			}
      +		}
      +	}
      +
      +	return nil
      +}
      +
      +func maybeProbe8012q(interfaces []network.InterfaceGenerator) error {
      +	for _, iface := range interfaces {
      +		if iface.Type() == "vlan" {
      +			log.Printf("Probing LKM %q (%q)\n", "8021q", "8021q")
      +			return exec.Command("modprobe", "8021q").Run()
      +		}
      +	}
      +	return nil
      +}
      +
      +func maybeProbeBonding(interfaces []network.InterfaceGenerator) error {
      +	for _, iface := range interfaces {
      +		if iface.Type() == "bond" {
      +			args := append([]string{"bonding"}, strings.Split(iface.ModprobeParams(), " ")...)
      +			log.Printf("Probing LKM %q (%q)\n", "bonding", args)
      +			return exec.Command("modprobe", args...).Run()
      +		}
      +	}
      +	return nil
      +}
      +
      +func restartNetworkd() error {
      +	log.Printf("Restarting networkd.service\n")
      +	networkd := Unit{config.Unit{Name: "systemd-networkd.service"}}
      +	_, err := NewUnitManager("").RunUnitCommand(networkd, "restart")
      +	return err
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/oem.go b/vendor/github.com/coreos/coreos-cloudinit/system/oem.go
      new file mode 100644
      index 00000000..b77e9c8b
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/oem.go
      @@ -0,0 +1,46 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"fmt"
      +	"path"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +// OEM is a top-level structure which embeds its underlying configuration,
      +// config.OEM, and provides the system-specific File().
      +type OEM struct {
      +	config.OEM
      +}
      +
      +func (oem OEM) File() (*File, error) {
      +	if oem.ID == "" {
      +		return nil, nil
      +	}
      +
      +	content := fmt.Sprintf("ID=%s\n", oem.ID)
      +	content += fmt.Sprintf("VERSION_ID=%s\n", oem.VersionID)
      +	content += fmt.Sprintf("NAME=%q\n", oem.Name)
      +	content += fmt.Sprintf("HOME_URL=%q\n", oem.HomeURL)
      +	content += fmt.Sprintf("BUG_REPORT_URL=%q\n", oem.BugReportURL)
      +
      +	return &File{config.File{
      +		Path:               path.Join("etc", "oem-release"),
      +		RawFilePermissions: "0644",
      +		Content:            content,
      +	}}, nil
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/oem_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/oem_test.go
      new file mode 100644
      index 00000000..38661201
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/oem_test.go
      @@ -0,0 +1,61 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +func TestOEMFile(t *testing.T) {
      +	for _, tt := range []struct {
      +		config config.OEM
      +		file   *File
      +	}{
      +		{
      +			config.OEM{},
      +			nil,
      +		},
      +		{
      +			config.OEM{
      +				ID:           "rackspace",
      +				Name:         "Rackspace Cloud Servers",
      +				VersionID:    "168.0.0",
      +				HomeURL:      "https://www.rackspace.com/cloud/servers/",
      +				BugReportURL: "https://github.com/coreos/coreos-overlay",
      +			},
      +			&File{config.File{
      +				Path:               "etc/oem-release",
      +				RawFilePermissions: "0644",
      +				Content: `ID=rackspace
      +VERSION_ID=168.0.0
      +NAME="Rackspace Cloud Servers"
      +HOME_URL="https://www.rackspace.com/cloud/servers/"
      +BUG_REPORT_URL="https://github.com/coreos/coreos-overlay"
      +`,
      +			}},
      +		},
      +	} {
      +		file, err := OEM{tt.config}.File()
      +		if err != nil {
      +			t.Errorf("bad error (%q): want %v, got %q", tt.config, nil, err)
      +		}
      +		if !reflect.DeepEqual(tt.file, file) {
      +			t.Errorf("bad file (%q): want %#v, got %#v", tt.config, tt.file, file)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/ssh_key.go b/vendor/github.com/coreos/coreos-cloudinit/system/ssh_key.go
      new file mode 100644
      index 00000000..9811422a
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/ssh_key.go
      @@ -0,0 +1,73 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"fmt"
      +	"io"
      +	"io/ioutil"
      +	"os/exec"
      +	"strings"
      +)
      +
+// AuthorizeSSHKeys adds the provided SSH public keys to the given user's
+// list of authorized keys, under the given keys name.
      +func AuthorizeSSHKeys(user string, keysName string, keys []string) error {
      +	for i, key := range keys {
      +		keys[i] = strings.TrimSpace(key)
      +	}
      +
      +	// join all keys with newlines, ensuring the resulting string
      +	// also ends with a newline
      +	joined := fmt.Sprintf("%s\n", strings.Join(keys, "\n"))
      +
      +	cmd := exec.Command("update-ssh-keys", "-u", user, "-a", keysName)
      +	stdin, err := cmd.StdinPipe()
      +	if err != nil {
      +		return err
      +	}
      +
      +	stdout, err := cmd.StdoutPipe()
      +	if err != nil {
      +		return err
      +	}
      +
      +	stderr, err := cmd.StderrPipe()
      +	if err != nil {
      +		return err
      +	}
      +
      +	err = cmd.Start()
      +	if err != nil {
      +		stdin.Close()
      +		return err
      +	}
      +
      +	_, err = io.WriteString(stdin, joined)
      +	if err != nil {
      +		return err
      +	}
      +
      +	stdin.Close()
      +	stdoutBytes, _ := ioutil.ReadAll(stdout)
      +	stderrBytes, _ := ioutil.ReadAll(stderr)
      +
      +	err = cmd.Wait()
      +	if err != nil {
      +		return fmt.Errorf("Call to update-ssh-keys failed with %v: %s %s", err, string(stdoutBytes), string(stderrBytes))
      +	}
      +
      +	return nil
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/systemd.go b/vendor/github.com/coreos/coreos-cloudinit/system/systemd.go
      new file mode 100644
      index 00000000..4385b556
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/systemd.go
      @@ -0,0 +1,205 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"fmt"
      +	"io/ioutil"
      +	"log"
      +	"os"
      +	"os/exec"
      +	"path"
      +	"strings"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +	"github.com/coreos/go-systemd/dbus"
      +)
      +
      +func NewUnitManager(root string) UnitManager {
      +	return &systemd{root}
      +}
      +
      +type systemd struct {
      +	root string
      +}
      +
      +// fakeMachineID is placed on non-usr CoreOS images and should
      +// never be used as a true MachineID
      +const fakeMachineID = "42000000000000000000000000000042"
      +
      +// PlaceUnit writes a unit file at its desired destination, creating parent
      +// directories as necessary.
      +func (s *systemd) PlaceUnit(u Unit) error {
      +	file := File{config.File{
      +		Path:               u.Destination(s.root),
      +		Content:            u.Content,
      +		RawFilePermissions: "0644",
      +	}}
      +
      +	_, err := WriteFile(&file, "/")
      +	return err
      +}
      +
      +// PlaceUnitDropIn writes a unit drop-in file at its desired destination,
      +// creating parent directories as necessary.
      +func (s *systemd) PlaceUnitDropIn(u Unit, d config.UnitDropIn) error {
      +	file := File{config.File{
      +		Path:               u.DropInDestination(s.root, d),
      +		Content:            d.Content,
      +		RawFilePermissions: "0644",
      +	}}
      +
      +	_, err := WriteFile(&file, "/")
      +	return err
      +}
      +
      +func (s *systemd) EnableUnitFile(u Unit) error {
      +	conn, err := dbus.New()
      +	if err != nil {
      +		return err
      +	}
      +
      +	units := []string{u.Name}
      +	_, _, err = conn.EnableUnitFiles(units, u.Runtime, true)
      +	return err
      +}
      +
      +func (s *systemd) RunUnitCommand(u Unit, c string) (string, error) {
      +	conn, err := dbus.New()
      +	if err != nil {
      +		return "", err
      +	}
      +
      +	var fn func(string, string) (string, error)
      +	switch c {
      +	case "start":
      +		fn = conn.StartUnit
      +	case "stop":
      +		fn = conn.StopUnit
      +	case "restart":
      +		fn = conn.RestartUnit
      +	case "reload":
      +		fn = conn.ReloadUnit
      +	case "try-restart":
      +		fn = conn.TryRestartUnit
      +	case "reload-or-restart":
      +		fn = conn.ReloadOrRestartUnit
      +	case "reload-or-try-restart":
      +		fn = conn.ReloadOrTryRestartUnit
      +	default:
      +		return "", fmt.Errorf("Unsupported systemd command %q", c)
      +	}
      +
      +	return fn(u.Name, "replace")
      +}
      +
      +func (s *systemd) DaemonReload() error {
      +	conn, err := dbus.New()
      +	if err != nil {
      +		return err
      +	}
      +
      +	return conn.Reload()
      +}
      +
      +// MaskUnit masks the given Unit by symlinking its unit file to
      +// /dev/null, analogous to `systemctl mask`.
      +// N.B.: Unlike `systemctl mask`, this function will *remove any existing unit
      +// file at the location*, to ensure that the mask will succeed.
      +func (s *systemd) MaskUnit(u Unit) error {
      +	masked := u.Destination(s.root)
      +	if _, err := os.Stat(masked); os.IsNotExist(err) {
      +		if err := os.MkdirAll(path.Dir(masked), os.FileMode(0755)); err != nil {
      +			return err
      +		}
      +	} else if err := os.Remove(masked); err != nil {
      +		return err
      +	}
      +	return os.Symlink("/dev/null", masked)
      +}
      +
      +// UnmaskUnit is analogous to systemd's unit_file_unmask. If the file
      +// associated with the given Unit is empty or appears to be a symlink to
      +// /dev/null, it is removed.
      +func (s *systemd) UnmaskUnit(u Unit) error {
      +	masked := u.Destination(s.root)
      +	ne, err := nullOrEmpty(masked)
      +	if os.IsNotExist(err) {
      +		return nil
      +	} else if err != nil {
      +		return err
      +	}
      +	if !ne {
      +		log.Printf("%s is not null or empty, refusing to unmask", masked)
      +		return nil
      +	}
      +	return os.Remove(masked)
      +}
      +
      +// nullOrEmpty checks whether a given path appears to be an empty regular file
      +// or a symlink to /dev/null
      +func nullOrEmpty(path string) (bool, error) {
      +	fi, err := os.Stat(path)
      +	if err != nil {
      +		return false, err
      +	}
      +	m := fi.Mode()
      +	if m.IsRegular() && fi.Size() <= 0 {
      +		return true, nil
      +	}
      +	if m&os.ModeCharDevice > 0 {
      +		return true, nil
      +	}
      +	return false, nil
      +}
      +
      +func ExecuteScript(scriptPath string) (string, error) {
      +	props := []dbus.Property{
      +		dbus.PropDescription("Unit generated and executed by coreos-cloudinit on behalf of user"),
      +		dbus.PropExecStart([]string{"/bin/bash", scriptPath}, false),
      +	}
      +
      +	base := path.Base(scriptPath)
      +	name := fmt.Sprintf("coreos-cloudinit-%s.service", base)
      +
      +	log.Printf("Creating transient systemd unit '%s'", name)
      +
      +	conn, err := dbus.New()
      +	if err != nil {
      +		return "", err
      +	}
      +
      +	_, err = conn.StartTransientUnit(name, "replace", props...)
      +	return name, err
      +}
      +
      +func SetHostname(hostname string) error {
      +	return exec.Command("hostnamectl", "set-hostname", hostname).Run()
      +}
      +
      +func Hostname() (string, error) {
      +	return os.Hostname()
      +}
      +
      +func MachineID(root string) string {
      +	contents, _ := ioutil.ReadFile(path.Join(root, "etc", "machine-id"))
      +	id := strings.TrimSpace(string(contents))
      +
      +	if id == fakeMachineID {
      +		id = ""
      +	}
      +
      +	return id
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/systemd_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/systemd_test.go
      new file mode 100644
      index 00000000..82052abc
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/systemd_test.go
      @@ -0,0 +1,280 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"fmt"
      +	"io/ioutil"
      +	"os"
      +	"path"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +func TestPlaceUnit(t *testing.T) {
      +	tests := []config.Unit{
      +		{
      +			Name:    "50-eth0.network",
      +			Runtime: true,
      +			Content: "[Match]\nName=eth47\n\n[Network]\nAddress=10.209.171.177/19\n",
      +		},
      +		{
      +			Name:    "media-state.mount",
      +			Content: "[Mount]\nWhat=/dev/sdb1\nWhere=/media/state\n",
      +		},
      +	}
      +
      +	for _, tt := range tests {
      +		dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +		if err != nil {
      +			panic(fmt.Sprintf("Unable to create tempdir: %v", err))
      +		}
      +
      +		u := Unit{tt}
      +		sd := &systemd{dir}
      +
      +		if err := sd.PlaceUnit(u); err != nil {
      +			t.Fatalf("PlaceUnit(): bad error (%+v): want nil, got %s", tt, err)
      +		}
      +
      +		fi, err := os.Stat(u.Destination(dir))
      +		if err != nil {
      +			t.Fatalf("Stat(): bad error (%+v): want nil, got %s", tt, err)
      +		}
      +
      +		if mode := fi.Mode(); mode != os.FileMode(0644) {
      +			t.Errorf("bad filemode (%+v): want %v, got %v", tt, os.FileMode(0644), mode)
      +		}
      +
      +		c, err := ioutil.ReadFile(u.Destination(dir))
      +		if err != nil {
      +			t.Fatalf("ReadFile(): bad error (%+v): want nil, got %s", tt, err)
      +		}
      +
      +		if string(c) != tt.Content {
      +			t.Errorf("bad contents (%+v): want %q, got %q", tt, tt.Content, string(c))
      +		}
      +
      +		os.RemoveAll(dir)
      +	}
      +}
      +
      +func TestPlaceUnitDropIn(t *testing.T) {
      +	tests := []config.Unit{
      +		{
      +			Name:    "false.service",
      +			Runtime: true,
      +			DropIns: []config.UnitDropIn{
      +				{
      +					Name:    "00-true.conf",
      +					Content: "[Service]\nExecStart=\nExecStart=/usr/bin/true\n",
      +				},
      +			},
      +		},
      +		{
      +			Name: "true.service",
      +			DropIns: []config.UnitDropIn{
      +				{
      +					Name:    "00-false.conf",
      +					Content: "[Service]\nExecStart=\nExecStart=/usr/bin/false\n",
      +				},
      +			},
      +		},
      +	}
      +
      +	for _, tt := range tests {
      +		dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +		if err != nil {
      +			panic(fmt.Sprintf("Unable to create tempdir: %v", err))
      +		}
      +
      +		u := Unit{tt}
      +		sd := &systemd{dir}
      +
      +		if err := sd.PlaceUnitDropIn(u, u.DropIns[0]); err != nil {
      +			t.Fatalf("PlaceUnit(): bad error (%+v): want nil, got %s", tt, err)
      +		}
      +
      +		fi, err := os.Stat(u.DropInDestination(dir, u.DropIns[0]))
      +		if err != nil {
      +			t.Fatalf("Stat(): bad error (%+v): want nil, got %s", tt, err)
      +		}
      +
      +		if mode := fi.Mode(); mode != os.FileMode(0644) {
      +			t.Errorf("bad filemode (%+v): want %v, got %v", tt, os.FileMode(0644), mode)
      +		}
      +
      +		c, err := ioutil.ReadFile(u.DropInDestination(dir, u.DropIns[0]))
      +		if err != nil {
      +			t.Fatalf("ReadFile(): bad error (%+v): want nil, got %s", tt, err)
      +		}
      +
      +		if string(c) != u.DropIns[0].Content {
      +			t.Errorf("bad contents (%+v): want %q, got %q", tt, u.DropIns[0].Content, string(c))
      +		}
      +
      +		os.RemoveAll(dir)
      +	}
      +}
      +
      +func TestMachineID(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	os.Mkdir(path.Join(dir, "etc"), os.FileMode(0755))
      +	ioutil.WriteFile(path.Join(dir, "etc", "machine-id"), []byte("node007\n"), os.FileMode(0444))
      +
      +	if MachineID(dir) != "node007" {
      +		t.Fatalf("File has incorrect contents")
      +	}
      +}
      +
      +func TestMaskUnit(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	sd := &systemd{dir}
      +
      +	// Ensure mask works with units that do not currently exist
      +	uf := Unit{config.Unit{Name: "foo.service"}}
      +	if err := sd.MaskUnit(uf); err != nil {
      +		t.Fatalf("Unable to mask new unit: %v", err)
      +	}
      +	fooPath := path.Join(dir, "etc", "systemd", "system", "foo.service")
      +	fooTgt, err := os.Readlink(fooPath)
      +	if err != nil {
      +		t.Fatal("Unable to read link", err)
      +	}
      +	if fooTgt != "/dev/null" {
      +		t.Fatal("unit not masked, got unit target", fooTgt)
      +	}
      +
      +	// Ensure mask works with unit files that already exist
      +	ub := Unit{config.Unit{Name: "bar.service"}}
      +	barPath := path.Join(dir, "etc", "systemd", "system", "bar.service")
      +	if _, err := os.Create(barPath); err != nil {
      +		t.Fatalf("Error creating new unit file: %v", err)
      +	}
      +	if err := sd.MaskUnit(ub); err != nil {
      +		t.Fatalf("Unable to mask existing unit: %v", err)
      +	}
      +	barTgt, err := os.Readlink(barPath)
      +	if err != nil {
      +		t.Fatal("Unable to read link", err)
      +	}
      +	if barTgt != "/dev/null" {
      +		t.Fatal("unit not masked, got unit target", barTgt)
      +	}
      +}
      +
      +func TestUnmaskUnit(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	sd := &systemd{dir}
      +
      +	nilUnit := Unit{config.Unit{Name: "null.service"}}
      +	if err := sd.UnmaskUnit(nilUnit); err != nil {
      +		t.Errorf("unexpected error from unmasking nonexistent unit: %v", err)
      +	}
      +
      +	uf := Unit{config.Unit{Name: "foo.service", Content: "[Service]\nExecStart=/bin/true"}}
      +	dst := uf.Destination(dir)
      +	if err := os.MkdirAll(path.Dir(dst), os.FileMode(0755)); err != nil {
      +		t.Fatalf("Unable to create unit directory: %v", err)
      +	}
      +	if _, err := os.Create(dst); err != nil {
      +		t.Fatalf("Unable to write unit file: %v", err)
      +	}
      +
      +	if err := ioutil.WriteFile(dst, []byte(uf.Content), 700); err != nil {
      +		t.Fatalf("Unable to write unit file: %v", err)
      +	}
      +	if err := sd.UnmaskUnit(uf); err != nil {
      +		t.Errorf("unmask of non-empty unit returned unexpected error: %v", err)
      +	}
      +	got, _ := ioutil.ReadFile(dst)
      +	if string(got) != uf.Content {
      +		t.Errorf("unmask of non-empty unit mutated unit contents unexpectedly")
      +	}
      +
      +	ub := Unit{config.Unit{Name: "bar.service"}}
      +	dst = ub.Destination(dir)
      +	if err := os.Symlink("/dev/null", dst); err != nil {
      +		t.Fatalf("Unable to create masked unit: %v", err)
      +	}
      +	if err := sd.UnmaskUnit(ub); err != nil {
      +		t.Errorf("unmask of unit returned unexpected error: %v", err)
      +	}
      +	if _, err := os.Stat(dst); !os.IsNotExist(err) {
      +		t.Errorf("expected %s to not exist after unmask, but got err: %s", dst, err)
      +	}
      +}
      +
      +func TestNullOrEmpty(t *testing.T) {
      +	dir, err := ioutil.TempDir(os.TempDir(), "coreos-cloudinit-")
      +	if err != nil {
      +		t.Fatalf("Unable to create tempdir: %v", err)
      +	}
      +	defer os.RemoveAll(dir)
      +
      +	non := path.Join(dir, "does_not_exist")
      +	ne, err := nullOrEmpty(non)
      +	if !os.IsNotExist(err) {
      +		t.Errorf("nullOrEmpty on nonexistent file returned bad error: %v", err)
      +	}
      +	if ne {
      +		t.Errorf("nullOrEmpty returned true unxpectedly")
      +	}
      +
      +	regEmpty := path.Join(dir, "regular_empty_file")
      +	_, err = os.Create(regEmpty)
      +	if err != nil {
      +		t.Fatalf("Unable to create tempfile: %v", err)
      +	}
      +	gotNe, gotErr := nullOrEmpty(regEmpty)
      +	if !gotNe || gotErr != nil {
      +		t.Errorf("nullOrEmpty of regular empty file returned %t, %v - want true, nil", gotNe, gotErr)
      +	}
      +
      +	reg := path.Join(dir, "regular_file")
      +	if err := ioutil.WriteFile(reg, []byte("asdf"), 700); err != nil {
      +		t.Fatalf("Unable to create tempfile: %v", err)
      +	}
      +	gotNe, gotErr = nullOrEmpty(reg)
      +	if gotNe || gotErr != nil {
      +		t.Errorf("nullOrEmpty of regular file returned %t, %v - want false, nil", gotNe, gotErr)
      +	}
      +
      +	null := path.Join(dir, "null")
      +	if err := os.Symlink(os.DevNull, null); err != nil {
      +		t.Fatalf("Unable to create /dev/null link: %s", err)
      +	}
      +	gotNe, gotErr = nullOrEmpty(null)
      +	if !gotNe || gotErr != nil {
      +		t.Errorf("nullOrEmpty of null symlink returned %t, %v - want true, nil", gotNe, gotErr)
      +	}
      +
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/unit.go b/vendor/github.com/coreos/coreos-cloudinit/system/unit.go
      new file mode 100644
      index 00000000..22d006d1
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/unit.go
      @@ -0,0 +1,82 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"fmt"
      +	"path"
      +	"path/filepath"
      +	"strings"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +type UnitManager interface {
      +	PlaceUnit(unit Unit) error
      +	PlaceUnitDropIn(unit Unit, dropIn config.UnitDropIn) error
      +	EnableUnitFile(unit Unit) error
      +	RunUnitCommand(unit Unit, command string) (string, error)
      +	MaskUnit(unit Unit) error
      +	UnmaskUnit(unit Unit) error
      +	DaemonReload() error
      +}
      +
      +// Unit is a top-level structure which embeds its underlying configuration,
      +// config.Unit, and provides the system-specific Destination(), Type(), and
      +// Group().
      +type Unit struct {
      +	config.Unit
      +}
      +
      +// Type returns the extension of the unit (everything that follows the final
      +// period).
      +func (u Unit) Type() string {
      +	ext := filepath.Ext(u.Name)
      +	return strings.TrimLeft(ext, ".")
      +}
      +
      +// Group returns "network" or "system" depending on whether or not the unit is
      +// a network unit or otherwise.
      +func (u Unit) Group() string {
      +	switch u.Type() {
      +	case "network", "netdev", "link":
      +		return "network"
      +	default:
      +		return "system"
      +	}
      +}
      +
      +// Destination builds the appropriate absolute file path for the Unit. The root
      +// argument indicates the effective base directory of the system (similar to a
      +// chroot).
      +func (u Unit) Destination(root string) string {
      +	return path.Join(u.prefix(root), u.Name)
      +}
      +
      +// DropInDestination builds the appropriate absolute file path for the
      +// UnitDropIn. The root argument indicates the effective base directory of the
      +// system (similar to a chroot) and the dropIn argument is the UnitDropIn for
      +// which the destination is being calculated.
      +func (u Unit) DropInDestination(root string, dropIn config.UnitDropIn) string {
      +	return path.Join(u.prefix(root), fmt.Sprintf("%s.d", u.Name), dropIn.Name)
      +}
      +
      +func (u Unit) prefix(root string) string {
      +	dir := "etc"
      +	if u.Runtime {
      +		dir = "run"
      +	}
      +	return path.Join(root, dir, "systemd", u.Group())
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/unit_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/unit_test.go
      new file mode 100644
      index 00000000..c995b592
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/unit_test.go
      @@ -0,0 +1,136 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +func TestType(t *testing.T) {
      +	tests := []struct {
      +		name string
      +
      +		typ string
      +	}{
      +		{},
      +		{"test.service", "service"},
      +		{"hello", ""},
      +		{"lots.of.dots", "dots"},
      +	}
      +
      +	for _, tt := range tests {
      +		u := Unit{config.Unit{
      +			Name: tt.name,
      +		}}
      +		if typ := u.Type(); tt.typ != typ {
      +			t.Errorf("bad type (%+v): want %q, got %q", tt, tt.typ, typ)
      +		}
      +	}
      +}
      +
      +func TestGroup(t *testing.T) {
      +	tests := []struct {
      +		name string
      +
      +		group string
      +	}{
      +		{"test.service", "system"},
      +		{"test.link", "network"},
      +		{"test.network", "network"},
      +		{"test.netdev", "network"},
      +		{"test.conf", "system"},
      +	}
      +
      +	for _, tt := range tests {
      +		u := Unit{config.Unit{
      +			Name: tt.name,
      +		}}
      +		if group := u.Group(); tt.group != group {
      +			t.Errorf("bad group (%+v): want %q, got %q", tt, tt.group, group)
      +		}
      +	}
      +}
      +
      +func TestDestination(t *testing.T) {
      +	tests := []struct {
      +		root    string
      +		name    string
      +		runtime bool
      +
      +		destination string
      +	}{
      +		{
      +			root:        "/some/dir",
      +			name:        "foobar.service",
      +			destination: "/some/dir/etc/systemd/system/foobar.service",
      +		},
      +		{
      +			root:        "/some/dir",
      +			name:        "foobar.service",
      +			runtime:     true,
      +			destination: "/some/dir/run/systemd/system/foobar.service",
      +		},
      +	}
      +
      +	for _, tt := range tests {
      +		u := Unit{config.Unit{
      +			Name:    tt.name,
      +			Runtime: tt.runtime,
      +		}}
      +		if d := u.Destination(tt.root); tt.destination != d {
      +			t.Errorf("bad destination (%+v): want %q, got %q", tt, tt.destination, d)
      +		}
      +	}
      +}
      +
      +func TestDropInDestination(t *testing.T) {
      +	tests := []struct {
      +		root       string
      +		unitName   string
      +		dropInName string
      +		runtime    bool
      +
      +		destination string
      +	}{
      +		{
      +			root:        "/some/dir",
      +			unitName:    "foo.service",
      +			dropInName:  "bar.conf",
      +			destination: "/some/dir/etc/systemd/system/foo.service.d/bar.conf",
      +		},
      +		{
      +			root:        "/some/dir",
      +			unitName:    "foo.service",
      +			dropInName:  "bar.conf",
      +			runtime:     true,
      +			destination: "/some/dir/run/systemd/system/foo.service.d/bar.conf",
      +		},
      +	}
      +
      +	for _, tt := range tests {
      +		u := Unit{config.Unit{
      +			Name:    tt.unitName,
      +			Runtime: tt.runtime,
      +			DropIns: []config.UnitDropIn{{
      +				Name: tt.dropInName,
      +			}},
      +		}}
      +		if d := u.DropInDestination(tt.root, u.DropIns[0]); tt.destination != d {
      +			t.Errorf("bad destination (%+v): want %q, got %q", tt, tt.destination, d)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/update.go b/vendor/github.com/coreos/coreos-cloudinit/system/update.go
      new file mode 100644
      index 00000000..5e92dace
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/update.go
      @@ -0,0 +1,151 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"bufio"
      +	"fmt"
      +	"io"
      +	"os"
      +	"path"
      +	"reflect"
      +	"sort"
      +	"strings"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +const (
      +	locksmithUnit    = "locksmithd.service"
      +	updateEngineUnit = "update-engine.service"
      +)
      +
      +// Update is a top-level structure which contains its underlying configuration,
      +// config.Update, a function for reading the configuration (the default
      +// implementation reading from the filesystem), and provides the system-specific
      +// File() and Unit().
      +type Update struct {
      +	ReadConfig func() (io.Reader, error)
      +	config.Update
      +}
      +
      +func DefaultReadConfig() (io.Reader, error) {
      +	etcUpdate := path.Join("/etc", "coreos", "update.conf")
      +	usrUpdate := path.Join("/usr", "share", "coreos", "update.conf")
      +
      +	f, err := os.Open(etcUpdate)
      +	if os.IsNotExist(err) {
      +		f, err = os.Open(usrUpdate)
      +	}
      +	return f, err
      +}
      +
      +// File generates an `/etc/coreos/update.conf` file (if any update
      +// configuration options are set in cloud-config) by either rewriting the
      +// existing file on disk, or starting from `/usr/share/coreos/update.conf`
      +func (uc Update) File() (*File, error) {
      +	if config.IsZero(uc.Update) {
      +		return nil, nil
      +	}
      +	if err := config.AssertStructValid(uc.Update); err != nil {
      +		return nil, err
      +	}
      +
      +	// Generate the list of possible substitutions to be performed based on the options that are configured
      +	subs := map[string]string{}
      +	uct := reflect.TypeOf(uc.Update)
      +	ucv := reflect.ValueOf(uc.Update)
      +	for i := 0; i < uct.NumField(); i++ {
      +		val := ucv.Field(i).String()
      +		if val == "" {
      +			continue
      +		}
      +		env := uct.Field(i).Tag.Get("env")
      +		subs[env] = fmt.Sprintf("%s=%s", env, val)
      +	}
      +
      +	conf, err := uc.ReadConfig()
      +	if err != nil {
      +		return nil, err
      +	}
      +	scanner := bufio.NewScanner(conf)
      +
      +	var out string
      +	for scanner.Scan() {
      +		line := scanner.Text()
      +		for env, value := range subs {
      +			if strings.HasPrefix(line, env) {
      +				line = value
      +				delete(subs, env)
      +				break
      +			}
      +		}
      +		out += line
      +		out += "\n"
      +		if err := scanner.Err(); err != nil {
      +			return nil, err
      +		}
      +	}
      +
      +	for _, key := range sortedKeys(subs) {
      +		out += subs[key]
      +		out += "\n"
      +	}
      +
      +	return &File{config.File{
      +		Path:               path.Join("etc", "coreos", "update.conf"),
      +		RawFilePermissions: "0644",
      +		Content:            out,
      +	}}, nil
      +}
      +
      +// Units generates units for the cloud-init initializer to act on:
      +// - a locksmith Unit, if "reboot-strategy" was set in cloud-config
      +// - an update_engine Unit, if "group" or "server" was set in cloud-config
      +func (uc Update) Units() []Unit {
      +	var units []Unit
      +	if uc.Update.RebootStrategy != "" {
      +		ls := &Unit{config.Unit{
      +			Name:    locksmithUnit,
      +			Command: "restart",
      +			Mask:    false,
      +			Runtime: true,
      +		}}
      +
      +		if uc.Update.RebootStrategy == "off" {
      +			ls.Command = "stop"
      +			ls.Mask = true
      +		}
      +		units = append(units, *ls)
      +	}
      +
      +	if uc.Update.Group != "" || uc.Update.Server != "" {
      +		ue := Unit{config.Unit{
      +			Name:    updateEngineUnit,
      +			Command: "restart",
      +		}}
      +		units = append(units, ue)
      +	}
      +
      +	return units
      +}
      +
      +func sortedKeys(m map[string]string) (keys []string) {
      +	for key := range m {
      +		keys = append(keys, key)
      +	}
      +	sort.Strings(keys)
      +	return
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/update_test.go b/vendor/github.com/coreos/coreos-cloudinit/system/update_test.go
      new file mode 100644
      index 00000000..ae3972b8
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/update_test.go
      @@ -0,0 +1,161 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"io"
      +	"reflect"
      +	"strings"
      +	"testing"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +func testReadConfig(config string) func() (io.Reader, error) {
      +	return func() (io.Reader, error) {
      +		return strings.NewReader(config), nil
      +	}
      +}
      +
      +func TestUpdateUnits(t *testing.T) {
      +	for _, tt := range []struct {
      +		config config.Update
      +		units  []Unit
      +		err    error
      +	}{
      +		{
      +			config: config.Update{},
      +		},
      +		{
      +			config: config.Update{Group: "master", Server: "http://foo.com"},
      +			units: []Unit{{config.Unit{
      +				Name:    "update-engine.service",
      +				Command: "restart",
      +			}}},
      +		},
      +		{
      +			config: config.Update{RebootStrategy: "best-effort"},
      +			units: []Unit{{config.Unit{
      +				Name:    "locksmithd.service",
      +				Command: "restart",
      +				Runtime: true,
      +			}}},
      +		},
      +		{
      +			config: config.Update{RebootStrategy: "etcd-lock"},
      +			units: []Unit{{config.Unit{
      +				Name:    "locksmithd.service",
      +				Command: "restart",
      +				Runtime: true,
      +			}}},
      +		},
      +		{
      +			config: config.Update{RebootStrategy: "reboot"},
      +			units: []Unit{{config.Unit{
      +				Name:    "locksmithd.service",
      +				Command: "restart",
      +				Runtime: true,
      +			}}},
      +		},
      +		{
      +			config: config.Update{RebootStrategy: "off"},
      +			units: []Unit{{config.Unit{
      +				Name:    "locksmithd.service",
      +				Command: "stop",
      +				Runtime: true,
      +				Mask:    true,
      +			}}},
      +		},
      +	} {
      +		units := Update{Update: tt.config, ReadConfig: testReadConfig("")}.Units()
      +		if !reflect.DeepEqual(tt.units, units) {
      +			t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.units, units)
      +		}
      +	}
      +}
      +
      +func TestUpdateFile(t *testing.T) {
      +	for _, tt := range []struct {
      +		config config.Update
      +		orig   string
      +		file   *File
      +		err    error
      +	}{
      +		{
      +			config: config.Update{},
      +		},
      +		{
      +			config: config.Update{RebootStrategy: "wizzlewazzle"},
      +			err:    &config.ErrorValid{Value: "wizzlewazzle", Field: "RebootStrategy", Valid: "^(best-effort|etcd-lock|reboot|off)$"},
      +		},
      +		{
      +			config: config.Update{Group: "master", Server: "http://foo.com"},
      +			file: &File{config.File{
      +				Content:            "GROUP=master\nSERVER=http://foo.com\n",
      +				Path:               "etc/coreos/update.conf",
      +				RawFilePermissions: "0644",
      +			}},
      +		},
      +		{
      +			config: config.Update{RebootStrategy: "best-effort"},
      +			file: &File{config.File{
      +				Content:            "REBOOT_STRATEGY=best-effort\n",
      +				Path:               "etc/coreos/update.conf",
      +				RawFilePermissions: "0644",
      +			}},
      +		},
      +		{
      +			config: config.Update{RebootStrategy: "etcd-lock"},
      +			file: &File{config.File{
      +				Content:            "REBOOT_STRATEGY=etcd-lock\n",
      +				Path:               "etc/coreos/update.conf",
      +				RawFilePermissions: "0644",
      +			}},
      +		},
      +		{
      +			config: config.Update{RebootStrategy: "reboot"},
      +			file: &File{config.File{
      +				Content:            "REBOOT_STRATEGY=reboot\n",
      +				Path:               "etc/coreos/update.conf",
      +				RawFilePermissions: "0644",
      +			}},
      +		},
      +		{
      +			config: config.Update{RebootStrategy: "off"},
      +			file: &File{config.File{
      +				Content:            "REBOOT_STRATEGY=off\n",
      +				Path:               "etc/coreos/update.conf",
      +				RawFilePermissions: "0644",
      +			}},
      +		},
      +		{
      +			config: config.Update{RebootStrategy: "etcd-lock"},
      +			orig:   "SERVER=https://example.com\nGROUP=thegroupc\nREBOOT_STRATEGY=awesome",
      +			file: &File{config.File{
      +				Content:            "SERVER=https://example.com\nGROUP=thegroupc\nREBOOT_STRATEGY=etcd-lock\n",
      +				Path:               "etc/coreos/update.conf",
      +				RawFilePermissions: "0644",
      +			}},
      +		},
      +	} {
      +		file, err := Update{Update: tt.config, ReadConfig: testReadConfig(tt.orig)}.File()
      +		if !reflect.DeepEqual(tt.err, err) {
      +			t.Errorf("bad error (%q): want %q, got %q", tt.config, tt.err, err)
      +		}
      +		if !reflect.DeepEqual(tt.file, file) {
      +			t.Errorf("bad units (%q): want %#v, got %#v", tt.config, tt.file, file)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/user.go b/vendor/github.com/coreos/coreos-cloudinit/system/user.go
      new file mode 100644
      index 00000000..3f973c3d
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/system/user.go
      @@ -0,0 +1,114 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package system
      +
      +import (
      +	"fmt"
      +	"log"
      +	"os/exec"
      +	"os/user"
      +	"strings"
      +
      +	"github.com/coreos/coreos-cloudinit/config"
      +)
      +
      +func UserExists(u *config.User) bool {
      +	_, err := user.Lookup(u.Name)
      +	return err == nil
      +}
      +
      +func CreateUser(u *config.User) error {
      +	args := []string{}
      +
      +	if u.PasswordHash != "" {
      +		args = append(args, "--password", u.PasswordHash)
      +	} else {
      +		args = append(args, "--password", "*")
      +	}
      +
      +	if u.GECOS != "" {
      +		args = append(args, "--comment", fmt.Sprintf("%q", u.GECOS))
      +	}
      +
      +	if u.Homedir != "" {
      +		args = append(args, "--home-dir", u.Homedir)
      +	}
      +
      +	if u.NoCreateHome {
      +		args = append(args, "--no-create-home")
      +	} else {
      +		args = append(args, "--create-home")
      +	}
      +
      +	if u.PrimaryGroup != "" {
      +		args = append(args, "--gid", u.PrimaryGroup)
      +	}
      +
      +	if len(u.Groups) > 0 {
      +		args = append(args, "--groups", strings.Join(u.Groups, ","))
      +	}
      +
      +	if u.NoUserGroup {
      +		args = append(args, "--no-user-group")
      +	}
      +
      +	if u.System {
      +		args = append(args, "--system")
      +	}
      +
      +	if u.NoLogInit {
      +		args = append(args, "--no-log-init")
      +	}
      +
      +	if u.Shell != "" {
      +		args = append(args, "--shell", u.Shell)
      +	}
      +
      +	args = append(args, u.Name)
      +
      +	output, err := exec.Command("useradd", args...).CombinedOutput()
      +	if err != nil {
      +		log.Printf("Command 'useradd %s' failed: %v\n%s", strings.Join(args, " "), err, output)
      +	}
      +	return err
      +}
      +
      +func SetUserPassword(user, hash string) error {
      +	cmd := exec.Command("/usr/sbin/chpasswd", "-e")
      +
      +	stdin, err := cmd.StdinPipe()
      +	if err != nil {
      +		return err
      +	}
      +
      +	err = cmd.Start()
      +	if err != nil {
      +		log.Fatal(err)
      +	}
      +
      +	arg := fmt.Sprintf("%s:%s", user, hash)
      +	_, err = stdin.Write([]byte(arg))
      +	if err != nil {
      +		return err
      +	}
      +	stdin.Close()
      +
      +	err = cmd.Wait()
      +	if err != nil {
      +		return err
      +	}
      +
      +	return nil
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/test b/vendor/github.com/coreos/coreos-cloudinit/test
      new file mode 100755
      index 00000000..645e967a
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/test
      @@ -0,0 +1,43 @@
      +#!/bin/bash -e
      +
      +source ./build
      +
      +SRC="
      +	config
      +	config/validate
      +	datasource
      +	datasource/configdrive
      +	datasource/file
      +	datasource/metadata
      +	datasource/metadata/cloudsigma
      +	datasource/metadata/digitalocean
      +	datasource/metadata/ec2
      +	datasource/proc_cmdline
      +	datasource/test
      +	datasource/url
      +	datasource/vmware
      +	datasource/waagent
      +	initialize
      +	network
      +	pkg
      +	system
      +	.
      +"
      +
      +echo "Checking gofix..."
      +go tool fix -diff $SRC
      +
      +echo "Checking gofmt..."
      +gofmt -d -e $SRC
      +
      +# split SRC into an array and prepend REPO_PATH to each local package for go vet
      +split_vet=(${SRC// / })
      +VET_TEST="${REPO_PATH} ${split_vet[@]/#/${REPO_PATH}/}"
      +
      +echo "Checking govet..."
      +go vet $VET_TEST
      +
      +echo "Running tests..."
      +go test -timeout 60s -cover $@ ${VET_TEST} --race
      +
      +echo "Success"
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/90-configdrive.rules b/vendor/github.com/coreos/coreos-cloudinit/units/90-configdrive.rules
      new file mode 100644
      index 00000000..38d5ee6c
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/90-configdrive.rules
      @@ -0,0 +1,11 @@
      +# Automatically trigger configdrive mounting.
      +
      +ACTION!="add|change", GOTO="coreos_configdrive_end"
      +
      +# A normal config drive. Block device formatted with iso9660 or fat
      +SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="iso9660|udf|vfat", ENV{ID_FS_LABEL}=="config-2", TAG+="systemd", ENV{SYSTEMD_WANTS}+="media-configdrive.mount"
      +
+# Additionally support virtfs from QEMU
      +SUBSYSTEM=="virtio", DRIVER=="9pnet_virtio", ATTR{mount_tag}=="config-2", TAG+="systemd", ENV{SYSTEMD_WANTS}+="media-configvirtfs.mount"
      +
      +LABEL="coreos_configdrive_end"
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/90-ovfenv.rules b/vendor/github.com/coreos/coreos-cloudinit/units/90-ovfenv.rules
      new file mode 100644
      index 00000000..aba638ec
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/90-ovfenv.rules
      @@ -0,0 +1,8 @@
      +# Automatically trigger ovfenv mounting.
      +
      +ACTION!="add|change", GOTO="coreos_ovfenv_end"
      +
+# An OVF environment drive. Block device formatted with iso9660 or fat
      +SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="iso9660|vfat", ENV{ID_FS_LABEL}=="OVF_ENV", TAG+="systemd", ENV{SYSTEMD_WANTS}+="media-ovfenv.mount"
      +
      +LABEL="coreos_ovfenv_end"
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/media-configdrive.mount b/vendor/github.com/coreos/coreos-cloudinit/units/media-configdrive.mount
      new file mode 100644
      index 00000000..ee766546
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/media-configdrive.mount
      @@ -0,0 +1,13 @@
      +[Unit]
      +Wants=user-configdrive.service
      +Before=user-configdrive.service
      +# Only mount config drive block devices automatically in virtual machines
      +# or any host that has it explicitly enabled and not explicitly disabled.
      +ConditionVirtualization=|vm
      +ConditionKernelCommandLine=|coreos.configdrive=1
      +ConditionKernelCommandLine=!coreos.configdrive=0
      +
      +[Mount]
      +What=LABEL=config-2
      +Where=/media/configdrive
      +Options=ro
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/media-configvirtfs.mount b/vendor/github.com/coreos/coreos-cloudinit/units/media-configvirtfs.mount
      new file mode 100644
      index 00000000..669154ca
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/media-configvirtfs.mount
      @@ -0,0 +1,18 @@
      +[Unit]
      +Wants=user-configvirtfs.service
      +Before=user-configvirtfs.service
      +# Only mount config drive block devices automatically in virtual machines
      +# or any host that has it explicitly enabled and not explicitly disabled.
      +ConditionVirtualization=|vm
      +ConditionKernelCommandLine=|coreos.configdrive=1
      +ConditionKernelCommandLine=!coreos.configdrive=0
      +
      +# Support old style setup for now
      +Wants=addon-run@media-configvirtfs.service addon-config@media-configvirtfs.service
      +Before=addon-run@media-configvirtfs.service addon-config@media-configvirtfs.service
      +
      +[Mount]
      +What=config-2
      +Where=/media/configvirtfs
      +Options=ro,trans=virtio,version=9p2000.L
      +Type=9p
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/media-ovfenv.mount b/vendor/github.com/coreos/coreos-cloudinit/units/media-ovfenv.mount
      new file mode 100644
      index 00000000..fdea2239
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/media-ovfenv.mount
      @@ -0,0 +1,10 @@
      +[Unit]
      +Wants=user-config-ovfenv.service
      +Before=user-config-ovfenv.service
      +# Only mount ovfenv drive block devices automatically in virtual machines
      +ConditionVirtualization=vmware
      +
      +[Mount]
      +What=LABEL="OVF ENV"
      +Where=/media/ovfenv
      +Options=ro
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/system-cloudinit@.service b/vendor/github.com/coreos/coreos-cloudinit/units/system-cloudinit@.service
      new file mode 100644
      index 00000000..47882728
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/system-cloudinit@.service
      @@ -0,0 +1,11 @@
      +[Unit]
      +Description=Load cloud-config from %f
      +Requires=dbus.service
      +After=dbus.service
      +Before=system-config.target
      +ConditionFileNotEmpty=%f
      +
      +[Service]
      +Type=oneshot
      +RemainAfterExit=yes
      +ExecStart=/usr/bin/coreos-cloudinit --from-file=%f
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/system-config.target b/vendor/github.com/coreos/coreos-cloudinit/units/system-config.target
      new file mode 100644
      index 00000000..a510959f
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/system-config.target
      @@ -0,0 +1,10 @@
      +[Unit]
      +Description=Load system-provided cloud configs
      +
      +# Generate /etc/environment
      +Requires=coreos-setup-environment.service
      +After=coreos-setup-environment.service
      +
      +# Load OEM cloud-config.yml
      +Requires=system-cloudinit@usr-share-oem-cloud\x2dconfig.yml.service
      +After=system-cloudinit@usr-share-oem-cloud\x2dconfig.yml.service
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/user-cloudinit-proc-cmdline.service b/vendor/github.com/coreos/coreos-cloudinit/units/user-cloudinit-proc-cmdline.service
      new file mode 100644
      index 00000000..cf1569f7
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/user-cloudinit-proc-cmdline.service
      @@ -0,0 +1,13 @@
      +[Unit]
      +Description=Load cloud-config from url defined in /proc/cmdline
      +Requires=coreos-setup-environment.service
      +After=coreos-setup-environment.service
      +After=system-config.target
      +Before=user-config.target
      +ConditionKernelCommandLine=cloud-config-url
      +
      +[Service]
      +Type=oneshot
      +RemainAfterExit=yes
      +EnvironmentFile=-/etc/environment
      +ExecStart=/usr/bin/coreos-cloudinit --from-proc-cmdline
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/user-cloudinit@.path b/vendor/github.com/coreos/coreos-cloudinit/units/user-cloudinit@.path
      new file mode 100644
      index 00000000..8a3ba0f9
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/user-cloudinit@.path
      @@ -0,0 +1,5 @@
      +[Unit]
      +Description=Watch for a cloud-config at %f
      +
      +[Path]
      +PathExists=%f
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/user-cloudinit@.service b/vendor/github.com/coreos/coreos-cloudinit/units/user-cloudinit@.service
      new file mode 100644
      index 00000000..0c1b556c
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/user-cloudinit@.service
      @@ -0,0 +1,13 @@
      +[Unit]
      +Description=Load cloud-config from %f
      +Requires=coreos-setup-environment.service
      +After=coreos-setup-environment.service
      +After=system-config.target
      +Before=user-config.target
      +ConditionFileNotEmpty=%f
      +
      +[Service]
      +Type=oneshot
      +RemainAfterExit=yes
      +EnvironmentFile=-/etc/environment
      +ExecStart=/usr/bin/coreos-cloudinit --from-file=%f
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/user-config-ovfenv.service b/vendor/github.com/coreos/coreos-cloudinit/units/user-config-ovfenv.service
      new file mode 100644
      index 00000000..7a996d5f
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/user-config-ovfenv.service
      @@ -0,0 +1,12 @@
      +[Unit]
      +Description=Load cloud-config from /media/ovfenv
      +Requires=coreos-setup-environment.service
      +After=coreos-setup-environment.service system-config.target
      +Before=user-config.target
      +After=user-config-vmw-tools.service
      +
      +[Service]
      +Type=oneshot
      +RemainAfterExit=yes
      +EnvironmentFile=-/etc/environment
      +ExecStart=/usr/bin/coreos-cloudinit --from-vmware-ovf-env=/media/ovfenv/ovf-env.xml
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/user-config.target b/vendor/github.com/coreos/coreos-cloudinit/units/user-config.target
      new file mode 100644
      index 00000000..d7cb9091
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/user-config.target
      @@ -0,0 +1,13 @@
      +[Unit]
      +Description=Load user-provided cloud configs
      +Requires=system-config.target
      +After=system-config.target
      +
      +# Watch for configs at a couple common paths
      +Requires=user-configdrive.path
      +After=user-configdrive.path
      +Requires=user-cloudinit@var-lib-coreos\x2dinstall-user_data.path
      +After=user-cloudinit@var-lib-coreos\x2dinstall-user_data.path
      +
      +Requires=user-cloudinit-proc-cmdline.service
      +After=user-cloudinit-proc-cmdline.service
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/user-configdrive.path b/vendor/github.com/coreos/coreos-cloudinit/units/user-configdrive.path
      new file mode 100644
      index 00000000..fea642e9
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/user-configdrive.path
      @@ -0,0 +1,10 @@
      +[Unit]
      +Description=Watch for a cloud-config at /media/configdrive
      +
      +# Note: This unit is essentially just here as a fall-back mechanism to
      +# trigger cloudinit if it isn't triggered explicitly by other means
      +# such as by a Wants= in the mount unit. This ensures we handle the
      +# case where /media/configdrive is provided to a CoreOS container.
      +
      +[Path]
      +DirectoryNotEmpty=/media/configdrive
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/user-configdrive.service b/vendor/github.com/coreos/coreos-cloudinit/units/user-configdrive.service
      new file mode 100644
      index 00000000..e24b6d30
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/user-configdrive.service
      @@ -0,0 +1,22 @@
      +[Unit]
      +Description=Load cloud-config from /media/configdrive
      +Requires=coreos-setup-environment.service
      +After=coreos-setup-environment.service system-config.target
      +Before=user-config.target
      +
      +# HACK: work around ordering between config drive and ec2 metadata. It is
      +# possible for OpenStack style systems to provide both the metadata service
      +# and config drive, to prevent the two from stomping on each other, force
      +# this to run after OEM and after metadata (if it exists). I'm doing this
      +# here instead of in the oem service because the oem unit is not written
      +# to disk until the OEM cloud config is evaluated and I want to make sure
      +# systemd knows about the ordering as early as possible.
      +# coreos-cloudinit could implement a simple lock but that cannot be used
      +# until after the systemd dbus calls are made non-blocking.
      +After=oem-cloudinit.service
      +
      +[Service]
      +Type=oneshot
      +RemainAfterExit=yes
      +EnvironmentFile=-/etc/environment
      +ExecStart=/usr/bin/coreos-cloudinit --from-configdrive=/media/configdrive
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/units/user-configvirtfs.service b/vendor/github.com/coreos/coreos-cloudinit/units/user-configvirtfs.service
      new file mode 100644
      index 00000000..c9839ad8
      --- /dev/null
      +++ b/vendor/github.com/coreos/coreos-cloudinit/units/user-configvirtfs.service
      @@ -0,0 +1,11 @@
      +[Unit]
      +Description=Load cloud-config from /media/configvirtfs
      +Requires=coreos-setup-environment.service
      +After=coreos-setup-environment.service
      +Before=user-config.target
      +
      +[Service]
      +Type=oneshot
      +RemainAfterExit=yes
      +EnvironmentFile=-/etc/environment
      +ExecStart=/usr/bin/coreos-cloudinit --from-configdrive=/media/configvirtfs
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/.travis.yml b/vendor/github.com/coreos/go-semver/.travis.yml
      similarity index 100%
      rename from vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/.travis.yml
      rename to vendor/github.com/coreos/go-semver/.travis.yml
      diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE
      new file mode 100644
      index 00000000..d6456956
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-semver/LICENSE
      @@ -0,0 +1,202 @@
      +
      +                                 Apache License
      +                           Version 2.0, January 2004
      +                        http://www.apache.org/licenses/
      +
      +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
      +
      +   1. Definitions.
      +
      +      "License" shall mean the terms and conditions for use, reproduction,
      +      and distribution as defined by Sections 1 through 9 of this document.
      +
      +      "Licensor" shall mean the copyright owner or entity authorized by
      +      the copyright owner that is granting the License.
      +
      +      "Legal Entity" shall mean the union of the acting entity and all
      +      other entities that control, are controlled by, or are under common
      +      control with that entity. For the purposes of this definition,
      +      "control" means (i) the power, direct or indirect, to cause the
      +      direction or management of such entity, whether by contract or
      +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      +      outstanding shares, or (iii) beneficial ownership of such entity.
      +
      +      "You" (or "Your") shall mean an individual or Legal Entity
      +      exercising permissions granted by this License.
      +
      +      "Source" form shall mean the preferred form for making modifications,
      +      including but not limited to software source code, documentation
      +      source, and configuration files.
      +
      +      "Object" form shall mean any form resulting from mechanical
      +      transformation or translation of a Source form, including but
      +      not limited to compiled object code, generated documentation,
      +      and conversions to other media types.
      +
      +      "Work" shall mean the work of authorship, whether in Source or
      +      Object form, made available under the License, as indicated by a
      +      copyright notice that is included in or attached to the work
      +      (an example is provided in the Appendix below).
      +
      +      "Derivative Works" shall mean any work, whether in Source or Object
      +      form, that is based on (or derived from) the Work and for which the
      +      editorial revisions, annotations, elaborations, or other modifications
      +      represent, as a whole, an original work of authorship. For the purposes
      +      of this License, Derivative Works shall not include works that remain
      +      separable from, or merely link (or bind by name) to the interfaces of,
      +      the Work and Derivative Works thereof.
      +
      +      "Contribution" shall mean any work of authorship, including
      +      the original version of the Work and any modifications or additions
      +      to that Work or Derivative Works thereof, that is intentionally
      +      submitted to Licensor for inclusion in the Work by the copyright owner
      +      or by an individual or Legal Entity authorized to submit on behalf of
      +      the copyright owner. For the purposes of this definition, "submitted"
      +      means any form of electronic, verbal, or written communication sent
      +      to the Licensor or its representatives, including but not limited to
      +      communication on electronic mailing lists, source code control systems,
      +      and issue tracking systems that are managed by, or on behalf of, the
      +      Licensor for the purpose of discussing and improving the Work, but
      +      excluding communication that is conspicuously marked or otherwise
      +      designated in writing by the copyright owner as "Not a Contribution."
      +
      +      "Contributor" shall mean Licensor and any individual or Legal Entity
      +      on behalf of whom a Contribution has been received by Licensor and
      +      subsequently incorporated within the Work.
      +
      +   2. Grant of Copyright License. Subject to the terms and conditions of
      +      this License, each Contributor hereby grants to You a perpetual,
      +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      +      copyright license to reproduce, prepare Derivative Works of,
      +      publicly display, publicly perform, sublicense, and distribute the
      +      Work and such Derivative Works in Source or Object form.
      +
      +   3. Grant of Patent License. Subject to the terms and conditions of
      +      this License, each Contributor hereby grants to You a perpetual,
      +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      +      (except as stated in this section) patent license to make, have made,
      +      use, offer to sell, sell, import, and otherwise transfer the Work,
      +      where such license applies only to those patent claims licensable
      +      by such Contributor that are necessarily infringed by their
      +      Contribution(s) alone or by combination of their Contribution(s)
      +      with the Work to which such Contribution(s) was submitted. If You
      +      institute patent litigation against any entity (including a
      +      cross-claim or counterclaim in a lawsuit) alleging that the Work
      +      or a Contribution incorporated within the Work constitutes direct
      +      or contributory patent infringement, then any patent licenses
      +      granted to You under this License for that Work shall terminate
      +      as of the date such litigation is filed.
      +
      +   4. Redistribution. You may reproduce and distribute copies of the
      +      Work or Derivative Works thereof in any medium, with or without
      +      modifications, and in Source or Object form, provided that You
      +      meet the following conditions:
      +
      +      (a) You must give any other recipients of the Work or
      +          Derivative Works a copy of this License; and
      +
      +      (b) You must cause any modified files to carry prominent notices
      +          stating that You changed the files; and
      +
      +      (c) You must retain, in the Source form of any Derivative Works
      +          that You distribute, all copyright, patent, trademark, and
      +          attribution notices from the Source form of the Work,
      +          excluding those notices that do not pertain to any part of
      +          the Derivative Works; and
      +
      +      (d) If the Work includes a "NOTICE" text file as part of its
      +          distribution, then any Derivative Works that You distribute must
      +          include a readable copy of the attribution notices contained
      +          within such NOTICE file, excluding those notices that do not
      +          pertain to any part of the Derivative Works, in at least one
      +          of the following places: within a NOTICE text file distributed
      +          as part of the Derivative Works; within the Source form or
      +          documentation, if provided along with the Derivative Works; or,
      +          within a display generated by the Derivative Works, if and
      +          wherever such third-party notices normally appear. The contents
      +          of the NOTICE file are for informational purposes only and
      +          do not modify the License. You may add Your own attribution
      +          notices within Derivative Works that You distribute, alongside
      +          or as an addendum to the NOTICE text from the Work, provided
      +          that such additional attribution notices cannot be construed
      +          as modifying the License.
      +
      +      You may add Your own copyright statement to Your modifications and
      +      may provide additional or different license terms and conditions
      +      for use, reproduction, or distribution of Your modifications, or
      +      for any such Derivative Works as a whole, provided Your use,
      +      reproduction, and distribution of the Work otherwise complies with
      +      the conditions stated in this License.
      +
      +   5. Submission of Contributions. Unless You explicitly state otherwise,
      +      any Contribution intentionally submitted for inclusion in the Work
      +      by You to the Licensor shall be under the terms and conditions of
      +      this License, without any additional terms or conditions.
      +      Notwithstanding the above, nothing herein shall supersede or modify
      +      the terms of any separate license agreement you may have executed
      +      with Licensor regarding such Contributions.
      +
      +   6. Trademarks. This License does not grant permission to use the trade
      +      names, trademarks, service marks, or product names of the Licensor,
      +      except as required for reasonable and customary use in describing the
      +      origin of the Work and reproducing the content of the NOTICE file.
      +
      +   7. Disclaimer of Warranty. Unless required by applicable law or
      +      agreed to in writing, Licensor provides the Work (and each
      +      Contributor provides its Contributions) on an "AS IS" BASIS,
      +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      +      implied, including, without limitation, any warranties or conditions
      +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      +      PARTICULAR PURPOSE. You are solely responsible for determining the
      +      appropriateness of using or redistributing the Work and assume any
      +      risks associated with Your exercise of permissions under this License.
      +
      +   8. Limitation of Liability. In no event and under no legal theory,
      +      whether in tort (including negligence), contract, or otherwise,
      +      unless required by applicable law (such as deliberate and grossly
      +      negligent acts) or agreed to in writing, shall any Contributor be
      +      liable to You for damages, including any direct, indirect, special,
      +      incidental, or consequential damages of any character arising as a
      +      result of this License or out of the use or inability to use the
      +      Work (including but not limited to damages for loss of goodwill,
      +      work stoppage, computer failure or malfunction, or any and all
      +      other commercial damages or losses), even if such Contributor
      +      has been advised of the possibility of such damages.
      +
      +   9. Accepting Warranty or Additional Liability. While redistributing
      +      the Work or Derivative Works thereof, You may choose to offer,
      +      and charge a fee for, acceptance of support, warranty, indemnity,
      +      or other liability obligations and/or rights consistent with this
      +      License. However, in accepting such obligations, You may act only
      +      on Your own behalf and on Your sole responsibility, not on behalf
      +      of any other Contributor, and only if You agree to indemnify,
      +      defend, and hold each Contributor harmless for any liability
      +      incurred by, or claims asserted against, such Contributor by reason
      +      of your accepting any such warranty or additional liability.
      +
      +   END OF TERMS AND CONDITIONS
      +
      +   APPENDIX: How to apply the Apache License to your work.
      +
      +      To apply the Apache License to your work, attach the following
      +      boilerplate notice, with the fields enclosed by brackets "[]"
      +      replaced with your own identifying information. (Don't include
      +      the brackets!)  The text should be enclosed in the appropriate
      +      comment syntax for the file format. We also recommend that a
      +      file or class name and description of purpose be included on the
      +      same "printed page" as the copyright notice for easier
      +      identification within third-party archives.
      +
      +   Copyright [yyyy] [name of copyright owner]
      +
      +   Licensed under the Apache License, Version 2.0 (the "License");
      +   you may not use this file except in compliance with the License.
      +   You may obtain a copy of the License at
      +
      +       http://www.apache.org/licenses/LICENSE-2.0
      +
      +   Unless required by applicable law or agreed to in writing, software
      +   distributed under the License is distributed on an "AS IS" BASIS,
      +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +   See the License for the specific language governing permissions and
      +   limitations under the License.
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/README.md b/vendor/github.com/coreos/go-semver/README.md
      similarity index 100%
      rename from vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/README.md
      rename to vendor/github.com/coreos/go-semver/README.md
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/example.go b/vendor/github.com/coreos/go-semver/example.go
      similarity index 100%
      rename from vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/example.go
      rename to vendor/github.com/coreos/go-semver/example.go
      diff --git a/vendor/github.com/coreos/go-systemd/.travis.yml b/vendor/github.com/coreos/go-systemd/.travis.yml
      new file mode 100644
      index 00000000..3c37292e
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/.travis.yml
      @@ -0,0 +1,8 @@
      +language: go
      +go: 1.4
      +
      +install:
      + - go get github.com/godbus/dbus
      +
      +script:
      + - ./test
      diff --git a/vendor/github.com/coreos/go-systemd/CONTRIBUTING.md b/vendor/github.com/coreos/go-systemd/CONTRIBUTING.md
      new file mode 100644
      index 00000000..0551ed53
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/CONTRIBUTING.md
      @@ -0,0 +1,77 @@
      +# How to Contribute
      +
      +CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
      +GitHub pull requests.  This document outlines some of the conventions on
      +development workflow, commit message formatting, contact points and other
      +resources to make it easier to get your contribution accepted.
      +
      +# Certificate of Origin
      +
      +By contributing to this project you agree to the Developer Certificate of
      +Origin (DCO). This document was created by the Linux Kernel community and is a
      +simple statement that you, as a contributor, have the legal right to make the
      +contribution. See the [DCO](DCO) file for details.
      +
      +# Email and Chat
      +
      +The project currently uses the general CoreOS email list and IRC channel:
      +- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
      +- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
      +
      +Please avoid emailing maintainers found in the MAINTAINERS file directly. They
      +are very busy and read the mailing lists.
      +
      +## Getting Started
      +
      +- Fork the repository on GitHub
      +- Read the [README](README.md) for build and test instructions
      +- Play with the project, submit bugs, submit patches!
      +
      +## Contribution Flow
      +
      +This is a rough outline of what a contributor's workflow looks like:
      +
      +- Create a topic branch from where you want to base your work (usually master).
      +- Make commits of logical units.
      +- Make sure your commit messages are in the proper format (see below).
      +- Push your changes to a topic branch in your fork of the repository.
      +- Make sure the tests pass, and add any new tests as appropriate.
      +- Submit a pull request to the original repository.
      +
      +Thanks for your contributions!
      +
      +### Coding Style
      +
      +CoreOS projects written in Go follow a set of style guidelines that we've documented 
      +[here](https://github.com/coreos/docs/tree/master/golang). Please follow them when 
      +working on your contributions.
      +
      +### Format of the Commit Message
      +
      +We follow a rough convention for commit messages that is designed to answer two
      +questions: what changed and why. The subject line should feature the what and
      +the body of the commit should describe the why.
      +
      +```
      +scripts: add the test-cluster command
      +
      +this uses tmux to setup a test cluster that you can easily kill and
      +start for debugging.
      +
      +Fixes #38
      +```
      +
      +The format can be described more formally as follows:
      +
      +```
      +<subsystem>: <what changed>
      +<BLANK LINE>
      +<why this change was made>
      +<BLANK LINE>
      +<footer>
      +```
      +
      +The first line is the subject and should be no longer than 70 characters, the
      +second line is always blank, and other lines should be wrapped at 80 characters.
      +This allows the message to be easier to read on GitHub as well as in various
      +git tools.
      diff --git a/vendor/github.com/coreos/go-systemd/DCO b/vendor/github.com/coreos/go-systemd/DCO
      new file mode 100644
      index 00000000..716561d5
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/DCO
      @@ -0,0 +1,36 @@
      +Developer Certificate of Origin
      +Version 1.1
      +
      +Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
      +660 York Street, Suite 102,
      +San Francisco, CA 94110 USA
      +
      +Everyone is permitted to copy and distribute verbatim copies of this
      +license document, but changing it is not allowed.
      +
      +
      +Developer's Certificate of Origin 1.1
      +
      +By making a contribution to this project, I certify that:
      +
      +(a) The contribution was created in whole or in part by me and I
      +    have the right to submit it under the open source license
      +    indicated in the file; or
      +
      +(b) The contribution is based upon previous work that, to the best
      +    of my knowledge, is covered under an appropriate open source
      +    license and I have the right under that license to submit that
      +    work with modifications, whether created in whole or in part
      +    by me, under the same open source license (unless I am
      +    permitted to submit under a different license), as indicated
      +    in the file; or
      +
      +(c) The contribution was provided directly to me by some other
      +    person who certified (a), (b) or (c) and I have not modified
      +    it.
      +
      +(d) I understand and agree that this project and the contribution
      +    are public and that a record of the contribution (including all
      +    personal information I submit with it, including my sign-off) is
      +    maintained indefinitely and may be redistributed consistent with
      +    this project or the open source license(s) involved.
      diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE
      new file mode 100644
      index 00000000..37ec93a1
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/LICENSE
      @@ -0,0 +1,191 @@
      +Apache License
      +Version 2.0, January 2004
      +http://www.apache.org/licenses/
      +
      +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
      +
      +1. Definitions.
      +
      +"License" shall mean the terms and conditions for use, reproduction, and
      +distribution as defined by Sections 1 through 9 of this document.
      +
      +"Licensor" shall mean the copyright owner or entity authorized by the copyright
      +owner that is granting the License.
      +
      +"Legal Entity" shall mean the union of the acting entity and all other entities
      +that control, are controlled by, or are under common control with that entity.
      +For the purposes of this definition, "control" means (i) the power, direct or
      +indirect, to cause the direction or management of such entity, whether by
      +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
      +outstanding shares, or (iii) beneficial ownership of such entity.
      +
      +"You" (or "Your") shall mean an individual or Legal Entity exercising
      +permissions granted by this License.
      +
      +"Source" form shall mean the preferred form for making modifications, including
      +but not limited to software source code, documentation source, and configuration
      +files.
      +
      +"Object" form shall mean any form resulting from mechanical transformation or
      +translation of a Source form, including but not limited to compiled object code,
      +generated documentation, and conversions to other media types.
      +
      +"Work" shall mean the work of authorship, whether in Source or Object form, made
      +available under the License, as indicated by a copyright notice that is included
      +in or attached to the work (an example is provided in the Appendix below).
      +
      +"Derivative Works" shall mean any work, whether in Source or Object form, that
      +is based on (or derived from) the Work and for which the editorial revisions,
      +annotations, elaborations, or other modifications represent, as a whole, an
      +original work of authorship. For the purposes of this License, Derivative Works
      +shall not include works that remain separable from, or merely link (or bind by
      +name) to the interfaces of, the Work and Derivative Works thereof.
      +
      +"Contribution" shall mean any work of authorship, including the original version
      +of the Work and any modifications or additions to that Work or Derivative Works
      +thereof, that is intentionally submitted to Licensor for inclusion in the Work
      +by the copyright owner or by an individual or Legal Entity authorized to submit
      +on behalf of the copyright owner. For the purposes of this definition,
      +"submitted" means any form of electronic, verbal, or written communication sent
      +to the Licensor or its representatives, including but not limited to
      +communication on electronic mailing lists, source code control systems, and
      +issue tracking systems that are managed by, or on behalf of, the Licensor for
      +the purpose of discussing and improving the Work, but excluding communication
      +that is conspicuously marked or otherwise designated in writing by the copyright
      +owner as "Not a Contribution."
      +
      +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
      +of whom a Contribution has been received by Licensor and subsequently
      +incorporated within the Work.
      +
      +2. Grant of Copyright License.
      +
      +Subject to the terms and conditions of this License, each Contributor hereby
      +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
      +irrevocable copyright license to reproduce, prepare Derivative Works of,
      +publicly display, publicly perform, sublicense, and distribute the Work and such
      +Derivative Works in Source or Object form.
      +
      +3. Grant of Patent License.
      +
      +Subject to the terms and conditions of this License, each Contributor hereby
      +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
      +irrevocable (except as stated in this section) patent license to make, have
      +made, use, offer to sell, sell, import, and otherwise transfer the Work, where
      +such license applies only to those patent claims licensable by such Contributor
      +that are necessarily infringed by their Contribution(s) alone or by combination
      +of their Contribution(s) with the Work to which such Contribution(s) was
      +submitted. If You institute patent litigation against any entity (including a
      +cross-claim or counterclaim in a lawsuit) alleging that the Work or a
      +Contribution incorporated within the Work constitutes direct or contributory
      +patent infringement, then any patent licenses granted to You under this License
      +for that Work shall terminate as of the date such litigation is filed.
      +
      +4. Redistribution.
      +
      +You may reproduce and distribute copies of the Work or Derivative Works thereof
      +in any medium, with or without modifications, and in Source or Object form,
      +provided that You meet the following conditions:
      +
      +You must give any other recipients of the Work or Derivative Works a copy of
      +this License; and
      +You must cause any modified files to carry prominent notices stating that You
      +changed the files; and
      +You must retain, in the Source form of any Derivative Works that You distribute,
      +all copyright, patent, trademark, and attribution notices from the Source form
      +of the Work, excluding those notices that do not pertain to any part of the
      +Derivative Works; and
      +If the Work includes a "NOTICE" text file as part of its distribution, then any
      +Derivative Works that You distribute must include a readable copy of the
      +attribution notices contained within such NOTICE file, excluding those notices
      +that do not pertain to any part of the Derivative Works, in at least one of the
      +following places: within a NOTICE text file distributed as part of the
      +Derivative Works; within the Source form or documentation, if provided along
      +with the Derivative Works; or, within a display generated by the Derivative
      +Works, if and wherever such third-party notices normally appear. The contents of
      +the NOTICE file are for informational purposes only and do not modify the
      +License. You may add Your own attribution notices within Derivative Works that
      +You distribute, alongside or as an addendum to the NOTICE text from the Work,
      +provided that such additional attribution notices cannot be construed as
      +modifying the License.
      +You may add Your own copyright statement to Your modifications and may provide
      +additional or different license terms and conditions for use, reproduction, or
      +distribution of Your modifications, or for any such Derivative Works as a whole,
      +provided Your use, reproduction, and distribution of the Work otherwise complies
      +with the conditions stated in this License.
      +
      +5. Submission of Contributions.
      +
      +Unless You explicitly state otherwise, any Contribution intentionally submitted
      +for inclusion in the Work by You to the Licensor shall be under the terms and
      +conditions of this License, without any additional terms or conditions.
      +Notwithstanding the above, nothing herein shall supersede or modify the terms of
      +any separate license agreement you may have executed with Licensor regarding
      +such Contributions.
      +
      +6. Trademarks.
      +
      +This License does not grant permission to use the trade names, trademarks,
      +service marks, or product names of the Licensor, except as required for
      +reasonable and customary use in describing the origin of the Work and
      +reproducing the content of the NOTICE file.
      +
      +7. Disclaimer of Warranty.
      +
      +Unless required by applicable law or agreed to in writing, Licensor provides the
      +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
      +including, without limitation, any warranties or conditions of TITLE,
      +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
      +solely responsible for determining the appropriateness of using or
      +redistributing the Work and assume any risks associated with Your exercise of
      +permissions under this License.
      +
      +8. Limitation of Liability.
      +
      +In no event and under no legal theory, whether in tort (including negligence),
      +contract, or otherwise, unless required by applicable law (such as deliberate
      +and grossly negligent acts) or agreed to in writing, shall any Contributor be
      +liable to You for damages, including any direct, indirect, special, incidental,
      +or consequential damages of any character arising as a result of this License or
      +out of the use or inability to use the Work (including but not limited to
      +damages for loss of goodwill, work stoppage, computer failure or malfunction, or
      +any and all other commercial damages or losses), even if such Contributor has
      +been advised of the possibility of such damages.
      +
      +9. Accepting Warranty or Additional Liability.
      +
      +While redistributing the Work or Derivative Works thereof, You may choose to
      +offer, and charge a fee for, acceptance of support, warranty, indemnity, or
      +other liability obligations and/or rights consistent with this License. However,
      +in accepting such obligations, You may act only on Your own behalf and on Your
      +sole responsibility, not on behalf of any other Contributor, and only if You
      +agree to indemnify, defend, and hold each Contributor harmless for any liability
      +incurred by, or claims asserted against, such Contributor by reason of your
      +accepting any such warranty or additional liability.
      +
      +END OF TERMS AND CONDITIONS
      +
      +APPENDIX: How to apply the Apache License to your work
      +
      +To apply the Apache License to your work, attach the following boilerplate
      +notice, with the fields enclosed by brackets "[]" replaced with your own
      +identifying information. (Don't include the brackets!) The text should be
      +enclosed in the appropriate comment syntax for the file format. We also
      +recommend that a file or class name and description of purpose be included on
      +the same "printed page" as the copyright notice for easier identification within
      +third-party archives.
      +
      +   Copyright [yyyy] [name of copyright owner]
      +
      +   Licensed under the Apache License, Version 2.0 (the "License");
      +   you may not use this file except in compliance with the License.
      +   You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +   Unless required by applicable law or agreed to in writing, software
      +   distributed under the License is distributed on an "AS IS" BASIS,
      +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +   See the License for the specific language governing permissions and
      +   limitations under the License.
      diff --git a/vendor/github.com/coreos/go-systemd/README.md b/vendor/github.com/coreos/go-systemd/README.md
      new file mode 100644
      index 00000000..cb87a112
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/README.md
      @@ -0,0 +1,54 @@
      +# go-systemd
      +
      +[![Build Status](https://travis-ci.org/coreos/go-systemd.png?branch=master)](https://travis-ci.org/coreos/go-systemd)
      +[![godoc](https://godoc.org/github.com/coreos/go-systemd?status.svg)](http://godoc.org/github.com/coreos/go-systemd)
      +
      +Go bindings to systemd. The project has several packages:
      +
      +- `activation` - for writing and using socket activation from Go
      +- `dbus` - for starting/stopping/inspecting running services and units
      +- `journal` - for writing to systemd's logging service, journald
      +- `sdjournal` - for reading from journald by wrapping its C API
      +- `machine1` - for registering machines/containers with systemd
      +- `unit` - for (de)serialization and comparison of unit files
      +
      +## Socket Activation
      +
      +An example HTTP server using socket activation can be quickly set up by following this README on a Linux machine running systemd:
      +
      +https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver
      +
      +## Journal
      +
      +Using the pure-Go `journal` package you can submit journal entries directly to systemd's journal, taking advantage of features like indexed key/value pairs for each log entry.
      +The `sdjournal` package provides read access to the journal by wrapping around journald's native C API; consequently it requires cgo and the journal headers to be available.
      +
      +## D-Bus
      +
      +The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units. The API docs are here:
      +
      +http://godoc.org/github.com/coreos/go-systemd/dbus
      +
      +### Debugging
      +
      +Create `/etc/dbus-1/system-local.conf` that looks like this:
      +
      +```
      +<!DOCTYPE busconfig PUBLIC
      +"-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"
      +"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
      +<busconfig>
      +    <policy user="root">
      +        <allow eavesdrop="true"/>
      +        <allow eavesdrop="true" send_destination="*"/>
      +    </policy>
      +</busconfig>
      +```
      +
      +## machined
      +
      +The `machine1` package allows interaction with the [systemd machined D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/machined/).
      +
      +## Units
      +
      +The `unit` package provides various functions for working with [systemd unit files](http://www.freedesktop.org/software/systemd/man/systemd.unit.html).
      diff --git a/vendor/github.com/coreos/go-systemd/activation/files.go b/vendor/github.com/coreos/go-systemd/activation/files.go
      new file mode 100644
      index 00000000..c8e85fcd
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/activation/files.go
      @@ -0,0 +1,52 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// Package activation implements primitives for systemd socket activation.
      +package activation
      +
      +import (
      +	"os"
      +	"strconv"
      +	"syscall"
      +)
      +
      +// based on: https://gist.github.com/alberts/4640792
      +const (
      +	listenFdsStart = 3
      +)
      +
      +func Files(unsetEnv bool) []*os.File {
      +	if unsetEnv {
      +		defer os.Unsetenv("LISTEN_PID")
      +		defer os.Unsetenv("LISTEN_FDS")
      +	}
      +
      +	pid, err := strconv.Atoi(os.Getenv("LISTEN_PID"))
      +	if err != nil || pid != os.Getpid() {
      +		return nil
      +	}
      +
      +	nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS"))
      +	if err != nil || nfds == 0 {
      +		return nil
      +	}
      +
      +	files := make([]*os.File, 0, nfds)
      +	for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ {
      +		syscall.CloseOnExec(fd)
      +		files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd)))
      +	}
      +
      +	return files
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/activation/files_test.go b/vendor/github.com/coreos/go-systemd/activation/files_test.go
      new file mode 100644
      index 00000000..8e15f2a1
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/activation/files_test.go
      @@ -0,0 +1,82 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package activation
      +
      +import (
      +	"bytes"
      +	"io"
      +	"os"
      +	"os/exec"
      +	"testing"
      +)
      +
      +// correctStringWritten fails the test if the correct string wasn't written
      +// to the other side of the pipe.
      +func correctStringWritten(t *testing.T, r *os.File, expected string) bool {
      +	bytes := make([]byte, len(expected))
      +	io.ReadAtLeast(r, bytes, len(expected))
      +
      +	if string(bytes) != expected {
      +		t.Fatalf("Unexpected string %s", string(bytes))
      +	}
      +
      +	return true
      +}
      +
      +// TestActivation forks out a copy of activation.go example and reads back two
      +// strings from the pipes that are passed in.
      +func TestActivation(t *testing.T) {
      +	cmd := exec.Command("go", "run", "../examples/activation/activation.go")
      +
      +	r1, w1, _ := os.Pipe()
      +	r2, w2, _ := os.Pipe()
      +	cmd.ExtraFiles = []*os.File{
      +		w1,
      +		w2,
      +	}
      +
      +	cmd.Env = os.Environ()
      +	cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1")
      +
      +	err := cmd.Run()
      +	if err != nil {
      +		t.Fatalf(err.Error())
      +	}
      +
      +	correctStringWritten(t, r1, "Hello world")
      +	correctStringWritten(t, r2, "Goodbye world")
      +}
      +
      +func TestActivationNoFix(t *testing.T) {
      +	cmd := exec.Command("go", "run", "../examples/activation/activation.go")
      +	cmd.Env = os.Environ()
      +	cmd.Env = append(cmd.Env, "LISTEN_FDS=2")
      +
      +	out, _ := cmd.CombinedOutput()
      +	if bytes.Contains(out, []byte("No files")) == false {
      +		t.Fatalf("Child didn't error out as expected")
      +	}
      +}
      +
      +func TestActivationNoFiles(t *testing.T) {
      +	cmd := exec.Command("go", "run", "../examples/activation/activation.go")
      +	cmd.Env = os.Environ()
      +	cmd.Env = append(cmd.Env, "LISTEN_FDS=0", "FIX_LISTEN_PID=1")
      +
      +	out, _ := cmd.CombinedOutput()
      +	if bytes.Contains(out, []byte("No files")) == false {
      +		t.Fatalf("Child didn't error out as expected")
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/activation/listeners.go b/vendor/github.com/coreos/go-systemd/activation/listeners.go
      new file mode 100644
      index 00000000..df27c29e
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/activation/listeners.go
      @@ -0,0 +1,62 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package activation
      +
      +import (
      +	"crypto/tls"
      +	"net"
      +)
      +
      +// Listeners returns a slice containing a net.Listener for each matching socket type
      +// passed to this process.
      +//
      +// The order of the file descriptors is preserved in the returned slice.
      +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors
      +// corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener}
      +func Listeners(unsetEnv bool) ([]net.Listener, error) {
      +	files := Files(unsetEnv)
      +	listeners := make([]net.Listener, len(files))
      +
      +	for i, f := range files {
      +		if pc, err := net.FileListener(f); err == nil {
      +			listeners[i] = pc
      +		}
      +	}
      +	return listeners, nil
      +}
      +
      +// TLSListeners returns a slice containing a net.Listener for each matching TCP socket type
      +// passed to this process.
      +// It uses default Listeners func and forces TCP sockets handlers to use TLS based on tlsConfig.
      +func TLSListeners(unsetEnv bool, tlsConfig *tls.Config) ([]net.Listener, error) {
      +	listeners, err := Listeners(unsetEnv)
      +
      +	if listeners == nil || err != nil {
      +		return nil, err
      +	}
      +
      +	if tlsConfig != nil && err == nil {
      +		tlsConfig.NextProtos = []string{"http/1.1"}
      +
      +		for i, l := range listeners {
      +			// Activate TLS only for TCP sockets
      +			if l.Addr().Network() == "tcp" {
      +				listeners[i] = tls.NewListener(l, tlsConfig)
      +			}
      +		}
      +	}
      +
      +	return listeners, err
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/activation/listeners_test.go b/vendor/github.com/coreos/go-systemd/activation/listeners_test.go
      new file mode 100644
      index 00000000..72fb0ff6
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/activation/listeners_test.go
      @@ -0,0 +1,86 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package activation
      +
      +import (
      +	"io"
      +	"net"
      +	"os"
      +	"os/exec"
      +	"testing"
      +)
      +
      +// correctStringWrittenNet fails the test if the correct string wasn't written
      +// to the other side of the connection.
      +func correctStringWrittenNet(t *testing.T, r net.Conn, expected string) bool {
      +	bytes := make([]byte, len(expected))
      +	io.ReadAtLeast(r, bytes, len(expected))
      +
      +	if string(bytes) != expected {
      +		t.Fatalf("Unexpected string %s", string(bytes))
      +	}
      +
      +	return true
      +}
      +
      +// TestListeners forks out a copy of the listen.go example and reads back two
      +// strings from the sockets that are passed in.
      +func TestListeners(t *testing.T) {
      +	cmd := exec.Command("go", "run", "../examples/activation/listen.go")
      +
      +	l1, err := net.Listen("tcp", ":9999")
      +	if err != nil {
      +		t.Fatalf(err.Error())
      +	}
      +	l2, err := net.Listen("tcp", ":1234")
      +	if err != nil {
      +		t.Fatalf(err.Error())
      +	}
      +
      +	t1 := l1.(*net.TCPListener)
      +	t2 := l2.(*net.TCPListener)
      +
      +	f1, _ := t1.File()
      +	f2, _ := t2.File()
      +
      +	cmd.ExtraFiles = []*os.File{
      +		f1,
      +		f2,
      +	}
      +
      +	r1, err := net.Dial("tcp", "127.0.0.1:9999")
      +	if err != nil {
      +		t.Fatalf(err.Error())
      +	}
      +	r1.Write([]byte("Hi"))
      +
      +	r2, err := net.Dial("tcp", "127.0.0.1:1234")
      +	if err != nil {
      +		t.Fatalf(err.Error())
      +	}
      +	r2.Write([]byte("Hi"))
      +
      +	cmd.Env = os.Environ()
      +	cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1")
      +
      +	out, err := cmd.Output()
      +	if err != nil {
      +		println(string(out))
      +		t.Fatalf(err.Error())
      +	}
      +
      +	correctStringWrittenNet(t, r1, "Hello world")
      +	correctStringWrittenNet(t, r2, "Goodbye world")
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/activation/packetconns.go b/vendor/github.com/coreos/go-systemd/activation/packetconns.go
      new file mode 100644
      index 00000000..48b2ca02
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/activation/packetconns.go
      @@ -0,0 +1,37 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package activation
      +
      +import (
      +	"net"
      +)
      +
      +// PacketConns returns a slice containing a net.PacketConn for each matching socket type
      +// passed to this process.
      +//
      +// The order of the file descriptors is preserved in the returned slice.
      +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors
      +// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn}
      +func PacketConns(unsetEnv bool) ([]net.PacketConn, error) {
      +	files := Files(unsetEnv)
      +	conns := make([]net.PacketConn, len(files))
      +
      +	for i, f := range files {
      +		if pc, err := net.FilePacketConn(f); err == nil {
      +			conns[i] = pc
      +		}
      +	}
      +	return conns, nil
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/activation/packetconns_test.go b/vendor/github.com/coreos/go-systemd/activation/packetconns_test.go
      new file mode 100644
      index 00000000..8449756c
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/activation/packetconns_test.go
      @@ -0,0 +1,68 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package activation
      +
      +import (
      +	"net"
      +	"os"
      +	"os/exec"
      +	"testing"
      +)
      +
      +// TestPacketConns forks out a copy of the udpconn.go example and reads back
      +// two strings from the UDP connections that are passed in.
      +func TestPacketConns(t *testing.T) {
      +	cmd := exec.Command("go", "run", "../examples/activation/udpconn.go")
      +
      +	u1, err := net.ListenUDP("udp", &net.UDPAddr{Port: 9999})
      +	if err != nil {
      +		t.Fatalf(err.Error())
      +	}
      +	u2, err := net.ListenUDP("udp", &net.UDPAddr{Port: 1234})
      +	if err != nil {
      +		t.Fatalf(err.Error())
      +	}
      +
      +	f1, _ := u1.File()
      +	f2, _ := u2.File()
      +
      +	cmd.ExtraFiles = []*os.File{
      +		f1,
      +		f2,
      +	}
      +
      +	r1, err := net.Dial("udp", "127.0.0.1:9999")
      +	if err != nil {
      +		t.Fatalf(err.Error())
      +	}
      +	r1.Write([]byte("Hi"))
      +
      +	r2, err := net.Dial("udp", "127.0.0.1:1234")
      +	if err != nil {
      +		t.Fatalf(err.Error())
      +	}
      +	r2.Write([]byte("Hi"))
      +
      +	cmd.Env = os.Environ()
      +	cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1")
      +
      +	out, err := cmd.CombinedOutput()
      +	if err != nil {
      +		t.Fatalf("Cmd output '%s', err: '%s'\n", out, err)
      +	}
      +
      +	correctStringWrittenNet(t, r1, "Hello world")
      +	correctStringWrittenNet(t, r2, "Goodbye world")
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go
      new file mode 100644
      index 00000000..b92b1911
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go
      @@ -0,0 +1,31 @@
      +// Code forked from Docker project
      +package daemon
      +
      +import (
      +	"errors"
      +	"net"
      +	"os"
      +)
      +
      +var SdNotifyNoSocket = errors.New("No socket")
      +
      +// SdNotify sends a message to the init daemon. It is common to ignore the error.
      +func SdNotify(state string) error {
      +	socketAddr := &net.UnixAddr{
      +		Name: os.Getenv("NOTIFY_SOCKET"),
      +		Net:  "unixgram",
      +	}
      +
      +	if socketAddr.Name == "" {
      +		return SdNotifyNoSocket
      +	}
      +
      +	conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
      +	if err != nil {
      +		return err
      +	}
      +	defer conn.Close()
      +
      +	_, err = conn.Write([]byte(state))
      +	return err
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go
      new file mode 100644
      index 00000000..94043347
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/dbus/dbus.go
      @@ -0,0 +1,198 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// Integration with the systemd D-Bus API.  See http://www.freedesktop.org/wiki/Software/systemd/dbus/
      +package dbus
      +
      +import (
      +	"fmt"
      +	"os"
      +	"strconv"
      +	"strings"
      +	"sync"
      +
      +	"github.com/godbus/dbus"
      +)
      +
      +const (
      +	alpha        = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
      +	num          = `0123456789`
      +	alphanum     = alpha + num
      +	signalBuffer = 100
      +)
      +
      +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
      +func needsEscape(i int, b byte) bool {
      +	// Escape everything that is not a-z-A-Z-0-9
      +	// Also escape 0-9 if it's the first character
      +	return strings.IndexByte(alphanum, b) == -1 ||
      +		(i == 0 && strings.IndexByte(num, b) != -1)
      +}
      +
      +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
      +// rules that systemd uses for serializing special characters.
      +func PathBusEscape(path string) string {
      +	// Special case the empty string
      +	if len(path) == 0 {
      +		return "_"
      +	}
      +	n := []byte{}
      +	for i := 0; i < len(path); i++ {
      +		c := path[i]
      +		if needsEscape(i, c) {
      +			e := fmt.Sprintf("_%x", c)
      +			n = append(n, []byte(e)...)
      +		} else {
      +			n = append(n, c)
      +		}
      +	}
      +	return string(n)
      +}
      +
      +// Conn is a connection to systemd's dbus endpoint.
      +type Conn struct {
      +	// sysconn/sysobj are only used to call dbus methods
      +	sysconn *dbus.Conn
      +	sysobj  dbus.BusObject
      +
      +	// sigconn/sigobj are only used to receive dbus signals
      +	sigconn *dbus.Conn
      +	sigobj  dbus.BusObject
      +
      +	jobListener struct {
      +		jobs map[dbus.ObjectPath]chan<- string
      +		sync.Mutex
      +	}
      +	subscriber struct {
      +		updateCh chan<- *SubStateUpdate
      +		errCh    chan<- error
      +		sync.Mutex
      +		ignore      map[dbus.ObjectPath]int64
      +		cleanIgnore int64
      +	}
      +}
      +
      +// New establishes a connection to the system bus and authenticates.
      +// Callers should call Close() when done with the connection.
      +func New() (*Conn, error) {
      +	return newConnection(func() (*dbus.Conn, error) {
      +		return dbusAuthHelloConnection(dbus.SystemBusPrivate)
      +	})
      +}
      +
      +// NewUserConnection establishes a connection to the session bus and
      +// authenticates. This can be used to connect to systemd user instances.
      +// Callers should call Close() when done with the connection.
      +func NewUserConnection() (*Conn, error) {
      +	return newConnection(func() (*dbus.Conn, error) {
      +		return dbusAuthHelloConnection(dbus.SessionBusPrivate)
      +	})
      +}
      +
      +// NewSystemdConnection establishes a private, direct connection to systemd.
      +// This can be used for communicating with systemd without a dbus daemon.
      +// Callers should call Close() when done with the connection.
      +func NewSystemdConnection() (*Conn, error) {
      +	return newConnection(func() (*dbus.Conn, error) {
      +		// We skip Hello when talking directly to systemd.
      +		return dbusAuthConnection(func() (*dbus.Conn, error) {
      +			return dbus.Dial("unix:path=/run/systemd/private")
      +		})
      +	})
      +}
      +
      +// Close closes an established connection
      +func (c *Conn) Close() {
      +	c.sysconn.Close()
      +	c.sigconn.Close()
      +}
      +
      +func newConnection(createBus func() (*dbus.Conn, error)) (*Conn, error) {
      +	sysconn, err := createBus()
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	sigconn, err := createBus()
      +	if err != nil {
      +		sysconn.Close()
      +		return nil, err
      +	}
      +
      +	c := &Conn{
      +		sysconn: sysconn,
      +		sysobj:  systemdObject(sysconn),
      +		sigconn: sigconn,
      +		sigobj:  systemdObject(sigconn),
      +	}
      +
      +	c.subscriber.ignore = make(map[dbus.ObjectPath]int64)
      +	c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string)
      +
      +	// Setup the listeners on jobs so that we can get completions
      +	c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
      +		"type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
      +
      +	c.dispatch()
      +	return c, nil
      +}
      +
      +// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager
      +// interface. The value is returned in its string representation, as defined at
      +// https://developer.gnome.org/glib/unstable/gvariant-text.html
      +func (c *Conn) GetManagerProperty(prop string) (string, error) {
      +	variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop)
      +	if err != nil {
      +		return "", err
      +	}
      +	return variant.String(), nil
      +}
      +
      +func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
      +	conn, err := createBus()
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	// Only use EXTERNAL method, and hardcode the uid (not username)
      +	// to avoid a username lookup (which requires a dynamically linked
      +	// libc)
      +	methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
      +
      +	err = conn.Auth(methods)
      +	if err != nil {
      +		conn.Close()
      +		return nil, err
      +	}
      +
      +	return conn, nil
      +}
      +
      +func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
      +	conn, err := dbusAuthConnection(createBus)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	if err = conn.Hello(); err != nil {
      +		conn.Close()
      +		return nil, err
      +	}
      +
      +	return conn, nil
      +}
      +
      +func systemdObject(conn *dbus.Conn) dbus.BusObject {
      +	return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus_test.go b/vendor/github.com/coreos/go-systemd/dbus/dbus_test.go
      new file mode 100644
      index 00000000..3ea131e2
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/dbus/dbus_test.go
      @@ -0,0 +1,77 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package dbus
      +
      +import (
      +	"testing"
      +)
      +
      +func TestNeedsEscape(t *testing.T) {
      +	// Anything not 0-9a-zA-Z should always be escaped
      +	for want, vals := range map[bool][]byte{
      +		false: []byte{'a', 'b', 'z', 'A', 'Q', '1', '4', '9'},
      +		true:  []byte{'#', '%', '$', '!', '.', '_', '-', '%', '\\'},
      +	} {
      +		for i := 1; i < 10; i++ {
      +			for _, b := range vals {
      +				got := needsEscape(i, b)
      +				if got != want {
      +					t.Errorf("needsEscape(%d, %c) returned %t, want %t", i, b, got, want)
      +				}
      +			}
      +		}
      +	}
      +
      +	// 0-9 in position 0 should be escaped
      +	for want, vals := range map[bool][]byte{
      +		false: []byte{'A', 'a', 'e', 'x', 'Q', 'Z'},
      +		true:  []byte{'0', '4', '5', '9'},
      +	} {
      +		for _, b := range vals {
      +			got := needsEscape(0, b)
      +			if got != want {
      +				t.Errorf("needsEscape(0, %c) returned %t, want %t", b, got, want)
      +			}
      +		}
      +	}
      +
      +}
      +
      +func TestPathBusEscape(t *testing.T) {
      +	for in, want := range map[string]string{
      +		"":                   "_",
      +		"foo.service":        "foo_2eservice",
      +		"foobar":             "foobar",
      +		"woof@woof.service":  "woof_40woof_2eservice",
      +		"0123456":            "_30123456",
      +		"account_db.service": "account_5fdb_2eservice",
      +		"got-dashes":         "got_2ddashes",
      +	} {
      +		got := PathBusEscape(in)
      +		if got != want {
      +			t.Errorf("bad result for PathBusEscape(%s): got %q, want %q", in, got, want)
      +		}
      +	}
      +
      +}
      +
      +// TestNew ensures that New() works without errors.
      +func TestNew(t *testing.T) {
      +	_, err := New()
      +
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go
      new file mode 100644
      index 00000000..f9552a33
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/dbus/methods.go
      @@ -0,0 +1,442 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package dbus
      +
      +import (
      +	"errors"
      +	"path"
      +	"strconv"
      +
      +	"github.com/godbus/dbus"
      +)
      +
      +func (c *Conn) jobComplete(signal *dbus.Signal) {
      +	var id uint32
      +	var job dbus.ObjectPath
      +	var unit string
      +	var result string
      +	dbus.Store(signal.Body, &id, &job, &unit, &result)
      +	c.jobListener.Lock()
      +	out, ok := c.jobListener.jobs[job]
      +	if ok {
      +		out <- result
      +		delete(c.jobListener.jobs, job)
      +	}
      +	c.jobListener.Unlock()
      +}
      +
      +func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
      +	if ch != nil {
      +		c.jobListener.Lock()
      +		defer c.jobListener.Unlock()
      +	}
      +
      +	var p dbus.ObjectPath
      +	err := c.sysobj.Call(job, 0, args...).Store(&p)
      +	if err != nil {
      +		return 0, err
      +	}
      +
      +	if ch != nil {
      +		c.jobListener.jobs[p] = ch
      +	}
      +
      +	// ignore error since 0 is fine if conversion fails
      +	jobID, _ := strconv.Atoi(path.Base(string(p)))
      +
      +	return jobID, nil
      +}
      +
      +// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
      +// specified by the mode string).
      +//
      +// Takes the unit to activate, plus a mode string. The mode needs to be one of
      +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
      +// "replace" the call will start the unit and its dependencies, possibly
      +// replacing already queued jobs that conflict with this. If "fail" the call
      +// will start the unit and its dependencies, but will fail if this would change
      +// an already queued job. If "isolate" the call will start the unit in question
      +// and terminate all units that aren't dependencies of it. If
      +// "ignore-dependencies" it will start a unit but ignore all its dependencies.
      +// If "ignore-requirements" it will start a unit but only ignore the
      +// requirement dependencies. It is not recommended to make use of the latter
      +// two options.
      +//
      +// If the provided channel is non-nil, a result string will be sent to it upon
      +// job completion: one of done, canceled, timeout, failed, dependency, skipped.
      +// done indicates successful execution of a job. canceled indicates that a job
      +// has been canceled  before it finished execution. timeout indicates that the
      +// job timeout was reached. failed indicates that the job failed. dependency
      +// indicates that a job this job has been depending on failed and the job hence
      +// has been removed too. skipped indicates that a job was skipped because it
      +// didn't apply to the units current state.
      +//
      +// If no error occurs, the ID of the underlying systemd job will be returned. There
      +// does exist the possibility for no error to be returned, but for the returned job
      +// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
      +// should not be considered authoritative.
      +//
      +// If an error does occur, it will be returned to the user alongside a job ID of 0.
      +func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
      +	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
      +}
      +
      +// StopUnit is similar to StartUnit but stops the specified unit rather
      +// than starting it.
      +func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
      +	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
      +}
      +
      +// ReloadUnit reloads a unit.  Reloading is done only if the unit is already running and fails otherwise.
      +func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
      +	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
      +}
      +
      +// RestartUnit restarts a service.  If a service is restarted that isn't
      +// running it will be started.
      +func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
      +	return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
      +}
      +
      +// TryRestartUnit is like RestartUnit, except that a service that isn't running
      +// is not affected by the restart.
      +func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
      +	return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
      +}
      +
      +// ReloadOrRestart attempts a reload if the unit supports it and use a restart
      +// otherwise.
      +func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
      +	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
      +}
      +
      +// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try"
      +// flavored restart otherwise.
      +func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
      +	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
      +}
      +
      +// StartTransientUnit() may be used to create and start a transient unit, which
      +// will be released as soon as it is not running or referenced anymore or the
      +// system is rebooted. name is the unit name including suffix, and must be
      +// unique. mode is the same as in StartUnit(), properties contains properties
      +// of the unit.
      +func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
      +	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
      +}
      +
      +// KillUnit takes the unit name and a UNIX signal number to send.  All of the unit's
      +// processes are killed.
      +func (c *Conn) KillUnit(name string, signal int32) {
      +	c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
      +}
      +
      +// ResetFailedUnit resets the "failed" state of a specific unit.
      +func (c *Conn) ResetFailedUnit(name string) error {
      +	return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
      +}
      +
      +// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
      +func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
      +	var err error
      +	var props map[string]dbus.Variant
      +
      +	path := unitPath(unit)
      +	if !path.IsValid() {
      +		return nil, errors.New("invalid unit name: " + unit)
      +	}
      +
      +	obj := c.sysconn.Object("org.freedesktop.systemd1", path)
      +	err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	out := make(map[string]interface{}, len(props))
      +	for k, v := range props {
      +		out[k] = v.Value()
      +	}
      +
      +	return out, nil
      +}
      +
      +// GetUnitProperties takes the unit name and returns all of its dbus object properties.
      +func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
      +	return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
      +}
      +
      +func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
      +	var err error
      +	var prop dbus.Variant
      +
      +	path := unitPath(unit)
      +	if !path.IsValid() {
      +		return nil, errors.New("invalid unit name: " + unit)
      +	}
      +
      +	obj := c.sysconn.Object("org.freedesktop.systemd1", path)
      +	err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return &Property{Name: propertyName, Value: prop}, nil
      +}
      +
      +func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
      +	return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
      +}
      +
      +// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
      +// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
      +// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
      +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
      +	return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
      +}
      +
      +// SetUnitProperties() may be used to modify certain unit properties at runtime.
      +// Not all properties may be changed at runtime, but many resource management
      +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
      +// instantly, and stored on disk for future boots, unless runtime is true, in which
      +// case the settings only apply until the next reboot. name is the name of the unit
      +// to modify. properties are the settings to set, encoded as an array of property
      +// name and value pairs.
      +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
      +	return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
      +}
      +
      +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
      +	return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
      +}
      +
      +type UnitStatus struct {
      +	Name        string          // The primary unit name as string
      +	Description string          // The human readable description string
      +	LoadState   string          // The load state (i.e. whether the unit file has been loaded successfully)
      +	ActiveState string          // The active state (i.e. whether the unit is currently started or not)
      +	SubState    string          // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
      +	Followed    string          // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
      +	Path        dbus.ObjectPath // The unit object path
      +	JobId       uint32          // If there is a job queued for the job unit the numeric job id, 0 otherwise
      +	JobType     string          // The job type as string
      +	JobPath     dbus.ObjectPath // The job object path
      +}
      +
      +// ListUnits returns an array with all currently loaded units. Note that
      +// units may be known by multiple names at the same time, and hence there might
      +// be more unit names loaded than actual units behind them.
      +func (c *Conn) ListUnits() ([]UnitStatus, error) {
      +	result := make([][]interface{}, 0)
      +	err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	resultInterface := make([]interface{}, len(result))
      +	for i := range result {
      +		resultInterface[i] = result[i]
      +	}
      +
      +	status := make([]UnitStatus, len(result))
      +	statusInterface := make([]interface{}, len(status))
      +	for i := range status {
      +		statusInterface[i] = &status[i]
      +	}
      +
      +	err = dbus.Store(resultInterface, statusInterface...)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return status, nil
      +}
      +
      +type UnitFile struct {
      +	Path string
      +	Type string
      +}
      +
      +// ListUnitFiles returns an array of all available units on disk.
      +func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
      +	result := make([][]interface{}, 0)
      +	err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store(&result)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	resultInterface := make([]interface{}, len(result))
      +	for i := range result {
      +		resultInterface[i] = result[i]
      +	}
      +
      +	files := make([]UnitFile, len(result))
      +	fileInterface := make([]interface{}, len(files))
      +	for i := range files {
      +		fileInterface[i] = &files[i]
      +	}
      +
      +	err = dbus.Store(resultInterface, fileInterface...)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return files, nil
      +}
      +
      +type LinkUnitFileChange EnableUnitFileChange
      +
      +// LinkUnitFiles() links unit files (that are located outside of the
      +// usual unit search paths) into the unit search path.
      +//
      +// It takes a list of absolute paths to unit files to link and two
      +// booleans. The first boolean controls whether the unit shall be
      +// enabled for runtime only (true, /run), or persistently (false,
      +// /etc).
      +// The second controls whether symlinks pointing to other units shall
      +// be replaced if necessary.
      +//
      +// This call returns a list of the changes made. The list consists of
      +// structures with three strings: the type of the change (one of symlink
      +// or unlink), the file name of the symlink and the destination of the
      +// symlink.
      +func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
      +	result := make([][]interface{}, 0)
      +	err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	resultInterface := make([]interface{}, len(result))
      +	for i := range result {
      +		resultInterface[i] = result[i]
      +	}
      +
      +	changes := make([]LinkUnitFileChange, len(result))
      +	changesInterface := make([]interface{}, len(changes))
      +	for i := range changes {
      +		changesInterface[i] = &changes[i]
      +	}
      +
      +	err = dbus.Store(resultInterface, changesInterface...)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return changes, nil
      +}
      +
      +// EnableUnitFiles() may be used to enable one or more units in the system (by
      +// creating symlinks to them in /etc or /run).
      +//
      +// It takes a list of unit files to enable (either just file names or full
      +// absolute paths if the unit files are residing outside the usual unit
      +// search paths), and two booleans: the first controls whether the unit shall
      +// be enabled for runtime only (true, /run), or persistently (false, /etc).
      +// The second one controls whether symlinks pointing to other units shall
      +// be replaced if necessary.
      +//
      +// This call returns one boolean and an array with the changes made. The
      +// boolean signals whether the unit files contained any enablement
      +// information (i.e. an [Install]) section. The changes list consists of
      +// structures with three strings: the type of the change (one of symlink
      +// or unlink), the file name of the symlink and the destination of the
      +// symlink.
      +func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
      +	var carries_install_info bool
      +
      +	result := make([][]interface{}, 0)
      +	err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
      +	if err != nil {
      +		return false, nil, err
      +	}
      +
      +	resultInterface := make([]interface{}, len(result))
      +	for i := range result {
      +		resultInterface[i] = result[i]
      +	}
      +
      +	changes := make([]EnableUnitFileChange, len(result))
      +	changesInterface := make([]interface{}, len(changes))
      +	for i := range changes {
      +		changesInterface[i] = &changes[i]
      +	}
      +
      +	err = dbus.Store(resultInterface, changesInterface...)
      +	if err != nil {
      +		return false, nil, err
      +	}
      +
      +	return carries_install_info, changes, nil
      +}
      +
      +type EnableUnitFileChange struct {
      +	Type        string // Type of the change (one of symlink or unlink)
      +	Filename    string // File name of the symlink
      +	Destination string // Destination of the symlink
      +}
      +
      +// DisableUnitFiles() may be used to disable one or more units in the system (by
      +// removing symlinks to them from /etc or /run).
      +//
      +// It takes a list of unit files to disable (either just file names or full
      +// absolute paths if the unit files are residing outside the usual unit
      +// search paths), and one boolean: whether the unit was enabled for runtime
      +// only (true, /run), or persistently (false, /etc).
      +//
      +// This call returns an array with the changes made. The changes list
      +// consists of structures with three strings: the type of the change (one of
      +// symlink or unlink), the file name of the symlink and the destination of the
      +// symlink.
      +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
      +	result := make([][]interface{}, 0)
      +	err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	resultInterface := make([]interface{}, len(result))
      +	for i := range result {
      +		resultInterface[i] = result[i]
      +	}
      +
      +	changes := make([]DisableUnitFileChange, len(result))
      +	changesInterface := make([]interface{}, len(changes))
      +	for i := range changes {
      +		changesInterface[i] = &changes[i]
      +	}
      +
      +	err = dbus.Store(resultInterface, changesInterface...)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return changes, nil
      +}
      +
      +type DisableUnitFileChange struct {
      +	Type        string // Type of the change (one of symlink or unlink)
      +	Filename    string // File name of the symlink
      +	Destination string // Destination of the symlink
      +}
      +
      +// Reload instructs systemd to scan for and reload unit files. This is
      +// equivalent to a 'systemctl daemon-reload'.
      +func (c *Conn) Reload() error {
      +	return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
      +}
      +
      +func unitPath(name string) dbus.ObjectPath {
      +	return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods_test.go b/vendor/github.com/coreos/go-systemd/dbus/methods_test.go
      new file mode 100644
      index 00000000..c9f9ccde
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/dbus/methods_test.go
      @@ -0,0 +1,345 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package dbus
      +
      +import (
      +	"fmt"
      +	"math/rand"
      +	"os"
      +	"path/filepath"
      +	"reflect"
      +	"testing"
      +
      +	"github.com/godbus/dbus"
      +)
      +
      +func setupConn(t *testing.T) *Conn {
      +	conn, err := New()
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	return conn
      +}
      +
      +func findFixture(target string, t *testing.T) string {
      +	abs, err := filepath.Abs("../fixtures/" + target)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	return abs
      +}
      +
      +func setupUnit(target string, conn *Conn, t *testing.T) {
      +	// Blindly stop the unit in case it is running
      +	conn.StopUnit(target, "replace", nil)
      +
      +	// Blindly remove the symlink in case it exists
      +	targetRun := filepath.Join("/run/systemd/system/", target)
      +	os.Remove(targetRun)
      +}
      +
      +func linkUnit(target string, conn *Conn, t *testing.T) {
      +	abs := findFixture(target, t)
      +	fixture := []string{abs}
      +
      +	changes, err := conn.LinkUnitFiles(fixture, true, true)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	if len(changes) < 1 {
      +		t.Fatalf("Expected one change, got %v", changes)
      +	}
      +
      +	runPath := filepath.Join("/run/systemd/system/", target)
      +	if changes[0].Filename != runPath {
      +		t.Fatal("Unexpected target filename")
      +	}
      +}
      +
      +// Ensure that basic unit starting and stopping works.
      +func TestStartStopUnit(t *testing.T) {
      +	target := "start-stop.service"
      +	conn := setupConn(t)
      +
      +	setupUnit(target, conn, t)
      +	linkUnit(target, conn, t)
      +
      +	// 2. Start the unit
      +	reschan := make(chan string)
      +	_, err := conn.StartUnit(target, "replace", reschan)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	job := <-reschan
      +	if job != "done" {
      +		t.Fatal("Job is not done:", job)
      +	}
      +
      +	units, err := conn.ListUnits()
      +
      +	var unit *UnitStatus
      +	for _, u := range units {
      +		if u.Name == target {
      +			unit = &u
      +		}
      +	}
      +
      +	if unit == nil {
      +		t.Fatalf("Test unit not found in list")
      +	}
      +
      +	if unit.ActiveState != "active" {
      +		t.Fatalf("Test unit not active")
      +	}
      +
      +	// 3. Stop the unit
      +	_, err = conn.StopUnit(target, "replace", reschan)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	// wait for StopUnit job to complete
      +	<-reschan
      +
      +	units, err = conn.ListUnits()
      +
      +	unit = nil
      +	for _, u := range units {
      +		if u.Name == target {
      +			unit = &u
      +		}
      +	}
      +
      +	if unit != nil {
      +		t.Fatalf("Test unit found in list, should be stopped")
      +	}
      +}
      +
      +// Enables a unit and then immediately tears it down
      +func TestEnableDisableUnit(t *testing.T) {
      +	target := "enable-disable.service"
      +	conn := setupConn(t)
      +
      +	setupUnit(target, conn, t)
      +	abs := findFixture(target, t)
      +	runPath := filepath.Join("/run/systemd/system/", target)
      +
      +	// 1. Enable the unit
      +	install, changes, err := conn.EnableUnitFiles([]string{abs}, true, true)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	if install != false {
      +		t.Fatal("Install was true")
      +	}
      +
      +	if len(changes) < 1 {
      +		t.Fatalf("Expected one change, got %v", changes)
      +	}
      +
      +	if changes[0].Filename != runPath {
      +		t.Fatal("Unexpected target filename")
      +	}
      +
      +	// 2. Disable the unit
      +	dChanges, err := conn.DisableUnitFiles([]string{abs}, true)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	if len(dChanges) != 1 {
      +		t.Fatalf("Changes should include the path, %v", dChanges)
      +	}
      +	if dChanges[0].Filename != runPath {
      +		t.Fatalf("Change should include correct filename, %+v", dChanges[0])
      +	}
      +	if dChanges[0].Destination != "" {
      +		t.Fatalf("Change destination should be empty, %+v", dChanges[0])
      +	}
      +}
      +
      +// TestGetUnitProperties reads the `-.mount` which should exist on all systemd
      +// systems and ensures that one of its properties is valid.
      +func TestGetUnitProperties(t *testing.T) {
      +	conn := setupConn(t)
      +
      +	unit := "-.mount"
      +
      +	info, err := conn.GetUnitProperties(unit)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	names := info["Wants"].([]string)
      +
      +	if len(names) < 1 {
      +		t.Fatal("/ is unwanted")
      +	}
      +
      +	if names[0] != "system.slice" {
      +		t.Fatal("unexpected wants for /")
      +	}
      +
      +	prop, err := conn.GetUnitProperty(unit, "Wants")
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	if prop.Name != "Wants" {
      +		t.Fatal("unexpected property name")
      +	}
      +
      +	val := prop.Value.Value().([]string)
      +	if !reflect.DeepEqual(val, names) {
      +		t.Fatal("unexpected property value")
      +	}
      +}
      +
      +// TestGetUnitPropertiesRejectsInvalidName attempts to get the properties for a
      +// unit with an invalid name. This test should be run with --test.timeout set,
      +// as a fail will manifest as GetUnitProperties hanging indefinitely.
      +func TestGetUnitPropertiesRejectsInvalidName(t *testing.T) {
      +	conn := setupConn(t)
      +
      +	unit := "//invalid#$^/"
      +
      +	_, err := conn.GetUnitProperties(unit)
      +	if err == nil {
      +		t.Fatal("Expected an error, got nil")
      +	}
      +
      +	_, err = conn.GetUnitProperty(unit, "Wants")
      +	if err == nil {
      +		t.Fatal("Expected an error, got nil")
      +	}
      +}
      +
      +// TestSetUnitProperties changes a cgroup setting on the `tmp.mount`
      +// which should exist on all systemd systems and ensures that the
      +// property was set.
      +func TestSetUnitProperties(t *testing.T) {
      +	conn := setupConn(t)
      +
      +	unit := "tmp.mount"
      +
      +	if err := conn.SetUnitProperties(unit, true, Property{"CPUShares", dbus.MakeVariant(uint64(1023))}); err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	info, err := conn.GetUnitTypeProperties(unit, "Mount")
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	value := info["CPUShares"].(uint64)
      +	if value != 1023 {
      +		t.Fatal("CPUShares of unit is not 1023:", value)
      +	}
      +}
      +
      +// Ensure that basic transient unit starting and stopping works.
      +func TestStartStopTransientUnit(t *testing.T) {
      +	conn := setupConn(t)
      +
      +	props := []Property{
      +		PropExecStart([]string{"/bin/sleep", "400"}, false),
      +	}
      +	target := fmt.Sprintf("testing-transient-%d.service", rand.Int())
      +
      +	// Start the unit
      +	reschan := make(chan string)
      +	_, err := conn.StartTransientUnit(target, "replace", props, reschan)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	job := <-reschan
      +	if job != "done" {
      +		t.Fatal("Job is not done:", job)
      +	}
      +
      +	units, err := conn.ListUnits()
      +
      +	var unit *UnitStatus
      +	for _, u := range units {
      +		if u.Name == target {
      +			unit = &u
      +		}
      +	}
      +
      +	if unit == nil {
      +		t.Fatalf("Test unit not found in list")
      +	}
      +
      +	if unit.ActiveState != "active" {
      +		t.Fatalf("Test unit not active")
      +	}
      +
      +	// 3. Stop the unit
      +	_, err = conn.StopUnit(target, "replace", reschan)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	// wait for StopUnit job to complete
      +	<-reschan
      +
      +	units, err = conn.ListUnits()
      +
      +	unit = nil
      +	for _, u := range units {
      +		if u.Name == target {
      +			unit = &u
      +		}
      +	}
      +
      +	if unit != nil {
      +		t.Fatalf("Test unit found in list, should be stopped")
      +	}
      +}
      +
      +func TestConnJobListener(t *testing.T) {
      +	target := "start-stop.service"
      +	conn := setupConn(t)
      +
      +	setupUnit(target, conn, t)
      +	linkUnit(target, conn, t)
      +
      +	jobSize := len(conn.jobListener.jobs)
      +
      +	reschan := make(chan string)
      +	_, err := conn.StartUnit(target, "replace", reschan)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	<-reschan
      +
      +	_, err = conn.StopUnit(target, "replace", reschan)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	<-reschan
      +
      +	currentJobSize := len(conn.jobListener.jobs)
      +	if jobSize != currentJobSize {
      +		t.Fatal("JobListener jobs leaked")
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go
      new file mode 100644
      index 00000000..75200115
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/dbus/properties.go
      @@ -0,0 +1,218 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package dbus
      +
      +import (
      +	"github.com/godbus/dbus"
      +)
      +
      +// From the systemd docs:
      +//
      +// The properties array of StartTransientUnit() may take many of the settings
      +// that may also be configured in unit files. Not all parameters are currently
      +// accepted though, but we plan to cover more properties with future release.
      +// Currently you may set the Description, Slice and all dependency types of
      +// units, as well as RemainAfterExit, ExecStart for service units,
      +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
      +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
      +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
      +// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
      +// directly to their counterparts in unit files and as normal D-Bus object
      +// properties. The exception here is the PIDs field of scope units which is
      +// used for construction of the scope only and specifies the initial PIDs to
      +// add to the scope object.
      +
      +type Property struct {
      +	Name  string
      +	Value dbus.Variant
      +}
      +
      +type PropertyCollection struct {
      +	Name       string
      +	Properties []Property
      +}
      +
      +type execStart struct {
      +	Path             string   // the binary path to execute
      +	Args             []string // an array with all arguments to pass to the executed command, starting with argument 0
      +	UncleanIsFailure bool     // a boolean whether it should be considered a failure if the process exits uncleanly
      +}
      +
      +// PropExecStart sets the ExecStart service property.  The first argument is a
      +// slice with the binary path to execute followed by the arguments to pass to
      +// the executed command. See
      +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
      +func PropExecStart(command []string, uncleanIsFailure bool) Property {
      +	execStarts := []execStart{
      +		execStart{
      +			Path:             command[0],
      +			Args:             command,
      +			UncleanIsFailure: uncleanIsFailure,
      +		},
      +	}
      +
      +	return Property{
      +		Name:  "ExecStart",
      +		Value: dbus.MakeVariant(execStarts),
      +	}
      +}
      +
      +// PropRemainAfterExit sets the RemainAfterExit service property. See
      +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
      +func PropRemainAfterExit(b bool) Property {
      +	return Property{
      +		Name:  "RemainAfterExit",
      +		Value: dbus.MakeVariant(b),
      +	}
      +}
      +
      +// PropDescription sets the Description unit property. See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description=
      +func PropDescription(desc string) Property {
      +	return Property{
      +		Name:  "Description",
      +		Value: dbus.MakeVariant(desc),
      +	}
      +}
      +
      +func propDependency(name string, units []string) Property {
      +	return Property{
      +		Name:  name,
      +		Value: dbus.MakeVariant(units),
      +	}
      +}
      +
      +// PropRequires sets the Requires unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
      +func PropRequires(units ...string) Property {
      +	return propDependency("Requires", units)
      +}
      +
      +// PropRequiresOverridable sets the RequiresOverridable unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
      +func PropRequiresOverridable(units ...string) Property {
      +	return propDependency("RequiresOverridable", units)
      +}
      +
      +// PropRequisite sets the Requisite unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
      +func PropRequisite(units ...string) Property {
      +	return propDependency("Requisite", units)
      +}
      +
      +// PropRequisiteOverridable sets the RequisiteOverridable unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
      +func PropRequisiteOverridable(units ...string) Property {
      +	return propDependency("RequisiteOverridable", units)
      +}
      +
      +// PropWants sets the Wants unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
      +func PropWants(units ...string) Property {
      +	return propDependency("Wants", units)
      +}
      +
      +// PropBindsTo sets the BindsTo unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
      +func PropBindsTo(units ...string) Property {
      +	return propDependency("BindsTo", units)
      +}
      +
      +// PropRequiredBy sets the RequiredBy unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
      +func PropRequiredBy(units ...string) Property {
      +	return propDependency("RequiredBy", units)
      +}
      +
      +// PropRequiredByOverridable sets the RequiredByOverridable unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
      +func PropRequiredByOverridable(units ...string) Property {
      +	return propDependency("RequiredByOverridable", units)
      +}
      +
      +// PropWantedBy sets the WantedBy unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
      +func PropWantedBy(units ...string) Property {
      +	return propDependency("WantedBy", units)
      +}
      +
      +// PropBoundBy sets the BoundBy unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
      +func PropBoundBy(units ...string) Property {
      +	return propDependency("BoundBy", units)
      +}
      +
      +// PropConflicts sets the Conflicts unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
      +func PropConflicts(units ...string) Property {
      +	return propDependency("Conflicts", units)
      +}
      +
      +// PropConflictedBy sets the ConflictedBy unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
      +func PropConflictedBy(units ...string) Property {
      +	return propDependency("ConflictedBy", units)
      +}
      +
      +// PropBefore sets the Before unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
      +func PropBefore(units ...string) Property {
      +	return propDependency("Before", units)
      +}
      +
      +// PropAfter sets the After unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
      +func PropAfter(units ...string) Property {
      +	return propDependency("After", units)
      +}
      +
      +// PropOnFailure sets the OnFailure unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
      +func PropOnFailure(units ...string) Property {
      +	return propDependency("OnFailure", units)
      +}
      +
      +// PropTriggers sets the Triggers unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
      +func PropTriggers(units ...string) Property {
      +	return propDependency("Triggers", units)
      +}
      +
      +// PropTriggeredBy sets the TriggeredBy unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
      +func PropTriggeredBy(units ...string) Property {
      +	return propDependency("TriggeredBy", units)
      +}
      +
      +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
      +func PropPropagatesReloadTo(units ...string) Property {
      +	return propDependency("PropagatesReloadTo", units)
      +}
      +
      +// PropRequiresMountsFor sets the RequiresMountsFor unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
      +func PropRequiresMountsFor(units ...string) Property {
      +	return propDependency("RequiresMountsFor", units)
      +}
      +
      +// PropSlice sets the Slice unit property.  See
      +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
      +func PropSlice(slice string) Property {
      +	return Property{
      +		Name:  "Slice",
      +		Value: dbus.MakeVariant(slice),
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go
      new file mode 100644
      index 00000000..f92e6fbe
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/dbus/set.go
      @@ -0,0 +1,47 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package dbus
      +
      +type set struct {
      +	data map[string]bool
      +}
      +
      +func (s *set) Add(value string) {
      +	s.data[value] = true
      +}
      +
      +func (s *set) Remove(value string) {
      +	delete(s.data, value)
      +}
      +
      +func (s *set) Contains(value string) (exists bool) {
      +	_, exists = s.data[value]
      +	return
      +}
      +
      +func (s *set) Length() int {
      +	return len(s.data)
      +}
      +
      +func (s *set) Values() (values []string) {
      +	for val, _ := range s.data {
      +		values = append(values, val)
      +	}
      +	return
      +}
      +
      +func newSet() *set {
      +	return &set{make(map[string]bool)}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/dbus/set_test.go b/vendor/github.com/coreos/go-systemd/dbus/set_test.go
      new file mode 100644
      index 00000000..2f04096f
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/dbus/set_test.go
      @@ -0,0 +1,53 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package dbus
      +
      +import (
      +	"testing"
      +)
      +
      +// TestBasicSetActions asserts that Add & Remove behavior is correct
      +func TestBasicSetActions(t *testing.T) {
      +	s := newSet()
      +
      +	if s.Contains("foo") {
      +		t.Fatal("set should not contain 'foo'")
      +	}
      +
      +	s.Add("foo")
      +
      +	if !s.Contains("foo") {
      +		t.Fatal("set should contain 'foo'")
      +	}
      +
      +	v := s.Values()
      +	if len(v) != 1 {
      +		t.Fatal("set.Values did not report correct number of values")
      +	}
      +	if v[0] != "foo" {
      +		t.Fatal("set.Values did not report value")
      +	}
      +
      +	s.Remove("foo")
      +
      +	if s.Contains("foo") {
      +		t.Fatal("set should not contain 'foo'")
      +	}
      +
      +	v = s.Values()
      +	if len(v) != 0 {
      +		t.Fatal("set.Values did not report correct number of values")
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
      new file mode 100644
      index 00000000..99645144
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
      @@ -0,0 +1,250 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package dbus
      +
      +import (
      +	"errors"
      +	"time"
      +
      +	"github.com/godbus/dbus"
      +)
      +
      +const (
      +	cleanIgnoreInterval = int64(10 * time.Second)
      +	ignoreInterval      = int64(30 * time.Millisecond)
      +)
      +
      +// Subscribe sets up this connection to subscribe to all systemd dbus events.
      +// This is required before calling SubscribeUnits. When the connection closes
      +// systemd will automatically stop sending signals so there is no need to
      +// explicitly call Unsubscribe().
      +func (c *Conn) Subscribe() error {
      +	c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
      +		"type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
      +	c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
      +		"type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
      +
      +	err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
      +	if err != nil {
      +		return err
      +	}
      +
      +	return nil
      +}
      +
      +// Unsubscribe this connection from systemd dbus events.
      +func (c *Conn) Unsubscribe() error {
      +	err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
      +	if err != nil {
      +		return err
      +	}
      +
      +	return nil
      +}
      +
      +func (c *Conn) dispatch() {
      +	ch := make(chan *dbus.Signal, signalBuffer)
      +
      +	c.sigconn.Signal(ch)
      +
      +	go func() {
      +		for {
      +			signal, ok := <-ch
      +			if !ok {
      +				return
      +			}
      +
      +			if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
      +				c.jobComplete(signal)
      +			}
      +
      +			if c.subscriber.updateCh == nil {
      +				continue
      +			}
      +
      +			var unitPath dbus.ObjectPath
      +			switch signal.Name {
      +			case "org.freedesktop.systemd1.Manager.JobRemoved":
      +				unitName := signal.Body[2].(string)
      +				c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
      +			case "org.freedesktop.systemd1.Manager.UnitNew":
      +				unitPath = signal.Body[1].(dbus.ObjectPath)
      +			case "org.freedesktop.DBus.Properties.PropertiesChanged":
      +				if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
      +					unitPath = signal.Path
      +				}
      +			}
      +
      +			if unitPath == dbus.ObjectPath("") {
      +				continue
      +			}
      +
      +			c.sendSubStateUpdate(unitPath)
      +		}
      +	}()
      +}
      +
      +// Returns two unbuffered channels which will receive all changed units every
      +// interval.  Deleted units are sent as nil.
      +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
      +	return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
      +}
      +
      +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
      +// size of the channels, the comparison function for detecting changes and a filter
      +// function for cutting down on the noise that your channel receives.
      +func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
      +	old := make(map[string]*UnitStatus)
      +	statusChan := make(chan map[string]*UnitStatus, buffer)
      +	errChan := make(chan error, buffer)
      +
      +	go func() {
      +		for {
      +			timerChan := time.After(interval)
      +
      +			units, err := c.ListUnits()
      +			if err == nil {
      +				cur := make(map[string]*UnitStatus)
      +				for i := range units {
      +					if filterUnit != nil && filterUnit(units[i].Name) {
      +						continue
      +					}
      +					cur[units[i].Name] = &units[i]
      +				}
      +
      +				// add all new or changed units
      +				changed := make(map[string]*UnitStatus)
      +				for n, u := range cur {
      +					if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
      +						changed[n] = u
      +					}
      +					delete(old, n)
      +				}
      +
      +				// add all deleted units
      +				for oldN := range old {
      +					changed[oldN] = nil
      +				}
      +
      +				old = cur
      +
      +				if len(changed) != 0 {
      +					statusChan <- changed
      +				}
      +			} else {
      +				errChan <- err
      +			}
      +
      +			<-timerChan
      +		}
      +	}()
      +
      +	return statusChan, errChan
      +}
      +
      +type SubStateUpdate struct {
      +	UnitName string
      +	SubState string
      +}
      +
      +// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
      +// Although this writes to updateCh on every state change, the reported state
      +// may be more recent than the change that generated it (due to an unavoidable
      +// race in the systemd dbus interface).  That is, this method provides a good
      +// way to keep a current view of all units' states, but is not guaranteed to
      +// show every state transition they go through.  Furthermore, state changes
      +// will only be written to the channel with non-blocking writes.  If updateCh
      +// is full, it attempts to write an error to errCh; if errCh is full, the error
      +// passes silently.
      +func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
      +	c.subscriber.Lock()
      +	defer c.subscriber.Unlock()
      +	c.subscriber.updateCh = updateCh
      +	c.subscriber.errCh = errCh
      +}
      +
      +func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
      +	c.subscriber.Lock()
      +	defer c.subscriber.Unlock()
      +
      +	if c.shouldIgnore(path) {
      +		return
      +	}
      +
      +	info, err := c.GetUnitProperties(string(path))
      +	if err != nil {
      +		select {
      +		case c.subscriber.errCh <- err:
      +		default:
      +		}
      +	}
      +
      +	name := info["Id"].(string)
      +	substate := info["SubState"].(string)
      +
      +	update := &SubStateUpdate{name, substate}
      +	select {
      +	case c.subscriber.updateCh <- update:
      +	default:
      +		select {
      +		case c.subscriber.errCh <- errors.New("update channel full!"):
      +		default:
      +		}
      +	}
      +
      +	c.updateIgnore(path, info)
      +}
      +
      +// The ignore functions work around a wart in the systemd dbus interface.
      +// Requesting the properties of an unloaded unit will cause systemd to send a
      +// pair of UnitNew/UnitRemoved signals.  Because we need to get a unit's
      +// properties on UnitNew (as that's the only indication of a new unit coming up
      +// for the first time), we would enter an infinite loop if we did not attempt
      +// to detect and ignore these spurious signals.  The signal themselves are
      +// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
      +// unloaded unit's signals for a short time after requesting its properties.
      +// This means that we will miss e.g. a transient unit being restarted
      +// *immediately* upon failure and also a transient unit being started
      +// immediately after requesting its status (with systemctl status, for example,
      +// because this causes a UnitNew signal to be sent which then causes us to fetch
      +// the properties).
      +
      +func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
      +	t, ok := c.subscriber.ignore[path]
      +	return ok && t >= time.Now().UnixNano()
      +}
      +
      +func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
      +	c.cleanIgnore()
      +
      +	// unit is unloaded - it will trigger bad systemd dbus behavior
      +	if info["LoadState"].(string) == "not-found" {
      +		c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
      +	}
      +}
      +
      +// without this, ignore would grow unboundedly over time
      +func (c *Conn) cleanIgnore() {
      +	now := time.Now().UnixNano()
      +	if c.subscriber.cleanIgnore < now {
      +		c.subscriber.cleanIgnore = now + cleanIgnoreInterval
      +
      +		for p, t := range c.subscriber.ignore {
      +			if t < now {
      +				delete(c.subscriber.ignore, p)
      +			}
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
      new file mode 100644
      index 00000000..5b408d58
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
      @@ -0,0 +1,57 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package dbus
      +
      +import (
      +	"time"
      +)
      +
      +// SubscriptionSet returns a subscription set which is like conn.Subscribe but
      +// can filter to only return events for a set of units.
      +type SubscriptionSet struct {
      +	*set
      +	conn *Conn
      +}
      +
      +func (s *SubscriptionSet) filter(unit string) bool {
      +	return !s.Contains(unit)
      +}
      +
      +// Subscribe starts listening for dbus events for all of the units in the set.
      +// Returns channels identical to conn.SubscribeUnits.
      +func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
      +	// TODO: Make fully evented by using systemd 209 with properties changed values
      +	return s.conn.SubscribeUnitsCustom(time.Second, 0,
      +		mismatchUnitStatus,
      +		func(unit string) bool { return s.filter(unit) },
      +	)
      +}
      +
      +// NewSubscriptionSet returns a new subscription set.
      +func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
      +	return &SubscriptionSet{newSet(), conn}
      +}
      +
      +// mismatchUnitStatus returns true if the provided UnitStatus objects
      +// are not equivalent. false is returned if the objects are equivalent.
      +// Only the Name, Description and state-related fields are used in
      +// the comparison.
      +func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
      +	return u1.Name != u2.Name ||
      +		u1.Description != u2.Description ||
      +		u1.LoadState != u2.LoadState ||
      +		u1.ActiveState != u2.ActiveState ||
      +		u1.SubState != u2.SubState
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set_test.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set_test.go
      new file mode 100644
      index 00000000..53f75dfb
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_set_test.go
      @@ -0,0 +1,82 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package dbus
      +
      +import (
      +	"testing"
      +	"time"
      +)
      +
+// TestSubscriptionSetUnit exercises subscription to events for one unit in a set.
      +func TestSubscriptionSetUnit(t *testing.T) {
      +	target := "subscribe-events-set.service"
      +
      +	conn, err := New()
      +
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	err = conn.Subscribe()
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	subSet := conn.NewSubscriptionSet()
      +	evChan, errChan := subSet.Subscribe()
      +
      +	subSet.Add(target)
      +	setupUnit(target, conn, t)
      +	linkUnit(target, conn, t)
      +
      +	reschan := make(chan string)
      +	_, err = conn.StartUnit(target, "replace", reschan)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	job := <-reschan
      +	if job != "done" {
      +		t.Fatal("Couldn't start", target)
      +	}
      +
      +	timeout := make(chan bool, 1)
      +	go func() {
      +		time.Sleep(3 * time.Second)
      +		close(timeout)
      +	}()
      +
      +	for {
      +		select {
      +		case changes := <-evChan:
      +			tCh, ok := changes[target]
      +
      +			if !ok {
      +				t.Fatal("Unexpected event:", changes)
      +			}
      +
      +			if tCh.ActiveState == "active" && tCh.Name == target {
      +				goto success
      +			}
      +		case err = <-errChan:
      +			t.Fatal(err)
      +		case <-timeout:
      +			t.Fatal("Reached timeout")
      +		}
      +	}
      +
      +success:
      +	return
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_test.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_test.go
      new file mode 100644
      index 00000000..e50fc6f9
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_test.go
      @@ -0,0 +1,105 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package dbus
      +
      +import (
      +	"testing"
      +	"time"
      +)
      +
      +// TestSubscribe exercises the basics of subscription
      +func TestSubscribe(t *testing.T) {
      +	conn, err := New()
      +
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	err = conn.Subscribe()
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	err = conn.Unsubscribe()
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +}
      +
      +// TestSubscribeUnit exercises the basics of subscription of a particular unit.
      +func TestSubscribeUnit(t *testing.T) {
      +	target := "subscribe-events.service"
      +
      +	conn, err := New()
      +
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	err = conn.Subscribe()
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	err = conn.Unsubscribe()
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	evChan, errChan := conn.SubscribeUnits(time.Second)
      +
      +	setupUnit(target, conn, t)
      +	linkUnit(target, conn, t)
      +
      +	reschan := make(chan string)
      +	_, err = conn.StartUnit(target, "replace", reschan)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	job := <-reschan
      +	if job != "done" {
      +		t.Fatal("Couldn't start", target)
      +	}
      +
      +	timeout := make(chan bool, 1)
      +	go func() {
      +		time.Sleep(3 * time.Second)
      +		close(timeout)
      +	}()
      +
      +	for {
      +		select {
      +		case changes := <-evChan:
      +			tCh, ok := changes[target]
      +
      +			// Just continue until we see our event.
      +			if !ok {
      +				continue
      +			}
      +
      +			if tCh.ActiveState == "active" && tCh.Name == target {
      +				goto success
      +			}
      +		case err = <-errChan:
      +			t.Fatal(err)
      +		case <-timeout:
      +			t.Fatal("Reached timeout")
      +		}
      +	}
      +
      +success:
      +	return
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/examples/activation/activation.go b/vendor/github.com/coreos/go-systemd/examples/activation/activation.go
      new file mode 100644
      index 00000000..058a84b7
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/examples/activation/activation.go
      @@ -0,0 +1,58 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// Activation example used by the activation unit tests.
      +package main
      +
      +import (
      +	"fmt"
      +	"os"
      +
      +	"github.com/coreos/go-systemd/activation"
      +)
      +
      +func fixListenPid() {
      +	if os.Getenv("FIX_LISTEN_PID") != "" {
      +		// HACK: real systemd would set LISTEN_PID before exec'ing but
      +		// this is too difficult in golang for the purpose of a test.
      +		// Do not do this in real code.
      +		os.Setenv("LISTEN_PID", fmt.Sprintf("%d", os.Getpid()))
      +	}
      +}
      +
      +func main() {
      +	fixListenPid()
      +
      +	files := activation.Files(false)
      +
      +	if len(files) == 0 {
      +		panic("No files")
      +	}
      +
      +	if os.Getenv("LISTEN_PID") == "" || os.Getenv("LISTEN_FDS") == "" {
      +		panic("Should not unset envs")
      +	}
      +
      +	files = activation.Files(true)
      +
      +	if os.Getenv("LISTEN_PID") != "" || os.Getenv("LISTEN_FDS") != "" {
      +		panic("Can not unset envs")
      +	}
      +
      +	// Write out the expected strings to the two pipes
      +	files[0].Write([]byte("Hello world"))
      +	files[1].Write([]byte("Goodbye world"))
      +
      +	return
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/examples/activation/httpserver/README.md b/vendor/github.com/coreos/go-systemd/examples/activation/httpserver/README.md
      new file mode 100644
      index 00000000..a350cca5
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/examples/activation/httpserver/README.md
      @@ -0,0 +1,19 @@
      +## socket activated http server
      +
      +This is a simple example of using socket activation with systemd to serve a
      +simple HTTP server on http://127.0.0.1:8076
      +
      +To try it out `go get` the httpserver and run it under the systemd-activate helper
      +
      +```
      +export GOPATH=`pwd`
      +go get github.com/coreos/go-systemd/examples/activation/httpserver
      +sudo /usr/lib/systemd/systemd-activate -l 127.0.0.1:8076 ./bin/httpserver
      +```
      +
      +Then curl the URL and you will notice that it starts up:
      +
      +```
      +curl 127.0.0.1:8076
      +hello socket activated world!
      +```
      diff --git a/vendor/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service b/vendor/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service
      new file mode 100644
      index 00000000..c8dea0f6
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service
      @@ -0,0 +1,11 @@
      +[Unit]
      +Description=Hello World HTTP
      +Requires=network.target
      +After=multi-user.target
      +
      +[Service]
      +Type=simple
      +ExecStart=/usr/local/bin/httpserver
      +
      +[Install]
      +WantedBy=multi-user.target
      diff --git a/vendor/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket b/vendor/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket
      new file mode 100644
      index 00000000..723ed7ed
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket
      @@ -0,0 +1,5 @@
      +[Socket]
      +ListenStream=127.0.0.1:8076
      +
      +[Install]
      +WantedBy=sockets.target
      diff --git a/vendor/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go b/vendor/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go
      new file mode 100644
      index 00000000..a720f794
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go
      @@ -0,0 +1,40 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package main
      +
      +import (
      +	"io"
      +	"net/http"
      +
      +	"github.com/coreos/go-systemd/activation"
      +)
      +
      +func HelloServer(w http.ResponseWriter, req *http.Request) {
      +	io.WriteString(w, "hello socket activated world!\n")
      +}
      +
      +func main() {
      +	listeners, err := activation.Listeners(true)
      +	if err != nil {
      +		panic(err)
      +	}
      +
      +	if len(listeners) != 1 {
      +		panic("Unexpected number of socket activation fds")
      +	}
      +
      +	http.HandleFunc("/", HelloServer)
      +	http.Serve(listeners[0], nil)
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/examples/activation/listen.go b/vendor/github.com/coreos/go-systemd/examples/activation/listen.go
      new file mode 100644
      index 00000000..c207a8f4
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/examples/activation/listen.go
      @@ -0,0 +1,64 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// Activation example used by the activation unit tests.
      +package main
      +
      +import (
      +	"fmt"
      +	"os"
      +
      +	"github.com/coreos/go-systemd/activation"
      +)
      +
      +func fixListenPid() {
      +	if os.Getenv("FIX_LISTEN_PID") != "" {
      +		// HACK: real systemd would set LISTEN_PID before exec'ing but
      +		// this is too difficult in golang for the purpose of a test.
      +		// Do not do this in real code.
      +		os.Setenv("LISTEN_PID", fmt.Sprintf("%d", os.Getpid()))
      +	}
      +}
      +
      +func main() {
      +	fixListenPid()
      +
      +	listeners, _ := activation.Listeners(false)
      +
      +	if len(listeners) == 0 {
      +		panic("No listeners")
      +	}
      +
      +	if os.Getenv("LISTEN_PID") == "" || os.Getenv("LISTEN_FDS") == "" {
      +		panic("Should not unset envs")
      +	}
      +
      +	listeners, err := activation.Listeners(true)
      +	if err != nil {
      +		panic(err)
      +	}
      +
      +	if os.Getenv("LISTEN_PID") != "" || os.Getenv("LISTEN_FDS") != "" {
      +		panic("Can not unset envs")
      +	}
      +
      +	c0, _ := listeners[0].Accept()
      +	c1, _ := listeners[1].Accept()
      +
      +	// Write out the expected strings to the two pipes
      +	c0.Write([]byte("Hello world"))
      +	c1.Write([]byte("Goodbye world"))
      +
      +	return
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/examples/activation/udpconn.go b/vendor/github.com/coreos/go-systemd/examples/activation/udpconn.go
      new file mode 100644
      index 00000000..6d66f99d
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/examples/activation/udpconn.go
      @@ -0,0 +1,86 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// Activation example used by the activation unit tests.
      +package main
      +
      +import (
      +	"fmt"
      +	"net"
      +	"os"
      +
      +	"github.com/coreos/go-systemd/activation"
      +)
      +
      +func fixListenPid() {
      +	if os.Getenv("FIX_LISTEN_PID") != "" {
      +		// HACK: real systemd would set LISTEN_PID before exec'ing but
      +		// this is too difficult in golang for the purpose of a test.
      +		// Do not do this in real code.
      +		os.Setenv("LISTEN_PID", fmt.Sprintf("%d", os.Getpid()))
      +	}
      +}
      +
      +func main() {
      +	fixListenPid()
      +
      +	pc, _ := activation.PacketConns(false)
      +
      +	if len(pc) == 0 {
      +		panic("No packetConns")
      +	}
      +
      +	if os.Getenv("LISTEN_PID") == "" || os.Getenv("LISTEN_FDS") == "" {
      +		panic("Should not unset envs")
      +	}
      +
      +	pc, err := activation.PacketConns(true)
      +	if err != nil {
      +		panic(err)
      +	}
      +
      +	if os.Getenv("LISTEN_PID") != "" || os.Getenv("LISTEN_FDS") != "" {
      +		panic("Can not unset envs")
      +	}
      +
      +	udp1, ok := pc[0].(*net.UDPConn)
      +	if !ok {
      +		panic("packetConn 1 not UDP")
      +	}
      +	udp2, ok := pc[1].(*net.UDPConn)
      +	if !ok {
      +		panic("packetConn 2 not UDP")
      +	}
      +
      +	_, addr1, err := udp1.ReadFromUDP(nil)
      +	if err != nil {
      +		panic(err)
      +	}
      +	_, addr2, err := udp2.ReadFromUDP(nil)
      +	if err != nil {
      +		panic(err)
      +	}
      +
      +	// Write out the expected strings to the two pipes
      +	_, err = udp1.WriteToUDP([]byte("Hello world"), addr1)
      +	if err != nil {
      +		panic(err)
      +	}
      +	_, err = udp2.WriteToUDP([]byte("Goodbye world"), addr2)
      +	if err != nil {
      +		panic(err)
      +	}
      +
      +	return
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/fixtures/enable-disable.service b/vendor/github.com/coreos/go-systemd/fixtures/enable-disable.service
      new file mode 100644
      index 00000000..74c94590
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/fixtures/enable-disable.service
      @@ -0,0 +1,5 @@
      +[Unit]
      +Description=enable disable test
      +
      +[Service]
      +ExecStart=/bin/sleep 400
      diff --git a/vendor/github.com/coreos/go-systemd/fixtures/start-stop.service b/vendor/github.com/coreos/go-systemd/fixtures/start-stop.service
      new file mode 100644
      index 00000000..a1f8c367
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/fixtures/start-stop.service
      @@ -0,0 +1,5 @@
      +[Unit]
      +Description=start stop test
      +
      +[Service]
      +ExecStart=/bin/sleep 400
      diff --git a/vendor/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service b/vendor/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service
      new file mode 100644
      index 00000000..a1f8c367
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service
      @@ -0,0 +1,5 @@
      +[Unit]
      +Description=start stop test
      +
      +[Service]
      +ExecStart=/bin/sleep 400
      diff --git a/vendor/github.com/coreos/go-systemd/fixtures/subscribe-events.service b/vendor/github.com/coreos/go-systemd/fixtures/subscribe-events.service
      new file mode 100644
      index 00000000..a1f8c367
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/fixtures/subscribe-events.service
      @@ -0,0 +1,5 @@
      +[Unit]
      +Description=start stop test
      +
      +[Service]
      +ExecStart=/bin/sleep 400
      diff --git a/vendor/github.com/coreos/go-systemd/login1/dbus.go b/vendor/github.com/coreos/go-systemd/login1/dbus.go
      new file mode 100644
      index 00000000..5e1e670b
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/login1/dbus.go
      @@ -0,0 +1,108 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// Integration with the systemd logind API.  See http://www.freedesktop.org/wiki/Software/systemd/logind/
      +package login1
      +
      +import (
      +	"fmt"
      +	"os"
      +	"strconv"
      +
      +	"github.com/godbus/dbus"
      +)
      +
      +const (
      +	dbusInterface = "org.freedesktop.login1.Manager"
      +	dbusPath      = "/org/freedesktop/login1"
      +)
      +
+// Conn is a connection to systemd's dbus endpoint.
      +type Conn struct {
      +	conn   *dbus.Conn
      +	object dbus.BusObject
      +}
      +
      +// New() establishes a connection to the system bus and authenticates.
      +func New() (*Conn, error) {
      +	c := new(Conn)
      +
      +	if err := c.initConnection(); err != nil {
      +		return nil, err
      +	}
      +
      +	return c, nil
      +}
      +
      +func (c *Conn) initConnection() error {
      +	var err error
      +	c.conn, err = dbus.SystemBusPrivate()
      +	if err != nil {
      +		return err
      +	}
      +
      +	// Only use EXTERNAL method, and hardcode the uid (not username)
      +	// to avoid a username lookup (which requires a dynamically linked
      +	// libc)
      +	methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
      +
      +	err = c.conn.Auth(methods)
      +	if err != nil {
      +		c.conn.Close()
      +		return err
      +	}
      +
      +	err = c.conn.Hello()
      +	if err != nil {
      +		c.conn.Close()
      +		return err
      +	}
      +
      +	c.object = c.conn.Object("org.freedesktop.login1", dbus.ObjectPath(dbusPath))
      +
      +	return nil
      +}
      +
      +// Reboot asks logind for a reboot optionally asking for auth.
      +func (c *Conn) Reboot(askForAuth bool) {
      +	c.object.Call(dbusInterface+".Reboot", 0, askForAuth)
      +}
      +
      +// Inhibit takes inhibition lock in logind.
      +func (c *Conn) Inhibit(what, who, why, mode string) (*os.File, error) {
      +	var fd dbus.UnixFD
      +
      +	err := c.object.Call(dbusInterface+".Inhibit", 0, what, who, why, mode).Store(&fd)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return os.NewFile(uintptr(fd), "inhibit"), nil
      +}
      +
      +// Subscribe to signals on the logind dbus
      +func (c *Conn) Subscribe(members ...string) chan *dbus.Signal {
      +	for _, member := range members {
      +		c.conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
      +			fmt.Sprintf("type='signal',interface='org.freedesktop.login1.Manager',member='%s'", member))
      +	}
      +	ch := make(chan *dbus.Signal, 10)
      +	c.conn.Signal(ch)
      +	return ch
      +}
      +
      +// PowerOff asks logind for a power off optionally asking for auth.
      +func (c *Conn) PowerOff(askForAuth bool) {
      +	c.object.Call(dbusInterface+".PowerOff", 0, askForAuth)
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/login1/dbus_test.go b/vendor/github.com/coreos/go-systemd/login1/dbus_test.go
      new file mode 100644
      index 00000000..d160f470
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/login1/dbus_test.go
      @@ -0,0 +1,28 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package login1
      +
      +import (
      +	"testing"
      +)
      +
      +// TestNew ensures that New() works without errors.
      +func TestNew(t *testing.T) {
      +	_, err := New()
      +
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/machine1/dbus.go b/vendor/github.com/coreos/go-systemd/machine1/dbus.go
      new file mode 100644
      index 00000000..dae80525
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/machine1/dbus.go
      @@ -0,0 +1,81 @@
      +/*
      +Copyright 2015 CoreOS Inc.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Integration with the systemd machined API.  See http://www.freedesktop.org/wiki/Software/systemd/machined/
      +package machine1
      +
      +import (
      +	"os"
      +	"strconv"
      +
      +	"github.com/godbus/dbus"
      +)
      +
      +const (
      +	dbusInterface = "org.freedesktop.machine1.Manager"
      +	dbusPath      = "/org/freedesktop/machine1"
      +)
      +
+// Conn is a connection to systemd's dbus endpoint.
      +type Conn struct {
      +	conn   *dbus.Conn
      +	object dbus.BusObject
      +}
      +
      +// New() establishes a connection to the system bus and authenticates.
      +func New() (*Conn, error) {
      +	c := new(Conn)
      +
      +	if err := c.initConnection(); err != nil {
      +		return nil, err
      +	}
      +
      +	return c, nil
      +}
      +
      +func (c *Conn) initConnection() error {
      +	var err error
      +	c.conn, err = dbus.SystemBusPrivate()
      +	if err != nil {
      +		return err
      +	}
      +
      +	// Only use EXTERNAL method, and hardcode the uid (not username)
      +	// to avoid a username lookup (which requires a dynamically linked
      +	// libc)
      +	methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
      +
      +	err = c.conn.Auth(methods)
      +	if err != nil {
      +		c.conn.Close()
      +		return err
      +	}
      +
      +	err = c.conn.Hello()
      +	if err != nil {
      +		c.conn.Close()
      +		return err
      +	}
      +
      +	c.object = c.conn.Object("org.freedesktop.machine1", dbus.ObjectPath(dbusPath))
      +
      +	return nil
      +}
      +
      +// RegisterMachine registers the container with the systemd-machined
      +func (c *Conn) RegisterMachine(name string, id []byte, service string, class string, pid int, root_directory string) error {
      +	return c.object.Call(dbusInterface+".RegisterMachine", 0, name, id, service, class, uint32(pid), root_directory).Err
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/machine1/dbus_test.go b/vendor/github.com/coreos/go-systemd/machine1/dbus_test.go
      new file mode 100644
      index 00000000..63f17cc5
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/machine1/dbus_test.go
      @@ -0,0 +1,30 @@
      +/*
      +Copyright 2015 CoreOS Inc.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package machine1
      +
      +import (
      +	"testing"
      +)
      +
      +// TestNew ensures that New() works without errors.
      +func TestNew(t *testing.T) {
      +	_, err := New()
      +
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/sdjournal/journal.go b/vendor/github.com/coreos/go-systemd/sdjournal/journal.go
      new file mode 100644
      index 00000000..0324983a
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/sdjournal/journal.go
      @@ -0,0 +1,357 @@
      +// Copyright 2015 RedHat, Inc.
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// Package sdjournal provides a low-level Go interface to the
      +// systemd journal wrapped around the sd-journal C API.
      +//
      +// All public read methods map closely to the sd-journal API functions. See the
      +// sd-journal.h documentation[1] for information about each function.
      +//
      +// To write to the journal, see the pure-Go "journal" package
      +//
      +// [1] http://www.freedesktop.org/software/systemd/man/sd-journal.html
      +package sdjournal
      +
      +/*
      +#cgo pkg-config: libsystemd
      +#include <systemd/sd-journal.h>
      +#include <stdlib.h>
      +#include <syslog.h>
      +*/
      +import "C"
      +import (
      +	"fmt"
      +	"path/filepath"
      +	"strings"
      +	"sync"
      +	"time"
      +	"unsafe"
      +)
      +
      +// Journal entry field strings which correspond to:
      +// http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html
      +const (
      +	SD_JOURNAL_FIELD_SYSTEMD_UNIT = "_SYSTEMD_UNIT"
      +	SD_JOURNAL_FIELD_MESSAGE      = "MESSAGE"
      +	SD_JOURNAL_FIELD_PID          = "_PID"
      +	SD_JOURNAL_FIELD_UID          = "_UID"
      +	SD_JOURNAL_FIELD_GID          = "_GID"
      +	SD_JOURNAL_FIELD_HOSTNAME     = "_HOSTNAME"
      +	SD_JOURNAL_FIELD_MACHINE_ID   = "_MACHINE_ID"
      +)
      +
      +// Journal event constants
      +const (
      +	SD_JOURNAL_NOP        = int(C.SD_JOURNAL_NOP)
      +	SD_JOURNAL_APPEND     = int(C.SD_JOURNAL_APPEND)
      +	SD_JOURNAL_INVALIDATE = int(C.SD_JOURNAL_INVALIDATE)
      +)
      +
      +const (
      +	// IndefiniteWait is a sentinel value that can be passed to
      +	// sdjournal.Wait() to signal an indefinite wait for new journal
      +	// events. It is implemented as the maximum value for a time.Duration:
      +	// https://github.com/golang/go/blob/e4dcf5c8c22d98ac9eac7b9b226596229624cb1d/src/time/time.go#L434
      +	IndefiniteWait time.Duration = 1<<63 - 1
      +)
      +
      +// Journal is a Go wrapper of an sd_journal structure.
      +type Journal struct {
      +	cjournal *C.sd_journal
      +	mu       sync.Mutex
      +}
      +
      +// Match is a convenience wrapper to describe filters supplied to AddMatch.
      +type Match struct {
      +	Field string
      +	Value string
      +}
      +
      +// String returns a string representation of a Match suitable for use with AddMatch.
      +func (m *Match) String() string {
      +	return m.Field + "=" + m.Value
      +}
      +
      +// NewJournal returns a new Journal instance pointing to the local journal
      +func NewJournal() (*Journal, error) {
      +	j := &Journal{}
      +	r := C.sd_journal_open(&j.cjournal, C.SD_JOURNAL_LOCAL_ONLY)
      +
      +	if r < 0 {
      +		return nil, fmt.Errorf("failed to open journal: %d", r)
      +	}
      +
      +	return j, nil
      +}
      +
      +// NewJournalFromDir returns a new Journal instance pointing to a journal residing
      +// in a given directory. The supplied path may be relative or absolute; if
      +// relative, it will be converted to an absolute path before being opened.
      +func NewJournalFromDir(path string) (*Journal, error) {
      +	path, err := filepath.Abs(path)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	p := C.CString(path)
      +	defer C.free(unsafe.Pointer(p))
      +
      +	j := &Journal{}
      +	r := C.sd_journal_open_directory(&j.cjournal, p, 0)
      +	if r < 0 {
      +		return nil, fmt.Errorf("failed to open journal in directory %q: %d", path, r)
      +	}
      +
      +	return j, nil
      +}
      +
      +// Close closes a journal opened with NewJournal.
      +func (j *Journal) Close() error {
      +	j.mu.Lock()
      +	C.sd_journal_close(j.cjournal)
      +	j.mu.Unlock()
      +
      +	return nil
      +}
      +
      +// AddMatch adds a match by which to filter the entries of the journal.
      +func (j *Journal) AddMatch(match string) error {
      +	m := C.CString(match)
      +	defer C.free(unsafe.Pointer(m))
      +
      +	j.mu.Lock()
      +	r := C.sd_journal_add_match(j.cjournal, unsafe.Pointer(m), C.size_t(len(match)))
      +	j.mu.Unlock()
      +
      +	if r < 0 {
      +		return fmt.Errorf("failed to add match: %d", r)
      +	}
      +
      +	return nil
      +}
      +
      +// AddDisjunction inserts a logical OR in the match list.
      +func (j *Journal) AddDisjunction() error {
      +	j.mu.Lock()
      +	r := C.sd_journal_add_disjunction(j.cjournal)
      +	j.mu.Unlock()
      +
      +	if r < 0 {
      +		return fmt.Errorf("failed to add a disjunction in the match list: %d", r)
      +	}
      +
      +	return nil
      +}
      +
      +// AddConjunction inserts a logical AND in the match list.
      +func (j *Journal) AddConjunction() error {
      +	j.mu.Lock()
      +	r := C.sd_journal_add_conjunction(j.cjournal)
      +	j.mu.Unlock()
      +
      +	if r < 0 {
      +		return fmt.Errorf("failed to add a conjunction in the match list: %d", r)
      +	}
      +
      +	return nil
      +}
      +
      +// FlushMatches flushes all matches, disjunctions and conjunctions.
      +func (j *Journal) FlushMatches() {
      +	j.mu.Lock()
      +	C.sd_journal_flush_matches(j.cjournal)
      +	j.mu.Unlock()
      +}
      +
      +// Next advances the read pointer into the journal by one entry.
      +func (j *Journal) Next() (int, error) {
      +	j.mu.Lock()
      +	r := C.sd_journal_next(j.cjournal)
      +	j.mu.Unlock()
      +
      +	if r < 0 {
      +		return int(r), fmt.Errorf("failed to iterate journal: %d", r)
      +	}
      +
      +	return int(r), nil
      +}
      +
      +// NextSkip advances the read pointer by multiple entries at once,
      +// as specified by the skip parameter.
      +func (j *Journal) NextSkip(skip uint64) (uint64, error) {
      +	j.mu.Lock()
      +	r := C.sd_journal_next_skip(j.cjournal, C.uint64_t(skip))
      +	j.mu.Unlock()
      +
      +	if r < 0 {
      +		return uint64(r), fmt.Errorf("failed to iterate journal: %d", r)
      +	}
      +
      +	return uint64(r), nil
      +}
      +
      +// Previous sets the read pointer into the journal back by one entry.
      +func (j *Journal) Previous() (uint64, error) {
      +	j.mu.Lock()
      +	r := C.sd_journal_previous(j.cjournal)
      +	j.mu.Unlock()
      +
      +	if r < 0 {
      +		return uint64(r), fmt.Errorf("failed to iterate journal: %d", r)
      +	}
      +
      +	return uint64(r), nil
      +}
      +
      +// PreviousSkip sets back the read pointer by multiple entries at once,
      +// as specified by the skip parameter.
      +func (j *Journal) PreviousSkip(skip uint64) (uint64, error) {
      +	j.mu.Lock()
      +	r := C.sd_journal_previous_skip(j.cjournal, C.uint64_t(skip))
      +	j.mu.Unlock()
      +
      +	if r < 0 {
      +		return uint64(r), fmt.Errorf("failed to iterate journal: %d", r)
      +	}
      +
      +	return uint64(r), nil
      +}
      +
      +// GetData gets the data object associated with a specific field from the
      +// current journal entry.
      +func (j *Journal) GetData(field string) (string, error) {
      +	f := C.CString(field)
      +	defer C.free(unsafe.Pointer(f))
      +
      +	var d unsafe.Pointer
      +	var l C.size_t
      +
      +	j.mu.Lock()
      +	r := C.sd_journal_get_data(j.cjournal, f, &d, &l)
      +	j.mu.Unlock()
      +
      +	if r < 0 {
      +		return "", fmt.Errorf("failed to read message: %d", r)
      +	}
      +
      +	msg := C.GoStringN((*C.char)(d), C.int(l))
      +
      +	return msg, nil
      +}
      +
      +// GetDataValue gets the data object associated with a specific field from the
      +// current journal entry, returning only the value of the object.
      +func (j *Journal) GetDataValue(field string) (string, error) {
      +	val, err := j.GetData(field)
      +	if err != nil {
      +		return "", err
      +	}
      +	return strings.SplitN(val, "=", 2)[1], nil
      +}
      +
+// SetDataThreshold sets the data field size threshold for data returned by
+// GetData. To retrieve the complete data fields this threshold should be
+// turned off by setting it to 0, so that the library always returns the
+// complete data objects.
+func (j *Journal) SetDataThreshold(threshold uint64) error {
+	j.mu.Lock()
+	r := C.sd_journal_set_data_threshold(j.cjournal, C.size_t(threshold))
+	j.mu.Unlock()
+
+	if r < 0 {
+		return fmt.Errorf("failed to set data threshold: %d", r)
+	}
+
+	return nil
+}
      +
      +// GetRealtimeUsec gets the realtime (wallclock) timestamp of the current
      +// journal entry.
      +func (j *Journal) GetRealtimeUsec() (uint64, error) {
      +	var usec C.uint64_t
      +
      +	j.mu.Lock()
      +	r := C.sd_journal_get_realtime_usec(j.cjournal, &usec)
      +	j.mu.Unlock()
      +
      +	if r < 0 {
      +		return 0, fmt.Errorf("error getting timestamp for entry: %d", r)
      +	}
      +
      +	return uint64(usec), nil
      +}
      +
      +// SeekTail may be used to seek to the end of the journal, i.e. the most recent
      +// available entry.
      +func (j *Journal) SeekTail() error {
      +	j.mu.Lock()
      +	r := C.sd_journal_seek_tail(j.cjournal)
      +	j.mu.Unlock()
      +
      +	if r < 0 {
      +		return fmt.Errorf("failed to seek to tail of journal: %d", r)
      +	}
      +
      +	return nil
      +}
      +
      +// SeekRealtimeUsec seeks to the entry with the specified realtime (wallclock)
      +// timestamp, i.e. CLOCK_REALTIME.
      +func (j *Journal) SeekRealtimeUsec(usec uint64) error {
      +	j.mu.Lock()
      +	r := C.sd_journal_seek_realtime_usec(j.cjournal, C.uint64_t(usec))
      +	j.mu.Unlock()
      +
      +	if r < 0 {
      +		return fmt.Errorf("failed to seek to %d: %d", usec, r)
      +	}
      +
      +	return nil
      +}
      +
      +// Wait will synchronously wait until the journal gets changed. The maximum time
      +// this call sleeps may be controlled with the timeout parameter.  If
      +// sdjournal.IndefiniteWait is passed as the timeout parameter, Wait will
      +// wait indefinitely for a journal change.
      +func (j *Journal) Wait(timeout time.Duration) int {
      +	var to uint64
      +	if timeout == IndefiniteWait {
      +		// sd_journal_wait(3) calls for a (uint64_t) -1 to be passed to signify
      +		// indefinite wait, but using a -1 overflows our C.uint64_t, so we use an
      +		// equivalent hex value.
      +		to = 0xffffffffffffffff
      +	} else {
      +		to = uint64(time.Now().Add(timeout).Unix() / 1000)
      +	}
      +	j.mu.Lock()
      +	r := C.sd_journal_wait(j.cjournal, C.uint64_t(to))
      +	j.mu.Unlock()
      +
      +	return int(r)
      +}
      +
      +// GetUsage returns the journal disk space usage, in bytes.
      +func (j *Journal) GetUsage() (uint64, error) {
      +	var out C.uint64_t
      +	j.mu.Lock()
      +	r := C.sd_journal_get_usage(j.cjournal, &out)
      +	j.mu.Unlock()
      +
      +	if r < 0 {
      +		return 0, fmt.Errorf("failed to get journal disk space usage: %d", r)
      +	}
      +
      +	return uint64(out), nil
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/sdjournal/journal_test.go b/vendor/github.com/coreos/go-systemd/sdjournal/journal_test.go
      new file mode 100644
      index 00000000..2861b31f
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/sdjournal/journal_test.go
      @@ -0,0 +1,90 @@
      +// Copyright 2015 RedHat, Inc.
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package sdjournal
      +
      +import (
      +	"os"
      +	"testing"
      +	"time"
      +
      +	"github.com/coreos/go-systemd/journal"
      +)
      +
      +func TestJournalFollow(t *testing.T) {
      +	r, err := NewJournalReader(JournalReaderConfig{
      +		Since: time.Duration(-15) * time.Second,
      +		Matches: []Match{
      +			{
      +				Field: SD_JOURNAL_FIELD_SYSTEMD_UNIT,
      +				Value: "NetworkManager.service",
      +			},
      +		},
      +	})
      +
      +	if err != nil {
      +		t.Fatalf("Error opening journal: %s", err)
      +	}
      +
      +	if r == nil {
      +		t.Fatal("Got a nil reader")
      +	}
      +
      +	defer r.Close()
      +
      +	// start writing some test entries
      +	done := make(chan struct{}, 1)
      +	defer close(done)
      +	go func() {
      +		for {
      +			select {
      +			case <-done:
      +				return
      +			default:
      +				if err = journal.Print(journal.PriInfo, "test message %s", time.Now()); err != nil {
      +					t.Fatalf("Error writing to journal: %s", err)
      +				}
      +
      +				time.Sleep(time.Second)
      +			}
      +		}
      +	}()
      +
      +	// and follow the reader synchronously
      +	timeout := time.Duration(5) * time.Second
      +	if err = r.Follow(time.After(timeout), os.Stdout); err != ErrExpired {
      +		t.Fatalf("Error during follow: %s", err)
      +	}
      +}
      +
      +func TestJournalGetUsage(t *testing.T) {
      +	j, err := NewJournal()
      +
      +	if err != nil {
      +		t.Fatalf("Error opening journal: %s", err)
      +	}
      +
      +	if j == nil {
      +		t.Fatal("Got a nil journal")
      +	}
      +
      +	defer j.Close()
      +
      +	_, err = j.GetUsage()
      +
      +	if err != nil {
      +		t.Fatalf("Error getting journal size: %s", err)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/sdjournal/read.go b/vendor/github.com/coreos/go-systemd/sdjournal/read.go
      new file mode 100644
      index 00000000..8944448c
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/sdjournal/read.go
      @@ -0,0 +1,209 @@
      +// Copyright 2015 RedHat, Inc.
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package sdjournal
      +
      +import (
      +	"errors"
      +	"fmt"
      +	"io"
      +	"log"
      +	"time"
      +)
      +
      +var (
      +	ErrExpired = errors.New("Timeout expired")
      +)
      +
      +// JournalReaderConfig represents options to drive the behavior of a JournalReader.
      +type JournalReaderConfig struct {
      +	// The Since and NumFromTail options are mutually exclusive and determine
      +	// where the reading begins within the journal.
      +	Since       time.Duration // start relative to a Duration from now
      +	NumFromTail uint64        // start relative to the tail
      +
      +	// Show only journal entries whose fields match the supplied values. If
      +	// the array is empty, entries will not be filtered.
      +	Matches []Match
      +
      +	// If not empty, the journal instance will point to a journal residing
      +	// in this directory. The supplied path may be relative or absolute.
      +	Path string
      +}
      +
      +// JournalReader is an io.ReadCloser which provides a simple interface for iterating through the
      +// systemd journal.
      +type JournalReader struct {
      +	journal *Journal
      +}
      +
      +// NewJournalReader creates a new JournalReader with configuration options that are similar to the
      +// systemd journalctl tool's iteration and filtering features.
      +func NewJournalReader(config JournalReaderConfig) (*JournalReader, error) {
      +	r := &JournalReader{}
      +
      +	// Open the journal
      +	var err error
      +	if config.Path != "" {
      +		r.journal, err = NewJournalFromDir(config.Path)
      +	} else {
      +		r.journal, err = NewJournal()
      +	}
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	// Add any supplied matches
      +	for _, m := range config.Matches {
      +		r.journal.AddMatch(m.String())
      +	}
      +
      +	// Set the start position based on options
      +	if config.Since != 0 {
      +		// Start based on a relative time
      +		start := time.Now().Add(config.Since)
      +		if err := r.journal.SeekRealtimeUsec(uint64(start.UnixNano() / 1000)); err != nil {
      +			return nil, err
      +		}
      +	} else if config.NumFromTail != 0 {
      +		// Start based on a number of lines before the tail
      +		if err := r.journal.SeekTail(); err != nil {
      +			return nil, err
      +		}
      +
      +		// Move the read pointer into position near the tail. Go one further than
      +		// the option so that the initial cursor advancement positions us at the
      +		// correct starting point.
      +		if _, err := r.journal.PreviousSkip(config.NumFromTail + 1); err != nil {
      +			return nil, err
      +		}
      +	}
      +
      +	return r, nil
      +}
      +
      +func (r *JournalReader) Read(b []byte) (int, error) {
      +	var err error
      +	var c int
      +
      +	// Advance the journal cursor
      +	c, err = r.journal.Next()
      +
      +	// An unexpected error
      +	if err != nil {
      +		return 0, err
      +	}
      +
      +	// EOF detection
      +	if c == 0 {
      +		return 0, io.EOF
      +	}
      +
      +	// Build a message
      +	var msg string
      +	msg, err = r.buildMessage()
      +
      +	if err != nil {
      +		return 0, err
      +	}
      +
      +	// Copy and return the message
      +	copy(b, []byte(msg))
      +
      +	return len(msg), nil
      +}
      +
      +func (r *JournalReader) Close() error {
      +	return r.journal.Close()
      +}
      +
      +// Follow synchronously follows the JournalReader, writing each new journal entry to writer. The
      +// follow will continue until a single time.Time is received on the until channel.
      +func (r *JournalReader) Follow(until <-chan time.Time, writer io.Writer) (err error) {
      +
      +	// Process journal entries and events. Entries are flushed until the tail or
      +	// timeout is reached, and then we wait for new events or the timeout.
      +	var msg = make([]byte, 64*1<<(10))
      +process:
      +	for {
      +		c, err := r.Read(msg)
      +		if err != nil && err != io.EOF {
      +			break process
      +		}
      +
      +		select {
      +		case <-until:
      +			return ErrExpired
      +		default:
      +			if c > 0 {
      +				writer.Write(msg[:c])
      +				continue process
      +			}
      +		}
      +
      +		// We're at the tail, so wait for new events or time out.
      +		// Holds journal events to process. Tightly bounded for now unless there's a
      +		// reason to unblock the journal watch routine more quickly.
      +		events := make(chan int, 1)
      +		pollDone := make(chan bool, 1)
      +		go func() {
      +			for {
      +				select {
      +				case <-pollDone:
      +					return
      +				default:
      +					events <- r.journal.Wait(time.Duration(1) * time.Second)
      +				}
      +			}
      +		}()
      +
      +		select {
      +		case <-until:
      +			pollDone <- true
      +			return ErrExpired
      +		case e := <-events:
      +			pollDone <- true
      +			switch e {
      +			case SD_JOURNAL_NOP, SD_JOURNAL_APPEND, SD_JOURNAL_INVALIDATE:
      +				// TODO: need to account for any of these?
      +			default:
      +				log.Printf("Received unknown event: %d\n", e)
      +			}
      +			continue process
      +		}
      +	}
      +
      +	return
      +}
      +
      +// buildMessage returns a string representing the current journal entry in a simple format which
      +// includes the entry timestamp and MESSAGE field.
      +func (r *JournalReader) buildMessage() (string, error) {
      +	var msg string
      +	var usec uint64
      +	var err error
      +
      +	if msg, err = r.journal.GetData("MESSAGE"); err != nil {
      +		return "", err
      +	}
      +
      +	if usec, err = r.journal.GetRealtimeUsec(); err != nil {
      +		return "", err
      +	}
      +
      +	timestamp := time.Unix(0, int64(usec)*int64(time.Microsecond))
      +
      +	return fmt.Sprintf("%s %s\n", timestamp, msg), nil
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/test b/vendor/github.com/coreos/go-systemd/test
      new file mode 100755
      index 00000000..bc1b9859
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/test
      @@ -0,0 +1,76 @@
      +#!/bin/bash -e
      +#
      +# Run all tests
      +#   ./test
      +#   ./test -v
      +#
      +# Run tests for one package
      +#   PKG=./foo ./test
      +#   PKG=bar ./test
      +#
      +
      +# Invoke ./cover for HTML output
      +COVER=${COVER:-"-cover"}
      +
      +PROJ="go-systemd"
      +ORG_PATH="github.com/coreos"
      +REPO_PATH="${ORG_PATH}/${PROJ}"
      +
      +# As a convenience, set up a self-contained GOPATH if none set
      +if [ -z "$GOPATH" ]; then
      +	if [ ! -h gopath/src/${REPO_PATH} ]; then
      +		mkdir -p gopath/src/${ORG_PATH}
      +		ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255
      +	fi
      +	export GOPATH=${PWD}/gopath
      +	go get -u github.com/godbus/dbus
      +fi
      +
      +TESTABLE="activation journal login1 machine1 unit"
      +FORMATTABLE="$TESTABLE sdjournal dbus"
      +if [ -e "/run/systemd/system/" ]; then
      +	TESTABLE="${TESTABLE} sdjournal"
      +	if [ "$EUID" == "0" ]; then
      +		# testing actual systemd behaviour requires root
      +		TESTABLE="${TESTABLE} dbus"
      +	fi
      +fi
      +
      +
      +# user has not provided PKG override
      +if [ -z "$PKG" ]; then
      +	TEST=$TESTABLE
      +	FMT=$FORMATTABLE
      +
      +# user has provided PKG override
      +else
      +	# strip out slashes and dots from PKG=./foo/
      +	TEST=${PKG//\//}
      +	TEST=${TEST//./}
      +
      +	# only run gofmt on packages provided by user
      +	FMT="$TEST"
      +fi
      +
      +# split TEST into an array and prepend REPO_PATH to each local package
      +split=(${TEST// / })
      +TEST=${split[@]/#/${REPO_PATH}/}
      +
      +echo "Running tests..."
      +go test ${COVER} $@ ${TEST}
      +
      +echo "Checking gofmt..."
      +fmtRes=$(gofmt -l $FMT)
      +if [ -n "${fmtRes}" ]; then
      +	echo -e "gofmt checking failed:\n${fmtRes}"
      +	exit 255
      +fi
      +
      +echo "Checking govet..."
      +vetRes=$(go vet $TEST)
      +if [ -n "${vetRes}" ]; then
      +	echo -e "govet checking failed:\n${vetRes}"
      +	exit 255
      +fi
      +
      +echo "Success"
      diff --git a/vendor/github.com/coreos/go-systemd/unit/deserialize.go b/vendor/github.com/coreos/go-systemd/unit/deserialize.go
      new file mode 100644
      index 00000000..8a88162f
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/unit/deserialize.go
      @@ -0,0 +1,276 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package unit
      +
      +import (
      +	"bufio"
      +	"bytes"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"strings"
      +	"unicode"
      +)
      +
      +const (
      +	// SYSTEMD_LINE_MAX mimics the maximum line length that systemd can use.
      +	// On typical systemd platforms (i.e. modern Linux), this will most
      +	// commonly be 2048, so let's use that as a sanity check.
      +	// Technically, we should probably pull this at runtime:
      +	//    SYSTEMD_LINE_MAX = int(C.sysconf(C.__SC_LINE_MAX))
      +	// but this would introduce an (unfortunate) dependency on cgo
      +	SYSTEMD_LINE_MAX = 2048
      +
      +	// characters that systemd considers indicate a newline
      +	SYSTEMD_NEWLINE = "\r\n"
      +)
      +
      +var (
      +	ErrLineTooLong = fmt.Errorf("line too long (max %d bytes)", SYSTEMD_LINE_MAX)
      +)
      +
      +// Deserialize parses a systemd unit file into a list of UnitOption objects.
      +func Deserialize(f io.Reader) (opts []*UnitOption, err error) {
      +	lexer, optchan, errchan := newLexer(f)
      +	go lexer.lex()
      +
      +	for opt := range optchan {
      +		opts = append(opts, &(*opt))
      +	}
      +
      +	err = <-errchan
      +	return opts, err
      +}
      +
      +func newLexer(f io.Reader) (*lexer, <-chan *UnitOption, <-chan error) {
      +	optchan := make(chan *UnitOption)
      +	errchan := make(chan error, 1)
      +	buf := bufio.NewReader(f)
      +
      +	return &lexer{buf, optchan, errchan, ""}, optchan, errchan
      +}
      +
      +type lexer struct {
      +	buf     *bufio.Reader
      +	optchan chan *UnitOption
      +	errchan chan error
      +	section string
      +}
      +
      +func (l *lexer) lex() {
      +	var err error
      +	defer func() {
      +		close(l.optchan)
      +		close(l.errchan)
      +	}()
      +	next := l.lexNextSection
      +	for next != nil {
      +		if l.buf.Buffered() >= SYSTEMD_LINE_MAX {
      +			// systemd truncates lines longer than LINE_MAX
      +			// https://bugs.freedesktop.org/show_bug.cgi?id=85308
      +			// Rather than allowing this to pass silently, let's
      +			// explicitly gate people from encountering this
      +			line, err := l.buf.Peek(SYSTEMD_LINE_MAX)
      +			if err != nil {
      +				l.errchan <- err
      +				return
      +			}
      +			if bytes.IndexAny(line, SYSTEMD_NEWLINE) == -1 {
      +				l.errchan <- ErrLineTooLong
      +				return
      +			}
      +		}
      +
      +		next, err = next()
      +		if err != nil {
      +			l.errchan <- err
      +			return
      +		}
      +	}
      +}
      +
      +type lexStep func() (lexStep, error)
      +
      +func (l *lexer) lexSectionName() (lexStep, error) {
      +	sec, err := l.buf.ReadBytes(']')
      +	if err != nil {
      +		return nil, errors.New("unable to find end of section")
      +	}
      +
      +	return l.lexSectionSuffixFunc(string(sec[:len(sec)-1])), nil
      +}
      +
      +func (l *lexer) lexSectionSuffixFunc(section string) lexStep {
      +	return func() (lexStep, error) {
      +		garbage, _, err := l.toEOL()
      +		if err != nil {
      +			return nil, err
      +		}
      +
      +		garbage = bytes.TrimSpace(garbage)
      +		if len(garbage) > 0 {
      +			return nil, fmt.Errorf("found garbage after section name %s: %v", l.section, garbage)
      +		}
      +
      +		return l.lexNextSectionOrOptionFunc(section), nil
      +	}
      +}
      +
      +func (l *lexer) ignoreLineFunc(next lexStep) lexStep {
      +	return func() (lexStep, error) {
      +		for {
      +			line, _, err := l.toEOL()
      +			if err != nil {
      +				return nil, err
      +			}
      +
      +			line = bytes.TrimSuffix(line, []byte{' '})
      +
      +			// lack of continuation means this line has been exhausted
      +			if !bytes.HasSuffix(line, []byte{'\\'}) {
      +				break
      +			}
      +		}
      +
      +		// reached end of buffer, safe to exit
      +		return next, nil
      +	}
      +}
      +
      +func (l *lexer) lexNextSection() (lexStep, error) {
      +	r, _, err := l.buf.ReadRune()
      +	if err != nil {
      +		if err == io.EOF {
      +			err = nil
      +		}
      +		return nil, err
      +	}
      +
      +	if r == '[' {
      +		return l.lexSectionName, nil
      +	} else if isComment(r) {
      +		return l.ignoreLineFunc(l.lexNextSection), nil
      +	}
      +
      +	return l.lexNextSection, nil
      +}
      +
      +func (l *lexer) lexNextSectionOrOptionFunc(section string) lexStep {
      +	return func() (lexStep, error) {
      +		r, _, err := l.buf.ReadRune()
      +		if err != nil {
      +			if err == io.EOF {
      +				err = nil
      +			}
      +			return nil, err
      +		}
      +
      +		if unicode.IsSpace(r) {
      +			return l.lexNextSectionOrOptionFunc(section), nil
      +		} else if r == '[' {
      +			return l.lexSectionName, nil
      +		} else if isComment(r) {
      +			return l.ignoreLineFunc(l.lexNextSectionOrOptionFunc(section)), nil
      +		}
      +
      +		l.buf.UnreadRune()
      +		return l.lexOptionNameFunc(section), nil
      +	}
      +}
      +
      +func (l *lexer) lexOptionNameFunc(section string) lexStep {
      +	return func() (lexStep, error) {
      +		var partial bytes.Buffer
      +		for {
      +			r, _, err := l.buf.ReadRune()
      +			if err != nil {
      +				return nil, err
      +			}
      +
      +			if r == '\n' || r == '\r' {
      +				return nil, errors.New("unexpected newline encountered while parsing option name")
      +			}
      +
      +			if r == '=' {
      +				break
      +			}
      +
      +			partial.WriteRune(r)
      +		}
      +
      +		name := strings.TrimSpace(partial.String())
      +		return l.lexOptionValueFunc(section, name, bytes.Buffer{}), nil
      +	}
      +}
      +
      +func (l *lexer) lexOptionValueFunc(section, name string, partial bytes.Buffer) lexStep {
      +	return func() (lexStep, error) {
      +		for {
      +			line, eof, err := l.toEOL()
      +			if err != nil {
      +				return nil, err
      +			}
      +
      +			if len(bytes.TrimSpace(line)) == 0 {
      +				break
      +			}
      +
      +			partial.Write(line)
      +
      +			// lack of continuation means this value has been exhausted
      +			idx := bytes.LastIndex(line, []byte{'\\'})
      +			if idx == -1 || idx != (len(line)-1) {
      +				break
      +			}
      +
      +			if !eof {
      +				partial.WriteRune('\n')
      +			}
      +
      +			return l.lexOptionValueFunc(section, name, partial), nil
      +		}
      +
      +		val := partial.String()
      +		if strings.HasSuffix(val, "\n") {
      +			// A newline was added to the end, so the file didn't end with a backslash.
      +			// => Keep the newline
      +			val = strings.TrimSpace(val) + "\n"
      +		} else {
      +			val = strings.TrimSpace(val)
      +		}
      +		l.optchan <- &UnitOption{Section: section, Name: name, Value: val}
      +
      +		return l.lexNextSectionOrOptionFunc(section), nil
      +	}
      +}
      +
      +// toEOL reads until the end-of-line or end-of-file.
      +// Returns (data, EOFfound, error)
      +func (l *lexer) toEOL() ([]byte, bool, error) {
      +	line, err := l.buf.ReadBytes('\n')
      +	// ignore EOF here since it's roughly equivalent to EOL
      +	if err != nil && err != io.EOF {
      +		return nil, false, err
      +	}
      +
      +	line = bytes.TrimSuffix(line, []byte{'\r'})
      +	line = bytes.TrimSuffix(line, []byte{'\n'})
      +
      +	return line, err == io.EOF, nil
      +}
      +
      +func isComment(r rune) bool {
      +	return r == '#' || r == ';'
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/unit/deserialize_test.go b/vendor/github.com/coreos/go-systemd/unit/deserialize_test.go
      new file mode 100644
      index 00000000..84b7169f
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/unit/deserialize_test.go
      @@ -0,0 +1,381 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package unit
      +
      +import (
      +	"bytes"
      +	"fmt"
      +	"reflect"
      +	"testing"
      +)
      +
      +func TestDeserialize(t *testing.T) {
      +	tests := []struct {
      +		input  []byte
      +		output []*UnitOption
      +	}{
      +		// multiple options underneath a section
      +		{
      +			[]byte(`[Unit]
      +Description=Foo
      +Description=Bar
      +Requires=baz.service
      +After=baz.service
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Foo"},
      +				&UnitOption{"Unit", "Description", "Bar"},
      +				&UnitOption{"Unit", "Requires", "baz.service"},
      +				&UnitOption{"Unit", "After", "baz.service"},
      +			},
      +		},
      +
      +		// multiple sections
      +		{
      +			[]byte(`[Unit]
      +Description=Foo
      +
      +[Service]
      +ExecStart=/usr/bin/sleep infinity
      +
      +[X-Third-Party]
      +Pants=on
      +
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Foo"},
      +				&UnitOption{"Service", "ExecStart", "/usr/bin/sleep infinity"},
      +				&UnitOption{"X-Third-Party", "Pants", "on"},
      +			},
      +		},
      +
      +		// multiple sections with no options
      +		{
      +			[]byte(`[Unit]
      +[Service]
      +[X-Third-Party]
      +`),
      +			[]*UnitOption{},
      +		},
      +
      +		// multiple values not special-cased
      +		{
      +			[]byte(`[Service]
      +Environment= "FOO=BAR" "BAZ=QUX"
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Service", "Environment", "\"FOO=BAR\" \"BAZ=QUX\""},
      +			},
      +		},
      +
      +		// line continuations unmodified
      +		{
      +			[]byte(`[Unit]
      +Description= Unnecessarily wrapped \
      +    words here
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", `Unnecessarily wrapped \
      +    words here`},
      +			},
      +		},
      +
      +		// comments ignored
      +		{
      +			[]byte(`; comment alpha
      +# comment bravo
      +[Unit]
      +; comment charlie
      +# comment delta
      +#Description=Foo
      +Description=Bar
      +; comment echo
      +# comment foxtrot
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Bar"},
      +			},
      +		},
      +
      +		// apparent comment lines inside of line continuations not ignored
      +		{
      +			[]byte(`[Unit]
      +Description=Bar\
      +# comment alpha
      +
      +Description=Bar\
      +# comment bravo \
      +Baz
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Bar\\\n# comment alpha"},
      +				&UnitOption{"Unit", "Description", "Bar\\\n# comment bravo \\\nBaz"},
      +			},
      +		},
      +
      +		// options outside of sections are ignored
      +		{
      +			[]byte(`Description=Foo
      +[Unit]
      +Description=Bar
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Bar"},
      +			},
      +		},
      +
      +		// garbage outside of sections are ignored
      +		{
      +			[]byte(`<<<<<<<<
      +[Unit]
      +Description=Bar
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Bar"},
      +			},
      +		},
      +
      +		// garbage used as unit option
      +		{
      +			[]byte(`[Unit]
      +<<<<<<<<=Bar
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "<<<<<<<<", "Bar"},
      +			},
      +		},
      +
      +		// option name with spaces are valid
      +		{
      +			[]byte(`[Unit]
      +Some Thing = Bar
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Some Thing", "Bar"},
      +			},
      +		},
      +
      +		// lack of trailing newline doesn't cause problem for non-continued file
      +		{
      +			[]byte(`[Unit]
      +Description=Bar`),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Bar"},
      +			},
      +		},
      +
      +		// unit file with continuation but no following line is ok, too
      +		{
      +			[]byte(`[Unit]
      +Description=Bar \`),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Bar \\"},
      +			},
      +		},
      +
      +		// Assert utf8 characters are preserved
      +		{
      +			[]byte(`[©]
      +µ☃=ÇôrèÕ$`),
      +			[]*UnitOption{
      +				&UnitOption{"©", "µ☃", "ÇôrèÕ$"},
      +			},
      +		},
      +
      +		// whitespace removed around option name
      +		{
      +			[]byte(`[Unit]
      + Description   =words here
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "words here"},
      +			},
      +		},
      +
      +		// whitespace around option value stripped
      +		{
      +			[]byte(`[Unit]
      +Description= words here `),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "words here"},
      +			},
      +		},
      +
      +		// whitespace around option value stripped, regardless of continuation
      +		{
      +			[]byte(`[Unit]
      +Description= words here \
      +  `),
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "words here \\\n"},
      +			},
      +		},
      +
      +		// backslash not considered continuation if followed by text
      +		{
      +			[]byte(`[Service]
      +ExecStart=/bin/bash -c "while true; do echo \"ping\"; sleep 1; done"
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Service", "ExecStart", `/bin/bash -c "while true; do echo \"ping\"; sleep 1; done"`},
      +			},
      +		},
      +
      +		// backslash not considered continuation if followed by whitespace, but still trimmed
      +		{
      +			[]byte(`[Service]
      +ExecStart=/bin/bash echo poof \  `),
      +			[]*UnitOption{
      +				&UnitOption{"Service", "ExecStart", `/bin/bash echo poof \`},
      +			},
      +		},
      +		// a long unit file line that's just equal to the maximum permitted length
      +		{
      +			[]byte(`[Service]
      +ExecStart=/bin/bash -c "echo ............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
....................................................."`),
      +			[]*UnitOption{
      +				&UnitOption{"Service", "ExecStart", `/bin/bash -c "echo .............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
...................................................................................."`},
      +			},
      +		},
      +		// the same, but with a trailing newline
      +		{
      +			[]byte(`[Service]
      +ExecStart=/bin/bash -c "echo ............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
....................................................."
      +Option=value
      +`),
      +			[]*UnitOption{
      +				&UnitOption{"Service", "ExecStart", `/bin/bash -c "echo .............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
...................................................................................."`},
      +				&UnitOption{"Service", "Option", "value"},
      +			},
      +		},
      +	}
      +
      +	assert := func(expect, output []*UnitOption) error {
      +		if len(expect) != len(output) {
      +			return fmt.Errorf("expected %d items, got %d", len(expect), len(output))
      +		}
      +
      +		for i, _ := range expect {
      +			if !reflect.DeepEqual(expect[i], output[i]) {
      +				return fmt.Errorf("item %d: expected %v, got %v", i, expect[i], output[i])
      +			}
      +		}
      +
      +		return nil
      +	}
      +
      +	for i, tt := range tests {
      +		output, err := Deserialize(bytes.NewReader(tt.input))
      +		if err != nil {
      +			t.Errorf("case %d: unexpected error parsing unit: %v", i, err)
      +			continue
      +		}
      +
      +		err = assert(tt.output, output)
      +		if err != nil {
      +			t.Errorf("case %d: %v", i, err)
      +			t.Log("Expected options:")
      +			logUnitOptionSlice(t, tt.output)
      +			t.Log("Actual options:")
      +			logUnitOptionSlice(t, output)
      +		}
      +	}
      +}
      +
      +func TestDeserializeFail(t *testing.T) {
      +	tests := [][]byte{
      +		// malformed section header
      +		[]byte(`[Unit
      +Description=Foo
      +`),
      +
      +		// garbage following section header
      +		[]byte(`[Unit] pants
      +Description=Foo
      +`),
      +
      +		// option without value
      +		[]byte(`[Unit]
      +Description
      +`),
      +
      +		// garbage inside of section
      +		[]byte(`[Unit]
      +<<<<<<
      +Description=Foo
      +`),
      +	}
      +
      +	for i, tt := range tests {
      +		output, err := Deserialize(bytes.NewReader(tt))
      +		if err == nil {
      +			t.Errorf("case %d: unexpected nil error", i)
      +			t.Log("Output:")
      +			logUnitOptionSlice(t, output)
      +		}
      +	}
      +}
      +
      +func logUnitOptionSlice(t *testing.T, opts []*UnitOption) {
      +	for idx, opt := range opts {
      +		t.Logf("%d: %v", idx, opt)
      +	}
      +}
      +
      +func TestDeserializeLineTooLong(t *testing.T) {
      +	tests := [][]byte{
      +		// section header that's far too long
      +		[]byte(`[Seeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee
eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeervice]
      +`),
      +		// sane-looking unit file with a line just greater than the maximum allowed (currently, 2048)
      +		[]byte(`[Service]
      +ExecStart=/bin/bash -c "echo ............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
......................................................."
      +`),
      +		// sane-looking unit file with option value way too long
      +		[]byte(`
      +# test unit file
      +
      +[Service]
      +ExecStartPre=-/usr/bin/docker rm %p
      +ExecStartPre=-/usr/bin/docker pull busybox
      +ExecStart=/usr/bin/docker run --rm --name %p --net=host \
      +  -e "test=1123t" \
      +  -e "test=1123t" \
      +  -e "fiz=1123t" \
      +  -e "buz=1123t" \
      +  -e "FOO=BARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBABARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARRBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBAR"BARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBA
RBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBABARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARRBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBARBAR" \
      +  busybox sleep 10
      +ExecStop=-/usr/bin/docker kill %p
      +SyslogIdentifier=busybox
      +Restart=always
      +RestartSec=10s
      +`),
      +		// single arbitrary line that's way too long
      +		[]byte(`arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 character arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 character arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line 
that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 character arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 character arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and 
extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters arbitrary and extraordinarily long line that is far greater than 2048 characters`),
      +		// sane-looking unit file with option name way too long
      +		[]byte(`[Service]
      +ExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExec
StartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStartExecStart=/bin/true
      +`),
      +	}
      +
      +	for i, tt := range tests {
      +		output, err := Deserialize(bytes.NewReader(tt))
      +		if err != ErrLineTooLong {
      +			t.Errorf("case %d: unexpected err: %v", i, err)
      +			t.Log("Output:")
      +			logUnitOptionSlice(t, output)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/unit/end_to_end_test.go b/vendor/github.com/coreos/go-systemd/unit/end_to_end_test.go
      new file mode 100644
      index 00000000..7182327a
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/unit/end_to_end_test.go
      @@ -0,0 +1,88 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package unit
      +
      +import (
      +	"bytes"
      +	"io/ioutil"
      +	"testing"
      +)
      +
      +func TestDeserializeAndReserialize(t *testing.T) {
      +	tests := []struct {
      +		in   string
      +		wout string
      +	}{
      +		{
      +			`[Service]
      +ExecStart=/bin/bash -c "while true; do echo \"ping\"; sleep 1; done"
      +`,
      +			`[Service]
      +ExecStart=/bin/bash -c "while true; do echo \"ping\"; sleep 1; done"
      +`},
      +		{
      +			`[Unit]
      +Description= Unnecessarily wrapped \
      +    words here`,
      +			`[Unit]
      +Description=Unnecessarily wrapped \
      +    words here
      +`,
      +		},
      +		{
      +			`[Unit]
      +Description=Demo \
      +
      +Requires=docker.service
      +`,
      +			`[Unit]
      +Description=Demo \
      +
      +Requires=docker.service
      +`,
      +		},
      +		{
      +			`; comment alpha
      +# comment bravo
      +[Unit]
      +; comment charlie
      +# comment delta
      +#Description=Foo
      +Description=Bar
      +; comment echo
      +# comment foxtrot
      +`,
      +			`[Unit]
      +Description=Bar
      +`},
      +	}
      +	for i, tt := range tests {
      +		ds, err := Deserialize(bytes.NewBufferString(tt.in))
      +		if err != nil {
      +			t.Errorf("case %d: unexpected error parsing unit: %v", i, err)
      +			continue
      +		}
      +		out, err := ioutil.ReadAll(Serialize(ds))
      +		if err != nil {
      +			t.Errorf("case %d: unexpected error serializing unit: %v", i, err)
      +			continue
      +		}
      +		if g := string(out); g != tt.wout {
      +			t.Errorf("case %d: incorrect output", i)
      +			t.Logf("Expected:\n%#v", tt.wout)
      +			t.Logf("Actual:\n%#v", g)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/unit/escape.go b/vendor/github.com/coreos/go-systemd/unit/escape.go
      new file mode 100644
      index 00000000..63b11726
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/unit/escape.go
      @@ -0,0 +1,116 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// Implements systemd-escape [--unescape] [--path]
      +
      +package unit
      +
      +import (
      +	"fmt"
      +	"strconv"
      +	"strings"
      +)
      +
      +const (
      +	allowed = `:_.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`
      +)
      +
      +// If isPath is true:
      +//   We remove redundant '/'s, the leading '/', and trailing '/'.
      +//   If the result is empty, a '/' is inserted.
      +//
      +// We always:
      +//  Replace the following characters with `\x%x`:
      +//   Leading `.`
      +//   `-`, `\`, and anything not in this set: `:-_.\[0-9a-zA-Z]`
      +//  Replace '/' with '-'.
      +func escape(unescaped string, isPath bool) string {
      +	e := []byte{}
      +	inSlashes := false
      +	start := true
      +	for i := 0; i < len(unescaped); i++ {
      +		c := unescaped[i]
      +		if isPath {
      +			if c == '/' {
      +				inSlashes = true
      +				continue
      +			} else if inSlashes {
      +				inSlashes = false
      +				if !start {
      +					e = append(e, '-')
      +				}
      +			}
      +		}
      +
      +		if c == '/' {
      +			e = append(e, '-')
      +		} else if start && c == '.' || strings.IndexByte(allowed, c) == -1 {
      +			e = append(e, []byte(fmt.Sprintf(`\x%x`, c))...)
      +		} else {
      +			e = append(e, c)
      +		}
      +		start = false
      +	}
      +	if isPath && len(e) == 0 {
      +		e = append(e, '-')
      +	}
      +	return string(e)
      +}
      +
      +// If isPath is true:
      +//   We always return a string beginning with '/'.
      +//
      +// We always:
      +//  Replace '-' with '/'.
      +//  Replace `\x%x` with the value represented in hex.
      +func unescape(escaped string, isPath bool) string {
      +	u := []byte{}
      +	for i := 0; i < len(escaped); i++ {
      +		c := escaped[i]
      +		if c == '-' {
      +			c = '/'
      +		} else if c == '\\' && len(escaped)-i >= 4 && escaped[i+1] == 'x' {
      +			n, err := strconv.ParseInt(escaped[i+2:i+4], 16, 8)
      +			if err == nil {
      +				c = byte(n)
      +				i += 3
      +			}
      +		}
      +		u = append(u, c)
      +	}
      +	if isPath && (len(u) == 0 || u[0] != '/') {
      +		u = append([]byte("/"), u...)
      +	}
      +	return string(u)
      +}
      +
      +// UnitNameEscape escapes a string as `systemd-escape` would
      +func UnitNameEscape(unescaped string) string {
      +	return escape(unescaped, false)
      +}
      +
      +// UnitNameUnescape unescapes a string as `systemd-escape --unescape` would
      +func UnitNameUnescape(escaped string) string {
      +	return unescape(escaped, false)
      +}
      +
      +// UnitNamePathEscape escapes a string as `systemd-escape --path` would
      +func UnitNamePathEscape(unescaped string) string {
      +	return escape(unescaped, true)
      +}
      +
      +// UnitNamePathUnescape unescapes a string as `systemd-escape --path --unescape` would
      +func UnitNamePathUnescape(escaped string) string {
      +	return unescape(escaped, true)
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/unit/escape_test.go b/vendor/github.com/coreos/go-systemd/unit/escape_test.go
      new file mode 100644
      index 00000000..36b1a7d3
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/unit/escape_test.go
      @@ -0,0 +1,211 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package unit
      +
      +import (
      +	"testing"
      +)
      +
      +func TestUnitNameEscape(t *testing.T) {
      +	tests := []struct {
      +		in     string
      +		out    string
      +		isPath bool
      +	}{
      +		// turn empty string path into escaped /
      +		{
      +			in:     "",
      +			out:    "-",
      +			isPath: true,
      +		},
      +		// turn redundant ////s into single escaped /
      +		{
      +			in:     "/////////",
      +			out:    "-",
      +			isPath: true,
      +		},
      +		// remove all redundant ////s
      +		{
      +			in:     "///foo////bar/////tail//////",
      +			out:    "foo-bar-tail",
      +			isPath: true,
      +		},
      +		// leave empty string empty
      +		{
      +			in:     "",
      +			out:    "",
      +			isPath: false,
      +		},
      +		// escape leading dot
      +		{
      +			in:     ".",
      +			out:    `\x2e`,
      +			isPath: true,
      +		},
      +		// escape leading dot
      +		{
      +			in:     "/.",
      +			out:    `\x2e`,
      +			isPath: true,
      +		},
      +		// escape leading dot
      +		{
      +			in:     "/////////.",
      +			out:    `\x2e`,
      +			isPath: true,
      +		},
      +		// escape leading dot
      +		{
      +			in:     "/////////.///////////////",
      +			out:    `\x2e`,
      +			isPath: true,
      +		},
      +		// escape leading dot
      +		{
      +			in:     ".....",
      +			out:    `\x2e....`,
      +			isPath: true,
      +		},
      +		// escape leading dot
      +		{
      +			in:     "/.foo/.bar",
      +			out:    `\x2efoo-.bar`,
      +			isPath: true,
      +		},
      +		// escape leading dot
      +		{
      +			in:     ".foo/.bar",
      +			out:    `\x2efoo-.bar`,
      +			isPath: true,
      +		},
      +		// escape leading dot
      +		{
      +			in:     ".foo/.bar",
      +			out:    `\x2efoo-.bar`,
      +			isPath: false,
      +		},
      +		// escape disallowed
      +		{
      +			in:     `///..\-!#??///`,
      +			out:    `---..\x5c\x2d\x21\x23\x3f\x3f---`,
      +			isPath: false,
      +		},
      +		// escape disallowed
      +		{
      +			in:     `///..\-!#??///`,
      +			out:    `\x2e.\x5c\x2d\x21\x23\x3f\x3f`,
      +			isPath: true,
      +		},
      +		// escape real-world example
      +		{
      +			in:     `user-cloudinit@/var/lib/coreos/vagrant/vagrantfile-user-data.service`,
      +			out:    `user\x2dcloudinit\x40-var-lib-coreos-vagrant-vagrantfile\x2duser\x2ddata.service`,
      +			isPath: false,
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		var s string
      +		if tt.isPath {
      +			s = UnitNamePathEscape(tt.in)
      +		} else {
      +			s = UnitNameEscape(tt.in)
      +		}
      +		if s != tt.out {
      +			t.Errorf("case %d: failed escaping %v isPath: %v - expected %v, got %v", i, tt.in, tt.isPath, tt.out, s)
      +		}
      +	}
      +}
      +
      +func TestUnitNameUnescape(t *testing.T) {
      +	tests := []struct {
      +		in     string
      +		out    string
      +		isPath bool
      +	}{
      +		// turn empty string path into /
      +		{
      +			in:     "",
      +			out:    "/",
      +			isPath: true,
      +		},
      +		// leave empty string empty
      +		{
      +			in:     "",
      +			out:    "",
      +			isPath: false,
      +		},
      +		// turn ////s into
      +		{
      +			in:     "---------",
      +			out:    "/////////",
      +			isPath: true,
      +		},
      +		// unescape hex
      +		{
      +			in:     `---..\x5c\x2d\x21\x23\x3f\x3f---`,
      +			out:    `///..\-!#??///`,
      +			isPath: false,
      +		},
      +		// unescape hex
      +		{
      +			in:     `\x2e.\x5c\x2d\x21\x23\x3f\x3f`,
      +			out:    `/..\-!#??`,
      +			isPath: true,
      +		},
      +		// unescape hex, retain invalids
      +		{
      +			in:     `\x2e.\x5c\x2d\xaZ\x.o\x21\x23\x3f\x3f`,
      +			out:    `/..\-\xaZ\x.o!#??`,
      +			isPath: true,
      +		},
      +		// unescape hex, retain invalids, partial tail
      +		{
      +			in:     `\x2e.\x5c\x\x2d\xaZ\x.o\x21\x23\x3f\x3f\x3`,
      +			out:    `/..\\x-\xaZ\x.o!#??\x3`,
      +			isPath: true,
      +		},
      +		// unescape hex, retain invalids, partial tail
      +		{
      +			in:     `\x2e.\x5c\x\x2d\xaZ\x.o\x21\x23\x3f\x3f\x`,
      +			out:    `/..\\x-\xaZ\x.o!#??\x`,
      +			isPath: true,
      +		},
      +		// unescape hex, retain invalids, partial tail
      +		{
      +			in:     `\x2e.\x5c\x\x2d\xaZ\x.o\x21\x23\x3f\x3f\`,
      +			out:    `/..\\x-\xaZ\x.o!#??\`,
      +			isPath: true,
      +		},
      +		// unescape real-world example
      +		{
      +			in:     `user\x2dcloudinit\x40-var-lib-coreos-vagrant-vagrantfile\x2duser\x2ddata.service`,
      +			out:    `user-cloudinit@/var/lib/coreos/vagrant/vagrantfile-user-data.service`,
      +			isPath: false,
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		var s string
      +		if tt.isPath {
      +			s = UnitNamePathUnescape(tt.in)
      +		} else {
      +			s = UnitNameUnescape(tt.in)
      +		}
      +		if s != tt.out {
      +			t.Errorf("case %d: failed unescaping %v isPath: %v - expected %v, got %v", i, tt.in, tt.isPath, tt.out, s)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/unit/option.go b/vendor/github.com/coreos/go-systemd/unit/option.go
      new file mode 100644
      index 00000000..e5d21e19
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/unit/option.go
      @@ -0,0 +1,54 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package unit
      +
      +import (
      +	"fmt"
      +)
      +
      +type UnitOption struct {
      +	Section string
      +	Name    string
      +	Value   string
      +}
      +
      +func NewUnitOption(section, name, value string) *UnitOption {
      +	return &UnitOption{Section: section, Name: name, Value: value}
      +}
      +
      +func (uo *UnitOption) String() string {
      +	return fmt.Sprintf("{Section: %q, Name: %q, Value: %q}", uo.Section, uo.Name, uo.Value)
      +}
      +
      +func (uo *UnitOption) Match(other *UnitOption) bool {
      +	return uo.Section == other.Section &&
      +		uo.Name == other.Name &&
      +		uo.Value == other.Value
      +}
      +
      +func AllMatch(u1 []*UnitOption, u2 []*UnitOption) bool {
      +	length := len(u1)
      +	if length != len(u2) {
      +		return false
      +	}
      +
      +	for i := 0; i < length; i++ {
      +		if !u1[i].Match(u2[i]) {
      +			return false
      +		}
      +	}
      +
      +	return true
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/unit/option_test.go b/vendor/github.com/coreos/go-systemd/unit/option_test.go
      new file mode 100644
      index 00000000..0765f03a
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/unit/option_test.go
      @@ -0,0 +1,214 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package unit
      +
      +import (
      +	"testing"
      +)
      +
      +func TestAllMatch(t *testing.T) {
      +	tests := []struct {
      +		u1    []*UnitOption
      +		u2    []*UnitOption
      +		match bool
      +	}{
      +		// empty lists match
      +		{
      +			u1:    []*UnitOption{},
      +			u2:    []*UnitOption{},
      +			match: true,
      +		},
      +
      +		// simple match of a single option
      +		{
      +			u1: []*UnitOption{
      +				{Section: "Unit", Name: "Description", Value: "FOO"},
      +			},
      +			u2: []*UnitOption{
      +				{Section: "Unit", Name: "Description", Value: "FOO"},
      +			},
      +			match: true,
      +		},
      +
      +		// single option mismatched
      +		{
      +			u1: []*UnitOption{
      +				{Section: "Unit", Name: "Description", Value: "FOO"},
      +			},
      +			u2: []*UnitOption{
      +				{Section: "Unit", Name: "Description", Value: "BAR"},
      +			},
      +			match: false,
      +		},
      +
      +		// multiple options match
      +		{
      +			u1: []*UnitOption{
      +				{Section: "Unit", Name: "Description", Value: "FOO"},
      +				{Section: "Unit", Name: "BindsTo", Value: "bar.service"},
      +				{Section: "Service", Name: "ExecStart", Value: "/bin/true"},
      +			},
      +			u2: []*UnitOption{
      +				{Section: "Unit", Name: "Description", Value: "FOO"},
      +				{Section: "Unit", Name: "BindsTo", Value: "bar.service"},
      +				{Section: "Service", Name: "ExecStart", Value: "/bin/true"},
      +			},
      +			match: true,
      +		},
      +
      +		// mismatch length
      +		{
      +			u1: []*UnitOption{
      +				{Section: "Unit", Name: "Description", Value: "FOO"},
      +				{Section: "Unit", Name: "BindsTo", Value: "bar.service"},
      +			},
      +			u2: []*UnitOption{
      +				{Section: "Unit", Name: "Description", Value: "FOO"},
      +				{Section: "Unit", Name: "BindsTo", Value: "bar.service"},
      +				{Section: "Service", Name: "ExecStart", Value: "/bin/true"},
      +			},
      +			match: false,
      +		},
      +
      +		// multiple options misordered
      +		{
      +			u1: []*UnitOption{
      +				{Section: "Unit", Name: "Description", Value: "FOO"},
      +				{Section: "Service", Name: "ExecStart", Value: "/bin/true"},
      +			},
      +			u2: []*UnitOption{
      +				{Section: "Service", Name: "ExecStart", Value: "/bin/true"},
      +				{Section: "Unit", Name: "Description", Value: "FOO"},
      +			},
      +			match: false,
      +		},
      +
      +		// interleaved sections mismatch
      +		{
      +			u1: []*UnitOption{
      +				{Section: "Unit", Name: "Description", Value: "FOO"},
      +				{Section: "Unit", Name: "BindsTo", Value: "bar.service"},
      +				{Section: "Service", Name: "ExecStart", Value: "/bin/true"},
      +				{Section: "Service", Name: "ExecStop", Value: "/bin/true"},
      +			},
      +			u2: []*UnitOption{
      +				{Section: "Unit", Name: "Description", Value: "FOO"},
      +				{Section: "Service", Name: "ExecStart", Value: "/bin/true"},
      +				{Section: "Unit", Name: "BindsTo", Value: "bar.service"},
      +				{Section: "Service", Name: "ExecStop", Value: "/bin/true"},
      +			},
      +			match: false,
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		match := AllMatch(tt.u1, tt.u2)
      +		if match != tt.match {
      +			t.Errorf("case %d: failed comparing u1 to u2 - expected match=%t, got %t", i, tt.match, match)
      +		}
      +
      +		match = AllMatch(tt.u2, tt.u1)
      +		if match != tt.match {
      +			t.Errorf("case %d: failed comparing u2 to u1 - expected match=%t, got %t", i, tt.match, match)
      +		}
      +	}
      +}
      +
      +func TestMatch(t *testing.T) {
      +	tests := []struct {
      +		o1    *UnitOption
      +		o2    *UnitOption
      +		match bool
      +	}{
      +		// empty options match
      +		{
      +			o1:    &UnitOption{},
      +			o2:    &UnitOption{},
      +			match: true,
      +		},
      +
      +		// all fields match
      +		{
      +			o1: &UnitOption{
      +				Section: "Unit",
      +				Name:    "Description",
      +				Value:   "FOO",
      +			},
      +			o2: &UnitOption{
      +				Section: "Unit",
      +				Name:    "Description",
      +				Value:   "FOO",
      +			},
      +			match: true,
      +		},
      +
      +		// Section mismatch
      +		{
      +			o1: &UnitOption{
      +				Section: "Unit",
      +				Name:    "Description",
      +				Value:   "FOO",
      +			},
      +			o2: &UnitOption{
      +				Section: "X-Other",
      +				Name:    "Description",
      +				Value:   "FOO",
      +			},
      +			match: false,
      +		},
      +
      +		// Name mismatch
      +		{
      +			o1: &UnitOption{
      +				Section: "Unit",
      +				Name:    "Description",
      +				Value:   "FOO",
      +			},
      +			o2: &UnitOption{
      +				Section: "Unit",
      +				Name:    "BindsTo",
      +				Value:   "FOO",
      +			},
      +			match: false,
      +		},
      +
      +		// Value mismatch
      +		{
      +			o1: &UnitOption{
      +				Section: "Unit",
      +				Name:    "Description",
      +				Value:   "FOO",
      +			},
      +			o2: &UnitOption{
      +				Section: "Unit",
      +				Name:    "Description",
      +				Value:   "BAR",
      +			},
      +			match: false,
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		match := tt.o1.Match(tt.o2)
      +		if match != tt.match {
      +			t.Errorf("case %d: failed comparing o1 to o2 - expected match=%t, got %t", i, tt.match, match)
      +		}
      +
      +		match = tt.o2.Match(tt.o1)
      +		if match != tt.match {
      +			t.Errorf("case %d: failed comparing o2 to o1 - expected match=%t, got %t", i, tt.match, match)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/unit/serialize.go b/vendor/github.com/coreos/go-systemd/unit/serialize.go
      new file mode 100644
      index 00000000..e07799ca
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/unit/serialize.go
      @@ -0,0 +1,75 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package unit
      +
      +import (
      +	"bytes"
      +	"io"
      +)
      +
      +// Serialize encodes all of the given UnitOption objects into a
      +// unit file. When serialized the options are sorted in their
      +// supplied order but grouped by section.
      +func Serialize(opts []*UnitOption) io.Reader {
      +	var buf bytes.Buffer
      +
      +	if len(opts) == 0 {
      +		return &buf
      +	}
      +
      +	// Index of sections -> ordered options
      +	idx := map[string][]*UnitOption{}
      +	// Separately preserve order in which sections were seen
      +	sections := []string{}
      +	for _, opt := range opts {
      +		sec := opt.Section
      +		if _, ok := idx[sec]; !ok {
      +			sections = append(sections, sec)
      +		}
      +		idx[sec] = append(idx[sec], opt)
      +	}
      +
      +	for i, sect := range sections {
      +		writeSectionHeader(&buf, sect)
      +		writeNewline(&buf)
      +
      +		opts := idx[sect]
      +		for _, opt := range opts {
      +			writeOption(&buf, opt)
      +			writeNewline(&buf)
      +		}
      +		if i < len(sections)-1 {
      +			writeNewline(&buf)
      +		}
      +	}
      +
      +	return &buf
      +}
      +
      +func writeNewline(buf *bytes.Buffer) {
      +	buf.WriteRune('\n')
      +}
      +
      +func writeSectionHeader(buf *bytes.Buffer, section string) {
      +	buf.WriteRune('[')
      +	buf.WriteString(section)
      +	buf.WriteRune(']')
      +}
      +
      +func writeOption(buf *bytes.Buffer, opt *UnitOption) {
      +	buf.WriteString(opt.Name)
      +	buf.WriteRune('=')
      +	buf.WriteString(opt.Value)
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/unit/serialize_test.go b/vendor/github.com/coreos/go-systemd/unit/serialize_test.go
      new file mode 100644
      index 00000000..bd492b0d
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/unit/serialize_test.go
      @@ -0,0 +1,170 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package unit
      +
      +import (
      +	"io/ioutil"
      +	"testing"
      +)
      +
      +func TestSerialize(t *testing.T) {
      +	tests := []struct {
      +		input  []*UnitOption
      +		output string
      +	}{
      +		// no options results in empty file
      +		{
      +			[]*UnitOption{},
      +			``,
      +		},
      +
      +		// options with same section share the header
      +		{
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Foo"},
      +				&UnitOption{"Unit", "BindsTo", "bar.service"},
      +			},
      +			`[Unit]
      +Description=Foo
      +BindsTo=bar.service
      +`,
      +		},
      +
      +		// options with same name are not combined
      +		{
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Foo"},
      +				&UnitOption{"Unit", "Description", "Bar"},
      +			},
      +			`[Unit]
      +Description=Foo
      +Description=Bar
      +`,
      +		},
      +
      +		// multiple options printed under different section headers
      +		{
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Foo"},
      +				&UnitOption{"Service", "ExecStart", "/usr/bin/sleep infinity"},
      +			},
      +			`[Unit]
      +Description=Foo
      +
      +[Service]
      +ExecStart=/usr/bin/sleep infinity
      +`,
      +		},
      +
      +		// options are grouped into sections
      +		{
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Foo"},
      +				&UnitOption{"Service", "ExecStart", "/usr/bin/sleep infinity"},
      +				&UnitOption{"Unit", "BindsTo", "bar.service"},
      +			},
      +			`[Unit]
      +Description=Foo
      +BindsTo=bar.service
      +
      +[Service]
      +ExecStart=/usr/bin/sleep infinity
      +`,
      +		},
      +
      +		// options are ordered within groups, and sections are ordered in the order in which they were first seen
      +		{
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Foo"},
      +				&UnitOption{"Service", "ExecStart", "/usr/bin/sleep infinity"},
      +				&UnitOption{"Unit", "BindsTo", "bar.service"},
      +				&UnitOption{"X-Foo", "Bar", "baz"},
      +				&UnitOption{"Service", "ExecStop", "/usr/bin/sleep 1"},
      +				&UnitOption{"Unit", "Documentation", "https://foo.com"},
      +			},
      +			`[Unit]
      +Description=Foo
      +BindsTo=bar.service
      +Documentation=https://foo.com
      +
      +[Service]
      +ExecStart=/usr/bin/sleep infinity
      +ExecStop=/usr/bin/sleep 1
      +
      +[X-Foo]
      +Bar=baz
      +`,
      +		},
      +
      +		// utf8 characters are not a problem
      +		{
      +			[]*UnitOption{
      +				&UnitOption{"©", "µ☃", "ÇôrèÕ$"},
      +			},
      +			`[©]
      +µ☃=ÇôrèÕ$
      +`,
      +		},
      +
      +		// no verification is done on section names
      +		{
      +			[]*UnitOption{
      +				&UnitOption{"Un\nit", "Description", "Foo"},
      +			},
      +			`[Un
      +it]
      +Description=Foo
      +`,
      +		},
      +
      +		// no verification is done on option names
      +		{
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Desc\nription", "Foo"},
      +			},
      +			`[Unit]
      +Desc
      +ription=Foo
      +`,
      +		},
      +
      +		// no verification is done on option values
      +		{
      +			[]*UnitOption{
      +				&UnitOption{"Unit", "Description", "Fo\no"},
      +			},
      +			`[Unit]
      +Description=Fo
      +o
      +`,
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		outReader := Serialize(tt.input)
      +		outBytes, err := ioutil.ReadAll(outReader)
      +		if err != nil {
      +			t.Errorf("case %d: encountered error while reading output: %v", i, err)
      +			continue
      +		}
      +
      +		output := string(outBytes)
      +		if tt.output != output {
      +			t.Errorf("case %d: incorrect output", i)
      +			t.Logf("Expected:\n%s", tt.output)
      +			t.Logf("Actual:\n%s", output)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/go-systemd/util/util.go b/vendor/github.com/coreos/go-systemd/util/util.go
      new file mode 100644
      index 00000000..f9f0b2a3
      --- /dev/null
      +++ b/vendor/github.com/coreos/go-systemd/util/util.go
      @@ -0,0 +1,270 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// Package util contains utility functions related to systemd that applications
      +// can use to check things like whether systemd is running.  Note that some of
      +// these functions attempt to manually load systemd libraries at runtime rather
      +// than linking against them.
      +package util
      +
      +// #cgo LDFLAGS: -ldl
      +// #include <stdlib.h>
      +// #include <dlfcn.h>
      +// #include <sys/types.h>
      +// #include <unistd.h>
      +//
      +// int
      +// my_sd_pid_get_owner_uid(void *f, pid_t pid, uid_t *uid)
      +// {
      +//   int (*sd_pid_get_owner_uid)(pid_t, uid_t *);
      +//
      +//   sd_pid_get_owner_uid = (int (*)(pid_t, uid_t *))f;
      +//   return sd_pid_get_owner_uid(pid, uid);
      +// }
      +//
      +// int
      +// my_sd_pid_get_unit(void *f, pid_t pid, char **unit)
      +// {
      +//   int (*sd_pid_get_unit)(pid_t, char **);
      +//
      +//   sd_pid_get_unit = (int (*)(pid_t, char **))f;
      +//   return sd_pid_get_unit(pid, unit);
      +// }
      +//
      +// int
      +// my_sd_pid_get_slice(void *f, pid_t pid, char **slice)
      +// {
      +//   int (*sd_pid_get_slice)(pid_t, char **);
      +//
      +//   sd_pid_get_slice = (int (*)(pid_t, char **))f;
      +//   return sd_pid_get_slice(pid, slice);
      +// }
      +//
      +// int
      +// am_session_leader()
      +// {
      +//   return (getsid(0) == getpid());
      +// }
      +import "C"
      +import (
      +	"errors"
      +	"fmt"
      +	"io/ioutil"
      +	"os"
      +	"strings"
      +	"syscall"
      +	"unsafe"
      +)
      +
      +var ErrSoNotFound = errors.New("unable to open a handle to libsystemd")
      +
      +// libHandle represents an open handle to the systemd C library
      +type libHandle struct {
      +	handle  unsafe.Pointer
      +	libname string
      +}
      +
      +func (h *libHandle) Close() error {
      +	if r := C.dlclose(h.handle); r != 0 {
      +		return fmt.Errorf("error closing %v: %d", h.libname, r)
      +	}
      +	return nil
      +}
      +
      +// getHandle tries to get a handle to a systemd library (.so), attempting to
      +// access it by several different names and returning the first that is
      +// successfully opened. Callers are responsible for closing the handler.
      +// If no library can be successfully opened, an error is returned.
      +func getHandle() (*libHandle, error) {
      +	for _, name := range []string{
      +		// systemd < 209
      +		"libsystemd-login.so",
      +		"libsystemd-login.so.0",
      +
      +		// systemd >= 209 merged libsystemd-login into libsystemd proper
      +		"libsystemd.so",
      +		"libsystemd.so.0",
      +	} {
      +		libname := C.CString(name)
      +		defer C.free(unsafe.Pointer(libname))
      +		handle := C.dlopen(libname, C.RTLD_LAZY)
      +		if handle != nil {
      +			h := &libHandle{
      +				handle:  handle,
      +				libname: name,
      +			}
      +			return h, nil
      +		}
      +	}
      +	return nil, ErrSoNotFound
      +}
      +
      +// GetRunningSlice attempts to retrieve the name of the systemd slice in which
      +// the current process is running.
      +// This function is a wrapper around the libsystemd C library; if it cannot be
      +// opened, an error is returned.
      +func GetRunningSlice() (slice string, err error) {
      +	var h *libHandle
      +	h, err = getHandle()
      +	if err != nil {
      +		return
      +	}
      +	defer func() {
      +		if err1 := h.Close(); err1 != nil {
      +			err = err1
      +		}
      +	}()
      +
      +	sym := C.CString("sd_pid_get_slice")
      +	defer C.free(unsafe.Pointer(sym))
      +	sd_pid_get_slice := C.dlsym(h.handle, sym)
      +	if sd_pid_get_slice == nil {
      +		err = fmt.Errorf("error resolving sd_pid_get_slice function")
      +		return
      +	}
      +
      +	var s string
      +	sl := C.CString(s)
      +	defer C.free(unsafe.Pointer(sl))
      +
      +	ret := C.my_sd_pid_get_slice(sd_pid_get_slice, 0, &sl)
      +	if ret < 0 {
      +		err = fmt.Errorf("error calling sd_pid_get_slice: %v", syscall.Errno(-ret))
      +		return
      +	}
      +
      +	return C.GoString(sl), nil
      +}
      +
      +// RunningFromSystemService tries to detect whether the current process has
      +// been invoked from a system service. The condition for this is whether the
      +// process is _not_ a user process. User processes are those running in session
      +// scopes or under per-user `systemd --user` instances.
      +//
      +// To avoid false positives on systems without `pam_systemd` (which is
      +// responsible for creating user sessions), this function also uses a heuristic
      +// to detect whether it's being invoked from a session leader process. This is
      +// the case if the current process is executed directly from a service file
      +// (e.g. with `ExecStart=/this/cmd`). Note that this heuristic will fail if the
      +// command is instead launched in a subshell or similar so that it is not
      +// session leader (e.g. `ExecStart=/bin/bash -c "/this/cmd"`)
      +//
      +// This function is a wrapper around the libsystemd C library; if this is
      +// unable to successfully open a handle to the library for any reason (e.g. it
      +// cannot be found), an error will be returned.
      +func RunningFromSystemService() (ret bool, err error) {
      +	var h *libHandle
      +	h, err = getHandle()
      +	if err != nil {
      +		return
      +	}
      +	defer func() {
      +		if err1 := h.Close(); err1 != nil {
      +			err = err1
      +		}
      +	}()
      +
      +	sym := C.CString("sd_pid_get_owner_uid")
      +	defer C.free(unsafe.Pointer(sym))
      +	sd_pid_get_owner_uid := C.dlsym(h.handle, sym)
      +	if sd_pid_get_owner_uid == nil {
      +		err = fmt.Errorf("error resolving sd_pid_get_owner_uid function")
      +		return
      +	}
      +
      +	var uid C.uid_t
      +	errno := C.my_sd_pid_get_owner_uid(sd_pid_get_owner_uid, 0, &uid)
      +	serrno := syscall.Errno(-errno)
      +	// when we're running from a unit file, sd_pid_get_owner_uid returns
      +	// ENOENT (systemd <220) or ENXIO (systemd >=220)
      +	switch {
      +	case errno >= 0:
      +		ret = false
      +	case serrno == syscall.ENOENT, serrno == syscall.ENXIO:
      +		// Since the implementation of sessions in systemd relies on
      +		// the `pam_systemd` module, using the sd_pid_get_owner_uid
      +		// heuristic alone can result in false positives if that module
      +		// (or PAM itself) is not present or properly configured on the
      +		// system. As such, we also check if we're the session leader,
      +		// which should be the case if we're invoked from a unit file,
      +		// but not if e.g. we're invoked from the command line from a
      +		// user's login session
      +		ret = C.am_session_leader() == 1
      +	default:
      +		err = fmt.Errorf("error calling sd_pid_get_owner_uid: %v", syscall.Errno(-errno))
      +	}
      +	return
      +}
      +
      +// CurrentUnitName attempts to retrieve the name of the systemd system unit
      +// from which the calling process has been invoked. It wraps the systemd
      +// `sd_pid_get_unit` call, with the same caveat: for processes not part of a
      +// systemd system unit, this function will return an error.
      +func CurrentUnitName() (unit string, err error) {
      +	var h *libHandle
      +	h, err = getHandle()
      +	if err != nil {
      +		return
      +	}
      +	defer func() {
      +		if err1 := h.Close(); err1 != nil {
      +			err = err1
      +		}
      +	}()
      +
      +	sym := C.CString("sd_pid_get_unit")
      +	defer C.free(unsafe.Pointer(sym))
      +	sd_pid_get_unit := C.dlsym(h.handle, sym)
      +	if sd_pid_get_unit == nil {
      +		err = fmt.Errorf("error resolving sd_pid_get_unit function")
      +		return
      +	}
      +
      +	var s string
      +	u := C.CString(s)
      +	defer C.free(unsafe.Pointer(u))
      +
      +	ret := C.my_sd_pid_get_unit(sd_pid_get_unit, 0, &u)
      +	if ret < 0 {
      +		err = fmt.Errorf("error calling sd_pid_get_unit: %v", syscall.Errno(-ret))
      +		return
      +	}
      +
      +	unit = C.GoString(u)
      +	return
      +}
      +
      +// IsRunningSystemd checks whether the host was booted with systemd as its init
      +// system. This functions similarly to systemd's `sd_booted(3)`: internally, it
      +// checks whether /run/systemd/system/ exists and is a directory.
      +// http://www.freedesktop.org/software/systemd/man/sd_booted.html
      +func IsRunningSystemd() bool {
      +	fi, err := os.Lstat("/run/systemd/system")
      +	if err != nil {
      +		return false
      +	}
      +	return fi.IsDir()
      +}
      +
      +// GetMachineID returns a host's 128-bit machine ID as a string. This functions
      +// similarly to systemd's `sd_id128_get_machine`: internally, it simply reads
      +// the contents of /etc/machine-id
      +// http://www.freedesktop.org/software/systemd/man/sd_id128_get_machine.html
      +func GetMachineID() (string, error) {
      +	machineID, err := ioutil.ReadFile("/etc/machine-id")
      +	if err != nil {
      +		return "", fmt.Errorf("failed to read /etc/machine-id: %v", err)
      +	}
      +	return strings.TrimSpace(string(machineID)), nil
      +}
      diff --git a/vendor/github.com/coreos/ignition/.gitignore b/vendor/github.com/coreos/ignition/.gitignore
      new file mode 100644
      index 00000000..f2aba92f
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/.gitignore
      @@ -0,0 +1,3 @@
      +gopath/
      +bin/
      +*.swp
      diff --git a/vendor/github.com/coreos/ignition/.travis.yml b/vendor/github.com/coreos/ignition/.travis.yml
      new file mode 100644
      index 00000000..9c11341c
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/.travis.yml
      @@ -0,0 +1,12 @@
      +language: go
      +matrix:
      +  include:
      +    - go: 1.5
      +      env: GO15VENDOREXPERIMENT=1
      +    - go: 1.6
      +
      +install:
      +  -
      +
      +script:
      +  - ./test
      diff --git a/vendor/github.com/coreos/ignition/CONTRIBUTING.md b/vendor/github.com/coreos/ignition/CONTRIBUTING.md
      new file mode 100644
      index 00000000..cf66661e
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/CONTRIBUTING.md
      @@ -0,0 +1,59 @@
      +# How to Contribute
      +
      +CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via GitHub pull requests.  This document outlines some of the conventions on development workflow, commit message formatting, contact points and other resources to make it easier to get your contribution accepted.
      +
      +# Certificate of Origin
      +
      +By contributing to this project you agree to the Developer Certificate of Origin (DCO). This document was created by the Linux Kernel community and is a simple statement that you, as a contributor, have the legal right to make the contribution. See the [DCO](DCO) file for details.
      +
      +# Email and Chat
      +
      +The project currently uses the general CoreOS email list and IRC channel:
      +- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
      +- IRC: #[coreos-dev](irc://irc.freenode.org:6667/#coreos-dev) IRC channel on freenode.org
      +
      +Please avoid emailing maintainers found in the MAINTAINERS file directly. They are very busy and read the mailing lists.
      +
      +## Getting Started
      +
      +- Fork the repository on GitHub
      +- Read the [README](README.md) for build and test instructions
      +- Play with the project, submit bugs, submit patches!
      +
      +## Contribution Flow
      +
      +This is a rough outline of what a contributor's workflow looks like:
      +
      +- Create a topic branch from where you want to base your work (usually master).
      +- Make commits of logical units.
      +- Make sure your commit messages are in the proper format (see below).
      +- Push your changes to a topic branch in your fork of the repository.
      +- Make sure the tests pass, and add any new tests as appropriate.
      +- Submit a pull request to the original repository.
      +
      +Thanks for your contributions!
      +
      +### Format of the Commit Message
      +
      +We follow a rough convention for commit messages that is designed to answer two questions: what changed and why. The subject line should feature the what and the body of the commit should describe the why.
      +
      +```
      +scripts: add the test-cluster command
      +
      +this uses tmux to setup a test cluster that you can easily kill and
      +start for debugging.
      +
      +Fixes #38
      +```
      +
      +The format can be described more formally as follows:
      +
      +```
      +<subsystem>: <what changed>
      +<BLANK LINE>
      +<why this change was made>
      +<BLANK LINE>
      +<footer>
      +```
      +
      +The first line is the subject and should be no longer than 70 characters, the second line is always blank, and other lines should be wrapped at 80 characters. This allows the message to be easier to read on GitHub as well as in various git tools.
      diff --git a/vendor/github.com/coreos/ignition/DCO b/vendor/github.com/coreos/ignition/DCO
      new file mode 100644
      index 00000000..716561d5
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/DCO
      @@ -0,0 +1,36 @@
      +Developer Certificate of Origin
      +Version 1.1
      +
      +Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
      +660 York Street, Suite 102,
      +San Francisco, CA 94110 USA
      +
      +Everyone is permitted to copy and distribute verbatim copies of this
      +license document, but changing it is not allowed.
      +
      +
      +Developer's Certificate of Origin 1.1
      +
      +By making a contribution to this project, I certify that:
      +
      +(a) The contribution was created in whole or in part by me and I
      +    have the right to submit it under the open source license
      +    indicated in the file; or
      +
      +(b) The contribution is based upon previous work that, to the best
      +    of my knowledge, is covered under an appropriate open source
      +    license and I have the right under that license to submit that
      +    work with modifications, whether created in whole or in part
      +    by me, under the same open source license (unless I am
      +    permitted to submit under a different license), as indicated
      +    in the file; or
      +
      +(c) The contribution was provided directly to me by some other
      +    person who certified (a), (b) or (c) and I have not modified
      +    it.
      +
      +(d) I understand and agree that this project and the contribution
      +    are public and that a record of the contribution (including all
      +    personal information I submit with it, including my sign-off) is
      +    maintained indefinitely and may be redistributed consistent with
      +    this project or the open source license(s) involved.
      diff --git a/vendor/github.com/coreos/ignition/LICENSE b/vendor/github.com/coreos/ignition/LICENSE
      new file mode 100644
      index 00000000..e06d2081
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/LICENSE
      @@ -0,0 +1,202 @@
      +Apache License
      +                           Version 2.0, January 2004
      +                        http://www.apache.org/licenses/
      +
      +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
      +
      +   1. Definitions.
      +
      +      "License" shall mean the terms and conditions for use, reproduction,
      +      and distribution as defined by Sections 1 through 9 of this document.
      +
      +      "Licensor" shall mean the copyright owner or entity authorized by
      +      the copyright owner that is granting the License.
      +
      +      "Legal Entity" shall mean the union of the acting entity and all
      +      other entities that control, are controlled by, or are under common
      +      control with that entity. For the purposes of this definition,
      +      "control" means (i) the power, direct or indirect, to cause the
      +      direction or management of such entity, whether by contract or
      +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      +      outstanding shares, or (iii) beneficial ownership of such entity.
      +
      +      "You" (or "Your") shall mean an individual or Legal Entity
      +      exercising permissions granted by this License.
      +
      +      "Source" form shall mean the preferred form for making modifications,
      +      including but not limited to software source code, documentation
      +      source, and configuration files.
      +
      +      "Object" form shall mean any form resulting from mechanical
      +      transformation or translation of a Source form, including but
      +      not limited to compiled object code, generated documentation,
      +      and conversions to other media types.
      +
      +      "Work" shall mean the work of authorship, whether in Source or
      +      Object form, made available under the License, as indicated by a
      +      copyright notice that is included in or attached to the work
      +      (an example is provided in the Appendix below).
      +
      +      "Derivative Works" shall mean any work, whether in Source or Object
      +      form, that is based on (or derived from) the Work and for which the
      +      editorial revisions, annotations, elaborations, or other modifications
      +      represent, as a whole, an original work of authorship. For the purposes
      +      of this License, Derivative Works shall not include works that remain
      +      separable from, or merely link (or bind by name) to the interfaces of,
      +      the Work and Derivative Works thereof.
      +
      +      "Contribution" shall mean any work of authorship, including
      +      the original version of the Work and any modifications or additions
      +      to that Work or Derivative Works thereof, that is intentionally
      +      submitted to Licensor for inclusion in the Work by the copyright owner
      +      or by an individual or Legal Entity authorized to submit on behalf of
      +      the copyright owner. For the purposes of this definition, "submitted"
      +      means any form of electronic, verbal, or written communication sent
      +      to the Licensor or its representatives, including but not limited to
      +      communication on electronic mailing lists, source code control systems,
      +      and issue tracking systems that are managed by, or on behalf of, the
      +      Licensor for the purpose of discussing and improving the Work, but
      +      excluding communication that is conspicuously marked or otherwise
      +      designated in writing by the copyright owner as "Not a Contribution."
      +
      +      "Contributor" shall mean Licensor and any individual or Legal Entity
      +      on behalf of whom a Contribution has been received by Licensor and
      +      subsequently incorporated within the Work.
      +
      +   2. Grant of Copyright License. Subject to the terms and conditions of
      +      this License, each Contributor hereby grants to You a perpetual,
      +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      +      copyright license to reproduce, prepare Derivative Works of,
      +      publicly display, publicly perform, sublicense, and distribute the
      +      Work and such Derivative Works in Source or Object form.
      +
      +   3. Grant of Patent License. Subject to the terms and conditions of
      +      this License, each Contributor hereby grants to You a perpetual,
      +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      +      (except as stated in this section) patent license to make, have made,
      +      use, offer to sell, sell, import, and otherwise transfer the Work,
      +      where such license applies only to those patent claims licensable
      +      by such Contributor that are necessarily infringed by their
      +      Contribution(s) alone or by combination of their Contribution(s)
      +      with the Work to which such Contribution(s) was submitted. If You
      +      institute patent litigation against any entity (including a
      +      cross-claim or counterclaim in a lawsuit) alleging that the Work
      +      or a Contribution incorporated within the Work constitutes direct
      +      or contributory patent infringement, then any patent licenses
      +      granted to You under this License for that Work shall terminate
      +      as of the date such litigation is filed.
      +
      +   4. Redistribution. You may reproduce and distribute copies of the
      +      Work or Derivative Works thereof in any medium, with or without
      +      modifications, and in Source or Object form, provided that You
      +      meet the following conditions:
      +
      +      (a) You must give any other recipients of the Work or
      +          Derivative Works a copy of this License; and
      +
      +      (b) You must cause any modified files to carry prominent notices
      +          stating that You changed the files; and
      +
      +      (c) You must retain, in the Source form of any Derivative Works
      +          that You distribute, all copyright, patent, trademark, and
      +          attribution notices from the Source form of the Work,
      +          excluding those notices that do not pertain to any part of
      +          the Derivative Works; and
      +
      +      (d) If the Work includes a "NOTICE" text file as part of its
      +          distribution, then any Derivative Works that You distribute must
      +          include a readable copy of the attribution notices contained
      +          within such NOTICE file, excluding those notices that do not
      +          pertain to any part of the Derivative Works, in at least one
      +          of the following places: within a NOTICE text file distributed
      +          as part of the Derivative Works; within the Source form or
      +          documentation, if provided along with the Derivative Works; or,
      +          within a display generated by the Derivative Works, if and
      +          wherever such third-party notices normally appear. The contents
      +          of the NOTICE file are for informational purposes only and
      +          do not modify the License. You may add Your own attribution
      +          notices within Derivative Works that You distribute, alongside
      +          or as an addendum to the NOTICE text from the Work, provided
      +          that such additional attribution notices cannot be construed
      +          as modifying the License.
      +
      +      You may add Your own copyright statement to Your modifications and
      +      may provide additional or different license terms and conditions
      +      for use, reproduction, or distribution of Your modifications, or
      +      for any such Derivative Works as a whole, provided Your use,
      +      reproduction, and distribution of the Work otherwise complies with
      +      the conditions stated in this License.
      +
      +   5. Submission of Contributions. Unless You explicitly state otherwise,
      +      any Contribution intentionally submitted for inclusion in the Work
      +      by You to the Licensor shall be under the terms and conditions of
      +      this License, without any additional terms or conditions.
      +      Notwithstanding the above, nothing herein shall supersede or modify
      +      the terms of any separate license agreement you may have executed
      +      with Licensor regarding such Contributions.
      +
      +   6. Trademarks. This License does not grant permission to use the trade
      +      names, trademarks, service marks, or product names of the Licensor,
      +      except as required for reasonable and customary use in describing the
      +      origin of the Work and reproducing the content of the NOTICE file.
      +
      +   7. Disclaimer of Warranty. Unless required by applicable law or
      +      agreed to in writing, Licensor provides the Work (and each
      +      Contributor provides its Contributions) on an "AS IS" BASIS,
      +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      +      implied, including, without limitation, any warranties or conditions
      +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      +      PARTICULAR PURPOSE. You are solely responsible for determining the
      +      appropriateness of using or redistributing the Work and assume any
      +      risks associated with Your exercise of permissions under this License.
      +
      +   8. Limitation of Liability. In no event and under no legal theory,
      +      whether in tort (including negligence), contract, or otherwise,
      +      unless required by applicable law (such as deliberate and grossly
      +      negligent acts) or agreed to in writing, shall any Contributor be
      +      liable to You for damages, including any direct, indirect, special,
      +      incidental, or consequential damages of any character arising as a
      +      result of this License or out of the use or inability to use the
      +      Work (including but not limited to damages for loss of goodwill,
      +      work stoppage, computer failure or malfunction, or any and all
      +      other commercial damages or losses), even if such Contributor
      +      has been advised of the possibility of such damages.
      +
      +   9. Accepting Warranty or Additional Liability. While redistributing
      +      the Work or Derivative Works thereof, You may choose to offer,
      +      and charge a fee for, acceptance of support, warranty, indemnity,
      +      or other liability obligations and/or rights consistent with this
      +      License. However, in accepting such obligations, You may act only
      +      on Your own behalf and on Your sole responsibility, not on behalf
      +      of any other Contributor, and only if You agree to indemnify,
      +      defend, and hold each Contributor harmless for any liability
      +      incurred by, or claims asserted against, such Contributor by reason
      +      of your accepting any such warranty or additional liability.
      +
      +   END OF TERMS AND CONDITIONS
      +
      +   APPENDIX: How to apply the Apache License to your work.
      +
      +      To apply the Apache License to your work, attach the following
      +      boilerplate notice, with the fields enclosed by brackets "{}"
      +      replaced with your own identifying information. (Don't include
      +      the brackets!)  The text should be enclosed in the appropriate
      +      comment syntax for the file format. We also recommend that a
      +      file or class name and description of purpose be included on the
      +      same "printed page" as the copyright notice for easier
      +      identification within third-party archives.
      +
      +   Copyright {yyyy} {name of copyright owner}
      +
      +   Licensed under the Apache License, Version 2.0 (the "License");
      +   you may not use this file except in compliance with the License.
      +   You may obtain a copy of the License at
      +
      +       http://www.apache.org/licenses/LICENSE-2.0
      +
      +   Unless required by applicable law or agreed to in writing, software
      +   distributed under the License is distributed on an "AS IS" BASIS,
      +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +   See the License for the specific language governing permissions and
      +   limitations under the License.
      +
      diff --git a/vendor/github.com/coreos/ignition/MAINTAINERS b/vendor/github.com/coreos/ignition/MAINTAINERS
      new file mode 100644
      index 00000000..d320c824
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/MAINTAINERS
      @@ -0,0 +1,2 @@
      +Alex Crawford <alex.crawford@coreos.com> (@crawford)
      +Vito Caputo <vito.caputo@coreos.com> (@vcaputo)
      diff --git a/vendor/github.com/coreos/ignition/NEWS b/vendor/github.com/coreos/ignition/NEWS
      new file mode 100644
      index 00000000..19362b5f
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/NEWS
      @@ -0,0 +1,190 @@
      +05-Apr-2016 IGNITION v0.4.0
      +
      +  Features
      +
      +    - Update the config spec to v2.0.0 (see the migration guide for more info)
      +      - v1 configs will be automatically translated to v2.0.0
      +    - Add HTTP "User-Agent" and "Accept" headers to all requests
      +
      +  Changes
      +
      +    - Use Go's vendor directory for all dependencies
      +    - Split source into a public "config" package and "internal"
      +
      +25-Mar-2016 IGNITION v0.3.3
      +
      +  Bug Fixes
      +
      +    - Fix compilation errors when building for ARM
      +    - Properly fetch configs from EC2
      +
      +17-Mar-2016 IGNITION v0.3.2
      +
      +  Bug Fixes
      +
      +    - Properly decode VMware guest variables before parsing config
      +
      +  Changes
      +
      +    - Move config structures from config package to config/types
      +
      +02-Mar-2016 IGNITION v0.3.1
      +
      +  Bug Fixes
      +
      +    - Allow building on non-AMD64 architectures
      +
      +  Changes
      +
      +    - Major refactoring of the internal processing of OEMs and providers
      +
      +24-Feb-2016 IGNITION v0.3.0
      +
      +  Features
      +
      +    - Add support for VMware
      +
      +13-Jan-2016 IGNITION v0.2.6
      +
      +  Features
      +
      +    - Improve validation of storage.filesystems options
      +
      +  Bug Fixes
      +
      +    - Properly zap GPT tables when they are partially valid
      +
      +06-Jan-2016 IGNITION v0.2.5
      +
      +  Bug Fixes
      +
      +    - Recognize and ignore gzipped cloud-configs
      +
      +19-Nov-2015 IGNITION v0.2.4
      +
      +  Bug Fixes
      +
      +    - Correctly escape device unit names
      +
      +17-Nov-2015 IGNITION v0.2.3
      +
      +  Features
      +
      +    - Provide logging to pinpoint JSON errors in invalid configs
      +
      +  Bug Fixes
      +
      +    - Ensure that /mnt/oem exists before mounting
      +    - Remove /sysroot/ prefix from alternate config path
      +
      +20-Oct-2015 IGNITION v0.2.2
      +
      +  Bug Fixes
      +
      +    - Mount the oem partition for oem:// schemes when needed
      +
      +15-Oct-2015 IGNITION v0.2.1
      +
      +  Bug Fixes
      +
      +    - Allow empty CustomData on Azure
      +
      +29-Sep-2015 IGNITION v0.2.0
      +
      +  Features
      +
      +    - Added support for Azure
      +    - Added support for formatting partitions as xfs
      +
      +  Bug Fixes
      +
      +    - Removed online timeout for EC2
      +
      +09-Sep-2015 IGNITION v0.1.6
      +
      +  Features
      +
      +    - --fetchtimeout becomes --online-timeout
      +    - --online-timeout of 0 now represents infinity
      +    - Added recognition of "interoute" OEM
      +
      +  Documentation
      +
      +    - Examples have been removed and supported platforms added
      +    - Various minor cleanups
      +
      +  Bug Fixes
      +
      +    - Ensure added SSH keys are newline terminated
      +
      +  Build System Changes
      +
      +    - Fix gofmt invocation from test script to fail when appropriate
      +
      +28-Aug-2015 IGNITION v0.1.5
      +
      +  Bug Fixes
      +
      +    - Disable EC2 provider for now
      +
      +27-Aug-2015 IGNITION v0.1.4
      +
      +  Features
      +
      +    - Add support for oem:// scheme config urls
      +
      +  Documentation
      +
      +    - Added guides
      +    - Updated config specification
      +
      +  Bug Fixes
      +
      +    - Add DefaultDependencies=false to WaitOnDevices() transient unit
      +    - Updated JSON configuration keys to match style
      +
      +  Build System Changes
      +
      +    - Added script for tagging releases
      +
      +11-Aug-2015 IGNITION v0.1.3
      +
      +  Features
      +
      +    - Add support for ssh keys on EC2
      +    - Log version at runtime
      +
      +  Bug Fixes
      +
      +    - Log ssh keys as they are added
      +    - Various small cleanups
      +
      +  Build System Changes
      +
      +    - Derive version from git describe at build time
      +    - Use bash build and test scripts instead of make
      +
      +22-Jul-2015 IGNITION v0.1.2
      +
      +  Bug Fixes
      +
      +    - Fix validation of drop-in names
      +    - Properly handle a lack of userdata on EC2
      +
      +22-Jul-2015 IGNITION v0.1.1
      +
      +  Bug Fixes
      +
      +    - Ignore empty configs
      +    - Ignore unsupported CoreOS OEMs
      +    - Panic on incorrect OEM flag configurations
      +
      +14-Jul-2015 IGNITION v0.1.0
      +
      +  Features
      +
      +    - Initial release of Ignition!
      +    - Support for disk partitioning, partition formatting, writing files,
      +      RAID, systemd units, networkd units, users, and groups.
      +    - Supports reading the config from a remote URL (via
      +      config.coreos.url) or from the Amazon EC2 metadata service.
      diff --git a/vendor/github.com/coreos/ignition/NOTICE b/vendor/github.com/coreos/ignition/NOTICE
      new file mode 100644
      index 00000000..e520005c
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/NOTICE
      @@ -0,0 +1,5 @@
      +CoreOS Project
      +Copyright 2015 CoreOS, Inc
      +
      +This product includes software developed at CoreOS, Inc.
      +(http://www.coreos.com/).
      diff --git a/vendor/github.com/coreos/ignition/README.md b/vendor/github.com/coreos/ignition/README.md
      new file mode 100644
      index 00000000..6b6967d3
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/README.md
      @@ -0,0 +1,15 @@
      +# Ignition #
      +
      +Ignition is the utility used by CoreOS Linux to manipulate disks during the initramfs. This includes partitioning disks, formatting partitions, writing files (regular files, systemd units, networkd units, etc.), and configuring users. On first boot, Ignition reads its configuration from a source of truth (remote URL, network metadata service, hypervisor bridge, etc.) and applies the configuration.
      +
      +## Usage ##
      +
      +Odds are good that you don't want to invoke Ignition directly. In fact, it isn't even present in the CoreOS Linux root filesystem. Take a look at the [Getting Started Guide][getting started] for details on providing Ignition with a runtime configuration.
      +
      +[getting started]: doc/getting-started.md
      +
      +**Ignition is under very active development!**
      +
      +Please check out the [roadmap](ROADMAP.md) for information about the timeline. Use the [bug tracker][issues] to report bugs, but please avoid the urge to report lack of features for now.
      +
      +[issues]: https://github.com/coreos/bugs/issues/new?labels=component/ignition
      diff --git a/vendor/github.com/coreos/ignition/ROADMAP.md b/vendor/github.com/coreos/ignition/ROADMAP.md
      new file mode 100644
      index 00000000..80dcb6a9
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/ROADMAP.md
      @@ -0,0 +1,18 @@
      +# Ignition roadmap #
      +
      +**work in progress**
      +
      +This document defines a high level roadmap for Ignition development. The dates below should not be considered authoritative, but rather indicative of the projected timeline of the project.
      +
      +## Ignition 0.4 (Apr) ##
      +
      +- support for version 2.0.0 of the specification
      +  - remote file contents
      +  - chain-loaded configs
      +  - RAM-based filesystems
      +
      +## Ignition 0.5 (May) ##
      +
      +- support for more config providers
      +  - GCE
      +  - OpenStack
      diff --git a/vendor/github.com/coreos/ignition/build b/vendor/github.com/coreos/ignition/build
      new file mode 100755
      index 00000000..c64fe205
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/build
      @@ -0,0 +1,20 @@
      +#!/bin/bash -eu
      +
      +NAME="ignition"
      +ORG_PATH="github.com/coreos"
      +REPO_PATH="${ORG_PATH}/${NAME}"
      +VERSION=$(git describe --dirty)
      +GLDFLAGS="-X github.com/coreos/ignition/internal/version.Raw=${VERSION}"
      +
      +if [ ! -h gopath/src/${REPO_PATH} ]; then
      +	mkdir -p gopath/src/${ORG_PATH}
      +	ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255
      +fi
      +
      +export GOBIN=${PWD}/bin
      +export GOPATH=${PWD}/gopath
      +
      +eval $(go env)
      +
      +echo "Building ${NAME}..."
      +go build -ldflags "${GLDFLAGS}" -o ${GOBIN}/${NAME} ${REPO_PATH}/internal
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/COPYING
      deleted file mode 100644
      index 2993ec08..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/COPYING
      +++ /dev/null
      @@ -1,19 +0,0 @@
      -Copyright (C) 2014 Alec Thomas
      -
      -Permission is hereby granted, free of charge, to any person obtaining a copy of
      -this software and associated documentation files (the "Software"), to deal in
      -the Software without restriction, including without limitation the rights to
      -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
      -of the Software, and to permit persons to whom the Software is furnished to do
      -so, subject to the following conditions:
      -
      -The above copyright notice and this permission notice shall be included in all
      -copies or substantial portions of the Software.
      -
      -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
      -SOFTWARE.
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/README.md b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/README.md
      deleted file mode 100644
      index bee884e3..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/README.md
      +++ /dev/null
      @@ -1,11 +0,0 @@
      -# Units - Helpful unit multipliers and functions for Go
      -
      -The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package.
      -
      -It allows for code like this:
      -
      -```go
      -n, err := ParseBase2Bytes("1KB")
      -// n == 1024
      -n = units.Mebibyte * 512
      -```
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/bytes.go
      deleted file mode 100644
      index eaadeb80..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/bytes.go
      +++ /dev/null
      @@ -1,83 +0,0 @@
      -package units
      -
      -// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
      -// etc.).
      -type Base2Bytes int64
      -
      -// Base-2 byte units.
      -const (
      -	Kibibyte Base2Bytes = 1024
      -	KiB                 = Kibibyte
      -	Mebibyte            = Kibibyte * 1024
      -	MiB                 = Mebibyte
      -	Gibibyte            = Mebibyte * 1024
      -	GiB                 = Gibibyte
      -	Tebibyte            = Gibibyte * 1024
      -	TiB                 = Tebibyte
      -	Pebibyte            = Tebibyte * 1024
      -	PiB                 = Pebibyte
      -	Exbibyte            = Pebibyte * 1024
      -	EiB                 = Exbibyte
      -)
      -
      -var (
      -	bytesUnitMap    = MakeUnitMap("iB", "B", 1024)
      -	oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
      -)
      -
      -// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
      -// and KiB are both 1024.
      -func ParseBase2Bytes(s string) (Base2Bytes, error) {
      -	n, err := ParseUnit(s, bytesUnitMap)
      -	if err != nil {
      -		n, err = ParseUnit(s, oldBytesUnitMap)
      -	}
      -	return Base2Bytes(n), err
      -}
      -
      -func (b Base2Bytes) String() string {
      -	return ToString(int64(b), 1024, "iB", "B")
      -}
      -
      -var (
      -	metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
      -)
      -
      -// MetricBytes are SI byte units (1000 bytes in a kilobyte).
      -type MetricBytes SI
      -
      -// SI base-10 byte units.
      -const (
      -	Kilobyte MetricBytes = 1000
      -	KB                   = Kilobyte
      -	Megabyte             = Kilobyte * 1000
      -	MB                   = Megabyte
      -	Gigabyte             = Megabyte * 1000
      -	GB                   = Gigabyte
      -	Terabyte             = Gigabyte * 1000
      -	TB                   = Terabyte
      -	Petabyte             = Terabyte * 1000
      -	PB                   = Petabyte
      -	Exabyte              = Petabyte * 1000
      -	EB                   = Exabyte
      -)
      -
      -// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
      -func ParseMetricBytes(s string) (MetricBytes, error) {
      -	n, err := ParseUnit(s, metricBytesUnitMap)
      -	return MetricBytes(n), err
      -}
      -
      -func (m MetricBytes) String() string {
      -	return ToString(int64(m), 1000, "B", "B")
      -}
      -
      -// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
      -// respectively. That is, KiB represents 1024 and KB represents 1000.
      -func ParseStrictBytes(s string) (int64, error) {
      -	n, err := ParseUnit(s, bytesUnitMap)
      -	if err != nil {
      -		n, err = ParseUnit(s, metricBytesUnitMap)
      -	}
      -	return int64(n), err
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/bytes_test.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/bytes_test.go
      deleted file mode 100644
      index d4317aa5..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/bytes_test.go
      +++ /dev/null
      @@ -1,49 +0,0 @@
      -package units
      -
      -import (
      -	"testing"
      -
      -	"github.com/stretchr/testify/assert"
      -)
      -
      -func TestBase2BytesString(t *testing.T) {
      -	assert.Equal(t, Base2Bytes(0).String(), "0B")
      -	assert.Equal(t, Base2Bytes(1025).String(), "1KiB1B")
      -	assert.Equal(t, Base2Bytes(1048577).String(), "1MiB1B")
      -}
      -
      -func TestParseBase2Bytes(t *testing.T) {
      -	n, err := ParseBase2Bytes("0B")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 0, n)
      -	n, err = ParseBase2Bytes("1KB")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 1024, n)
      -	n, err = ParseBase2Bytes("1MB1KB25B")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 1049625, n)
      -	n, err = ParseBase2Bytes("1.5MB")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 1572864, n)
      -}
      -
      -func TestMetricBytesString(t *testing.T) {
      -	assert.Equal(t, MetricBytes(0).String(), "0B")
      -	assert.Equal(t, MetricBytes(1001).String(), "1KB1B")
      -	assert.Equal(t, MetricBytes(1001025).String(), "1MB1KB25B")
      -}
      -
      -func TestParseMetricBytes(t *testing.T) {
      -	n, err := ParseMetricBytes("0B")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 0, n)
      -	n, err = ParseMetricBytes("1KB1B")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 1001, n)
      -	n, err = ParseMetricBytes("1MB1KB25B")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 1001025, n)
      -	n, err = ParseMetricBytes("1.5MB")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 1500000, n)
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/doc.go
      deleted file mode 100644
      index 156ae386..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/doc.go
      +++ /dev/null
      @@ -1,13 +0,0 @@
      -// Package units provides helpful unit multipliers and functions for Go.
      -//
      -// The goal of this package is to have functionality similar to the time [1] package.
      -//
      -//
      -// [1] http://golang.org/pkg/time/
      -//
      -// It allows for code like this:
      -//
      -//     n, err := ParseBase2Bytes("1KB")
      -//     // n == 1024
      -//     n = units.Mebibyte * 512
      -package units
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/si.go
      deleted file mode 100644
      index 8234a9d5..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/si.go
      +++ /dev/null
      @@ -1,26 +0,0 @@
      -package units
      -
      -// SI units.
      -type SI int64
      -
      -// SI unit multiples.
      -const (
      -	Kilo SI = 1000
      -	Mega    = Kilo * 1000
      -	Giga    = Mega * 1000
      -	Tera    = Giga * 1000
      -	Peta    = Tera * 1000
      -	Exa     = Peta * 1000
      -)
      -
      -func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
      -	return map[string]float64{
      -		shortSuffix:  1,
      -		"K" + suffix: float64(scale),
      -		"M" + suffix: float64(scale * scale),
      -		"G" + suffix: float64(scale * scale * scale),
      -		"T" + suffix: float64(scale * scale * scale * scale),
      -		"P" + suffix: float64(scale * scale * scale * scale * scale),
      -		"E" + suffix: float64(scale * scale * scale * scale * scale * scale),
      -	}
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/util.go
      deleted file mode 100644
      index 6527e92d..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/alecthomas/units/util.go
      +++ /dev/null
      @@ -1,138 +0,0 @@
      -package units
      -
      -import (
      -	"errors"
      -	"fmt"
      -	"strings"
      -)
      -
      -var (
      -	siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
      -)
      -
      -func ToString(n int64, scale int64, suffix, baseSuffix string) string {
      -	mn := len(siUnits)
      -	out := make([]string, mn)
      -	for i, m := range siUnits {
      -		if n%scale != 0 || i == 0 && n == 0 {
      -			s := suffix
      -			if i == 0 {
      -				s = baseSuffix
      -			}
      -			out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
      -		}
      -		n /= scale
      -		if n == 0 {
      -			break
      -		}
      -	}
      -	return strings.Join(out, "")
      -}
      -
      -// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
      -var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
      -
      -// leadingInt consumes the leading [0-9]* from s.
      -func leadingInt(s string) (x int64, rem string, err error) {
      -	i := 0
      -	for ; i < len(s); i++ {
      -		c := s[i]
      -		if c < '0' || c > '9' {
      -			break
      -		}
      -		if x >= (1<<63-10)/10 {
      -			// overflow
      -			return 0, "", errLeadingInt
      -		}
      -		x = x*10 + int64(c) - '0'
      -	}
      -	return x, s[i:], nil
      -}
      -
      -func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
      -	// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
      -	orig := s
      -	f := float64(0)
      -	neg := false
      -
      -	// Consume [-+]?
      -	if s != "" {
      -		c := s[0]
      -		if c == '-' || c == '+' {
      -			neg = c == '-'
      -			s = s[1:]
      -		}
      -	}
      -	// Special case: if all that is left is "0", this is zero.
      -	if s == "0" {
      -		return 0, nil
      -	}
      -	if s == "" {
      -		return 0, errors.New("units: invalid " + orig)
      -	}
      -	for s != "" {
      -		g := float64(0) // this element of the sequence
      -
      -		var x int64
      -		var err error
      -
      -		// The next character must be [0-9.]
      -		if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
      -			return 0, errors.New("units: invalid " + orig)
      -		}
      -		// Consume [0-9]*
      -		pl := len(s)
      -		x, s, err = leadingInt(s)
      -		if err != nil {
      -			return 0, errors.New("units: invalid " + orig)
      -		}
      -		g = float64(x)
      -		pre := pl != len(s) // whether we consumed anything before a period
      -
      -		// Consume (\.[0-9]*)?
      -		post := false
      -		if s != "" && s[0] == '.' {
      -			s = s[1:]
      -			pl := len(s)
      -			x, s, err = leadingInt(s)
      -			if err != nil {
      -				return 0, errors.New("units: invalid " + orig)
      -			}
      -			scale := 1.0
      -			for n := pl - len(s); n > 0; n-- {
      -				scale *= 10
      -			}
      -			g += float64(x) / scale
      -			post = pl != len(s)
      -		}
      -		if !pre && !post {
      -			// no digits (e.g. ".s" or "-.s")
      -			return 0, errors.New("units: invalid " + orig)
      -		}
      -
      -		// Consume unit.
      -		i := 0
      -		for ; i < len(s); i++ {
      -			c := s[i]
      -			if c == '.' || ('0' <= c && c <= '9') {
      -				break
      -			}
      -		}
      -		u := s[:i]
      -		s = s[i:]
      -		unit, ok := unitMap[u]
      -		if !ok {
      -			return 0, errors.New("units: unknown unit " + u + " in " + orig)
      -		}
      -
      -		f += g * unit
      -	}
      -
      -	if neg {
      -		f = -f
      -	}
      -	if f < float64(-1<<63) || f > float64(1<<63-1) {
      -		return 0, errors.New("units: overflow parsing unit")
      -	}
      -	return int64(f), nil
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/camlistore/camlistore/pkg/errorutil/highlight.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/camlistore/camlistore/pkg/errorutil/highlight.go
      deleted file mode 100644
      index aace6a46..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/camlistore/camlistore/pkg/errorutil/highlight.go
      +++ /dev/null
      @@ -1,58 +0,0 @@
      -/*
      -Copyright 2011 Google Inc.
      -
      -Licensed under the Apache License, Version 2.0 (the "License");
      -you may not use this file except in compliance with the License.
      -You may obtain a copy of the License at
      -
      -     http://www.apache.org/licenses/LICENSE-2.0
      -
      -Unless required by applicable law or agreed to in writing, software
      -distributed under the License is distributed on an "AS IS" BASIS,
      -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      -See the License for the specific language governing permissions and
      -limitations under the License.
      -*/
      -
      -// Package errorutil helps make better error messages.
      -package errorutil
      -
      -import (
      -	"bufio"
      -	"bytes"
      -	"fmt"
      -	"io"
      -	"strings"
      -)
      -
      -// HighlightBytePosition takes a reader and the location in bytes of a parse
      -// error (for instance, from json.SyntaxError.Offset) and returns the line, column,
      -// and pretty-printed context around the error with an arrow indicating the exact
      -// position of the syntax error.
      -func HighlightBytePosition(f io.Reader, pos int64) (line, col int, highlight string) {
      -	line = 1
      -	br := bufio.NewReader(f)
      -	lastLine := ""
      -	thisLine := new(bytes.Buffer)
      -	for n := int64(0); n < pos; n++ {
      -		b, err := br.ReadByte()
      -		if err != nil {
      -			break
      -		}
      -		if b == '\n' {
      -			lastLine = thisLine.String()
      -			thisLine.Reset()
      -			line++
      -			col = 1
      -		} else {
      -			col++
      -			thisLine.WriteByte(b)
      -		}
      -	}
      -	if line > 1 {
      -		highlight += fmt.Sprintf("%5d: %s\n", line-1, lastLine)
      -	}
      -	highlight += fmt.Sprintf("%5d: %s\n", line, thisLine.String())
      -	highlight += fmt.Sprintf("%s^\n", strings.Repeat(" ", col+5))
      -	return
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/LICENSE b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/LICENSE
      deleted file mode 100644
      index a68e67f0..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/LICENSE
      +++ /dev/null
      @@ -1,188 +0,0 @@
      -
      -Copyright (c) 2011-2014 - Canonical Inc.
      -
      -This software is licensed under the LGPLv3, included below.
      -
      -As a special exception to the GNU Lesser General Public License version 3
      -("LGPL3"), the copyright holders of this Library give you permission to
      -convey to a third party a Combined Work that links statically or dynamically
      -to this Library without providing any Minimal Corresponding Source or
      -Minimal Application Code as set out in 4d or providing the installation
      -information set out in section 4e, provided that you comply with the other
      -provisions of LGPL3 and provided that you meet, for the Application the
      -terms and conditions of the license(s) which apply to the Application.
      -
      -Except as stated in this special exception, the provisions of LGPL3 will
      -continue to comply in full to this Library. If you modify this Library, you
      -may apply this exception to your version of this Library, but you are not
      -obliged to do so. If you do not wish to do so, delete this exception
      -statement from your version. This exception does not (and cannot) modify any
      -license terms which apply to the Application, with which you must still
      -comply.
      -
      -
      -                   GNU LESSER GENERAL PUBLIC LICENSE
      -                       Version 3, 29 June 2007
      -
      - Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
      - Everyone is permitted to copy and distribute verbatim copies
      - of this license document, but changing it is not allowed.
      -
      -
      -  This version of the GNU Lesser General Public License incorporates
      -the terms and conditions of version 3 of the GNU General Public
      -License, supplemented by the additional permissions listed below.
      -
      -  0. Additional Definitions.
      -
      -  As used herein, "this License" refers to version 3 of the GNU Lesser
      -General Public License, and the "GNU GPL" refers to version 3 of the GNU
      -General Public License.
      -
      -  "The Library" refers to a covered work governed by this License,
      -other than an Application or a Combined Work as defined below.
      -
      -  An "Application" is any work that makes use of an interface provided
      -by the Library, but which is not otherwise based on the Library.
      -Defining a subclass of a class defined by the Library is deemed a mode
      -of using an interface provided by the Library.
      -
      -  A "Combined Work" is a work produced by combining or linking an
      -Application with the Library.  The particular version of the Library
      -with which the Combined Work was made is also called the "Linked
      -Version".
      -
      -  The "Minimal Corresponding Source" for a Combined Work means the
      -Corresponding Source for the Combined Work, excluding any source code
      -for portions of the Combined Work that, considered in isolation, are
      -based on the Application, and not on the Linked Version.
      -
      -  The "Corresponding Application Code" for a Combined Work means the
      -object code and/or source code for the Application, including any data
      -and utility programs needed for reproducing the Combined Work from the
      -Application, but excluding the System Libraries of the Combined Work.
      -
      -  1. Exception to Section 3 of the GNU GPL.
      -
      -  You may convey a covered work under sections 3 and 4 of this License
      -without being bound by section 3 of the GNU GPL.
      -
      -  2. Conveying Modified Versions.
      -
      -  If you modify a copy of the Library, and, in your modifications, a
      -facility refers to a function or data to be supplied by an Application
      -that uses the facility (other than as an argument passed when the
      -facility is invoked), then you may convey a copy of the modified
      -version:
      -
      -   a) under this License, provided that you make a good faith effort to
      -   ensure that, in the event an Application does not supply the
      -   function or data, the facility still operates, and performs
      -   whatever part of its purpose remains meaningful, or
      -
      -   b) under the GNU GPL, with none of the additional permissions of
      -   this License applicable to that copy.
      -
      -  3. Object Code Incorporating Material from Library Header Files.
      -
      -  The object code form of an Application may incorporate material from
      -a header file that is part of the Library.  You may convey such object
      -code under terms of your choice, provided that, if the incorporated
      -material is not limited to numerical parameters, data structure
      -layouts and accessors, or small macros, inline functions and templates
      -(ten or fewer lines in length), you do both of the following:
      -
      -   a) Give prominent notice with each copy of the object code that the
      -   Library is used in it and that the Library and its use are
      -   covered by this License.
      -
      -   b) Accompany the object code with a copy of the GNU GPL and this license
      -   document.
      -
      -  4. Combined Works.
      -
      -  You may convey a Combined Work under terms of your choice that,
      -taken together, effectively do not restrict modification of the
      -portions of the Library contained in the Combined Work and reverse
      -engineering for debugging such modifications, if you also do each of
      -the following:
      -
      -   a) Give prominent notice with each copy of the Combined Work that
      -   the Library is used in it and that the Library and its use are
      -   covered by this License.
      -
      -   b) Accompany the Combined Work with a copy of the GNU GPL and this license
      -   document.
      -
      -   c) For a Combined Work that displays copyright notices during
      -   execution, include the copyright notice for the Library among
      -   these notices, as well as a reference directing the user to the
      -   copies of the GNU GPL and this license document.
      -
      -   d) Do one of the following:
      -
      -       0) Convey the Minimal Corresponding Source under the terms of this
      -       License, and the Corresponding Application Code in a form
      -       suitable for, and under terms that permit, the user to
      -       recombine or relink the Application with a modified version of
      -       the Linked Version to produce a modified Combined Work, in the
      -       manner specified by section 6 of the GNU GPL for conveying
      -       Corresponding Source.
      -
      -       1) Use a suitable shared library mechanism for linking with the
      -       Library.  A suitable mechanism is one that (a) uses at run time
      -       a copy of the Library already present on the user's computer
      -       system, and (b) will operate properly with a modified version
      -       of the Library that is interface-compatible with the Linked
      -       Version.
      -
      -   e) Provide Installation Information, but only if you would otherwise
      -   be required to provide such information under section 6 of the
      -   GNU GPL, and only to the extent that such information is
      -   necessary to install and execute a modified version of the
      -   Combined Work produced by recombining or relinking the
      -   Application with a modified version of the Linked Version. (If
      -   you use option 4d0, the Installation Information must accompany
      -   the Minimal Corresponding Source and Corresponding Application
      -   Code. If you use option 4d1, you must provide the Installation
      -   Information in the manner specified by section 6 of the GNU GPL
      -   for conveying Corresponding Source.)
      -
      -  5. Combined Libraries.
      -
      -  You may place library facilities that are a work based on the
      -Library side by side in a single library together with other library
      -facilities that are not Applications and are not covered by this
      -License, and convey such a combined library under terms of your
      -choice, if you do both of the following:
      -
      -   a) Accompany the combined library with a copy of the same work based
      -   on the Library, uncombined with any other library facilities,
      -   conveyed under the terms of this License.
      -
      -   b) Give prominent notice with the combined library that part of it
      -   is a work based on the Library, and explaining where to find the
      -   accompanying uncombined form of the same work.
      -
      -  6. Revised Versions of the GNU Lesser General Public License.
      -
      -  The Free Software Foundation may publish revised and/or new versions
      -of the GNU Lesser General Public License from time to time. Such new
      -versions will be similar in spirit to the present version, but may
      -differ in detail to address new problems or concerns.
      -
      -  Each version is given a distinguishing version number. If the
      -Library as you received it specifies that a certain numbered version
      -of the GNU Lesser General Public License "or any later version"
      -applies to it, you have the option of following the terms and
      -conditions either of that published version or of any later version
      -published by the Free Software Foundation. If the Library as you
      -received it does not specify a version number of the GNU Lesser
      -General Public License, you may choose any version of the GNU Lesser
      -General Public License ever published by the Free Software Foundation.
      -
      -  If the Library as you received it specifies that a proxy can decide
      -whether future versions of the GNU Lesser General Public License shall
      -apply, that proxy's public statement of acceptance of any version is
      -permanent authorization for you to choose that version for the
      -Library.
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/LICENSE.libyaml b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/LICENSE.libyaml
      deleted file mode 100644
      index 8da58fbf..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/LICENSE.libyaml
      +++ /dev/null
      @@ -1,31 +0,0 @@
      -The following files were ported to Go from C files of libyaml, and thus
      -are still covered by their original copyright and license:
      -
      -    apic.go
      -    emitterc.go
      -    parserc.go
      -    readerc.go
      -    scannerc.go
      -    writerc.go
      -    yamlh.go
      -    yamlprivateh.go
      -
      -Copyright (c) 2006 Kirill Simonov
      -
      -Permission is hereby granted, free of charge, to any person obtaining a copy of
      -this software and associated documentation files (the "Software"), to deal in
      -the Software without restriction, including without limitation the rights to
      -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
      -of the Software, and to permit persons to whom the Software is furnished to do
      -so, subject to the following conditions:
      -
      -The above copyright notice and this permission notice shall be included in all
      -copies or substantial portions of the Software.
      -
      -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
      -SOFTWARE.
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/README.md b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/README.md
      deleted file mode 100644
      index d6c919e6..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/README.md
      +++ /dev/null
      @@ -1,128 +0,0 @@
      -# YAML support for the Go language
      -
      -Introduction
      -------------
      -
      -The yaml package enables Go programs to comfortably encode and decode YAML
      -values. It was developed within [Canonical](https://www.canonical.com) as
      -part of the [juju](https://juju.ubuntu.com) project, and is based on a
      -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
      -C library to parse and generate YAML data quickly and reliably.
      -
      -Compatibility
      --------------
      -
      -The yaml package supports most of YAML 1.1 and 1.2, including support for
      -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
      -implemented, and base-60 floats from YAML 1.1 are purposefully not
      -supported since they're a poor design and are gone in YAML 1.2.
      -
      -Installation and usage
      -----------------------
      -
      -The import path for the package is *gopkg.in/yaml.v2*.
      -
      -To install it, run:
      -
      -    go get gopkg.in/yaml.v2
      -
      -API documentation
      ------------------
      -
      -If opened in a browser, the import path itself leads to the API documentation:
      -
      -  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
      -
      -API stability
      --------------
      -
      -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
      -
      -
      -License
      --------
      -
      -The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
      -
      -
      -Example
      --------
      -
      -```Go
      -package main
      -
      -import (
      -        "fmt"
      -        "log"
      -
      -        "gopkg.in/yaml.v2"
      -)
      -
      -var data = `
      -a: Easy!
      -b:
      -  c: 2
      -  d: [3, 4]
      -`
      -
      -type T struct {
      -        A string
      -        B struct{C int; D []int ",flow"}
      -}
      -
      -func main() {
      -        t := T{}
      -    
      -        err := yaml.Unmarshal([]byte(data), &t)
      -        if err != nil {
      -                log.Fatalf("error: %v", err)
      -        }
      -        fmt.Printf("--- t:\n%v\n\n", t)
      -    
      -        d, err := yaml.Marshal(&t)
      -        if err != nil {
      -                log.Fatalf("error: %v", err)
      -        }
      -        fmt.Printf("--- t dump:\n%s\n\n", string(d))
      -    
      -        m := make(map[interface{}]interface{})
      -    
      -        err = yaml.Unmarshal([]byte(data), &m)
      -        if err != nil {
      -                log.Fatalf("error: %v", err)
      -        }
      -        fmt.Printf("--- m:\n%v\n\n", m)
      -    
      -        d, err = yaml.Marshal(&m)
      -        if err != nil {
      -                log.Fatalf("error: %v", err)
      -        }
      -        fmt.Printf("--- m dump:\n%s\n\n", string(d))
      -}
      -```
      -
      -This example will generate the following output:
      -
      -```
      ---- t:
      -{Easy! {2 [3 4]}}
      -
      ---- t dump:
      -a: Easy!
      -b:
      -  c: 2
      -  d: [3, 4]
      -
      -
      ---- m:
      -map[a:Easy! b:map[c:2 d:[3 4]]]
      -
      ---- m dump:
      -a: Easy!
      -b:
      -  c: 2
      -  d:
      -  - 3
      -  - 4
      -```
      -
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/apic.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/apic.go
      deleted file mode 100644
      index 95ec014e..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/apic.go
      +++ /dev/null
      @@ -1,742 +0,0 @@
      -package yaml
      -
      -import (
      -	"io"
      -	"os"
      -)
      -
      -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
      -	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
      -
      -	// Check if we can move the queue at the beginning of the buffer.
      -	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
      -		if parser.tokens_head != len(parser.tokens) {
      -			copy(parser.tokens, parser.tokens[parser.tokens_head:])
      -		}
      -		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
      -		parser.tokens_head = 0
      -	}
      -	parser.tokens = append(parser.tokens, *token)
      -	if pos < 0 {
      -		return
      -	}
      -	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
      -	parser.tokens[parser.tokens_head+pos] = *token
      -}
      -
      -// Create a new parser object.
      -func yaml_parser_initialize(parser *yaml_parser_t) bool {
      -	*parser = yaml_parser_t{
      -		raw_buffer: make([]byte, 0, input_raw_buffer_size),
      -		buffer:     make([]byte, 0, input_buffer_size),
      -	}
      -	return true
      -}
      -
      -// Destroy a parser object.
      -func yaml_parser_delete(parser *yaml_parser_t) {
      -	*parser = yaml_parser_t{}
      -}
      -
      -// String read handler.
      -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
      -	if parser.input_pos == len(parser.input) {
      -		return 0, io.EOF
      -	}
      -	n = copy(buffer, parser.input[parser.input_pos:])
      -	parser.input_pos += n
      -	return n, nil
      -}
      -
      -// File read handler.
      -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
      -	return parser.input_file.Read(buffer)
      -}
      -
      -// Set a string input.
      -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
      -	if parser.read_handler != nil {
      -		panic("must set the input source only once")
      -	}
      -	parser.read_handler = yaml_string_read_handler
      -	parser.input = input
      -	parser.input_pos = 0
      -}
      -
      -// Set a file input.
      -func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
      -	if parser.read_handler != nil {
      -		panic("must set the input source only once")
      -	}
      -	parser.read_handler = yaml_file_read_handler
      -	parser.input_file = file
      -}
      -
      -// Set the source encoding.
      -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
      -	if parser.encoding != yaml_ANY_ENCODING {
      -		panic("must set the encoding only once")
      -	}
      -	parser.encoding = encoding
      -}
      -
      -// Create a new emitter object.
      -func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
      -	*emitter = yaml_emitter_t{
      -		buffer:     make([]byte, output_buffer_size),
      -		raw_buffer: make([]byte, 0, output_raw_buffer_size),
      -		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
      -		events:     make([]yaml_event_t, 0, initial_queue_size),
      -	}
      -	return true
      -}
      -
      -// Destroy an emitter object.
      -func yaml_emitter_delete(emitter *yaml_emitter_t) {
      -	*emitter = yaml_emitter_t{}
      -}
      -
      -// String write handler.
      -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
      -	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
      -	return nil
      -}
      -
      -// File write handler.
      -func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
      -	_, err := emitter.output_file.Write(buffer)
      -	return err
      -}
      -
      -// Set a string output.
      -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
      -	if emitter.write_handler != nil {
      -		panic("must set the output target only once")
      -	}
      -	emitter.write_handler = yaml_string_write_handler
      -	emitter.output_buffer = output_buffer
      -}
      -
      -// Set a file output.
      -func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
      -	if emitter.write_handler != nil {
      -		panic("must set the output target only once")
      -	}
      -	emitter.write_handler = yaml_file_write_handler
      -	emitter.output_file = file
      -}
      -
      -// Set the output encoding.
      -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
      -	if emitter.encoding != yaml_ANY_ENCODING {
      -		panic("must set the output encoding only once")
      -	}
      -	emitter.encoding = encoding
      -}
      -
      -// Set the canonical output style.
      -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
      -	emitter.canonical = canonical
      -}
      -
      -//// Set the indentation increment.
      -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
      -	if indent < 2 || indent > 9 {
      -		indent = 2
      -	}
      -	emitter.best_indent = indent
      -}
      -
      -// Set the preferred line width.
      -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
      -	if width < 0 {
      -		width = -1
      -	}
      -	emitter.best_width = width
      -}
      -
      -// Set if unescaped non-ASCII characters are allowed.
      -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
      -	emitter.unicode = unicode
      -}
      -
      -// Set the preferred line break character.
      -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
      -	emitter.line_break = line_break
      -}
      -
      -///*
      -// * Destroy a token object.
      -// */
      -//
      -//YAML_DECLARE(void)
      -//yaml_token_delete(yaml_token_t *token)
      -//{
      -//    assert(token);  // Non-NULL token object expected.
      -//
      -//    switch (token.type)
      -//    {
      -//        case YAML_TAG_DIRECTIVE_TOKEN:
      -//            yaml_free(token.data.tag_directive.handle);
      -//            yaml_free(token.data.tag_directive.prefix);
      -//            break;
      -//
      -//        case YAML_ALIAS_TOKEN:
      -//            yaml_free(token.data.alias.value);
      -//            break;
      -//
      -//        case YAML_ANCHOR_TOKEN:
      -//            yaml_free(token.data.anchor.value);
      -//            break;
      -//
      -//        case YAML_TAG_TOKEN:
      -//            yaml_free(token.data.tag.handle);
      -//            yaml_free(token.data.tag.suffix);
      -//            break;
      -//
      -//        case YAML_SCALAR_TOKEN:
      -//            yaml_free(token.data.scalar.value);
      -//            break;
      -//
      -//        default:
      -//            break;
      -//    }
      -//
      -//    memset(token, 0, sizeof(yaml_token_t));
      -//}
      -//
      -///*
      -// * Check if a string is a valid UTF-8 sequence.
      -// *
      -// * Check 'reader.c' for more details on UTF-8 encoding.
      -// */
      -//
      -//static int
      -//yaml_check_utf8(yaml_char_t *start, size_t length)
      -//{
      -//    yaml_char_t *end = start+length;
      -//    yaml_char_t *pointer = start;
      -//
      -//    while (pointer < end) {
      -//        unsigned char octet;
      -//        unsigned int width;
      -//        unsigned int value;
      -//        size_t k;
      -//
      -//        octet = pointer[0];
      -//        width = (octet & 0x80) == 0x00 ? 1 :
      -//                (octet & 0xE0) == 0xC0 ? 2 :
      -//                (octet & 0xF0) == 0xE0 ? 3 :
      -//                (octet & 0xF8) == 0xF0 ? 4 : 0;
      -//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
      -//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
      -//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
      -//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
      -//        if (!width) return 0;
      -//        if (pointer+width > end) return 0;
      -//        for (k = 1; k < width; k ++) {
      -//            octet = pointer[k];
      -//            if ((octet & 0xC0) != 0x80) return 0;
      -//            value = (value << 6) + (octet & 0x3F);
      -//        }
      -//        if (!((width == 1) ||
      -//            (width == 2 && value >= 0x80) ||
      -//            (width == 3 && value >= 0x800) ||
      -//            (width == 4 && value >= 0x10000))) return 0;
      -//
      -//        pointer += width;
      -//    }
      -//
      -//    return 1;
      -//}
      -//
      -
      -// Create STREAM-START.
      -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
      -	*event = yaml_event_t{
      -		typ:      yaml_STREAM_START_EVENT,
      -		encoding: encoding,
      -	}
      -	return true
      -}
      -
      -// Create STREAM-END.
      -func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
      -	*event = yaml_event_t{
      -		typ: yaml_STREAM_END_EVENT,
      -	}
      -	return true
      -}
      -
      -// Create DOCUMENT-START.
      -func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
      -	tag_directives []yaml_tag_directive_t, implicit bool) bool {
      -	*event = yaml_event_t{
      -		typ:               yaml_DOCUMENT_START_EVENT,
      -		version_directive: version_directive,
      -		tag_directives:    tag_directives,
      -		implicit:          implicit,
      -	}
      -	return true
      -}
      -
      -// Create DOCUMENT-END.
      -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
      -	*event = yaml_event_t{
      -		typ:      yaml_DOCUMENT_END_EVENT,
      -		implicit: implicit,
      -	}
      -	return true
      -}
      -
      -///*
      -// * Create ALIAS.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
      -//{
      -//    mark yaml_mark_t = { 0, 0, 0 }
      -//    anchor_copy *yaml_char_t = NULL
      -//
      -//    assert(event) // Non-NULL event object is expected.
      -//    assert(anchor) // Non-NULL anchor is expected.
      -//
      -//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
      -//
      -//    anchor_copy = yaml_strdup(anchor)
      -//    if (!anchor_copy)
      -//        return 0
      -//
      -//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
      -//
      -//    return 1
      -//}
      -
      -// Create SCALAR.
      -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
      -	*event = yaml_event_t{
      -		typ:             yaml_SCALAR_EVENT,
      -		anchor:          anchor,
      -		tag:             tag,
      -		value:           value,
      -		implicit:        plain_implicit,
      -		quoted_implicit: quoted_implicit,
      -		style:           yaml_style_t(style),
      -	}
      -	return true
      -}
      -
      -// Create SEQUENCE-START.
      -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
      -	*event = yaml_event_t{
      -		typ:      yaml_SEQUENCE_START_EVENT,
      -		anchor:   anchor,
      -		tag:      tag,
      -		implicit: implicit,
      -		style:    yaml_style_t(style),
      -	}
      -	return true
      -}
      -
      -// Create SEQUENCE-END.
      -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
      -	*event = yaml_event_t{
      -		typ: yaml_SEQUENCE_END_EVENT,
      -	}
      -	return true
      -}
      -
      -// Create MAPPING-START.
      -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
      -	*event = yaml_event_t{
      -		typ:      yaml_MAPPING_START_EVENT,
      -		anchor:   anchor,
      -		tag:      tag,
      -		implicit: implicit,
      -		style:    yaml_style_t(style),
      -	}
      -	return true
      -}
      -
      -// Create MAPPING-END.
      -func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
      -	*event = yaml_event_t{
      -		typ: yaml_MAPPING_END_EVENT,
      -	}
      -	return true
      -}
      -
      -// Destroy an event object.
      -func yaml_event_delete(event *yaml_event_t) {
      -	*event = yaml_event_t{}
      -}
      -
      -///*
      -// * Create a document object.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_document_initialize(document *yaml_document_t,
      -//        version_directive *yaml_version_directive_t,
      -//        tag_directives_start *yaml_tag_directive_t,
      -//        tag_directives_end *yaml_tag_directive_t,
      -//        start_implicit int, end_implicit int)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//    struct {
      -//        start *yaml_node_t
      -//        end *yaml_node_t
      -//        top *yaml_node_t
      -//    } nodes = { NULL, NULL, NULL }
      -//    version_directive_copy *yaml_version_directive_t = NULL
      -//    struct {
      -//        start *yaml_tag_directive_t
      -//        end *yaml_tag_directive_t
      -//        top *yaml_tag_directive_t
      -//    } tag_directives_copy = { NULL, NULL, NULL }
      -//    value yaml_tag_directive_t = { NULL, NULL }
      -//    mark yaml_mark_t = { 0, 0, 0 }
      -//
      -//    assert(document) // Non-NULL document object is expected.
      -//    assert((tag_directives_start && tag_directives_end) ||
      -//            (tag_directives_start == tag_directives_end))
      -//                            // Valid tag directives are expected.
      -//
      -//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
      -//
      -//    if (version_directive) {
      -//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
      -//        if (!version_directive_copy) goto error
      -//        version_directive_copy.major = version_directive.major
      -//        version_directive_copy.minor = version_directive.minor
      -//    }
      -//
      -//    if (tag_directives_start != tag_directives_end) {
      -//        tag_directive *yaml_tag_directive_t
      -//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
      -//            goto error
      -//        for (tag_directive = tag_directives_start
      -//                tag_directive != tag_directives_end; tag_directive ++) {
      -//            assert(tag_directive.handle)
      -//            assert(tag_directive.prefix)
      -//            if (!yaml_check_utf8(tag_directive.handle,
      -//                        strlen((char *)tag_directive.handle)))
      -//                goto error
      -//            if (!yaml_check_utf8(tag_directive.prefix,
      -//                        strlen((char *)tag_directive.prefix)))
      -//                goto error
      -//            value.handle = yaml_strdup(tag_directive.handle)
      -//            value.prefix = yaml_strdup(tag_directive.prefix)
      -//            if (!value.handle || !value.prefix) goto error
      -//            if (!PUSH(&context, tag_directives_copy, value))
      -//                goto error
      -//            value.handle = NULL
      -//            value.prefix = NULL
      -//        }
      -//    }
      -//
      -//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
      -//            tag_directives_copy.start, tag_directives_copy.top,
      -//            start_implicit, end_implicit, mark, mark)
      -//
      -//    return 1
      -//
      -//error:
      -//    STACK_DEL(&context, nodes)
      -//    yaml_free(version_directive_copy)
      -//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
      -//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
      -//        yaml_free(value.handle)
      -//        yaml_free(value.prefix)
      -//    }
      -//    STACK_DEL(&context, tag_directives_copy)
      -//    yaml_free(value.handle)
      -//    yaml_free(value.prefix)
      -//
      -//    return 0
      -//}
      -//
      -///*
      -// * Destroy a document object.
      -// */
      -//
      -//YAML_DECLARE(void)
      -//yaml_document_delete(document *yaml_document_t)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//    tag_directive *yaml_tag_directive_t
      -//
      -//    context.error = YAML_NO_ERROR // Eliminate a compliler warning.
      -//
      -//    assert(document) // Non-NULL document object is expected.
      -//
      -//    while (!STACK_EMPTY(&context, document.nodes)) {
      -//        node yaml_node_t = POP(&context, document.nodes)
      -//        yaml_free(node.tag)
      -//        switch (node.type) {
      -//            case YAML_SCALAR_NODE:
      -//                yaml_free(node.data.scalar.value)
      -//                break
      -//            case YAML_SEQUENCE_NODE:
      -//                STACK_DEL(&context, node.data.sequence.items)
      -//                break
      -//            case YAML_MAPPING_NODE:
      -//                STACK_DEL(&context, node.data.mapping.pairs)
      -//                break
      -//            default:
      -//                assert(0) // Should not happen.
      -//        }
      -//    }
      -//    STACK_DEL(&context, document.nodes)
      -//
      -//    yaml_free(document.version_directive)
      -//    for (tag_directive = document.tag_directives.start
      -//            tag_directive != document.tag_directives.end
      -//            tag_directive++) {
      -//        yaml_free(tag_directive.handle)
      -//        yaml_free(tag_directive.prefix)
      -//    }
      -//    yaml_free(document.tag_directives.start)
      -//
      -//    memset(document, 0, sizeof(yaml_document_t))
      -//}
      -//
      -///**
      -// * Get a document node.
      -// */
      -//
      -//YAML_DECLARE(yaml_node_t *)
      -//yaml_document_get_node(document *yaml_document_t, index int)
      -//{
      -//    assert(document) // Non-NULL document object is expected.
      -//
      -//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
      -//        return document.nodes.start + index - 1
      -//    }
      -//    return NULL
      -//}
      -//
      -///**
      -// * Get the root object.
      -// */
      -//
      -//YAML_DECLARE(yaml_node_t *)
      -//yaml_document_get_root_node(document *yaml_document_t)
      -//{
      -//    assert(document) // Non-NULL document object is expected.
      -//
      -//    if (document.nodes.top != document.nodes.start) {
      -//        return document.nodes.start
      -//    }
      -//    return NULL
      -//}
      -//
      -///*
      -// * Add a scalar node to a document.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_document_add_scalar(document *yaml_document_t,
      -//        tag *yaml_char_t, value *yaml_char_t, length int,
      -//        style yaml_scalar_style_t)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//    mark yaml_mark_t = { 0, 0, 0 }
      -//    tag_copy *yaml_char_t = NULL
      -//    value_copy *yaml_char_t = NULL
      -//    node yaml_node_t
      -//
      -//    assert(document) // Non-NULL document object is expected.
      -//    assert(value) // Non-NULL value is expected.
      -//
      -//    if (!tag) {
      -//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
      -//    }
      -//
      -//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
      -//    tag_copy = yaml_strdup(tag)
      -//    if (!tag_copy) goto error
      -//
      -//    if (length < 0) {
      -//        length = strlen((char *)value)
      -//    }
      -//
      -//    if (!yaml_check_utf8(value, length)) goto error
      -//    value_copy = yaml_malloc(length+1)
      -//    if (!value_copy) goto error
      -//    memcpy(value_copy, value, length)
      -//    value_copy[length] = '\0'
      -//
      -//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
      -//    if (!PUSH(&context, document.nodes, node)) goto error
      -//
      -//    return document.nodes.top - document.nodes.start
      -//
      -//error:
      -//    yaml_free(tag_copy)
      -//    yaml_free(value_copy)
      -//
      -//    return 0
      -//}
      -//
      -///*
      -// * Add a sequence node to a document.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_document_add_sequence(document *yaml_document_t,
      -//        tag *yaml_char_t, style yaml_sequence_style_t)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//    mark yaml_mark_t = { 0, 0, 0 }
      -//    tag_copy *yaml_char_t = NULL
      -//    struct {
      -//        start *yaml_node_item_t
      -//        end *yaml_node_item_t
      -//        top *yaml_node_item_t
      -//    } items = { NULL, NULL, NULL }
      -//    node yaml_node_t
      -//
      -//    assert(document) // Non-NULL document object is expected.
      -//
      -//    if (!tag) {
      -//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
      -//    }
      -//
      -//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
      -//    tag_copy = yaml_strdup(tag)
      -//    if (!tag_copy) goto error
      -//
      -//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
      -//
      -//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
      -//            style, mark, mark)
      -//    if (!PUSH(&context, document.nodes, node)) goto error
      -//
      -//    return document.nodes.top - document.nodes.start
      -//
      -//error:
      -//    STACK_DEL(&context, items)
      -//    yaml_free(tag_copy)
      -//
      -//    return 0
      -//}
      -//
      -///*
      -// * Add a mapping node to a document.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_document_add_mapping(document *yaml_document_t,
      -//        tag *yaml_char_t, style yaml_mapping_style_t)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//    mark yaml_mark_t = { 0, 0, 0 }
      -//    tag_copy *yaml_char_t = NULL
      -//    struct {
      -//        start *yaml_node_pair_t
      -//        end *yaml_node_pair_t
      -//        top *yaml_node_pair_t
      -//    } pairs = { NULL, NULL, NULL }
      -//    node yaml_node_t
      -//
      -//    assert(document) // Non-NULL document object is expected.
      -//
      -//    if (!tag) {
      -//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
      -//    }
      -//
      -//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
      -//    tag_copy = yaml_strdup(tag)
      -//    if (!tag_copy) goto error
      -//
      -//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
      -//
      -//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
      -//            style, mark, mark)
      -//    if (!PUSH(&context, document.nodes, node)) goto error
      -//
      -//    return document.nodes.top - document.nodes.start
      -//
      -//error:
      -//    STACK_DEL(&context, pairs)
      -//    yaml_free(tag_copy)
      -//
      -//    return 0
      -//}
      -//
      -///*
      -// * Append an item to a sequence node.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_document_append_sequence_item(document *yaml_document_t,
      -//        sequence int, item int)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//
      -//    assert(document) // Non-NULL document is required.
      -//    assert(sequence > 0
      -//            && document.nodes.start + sequence <= document.nodes.top)
      -//                            // Valid sequence id is required.
      -//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
      -//                            // A sequence node is required.
      -//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
      -//                            // Valid item id is required.
      -//
      -//    if (!PUSH(&context,
      -//                document.nodes.start[sequence-1].data.sequence.items, item))
      -//        return 0
      -//
      -//    return 1
      -//}
      -//
      -///*
      -// * Append a pair of a key and a value to a mapping node.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_document_append_mapping_pair(document *yaml_document_t,
      -//        mapping int, key int, value int)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//
      -//    pair yaml_node_pair_t
      -//
      -//    assert(document) // Non-NULL document is required.
      -//    assert(mapping > 0
      -//            && document.nodes.start + mapping <= document.nodes.top)
      -//                            // Valid mapping id is required.
      -//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
      -//                            // A mapping node is required.
      -//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
      -//                            // Valid key id is required.
      -//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
      -//                            // Valid value id is required.
      -//
      -//    pair.key = key
      -//    pair.value = value
      -//
      -//    if (!PUSH(&context,
      -//                document.nodes.start[mapping-1].data.mapping.pairs, pair))
      -//        return 0
      -//
      -//    return 1
      -//}
      -//
      -//
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/decode.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/decode.go
      deleted file mode 100644
      index 085cddc4..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/decode.go
      +++ /dev/null
      @@ -1,683 +0,0 @@
      -package yaml
      -
      -import (
      -	"encoding"
      -	"encoding/base64"
      -	"fmt"
      -	"math"
      -	"reflect"
      -	"strconv"
      -	"time"
      -)
      -
      -const (
      -	documentNode = 1 << iota
      -	mappingNode
      -	sequenceNode
      -	scalarNode
      -	aliasNode
      -)
      -
      -type node struct {
      -	kind         int
      -	line, column int
      -	tag          string
      -	value        string
      -	implicit     bool
      -	children     []*node
      -	anchors      map[string]*node
      -}
      -
      -// ----------------------------------------------------------------------------
      -// Parser, produces a node tree out of a libyaml event stream.
      -
      -type parser struct {
      -	parser yaml_parser_t
      -	event  yaml_event_t
      -	doc    *node
      -}
      -
      -func newParser(b []byte) *parser {
      -	p := parser{}
      -	if !yaml_parser_initialize(&p.parser) {
      -		panic("failed to initialize YAML emitter")
      -	}
      -
      -	if len(b) == 0 {
      -		b = []byte{'\n'}
      -	}
      -
      -	yaml_parser_set_input_string(&p.parser, b)
      -
      -	p.skip()
      -	if p.event.typ != yaml_STREAM_START_EVENT {
      -		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
      -	}
      -	p.skip()
      -	return &p
      -}
      -
      -func (p *parser) destroy() {
      -	if p.event.typ != yaml_NO_EVENT {
      -		yaml_event_delete(&p.event)
      -	}
      -	yaml_parser_delete(&p.parser)
      -}
      -
      -func (p *parser) skip() {
      -	if p.event.typ != yaml_NO_EVENT {
      -		if p.event.typ == yaml_STREAM_END_EVENT {
      -			failf("attempted to go past the end of stream; corrupted value?")
      -		}
      -		yaml_event_delete(&p.event)
      -	}
      -	if !yaml_parser_parse(&p.parser, &p.event) {
      -		p.fail()
      -	}
      -}
      -
      -func (p *parser) fail() {
      -	var where string
      -	var line int
      -	if p.parser.problem_mark.line != 0 {
      -		line = p.parser.problem_mark.line
      -	} else if p.parser.context_mark.line != 0 {
      -		line = p.parser.context_mark.line
      -	}
      -	if line != 0 {
      -		where = "line " + strconv.Itoa(line) + ": "
      -	}
      -	var msg string
      -	if len(p.parser.problem) > 0 {
      -		msg = p.parser.problem
      -	} else {
      -		msg = "unknown problem parsing YAML content"
      -	}
      -	failf("%s%s", where, msg)
      -}
      -
      -func (p *parser) anchor(n *node, anchor []byte) {
      -	if anchor != nil {
      -		p.doc.anchors[string(anchor)] = n
      -	}
      -}
      -
      -func (p *parser) parse() *node {
      -	switch p.event.typ {
      -	case yaml_SCALAR_EVENT:
      -		return p.scalar()
      -	case yaml_ALIAS_EVENT:
      -		return p.alias()
      -	case yaml_MAPPING_START_EVENT:
      -		return p.mapping()
      -	case yaml_SEQUENCE_START_EVENT:
      -		return p.sequence()
      -	case yaml_DOCUMENT_START_EVENT:
      -		return p.document()
      -	case yaml_STREAM_END_EVENT:
      -		// Happens when attempting to decode an empty buffer.
      -		return nil
      -	default:
      -		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
      -	}
      -	panic("unreachable")
      -}
      -
      -func (p *parser) node(kind int) *node {
      -	return &node{
      -		kind:   kind,
      -		line:   p.event.start_mark.line,
      -		column: p.event.start_mark.column,
      -	}
      -}
      -
      -func (p *parser) document() *node {
      -	n := p.node(documentNode)
      -	n.anchors = make(map[string]*node)
      -	p.doc = n
      -	p.skip()
      -	n.children = append(n.children, p.parse())
      -	if p.event.typ != yaml_DOCUMENT_END_EVENT {
      -		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
      -	}
      -	p.skip()
      -	return n
      -}
      -
      -func (p *parser) alias() *node {
      -	n := p.node(aliasNode)
      -	n.value = string(p.event.anchor)
      -	p.skip()
      -	return n
      -}
      -
      -func (p *parser) scalar() *node {
      -	n := p.node(scalarNode)
      -	n.value = string(p.event.value)
      -	n.tag = string(p.event.tag)
      -	n.implicit = p.event.implicit
      -	p.anchor(n, p.event.anchor)
      -	p.skip()
      -	return n
      -}
      -
      -func (p *parser) sequence() *node {
      -	n := p.node(sequenceNode)
      -	p.anchor(n, p.event.anchor)
      -	p.skip()
      -	for p.event.typ != yaml_SEQUENCE_END_EVENT {
      -		n.children = append(n.children, p.parse())
      -	}
      -	p.skip()
      -	return n
      -}
      -
      -func (p *parser) mapping() *node {
      -	n := p.node(mappingNode)
      -	p.anchor(n, p.event.anchor)
      -	p.skip()
      -	for p.event.typ != yaml_MAPPING_END_EVENT {
      -		n.children = append(n.children, p.parse(), p.parse())
      -	}
      -	p.skip()
      -	return n
      -}
      -
      -// ----------------------------------------------------------------------------
      -// Decoder, unmarshals a node into a provided value.
      -
      -type decoder struct {
      -	doc     *node
      -	aliases map[string]bool
      -	mapType reflect.Type
      -	terrors []string
      -}
      -
      -var (
      -	mapItemType    = reflect.TypeOf(MapItem{})
      -	durationType   = reflect.TypeOf(time.Duration(0))
      -	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
      -	ifaceType      = defaultMapType.Elem()
      -)
      -
      -func newDecoder() *decoder {
      -	d := &decoder{mapType: defaultMapType}
      -	d.aliases = make(map[string]bool)
      -	return d
      -}
      -
      -func (d *decoder) terror(n *node, tag string, out reflect.Value) {
      -	if n.tag != "" {
      -		tag = n.tag
      -	}
      -	value := n.value
      -	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
      -		if len(value) > 10 {
      -			value = " `" + value[:7] + "...`"
      -		} else {
      -			value = " `" + value + "`"
      -		}
      -	}
      -	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
      -}
      -
      -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
      -	terrlen := len(d.terrors)
      -	err := u.UnmarshalYAML(func(v interface{}) (err error) {
      -		defer handleErr(&err)
      -		d.unmarshal(n, reflect.ValueOf(v))
      -		if len(d.terrors) > terrlen {
      -			issues := d.terrors[terrlen:]
      -			d.terrors = d.terrors[:terrlen]
      -			return &TypeError{issues}
      -		}
      -		return nil
      -	})
      -	if e, ok := err.(*TypeError); ok {
      -		d.terrors = append(d.terrors, e.Errors...)
      -		return false
      -	}
      -	if err != nil {
      -		fail(err)
      -	}
      -	return true
      -}
      -
      -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
      -// if a value is found to implement it.
      -// It returns the initialized and dereferenced out value, whether
      -// unmarshalling was already done by UnmarshalYAML, and if so whether
      -// its types unmarshalled appropriately.
      -//
      -// If n holds a null value, prepare returns before doing anything.
      -func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
      -	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
      -		return out, false, false
      -	}
      -	again := true
      -	for again {
      -		again = false
      -		if out.Kind() == reflect.Ptr {
      -			if out.IsNil() {
      -				out.Set(reflect.New(out.Type().Elem()))
      -			}
      -			out = out.Elem()
      -			again = true
      -		}
      -		if out.CanAddr() {
      -			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
      -				good = d.callUnmarshaler(n, u)
      -				return out, true, good
      -			}
      -		}
      -	}
      -	return out, false, false
      -}
      -
      -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
      -	switch n.kind {
      -	case documentNode:
      -		return d.document(n, out)
      -	case aliasNode:
      -		return d.alias(n, out)
      -	}
      -	out, unmarshaled, good := d.prepare(n, out)
      -	if unmarshaled {
      -		return good
      -	}
      -	switch n.kind {
      -	case scalarNode:
      -		good = d.scalar(n, out)
      -	case mappingNode:
      -		good = d.mapping(n, out)
      -	case sequenceNode:
      -		good = d.sequence(n, out)
      -	default:
      -		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
      -	}
      -	return good
      -}
      -
      -func (d *decoder) document(n *node, out reflect.Value) (good bool) {
      -	if len(n.children) == 1 {
      -		d.doc = n
      -		d.unmarshal(n.children[0], out)
      -		return true
      -	}
      -	return false
      -}
      -
      -func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
      -	an, ok := d.doc.anchors[n.value]
      -	if !ok {
      -		failf("unknown anchor '%s' referenced", n.value)
      -	}
      -	if d.aliases[n.value] {
      -		failf("anchor '%s' value contains itself", n.value)
      -	}
      -	d.aliases[n.value] = true
      -	good = d.unmarshal(an, out)
      -	delete(d.aliases, n.value)
      -	return good
      -}
      -
      -var zeroValue reflect.Value
      -
      -func resetMap(out reflect.Value) {
      -	for _, k := range out.MapKeys() {
      -		out.SetMapIndex(k, zeroValue)
      -	}
      -}
      -
      -func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
      -	var tag string
      -	var resolved interface{}
      -	if n.tag == "" && !n.implicit {
      -		tag = yaml_STR_TAG
      -		resolved = n.value
      -	} else {
      -		tag, resolved = resolve(n.tag, n.value)
      -		if tag == yaml_BINARY_TAG {
      -			data, err := base64.StdEncoding.DecodeString(resolved.(string))
      -			if err != nil {
      -				failf("!!binary value contains invalid base64 data")
      -			}
      -			resolved = string(data)
      -		}
      -	}
      -	if resolved == nil {
      -		if out.Kind() == reflect.Map && !out.CanAddr() {
      -			resetMap(out)
      -		} else {
      -			out.Set(reflect.Zero(out.Type()))
      -		}
      -		return true
      -	}
      -	if s, ok := resolved.(string); ok && out.CanAddr() {
      -		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
      -			err := u.UnmarshalText([]byte(s))
      -			if err != nil {
      -				fail(err)
      -			}
      -			return true
      -		}
      -	}
      -	switch out.Kind() {
      -	case reflect.String:
      -		if tag == yaml_BINARY_TAG {
      -			out.SetString(resolved.(string))
      -			good = true
      -		} else if resolved != nil {
      -			out.SetString(n.value)
      -			good = true
      -		}
      -	case reflect.Interface:
      -		if resolved == nil {
      -			out.Set(reflect.Zero(out.Type()))
      -		} else {
      -			out.Set(reflect.ValueOf(resolved))
      -		}
      -		good = true
      -	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
      -		switch resolved := resolved.(type) {
      -		case int:
      -			if !out.OverflowInt(int64(resolved)) {
      -				out.SetInt(int64(resolved))
      -				good = true
      -			}
      -		case int64:
      -			if !out.OverflowInt(resolved) {
      -				out.SetInt(resolved)
      -				good = true
      -			}
      -		case uint64:
      -			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
      -				out.SetInt(int64(resolved))
      -				good = true
      -			}
      -		case float64:
      -			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
      -				out.SetInt(int64(resolved))
      -				good = true
      -			}
      -		case string:
      -			if out.Type() == durationType {
      -				d, err := time.ParseDuration(resolved)
      -				if err == nil {
      -					out.SetInt(int64(d))
      -					good = true
      -				}
      -			}
      -		}
      -	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
      -		switch resolved := resolved.(type) {
      -		case int:
      -			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
      -				out.SetUint(uint64(resolved))
      -				good = true
      -			}
      -		case int64:
      -			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
      -				out.SetUint(uint64(resolved))
      -				good = true
      -			}
      -		case uint64:
      -			if !out.OverflowUint(uint64(resolved)) {
      -				out.SetUint(uint64(resolved))
      -				good = true
      -			}
      -		case float64:
      -			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
      -				out.SetUint(uint64(resolved))
      -				good = true
      -			}
      -		}
      -	case reflect.Bool:
      -		switch resolved := resolved.(type) {
      -		case bool:
      -			out.SetBool(resolved)
      -			good = true
      -		}
      -	case reflect.Float32, reflect.Float64:
      -		switch resolved := resolved.(type) {
      -		case int:
      -			out.SetFloat(float64(resolved))
      -			good = true
      -		case int64:
      -			out.SetFloat(float64(resolved))
      -			good = true
      -		case uint64:
      -			out.SetFloat(float64(resolved))
      -			good = true
      -		case float64:
      -			out.SetFloat(resolved)
      -			good = true
      -		}
      -	case reflect.Ptr:
      -		if out.Type().Elem() == reflect.TypeOf(resolved) {
      -			// TODO DOes this make sense? When is out a Ptr except when decoding a nil value?
      -			elem := reflect.New(out.Type().Elem())
      -			elem.Elem().Set(reflect.ValueOf(resolved))
      -			out.Set(elem)
      -			good = true
      -		}
      -	}
      -	if !good {
      -		d.terror(n, tag, out)
      -	}
      -	return good
      -}
      -
      -func settableValueOf(i interface{}) reflect.Value {
      -	v := reflect.ValueOf(i)
      -	sv := reflect.New(v.Type()).Elem()
      -	sv.Set(v)
      -	return sv
      -}
      -
      -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
      -	l := len(n.children)
      -
      -	var iface reflect.Value
      -	switch out.Kind() {
      -	case reflect.Slice:
      -		out.Set(reflect.MakeSlice(out.Type(), l, l))
      -	case reflect.Interface:
      -		// No type hints. Will have to use a generic sequence.
      -		iface = out
      -		out = settableValueOf(make([]interface{}, l))
      -	default:
      -		d.terror(n, yaml_SEQ_TAG, out)
      -		return false
      -	}
      -	et := out.Type().Elem()
      -
      -	j := 0
      -	for i := 0; i < l; i++ {
      -		e := reflect.New(et).Elem()
      -		if ok := d.unmarshal(n.children[i], e); ok {
      -			out.Index(j).Set(e)
      -			j++
      -		}
      -	}
      -	out.Set(out.Slice(0, j))
      -	if iface.IsValid() {
      -		iface.Set(out)
      -	}
      -	return true
      -}
      -
      -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
      -	switch out.Kind() {
      -	case reflect.Struct:
      -		return d.mappingStruct(n, out)
      -	case reflect.Slice:
      -		return d.mappingSlice(n, out)
      -	case reflect.Map:
      -		// okay
      -	case reflect.Interface:
      -		if d.mapType.Kind() == reflect.Map {
      -			iface := out
      -			out = reflect.MakeMap(d.mapType)
      -			iface.Set(out)
      -		} else {
      -			slicev := reflect.New(d.mapType).Elem()
      -			if !d.mappingSlice(n, slicev) {
      -				return false
      -			}
      -			out.Set(slicev)
      -			return true
      -		}
      -	default:
      -		d.terror(n, yaml_MAP_TAG, out)
      -		return false
      -	}
      -	outt := out.Type()
      -	kt := outt.Key()
      -	et := outt.Elem()
      -
      -	mapType := d.mapType
      -	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
      -		d.mapType = outt
      -	}
      -
      -	if out.IsNil() {
      -		out.Set(reflect.MakeMap(outt))
      -	}
      -	l := len(n.children)
      -	for i := 0; i < l; i += 2 {
      -		if isMerge(n.children[i]) {
      -			d.merge(n.children[i+1], out)
      -			continue
      -		}
      -		k := reflect.New(kt).Elem()
      -		if d.unmarshal(n.children[i], k) {
      -			kkind := k.Kind()
      -			if kkind == reflect.Interface {
      -				kkind = k.Elem().Kind()
      -			}
      -			if kkind == reflect.Map || kkind == reflect.Slice {
      -				failf("invalid map key: %#v", k.Interface())
      -			}
      -			e := reflect.New(et).Elem()
      -			if d.unmarshal(n.children[i+1], e) {
      -				out.SetMapIndex(k, e)
      -			}
      -		}
      -	}
      -	d.mapType = mapType
      -	return true
      -}
      -
      -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
      -	outt := out.Type()
      -	if outt.Elem() != mapItemType {
      -		d.terror(n, yaml_MAP_TAG, out)
      -		return false
      -	}
      -
      -	mapType := d.mapType
      -	d.mapType = outt
      -
      -	var slice []MapItem
      -	var l = len(n.children)
      -	for i := 0; i < l; i += 2 {
      -		if isMerge(n.children[i]) {
      -			d.merge(n.children[i+1], out)
      -			continue
      -		}
      -		item := MapItem{}
      -		k := reflect.ValueOf(&item.Key).Elem()
      -		if d.unmarshal(n.children[i], k) {
      -			v := reflect.ValueOf(&item.Value).Elem()
      -			if d.unmarshal(n.children[i+1], v) {
      -				slice = append(slice, item)
      -			}
      -		}
      -	}
      -	out.Set(reflect.ValueOf(slice))
      -	d.mapType = mapType
      -	return true
      -}
      -
      -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
      -	sinfo, err := getStructInfo(out.Type())
      -	if err != nil {
      -		panic(err)
      -	}
      -	name := settableValueOf("")
      -	l := len(n.children)
      -
      -	var inlineMap reflect.Value
      -	var elemType reflect.Type
      -	if sinfo.InlineMap != -1 {
      -		inlineMap = out.Field(sinfo.InlineMap)
      -		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
      -		elemType = inlineMap.Type().Elem()
      -	}
      -
      -	for i := 0; i < l; i += 2 {
      -		ni := n.children[i]
      -		if isMerge(ni) {
      -			d.merge(n.children[i+1], out)
      -			continue
      -		}
      -		if !d.unmarshal(ni, name) {
      -			continue
      -		}
      -		if info, ok := sinfo.FieldsMap[name.String()]; ok {
      -			var field reflect.Value
      -			if info.Inline == nil {
      -				field = out.Field(info.Num)
      -			} else {
      -				field = out.FieldByIndex(info.Inline)
      -			}
      -			d.unmarshal(n.children[i+1], field)
      -		} else if sinfo.InlineMap != -1 {
      -			if inlineMap.IsNil() {
      -				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
      -			}
      -			value := reflect.New(elemType).Elem()
      -			d.unmarshal(n.children[i+1], value)
      -			inlineMap.SetMapIndex(name, value)
      -		}
      -	}
      -	return true
      -}
      -
      -func failWantMap() {
      -	failf("map merge requires map or sequence of maps as the value")
      -}
      -
      -func (d *decoder) merge(n *node, out reflect.Value) {
      -	switch n.kind {
      -	case mappingNode:
      -		d.unmarshal(n, out)
      -	case aliasNode:
      -		an, ok := d.doc.anchors[n.value]
      -		if ok && an.kind != mappingNode {
      -			failWantMap()
      -		}
      -		d.unmarshal(n, out)
      -	case sequenceNode:
      -		// Step backwards as earlier nodes take precedence.
      -		for i := len(n.children) - 1; i >= 0; i-- {
      -			ni := n.children[i]
      -			if ni.kind == aliasNode {
      -				an, ok := d.doc.anchors[ni.value]
      -				if ok && an.kind != mappingNode {
      -					failWantMap()
      -				}
      -			} else if ni.kind != mappingNode {
      -				failWantMap()
      -			}
      -			d.unmarshal(ni, out)
      -		}
      -	default:
      -		failWantMap()
      -	}
      -}
      -
      -func isMerge(n *node) bool {
      -	return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/decode_test.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/decode_test.go
      deleted file mode 100644
      index 04fdd9e7..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/decode_test.go
      +++ /dev/null
      @@ -1,966 +0,0 @@
      -package yaml_test
      -
      -import (
      -	"errors"
      -	. "gopkg.in/check.v1"
      -	"gopkg.in/yaml.v2"
      -	"math"
      -	"net"
      -	"reflect"
      -	"strings"
      -	"time"
      -)
      -
      -var unmarshalIntTest = 123
      -
      -var unmarshalTests = []struct {
      -	data  string
      -	value interface{}
      -}{
      -	{
      -		"",
      -		&struct{}{},
      -	}, {
      -		"{}", &struct{}{},
      -	}, {
      -		"v: hi",
      -		map[string]string{"v": "hi"},
      -	}, {
      -		"v: hi", map[string]interface{}{"v": "hi"},
      -	}, {
      -		"v: true",
      -		map[string]string{"v": "true"},
      -	}, {
      -		"v: true",
      -		map[string]interface{}{"v": true},
      -	}, {
      -		"v: 10",
      -		map[string]interface{}{"v": 10},
      -	}, {
      -		"v: 0b10",
      -		map[string]interface{}{"v": 2},
      -	}, {
      -		"v: 0xA",
      -		map[string]interface{}{"v": 10},
      -	}, {
      -		"v: 4294967296",
      -		map[string]int64{"v": 4294967296},
      -	}, {
      -		"v: 0.1",
      -		map[string]interface{}{"v": 0.1},
      -	}, {
      -		"v: .1",
      -		map[string]interface{}{"v": 0.1},
      -	}, {
      -		"v: .Inf",
      -		map[string]interface{}{"v": math.Inf(+1)},
      -	}, {
      -		"v: -.Inf",
      -		map[string]interface{}{"v": math.Inf(-1)},
      -	}, {
      -		"v: -10",
      -		map[string]interface{}{"v": -10},
      -	}, {
      -		"v: -.1",
      -		map[string]interface{}{"v": -0.1},
      -	},
      -
      -	// Simple values.
      -	{
      -		"123",
      -		&unmarshalIntTest,
      -	},
      -
      -	// Floats from spec
      -	{
      -		"canonical: 6.8523e+5",
      -		map[string]interface{}{"canonical": 6.8523e+5},
      -	}, {
      -		"expo: 685.230_15e+03",
      -		map[string]interface{}{"expo": 685.23015e+03},
      -	}, {
      -		"fixed: 685_230.15",
      -		map[string]interface{}{"fixed": 685230.15},
      -	}, {
      -		"neginf: -.inf",
      -		map[string]interface{}{"neginf": math.Inf(-1)},
      -	}, {
      -		"fixed: 685_230.15",
      -		map[string]float64{"fixed": 685230.15},
      -	},
      -	//{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
      -	//{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
      -
      -	// Bools from spec
      -	{
      -		"canonical: y",
      -		map[string]interface{}{"canonical": true},
      -	}, {
      -		"answer: NO",
      -		map[string]interface{}{"answer": false},
      -	}, {
      -		"logical: True",
      -		map[string]interface{}{"logical": true},
      -	}, {
      -		"option: on",
      -		map[string]interface{}{"option": true},
      -	}, {
      -		"option: on",
      -		map[string]bool{"option": true},
      -	},
      -	// Ints from spec
      -	{
      -		"canonical: 685230",
      -		map[string]interface{}{"canonical": 685230},
      -	}, {
      -		"decimal: +685_230",
      -		map[string]interface{}{"decimal": 685230},
      -	}, {
      -		"octal: 02472256",
      -		map[string]interface{}{"octal": 685230},
      -	}, {
      -		"hexa: 0x_0A_74_AE",
      -		map[string]interface{}{"hexa": 685230},
      -	}, {
      -		"bin: 0b1010_0111_0100_1010_1110",
      -		map[string]interface{}{"bin": 685230},
      -	}, {
      -		"bin: -0b101010",
      -		map[string]interface{}{"bin": -42},
      -	}, {
      -		"decimal: +685_230",
      -		map[string]int{"decimal": 685230},
      -	},
      -
      -	//{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
      -
      -	// Nulls from spec
      -	{
      -		"empty:",
      -		map[string]interface{}{"empty": nil},
      -	}, {
      -		"canonical: ~",
      -		map[string]interface{}{"canonical": nil},
      -	}, {
      -		"english: null",
      -		map[string]interface{}{"english": nil},
      -	}, {
      -		"~: null key",
      -		map[interface{}]string{nil: "null key"},
      -	}, {
      -		"empty:",
      -		map[string]*bool{"empty": nil},
      -	},
      -
      -	// Flow sequence
      -	{
      -		"seq: [A,B]",
      -		map[string]interface{}{"seq": []interface{}{"A", "B"}},
      -	}, {
      -		"seq: [A,B,C,]",
      -		map[string][]string{"seq": []string{"A", "B", "C"}},
      -	}, {
      -		"seq: [A,1,C]",
      -		map[string][]string{"seq": []string{"A", "1", "C"}},
      -	}, {
      -		"seq: [A,1,C]",
      -		map[string][]int{"seq": []int{1}},
      -	}, {
      -		"seq: [A,1,C]",
      -		map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
      -	},
      -	// Block sequence
      -	{
      -		"seq:\n - A\n - B",
      -		map[string]interface{}{"seq": []interface{}{"A", "B"}},
      -	}, {
      -		"seq:\n - A\n - B\n - C",
      -		map[string][]string{"seq": []string{"A", "B", "C"}},
      -	}, {
      -		"seq:\n - A\n - 1\n - C",
      -		map[string][]string{"seq": []string{"A", "1", "C"}},
      -	}, {
      -		"seq:\n - A\n - 1\n - C",
      -		map[string][]int{"seq": []int{1}},
      -	}, {
      -		"seq:\n - A\n - 1\n - C",
      -		map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
      -	},
      -
      -	// Literal block scalar
      -	{
      -		"scalar: | # Comment\n\n literal\n\n \ttext\n\n",
      -		map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
      -	},
      -
      -	// Folded block scalar
      -	{
      -		"scalar: > # Comment\n\n folded\n line\n \n next\n line\n  * one\n  * two\n\n last\n line\n\n",
      -		map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
      -	},
      -
      -	// Map inside interface with no type hints.
      -	{
      -		"a: {b: c}",
      -		map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
      -	},
      -
      -	// Structs and type conversions.
      -	{
      -		"hello: world",
      -		&struct{ Hello string }{"world"},
      -	}, {
      -		"a: {b: c}",
      -		&struct{ A struct{ B string } }{struct{ B string }{"c"}},
      -	}, {
      -		"a: {b: c}",
      -		&struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
      -	}, {
      -		"a: {b: c}",
      -		&struct{ A map[string]string }{map[string]string{"b": "c"}},
      -	}, {
      -		"a: {b: c}",
      -		&struct{ A *map[string]string }{&map[string]string{"b": "c"}},
      -	}, {
      -		"a:",
      -		&struct{ A map[string]string }{},
      -	}, {
      -		"a: 1",
      -		&struct{ A int }{1},
      -	}, {
      -		"a: 1",
      -		&struct{ A float64 }{1},
      -	}, {
      -		"a: 1.0",
      -		&struct{ A int }{1},
      -	}, {
      -		"a: 1.0",
      -		&struct{ A uint }{1},
      -	}, {
      -		"a: [1, 2]",
      -		&struct{ A []int }{[]int{1, 2}},
      -	}, {
      -		"a: 1",
      -		&struct{ B int }{0},
      -	}, {
      -		"a: 1",
      -		&struct {
      -			B int "a"
      -		}{1},
      -	}, {
      -		"a: y",
      -		&struct{ A bool }{true},
      -	},
      -
      -	// Some cross type conversions
      -	{
      -		"v: 42",
      -		map[string]uint{"v": 42},
      -	}, {
      -		"v: -42",
      -		map[string]uint{},
      -	}, {
      -		"v: 4294967296",
      -		map[string]uint64{"v": 4294967296},
      -	}, {
      -		"v: -4294967296",
      -		map[string]uint64{},
      -	},
      -
      -	// int
      -	{
      -		"int_max: 2147483647",
      -		map[string]int{"int_max": math.MaxInt32},
      -	},
      -	{
      -		"int_min: -2147483648",
      -		map[string]int{"int_min": math.MinInt32},
      -	},
      -	{
      -		"int_overflow: 9223372036854775808", // math.MaxInt64 + 1
      -		map[string]int{},
      -	},
      -
      -	// int64
      -	{
      -		"int64_max: 9223372036854775807",
      -		map[string]int64{"int64_max": math.MaxInt64},
      -	},
      -	{
      -		"int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111",
      -		map[string]int64{"int64_max_base2": math.MaxInt64},
      -	},
      -	{
      -		"int64_min: -9223372036854775808",
      -		map[string]int64{"int64_min": math.MinInt64},
      -	},
      -	{
      -		"int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111",
      -		map[string]int64{"int64_neg_base2": -math.MaxInt64},
      -	},
      -	{
      -		"int64_overflow: 9223372036854775808", // math.MaxInt64 + 1
      -		map[string]int64{},
      -	},
      -
      -	// uint
      -	{
      -		"uint_min: 0",
      -		map[string]uint{"uint_min": 0},
      -	},
      -	{
      -		"uint_max: 4294967295",
      -		map[string]uint{"uint_max": math.MaxUint32},
      -	},
      -	{
      -		"uint_underflow: -1",
      -		map[string]uint{},
      -	},
      -
      -	// uint64
      -	{
      -		"uint64_min: 0",
      -		map[string]uint{"uint64_min": 0},
      -	},
      -	{
      -		"uint64_max: 18446744073709551615",
      -		map[string]uint64{"uint64_max": math.MaxUint64},
      -	},
      -	{
      -		"uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111",
      -		map[string]uint64{"uint64_max_base2": math.MaxUint64},
      -	},
      -	{
      -		"uint64_maxint64: 9223372036854775807",
      -		map[string]uint64{"uint64_maxint64": math.MaxInt64},
      -	},
      -	{
      -		"uint64_underflow: -1",
      -		map[string]uint64{},
      -	},
      -
      -	// float32
      -	{
      -		"float32_max: 3.40282346638528859811704183484516925440e+38",
      -		map[string]float32{"float32_max": math.MaxFloat32},
      -	},
      -	{
      -		"float32_nonzero: 1.401298464324817070923729583289916131280e-45",
      -		map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32},
      -	},
      -	{
      -		"float32_maxuint64: 18446744073709551615",
      -		map[string]float32{"float32_maxuint64": float32(math.MaxUint64)},
      -	},
      -	{
      -		"float32_maxuint64+1: 18446744073709551616",
      -		map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)},
      -	},
      -
      -	// float64
      -	{
      -		"float64_max: 1.797693134862315708145274237317043567981e+308",
      -		map[string]float64{"float64_max": math.MaxFloat64},
      -	},
      -	{
      -		"float64_nonzero: 4.940656458412465441765687928682213723651e-324",
      -		map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64},
      -	},
      -	{
      -		"float64_maxuint64: 18446744073709551615",
      -		map[string]float64{"float64_maxuint64": float64(math.MaxUint64)},
      -	},
      -	{
      -		"float64_maxuint64+1: 18446744073709551616",
      -		map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)},
      -	},
      -
      -	// Overflow cases.
      -	{
      -		"v: 4294967297",
      -		map[string]int32{},
      -	}, {
      -		"v: 128",
      -		map[string]int8{},
      -	},
      -
      -	// Quoted values.
      -	{
      -		"'1': '\"2\"'",
      -		map[interface{}]interface{}{"1": "\"2\""},
      -	}, {
      -		"v:\n- A\n- 'B\n\n  C'\n",
      -		map[string][]string{"v": []string{"A", "B\nC"}},
      -	},
      -
      -	// Explicit tags.
      -	{
      -		"v: !!float '1.1'",
      -		map[string]interface{}{"v": 1.1},
      -	}, {
      -		"v: !!null ''",
      -		map[string]interface{}{"v": nil},
      -	}, {
      -		"%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
      -		map[string]interface{}{"v": 1},
      -	},
      -
      -	// Anchors and aliases.
      -	{
      -		"a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
      -		&struct{ A, B, C, D int }{1, 2, 1, 2},
      -	}, {
      -		"a: &a {c: 1}\nb: *a",
      -		&struct {
      -			A, B struct {
      -				C int
      -			}
      -		}{struct{ C int }{1}, struct{ C int }{1}},
      -	}, {
      -		"a: &a [1, 2]\nb: *a",
      -		&struct{ B []int }{[]int{1, 2}},
      -	}, {
      -		"b: *a\na: &a {c: 1}",
      -		&struct {
      -			A, B struct {
      -				C int
      -			}
      -		}{struct{ C int }{1}, struct{ C int }{1}},
      -	},
      -
      -	// Bug #1133337
      -	{
      -		"foo: ''",
      -		map[string]*string{"foo": new(string)},
      -	}, {
      -		"foo: null",
      -		map[string]string{"foo": ""},
      -	}, {
      -		"foo: null",
      -		map[string]interface{}{"foo": nil},
      -	},
      -
      -	// Ignored field
      -	{
      -		"a: 1\nb: 2\n",
      -		&struct {
      -			A int
      -			B int "-"
      -		}{1, 0},
      -	},
      -
      -	// Bug #1191981
      -	{
      -		"" +
      -			"%YAML 1.1\n" +
      -			"--- !!str\n" +
      -			`"Generic line break (no glyph)\n\` + "\n" +
      -			` Generic line break (glyphed)\n\` + "\n" +
      -			` Line separator\u2028\` + "\n" +
      -			` Paragraph separator\u2029"` + "\n",
      -		"" +
      -			"Generic line break (no glyph)\n" +
      -			"Generic line break (glyphed)\n" +
      -			"Line separator\u2028Paragraph separator\u2029",
      -	},
      -
      -	// Struct inlining
      -	{
      -		"a: 1\nb: 2\nc: 3\n",
      -		&struct {
      -			A int
      -			C inlineB `yaml:",inline"`
      -		}{1, inlineB{2, inlineC{3}}},
      -	},
      -
      -	// Map inlining
      -	{
      -		"a: 1\nb: 2\nc: 3\n",
      -		&struct {
      -			A int
      -			C map[string]int `yaml:",inline"`
      -		}{1, map[string]int{"b": 2, "c": 3}},
      -	},
      -
      -	// bug 1243827
      -	{
      -		"a: -b_c",
      -		map[string]interface{}{"a": "-b_c"},
      -	},
      -	{
      -		"a: +b_c",
      -		map[string]interface{}{"a": "+b_c"},
      -	},
      -	{
      -		"a: 50cent_of_dollar",
      -		map[string]interface{}{"a": "50cent_of_dollar"},
      -	},
      -
      -	// Duration
      -	{
      -		"a: 3s",
      -		map[string]time.Duration{"a": 3 * time.Second},
      -	},
      -
      -	// Issue #24.
      -	{
      -		"a: <foo>",
      -		map[string]string{"a": "<foo>"},
      -	},
      -
      -	// Base 60 floats are obsolete and unsupported.
      -	{
      -		"a: 1:1\n",
      -		map[string]string{"a": "1:1"},
      -	},
      -
      -	// Binary data.
      -	{
      -		"a: !!binary gIGC\n",
      -		map[string]string{"a": "\x80\x81\x82"},
      -	}, {
      -		"a: !!binary |\n  " + strings.Repeat("kJCQ", 17) + "kJ\n  CQ\n",
      -		map[string]string{"a": strings.Repeat("\x90", 54)},
      -	}, {
      -		"a: !!binary |\n  " + strings.Repeat("A", 70) + "\n  ==\n",
      -		map[string]string{"a": strings.Repeat("\x00", 52)},
      -	},
      -
      -	// Ordered maps.
      -	{
      -		"{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}",
      -		&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
      -	},
      -
      -	// Issue #39.
      -	{
      -		"a:\n b:\n  c: d\n",
      -		map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}},
      -	},
      -
      -	// Custom map type.
      -	{
      -		"a: {b: c}",
      -		M{"a": M{"b": "c"}},
      -	},
      -
      -	// Support encoding.TextUnmarshaler.
      -	{
      -		"a: 1.2.3.4\n",
      -		map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
      -	},
      -	{
      -		"a: 2015-02-24T18:19:39Z\n",
      -		map[string]time.Time{"a": time.Unix(1424801979, 0)},
      -	},
      -
      -	// Encode empty lists as zero-length slices.
      -	{
      -		"a: []",
      -		&struct{ A []int }{[]int{}},
      -	},
      -}
      -
      -type M map[interface{}]interface{}
      -
      -type inlineB struct {
      -	B       int
      -	inlineC `yaml:",inline"`
      -}
      -
      -type inlineC struct {
      -	C int
      -}
      -
      -func (s *S) TestUnmarshal(c *C) {
      -	for _, item := range unmarshalTests {
      -		t := reflect.ValueOf(item.value).Type()
      -		var value interface{}
      -		switch t.Kind() {
      -		case reflect.Map:
      -			value = reflect.MakeMap(t).Interface()
      -		case reflect.String:
      -			value = reflect.New(t).Interface()
      -		case reflect.Ptr:
      -			value = reflect.New(t.Elem()).Interface()
      -		default:
      -			c.Fatalf("missing case for %s", t)
      -		}
      -		err := yaml.Unmarshal([]byte(item.data), value)
      -		if _, ok := err.(*yaml.TypeError); !ok {
      -			c.Assert(err, IsNil)
      -		}
      -		if t.Kind() == reflect.String {
      -			c.Assert(*value.(*string), Equals, item.value)
      -		} else {
      -			c.Assert(value, DeepEquals, item.value)
      -		}
      -	}
      -}
      -
      -func (s *S) TestUnmarshalNaN(c *C) {
      -	value := map[string]interface{}{}
      -	err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
      -	c.Assert(err, IsNil)
      -	c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
      -}
      -
      -var unmarshalErrorTests = []struct {
      -	data, error string
      -}{
      -	{"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"},
      -	{"v: [A,", "yaml: line 1: did not find expected node content"},
      -	{"v:\n- [A,", "yaml: line 2: did not find expected node content"},
      -	{"a: *b\n", "yaml: unknown anchor 'b' referenced"},
      -	{"a: &a\n  b: *a\n", "yaml: anchor 'a' value contains itself"},
      -	{"value: -", "yaml: block sequence entries are not allowed in this context"},
      -	{"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"},
      -	{"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`},
      -	{"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
      -}
      -
      -func (s *S) TestUnmarshalErrors(c *C) {
      -	for _, item := range unmarshalErrorTests {
      -		var value interface{}
      -		err := yaml.Unmarshal([]byte(item.data), &value)
      -		c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
      -	}
      -}
      -
      -var unmarshalerTests = []struct {
      -	data, tag string
      -	value     interface{}
      -}{
      -	{"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
      -	{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
      -	{"_: 10", "!!int", 10},
      -	{"_: null", "!!null", nil},
      -	{`_: BAR!`, "!!str", "BAR!"},
      -	{`_: "BAR!"`, "!!str", "BAR!"},
      -	{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
      -}
      -
      -var unmarshalerResult = map[int]error{}
      -
      -type unmarshalerType struct {
      -	value interface{}
      -}
      -
      -func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error {
      -	if err := unmarshal(&o.value); err != nil {
      -		return err
      -	}
      -	if i, ok := o.value.(int); ok {
      -		if result, ok := unmarshalerResult[i]; ok {
      -			return result
      -		}
      -	}
      -	return nil
      -}
      -
      -type unmarshalerPointer struct {
      -	Field *unmarshalerType "_"
      -}
      -
      -type unmarshalerValue struct {
      -	Field unmarshalerType "_"
      -}
      -
      -func (s *S) TestUnmarshalerPointerField(c *C) {
      -	for _, item := range unmarshalerTests {
      -		obj := &unmarshalerPointer{}
      -		err := yaml.Unmarshal([]byte(item.data), obj)
      -		c.Assert(err, IsNil)
      -		if item.value == nil {
      -			c.Assert(obj.Field, IsNil)
      -		} else {
      -			c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
      -			c.Assert(obj.Field.value, DeepEquals, item.value)
      -		}
      -	}
      -}
      -
      -func (s *S) TestUnmarshalerValueField(c *C) {
      -	for _, item := range unmarshalerTests {
      -		obj := &unmarshalerValue{}
      -		err := yaml.Unmarshal([]byte(item.data), obj)
      -		c.Assert(err, IsNil)
      -		c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
      -		c.Assert(obj.Field.value, DeepEquals, item.value)
      -	}
      -}
      -
      -func (s *S) TestUnmarshalerWholeDocument(c *C) {
      -	obj := &unmarshalerType{}
      -	err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj)
      -	c.Assert(err, IsNil)
      -	value, ok := obj.value.(map[interface{}]interface{})
      -	c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value))
      -	c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value)
      -}
      -
      -func (s *S) TestUnmarshalerTypeError(c *C) {
      -	unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}}
      -	unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}}
      -	defer func() {
      -		delete(unmarshalerResult, 2)
      -		delete(unmarshalerResult, 4)
      -	}()
      -
      -	type T struct {
      -		Before int
      -		After  int
      -		M      map[string]*unmarshalerType
      -	}
      -	var v T
      -	data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}`
      -	err := yaml.Unmarshal([]byte(data), &v)
      -	c.Assert(err, ErrorMatches, ""+
      -		"yaml: unmarshal errors:\n"+
      -		"  line 1: cannot unmarshal !!str `A` into int\n"+
      -		"  foo\n"+
      -		"  bar\n"+
      -		"  line 1: cannot unmarshal !!str `B` into int")
      -	c.Assert(v.M["abc"], NotNil)
      -	c.Assert(v.M["def"], IsNil)
      -	c.Assert(v.M["ghi"], NotNil)
      -	c.Assert(v.M["jkl"], IsNil)
      -
      -	c.Assert(v.M["abc"].value, Equals, 1)
      -	c.Assert(v.M["ghi"].value, Equals, 3)
      -}
      -
      -type proxyTypeError struct{}
      -
      -func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error {
      -	var s string
      -	var a int32
      -	var b int64
      -	if err := unmarshal(&s); err != nil {
      -		panic(err)
      -	}
      -	if s == "a" {
      -		if err := unmarshal(&b); err == nil {
      -			panic("should have failed")
      -		}
      -		return unmarshal(&a)
      -	}
      -	if err := unmarshal(&a); err == nil {
      -		panic("should have failed")
      -	}
      -	return unmarshal(&b)
      -}
      -
      -func (s *S) TestUnmarshalerTypeErrorProxying(c *C) {
      -	type T struct {
      -		Before int
      -		After  int
      -		M      map[string]*proxyTypeError
      -	}
      -	var v T
      -	data := `{before: A, m: {abc: a, def: b}, after: B}`
      -	err := yaml.Unmarshal([]byte(data), &v)
      -	c.Assert(err, ErrorMatches, ""+
      -		"yaml: unmarshal errors:\n"+
      -		"  line 1: cannot unmarshal !!str `A` into int\n"+
      -		"  line 1: cannot unmarshal !!str `a` into int32\n"+
      -		"  line 1: cannot unmarshal !!str `b` into int64\n"+
      -		"  line 1: cannot unmarshal !!str `B` into int")
      -}
      -
      -type failingUnmarshaler struct{}
      -
      -var failingErr = errors.New("failingErr")
      -
      -func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
      -	return failingErr
      -}
      -
      -func (s *S) TestUnmarshalerError(c *C) {
      -	err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{})
      -	c.Assert(err, Equals, failingErr)
      -}
      -
      -type sliceUnmarshaler []int
      -
      -func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
      -	var slice []int
      -	err := unmarshal(&slice)
      -	if err == nil {
      -		*su = slice
      -		return nil
      -	}
      -
      -	var intVal int
      -	err = unmarshal(&intVal)
      -	if err == nil {
      -		*su = []int{intVal}
      -		return nil
      -	}
      -
      -	return err
      -}
      -
      -func (s *S) TestUnmarshalerRetry(c *C) {
      -	var su sliceUnmarshaler
      -	err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su)
      -	c.Assert(err, IsNil)
      -	c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3}))
      -
      -	err = yaml.Unmarshal([]byte("1"), &su)
      -	c.Assert(err, IsNil)
      -	c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1}))
      -}
      -
      -// From http://yaml.org/type/merge.html
      -var mergeTests = `
      -anchors:
      -  list:
      -    - &CENTER { "x": 1, "y": 2 }
      -    - &LEFT   { "x": 0, "y": 2 }
      -    - &BIG    { "r": 10 }
      -    - &SMALL  { "r": 1 }
      -
      -# All the following maps are equal:
      -
      -plain:
      -  # Explicit keys
      -  "x": 1
      -  "y": 2
      -  "r": 10
      -  label: center/big
      -
      -mergeOne:
      -  # Merge one map
      -  << : *CENTER
      -  "r": 10
      -  label: center/big
      -
      -mergeMultiple:
      -  # Merge multiple maps
      -  << : [ *CENTER, *BIG ]
      -  label: center/big
      -
      -override:
      -  # Override
      -  << : [ *BIG, *LEFT, *SMALL ]
      -  "x": 1
      -  label: center/big
      -
      -shortTag:
      -  # Explicit short merge tag
      -  !!merge "<<" : [ *CENTER, *BIG ]
      -  label: center/big
      -
      -longTag:
      -  # Explicit merge long tag
      -  !<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
      -  label: center/big
      -
      -inlineMap:
      -  # Inlined map 
      -  << : {"x": 1, "y": 2, "r": 10}
      -  label: center/big
      -
      -inlineSequenceMap:
      -  # Inlined map in sequence
      -  << : [ *CENTER, {"r": 10} ]
      -  label: center/big
      -`
      -
      -func (s *S) TestMerge(c *C) {
      -	var want = map[interface{}]interface{}{
      -		"x":     1,
      -		"y":     2,
      -		"r":     10,
      -		"label": "center/big",
      -	}
      -
      -	var m map[interface{}]interface{}
      -	err := yaml.Unmarshal([]byte(mergeTests), &m)
      -	c.Assert(err, IsNil)
      -	for name, test := range m {
      -		if name == "anchors" {
      -			continue
      -		}
      -		c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
      -	}
      -}
      -
      -func (s *S) TestMergeStruct(c *C) {
      -	type Data struct {
      -		X, Y, R int
      -		Label   string
      -	}
      -	want := Data{1, 2, 10, "center/big"}
      -
      -	var m map[string]Data
      -	err := yaml.Unmarshal([]byte(mergeTests), &m)
      -	c.Assert(err, IsNil)
      -	for name, test := range m {
      -		if name == "anchors" {
      -			continue
      -		}
      -		c.Assert(test, Equals, want, Commentf("test %q failed", name))
      -	}
      -}
      -
      -var unmarshalNullTests = []func() interface{}{
      -	func() interface{} { var v interface{}; v = "v"; return &v },
      -	func() interface{} { var s = "s"; return &s },
      -	func() interface{} { var s = "s"; sptr := &s; return &sptr },
      -	func() interface{} { var i = 1; return &i },
      -	func() interface{} { var i = 1; iptr := &i; return &iptr },
      -	func() interface{} { m := map[string]int{"s": 1}; return &m },
      -	func() interface{} { m := map[string]int{"s": 1}; return m },
      -}
      -
      -func (s *S) TestUnmarshalNull(c *C) {
      -	for _, test := range unmarshalNullTests {
      -		item := test()
      -		zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
      -		err := yaml.Unmarshal([]byte("null"), item)
      -		c.Assert(err, IsNil)
      -		if reflect.TypeOf(item).Kind() == reflect.Map {
      -			c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
      -		} else {
      -			c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
      -		}
      -	}
      -}
      -
      -func (s *S) TestUnmarshalSliceOnPreset(c *C) {
      -	// Issue #48.
      -	v := struct{ A []int }{[]int{1}}
      -	yaml.Unmarshal([]byte("a: [2]"), &v)
      -	c.Assert(v.A, DeepEquals, []int{2})
      -}
      -
      -//var data []byte
      -//func init() {
      -//	var err error
      -//	data, err = ioutil.ReadFile("/tmp/file.yaml")
      -//	if err != nil {
      -//		panic(err)
      -//	}
      -//}
      -//
      -//func (s *S) BenchmarkUnmarshal(c *C) {
      -//	var err error
      -//	for i := 0; i < c.N; i++ {
      -//		var v map[string]interface{}
      -//		err = yaml.Unmarshal(data, &v)
      -//	}
      -//	if err != nil {
      -//		panic(err)
      -//	}
      -//}
      -//
      -//func (s *S) BenchmarkMarshal(c *C) {
      -//	var v map[string]interface{}
      -//	yaml.Unmarshal(data, &v)
      -//	c.ResetTimer()
      -//	for i := 0; i < c.N; i++ {
      -//		yaml.Marshal(&v)
      -//	}
      -//}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/emitterc.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/emitterc.go
      deleted file mode 100644
      index 9b3dc4a4..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/emitterc.go
      +++ /dev/null
      @@ -1,1685 +0,0 @@
      -package yaml
      -
      -import (
      -	"bytes"
      -)
      -
      -// Flush the buffer if needed.
      -func flush(emitter *yaml_emitter_t) bool {
      -	if emitter.buffer_pos+5 >= len(emitter.buffer) {
      -		return yaml_emitter_flush(emitter)
      -	}
      -	return true
      -}
      -
      -// Put a character to the output buffer.
      -func put(emitter *yaml_emitter_t, value byte) bool {
      -	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
      -		return false
      -	}
      -	emitter.buffer[emitter.buffer_pos] = value
      -	emitter.buffer_pos++
      -	emitter.column++
      -	return true
      -}
      -
      -// Put a line break to the output buffer.
      -func put_break(emitter *yaml_emitter_t) bool {
      -	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
      -		return false
      -	}
      -	switch emitter.line_break {
      -	case yaml_CR_BREAK:
      -		emitter.buffer[emitter.buffer_pos] = '\r'
      -		emitter.buffer_pos += 1
      -	case yaml_LN_BREAK:
      -		emitter.buffer[emitter.buffer_pos] = '\n'
      -		emitter.buffer_pos += 1
      -	case yaml_CRLN_BREAK:
      -		emitter.buffer[emitter.buffer_pos+0] = '\r'
      -		emitter.buffer[emitter.buffer_pos+1] = '\n'
      -		emitter.buffer_pos += 2
      -	default:
      -		panic("unknown line break setting")
      -	}
      -	emitter.column = 0
      -	emitter.line++
      -	return true
      -}
      -
      -// Copy a character from a string into buffer.
      -func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
      -	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
      -		return false
      -	}
      -	p := emitter.buffer_pos
      -	w := width(s[*i])
      -	switch w {
      -	case 4:
      -		emitter.buffer[p+3] = s[*i+3]
      -		fallthrough
      -	case 3:
      -		emitter.buffer[p+2] = s[*i+2]
      -		fallthrough
      -	case 2:
      -		emitter.buffer[p+1] = s[*i+1]
      -		fallthrough
      -	case 1:
      -		emitter.buffer[p+0] = s[*i+0]
      -	default:
      -		panic("unknown character width")
      -	}
      -	emitter.column++
      -	emitter.buffer_pos += w
      -	*i += w
      -	return true
      -}
      -
      -// Write a whole string into buffer.
      -func write_all(emitter *yaml_emitter_t, s []byte) bool {
      -	for i := 0; i < len(s); {
      -		if !write(emitter, s, &i) {
      -			return false
      -		}
      -	}
      -	return true
      -}
      -
      -// Copy a line break character from a string into buffer.
      -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
      -	if s[*i] == '\n' {
      -		if !put_break(emitter) {
      -			return false
      -		}
      -		*i++
      -	} else {
      -		if !write(emitter, s, i) {
      -			return false
      -		}
      -		emitter.column = 0
      -		emitter.line++
      -	}
      -	return true
      -}
      -
      -// Set an emitter error and return false.
      -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
      -	emitter.error = yaml_EMITTER_ERROR
      -	emitter.problem = problem
      -	return false
      -}
      -
      -// Emit an event.
      -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	emitter.events = append(emitter.events, *event)
      -	for !yaml_emitter_need_more_events(emitter) {
      -		event := &emitter.events[emitter.events_head]
      -		if !yaml_emitter_analyze_event(emitter, event) {
      -			return false
      -		}
      -		if !yaml_emitter_state_machine(emitter, event) {
      -			return false
      -		}
      -		yaml_event_delete(event)
      -		emitter.events_head++
      -	}
      -	return true
      -}
      -
      -// Check if we need to accumulate more events before emitting.
      -//
      -// We accumulate extra
      -//  - 1 event for DOCUMENT-START
      -//  - 2 events for SEQUENCE-START
      -//  - 3 events for MAPPING-START
      -//
      -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
      -	if emitter.events_head == len(emitter.events) {
      -		return true
      -	}
      -	var accumulate int
      -	switch emitter.events[emitter.events_head].typ {
      -	case yaml_DOCUMENT_START_EVENT:
      -		accumulate = 1
      -		break
      -	case yaml_SEQUENCE_START_EVENT:
      -		accumulate = 2
      -		break
      -	case yaml_MAPPING_START_EVENT:
      -		accumulate = 3
      -		break
      -	default:
      -		return false
      -	}
      -	if len(emitter.events)-emitter.events_head > accumulate {
      -		return false
      -	}
      -	var level int
      -	for i := emitter.events_head; i < len(emitter.events); i++ {
      -		switch emitter.events[i].typ {
      -		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
      -			level++
      -		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
      -			level--
      -		}
      -		if level == 0 {
      -			return false
      -		}
      -	}
      -	return true
      -}
      -
      -// Append a directive to the directives stack.
      -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
      -	for i := 0; i < len(emitter.tag_directives); i++ {
      -		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
      -			if allow_duplicates {
      -				return true
      -			}
      -			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
      -		}
      -	}
      -
      -	// [Go] Do we actually need to copy this given garbage collection
      -	// and the lack of deallocating destructors?
      -	tag_copy := yaml_tag_directive_t{
      -		handle: make([]byte, len(value.handle)),
      -		prefix: make([]byte, len(value.prefix)),
      -	}
      -	copy(tag_copy.handle, value.handle)
      -	copy(tag_copy.prefix, value.prefix)
      -	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
      -	return true
      -}
      -
      -// Increase the indentation level.
      -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
      -	emitter.indents = append(emitter.indents, emitter.indent)
      -	if emitter.indent < 0 {
      -		if flow {
      -			emitter.indent = emitter.best_indent
      -		} else {
      -			emitter.indent = 0
      -		}
      -	} else if !indentless {
      -		emitter.indent += emitter.best_indent
      -	}
      -	return true
      -}
      -
      -// State dispatcher.
      -func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	switch emitter.state {
      -	default:
      -	case yaml_EMIT_STREAM_START_STATE:
      -		return yaml_emitter_emit_stream_start(emitter, event)
      -
      -	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
      -		return yaml_emitter_emit_document_start(emitter, event, true)
      -
      -	case yaml_EMIT_DOCUMENT_START_STATE:
      -		return yaml_emitter_emit_document_start(emitter, event, false)
      -
      -	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
      -		return yaml_emitter_emit_document_content(emitter, event)
      -
      -	case yaml_EMIT_DOCUMENT_END_STATE:
      -		return yaml_emitter_emit_document_end(emitter, event)
      -
      -	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
      -		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
      -
      -	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
      -		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
      -
      -	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
      -		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
      -
      -	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
      -		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
      -
      -	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
      -		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
      -
      -	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
      -		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
      -
      -	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
      -		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
      -
      -	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
      -		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
      -
      -	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
      -		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
      -
      -	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
      -		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
      -
      -	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
      -		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
      -
      -	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
      -		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
      -
      -	case yaml_EMIT_END_STATE:
      -		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
      -	}
      -	panic("invalid emitter state")
      -}
      -
      -// Expect STREAM-START.
      -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	if event.typ != yaml_STREAM_START_EVENT {
      -		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
      -	}
      -	if emitter.encoding == yaml_ANY_ENCODING {
      -		emitter.encoding = event.encoding
      -		if emitter.encoding == yaml_ANY_ENCODING {
      -			emitter.encoding = yaml_UTF8_ENCODING
      -		}
      -	}
      -	if emitter.best_indent < 2 || emitter.best_indent > 9 {
      -		emitter.best_indent = 2
      -	}
      -	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
      -		emitter.best_width = 80
      -	}
      -	if emitter.best_width < 0 {
      -		emitter.best_width = 1<<31 - 1
      -	}
      -	if emitter.line_break == yaml_ANY_BREAK {
      -		emitter.line_break = yaml_LN_BREAK
      -	}
      -
      -	emitter.indent = -1
      -	emitter.line = 0
      -	emitter.column = 0
      -	emitter.whitespace = true
      -	emitter.indention = true
      -
      -	if emitter.encoding != yaml_UTF8_ENCODING {
      -		if !yaml_emitter_write_bom(emitter) {
      -			return false
      -		}
      -	}
      -	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
      -	return true
      -}
      -
      -// Expect DOCUMENT-START or STREAM-END.
      -func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
      -
      -	if event.typ == yaml_DOCUMENT_START_EVENT {
      -
      -		if event.version_directive != nil {
      -			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
      -				return false
      -			}
      -		}
      -
      -		for i := 0; i < len(event.tag_directives); i++ {
      -			tag_directive := &event.tag_directives[i]
      -			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
      -				return false
      -			}
      -			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
      -				return false
      -			}
      -		}
      -
      -		for i := 0; i < len(default_tag_directives); i++ {
      -			tag_directive := &default_tag_directives[i]
      -			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
      -				return false
      -			}
      -		}
      -
      -		implicit := event.implicit
      -		if !first || emitter.canonical {
      -			implicit = false
      -		}
      -
      -		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
      -			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -		}
      -
      -		if event.version_directive != nil {
      -			implicit = false
      -			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -		}
      -
      -		if len(event.tag_directives) > 0 {
      -			implicit = false
      -			for i := 0; i < len(event.tag_directives); i++ {
      -				tag_directive := &event.tag_directives[i]
      -				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
      -					return false
      -				}
      -				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
      -					return false
      -				}
      -				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
      -					return false
      -				}
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -			}
      -		}
      -
      -		if yaml_emitter_check_empty_document(emitter) {
      -			implicit = false
      -		}
      -		if !implicit {
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
      -				return false
      -			}
      -			if emitter.canonical {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -			}
      -		}
      -
      -		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
      -		return true
      -	}
      -
      -	if event.typ == yaml_STREAM_END_EVENT {
      -		if emitter.open_ended {
      -			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -		}
      -		if !yaml_emitter_flush(emitter) {
      -			return false
      -		}
      -		emitter.state = yaml_EMIT_END_STATE
      -		return true
      -	}
      -
      -	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
      -}
      -
      -// Expect the root node.
      -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
      -	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
      -}
      -
      -// Expect DOCUMENT-END.
      -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	if event.typ != yaml_DOCUMENT_END_EVENT {
      -		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
      -	}
      -	if !yaml_emitter_write_indent(emitter) {
      -		return false
      -	}
      -	if !event.implicit {
      -		// [Go] Allocate the slice elsewhere.
      -		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
      -			return false
      -		}
      -		if !yaml_emitter_write_indent(emitter) {
      -			return false
      -		}
      -	}
      -	if !yaml_emitter_flush(emitter) {
      -		return false
      -	}
      -	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
      -	emitter.tag_directives = emitter.tag_directives[:0]
      -	return true
      -}
      -
      -// Expect a flow item node.
      -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
      -			return false
      -		}
      -		if !yaml_emitter_increase_indent(emitter, true, false) {
      -			return false
      -		}
      -		emitter.flow_level++
      -	}
      -
      -	if event.typ == yaml_SEQUENCE_END_EVENT {
      -		emitter.flow_level--
      -		emitter.indent = emitter.indents[len(emitter.indents)-1]
      -		emitter.indents = emitter.indents[:len(emitter.indents)-1]
      -		if emitter.canonical && !first {
      -			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -		}
      -		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
      -			return false
      -		}
      -		emitter.state = emitter.states[len(emitter.states)-1]
      -		emitter.states = emitter.states[:len(emitter.states)-1]
      -
      -		return true
      -	}
      -
      -	if !first {
      -		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
      -			return false
      -		}
      -	}
      -
      -	if emitter.canonical || emitter.column > emitter.best_width {
      -		if !yaml_emitter_write_indent(emitter) {
      -			return false
      -		}
      -	}
      -	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
      -	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
      -}
      -
      -// Expect a flow key node.
      -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
      -			return false
      -		}
      -		if !yaml_emitter_increase_indent(emitter, true, false) {
      -			return false
      -		}
      -		emitter.flow_level++
      -	}
      -
      -	if event.typ == yaml_MAPPING_END_EVENT {
      -		emitter.flow_level--
      -		emitter.indent = emitter.indents[len(emitter.indents)-1]
      -		emitter.indents = emitter.indents[:len(emitter.indents)-1]
      -		if emitter.canonical && !first {
      -			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -		}
      -		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
      -			return false
      -		}
      -		emitter.state = emitter.states[len(emitter.states)-1]
      -		emitter.states = emitter.states[:len(emitter.states)-1]
      -		return true
      -	}
      -
      -	if !first {
      -		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
      -			return false
      -		}
      -	}
      -	if emitter.canonical || emitter.column > emitter.best_width {
      -		if !yaml_emitter_write_indent(emitter) {
      -			return false
      -		}
      -	}
      -
      -	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
      -		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
      -		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
      -	}
      -	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
      -		return false
      -	}
      -	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
      -	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
      -}
      -
      -// Expect a flow value node.
      -func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
      -	if simple {
      -		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
      -			return false
      -		}
      -	} else {
      -		if emitter.canonical || emitter.column > emitter.best_width {
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -		}
      -		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
      -			return false
      -		}
      -	}
      -	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
      -	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
      -}
      -
      -// Expect a block item node.
      -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
      -			return false
      -		}
      -	}
      -	if event.typ == yaml_SEQUENCE_END_EVENT {
      -		emitter.indent = emitter.indents[len(emitter.indents)-1]
      -		emitter.indents = emitter.indents[:len(emitter.indents)-1]
      -		emitter.state = emitter.states[len(emitter.states)-1]
      -		emitter.states = emitter.states[:len(emitter.states)-1]
      -		return true
      -	}
      -	if !yaml_emitter_write_indent(emitter) {
      -		return false
      -	}
      -	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
      -		return false
      -	}
      -	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
      -	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
      -}
      -
      -// Expect a block key node.
      -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		if !yaml_emitter_increase_indent(emitter, false, false) {
      -			return false
      -		}
      -	}
      -	if event.typ == yaml_MAPPING_END_EVENT {
      -		emitter.indent = emitter.indents[len(emitter.indents)-1]
      -		emitter.indents = emitter.indents[:len(emitter.indents)-1]
      -		emitter.state = emitter.states[len(emitter.states)-1]
      -		emitter.states = emitter.states[:len(emitter.states)-1]
      -		return true
      -	}
      -	if !yaml_emitter_write_indent(emitter) {
      -		return false
      -	}
      -	if yaml_emitter_check_simple_key(emitter) {
      -		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
      -		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
      -	}
      -	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
      -		return false
      -	}
      -	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
      -	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
      -}
      -
      -// Expect a block value node.
      -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
      -	if simple {
      -		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
      -			return false
      -		}
      -	} else {
      -		if !yaml_emitter_write_indent(emitter) {
      -			return false
      -		}
      -		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
      -			return false
      -		}
      -	}
      -	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
      -	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
      -}
      -
      -// Expect a node.
      -func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
      -	root bool, sequence bool, mapping bool, simple_key bool) bool {
      -
      -	emitter.root_context = root
      -	emitter.sequence_context = sequence
      -	emitter.mapping_context = mapping
      -	emitter.simple_key_context = simple_key
      -
      -	switch event.typ {
      -	case yaml_ALIAS_EVENT:
      -		return yaml_emitter_emit_alias(emitter, event)
      -	case yaml_SCALAR_EVENT:
      -		return yaml_emitter_emit_scalar(emitter, event)
      -	case yaml_SEQUENCE_START_EVENT:
      -		return yaml_emitter_emit_sequence_start(emitter, event)
      -	case yaml_MAPPING_START_EVENT:
      -		return yaml_emitter_emit_mapping_start(emitter, event)
      -	default:
      -		return yaml_emitter_set_emitter_error(emitter,
      -			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
      -	}
      -	return false
      -}
      -
      -// Expect ALIAS.
      -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	if !yaml_emitter_process_anchor(emitter) {
      -		return false
      -	}
      -	emitter.state = emitter.states[len(emitter.states)-1]
      -	emitter.states = emitter.states[:len(emitter.states)-1]
      -	return true
      -}
      -
      -// Expect SCALAR.
      -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	if !yaml_emitter_select_scalar_style(emitter, event) {
      -		return false
      -	}
      -	if !yaml_emitter_process_anchor(emitter) {
      -		return false
      -	}
      -	if !yaml_emitter_process_tag(emitter) {
      -		return false
      -	}
      -	if !yaml_emitter_increase_indent(emitter, true, false) {
      -		return false
      -	}
      -	if !yaml_emitter_process_scalar(emitter) {
      -		return false
      -	}
      -	emitter.indent = emitter.indents[len(emitter.indents)-1]
      -	emitter.indents = emitter.indents[:len(emitter.indents)-1]
      -	emitter.state = emitter.states[len(emitter.states)-1]
      -	emitter.states = emitter.states[:len(emitter.states)-1]
      -	return true
      -}
      -
      -// Expect SEQUENCE-START.
      -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	if !yaml_emitter_process_anchor(emitter) {
      -		return false
      -	}
      -	if !yaml_emitter_process_tag(emitter) {
      -		return false
      -	}
      -	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
      -		yaml_emitter_check_empty_sequence(emitter) {
      -		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
      -	} else {
      -		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
      -	}
      -	return true
      -}
      -
      -// Expect MAPPING-START.
      -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	if !yaml_emitter_process_anchor(emitter) {
      -		return false
      -	}
      -	if !yaml_emitter_process_tag(emitter) {
      -		return false
      -	}
      -	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
      -		yaml_emitter_check_empty_mapping(emitter) {
      -		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
      -	} else {
      -		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
      -	}
      -	return true
      -}
      -
      -// Check if the document content is an empty scalar.
      -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
      -	return false // [Go] Huh?
      -}
      -
      -// Check if the next events represent an empty sequence.
      -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
      -	if len(emitter.events)-emitter.events_head < 2 {
      -		return false
      -	}
      -	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
      -		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
      -}
      -
      -// Check if the next events represent an empty mapping.
      -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
      -	if len(emitter.events)-emitter.events_head < 2 {
      -		return false
      -	}
      -	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
      -		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
      -}
      -
      -// Check if the next node can be expressed as a simple key.
      -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
      -	length := 0
      -	switch emitter.events[emitter.events_head].typ {
      -	case yaml_ALIAS_EVENT:
      -		length += len(emitter.anchor_data.anchor)
      -	case yaml_SCALAR_EVENT:
      -		if emitter.scalar_data.multiline {
      -			return false
      -		}
      -		length += len(emitter.anchor_data.anchor) +
      -			len(emitter.tag_data.handle) +
      -			len(emitter.tag_data.suffix) +
      -			len(emitter.scalar_data.value)
      -	case yaml_SEQUENCE_START_EVENT:
      -		if !yaml_emitter_check_empty_sequence(emitter) {
      -			return false
      -		}
      -		length += len(emitter.anchor_data.anchor) +
      -			len(emitter.tag_data.handle) +
      -			len(emitter.tag_data.suffix)
      -	case yaml_MAPPING_START_EVENT:
      -		if !yaml_emitter_check_empty_mapping(emitter) {
      -			return false
      -		}
      -		length += len(emitter.anchor_data.anchor) +
      -			len(emitter.tag_data.handle) +
      -			len(emitter.tag_data.suffix)
      -	default:
      -		return false
      -	}
      -	return length <= 128
      -}
      -
      -// Determine an acceptable scalar style.
      -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -
      -	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
      -	if no_tag && !event.implicit && !event.quoted_implicit {
      -		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
      -	}
      -
      -	style := event.scalar_style()
      -	if style == yaml_ANY_SCALAR_STYLE {
      -		style = yaml_PLAIN_SCALAR_STYLE
      -	}
      -	if emitter.canonical {
      -		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
      -	}
      -	if emitter.simple_key_context && emitter.scalar_data.multiline {
      -		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
      -	}
      -
      -	if style == yaml_PLAIN_SCALAR_STYLE {
      -		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
      -			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
      -			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
      -		}
      -		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
      -			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
      -		}
      -		if no_tag && !event.implicit {
      -			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
      -		}
      -	}
      -	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
      -		if !emitter.scalar_data.single_quoted_allowed {
      -			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
      -		}
      -	}
      -	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
      -		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
      -			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
      -		}
      -	}
      -
      -	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
      -		emitter.tag_data.handle = []byte{'!'}
      -	}
      -	emitter.scalar_data.style = style
      -	return true
      -}
      -
      -// Write an achor.
      -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
      -	if emitter.anchor_data.anchor == nil {
      -		return true
      -	}
      -	c := []byte{'&'}
      -	if emitter.anchor_data.alias {
      -		c[0] = '*'
      -	}
      -	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
      -		return false
      -	}
      -	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
      -}
      -
      -// Write a tag.
      -func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
      -	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
      -		return true
      -	}
      -	if len(emitter.tag_data.handle) > 0 {
      -		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
      -			return false
      -		}
      -		if len(emitter.tag_data.suffix) > 0 {
      -			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
      -				return false
      -			}
      -		}
      -	} else {
      -		// [Go] Allocate these slices elsewhere.
      -		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
      -			return false
      -		}
      -		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
      -			return false
      -		}
      -		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
      -			return false
      -		}
      -	}
      -	return true
      -}
      -
      -// Write a scalar.
      -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
      -	switch emitter.scalar_data.style {
      -	case yaml_PLAIN_SCALAR_STYLE:
      -		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
      -
      -	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
      -		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
      -
      -	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
      -		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
      -
      -	case yaml_LITERAL_SCALAR_STYLE:
      -		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
      -
      -	case yaml_FOLDED_SCALAR_STYLE:
      -		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
      -	}
      -	panic("unknown scalar style")
      -}
      -
      -// Check if a %YAML directive is valid.
      -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
      -	if version_directive.major != 1 || version_directive.minor != 1 {
      -		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
      -	}
      -	return true
      -}
      -
      -// Check if a %TAG directive is valid.
      -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
      -	handle := tag_directive.handle
      -	prefix := tag_directive.prefix
      -	if len(handle) == 0 {
      -		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
      -	}
      -	if handle[0] != '!' {
      -		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
      -	}
      -	if handle[len(handle)-1] != '!' {
      -		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
      -	}
      -	for i := 1; i < len(handle)-1; i += width(handle[i]) {
      -		if !is_alpha(handle, i) {
      -			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
      -		}
      -	}
      -	if len(prefix) == 0 {
      -		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
      -	}
      -	return true
      -}
      -
      -// Check if an anchor is valid.
      -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
      -	if len(anchor) == 0 {
      -		problem := "anchor value must not be empty"
      -		if alias {
      -			problem = "alias value must not be empty"
      -		}
      -		return yaml_emitter_set_emitter_error(emitter, problem)
      -	}
      -	for i := 0; i < len(anchor); i += width(anchor[i]) {
      -		if !is_alpha(anchor, i) {
      -			problem := "anchor value must contain alphanumerical characters only"
      -			if alias {
      -				problem = "alias value must contain alphanumerical characters only"
      -			}
      -			return yaml_emitter_set_emitter_error(emitter, problem)
      -		}
      -	}
      -	emitter.anchor_data.anchor = anchor
      -	emitter.anchor_data.alias = alias
      -	return true
      -}
      -
      -// Check if a tag is valid.
      -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
      -	if len(tag) == 0 {
      -		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
      -	}
      -	for i := 0; i < len(emitter.tag_directives); i++ {
      -		tag_directive := &emitter.tag_directives[i]
      -		if bytes.HasPrefix(tag, tag_directive.prefix) {
      -			emitter.tag_data.handle = tag_directive.handle
      -			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
      -			return true
      -		}
      -	}
      -	emitter.tag_data.suffix = tag
      -	return true
      -}
      -
      -// Check if a scalar is valid.
      -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
      -	var (
      -		block_indicators   = false
      -		flow_indicators    = false
      -		line_breaks        = false
      -		special_characters = false
      -
      -		leading_space  = false
      -		leading_break  = false
      -		trailing_space = false
      -		trailing_break = false
      -		break_space    = false
      -		space_break    = false
      -
      -		preceeded_by_whitespace = false
      -		followed_by_whitespace  = false
      -		previous_space          = false
      -		previous_break          = false
      -	)
      -
      -	emitter.scalar_data.value = value
      -
      -	if len(value) == 0 {
      -		emitter.scalar_data.multiline = false
      -		emitter.scalar_data.flow_plain_allowed = false
      -		emitter.scalar_data.block_plain_allowed = true
      -		emitter.scalar_data.single_quoted_allowed = true
      -		emitter.scalar_data.block_allowed = false
      -		return true
      -	}
      -
      -	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
      -		block_indicators = true
      -		flow_indicators = true
      -	}
      -
      -	preceeded_by_whitespace = true
      -	for i, w := 0, 0; i < len(value); i += w {
      -		w = width(value[0])
      -		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
      -
      -		if i == 0 {
      -			switch value[i] {
      -			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
      -				flow_indicators = true
      -				block_indicators = true
      -			case '?', ':':
      -				flow_indicators = true
      -				if followed_by_whitespace {
      -					block_indicators = true
      -				}
      -			case '-':
      -				if followed_by_whitespace {
      -					flow_indicators = true
      -					block_indicators = true
      -				}
      -			}
      -		} else {
      -			switch value[i] {
      -			case ',', '?', '[', ']', '{', '}':
      -				flow_indicators = true
      -			case ':':
      -				flow_indicators = true
      -				if followed_by_whitespace {
      -					block_indicators = true
      -				}
      -			case '#':
      -				if preceeded_by_whitespace {
      -					flow_indicators = true
      -					block_indicators = true
      -				}
      -			}
      -		}
      -
      -		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
      -			special_characters = true
      -		}
      -		if is_space(value, i) {
      -			if i == 0 {
      -				leading_space = true
      -			}
      -			if i+width(value[i]) == len(value) {
      -				trailing_space = true
      -			}
      -			if previous_break {
      -				break_space = true
      -			}
      -			previous_space = true
      -			previous_break = false
      -		} else if is_break(value, i) {
      -			line_breaks = true
      -			if i == 0 {
      -				leading_break = true
      -			}
      -			if i+width(value[i]) == len(value) {
      -				trailing_break = true
      -			}
      -			if previous_space {
      -				space_break = true
      -			}
      -			previous_space = false
      -			previous_break = true
      -		} else {
      -			previous_space = false
      -			previous_break = false
      -		}
      -
      -		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
      -		preceeded_by_whitespace = is_blankz(value, i)
      -	}
      -
      -	emitter.scalar_data.multiline = line_breaks
      -	emitter.scalar_data.flow_plain_allowed = true
      -	emitter.scalar_data.block_plain_allowed = true
      -	emitter.scalar_data.single_quoted_allowed = true
      -	emitter.scalar_data.block_allowed = true
      -
      -	if leading_space || leading_break || trailing_space || trailing_break {
      -		emitter.scalar_data.flow_plain_allowed = false
      -		emitter.scalar_data.block_plain_allowed = false
      -	}
      -	if trailing_space {
      -		emitter.scalar_data.block_allowed = false
      -	}
      -	if break_space {
      -		emitter.scalar_data.flow_plain_allowed = false
      -		emitter.scalar_data.block_plain_allowed = false
      -		emitter.scalar_data.single_quoted_allowed = false
      -	}
      -	if space_break || special_characters {
      -		emitter.scalar_data.flow_plain_allowed = false
      -		emitter.scalar_data.block_plain_allowed = false
      -		emitter.scalar_data.single_quoted_allowed = false
      -		emitter.scalar_data.block_allowed = false
      -	}
      -	if line_breaks {
      -		emitter.scalar_data.flow_plain_allowed = false
      -		emitter.scalar_data.block_plain_allowed = false
      -	}
      -	if flow_indicators {
      -		emitter.scalar_data.flow_plain_allowed = false
      -	}
      -	if block_indicators {
      -		emitter.scalar_data.block_plain_allowed = false
      -	}
      -	return true
      -}
      -
      -// Check if the event data is valid.
      -func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -
      -	emitter.anchor_data.anchor = nil
      -	emitter.tag_data.handle = nil
      -	emitter.tag_data.suffix = nil
      -	emitter.scalar_data.value = nil
      -
      -	switch event.typ {
      -	case yaml_ALIAS_EVENT:
      -		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
      -			return false
      -		}
      -
      -	case yaml_SCALAR_EVENT:
      -		if len(event.anchor) > 0 {
      -			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
      -				return false
      -			}
      -		}
      -		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
      -			if !yaml_emitter_analyze_tag(emitter, event.tag) {
      -				return false
      -			}
      -		}
      -		if !yaml_emitter_analyze_scalar(emitter, event.value) {
      -			return false
      -		}
      -
      -	case yaml_SEQUENCE_START_EVENT:
      -		if len(event.anchor) > 0 {
      -			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
      -				return false
      -			}
      -		}
      -		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
      -			if !yaml_emitter_analyze_tag(emitter, event.tag) {
      -				return false
      -			}
      -		}
      -
      -	case yaml_MAPPING_START_EVENT:
      -		if len(event.anchor) > 0 {
      -			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
      -				return false
      -			}
      -		}
      -		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
      -			if !yaml_emitter_analyze_tag(emitter, event.tag) {
      -				return false
      -			}
      -		}
      -	}
      -	return true
      -}
      -
      -// Write the BOM character.
      -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
      -	if !flush(emitter) {
      -		return false
      -	}
      -	pos := emitter.buffer_pos
      -	emitter.buffer[pos+0] = '\xEF'
      -	emitter.buffer[pos+1] = '\xBB'
      -	emitter.buffer[pos+2] = '\xBF'
      -	emitter.buffer_pos += 3
      -	return true
      -}
      -
      -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
      -	indent := emitter.indent
      -	if indent < 0 {
      -		indent = 0
      -	}
      -	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
      -		if !put_break(emitter) {
      -			return false
      -		}
      -	}
      -	for emitter.column < indent {
      -		if !put(emitter, ' ') {
      -			return false
      -		}
      -	}
      -	emitter.whitespace = true
      -	emitter.indention = true
      -	return true
      -}
      -
      -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
      -	if need_whitespace && !emitter.whitespace {
      -		if !put(emitter, ' ') {
      -			return false
      -		}
      -	}
      -	if !write_all(emitter, indicator) {
      -		return false
      -	}
      -	emitter.whitespace = is_whitespace
      -	emitter.indention = (emitter.indention && is_indention)
      -	emitter.open_ended = false
      -	return true
      -}
      -
      -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
      -	if !write_all(emitter, value) {
      -		return false
      -	}
      -	emitter.whitespace = false
      -	emitter.indention = false
      -	return true
      -}
      -
      -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
      -	if !emitter.whitespace {
      -		if !put(emitter, ' ') {
      -			return false
      -		}
      -	}
      -	if !write_all(emitter, value) {
      -		return false
      -	}
      -	emitter.whitespace = false
      -	emitter.indention = false
      -	return true
      -}
      -
      -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
      -	if need_whitespace && !emitter.whitespace {
      -		if !put(emitter, ' ') {
      -			return false
      -		}
      -	}
      -	for i := 0; i < len(value); {
      -		var must_write bool
      -		switch value[i] {
      -		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
      -			must_write = true
      -		default:
      -			must_write = is_alpha(value, i)
      -		}
      -		if must_write {
      -			if !write(emitter, value, &i) {
      -				return false
      -			}
      -		} else {
      -			w := width(value[i])
      -			for k := 0; k < w; k++ {
      -				octet := value[i]
      -				i++
      -				if !put(emitter, '%') {
      -					return false
      -				}
      -
      -				c := octet >> 4
      -				if c < 10 {
      -					c += '0'
      -				} else {
      -					c += 'A' - 10
      -				}
      -				if !put(emitter, c) {
      -					return false
      -				}
      -
      -				c = octet & 0x0f
      -				if c < 10 {
      -					c += '0'
      -				} else {
      -					c += 'A' - 10
      -				}
      -				if !put(emitter, c) {
      -					return false
      -				}
      -			}
      -		}
      -	}
      -	emitter.whitespace = false
      -	emitter.indention = false
      -	return true
      -}
      -
      -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
      -	if !emitter.whitespace {
      -		if !put(emitter, ' ') {
      -			return false
      -		}
      -	}
      -
      -	spaces := false
      -	breaks := false
      -	for i := 0; i < len(value); {
      -		if is_space(value, i) {
      -			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -				i += width(value[i])
      -			} else {
      -				if !write(emitter, value, &i) {
      -					return false
      -				}
      -			}
      -			spaces = true
      -		} else if is_break(value, i) {
      -			if !breaks && value[i] == '\n' {
      -				if !put_break(emitter) {
      -					return false
      -				}
      -			}
      -			if !write_break(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = true
      -			breaks = true
      -		} else {
      -			if breaks {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -			}
      -			if !write(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = false
      -			spaces = false
      -			breaks = false
      -		}
      -	}
      -
      -	emitter.whitespace = false
      -	emitter.indention = false
      -	if emitter.root_context {
      -		emitter.open_ended = true
      -	}
      -
      -	return true
      -}
      -
      -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
      -
      -	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
      -		return false
      -	}
      -
      -	spaces := false
      -	breaks := false
      -	for i := 0; i < len(value); {
      -		if is_space(value, i) {
      -			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -				i += width(value[i])
      -			} else {
      -				if !write(emitter, value, &i) {
      -					return false
      -				}
      -			}
      -			spaces = true
      -		} else if is_break(value, i) {
      -			if !breaks && value[i] == '\n' {
      -				if !put_break(emitter) {
      -					return false
      -				}
      -			}
      -			if !write_break(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = true
      -			breaks = true
      -		} else {
      -			if breaks {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -			}
      -			if value[i] == '\'' {
      -				if !put(emitter, '\'') {
      -					return false
      -				}
      -			}
      -			if !write(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = false
      -			spaces = false
      -			breaks = false
      -		}
      -	}
      -	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
      -		return false
      -	}
      -	emitter.whitespace = false
      -	emitter.indention = false
      -	return true
      -}
      -
      -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
      -	spaces := false
      -	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
      -		return false
      -	}
      -
      -	for i := 0; i < len(value); {
      -		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
      -			is_bom(value, i) || is_break(value, i) ||
      -			value[i] == '"' || value[i] == '\\' {
      -
      -			octet := value[i]
      -
      -			var w int
      -			var v rune
      -			switch {
      -			case octet&0x80 == 0x00:
      -				w, v = 1, rune(octet&0x7F)
      -			case octet&0xE0 == 0xC0:
      -				w, v = 2, rune(octet&0x1F)
      -			case octet&0xF0 == 0xE0:
      -				w, v = 3, rune(octet&0x0F)
      -			case octet&0xF8 == 0xF0:
      -				w, v = 4, rune(octet&0x07)
      -			}
      -			for k := 1; k < w; k++ {
      -				octet = value[i+k]
      -				v = (v << 6) + (rune(octet) & 0x3F)
      -			}
      -			i += w
      -
      -			if !put(emitter, '\\') {
      -				return false
      -			}
      -
      -			var ok bool
      -			switch v {
      -			case 0x00:
      -				ok = put(emitter, '0')
      -			case 0x07:
      -				ok = put(emitter, 'a')
      -			case 0x08:
      -				ok = put(emitter, 'b')
      -			case 0x09:
      -				ok = put(emitter, 't')
      -			case 0x0A:
      -				ok = put(emitter, 'n')
      -			case 0x0b:
      -				ok = put(emitter, 'v')
      -			case 0x0c:
      -				ok = put(emitter, 'f')
      -			case 0x0d:
      -				ok = put(emitter, 'r')
      -			case 0x1b:
      -				ok = put(emitter, 'e')
      -			case 0x22:
      -				ok = put(emitter, '"')
      -			case 0x5c:
      -				ok = put(emitter, '\\')
      -			case 0x85:
      -				ok = put(emitter, 'N')
      -			case 0xA0:
      -				ok = put(emitter, '_')
      -			case 0x2028:
      -				ok = put(emitter, 'L')
      -			case 0x2029:
      -				ok = put(emitter, 'P')
      -			default:
      -				if v <= 0xFF {
      -					ok = put(emitter, 'x')
      -					w = 2
      -				} else if v <= 0xFFFF {
      -					ok = put(emitter, 'u')
      -					w = 4
      -				} else {
      -					ok = put(emitter, 'U')
      -					w = 8
      -				}
      -				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
      -					digit := byte((v >> uint(k)) & 0x0F)
      -					if digit < 10 {
      -						ok = put(emitter, digit+'0')
      -					} else {
      -						ok = put(emitter, digit+'A'-10)
      -					}
      -				}
      -			}
      -			if !ok {
      -				return false
      -			}
      -			spaces = false
      -		} else if is_space(value, i) {
      -			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -				if is_space(value, i+1) {
      -					if !put(emitter, '\\') {
      -						return false
      -					}
      -				}
      -				i += width(value[i])
      -			} else if !write(emitter, value, &i) {
      -				return false
      -			}
      -			spaces = true
      -		} else {
      -			if !write(emitter, value, &i) {
      -				return false
      -			}
      -			spaces = false
      -		}
      -	}
      -	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
      -		return false
      -	}
      -	emitter.whitespace = false
      -	emitter.indention = false
      -	return true
      -}
      -
      -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
      -	if is_space(value, 0) || is_break(value, 0) {
      -		indent_hint := []byte{'0' + byte(emitter.best_indent)}
      -		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
      -			return false
      -		}
      -	}
      -
      -	emitter.open_ended = false
      -
      -	var chomp_hint [1]byte
      -	if len(value) == 0 {
      -		chomp_hint[0] = '-'
      -	} else {
      -		i := len(value) - 1
      -		for value[i]&0xC0 == 0x80 {
      -			i--
      -		}
      -		if !is_break(value, i) {
      -			chomp_hint[0] = '-'
      -		} else if i == 0 {
      -			chomp_hint[0] = '+'
      -			emitter.open_ended = true
      -		} else {
      -			i--
      -			for value[i]&0xC0 == 0x80 {
      -				i--
      -			}
      -			if is_break(value, i) {
      -				chomp_hint[0] = '+'
      -				emitter.open_ended = true
      -			}
      -		}
      -	}
      -	if chomp_hint[0] != 0 {
      -		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
      -			return false
      -		}
      -	}
      -	return true
      -}
      -
      -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
      -	if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
      -		return false
      -	}
      -	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
      -		return false
      -	}
      -	if !put_break(emitter) {
      -		return false
      -	}
      -	emitter.indention = true
      -	emitter.whitespace = true
      -	breaks := true
      -	for i := 0; i < len(value); {
      -		if is_break(value, i) {
      -			if !write_break(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = true
      -			breaks = true
      -		} else {
      -			if breaks {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -			}
      -			if !write(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = false
      -			breaks = false
      -		}
      -	}
      -
      -	return true
      -}
      -
      -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
      -	if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
      -		return false
      -	}
      -	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
      -		return false
      -	}
      -
      -	if !put_break(emitter) {
      -		return false
      -	}
      -	emitter.indention = true
      -	emitter.whitespace = true
      -
      -	breaks := true
      -	leading_spaces := true
      -	for i := 0; i < len(value); {
      -		if is_break(value, i) {
      -			if !breaks && !leading_spaces && value[i] == '\n' {
      -				k := 0
      -				for is_break(value, k) {
      -					k += width(value[k])
      -				}
      -				if !is_blankz(value, k) {
      -					if !put_break(emitter) {
      -						return false
      -					}
      -				}
      -			}
      -			if !write_break(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = true
      -			breaks = true
      -		} else {
      -			if breaks {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -				leading_spaces = is_blank(value, i)
      -			}
      -			if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -				i += width(value[i])
      -			} else {
      -				if !write(emitter, value, &i) {
      -					return false
      -				}
      -			}
      -			emitter.indention = false
      -			breaks = false
      -		}
      -	}
      -	return true
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/encode.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/encode.go
      deleted file mode 100644
      index 84f84995..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/encode.go
      +++ /dev/null
      @@ -1,306 +0,0 @@
      -package yaml
      -
      -import (
      -	"encoding"
      -	"fmt"
      -	"reflect"
      -	"regexp"
      -	"sort"
      -	"strconv"
      -	"strings"
      -	"time"
      -)
      -
      -type encoder struct {
      -	emitter yaml_emitter_t
      -	event   yaml_event_t
      -	out     []byte
      -	flow    bool
      -}
      -
      -func newEncoder() (e *encoder) {
      -	e = &encoder{}
      -	e.must(yaml_emitter_initialize(&e.emitter))
      -	yaml_emitter_set_output_string(&e.emitter, &e.out)
      -	yaml_emitter_set_unicode(&e.emitter, true)
      -	e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
      -	e.emit()
      -	e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
      -	e.emit()
      -	return e
      -}
      -
      -func (e *encoder) finish() {
      -	e.must(yaml_document_end_event_initialize(&e.event, true))
      -	e.emit()
      -	e.emitter.open_ended = false
      -	e.must(yaml_stream_end_event_initialize(&e.event))
      -	e.emit()
      -}
      -
      -func (e *encoder) destroy() {
      -	yaml_emitter_delete(&e.emitter)
      -}
      -
      -func (e *encoder) emit() {
      -	// This will internally delete the e.event value.
      -	if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
      -		e.must(false)
      -	}
      -}
      -
      -func (e *encoder) must(ok bool) {
      -	if !ok {
      -		msg := e.emitter.problem
      -		if msg == "" {
      -			msg = "unknown problem generating YAML content"
      -		}
      -		failf("%s", msg)
      -	}
      -}
      -
      -func (e *encoder) marshal(tag string, in reflect.Value) {
      -	if !in.IsValid() {
      -		e.nilv()
      -		return
      -	}
      -	iface := in.Interface()
      -	if m, ok := iface.(Marshaler); ok {
      -		v, err := m.MarshalYAML()
      -		if err != nil {
      -			fail(err)
      -		}
      -		if v == nil {
      -			e.nilv()
      -			return
      -		}
      -		in = reflect.ValueOf(v)
      -	} else if m, ok := iface.(encoding.TextMarshaler); ok {
      -		text, err := m.MarshalText()
      -		if err != nil {
      -			fail(err)
      -		}
      -		in = reflect.ValueOf(string(text))
      -	}
      -	switch in.Kind() {
      -	case reflect.Interface:
      -		if in.IsNil() {
      -			e.nilv()
      -		} else {
      -			e.marshal(tag, in.Elem())
      -		}
      -	case reflect.Map:
      -		e.mapv(tag, in)
      -	case reflect.Ptr:
      -		if in.IsNil() {
      -			e.nilv()
      -		} else {
      -			e.marshal(tag, in.Elem())
      -		}
      -	case reflect.Struct:
      -		e.structv(tag, in)
      -	case reflect.Slice:
      -		if in.Type().Elem() == mapItemType {
      -			e.itemsv(tag, in)
      -		} else {
      -			e.slicev(tag, in)
      -		}
      -	case reflect.String:
      -		e.stringv(tag, in)
      -	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
      -		if in.Type() == durationType {
      -			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
      -		} else {
      -			e.intv(tag, in)
      -		}
      -	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
      -		e.uintv(tag, in)
      -	case reflect.Float32, reflect.Float64:
      -		e.floatv(tag, in)
      -	case reflect.Bool:
      -		e.boolv(tag, in)
      -	default:
      -		panic("cannot marshal type: " + in.Type().String())
      -	}
      -}
      -
      -func (e *encoder) mapv(tag string, in reflect.Value) {
      -	e.mappingv(tag, func() {
      -		keys := keyList(in.MapKeys())
      -		sort.Sort(keys)
      -		for _, k := range keys {
      -			e.marshal("", k)
      -			e.marshal("", in.MapIndex(k))
      -		}
      -	})
      -}
      -
      -func (e *encoder) itemsv(tag string, in reflect.Value) {
      -	e.mappingv(tag, func() {
      -		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
      -		for _, item := range slice {
      -			e.marshal("", reflect.ValueOf(item.Key))
      -			e.marshal("", reflect.ValueOf(item.Value))
      -		}
      -	})
      -}
      -
      -func (e *encoder) structv(tag string, in reflect.Value) {
      -	sinfo, err := getStructInfo(in.Type())
      -	if err != nil {
      -		panic(err)
      -	}
      -	e.mappingv(tag, func() {
      -		for _, info := range sinfo.FieldsList {
      -			var value reflect.Value
      -			if info.Inline == nil {
      -				value = in.Field(info.Num)
      -			} else {
      -				value = in.FieldByIndex(info.Inline)
      -			}
      -			if info.OmitEmpty && isZero(value) {
      -				continue
      -			}
      -			e.marshal("", reflect.ValueOf(info.Key))
      -			e.flow = info.Flow
      -			e.marshal("", value)
      -		}
      -		if sinfo.InlineMap >= 0 {
      -			m := in.Field(sinfo.InlineMap)
      -			if m.Len() > 0 {
      -				e.flow = false
      -				keys := keyList(m.MapKeys())
      -				sort.Sort(keys)
      -				for _, k := range keys {
      -					if _, found := sinfo.FieldsMap[k.String()]; found {
      -						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
      -					}
      -					e.marshal("", k)
      -					e.flow = false
      -					e.marshal("", m.MapIndex(k))
      -				}
      -			}
      -		}
      -	})
      -}
      -
      -func (e *encoder) mappingv(tag string, f func()) {
      -	implicit := tag == ""
      -	style := yaml_BLOCK_MAPPING_STYLE
      -	if e.flow {
      -		e.flow = false
      -		style = yaml_FLOW_MAPPING_STYLE
      -	}
      -	e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
      -	e.emit()
      -	f()
      -	e.must(yaml_mapping_end_event_initialize(&e.event))
      -	e.emit()
      -}
      -
      -func (e *encoder) slicev(tag string, in reflect.Value) {
      -	implicit := tag == ""
      -	style := yaml_BLOCK_SEQUENCE_STYLE
      -	if e.flow {
      -		e.flow = false
      -		style = yaml_FLOW_SEQUENCE_STYLE
      -	}
      -	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
      -	e.emit()
      -	n := in.Len()
      -	for i := 0; i < n; i++ {
      -		e.marshal("", in.Index(i))
      -	}
      -	e.must(yaml_sequence_end_event_initialize(&e.event))
      -	e.emit()
      -}
      -
      -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
      -//
      -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
      -// in YAML 1.2 and by this package, but these should be marshalled quoted for
      -// the time being for compatibility with other parsers.
      -func isBase60Float(s string) (result bool) {
      -	// Fast path.
      -	if s == "" {
      -		return false
      -	}
      -	c := s[0]
      -	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
      -		return false
      -	}
      -	// Do the full match.
      -	return base60float.MatchString(s)
      -}
      -
      -// From http://yaml.org/type/float.html, except the regular expression there
      -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
      -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
      -
      -func (e *encoder) stringv(tag string, in reflect.Value) {
      -	var style yaml_scalar_style_t
      -	s := in.String()
      -	rtag, rs := resolve("", s)
      -	if rtag == yaml_BINARY_TAG {
      -		if tag == "" || tag == yaml_STR_TAG {
      -			tag = rtag
      -			s = rs.(string)
      -		} else if tag == yaml_BINARY_TAG {
      -			failf("explicitly tagged !!binary data must be base64-encoded")
      -		} else {
      -			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
      -		}
      -	}
      -	if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
      -		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
      -	} else if strings.Contains(s, "\n") {
      -		style = yaml_LITERAL_SCALAR_STYLE
      -	} else {
      -		style = yaml_PLAIN_SCALAR_STYLE
      -	}
      -	e.emitScalar(s, "", tag, style)
      -}
      -
      -func (e *encoder) boolv(tag string, in reflect.Value) {
      -	var s string
      -	if in.Bool() {
      -		s = "true"
      -	} else {
      -		s = "false"
      -	}
      -	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
      -}
      -
      -func (e *encoder) intv(tag string, in reflect.Value) {
      -	s := strconv.FormatInt(in.Int(), 10)
      -	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
      -}
      -
      -func (e *encoder) uintv(tag string, in reflect.Value) {
      -	s := strconv.FormatUint(in.Uint(), 10)
      -	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
      -}
      -
      -func (e *encoder) floatv(tag string, in reflect.Value) {
      -	// FIXME: Handle 64 bits here.
      -	s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
      -	switch s {
      -	case "+Inf":
      -		s = ".inf"
      -	case "-Inf":
      -		s = "-.inf"
      -	case "NaN":
      -		s = ".nan"
      -	}
      -	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
      -}
      -
      -func (e *encoder) nilv() {
      -	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
      -}
      -
      -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
      -	implicit := tag == ""
      -	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
      -	e.emit()
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/encode_test.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/encode_test.go
      deleted file mode 100644
      index ba68ad29..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/encode_test.go
      +++ /dev/null
      @@ -1,485 +0,0 @@
      -package yaml_test
      -
      -import (
      -	"fmt"
      -	"math"
      -	"strconv"
      -	"strings"
      -	"time"
      -
      -	. "gopkg.in/check.v1"
      -	"gopkg.in/yaml.v2"
      -	"net"
      -	"os"
      -)
      -
      -var marshalIntTest = 123
      -
      -var marshalTests = []struct {
      -	value interface{}
      -	data  string
      -}{
      -	{
      -		nil,
      -		"null\n",
      -	}, {
      -		&struct{}{},
      -		"{}\n",
      -	}, {
      -		map[string]string{"v": "hi"},
      -		"v: hi\n",
      -	}, {
      -		map[string]interface{}{"v": "hi"},
      -		"v: hi\n",
      -	}, {
      -		map[string]string{"v": "true"},
      -		"v: \"true\"\n",
      -	}, {
      -		map[string]string{"v": "false"},
      -		"v: \"false\"\n",
      -	}, {
      -		map[string]interface{}{"v": true},
      -		"v: true\n",
      -	}, {
      -		map[string]interface{}{"v": false},
      -		"v: false\n",
      -	}, {
      -		map[string]interface{}{"v": 10},
      -		"v: 10\n",
      -	}, {
      -		map[string]interface{}{"v": -10},
      -		"v: -10\n",
      -	}, {
      -		map[string]uint{"v": 42},
      -		"v: 42\n",
      -	}, {
      -		map[string]interface{}{"v": int64(4294967296)},
      -		"v: 4294967296\n",
      -	}, {
      -		map[string]int64{"v": int64(4294967296)},
      -		"v: 4294967296\n",
      -	}, {
      -		map[string]uint64{"v": 4294967296},
      -		"v: 4294967296\n",
      -	}, {
      -		map[string]interface{}{"v": "10"},
      -		"v: \"10\"\n",
      -	}, {
      -		map[string]interface{}{"v": 0.1},
      -		"v: 0.1\n",
      -	}, {
      -		map[string]interface{}{"v": float64(0.1)},
      -		"v: 0.1\n",
      -	}, {
      -		map[string]interface{}{"v": -0.1},
      -		"v: -0.1\n",
      -	}, {
      -		map[string]interface{}{"v": math.Inf(+1)},
      -		"v: .inf\n",
      -	}, {
      -		map[string]interface{}{"v": math.Inf(-1)},
      -		"v: -.inf\n",
      -	}, {
      -		map[string]interface{}{"v": math.NaN()},
      -		"v: .nan\n",
      -	}, {
      -		map[string]interface{}{"v": nil},
      -		"v: null\n",
      -	}, {
      -		map[string]interface{}{"v": ""},
      -		"v: \"\"\n",
      -	}, {
      -		map[string][]string{"v": []string{"A", "B"}},
      -		"v:\n- A\n- B\n",
      -	}, {
      -		map[string][]string{"v": []string{"A", "B\nC"}},
      -		"v:\n- A\n- |-\n  B\n  C\n",
      -	}, {
      -		map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
      -		"v:\n- A\n- 1\n- B:\n  - 2\n  - 3\n",
      -	}, {
      -		map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
      -		"a:\n  b: c\n",
      -	}, {
      -		map[string]interface{}{"a": "-"},
      -		"a: '-'\n",
      -	},
      -
      -	// Simple values.
      -	{
      -		&marshalIntTest,
      -		"123\n",
      -	},
      -
      -	// Structures
      -	{
      -		&struct{ Hello string }{"world"},
      -		"hello: world\n",
      -	}, {
      -		&struct {
      -			A struct {
      -				B string
      -			}
      -		}{struct{ B string }{"c"}},
      -		"a:\n  b: c\n",
      -	}, {
      -		&struct {
      -			A *struct {
      -				B string
      -			}
      -		}{&struct{ B string }{"c"}},
      -		"a:\n  b: c\n",
      -	}, {
      -		&struct {
      -			A *struct {
      -				B string
      -			}
      -		}{},
      -		"a: null\n",
      -	}, {
      -		&struct{ A int }{1},
      -		"a: 1\n",
      -	}, {
      -		&struct{ A []int }{[]int{1, 2}},
      -		"a:\n- 1\n- 2\n",
      -	}, {
      -		&struct {
      -			B int "a"
      -		}{1},
      -		"a: 1\n",
      -	}, {
      -		&struct{ A bool }{true},
      -		"a: true\n",
      -	},
      -
      -	// Conditional flag
      -	{
      -		&struct {
      -			A int "a,omitempty"
      -			B int "b,omitempty"
      -		}{1, 0},
      -		"a: 1\n",
      -	}, {
      -		&struct {
      -			A int "a,omitempty"
      -			B int "b,omitempty"
      -		}{0, 0},
      -		"{}\n",
      -	}, {
      -		&struct {
      -			A *struct{ X, y int } "a,omitempty,flow"
      -		}{&struct{ X, y int }{1, 2}},
      -		"a: {x: 1}\n",
      -	}, {
      -		&struct {
      -			A *struct{ X, y int } "a,omitempty,flow"
      -		}{nil},
      -		"{}\n",
      -	}, {
      -		&struct {
      -			A *struct{ X, y int } "a,omitempty,flow"
      -		}{&struct{ X, y int }{}},
      -		"a: {x: 0}\n",
      -	}, {
      -		&struct {
      -			A struct{ X, y int } "a,omitempty,flow"
      -		}{struct{ X, y int }{1, 2}},
      -		"a: {x: 1}\n",
      -	}, {
      -		&struct {
      -			A struct{ X, y int } "a,omitempty,flow"
      -		}{struct{ X, y int }{0, 1}},
      -		"{}\n",
      -	},
      -
      -	// Flow flag
      -	{
      -		&struct {
      -			A []int "a,flow"
      -		}{[]int{1, 2}},
      -		"a: [1, 2]\n",
      -	}, {
      -		&struct {
      -			A map[string]string "a,flow"
      -		}{map[string]string{"b": "c", "d": "e"}},
      -		"a: {b: c, d: e}\n",
      -	}, {
      -		&struct {
      -			A struct {
      -				B, D string
      -			} "a,flow"
      -		}{struct{ B, D string }{"c", "e"}},
      -		"a: {b: c, d: e}\n",
      -	},
      -
      -	// Unexported field
      -	{
      -		&struct {
      -			u int
      -			A int
      -		}{0, 1},
      -		"a: 1\n",
      -	},
      -
      -	// Ignored field
      -	{
      -		&struct {
      -			A int
      -			B int "-"
      -		}{1, 2},
      -		"a: 1\n",
      -	},
      -
      -	// Struct inlining
      -	{
      -		&struct {
      -			A int
      -			C inlineB `yaml:",inline"`
      -		}{1, inlineB{2, inlineC{3}}},
      -		"a: 1\nb: 2\nc: 3\n",
      -	},
      -
      -	// Map inlining
      -	{
      -		&struct {
      -			A int
      -			C map[string]int `yaml:",inline"`
      -		}{1, map[string]int{"b": 2, "c": 3}},
      -		"a: 1\nb: 2\nc: 3\n",
      -	},
      -
      -	// Duration
      -	{
      -		map[string]time.Duration{"a": 3 * time.Second},
      -		"a: 3s\n",
      -	},
      -
      -	// Issue #24: bug in map merging logic.
      -	{
      -		map[string]string{"a": "<foo>"},
      -		"a: <foo>\n",
      -	},
      -
      -	// Issue #34: marshal unsupported base 60 floats quoted for compatibility
      -	// with old YAML 1.1 parsers.
      -	{
      -		map[string]string{"a": "1:1"},
      -		"a: \"1:1\"\n",
      -	},
      -
      -	// Binary data.
      -	{
      -		map[string]string{"a": "\x00"},
      -		"a: \"\\0\"\n",
      -	}, {
      -		map[string]string{"a": "\x80\x81\x82"},
      -		"a: !!binary gIGC\n",
      -	}, {
      -		map[string]string{"a": strings.Repeat("\x90", 54)},
      -		"a: !!binary |\n  " + strings.Repeat("kJCQ", 17) + "kJ\n  CQ\n",
      -	},
      -
      -	// Ordered maps.
      -	{
      -		&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
      -		"b: 2\na: 1\nd: 4\nc: 3\nsub:\n  e: 5\n",
      -	},
      -
      -	// Encode unicode as utf-8 rather than in escaped form.
      -	{
      -		map[string]string{"a": "你好"},
      -		"a: 你好\n",
      -	},
      -
      -	// Support encoding.TextMarshaler.
      -	{
      -		map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
      -		"a: 1.2.3.4\n",
      -	},
      -	{
      -		map[string]time.Time{"a": time.Unix(1424801979, 0)},
      -		"a: 2015-02-24T18:19:39Z\n",
      -	},
      -
      -	// Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
      -	{
      -		map[string]string{"a": "b: c"},
      -		"a: 'b: c'\n",
      -	},
      -}
      -
      -func (s *S) TestMarshal(c *C) {
      -	defer os.Setenv("TZ", os.Getenv("TZ"))
      -	os.Setenv("TZ", "UTC")
      -	for _, item := range marshalTests {
      -		data, err := yaml.Marshal(item.value)
      -		c.Assert(err, IsNil)
      -		c.Assert(string(data), Equals, item.data)
      -	}
      -}
      -
      -var marshalErrorTests = []struct {
      -	value interface{}
      -	error string
      -	panic string
      -}{{
      -	value: &struct {
      -		B       int
      -		inlineB ",inline"
      -	}{1, inlineB{2, inlineC{3}}},
      -	panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
      -}, {
      -	value: &struct {
      -		A       int
      -		B map[string]int ",inline"
      -	}{1, map[string]int{"a": 2}},
      -	panic: `Can't have key "a" in inlined map; conflicts with struct field`,
      -}}
      -
      -func (s *S) TestMarshalErrors(c *C) {
      -	for _, item := range marshalErrorTests {
      -		if item.panic != "" {
      -			c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
      -		} else {
      -			_, err := yaml.Marshal(item.value)
      -			c.Assert(err, ErrorMatches, item.error)
      -		}
      -	}
      -}
      -
      -func (s *S) TestMarshalTypeCache(c *C) {
      -	var data []byte
      -	var err error
      -	func() {
      -		type T struct{ A int }
      -		data, err = yaml.Marshal(&T{})
      -		c.Assert(err, IsNil)
      -	}()
      -	func() {
      -		type T struct{ B int }
      -		data, err = yaml.Marshal(&T{})
      -		c.Assert(err, IsNil)
      -	}()
      -	c.Assert(string(data), Equals, "b: 0\n")
      -}
      -
      -var marshalerTests = []struct {
      -	data  string
      -	value interface{}
      -}{
      -	{"_:\n  hi: there\n", map[interface{}]interface{}{"hi": "there"}},
      -	{"_:\n- 1\n- A\n", []interface{}{1, "A"}},
      -	{"_: 10\n", 10},
      -	{"_: null\n", nil},
      -	{"_: BAR!\n", "BAR!"},
      -}
      -
      -type marshalerType struct {
      -	value interface{}
      -}
      -
      -func (o marshalerType) MarshalText() ([]byte, error) {
      -	panic("MarshalText called on type with MarshalYAML")
      -}
      -
      -func (o marshalerType) MarshalYAML() (interface{}, error) {
      -	return o.value, nil
      -}
      -
      -type marshalerValue struct {
      -	Field marshalerType "_"
      -}
      -
      -func (s *S) TestMarshaler(c *C) {
      -	for _, item := range marshalerTests {
      -		obj := &marshalerValue{}
      -		obj.Field.value = item.value
      -		data, err := yaml.Marshal(obj)
      -		c.Assert(err, IsNil)
      -		c.Assert(string(data), Equals, string(item.data))
      -	}
      -}
      -
      -func (s *S) TestMarshalerWholeDocument(c *C) {
      -	obj := &marshalerType{}
      -	obj.value = map[string]string{"hello": "world!"}
      -	data, err := yaml.Marshal(obj)
      -	c.Assert(err, IsNil)
      -	c.Assert(string(data), Equals, "hello: world!\n")
      -}
      -
      -type failingMarshaler struct{}
      -
      -func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
      -	return nil, failingErr
      -}
      -
      -func (s *S) TestMarshalerError(c *C) {
      -	_, err := yaml.Marshal(&failingMarshaler{})
      -	c.Assert(err, Equals, failingErr)
      -}
      -
      -func (s *S) TestSortedOutput(c *C) {
      -	order := []interface{}{
      -		false,
      -		true,
      -		1,
      -		uint(1),
      -		1.0,
      -		1.1,
      -		1.2,
      -		2,
      -		uint(2),
      -		2.0,
      -		2.1,
      -		"",
      -		".1",
      -		".2",
      -		".a",
      -		"1",
      -		"2",
      -		"a!10",
      -		"a/2",
      -		"a/10",
      -		"a~10",
      -		"ab/1",
      -		"b/1",
      -		"b/01",
      -		"b/2",
      -		"b/02",
      -		"b/3",
      -		"b/03",
      -		"b1",
      -		"b01",
      -		"b3",
      -		"c2.10",
      -		"c10.2",
      -		"d1",
      -		"d12",
      -		"d12a",
      -	}
      -	m := make(map[interface{}]int)
      -	for _, k := range order {
      -		m[k] = 1
      -	}
      -	data, err := yaml.Marshal(m)
      -	c.Assert(err, IsNil)
      -	out := "\n" + string(data)
      -	last := 0
      -	for i, k := range order {
      -		repr := fmt.Sprint(k)
      -		if s, ok := k.(string); ok {
      -			if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
      -				repr = `"` + repr + `"`
      -			}
      -		}
      -		index := strings.Index(out, "\n"+repr+":")
      -		if index == -1 {
      -			c.Fatalf("%#v is not in the output: %#v", k, out)
      -		}
      -		if index < last {
      -			c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
      -		}
      -		last = index
      -	}
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/parserc.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/parserc.go
      deleted file mode 100644
      index 0a7037ad..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/parserc.go
      +++ /dev/null
      @@ -1,1096 +0,0 @@
      -package yaml
      -
      -import (
      -	"bytes"
      -)
      -
      -// The parser implements the following grammar:
      -//
      -// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
      -// implicit_document    ::= block_node DOCUMENT-END*
      -// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
      -// block_node_or_indentless_sequence    ::=
      -//                          ALIAS
      -//                          | properties (block_content | indentless_block_sequence)?
      -//                          | block_content
      -//                          | indentless_block_sequence
      -// block_node           ::= ALIAS
      -//                          | properties block_content?
      -//                          | block_content
      -// flow_node            ::= ALIAS
      -//                          | properties flow_content?
      -//                          | flow_content
      -// properties           ::= TAG ANCHOR? | ANCHOR TAG?
      -// block_content        ::= block_collection | flow_collection | SCALAR
      -// flow_content         ::= flow_collection | SCALAR
      -// block_collection     ::= block_sequence | block_mapping
      -// flow_collection      ::= flow_sequence | flow_mapping
      -// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
      -// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
      -// block_mapping        ::= BLOCK-MAPPING_START
      -//                          ((KEY block_node_or_indentless_sequence?)?
      -//                          (VALUE block_node_or_indentless_sequence?)?)*
      -//                          BLOCK-END
      -// flow_sequence        ::= FLOW-SEQUENCE-START
      -//                          (flow_sequence_entry FLOW-ENTRY)*
      -//                          flow_sequence_entry?
      -//                          FLOW-SEQUENCE-END
      -// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -// flow_mapping         ::= FLOW-MAPPING-START
      -//                          (flow_mapping_entry FLOW-ENTRY)*
      -//                          flow_mapping_entry?
      -//                          FLOW-MAPPING-END
      -// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -
      -// Peek the next token in the token queue.
      -func peek_token(parser *yaml_parser_t) *yaml_token_t {
      -	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
      -		return &parser.tokens[parser.tokens_head]
      -	}
      -	return nil
      -}
      -
      -// Remove the next token from the queue (must be called after peek_token).
      -func skip_token(parser *yaml_parser_t) {
      -	parser.token_available = false
      -	parser.tokens_parsed++
      -	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
      -	parser.tokens_head++
      -}
      -
      -// Get the next event.
      -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	// Erase the event object.
      -	*event = yaml_event_t{}
      -
      -	// No events after the end of the stream or error.
      -	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
      -		return true
      -	}
      -
      -	// Generate the next event.
      -	return yaml_parser_state_machine(parser, event)
      -}
      -
      -// Set parser error.
      -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
      -	parser.error = yaml_PARSER_ERROR
      -	parser.problem = problem
      -	parser.problem_mark = problem_mark
      -	return false
      -}
      -
      -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
      -	parser.error = yaml_PARSER_ERROR
      -	parser.context = context
      -	parser.context_mark = context_mark
      -	parser.problem = problem
      -	parser.problem_mark = problem_mark
      -	return false
      -}
      -
      -// State dispatcher.
      -func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	//trace("yaml_parser_state_machine", "state:", parser.state.String())
      -
      -	switch parser.state {
      -	case yaml_PARSE_STREAM_START_STATE:
      -		return yaml_parser_parse_stream_start(parser, event)
      -
      -	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
      -		return yaml_parser_parse_document_start(parser, event, true)
      -
      -	case yaml_PARSE_DOCUMENT_START_STATE:
      -		return yaml_parser_parse_document_start(parser, event, false)
      -
      -	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
      -		return yaml_parser_parse_document_content(parser, event)
      -
      -	case yaml_PARSE_DOCUMENT_END_STATE:
      -		return yaml_parser_parse_document_end(parser, event)
      -
      -	case yaml_PARSE_BLOCK_NODE_STATE:
      -		return yaml_parser_parse_node(parser, event, true, false)
      -
      -	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
      -		return yaml_parser_parse_node(parser, event, true, true)
      -
      -	case yaml_PARSE_FLOW_NODE_STATE:
      -		return yaml_parser_parse_node(parser, event, false, false)
      -
      -	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
      -		return yaml_parser_parse_block_sequence_entry(parser, event, true)
      -
      -	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
      -		return yaml_parser_parse_block_sequence_entry(parser, event, false)
      -
      -	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
      -		return yaml_parser_parse_indentless_sequence_entry(parser, event)
      -
      -	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
      -		return yaml_parser_parse_block_mapping_key(parser, event, true)
      -
      -	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
      -		return yaml_parser_parse_block_mapping_key(parser, event, false)
      -
      -	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
      -		return yaml_parser_parse_block_mapping_value(parser, event)
      -
      -	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
      -		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
      -
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
      -		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
      -
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
      -		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
      -
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
      -		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
      -
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
      -		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
      -
      -	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
      -		return yaml_parser_parse_flow_mapping_key(parser, event, true)
      -
      -	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
      -		return yaml_parser_parse_flow_mapping_key(parser, event, false)
      -
      -	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
      -		return yaml_parser_parse_flow_mapping_value(parser, event, false)
      -
      -	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
      -		return yaml_parser_parse_flow_mapping_value(parser, event, true)
      -
      -	default:
      -		panic("invalid parser state")
      -	}
      -	return false
      -}
      -
      -// Parse the production:
      -// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
      -//              ************
      -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if token.typ != yaml_STREAM_START_TOKEN {
      -		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
      -	}
      -	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
      -	*event = yaml_event_t{
      -		typ:        yaml_STREAM_START_EVENT,
      -		start_mark: token.start_mark,
      -		end_mark:   token.end_mark,
      -		encoding:   token.encoding,
      -	}
      -	skip_token(parser)
      -	return true
      -}
      -
      -// Parse the productions:
      -// implicit_document    ::= block_node DOCUMENT-END*
      -//                          *
      -// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
      -//                          *************************
      -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
      -
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	// Parse extra document end indicators.
      -	if !implicit {
      -		for token.typ == yaml_DOCUMENT_END_TOKEN {
      -			skip_token(parser)
      -			token = peek_token(parser)
      -			if token == nil {
      -				return false
      -			}
      -		}
      -	}
      -
      -	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
      -		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
      -		token.typ != yaml_DOCUMENT_START_TOKEN &&
      -		token.typ != yaml_STREAM_END_TOKEN {
      -		// Parse an implicit document.
      -		if !yaml_parser_process_directives(parser, nil, nil) {
      -			return false
      -		}
      -		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
      -		parser.state = yaml_PARSE_BLOCK_NODE_STATE
      -
      -		*event = yaml_event_t{
      -			typ:        yaml_DOCUMENT_START_EVENT,
      -			start_mark: token.start_mark,
      -			end_mark:   token.end_mark,
      -		}
      -
      -	} else if token.typ != yaml_STREAM_END_TOKEN {
      -		// Parse an explicit document.
      -		var version_directive *yaml_version_directive_t
      -		var tag_directives []yaml_tag_directive_t
      -		start_mark := token.start_mark
      -		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
      -			return false
      -		}
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_DOCUMENT_START_TOKEN {
      -			yaml_parser_set_parser_error(parser,
      -				"did not find expected <document start>", token.start_mark)
      -			return false
      -		}
      -		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
      -		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
      -		end_mark := token.end_mark
      -
      -		*event = yaml_event_t{
      -			typ:               yaml_DOCUMENT_START_EVENT,
      -			start_mark:        start_mark,
      -			end_mark:          end_mark,
      -			version_directive: version_directive,
      -			tag_directives:    tag_directives,
      -			implicit:          false,
      -		}
      -		skip_token(parser)
      -
      -	} else {
      -		// Parse the stream end.
      -		parser.state = yaml_PARSE_END_STATE
      -		*event = yaml_event_t{
      -			typ:        yaml_STREAM_END_EVENT,
      -			start_mark: token.start_mark,
      -			end_mark:   token.end_mark,
      -		}
      -		skip_token(parser)
      -	}
      -
      -	return true
      -}
      -
      -// Parse the productions:
      -// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
      -//                                                    ***********
      -//
      -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
      -		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
      -		token.typ == yaml_DOCUMENT_START_TOKEN ||
      -		token.typ == yaml_DOCUMENT_END_TOKEN ||
      -		token.typ == yaml_STREAM_END_TOKEN {
      -		parser.state = parser.states[len(parser.states)-1]
      -		parser.states = parser.states[:len(parser.states)-1]
      -		return yaml_parser_process_empty_scalar(parser, event,
      -			token.start_mark)
      -	}
      -	return yaml_parser_parse_node(parser, event, true, false)
      -}
      -
      -// Parse the productions:
      -// implicit_document    ::= block_node DOCUMENT-END*
      -//                                     *************
      -// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
      -//
      -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	start_mark := token.start_mark
      -	end_mark := token.start_mark
      -
      -	implicit := true
      -	if token.typ == yaml_DOCUMENT_END_TOKEN {
      -		end_mark = token.end_mark
      -		skip_token(parser)
      -		implicit = false
      -	}
      -
      -	parser.tag_directives = parser.tag_directives[:0]
      -
      -	parser.state = yaml_PARSE_DOCUMENT_START_STATE
      -	*event = yaml_event_t{
      -		typ:        yaml_DOCUMENT_END_EVENT,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -		implicit:   implicit,
      -	}
      -	return true
      -}
      -
      -// Parse the productions:
      -// block_node_or_indentless_sequence    ::=
      -//                          ALIAS
      -//                          *****
      -//                          | properties (block_content | indentless_block_sequence)?
      -//                            **********  *
      -//                          | block_content | indentless_block_sequence
      -//                            *
      -// block_node           ::= ALIAS
      -//                          *****
      -//                          | properties block_content?
      -//                            ********** *
      -//                          | block_content
      -//                            *
      -// flow_node            ::= ALIAS
      -//                          *****
      -//                          | properties flow_content?
      -//                            ********** *
      -//                          | flow_content
      -//                            *
      -// properties           ::= TAG ANCHOR? | ANCHOR TAG?
      -//                          *************************
      -// block_content        ::= block_collection | flow_collection | SCALAR
      -//                                                               ******
      -// flow_content         ::= flow_collection | SCALAR
      -//                                            ******
      -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
      -	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
      -
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	if token.typ == yaml_ALIAS_TOKEN {
      -		parser.state = parser.states[len(parser.states)-1]
      -		parser.states = parser.states[:len(parser.states)-1]
      -		*event = yaml_event_t{
      -			typ:        yaml_ALIAS_EVENT,
      -			start_mark: token.start_mark,
      -			end_mark:   token.end_mark,
      -			anchor:     token.value,
      -		}
      -		skip_token(parser)
      -		return true
      -	}
      -
      -	start_mark := token.start_mark
      -	end_mark := token.start_mark
      -
      -	var tag_token bool
      -	var tag_handle, tag_suffix, anchor []byte
      -	var tag_mark yaml_mark_t
      -	if token.typ == yaml_ANCHOR_TOKEN {
      -		anchor = token.value
      -		start_mark = token.start_mark
      -		end_mark = token.end_mark
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ == yaml_TAG_TOKEN {
      -			tag_token = true
      -			tag_handle = token.value
      -			tag_suffix = token.suffix
      -			tag_mark = token.start_mark
      -			end_mark = token.end_mark
      -			skip_token(parser)
      -			token = peek_token(parser)
      -			if token == nil {
      -				return false
      -			}
      -		}
      -	} else if token.typ == yaml_TAG_TOKEN {
      -		tag_token = true
      -		tag_handle = token.value
      -		tag_suffix = token.suffix
      -		start_mark = token.start_mark
      -		tag_mark = token.start_mark
      -		end_mark = token.end_mark
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ == yaml_ANCHOR_TOKEN {
      -			anchor = token.value
      -			end_mark = token.end_mark
      -			skip_token(parser)
      -			token = peek_token(parser)
      -			if token == nil {
      -				return false
      -			}
      -		}
      -	}
      -
      -	var tag []byte
      -	if tag_token {
      -		if len(tag_handle) == 0 {
      -			tag = tag_suffix
      -			tag_suffix = nil
      -		} else {
      -			for i := range parser.tag_directives {
      -				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
      -					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
      -					tag = append(tag, tag_suffix...)
      -					break
      -				}
      -			}
      -			if len(tag) == 0 {
      -				yaml_parser_set_parser_error_context(parser,
      -					"while parsing a node", start_mark,
      -					"found undefined tag handle", tag_mark)
      -				return false
      -			}
      -		}
      -	}
      -
      -	implicit := len(tag) == 0
      -	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
      -		end_mark = token.end_mark
      -		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
      -		*event = yaml_event_t{
      -			typ:        yaml_SEQUENCE_START_EVENT,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			anchor:     anchor,
      -			tag:        tag,
      -			implicit:   implicit,
      -			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
      -		}
      -		return true
      -	}
      -	if token.typ == yaml_SCALAR_TOKEN {
      -		var plain_implicit, quoted_implicit bool
      -		end_mark = token.end_mark
      -		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
      -			plain_implicit = true
      -		} else if len(tag) == 0 {
      -			quoted_implicit = true
      -		}
      -		parser.state = parser.states[len(parser.states)-1]
      -		parser.states = parser.states[:len(parser.states)-1]
      -
      -		*event = yaml_event_t{
      -			typ:             yaml_SCALAR_EVENT,
      -			start_mark:      start_mark,
      -			end_mark:        end_mark,
      -			anchor:          anchor,
      -			tag:             tag,
      -			value:           token.value,
      -			implicit:        plain_implicit,
      -			quoted_implicit: quoted_implicit,
      -			style:           yaml_style_t(token.style),
      -		}
      -		skip_token(parser)
      -		return true
      -	}
      -	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
      -		// [Go] Some of the events below can be merged as they differ only on style.
      -		end_mark = token.end_mark
      -		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
      -		*event = yaml_event_t{
      -			typ:        yaml_SEQUENCE_START_EVENT,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			anchor:     anchor,
      -			tag:        tag,
      -			implicit:   implicit,
      -			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
      -		}
      -		return true
      -	}
      -	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
      -		end_mark = token.end_mark
      -		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
      -		*event = yaml_event_t{
      -			typ:        yaml_MAPPING_START_EVENT,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			anchor:     anchor,
      -			tag:        tag,
      -			implicit:   implicit,
      -			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
      -		}
      -		return true
      -	}
      -	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
      -		end_mark = token.end_mark
      -		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
      -		*event = yaml_event_t{
      -			typ:        yaml_SEQUENCE_START_EVENT,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			anchor:     anchor,
      -			tag:        tag,
      -			implicit:   implicit,
      -			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
      -		}
      -		return true
      -	}
      -	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
      -		end_mark = token.end_mark
      -		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
      -		*event = yaml_event_t{
      -			typ:        yaml_MAPPING_START_EVENT,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			anchor:     anchor,
      -			tag:        tag,
      -			implicit:   implicit,
      -			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
      -		}
      -		return true
      -	}
      -	if len(anchor) > 0 || len(tag) > 0 {
      -		parser.state = parser.states[len(parser.states)-1]
      -		parser.states = parser.states[:len(parser.states)-1]
      -
      -		*event = yaml_event_t{
      -			typ:             yaml_SCALAR_EVENT,
      -			start_mark:      start_mark,
      -			end_mark:        end_mark,
      -			anchor:          anchor,
      -			tag:             tag,
      -			implicit:        implicit,
      -			quoted_implicit: false,
      -			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
      -		}
      -		return true
      -	}
      -
      -	context := "while parsing a flow node"
      -	if block {
      -		context = "while parsing a block node"
      -	}
      -	yaml_parser_set_parser_error_context(parser, context, start_mark,
      -		"did not find expected node content", token.start_mark)
      -	return false
      -}
      -
      -// Parse the productions:
      -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
      -//                    ********************  *********** *             *********
      -//
      -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		token := peek_token(parser)
      -		parser.marks = append(parser.marks, token.start_mark)
      -		skip_token(parser)
      -	}
      -
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
      -		mark := token.end_mark
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
      -			return yaml_parser_parse_node(parser, event, true, false)
      -		} else {
      -			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
      -			return yaml_parser_process_empty_scalar(parser, event, mark)
      -		}
      -	}
      -	if token.typ == yaml_BLOCK_END_TOKEN {
      -		parser.state = parser.states[len(parser.states)-1]
      -		parser.states = parser.states[:len(parser.states)-1]
      -		parser.marks = parser.marks[:len(parser.marks)-1]
      -
      -		*event = yaml_event_t{
      -			typ:        yaml_SEQUENCE_END_EVENT,
      -			start_mark: token.start_mark,
      -			end_mark:   token.end_mark,
      -		}
      -
      -		skip_token(parser)
      -		return true
      -	}
      -
      -	context_mark := parser.marks[len(parser.marks)-1]
      -	parser.marks = parser.marks[:len(parser.marks)-1]
      -	return yaml_parser_set_parser_error_context(parser,
      -		"while parsing a block collection", context_mark,
      -		"did not find expected '-' indicator", token.start_mark)
      -}
      -
      -// Parse the productions:
      -// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
      -//                           *********** *
      -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
      -		mark := token.end_mark
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
      -			token.typ != yaml_KEY_TOKEN &&
      -			token.typ != yaml_VALUE_TOKEN &&
      -			token.typ != yaml_BLOCK_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
      -			return yaml_parser_parse_node(parser, event, true, false)
      -		}
      -		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
      -		return yaml_parser_process_empty_scalar(parser, event, mark)
      -	}
      -	parser.state = parser.states[len(parser.states)-1]
      -	parser.states = parser.states[:len(parser.states)-1]
      -
      -	*event = yaml_event_t{
      -		typ:        yaml_SEQUENCE_END_EVENT,
      -		start_mark: token.start_mark,
      -		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
      -	}
      -	return true
      -}
      -
      -// Parse the productions:
      -// block_mapping        ::= BLOCK-MAPPING_START
      -//                          *******************
      -//                          ((KEY block_node_or_indentless_sequence?)?
      -//                            *** *
      -//                          (VALUE block_node_or_indentless_sequence?)?)*
      -//
      -//                          BLOCK-END
      -//                          *********
      -//
      -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		token := peek_token(parser)
      -		parser.marks = append(parser.marks, token.start_mark)
      -		skip_token(parser)
      -	}
      -
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	if token.typ == yaml_KEY_TOKEN {
      -		mark := token.end_mark
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_KEY_TOKEN &&
      -			token.typ != yaml_VALUE_TOKEN &&
      -			token.typ != yaml_BLOCK_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
      -			return yaml_parser_parse_node(parser, event, true, true)
      -		} else {
      -			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
      -			return yaml_parser_process_empty_scalar(parser, event, mark)
      -		}
      -	} else if token.typ == yaml_BLOCK_END_TOKEN {
      -		parser.state = parser.states[len(parser.states)-1]
      -		parser.states = parser.states[:len(parser.states)-1]
      -		parser.marks = parser.marks[:len(parser.marks)-1]
      -		*event = yaml_event_t{
      -			typ:        yaml_MAPPING_END_EVENT,
      -			start_mark: token.start_mark,
      -			end_mark:   token.end_mark,
      -		}
      -		skip_token(parser)
      -		return true
      -	}
      -
      -	context_mark := parser.marks[len(parser.marks)-1]
      -	parser.marks = parser.marks[:len(parser.marks)-1]
      -	return yaml_parser_set_parser_error_context(parser,
      -		"while parsing a block mapping", context_mark,
      -		"did not find expected key", token.start_mark)
      -}
      -
      -// Parse the productions:
      -// block_mapping        ::= BLOCK-MAPPING_START
      -//
      -//                          ((KEY block_node_or_indentless_sequence?)?
      -//
      -//                          (VALUE block_node_or_indentless_sequence?)?)*
      -//                           ***** *
      -//                          BLOCK-END
      -//
      -//
      -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if token.typ == yaml_VALUE_TOKEN {
      -		mark := token.end_mark
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_KEY_TOKEN &&
      -			token.typ != yaml_VALUE_TOKEN &&
      -			token.typ != yaml_BLOCK_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
      -			return yaml_parser_parse_node(parser, event, true, true)
      -		}
      -		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
      -		return yaml_parser_process_empty_scalar(parser, event, mark)
      -	}
      -	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
      -	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
      -}
      -
      -// Parse the productions:
      -// flow_sequence        ::= FLOW-SEQUENCE-START
      -//                          *******************
      -//                          (flow_sequence_entry FLOW-ENTRY)*
      -//                           *                   **********
      -//                          flow_sequence_entry?
      -//                          *
      -//                          FLOW-SEQUENCE-END
      -//                          *****************
      -// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -//                          *
      -//
      -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		token := peek_token(parser)
      -		parser.marks = append(parser.marks, token.start_mark)
      -		skip_token(parser)
      -	}
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
      -		if !first {
      -			if token.typ == yaml_FLOW_ENTRY_TOKEN {
      -				skip_token(parser)
      -				token = peek_token(parser)
      -				if token == nil {
      -					return false
      -				}
      -			} else {
      -				context_mark := parser.marks[len(parser.marks)-1]
      -				parser.marks = parser.marks[:len(parser.marks)-1]
      -				return yaml_parser_set_parser_error_context(parser,
      -					"while parsing a flow sequence", context_mark,
      -					"did not find expected ',' or ']'", token.start_mark)
      -			}
      -		}
      -
      -		if token.typ == yaml_KEY_TOKEN {
      -			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
      -			*event = yaml_event_t{
      -				typ:        yaml_MAPPING_START_EVENT,
      -				start_mark: token.start_mark,
      -				end_mark:   token.end_mark,
      -				implicit:   true,
      -				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
      -			}
      -			skip_token(parser)
      -			return true
      -		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
      -			return yaml_parser_parse_node(parser, event, false, false)
      -		}
      -	}
      -
      -	parser.state = parser.states[len(parser.states)-1]
      -	parser.states = parser.states[:len(parser.states)-1]
      -	parser.marks = parser.marks[:len(parser.marks)-1]
      -
      -	*event = yaml_event_t{
      -		typ:        yaml_SEQUENCE_END_EVENT,
      -		start_mark: token.start_mark,
      -		end_mark:   token.end_mark,
      -	}
      -
      -	skip_token(parser)
      -	return true
      -}
      -
      -//
      -// Parse the productions:
      -// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -//                                      *** *
      -//
      -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if token.typ != yaml_VALUE_TOKEN &&
      -		token.typ != yaml_FLOW_ENTRY_TOKEN &&
      -		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
      -		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
      -		return yaml_parser_parse_node(parser, event, false, false)
      -	}
      -	mark := token.end_mark
      -	skip_token(parser)
      -	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
      -	return yaml_parser_process_empty_scalar(parser, event, mark)
      -}
      -
      -// Parse the productions:
      -// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -//                                                      ***** *
      -//
      -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if token.typ == yaml_VALUE_TOKEN {
      -		skip_token(parser)
      -		token := peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
      -			return yaml_parser_parse_node(parser, event, false, false)
      -		}
      -	}
      -	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
      -	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
      -}
      -
      -// Parse the productions:
      -// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -//                                                                      *
      -//
      -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
      -	*event = yaml_event_t{
      -		typ:        yaml_MAPPING_END_EVENT,
      -		start_mark: token.start_mark,
      -		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
      -	}
      -	return true
      -}
      -
      -// Parse the productions:
      -// flow_mapping         ::= FLOW-MAPPING-START
      -//                          ******************
      -//                          (flow_mapping_entry FLOW-ENTRY)*
      -//                           *                  **********
      -//                          flow_mapping_entry?
      -//                          ******************
      -//                          FLOW-MAPPING-END
      -//                          ****************
      -// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -//                          *           *** *
      -//
      -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		token := peek_token(parser)
      -		parser.marks = append(parser.marks, token.start_mark)
      -		skip_token(parser)
      -	}
      -
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
      -		if !first {
      -			if token.typ == yaml_FLOW_ENTRY_TOKEN {
      -				skip_token(parser)
      -				token = peek_token(parser)
      -				if token == nil {
      -					return false
      -				}
      -			} else {
      -				context_mark := parser.marks[len(parser.marks)-1]
      -				parser.marks = parser.marks[:len(parser.marks)-1]
      -				return yaml_parser_set_parser_error_context(parser,
      -					"while parsing a flow mapping", context_mark,
      -					"did not find expected ',' or '}'", token.start_mark)
      -			}
      -		}
      -
      -		if token.typ == yaml_KEY_TOKEN {
      -			skip_token(parser)
      -			token = peek_token(parser)
      -			if token == nil {
      -				return false
      -			}
      -			if token.typ != yaml_VALUE_TOKEN &&
      -				token.typ != yaml_FLOW_ENTRY_TOKEN &&
      -				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
      -				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
      -				return yaml_parser_parse_node(parser, event, false, false)
      -			} else {
      -				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
      -				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
      -			}
      -		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
      -			return yaml_parser_parse_node(parser, event, false, false)
      -		}
      -	}
      -
      -	parser.state = parser.states[len(parser.states)-1]
      -	parser.states = parser.states[:len(parser.states)-1]
      -	parser.marks = parser.marks[:len(parser.marks)-1]
      -	*event = yaml_event_t{
      -		typ:        yaml_MAPPING_END_EVENT,
      -		start_mark: token.start_mark,
      -		end_mark:   token.end_mark,
      -	}
      -	skip_token(parser)
      -	return true
      -}
      -
      -// Parse the productions:
      -// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -//                                   *                  ***** *
      -//
      -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if empty {
      -		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
      -		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
      -	}
      -	if token.typ == yaml_VALUE_TOKEN {
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
      -			return yaml_parser_parse_node(parser, event, false, false)
      -		}
      -	}
      -	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
      -	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
      -}
      -
      -// Generate an empty scalar event.
      -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
      -	*event = yaml_event_t{
      -		typ:        yaml_SCALAR_EVENT,
      -		start_mark: mark,
      -		end_mark:   mark,
      -		value:      nil, // Empty
      -		implicit:   true,
      -		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
      -	}
      -	return true
      -}
      -
      -var default_tag_directives = []yaml_tag_directive_t{
      -	{[]byte("!"), []byte("!")},
      -	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
      -}
      -
      -// Parse directives.
      -func yaml_parser_process_directives(parser *yaml_parser_t,
      -	version_directive_ref **yaml_version_directive_t,
      -	tag_directives_ref *[]yaml_tag_directive_t) bool {
      -
      -	var version_directive *yaml_version_directive_t
      -	var tag_directives []yaml_tag_directive_t
      -
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
      -		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
      -			if version_directive != nil {
      -				yaml_parser_set_parser_error(parser,
      -					"found duplicate %YAML directive", token.start_mark)
      -				return false
      -			}
      -			if token.major != 1 || token.minor != 1 {
      -				yaml_parser_set_parser_error(parser,
      -					"found incompatible YAML document", token.start_mark)
      -				return false
      -			}
      -			version_directive = &yaml_version_directive_t{
      -				major: token.major,
      -				minor: token.minor,
      -			}
      -		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
      -			value := yaml_tag_directive_t{
      -				handle: token.value,
      -				prefix: token.prefix,
      -			}
      -			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
      -				return false
      -			}
      -			tag_directives = append(tag_directives, value)
      -		}
      -
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -	}
      -
      -	for i := range default_tag_directives {
      -		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
      -			return false
      -		}
      -	}
      -
      -	if version_directive_ref != nil {
      -		*version_directive_ref = version_directive
      -	}
      -	if tag_directives_ref != nil {
      -		*tag_directives_ref = tag_directives
      -	}
      -	return true
      -}
      -
      -// Append a tag directive to the directives stack.
      -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
      -	for i := range parser.tag_directives {
      -		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
      -			if allow_duplicates {
      -				return true
      -			}
      -			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
      -		}
      -	}
      -
      -	// [Go] I suspect the copy is unnecessary. This was likely done
      -	// because there was no way to track ownership of the data.
      -	value_copy := yaml_tag_directive_t{
      -		handle: make([]byte, len(value.handle)),
      -		prefix: make([]byte, len(value.prefix)),
      -	}
      -	copy(value_copy.handle, value.handle)
      -	copy(value_copy.prefix, value.prefix)
      -	parser.tag_directives = append(parser.tag_directives, value_copy)
      -	return true
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/readerc.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/readerc.go
      deleted file mode 100644
      index d5fb0972..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/readerc.go
      +++ /dev/null
      @@ -1,391 +0,0 @@
      -package yaml
      -
      -import (
      -	"io"
      -)
      -
      -// Set the reader error and return 0.
      -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
      -	parser.error = yaml_READER_ERROR
      -	parser.problem = problem
      -	parser.problem_offset = offset
      -	parser.problem_value = value
      -	return false
      -}
      -
      -// Byte order marks.
      -const (
      -	bom_UTF8    = "\xef\xbb\xbf"
      -	bom_UTF16LE = "\xff\xfe"
      -	bom_UTF16BE = "\xfe\xff"
      -)
      -
      -// Determine the input stream encoding by checking the BOM symbol. If no BOM is
      -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
      -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
      -	// Ensure that we had enough bytes in the raw buffer.
      -	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
      -		if !yaml_parser_update_raw_buffer(parser) {
      -			return false
      -		}
      -	}
      -
      -	// Determine the encoding.
      -	buf := parser.raw_buffer
      -	pos := parser.raw_buffer_pos
      -	avail := len(buf) - pos
      -	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
      -		parser.encoding = yaml_UTF16LE_ENCODING
      -		parser.raw_buffer_pos += 2
      -		parser.offset += 2
      -	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
      -		parser.encoding = yaml_UTF16BE_ENCODING
      -		parser.raw_buffer_pos += 2
      -		parser.offset += 2
      -	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
      -		parser.encoding = yaml_UTF8_ENCODING
      -		parser.raw_buffer_pos += 3
      -		parser.offset += 3
      -	} else {
      -		parser.encoding = yaml_UTF8_ENCODING
      -	}
      -	return true
      -}
      -
      -// Update the raw buffer.
      -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
      -	size_read := 0
      -
      -	// Return if the raw buffer is full.
      -	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
      -		return true
      -	}
      -
      -	// Return on EOF.
      -	if parser.eof {
      -		return true
      -	}
      -
      -	// Move the remaining bytes in the raw buffer to the beginning.
      -	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
      -		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
      -	}
      -	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
      -	parser.raw_buffer_pos = 0
      -
      -	// Call the read handler to fill the buffer.
      -	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
      -	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
      -	if err == io.EOF {
      -		parser.eof = true
      -	} else if err != nil {
      -		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
      -	}
      -	return true
      -}
      -
      -// Ensure that the buffer contains at least `length` characters.
      -// Return true on success, false on failure.
      -//
      -// The length is supposed to be significantly less that the buffer size.
      -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
      -	if parser.read_handler == nil {
      -		panic("read handler must be set")
      -	}
      -
      -	// If the EOF flag is set and the raw buffer is empty, do nothing.
      -	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
      -		return true
      -	}
      -
      -	// Return if the buffer contains enough characters.
      -	if parser.unread >= length {
      -		return true
      -	}
      -
      -	// Determine the input encoding if it is not known yet.
      -	if parser.encoding == yaml_ANY_ENCODING {
      -		if !yaml_parser_determine_encoding(parser) {
      -			return false
      -		}
      -	}
      -
      -	// Move the unread characters to the beginning of the buffer.
      -	buffer_len := len(parser.buffer)
      -	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
      -		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
      -		buffer_len -= parser.buffer_pos
      -		parser.buffer_pos = 0
      -	} else if parser.buffer_pos == buffer_len {
      -		buffer_len = 0
      -		parser.buffer_pos = 0
      -	}
      -
      -	// Open the whole buffer for writing, and cut it before returning.
      -	parser.buffer = parser.buffer[:cap(parser.buffer)]
      -
      -	// Fill the buffer until it has enough characters.
      -	first := true
      -	for parser.unread < length {
      -
      -		// Fill the raw buffer if necessary.
      -		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
      -			if !yaml_parser_update_raw_buffer(parser) {
      -				parser.buffer = parser.buffer[:buffer_len]
      -				return false
      -			}
      -		}
      -		first = false
      -
      -		// Decode the raw buffer.
      -	inner:
      -		for parser.raw_buffer_pos != len(parser.raw_buffer) {
      -			var value rune
      -			var width int
      -
      -			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
      -
      -			// Decode the next character.
      -			switch parser.encoding {
      -			case yaml_UTF8_ENCODING:
      -				// Decode a UTF-8 character.  Check RFC 3629
      -				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
      -				//
      -				// The following table (taken from the RFC) is used for
      -				// decoding.
      -				//
      -				//    Char. number range |        UTF-8 octet sequence
      -				//      (hexadecimal)    |              (binary)
      -				//   --------------------+------------------------------------
      -				//   0000 0000-0000 007F | 0xxxxxxx
      -				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
      -				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
      -				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
      -				//
      -				// Additionally, the characters in the range 0xD800-0xDFFF
      -				// are prohibited as they are reserved for use with UTF-16
      -				// surrogate pairs.
      -
      -				// Determine the length of the UTF-8 sequence.
      -				octet := parser.raw_buffer[parser.raw_buffer_pos]
      -				switch {
      -				case octet&0x80 == 0x00:
      -					width = 1
      -				case octet&0xE0 == 0xC0:
      -					width = 2
      -				case octet&0xF0 == 0xE0:
      -					width = 3
      -				case octet&0xF8 == 0xF0:
      -					width = 4
      -				default:
      -					// The leading octet is invalid.
      -					return yaml_parser_set_reader_error(parser,
      -						"invalid leading UTF-8 octet",
      -						parser.offset, int(octet))
      -				}
      -
      -				// Check if the raw buffer contains an incomplete character.
      -				if width > raw_unread {
      -					if parser.eof {
      -						return yaml_parser_set_reader_error(parser,
      -							"incomplete UTF-8 octet sequence",
      -							parser.offset, -1)
      -					}
      -					break inner
      -				}
      -
      -				// Decode the leading octet.
      -				switch {
      -				case octet&0x80 == 0x00:
      -					value = rune(octet & 0x7F)
      -				case octet&0xE0 == 0xC0:
      -					value = rune(octet & 0x1F)
      -				case octet&0xF0 == 0xE0:
      -					value = rune(octet & 0x0F)
      -				case octet&0xF8 == 0xF0:
      -					value = rune(octet & 0x07)
      -				default:
      -					value = 0
      -				}
      -
      -				// Check and decode the trailing octets.
      -				for k := 1; k < width; k++ {
      -					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
      -
      -					// Check if the octet is valid.
      -					if (octet & 0xC0) != 0x80 {
      -						return yaml_parser_set_reader_error(parser,
      -							"invalid trailing UTF-8 octet",
      -							parser.offset+k, int(octet))
      -					}
      -
      -					// Decode the octet.
      -					value = (value << 6) + rune(octet&0x3F)
      -				}
      -
      -				// Check the length of the sequence against the value.
      -				switch {
      -				case width == 1:
      -				case width == 2 && value >= 0x80:
      -				case width == 3 && value >= 0x800:
      -				case width == 4 && value >= 0x10000:
      -				default:
      -					return yaml_parser_set_reader_error(parser,
      -						"invalid length of a UTF-8 sequence",
      -						parser.offset, -1)
      -				}
      -
      -				// Check the range of the value.
      -				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
      -					return yaml_parser_set_reader_error(parser,
      -						"invalid Unicode character",
      -						parser.offset, int(value))
      -				}
      -
      -			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
      -				var low, high int
      -				if parser.encoding == yaml_UTF16LE_ENCODING {
      -					low, high = 0, 1
      -				} else {
      -					high, low = 1, 0
      -				}
      -
      -				// The UTF-16 encoding is not as simple as one might
      -				// naively think.  Check RFC 2781
      -				// (http://www.ietf.org/rfc/rfc2781.txt).
      -				//
      -				// Normally, two subsequent bytes describe a Unicode
      -				// character.  However a special technique (called a
      -				// surrogate pair) is used for specifying character
      -				// values larger than 0xFFFF.
      -				//
      -				// A surrogate pair consists of two pseudo-characters:
      -				//      high surrogate area (0xD800-0xDBFF)
      -				//      low surrogate area (0xDC00-0xDFFF)
      -				//
      -				// The following formulas are used for decoding
      -				// and encoding characters using surrogate pairs:
      -				//
      -				//  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
      -				//  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
      -				//  W1 = 110110yyyyyyyyyy
      -				//  W2 = 110111xxxxxxxxxx
      -				//
      -				// where U is the character value, W1 is the high surrogate
      -				// area, W2 is the low surrogate area.
      -
      -				// Check for incomplete UTF-16 character.
      -				if raw_unread < 2 {
      -					if parser.eof {
      -						return yaml_parser_set_reader_error(parser,
      -							"incomplete UTF-16 character",
      -							parser.offset, -1)
      -					}
      -					break inner
      -				}
      -
      -				// Get the character.
      -				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
      -					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
      -
      -				// Check for unexpected low surrogate area.
      -				if value&0xFC00 == 0xDC00 {
      -					return yaml_parser_set_reader_error(parser,
      -						"unexpected low surrogate area",
      -						parser.offset, int(value))
      -				}
      -
      -				// Check for a high surrogate area.
      -				if value&0xFC00 == 0xD800 {
      -					width = 4
      -
      -					// Check for incomplete surrogate pair.
      -					if raw_unread < 4 {
      -						if parser.eof {
      -							return yaml_parser_set_reader_error(parser,
      -								"incomplete UTF-16 surrogate pair",
      -								parser.offset, -1)
      -						}
      -						break inner
      -					}
      -
      -					// Get the next character.
      -					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
      -						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
      -
      -					// Check for a low surrogate area.
      -					if value2&0xFC00 != 0xDC00 {
      -						return yaml_parser_set_reader_error(parser,
      -							"expected low surrogate area",
      -							parser.offset+2, int(value2))
      -					}
      -
      -					// Generate the value of the surrogate pair.
      -					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
      -				} else {
      -					width = 2
      -				}
      -
      -			default:
      -				panic("impossible")
      -			}
      -
      -			// Check if the character is in the allowed range:
      -			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
      -			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
      -			//      | [#x10000-#x10FFFF]                        (32 bit)
      -			switch {
      -			case value == 0x09:
      -			case value == 0x0A:
      -			case value == 0x0D:
      -			case value >= 0x20 && value <= 0x7E:
      -			case value == 0x85:
      -			case value >= 0xA0 && value <= 0xD7FF:
      -			case value >= 0xE000 && value <= 0xFFFD:
      -			case value >= 0x10000 && value <= 0x10FFFF:
      -			default:
      -				return yaml_parser_set_reader_error(parser,
      -					"control characters are not allowed",
      -					parser.offset, int(value))
      -			}
      -
      -			// Move the raw pointers.
      -			parser.raw_buffer_pos += width
      -			parser.offset += width
      -
      -			// Finally put the character into the buffer.
      -			if value <= 0x7F {
      -				// 0000 0000-0000 007F . 0xxxxxxx
      -				parser.buffer[buffer_len+0] = byte(value)
      -			} else if value <= 0x7FF {
      -				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
      -				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
      -				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
      -			} else if value <= 0xFFFF {
      -				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
      -				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
      -				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
      -				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
      -			} else {
      -				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
      -				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
      -				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
      -				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
      -				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
      -			}
      -			buffer_len += width
      -
      -			parser.unread++
      -		}
      -
      -		// On EOF, put NUL into the buffer and return.
      -		if parser.eof {
      -			parser.buffer[buffer_len] = 0
      -			buffer_len++
      -			parser.unread++
      -			break
      -		}
      -	}
      -	parser.buffer = parser.buffer[:buffer_len]
      -	return true
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/resolve.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/resolve.go
      deleted file mode 100644
      index 93a86327..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/resolve.go
      +++ /dev/null
      @@ -1,203 +0,0 @@
      -package yaml
      -
      -import (
      -	"encoding/base64"
      -	"math"
      -	"strconv"
      -	"strings"
      -	"unicode/utf8"
      -)
      -
      -type resolveMapItem struct {
      -	value interface{}
      -	tag   string
      -}
      -
      -var resolveTable = make([]byte, 256)
      -var resolveMap = make(map[string]resolveMapItem)
      -
      -func init() {
      -	t := resolveTable
      -	t[int('+')] = 'S' // Sign
      -	t[int('-')] = 'S'
      -	for _, c := range "0123456789" {
      -		t[int(c)] = 'D' // Digit
      -	}
      -	for _, c := range "yYnNtTfFoO~" {
      -		t[int(c)] = 'M' // In map
      -	}
      -	t[int('.')] = '.' // Float (potentially in map)
      -
      -	var resolveMapList = []struct {
      -		v   interface{}
      -		tag string
      -		l   []string
      -	}{
      -		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
      -		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
      -		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
      -		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
      -		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
      -		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
      -		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
      -		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
      -		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
      -		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
      -		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
      -		{"<<", yaml_MERGE_TAG, []string{"<<"}},
      -	}
      -
      -	m := resolveMap
      -	for _, item := range resolveMapList {
      -		for _, s := range item.l {
      -			m[s] = resolveMapItem{item.v, item.tag}
      -		}
      -	}
      -}
      -
      -const longTagPrefix = "tag:yaml.org,2002:"
      -
      -func shortTag(tag string) string {
      -	// TODO This can easily be made faster and produce less garbage.
      -	if strings.HasPrefix(tag, longTagPrefix) {
      -		return "!!" + tag[len(longTagPrefix):]
      -	}
      -	return tag
      -}
      -
      -func longTag(tag string) string {
      -	if strings.HasPrefix(tag, "!!") {
      -		return longTagPrefix + tag[2:]
      -	}
      -	return tag
      -}
      -
      -func resolvableTag(tag string) bool {
      -	switch tag {
      -	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
      -		return true
      -	}
      -	return false
      -}
      -
      -func resolve(tag string, in string) (rtag string, out interface{}) {
      -	if !resolvableTag(tag) {
      -		return tag, in
      -	}
      -
      -	defer func() {
      -		switch tag {
      -		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
      -			return
      -		}
      -		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
      -	}()
      -
      -	// Any data is accepted as a !!str or !!binary.
      -	// Otherwise, the prefix is enough of a hint about what it might be.
      -	hint := byte('N')
      -	if in != "" {
      -		hint = resolveTable[in[0]]
      -	}
      -	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
      -		// Handle things we can lookup in a map.
      -		if item, ok := resolveMap[in]; ok {
      -			return item.tag, item.value
      -		}
      -
      -		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
      -		// are purposefully unsupported here. They're still quoted on
      -		// the way out for compatibility with other parser, though.
      -
      -		switch hint {
      -		case 'M':
      -			// We've already checked the map above.
      -
      -		case '.':
      -			// Not in the map, so maybe a normal float.
      -			floatv, err := strconv.ParseFloat(in, 64)
      -			if err == nil {
      -				return yaml_FLOAT_TAG, floatv
      -			}
      -
      -		case 'D', 'S':
      -			// Int, float, or timestamp.
      -			plain := strings.Replace(in, "_", "", -1)
      -			intv, err := strconv.ParseInt(plain, 0, 64)
      -			if err == nil {
      -				if intv == int64(int(intv)) {
      -					return yaml_INT_TAG, int(intv)
      -				} else {
      -					return yaml_INT_TAG, intv
      -				}
      -			}
      -			uintv, err := strconv.ParseUint(plain, 0, 64)
      -			if err == nil {
      -				return yaml_INT_TAG, uintv
      -			}
      -			floatv, err := strconv.ParseFloat(plain, 64)
      -			if err == nil {
      -				return yaml_FLOAT_TAG, floatv
      -			}
      -			if strings.HasPrefix(plain, "0b") {
      -				intv, err := strconv.ParseInt(plain[2:], 2, 64)
      -				if err == nil {
      -					if intv == int64(int(intv)) {
      -						return yaml_INT_TAG, int(intv)
      -					} else {
      -						return yaml_INT_TAG, intv
      -					}
      -				}
      -				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
      -				if err == nil {
      -					return yaml_INT_TAG, uintv
      -				}
      -			} else if strings.HasPrefix(plain, "-0b") {
      -				intv, err := strconv.ParseInt(plain[3:], 2, 64)
      -				if err == nil {
      -					if intv == int64(int(intv)) {
      -						return yaml_INT_TAG, -int(intv)
      -					} else {
      -						return yaml_INT_TAG, -intv
      -					}
      -				}
      -			}
      -			// XXX Handle timestamps here.
      -
      -		default:
      -			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
      -		}
      -	}
      -	if tag == yaml_BINARY_TAG {
      -		return yaml_BINARY_TAG, in
      -	}
      -	if utf8.ValidString(in) {
      -		return yaml_STR_TAG, in
      -	}
      -	return yaml_BINARY_TAG, encodeBase64(in)
      -}
      -
      -// encodeBase64 encodes s as base64 that is broken up into multiple lines
      -// as appropriate for the resulting length.
      -func encodeBase64(s string) string {
      -	const lineLen = 70
      -	encLen := base64.StdEncoding.EncodedLen(len(s))
      -	lines := encLen/lineLen + 1
      -	buf := make([]byte, encLen*2+lines)
      -	in := buf[0:encLen]
      -	out := buf[encLen:]
      -	base64.StdEncoding.Encode(in, []byte(s))
      -	k := 0
      -	for i := 0; i < len(in); i += lineLen {
      -		j := i + lineLen
      -		if j > len(in) {
      -			j = len(in)
      -		}
      -		k += copy(out[k:], in[i:j])
      -		if lines > 1 {
      -			out[k] = '\n'
      -			k++
      -		}
      -	}
      -	return string(out[:k])
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/scannerc.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/scannerc.go
      deleted file mode 100644
      index fe93b190..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/scannerc.go
      +++ /dev/null
      @@ -1,2710 +0,0 @@
      -package yaml
      -
      -import (
      -	"bytes"
      -	"fmt"
      -)
      -
      -// Introduction
      -// ************
      -//
      -// The following notes assume that you are familiar with the YAML specification
      -// (http://yaml.org/spec/cvs/current.html).  We mostly follow it, although in
      -// some cases we are less restrictive that it requires.
      -//
      -// The process of transforming a YAML stream into a sequence of events is
      -// divided on two steps: Scanning and Parsing.
      -//
      -// The Scanner transforms the input stream into a sequence of tokens, while the
      -// parser transform the sequence of tokens produced by the Scanner into a
      -// sequence of parsing events.
      -//
      -// The Scanner is rather clever and complicated. The Parser, on the contrary,
      -// is a straightforward implementation of a recursive-descendant parser (or,
      -// LL(1) parser, as it is usually called).
      -//
      -// Actually there are two issues of Scanning that might be called "clever", the
      -// rest is quite straightforward.  The issues are "block collection start" and
      -// "simple keys".  Both issues are explained below in details.
      -//
      -// Here the Scanning step is explained and implemented.  We start with the list
      -// of all the tokens produced by the Scanner together with short descriptions.
      -//
      -// Now, tokens:
      -//
      -//      STREAM-START(encoding)          # The stream start.
      -//      STREAM-END                      # The stream end.
      -//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
      -//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
      -//      DOCUMENT-START                  # '---'
      -//      DOCUMENT-END                    # '...'
      -//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
      -//      BLOCK-MAPPING-START             # sequence or a block mapping.
      -//      BLOCK-END                       # Indentation decrease.
      -//      FLOW-SEQUENCE-START             # '['
      -//      FLOW-SEQUENCE-END               # ']'
      -//      BLOCK-SEQUENCE-START            # '{'
      -//      BLOCK-SEQUENCE-END              # '}'
      -//      BLOCK-ENTRY                     # '-'
      -//      FLOW-ENTRY                      # ','
      -//      KEY                             # '?' or nothing (simple keys).
      -//      VALUE                           # ':'
      -//      ALIAS(anchor)                   # '*anchor'
      -//      ANCHOR(anchor)                  # '&anchor'
      -//      TAG(handle,suffix)              # '!handle!suffix'
      -//      SCALAR(value,style)             # A scalar.
      -//
      -// The following two tokens are "virtual" tokens denoting the beginning and the
      -// end of the stream:
      -//
      -//      STREAM-START(encoding)
      -//      STREAM-END
      -//
      -// We pass the information about the input stream encoding with the
      -// STREAM-START token.
      -//
      -// The next two tokens are responsible for tags:
      -//
      -//      VERSION-DIRECTIVE(major,minor)
      -//      TAG-DIRECTIVE(handle,prefix)
      -//
      -// Example:
      -//
      -//      %YAML   1.1
      -//      %TAG    !   !foo
      -//      %TAG    !yaml!  tag:yaml.org,2002:
      -//      ---
      -//
      -// The correspoding sequence of tokens:
      -//
      -//      STREAM-START(utf-8)
      -//      VERSION-DIRECTIVE(1,1)
      -//      TAG-DIRECTIVE("!","!foo")
      -//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
      -//      DOCUMENT-START
      -//      STREAM-END
      -//
      -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
      -// line.
      -//
      -// The document start and end indicators are represented by:
      -//
      -//      DOCUMENT-START
      -//      DOCUMENT-END
      -//
      -// Note that if a YAML stream contains an implicit document (without '---'
      -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
      -// produced.
      -//
      -// In the following examples, we present whole documents together with the
      -// produced tokens.
      -//
      -//      1. An implicit document:
      -//
      -//          'a scalar'
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          SCALAR("a scalar",single-quoted)
      -//          STREAM-END
      -//
      -//      2. An explicit document:
      -//
      -//          ---
      -//          'a scalar'
      -//          ...
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          DOCUMENT-START
      -//          SCALAR("a scalar",single-quoted)
      -//          DOCUMENT-END
      -//          STREAM-END
      -//
      -//      3. Several documents in a stream:
      -//
      -//          'a scalar'
      -//          ---
      -//          'another scalar'
      -//          ---
      -//          'yet another scalar'
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          SCALAR("a scalar",single-quoted)
      -//          DOCUMENT-START
      -//          SCALAR("another scalar",single-quoted)
      -//          DOCUMENT-START
      -//          SCALAR("yet another scalar",single-quoted)
      -//          STREAM-END
      -//
      -// We have already introduced the SCALAR token above.  The following tokens are
      -// used to describe aliases, anchors, tag, and scalars:
      -//
      -//      ALIAS(anchor)
      -//      ANCHOR(anchor)
      -//      TAG(handle,suffix)
      -//      SCALAR(value,style)
      -//
      -// The following series of examples illustrate the usage of these tokens:
      -//
      -//      1. A recursive sequence:
      -//
      -//          &A [ *A ]
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          ANCHOR("A")
      -//          FLOW-SEQUENCE-START
      -//          ALIAS("A")
      -//          FLOW-SEQUENCE-END
      -//          STREAM-END
      -//
      -//      2. A tagged scalar:
      -//
      -//          !!float "3.14"  # A good approximation.
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          TAG("!!","float")
      -//          SCALAR("3.14",double-quoted)
      -//          STREAM-END
      -//
      -//      3. Various scalar styles:
      -//
      -//          --- # Implicit empty plain scalars do not produce tokens.
      -//          --- a plain scalar
      -//          --- 'a single-quoted scalar'
      -//          --- "a double-quoted scalar"
      -//          --- |-
      -//            a literal scalar
      -//          --- >-
      -//            a folded
      -//            scalar
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          DOCUMENT-START
      -//          DOCUMENT-START
      -//          SCALAR("a plain scalar",plain)
      -//          DOCUMENT-START
      -//          SCALAR("a single-quoted scalar",single-quoted)
      -//          DOCUMENT-START
      -//          SCALAR("a double-quoted scalar",double-quoted)
      -//          DOCUMENT-START
      -//          SCALAR("a literal scalar",literal)
      -//          DOCUMENT-START
      -//          SCALAR("a folded scalar",folded)
      -//          STREAM-END
      -//
      -// Now it's time to review collection-related tokens. We will start with
      -// flow collections:
      -//
      -//      FLOW-SEQUENCE-START
      -//      FLOW-SEQUENCE-END
      -//      FLOW-MAPPING-START
      -//      FLOW-MAPPING-END
      -//      FLOW-ENTRY
      -//      KEY
      -//      VALUE
      -//
      -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
      -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
      -// correspondingly.  FLOW-ENTRY represent the ',' indicator.  Finally the
      -// indicators '?' and ':', which are used for denoting mapping keys and values,
      -// are represented by the KEY and VALUE tokens.
      -//
      -// The following examples show flow collections:
      -//
      -//      1. A flow sequence:
      -//
      -//          [item 1, item 2, item 3]
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          FLOW-SEQUENCE-START
      -//          SCALAR("item 1",plain)
      -//          FLOW-ENTRY
      -//          SCALAR("item 2",plain)
      -//          FLOW-ENTRY
      -//          SCALAR("item 3",plain)
      -//          FLOW-SEQUENCE-END
      -//          STREAM-END
      -//
      -//      2. A flow mapping:
      -//
      -//          {
      -//              a simple key: a value,  # Note that the KEY token is produced.
      -//              ? a complex key: another value,
      -//          }
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          FLOW-MAPPING-START
      -//          KEY
      -//          SCALAR("a simple key",plain)
      -//          VALUE
      -//          SCALAR("a value",plain)
      -//          FLOW-ENTRY
      -//          KEY
      -//          SCALAR("a complex key",plain)
      -//          VALUE
      -//          SCALAR("another value",plain)
      -//          FLOW-ENTRY
      -//          FLOW-MAPPING-END
      -//          STREAM-END
      -//
      -// A simple key is a key which is not denoted by the '?' indicator.  Note that
      -// the Scanner still produce the KEY token whenever it encounters a simple key.
      -//
      -// For scanning block collections, the following tokens are used (note that we
      -// repeat KEY and VALUE here):
      -//
      -//      BLOCK-SEQUENCE-START
      -//      BLOCK-MAPPING-START
      -//      BLOCK-END
      -//      BLOCK-ENTRY
      -//      KEY
      -//      VALUE
      -//
      -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
      -// increase that precedes a block collection (cf. the INDENT token in Python).
      -// The token BLOCK-END denote indentation decrease that ends a block collection
      -// (cf. the DEDENT token in Python).  However YAML has some syntax pecularities
      -// that makes detections of these tokens more complex.
      -//
      -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
      -// '-', '?', and ':' correspondingly.
      -//
      -// The following examples show how the tokens BLOCK-SEQUENCE-START,
      -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
      -//
      -//      1. Block sequences:
      -//
      -//          - item 1
      -//          - item 2
      -//          -
      -//            - item 3.1
      -//            - item 3.2
      -//          -
      -//            key 1: value 1
      -//            key 2: value 2
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          BLOCK-SEQUENCE-START
      -//          BLOCK-ENTRY
      -//          SCALAR("item 1",plain)
      -//          BLOCK-ENTRY
      -//          SCALAR("item 2",plain)
      -//          BLOCK-ENTRY
      -//          BLOCK-SEQUENCE-START
      -//          BLOCK-ENTRY
      -//          SCALAR("item 3.1",plain)
      -//          BLOCK-ENTRY
      -//          SCALAR("item 3.2",plain)
      -//          BLOCK-END
      -//          BLOCK-ENTRY
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("key 1",plain)
      -//          VALUE
      -//          SCALAR("value 1",plain)
      -//          KEY
      -//          SCALAR("key 2",plain)
      -//          VALUE
      -//          SCALAR("value 2",plain)
      -//          BLOCK-END
      -//          BLOCK-END
      -//          STREAM-END
      -//
      -//      2. Block mappings:
      -//
      -//          a simple key: a value   # The KEY token is produced here.
      -//          ? a complex key
      -//          : another value
      -//          a mapping:
      -//            key 1: value 1
      -//            key 2: value 2
      -//          a sequence:
      -//            - item 1
      -//            - item 2
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("a simple key",plain)
      -//          VALUE
      -//          SCALAR("a value",plain)
      -//          KEY
      -//          SCALAR("a complex key",plain)
      -//          VALUE
      -//          SCALAR("another value",plain)
      -//          KEY
      -//          SCALAR("a mapping",plain)
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("key 1",plain)
      -//          VALUE
      -//          SCALAR("value 1",plain)
      -//          KEY
      -//          SCALAR("key 2",plain)
      -//          VALUE
      -//          SCALAR("value 2",plain)
      -//          BLOCK-END
      -//          KEY
      -//          SCALAR("a sequence",plain)
      -//          VALUE
      -//          BLOCK-SEQUENCE-START
      -//          BLOCK-ENTRY
      -//          SCALAR("item 1",plain)
      -//          BLOCK-ENTRY
      -//          SCALAR("item 2",plain)
      -//          BLOCK-END
      -//          BLOCK-END
      -//          STREAM-END
      -//
      -// YAML does not always require to start a new block collection from a new
      -// line.  If the current line contains only '-', '?', and ':' indicators, a new
      -// block collection may start at the current line.  The following examples
      -// illustrate this case:
      -//
      -//      1. Collections in a sequence:
      -//
      -//          - - item 1
      -//            - item 2
      -//          - key 1: value 1
      -//            key 2: value 2
      -//          - ? complex key
      -//            : complex value
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          BLOCK-SEQUENCE-START
      -//          BLOCK-ENTRY
      -//          BLOCK-SEQUENCE-START
      -//          BLOCK-ENTRY
      -//          SCALAR("item 1",plain)
      -//          BLOCK-ENTRY
      -//          SCALAR("item 2",plain)
      -//          BLOCK-END
      -//          BLOCK-ENTRY
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("key 1",plain)
      -//          VALUE
      -//          SCALAR("value 1",plain)
      -//          KEY
      -//          SCALAR("key 2",plain)
      -//          VALUE
      -//          SCALAR("value 2",plain)
      -//          BLOCK-END
      -//          BLOCK-ENTRY
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("complex key")
      -//          VALUE
      -//          SCALAR("complex value")
      -//          BLOCK-END
      -//          BLOCK-END
      -//          STREAM-END
      -//
      -//      2. Collections in a mapping:
      -//
      -//          ? a sequence
      -//          : - item 1
      -//            - item 2
      -//          ? a mapping
      -//          : key 1: value 1
      -//            key 2: value 2
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("a sequence",plain)
      -//          VALUE
      -//          BLOCK-SEQUENCE-START
      -//          BLOCK-ENTRY
      -//          SCALAR("item 1",plain)
      -//          BLOCK-ENTRY
      -//          SCALAR("item 2",plain)
      -//          BLOCK-END
      -//          KEY
      -//          SCALAR("a mapping",plain)
      -//          VALUE
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("key 1",plain)
      -//          VALUE
      -//          SCALAR("value 1",plain)
      -//          KEY
      -//          SCALAR("key 2",plain)
      -//          VALUE
      -//          SCALAR("value 2",plain)
      -//          BLOCK-END
      -//          BLOCK-END
      -//          STREAM-END
      -//
      -// YAML also permits non-indented sequences if they are included into a block
      -// mapping.  In this case, the token BLOCK-SEQUENCE-START is not produced:
      -//
      -//      key:
      -//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
      -//      - item 2
      -//
      -// Tokens:
      -//
      -//      STREAM-START(utf-8)
      -//      BLOCK-MAPPING-START
      -//      KEY
      -//      SCALAR("key",plain)
      -//      VALUE
      -//      BLOCK-ENTRY
      -//      SCALAR("item 1",plain)
      -//      BLOCK-ENTRY
      -//      SCALAR("item 2",plain)
      -//      BLOCK-END
      -//
      -
      -// Ensure that the buffer contains the required number of characters.
      -// Return true on success, false on failure (reader error or memory error).
      -func cache(parser *yaml_parser_t, length int) bool {
      -	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
      -	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
      -}
      -
      -// Advance the buffer pointer.
      -func skip(parser *yaml_parser_t) {
      -	parser.mark.index++
      -	parser.mark.column++
      -	parser.unread--
      -	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
      -}
      -
      -func skip_line(parser *yaml_parser_t) {
      -	if is_crlf(parser.buffer, parser.buffer_pos) {
      -		parser.mark.index += 2
      -		parser.mark.column = 0
      -		parser.mark.line++
      -		parser.unread -= 2
      -		parser.buffer_pos += 2
      -	} else if is_break(parser.buffer, parser.buffer_pos) {
      -		parser.mark.index++
      -		parser.mark.column = 0
      -		parser.mark.line++
      -		parser.unread--
      -		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
      -	}
      -}
      -
      -// Copy a character to a string buffer and advance pointers.
      -func read(parser *yaml_parser_t, s []byte) []byte {
      -	w := width(parser.buffer[parser.buffer_pos])
      -	if w == 0 {
      -		panic("invalid character sequence")
      -	}
      -	if len(s) == 0 {
      -		s = make([]byte, 0, 32)
      -	}
      -	if w == 1 && len(s)+w <= cap(s) {
      -		s = s[:len(s)+1]
      -		s[len(s)-1] = parser.buffer[parser.buffer_pos]
      -		parser.buffer_pos++
      -	} else {
      -		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
      -		parser.buffer_pos += w
      -	}
      -	parser.mark.index++
      -	parser.mark.column++
      -	parser.unread--
      -	return s
      -}
      -
      -// Copy a line break character to a string buffer and advance pointers.
      -func read_line(parser *yaml_parser_t, s []byte) []byte {
      -	buf := parser.buffer
      -	pos := parser.buffer_pos
      -	switch {
      -	case buf[pos] == '\r' && buf[pos+1] == '\n':
      -		// CR LF . LF
      -		s = append(s, '\n')
      -		parser.buffer_pos += 2
      -		parser.mark.index++
      -		parser.unread--
      -	case buf[pos] == '\r' || buf[pos] == '\n':
      -		// CR|LF . LF
      -		s = append(s, '\n')
      -		parser.buffer_pos += 1
      -	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
      -		// NEL . LF
      -		s = append(s, '\n')
      -		parser.buffer_pos += 2
      -	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
      -		// LS|PS . LS|PS
      -		s = append(s, buf[parser.buffer_pos:pos+3]...)
      -		parser.buffer_pos += 3
      -	default:
      -		return s
      -	}
      -	parser.mark.index++
      -	parser.mark.column = 0
      -	parser.mark.line++
      -	parser.unread--
      -	return s
      -}
      -
      -// Get the next token.
      -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
      -	// Erase the token object.
      -	*token = yaml_token_t{} // [Go] Is this necessary?
      -
      -	// No tokens after STREAM-END or error.
      -	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
      -		return true
      -	}
      -
      -	// Ensure that the tokens queue contains enough tokens.
      -	if !parser.token_available {
      -		if !yaml_parser_fetch_more_tokens(parser) {
      -			return false
      -		}
      -	}
      -
      -	// Fetch the next token from the queue.
      -	*token = parser.tokens[parser.tokens_head]
      -	parser.tokens_head++
      -	parser.tokens_parsed++
      -	parser.token_available = false
      -
      -	if token.typ == yaml_STREAM_END_TOKEN {
      -		parser.stream_end_produced = true
      -	}
      -	return true
      -}
      -
      -// Set the scanner error and return false.
      -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
      -	parser.error = yaml_SCANNER_ERROR
      -	parser.context = context
      -	parser.context_mark = context_mark
      -	parser.problem = problem
      -	parser.problem_mark = parser.mark
      -	return false
      -}
      -
      -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
      -	context := "while parsing a tag"
      -	if directive {
      -		context = "while parsing a %TAG directive"
      -	}
      -	return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
      -}
      -
      -func trace(args ...interface{}) func() {
      -	pargs := append([]interface{}{"+++"}, args...)
      -	fmt.Println(pargs...)
      -	pargs = append([]interface{}{"---"}, args...)
      -	return func() { fmt.Println(pargs...) }
      -}
      -
      -// Ensure that the tokens queue contains at least one token which can be
      -// returned to the Parser.
      -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
      -	// While we need more tokens to fetch, do it.
      -	for {
      -		// Check if we really need to fetch more tokens.
      -		need_more_tokens := false
      -
      -		if parser.tokens_head == len(parser.tokens) {
      -			// Queue is empty.
      -			need_more_tokens = true
      -		} else {
      -			// Check if any potential simple key may occupy the head position.
      -			if !yaml_parser_stale_simple_keys(parser) {
      -				return false
      -			}
      -
      -			for i := range parser.simple_keys {
      -				simple_key := &parser.simple_keys[i]
      -				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
      -					need_more_tokens = true
      -					break
      -				}
      -			}
      -		}
      -
      -		// We are finished.
      -		if !need_more_tokens {
      -			break
      -		}
      -		// Fetch the next token.
      -		if !yaml_parser_fetch_next_token(parser) {
      -			return false
      -		}
      -	}
      -
      -	parser.token_available = true
      -	return true
      -}
      -
      -// The dispatcher for token fetchers.
      -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
      -	// Ensure that the buffer is initialized.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	// Check if we just started scanning.  Fetch STREAM-START then.
      -	if !parser.stream_start_produced {
      -		return yaml_parser_fetch_stream_start(parser)
      -	}
      -
      -	// Eat whitespaces and comments until we reach the next token.
      -	if !yaml_parser_scan_to_next_token(parser) {
      -		return false
      -	}
      -
      -	// Remove obsolete potential simple keys.
      -	if !yaml_parser_stale_simple_keys(parser) {
      -		return false
      -	}
      -
      -	// Check the indentation level against the current column.
      -	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
      -		return false
      -	}
      -
      -	// Ensure that the buffer contains at least 4 characters.  4 is the length
      -	// of the longest indicators ('--- ' and '... ').
      -	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
      -		return false
      -	}
      -
      -	// Is it the end of the stream?
      -	if is_z(parser.buffer, parser.buffer_pos) {
      -		return yaml_parser_fetch_stream_end(parser)
      -	}
      -
      -	// Is it a directive?
      -	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
      -		return yaml_parser_fetch_directive(parser)
      -	}
      -
      -	buf := parser.buffer
      -	pos := parser.buffer_pos
      -
      -	// Is it the document start indicator?
      -	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
      -		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
      -	}
      -
      -	// Is it the document end indicator?
      -	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
      -		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
      -	}
      -
      -	// Is it the flow sequence start indicator?
      -	if buf[pos] == '[' {
      -		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
      -	}
      -
      -	// Is it the flow mapping start indicator?
      -	if parser.buffer[parser.buffer_pos] == '{' {
      -		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
      -	}
      -
      -	// Is it the flow sequence end indicator?
      -	if parser.buffer[parser.buffer_pos] == ']' {
      -		return yaml_parser_fetch_flow_collection_end(parser,
      -			yaml_FLOW_SEQUENCE_END_TOKEN)
      -	}
      -
      -	// Is it the flow mapping end indicator?
      -	if parser.buffer[parser.buffer_pos] == '}' {
      -		return yaml_parser_fetch_flow_collection_end(parser,
      -			yaml_FLOW_MAPPING_END_TOKEN)
      -	}
      -
      -	// Is it the flow entry indicator?
      -	if parser.buffer[parser.buffer_pos] == ',' {
      -		return yaml_parser_fetch_flow_entry(parser)
      -	}
      -
      -	// Is it the block entry indicator?
      -	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
      -		return yaml_parser_fetch_block_entry(parser)
      -	}
      -
      -	// Is it the key indicator?
      -	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
      -		return yaml_parser_fetch_key(parser)
      -	}
      -
      -	// Is it the value indicator?
      -	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
      -		return yaml_parser_fetch_value(parser)
      -	}
      -
      -	// Is it an alias?
      -	if parser.buffer[parser.buffer_pos] == '*' {
      -		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
      -	}
      -
      -	// Is it an anchor?
      -	if parser.buffer[parser.buffer_pos] == '&' {
      -		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
      -	}
      -
      -	// Is it a tag?
      -	if parser.buffer[parser.buffer_pos] == '!' {
      -		return yaml_parser_fetch_tag(parser)
      -	}
      -
      -	// Is it a literal scalar?
      -	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
      -		return yaml_parser_fetch_block_scalar(parser, true)
      -	}
      -
      -	// Is it a folded scalar?
      -	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
      -		return yaml_parser_fetch_block_scalar(parser, false)
      -	}
      -
      -	// Is it a single-quoted scalar?
      -	if parser.buffer[parser.buffer_pos] == '\'' {
      -		return yaml_parser_fetch_flow_scalar(parser, true)
      -	}
      -
      -	// Is it a double-quoted scalar?
      -	if parser.buffer[parser.buffer_pos] == '"' {
      -		return yaml_parser_fetch_flow_scalar(parser, false)
      -	}
      -
      -	// Is it a plain scalar?
      -	//
      -	// A plain scalar may start with any non-blank characters except
      -	//
      -	//      '-', '?', ':', ',', '[', ']', '{', '}',
      -	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
      -	//      '%', '@', '`'.
      -	//
      -	// In the block context (and, for the '-' indicator, in the flow context
      -	// too), it may also start with the characters
      -	//
      -	//      '-', '?', ':'
      -	//
      -	// if it is followed by a non-space character.
      -	//
      -	// The last rule is more restrictive than the specification requires.
      -	// [Go] Make this logic more reasonable.
      -	//switch parser.buffer[parser.buffer_pos] {
      -	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
      -	//}
      -	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
      -		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
      -		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
      -		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
      -		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
      -		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
      -		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
      -		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
      -		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
      -		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
      -		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
      -		(parser.flow_level == 0 &&
      -			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
      -			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
      -		return yaml_parser_fetch_plain_scalar(parser)
      -	}
      -
      -	// If we don't determine the token type so far, it is an error.
      -	return yaml_parser_set_scanner_error(parser,
      -		"while scanning for the next token", parser.mark,
      -		"found character that cannot start any token")
      -}
      -
      -// Check the list of potential simple keys and remove the positions that
      -// cannot contain simple keys anymore.
      -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
      -	// Check for a potential simple key for each flow level.
      -	for i := range parser.simple_keys {
      -		simple_key := &parser.simple_keys[i]
      -
      -		// The specification requires that a simple key
      -		//
      -		//  - is limited to a single line,
      -		//  - is shorter than 1024 characters.
      -		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
      -
      -			// Check if the potential simple key to be removed is required.
      -			if simple_key.required {
      -				return yaml_parser_set_scanner_error(parser,
      -					"while scanning a simple key", simple_key.mark,
      -					"could not find expected ':'")
      -			}
      -			simple_key.possible = false
      -		}
      -	}
      -	return true
      -}
      -
      -// Check if a simple key may start at the current position and add it if
      -// needed.
      -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
      -	// A simple key is required at the current position if the scanner is in
      -	// the block context and the current column coincides with the indentation
      -	// level.
      -
      -	required := parser.flow_level == 0 && parser.indent == parser.mark.column
      -
      -	// A simple key is required only when it is the first token in the current
      -	// line.  Therefore it is always allowed.  But we add a check anyway.
      -	if required && !parser.simple_key_allowed {
      -		panic("should not happen")
      -	}
      -
      -	//
      -	// If the current position may start a simple key, save it.
      -	//
      -	if parser.simple_key_allowed {
      -		simple_key := yaml_simple_key_t{
      -			possible:     true,
      -			required:     required,
      -			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
      -		}
      -		simple_key.mark = parser.mark
      -
      -		if !yaml_parser_remove_simple_key(parser) {
      -			return false
      -		}
      -		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
      -	}
      -	return true
      -}
      -
      -// Remove a potential simple key at the current flow level.
      -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
      -	i := len(parser.simple_keys) - 1
      -	if parser.simple_keys[i].possible {
      -		// If the key is required, it is an error.
      -		if parser.simple_keys[i].required {
      -			return yaml_parser_set_scanner_error(parser,
      -				"while scanning a simple key", parser.simple_keys[i].mark,
      -				"could not find expected ':'")
      -		}
      -	}
      -	// Remove the key from the stack.
      -	parser.simple_keys[i].possible = false
      -	return true
      -}
      -
      -// Increase the flow level and resize the simple key list if needed.
      -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
      -	// Reset the simple key on the next level.
      -	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
      -
      -	// Increase the flow level.
      -	parser.flow_level++
      -	return true
      -}
      -
      -// Decrease the flow level.
      -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
      -	if parser.flow_level > 0 {
      -		parser.flow_level--
      -		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
      -	}
      -	return true
      -}
      -
      -// Push the current indentation level to the stack and set the new level
      -// the current column is greater than the indentation level.  In this case,
      -// append or insert the specified token into the token queue.
      -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
      -	// In the flow context, do nothing.
      -	if parser.flow_level > 0 {
      -		return true
      -	}
      -
      -	if parser.indent < column {
      -		// Push the current indentation level to the stack and set the new
      -		// indentation level.
      -		parser.indents = append(parser.indents, parser.indent)
      -		parser.indent = column
      -
      -		// Create a token and insert it into the queue.
      -		token := yaml_token_t{
      -			typ:        typ,
      -			start_mark: mark,
      -			end_mark:   mark,
      -		}
      -		if number > -1 {
      -			number -= parser.tokens_parsed
      -		}
      -		yaml_insert_token(parser, number, &token)
      -	}
      -	return true
      -}
      -
      -// Pop indentation levels from the indents stack until the current level
      -// becomes less or equal to the column.  For each intendation level, append
      -// the BLOCK-END token.
      -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
      -	// In the flow context, do nothing.
      -	if parser.flow_level > 0 {
      -		return true
      -	}
      -
      -	// Loop through the intendation levels in the stack.
      -	for parser.indent > column {
      -		// Create a token and append it to the queue.
      -		token := yaml_token_t{
      -			typ:        yaml_BLOCK_END_TOKEN,
      -			start_mark: parser.mark,
      -			end_mark:   parser.mark,
      -		}
      -		yaml_insert_token(parser, -1, &token)
      -
      -		// Pop the indentation level.
      -		parser.indent = parser.indents[len(parser.indents)-1]
      -		parser.indents = parser.indents[:len(parser.indents)-1]
      -	}
      -	return true
      -}
      -
      -// Initialize the scanner and produce the STREAM-START token.
      -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
      -
      -	// Set the initial indentation.
      -	parser.indent = -1
      -
      -	// Initialize the simple key stack.
      -	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
      -
      -	// A simple key is allowed at the beginning of the stream.
      -	parser.simple_key_allowed = true
      -
      -	// We have started.
      -	parser.stream_start_produced = true
      -
      -	// Create the STREAM-START token and append it to the queue.
      -	token := yaml_token_t{
      -		typ:        yaml_STREAM_START_TOKEN,
      -		start_mark: parser.mark,
      -		end_mark:   parser.mark,
      -		encoding:   parser.encoding,
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the STREAM-END token and shut down the scanner.
      -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
      -
      -	// Force new line.
      -	if parser.mark.column != 0 {
      -		parser.mark.column = 0
      -		parser.mark.line++
      -	}
      -
      -	// Reset the indentation level.
      -	if !yaml_parser_unroll_indent(parser, -1) {
      -		return false
      -	}
      -
      -	// Reset simple keys.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	parser.simple_key_allowed = false
      -
      -	// Create the STREAM-END token and append it to the queue.
      -	token := yaml_token_t{
      -		typ:        yaml_STREAM_END_TOKEN,
      -		start_mark: parser.mark,
      -		end_mark:   parser.mark,
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
      -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
      -	// Reset the indentation level.
      -	if !yaml_parser_unroll_indent(parser, -1) {
      -		return false
      -	}
      -
      -	// Reset simple keys.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	parser.simple_key_allowed = false
      -
      -	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
      -	token := yaml_token_t{}
      -	if !yaml_parser_scan_directive(parser, &token) {
      -		return false
      -	}
      -	// Append the token to the queue.
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the DOCUMENT-START or DOCUMENT-END token.
      -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
      -	// Reset the indentation level.
      -	if !yaml_parser_unroll_indent(parser, -1) {
      -		return false
      -	}
      -
      -	// Reset simple keys.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	parser.simple_key_allowed = false
      -
      -	// Consume the token.
      -	start_mark := parser.mark
      -
      -	skip(parser)
      -	skip(parser)
      -	skip(parser)
      -
      -	end_mark := parser.mark
      -
      -	// Create the DOCUMENT-START or DOCUMENT-END token.
      -	token := yaml_token_t{
      -		typ:        typ,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	// Append the token to the queue.
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
      -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
      -	// The indicators '[' and '{' may start a simple key.
      -	if !yaml_parser_save_simple_key(parser) {
      -		return false
      -	}
      -
      -	// Increase the flow level.
      -	if !yaml_parser_increase_flow_level(parser) {
      -		return false
      -	}
      -
      -	// A simple key may follow the indicators '[' and '{'.
      -	parser.simple_key_allowed = true
      -
      -	// Consume the token.
      -	start_mark := parser.mark
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token.
      -	token := yaml_token_t{
      -		typ:        typ,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	// Append the token to the queue.
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
      -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
      -	// Reset any potential simple key on the current flow level.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	// Decrease the flow level.
      -	if !yaml_parser_decrease_flow_level(parser) {
      -		return false
      -	}
      -
      -	// No simple keys after the indicators ']' and '}'.
      -	parser.simple_key_allowed = false
      -
      -	// Consume the token.
      -
      -	start_mark := parser.mark
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token.
      -	token := yaml_token_t{
      -		typ:        typ,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	// Append the token to the queue.
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the FLOW-ENTRY token.
      -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
      -	// Reset any potential simple keys on the current flow level.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	// Simple keys are allowed after ','.
      -	parser.simple_key_allowed = true
      -
      -	// Consume the token.
      -	start_mark := parser.mark
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create the FLOW-ENTRY token and append it to the queue.
      -	token := yaml_token_t{
      -		typ:        yaml_FLOW_ENTRY_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the BLOCK-ENTRY token.
      -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
      -	// Check if the scanner is in the block context.
      -	if parser.flow_level == 0 {
      -		// Check if we are allowed to start a new entry.
      -		if !parser.simple_key_allowed {
      -			return yaml_parser_set_scanner_error(parser, "", parser.mark,
      -				"block sequence entries are not allowed in this context")
      -		}
      -		// Add the BLOCK-SEQUENCE-START token if needed.
      -		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
      -			return false
      -		}
      -	} else {
      -		// It is an error for the '-' indicator to occur in the flow context,
      -		// but we let the Parser detect and report about it because the Parser
      -		// is able to point to the context.
      -	}
      -
      -	// Reset any potential simple keys on the current flow level.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	// Simple keys are allowed after '-'.
      -	parser.simple_key_allowed = true
      -
      -	// Consume the token.
      -	start_mark := parser.mark
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create the BLOCK-ENTRY token and append it to the queue.
      -	token := yaml_token_t{
      -		typ:        yaml_BLOCK_ENTRY_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the KEY token.
      -func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
      -
      -	// In the block context, additional checks are required.
      -	if parser.flow_level == 0 {
      -		// Check if we are allowed to start a new key (not nessesary simple).
      -		if !parser.simple_key_allowed {
      -			return yaml_parser_set_scanner_error(parser, "", parser.mark,
      -				"mapping keys are not allowed in this context")
      -		}
      -		// Add the BLOCK-MAPPING-START token if needed.
      -		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
      -			return false
      -		}
      -	}
      -
      -	// Reset any potential simple keys on the current flow level.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	// Simple keys are allowed after '?' in the block context.
      -	parser.simple_key_allowed = parser.flow_level == 0
      -
      -	// Consume the token.
      -	start_mark := parser.mark
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create the KEY token and append it to the queue.
      -	token := yaml_token_t{
      -		typ:        yaml_KEY_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the VALUE token.
      -func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
      -
      -	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
      -
      -	// Have we found a simple key?
      -	if simple_key.possible {
      -		// Create the KEY token and insert it into the queue.
      -		token := yaml_token_t{
      -			typ:        yaml_KEY_TOKEN,
      -			start_mark: simple_key.mark,
      -			end_mark:   simple_key.mark,
      -		}
      -		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
      -
      -		// In the block context, we may need to add the BLOCK-MAPPING-START token.
      -		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
      -			simple_key.token_number,
      -			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
      -			return false
      -		}
      -
      -		// Remove the simple key.
      -		simple_key.possible = false
      -
      -		// A simple key cannot follow another simple key.
      -		parser.simple_key_allowed = false
      -
      -	} else {
      -		// The ':' indicator follows a complex key.
      -
      -		// In the block context, extra checks are required.
      -		if parser.flow_level == 0 {
      -
      -			// Check if we are allowed to start a complex value.
      -			if !parser.simple_key_allowed {
      -				return yaml_parser_set_scanner_error(parser, "", parser.mark,
      -					"mapping values are not allowed in this context")
      -			}
      -
      -			// Add the BLOCK-MAPPING-START token if needed.
      -			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
      -				return false
      -			}
      -		}
      -
      -		// Simple keys after ':' are allowed in the block context.
      -		parser.simple_key_allowed = parser.flow_level == 0
      -	}
      -
      -	// Consume the token.
      -	start_mark := parser.mark
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create the VALUE token and append it to the queue.
      -	token := yaml_token_t{
      -		typ:        yaml_VALUE_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the ALIAS or ANCHOR token.
      -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
      -	// An anchor or an alias could be a simple key.
      -	if !yaml_parser_save_simple_key(parser) {
      -		return false
      -	}
      -
      -	// A simple key cannot follow an anchor or an alias.
      -	parser.simple_key_allowed = false
      -
      -	// Create the ALIAS or ANCHOR token and append it to the queue.
      -	var token yaml_token_t
      -	if !yaml_parser_scan_anchor(parser, &token, typ) {
      -		return false
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the TAG token.
      -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
      -	// A tag could be a simple key.
      -	if !yaml_parser_save_simple_key(parser) {
      -		return false
      -	}
      -
      -	// A simple key cannot follow a tag.
      -	parser.simple_key_allowed = false
      -
      -	// Create the TAG token and append it to the queue.
      -	var token yaml_token_t
      -	if !yaml_parser_scan_tag(parser, &token) {
      -		return false
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
      -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
      -	// Remove any potential simple keys.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	// A simple key may follow a block scalar.
      -	parser.simple_key_allowed = true
      -
      -	// Create the SCALAR token and append it to the queue.
      -	var token yaml_token_t
      -	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
      -		return false
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
      -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
      -	// A plain scalar could be a simple key.
      -	if !yaml_parser_save_simple_key(parser) {
      -		return false
      -	}
      -
      -	// A simple key cannot follow a flow scalar.
      -	parser.simple_key_allowed = false
      -
      -	// Create the SCALAR token and append it to the queue.
      -	var token yaml_token_t
      -	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
      -		return false
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the SCALAR(...,plain) token.
      -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
      -	// A plain scalar could be a simple key.
      -	if !yaml_parser_save_simple_key(parser) {
      -		return false
      -	}
      -
      -	// A simple key cannot follow a flow scalar.
      -	parser.simple_key_allowed = false
      -
      -	// Create the SCALAR token and append it to the queue.
      -	var token yaml_token_t
      -	if !yaml_parser_scan_plain_scalar(parser, &token) {
      -		return false
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Eat whitespaces and comments until the next token is found.
      -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
      -
      -	// Until the next token is not found.
      -	for {
      -		// Allow the BOM mark to start a line.
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
      -			skip(parser)
      -		}
      -
      -		// Eat whitespaces.
      -		// Tabs are allowed:
      -		//  - in the flow context
      -		//  - in the block context, but not at the beginning of the line or
      -		//  after '-', '?', or ':' (complex value).
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -
      -		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
      -			skip(parser)
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -
      -		// Eat a comment until a line break.
      -		if parser.buffer[parser.buffer_pos] == '#' {
      -			for !is_breakz(parser.buffer, parser.buffer_pos) {
      -				skip(parser)
      -				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -					return false
      -				}
      -			}
      -		}
      -
      -		// If it is a line break, eat it.
      -		if is_break(parser.buffer, parser.buffer_pos) {
      -			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -				return false
      -			}
      -			skip_line(parser)
      -
      -			// In the block context, a new line may start a simple key.
      -			if parser.flow_level == 0 {
      -				parser.simple_key_allowed = true
      -			}
      -		} else {
      -			break // We have found a token.
      -		}
      -	}
      -
      -	return true
      -}
      -
      -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
      -//
      -// Scope:
      -//      %YAML    1.1    # a comment \n
      -//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      -//      %TAG    !yaml!  tag:yaml.org,2002:  \n
      -//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      -//
      -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
      -	// Eat '%'.
      -	start_mark := parser.mark
      -	skip(parser)
      -
      -	// Scan the directive name.
      -	var name []byte
      -	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
      -		return false
      -	}
      -
      -	// Is it a YAML directive?
      -	if bytes.Equal(name, []byte("YAML")) {
      -		// Scan the VERSION directive value.
      -		var major, minor int8
      -		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
      -			return false
      -		}
      -		end_mark := parser.mark
      -
      -		// Create a VERSION-DIRECTIVE token.
      -		*token = yaml_token_t{
      -			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			major:      major,
      -			minor:      minor,
      -		}
      -
      -		// Is it a TAG directive?
      -	} else if bytes.Equal(name, []byte("TAG")) {
      -		// Scan the TAG directive value.
      -		var handle, prefix []byte
      -		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
      -			return false
      -		}
      -		end_mark := parser.mark
      -
      -		// Create a TAG-DIRECTIVE token.
      -		*token = yaml_token_t{
      -			typ:        yaml_TAG_DIRECTIVE_TOKEN,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			value:      handle,
      -			prefix:     prefix,
      -		}
      -
      -		// Unknown directive.
      -	} else {
      -		yaml_parser_set_scanner_error(parser, "while scanning a directive",
      -			start_mark, "found uknown directive name")
      -		return false
      -	}
      -
      -	// Eat the rest of the line including any comments.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	for is_blank(parser.buffer, parser.buffer_pos) {
      -		skip(parser)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	if parser.buffer[parser.buffer_pos] == '#' {
      -		for !is_breakz(parser.buffer, parser.buffer_pos) {
      -			skip(parser)
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -	}
      -
      -	// Check if we are at the end of the line.
      -	if !is_breakz(parser.buffer, parser.buffer_pos) {
      -		yaml_parser_set_scanner_error(parser, "while scanning a directive",
      -			start_mark, "did not find expected comment or line break")
      -		return false
      -	}
      -
      -	// Eat a line break.
      -	if is_break(parser.buffer, parser.buffer_pos) {
      -		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -			return false
      -		}
      -		skip_line(parser)
      -	}
      -
      -	return true
      -}
      -
      -// Scan the directive name.
      -//
      -// Scope:
      -//      %YAML   1.1     # a comment \n
      -//       ^^^^
      -//      %TAG    !yaml!  tag:yaml.org,2002:  \n
      -//       ^^^
      -//
      -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
      -	// Consume the directive name.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	var s []byte
      -	for is_alpha(parser.buffer, parser.buffer_pos) {
      -		s = read(parser, s)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Check if the name is empty.
      -	if len(s) == 0 {
      -		yaml_parser_set_scanner_error(parser, "while scanning a directive",
      -			start_mark, "could not find expected directive name")
      -		return false
      -	}
      -
      -	// Check for an blank character after the name.
      -	if !is_blankz(parser.buffer, parser.buffer_pos) {
      -		yaml_parser_set_scanner_error(parser, "while scanning a directive",
      -			start_mark, "found unexpected non-alphabetical character")
      -		return false
      -	}
      -	*name = s
      -	return true
      -}
      -
      -// Scan the value of VERSION-DIRECTIVE.
      -//
      -// Scope:
      -//      %YAML   1.1     # a comment \n
      -//           ^^^^^^
      -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
      -	// Eat whitespaces.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	for is_blank(parser.buffer, parser.buffer_pos) {
      -		skip(parser)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Consume the major version number.
      -	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
      -		return false
      -	}
      -
      -	// Eat '.'.
      -	if parser.buffer[parser.buffer_pos] != '.' {
      -		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
      -			start_mark, "did not find expected digit or '.' character")
      -	}
      -
      -	skip(parser)
      -
      -	// Consume the minor version number.
      -	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
      -		return false
      -	}
      -	return true
      -}
      -
      -const max_number_length = 2
      -
      -// Scan the version number of VERSION-DIRECTIVE.
      -//
      -// Scope:
      -//      %YAML   1.1     # a comment \n
      -//              ^
      -//      %YAML   1.1     # a comment \n
      -//                ^
      -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
      -
      -	// Repeat while the next character is digit.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	var value, length int8
      -	for is_digit(parser.buffer, parser.buffer_pos) {
      -		// Check if the number is too long.
      -		length++
      -		if length > max_number_length {
      -			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
      -				start_mark, "found extremely long version number")
      -		}
      -		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
      -		skip(parser)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Check if the number was present.
      -	if length == 0 {
      -		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
      -			start_mark, "did not find expected version number")
      -	}
      -	*number = value
      -	return true
      -}
      -
      -// Scan the value of a TAG-DIRECTIVE token.
      -//
      -// Scope:
      -//      %TAG    !yaml!  tag:yaml.org,2002:  \n
      -//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      -//
      -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
      -	var handle_value, prefix_value []byte
      -
      -	// Eat whitespaces.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	for is_blank(parser.buffer, parser.buffer_pos) {
      -		skip(parser)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Scan a handle.
      -	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
      -		return false
      -	}
      -
      -	// Expect a whitespace.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	if !is_blank(parser.buffer, parser.buffer_pos) {
      -		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
      -			start_mark, "did not find expected whitespace")
      -		return false
      -	}
      -
      -	// Eat whitespaces.
      -	for is_blank(parser.buffer, parser.buffer_pos) {
      -		skip(parser)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Scan a prefix.
      -	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
      -		return false
      -	}
      -
      -	// Expect a whitespace or line break.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	if !is_blankz(parser.buffer, parser.buffer_pos) {
      -		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
      -			start_mark, "did not find expected whitespace or line break")
      -		return false
      -	}
      -
      -	*handle = handle_value
      -	*prefix = prefix_value
      -	return true
      -}
      -
      -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
      -	var s []byte
      -
      -	// Eat the indicator character.
      -	start_mark := parser.mark
      -	skip(parser)
      -
      -	// Consume the value.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	for is_alpha(parser.buffer, parser.buffer_pos) {
      -		s = read(parser, s)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	end_mark := parser.mark
      -
      -	/*
      -	 * Check if length of the anchor is greater than 0 and it is followed by
      -	 * a whitespace character or one of the indicators:
      -	 *
      -	 *      '?', ':', ',', ']', '}', '%', '@', '`'.
      -	 */
      -
      -	if len(s) == 0 ||
      -		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
      -			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
      -			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
      -			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
      -			parser.buffer[parser.buffer_pos] == '`') {
      -		context := "while scanning an alias"
      -		if typ == yaml_ANCHOR_TOKEN {
      -			context = "while scanning an anchor"
      -		}
      -		yaml_parser_set_scanner_error(parser, context, start_mark,
      -			"did not find expected alphabetic or numeric character")
      -		return false
      -	}
      -
      -	// Create a token.
      -	*token = yaml_token_t{
      -		typ:        typ,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -		value:      s,
      -	}
      -
      -	return true
      -}
      -
      -/*
      - * Scan a TAG token.
      - */
      -
      -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
      -	var handle, suffix []byte
      -
      -	start_mark := parser.mark
      -
      -	// Check if the tag is in the canonical form.
      -	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -		return false
      -	}
      -
      -	if parser.buffer[parser.buffer_pos+1] == '<' {
      -		// Keep the handle as ''
      -
      -		// Eat '!<'
      -		skip(parser)
      -		skip(parser)
      -
      -		// Consume the tag value.
      -		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
      -			return false
      -		}
      -
      -		// Check for '>' and eat it.
      -		if parser.buffer[parser.buffer_pos] != '>' {
      -			yaml_parser_set_scanner_error(parser, "while scanning a tag",
      -				start_mark, "did not find the expected '>'")
      -			return false
      -		}
      -
      -		skip(parser)
      -	} else {
      -		// The tag has either the '!suffix' or the '!handle!suffix' form.
      -
      -		// First, try to scan a handle.
      -		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
      -			return false
      -		}
      -
      -		// Check if it is, indeed, handle.
      -		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
      -			// Scan the suffix now.
      -			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
      -				return false
      -			}
      -		} else {
      -			// It wasn't a handle after all.  Scan the rest of the tag.
      -			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
      -				return false
      -			}
      -
      -			// Set the handle to '!'.
      -			handle = []byte{'!'}
      -
      -			// A special case: the '!' tag.  Set the handle to '' and the
      -			// suffix to '!'.
      -			if len(suffix) == 0 {
      -				handle, suffix = suffix, handle
      -			}
      -		}
      -	}
      -
      -	// Check the character which ends the tag.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	if !is_blankz(parser.buffer, parser.buffer_pos) {
      -		yaml_parser_set_scanner_error(parser, "while scanning a tag",
      -			start_mark, "did not find expected whitespace or line break")
      -		return false
      -	}
      -
      -	end_mark := parser.mark
      -
      -	// Create a token.
      -	*token = yaml_token_t{
      -		typ:        yaml_TAG_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -		value:      handle,
      -		suffix:     suffix,
      -	}
      -	return true
      -}
      -
      -// Scan a tag handle.
      -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
      -	// Check the initial '!' character.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	if parser.buffer[parser.buffer_pos] != '!' {
      -		yaml_parser_set_scanner_tag_error(parser, directive,
      -			start_mark, "did not find expected '!'")
      -		return false
      -	}
      -
      -	var s []byte
      -
      -	// Copy the '!' character.
      -	s = read(parser, s)
      -
      -	// Copy all subsequent alphabetical and numerical characters.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	for is_alpha(parser.buffer, parser.buffer_pos) {
      -		s = read(parser, s)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Check if the trailing character is '!' and copy it.
      -	if parser.buffer[parser.buffer_pos] == '!' {
      -		s = read(parser, s)
      -	} else {
      -		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
      -		// directive, it's an error.  If it's a tag token, it must be a part of URI.
      -		if directive && !(s[0] == '!' && s[1] == 0) {
      -			yaml_parser_set_scanner_tag_error(parser, directive,
      -				start_mark, "did not find expected '!'")
      -			return false
      -		}
      -	}
      -
      -	*handle = s
      -	return true
      -}
      -
      -// Scan a tag.
      -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
      -	//size_t length = head ? strlen((char *)head) : 0
      -	var s []byte
      -
      -	// Copy the head if needed.
      -	//
      -	// Note that we don't copy the leading '!' character.
      -	if len(head) > 1 {
      -		s = append(s, head[1:]...)
      -	}
      -
      -	// Scan the tag.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	// The set of characters that may appear in URI is as follows:
      -	//
      -	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
      -	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
      -	//      '%'.
      -	// [Go] Convert this into more reasonable logic.
      -	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
      -		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
      -		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
      -		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
      -		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
      -		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
      -		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
      -		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
      -		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
      -		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
      -		parser.buffer[parser.buffer_pos] == '%' {
      -		// Check if it is a URI-escape sequence.
      -		if parser.buffer[parser.buffer_pos] == '%' {
      -			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
      -				return false
      -			}
      -		} else {
      -			s = read(parser, s)
      -		}
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Check if the tag is non-empty.
      -	if len(s) == 0 {
      -		yaml_parser_set_scanner_tag_error(parser, directive,
      -			start_mark, "did not find expected tag URI")
      -		return false
      -	}
      -	*uri = s
      -	return true
      -}
      -
      -// Decode an URI-escape sequence corresponding to a single UTF-8 character.
      -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
      -
      -	// Decode the required number of characters.
      -	w := 1024
      -	for w > 0 {
      -		// Check for a URI-escaped octet.
      -		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
      -			return false
      -		}
      -
      -		if !(parser.buffer[parser.buffer_pos] == '%' &&
      -			is_hex(parser.buffer, parser.buffer_pos+1) &&
      -			is_hex(parser.buffer, parser.buffer_pos+2)) {
      -			return yaml_parser_set_scanner_tag_error(parser, directive,
      -				start_mark, "did not find URI escaped octet")
      -		}
      -
      -		// Get the octet.
      -		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
      -
      -		// If it is the leading octet, determine the length of the UTF-8 sequence.
      -		if w == 1024 {
      -			w = width(octet)
      -			if w == 0 {
      -				return yaml_parser_set_scanner_tag_error(parser, directive,
      -					start_mark, "found an incorrect leading UTF-8 octet")
      -			}
      -		} else {
      -			// Check if the trailing octet is correct.
      -			if octet&0xC0 != 0x80 {
      -				return yaml_parser_set_scanner_tag_error(parser, directive,
      -					start_mark, "found an incorrect trailing UTF-8 octet")
      -			}
      -		}
      -
      -		// Copy the octet and move the pointers.
      -		*s = append(*s, octet)
      -		skip(parser)
      -		skip(parser)
      -		skip(parser)
      -		w--
      -	}
      -	return true
      -}
      -
      -// Scan a block scalar.
      -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
      -	// Eat the indicator '|' or '>'.
      -	start_mark := parser.mark
      -	skip(parser)
      -
      -	// Scan the additional block scalar indicators.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	// Check for a chomping indicator.
      -	var chomping, increment int
      -	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
      -		// Set the chomping method and eat the indicator.
      -		if parser.buffer[parser.buffer_pos] == '+' {
      -			chomping = +1
      -		} else {
      -			chomping = -1
      -		}
      -		skip(parser)
      -
      -		// Check for an indentation indicator.
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -		if is_digit(parser.buffer, parser.buffer_pos) {
      -			// Check that the intendation is greater than 0.
      -			if parser.buffer[parser.buffer_pos] == '0' {
      -				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
      -					start_mark, "found an intendation indicator equal to 0")
      -				return false
      -			}
      -
      -			// Get the intendation level and eat the indicator.
      -			increment = as_digit(parser.buffer, parser.buffer_pos)
      -			skip(parser)
      -		}
      -
      -	} else if is_digit(parser.buffer, parser.buffer_pos) {
      -		// Do the same as above, but in the opposite order.
      -
      -		if parser.buffer[parser.buffer_pos] == '0' {
      -			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
      -				start_mark, "found an intendation indicator equal to 0")
      -			return false
      -		}
      -		increment = as_digit(parser.buffer, parser.buffer_pos)
      -		skip(parser)
      -
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
      -			if parser.buffer[parser.buffer_pos] == '+' {
      -				chomping = +1
      -			} else {
      -				chomping = -1
      -			}
      -			skip(parser)
      -		}
      -	}
      -
      -	// Eat whitespaces and comments to the end of the line.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	for is_blank(parser.buffer, parser.buffer_pos) {
      -		skip(parser)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -	if parser.buffer[parser.buffer_pos] == '#' {
      -		for !is_breakz(parser.buffer, parser.buffer_pos) {
      -			skip(parser)
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -	}
      -
      -	// Check if we are at the end of the line.
      -	if !is_breakz(parser.buffer, parser.buffer_pos) {
      -		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
      -			start_mark, "did not find expected comment or line break")
      -		return false
      -	}
      -
      -	// Eat a line break.
      -	if is_break(parser.buffer, parser.buffer_pos) {
      -		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -			return false
      -		}
      -		skip_line(parser)
      -	}
      -
      -	end_mark := parser.mark
      -
      -	// Set the intendation level if it was specified.
      -	var indent int
      -	if increment > 0 {
      -		if parser.indent >= 0 {
      -			indent = parser.indent + increment
      -		} else {
      -			indent = increment
      -		}
      -	}
      -
      -	// Scan the leading line breaks and determine the indentation level if needed.
      -	var s, leading_break, trailing_breaks []byte
      -	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
      -		return false
      -	}
      -
      -	// Scan the block scalar content.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	var leading_blank, trailing_blank bool
      -	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
      -		// We are at the beginning of a non-empty line.
      -
      -		// Is it a trailing whitespace?
      -		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
      -
      -		// Check if we need to fold the leading line break.
      -		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
      -			// Do we need to join the lines by space?
      -			if len(trailing_breaks) == 0 {
      -				s = append(s, ' ')
      -			}
      -		} else {
      -			s = append(s, leading_break...)
      -		}
      -		leading_break = leading_break[:0]
      -
      -		// Append the remaining line breaks.
      -		s = append(s, trailing_breaks...)
      -		trailing_breaks = trailing_breaks[:0]
      -
      -		// Is it a leading whitespace?
      -		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
      -
      -		// Consume the current line.
      -		for !is_breakz(parser.buffer, parser.buffer_pos) {
      -			s = read(parser, s)
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -
      -		// Consume the line break.
      -		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -			return false
      -		}
      -
      -		leading_break = read_line(parser, leading_break)
      -
      -		// Eat the following intendation spaces and line breaks.
      -		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
      -			return false
      -		}
      -	}
      -
      -	// Chomp the tail.
      -	if chomping != -1 {
      -		s = append(s, leading_break...)
      -	}
      -	if chomping == 1 {
      -		s = append(s, trailing_breaks...)
      -	}
      -
      -	// Create a token.
      -	*token = yaml_token_t{
      -		typ:        yaml_SCALAR_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -		value:      s,
      -		style:      yaml_LITERAL_SCALAR_STYLE,
      -	}
      -	if !literal {
      -		token.style = yaml_FOLDED_SCALAR_STYLE
      -	}
      -	return true
      -}
      -
      -// Scan intendation spaces and line breaks for a block scalar.  Determine the
      -// intendation level if needed.
      -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
      -	*end_mark = parser.mark
      -
      -	// Eat the intendation spaces and line breaks.
      -	max_indent := 0
      -	for {
      -		// Eat the intendation spaces.
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
      -			skip(parser)
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -		if parser.mark.column > max_indent {
      -			max_indent = parser.mark.column
      -		}
      -
      -		// Check for a tab character messing the intendation.
      -		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
      -			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
      -				start_mark, "found a tab character where an intendation space is expected")
      -		}
      -
      -		// Have we found a non-empty line?
      -		if !is_break(parser.buffer, parser.buffer_pos) {
      -			break
      -		}
      -
      -		// Consume the line break.
      -		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -			return false
      -		}
      -		// [Go] Should really be returning breaks instead.
      -		*breaks = read_line(parser, *breaks)
      -		*end_mark = parser.mark
      -	}
      -
      -	// Determine the indentation level if needed.
      -	if *indent == 0 {
      -		*indent = max_indent
      -		if *indent < parser.indent+1 {
      -			*indent = parser.indent + 1
      -		}
      -		if *indent < 1 {
      -			*indent = 1
      -		}
      -	}
      -	return true
      -}
      -
      -// Scan a quoted scalar.
      -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
      -	// Eat the left quote.
      -	start_mark := parser.mark
      -	skip(parser)
      -
      -	// Consume the content of the quoted scalar.
      -	var s, leading_break, trailing_breaks, whitespaces []byte
      -	for {
      -		// Check that there are no document indicators at the beginning of the line.
      -		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
      -			return false
      -		}
      -
      -		if parser.mark.column == 0 &&
      -			((parser.buffer[parser.buffer_pos+0] == '-' &&
      -				parser.buffer[parser.buffer_pos+1] == '-' &&
      -				parser.buffer[parser.buffer_pos+2] == '-') ||
      -				(parser.buffer[parser.buffer_pos+0] == '.' &&
      -					parser.buffer[parser.buffer_pos+1] == '.' &&
      -					parser.buffer[parser.buffer_pos+2] == '.')) &&
      -			is_blankz(parser.buffer, parser.buffer_pos+3) {
      -			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
      -				start_mark, "found unexpected document indicator")
      -			return false
      -		}
      -
      -		// Check for EOF.
      -		if is_z(parser.buffer, parser.buffer_pos) {
      -			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
      -				start_mark, "found unexpected end of stream")
      -			return false
      -		}
      -
      -		// Consume non-blank characters.
      -		leading_blanks := false
      -		for !is_blankz(parser.buffer, parser.buffer_pos) {
      -			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
      -				// Is is an escaped single quote.
      -				s = append(s, '\'')
      -				skip(parser)
      -				skip(parser)
      -
      -			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
      -				// It is a right single quote.
      -				break
      -			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
      -				// It is a right double quote.
      -				break
      -
      -			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
      -				// It is an escaped line break.
      -				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
      -					return false
      -				}
      -				skip(parser)
      -				skip_line(parser)
      -				leading_blanks = true
      -				break
      -
      -			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
      -				// It is an escape sequence.
      -				code_length := 0
      -
      -				// Check the escape character.
      -				switch parser.buffer[parser.buffer_pos+1] {
      -				case '0':
      -					s = append(s, 0)
      -				case 'a':
      -					s = append(s, '\x07')
      -				case 'b':
      -					s = append(s, '\x08')
      -				case 't', '\t':
      -					s = append(s, '\x09')
      -				case 'n':
      -					s = append(s, '\x0A')
      -				case 'v':
      -					s = append(s, '\x0B')
      -				case 'f':
      -					s = append(s, '\x0C')
      -				case 'r':
      -					s = append(s, '\x0D')
      -				case 'e':
      -					s = append(s, '\x1B')
      -				case ' ':
      -					s = append(s, '\x20')
      -				case '"':
      -					s = append(s, '"')
      -				case '\'':
      -					s = append(s, '\'')
      -				case '\\':
      -					s = append(s, '\\')
      -				case 'N': // NEL (#x85)
      -					s = append(s, '\xC2')
      -					s = append(s, '\x85')
      -				case '_': // #xA0
      -					s = append(s, '\xC2')
      -					s = append(s, '\xA0')
      -				case 'L': // LS (#x2028)
      -					s = append(s, '\xE2')
      -					s = append(s, '\x80')
      -					s = append(s, '\xA8')
      -				case 'P': // PS (#x2029)
      -					s = append(s, '\xE2')
      -					s = append(s, '\x80')
      -					s = append(s, '\xA9')
      -				case 'x':
      -					code_length = 2
      -				case 'u':
      -					code_length = 4
      -				case 'U':
      -					code_length = 8
      -				default:
      -					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
      -						start_mark, "found unknown escape character")
      -					return false
      -				}
      -
      -				skip(parser)
      -				skip(parser)
      -
      -				// Consume an arbitrary escape code.
      -				if code_length > 0 {
      -					var value int
      -
      -					// Scan the character value.
      -					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
      -						return false
      -					}
      -					for k := 0; k < code_length; k++ {
      -						if !is_hex(parser.buffer, parser.buffer_pos+k) {
      -							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
      -								start_mark, "did not find expected hexdecimal number")
      -							return false
      -						}
      -						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
      -					}
      -
      -					// Check the value and write the character.
      -					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
      -						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
      -							start_mark, "found invalid Unicode character escape code")
      -						return false
      -					}
      -					if value <= 0x7F {
      -						s = append(s, byte(value))
      -					} else if value <= 0x7FF {
      -						s = append(s, byte(0xC0+(value>>6)))
      -						s = append(s, byte(0x80+(value&0x3F)))
      -					} else if value <= 0xFFFF {
      -						s = append(s, byte(0xE0+(value>>12)))
      -						s = append(s, byte(0x80+((value>>6)&0x3F)))
      -						s = append(s, byte(0x80+(value&0x3F)))
      -					} else {
      -						s = append(s, byte(0xF0+(value>>18)))
      -						s = append(s, byte(0x80+((value>>12)&0x3F)))
      -						s = append(s, byte(0x80+((value>>6)&0x3F)))
      -						s = append(s, byte(0x80+(value&0x3F)))
      -					}
      -
      -					// Advance the pointer.
      -					for k := 0; k < code_length; k++ {
      -						skip(parser)
      -					}
      -				}
      -			} else {
      -				// It is a non-escaped non-blank character.
      -				s = read(parser, s)
      -			}
      -			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -				return false
      -			}
      -		}
      -
      -		// Check if we are at the end of the scalar.
      -		if single {
      -			if parser.buffer[parser.buffer_pos] == '\'' {
      -				break
      -			}
      -		} else {
      -			if parser.buffer[parser.buffer_pos] == '"' {
      -				break
      -			}
      -		}
      -
      -		// Consume blank characters.
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -
      -		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
      -			if is_blank(parser.buffer, parser.buffer_pos) {
      -				// Consume a space or a tab character.
      -				if !leading_blanks {
      -					whitespaces = read(parser, whitespaces)
      -				} else {
      -					skip(parser)
      -				}
      -			} else {
      -				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -					return false
      -				}
      -
      -				// Check if it is a first line break.
      -				if !leading_blanks {
      -					whitespaces = whitespaces[:0]
      -					leading_break = read_line(parser, leading_break)
      -					leading_blanks = true
      -				} else {
      -					trailing_breaks = read_line(parser, trailing_breaks)
      -				}
      -			}
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -
      -		// Join the whitespaces or fold line breaks.
      -		if leading_blanks {
      -			// Do we need to fold line breaks?
      -			if len(leading_break) > 0 && leading_break[0] == '\n' {
      -				if len(trailing_breaks) == 0 {
      -					s = append(s, ' ')
      -				} else {
      -					s = append(s, trailing_breaks...)
      -				}
      -			} else {
      -				s = append(s, leading_break...)
      -				s = append(s, trailing_breaks...)
      -			}
      -			trailing_breaks = trailing_breaks[:0]
      -			leading_break = leading_break[:0]
      -		} else {
      -			s = append(s, whitespaces...)
      -			whitespaces = whitespaces[:0]
      -		}
      -	}
      -
      -	// Eat the right quote.
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create a token.
      -	*token = yaml_token_t{
      -		typ:        yaml_SCALAR_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -		value:      s,
      -		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
      -	}
      -	if !single {
      -		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
      -	}
      -	return true
      -}
      -
      -// Scan a plain scalar.
      -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
      -
      -	var s, leading_break, trailing_breaks, whitespaces []byte
      -	var leading_blanks bool
      -	var indent = parser.indent + 1
      -
      -	start_mark := parser.mark
      -	end_mark := parser.mark
      -
      -	// Consume the content of the plain scalar.
      -	for {
      -		// Check for a document indicator.
      -		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
      -			return false
      -		}
      -		if parser.mark.column == 0 &&
      -			((parser.buffer[parser.buffer_pos+0] == '-' &&
      -				parser.buffer[parser.buffer_pos+1] == '-' &&
      -				parser.buffer[parser.buffer_pos+2] == '-') ||
      -				(parser.buffer[parser.buffer_pos+0] == '.' &&
      -					parser.buffer[parser.buffer_pos+1] == '.' &&
      -					parser.buffer[parser.buffer_pos+2] == '.')) &&
      -			is_blankz(parser.buffer, parser.buffer_pos+3) {
      -			break
      -		}
      -
      -		// Check for a comment.
      -		if parser.buffer[parser.buffer_pos] == '#' {
      -			break
      -		}
      -
      -		// Consume non-blank characters.
      -		for !is_blankz(parser.buffer, parser.buffer_pos) {
      -
      -			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
      -			if parser.flow_level > 0 &&
      -				parser.buffer[parser.buffer_pos] == ':' &&
      -				!is_blankz(parser.buffer, parser.buffer_pos+1) {
      -				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
      -					start_mark, "found unexpected ':'")
      -				return false
      -			}
      -
      -			// Check for indicators that may end a plain scalar.
      -			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
      -				(parser.flow_level > 0 &&
      -					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
      -						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
      -						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
      -						parser.buffer[parser.buffer_pos] == '}')) {
      -				break
      -			}
      -
      -			// Check if we need to join whitespaces and breaks.
      -			if leading_blanks || len(whitespaces) > 0 {
      -				if leading_blanks {
      -					// Do we need to fold line breaks?
      -					if leading_break[0] == '\n' {
      -						if len(trailing_breaks) == 0 {
      -							s = append(s, ' ')
      -						} else {
      -							s = append(s, trailing_breaks...)
      -						}
      -					} else {
      -						s = append(s, leading_break...)
      -						s = append(s, trailing_breaks...)
      -					}
      -					trailing_breaks = trailing_breaks[:0]
      -					leading_break = leading_break[:0]
      -					leading_blanks = false
      -				} else {
      -					s = append(s, whitespaces...)
      -					whitespaces = whitespaces[:0]
      -				}
      -			}
      -
      -			// Copy the character.
      -			s = read(parser, s)
      -
      -			end_mark = parser.mark
      -			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -				return false
      -			}
      -		}
      -
      -		// Is it the end?
      -		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
      -			break
      -		}
      -
      -		// Consume blank characters.
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -
      -		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
      -			if is_blank(parser.buffer, parser.buffer_pos) {
      -
      -				// Check for tab character that abuse intendation.
      -				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
      -					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
      -						start_mark, "found a tab character that violate intendation")
      -					return false
      -				}
      -
      -				// Consume a space or a tab character.
      -				if !leading_blanks {
      -					whitespaces = read(parser, whitespaces)
      -				} else {
      -					skip(parser)
      -				}
      -			} else {
      -				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -					return false
      -				}
      -
      -				// Check if it is a first line break.
      -				if !leading_blanks {
      -					whitespaces = whitespaces[:0]
      -					leading_break = read_line(parser, leading_break)
      -					leading_blanks = true
      -				} else {
      -					trailing_breaks = read_line(parser, trailing_breaks)
      -				}
      -			}
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -
      -		// Check intendation level.
      -		if parser.flow_level == 0 && parser.mark.column < indent {
      -			break
      -		}
      -	}
      -
      -	// Create a token.
      -	*token = yaml_token_t{
      -		typ:        yaml_SCALAR_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -		value:      s,
      -		style:      yaml_PLAIN_SCALAR_STYLE,
      -	}
      -
      -	// Note that we change the 'simple_key_allowed' flag.
      -	if leading_blanks {
      -		parser.simple_key_allowed = true
      -	}
      -	return true
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/sorter.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/sorter.go
      deleted file mode 100644
      index 5958822f..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/sorter.go
      +++ /dev/null
      @@ -1,104 +0,0 @@
      -package yaml
      -
      -import (
      -	"reflect"
      -	"unicode"
      -)
      -
      -type keyList []reflect.Value
      -
      -func (l keyList) Len() int      { return len(l) }
      -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
      -func (l keyList) Less(i, j int) bool {
      -	a := l[i]
      -	b := l[j]
      -	ak := a.Kind()
      -	bk := b.Kind()
      -	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
      -		a = a.Elem()
      -		ak = a.Kind()
      -	}
      -	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
      -		b = b.Elem()
      -		bk = b.Kind()
      -	}
      -	af, aok := keyFloat(a)
      -	bf, bok := keyFloat(b)
      -	if aok && bok {
      -		if af != bf {
      -			return af < bf
      -		}
      -		if ak != bk {
      -			return ak < bk
      -		}
      -		return numLess(a, b)
      -	}
      -	if ak != reflect.String || bk != reflect.String {
      -		return ak < bk
      -	}
      -	ar, br := []rune(a.String()), []rune(b.String())
      -	for i := 0; i < len(ar) && i < len(br); i++ {
      -		if ar[i] == br[i] {
      -			continue
      -		}
      -		al := unicode.IsLetter(ar[i])
      -		bl := unicode.IsLetter(br[i])
      -		if al && bl {
      -			return ar[i] < br[i]
      -		}
      -		if al || bl {
      -			return bl
      -		}
      -		var ai, bi int
      -		var an, bn int64
      -		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
      -			an = an*10 + int64(ar[ai]-'0')
      -		}
      -		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
      -			bn = bn*10 + int64(br[bi]-'0')
      -		}
      -		if an != bn {
      -			return an < bn
      -		}
      -		if ai != bi {
      -			return ai < bi
      -		}
      -		return ar[i] < br[i]
      -	}
      -	return len(ar) < len(br)
      -}
      -
      -// keyFloat returns a float value for v if it is a number/bool
      -// and whether it is a number/bool or not.
      -func keyFloat(v reflect.Value) (f float64, ok bool) {
      -	switch v.Kind() {
      -	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
      -		return float64(v.Int()), true
      -	case reflect.Float32, reflect.Float64:
      -		return v.Float(), true
      -	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
      -		return float64(v.Uint()), true
      -	case reflect.Bool:
      -		if v.Bool() {
      -			return 1, true
      -		}
      -		return 0, true
      -	}
      -	return 0, false
      -}
      -
      -// numLess returns whether a < b.
      -// a and b must necessarily have the same kind.
      -func numLess(a, b reflect.Value) bool {
      -	switch a.Kind() {
      -	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
      -		return a.Int() < b.Int()
      -	case reflect.Float32, reflect.Float64:
      -		return a.Float() < b.Float()
      -	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
      -		return a.Uint() < b.Uint()
      -	case reflect.Bool:
      -		return !a.Bool() && b.Bool()
      -	}
      -	panic("not a number")
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/suite_test.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/suite_test.go
      deleted file mode 100644
      index c5cf1ed4..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/suite_test.go
      +++ /dev/null
      @@ -1,12 +0,0 @@
      -package yaml_test
      -
      -import (
      -	. "gopkg.in/check.v1"
      -	"testing"
      -)
      -
      -func Test(t *testing.T) { TestingT(t) }
      -
      -type S struct{}
      -
      -var _ = Suite(&S{})
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/writerc.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/writerc.go
      deleted file mode 100644
      index 190362f2..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/writerc.go
      +++ /dev/null
      @@ -1,89 +0,0 @@
      -package yaml
      -
      -// Set the writer error and return false.
      -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
      -	emitter.error = yaml_WRITER_ERROR
      -	emitter.problem = problem
      -	return false
      -}
      -
      -// Flush the output buffer.
      -func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
      -	if emitter.write_handler == nil {
      -		panic("write handler not set")
      -	}
      -
      -	// Check if the buffer is empty.
      -	if emitter.buffer_pos == 0 {
      -		return true
      -	}
      -
      -	// If the output encoding is UTF-8, we don't need to recode the buffer.
      -	if emitter.encoding == yaml_UTF8_ENCODING {
      -		if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
      -			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
      -		}
      -		emitter.buffer_pos = 0
      -		return true
      -	}
      -
      -	// Recode the buffer into the raw buffer.
      -	var low, high int
      -	if emitter.encoding == yaml_UTF16LE_ENCODING {
      -		low, high = 0, 1
      -	} else {
      -		high, low = 1, 0
      -	}
      -
      -	pos := 0
      -	for pos < emitter.buffer_pos {
      -		// See the "reader.c" code for more details on UTF-8 encoding.  Note
      -		// that we assume that the buffer contains a valid UTF-8 sequence.
      -
      -		// Read the next UTF-8 character.
      -		octet := emitter.buffer[pos]
      -
      -		var w int
      -		var value rune
      -		switch {
      -		case octet&0x80 == 0x00:
      -			w, value = 1, rune(octet&0x7F)
      -		case octet&0xE0 == 0xC0:
      -			w, value = 2, rune(octet&0x1F)
      -		case octet&0xF0 == 0xE0:
      -			w, value = 3, rune(octet&0x0F)
      -		case octet&0xF8 == 0xF0:
      -			w, value = 4, rune(octet&0x07)
      -		}
      -		for k := 1; k < w; k++ {
      -			octet = emitter.buffer[pos+k]
      -			value = (value << 6) + (rune(octet) & 0x3F)
      -		}
      -		pos += w
      -
      -		// Write the character.
      -		if value < 0x10000 {
      -			var b [2]byte
      -			b[high] = byte(value >> 8)
      -			b[low] = byte(value & 0xFF)
      -			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
      -		} else {
      -			// Write the character using a surrogate pair (check "reader.c").
      -			var b [4]byte
      -			value -= 0x10000
      -			b[high] = byte(0xD8 + (value >> 18))
      -			b[low] = byte((value >> 10) & 0xFF)
      -			b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
      -			b[low+2] = byte(value & 0xFF)
      -			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
      -		}
      -	}
      -
      -	// Write the raw buffer.
      -	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
      -		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
      -	}
      -	emitter.buffer_pos = 0
      -	emitter.raw_buffer = emitter.raw_buffer[:0]
      -	return true
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/yaml.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/yaml.go
      deleted file mode 100644
      index af4df8a4..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/yaml.go
      +++ /dev/null
      @@ -1,344 +0,0 @@
      -// Package yaml implements YAML support for the Go language.
      -//
      -// Source code and other details for the project are available at GitHub:
      -//
      -//   https://github.com/go-yaml/yaml
      -//
      -package yaml
      -
      -import (
      -	"errors"
      -	"fmt"
      -	"reflect"
      -	"strings"
      -	"sync"
      -)
      -
      -// MapSlice encodes and decodes as a YAML map.
      -// The order of keys is preserved when encoding and decoding.
      -type MapSlice []MapItem
      -
      -// MapItem is an item in a MapSlice.
      -type MapItem struct {
      -	Key, Value interface{}
      -}
      -
      -// The Unmarshaler interface may be implemented by types to customize their
      -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
      -// method receives a function that may be called to unmarshal the original
      -// YAML value into a field or variable. It is safe to call the unmarshal
      -// function parameter more than once if necessary.
      -type Unmarshaler interface {
      -	UnmarshalYAML(unmarshal func(interface{}) error) error
      -}
      -
      -// The Marshaler interface may be implemented by types to customize their
      -// behavior when being marshaled into a YAML document. The returned value
      -// is marshaled in place of the original value implementing Marshaler.
      -//
      -// If an error is returned by MarshalYAML, the marshaling procedure stops
      -// and returns with the provided error.
      -type Marshaler interface {
      -	MarshalYAML() (interface{}, error)
      -}
      -
      -// Unmarshal decodes the first document found within the in byte slice
      -// and assigns decoded values into the out value.
      -//
      -// Maps and pointers (to a struct, string, int, etc) are accepted as out
      -// values. If an internal pointer within a struct is not initialized,
      -// the yaml package will initialize it if necessary for unmarshalling
      -// the provided data. The out parameter must not be nil.
      -//
      -// The type of the decoded values should be compatible with the respective
      -// values in out. If one or more values cannot be decoded due to a type
      -// mismatches, decoding continues partially until the end of the YAML
      -// content, and a *yaml.TypeError is returned with details for all
      -// missed values.
      -//
      -// Struct fields are only unmarshalled if they are exported (have an
      -// upper case first letter), and are unmarshalled using the field name
      -// lowercased as the default key. Custom keys may be defined via the
      -// "yaml" name in the field tag: the content preceding the first comma
      -// is used as the key, and the following comma-separated options are
      -// used to tweak the marshalling process (see Marshal).
      -// Conflicting names result in a runtime error.
      -//
      -// For example:
      -//
      -//     type T struct {
      -//         F int `yaml:"a,omitempty"`
      -//         B int
      -//     }
      -//     var t T
      -//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
      -//
      -// See the documentation of Marshal for the format of tags and a list of
      -// supported tag options.
      -//
      -func Unmarshal(in []byte, out interface{}) (err error) {
      -	defer handleErr(&err)
      -	d := newDecoder()
      -	p := newParser(in)
      -	defer p.destroy()
      -	node := p.parse()
      -	if node != nil {
      -		v := reflect.ValueOf(out)
      -		if v.Kind() == reflect.Ptr && !v.IsNil() {
      -			v = v.Elem()
      -		}
      -		d.unmarshal(node, v)
      -	}
      -	if len(d.terrors) > 0 {
      -		return &TypeError{d.terrors}
      -	}
      -	return nil
      -}
      -
      -// Marshal serializes the value provided into a YAML document. The structure
      -// of the generated document will reflect the structure of the value itself.
      -// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
      -//
      -// Struct fields are only unmarshalled if they are exported (have an upper case
      -// first letter), and are unmarshalled using the field name lowercased as the
      -// default key. Custom keys may be defined via the "yaml" name in the field
      -// tag: the content preceding the first comma is used as the key, and the
      -// following comma-separated options are used to tweak the marshalling process.
      -// Conflicting names result in a runtime error.
      -//
      -// The field tag format accepted is:
      -//
      -//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
      -//
      -// The following flags are currently supported:
      -//
      -//     omitempty    Only include the field if it's not set to the zero
      -//                  value for the type or to empty slices or maps.
      -//                  Does not apply to zero valued structs.
      -//
      -//     flow         Marshal using a flow style (useful for structs,
      -//                  sequences and maps).
      -//
      -//     inline       Inline the field, which must be a struct or a map,
      -//                  causing all of its fields or keys to be processed as if
      -//                  they were part of the outer struct. For maps, keys must
      -//                  not conflict with the yaml keys of other struct fields.
      -//
      -// In addition, if the key is "-", the field is ignored.
      -//
      -// For example:
      -//
      -//     type T struct {
      -//         F int "a,omitempty"
      -//         B int
      -//     }
      -//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
      -//     yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
      -//
      -func Marshal(in interface{}) (out []byte, err error) {
      -	defer handleErr(&err)
      -	e := newEncoder()
      -	defer e.destroy()
      -	e.marshal("", reflect.ValueOf(in))
      -	e.finish()
      -	out = e.out
      -	return
      -}
      -
      -func handleErr(err *error) {
      -	if v := recover(); v != nil {
      -		if e, ok := v.(yamlError); ok {
      -			*err = e.err
      -		} else {
      -			panic(v)
      -		}
      -	}
      -}
      -
      -type yamlError struct {
      -	err error
      -}
      -
      -func fail(err error) {
      -	panic(yamlError{err})
      -}
      -
      -func failf(format string, args ...interface{}) {
      -	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
      -}
      -
      -// A TypeError is returned by Unmarshal when one or more fields in
      -// the YAML document cannot be properly decoded into the requested
      -// types. When this error is returned, the value is still
      -// unmarshaled partially.
      -type TypeError struct {
      -	Errors []string
      -}
      -
      -func (e *TypeError) Error() string {
      -	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
      -}
      -
      -// --------------------------------------------------------------------------
      -// Maintain a mapping of keys to structure field indexes
      -
      -// The code in this section was copied from mgo/bson.
      -
      -// structInfo holds details for the serialization of fields of
      -// a given struct.
      -type structInfo struct {
      -	FieldsMap  map[string]fieldInfo
      -	FieldsList []fieldInfo
      -
      -	// InlineMap is the number of the field in the struct that
      -	// contains an ,inline map, or -1 if there's none.
      -	InlineMap int
      -}
      -
      -type fieldInfo struct {
      -	Key       string
      -	Num       int
      -	OmitEmpty bool
      -	Flow      bool
      -
      -	// Inline holds the field index if the field is part of an inlined struct.
      -	Inline []int
      -}
      -
      -var structMap = make(map[reflect.Type]*structInfo)
      -var fieldMapMutex sync.RWMutex
      -
      -func getStructInfo(st reflect.Type) (*structInfo, error) {
      -	fieldMapMutex.RLock()
      -	sinfo, found := structMap[st]
      -	fieldMapMutex.RUnlock()
      -	if found {
      -		return sinfo, nil
      -	}
      -
      -	n := st.NumField()
      -	fieldsMap := make(map[string]fieldInfo)
      -	fieldsList := make([]fieldInfo, 0, n)
      -	inlineMap := -1
      -	for i := 0; i != n; i++ {
      -		field := st.Field(i)
      -		if field.PkgPath != "" {
      -			continue // Private field
      -		}
      -
      -		info := fieldInfo{Num: i}
      -
      -		tag := field.Tag.Get("yaml")
      -		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
      -			tag = string(field.Tag)
      -		}
      -		if tag == "-" {
      -			continue
      -		}
      -
      -		inline := false
      -		fields := strings.Split(tag, ",")
      -		if len(fields) > 1 {
      -			for _, flag := range fields[1:] {
      -				switch flag {
      -				case "omitempty":
      -					info.OmitEmpty = true
      -				case "flow":
      -					info.Flow = true
      -				case "inline":
      -					inline = true
      -				default:
      -					return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
      -				}
      -			}
      -			tag = fields[0]
      -		}
      -
      -		if inline {
      -			switch field.Type.Kind() {
      -			case reflect.Map:
      -				if inlineMap >= 0 {
      -					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
      -				}
      -				if field.Type.Key() != reflect.TypeOf("") {
      -					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
      -				}
      -				inlineMap = info.Num
      -			case reflect.Struct:
      -				sinfo, err := getStructInfo(field.Type)
      -				if err != nil {
      -					return nil, err
      -				}
      -				for _, finfo := range sinfo.FieldsList {
      -					if _, found := fieldsMap[finfo.Key]; found {
      -						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
      -						return nil, errors.New(msg)
      -					}
      -					if finfo.Inline == nil {
      -						finfo.Inline = []int{i, finfo.Num}
      -					} else {
      -						finfo.Inline = append([]int{i}, finfo.Inline...)
      -					}
      -					fieldsMap[finfo.Key] = finfo
      -					fieldsList = append(fieldsList, finfo)
      -				}
      -			default:
      -				//return nil, errors.New("Option ,inline needs a struct value or map field")
      -				return nil, errors.New("Option ,inline needs a struct value field")
      -			}
      -			continue
      -		}
      -
      -		if tag != "" {
      -			info.Key = tag
      -		} else {
      -			info.Key = strings.ToLower(field.Name)
      -		}
      -
      -		if _, found = fieldsMap[info.Key]; found {
      -			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
      -			return nil, errors.New(msg)
      -		}
      -
      -		fieldsList = append(fieldsList, info)
      -		fieldsMap[info.Key] = info
      -	}
      -
      -	sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
      -
      -	fieldMapMutex.Lock()
      -	structMap[st] = sinfo
      -	fieldMapMutex.Unlock()
      -	return sinfo, nil
      -}
      -
      -func isZero(v reflect.Value) bool {
      -	switch v.Kind() {
      -	case reflect.String:
      -		return len(v.String()) == 0
      -	case reflect.Interface, reflect.Ptr:
      -		return v.IsNil()
      -	case reflect.Slice:
      -		return v.Len() == 0
      -	case reflect.Map:
      -		return v.Len() == 0
      -	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
      -		return v.Int() == 0
      -	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
      -		return v.Uint() == 0
      -	case reflect.Bool:
      -		return !v.Bool()
      -	case reflect.Struct:
      -		vt := v.Type()
      -		for i := v.NumField()-1; i >= 0; i-- {
      -			if vt.Field(i).PkgPath != "" {
      -				continue // Private field
      -			}
      -			if !isZero(v.Field(i)) {
      -				return false
      -			}
      -		}
      -		return true
      -	}
      -	return false
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/yamlh.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/yamlh.go
      deleted file mode 100644
      index d60a6b6b..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/yamlh.go
      +++ /dev/null
      @@ -1,716 +0,0 @@
      -package yaml
      -
      -import (
      -	"io"
      -)
      -
      -// The version directive data.
      -type yaml_version_directive_t struct {
      -	major int8 // The major version number.
      -	minor int8 // The minor version number.
      -}
      -
      -// The tag directive data.
      -type yaml_tag_directive_t struct {
      -	handle []byte // The tag handle.
      -	prefix []byte // The tag prefix.
      -}
      -
      -type yaml_encoding_t int
      -
      -// The stream encoding.
      -const (
      -	// Let the parser choose the encoding.
      -	yaml_ANY_ENCODING yaml_encoding_t = iota
      -
      -	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
      -	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
      -	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
      -)
      -
      -type yaml_break_t int
      -
      -// Line break types.
      -const (
      -	// Let the parser choose the break type.
      -	yaml_ANY_BREAK yaml_break_t = iota
      -
      -	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
      -	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
      -	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
      -)
      -
      -type yaml_error_type_t int
      -
      -// Many bad things could happen with the parser and emitter.
      -const (
      -	// No error is produced.
      -	yaml_NO_ERROR yaml_error_type_t = iota
      -
      -	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
      -	yaml_READER_ERROR   // Cannot read or decode the input stream.
      -	yaml_SCANNER_ERROR  // Cannot scan the input stream.
      -	yaml_PARSER_ERROR   // Cannot parse the input stream.
      -	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
      -	yaml_WRITER_ERROR   // Cannot write to the output stream.
      -	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
      -)
      -
      -// The pointer position.
      -type yaml_mark_t struct {
      -	index  int // The position index.
      -	line   int // The position line.
      -	column int // The position column.
      -}
      -
      -// Node Styles
      -
      -type yaml_style_t int8
      -
      -type yaml_scalar_style_t yaml_style_t
      -
      -// Scalar styles.
      -const (
      -	// Let the emitter choose the style.
      -	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
      -
      -	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
      -	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
      -	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
      -	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
      -	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
      -)
      -
      -type yaml_sequence_style_t yaml_style_t
      -
      -// Sequence styles.
      -const (
      -	// Let the emitter choose the style.
      -	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
      -
      -	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
      -	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
      -)
      -
      -type yaml_mapping_style_t yaml_style_t
      -
      -// Mapping styles.
      -const (
      -	// Let the emitter choose the style.
      -	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
      -
      -	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
      -	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
      -)
      -
      -// Tokens
      -
      -type yaml_token_type_t int
      -
      -// Token types.
      -const (
      -	// An empty token.
      -	yaml_NO_TOKEN yaml_token_type_t = iota
      -
      -	yaml_STREAM_START_TOKEN // A STREAM-START token.
      -	yaml_STREAM_END_TOKEN   // A STREAM-END token.
      -
      -	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
      -	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
      -	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
      -	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
      -
      -	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
      -	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-SEQUENCE-END token.
      -	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
      -
      -	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
      -	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
      -	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
      -	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
      -
      -	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
      -	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
      -	yaml_KEY_TOKEN         // A KEY token.
      -	yaml_VALUE_TOKEN       // A VALUE token.
      -
      -	yaml_ALIAS_TOKEN  // An ALIAS token.
      -	yaml_ANCHOR_TOKEN // An ANCHOR token.
      -	yaml_TAG_TOKEN    // A TAG token.
      -	yaml_SCALAR_TOKEN // A SCALAR token.
      -)
      -
      -func (tt yaml_token_type_t) String() string {
      -	switch tt {
      -	case yaml_NO_TOKEN:
      -		return "yaml_NO_TOKEN"
      -	case yaml_STREAM_START_TOKEN:
      -		return "yaml_STREAM_START_TOKEN"
      -	case yaml_STREAM_END_TOKEN:
      -		return "yaml_STREAM_END_TOKEN"
      -	case yaml_VERSION_DIRECTIVE_TOKEN:
      -		return "yaml_VERSION_DIRECTIVE_TOKEN"
      -	case yaml_TAG_DIRECTIVE_TOKEN:
      -		return "yaml_TAG_DIRECTIVE_TOKEN"
      -	case yaml_DOCUMENT_START_TOKEN:
      -		return "yaml_DOCUMENT_START_TOKEN"
      -	case yaml_DOCUMENT_END_TOKEN:
      -		return "yaml_DOCUMENT_END_TOKEN"
      -	case yaml_BLOCK_SEQUENCE_START_TOKEN:
      -		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
      -	case yaml_BLOCK_MAPPING_START_TOKEN:
      -		return "yaml_BLOCK_MAPPING_START_TOKEN"
      -	case yaml_BLOCK_END_TOKEN:
      -		return "yaml_BLOCK_END_TOKEN"
      -	case yaml_FLOW_SEQUENCE_START_TOKEN:
      -		return "yaml_FLOW_SEQUENCE_START_TOKEN"
      -	case yaml_FLOW_SEQUENCE_END_TOKEN:
      -		return "yaml_FLOW_SEQUENCE_END_TOKEN"
      -	case yaml_FLOW_MAPPING_START_TOKEN:
      -		return "yaml_FLOW_MAPPING_START_TOKEN"
      -	case yaml_FLOW_MAPPING_END_TOKEN:
      -		return "yaml_FLOW_MAPPING_END_TOKEN"
      -	case yaml_BLOCK_ENTRY_TOKEN:
      -		return "yaml_BLOCK_ENTRY_TOKEN"
      -	case yaml_FLOW_ENTRY_TOKEN:
      -		return "yaml_FLOW_ENTRY_TOKEN"
      -	case yaml_KEY_TOKEN:
      -		return "yaml_KEY_TOKEN"
      -	case yaml_VALUE_TOKEN:
      -		return "yaml_VALUE_TOKEN"
      -	case yaml_ALIAS_TOKEN:
      -		return "yaml_ALIAS_TOKEN"
      -	case yaml_ANCHOR_TOKEN:
      -		return "yaml_ANCHOR_TOKEN"
      -	case yaml_TAG_TOKEN:
      -		return "yaml_TAG_TOKEN"
      -	case yaml_SCALAR_TOKEN:
      -		return "yaml_SCALAR_TOKEN"
      -	}
      -	return "<unknown token>"
      -}
      -
      -// The token structure.
      -type yaml_token_t struct {
      -	// The token type.
      -	typ yaml_token_type_t
      -
      -	// The start/end of the token.
      -	start_mark, end_mark yaml_mark_t
      -
      -	// The stream encoding (for yaml_STREAM_START_TOKEN).
      -	encoding yaml_encoding_t
      -
      -	// The alias/anchor/scalar value or tag/tag directive handle
      -	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
      -	value []byte
      -
      -	// The tag suffix (for yaml_TAG_TOKEN).
      -	suffix []byte
      -
      -	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
      -	prefix []byte
      -
      -	// The scalar style (for yaml_SCALAR_TOKEN).
      -	style yaml_scalar_style_t
      -
      -	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
      -	major, minor int8
      -}
      -
      -// Events
      -
      -type yaml_event_type_t int8
      -
      -// Event types.
      -const (
      -	// An empty event.
      -	yaml_NO_EVENT yaml_event_type_t = iota
      -
      -	yaml_STREAM_START_EVENT   // A STREAM-START event.
      -	yaml_STREAM_END_EVENT     // A STREAM-END event.
      -	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
      -	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
      -	yaml_ALIAS_EVENT          // An ALIAS event.
      -	yaml_SCALAR_EVENT         // A SCALAR event.
      -	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
      -	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
      -	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
      -	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
      -)
      -
      -// The event structure.
      -type yaml_event_t struct {
      -
      -	// The event type.
      -	typ yaml_event_type_t
      -
      -	// The start and end of the event.
      -	start_mark, end_mark yaml_mark_t
      -
      -	// The document encoding (for yaml_STREAM_START_EVENT).
      -	encoding yaml_encoding_t
      -
      -	// The version directive (for yaml_DOCUMENT_START_EVENT).
      -	version_directive *yaml_version_directive_t
      -
      -	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
      -	tag_directives []yaml_tag_directive_t
      -
      -	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
      -	anchor []byte
      -
      -	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
      -	tag []byte
      -
      -	// The scalar value (for yaml_SCALAR_EVENT).
      -	value []byte
      -
      -	// Is the document start/end indicator implicit, or the tag optional?
      -	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
      -	implicit bool
      -
      -	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
      -	quoted_implicit bool
      -
      -	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
      -	style yaml_style_t
      -}
      -
      -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
      -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
      -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
      -
      -// Nodes
      -
      -const (
      -	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
      -	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
      -	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
      -	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
      -	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
      -	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
      -
      -	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
      -	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
      -
      -	// Not in original libyaml.
      -	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
      -	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
      -
      -	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
      -	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
      -	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
      -)
      -
      -type yaml_node_type_t int
      -
      -// Node types.
      -const (
      -	// An empty node.
      -	yaml_NO_NODE yaml_node_type_t = iota
      -
      -	yaml_SCALAR_NODE   // A scalar node.
      -	yaml_SEQUENCE_NODE // A sequence node.
      -	yaml_MAPPING_NODE  // A mapping node.
      -)
      -
      -// An element of a sequence node.
      -type yaml_node_item_t int
      -
      -// An element of a mapping node.
      -type yaml_node_pair_t struct {
      -	key   int // The key of the element.
      -	value int // The value of the element.
      -}
      -
      -// The node structure.
      -type yaml_node_t struct {
      -	typ yaml_node_type_t // The node type.
      -	tag []byte           // The node tag.
      -
      -	// The node data.
      -
      -	// The scalar parameters (for yaml_SCALAR_NODE).
      -	scalar struct {
      -		value  []byte              // The scalar value.
      -		length int                 // The length of the scalar value.
      -		style  yaml_scalar_style_t // The scalar style.
      -	}
      -
      -	// The sequence parameters (for YAML_SEQUENCE_NODE).
      -	sequence struct {
      -		items_data []yaml_node_item_t    // The stack of sequence items.
      -		style      yaml_sequence_style_t // The sequence style.
      -	}
      -
      -	// The mapping parameters (for yaml_MAPPING_NODE).
      -	mapping struct {
      -		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
      -		pairs_start *yaml_node_pair_t    // The beginning of the stack.
      -		pairs_end   *yaml_node_pair_t    // The end of the stack.
      -		pairs_top   *yaml_node_pair_t    // The top of the stack.
      -		style       yaml_mapping_style_t // The mapping style.
      -	}
      -
      -	start_mark yaml_mark_t // The beginning of the node.
      -	end_mark   yaml_mark_t // The end of the node.
      -
      -}
      -
      -// The document structure.
      -type yaml_document_t struct {
      -
      -	// The document nodes.
      -	nodes []yaml_node_t
      -
      -	// The version directive.
      -	version_directive *yaml_version_directive_t
      -
      -	// The list of tag directives.
      -	tag_directives_data  []yaml_tag_directive_t
      -	tag_directives_start int // The beginning of the tag directives list.
      -	tag_directives_end   int // The end of the tag directives list.
      -
      -	start_implicit int // Is the document start indicator implicit?
      -	end_implicit   int // Is the document end indicator implicit?
      -
      -	// The start/end of the document.
      -	start_mark, end_mark yaml_mark_t
      -}
      -
      -// The prototype of a read handler.
      -//
      -// The read handler is called when the parser needs to read more bytes from the
      -// source. The handler should write not more than size bytes to the buffer.
      -// The number of written bytes should be set to the size_read variable.
      -//
      -// [in,out]   data        A pointer to an application data specified by
      -//                        yaml_parser_set_input().
      -// [out]      buffer      The buffer to write the data from the source.
      -// [in]       size        The size of the buffer.
      -// [out]      size_read   The actual number of bytes read from the source.
      -//
      -// On success, the handler should return 1.  If the handler failed,
      -// the returned value should be 0. On EOF, the handler should set the
      -// size_read to 0 and return 1.
      -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
      -
      -// This structure holds information about a potential simple key.
      -type yaml_simple_key_t struct {
      -	possible     bool        // Is a simple key possible?
      -	required     bool        // Is a simple key required?
      -	token_number int         // The number of the token.
      -	mark         yaml_mark_t // The position mark.
      -}
      -
      -// The states of the parser.
      -type yaml_parser_state_t int
      -
      -const (
      -	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
      -
      -	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
      -	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
      -	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
      -	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
      -	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
      -	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
      -	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
      -	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
      -	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
      -	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
      -	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
      -	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
      -	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
      -	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
      -	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
      -	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
      -	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
      -	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the and of an ordered mapping entry.
      -	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
      -	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
      -	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
      -	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
      -	yaml_PARSE_END_STATE                               // Expect nothing.
      -)
      -
      -func (ps yaml_parser_state_t) String() string {
      -	switch ps {
      -	case yaml_PARSE_STREAM_START_STATE:
      -		return "yaml_PARSE_STREAM_START_STATE"
      -	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
      -		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
      -	case yaml_PARSE_DOCUMENT_START_STATE:
      -		return "yaml_PARSE_DOCUMENT_START_STATE"
      -	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
      -		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
      -	case yaml_PARSE_DOCUMENT_END_STATE:
      -		return "yaml_PARSE_DOCUMENT_END_STATE"
      -	case yaml_PARSE_BLOCK_NODE_STATE:
      -		return "yaml_PARSE_BLOCK_NODE_STATE"
      -	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
      -		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
      -	case yaml_PARSE_FLOW_NODE_STATE:
      -		return "yaml_PARSE_FLOW_NODE_STATE"
      -	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
      -		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
      -	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
      -		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
      -	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
      -		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
      -	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
      -		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
      -	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
      -		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
      -	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
      -		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
      -	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
      -		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
      -		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
      -		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
      -		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
      -		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
      -	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
      -		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
      -	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
      -		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
      -	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
      -		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
      -	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
      -		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
      -	case yaml_PARSE_END_STATE:
      -		return "yaml_PARSE_END_STATE"
      -	}
      -	return "<unknown parser state>"
      -}
      -
      -// This structure holds aliases data.
      -type yaml_alias_data_t struct {
      -	anchor []byte      // The anchor.
      -	index  int         // The node id.
      -	mark   yaml_mark_t // The anchor mark.
      -}
      -
      -// The parser structure.
      -//
      -// All members are internal. Manage the structure using the
      -// yaml_parser_ family of functions.
      -type yaml_parser_t struct {
      -
      -	// Error handling
      -
      -	error yaml_error_type_t // Error type.
      -
      -	problem string // Error description.
      -
      -	// The byte about which the problem occured.
      -	problem_offset int
      -	problem_value  int
      -	problem_mark   yaml_mark_t
      -
      -	// The error context.
      -	context      string
      -	context_mark yaml_mark_t
      -
      -	// Reader stuff
      -
      -	read_handler yaml_read_handler_t // Read handler.
      -
      -	input_file io.Reader // File input data.
      -	input      []byte    // String input data.
      -	input_pos  int
      -
      -	eof bool // EOF flag
      -
      -	buffer     []byte // The working buffer.
      -	buffer_pos int    // The current position of the buffer.
      -
      -	unread int // The number of unread characters in the buffer.
      -
      -	raw_buffer     []byte // The raw buffer.
      -	raw_buffer_pos int    // The current position of the buffer.
      -
      -	encoding yaml_encoding_t // The input encoding.
      -
      -	offset int         // The offset of the current position (in bytes).
      -	mark   yaml_mark_t // The mark of the current position.
      -
      -	// Scanner stuff
      -
      -	stream_start_produced bool // Have we started to scan the input stream?
      -	stream_end_produced   bool // Have we reached the end of the input stream?
      -
      -	flow_level int // The number of unclosed '[' and '{' indicators.
      -
      -	tokens          []yaml_token_t // The tokens queue.
      -	tokens_head     int            // The head of the tokens queue.
      -	tokens_parsed   int            // The number of tokens fetched from the queue.
      -	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
      -
      -	indent  int   // The current indentation level.
      -	indents []int // The indentation levels stack.
      -
      -	simple_key_allowed bool                // May a simple key occur at the current position?
      -	simple_keys        []yaml_simple_key_t // The stack of simple keys.
      -
      -	// Parser stuff
      -
      -	state          yaml_parser_state_t    // The current parser state.
      -	states         []yaml_parser_state_t  // The parser states stack.
      -	marks          []yaml_mark_t          // The stack of marks.
      -	tag_directives []yaml_tag_directive_t // The list of TAG directives.
      -
      -	// Dumper stuff
      -
      -	aliases []yaml_alias_data_t // The alias data.
      -
      -	document *yaml_document_t // The currently parsed document.
      -}
      -
      -// Emitter Definitions
      -
      -// The prototype of a write handler.
      -//
      -// The write handler is called when the emitter needs to flush the accumulated
      -// characters to the output.  The handler should write @a size bytes of the
      -// @a buffer to the output.
      -//
      -// @param[in,out]   data        A pointer to an application data specified by
      -//                              yaml_emitter_set_output().
      -// @param[in]       buffer      The buffer with bytes to be written.
      -// @param[in]       size        The size of the buffer.
      -//
      -// @returns On success, the handler should return @c 1.  If the handler failed,
      -// the returned value should be @c 0.
      -//
      -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
      -
      -type yaml_emitter_state_t int
      -
      -// The emitter states.
      -const (
      -	// Expect STREAM-START.
      -	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
      -
      -	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
      -	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
      -	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
      -	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
      -	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
      -	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
      -	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
      -	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
      -	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
      -	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
      -	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
      -	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
      -	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
      -	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
      -	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
      -	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
      -	yaml_EMIT_END_STATE                        // Expect nothing.
      -)
      -
      -// The emitter structure.
      -//
      -// All members are internal.  Manage the structure using the @c yaml_emitter_
      -// family of functions.
      -type yaml_emitter_t struct {
      -
      -	// Error handling
      -
      -	error   yaml_error_type_t // Error type.
      -	problem string            // Error description.
      -
      -	// Writer stuff
      -
      -	write_handler yaml_write_handler_t // Write handler.
      -
      -	output_buffer *[]byte   // String output data.
      -	output_file   io.Writer // File output data.
      -
      -	buffer     []byte // The working buffer.
      -	buffer_pos int    // The current position of the buffer.
      -
      -	raw_buffer     []byte // The raw buffer.
      -	raw_buffer_pos int    // The current position of the buffer.
      -
      -	encoding yaml_encoding_t // The stream encoding.
      -
      -	// Emitter stuff
      -
      -	canonical   bool         // If the output is in the canonical style?
      -	best_indent int          // The number of indentation spaces.
      -	best_width  int          // The preferred width of the output lines.
      -	unicode     bool         // Allow unescaped non-ASCII characters?
      -	line_break  yaml_break_t // The preferred line break.
      -
      -	state  yaml_emitter_state_t   // The current emitter state.
      -	states []yaml_emitter_state_t // The stack of states.
      -
      -	events      []yaml_event_t // The event queue.
      -	events_head int            // The head of the event queue.
      -
      -	indents []int // The stack of indentation levels.
      -
      -	tag_directives []yaml_tag_directive_t // The list of tag directives.
      -
      -	indent int // The current indentation level.
      -
      -	flow_level int // The current flow level.
      -
      -	root_context       bool // Is it the document root context?
      -	sequence_context   bool // Is it a sequence context?
      -	mapping_context    bool // Is it a mapping context?
      -	simple_key_context bool // Is it a simple mapping key context?
      -
      -	line       int  // The current line.
      -	column     int  // The current column.
      -	whitespace bool // If the last character was a whitespace?
      -	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
      -	open_ended bool // If an explicit document end is required?
      -
      -	// Anchor analysis.
      -	anchor_data struct {
      -		anchor []byte // The anchor value.
      -		alias  bool   // Is it an alias?
      -	}
      -
      -	// Tag analysis.
      -	tag_data struct {
      -		handle []byte // The tag handle.
      -		suffix []byte // The tag suffix.
      -	}
      -
      -	// Scalar analysis.
      -	scalar_data struct {
      -		value                 []byte              // The scalar value.
      -		multiline             bool                // Does the scalar contain line breaks?
      -		flow_plain_allowed    bool                // Can the scalar be expessed in the flow plain style?
      -		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
      -		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
      -		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
      -		style                 yaml_scalar_style_t // The output style.
      -	}
      -
      -	// Dumper stuff
      -
      -	opened bool // If the stream was already opened?
      -	closed bool // If the stream was already closed?
      -
      -	// The information associated with the document nodes.
      -	anchors *struct {
      -		references int  // The number of references.
      -		anchor     int  // The anchor id.
      -		serialized bool // If the node has been emitted?
      -	}
      -
      -	last_anchor_id int // The last assigned anchor id.
      -
      -	document *yaml_document_t // The currently emitted document.
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/yamlprivateh.go b/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/yamlprivateh.go
      deleted file mode 100644
      index 8110ce3c..00000000
      --- a/vendor/github.com/coreos/ignition/config/v1/vendor/github.com/go-yaml/yaml/yamlprivateh.go
      +++ /dev/null
      @@ -1,173 +0,0 @@
      -package yaml
      -
      -const (
      -	// The size of the input raw buffer.
      -	input_raw_buffer_size = 512
      -
      -	// The size of the input buffer.
      -	// It should be possible to decode the whole raw buffer.
      -	input_buffer_size = input_raw_buffer_size * 3
      -
      -	// The size of the output buffer.
      -	output_buffer_size = 128
      -
      -	// The size of the output raw buffer.
      -	// It should be possible to encode the whole output buffer.
      -	output_raw_buffer_size = (output_buffer_size*2 + 2)
      -
      -	// The size of other stacks and queues.
      -	initial_stack_size  = 16
      -	initial_queue_size  = 16
      -	initial_string_size = 16
      -)
      -
      -// Check if the character at the specified position is an alphabetical
      -// character, a digit, '_', or '-'.
      -func is_alpha(b []byte, i int) bool {
      -	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
      -}
      -
      -// Check if the character at the specified position is a digit.
      -func is_digit(b []byte, i int) bool {
      -	return b[i] >= '0' && b[i] <= '9'
      -}
      -
      -// Get the value of a digit.
      -func as_digit(b []byte, i int) int {
      -	return int(b[i]) - '0'
      -}
      -
      -// Check if the character at the specified position is a hex-digit.
      -func is_hex(b []byte, i int) bool {
      -	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
      -}
      -
      -// Get the value of a hex-digit.
      -func as_hex(b []byte, i int) int {
      -	bi := b[i]
      -	if bi >= 'A' && bi <= 'F' {
      -		return int(bi) - 'A' + 10
      -	}
      -	if bi >= 'a' && bi <= 'f' {
      -		return int(bi) - 'a' + 10
      -	}
      -	return int(bi) - '0'
      -}
      -
      -// Check if the character is ASCII.
      -func is_ascii(b []byte, i int) bool {
      -	return b[i] <= 0x7F
      -}
      -
      -// Check if the character at the start of the buffer can be printed unescaped.
      -func is_printable(b []byte, i int) bool {
      -	return ((b[i] == 0x0A) || // . == #x0A
      -		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
      -		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
      -		(b[i] > 0xC2 && b[i] < 0xED) ||
      -		(b[i] == 0xED && b[i+1] < 0xA0) ||
      -		(b[i] == 0xEE) ||
      -		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
      -			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
      -			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
      -}
      -
      -// Check if the character at the specified position is NUL.
      -func is_z(b []byte, i int) bool {
      -	return b[i] == 0x00
      -}
      -
      -// Check if the beginning of the buffer is a BOM.
      -func is_bom(b []byte, i int) bool {
      -	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
      -}
      -
      -// Check if the character at the specified position is space.
      -func is_space(b []byte, i int) bool {
      -	return b[i] == ' '
      -}
      -
      -// Check if the character at the specified position is tab.
      -func is_tab(b []byte, i int) bool {
      -	return b[i] == '\t'
      -}
      -
      -// Check if the character at the specified position is blank (space or tab).
      -func is_blank(b []byte, i int) bool {
      -	//return is_space(b, i) || is_tab(b, i)
      -	return b[i] == ' ' || b[i] == '\t'
      -}
      -
      -// Check if the character at the specified position is a line break.
      -func is_break(b []byte, i int) bool {
      -	return (b[i] == '\r' || // CR (#xD)
      -		b[i] == '\n' || // LF (#xA)
      -		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
      -}
      -
      -func is_crlf(b []byte, i int) bool {
      -	return b[i] == '\r' && b[i+1] == '\n'
      -}
      -
      -// Check if the character is a line break or NUL.
      -func is_breakz(b []byte, i int) bool {
      -	//return is_break(b, i) || is_z(b, i)
      -	return (        // is_break:
      -	b[i] == '\r' || // CR (#xD)
      -		b[i] == '\n' || // LF (#xA)
      -		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
      -		// is_z:
      -		b[i] == 0)
      -}
      -
      -// Check if the character is a line break, space, or NUL.
      -func is_spacez(b []byte, i int) bool {
      -	//return is_space(b, i) || is_breakz(b, i)
      -	return ( // is_space:
      -	b[i] == ' ' ||
      -		// is_breakz:
      -		b[i] == '\r' || // CR (#xD)
      -		b[i] == '\n' || // LF (#xA)
      -		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
      -		b[i] == 0)
      -}
      -
      -// Check if the character is a line break, space, tab, or NUL.
      -func is_blankz(b []byte, i int) bool {
      -	//return is_blank(b, i) || is_breakz(b, i)
      -	return ( // is_blank:
      -	b[i] == ' ' || b[i] == '\t' ||
      -		// is_breakz:
      -		b[i] == '\r' || // CR (#xD)
      -		b[i] == '\n' || // LF (#xA)
      -		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
      -		b[i] == 0)
      -}
      -
      -// Determine the width of the character.
      -func width(b byte) int {
      -	// Don't replace these by a switch without first
      -	// confirming that it is being inlined.
      -	if b&0x80 == 0x00 {
      -		return 1
      -	}
      -	if b&0xE0 == 0xC0 {
      -		return 2
      -	}
      -	if b&0xF0 == 0xE0 {
      -		return 3
      -	}
      -	if b&0xF8 == 0xF0 {
      -		return 4
      -	}
      -	return 0
      -
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/COPYING
      deleted file mode 100644
      index 2993ec08..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/COPYING
      +++ /dev/null
      @@ -1,19 +0,0 @@
      -Copyright (C) 2014 Alec Thomas
      -
      -Permission is hereby granted, free of charge, to any person obtaining a copy of
      -this software and associated documentation files (the "Software"), to deal in
      -the Software without restriction, including without limitation the rights to
      -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
      -of the Software, and to permit persons to whom the Software is furnished to do
      -so, subject to the following conditions:
      -
      -The above copyright notice and this permission notice shall be included in all
      -copies or substantial portions of the Software.
      -
      -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
      -SOFTWARE.
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/README.md b/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/README.md
      deleted file mode 100644
      index bee884e3..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/README.md
      +++ /dev/null
      @@ -1,11 +0,0 @@
      -# Units - Helpful unit multipliers and functions for Go
      -
      -The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package.
      -
      -It allows for code like this:
      -
      -```go
      -n, err := ParseBase2Bytes("1KB")
      -// n == 1024
      -n = units.Mebibyte * 512
      -```
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/bytes.go
      deleted file mode 100644
      index eaadeb80..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/bytes.go
      +++ /dev/null
      @@ -1,83 +0,0 @@
      -package units
      -
      -// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
      -// etc.).
      -type Base2Bytes int64
      -
      -// Base-2 byte units.
      -const (
      -	Kibibyte Base2Bytes = 1024
      -	KiB                 = Kibibyte
      -	Mebibyte            = Kibibyte * 1024
      -	MiB                 = Mebibyte
      -	Gibibyte            = Mebibyte * 1024
      -	GiB                 = Gibibyte
      -	Tebibyte            = Gibibyte * 1024
      -	TiB                 = Tebibyte
      -	Pebibyte            = Tebibyte * 1024
      -	PiB                 = Pebibyte
      -	Exbibyte            = Pebibyte * 1024
      -	EiB                 = Exbibyte
      -)
      -
      -var (
      -	bytesUnitMap    = MakeUnitMap("iB", "B", 1024)
      -	oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
      -)
      -
      -// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
      -// and KiB are both 1024.
      -func ParseBase2Bytes(s string) (Base2Bytes, error) {
      -	n, err := ParseUnit(s, bytesUnitMap)
      -	if err != nil {
      -		n, err = ParseUnit(s, oldBytesUnitMap)
      -	}
      -	return Base2Bytes(n), err
      -}
      -
      -func (b Base2Bytes) String() string {
      -	return ToString(int64(b), 1024, "iB", "B")
      -}
      -
      -var (
      -	metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
      -)
      -
      -// MetricBytes are SI byte units (1000 bytes in a kilobyte).
      -type MetricBytes SI
      -
      -// SI base-10 byte units.
      -const (
      -	Kilobyte MetricBytes = 1000
      -	KB                   = Kilobyte
      -	Megabyte             = Kilobyte * 1000
      -	MB                   = Megabyte
      -	Gigabyte             = Megabyte * 1000
      -	GB                   = Gigabyte
      -	Terabyte             = Gigabyte * 1000
      -	TB                   = Terabyte
      -	Petabyte             = Terabyte * 1000
      -	PB                   = Petabyte
      -	Exabyte              = Petabyte * 1000
      -	EB                   = Exabyte
      -)
      -
      -// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
      -func ParseMetricBytes(s string) (MetricBytes, error) {
      -	n, err := ParseUnit(s, metricBytesUnitMap)
      -	return MetricBytes(n), err
      -}
      -
      -func (m MetricBytes) String() string {
      -	return ToString(int64(m), 1000, "B", "B")
      -}
      -
      -// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
      -// respectively. That is, KiB represents 1024 and KB represents 1000.
      -func ParseStrictBytes(s string) (int64, error) {
      -	n, err := ParseUnit(s, bytesUnitMap)
      -	if err != nil {
      -		n, err = ParseUnit(s, metricBytesUnitMap)
      -	}
      -	return int64(n), err
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/bytes_test.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/bytes_test.go
      deleted file mode 100644
      index d4317aa5..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/bytes_test.go
      +++ /dev/null
      @@ -1,49 +0,0 @@
      -package units
      -
      -import (
      -	"testing"
      -
      -	"github.com/stretchr/testify/assert"
      -)
      -
      -func TestBase2BytesString(t *testing.T) {
      -	assert.Equal(t, Base2Bytes(0).String(), "0B")
      -	assert.Equal(t, Base2Bytes(1025).String(), "1KiB1B")
      -	assert.Equal(t, Base2Bytes(1048577).String(), "1MiB1B")
      -}
      -
      -func TestParseBase2Bytes(t *testing.T) {
      -	n, err := ParseBase2Bytes("0B")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 0, n)
      -	n, err = ParseBase2Bytes("1KB")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 1024, n)
      -	n, err = ParseBase2Bytes("1MB1KB25B")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 1049625, n)
      -	n, err = ParseBase2Bytes("1.5MB")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 1572864, n)
      -}
      -
      -func TestMetricBytesString(t *testing.T) {
      -	assert.Equal(t, MetricBytes(0).String(), "0B")
      -	assert.Equal(t, MetricBytes(1001).String(), "1KB1B")
      -	assert.Equal(t, MetricBytes(1001025).String(), "1MB1KB25B")
      -}
      -
      -func TestParseMetricBytes(t *testing.T) {
      -	n, err := ParseMetricBytes("0B")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 0, n)
      -	n, err = ParseMetricBytes("1KB1B")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 1001, n)
      -	n, err = ParseMetricBytes("1MB1KB25B")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 1001025, n)
      -	n, err = ParseMetricBytes("1.5MB")
      -	assert.NoError(t, err)
      -	assert.Equal(t, 1500000, n)
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/doc.go
      deleted file mode 100644
      index 156ae386..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/doc.go
      +++ /dev/null
      @@ -1,13 +0,0 @@
      -// Package units provides helpful unit multipliers and functions for Go.
      -//
      -// The goal of this package is to have functionality similar to the time [1] package.
      -//
      -//
      -// [1] http://golang.org/pkg/time/
      -//
      -// It allows for code like this:
      -//
      -//     n, err := ParseBase2Bytes("1KB")
      -//     // n == 1024
      -//     n = units.Mebibyte * 512
      -package units
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/si.go
      deleted file mode 100644
      index 8234a9d5..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/si.go
      +++ /dev/null
      @@ -1,26 +0,0 @@
      -package units
      -
      -// SI units.
      -type SI int64
      -
      -// SI unit multiples.
      -const (
      -	Kilo SI = 1000
      -	Mega    = Kilo * 1000
      -	Giga    = Mega * 1000
      -	Tera    = Giga * 1000
      -	Peta    = Tera * 1000
      -	Exa     = Peta * 1000
      -)
      -
      -func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
      -	return map[string]float64{
      -		shortSuffix:  1,
      -		"K" + suffix: float64(scale),
      -		"M" + suffix: float64(scale * scale),
      -		"G" + suffix: float64(scale * scale * scale),
      -		"T" + suffix: float64(scale * scale * scale * scale),
      -		"P" + suffix: float64(scale * scale * scale * scale * scale),
      -		"E" + suffix: float64(scale * scale * scale * scale * scale * scale),
      -	}
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/util.go
      deleted file mode 100644
      index 6527e92d..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/alecthomas/units/util.go
      +++ /dev/null
      @@ -1,138 +0,0 @@
      -package units
      -
      -import (
      -	"errors"
      -	"fmt"
      -	"strings"
      -)
      -
      -var (
      -	siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
      -)
      -
      -func ToString(n int64, scale int64, suffix, baseSuffix string) string {
      -	mn := len(siUnits)
      -	out := make([]string, mn)
      -	for i, m := range siUnits {
      -		if n%scale != 0 || i == 0 && n == 0 {
      -			s := suffix
      -			if i == 0 {
      -				s = baseSuffix
      -			}
      -			out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
      -		}
      -		n /= scale
      -		if n == 0 {
      -			break
      -		}
      -	}
      -	return strings.Join(out, "")
      -}
      -
      -// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
      -var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
      -
      -// leadingInt consumes the leading [0-9]* from s.
      -func leadingInt(s string) (x int64, rem string, err error) {
      -	i := 0
      -	for ; i < len(s); i++ {
      -		c := s[i]
      -		if c < '0' || c > '9' {
      -			break
      -		}
      -		if x >= (1<<63-10)/10 {
      -			// overflow
      -			return 0, "", errLeadingInt
      -		}
      -		x = x*10 + int64(c) - '0'
      -	}
      -	return x, s[i:], nil
      -}
      -
      -func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
      -	// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
      -	orig := s
      -	f := float64(0)
      -	neg := false
      -
      -	// Consume [-+]?
      -	if s != "" {
      -		c := s[0]
      -		if c == '-' || c == '+' {
      -			neg = c == '-'
      -			s = s[1:]
      -		}
      -	}
      -	// Special case: if all that is left is "0", this is zero.
      -	if s == "0" {
      -		return 0, nil
      -	}
      -	if s == "" {
      -		return 0, errors.New("units: invalid " + orig)
      -	}
      -	for s != "" {
      -		g := float64(0) // this element of the sequence
      -
      -		var x int64
      -		var err error
      -
      -		// The next character must be [0-9.]
      -		if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
      -			return 0, errors.New("units: invalid " + orig)
      -		}
      -		// Consume [0-9]*
      -		pl := len(s)
      -		x, s, err = leadingInt(s)
      -		if err != nil {
      -			return 0, errors.New("units: invalid " + orig)
      -		}
      -		g = float64(x)
      -		pre := pl != len(s) // whether we consumed anything before a period
      -
      -		// Consume (\.[0-9]*)?
      -		post := false
      -		if s != "" && s[0] == '.' {
      -			s = s[1:]
      -			pl := len(s)
      -			x, s, err = leadingInt(s)
      -			if err != nil {
      -				return 0, errors.New("units: invalid " + orig)
      -			}
      -			scale := 1.0
      -			for n := pl - len(s); n > 0; n-- {
      -				scale *= 10
      -			}
      -			g += float64(x) / scale
      -			post = pl != len(s)
      -		}
      -		if !pre && !post {
      -			// no digits (e.g. ".s" or "-.s")
      -			return 0, errors.New("units: invalid " + orig)
      -		}
      -
      -		// Consume unit.
      -		i := 0
      -		for ; i < len(s); i++ {
      -			c := s[i]
      -			if c == '.' || ('0' <= c && c <= '9') {
      -				break
      -			}
      -		}
      -		u := s[:i]
      -		s = s[i:]
      -		unit, ok := unitMap[u]
      -		if !ok {
      -			return 0, errors.New("units: unknown unit " + u + " in " + orig)
      -		}
      -
      -		f += g * unit
      -	}
      -
      -	if neg {
      -		f = -f
      -	}
      -	if f < float64(-1<<63) || f > float64(1<<63-1) {
      -		return 0, errors.New("units: overflow parsing unit")
      -	}
      -	return int64(f), nil
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/semver/semver.go
      deleted file mode 100644
      index 000a0205..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/semver/semver.go
      +++ /dev/null
      @@ -1,257 +0,0 @@
      -// Copyright 2013-2015 CoreOS, Inc.
      -//
      -// Licensed under the Apache License, Version 2.0 (the "License");
      -// you may not use this file except in compliance with the License.
      -// You may obtain a copy of the License at
      -//
      -//     http://www.apache.org/licenses/LICENSE-2.0
      -//
      -// Unless required by applicable law or agreed to in writing, software
      -// distributed under the License is distributed on an "AS IS" BASIS,
      -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      -// See the License for the specific language governing permissions and
      -// limitations under the License.
      -
      -// Semantic Versions http://semver.org
      -package semver
      -
      -import (
      -	"bytes"
      -	"errors"
      -	"fmt"
      -	"strconv"
      -	"strings"
      -)
      -
      -type Version struct {
      -	Major      int64
      -	Minor      int64
      -	Patch      int64
      -	PreRelease PreRelease
      -	Metadata   string
      -}
      -
      -type PreRelease string
      -
      -func splitOff(input *string, delim string) (val string) {
      -	parts := strings.SplitN(*input, delim, 2)
      -
      -	if len(parts) == 2 {
      -		*input = parts[0]
      -		val = parts[1]
      -	}
      -
      -	return val
      -}
      -
      -func NewVersion(version string) (*Version, error) {
      -	v := Version{}
      -
      -	v.Metadata = splitOff(&version, "+")
      -	v.PreRelease = PreRelease(splitOff(&version, "-"))
      -
      -	dotParts := strings.SplitN(version, ".", 3)
      -
      -	if len(dotParts) != 3 {
      -		return nil, errors.New(fmt.Sprintf("%s is not in dotted-tri format", version))
      -	}
      -
      -	parsed := make([]int64, 3, 3)
      -
      -	for i, v := range dotParts[:3] {
      -		val, err := strconv.ParseInt(v, 10, 64)
      -		parsed[i] = val
      -		if err != nil {
      -			return nil, err
      -		}
      -	}
      -
      -	v.Major = parsed[0]
      -	v.Minor = parsed[1]
      -	v.Patch = parsed[2]
      -
      -	return &v, nil
      -}
      -
      -func Must(v *Version, err error) *Version {
      -	if err != nil {
      -		panic(err)
      -	}
      -	return v
      -}
      -
      -func (v Version) String() string {
      -	var buffer bytes.Buffer
      -
      -	fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
      -
      -	if v.PreRelease != "" {
      -		fmt.Fprintf(&buffer, "-%s", v.PreRelease)
      -	}
      -
      -	if v.Metadata != "" {
      -		fmt.Fprintf(&buffer, "+%s", v.Metadata)
      -	}
      -
      -	return buffer.String()
      -}
      -
      -func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
      -	var data string
      -	if err := unmarshal(&data); err != nil {
      -		return err
      -	}
      -	vv, err := NewVersion(data)
      -	if err != nil {
      -		return err
      -	}
      -	*v = *vv
      -	return nil
      -}
      -
      -func (v Version) MarshalJSON() ([]byte, error) {
      -	return []byte(`"` + v.String() + `"`), nil
      -}
      -
      -func (v *Version) UnmarshalJSON(data []byte) error {
      -	l := len(data)
      -	if l == 0 || string(data) == `""` {
      -		return nil
      -	}
      -	if l < 2 || data[0] != '"' || data[l-1] != '"' {
      -		return errors.New("invalid semver string")
      -	}
      -	vv, err := NewVersion(string(data[1 : l-1]))
      -	if err != nil {
      -		return err
      -	}
      -	*v = *vv
      -	return nil
      -}
      -
      -func (v Version) LessThan(versionB Version) bool {
      -	versionA := v
      -	cmp := recursiveCompare(versionA.Slice(), versionB.Slice())
      -
      -	if cmp == 0 {
      -		cmp = preReleaseCompare(versionA, versionB)
      -	}
      -
      -	if cmp == -1 {
      -		return true
      -	}
      -
      -	return false
      -}
      -
      -/* Slice converts the comparable parts of the semver into a slice of strings */
      -func (v Version) Slice() []int64 {
      -	return []int64{v.Major, v.Minor, v.Patch}
      -}
      -
      -func (p PreRelease) Slice() []string {
      -	preRelease := string(p)
      -	return strings.Split(preRelease, ".")
      -}
      -
      -func preReleaseCompare(versionA Version, versionB Version) int {
      -	a := versionA.PreRelease
      -	b := versionB.PreRelease
      -
      -	/* Handle the case where if two versions are otherwise equal it is the
      -	 * one without a PreRelease that is greater */
      -	if len(a) == 0 && (len(b) > 0) {
      -		return 1
      -	} else if len(b) == 0 && (len(a) > 0) {
      -		return -1
      -	}
      -
      -	// If there is a prelease, check and compare each part.
      -	return recursivePreReleaseCompare(a.Slice(), b.Slice())
      -}
      -
      -func recursiveCompare(versionA []int64, versionB []int64) int {
      -	if len(versionA) == 0 {
      -		return 0
      -	}
      -
      -	a := versionA[0]
      -	b := versionB[0]
      -
      -	if a > b {
      -		return 1
      -	} else if a < b {
      -		return -1
      -	}
      -
      -	return recursiveCompare(versionA[1:], versionB[1:])
      -}
      -
      -func recursivePreReleaseCompare(versionA []string, versionB []string) int {
      -	// Handle slice length disparity.
      -	if len(versionA) == 0 {
      -		// Nothing to compare too, so we return 0
      -		return 0
      -	} else if len(versionB) == 0 {
      -		// We're longer than versionB so return 1.
      -		return 1
      -	}
      -
      -	a := versionA[0]
      -	b := versionB[0]
      -
      -	aInt := false
      -	bInt := false
      -
      -	aI, err := strconv.Atoi(versionA[0])
      -	if err == nil {
      -		aInt = true
      -	}
      -
      -	bI, err := strconv.Atoi(versionB[0])
      -	if err == nil {
      -		bInt = true
      -	}
      -
      -	// Handle Integer Comparison
      -	if aInt && bInt {
      -		if aI > bI {
      -			return 1
      -		} else if aI < bI {
      -			return -1
      -		}
      -	}
      -
      -	// Handle String Comparison
      -	if a > b {
      -		return 1
      -	} else if a < b {
      -		return -1
      -	}
      -
      -	return recursivePreReleaseCompare(versionA[1:], versionB[1:])
      -}
      -
      -// BumpMajor increments the Major field by 1 and resets all other fields to their default values
      -func (v *Version) BumpMajor() {
      -	v.Major += 1
      -	v.Minor = 0
      -	v.Patch = 0
      -	v.PreRelease = PreRelease("")
      -	v.Metadata = ""
      -}
      -
      -// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
      -func (v *Version) BumpMinor() {
      -	v.Minor += 1
      -	v.Patch = 0
      -	v.PreRelease = PreRelease("")
      -	v.Metadata = ""
      -}
      -
      -// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
      -func (v *Version) BumpPatch() {
      -	v.Patch += 1
      -	v.PreRelease = PreRelease("")
      -	v.Metadata = ""
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/semver/semver_test.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/semver/semver_test.go
      deleted file mode 100644
      index 8a6e200a..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/semver/semver_test.go
      +++ /dev/null
      @@ -1,330 +0,0 @@
      -// Copyright 2013-2015 CoreOS, Inc.
      -//
      -// Licensed under the Apache License, Version 2.0 (the "License");
      -// you may not use this file except in compliance with the License.
      -// You may obtain a copy of the License at
      -//
      -//     http://www.apache.org/licenses/LICENSE-2.0
      -//
      -// Unless required by applicable law or agreed to in writing, software
      -// distributed under the License is distributed on an "AS IS" BASIS,
      -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      -// See the License for the specific language governing permissions and
      -// limitations under the License.
      -
      -package semver
      -
      -import (
      -	"bytes"
      -	"encoding/json"
      -	"errors"
      -	"math/rand"
      -	"reflect"
      -	"testing"
      -	"time"
      -
      -	"gopkg.in/yaml.v2"
      -)
      -
      -type fixture struct {
      -	GreaterVersion string
      -	LesserVersion  string
      -}
      -
      -var fixtures = []fixture{
      -	fixture{"0.0.0", "0.0.0-foo"},
      -	fixture{"0.0.1", "0.0.0"},
      -	fixture{"1.0.0", "0.9.9"},
      -	fixture{"0.10.0", "0.9.0"},
      -	fixture{"0.99.0", "0.10.0"},
      -	fixture{"2.0.0", "1.2.3"},
      -	fixture{"0.0.0", "0.0.0-foo"},
      -	fixture{"0.0.1", "0.0.0"},
      -	fixture{"1.0.0", "0.9.9"},
      -	fixture{"0.10.0", "0.9.0"},
      -	fixture{"0.99.0", "0.10.0"},
      -	fixture{"2.0.0", "1.2.3"},
      -	fixture{"0.0.0", "0.0.0-foo"},
      -	fixture{"0.0.1", "0.0.0"},
      -	fixture{"1.0.0", "0.9.9"},
      -	fixture{"0.10.0", "0.9.0"},
      -	fixture{"0.99.0", "0.10.0"},
      -	fixture{"2.0.0", "1.2.3"},
      -	fixture{"1.2.3", "1.2.3-asdf"},
      -	fixture{"1.2.3", "1.2.3-4"},
      -	fixture{"1.2.3", "1.2.3-4-foo"},
      -	fixture{"1.2.3-5-foo", "1.2.3-5"},
      -	fixture{"1.2.3-5", "1.2.3-4"},
      -	fixture{"1.2.3-5-foo", "1.2.3-5-Foo"},
      -	fixture{"3.0.0", "2.7.2+asdf"},
      -	fixture{"3.0.0+foobar", "2.7.2"},
      -	fixture{"1.2.3-a.10", "1.2.3-a.5"},
      -	fixture{"1.2.3-a.b", "1.2.3-a.5"},
      -	fixture{"1.2.3-a.b", "1.2.3-a"},
      -	fixture{"1.2.3-a.b.c.10.d.5", "1.2.3-a.b.c.5.d.100"},
      -	fixture{"1.0.0", "1.0.0-rc.1"},
      -	fixture{"1.0.0-rc.2", "1.0.0-rc.1"},
      -	fixture{"1.0.0-rc.1", "1.0.0-beta.11"},
      -	fixture{"1.0.0-beta.11", "1.0.0-beta.2"},
      -	fixture{"1.0.0-beta.2", "1.0.0-beta"},
      -	fixture{"1.0.0-beta", "1.0.0-alpha.beta"},
      -	fixture{"1.0.0-alpha.beta", "1.0.0-alpha.1"},
      -	fixture{"1.0.0-alpha.1", "1.0.0-alpha"},
      -}
      -
      -func TestCompare(t *testing.T) {
      -	for _, v := range fixtures {
      -		gt, err := NewVersion(v.GreaterVersion)
      -		if err != nil {
      -			t.Error(err)
      -		}
      -
      -		lt, err := NewVersion(v.LesserVersion)
      -		if err != nil {
      -			t.Error(err)
      -		}
      -
      -		if gt.LessThan(*lt) == true {
      -			t.Errorf("%s should not be less than %s", gt, lt)
      -		}
      -	}
      -}
      -
      -func testString(t *testing.T, orig string, version *Version) {
      -	if orig != version.String() {
      -		t.Errorf("%s != %s", orig, version)
      -	}
      -}
      -
      -func TestString(t *testing.T) {
      -	for _, v := range fixtures {
      -		gt, err := NewVersion(v.GreaterVersion)
      -		if err != nil {
      -			t.Error(err)
      -		}
      -		testString(t, v.GreaterVersion, gt)
      -
      -		lt, err := NewVersion(v.LesserVersion)
      -		if err != nil {
      -			t.Error(err)
      -		}
      -		testString(t, v.LesserVersion, lt)
      -	}
      -}
      -
      -func shuffleStringSlice(src []string) []string {
      -	dest := make([]string, len(src))
      -	rand.Seed(time.Now().Unix())
      -	perm := rand.Perm(len(src))
      -	for i, v := range perm {
      -		dest[v] = src[i]
      -	}
      -	return dest
      -}
      -
      -func TestSort(t *testing.T) {
      -	sortedVersions := []string{"1.0.0", "1.0.2", "1.2.0", "3.1.1"}
      -	unsortedVersions := shuffleStringSlice(sortedVersions)
      -
      -	semvers := []*Version{}
      -	for _, v := range unsortedVersions {
      -		sv, err := NewVersion(v)
      -		if err != nil {
      -			t.Fatal(err)
      -		}
      -		semvers = append(semvers, sv)
      -	}
      -
      -	Sort(semvers)
      -
      -	for idx, sv := range semvers {
      -		if sv.String() != sortedVersions[idx] {
      -			t.Fatalf("incorrect sort at index %v", idx)
      -		}
      -	}
      -}
      -
      -func TestBumpMajor(t *testing.T) {
      -	version, _ := NewVersion("1.0.0")
      -	version.BumpMajor()
      -	if version.Major != 2 {
      -		t.Fatalf("bumping major on 1.0.0 resulted in %v", version)
      -	}
      -
      -	version, _ = NewVersion("1.5.2")
      -	version.BumpMajor()
      -	if version.Minor != 0 && version.Patch != 0 {
      -		t.Fatalf("bumping major on 1.5.2 resulted in %v", version)
      -	}
      -
      -	version, _ = NewVersion("1.0.0+build.1-alpha.1")
      -	version.BumpMajor()
      -	if version.PreRelease != "" && version.PreRelease != "" {
      -		t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version)
      -	}
      -}
      -
      -func TestBumpMinor(t *testing.T) {
      -	version, _ := NewVersion("1.0.0")
      -	version.BumpMinor()
      -
      -	if version.Major != 1 {
      -		t.Fatalf("bumping minor on 1.0.0 resulted in %v", version)
      -	}
      -
      -	if version.Minor != 1 {
      -		t.Fatalf("bumping major on 1.0.0 resulted in %v", version)
      -	}
      -
      -	version, _ = NewVersion("1.0.0+build.1-alpha.1")
      -	version.BumpMinor()
      -	if version.PreRelease != "" && version.PreRelease != "" {
      -		t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version)
      -	}
      -}
      -
      -func TestBumpPatch(t *testing.T) {
      -	version, _ := NewVersion("1.0.0")
      -	version.BumpPatch()
      -
      -	if version.Major != 1 {
      -		t.Fatalf("bumping minor on 1.0.0 resulted in %v", version)
      -	}
      -
      -	if version.Minor != 0 {
      -		t.Fatalf("bumping major on 1.0.0 resulted in %v", version)
      -	}
      -
      -	if version.Patch != 1 {
      -		t.Fatalf("bumping major on 1.0.0 resulted in %v", version)
      -	}
      -
      -	version, _ = NewVersion("1.0.0+build.1-alpha.1")
      -	version.BumpPatch()
      -	if version.PreRelease != "" && version.PreRelease != "" {
      -		t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version)
      -	}
      -}
      -
      -func TestMust(t *testing.T) {
      -	tests := []struct {
      -		versionStr string
      -
      -		version *Version
      -		recov   interface{}
      -	}{
      -		{
      -			versionStr: "1.0.0",
      -			version:    &Version{Major: 1},
      -		},
      -		{
      -			versionStr: "version number",
      -			recov:      errors.New("version number is not in dotted-tri format"),
      -		},
      -	}
      -
      -	for _, tt := range tests {
      -		func() {
      -			defer func() {
      -				recov := recover()
      -				if !reflect.DeepEqual(tt.recov, recov) {
      -					t.Fatalf("incorrect panic for %q: want %v, got %v", tt.versionStr, tt.recov, recov)
      -				}
      -			}()
      -
      -			version := Must(NewVersion(tt.versionStr))
      -			if !reflect.DeepEqual(tt.version, version) {
      -				t.Fatalf("incorrect version for %q: want %+v, got %+v", tt.versionStr, tt.version, version)
      -			}
      -		}()
      -	}
      -}
      -
      -type fixtureJSON struct {
      -	GreaterVersion *Version
      -	LesserVersion  *Version
      -}
      -
      -func TestJSON(t *testing.T) {
      -	fj := make([]fixtureJSON, len(fixtures))
      -	for i, v := range fixtures {
      -		var err error
      -		fj[i].GreaterVersion, err = NewVersion(v.GreaterVersion)
      -		if err != nil {
      -			t.Fatal(err)
      -		}
      -		fj[i].LesserVersion, err = NewVersion(v.LesserVersion)
      -		if err != nil {
      -			t.Fatal(err)
      -		}
      -	}
      -
      -	fromStrings, err := json.Marshal(fixtures)
      -	if err != nil {
      -		t.Fatal(err)
      -	}
      -	fromVersions, err := json.Marshal(fj)
      -	if err != nil {
      -		t.Fatal(err)
      -	}
      -	if !bytes.Equal(fromStrings, fromVersions) {
      -		t.Errorf("Expected:   %s", fromStrings)
      -		t.Errorf("Unexpected: %s", fromVersions)
      -	}
      -
      -	fromJson := make([]fixtureJSON, 0, len(fj))
      -	err = json.Unmarshal(fromStrings, &fromJson)
      -	if err != nil {
      -		t.Fatal(err)
      -	}
      -	if !reflect.DeepEqual(fromJson, fj) {
      -		t.Error("Expected:   ", fj)
      -		t.Error("Unexpected: ", fromJson)
      -	}
      -}
      -
      -func TestYAML(t *testing.T) {
      -	document, err := yaml.Marshal(fixtures)
      -	if err != nil {
      -		t.Fatal(err)
      -	}
      -
      -	expected := make([]fixtureJSON, len(fixtures))
      -	for i, v := range fixtures {
      -		var err error
      -		expected[i].GreaterVersion, err = NewVersion(v.GreaterVersion)
      -		if err != nil {
      -			t.Fatal(err)
      -		}
      -		expected[i].LesserVersion, err = NewVersion(v.LesserVersion)
      -		if err != nil {
      -			t.Fatal(err)
      -		}
      -	}
      -
      -	fromYAML := make([]fixtureJSON, 0, len(fixtures))
      -	err = yaml.Unmarshal(document, &fromYAML)
      -	if err != nil {
      -		t.Fatal(err)
      -	}
      -
      -	if !reflect.DeepEqual(fromYAML, expected) {
      -		t.Error("Expected:   ", expected)
      -		t.Error("Unexpected: ", fromYAML)
      -	}
      -}
      -
      -func TestBadInput(t *testing.T) {
      -	bad := []string{
      -		"1.2",
      -		"1.2.3x",
      -		"0x1.3.4",
      -		"-1.2.3",
      -		"1.2.3.4",
      -	}
      -	for _, b := range bad {
      -		if _, err := NewVersion(b); err == nil {
      -			t.Error("Improperly accepted value: ", b)
      -		}
      -	}
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/LICENSE b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/LICENSE
      deleted file mode 100644
      index a68e67f0..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/LICENSE
      +++ /dev/null
      @@ -1,188 +0,0 @@
      -
      -Copyright (c) 2011-2014 - Canonical Inc.
      -
      -This software is licensed under the LGPLv3, included below.
      -
      -As a special exception to the GNU Lesser General Public License version 3
      -("LGPL3"), the copyright holders of this Library give you permission to
      -convey to a third party a Combined Work that links statically or dynamically
      -to this Library without providing any Minimal Corresponding Source or
      -Minimal Application Code as set out in 4d or providing the installation
      -information set out in section 4e, provided that you comply with the other
      -provisions of LGPL3 and provided that you meet, for the Application the
      -terms and conditions of the license(s) which apply to the Application.
      -
      -Except as stated in this special exception, the provisions of LGPL3 will
      -continue to comply in full to this Library. If you modify this Library, you
      -may apply this exception to your version of this Library, but you are not
      -obliged to do so. If you do not wish to do so, delete this exception
      -statement from your version. This exception does not (and cannot) modify any
      -license terms which apply to the Application, with which you must still
      -comply.
      -
      -
      -                   GNU LESSER GENERAL PUBLIC LICENSE
      -                       Version 3, 29 June 2007
      -
      - Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
      - Everyone is permitted to copy and distribute verbatim copies
      - of this license document, but changing it is not allowed.
      -
      -
      -  This version of the GNU Lesser General Public License incorporates
      -the terms and conditions of version 3 of the GNU General Public
      -License, supplemented by the additional permissions listed below.
      -
      -  0. Additional Definitions.
      -
      -  As used herein, "this License" refers to version 3 of the GNU Lesser
      -General Public License, and the "GNU GPL" refers to version 3 of the GNU
      -General Public License.
      -
      -  "The Library" refers to a covered work governed by this License,
      -other than an Application or a Combined Work as defined below.
      -
      -  An "Application" is any work that makes use of an interface provided
      -by the Library, but which is not otherwise based on the Library.
      -Defining a subclass of a class defined by the Library is deemed a mode
      -of using an interface provided by the Library.
      -
      -  A "Combined Work" is a work produced by combining or linking an
      -Application with the Library.  The particular version of the Library
      -with which the Combined Work was made is also called the "Linked
      -Version".
      -
      -  The "Minimal Corresponding Source" for a Combined Work means the
      -Corresponding Source for the Combined Work, excluding any source code
      -for portions of the Combined Work that, considered in isolation, are
      -based on the Application, and not on the Linked Version.
      -
      -  The "Corresponding Application Code" for a Combined Work means the
      -object code and/or source code for the Application, including any data
      -and utility programs needed for reproducing the Combined Work from the
      -Application, but excluding the System Libraries of the Combined Work.
      -
      -  1. Exception to Section 3 of the GNU GPL.
      -
      -  You may convey a covered work under sections 3 and 4 of this License
      -without being bound by section 3 of the GNU GPL.
      -
      -  2. Conveying Modified Versions.
      -
      -  If you modify a copy of the Library, and, in your modifications, a
      -facility refers to a function or data to be supplied by an Application
      -that uses the facility (other than as an argument passed when the
      -facility is invoked), then you may convey a copy of the modified
      -version:
      -
      -   a) under this License, provided that you make a good faith effort to
      -   ensure that, in the event an Application does not supply the
      -   function or data, the facility still operates, and performs
      -   whatever part of its purpose remains meaningful, or
      -
      -   b) under the GNU GPL, with none of the additional permissions of
      -   this License applicable to that copy.
      -
      -  3. Object Code Incorporating Material from Library Header Files.
      -
      -  The object code form of an Application may incorporate material from
      -a header file that is part of the Library.  You may convey such object
      -code under terms of your choice, provided that, if the incorporated
      -material is not limited to numerical parameters, data structure
      -layouts and accessors, or small macros, inline functions and templates
      -(ten or fewer lines in length), you do both of the following:
      -
      -   a) Give prominent notice with each copy of the object code that the
      -   Library is used in it and that the Library and its use are
      -   covered by this License.
      -
      -   b) Accompany the object code with a copy of the GNU GPL and this license
      -   document.
      -
      -  4. Combined Works.
      -
      -  You may convey a Combined Work under terms of your choice that,
      -taken together, effectively do not restrict modification of the
      -portions of the Library contained in the Combined Work and reverse
      -engineering for debugging such modifications, if you also do each of
      -the following:
      -
      -   a) Give prominent notice with each copy of the Combined Work that
      -   the Library is used in it and that the Library and its use are
      -   covered by this License.
      -
      -   b) Accompany the Combined Work with a copy of the GNU GPL and this license
      -   document.
      -
      -   c) For a Combined Work that displays copyright notices during
      -   execution, include the copyright notice for the Library among
      -   these notices, as well as a reference directing the user to the
      -   copies of the GNU GPL and this license document.
      -
      -   d) Do one of the following:
      -
      -       0) Convey the Minimal Corresponding Source under the terms of this
      -       License, and the Corresponding Application Code in a form
      -       suitable for, and under terms that permit, the user to
      -       recombine or relink the Application with a modified version of
      -       the Linked Version to produce a modified Combined Work, in the
      -       manner specified by section 6 of the GNU GPL for conveying
      -       Corresponding Source.
      -
      -       1) Use a suitable shared library mechanism for linking with the
      -       Library.  A suitable mechanism is one that (a) uses at run time
      -       a copy of the Library already present on the user's computer
      -       system, and (b) will operate properly with a modified version
      -       of the Library that is interface-compatible with the Linked
      -       Version.
      -
      -   e) Provide Installation Information, but only if you would otherwise
      -   be required to provide such information under section 6 of the
      -   GNU GPL, and only to the extent that such information is
      -   necessary to install and execute a modified version of the
      -   Combined Work produced by recombining or relinking the
      -   Application with a modified version of the Linked Version. (If
      -   you use option 4d0, the Installation Information must accompany
      -   the Minimal Corresponding Source and Corresponding Application
      -   Code. If you use option 4d1, you must provide the Installation
      -   Information in the manner specified by section 6 of the GNU GPL
      -   for conveying Corresponding Source.)
      -
      -  5. Combined Libraries.
      -
      -  You may place library facilities that are a work based on the
      -Library side by side in a single library together with other library
      -facilities that are not Applications and are not covered by this
      -License, and convey such a combined library under terms of your
      -choice, if you do both of the following:
      -
      -   a) Accompany the combined library with a copy of the same work based
      -   on the Library, uncombined with any other library facilities,
      -   conveyed under the terms of this License.
      -
      -   b) Give prominent notice with the combined library that part of it
      -   is a work based on the Library, and explaining where to find the
      -   accompanying uncombined form of the same work.
      -
      -  6. Revised Versions of the GNU Lesser General Public License.
      -
      -  The Free Software Foundation may publish revised and/or new versions
      -of the GNU Lesser General Public License from time to time. Such new
      -versions will be similar in spirit to the present version, but may
      -differ in detail to address new problems or concerns.
      -
      -  Each version is given a distinguishing version number. If the
      -Library as you received it specifies that a certain numbered version
      -of the GNU Lesser General Public License "or any later version"
      -applies to it, you have the option of following the terms and
      -conditions either of that published version or of any later version
      -published by the Free Software Foundation. If the Library as you
      -received it does not specify a version number of the GNU Lesser
      -General Public License, you may choose any version of the GNU Lesser
      -General Public License ever published by the Free Software Foundation.
      -
      -  If the Library as you received it specifies that a proxy can decide
      -whether future versions of the GNU Lesser General Public License shall
      -apply, that proxy's public statement of acceptance of any version is
      -permanent authorization for you to choose that version for the
      -Library.
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/LICENSE.libyaml b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/LICENSE.libyaml
      deleted file mode 100644
      index 8da58fbf..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/LICENSE.libyaml
      +++ /dev/null
      @@ -1,31 +0,0 @@
      -The following files were ported to Go from C files of libyaml, and thus
      -are still covered by their original copyright and license:
      -
      -    apic.go
      -    emitterc.go
      -    parserc.go
      -    readerc.go
      -    scannerc.go
      -    writerc.go
      -    yamlh.go
      -    yamlprivateh.go
      -
      -Copyright (c) 2006 Kirill Simonov
      -
      -Permission is hereby granted, free of charge, to any person obtaining a copy of
      -this software and associated documentation files (the "Software"), to deal in
      -the Software without restriction, including without limitation the rights to
      -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
      -of the Software, and to permit persons to whom the Software is furnished to do
      -so, subject to the following conditions:
      -
      -The above copyright notice and this permission notice shall be included in all
      -copies or substantial portions of the Software.
      -
      -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
      -SOFTWARE.
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/README.md b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/README.md
      deleted file mode 100644
      index d6c919e6..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/README.md
      +++ /dev/null
      @@ -1,128 +0,0 @@
      -# YAML support for the Go language
      -
      -Introduction
      -------------
      -
      -The yaml package enables Go programs to comfortably encode and decode YAML
      -values. It was developed within [Canonical](https://www.canonical.com) as
      -part of the [juju](https://juju.ubuntu.com) project, and is based on a
      -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
      -C library to parse and generate YAML data quickly and reliably.
      -
      -Compatibility
      --------------
      -
      -The yaml package supports most of YAML 1.1 and 1.2, including support for
      -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
      -implemented, and base-60 floats from YAML 1.1 are purposefully not
      -supported since they're a poor design and are gone in YAML 1.2.
      -
      -Installation and usage
      -----------------------
      -
      -The import path for the package is *gopkg.in/yaml.v2*.
      -
      -To install it, run:
      -
      -    go get gopkg.in/yaml.v2
      -
      -API documentation
      ------------------
      -
      -If opened in a browser, the import path itself leads to the API documentation:
      -
      -  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
      -
      -API stability
      --------------
      -
      -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
      -
      -
      -License
      --------
      -
      -The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
      -
      -
      -Example
      --------
      -
      -```Go
      -package main
      -
      -import (
      -        "fmt"
      -        "log"
      -
      -        "gopkg.in/yaml.v2"
      -)
      -
      -var data = `
      -a: Easy!
      -b:
      -  c: 2
      -  d: [3, 4]
      -`
      -
      -type T struct {
      -        A string
      -        B struct{C int; D []int ",flow"}
      -}
      -
      -func main() {
      -        t := T{}
      -    
      -        err := yaml.Unmarshal([]byte(data), &t)
      -        if err != nil {
      -                log.Fatalf("error: %v", err)
      -        }
      -        fmt.Printf("--- t:\n%v\n\n", t)
      -    
      -        d, err := yaml.Marshal(&t)
      -        if err != nil {
      -                log.Fatalf("error: %v", err)
      -        }
      -        fmt.Printf("--- t dump:\n%s\n\n", string(d))
      -    
      -        m := make(map[interface{}]interface{})
      -    
      -        err = yaml.Unmarshal([]byte(data), &m)
      -        if err != nil {
      -                log.Fatalf("error: %v", err)
      -        }
      -        fmt.Printf("--- m:\n%v\n\n", m)
      -    
      -        d, err = yaml.Marshal(&m)
      -        if err != nil {
      -                log.Fatalf("error: %v", err)
      -        }
      -        fmt.Printf("--- m dump:\n%s\n\n", string(d))
      -}
      -```
      -
      -This example will generate the following output:
      -
      -```
      ---- t:
      -{Easy! {2 [3 4]}}
      -
      ---- t dump:
      -a: Easy!
      -b:
      -  c: 2
      -  d: [3, 4]
      -
      -
      ---- m:
      -map[a:Easy! b:map[c:2 d:[3 4]]]
      -
      ---- m dump:
      -a: Easy!
      -b:
      -  c: 2
      -  d:
      -  - 3
      -  - 4
      -```
      -
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/apic.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/apic.go
      deleted file mode 100644
      index 95ec014e..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/apic.go
      +++ /dev/null
      @@ -1,742 +0,0 @@
      -package yaml
      -
      -import (
      -	"io"
      -	"os"
      -)
      -
      -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
      -	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
      -
      -	// Check if we can move the queue at the beginning of the buffer.
      -	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
      -		if parser.tokens_head != len(parser.tokens) {
      -			copy(parser.tokens, parser.tokens[parser.tokens_head:])
      -		}
      -		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
      -		parser.tokens_head = 0
      -	}
      -	parser.tokens = append(parser.tokens, *token)
      -	if pos < 0 {
      -		return
      -	}
      -	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
      -	parser.tokens[parser.tokens_head+pos] = *token
      -}
      -
      -// Create a new parser object.
      -func yaml_parser_initialize(parser *yaml_parser_t) bool {
      -	*parser = yaml_parser_t{
      -		raw_buffer: make([]byte, 0, input_raw_buffer_size),
      -		buffer:     make([]byte, 0, input_buffer_size),
      -	}
      -	return true
      -}
      -
      -// Destroy a parser object.
      -func yaml_parser_delete(parser *yaml_parser_t) {
      -	*parser = yaml_parser_t{}
      -}
      -
      -// String read handler.
      -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
      -	if parser.input_pos == len(parser.input) {
      -		return 0, io.EOF
      -	}
      -	n = copy(buffer, parser.input[parser.input_pos:])
      -	parser.input_pos += n
      -	return n, nil
      -}
      -
      -// File read handler.
      -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
      -	return parser.input_file.Read(buffer)
      -}
      -
      -// Set a string input.
      -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
      -	if parser.read_handler != nil {
      -		panic("must set the input source only once")
      -	}
      -	parser.read_handler = yaml_string_read_handler
      -	parser.input = input
      -	parser.input_pos = 0
      -}
      -
      -// Set a file input.
      -func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
      -	if parser.read_handler != nil {
      -		panic("must set the input source only once")
      -	}
      -	parser.read_handler = yaml_file_read_handler
      -	parser.input_file = file
      -}
      -
      -// Set the source encoding.
      -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
      -	if parser.encoding != yaml_ANY_ENCODING {
      -		panic("must set the encoding only once")
      -	}
      -	parser.encoding = encoding
      -}
      -
      -// Create a new emitter object.
      -func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
      -	*emitter = yaml_emitter_t{
      -		buffer:     make([]byte, output_buffer_size),
      -		raw_buffer: make([]byte, 0, output_raw_buffer_size),
      -		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
      -		events:     make([]yaml_event_t, 0, initial_queue_size),
      -	}
      -	return true
      -}
      -
      -// Destroy an emitter object.
      -func yaml_emitter_delete(emitter *yaml_emitter_t) {
      -	*emitter = yaml_emitter_t{}
      -}
      -
      -// String write handler.
      -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
      -	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
      -	return nil
      -}
      -
      -// File write handler.
      -func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
      -	_, err := emitter.output_file.Write(buffer)
      -	return err
      -}
      -
      -// Set a string output.
      -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
      -	if emitter.write_handler != nil {
      -		panic("must set the output target only once")
      -	}
      -	emitter.write_handler = yaml_string_write_handler
      -	emitter.output_buffer = output_buffer
      -}
      -
      -// Set a file output.
      -func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
      -	if emitter.write_handler != nil {
      -		panic("must set the output target only once")
      -	}
      -	emitter.write_handler = yaml_file_write_handler
      -	emitter.output_file = file
      -}
      -
      -// Set the output encoding.
      -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
      -	if emitter.encoding != yaml_ANY_ENCODING {
      -		panic("must set the output encoding only once")
      -	}
      -	emitter.encoding = encoding
      -}
      -
      -// Set the canonical output style.
      -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
      -	emitter.canonical = canonical
      -}
      -
      -//// Set the indentation increment.
      -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
      -	if indent < 2 || indent > 9 {
      -		indent = 2
      -	}
      -	emitter.best_indent = indent
      -}
      -
      -// Set the preferred line width.
      -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
      -	if width < 0 {
      -		width = -1
      -	}
      -	emitter.best_width = width
      -}
      -
      -// Set if unescaped non-ASCII characters are allowed.
      -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
      -	emitter.unicode = unicode
      -}
      -
      -// Set the preferred line break character.
      -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
      -	emitter.line_break = line_break
      -}
      -
      -///*
      -// * Destroy a token object.
      -// */
      -//
      -//YAML_DECLARE(void)
      -//yaml_token_delete(yaml_token_t *token)
      -//{
      -//    assert(token);  // Non-NULL token object expected.
      -//
      -//    switch (token.type)
      -//    {
      -//        case YAML_TAG_DIRECTIVE_TOKEN:
      -//            yaml_free(token.data.tag_directive.handle);
      -//            yaml_free(token.data.tag_directive.prefix);
      -//            break;
      -//
      -//        case YAML_ALIAS_TOKEN:
      -//            yaml_free(token.data.alias.value);
      -//            break;
      -//
      -//        case YAML_ANCHOR_TOKEN:
      -//            yaml_free(token.data.anchor.value);
      -//            break;
      -//
      -//        case YAML_TAG_TOKEN:
      -//            yaml_free(token.data.tag.handle);
      -//            yaml_free(token.data.tag.suffix);
      -//            break;
      -//
      -//        case YAML_SCALAR_TOKEN:
      -//            yaml_free(token.data.scalar.value);
      -//            break;
      -//
      -//        default:
      -//            break;
      -//    }
      -//
      -//    memset(token, 0, sizeof(yaml_token_t));
      -//}
      -//
      -///*
      -// * Check if a string is a valid UTF-8 sequence.
      -// *
      -// * Check 'reader.c' for more details on UTF-8 encoding.
      -// */
      -//
      -//static int
      -//yaml_check_utf8(yaml_char_t *start, size_t length)
      -//{
      -//    yaml_char_t *end = start+length;
      -//    yaml_char_t *pointer = start;
      -//
      -//    while (pointer < end) {
      -//        unsigned char octet;
      -//        unsigned int width;
      -//        unsigned int value;
      -//        size_t k;
      -//
      -//        octet = pointer[0];
      -//        width = (octet & 0x80) == 0x00 ? 1 :
      -//                (octet & 0xE0) == 0xC0 ? 2 :
      -//                (octet & 0xF0) == 0xE0 ? 3 :
      -//                (octet & 0xF8) == 0xF0 ? 4 : 0;
      -//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
      -//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
      -//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
      -//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
      -//        if (!width) return 0;
      -//        if (pointer+width > end) return 0;
      -//        for (k = 1; k < width; k ++) {
      -//            octet = pointer[k];
      -//            if ((octet & 0xC0) != 0x80) return 0;
      -//            value = (value << 6) + (octet & 0x3F);
      -//        }
      -//        if (!((width == 1) ||
      -//            (width == 2 && value >= 0x80) ||
      -//            (width == 3 && value >= 0x800) ||
      -//            (width == 4 && value >= 0x10000))) return 0;
      -//
      -//        pointer += width;
      -//    }
      -//
      -//    return 1;
      -//}
      -//
      -
      -// Create STREAM-START.
      -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
      -	*event = yaml_event_t{
      -		typ:      yaml_STREAM_START_EVENT,
      -		encoding: encoding,
      -	}
      -	return true
      -}
      -
      -// Create STREAM-END.
      -func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
      -	*event = yaml_event_t{
      -		typ: yaml_STREAM_END_EVENT,
      -	}
      -	return true
      -}
      -
      -// Create DOCUMENT-START.
      -func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
      -	tag_directives []yaml_tag_directive_t, implicit bool) bool {
      -	*event = yaml_event_t{
      -		typ:               yaml_DOCUMENT_START_EVENT,
      -		version_directive: version_directive,
      -		tag_directives:    tag_directives,
      -		implicit:          implicit,
      -	}
      -	return true
      -}
      -
      -// Create DOCUMENT-END.
      -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
      -	*event = yaml_event_t{
      -		typ:      yaml_DOCUMENT_END_EVENT,
      -		implicit: implicit,
      -	}
      -	return true
      -}
      -
      -///*
      -// * Create ALIAS.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
      -//{
      -//    mark yaml_mark_t = { 0, 0, 0 }
      -//    anchor_copy *yaml_char_t = NULL
      -//
      -//    assert(event) // Non-NULL event object is expected.
      -//    assert(anchor) // Non-NULL anchor is expected.
      -//
      -//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
      -//
      -//    anchor_copy = yaml_strdup(anchor)
      -//    if (!anchor_copy)
      -//        return 0
      -//
      -//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
      -//
      -//    return 1
      -//}
      -
      -// Create SCALAR.
      -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
      -	*event = yaml_event_t{
      -		typ:             yaml_SCALAR_EVENT,
      -		anchor:          anchor,
      -		tag:             tag,
      -		value:           value,
      -		implicit:        plain_implicit,
      -		quoted_implicit: quoted_implicit,
      -		style:           yaml_style_t(style),
      -	}
      -	return true
      -}
      -
      -// Create SEQUENCE-START.
      -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
      -	*event = yaml_event_t{
      -		typ:      yaml_SEQUENCE_START_EVENT,
      -		anchor:   anchor,
      -		tag:      tag,
      -		implicit: implicit,
      -		style:    yaml_style_t(style),
      -	}
      -	return true
      -}
      -
      -// Create SEQUENCE-END.
      -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
      -	*event = yaml_event_t{
      -		typ: yaml_SEQUENCE_END_EVENT,
      -	}
      -	return true
      -}
      -
      -// Create MAPPING-START.
      -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
      -	*event = yaml_event_t{
      -		typ:      yaml_MAPPING_START_EVENT,
      -		anchor:   anchor,
      -		tag:      tag,
      -		implicit: implicit,
      -		style:    yaml_style_t(style),
      -	}
      -	return true
      -}
      -
      -// Create MAPPING-END.
      -func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
      -	*event = yaml_event_t{
      -		typ: yaml_MAPPING_END_EVENT,
      -	}
      -	return true
      -}
      -
      -// Destroy an event object.
      -func yaml_event_delete(event *yaml_event_t) {
      -	*event = yaml_event_t{}
      -}
      -
      -///*
      -// * Create a document object.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_document_initialize(document *yaml_document_t,
      -//        version_directive *yaml_version_directive_t,
      -//        tag_directives_start *yaml_tag_directive_t,
      -//        tag_directives_end *yaml_tag_directive_t,
      -//        start_implicit int, end_implicit int)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//    struct {
      -//        start *yaml_node_t
      -//        end *yaml_node_t
      -//        top *yaml_node_t
      -//    } nodes = { NULL, NULL, NULL }
      -//    version_directive_copy *yaml_version_directive_t = NULL
      -//    struct {
      -//        start *yaml_tag_directive_t
      -//        end *yaml_tag_directive_t
      -//        top *yaml_tag_directive_t
      -//    } tag_directives_copy = { NULL, NULL, NULL }
      -//    value yaml_tag_directive_t = { NULL, NULL }
      -//    mark yaml_mark_t = { 0, 0, 0 }
      -//
      -//    assert(document) // Non-NULL document object is expected.
      -//    assert((tag_directives_start && tag_directives_end) ||
      -//            (tag_directives_start == tag_directives_end))
      -//                            // Valid tag directives are expected.
      -//
      -//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
      -//
      -//    if (version_directive) {
      -//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
      -//        if (!version_directive_copy) goto error
      -//        version_directive_copy.major = version_directive.major
      -//        version_directive_copy.minor = version_directive.minor
      -//    }
      -//
      -//    if (tag_directives_start != tag_directives_end) {
      -//        tag_directive *yaml_tag_directive_t
      -//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
      -//            goto error
      -//        for (tag_directive = tag_directives_start
      -//                tag_directive != tag_directives_end; tag_directive ++) {
      -//            assert(tag_directive.handle)
      -//            assert(tag_directive.prefix)
      -//            if (!yaml_check_utf8(tag_directive.handle,
      -//                        strlen((char *)tag_directive.handle)))
      -//                goto error
      -//            if (!yaml_check_utf8(tag_directive.prefix,
      -//                        strlen((char *)tag_directive.prefix)))
      -//                goto error
      -//            value.handle = yaml_strdup(tag_directive.handle)
      -//            value.prefix = yaml_strdup(tag_directive.prefix)
      -//            if (!value.handle || !value.prefix) goto error
      -//            if (!PUSH(&context, tag_directives_copy, value))
      -//                goto error
      -//            value.handle = NULL
      -//            value.prefix = NULL
      -//        }
      -//    }
      -//
      -//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
      -//            tag_directives_copy.start, tag_directives_copy.top,
      -//            start_implicit, end_implicit, mark, mark)
      -//
      -//    return 1
      -//
      -//error:
      -//    STACK_DEL(&context, nodes)
      -//    yaml_free(version_directive_copy)
      -//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
      -//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
      -//        yaml_free(value.handle)
      -//        yaml_free(value.prefix)
      -//    }
      -//    STACK_DEL(&context, tag_directives_copy)
      -//    yaml_free(value.handle)
      -//    yaml_free(value.prefix)
      -//
      -//    return 0
      -//}
      -//
      -///*
      -// * Destroy a document object.
      -// */
      -//
      -//YAML_DECLARE(void)
      -//yaml_document_delete(document *yaml_document_t)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//    tag_directive *yaml_tag_directive_t
      -//
      -//    context.error = YAML_NO_ERROR // Eliminate a compliler warning.
      -//
      -//    assert(document) // Non-NULL document object is expected.
      -//
      -//    while (!STACK_EMPTY(&context, document.nodes)) {
      -//        node yaml_node_t = POP(&context, document.nodes)
      -//        yaml_free(node.tag)
      -//        switch (node.type) {
      -//            case YAML_SCALAR_NODE:
      -//                yaml_free(node.data.scalar.value)
      -//                break
      -//            case YAML_SEQUENCE_NODE:
      -//                STACK_DEL(&context, node.data.sequence.items)
      -//                break
      -//            case YAML_MAPPING_NODE:
      -//                STACK_DEL(&context, node.data.mapping.pairs)
      -//                break
      -//            default:
      -//                assert(0) // Should not happen.
      -//        }
      -//    }
      -//    STACK_DEL(&context, document.nodes)
      -//
      -//    yaml_free(document.version_directive)
      -//    for (tag_directive = document.tag_directives.start
      -//            tag_directive != document.tag_directives.end
      -//            tag_directive++) {
      -//        yaml_free(tag_directive.handle)
      -//        yaml_free(tag_directive.prefix)
      -//    }
      -//    yaml_free(document.tag_directives.start)
      -//
      -//    memset(document, 0, sizeof(yaml_document_t))
      -//}
      -//
      -///**
      -// * Get a document node.
      -// */
      -//
      -//YAML_DECLARE(yaml_node_t *)
      -//yaml_document_get_node(document *yaml_document_t, index int)
      -//{
      -//    assert(document) // Non-NULL document object is expected.
      -//
      -//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
      -//        return document.nodes.start + index - 1
      -//    }
      -//    return NULL
      -//}
      -//
      -///**
      -// * Get the root object.
      -// */
      -//
      -//YAML_DECLARE(yaml_node_t *)
      -//yaml_document_get_root_node(document *yaml_document_t)
      -//{
      -//    assert(document) // Non-NULL document object is expected.
      -//
      -//    if (document.nodes.top != document.nodes.start) {
      -//        return document.nodes.start
      -//    }
      -//    return NULL
      -//}
      -//
      -///*
      -// * Add a scalar node to a document.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_document_add_scalar(document *yaml_document_t,
      -//        tag *yaml_char_t, value *yaml_char_t, length int,
      -//        style yaml_scalar_style_t)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//    mark yaml_mark_t = { 0, 0, 0 }
      -//    tag_copy *yaml_char_t = NULL
      -//    value_copy *yaml_char_t = NULL
      -//    node yaml_node_t
      -//
      -//    assert(document) // Non-NULL document object is expected.
      -//    assert(value) // Non-NULL value is expected.
      -//
      -//    if (!tag) {
      -//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
      -//    }
      -//
      -//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
      -//    tag_copy = yaml_strdup(tag)
      -//    if (!tag_copy) goto error
      -//
      -//    if (length < 0) {
      -//        length = strlen((char *)value)
      -//    }
      -//
      -//    if (!yaml_check_utf8(value, length)) goto error
      -//    value_copy = yaml_malloc(length+1)
      -//    if (!value_copy) goto error
      -//    memcpy(value_copy, value, length)
      -//    value_copy[length] = '\0'
      -//
      -//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
      -//    if (!PUSH(&context, document.nodes, node)) goto error
      -//
      -//    return document.nodes.top - document.nodes.start
      -//
      -//error:
      -//    yaml_free(tag_copy)
      -//    yaml_free(value_copy)
      -//
      -//    return 0
      -//}
      -//
      -///*
      -// * Add a sequence node to a document.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_document_add_sequence(document *yaml_document_t,
      -//        tag *yaml_char_t, style yaml_sequence_style_t)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//    mark yaml_mark_t = { 0, 0, 0 }
      -//    tag_copy *yaml_char_t = NULL
      -//    struct {
      -//        start *yaml_node_item_t
      -//        end *yaml_node_item_t
      -//        top *yaml_node_item_t
      -//    } items = { NULL, NULL, NULL }
      -//    node yaml_node_t
      -//
      -//    assert(document) // Non-NULL document object is expected.
      -//
      -//    if (!tag) {
      -//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
      -//    }
      -//
      -//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
      -//    tag_copy = yaml_strdup(tag)
      -//    if (!tag_copy) goto error
      -//
      -//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
      -//
      -//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
      -//            style, mark, mark)
      -//    if (!PUSH(&context, document.nodes, node)) goto error
      -//
      -//    return document.nodes.top - document.nodes.start
      -//
      -//error:
      -//    STACK_DEL(&context, items)
      -//    yaml_free(tag_copy)
      -//
      -//    return 0
      -//}
      -//
      -///*
      -// * Add a mapping node to a document.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_document_add_mapping(document *yaml_document_t,
      -//        tag *yaml_char_t, style yaml_mapping_style_t)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//    mark yaml_mark_t = { 0, 0, 0 }
      -//    tag_copy *yaml_char_t = NULL
      -//    struct {
      -//        start *yaml_node_pair_t
      -//        end *yaml_node_pair_t
      -//        top *yaml_node_pair_t
      -//    } pairs = { NULL, NULL, NULL }
      -//    node yaml_node_t
      -//
      -//    assert(document) // Non-NULL document object is expected.
      -//
      -//    if (!tag) {
      -//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
      -//    }
      -//
      -//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
      -//    tag_copy = yaml_strdup(tag)
      -//    if (!tag_copy) goto error
      -//
      -//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
      -//
      -//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
      -//            style, mark, mark)
      -//    if (!PUSH(&context, document.nodes, node)) goto error
      -//
      -//    return document.nodes.top - document.nodes.start
      -//
      -//error:
      -//    STACK_DEL(&context, pairs)
      -//    yaml_free(tag_copy)
      -//
      -//    return 0
      -//}
      -//
      -///*
      -// * Append an item to a sequence node.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_document_append_sequence_item(document *yaml_document_t,
      -//        sequence int, item int)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//
      -//    assert(document) // Non-NULL document is required.
      -//    assert(sequence > 0
      -//            && document.nodes.start + sequence <= document.nodes.top)
      -//                            // Valid sequence id is required.
      -//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
      -//                            // A sequence node is required.
      -//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
      -//                            // Valid item id is required.
      -//
      -//    if (!PUSH(&context,
      -//                document.nodes.start[sequence-1].data.sequence.items, item))
      -//        return 0
      -//
      -//    return 1
      -//}
      -//
      -///*
      -// * Append a pair of a key and a value to a mapping node.
      -// */
      -//
      -//YAML_DECLARE(int)
      -//yaml_document_append_mapping_pair(document *yaml_document_t,
      -//        mapping int, key int, value int)
      -//{
      -//    struct {
      -//        error yaml_error_type_t
      -//    } context
      -//
      -//    pair yaml_node_pair_t
      -//
      -//    assert(document) // Non-NULL document is required.
      -//    assert(mapping > 0
      -//            && document.nodes.start + mapping <= document.nodes.top)
      -//                            // Valid mapping id is required.
      -//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
      -//                            // A mapping node is required.
      -//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
      -//                            // Valid key id is required.
      -//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
      -//                            // Valid value id is required.
      -//
      -//    pair.key = key
      -//    pair.value = value
      -//
      -//    if (!PUSH(&context,
      -//                document.nodes.start[mapping-1].data.mapping.pairs, pair))
      -//        return 0
      -//
      -//    return 1
      -//}
      -//
      -//
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/decode.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/decode.go
      deleted file mode 100644
      index 085cddc4..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/decode.go
      +++ /dev/null
      @@ -1,683 +0,0 @@
      -package yaml
      -
      -import (
      -	"encoding"
      -	"encoding/base64"
      -	"fmt"
      -	"math"
      -	"reflect"
      -	"strconv"
      -	"time"
      -)
      -
      -const (
      -	documentNode = 1 << iota
      -	mappingNode
      -	sequenceNode
      -	scalarNode
      -	aliasNode
      -)
      -
      -type node struct {
      -	kind         int
      -	line, column int
      -	tag          string
      -	value        string
      -	implicit     bool
      -	children     []*node
      -	anchors      map[string]*node
      -}
      -
      -// ----------------------------------------------------------------------------
      -// Parser, produces a node tree out of a libyaml event stream.
      -
      -type parser struct {
      -	parser yaml_parser_t
      -	event  yaml_event_t
      -	doc    *node
      -}
      -
      -func newParser(b []byte) *parser {
      -	p := parser{}
      -	if !yaml_parser_initialize(&p.parser) {
      -		panic("failed to initialize YAML emitter")
      -	}
      -
      -	if len(b) == 0 {
      -		b = []byte{'\n'}
      -	}
      -
      -	yaml_parser_set_input_string(&p.parser, b)
      -
      -	p.skip()
      -	if p.event.typ != yaml_STREAM_START_EVENT {
      -		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
      -	}
      -	p.skip()
      -	return &p
      -}
      -
      -func (p *parser) destroy() {
      -	if p.event.typ != yaml_NO_EVENT {
      -		yaml_event_delete(&p.event)
      -	}
      -	yaml_parser_delete(&p.parser)
      -}
      -
      -func (p *parser) skip() {
      -	if p.event.typ != yaml_NO_EVENT {
      -		if p.event.typ == yaml_STREAM_END_EVENT {
      -			failf("attempted to go past the end of stream; corrupted value?")
      -		}
      -		yaml_event_delete(&p.event)
      -	}
      -	if !yaml_parser_parse(&p.parser, &p.event) {
      -		p.fail()
      -	}
      -}
      -
      -func (p *parser) fail() {
      -	var where string
      -	var line int
      -	if p.parser.problem_mark.line != 0 {
      -		line = p.parser.problem_mark.line
      -	} else if p.parser.context_mark.line != 0 {
      -		line = p.parser.context_mark.line
      -	}
      -	if line != 0 {
      -		where = "line " + strconv.Itoa(line) + ": "
      -	}
      -	var msg string
      -	if len(p.parser.problem) > 0 {
      -		msg = p.parser.problem
      -	} else {
      -		msg = "unknown problem parsing YAML content"
      -	}
      -	failf("%s%s", where, msg)
      -}
      -
      -func (p *parser) anchor(n *node, anchor []byte) {
      -	if anchor != nil {
      -		p.doc.anchors[string(anchor)] = n
      -	}
      -}
      -
      -func (p *parser) parse() *node {
      -	switch p.event.typ {
      -	case yaml_SCALAR_EVENT:
      -		return p.scalar()
      -	case yaml_ALIAS_EVENT:
      -		return p.alias()
      -	case yaml_MAPPING_START_EVENT:
      -		return p.mapping()
      -	case yaml_SEQUENCE_START_EVENT:
      -		return p.sequence()
      -	case yaml_DOCUMENT_START_EVENT:
      -		return p.document()
      -	case yaml_STREAM_END_EVENT:
      -		// Happens when attempting to decode an empty buffer.
      -		return nil
      -	default:
      -		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
      -	}
      -	panic("unreachable")
      -}
      -
      -func (p *parser) node(kind int) *node {
      -	return &node{
      -		kind:   kind,
      -		line:   p.event.start_mark.line,
      -		column: p.event.start_mark.column,
      -	}
      -}
      -
      -func (p *parser) document() *node {
      -	n := p.node(documentNode)
      -	n.anchors = make(map[string]*node)
      -	p.doc = n
      -	p.skip()
      -	n.children = append(n.children, p.parse())
      -	if p.event.typ != yaml_DOCUMENT_END_EVENT {
      -		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
      -	}
      -	p.skip()
      -	return n
      -}
      -
      -func (p *parser) alias() *node {
      -	n := p.node(aliasNode)
      -	n.value = string(p.event.anchor)
      -	p.skip()
      -	return n
      -}
      -
      -func (p *parser) scalar() *node {
      -	n := p.node(scalarNode)
      -	n.value = string(p.event.value)
      -	n.tag = string(p.event.tag)
      -	n.implicit = p.event.implicit
      -	p.anchor(n, p.event.anchor)
      -	p.skip()
      -	return n
      -}
      -
      -func (p *parser) sequence() *node {
      -	n := p.node(sequenceNode)
      -	p.anchor(n, p.event.anchor)
      -	p.skip()
      -	for p.event.typ != yaml_SEQUENCE_END_EVENT {
      -		n.children = append(n.children, p.parse())
      -	}
      -	p.skip()
      -	return n
      -}
      -
      -func (p *parser) mapping() *node {
      -	n := p.node(mappingNode)
      -	p.anchor(n, p.event.anchor)
      -	p.skip()
      -	for p.event.typ != yaml_MAPPING_END_EVENT {
      -		n.children = append(n.children, p.parse(), p.parse())
      -	}
      -	p.skip()
      -	return n
      -}
      -
      -// ----------------------------------------------------------------------------
      -// Decoder, unmarshals a node into a provided value.
      -
      -type decoder struct {
      -	doc     *node
      -	aliases map[string]bool
      -	mapType reflect.Type
      -	terrors []string
      -}
      -
      -var (
      -	mapItemType    = reflect.TypeOf(MapItem{})
      -	durationType   = reflect.TypeOf(time.Duration(0))
      -	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
      -	ifaceType      = defaultMapType.Elem()
      -)
      -
      -func newDecoder() *decoder {
      -	d := &decoder{mapType: defaultMapType}
      -	d.aliases = make(map[string]bool)
      -	return d
      -}
      -
      -func (d *decoder) terror(n *node, tag string, out reflect.Value) {
      -	if n.tag != "" {
      -		tag = n.tag
      -	}
      -	value := n.value
      -	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
      -		if len(value) > 10 {
      -			value = " `" + value[:7] + "...`"
      -		} else {
      -			value = " `" + value + "`"
      -		}
      -	}
      -	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
      -}
      -
      -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
      -	terrlen := len(d.terrors)
      -	err := u.UnmarshalYAML(func(v interface{}) (err error) {
      -		defer handleErr(&err)
      -		d.unmarshal(n, reflect.ValueOf(v))
      -		if len(d.terrors) > terrlen {
      -			issues := d.terrors[terrlen:]
      -			d.terrors = d.terrors[:terrlen]
      -			return &TypeError{issues}
      -		}
      -		return nil
      -	})
      -	if e, ok := err.(*TypeError); ok {
      -		d.terrors = append(d.terrors, e.Errors...)
      -		return false
      -	}
      -	if err != nil {
      -		fail(err)
      -	}
      -	return true
      -}
      -
      -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
      -// if a value is found to implement it.
      -// It returns the initialized and dereferenced out value, whether
      -// unmarshalling was already done by UnmarshalYAML, and if so whether
      -// its types unmarshalled appropriately.
      -//
      -// If n holds a null value, prepare returns before doing anything.
      -func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
      -	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
      -		return out, false, false
      -	}
      -	again := true
      -	for again {
      -		again = false
      -		if out.Kind() == reflect.Ptr {
      -			if out.IsNil() {
      -				out.Set(reflect.New(out.Type().Elem()))
      -			}
      -			out = out.Elem()
      -			again = true
      -		}
      -		if out.CanAddr() {
      -			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
      -				good = d.callUnmarshaler(n, u)
      -				return out, true, good
      -			}
      -		}
      -	}
      -	return out, false, false
      -}
      -
      -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
      -	switch n.kind {
      -	case documentNode:
      -		return d.document(n, out)
      -	case aliasNode:
      -		return d.alias(n, out)
      -	}
      -	out, unmarshaled, good := d.prepare(n, out)
      -	if unmarshaled {
      -		return good
      -	}
      -	switch n.kind {
      -	case scalarNode:
      -		good = d.scalar(n, out)
      -	case mappingNode:
      -		good = d.mapping(n, out)
      -	case sequenceNode:
      -		good = d.sequence(n, out)
      -	default:
      -		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
      -	}
      -	return good
      -}
      -
      -func (d *decoder) document(n *node, out reflect.Value) (good bool) {
      -	if len(n.children) == 1 {
      -		d.doc = n
      -		d.unmarshal(n.children[0], out)
      -		return true
      -	}
      -	return false
      -}
      -
      -func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
      -	an, ok := d.doc.anchors[n.value]
      -	if !ok {
      -		failf("unknown anchor '%s' referenced", n.value)
      -	}
      -	if d.aliases[n.value] {
      -		failf("anchor '%s' value contains itself", n.value)
      -	}
      -	d.aliases[n.value] = true
      -	good = d.unmarshal(an, out)
      -	delete(d.aliases, n.value)
      -	return good
      -}
      -
      -var zeroValue reflect.Value
      -
      -func resetMap(out reflect.Value) {
      -	for _, k := range out.MapKeys() {
      -		out.SetMapIndex(k, zeroValue)
      -	}
      -}
      -
      -func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
      -	var tag string
      -	var resolved interface{}
      -	if n.tag == "" && !n.implicit {
      -		tag = yaml_STR_TAG
      -		resolved = n.value
      -	} else {
      -		tag, resolved = resolve(n.tag, n.value)
      -		if tag == yaml_BINARY_TAG {
      -			data, err := base64.StdEncoding.DecodeString(resolved.(string))
      -			if err != nil {
      -				failf("!!binary value contains invalid base64 data")
      -			}
      -			resolved = string(data)
      -		}
      -	}
      -	if resolved == nil {
      -		if out.Kind() == reflect.Map && !out.CanAddr() {
      -			resetMap(out)
      -		} else {
      -			out.Set(reflect.Zero(out.Type()))
      -		}
      -		return true
      -	}
      -	if s, ok := resolved.(string); ok && out.CanAddr() {
      -		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
      -			err := u.UnmarshalText([]byte(s))
      -			if err != nil {
      -				fail(err)
      -			}
      -			return true
      -		}
      -	}
      -	switch out.Kind() {
      -	case reflect.String:
      -		if tag == yaml_BINARY_TAG {
      -			out.SetString(resolved.(string))
      -			good = true
      -		} else if resolved != nil {
      -			out.SetString(n.value)
      -			good = true
      -		}
      -	case reflect.Interface:
      -		if resolved == nil {
      -			out.Set(reflect.Zero(out.Type()))
      -		} else {
      -			out.Set(reflect.ValueOf(resolved))
      -		}
      -		good = true
      -	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
      -		switch resolved := resolved.(type) {
      -		case int:
      -			if !out.OverflowInt(int64(resolved)) {
      -				out.SetInt(int64(resolved))
      -				good = true
      -			}
      -		case int64:
      -			if !out.OverflowInt(resolved) {
      -				out.SetInt(resolved)
      -				good = true
      -			}
      -		case uint64:
      -			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
      -				out.SetInt(int64(resolved))
      -				good = true
      -			}
      -		case float64:
      -			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
      -				out.SetInt(int64(resolved))
      -				good = true
      -			}
      -		case string:
      -			if out.Type() == durationType {
      -				d, err := time.ParseDuration(resolved)
      -				if err == nil {
      -					out.SetInt(int64(d))
      -					good = true
      -				}
      -			}
      -		}
      -	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
      -		switch resolved := resolved.(type) {
      -		case int:
      -			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
      -				out.SetUint(uint64(resolved))
      -				good = true
      -			}
      -		case int64:
      -			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
      -				out.SetUint(uint64(resolved))
      -				good = true
      -			}
      -		case uint64:
      -			if !out.OverflowUint(uint64(resolved)) {
      -				out.SetUint(uint64(resolved))
      -				good = true
      -			}
      -		case float64:
      -			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
      -				out.SetUint(uint64(resolved))
      -				good = true
      -			}
      -		}
      -	case reflect.Bool:
      -		switch resolved := resolved.(type) {
      -		case bool:
      -			out.SetBool(resolved)
      -			good = true
      -		}
      -	case reflect.Float32, reflect.Float64:
      -		switch resolved := resolved.(type) {
      -		case int:
      -			out.SetFloat(float64(resolved))
      -			good = true
      -		case int64:
      -			out.SetFloat(float64(resolved))
      -			good = true
      -		case uint64:
      -			out.SetFloat(float64(resolved))
      -			good = true
      -		case float64:
      -			out.SetFloat(resolved)
      -			good = true
      -		}
      -	case reflect.Ptr:
      -		if out.Type().Elem() == reflect.TypeOf(resolved) {
      -			// TODO DOes this make sense? When is out a Ptr except when decoding a nil value?
      -			elem := reflect.New(out.Type().Elem())
      -			elem.Elem().Set(reflect.ValueOf(resolved))
      -			out.Set(elem)
      -			good = true
      -		}
      -	}
      -	if !good {
      -		d.terror(n, tag, out)
      -	}
      -	return good
      -}
      -
      -func settableValueOf(i interface{}) reflect.Value {
      -	v := reflect.ValueOf(i)
      -	sv := reflect.New(v.Type()).Elem()
      -	sv.Set(v)
      -	return sv
      -}
      -
      -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
      -	l := len(n.children)
      -
      -	var iface reflect.Value
      -	switch out.Kind() {
      -	case reflect.Slice:
      -		out.Set(reflect.MakeSlice(out.Type(), l, l))
      -	case reflect.Interface:
      -		// No type hints. Will have to use a generic sequence.
      -		iface = out
      -		out = settableValueOf(make([]interface{}, l))
      -	default:
      -		d.terror(n, yaml_SEQ_TAG, out)
      -		return false
      -	}
      -	et := out.Type().Elem()
      -
      -	j := 0
      -	for i := 0; i < l; i++ {
      -		e := reflect.New(et).Elem()
      -		if ok := d.unmarshal(n.children[i], e); ok {
      -			out.Index(j).Set(e)
      -			j++
      -		}
      -	}
      -	out.Set(out.Slice(0, j))
      -	if iface.IsValid() {
      -		iface.Set(out)
      -	}
      -	return true
      -}
      -
      -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
      -	switch out.Kind() {
      -	case reflect.Struct:
      -		return d.mappingStruct(n, out)
      -	case reflect.Slice:
      -		return d.mappingSlice(n, out)
      -	case reflect.Map:
      -		// okay
      -	case reflect.Interface:
      -		if d.mapType.Kind() == reflect.Map {
      -			iface := out
      -			out = reflect.MakeMap(d.mapType)
      -			iface.Set(out)
      -		} else {
      -			slicev := reflect.New(d.mapType).Elem()
      -			if !d.mappingSlice(n, slicev) {
      -				return false
      -			}
      -			out.Set(slicev)
      -			return true
      -		}
      -	default:
      -		d.terror(n, yaml_MAP_TAG, out)
      -		return false
      -	}
      -	outt := out.Type()
      -	kt := outt.Key()
      -	et := outt.Elem()
      -
      -	mapType := d.mapType
      -	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
      -		d.mapType = outt
      -	}
      -
      -	if out.IsNil() {
      -		out.Set(reflect.MakeMap(outt))
      -	}
      -	l := len(n.children)
      -	for i := 0; i < l; i += 2 {
      -		if isMerge(n.children[i]) {
      -			d.merge(n.children[i+1], out)
      -			continue
      -		}
      -		k := reflect.New(kt).Elem()
      -		if d.unmarshal(n.children[i], k) {
      -			kkind := k.Kind()
      -			if kkind == reflect.Interface {
      -				kkind = k.Elem().Kind()
      -			}
      -			if kkind == reflect.Map || kkind == reflect.Slice {
      -				failf("invalid map key: %#v", k.Interface())
      -			}
      -			e := reflect.New(et).Elem()
      -			if d.unmarshal(n.children[i+1], e) {
      -				out.SetMapIndex(k, e)
      -			}
      -		}
      -	}
      -	d.mapType = mapType
      -	return true
      -}
      -
      -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
      -	outt := out.Type()
      -	if outt.Elem() != mapItemType {
      -		d.terror(n, yaml_MAP_TAG, out)
      -		return false
      -	}
      -
      -	mapType := d.mapType
      -	d.mapType = outt
      -
      -	var slice []MapItem
      -	var l = len(n.children)
      -	for i := 0; i < l; i += 2 {
      -		if isMerge(n.children[i]) {
      -			d.merge(n.children[i+1], out)
      -			continue
      -		}
      -		item := MapItem{}
      -		k := reflect.ValueOf(&item.Key).Elem()
      -		if d.unmarshal(n.children[i], k) {
      -			v := reflect.ValueOf(&item.Value).Elem()
      -			if d.unmarshal(n.children[i+1], v) {
      -				slice = append(slice, item)
      -			}
      -		}
      -	}
      -	out.Set(reflect.ValueOf(slice))
      -	d.mapType = mapType
      -	return true
      -}
      -
      -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
      -	sinfo, err := getStructInfo(out.Type())
      -	if err != nil {
      -		panic(err)
      -	}
      -	name := settableValueOf("")
      -	l := len(n.children)
      -
      -	var inlineMap reflect.Value
      -	var elemType reflect.Type
      -	if sinfo.InlineMap != -1 {
      -		inlineMap = out.Field(sinfo.InlineMap)
      -		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
      -		elemType = inlineMap.Type().Elem()
      -	}
      -
      -	for i := 0; i < l; i += 2 {
      -		ni := n.children[i]
      -		if isMerge(ni) {
      -			d.merge(n.children[i+1], out)
      -			continue
      -		}
      -		if !d.unmarshal(ni, name) {
      -			continue
      -		}
      -		if info, ok := sinfo.FieldsMap[name.String()]; ok {
      -			var field reflect.Value
      -			if info.Inline == nil {
      -				field = out.Field(info.Num)
      -			} else {
      -				field = out.FieldByIndex(info.Inline)
      -			}
      -			d.unmarshal(n.children[i+1], field)
      -		} else if sinfo.InlineMap != -1 {
      -			if inlineMap.IsNil() {
      -				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
      -			}
      -			value := reflect.New(elemType).Elem()
      -			d.unmarshal(n.children[i+1], value)
      -			inlineMap.SetMapIndex(name, value)
      -		}
      -	}
      -	return true
      -}
      -
      -func failWantMap() {
      -	failf("map merge requires map or sequence of maps as the value")
      -}
      -
      -func (d *decoder) merge(n *node, out reflect.Value) {
      -	switch n.kind {
      -	case mappingNode:
      -		d.unmarshal(n, out)
      -	case aliasNode:
      -		an, ok := d.doc.anchors[n.value]
      -		if ok && an.kind != mappingNode {
      -			failWantMap()
      -		}
      -		d.unmarshal(n, out)
      -	case sequenceNode:
      -		// Step backwards as earlier nodes take precedence.
      -		for i := len(n.children) - 1; i >= 0; i-- {
      -			ni := n.children[i]
      -			if ni.kind == aliasNode {
      -				an, ok := d.doc.anchors[ni.value]
      -				if ok && an.kind != mappingNode {
      -					failWantMap()
      -				}
      -			} else if ni.kind != mappingNode {
      -				failWantMap()
      -			}
      -			d.unmarshal(ni, out)
      -		}
      -	default:
      -		failWantMap()
      -	}
      -}
      -
      -func isMerge(n *node) bool {
      -	return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/decode_test.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/decode_test.go
      deleted file mode 100644
      index 04fdd9e7..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/decode_test.go
      +++ /dev/null
      @@ -1,966 +0,0 @@
      -package yaml_test
      -
      -import (
      -	"errors"
      -	. "gopkg.in/check.v1"
      -	"gopkg.in/yaml.v2"
      -	"math"
      -	"net"
      -	"reflect"
      -	"strings"
      -	"time"
      -)
      -
      -var unmarshalIntTest = 123
      -
      -var unmarshalTests = []struct {
      -	data  string
      -	value interface{}
      -}{
      -	{
      -		"",
      -		&struct{}{},
      -	}, {
      -		"{}", &struct{}{},
      -	}, {
      -		"v: hi",
      -		map[string]string{"v": "hi"},
      -	}, {
      -		"v: hi", map[string]interface{}{"v": "hi"},
      -	}, {
      -		"v: true",
      -		map[string]string{"v": "true"},
      -	}, {
      -		"v: true",
      -		map[string]interface{}{"v": true},
      -	}, {
      -		"v: 10",
      -		map[string]interface{}{"v": 10},
      -	}, {
      -		"v: 0b10",
      -		map[string]interface{}{"v": 2},
      -	}, {
      -		"v: 0xA",
      -		map[string]interface{}{"v": 10},
      -	}, {
      -		"v: 4294967296",
      -		map[string]int64{"v": 4294967296},
      -	}, {
      -		"v: 0.1",
      -		map[string]interface{}{"v": 0.1},
      -	}, {
      -		"v: .1",
      -		map[string]interface{}{"v": 0.1},
      -	}, {
      -		"v: .Inf",
      -		map[string]interface{}{"v": math.Inf(+1)},
      -	}, {
      -		"v: -.Inf",
      -		map[string]interface{}{"v": math.Inf(-1)},
      -	}, {
      -		"v: -10",
      -		map[string]interface{}{"v": -10},
      -	}, {
      -		"v: -.1",
      -		map[string]interface{}{"v": -0.1},
      -	},
      -
      -	// Simple values.
      -	{
      -		"123",
      -		&unmarshalIntTest,
      -	},
      -
      -	// Floats from spec
      -	{
      -		"canonical: 6.8523e+5",
      -		map[string]interface{}{"canonical": 6.8523e+5},
      -	}, {
      -		"expo: 685.230_15e+03",
      -		map[string]interface{}{"expo": 685.23015e+03},
      -	}, {
      -		"fixed: 685_230.15",
      -		map[string]interface{}{"fixed": 685230.15},
      -	}, {
      -		"neginf: -.inf",
      -		map[string]interface{}{"neginf": math.Inf(-1)},
      -	}, {
      -		"fixed: 685_230.15",
      -		map[string]float64{"fixed": 685230.15},
      -	},
      -	//{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
      -	//{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
      -
      -	// Bools from spec
      -	{
      -		"canonical: y",
      -		map[string]interface{}{"canonical": true},
      -	}, {
      -		"answer: NO",
      -		map[string]interface{}{"answer": false},
      -	}, {
      -		"logical: True",
      -		map[string]interface{}{"logical": true},
      -	}, {
      -		"option: on",
      -		map[string]interface{}{"option": true},
      -	}, {
      -		"option: on",
      -		map[string]bool{"option": true},
      -	},
      -	// Ints from spec
      -	{
      -		"canonical: 685230",
      -		map[string]interface{}{"canonical": 685230},
      -	}, {
      -		"decimal: +685_230",
      -		map[string]interface{}{"decimal": 685230},
      -	}, {
      -		"octal: 02472256",
      -		map[string]interface{}{"octal": 685230},
      -	}, {
      -		"hexa: 0x_0A_74_AE",
      -		map[string]interface{}{"hexa": 685230},
      -	}, {
      -		"bin: 0b1010_0111_0100_1010_1110",
      -		map[string]interface{}{"bin": 685230},
      -	}, {
      -		"bin: -0b101010",
      -		map[string]interface{}{"bin": -42},
      -	}, {
      -		"decimal: +685_230",
      -		map[string]int{"decimal": 685230},
      -	},
      -
      -	//{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
      -
      -	// Nulls from spec
      -	{
      -		"empty:",
      -		map[string]interface{}{"empty": nil},
      -	}, {
      -		"canonical: ~",
      -		map[string]interface{}{"canonical": nil},
      -	}, {
      -		"english: null",
      -		map[string]interface{}{"english": nil},
      -	}, {
      -		"~: null key",
      -		map[interface{}]string{nil: "null key"},
      -	}, {
      -		"empty:",
      -		map[string]*bool{"empty": nil},
      -	},
      -
      -	// Flow sequence
      -	{
      -		"seq: [A,B]",
      -		map[string]interface{}{"seq": []interface{}{"A", "B"}},
      -	}, {
      -		"seq: [A,B,C,]",
      -		map[string][]string{"seq": []string{"A", "B", "C"}},
      -	}, {
      -		"seq: [A,1,C]",
      -		map[string][]string{"seq": []string{"A", "1", "C"}},
      -	}, {
      -		"seq: [A,1,C]",
      -		map[string][]int{"seq": []int{1}},
      -	}, {
      -		"seq: [A,1,C]",
      -		map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
      -	},
      -	// Block sequence
      -	{
      -		"seq:\n - A\n - B",
      -		map[string]interface{}{"seq": []interface{}{"A", "B"}},
      -	}, {
      -		"seq:\n - A\n - B\n - C",
      -		map[string][]string{"seq": []string{"A", "B", "C"}},
      -	}, {
      -		"seq:\n - A\n - 1\n - C",
      -		map[string][]string{"seq": []string{"A", "1", "C"}},
      -	}, {
      -		"seq:\n - A\n - 1\n - C",
      -		map[string][]int{"seq": []int{1}},
      -	}, {
      -		"seq:\n - A\n - 1\n - C",
      -		map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
      -	},
      -
      -	// Literal block scalar
      -	{
      -		"scalar: | # Comment\n\n literal\n\n \ttext\n\n",
      -		map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
      -	},
      -
      -	// Folded block scalar
      -	{
      -		"scalar: > # Comment\n\n folded\n line\n \n next\n line\n  * one\n  * two\n\n last\n line\n\n",
      -		map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
      -	},
      -
      -	// Map inside interface with no type hints.
      -	{
      -		"a: {b: c}",
      -		map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
      -	},
      -
      -	// Structs and type conversions.
      -	{
      -		"hello: world",
      -		&struct{ Hello string }{"world"},
      -	}, {
      -		"a: {b: c}",
      -		&struct{ A struct{ B string } }{struct{ B string }{"c"}},
      -	}, {
      -		"a: {b: c}",
      -		&struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
      -	}, {
      -		"a: {b: c}",
      -		&struct{ A map[string]string }{map[string]string{"b": "c"}},
      -	}, {
      -		"a: {b: c}",
      -		&struct{ A *map[string]string }{&map[string]string{"b": "c"}},
      -	}, {
      -		"a:",
      -		&struct{ A map[string]string }{},
      -	}, {
      -		"a: 1",
      -		&struct{ A int }{1},
      -	}, {
      -		"a: 1",
      -		&struct{ A float64 }{1},
      -	}, {
      -		"a: 1.0",
      -		&struct{ A int }{1},
      -	}, {
      -		"a: 1.0",
      -		&struct{ A uint }{1},
      -	}, {
      -		"a: [1, 2]",
      -		&struct{ A []int }{[]int{1, 2}},
      -	}, {
      -		"a: 1",
      -		&struct{ B int }{0},
      -	}, {
      -		"a: 1",
      -		&struct {
      -			B int "a"
      -		}{1},
      -	}, {
      -		"a: y",
      -		&struct{ A bool }{true},
      -	},
      -
      -	// Some cross type conversions
      -	{
      -		"v: 42",
      -		map[string]uint{"v": 42},
      -	}, {
      -		"v: -42",
      -		map[string]uint{},
      -	}, {
      -		"v: 4294967296",
      -		map[string]uint64{"v": 4294967296},
      -	}, {
      -		"v: -4294967296",
      -		map[string]uint64{},
      -	},
      -
      -	// int
      -	{
      -		"int_max: 2147483647",
      -		map[string]int{"int_max": math.MaxInt32},
      -	},
      -	{
      -		"int_min: -2147483648",
      -		map[string]int{"int_min": math.MinInt32},
      -	},
      -	{
      -		"int_overflow: 9223372036854775808", // math.MaxInt64 + 1
      -		map[string]int{},
      -	},
      -
      -	// int64
      -	{
      -		"int64_max: 9223372036854775807",
      -		map[string]int64{"int64_max": math.MaxInt64},
      -	},
      -	{
      -		"int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111",
      -		map[string]int64{"int64_max_base2": math.MaxInt64},
      -	},
      -	{
      -		"int64_min: -9223372036854775808",
      -		map[string]int64{"int64_min": math.MinInt64},
      -	},
      -	{
      -		"int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111",
      -		map[string]int64{"int64_neg_base2": -math.MaxInt64},
      -	},
      -	{
      -		"int64_overflow: 9223372036854775808", // math.MaxInt64 + 1
      -		map[string]int64{},
      -	},
      -
      -	// uint
      -	{
      -		"uint_min: 0",
      -		map[string]uint{"uint_min": 0},
      -	},
      -	{
      -		"uint_max: 4294967295",
      -		map[string]uint{"uint_max": math.MaxUint32},
      -	},
      -	{
      -		"uint_underflow: -1",
      -		map[string]uint{},
      -	},
      -
      -	// uint64
      -	{
      -		"uint64_min: 0",
      -		map[string]uint{"uint64_min": 0},
      -	},
      -	{
      -		"uint64_max: 18446744073709551615",
      -		map[string]uint64{"uint64_max": math.MaxUint64},
      -	},
      -	{
      -		"uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111",
      -		map[string]uint64{"uint64_max_base2": math.MaxUint64},
      -	},
      -	{
      -		"uint64_maxint64: 9223372036854775807",
      -		map[string]uint64{"uint64_maxint64": math.MaxInt64},
      -	},
      -	{
      -		"uint64_underflow: -1",
      -		map[string]uint64{},
      -	},
      -
      -	// float32
      -	{
      -		"float32_max: 3.40282346638528859811704183484516925440e+38",
      -		map[string]float32{"float32_max": math.MaxFloat32},
      -	},
      -	{
      -		"float32_nonzero: 1.401298464324817070923729583289916131280e-45",
      -		map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32},
      -	},
      -	{
      -		"float32_maxuint64: 18446744073709551615",
      -		map[string]float32{"float32_maxuint64": float32(math.MaxUint64)},
      -	},
      -	{
      -		"float32_maxuint64+1: 18446744073709551616",
      -		map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)},
      -	},
      -
      -	// float64
      -	{
      -		"float64_max: 1.797693134862315708145274237317043567981e+308",
      -		map[string]float64{"float64_max": math.MaxFloat64},
      -	},
      -	{
      -		"float64_nonzero: 4.940656458412465441765687928682213723651e-324",
      -		map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64},
      -	},
      -	{
      -		"float64_maxuint64: 18446744073709551615",
      -		map[string]float64{"float64_maxuint64": float64(math.MaxUint64)},
      -	},
      -	{
      -		"float64_maxuint64+1: 18446744073709551616",
      -		map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)},
      -	},
      -
      -	// Overflow cases.
      -	{
      -		"v: 4294967297",
      -		map[string]int32{},
      -	}, {
      -		"v: 128",
      -		map[string]int8{},
      -	},
      -
      -	// Quoted values.
      -	{
      -		"'1': '\"2\"'",
      -		map[interface{}]interface{}{"1": "\"2\""},
      -	}, {
      -		"v:\n- A\n- 'B\n\n  C'\n",
      -		map[string][]string{"v": []string{"A", "B\nC"}},
      -	},
      -
      -	// Explicit tags.
      -	{
      -		"v: !!float '1.1'",
      -		map[string]interface{}{"v": 1.1},
      -	}, {
      -		"v: !!null ''",
      -		map[string]interface{}{"v": nil},
      -	}, {
      -		"%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
      -		map[string]interface{}{"v": 1},
      -	},
      -
      -	// Anchors and aliases.
      -	{
      -		"a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
      -		&struct{ A, B, C, D int }{1, 2, 1, 2},
      -	}, {
      -		"a: &a {c: 1}\nb: *a",
      -		&struct {
      -			A, B struct {
      -				C int
      -			}
      -		}{struct{ C int }{1}, struct{ C int }{1}},
      -	}, {
      -		"a: &a [1, 2]\nb: *a",
      -		&struct{ B []int }{[]int{1, 2}},
      -	}, {
      -		"b: *a\na: &a {c: 1}",
      -		&struct {
      -			A, B struct {
      -				C int
      -			}
      -		}{struct{ C int }{1}, struct{ C int }{1}},
      -	},
      -
      -	// Bug #1133337
      -	{
      -		"foo: ''",
      -		map[string]*string{"foo": new(string)},
      -	}, {
      -		"foo: null",
      -		map[string]string{"foo": ""},
      -	}, {
      -		"foo: null",
      -		map[string]interface{}{"foo": nil},
      -	},
      -
      -	// Ignored field
      -	{
      -		"a: 1\nb: 2\n",
      -		&struct {
      -			A int
      -			B int "-"
      -		}{1, 0},
      -	},
      -
      -	// Bug #1191981
      -	{
      -		"" +
      -			"%YAML 1.1\n" +
      -			"--- !!str\n" +
      -			`"Generic line break (no glyph)\n\` + "\n" +
      -			` Generic line break (glyphed)\n\` + "\n" +
      -			` Line separator\u2028\` + "\n" +
      -			` Paragraph separator\u2029"` + "\n",
      -		"" +
      -			"Generic line break (no glyph)\n" +
      -			"Generic line break (glyphed)\n" +
      -			"Line separator\u2028Paragraph separator\u2029",
      -	},
      -
      -	// Struct inlining
      -	{
      -		"a: 1\nb: 2\nc: 3\n",
      -		&struct {
      -			A int
      -			C inlineB `yaml:",inline"`
      -		}{1, inlineB{2, inlineC{3}}},
      -	},
      -
      -	// Map inlining
      -	{
      -		"a: 1\nb: 2\nc: 3\n",
      -		&struct {
      -			A int
      -			C map[string]int `yaml:",inline"`
      -		}{1, map[string]int{"b": 2, "c": 3}},
      -	},
      -
      -	// bug 1243827
      -	{
      -		"a: -b_c",
      -		map[string]interface{}{"a": "-b_c"},
      -	},
      -	{
      -		"a: +b_c",
      -		map[string]interface{}{"a": "+b_c"},
      -	},
      -	{
      -		"a: 50cent_of_dollar",
      -		map[string]interface{}{"a": "50cent_of_dollar"},
      -	},
      -
      -	// Duration
      -	{
      -		"a: 3s",
      -		map[string]time.Duration{"a": 3 * time.Second},
      -	},
      -
      -	// Issue #24.
      -	{
      -		"a: <foo>",
      -		map[string]string{"a": "<foo>"},
      -	},
      -
      -	// Base 60 floats are obsolete and unsupported.
      -	{
      -		"a: 1:1\n",
      -		map[string]string{"a": "1:1"},
      -	},
      -
      -	// Binary data.
      -	{
      -		"a: !!binary gIGC\n",
      -		map[string]string{"a": "\x80\x81\x82"},
      -	}, {
      -		"a: !!binary |\n  " + strings.Repeat("kJCQ", 17) + "kJ\n  CQ\n",
      -		map[string]string{"a": strings.Repeat("\x90", 54)},
      -	}, {
      -		"a: !!binary |\n  " + strings.Repeat("A", 70) + "\n  ==\n",
      -		map[string]string{"a": strings.Repeat("\x00", 52)},
      -	},
      -
      -	// Ordered maps.
      -	{
      -		"{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}",
      -		&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
      -	},
      -
      -	// Issue #39.
      -	{
      -		"a:\n b:\n  c: d\n",
      -		map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}},
      -	},
      -
      -	// Custom map type.
      -	{
      -		"a: {b: c}",
      -		M{"a": M{"b": "c"}},
      -	},
      -
      -	// Support encoding.TextUnmarshaler.
      -	{
      -		"a: 1.2.3.4\n",
      -		map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
      -	},
      -	{
      -		"a: 2015-02-24T18:19:39Z\n",
      -		map[string]time.Time{"a": time.Unix(1424801979, 0)},
      -	},
      -
      -	// Encode empty lists as zero-length slices.
      -	{
      -		"a: []",
      -		&struct{ A []int }{[]int{}},
      -	},
      -}
      -
      -type M map[interface{}]interface{}
      -
      -type inlineB struct {
      -	B       int
      -	inlineC `yaml:",inline"`
      -}
      -
      -type inlineC struct {
      -	C int
      -}
      -
      -func (s *S) TestUnmarshal(c *C) {
      -	for _, item := range unmarshalTests {
      -		t := reflect.ValueOf(item.value).Type()
      -		var value interface{}
      -		switch t.Kind() {
      -		case reflect.Map:
      -			value = reflect.MakeMap(t).Interface()
      -		case reflect.String:
      -			value = reflect.New(t).Interface()
      -		case reflect.Ptr:
      -			value = reflect.New(t.Elem()).Interface()
      -		default:
      -			c.Fatalf("missing case for %s", t)
      -		}
      -		err := yaml.Unmarshal([]byte(item.data), value)
      -		if _, ok := err.(*yaml.TypeError); !ok {
      -			c.Assert(err, IsNil)
      -		}
      -		if t.Kind() == reflect.String {
      -			c.Assert(*value.(*string), Equals, item.value)
      -		} else {
      -			c.Assert(value, DeepEquals, item.value)
      -		}
      -	}
      -}
      -
      -func (s *S) TestUnmarshalNaN(c *C) {
      -	value := map[string]interface{}{}
      -	err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
      -	c.Assert(err, IsNil)
      -	c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
      -}
      -
      -var unmarshalErrorTests = []struct {
      -	data, error string
      -}{
      -	{"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"},
      -	{"v: [A,", "yaml: line 1: did not find expected node content"},
      -	{"v:\n- [A,", "yaml: line 2: did not find expected node content"},
      -	{"a: *b\n", "yaml: unknown anchor 'b' referenced"},
      -	{"a: &a\n  b: *a\n", "yaml: anchor 'a' value contains itself"},
      -	{"value: -", "yaml: block sequence entries are not allowed in this context"},
      -	{"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"},
      -	{"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`},
      -	{"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
      -}
      -
      -func (s *S) TestUnmarshalErrors(c *C) {
      -	for _, item := range unmarshalErrorTests {
      -		var value interface{}
      -		err := yaml.Unmarshal([]byte(item.data), &value)
      -		c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
      -	}
      -}
      -
      -var unmarshalerTests = []struct {
      -	data, tag string
      -	value     interface{}
      -}{
      -	{"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
      -	{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
      -	{"_: 10", "!!int", 10},
      -	{"_: null", "!!null", nil},
      -	{`_: BAR!`, "!!str", "BAR!"},
      -	{`_: "BAR!"`, "!!str", "BAR!"},
      -	{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
      -}
      -
      -var unmarshalerResult = map[int]error{}
      -
      -type unmarshalerType struct {
      -	value interface{}
      -}
      -
      -func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error {
      -	if err := unmarshal(&o.value); err != nil {
      -		return err
      -	}
      -	if i, ok := o.value.(int); ok {
      -		if result, ok := unmarshalerResult[i]; ok {
      -			return result
      -		}
      -	}
      -	return nil
      -}
      -
      -type unmarshalerPointer struct {
      -	Field *unmarshalerType "_"
      -}
      -
      -type unmarshalerValue struct {
      -	Field unmarshalerType "_"
      -}
      -
      -func (s *S) TestUnmarshalerPointerField(c *C) {
      -	for _, item := range unmarshalerTests {
      -		obj := &unmarshalerPointer{}
      -		err := yaml.Unmarshal([]byte(item.data), obj)
      -		c.Assert(err, IsNil)
      -		if item.value == nil {
      -			c.Assert(obj.Field, IsNil)
      -		} else {
      -			c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
      -			c.Assert(obj.Field.value, DeepEquals, item.value)
      -		}
      -	}
      -}
      -
      -func (s *S) TestUnmarshalerValueField(c *C) {
      -	for _, item := range unmarshalerTests {
      -		obj := &unmarshalerValue{}
      -		err := yaml.Unmarshal([]byte(item.data), obj)
      -		c.Assert(err, IsNil)
      -		c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
      -		c.Assert(obj.Field.value, DeepEquals, item.value)
      -	}
      -}
      -
      -func (s *S) TestUnmarshalerWholeDocument(c *C) {
      -	obj := &unmarshalerType{}
      -	err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj)
      -	c.Assert(err, IsNil)
      -	value, ok := obj.value.(map[interface{}]interface{})
      -	c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value))
      -	c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value)
      -}
      -
      -func (s *S) TestUnmarshalerTypeError(c *C) {
      -	unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}}
      -	unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}}
      -	defer func() {
      -		delete(unmarshalerResult, 2)
      -		delete(unmarshalerResult, 4)
      -	}()
      -
      -	type T struct {
      -		Before int
      -		After  int
      -		M      map[string]*unmarshalerType
      -	}
      -	var v T
      -	data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}`
      -	err := yaml.Unmarshal([]byte(data), &v)
      -	c.Assert(err, ErrorMatches, ""+
      -		"yaml: unmarshal errors:\n"+
      -		"  line 1: cannot unmarshal !!str `A` into int\n"+
      -		"  foo\n"+
      -		"  bar\n"+
      -		"  line 1: cannot unmarshal !!str `B` into int")
      -	c.Assert(v.M["abc"], NotNil)
      -	c.Assert(v.M["def"], IsNil)
      -	c.Assert(v.M["ghi"], NotNil)
      -	c.Assert(v.M["jkl"], IsNil)
      -
      -	c.Assert(v.M["abc"].value, Equals, 1)
      -	c.Assert(v.M["ghi"].value, Equals, 3)
      -}
      -
      -type proxyTypeError struct{}
      -
      -func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error {
      -	var s string
      -	var a int32
      -	var b int64
      -	if err := unmarshal(&s); err != nil {
      -		panic(err)
      -	}
      -	if s == "a" {
      -		if err := unmarshal(&b); err == nil {
      -			panic("should have failed")
      -		}
      -		return unmarshal(&a)
      -	}
      -	if err := unmarshal(&a); err == nil {
      -		panic("should have failed")
      -	}
      -	return unmarshal(&b)
      -}
      -
      -func (s *S) TestUnmarshalerTypeErrorProxying(c *C) {
      -	type T struct {
      -		Before int
      -		After  int
      -		M      map[string]*proxyTypeError
      -	}
      -	var v T
      -	data := `{before: A, m: {abc: a, def: b}, after: B}`
      -	err := yaml.Unmarshal([]byte(data), &v)
      -	c.Assert(err, ErrorMatches, ""+
      -		"yaml: unmarshal errors:\n"+
      -		"  line 1: cannot unmarshal !!str `A` into int\n"+
      -		"  line 1: cannot unmarshal !!str `a` into int32\n"+
      -		"  line 1: cannot unmarshal !!str `b` into int64\n"+
      -		"  line 1: cannot unmarshal !!str `B` into int")
      -}
      -
      -type failingUnmarshaler struct{}
      -
      -var failingErr = errors.New("failingErr")
      -
      -func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
      -	return failingErr
      -}
      -
      -func (s *S) TestUnmarshalerError(c *C) {
      -	err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{})
      -	c.Assert(err, Equals, failingErr)
      -}
      -
      -type sliceUnmarshaler []int
      -
      -func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
      -	var slice []int
      -	err := unmarshal(&slice)
      -	if err == nil {
      -		*su = slice
      -		return nil
      -	}
      -
      -	var intVal int
      -	err = unmarshal(&intVal)
      -	if err == nil {
      -		*su = []int{intVal}
      -		return nil
      -	}
      -
      -	return err
      -}
      -
      -func (s *S) TestUnmarshalerRetry(c *C) {
      -	var su sliceUnmarshaler
      -	err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su)
      -	c.Assert(err, IsNil)
      -	c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3}))
      -
      -	err = yaml.Unmarshal([]byte("1"), &su)
      -	c.Assert(err, IsNil)
      -	c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1}))
      -}
      -
      -// From http://yaml.org/type/merge.html
      -var mergeTests = `
      -anchors:
      -  list:
      -    - &CENTER { "x": 1, "y": 2 }
      -    - &LEFT   { "x": 0, "y": 2 }
      -    - &BIG    { "r": 10 }
      -    - &SMALL  { "r": 1 }
      -
      -# All the following maps are equal:
      -
      -plain:
      -  # Explicit keys
      -  "x": 1
      -  "y": 2
      -  "r": 10
      -  label: center/big
      -
      -mergeOne:
      -  # Merge one map
      -  << : *CENTER
      -  "r": 10
      -  label: center/big
      -
      -mergeMultiple:
      -  # Merge multiple maps
      -  << : [ *CENTER, *BIG ]
      -  label: center/big
      -
      -override:
      -  # Override
      -  << : [ *BIG, *LEFT, *SMALL ]
      -  "x": 1
      -  label: center/big
      -
      -shortTag:
      -  # Explicit short merge tag
      -  !!merge "<<" : [ *CENTER, *BIG ]
      -  label: center/big
      -
      -longTag:
      -  # Explicit merge long tag
      -  !<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
      -  label: center/big
      -
      -inlineMap:
      -  # Inlined map 
      -  << : {"x": 1, "y": 2, "r": 10}
      -  label: center/big
      -
      -inlineSequenceMap:
      -  # Inlined map in sequence
      -  << : [ *CENTER, {"r": 10} ]
      -  label: center/big
      -`
      -
      -func (s *S) TestMerge(c *C) {
      -	var want = map[interface{}]interface{}{
      -		"x":     1,
      -		"y":     2,
      -		"r":     10,
      -		"label": "center/big",
      -	}
      -
      -	var m map[interface{}]interface{}
      -	err := yaml.Unmarshal([]byte(mergeTests), &m)
      -	c.Assert(err, IsNil)
      -	for name, test := range m {
      -		if name == "anchors" {
      -			continue
      -		}
      -		c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
      -	}
      -}
      -
      -func (s *S) TestMergeStruct(c *C) {
      -	type Data struct {
      -		X, Y, R int
      -		Label   string
      -	}
      -	want := Data{1, 2, 10, "center/big"}
      -
      -	var m map[string]Data
      -	err := yaml.Unmarshal([]byte(mergeTests), &m)
      -	c.Assert(err, IsNil)
      -	for name, test := range m {
      -		if name == "anchors" {
      -			continue
      -		}
      -		c.Assert(test, Equals, want, Commentf("test %q failed", name))
      -	}
      -}
      -
      -var unmarshalNullTests = []func() interface{}{
      -	func() interface{} { var v interface{}; v = "v"; return &v },
      -	func() interface{} { var s = "s"; return &s },
      -	func() interface{} { var s = "s"; sptr := &s; return &sptr },
      -	func() interface{} { var i = 1; return &i },
      -	func() interface{} { var i = 1; iptr := &i; return &iptr },
      -	func() interface{} { m := map[string]int{"s": 1}; return &m },
      -	func() interface{} { m := map[string]int{"s": 1}; return m },
      -}
      -
      -func (s *S) TestUnmarshalNull(c *C) {
      -	for _, test := range unmarshalNullTests {
      -		item := test()
      -		zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
      -		err := yaml.Unmarshal([]byte("null"), item)
      -		c.Assert(err, IsNil)
      -		if reflect.TypeOf(item).Kind() == reflect.Map {
      -			c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
      -		} else {
      -			c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
      -		}
      -	}
      -}
      -
      -func (s *S) TestUnmarshalSliceOnPreset(c *C) {
      -	// Issue #48.
      -	v := struct{ A []int }{[]int{1}}
      -	yaml.Unmarshal([]byte("a: [2]"), &v)
      -	c.Assert(v.A, DeepEquals, []int{2})
      -}
      -
      -//var data []byte
      -//func init() {
      -//	var err error
      -//	data, err = ioutil.ReadFile("/tmp/file.yaml")
      -//	if err != nil {
      -//		panic(err)
      -//	}
      -//}
      -//
      -//func (s *S) BenchmarkUnmarshal(c *C) {
      -//	var err error
      -//	for i := 0; i < c.N; i++ {
      -//		var v map[string]interface{}
      -//		err = yaml.Unmarshal(data, &v)
      -//	}
      -//	if err != nil {
      -//		panic(err)
      -//	}
      -//}
      -//
      -//func (s *S) BenchmarkMarshal(c *C) {
      -//	var v map[string]interface{}
      -//	yaml.Unmarshal(data, &v)
      -//	c.ResetTimer()
      -//	for i := 0; i < c.N; i++ {
      -//		yaml.Marshal(&v)
      -//	}
      -//}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/emitterc.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/emitterc.go
      deleted file mode 100644
      index 9b3dc4a4..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/emitterc.go
      +++ /dev/null
      @@ -1,1685 +0,0 @@
      -package yaml
      -
      -import (
      -	"bytes"
      -)
      -
      -// Flush the buffer if needed.
      -func flush(emitter *yaml_emitter_t) bool {
      -	if emitter.buffer_pos+5 >= len(emitter.buffer) {
      -		return yaml_emitter_flush(emitter)
      -	}
      -	return true
      -}
      -
      -// Put a character to the output buffer.
      -func put(emitter *yaml_emitter_t, value byte) bool {
      -	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
      -		return false
      -	}
      -	emitter.buffer[emitter.buffer_pos] = value
      -	emitter.buffer_pos++
      -	emitter.column++
      -	return true
      -}
      -
      -// Put a line break to the output buffer.
      -func put_break(emitter *yaml_emitter_t) bool {
      -	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
      -		return false
      -	}
      -	switch emitter.line_break {
      -	case yaml_CR_BREAK:
      -		emitter.buffer[emitter.buffer_pos] = '\r'
      -		emitter.buffer_pos += 1
      -	case yaml_LN_BREAK:
      -		emitter.buffer[emitter.buffer_pos] = '\n'
      -		emitter.buffer_pos += 1
      -	case yaml_CRLN_BREAK:
      -		emitter.buffer[emitter.buffer_pos+0] = '\r'
      -		emitter.buffer[emitter.buffer_pos+1] = '\n'
      -		emitter.buffer_pos += 2
      -	default:
      -		panic("unknown line break setting")
      -	}
      -	emitter.column = 0
      -	emitter.line++
      -	return true
      -}
      -
      -// Copy a character from a string into buffer.
      -func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
      -	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
      -		return false
      -	}
      -	p := emitter.buffer_pos
      -	w := width(s[*i])
      -	switch w {
      -	case 4:
      -		emitter.buffer[p+3] = s[*i+3]
      -		fallthrough
      -	case 3:
      -		emitter.buffer[p+2] = s[*i+2]
      -		fallthrough
      -	case 2:
      -		emitter.buffer[p+1] = s[*i+1]
      -		fallthrough
      -	case 1:
      -		emitter.buffer[p+0] = s[*i+0]
      -	default:
      -		panic("unknown character width")
      -	}
      -	emitter.column++
      -	emitter.buffer_pos += w
      -	*i += w
      -	return true
      -}
      -
      -// Write a whole string into buffer.
      -func write_all(emitter *yaml_emitter_t, s []byte) bool {
      -	for i := 0; i < len(s); {
      -		if !write(emitter, s, &i) {
      -			return false
      -		}
      -	}
      -	return true
      -}
      -
      -// Copy a line break character from a string into buffer.
      -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
      -	if s[*i] == '\n' {
      -		if !put_break(emitter) {
      -			return false
      -		}
      -		*i++
      -	} else {
      -		if !write(emitter, s, i) {
      -			return false
      -		}
      -		emitter.column = 0
      -		emitter.line++
      -	}
      -	return true
      -}
      -
      -// Set an emitter error and return false.
      -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
      -	emitter.error = yaml_EMITTER_ERROR
      -	emitter.problem = problem
      -	return false
      -}
      -
      -// Emit an event.
      -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	emitter.events = append(emitter.events, *event)
      -	for !yaml_emitter_need_more_events(emitter) {
      -		event := &emitter.events[emitter.events_head]
      -		if !yaml_emitter_analyze_event(emitter, event) {
      -			return false
      -		}
      -		if !yaml_emitter_state_machine(emitter, event) {
      -			return false
      -		}
      -		yaml_event_delete(event)
      -		emitter.events_head++
      -	}
      -	return true
      -}
      -
      -// Check if we need to accumulate more events before emitting.
      -//
      -// We accumulate extra
      -//  - 1 event for DOCUMENT-START
      -//  - 2 events for SEQUENCE-START
      -//  - 3 events for MAPPING-START
      -//
      -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
      -	if emitter.events_head == len(emitter.events) {
      -		return true
      -	}
      -	var accumulate int
      -	switch emitter.events[emitter.events_head].typ {
      -	case yaml_DOCUMENT_START_EVENT:
      -		accumulate = 1
      -		break
      -	case yaml_SEQUENCE_START_EVENT:
      -		accumulate = 2
      -		break
      -	case yaml_MAPPING_START_EVENT:
      -		accumulate = 3
      -		break
      -	default:
      -		return false
      -	}
      -	if len(emitter.events)-emitter.events_head > accumulate {
      -		return false
      -	}
      -	var level int
      -	for i := emitter.events_head; i < len(emitter.events); i++ {
      -		switch emitter.events[i].typ {
      -		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
      -			level++
      -		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
      -			level--
      -		}
      -		if level == 0 {
      -			return false
      -		}
      -	}
      -	return true
      -}
      -
      -// Append a directive to the directives stack.
      -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
      -	for i := 0; i < len(emitter.tag_directives); i++ {
      -		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
      -			if allow_duplicates {
      -				return true
      -			}
      -			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
      -		}
      -	}
      -
      -	// [Go] Do we actually need to copy this given garbage collection
      -	// and the lack of deallocating destructors?
      -	tag_copy := yaml_tag_directive_t{
      -		handle: make([]byte, len(value.handle)),
      -		prefix: make([]byte, len(value.prefix)),
      -	}
      -	copy(tag_copy.handle, value.handle)
      -	copy(tag_copy.prefix, value.prefix)
      -	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
      -	return true
      -}
      -
      -// Increase the indentation level.
      -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
      -	emitter.indents = append(emitter.indents, emitter.indent)
      -	if emitter.indent < 0 {
      -		if flow {
      -			emitter.indent = emitter.best_indent
      -		} else {
      -			emitter.indent = 0
      -		}
      -	} else if !indentless {
      -		emitter.indent += emitter.best_indent
      -	}
      -	return true
      -}
      -
      -// State dispatcher.
      -func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	switch emitter.state {
      -	default:
      -	case yaml_EMIT_STREAM_START_STATE:
      -		return yaml_emitter_emit_stream_start(emitter, event)
      -
      -	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
      -		return yaml_emitter_emit_document_start(emitter, event, true)
      -
      -	case yaml_EMIT_DOCUMENT_START_STATE:
      -		return yaml_emitter_emit_document_start(emitter, event, false)
      -
      -	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
      -		return yaml_emitter_emit_document_content(emitter, event)
      -
      -	case yaml_EMIT_DOCUMENT_END_STATE:
      -		return yaml_emitter_emit_document_end(emitter, event)
      -
      -	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
      -		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
      -
      -	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
      -		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
      -
      -	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
      -		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
      -
      -	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
      -		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
      -
      -	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
      -		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
      -
      -	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
      -		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
      -
      -	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
      -		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
      -
      -	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
      -		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
      -
      -	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
      -		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
      -
      -	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
      -		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
      -
      -	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
      -		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
      -
      -	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
      -		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
      -
      -	case yaml_EMIT_END_STATE:
      -		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
      -	}
      -	panic("invalid emitter state")
      -}
      -
      -// Expect STREAM-START.
      -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	if event.typ != yaml_STREAM_START_EVENT {
      -		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
      -	}
      -	if emitter.encoding == yaml_ANY_ENCODING {
      -		emitter.encoding = event.encoding
      -		if emitter.encoding == yaml_ANY_ENCODING {
      -			emitter.encoding = yaml_UTF8_ENCODING
      -		}
      -	}
      -	if emitter.best_indent < 2 || emitter.best_indent > 9 {
      -		emitter.best_indent = 2
      -	}
      -	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
      -		emitter.best_width = 80
      -	}
      -	if emitter.best_width < 0 {
      -		emitter.best_width = 1<<31 - 1
      -	}
      -	if emitter.line_break == yaml_ANY_BREAK {
      -		emitter.line_break = yaml_LN_BREAK
      -	}
      -
      -	emitter.indent = -1
      -	emitter.line = 0
      -	emitter.column = 0
      -	emitter.whitespace = true
      -	emitter.indention = true
      -
      -	if emitter.encoding != yaml_UTF8_ENCODING {
      -		if !yaml_emitter_write_bom(emitter) {
      -			return false
      -		}
      -	}
      -	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
      -	return true
      -}
      -
      -// Expect DOCUMENT-START or STREAM-END.
      -func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
      -
      -	if event.typ == yaml_DOCUMENT_START_EVENT {
      -
      -		if event.version_directive != nil {
      -			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
      -				return false
      -			}
      -		}
      -
      -		for i := 0; i < len(event.tag_directives); i++ {
      -			tag_directive := &event.tag_directives[i]
      -			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
      -				return false
      -			}
      -			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
      -				return false
      -			}
      -		}
      -
      -		for i := 0; i < len(default_tag_directives); i++ {
      -			tag_directive := &default_tag_directives[i]
      -			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
      -				return false
      -			}
      -		}
      -
      -		implicit := event.implicit
      -		if !first || emitter.canonical {
      -			implicit = false
      -		}
      -
      -		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
      -			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -		}
      -
      -		if event.version_directive != nil {
      -			implicit = false
      -			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -		}
      -
      -		if len(event.tag_directives) > 0 {
      -			implicit = false
      -			for i := 0; i < len(event.tag_directives); i++ {
      -				tag_directive := &event.tag_directives[i]
      -				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
      -					return false
      -				}
      -				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
      -					return false
      -				}
      -				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
      -					return false
      -				}
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -			}
      -		}
      -
      -		if yaml_emitter_check_empty_document(emitter) {
      -			implicit = false
      -		}
      -		if !implicit {
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
      -				return false
      -			}
      -			if emitter.canonical {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -			}
      -		}
      -
      -		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
      -		return true
      -	}
      -
      -	if event.typ == yaml_STREAM_END_EVENT {
      -		if emitter.open_ended {
      -			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -		}
      -		if !yaml_emitter_flush(emitter) {
      -			return false
      -		}
      -		emitter.state = yaml_EMIT_END_STATE
      -		return true
      -	}
      -
      -	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
      -}
      -
      -// Expect the root node.
      -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
      -	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
      -}
      -
      -// Expect DOCUMENT-END.
      -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	if event.typ != yaml_DOCUMENT_END_EVENT {
      -		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
      -	}
      -	if !yaml_emitter_write_indent(emitter) {
      -		return false
      -	}
      -	if !event.implicit {
      -		// [Go] Allocate the slice elsewhere.
      -		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
      -			return false
      -		}
      -		if !yaml_emitter_write_indent(emitter) {
      -			return false
      -		}
      -	}
      -	if !yaml_emitter_flush(emitter) {
      -		return false
      -	}
      -	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
      -	emitter.tag_directives = emitter.tag_directives[:0]
      -	return true
      -}
      -
      -// Expect a flow item node.
      -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
      -			return false
      -		}
      -		if !yaml_emitter_increase_indent(emitter, true, false) {
      -			return false
      -		}
      -		emitter.flow_level++
      -	}
      -
      -	if event.typ == yaml_SEQUENCE_END_EVENT {
      -		emitter.flow_level--
      -		emitter.indent = emitter.indents[len(emitter.indents)-1]
      -		emitter.indents = emitter.indents[:len(emitter.indents)-1]
      -		if emitter.canonical && !first {
      -			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -		}
      -		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
      -			return false
      -		}
      -		emitter.state = emitter.states[len(emitter.states)-1]
      -		emitter.states = emitter.states[:len(emitter.states)-1]
      -
      -		return true
      -	}
      -
      -	if !first {
      -		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
      -			return false
      -		}
      -	}
      -
      -	if emitter.canonical || emitter.column > emitter.best_width {
      -		if !yaml_emitter_write_indent(emitter) {
      -			return false
      -		}
      -	}
      -	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
      -	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
      -}
      -
      -// Expect a flow key node.
      -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
      -			return false
      -		}
      -		if !yaml_emitter_increase_indent(emitter, true, false) {
      -			return false
      -		}
      -		emitter.flow_level++
      -	}
      -
      -	if event.typ == yaml_MAPPING_END_EVENT {
      -		emitter.flow_level--
      -		emitter.indent = emitter.indents[len(emitter.indents)-1]
      -		emitter.indents = emitter.indents[:len(emitter.indents)-1]
      -		if emitter.canonical && !first {
      -			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
      -				return false
      -			}
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -		}
      -		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
      -			return false
      -		}
      -		emitter.state = emitter.states[len(emitter.states)-1]
      -		emitter.states = emitter.states[:len(emitter.states)-1]
      -		return true
      -	}
      -
      -	if !first {
      -		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
      -			return false
      -		}
      -	}
      -	if emitter.canonical || emitter.column > emitter.best_width {
      -		if !yaml_emitter_write_indent(emitter) {
      -			return false
      -		}
      -	}
      -
      -	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
      -		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
      -		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
      -	}
      -	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
      -		return false
      -	}
      -	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
      -	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
      -}
      -
      -// Expect a flow value node.
      -func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
      -	if simple {
      -		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
      -			return false
      -		}
      -	} else {
      -		if emitter.canonical || emitter.column > emitter.best_width {
      -			if !yaml_emitter_write_indent(emitter) {
      -				return false
      -			}
      -		}
      -		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
      -			return false
      -		}
      -	}
      -	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
      -	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
      -}
      -
      -// Expect a block item node.
      -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
      -			return false
      -		}
      -	}
      -	if event.typ == yaml_SEQUENCE_END_EVENT {
      -		emitter.indent = emitter.indents[len(emitter.indents)-1]
      -		emitter.indents = emitter.indents[:len(emitter.indents)-1]
      -		emitter.state = emitter.states[len(emitter.states)-1]
      -		emitter.states = emitter.states[:len(emitter.states)-1]
      -		return true
      -	}
      -	if !yaml_emitter_write_indent(emitter) {
      -		return false
      -	}
      -	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
      -		return false
      -	}
      -	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
      -	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
      -}
      -
      -// Expect a block key node.
      -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		if !yaml_emitter_increase_indent(emitter, false, false) {
      -			return false
      -		}
      -	}
      -	if event.typ == yaml_MAPPING_END_EVENT {
      -		emitter.indent = emitter.indents[len(emitter.indents)-1]
      -		emitter.indents = emitter.indents[:len(emitter.indents)-1]
      -		emitter.state = emitter.states[len(emitter.states)-1]
      -		emitter.states = emitter.states[:len(emitter.states)-1]
      -		return true
      -	}
      -	if !yaml_emitter_write_indent(emitter) {
      -		return false
      -	}
      -	if yaml_emitter_check_simple_key(emitter) {
      -		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
      -		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
      -	}
      -	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
      -		return false
      -	}
      -	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
      -	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
      -}
      -
      -// Expect a block value node.
      -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
      -	if simple {
      -		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
      -			return false
      -		}
      -	} else {
      -		if !yaml_emitter_write_indent(emitter) {
      -			return false
      -		}
      -		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
      -			return false
      -		}
      -	}
      -	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
      -	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
      -}
      -
      -// Expect a node.
      -func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
      -	root bool, sequence bool, mapping bool, simple_key bool) bool {
      -
      -	emitter.root_context = root
      -	emitter.sequence_context = sequence
      -	emitter.mapping_context = mapping
      -	emitter.simple_key_context = simple_key
      -
      -	switch event.typ {
      -	case yaml_ALIAS_EVENT:
      -		return yaml_emitter_emit_alias(emitter, event)
      -	case yaml_SCALAR_EVENT:
      -		return yaml_emitter_emit_scalar(emitter, event)
      -	case yaml_SEQUENCE_START_EVENT:
      -		return yaml_emitter_emit_sequence_start(emitter, event)
      -	case yaml_MAPPING_START_EVENT:
      -		return yaml_emitter_emit_mapping_start(emitter, event)
      -	default:
      -		return yaml_emitter_set_emitter_error(emitter,
      -			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
      -	}
      -	return false
      -}
      -
      -// Expect ALIAS.
      -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	if !yaml_emitter_process_anchor(emitter) {
      -		return false
      -	}
      -	emitter.state = emitter.states[len(emitter.states)-1]
      -	emitter.states = emitter.states[:len(emitter.states)-1]
      -	return true
      -}
      -
      -// Expect SCALAR.
      -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	if !yaml_emitter_select_scalar_style(emitter, event) {
      -		return false
      -	}
      -	if !yaml_emitter_process_anchor(emitter) {
      -		return false
      -	}
      -	if !yaml_emitter_process_tag(emitter) {
      -		return false
      -	}
      -	if !yaml_emitter_increase_indent(emitter, true, false) {
      -		return false
      -	}
      -	if !yaml_emitter_process_scalar(emitter) {
      -		return false
      -	}
      -	emitter.indent = emitter.indents[len(emitter.indents)-1]
      -	emitter.indents = emitter.indents[:len(emitter.indents)-1]
      -	emitter.state = emitter.states[len(emitter.states)-1]
      -	emitter.states = emitter.states[:len(emitter.states)-1]
      -	return true
      -}
      -
      -// Expect SEQUENCE-START.
      -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	if !yaml_emitter_process_anchor(emitter) {
      -		return false
      -	}
      -	if !yaml_emitter_process_tag(emitter) {
      -		return false
      -	}
      -	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
      -		yaml_emitter_check_empty_sequence(emitter) {
      -		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
      -	} else {
      -		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
      -	}
      -	return true
      -}
      -
      -// Expect MAPPING-START.
      -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -	if !yaml_emitter_process_anchor(emitter) {
      -		return false
      -	}
      -	if !yaml_emitter_process_tag(emitter) {
      -		return false
      -	}
      -	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
      -		yaml_emitter_check_empty_mapping(emitter) {
      -		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
      -	} else {
      -		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
      -	}
      -	return true
      -}
      -
      -// Check if the document content is an empty scalar.
      -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
      -	return false // [Go] Huh?
      -}
      -
      -// Check if the next events represent an empty sequence.
      -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
      -	if len(emitter.events)-emitter.events_head < 2 {
      -		return false
      -	}
      -	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
      -		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
      -}
      -
      -// Check if the next events represent an empty mapping.
      -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
      -	if len(emitter.events)-emitter.events_head < 2 {
      -		return false
      -	}
      -	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
      -		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
      -}
      -
      -// Check if the next node can be expressed as a simple key.
      -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
      -	length := 0
      -	switch emitter.events[emitter.events_head].typ {
      -	case yaml_ALIAS_EVENT:
      -		length += len(emitter.anchor_data.anchor)
      -	case yaml_SCALAR_EVENT:
      -		if emitter.scalar_data.multiline {
      -			return false
      -		}
      -		length += len(emitter.anchor_data.anchor) +
      -			len(emitter.tag_data.handle) +
      -			len(emitter.tag_data.suffix) +
      -			len(emitter.scalar_data.value)
      -	case yaml_SEQUENCE_START_EVENT:
      -		if !yaml_emitter_check_empty_sequence(emitter) {
      -			return false
      -		}
      -		length += len(emitter.anchor_data.anchor) +
      -			len(emitter.tag_data.handle) +
      -			len(emitter.tag_data.suffix)
      -	case yaml_MAPPING_START_EVENT:
      -		if !yaml_emitter_check_empty_mapping(emitter) {
      -			return false
      -		}
      -		length += len(emitter.anchor_data.anchor) +
      -			len(emitter.tag_data.handle) +
      -			len(emitter.tag_data.suffix)
      -	default:
      -		return false
      -	}
      -	return length <= 128
      -}
      -
      -// Determine an acceptable scalar style.
      -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -
      -	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
      -	if no_tag && !event.implicit && !event.quoted_implicit {
      -		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
      -	}
      -
      -	style := event.scalar_style()
      -	if style == yaml_ANY_SCALAR_STYLE {
      -		style = yaml_PLAIN_SCALAR_STYLE
      -	}
      -	if emitter.canonical {
      -		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
      -	}
      -	if emitter.simple_key_context && emitter.scalar_data.multiline {
      -		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
      -	}
      -
      -	if style == yaml_PLAIN_SCALAR_STYLE {
      -		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
      -			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
      -			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
      -		}
      -		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
      -			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
      -		}
      -		if no_tag && !event.implicit {
      -			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
      -		}
      -	}
      -	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
      -		if !emitter.scalar_data.single_quoted_allowed {
      -			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
      -		}
      -	}
      -	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
      -		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
      -			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
      -		}
      -	}
      -
      -	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
      -		emitter.tag_data.handle = []byte{'!'}
      -	}
      -	emitter.scalar_data.style = style
      -	return true
      -}
      -
      -// Write an achor.
      -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
      -	if emitter.anchor_data.anchor == nil {
      -		return true
      -	}
      -	c := []byte{'&'}
      -	if emitter.anchor_data.alias {
      -		c[0] = '*'
      -	}
      -	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
      -		return false
      -	}
      -	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
      -}
      -
      -// Write a tag.
      -func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
      -	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
      -		return true
      -	}
      -	if len(emitter.tag_data.handle) > 0 {
      -		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
      -			return false
      -		}
      -		if len(emitter.tag_data.suffix) > 0 {
      -			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
      -				return false
      -			}
      -		}
      -	} else {
      -		// [Go] Allocate these slices elsewhere.
      -		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
      -			return false
      -		}
      -		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
      -			return false
      -		}
      -		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
      -			return false
      -		}
      -	}
      -	return true
      -}
      -
      -// Write a scalar.
      -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
      -	switch emitter.scalar_data.style {
      -	case yaml_PLAIN_SCALAR_STYLE:
      -		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
      -
      -	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
      -		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
      -
      -	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
      -		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
      -
      -	case yaml_LITERAL_SCALAR_STYLE:
      -		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
      -
      -	case yaml_FOLDED_SCALAR_STYLE:
      -		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
      -	}
      -	panic("unknown scalar style")
      -}
      -
      -// Check if a %YAML directive is valid.
      -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
      -	if version_directive.major != 1 || version_directive.minor != 1 {
      -		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
      -	}
      -	return true
      -}
      -
      -// Check if a %TAG directive is valid.
      -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
      -	handle := tag_directive.handle
      -	prefix := tag_directive.prefix
      -	if len(handle) == 0 {
      -		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
      -	}
      -	if handle[0] != '!' {
      -		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
      -	}
      -	if handle[len(handle)-1] != '!' {
      -		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
      -	}
      -	for i := 1; i < len(handle)-1; i += width(handle[i]) {
      -		if !is_alpha(handle, i) {
      -			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
      -		}
      -	}
      -	if len(prefix) == 0 {
      -		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
      -	}
      -	return true
      -}
      -
      -// Check if an anchor is valid.
      -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
      -	if len(anchor) == 0 {
      -		problem := "anchor value must not be empty"
      -		if alias {
      -			problem = "alias value must not be empty"
      -		}
      -		return yaml_emitter_set_emitter_error(emitter, problem)
      -	}
      -	for i := 0; i < len(anchor); i += width(anchor[i]) {
      -		if !is_alpha(anchor, i) {
      -			problem := "anchor value must contain alphanumerical characters only"
      -			if alias {
      -				problem = "alias value must contain alphanumerical characters only"
      -			}
      -			return yaml_emitter_set_emitter_error(emitter, problem)
      -		}
      -	}
      -	emitter.anchor_data.anchor = anchor
      -	emitter.anchor_data.alias = alias
      -	return true
      -}
      -
      -// Check if a tag is valid.
      -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
      -	if len(tag) == 0 {
      -		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
      -	}
      -	for i := 0; i < len(emitter.tag_directives); i++ {
      -		tag_directive := &emitter.tag_directives[i]
      -		if bytes.HasPrefix(tag, tag_directive.prefix) {
      -			emitter.tag_data.handle = tag_directive.handle
      -			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
      -			return true
      -		}
      -	}
      -	emitter.tag_data.suffix = tag
      -	return true
      -}
      -
      -// Check if a scalar is valid.
      -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
      -	var (
      -		block_indicators   = false
      -		flow_indicators    = false
      -		line_breaks        = false
      -		special_characters = false
      -
      -		leading_space  = false
      -		leading_break  = false
      -		trailing_space = false
      -		trailing_break = false
      -		break_space    = false
      -		space_break    = false
      -
      -		preceeded_by_whitespace = false
      -		followed_by_whitespace  = false
      -		previous_space          = false
      -		previous_break          = false
      -	)
      -
      -	emitter.scalar_data.value = value
      -
      -	if len(value) == 0 {
      -		emitter.scalar_data.multiline = false
      -		emitter.scalar_data.flow_plain_allowed = false
      -		emitter.scalar_data.block_plain_allowed = true
      -		emitter.scalar_data.single_quoted_allowed = true
      -		emitter.scalar_data.block_allowed = false
      -		return true
      -	}
      -
      -	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
      -		block_indicators = true
      -		flow_indicators = true
      -	}
      -
      -	preceeded_by_whitespace = true
      -	for i, w := 0, 0; i < len(value); i += w {
      -		w = width(value[0])
      -		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
      -
      -		if i == 0 {
      -			switch value[i] {
      -			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
      -				flow_indicators = true
      -				block_indicators = true
      -			case '?', ':':
      -				flow_indicators = true
      -				if followed_by_whitespace {
      -					block_indicators = true
      -				}
      -			case '-':
      -				if followed_by_whitespace {
      -					flow_indicators = true
      -					block_indicators = true
      -				}
      -			}
      -		} else {
      -			switch value[i] {
      -			case ',', '?', '[', ']', '{', '}':
      -				flow_indicators = true
      -			case ':':
      -				flow_indicators = true
      -				if followed_by_whitespace {
      -					block_indicators = true
      -				}
      -			case '#':
      -				if preceeded_by_whitespace {
      -					flow_indicators = true
      -					block_indicators = true
      -				}
      -			}
      -		}
      -
      -		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
      -			special_characters = true
      -		}
      -		if is_space(value, i) {
      -			if i == 0 {
      -				leading_space = true
      -			}
      -			if i+width(value[i]) == len(value) {
      -				trailing_space = true
      -			}
      -			if previous_break {
      -				break_space = true
      -			}
      -			previous_space = true
      -			previous_break = false
      -		} else if is_break(value, i) {
      -			line_breaks = true
      -			if i == 0 {
      -				leading_break = true
      -			}
      -			if i+width(value[i]) == len(value) {
      -				trailing_break = true
      -			}
      -			if previous_space {
      -				space_break = true
      -			}
      -			previous_space = false
      -			previous_break = true
      -		} else {
      -			previous_space = false
      -			previous_break = false
      -		}
      -
      -		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
      -		preceeded_by_whitespace = is_blankz(value, i)
      -	}
      -
      -	emitter.scalar_data.multiline = line_breaks
      -	emitter.scalar_data.flow_plain_allowed = true
      -	emitter.scalar_data.block_plain_allowed = true
      -	emitter.scalar_data.single_quoted_allowed = true
      -	emitter.scalar_data.block_allowed = true
      -
      -	if leading_space || leading_break || trailing_space || trailing_break {
      -		emitter.scalar_data.flow_plain_allowed = false
      -		emitter.scalar_data.block_plain_allowed = false
      -	}
      -	if trailing_space {
      -		emitter.scalar_data.block_allowed = false
      -	}
      -	if break_space {
      -		emitter.scalar_data.flow_plain_allowed = false
      -		emitter.scalar_data.block_plain_allowed = false
      -		emitter.scalar_data.single_quoted_allowed = false
      -	}
      -	if space_break || special_characters {
      -		emitter.scalar_data.flow_plain_allowed = false
      -		emitter.scalar_data.block_plain_allowed = false
      -		emitter.scalar_data.single_quoted_allowed = false
      -		emitter.scalar_data.block_allowed = false
      -	}
      -	if line_breaks {
      -		emitter.scalar_data.flow_plain_allowed = false
      -		emitter.scalar_data.block_plain_allowed = false
      -	}
      -	if flow_indicators {
      -		emitter.scalar_data.flow_plain_allowed = false
      -	}
      -	if block_indicators {
      -		emitter.scalar_data.block_plain_allowed = false
      -	}
      -	return true
      -}
      -
      -// Check if the event data is valid.
      -func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
      -
      -	emitter.anchor_data.anchor = nil
      -	emitter.tag_data.handle = nil
      -	emitter.tag_data.suffix = nil
      -	emitter.scalar_data.value = nil
      -
      -	switch event.typ {
      -	case yaml_ALIAS_EVENT:
      -		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
      -			return false
      -		}
      -
      -	case yaml_SCALAR_EVENT:
      -		if len(event.anchor) > 0 {
      -			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
      -				return false
      -			}
      -		}
      -		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
      -			if !yaml_emitter_analyze_tag(emitter, event.tag) {
      -				return false
      -			}
      -		}
      -		if !yaml_emitter_analyze_scalar(emitter, event.value) {
      -			return false
      -		}
      -
      -	case yaml_SEQUENCE_START_EVENT:
      -		if len(event.anchor) > 0 {
      -			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
      -				return false
      -			}
      -		}
      -		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
      -			if !yaml_emitter_analyze_tag(emitter, event.tag) {
      -				return false
      -			}
      -		}
      -
      -	case yaml_MAPPING_START_EVENT:
      -		if len(event.anchor) > 0 {
      -			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
      -				return false
      -			}
      -		}
      -		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
      -			if !yaml_emitter_analyze_tag(emitter, event.tag) {
      -				return false
      -			}
      -		}
      -	}
      -	return true
      -}
      -
      -// Write the BOM character.
      -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
      -	if !flush(emitter) {
      -		return false
      -	}
      -	pos := emitter.buffer_pos
      -	emitter.buffer[pos+0] = '\xEF'
      -	emitter.buffer[pos+1] = '\xBB'
      -	emitter.buffer[pos+2] = '\xBF'
      -	emitter.buffer_pos += 3
      -	return true
      -}
      -
      -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
      -	indent := emitter.indent
      -	if indent < 0 {
      -		indent = 0
      -	}
      -	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
      -		if !put_break(emitter) {
      -			return false
      -		}
      -	}
      -	for emitter.column < indent {
      -		if !put(emitter, ' ') {
      -			return false
      -		}
      -	}
      -	emitter.whitespace = true
      -	emitter.indention = true
      -	return true
      -}
      -
      -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
      -	if need_whitespace && !emitter.whitespace {
      -		if !put(emitter, ' ') {
      -			return false
      -		}
      -	}
      -	if !write_all(emitter, indicator) {
      -		return false
      -	}
      -	emitter.whitespace = is_whitespace
      -	emitter.indention = (emitter.indention && is_indention)
      -	emitter.open_ended = false
      -	return true
      -}
      -
      -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
      -	if !write_all(emitter, value) {
      -		return false
      -	}
      -	emitter.whitespace = false
      -	emitter.indention = false
      -	return true
      -}
      -
      -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
      -	if !emitter.whitespace {
      -		if !put(emitter, ' ') {
      -			return false
      -		}
      -	}
      -	if !write_all(emitter, value) {
      -		return false
      -	}
      -	emitter.whitespace = false
      -	emitter.indention = false
      -	return true
      -}
      -
      -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
      -	if need_whitespace && !emitter.whitespace {
      -		if !put(emitter, ' ') {
      -			return false
      -		}
      -	}
      -	for i := 0; i < len(value); {
      -		var must_write bool
      -		switch value[i] {
      -		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
      -			must_write = true
      -		default:
      -			must_write = is_alpha(value, i)
      -		}
      -		if must_write {
      -			if !write(emitter, value, &i) {
      -				return false
      -			}
      -		} else {
      -			w := width(value[i])
      -			for k := 0; k < w; k++ {
      -				octet := value[i]
      -				i++
      -				if !put(emitter, '%') {
      -					return false
      -				}
      -
      -				c := octet >> 4
      -				if c < 10 {
      -					c += '0'
      -				} else {
      -					c += 'A' - 10
      -				}
      -				if !put(emitter, c) {
      -					return false
      -				}
      -
      -				c = octet & 0x0f
      -				if c < 10 {
      -					c += '0'
      -				} else {
      -					c += 'A' - 10
      -				}
      -				if !put(emitter, c) {
      -					return false
      -				}
      -			}
      -		}
      -	}
      -	emitter.whitespace = false
      -	emitter.indention = false
      -	return true
      -}
      -
      -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
      -	if !emitter.whitespace {
      -		if !put(emitter, ' ') {
      -			return false
      -		}
      -	}
      -
      -	spaces := false
      -	breaks := false
      -	for i := 0; i < len(value); {
      -		if is_space(value, i) {
      -			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -				i += width(value[i])
      -			} else {
      -				if !write(emitter, value, &i) {
      -					return false
      -				}
      -			}
      -			spaces = true
      -		} else if is_break(value, i) {
      -			if !breaks && value[i] == '\n' {
      -				if !put_break(emitter) {
      -					return false
      -				}
      -			}
      -			if !write_break(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = true
      -			breaks = true
      -		} else {
      -			if breaks {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -			}
      -			if !write(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = false
      -			spaces = false
      -			breaks = false
      -		}
      -	}
      -
      -	emitter.whitespace = false
      -	emitter.indention = false
      -	if emitter.root_context {
      -		emitter.open_ended = true
      -	}
      -
      -	return true
      -}
      -
      -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
      -
      -	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
      -		return false
      -	}
      -
      -	spaces := false
      -	breaks := false
      -	for i := 0; i < len(value); {
      -		if is_space(value, i) {
      -			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -				i += width(value[i])
      -			} else {
      -				if !write(emitter, value, &i) {
      -					return false
      -				}
      -			}
      -			spaces = true
      -		} else if is_break(value, i) {
      -			if !breaks && value[i] == '\n' {
      -				if !put_break(emitter) {
      -					return false
      -				}
      -			}
      -			if !write_break(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = true
      -			breaks = true
      -		} else {
      -			if breaks {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -			}
      -			if value[i] == '\'' {
      -				if !put(emitter, '\'') {
      -					return false
      -				}
      -			}
      -			if !write(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = false
      -			spaces = false
      -			breaks = false
      -		}
      -	}
      -	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
      -		return false
      -	}
      -	emitter.whitespace = false
      -	emitter.indention = false
      -	return true
      -}
      -
      -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
      -	spaces := false
      -	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
      -		return false
      -	}
      -
      -	for i := 0; i < len(value); {
      -		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
      -			is_bom(value, i) || is_break(value, i) ||
      -			value[i] == '"' || value[i] == '\\' {
      -
      -			octet := value[i]
      -
      -			var w int
      -			var v rune
      -			switch {
      -			case octet&0x80 == 0x00:
      -				w, v = 1, rune(octet&0x7F)
      -			case octet&0xE0 == 0xC0:
      -				w, v = 2, rune(octet&0x1F)
      -			case octet&0xF0 == 0xE0:
      -				w, v = 3, rune(octet&0x0F)
      -			case octet&0xF8 == 0xF0:
      -				w, v = 4, rune(octet&0x07)
      -			}
      -			for k := 1; k < w; k++ {
      -				octet = value[i+k]
      -				v = (v << 6) + (rune(octet) & 0x3F)
      -			}
      -			i += w
      -
      -			if !put(emitter, '\\') {
      -				return false
      -			}
      -
      -			var ok bool
      -			switch v {
      -			case 0x00:
      -				ok = put(emitter, '0')
      -			case 0x07:
      -				ok = put(emitter, 'a')
      -			case 0x08:
      -				ok = put(emitter, 'b')
      -			case 0x09:
      -				ok = put(emitter, 't')
      -			case 0x0A:
      -				ok = put(emitter, 'n')
      -			case 0x0b:
      -				ok = put(emitter, 'v')
      -			case 0x0c:
      -				ok = put(emitter, 'f')
      -			case 0x0d:
      -				ok = put(emitter, 'r')
      -			case 0x1b:
      -				ok = put(emitter, 'e')
      -			case 0x22:
      -				ok = put(emitter, '"')
      -			case 0x5c:
      -				ok = put(emitter, '\\')
      -			case 0x85:
      -				ok = put(emitter, 'N')
      -			case 0xA0:
      -				ok = put(emitter, '_')
      -			case 0x2028:
      -				ok = put(emitter, 'L')
      -			case 0x2029:
      -				ok = put(emitter, 'P')
      -			default:
      -				if v <= 0xFF {
      -					ok = put(emitter, 'x')
      -					w = 2
      -				} else if v <= 0xFFFF {
      -					ok = put(emitter, 'u')
      -					w = 4
      -				} else {
      -					ok = put(emitter, 'U')
      -					w = 8
      -				}
      -				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
      -					digit := byte((v >> uint(k)) & 0x0F)
      -					if digit < 10 {
      -						ok = put(emitter, digit+'0')
      -					} else {
      -						ok = put(emitter, digit+'A'-10)
      -					}
      -				}
      -			}
      -			if !ok {
      -				return false
      -			}
      -			spaces = false
      -		} else if is_space(value, i) {
      -			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -				if is_space(value, i+1) {
      -					if !put(emitter, '\\') {
      -						return false
      -					}
      -				}
      -				i += width(value[i])
      -			} else if !write(emitter, value, &i) {
      -				return false
      -			}
      -			spaces = true
      -		} else {
      -			if !write(emitter, value, &i) {
      -				return false
      -			}
      -			spaces = false
      -		}
      -	}
      -	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
      -		return false
      -	}
      -	emitter.whitespace = false
      -	emitter.indention = false
      -	return true
      -}
      -
      -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
      -	if is_space(value, 0) || is_break(value, 0) {
      -		indent_hint := []byte{'0' + byte(emitter.best_indent)}
      -		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
      -			return false
      -		}
      -	}
      -
      -	emitter.open_ended = false
      -
      -	var chomp_hint [1]byte
      -	if len(value) == 0 {
      -		chomp_hint[0] = '-'
      -	} else {
      -		i := len(value) - 1
      -		for value[i]&0xC0 == 0x80 {
      -			i--
      -		}
      -		if !is_break(value, i) {
      -			chomp_hint[0] = '-'
      -		} else if i == 0 {
      -			chomp_hint[0] = '+'
      -			emitter.open_ended = true
      -		} else {
      -			i--
      -			for value[i]&0xC0 == 0x80 {
      -				i--
      -			}
      -			if is_break(value, i) {
      -				chomp_hint[0] = '+'
      -				emitter.open_ended = true
      -			}
      -		}
      -	}
      -	if chomp_hint[0] != 0 {
      -		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
      -			return false
      -		}
      -	}
      -	return true
      -}
      -
      -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
      -	if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
      -		return false
      -	}
      -	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
      -		return false
      -	}
      -	if !put_break(emitter) {
      -		return false
      -	}
      -	emitter.indention = true
      -	emitter.whitespace = true
      -	breaks := true
      -	for i := 0; i < len(value); {
      -		if is_break(value, i) {
      -			if !write_break(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = true
      -			breaks = true
      -		} else {
      -			if breaks {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -			}
      -			if !write(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = false
      -			breaks = false
      -		}
      -	}
      -
      -	return true
      -}
      -
      -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
      -	if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
      -		return false
      -	}
      -	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
      -		return false
      -	}
      -
      -	if !put_break(emitter) {
      -		return false
      -	}
      -	emitter.indention = true
      -	emitter.whitespace = true
      -
      -	breaks := true
      -	leading_spaces := true
      -	for i := 0; i < len(value); {
      -		if is_break(value, i) {
      -			if !breaks && !leading_spaces && value[i] == '\n' {
      -				k := 0
      -				for is_break(value, k) {
      -					k += width(value[k])
      -				}
      -				if !is_blankz(value, k) {
      -					if !put_break(emitter) {
      -						return false
      -					}
      -				}
      -			}
      -			if !write_break(emitter, value, &i) {
      -				return false
      -			}
      -			emitter.indention = true
      -			breaks = true
      -		} else {
      -			if breaks {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -				leading_spaces = is_blank(value, i)
      -			}
      -			if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
      -				if !yaml_emitter_write_indent(emitter) {
      -					return false
      -				}
      -				i += width(value[i])
      -			} else {
      -				if !write(emitter, value, &i) {
      -					return false
      -				}
      -			}
      -			emitter.indention = false
      -			breaks = false
      -		}
      -	}
      -	return true
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/encode.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/encode.go
      deleted file mode 100644
      index 84f84995..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/encode.go
      +++ /dev/null
      @@ -1,306 +0,0 @@
      -package yaml
      -
      -import (
      -	"encoding"
      -	"fmt"
      -	"reflect"
      -	"regexp"
      -	"sort"
      -	"strconv"
      -	"strings"
      -	"time"
      -)
      -
      -type encoder struct {
      -	emitter yaml_emitter_t
      -	event   yaml_event_t
      -	out     []byte
      -	flow    bool
      -}
      -
      -func newEncoder() (e *encoder) {
      -	e = &encoder{}
      -	e.must(yaml_emitter_initialize(&e.emitter))
      -	yaml_emitter_set_output_string(&e.emitter, &e.out)
      -	yaml_emitter_set_unicode(&e.emitter, true)
      -	e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
      -	e.emit()
      -	e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
      -	e.emit()
      -	return e
      -}
      -
      -func (e *encoder) finish() {
      -	e.must(yaml_document_end_event_initialize(&e.event, true))
      -	e.emit()
      -	e.emitter.open_ended = false
      -	e.must(yaml_stream_end_event_initialize(&e.event))
      -	e.emit()
      -}
      -
      -func (e *encoder) destroy() {
      -	yaml_emitter_delete(&e.emitter)
      -}
      -
      -func (e *encoder) emit() {
      -	// This will internally delete the e.event value.
      -	if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
      -		e.must(false)
      -	}
      -}
      -
      -func (e *encoder) must(ok bool) {
      -	if !ok {
      -		msg := e.emitter.problem
      -		if msg == "" {
      -			msg = "unknown problem generating YAML content"
      -		}
      -		failf("%s", msg)
      -	}
      -}
      -
      -func (e *encoder) marshal(tag string, in reflect.Value) {
      -	if !in.IsValid() {
      -		e.nilv()
      -		return
      -	}
      -	iface := in.Interface()
      -	if m, ok := iface.(Marshaler); ok {
      -		v, err := m.MarshalYAML()
      -		if err != nil {
      -			fail(err)
      -		}
      -		if v == nil {
      -			e.nilv()
      -			return
      -		}
      -		in = reflect.ValueOf(v)
      -	} else if m, ok := iface.(encoding.TextMarshaler); ok {
      -		text, err := m.MarshalText()
      -		if err != nil {
      -			fail(err)
      -		}
      -		in = reflect.ValueOf(string(text))
      -	}
      -	switch in.Kind() {
      -	case reflect.Interface:
      -		if in.IsNil() {
      -			e.nilv()
      -		} else {
      -			e.marshal(tag, in.Elem())
      -		}
      -	case reflect.Map:
      -		e.mapv(tag, in)
      -	case reflect.Ptr:
      -		if in.IsNil() {
      -			e.nilv()
      -		} else {
      -			e.marshal(tag, in.Elem())
      -		}
      -	case reflect.Struct:
      -		e.structv(tag, in)
      -	case reflect.Slice:
      -		if in.Type().Elem() == mapItemType {
      -			e.itemsv(tag, in)
      -		} else {
      -			e.slicev(tag, in)
      -		}
      -	case reflect.String:
      -		e.stringv(tag, in)
      -	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
      -		if in.Type() == durationType {
      -			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
      -		} else {
      -			e.intv(tag, in)
      -		}
      -	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
      -		e.uintv(tag, in)
      -	case reflect.Float32, reflect.Float64:
      -		e.floatv(tag, in)
      -	case reflect.Bool:
      -		e.boolv(tag, in)
      -	default:
      -		panic("cannot marshal type: " + in.Type().String())
      -	}
      -}
      -
      -func (e *encoder) mapv(tag string, in reflect.Value) {
      -	e.mappingv(tag, func() {
      -		keys := keyList(in.MapKeys())
      -		sort.Sort(keys)
      -		for _, k := range keys {
      -			e.marshal("", k)
      -			e.marshal("", in.MapIndex(k))
      -		}
      -	})
      -}
      -
      -func (e *encoder) itemsv(tag string, in reflect.Value) {
      -	e.mappingv(tag, func() {
      -		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
      -		for _, item := range slice {
      -			e.marshal("", reflect.ValueOf(item.Key))
      -			e.marshal("", reflect.ValueOf(item.Value))
      -		}
      -	})
      -}
      -
      -func (e *encoder) structv(tag string, in reflect.Value) {
      -	sinfo, err := getStructInfo(in.Type())
      -	if err != nil {
      -		panic(err)
      -	}
      -	e.mappingv(tag, func() {
      -		for _, info := range sinfo.FieldsList {
      -			var value reflect.Value
      -			if info.Inline == nil {
      -				value = in.Field(info.Num)
      -			} else {
      -				value = in.FieldByIndex(info.Inline)
      -			}
      -			if info.OmitEmpty && isZero(value) {
      -				continue
      -			}
      -			e.marshal("", reflect.ValueOf(info.Key))
      -			e.flow = info.Flow
      -			e.marshal("", value)
      -		}
      -		if sinfo.InlineMap >= 0 {
      -			m := in.Field(sinfo.InlineMap)
      -			if m.Len() > 0 {
      -				e.flow = false
      -				keys := keyList(m.MapKeys())
      -				sort.Sort(keys)
      -				for _, k := range keys {
      -					if _, found := sinfo.FieldsMap[k.String()]; found {
      -						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
      -					}
      -					e.marshal("", k)
      -					e.flow = false
      -					e.marshal("", m.MapIndex(k))
      -				}
      -			}
      -		}
      -	})
      -}
      -
      -func (e *encoder) mappingv(tag string, f func()) {
      -	implicit := tag == ""
      -	style := yaml_BLOCK_MAPPING_STYLE
      -	if e.flow {
      -		e.flow = false
      -		style = yaml_FLOW_MAPPING_STYLE
      -	}
      -	e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
      -	e.emit()
      -	f()
      -	e.must(yaml_mapping_end_event_initialize(&e.event))
      -	e.emit()
      -}
      -
      -func (e *encoder) slicev(tag string, in reflect.Value) {
      -	implicit := tag == ""
      -	style := yaml_BLOCK_SEQUENCE_STYLE
      -	if e.flow {
      -		e.flow = false
      -		style = yaml_FLOW_SEQUENCE_STYLE
      -	}
      -	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
      -	e.emit()
      -	n := in.Len()
      -	for i := 0; i < n; i++ {
      -		e.marshal("", in.Index(i))
      -	}
      -	e.must(yaml_sequence_end_event_initialize(&e.event))
      -	e.emit()
      -}
      -
      -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
      -//
      -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
      -// in YAML 1.2 and by this package, but these should be marshalled quoted for
      -// the time being for compatibility with other parsers.
      -func isBase60Float(s string) (result bool) {
      -	// Fast path.
      -	if s == "" {
      -		return false
      -	}
      -	c := s[0]
      -	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
      -		return false
      -	}
      -	// Do the full match.
      -	return base60float.MatchString(s)
      -}
      -
      -// From http://yaml.org/type/float.html, except the regular expression there
      -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
      -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
      -
      -func (e *encoder) stringv(tag string, in reflect.Value) {
      -	var style yaml_scalar_style_t
      -	s := in.String()
      -	rtag, rs := resolve("", s)
      -	if rtag == yaml_BINARY_TAG {
      -		if tag == "" || tag == yaml_STR_TAG {
      -			tag = rtag
      -			s = rs.(string)
      -		} else if tag == yaml_BINARY_TAG {
      -			failf("explicitly tagged !!binary data must be base64-encoded")
      -		} else {
      -			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
      -		}
      -	}
      -	if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
      -		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
      -	} else if strings.Contains(s, "\n") {
      -		style = yaml_LITERAL_SCALAR_STYLE
      -	} else {
      -		style = yaml_PLAIN_SCALAR_STYLE
      -	}
      -	e.emitScalar(s, "", tag, style)
      -}
      -
      -func (e *encoder) boolv(tag string, in reflect.Value) {
      -	var s string
      -	if in.Bool() {
      -		s = "true"
      -	} else {
      -		s = "false"
      -	}
      -	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
      -}
      -
      -func (e *encoder) intv(tag string, in reflect.Value) {
      -	s := strconv.FormatInt(in.Int(), 10)
      -	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
      -}
      -
      -func (e *encoder) uintv(tag string, in reflect.Value) {
      -	s := strconv.FormatUint(in.Uint(), 10)
      -	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
      -}
      -
      -func (e *encoder) floatv(tag string, in reflect.Value) {
      -	// FIXME: Handle 64 bits here.
      -	s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
      -	switch s {
      -	case "+Inf":
      -		s = ".inf"
      -	case "-Inf":
      -		s = "-.inf"
      -	case "NaN":
      -		s = ".nan"
      -	}
      -	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
      -}
      -
      -func (e *encoder) nilv() {
      -	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
      -}
      -
      -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
      -	implicit := tag == ""
      -	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
      -	e.emit()
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/encode_test.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/encode_test.go
      deleted file mode 100644
      index ba68ad29..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/encode_test.go
      +++ /dev/null
      @@ -1,485 +0,0 @@
      -package yaml_test
      -
      -import (
      -	"fmt"
      -	"math"
      -	"strconv"
      -	"strings"
      -	"time"
      -
      -	. "gopkg.in/check.v1"
      -	"gopkg.in/yaml.v2"
      -	"net"
      -	"os"
      -)
      -
      -var marshalIntTest = 123
      -
      -var marshalTests = []struct {
      -	value interface{}
      -	data  string
      -}{
      -	{
      -		nil,
      -		"null\n",
      -	}, {
      -		&struct{}{},
      -		"{}\n",
      -	}, {
      -		map[string]string{"v": "hi"},
      -		"v: hi\n",
      -	}, {
      -		map[string]interface{}{"v": "hi"},
      -		"v: hi\n",
      -	}, {
      -		map[string]string{"v": "true"},
      -		"v: \"true\"\n",
      -	}, {
      -		map[string]string{"v": "false"},
      -		"v: \"false\"\n",
      -	}, {
      -		map[string]interface{}{"v": true},
      -		"v: true\n",
      -	}, {
      -		map[string]interface{}{"v": false},
      -		"v: false\n",
      -	}, {
      -		map[string]interface{}{"v": 10},
      -		"v: 10\n",
      -	}, {
      -		map[string]interface{}{"v": -10},
      -		"v: -10\n",
      -	}, {
      -		map[string]uint{"v": 42},
      -		"v: 42\n",
      -	}, {
      -		map[string]interface{}{"v": int64(4294967296)},
      -		"v: 4294967296\n",
      -	}, {
      -		map[string]int64{"v": int64(4294967296)},
      -		"v: 4294967296\n",
      -	}, {
      -		map[string]uint64{"v": 4294967296},
      -		"v: 4294967296\n",
      -	}, {
      -		map[string]interface{}{"v": "10"},
      -		"v: \"10\"\n",
      -	}, {
      -		map[string]interface{}{"v": 0.1},
      -		"v: 0.1\n",
      -	}, {
      -		map[string]interface{}{"v": float64(0.1)},
      -		"v: 0.1\n",
      -	}, {
      -		map[string]interface{}{"v": -0.1},
      -		"v: -0.1\n",
      -	}, {
      -		map[string]interface{}{"v": math.Inf(+1)},
      -		"v: .inf\n",
      -	}, {
      -		map[string]interface{}{"v": math.Inf(-1)},
      -		"v: -.inf\n",
      -	}, {
      -		map[string]interface{}{"v": math.NaN()},
      -		"v: .nan\n",
      -	}, {
      -		map[string]interface{}{"v": nil},
      -		"v: null\n",
      -	}, {
      -		map[string]interface{}{"v": ""},
      -		"v: \"\"\n",
      -	}, {
      -		map[string][]string{"v": []string{"A", "B"}},
      -		"v:\n- A\n- B\n",
      -	}, {
      -		map[string][]string{"v": []string{"A", "B\nC"}},
      -		"v:\n- A\n- |-\n  B\n  C\n",
      -	}, {
      -		map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
      -		"v:\n- A\n- 1\n- B:\n  - 2\n  - 3\n",
      -	}, {
      -		map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
      -		"a:\n  b: c\n",
      -	}, {
      -		map[string]interface{}{"a": "-"},
      -		"a: '-'\n",
      -	},
      -
      -	// Simple values.
      -	{
      -		&marshalIntTest,
      -		"123\n",
      -	},
      -
      -	// Structures
      -	{
      -		&struct{ Hello string }{"world"},
      -		"hello: world\n",
      -	}, {
      -		&struct {
      -			A struct {
      -				B string
      -			}
      -		}{struct{ B string }{"c"}},
      -		"a:\n  b: c\n",
      -	}, {
      -		&struct {
      -			A *struct {
      -				B string
      -			}
      -		}{&struct{ B string }{"c"}},
      -		"a:\n  b: c\n",
      -	}, {
      -		&struct {
      -			A *struct {
      -				B string
      -			}
      -		}{},
      -		"a: null\n",
      -	}, {
      -		&struct{ A int }{1},
      -		"a: 1\n",
      -	}, {
      -		&struct{ A []int }{[]int{1, 2}},
      -		"a:\n- 1\n- 2\n",
      -	}, {
      -		&struct {
      -			B int "a"
      -		}{1},
      -		"a: 1\n",
      -	}, {
      -		&struct{ A bool }{true},
      -		"a: true\n",
      -	},
      -
      -	// Conditional flag
      -	{
      -		&struct {
      -			A int "a,omitempty"
      -			B int "b,omitempty"
      -		}{1, 0},
      -		"a: 1\n",
      -	}, {
      -		&struct {
      -			A int "a,omitempty"
      -			B int "b,omitempty"
      -		}{0, 0},
      -		"{}\n",
      -	}, {
      -		&struct {
      -			A *struct{ X, y int } "a,omitempty,flow"
      -		}{&struct{ X, y int }{1, 2}},
      -		"a: {x: 1}\n",
      -	}, {
      -		&struct {
      -			A *struct{ X, y int } "a,omitempty,flow"
      -		}{nil},
      -		"{}\n",
      -	}, {
      -		&struct {
      -			A *struct{ X, y int } "a,omitempty,flow"
      -		}{&struct{ X, y int }{}},
      -		"a: {x: 0}\n",
      -	}, {
      -		&struct {
      -			A struct{ X, y int } "a,omitempty,flow"
      -		}{struct{ X, y int }{1, 2}},
      -		"a: {x: 1}\n",
      -	}, {
      -		&struct {
      -			A struct{ X, y int } "a,omitempty,flow"
      -		}{struct{ X, y int }{0, 1}},
      -		"{}\n",
      -	},
      -
      -	// Flow flag
      -	{
      -		&struct {
      -			A []int "a,flow"
      -		}{[]int{1, 2}},
      -		"a: [1, 2]\n",
      -	}, {
      -		&struct {
      -			A map[string]string "a,flow"
      -		}{map[string]string{"b": "c", "d": "e"}},
      -		"a: {b: c, d: e}\n",
      -	}, {
      -		&struct {
      -			A struct {
      -				B, D string
      -			} "a,flow"
      -		}{struct{ B, D string }{"c", "e"}},
      -		"a: {b: c, d: e}\n",
      -	},
      -
      -	// Unexported field
      -	{
      -		&struct {
      -			u int
      -			A int
      -		}{0, 1},
      -		"a: 1\n",
      -	},
      -
      -	// Ignored field
      -	{
      -		&struct {
      -			A int
      -			B int "-"
      -		}{1, 2},
      -		"a: 1\n",
      -	},
      -
      -	// Struct inlining
      -	{
      -		&struct {
      -			A int
      -			C inlineB `yaml:",inline"`
      -		}{1, inlineB{2, inlineC{3}}},
      -		"a: 1\nb: 2\nc: 3\n",
      -	},
      -
      -	// Map inlining
      -	{
      -		&struct {
      -			A int
      -			C map[string]int `yaml:",inline"`
      -		}{1, map[string]int{"b": 2, "c": 3}},
      -		"a: 1\nb: 2\nc: 3\n",
      -	},
      -
      -	// Duration
      -	{
      -		map[string]time.Duration{"a": 3 * time.Second},
      -		"a: 3s\n",
      -	},
      -
      -	// Issue #24: bug in map merging logic.
      -	{
      -		map[string]string{"a": "<foo>"},
      -		"a: <foo>\n",
      -	},
      -
      -	// Issue #34: marshal unsupported base 60 floats quoted for compatibility
      -	// with old YAML 1.1 parsers.
      -	{
      -		map[string]string{"a": "1:1"},
      -		"a: \"1:1\"\n",
      -	},
      -
      -	// Binary data.
      -	{
      -		map[string]string{"a": "\x00"},
      -		"a: \"\\0\"\n",
      -	}, {
      -		map[string]string{"a": "\x80\x81\x82"},
      -		"a: !!binary gIGC\n",
      -	}, {
      -		map[string]string{"a": strings.Repeat("\x90", 54)},
      -		"a: !!binary |\n  " + strings.Repeat("kJCQ", 17) + "kJ\n  CQ\n",
      -	},
      -
      -	// Ordered maps.
      -	{
      -		&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
      -		"b: 2\na: 1\nd: 4\nc: 3\nsub:\n  e: 5\n",
      -	},
      -
      -	// Encode unicode as utf-8 rather than in escaped form.
      -	{
      -		map[string]string{"a": "你好"},
      -		"a: 你好\n",
      -	},
      -
      -	// Support encoding.TextMarshaler.
      -	{
      -		map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
      -		"a: 1.2.3.4\n",
      -	},
      -	{
      -		map[string]time.Time{"a": time.Unix(1424801979, 0)},
      -		"a: 2015-02-24T18:19:39Z\n",
      -	},
      -
      -	// Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
      -	{
      -		map[string]string{"a": "b: c"},
      -		"a: 'b: c'\n",
      -	},
      -}
      -
      -func (s *S) TestMarshal(c *C) {
      -	defer os.Setenv("TZ", os.Getenv("TZ"))
      -	os.Setenv("TZ", "UTC")
      -	for _, item := range marshalTests {
      -		data, err := yaml.Marshal(item.value)
      -		c.Assert(err, IsNil)
      -		c.Assert(string(data), Equals, item.data)
      -	}
      -}
      -
      -var marshalErrorTests = []struct {
      -	value interface{}
      -	error string
      -	panic string
      -}{{
      -	value: &struct {
      -		B       int
      -		inlineB ",inline"
      -	}{1, inlineB{2, inlineC{3}}},
      -	panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
      -}, {
      -	value: &struct {
      -		A       int
      -		B map[string]int ",inline"
      -	}{1, map[string]int{"a": 2}},
      -	panic: `Can't have key "a" in inlined map; conflicts with struct field`,
      -}}
      -
      -func (s *S) TestMarshalErrors(c *C) {
      -	for _, item := range marshalErrorTests {
      -		if item.panic != "" {
      -			c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
      -		} else {
      -			_, err := yaml.Marshal(item.value)
      -			c.Assert(err, ErrorMatches, item.error)
      -		}
      -	}
      -}
      -
      -func (s *S) TestMarshalTypeCache(c *C) {
      -	var data []byte
      -	var err error
      -	func() {
      -		type T struct{ A int }
      -		data, err = yaml.Marshal(&T{})
      -		c.Assert(err, IsNil)
      -	}()
      -	func() {
      -		type T struct{ B int }
      -		data, err = yaml.Marshal(&T{})
      -		c.Assert(err, IsNil)
      -	}()
      -	c.Assert(string(data), Equals, "b: 0\n")
      -}
      -
      -var marshalerTests = []struct {
      -	data  string
      -	value interface{}
      -}{
      -	{"_:\n  hi: there\n", map[interface{}]interface{}{"hi": "there"}},
      -	{"_:\n- 1\n- A\n", []interface{}{1, "A"}},
      -	{"_: 10\n", 10},
      -	{"_: null\n", nil},
      -	{"_: BAR!\n", "BAR!"},
      -}
      -
      -type marshalerType struct {
      -	value interface{}
      -}
      -
      -func (o marshalerType) MarshalText() ([]byte, error) {
      -	panic("MarshalText called on type with MarshalYAML")
      -}
      -
      -func (o marshalerType) MarshalYAML() (interface{}, error) {
      -	return o.value, nil
      -}
      -
      -type marshalerValue struct {
      -	Field marshalerType "_"
      -}
      -
      -func (s *S) TestMarshaler(c *C) {
      -	for _, item := range marshalerTests {
      -		obj := &marshalerValue{}
      -		obj.Field.value = item.value
      -		data, err := yaml.Marshal(obj)
      -		c.Assert(err, IsNil)
      -		c.Assert(string(data), Equals, string(item.data))
      -	}
      -}
      -
      -func (s *S) TestMarshalerWholeDocument(c *C) {
      -	obj := &marshalerType{}
      -	obj.value = map[string]string{"hello": "world!"}
      -	data, err := yaml.Marshal(obj)
      -	c.Assert(err, IsNil)
      -	c.Assert(string(data), Equals, "hello: world!\n")
      -}
      -
      -type failingMarshaler struct{}
      -
      -func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
      -	return nil, failingErr
      -}
      -
      -func (s *S) TestMarshalerError(c *C) {
      -	_, err := yaml.Marshal(&failingMarshaler{})
      -	c.Assert(err, Equals, failingErr)
      -}
      -
      -func (s *S) TestSortedOutput(c *C) {
      -	order := []interface{}{
      -		false,
      -		true,
      -		1,
      -		uint(1),
      -		1.0,
      -		1.1,
      -		1.2,
      -		2,
      -		uint(2),
      -		2.0,
      -		2.1,
      -		"",
      -		".1",
      -		".2",
      -		".a",
      -		"1",
      -		"2",
      -		"a!10",
      -		"a/2",
      -		"a/10",
      -		"a~10",
      -		"ab/1",
      -		"b/1",
      -		"b/01",
      -		"b/2",
      -		"b/02",
      -		"b/3",
      -		"b/03",
      -		"b1",
      -		"b01",
      -		"b3",
      -		"c2.10",
      -		"c10.2",
      -		"d1",
      -		"d12",
      -		"d12a",
      -	}
      -	m := make(map[interface{}]int)
      -	for _, k := range order {
      -		m[k] = 1
      -	}
      -	data, err := yaml.Marshal(m)
      -	c.Assert(err, IsNil)
      -	out := "\n" + string(data)
      -	last := 0
      -	for i, k := range order {
      -		repr := fmt.Sprint(k)
      -		if s, ok := k.(string); ok {
      -			if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
      -				repr = `"` + repr + `"`
      -			}
      -		}
      -		index := strings.Index(out, "\n"+repr+":")
      -		if index == -1 {
      -			c.Fatalf("%#v is not in the output: %#v", k, out)
      -		}
      -		if index < last {
      -			c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
      -		}
      -		last = index
      -	}
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/parserc.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/parserc.go
      deleted file mode 100644
      index 0a7037ad..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/parserc.go
      +++ /dev/null
      @@ -1,1096 +0,0 @@
      -package yaml
      -
      -import (
      -	"bytes"
      -)
      -
      -// The parser implements the following grammar:
      -//
      -// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
      -// implicit_document    ::= block_node DOCUMENT-END*
      -// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
      -// block_node_or_indentless_sequence    ::=
      -//                          ALIAS
      -//                          | properties (block_content | indentless_block_sequence)?
      -//                          | block_content
      -//                          | indentless_block_sequence
      -// block_node           ::= ALIAS
      -//                          | properties block_content?
      -//                          | block_content
      -// flow_node            ::= ALIAS
      -//                          | properties flow_content?
      -//                          | flow_content
      -// properties           ::= TAG ANCHOR? | ANCHOR TAG?
      -// block_content        ::= block_collection | flow_collection | SCALAR
      -// flow_content         ::= flow_collection | SCALAR
      -// block_collection     ::= block_sequence | block_mapping
      -// flow_collection      ::= flow_sequence | flow_mapping
      -// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
      -// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
      -// block_mapping        ::= BLOCK-MAPPING_START
      -//                          ((KEY block_node_or_indentless_sequence?)?
      -//                          (VALUE block_node_or_indentless_sequence?)?)*
      -//                          BLOCK-END
      -// flow_sequence        ::= FLOW-SEQUENCE-START
      -//                          (flow_sequence_entry FLOW-ENTRY)*
      -//                          flow_sequence_entry?
      -//                          FLOW-SEQUENCE-END
      -// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -// flow_mapping         ::= FLOW-MAPPING-START
      -//                          (flow_mapping_entry FLOW-ENTRY)*
      -//                          flow_mapping_entry?
      -//                          FLOW-MAPPING-END
      -// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -
      -// Peek the next token in the token queue.
      -func peek_token(parser *yaml_parser_t) *yaml_token_t {
      -	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
      -		return &parser.tokens[parser.tokens_head]
      -	}
      -	return nil
      -}
      -
      -// Remove the next token from the queue (must be called after peek_token).
      -func skip_token(parser *yaml_parser_t) {
      -	parser.token_available = false
      -	parser.tokens_parsed++
      -	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
      -	parser.tokens_head++
      -}
      -
      -// Get the next event.
      -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	// Erase the event object.
      -	*event = yaml_event_t{}
      -
      -	// No events after the end of the stream or error.
      -	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
      -		return true
      -	}
      -
      -	// Generate the next event.
      -	return yaml_parser_state_machine(parser, event)
      -}
      -
      -// Set parser error.
      -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
      -	parser.error = yaml_PARSER_ERROR
      -	parser.problem = problem
      -	parser.problem_mark = problem_mark
      -	return false
      -}
      -
      -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
      -	parser.error = yaml_PARSER_ERROR
      -	parser.context = context
      -	parser.context_mark = context_mark
      -	parser.problem = problem
      -	parser.problem_mark = problem_mark
      -	return false
      -}
      -
      -// State dispatcher.
      -func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	//trace("yaml_parser_state_machine", "state:", parser.state.String())
      -
      -	switch parser.state {
      -	case yaml_PARSE_STREAM_START_STATE:
      -		return yaml_parser_parse_stream_start(parser, event)
      -
      -	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
      -		return yaml_parser_parse_document_start(parser, event, true)
      -
      -	case yaml_PARSE_DOCUMENT_START_STATE:
      -		return yaml_parser_parse_document_start(parser, event, false)
      -
      -	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
      -		return yaml_parser_parse_document_content(parser, event)
      -
      -	case yaml_PARSE_DOCUMENT_END_STATE:
      -		return yaml_parser_parse_document_end(parser, event)
      -
      -	case yaml_PARSE_BLOCK_NODE_STATE:
      -		return yaml_parser_parse_node(parser, event, true, false)
      -
      -	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
      -		return yaml_parser_parse_node(parser, event, true, true)
      -
      -	case yaml_PARSE_FLOW_NODE_STATE:
      -		return yaml_parser_parse_node(parser, event, false, false)
      -
      -	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
      -		return yaml_parser_parse_block_sequence_entry(parser, event, true)
      -
      -	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
      -		return yaml_parser_parse_block_sequence_entry(parser, event, false)
      -
      -	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
      -		return yaml_parser_parse_indentless_sequence_entry(parser, event)
      -
      -	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
      -		return yaml_parser_parse_block_mapping_key(parser, event, true)
      -
      -	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
      -		return yaml_parser_parse_block_mapping_key(parser, event, false)
      -
      -	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
      -		return yaml_parser_parse_block_mapping_value(parser, event)
      -
      -	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
      -		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
      -
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
      -		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
      -
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
      -		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
      -
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
      -		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
      -
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
      -		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
      -
      -	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
      -		return yaml_parser_parse_flow_mapping_key(parser, event, true)
      -
      -	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
      -		return yaml_parser_parse_flow_mapping_key(parser, event, false)
      -
      -	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
      -		return yaml_parser_parse_flow_mapping_value(parser, event, false)
      -
      -	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
      -		return yaml_parser_parse_flow_mapping_value(parser, event, true)
      -
      -	default:
      -		panic("invalid parser state")
      -	}
      -	return false
      -}
      -
      -// Parse the production:
      -// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
      -//              ************
      -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if token.typ != yaml_STREAM_START_TOKEN {
      -		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
      -	}
      -	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
      -	*event = yaml_event_t{
      -		typ:        yaml_STREAM_START_EVENT,
      -		start_mark: token.start_mark,
      -		end_mark:   token.end_mark,
      -		encoding:   token.encoding,
      -	}
      -	skip_token(parser)
      -	return true
      -}
      -
      -// Parse the productions:
      -// implicit_document    ::= block_node DOCUMENT-END*
      -//                          *
      -// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
      -//                          *************************
      -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
      -
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	// Parse extra document end indicators.
      -	if !implicit {
      -		for token.typ == yaml_DOCUMENT_END_TOKEN {
      -			skip_token(parser)
      -			token = peek_token(parser)
      -			if token == nil {
      -				return false
      -			}
      -		}
      -	}
      -
      -	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
      -		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
      -		token.typ != yaml_DOCUMENT_START_TOKEN &&
      -		token.typ != yaml_STREAM_END_TOKEN {
      -		// Parse an implicit document.
      -		if !yaml_parser_process_directives(parser, nil, nil) {
      -			return false
      -		}
      -		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
      -		parser.state = yaml_PARSE_BLOCK_NODE_STATE
      -
      -		*event = yaml_event_t{
      -			typ:        yaml_DOCUMENT_START_EVENT,
      -			start_mark: token.start_mark,
      -			end_mark:   token.end_mark,
      -		}
      -
      -	} else if token.typ != yaml_STREAM_END_TOKEN {
      -		// Parse an explicit document.
      -		var version_directive *yaml_version_directive_t
      -		var tag_directives []yaml_tag_directive_t
      -		start_mark := token.start_mark
      -		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
      -			return false
      -		}
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_DOCUMENT_START_TOKEN {
      -			yaml_parser_set_parser_error(parser,
      -				"did not find expected <document start>", token.start_mark)
      -			return false
      -		}
      -		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
      -		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
      -		end_mark := token.end_mark
      -
      -		*event = yaml_event_t{
      -			typ:               yaml_DOCUMENT_START_EVENT,
      -			start_mark:        start_mark,
      -			end_mark:          end_mark,
      -			version_directive: version_directive,
      -			tag_directives:    tag_directives,
      -			implicit:          false,
      -		}
      -		skip_token(parser)
      -
      -	} else {
      -		// Parse the stream end.
      -		parser.state = yaml_PARSE_END_STATE
      -		*event = yaml_event_t{
      -			typ:        yaml_STREAM_END_EVENT,
      -			start_mark: token.start_mark,
      -			end_mark:   token.end_mark,
      -		}
      -		skip_token(parser)
      -	}
      -
      -	return true
      -}
      -
      -// Parse the productions:
      -// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
      -//                                                    ***********
      -//
      -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
      -		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
      -		token.typ == yaml_DOCUMENT_START_TOKEN ||
      -		token.typ == yaml_DOCUMENT_END_TOKEN ||
      -		token.typ == yaml_STREAM_END_TOKEN {
      -		parser.state = parser.states[len(parser.states)-1]
      -		parser.states = parser.states[:len(parser.states)-1]
      -		return yaml_parser_process_empty_scalar(parser, event,
      -			token.start_mark)
      -	}
      -	return yaml_parser_parse_node(parser, event, true, false)
      -}
      -
      -// Parse the productions:
      -// implicit_document    ::= block_node DOCUMENT-END*
      -//                                     *************
      -// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
      -//
      -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	start_mark := token.start_mark
      -	end_mark := token.start_mark
      -
      -	implicit := true
      -	if token.typ == yaml_DOCUMENT_END_TOKEN {
      -		end_mark = token.end_mark
      -		skip_token(parser)
      -		implicit = false
      -	}
      -
      -	parser.tag_directives = parser.tag_directives[:0]
      -
      -	parser.state = yaml_PARSE_DOCUMENT_START_STATE
      -	*event = yaml_event_t{
      -		typ:        yaml_DOCUMENT_END_EVENT,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -		implicit:   implicit,
      -	}
      -	return true
      -}
      -
      -// Parse the productions:
      -// block_node_or_indentless_sequence    ::=
      -//                          ALIAS
      -//                          *****
      -//                          | properties (block_content | indentless_block_sequence)?
      -//                            **********  *
      -//                          | block_content | indentless_block_sequence
      -//                            *
      -// block_node           ::= ALIAS
      -//                          *****
      -//                          | properties block_content?
      -//                            ********** *
      -//                          | block_content
      -//                            *
      -// flow_node            ::= ALIAS
      -//                          *****
      -//                          | properties flow_content?
      -//                            ********** *
      -//                          | flow_content
      -//                            *
      -// properties           ::= TAG ANCHOR? | ANCHOR TAG?
      -//                          *************************
      -// block_content        ::= block_collection | flow_collection | SCALAR
      -//                                                               ******
      -// flow_content         ::= flow_collection | SCALAR
      -//                                            ******
      -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
      -	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
      -
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	if token.typ == yaml_ALIAS_TOKEN {
      -		parser.state = parser.states[len(parser.states)-1]
      -		parser.states = parser.states[:len(parser.states)-1]
      -		*event = yaml_event_t{
      -			typ:        yaml_ALIAS_EVENT,
      -			start_mark: token.start_mark,
      -			end_mark:   token.end_mark,
      -			anchor:     token.value,
      -		}
      -		skip_token(parser)
      -		return true
      -	}
      -
      -	start_mark := token.start_mark
      -	end_mark := token.start_mark
      -
      -	var tag_token bool
      -	var tag_handle, tag_suffix, anchor []byte
      -	var tag_mark yaml_mark_t
      -	if token.typ == yaml_ANCHOR_TOKEN {
      -		anchor = token.value
      -		start_mark = token.start_mark
      -		end_mark = token.end_mark
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ == yaml_TAG_TOKEN {
      -			tag_token = true
      -			tag_handle = token.value
      -			tag_suffix = token.suffix
      -			tag_mark = token.start_mark
      -			end_mark = token.end_mark
      -			skip_token(parser)
      -			token = peek_token(parser)
      -			if token == nil {
      -				return false
      -			}
      -		}
      -	} else if token.typ == yaml_TAG_TOKEN {
      -		tag_token = true
      -		tag_handle = token.value
      -		tag_suffix = token.suffix
      -		start_mark = token.start_mark
      -		tag_mark = token.start_mark
      -		end_mark = token.end_mark
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ == yaml_ANCHOR_TOKEN {
      -			anchor = token.value
      -			end_mark = token.end_mark
      -			skip_token(parser)
      -			token = peek_token(parser)
      -			if token == nil {
      -				return false
      -			}
      -		}
      -	}
      -
      -	var tag []byte
      -	if tag_token {
      -		if len(tag_handle) == 0 {
      -			tag = tag_suffix
      -			tag_suffix = nil
      -		} else {
      -			for i := range parser.tag_directives {
      -				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
      -					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
      -					tag = append(tag, tag_suffix...)
      -					break
      -				}
      -			}
      -			if len(tag) == 0 {
      -				yaml_parser_set_parser_error_context(parser,
      -					"while parsing a node", start_mark,
      -					"found undefined tag handle", tag_mark)
      -				return false
      -			}
      -		}
      -	}
      -
      -	implicit := len(tag) == 0
      -	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
      -		end_mark = token.end_mark
      -		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
      -		*event = yaml_event_t{
      -			typ:        yaml_SEQUENCE_START_EVENT,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			anchor:     anchor,
      -			tag:        tag,
      -			implicit:   implicit,
      -			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
      -		}
      -		return true
      -	}
      -	if token.typ == yaml_SCALAR_TOKEN {
      -		var plain_implicit, quoted_implicit bool
      -		end_mark = token.end_mark
      -		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
      -			plain_implicit = true
      -		} else if len(tag) == 0 {
      -			quoted_implicit = true
      -		}
      -		parser.state = parser.states[len(parser.states)-1]
      -		parser.states = parser.states[:len(parser.states)-1]
      -
      -		*event = yaml_event_t{
      -			typ:             yaml_SCALAR_EVENT,
      -			start_mark:      start_mark,
      -			end_mark:        end_mark,
      -			anchor:          anchor,
      -			tag:             tag,
      -			value:           token.value,
      -			implicit:        plain_implicit,
      -			quoted_implicit: quoted_implicit,
      -			style:           yaml_style_t(token.style),
      -		}
      -		skip_token(parser)
      -		return true
      -	}
      -	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
      -		// [Go] Some of the events below can be merged as they differ only on style.
      -		end_mark = token.end_mark
      -		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
      -		*event = yaml_event_t{
      -			typ:        yaml_SEQUENCE_START_EVENT,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			anchor:     anchor,
      -			tag:        tag,
      -			implicit:   implicit,
      -			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
      -		}
      -		return true
      -	}
      -	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
      -		end_mark = token.end_mark
      -		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
      -		*event = yaml_event_t{
      -			typ:        yaml_MAPPING_START_EVENT,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			anchor:     anchor,
      -			tag:        tag,
      -			implicit:   implicit,
      -			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
      -		}
      -		return true
      -	}
      -	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
      -		end_mark = token.end_mark
      -		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
      -		*event = yaml_event_t{
      -			typ:        yaml_SEQUENCE_START_EVENT,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			anchor:     anchor,
      -			tag:        tag,
      -			implicit:   implicit,
      -			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
      -		}
      -		return true
      -	}
      -	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
      -		end_mark = token.end_mark
      -		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
      -		*event = yaml_event_t{
      -			typ:        yaml_MAPPING_START_EVENT,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			anchor:     anchor,
      -			tag:        tag,
      -			implicit:   implicit,
      -			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
      -		}
      -		return true
      -	}
      -	if len(anchor) > 0 || len(tag) > 0 {
      -		parser.state = parser.states[len(parser.states)-1]
      -		parser.states = parser.states[:len(parser.states)-1]
      -
      -		*event = yaml_event_t{
      -			typ:             yaml_SCALAR_EVENT,
      -			start_mark:      start_mark,
      -			end_mark:        end_mark,
      -			anchor:          anchor,
      -			tag:             tag,
      -			implicit:        implicit,
      -			quoted_implicit: false,
      -			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
      -		}
      -		return true
      -	}
      -
      -	context := "while parsing a flow node"
      -	if block {
      -		context = "while parsing a block node"
      -	}
      -	yaml_parser_set_parser_error_context(parser, context, start_mark,
      -		"did not find expected node content", token.start_mark)
      -	return false
      -}
      -
      -// Parse the productions:
      -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
      -//                    ********************  *********** *             *********
      -//
      -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		token := peek_token(parser)
      -		parser.marks = append(parser.marks, token.start_mark)
      -		skip_token(parser)
      -	}
      -
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
      -		mark := token.end_mark
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
      -			return yaml_parser_parse_node(parser, event, true, false)
      -		} else {
      -			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
      -			return yaml_parser_process_empty_scalar(parser, event, mark)
      -		}
      -	}
      -	if token.typ == yaml_BLOCK_END_TOKEN {
      -		parser.state = parser.states[len(parser.states)-1]
      -		parser.states = parser.states[:len(parser.states)-1]
      -		parser.marks = parser.marks[:len(parser.marks)-1]
      -
      -		*event = yaml_event_t{
      -			typ:        yaml_SEQUENCE_END_EVENT,
      -			start_mark: token.start_mark,
      -			end_mark:   token.end_mark,
      -		}
      -
      -		skip_token(parser)
      -		return true
      -	}
      -
      -	context_mark := parser.marks[len(parser.marks)-1]
      -	parser.marks = parser.marks[:len(parser.marks)-1]
      -	return yaml_parser_set_parser_error_context(parser,
      -		"while parsing a block collection", context_mark,
      -		"did not find expected '-' indicator", token.start_mark)
      -}
      -
      -// Parse the productions:
      -// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
      -//                           *********** *
      -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
      -		mark := token.end_mark
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
      -			token.typ != yaml_KEY_TOKEN &&
      -			token.typ != yaml_VALUE_TOKEN &&
      -			token.typ != yaml_BLOCK_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
      -			return yaml_parser_parse_node(parser, event, true, false)
      -		}
      -		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
      -		return yaml_parser_process_empty_scalar(parser, event, mark)
      -	}
      -	parser.state = parser.states[len(parser.states)-1]
      -	parser.states = parser.states[:len(parser.states)-1]
      -
      -	*event = yaml_event_t{
      -		typ:        yaml_SEQUENCE_END_EVENT,
      -		start_mark: token.start_mark,
      -		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
      -	}
      -	return true
      -}
      -
      -// Parse the productions:
      -// block_mapping        ::= BLOCK-MAPPING_START
      -//                          *******************
      -//                          ((KEY block_node_or_indentless_sequence?)?
      -//                            *** *
      -//                          (VALUE block_node_or_indentless_sequence?)?)*
      -//
      -//                          BLOCK-END
      -//                          *********
      -//
      -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		token := peek_token(parser)
      -		parser.marks = append(parser.marks, token.start_mark)
      -		skip_token(parser)
      -	}
      -
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	if token.typ == yaml_KEY_TOKEN {
      -		mark := token.end_mark
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_KEY_TOKEN &&
      -			token.typ != yaml_VALUE_TOKEN &&
      -			token.typ != yaml_BLOCK_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
      -			return yaml_parser_parse_node(parser, event, true, true)
      -		} else {
      -			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
      -			return yaml_parser_process_empty_scalar(parser, event, mark)
      -		}
      -	} else if token.typ == yaml_BLOCK_END_TOKEN {
      -		parser.state = parser.states[len(parser.states)-1]
      -		parser.states = parser.states[:len(parser.states)-1]
      -		parser.marks = parser.marks[:len(parser.marks)-1]
      -		*event = yaml_event_t{
      -			typ:        yaml_MAPPING_END_EVENT,
      -			start_mark: token.start_mark,
      -			end_mark:   token.end_mark,
      -		}
      -		skip_token(parser)
      -		return true
      -	}
      -
      -	context_mark := parser.marks[len(parser.marks)-1]
      -	parser.marks = parser.marks[:len(parser.marks)-1]
      -	return yaml_parser_set_parser_error_context(parser,
      -		"while parsing a block mapping", context_mark,
      -		"did not find expected key", token.start_mark)
      -}
      -
      -// Parse the productions:
      -// block_mapping        ::= BLOCK-MAPPING_START
      -//
      -//                          ((KEY block_node_or_indentless_sequence?)?
      -//
      -//                          (VALUE block_node_or_indentless_sequence?)?)*
      -//                           ***** *
      -//                          BLOCK-END
      -//
      -//
      -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if token.typ == yaml_VALUE_TOKEN {
      -		mark := token.end_mark
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_KEY_TOKEN &&
      -			token.typ != yaml_VALUE_TOKEN &&
      -			token.typ != yaml_BLOCK_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
      -			return yaml_parser_parse_node(parser, event, true, true)
      -		}
      -		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
      -		return yaml_parser_process_empty_scalar(parser, event, mark)
      -	}
      -	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
      -	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
      -}
      -
      -// Parse the productions:
      -// flow_sequence        ::= FLOW-SEQUENCE-START
      -//                          *******************
      -//                          (flow_sequence_entry FLOW-ENTRY)*
      -//                           *                   **********
      -//                          flow_sequence_entry?
      -//                          *
      -//                          FLOW-SEQUENCE-END
      -//                          *****************
      -// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -//                          *
      -//
      -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		token := peek_token(parser)
      -		parser.marks = append(parser.marks, token.start_mark)
      -		skip_token(parser)
      -	}
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
      -		if !first {
      -			if token.typ == yaml_FLOW_ENTRY_TOKEN {
      -				skip_token(parser)
      -				token = peek_token(parser)
      -				if token == nil {
      -					return false
      -				}
      -			} else {
      -				context_mark := parser.marks[len(parser.marks)-1]
      -				parser.marks = parser.marks[:len(parser.marks)-1]
      -				return yaml_parser_set_parser_error_context(parser,
      -					"while parsing a flow sequence", context_mark,
      -					"did not find expected ',' or ']'", token.start_mark)
      -			}
      -		}
      -
      -		if token.typ == yaml_KEY_TOKEN {
      -			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
      -			*event = yaml_event_t{
      -				typ:        yaml_MAPPING_START_EVENT,
      -				start_mark: token.start_mark,
      -				end_mark:   token.end_mark,
      -				implicit:   true,
      -				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
      -			}
      -			skip_token(parser)
      -			return true
      -		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
      -			return yaml_parser_parse_node(parser, event, false, false)
      -		}
      -	}
      -
      -	parser.state = parser.states[len(parser.states)-1]
      -	parser.states = parser.states[:len(parser.states)-1]
      -	parser.marks = parser.marks[:len(parser.marks)-1]
      -
      -	*event = yaml_event_t{
      -		typ:        yaml_SEQUENCE_END_EVENT,
      -		start_mark: token.start_mark,
      -		end_mark:   token.end_mark,
      -	}
      -
      -	skip_token(parser)
      -	return true
      -}
      -
      -//
      -// Parse the productions:
      -// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -//                                      *** *
      -//
      -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if token.typ != yaml_VALUE_TOKEN &&
      -		token.typ != yaml_FLOW_ENTRY_TOKEN &&
      -		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
      -		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
      -		return yaml_parser_parse_node(parser, event, false, false)
      -	}
      -	mark := token.end_mark
      -	skip_token(parser)
      -	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
      -	return yaml_parser_process_empty_scalar(parser, event, mark)
      -}
      -
      -// Parse the productions:
      -// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -//                                                      ***** *
      -//
      -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if token.typ == yaml_VALUE_TOKEN {
      -		skip_token(parser)
      -		token := peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
      -			return yaml_parser_parse_node(parser, event, false, false)
      -		}
      -	}
      -	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
      -	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
      -}
      -
      -// Parse the productions:
      -// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -//                                                                      *
      -//
      -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
      -	*event = yaml_event_t{
      -		typ:        yaml_MAPPING_END_EVENT,
      -		start_mark: token.start_mark,
      -		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
      -	}
      -	return true
      -}
      -
      -// Parse the productions:
      -// flow_mapping         ::= FLOW-MAPPING-START
      -//                          ******************
      -//                          (flow_mapping_entry FLOW-ENTRY)*
      -//                           *                  **********
      -//                          flow_mapping_entry?
      -//                          ******************
      -//                          FLOW-MAPPING-END
      -//                          ****************
      -// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -//                          *           *** *
      -//
      -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
      -	if first {
      -		token := peek_token(parser)
      -		parser.marks = append(parser.marks, token.start_mark)
      -		skip_token(parser)
      -	}
      -
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
      -		if !first {
      -			if token.typ == yaml_FLOW_ENTRY_TOKEN {
      -				skip_token(parser)
      -				token = peek_token(parser)
      -				if token == nil {
      -					return false
      -				}
      -			} else {
      -				context_mark := parser.marks[len(parser.marks)-1]
      -				parser.marks = parser.marks[:len(parser.marks)-1]
      -				return yaml_parser_set_parser_error_context(parser,
      -					"while parsing a flow mapping", context_mark,
      -					"did not find expected ',' or '}'", token.start_mark)
      -			}
      -		}
      -
      -		if token.typ == yaml_KEY_TOKEN {
      -			skip_token(parser)
      -			token = peek_token(parser)
      -			if token == nil {
      -				return false
      -			}
      -			if token.typ != yaml_VALUE_TOKEN &&
      -				token.typ != yaml_FLOW_ENTRY_TOKEN &&
      -				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
      -				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
      -				return yaml_parser_parse_node(parser, event, false, false)
      -			} else {
      -				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
      -				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
      -			}
      -		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
      -			return yaml_parser_parse_node(parser, event, false, false)
      -		}
      -	}
      -
      -	parser.state = parser.states[len(parser.states)-1]
      -	parser.states = parser.states[:len(parser.states)-1]
      -	parser.marks = parser.marks[:len(parser.marks)-1]
      -	*event = yaml_event_t{
      -		typ:        yaml_MAPPING_END_EVENT,
      -		start_mark: token.start_mark,
      -		end_mark:   token.end_mark,
      -	}
      -	skip_token(parser)
      -	return true
      -}
      -
      -// Parse the productions:
      -// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
      -//                                   *                  ***** *
      -//
      -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -	if empty {
      -		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
      -		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
      -	}
      -	if token.typ == yaml_VALUE_TOKEN {
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
      -			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
      -			return yaml_parser_parse_node(parser, event, false, false)
      -		}
      -	}
      -	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
      -	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
      -}
      -
      -// Generate an empty scalar event.
      -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
      -	*event = yaml_event_t{
      -		typ:        yaml_SCALAR_EVENT,
      -		start_mark: mark,
      -		end_mark:   mark,
      -		value:      nil, // Empty
      -		implicit:   true,
      -		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
      -	}
      -	return true
      -}
      -
      -var default_tag_directives = []yaml_tag_directive_t{
      -	{[]byte("!"), []byte("!")},
      -	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
      -}
      -
      -// Parse directives.
      -func yaml_parser_process_directives(parser *yaml_parser_t,
      -	version_directive_ref **yaml_version_directive_t,
      -	tag_directives_ref *[]yaml_tag_directive_t) bool {
      -
      -	var version_directive *yaml_version_directive_t
      -	var tag_directives []yaml_tag_directive_t
      -
      -	token := peek_token(parser)
      -	if token == nil {
      -		return false
      -	}
      -
      -	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
      -		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
      -			if version_directive != nil {
      -				yaml_parser_set_parser_error(parser,
      -					"found duplicate %YAML directive", token.start_mark)
      -				return false
      -			}
      -			if token.major != 1 || token.minor != 1 {
      -				yaml_parser_set_parser_error(parser,
      -					"found incompatible YAML document", token.start_mark)
      -				return false
      -			}
      -			version_directive = &yaml_version_directive_t{
      -				major: token.major,
      -				minor: token.minor,
      -			}
      -		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
      -			value := yaml_tag_directive_t{
      -				handle: token.value,
      -				prefix: token.prefix,
      -			}
      -			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
      -				return false
      -			}
      -			tag_directives = append(tag_directives, value)
      -		}
      -
      -		skip_token(parser)
      -		token = peek_token(parser)
      -		if token == nil {
      -			return false
      -		}
      -	}
      -
      -	for i := range default_tag_directives {
      -		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
      -			return false
      -		}
      -	}
      -
      -	if version_directive_ref != nil {
      -		*version_directive_ref = version_directive
      -	}
      -	if tag_directives_ref != nil {
      -		*tag_directives_ref = tag_directives
      -	}
      -	return true
      -}
      -
      -// Append a tag directive to the directives stack.
      -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
      -	for i := range parser.tag_directives {
      -		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
      -			if allow_duplicates {
      -				return true
      -			}
      -			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
      -		}
      -	}
      -
      -	// [Go] I suspect the copy is unnecessary. This was likely done
      -	// because there was no way to track ownership of the data.
      -	value_copy := yaml_tag_directive_t{
      -		handle: make([]byte, len(value.handle)),
      -		prefix: make([]byte, len(value.prefix)),
      -	}
      -	copy(value_copy.handle, value.handle)
      -	copy(value_copy.prefix, value.prefix)
      -	parser.tag_directives = append(parser.tag_directives, value_copy)
      -	return true
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/readerc.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/readerc.go
      deleted file mode 100644
      index d5fb0972..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/readerc.go
      +++ /dev/null
      @@ -1,391 +0,0 @@
      -package yaml
      -
      -import (
      -	"io"
      -)
      -
      -// Set the reader error and return 0.
      -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
      -	parser.error = yaml_READER_ERROR
      -	parser.problem = problem
      -	parser.problem_offset = offset
      -	parser.problem_value = value
      -	return false
      -}
      -
      -// Byte order marks.
      -const (
      -	bom_UTF8    = "\xef\xbb\xbf"
      -	bom_UTF16LE = "\xff\xfe"
      -	bom_UTF16BE = "\xfe\xff"
      -)
      -
      -// Determine the input stream encoding by checking the BOM symbol. If no BOM is
      -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
      -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
      -	// Ensure that we had enough bytes in the raw buffer.
      -	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
      -		if !yaml_parser_update_raw_buffer(parser) {
      -			return false
      -		}
      -	}
      -
      -	// Determine the encoding.
      -	buf := parser.raw_buffer
      -	pos := parser.raw_buffer_pos
      -	avail := len(buf) - pos
      -	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
      -		parser.encoding = yaml_UTF16LE_ENCODING
      -		parser.raw_buffer_pos += 2
      -		parser.offset += 2
      -	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
      -		parser.encoding = yaml_UTF16BE_ENCODING
      -		parser.raw_buffer_pos += 2
      -		parser.offset += 2
      -	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
      -		parser.encoding = yaml_UTF8_ENCODING
      -		parser.raw_buffer_pos += 3
      -		parser.offset += 3
      -	} else {
      -		parser.encoding = yaml_UTF8_ENCODING
      -	}
      -	return true
      -}
      -
      -// Update the raw buffer.
      -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
      -	size_read := 0
      -
      -	// Return if the raw buffer is full.
      -	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
      -		return true
      -	}
      -
      -	// Return on EOF.
      -	if parser.eof {
      -		return true
      -	}
      -
      -	// Move the remaining bytes in the raw buffer to the beginning.
      -	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
      -		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
      -	}
      -	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
      -	parser.raw_buffer_pos = 0
      -
      -	// Call the read handler to fill the buffer.
      -	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
      -	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
      -	if err == io.EOF {
      -		parser.eof = true
      -	} else if err != nil {
      -		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
      -	}
      -	return true
      -}
      -
      -// Ensure that the buffer contains at least `length` characters.
      -// Return true on success, false on failure.
      -//
      -// The length is supposed to be significantly less that the buffer size.
      -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
      -	if parser.read_handler == nil {
      -		panic("read handler must be set")
      -	}
      -
      -	// If the EOF flag is set and the raw buffer is empty, do nothing.
      -	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
      -		return true
      -	}
      -
      -	// Return if the buffer contains enough characters.
      -	if parser.unread >= length {
      -		return true
      -	}
      -
      -	// Determine the input encoding if it is not known yet.
      -	if parser.encoding == yaml_ANY_ENCODING {
      -		if !yaml_parser_determine_encoding(parser) {
      -			return false
      -		}
      -	}
      -
      -	// Move the unread characters to the beginning of the buffer.
      -	buffer_len := len(parser.buffer)
      -	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
      -		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
      -		buffer_len -= parser.buffer_pos
      -		parser.buffer_pos = 0
      -	} else if parser.buffer_pos == buffer_len {
      -		buffer_len = 0
      -		parser.buffer_pos = 0
      -	}
      -
      -	// Open the whole buffer for writing, and cut it before returning.
      -	parser.buffer = parser.buffer[:cap(parser.buffer)]
      -
      -	// Fill the buffer until it has enough characters.
      -	first := true
      -	for parser.unread < length {
      -
      -		// Fill the raw buffer if necessary.
      -		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
      -			if !yaml_parser_update_raw_buffer(parser) {
      -				parser.buffer = parser.buffer[:buffer_len]
      -				return false
      -			}
      -		}
      -		first = false
      -
      -		// Decode the raw buffer.
      -	inner:
      -		for parser.raw_buffer_pos != len(parser.raw_buffer) {
      -			var value rune
      -			var width int
      -
      -			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
      -
      -			// Decode the next character.
      -			switch parser.encoding {
      -			case yaml_UTF8_ENCODING:
      -				// Decode a UTF-8 character.  Check RFC 3629
      -				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
      -				//
      -				// The following table (taken from the RFC) is used for
      -				// decoding.
      -				//
      -				//    Char. number range |        UTF-8 octet sequence
      -				//      (hexadecimal)    |              (binary)
      -				//   --------------------+------------------------------------
      -				//   0000 0000-0000 007F | 0xxxxxxx
      -				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
      -				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
      -				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
      -				//
      -				// Additionally, the characters in the range 0xD800-0xDFFF
      -				// are prohibited as they are reserved for use with UTF-16
      -				// surrogate pairs.
      -
      -				// Determine the length of the UTF-8 sequence.
      -				octet := parser.raw_buffer[parser.raw_buffer_pos]
      -				switch {
      -				case octet&0x80 == 0x00:
      -					width = 1
      -				case octet&0xE0 == 0xC0:
      -					width = 2
      -				case octet&0xF0 == 0xE0:
      -					width = 3
      -				case octet&0xF8 == 0xF0:
      -					width = 4
      -				default:
      -					// The leading octet is invalid.
      -					return yaml_parser_set_reader_error(parser,
      -						"invalid leading UTF-8 octet",
      -						parser.offset, int(octet))
      -				}
      -
      -				// Check if the raw buffer contains an incomplete character.
      -				if width > raw_unread {
      -					if parser.eof {
      -						return yaml_parser_set_reader_error(parser,
      -							"incomplete UTF-8 octet sequence",
      -							parser.offset, -1)
      -					}
      -					break inner
      -				}
      -
      -				// Decode the leading octet.
      -				switch {
      -				case octet&0x80 == 0x00:
      -					value = rune(octet & 0x7F)
      -				case octet&0xE0 == 0xC0:
      -					value = rune(octet & 0x1F)
      -				case octet&0xF0 == 0xE0:
      -					value = rune(octet & 0x0F)
      -				case octet&0xF8 == 0xF0:
      -					value = rune(octet & 0x07)
      -				default:
      -					value = 0
      -				}
      -
      -				// Check and decode the trailing octets.
      -				for k := 1; k < width; k++ {
      -					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
      -
      -					// Check if the octet is valid.
      -					if (octet & 0xC0) != 0x80 {
      -						return yaml_parser_set_reader_error(parser,
      -							"invalid trailing UTF-8 octet",
      -							parser.offset+k, int(octet))
      -					}
      -
      -					// Decode the octet.
      -					value = (value << 6) + rune(octet&0x3F)
      -				}
      -
      -				// Check the length of the sequence against the value.
      -				switch {
      -				case width == 1:
      -				case width == 2 && value >= 0x80:
      -				case width == 3 && value >= 0x800:
      -				case width == 4 && value >= 0x10000:
      -				default:
      -					return yaml_parser_set_reader_error(parser,
      -						"invalid length of a UTF-8 sequence",
      -						parser.offset, -1)
      -				}
      -
      -				// Check the range of the value.
      -				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
      -					return yaml_parser_set_reader_error(parser,
      -						"invalid Unicode character",
      -						parser.offset, int(value))
      -				}
      -
      -			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
      -				var low, high int
      -				if parser.encoding == yaml_UTF16LE_ENCODING {
      -					low, high = 0, 1
      -				} else {
      -					high, low = 1, 0
      -				}
      -
      -				// The UTF-16 encoding is not as simple as one might
      -				// naively think.  Check RFC 2781
      -				// (http://www.ietf.org/rfc/rfc2781.txt).
      -				//
      -				// Normally, two subsequent bytes describe a Unicode
      -				// character.  However a special technique (called a
      -				// surrogate pair) is used for specifying character
      -				// values larger than 0xFFFF.
      -				//
      -				// A surrogate pair consists of two pseudo-characters:
      -				//      high surrogate area (0xD800-0xDBFF)
      -				//      low surrogate area (0xDC00-0xDFFF)
      -				//
      -				// The following formulas are used for decoding
      -				// and encoding characters using surrogate pairs:
      -				//
      -				//  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
      -				//  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
      -				//  W1 = 110110yyyyyyyyyy
      -				//  W2 = 110111xxxxxxxxxx
      -				//
      -				// where U is the character value, W1 is the high surrogate
      -				// area, W2 is the low surrogate area.
      -
      -				// Check for incomplete UTF-16 character.
      -				if raw_unread < 2 {
      -					if parser.eof {
      -						return yaml_parser_set_reader_error(parser,
      -							"incomplete UTF-16 character",
      -							parser.offset, -1)
      -					}
      -					break inner
      -				}
      -
      -				// Get the character.
      -				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
      -					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
      -
      -				// Check for unexpected low surrogate area.
      -				if value&0xFC00 == 0xDC00 {
      -					return yaml_parser_set_reader_error(parser,
      -						"unexpected low surrogate area",
      -						parser.offset, int(value))
      -				}
      -
      -				// Check for a high surrogate area.
      -				if value&0xFC00 == 0xD800 {
      -					width = 4
      -
      -					// Check for incomplete surrogate pair.
      -					if raw_unread < 4 {
      -						if parser.eof {
      -							return yaml_parser_set_reader_error(parser,
      -								"incomplete UTF-16 surrogate pair",
      -								parser.offset, -1)
      -						}
      -						break inner
      -					}
      -
      -					// Get the next character.
      -					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
      -						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
      -
      -					// Check for a low surrogate area.
      -					if value2&0xFC00 != 0xDC00 {
      -						return yaml_parser_set_reader_error(parser,
      -							"expected low surrogate area",
      -							parser.offset+2, int(value2))
      -					}
      -
      -					// Generate the value of the surrogate pair.
      -					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
      -				} else {
      -					width = 2
      -				}
      -
      -			default:
      -				panic("impossible")
      -			}
      -
      -			// Check if the character is in the allowed range:
      -			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
      -			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
      -			//      | [#x10000-#x10FFFF]                        (32 bit)
      -			switch {
      -			case value == 0x09:
      -			case value == 0x0A:
      -			case value == 0x0D:
      -			case value >= 0x20 && value <= 0x7E:
      -			case value == 0x85:
      -			case value >= 0xA0 && value <= 0xD7FF:
      -			case value >= 0xE000 && value <= 0xFFFD:
      -			case value >= 0x10000 && value <= 0x10FFFF:
      -			default:
      -				return yaml_parser_set_reader_error(parser,
      -					"control characters are not allowed",
      -					parser.offset, int(value))
      -			}
      -
      -			// Move the raw pointers.
      -			parser.raw_buffer_pos += width
      -			parser.offset += width
      -
      -			// Finally put the character into the buffer.
      -			if value <= 0x7F {
      -				// 0000 0000-0000 007F . 0xxxxxxx
      -				parser.buffer[buffer_len+0] = byte(value)
      -			} else if value <= 0x7FF {
      -				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
      -				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
      -				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
      -			} else if value <= 0xFFFF {
      -				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
      -				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
      -				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
      -				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
      -			} else {
      -				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
      -				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
      -				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
      -				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
      -				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
      -			}
      -			buffer_len += width
      -
      -			parser.unread++
      -		}
      -
      -		// On EOF, put NUL into the buffer and return.
      -		if parser.eof {
      -			parser.buffer[buffer_len] = 0
      -			buffer_len++
      -			parser.unread++
      -			break
      -		}
      -	}
      -	parser.buffer = parser.buffer[:buffer_len]
      -	return true
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/resolve.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/resolve.go
      deleted file mode 100644
      index 93a86327..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/resolve.go
      +++ /dev/null
      @@ -1,203 +0,0 @@
      -package yaml
      -
      -import (
      -	"encoding/base64"
      -	"math"
      -	"strconv"
      -	"strings"
      -	"unicode/utf8"
      -)
      -
      -type resolveMapItem struct {
      -	value interface{}
      -	tag   string
      -}
      -
      -var resolveTable = make([]byte, 256)
      -var resolveMap = make(map[string]resolveMapItem)
      -
      -func init() {
      -	t := resolveTable
      -	t[int('+')] = 'S' // Sign
      -	t[int('-')] = 'S'
      -	for _, c := range "0123456789" {
      -		t[int(c)] = 'D' // Digit
      -	}
      -	for _, c := range "yYnNtTfFoO~" {
      -		t[int(c)] = 'M' // In map
      -	}
      -	t[int('.')] = '.' // Float (potentially in map)
      -
      -	var resolveMapList = []struct {
      -		v   interface{}
      -		tag string
      -		l   []string
      -	}{
      -		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
      -		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
      -		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
      -		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
      -		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
      -		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
      -		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
      -		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
      -		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
      -		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
      -		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
      -		{"<<", yaml_MERGE_TAG, []string{"<<"}},
      -	}
      -
      -	m := resolveMap
      -	for _, item := range resolveMapList {
      -		for _, s := range item.l {
      -			m[s] = resolveMapItem{item.v, item.tag}
      -		}
      -	}
      -}
      -
      -const longTagPrefix = "tag:yaml.org,2002:"
      -
      -func shortTag(tag string) string {
      -	// TODO This can easily be made faster and produce less garbage.
      -	if strings.HasPrefix(tag, longTagPrefix) {
      -		return "!!" + tag[len(longTagPrefix):]
      -	}
      -	return tag
      -}
      -
      -func longTag(tag string) string {
      -	if strings.HasPrefix(tag, "!!") {
      -		return longTagPrefix + tag[2:]
      -	}
      -	return tag
      -}
      -
      -func resolvableTag(tag string) bool {
      -	switch tag {
      -	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
      -		return true
      -	}
      -	return false
      -}
      -
      -func resolve(tag string, in string) (rtag string, out interface{}) {
      -	if !resolvableTag(tag) {
      -		return tag, in
      -	}
      -
      -	defer func() {
      -		switch tag {
      -		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
      -			return
      -		}
      -		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
      -	}()
      -
      -	// Any data is accepted as a !!str or !!binary.
      -	// Otherwise, the prefix is enough of a hint about what it might be.
      -	hint := byte('N')
      -	if in != "" {
      -		hint = resolveTable[in[0]]
      -	}
      -	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
      -		// Handle things we can lookup in a map.
      -		if item, ok := resolveMap[in]; ok {
      -			return item.tag, item.value
      -		}
      -
      -		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
      -		// are purposefully unsupported here. They're still quoted on
      -		// the way out for compatibility with other parser, though.
      -
      -		switch hint {
      -		case 'M':
      -			// We've already checked the map above.
      -
      -		case '.':
      -			// Not in the map, so maybe a normal float.
      -			floatv, err := strconv.ParseFloat(in, 64)
      -			if err == nil {
      -				return yaml_FLOAT_TAG, floatv
      -			}
      -
      -		case 'D', 'S':
      -			// Int, float, or timestamp.
      -			plain := strings.Replace(in, "_", "", -1)
      -			intv, err := strconv.ParseInt(plain, 0, 64)
      -			if err == nil {
      -				if intv == int64(int(intv)) {
      -					return yaml_INT_TAG, int(intv)
      -				} else {
      -					return yaml_INT_TAG, intv
      -				}
      -			}
      -			uintv, err := strconv.ParseUint(plain, 0, 64)
      -			if err == nil {
      -				return yaml_INT_TAG, uintv
      -			}
      -			floatv, err := strconv.ParseFloat(plain, 64)
      -			if err == nil {
      -				return yaml_FLOAT_TAG, floatv
      -			}
      -			if strings.HasPrefix(plain, "0b") {
      -				intv, err := strconv.ParseInt(plain[2:], 2, 64)
      -				if err == nil {
      -					if intv == int64(int(intv)) {
      -						return yaml_INT_TAG, int(intv)
      -					} else {
      -						return yaml_INT_TAG, intv
      -					}
      -				}
      -				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
      -				if err == nil {
      -					return yaml_INT_TAG, uintv
      -				}
      -			} else if strings.HasPrefix(plain, "-0b") {
      -				intv, err := strconv.ParseInt(plain[3:], 2, 64)
      -				if err == nil {
      -					if intv == int64(int(intv)) {
      -						return yaml_INT_TAG, -int(intv)
      -					} else {
      -						return yaml_INT_TAG, -intv
      -					}
      -				}
      -			}
      -			// XXX Handle timestamps here.
      -
      -		default:
      -			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
      -		}
      -	}
      -	if tag == yaml_BINARY_TAG {
      -		return yaml_BINARY_TAG, in
      -	}
      -	if utf8.ValidString(in) {
      -		return yaml_STR_TAG, in
      -	}
      -	return yaml_BINARY_TAG, encodeBase64(in)
      -}
      -
      -// encodeBase64 encodes s as base64 that is broken up into multiple lines
      -// as appropriate for the resulting length.
      -func encodeBase64(s string) string {
      -	const lineLen = 70
      -	encLen := base64.StdEncoding.EncodedLen(len(s))
      -	lines := encLen/lineLen + 1
      -	buf := make([]byte, encLen*2+lines)
      -	in := buf[0:encLen]
      -	out := buf[encLen:]
      -	base64.StdEncoding.Encode(in, []byte(s))
      -	k := 0
      -	for i := 0; i < len(in); i += lineLen {
      -		j := i + lineLen
      -		if j > len(in) {
      -			j = len(in)
      -		}
      -		k += copy(out[k:], in[i:j])
      -		if lines > 1 {
      -			out[k] = '\n'
      -			k++
      -		}
      -	}
      -	return string(out[:k])
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/scannerc.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/scannerc.go
      deleted file mode 100644
      index fe93b190..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/scannerc.go
      +++ /dev/null
      @@ -1,2710 +0,0 @@
      -package yaml
      -
      -import (
      -	"bytes"
      -	"fmt"
      -)
      -
      -// Introduction
      -// ************
      -//
      -// The following notes assume that you are familiar with the YAML specification
      -// (http://yaml.org/spec/cvs/current.html).  We mostly follow it, although in
      -// some cases we are less restrictive that it requires.
      -//
      -// The process of transforming a YAML stream into a sequence of events is
      -// divided on two steps: Scanning and Parsing.
      -//
      -// The Scanner transforms the input stream into a sequence of tokens, while the
      -// parser transform the sequence of tokens produced by the Scanner into a
      -// sequence of parsing events.
      -//
      -// The Scanner is rather clever and complicated. The Parser, on the contrary,
      -// is a straightforward implementation of a recursive-descendant parser (or,
      -// LL(1) parser, as it is usually called).
      -//
      -// Actually there are two issues of Scanning that might be called "clever", the
      -// rest is quite straightforward.  The issues are "block collection start" and
      -// "simple keys".  Both issues are explained below in details.
      -//
      -// Here the Scanning step is explained and implemented.  We start with the list
      -// of all the tokens produced by the Scanner together with short descriptions.
      -//
      -// Now, tokens:
      -//
      -//      STREAM-START(encoding)          # The stream start.
      -//      STREAM-END                      # The stream end.
      -//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
      -//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
      -//      DOCUMENT-START                  # '---'
      -//      DOCUMENT-END                    # '...'
      -//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
      -//      BLOCK-MAPPING-START             # sequence or a block mapping.
      -//      BLOCK-END                       # Indentation decrease.
      -//      FLOW-SEQUENCE-START             # '['
      -//      FLOW-SEQUENCE-END               # ']'
      -//      BLOCK-SEQUENCE-START            # '{'
      -//      BLOCK-SEQUENCE-END              # '}'
      -//      BLOCK-ENTRY                     # '-'
      -//      FLOW-ENTRY                      # ','
      -//      KEY                             # '?' or nothing (simple keys).
      -//      VALUE                           # ':'
      -//      ALIAS(anchor)                   # '*anchor'
      -//      ANCHOR(anchor)                  # '&anchor'
      -//      TAG(handle,suffix)              # '!handle!suffix'
      -//      SCALAR(value,style)             # A scalar.
      -//
      -// The following two tokens are "virtual" tokens denoting the beginning and the
      -// end of the stream:
      -//
      -//      STREAM-START(encoding)
      -//      STREAM-END
      -//
      -// We pass the information about the input stream encoding with the
      -// STREAM-START token.
      -//
      -// The next two tokens are responsible for tags:
      -//
      -//      VERSION-DIRECTIVE(major,minor)
      -//      TAG-DIRECTIVE(handle,prefix)
      -//
      -// Example:
      -//
      -//      %YAML   1.1
      -//      %TAG    !   !foo
      -//      %TAG    !yaml!  tag:yaml.org,2002:
      -//      ---
      -//
      -// The correspoding sequence of tokens:
      -//
      -//      STREAM-START(utf-8)
      -//      VERSION-DIRECTIVE(1,1)
      -//      TAG-DIRECTIVE("!","!foo")
      -//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
      -//      DOCUMENT-START
      -//      STREAM-END
      -//
      -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
      -// line.
      -//
      -// The document start and end indicators are represented by:
      -//
      -//      DOCUMENT-START
      -//      DOCUMENT-END
      -//
      -// Note that if a YAML stream contains an implicit document (without '---'
      -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
      -// produced.
      -//
      -// In the following examples, we present whole documents together with the
      -// produced tokens.
      -//
      -//      1. An implicit document:
      -//
      -//          'a scalar'
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          SCALAR("a scalar",single-quoted)
      -//          STREAM-END
      -//
      -//      2. An explicit document:
      -//
      -//          ---
      -//          'a scalar'
      -//          ...
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          DOCUMENT-START
      -//          SCALAR("a scalar",single-quoted)
      -//          DOCUMENT-END
      -//          STREAM-END
      -//
      -//      3. Several documents in a stream:
      -//
      -//          'a scalar'
      -//          ---
      -//          'another scalar'
      -//          ---
      -//          'yet another scalar'
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          SCALAR("a scalar",single-quoted)
      -//          DOCUMENT-START
      -//          SCALAR("another scalar",single-quoted)
      -//          DOCUMENT-START
      -//          SCALAR("yet another scalar",single-quoted)
      -//          STREAM-END
      -//
      -// We have already introduced the SCALAR token above.  The following tokens are
      -// used to describe aliases, anchors, tag, and scalars:
      -//
      -//      ALIAS(anchor)
      -//      ANCHOR(anchor)
      -//      TAG(handle,suffix)
      -//      SCALAR(value,style)
      -//
      -// The following series of examples illustrate the usage of these tokens:
      -//
      -//      1. A recursive sequence:
      -//
      -//          &A [ *A ]
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          ANCHOR("A")
      -//          FLOW-SEQUENCE-START
      -//          ALIAS("A")
      -//          FLOW-SEQUENCE-END
      -//          STREAM-END
      -//
      -//      2. A tagged scalar:
      -//
      -//          !!float "3.14"  # A good approximation.
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          TAG("!!","float")
      -//          SCALAR("3.14",double-quoted)
      -//          STREAM-END
      -//
      -//      3. Various scalar styles:
      -//
      -//          --- # Implicit empty plain scalars do not produce tokens.
      -//          --- a plain scalar
      -//          --- 'a single-quoted scalar'
      -//          --- "a double-quoted scalar"
      -//          --- |-
      -//            a literal scalar
      -//          --- >-
      -//            a folded
      -//            scalar
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          DOCUMENT-START
      -//          DOCUMENT-START
      -//          SCALAR("a plain scalar",plain)
      -//          DOCUMENT-START
      -//          SCALAR("a single-quoted scalar",single-quoted)
      -//          DOCUMENT-START
      -//          SCALAR("a double-quoted scalar",double-quoted)
      -//          DOCUMENT-START
      -//          SCALAR("a literal scalar",literal)
      -//          DOCUMENT-START
      -//          SCALAR("a folded scalar",folded)
      -//          STREAM-END
      -//
      -// Now it's time to review collection-related tokens. We will start with
      -// flow collections:
      -//
      -//      FLOW-SEQUENCE-START
      -//      FLOW-SEQUENCE-END
      -//      FLOW-MAPPING-START
      -//      FLOW-MAPPING-END
      -//      FLOW-ENTRY
      -//      KEY
      -//      VALUE
      -//
      -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
      -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
      -// correspondingly.  FLOW-ENTRY represent the ',' indicator.  Finally the
      -// indicators '?' and ':', which are used for denoting mapping keys and values,
      -// are represented by the KEY and VALUE tokens.
      -//
      -// The following examples show flow collections:
      -//
      -//      1. A flow sequence:
      -//
      -//          [item 1, item 2, item 3]
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          FLOW-SEQUENCE-START
      -//          SCALAR("item 1",plain)
      -//          FLOW-ENTRY
      -//          SCALAR("item 2",plain)
      -//          FLOW-ENTRY
      -//          SCALAR("item 3",plain)
      -//          FLOW-SEQUENCE-END
      -//          STREAM-END
      -//
      -//      2. A flow mapping:
      -//
      -//          {
      -//              a simple key: a value,  # Note that the KEY token is produced.
      -//              ? a complex key: another value,
      -//          }
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          FLOW-MAPPING-START
      -//          KEY
      -//          SCALAR("a simple key",plain)
      -//          VALUE
      -//          SCALAR("a value",plain)
      -//          FLOW-ENTRY
      -//          KEY
      -//          SCALAR("a complex key",plain)
      -//          VALUE
      -//          SCALAR("another value",plain)
      -//          FLOW-ENTRY
      -//          FLOW-MAPPING-END
      -//          STREAM-END
      -//
      -// A simple key is a key which is not denoted by the '?' indicator.  Note that
      -// the Scanner still produce the KEY token whenever it encounters a simple key.
      -//
      -// For scanning block collections, the following tokens are used (note that we
      -// repeat KEY and VALUE here):
      -//
      -//      BLOCK-SEQUENCE-START
      -//      BLOCK-MAPPING-START
      -//      BLOCK-END
      -//      BLOCK-ENTRY
      -//      KEY
      -//      VALUE
      -//
      -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
      -// increase that precedes a block collection (cf. the INDENT token in Python).
      -// The token BLOCK-END denote indentation decrease that ends a block collection
      -// (cf. the DEDENT token in Python).  However YAML has some syntax pecularities
      -// that makes detections of these tokens more complex.
      -//
      -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
      -// '-', '?', and ':' correspondingly.
      -//
      -// The following examples show how the tokens BLOCK-SEQUENCE-START,
      -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
      -//
      -//      1. Block sequences:
      -//
      -//          - item 1
      -//          - item 2
      -//          -
      -//            - item 3.1
      -//            - item 3.2
      -//          -
      -//            key 1: value 1
      -//            key 2: value 2
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          BLOCK-SEQUENCE-START
      -//          BLOCK-ENTRY
      -//          SCALAR("item 1",plain)
      -//          BLOCK-ENTRY
      -//          SCALAR("item 2",plain)
      -//          BLOCK-ENTRY
      -//          BLOCK-SEQUENCE-START
      -//          BLOCK-ENTRY
      -//          SCALAR("item 3.1",plain)
      -//          BLOCK-ENTRY
      -//          SCALAR("item 3.2",plain)
      -//          BLOCK-END
      -//          BLOCK-ENTRY
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("key 1",plain)
      -//          VALUE
      -//          SCALAR("value 1",plain)
      -//          KEY
      -//          SCALAR("key 2",plain)
      -//          VALUE
      -//          SCALAR("value 2",plain)
      -//          BLOCK-END
      -//          BLOCK-END
      -//          STREAM-END
      -//
      -//      2. Block mappings:
      -//
      -//          a simple key: a value   # The KEY token is produced here.
      -//          ? a complex key
      -//          : another value
      -//          a mapping:
      -//            key 1: value 1
      -//            key 2: value 2
      -//          a sequence:
      -//            - item 1
      -//            - item 2
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("a simple key",plain)
      -//          VALUE
      -//          SCALAR("a value",plain)
      -//          KEY
      -//          SCALAR("a complex key",plain)
      -//          VALUE
      -//          SCALAR("another value",plain)
      -//          KEY
      -//          SCALAR("a mapping",plain)
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("key 1",plain)
      -//          VALUE
      -//          SCALAR("value 1",plain)
      -//          KEY
      -//          SCALAR("key 2",plain)
      -//          VALUE
      -//          SCALAR("value 2",plain)
      -//          BLOCK-END
      -//          KEY
      -//          SCALAR("a sequence",plain)
      -//          VALUE
      -//          BLOCK-SEQUENCE-START
      -//          BLOCK-ENTRY
      -//          SCALAR("item 1",plain)
      -//          BLOCK-ENTRY
      -//          SCALAR("item 2",plain)
      -//          BLOCK-END
      -//          BLOCK-END
      -//          STREAM-END
      -//
      -// YAML does not always require to start a new block collection from a new
      -// line.  If the current line contains only '-', '?', and ':' indicators, a new
      -// block collection may start at the current line.  The following examples
      -// illustrate this case:
      -//
      -//      1. Collections in a sequence:
      -//
      -//          - - item 1
      -//            - item 2
      -//          - key 1: value 1
      -//            key 2: value 2
      -//          - ? complex key
      -//            : complex value
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          BLOCK-SEQUENCE-START
      -//          BLOCK-ENTRY
      -//          BLOCK-SEQUENCE-START
      -//          BLOCK-ENTRY
      -//          SCALAR("item 1",plain)
      -//          BLOCK-ENTRY
      -//          SCALAR("item 2",plain)
      -//          BLOCK-END
      -//          BLOCK-ENTRY
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("key 1",plain)
      -//          VALUE
      -//          SCALAR("value 1",plain)
      -//          KEY
      -//          SCALAR("key 2",plain)
      -//          VALUE
      -//          SCALAR("value 2",plain)
      -//          BLOCK-END
      -//          BLOCK-ENTRY
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("complex key")
      -//          VALUE
      -//          SCALAR("complex value")
      -//          BLOCK-END
      -//          BLOCK-END
      -//          STREAM-END
      -//
      -//      2. Collections in a mapping:
      -//
      -//          ? a sequence
      -//          : - item 1
      -//            - item 2
      -//          ? a mapping
      -//          : key 1: value 1
      -//            key 2: value 2
      -//
      -//      Tokens:
      -//
      -//          STREAM-START(utf-8)
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("a sequence",plain)
      -//          VALUE
      -//          BLOCK-SEQUENCE-START
      -//          BLOCK-ENTRY
      -//          SCALAR("item 1",plain)
      -//          BLOCK-ENTRY
      -//          SCALAR("item 2",plain)
      -//          BLOCK-END
      -//          KEY
      -//          SCALAR("a mapping",plain)
      -//          VALUE
      -//          BLOCK-MAPPING-START
      -//          KEY
      -//          SCALAR("key 1",plain)
      -//          VALUE
      -//          SCALAR("value 1",plain)
      -//          KEY
      -//          SCALAR("key 2",plain)
      -//          VALUE
      -//          SCALAR("value 2",plain)
      -//          BLOCK-END
      -//          BLOCK-END
      -//          STREAM-END
      -//
      -// YAML also permits non-indented sequences if they are included into a block
      -// mapping.  In this case, the token BLOCK-SEQUENCE-START is not produced:
      -//
      -//      key:
      -//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
      -//      - item 2
      -//
      -// Tokens:
      -//
      -//      STREAM-START(utf-8)
      -//      BLOCK-MAPPING-START
      -//      KEY
      -//      SCALAR("key",plain)
      -//      VALUE
      -//      BLOCK-ENTRY
      -//      SCALAR("item 1",plain)
      -//      BLOCK-ENTRY
      -//      SCALAR("item 2",plain)
      -//      BLOCK-END
      -//
      -
      -// Ensure that the buffer contains the required number of characters.
      -// Return true on success, false on failure (reader error or memory error).
      -func cache(parser *yaml_parser_t, length int) bool {
      -	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
      -	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
      -}
      -
      -// Advance the buffer pointer.
      -func skip(parser *yaml_parser_t) {
      -	parser.mark.index++
      -	parser.mark.column++
      -	parser.unread--
      -	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
      -}
      -
      -func skip_line(parser *yaml_parser_t) {
      -	if is_crlf(parser.buffer, parser.buffer_pos) {
      -		parser.mark.index += 2
      -		parser.mark.column = 0
      -		parser.mark.line++
      -		parser.unread -= 2
      -		parser.buffer_pos += 2
      -	} else if is_break(parser.buffer, parser.buffer_pos) {
      -		parser.mark.index++
      -		parser.mark.column = 0
      -		parser.mark.line++
      -		parser.unread--
      -		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
      -	}
      -}
      -
      -// Copy a character to a string buffer and advance pointers.
      -func read(parser *yaml_parser_t, s []byte) []byte {
      -	w := width(parser.buffer[parser.buffer_pos])
      -	if w == 0 {
      -		panic("invalid character sequence")
      -	}
      -	if len(s) == 0 {
      -		s = make([]byte, 0, 32)
      -	}
      -	if w == 1 && len(s)+w <= cap(s) {
      -		s = s[:len(s)+1]
      -		s[len(s)-1] = parser.buffer[parser.buffer_pos]
      -		parser.buffer_pos++
      -	} else {
      -		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
      -		parser.buffer_pos += w
      -	}
      -	parser.mark.index++
      -	parser.mark.column++
      -	parser.unread--
      -	return s
      -}
      -
      -// Copy a line break character to a string buffer and advance pointers.
      -func read_line(parser *yaml_parser_t, s []byte) []byte {
      -	buf := parser.buffer
      -	pos := parser.buffer_pos
      -	switch {
      -	case buf[pos] == '\r' && buf[pos+1] == '\n':
      -		// CR LF . LF
      -		s = append(s, '\n')
      -		parser.buffer_pos += 2
      -		parser.mark.index++
      -		parser.unread--
      -	case buf[pos] == '\r' || buf[pos] == '\n':
      -		// CR|LF . LF
      -		s = append(s, '\n')
      -		parser.buffer_pos += 1
      -	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
      -		// NEL . LF
      -		s = append(s, '\n')
      -		parser.buffer_pos += 2
      -	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
      -		// LS|PS . LS|PS
      -		s = append(s, buf[parser.buffer_pos:pos+3]...)
      -		parser.buffer_pos += 3
      -	default:
      -		return s
      -	}
      -	parser.mark.index++
      -	parser.mark.column = 0
      -	parser.mark.line++
      -	parser.unread--
      -	return s
      -}
      -
      -// Get the next token.
      -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
      -	// Erase the token object.
      -	*token = yaml_token_t{} // [Go] Is this necessary?
      -
      -	// No tokens after STREAM-END or error.
      -	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
      -		return true
      -	}
      -
      -	// Ensure that the tokens queue contains enough tokens.
      -	if !parser.token_available {
      -		if !yaml_parser_fetch_more_tokens(parser) {
      -			return false
      -		}
      -	}
      -
      -	// Fetch the next token from the queue.
      -	*token = parser.tokens[parser.tokens_head]
      -	parser.tokens_head++
      -	parser.tokens_parsed++
      -	parser.token_available = false
      -
      -	if token.typ == yaml_STREAM_END_TOKEN {
      -		parser.stream_end_produced = true
      -	}
      -	return true
      -}
      -
      -// Set the scanner error and return false.
      -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
      -	parser.error = yaml_SCANNER_ERROR
      -	parser.context = context
      -	parser.context_mark = context_mark
      -	parser.problem = problem
      -	parser.problem_mark = parser.mark
      -	return false
      -}
      -
      -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
      -	context := "while parsing a tag"
      -	if directive {
      -		context = "while parsing a %TAG directive"
      -	}
      -	return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
      -}
      -
      -func trace(args ...interface{}) func() {
      -	pargs := append([]interface{}{"+++"}, args...)
      -	fmt.Println(pargs...)
      -	pargs = append([]interface{}{"---"}, args...)
      -	return func() { fmt.Println(pargs...) }
      -}
      -
      -// Ensure that the tokens queue contains at least one token which can be
      -// returned to the Parser.
      -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
      -	// While we need more tokens to fetch, do it.
      -	for {
      -		// Check if we really need to fetch more tokens.
      -		need_more_tokens := false
      -
      -		if parser.tokens_head == len(parser.tokens) {
      -			// Queue is empty.
      -			need_more_tokens = true
      -		} else {
      -			// Check if any potential simple key may occupy the head position.
      -			if !yaml_parser_stale_simple_keys(parser) {
      -				return false
      -			}
      -
      -			for i := range parser.simple_keys {
      -				simple_key := &parser.simple_keys[i]
      -				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
      -					need_more_tokens = true
      -					break
      -				}
      -			}
      -		}
      -
      -		// We are finished.
      -		if !need_more_tokens {
      -			break
      -		}
      -		// Fetch the next token.
      -		if !yaml_parser_fetch_next_token(parser) {
      -			return false
      -		}
      -	}
      -
      -	parser.token_available = true
      -	return true
      -}
      -
      -// The dispatcher for token fetchers.
      -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
      -	// Ensure that the buffer is initialized.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	// Check if we just started scanning.  Fetch STREAM-START then.
      -	if !parser.stream_start_produced {
      -		return yaml_parser_fetch_stream_start(parser)
      -	}
      -
      -	// Eat whitespaces and comments until we reach the next token.
      -	if !yaml_parser_scan_to_next_token(parser) {
      -		return false
      -	}
      -
      -	// Remove obsolete potential simple keys.
      -	if !yaml_parser_stale_simple_keys(parser) {
      -		return false
      -	}
      -
      -	// Check the indentation level against the current column.
      -	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
      -		return false
      -	}
      -
      -	// Ensure that the buffer contains at least 4 characters.  4 is the length
      -	// of the longest indicators ('--- ' and '... ').
      -	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
      -		return false
      -	}
      -
      -	// Is it the end of the stream?
      -	if is_z(parser.buffer, parser.buffer_pos) {
      -		return yaml_parser_fetch_stream_end(parser)
      -	}
      -
      -	// Is it a directive?
      -	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
      -		return yaml_parser_fetch_directive(parser)
      -	}
      -
      -	buf := parser.buffer
      -	pos := parser.buffer_pos
      -
      -	// Is it the document start indicator?
      -	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
      -		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
      -	}
      -
      -	// Is it the document end indicator?
      -	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
      -		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
      -	}
      -
      -	// Is it the flow sequence start indicator?
      -	if buf[pos] == '[' {
      -		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
      -	}
      -
      -	// Is it the flow mapping start indicator?
      -	if parser.buffer[parser.buffer_pos] == '{' {
      -		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
      -	}
      -
      -	// Is it the flow sequence end indicator?
      -	if parser.buffer[parser.buffer_pos] == ']' {
      -		return yaml_parser_fetch_flow_collection_end(parser,
      -			yaml_FLOW_SEQUENCE_END_TOKEN)
      -	}
      -
      -	// Is it the flow mapping end indicator?
      -	if parser.buffer[parser.buffer_pos] == '}' {
      -		return yaml_parser_fetch_flow_collection_end(parser,
      -			yaml_FLOW_MAPPING_END_TOKEN)
      -	}
      -
      -	// Is it the flow entry indicator?
      -	if parser.buffer[parser.buffer_pos] == ',' {
      -		return yaml_parser_fetch_flow_entry(parser)
      -	}
      -
      -	// Is it the block entry indicator?
      -	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
      -		return yaml_parser_fetch_block_entry(parser)
      -	}
      -
      -	// Is it the key indicator?
      -	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
      -		return yaml_parser_fetch_key(parser)
      -	}
      -
      -	// Is it the value indicator?
      -	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
      -		return yaml_parser_fetch_value(parser)
      -	}
      -
      -	// Is it an alias?
      -	if parser.buffer[parser.buffer_pos] == '*' {
      -		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
      -	}
      -
      -	// Is it an anchor?
      -	if parser.buffer[parser.buffer_pos] == '&' {
      -		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
      -	}
      -
      -	// Is it a tag?
      -	if parser.buffer[parser.buffer_pos] == '!' {
      -		return yaml_parser_fetch_tag(parser)
      -	}
      -
      -	// Is it a literal scalar?
      -	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
      -		return yaml_parser_fetch_block_scalar(parser, true)
      -	}
      -
      -	// Is it a folded scalar?
      -	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
      -		return yaml_parser_fetch_block_scalar(parser, false)
      -	}
      -
      -	// Is it a single-quoted scalar?
      -	if parser.buffer[parser.buffer_pos] == '\'' {
      -		return yaml_parser_fetch_flow_scalar(parser, true)
      -	}
      -
      -	// Is it a double-quoted scalar?
      -	if parser.buffer[parser.buffer_pos] == '"' {
      -		return yaml_parser_fetch_flow_scalar(parser, false)
      -	}
      -
      -	// Is it a plain scalar?
      -	//
      -	// A plain scalar may start with any non-blank characters except
      -	//
      -	//      '-', '?', ':', ',', '[', ']', '{', '}',
      -	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
      -	//      '%', '@', '`'.
      -	//
      -	// In the block context (and, for the '-' indicator, in the flow context
      -	// too), it may also start with the characters
      -	//
      -	//      '-', '?', ':'
      -	//
      -	// if it is followed by a non-space character.
      -	//
      -	// The last rule is more restrictive than the specification requires.
      -	// [Go] Make this logic more reasonable.
      -	//switch parser.buffer[parser.buffer_pos] {
      -	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
      -	//}
      -	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
      -		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
      -		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
      -		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
      -		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
      -		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
      -		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
      -		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
      -		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
      -		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
      -		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
      -		(parser.flow_level == 0 &&
      -			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
      -			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
      -		return yaml_parser_fetch_plain_scalar(parser)
      -	}
      -
      -	// If we don't determine the token type so far, it is an error.
      -	return yaml_parser_set_scanner_error(parser,
      -		"while scanning for the next token", parser.mark,
      -		"found character that cannot start any token")
      -}
      -
      -// Check the list of potential simple keys and remove the positions that
      -// cannot contain simple keys anymore.
      -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
      -	// Check for a potential simple key for each flow level.
      -	for i := range parser.simple_keys {
      -		simple_key := &parser.simple_keys[i]
      -
      -		// The specification requires that a simple key
      -		//
      -		//  - is limited to a single line,
      -		//  - is shorter than 1024 characters.
      -		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
      -
      -			// Check if the potential simple key to be removed is required.
      -			if simple_key.required {
      -				return yaml_parser_set_scanner_error(parser,
      -					"while scanning a simple key", simple_key.mark,
      -					"could not find expected ':'")
      -			}
      -			simple_key.possible = false
      -		}
      -	}
      -	return true
      -}
      -
      -// Check if a simple key may start at the current position and add it if
      -// needed.
      -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
      -	// A simple key is required at the current position if the scanner is in
      -	// the block context and the current column coincides with the indentation
      -	// level.
      -
      -	required := parser.flow_level == 0 && parser.indent == parser.mark.column
      -
      -	// A simple key is required only when it is the first token in the current
      -	// line.  Therefore it is always allowed.  But we add a check anyway.
      -	if required && !parser.simple_key_allowed {
      -		panic("should not happen")
      -	}
      -
      -	//
      -	// If the current position may start a simple key, save it.
      -	//
      -	if parser.simple_key_allowed {
      -		simple_key := yaml_simple_key_t{
      -			possible:     true,
      -			required:     required,
      -			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
      -		}
      -		simple_key.mark = parser.mark
      -
      -		if !yaml_parser_remove_simple_key(parser) {
      -			return false
      -		}
      -		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
      -	}
      -	return true
      -}
      -
      -// Remove a potential simple key at the current flow level.
      -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
      -	i := len(parser.simple_keys) - 1
      -	if parser.simple_keys[i].possible {
      -		// If the key is required, it is an error.
      -		if parser.simple_keys[i].required {
      -			return yaml_parser_set_scanner_error(parser,
      -				"while scanning a simple key", parser.simple_keys[i].mark,
      -				"could not find expected ':'")
      -		}
      -	}
      -	// Remove the key from the stack.
      -	parser.simple_keys[i].possible = false
      -	return true
      -}
      -
      -// Increase the flow level and resize the simple key list if needed.
      -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
      -	// Reset the simple key on the next level.
      -	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
      -
      -	// Increase the flow level.
      -	parser.flow_level++
      -	return true
      -}
      -
      -// Decrease the flow level.
      -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
      -	if parser.flow_level > 0 {
      -		parser.flow_level--
      -		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
      -	}
      -	return true
      -}
      -
      -// Push the current indentation level to the stack and set the new level
      -// the current column is greater than the indentation level.  In this case,
      -// append or insert the specified token into the token queue.
      -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
      -	// In the flow context, do nothing.
      -	if parser.flow_level > 0 {
      -		return true
      -	}
      -
      -	if parser.indent < column {
      -		// Push the current indentation level to the stack and set the new
      -		// indentation level.
      -		parser.indents = append(parser.indents, parser.indent)
      -		parser.indent = column
      -
      -		// Create a token and insert it into the queue.
      -		token := yaml_token_t{
      -			typ:        typ,
      -			start_mark: mark,
      -			end_mark:   mark,
      -		}
      -		if number > -1 {
      -			number -= parser.tokens_parsed
      -		}
      -		yaml_insert_token(parser, number, &token)
      -	}
      -	return true
      -}
      -
      -// Pop indentation levels from the indents stack until the current level
      -// becomes less or equal to the column.  For each intendation level, append
      -// the BLOCK-END token.
      -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
      -	// In the flow context, do nothing.
      -	if parser.flow_level > 0 {
      -		return true
      -	}
      -
      -	// Loop through the intendation levels in the stack.
      -	for parser.indent > column {
      -		// Create a token and append it to the queue.
      -		token := yaml_token_t{
      -			typ:        yaml_BLOCK_END_TOKEN,
      -			start_mark: parser.mark,
      -			end_mark:   parser.mark,
      -		}
      -		yaml_insert_token(parser, -1, &token)
      -
      -		// Pop the indentation level.
      -		parser.indent = parser.indents[len(parser.indents)-1]
      -		parser.indents = parser.indents[:len(parser.indents)-1]
      -	}
      -	return true
      -}
      -
      -// Initialize the scanner and produce the STREAM-START token.
      -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
      -
      -	// Set the initial indentation.
      -	parser.indent = -1
      -
      -	// Initialize the simple key stack.
      -	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
      -
      -	// A simple key is allowed at the beginning of the stream.
      -	parser.simple_key_allowed = true
      -
      -	// We have started.
      -	parser.stream_start_produced = true
      -
      -	// Create the STREAM-START token and append it to the queue.
      -	token := yaml_token_t{
      -		typ:        yaml_STREAM_START_TOKEN,
      -		start_mark: parser.mark,
      -		end_mark:   parser.mark,
      -		encoding:   parser.encoding,
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the STREAM-END token and shut down the scanner.
      -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
      -
      -	// Force new line.
      -	if parser.mark.column != 0 {
      -		parser.mark.column = 0
      -		parser.mark.line++
      -	}
      -
      -	// Reset the indentation level.
      -	if !yaml_parser_unroll_indent(parser, -1) {
      -		return false
      -	}
      -
      -	// Reset simple keys.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	parser.simple_key_allowed = false
      -
      -	// Create the STREAM-END token and append it to the queue.
      -	token := yaml_token_t{
      -		typ:        yaml_STREAM_END_TOKEN,
      -		start_mark: parser.mark,
      -		end_mark:   parser.mark,
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
      -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
      -	// Reset the indentation level.
      -	if !yaml_parser_unroll_indent(parser, -1) {
      -		return false
      -	}
      -
      -	// Reset simple keys.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	parser.simple_key_allowed = false
      -
      -	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
      -	token := yaml_token_t{}
      -	if !yaml_parser_scan_directive(parser, &token) {
      -		return false
      -	}
      -	// Append the token to the queue.
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the DOCUMENT-START or DOCUMENT-END token.
      -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
      -	// Reset the indentation level.
      -	if !yaml_parser_unroll_indent(parser, -1) {
      -		return false
      -	}
      -
      -	// Reset simple keys.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	parser.simple_key_allowed = false
      -
      -	// Consume the token.
      -	start_mark := parser.mark
      -
      -	skip(parser)
      -	skip(parser)
      -	skip(parser)
      -
      -	end_mark := parser.mark
      -
      -	// Create the DOCUMENT-START or DOCUMENT-END token.
      -	token := yaml_token_t{
      -		typ:        typ,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	// Append the token to the queue.
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
      -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
      -	// The indicators '[' and '{' may start a simple key.
      -	if !yaml_parser_save_simple_key(parser) {
      -		return false
      -	}
      -
      -	// Increase the flow level.
      -	if !yaml_parser_increase_flow_level(parser) {
      -		return false
      -	}
      -
      -	// A simple key may follow the indicators '[' and '{'.
      -	parser.simple_key_allowed = true
      -
      -	// Consume the token.
      -	start_mark := parser.mark
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token.
      -	token := yaml_token_t{
      -		typ:        typ,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	// Append the token to the queue.
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
      -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
      -	// Reset any potential simple key on the current flow level.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	// Decrease the flow level.
      -	if !yaml_parser_decrease_flow_level(parser) {
      -		return false
      -	}
      -
      -	// No simple keys after the indicators ']' and '}'.
      -	parser.simple_key_allowed = false
      -
      -	// Consume the token.
      -
      -	start_mark := parser.mark
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token.
      -	token := yaml_token_t{
      -		typ:        typ,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	// Append the token to the queue.
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the FLOW-ENTRY token.
      -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
      -	// Reset any potential simple keys on the current flow level.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	// Simple keys are allowed after ','.
      -	parser.simple_key_allowed = true
      -
      -	// Consume the token.
      -	start_mark := parser.mark
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create the FLOW-ENTRY token and append it to the queue.
      -	token := yaml_token_t{
      -		typ:        yaml_FLOW_ENTRY_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the BLOCK-ENTRY token.
      -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
      -	// Check if the scanner is in the block context.
      -	if parser.flow_level == 0 {
      -		// Check if we are allowed to start a new entry.
      -		if !parser.simple_key_allowed {
      -			return yaml_parser_set_scanner_error(parser, "", parser.mark,
      -				"block sequence entries are not allowed in this context")
      -		}
      -		// Add the BLOCK-SEQUENCE-START token if needed.
      -		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
      -			return false
      -		}
      -	} else {
      -		// It is an error for the '-' indicator to occur in the flow context,
      -		// but we let the Parser detect and report about it because the Parser
      -		// is able to point to the context.
      -	}
      -
      -	// Reset any potential simple keys on the current flow level.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	// Simple keys are allowed after '-'.
      -	parser.simple_key_allowed = true
      -
      -	// Consume the token.
      -	start_mark := parser.mark
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create the BLOCK-ENTRY token and append it to the queue.
      -	token := yaml_token_t{
      -		typ:        yaml_BLOCK_ENTRY_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the KEY token.
      -func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
      -
      -	// In the block context, additional checks are required.
      -	if parser.flow_level == 0 {
      -		// Check if we are allowed to start a new key (not nessesary simple).
      -		if !parser.simple_key_allowed {
      -			return yaml_parser_set_scanner_error(parser, "", parser.mark,
      -				"mapping keys are not allowed in this context")
      -		}
      -		// Add the BLOCK-MAPPING-START token if needed.
      -		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
      -			return false
      -		}
      -	}
      -
      -	// Reset any potential simple keys on the current flow level.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	// Simple keys are allowed after '?' in the block context.
      -	parser.simple_key_allowed = parser.flow_level == 0
      -
      -	// Consume the token.
      -	start_mark := parser.mark
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create the KEY token and append it to the queue.
      -	token := yaml_token_t{
      -		typ:        yaml_KEY_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the VALUE token.
      -func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
      -
      -	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
      -
      -	// Have we found a simple key?
      -	if simple_key.possible {
      -		// Create the KEY token and insert it into the queue.
      -		token := yaml_token_t{
      -			typ:        yaml_KEY_TOKEN,
      -			start_mark: simple_key.mark,
      -			end_mark:   simple_key.mark,
      -		}
      -		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
      -
      -		// In the block context, we may need to add the BLOCK-MAPPING-START token.
      -		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
      -			simple_key.token_number,
      -			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
      -			return false
      -		}
      -
      -		// Remove the simple key.
      -		simple_key.possible = false
      -
      -		// A simple key cannot follow another simple key.
      -		parser.simple_key_allowed = false
      -
      -	} else {
      -		// The ':' indicator follows a complex key.
      -
      -		// In the block context, extra checks are required.
      -		if parser.flow_level == 0 {
      -
      -			// Check if we are allowed to start a complex value.
      -			if !parser.simple_key_allowed {
      -				return yaml_parser_set_scanner_error(parser, "", parser.mark,
      -					"mapping values are not allowed in this context")
      -			}
      -
      -			// Add the BLOCK-MAPPING-START token if needed.
      -			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
      -				return false
      -			}
      -		}
      -
      -		// Simple keys after ':' are allowed in the block context.
      -		parser.simple_key_allowed = parser.flow_level == 0
      -	}
      -
      -	// Consume the token.
      -	start_mark := parser.mark
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create the VALUE token and append it to the queue.
      -	token := yaml_token_t{
      -		typ:        yaml_VALUE_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the ALIAS or ANCHOR token.
      -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
      -	// An anchor or an alias could be a simple key.
      -	if !yaml_parser_save_simple_key(parser) {
      -		return false
      -	}
      -
      -	// A simple key cannot follow an anchor or an alias.
      -	parser.simple_key_allowed = false
      -
      -	// Create the ALIAS or ANCHOR token and append it to the queue.
      -	var token yaml_token_t
      -	if !yaml_parser_scan_anchor(parser, &token, typ) {
      -		return false
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the TAG token.
      -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
      -	// A tag could be a simple key.
      -	if !yaml_parser_save_simple_key(parser) {
      -		return false
      -	}
      -
      -	// A simple key cannot follow a tag.
      -	parser.simple_key_allowed = false
      -
      -	// Create the TAG token and append it to the queue.
      -	var token yaml_token_t
      -	if !yaml_parser_scan_tag(parser, &token) {
      -		return false
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
      -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
      -	// Remove any potential simple keys.
      -	if !yaml_parser_remove_simple_key(parser) {
      -		return false
      -	}
      -
      -	// A simple key may follow a block scalar.
      -	parser.simple_key_allowed = true
      -
      -	// Create the SCALAR token and append it to the queue.
      -	var token yaml_token_t
      -	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
      -		return false
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
      -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
      -	// A plain scalar could be a simple key.
      -	if !yaml_parser_save_simple_key(parser) {
      -		return false
      -	}
      -
      -	// A simple key cannot follow a flow scalar.
      -	parser.simple_key_allowed = false
      -
      -	// Create the SCALAR token and append it to the queue.
      -	var token yaml_token_t
      -	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
      -		return false
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Produce the SCALAR(...,plain) token.
      -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
      -	// A plain scalar could be a simple key.
      -	if !yaml_parser_save_simple_key(parser) {
      -		return false
      -	}
      -
      -	// A simple key cannot follow a flow scalar.
      -	parser.simple_key_allowed = false
      -
      -	// Create the SCALAR token and append it to the queue.
      -	var token yaml_token_t
      -	if !yaml_parser_scan_plain_scalar(parser, &token) {
      -		return false
      -	}
      -	yaml_insert_token(parser, -1, &token)
      -	return true
      -}
      -
      -// Eat whitespaces and comments until the next token is found.
      -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
      -
      -	// Until the next token is not found.
      -	for {
      -		// Allow the BOM mark to start a line.
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
      -			skip(parser)
      -		}
      -
      -		// Eat whitespaces.
      -		// Tabs are allowed:
      -		//  - in the flow context
      -		//  - in the block context, but not at the beginning of the line or
      -		//  after '-', '?', or ':' (complex value).
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -
      -		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
      -			skip(parser)
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -
      -		// Eat a comment until a line break.
      -		if parser.buffer[parser.buffer_pos] == '#' {
      -			for !is_breakz(parser.buffer, parser.buffer_pos) {
      -				skip(parser)
      -				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -					return false
      -				}
      -			}
      -		}
      -
      -		// If it is a line break, eat it.
      -		if is_break(parser.buffer, parser.buffer_pos) {
      -			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -				return false
      -			}
      -			skip_line(parser)
      -
      -			// In the block context, a new line may start a simple key.
      -			if parser.flow_level == 0 {
      -				parser.simple_key_allowed = true
      -			}
      -		} else {
      -			break // We have found a token.
      -		}
      -	}
      -
      -	return true
      -}
      -
      -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
      -//
      -// Scope:
      -//      %YAML    1.1    # a comment \n
      -//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      -//      %TAG    !yaml!  tag:yaml.org,2002:  \n
      -//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      -//
      -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
      -	// Eat '%'.
      -	start_mark := parser.mark
      -	skip(parser)
      -
      -	// Scan the directive name.
      -	var name []byte
      -	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
      -		return false
      -	}
      -
      -	// Is it a YAML directive?
      -	if bytes.Equal(name, []byte("YAML")) {
      -		// Scan the VERSION directive value.
      -		var major, minor int8
      -		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
      -			return false
      -		}
      -		end_mark := parser.mark
      -
      -		// Create a VERSION-DIRECTIVE token.
      -		*token = yaml_token_t{
      -			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			major:      major,
      -			minor:      minor,
      -		}
      -
      -		// Is it a TAG directive?
      -	} else if bytes.Equal(name, []byte("TAG")) {
      -		// Scan the TAG directive value.
      -		var handle, prefix []byte
      -		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
      -			return false
      -		}
      -		end_mark := parser.mark
      -
      -		// Create a TAG-DIRECTIVE token.
      -		*token = yaml_token_t{
      -			typ:        yaml_TAG_DIRECTIVE_TOKEN,
      -			start_mark: start_mark,
      -			end_mark:   end_mark,
      -			value:      handle,
      -			prefix:     prefix,
      -		}
      -
      -		// Unknown directive.
      -	} else {
      -		yaml_parser_set_scanner_error(parser, "while scanning a directive",
      -			start_mark, "found uknown directive name")
      -		return false
      -	}
      -
      -	// Eat the rest of the line including any comments.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	for is_blank(parser.buffer, parser.buffer_pos) {
      -		skip(parser)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	if parser.buffer[parser.buffer_pos] == '#' {
      -		for !is_breakz(parser.buffer, parser.buffer_pos) {
      -			skip(parser)
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -	}
      -
      -	// Check if we are at the end of the line.
      -	if !is_breakz(parser.buffer, parser.buffer_pos) {
      -		yaml_parser_set_scanner_error(parser, "while scanning a directive",
      -			start_mark, "did not find expected comment or line break")
      -		return false
      -	}
      -
      -	// Eat a line break.
      -	if is_break(parser.buffer, parser.buffer_pos) {
      -		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -			return false
      -		}
      -		skip_line(parser)
      -	}
      -
      -	return true
      -}
      -
      -// Scan the directive name.
      -//
      -// Scope:
      -//      %YAML   1.1     # a comment \n
      -//       ^^^^
      -//      %TAG    !yaml!  tag:yaml.org,2002:  \n
      -//       ^^^
      -//
      -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
      -	// Consume the directive name.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	var s []byte
      -	for is_alpha(parser.buffer, parser.buffer_pos) {
      -		s = read(parser, s)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Check if the name is empty.
      -	if len(s) == 0 {
      -		yaml_parser_set_scanner_error(parser, "while scanning a directive",
      -			start_mark, "could not find expected directive name")
      -		return false
      -	}
      -
      -	// Check for an blank character after the name.
      -	if !is_blankz(parser.buffer, parser.buffer_pos) {
      -		yaml_parser_set_scanner_error(parser, "while scanning a directive",
      -			start_mark, "found unexpected non-alphabetical character")
      -		return false
      -	}
      -	*name = s
      -	return true
      -}
      -
      -// Scan the value of VERSION-DIRECTIVE.
      -//
      -// Scope:
      -//      %YAML   1.1     # a comment \n
      -//           ^^^^^^
      -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
      -	// Eat whitespaces.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	for is_blank(parser.buffer, parser.buffer_pos) {
      -		skip(parser)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Consume the major version number.
      -	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
      -		return false
      -	}
      -
      -	// Eat '.'.
      -	if parser.buffer[parser.buffer_pos] != '.' {
      -		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
      -			start_mark, "did not find expected digit or '.' character")
      -	}
      -
      -	skip(parser)
      -
      -	// Consume the minor version number.
      -	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
      -		return false
      -	}
      -	return true
      -}
      -
      -const max_number_length = 2
      -
      -// Scan the version number of VERSION-DIRECTIVE.
      -//
      -// Scope:
      -//      %YAML   1.1     # a comment \n
      -//              ^
      -//      %YAML   1.1     # a comment \n
      -//                ^
      -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
      -
      -	// Repeat while the next character is digit.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	var value, length int8
      -	for is_digit(parser.buffer, parser.buffer_pos) {
      -		// Check if the number is too long.
      -		length++
      -		if length > max_number_length {
      -			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
      -				start_mark, "found extremely long version number")
      -		}
      -		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
      -		skip(parser)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Check if the number was present.
      -	if length == 0 {
      -		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
      -			start_mark, "did not find expected version number")
      -	}
      -	*number = value
      -	return true
      -}
      -
      -// Scan the value of a TAG-DIRECTIVE token.
      -//
      -// Scope:
      -//      %TAG    !yaml!  tag:yaml.org,2002:  \n
      -//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      -//
      -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
      -	var handle_value, prefix_value []byte
      -
      -	// Eat whitespaces.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	for is_blank(parser.buffer, parser.buffer_pos) {
      -		skip(parser)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Scan a handle.
      -	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
      -		return false
      -	}
      -
      -	// Expect a whitespace.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	if !is_blank(parser.buffer, parser.buffer_pos) {
      -		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
      -			start_mark, "did not find expected whitespace")
      -		return false
      -	}
      -
      -	// Eat whitespaces.
      -	for is_blank(parser.buffer, parser.buffer_pos) {
      -		skip(parser)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Scan a prefix.
      -	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
      -		return false
      -	}
      -
      -	// Expect a whitespace or line break.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	if !is_blankz(parser.buffer, parser.buffer_pos) {
      -		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
      -			start_mark, "did not find expected whitespace or line break")
      -		return false
      -	}
      -
      -	*handle = handle_value
      -	*prefix = prefix_value
      -	return true
      -}
      -
      -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
      -	var s []byte
      -
      -	// Eat the indicator character.
      -	start_mark := parser.mark
      -	skip(parser)
      -
      -	// Consume the value.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	for is_alpha(parser.buffer, parser.buffer_pos) {
      -		s = read(parser, s)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	end_mark := parser.mark
      -
      -	/*
      -	 * Check if length of the anchor is greater than 0 and it is followed by
      -	 * a whitespace character or one of the indicators:
      -	 *
      -	 *      '?', ':', ',', ']', '}', '%', '@', '`'.
      -	 */
      -
      -	if len(s) == 0 ||
      -		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
      -			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
      -			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
      -			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
      -			parser.buffer[parser.buffer_pos] == '`') {
      -		context := "while scanning an alias"
      -		if typ == yaml_ANCHOR_TOKEN {
      -			context = "while scanning an anchor"
      -		}
      -		yaml_parser_set_scanner_error(parser, context, start_mark,
      -			"did not find expected alphabetic or numeric character")
      -		return false
      -	}
      -
      -	// Create a token.
      -	*token = yaml_token_t{
      -		typ:        typ,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -		value:      s,
      -	}
      -
      -	return true
      -}
      -
      -/*
      - * Scan a TAG token.
      - */
      -
      -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
      -	var handle, suffix []byte
      -
      -	start_mark := parser.mark
      -
      -	// Check if the tag is in the canonical form.
      -	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -		return false
      -	}
      -
      -	if parser.buffer[parser.buffer_pos+1] == '<' {
      -		// Keep the handle as ''
      -
      -		// Eat '!<'
      -		skip(parser)
      -		skip(parser)
      -
      -		// Consume the tag value.
      -		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
      -			return false
      -		}
      -
      -		// Check for '>' and eat it.
      -		if parser.buffer[parser.buffer_pos] != '>' {
      -			yaml_parser_set_scanner_error(parser, "while scanning a tag",
      -				start_mark, "did not find the expected '>'")
      -			return false
      -		}
      -
      -		skip(parser)
      -	} else {
      -		// The tag has either the '!suffix' or the '!handle!suffix' form.
      -
      -		// First, try to scan a handle.
      -		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
      -			return false
      -		}
      -
      -		// Check if it is, indeed, handle.
      -		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
      -			// Scan the suffix now.
      -			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
      -				return false
      -			}
      -		} else {
      -			// It wasn't a handle after all.  Scan the rest of the tag.
      -			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
      -				return false
      -			}
      -
      -			// Set the handle to '!'.
      -			handle = []byte{'!'}
      -
      -			// A special case: the '!' tag.  Set the handle to '' and the
      -			// suffix to '!'.
      -			if len(suffix) == 0 {
      -				handle, suffix = suffix, handle
      -			}
      -		}
      -	}
      -
      -	// Check the character which ends the tag.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	if !is_blankz(parser.buffer, parser.buffer_pos) {
      -		yaml_parser_set_scanner_error(parser, "while scanning a tag",
      -			start_mark, "did not find expected whitespace or line break")
      -		return false
      -	}
      -
      -	end_mark := parser.mark
      -
      -	// Create a token.
      -	*token = yaml_token_t{
      -		typ:        yaml_TAG_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -		value:      handle,
      -		suffix:     suffix,
      -	}
      -	return true
      -}
      -
      -// Scan a tag handle.
      -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
      -	// Check the initial '!' character.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	if parser.buffer[parser.buffer_pos] != '!' {
      -		yaml_parser_set_scanner_tag_error(parser, directive,
      -			start_mark, "did not find expected '!'")
      -		return false
      -	}
      -
      -	var s []byte
      -
      -	// Copy the '!' character.
      -	s = read(parser, s)
      -
      -	// Copy all subsequent alphabetical and numerical characters.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	for is_alpha(parser.buffer, parser.buffer_pos) {
      -		s = read(parser, s)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Check if the trailing character is '!' and copy it.
      -	if parser.buffer[parser.buffer_pos] == '!' {
      -		s = read(parser, s)
      -	} else {
      -		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
      -		// directive, it's an error.  If it's a tag token, it must be a part of URI.
      -		if directive && !(s[0] == '!' && s[1] == 0) {
      -			yaml_parser_set_scanner_tag_error(parser, directive,
      -				start_mark, "did not find expected '!'")
      -			return false
      -		}
      -	}
      -
      -	*handle = s
      -	return true
      -}
      -
      -// Scan a tag.
      -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
      -	//size_t length = head ? strlen((char *)head) : 0
      -	var s []byte
      -
      -	// Copy the head if needed.
      -	//
      -	// Note that we don't copy the leading '!' character.
      -	if len(head) > 1 {
      -		s = append(s, head[1:]...)
      -	}
      -
      -	// Scan the tag.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	// The set of characters that may appear in URI is as follows:
      -	//
      -	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
      -	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
      -	//      '%'.
      -	// [Go] Convert this into more reasonable logic.
      -	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
      -		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
      -		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
      -		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
      -		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
      -		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
      -		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
      -		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
      -		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
      -		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
      -		parser.buffer[parser.buffer_pos] == '%' {
      -		// Check if it is a URI-escape sequence.
      -		if parser.buffer[parser.buffer_pos] == '%' {
      -			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
      -				return false
      -			}
      -		} else {
      -			s = read(parser, s)
      -		}
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -
      -	// Check if the tag is non-empty.
      -	if len(s) == 0 {
      -		yaml_parser_set_scanner_tag_error(parser, directive,
      -			start_mark, "did not find expected tag URI")
      -		return false
      -	}
      -	*uri = s
      -	return true
      -}
      -
      -// Decode an URI-escape sequence corresponding to a single UTF-8 character.
      -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
      -
      -	// Decode the required number of characters.
      -	w := 1024
      -	for w > 0 {
      -		// Check for a URI-escaped octet.
      -		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
      -			return false
      -		}
      -
      -		if !(parser.buffer[parser.buffer_pos] == '%' &&
      -			is_hex(parser.buffer, parser.buffer_pos+1) &&
      -			is_hex(parser.buffer, parser.buffer_pos+2)) {
      -			return yaml_parser_set_scanner_tag_error(parser, directive,
      -				start_mark, "did not find URI escaped octet")
      -		}
      -
      -		// Get the octet.
      -		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
      -
      -		// If it is the leading octet, determine the length of the UTF-8 sequence.
      -		if w == 1024 {
      -			w = width(octet)
      -			if w == 0 {
      -				return yaml_parser_set_scanner_tag_error(parser, directive,
      -					start_mark, "found an incorrect leading UTF-8 octet")
      -			}
      -		} else {
      -			// Check if the trailing octet is correct.
      -			if octet&0xC0 != 0x80 {
      -				return yaml_parser_set_scanner_tag_error(parser, directive,
      -					start_mark, "found an incorrect trailing UTF-8 octet")
      -			}
      -		}
      -
      -		// Copy the octet and move the pointers.
      -		*s = append(*s, octet)
      -		skip(parser)
      -		skip(parser)
      -		skip(parser)
      -		w--
      -	}
      -	return true
      -}
      -
      -// Scan a block scalar.
      -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
      -	// Eat the indicator '|' or '>'.
      -	start_mark := parser.mark
      -	skip(parser)
      -
      -	// Scan the additional block scalar indicators.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -
      -	// Check for a chomping indicator.
      -	var chomping, increment int
      -	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
      -		// Set the chomping method and eat the indicator.
      -		if parser.buffer[parser.buffer_pos] == '+' {
      -			chomping = +1
      -		} else {
      -			chomping = -1
      -		}
      -		skip(parser)
      -
      -		// Check for an indentation indicator.
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -		if is_digit(parser.buffer, parser.buffer_pos) {
      -			// Check that the intendation is greater than 0.
      -			if parser.buffer[parser.buffer_pos] == '0' {
      -				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
      -					start_mark, "found an intendation indicator equal to 0")
      -				return false
      -			}
      -
      -			// Get the intendation level and eat the indicator.
      -			increment = as_digit(parser.buffer, parser.buffer_pos)
      -			skip(parser)
      -		}
      -
      -	} else if is_digit(parser.buffer, parser.buffer_pos) {
      -		// Do the same as above, but in the opposite order.
      -
      -		if parser.buffer[parser.buffer_pos] == '0' {
      -			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
      -				start_mark, "found an intendation indicator equal to 0")
      -			return false
      -		}
      -		increment = as_digit(parser.buffer, parser.buffer_pos)
      -		skip(parser)
      -
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
      -			if parser.buffer[parser.buffer_pos] == '+' {
      -				chomping = +1
      -			} else {
      -				chomping = -1
      -			}
      -			skip(parser)
      -		}
      -	}
      -
      -	// Eat whitespaces and comments to the end of the line.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	for is_blank(parser.buffer, parser.buffer_pos) {
      -		skip(parser)
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -	}
      -	if parser.buffer[parser.buffer_pos] == '#' {
      -		for !is_breakz(parser.buffer, parser.buffer_pos) {
      -			skip(parser)
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -	}
      -
      -	// Check if we are at the end of the line.
      -	if !is_breakz(parser.buffer, parser.buffer_pos) {
      -		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
      -			start_mark, "did not find expected comment or line break")
      -		return false
      -	}
      -
      -	// Eat a line break.
      -	if is_break(parser.buffer, parser.buffer_pos) {
      -		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -			return false
      -		}
      -		skip_line(parser)
      -	}
      -
      -	end_mark := parser.mark
      -
      -	// Set the intendation level if it was specified.
      -	var indent int
      -	if increment > 0 {
      -		if parser.indent >= 0 {
      -			indent = parser.indent + increment
      -		} else {
      -			indent = increment
      -		}
      -	}
      -
      -	// Scan the leading line breaks and determine the indentation level if needed.
      -	var s, leading_break, trailing_breaks []byte
      -	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
      -		return false
      -	}
      -
      -	// Scan the block scalar content.
      -	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -		return false
      -	}
      -	var leading_blank, trailing_blank bool
      -	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
      -		// We are at the beginning of a non-empty line.
      -
      -		// Is it a trailing whitespace?
      -		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
      -
      -		// Check if we need to fold the leading line break.
      -		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
      -			// Do we need to join the lines by space?
      -			if len(trailing_breaks) == 0 {
      -				s = append(s, ' ')
      -			}
      -		} else {
      -			s = append(s, leading_break...)
      -		}
      -		leading_break = leading_break[:0]
      -
      -		// Append the remaining line breaks.
      -		s = append(s, trailing_breaks...)
      -		trailing_breaks = trailing_breaks[:0]
      -
      -		// Is it a leading whitespace?
      -		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
      -
      -		// Consume the current line.
      -		for !is_breakz(parser.buffer, parser.buffer_pos) {
      -			s = read(parser, s)
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -
      -		// Consume the line break.
      -		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -			return false
      -		}
      -
      -		leading_break = read_line(parser, leading_break)
      -
      -		// Eat the following intendation spaces and line breaks.
      -		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
      -			return false
      -		}
      -	}
      -
      -	// Chomp the tail.
      -	if chomping != -1 {
      -		s = append(s, leading_break...)
      -	}
      -	if chomping == 1 {
      -		s = append(s, trailing_breaks...)
      -	}
      -
      -	// Create a token.
      -	*token = yaml_token_t{
      -		typ:        yaml_SCALAR_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -		value:      s,
      -		style:      yaml_LITERAL_SCALAR_STYLE,
      -	}
      -	if !literal {
      -		token.style = yaml_FOLDED_SCALAR_STYLE
      -	}
      -	return true
      -}
      -
      -// Scan intendation spaces and line breaks for a block scalar.  Determine the
      -// intendation level if needed.
      -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
      -	*end_mark = parser.mark
      -
      -	// Eat the intendation spaces and line breaks.
      -	max_indent := 0
      -	for {
      -		// Eat the intendation spaces.
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
      -			skip(parser)
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -		if parser.mark.column > max_indent {
      -			max_indent = parser.mark.column
      -		}
      -
      -		// Check for a tab character messing the intendation.
      -		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
      -			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
      -				start_mark, "found a tab character where an intendation space is expected")
      -		}
      -
      -		// Have we found a non-empty line?
      -		if !is_break(parser.buffer, parser.buffer_pos) {
      -			break
      -		}
      -
      -		// Consume the line break.
      -		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -			return false
      -		}
      -		// [Go] Should really be returning breaks instead.
      -		*breaks = read_line(parser, *breaks)
      -		*end_mark = parser.mark
      -	}
      -
      -	// Determine the indentation level if needed.
      -	if *indent == 0 {
      -		*indent = max_indent
      -		if *indent < parser.indent+1 {
      -			*indent = parser.indent + 1
      -		}
      -		if *indent < 1 {
      -			*indent = 1
      -		}
      -	}
      -	return true
      -}
      -
      -// Scan a quoted scalar.
      -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
      -	// Eat the left quote.
      -	start_mark := parser.mark
      -	skip(parser)
      -
      -	// Consume the content of the quoted scalar.
      -	var s, leading_break, trailing_breaks, whitespaces []byte
      -	for {
      -		// Check that there are no document indicators at the beginning of the line.
      -		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
      -			return false
      -		}
      -
      -		if parser.mark.column == 0 &&
      -			((parser.buffer[parser.buffer_pos+0] == '-' &&
      -				parser.buffer[parser.buffer_pos+1] == '-' &&
      -				parser.buffer[parser.buffer_pos+2] == '-') ||
      -				(parser.buffer[parser.buffer_pos+0] == '.' &&
      -					parser.buffer[parser.buffer_pos+1] == '.' &&
      -					parser.buffer[parser.buffer_pos+2] == '.')) &&
      -			is_blankz(parser.buffer, parser.buffer_pos+3) {
      -			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
      -				start_mark, "found unexpected document indicator")
      -			return false
      -		}
      -
      -		// Check for EOF.
      -		if is_z(parser.buffer, parser.buffer_pos) {
      -			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
      -				start_mark, "found unexpected end of stream")
      -			return false
      -		}
      -
      -		// Consume non-blank characters.
      -		leading_blanks := false
      -		for !is_blankz(parser.buffer, parser.buffer_pos) {
      -			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
      -				// Is is an escaped single quote.
      -				s = append(s, '\'')
      -				skip(parser)
      -				skip(parser)
      -
      -			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
      -				// It is a right single quote.
      -				break
      -			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
      -				// It is a right double quote.
      -				break
      -
      -			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
      -				// It is an escaped line break.
      -				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
      -					return false
      -				}
      -				skip(parser)
      -				skip_line(parser)
      -				leading_blanks = true
      -				break
      -
      -			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
      -				// It is an escape sequence.
      -				code_length := 0
      -
      -				// Check the escape character.
      -				switch parser.buffer[parser.buffer_pos+1] {
      -				case '0':
      -					s = append(s, 0)
      -				case 'a':
      -					s = append(s, '\x07')
      -				case 'b':
      -					s = append(s, '\x08')
      -				case 't', '\t':
      -					s = append(s, '\x09')
      -				case 'n':
      -					s = append(s, '\x0A')
      -				case 'v':
      -					s = append(s, '\x0B')
      -				case 'f':
      -					s = append(s, '\x0C')
      -				case 'r':
      -					s = append(s, '\x0D')
      -				case 'e':
      -					s = append(s, '\x1B')
      -				case ' ':
      -					s = append(s, '\x20')
      -				case '"':
      -					s = append(s, '"')
      -				case '\'':
      -					s = append(s, '\'')
      -				case '\\':
      -					s = append(s, '\\')
      -				case 'N': // NEL (#x85)
      -					s = append(s, '\xC2')
      -					s = append(s, '\x85')
      -				case '_': // #xA0
      -					s = append(s, '\xC2')
      -					s = append(s, '\xA0')
      -				case 'L': // LS (#x2028)
      -					s = append(s, '\xE2')
      -					s = append(s, '\x80')
      -					s = append(s, '\xA8')
      -				case 'P': // PS (#x2029)
      -					s = append(s, '\xE2')
      -					s = append(s, '\x80')
      -					s = append(s, '\xA9')
      -				case 'x':
      -					code_length = 2
      -				case 'u':
      -					code_length = 4
      -				case 'U':
      -					code_length = 8
      -				default:
      -					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
      -						start_mark, "found unknown escape character")
      -					return false
      -				}
      -
      -				skip(parser)
      -				skip(parser)
      -
      -				// Consume an arbitrary escape code.
      -				if code_length > 0 {
      -					var value int
      -
      -					// Scan the character value.
      -					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
      -						return false
      -					}
      -					for k := 0; k < code_length; k++ {
      -						if !is_hex(parser.buffer, parser.buffer_pos+k) {
      -							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
      -								start_mark, "did not find expected hexdecimal number")
      -							return false
      -						}
      -						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
      -					}
      -
      -					// Check the value and write the character.
      -					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
      -						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
      -							start_mark, "found invalid Unicode character escape code")
      -						return false
      -					}
      -					if value <= 0x7F {
      -						s = append(s, byte(value))
      -					} else if value <= 0x7FF {
      -						s = append(s, byte(0xC0+(value>>6)))
      -						s = append(s, byte(0x80+(value&0x3F)))
      -					} else if value <= 0xFFFF {
      -						s = append(s, byte(0xE0+(value>>12)))
      -						s = append(s, byte(0x80+((value>>6)&0x3F)))
      -						s = append(s, byte(0x80+(value&0x3F)))
      -					} else {
      -						s = append(s, byte(0xF0+(value>>18)))
      -						s = append(s, byte(0x80+((value>>12)&0x3F)))
      -						s = append(s, byte(0x80+((value>>6)&0x3F)))
      -						s = append(s, byte(0x80+(value&0x3F)))
      -					}
      -
      -					// Advance the pointer.
      -					for k := 0; k < code_length; k++ {
      -						skip(parser)
      -					}
      -				}
      -			} else {
      -				// It is a non-escaped non-blank character.
      -				s = read(parser, s)
      -			}
      -			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -				return false
      -			}
      -		}
      -
      -		// Check if we are at the end of the scalar.
      -		if single {
      -			if parser.buffer[parser.buffer_pos] == '\'' {
      -				break
      -			}
      -		} else {
      -			if parser.buffer[parser.buffer_pos] == '"' {
      -				break
      -			}
      -		}
      -
      -		// Consume blank characters.
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -
      -		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
      -			if is_blank(parser.buffer, parser.buffer_pos) {
      -				// Consume a space or a tab character.
      -				if !leading_blanks {
      -					whitespaces = read(parser, whitespaces)
      -				} else {
      -					skip(parser)
      -				}
      -			} else {
      -				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -					return false
      -				}
      -
      -				// Check if it is a first line break.
      -				if !leading_blanks {
      -					whitespaces = whitespaces[:0]
      -					leading_break = read_line(parser, leading_break)
      -					leading_blanks = true
      -				} else {
      -					trailing_breaks = read_line(parser, trailing_breaks)
      -				}
      -			}
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -
      -		// Join the whitespaces or fold line breaks.
      -		if leading_blanks {
      -			// Do we need to fold line breaks?
      -			if len(leading_break) > 0 && leading_break[0] == '\n' {
      -				if len(trailing_breaks) == 0 {
      -					s = append(s, ' ')
      -				} else {
      -					s = append(s, trailing_breaks...)
      -				}
      -			} else {
      -				s = append(s, leading_break...)
      -				s = append(s, trailing_breaks...)
      -			}
      -			trailing_breaks = trailing_breaks[:0]
      -			leading_break = leading_break[:0]
      -		} else {
      -			s = append(s, whitespaces...)
      -			whitespaces = whitespaces[:0]
      -		}
      -	}
      -
      -	// Eat the right quote.
      -	skip(parser)
      -	end_mark := parser.mark
      -
      -	// Create a token.
      -	*token = yaml_token_t{
      -		typ:        yaml_SCALAR_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -		value:      s,
      -		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
      -	}
      -	if !single {
      -		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
      -	}
      -	return true
      -}
      -
      -// Scan a plain scalar.
      -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
      -
      -	var s, leading_break, trailing_breaks, whitespaces []byte
      -	var leading_blanks bool
      -	var indent = parser.indent + 1
      -
      -	start_mark := parser.mark
      -	end_mark := parser.mark
      -
      -	// Consume the content of the plain scalar.
      -	for {
      -		// Check for a document indicator.
      -		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
      -			return false
      -		}
      -		if parser.mark.column == 0 &&
      -			((parser.buffer[parser.buffer_pos+0] == '-' &&
      -				parser.buffer[parser.buffer_pos+1] == '-' &&
      -				parser.buffer[parser.buffer_pos+2] == '-') ||
      -				(parser.buffer[parser.buffer_pos+0] == '.' &&
      -					parser.buffer[parser.buffer_pos+1] == '.' &&
      -					parser.buffer[parser.buffer_pos+2] == '.')) &&
      -			is_blankz(parser.buffer, parser.buffer_pos+3) {
      -			break
      -		}
      -
      -		// Check for a comment.
      -		if parser.buffer[parser.buffer_pos] == '#' {
      -			break
      -		}
      -
      -		// Consume non-blank characters.
      -		for !is_blankz(parser.buffer, parser.buffer_pos) {
      -
      -			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
      -			if parser.flow_level > 0 &&
      -				parser.buffer[parser.buffer_pos] == ':' &&
      -				!is_blankz(parser.buffer, parser.buffer_pos+1) {
      -				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
      -					start_mark, "found unexpected ':'")
      -				return false
      -			}
      -
      -			// Check for indicators that may end a plain scalar.
      -			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
      -				(parser.flow_level > 0 &&
      -					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
      -						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
      -						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
      -						parser.buffer[parser.buffer_pos] == '}')) {
      -				break
      -			}
      -
      -			// Check if we need to join whitespaces and breaks.
      -			if leading_blanks || len(whitespaces) > 0 {
      -				if leading_blanks {
      -					// Do we need to fold line breaks?
      -					if leading_break[0] == '\n' {
      -						if len(trailing_breaks) == 0 {
      -							s = append(s, ' ')
      -						} else {
      -							s = append(s, trailing_breaks...)
      -						}
      -					} else {
      -						s = append(s, leading_break...)
      -						s = append(s, trailing_breaks...)
      -					}
      -					trailing_breaks = trailing_breaks[:0]
      -					leading_break = leading_break[:0]
      -					leading_blanks = false
      -				} else {
      -					s = append(s, whitespaces...)
      -					whitespaces = whitespaces[:0]
      -				}
      -			}
      -
      -			// Copy the character.
      -			s = read(parser, s)
      -
      -			end_mark = parser.mark
      -			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -				return false
      -			}
      -		}
      -
      -		// Is it the end?
      -		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
      -			break
      -		}
      -
      -		// Consume blank characters.
      -		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -			return false
      -		}
      -
      -		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
      -			if is_blank(parser.buffer, parser.buffer_pos) {
      -
      -				// Check for tab character that abuse intendation.
      -				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
      -					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
      -						start_mark, "found a tab character that violate intendation")
      -					return false
      -				}
      -
      -				// Consume a space or a tab character.
      -				if !leading_blanks {
      -					whitespaces = read(parser, whitespaces)
      -				} else {
      -					skip(parser)
      -				}
      -			} else {
      -				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
      -					return false
      -				}
      -
      -				// Check if it is a first line break.
      -				if !leading_blanks {
      -					whitespaces = whitespaces[:0]
      -					leading_break = read_line(parser, leading_break)
      -					leading_blanks = true
      -				} else {
      -					trailing_breaks = read_line(parser, trailing_breaks)
      -				}
      -			}
      -			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
      -				return false
      -			}
      -		}
      -
      -		// Check intendation level.
      -		if parser.flow_level == 0 && parser.mark.column < indent {
      -			break
      -		}
      -	}
      -
      -	// Create a token.
      -	*token = yaml_token_t{
      -		typ:        yaml_SCALAR_TOKEN,
      -		start_mark: start_mark,
      -		end_mark:   end_mark,
      -		value:      s,
      -		style:      yaml_PLAIN_SCALAR_STYLE,
      -	}
      -
      -	// Note that we change the 'simple_key_allowed' flag.
      -	if leading_blanks {
      -		parser.simple_key_allowed = true
      -	}
      -	return true
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/sorter.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/sorter.go
      deleted file mode 100644
      index 5958822f..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/sorter.go
      +++ /dev/null
      @@ -1,104 +0,0 @@
      -package yaml
      -
      -import (
      -	"reflect"
      -	"unicode"
      -)
      -
      -type keyList []reflect.Value
      -
      -func (l keyList) Len() int      { return len(l) }
      -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
      -func (l keyList) Less(i, j int) bool {
      -	a := l[i]
      -	b := l[j]
      -	ak := a.Kind()
      -	bk := b.Kind()
      -	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
      -		a = a.Elem()
      -		ak = a.Kind()
      -	}
      -	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
      -		b = b.Elem()
      -		bk = b.Kind()
      -	}
      -	af, aok := keyFloat(a)
      -	bf, bok := keyFloat(b)
      -	if aok && bok {
      -		if af != bf {
      -			return af < bf
      -		}
      -		if ak != bk {
      -			return ak < bk
      -		}
      -		return numLess(a, b)
      -	}
      -	if ak != reflect.String || bk != reflect.String {
      -		return ak < bk
      -	}
      -	ar, br := []rune(a.String()), []rune(b.String())
      -	for i := 0; i < len(ar) && i < len(br); i++ {
      -		if ar[i] == br[i] {
      -			continue
      -		}
      -		al := unicode.IsLetter(ar[i])
      -		bl := unicode.IsLetter(br[i])
      -		if al && bl {
      -			return ar[i] < br[i]
      -		}
      -		if al || bl {
      -			return bl
      -		}
      -		var ai, bi int
      -		var an, bn int64
      -		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
      -			an = an*10 + int64(ar[ai]-'0')
      -		}
      -		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
      -			bn = bn*10 + int64(br[bi]-'0')
      -		}
      -		if an != bn {
      -			return an < bn
      -		}
      -		if ai != bi {
      -			return ai < bi
      -		}
      -		return ar[i] < br[i]
      -	}
      -	return len(ar) < len(br)
      -}
      -
      -// keyFloat returns a float value for v if it is a number/bool
      -// and whether it is a number/bool or not.
      -func keyFloat(v reflect.Value) (f float64, ok bool) {
      -	switch v.Kind() {
      -	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
      -		return float64(v.Int()), true
      -	case reflect.Float32, reflect.Float64:
      -		return v.Float(), true
      -	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
      -		return float64(v.Uint()), true
      -	case reflect.Bool:
      -		if v.Bool() {
      -			return 1, true
      -		}
      -		return 0, true
      -	}
      -	return 0, false
      -}
      -
      -// numLess returns whether a < b.
      -// a and b must necessarily have the same kind.
      -func numLess(a, b reflect.Value) bool {
      -	switch a.Kind() {
      -	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
      -		return a.Int() < b.Int()
      -	case reflect.Float32, reflect.Float64:
      -		return a.Float() < b.Float()
      -	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
      -		return a.Uint() < b.Uint()
      -	case reflect.Bool:
      -		return !a.Bool() && b.Bool()
      -	}
      -	panic("not a number")
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/suite_test.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/suite_test.go
      deleted file mode 100644
      index c5cf1ed4..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/suite_test.go
      +++ /dev/null
      @@ -1,12 +0,0 @@
      -package yaml_test
      -
      -import (
      -	. "gopkg.in/check.v1"
      -	"testing"
      -)
      -
      -func Test(t *testing.T) { TestingT(t) }
      -
      -type S struct{}
      -
      -var _ = Suite(&S{})
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/writerc.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/writerc.go
      deleted file mode 100644
      index 190362f2..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/writerc.go
      +++ /dev/null
      @@ -1,89 +0,0 @@
      -package yaml
      -
      -// Set the writer error and return false.
      -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
      -	emitter.error = yaml_WRITER_ERROR
      -	emitter.problem = problem
      -	return false
      -}
      -
      -// Flush the output buffer.
      -func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
      -	if emitter.write_handler == nil {
      -		panic("write handler not set")
      -	}
      -
      -	// Check if the buffer is empty.
      -	if emitter.buffer_pos == 0 {
      -		return true
      -	}
      -
      -	// If the output encoding is UTF-8, we don't need to recode the buffer.
      -	if emitter.encoding == yaml_UTF8_ENCODING {
      -		if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
      -			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
      -		}
      -		emitter.buffer_pos = 0
      -		return true
      -	}
      -
      -	// Recode the buffer into the raw buffer.
      -	var low, high int
      -	if emitter.encoding == yaml_UTF16LE_ENCODING {
      -		low, high = 0, 1
      -	} else {
      -		high, low = 1, 0
      -	}
      -
      -	pos := 0
      -	for pos < emitter.buffer_pos {
      -		// See the "reader.c" code for more details on UTF-8 encoding.  Note
      -		// that we assume that the buffer contains a valid UTF-8 sequence.
      -
      -		// Read the next UTF-8 character.
      -		octet := emitter.buffer[pos]
      -
      -		var w int
      -		var value rune
      -		switch {
      -		case octet&0x80 == 0x00:
      -			w, value = 1, rune(octet&0x7F)
      -		case octet&0xE0 == 0xC0:
      -			w, value = 2, rune(octet&0x1F)
      -		case octet&0xF0 == 0xE0:
      -			w, value = 3, rune(octet&0x0F)
      -		case octet&0xF8 == 0xF0:
      -			w, value = 4, rune(octet&0x07)
      -		}
      -		for k := 1; k < w; k++ {
      -			octet = emitter.buffer[pos+k]
      -			value = (value << 6) + (rune(octet) & 0x3F)
      -		}
      -		pos += w
      -
      -		// Write the character.
      -		if value < 0x10000 {
      -			var b [2]byte
      -			b[high] = byte(value >> 8)
      -			b[low] = byte(value & 0xFF)
      -			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
      -		} else {
      -			// Write the character using a surrogate pair (check "reader.c").
      -			var b [4]byte
      -			value -= 0x10000
      -			b[high] = byte(0xD8 + (value >> 18))
      -			b[low] = byte((value >> 10) & 0xFF)
      -			b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
      -			b[low+2] = byte(value & 0xFF)
      -			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
      -		}
      -	}
      -
      -	// Write the raw buffer.
      -	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
      -		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
      -	}
      -	emitter.buffer_pos = 0
      -	emitter.raw_buffer = emitter.raw_buffer[:0]
      -	return true
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/yaml.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/yaml.go
      deleted file mode 100644
      index af4df8a4..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/yaml.go
      +++ /dev/null
      @@ -1,344 +0,0 @@
      -// Package yaml implements YAML support for the Go language.
      -//
      -// Source code and other details for the project are available at GitHub:
      -//
      -//   https://github.com/go-yaml/yaml
      -//
      -package yaml
      -
      -import (
      -	"errors"
      -	"fmt"
      -	"reflect"
      -	"strings"
      -	"sync"
      -)
      -
      -// MapSlice encodes and decodes as a YAML map.
      -// The order of keys is preserved when encoding and decoding.
      -type MapSlice []MapItem
      -
      -// MapItem is an item in a MapSlice.
      -type MapItem struct {
      -	Key, Value interface{}
      -}
      -
      -// The Unmarshaler interface may be implemented by types to customize their
      -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
      -// method receives a function that may be called to unmarshal the original
      -// YAML value into a field or variable. It is safe to call the unmarshal
      -// function parameter more than once if necessary.
      -type Unmarshaler interface {
      -	UnmarshalYAML(unmarshal func(interface{}) error) error
      -}
      -
      -// The Marshaler interface may be implemented by types to customize their
      -// behavior when being marshaled into a YAML document. The returned value
      -// is marshaled in place of the original value implementing Marshaler.
      -//
      -// If an error is returned by MarshalYAML, the marshaling procedure stops
      -// and returns with the provided error.
      -type Marshaler interface {
      -	MarshalYAML() (interface{}, error)
      -}
      -
      -// Unmarshal decodes the first document found within the in byte slice
      -// and assigns decoded values into the out value.
      -//
      -// Maps and pointers (to a struct, string, int, etc) are accepted as out
      -// values. If an internal pointer within a struct is not initialized,
      -// the yaml package will initialize it if necessary for unmarshalling
      -// the provided data. The out parameter must not be nil.
      -//
      -// The type of the decoded values should be compatible with the respective
      -// values in out. If one or more values cannot be decoded due to a type
      -// mismatches, decoding continues partially until the end of the YAML
      -// content, and a *yaml.TypeError is returned with details for all
      -// missed values.
      -//
      -// Struct fields are only unmarshalled if they are exported (have an
      -// upper case first letter), and are unmarshalled using the field name
      -// lowercased as the default key. Custom keys may be defined via the
      -// "yaml" name in the field tag: the content preceding the first comma
      -// is used as the key, and the following comma-separated options are
      -// used to tweak the marshalling process (see Marshal).
      -// Conflicting names result in a runtime error.
      -//
      -// For example:
      -//
      -//     type T struct {
      -//         F int `yaml:"a,omitempty"`
      -//         B int
      -//     }
      -//     var t T
      -//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
      -//
      -// See the documentation of Marshal for the format of tags and a list of
      -// supported tag options.
      -//
      -func Unmarshal(in []byte, out interface{}) (err error) {
      -	defer handleErr(&err)
      -	d := newDecoder()
      -	p := newParser(in)
      -	defer p.destroy()
      -	node := p.parse()
      -	if node != nil {
      -		v := reflect.ValueOf(out)
      -		if v.Kind() == reflect.Ptr && !v.IsNil() {
      -			v = v.Elem()
      -		}
      -		d.unmarshal(node, v)
      -	}
      -	if len(d.terrors) > 0 {
      -		return &TypeError{d.terrors}
      -	}
      -	return nil
      -}
      -
      -// Marshal serializes the value provided into a YAML document. The structure
      -// of the generated document will reflect the structure of the value itself.
      -// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
      -//
      -// Struct fields are only unmarshalled if they are exported (have an upper case
      -// first letter), and are unmarshalled using the field name lowercased as the
      -// default key. Custom keys may be defined via the "yaml" name in the field
      -// tag: the content preceding the first comma is used as the key, and the
      -// following comma-separated options are used to tweak the marshalling process.
      -// Conflicting names result in a runtime error.
      -//
      -// The field tag format accepted is:
      -//
      -//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
      -//
      -// The following flags are currently supported:
      -//
      -//     omitempty    Only include the field if it's not set to the zero
      -//                  value for the type or to empty slices or maps.
      -//                  Does not apply to zero valued structs.
      -//
      -//     flow         Marshal using a flow style (useful for structs,
      -//                  sequences and maps).
      -//
      -//     inline       Inline the field, which must be a struct or a map,
      -//                  causing all of its fields or keys to be processed as if
      -//                  they were part of the outer struct. For maps, keys must
      -//                  not conflict with the yaml keys of other struct fields.
      -//
      -// In addition, if the key is "-", the field is ignored.
      -//
      -// For example:
      -//
      -//     type T struct {
      -//         F int "a,omitempty"
      -//         B int
      -//     }
      -//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
      -//     yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
      -//
      -func Marshal(in interface{}) (out []byte, err error) {
      -	defer handleErr(&err)
      -	e := newEncoder()
      -	defer e.destroy()
      -	e.marshal("", reflect.ValueOf(in))
      -	e.finish()
      -	out = e.out
      -	return
      -}
      -
      -func handleErr(err *error) {
      -	if v := recover(); v != nil {
      -		if e, ok := v.(yamlError); ok {
      -			*err = e.err
      -		} else {
      -			panic(v)
      -		}
      -	}
      -}
      -
      -type yamlError struct {
      -	err error
      -}
      -
      -func fail(err error) {
      -	panic(yamlError{err})
      -}
      -
      -func failf(format string, args ...interface{}) {
      -	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
      -}
      -
      -// A TypeError is returned by Unmarshal when one or more fields in
      -// the YAML document cannot be properly decoded into the requested
      -// types. When this error is returned, the value is still
      -// unmarshaled partially.
      -type TypeError struct {
      -	Errors []string
      -}
      -
      -func (e *TypeError) Error() string {
      -	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
      -}
      -
      -// --------------------------------------------------------------------------
      -// Maintain a mapping of keys to structure field indexes
      -
      -// The code in this section was copied from mgo/bson.
      -
      -// structInfo holds details for the serialization of fields of
      -// a given struct.
      -type structInfo struct {
      -	FieldsMap  map[string]fieldInfo
      -	FieldsList []fieldInfo
      -
      -	// InlineMap is the number of the field in the struct that
      -	// contains an ,inline map, or -1 if there's none.
      -	InlineMap int
      -}
      -
      -type fieldInfo struct {
      -	Key       string
      -	Num       int
      -	OmitEmpty bool
      -	Flow      bool
      -
      -	// Inline holds the field index if the field is part of an inlined struct.
      -	Inline []int
      -}
      -
      -var structMap = make(map[reflect.Type]*structInfo)
      -var fieldMapMutex sync.RWMutex
      -
      -func getStructInfo(st reflect.Type) (*structInfo, error) {
      -	fieldMapMutex.RLock()
      -	sinfo, found := structMap[st]
      -	fieldMapMutex.RUnlock()
      -	if found {
      -		return sinfo, nil
      -	}
      -
      -	n := st.NumField()
      -	fieldsMap := make(map[string]fieldInfo)
      -	fieldsList := make([]fieldInfo, 0, n)
      -	inlineMap := -1
      -	for i := 0; i != n; i++ {
      -		field := st.Field(i)
      -		if field.PkgPath != "" {
      -			continue // Private field
      -		}
      -
      -		info := fieldInfo{Num: i}
      -
      -		tag := field.Tag.Get("yaml")
      -		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
      -			tag = string(field.Tag)
      -		}
      -		if tag == "-" {
      -			continue
      -		}
      -
      -		inline := false
      -		fields := strings.Split(tag, ",")
      -		if len(fields) > 1 {
      -			for _, flag := range fields[1:] {
      -				switch flag {
      -				case "omitempty":
      -					info.OmitEmpty = true
      -				case "flow":
      -					info.Flow = true
      -				case "inline":
      -					inline = true
      -				default:
      -					return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
      -				}
      -			}
      -			tag = fields[0]
      -		}
      -
      -		if inline {
      -			switch field.Type.Kind() {
      -			case reflect.Map:
      -				if inlineMap >= 0 {
      -					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
      -				}
      -				if field.Type.Key() != reflect.TypeOf("") {
      -					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
      -				}
      -				inlineMap = info.Num
      -			case reflect.Struct:
      -				sinfo, err := getStructInfo(field.Type)
      -				if err != nil {
      -					return nil, err
      -				}
      -				for _, finfo := range sinfo.FieldsList {
      -					if _, found := fieldsMap[finfo.Key]; found {
      -						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
      -						return nil, errors.New(msg)
      -					}
      -					if finfo.Inline == nil {
      -						finfo.Inline = []int{i, finfo.Num}
      -					} else {
      -						finfo.Inline = append([]int{i}, finfo.Inline...)
      -					}
      -					fieldsMap[finfo.Key] = finfo
      -					fieldsList = append(fieldsList, finfo)
      -				}
      -			default:
      -				//return nil, errors.New("Option ,inline needs a struct value or map field")
      -				return nil, errors.New("Option ,inline needs a struct value field")
      -			}
      -			continue
      -		}
      -
      -		if tag != "" {
      -			info.Key = tag
      -		} else {
      -			info.Key = strings.ToLower(field.Name)
      -		}
      -
      -		if _, found = fieldsMap[info.Key]; found {
      -			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
      -			return nil, errors.New(msg)
      -		}
      -
      -		fieldsList = append(fieldsList, info)
      -		fieldsMap[info.Key] = info
      -	}
      -
      -	sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
      -
      -	fieldMapMutex.Lock()
      -	structMap[st] = sinfo
      -	fieldMapMutex.Unlock()
      -	return sinfo, nil
      -}
      -
      -func isZero(v reflect.Value) bool {
      -	switch v.Kind() {
      -	case reflect.String:
      -		return len(v.String()) == 0
      -	case reflect.Interface, reflect.Ptr:
      -		return v.IsNil()
      -	case reflect.Slice:
      -		return v.Len() == 0
      -	case reflect.Map:
      -		return v.Len() == 0
      -	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
      -		return v.Int() == 0
      -	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
      -		return v.Uint() == 0
      -	case reflect.Bool:
      -		return !v.Bool()
      -	case reflect.Struct:
      -		vt := v.Type()
      -		for i := v.NumField()-1; i >= 0; i-- {
      -			if vt.Field(i).PkgPath != "" {
      -				continue // Private field
      -			}
      -			if !isZero(v.Field(i)) {
      -				return false
      -			}
      -		}
      -		return true
      -	}
      -	return false
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/yamlh.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/yamlh.go
      deleted file mode 100644
      index d60a6b6b..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/yamlh.go
      +++ /dev/null
      @@ -1,716 +0,0 @@
      -package yaml
      -
      -import (
      -	"io"
      -)
      -
      -// The version directive data.
      -type yaml_version_directive_t struct {
      -	major int8 // The major version number.
      -	minor int8 // The minor version number.
      -}
      -
      -// The tag directive data.
      -type yaml_tag_directive_t struct {
      -	handle []byte // The tag handle.
      -	prefix []byte // The tag prefix.
      -}
      -
      -type yaml_encoding_t int
      -
      -// The stream encoding.
      -const (
      -	// Let the parser choose the encoding.
      -	yaml_ANY_ENCODING yaml_encoding_t = iota
      -
      -	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
      -	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
      -	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
      -)
      -
      -type yaml_break_t int
      -
      -// Line break types.
      -const (
      -	// Let the parser choose the break type.
      -	yaml_ANY_BREAK yaml_break_t = iota
      -
      -	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
      -	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
      -	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
      -)
      -
      -type yaml_error_type_t int
      -
      -// Many bad things could happen with the parser and emitter.
      -const (
      -	// No error is produced.
      -	yaml_NO_ERROR yaml_error_type_t = iota
      -
      -	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
      -	yaml_READER_ERROR   // Cannot read or decode the input stream.
      -	yaml_SCANNER_ERROR  // Cannot scan the input stream.
      -	yaml_PARSER_ERROR   // Cannot parse the input stream.
      -	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
      -	yaml_WRITER_ERROR   // Cannot write to the output stream.
      -	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
      -)
      -
      -// The pointer position.
      -type yaml_mark_t struct {
      -	index  int // The position index.
      -	line   int // The position line.
      -	column int // The position column.
      -}
      -
      -// Node Styles
      -
      -type yaml_style_t int8
      -
      -type yaml_scalar_style_t yaml_style_t
      -
      -// Scalar styles.
      -const (
      -	// Let the emitter choose the style.
      -	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
      -
      -	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
      -	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
      -	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
      -	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
      -	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
      -)
      -
      -type yaml_sequence_style_t yaml_style_t
      -
      -// Sequence styles.
      -const (
      -	// Let the emitter choose the style.
      -	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
      -
      -	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
      -	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
      -)
      -
      -type yaml_mapping_style_t yaml_style_t
      -
      -// Mapping styles.
      -const (
      -	// Let the emitter choose the style.
      -	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
      -
      -	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
      -	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
      -)
      -
      -// Tokens
      -
      -type yaml_token_type_t int
      -
      -// Token types.
      -const (
      -	// An empty token.
      -	yaml_NO_TOKEN yaml_token_type_t = iota
      -
      -	yaml_STREAM_START_TOKEN // A STREAM-START token.
      -	yaml_STREAM_END_TOKEN   // A STREAM-END token.
      -
      -	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
      -	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
      -	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
      -	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
      -
      -	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
      -	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-SEQUENCE-END token.
      -	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
      -
      -	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
      -	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
      -	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
      -	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
      -
      -	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
      -	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
      -	yaml_KEY_TOKEN         // A KEY token.
      -	yaml_VALUE_TOKEN       // A VALUE token.
      -
      -	yaml_ALIAS_TOKEN  // An ALIAS token.
      -	yaml_ANCHOR_TOKEN // An ANCHOR token.
      -	yaml_TAG_TOKEN    // A TAG token.
      -	yaml_SCALAR_TOKEN // A SCALAR token.
      -)
      -
      -func (tt yaml_token_type_t) String() string {
      -	switch tt {
      -	case yaml_NO_TOKEN:
      -		return "yaml_NO_TOKEN"
      -	case yaml_STREAM_START_TOKEN:
      -		return "yaml_STREAM_START_TOKEN"
      -	case yaml_STREAM_END_TOKEN:
      -		return "yaml_STREAM_END_TOKEN"
      -	case yaml_VERSION_DIRECTIVE_TOKEN:
      -		return "yaml_VERSION_DIRECTIVE_TOKEN"
      -	case yaml_TAG_DIRECTIVE_TOKEN:
      -		return "yaml_TAG_DIRECTIVE_TOKEN"
      -	case yaml_DOCUMENT_START_TOKEN:
      -		return "yaml_DOCUMENT_START_TOKEN"
      -	case yaml_DOCUMENT_END_TOKEN:
      -		return "yaml_DOCUMENT_END_TOKEN"
      -	case yaml_BLOCK_SEQUENCE_START_TOKEN:
      -		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
      -	case yaml_BLOCK_MAPPING_START_TOKEN:
      -		return "yaml_BLOCK_MAPPING_START_TOKEN"
      -	case yaml_BLOCK_END_TOKEN:
      -		return "yaml_BLOCK_END_TOKEN"
      -	case yaml_FLOW_SEQUENCE_START_TOKEN:
      -		return "yaml_FLOW_SEQUENCE_START_TOKEN"
      -	case yaml_FLOW_SEQUENCE_END_TOKEN:
      -		return "yaml_FLOW_SEQUENCE_END_TOKEN"
      -	case yaml_FLOW_MAPPING_START_TOKEN:
      -		return "yaml_FLOW_MAPPING_START_TOKEN"
      -	case yaml_FLOW_MAPPING_END_TOKEN:
      -		return "yaml_FLOW_MAPPING_END_TOKEN"
      -	case yaml_BLOCK_ENTRY_TOKEN:
      -		return "yaml_BLOCK_ENTRY_TOKEN"
      -	case yaml_FLOW_ENTRY_TOKEN:
      -		return "yaml_FLOW_ENTRY_TOKEN"
      -	case yaml_KEY_TOKEN:
      -		return "yaml_KEY_TOKEN"
      -	case yaml_VALUE_TOKEN:
      -		return "yaml_VALUE_TOKEN"
      -	case yaml_ALIAS_TOKEN:
      -		return "yaml_ALIAS_TOKEN"
      -	case yaml_ANCHOR_TOKEN:
      -		return "yaml_ANCHOR_TOKEN"
      -	case yaml_TAG_TOKEN:
      -		return "yaml_TAG_TOKEN"
      -	case yaml_SCALAR_TOKEN:
      -		return "yaml_SCALAR_TOKEN"
      -	}
      -	return "<unknown token>"
      -}
      -
      -// The token structure.
      -type yaml_token_t struct {
      -	// The token type.
      -	typ yaml_token_type_t
      -
      -	// The start/end of the token.
      -	start_mark, end_mark yaml_mark_t
      -
      -	// The stream encoding (for yaml_STREAM_START_TOKEN).
      -	encoding yaml_encoding_t
      -
      -	// The alias/anchor/scalar value or tag/tag directive handle
      -	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
      -	value []byte
      -
      -	// The tag suffix (for yaml_TAG_TOKEN).
      -	suffix []byte
      -
      -	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
      -	prefix []byte
      -
      -	// The scalar style (for yaml_SCALAR_TOKEN).
      -	style yaml_scalar_style_t
      -
      -	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
      -	major, minor int8
      -}
      -
      -// Events
      -
      -type yaml_event_type_t int8
      -
      -// Event types.
      -const (
      -	// An empty event.
      -	yaml_NO_EVENT yaml_event_type_t = iota
      -
      -	yaml_STREAM_START_EVENT   // A STREAM-START event.
      -	yaml_STREAM_END_EVENT     // A STREAM-END event.
      -	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
      -	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
      -	yaml_ALIAS_EVENT          // An ALIAS event.
      -	yaml_SCALAR_EVENT         // A SCALAR event.
      -	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
      -	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
      -	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
      -	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
      -)
      -
      -// The event structure.
      -type yaml_event_t struct {
      -
      -	// The event type.
      -	typ yaml_event_type_t
      -
      -	// The start and end of the event.
      -	start_mark, end_mark yaml_mark_t
      -
      -	// The document encoding (for yaml_STREAM_START_EVENT).
      -	encoding yaml_encoding_t
      -
      -	// The version directive (for yaml_DOCUMENT_START_EVENT).
      -	version_directive *yaml_version_directive_t
      -
      -	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
      -	tag_directives []yaml_tag_directive_t
      -
      -	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
      -	anchor []byte
      -
      -	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
      -	tag []byte
      -
      -	// The scalar value (for yaml_SCALAR_EVENT).
      -	value []byte
      -
      -	// Is the document start/end indicator implicit, or the tag optional?
      -	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
      -	implicit bool
      -
      -	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
      -	quoted_implicit bool
      -
      -	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
      -	style yaml_style_t
      -}
      -
      -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
      -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
      -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
      -
      -// Nodes
      -
      -const (
      -	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
      -	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
      -	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
      -	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
      -	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
      -	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
      -
      -	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
      -	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
      -
      -	// Not in original libyaml.
      -	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
      -	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
      -
      -	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
      -	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
      -	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
      -)
      -
      -type yaml_node_type_t int
      -
      -// Node types.
      -const (
      -	// An empty node.
      -	yaml_NO_NODE yaml_node_type_t = iota
      -
      -	yaml_SCALAR_NODE   // A scalar node.
      -	yaml_SEQUENCE_NODE // A sequence node.
      -	yaml_MAPPING_NODE  // A mapping node.
      -)
      -
      -// An element of a sequence node.
      -type yaml_node_item_t int
      -
      -// An element of a mapping node.
      -type yaml_node_pair_t struct {
      -	key   int // The key of the element.
      -	value int // The value of the element.
      -}
      -
      -// The node structure.
      -type yaml_node_t struct {
      -	typ yaml_node_type_t // The node type.
      -	tag []byte           // The node tag.
      -
      -	// The node data.
      -
      -	// The scalar parameters (for yaml_SCALAR_NODE).
      -	scalar struct {
      -		value  []byte              // The scalar value.
      -		length int                 // The length of the scalar value.
      -		style  yaml_scalar_style_t // The scalar style.
      -	}
      -
      -	// The sequence parameters (for YAML_SEQUENCE_NODE).
      -	sequence struct {
      -		items_data []yaml_node_item_t    // The stack of sequence items.
      -		style      yaml_sequence_style_t // The sequence style.
      -	}
      -
      -	// The mapping parameters (for yaml_MAPPING_NODE).
      -	mapping struct {
      -		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
      -		pairs_start *yaml_node_pair_t    // The beginning of the stack.
      -		pairs_end   *yaml_node_pair_t    // The end of the stack.
      -		pairs_top   *yaml_node_pair_t    // The top of the stack.
      -		style       yaml_mapping_style_t // The mapping style.
      -	}
      -
      -	start_mark yaml_mark_t // The beginning of the node.
      -	end_mark   yaml_mark_t // The end of the node.
      -
      -}
      -
      -// The document structure.
      -type yaml_document_t struct {
      -
      -	// The document nodes.
      -	nodes []yaml_node_t
      -
      -	// The version directive.
      -	version_directive *yaml_version_directive_t
      -
      -	// The list of tag directives.
      -	tag_directives_data  []yaml_tag_directive_t
      -	tag_directives_start int // The beginning of the tag directives list.
      -	tag_directives_end   int // The end of the tag directives list.
      -
      -	start_implicit int // Is the document start indicator implicit?
      -	end_implicit   int // Is the document end indicator implicit?
      -
      -	// The start/end of the document.
      -	start_mark, end_mark yaml_mark_t
      -}
      -
      -// The prototype of a read handler.
      -//
      -// The read handler is called when the parser needs to read more bytes from the
      -// source. The handler should write not more than size bytes to the buffer.
      -// The number of written bytes should be set to the size_read variable.
      -//
      -// [in,out]   data        A pointer to an application data specified by
      -//                        yaml_parser_set_input().
      -// [out]      buffer      The buffer to write the data from the source.
      -// [in]       size        The size of the buffer.
      -// [out]      size_read   The actual number of bytes read from the source.
      -//
      -// On success, the handler should return 1.  If the handler failed,
      -// the returned value should be 0. On EOF, the handler should set the
      -// size_read to 0 and return 1.
      -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
      -
      -// This structure holds information about a potential simple key.
      -type yaml_simple_key_t struct {
      -	possible     bool        // Is a simple key possible?
      -	required     bool        // Is a simple key required?
      -	token_number int         // The number of the token.
      -	mark         yaml_mark_t // The position mark.
      -}
      -
      -// The states of the parser.
      -type yaml_parser_state_t int
      -
      -const (
      -	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
      -
      -	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
      -	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
      -	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
      -	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
      -	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
      -	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
      -	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
      -	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
      -	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
      -	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
      -	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
      -	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
      -	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
      -	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
      -	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
      -	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
      -	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
      -	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the and of an ordered mapping entry.
      -	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
      -	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
      -	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
      -	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
      -	yaml_PARSE_END_STATE                               // Expect nothing.
      -)
      -
      -func (ps yaml_parser_state_t) String() string {
      -	switch ps {
      -	case yaml_PARSE_STREAM_START_STATE:
      -		return "yaml_PARSE_STREAM_START_STATE"
      -	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
      -		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
      -	case yaml_PARSE_DOCUMENT_START_STATE:
      -		return "yaml_PARSE_DOCUMENT_START_STATE"
      -	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
      -		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
      -	case yaml_PARSE_DOCUMENT_END_STATE:
      -		return "yaml_PARSE_DOCUMENT_END_STATE"
      -	case yaml_PARSE_BLOCK_NODE_STATE:
      -		return "yaml_PARSE_BLOCK_NODE_STATE"
      -	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
      -		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
      -	case yaml_PARSE_FLOW_NODE_STATE:
      -		return "yaml_PARSE_FLOW_NODE_STATE"
      -	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
      -		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
      -	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
      -		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
      -	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
      -		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
      -	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
      -		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
      -	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
      -		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
      -	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
      -		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
      -	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
      -		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
      -		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
      -		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
      -		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
      -	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
      -		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
      -	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
      -		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
      -	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
      -		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
      -	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
      -		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
      -	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
      -		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
      -	case yaml_PARSE_END_STATE:
      -		return "yaml_PARSE_END_STATE"
      -	}
      -	return "<unknown parser state>"
      -}
      -
      -// This structure holds aliases data.
      -type yaml_alias_data_t struct {
      -	anchor []byte      // The anchor.
      -	index  int         // The node id.
      -	mark   yaml_mark_t // The anchor mark.
      -}
      -
      -// The parser structure.
      -//
      -// All members are internal. Manage the structure using the
      -// yaml_parser_ family of functions.
      -type yaml_parser_t struct {
      -
      -	// Error handling
      -
      -	error yaml_error_type_t // Error type.
      -
      -	problem string // Error description.
      -
      -	// The byte about which the problem occured.
      -	problem_offset int
      -	problem_value  int
      -	problem_mark   yaml_mark_t
      -
      -	// The error context.
      -	context      string
      -	context_mark yaml_mark_t
      -
      -	// Reader stuff
      -
      -	read_handler yaml_read_handler_t // Read handler.
      -
      -	input_file io.Reader // File input data.
      -	input      []byte    // String input data.
      -	input_pos  int
      -
      -	eof bool // EOF flag
      -
      -	buffer     []byte // The working buffer.
      -	buffer_pos int    // The current position of the buffer.
      -
      -	unread int // The number of unread characters in the buffer.
      -
      -	raw_buffer     []byte // The raw buffer.
      -	raw_buffer_pos int    // The current position of the buffer.
      -
      -	encoding yaml_encoding_t // The input encoding.
      -
      -	offset int         // The offset of the current position (in bytes).
      -	mark   yaml_mark_t // The mark of the current position.
      -
      -	// Scanner stuff
      -
      -	stream_start_produced bool // Have we started to scan the input stream?
      -	stream_end_produced   bool // Have we reached the end of the input stream?
      -
      -	flow_level int // The number of unclosed '[' and '{' indicators.
      -
      -	tokens          []yaml_token_t // The tokens queue.
      -	tokens_head     int            // The head of the tokens queue.
      -	tokens_parsed   int            // The number of tokens fetched from the queue.
      -	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
      -
      -	indent  int   // The current indentation level.
      -	indents []int // The indentation levels stack.
      -
      -	simple_key_allowed bool                // May a simple key occur at the current position?
      -	simple_keys        []yaml_simple_key_t // The stack of simple keys.
      -
      -	// Parser stuff
      -
      -	state          yaml_parser_state_t    // The current parser state.
      -	states         []yaml_parser_state_t  // The parser states stack.
      -	marks          []yaml_mark_t          // The stack of marks.
      -	tag_directives []yaml_tag_directive_t // The list of TAG directives.
      -
      -	// Dumper stuff
      -
      -	aliases []yaml_alias_data_t // The alias data.
      -
      -	document *yaml_document_t // The currently parsed document.
      -}
      -
      -// Emitter Definitions
      -
      -// The prototype of a write handler.
      -//
      -// The write handler is called when the emitter needs to flush the accumulated
      -// characters to the output.  The handler should write @a size bytes of the
      -// @a buffer to the output.
      -//
      -// @param[in,out]   data        A pointer to an application data specified by
      -//                              yaml_emitter_set_output().
      -// @param[in]       buffer      The buffer with bytes to be written.
      -// @param[in]       size        The size of the buffer.
      -//
      -// @returns On success, the handler should return @c 1.  If the handler failed,
      -// the returned value should be @c 0.
      -//
      -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
      -
      -type yaml_emitter_state_t int
      -
      -// The emitter states.
      -const (
      -	// Expect STREAM-START.
      -	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
      -
      -	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
      -	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
      -	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
      -	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
      -	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
      -	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
      -	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
      -	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
      -	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
      -	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
      -	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
      -	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
      -	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
      -	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
      -	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
      -	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
      -	yaml_EMIT_END_STATE                        // Expect nothing.
      -)
      -
      -// The emitter structure.
      -//
      -// All members are internal.  Manage the structure using the @c yaml_emitter_
      -// family of functions.
      -type yaml_emitter_t struct {
      -
      -	// Error handling
      -
      -	error   yaml_error_type_t // Error type.
      -	problem string            // Error description.
      -
      -	// Writer stuff
      -
      -	write_handler yaml_write_handler_t // Write handler.
      -
      -	output_buffer *[]byte   // String output data.
      -	output_file   io.Writer // File output data.
      -
      -	buffer     []byte // The working buffer.
      -	buffer_pos int    // The current position of the buffer.
      -
      -	raw_buffer     []byte // The raw buffer.
      -	raw_buffer_pos int    // The current position of the buffer.
      -
      -	encoding yaml_encoding_t // The stream encoding.
      -
      -	// Emitter stuff
      -
      -	canonical   bool         // If the output is in the canonical style?
      -	best_indent int          // The number of indentation spaces.
      -	best_width  int          // The preferred width of the output lines.
      -	unicode     bool         // Allow unescaped non-ASCII characters?
      -	line_break  yaml_break_t // The preferred line break.
      -
      -	state  yaml_emitter_state_t   // The current emitter state.
      -	states []yaml_emitter_state_t // The stack of states.
      -
      -	events      []yaml_event_t // The event queue.
      -	events_head int            // The head of the event queue.
      -
      -	indents []int // The stack of indentation levels.
      -
      -	tag_directives []yaml_tag_directive_t // The list of tag directives.
      -
      -	indent int // The current indentation level.
      -
      -	flow_level int // The current flow level.
      -
      -	root_context       bool // Is it the document root context?
      -	sequence_context   bool // Is it a sequence context?
      -	mapping_context    bool // Is it a mapping context?
      -	simple_key_context bool // Is it a simple mapping key context?
      -
      -	line       int  // The current line.
      -	column     int  // The current column.
      -	whitespace bool // If the last character was a whitespace?
      -	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
      -	open_ended bool // If an explicit document end is required?
      -
      -	// Anchor analysis.
      -	anchor_data struct {
      -		anchor []byte // The anchor value.
      -		alias  bool   // Is it an alias?
      -	}
      -
      -	// Tag analysis.
      -	tag_data struct {
      -		handle []byte // The tag handle.
      -		suffix []byte // The tag suffix.
      -	}
      -
      -	// Scalar analysis.
      -	scalar_data struct {
      -		value                 []byte              // The scalar value.
      -		multiline             bool                // Does the scalar contain line breaks?
      -		flow_plain_allowed    bool                // Can the scalar be expessed in the flow plain style?
      -		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
      -		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
      -		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
      -		style                 yaml_scalar_style_t // The output style.
      -	}
      -
      -	// Dumper stuff
      -
      -	opened bool // If the stream was already opened?
      -	closed bool // If the stream was already closed?
      -
      -	// The information associated with the document nodes.
      -	anchors *struct {
      -		references int  // The number of references.
      -		anchor     int  // The anchor id.
      -		serialized bool // If the node has been emitted?
      -	}
      -
      -	last_anchor_id int // The last assigned anchor id.
      -
      -	document *yaml_document_t // The currently emitted document.
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/yamlprivateh.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/yamlprivateh.go
      deleted file mode 100644
      index 8110ce3c..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/go-yaml/yaml/yamlprivateh.go
      +++ /dev/null
      @@ -1,173 +0,0 @@
      -package yaml
      -
      -const (
      -	// The size of the input raw buffer.
      -	input_raw_buffer_size = 512
      -
      -	// The size of the input buffer.
      -	// It should be possible to decode the whole raw buffer.
      -	input_buffer_size = input_raw_buffer_size * 3
      -
      -	// The size of the output buffer.
      -	output_buffer_size = 128
      -
      -	// The size of the output raw buffer.
      -	// It should be possible to encode the whole output buffer.
      -	output_raw_buffer_size = (output_buffer_size*2 + 2)
      -
      -	// The size of other stacks and queues.
      -	initial_stack_size  = 16
      -	initial_queue_size  = 16
      -	initial_string_size = 16
      -)
      -
      -// Check if the character at the specified position is an alphabetical
      -// character, a digit, '_', or '-'.
      -func is_alpha(b []byte, i int) bool {
      -	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
      -}
      -
      -// Check if the character at the specified position is a digit.
      -func is_digit(b []byte, i int) bool {
      -	return b[i] >= '0' && b[i] <= '9'
      -}
      -
      -// Get the value of a digit.
      -func as_digit(b []byte, i int) int {
      -	return int(b[i]) - '0'
      -}
      -
      -// Check if the character at the specified position is a hex-digit.
      -func is_hex(b []byte, i int) bool {
      -	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
      -}
      -
      -// Get the value of a hex-digit.
      -func as_hex(b []byte, i int) int {
      -	bi := b[i]
      -	if bi >= 'A' && bi <= 'F' {
      -		return int(bi) - 'A' + 10
      -	}
      -	if bi >= 'a' && bi <= 'f' {
      -		return int(bi) - 'a' + 10
      -	}
      -	return int(bi) - '0'
      -}
      -
      -// Check if the character is ASCII.
      -func is_ascii(b []byte, i int) bool {
      -	return b[i] <= 0x7F
      -}
      -
      -// Check if the character at the start of the buffer can be printed unescaped.
      -func is_printable(b []byte, i int) bool {
      -	return ((b[i] == 0x0A) || // . == #x0A
      -		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
      -		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
      -		(b[i] > 0xC2 && b[i] < 0xED) ||
      -		(b[i] == 0xED && b[i+1] < 0xA0) ||
      -		(b[i] == 0xEE) ||
      -		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
      -			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
      -			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
      -}
      -
      -// Check if the character at the specified position is NUL.
      -func is_z(b []byte, i int) bool {
      -	return b[i] == 0x00
      -}
      -
      -// Check if the beginning of the buffer is a BOM.
      -func is_bom(b []byte, i int) bool {
      -	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
      -}
      -
      -// Check if the character at the specified position is space.
      -func is_space(b []byte, i int) bool {
      -	return b[i] == ' '
      -}
      -
      -// Check if the character at the specified position is tab.
      -func is_tab(b []byte, i int) bool {
      -	return b[i] == '\t'
      -}
      -
      -// Check if the character at the specified position is blank (space or tab).
      -func is_blank(b []byte, i int) bool {
      -	//return is_space(b, i) || is_tab(b, i)
      -	return b[i] == ' ' || b[i] == '\t'
      -}
      -
      -// Check if the character at the specified position is a line break.
      -func is_break(b []byte, i int) bool {
      -	return (b[i] == '\r' || // CR (#xD)
      -		b[i] == '\n' || // LF (#xA)
      -		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
      -}
      -
      -func is_crlf(b []byte, i int) bool {
      -	return b[i] == '\r' && b[i+1] == '\n'
      -}
      -
      -// Check if the character is a line break or NUL.
      -func is_breakz(b []byte, i int) bool {
      -	//return is_break(b, i) || is_z(b, i)
      -	return (        // is_break:
      -	b[i] == '\r' || // CR (#xD)
      -		b[i] == '\n' || // LF (#xA)
      -		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
      -		// is_z:
      -		b[i] == 0)
      -}
      -
      -// Check if the character is a line break, space, or NUL.
      -func is_spacez(b []byte, i int) bool {
      -	//return is_space(b, i) || is_breakz(b, i)
      -	return ( // is_space:
      -	b[i] == ' ' ||
      -		// is_breakz:
      -		b[i] == '\r' || // CR (#xD)
      -		b[i] == '\n' || // LF (#xA)
      -		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
      -		b[i] == 0)
      -}
      -
      -// Check if the character is a line break, space, tab, or NUL.
      -func is_blankz(b []byte, i int) bool {
      -	//return is_blank(b, i) || is_breakz(b, i)
      -	return ( // is_blank:
      -	b[i] == ' ' || b[i] == '\t' ||
      -		// is_breakz:
      -		b[i] == '\r' || // CR (#xD)
      -		b[i] == '\n' || // LF (#xA)
      -		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
      -		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
      -		b[i] == 0)
      -}
      -
      -// Determine the width of the character.
      -func width(b byte) int {
      -	// Don't replace these by a switch without first
      -	// confirming that it is being inlined.
      -	if b&0x80 == 0x00 {
      -		return 1
      -	}
      -	if b&0xE0 == 0xC0 {
      -		return 2
      -	}
      -	if b&0xF0 == 0xE0 {
      -		return 3
      -	}
      -	if b&0xF8 == 0xF0 {
      -		return 4
      -	}
      -	return 0
      -
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/LICENSE b/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/LICENSE
      deleted file mode 100644
      index ae6cb62b..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/LICENSE
      +++ /dev/null
      @@ -1,20 +0,0 @@
      -The MIT License (MIT)
      -
      -Copyright (c) 2014 Vincent Petithory
      -
      -Permission is hereby granted, free of charge, to any person obtaining a copy of
      -this software and associated documentation files (the "Software"), to deal in
      -the Software without restriction, including without limitation the rights to
      -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
      -the Software, and to permit persons to whom the Software is furnished to do so,
      -subject to the following conditions:
      -
      -The above copyright notice and this permission notice shall be included in all
      -copies or substantial portions of the Software.
      -
      -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
      -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
      -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
      -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
      -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/README.md b/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/README.md
      deleted file mode 100644
      index 1ac59ad2..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/README.md
      +++ /dev/null
      @@ -1,81 +0,0 @@
      -# Data URL Schemes for Go [![wercker status](https://app.wercker.com/status/6f9a2e144dfcc59e862c52459b452928/s "wercker status")](https://app.wercker.com/project/bykey/6f9a2e144dfcc59e862c52459b452928) [![GoDoc](https://godoc.org/github.com/vincent-petithory/dataurl?status.png)](https://godoc.org/github.com/vincent-petithory/dataurl)
      -
      -This package parses and generates Data URL Schemes for the Go language, according to [RFC 2397](http://tools.ietf.org/html/rfc2397).
      -
      -Data URLs are small chunks of data commonly used in browsers to display inline data,
      -typically like small images, or when you use the FileReader API of the browser.
      -
      -Common use-cases:
      -
      - * generate a data URL out of a `string`, `[]byte`, `io.Reader` for inclusion in HTML templates,
      - * parse a data URL sent by a browser in a http.Handler, and do something with the data (save to disk, etc.)
      - * ...
      -
      -Install the package with:
      -~~~
      -go get github.com/vincent-petithory/dataurl
      -~~~
      -
      -## Usage
      -
      -~~~ go
      -package main
      -
      -import (
      -	"github.com/vincent-petithory/dataurl"
      -	"fmt"
      -)
      -
      -func main() {
      -	dataURL, err := dataurl.DecodeString(`data:text/plain;charset=utf-8;base64,aGV5YQ==`)
      -	if err != nil {
      -		fmt.Println(err)
      -		return
      -	}
      -	fmt.Printf("content type: %s, data: %s\n", dataURL.MediaType.ContentType(), string(dataURL.Data))
      -	// Output: content type: text/plain, data: heya
      -}
      -~~~
      -
      -From a `http.Handler`:
      -
      -~~~ go
      -func handleDataURLUpload(w http.ResponseWriter, r *http.Request) {
      -	dataURL, err := dataurl.Decode(r.Body)
      -	defer r.Body.Close()
      -	if err != nil {
      -		http.Error(w, err.Error(), http.StatusBadRequest)
      -		return
      -	}
      -	if dataURL.ContentType() == "image/png" {
      -		ioutil.WriteFile("image.png", dataURL.Data, 0644)
      -	} else {
      -		http.Error(w, "not a png", http.StatusBadRequest)
      -	}
      -}
      -~~~
      -
      -## Command
      -
      -For convenience, a `dataurl` command is provided to encode/decode dataurl streams.
      -
      -~~~
      -dataurl - Encode or decode dataurl data and print to standard output
      -
      -Usage: dataurl [OPTION]... [FILE]
      -
      -  dataurl encodes or decodes FILE or standard input if FILE is - or omitted, and prints to standard output.
      -  Unless -mimetype is used, when FILE is specified, dataurl will attempt to detect its mimetype using Go's mime.TypeByExtension (http://golang.org/pkg/mime/#TypeByExtension). If this fails or data is read from STDIN, the mimetype will default to application/octet-stream.
      -
      -Options:
      -  -a=false: encode data using ascii instead of base64
      -  -ascii=false: encode data using ascii instead of base64
      -  -d=false: decode data instead of encoding
      -  -decode=false: decode data instead of encoding
      -  -m="": force the mimetype of the data to encode to this value
      -  -mimetype="": force the mimetype of the data to encode to this value
      -~~~
      -
      -## Contributing
      -
      -Feel free to file an issue/make a pull request if you find any bug, or want to suggest enhancements.
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/cmd/dataurl/main.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/cmd/dataurl/main.go
      deleted file mode 100644
      index cf764c94..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/cmd/dataurl/main.go
      +++ /dev/null
      @@ -1,142 +0,0 @@
      -package main
      -
      -import (
      -	"flag"
      -	"fmt"
      -	"io"
      -	"io/ioutil"
      -	"log"
      -	"mime"
      -	"os"
      -	"path"
      -
      -	"github.com/vincent-petithory/dataurl"
      -)
      -
      -var (
      -	performDecode bool
      -	asciiEncoding bool
      -	mimetype      string
      -)
      -
      -func init() {
      -	const decodeUsage = "decode data instead of encoding"
      -	flag.BoolVar(&performDecode, "decode", false, decodeUsage)
      -	flag.BoolVar(&performDecode, "d", false, decodeUsage)
      -
      -	const mimetypeUsage = "force the mimetype of the data to encode to this value"
      -	flag.StringVar(&mimetype, "mimetype", "", mimetypeUsage)
      -	flag.StringVar(&mimetype, "m", "", mimetypeUsage)
      -
      -	const asciiUsage = "encode data using ascii instead of base64"
      -	flag.BoolVar(&asciiEncoding, "ascii", false, asciiUsage)
      -	flag.BoolVar(&asciiEncoding, "a", false, asciiUsage)
      -
      -	flag.Usage = func() {
      -		fmt.Fprint(os.Stderr,
      -			`dataurl - Encode or decode dataurl data and print to standard output
      -
      -Usage: dataurl [OPTION]... [FILE]
      -
      -  dataurl encodes or decodes FILE or standard input if FILE is - or omitted, and prints to standard output.
      -  Unless -mimetype is used, when FILE is specified, dataurl will attempt to detect its mimetype using Go's mime.TypeByExtension (http://golang.org/pkg/mime/#TypeByExtension). If this fails or data is read from STDIN, the mimetype will default to application/octet-stream.
      -
      -Options:
      -`)
      -		flag.PrintDefaults()
      -	}
      -}
      -
      -func main() {
      -	log.SetFlags(0)
      -	flag.Parse()
      -
      -	var (
      -		in               io.Reader
      -		out              = os.Stdout
      -		encoding         = dataurl.EncodingBase64
      -		detectedMimetype string
      -	)
      -	switch n := flag.NArg(); n {
      -	case 0:
      -		in = os.Stdin
      -	case 1:
      -		if flag.Arg(0) == "-" {
      -			in = os.Stdin
      -			return
      -		}
      -		if f, err := os.Open(flag.Arg(0)); err != nil {
      -			log.Fatal(err)
      -		} else {
      -			in = f
      -			defer f.Close()
      -		}
      -		ext := path.Ext(flag.Arg(0))
      -		detectedMimetype = mime.TypeByExtension(ext)
      -	}
      -
      -	switch {
      -	case mimetype == "" && detectedMimetype == "":
      -		mimetype = "application/octet-stream"
      -	case mimetype == "" && detectedMimetype != "":
      -		mimetype = detectedMimetype
      -	}
      -
      -	if performDecode {
      -		if err := decode(in, out); err != nil {
      -			log.Fatal(err)
      -		}
      -	} else {
      -		if asciiEncoding {
      -			encoding = dataurl.EncodingASCII
      -		}
      -		if err := encode(in, out, encoding, mimetype); err != nil {
      -			log.Fatal(err)
      -		}
      -	}
      -}
      -
      -func decode(in io.Reader, out io.Writer) (err error) {
      -	defer func() {
      -		if e := recover(); e != nil {
      -			err = e.(error)
      -		}
      -	}()
      -
      -	du, err := dataurl.Decode(in)
      -	if err != nil {
      -		return
      -	}
      -
      -	_, err = out.Write(du.Data)
      -	if err != nil {
      -		return
      -	}
      -	return
      -}
      -
      -func encode(in io.Reader, out io.Writer, encoding string, mediatype string) (err error) {
      -	defer func() {
      -		if e := recover(); e != nil {
      -			var ok bool
      -			err, ok = e.(error)
      -			if !ok {
      -				err = fmt.Errorf("%v", e)
      -			}
      -			return
      -		}
      -	}()
      -	b, err := ioutil.ReadAll(in)
      -	if err != nil {
      -		return
      -	}
      -
      -	du := dataurl.New(b, mediatype)
      -	du.Encoding = encoding
      -
      -	_, err = du.WriteTo(out)
      -	if err != nil {
      -		return
      -	}
      -	return
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/dataurl.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/dataurl.go
      deleted file mode 100644
      index bfd07654..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/dataurl.go
      +++ /dev/null
      @@ -1,280 +0,0 @@
      -package dataurl
      -
      -import (
      -	"bytes"
      -	"encoding/base64"
      -	"errors"
      -	"fmt"
      -	"io"
      -	"io/ioutil"
      -	"net/http"
      -	"strconv"
      -	"strings"
      -)
      -
      -const (
      -	// EncodingBase64 is base64 encoding for the data url
      -	EncodingBase64 = "base64"
      -	// EncodingASCII is ascii encoding for the data url
      -	EncodingASCII = "ascii"
      -)
      -
      -func defaultMediaType() MediaType {
      -	return MediaType{
      -		"text",
      -		"plain",
      -		map[string]string{"charset": "US-ASCII"},
      -	}
      -}
      -
      -// MediaType is the combination of a media type, a media subtype
      -// and optional parameters.
      -type MediaType struct {
      -	Type    string
      -	Subtype string
      -	Params  map[string]string
      -}
      -
      -// ContentType returns the content type of the dataurl's data, in the form type/subtype.
      -func (mt *MediaType) ContentType() string {
      -	return fmt.Sprintf("%s/%s", mt.Type, mt.Subtype)
      -}
      -
      -// String implements the Stringer interface.
      -//
      -// Params values are escaped with the Escape function, rather than in a quoted string.
      -func (mt *MediaType) String() string {
      -	var buf bytes.Buffer
      -	for k, v := range mt.Params {
      -		fmt.Fprintf(&buf, ";%s=%s", k, EscapeString(v))
      -	}
      -	return mt.ContentType() + (&buf).String()
      -}
      -
      -// DataURL is the combination of a MediaType describing the type of its Data.
      -type DataURL struct {
      -	MediaType
      -	Encoding string
      -	Data     []byte
      -}
      -
      -// New returns a new DataURL initialized with data and
      -// a MediaType parsed from mediatype and paramPairs.
      -// mediatype must be of the form "type/subtype" or it will panic.
      -// paramPairs must have an even number of elements or it will panic.
      -// For more complex DataURL, initialize a DataURL struct.
      -// The DataURL is initialized with base64 encoding.
      -func New(data []byte, mediatype string, paramPairs ...string) *DataURL {
      -	parts := strings.Split(mediatype, "/")
      -	if len(parts) != 2 {
      -		panic("dataurl: invalid mediatype")
      -	}
      -
      -	nParams := len(paramPairs)
      -	if nParams%2 != 0 {
      -		panic("dataurl: requires an even number of param pairs")
      -	}
      -	params := make(map[string]string)
      -	for i := 0; i < nParams; i += 2 {
      -		params[paramPairs[i]] = paramPairs[i+1]
      -	}
      -
      -	mt := MediaType{
      -		parts[0],
      -		parts[1],
      -		params,
      -	}
      -	return &DataURL{
      -		MediaType: mt,
      -		Encoding:  EncodingBase64,
      -		Data:      data,
      -	}
      -}
      -
      -// String implements the Stringer interface.
      -//
      -// Note: it doesn't guarantee the returned string is equal to
      -// the initial source string that was used to create this DataURL.
      -// The reasons for that are:
      -//  * Insertion of default values for MediaType that were maybe not in the initial string,
      -//  * Various ways to encode the MediaType parameters (quoted string or url encoded string, the latter is used),
      -func (du *DataURL) String() string {
      -	var buf bytes.Buffer
      -	du.WriteTo(&buf)
      -	return (&buf).String()
      -}
      -
      -// WriteTo implements the WriterTo interface.
      -// See the note about String().
      -func (du *DataURL) WriteTo(w io.Writer) (n int64, err error) {
      -	var ni int
      -	ni, _ = fmt.Fprint(w, "data:")
      -	n += int64(ni)
      -
      -	ni, _ = fmt.Fprint(w, du.MediaType.String())
      -	n += int64(ni)
      -
      -	if du.Encoding == EncodingBase64 {
      -		ni, _ = fmt.Fprint(w, ";base64")
      -		n += int64(ni)
      -	}
      -
      -	ni, _ = fmt.Fprint(w, ",")
      -	n += int64(ni)
      -
      -	if du.Encoding == EncodingBase64 {
      -		encoder := base64.NewEncoder(base64.StdEncoding, w)
      -		ni, err = encoder.Write(du.Data)
      -		if err != nil {
      -			return
      -		}
      -		encoder.Close()
      -	} else if du.Encoding == EncodingASCII {
      -		ni, _ = fmt.Fprint(w, Escape(du.Data))
      -		n += int64(ni)
      -	} else {
      -		err = fmt.Errorf("dataurl: invalid encoding %s", du.Encoding)
      -		return
      -	}
      -
      -	return
      -}
      -
      -// UnmarshalText decodes a Data URL string and sets it to *du
      -func (du *DataURL) UnmarshalText(text []byte) error {
      -	decoded, err := DecodeString(string(text))
      -	if err != nil {
      -		return err
      -	}
      -	*du = *decoded
      -	return nil
      -}
      -
      -// MarshalText writes du as a Data URL
      -func (du *DataURL) MarshalText() ([]byte, error) {
      -	buf := bytes.NewBuffer(nil)
      -	if _, err := du.WriteTo(buf); err != nil {
      -		return nil, err
      -	}
      -	return buf.Bytes(), nil
      -}
      -
      -type encodedDataReader func(string) ([]byte, error)
      -
      -var asciiDataReader encodedDataReader = func(s string) ([]byte, error) {
      -	us, err := Unescape(s)
      -	if err != nil {
      -		return nil, err
      -	}
      -	return []byte(us), nil
      -}
      -
      -var base64DataReader encodedDataReader = func(s string) ([]byte, error) {
      -	data, err := base64.StdEncoding.DecodeString(s)
      -	if err != nil {
      -		return nil, err
      -	}
      -	return []byte(data), nil
      -}
      -
      -type parser struct {
      -	du                  *DataURL
      -	l                   *lexer
      -	currentAttr         string
      -	unquoteParamVal     bool
      -	encodedDataReaderFn encodedDataReader
      -}
      -
      -func (p *parser) parse() error {
      -	for item := range p.l.items {
      -		switch item.t {
      -		case itemError:
      -			return errors.New(item.String())
      -		case itemMediaType:
      -			p.du.MediaType.Type = item.val
      -			// Should we clear the default
      -			// "charset" parameter at this point?
      -			delete(p.du.MediaType.Params, "charset")
      -		case itemMediaSubType:
      -			p.du.MediaType.Subtype = item.val
      -		case itemParamAttr:
      -			p.currentAttr = item.val
      -		case itemLeftStringQuote:
      -			p.unquoteParamVal = true
      -		case itemParamVal:
      -			val := item.val
      -			if p.unquoteParamVal {
      -				p.unquoteParamVal = false
      -				us, err := strconv.Unquote("\"" + val + "\"")
      -				if err != nil {
      -					return err
      -				}
      -				val = us
      -			} else {
      -				us, err := UnescapeToString(val)
      -				if err != nil {
      -					return err
      -				}
      -				val = us
      -			}
      -			p.du.MediaType.Params[p.currentAttr] = val
      -		case itemBase64Enc:
      -			p.du.Encoding = EncodingBase64
      -			p.encodedDataReaderFn = base64DataReader
      -		case itemDataComma:
      -			if p.encodedDataReaderFn == nil {
      -				p.encodedDataReaderFn = asciiDataReader
      -			}
      -		case itemData:
      -			reader, err := p.encodedDataReaderFn(item.val)
      -			if err != nil {
      -				return err
      -			}
      -			p.du.Data = reader
      -		case itemEOF:
      -			if p.du.Data == nil {
      -				p.du.Data = []byte("")
      -			}
      -			return nil
      -		}
      -	}
      -	panic("EOF not found")
      -}
      -
      -// DecodeString decodes a Data URL scheme string.
      -func DecodeString(s string) (*DataURL, error) {
      -	du := &DataURL{
      -		MediaType: defaultMediaType(),
      -		Encoding:  EncodingASCII,
      -	}
      -
      -	parser := &parser{
      -		du: du,
      -		l:  lex(s),
      -	}
      -	if err := parser.parse(); err != nil {
      -		return nil, err
      -	}
      -	return du, nil
      -}
      -
      -// Decode decodes a Data URL scheme from a io.Reader.
      -func Decode(r io.Reader) (*DataURL, error) {
      -	data, err := ioutil.ReadAll(r)
      -	if err != nil {
      -		return nil, err
      -	}
      -	return DecodeString(string(data))
      -}
      -
      -// EncodeBytes encodes the data bytes into a Data URL string, using base 64 encoding.
      -//
      -// The media type of data is detected using http.DetectContentType.
      -func EncodeBytes(data []byte) string {
      -	mt := http.DetectContentType(data)
      -	// http.DetectContentType may add spurious spaces between ; and a parameter.
      -	// The canonical way is to not have them.
      -	cleanedMt := strings.Replace(mt, "; ", ";", -1)
      -
      -	return New(data, cleanedMt).String()
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/dataurl_test.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/dataurl_test.go
      deleted file mode 100644
      index efff4f34..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/dataurl_test.go
      +++ /dev/null
      @@ -1,587 +0,0 @@
      -package dataurl
      -
      -import (
      -	"bytes"
      -	"encoding/base64"
      -	"fmt"
      -	"net/http"
      -	"net/http/httptest"
      -	"reflect"
      -	"regexp"
      -	"strings"
      -	"testing"
      -)
      -
      -type dataURLTest struct {
      -	InputRawDataURL string
      -	ExpectedItems   []item
      -	ExpectedDataURL DataURL
      -}
      -
      -func genTestTable() []dataURLTest {
      -	return []dataURLTest{
      -		dataURLTest{
      -			`data:;base64,aGV5YQ==`,
      -			[]item{
      -				item{itemDataPrefix, dataPrefix},
      -				item{itemParamSemicolon, ";"},
      -				item{itemBase64Enc, "base64"},
      -				item{itemDataComma, ","},
      -				item{itemData, "aGV5YQ=="},
      -				item{itemEOF, ""},
      -			},
      -			DataURL{
      -				defaultMediaType(),
      -				EncodingBase64,
      -				[]byte("heya"),
      -			},
      -		},
      -		dataURLTest{
      -			`data:text/plain;base64,aGV5YQ==`,
      -			[]item{
      -				item{itemDataPrefix, dataPrefix},
      -				item{itemMediaType, "text"},
      -				item{itemMediaSep, "/"},
      -				item{itemMediaSubType, "plain"},
      -				item{itemParamSemicolon, ";"},
      -				item{itemBase64Enc, "base64"},
      -				item{itemDataComma, ","},
      -				item{itemData, "aGV5YQ=="},
      -				item{itemEOF, ""},
      -			},
      -			DataURL{
      -				MediaType{
      -					"text",
      -					"plain",
      -					map[string]string{},
      -				},
      -				EncodingBase64,
      -				[]byte("heya"),
      -			},
      -		},
      -		dataURLTest{
      -			`data:text/plain;charset=utf-8;base64,aGV5YQ==`,
      -			[]item{
      -				item{itemDataPrefix, dataPrefix},
      -				item{itemMediaType, "text"},
      -				item{itemMediaSep, "/"},
      -				item{itemMediaSubType, "plain"},
      -				item{itemParamSemicolon, ";"},
      -				item{itemParamAttr, "charset"},
      -				item{itemParamEqual, "="},
      -				item{itemParamVal, "utf-8"},
      -				item{itemParamSemicolon, ";"},
      -				item{itemBase64Enc, "base64"},
      -				item{itemDataComma, ","},
      -				item{itemData, "aGV5YQ=="},
      -				item{itemEOF, ""},
      -			},
      -			DataURL{
      -				MediaType{
      -					"text",
      -					"plain",
      -					map[string]string{
      -						"charset": "utf-8",
      -					},
      -				},
      -				EncodingBase64,
      -				[]byte("heya"),
      -			},
      -		},
      -		dataURLTest{
      -			`data:text/plain;charset=utf-8;foo=bar;base64,aGV5YQ==`,
      -			[]item{
      -				item{itemDataPrefix, dataPrefix},
      -				item{itemMediaType, "text"},
      -				item{itemMediaSep, "/"},
      -				item{itemMediaSubType, "plain"},
      -				item{itemParamSemicolon, ";"},
      -				item{itemParamAttr, "charset"},
      -				item{itemParamEqual, "="},
      -				item{itemParamVal, "utf-8"},
      -				item{itemParamSemicolon, ";"},
      -				item{itemParamAttr, "foo"},
      -				item{itemParamEqual, "="},
      -				item{itemParamVal, "bar"},
      -				item{itemParamSemicolon, ";"},
      -				item{itemBase64Enc, "base64"},
      -				item{itemDataComma, ","},
      -				item{itemData, "aGV5YQ=="},
      -				item{itemEOF, ""},
      -			},
      -			DataURL{
      -				MediaType{
      -					"text",
      -					"plain",
      -					map[string]string{
      -						"charset": "utf-8",
      -						"foo":     "bar",
      -					},
      -				},
      -				EncodingBase64,
      -				[]byte("heya"),
      -			},
      -		},
      -		dataURLTest{
      -			`data:application/json;charset=utf-8;foo="b\"<@>\"r";style=unformatted%20json;base64,eyJtc2ciOiAiaGV5YSJ9`,
      -			[]item{
      -				item{itemDataPrefix, dataPrefix},
      -				item{itemMediaType, "application"},
      -				item{itemMediaSep, "/"},
      -				item{itemMediaSubType, "json"},
      -				item{itemParamSemicolon, ";"},
      -				item{itemParamAttr, "charset"},
      -				item{itemParamEqual, "="},
      -				item{itemParamVal, "utf-8"},
      -				item{itemParamSemicolon, ";"},
      -				item{itemParamAttr, "foo"},
      -				item{itemParamEqual, "="},
      -				item{itemLeftStringQuote, "\""},
      -				item{itemParamVal, `b\"<@>\"r`},
      -				item{itemRightStringQuote, "\""},
      -				item{itemParamSemicolon, ";"},
      -				item{itemParamAttr, "style"},
      -				item{itemParamEqual, "="},
      -				item{itemParamVal, "unformatted%20json"},
      -				item{itemParamSemicolon, ";"},
      -				item{itemBase64Enc, "base64"},
      -				item{itemDataComma, ","},
      -				item{itemData, "eyJtc2ciOiAiaGV5YSJ9"},
      -				item{itemEOF, ""},
      -			},
      -			DataURL{
      -				MediaType{
      -					"application",
      -					"json",
      -					map[string]string{
      -						"charset": "utf-8",
      -						"foo":     `b"<@>"r`,
      -						"style":   "unformatted json",
      -					},
      -				},
      -				EncodingBase64,
      -				[]byte(`{"msg": "heya"}`),
      -			},
      -		},
      -		dataURLTest{
      -			`data:xxx;base64,aGV5YQ==`,
      -			[]item{
      -				item{itemDataPrefix, dataPrefix},
      -				item{itemError, "invalid character for media type"},
      -			},
      -			DataURL{},
      -		},
      -		dataURLTest{
      -			`data:,`,
      -			[]item{
      -				item{itemDataPrefix, dataPrefix},
      -				item{itemDataComma, ","},
      -				item{itemEOF, ""},
      -			},
      -			DataURL{
      -				defaultMediaType(),
      -				EncodingASCII,
      -				[]byte(""),
      -			},
      -		},
      -		dataURLTest{
      -			`data:,A%20brief%20note`,
      -			[]item{
      -				item{itemDataPrefix, dataPrefix},
      -				item{itemDataComma, ","},
      -				item{itemData, "A%20brief%20note"},
      -				item{itemEOF, ""},
      -			},
      -			DataURL{
      -				defaultMediaType(),
      -				EncodingASCII,
      -				[]byte("A brief note"),
      -			},
      -		},
      -		dataURLTest{
      -			`data:image/svg+xml-im.a.fake;base64,cGllLXN0b2NrX1RoaXJ0eQ==`,
      -			[]item{
      -				item{itemDataPrefix, dataPrefix},
      -				item{itemMediaType, "image"},
      -				item{itemMediaSep, "/"},
      -				item{itemMediaSubType, "svg+xml-im.a.fake"},
      -				item{itemParamSemicolon, ";"},
      -				item{itemBase64Enc, "base64"},
      -				item{itemDataComma, ","},
      -				item{itemData, "cGllLXN0b2NrX1RoaXJ0eQ=="},
      -				item{itemEOF, ""},
      -			},
      -			DataURL{
      -				MediaType{
      -					"image",
      -					"svg+xml-im.a.fake",
      -					map[string]string{},
      -				},
      -				EncodingBase64,
      -				[]byte("pie-stock_Thirty"),
      -			},
      -		},
      -	}
      -}
      -
      -func expectItems(expected, actual []item) bool {
      -	if len(expected) != len(actual) {
      -		return false
      -	}
      -	for i := range expected {
      -		if expected[i].t != actual[i].t {
      -			return false
      -		}
      -		if expected[i].val != actual[i].val {
      -			return false
      -		}
      -	}
      -	return true
      -}
      -
      -func equal(du1, du2 *DataURL) (bool, error) {
      -	if !reflect.DeepEqual(du1.MediaType, du2.MediaType) {
      -		return false, nil
      -	}
      -	if du1.Encoding != du2.Encoding {
      -		return false, nil
      -	}
      -
      -	if du1.Data == nil || du2.Data == nil {
      -		return false, fmt.Errorf("nil Data")
      -	}
      -
      -	if !bytes.Equal(du1.Data, du2.Data) {
      -		return false, nil
      -	}
      -	return true, nil
      -}
      -
      -func TestLexDataURLs(t *testing.T) {
      -	for _, test := range genTestTable() {
      -		l := lex(test.InputRawDataURL)
      -		var items []item
      -		for item := range l.items {
      -			items = append(items, item)
      -		}
      -		if !expectItems(test.ExpectedItems, items) {
      -			t.Errorf("Expected %v, got %v", test.ExpectedItems, items)
      -		}
      -	}
      -}
      -
      -func testDataURLs(t *testing.T, factory func(string) (*DataURL, error)) {
      -	for _, test := range genTestTable() {
      -		var expectedItemError string
      -		for _, item := range test.ExpectedItems {
      -			if item.t == itemError {
      -				expectedItemError = item.String()
      -				break
      -			}
      -		}
      -		dataURL, err := factory(test.InputRawDataURL)
      -		if expectedItemError == "" && err != nil {
      -			t.Error(err)
      -			continue
      -		} else if expectedItemError != "" && err == nil {
      -			t.Errorf("Expected error \"%s\", got nil", expectedItemError)
      -			continue
      -		} else if expectedItemError != "" && err != nil {
      -			if err.Error() != expectedItemError {
      -				t.Errorf("Expected error \"%s\", got \"%s\"", expectedItemError, err.Error())
      -			}
      -			continue
      -		}
      -
      -		if ok, err := equal(dataURL, &test.ExpectedDataURL); err != nil {
      -			t.Error(err)
      -		} else if !ok {
      -			t.Errorf("Expected %v, got %v", test.ExpectedDataURL, *dataURL)
      -		}
      -	}
      -}
      -
      -func TestDataURLsWithDecode(t *testing.T) {
      -	testDataURLs(t, func(s string) (*DataURL, error) {
      -		return Decode(strings.NewReader(s))
      -	})
      -}
      -
      -func TestDataURLsWithDecodeString(t *testing.T) {
      -	testDataURLs(t, func(s string) (*DataURL, error) {
      -		return DecodeString(s)
      -	})
      -}
      -
      -func TestDataURLsWithUnmarshalText(t *testing.T) {
      -	testDataURLs(t, func(s string) (*DataURL, error) {
      -		d := &DataURL{}
      -		err := d.UnmarshalText([]byte(s))
      -		return d, err
      -	})
      -}
      -
      -func TestRoundTrip(t *testing.T) {
      -	tests := []struct {
      -		s           string
      -		roundTripOk bool
      -	}{
      -		{`data:text/plain;charset=utf-8;foo=bar;base64,aGV5YQ==`, true},
      -		{`data:;charset=utf-8;foo=bar;base64,aGV5YQ==`, false},
      -		{`data:text/plain;charset=utf-8;foo="bar";base64,aGV5YQ==`, false},
      -		{`data:text/plain;charset=utf-8;foo="bar",A%20brief%20note`, false},
      -		{`data:text/plain;charset=utf-8;foo=bar,A%20brief%20note`, true},
      -	}
      -	for _, test := range tests {
      -		dataURL, err := DecodeString(test.s)
      -		if err != nil {
      -			t.Error(err)
      -			continue
      -		}
      -		dus := dataURL.String()
      -		if test.roundTripOk && dus != test.s {
      -			t.Errorf("Expected %s, got %s", test.s, dus)
      -		} else if !test.roundTripOk && dus == test.s {
      -			t.Errorf("Found %s, expected something else", test.s)
      -		}
      -
      -		txt, err := dataURL.MarshalText()
      -		if err != nil {
      -			t.Error(err)
      -			continue
      -		}
      -		if test.roundTripOk && string(txt) != test.s {
      -			t.Errorf("MarshalText roundtrip: got '%s', want '%s'", txt, test.s)
      -		} else if !test.roundTripOk && string(txt) == test.s {
      -			t.Errorf("MarshalText roundtrip: got '%s', want something else", txt)
      -		}
      -	}
      -}
      -
      -func TestNew(t *testing.T) {
      -	tests := []struct {
      -		Data            []byte
      -		MediaType       string
      -		ParamPairs      []string
      -		WillPanic       bool
      -		ExpectedDataURL *DataURL
      -	}{
      -		{
      -			[]byte(`{"msg": "heya"}`),
      -			"application/json",
      -			[]string{},
      -			false,
      -			&DataURL{
      -				MediaType{
      -					"application",
      -					"json",
      -					map[string]string{},
      -				},
      -				EncodingBase64,
      -				[]byte(`{"msg": "heya"}`),
      -			},
      -		},
      -		{
      -			[]byte(``),
      -			"application//json",
      -			[]string{},
      -			true,
      -			nil,
      -		},
      -		{
      -			[]byte(``),
      -			"",
      -			[]string{},
      -			true,
      -			nil,
      -		},
      -		{
      -			[]byte(`{"msg": "heya"}`),
      -			"text/plain",
      -			[]string{"charset", "utf-8"},
      -			false,
      -			&DataURL{
      -				MediaType{
      -					"text",
      -					"plain",
      -					map[string]string{
      -						"charset": "utf-8",
      -					},
      -				},
      -				EncodingBase64,
      -				[]byte(`{"msg": "heya"}`),
      -			},
      -		},
      -		{
      -			[]byte(`{"msg": "heya"}`),
      -			"text/plain",
      -			[]string{"charset", "utf-8", "name"},
      -			true,
      -			nil,
      -		},
      -	}
      -	for _, test := range tests {
      -		var dataURL *DataURL
      -		func() {
      -			defer func() {
      -				if test.WillPanic {
      -					if e := recover(); e == nil {
      -						t.Error("Expected panic didn't happen")
      -					}
      -				} else {
      -					if e := recover(); e != nil {
      -						t.Errorf("Unexpected panic: %v", e)
      -					}
      -				}
      -			}()
      -			dataURL = New(test.Data, test.MediaType, test.ParamPairs...)
      -		}()
      -		if test.WillPanic {
      -			if dataURL != nil {
      -				t.Error("Expected nil DataURL")
      -			}
      -		} else {
      -			if ok, err := equal(dataURL, test.ExpectedDataURL); err != nil {
      -				t.Error(err)
      -			} else if !ok {
      -				t.Errorf("Expected %v, got %v", test.ExpectedDataURL, *dataURL)
      -			}
      -		}
      -	}
      -}
      -
      -var golangFavicon = strings.Replace(`AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAAAAAAAAAAAAAAAAAAAAA
      -AAAAAAD///8AVE44//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb/
      -/uF2/1ROOP////8A////AFROOP/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+
      -4Xb//uF2//7hdv9UTjj/////AP///wBUTjj//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7h
      -dv/+4Xb//uF2//7hdv/+4Xb/VE44/////wD///8AVE44//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2
      -//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2/1ROOP////8A////AFROOP/+4Xb//uF2//7hdv/+4Xb/
      -/uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv9UTjj/////AP///wBUTjj//uF2//7hdv/+
      -4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb/VE44/////wD///8AVE44//7h
      -dv/+4Xb//uF2//7hdv/+4Xb/z7t5/8Kyev/+4Xb//993///dd///3Xf//uF2/1ROOP////8A////
      -AFROOP/+4Xb//uF2//7hdv//4Hn/dIzD//v8///7/P//dIzD//7hdv//3Xf//913//7hdv9UTjj/
      -////AP///wBUTjj//uF2///fd//+4Xb//uF2/6ajif90jMP/dIzD/46Zpv/+4Xb//+F1///feP/+
      -4Xb/VE44/////wD///8AVE44//7hdv/z1XT////////////Is3L/HyAj/x8gI//Is3L/////////
      -///z1XT//uF2/1ROOP////8A19nd/1ROOP/+4Xb/5+HS//v+//8RExf/Liwn//7hdv/+4Xb/5+HS
      -//v8//8RExf/Liwn//7hdv9UTjj/19nd/1ROOP94aDT/yKdO/+fh0v//////ERMX/y4sJ//+4Xb/
      -/uF2/+fh0v//////ERMX/y4sJ//Ip07/dWU3/1ROOP9UTjj/yKdO/6qSSP/Is3L/9fb7//f6///I
      -s3L//uF2//7hdv/Is3L////////////Is3L/qpJI/8inTv9UTjj/19nd/1ROOP97c07/qpJI/8in
      -Tv/Ip07//uF2//7hdv/+4Xb//uF2/8zBlv/Kv4//pZJU/3tzTv9UTjj/19nd/////wD///8A4eLl
      -/6CcjP97c07/e3NO/1dOMf9BOiX/TkUn/2VXLf97c07/e3NO/6CcjP/h4uX/////AP///wD///8A
      -////AP///wD///8A////AP///wDq6/H/3N/j/9fZ3f/q6/H/////AP///wD///8A////AP///wD/
      -//8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
      -AAAAAAAAAAAAAA==`, "\n", "", -1)
      -
      -func TestEncodeBytes(t *testing.T) {
      -	mustDecode := func(s string) []byte {
      -		data, err := base64.StdEncoding.DecodeString(s)
      -		if err != nil {
      -			panic(err)
      -		}
      -		return data
      -	}
      -	tests := []struct {
      -		Data           []byte
      -		ExpectedString string
      -	}{
      -		{
      -			[]byte(`A brief note`),
      -			"data:text/plain;charset=utf-8;base64,QSBicmllZiBub3Rl",
      -		},
      -		{
      -			[]byte{0xA, 0xFF, 0x99, 0x34, 0x56, 0x34, 0x00},
      -			`data:application/octet-stream;base64,Cv+ZNFY0AA==`,
      -		},
      -		{
      -			mustDecode(golangFavicon),
      -			`data:image/vnd.microsoft.icon;base64,` + golangFavicon,
      -		},
      -	}
      -	for _, test := range tests {
      -		str := EncodeBytes(test.Data)
      -		if str != test.ExpectedString {
      -			t.Errorf("Expected %s, got %s", test.ExpectedString, str)
      -		}
      -	}
      -}
      -
      -func BenchmarkLex(b *testing.B) {
      -	for i := 0; i < b.N; i++ {
      -		for _, test := range genTestTable() {
      -			l := lex(test.InputRawDataURL)
      -			for _ = range l.items {
      -			}
      -		}
      -	}
      -}
      -
      -const rep = `^data:(?P<mediatype>\w+/[\w\+\-\.]+)?(?P<parameter>(?:;[\w\-]+="?[\w\-\\<>@,";:%]*"?)+)?(?P<base64>;base64)?,(?P<data>.*)$`
      -
      -func TestRegexp(t *testing.T) {
      -	re, err := regexp.Compile(rep)
      -	if err != nil {
      -		t.Fatal(err)
      -	}
      -	for _, test := range genTestTable() {
      -		shouldMatch := true
      -		for _, item := range test.ExpectedItems {
      -			if item.t == itemError {
      -				shouldMatch = false
      -				break
      -			}
      -		}
      -		// just test it matches, do not parse
      -		if re.MatchString(test.InputRawDataURL) && !shouldMatch {
      -			t.Error("doesn't match", test.InputRawDataURL)
      -		} else if !re.MatchString(test.InputRawDataURL) && shouldMatch {
      -			t.Error("match", test.InputRawDataURL)
      -		}
      -	}
      -}
      -
      -func BenchmarkRegexp(b *testing.B) {
      -	re, err := regexp.Compile(rep)
      -	if err != nil {
      -		b.Fatal(err)
      -	}
      -	for i := 0; i < b.N; i++ {
      -		for _, test := range genTestTable() {
      -			_ = re.FindStringSubmatch(test.InputRawDataURL)
      -		}
      -	}
      -}
      -
      -func ExampleDecodeString() {
      -	dataURL, err := DecodeString(`data:text/plain;charset=utf-8;base64,aGV5YQ==`)
      -	if err != nil {
      -		fmt.Println(err)
      -		return
      -	}
      -	fmt.Printf("%s, %s", dataURL.MediaType.ContentType(), string(dataURL.Data))
      -	// Output: text/plain, heya
      -}
      -
      -func ExampleDecode() {
      -	r, err := http.NewRequest(
      -		"POST", "/",
      -		strings.NewReader(`data:image/vnd.microsoft.icon;name=golang%20favicon;base64,`+golangFavicon),
      -	)
      -	if err != nil {
      -		fmt.Println(err)
      -		return
      -	}
      -
      -	var dataURL *DataURL
      -	h := func(w http.ResponseWriter, r *http.Request) {
      -		var err error
      -		dataURL, err = Decode(r.Body)
      -		defer r.Body.Close()
      -		if err != nil {
      -			fmt.Println(err)
      -		}
      -	}
      -	w := httptest.NewRecorder()
      -	h(w, r)
      -	fmt.Printf("%s: %s", dataURL.Params["name"], dataURL.ContentType())
      -	// Output: golang favicon: image/vnd.microsoft.icon
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/doc.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/doc.go
      deleted file mode 100644
      index 56461d04..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/doc.go
      +++ /dev/null
      @@ -1,28 +0,0 @@
      -/*
      -Package dataurl parses Data URL Schemes
      -according to RFC 2397
      -(http://tools.ietf.org/html/rfc2397).
      -
      -Data URLs are small chunks of data commonly used in browsers to display inline data,
      -typically like small images, or when you use the FileReader API of the browser.
      -
      -A dataurl looks like:
      -
      -	data:text/plain;charset=utf-8,A%20brief%20note
      -
      -Or, with base64 encoding:
      -
      -	data:image/vnd.microsoft.icon;name=golang%20favicon;base64,AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAAAAAAAAAAAAAAAAAAAAA
      -	AAAAAAD///8AVE44//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb/
      -	/uF2/1ROOP////8A////AFROOP/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+
      -	...
      -	/6CcjP97c07/e3NO/1dOMf9BOiX/TkUn/2VXLf97c07/e3NO/6CcjP/h4uX/////AP///wD///8A
      -	////AP///wD///8A////AP///wDq6/H/3N/j/9fZ3f/q6/H/////AP///wD///8A////AP///wD/
      -	//8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
      -	AAAAAAAAAAAAAA==
      -
      -Common functions are Decode and DecodeString to obtain a DataURL,
      -and DataURL.String() and DataURL.WriteTo to generate a Data URL string.
      -
      -*/
      -package dataurl
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/lex.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/lex.go
      deleted file mode 100644
      index 1a8717f5..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/lex.go
      +++ /dev/null
      @@ -1,521 +0,0 @@
      -package dataurl
      -
      -import (
      -	"fmt"
      -	"strings"
      -	"unicode"
      -	"unicode/utf8"
      -)
      -
      -type item struct {
      -	t   itemType
      -	val string
      -}
      -
      -func (i item) String() string {
      -	switch i.t {
      -	case itemEOF:
      -		return "EOF"
      -	case itemError:
      -		return i.val
      -	}
      -	if len(i.val) > 10 {
      -		return fmt.Sprintf("%.10q...", i.val)
      -	}
      -	return fmt.Sprintf("%q", i.val)
      -}
      -
      -type itemType int
      -
      -const (
      -	itemError itemType = iota
      -	itemEOF
      -
      -	itemDataPrefix
      -
      -	itemMediaType
      -	itemMediaSep
      -	itemMediaSubType
      -	itemParamSemicolon
      -	itemParamAttr
      -	itemParamEqual
      -	itemLeftStringQuote
      -	itemRightStringQuote
      -	itemParamVal
      -
      -	itemBase64Enc
      -
      -	itemDataComma
      -	itemData
      -)
      -
      -const eof rune = -1
      -
      -func isTokenRune(r rune) bool {
      -	return r <= unicode.MaxASCII &&
      -		!unicode.IsControl(r) &&
      -		!unicode.IsSpace(r) &&
      -		!isTSpecialRune(r)
      -}
      -
      -func isTSpecialRune(r rune) bool {
      -	return r == '(' ||
      -		r == ')' ||
      -		r == '<' ||
      -		r == '>' ||
      -		r == '@' ||
      -		r == ',' ||
      -		r == ';' ||
      -		r == ':' ||
      -		r == '\\' ||
      -		r == '"' ||
      -		r == '/' ||
      -		r == '[' ||
      -		r == ']' ||
      -		r == '?' ||
      -		r == '='
      -}
      -
      -// See http://tools.ietf.org/html/rfc2045
      -// This doesn't include extension-token case
      -// as it's handled separatly
      -func isDiscreteType(s string) bool {
      -	if strings.HasPrefix(s, "text") ||
      -		strings.HasPrefix(s, "image") ||
      -		strings.HasPrefix(s, "audio") ||
      -		strings.HasPrefix(s, "video") ||
      -		strings.HasPrefix(s, "application") {
      -		return true
      -	}
      -	return false
      -}
      -
      -// See http://tools.ietf.org/html/rfc2045
      -// This doesn't include extension-token case
      -// as it's handled separatly
      -func isCompositeType(s string) bool {
      -	if strings.HasPrefix(s, "message") ||
      -		strings.HasPrefix(s, "multipart") {
      -		return true
      -	}
      -	return false
      -}
      -
      -func isURLCharRune(r rune) bool {
      -	// We're a bit permissive here,
      -	// by not including '%' in delims
      -	// This is okay, since url unescaping will validate
      -	// that later in the parser.
      -	return r <= unicode.MaxASCII &&
      -		!(r >= 0x00 && r <= 0x1F) && r != 0x7F && /* control */
      -		// delims
      -		r != ' ' &&
      -		r != '<' &&
      -		r != '>' &&
      -		r != '#' &&
      -		r != '"' &&
      -		// unwise
      -		r != '{' &&
      -		r != '}' &&
      -		r != '|' &&
      -		r != '\\' &&
      -		r != '^' &&
      -		r != '[' &&
      -		r != ']' &&
      -		r != '`'
      -}
      -
      -func isBase64Rune(r rune) bool {
      -	return (r >= 'a' && r <= 'z') ||
      -		(r >= 'A' && r <= 'Z') ||
      -		(r >= '0' && r <= '9') ||
      -		r == '+' ||
      -		r == '/' ||
      -		r == '=' ||
      -		r == '\n'
      -}
      -
      -type stateFn func(*lexer) stateFn
      -
      -// lexer lexes the data URL scheme input string.
      -// The implementation is from the text/template/parser package.
      -type lexer struct {
      -	input          string
      -	start          int
      -	pos            int
      -	width          int
      -	seenBase64Item bool
      -	items          chan item
      -}
      -
      -func (l *lexer) run() {
      -	for state := lexBeforeDataPrefix; state != nil; {
      -		state = state(l)
      -	}
      -	close(l.items)
      -}
      -
      -func (l *lexer) emit(t itemType) {
      -	l.items <- item{t, l.input[l.start:l.pos]}
      -	l.start = l.pos
      -}
      -
      -func (l *lexer) next() (r rune) {
      -	if l.pos >= len(l.input) {
      -		l.width = 0
      -		return eof
      -	}
      -	r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
      -	l.pos += l.width
      -	return r
      -}
      -
      -func (l *lexer) backup() {
      -	l.pos -= l.width
      -}
      -
      -func (l *lexer) ignore() {
      -	l.start = l.pos
      -}
      -
      -func (l *lexer) errorf(format string, args ...interface{}) stateFn {
      -	l.items <- item{itemError, fmt.Sprintf(format, args...)}
      -	return nil
      -}
      -
      -func lex(input string) *lexer {
      -	l := &lexer{
      -		input: input,
      -		items: make(chan item),
      -	}
      -	go l.run() // Concurrently run state machine.
      -	return l
      -}
      -
      -const (
      -	dataPrefix     = "data:"
      -	mediaSep       = '/'
      -	paramSemicolon = ';'
      -	paramEqual     = '='
      -	dataComma      = ','
      -)
      -
      -// start lexing by detecting data prefix
      -func lexBeforeDataPrefix(l *lexer) stateFn {
      -	if strings.HasPrefix(l.input[l.pos:], dataPrefix) {
      -		return lexDataPrefix
      -	}
      -	return l.errorf("missing data prefix")
      -}
      -
      -// lex data prefix
      -func lexDataPrefix(l *lexer) stateFn {
      -	l.pos += len(dataPrefix)
      -	l.emit(itemDataPrefix)
      -	return lexAfterDataPrefix
      -}
      -
      -// lex what's after data prefix.
      -// it can be the media type/subtype separator,
      -// the base64 encoding, or the comma preceding the data
      -func lexAfterDataPrefix(l *lexer) stateFn {
      -	switch r := l.next(); {
      -	case r == paramSemicolon:
      -		l.backup()
      -		return lexParamSemicolon
      -	case r == dataComma:
      -		l.backup()
      -		return lexDataComma
      -	case r == eof:
      -		return l.errorf("missing comma before data")
      -	case r == 'x' || r == 'X':
      -		if l.next() == '-' {
      -			return lexXTokenMediaType
      -		}
      -		return lexInDiscreteMediaType
      -	case isTokenRune(r):
      -		return lexInDiscreteMediaType
      -	default:
      -		return l.errorf("invalid character after data prefix")
      -	}
      -}
      -
      -func lexXTokenMediaType(l *lexer) stateFn {
      -	for {
      -		switch r := l.next(); {
      -		case r == mediaSep:
      -			l.backup()
      -			return lexMediaType
      -		case r == eof:
      -			return l.errorf("missing media type slash")
      -		case isTokenRune(r):
      -		default:
      -			return l.errorf("invalid character for media type")
      -		}
      -	}
      -}
      -
      -func lexInDiscreteMediaType(l *lexer) stateFn {
      -	for {
      -		switch r := l.next(); {
      -		case r == mediaSep:
      -			l.backup()
      -			// check it's valid discrete type
      -			if !isDiscreteType(l.input[l.start:l.pos]) &&
      -				!isCompositeType(l.input[l.start:l.pos]) {
      -				return l.errorf("invalid media type")
      -			}
      -			return lexMediaType
      -		case r == eof:
      -			return l.errorf("missing media type slash")
      -		case isTokenRune(r):
      -		default:
      -			return l.errorf("invalid character for media type")
      -		}
      -	}
      -}
      -
      -func lexMediaType(l *lexer) stateFn {
      -	if l.pos > l.start {
      -		l.emit(itemMediaType)
      -	}
      -	return lexMediaSep
      -}
      -
      -func lexMediaSep(l *lexer) stateFn {
      -	l.next()
      -	l.emit(itemMediaSep)
      -	return lexAfterMediaSep
      -}
      -
      -func lexAfterMediaSep(l *lexer) stateFn {
      -	for {
      -		switch r := l.next(); {
      -		case r == paramSemicolon || r == dataComma:
      -			l.backup()
      -			return lexMediaSubType
      -		case r == eof:
      -			return l.errorf("incomplete media type")
      -		case isTokenRune(r):
      -		default:
      -			return l.errorf("invalid character for media subtype")
      -		}
      -	}
      -}
      -
      -func lexMediaSubType(l *lexer) stateFn {
      -	if l.pos > l.start {
      -		l.emit(itemMediaSubType)
      -	}
      -	return lexAfterMediaSubType
      -}
      -
      -func lexAfterMediaSubType(l *lexer) stateFn {
      -	switch r := l.next(); {
      -	case r == paramSemicolon:
      -		l.backup()
      -		return lexParamSemicolon
      -	case r == dataComma:
      -		l.backup()
      -		return lexDataComma
      -	case r == eof:
      -		return l.errorf("missing comma before data")
      -	default:
      -		return l.errorf("expected semicolon or comma")
      -	}
      -}
      -
      -func lexParamSemicolon(l *lexer) stateFn {
      -	l.next()
      -	l.emit(itemParamSemicolon)
      -	return lexAfterParamSemicolon
      -}
      -
      -func lexAfterParamSemicolon(l *lexer) stateFn {
      -	switch r := l.next(); {
      -	case r == eof:
      -		return l.errorf("unterminated parameter sequence")
      -	case r == paramEqual || r == dataComma:
      -		return l.errorf("unterminated parameter sequence")
      -	case isTokenRune(r):
      -		l.backup()
      -		return lexInParamAttr
      -	default:
      -		return l.errorf("invalid character for parameter attribute")
      -	}
      -}
      -
      -func lexBase64Enc(l *lexer) stateFn {
      -	if l.pos > l.start {
      -		if v := l.input[l.start:l.pos]; v != "base64" {
      -			return l.errorf("expected base64, got %s", v)
      -		}
      -		l.seenBase64Item = true
      -		l.emit(itemBase64Enc)
      -	}
      -	return lexDataComma
      -}
      -
      -func lexInParamAttr(l *lexer) stateFn {
      -	for {
      -		switch r := l.next(); {
      -		case r == paramEqual:
      -			l.backup()
      -			return lexParamAttr
      -		case r == dataComma:
      -			l.backup()
      -			return lexBase64Enc
      -		case r == eof:
      -			return l.errorf("unterminated parameter sequence")
      -		case isTokenRune(r):
      -		default:
      -			return l.errorf("invalid character for parameter attribute")
      -		}
      -	}
      -}
      -
      -func lexParamAttr(l *lexer) stateFn {
      -	if l.pos > l.start {
      -		l.emit(itemParamAttr)
      -	}
      -	return lexParamEqual
      -}
      -
      -func lexParamEqual(l *lexer) stateFn {
      -	l.next()
      -	l.emit(itemParamEqual)
      -	return lexAfterParamEqual
      -}
      -
      -func lexAfterParamEqual(l *lexer) stateFn {
      -	switch r := l.next(); {
      -	case r == '"':
      -		l.emit(itemLeftStringQuote)
      -		return lexInQuotedStringParamVal
      -	case r == eof:
      -		return l.errorf("missing comma before data")
      -	case isTokenRune(r):
      -		return lexInParamVal
      -	default:
      -		return l.errorf("invalid character for parameter value")
      -	}
      -}
      -
      -func lexInQuotedStringParamVal(l *lexer) stateFn {
      -	for {
      -		switch r := l.next(); {
      -		case r == eof:
      -			return l.errorf("unclosed quoted string")
      -		case r == '\\':
      -			return lexEscapedChar
      -		case r == '"':
      -			l.backup()
      -			return lexQuotedStringParamVal
      -		case r <= unicode.MaxASCII:
      -		default:
      -			return l.errorf("invalid character for parameter value")
      -		}
      -	}
      -}
      -
      -func lexEscapedChar(l *lexer) stateFn {
      -	switch r := l.next(); {
      -	case r <= unicode.MaxASCII:
      -		return lexInQuotedStringParamVal
      -	case r == eof:
      -		return l.errorf("unexpected eof")
      -	default:
      -		return l.errorf("invalid escaped character")
      -	}
      -}
      -
      -func lexInParamVal(l *lexer) stateFn {
      -	for {
      -		switch r := l.next(); {
      -		case r == paramSemicolon || r == dataComma:
      -			l.backup()
      -			return lexParamVal
      -		case r == eof:
      -			return l.errorf("missing comma before data")
      -		case isTokenRune(r):
      -		default:
      -			return l.errorf("invalid character for parameter value")
      -		}
      -	}
      -}
      -
      -func lexQuotedStringParamVal(l *lexer) stateFn {
      -	if l.pos > l.start {
      -		l.emit(itemParamVal)
      -	}
      -	l.next()
      -	l.emit(itemRightStringQuote)
      -	return lexAfterParamVal
      -}
      -
      -func lexParamVal(l *lexer) stateFn {
      -	if l.pos > l.start {
      -		l.emit(itemParamVal)
      -	}
      -	return lexAfterParamVal
      -}
      -
      -func lexAfterParamVal(l *lexer) stateFn {
      -	switch r := l.next(); {
      -	case r == paramSemicolon:
      -		l.backup()
      -		return lexParamSemicolon
      -	case r == dataComma:
      -		l.backup()
      -		return lexDataComma
      -	case r == eof:
      -		return l.errorf("missing comma before data")
      -	default:
      -		return l.errorf("expected semicolon or comma")
      -	}
      -}
      -
      -func lexDataComma(l *lexer) stateFn {
      -	l.next()
      -	l.emit(itemDataComma)
      -	if l.seenBase64Item {
      -		return lexBase64Data
      -	}
      -	return lexData
      -}
      -
      -func lexData(l *lexer) stateFn {
      -Loop:
      -	for {
      -		switch r := l.next(); {
      -		case r == eof:
      -			break Loop
      -		case isURLCharRune(r):
      -		default:
      -			return l.errorf("invalid data character")
      -		}
      -	}
      -	if l.pos > l.start {
      -		l.emit(itemData)
      -	}
      -	l.emit(itemEOF)
      -	return nil
      -}
      -
      -func lexBase64Data(l *lexer) stateFn {
      -Loop:
      -	for {
      -		switch r := l.next(); {
      -		case r == eof:
      -			break Loop
      -		case isBase64Rune(r):
      -		default:
      -			return l.errorf("invalid data character")
      -		}
      -	}
      -	if l.pos > l.start {
      -		l.emit(itemData)
      -	}
      -	l.emit(itemEOF)
      -	return nil
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/rfc2396.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/rfc2396.go
      deleted file mode 100644
      index e2ea0cac..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/rfc2396.go
      +++ /dev/null
      @@ -1,130 +0,0 @@
      -package dataurl
      -
      -import (
      -	"bytes"
      -	"fmt"
      -	"io"
      -	"strings"
      -)
      -
      -// Escape implements URL escaping, as defined in RFC 2397 (http://tools.ietf.org/html/rfc2397).
      -// It differs a bit from net/url's QueryEscape and QueryUnescape, e.g how spaces are treated (+ instead of %20):
      -//
      -// Only ASCII chars are allowed. Reserved chars are escaped to their %xx form.
      -// Unreserved chars are [a-z], [A-Z], [0-9], and -_.!~*\().
      -func Escape(data []byte) string {
      -	var buf = new(bytes.Buffer)
      -	for _, b := range data {
      -		switch {
      -		case isUnreserved(b):
      -			buf.WriteByte(b)
      -		default:
      -			fmt.Fprintf(buf, "%%%02X", b)
      -		}
      -	}
      -	return buf.String()
      -}
      -
      -// EscapeString is like Escape, but taking
      -// a string as argument.
      -func EscapeString(s string) string {
      -	return Escape([]byte(s))
      -}
      -
      -// isUnreserved return true
      -// if the byte c is an unreserved char,
      -// as defined in RFC 2396.
      -func isUnreserved(c byte) bool {
      -	return (c >= 'a' && c <= 'z') ||
      -		(c >= 'A' && c <= 'Z') ||
      -		(c >= '0' && c <= '9') ||
      -		c == '-' ||
      -		c == '_' ||
      -		c == '.' ||
      -		c == '!' ||
      -		c == '~' ||
      -		c == '*' ||
      -		c == '\'' ||
      -		c == '(' ||
      -		c == ')'
      -}
      -
      -func isHex(c byte) bool {
      -	switch {
      -	case c >= 'a' && c <= 'f':
      -		return true
      -	case c >= 'A' && c <= 'F':
      -		return true
      -	case c >= '0' && c <= '9':
      -		return true
      -	}
      -	return false
      -}
      -
      -// borrowed from net/url/url.go
      -func unhex(c byte) byte {
      -	switch {
      -	case '0' <= c && c <= '9':
      -		return c - '0'
      -	case 'a' <= c && c <= 'f':
      -		return c - 'a' + 10
      -	case 'A' <= c && c <= 'F':
      -		return c - 'A' + 10
      -	}
      -	return 0
      -}
      -
      -// Unescape unescapes a character sequence
      -// escaped with Escape(String?).
      -func Unescape(s string) ([]byte, error) {
      -	var buf = new(bytes.Buffer)
      -	reader := strings.NewReader(s)
      -
      -	for {
      -		r, size, err := reader.ReadRune()
      -		if err == io.EOF {
      -			break
      -		}
      -		if err != nil {
      -			return nil, err
      -		}
      -		if size > 1 {
      -			return nil, fmt.Errorf("rfc2396: non-ASCII char detected")
      -		}
      -
      -		switch r {
      -		case '%':
      -			eb1, err := reader.ReadByte()
      -			if err == io.EOF {
      -				return nil, fmt.Errorf("rfc2396: unexpected end of unescape sequence")
      -			}
      -			if err != nil {
      -				return nil, err
      -			}
      -			if !isHex(eb1) {
      -				return nil, fmt.Errorf("rfc2396: invalid char 0x%x in unescape sequence", r)
      -			}
      -			eb0, err := reader.ReadByte()
      -			if err == io.EOF {
      -				return nil, fmt.Errorf("rfc2396: unexpected end of unescape sequence")
      -			}
      -			if err != nil {
      -				return nil, err
      -			}
      -			if !isHex(eb0) {
      -				return nil, fmt.Errorf("rfc2396: invalid char 0x%x in unescape sequence", r)
      -			}
      -			buf.WriteByte(unhex(eb0) + unhex(eb1)*16)
      -		default:
      -			buf.WriteByte(byte(r))
      -		}
      -	}
      -	return buf.Bytes(), nil
      -}
      -
      -// UnescapeToString is like Unescape, but returning
      -// a string.
      -func UnescapeToString(s string) (string, error) {
      -	b, err := Unescape(s)
      -	return string(b), err
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/rfc2396_test.go b/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/rfc2396_test.go
      deleted file mode 100644
      index 45efbaf2..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/rfc2396_test.go
      +++ /dev/null
      @@ -1,69 +0,0 @@
      -package dataurl
      -
      -import (
      -	"bytes"
      -	"fmt"
      -	"testing"
      -)
      -
      -var tests = []struct {
      -	escaped   string
      -	unescaped []byte
      -}{
      -	{"A%20brief%20note%0A", []byte("A brief note\n")},
      -	{"%7B%5B%5Dbyte(%22A%2520brief%2520note%22)%2C%20%5B%5Dbyte(%22A%20brief%20note%22)%7D", []byte(`{[]byte("A%20brief%20note"), []byte("A brief note")}`)},
      -}
      -
      -func TestEscape(t *testing.T) {
      -	for _, test := range tests {
      -		escaped := Escape(test.unescaped)
      -		if string(escaped) != test.escaped {
      -			t.Errorf("Expected %s, got %s", test.escaped, string(escaped))
      -		}
      -	}
      -}
      -
      -func TestUnescape(t *testing.T) {
      -	for _, test := range tests {
      -		unescaped, err := Unescape(test.escaped)
      -		if err != nil {
      -			t.Error(err)
      -			continue
      -		}
      -		if !bytes.Equal(unescaped, test.unescaped) {
      -			t.Errorf("Expected %s, got %s", test.unescaped, unescaped)
      -		}
      -	}
      -}
      -
      -func ExampleEscapeString() {
      -	fmt.Println(EscapeString("A brief note"))
      -	// Output: A%20brief%20note
      -}
      -
      -func ExampleEscape() {
      -	fmt.Println(Escape([]byte("A brief note")))
      -	// Output: A%20brief%20note
      -}
      -
      -func ExampleUnescape() {
      -	data, err := Unescape("A%20brief%20note")
      -	if err != nil {
      -		// can fail e.g if incorrect escaped sequence
      -		fmt.Println(err)
      -		return
      -	}
      -	fmt.Println(string(data))
      -	// Output: A brief note
      -}
      -
      -func ExampleUnescapeToString() {
      -	s, err := UnescapeToString("A%20brief%20note")
      -	if err != nil {
      -		// can fail e.g if incorrect escaped sequence
      -		fmt.Println(err)
      -		return
      -	}
      -	fmt.Println(s)
      -	// Output: A brief note
      -}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/wercker.yml b/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/wercker.yml
      deleted file mode 100644
      index 3ab8084c..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/vincent-petithory/dataurl/wercker.yml
      +++ /dev/null
      @@ -1 +0,0 @@
      -box: wercker/default
      \ No newline at end of file
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/go4.org/errorutil/highlight.go b/vendor/github.com/coreos/ignition/config/vendor/go4.org/errorutil/highlight.go
      deleted file mode 100644
      index aace6a46..00000000
      --- a/vendor/github.com/coreos/ignition/config/vendor/go4.org/errorutil/highlight.go
      +++ /dev/null
      @@ -1,58 +0,0 @@
      -/*
      -Copyright 2011 Google Inc.
      -
      -Licensed under the Apache License, Version 2.0 (the "License");
      -you may not use this file except in compliance with the License.
      -You may obtain a copy of the License at
      -
      -     http://www.apache.org/licenses/LICENSE-2.0
      -
      -Unless required by applicable law or agreed to in writing, software
      -distributed under the License is distributed on an "AS IS" BASIS,
      -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      -See the License for the specific language governing permissions and
      -limitations under the License.
      -*/
      -
      -// Package errorutil helps make better error messages.
      -package errorutil
      -
      -import (
      -	"bufio"
      -	"bytes"
      -	"fmt"
      -	"io"
      -	"strings"
      -)
      -
      -// HighlightBytePosition takes a reader and the location in bytes of a parse
      -// error (for instance, from json.SyntaxError.Offset) and returns the line, column,
      -// and pretty-printed context around the error with an arrow indicating the exact
      -// position of the syntax error.
      -func HighlightBytePosition(f io.Reader, pos int64) (line, col int, highlight string) {
      -	line = 1
      -	br := bufio.NewReader(f)
      -	lastLine := ""
      -	thisLine := new(bytes.Buffer)
      -	for n := int64(0); n < pos; n++ {
      -		b, err := br.ReadByte()
      -		if err != nil {
      -			break
      -		}
      -		if b == '\n' {
      -			lastLine = thisLine.String()
      -			thisLine.Reset()
      -			line++
      -			col = 1
      -		} else {
      -			col++
      -			thisLine.WriteByte(b)
      -		}
      -	}
      -	if line > 1 {
      -		highlight += fmt.Sprintf("%5d: %s\n", line-1, lastLine)
      -	}
      -	highlight += fmt.Sprintf("%5d: %s\n", line, thisLine.String())
      -	highlight += fmt.Sprintf("%s^\n", strings.Repeat(" ", col+5))
      -	return
      -}
      diff --git a/vendor/github.com/coreos/ignition/doc/configuration.md b/vendor/github.com/coreos/ignition/doc/configuration.md
      new file mode 100644
      index 00000000..964dd68f
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/doc/configuration.md
      @@ -0,0 +1,87 @@
      +# Configuration Specification #
      +
      +The Ignition configuration is a JSON document conforming to the following specification, with **_italicized_** entries being optional:
      +
      +* **ignition** (object): metadata about the configuration itself.
      +  * **version** (string): the semantic version number of the spec. The spec version must be compatible with the latest version (`2.0.0`). Compatibility requires the major versions to match and the spec version be less than or equal to the latest version.
      +  * **_config_** (objects): options related to the configuration.
      +    * **_append_** (list of objects): a list of the configs to be appended to the current config.
      +      * **source** (string): the URL of the config. Supported schemes are http. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
      +      * **_verification_** (object): options related to the verification of the config.
      +        * **_hash_** (string): the hash of the config, in the form `<type>-<value>` where type is sha512.
      +    * **_replace_** (object): the config that will replace the current.
      +      * **source** (string): the URL of the config. Supported schemes are http. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
      +      * **_verification_** (object): options related to the verification of the config.
      +        * **_hash_** (string): the hash of the config, in the form `<type>-<value>` where type is sha512.
      +* **_storage_** (object): describes the desired state of the system's storage devices.
      +  * **_disks_** (list of objects): the list of disks to be configured and their options.
      +    * **device** (string): the absolute path to the device. Devices are typically referenced by the `/dev/disk/by-*` symlinks.
      +    * **_wipeTable_** (boolean): whether or not the partition tables shall be wiped. When true, the partition tables are erased before any further manipulation. Otherwise, the existing entries are left intact.
      +    * **_partitions_** (list of objects): the list of partitions and their configuration for this particular disk.
      +      * **_label_** (string): the PARTLABEL for the partition.
+      * **_number_** (integer): the partition number, which dictates its position in the partition table (one-indexed). If zero, use the next available partition slot.
      +      * **_size_** (integer): the size of the partition (in sectors). If zero, the partition will fill the remainder of the disk.
      +      * **_start_** (integer): the start of the partition (in sectors). If zero, the partition will be positioned at the earliest available part of the disk.
      +      * **_typeGuid_** (string): the GPT [partition type GUID][part-types]. If omitted, the default will be 0FC63DAF-8483-4772-8E79-3D69D8477DE4 (Linux filesystem data).
      +  * **_raid_** (list of objects): the list of RAID arrays to be configured.
      +    * **name** (string): the name to use for the resulting md device.
      +    * **level** (string): the redundancy level of the array (e.g. linear, raid1, raid5, etc.).
      +    * **devices** (list of strings): the list of devices (referenced by their absolute path) in the array.
      +    * **_spares_** (integer): the number of spares (if applicable) in the array.
      +  * **_filesystems_** (list of objects): the list of filesystems to be configured and/or used in the "files" section. Either "mount" or "path" needs to be specified.
      +    * **_name_** (string): the identifier for the filesystem, internal to Ignition. This is only required if the filesystem needs to be referenced in the "files" section.
      +    * **_mount_** (object): contains the set of mount and formatting options for the filesystem. A non-null entry indicates that the filesystem should be mounted before it is used by Ignition.
      +      * **device** (string): the absolute path to the device. Devices are typically referenced by the `/dev/disk/by-*` symlinks.
      +      * **format** (string): the filesystem format (ext4, btrfs, or xfs).
      +      * **_create_** (object): contains the set of options to be used when creating the filesystem. A non-null entry indicates that the filesystem shall be created.
      +        * **_force_** (boolean): whether or not the create operation shall overwrite an existing filesystem.
      +        * **_options_** (list of strings): any additional options to be passed to the format-specific mkfs utility.
      +    * **_path_** (string): the mount-point of the filesystem. A non-null entry indicates that the filesystem has already been mounted by the system at the specified path. This is really only useful for "/sysroot".
      +  * **_files_** (list of objects): the list of files, rooted in this particular filesystem, to be written.
      +    * **filesystem** (string): the internal identifier of the filesystem. This matches the last filesystem with the given identifier.
      +    * **path** (string): the absolute path to the file.
      +    * **_contents_** (object): options related to the contents of the file.
      +      * **_compression_** (string): the type of compression used on the contents (null or gzip)
      +      * **_source_** (string): the URL of the file contents. Supported schemes are http and [data][rfc2397]. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
      +      * **_verification_** (object): options related to the verification of the file contents.
      +        * **_hash_** (string): the hash of the config, in the form `<type>-<value>` where type is sha512.
      +    * **_mode_** (integer): the file's permission mode. Note that the mode must be properly specified as a **decimal** value (i.e. 0644 -> 420).
      +    * **_user_** (object): specifies the file's owner.
      +      * **_id_** (integer): the user ID of the owner.
      +    * **_group_** (object): specifies the group of the owner.
      +      * **_id_** (integer): the group ID of the owner.
      +* **_systemd_** (object): describes the desired state of the systemd units.
      +  * **_units_** (list of objects): the list of systemd units.
      +    * **name** (string): the name of the unit. This must be suffixed with a valid unit type (e.g. "thing.service").
      +    * **_enable_** (boolean): whether or not the service shall be enabled. When true, the service is enabled. In order for this to have any effect, the unit must have an install section.
      +    * **_mask_** (boolean): whether or not the service shall be masked. When true, the service is masked by symlinking it to `/dev/null`.
      +    * **_contents_** (string): the contents of the unit.
      +    * **_dropins_** (list of objects): the list of drop-ins for the unit.
      +      * **name** (string): the name of the drop-in. This must be suffixed with ".conf".
      +      * **_contents_** (string): the contents of the drop-in.
      +* **_networkd_** (object): describes the desired state of the networkd files.
      +  * **_units_** (list of objects): the list of networkd files.
      +    * **name** (string): the name of the file. This must be suffixed with a valid unit type (e.g. "00-eth0.network").
      +    * **_contents_** (string): the contents of the networkd file.
      +* **_passwd_** (object): describes the desired additions to the passwd database.
      +  * **_users_** (list of objects): the list of accounts to be added.
      +    * **name** (string): the username for the account.
      +    * **_passwordHash_** (string): the encrypted password for the account.
      +    * **_sshAuthorizedKeys_** (list of strings): a list of SSH keys to be added to the user's authorized_keys.
      +    * **_create_** (object): contains the set of options to be used when creating the user. A non-null entry indicates that the user account shall be created.
      +      * **_uid_** (integer): the user ID of the new account.
      +      * **_gecos_** (string): the GECOS field of the new account.
      +      * **_homeDir_** (string): the home directory of the new account.
      +      * **_noCreateHome_** (boolean): whether or not to create the user's home directory.
      +      * **_primaryGroup_** (string): the name or ID of the primary group of the new account.
      +      * **_groups_** (list of strings): the list of supplementary groups of the new account.
      +      * **_noUserGroup_** (boolean): whether or not to create a group with the same name as the user.
      +      * **_noLogInit_** (boolean): whether or not to add the user to the lastlog and faillog databases.
      +      * **_shell_** (string): the login shell of the new account.
      +  * **_groups_** (list of objects): the list of groups to be added.
      +    * **name** (string): the name of the group.
      +    * **_gid_** (integer): the group ID of the new group.
      +    * **_passwordHash_** (string): the encrypted password of the new group.
      +
      +[part-types]: http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs
      +[rfc2397]: https://tools.ietf.org/html/rfc2397
      diff --git a/vendor/github.com/coreos/ignition/doc/examples.md b/vendor/github.com/coreos/ignition/doc/examples.md
      new file mode 100644
      index 00000000..0bd13a6a
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/doc/examples.md
      @@ -0,0 +1,220 @@
      +# Example Configs
      +
      +Each of these examples is written in version 2.0.0 of the config. Ensure that any configuration is compatible with the version that Ignition accepts. Compatibility requires the major versions to match and the spec version be less than or equal to the version Ignition accepts.
      +
      +## Starting Services
      +
      +This config will write a single service unit (shown below) with the contents of an example service. This unit will be enabled as a dependency of multi-user.target and therefore start on boot.
      +
      +```json
      +{
      +  "ignition": { "version": "2.0.0" },
      +  "systemd": {
      +    "units": [{
      +      "name": "example.service",
      +      "enable": true,
      +      "contents": "[Service]\nType=oneshot\nExecStart=/usr/bin/echo Hello World\n\n[Install]\nWantedBy=multi-user.target"
      +    }]
      +  }
      +}
      +```
      +
      +### example.service
      +
      +```
      +[Service]
      +Type=oneshot
      +ExecStart=/usr/bin/echo Hello World
      +
      +[Install]
      +WantedBy=multi-user.target
      +```
      +
      +## Reformat the Root Filesystem
      +
      +This example Ignition configuration will locate the device with the "ROOT" filesystem label (the root filesystem) and reformat it to btrfs, recreating the filesystem label. The `force` option is set to ensure that `mkfs.btrfs` ignores any existing filesystem.
      +
      +### Btrfs
      +
      +```json
      +{
      +  "ignition": { "version": "2.0.0" },
      +  "storage": {
      +    "filesystems": [{
      +      "mount": {
      +        "device": "/dev/disk/by-label/ROOT",
      +        "format": "btrfs",
      +        "create": {
      +          "force": true,
      +          "options": [ "--label=ROOT" ]
      +        }
      +      }
      +    }]
      +  }
      +}
      +```
      +
      +### XFS
      +
      +```json
      +{
      +  "ignition": { "version": "2.0.0" },
      +  "storage": {
      +    "filesystems": [{
      +      "mount": {
      +        "device": "/dev/disk/by-label/ROOT",
      +        "format": "xfs",
      +        "create": {
      +          "force": true,
      +          "options": [ "-L", "ROOT" ]
      +        }
      +      }
      +    }]
      +  }
      +}
      +```
      +
      +The create options are forwarded to the underlying `mkfs.$format` utility. The respective `mkfs.$format` manual pages document the available options.
      +
      +## Create Files on the Root Filesystem
      +
      +In many cases it is useful to write files to the root filesystem. This example writes a single file to `/foo/bar` on the root filesystem. The contents of the file ("example file") are specified inline in the config using the [data URL scheme][rfc2397].
      +
      +```json
      +{
      +  "ignition": { "version": "2.0.0" },
      +  "storage": {
      +    "files": [{
      +      "filesystem": "root",
      +      "path": "/foo/bar",
      +      "contents": { "source": "data:,example%20file%0A" }
      +    }]
      +  }
      +}
      +```
      +
      +The config makes use of the universally-defined "root" filesystem. This filesystem is defined within Ignition itself and roughly looks like the following. The "root" filesystem allows additional configs to reference the root filesystem, regardless of its type (e.g. btrfs, tmpfs, ext4).
      +
      +```json
      +{
      +  "ignition": { "version": "2.0.0" },
      +  "storage": {
      +    "filesystems": [{
      +      "name": "root",
      +      "path": "/sysroot"
      +    }]
      +  }
      +}
      +```
      +
      +## Create Files from Remote Contents
      +
      +There are cases where it is desirable to write a file to disk, but with the contents of a remote resource. The following config demonstrates how to do this in addition to validating the contents of the file.
      +
      +```json
      +{
      +  "ignition": { "version": "2.0.0" },
      +  "storage": {
      +    "files": [{
      +      "filesystem": "root",
      +      "path": "/foo/bar",
      +      "contents": {
      +        "source": "http://example.com/asset",
      +        "verification": { "hash": "sha512-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" }
      +      }
      +    }]
      +  }
      +}
      +```
      +
      +The SHA512 sum of the file can be determined using `sha512sum`.
      +
      +## Create a RAID-enabled Data Volume
      +
      +In many scenarios, it may be useful to have an external data volume. This config will set up a RAID0 ext4 volume, `data`, between two separate disks. It also writes a mount unit (shown below) which will automatically mount the volume to `/var/lib/data`.
      +
      +```json
      +{
      +  "ignition": { "version": "2.0.0" },
      +  "storage": {
      +    "disks": [
      +      {
      +        "device": "/dev/sdb",
      +        "wipeTable": true,
      +        "partitions": [{
      +          "label": "raid.1.1",
      +          "number": 1,
      +          "size": 20480,
      +          "start": 0
      +        }]
      +      },
      +      {
      +        "device": "/dev/sdc",
      +        "wipeTable": true,
      +        "partitions": [{
      +          "label": "raid.1.2",
      +          "number": 1,
      +          "size": 20480,
      +          "start": 0
      +        }]
      +      }
      +    ],
      +    "raid": [{
      +      "devices": [
      +        "/dev/disk/by-partlabel/raid.1.1",
      +        "/dev/disk/by-partlabel/raid.1.2"
      +      ],
      +      "level": "stripe",
      +      "name": "data"
      +    }],
      +    "filesystems": [{
      +      "mount": {
      +        "device": "/dev/md/data",
      +        "format": "ext4",
      +        "create": { "options": [ "-L", "DATA" ] }
      +      }
      +    }]
      +  },
      +  "systemd": {
      +    "units": [{
      +      "name": "var-lib-data.mount",
      +      "enable": true,
      +      "contents": "[Mount]\nWhat=/dev/md/data\nWhere=/var/lib/data\nType=ext4\n\n[Install]\nWantedBy=local-fs.target"
      +    }]
      +  }
      +}
      +```
      +
      +### var-lib-data.mount
      +
      +```
      +[Mount]
+What=/dev/md/data
      +Where=/var/lib/data
      +Type=ext4
      +
      +[Install]
      +WantedBy=local-fs.target
      +```
      +
      +## Replacing the Config with a Remote Config
      +
      +In some cloud environments, there is a limit on the size of the config which may be provided to a machine. To work around this, Ignition allows configs to be replaced with the contents of an alternate, remote config. The following demonstrates this, using a SHA512 sum to verify the contents of the config.
      +
      +```json
      +{
      +  "ignition": {
      +    "version": "2.0.0",
      +    "config": {
      +      "replace": {
      +        "source": "http://example.com/config.json",
      +        "verification": { "hash": "sha512-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" }
      +      }
      +    }
      +  }
      +}
      +```
      +
      +The SHA512 sum of the config can be determined using `sha512sum`.
      +
      +[rfc2397]: http://tools.ietf.org/html/rfc2397
      diff --git a/vendor/github.com/coreos/ignition/doc/getting-started.md b/vendor/github.com/coreos/ignition/doc/getting-started.md
      new file mode 100644
      index 00000000..74d9795a
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/doc/getting-started.md
      @@ -0,0 +1,39 @@
      +# Getting Started with Ignition
      +
      +*Ignition* is a low-level system configuration utility. The Ignition executable is part of the temporary initial root filesystem, the *initramfs*. When Ignition runs, it finds configuration data in a named location for a given environment, such as a file or URL, and applies it to the machine before `switch_root` is called to pivot to the machine's root filesystem.
      +
      +Ignition uses a JSON configuration file to represent the set of changes to be made. The format of this config is detailed [in the specification][configspec]. One of the most important parts of this config is the version number. This **must** match the version number accepted by Ignition. If the config version isn't accepted by Ignition, Ignition will fail to run and prevent the machine from booting. This can be seen by inspecting the console output of the failed instance. For more information, check out the [troubleshooting section][troubleshooting].
      +
      +## Providing a Config
      +
      +Ignition will choose where to look for configuration based on the underlying platform. A list of [supported platforms][platforms] and metadata sources is provided for reference.
      +
      +The configuration must be passed to Ignition through the designated data source. Please refer to Ignition [config examples][examples] to learn about writing config files. The provided configuration will be appended to the universal base configuration:
      +
      +```json
      +{
      +  "storage": {
      +    "filesystems": [{
      +      "name": "root",
      +      "path": "/sysroot"
      +    }]
      +  }
      +}
      +```
      +
      +## Troubleshooting
      +
      +The single most useful piece of information needed when troubleshooting is the log from Ignition. Ignition runs in multiple stages so it's easiest to filter by the syslog identifier: `ignition`. When using systemd, this can be accomplished with the following command:
      +
      +```
      +journalctl --identifier=ignition
      +```
      +
      +In the event that this doesn't yield any results, running as root may help. There are circumstances where the journal isn't owned by the systemd-journal group or the current user is not a part of that group.
      +
      +In the vast majority of cases, it will be immediately obvious why Ignition failed. If it's not, inspect the config that Ignition wrote into the log. This shows how Ignition interpreted the supplied configuration. The user-provided config may have a misspelled section or maybe an incorrect hierarchy.
      +
      +[configspec]: configuration.md
      +[examples]: https://github.com/coreos/docs/blob/master/ignition/examples.md
      +[platforms]: supported-platforms.md
      +[troubleshooting]: #troubleshooting
      diff --git a/vendor/github.com/coreos/ignition/doc/migrating-configs.md b/vendor/github.com/coreos/ignition/doc/migrating-configs.md
      new file mode 100644
      index 00000000..3c684a2a
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/doc/migrating-configs.md
      @@ -0,0 +1,148 @@
      +# Migrating Between Configuration Versions
      +
      +Occasionally, there are changes made to Ignition's configuration that break backward compatibility. While this is not a concern for running machines (since Ignition only runs one time during first boot), it is a concern for those who maintain configuration files. This document serves to detail each of the breaking changes and tries to provide some reasoning for the change. This does not cover all of the changes to the spec - just those that need to be considered when migrating from one version to the next.
      +
      +## From Version 1 to 2.0.0
      +
      +This section will cover the breaking changes made between versions 1 and 2.0.0 of the configuration specification.
      +
      +### Version
      +
      +One of the more notable changes was the representation of the config version, moving from an integer to a [Semantic Version][semver] string. Using a Semantic Version will allow the configuration specification to pick up additions and other backward-compatible changes in the future without necessarily requiring the user to update their config. The version number has also moved locations and is now in an Ignition metadata section named "ignition".
      +
      +The following shows the changes to the version section:
      +
      +```json
      +{
      +  "ignitionVersion": 1
      +}
      +```
      +
      +```json
      +{
      +  "ignition": {
      +    "version": "2.0.0"
      +  }
      +}
      +```
      +
      +### Files
      +
      +The `files` section was moved out from under `filesystems` and is now directly under the `storage` section. This was done in order to decouple file definitions from filesystem definitions. This is particularly useful when merging multiple configs together. One config may define a filesystem while another can write files to that filesystem without needing to know the specifics of that filesystem. A common example of this is referencing the universally-defined "root" filesystem which is defined by default inside of Ignition.
      +
      +The following shows this particular change to the files section:
      +
      +```json
      +{
      +  "storage": {
      +    "filesystems": [
      +      {
      +        "device": "/dev/sdb1",
      +        "format": "ext4",
      +        "files": [
      +          {
      +            "path": "/foo/bar"
      +          }
      +        ]
      +      }
      +    ]
      +  }
      +}
      +```
      +
      +```json
      +{
      +  "storage": {
      +    "filesystems": [
      +      {
      +        "name": "example",
      +        "device": "/dev/sdb1",
      +        "format": "ext4"
      +      }
      +    ],
      +    "files": [
      +      {
      +        "filesystem": "example",
      +        "path": "/foo/bar"
      +      }
      +    ]
      +  }
      +}
      +```
      +
      +#### Contents
      +
+The `contents` section was changed from a simple string to an object. This allows extra properties to be added to file contents (e.g. compression type, content hashes). The source for the file contents has also changed from being inline in the config to a URL. This provides the ability to include the contents inline (via a [data URL][rfc2397]) or to reference a remote resource (via an http URL).
      +
      +The following shows the changes to the file contents (snipped for clarity):
      +
      +```json
      +...
      +
      +"files": [
      +  {
      +    "path": "/foo/bar",
      +    "contents": "example file\n"
      +  }
      +]
      +
      +...
      +```
      +
      +```json
      +...
      +
      +"files": [
      +  {
      +    "path": "/foo/bar",
      +    "contents": {
      +      "source": "data:,example%20file%0A"
      +    }
      +  }
      +]
      +
      +...
      +```
      +
      +#### User and Group
      +
      +The `uid` and `gid` sections have been moved into new `id` sections under new `user` and `group` sections. This will allow alternate methods of identifying a user or a group (e.g. by name) in the future.
      +
      +The following shows the changes to the file uid and gid:
      +
      +```json
      +...
      +
      +"files": [
      +  {
      +    "path": "/foo/bar",
      +    "uid": 500,
      +    "gid": 500
      +  }
      +]
      +
      +...
      +
      +```
      +
      +```json
      +...
      +
      +"files": [
      +  {
      +    "path": "/foo/bar",
      +    "user": {
      +      "id": 500
      +    },
      +    "group": {
      +      "id": 500
      +    }
      +  }
      +]
      +
      +...
      +
      +```
      +
      +[semver]: http://semver.org
      +[rfc2397]: https://tools.ietf.org/html/rfc2397
      diff --git a/vendor/github.com/coreos/ignition/doc/supported-platforms.md b/vendor/github.com/coreos/ignition/doc/supported-platforms.md
      new file mode 100644
      index 00000000..c8473112
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/doc/supported-platforms.md
      @@ -0,0 +1,17 @@
      +# Supported Platforms #
      +
      +Ignition is currently only supported for the following platforms:
      +
      +* [Bare Metal] - Use the `coreos.config.url` kernel parameter to provide a URL to the configuration. The URL can use the `http://` scheme to specify a remote config or the `oem://` scheme to specify a local config, rooted in `/usr/share/oem`.
      +* [PXE] - Use the `coreos.config.url` and `coreos.first_boot=1` (**in case of the very first PXE boot only**) kernel parameters to provide a URL to the configuration. The URL can use the `http://` scheme to specify a remote config or the `oem://` scheme to specify a local config, rooted in `/usr/share/oem`.
      +* [Amazon EC2] - Ignition will read its configuration from the userdata and append the SSH keys listed in the instance metadata.
      +* [Microsoft Azure] - Ignition will read its configuration from the custom data provided to the instance. SSH keys are handled by the Azure Linux Agent.
      +* [VMware] - Use the VMware Guestinfo variables `coreos.config.data` and `coreos.config.data.encoding` to provide the config and its encoding to the virtual machine. Valid encodings are "", "base64", and "gzip+base64".
      +
      +Ignition is under active development so expect this list to expand in the coming months.
      +
      +[Bare Metal]: https://github.com/coreos/docs/blob/master/os/installing-to-disk.md
      +[PXE]: https://github.com/coreos/docs/blob/master/os/booting-with-pxe.md
      +[Amazon EC2]: https://github.com/coreos/docs/blob/master/os/booting-on-ec2.md
      +[Microsoft Azure]: https://github.com/coreos/docs/blob/master/os/booting-on-azure.md
      +[VMware]: https://github.com/coreos/docs/blob/master/os/booting-on-vmware.md
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/engine.go b/vendor/github.com/coreos/ignition/internal/exec/engine.go
      new file mode 100644
      index 00000000..fcc277b3
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/engine.go
      @@ -0,0 +1,184 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package exec
      +
      +import (
      +	"encoding/json"
      +	"errors"
      +	"io/ioutil"
      +	"net/http"
      +	"time"
      +
      +	"github.com/coreos/ignition/config"
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/exec/stages"
      +	"github.com/coreos/ignition/internal/log"
      +	"github.com/coreos/ignition/internal/providers"
      +	putil "github.com/coreos/ignition/internal/providers/util"
      +	"github.com/coreos/ignition/internal/util"
      +)
      +
      +const (
      +	DefaultOnlineTimeout = time.Minute
      +)
      +
      +var (
      +	ErrSchemeUnsupported = errors.New("unsupported url scheme")
      +	ErrNetworkFailure    = errors.New("network failure")
      +)
      +
      +var (
      +	baseConfig = types.Config{
      +		Ignition: types.Ignition{Version: types.IgnitionVersion(types.MaxVersion)},
      +		Storage: types.Storage{
      +			Filesystems: []types.Filesystem{{
      +				Name: "root",
      +				Path: "/sysroot",
      +			}},
      +		},
      +	}
      +)
      +
      +// Engine represents the entity that fetches and executes a configuration.
      +type Engine struct {
      +	ConfigCache   string
      +	OnlineTimeout time.Duration
      +	Logger        *log.Logger
      +	Root          string
      +	Provider      providers.Provider
      +	OemConfig     types.Config
      +}
      +
      +// Run executes the stage of the given name. It returns true if the stage
      +// successfully ran and false if there were any errors.
      +func (e Engine) Run(stageName string) bool {
      +	cfg, err := e.acquireConfig()
      +	switch err {
      +	case config.ErrEmpty, nil:
      +		e.Logger.PushPrefix(stageName)
      +		defer e.Logger.PopPrefix()
      +		return stages.Get(stageName).Create(e.Logger, e.Root).Run(config.Append(config.Append(baseConfig, e.OemConfig), cfg))
      +	case config.ErrCloudConfig, config.ErrScript:
      +		e.Logger.Info("%v: ignoring and exiting...", err)
      +		return true
      +	default:
      +		e.Logger.Crit("failed to acquire config: %v", err)
      +		return false
      +	}
      +}
      +
      +// acquireConfig returns the configuration, first checking a local cache
      +// before attempting to fetch it from the provider.
      +func (e Engine) acquireConfig() (cfg types.Config, err error) {
      +	// First try read the config @ e.ConfigCache.
      +	b, err := ioutil.ReadFile(e.ConfigCache)
      +	if err == nil {
      +		if err = json.Unmarshal(b, &cfg); err != nil {
      +			e.Logger.Crit("failed to parse cached config: %v", err)
      +		}
      +		return
      +	}
      +
      +	// (Re)Fetch the config if the cache is unreadable.
      +	cfg, err = e.fetchProviderConfig()
      +	if err != nil {
      +		e.Logger.Crit("failed to fetch config: %s", err)
      +		return
      +	}
      +	e.Logger.Debug("fetched config: %+v", cfg)
      +
      +	// Populate the config cache.
      +	b, err = json.Marshal(cfg)
      +	if err != nil {
      +		e.Logger.Crit("failed to marshal cached config: %v", err)
      +		return
      +	}
      +	if err = ioutil.WriteFile(e.ConfigCache, b, 0640); err != nil {
      +		e.Logger.Crit("failed to write cached config: %v", err)
      +		return
      +	}
      +
      +	return
      +}
      +
+// fetchProviderConfig returns the configuration from the engine's provider
+// returning an error if the provider is unavailable. This will also render the
+// config (see renderConfig) before returning.
+func (e Engine) fetchProviderConfig() (types.Config, error) {
+	// Block until the provider reports itself online, up to OnlineTimeout.
+	if err := putil.WaitUntilOnline(e.Provider, e.OnlineTimeout); err != nil {
+		return types.Config{}, err
+	}
+
+	cfg, err := e.Provider.FetchConfig()
+	switch err {
+	case config.ErrDeprecated:
+		// Deprecated formats are warned about but still rendered and used
+		// (note the fallthrough into the nil case).
+		e.Logger.Warning("%v: the provided config format is deprecated and will not be supported in the future", err)
+		fallthrough
+	case nil:
+		return e.renderConfig(cfg)
+	default:
+		return types.Config{}, err
+	}
+}
      +
      +// renderConfig evaluates "ignition.config.replace" and "ignition.config.append"
      +// in the given config and returns the result. If "ignition.config.replace" is
      +// set, the referenced and evaluted config will be returned. Otherwise, if
      +// "ignition.config.append" is set, each of the referenced configs will be
      +// evaluated and appended to the provided config. If neither option is set, the
      +// provided config will be returned unmodified.
      +func (e Engine) renderConfig(cfg types.Config) (types.Config, error) {
      +	if cfgRef := cfg.Ignition.Config.Replace; cfgRef != nil {
      +		return e.fetchReferencedConfig(*cfgRef)
      +	}
      +
      +	appendedCfg := cfg
      +	for _, cfgRef := range cfg.Ignition.Config.Append {
      +		newCfg, err := e.fetchReferencedConfig(cfgRef)
      +		if err != nil {
      +			return newCfg, err
      +		}
      +
      +		appendedCfg = config.Append(appendedCfg, newCfg)
      +	}
      +	return appendedCfg, nil
      +}
      +
+// fetchReferencedConfig fetches, renders, and attempts to verify the requested
+// config.
+func (e Engine) fetchReferencedConfig(cfgRef types.ConfigReference) (types.Config, error) {
+	var rawCfg []byte
+	switch cfgRef.Source.Scheme {
+	case "http":
+		// Accepts 200 and 204 responses; a nil body signals network failure.
+		rawCfg = util.NewHttpClient(e.Logger).
+			FetchConfig(cfgRef.Source.String(), http.StatusOK, http.StatusNoContent)
+		if rawCfg == nil {
+			return types.Config{}, ErrNetworkFailure
+		}
+	default:
+		return types.Config{}, ErrSchemeUnsupported
+	}
+
+	// Validate the payload against its verification data before parsing.
+	if err := util.AssertValid(cfgRef.Verification, rawCfg); err != nil {
+		return types.Config{}, err
+	}
+
+	cfg, err := config.Parse(rawCfg)
+	if err != nil {
+		return types.Config{}, err
+	}
+
+	// Referenced configs may themselves contain replace/append references,
+	// so render recursively.
+	return e.renderConfig(cfg)
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/engine_test.go b/vendor/github.com/coreos/ignition/internal/exec/engine_test.go
      new file mode 100644
      index 00000000..ed329002
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/engine_test.go
      @@ -0,0 +1,98 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package exec
      +
      +import (
      +	"errors"
      +	"reflect"
      +	"testing"
      +	"time"
      +
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/providers"
      +)
      +
+// mockProvider returns canned values for tests; each field backs the
+// correspondingly named method below.
+type mockProvider struct {
+	config  types.Config
+	err     error
+	online  bool
+	retry   bool
+	backoff time.Duration
+}
+
+func (p mockProvider) FetchConfig() (types.Config, error) { return p.config, p.err }
+func (p mockProvider) IsOnline() bool                     { return p.online }
+func (p mockProvider) ShouldRetry() bool                  { return p.retry }
+func (p mockProvider) BackoffDuration() time.Duration     { return p.backoff }
      +
+// TODO: exercise Engine.Run; left empty pending a way to fake its
+// collaborators (stages, config cache).
+func TestRun(t *testing.T) {
+}
      +
      +func TestFetchConfigs(t *testing.T) {
      +	type in struct {
      +		provider mockProvider
      +		timeout  time.Duration
      +	}
      +	type out struct {
      +		config types.Config
      +		err    error
      +	}
      +
      +	online := mockProvider{
      +		online: true,
      +		config: types.Config{
      +			Systemd: types.Systemd{
      +				Units: []types.SystemdUnit{},
      +			},
      +		},
      +	}
      +	error := mockProvider{
      +		online: true,
      +		err:    errors.New("test error"),
      +	}
      +	offline := mockProvider{online: false}
      +
      +	tests := []struct {
      +		in  in
      +		out out
      +	}{
      +		{
      +			in:  in{provider: online, timeout: time.Second},
      +			out: out{config: online.config},
      +		},
      +		{
      +			in:  in{provider: error, timeout: time.Second},
      +			out: out{config: types.Config{}, err: error.err},
      +		},
      +		{
      +			in:  in{provider: offline, timeout: time.Second},
      +			out: out{config: types.Config{}, err: providers.ErrNoProvider},
      +		},
      +	}
      +
      +	for i, test := range tests {
      +		config, err := Engine{
      +			Provider:      test.in.provider,
      +			OnlineTimeout: test.in.timeout,
      +		}.fetchProviderConfig()
      +		if !reflect.DeepEqual(test.out.config, config) {
      +			t.Errorf("#%d: bad provider: want %+v, got %+v", i, test.out.config, config)
      +		}
      +		if test.out.err != err {
      +			t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/stages/disks/disks.go b/vendor/github.com/coreos/ignition/internal/exec/stages/disks/disks.go
      new file mode 100644
      index 00000000..0b359317
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/stages/disks/disks.go
      @@ -0,0 +1,260 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
+// The disks stage is responsible for partitioning disks, creating RAID
+// arrays, and formatting partitions.
      +
      +package disks
      +
      +import (
      +	"fmt"
      +	"os/exec"
      +
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/exec/stages"
      +	"github.com/coreos/ignition/internal/exec/util"
      +	"github.com/coreos/ignition/internal/log"
      +	"github.com/coreos/ignition/internal/sgdisk"
      +	"github.com/coreos/ignition/internal/systemd"
      +)
      +
+const (
+	// name identifies this stage on the command line and in the registry.
+	name = "disks"
+)
+
+func init() {
+	// Make this stage discoverable via stages.Get(name).
+	stages.Register(creator{})
+}
      +
      +type creator struct{}
      +
      +func (creator) Create(logger *log.Logger, root string) stages.Stage {
      +	return &stage{util.Util{
      +		DestDir: root,
      +		Logger:  logger,
      +	}}
      +}
      +
      +func (creator) Name() string {
      +	return name
      +}
      +
+// stage executes the disks stage; util.Util supplies the logger and
+// destination directory used by the helpers below.
+type stage struct {
+	util.Util
+}
+
+func (stage) Name() string {
+	return name
+}
      +
      +func (s stage) Run(config types.Config) bool {
      +	if err := s.createPartitions(config); err != nil {
      +		s.Logger.Crit("create partitions failed: %v", err)
      +		return false
      +	}
      +
      +	if err := s.createRaids(config); err != nil {
      +		s.Logger.Crit("failed to create raids: %v", err)
      +		return false
      +	}
      +
      +	if err := s.createFilesystems(config); err != nil {
      +		s.Logger.Crit("failed to create filesystems: %v", err)
      +		return false
      +	}
      +
      +	return true
      +}
      +
+// waitOnDevices waits for the devices enumerated in devs as a logged operation
+// using ctxt for the logging and systemd unit identity.
+func (s stage) waitOnDevices(devs []string, ctxt string) error {
+	// NOTE(review): s.LogOp is promoted from the embedded util.Util, while
+	// the other helpers call s.Logger.LogOp — confirm both are equivalent.
+	if err := s.LogOp(
+		func() error { return systemd.WaitOnDevices(devs, ctxt) },
+		"waiting for devices %v", devs,
+	); err != nil {
+		return fmt.Errorf("failed to wait on %s devs: %v", ctxt, err)
+	}
+	return nil
+}
      +
+// createPartitions creates the partitions described in config.Storage.Disks.
+func (s stage) createPartitions(config types.Config) error {
+	if len(config.Storage.Disks) == 0 {
+		return nil
+	}
+	s.Logger.PushPrefix("createPartitions")
+	defer s.Logger.PopPrefix()
+
+	// Wait for every target device to appear before touching any of them.
+	devs := []string{}
+	for _, disk := range config.Storage.Disks {
+		devs = append(devs, string(disk.Device))
+	}
+
+	if err := s.waitOnDevices(devs, "disks"); err != nil {
+		return err
+	}
+
+	for _, dev := range config.Storage.Disks {
+		err := s.Logger.LogOp(func() error {
+			// Batch the optional table wipe and all partition creations into
+			// one sgdisk operation, applied by the single Commit below.
+			op := sgdisk.Begin(s.Logger, string(dev.Device))
+			if dev.WipeTable {
+				s.Logger.Info("wiping partition table requested on %q", dev.Device)
+				op.WipeTable(true)
+			}
+
+			for _, part := range dev.Partitions {
+				op.CreatePartition(sgdisk.Partition{
+					Number:   part.Number,
+					Length:   uint64(part.Size),
+					Offset:   uint64(part.Start),
+					Label:    string(part.Label),
+					TypeGUID: string(part.TypeGUID),
+				})
+			}
+
+			if err := op.Commit(); err != nil {
+				return fmt.Errorf("commit failure: %v", err)
+			}
+			return nil
+		}, "partitioning %q", dev.Device)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
      +
+// createRaids creates the raid arrays described in config.Storage.Arrays.
+func (s stage) createRaids(config types.Config) error {
+	if len(config.Storage.Arrays) == 0 {
+		return nil
+	}
+	s.Logger.PushPrefix("createRaids")
+	defer s.Logger.PopPrefix()
+
+	// Wait for every member device of every array before assembling any.
+	devs := []string{}
+	for _, array := range config.Storage.Arrays {
+		for _, dev := range array.Devices {
+			devs = append(devs, string(dev))
+		}
+	}
+
+	if err := s.waitOnDevices(devs, "raids"); err != nil {
+		return err
+	}
+
+	for _, md := range config.Storage.Arrays {
+		// FIXME(vc): this is utterly flummoxed by a preexisting md.Name, the magic of device-resident md metadata really interferes with us.
+		// It's as if what ignition really needs is to turn off automagic md probing/running before getting started.
+		// --raid-devices counts active members only; spares are declared
+		// separately via --spare-devices below.
+		args := []string{
+			"--create", md.Name,
+			"--force",
+			"--run",
+			"--level", md.Level,
+			"--raid-devices", fmt.Sprintf("%d", len(md.Devices)-md.Spares),
+		}
+
+		if md.Spares > 0 {
+			args = append(args, "--spare-devices", fmt.Sprintf("%d", md.Spares))
+		}
+
+		for _, dev := range md.Devices {
+			args = append(args, string(dev))
+		}
+
+		if err := s.Logger.LogCmd(
+			exec.Command("/sbin/mdadm", args...),
+			"creating %q", md.Name,
+		); err != nil {
+			return fmt.Errorf("mdadm failed: %v", err)
+		}
+	}
+
+	return nil
+}
      +
      +// createFilesystems creates the filesystems described in config.Storage.Filesystems.
      +func (s stage) createFilesystems(config types.Config) error {
      +	fss := make([]types.FilesystemMount, 0, len(config.Storage.Filesystems))
      +	for _, fs := range config.Storage.Filesystems {
      +		if fs.Mount != nil {
      +			fss = append(fss, *fs.Mount)
      +		}
      +	}
      +
      +	if len(fss) == 0 {
      +		return nil
      +	}
      +	s.Logger.PushPrefix("createFilesystems")
      +	defer s.Logger.PopPrefix()
      +
      +	devs := []string{}
      +	for _, fs := range fss {
      +		devs = append(devs, string(fs.Device))
      +	}
      +
      +	if err := s.waitOnDevices(devs, "filesystems"); err != nil {
      +		return err
      +	}
      +
      +	for _, fs := range fss {
      +		if err := s.createFilesystem(fs); err != nil {
      +			return err
      +		}
      +	}
      +
      +	return nil
      +}
      +
+// createFilesystem formats a single filesystem with the matching mkfs tool.
+// Filesystems without an explicit Create request are left untouched.
+func (s stage) createFilesystem(fs types.FilesystemMount) error {
+	if fs.Create == nil {
+		return nil
+	}
+
+	// Pick the mkfs binary and its format-specific force flag.
+	mkfs := ""
+	args := []string(fs.Create.Options)
+	switch fs.Format {
+	case "btrfs":
+		mkfs = "/sbin/mkfs.btrfs"
+		if fs.Create.Force {
+			args = append(args, "--force")
+		}
+	case "ext4":
+		mkfs = "/sbin/mkfs.ext4"
+		if fs.Create.Force {
+			args = append(args, "-F")
+		}
+	case "xfs":
+		mkfs = "/sbin/mkfs.xfs"
+		if fs.Create.Force {
+			args = append(args, "-f")
+		}
+	default:
+		return fmt.Errorf("unsupported filesystem format: %q", fs.Format)
+	}
+
+	// The target device is always the final mkfs argument.
+	args = append(args, string(fs.Device))
+	if err := s.Logger.LogCmd(
+		exec.Command(mkfs, args...),
+		"creating %q filesystem on %q",
+		fs.Format, string(fs.Device),
+	); err != nil {
+		return fmt.Errorf("failed to run %q: %v %v", mkfs, err, args)
+	}
+
+	return nil
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/stages/files/files.go b/vendor/github.com/coreos/ignition/internal/exec/stages/files/files.go
      new file mode 100644
      index 00000000..faa69e97
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/stages/files/files.go
      @@ -0,0 +1,324 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package files
      +
      +import (
      +	"errors"
      +	"fmt"
      +	"io/ioutil"
      +	"os"
      +	"syscall"
      +
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/exec/stages"
      +	"github.com/coreos/ignition/internal/exec/util"
      +	"github.com/coreos/ignition/internal/log"
      +)
      +
+const (
+	// name identifies this stage on the command line and in the registry.
+	name = "files"
+)
+
+var (
+	// ErrFilesystemUndefined is returned when a file references a filesystem
+	// that does not appear in config.Storage.Filesystems.
+	ErrFilesystemUndefined = errors.New("the referenced filesystem was not defined")
+)
+
+func init() {
+	// Make this stage discoverable via stages.Get(name).
+	stages.Register(creator{})
+}
      +
      +type creator struct{}
      +
      +func (creator) Create(logger *log.Logger, root string) stages.Stage {
      +	return &stage{util.Util{
      +		DestDir: root,
      +		Logger:  logger,
      +	}}
      +}
      +
      +func (creator) Name() string {
      +	return name
      +}
      +
+// stage executes the files stage; util.Util supplies the logger and
+// destination directory used by the helpers below.
+type stage struct {
+	util.Util
+}
+
+func (stage) Name() string {
+	return name
+}
      +
      +func (s stage) Run(config types.Config) bool {
      +	if err := s.createPasswd(config); err != nil {
      +		s.Logger.Crit("failed to create users/groups: %v", err)
      +		return false
      +	}
      +
      +	if err := s.createUnits(config); err != nil {
      +		s.Logger.Crit("failed to create units: %v", err)
      +		return false
      +	}
      +
      +	if err := s.createFilesystemsFiles(config); err != nil {
      +		s.Logger.Crit("failed to create files: %v", err)
      +		return false
      +	}
      +
      +	return true
      +}
      +
+// createFilesystemsFiles creates the files described in config.Storage.Filesystems.
+func (s stage) createFilesystemsFiles(config types.Config) error {
+	if len(config.Storage.Filesystems) == 0 {
+		return nil
+	}
+	s.Logger.PushPrefix("createFilesystemsFiles")
+	defer s.Logger.PopPrefix()
+
+	// Group files by target filesystem so each filesystem is handled once.
+	fileMap, err := s.mapFilesToFilesystems(config)
+	if err != nil {
+		return err
+	}
+
+	for fs, f := range fileMap {
+		if err := s.createFiles(fs, f); err != nil {
+			return fmt.Errorf("failed to create files: %v", err)
+		}
+	}
+
+	return nil
+}
      +
      +// mapFilesToFilesystems builds a map of filesystems to files. If multiple
      +// definitions of the same filesystem are present, only the final definition is
      +// used.
      +func (s stage) mapFilesToFilesystems(config types.Config) (map[types.Filesystem][]types.File, error) {
      +	files := map[string][]types.File{}
      +	for _, f := range config.Storage.Files {
      +		files[f.Filesystem] = append(files[f.Filesystem], f)
      +	}
      +
      +	filesystems := map[string]types.Filesystem{}
      +	for _, fs := range config.Storage.Filesystems {
      +		filesystems[fs.Name] = fs
      +	}
      +
      +	fileMap := map[types.Filesystem][]types.File{}
      +	for fsn, f := range files {
      +		if fs, ok := filesystems[fsn]; ok {
      +			fileMap[fs] = append(fileMap[fs], f...)
      +		} else {
      +			s.Logger.Crit("the filesystem (%q), was not defined", fsn)
      +			return nil, ErrFilesystemUndefined
      +		}
      +	}
      +
      +	return fileMap, nil
      +}
      +
+// createFiles creates any files listed for the filesystem in fs.Files.
+func (s stage) createFiles(fs types.Filesystem, files []types.File) error {
+	s.Logger.PushPrefix("createFiles")
+	defer s.Logger.PopPrefix()
+
+	mnt := string(fs.Path)
+	if len(mnt) == 0 {
+		// No path given: mount the filesystem's device at a temporary
+		// location for the duration of this call; the deferred unmount and
+		// directory removal run when this function returns.
+		var err error
+		mnt, err = ioutil.TempDir("", "ignition-files")
+		if err != nil {
+			return fmt.Errorf("failed to create temp directory: %v", err)
+		}
+		defer os.Remove(mnt)
+
+		dev := string(fs.Mount.Device)
+		format := string(fs.Mount.Format)
+
+		if err := s.Logger.LogOp(
+			func() error { return syscall.Mount(dev, mnt, format, 0, "") },
+			"mounting %q at %q", dev, mnt,
+		); err != nil {
+			return fmt.Errorf("failed to mount device %q at %q: %v", dev, mnt, err)
+		}
+		defer s.Logger.LogOp(
+			func() error { return syscall.Unmount(mnt, 0) },
+			"unmounting %q at %q", dev, mnt,
+		)
+	}
+
+	// Write each file relative to the mount point.
+	u := util.Util{
+		Logger:  s.Logger,
+		DestDir: mnt,
+	}
+	for _, f := range files {
+		file := util.RenderFile(s.Logger, f)
+		if file == nil {
+			return fmt.Errorf("failed to resolve file %q", f.Path)
+		}
+
+		if err := s.Logger.LogOp(
+			func() error { return u.WriteFile(file) },
+			"writing file %q", string(f.Path),
+		); err != nil {
+			return fmt.Errorf("failed to create file %q: %v", file.Path, err)
+		}
+	}
+
+	return nil
+}
      +
+// createUnits creates the units listed under systemd.units and networkd.units.
+func (s stage) createUnits(config types.Config) error {
+	for _, unit := range config.Systemd.Units {
+		if err := s.writeSystemdUnit(unit); err != nil {
+			return err
+		}
+		// Enable and Mask are independent flags; both may apply to one unit.
+		if unit.Enable {
+			if err := s.Logger.LogOp(
+				func() error { return s.EnableUnit(unit) },
+				"enabling unit %q", unit.Name,
+			); err != nil {
+				return err
+			}
+		}
+		if unit.Mask {
+			if err := s.Logger.LogOp(
+				func() error { return s.MaskUnit(unit) },
+				"masking unit %q", unit.Name,
+			); err != nil {
+				return err
+			}
+		}
+	}
+	for _, unit := range config.Networkd.Units {
+		if err := s.writeNetworkdUnit(unit); err != nil {
+			return err
+		}
+	}
+	return nil
+}
      +
+// writeSystemdUnit creates the specified unit and any dropins for that unit.
+// If the contents of the unit are empty, the unit is not created. The same
+// applies to the unit's dropins.
+func (s stage) writeSystemdUnit(unit types.SystemdUnit) error {
+	return s.Logger.LogOp(func() error {
+		// Dropins are written first, even when the unit itself is empty.
+		for _, dropin := range unit.DropIns {
+			if dropin.Contents == "" {
+				continue
+			}
+
+			f := util.FileFromUnitDropin(unit, dropin)
+			if err := s.Logger.LogOp(
+				func() error { return s.WriteFile(f) },
+				"writing dropin %q at %q", dropin.Name, f.Path,
+			); err != nil {
+				return err
+			}
+		}
+
+		if unit.Contents == "" {
+			return nil
+		}
+
+		f := util.FileFromSystemdUnit(unit)
+		if err := s.Logger.LogOp(
+			func() error { return s.WriteFile(f) },
+			"writing unit %q at %q", unit.Name, f.Path,
+		); err != nil {
+			return err
+		}
+
+		return nil
+	}, "writing unit %q", unit.Name)
+}
      +
+// writeNetworkdUnit creates the specified unit. If the contents of the unit
+// are empty, the unit is not created.
+func (s stage) writeNetworkdUnit(unit types.NetworkdUnit) error {
+	return s.Logger.LogOp(func() error {
+		if unit.Contents == "" {
+			return nil
+		}
+
+		f := util.FileFromNetworkdUnit(unit)
+		if err := s.Logger.LogOp(
+			func() error { return s.WriteFile(f) },
+			"writing unit %q at %q", unit.Name, f.Path,
+		); err != nil {
+			return err
+		}
+
+		return nil
+	}, "writing unit %q", unit.Name)
+}
      +
+// createPasswd creates the users and groups as described in config.Passwd.
+// Groups are created first so user creation can reference them.
+func (s stage) createPasswd(config types.Config) error {
+	if err := s.createGroups(config); err != nil {
+		return fmt.Errorf("failed to create groups: %v", err)
+	}
+
+	if err := s.createUsers(config); err != nil {
+		return fmt.Errorf("failed to create users: %v", err)
+	}
+
+	return nil
+}
      +
+// createUsers creates the users as described in config.Passwd.Users.
+// For each user: create the account, set its password hash, then install
+// its authorized SSH keys; the first failure aborts.
+func (s stage) createUsers(config types.Config) error {
+	if len(config.Passwd.Users) == 0 {
+		return nil
+	}
+	s.Logger.PushPrefix("createUsers")
+	defer s.Logger.PopPrefix()
+
+	for _, u := range config.Passwd.Users {
+		if err := s.CreateUser(u); err != nil {
+			return fmt.Errorf("failed to create user %q: %v",
+				u.Name, err)
+		}
+
+		if err := s.SetPasswordHash(u); err != nil {
+			return fmt.Errorf("failed to set password for %q: %v",
+				u.Name, err)
+		}
+
+		if err := s.AuthorizeSSHKeys(u); err != nil {
+			return fmt.Errorf("failed to add keys to user %q: %v",
+				u.Name, err)
+		}
+	}
+
+	return nil
+}
      +
+// createGroups creates the groups as described in config.Passwd.Groups.
+func (s stage) createGroups(config types.Config) error {
+	if len(config.Passwd.Groups) == 0 {
+		return nil
+	}
+	s.Logger.PushPrefix("createGroups")
+	defer s.Logger.PopPrefix()
+
+	for _, g := range config.Passwd.Groups {
+		if err := s.CreateGroup(g); err != nil {
+			return fmt.Errorf("failed to create group %q: %v",
+				g.Name, err)
+		}
+	}
+
+	return nil
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/stages/files/files_test.go b/vendor/github.com/coreos/ignition/internal/exec/stages/files/files_test.go
      new file mode 100644
      index 00000000..1a269717
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/stages/files/files_test.go
      @@ -0,0 +1,85 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package files
      +
      +import (
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/exec/util"
      +	"github.com/coreos/ignition/internal/log"
      +)
      +
+// TestMapFilesToFilesystems covers the file-to-filesystem grouping, including
+// undefined filesystems and duplicate filesystem definitions.
+func TestMapFilesToFilesystems(t *testing.T) {
+	type in struct {
+		config types.Config
+	}
+	type out struct {
+		files map[types.Filesystem][]types.File
+		err   error
+	}
+
+	tests := []struct {
+		in  in
+		out out
+	}{
+		// Empty config yields an empty (non-nil) map.
+		{
+			in:  in{config: types.Config{}},
+			out: out{files: map[types.Filesystem][]types.File{}},
+		},
+		// A file referencing an undefined filesystem is an error.
+		{
+			in:  in{config: types.Config{Storage: types.Storage{Files: []types.File{{Filesystem: "foo"}}}}},
+			out: out{err: ErrFilesystemUndefined},
+		},
+		// Two files on the same filesystem group under one key, in order.
+		{
+			in: in{config: types.Config{Storage: types.Storage{
+				Filesystems: []types.Filesystem{{Name: "fs1"}},
+				Files:       []types.File{{Filesystem: "fs1", Path: "/foo"}, {Filesystem: "fs1", Path: "/bar"}},
+			}}},
+			out: out{files: map[types.Filesystem][]types.File{types.Filesystem{Name: "fs1"}: {{Filesystem: "fs1", Path: "/foo"}, {Filesystem: "fs1", Path: "/bar"}}}},
+		},
+		// Files spread across distinct filesystems get distinct keys.
+		{
+			in: in{config: types.Config{Storage: types.Storage{
+				Filesystems: []types.Filesystem{{Name: "fs1", Path: "/fs1"}, {Name: "fs2", Path: "/fs2"}},
+				Files:       []types.File{{Filesystem: "fs1", Path: "/foo"}, {Filesystem: "fs2", Path: "/bar"}},
+			}}},
+			out: out{files: map[types.Filesystem][]types.File{
+				types.Filesystem{Name: "fs1", Path: "/fs1"}: {{Filesystem: "fs1", Path: "/foo"}},
+				types.Filesystem{Name: "fs2", Path: "/fs2"}: {{Filesystem: "fs2", Path: "/bar"}},
+			}},
+		},
+		// Duplicate filesystem names: the final definition wins.
+		{
+			in: in{config: types.Config{Storage: types.Storage{
+				Filesystems: []types.Filesystem{{Name: "fs1"}, {Name: "fs1", Path: "/fs1"}},
+				Files:       []types.File{{Filesystem: "fs1", Path: "/foo"}, {Filesystem: "fs1", Path: "/bar"}},
+			}}},
+			out: out{files: map[types.Filesystem][]types.File{
+				types.Filesystem{Name: "fs1", Path: "/fs1"}: {{Filesystem: "fs1", Path: "/foo"}, {Filesystem: "fs1", Path: "/bar"}},
+			}},
+		},
+	}
+
+	for i, test := range tests {
+		logger := log.New()
+		files, err := stage{Util: util.Util{Logger: &logger}}.mapFilesToFilesystems(test.in.config)
+		if !reflect.DeepEqual(test.out.err, err) {
+			t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
+		}
+		if !reflect.DeepEqual(test.out.files, files) {
+			t.Errorf("#%d: bad map: want %#v, got %#v", i, test.out.files, files)
+		}
+	}
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/stages/name.go b/vendor/github.com/coreos/ignition/internal/exec/stages/name.go
      new file mode 100644
      index 00000000..9e6a93ff
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/stages/name.go
      @@ -0,0 +1,37 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package stages
      +
      +import (
      +	"fmt"
      +)
      +
+// Name is used to identify a StageCreator (which instantiates the stage of
+// the same name) from the command line. It must be in the set of registered
+// stages.
+type Name string
+
+// String returns the stage name as a plain string.
+func (s Name) String() string {
+	return string(s)
+}
      +
      +func (s *Name) Set(val string) error {
      +	if stage := Get(val); stage == nil {
      +		return fmt.Errorf("%s is not a valid stage", val)
      +	}
      +
      +	*s = Name(val)
      +	return nil
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/stages/stages.go b/vendor/github.com/coreos/ignition/internal/exec/stages/stages.go
      new file mode 100644
      index 00000000..a5639728
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/stages/stages.go
      @@ -0,0 +1,51 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package stages
      +
      +import (
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/log"
      +	"github.com/coreos/ignition/internal/registry"
      +)
      +
+// Stage is responsible for actually executing a stage of the configuration.
+type Stage interface {
+	// Run applies the relevant parts of config, reporting success through
+	// its boolean result.
+	Run(config types.Config) bool
+	Name() string
+}
+
+// StageCreator is responsible for instantiating a particular stage given a
+// logger and root path under the root partition.
+type StageCreator interface {
+	Create(logger *log.Logger, root string) Stage
+	Name() string
+}
      +
      +var stages = registry.Create("stages")
      +
      +func Register(stage StageCreator) {
      +	stages.Register(stage)
      +}
      +
      +func Get(name string) StageCreator {
      +	if s, ok := stages.Get(name).(StageCreator); ok {
      +		return s
      +	}
      +	return nil
      +}
      +
      +func Names() (names []string) {
      +	return stages.Names()
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/file.go b/vendor/github.com/coreos/ignition/internal/exec/util/file.go
      new file mode 100644
      index 00000000..7c591db8
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/util/file.go
      @@ -0,0 +1,180 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package util
      +
      +import (
      +	"bytes"
      +	"compress/gzip"
      +	"errors"
      +	"io/ioutil"
      +	"net/http"
      +	"os"
      +	"path/filepath"
      +
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/log"
      +	"github.com/coreos/ignition/internal/util"
      +
      +	"github.com/vincent-petithory/dataurl"
      +)
      +
      +const (
      +	DefaultDirectoryPermissions os.FileMode = 0755
      +	DefaultFilePermissions      os.FileMode = 0644
      +)
      +
      +var (
      +	ErrSchemeUnsupported = errors.New("unsupported source scheme")
      +	ErrStatusBad         = errors.New("bad HTTP response status")
      +)
      +
      +type File struct {
      +	Path     types.Path
      +	Contents []byte
      +	Mode     os.FileMode
      +	Uid      int
      +	Gid      int
      +}
      +
      +func RenderFile(l *log.Logger, f types.File) *File {
      +	var contents []byte
      +	var err error
      +
      +	fetch := func() error {
      +		contents, err = fetchFile(l, f)
      +		return err
      +	}
      +
      +	validate := func() error {
      +		return util.AssertValid(f.Contents.Verification, contents)
      +	}
      +
      +	decompress := func() error {
      +		contents, err = decompressFile(l, f, contents)
      +		return err
      +	}
      +
      +	if l.LogOp(fetch, "fetching file %q", f.Path) != nil {
      +		return nil
      +	}
      +	if l.LogOp(validate, "validating file contents") != nil {
      +		return nil
      +	}
      +	if l.LogOp(decompress, "decompressing file contents") != nil {
      +		return nil
      +	}
      +
      +	return &File{
      +		Path:     f.Path,
      +		Contents: []byte(contents),
      +		Mode:     os.FileMode(f.Mode),
      +		Uid:      f.User.Id,
      +		Gid:      f.Group.Id,
      +	}
      +}
      +
      +func fetchFile(l *log.Logger, f types.File) ([]byte, error) {
      +	switch f.Contents.Source.Scheme {
      +	case "http":
      +		client := util.NewHttpClient(l)
      +		data, status, err := client.Get(f.Contents.Source.String())
      +		if err != nil {
      +			return nil, err
      +		}
      +
      +		l.Debug("GET result: %s", http.StatusText(status))
      +		if status != http.StatusOK {
      +			return nil, ErrStatusBad
      +		}
      +
      +		return data, nil
      +	case "data":
      +		url, err := dataurl.DecodeString(f.Contents.Source.String())
      +		if err != nil {
      +			return nil, err
      +		}
      +
      +		return url.Data, nil
      +	default:
      +		return nil, ErrSchemeUnsupported
      +	}
      +}
      +
      +func decompressFile(l *log.Logger, f types.File, contents []byte) ([]byte, error) {
      +	switch f.Contents.Compression {
      +	case "":
      +		return contents, nil
      +	case "gzip":
      +		reader, err := gzip.NewReader(bytes.NewReader(contents))
      +		if err != nil {
      +			return nil, err
      +		}
      +		defer reader.Close()
      +
      +		return ioutil.ReadAll(reader)
      +	default:
      +		return nil, types.ErrCompressionInvalid
      +	}
      +}
      +
      +// WriteFile creates and writes the file described by f using the provided context
      +func (u Util) WriteFile(f *File) error {
      +	var err error
      +
      +	path := u.JoinPath(string(f.Path))
      +
      +	if err := mkdirForFile(path); err != nil {
      +		return err
      +	}
      +
      +	// Create a temporary file in the same directory to ensure it's on the same filesystem
      +	var tmp *os.File
      +	if tmp, err = ioutil.TempFile(filepath.Dir(path), "tmp"); err != nil {
      +		return err
      +	}
      +	tmp.Close()
      +	defer func() {
      +		if err != nil {
      +			os.Remove(tmp.Name())
      +		}
      +	}()
      +
      +	if err := ioutil.WriteFile(tmp.Name(), f.Contents, f.Mode); err != nil {
      +		return err
      +	}
      +
      +	// XXX(vc): Note that we assume to be operating on the file we just wrote, this is only guaranteed
      +	// by using syscall.Fchown() and syscall.Fchmod()
      +
      +	// Ensure the ownership and mode are as requested (since WriteFile can be affected by sticky bit)
      +	if err := os.Chown(tmp.Name(), f.Uid, f.Gid); err != nil {
      +		return err
      +	}
      +
      +	if err := os.Chmod(tmp.Name(), f.Mode); err != nil {
      +		return err
      +	}
      +
      +	if err := os.Rename(tmp.Name(), path); err != nil {
      +		return err
      +	}
      +
      +	return nil
      +}
      +
      +// mkdirForFile helper creates the directory components of path
      +func mkdirForFile(path string) error {
      +	return os.MkdirAll(filepath.Dir(path), DefaultDirectoryPermissions)
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/passwd.go b/vendor/github.com/coreos/ignition/internal/exec/util/passwd.go
      new file mode 100644
      index 00000000..1edb4535
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/util/passwd.go
      @@ -0,0 +1,173 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package util
      +
      +import (
      +	"fmt"
      +	"os/exec"
      +	"strconv"
      +	"strings"
      +
      +	"github.com/coreos/ignition/config/types"
      +
      +	keys "github.com/coreos/update-ssh-keys/authorized_keys_d"
      +)
      +
      +// CreateUser creates the user as described.
      +func (u Util) CreateUser(c types.User) error {
      +	if c.Create == nil {
      +		return nil
      +	}
      +
      +	cu := c.Create
      +	args := []string{"--root", u.DestDir}
      +
      +	if c.PasswordHash != "" {
      +		args = append(args, "--password", c.PasswordHash)
      +	} else {
      +		args = append(args, "--password", "*")
      +	}
      +
      +	if cu.Uid != nil {
      +		args = append(args, "--uid",
      +			strconv.FormatUint(uint64(*cu.Uid), 10))
      +	}
      +
      +	if cu.GECOS != "" {
      +		args = append(args, "--comment", fmt.Sprintf("%q", cu.GECOS))
      +	}
      +
      +	if cu.Homedir != "" {
      +		args = append(args, "--home-dir", cu.Homedir)
      +	}
      +
      +	if cu.NoCreateHome {
      +		args = append(args, "--no-create-home")
      +	} else {
      +		args = append(args, "--create-home")
      +	}
      +
      +	if cu.PrimaryGroup != "" {
      +		args = append(args, "--gid", cu.PrimaryGroup)
      +	}
      +
      +	if len(cu.Groups) > 0 {
      +		args = append(args, "--groups", strings.Join(cu.Groups, ","))
      +	}
      +
      +	if cu.NoUserGroup {
      +		args = append(args, "--no-user-group")
      +	}
      +
      +	if cu.System {
      +		args = append(args, "--system")
      +	}
      +
      +	if cu.NoLogInit {
      +		args = append(args, "--no-log-init")
      +	}
      +
      +	if cu.Shell != "" {
      +		args = append(args, "--shell", cu.Shell)
      +	}
      +
      +	args = append(args, c.Name)
      +
      +	return u.LogCmd(exec.Command("useradd", args...),
      +		"creating user %q", c.Name)
      +}
      +
      +// Add the provided SSH public keys to the user's authorized keys.
      +func (u Util) AuthorizeSSHKeys(c types.User) error {
      +	if len(c.SSHAuthorizedKeys) == 0 {
      +		return nil
      +	}
      +
      +	return u.LogOp(func() error {
      +		usr, err := u.userLookup(c.Name)
      +		if err != nil {
      +			return fmt.Errorf("unable to lookup user %q", c.Name)
      +		}
      +
      +		akd, err := keys.Open(usr, true)
      +		if err != nil {
      +			return err
      +		}
      +		defer akd.Close()
      +
      +		// TODO(vc): introduce key names to config?
      +		// TODO(vc): validate c.SSHAuthorizedKeys well-formedness.
      +		ks := strings.Join(c.SSHAuthorizedKeys, "\n")
      +		// XXX(vc): for now ensure the addition is always
      +		// newline-terminated.  A future version of akd will handle this
      +		// for us in addition to validating the ssh keys for
      +		// well-formedness.
      +		if !strings.HasSuffix(ks, "\n") {
      +			ks = ks + "\n"
      +		}
      +
      +		if err := akd.Add("coreos-ignition", []byte(ks), true, true); err != nil {
      +			return err
      +		}
      +
      +		if err := akd.Sync(); err != nil {
      +			return err
      +		}
      +
      +		return nil
      +	}, "adding ssh keys to user %q", c.Name)
      +}
      +
      +// SetPasswordHash sets the password hash of the specified user.
      +func (u Util) SetPasswordHash(c types.User) error {
      +	if c.PasswordHash == "" {
      +		return nil
      +	}
      +
      +	args := []string{
      +		"--root", u.DestDir,
      +		"--password", c.PasswordHash,
      +	}
      +
      +	args = append(args, c.Name)
      +
      +	return u.LogCmd(exec.Command("usermod", args...),
      +		"setting password for %q", c.Name)
      +}
      +
      +// CreateGroup creates the group as described.
      +func (u Util) CreateGroup(g types.Group) error {
      +	args := []string{"--root", u.DestDir}
      +
      +	if g.Gid != nil {
      +		args = append(args, "--gid",
      +			strconv.FormatUint(uint64(*g.Gid), 10))
      +	}
      +
      +	if g.PasswordHash != "" {
      +		args = append(args, "--password", g.PasswordHash)
      +	} else {
      +		args = append(args, "--password", "*")
      +	}
      +
      +	if g.System {
      +		args = append(args, "--system")
      +	}
      +
      +	args = append(args, g.Name)
      +
      +	return u.LogCmd(exec.Command("groupadd", args...),
      +		"adding group %q", g.Name)
      +}
      diff --git a/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/ignition/internal/exec/util/path.go
      similarity index 61%
      rename from vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/semver/sort.go
      rename to vendor/github.com/coreos/ignition/internal/exec/util/path.go
      index e256b41a..ec68dd96 100644
      --- a/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/semver/sort.go
      +++ b/vendor/github.com/coreos/ignition/internal/exec/util/path.go
      @@ -1,4 +1,4 @@
      -// Copyright 2013-2015 CoreOS, Inc.
      +// Copyright 2015 CoreOS, Inc.
       //
       // Licensed under the Apache License, Version 2.0 (the "License");
       // you may not use this file except in compliance with the License.
      @@ -12,27 +12,20 @@
       // See the License for the specific language governing permissions and
       // limitations under the License.
       
      -package semver
      +package util
       
       import (
      -	"sort"
      +	"path/filepath"
       )
       
      -type Versions []*Version
      -
      -func (s Versions) Len() int {
      -	return len(s)
      +func SystemdUnitsPath() string {
      +	return filepath.Join("etc", "systemd", "system")
       }
       
      -func (s Versions) Swap(i, j int) {
      -	s[i], s[j] = s[j], s[i]
      +func NetworkdUnitsPath() string {
      +	return filepath.Join("etc", "systemd", "network")
       }
       
      -func (s Versions) Less(i, j int) bool {
      -	return s[i].LessThan(*s[j])
      -}
      -
      -// Sort sorts the given slice of Version
      -func Sort(versions []*Version) {
      -	sort.Sort(Versions(versions))
      +func SystemdDropinsPath(unitName string) string {
      +	return filepath.Join("etc", "systemd", "system", unitName+".d")
       }
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/unit.go b/vendor/github.com/coreos/ignition/internal/exec/util/unit.go
      new file mode 100644
      index 00000000..d3361d65
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/util/unit.go
      @@ -0,0 +1,75 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package util
      +
      +import (
      +	"fmt"
      +	"os"
      +	"path/filepath"
      +
      +	"github.com/coreos/ignition/config/types"
      +)
      +
      +const (
      +	presetPath               string      = "/etc/systemd/system-preset/20-ignition.preset"
      +	DefaultPresetPermissions os.FileMode = 0644
      +)
      +
      +func FileFromSystemdUnit(unit types.SystemdUnit) *File {
      +	return &File{
      +		Path:     types.Path(filepath.Join(SystemdUnitsPath(), string(unit.Name))),
      +		Contents: []byte(unit.Contents),
      +		Mode:     DefaultFilePermissions,
      +	}
      +}
      +
      +func FileFromNetworkdUnit(unit types.NetworkdUnit) *File {
      +	return &File{
      +		Path:     types.Path(filepath.Join(NetworkdUnitsPath(), string(unit.Name))),
      +		Contents: []byte(unit.Contents),
      +		Mode:     DefaultFilePermissions,
      +	}
      +}
      +
      +func FileFromUnitDropin(unit types.SystemdUnit, dropin types.SystemdUnitDropIn) *File {
      +	return &File{
      +		Path:     types.Path(filepath.Join(SystemdDropinsPath(string(unit.Name)), string(dropin.Name))),
      +		Contents: []byte(dropin.Contents),
      +		Mode:     DefaultFilePermissions,
      +	}
      +}
      +
      +func (u Util) MaskUnit(unit types.SystemdUnit) error {
      +	path := u.JoinPath(SystemdUnitsPath(), string(unit.Name))
      +	if err := mkdirForFile(path); err != nil {
      +		return err
      +	}
      +	return os.Symlink("/dev/null", path)
      +}
      +
      +func (u Util) EnableUnit(unit types.SystemdUnit) error {
      +	path := u.JoinPath(presetPath)
      +	if err := mkdirForFile(path); err != nil {
      +		return err
      +	}
      +	file, err := os.OpenFile(path, os.O_RDWR|os.O_APPEND|os.O_CREATE, DefaultPresetPermissions)
      +	if err != nil {
      +		return err
      +	}
      +	defer file.Close()
      +
      +	_, err = file.WriteString(fmt.Sprintf("enable %s\n", unit.Name))
      +	return err
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.c b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.c
      new file mode 100644
      index 00000000..a63da3f6
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.c
      @@ -0,0 +1,139 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +#define _GNU_SOURCE
      +#include <errno.h>
      +#include <pwd.h>
      +#include <sched.h>
      +#include <signal.h>
      +#include <stdio.h>
      +#include <stdlib.h>
      +#include <string.h>
      +#include <sys/stat.h>
      +#include <sys/types.h>
      +#include <sys/wait.h>
      +#include <unistd.h>
      +
      +#include "user_lookup.h"
      +
      +#define STACK_SIZE (64 * 1024)
      +
      +/* This is all a bit copy-and-pasty from update-ssh-keys/authorized_keys_d,
      + * TODO(vc): refactor authorized_keys_d a bit so external packages can reuse
      + * the pieces duplicated here.
      + */
      +typedef struct user_lookup_ctxt {
      +	void			*stack;
      +
      +	const char		*name;
      +	const char		*root;
      +
      +	user_lookup_res_t	*res;
      +	int			ret;
      +	int			err;
      +} user_lookup_ctxt_t;
      +
      +
      +static int user_lookup_fn(user_lookup_ctxt_t *ctxt) {
      +	char		buf[16 * 1024];
      +	struct passwd	p, *pptr;
      +
      +	if(chroot(ctxt->root) == -1) {
      +		goto out_err;
      +	}
      +
      +	if(getpwnam_r(ctxt->name, &p, buf, sizeof(buf), &pptr) != 0 || !pptr) {
      +		goto out_err;
      +	}
      +
      +	if(!(ctxt->res->name = strdup(p.pw_name))) {
      +		goto out_err;
      +	}
      +
      +	if(!(ctxt->res->home = strdup(p.pw_dir))) {
      +		free(ctxt->res->name);
      +		goto out_err;
      +	}
      +
      +	ctxt->res->uid = p.pw_uid;
      +	ctxt->res->gid = p.pw_gid;
      +
      +	return 0;
      +
      +out_err:
      +	ctxt->err = errno;
      +	ctxt->ret = -1;
      +	return 0;
      +}
      +
      +/* user_lookup() looks up a user in a chroot.
      + * returns 0 and the results in res on success,
      + * res->name will be NULL if user doesn't exist.
      + * returns -1 on error.
      + */
      +int user_lookup(const char *root, const char *name, user_lookup_res_t *res) {
      +	user_lookup_ctxt_t	ctxt = {
      +					.root = root,
      +					.name = name,
      +					.res = res,
      +					.ret = 0
      +				};
      +	int			pid, ret = 0;
      +	sigset_t		allsigs, orig;
      +
      +	if(!(ctxt.stack = malloc(STACK_SIZE))) {
      +		ret = -1;
      +		goto out;
      +	}
      +
      +	/* It's necessary to block all signals before cloning, so the child
      +	 * doesn't run any of the Go runtime's signal handlers.
      +	 */
      +	if((ret = sigemptyset(&orig)) == -1 ||
      +	   (ret = sigfillset(&allsigs)) == -1)
      +		goto out_stack;
      +
      +	if((ret = sigprocmask(SIG_BLOCK, &allsigs, &orig)) == -1)
      +		goto out_stack;
      +
      +	pid = clone((int(*)(void *))user_lookup_fn, ctxt.stack + STACK_SIZE,
      +		    CLONE_VM, &ctxt);
      +
      +	ret = sigprocmask(SIG_SETMASK, &orig, NULL);
      +
      +	if(pid != -1) {
      +		if(waitpid(pid, NULL, __WCLONE) == -1 && errno != ECHILD) {
      +			ret = -1;
      +			goto out_stack;
      +		}
      +	} else {
      +		ret = -1;
      +	}
      +
      +	if(ret != -1) {
      +		errno = ctxt.err;
      +		ret = ctxt.ret;
      +	}
      +
      +out_stack:
      +	free(ctxt.stack);
      +
      +out:
      +	return ret;
      +}
      +
      +/* user_lookup_res_free() frees any memory allocated by a successful user_lookup(). */
      +void user_lookup_res_free(user_lookup_res_t *res) {
      +	free(res->home);
      +	free(res->name);
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.go b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.go
      new file mode 100644
      index 00000000..85d34919
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.go
      @@ -0,0 +1,50 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// +build linux
      +
      +package util
      +
      +// #include "user_lookup.h"
      +import "C"
      +
      +import (
      +	"fmt"
      +	"os/user"
      +)
      +
      +// userLookup looks up the user in u.DestDir.
      +func (u Util) userLookup(name string) (*user.User, error) {
      +	res := &C.user_lookup_res_t{}
      +
      +	if ret, err := C.user_lookup(C.CString(u.DestDir),
      +		C.CString(name), res); ret < 0 {
      +		return nil, fmt.Errorf("lookup failed: %v", err)
      +	}
      +
      +	if res.name == nil {
      +		return nil, fmt.Errorf("user %q not found", name)
      +	}
      +
      +	usr := &user.User{
      +		Name:    C.GoString(res.name),
      +		Uid:     fmt.Sprintf("%d", int(res.uid)),
      +		Gid:     fmt.Sprintf("%d", int(res.gid)),
      +		HomeDir: u.JoinPath(C.GoString(res.home)),
      +	}
      +
      +	C.user_lookup_res_free(res)
      +
      +	return usr, nil
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.h b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.h
      new file mode 100644
      index 00000000..8178ee41
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.h
      @@ -0,0 +1,23 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +typedef struct user_lookup_res {
      +	int	uid;
      +	int	gid;
      +	char	*home;
      +	char	*name;
      +} user_lookup_res_t;
      +
      +int user_lookup(const char *, const char *, user_lookup_res_t *);
      +void user_lookup_res_free(user_lookup_res_t *);
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup_test.go b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup_test.go
      new file mode 100644
      index 00000000..a323f9a2
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup_test.go
      @@ -0,0 +1,97 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package util
      +
      +import (
      +	"io/ioutil"
      +	"os"
      +	"os/user"
      +	"path/filepath"
      +	"testing"
      +
      +	"github.com/coreos/ignition/internal/log"
      +)
      +
      +// tempBase() slaps together a minimal /etc/{passwd,group} for the lookup test.
      +func tempBase() (string, error) {
      +	td, err := ioutil.TempDir("", "ign-usr-lookup-test")
      +	if err != nil {
      +		return "", err
      +	}
      +
      +	if err := os.MkdirAll(filepath.Join(td, "etc"), 0755); err != nil {
      +		return "", err
      +	}
      +
      +	gp := filepath.Join(td, "etc/group")
      +	err = ioutil.WriteFile(gp, []byte("foo:x:4242:\n"), 0644)
      +	if err != nil {
      +		return "", err
      +	}
      +
      +	pp := filepath.Join(td, "etc/passwd")
      +	err = ioutil.WriteFile(pp, []byte("foo:x:44:4242::/home/foo:/bin/false"), 0644)
      +	if err != nil {
      +		return "", err
      +	}
      +
      +	nsp := filepath.Join(td, "etc/nsswitch.conf")
      +	err = ioutil.WriteFile(nsp, []byte("passwd: files\ngroup: files\nshadow: files\ngshadow: files\n"), 0644)
      +	if err != nil {
      +		return "", err
      +	}
      +
      +	return td, nil
      +}
      +
      +func TestUserLookup(t *testing.T) {
      +	if os.Geteuid() != 0 {
      +		t.Skip("test requires root for chroot(), skipping")
      +	}
      +
      +	// perform a user lookup to ensure libnss_files.so is loaded
      +	// note this assumes /etc/nsswitch.conf invokes files.
      +	user.Lookup("root")
      +
      +	td, err := tempBase()
      +	if err != nil {
      +		t.Fatalf("temp base error: %v", err)
      +	}
      +
      +	logger := log.New()
      +	defer logger.Close()
      +
      +	u := &Util{
      +		DestDir: td,
      +		Logger:  &logger,
      +	}
      +
      +	usr, err := u.userLookup("foo")
      +	if err != nil {
      +		t.Fatalf("lookup error: %v", err)
      +	}
      +
      +	if usr.Name != "foo" {
      +		t.Fatalf("unexpected name: %q", usr.Name)
      +	}
      +
      +	if usr.Uid != "44" {
      +		t.Fatalf("unexpected uid: %q", usr.Uid)
      +	}
      +
      +	if usr.Gid != "4242" {
      +		t.Fatalf("unexpected gid: %q", usr.Gid)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/util.go b/vendor/github.com/coreos/ignition/internal/exec/util/util.go
      new file mode 100644
      index 00000000..31fd33e9
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/exec/util/util.go
      @@ -0,0 +1,32 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package util
      +
      +import (
      +	"path/filepath"
      +
      +	"github.com/coreos/ignition/internal/log"
      +)
      +
      +// Util encapsulates logging and destdir indirection for the util methods.
      +type Util struct {
      +	DestDir string // directory prefix to use in applying fs paths.
      +	*log.Logger
      +}
      +
      +// JoinPath returns a path into the context ala filepath.Join(d, args)
      +func (u Util) JoinPath(path ...string) string {
      +	return filepath.Join(u.DestDir, filepath.Join(path...))
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/log/log.go b/vendor/github.com/coreos/ignition/internal/log/log.go
      new file mode 100644
      index 00000000..89bc5427
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/log/log.go
      @@ -0,0 +1,180 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package log
      +
      +import (
      +	"bytes"
      +	"fmt"
      +	"log/syslog"
      +	"os/exec"
      +	"strings"
      +)
      +
      +type LoggerOps interface {
      +	Emerg(string) error
      +	Alert(string) error
      +	Crit(string) error
      +	Err(string) error
      +	Warning(string) error
      +	Notice(string) error
      +	Info(string) error
      +	Debug(string) error
      +	Close() error
      +}
      +
      +// Logger implements a variadic flavor of log/syslog.Writer
      +type Logger struct {
      +	ops           LoggerOps
      +	prefixStack   []string
      +	opSequenceNum int
      +}
      +
      +// New creates a new logger.
      +// syslog is tried first, if syslog fails Stdout is used.
      +func New() Logger {
      +	logger := Logger{}
      +	if slogger, err := syslog.New(syslog.LOG_DEBUG, "ignition"); err == nil {
      +		logger.ops = slogger
      +	} else {
      +		logger.ops = Stdout{}
      +		logger.Err("unable to open syslog: %v", err)
      +	}
      +	return logger
      +}
      +
// Close closes the logger.
// Any error from the underlying writer's Close is discarded.
func (l Logger) Close() {
	l.ops.Close()
}

// Emerg logs a message at emergency priority.
func (l Logger) Emerg(format string, a ...interface{}) error {
	return l.log(l.ops.Emerg, format, a...)
}

// Alert logs a message at alert priority.
func (l Logger) Alert(format string, a ...interface{}) error {
	return l.log(l.ops.Alert, format, a...)
}

// Crit logs a message at critical priority.
func (l Logger) Crit(format string, a ...interface{}) error {
	return l.log(l.ops.Crit, format, a...)
}

// Err logs a message at error priority.
func (l Logger) Err(format string, a ...interface{}) error {
	return l.log(l.ops.Err, format, a...)
}

// Warning logs a message at warning priority.
func (l Logger) Warning(format string, a ...interface{}) error {
	return l.log(l.ops.Warning, format, a...)
}

// Notice logs a message at notice priority.
func (l Logger) Notice(format string, a ...interface{}) error {
	return l.log(l.ops.Notice, format, a...)
}

// Info logs a message at info priority.
func (l Logger) Info(format string, a ...interface{}) error {
	return l.log(l.ops.Info, format, a...)
}

// Debug logs a message at debug priority.
func (l Logger) Debug(format string, a ...interface{}) error {
	return l.log(l.ops.Debug, format, a...)
}
      +
      +// PushPrefix pushes the supplied message onto the Logger's prefix stack.
      +// The prefix stack is concatenated in FIFO order and prefixed to the start of every message logged via Logger.
      +func (l *Logger) PushPrefix(format string, a ...interface{}) {
      +	l.prefixStack = append(l.prefixStack, fmt.Sprintf(format, a...))
      +}
      +
      +// PopPrefix pops the top entry from the Logger's prefix stack.
      +// The prefix stack is concatenated in FIFO order and prefixed to the start of every message logged via Logger.
      +func (l *Logger) PopPrefix() {
      +	if len(l.prefixStack) == 0 {
      +		l.Debug("popped from empty stack")
      +		return
      +	}
      +	l.prefixStack = l.prefixStack[:len(l.prefixStack)-1]
      +}
      +
// LogCmd runs and logs the supplied cmd as an operation with distinct start/finish/fail log messages uniformly combined with the supplied format string.
// The exact command path and arguments being executed are also logged for debugging assistance.
// Note: cmd.Stderr is overwritten so the command's stderr can be included in
// the returned error on failure.
func (l *Logger) LogCmd(cmd *exec.Cmd, format string, a ...interface{}) error {
	f := func() error {
		// cmd.Args[0] is the command name itself; only log the remainder.
		if len(cmd.Args) <= 1 {
			l.Debug("executing: %v", cmd.Path)
		} else {
			l.Debug("executing: %v %v", cmd.Path, cmd.Args[1:])
		}
		stderr := &bytes.Buffer{}
		cmd.Stderr = stderr
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("%v: Stderr: %q", err, stderr.Bytes())
		}
		return nil
	}
	return l.LogOp(f, format, a...)
}
      +
// LogOp calls and logs the supplied function as an operation with distinct start/finish/fail log messages uniformly combined with the supplied format string.
func (l *Logger) LogOp(op func() error, format string, a ...interface{}) error {
	// Tag every message emitted while the operation runs with a unique op id.
	l.opSequenceNum++
	l.PushPrefix("op(%x)", l.opSequenceNum)
	defer l.PopPrefix()

	l.logStart(format, a...)
	if err := op(); err != nil {
		// Pre-expand the caller's format so the error can be appended.
		l.logFail("%s: %v", fmt.Sprintf(format, a...), err)
		return err
	}
	l.logFinish(format, a...)
	return nil
}
      +
      +// logStart logs the start of a multi-step/substantial/time-consuming operation.
      +func (l Logger) logStart(format string, a ...interface{}) {
      +	l.Info(fmt.Sprintf("[started]  %s", format), a...)
      +}
      +
      +// logFail logs the failure of a multi-step/substantial/time-consuming operation.
      +func (l Logger) logFail(format string, a ...interface{}) {
      +	l.Crit(fmt.Sprintf("[failed]   %s", format), a...)
      +}
      +
      +// logFinish logs the completion of a multi-step/substantial/time-consuming operation.
      +func (l Logger) logFinish(format string, a ...interface{}) {
      +	l.Info(fmt.Sprintf("[finished] %s", format), a...)
      +}
      +
      +// log logs a formatted message using the supplied logFunc.
      +func (l Logger) log(logFunc func(string) error, format string, a ...interface{}) error {
      +	return logFunc(l.sprintf(format, a...))
      +}
      +
      +// sprintf returns the current prefix stack, if any, concatenated with the supplied format string and args in expanded form.
      +func (l Logger) sprintf(format string, a ...interface{}) string {
      +	m := []string{}
      +	for _, pfx := range l.prefixStack {
      +		m = append(m, fmt.Sprintf("%s:", pfx))
      +	}
      +	m = append(m, fmt.Sprintf(format, a...))
      +	return strings.Join(m, " ")
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/log/stdout.go b/vendor/github.com/coreos/ignition/internal/log/stdout.go
      new file mode 100644
      index 00000000..3ba1e3ad
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/log/stdout.go
      @@ -0,0 +1,31 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package log
      +
      +import (
      +	"fmt"
      +)
      +
      +type Stdout struct{}
      +
      +func (Stdout) Emerg(msg string) error   { fmt.Println("EMERGENCY:", msg); return nil }
      +func (Stdout) Alert(msg string) error   { fmt.Println("ALERT    :", msg); return nil }
      +func (Stdout) Crit(msg string) error    { fmt.Println("CRITICAL :", msg); return nil }
      +func (Stdout) Err(msg string) error     { fmt.Println("ERROR    :", msg); return nil }
      +func (Stdout) Warning(msg string) error { fmt.Println("WARNING  :", msg); return nil }
      +func (Stdout) Notice(msg string) error  { fmt.Println("NOTICE   :", msg); return nil }
      +func (Stdout) Info(msg string) error    { fmt.Println("INFO     :", msg); return nil }
      +func (Stdout) Debug(msg string) error   { fmt.Println("DEBUG    :", msg); return nil }
      +func (Stdout) Close() error             { return nil }
      diff --git a/vendor/github.com/coreos/ignition/internal/main.go b/vendor/github.com/coreos/ignition/internal/main.go
      new file mode 100644
      index 00000000..f62098ae
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/main.go
      @@ -0,0 +1,98 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package main
      +
      +import (
      +	"flag"
      +	"fmt"
      +	"os"
      +	"time"
      +
      +	"github.com/coreos/ignition/internal/exec"
      +	"github.com/coreos/ignition/internal/exec/stages"
      +	_ "github.com/coreos/ignition/internal/exec/stages/disks"
      +	_ "github.com/coreos/ignition/internal/exec/stages/files"
      +	"github.com/coreos/ignition/internal/log"
      +	"github.com/coreos/ignition/internal/oem"
      +	"github.com/coreos/ignition/internal/version"
      +)
      +
      +func main() {
      +	flags := struct {
      +		clearCache    bool
      +		configCache   string
      +		onlineTimeout time.Duration
      +		oem           oem.Name
      +		root          string
      +		stage         stages.Name
      +		version       bool
      +	}{}
      +
      +	flag.BoolVar(&flags.clearCache, "clear-cache", false, "clear any cached config")
      +	flag.StringVar(&flags.configCache, "config-cache", "/run/ignition.json", "where to cache the config")
      +	flag.DurationVar(&flags.onlineTimeout, "online-timeout", exec.DefaultOnlineTimeout, "how long to wait for a provider to come online")
      +	flag.Var(&flags.oem, "oem", fmt.Sprintf("current oem. %v", oem.Names()))
      +	flag.StringVar(&flags.root, "root", "/", "root of the filesystem")
      +	flag.Var(&flags.stage, "stage", fmt.Sprintf("execution stage. %v", stages.Names()))
      +	flag.BoolVar(&flags.version, "version", false, "print the version and exit")
      +
      +	flag.Parse()
      +
      +	if flags.version {
      +		fmt.Printf("%s\n", version.String)
      +		return
      +	}
      +
      +	if flags.oem == "" {
      +		fmt.Fprint(os.Stderr, "'--oem' must be provided\n")
      +		os.Exit(2)
      +	}
      +
      +	if flags.stage == "" {
      +		fmt.Fprint(os.Stderr, "'--stage' must be provided\n")
      +		os.Exit(2)
      +	}
      +
      +	for k, v := range oem.MustGet(flags.oem.String()).Flags() {
      +		if err := flag.Set(k, v); err != nil {
      +			panic(err)
      +		}
      +	}
      +
      +	logger := log.New()
      +	defer logger.Close()
      +
      +	logger.Info(version.String)
      +
      +	if flags.clearCache {
      +		if err := os.Remove(flags.configCache); err != nil {
      +			logger.Err("unable to clear cache: %v", err)
      +		}
      +	}
      +
      +	oemConfig := oem.MustGet(flags.oem.String())
      +	engine := exec.Engine{
      +		Root:          flags.root,
      +		OnlineTimeout: flags.onlineTimeout,
      +		Logger:        &logger,
      +		ConfigCache:   flags.configCache,
      +		Provider:      oemConfig.Provider().Create(&logger),
      +		OemConfig:     oemConfig.Config(),
      +	}
      +
      +	if !engine.Run(flags.stage.String()) {
      +		os.Exit(1)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/oem/name.go b/vendor/github.com/coreos/ignition/internal/oem/name.go
      new file mode 100644
      index 00000000..447eef65
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/oem/name.go
      @@ -0,0 +1,35 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package oem
      +
      +import (
      +	"fmt"
      +)
      +
      +// Name is used to identify an OEM. It must be in the set of registered OEMs.
      +type Name string
      +
      +func (s Name) String() string {
      +	return string(s)
      +}
      +
      +func (s *Name) Set(val string) error {
      +	if _, ok := Get(val); !ok {
      +		return fmt.Errorf("%s is not a valid oem", val)
      +	}
      +
      +	*s = Name(val)
      +	return nil
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/oem/oem.go b/vendor/github.com/coreos/ignition/internal/oem/oem.go
      new file mode 100644
      index 00000000..02c29e48
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/oem/oem.go
      @@ -0,0 +1,170 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package oem
      +
      +import (
      +	"fmt"
      +
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/providers"
      +	"github.com/coreos/ignition/internal/providers/azure"
      +	"github.com/coreos/ignition/internal/providers/cmdline"
      +	"github.com/coreos/ignition/internal/providers/ec2"
      +	"github.com/coreos/ignition/internal/providers/gce"
      +	"github.com/coreos/ignition/internal/providers/noop"
      +	"github.com/coreos/ignition/internal/providers/vmware"
      +	"github.com/coreos/ignition/internal/registry"
      +)
      +
// Config represents a set of command line flags that map to a particular OEM.
type Config struct {
	name     string                    // registry key; the value accepted by the -oem flag
	flags    map[string]string         // flag-name -> value overrides applied at startup
	provider providers.ProviderCreator // creator for this OEM's config provider
	config   types.Config              // base ignition config associated with this OEM
}

// Name returns the OEM's registered name.
func (c Config) Name() string {
	return c.name
}

// Flags returns the flag overrides for this OEM (may be nil).
func (c Config) Flags() map[string]string {
	return c.flags
}

// Provider returns the creator for this OEM's config provider.
func (c Config) Provider() providers.ProviderCreator {
	return c.provider
}

// Config returns the base ignition config associated with this OEM.
func (c Config) Config() types.Config {
	return c.config
}
      +
// configs is the registry of all known OEM configurations, keyed by name.
var configs = registry.Create("oem configs")

// init registers the built-in OEM configurations. Entries backed by
// noop.Creator have no ignition-specific config source of their own.
func init() {
	configs.Register(Config{
		name:     "azure",
		provider: azure.Creator{},
	})
	configs.Register(Config{
		name:     "cloudsigma",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "cloudstack",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "digitalocean",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "brightbox",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "openstack",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "ec2",
		provider: ec2.Creator{},
		// NOTE(review): "0" presumably disables the online timeout for the
		// metadata service — confirm against exec's timeout handling.
		flags: map[string]string{
			"online-timeout": "0",
		},
		config: types.Config{
			Systemd: types.Systemd{
				Units: []types.SystemdUnit{{
					Name:   "coreos-metadata-sshkeys@.service",
					Enable: true,
				}},
			},
		},
	})
	configs.Register(Config{
		name:     "exoscale",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "gce",
		provider: gce.Creator{},
		config: types.Config{
			Systemd: types.Systemd{
				Units: []types.SystemdUnit{{
					Name:   "coreos-metadata-sshkeys@.service",
					Enable: true,
				}},
			},
		},
	})
	configs.Register(Config{
		name:     "hyperv",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "niftycloud",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "packet",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "pxe",
		provider: cmdline.Creator{},
	})
	configs.Register(Config{
		name:     "rackspace",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "rackspace-onmetal",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "vagrant",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "vmware",
		provider: vmware.Creator{},
	})
	configs.Register(Config{
		name:     "xendom0",
		provider: noop.Creator{},
	})
	configs.Register(Config{
		name:     "interoute",
		provider: noop.Creator{},
	})
}
      +
      +func Get(name string) (config Config, ok bool) {
      +	config, ok = configs.Get(name).(Config)
      +	return
      +}
      +
      +func MustGet(name string) Config {
      +	if config, ok := Get(name); ok {
      +		return config
      +	} else {
      +		panic(fmt.Sprintf("invalid OEM name %q provided", name))
      +	}
      +}
      +
      +func Names() (names []string) {
      +	return configs.Names()
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/providers/azure/azure.go b/vendor/github.com/coreos/ignition/internal/providers/azure/azure.go
      new file mode 100644
      index 00000000..adba2b7f
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/providers/azure/azure.go
      @@ -0,0 +1,139 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// The azure provider fetches a configuration from the Azure OVF DVD.
      +
      +package azure
      +
      +import (
      +	"fmt"
      +	"io/ioutil"
      +	"os"
      +	"path/filepath"
      +	"syscall"
      +	"time"
      +
      +	"github.com/coreos/ignition/config"
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/log"
      +	"github.com/coreos/ignition/internal/providers"
      +	"github.com/coreos/ignition/internal/providers/util"
      +)
      +
const (
	initialBackoff = 100 * time.Millisecond // first retry delay for BackoffDuration
	maxBackoff     = 30 * time.Second       // cap on the exponential backoff
	configDevice   = "/dev/disk/by-id/ata-Virtual_CD" // virtual DVD carrying the custom data
	configPath     = "/CustomData.bin"                // config file within the mounted DVD
)

// These constants come from <cdrom.h>.
const (
	CDROM_DRIVE_STATUS = 0x5326 // ioctl request: query CD-ROM drive status
)

// These constants come from <cdrom.h>.
const (
	CDS_NO_INFO = iota
	CDS_NO_DISC
	CDS_TRAY_OPEN
	CDS_DRIVE_NOT_READY
	CDS_DISC_OK
)

// Creator constructs azure providers.
type Creator struct{}

// Create returns a provider that reads the config from the Azure config DVD.
func (Creator) Create(logger *log.Logger) providers.Provider {
	return &provider{
		logger:  logger,
		backoff: initialBackoff,
	}
}

type provider struct {
	logger  *log.Logger
	backoff time.Duration // current retry delay for BackoffDuration
}
      +
// FetchConfig mounts the Azure config device read-only (udf), reads the
// custom data file, and parses it. A missing config file is tolerated:
// config.Parse is then handed a nil payload.
func (p provider) FetchConfig() (types.Config, error) {
	p.logger.Debug("creating temporary mount point")
	mnt, err := ioutil.TempDir("", "ignition-azure")
	if err != nil {
		return types.Config{}, fmt.Errorf("failed to create temp directory: %v", err)
	}
	defer os.Remove(mnt)

	p.logger.Debug("mounting config device")
	if err := p.logger.LogOp(
		func() error { return syscall.Mount(configDevice, mnt, "udf", syscall.MS_RDONLY, "") },
		"mounting %q at %q", configDevice, mnt,
	); err != nil {
		return types.Config{}, fmt.Errorf("failed to mount device %q at %q: %v", configDevice, mnt, err)
	}
	// Unmount errors are logged by LogOp but otherwise ignored.
	defer p.logger.LogOp(
		func() error { return syscall.Unmount(mnt, 0) },
		"unmounting %q at %q", configDevice, mnt,
	)

	p.logger.Debug("reading config")
	// A nonexistent config file is not an error; rawConfig stays nil.
	rawConfig, err := ioutil.ReadFile(filepath.Join(mnt, configPath))
	if err != nil && !os.IsNotExist(err) {
		return types.Config{}, fmt.Errorf("failed to read config: %v", err)
	}

	return config.Parse(rawConfig)
}
      +
// IsOnline reports whether the config device holds a readable disc, as
// determined by the CDROM_DRIVE_STATUS ioctl.
func (p provider) IsOnline() bool {
	p.logger.Debug("opening config device")
	device, err := os.Open(configDevice)
	if err != nil {
		p.logger.Info("failed to open config device: %v", err)
		return false
	}
	defer device.Close()

	p.logger.Debug("getting drive status")
	status, _, errno := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(device.Fd()),
		uintptr(CDROM_DRIVE_STATUS),
		uintptr(0),
	)

	switch status {
	case CDS_NO_INFO:
		p.logger.Info("drive status: no info")
	case CDS_NO_DISC:
		p.logger.Info("drive status: no disc")
	case CDS_TRAY_OPEN:
		p.logger.Info("drive status: open")
	case CDS_DRIVE_NOT_READY:
		p.logger.Info("drive status: not ready")
	case CDS_DISC_OK:
		p.logger.Info("drive status: OK")
	default:
		// Any other value is treated as an ioctl failure; errno carries the reason.
		p.logger.Err("failed to get drive status: %s", errno.Error())
	}

	return (status == CDS_DISC_OK)
}

// ShouldRetry indicates polling should continue until the disc appears.
func (p provider) ShouldRetry() bool {
	return true
}

// BackoffDuration returns the next retry delay, increasing exponentially up
// to maxBackoff (see util.ExpBackoff).
func (p *provider) BackoffDuration() time.Duration {
	return util.ExpBackoff(&p.backoff, maxBackoff)
}
      diff --git a/vendor/github.com/coreos/ignition/internal/providers/cmdline/cmdline.go b/vendor/github.com/coreos/ignition/internal/providers/cmdline/cmdline.go
      new file mode 100644
      index 00000000..2e2b0021
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/providers/cmdline/cmdline.go
      @@ -0,0 +1,208 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// The cmdline provider fetches a remote configuration from the URL specified
      +// in the kernel boot option "coreos.config.url".
      +
      +package cmdline
      +
      +import (
      +	"fmt"
      +	"io/ioutil"
      +	"net/http"
      +	"net/url"
      +	"os"
      +	"path/filepath"
      +	"strings"
      +	"syscall"
      +	"time"
      +
      +	"github.com/coreos/ignition/config"
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/log"
      +	"github.com/coreos/ignition/internal/providers"
      +	putil "github.com/coreos/ignition/internal/providers/util"
      +	"github.com/coreos/ignition/internal/systemd"
      +	"github.com/coreos/ignition/internal/util"
      +)
      +
const (
	initialBackoff = 100 * time.Millisecond  // first retry delay for BackoffDuration
	maxBackoff     = 30 * time.Second        // cap on the exponential backoff
	cmdlinePath    = "/proc/cmdline"         // kernel command line read by IsOnline
	cmdlineUrlFlag = "coreos.config.url"     // kernel argument naming the config URL
	oemDevicePath  = "/dev/disk/by-label/OEM" // Device link where oem partition is found.
	oemDirPath     = "/usr/share/oem"         // OEM dir within root fs to consider for pxe scenarios.
	oemMountPath   = "/mnt/oem"               // Mountpoint where oem partition is mounted when present.
)

// Creator constructs cmdline providers.
type Creator struct{}

// Create returns a provider that reads the config URL from the kernel
// command line.
func (Creator) Create(logger *log.Logger) providers.Provider {
	return &provider{
		logger:  logger,
		backoff: initialBackoff,
		path:    cmdlinePath,
		client:  util.NewHttpClient(logger),
	}
}

type provider struct {
	logger    *log.Logger
	backoff   time.Duration // current retry delay for BackoffDuration
	path      string        // path of the kernel command line (cmdlinePath)
	client    util.HttpClient
	configUrl string // URL parsed from the command line; cached by IsOnline
	rawConfig []byte // config payload fetched by getRawConfig
}
      +
// FetchConfig parses the raw config previously fetched by IsOnline. If no
// config URL was provided on the command line, an empty config is returned.
func (p provider) FetchConfig() (types.Config, error) {
	if p.rawConfig == nil {
		return types.Config{}, nil
	} else {
		return config.Parse(p.rawConfig)
	}
}

// IsOnline lazily parses the kernel command line (caching the result), then
// attempts to fetch the raw config. With no URL present it reports online
// immediately so the rest of the pipeline can no-op.
func (p *provider) IsOnline() bool {
	if p.configUrl == "" {
		args, err := ioutil.ReadFile(p.path)
		if err != nil {
			p.logger.Err("couldn't read cmdline")
			return false
		}

		p.configUrl = parseCmdline(args)
		p.logger.Debug("parsed url from cmdline: %q", p.configUrl)
		if p.configUrl == "" {
			// If the cmdline flag wasn't provided, just no-op.
			p.logger.Info("no config URL provided")
			return true
		}
	}

	return p.getRawConfig()

}

// ShouldRetry indicates fetching should be retried.
func (p provider) ShouldRetry() bool {
	return true
}

// BackoffDuration returns the next retry delay, increasing exponentially up
// to maxBackoff (see putil.ExpBackoff).
func (p *provider) BackoffDuration() time.Duration {
	return putil.ExpBackoff(&p.backoff, maxBackoff)
}
      +
      +func parseCmdline(cmdline []byte) (url string) {
      +	for _, arg := range strings.Split(string(cmdline), " ") {
      +		parts := strings.SplitN(strings.TrimSpace(arg), "=", 2)
      +		key := parts[0]
      +
      +		if key != cmdlineUrlFlag {
      +			continue
      +		}
      +
      +		if len(parts) == 2 {
      +			url = parts[1]
      +		}
      +	}
      +
      +	return
      +}
      +
// getRawConfig gets the raw configuration data from p.configUrl.
// Supported URL schemes are:
// http://	remote resource accessed via http
// oem://	local file in /usr/share/oem or /mnt/oem
// It returns true when p.rawConfig was populated successfully.
func (p *provider) getRawConfig() bool {
	url, err := url.Parse(p.configUrl)
	if err != nil {
		p.logger.Err("failed to parse url: %v", err)
		return false
	}

	switch url.Scheme {
	case "http":
		p.rawConfig = p.client.FetchConfig(p.configUrl, http.StatusOK, http.StatusNoContent)
		if p.rawConfig == nil {
			return false
		}
	case "oem":
		path := filepath.Clean(url.Path)
		if !filepath.IsAbs(path) {
			p.logger.Err("oem path is not absolute: %q", url.Path)
			return false
		}

		// check if present under oemDirPath, if so use it.
		absPath := filepath.Join(oemDirPath, path)
		p.rawConfig, err = ioutil.ReadFile(absPath)
		if os.IsNotExist(err) {
			p.logger.Info("oem config not found in %q, trying %q",
				oemDirPath, oemMountPath)

			// try oemMountPath, requires mounting it.
			err = p.mountOEM()
			if err == nil {
				absPath := filepath.Join(oemMountPath, path)
				p.rawConfig, err = ioutil.ReadFile(absPath)
				p.umountOEM()
			}
		}

		if err != nil {
			p.logger.Err("failed to read oem config: %v", err)
			return false
		}
	default:
		p.logger.Err("unsupported url scheme: %q", url.Scheme)
		return false
	}

	return true
}
      +
// mountOEM waits for the presence of and mounts the oem partition @ oemMountPath.
// Callers are responsible for calling umountOEM on success.
func (p *provider) mountOEM() error {
	dev := []string{oemDevicePath}
	if err := systemd.WaitOnDevices(dev, "oem-cmdline"); err != nil {
		p.logger.Err("failed to wait for oem device: %v", err)
		return err
	}

	if err := os.MkdirAll(oemMountPath, 0700); err != nil {
		p.logger.Err("failed to create oem mount point: %v", err)
		return err
	}

	if err := p.logger.LogOp(
		func() error {
			return syscall.Mount(dev[0], oemMountPath, "ext4", 0, "")
		},
		"mounting %q at %q", oemDevicePath, oemMountPath,
	); err != nil {
		return fmt.Errorf("failed to mount device %q at %q: %v",
			oemDevicePath, oemMountPath, err)
	}

	return nil
}

// umountOEM unmounts the oem partition @ oemMountPath.
// Unmount errors are logged by LogOp but otherwise ignored.
func (p *provider) umountOEM() {
	p.logger.LogOp(
		func() error { return syscall.Unmount(oemMountPath, 0) },
		"unmounting %q", oemMountPath,
	)
}
      diff --git a/vendor/github.com/coreos/ignition/internal/providers/ec2/ec2.go b/vendor/github.com/coreos/ignition/internal/providers/ec2/ec2.go
      new file mode 100644
      index 00000000..5f2797cf
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/providers/ec2/ec2.go
      @@ -0,0 +1,70 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// The ec2 provider fetches a remote configuration from the ec2 user-data
      +// metadata service URL.
      +
      +package ec2
      +
      +import (
      +	"net/http"
      +	"time"
      +
      +	"github.com/coreos/ignition/config"
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/log"
      +	"github.com/coreos/ignition/internal/providers"
      +	putil "github.com/coreos/ignition/internal/providers/util"
      +	"github.com/coreos/ignition/internal/util"
      +)
      +
const (
	initialBackoff = 100 * time.Millisecond // first retry delay for BackoffDuration
	maxBackoff     = 30 * time.Second       // cap on the exponential backoff
	userdataUrl    = "http://169.254.169.254/2009-04-04/user-data" // EC2 metadata service user-data endpoint
)

// Creator constructs ec2 providers.
type Creator struct{}

// Create returns a provider that fetches the config from the EC2 user-data
// metadata service.
func (Creator) Create(logger *log.Logger) providers.Provider {
	return &provider{
		logger:  logger,
		backoff: initialBackoff,
		client:  util.NewHttpClient(logger),
	}
}

type provider struct {
	logger    *log.Logger
	backoff   time.Duration // current retry delay for BackoffDuration
	client    util.HttpClient
	rawConfig []byte // user-data payload cached by IsOnline
}

// FetchConfig parses the user-data previously fetched by IsOnline.
func (p provider) FetchConfig() (types.Config, error) {
	return config.Parse(p.rawConfig)
}

// IsOnline fetches the user-data and caches it for FetchConfig; the accepted
// HTTP statuses (200, 404) are handled by util.HttpClient.FetchConfig. A nil
// result is treated as offline.
func (p *provider) IsOnline() bool {
	p.rawConfig = p.client.FetchConfig(userdataUrl, http.StatusOK, http.StatusNotFound)
	return (p.rawConfig != nil)
}

// ShouldRetry indicates fetching should be retried.
func (p provider) ShouldRetry() bool {
	return true
}

// BackoffDuration returns the next retry delay, increasing exponentially up
// to maxBackoff (see putil.ExpBackoff).
func (p *provider) BackoffDuration() time.Duration {
	return putil.ExpBackoff(&p.backoff, maxBackoff)
}
      diff --git a/vendor/github.com/coreos/ignition/internal/providers/gce/gce.go b/vendor/github.com/coreos/ignition/internal/providers/gce/gce.go
      new file mode 100644
      index 00000000..401a882f
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/providers/gce/gce.go
      @@ -0,0 +1,74 @@
      +// Copyright 2016 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// The gce provider fetches a remote configuration from the gce user-data
      +// metadata service URL.
      +
      +package gce
      +
      +import (
      +	"net/http"
      +	"time"
      +
      +	"github.com/coreos/ignition/config"
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/log"
      +	"github.com/coreos/ignition/internal/providers"
      +	putil "github.com/coreos/ignition/internal/providers/util"
      +	"github.com/coreos/ignition/internal/util"
      +)
      +
+const (
+	// initialBackoff is the delay before the first retry; putil.ExpBackoff
+	// doubles it on each subsequent retry, bounded by maxBackoff.
+	initialBackoff = 100 * time.Millisecond
+	maxBackoff     = 30 * time.Second
+	// userdataUrl is the GCE metadata attribute holding the instance
+	// user-data.
+	userdataUrl    = "http://metadata.google.internal/computeMetadata/v1/instance/attributes/user-data"
+)
+
+var (
+	// metadataHeader is sent with every metadata request; presumably the
+	// GCE metadata server requires "Metadata-Flavor: Google" — confirm
+	// against the GCE metadata documentation.
+	metadataHeader = http.Header{"Metadata-Flavor": []string{"Google"}}
+)
+
+// Creator constructs gce providers; it satisfies providers.ProviderCreator.
+type Creator struct{}
+
+// Create returns a new gce provider that fetches user-data over HTTP,
+// logging through the supplied logger.
+func (Creator) Create(logger *log.Logger) providers.Provider {
+	return &provider{
+		logger:  logger,
+		backoff: initialBackoff,
+		client:  util.NewHttpClient(logger),
+	}
+}
+
+// provider implements providers.Provider against the GCE metadata service.
+type provider struct {
+	logger    *log.Logger
+	backoff   time.Duration // current retry delay, advanced by BackoffDuration
+	client    util.HttpClient
+	rawConfig []byte // user-data cached by IsOnline; nil until a successful fetch
+}
+
+// FetchConfig parses the raw user-data cached by IsOnline. If IsOnline has
+// not yet succeeded, this parses nil.
+func (p provider) FetchConfig() (types.Config, error) {
+	return config.Parse(p.rawConfig)
+}
+
+// IsOnline attempts to fetch the user-data with the required metadata
+// header, caching the body on success. A nil result (fetch failure or
+// acceptable 404) means not online yet.
+func (p *provider) IsOnline() bool {
+	p.rawConfig = p.client.FetchConfigWithHeader(userdataUrl, metadataHeader, http.StatusOK, http.StatusNotFound)
+	return (p.rawConfig != nil)
+}
+
+// ShouldRetry reports that the metadata service is always worth polling again.
+func (p provider) ShouldRetry() bool {
+	return true
+}
+
+// BackoffDuration advances and returns the exponential retry delay,
+// capped at maxBackoff.
+func (p *provider) BackoffDuration() time.Duration {
+	return putil.ExpBackoff(&p.backoff, maxBackoff)
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/providers/noop/noop.go b/vendor/github.com/coreos/ignition/internal/providers/noop/noop.go
      new file mode 100644
      index 00000000..8aa94343
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/providers/noop/noop.go
      @@ -0,0 +1,55 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// The noop provider does nothing, for use by unimplemented oems.
      +
      +package noop
      +
      +import (
      +	"time"
      +
      +	"github.com/coreos/ignition/config"
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/log"
      +	"github.com/coreos/ignition/internal/providers"
      +)
      +
+// Creator constructs noop providers; it satisfies providers.ProviderCreator.
+type Creator struct{}
+
+// Create returns a new noop provider that never supplies a config.
+func (Creator) Create(logger *log.Logger) providers.Provider {
+	return &provider{
+		logger: logger,
+	}
+}
+
+// provider implements providers.Provider as a stub for unimplemented OEMs.
+type provider struct {
+	logger *log.Logger
+}
+
+// FetchConfig always yields an empty config with config.ErrEmpty.
+func (p provider) FetchConfig() (types.Config, error) {
+	p.logger.Debug("noop provider fetching empty config")
+	return types.Config{}, config.ErrEmpty
+}
+
+// IsOnline always reports online so FetchConfig is reached immediately.
+func (p *provider) IsOnline() bool {
+	return true
+}
+
+// ShouldRetry reports false; retrying a noop can never succeed differently.
+func (p provider) ShouldRetry() bool {
+	return false
+}
+
+// BackoffDuration returns zero; the noop provider never waits.
+func (p *provider) BackoffDuration() time.Duration {
+	return 0
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/providers/providers.go b/vendor/github.com/coreos/ignition/internal/providers/providers.go
      new file mode 100644
      index 00000000..05d033e3
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/providers/providers.go
      @@ -0,0 +1,43 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package providers
      +
      +import (
      +	"errors"
      +	"time"
      +
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/log"
      +)
      +
+var (
+	// ErrNoProvider is returned by WaitUntilOnline when the provider is
+	// offline and reports that retrying is pointless.
+	ErrNoProvider = errors.New("config provider was not online")
+	// ErrTimeout is returned by WaitUntilOnline when the timeout elapses
+	// before the provider comes online.
+	ErrTimeout    = errors.New("timed out while waiting for config provider to come online")
+)
+
+// Provider represents an external source of configuration. The source can be
+// local to the host system or it may be remote. The provider dictates whether
+// or not the source is online, if the caller should try again when the source
+// is offline, and how long the caller should wait before retries.
+type Provider interface {
+	FetchConfig() (types.Config, error)
+	IsOnline() bool
+	ShouldRetry() bool
+	BackoffDuration() time.Duration
+}
+
+// ProviderCreator constructs a Provider bound to the given logger.
+type ProviderCreator interface {
+	Create(logger *log.Logger) Provider
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/providers/util/backoff.go b/vendor/github.com/coreos/ignition/internal/providers/util/backoff.go
      new file mode 100644
      index 00000000..589d91e4
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/providers/util/backoff.go
      @@ -0,0 +1,30 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package util
      +
      +import (
      +	"time"
      +)
      +
+// ExpBackoff calculates an exponential (power 2) backoff given the last
+// backoff duration and the maximum backoff duration. The duration pointed
+// to by backoff is updated in place and the new value is returned.
+//
+// NOTE(review): when *backoff is just below maxBackoff, doubling can
+// overshoot the cap once; the value is only clamped to maxBackoff on the
+// following call. If a strict cap matters, clamp after doubling instead.
+func ExpBackoff(backoff *time.Duration, maxBackoff time.Duration) time.Duration {
+	if *backoff < maxBackoff {
+		*backoff = *backoff * 2
+	} else {
+		*backoff = maxBackoff
+	}
+	return *backoff
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/providers/util/wait.go b/vendor/github.com/coreos/ignition/internal/providers/util/wait.go
      new file mode 100644
      index 00000000..3c785bbf
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/providers/util/wait.go
      @@ -0,0 +1,67 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package util
      +
      +import (
      +	"time"
      +
      +	"github.com/coreos/ignition/internal/providers"
      +)
      +
+// WaitUntilOnline waits for the provider to come online. If the provider will
+// never be online, or if the timeout elapses before it is online, this returns
+// an appropriate error (ErrNoProvider or ErrTimeout respectively). A timeout
+// of zero or less means wait indefinitely.
+func WaitUntilOnline(provider providers.Provider, timeout time.Duration) error {
+	// online carries the poller's verdict; buffered so the goroutine can
+	// send and exit even if this function has already returned on timeout.
+	online := make(chan bool, 1)
+	// stop tells the poller to abandon its backoff wait when this function
+	// returns (closed via the defer below).
+	stop := make(chan struct{})
+	defer close(stop)
+
+	// Poll the provider, sleeping BackoffDuration between attempts, until
+	// it is online or declines to retry.
+	go func() {
+		for {
+			if provider.IsOnline() {
+				online <- true
+				return
+			} else if !provider.ShouldRetry() {
+				online <- false
+				return
+			}
+
+			select {
+			case <-time.After(provider.BackoffDuration()):
+			case <-stop:
+				return
+			}
+		}
+	}()
+
+	// expired fires (by being closed) once the overall timeout elapses;
+	// with no positive timeout it is never closed and we wait forever.
+	expired := make(chan struct{})
+	if timeout > 0 {
+		go func() {
+			<-time.After(timeout)
+			close(expired)
+		}()
+	}
+
+	select {
+	case on := <-online:
+		if !on {
+			return providers.ErrNoProvider
+		}
+	case <-expired:
+		return providers.ErrTimeout
+	}
+
+	return nil
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/providers/util/wait_test.go b/vendor/github.com/coreos/ignition/internal/providers/util/wait_test.go
      new file mode 100644
      index 00000000..2f01957b
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/providers/util/wait_test.go
      @@ -0,0 +1,75 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package util
      +
      +import (
      +	"testing"
      +	"time"
      +
      +	"github.com/coreos/ignition/config/types"
      +	"github.com/coreos/ignition/internal/providers"
      +)
      +
+// mockProvider is a canned providers.Provider whose answers are fixed by
+// its fields, letting tests drive WaitUntilOnline deterministically.
+type mockProvider struct {
+	config  types.Config
+	err     error
+	online  bool
+	retry   bool
+	backoff time.Duration
+}
+
+func (p mockProvider) FetchConfig() (types.Config, error) { return p.config, p.err }
+func (p mockProvider) IsOnline() bool                     { return p.online }
+func (p mockProvider) ShouldRetry() bool                  { return p.retry }
+func (p mockProvider) BackoffDuration() time.Duration     { return p.backoff }
+
+// TestWaitUntilOnline covers the three terminal outcomes of WaitUntilOnline:
+// immediate success, immediate give-up (ErrNoProvider), and retry-forever
+// until the timeout expires (ErrTimeout).
+func TestWaitUntilOnline(t *testing.T) {
+	type in struct {
+		provider mockProvider
+		timeout  time.Duration
+	}
+	type out struct {
+		err error
+	}
+
+	online := mockProvider{online: true}
+	offline := mockProvider{online: false}
+	// offlineRetry never comes online but always retries, so only the
+	// one-second timeout can end the wait.
+	offlineRetry := mockProvider{online: false, retry: true}
+
+	tests := []struct {
+		in  in
+		out out
+	}{
+		{
+			in:  in{provider: online, timeout: time.Second},
+			out: out{err: nil},
+		},
+		{
+			in:  in{provider: offline, timeout: time.Second},
+			out: out{err: providers.ErrNoProvider},
+		},
+		{
+			in:  in{provider: offlineRetry, timeout: time.Second},
+			out: out{err: providers.ErrTimeout},
+		},
+	}
+
+	for i, test := range tests {
+		err := WaitUntilOnline(test.in.provider, test.in.timeout)
+		if test.out.err != err {
+			t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
+		}
+	}
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/providers/vmware/vmware.go b/vendor/github.com/coreos/ignition/internal/providers/vmware/vmware.go
      new file mode 100644
      index 00000000..3390ee35
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/providers/vmware/vmware.go
      @@ -0,0 +1,93 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// The vmware provider fetches a configuration from the VMware Guest Info
      +// interface.
      +
      +package vmware
      +
      +import (
      +	"compress/gzip"
      +	"encoding/base64"
      +	"fmt"
      +	"io/ioutil"
      +	"strings"
      +	"time"
      +
      +	"github.com/coreos/ignition/internal/log"
      +	"github.com/coreos/ignition/internal/providers"
      +)
      +
+// Creator constructs vmware providers; it satisfies providers.ProviderCreator.
+type Creator struct{}
+
+// Create returns a new vmware provider bound to the supplied logger.
+func (Creator) Create(logger *log.Logger) providers.Provider {
+	return &provider{
+		logger: logger,
+	}
+}
+
+// provider implements providers.Provider for the VMware Guest Info
+// interface. FetchConfig and IsOnline are supplied per-architecture
+// (see vmware_amd64.go and vmware_unsupported.go).
+type provider struct {
+	logger *log.Logger
+}
+
+// ShouldRetry reports false; guest-info availability will not change
+// between polls.
+func (p provider) ShouldRetry() bool {
+	return false
+}
+
+// BackoffDuration returns zero since this provider never retries.
+func (p *provider) BackoffDuration() time.Duration {
+	return 0
+}
      +
+// decodeData decodes guest-info data according to the declared encoding:
+// "" (plain), base64, gzip, or gzip-then-base64. Any other encoding is an
+// error.
+//
+// NOTE(review): the error strings below are capitalized ("Unsupported",
+// "Unable"), which violates Go's lowercase error-string convention.
+func decodeData(data string, encoding string) ([]byte, error) {
+	switch encoding {
+	case "":
+		return []byte(data), nil
+
+	case "b64", "base64":
+		return decodeBase64Data(data)
+
+	case "gz", "gzip":
+		return decodeGzipData(data)
+
+	case "gz+base64", "gzip+base64", "gz+b64", "gzip+b64":
+		// base64 is the outer layer: decode it first, then gunzip.
+		gz, err := decodeBase64Data(data)
+
+		if err != nil {
+			return nil, err
+		}
+
+		return decodeGzipData(string(gz))
+	}
+
+	return nil, fmt.Errorf("Unsupported encoding %q", encoding)
+}
+
+// decodeBase64Data decodes standard-alphabet base64 input.
+func decodeBase64Data(data string) ([]byte, error) {
+	decodedData, err := base64.StdEncoding.DecodeString(data)
+	if err != nil {
+		return nil, fmt.Errorf("Unable to decode base64: %q", err)
+	}
+
+	return decodedData, nil
+}
+
+// decodeGzipData decompresses gzip input and returns the full plaintext.
+func decodeGzipData(data string) ([]byte, error) {
+	reader, err := gzip.NewReader(strings.NewReader(data))
+	if err != nil {
+		return nil, err
+	}
+	defer reader.Close()
+
+	return ioutil.ReadAll(reader)
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/providers/vmware/vmware_amd64.go b/vendor/github.com/coreos/ignition/internal/providers/vmware/vmware_amd64.go
      new file mode 100644
      index 00000000..68a6fc3d
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/providers/vmware/vmware_amd64.go
      @@ -0,0 +1,54 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// The vmware provider fetches a configuration from the VMware Guest Info
      +// interface.
      +
      +package vmware
      +
      +import (
      +	"github.com/coreos/ignition/config"
      +	"github.com/coreos/ignition/config/types"
      +
      +	"github.com/sigma/vmw-guestinfo/rpcvmx"
      +	"github.com/sigma/vmw-guestinfo/vmcheck"
      +)
      +
+// FetchConfig reads the config from the VMware guestinfo keys
+// "coreos.config.data" and "coreos.config.data.encoding", decodes the data
+// per the declared encoding, and parses it into an Ignition config.
+func (p provider) FetchConfig() (types.Config, error) {
+	info := rpcvmx.NewConfig()
+	data, err := info.String("coreos.config.data", "")
+	if err != nil {
+		p.logger.Debug("failed to fetch config: %v", err)
+		return types.Config{}, err
+	}
+
+	encoding, err := info.String("coreos.config.data.encoding", "")
+	if err != nil {
+		p.logger.Debug("failed to fetch config encoding: %v", err)
+		return types.Config{}, err
+	}
+
+	// decodeData handles "", base64, gzip, and gzip+base64 encodings.
+	decodedData, err := decodeData(data, encoding)
+	if err != nil {
+		p.logger.Debug("failed to decode config: %v", err)
+		return types.Config{}, err
+	}
+
+	p.logger.Debug("config successfully fetched")
+	return config.Parse(decodedData)
+}
+
+// IsOnline reports whether we are actually running inside a VMware
+// virtual machine.
+func (p *provider) IsOnline() bool {
+	return vmcheck.IsVirtualWorld()
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/providers/vmware/vmware_unsupported.go b/vendor/github.com/coreos/ignition/internal/providers/vmware/vmware_unsupported.go
      new file mode 100644
      index 00000000..24c3de13
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/providers/vmware/vmware_unsupported.go
      @@ -0,0 +1,34 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// The vmware provider fetches a configuration from the VMware Guest Info
      +// interface.
      +
      +// +build !amd64
      +
      +package vmware
      +
      +import (
      +	"errors"
      +
      +	"github.com/coreos/ignition/config/types"
      +)
      +
+// FetchConfig always fails on non-amd64 architectures (this file is built
+// only under the "!amd64" constraint; see the build tag above).
+func (p provider) FetchConfig() (types.Config, error) {
+	return types.Config{}, errors.New("vmware provider is not supported on this architecture")
+}
+
+// IsOnline always reports offline on unsupported architectures.
+func (p *provider) IsOnline() bool {
+	return false
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/registry/registry.go b/vendor/github.com/coreos/ignition/internal/registry/registry.go
      new file mode 100644
      index 00000000..b8bb5b95
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/registry/registry.go
      @@ -0,0 +1,58 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package registry
      +
      +import (
      +	"fmt"
      +	"sort"
      +)
      +
+// Registrant interface implementors may be registered in a Registry
+type Registrant interface {
+	Name() string
+}
+
+// Registry is a named collection of Registrants keyed by their Name().
+// It is not safe for concurrent use; registration is expected at init time.
+type Registry struct {
+	name        string
+	registrants map[string]Registrant
+}
+
+// Create creates a new registry
+func Create(name string) *Registry {
+	return &Registry{name: name, registrants: map[string]Registrant{}}
+}
+
+// Register registers a new registrant to a registry. It panics on a
+// duplicate name, treating double-registration as a programmer error.
+func (r *Registry) Register(registrant Registrant) {
+	if _, ok := r.registrants[registrant.Name()]; ok {
+		panic(fmt.Sprintf("%s: registrant %q already registered", r.name, registrant.Name()))
+	}
+	r.registrants[registrant.Name()] = registrant
+}
+
+// Get gets a named registrant from a registry, or nil if the name is
+// unknown. Callers type-assert the result to the expected concrete type.
+func (r *Registry) Get(name string) interface{} {
+	return r.registrants[name]
+}
+
+// Names returns the sorted registrant names
+func (r *Registry) Names() []string {
+	keys := []string{}
+	for key := range r.registrants {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+	return keys
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/registry/registry_test.go b/vendor/github.com/coreos/ignition/internal/registry/registry_test.go
      new file mode 100644
      index 00000000..97e2b4d7
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/registry/registry_test.go
      @@ -0,0 +1,154 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package registry
      +
      +import (
      +	"reflect"
      +	"testing"
      +)
      +
+// Minimally implement the Registrant interface
+type registrant struct {
+	name string
+}
+
+func (t registrant) Name() string {
+	return t.name
+}
+
+// TestCreateRegister verifies that Create plus a sequence of Register calls
+// yields exactly the expected internal registrant map.
+func TestCreateRegister(t *testing.T) {
+	type in struct {
+		registrants []registrant
+	}
+	type out struct {
+		registrants Registry
+	}
+
+	a := registrant{name: "a"}
+	b := registrant{name: "b"}
+	c := registrant{name: "c"}
+
+	tests := []struct {
+		name string
+		in   in
+		out  out
+	}{
+		{
+			name: "empty",
+			in:   in{registrants: []registrant{}},
+			out:  out{registrants: Registry{name: "empty", registrants: map[string]Registrant{}}},
+		},
+		{
+			name: "three abc ...",
+			in:   in{registrants: []registrant{a, b, c}},
+			out:  out{registrants: Registry{name: "three abc ...", registrants: map[string]Registrant{"a": a, "b": b, "c": c}}},
+		},
+	}
+
+	for i, test := range tests {
+		tr := Create(test.name)
+		for _, r := range test.in.registrants {
+			tr.Register(r)
+		}
+		if !reflect.DeepEqual(&test.out.registrants, tr) {
+			t.Errorf("#%d: bad registrants: want %#v, got %#v", i, &test.out.registrants, tr)
+		}
+	}
+}
+
+// TestGet verifies Get returns the registered value for known names and
+// nil for unknown ones.
+func TestGet(t *testing.T) {
+	type in struct {
+		registrants []registrant
+		name        string
+	}
+	type out struct {
+		creator *registrant
+	}
+
+	a := registrant{name: "a"}
+	b := registrant{name: "b"}
+	c := registrant{name: "c"}
+
+	tests := []struct {
+		in  in
+		out out
+	}{
+		{
+			in:  in{registrants: nil, name: "a"},
+			out: out{creator: nil},
+		},
+		{
+			in:  in{registrants: []registrant{a, b, c}, name: "a"},
+			out: out{creator: &a},
+		},
+		{
+			in:  in{registrants: []registrant{a, b, c}, name: "c"},
+			out: out{creator: &c},
+		},
+	}
+
+	for i, test := range tests {
+		tr := Create("test")
+		for _, r := range test.in.registrants {
+			tr.Register(r)
+		}
+		r := tr.Get(test.in.name)
+		if r == nil {
+			if test.out.creator != nil {
+				// NOTE(review): this formats r, which is known nil here;
+				// it should format test.out.creator (the expected value).
+				t.Errorf("#%d: got nil expected %#v", i, r)
+			}
+		} else if !reflect.DeepEqual(*test.out.creator, r.(registrant)) {
+			t.Errorf("#%d: bad registrant: want %#v, got %#v", i, *test.out.creator, r.(registrant))
+		}
+	}
+}
+
+// TestNames verifies Names returns all registered names sorted ascending.
+func TestNames(t *testing.T) {
+	type in struct {
+		registrants []registrant
+	}
+	type out struct {
+		names []string
+	}
+
+	a := registrant{name: "a"}
+	b := registrant{name: "b"}
+	c := registrant{name: "c"}
+
+	tests := []struct {
+		in  in
+		out out
+	}{
+		{
+			in:  in{registrants: nil},
+			out: out{names: []string{}},
+		},
+		{
+			in:  in{registrants: []registrant{a, b, c}},
+			out: out{names: []string{"a", "b", "c"}},
+		},
+	}
+
+	for i, test := range tests {
+		tr := Create("test")
+		for _, r := range test.in.registrants {
+			tr.Register(r)
+		}
+		names := tr.Names()
+		if !reflect.DeepEqual(test.out.names, names) {
+			t.Errorf("#%d: bad names: want %#v, got %#v", i, test.out.names, names)
+		}
+	}
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/sgdisk/sgdisk.go b/vendor/github.com/coreos/ignition/internal/sgdisk/sgdisk.go
      new file mode 100644
      index 00000000..0e215e62
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/sgdisk/sgdisk.go
      @@ -0,0 +1,87 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package sgdisk
      +
      +import (
      +	"fmt"
      +	"os/exec"
      +
      +	"github.com/coreos/ignition/internal/log"
      +)
      +
+// sgdiskPath is the fixed location of the sgdisk binary in the initramfs.
+const sgdiskPath = "/sbin/sgdisk"
+
+// Operation accumulates a set of partitioning actions (optional table wipe
+// plus partition creations) against one device, applied by Commit.
+type Operation struct {
+	logger *log.Logger
+	dev    string
+	wipe   bool
+	parts  []Partition
+}
+
+// Partition describes one GPT partition to create.
+type Partition struct {
+	Number   int
+	Offset   uint64 // 512-byte sectors
+	Length   uint64 // 512-byte sectors
+	Label    string
+	TypeGUID string
+}
+
+// Begin begins an sgdisk operation
+func Begin(logger *log.Logger, dev string) *Operation {
+	return &Operation{logger: logger, dev: dev}
+}
+
+// CreatePartition adds the supplied partition to the list of partitions to be created as part of an operation.
+func (op *Operation) CreatePartition(p Partition) {
+	// XXX(vc): no checking is performed here, since we perform checking at yaml/json parsing, Commit() will just fail on badness.
+	op.parts = append(op.parts, p)
+}
+
+// WipeTable toggles if the table is to be wiped first when committing this operation.
+func (op *Operation) WipeTable(wipe bool) {
+	op.wipe = wipe
+}
+
+// Commit commits a partitioning operation: an optional --zap-all wipe
+// (retried once, since a stale table can make the first attempt report an
+// error), then a single sgdisk invocation creating all queued partitions.
+func (op *Operation) Commit() error {
+	if op.wipe {
+		cmd := exec.Command(sgdiskPath, "--zap-all", op.dev)
+		if err := op.logger.LogCmd(cmd, "wiping table on %q", op.dev); err != nil {
+			op.logger.Info("potential error encountered while wiping table... retrying")
+			cmd = exec.Command(sgdiskPath, "--zap-all", op.dev)
+			if err := op.logger.LogCmd(cmd, "wiping table on %q", op.dev); err != nil {
+				return fmt.Errorf("wipe failed: %v", err)
+			}
+		}
+	}
+
+	if len(op.parts) != 0 {
+		opts := []string{}
+		for _, p := range op.parts {
+			opts = append(opts, fmt.Sprintf("--new=%d:%d:+%d", p.Number, p.Offset, p.Length))
+			opts = append(opts, fmt.Sprintf("--change-name=%d:%s", p.Number, p.Label))
+			if p.TypeGUID != "" {
+				// NOTE(review): the field is TypeGUID but sgdisk's
+				// --partition-guid sets the *unique* partition GUID; the
+				// type GUID is set with --typecode. Confirm intent against
+				// the sgdisk man page.
+				opts = append(opts, fmt.Sprintf("--partition-guid=%d:%s", p.Number, p.TypeGUID))
+			}
+		}
+		opts = append(opts, op.dev)
+		cmd := exec.Command(sgdiskPath, opts...)
+		if err := op.logger.LogCmd(cmd, "creating %d partitions on %q", len(op.parts), op.dev); err != nil {
+			return fmt.Errorf("create partitions failed: %v", err)
+		}
+	}
+
+	return nil
+}
      diff --git a/vendor/github.com/coreos/ignition/internal/systemd/systemd.go b/vendor/github.com/coreos/ignition/internal/systemd/systemd.go
      new file mode 100644
      index 00000000..39a1926c
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/systemd/systemd.go
      @@ -0,0 +1,50 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package systemd
      +
      +import (
      +	"fmt"
      +
      +	"github.com/coreos/go-systemd/dbus"
      +	"github.com/coreos/go-systemd/unit"
      +)
      +
      +// WaitOnDevices waits for the devices named in devs to be plugged before returning.
      +func WaitOnDevices(devs []string, stage string) error {
      +	conn, err := dbus.NewSystemdConnection()
      +	if err != nil {
      +		return err
      +	}
      +
      +	results := map[string]chan string{}
      +	for _, dev := range devs {
      +		unitName := unit.UnitNamePathEscape(dev + ".device")
      +		results[unitName] = make(chan string)
      +
      +		if _, err = conn.StartUnit(unitName, "replace", results[unitName]); err != nil {
      +			return fmt.Errorf("failed starting device unit %s: %v", unitName, err)
      +		}
      +	}
      +
      +	for unitName, result := range results {
      +		s := <-result
      +
      +		if s != "done" {
      +			return fmt.Errorf("device unit %s %s", unitName, s)
      +		}
      +	}
      +
      +	return nil
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/util/http.go b/vendor/github.com/coreos/ignition/internal/util/http.go
      new file mode 100644
      index 00000000..2252fa91
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/util/http.go
      @@ -0,0 +1,130 @@
      +// Copyright 2016 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package util
      +
      +import (
      +	"fmt"
      +	"io/ioutil"
      +	"net/http"
      +	"time"
      +
      +	"github.com/coreos/ignition/internal/log"
      +	"github.com/coreos/ignition/internal/version"
      +)
      +
      +// HttpClient is a simple wrapper around the Go HTTP client that standardizes
      +// the process and logging of fetching payloads.
      +type HttpClient struct {
      +	client *http.Client
      +	logger *log.Logger
      +}
      +
      +// NewHttpClient creates a new client with the given logger.
      +func NewHttpClient(logger *log.Logger) HttpClient {
      +	return HttpClient{
      +		client: &http.Client{
      +			Timeout: 10 * time.Second,
      +		},
      +		logger: logger,
      +	}
      +}
      +
      +// Get performs an HTTP GET on the provided URL and returns the response body,
      +// HTTP status code, and error (if any).
      +func (c HttpClient) Get(url string) ([]byte, int, error) {
      +	return c.GetWithHeader(url, http.Header{})
      +}
      +
+// GetWithHeader performs an HTTP GET on the provided URL with the given header
      +// and returns the response body, HTTP status code, and error (if any). By
      +// default, User-Agent and Accept are added to the header but these can be
      +// overridden.
      +func (c HttpClient) GetWithHeader(url string, header http.Header) ([]byte, int, error) {
      +	var body []byte
      +	var status int
      +
      +	err := c.logger.LogOp(func() error {
      +		req, err := http.NewRequest("GET", url, nil)
      +		req.Header.Set("User-Agent", "Ignition/"+version.Raw)
      +		req.Header.Set("Accept", "*")
      +		for key, values := range header {
      +			req.Header.Del(key)
      +			for _, value := range values {
      +				req.Header.Add(key, value)
      +			}
      +		}
      +		resp, err := c.client.Do(req)
      +		if err != nil {
      +			return err
      +		}
      +		defer resp.Body.Close()
      +
      +		status = resp.StatusCode
      +		c.logger.Debug("GET result: %s", http.StatusText(status))
      +		body, err = ioutil.ReadAll(resp.Body)
      +
      +		return err
      +	}, "GET %q", url)
      +
      +	return body, status, err
      +}
      +
      +// FetchConfig calls FetchConfigWithHeader with an empty set of headers.
      +func (c HttpClient) FetchConfig(url string, acceptedStatuses ...int) []byte {
      +	return c.FetchConfigWithHeader(url, http.Header{}, acceptedStatuses...)
      +}
      +
      +// FetchConfigWithHeader fetches a raw config from the provided URL and returns
      +// the response body on success or nil on failure. The caller must also provide
+// a list of acceptable HTTP status codes. If the response's status
      +// code is not in the provided list, it is considered a failure. The HTTP
+// response must be OK, otherwise an empty (vs. nil) config is returned. The
      +// provided headers are merged with a set of default headers.
      +func (c HttpClient) FetchConfigWithHeader(url string, header http.Header, acceptedStatuses ...int) []byte {
      +	var config []byte
      +
      +	c.logger.LogOp(func() error {
      +		reqHeader := http.Header{
      +			"Accept-Encoding": []string{"identity"},
      +			"Accept":          []string{"application/vnd.coreos.ignition+json; version=2.0.0, application/vnd.coreos.ignition+json; version=1; q=0.5, */*; q=0.1"},
      +		}
      +		for key, values := range header {
      +			reqHeader.Del(key)
      +			for _, value := range values {
      +				reqHeader.Add(key, value)
      +			}
      +		}
      +
      +		data, status, err := c.GetWithHeader(url, reqHeader)
      +		if err != nil {
      +			return err
      +		}
      +
      +		for _, acceptedStatus := range acceptedStatuses {
      +			if status == acceptedStatus {
      +				if status == http.StatusOK {
      +					config = data
      +				} else {
      +					config = []byte{}
      +				}
      +				return nil
      +			}
      +		}
      +
      +		return fmt.Errorf("%s", http.StatusText(status))
      +	}, "fetching config from %q", url)
      +
      +	return config
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/util/tools/prerelease_check.go b/vendor/github.com/coreos/ignition/internal/util/tools/prerelease_check.go
      new file mode 100644
      index 00000000..3b5b9b27
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/util/tools/prerelease_check.go
      @@ -0,0 +1,33 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package main
      +
      +import (
      +	"fmt"
      +	"os"
      +
      +	"github.com/coreos/ignition/config/types"
      +)
      +
      +func main() {
      +	if types.MaxVersion.PreRelease != "" {
      +		fmt.Fprintf(os.Stderr, "config version still has pre-release (%s)\n", types.MaxVersion.PreRelease)
      +		os.Exit(1)
      +	}
      +	if types.MaxVersion.Metadata != "" {
      +		fmt.Fprintf(os.Stderr, "config version still has metadata (%s)\n", types.MaxVersion.Metadata)
      +		os.Exit(1)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/util/verification.go b/vendor/github.com/coreos/ignition/internal/util/verification.go
      new file mode 100644
      index 00000000..5a0f9726
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/util/verification.go
      @@ -0,0 +1,57 @@
      +// Copyright 2015 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package util
      +
      +import (
      +	"crypto/sha512"
      +	"encoding/hex"
      +	"fmt"
      +
      +	"github.com/coreos/ignition/config/types"
      +)
      +
      +type ErrHashMismatch struct {
      +	calculated string
      +	expected   string
      +}
      +
      +func (e ErrHashMismatch) Error() string {
      +	return fmt.Sprintf("hash verification failed (calculated %s but expected %s)",
      +		e.calculated, e.expected)
      +}
      +
      +func AssertValid(verify types.Verification, data []byte) error {
      +	if hash := verify.Hash; hash != nil {
      +		var sum []byte
      +		switch hash.Function {
      +		case "sha512":
      +			rawSum := sha512.Sum512(data)
      +			sum = rawSum[:]
      +		default:
      +			return types.ErrHashUnrecognized
      +		}
      +
      +		encodedSum := make([]byte, hex.EncodedLen(len(sum)))
      +		hex.Encode(encodedSum, sum)
      +		if string(encodedSum) != hash.Sum {
      +			return ErrHashMismatch{
      +				calculated: string(encodedSum),
      +				expected:   hash.Sum,
      +			}
      +		}
      +	}
      +
      +	return nil
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/util/verification_test.go b/vendor/github.com/coreos/ignition/internal/util/verification_test.go
      new file mode 100644
      index 00000000..9232bcee
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/util/verification_test.go
      @@ -0,0 +1,84 @@
      +// Copyright 2016 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package util
      +
      +import (
      +	"reflect"
      +	"testing"
      +
      +	"github.com/coreos/ignition/config/types"
      +)
      +
      +func TestAssertValid(t *testing.T) {
      +	type in struct {
      +		verification types.Verification
      +		data         []byte
      +	}
      +	type out struct {
      +		err error
      +	}
      +
      +	tests := []struct {
      +		in  in
      +		out out
      +	}{
      +		{
      +			in:  in{data: []byte("hello")},
      +			out: out{},
      +		},
      +		{
      +			in: in{
      +				verification: types.Verification{
      +					Hash: &types.Hash{
      +						Function: "sha512",
      +						Sum:      "9b71d224bd62f3785d96d46ad3ea3d73319bfbc2890caadae2dff72519673ca72323c3d99ba5c11d7c7acc6e14b8c5da0c4663475c2e5c3adef46f73bcdec043",
      +					},
      +				},
      +				data: []byte("hello"),
      +			},
      +			out: out{},
      +		},
      +		{
      +			in: in{
      +				verification: types.Verification{
      +					Hash: &types.Hash{Function: "xor"},
      +				},
      +			},
      +			out: out{err: types.ErrHashUnrecognized},
      +		},
      +		{
      +			in: in{
      +				verification: types.Verification{
      +					Hash: &types.Hash{
      +						Function: "sha512",
      +						Sum:      "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
      +					},
      +				},
      +				data: []byte("hello"),
      +			},
      +			out: out{err: ErrHashMismatch{
      +				calculated: "9b71d224bd62f3785d96d46ad3ea3d73319bfbc2890caadae2dff72519673ca72323c3d99ba5c11d7c7acc6e14b8c5da0c4663475c2e5c3adef46f73bcdec043",
      +				expected:   "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
      +			}},
      +		},
      +	}
      +
      +	for i, test := range tests {
      +		err := AssertValid(test.in.verification, test.in.data)
      +		if !reflect.DeepEqual(test.out.err, err) {
      +			t.Errorf("#%d: bad err: want %+v, got %+v", i, test.out.err, err)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/ignition/internal/vendor.manifest b/vendor/github.com/coreos/ignition/internal/vendor.manifest
      new file mode 100644
      index 00000000..0bea590f
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/vendor.manifest
      @@ -0,0 +1,9 @@
      +# If you manipulate the contents of third_party/, amend this accordingly.
      +# pkg							version
      +github.com/coreos/go-systemd/dbus			9b2f329b69ae0292a30f426867494f38af617a42
      +github.com/coreos/go-systemd/unit			9b2f329b69ae0292a30f426867494f38af617a42
      +github.com/coreos/update-ssh-keys/authorized_keys_d	539317dc5100bc599dab125b1e644a2b017b2b35
      +github.com/godbus/dbus					41608027bdce7bfa8959d653a00b954591220e67
      +github.com/sigma/vmw-guestinfo				95dd4126d6e8b4ef1970b3f3fe2e8cdd470d2903
      +github.com/sigma/bdoor					babf2a4017b020d4ce04e8167076186e82645dd1
      +github.com/vincent-petithory/dataurl			9a301d65acbb728fcc3ace14f45f511a4cfeea9c
      diff --git a/vendor/github.com/coreos/ignition/internal/version/version.go b/vendor/github.com/coreos/ignition/internal/version/version.go
      new file mode 100644
      index 00000000..a0e234be
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/internal/version/version.go
      @@ -0,0 +1,24 @@
      +// Copyright 2016 CoreOS, Inc.
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package version
      +
      +import (
      +	"fmt"
      +)
      +
      +var (
      +	Raw    = "was not built properly"
      +	String = fmt.Sprintf("Ignition %s", Raw)
      +)
      diff --git a/vendor/github.com/coreos/ignition/tag_release.sh b/vendor/github.com/coreos/ignition/tag_release.sh
      new file mode 100755
      index 00000000..da3f9e3a
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/tag_release.sh
      @@ -0,0 +1,29 @@
      +#!/bin/bash -e
      +
      +[ $# == 2 ] || { echo "usage: $0 version commit" && exit 1; }
      +
      +VER=$1
      +COMMIT=$2
      +
      +[[ "${VER}" =~ ^v[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+$ ]] || {
      +	echo "malformed version: \"${VER}\""
      +	exit 2
      +}
      +
      +[[ "${COMMIT}" =~ ^[[:xdigit:]]+$ ]] || {
      +	echo "malformed commit id: \"${COMMIT}\""
      +	exit 3
      +}
      +
      +source ./build
      +go run internal/util/tools/prerelease_check.go
      +
      +# TODO(vc): generate NEWS as part of the release process.
      +# @marineam suggested using git notes to associate NEWS-destined payloads
      +# with objects, we just need to define a syntax and employ them.
      +# I would like to be able to write the NEWS annotation as part of the commit message,
      +# while still having it go into a note.
      +
      +git tag --sign --message "Ignition ${VER}" "${VER}" "${COMMIT}"
      +
      +git verify-tag --verbose "${VER}"
      diff --git a/vendor/github.com/coreos/ignition/test b/vendor/github.com/coreos/ignition/test
      new file mode 100755
      index 00000000..a6818612
      --- /dev/null
      +++ b/vendor/github.com/coreos/ignition/test
      @@ -0,0 +1,34 @@
      +#!/bin/bash -eu
      +
      +source ./build
      +
      +SRC=$(find . -name '*.go' \
      +	-not -path "./internal/vendor/*" \
      +	-not -path "./config/vendor/*" \
      +	-not -path "./config/v1/vendor/*")
      +
      +PKG=$(cd gopath/src/${REPO_PATH}; go list ./... | \
      +	grep --invert-match vendor)
      +
      +# https://github.com/golang/go/issues/15067
      +PKG_VET=$(cd gopath/src/${REPO_PATH}; go list ./... | \
      +	grep --invert-match vendor | \
      +	grep --invert-match internal/log)
      +
      +echo "Checking gofix..."
      +go tool fix -diff $SRC
      +
      +echo "Checking gofmt..."
      +res=$(gofmt -d -e -s $SRC)
      +echo "${res}"
      +if [ -n "${res}" ]; then
      +	exit 1
      +fi
      +
      +echo "Checking govet..."
      +go vet $PKG_VET
      +
      +echo "Running tests..."
      +go test -timeout 60s -cover $@ ${PKG} --race
      +
      +echo "Success"
      diff --git a/vendor/github.com/coreos/pkg/.gitignore b/vendor/github.com/coreos/pkg/.gitignore
      new file mode 100644
      index 00000000..00ae1054
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/.gitignore
      @@ -0,0 +1,27 @@
      +# Compiled Object files, Static and Dynamic libs (Shared Objects)
      +*.o
      +*.a
      +*.so
      +
      +# Folders
      +_obj
      +_test
      +
      +# Architecture specific extensions/prefixes
      +*.[568vq]
      +[568vq].out
      +
      +*.cgo1.go
      +*.cgo2.c
      +_cgo_defun.c
      +_cgo_gotypes.go
      +_cgo_export.*
      +
      +_testmain.go
      +
      +*.exe
      +*.test
      +*.prof
      +
      +bin/
      +coverage/
      diff --git a/vendor/github.com/coreos/pkg/CONTRIBUTING.md b/vendor/github.com/coreos/pkg/CONTRIBUTING.md
      new file mode 100644
      index 00000000..6662073a
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/CONTRIBUTING.md
      @@ -0,0 +1,71 @@
      +# How to Contribute
      +
      +CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
      +GitHub pull requests.  This document outlines some of the conventions on
      +development workflow, commit message formatting, contact points and other
      +resources to make it easier to get your contribution accepted.
      +
      +# Certificate of Origin
      +
      +By contributing to this project you agree to the Developer Certificate of
      +Origin (DCO). This document was created by the Linux Kernel community and is a
      +simple statement that you, as a contributor, have the legal right to make the
      +contribution. See the [DCO](DCO) file for details.
      +
      +# Email and Chat
      +
      +The project currently uses the general CoreOS email list and IRC channel:
      +- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
      +- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
      +
      +Please avoid emailing maintainers found in the MAINTAINERS file directly. They
      +are very busy and read the mailing lists.
      +
      +## Getting Started
      +
      +- Fork the repository on GitHub
      +- Read the [README](README.md) for build and test instructions
      +- Play with the project, submit bugs, submit patches!
      +
      +## Contribution Flow
      +
      +This is a rough outline of what a contributor's workflow looks like:
      +
      +- Create a topic branch from where you want to base your work (usually master).
      +- Make commits of logical units.
      +- Make sure your commit messages are in the proper format (see below).
      +- Push your changes to a topic branch in your fork of the repository.
      +- Make sure the tests pass, and add any new tests as appropriate.
      +- Submit a pull request to the original repository.
      +
      +Thanks for your contributions!
      +
      +### Format of the Commit Message
      +
      +We follow a rough convention for commit messages that is designed to answer two
      +questions: what changed and why. The subject line should feature the what and
      +the body of the commit should describe the why.
      +
      +```
      +scripts: add the test-cluster command
      +
      +this uses tmux to setup a test cluster that you can easily kill and
      +start for debugging.
      +
      +Fixes #38
      +```
      +
      +The format can be described more formally as follows:
      +
      +```
      +<subsystem>: <what changed>
      +<BLANK LINE>
      +<why this change was made>
      +<BLANK LINE>
      +<footer>
      +```
      +
      +The first line is the subject and should be no longer than 70 characters, the
      +second line is always blank, and other lines should be wrapped at 80 characters.
      +This allows the message to be easier to read on GitHub as well as in various
      +git tools.
      diff --git a/vendor/github.com/coreos/pkg/DCO b/vendor/github.com/coreos/pkg/DCO
      new file mode 100644
      index 00000000..716561d5
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/DCO
      @@ -0,0 +1,36 @@
      +Developer Certificate of Origin
      +Version 1.1
      +
      +Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
      +660 York Street, Suite 102,
      +San Francisco, CA 94110 USA
      +
      +Everyone is permitted to copy and distribute verbatim copies of this
      +license document, but changing it is not allowed.
      +
      +
      +Developer's Certificate of Origin 1.1
      +
      +By making a contribution to this project, I certify that:
      +
      +(a) The contribution was created in whole or in part by me and I
      +    have the right to submit it under the open source license
      +    indicated in the file; or
      +
      +(b) The contribution is based upon previous work that, to the best
      +    of my knowledge, is covered under an appropriate open source
      +    license and I have the right under that license to submit that
      +    work with modifications, whether created in whole or in part
      +    by me, under the same open source license (unless I am
      +    permitted to submit under a different license), as indicated
      +    in the file; or
      +
      +(c) The contribution was provided directly to me by some other
      +    person who certified (a), (b) or (c) and I have not modified
      +    it.
      +
      +(d) I understand and agree that this project and the contribution
      +    are public and that a record of the contribution (including all
      +    personal information I submit with it, including my sign-off) is
      +    maintained indefinitely and may be redistributed consistent with
      +    this project or the open source license(s) involved.
      diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE
      new file mode 100644
      index 00000000..e06d2081
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/LICENSE
      @@ -0,0 +1,202 @@
      +Apache License
      +                           Version 2.0, January 2004
      +                        http://www.apache.org/licenses/
      +
      +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
      +
      +   1. Definitions.
      +
      +      "License" shall mean the terms and conditions for use, reproduction,
      +      and distribution as defined by Sections 1 through 9 of this document.
      +
      +      "Licensor" shall mean the copyright owner or entity authorized by
      +      the copyright owner that is granting the License.
      +
      +      "Legal Entity" shall mean the union of the acting entity and all
      +      other entities that control, are controlled by, or are under common
      +      control with that entity. For the purposes of this definition,
      +      "control" means (i) the power, direct or indirect, to cause the
      +      direction or management of such entity, whether by contract or
      +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      +      outstanding shares, or (iii) beneficial ownership of such entity.
      +
      +      "You" (or "Your") shall mean an individual or Legal Entity
      +      exercising permissions granted by this License.
      +
      +      "Source" form shall mean the preferred form for making modifications,
      +      including but not limited to software source code, documentation
      +      source, and configuration files.
      +
      +      "Object" form shall mean any form resulting from mechanical
      +      transformation or translation of a Source form, including but
      +      not limited to compiled object code, generated documentation,
      +      and conversions to other media types.
      +
      +      "Work" shall mean the work of authorship, whether in Source or
      +      Object form, made available under the License, as indicated by a
      +      copyright notice that is included in or attached to the work
      +      (an example is provided in the Appendix below).
      +
      +      "Derivative Works" shall mean any work, whether in Source or Object
      +      form, that is based on (or derived from) the Work and for which the
      +      editorial revisions, annotations, elaborations, or other modifications
      +      represent, as a whole, an original work of authorship. For the purposes
      +      of this License, Derivative Works shall not include works that remain
      +      separable from, or merely link (or bind by name) to the interfaces of,
      +      the Work and Derivative Works thereof.
      +
      +      "Contribution" shall mean any work of authorship, including
      +      the original version of the Work and any modifications or additions
      +      to that Work or Derivative Works thereof, that is intentionally
      +      submitted to Licensor for inclusion in the Work by the copyright owner
      +      or by an individual or Legal Entity authorized to submit on behalf of
      +      the copyright owner. For the purposes of this definition, "submitted"
      +      means any form of electronic, verbal, or written communication sent
      +      to the Licensor or its representatives, including but not limited to
      +      communication on electronic mailing lists, source code control systems,
      +      and issue tracking systems that are managed by, or on behalf of, the
      +      Licensor for the purpose of discussing and improving the Work, but
      +      excluding communication that is conspicuously marked or otherwise
      +      designated in writing by the copyright owner as "Not a Contribution."
      +
      +      "Contributor" shall mean Licensor and any individual or Legal Entity
      +      on behalf of whom a Contribution has been received by Licensor and
      +      subsequently incorporated within the Work.
      +
      +   2. Grant of Copyright License. Subject to the terms and conditions of
      +      this License, each Contributor hereby grants to You a perpetual,
      +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      +      copyright license to reproduce, prepare Derivative Works of,
      +      publicly display, publicly perform, sublicense, and distribute the
      +      Work and such Derivative Works in Source or Object form.
      +
      +   3. Grant of Patent License. Subject to the terms and conditions of
      +      this License, each Contributor hereby grants to You a perpetual,
      +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      +      (except as stated in this section) patent license to make, have made,
      +      use, offer to sell, sell, import, and otherwise transfer the Work,
      +      where such license applies only to those patent claims licensable
      +      by such Contributor that are necessarily infringed by their
      +      Contribution(s) alone or by combination of their Contribution(s)
      +      with the Work to which such Contribution(s) was submitted. If You
      +      institute patent litigation against any entity (including a
      +      cross-claim or counterclaim in a lawsuit) alleging that the Work
      +      or a Contribution incorporated within the Work constitutes direct
      +      or contributory patent infringement, then any patent licenses
      +      granted to You under this License for that Work shall terminate
      +      as of the date such litigation is filed.
      +
      +   4. Redistribution. You may reproduce and distribute copies of the
      +      Work or Derivative Works thereof in any medium, with or without
      +      modifications, and in Source or Object form, provided that You
      +      meet the following conditions:
      +
      +      (a) You must give any other recipients of the Work or
      +          Derivative Works a copy of this License; and
      +
      +      (b) You must cause any modified files to carry prominent notices
      +          stating that You changed the files; and
      +
      +      (c) You must retain, in the Source form of any Derivative Works
      +          that You distribute, all copyright, patent, trademark, and
      +          attribution notices from the Source form of the Work,
      +          excluding those notices that do not pertain to any part of
      +          the Derivative Works; and
      +
      +      (d) If the Work includes a "NOTICE" text file as part of its
      +          distribution, then any Derivative Works that You distribute must
      +          include a readable copy of the attribution notices contained
      +          within such NOTICE file, excluding those notices that do not
      +          pertain to any part of the Derivative Works, in at least one
      +          of the following places: within a NOTICE text file distributed
      +          as part of the Derivative Works; within the Source form or
      +          documentation, if provided along with the Derivative Works; or,
      +          within a display generated by the Derivative Works, if and
      +          wherever such third-party notices normally appear. The contents
      +          of the NOTICE file are for informational purposes only and
      +          do not modify the License. You may add Your own attribution
      +          notices within Derivative Works that You distribute, alongside
      +          or as an addendum to the NOTICE text from the Work, provided
      +          that such additional attribution notices cannot be construed
      +          as modifying the License.
      +
      +      You may add Your own copyright statement to Your modifications and
      +      may provide additional or different license terms and conditions
      +      for use, reproduction, or distribution of Your modifications, or
      +      for any such Derivative Works as a whole, provided Your use,
      +      reproduction, and distribution of the Work otherwise complies with
      +      the conditions stated in this License.
      +
      +   5. Submission of Contributions. Unless You explicitly state otherwise,
      +      any Contribution intentionally submitted for inclusion in the Work
      +      by You to the Licensor shall be under the terms and conditions of
      +      this License, without any additional terms or conditions.
      +      Notwithstanding the above, nothing herein shall supersede or modify
      +      the terms of any separate license agreement you may have executed
      +      with Licensor regarding such Contributions.
      +
      +   6. Trademarks. This License does not grant permission to use the trade
      +      names, trademarks, service marks, or product names of the Licensor,
      +      except as required for reasonable and customary use in describing the
      +      origin of the Work and reproducing the content of the NOTICE file.
      +
      +   7. Disclaimer of Warranty. Unless required by applicable law or
      +      agreed to in writing, Licensor provides the Work (and each
      +      Contributor provides its Contributions) on an "AS IS" BASIS,
      +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      +      implied, including, without limitation, any warranties or conditions
      +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      +      PARTICULAR PURPOSE. You are solely responsible for determining the
      +      appropriateness of using or redistributing the Work and assume any
      +      risks associated with Your exercise of permissions under this License.
      +
      +   8. Limitation of Liability. In no event and under no legal theory,
      +      whether in tort (including negligence), contract, or otherwise,
      +      unless required by applicable law (such as deliberate and grossly
      +      negligent acts) or agreed to in writing, shall any Contributor be
      +      liable to You for damages, including any direct, indirect, special,
      +      incidental, or consequential damages of any character arising as a
      +      result of this License or out of the use or inability to use the
      +      Work (including but not limited to damages for loss of goodwill,
      +      work stoppage, computer failure or malfunction, or any and all
      +      other commercial damages or losses), even if such Contributor
      +      has been advised of the possibility of such damages.
      +
      +   9. Accepting Warranty or Additional Liability. While redistributing
      +      the Work or Derivative Works thereof, You may choose to offer,
      +      and charge a fee for, acceptance of support, warranty, indemnity,
      +      or other liability obligations and/or rights consistent with this
      +      License. However, in accepting such obligations, You may act only
      +      on Your own behalf and on Your sole responsibility, not on behalf
      +      of any other Contributor, and only if You agree to indemnify,
      +      defend, and hold each Contributor harmless for any liability
      +      incurred by, or claims asserted against, such Contributor by reason
      +      of your accepting any such warranty or additional liability.
      +
      +   END OF TERMS AND CONDITIONS
      +
      +   APPENDIX: How to apply the Apache License to your work.
      +
      +      To apply the Apache License to your work, attach the following
      +      boilerplate notice, with the fields enclosed by brackets "{}"
      +      replaced with your own identifying information. (Don't include
      +      the brackets!)  The text should be enclosed in the appropriate
      +      comment syntax for the file format. We also recommend that a
      +      file or class name and description of purpose be included on the
      +      same "printed page" as the copyright notice for easier
      +      identification within third-party archives.
      +
      +   Copyright {yyyy} {name of copyright owner}
      +
      +   Licensed under the Apache License, Version 2.0 (the "License");
      +   you may not use this file except in compliance with the License.
      +   You may obtain a copy of the License at
      +
      +       http://www.apache.org/licenses/LICENSE-2.0
      +
      +   Unless required by applicable law or agreed to in writing, software
      +   distributed under the License is distributed on an "AS IS" BASIS,
      +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +   See the License for the specific language governing permissions and
      +   limitations under the License.
      +
      diff --git a/vendor/github.com/coreos/pkg/MAINTAINERS b/vendor/github.com/coreos/pkg/MAINTAINERS
      new file mode 100644
      index 00000000..ff13ec92
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/MAINTAINERS
      @@ -0,0 +1 @@
      +Ed Rooth <ed.rooth@coreos.com> (@sym3tri)
      diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE
      new file mode 100644
      index 00000000..b39ddfa5
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/NOTICE
      @@ -0,0 +1,5 @@
      +CoreOS Project
      +Copyright 2014 CoreOS, Inc
      +
      +This product includes software developed at CoreOS, Inc.
      +(http://www.coreos.com/).
      diff --git a/vendor/github.com/coreos/pkg/README.md b/vendor/github.com/coreos/pkg/README.md
      new file mode 100644
      index 00000000..549dabfd
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/README.md
      @@ -0,0 +1,3 @@
      +a collection of go utility packages
      +
      +[![Build Status](https://semaphoreci.com/api/v1/projects/14b3f261-22c2-4f56-b1ff-f23f4aa03f5c/411991/badge.svg)](https://semaphoreci.com/coreos/pkg) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/coreos/pkg)
      diff --git a/vendor/github.com/coreos/pkg/build b/vendor/github.com/coreos/pkg/build
      new file mode 100755
      index 00000000..f5d3c476
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/build
      @@ -0,0 +1,3 @@
      +#!/bin/bash -e
      +
      +go build ./...
      diff --git a/vendor/github.com/coreos/pkg/cryptoutil/aes.go b/vendor/github.com/coreos/pkg/cryptoutil/aes.go
      new file mode 100644
      index 00000000..bd755287
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/cryptoutil/aes.go
      @@ -0,0 +1,94 @@
      +package cryptoutil
      +
      +import (
      +	"crypto/aes"
      +	"crypto/cipher"
      +	"crypto/rand"
      +	"errors"
      +)
      +
      +// pad uses the PKCS#7 padding scheme to align the a payload to a specific block size
      +func pad(plaintext []byte, bsize int) ([]byte, error) {
      +	if bsize >= 256 {
      +		return nil, errors.New("bsize must be < 256")
      +	}
      +	pad := bsize - (len(plaintext) % bsize)
      +	if pad == 0 {
      +		pad = bsize
      +	}
      +	for i := 0; i < pad; i++ {
      +		plaintext = append(plaintext, byte(pad))
      +	}
      +	return plaintext, nil
      +}
      +
      +// unpad strips the padding previously added using the PKCS#7 padding scheme
      +func unpad(paddedtext []byte) ([]byte, error) {
      +	length := len(paddedtext)
      +	paddedtext, lbyte := paddedtext[:length-1], paddedtext[length-1]
      +	pad := int(lbyte)
      +	if pad >= 256 || pad > length {
      +		return nil, errors.New("padding malformed")
      +	}
      +	return paddedtext[:length-(pad)], nil
      +}
      +
      +// AESEncrypt encrypts a payload with an AES cipher.
      +// The returned ciphertext has three notable properties:
      +// 1. ciphertext is aligned to the standard AES block size
      +// 2. ciphertext is padded using PKCS#7
      +// 3. IV is prepended to the ciphertext
      +func AESEncrypt(plaintext, key []byte) ([]byte, error) {
      +	plaintext, err := pad(plaintext, aes.BlockSize)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	block, err := aes.NewCipher(key)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	ciphertext := make([]byte, aes.BlockSize+len(plaintext))
      +	iv := ciphertext[:aes.BlockSize]
      +	if _, err := rand.Read(iv); err != nil {
      +		return nil, err
      +	}
      +
      +	mode := cipher.NewCBCEncrypter(block, iv)
      +	mode.CryptBlocks(ciphertext[aes.BlockSize:], plaintext)
      +
      +	return ciphertext, nil
      +}
      +
      +// AESDecrypt decrypts an encrypted payload with an AES cipher.
      +// The decryption algorithm makes three assumptions:
      +// 1. ciphertext is aligned to the standard AES block size
      +// 2. ciphertext is padded using PKCS#7
      +// 3. the IV is prepended to ciphertext
      +func AESDecrypt(ciphertext, key []byte) ([]byte, error) {
      +	if len(ciphertext) < aes.BlockSize {
      +		return nil, errors.New("ciphertext too short")
      +	}
      +
      +	iv := ciphertext[:aes.BlockSize]
      +	ciphertext = ciphertext[aes.BlockSize:]
      +
      +	if len(ciphertext)%aes.BlockSize != 0 {
      +		return nil, errors.New("ciphertext is not a multiple of the block size")
      +	}
      +
      +	block, err := aes.NewCipher(key)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	mode := cipher.NewCBCDecrypter(block, iv)
      +	mode.CryptBlocks(ciphertext, ciphertext)
      +
      +	if len(ciphertext)%aes.BlockSize != 0 {
      +		return nil, errors.New("ciphertext is not a multiple of the block size")
      +	}
      +
      +	return unpad(ciphertext)
      +}
      diff --git a/vendor/github.com/coreos/pkg/cryptoutil/aes_test.go b/vendor/github.com/coreos/pkg/cryptoutil/aes_test.go
      new file mode 100644
      index 00000000..b23bec70
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/cryptoutil/aes_test.go
      @@ -0,0 +1,93 @@
      +package cryptoutil
      +
      +import (
      +	"reflect"
      +	"testing"
      +)
      +
      +func TestPadUnpad(t *testing.T) {
      +	tests := []struct {
      +		plaintext []byte
      +		bsize     int
      +		padded    []byte
      +	}{
      +		{
      +			plaintext: []byte{1, 2, 3, 4},
      +			bsize:     7,
      +			padded:    []byte{1, 2, 3, 4, 3, 3, 3},
      +		},
      +		{
      +			plaintext: []byte{1, 2, 3, 4, 5, 6, 7},
      +			bsize:     3,
      +			padded:    []byte{1, 2, 3, 4, 5, 6, 7, 2, 2},
      +		},
      +		{
      +			plaintext: []byte{9, 9, 9, 9},
      +			bsize:     4,
      +			padded:    []byte{9, 9, 9, 9, 4, 4, 4, 4},
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		padded, err := pad(tt.plaintext, tt.bsize)
      +		if err != nil {
      +			t.Errorf("case %d: unexpected error: %v", i, err)
      +			continue
      +		}
      +		if !reflect.DeepEqual(tt.padded, padded) {
      +			t.Errorf("case %d: want=%v got=%v", i, tt.padded, padded)
      +			continue
      +		}
      +
      +		plaintext, err := unpad(tt.padded)
      +		if err != nil {
      +			t.Errorf("case %d: unexpected error: %v", i, err)
      +			continue
      +		}
      +		if !reflect.DeepEqual(tt.plaintext, plaintext) {
      +			t.Errorf("case %d: want=%v got=%v", i, tt.plaintext, plaintext)
      +			continue
      +		}
      +	}
      +}
      +
      +func TestPadMaxBlockSize(t *testing.T) {
      +	_, err := pad([]byte{1, 2, 3}, 256)
      +	if err == nil {
      +		t.Errorf("Expected non-nil error")
      +	}
      +}
      +
      +func TestAESEncryptDecrypt(t *testing.T) {
      +	message := []byte("Let me worry about blank.")
      +	key := append([]byte("shark"), make([]byte, 27)...)
      +
      +	ciphertext, err := AESEncrypt(message, key)
      +	if err != nil {
      +		t.Fatalf("Unexpected error: %v", err)
      +	}
      +	if reflect.DeepEqual(message, ciphertext) {
      +		t.Fatal("Encrypted data matches original payload")
      +	}
      +
      +	decrypted, err := AESDecrypt(ciphertext, key)
      +	if !reflect.DeepEqual(message, decrypted) {
      +		t.Fatalf("Decrypted data does not match original payload: want=%v got=%v", message, decrypted)
      +	}
      +}
      +
      +func TestAESDecryptWrongKey(t *testing.T) {
      +	message := []byte("My bones!")
      +	key := append([]byte("shark"), make([]byte, 27)...)
      +
      +	ciphertext, err := AESEncrypt(message, key)
      +	if err != nil {
      +		t.Fatalf("Unexpected error: %v", err)
      +	}
      +
      +	wrongKey := append([]byte("sheep"), make([]byte, 27)...)
      +	decrypted, _ := AESDecrypt(ciphertext, wrongKey)
      +	if reflect.DeepEqual(message, decrypted) {
      +		t.Fatalf("Data decrypted with different key matches original payload")
      +	}
      +}
      diff --git a/vendor/github.com/coreos/pkg/health/README.md b/vendor/github.com/coreos/pkg/health/README.md
      new file mode 100644
      index 00000000..5ec34c21
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/health/README.md
      @@ -0,0 +1,11 @@
      +health
      +====
      +
      +A simple framework for implementing an HTTP health check endpoint on servers.
      +
      +Users implement their `health.Checkable` types, and create a `health.Checker`, from which they can get an `http.HandlerFunc` using `health.Checker.MakeHealthHandlerFunc`.
      +
      +### Documentation
      +
      +For more details, visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/health)
      +
      diff --git a/vendor/github.com/coreos/pkg/health/health.go b/vendor/github.com/coreos/pkg/health/health.go
      new file mode 100644
      index 00000000..a1c3610f
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/health/health.go
      @@ -0,0 +1,127 @@
      +package health
      +
      +import (
      +	"expvar"
      +	"fmt"
      +	"log"
      +	"net/http"
      +
      +	"github.com/coreos/pkg/httputil"
      +)
      +
      +// Checkables should return nil when the thing they are checking is healthy, and an error otherwise.
      +type Checkable interface {
      +	Healthy() error
      +}
      +
      +// Checker provides a way to make an endpoint which can be probed for system health.
      +type Checker struct {
      +	// Checks are the Checkables to be checked when probing.
      +	Checks []Checkable
      +
      +	// Unhealthyhandler is called when one or more of the checks are unhealthy.
      +	// If not provided DefaultUnhealthyHandler is called.
      +	UnhealthyHandler UnhealthyHandler
      +
      +	// HealthyHandler is called when all checks are healthy.
      +	// If not provided, DefaultHealthyHandler is called.
      +	HealthyHandler http.HandlerFunc
      +}
      +
      +func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) {
      +	unhealthyHandler := c.UnhealthyHandler
      +	if unhealthyHandler == nil {
      +		unhealthyHandler = DefaultUnhealthyHandler
      +	}
      +
      +	successHandler := c.HealthyHandler
      +	if successHandler == nil {
      +		successHandler = DefaultHealthyHandler
      +	}
      +
      +	if r.Method != "GET" {
      +		w.Header().Set("Allow", "GET")
      +		w.WriteHeader(http.StatusMethodNotAllowed)
      +		return
      +	}
      +
      +	if err := Check(c.Checks); err != nil {
      +		unhealthyHandler(w, r, err)
      +		return
      +	}
      +
      +	successHandler(w, r)
      +}
      +
      +type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error)
      +
      +type StatusResponse struct {
      +	Status  string                 `json:"status"`
      +	Details *StatusResponseDetails `json:"details,omitempty"`
      +}
      +
      +type StatusResponseDetails struct {
      +	Code    int    `json:"code,omitempty"`
      +	Message string `json:"message,omitempty"`
      +}
      +
      +func Check(checks []Checkable) (err error) {
      +	errs := []error{}
      +	for _, c := range checks {
      +		if e := c.Healthy(); e != nil {
      +			errs = append(errs, e)
      +		}
      +	}
      +
      +	switch len(errs) {
      +	case 0:
      +		err = nil
      +	case 1:
      +		err = errs[0]
      +	default:
      +		err = fmt.Errorf("multiple health check failure: %v", errs)
      +	}
      +
      +	return
      +}
      +
      +func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) {
      +	err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{
      +		Status: "ok",
      +	})
      +	if err != nil {
      +		// TODO(bobbyrullo): replace with logging from new logging pkg,
      +		// once it lands.
      +		log.Printf("Failed to write JSON response: %v", err)
      +	}
      +}
      +
      +func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) {
      +	writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{
      +		Status: "error",
      +		Details: &StatusResponseDetails{
      +			Code:    http.StatusInternalServerError,
      +			Message: err.Error(),
      +		},
      +	})
      +	if writeErr != nil {
      +		// TODO(bobbyrullo): replace with logging from new logging pkg,
      +		// once it lands.
      +		log.Printf("Failed to write JSON response: %v", err)
      +	}
      +}
      +
      +// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported.
      +func ExpvarHandler(w http.ResponseWriter, r *http.Request) {
      +	w.Header().Set("Content-Type", "application/json; charset=utf-8")
      +	fmt.Fprintf(w, "{\n")
      +	first := true
      +	expvar.Do(func(kv expvar.KeyValue) {
      +		if !first {
      +			fmt.Fprintf(w, ",\n")
      +		}
      +		first = false
      +		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
      +	})
      +	fmt.Fprintf(w, "\n}\n")
      +}
      diff --git a/vendor/github.com/coreos/pkg/health/health_test.go b/vendor/github.com/coreos/pkg/health/health_test.go
      new file mode 100644
      index 00000000..99428eb7
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/health/health_test.go
      @@ -0,0 +1,198 @@
      +package health
      +
      +import (
      +	"encoding/json"
      +	"errors"
      +	"net/http"
      +	"net/http/httptest"
      +	"testing"
      +
      +	"github.com/coreos/pkg/httputil"
      +)
      +
      +type boolChecker bool
      +
      +func (b boolChecker) Healthy() error {
      +	if b {
      +		return nil
      +	}
      +	return errors.New("Unhealthy")
      +}
      +
      +func errString(err error) string {
      +	if err == nil {
      +		return ""
      +	}
      +	return err.Error()
      +}
      +
      +func TestCheck(t *testing.T) {
      +	for i, test := range []struct {
      +		checks   []Checkable
      +		expected string
      +	}{
      +		{[]Checkable{}, ""},
      +
      +		{[]Checkable{boolChecker(true)}, ""},
      +
      +		{[]Checkable{boolChecker(true), boolChecker(true)}, ""},
      +
      +		{[]Checkable{boolChecker(true), boolChecker(false)}, "Unhealthy"},
      +
      +		{[]Checkable{boolChecker(true), boolChecker(false), boolChecker(false)}, "multiple health check failure: [Unhealthy Unhealthy]"},
      +	} {
      +		err := Check(test.checks)
      +
      +		if errString(err) != test.expected {
      +			t.Errorf("case %d: want %v, got %v", i, test.expected, errString(err))
      +		}
      +	}
      +}
      +
      +func TestHandlerFunc(t *testing.T) {
      +	for i, test := range []struct {
      +		checker         Checker
      +		method          string
      +		expectedStatus  string
      +		expectedCode    int
      +		expectedMessage string
      +	}{
      +		{
      +			Checker{
      +				Checks: []Checkable{
      +					boolChecker(true),
      +				},
      +			},
      +			"GET",
      +			"ok",
      +			http.StatusOK,
      +			"",
      +		},
      +
      +		// Wrong method.
      +		{
      +			Checker{
      +				Checks: []Checkable{
      +					boolChecker(true),
      +				},
      +			},
      +			"POST",
      +			"",
      +			http.StatusMethodNotAllowed,
      +			"GET only acceptable method",
      +		},
      +
      +		// Health check fails.
      +		{
      +			Checker{
      +				Checks: []Checkable{
      +					boolChecker(false),
      +				},
      +			},
      +			"GET",
      +			"error",
      +			http.StatusInternalServerError,
      +			"Unhealthy",
      +		},
      +
      +		// Health check fails, with overridden ErrorHandler.
      +		{
      +			Checker{
      +				Checks: []Checkable{
      +					boolChecker(false),
      +				},
      +				UnhealthyHandler: func(w http.ResponseWriter, r *http.Request, err error) {
      +					httputil.WriteJSONResponse(w,
      +						http.StatusInternalServerError, StatusResponse{
      +							Status: "error",
      +							Details: &StatusResponseDetails{
      +								Code:    http.StatusInternalServerError,
      +								Message: "Override!",
      +							},
      +						})
      +				},
      +			},
      +			"GET",
      +			"error",
      +			http.StatusInternalServerError,
      +			"Override!",
      +		},
      +
      +		// Health check succeeds, with overridden SuccessHandler.
      +		{
      +			Checker{
      +				Checks: []Checkable{
      +					boolChecker(true),
      +				},
      +				HealthyHandler: func(w http.ResponseWriter, r *http.Request) {
      +					httputil.WriteJSONResponse(w,
      +						http.StatusOK, StatusResponse{
      +							Status: "okey-dokey",
      +						})
      +				},
      +			},
      +			"GET",
      +			"okey-dokey",
      +			http.StatusOK,
      +			"",
      +		},
      +	} {
      +		w := httptest.NewRecorder()
      +		r := &http.Request{}
      +		r.Method = test.method
      +		test.checker.ServeHTTP(w, r)
      +		if w.Code != test.expectedCode {
      +			t.Errorf("case %d: w.code == %v, want %v", i, w.Code, test.expectedCode)
      +		}
      +
      +		if test.expectedStatus == "" {
      +			// This is to handle the wrong-method case, when the
      +			// body of the response is empty.
      +			continue
      +		}
      +
      +		statusMap := make(map[string]interface{})
      +		err := json.Unmarshal(w.Body.Bytes(), &statusMap)
      +		if err != nil {
      +			t.Fatalf("case %d: failed to Unmarshal response body: %v", i, err)
      +		}
      +
      +		status, ok := statusMap["status"].(string)
      +		if !ok {
      +			t.Errorf("case %d: status not present or not a string in json: %q", i, w.Body.Bytes())
      +		}
      +		if status != test.expectedStatus {
      +			t.Errorf("case %d: status == %v, want %v", i, status, test.expectedStatus)
      +		}
      +
      +		detailMap, ok := statusMap["details"].(map[string]interface{})
      +		if test.expectedMessage != "" {
      +			if !ok {
      +				t.Fatalf("case %d: could not find/unmarshal detailMap", i)
      +			}
      +			message, ok := detailMap["message"].(string)
      +			if !ok {
      +				t.Fatalf("case %d: message not present or not a string in json: %q",
      +					i, w.Body.Bytes())
      +			}
      +			if message != test.expectedMessage {
      +				t.Errorf("case %d: message == %v, want %v", i, message, test.expectedMessage)
      +			}
      +
      +			code, ok := detailMap["code"].(float64)
      +			if !ok {
      +				t.Fatalf("case %d: code not present or not an int in json: %q",
      +					i, w.Body.Bytes())
      +			}
      +			if int(code) != test.expectedCode {
      +				t.Errorf("case %d: code == %v, want %v", i, code, test.expectedCode)
      +			}
      +
      +		} else {
      +			if ok {
      +				t.Errorf("case %d: unwanted detailMap present: %q", i, detailMap)
      +			}
      +		}
      +
      +	}
      +}
      diff --git a/vendor/github.com/coreos/pkg/httputil/README.md b/vendor/github.com/coreos/pkg/httputil/README.md
      new file mode 100644
      index 00000000..44fa751c
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/httputil/README.md
      @@ -0,0 +1,13 @@
      +httputil
      +====
      +
      +Common code for dealing with HTTP.
      +
      +Includes:
      +
      +* Code for returning JSON responses.
      +
      +### Documentation
      +
      +Visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/httputil)
      +
      diff --git a/vendor/github.com/coreos/pkg/httputil/cookie.go b/vendor/github.com/coreos/pkg/httputil/cookie.go
      new file mode 100644
      index 00000000..c37a37bb
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/httputil/cookie.go
      @@ -0,0 +1,21 @@
      +package httputil
      +
      +import (
      +	"net/http"
      +	"time"
      +)
      +
      +// DeleteCookies effectively deletes all named cookies
      +// by wiping all data and setting to expire immediately.
      +func DeleteCookies(w http.ResponseWriter, cookieNames ...string) {
      +	for _, n := range cookieNames {
      +		c := &http.Cookie{
      +			Name:    n,
      +			Value:   "",
      +			Path:    "/",
      +			MaxAge:  -1,
      +			Expires: time.Time{},
      +		}
      +		http.SetCookie(w, c)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/pkg/httputil/cookie_test.go b/vendor/github.com/coreos/pkg/httputil/cookie_test.go
      new file mode 100644
      index 00000000..cb35883c
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/httputil/cookie_test.go
      @@ -0,0 +1,51 @@
      +package httputil
      +
      +import (
      +	"net/http"
      +	"net/http/httptest"
      +	"testing"
      +	"time"
      +)
      +
      +func TestDeleteCookies(t *testing.T) {
      +	tests := []struct {
      +		// cookie names to delete
      +		n []string
      +	}{
      +		// single
      +		{
      +			n: []string{"foo"},
      +		},
      +		// multiple
      +		{
      +			n: []string{"foo", "bar"},
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		w := httptest.NewRecorder()
      +		DeleteCookies(w, tt.n...)
      +		resp := &http.Response{}
      +		resp.Header = w.Header()
      +		cks := resp.Cookies()
      +
      +		if len(cks) != len(tt.n) {
      +			t.Errorf("case %d: unexpected number of cookies, want: %d, got: %d", i, len(tt.n), len(cks))
      +		}
      +
      +		for _, c := range cks {
      +			if c.Value != "" {
      +				t.Errorf("case %d: unexpected cookie value, want: %q, got: %q", i, "", c.Value)
      +			}
      +			if c.Path != "/" {
      +				t.Errorf("case %d: unexpected cookie path, want: %q, got: %q", i, "/", c.Path)
      +			}
      +			if c.MaxAge != -1 {
      +				t.Errorf("case %d: unexpected cookie max-age, want: %q, got: %q", i, -1, c.MaxAge)
      +			}
      +			if !c.Expires.IsZero() {
      +				t.Errorf("case %d: unexpected cookie expires, want: %q, got: %q", i, time.Time{}, c.MaxAge)
      +			}
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/pkg/httputil/json.go b/vendor/github.com/coreos/pkg/httputil/json.go
      new file mode 100644
      index 00000000..0b092350
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/httputil/json.go
      @@ -0,0 +1,27 @@
      +package httputil
      +
      +import (
      +	"encoding/json"
      +	"net/http"
      +)
      +
      +const (
      +	JSONContentType = "application/json"
      +)
      +
      +func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error {
      +	enc, err := json.Marshal(resp)
      +	if err != nil {
      +		w.WriteHeader(http.StatusInternalServerError)
      +		return err
      +	}
      +
      +	w.Header().Set("Content-Type", JSONContentType)
      +	w.WriteHeader(code)
      +
      +	_, err = w.Write(enc)
      +	if err != nil {
      +		return err
      +	}
      +	return nil
      +}
      diff --git a/vendor/github.com/coreos/pkg/httputil/json_test.go b/vendor/github.com/coreos/pkg/httputil/json_test.go
      new file mode 100644
      index 00000000..92108927
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/httputil/json_test.go
      @@ -0,0 +1,56 @@
      +package httputil
      +
      +import (
      +	"net/http/httptest"
      +	"testing"
      +)
      +
      +func TestWriteJSONResponse(t *testing.T) {
      +	for i, test := range []struct {
      +		code         int
      +		resp         interface{}
      +		expectedJSON string
      +		expectErr    bool
      +	}{
      +		{
      +			200,
      +			struct {
      +				A string
      +				B string
      +			}{A: "foo", B: "bar"},
      +			`{"A":"foo","B":"bar"}`,
      +			false,
      +		},
      +		{
      +			500,
      +			// Something that json.Marshal cannot serialize.
      +			make(chan int),
      +			"",
      +			true,
      +		},
      +	} {
      +		w := httptest.NewRecorder()
      +		err := WriteJSONResponse(w, test.code, test.resp)
      +
      +		if w.Code != test.code {
      +			t.Errorf("case %d: w.code == %v, want %v", i, w.Code, test.code)
      +		}
      +
      +		if (err != nil) != test.expectErr {
      +			t.Errorf("case %d: (err != nil) == %v, want %v. err: %v", i, err != nil, test.expectErr, err)
      +		}
      +
      +		if string(w.Body.Bytes()) != test.expectedJSON {
      +			t.Errorf("case %d: w.Body.Bytes()) == %q, want %q", i,
      +				string(w.Body.Bytes()), test.expectedJSON)
      +		}
      +
      +		if !test.expectErr {
      +			contentType := w.Header()["Content-Type"][0]
      +			if contentType != JSONContentType {
      +				t.Errorf("case %d: contentType == %v, want %v", i, contentType, JSONContentType)
      +			}
      +		}
      +	}
      +
      +}
      diff --git a/vendor/github.com/coreos/pkg/multierror/multierror.go b/vendor/github.com/coreos/pkg/multierror/multierror.go
      new file mode 100644
      index 00000000..76c98383
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/multierror/multierror.go
      @@ -0,0 +1,32 @@
      +// Package multierror wraps a slice of errors and implements the error interface.
      +// This can be used to collect a bunch of errors (such as during form validation)
      +// and then return them all together as a single error. To see usage examples
      +// refer to the unit tests.
      +package multierror
      +
      +import (
      +	"fmt"
      +	"strings"
      +)
      +
      +type Error []error
      +
      +func (me Error) Error() string {
      +	if me == nil {
      +		return ""
      +	}
      +
      +	strs := make([]string, len(me))
      +	for i, err := range me {
      +		strs[i] = fmt.Sprintf("[%d] %v", i, err)
      +	}
      +	return strings.Join(strs, " ")
      +}
      +
      +func (me Error) AsError() error {
      +	if len([]error(me)) <= 0 {
      +		return nil
      +	}
      +
      +	return me
      +}
      diff --git a/vendor/github.com/coreos/pkg/multierror/multierror_test.go b/vendor/github.com/coreos/pkg/multierror/multierror_test.go
      new file mode 100644
      index 00000000..48a4a733
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/multierror/multierror_test.go
      @@ -0,0 +1,59 @@
      +package multierror
      +
      +import (
      +	"errors"
      +	"reflect"
      +	"testing"
      +)
      +
      +func TestAsError(t *testing.T) {
      +	tests := []struct {
      +		multierr Error
      +		want     error
      +	}{
      +		{
      +			multierr: Error([]error{errors.New("foo"), errors.New("bar")}),
      +			want:     Error([]error{errors.New("foo"), errors.New("bar")}),
      +		},
      +		{
      +			multierr: Error([]error{}),
      +			want:     nil,
      +		},
      +		{
      +			multierr: Error(nil),
      +			want:     nil,
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		got := tt.multierr.AsError()
      +		if !reflect.DeepEqual(tt.want, got) {
      +			t.Errorf("case %d: incorrect error value: want=%+v got=%+v", i, tt.want, got)
      +		}
      +	}
      +
      +}
      +
      +func TestErrorAppend(t *testing.T) {
      +	var multierr Error
      +	multierr = append(multierr, errors.New("foo"))
      +	multierr = append(multierr, errors.New("bar"))
      +	multierr = append(multierr, errors.New("baz"))
      +	want := Error([]error{errors.New("foo"), errors.New("bar"), errors.New("baz")})
      +	got := multierr.AsError()
      +	if !reflect.DeepEqual(want, got) {
      +		t.Fatalf("incorrect error value: want=%+v got=%+v", want, got)
      +	}
      +}
      +
      +func TestErrorString(t *testing.T) {
      +	var multierr Error
      +	multierr = append(multierr, errors.New("foo"))
      +	multierr = append(multierr, errors.New("bar"))
      +	multierr = append(multierr, errors.New("baz"))
      +	got := multierr.Error()
      +	want := "[0] foo [1] bar [2] baz"
      +	if want != got {
      +		t.Fatalf("incorrect output: want=%q got=%q", want, got)
      +	}
      +}
      diff --git a/vendor/github.com/coreos/pkg/netutil/proxy.go b/vendor/github.com/coreos/pkg/netutil/proxy.go
      new file mode 100644
      index 00000000..7bddd76e
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/netutil/proxy.go
      @@ -0,0 +1,55 @@
      +package netutil
      +
      +import (
      +	"io"
      +	"net"
      +	"sync"
      +	"time"
      +
      +	"github.com/coreos/pkg/capnslog"
      +)
      +
      +var (
      +	log = capnslog.NewPackageLogger("github.com/coreos/pkg/netutil", "main")
      +)
      +
      +// ProxyTCP proxies between two TCP connections.
      +// Because TLS connections don't have CloseRead() and CloseWrite() methods, our
      +// temporary solution is to use timeouts.
      +func ProxyTCP(conn1, conn2 net.Conn, tlsWriteDeadline, tlsReadDeadline time.Duration) {
      +	var wg sync.WaitGroup
      +	wg.Add(2)
      +
      +	go copyBytes(conn1, conn2, &wg, tlsWriteDeadline, tlsReadDeadline)
      +	go copyBytes(conn2, conn1, &wg, tlsWriteDeadline, tlsReadDeadline)
      +
      +	wg.Wait()
      +	conn1.Close()
      +	conn2.Close()
      +}
      +
      +func copyBytes(dst, src net.Conn, wg *sync.WaitGroup, writeDeadline, readDeadline time.Duration) {
      +	defer wg.Done()
      +	n, err := io.Copy(dst, src)
      +	if err != nil {
      +		log.Errorf("proxy i/o error: %v", err)
      +	}
      +
      +	if cr, ok := src.(*net.TCPConn); ok {
      +		cr.CloseRead()
      +	} else {
      +		// For TLS connections.
      +		wto := time.Now().Add(writeDeadline)
      +		src.SetWriteDeadline(wto)
      +	}
      +
      +	if cw, ok := dst.(*net.TCPConn); ok {
      +		cw.CloseWrite()
      +	} else {
      +		// For TLS connections.
      +		rto := time.Now().Add(readDeadline)
      +		dst.SetReadDeadline(rto)
      +	}
      +
      +	log.Debugf("proxy copied %d bytes %s -> %s", n, src.RemoteAddr(), dst.RemoteAddr())
      +}
      diff --git a/vendor/github.com/coreos/pkg/netutil/url.go b/vendor/github.com/coreos/pkg/netutil/url.go
      new file mode 100644
      index 00000000..046cf5dc
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/netutil/url.go
      @@ -0,0 +1,17 @@
      +package netutil
      +
      +import (
      +	"net/url"
      +)
      +
      +// MergeQuery appends additional query values to an existing URL.
      +func MergeQuery(u url.URL, q url.Values) url.URL {
      +	uv := u.Query()
      +	for k, vs := range q {
      +		for _, v := range vs {
      +			uv.Add(k, v)
      +		}
      +	}
      +	u.RawQuery = uv.Encode()
      +	return u
      +}
      diff --git a/vendor/github.com/coreos/pkg/netutil/url_test.go b/vendor/github.com/coreos/pkg/netutil/url_test.go
      new file mode 100644
      index 00000000..6fbcb814
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/netutil/url_test.go
      @@ -0,0 +1,86 @@
      +package netutil
      +
      +import (
      +	"net/url"
      +	"reflect"
      +	"testing"
      +)
      +
      +func TestMergeQuery(t *testing.T) {
      +	tests := []struct {
      +		u string
      +		q url.Values
      +		w string
      +	}{
      +		// No values
      +		{
      +			u: "http://example.com",
      +			q: nil,
      +			w: "http://example.com",
      +		},
      +		// No additional values
      +		{
      +			u: "http://example.com?foo=bar",
      +			q: nil,
      +			w: "http://example.com?foo=bar",
      +		},
      +		// Simple addition
      +		{
      +			u: "http://example.com",
      +			q: url.Values{
      +				"foo": []string{"bar"},
      +			},
      +			w: "http://example.com?foo=bar",
      +		},
      +		// Addition with existing values
      +		{
      +			u: "http://example.com?dog=boo",
      +			q: url.Values{
      +				"foo": []string{"bar"},
      +			},
      +			w: "http://example.com?dog=boo&foo=bar",
      +		},
      +		// Merge
      +		{
      +			u: "http://example.com?dog=boo",
      +			q: url.Values{
      +				"dog": []string{"elroy"},
      +			},
      +			w: "http://example.com?dog=boo&dog=elroy",
      +		},
      +		// Add and merge
      +		{
      +			u: "http://example.com?dog=boo",
      +			q: url.Values{
      +				"dog": []string{"elroy"},
      +				"foo": []string{"bar"},
      +			},
      +			w: "http://example.com?dog=boo&dog=elroy&foo=bar",
      +		},
      +		// Multivalue merge
      +		{
      +			u: "http://example.com?dog=boo",
      +			q: url.Values{
      +				"dog": []string{"elroy", "penny"},
      +			},
      +			w: "http://example.com?dog=boo&dog=elroy&dog=penny",
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		ur, err := url.Parse(tt.u)
      +		if err != nil {
      +			t.Errorf("case %d: failed parsing test url: %v, error: %v", i, tt.u, err)
      +		}
      +
      +		got := MergeQuery(*ur, tt.q)
      +		want, err := url.Parse(tt.w)
      +		if err != nil {
      +			t.Errorf("case %d: failed parsing want url: %v, error: %v", i, tt.w, err)
      +		}
      +
      +		if !reflect.DeepEqual(*want, got) {
      +			t.Errorf("case %d: want: %v, got: %v", i, *want, got)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/pkg/test b/vendor/github.com/coreos/pkg/test
      new file mode 100755
      index 00000000..4c78a0c4
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/test
      @@ -0,0 +1,56 @@
      +#!/bin/bash -e
      +#
      +# Run all tests (not including functional)
      +#   ./test
      +#   ./test -v
      +#
      +# Run tests for one package
      +#   PKG=./unit ./test
      +#   PKG=ssh ./test
      +#
      +
      +# Invoke ./cover for HTML output
      +COVER=${COVER:-"-cover"}
      +
      +source ./build
      +
      +TESTABLE="cryptoutil flagutil timeutil netutil yamlutil httputil health multierror"
      +FORMATTABLE="$TESTABLE capnslog"
      +
      +# user has not provided PKG override
      +if [ -z "$PKG" ]; then
      +	TEST=$TESTABLE
      +	FMT=$FORMATTABLE
      +
      +# user has provided PKG override
      +else
      +	# strip out slashes and dots from PKG=./foo/
      +	TEST=${PKG//\//}
      +	TEST=${TEST//./}
      +
      +	# only run gofmt on packages provided by user
      +	FMT="$TEST"
      +fi
      +
      +# split TEST into an array and prepend repo path to each local package
      +split=(${TEST// / })
      +TEST=${split[@]/#/github.com/coreos/pkg/}
      +
      +echo "Running tests..."
      +go test ${COVER} $@ ${TEST}
      +
      +echo "Checking gofmt..."
      +fmtRes=$(gofmt -l $FMT)
      +if [ -n "${fmtRes}" ]; then
      +	echo -e "gofmt checking failed:\n${fmtRes}"
      +	exit 255
      +fi
      +
      +echo "Checking govet..."
      +vetRes=$(go vet $TEST)
      +if [ -n "${vetRes}" ]; then
      +	echo -e "govet checking failed:\n${vetRes}"
      +	exit 255
      +fi
      +
      +echo "Success"
      diff --git a/vendor/github.com/coreos/pkg/timeutil/backoff.go b/vendor/github.com/coreos/pkg/timeutil/backoff.go
      new file mode 100644
      index 00000000..b34fb496
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/timeutil/backoff.go
      @@ -0,0 +1,15 @@
      +package timeutil
      +
      +import (
      +	"time"
      +)
      +
      +func ExpBackoff(prev, max time.Duration) time.Duration {
      +	if prev == 0 {
      +		return time.Second
      +	}
      +	if prev > max/2 {
      +		return max
      +	}
      +	return 2 * prev
      +}
      diff --git a/vendor/github.com/coreos/pkg/timeutil/backoff_test.go b/vendor/github.com/coreos/pkg/timeutil/backoff_test.go
      new file mode 100644
      index 00000000..d8392bc5
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/timeutil/backoff_test.go
      @@ -0,0 +1,52 @@
      +package timeutil
      +
      +import (
      +	"testing"
      +	"time"
      +)
      +
      +func TestExpBackoff(t *testing.T) {
      +	tests := []struct {
      +		prev time.Duration
      +		max  time.Duration
      +		want time.Duration
      +	}{
      +		{
      +			prev: time.Duration(0),
      +			max:  time.Minute,
      +			want: time.Second,
      +		},
      +		{
      +			prev: time.Second,
      +			max:  time.Minute,
      +			want: 2 * time.Second,
      +		},
      +		{
      +			prev: 16 * time.Second,
      +			max:  time.Minute,
      +			want: 32 * time.Second,
      +		},
      +		{
      +			prev: 32 * time.Second,
      +			max:  time.Minute,
      +			want: time.Minute,
      +		},
      +		{
      +			prev: time.Minute,
      +			max:  time.Minute,
      +			want: time.Minute,
      +		},
      +		{
      +			prev: 2 * time.Minute,
      +			max:  time.Minute,
      +			want: time.Minute,
      +		},
      +	}
      +
      +	for i, tt := range tests {
      +		got := ExpBackoff(tt.prev, tt.max)
      +		if tt.want != got {
      +			t.Errorf("case %d: want=%v got=%v", i, tt.want, got)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/coreos/pkg/yamlutil/yaml.go b/vendor/github.com/coreos/pkg/yamlutil/yaml.go
      new file mode 100644
      index 00000000..4681b7bd
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/yamlutil/yaml.go
      @@ -0,0 +1,55 @@
      +package yamlutil
      +
      +import (
      +	"flag"
      +	"fmt"
      +	"strings"
      +
      +	"gopkg.in/yaml.v1"
      +)
      +
      +// SetFlagsFromYaml goes through all registered flags in the given flagset,
      +// and if they are not already set it attempts to set their values from
      +// the YAML config. It will use the key REPLACE(UPPERCASE(flagname), '-', '_')
      +func SetFlagsFromYaml(fs *flag.FlagSet, rawYaml []byte) (err error) {
      +	conf := make(map[string]string)
      +	if err = yaml.Unmarshal(rawYaml, conf); err != nil {
      +		return
      +	}
      +	alreadySet := map[string]struct{}{}
      +	fs.Visit(func(f *flag.Flag) {
      +		alreadySet[f.Name] = struct{}{}
      +	})
      +
      +	errs := make([]error, 0)
      +	fs.VisitAll(func(f *flag.Flag) {
      +		if f.Name == "" {
      +			return
      +		}
      +		if _, ok := alreadySet[f.Name]; ok {
      +			return
      +		}
      +		tag := strings.Replace(strings.ToUpper(f.Name), "-", "_", -1)
      +		val, ok := conf[tag]
      +		if !ok {
      +			return
      +		}
      +		if serr := fs.Set(f.Name, val); serr != nil {
      +			errs = append(errs, fmt.Errorf("invalid value %q for %s: %v", val, tag, serr))
      +		}
      +	})
      +	if len(errs) != 0 {
      +		err = ErrorSlice(errs)
      +	}
      +	return
      +}
      +
      +type ErrorSlice []error
      +
      +func (e ErrorSlice) Error() string {
      +	s := ""
      +	for _, err := range e {
      +		s += ", " + err.Error()
      +	}
      +	return "Errors: " + s
      +}
      diff --git a/vendor/github.com/coreos/pkg/yamlutil/yaml_test.go b/vendor/github.com/coreos/pkg/yamlutil/yaml_test.go
      new file mode 100644
      index 00000000..46c07436
      --- /dev/null
      +++ b/vendor/github.com/coreos/pkg/yamlutil/yaml_test.go
      @@ -0,0 +1,80 @@
      +package yamlutil
      +
      +import (
      +	"flag"
      +	"testing"
      +)
      +
      +func TestSetFlagsFromYaml(t *testing.T) {
      +	config := "A: foo\nC: woof"
      +	fs := flag.NewFlagSet("testing", flag.ExitOnError)
      +	fs.String("a", "", "")
      +	fs.String("b", "", "")
      +	fs.String("c", "", "")
      +	fs.Parse([]string{})
      +
      +	// flags should be settable using yaml vars
      +	// and command-line flags
      +	if err := fs.Set("b", "bar"); err != nil {
      +		t.Fatal(err)
      +	}
      +	// command-line flags take precedence over the file
      +	if err := fs.Set("c", "quack"); err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	// first verify that flags are as expected before reading the file
      +	for f, want := range map[string]string{
      +		"a": "",
      +		"b": "bar",
      +		"c": "quack",
      +	} {
      +		if got := fs.Lookup(f).Value.String(); got != want {
      +			t.Fatalf("flag %q=%q, want %q", f, got, want)
      +		}
      +	}
      +
      +	// now read the yaml and verify flags were updated as expected
      +	err := SetFlagsFromYaml(fs, []byte(config))
      +	if err != nil {
      +		t.Errorf("err=%v, want nil", err)
      +	}
      +	for f, want := range map[string]string{
      +		"a": "foo",
      +		"b": "bar",
      +		"c": "quack",
      +	} {
      +		if got := fs.Lookup(f).Value.String(); got != want {
      +			t.Errorf("flag %q=%q, want %q", f, got, want)
      +		}
      +	}
      +}
      +
      +func TestSetFlagsFromYamlBad(t *testing.T) {
      +	// now verify that an error is propagated
      +	fs := flag.NewFlagSet("testing", flag.ExitOnError)
      +	fs.Int("x", 0, "")
      +	badConf := "X: not_a_number"
      +	if err := SetFlagsFromYaml(fs, []byte(badConf)); err == nil {
      +		t.Errorf("got err=nil, flag x=%q, want err != nil", fs.Lookup("x").Value.String())
      +	}
      +}
      +
      +func TestSetFlagsFromYamlMultiError(t *testing.T) {
      +	fs := flag.NewFlagSet("testing", flag.ExitOnError)
      +	fs.Int("x", 0, "")
      +	fs.Int("y", 0, "")
      +	fs.Int("z", 0, "")
      +	conf := "X: foo\nY: bar\nZ: 3"
      +	err := SetFlagsFromYaml(fs, []byte(conf))
      +	if err == nil {
      +		t.Errorf("got err= nil, want err != nil")
      +	}
      +	es, ok := err.(ErrorSlice)
      +	if !ok {
      +		t.Errorf("Got ok=false want ok=true")
      +	}
      +	if len(es) != 2 {
      +		t.Errorf("2 errors should be contained in the error, got %d errors", len(es))
      +	}
      +}
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/LICENSE b/vendor/github.com/coreos/yaml/LICENSE
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/LICENSE
      rename to vendor/github.com/coreos/yaml/LICENSE
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/LICENSE.libyaml b/vendor/github.com/coreos/yaml/LICENSE.libyaml
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/LICENSE.libyaml
      rename to vendor/github.com/coreos/yaml/LICENSE.libyaml
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/README.md b/vendor/github.com/coreos/yaml/README.md
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/README.md
      rename to vendor/github.com/coreos/yaml/README.md
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/apic.go b/vendor/github.com/coreos/yaml/apic.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/apic.go
      rename to vendor/github.com/coreos/yaml/apic.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/decode.go b/vendor/github.com/coreos/yaml/decode.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/decode.go
      rename to vendor/github.com/coreos/yaml/decode.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/decode_test.go b/vendor/github.com/coreos/yaml/decode_test.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/decode_test.go
      rename to vendor/github.com/coreos/yaml/decode_test.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/emitterc.go b/vendor/github.com/coreos/yaml/emitterc.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/emitterc.go
      rename to vendor/github.com/coreos/yaml/emitterc.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/encode.go b/vendor/github.com/coreos/yaml/encode.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/encode.go
      rename to vendor/github.com/coreos/yaml/encode.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/encode_test.go b/vendor/github.com/coreos/yaml/encode_test.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/encode_test.go
      rename to vendor/github.com/coreos/yaml/encode_test.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/parserc.go b/vendor/github.com/coreos/yaml/parserc.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/parserc.go
      rename to vendor/github.com/coreos/yaml/parserc.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/readerc.go b/vendor/github.com/coreos/yaml/readerc.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/readerc.go
      rename to vendor/github.com/coreos/yaml/readerc.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/resolve.go b/vendor/github.com/coreos/yaml/resolve.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/resolve.go
      rename to vendor/github.com/coreos/yaml/resolve.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/scannerc.go b/vendor/github.com/coreos/yaml/scannerc.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/scannerc.go
      rename to vendor/github.com/coreos/yaml/scannerc.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/sorter.go b/vendor/github.com/coreos/yaml/sorter.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/sorter.go
      rename to vendor/github.com/coreos/yaml/sorter.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/suite_test.go b/vendor/github.com/coreos/yaml/suite_test.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/suite_test.go
      rename to vendor/github.com/coreos/yaml/suite_test.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/writerc.go b/vendor/github.com/coreos/yaml/writerc.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/writerc.go
      rename to vendor/github.com/coreos/yaml/writerc.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/yaml.go b/vendor/github.com/coreos/yaml/yaml.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/yaml.go
      rename to vendor/github.com/coreos/yaml/yaml.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/yamlh.go b/vendor/github.com/coreos/yaml/yamlh.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/yamlh.go
      rename to vendor/github.com/coreos/yaml/yamlh.go
      diff --git a/vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/yamlprivateh.go b/vendor/github.com/coreos/yaml/yamlprivateh.go
      similarity index 100%
      rename from vendor/github.com/coreos/coreos-cloudinit/Godeps/_workspace/src/github.com/coreos/yaml/yamlprivateh.go
      rename to vendor/github.com/coreos/yaml/yamlprivateh.go
      diff --git a/vendor/github.com/davecgh/go-spew/.gitignore b/vendor/github.com/davecgh/go-spew/.gitignore
      new file mode 100644
      index 00000000..00268614
      --- /dev/null
      +++ b/vendor/github.com/davecgh/go-spew/.gitignore
      @@ -0,0 +1,22 @@
      +# Compiled Object files, Static and Dynamic libs (Shared Objects)
      +*.o
      +*.a
      +*.so
      +
      +# Folders
      +_obj
      +_test
      +
      +# Architecture specific extensions/prefixes
      +*.[568vq]
      +[568vq].out
      +
      +*.cgo1.go
      +*.cgo2.c
      +_cgo_defun.c
      +_cgo_gotypes.go
      +_cgo_export.*
      +
      +_testmain.go
      +
      +*.exe
      diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml
      new file mode 100644
      index 00000000..10f469a2
      --- /dev/null
      +++ b/vendor/github.com/davecgh/go-spew/.travis.yml
      @@ -0,0 +1,11 @@
      +language: go
      +go: 1.2
      +install:
      +    - go get -v code.google.com/p/go.tools/cmd/cover
      +script:
      +    - go test -v -tags=disableunsafe ./spew
      +    - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
      +after_success:
      +    - go get -v github.com/mattn/goveralls
      +    - export PATH=$PATH:$HOME/gopath/bin
      +    - goveralls -coverprofile=profile.cov -service=travis-ci
      diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
      new file mode 100644
      index 00000000..2a7cfd2b
      --- /dev/null
      +++ b/vendor/github.com/davecgh/go-spew/LICENSE
      @@ -0,0 +1,13 @@
      +Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
      +
      +Permission to use, copy, modify, and distribute this software for any
      +purpose with or without fee is hereby granted, provided that the above
      +copyright notice and this permission notice appear in all copies.
      +
      +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
      +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
      +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
      +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
      +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
      +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
      +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
      diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md
      new file mode 100644
      index 00000000..777a8e1d
      --- /dev/null
      +++ b/vendor/github.com/davecgh/go-spew/README.md
      @@ -0,0 +1,194 @@
      +go-spew
      +=======
      +
      +[![Build Status](https://travis-ci.org/davecgh/go-spew.png?branch=master)]
      +(https://travis-ci.org/davecgh/go-spew) [![Coverage Status]
      +(https://coveralls.io/repos/davecgh/go-spew/badge.png?branch=master)]
      +(https://coveralls.io/r/davecgh/go-spew?branch=master)
      +
      +Go-spew implements a deep pretty printer for Go data structures to aid in
      +debugging.  A comprehensive suite of tests with 100% test coverage is provided
      +to ensure proper functionality.  See `test_coverage.txt` for the gocov coverage
      +report.  Go-spew is licensed under the liberal ISC license, so it may be used in
      +open source or commercial projects.
      +
      +If you're interested in reading about how this package came to life and some
      +of the challenges involved in providing a deep pretty printer, there is a blog
      +post about it
      +[here](https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
      +
      +## Documentation
      +
      +[![GoDoc](https://godoc.org/github.com/davecgh/go-spew/spew?status.png)]
      +(http://godoc.org/github.com/davecgh/go-spew/spew)
      +
      +Full `go doc` style documentation for the project can be viewed online without
      +installing this package by using the excellent GoDoc site here:
      +http://godoc.org/github.com/davecgh/go-spew/spew
      +
      +You can also view the documentation locally once the package is installed with
      +the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
      +http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
      +
      +## Installation
      +
      +```bash
      +$ go get -u github.com/davecgh/go-spew/spew
      +```
      +
      +## Quick Start
      +
      +Add this import line to the file you're working in:
      +
      +```Go
      +import "github.com/davecgh/go-spew/spew"
      +```
      +
      +To dump a variable with full newlines, indentation, type, and pointer
      +information use Dump, Fdump, or Sdump:
      +
      +```Go
      +spew.Dump(myVar1, myVar2, ...)
      +spew.Fdump(someWriter, myVar1, myVar2, ...)
      +str := spew.Sdump(myVar1, myVar2, ...)
      +```
      +
      +Alternatively, if you would prefer to use format strings with a compacted inline
      +printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
      +compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
      +and pointer addresses): 
      +
      +```Go
      +spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
      +spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
      +spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
      +spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
      +```
      +
      +## Debugging a Web Application Example
      +
      +Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
      +
      +```Go
      +package main
      +
      +import (
      +    "fmt"
      +    "html"
      +    "net/http"
      +
      +    "github.com/davecgh/go-spew/spew"
      +)
      +
      +func handler(w http.ResponseWriter, r *http.Request) {
      +    w.Header().Set("Content-Type", "text/html")
      +    fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
      +    fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
      +}
      +
      +func main() {
      +    http.HandleFunc("/", handler)
      +    http.ListenAndServe(":8080", nil)
      +}
      +```
      +
      +## Sample Dump Output
      +
      +```
      +(main.Foo) {
      + unexportedField: (*main.Bar)(0xf84002e210)({
      +  flag: (main.Flag) flagTwo,
      +  data: (uintptr) <nil>
      + }),
      + ExportedField: (map[interface {}]interface {}) {
      +  (string) "one": (bool) true
      + }
      +}
      +([]uint8) {
      + 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
      + 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
      + 00000020  31 32                                             |12|
      +}
      +```
      +
      +## Sample Formatter Output
      +
      +Double pointer to a uint8:
      +```
      +	  %v: <**>5
      +	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
      +	 %#v: (**uint8)5
      +	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
      +```
      +
      +Pointer to circular struct with a uint8 field and a pointer to itself:
      +```
      +	  %v: <*>{1 <*><shown>}
      +	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
      +	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
      +	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
      +```
      +
      +## Configuration Options
      +
      +Configuration of spew is handled by fields in the ConfigState type. For
      +convenience, all of the top-level functions use a global state available via the
      +spew.Config global.
      +
      +It is also possible to create a ConfigState instance that provides methods
      +equivalent to the top-level functions. This allows concurrent configuration
      +options. See the ConfigState documentation for more details.
      +
      +```
      +* Indent
      +	String to use for each indentation level for Dump functions.
      +	It is a single space by default.  A popular alternative is "\t".
      +
      +* MaxDepth
      +	Maximum number of levels to descend into nested data structures.
      +	There is no limit by default.
      +
      +* DisableMethods
      +	Disables invocation of error and Stringer interface methods.
      +	Method invocation is enabled by default.
      +
      +* DisablePointerMethods
      +	Disables invocation of error and Stringer interface methods on types
      +	which only accept pointer receivers from non-pointer variables.  This option
      +	relies on access to the unsafe package, so it will not have any effect when
      +	running in environments without access to the unsafe package such as Google
      +	App Engine or with the "disableunsafe" build tag specified.
      +	Pointer method invocation is enabled by default.
      +
      +* ContinueOnMethod
      +	Enables recursion into types after invoking error and Stringer interface
      +	methods. Recursion after method invocation is disabled by default.
      +
      +* SortKeys
      +	Specifies map keys should be sorted before being printed. Use
      +	this to have a more deterministic, diffable output.  Note that
      +	only native types (bool, int, uint, floats, uintptr and string)
      +	and types which implement error or Stringer interfaces are supported,
      +	with other types sorted according to the reflect.Value.String() output
      +	which guarantees display stability.  Natural map order is used by
      +	default.
      +
      +* SpewKeys
      +	SpewKeys specifies that, as a last resort attempt, map keys should be
      +	spewed to strings and sorted by those strings.  This is only considered
      +	if SortKeys is true.
      +
      +```
      +
      +## Unsafe Package Dependency
      +
      +This package relies on the unsafe package to perform some of the more advanced
      +features, however it also supports a "limited" mode which allows it to work in
      +environments where the unsafe package is not available.  By default, it will
      +operate in this mode on Google App Engine.  The "disableunsafe" build tag may
      +also be specified to force the package to build without using the unsafe
      +package.
      +
      +## License
      +
      +Go-spew is licensed under the liberal ISC License.
      diff --git a/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/davecgh/go-spew/cov_report.sh
      new file mode 100644
      index 00000000..9579497e
      --- /dev/null
      +++ b/vendor/github.com/davecgh/go-spew/cov_report.sh
      @@ -0,0 +1,22 @@
      +#!/bin/sh
      +
      +# This script uses gocov to generate a test coverage report.
       +# The gocov tool may be obtained with the following command:
      +#   go get github.com/axw/gocov/gocov
      +#
      +# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
      +
      +# Check for gocov.
      +if ! type gocov >/dev/null 2>&1; then
      +	echo >&2 "This script requires the gocov tool."
      +	echo >&2 "You may obtain it with the following command:"
      +	echo >&2 "go get github.com/axw/gocov/gocov"
      +	exit 1
      +fi
      +
      +# Only run the cgo tests if gcc is installed.
      +if type gcc >/dev/null 2>&1; then
      +	(cd spew && gocov test -tags testcgo | gocov report)
      +else
      +	(cd spew && gocov test | gocov report)
      +fi
      diff --git a/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/davecgh/go-spew/test_coverage.txt
      new file mode 100644
      index 00000000..2cd087a2
      --- /dev/null
      +++ b/vendor/github.com/davecgh/go-spew/test_coverage.txt
      @@ -0,0 +1,61 @@
      +
      +github.com/davecgh/go-spew/spew/dump.go		 dumpState.dump			 100.00% (88/88)
      +github.com/davecgh/go-spew/spew/format.go	 formatState.format		 100.00% (82/82)
      +github.com/davecgh/go-spew/spew/format.go	 formatState.formatPtr		 100.00% (52/52)
      +github.com/davecgh/go-spew/spew/dump.go		 dumpState.dumpPtr		 100.00% (44/44)
      +github.com/davecgh/go-spew/spew/dump.go		 dumpState.dumpSlice		 100.00% (39/39)
      +github.com/davecgh/go-spew/spew/common.go	 handleMethods			 100.00% (30/30)
      +github.com/davecgh/go-spew/spew/common.go	 printHexPtr			 100.00% (18/18)
      +github.com/davecgh/go-spew/spew/common.go	 unsafeReflectValue		 100.00% (13/13)
      +github.com/davecgh/go-spew/spew/format.go	 formatState.constructOrigFormat 100.00% (12/12)
      +github.com/davecgh/go-spew/spew/dump.go		 fdump				 100.00% (11/11)
      +github.com/davecgh/go-spew/spew/format.go	 formatState.Format		 100.00% (11/11)
      +github.com/davecgh/go-spew/spew/common.go	 init				 100.00% (10/10)
      +github.com/davecgh/go-spew/spew/common.go	 printComplex			 100.00% (9/9)
      +github.com/davecgh/go-spew/spew/common.go	 valuesSorter.Less		 100.00% (8/8)
      +github.com/davecgh/go-spew/spew/format.go	 formatState.buildDefaultFormat	 100.00% (7/7)
      +github.com/davecgh/go-spew/spew/format.go	 formatState.unpackValue	 100.00% (5/5)
      +github.com/davecgh/go-spew/spew/dump.go		 dumpState.indent		 100.00% (4/4)
      +github.com/davecgh/go-spew/spew/common.go	 catchPanic			 100.00% (4/4)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.convertArgs	 100.00% (4/4)
      +github.com/davecgh/go-spew/spew/spew.go		 convertArgs			 100.00% (4/4)
      +github.com/davecgh/go-spew/spew/format.go	 newFormatter			 100.00% (3/3)
      +github.com/davecgh/go-spew/spew/dump.go		 Sdump				 100.00% (3/3)
      +github.com/davecgh/go-spew/spew/common.go	 printBool			 100.00% (3/3)
      +github.com/davecgh/go-spew/spew/common.go	 sortValues			 100.00% (3/3)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Sdump		 100.00% (3/3)
      +github.com/davecgh/go-spew/spew/dump.go		 dumpState.unpackValue		 100.00% (3/3)
      +github.com/davecgh/go-spew/spew/spew.go		 Printf				 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/spew.go		 Println			 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/spew.go		 Sprint				 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/spew.go		 Sprintf			 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/spew.go		 Sprintln			 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/common.go	 printFloat			 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 NewDefaultConfig		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/common.go	 printInt			 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/common.go	 printUint			 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/common.go	 valuesSorter.Len		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/common.go	 valuesSorter.Swap		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Errorf		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Fprint		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Fprintf		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Fprintln		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Print		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Printf		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Println		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Sprint		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Sprintf		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Sprintln		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.NewFormatter	 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Fdump		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/config.go	 ConfigState.Dump		 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/dump.go		 Fdump				 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/dump.go		 Dump				 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/spew.go		 Fprintln			 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/format.go	 NewFormatter			 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/spew.go		 Errorf				 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/spew.go		 Fprint				 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/spew.go		 Fprintf			 100.00% (1/1)
      +github.com/davecgh/go-spew/spew/spew.go		 Print				 100.00% (1/1)
      +github.com/davecgh/go-spew/spew			 ------------------------------- 100.00% (505/505)
      +
      diff --git a/vendor/github.com/golang/protobuf/.gitignore b/vendor/github.com/golang/protobuf/.gitignore
      new file mode 100644
      index 00000000..2f337392
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/.gitignore
      @@ -0,0 +1,15 @@
      +.DS_Store
      +*.[568ao]
      +*.ao
      +*.so
      +*.pyc
      +._*
      +.nfs.*
      +[568a].out
      +*~
      +*.orig
      +core
      +_obj
      +_test
      +_testmain.go
      +protoc-gen-go/testdata/multi/*.pb.go
      diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
      new file mode 100644
      index 00000000..15167cd7
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/AUTHORS
      @@ -0,0 +1,3 @@
      +# This source code refers to The Go Authors for copyright purposes.
      +# The master list of authors is in the main Go distribution,
      +# visible at http://tip.golang.org/AUTHORS.
      diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
      new file mode 100644
      index 00000000..1c4577e9
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
      @@ -0,0 +1,3 @@
      +# This source code was written by the Go contributors.
      +# The master list of contributors is in the main Go distribution,
      +# visible at http://tip.golang.org/CONTRIBUTORS.
      diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
      new file mode 100644
      index 00000000..1b1b1921
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/LICENSE
      @@ -0,0 +1,31 @@
      +Go support for Protocol Buffers - Google's data interchange format
      +
      +Copyright 2010 The Go Authors.  All rights reserved.
      +https://github.com/golang/protobuf
      +
      +Redistribution and use in source and binary forms, with or without
      +modification, are permitted provided that the following conditions are
      +met:
      +
      +    * Redistributions of source code must retain the above copyright
      +notice, this list of conditions and the following disclaimer.
      +    * Redistributions in binary form must reproduce the above
      +copyright notice, this list of conditions and the following disclaimer
      +in the documentation and/or other materials provided with the
      +distribution.
      +    * Neither the name of Google Inc. nor the names of its
      +contributors may be used to endorse or promote products derived from
      +this software without specific prior written permission.
      +
      +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      diff --git a/vendor/github.com/golang/protobuf/Make.protobuf b/vendor/github.com/golang/protobuf/Make.protobuf
      new file mode 100644
      index 00000000..15071de1
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/Make.protobuf
      @@ -0,0 +1,40 @@
      +# Go support for Protocol Buffers - Google's data interchange format
      +#
      +# Copyright 2010 The Go Authors.  All rights reserved.
      +# https://github.com/golang/protobuf
      +#
      +# Redistribution and use in source and binary forms, with or without
      +# modification, are permitted provided that the following conditions are
      +# met:
      +#
      +#     * Redistributions of source code must retain the above copyright
      +# notice, this list of conditions and the following disclaimer.
      +#     * Redistributions in binary form must reproduce the above
      +# copyright notice, this list of conditions and the following disclaimer
      +# in the documentation and/or other materials provided with the
      +# distribution.
      +#     * Neither the name of Google Inc. nor the names of its
      +# contributors may be used to endorse or promote products derived from
      +# this software without specific prior written permission.
      +#
      +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +# Includable Makefile to add a rule for generating .pb.go files from .proto files
      +# (Google protocol buffer descriptions).
      +# Typical use if myproto.proto is a file in package mypackage in this directory:
      +#
      +#	include $(GOROOT)/src/pkg/github.com/golang/protobuf/Make.protobuf
      +
      +%.pb.go:	%.proto
      +	protoc --go_out=. $<
      +
      diff --git a/vendor/github.com/golang/protobuf/Makefile b/vendor/github.com/golang/protobuf/Makefile
      new file mode 100644
      index 00000000..80b6a17d
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/Makefile
      @@ -0,0 +1,54 @@
      +# Go support for Protocol Buffers - Google's data interchange format
      +#
      +# Copyright 2010 The Go Authors.  All rights reserved.
      +# https://github.com/golang/protobuf
      +#
      +# Redistribution and use in source and binary forms, with or without
      +# modification, are permitted provided that the following conditions are
      +# met:
      +#
      +#     * Redistributions of source code must retain the above copyright
      +# notice, this list of conditions and the following disclaimer.
      +#     * Redistributions in binary form must reproduce the above
      +# copyright notice, this list of conditions and the following disclaimer
      +# in the documentation and/or other materials provided with the
      +# distribution.
      +#     * Neither the name of Google Inc. nor the names of its
      +# contributors may be used to endorse or promote products derived from
      +# this software without specific prior written permission.
      +#
      +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +
      +all:	install
      +
      +install:
      +	go install ./proto ./jsonpb ./ptypes
      +	go install ./protoc-gen-go
      +
      +test:
      +	go test ./proto ./jsonpb ./ptypes
      +	make -C protoc-gen-go/testdata test
      +
      +clean:
      +	go clean ./...
      +
      +nuke:
      +	go clean -i ./...
      +
      +regenerate:
      +	make -C protoc-gen-go/descriptor regenerate
      +	make -C protoc-gen-go/plugin regenerate
      +	make -C protoc-gen-go/testdata regenerate
      +	make -C proto/testdata regenerate
      +	make -C jsonpb/jsonpb_test_proto regenerate
      diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md
      new file mode 100644
      index 00000000..8fdc89b4
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/README.md
      @@ -0,0 +1,199 @@
      +# Go support for Protocol Buffers
      +
      +Google's data interchange format.
      +Copyright 2010 The Go Authors.
      +https://github.com/golang/protobuf
      +
      +This package and the code it generates requires at least Go 1.4.
      +
      +This software implements Go bindings for protocol buffers.  For
      +information about protocol buffers themselves, see
      +	https://developers.google.com/protocol-buffers/
      +
      +## Installation ##
      +
      +To use this software, you must:
      +- Install the standard C++ implementation of protocol buffers from
      +	https://developers.google.com/protocol-buffers/
      +- Of course, install the Go compiler and tools from
      +	https://golang.org/
      +  See
      +	https://golang.org/doc/install
      +  for details or, if you are using gccgo, follow the instructions at
      +	https://golang.org/doc/install/gccgo
      +- Grab the code from the repository and install the proto package.
      +  The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`.
      +  The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
      +  defaulting to $GOPATH/bin.  It must be in your $PATH for the protocol
      +  compiler, protoc, to find it.
      +
      +This software has two parts: a 'protocol compiler plugin' that
      +generates Go source files that, once compiled, can access and manage
      +protocol buffers; and a library that implements run-time support for
      +encoding (marshaling), decoding (unmarshaling), and accessing protocol
      +buffers.
      +
      +There is support for gRPC in Go using protocol buffers.
      +See the note at the bottom of this file for details.
      +
      +There are no insertion points in the plugin.
      +
      +
      +## Using protocol buffers with Go ##
      +
      +Once the software is installed, there are two steps to using it.
      +First you must compile the protocol buffer definitions and then import
      +them, with the support library, into your program.
      +
      +To compile the protocol buffer definition, run protoc with the --go_out
      +parameter set to the directory you want to output the Go code to.
      +
      +	protoc --go_out=. *.proto
      +
      +The generated files will be suffixed .pb.go.  See the Test code below
      +for an example using such a file.
      +
      +
      +The package comment for the proto library contains text describing
      +the interface provided in Go for protocol buffers. Here is an edited
      +version.
      +
      +==========
      +
      +The proto package converts data structures to and from the
      +wire format of protocol buffers.  It works in concert with the
      +Go source code generated for .proto files by the protocol compiler.
      +
      +A summary of the properties of the protocol buffer interface
      +for a protocol buffer variable v:
      +
      +  - Names are turned from camel_case to CamelCase for export.
      +  - There are no methods on v to set fields; just treat
      +  	them as structure fields.
      +  - There are getters that return a field's value if set,
      +	and return the field's default value if unset.
      +	The getters work even if the receiver is a nil message.
      +  - The zero value for a struct is its correct initialization state.
      +	All desired fields must be set before marshaling.
      +  - A Reset() method will restore a protobuf struct to its zero state.
      +  - Non-repeated fields are pointers to the values; nil means unset.
      +	That is, optional or required field int32 f becomes F *int32.
      +  - Repeated fields are slices.
      +  - Helper functions are available to aid the setting of fields.
      +	Helpers for getting values are superseded by the
      +	GetFoo methods and their use is deprecated.
      +		msg.Foo = proto.String("hello") // set field
      +  - Constants are defined to hold the default values of all fields that
      +	have them.  They have the form Default_StructName_FieldName.
      +	Because the getter methods handle defaulted values,
      +	direct use of these constants should be rare.
      +  - Enums are given type names and maps from names to values.
      +	Enum values are prefixed with the enum's type name. Enum types have
       +	a String method, and an Enum method to assist in message construction.
      +  - Nested groups and enums have type names prefixed with the name of
      +  	the surrounding message type.
      +  - Extensions are given descriptor names that start with E_,
      +	followed by an underscore-delimited list of the nested messages
      +	that contain it (if any) followed by the CamelCased name of the
      +	extension field itself.  HasExtension, ClearExtension, GetExtension
      +	and SetExtension are functions for manipulating extensions.
      +  - Oneof field sets are given a single field in their message,
      +	with distinguished wrapper types for each possible field value.
      +  - Marshal and Unmarshal are functions to encode and decode the wire format.
      +
      +When the .proto file specifies `syntax="proto3"`, there are some differences:
      +
      +  - Non-repeated fields of non-message type are values instead of pointers.
      +  - Getters are only generated for message and oneof fields.
      +  - Enum types do not get an Enum method.
      +
      +Consider file test.proto, containing
      +
      +```proto
      +	package example;
      +	
      +	enum FOO { X = 17; };
      +	
      +	message Test {
      +	  required string label = 1;
      +	  optional int32 type = 2 [default=77];
      +	  repeated int64 reps = 3;
      +	  optional group OptionalGroup = 4 {
      +	    required string RequiredField = 5;
      +	  }
      +	}
      +```
      +
      +To create and play with a Test object from the example package,
      +
      +```go
      +	package main
      +
      +	import (
      +		"log"
      +
      +		"github.com/golang/protobuf/proto"
      +		"path/to/example"
      +	)
      +
      +	func main() {
      +		test := &example.Test {
      +			Label: proto.String("hello"),
      +			Type:  proto.Int32(17),
      +			Reps:  []int64{1, 2, 3},
      +			Optionalgroup: &example.Test_OptionalGroup {
      +				RequiredField: proto.String("good bye"),
      +			},
      +		}
      +		data, err := proto.Marshal(test)
      +		if err != nil {
      +			log.Fatal("marshaling error: ", err)
      +		}
      +		newTest := &example.Test{}
      +		err = proto.Unmarshal(data, newTest)
      +		if err != nil {
      +			log.Fatal("unmarshaling error: ", err)
      +		}
      +		// Now test and newTest contain the same data.
      +		if test.GetLabel() != newTest.GetLabel() {
      +			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
      +		}
      +		// etc.
      +	}
      +```
      +
      +## Parameters ##
      +
      +To pass extra parameters to the plugin, use a comma-separated
      +parameter list separated from the output directory by a colon:
      +
      +
      +	protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto
      +
      +
      +- `import_prefix=xxx` - a prefix that is added onto the beginning of
      +  all imports. Useful for things like generating protos in a
      +  subdirectory, or regenerating vendored protobufs in-place.
      +- `import_path=foo/bar` - used as the package if no input files
      +  declare `go_package`. If it contains slashes, everything up to the
      +  rightmost slash is ignored.
      +- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
      +  load. The only plugin in this repo is `grpc`.
      +- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
      +  associated with Go package quux/shme.  This is subject to the
      +  import_prefix parameter.
      +
      +## gRPC Support ##
      +
      +If a proto file specifies RPC services, protoc-gen-go can be instructed to
      +generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
      +the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
      +the --go_out argument to protoc:
      +
      +	protoc --go_out=plugins=grpc:. *.proto
      +
      +## Plugins ##
      +
      +The `protoc-gen-go/generator` package exposes a plugin interface,
      +which is used by the gRPC code generation. This interface is not
      +supported and is subject to incompatible changes without notice.
      diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
      new file mode 100644
      index 00000000..69ed7fd0
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
      @@ -0,0 +1,722 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2015 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +/*
      +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
      +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
      +
      +This package produces a different output than the standard "encoding/json" package,
      +which does not operate correctly on protocol buffers.
      +*/
      +package jsonpb
      +
      +import (
      +	"bytes"
      +	"encoding/json"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"reflect"
      +	"sort"
      +	"strconv"
      +	"strings"
      +	"time"
      +
      +	"github.com/golang/protobuf/proto"
      +)
      +
      +var (
      +	byteArrayType = reflect.TypeOf([]byte{})
      +)
      +
      +// Marshaler is a configurable object for converting between
      +// protocol buffer objects and a JSON representation for them.
      +type Marshaler struct {
      +	// Whether to render enum values as integers, as opposed to string values.
      +	EnumsAsInts bool
      +
      +	// Whether to render fields with zero values.
      +	EmitDefaults bool
      +
      +	// A string to indent each level by. The presence of this field will
      +	// also cause a space to appear between the field separator and
       +	// value, and for newlines to appear between fields and array
      +	// elements.
      +	Indent string
      +
      +	// Whether to use the original (.proto) name for fields.
      +	OrigName bool
      +}
      +
      +// Marshal marshals a protocol buffer into JSON.
      +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
      +	writer := &errWriter{writer: out}
      +	return m.marshalObject(writer, pb, "")
      +}
      +
      +// MarshalToString converts a protocol buffer object to JSON string.
      +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
      +	var buf bytes.Buffer
      +	if err := m.Marshal(&buf, pb); err != nil {
      +		return "", err
      +	}
      +	return buf.String(), nil
      +}
      +
      +type int32Slice []int32
      +
      +// For sorting extensions ids to ensure stable output.
      +func (s int32Slice) Len() int           { return len(s) }
      +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
      +func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
      +
      +// marshalObject writes a struct to the Writer.
      +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent string) error {
      +	s := reflect.ValueOf(v).Elem()
      +
      +	// Handle well-known types.
      +	type wkt interface {
      +		XXX_WellKnownType() string
      +	}
      +	if wkt, ok := v.(wkt); ok {
      +		switch wkt.XXX_WellKnownType() {
      +		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
      +			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
      +			// "Wrappers use the same representation in JSON
      +			//  as the wrapped primitive type, ..."
      +			sprop := proto.GetProperties(s.Type())
      +			return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
      +		case "Duration":
      +			// "Generated output always contains 3, 6, or 9 fractional digits,
      +			//  depending on required precision."
      +			s, ns := s.Field(0).Int(), s.Field(1).Int()
      +			d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond
      +			x := fmt.Sprintf("%.9f", d.Seconds())
      +			x = strings.TrimSuffix(x, "000")
      +			x = strings.TrimSuffix(x, "000")
      +			out.write(`"`)
      +			out.write(x)
      +			out.write(`s"`)
      +			return out.err
      +		case "Struct":
      +			// Let marshalValue handle the `fields` map.
      +			// TODO: pass the correct Properties if needed.
      +			return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
      +		case "Timestamp":
      +			// "RFC 3339, where generated output will always be Z-normalized
      +			//  and uses 3, 6 or 9 fractional digits."
      +			s, ns := s.Field(0).Int(), s.Field(1).Int()
      +			t := time.Unix(s, ns).UTC()
      +			// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
      +			x := t.Format("2006-01-02T15:04:05.000000000")
      +			x = strings.TrimSuffix(x, "000")
      +			x = strings.TrimSuffix(x, "000")
      +			out.write(`"`)
      +			out.write(x)
      +			out.write(`Z"`)
      +			return out.err
      +		case "Value":
      +			// Value has a single oneof.
      +			kind := s.Field(0)
      +			if kind.IsNil() {
      +				// "absence of any variant indicates an error"
      +				return errors.New("nil Value")
      +			}
      +			// oneof -> *T -> T -> T.F
      +			x := kind.Elem().Elem().Field(0)
      +			// TODO: pass the correct Properties if needed.
      +			return m.marshalValue(out, &proto.Properties{}, x, indent)
      +		}
      +	}
      +
      +	out.write("{")
      +	if m.Indent != "" {
      +		out.write("\n")
      +	}
      +
      +	firstField := true
      +	for i := 0; i < s.NumField(); i++ {
      +		value := s.Field(i)
      +		valueField := s.Type().Field(i)
      +		if strings.HasPrefix(valueField.Name, "XXX_") {
      +			continue
      +		}
      +
      +		// IsNil will panic on most value kinds.
      +		switch value.Kind() {
      +		case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
      +			if value.IsNil() {
      +				continue
      +			}
      +		}
      +
      +		if !m.EmitDefaults {
      +			switch value.Kind() {
      +			case reflect.Bool:
      +				if !value.Bool() {
      +					continue
      +				}
      +			case reflect.Int32, reflect.Int64:
      +				if value.Int() == 0 {
      +					continue
      +				}
      +			case reflect.Uint32, reflect.Uint64:
      +				if value.Uint() == 0 {
      +					continue
      +				}
      +			case reflect.Float32, reflect.Float64:
      +				if value.Float() == 0 {
      +					continue
      +				}
      +			case reflect.String:
      +				if value.Len() == 0 {
      +					continue
      +				}
      +			}
      +		}
      +
      +		// Oneof fields need special handling.
      +		if valueField.Tag.Get("protobuf_oneof") != "" {
      +			// value is an interface containing &T{real_value}.
      +			sv := value.Elem().Elem() // interface -> *T -> T
      +			value = sv.Field(0)
      +			valueField = sv.Type().Field(0)
      +		}
      +		prop := jsonProperties(valueField, m.OrigName)
      +		if !firstField {
      +			m.writeSep(out)
      +		}
      +		if err := m.marshalField(out, prop, value, indent); err != nil {
      +			return err
      +		}
      +		firstField = false
      +	}
      +
      +	// Handle proto2 extensions.
      +	if ep, ok := v.(extendableProto); ok {
      +		extensions := proto.RegisteredExtensions(v)
      +		extensionMap := ep.ExtensionMap()
      +		// Sort extensions for stable output.
      +		ids := make([]int32, 0, len(extensionMap))
      +		for id := range extensionMap {
      +			ids = append(ids, id)
      +		}
      +		sort.Sort(int32Slice(ids))
      +		for _, id := range ids {
      +			desc := extensions[id]
      +			if desc == nil {
      +				// unknown extension
      +				continue
      +			}
      +			ext, extErr := proto.GetExtension(ep, desc)
      +			if extErr != nil {
      +				return extErr
      +			}
      +			value := reflect.ValueOf(ext)
      +			var prop proto.Properties
      +			prop.Parse(desc.Tag)
      +			prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
      +			if !firstField {
      +				m.writeSep(out)
      +			}
      +			if err := m.marshalField(out, &prop, value, indent); err != nil {
      +				return err
      +			}
      +			firstField = false
      +		}
      +
      +	}
      +
      +	if m.Indent != "" {
      +		out.write("\n")
      +		out.write(indent)
      +	}
      +	out.write("}")
      +	return out.err
      +}
      +
      +func (m *Marshaler) writeSep(out *errWriter) {
      +	if m.Indent != "" {
      +		out.write(",\n")
      +	} else {
      +		out.write(",")
      +	}
      +}
      +
      +// marshalField writes field description and value to the Writer.
      +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
      +	if m.Indent != "" {
      +		out.write(indent)
      +		out.write(m.Indent)
      +	}
      +	out.write(`"`)
      +	out.write(prop.JSONName)
      +	out.write(`":`)
      +	if m.Indent != "" {
      +		out.write(" ")
      +	}
      +	if err := m.marshalValue(out, prop, v, indent); err != nil {
      +		return err
      +	}
      +	return nil
      +}
      +
      +// marshalValue writes the value to the Writer.
      +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
      +
      +	var err error
      +	v = reflect.Indirect(v)
      +
      +	// Handle repeated elements.
      +	if v.Type() != byteArrayType && v.Kind() == reflect.Slice {
      +		out.write("[")
      +		comma := ""
      +		for i := 0; i < v.Len(); i++ {
      +			sliceVal := v.Index(i)
      +			out.write(comma)
      +			if m.Indent != "" {
      +				out.write("\n")
      +				out.write(indent)
      +				out.write(m.Indent)
      +				out.write(m.Indent)
      +			}
      +			m.marshalValue(out, prop, sliceVal, indent+m.Indent)
      +			comma = ","
      +		}
      +		if m.Indent != "" {
      +			out.write("\n")
      +			out.write(indent)
      +			out.write(m.Indent)
      +		}
      +		out.write("]")
      +		return out.err
      +	}
      +
      +	// Handle well-known types.
      +	// Most are handled up in marshalObject (because 99% are messages).
      +	type wkt interface {
      +		XXX_WellKnownType() string
      +	}
      +	if wkt, ok := v.Interface().(wkt); ok {
      +		switch wkt.XXX_WellKnownType() {
      +		case "NullValue":
      +			out.write("null")
      +			return out.err
      +		}
      +	}
      +
      +	// Handle enumerations.
      +	if !m.EnumsAsInts && prop.Enum != "" {
+		// Unknown enum values are stringified by the proto library as their
      +		// value. Such values should _not_ be quoted or they will be interpreted
      +		// as an enum string instead of their value.
      +		enumStr := v.Interface().(fmt.Stringer).String()
      +		var valStr string
      +		if v.Kind() == reflect.Ptr {
      +			valStr = strconv.Itoa(int(v.Elem().Int()))
      +		} else {
      +			valStr = strconv.Itoa(int(v.Int()))
      +		}
      +		isKnownEnum := enumStr != valStr
      +		if isKnownEnum {
      +			out.write(`"`)
      +		}
      +		out.write(enumStr)
      +		if isKnownEnum {
      +			out.write(`"`)
      +		}
      +		return out.err
      +	}
      +
      +	// Handle nested messages.
      +	if v.Kind() == reflect.Struct {
      +		return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent)
      +	}
      +
      +	// Handle maps.
      +	// Since Go randomizes map iteration, we sort keys for stable output.
      +	if v.Kind() == reflect.Map {
      +		out.write(`{`)
      +		keys := v.MapKeys()
      +		sort.Sort(mapKeys(keys))
      +		for i, k := range keys {
      +			if i > 0 {
      +				out.write(`,`)
      +			}
      +			if m.Indent != "" {
      +				out.write("\n")
      +				out.write(indent)
      +				out.write(m.Indent)
      +				out.write(m.Indent)
      +			}
      +
      +			b, err := json.Marshal(k.Interface())
      +			if err != nil {
      +				return err
      +			}
      +			s := string(b)
      +
      +			// If the JSON is not a string value, encode it again to make it one.
      +			if !strings.HasPrefix(s, `"`) {
      +				b, err := json.Marshal(s)
      +				if err != nil {
      +					return err
      +				}
      +				s = string(b)
      +			}
      +
      +			out.write(s)
      +			out.write(`:`)
      +			if m.Indent != "" {
      +				out.write(` `)
      +			}
      +
      +			if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil {
      +				return err
      +			}
      +		}
      +		if m.Indent != "" {
      +			out.write("\n")
      +			out.write(indent)
      +			out.write(m.Indent)
      +		}
      +		out.write(`}`)
      +		return out.err
      +	}
      +
      +	// Default handling defers to the encoding/json library.
      +	b, err := json.Marshal(v.Interface())
      +	if err != nil {
      +		return err
      +	}
      +	needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
      +	if needToQuote {
      +		out.write(`"`)
      +	}
      +	out.write(string(b))
      +	if needToQuote {
      +		out.write(`"`)
      +	}
      +	return out.err
      +}
      +
      +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
      +// This function is lenient and will decode any options permutations of the
      +// related Marshaler.
      +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
      +	inputValue := json.RawMessage{}
      +	if err := dec.Decode(&inputValue); err != nil {
      +		return err
      +	}
      +	return unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil)
      +}
      +
      +// Unmarshal unmarshals a JSON object stream into a protocol
      +// buffer. This function is lenient and will decode any options
      +// permutations of the related Marshaler.
      +func Unmarshal(r io.Reader, pb proto.Message) error {
      +	dec := json.NewDecoder(r)
      +	return UnmarshalNext(dec, pb)
      +}
      +
      +// UnmarshalString will populate the fields of a protocol buffer based
      +// on a JSON string. This function is lenient and will decode any options
      +// permutations of the related Marshaler.
      +func UnmarshalString(str string, pb proto.Message) error {
      +	return Unmarshal(strings.NewReader(str), pb)
      +}
      +
      +// unmarshalValue converts/copies a value into the target.
      +// prop may be nil.
      +func unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
      +	targetType := target.Type()
      +
      +	// Allocate memory for pointer fields.
      +	if targetType.Kind() == reflect.Ptr {
      +		target.Set(reflect.New(targetType.Elem()))
      +		return unmarshalValue(target.Elem(), inputValue, prop)
      +	}
      +
      +	// Handle well-known types.
      +	type wkt interface {
      +		XXX_WellKnownType() string
      +	}
      +	if wkt, ok := target.Addr().Interface().(wkt); ok {
      +		switch wkt.XXX_WellKnownType() {
      +		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
      +			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
      +			// "Wrappers use the same representation in JSON
      +			//  as the wrapped primitive type, except that null is allowed."
      +			// encoding/json will turn JSON `null` into Go `nil`,
      +			// so we don't have to do any extra work.
      +			return unmarshalValue(target.Field(0), inputValue, prop)
      +		case "Duration":
      +			unq, err := strconv.Unquote(string(inputValue))
      +			if err != nil {
      +				return err
      +			}
      +			d, err := time.ParseDuration(unq)
      +			if err != nil {
      +				return fmt.Errorf("bad Duration: %v", err)
      +			}
      +			ns := d.Nanoseconds()
      +			s := ns / 1e9
      +			ns %= 1e9
      +			target.Field(0).SetInt(s)
      +			target.Field(1).SetInt(ns)
      +			return nil
      +		case "Timestamp":
      +			unq, err := strconv.Unquote(string(inputValue))
      +			if err != nil {
      +				return err
      +			}
      +			t, err := time.Parse(time.RFC3339Nano, unq)
      +			if err != nil {
      +				return fmt.Errorf("bad Timestamp: %v", err)
      +			}
      +			ns := t.UnixNano()
      +			s := ns / 1e9
      +			ns %= 1e9
      +			target.Field(0).SetInt(s)
      +			target.Field(1).SetInt(ns)
      +			return nil
      +		}
      +	}
      +
      +	// Handle enums, which have an underlying type of int32,
      +	// and may appear as strings.
      +	// The case of an enum appearing as a number is handled
      +	// at the bottom of this function.
      +	if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
      +		vmap := proto.EnumValueMap(prop.Enum)
      +		// Don't need to do unquoting; valid enum names
      +		// are from a limited character set.
      +		s := inputValue[1 : len(inputValue)-1]
      +		n, ok := vmap[string(s)]
      +		if !ok {
      +			return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
      +		}
      +		if target.Kind() == reflect.Ptr { // proto2
      +			target.Set(reflect.New(targetType.Elem()))
      +			target = target.Elem()
      +		}
      +		target.SetInt(int64(n))
      +		return nil
      +	}
      +
      +	// Handle nested messages.
      +	if targetType.Kind() == reflect.Struct {
      +		var jsonFields map[string]json.RawMessage
      +		if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
      +			return err
      +		}
      +
      +		consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
      +			// Be liberal in what names we accept; both orig_name and camelName are okay.
      +			fieldNames := acceptedJSONFieldNames(prop)
      +
      +			vOrig, okOrig := jsonFields[fieldNames.orig]
      +			vCamel, okCamel := jsonFields[fieldNames.camel]
      +			if !okOrig && !okCamel {
      +				return nil, false
      +			}
      +			// If, for some reason, both are present in the data, favour the camelName.
      +			var raw json.RawMessage
      +			if okOrig {
      +				raw = vOrig
      +				delete(jsonFields, fieldNames.orig)
      +			}
      +			if okCamel {
      +				raw = vCamel
      +				delete(jsonFields, fieldNames.camel)
      +			}
      +			return raw, true
      +		}
      +
      +		sprops := proto.GetProperties(targetType)
      +		for i := 0; i < target.NumField(); i++ {
      +			ft := target.Type().Field(i)
      +			if strings.HasPrefix(ft.Name, "XXX_") {
      +				continue
      +			}
      +
      +			valueForField, ok := consumeField(sprops.Prop[i])
      +			if !ok {
      +				continue
      +			}
      +
      +			if err := unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
      +				return err
      +			}
      +		}
      +		// Check for any oneof fields.
      +		if len(jsonFields) > 0 {
      +			for _, oop := range sprops.OneofTypes {
      +				raw, ok := consumeField(oop.Prop)
      +				if !ok {
      +					continue
      +				}
      +				nv := reflect.New(oop.Type.Elem())
      +				target.Field(oop.Field).Set(nv)
      +				if err := unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
      +					return err
      +				}
      +			}
      +		}
      +		if len(jsonFields) > 0 {
      +			// Pick any field to be the scapegoat.
      +			var f string
      +			for fname := range jsonFields {
      +				f = fname
      +				break
      +			}
      +			return fmt.Errorf("unknown field %q in %v", f, targetType)
      +		}
      +		return nil
      +	}
      +
      +	// Handle arrays (which aren't encoded bytes)
      +	if targetType != byteArrayType && targetType.Kind() == reflect.Slice {
      +		var slc []json.RawMessage
      +		if err := json.Unmarshal(inputValue, &slc); err != nil {
      +			return err
      +		}
      +		len := len(slc)
      +		target.Set(reflect.MakeSlice(targetType, len, len))
      +		for i := 0; i < len; i++ {
      +			if err := unmarshalValue(target.Index(i), slc[i], prop); err != nil {
      +				return err
      +			}
      +		}
      +		return nil
      +	}
      +
      +	// Handle maps (whose keys are always strings)
      +	if targetType.Kind() == reflect.Map {
      +		var mp map[string]json.RawMessage
      +		if err := json.Unmarshal(inputValue, &mp); err != nil {
      +			return err
      +		}
      +		target.Set(reflect.MakeMap(targetType))
      +		var keyprop, valprop *proto.Properties
      +		if prop != nil {
      +			// These could still be nil if the protobuf metadata is broken somehow.
      +			// TODO: This won't work because the fields are unexported.
      +			// We should probably just reparse them.
      +			//keyprop, valprop = prop.mkeyprop, prop.mvalprop
      +		}
      +		for ks, raw := range mp {
      +			// Unmarshal map key. The core json library already decoded the key into a
      +			// string, so we handle that specially. Other types were quoted post-serialization.
      +			var k reflect.Value
      +			if targetType.Key().Kind() == reflect.String {
      +				k = reflect.ValueOf(ks)
      +			} else {
      +				k = reflect.New(targetType.Key()).Elem()
      +				if err := unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil {
      +					return err
      +				}
      +			}
      +
      +			// Unmarshal map value.
      +			v := reflect.New(targetType.Elem()).Elem()
      +			if err := unmarshalValue(v, raw, valprop); err != nil {
      +				return err
      +			}
      +			target.SetMapIndex(k, v)
      +		}
      +		return nil
      +	}
      +
      +	// 64-bit integers can be encoded as strings. In this case we drop
      +	// the quotes and proceed as normal.
      +	isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64
      +	if isNum && strings.HasPrefix(string(inputValue), `"`) {
      +		inputValue = inputValue[1 : len(inputValue)-1]
      +	}
      +
      +	// Use the encoding/json for parsing other value types.
      +	return json.Unmarshal(inputValue, target.Addr().Interface())
      +}
      +
      +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
      +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
      +	var prop proto.Properties
      +	prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
      +	if origName || prop.JSONName == "" {
      +		prop.JSONName = prop.OrigName
      +	}
      +	return &prop
      +}
      +
      +type fieldNames struct {
      +	orig, camel string
      +}
      +
      +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
      +	opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
      +	if prop.JSONName != "" {
      +		opts.camel = prop.JSONName
      +	}
      +	return opts
      +}
      +
      +// extendableProto is an interface implemented by any protocol buffer that may be extended.
      +type extendableProto interface {
      +	proto.Message
      +	ExtensionRangeArray() []proto.ExtensionRange
      +	ExtensionMap() map[int32]proto.Extension
      +}
      +
      +// Writer wrapper inspired by https://blog.golang.org/errors-are-values
      +type errWriter struct {
      +	writer io.Writer
      +	err    error
      +}
      +
      +func (w *errWriter) write(str string) {
      +	if w.err != nil {
      +		return
      +	}
      +	_, w.err = w.writer.Write([]byte(str))
      +}
      +
      +// Map fields may have key types of non-float scalars, strings and enums.
      +// The easiest way to sort them in some deterministic order is to use fmt.
      +// If this turns out to be inefficient we can always consider other options,
      +// such as doing a Schwartzian transform.
      +type mapKeys []reflect.Value
      +
      +func (s mapKeys) Len() int      { return len(s) }
      +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
      +func (s mapKeys) Less(i, j int) bool {
      +	return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
      +}
      diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go
      new file mode 100644
      index 00000000..b912fae2
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go
      @@ -0,0 +1,509 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2015 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +package jsonpb
      +
      +import (
      +	"bytes"
      +	"encoding/json"
      +	"io"
      +	"reflect"
      +	"testing"
      +
      +	"github.com/golang/protobuf/proto"
      +
      +	pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto"
      +	proto3pb "github.com/golang/protobuf/proto/proto3_proto"
      +	durpb "github.com/golang/protobuf/ptypes/duration"
      +	stpb "github.com/golang/protobuf/ptypes/struct"
      +	tspb "github.com/golang/protobuf/ptypes/timestamp"
      +	wpb "github.com/golang/protobuf/ptypes/wrappers"
      +)
      +
      +var (
      +	marshaler = Marshaler{}
      +
      +	marshalerAllOptions = Marshaler{
      +		Indent: "  ",
      +	}
      +
      +	simpleObject = &pb.Simple{
      +		OInt32:  proto.Int32(-32),
      +		OInt64:  proto.Int64(-6400000000),
      +		OUint32: proto.Uint32(32),
      +		OUint64: proto.Uint64(6400000000),
      +		OSint32: proto.Int32(-13),
      +		OSint64: proto.Int64(-2600000000),
      +		OFloat:  proto.Float32(3.14),
      +		ODouble: proto.Float64(6.02214179e23),
      +		OBool:   proto.Bool(true),
      +		OString: proto.String("hello \"there\""),
      +		OBytes:  []byte("beep boop"),
      +	}
      +
      +	simpleObjectJSON = `{` +
      +		`"oBool":true,` +
      +		`"oInt32":-32,` +
      +		`"oInt64":"-6400000000",` +
      +		`"oUint32":32,` +
      +		`"oUint64":"6400000000",` +
      +		`"oSint32":-13,` +
      +		`"oSint64":"-2600000000",` +
      +		`"oFloat":3.14,` +
      +		`"oDouble":6.02214179e+23,` +
      +		`"oString":"hello \"there\"",` +
      +		`"oBytes":"YmVlcCBib29w"` +
      +		`}`
      +
      +	simpleObjectPrettyJSON = `{
      +  "oBool": true,
      +  "oInt32": -32,
      +  "oInt64": "-6400000000",
      +  "oUint32": 32,
      +  "oUint64": "6400000000",
      +  "oSint32": -13,
      +  "oSint64": "-2600000000",
      +  "oFloat": 3.14,
      +  "oDouble": 6.02214179e+23,
      +  "oString": "hello \"there\"",
      +  "oBytes": "YmVlcCBib29w"
      +}`
      +
      +	repeatsObject = &pb.Repeats{
      +		RBool:   []bool{true, false, true},
      +		RInt32:  []int32{-3, -4, -5},
      +		RInt64:  []int64{-123456789, -987654321},
      +		RUint32: []uint32{1, 2, 3},
      +		RUint64: []uint64{6789012345, 3456789012},
      +		RSint32: []int32{-1, -2, -3},
      +		RSint64: []int64{-6789012345, -3456789012},
      +		RFloat:  []float32{3.14, 6.28},
      +		RDouble: []float64{299792458, 6.62606957e-34},
      +		RString: []string{"happy", "days"},
      +		RBytes:  [][]byte{[]byte("skittles"), []byte("m&m's")},
      +	}
      +
      +	repeatsObjectJSON = `{` +
      +		`"rBool":[true,false,true],` +
      +		`"rInt32":[-3,-4,-5],` +
      +		`"rInt64":["-123456789","-987654321"],` +
      +		`"rUint32":[1,2,3],` +
      +		`"rUint64":["6789012345","3456789012"],` +
      +		`"rSint32":[-1,-2,-3],` +
      +		`"rSint64":["-6789012345","-3456789012"],` +
      +		`"rFloat":[3.14,6.28],` +
      +		`"rDouble":[2.99792458e+08,6.62606957e-34],` +
      +		`"rString":["happy","days"],` +
      +		`"rBytes":["c2tpdHRsZXM=","bSZtJ3M="]` +
      +		`}`
      +
      +	repeatsObjectPrettyJSON = `{
      +  "rBool": [
      +    true,
      +    false,
      +    true
      +  ],
      +  "rInt32": [
      +    -3,
      +    -4,
      +    -5
      +  ],
      +  "rInt64": [
      +    "-123456789",
      +    "-987654321"
      +  ],
      +  "rUint32": [
      +    1,
      +    2,
      +    3
      +  ],
      +  "rUint64": [
      +    "6789012345",
      +    "3456789012"
      +  ],
      +  "rSint32": [
      +    -1,
      +    -2,
      +    -3
      +  ],
      +  "rSint64": [
      +    "-6789012345",
      +    "-3456789012"
      +  ],
      +  "rFloat": [
      +    3.14,
      +    6.28
      +  ],
      +  "rDouble": [
      +    2.99792458e+08,
      +    6.62606957e-34
      +  ],
      +  "rString": [
      +    "happy",
      +    "days"
      +  ],
      +  "rBytes": [
      +    "c2tpdHRsZXM=",
      +    "bSZtJ3M="
      +  ]
      +}`
      +
      +	innerSimple   = &pb.Simple{OInt32: proto.Int32(-32)}
      +	innerSimple2  = &pb.Simple{OInt64: proto.Int64(25)}
      +	innerRepeats  = &pb.Repeats{RString: []string{"roses", "red"}}
      +	innerRepeats2 = &pb.Repeats{RString: []string{"violets", "blue"}}
      +	complexObject = &pb.Widget{
      +		Color:    pb.Widget_GREEN.Enum(),
      +		RColor:   []pb.Widget_Color{pb.Widget_RED, pb.Widget_GREEN, pb.Widget_BLUE},
      +		Simple:   innerSimple,
      +		RSimple:  []*pb.Simple{innerSimple, innerSimple2},
      +		Repeats:  innerRepeats,
      +		RRepeats: []*pb.Repeats{innerRepeats, innerRepeats2},
      +	}
      +
      +	complexObjectJSON = `{"color":"GREEN",` +
      +		`"rColor":["RED","GREEN","BLUE"],` +
      +		`"simple":{"oInt32":-32},` +
      +		`"rSimple":[{"oInt32":-32},{"oInt64":"25"}],` +
      +		`"repeats":{"rString":["roses","red"]},` +
      +		`"rRepeats":[{"rString":["roses","red"]},{"rString":["violets","blue"]}]` +
      +		`}`
      +
      +	complexObjectPrettyJSON = `{
      +  "color": "GREEN",
      +  "rColor": [
      +    "RED",
      +    "GREEN",
      +    "BLUE"
      +  ],
      +  "simple": {
      +    "oInt32": -32
      +  },
      +  "rSimple": [
      +    {
      +      "oInt32": -32
      +    },
      +    {
      +      "oInt64": "25"
      +    }
      +  ],
      +  "repeats": {
      +    "rString": [
      +      "roses",
      +      "red"
      +    ]
      +  },
      +  "rRepeats": [
      +    {
      +      "rString": [
      +        "roses",
      +        "red"
      +      ]
      +    },
      +    {
      +      "rString": [
      +        "violets",
      +        "blue"
      +      ]
      +    }
      +  ]
      +}`
      +
      +	colorPrettyJSON = `{
      + "color": 2
      +}`
      +
      +	colorListPrettyJSON = `{
      +  "color": 1000,
      +  "rColor": [
      +    "RED"
      +  ]
      +}`
      +
      +	nummyPrettyJSON = `{
      +  "nummy": {
      +    "1": 2,
      +    "3": 4
      +  }
      +}`
      +
      +	objjyPrettyJSON = `{
      +  "objjy": {
      +    "1": {
      +      "dub": 1
      +    }
      +  }
      +}`
      +	realNumber     = &pb.Real{Value: proto.Float64(3.14159265359)}
      +	realNumberName = "Pi"
      +	complexNumber  = &pb.Complex{Imaginary: proto.Float64(0.5772156649)}
      +	realNumberJSON = `{` +
      +		`"value":3.14159265359,` +
      +		`"[jsonpb.Complex.real_extension]":{"imaginary":0.5772156649},` +
      +		`"[jsonpb.name]":"Pi"` +
      +		`}`
      +)
      +
      +func init() {
      +	if err := proto.SetExtension(realNumber, pb.E_Name, &realNumberName); err != nil {
      +		panic(err)
      +	}
      +	if err := proto.SetExtension(realNumber, pb.E_Complex_RealExtension, complexNumber); err != nil {
      +		panic(err)
      +	}
      +}
      +
      +var marshalingTests = []struct {
      +	desc      string
      +	marshaler Marshaler
      +	pb        proto.Message
      +	json      string
      +}{
      +	{"simple flat object", marshaler, simpleObject, simpleObjectJSON},
      +	{"simple pretty object", marshalerAllOptions, simpleObject, simpleObjectPrettyJSON},
      +	{"repeated fields flat object", marshaler, repeatsObject, repeatsObjectJSON},
      +	{"repeated fields pretty object", marshalerAllOptions, repeatsObject, repeatsObjectPrettyJSON},
      +	{"nested message/enum flat object", marshaler, complexObject, complexObjectJSON},
      +	{"nested message/enum pretty object", marshalerAllOptions, complexObject, complexObjectPrettyJSON},
      +	{"enum-string flat object", Marshaler{},
      +		&pb.Widget{Color: pb.Widget_BLUE.Enum()}, `{"color":"BLUE"}`},
      +	{"enum-value pretty object", Marshaler{EnumsAsInts: true, Indent: " "},
      +		&pb.Widget{Color: pb.Widget_BLUE.Enum()}, colorPrettyJSON},
      +	{"unknown enum value object", marshalerAllOptions,
      +		&pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}, colorListPrettyJSON},
      +	{"repeated proto3 enum", Marshaler{},
      +		&proto3pb.Message{RFunny: []proto3pb.Message_Humour{
      +			proto3pb.Message_PUNS,
      +			proto3pb.Message_SLAPSTICK,
      +		}},
      +		`{"rFunny":["PUNS","SLAPSTICK"]}`},
      +	{"repeated proto3 enum as int", Marshaler{EnumsAsInts: true},
      +		&proto3pb.Message{RFunny: []proto3pb.Message_Humour{
      +			proto3pb.Message_PUNS,
      +			proto3pb.Message_SLAPSTICK,
      +		}},
      +		`{"rFunny":[1,2]}`},
      +	{"empty value", marshaler, &pb.Simple3{}, `{}`},
      +	{"empty value emitted", Marshaler{EmitDefaults: true}, &pb.Simple3{}, `{"dub":0}`},
      +	{"map<int64, int32>", marshaler, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, `{"nummy":{"1":2,"3":4}}`},
      +	{"map<int64, int32>", marshalerAllOptions, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, nummyPrettyJSON},
      +	{"map<string, string>", marshaler,
      +		&pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}},
      +		`{"strry":{"\"one\"":"two","three":"four"}}`},
      +	{"map<int32, Object>", marshaler,
      +		&pb.Mappy{Objjy: map[int32]*pb.Simple3{1: &pb.Simple3{Dub: 1}}}, `{"objjy":{"1":{"dub":1}}}`},
      +	{"map<int32, Object>", marshalerAllOptions,
      +		&pb.Mappy{Objjy: map[int32]*pb.Simple3{1: &pb.Simple3{Dub: 1}}}, objjyPrettyJSON},
      +	{"map<int64, string>", marshaler, &pb.Mappy{Buggy: map[int64]string{1234: "yup"}},
      +		`{"buggy":{"1234":"yup"}}`},
      +	{"map<bool, bool>", marshaler, &pb.Mappy{Booly: map[bool]bool{false: true}}, `{"booly":{"false":true}}`},
      +	// TODO: This is broken.
      +	//{"map<string, enum>", marshaler, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":"ROMAN"}`},
      +	{"map<string, enum as int>", Marshaler{EnumsAsInts: true}, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":2}}`},
      +	{"proto2 map<int64, string>", marshaler, &pb.Maps{MInt64Str: map[int64]string{213: "cat"}},
      +		`{"mInt64Str":{"213":"cat"}}`},
      +	{"proto2 map<bool, Object>", marshaler,
      +		&pb.Maps{MBoolSimple: map[bool]*pb.Simple{true: &pb.Simple{OInt32: proto.Int32(1)}}},
      +		`{"mBoolSimple":{"true":{"oInt32":1}}}`},
      +	{"oneof, not set", marshaler, &pb.MsgWithOneof{}, `{}`},
      +	{"oneof, set", marshaler, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Title{"Grand Poobah"}}, `{"title":"Grand Poobah"}`},
      +	{"force orig_name", Marshaler{OrigName: true}, &pb.Simple{OInt32: proto.Int32(4)},
      +		`{"o_int32":4}`},
      +	{"proto2 extension", marshaler, realNumber, realNumberJSON},
      +
      +	{"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3.000s"}`},
      +	{"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{
      +		Fields: map[string]*stpb.Value{
      +			"one": &stpb.Value{Kind: &stpb.Value_StringValue{"loneliest number"}},
      +			"two": &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}},
      +		},
      +	}}, `{"st":{"one":"loneliest number","two":null}}`},
      +	{"Timestamp", marshaler, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}, `{"ts":"2014-05-13T16:53:20.021Z"}`},
      +
      +	{"DoubleValue", marshaler, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}, `{"dbl":1.2}`},
      +	{"FloatValue", marshaler, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}, `{"flt":1.2}`},
      +	{"Int64Value", marshaler, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}, `{"i64":"-3"}`},
      +	{"UInt64Value", marshaler, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}, `{"u64":"3"}`},
      +	{"Int32Value", marshaler, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}, `{"i32":-4}`},
      +	{"UInt32Value", marshaler, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}, `{"u32":4}`},
      +	{"BoolValue", marshaler, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}, `{"bool":true}`},
      +	{"StringValue", marshaler, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}, `{"str":"plush"}`},
      +	{"BytesValue", marshaler, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}, `{"bytes":"d293"}`},
      +}
      +
      +func TestMarshaling(t *testing.T) {
      +	for _, tt := range marshalingTests {
      +		json, err := tt.marshaler.MarshalToString(tt.pb)
      +		if err != nil {
      +			t.Errorf("%s: marshaling error: %v", tt.desc, err)
      +		} else if tt.json != json {
      +			t.Errorf("%s: got [%v] want [%v]", tt.desc, json, tt.json)
      +		}
      +	}
      +}
      +
      +var unmarshalingTests = []struct {
      +	desc string
      +	json string
      +	pb   proto.Message
      +}{
      +	{"simple flat object", simpleObjectJSON, simpleObject},
      +	{"simple pretty object", simpleObjectPrettyJSON, simpleObject},
      +	{"repeated fields flat object", repeatsObjectJSON, repeatsObject},
      +	{"repeated fields pretty object", repeatsObjectPrettyJSON, repeatsObject},
      +	{"nested message/enum flat object", complexObjectJSON, complexObject},
      +	{"nested message/enum pretty object", complexObjectPrettyJSON, complexObject},
      +	{"enum-string object", `{"color":"BLUE"}`, &pb.Widget{Color: pb.Widget_BLUE.Enum()}},
      +	{"enum-value object", "{\n \"color\": 2\n}", &pb.Widget{Color: pb.Widget_BLUE.Enum()}},
      +	{"proto3 enum string", `{"hilarity":"PUNS"}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
      +	{"proto3 enum value", `{"hilarity":1}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
      +	{"unknown enum value object",
      +		"{\n  \"color\": 1000,\n  \"r_color\": [\n    \"RED\"\n  ]\n}",
      +		&pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}},
      +	{"repeated proto3 enum", `{"rFunny":["PUNS","SLAPSTICK"]}`,
      +		&proto3pb.Message{RFunny: []proto3pb.Message_Humour{
      +			proto3pb.Message_PUNS,
      +			proto3pb.Message_SLAPSTICK,
      +		}}},
      +	{"repeated proto3 enum as int", `{"rFunny":[1,2]}`,
      +		&proto3pb.Message{RFunny: []proto3pb.Message_Humour{
      +			proto3pb.Message_PUNS,
      +			proto3pb.Message_SLAPSTICK,
      +		}}},
      +	{"repeated proto3 enum as mix of strings and ints", `{"rFunny":["PUNS",2]}`,
      +		&proto3pb.Message{RFunny: []proto3pb.Message_Humour{
      +			proto3pb.Message_PUNS,
      +			proto3pb.Message_SLAPSTICK,
      +		}}},
      +	{"unquoted int64 object", `{"oInt64":-314}`, &pb.Simple{OInt64: proto.Int64(-314)}},
      +	{"unquoted uint64 object", `{"oUint64":123}`, &pb.Simple{OUint64: proto.Uint64(123)}},
      +	{"map<int64, int32>", `{"nummy":{"1":2,"3":4}}`, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}},
      +	{"map<string, string>", `{"strry":{"\"one\"":"two","three":"four"}}`, &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}},
      +	{"map<int32, Object>", `{"objjy":{"1":{"dub":1}}}`, &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: &pb.Simple3{Dub: 1}}}},
      +	// TODO: This is broken.
      +	//{"map<string, enum>", `{"enumy":{"XIV":"ROMAN"}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}},
      +	{"map<string, enum as int>", `{"enumy":{"XIV":2}}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}},
      +	{"oneof", `{"salary":31000}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Salary{31000}}},
      +	{"oneof spec name", `{"country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}},
      +	{"oneof orig_name", `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}},
      +	{"orig_name input", `{"o_bool":true}`, &pb.Simple{OBool: proto.Bool(true)}},
      +	{"camelName input", `{"oBool":true}`, &pb.Simple{OBool: proto.Bool(true)}},
      +
      +	{"Duration", `{"dur":"3.000s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}},
      +	{"Timestamp", `{"ts":"2014-05-13T16:53:20.021Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}},
      +
      +	{"DoubleValue", `{"dbl":1.2}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}},
      +	{"FloatValue", `{"flt":1.2}`, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}},
      +	{"Int64Value", `{"i64":"-3"}`, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}},
      +	{"UInt64Value", `{"u64":"3"}`, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}},
      +	{"Int32Value", `{"i32":-4}`, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}},
      +	{"UInt32Value", `{"u32":4}`, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}},
      +	{"BoolValue", `{"bool":true}`, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}},
      +	{"StringValue", `{"str":"plush"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}},
      +	{"BytesValue", `{"bytes":"d293"}`, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}},
      +	// `null` is also a permissible value. Let's just test one.
      +	{"null DoubleValue", `{"dbl":null}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{}}},
      +}
      +
      +func TestUnmarshaling(t *testing.T) {
      +	for _, tt := range unmarshalingTests {
      +		// Make a new instance of the type of our expected object.
      +		p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message)
      +
      +		err := UnmarshalString(tt.json, p)
      +		if err != nil {
      +			t.Errorf("%s: %v", tt.desc, err)
      +			continue
      +		}
      +
      +		// For easier diffs, compare text strings of the protos.
      +		exp := proto.MarshalTextString(tt.pb)
      +		act := proto.MarshalTextString(p)
      +		if string(exp) != string(act) {
      +			t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp)
      +		}
      +	}
      +}
      +
      +func TestUnmarshalNext(t *testing.T) {
      +	// We only need to check against a few, not all of them.
      +	tests := unmarshalingTests[:5]
      +
      +	// Create a buffer with many concatenated JSON objects.
      +	var b bytes.Buffer
      +	for _, tt := range tests {
      +		b.WriteString(tt.json)
      +	}
      +
      +	dec := json.NewDecoder(&b)
      +	for _, tt := range tests {
      +		// Make a new instance of the type of our expected object.
      +		p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message)
      +
      +		err := UnmarshalNext(dec, p)
      +		if err != nil {
      +			t.Errorf("%s: %v", tt.desc, err)
      +			continue
      +		}
      +
      +		// For easier diffs, compare text strings of the protos.
      +		exp := proto.MarshalTextString(tt.pb)
      +		act := proto.MarshalTextString(p)
      +		if string(exp) != string(act) {
      +			t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp)
      +		}
      +	}
      +
      +	p := &pb.Simple{}
      +	err := UnmarshalNext(dec, p)
      +	if err != io.EOF {
      +		t.Errorf("eof: got %v, expected io.EOF", err)
      +	}
      +}
      +
      +var unmarshalingShouldError = []struct {
      +	desc string
      +	in   string
      +	pb   proto.Message
      +}{
      +	{"a value", "666", new(pb.Simple)},
      +	{"gibberish", "{adskja123;l23=-=", new(pb.Simple)},
      +	{"unknown enum name", `{"hilarity":"DAVE"}`, new(proto3pb.Message)},
      +}
      +
      +func TestUnmarshalingBadInput(t *testing.T) {
      +	for _, tt := range unmarshalingShouldError {
      +		err := UnmarshalString(tt.in, tt.pb)
      +		if err == nil {
      +			t.Errorf("an error was expected when parsing %q instead of an object", tt.desc)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile
      new file mode 100644
      index 00000000..3f845ba1
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile
      @@ -0,0 +1,33 @@
      +# Go support for Protocol Buffers - Google's data interchange format
      +#
      +# Copyright 2015 The Go Authors.  All rights reserved.
      +# https://github.com/golang/protobuf
      +#
      +# Redistribution and use in source and binary forms, with or without
      +# modification, are permitted provided that the following conditions are
      +# met:
      +#
      +#     * Redistributions of source code must retain the above copyright
      +# notice, this list of conditions and the following disclaimer.
      +#     * Redistributions in binary form must reproduce the above
      +# copyright notice, this list of conditions and the following disclaimer
      +# in the documentation and/or other materials provided with the
      +# distribution.
      +#     * Neither the name of Google Inc. nor the names of its
      +# contributors may be used to endorse or promote products derived from
      +# this software without specific prior written permission.
      +#
      +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +regenerate:
      +	protoc --go_out=Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers:. *.proto
      diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go
      new file mode 100644
      index 00000000..3d1a0896
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go
      @@ -0,0 +1,159 @@
      +// Code generated by protoc-gen-go.
      +// source: more_test_objects.proto
      +// DO NOT EDIT!
      +
      +/*
      +Package jsonpb is a generated protocol buffer package.
      +
      +It is generated from these files:
      +	more_test_objects.proto
      +	test_objects.proto
      +
      +It has these top-level messages:
      +	Simple3
      +	Mappy
      +	Simple
      +	Repeats
      +	Widget
      +	Maps
      +	MsgWithOneof
      +	Real
      +	Complex
      +	KnownTypes
      +*/
      +package jsonpb
      +
      +import proto "github.com/golang/protobuf/proto"
      +import fmt "fmt"
      +import math "math"
      +
      +// Reference imports to suppress errors if they are not otherwise used.
      +var _ = proto.Marshal
      +var _ = fmt.Errorf
      +var _ = math.Inf
      +
      +// This is a compile-time assertion to ensure that this generated file
      +// is compatible with the proto package it is being compiled against.
      +const _ = proto.ProtoPackageIsVersion1
      +
      +type Numeral int32
      +
      +const (
      +	Numeral_UNKNOWN Numeral = 0
      +	Numeral_ARABIC  Numeral = 1
      +	Numeral_ROMAN   Numeral = 2
      +)
      +
      +var Numeral_name = map[int32]string{
      +	0: "UNKNOWN",
      +	1: "ARABIC",
      +	2: "ROMAN",
      +}
      +var Numeral_value = map[string]int32{
      +	"UNKNOWN": 0,
      +	"ARABIC":  1,
      +	"ROMAN":   2,
      +}
      +
      +func (x Numeral) String() string {
      +	return proto.EnumName(Numeral_name, int32(x))
      +}
      +func (Numeral) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
      +
      +type Simple3 struct {
      +	Dub float64 `protobuf:"fixed64,1,opt,name=dub" json:"dub,omitempty"`
      +}
      +
      +func (m *Simple3) Reset()                    { *m = Simple3{} }
      +func (m *Simple3) String() string            { return proto.CompactTextString(m) }
      +func (*Simple3) ProtoMessage()               {}
      +func (*Simple3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
      +
      +type Mappy struct {
      +	Nummy map[int64]int32    `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
      +	Strry map[string]string  `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
      +	Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
      +	Buggy map[int64]string   `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
      +	Booly map[bool]bool      `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
      +	Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"`
      +}
      +
      +func (m *Mappy) Reset()                    { *m = Mappy{} }
      +func (m *Mappy) String() string            { return proto.CompactTextString(m) }
      +func (*Mappy) ProtoMessage()               {}
      +func (*Mappy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
      +
      +func (m *Mappy) GetNummy() map[int64]int32 {
      +	if m != nil {
      +		return m.Nummy
      +	}
      +	return nil
      +}
      +
      +func (m *Mappy) GetStrry() map[string]string {
      +	if m != nil {
      +		return m.Strry
      +	}
      +	return nil
      +}
      +
      +func (m *Mappy) GetObjjy() map[int32]*Simple3 {
      +	if m != nil {
      +		return m.Objjy
      +	}
      +	return nil
      +}
      +
      +func (m *Mappy) GetBuggy() map[int64]string {
      +	if m != nil {
      +		return m.Buggy
      +	}
      +	return nil
      +}
      +
      +func (m *Mappy) GetBooly() map[bool]bool {
      +	if m != nil {
      +		return m.Booly
      +	}
      +	return nil
      +}
      +
      +func (m *Mappy) GetEnumy() map[string]Numeral {
      +	if m != nil {
      +		return m.Enumy
      +	}
      +	return nil
      +}
      +
      +func init() {
      +	proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3")
      +	proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy")
      +	proto.RegisterEnum("jsonpb.Numeral", Numeral_name, Numeral_value)
      +}
      +
      +var fileDescriptor0 = []byte{
      +	// 357 bytes of a gzipped FileDescriptorProto
      +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x93, 0x4d, 0x4b, 0xc3, 0x40,
      +	0x10, 0x86, 0x4d, 0xe2, 0xa6, 0xcd, 0x14, 0x34, 0x2c, 0x82, 0x8b, 0x5e, 0x4a, 0x41, 0x28, 0x82,
      +	0x39, 0xb4, 0x97, 0xe2, 0xad, 0x95, 0x1e, 0x8a, 0x34, 0x85, 0x14, 0xf1, 0x58, 0x1a, 0x5d, 0x8a,
      +	0x31, 0xc9, 0x86, 0x7c, 0x08, 0xfb, 0x83, 0xfc, 0x9f, 0x32, 0x9b, 0xd4, 0xc4, 0xb2, 0xe0, 0x6d,
      +	0x92, 0xf7, 0x79, 0xc2, 0xec, 0x1b, 0x16, 0xae, 0x13, 0x91, 0xf3, 0x5d, 0xc9, 0x8b, 0x72, 0x27,
      +	0xc2, 0x88, 0xbf, 0x95, 0x85, 0x97, 0xe5, 0xa2, 0x14, 0xd4, 0x8e, 0x0a, 0x91, 0x66, 0xe1, 0xe8,
      +	0x16, 0x7a, 0xdb, 0x8f, 0x24, 0x8b, 0xf9, 0x94, 0xba, 0x60, 0xbd, 0x57, 0x21, 0x33, 0x86, 0xc6,
      +	0xd8, 0x08, 0x70, 0x1c, 0x7d, 0x13, 0x20, 0xeb, 0x7d, 0x96, 0x49, 0xea, 0x01, 0x49, 0xab, 0x24,
      +	0x91, 0xcc, 0x18, 0x5a, 0xe3, 0xc1, 0x84, 0x79, 0xb5, 0xee, 0xa9, 0xd4, 0xf3, 0x31, 0x5a, 0xa6,
      +	0x65, 0x2e, 0x83, 0x1a, 0x43, 0xbe, 0x28, 0xf3, 0x5c, 0x32, 0x53, 0xc7, 0x6f, 0x31, 0x6a, 0x78,
      +	0x85, 0x21, 0x2f, 0xc2, 0x28, 0x92, 0xcc, 0xd2, 0xf1, 0x1b, 0x8c, 0x1a, 0x5e, 0x61, 0xc8, 0x87,
      +	0xd5, 0xe1, 0x20, 0xd9, 0xb9, 0x8e, 0x5f, 0x60, 0xd4, 0xf0, 0x0a, 0x53, 0xbc, 0x10, 0xb1, 0x64,
      +	0x44, 0xcb, 0x63, 0x74, 0xe4, 0x71, 0x46, 0x9e, 0xa7, 0x55, 0x22, 0x99, 0xad, 0xe3, 0x97, 0x18,
      +	0x35, 0xbc, 0xc2, 0x6e, 0x66, 0x00, 0x6d, 0x09, 0xd8, 0xe4, 0x27, 0x97, 0xaa, 0x49, 0x2b, 0xc0,
      +	0x91, 0x5e, 0x01, 0xf9, 0xda, 0xc7, 0x15, 0x67, 0xe6, 0xd0, 0x18, 0x93, 0xa0, 0x7e, 0x78, 0x34,
      +	0x67, 0x06, 0x9a, 0x6d, 0x1d, 0x5d, 0xd3, 0xd1, 0x98, 0x4e, 0xd7, 0x5c, 0x01, 0xb4, 0xc5, 0x74,
      +	0x4d, 0x52, 0x9b, 0x77, 0x5d, 0x73, 0x30, 0xb9, 0x3c, 0x9e, 0xa1, 0xf9, 0xdf, 0x27, 0x4b, 0xb4,
      +	0x9d, 0xfd, 0xb7, 0xbe, 0x73, 0x6a, 0xfe, 0xb6, 0xd7, 0x35, 0xfb, 0x1a, 0xb3, 0x7f, 0xb2, 0x7e,
      +	0xdb, 0xa3, 0xe6, 0xe0, 0x7f, 0xd6, 0xbf, 0x68, 0xd7, 0xf7, 0xab, 0x84, 0xe7, 0xfb, 0xb8, 0xf3,
      +	0xa9, 0xfb, 0x07, 0xe8, 0x35, 0x6f, 0xe9, 0x00, 0x7a, 0x2f, 0xfe, 0xb3, 0xbf, 0x79, 0xf5, 0xdd,
      +	0x33, 0x0a, 0x60, 0xcf, 0x83, 0xf9, 0x62, 0xf5, 0xe4, 0x1a, 0xd4, 0x01, 0x12, 0x6c, 0xd6, 0x73,
      +	0xdf, 0x35, 0x43, 0x5b, 0x5d, 0x81, 0xe9, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3d, 0x04, 0xff,
      +	0x62, 0x1d, 0x03, 0x00, 0x00,
      +}
      diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto
      new file mode 100644
      index 00000000..511f021f
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto
      @@ -0,0 +1,53 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2015 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto3";
      +
      +package jsonpb;
      +
      +message Simple3 {
      +  double dub = 1;
      +}
      +
      +enum Numeral {
      +  UNKNOWN = 0;
      +  ARABIC = 1;
      +  ROMAN = 2;
      +}
      +
      +message Mappy {
      +  map<int64, int32> nummy = 1;
      +  map<string, string> strry = 2;
      +  map<int32, Simple3> objjy = 3;
      +  map<int64, string> buggy = 4;
      +  map<bool, bool> booly = 5;
      +  map<string, Numeral> enumy = 6;
      +}
      diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
      new file mode 100644
      index 00000000..a1500e5a
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
      @@ -0,0 +1,740 @@
      +// Code generated by protoc-gen-go.
      +// source: test_objects.proto
      +// DO NOT EDIT!
      +
      +package jsonpb
      +
      +import proto "github.com/golang/protobuf/proto"
      +import fmt "fmt"
      +import math "math"
      +import google_protobuf "github.com/golang/protobuf/ptypes/duration"
      +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct"
      +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp"
      +import google_protobuf3 "github.com/golang/protobuf/ptypes/wrappers"
      +
      +// Reference imports to suppress errors if they are not otherwise used.
      +var _ = proto.Marshal
      +var _ = fmt.Errorf
      +var _ = math.Inf
      +
      +type Widget_Color int32
      +
      +const (
      +	Widget_RED   Widget_Color = 0
      +	Widget_GREEN Widget_Color = 1
      +	Widget_BLUE  Widget_Color = 2
      +)
      +
      +var Widget_Color_name = map[int32]string{
      +	0: "RED",
      +	1: "GREEN",
      +	2: "BLUE",
      +}
      +var Widget_Color_value = map[string]int32{
      +	"RED":   0,
      +	"GREEN": 1,
      +	"BLUE":  2,
      +}
      +
      +func (x Widget_Color) Enum() *Widget_Color {
      +	p := new(Widget_Color)
      +	*p = x
      +	return p
      +}
      +func (x Widget_Color) String() string {
      +	return proto.EnumName(Widget_Color_name, int32(x))
      +}
      +func (x *Widget_Color) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(Widget_Color_value, data, "Widget_Color")
      +	if err != nil {
      +		return err
      +	}
      +	*x = Widget_Color(value)
      +	return nil
      +}
      +func (Widget_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{2, 0} }
      +
      +// Test message for holding primitive types.
      +type Simple struct {
      +	OBool            *bool    `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"`
      +	OInt32           *int32   `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"`
      +	OInt64           *int64   `protobuf:"varint,3,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"`
      +	OUint32          *uint32  `protobuf:"varint,4,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"`
      +	OUint64          *uint64  `protobuf:"varint,5,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"`
      +	OSint32          *int32   `protobuf:"zigzag32,6,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"`
      +	OSint64          *int64   `protobuf:"zigzag64,7,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"`
      +	OFloat           *float32 `protobuf:"fixed32,8,opt,name=o_float,json=oFloat" json:"o_float,omitempty"`
      +	ODouble          *float64 `protobuf:"fixed64,9,opt,name=o_double,json=oDouble" json:"o_double,omitempty"`
      +	OString          *string  `protobuf:"bytes,10,opt,name=o_string,json=oString" json:"o_string,omitempty"`
      +	OBytes           []byte   `protobuf:"bytes,11,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"`
      +	XXX_unrecognized []byte   `json:"-"`
      +}
      +
      +func (m *Simple) Reset()                    { *m = Simple{} }
      +func (m *Simple) String() string            { return proto.CompactTextString(m) }
      +func (*Simple) ProtoMessage()               {}
      +func (*Simple) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
      +
      +func (m *Simple) GetOBool() bool {
      +	if m != nil && m.OBool != nil {
      +		return *m.OBool
      +	}
      +	return false
      +}
      +
      +func (m *Simple) GetOInt32() int32 {
      +	if m != nil && m.OInt32 != nil {
      +		return *m.OInt32
      +	}
      +	return 0
      +}
      +
      +func (m *Simple) GetOInt64() int64 {
      +	if m != nil && m.OInt64 != nil {
      +		return *m.OInt64
      +	}
      +	return 0
      +}
      +
      +func (m *Simple) GetOUint32() uint32 {
      +	if m != nil && m.OUint32 != nil {
      +		return *m.OUint32
      +	}
      +	return 0
      +}
      +
      +func (m *Simple) GetOUint64() uint64 {
      +	if m != nil && m.OUint64 != nil {
      +		return *m.OUint64
      +	}
      +	return 0
      +}
      +
      +func (m *Simple) GetOSint32() int32 {
      +	if m != nil && m.OSint32 != nil {
      +		return *m.OSint32
      +	}
      +	return 0
      +}
      +
      +func (m *Simple) GetOSint64() int64 {
      +	if m != nil && m.OSint64 != nil {
      +		return *m.OSint64
      +	}
      +	return 0
      +}
      +
      +func (m *Simple) GetOFloat() float32 {
      +	if m != nil && m.OFloat != nil {
      +		return *m.OFloat
      +	}
      +	return 0
      +}
      +
      +func (m *Simple) GetODouble() float64 {
      +	if m != nil && m.ODouble != nil {
      +		return *m.ODouble
      +	}
      +	return 0
      +}
      +
      +func (m *Simple) GetOString() string {
      +	if m != nil && m.OString != nil {
      +		return *m.OString
      +	}
      +	return ""
      +}
      +
      +func (m *Simple) GetOBytes() []byte {
      +	if m != nil {
      +		return m.OBytes
      +	}
      +	return nil
      +}
      +
      +// Test message for holding repeated primitives.
      +type Repeats struct {
      +	RBool            []bool    `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"`
      +	RInt32           []int32   `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"`
      +	RInt64           []int64   `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"`
      +	RUint32          []uint32  `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"`
      +	RUint64          []uint64  `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"`
      +	RSint32          []int32   `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"`
      +	RSint64          []int64   `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"`
      +	RFloat           []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"`
      +	RDouble          []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"`
      +	RString          []string  `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"`
      +	RBytes           [][]byte  `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"`
      +	XXX_unrecognized []byte    `json:"-"`
      +}
      +
      +func (m *Repeats) Reset()                    { *m = Repeats{} }
      +func (m *Repeats) String() string            { return proto.CompactTextString(m) }
      +func (*Repeats) ProtoMessage()               {}
      +func (*Repeats) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
      +
      +func (m *Repeats) GetRBool() []bool {
      +	if m != nil {
      +		return m.RBool
      +	}
      +	return nil
      +}
      +
      +func (m *Repeats) GetRInt32() []int32 {
      +	if m != nil {
      +		return m.RInt32
      +	}
      +	return nil
      +}
      +
      +func (m *Repeats) GetRInt64() []int64 {
      +	if m != nil {
      +		return m.RInt64
      +	}
      +	return nil
      +}
      +
      +func (m *Repeats) GetRUint32() []uint32 {
      +	if m != nil {
      +		return m.RUint32
      +	}
      +	return nil
      +}
      +
      +func (m *Repeats) GetRUint64() []uint64 {
      +	if m != nil {
      +		return m.RUint64
      +	}
      +	return nil
      +}
      +
      +func (m *Repeats) GetRSint32() []int32 {
      +	if m != nil {
      +		return m.RSint32
      +	}
      +	return nil
      +}
      +
      +func (m *Repeats) GetRSint64() []int64 {
      +	if m != nil {
      +		return m.RSint64
      +	}
      +	return nil
      +}
      +
      +func (m *Repeats) GetRFloat() []float32 {
      +	if m != nil {
      +		return m.RFloat
      +	}
      +	return nil
      +}
      +
      +func (m *Repeats) GetRDouble() []float64 {
      +	if m != nil {
      +		return m.RDouble
      +	}
      +	return nil
      +}
      +
      +func (m *Repeats) GetRString() []string {
      +	if m != nil {
      +		return m.RString
      +	}
      +	return nil
      +}
      +
      +func (m *Repeats) GetRBytes() [][]byte {
      +	if m != nil {
      +		return m.RBytes
      +	}
      +	return nil
      +}
      +
      +// Test message for holding enums and nested messages.
      +type Widget struct {
      +	Color            *Widget_Color  `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"`
      +	RColor           []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"`
      +	Simple           *Simple        `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"`
      +	RSimple          []*Simple      `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"`
      +	Repeats          *Repeats       `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"`
      +	RRepeats         []*Repeats     `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"`
      +	XXX_unrecognized []byte         `json:"-"`
      +}
      +
      +func (m *Widget) Reset()                    { *m = Widget{} }
      +func (m *Widget) String() string            { return proto.CompactTextString(m) }
      +func (*Widget) ProtoMessage()               {}
      +func (*Widget) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
      +
      +func (m *Widget) GetColor() Widget_Color {
      +	if m != nil && m.Color != nil {
      +		return *m.Color
      +	}
      +	return Widget_RED
      +}
      +
      +func (m *Widget) GetRColor() []Widget_Color {
      +	if m != nil {
      +		return m.RColor
      +	}
      +	return nil
      +}
      +
      +func (m *Widget) GetSimple() *Simple {
      +	if m != nil {
      +		return m.Simple
      +	}
      +	return nil
      +}
      +
      +func (m *Widget) GetRSimple() []*Simple {
      +	if m != nil {
      +		return m.RSimple
      +	}
      +	return nil
      +}
      +
      +func (m *Widget) GetRepeats() *Repeats {
      +	if m != nil {
      +		return m.Repeats
      +	}
      +	return nil
      +}
      +
      +func (m *Widget) GetRRepeats() []*Repeats {
      +	if m != nil {
      +		return m.RRepeats
      +	}
      +	return nil
      +}
      +
      +type Maps struct {
      +	MInt64Str        map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
      +	MBoolSimple      map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
      +	XXX_unrecognized []byte           `json:"-"`
      +}
      +
      +func (m *Maps) Reset()                    { *m = Maps{} }
      +func (m *Maps) String() string            { return proto.CompactTextString(m) }
      +func (*Maps) ProtoMessage()               {}
      +func (*Maps) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
      +
      +func (m *Maps) GetMInt64Str() map[int64]string {
      +	if m != nil {
      +		return m.MInt64Str
      +	}
      +	return nil
      +}
      +
      +func (m *Maps) GetMBoolSimple() map[bool]*Simple {
      +	if m != nil {
      +		return m.MBoolSimple
      +	}
      +	return nil
      +}
      +
      +type MsgWithOneof struct {
      +	// Types that are valid to be assigned to Union:
      +	//	*MsgWithOneof_Title
      +	//	*MsgWithOneof_Salary
      +	//	*MsgWithOneof_Country
      +	Union            isMsgWithOneof_Union `protobuf_oneof:"union"`
      +	XXX_unrecognized []byte               `json:"-"`
      +}
      +
      +func (m *MsgWithOneof) Reset()                    { *m = MsgWithOneof{} }
      +func (m *MsgWithOneof) String() string            { return proto.CompactTextString(m) }
      +func (*MsgWithOneof) ProtoMessage()               {}
      +func (*MsgWithOneof) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} }
      +
      +type isMsgWithOneof_Union interface {
      +	isMsgWithOneof_Union()
      +}
      +
      +type MsgWithOneof_Title struct {
      +	Title string `protobuf:"bytes,1,opt,name=title,oneof"`
      +}
      +type MsgWithOneof_Salary struct {
      +	Salary int64 `protobuf:"varint,2,opt,name=salary,oneof"`
      +}
      +type MsgWithOneof_Country struct {
      +	Country string `protobuf:"bytes,3,opt,name=Country,json=country,oneof"`
      +}
      +
      +func (*MsgWithOneof_Title) isMsgWithOneof_Union()   {}
      +func (*MsgWithOneof_Salary) isMsgWithOneof_Union()  {}
      +func (*MsgWithOneof_Country) isMsgWithOneof_Union() {}
      +
      +func (m *MsgWithOneof) GetUnion() isMsgWithOneof_Union {
      +	if m != nil {
      +		return m.Union
      +	}
      +	return nil
      +}
      +
      +func (m *MsgWithOneof) GetTitle() string {
      +	if x, ok := m.GetUnion().(*MsgWithOneof_Title); ok {
      +		return x.Title
      +	}
      +	return ""
      +}
      +
      +func (m *MsgWithOneof) GetSalary() int64 {
      +	if x, ok := m.GetUnion().(*MsgWithOneof_Salary); ok {
      +		return x.Salary
      +	}
      +	return 0
      +}
      +
      +func (m *MsgWithOneof) GetCountry() string {
      +	if x, ok := m.GetUnion().(*MsgWithOneof_Country); ok {
      +		return x.Country
      +	}
      +	return ""
      +}
      +
      +// XXX_OneofFuncs is for the internal use of the proto package.
      +func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
      +	return _MsgWithOneof_OneofMarshaler, _MsgWithOneof_OneofUnmarshaler, _MsgWithOneof_OneofSizer, []interface{}{
      +		(*MsgWithOneof_Title)(nil),
      +		(*MsgWithOneof_Salary)(nil),
      +		(*MsgWithOneof_Country)(nil),
      +	}
      +}
      +
      +func _MsgWithOneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
      +	m := msg.(*MsgWithOneof)
      +	// union
      +	switch x := m.Union.(type) {
      +	case *MsgWithOneof_Title:
      +		b.EncodeVarint(1<<3 | proto.WireBytes)
      +		b.EncodeStringBytes(x.Title)
      +	case *MsgWithOneof_Salary:
      +		b.EncodeVarint(2<<3 | proto.WireVarint)
      +		b.EncodeVarint(uint64(x.Salary))
      +	case *MsgWithOneof_Country:
      +		b.EncodeVarint(3<<3 | proto.WireBytes)
      +		b.EncodeStringBytes(x.Country)
      +	case nil:
      +	default:
      +		return fmt.Errorf("MsgWithOneof.Union has unexpected type %T", x)
      +	}
      +	return nil
      +}
      +
      +func _MsgWithOneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
      +	m := msg.(*MsgWithOneof)
      +	switch tag {
      +	case 1: // union.title
      +		if wire != proto.WireBytes {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeStringBytes()
      +		m.Union = &MsgWithOneof_Title{x}
      +		return true, err
      +	case 2: // union.salary
      +		if wire != proto.WireVarint {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeVarint()
      +		m.Union = &MsgWithOneof_Salary{int64(x)}
      +		return true, err
      +	case 3: // union.Country
      +		if wire != proto.WireBytes {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeStringBytes()
      +		m.Union = &MsgWithOneof_Country{x}
      +		return true, err
      +	default:
      +		return false, nil
      +	}
      +}
      +
      +func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) {
      +	m := msg.(*MsgWithOneof)
      +	// union
      +	switch x := m.Union.(type) {
      +	case *MsgWithOneof_Title:
      +		n += proto.SizeVarint(1<<3 | proto.WireBytes)
      +		n += proto.SizeVarint(uint64(len(x.Title)))
      +		n += len(x.Title)
      +	case *MsgWithOneof_Salary:
      +		n += proto.SizeVarint(2<<3 | proto.WireVarint)
      +		n += proto.SizeVarint(uint64(x.Salary))
      +	case *MsgWithOneof_Country:
      +		n += proto.SizeVarint(3<<3 | proto.WireBytes)
      +		n += proto.SizeVarint(uint64(len(x.Country)))
      +		n += len(x.Country)
      +	case nil:
      +	default:
      +		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
      +	}
      +	return n
      +}
      +
      +type Real struct {
      +	Value            *float64                  `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
      +	XXX_extensions   map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized []byte                    `json:"-"`
      +}
      +
      +func (m *Real) Reset()                    { *m = Real{} }
      +func (m *Real) String() string            { return proto.CompactTextString(m) }
      +func (*Real) ProtoMessage()               {}
      +func (*Real) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} }
      +
      +var extRange_Real = []proto.ExtensionRange{
      +	{100, 536870911},
      +}
      +
      +func (*Real) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_Real
      +}
      +func (m *Real) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +func (m *Real) GetValue() float64 {
      +	if m != nil && m.Value != nil {
      +		return *m.Value
      +	}
      +	return 0
      +}
      +
      +type Complex struct {
      +	Imaginary        *float64                  `protobuf:"fixed64,1,opt,name=imaginary" json:"imaginary,omitempty"`
      +	XXX_extensions   map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized []byte                    `json:"-"`
      +}
      +
      +func (m *Complex) Reset()                    { *m = Complex{} }
      +func (m *Complex) String() string            { return proto.CompactTextString(m) }
      +func (*Complex) ProtoMessage()               {}
      +func (*Complex) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} }
      +
      +var extRange_Complex = []proto.ExtensionRange{
      +	{100, 536870911},
      +}
      +
      +func (*Complex) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_Complex
      +}
      +func (m *Complex) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +func (m *Complex) GetImaginary() float64 {
      +	if m != nil && m.Imaginary != nil {
      +		return *m.Imaginary
      +	}
      +	return 0
      +}
      +
      +var E_Complex_RealExtension = &proto.ExtensionDesc{
      +	ExtendedType:  (*Real)(nil),
      +	ExtensionType: (*Complex)(nil),
      +	Field:         123,
      +	Name:          "jsonpb.Complex.real_extension",
      +	Tag:           "bytes,123,opt,name=real_extension,json=realExtension",
      +}
      +
      +type KnownTypes struct {
      +	Dur              *google_protobuf.Duration     `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"`
      +	St               *google_protobuf1.Struct      `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"`
      +	Ts               *google_protobuf2.Timestamp   `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"`
      +	Dbl              *google_protobuf3.DoubleValue `protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"`
      +	Flt              *google_protobuf3.FloatValue  `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"`
      +	I64              *google_protobuf3.Int64Value  `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"`
      +	U64              *google_protobuf3.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"`
      +	I32              *google_protobuf3.Int32Value  `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"`
      +	U32              *google_protobuf3.UInt32Value `protobuf:"bytes,8,opt,name=u32" json:"u32,omitempty"`
      +	Bool             *google_protobuf3.BoolValue   `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"`
      +	Str              *google_protobuf3.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"`
      +	Bytes            *google_protobuf3.BytesValue  `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"`
      +	XXX_unrecognized []byte                        `json:"-"`
      +}
      +
      +func (m *KnownTypes) Reset()                    { *m = KnownTypes{} }
      +func (m *KnownTypes) String() string            { return proto.CompactTextString(m) }
      +func (*KnownTypes) ProtoMessage()               {}
      +func (*KnownTypes) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} }
      +
      +func (m *KnownTypes) GetDur() *google_protobuf.Duration {
      +	if m != nil {
      +		return m.Dur
      +	}
      +	return nil
      +}
      +
      +func (m *KnownTypes) GetSt() *google_protobuf1.Struct {
      +	if m != nil {
      +		return m.St
      +	}
      +	return nil
      +}
      +
      +func (m *KnownTypes) GetTs() *google_protobuf2.Timestamp {
      +	if m != nil {
      +		return m.Ts
      +	}
      +	return nil
      +}
      +
      +func (m *KnownTypes) GetDbl() *google_protobuf3.DoubleValue {
      +	if m != nil {
      +		return m.Dbl
      +	}
      +	return nil
      +}
      +
      +func (m *KnownTypes) GetFlt() *google_protobuf3.FloatValue {
      +	if m != nil {
      +		return m.Flt
      +	}
      +	return nil
      +}
      +
      +func (m *KnownTypes) GetI64() *google_protobuf3.Int64Value {
      +	if m != nil {
      +		return m.I64
      +	}
      +	return nil
      +}
      +
      +func (m *KnownTypes) GetU64() *google_protobuf3.UInt64Value {
      +	if m != nil {
      +		return m.U64
      +	}
      +	return nil
      +}
      +
      +func (m *KnownTypes) GetI32() *google_protobuf3.Int32Value {
      +	if m != nil {
      +		return m.I32
      +	}
      +	return nil
      +}
      +
      +func (m *KnownTypes) GetU32() *google_protobuf3.UInt32Value {
      +	if m != nil {
      +		return m.U32
      +	}
      +	return nil
      +}
      +
      +func (m *KnownTypes) GetBool() *google_protobuf3.BoolValue {
      +	if m != nil {
      +		return m.Bool
      +	}
      +	return nil
      +}
      +
      +func (m *KnownTypes) GetStr() *google_protobuf3.StringValue {
      +	if m != nil {
      +		return m.Str
      +	}
      +	return nil
      +}
      +
      +func (m *KnownTypes) GetBytes() *google_protobuf3.BytesValue {
      +	if m != nil {
      +		return m.Bytes
      +	}
      +	return nil
      +}
      +
      +var E_Name = &proto.ExtensionDesc{
      +	ExtendedType:  (*Real)(nil),
      +	ExtensionType: (*string)(nil),
      +	Field:         124,
      +	Name:          "jsonpb.name",
      +	Tag:           "bytes,124,opt,name=name",
      +}
      +
      +func init() {
      +	proto.RegisterType((*Simple)(nil), "jsonpb.Simple")
      +	proto.RegisterType((*Repeats)(nil), "jsonpb.Repeats")
      +	proto.RegisterType((*Widget)(nil), "jsonpb.Widget")
      +	proto.RegisterType((*Maps)(nil), "jsonpb.Maps")
      +	proto.RegisterType((*MsgWithOneof)(nil), "jsonpb.MsgWithOneof")
      +	proto.RegisterType((*Real)(nil), "jsonpb.Real")
      +	proto.RegisterType((*Complex)(nil), "jsonpb.Complex")
      +	proto.RegisterType((*KnownTypes)(nil), "jsonpb.KnownTypes")
      +	proto.RegisterEnum("jsonpb.Widget_Color", Widget_Color_name, Widget_Color_value)
      +	proto.RegisterExtension(E_Complex_RealExtension)
      +	proto.RegisterExtension(E_Name)
      +}
      +
      +var fileDescriptor1 = []byte{
      +	// 1008 bytes of a gzipped FileDescriptorProto
      +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x95, 0xdf, 0x72, 0xdb, 0x44,
      +	0x14, 0xc6, 0x2b, 0xad, 0x25, 0xd9, 0xeb, 0x24, 0x98, 0x9d, 0x94, 0xaa, 0x26, 0x80, 0xc6, 0x03,
      +	0x45, 0x14, 0xea, 0x0e, 0x8a, 0xc7, 0xc3, 0x14, 0x6e, 0x48, 0x63, 0x28, 0x03, 0x29, 0x33, 0xeb,
      +	0x86, 0x5e, 0x7a, 0xe4, 0x78, 0x6d, 0x54, 0x64, 0xad, 0x67, 0x77, 0x45, 0xea, 0x81, 0x8b, 0x3c,
      +	0x04, 0xaf, 0x00, 0x8f, 0xc0, 0x25, 0xcf, 0xc6, 0x9c, 0x5d, 0xfd, 0x71, 0xec, 0xf8, 0x2a, 0x3e,
      +	0x3a, 0xdf, 0xf9, 0xb2, 0xfa, 0xed, 0xd1, 0x39, 0x98, 0x28, 0x26, 0xd5, 0x84, 0x4f, 0xdf, 0xb0,
      +	0x2b, 0x25, 0xfb, 0x2b, 0xc1, 0x15, 0x27, 0xee, 0x1b, 0xc9, 0xb3, 0xd5, 0xb4, 0xfb, 0xe1, 0x82,
      +	0xf3, 0x45, 0xca, 0x9e, 0xea, 0xa7, 0xd3, 0x7c, 0xfe, 0x74, 0x96, 0x8b, 0x58, 0x25, 0x3c, 0x33,
      +	0xba, 0xee, 0xc9, 0x76, 0x5e, 0x2a, 0x91, 0x5f, 0xa9, 0x22, 0xfb, 0xd1, 0x76, 0x56, 0x25, 0x4b,
      +	0x26, 0x55, 0xbc, 0x5c, 0x15, 0x82, 0x1d, 0xfb, 0x6b, 0x11, 0xaf, 0x56, 0x4c, 0x14, 0xc7, 0xe8,
      +	0xfd, 0x6d, 0x63, 0x77, 0x9c, 0x2c, 0x57, 0x29, 0x23, 0xf7, 0xb1, 0xcb, 0x27, 0x53, 0xce, 0x53,
      +	0xdf, 0x0a, 0xac, 0xb0, 0x49, 0x1d, 0x7e, 0xc6, 0x79, 0x4a, 0x1e, 0x60, 0x8f, 0x4f, 0x92, 0x4c,
      +	0x9d, 0x46, 0xbe, 0x1d, 0x58, 0xa1, 0x43, 0x5d, 0xfe, 0x03, 0x44, 0x55, 0x62, 0x38, 0xf0, 0x51,
      +	0x60, 0x85, 0xc8, 0x24, 0x86, 0x03, 0xf2, 0x10, 0x37, 0xf9, 0x24, 0x37, 0x25, 0x8d, 0xc0, 0x0a,
      +	0x0f, 0xa9, 0xc7, 0x2f, 0x75, 0x58, 0xa7, 0x86, 0x03, 0xdf, 0x09, 0xac, 0xb0, 0x51, 0xa4, 0xca,
      +	0x2a, 0x69, 0xaa, 0xdc, 0xc0, 0x0a, 0xdf, 0xa5, 0x1e, 0x1f, 0x6f, 0x54, 0x49, 0x53, 0xe5, 0x05,
      +	0x56, 0x48, 0x8a, 0xd4, 0x70, 0x60, 0x0e, 0x31, 0x4f, 0x79, 0xac, 0xfc, 0x66, 0x60, 0x85, 0x36,
      +	0x75, 0xf9, 0x77, 0x10, 0x99, 0x9a, 0x19, 0xcf, 0xa7, 0x29, 0xf3, 0x5b, 0x81, 0x15, 0x5a, 0xd4,
      +	0xe3, 0xe7, 0x3a, 0x2c, 0xec, 0x94, 0x48, 0xb2, 0x85, 0x8f, 0x03, 0x2b, 0x6c, 0x81, 0x9d, 0x0e,
      +	0x8d, 0xdd, 0x74, 0xad, 0x98, 0xf4, 0xdb, 0x81, 0x15, 0x1e, 0x50, 0x97, 0x9f, 0x41, 0xd4, 0xfb,
      +	0xc7, 0xc6, 0x1e, 0x65, 0x2b, 0x16, 0x2b, 0x09, 0xa0, 0x44, 0x09, 0x0a, 0x01, 0x28, 0x51, 0x82,
      +	0x12, 0x15, 0x28, 0x04, 0xa0, 0x44, 0x05, 0x4a, 0x54, 0xa0, 0x10, 0x80, 0x12, 0x15, 0x28, 0x51,
      +	0x83, 0x42, 0x00, 0x4a, 0xd4, 0xa0, 0x44, 0x0d, 0x0a, 0x01, 0x28, 0x51, 0x83, 0x12, 0x35, 0x28,
      +	0x04, 0xa0, 0xc4, 0x78, 0xa3, 0xaa, 0x02, 0x85, 0x00, 0x94, 0xa8, 0x41, 0x89, 0x0a, 0x14, 0x02,
      +	0x50, 0xa2, 0x02, 0x25, 0x6a, 0x50, 0x08, 0x40, 0x89, 0x1a, 0x94, 0xa8, 0x41, 0x21, 0x00, 0x25,
      +	0x6a, 0x50, 0xa2, 0x02, 0x85, 0x00, 0x94, 0x30, 0xa0, 0xfe, 0xb5, 0xb1, 0xfb, 0x3a, 0x99, 0x2d,
      +	0x98, 0x22, 0x8f, 0xb1, 0x73, 0xc5, 0x53, 0x2e, 0x74, 0x3f, 0x1d, 0x45, 0xc7, 0x7d, 0xd3, 0xf2,
      +	0x7d, 0x93, 0xee, 0x3f, 0x87, 0x1c, 0x35, 0x12, 0xf2, 0x04, 0xfc, 0x8c, 0x1a, 0xe0, 0xed, 0x53,
      +	0xbb, 0x42, 0xff, 0x25, 0x8f, 0xb0, 0x2b, 0x75, 0xd7, 0xea, 0x0b, 0x6c, 0x47, 0x47, 0xa5, 0xda,
      +	0xf4, 0x32, 0x2d, 0xb2, 0xe4, 0x33, 0x03, 0x44, 0x2b, 0xe1, 0x9c, 0xbb, 0x4a, 0x00, 0x54, 0x48,
      +	0x3d, 0x61, 0x2e, 0xd8, 0x3f, 0xd6, 0x9e, 0xef, 0x94, 0xca, 0xe2, 0xde, 0x69, 0x99, 0x27, 0x5f,
      +	0xe0, 0x96, 0x98, 0x94, 0xe2, 0xfb, 0xda, 0x76, 0x47, 0xdc, 0x14, 0xc5, 0xaf, 0xde, 0x27, 0xd8,
      +	0x31, 0x87, 0xf6, 0x30, 0xa2, 0xa3, 0xf3, 0xce, 0x3d, 0xd2, 0xc2, 0xce, 0xf7, 0x74, 0x34, 0x7a,
      +	0xd9, 0xb1, 0x48, 0x13, 0x37, 0xce, 0x7e, 0xba, 0x1c, 0x75, 0xec, 0xde, 0x5f, 0x36, 0x6e, 0x5c,
      +	0xc4, 0x2b, 0x49, 0xbe, 0xc6, 0xed, 0xa5, 0x69, 0x17, 0x60, 0xaf, 0x7b, 0xac, 0x1d, 0xbd, 0x5f,
      +	0xfa, 0x83, 0xa4, 0x7f, 0xa1, 0xfb, 0x67, 0xac, 0xc4, 0x28, 0x53, 0x62, 0x4d, 0x5b, 0xcb, 0x32,
      +	0x26, 0xdf, 0xe2, 0xc3, 0xa5, 0xee, 0xcd, 0xf2, 0xad, 0x6d, 0x5d, 0xfe, 0xc1, 0xed, 0x72, 0xe8,
      +	0x57, 0xf3, 0xda, 0xc6, 0xa0, 0xbd, 0xac, 0x9f, 0x74, 0xbf, 0xc1, 0x47, 0xb7, 0xfd, 0x49, 0x07,
      +	0xa3, 0xdf, 0xd8, 0x5a, 0x5f, 0x23, 0xa2, 0xf0, 0x93, 0x1c, 0x63, 0xe7, 0xf7, 0x38, 0xcd, 0x99,
      +	0x1e, 0x09, 0x2d, 0x6a, 0x82, 0x67, 0xf6, 0x57, 0x56, 0xf7, 0x25, 0xee, 0x6c, 0xdb, 0x6f, 0xd6,
      +	0x37, 0x4d, 0xfd, 0xc7, 0x9b, 0xf5, 0xbb, 0x97, 0x52, 0xfb, 0xf5, 0x18, 0x3e, 0xb8, 0x90, 0x8b,
      +	0xd7, 0x89, 0xfa, 0xf5, 0xe7, 0x8c, 0xf1, 0x39, 0x79, 0x0f, 0x3b, 0x2a, 0x51, 0x29, 0xd3, 0x6e,
      +	0xad, 0x17, 0xf7, 0xa8, 0x09, 0x89, 0x8f, 0x5d, 0x19, 0xa7, 0xb1, 0x58, 0x6b, 0x4b, 0xf4, 0xe2,
      +	0x1e, 0x2d, 0x62, 0xd2, 0xc5, 0xde, 0x73, 0x9e, 0xc3, 0x41, 0xf4, 0x9c, 0x82, 0x1a, 0xef, 0xca,
      +	0x3c, 0x38, 0xf3, 0xb0, 0x93, 0x67, 0x09, 0xcf, 0x7a, 0x8f, 0x70, 0x83, 0xb2, 0x38, 0xad, 0x5f,
      +	0xcc, 0xd2, 0x33, 0xc3, 0x04, 0x8f, 0x9b, 0xcd, 0x59, 0xe7, 0xe6, 0xe6, 0xe6, 0xc6, 0xee, 0x5d,
      +	0x83, 0x19, 0x9c, 0xf1, 0x2d, 0x39, 0xc1, 0xad, 0x64, 0x19, 0x2f, 0x92, 0x0c, 0xfe, 0xa9, 0x91,
      +	0xd7, 0x0f, 0xea, 0x92, 0xe8, 0x1c, 0x1f, 0x09, 0x16, 0xa7, 0x13, 0xf6, 0x56, 0xb1, 0x4c, 0x26,
      +	0x3c, 0x23, 0x07, 0x75, 0xb3, 0xc4, 0xa9, 0xff, 0xc7, 0xed, 0x6e, 0x2b, 0xec, 0xe9, 0x21, 0x14,
      +	0x8d, 0xca, 0x9a, 0xde, 0x7f, 0x0d, 0x8c, 0x7f, 0xcc, 0xf8, 0x75, 0xf6, 0x6a, 0xbd, 0x62, 0x92,
      +	0x7c, 0x8e, 0xd1, 0x2c, 0x37, 0x5f, 0x56, 0x3b, 0x7a, 0xd8, 0x37, 0x53, 0xbe, 0x5f, 0x4e, 0xf9,
      +	0xfe, 0x79, 0xb1, 0x44, 0x28, 0xa8, 0xc8, 0xa7, 0xd8, 0x96, 0xca, 0x3f, 0xd0, 0xda, 0x07, 0x3b,
      +	0xda, 0xb1, 0x5e, 0x28, 0xd4, 0x96, 0xf0, 0xc5, 0xda, 0x4a, 0x16, 0x77, 0xd2, 0xdd, 0x11, 0xbe,
      +	0x2a, 0x77, 0x0b, 0xb5, 0x95, 0x24, 0x7d, 0x8c, 0x66, 0xd3, 0x54, 0x23, 0x6d, 0x47, 0x27, 0xbb,
      +	0x27, 0xd0, 0x23, 0xe4, 0x17, 0xc0, 0x47, 0x41, 0x48, 0x9e, 0x60, 0x34, 0x4f, 0x95, 0x5e, 0x08,
      +	0xd0, 0xce, 0xdb, 0x7a, 0x3d, 0x8c, 0x0a, 0xf9, 0x3c, 0x55, 0x20, 0x4f, 0x8a, 0x25, 0x71, 0x97,
      +	0x5c, 0x37, 0x68, 0x21, 0x4f, 0x86, 0x03, 0x38, 0x4d, 0x3e, 0x1c, 0xe8, 0xc5, 0x71, 0xd7, 0x69,
      +	0x2e, 0x37, 0xf5, 0xf9, 0x70, 0xa0, 0xed, 0x4f, 0x23, 0xbd, 0x4d, 0xf6, 0xd8, 0x9f, 0x46, 0xa5,
      +	0xfd, 0x69, 0xa4, 0xed, 0x4f, 0x23, 0xbd, 0x62, 0xf6, 0xd9, 0x57, 0xfa, 0x5c, 0xeb, 0x1b, 0x7a,
      +	0x41, 0xb4, 0xf6, 0xa0, 0x84, 0x2f, 0xc4, 0xc8, 0xb5, 0x0e, 0xfc, 0xe1, 0x5b, 0xc7, 0x7b, 0xfc,
      +	0xcd, 0xd0, 0x2d, 0xfc, 0xa5, 0x12, 0xe4, 0x4b, 0xec, 0xd4, 0x5b, 0xea, 0xae, 0x17, 0xd0, 0xc3,
      +	0xd8, 0x14, 0x18, 0xe5, 0xb3, 0x00, 0x37, 0xb2, 0x78, 0xc9, 0xb6, 0x9a, 0xef, 0x4f, 0xfd, 0xfd,
      +	0xea, 0xcc, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x39, 0x6b, 0x15, 0x0e, 0xa7, 0x08, 0x00, 0x00,
      +}
      diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto
      new file mode 100644
      index 00000000..ea4a0483
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto
      @@ -0,0 +1,132 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2015 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto2";
      +
      +import "google/protobuf/duration.proto";
      +import "google/protobuf/struct.proto";
      +import "google/protobuf/timestamp.proto";
      +import "google/protobuf/wrappers.proto";
      +
      +package jsonpb;
      +
      +// Test message for holding primitive types.
      +message Simple {
      +  optional bool o_bool = 1;
      +  optional int32 o_int32 = 2;
      +  optional int64 o_int64 = 3;
      +  optional uint32 o_uint32 = 4;
      +  optional uint64 o_uint64 = 5;
      +  optional sint32 o_sint32 = 6;
      +  optional sint64 o_sint64 = 7;
      +  optional float o_float = 8;
      +  optional double o_double = 9;
      +  optional string o_string = 10;
      +  optional bytes o_bytes = 11;
      +}
      +
      +// Test message for holding repeated primitives.
      +message Repeats {
      +  repeated bool r_bool = 1;
      +  repeated int32 r_int32 = 2;
      +  repeated int64 r_int64 = 3;
      +  repeated uint32 r_uint32 = 4;
      +  repeated uint64 r_uint64 = 5;
      +  repeated sint32 r_sint32 = 6;
      +  repeated sint64 r_sint64 = 7;
      +  repeated float r_float = 8;
      +  repeated double r_double = 9;
      +  repeated string r_string = 10;
      +  repeated bytes r_bytes = 11;
      +}
      +
      +// Test message for holding enums and nested messages.
      +message Widget {
      +  enum Color {
      +    RED = 0;
      +    GREEN = 1;
      +    BLUE = 2;
      +  };
      +  optional Color color = 1;
      +  repeated Color r_color = 2;
      +
      +  optional Simple simple = 10;
      +  repeated Simple r_simple = 11;
      +
      +  optional Repeats repeats = 20;
      +  repeated Repeats r_repeats = 21;
      +}
      +
      +message Maps {
      +  map<int64, string> m_int64_str = 1;
      +  map<bool, Simple> m_bool_simple = 2;
      +}
      +
      +message MsgWithOneof {
      +  oneof union {
      +    string title = 1;
      +    int64 salary = 2;
      +    string Country = 3;
      +  }
      +}
      +
      +message Real {
      +  optional double value = 1;
      +  extensions 100 to max;
      +}
      +
      +extend Real {
      +  optional string name = 124;
      +}
      +
      +message Complex {
      +  extend Real {
      +    optional Complex real_extension = 123;
      +  }
      +  optional double imaginary = 1;
      +  extensions 100 to max;
      +}
      +
      +message KnownTypes {
      +  optional google.protobuf.Duration dur = 1;
      +  optional google.protobuf.Struct st = 12;
      +  optional google.protobuf.Timestamp ts = 2;
      +
      +  optional google.protobuf.DoubleValue dbl = 3;
      +  optional google.protobuf.FloatValue flt = 4;
      +  optional google.protobuf.Int64Value i64 = 5;
      +  optional google.protobuf.UInt64Value u64 = 6;
      +  optional google.protobuf.Int32Value i32 = 7;
      +  optional google.protobuf.UInt32Value u32 = 8;
      +  optional google.protobuf.BoolValue bool = 9;
      +  optional google.protobuf.StringValue str = 10;
      +  optional google.protobuf.BytesValue bytes = 11;
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile
      new file mode 100644
      index 00000000..a42cc371
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile
      @@ -0,0 +1,33 @@
      +# Go support for Protocol Buffers - Google's data interchange format
      +#
      +# Copyright 2010 The Go Authors.  All rights reserved.
      +# https://github.com/golang/protobuf
      +#
      +# Redistribution and use in source and binary forms, with or without
      +# modification, are permitted provided that the following conditions are
      +# met:
      +#
      +#     * Redistributions of source code must retain the above copyright
      +# notice, this list of conditions and the following disclaimer.
      +#     * Redistributions in binary form must reproduce the above
      +# copyright notice, this list of conditions and the following disclaimer
      +# in the documentation and/or other materials provided with the
      +# distribution.
      +#     * Neither the name of Google Inc. nor the names of its
      +# contributors may be used to endorse or promote products derived from
      +# this software without specific prior written permission.
      +#
      +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +test:
      +	cd testdata && make test
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
      new file mode 100644
      index 00000000..4942418e
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
      @@ -0,0 +1,39 @@
      +# Go support for Protocol Buffers - Google's data interchange format
      +#
      +# Copyright 2010 The Go Authors.  All rights reserved.
      +# https://github.com/golang/protobuf
      +#
      +# Redistribution and use in source and binary forms, with or without
      +# modification, are permitted provided that the following conditions are
      +# met:
      +#
      +#     * Redistributions of source code must retain the above copyright
      +# notice, this list of conditions and the following disclaimer.
      +#     * Redistributions in binary form must reproduce the above
      +# copyright notice, this list of conditions and the following disclaimer
      +# in the documentation and/or other materials provided with the
      +# distribution.
      +#     * Neither the name of Google Inc. nor the names of its
      +# contributors may be used to endorse or promote products derived from
      +# this software without specific prior written permission.
      +#
      +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/
      +# at src/google/protobuf/descriptor.proto
      +regenerate:
      +	echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
      +	protoc --go_out=. -I$(HOME)/src/protobuf/src $(HOME)/src/protobuf/src/google/protobuf/descriptor.proto && \
      +		sed 's,^package google_protobuf,package descriptor,' google/protobuf/descriptor.pb.go > \
      +		$(GOPATH)/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go && \
      +		rm -f google/protobuf/descriptor.pb.go
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
      new file mode 100644
      index 00000000..5849e309
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
      @@ -0,0 +1,2006 @@
      +// Code generated by protoc-gen-go.
      +// source: google/protobuf/descriptor.proto
      +// DO NOT EDIT!
      +
      +/*
      +Package descriptor is a generated protocol buffer package.
      +
      +It is generated from these files:
      +	google/protobuf/descriptor.proto
      +
      +It has these top-level messages:
      +	FileDescriptorSet
      +	FileDescriptorProto
      +	DescriptorProto
      +	FieldDescriptorProto
      +	OneofDescriptorProto
      +	EnumDescriptorProto
      +	EnumValueDescriptorProto
      +	ServiceDescriptorProto
      +	MethodDescriptorProto
      +	FileOptions
      +	MessageOptions
      +	FieldOptions
      +	EnumOptions
      +	EnumValueOptions
      +	ServiceOptions
      +	MethodOptions
      +	UninterpretedOption
      +	SourceCodeInfo
      +*/
      +package descriptor
      +
      +import proto "github.com/golang/protobuf/proto"
      +import fmt "fmt"
      +import math "math"
      +
      +// Reference imports to suppress errors if they are not otherwise used.
      +var _ = proto.Marshal
      +var _ = fmt.Errorf
      +var _ = math.Inf
      +
      +// This is a compile-time assertion to ensure that this generated file
      +// is compatible with the proto package it is being compiled against.
      +const _ = proto.ProtoPackageIsVersion1
      +
      +type FieldDescriptorProto_Type int32
      +
      +const (
      +	// 0 is reserved for errors.
      +	// Order is weird for historical reasons.
      +	FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
      +	FieldDescriptorProto_TYPE_FLOAT  FieldDescriptorProto_Type = 2
      +	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
      +	// negative values are likely.
      +	FieldDescriptorProto_TYPE_INT64  FieldDescriptorProto_Type = 3
      +	FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
      +	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
      +	// negative values are likely.
      +	FieldDescriptorProto_TYPE_INT32   FieldDescriptorProto_Type = 5
      +	FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
      +	FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
      +	FieldDescriptorProto_TYPE_BOOL    FieldDescriptorProto_Type = 8
      +	FieldDescriptorProto_TYPE_STRING  FieldDescriptorProto_Type = 9
      +	FieldDescriptorProto_TYPE_GROUP   FieldDescriptorProto_Type = 10
      +	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
      +	// New in version 2.
      +	FieldDescriptorProto_TYPE_BYTES    FieldDescriptorProto_Type = 12
      +	FieldDescriptorProto_TYPE_UINT32   FieldDescriptorProto_Type = 13
      +	FieldDescriptorProto_TYPE_ENUM     FieldDescriptorProto_Type = 14
      +	FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
      +	FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
      +	FieldDescriptorProto_TYPE_SINT32   FieldDescriptorProto_Type = 17
      +	FieldDescriptorProto_TYPE_SINT64   FieldDescriptorProto_Type = 18
      +)
      +
      +var FieldDescriptorProto_Type_name = map[int32]string{
      +	1:  "TYPE_DOUBLE",
      +	2:  "TYPE_FLOAT",
      +	3:  "TYPE_INT64",
      +	4:  "TYPE_UINT64",
      +	5:  "TYPE_INT32",
      +	6:  "TYPE_FIXED64",
      +	7:  "TYPE_FIXED32",
      +	8:  "TYPE_BOOL",
      +	9:  "TYPE_STRING",
      +	10: "TYPE_GROUP",
      +	11: "TYPE_MESSAGE",
      +	12: "TYPE_BYTES",
      +	13: "TYPE_UINT32",
      +	14: "TYPE_ENUM",
      +	15: "TYPE_SFIXED32",
      +	16: "TYPE_SFIXED64",
      +	17: "TYPE_SINT32",
      +	18: "TYPE_SINT64",
      +}
      +var FieldDescriptorProto_Type_value = map[string]int32{
      +	"TYPE_DOUBLE":   1,
      +	"TYPE_FLOAT":    2,
      +	"TYPE_INT64":    3,
      +	"TYPE_UINT64":   4,
      +	"TYPE_INT32":    5,
      +	"TYPE_FIXED64":  6,
      +	"TYPE_FIXED32":  7,
      +	"TYPE_BOOL":     8,
      +	"TYPE_STRING":   9,
      +	"TYPE_GROUP":    10,
      +	"TYPE_MESSAGE":  11,
      +	"TYPE_BYTES":    12,
      +	"TYPE_UINT32":   13,
      +	"TYPE_ENUM":     14,
      +	"TYPE_SFIXED32": 15,
      +	"TYPE_SFIXED64": 16,
      +	"TYPE_SINT32":   17,
      +	"TYPE_SINT64":   18,
      +}
      +
      +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
      +	p := new(FieldDescriptorProto_Type)
      +	*p = x
      +	return p
      +}
      +func (x FieldDescriptorProto_Type) String() string {
      +	return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
      +}
      +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
      +	if err != nil {
      +		return err
      +	}
      +	*x = FieldDescriptorProto_Type(value)
      +	return nil
      +}
      +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
      +
      +type FieldDescriptorProto_Label int32
      +
      +const (
      +	// 0 is reserved for errors
      +	FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
      +	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
      +	FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
      +)
      +
      +var FieldDescriptorProto_Label_name = map[int32]string{
      +	1: "LABEL_OPTIONAL",
      +	2: "LABEL_REQUIRED",
      +	3: "LABEL_REPEATED",
      +}
      +var FieldDescriptorProto_Label_value = map[string]int32{
      +	"LABEL_OPTIONAL": 1,
      +	"LABEL_REQUIRED": 2,
      +	"LABEL_REPEATED": 3,
      +}
      +
      +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
      +	p := new(FieldDescriptorProto_Label)
      +	*p = x
      +	return p
      +}
      +func (x FieldDescriptorProto_Label) String() string {
      +	return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
      +}
      +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
      +	if err != nil {
      +		return err
      +	}
      +	*x = FieldDescriptorProto_Label(value)
      +	return nil
      +}
      +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
      +	return fileDescriptor0, []int{3, 1}
      +}
      +
      +// Generated classes can be optimized for speed or code size.
      +type FileOptions_OptimizeMode int32
      +
      +const (
      +	FileOptions_SPEED FileOptions_OptimizeMode = 1
      +	// etc.
      +	FileOptions_CODE_SIZE    FileOptions_OptimizeMode = 2
      +	FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
      +)
      +
      +var FileOptions_OptimizeMode_name = map[int32]string{
      +	1: "SPEED",
      +	2: "CODE_SIZE",
      +	3: "LITE_RUNTIME",
      +}
      +var FileOptions_OptimizeMode_value = map[string]int32{
      +	"SPEED":        1,
      +	"CODE_SIZE":    2,
      +	"LITE_RUNTIME": 3,
      +}
      +
      +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
      +	p := new(FileOptions_OptimizeMode)
      +	*p = x
      +	return p
      +}
      +func (x FileOptions_OptimizeMode) String() string {
      +	return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
      +}
      +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
      +	if err != nil {
      +		return err
      +	}
      +	*x = FileOptions_OptimizeMode(value)
      +	return nil
      +}
      +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} }
      +
      +type FieldOptions_CType int32
      +
      +const (
      +	// Default mode.
      +	FieldOptions_STRING       FieldOptions_CType = 0
      +	FieldOptions_CORD         FieldOptions_CType = 1
      +	FieldOptions_STRING_PIECE FieldOptions_CType = 2
      +)
      +
      +var FieldOptions_CType_name = map[int32]string{
      +	0: "STRING",
      +	1: "CORD",
      +	2: "STRING_PIECE",
      +}
      +var FieldOptions_CType_value = map[string]int32{
      +	"STRING":       0,
      +	"CORD":         1,
      +	"STRING_PIECE": 2,
      +}
      +
      +func (x FieldOptions_CType) Enum() *FieldOptions_CType {
      +	p := new(FieldOptions_CType)
      +	*p = x
      +	return p
      +}
      +func (x FieldOptions_CType) String() string {
      +	return proto.EnumName(FieldOptions_CType_name, int32(x))
      +}
      +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
      +	if err != nil {
      +		return err
      +	}
      +	*x = FieldOptions_CType(value)
      +	return nil
      +}
      +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} }
      +
      +type FieldOptions_JSType int32
      +
      +const (
      +	// Use the default type.
      +	FieldOptions_JS_NORMAL FieldOptions_JSType = 0
      +	// Use JavaScript strings.
      +	FieldOptions_JS_STRING FieldOptions_JSType = 1
      +	// Use JavaScript numbers.
      +	FieldOptions_JS_NUMBER FieldOptions_JSType = 2
      +)
      +
      +var FieldOptions_JSType_name = map[int32]string{
      +	0: "JS_NORMAL",
      +	1: "JS_STRING",
      +	2: "JS_NUMBER",
      +}
      +var FieldOptions_JSType_value = map[string]int32{
      +	"JS_NORMAL": 0,
      +	"JS_STRING": 1,
      +	"JS_NUMBER": 2,
      +}
      +
      +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
      +	p := new(FieldOptions_JSType)
      +	*p = x
      +	return p
      +}
      +func (x FieldOptions_JSType) String() string {
      +	return proto.EnumName(FieldOptions_JSType_name, int32(x))
      +}
      +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
      +	if err != nil {
      +		return err
      +	}
      +	*x = FieldOptions_JSType(value)
      +	return nil
      +}
      +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 1} }
      +
      +// The protocol compiler can output a FileDescriptorSet containing the .proto
      +// files it parses.
      +type FileDescriptorSet struct {
      +	File             []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
      +	XXX_unrecognized []byte                 `json:"-"`
      +}
      +
      +func (m *FileDescriptorSet) Reset()                    { *m = FileDescriptorSet{} }
      +func (m *FileDescriptorSet) String() string            { return proto.CompactTextString(m) }
      +func (*FileDescriptorSet) ProtoMessage()               {}
      +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
      +
      +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
      +	if m != nil {
      +		return m.File
      +	}
      +	return nil
      +}
      +
      +// Describes a complete .proto file.
      +type FileDescriptorProto struct {
      +	Name    *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
      +	Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
      +	// Names of files imported by this file.
      +	Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
      +	// Indexes of the public imported files in the dependency list above.
      +	PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
      +	// Indexes of the weak imported files in the dependency list.
      +	// For Google-internal migration only. Do not use.
      +	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
      +	// All top-level definitions in this file.
      +	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
      +	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
      +	Service     []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
      +	Extension   []*FieldDescriptorProto   `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
      +	Options     *FileOptions              `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
      +	// This field contains optional information about the original source code.
      +	// You may safely remove this entire field without harming runtime
      +	// functionality of the descriptors -- the information is needed only by
      +	// development tools.
      +	SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
      +	// The syntax of the proto file.
      +	// The supported values are "proto2" and "proto3".
      +	Syntax           *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
      +	XXX_unrecognized []byte  `json:"-"`
      +}
      +
      +func (m *FileDescriptorProto) Reset()                    { *m = FileDescriptorProto{} }
      +func (m *FileDescriptorProto) String() string            { return proto.CompactTextString(m) }
      +func (*FileDescriptorProto) ProtoMessage()               {}
      +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
      +
      +func (m *FileDescriptorProto) GetName() string {
      +	if m != nil && m.Name != nil {
      +		return *m.Name
      +	}
      +	return ""
      +}
      +
      +func (m *FileDescriptorProto) GetPackage() string {
      +	if m != nil && m.Package != nil {
      +		return *m.Package
      +	}
      +	return ""
      +}
      +
      +func (m *FileDescriptorProto) GetDependency() []string {
      +	if m != nil {
      +		return m.Dependency
      +	}
      +	return nil
      +}
      +
      +func (m *FileDescriptorProto) GetPublicDependency() []int32 {
      +	if m != nil {
      +		return m.PublicDependency
      +	}
      +	return nil
      +}
      +
      +func (m *FileDescriptorProto) GetWeakDependency() []int32 {
      +	if m != nil {
      +		return m.WeakDependency
      +	}
      +	return nil
      +}
      +
      +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
      +	if m != nil {
      +		return m.MessageType
      +	}
      +	return nil
      +}
      +
      +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
      +	if m != nil {
      +		return m.EnumType
      +	}
      +	return nil
      +}
      +
      +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
      +	if m != nil {
      +		return m.Service
      +	}
      +	return nil
      +}
      +
      +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
      +	if m != nil {
      +		return m.Extension
      +	}
      +	return nil
      +}
      +
      +func (m *FileDescriptorProto) GetOptions() *FileOptions {
      +	if m != nil {
      +		return m.Options
      +	}
      +	return nil
      +}
      +
      +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
      +	if m != nil {
      +		return m.SourceCodeInfo
      +	}
      +	return nil
      +}
      +
      +func (m *FileDescriptorProto) GetSyntax() string {
      +	if m != nil && m.Syntax != nil {
      +		return *m.Syntax
      +	}
      +	return ""
      +}
      +
      +// Describes a message type.
      +type DescriptorProto struct {
      +	Name           *string                           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
      +	Field          []*FieldDescriptorProto           `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
      +	Extension      []*FieldDescriptorProto           `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
      +	NestedType     []*DescriptorProto                `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
      +	EnumType       []*EnumDescriptorProto            `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
      +	ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
      +	OneofDecl      []*OneofDescriptorProto           `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
      +	Options        *MessageOptions                   `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
      +	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
      +	// Reserved field names, which may not be used by fields in the same message.
      +	// A given name may only be reserved once.
      +	ReservedName     []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
      +	XXX_unrecognized []byte   `json:"-"`
      +}
      +
      +func (m *DescriptorProto) Reset()                    { *m = DescriptorProto{} }
      +func (m *DescriptorProto) String() string            { return proto.CompactTextString(m) }
      +func (*DescriptorProto) ProtoMessage()               {}
      +func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
      +
      +func (m *DescriptorProto) GetName() string {
      +	if m != nil && m.Name != nil {
      +		return *m.Name
      +	}
      +	return ""
      +}
      +
      +func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
      +	if m != nil {
      +		return m.Field
      +	}
      +	return nil
      +}
      +
      +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
      +	if m != nil {
      +		return m.Extension
      +	}
      +	return nil
      +}
      +
      +func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
      +	if m != nil {
      +		return m.NestedType
      +	}
      +	return nil
      +}
      +
      +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
      +	if m != nil {
      +		return m.EnumType
      +	}
      +	return nil
      +}
      +
      +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
      +	if m != nil {
      +		return m.ExtensionRange
      +	}
      +	return nil
      +}
      +
      +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
      +	if m != nil {
      +		return m.OneofDecl
      +	}
      +	return nil
      +}
      +
      +func (m *DescriptorProto) GetOptions() *MessageOptions {
      +	if m != nil {
      +		return m.Options
      +	}
      +	return nil
      +}
      +
      +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
      +	if m != nil {
      +		return m.ReservedRange
      +	}
      +	return nil
      +}
      +
      +func (m *DescriptorProto) GetReservedName() []string {
      +	if m != nil {
      +		return m.ReservedName
      +	}
      +	return nil
      +}
      +
      +type DescriptorProto_ExtensionRange struct {
      +	Start            *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
      +	End              *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
      +	XXX_unrecognized []byte `json:"-"`
      +}
      +
      +func (m *DescriptorProto_ExtensionRange) Reset()         { *m = DescriptorProto_ExtensionRange{} }
      +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
      +func (*DescriptorProto_ExtensionRange) ProtoMessage()    {}
      +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
      +	return fileDescriptor0, []int{2, 0}
      +}
      +
      +func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
      +	if m != nil && m.Start != nil {
      +		return *m.Start
      +	}
      +	return 0
      +}
      +
      +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
      +	if m != nil && m.End != nil {
      +		return *m.End
      +	}
      +	return 0
      +}
      +
      +// Range of reserved tag numbers. Reserved tag numbers may not be used by
      +// fields or extension ranges in the same message. Reserved ranges may
      +// not overlap.
      +type DescriptorProto_ReservedRange struct {
      +	Start            *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
      +	End              *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
      +	XXX_unrecognized []byte `json:"-"`
      +}
      +
      +func (m *DescriptorProto_ReservedRange) Reset()         { *m = DescriptorProto_ReservedRange{} }
      +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
      +func (*DescriptorProto_ReservedRange) ProtoMessage()    {}
      +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
      +	return fileDescriptor0, []int{2, 1}
      +}
      +
      +func (m *DescriptorProto_ReservedRange) GetStart() int32 {
      +	if m != nil && m.Start != nil {
      +		return *m.Start
      +	}
      +	return 0
      +}
      +
      +func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
      +	if m != nil && m.End != nil {
      +		return *m.End
      +	}
      +	return 0
      +}
      +
      +// Describes a field within a message.
      +type FieldDescriptorProto struct {
      +	Name   *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
      +	Number *int32                      `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
      +	Label  *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
      +	// If type_name is set, this need not be set.  If both this and type_name
      +	// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
      +	Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
      +	// For message and enum types, this is the name of the type.  If the name
      +	// starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
      +	// rules are used to find the type (i.e. first the nested types within this
      +	// message are searched, then within the parent, on up to the root
      +	// namespace).
      +	TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
      +	// For extensions, this is the name of the type being extended.  It is
      +	// resolved in the same manner as type_name.
      +	Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
      +	// For numeric types, contains the original text representation of the value.
      +	// For booleans, "true" or "false".
      +	// For strings, contains the default text contents (not escaped in any way).
      +	// For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
      +	// TODO(kenton):  Base-64 encode?
      +	DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
      +	// If set, gives the index of a oneof in the containing type's oneof_decl
      +	// list.  This field is a member of that oneof.
      +	OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
      +	// JSON name of this field. The value is set by protocol compiler. If the
      +	// user has set a "json_name" option on this field, that option's value
      +	// will be used. Otherwise, it's deduced from the field's name by converting
      +	// it to camelCase.
      +	JsonName         *string       `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
      +	Options          *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
      +	XXX_unrecognized []byte        `json:"-"`
      +}
      +
      +func (m *FieldDescriptorProto) Reset()                    { *m = FieldDescriptorProto{} }
      +func (m *FieldDescriptorProto) String() string            { return proto.CompactTextString(m) }
      +func (*FieldDescriptorProto) ProtoMessage()               {}
      +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
      +
      +func (m *FieldDescriptorProto) GetName() string {
      +	if m != nil && m.Name != nil {
      +		return *m.Name
      +	}
      +	return ""
      +}
      +
      +func (m *FieldDescriptorProto) GetNumber() int32 {
      +	if m != nil && m.Number != nil {
      +		return *m.Number
      +	}
      +	return 0
      +}
      +
      +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
      +	if m != nil && m.Label != nil {
      +		return *m.Label
      +	}
      +	return FieldDescriptorProto_LABEL_OPTIONAL
      +}
      +
      +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
      +	if m != nil && m.Type != nil {
      +		return *m.Type
      +	}
      +	return FieldDescriptorProto_TYPE_DOUBLE
      +}
      +
      +func (m *FieldDescriptorProto) GetTypeName() string {
      +	if m != nil && m.TypeName != nil {
      +		return *m.TypeName
      +	}
      +	return ""
      +}
      +
      +func (m *FieldDescriptorProto) GetExtendee() string {
      +	if m != nil && m.Extendee != nil {
      +		return *m.Extendee
      +	}
      +	return ""
      +}
      +
      +func (m *FieldDescriptorProto) GetDefaultValue() string {
      +	if m != nil && m.DefaultValue != nil {
      +		return *m.DefaultValue
      +	}
      +	return ""
      +}
      +
      +func (m *FieldDescriptorProto) GetOneofIndex() int32 {
      +	if m != nil && m.OneofIndex != nil {
      +		return *m.OneofIndex
      +	}
      +	return 0
      +}
      +
      +func (m *FieldDescriptorProto) GetJsonName() string {
      +	if m != nil && m.JsonName != nil {
      +		return *m.JsonName
      +	}
      +	return ""
      +}
      +
      +func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
      +	if m != nil {
      +		return m.Options
      +	}
      +	return nil
      +}
      +
      +// Describes a oneof.
      +type OneofDescriptorProto struct {
      +	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
      +	XXX_unrecognized []byte  `json:"-"`
      +}
      +
      +func (m *OneofDescriptorProto) Reset()                    { *m = OneofDescriptorProto{} }
      +func (m *OneofDescriptorProto) String() string            { return proto.CompactTextString(m) }
      +func (*OneofDescriptorProto) ProtoMessage()               {}
      +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
      +
      +func (m *OneofDescriptorProto) GetName() string {
      +	if m != nil && m.Name != nil {
      +		return *m.Name
      +	}
      +	return ""
      +}
      +
      +// Describes an enum type.
      +type EnumDescriptorProto struct {
      +	Name             *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
      +	Value            []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
      +	Options          *EnumOptions                `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
      +	XXX_unrecognized []byte                      `json:"-"`
      +}
      +
      +func (m *EnumDescriptorProto) Reset()                    { *m = EnumDescriptorProto{} }
      +func (m *EnumDescriptorProto) String() string            { return proto.CompactTextString(m) }
      +func (*EnumDescriptorProto) ProtoMessage()               {}
      +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
      +
      +func (m *EnumDescriptorProto) GetName() string {
      +	if m != nil && m.Name != nil {
      +		return *m.Name
      +	}
      +	return ""
      +}
      +
      +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
      +	if m != nil {
      +		return m.Value
      +	}
      +	return nil
      +}
      +
      +func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
      +	if m != nil {
      +		return m.Options
      +	}
      +	return nil
      +}
      +
      +// Describes a value within an enum.
      +type EnumValueDescriptorProto struct {
      +	Name             *string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
      +	Number           *int32            `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
      +	Options          *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
      +	XXX_unrecognized []byte            `json:"-"`
      +}
      +
      +func (m *EnumValueDescriptorProto) Reset()                    { *m = EnumValueDescriptorProto{} }
      +func (m *EnumValueDescriptorProto) String() string            { return proto.CompactTextString(m) }
      +func (*EnumValueDescriptorProto) ProtoMessage()               {}
      +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
      +
      +func (m *EnumValueDescriptorProto) GetName() string {
      +	if m != nil && m.Name != nil {
      +		return *m.Name
      +	}
      +	return ""
      +}
      +
      +func (m *EnumValueDescriptorProto) GetNumber() int32 {
      +	if m != nil && m.Number != nil {
      +		return *m.Number
      +	}
      +	return 0
      +}
      +
      +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
      +	if m != nil {
      +		return m.Options
      +	}
      +	return nil
      +}
      +
      +// Describes a service.
      +type ServiceDescriptorProto struct {
      +	Name             *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
      +	Method           []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
      +	Options          *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
      +	XXX_unrecognized []byte                   `json:"-"`
      +}
      +
      +func (m *ServiceDescriptorProto) Reset()                    { *m = ServiceDescriptorProto{} }
      +func (m *ServiceDescriptorProto) String() string            { return proto.CompactTextString(m) }
      +func (*ServiceDescriptorProto) ProtoMessage()               {}
      +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
      +
      +func (m *ServiceDescriptorProto) GetName() string {
      +	if m != nil && m.Name != nil {
      +		return *m.Name
      +	}
      +	return ""
      +}
      +
      +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
      +	if m != nil {
      +		return m.Method
      +	}
      +	return nil
      +}
      +
      +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
      +	if m != nil {
      +		return m.Options
      +	}
      +	return nil
      +}
      +
      +// Describes a method of a service.
      +type MethodDescriptorProto struct {
      +	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
      +	// Input and output type names.  These are resolved in the same way as
      +	// FieldDescriptorProto.type_name, but must refer to a message type.
      +	InputType  *string        `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
      +	OutputType *string        `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
      +	Options    *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
      +	// Identifies if client streams multiple client messages
      +	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
      +	// Identifies if server streams multiple server messages
      +	ServerStreaming  *bool  `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
      +	XXX_unrecognized []byte `json:"-"`
      +}
      +
      +func (m *MethodDescriptorProto) Reset()                    { *m = MethodDescriptorProto{} }
      +func (m *MethodDescriptorProto) String() string            { return proto.CompactTextString(m) }
      +func (*MethodDescriptorProto) ProtoMessage()               {}
      +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
      +
      +const Default_MethodDescriptorProto_ClientStreaming bool = false
      +const Default_MethodDescriptorProto_ServerStreaming bool = false
      +
      +func (m *MethodDescriptorProto) GetName() string {
      +	if m != nil && m.Name != nil {
      +		return *m.Name
      +	}
      +	return ""
      +}
      +
      +func (m *MethodDescriptorProto) GetInputType() string {
      +	if m != nil && m.InputType != nil {
      +		return *m.InputType
      +	}
      +	return ""
      +}
      +
      +func (m *MethodDescriptorProto) GetOutputType() string {
      +	if m != nil && m.OutputType != nil {
      +		return *m.OutputType
      +	}
      +	return ""
      +}
      +
      +func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
      +	if m != nil {
      +		return m.Options
      +	}
      +	return nil
      +}
      +
      +func (m *MethodDescriptorProto) GetClientStreaming() bool {
      +	if m != nil && m.ClientStreaming != nil {
      +		return *m.ClientStreaming
      +	}
      +	return Default_MethodDescriptorProto_ClientStreaming
      +}
      +
      +func (m *MethodDescriptorProto) GetServerStreaming() bool {
      +	if m != nil && m.ServerStreaming != nil {
      +		return *m.ServerStreaming
      +	}
      +	return Default_MethodDescriptorProto_ServerStreaming
      +}
      +
      +type FileOptions struct {
      +	// Sets the Java package where classes generated from this .proto will be
      +	// placed.  By default, the proto package is used, but this is often
      +	// inappropriate because proto packages do not normally start with backwards
      +	// domain names.
      +	JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
      +	// If set, all the classes from the .proto file are wrapped in a single
      +	// outer class with the given name.  This applies to both Proto1
      +	// (equivalent to the old "--one_java_file" option) and Proto2 (where
      +	// a .proto always translates to a single class, but you may want to
      +	// explicitly choose the class name).
      +	JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
      +	// If set true, then the Java code generator will generate a separate .java
      +	// file for each top-level message, enum, and service defined in the .proto
      +	// file.  Thus, these types will *not* be nested inside the outer class
      +	// named by java_outer_classname.  However, the outer class will still be
      +	// generated to contain the file's getDescriptor() method as well as any
      +	// top-level extensions defined in the file.
      +	JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
      +	// If set true, then the Java code generator will generate equals() and
      +	// hashCode() methods for all messages defined in the .proto file.
      +	// This increases generated code size, potentially substantially for large
      +	// protos, which may harm a memory-constrained application.
      +	// - In the full runtime this is a speed optimization, as the
      +	// AbstractMessage base class includes reflection-based implementations of
      +	// these methods.
      +	// - In the lite runtime, setting this option changes the semantics of
      +	// equals() and hashCode() to more closely match those of the full runtime;
      +	// the generated methods compute their results based on field values rather
      +	// than object identity. (Implementations should not assume that hashcodes
      +	// will be consistent across runtimes or versions of the protocol compiler.)
      +	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"`
      +	// If set true, then the Java2 code generator will generate code that
      +	// throws an exception whenever an attempt is made to assign a non-UTF-8
      +	// byte sequence to a string field.
      +	// Message reflection will do the same.
      +	// However, an extension field still accepts non-UTF-8 byte sequences.
+	// This option has no effect when used with the lite runtime.
      +	JavaStringCheckUtf8 *bool                     `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
      +	OptimizeFor         *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
      +	// Sets the Go package where structs generated from this .proto will be
      +	// placed. If omitted, the Go package will be derived from the following:
      +	//   - The basename of the package import path, if provided.
      +	//   - Otherwise, the package statement in the .proto file, if present.
      +	//   - Otherwise, the basename of the .proto file, without extension.
      +	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
      +	// Should generic services be generated in each language?  "Generic" services
      +	// are not specific to any particular RPC system.  They are generated by the
      +	// main code generators in each language (without additional plugins).
      +	// Generic services were the only kind of service generation supported by
      +	// early versions of google.protobuf.
      +	//
      +	// Generic services are now considered deprecated in favor of using plugins
      +	// that generate code specific to your particular RPC system.  Therefore,
      +	// these default to false.  Old code which depends on generic services should
      +	// explicitly set them to true.
      +	CcGenericServices   *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
      +	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
      +	PyGenericServices   *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
      +	// Is this file deprecated?
      +	// Depending on the target platform, this can emit Deprecated annotations
      +	// for everything in the file, or it will be completely ignored; in the very
      +	// least, this is a formalization for deprecating files.
      +	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
      +	// Enables the use of arenas for the proto messages in this file. This applies
      +	// only to generated classes for C++.
      +	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
      +	// Sets the objective c class prefix which is prepended to all objective c
      +	// generated classes from this .proto. There is no default.
      +	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
      +	// Namespace for generated classes; defaults to the package.
      +	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
      +	// Whether the nano proto compiler should generate in the deprecated non-nano
      +	// suffixed package.
      +	JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package,json=javananoUseDeprecatedPackage" json:"javanano_use_deprecated_package,omitempty"`
      +	// The parser stores options it doesn't recognize here. See above.
      +	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
      +	XXX_extensions      map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized    []byte                    `json:"-"`
      +}
      +
      +func (m *FileOptions) Reset()                    { *m = FileOptions{} }
      +func (m *FileOptions) String() string            { return proto.CompactTextString(m) }
      +func (*FileOptions) ProtoMessage()               {}
      +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
      +
      +var extRange_FileOptions = []proto.ExtensionRange{
      +	{1000, 536870911},
      +}
      +
      +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_FileOptions
      +}
      +func (m *FileOptions) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +const Default_FileOptions_JavaMultipleFiles bool = false
      +const Default_FileOptions_JavaGenerateEqualsAndHash bool = false
      +const Default_FileOptions_JavaStringCheckUtf8 bool = false
      +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
      +const Default_FileOptions_CcGenericServices bool = false
      +const Default_FileOptions_JavaGenericServices bool = false
      +const Default_FileOptions_PyGenericServices bool = false
      +const Default_FileOptions_Deprecated bool = false
      +const Default_FileOptions_CcEnableArenas bool = false
      +
      +func (m *FileOptions) GetJavaPackage() string {
      +	if m != nil && m.JavaPackage != nil {
      +		return *m.JavaPackage
      +	}
      +	return ""
      +}
      +
      +func (m *FileOptions) GetJavaOuterClassname() string {
      +	if m != nil && m.JavaOuterClassname != nil {
      +		return *m.JavaOuterClassname
      +	}
      +	return ""
      +}
      +
      +func (m *FileOptions) GetJavaMultipleFiles() bool {
      +	if m != nil && m.JavaMultipleFiles != nil {
      +		return *m.JavaMultipleFiles
      +	}
      +	return Default_FileOptions_JavaMultipleFiles
      +}
      +
      +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
      +	if m != nil && m.JavaGenerateEqualsAndHash != nil {
      +		return *m.JavaGenerateEqualsAndHash
      +	}
      +	return Default_FileOptions_JavaGenerateEqualsAndHash
      +}
      +
      +func (m *FileOptions) GetJavaStringCheckUtf8() bool {
      +	if m != nil && m.JavaStringCheckUtf8 != nil {
      +		return *m.JavaStringCheckUtf8
      +	}
      +	return Default_FileOptions_JavaStringCheckUtf8
      +}
      +
      +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
      +	if m != nil && m.OptimizeFor != nil {
      +		return *m.OptimizeFor
      +	}
      +	return Default_FileOptions_OptimizeFor
      +}
      +
      +func (m *FileOptions) GetGoPackage() string {
      +	if m != nil && m.GoPackage != nil {
      +		return *m.GoPackage
      +	}
      +	return ""
      +}
      +
      +func (m *FileOptions) GetCcGenericServices() bool {
      +	if m != nil && m.CcGenericServices != nil {
      +		return *m.CcGenericServices
      +	}
      +	return Default_FileOptions_CcGenericServices
      +}
      +
      +func (m *FileOptions) GetJavaGenericServices() bool {
      +	if m != nil && m.JavaGenericServices != nil {
      +		return *m.JavaGenericServices
      +	}
      +	return Default_FileOptions_JavaGenericServices
      +}
      +
      +func (m *FileOptions) GetPyGenericServices() bool {
      +	if m != nil && m.PyGenericServices != nil {
      +		return *m.PyGenericServices
      +	}
      +	return Default_FileOptions_PyGenericServices
      +}
      +
      +func (m *FileOptions) GetDeprecated() bool {
      +	if m != nil && m.Deprecated != nil {
      +		return *m.Deprecated
      +	}
      +	return Default_FileOptions_Deprecated
      +}
      +
      +func (m *FileOptions) GetCcEnableArenas() bool {
      +	if m != nil && m.CcEnableArenas != nil {
      +		return *m.CcEnableArenas
      +	}
      +	return Default_FileOptions_CcEnableArenas
      +}
      +
      +func (m *FileOptions) GetObjcClassPrefix() string {
      +	if m != nil && m.ObjcClassPrefix != nil {
      +		return *m.ObjcClassPrefix
      +	}
      +	return ""
      +}
      +
      +func (m *FileOptions) GetCsharpNamespace() string {
      +	if m != nil && m.CsharpNamespace != nil {
      +		return *m.CsharpNamespace
      +	}
      +	return ""
      +}
      +
      +func (m *FileOptions) GetJavananoUseDeprecatedPackage() bool {
      +	if m != nil && m.JavananoUseDeprecatedPackage != nil {
      +		return *m.JavananoUseDeprecatedPackage
      +	}
      +	return false
      +}
      +
      +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
      +	if m != nil {
      +		return m.UninterpretedOption
      +	}
      +	return nil
      +}
      +
      +type MessageOptions struct {
      +	// Set true to use the old proto1 MessageSet wire format for extensions.
      +	// This is provided for backwards-compatibility with the MessageSet wire
      +	// format.  You should not use this for any other reason:  It's less
      +	// efficient, has fewer features, and is more complicated.
      +	//
      +	// The message must be defined exactly as follows:
      +	//   message Foo {
      +	//     option message_set_wire_format = true;
      +	//     extensions 4 to max;
      +	//   }
      +	// Note that the message cannot have any defined fields; MessageSets only
      +	// have extensions.
      +	//
      +	// All extensions of your type must be singular messages; e.g. they cannot
      +	// be int32s, enums, or repeated messages.
      +	//
      +	// Because this is an option, the above two restrictions are not enforced by
      +	// the protocol compiler.
      +	MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
      +	// Disables the generation of the standard "descriptor()" accessor, which can
      +	// conflict with a field of the same name.  This is meant to make migration
      +	// from proto1 easier; new code should avoid fields named "descriptor".
      +	NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
      +	// Is this message deprecated?
      +	// Depending on the target platform, this can emit Deprecated annotations
      +	// for the message, or it will be completely ignored; in the very least,
      +	// this is a formalization for deprecating messages.
      +	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
      +	// Whether the message is an automatically generated map entry type for the
      +	// maps field.
      +	//
      +	// For maps fields:
      +	//     map<KeyType, ValueType> map_field = 1;
      +	// The parsed descriptor looks like:
      +	//     message MapFieldEntry {
      +	//         option map_entry = true;
      +	//         optional KeyType key = 1;
      +	//         optional ValueType value = 2;
      +	//     }
      +	//     repeated MapFieldEntry map_field = 1;
      +	//
      +	// Implementations may choose not to generate the map_entry=true message, but
      +	// use a native map in the target language to hold the keys and values.
+	// The reflection APIs in such implementations still need to work as
      +	// if the field is a repeated message field.
      +	//
      +	// NOTE: Do not set the option in .proto files. Always use the maps syntax
      +	// instead. The option should only be implicitly set by the proto compiler
      +	// parser.
      +	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
      +	// The parser stores options it doesn't recognize here. See above.
      +	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
      +	XXX_extensions      map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized    []byte                    `json:"-"`
      +}
      +
      +func (m *MessageOptions) Reset()                    { *m = MessageOptions{} }
      +func (m *MessageOptions) String() string            { return proto.CompactTextString(m) }
      +func (*MessageOptions) ProtoMessage()               {}
      +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
      +
      +var extRange_MessageOptions = []proto.ExtensionRange{
      +	{1000, 536870911},
      +}
      +
      +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_MessageOptions
      +}
      +func (m *MessageOptions) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +const Default_MessageOptions_MessageSetWireFormat bool = false
      +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
      +const Default_MessageOptions_Deprecated bool = false
      +
      +func (m *MessageOptions) GetMessageSetWireFormat() bool {
      +	if m != nil && m.MessageSetWireFormat != nil {
      +		return *m.MessageSetWireFormat
      +	}
      +	return Default_MessageOptions_MessageSetWireFormat
      +}
      +
      +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
      +	if m != nil && m.NoStandardDescriptorAccessor != nil {
      +		return *m.NoStandardDescriptorAccessor
      +	}
      +	return Default_MessageOptions_NoStandardDescriptorAccessor
      +}
      +
      +func (m *MessageOptions) GetDeprecated() bool {
      +	if m != nil && m.Deprecated != nil {
      +		return *m.Deprecated
      +	}
      +	return Default_MessageOptions_Deprecated
      +}
      +
      +func (m *MessageOptions) GetMapEntry() bool {
      +	if m != nil && m.MapEntry != nil {
      +		return *m.MapEntry
      +	}
      +	return false
      +}
      +
      +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
      +	if m != nil {
      +		return m.UninterpretedOption
      +	}
      +	return nil
      +}
      +
      +type FieldOptions struct {
      +	// The ctype option instructs the C++ code generator to use a different
      +	// representation of the field than it normally would.  See the specific
      +	// options below.  This option is not yet implemented in the open source
      +	// release -- sorry, we'll try to include it in a future version!
      +	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
      +	// The packed option can be enabled for repeated primitive fields to enable
      +	// a more efficient representation on the wire. Rather than repeatedly
      +	// writing the tag and type for each element, the entire array is encoded as
      +	// a single length-delimited blob. In proto3, only explicit setting it to
      +	// false will avoid using packed encoding.
      +	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
      +	// The jstype option determines the JavaScript type used for values of the
      +	// field.  The option is permitted only for 64 bit integral and fixed types
      +	// (int64, uint64, sint64, fixed64, sfixed64).  By default these types are
      +	// represented as JavaScript strings.  This avoids loss of precision that can
      +	// happen when a large value is converted to a floating point JavaScript
+	// number.  Specifying JS_NUMBER for the jstype causes the generated
      +	// JavaScript code to use the JavaScript "number" type instead of strings.
      +	// This option is an enum to permit additional types to be added,
      +	// e.g. goog.math.Integer.
      +	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
      +	// Should this field be parsed lazily?  Lazy applies only to message-type
      +	// fields.  It means that when the outer message is initially parsed, the
      +	// inner message's contents will not be parsed but instead stored in encoded
      +	// form.  The inner message will actually be parsed when it is first accessed.
      +	//
      +	// This is only a hint.  Implementations are free to choose whether to use
      +	// eager or lazy parsing regardless of the value of this option.  However,
      +	// setting this option true suggests that the protocol author believes that
      +	// using lazy parsing on this field is worth the additional bookkeeping
      +	// overhead typically needed to implement it.
      +	//
      +	// This option does not affect the public interface of any generated code;
      +	// all method signatures remain the same.  Furthermore, thread-safety of the
      +	// interface is not affected by this option; const methods remain safe to
      +	// call from multiple threads concurrently, while non-const methods continue
      +	// to require exclusive access.
      +	//
      +	//
      +	// Note that implementations may choose not to check required fields within
+	// a lazy sub-message.  That is, calling IsInitialized() on the outer message
      +	// may return true even if the inner message has missing required fields.
      +	// This is necessary because otherwise the inner message would have to be
      +	// parsed in order to perform the check, defeating the purpose of lazy
      +	// parsing.  An implementation which chooses not to check required fields
      +	// must be consistent about it.  That is, for any particular sub-message, the
      +	// implementation must either *always* check its required fields, or *never*
      +	// check its required fields, regardless of whether or not the message has
      +	// been parsed.
      +	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
      +	// Is this field deprecated?
      +	// Depending on the target platform, this can emit Deprecated annotations
      +	// for accessors, or it will be completely ignored; in the very least, this
      +	// is a formalization for deprecating fields.
      +	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
      +	// For Google-internal migration only. Do not use.
      +	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
      +	// The parser stores options it doesn't recognize here. See above.
      +	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
      +	XXX_extensions      map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized    []byte                    `json:"-"`
      +}
      +
      +func (m *FieldOptions) Reset()                    { *m = FieldOptions{} }
      +func (m *FieldOptions) String() string            { return proto.CompactTextString(m) }
      +func (*FieldOptions) ProtoMessage()               {}
      +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
      +
      +var extRange_FieldOptions = []proto.ExtensionRange{
      +	{1000, 536870911},
      +}
      +
      +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_FieldOptions
      +}
      +func (m *FieldOptions) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
      +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
      +const Default_FieldOptions_Lazy bool = false
      +const Default_FieldOptions_Deprecated bool = false
      +const Default_FieldOptions_Weak bool = false
      +
      +func (m *FieldOptions) GetCtype() FieldOptions_CType {
      +	if m != nil && m.Ctype != nil {
      +		return *m.Ctype
      +	}
      +	return Default_FieldOptions_Ctype
      +}
      +
      +func (m *FieldOptions) GetPacked() bool {
      +	if m != nil && m.Packed != nil {
      +		return *m.Packed
      +	}
      +	return false
      +}
      +
      +func (m *FieldOptions) GetJstype() FieldOptions_JSType {
      +	if m != nil && m.Jstype != nil {
      +		return *m.Jstype
      +	}
      +	return Default_FieldOptions_Jstype
      +}
      +
      +func (m *FieldOptions) GetLazy() bool {
      +	if m != nil && m.Lazy != nil {
      +		return *m.Lazy
      +	}
      +	return Default_FieldOptions_Lazy
      +}
      +
      +func (m *FieldOptions) GetDeprecated() bool {
      +	if m != nil && m.Deprecated != nil {
      +		return *m.Deprecated
      +	}
      +	return Default_FieldOptions_Deprecated
      +}
      +
      +func (m *FieldOptions) GetWeak() bool {
      +	if m != nil && m.Weak != nil {
      +		return *m.Weak
      +	}
      +	return Default_FieldOptions_Weak
      +}
      +
      +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
      +	if m != nil {
      +		return m.UninterpretedOption
      +	}
      +	return nil
      +}
      +
      +type EnumOptions struct {
      +	// Set this option to true to allow mapping different tag names to the same
      +	// value.
      +	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
      +	// Is this enum deprecated?
      +	// Depending on the target platform, this can emit Deprecated annotations
      +	// for the enum, or it will be completely ignored; in the very least, this
      +	// is a formalization for deprecating enums.
      +	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
      +	// The parser stores options it doesn't recognize here. See above.
      +	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
      +	XXX_extensions      map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized    []byte                    `json:"-"`
      +}
      +
      +func (m *EnumOptions) Reset()                    { *m = EnumOptions{} }
      +func (m *EnumOptions) String() string            { return proto.CompactTextString(m) }
      +func (*EnumOptions) ProtoMessage()               {}
      +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
      +
      +var extRange_EnumOptions = []proto.ExtensionRange{
      +	{1000, 536870911},
      +}
      +
      +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_EnumOptions
      +}
      +func (m *EnumOptions) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +const Default_EnumOptions_Deprecated bool = false
      +
      +func (m *EnumOptions) GetAllowAlias() bool {
      +	if m != nil && m.AllowAlias != nil {
      +		return *m.AllowAlias
      +	}
      +	return false
      +}
      +
      +func (m *EnumOptions) GetDeprecated() bool {
      +	if m != nil && m.Deprecated != nil {
      +		return *m.Deprecated
      +	}
      +	return Default_EnumOptions_Deprecated
      +}
      +
      +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
      +	if m != nil {
      +		return m.UninterpretedOption
      +	}
      +	return nil
      +}
      +
      +type EnumValueOptions struct {
      +	// Is this enum value deprecated?
      +	// Depending on the target platform, this can emit Deprecated annotations
      +	// for the enum value, or it will be completely ignored; in the very least,
      +	// this is a formalization for deprecating enum values.
      +	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
      +	// The parser stores options it doesn't recognize here. See above.
      +	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
      +	XXX_extensions      map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized    []byte                    `json:"-"`
      +}
      +
      +func (m *EnumValueOptions) Reset()                    { *m = EnumValueOptions{} }
      +func (m *EnumValueOptions) String() string            { return proto.CompactTextString(m) }
      +func (*EnumValueOptions) ProtoMessage()               {}
      +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
      +
      +var extRange_EnumValueOptions = []proto.ExtensionRange{
      +	{1000, 536870911},
      +}
      +
      +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_EnumValueOptions
      +}
      +func (m *EnumValueOptions) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +const Default_EnumValueOptions_Deprecated bool = false
      +
      +func (m *EnumValueOptions) GetDeprecated() bool {
      +	if m != nil && m.Deprecated != nil {
      +		return *m.Deprecated
      +	}
      +	return Default_EnumValueOptions_Deprecated
      +}
      +
      +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
      +	if m != nil {
      +		return m.UninterpretedOption
      +	}
      +	return nil
      +}
      +
      +type ServiceOptions struct {
      +	// Is this service deprecated?
      +	// Depending on the target platform, this can emit Deprecated annotations
      +	// for the service, or it will be completely ignored; in the very least,
      +	// this is a formalization for deprecating services.
      +	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
      +	// The parser stores options it doesn't recognize here. See above.
      +	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
      +	XXX_extensions      map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized    []byte                    `json:"-"`
      +}
      +
      +func (m *ServiceOptions) Reset()                    { *m = ServiceOptions{} }
      +func (m *ServiceOptions) String() string            { return proto.CompactTextString(m) }
      +func (*ServiceOptions) ProtoMessage()               {}
      +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
      +
      +var extRange_ServiceOptions = []proto.ExtensionRange{
      +	{1000, 536870911},
      +}
      +
      +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_ServiceOptions
      +}
      +func (m *ServiceOptions) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +const Default_ServiceOptions_Deprecated bool = false
      +
      +func (m *ServiceOptions) GetDeprecated() bool {
      +	if m != nil && m.Deprecated != nil {
      +		return *m.Deprecated
      +	}
      +	return Default_ServiceOptions_Deprecated
      +}
      +
      +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
      +	if m != nil {
      +		return m.UninterpretedOption
      +	}
      +	return nil
      +}
      +
      +type MethodOptions struct {
      +	// Is this method deprecated?
      +	// Depending on the target platform, this can emit Deprecated annotations
      +	// for the method, or it will be completely ignored; in the very least,
      +	// this is a formalization for deprecating methods.
      +	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
      +	// The parser stores options it doesn't recognize here. See above.
      +	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
      +	XXX_extensions      map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized    []byte                    `json:"-"`
      +}
      +
      +func (m *MethodOptions) Reset()                    { *m = MethodOptions{} }
      +func (m *MethodOptions) String() string            { return proto.CompactTextString(m) }
      +func (*MethodOptions) ProtoMessage()               {}
      +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
      +
      +var extRange_MethodOptions = []proto.ExtensionRange{
      +	{1000, 536870911},
      +}
      +
      +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_MethodOptions
      +}
      +func (m *MethodOptions) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +const Default_MethodOptions_Deprecated bool = false
      +
      +func (m *MethodOptions) GetDeprecated() bool {
      +	if m != nil && m.Deprecated != nil {
      +		return *m.Deprecated
      +	}
      +	return Default_MethodOptions_Deprecated
      +}
      +
      +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
      +	if m != nil {
      +		return m.UninterpretedOption
      +	}
      +	return nil
      +}
      +
      +// A message representing a option the parser does not recognize. This only
      +// appears in options protos created by the compiler::Parser class.
      +// DescriptorPool resolves these when building Descriptor objects. Therefore,
      +// options protos in descriptor objects (e.g. returned by Descriptor::options(),
      +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
      +// in them.
      +type UninterpretedOption struct {
      +	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
      +	// The value of the uninterpreted option, in whatever type the tokenizer
      +	// identified it as during parsing. Exactly one of these should be set.
      +	IdentifierValue  *string  `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
      +	PositiveIntValue *uint64  `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
      +	NegativeIntValue *int64   `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
      +	DoubleValue      *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
      +	StringValue      []byte   `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
      +	AggregateValue   *string  `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
      +	XXX_unrecognized []byte   `json:"-"`
      +}
      +
      +func (m *UninterpretedOption) Reset()                    { *m = UninterpretedOption{} }
      +func (m *UninterpretedOption) String() string            { return proto.CompactTextString(m) }
      +func (*UninterpretedOption) ProtoMessage()               {}
      +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
      +
      +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
      +	if m != nil {
      +		return m.Name
      +	}
      +	return nil
      +}
      +
      +func (m *UninterpretedOption) GetIdentifierValue() string {
      +	if m != nil && m.IdentifierValue != nil {
      +		return *m.IdentifierValue
      +	}
      +	return ""
      +}
      +
      +func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
      +	if m != nil && m.PositiveIntValue != nil {
      +		return *m.PositiveIntValue
      +	}
      +	return 0
      +}
      +
      +func (m *UninterpretedOption) GetNegativeIntValue() int64 {
      +	if m != nil && m.NegativeIntValue != nil {
      +		return *m.NegativeIntValue
      +	}
      +	return 0
      +}
      +
      +func (m *UninterpretedOption) GetDoubleValue() float64 {
      +	if m != nil && m.DoubleValue != nil {
      +		return *m.DoubleValue
      +	}
      +	return 0
      +}
      +
      +func (m *UninterpretedOption) GetStringValue() []byte {
      +	if m != nil {
      +		return m.StringValue
      +	}
      +	return nil
      +}
      +
      +func (m *UninterpretedOption) GetAggregateValue() string {
      +	if m != nil && m.AggregateValue != nil {
      +		return *m.AggregateValue
      +	}
      +	return ""
      +}
      +
      +// The name of the uninterpreted option.  Each string represents a segment in
      +// a dot-separated name.  is_extension is true iff a segment represents an
      +// extension (denoted with parentheses in options specs in .proto files).
      +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
      +// "foo.(bar.baz).qux".
      +type UninterpretedOption_NamePart struct {
      +	NamePart         *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
      +	IsExtension      *bool   `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
      +	XXX_unrecognized []byte  `json:"-"`
      +}
      +
      +func (m *UninterpretedOption_NamePart) Reset()         { *m = UninterpretedOption_NamePart{} }
      +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
      +func (*UninterpretedOption_NamePart) ProtoMessage()    {}
      +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
      +	return fileDescriptor0, []int{16, 0}
      +}
      +
      +func (m *UninterpretedOption_NamePart) GetNamePart() string {
      +	if m != nil && m.NamePart != nil {
      +		return *m.NamePart
      +	}
      +	return ""
      +}
      +
      +func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
      +	if m != nil && m.IsExtension != nil {
      +		return *m.IsExtension
      +	}
      +	return false
      +}
      +
      +// Encapsulates information about the original source file from which a
      +// FileDescriptorProto was generated.
      +type SourceCodeInfo struct {
      +	// A Location identifies a piece of source code in a .proto file which
      +	// corresponds to a particular definition.  This information is intended
      +	// to be useful to IDEs, code indexers, documentation generators, and similar
      +	// tools.
      +	//
      +	// For example, say we have a file like:
      +	//   message Foo {
      +	//     optional string foo = 1;
      +	//   }
      +	// Let's look at just the field definition:
      +	//   optional string foo = 1;
      +	//   ^       ^^     ^^  ^  ^^^
      +	//   a       bc     de  f  ghi
      +	// We have the following locations:
      +	//   span   path               represents
      +	//   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
      +	//   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
      +	//   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
      +	//   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
      +	//   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
      +	//
      +	// Notes:
      +	// - A location may refer to a repeated field itself (i.e. not to any
      +	//   particular index within it).  This is used whenever a set of elements are
      +	//   logically enclosed in a single code segment.  For example, an entire
      +	//   extend block (possibly containing multiple extension definitions) will
      +	//   have an outer location whose path refers to the "extensions" repeated
      +	//   field without an index.
      +	// - Multiple locations may have the same path.  This happens when a single
      +	//   logical declaration is spread out across multiple places.  The most
      +	//   obvious example is the "extend" block again -- there may be multiple
      +	//   extend blocks in the same scope, each of which will have the same path.
      +	// - A location's span is not always a subset of its parent's span.  For
      +	//   example, the "extendee" of an extension declaration appears at the
      +	//   beginning of the "extend" block and is shared by all extensions within
      +	//   the block.
      +	// - Just because a location's span is a subset of some other location's span
      +	//   does not mean that it is a descendent.  For example, a "group" defines
      +	//   both a type and a field in a single declaration.  Thus, the locations
      +	//   corresponding to the type and field and their components will overlap.
      +	// - Code which tries to interpret locations should probably be designed to
      +	//   ignore those that it doesn't understand, as more types of locations could
      +	//   be recorded in the future.
      +	Location         []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
      +	XXX_unrecognized []byte                     `json:"-"`
      +}
      +
      +func (m *SourceCodeInfo) Reset()                    { *m = SourceCodeInfo{} }
      +func (m *SourceCodeInfo) String() string            { return proto.CompactTextString(m) }
      +func (*SourceCodeInfo) ProtoMessage()               {}
      +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
      +
      +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
      +	if m != nil {
      +		return m.Location
      +	}
      +	return nil
      +}
      +
      +type SourceCodeInfo_Location struct {
      +	// Identifies which part of the FileDescriptorProto was defined at this
      +	// location.
      +	//
      +	// Each element is a field number or an index.  They form a path from
      +	// the root FileDescriptorProto to the place where the definition.  For
      +	// example, this path:
      +	//   [ 4, 3, 2, 7, 1 ]
      +	// refers to:
      +	//   file.message_type(3)  // 4, 3
      +	//       .field(7)         // 2, 7
      +	//       .name()           // 1
      +	// This is because FileDescriptorProto.message_type has field number 4:
      +	//   repeated DescriptorProto message_type = 4;
      +	// and DescriptorProto.field has field number 2:
      +	//   repeated FieldDescriptorProto field = 2;
      +	// and FieldDescriptorProto.name has field number 1:
      +	//   optional string name = 1;
      +	//
      +	// Thus, the above path gives the location of a field name.  If we removed
      +	// the last element:
      +	//   [ 4, 3, 2, 7 ]
      +	// this path refers to the whole field declaration (from the beginning
      +	// of the label to the terminating semicolon).
      +	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
      +	// Always has exactly three or four elements: start line, start column,
      +	// end line (optional, otherwise assumed same as start line), end column.
      +	// These are packed into a single field for efficiency.  Note that line
      +	// and column numbers are zero-based -- typically you will want to add
      +	// 1 to each before displaying to a user.
      +	Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
      +	// If this SourceCodeInfo represents a complete declaration, these are any
      +	// comments appearing before and after the declaration which appear to be
      +	// attached to the declaration.
      +	//
      +	// A series of line comments appearing on consecutive lines, with no other
      +	// tokens appearing on those lines, will be treated as a single comment.
      +	//
      +	// leading_detached_comments will keep paragraphs of comments that appear
      +	// before (but not connected to) the current element. Each paragraph,
      +	// separated by empty lines, will be one comment element in the repeated
      +	// field.
      +	//
      +	// Only the comment content is provided; comment markers (e.g. //) are
      +	// stripped out.  For block comments, leading whitespace and an asterisk
      +	// will be stripped from the beginning of each line other than the first.
      +	// Newlines are included in the output.
      +	//
      +	// Examples:
      +	//
      +	//   optional int32 foo = 1;  // Comment attached to foo.
      +	//   // Comment attached to bar.
      +	//   optional int32 bar = 2;
      +	//
      +	//   optional string baz = 3;
      +	//   // Comment attached to baz.
      +	//   // Another line attached to baz.
      +	//
      +	//   // Comment attached to qux.
      +	//   //
      +	//   // Another line attached to qux.
      +	//   optional double qux = 4;
      +	//
      +	//   // Detached comment for corge. This is not leading or trailing comments
      +	//   // to qux or corge because there are blank lines separating it from
      +	//   // both.
      +	//
      +	//   // Detached comment for corge paragraph 2.
      +	//
      +	//   optional string corge = 5;
      +	//   /* Block comment attached
      +	//    * to corge.  Leading asterisks
      +	//    * will be removed. */
      +	//   /* Block comment attached to
      +	//    * grault. */
      +	//   optional int32 grault = 6;
      +	//
      +	//   // ignored detached comments.
      +	LeadingComments         *string  `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
      +	TrailingComments        *string  `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
      +	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
      +	XXX_unrecognized        []byte   `json:"-"`
      +}
      +
      +func (m *SourceCodeInfo_Location) Reset()                    { *m = SourceCodeInfo_Location{} }
      +func (m *SourceCodeInfo_Location) String() string            { return proto.CompactTextString(m) }
      +func (*SourceCodeInfo_Location) ProtoMessage()               {}
      +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17, 0} }
      +
      +func (m *SourceCodeInfo_Location) GetPath() []int32 {
      +	if m != nil {
      +		return m.Path
      +	}
      +	return nil
      +}
      +
      +func (m *SourceCodeInfo_Location) GetSpan() []int32 {
      +	if m != nil {
      +		return m.Span
      +	}
      +	return nil
      +}
      +
      +func (m *SourceCodeInfo_Location) GetLeadingComments() string {
      +	if m != nil && m.LeadingComments != nil {
      +		return *m.LeadingComments
      +	}
      +	return ""
      +}
      +
      +func (m *SourceCodeInfo_Location) GetTrailingComments() string {
      +	if m != nil && m.TrailingComments != nil {
      +		return *m.TrailingComments
      +	}
      +	return ""
      +}
      +
      +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
      +	if m != nil {
      +		return m.LeadingDetachedComments
      +	}
      +	return nil
      +}
      +
      +func init() {
      +	proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
      +	proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
      +	proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
      +	proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
      +	proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
      +	proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
      +	proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
      +	proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
      +	proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
      +	proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
      +	proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
      +	proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
      +	proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
      +	proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
      +	proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
      +	proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
      +	proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
      +	proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
      +	proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
      +	proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
      +	proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
      +	proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
      +	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
      +	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
      +	proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
      +	proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
      +	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
      +}
      +
      +var fileDescriptor0 = []byte{
      +	// 2199 bytes of a gzipped FileDescriptorProto
      +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xd6,
      +	0x11, 0x2f, 0xff, 0x8a, 0x5c, 0x52, 0x24, 0xf4, 0xa4, 0xd8, 0xb4, 0x62, 0x47, 0x36, 0x63, 0xc7,
      +	0x8e, 0xd3, 0x52, 0x19, 0xb7, 0x49, 0x5c, 0xa5, 0x93, 0x0e, 0x45, 0xc2, 0x0a, 0x3d, 0x94, 0xc8,
      +	0x82, 0x64, 0xeb, 0xe4, 0x82, 0x81, 0xc0, 0x47, 0x0a, 0x36, 0x08, 0xb0, 0x00, 0x68, 0x5b, 0x39,
      +	0x75, 0xa6, 0xa7, 0x7e, 0x83, 0x4e, 0xdb, 0xe9, 0x21, 0x97, 0xcc, 0xf4, 0x03, 0xf4, 0xd0, 0x7b,
      +	0xaf, 0x3d, 0xf4, 0xdc, 0x63, 0x67, 0xda, 0x6f, 0xd0, 0x6b, 0xf7, 0xbd, 0x07, 0x80, 0x00, 0x09,
      +	0xc5, 0x6a, 0x66, 0x52, 0x47, 0x17, 0xf1, 0xed, 0xfe, 0x76, 0xb1, 0x6f, 0xdf, 0xef, 0xed, 0x2e,
      +	0x00, 0x37, 0xa7, 0xb6, 0x3d, 0x35, 0xe9, 0xfe, 0xdc, 0xb1, 0x3d, 0xfb, 0x74, 0x31, 0xd9, 0x1f,
      +	0x53, 0x57, 0x77, 0x8c, 0xb9, 0x67, 0x3b, 0x0d, 0x2e, 0x23, 0x55, 0x81, 0x68, 0x04, 0x88, 0xfa,
      +	0x31, 0x6c, 0x3d, 0x32, 0x4c, 0xda, 0x0e, 0x81, 0x03, 0xea, 0x91, 0x87, 0x90, 0x9d, 0xa0, 0xb0,
      +	0x96, 0xba, 0x99, 0xb9, 0x57, 0x7a, 0x70, 0xbb, 0xb1, 0x62, 0xd4, 0x88, 0x5b, 0xf4, 0x99, 0x58,
      +	0xe1, 0x16, 0xf5, 0x7f, 0x66, 0x61, 0x3b, 0x41, 0x4b, 0x08, 0x64, 0x2d, 0x6d, 0xc6, 0x3c, 0xa6,
      +	0xee, 0x15, 0x15, 0xfe, 0x9b, 0xd4, 0x60, 0x63, 0xae, 0xe9, 0xcf, 0xb4, 0x29, 0xad, 0xa5, 0xb9,
      +	0x38, 0x58, 0x92, 0xb7, 0x00, 0xc6, 0x74, 0x4e, 0xad, 0x31, 0xb5, 0xf4, 0xf3, 0x5a, 0x06, 0xa3,
      +	0x28, 0x2a, 0x11, 0x09, 0x79, 0x0f, 0xb6, 0xe6, 0x8b, 0x53, 0xd3, 0xd0, 0xd5, 0x08, 0x0c, 0x10,
      +	0x96, 0x53, 0x24, 0xa1, 0x68, 0x2f, 0xc1, 0x77, 0xa1, 0xfa, 0x82, 0x6a, 0xcf, 0xa2, 0xd0, 0x12,
      +	0x87, 0x56, 0x98, 0x38, 0x02, 0x6c, 0x41, 0x79, 0x46, 0x5d, 0x17, 0x03, 0x50, 0xbd, 0xf3, 0x39,
      +	0xad, 0x65, 0xf9, 0xee, 0x6f, 0xae, 0xed, 0x7e, 0x75, 0xe7, 0x25, 0xdf, 0x6a, 0x88, 0x46, 0xa4,
      +	0x09, 0x45, 0x6a, 0x2d, 0x66, 0xc2, 0x43, 0xee, 0x82, 0xfc, 0xc9, 0x88, 0x58, 0xf5, 0x52, 0x60,
      +	0x66, 0xbe, 0x8b, 0x0d, 0x97, 0x3a, 0xcf, 0x0d, 0x9d, 0xd6, 0xf2, 0xdc, 0xc1, 0xdd, 0x35, 0x07,
      +	0x03, 0xa1, 0x5f, 0xf5, 0x11, 0xd8, 0xe1, 0x56, 0x8a, 0xf4, 0xa5, 0x47, 0x2d, 0xd7, 0xb0, 0xad,
      +	0xda, 0x06, 0x77, 0x72, 0x27, 0xe1, 0x14, 0xa9, 0x39, 0x5e, 0x75, 0xb1, 0xb4, 0x23, 0x1f, 0xc2,
      +	0x86, 0x3d, 0xf7, 0xf0, 0x97, 0x5b, 0x2b, 0xe0, 0xf9, 0x94, 0x1e, 0x5c, 0x4f, 0x24, 0x42, 0x4f,
      +	0x60, 0x94, 0x00, 0x4c, 0x3a, 0x20, 0xb9, 0xf6, 0xc2, 0xd1, 0xa9, 0xaa, 0xdb, 0x63, 0xaa, 0x1a,
      +	0xd6, 0xc4, 0xae, 0x15, 0xb9, 0x83, 0xbd, 0xf5, 0x8d, 0x70, 0x60, 0x0b, 0x71, 0x1d, 0x84, 0x29,
      +	0x15, 0x37, 0xb6, 0x26, 0x57, 0x20, 0xef, 0x9e, 0x5b, 0x9e, 0xf6, 0xb2, 0x56, 0xe6, 0x0c, 0xf1,
      +	0x57, 0xf5, 0xff, 0xe4, 0xa0, 0x7a, 0x19, 0x8a, 0x7d, 0x0c, 0xb9, 0x09, 0xdb, 0x25, 0x12, 0xec,
      +	0x7f, 0xc8, 0x81, 0xb0, 0x89, 0x27, 0x31, 0xff, 0x0d, 0x93, 0xd8, 0x84, 0x92, 0x45, 0x5d, 0x8f,
      +	0x8e, 0x05, 0x23, 0x32, 0x97, 0xe4, 0x14, 0x08, 0xa3, 0x75, 0x4a, 0x65, 0xbf, 0x11, 0xa5, 0x9e,
      +	0x40, 0x35, 0x0c, 0x49, 0x75, 0x34, 0x6b, 0x1a, 0x70, 0x73, 0xff, 0x55, 0x91, 0x34, 0xe4, 0xc0,
      +	0x4e, 0x61, 0x66, 0x4a, 0x85, 0xc6, 0xd6, 0xa4, 0x0d, 0x60, 0x5b, 0xd4, 0x9e, 0xe0, 0xf5, 0xd2,
      +	0x4d, 0xe4, 0x49, 0x72, 0x96, 0x7a, 0x0c, 0xb2, 0x96, 0x25, 0x5b, 0x48, 0x75, 0x93, 0xfc, 0x78,
      +	0x49, 0xb5, 0x8d, 0x0b, 0x98, 0x72, 0x2c, 0x2e, 0xd9, 0x1a, 0xdb, 0x46, 0x50, 0x71, 0x28, 0xe3,
      +	0x3d, 0xa6, 0x58, 0xec, 0xac, 0xc8, 0x83, 0x68, 0xbc, 0x72, 0x67, 0x8a, 0x6f, 0x26, 0x36, 0xb6,
      +	0xe9, 0x44, 0x97, 0xe4, 0x6d, 0x08, 0x05, 0x2a, 0xa7, 0x15, 0xf0, 0x2a, 0x54, 0x0e, 0x84, 0x27,
      +	0x28, 0xdb, 0x7d, 0x08, 0x95, 0x78, 0x7a, 0xc8, 0x0e, 0xe4, 0x5c, 0x4f, 0x73, 0x3c, 0xce, 0xc2,
      +	0x9c, 0x22, 0x16, 0x44, 0x82, 0x0c, 0x16, 0x19, 0x5e, 0xe5, 0x72, 0x0a, 0xfb, 0xb9, 0xfb, 0x11,
      +	0x6c, 0xc6, 0x1e, 0x7f, 0x59, 0xc3, 0xfa, 0x6f, 0xf3, 0xb0, 0x93, 0xc4, 0xb9, 0x44, 0xfa, 0xe3,
      +	0xf5, 0x41, 0x06, 0x9c, 0x52, 0x07, 0x79, 0xc7, 0x3c, 0xf8, 0x2b, 0x64, 0x54, 0xce, 0xd4, 0x4e,
      +	0xa9, 0x89, 0x6c, 0x4a, 0xdd, 0xab, 0x3c, 0x78, 0xef, 0x52, 0xac, 0x6e, 0x74, 0x99, 0x89, 0x22,
      +	0x2c, 0xc9, 0x27, 0x90, 0xf5, 0x4b, 0x1c, 0xf3, 0x70, 0xff, 0x72, 0x1e, 0x18, 0x17, 0x15, 0x6e,
      +	0x47, 0xde, 0x84, 0x22, 0xfb, 0x2f, 0x72, 0x9b, 0xe7, 0x31, 0x17, 0x98, 0x80, 0xe5, 0x95, 0xec,
      +	0x42, 0x81, 0xd3, 0x6c, 0x4c, 0x83, 0xd6, 0x10, 0xae, 0xd9, 0xc1, 0x8c, 0xe9, 0x44, 0x5b, 0x98,
      +	0x9e, 0xfa, 0x5c, 0x33, 0x17, 0x94, 0x13, 0x06, 0x0f, 0xc6, 0x17, 0xfe, 0x9c, 0xc9, 0xc8, 0x1e,
      +	0x94, 0x04, 0x2b, 0x0d, 0xb4, 0x79, 0xc9, 0xab, 0x4f, 0x4e, 0x11, 0x44, 0xed, 0x30, 0x09, 0x7b,
      +	0xfc, 0x53, 0x17, 0xef, 0x82, 0x7f, 0xb4, 0xfc, 0x11, 0x4c, 0xc0, 0x1f, 0xff, 0xd1, 0x6a, 0xe1,
      +	0xbb, 0x91, 0xbc, 0xbd, 0x55, 0x2e, 0xd6, 0xff, 0x9c, 0x86, 0x2c, 0xbf, 0x6f, 0x55, 0x28, 0x0d,
      +	0x3f, 0xeb, 0xcb, 0x6a, 0xbb, 0x37, 0x3a, 0xec, 0xca, 0x52, 0x8a, 0x54, 0x00, 0xb8, 0xe0, 0x51,
      +	0xb7, 0xd7, 0x1c, 0x4a, 0xe9, 0x70, 0xdd, 0x39, 0x19, 0x7e, 0xf8, 0x23, 0x29, 0x13, 0x1a, 0x8c,
      +	0x84, 0x20, 0x1b, 0x05, 0xfc, 0xf0, 0x81, 0x94, 0x43, 0x26, 0x94, 0x85, 0x83, 0xce, 0x13, 0xb9,
      +	0x8d, 0x88, 0x7c, 0x5c, 0x82, 0x98, 0x0d, 0xb2, 0x09, 0x45, 0x2e, 0x39, 0xec, 0xf5, 0xba, 0x52,
      +	0x21, 0xf4, 0x39, 0x18, 0x2a, 0x9d, 0x93, 0x23, 0xa9, 0x18, 0xfa, 0x3c, 0x52, 0x7a, 0xa3, 0xbe,
      +	0x04, 0xa1, 0x87, 0x63, 0x79, 0x30, 0x68, 0x1e, 0xc9, 0x52, 0x29, 0x44, 0x1c, 0x7e, 0x36, 0x94,
      +	0x07, 0x52, 0x39, 0x16, 0x16, 0x3e, 0x62, 0x33, 0x7c, 0x84, 0x7c, 0x32, 0x3a, 0x96, 0x2a, 0x64,
      +	0x0b, 0x36, 0xc5, 0x23, 0x82, 0x20, 0xaa, 0x2b, 0x22, 0x8c, 0x54, 0x5a, 0x06, 0x22, 0xbc, 0x6c,
      +	0xc5, 0x04, 0x88, 0x20, 0xf5, 0x16, 0xe4, 0x38, 0xbb, 0x90, 0xc5, 0x95, 0x6e, 0xf3, 0x50, 0xee,
      +	0xaa, 0xbd, 0xfe, 0xb0, 0xd3, 0x3b, 0x69, 0x76, 0x31, 0x77, 0xa1, 0x4c, 0x91, 0x7f, 0x36, 0xea,
      +	0x28, 0x72, 0x1b, 0xf3, 0x17, 0x91, 0xf5, 0xe5, 0xe6, 0x10, 0x65, 0x99, 0xfa, 0x7d, 0xd8, 0x49,
      +	0xaa, 0x33, 0x49, 0x37, 0xa3, 0xfe, 0x65, 0x0a, 0xb6, 0x13, 0x4a, 0x66, 0xe2, 0x2d, 0xfa, 0x29,
      +	0xe4, 0x04, 0xd3, 0x44, 0x13, 0x79, 0x37, 0xb1, 0xf6, 0x72, 0xde, 0xad, 0x35, 0x12, 0x6e, 0x17,
      +	0x6d, 0xa4, 0x99, 0x0b, 0x1a, 0x29, 0x73, 0xb1, 0x46, 0xa7, 0x5f, 0xa7, 0xa0, 0x76, 0x91, 0xef,
      +	0x57, 0xdc, 0xf7, 0x74, 0xec, 0xbe, 0x7f, 0xbc, 0x1a, 0xc0, 0xad, 0x8b, 0xf7, 0xb0, 0x16, 0xc5,
      +	0x57, 0x29, 0xb8, 0x92, 0x3c, 0x6f, 0x24, 0xc6, 0xf0, 0x09, 0xe4, 0x67, 0xd4, 0x3b, 0xb3, 0x83,
      +	0x9e, 0xfb, 0x4e, 0x42, 0x25, 0x67, 0xea, 0xd5, 0x5c, 0xf9, 0x56, 0xd1, 0x56, 0x90, 0xb9, 0x68,
      +	0x68, 0x10, 0xd1, 0xac, 0x45, 0xfa, 0x9b, 0x34, 0xbc, 0x91, 0xe8, 0x3c, 0x31, 0xd0, 0x1b, 0x00,
      +	0x86, 0x35, 0x5f, 0x78, 0xa2, 0xaf, 0x8a, 0x32, 0x53, 0xe4, 0x12, 0x7e, 0x85, 0x59, 0x09, 0x59,
      +	0x78, 0xa1, 0x3e, 0xc3, 0xf5, 0x20, 0x44, 0x1c, 0xf0, 0x70, 0x19, 0x68, 0x96, 0x07, 0xfa, 0xd6,
      +	0x05, 0x3b, 0x5d, 0x6b, 0x59, 0xef, 0x83, 0xa4, 0x9b, 0x06, 0xb5, 0x3c, 0xd5, 0xf5, 0x1c, 0xaa,
      +	0xcd, 0x0c, 0x6b, 0xca, 0xeb, 0x68, 0xe1, 0x20, 0x37, 0xd1, 0x4c, 0x97, 0x2a, 0x55, 0xa1, 0x1e,
      +	0x04, 0x5a, 0x66, 0xc1, 0x9b, 0x85, 0x13, 0xb1, 0xc8, 0xc7, 0x2c, 0x84, 0x3a, 0xb4, 0xa8, 0xff,
      +	0x7d, 0x03, 0x4a, 0x91, 0xe9, 0x8c, 0xdc, 0x82, 0xf2, 0x53, 0xed, 0xb9, 0xa6, 0x06, 0x13, 0xb7,
      +	0xc8, 0x44, 0x89, 0xc9, 0xfa, 0xfe, 0xd4, 0xfd, 0x3e, 0xec, 0x70, 0x08, 0xee, 0x11, 0x1f, 0xa4,
      +	0x9b, 0x9a, 0xeb, 0xf2, 0xa4, 0x15, 0x38, 0x94, 0x30, 0x5d, 0x8f, 0xa9, 0x5a, 0x81, 0x86, 0x7c,
      +	0x00, 0xdb, 0xdc, 0x62, 0x86, 0x85, 0xd7, 0x98, 0x9b, 0x54, 0x65, 0xef, 0x00, 0x2e, 0xaf, 0xa7,
      +	0x61, 0x64, 0x5b, 0x0c, 0x71, 0xec, 0x03, 0x58, 0x44, 0x2e, 0x39, 0x82, 0x1b, 0xdc, 0x6c, 0x4a,
      +	0x2d, 0xea, 0x68, 0x1e, 0x55, 0xe9, 0x2f, 0x17, 0x88, 0x55, 0x35, 0x6b, 0xac, 0x9e, 0x69, 0xee,
      +	0x59, 0x6d, 0x27, 0xea, 0xe0, 0x1a, 0xc3, 0x1e, 0xf9, 0x50, 0x99, 0x23, 0x9b, 0xd6, 0xf8, 0x53,
      +	0xc4, 0x91, 0x03, 0xb8, 0xc2, 0x1d, 0x61, 0x52, 0x70, 0xcf, 0xaa, 0x7e, 0x46, 0xf5, 0x67, 0xea,
      +	0xc2, 0x9b, 0x3c, 0xac, 0xbd, 0x19, 0xf5, 0xc0, 0x83, 0x1c, 0x70, 0x4c, 0x8b, 0x41, 0x46, 0x88,
      +	0x20, 0x03, 0x28, 0xb3, 0xf3, 0x98, 0x19, 0x5f, 0x60, 0xd8, 0xb6, 0xc3, 0x7b, 0x44, 0x25, 0xe1,
      +	0x72, 0x47, 0x92, 0xd8, 0xe8, 0xf9, 0x06, 0xc7, 0x38, 0x9f, 0x1e, 0xe4, 0x06, 0x7d, 0x59, 0x6e,
      +	0x2b, 0xa5, 0xc0, 0xcb, 0x23, 0xdb, 0x61, 0x9c, 0x9a, 0xda, 0x61, 0x8e, 0x4b, 0x82, 0x53, 0x53,
      +	0x3b, 0xc8, 0x30, 0xe6, 0x4b, 0xd7, 0xc5, 0xb6, 0xf1, 0xdd, 0xc5, 0x1f, 0xd6, 0xdd, 0x9a, 0x14,
      +	0xcb, 0x97, 0xae, 0x1f, 0x09, 0x80, 0x4f, 0x73, 0x17, 0xaf, 0xc4, 0x1b, 0xcb, 0x7c, 0x45, 0x0d,
      +	0xb7, 0xd6, 0x76, 0xb9, 0x6a, 0x8a, 0x4f, 0x9c, 0x9f, 0xaf, 0x1b, 0x92, 0xd8, 0x13, 0xe7, 0xe7,
      +	0xab, 0x66, 0x77, 0xf8, 0x0b, 0x98, 0x43, 0x75, 0x4c, 0xf9, 0xb8, 0x76, 0x35, 0x8a, 0x8e, 0x28,
      +	0xc8, 0x3e, 0x12, 0x59, 0x57, 0xa9, 0xa5, 0x9d, 0xe2, 0xd9, 0x6b, 0x0e, 0xfe, 0x70, 0x6b, 0x7b,
      +	0x51, 0x70, 0x45, 0xd7, 0x65, 0xae, 0x6d, 0x72, 0x25, 0xb9, 0x0f, 0x5b, 0xf6, 0xe9, 0x53, 0x5d,
      +	0x90, 0x4b, 0x45, 0x3f, 0x13, 0xe3, 0x65, 0xed, 0x36, 0x4f, 0x53, 0x95, 0x29, 0x38, 0xb5, 0xfa,
      +	0x5c, 0x4c, 0xde, 0x45, 0xe7, 0xee, 0x99, 0xe6, 0xcc, 0x79, 0x93, 0x76, 0x31, 0xa9, 0xb4, 0x76,
      +	0x47, 0x40, 0x85, 0xfc, 0x24, 0x10, 0x13, 0x19, 0xf6, 0xd8, 0xe6, 0x2d, 0xcd, 0xb2, 0xd5, 0x85,
      +	0x4b, 0xd5, 0x65, 0x88, 0xe1, 0x59, 0xbc, 0xc3, 0xc2, 0x52, 0xae, 0x07, 0xb0, 0x91, 0x8b, 0xc5,
      +	0x2c, 0x00, 0x05, 0xc7, 0xf3, 0x04, 0x76, 0x16, 0x96, 0x61, 0x21, 0xc5, 0x51, 0xc3, 0x8c, 0xc5,
      +	0x85, 0xad, 0xfd, 0x6b, 0xe3, 0x82, 0xa1, 0x7b, 0x14, 0x45, 0x0b, 0x92, 0x28, 0xdb, 0x8b, 0x75,
      +	0x61, 0xfd, 0x00, 0xca, 0x51, 0xee, 0x90, 0x22, 0x08, 0xf6, 0x60, 0x77, 0xc3, 0x8e, 0xda, 0xea,
      +	0xb5, 0x59, 0x2f, 0xfc, 0x5c, 0xc6, 0xc6, 0x86, 0x3d, 0xb9, 0xdb, 0x19, 0xca, 0xaa, 0x32, 0x3a,
      +	0x19, 0x76, 0x8e, 0x65, 0x29, 0x73, 0xbf, 0x58, 0xf8, 0xf7, 0x86, 0xf4, 0x2b, 0xfc, 0x4b, 0xd7,
      +	0xff, 0x9a, 0x86, 0x4a, 0x7c, 0x0e, 0x26, 0x3f, 0x81, 0xab, 0xc1, 0x4b, 0xab, 0x4b, 0x3d, 0xf5,
      +	0x85, 0xe1, 0x70, 0x3a, 0xcf, 0x34, 0x31, 0x49, 0x86, 0x27, 0xb1, 0xe3, 0xa3, 0xf0, 0xf5, 0xfe,
      +	0x17, 0x88, 0x79, 0xc4, 0x21, 0xa4, 0x0b, 0x7b, 0x98, 0x32, 0x9c, 0x35, 0xad, 0xb1, 0xe6, 0x8c,
      +	0xd5, 0xe5, 0xe7, 0x02, 0x55, 0xd3, 0x91, 0x07, 0xae, 0x2d, 0x3a, 0x49, 0xe8, 0xe5, 0xba, 0x65,
      +	0x0f, 0x7c, 0xf0, 0xb2, 0xc4, 0x36, 0x7d, 0xe8, 0x0a, 0x6b, 0x32, 0x17, 0xb1, 0x06, 0x67, 0xaf,
      +	0x99, 0x36, 0x47, 0xda, 0x78, 0xce, 0x39, 0x9f, 0xde, 0x0a, 0x4a, 0x01, 0x05, 0x32, 0x5b, 0x7f,
      +	0x7b, 0x67, 0x10, 0xcd, 0xe3, 0x3f, 0x32, 0x50, 0x8e, 0x4e, 0x70, 0x6c, 0x20, 0xd6, 0x79, 0x99,
      +	0x4f, 0xf1, 0x2a, 0xf0, 0xf6, 0xd7, 0xce, 0x7b, 0x8d, 0x16, 0xab, 0xff, 0x07, 0x79, 0x31, 0x57,
      +	0x29, 0xc2, 0x92, 0xf5, 0x5e, 0xc6, 0x35, 0x2a, 0xa6, 0xf5, 0x82, 0xe2, 0xaf, 0xb0, 0xd8, 0xe5,
      +	0x9f, 0xba, 0xdc, 0x77, 0x9e, 0xfb, 0xbe, 0xfd, 0xf5, 0xbe, 0x1f, 0x0f, 0xb8, 0xf3, 0xe2, 0xe3,
      +	0x81, 0x7a, 0xd2, 0x53, 0x8e, 0x9b, 0x5d, 0xc5, 0x37, 0x27, 0xd7, 0x20, 0x6b, 0x6a, 0x5f, 0x9c,
      +	0xc7, 0x3b, 0x05, 0x17, 0x5d, 0x36, 0xf1, 0xe8, 0x81, 0x7d, 0xf2, 0x88, 0xd7, 0x67, 0x2e, 0xfa,
      +	0x16, 0xa9, 0xbf, 0x0f, 0x39, 0x9e, 0x2f, 0x02, 0xe0, 0x67, 0x4c, 0xfa, 0x1e, 0x29, 0x40, 0xb6,
      +	0xd5, 0x53, 0x18, 0xfd, 0x91, 0xef, 0x42, 0xaa, 0xf6, 0x3b, 0x72, 0x0b, 0x6f, 0x40, 0xfd, 0x03,
      +	0xc8, 0x8b, 0x24, 0xb0, 0xab, 0x11, 0xa6, 0x01, 0x8d, 0xc4, 0xd2, 0xf7, 0x91, 0x0a, 0xb4, 0xa3,
      +	0xe3, 0x43, 0x59, 0x91, 0xd2, 0xd1, 0xe3, 0xfd, 0x4b, 0x0a, 0x4a, 0x91, 0x81, 0x8a, 0xb5, 0x72,
      +	0xcd, 0x34, 0xed, 0x17, 0xaa, 0x66, 0x1a, 0x58, 0xa1, 0xc4, 0xf9, 0x00, 0x17, 0x35, 0x99, 0xe4,
      +	0xb2, 0xf9, 0xfb, 0xbf, 0x70, 0xf3, 0x8f, 0x29, 0x90, 0x56, 0x87, 0xb1, 0x95, 0x00, 0x53, 0xaf,
      +	0x35, 0xc0, 0x3f, 0xa4, 0xa0, 0x12, 0x9f, 0xc0, 0x56, 0xc2, 0xbb, 0xf5, 0x5a, 0xc3, 0xfb, 0x7d,
      +	0x0a, 0x36, 0x63, 0x73, 0xd7, 0x77, 0x2a, 0xba, 0xdf, 0x65, 0x60, 0x3b, 0xc1, 0x0e, 0x0b, 0x90,
      +	0x18, 0x50, 0xc5, 0xcc, 0xfc, 0x83, 0xcb, 0x3c, 0xab, 0xc1, 0xfa, 0x5f, 0x5f, 0x73, 0x3c, 0x7f,
      +	0x9e, 0xc5, 0x7e, 0x69, 0x8c, 0xb1, 0xa8, 0x1a, 0x13, 0x03, 0xc7, 0x37, 0xf1, 0xc6, 0x22, 0xa6,
      +	0xd6, 0xea, 0x52, 0x2e, 0x5e, 0x8f, 0xbf, 0x0f, 0x64, 0x6e, 0xbb, 0x86, 0x67, 0x3c, 0x67, 0x9f,
      +	0xe7, 0x82, 0x17, 0x69, 0x36, 0xc5, 0x66, 0x15, 0x29, 0xd0, 0x74, 0x2c, 0x2f, 0x44, 0x5b, 0x74,
      +	0xaa, 0xad, 0xa0, 0x59, 0x19, 0xca, 0x28, 0x52, 0xa0, 0x09, 0xd1, 0x38, 0x68, 0x8e, 0xed, 0x05,
      +	0x1b, 0x08, 0x04, 0x8e, 0x55, 0xbd, 0x94, 0x52, 0x12, 0xb2, 0x10, 0xe2, 0x4f, 0x6c, 0xcb, 0x37,
      +	0xf8, 0xb2, 0x52, 0x12, 0x32, 0x01, 0xb9, 0x0b, 0x55, 0x6d, 0x3a, 0x75, 0x98, 0xf3, 0xc0, 0x91,
      +	0x18, 0x43, 0x2b, 0xa1, 0x98, 0x03, 0x77, 0x1f, 0x43, 0x21, 0xc8, 0x03, 0x6b, 0x2c, 0x2c, 0x13,
      +	0xd8, 0xf3, 0xf9, 0x77, 0x94, 0x34, 0x7b, 0xa9, 0xb7, 0x02, 0x25, 0x3e, 0xd4, 0x70, 0xd5, 0xe5,
      +	0x07, 0xbd, 0x34, 0xea, 0x0b, 0x4a, 0xc9, 0x70, 0xc3, 0x2f, 0x38, 0xf5, 0xaf, 0xb0, 0xbd, 0xc6,
      +	0x3f, 0x48, 0x92, 0x36, 0x14, 0x4c, 0x1b, 0xf9, 0xc1, 0x2c, 0xc4, 0xd7, 0xf0, 0x7b, 0xaf, 0xf8,
      +	0x86, 0xd9, 0xe8, 0xfa, 0x78, 0x25, 0xb4, 0xdc, 0xfd, 0x5b, 0x0a, 0x0a, 0x81, 0x18, 0x1b, 0x45,
      +	0x76, 0xae, 0x79, 0x67, 0xdc, 0x5d, 0xee, 0x30, 0x2d, 0xa5, 0x14, 0xbe, 0x66, 0x72, 0x9c, 0x66,
      +	0x2c, 0x4e, 0x01, 0x5f, 0xce, 0xd6, 0xec, 0x5c, 0x4d, 0xaa, 0x8d, 0xf9, 0x80, 0x6b, 0xcf, 0x66,
      +	0x78, 0x92, 0x6e, 0x70, 0xae, 0xbe, 0xbc, 0xe5, 0x8b, 0xd9, 0x77, 0x71, 0xcf, 0xd1, 0x0c, 0x33,
      +	0x86, 0xcd, 0x72, 0xac, 0x14, 0x28, 0x42, 0xf0, 0x01, 0x5c, 0x0b, 0xfc, 0x8e, 0xa9, 0xa7, 0xe1,
      +	0xf0, 0x3c, 0x5e, 0x1a, 0xe5, 0xf9, 0xd7, 0xae, 0xab, 0x3e, 0xa0, 0xed, 0xeb, 0x03, 0xdb, 0xc3,
      +	0x27, 0x38, 0xc8, 0xda, 0xb3, 0xd5, 0x4c, 0x1c, 0x4a, 0x2b, 0xef, 0x5d, 0xee, 0xa7, 0xa9, 0xcf,
      +	0x61, 0x39, 0x54, 0x7c, 0x99, 0xce, 0x1c, 0xf5, 0x0f, 0xff, 0x94, 0xde, 0x3d, 0x12, 0x76, 0xfd,
      +	0x20, 0x83, 0x0a, 0x9d, 0x98, 0x54, 0x67, 0xd9, 0xf9, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3,
      +	0xe8, 0xdf, 0x9c, 0xc3, 0x18, 0x00, 0x00,
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go
      new file mode 100644
      index 00000000..0d6055d6
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go
      @@ -0,0 +1,51 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +/*
      +	A plugin for the Google protocol buffer compiler to generate Go code.
      +	Run it by building this program and putting it in your path with the name
      +		protoc-gen-go
      +	That word 'go' at the end becomes part of the option string set for the
      +	protocol compiler, so once the protocol compiler (protoc) is installed
      +	you can run
      +		protoc --go_out=output_directory input_directory/file.proto
      +	to generate Go bindings for the protocol defined by file.proto.
      +	With that input, the output will be written to
      +		output_directory/file.pb.go
      +
      +	The generated code is documented in the package comment for
      +	the library.
      +
      +	See the README and documentation for protocol buffers to learn more:
      +		https://developers.google.com/protocol-buffers/
      +
      +*/
      +package documentation
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile
      new file mode 100644
      index 00000000..b5715c35
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile
      @@ -0,0 +1,40 @@
      +# Go support for Protocol Buffers - Google's data interchange format
      +#
      +# Copyright 2010 The Go Authors.  All rights reserved.
      +# https://github.com/golang/protobuf
      +#
      +# Redistribution and use in source and binary forms, with or without
      +# modification, are permitted provided that the following conditions are
      +# met:
      +#
      +#     * Redistributions of source code must retain the above copyright
      +# notice, this list of conditions and the following disclaimer.
      +#     * Redistributions in binary form must reproduce the above
      +# copyright notice, this list of conditions and the following disclaimer
      +# in the documentation and/or other materials provided with the
      +# distribution.
      +#     * Neither the name of Google Inc. nor the names of its
      +# contributors may be used to endorse or promote products derived from
      +# this software without specific prior written permission.
      +#
      +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +include $(GOROOT)/src/Make.inc
      +
      +TARG=github.com/golang/protobuf/compiler/generator
      +GOFILES=\
      +	generator.go\
      +
      +DEPS=../descriptor ../plugin ../../proto
      +
      +include $(GOROOT)/src/Make.pkg
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
      new file mode 100644
      index 00000000..2994cc96
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
      @@ -0,0 +1,2781 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +/*
      +	The code generator for the plugin for the Google protocol buffer compiler.
      +	It generates Go code from the protocol buffer description files read by the
      +	main routine.
      +*/
      +package generator
      +
      +import (
      +	"bufio"
      +	"bytes"
      +	"compress/gzip"
      +	"fmt"
      +	"go/parser"
      +	"go/printer"
      +	"go/token"
      +	"log"
      +	"os"
      +	"path"
      +	"strconv"
      +	"strings"
      +	"unicode"
      +	"unicode/utf8"
      +
      +	"github.com/golang/protobuf/proto"
      +
      +	"github.com/golang/protobuf/protoc-gen-go/descriptor"
      +	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
      +)
      +
      +// generatedCodeVersion indicates a version of the generated code.
      +// It is incremented whenever an incompatibility between the generated code and
      +// proto package is introduced; the generated code references
      +// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion).
      +const generatedCodeVersion = 1
      +
      +// A Plugin provides functionality to add to the output during Go code generation,
      +// such as to produce RPC stubs.
      +type Plugin interface {
      +	// Name identifies the plugin.
      +	Name() string
      +	// Init is called once after data structures are built but before
      +	// code generation begins.
      +	Init(g *Generator)
      +	// Generate produces the code generated by the plugin for this file,
      +	// except for the imports, by calling the generator's methods P, In, and Out.
      +	Generate(file *FileDescriptor)
      +	// GenerateImports produces the import declarations for this file.
      +	// It is called after Generate.
      +	GenerateImports(file *FileDescriptor)
      +}
      +
      +var plugins []Plugin
      +
      +// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated.
      +// It is typically called during initialization.
      +func RegisterPlugin(p Plugin) {
      +	plugins = append(plugins, p)
      +}
      +
      +// Each type we import as a protocol buffer (other than FileDescriptorProto) needs
      +// a pointer to the FileDescriptorProto that represents it.  These types achieve that
      +// wrapping by placing each Proto inside a struct with the pointer to its File. The
      +// structs have the same names as their contents, with "Proto" removed.
      +// FileDescriptor is used to store the things that it points to.
      +
      +// The file and package name method are common to messages and enums.
      +type common struct {
      +	file *descriptor.FileDescriptorProto // File this object comes from.
      +}
      +
      +// PackageName is name in the package clause in the generated file.
      +func (c *common) PackageName() string { return uniquePackageOf(c.file) }
      +
      +func (c *common) File() *descriptor.FileDescriptorProto { return c.file }
      +
      +func fileIsProto3(file *descriptor.FileDescriptorProto) bool {
      +	return file.GetSyntax() == "proto3"
      +}
      +
      +func (c *common) proto3() bool { return fileIsProto3(c.file) }
      +
      +// Descriptor represents a protocol buffer message.
      +type Descriptor struct {
      +	common
      +	*descriptor.DescriptorProto
      +	parent   *Descriptor            // The containing message, if any.
      +	nested   []*Descriptor          // Inner messages, if any.
      +	enums    []*EnumDescriptor      // Inner enums, if any.
      +	ext      []*ExtensionDescriptor // Extensions, if any.
      +	typename []string               // Cached typename vector.
      +	index    int                    // The index into the container, whether the file or another message.
      +	path     string                 // The SourceCodeInfo path as comma-separated integers.
      +	group    bool
      +}
      +
      +// TypeName returns the elements of the dotted type name.
      +// The package name is not part of this name.
      +func (d *Descriptor) TypeName() []string {
      +	if d.typename != nil {
      +		return d.typename
      +	}
      +	n := 0
      +	for parent := d; parent != nil; parent = parent.parent {
      +		n++
      +	}
      +	s := make([]string, n, n)
      +	for parent := d; parent != nil; parent = parent.parent {
      +		n--
      +		s[n] = parent.GetName()
      +	}
      +	d.typename = s
      +	return s
      +}
      +
      +// EnumDescriptor describes an enum. If it's at top level, its parent will be nil.
      +// Otherwise it will be the descriptor of the message in which it is defined.
      +type EnumDescriptor struct {
      +	common
      +	*descriptor.EnumDescriptorProto
      +	parent   *Descriptor // The containing message, if any.
      +	typename []string    // Cached typename vector.
      +	index    int         // The index into the container, whether the file or a message.
      +	path     string      // The SourceCodeInfo path as comma-separated integers.
      +}
      +
      +// TypeName returns the elements of the dotted type name.
      +// The package name is not part of this name.
      +func (e *EnumDescriptor) TypeName() (s []string) {
      +	if e.typename != nil {
      +		return e.typename
      +	}
      +	name := e.GetName()
      +	if e.parent == nil {
      +		s = make([]string, 1)
      +	} else {
      +		pname := e.parent.TypeName()
      +		s = make([]string, len(pname)+1)
      +		copy(s, pname)
      +	}
      +	s[len(s)-1] = name
      +	e.typename = s
      +	return s
      +}
      +
      +// Everything but the last element of the full type name, CamelCased.
      +// The values of type Foo.Bar are call Foo_value1... not Foo_Bar_value1... .
      +func (e *EnumDescriptor) prefix() string {
      +	if e.parent == nil {
      +		// If the enum is not part of a message, the prefix is just the type name.
      +		return CamelCase(*e.Name) + "_"
      +	}
      +	typeName := e.TypeName()
      +	return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_"
      +}
      +
      +// The integer value of the named constant in this enumerated type.
      +func (e *EnumDescriptor) integerValueAsString(name string) string {
      +	for _, c := range e.Value {
      +		if c.GetName() == name {
      +			return fmt.Sprint(c.GetNumber())
      +		}
      +	}
      +	log.Fatal("cannot find value for enum constant")
      +	return ""
      +}
      +
      +// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil.
      +// Otherwise it will be the descriptor of the message in which it is defined.
      +type ExtensionDescriptor struct {
      +	common
      +	*descriptor.FieldDescriptorProto
      +	parent *Descriptor // The containing message, if any.
      +}
      +
      +// TypeName returns the elements of the dotted type name.
      +// The package name is not part of this name.
      +func (e *ExtensionDescriptor) TypeName() (s []string) {
      +	name := e.GetName()
      +	if e.parent == nil {
      +		// top-level extension
      +		s = make([]string, 1)
      +	} else {
      +		pname := e.parent.TypeName()
      +		s = make([]string, len(pname)+1)
      +		copy(s, pname)
      +	}
      +	s[len(s)-1] = name
      +	return s
      +}
      +
      +// DescName returns the variable name used for the generated descriptor.
      +func (e *ExtensionDescriptor) DescName() string {
      +	// The full type name.
      +	typeName := e.TypeName()
      +	// Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix.
      +	for i, s := range typeName {
      +		typeName[i] = CamelCase(s)
      +	}
      +	return "E_" + strings.Join(typeName, "_")
      +}
      +
      +// ImportedDescriptor describes a type that has been publicly imported from another file.
      +type ImportedDescriptor struct {
      +	common
      +	o Object
      +}
      +
      +func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() }
      +
      +// FileDescriptor describes an protocol buffer descriptor file (.proto).
      +// It includes slices of all the messages and enums defined within it.
      +// Those slices are constructed by WrapTypes.
      +type FileDescriptor struct {
      +	*descriptor.FileDescriptorProto
      +	desc []*Descriptor          // All the messages defined in this file.
      +	enum []*EnumDescriptor      // All the enums defined in this file.
      +	ext  []*ExtensionDescriptor // All the top-level extensions defined in this file.
      +	imp  []*ImportedDescriptor  // All types defined in files publicly imported by this file.
      +
      +	// Comments, stored as a map of path (comma-separated integers) to the comment.
      +	comments map[string]*descriptor.SourceCodeInfo_Location
      +
      +	// The full list of symbols that are exported,
      +	// as a map from the exported object to its symbols.
      +	// This is used for supporting public imports.
      +	exported map[Object][]symbol
      +
      +	index int // The index of this file in the list of files to generate code for
      +
      +	proto3 bool // whether to generate proto3 code for this file
      +}
      +
      +// PackageName is the package name we'll use in the generated code to refer to this file.
      +func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDescriptorProto) }
      +
      +// goPackageOption interprets the file's go_package option.
      +// If there is no go_package, it returns ("", "", false).
      +// If there's a simple name, it returns ("", pkg, true).
      +// If the option implies an import path, it returns (impPath, pkg, true).
      +func (d *FileDescriptor) goPackageOption() (impPath, pkg string, ok bool) {
      +	pkg = d.GetOptions().GetGoPackage()
      +	if pkg == "" {
      +		return
      +	}
      +	ok = true
      +	// The presence of a slash implies there's an import path.
      +	slash := strings.LastIndex(pkg, "/")
      +	if slash < 0 {
      +		return
      +	}
      +	impPath, pkg = pkg, pkg[slash+1:]
      +	// A semicolon-delimited suffix overrides the package name.
      +	sc := strings.IndexByte(impPath, ';')
      +	if sc < 0 {
      +		return
      +	}
      +	impPath, pkg = impPath[:sc], impPath[sc+1:]
      +	return
      +}
      +
      +// goPackageName returns the Go package name to use in the
      +// generated Go file.  The result explicit reports whether the name
      +// came from an option go_package statement.  If explicit is false,
      +// the name was derived from the protocol buffer's package statement
      +// or the input file name.
      +func (d *FileDescriptor) goPackageName() (name string, explicit bool) {
      +	// Does the file have a "go_package" option?
      +	if _, pkg, ok := d.goPackageOption(); ok {
      +		return pkg, true
      +	}
      +
      +	// Does the file have a package clause?
      +	if pkg := d.GetPackage(); pkg != "" {
      +		return pkg, false
      +	}
      +	// Use the file base name.
      +	return baseName(d.GetName()), false
      +}
      +
      +// goFileName returns the output name for the generated Go file.
      +func (d *FileDescriptor) goFileName() string {
      +	name := *d.Name
      +	if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" {
      +		name = name[:len(name)-len(ext)]
      +	}
      +	name += ".pb.go"
      +
      +	// Does the file have a "go_package" option?
      +	// If it does, it may override the filename.
      +	if impPath, _, ok := d.goPackageOption(); ok && impPath != "" {
      +		// Replace the existing dirname with the declared import path.
      +		_, name = path.Split(name)
      +		name = path.Join(impPath, name)
      +		return name
      +	}
      +
      +	return name
      +}
      +
      +func (d *FileDescriptor) addExport(obj Object, sym symbol) {
      +	d.exported[obj] = append(d.exported[obj], sym)
      +}
      +
      +// symbol is an interface representing an exported Go symbol.
      +type symbol interface {
      +	// GenerateAlias should generate an appropriate alias
      +	// for the symbol from the named package.
      +	GenerateAlias(g *Generator, pkg string)
      +}
      +
      +type messageSymbol struct {
      +	sym                         string
      +	hasExtensions, isMessageSet bool
      +	hasOneof                    bool
      +	getters                     []getterSymbol
      +}
      +
      +type getterSymbol struct {
      +	name     string
      +	typ      string
      +	typeName string // canonical name in proto world; empty for proto.Message and similar
      +	genType  bool   // whether typ contains a generated type (message/group/enum)
      +}
      +
      +func (ms *messageSymbol) GenerateAlias(g *Generator, pkg string) {
      +	remoteSym := pkg + "." + ms.sym
      +
      +	g.P("type ", ms.sym, " ", remoteSym)
      +	g.P("func (m *", ms.sym, ") Reset() { (*", remoteSym, ")(m).Reset() }")
      +	g.P("func (m *", ms.sym, ") String() string { return (*", remoteSym, ")(m).String() }")
      +	g.P("func (*", ms.sym, ") ProtoMessage() {}")
      +	if ms.hasExtensions {
      +		g.P("func (*", ms.sym, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange ",
      +			"{ return (*", remoteSym, ")(nil).ExtensionRangeArray() }")
      +		g.P("func (m *", ms.sym, ") ExtensionMap() map[int32]", g.Pkg["proto"], ".Extension ",
      +			"{ return (*", remoteSym, ")(m).ExtensionMap() }")
      +		if ms.isMessageSet {
      +			g.P("func (m *", ms.sym, ") Marshal() ([]byte, error) ",
      +				"{ return (*", remoteSym, ")(m).Marshal() }")
      +			g.P("func (m *", ms.sym, ") Unmarshal(buf []byte) error ",
      +				"{ return (*", remoteSym, ")(m).Unmarshal(buf) }")
      +		}
      +	}
      +	if ms.hasOneof {
      +		// Oneofs and public imports do not mix well.
      +		// We can make them work okay for the binary format,
      +		// but they're going to break weirdly for text/JSON.
      +		enc := "_" + ms.sym + "_OneofMarshaler"
      +		dec := "_" + ms.sym + "_OneofUnmarshaler"
      +		size := "_" + ms.sym + "_OneofSizer"
      +		encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error"
      +		decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)"
      +		sizeSig := "(msg " + g.Pkg["proto"] + ".Message) int"
      +		g.P("func (m *", ms.sym, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {")
      +		g.P("return ", enc, ", ", dec, ", ", size, ", nil")
      +		g.P("}")
      +
      +		g.P("func ", enc, encSig, " {")
      +		g.P("m := msg.(*", ms.sym, ")")
      +		g.P("m0 := (*", remoteSym, ")(m)")
      +		g.P("enc, _, _, _ := m0.XXX_OneofFuncs()")
      +		g.P("return enc(m0, b)")
      +		g.P("}")
      +
      +		g.P("func ", dec, decSig, " {")
      +		g.P("m := msg.(*", ms.sym, ")")
      +		g.P("m0 := (*", remoteSym, ")(m)")
      +		g.P("_, dec, _, _ := m0.XXX_OneofFuncs()")
      +		g.P("return dec(m0, tag, wire, b)")
      +		g.P("}")
      +
      +		g.P("func ", size, sizeSig, " {")
      +		g.P("m := msg.(*", ms.sym, ")")
      +		g.P("m0 := (*", remoteSym, ")(m)")
      +		g.P("_, _, size, _ := m0.XXX_OneofFuncs()")
      +		g.P("return size(m0)")
      +		g.P("}")
      +	}
      +	for _, get := range ms.getters {
      +
      +		if get.typeName != "" {
      +			g.RecordTypeUse(get.typeName)
      +		}
      +		typ := get.typ
      +		val := "(*" + remoteSym + ")(m)." + get.name + "()"
      +		if get.genType {
      +			// typ will be "*pkg.T" (message/group) or "pkg.T" (enum)
      +			// or "map[t]*pkg.T" (map to message/enum).
      +			// The first two of those might have a "[]" prefix if it is repeated.
      +			// Drop any package qualifier since we have hoisted the type into this package.
      +			rep := strings.HasPrefix(typ, "[]")
      +			if rep {
      +				typ = typ[2:]
      +			}
      +			isMap := strings.HasPrefix(typ, "map[")
      +			star := typ[0] == '*'
      +			if !isMap { // map types handled lower down
      +				typ = typ[strings.Index(typ, ".")+1:]
      +			}
      +			if star {
      +				typ = "*" + typ
      +			}
      +			if rep {
      +				// Go does not permit conversion between slice types where both
      +				// element types are named. That means we need to generate a bit
      +				// of code in this situation.
      +				// typ is the element type.
      +				// val is the expression to get the slice from the imported type.
      +
      +				ctyp := typ // conversion type expression; "Foo" or "(*Foo)"
      +				if star {
      +					ctyp = "(" + typ + ")"
      +				}
      +
      +				g.P("func (m *", ms.sym, ") ", get.name, "() []", typ, " {")
      +				g.In()
      +				g.P("o := ", val)
      +				g.P("if o == nil {")
      +				g.In()
      +				g.P("return nil")
      +				g.Out()
      +				g.P("}")
      +				g.P("s := make([]", typ, ", len(o))")
      +				g.P("for i, x := range o {")
      +				g.In()
      +				g.P("s[i] = ", ctyp, "(x)")
      +				g.Out()
      +				g.P("}")
      +				g.P("return s")
      +				g.Out()
      +				g.P("}")
      +				continue
      +			}
      +			if isMap {
      +				// Split map[keyTyp]valTyp.
      +				bra, ket := strings.Index(typ, "["), strings.Index(typ, "]")
      +				keyTyp, valTyp := typ[bra+1:ket], typ[ket+1:]
      +				// Drop any package qualifier.
      +				// Only the value type may be foreign.
      +				star := valTyp[0] == '*'
      +				valTyp = valTyp[strings.Index(valTyp, ".")+1:]
      +				if star {
      +					valTyp = "*" + valTyp
      +				}
      +
      +				typ := "map[" + keyTyp + "]" + valTyp
      +				g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " {")
      +				g.P("o := ", val)
      +				g.P("if o == nil { return nil }")
      +				g.P("s := make(", typ, ", len(o))")
      +				g.P("for k, v := range o {")
      +				g.P("s[k] = (", valTyp, ")(v)")
      +				g.P("}")
      +				g.P("return s")
      +				g.P("}")
      +				continue
      +			}
      +			// Convert imported type into the forwarding type.
      +			val = "(" + typ + ")(" + val + ")"
      +		}
      +
      +		g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " { return ", val, " }")
      +	}
      +
      +}
      +
      +type enumSymbol struct {
      +	name   string
      +	proto3 bool // Whether this came from a proto3 file.
      +}
      +
      +func (es enumSymbol) GenerateAlias(g *Generator, pkg string) {
      +	s := es.name
      +	g.P("type ", s, " ", pkg, ".", s)
      +	g.P("var ", s, "_name = ", pkg, ".", s, "_name")
      +	g.P("var ", s, "_value = ", pkg, ".", s, "_value")
      +	g.P("func (x ", s, ") String() string { return (", pkg, ".", s, ")(x).String() }")
      +	if !es.proto3 {
      +		g.P("func (x ", s, ") Enum() *", s, "{ return (*", s, ")((", pkg, ".", s, ")(x).Enum()) }")
      +		g.P("func (x *", s, ") UnmarshalJSON(data []byte) error { return (*", pkg, ".", s, ")(x).UnmarshalJSON(data) }")
      +	}
      +}
      +
      +type constOrVarSymbol struct {
      +	sym  string
      +	typ  string // either "const" or "var"
      +	cast string // if non-empty, a type cast is required (used for enums)
      +}
      +
      +func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg string) {
      +	v := pkg + "." + cs.sym
      +	if cs.cast != "" {
      +		v = cs.cast + "(" + v + ")"
      +	}
      +	g.P(cs.typ, " ", cs.sym, " = ", v)
      +}
      +
      +// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects.
      +type Object interface {
      +	PackageName() string // The name we use in our output (a_b_c), possibly renamed for uniqueness.
      +	TypeName() []string
      +	File() *descriptor.FileDescriptorProto
      +}
      +
      +// Each package name we generate must be unique. The package we're generating
      +// gets its own name but every other package must have a unique name that does
      +// not conflict in the code we generate.  These names are chosen globally (although
      +// they don't have to be, it simplifies things to do them globally).
      +func uniquePackageOf(fd *descriptor.FileDescriptorProto) string {
      +	s, ok := uniquePackageName[fd]
      +	if !ok {
      +		log.Fatal("internal error: no package name defined for " + fd.GetName())
      +	}
      +	return s
      +}
      +
      +// Generator is the type whose methods generate the output, stored in the associated response structure.
      +type Generator struct {
      +	*bytes.Buffer
      +
      +	Request  *plugin.CodeGeneratorRequest  // The input.
      +	Response *plugin.CodeGeneratorResponse // The output.
      +
      +	Param             map[string]string // Command-line parameters.
      +	PackageImportPath string            // Go import path of the package we're generating code for
      +	ImportPrefix      string            // String to prefix to imported package file names.
      +	ImportMap         map[string]string // Mapping from .proto file name to import path
      +
      +	Pkg map[string]string // The names under which we import support packages
      +
      +	packageName      string                     // What we're calling ourselves.
      +	allFiles         []*FileDescriptor          // All files in the tree
      +	allFilesByName   map[string]*FileDescriptor // All files by filename.
      +	genFiles         []*FileDescriptor          // Those files we will generate output for.
      +	file             *FileDescriptor            // The file we are compiling now.
      +	usedPackages     map[string]bool            // Names of packages used in current file.
      +	typeNameToObject map[string]Object          // Key is a fully-qualified name in input syntax.
      +	init             []string                   // Lines to emit in the init function.
      +	indent           string
      +	writeOutput      bool
      +}
      +
      +// New creates a new generator and allocates the request and response protobufs.
      +func New() *Generator {
      +	g := new(Generator)
      +	g.Buffer = new(bytes.Buffer)
      +	g.Request = new(plugin.CodeGeneratorRequest)
      +	g.Response = new(plugin.CodeGeneratorResponse)
      +	return g
      +}
      +
      +// Error reports a problem, including an error, and exits the program.
      +func (g *Generator) Error(err error, msgs ...string) {
      +	s := strings.Join(msgs, " ") + ":" + err.Error()
      +	log.Print("protoc-gen-go: error:", s)
      +	os.Exit(1)
      +}
      +
      +// Fail reports a problem and exits the program.
      +func (g *Generator) Fail(msgs ...string) {
      +	s := strings.Join(msgs, " ")
      +	log.Print("protoc-gen-go: error:", s)
      +	os.Exit(1)
      +}
      +
      +// CommandLineParameters breaks the comma-separated list of key=value pairs
      +// in the parameter (a member of the request protobuf) into a key/value map.
      +// It then sets file name mappings defined by those entries.
      +func (g *Generator) CommandLineParameters(parameter string) {
      +	g.Param = make(map[string]string)
      +	for _, p := range strings.Split(parameter, ",") {
      +		if i := strings.Index(p, "="); i < 0 {
      +			g.Param[p] = ""
      +		} else {
      +			g.Param[p[0:i]] = p[i+1:]
      +		}
      +	}
      +
      +	g.ImportMap = make(map[string]string)
      +	pluginList := "none" // Default list of plugin names to enable (empty means all).
      +	for k, v := range g.Param {
      +		switch k {
      +		case "import_prefix":
      +			g.ImportPrefix = v
      +		case "import_path":
      +			g.PackageImportPath = v
      +		case "plugins":
      +			pluginList = v
      +		default:
      +			if len(k) > 0 && k[0] == 'M' {
      +				g.ImportMap[k[1:]] = v
      +			}
      +		}
      +	}
      +
      +	if pluginList != "" {
      +		// Amend the set of plugins.
      +		enabled := make(map[string]bool)
      +		for _, name := range strings.Split(pluginList, "+") {
      +			enabled[name] = true
      +		}
      +		var nplugins []Plugin
      +		for _, p := range plugins {
      +			if enabled[p.Name()] {
      +				nplugins = append(nplugins, p)
      +			}
      +		}
      +		plugins = nplugins
      +	}
      +}
      +
      +// DefaultPackageName returns the package name printed for the object.
      +// If its file is in a different package, it returns the package name we're using for this file, plus ".".
      +// Otherwise it returns the empty string.
      +func (g *Generator) DefaultPackageName(obj Object) string {
      +	pkg := obj.PackageName()
      +	if pkg == g.packageName {
      +		return ""
      +	}
      +	return pkg + "."
      +}
      +
      +// For each input file, the unique package name to use, underscored.
      +var uniquePackageName = make(map[*descriptor.FileDescriptorProto]string)
      +
      +// Package names already registered.  Key is the name from the .proto file;
      +// value is the name that appears in the generated code.
      +var pkgNamesInUse = make(map[string]bool)
      +
      +// Create and remember a guaranteed unique package name for this file descriptor.
      +// Pkg is the candidate name.  If f is nil, it's a builtin package like "proto" and
      +// has no file descriptor.
      +func RegisterUniquePackageName(pkg string, f *FileDescriptor) string {
      +	// Convert dots to underscores before finding a unique alias.
      +	pkg = strings.Map(badToUnderscore, pkg)
      +
      +	for i, orig := 1, pkg; pkgNamesInUse[pkg]; i++ {
      +		// It's a duplicate; must rename.
      +		pkg = orig + strconv.Itoa(i)
      +	}
      +	// Install it.
      +	pkgNamesInUse[pkg] = true
      +	if f != nil {
      +		uniquePackageName[f.FileDescriptorProto] = pkg
      +	}
      +	return pkg
      +}
      +
      +var isGoKeyword = map[string]bool{
      +	"break":       true,
      +	"case":        true,
      +	"chan":        true,
      +	"const":       true,
      +	"continue":    true,
      +	"default":     true,
      +	"else":        true,
      +	"defer":       true,
      +	"fallthrough": true,
      +	"for":         true,
      +	"func":        true,
      +	"go":          true,
      +	"goto":        true,
      +	"if":          true,
      +	"import":      true,
      +	"interface":   true,
      +	"map":         true,
      +	"package":     true,
      +	"range":       true,
      +	"return":      true,
      +	"select":      true,
      +	"struct":      true,
      +	"switch":      true,
      +	"type":        true,
      +	"var":         true,
      +}
      +
      +// defaultGoPackage returns the package name to use,
      +// derived from the import path of the package we're building code for.
      +func (g *Generator) defaultGoPackage() string {
      +	p := g.PackageImportPath
      +	if i := strings.LastIndex(p, "/"); i >= 0 {
      +		p = p[i+1:]
      +	}
      +	if p == "" {
      +		return ""
      +	}
      +
      +	p = strings.Map(badToUnderscore, p)
      +	// Identifier must not be keyword: insert _.
      +	if isGoKeyword[p] {
      +		p = "_" + p
      +	}
      +	// Identifier must not begin with digit: insert _.
      +	if r, _ := utf8.DecodeRuneInString(p); unicode.IsDigit(r) {
      +		p = "_" + p
      +	}
      +	return p
      +}
      +
      +// SetPackageNames sets the package name for this run.
      +// The package name must agree across all files being generated.
      +// It also defines unique package names for all imported files.
      +func (g *Generator) SetPackageNames() {
      +	// Register the name for this package.  It will be the first name
      +	// registered so is guaranteed to be unmodified.
      +	pkg, explicit := g.genFiles[0].goPackageName()
      +
      +	// Check all files for an explicit go_package option.
      +	for _, f := range g.genFiles {
      +		thisPkg, thisExplicit := f.goPackageName()
      +		if thisExplicit {
      +			if !explicit {
      +				// Let this file's go_package option serve for all input files.
      +				pkg, explicit = thisPkg, true
      +			} else if thisPkg != pkg {
      +				g.Fail("inconsistent package names:", thisPkg, pkg)
      +			}
      +		}
      +	}
      +
      +	// If we don't have an explicit go_package option but we have an
      +	// import path, use that.
      +	if !explicit {
      +		p := g.defaultGoPackage()
      +		if p != "" {
      +			pkg, explicit = p, true
      +		}
      +	}
      +
      +	// If there was no go_package and no import path to use,
      +	// double-check that all the inputs have the same implicit
      +	// Go package name.
      +	if !explicit {
      +		for _, f := range g.genFiles {
      +			thisPkg, _ := f.goPackageName()
      +			if thisPkg != pkg {
      +				g.Fail("inconsistent package names:", thisPkg, pkg)
      +			}
      +		}
      +	}
      +
      +	g.packageName = RegisterUniquePackageName(pkg, g.genFiles[0])
      +
      +	// Register the support package names. They might collide with the
      +	// name of a package we import.
      +	g.Pkg = map[string]string{
      +		"fmt":   RegisterUniquePackageName("fmt", nil),
      +		"math":  RegisterUniquePackageName("math", nil),
      +		"proto": RegisterUniquePackageName("proto", nil),
      +	}
      +
      +AllFiles:
      +	for _, f := range g.allFiles {
      +		for _, genf := range g.genFiles {
      +			if f == genf {
      +				// In this package already.
      +				uniquePackageName[f.FileDescriptorProto] = g.packageName
      +				continue AllFiles
      +			}
      +		}
      +		// The file is a dependency, so we want to ignore its go_package option
      +		// because that is only relevant for its specific generated output.
      +		pkg := f.GetPackage()
      +		if pkg == "" {
      +			pkg = baseName(*f.Name)
      +		}
      +		RegisterUniquePackageName(pkg, f)
      +	}
      +}
      +
      +// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos
      +// and FileDescriptorProtos into file-referenced objects within the Generator.
      +// It also creates the list of files to generate and so should be called before GenerateAllFiles.
      +func (g *Generator) WrapTypes() {
      +	g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile))
      +	g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles))
      +	for _, f := range g.Request.ProtoFile {
      +		// We must wrap the descriptors before we wrap the enums
      +		descs := wrapDescriptors(f)
      +		g.buildNestedDescriptors(descs)
      +		enums := wrapEnumDescriptors(f, descs)
      +		g.buildNestedEnums(descs, enums)
      +		exts := wrapExtensions(f)
      +		fd := &FileDescriptor{
      +			FileDescriptorProto: f,
      +			desc:                descs,
      +			enum:                enums,
      +			ext:                 exts,
      +			exported:            make(map[Object][]symbol),
      +			proto3:              fileIsProto3(f),
      +		}
      +		extractComments(fd)
      +		g.allFiles = append(g.allFiles, fd)
      +		g.allFilesByName[f.GetName()] = fd
      +	}
      +	for _, fd := range g.allFiles {
      +		fd.imp = wrapImported(fd.FileDescriptorProto, g)
      +	}
      +
      +	g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate))
      +	for _, fileName := range g.Request.FileToGenerate {
      +		fd := g.allFilesByName[fileName]
      +		if fd == nil {
      +			g.Fail("could not find file named", fileName)
      +		}
      +		fd.index = len(g.genFiles)
      +		g.genFiles = append(g.genFiles, fd)
      +	}
      +}
      +
      +// Scan the descriptors in this file.  For each one, build the slice of nested descriptors
      +func (g *Generator) buildNestedDescriptors(descs []*Descriptor) {
      +	for _, desc := range descs {
      +		if len(desc.NestedType) != 0 {
      +			for _, nest := range descs {
      +				if nest.parent == desc {
      +					desc.nested = append(desc.nested, nest)
      +				}
      +			}
      +			if len(desc.nested) != len(desc.NestedType) {
      +				g.Fail("internal error: nesting failure for", desc.GetName())
      +			}
      +		}
      +	}
      +}
      +
      +func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) {
      +	for _, desc := range descs {
      +		if len(desc.EnumType) != 0 {
      +			for _, enum := range enums {
      +				if enum.parent == desc {
      +					desc.enums = append(desc.enums, enum)
      +				}
      +			}
      +			if len(desc.enums) != len(desc.EnumType) {
      +				g.Fail("internal error: enum nesting failure for", desc.GetName())
      +			}
      +		}
      +	}
      +}
      +
      +// Construct the Descriptor
      +func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *Descriptor {
      +	d := &Descriptor{
      +		common:          common{file},
      +		DescriptorProto: desc,
      +		parent:          parent,
      +		index:           index,
      +	}
      +	if parent == nil {
      +		d.path = fmt.Sprintf("%d,%d", messagePath, index)
      +	} else {
      +		d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index)
      +	}
      +
      +	// The only way to distinguish a group from a message is whether
      +	// the containing message has a TYPE_GROUP field that matches.
      +	if parent != nil {
      +		parts := d.TypeName()
      +		if file.Package != nil {
      +			parts = append([]string{*file.Package}, parts...)
      +		}
      +		exp := "." + strings.Join(parts, ".")
      +		for _, field := range parent.Field {
      +			if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp {
      +				d.group = true
      +				break
      +			}
      +		}
      +	}
      +
      +	for _, field := range desc.Extension {
      +		d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d})
      +	}
      +
      +	return d
      +}
      +
      +// Return a slice of all the Descriptors defined within this file
      +func wrapDescriptors(file *descriptor.FileDescriptorProto) []*Descriptor {
      +	sl := make([]*Descriptor, 0, len(file.MessageType)+10)
      +	for i, desc := range file.MessageType {
      +		sl = wrapThisDescriptor(sl, desc, nil, file, i)
      +	}
      +	return sl
      +}
      +
      +// Wrap this Descriptor, recursively
      +func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) []*Descriptor {
      +	sl = append(sl, newDescriptor(desc, parent, file, index))
      +	me := sl[len(sl)-1]
      +	for i, nested := range desc.NestedType {
      +		sl = wrapThisDescriptor(sl, nested, me, file, i)
      +	}
      +	return sl
      +}
      +
      +// Construct the EnumDescriptor
      +func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *EnumDescriptor {
      +	ed := &EnumDescriptor{
      +		common:              common{file},
      +		EnumDescriptorProto: desc,
      +		parent:              parent,
      +		index:               index,
      +	}
      +	if parent == nil {
      +		ed.path = fmt.Sprintf("%d,%d", enumPath, index)
      +	} else {
      +		ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index)
      +	}
      +	return ed
      +}
      +
      +// Return a slice of all the EnumDescriptors defined within this file
      +func wrapEnumDescriptors(file *descriptor.FileDescriptorProto, descs []*Descriptor) []*EnumDescriptor {
      +	sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10)
      +	// Top-level enums.
      +	for i, enum := range file.EnumType {
      +		sl = append(sl, newEnumDescriptor(enum, nil, file, i))
      +	}
      +	// Enums within messages. Enums within embedded messages appear in the outer-most message.
      +	for _, nested := range descs {
      +		for i, enum := range nested.EnumType {
      +			sl = append(sl, newEnumDescriptor(enum, nested, file, i))
      +		}
      +	}
      +	return sl
      +}
      +
      +// Return a slice of all the top-level ExtensionDescriptors defined within this file.
      +func wrapExtensions(file *descriptor.FileDescriptorProto) []*ExtensionDescriptor {
      +	var sl []*ExtensionDescriptor
      +	for _, field := range file.Extension {
      +		sl = append(sl, &ExtensionDescriptor{common{file}, field, nil})
      +	}
      +	return sl
      +}
      +
      +// Return a slice of all the types that are publicly imported into this file.
      +func wrapImported(file *descriptor.FileDescriptorProto, g *Generator) (sl []*ImportedDescriptor) {
      +	for _, index := range file.PublicDependency {
      +		df := g.fileByName(file.Dependency[index])
      +		for _, d := range df.desc {
      +			if d.GetOptions().GetMapEntry() {
      +				continue
      +			}
      +			sl = append(sl, &ImportedDescriptor{common{file}, d})
      +		}
      +		for _, e := range df.enum {
      +			sl = append(sl, &ImportedDescriptor{common{file}, e})
      +		}
      +		for _, ext := range df.ext {
      +			sl = append(sl, &ImportedDescriptor{common{file}, ext})
      +		}
      +	}
      +	return
      +}
      +
      +func extractComments(file *FileDescriptor) {
      +	file.comments = make(map[string]*descriptor.SourceCodeInfo_Location)
      +	for _, loc := range file.GetSourceCodeInfo().GetLocation() {
      +		if loc.LeadingComments == nil {
      +			continue
      +		}
      +		var p []string
      +		for _, n := range loc.Path {
      +			p = append(p, strconv.Itoa(int(n)))
      +		}
      +		file.comments[strings.Join(p, ",")] = loc
      +	}
      +}
      +
      +// BuildTypeNameMap builds the map from fully qualified type names to objects.
      +// The key names for the map come from the input data, which puts a period at the beginning.
      +// It should be called after SetPackageNames and before GenerateAllFiles.
      +func (g *Generator) BuildTypeNameMap() {
      +	g.typeNameToObject = make(map[string]Object)
      +	for _, f := range g.allFiles {
      +		// The names in this loop are defined by the proto world, not us, so the
      +		// package name may be empty.  If so, the dotted package name of X will
      +		// be ".X"; otherwise it will be ".pkg.X".
      +		dottedPkg := "." + f.GetPackage()
      +		if dottedPkg != "." {
      +			dottedPkg += "."
      +		}
      +		for _, enum := range f.enum {
      +			name := dottedPkg + dottedSlice(enum.TypeName())
      +			g.typeNameToObject[name] = enum
      +		}
      +		for _, desc := range f.desc {
      +			name := dottedPkg + dottedSlice(desc.TypeName())
      +			g.typeNameToObject[name] = desc
      +		}
      +	}
      +}
      +
      +// ObjectNamed, given a fully-qualified input type name as it appears in the input data,
      +// returns the descriptor for the message or enum with that name.
      +func (g *Generator) ObjectNamed(typeName string) Object {
      +	o, ok := g.typeNameToObject[typeName]
      +	if !ok {
      +		g.Fail("can't find object with type", typeName)
      +	}
      +
      +	// If the file of this object isn't a direct dependency of the current file,
      +	// or in the current file, then this object has been publicly imported into
      +	// a dependency of the current file.
      +	// We should return the ImportedDescriptor object for it instead.
      +	direct := *o.File().Name == *g.file.Name
      +	if !direct {
      +		for _, dep := range g.file.Dependency {
      +			if *g.fileByName(dep).Name == *o.File().Name {
      +				direct = true
      +				break
      +			}
      +		}
      +	}
      +	if !direct {
      +		found := false
      +	Loop:
      +		for _, dep := range g.file.Dependency {
      +			df := g.fileByName(*g.fileByName(dep).Name)
      +			for _, td := range df.imp {
      +				if td.o == o {
      +					// Found it!
      +					o = td
      +					found = true
      +					break Loop
      +				}
      +			}
      +		}
      +		if !found {
      +			log.Printf("protoc-gen-go: WARNING: failed finding publicly imported dependency for %v, used in %v", typeName, *g.file.Name)
      +		}
      +	}
      +
      +	return o
      +}
      +
      +// P prints the arguments to the generated output.  It handles strings and int32s, plus
      +// handling indirections because they may be *string, etc.
      +func (g *Generator) P(str ...interface{}) {
      +	if !g.writeOutput {
      +		return
      +	}
      +	g.WriteString(g.indent)
      +	for _, v := range str {
      +		switch s := v.(type) {
      +		case string:
      +			g.WriteString(s)
      +		case *string:
      +			g.WriteString(*s)
      +		case bool:
      +			fmt.Fprintf(g, "%t", s)
      +		case *bool:
      +			fmt.Fprintf(g, "%t", *s)
      +		case int:
      +			fmt.Fprintf(g, "%d", s)
      +		case *int32:
      +			fmt.Fprintf(g, "%d", *s)
      +		case *int64:
      +			fmt.Fprintf(g, "%d", *s)
      +		case float64:
      +			fmt.Fprintf(g, "%g", s)
      +		case *float64:
      +			fmt.Fprintf(g, "%g", *s)
      +		default:
      +			g.Fail(fmt.Sprintf("unknown type in printer: %T", v))
      +		}
      +	}
      +	g.WriteByte('\n')
      +}
      +
      +// addInitf stores the given statement to be printed inside the file's init function.
      +// The statement is given as a format specifier and arguments.
      +func (g *Generator) addInitf(stmt string, a ...interface{}) {
      +	g.init = append(g.init, fmt.Sprintf(stmt, a...))
      +}
      +
      +// In Indents the output one tab stop.
      +func (g *Generator) In() { g.indent += "\t" }
      +
      +// Out unindents the output one tab stop.
      +func (g *Generator) Out() {
      +	if len(g.indent) > 0 {
      +		g.indent = g.indent[1:]
      +	}
      +}
      +
      +// GenerateAllFiles generates the output for all the files we're outputting.
      +func (g *Generator) GenerateAllFiles() {
      +	// Initialize the plugins
      +	for _, p := range plugins {
      +		p.Init(g)
      +	}
      +	// Generate the output. The generator runs for every file, even the files
      +	// that we don't generate output for, so that we can collate the full list
      +	// of exported symbols to support public imports.
      +	genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
      +	for _, file := range g.genFiles {
      +		genFileMap[file] = true
      +	}
      +	for _, file := range g.allFiles {
      +		g.Reset()
      +		g.writeOutput = genFileMap[file]
      +		g.generate(file)
      +		if !g.writeOutput {
      +			continue
      +		}
      +		g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
      +			Name:    proto.String(file.goFileName()),
      +			Content: proto.String(g.String()),
      +		})
      +	}
      +}
      +
      +// Run all the plugins associated with the file.
      +func (g *Generator) runPlugins(file *FileDescriptor) {
      +	for _, p := range plugins {
      +		p.Generate(file)
      +	}
      +}
      +
      +// FileOf return the FileDescriptor for this FileDescriptorProto.
      +func (g *Generator) FileOf(fd *descriptor.FileDescriptorProto) *FileDescriptor {
      +	for _, file := range g.allFiles {
      +		if file.FileDescriptorProto == fd {
      +			return file
      +		}
      +	}
      +	g.Fail("could not find file in table:", fd.GetName())
      +	return nil
      +}
      +
      +// Fill the response protocol buffer with the generated output for all the files we're
      +// supposed to generate.
      +func (g *Generator) generate(file *FileDescriptor) {
      +	g.file = g.FileOf(file.FileDescriptorProto)
      +	g.usedPackages = make(map[string]bool)
      +
      +	if g.file.index == 0 {
      +		// For one file in the package, assert version compatibility.
      +		g.P("// This is a compile-time assertion to ensure that this generated file")
      +		g.P("// is compatible with the proto package it is being compiled against.")
      +		g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion)
      +		g.P()
      +	}
      +
      +	for _, td := range g.file.imp {
      +		g.generateImported(td)
      +	}
      +	for _, enum := range g.file.enum {
      +		g.generateEnum(enum)
      +	}
      +	for _, desc := range g.file.desc {
      +		// Don't generate virtual messages for maps.
      +		if desc.GetOptions().GetMapEntry() {
      +			continue
      +		}
      +		g.generateMessage(desc)
      +	}
      +	for _, ext := range g.file.ext {
      +		g.generateExtension(ext)
      +	}
      +	g.generateInitFunction()
      +
      +	// Run the plugins before the imports so we know which imports are necessary.
      +	g.runPlugins(file)
      +
      +	g.generateFileDescriptor(file)
      +
      +	// Generate header and imports last, though they appear first in the output.
      +	rem := g.Buffer
      +	g.Buffer = new(bytes.Buffer)
      +	g.generateHeader()
      +	g.generateImports()
      +	if !g.writeOutput {
      +		return
      +	}
      +	g.Write(rem.Bytes())
      +
      +	// Reformat generated code.
      +	fset := token.NewFileSet()
      +	raw := g.Bytes()
      +	ast, err := parser.ParseFile(fset, "", g, parser.ParseComments)
      +	if err != nil {
      +		// Print out the bad code with line numbers.
      +		// This should never happen in practice, but it can while changing generated code,
      +		// so consider this a debugging aid.
      +		var src bytes.Buffer
      +		s := bufio.NewScanner(bytes.NewReader(raw))
      +		for line := 1; s.Scan(); line++ {
      +			fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes())
      +		}
      +		g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String())
      +	}
      +	g.Reset()
      +	err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, ast)
      +	if err != nil {
      +		g.Fail("generated Go source code could not be reformatted:", err.Error())
      +	}
      +}
      +
      +// Generate the header, including package definition
      +func (g *Generator) generateHeader() {
      +	g.P("// Code generated by protoc-gen-go.")
      +	g.P("// source: ", g.file.Name)
      +	g.P("// DO NOT EDIT!")
      +	g.P()
      +
      +	name := g.file.PackageName()
      +
      +	if g.file.index == 0 {
      +		// Generate package docs for the first file in the package.
      +		g.P("/*")
      +		g.P("Package ", name, " is a generated protocol buffer package.")
      +		g.P()
      +		if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok {
      +			// not using g.PrintComments because this is a /* */ comment block.
      +			text := strings.TrimSuffix(loc.GetLeadingComments(), "\n")
      +			for _, line := range strings.Split(text, "\n") {
      +				line = strings.TrimPrefix(line, " ")
      +				// ensure we don't escape from the block comment
      +				line = strings.Replace(line, "*/", "* /", -1)
      +				g.P(line)
      +			}
      +			g.P()
      +		}
      +		var topMsgs []string
      +		g.P("It is generated from these files:")
      +		for _, f := range g.genFiles {
      +			g.P("\t", f.Name)
      +			for _, msg := range f.desc {
      +				if msg.parent != nil {
      +					continue
      +				}
      +				topMsgs = append(topMsgs, CamelCaseSlice(msg.TypeName()))
      +			}
      +		}
      +		g.P()
      +		g.P("It has these top-level messages:")
      +		for _, msg := range topMsgs {
      +			g.P("\t", msg)
      +		}
      +		g.P("*/")
      +	}
      +
      +	g.P("package ", name)
      +	g.P()
      +}
      +
      +// PrintComments prints any comments from the source .proto file.
      +// The path is a comma-separated list of integers.
      +// It returns an indication of whether any comments were printed.
      +// See descriptor.proto for its format.
      +func (g *Generator) PrintComments(path string) bool {
      +	if !g.writeOutput {
      +		return false
      +	}
      +	if loc, ok := g.file.comments[path]; ok {
      +		text := strings.TrimSuffix(loc.GetLeadingComments(), "\n")
      +		for _, line := range strings.Split(text, "\n") {
      +			g.P("// ", strings.TrimPrefix(line, " "))
      +		}
      +		return true
      +	}
      +	return false
      +}
      +
      +func (g *Generator) fileByName(filename string) *FileDescriptor {
      +	return g.allFilesByName[filename]
      +}
      +
      +// weak returns whether the ith import of the current file is a weak import.
      +func (g *Generator) weak(i int32) bool {
      +	for _, j := range g.file.WeakDependency {
      +		if j == i {
      +			return true
      +		}
      +	}
      +	return false
      +}
      +
      +// Generate the imports
      +func (g *Generator) generateImports() {
      +	// We almost always need a proto import.  Rather than computing when we
      +	// do, which is tricky when there's a plugin, just import it and
      +	// reference it later. The same argument applies to the fmt and math packages.
      +	g.P("import " + g.Pkg["proto"] + " " + strconv.Quote(g.ImportPrefix+"github.com/golang/protobuf/proto"))
      +	g.P("import " + g.Pkg["fmt"] + ` "fmt"`)
      +	g.P("import " + g.Pkg["math"] + ` "math"`)
      +	for i, s := range g.file.Dependency {
      +		fd := g.fileByName(s)
      +		// Do not import our own package.
      +		if fd.PackageName() == g.packageName {
      +			continue
      +		}
      +		filename := fd.goFileName()
      +		// By default, import path is the dirname of the Go filename.
      +		importPath := path.Dir(filename)
      +		if substitution, ok := g.ImportMap[s]; ok {
      +			importPath = substitution
      +		}
      +		importPath = g.ImportPrefix + importPath
      +		// Skip weak imports.
      +		if g.weak(int32(i)) {
      +			g.P("// skipping weak import ", fd.PackageName(), " ", strconv.Quote(importPath))
      +			continue
      +		}
      +		// We need to import all the dependencies, even if we don't reference them,
      +		// because other code and tools depend on having the full transitive closure
      +		// of protocol buffer types in the binary.
      +		pname := fd.PackageName()
      +		if _, ok := g.usedPackages[pname]; !ok {
      +			pname = "_"
      +		}
      +		g.P("import ", pname, " ", strconv.Quote(importPath))
      +	}
      +	g.P()
      +	// TODO: may need to worry about uniqueness across plugins
      +	for _, p := range plugins {
      +		p.GenerateImports(g.file)
      +		g.P()
      +	}
      +	g.P("// Reference imports to suppress errors if they are not otherwise used.")
      +	g.P("var _ = ", g.Pkg["proto"], ".Marshal")
      +	g.P("var _ = ", g.Pkg["fmt"], ".Errorf")
      +	g.P("var _ = ", g.Pkg["math"], ".Inf")
      +	g.P()
      +}
      +
      +func (g *Generator) generateImported(id *ImportedDescriptor) {
      +	// Don't generate public import symbols for files that we are generating
      +	// code for, since those symbols will already be in this package.
      +	// We can't simply avoid creating the ImportedDescriptor objects,
      +	// because g.genFiles isn't populated at that stage.
      +	tn := id.TypeName()
      +	sn := tn[len(tn)-1]
      +	df := g.FileOf(id.o.File())
      +	filename := *df.Name
      +	for _, fd := range g.genFiles {
      +		if *fd.Name == filename {
      +			g.P("// Ignoring public import of ", sn, " from ", filename)
      +			g.P()
      +			return
      +		}
      +	}
      +	g.P("// ", sn, " from public import ", filename)
      +	g.usedPackages[df.PackageName()] = true
      +
      +	for _, sym := range df.exported[id.o] {
      +		sym.GenerateAlias(g, df.PackageName())
      +	}
      +
      +	g.P()
      +}
      +
      +// Generate the enum definitions for this EnumDescriptor.
      +func (g *Generator) generateEnum(enum *EnumDescriptor) {
      +	// The full type name
      +	typeName := enum.TypeName()
      +	// The full type name, CamelCased.
      +	ccTypeName := CamelCaseSlice(typeName)
      +	ccPrefix := enum.prefix()
      +
      +	g.PrintComments(enum.path)
      +	g.P("type ", ccTypeName, " int32")
      +	g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()})
      +	g.P("const (")
      +	g.In()
      +	for i, e := range enum.Value {
      +		g.PrintComments(fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i))
      +
      +		name := ccPrefix + *e.Name
      +		g.P(name, " ", ccTypeName, " = ", e.Number)
      +		g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName})
      +	}
      +	g.Out()
      +	g.P(")")
      +	g.P("var ", ccTypeName, "_name = map[int32]string{")
      +	g.In()
      +	generated := make(map[int32]bool) // avoid duplicate values
      +	for _, e := range enum.Value {
      +		duplicate := ""
      +		if _, present := generated[*e.Number]; present {
      +			duplicate = "// Duplicate value: "
      +		}
      +		g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",")
      +		generated[*e.Number] = true
      +	}
      +	g.Out()
      +	g.P("}")
      +	g.P("var ", ccTypeName, "_value = map[string]int32{")
      +	g.In()
      +	for _, e := range enum.Value {
      +		g.P(strconv.Quote(*e.Name), ": ", e.Number, ",")
      +	}
      +	g.Out()
      +	g.P("}")
      +
      +	if !enum.proto3() {
      +		g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {")
      +		g.In()
      +		g.P("p := new(", ccTypeName, ")")
      +		g.P("*p = x")
      +		g.P("return p")
      +		g.Out()
      +		g.P("}")
      +	}
      +
      +	g.P("func (x ", ccTypeName, ") String() string {")
      +	g.In()
      +	g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))")
      +	g.Out()
      +	g.P("}")
      +
      +	if !enum.proto3() {
      +		g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {")
      +		g.In()
      +		g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`)
      +		g.P("if err != nil {")
      +		g.In()
      +		g.P("return err")
      +		g.Out()
      +		g.P("}")
      +		g.P("*x = ", ccTypeName, "(value)")
      +		g.P("return nil")
      +		g.Out()
      +		g.P("}")
      +	}
      +
      +	var indexes []string
      +	for m := enum.parent; m != nil; m = m.parent {
      +		// XXX: skip groups?
      +		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
      +	}
      +	indexes = append(indexes, strconv.Itoa(enum.index))
      +	g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) { return fileDescriptor", g.file.index, ", []int{", strings.Join(indexes, ", "), "} }")
      +	if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" {
      +		g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`)
      +	}
      +
      +	g.P()
      +}
      +
+// The tag is a string like "varint,2,opt,name=fieldname,def=7" that
+// identifies details of the field for the protocol buffer marshaling and unmarshaling
+// code.  The fields are:
+//	wire encoding
+//	protocol tag number
+//	opt,req,rep for optional, required, or repeated
+//	packed whether the encoding is "packed" (optional; repeated primitives only)
+//	name= the original declared name
+//	enum= the name of the enum type if it is an enum-typed field.
+//	proto3 if this field is in a proto3 message
+//	def= string representation of the default value, if any.
+// The default value must be in a representation that can be used at run-time
+// to generate the default value. Thus bools become 0 and 1, for instance.
+func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string {
+	// Cardinality marker: exactly one of opt/req/rep per the field label.
+	optrepreq := ""
+	switch {
+	case isOptional(field):
+		optrepreq = "opt"
+	case isRequired(field):
+		optrepreq = "req"
+	case isRepeated(field):
+		optrepreq = "rep"
+	}
+	var defaultValue string
+	if dv := field.DefaultValue; dv != nil { // set means an explicit default
+		defaultValue = *dv
+		// Some types need tweaking.
+		switch *field.Type {
+		case descriptor.FieldDescriptorProto_TYPE_BOOL:
+			if defaultValue == "true" {
+				defaultValue = "1"
+			} else {
+				defaultValue = "0"
+			}
+		case descriptor.FieldDescriptorProto_TYPE_STRING,
+			descriptor.FieldDescriptorProto_TYPE_BYTES:
+			// Nothing to do. Quoting is done for the whole tag.
+		case descriptor.FieldDescriptorProto_TYPE_ENUM:
+			// For enums we need to provide the integer constant.
+			obj := g.ObjectNamed(field.GetTypeName())
+			if id, ok := obj.(*ImportedDescriptor); ok {
+				// It is an enum that was publicly imported.
+				// We need the underlying type.
+				obj = id.o
+			}
+			enum, ok := obj.(*EnumDescriptor)
+			if !ok {
+				log.Printf("obj is a %T", obj)
+				if id, ok := obj.(*ImportedDescriptor); ok {
+					log.Printf("id.o is a %T", id.o)
+				}
+				g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName()))
+			}
+			defaultValue = enum.integerValueAsString(defaultValue)
+		}
+		defaultValue = ",def=" + defaultValue
+	}
+	enum := ""
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
+		// We avoid using obj.PackageName(), because we want to use the
+		// original (proto-world) package name.
+		obj := g.ObjectNamed(field.GetTypeName())
+		if id, ok := obj.(*ImportedDescriptor); ok {
+			obj = id.o
+		}
+		enum = ",enum="
+		if pkg := obj.File().GetPackage(); pkg != "" {
+			enum += pkg + "."
+		}
+		enum += CamelCaseSlice(obj.TypeName())
+	}
+	// "packed" is only emitted when the field options explicitly request it.
+	packed := ""
+	if field.Options != nil && field.Options.GetPacked() {
+		packed = ",packed"
+	}
+	fieldName := field.GetName()
+	name := fieldName
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
+		// We must use the type name for groups instead of
+		// the field name to preserve capitalization.
+		// type_name in FieldDescriptorProto is fully-qualified,
+		// but we only want the local part.
+		name = *field.TypeName
+		if i := strings.LastIndex(name, "."); i >= 0 {
+			name = name[i+1:]
+		}
+	}
+	if json := field.GetJsonName(); json != "" && json != name {
+		// TODO: escaping might be needed, in which case
+		// perhaps this should be in its own "json" tag.
+		name += ",json=" + json
+	}
+	name = ",name=" + name
+	if message.proto3() {
+		// We only need the extra tag for []byte fields;
+		// no need to add noise for the others.
+		if *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES {
+			name += ",proto3"
+		}
+
+	}
+	oneof := ""
+	if field.OneofIndex != nil {
+		oneof = ",oneof"
+	}
+	// Assemble the comma-separated tag in the order documented above and
+	// quote it so it can be embedded directly in a struct tag literal.
+	return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s",
+		wiretype,
+		field.GetNumber(),
+		optrepreq,
+		packed,
+		name,
+		enum,
+		oneof,
+		defaultValue))
+}
      +
      +func needsStar(typ descriptor.FieldDescriptorProto_Type) bool {
      +	switch typ {
      +	case descriptor.FieldDescriptorProto_TYPE_GROUP:
      +		return false
      +	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
      +		return false
      +	case descriptor.FieldDescriptorProto_TYPE_BYTES:
      +		return false
      +	}
      +	return true
      +}
      +
      +// TypeName is the printed name appropriate for an item. If the object is in the current file,
      +// TypeName drops the package name and underscores the rest.
      +// Otherwise the object is from another package; and the result is the underscored
      +// package name followed by the item name.
      +// The result always has an initial capital.
      +func (g *Generator) TypeName(obj Object) string {
      +	return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName())
      +}
      +
      +// TypeNameWithPackage is like TypeName, but always includes the package
      +// name even if the object is in our own package.
      +func (g *Generator) TypeNameWithPackage(obj Object) string {
      +	return obj.PackageName() + CamelCaseSlice(obj.TypeName())
      +}
      +
+// GoType returns a string representing the type name, and the wire type
+// (e.g. "varint", "fixed64", "bytes") used in the generated struct tag.
+// Note that fixed32/fixed64 map to unsigned Go types while
+// sfixed32/sfixed64 map to signed ones.
+func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) {
+	// TODO: Options.
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		typ, wire = "float64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		typ, wire = "float32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_INT64:
+		typ, wire = "int64", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+		typ, wire = "uint64", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_INT32:
+		typ, wire = "int32", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+		typ, wire = "uint32", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		typ, wire = "uint64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		typ, wire = "uint32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		typ, wire = "bool", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		typ, wire = "string", "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = "*"+g.TypeName(desc), "group"
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = "*"+g.TypeName(desc), "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		typ, wire = "[]byte", "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = g.TypeName(desc), "varint"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		typ, wire = "int32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		typ, wire = "int64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		typ, wire = "int32", "zigzag32"
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		typ, wire = "int64", "zigzag64"
+	default:
+		g.Fail("unknown type for", field.GetName())
+	}
+	// Repeated fields become slices; proto3 and oneof fields are returned
+	// as-is (no pointer); any remaining singular field gets a pointer
+	// unless needsStar says its base type already carries one.
+	if isRepeated(field) {
+		typ = "[]" + typ
+	} else if message != nil && message.proto3() {
+		return
+	} else if field.OneofIndex != nil && message != nil {
+		return
+	} else if needsStar(*field.Type) {
+		typ = "*" + typ
+	}
+	return
+}
      +
      +func (g *Generator) RecordTypeUse(t string) {
      +	if obj, ok := g.typeNameToObject[t]; ok {
      +		// Call ObjectNamed to get the true object to record the use.
      +		obj = g.ObjectNamed(t)
      +		g.usedPackages[obj.PackageName()] = true
      +	}
      +}
      +
+// Method names that may be generated.  Fields with these names get an
+// underscore appended. The list seeds the used-name set when struct
+// field and getter names are allocated.
+var methodNames = [...]string{
+	"Reset",
+	"String",
+	"ProtoMessage",
+	"Marshal",
+	"Unmarshal",
+	"ExtensionRangeArray",
+	"ExtensionMap",
+	"Descriptor",
+}
      +
+// Names of messages in the `google.protobuf` package for which
+// we will generate XXX_WellKnownType methods.
+var wellKnownTypes = map[string]bool{
+	"Any":       true,
+	"Duration":  true,
+	"Empty":     true,
+	"Struct":    true,
+	"Timestamp": true,
+
+	// The *Value entries below appear to be the scalar wrapper
+	// messages (e.g. google.protobuf.Int64Value).
+	"Value":       true,
+	"ListValue":   true,
+	"DoubleValue": true,
+	"FloatValue":  true,
+	"Int64Value":  true,
+	"UInt64Value": true,
+	"Int32Value":  true,
+	"UInt32Value": true,
+	"BoolValue":   true,
+	"StringValue": true,
+	"BytesValue":  true,
+}
      +
      +// Generate the type and default constant definitions for this Descriptor.
      +func (g *Generator) generateMessage(message *Descriptor) {
      +	// The full type name
      +	typeName := message.TypeName()
      +	// The full type name, CamelCased.
      +	ccTypeName := CamelCaseSlice(typeName)
      +
      +	usedNames := make(map[string]bool)
      +	for _, n := range methodNames {
      +		usedNames[n] = true
      +	}
      +	fieldNames := make(map[*descriptor.FieldDescriptorProto]string)
      +	fieldGetterNames := make(map[*descriptor.FieldDescriptorProto]string)
      +	fieldTypes := make(map[*descriptor.FieldDescriptorProto]string)
      +	mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string)
      +
      +	oneofFieldName := make(map[int32]string)                           // indexed by oneof_index field of FieldDescriptorProto
      +	oneofDisc := make(map[int32]string)                                // name of discriminator method
      +	oneofTypeName := make(map[*descriptor.FieldDescriptorProto]string) // without star
      +	oneofInsertPoints := make(map[int32]int)                           // oneof_index => offset of g.Buffer
      +
      +	g.PrintComments(message.path)
      +	g.P("type ", ccTypeName, " struct {")
      +	g.In()
      +
      +	// allocNames finds a conflict-free variation of the given strings,
      +	// consistently mutating their suffixes.
      +	// It returns the same number of strings.
      +	allocNames := func(ns ...string) []string {
      +	Loop:
      +		for {
      +			for _, n := range ns {
      +				if usedNames[n] {
      +					for i := range ns {
      +						ns[i] += "_"
      +					}
      +					continue Loop
      +				}
      +			}
      +			for _, n := range ns {
      +				usedNames[n] = true
      +			}
      +			return ns
      +		}
      +	}
      +
      +	for i, field := range message.Field {
      +		// Allocate the getter and the field at the same time so name
      +		// collisions create field/method consistent names.
      +		// TODO: This allocation occurs based on the order of the fields
      +		// in the proto file, meaning that a change in the field
      +		// ordering can change generated Method/Field names.
      +		base := CamelCase(*field.Name)
      +		ns := allocNames(base, "Get"+base)
      +		fieldName, fieldGetterName := ns[0], ns[1]
      +		typename, wiretype := g.GoType(message, field)
      +		jsonName := *field.Name
      +		tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty")
      +
      +		fieldNames[field] = fieldName
      +		fieldGetterNames[field] = fieldGetterName
      +
      +		oneof := field.OneofIndex != nil
      +		if oneof && oneofFieldName[*field.OneofIndex] == "" {
      +			odp := message.OneofDecl[int(*field.OneofIndex)]
      +			fname := allocNames(CamelCase(odp.GetName()))[0]
      +
      +			// This is the first field of a oneof we haven't seen before.
      +			// Generate the union field.
      +			com := g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex))
      +			if com {
      +				g.P("//")
      +			}
      +			g.P("// Types that are valid to be assigned to ", fname, ":")
      +			// Generate the rest of this comment later,
      +			// when we've computed any disambiguation.
      +			oneofInsertPoints[*field.OneofIndex] = g.Buffer.Len()
      +
      +			dname := "is" + ccTypeName + "_" + fname
      +			oneofFieldName[*field.OneofIndex] = fname
      +			oneofDisc[*field.OneofIndex] = dname
      +			tag := `protobuf_oneof:"` + odp.GetName() + `"`
      +			g.P(fname, " ", dname, " `", tag, "`")
      +		}
      +
      +		if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
      +			desc := g.ObjectNamed(field.GetTypeName())
      +			if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() {
      +				// Figure out the Go types and tags for the key and value types.
      +				keyField, valField := d.Field[0], d.Field[1]
      +				keyType, keyWire := g.GoType(d, keyField)
      +				valType, valWire := g.GoType(d, valField)
      +				keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire)
      +
      +				// We don't use stars, except for message-typed values.
      +				// Message and enum types are the only two possibly foreign types used in maps,
      +				// so record their use. They are not permitted as map keys.
      +				keyType = strings.TrimPrefix(keyType, "*")
      +				switch *valField.Type {
      +				case descriptor.FieldDescriptorProto_TYPE_ENUM:
      +					valType = strings.TrimPrefix(valType, "*")
      +					g.RecordTypeUse(valField.GetTypeName())
      +				case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
      +					g.RecordTypeUse(valField.GetTypeName())
      +				default:
      +					valType = strings.TrimPrefix(valType, "*")
      +				}
      +
      +				typename = fmt.Sprintf("map[%s]%s", keyType, valType)
      +				mapFieldTypes[field] = typename // record for the getter generation
      +
      +				tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag)
      +			}
      +		}
      +
      +		fieldTypes[field] = typename
      +
      +		if oneof {
      +			tname := ccTypeName + "_" + fieldName
      +			// It is possible for this to collide with a message or enum
      +			// nested in this message. Check for collisions.
      +			for {
      +				ok := true
      +				for _, desc := range message.nested {
      +					if CamelCaseSlice(desc.TypeName()) == tname {
      +						ok = false
      +						break
      +					}
      +				}
      +				for _, enum := range message.enums {
      +					if CamelCaseSlice(enum.TypeName()) == tname {
      +						ok = false
      +						break
      +					}
      +				}
      +				if !ok {
      +					tname += "_"
      +					continue
      +				}
      +				break
      +			}
      +
      +			oneofTypeName[field] = tname
      +			continue
      +		}
      +
      +		g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i))
      +		g.P(fieldName, "\t", typename, "\t`", tag, "`")
      +		g.RecordTypeUse(field.GetTypeName())
      +	}
      +	if len(message.ExtensionRange) > 0 {
      +		g.P("XXX_extensions\t\tmap[int32]", g.Pkg["proto"], ".Extension `json:\"-\"`")
      +	}
      +	if !message.proto3() {
      +		g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
      +	}
      +	g.Out()
      +	g.P("}")
      +
      +	// Update g.Buffer to list valid oneof types.
      +	// We do this down here, after we've disambiguated the oneof type names.
      +	// We go in reverse order of insertion point to avoid invalidating offsets.
      +	for oi := int32(len(message.OneofDecl)); oi >= 0; oi-- {
      +		ip := oneofInsertPoints[oi]
      +		all := g.Buffer.Bytes()
      +		rem := all[ip:]
      +		g.Buffer = bytes.NewBuffer(all[:ip:ip]) // set cap so we don't scribble on rem
      +		for _, field := range message.Field {
      +			if field.OneofIndex == nil || *field.OneofIndex != oi {
      +				continue
      +			}
      +			g.P("//\t*", oneofTypeName[field])
      +		}
      +		g.Buffer.Write(rem)
      +	}
      +
      +	// Reset, String and ProtoMessage methods.
      +	g.P("func (m *", ccTypeName, ") Reset() { *m = ", ccTypeName, "{} }")
      +	g.P("func (m *", ccTypeName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }")
      +	g.P("func (*", ccTypeName, ") ProtoMessage() {}")
      +	var indexes []string
      +	for m := message; m != nil; m = m.parent {
      +		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
      +	}
      +	g.P("func (*", ccTypeName, ") Descriptor() ([]byte, []int) { return fileDescriptor", g.file.index, ", []int{", strings.Join(indexes, ", "), "} }")
      +	// TODO: Revisit the decision to use a XXX_WellKnownType method
      +	// if we change proto.MessageName to work with multiple equivalents.
      +	if message.file.GetPackage() == "google.protobuf" && wellKnownTypes[message.GetName()] {
      +		g.P("func (*", ccTypeName, `) XXX_WellKnownType() string { return "`, message.GetName(), `" }`)
      +	}
      +
      +	// Extension support methods
      +	var hasExtensions, isMessageSet bool
      +	if len(message.ExtensionRange) > 0 {
      +		hasExtensions = true
      +		// message_set_wire_format only makes sense when extensions are defined.
      +		if opts := message.Options; opts != nil && opts.GetMessageSetWireFormat() {
      +			isMessageSet = true
      +			g.P()
      +			g.P("func (m *", ccTypeName, ") Marshal() ([]byte, error) {")
      +			g.In()
      +			g.P("return ", g.Pkg["proto"], ".MarshalMessageSet(m.ExtensionMap())")
      +			g.Out()
      +			g.P("}")
      +			g.P("func (m *", ccTypeName, ") Unmarshal(buf []byte) error {")
      +			g.In()
      +			g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSet(buf, m.ExtensionMap())")
      +			g.Out()
      +			g.P("}")
      +			g.P("func (m *", ccTypeName, ") MarshalJSON() ([]byte, error) {")
      +			g.In()
      +			g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(m.XXX_extensions)")
      +			g.Out()
      +			g.P("}")
      +			g.P("func (m *", ccTypeName, ") UnmarshalJSON(buf []byte) error {")
      +			g.In()
      +			g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, m.XXX_extensions)")
      +			g.Out()
      +			g.P("}")
      +			g.P("// ensure ", ccTypeName, " satisfies proto.Marshaler and proto.Unmarshaler")
      +			g.P("var _ ", g.Pkg["proto"], ".Marshaler = (*", ccTypeName, ")(nil)")
      +			g.P("var _ ", g.Pkg["proto"], ".Unmarshaler = (*", ccTypeName, ")(nil)")
      +		}
      +
      +		g.P()
      +		g.P("var extRange_", ccTypeName, " = []", g.Pkg["proto"], ".ExtensionRange{")
      +		g.In()
      +		for _, r := range message.ExtensionRange {
      +			end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends
      +			g.P("{", r.Start, ", ", end, "},")
      +		}
      +		g.Out()
      +		g.P("}")
      +		g.P("func (*", ccTypeName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {")
      +		g.In()
      +		g.P("return extRange_", ccTypeName)
      +		g.Out()
      +		g.P("}")
      +		g.P("func (m *", ccTypeName, ") ExtensionMap() map[int32]", g.Pkg["proto"], ".Extension {")
      +		g.In()
      +		g.P("if m.XXX_extensions == nil {")
      +		g.In()
      +		g.P("m.XXX_extensions = make(map[int32]", g.Pkg["proto"], ".Extension)")
      +		g.Out()
      +		g.P("}")
      +		g.P("return m.XXX_extensions")
      +		g.Out()
      +		g.P("}")
      +	}
      +
      +	// Default constants
      +	defNames := make(map[*descriptor.FieldDescriptorProto]string)
      +	for _, field := range message.Field {
      +		def := field.GetDefaultValue()
      +		if def == "" {
      +			continue
      +		}
      +		fieldname := "Default_" + ccTypeName + "_" + CamelCase(*field.Name)
      +		defNames[field] = fieldname
      +		typename, _ := g.GoType(message, field)
      +		if typename[0] == '*' {
      +			typename = typename[1:]
      +		}
      +		kind := "const "
      +		switch {
      +		case typename == "bool":
      +		case typename == "string":
      +			def = strconv.Quote(def)
      +		case typename == "[]byte":
      +			def = "[]byte(" + strconv.Quote(def) + ")"
      +			kind = "var "
      +		case def == "inf", def == "-inf", def == "nan":
      +			// These names are known to, and defined by, the protocol language.
      +			switch def {
      +			case "inf":
      +				def = "math.Inf(1)"
      +			case "-inf":
      +				def = "math.Inf(-1)"
      +			case "nan":
      +				def = "math.NaN()"
      +			}
      +			if *field.Type == descriptor.FieldDescriptorProto_TYPE_FLOAT {
      +				def = "float32(" + def + ")"
      +			}
      +			kind = "var "
      +		case *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM:
      +			// Must be an enum.  Need to construct the prefixed name.
      +			obj := g.ObjectNamed(field.GetTypeName())
      +			var enum *EnumDescriptor
      +			if id, ok := obj.(*ImportedDescriptor); ok {
      +				// The enum type has been publicly imported.
      +				enum, _ = id.o.(*EnumDescriptor)
      +			} else {
      +				enum, _ = obj.(*EnumDescriptor)
      +			}
      +			if enum == nil {
      +				log.Printf("don't know how to generate constant for %s", fieldname)
      +				continue
      +			}
      +			def = g.DefaultPackageName(obj) + enum.prefix() + def
      +		}
      +		g.P(kind, fieldname, " ", typename, " = ", def)
      +		g.file.addExport(message, constOrVarSymbol{fieldname, kind, ""})
      +	}
      +	g.P()
      +
      +	// Oneof per-field types, discriminants and getters.
      +	//
      +	// Generate unexported named types for the discriminant interfaces.
      +	// We shouldn't have to do this, but there was (~19 Aug 2015) a compiler/linker bug
      +	// that was triggered by using anonymous interfaces here.
      +	// TODO: Revisit this and consider reverting back to anonymous interfaces.
      +	for oi := range message.OneofDecl {
      +		dname := oneofDisc[int32(oi)]
      +		g.P("type ", dname, " interface { ", dname, "() }")
      +	}
      +	g.P()
      +	for _, field := range message.Field {
      +		if field.OneofIndex == nil {
      +			continue
      +		}
      +		_, wiretype := g.GoType(message, field)
      +		tag := "protobuf:" + g.goTag(message, field, wiretype)
      +		g.P("type ", oneofTypeName[field], " struct{ ", fieldNames[field], " ", fieldTypes[field], " `", tag, "` }")
      +		g.RecordTypeUse(field.GetTypeName())
      +	}
      +	g.P()
      +	for _, field := range message.Field {
      +		if field.OneofIndex == nil {
      +			continue
      +		}
      +		g.P("func (*", oneofTypeName[field], ") ", oneofDisc[*field.OneofIndex], "() {}")
      +	}
      +	g.P()
      +	for oi := range message.OneofDecl {
      +		fname := oneofFieldName[int32(oi)]
      +		g.P("func (m *", ccTypeName, ") Get", fname, "() ", oneofDisc[int32(oi)], " {")
      +		g.P("if m != nil { return m.", fname, " }")
      +		g.P("return nil")
      +		g.P("}")
      +	}
      +	g.P()
      +
      +	// Field getters
      +	var getters []getterSymbol
      +	for _, field := range message.Field {
      +		oneof := field.OneofIndex != nil
      +
      +		fname := fieldNames[field]
      +		typename, _ := g.GoType(message, field)
      +		if t, ok := mapFieldTypes[field]; ok {
      +			typename = t
      +		}
      +		mname := fieldGetterNames[field]
      +		star := ""
      +		if needsStar(*field.Type) && typename[0] == '*' {
      +			typename = typename[1:]
      +			star = "*"
      +		}
      +
      +		// In proto3, only generate getters for message fields and oneof fields.
      +		if message.proto3() && *field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE && !oneof {
      +			continue
      +		}
      +
      +		// Only export getter symbols for basic types,
      +		// and for messages and enums in the same package.
      +		// Groups are not exported.
      +		// Foreign types can't be hoisted through a public import because
      +		// the importer may not already be importing the defining .proto.
      +		// As an example, imagine we have an import tree like this:
      +		//   A.proto -> B.proto -> C.proto
      +		// If A publicly imports B, we need to generate the getters from B in A's output,
      +		// but if one such getter returns something from C then we cannot do that
      +		// because A is not importing C already.
      +		var getter, genType bool
      +		switch *field.Type {
      +		case descriptor.FieldDescriptorProto_TYPE_GROUP:
      +			getter = false
      +		case descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_ENUM:
      +			// Only export getter if its return type is in this package.
      +			getter = g.ObjectNamed(field.GetTypeName()).PackageName() == message.PackageName()
      +			genType = true
      +		default:
      +			getter = true
      +		}
      +		if getter {
      +			getters = append(getters, getterSymbol{
      +				name:     mname,
      +				typ:      typename,
      +				typeName: field.GetTypeName(),
      +				genType:  genType,
      +			})
      +		}
      +
      +		g.P("func (m *", ccTypeName, ") "+mname+"() "+typename+" {")
      +		g.In()
      +		def, hasDef := defNames[field]
      +		typeDefaultIsNil := false // whether this field type's default value is a literal nil unless specified
      +		switch *field.Type {
      +		case descriptor.FieldDescriptorProto_TYPE_BYTES:
      +			typeDefaultIsNil = !hasDef
      +		case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE:
      +			typeDefaultIsNil = true
      +		}
      +		if isRepeated(field) {
      +			typeDefaultIsNil = true
      +		}
      +		if typeDefaultIsNil && !oneof {
      +			// A bytes field with no explicit default needs less generated code,
      +			// as does a message or group field, or a repeated field.
      +			g.P("if m != nil {")
      +			g.In()
      +			g.P("return m." + fname)
      +			g.Out()
      +			g.P("}")
      +			g.P("return nil")
      +			g.Out()
      +			g.P("}")
      +			g.P()
      +			continue
      +		}
      +		if !oneof {
      +			g.P("if m != nil && m." + fname + " != nil {")
      +			g.In()
      +			g.P("return " + star + "m." + fname)
      +			g.Out()
      +			g.P("}")
      +		} else {
      +			uname := oneofFieldName[*field.OneofIndex]
      +			tname := oneofTypeName[field]
      +			g.P("if x, ok := m.Get", uname, "().(*", tname, "); ok {")
      +			g.P("return x.", fname)
      +			g.P("}")
      +		}
      +		if hasDef {
      +			if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES {
      +				g.P("return " + def)
      +			} else {
      +				// The default is a []byte var.
      +				// Make a copy when returning it to be safe.
      +				g.P("return append([]byte(nil), ", def, "...)")
      +			}
      +		} else {
      +			switch *field.Type {
      +			case descriptor.FieldDescriptorProto_TYPE_BOOL:
      +				g.P("return false")
      +			case descriptor.FieldDescriptorProto_TYPE_STRING:
      +				g.P(`return ""`)
      +			case descriptor.FieldDescriptorProto_TYPE_GROUP,
      +				descriptor.FieldDescriptorProto_TYPE_MESSAGE,
      +				descriptor.FieldDescriptorProto_TYPE_BYTES:
      +				// This is only possible for oneof fields.
      +				g.P("return nil")
      +			case descriptor.FieldDescriptorProto_TYPE_ENUM:
      +				// The default default for an enum is the first value in the enum,
      +				// not zero.
      +				obj := g.ObjectNamed(field.GetTypeName())
      +				var enum *EnumDescriptor
      +				if id, ok := obj.(*ImportedDescriptor); ok {
      +					// The enum type has been publicly imported.
      +					enum, _ = id.o.(*EnumDescriptor)
      +				} else {
      +					enum, _ = obj.(*EnumDescriptor)
      +				}
      +				if enum == nil {
      +					log.Printf("don't know how to generate getter for %s", field.GetName())
      +					continue
      +				}
      +				if len(enum.Value) == 0 {
      +					g.P("return 0 // empty enum")
      +				} else {
      +					first := enum.Value[0].GetName()
      +					g.P("return ", g.DefaultPackageName(obj)+enum.prefix()+first)
      +				}
      +			default:
      +				g.P("return 0")
      +			}
      +		}
      +		g.Out()
      +		g.P("}")
      +		g.P()
      +	}
      +
      +	if !message.group {
      +		ms := &messageSymbol{
      +			sym:           ccTypeName,
      +			hasExtensions: hasExtensions,
      +			isMessageSet:  isMessageSet,
      +			hasOneof:      len(message.OneofDecl) > 0,
      +			getters:       getters,
      +		}
      +		g.file.addExport(message, ms)
      +	}
      +
      +	// Oneof functions
      +	if len(message.OneofDecl) > 0 {
      +		fieldWire := make(map[*descriptor.FieldDescriptorProto]string)
      +
      +		// method
      +		enc := "_" + ccTypeName + "_OneofMarshaler"
      +		dec := "_" + ccTypeName + "_OneofUnmarshaler"
      +		size := "_" + ccTypeName + "_OneofSizer"
      +		encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error"
      +		decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)"
      +		sizeSig := "(msg " + g.Pkg["proto"] + ".Message) (n int)"
      +
      +		g.P("// XXX_OneofFuncs is for the internal use of the proto package.")
      +		g.P("func (*", ccTypeName, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {")
      +		g.P("return ", enc, ", ", dec, ", ", size, ", []interface{}{")
      +		for _, field := range message.Field {
      +			if field.OneofIndex == nil {
      +				continue
      +			}
      +			g.P("(*", oneofTypeName[field], ")(nil),")
      +		}
      +		g.P("}")
      +		g.P("}")
      +		g.P()
      +
      +		// marshaler
      +		g.P("func ", enc, encSig, " {")
      +		g.P("m := msg.(*", ccTypeName, ")")
      +		for oi, odp := range message.OneofDecl {
      +			g.P("// ", odp.GetName())
      +			fname := oneofFieldName[int32(oi)]
      +			g.P("switch x := m.", fname, ".(type) {")
      +			for _, field := range message.Field {
      +				if field.OneofIndex == nil || int(*field.OneofIndex) != oi {
      +					continue
      +				}
      +				g.P("case *", oneofTypeName[field], ":")
      +				var wire, pre, post string
      +				val := "x." + fieldNames[field] // overridden for TYPE_BOOL
      +				canFail := false                // only TYPE_MESSAGE and TYPE_GROUP can fail
      +				switch *field.Type {
      +				case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
      +					wire = "WireFixed64"
      +					pre = "b.EncodeFixed64(" + g.Pkg["math"] + ".Float64bits("
      +					post = "))"
      +				case descriptor.FieldDescriptorProto_TYPE_FLOAT:
      +					wire = "WireFixed32"
      +					pre = "b.EncodeFixed32(uint64(" + g.Pkg["math"] + ".Float32bits("
      +					post = ")))"
      +				case descriptor.FieldDescriptorProto_TYPE_INT64,
      +					descriptor.FieldDescriptorProto_TYPE_UINT64:
      +					wire = "WireVarint"
      +					pre, post = "b.EncodeVarint(uint64(", "))"
      +				case descriptor.FieldDescriptorProto_TYPE_INT32,
      +					descriptor.FieldDescriptorProto_TYPE_UINT32,
      +					descriptor.FieldDescriptorProto_TYPE_ENUM:
      +					wire = "WireVarint"
      +					pre, post = "b.EncodeVarint(uint64(", "))"
      +				case descriptor.FieldDescriptorProto_TYPE_FIXED64,
      +					descriptor.FieldDescriptorProto_TYPE_SFIXED64:
      +					wire = "WireFixed64"
      +					pre, post = "b.EncodeFixed64(uint64(", "))"
      +				case descriptor.FieldDescriptorProto_TYPE_FIXED32,
      +					descriptor.FieldDescriptorProto_TYPE_SFIXED32:
      +					wire = "WireFixed32"
      +					pre, post = "b.EncodeFixed32(uint64(", "))"
      +				case descriptor.FieldDescriptorProto_TYPE_BOOL:
      +					// bool needs special handling.
      +					g.P("t := uint64(0)")
      +					g.P("if ", val, " { t = 1 }")
      +					val = "t"
      +					wire = "WireVarint"
      +					pre, post = "b.EncodeVarint(", ")"
      +				case descriptor.FieldDescriptorProto_TYPE_STRING:
      +					wire = "WireBytes"
      +					pre, post = "b.EncodeStringBytes(", ")"
      +				case descriptor.FieldDescriptorProto_TYPE_GROUP:
      +					wire = "WireStartGroup"
      +					pre, post = "b.Marshal(", ")"
      +					canFail = true
      +				case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
      +					wire = "WireBytes"
      +					pre, post = "b.EncodeMessage(", ")"
      +					canFail = true
      +				case descriptor.FieldDescriptorProto_TYPE_BYTES:
      +					wire = "WireBytes"
      +					pre, post = "b.EncodeRawBytes(", ")"
      +				case descriptor.FieldDescriptorProto_TYPE_SINT32:
      +					wire = "WireVarint"
      +					pre, post = "b.EncodeZigzag32(uint64(", "))"
      +				case descriptor.FieldDescriptorProto_TYPE_SINT64:
      +					wire = "WireVarint"
      +					pre, post = "b.EncodeZigzag64(uint64(", "))"
      +				default:
      +					g.Fail("unhandled oneof field type ", field.Type.String())
      +				}
      +				fieldWire[field] = wire
      +				g.P("b.EncodeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")")
      +				if !canFail {
      +					g.P(pre, val, post)
      +				} else {
      +					g.P("if err := ", pre, val, post, "; err != nil {")
      +					g.P("return err")
      +					g.P("}")
      +				}
      +				if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
      +					g.P("b.EncodeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".WireEndGroup)")
      +				}
      +			}
      +			g.P("case nil:")
      +			g.P("default: return ", g.Pkg["fmt"], `.Errorf("`, ccTypeName, ".", fname, ` has unexpected type %T", x)`)
      +			g.P("}")
      +		}
      +		g.P("return nil")
      +		g.P("}")
      +		g.P()
      +
      +		// unmarshaler
      +		g.P("func ", dec, decSig, " {")
      +		g.P("m := msg.(*", ccTypeName, ")")
      +		g.P("switch tag {")
      +		for _, field := range message.Field {
      +			if field.OneofIndex == nil {
      +				continue
      +			}
      +			odp := message.OneofDecl[int(*field.OneofIndex)]
      +			g.P("case ", field.Number, ": // ", odp.GetName(), ".", *field.Name)
      +			g.P("if wire != ", g.Pkg["proto"], ".", fieldWire[field], " {")
      +			g.P("return true, ", g.Pkg["proto"], ".ErrInternalBadWireType")
      +			g.P("}")
      +			lhs := "x, err" // overridden for TYPE_MESSAGE and TYPE_GROUP
      +			var dec, cast, cast2 string
      +			switch *field.Type {
      +			case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
      +				dec, cast = "b.DecodeFixed64()", g.Pkg["math"]+".Float64frombits"
      +			case descriptor.FieldDescriptorProto_TYPE_FLOAT:
      +				dec, cast, cast2 = "b.DecodeFixed32()", "uint32", g.Pkg["math"]+".Float32frombits"
      +			case descriptor.FieldDescriptorProto_TYPE_INT64:
      +				dec, cast = "b.DecodeVarint()", "int64"
      +			case descriptor.FieldDescriptorProto_TYPE_UINT64:
      +				dec = "b.DecodeVarint()"
      +			case descriptor.FieldDescriptorProto_TYPE_INT32:
      +				dec, cast = "b.DecodeVarint()", "int32"
      +			case descriptor.FieldDescriptorProto_TYPE_FIXED64:
      +				dec = "b.DecodeFixed64()"
      +			case descriptor.FieldDescriptorProto_TYPE_FIXED32:
      +				dec, cast = "b.DecodeFixed32()", "uint32"
      +			case descriptor.FieldDescriptorProto_TYPE_BOOL:
      +				dec = "b.DecodeVarint()"
      +				// handled specially below
      +			case descriptor.FieldDescriptorProto_TYPE_STRING:
      +				dec = "b.DecodeStringBytes()"
      +			case descriptor.FieldDescriptorProto_TYPE_GROUP:
      +				g.P("msg := new(", fieldTypes[field][1:], ")") // drop star
      +				lhs = "err"
      +				dec = "b.DecodeGroup(msg)"
      +				// handled specially below
      +			case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
      +				g.P("msg := new(", fieldTypes[field][1:], ")") // drop star
      +				lhs = "err"
      +				dec = "b.DecodeMessage(msg)"
      +				// handled specially below
      +			case descriptor.FieldDescriptorProto_TYPE_BYTES:
      +				dec = "b.DecodeRawBytes(true)"
      +			case descriptor.FieldDescriptorProto_TYPE_UINT32:
      +				dec, cast = "b.DecodeVarint()", "uint32"
      +			case descriptor.FieldDescriptorProto_TYPE_ENUM:
      +				dec, cast = "b.DecodeVarint()", fieldTypes[field]
      +			case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
      +				dec, cast = "b.DecodeFixed32()", "int32"
      +			case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
      +				dec, cast = "b.DecodeFixed64()", "int64"
      +			case descriptor.FieldDescriptorProto_TYPE_SINT32:
      +				dec, cast = "b.DecodeZigzag32()", "int32"
      +			case descriptor.FieldDescriptorProto_TYPE_SINT64:
      +				dec, cast = "b.DecodeZigzag64()", "int64"
      +			default:
      +				g.Fail("unhandled oneof field type ", field.Type.String())
      +			}
      +			g.P(lhs, " := ", dec)
      +			val := "x"
      +			if cast != "" {
      +				val = cast + "(" + val + ")"
      +			}
      +			if cast2 != "" {
      +				val = cast2 + "(" + val + ")"
      +			}
      +			switch *field.Type {
      +			case descriptor.FieldDescriptorProto_TYPE_BOOL:
      +				val += " != 0"
      +			case descriptor.FieldDescriptorProto_TYPE_GROUP,
      +				descriptor.FieldDescriptorProto_TYPE_MESSAGE:
      +				val = "msg"
      +			}
      +			g.P("m.", oneofFieldName[*field.OneofIndex], " = &", oneofTypeName[field], "{", val, "}")
      +			g.P("return true, err")
      +		}
      +		g.P("default: return false, nil")
      +		g.P("}")
      +		g.P("}")
      +		g.P()
      +
      +		// sizer
      +		g.P("func ", size, sizeSig, " {")
      +		g.P("m := msg.(*", ccTypeName, ")")
      +		for oi, odp := range message.OneofDecl {
      +			g.P("// ", odp.GetName())
      +			fname := oneofFieldName[int32(oi)]
      +			g.P("switch x := m.", fname, ".(type) {")
      +			for _, field := range message.Field {
      +				if field.OneofIndex == nil || int(*field.OneofIndex) != oi {
      +					continue
      +				}
      +				g.P("case *", oneofTypeName[field], ":")
      +				val := "x." + fieldNames[field]
      +				var wire, varint, fixed string
      +				switch *field.Type {
      +				case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
      +					wire = "WireFixed64"
      +					fixed = "8"
      +				case descriptor.FieldDescriptorProto_TYPE_FLOAT:
      +					wire = "WireFixed32"
      +					fixed = "4"
      +				case descriptor.FieldDescriptorProto_TYPE_INT64,
      +					descriptor.FieldDescriptorProto_TYPE_UINT64,
      +					descriptor.FieldDescriptorProto_TYPE_INT32,
      +					descriptor.FieldDescriptorProto_TYPE_UINT32,
      +					descriptor.FieldDescriptorProto_TYPE_ENUM:
      +					wire = "WireVarint"
      +					varint = val
      +				case descriptor.FieldDescriptorProto_TYPE_FIXED64,
      +					descriptor.FieldDescriptorProto_TYPE_SFIXED64:
      +					wire = "WireFixed64"
      +					fixed = "8"
      +				case descriptor.FieldDescriptorProto_TYPE_FIXED32,
      +					descriptor.FieldDescriptorProto_TYPE_SFIXED32:
      +					wire = "WireFixed32"
      +					fixed = "4"
      +				case descriptor.FieldDescriptorProto_TYPE_BOOL:
      +					wire = "WireVarint"
      +					fixed = "1"
      +				case descriptor.FieldDescriptorProto_TYPE_STRING:
      +					wire = "WireBytes"
      +					fixed = "len(" + val + ")"
      +					varint = fixed
      +				case descriptor.FieldDescriptorProto_TYPE_GROUP:
      +					wire = "WireStartGroup"
      +					fixed = g.Pkg["proto"] + ".Size(" + val + ")"
      +				case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
      +					wire = "WireBytes"
      +					g.P("s := ", g.Pkg["proto"], ".Size(", val, ")")
      +					fixed = "s"
      +					varint = fixed
      +				case descriptor.FieldDescriptorProto_TYPE_BYTES:
      +					wire = "WireBytes"
      +					fixed = "len(" + val + ")"
      +					varint = fixed
      +				case descriptor.FieldDescriptorProto_TYPE_SINT32:
      +					wire = "WireVarint"
      +					varint = "(uint32(" + val + ") << 1) ^ uint32((int32(" + val + ") >> 31))"
      +				case descriptor.FieldDescriptorProto_TYPE_SINT64:
      +					wire = "WireVarint"
      +					varint = "uint64(" + val + " << 1) ^ uint64((int64(" + val + ") >> 63))"
      +				default:
      +					g.Fail("unhandled oneof field type ", field.Type.String())
      +				}
      +				g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")")
      +				if varint != "" {
      +					g.P("n += ", g.Pkg["proto"], ".SizeVarint(uint64(", varint, "))")
      +				}
      +				if fixed != "" {
      +					g.P("n += ", fixed)
      +				}
      +				if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
      +					g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".WireEndGroup)")
      +				}
      +			}
      +			g.P("case nil:")
      +			g.P("default:")
      +			g.P("panic(", g.Pkg["fmt"], ".Sprintf(\"proto: unexpected type %T in oneof\", x))")
      +			g.P("}")
      +		}
      +		g.P("return n")
      +		g.P("}")
      +		g.P()
      +	}
      +
      +	for _, ext := range message.ext {
      +		g.generateExtension(ext)
      +	}
      +
      +	fullName := strings.Join(message.TypeName(), ".")
      +	if g.file.Package != nil {
      +		fullName = *g.file.Package + "." + fullName
      +	}
      +
      +	g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], ccTypeName, fullName)
      +}
      +
      +func (g *Generator) generateExtension(ext *ExtensionDescriptor) {
      +	ccTypeName := ext.DescName()
      +
      +	extObj := g.ObjectNamed(*ext.Extendee)
      +	var extDesc *Descriptor
      +	if id, ok := extObj.(*ImportedDescriptor); ok {
      +		// This is extending a publicly imported message.
      +		// We need the underlying type for goTag.
      +		extDesc = id.o.(*Descriptor)
      +	} else {
      +		extDesc = extObj.(*Descriptor)
      +	}
      +	extendedType := "*" + g.TypeName(extObj) // always use the original
      +	field := ext.FieldDescriptorProto
      +	fieldType, wireType := g.GoType(ext.parent, field)
      +	tag := g.goTag(extDesc, field, wireType)
      +	g.RecordTypeUse(*ext.Extendee)
      +	if n := ext.FieldDescriptorProto.TypeName; n != nil {
      +		// foreign extension type
      +		g.RecordTypeUse(*n)
      +	}
      +
      +	typeName := ext.TypeName()
      +
      +	// Special case for proto2 message sets: If this extension is extending
      +	// proto2_bridge.MessageSet, and its final name component is "message_set_extension",
      +	// then drop that last component.
      +	mset := false
      +	if extendedType == "*proto2_bridge.MessageSet" && typeName[len(typeName)-1] == "message_set_extension" {
      +		typeName = typeName[:len(typeName)-1]
      +		mset = true
      +	}
      +
      +	// For text formatting, the package must be exactly what the .proto file declares,
      +	// ignoring overrides such as the go_package option, and with no dot/underscore mapping.
      +	extName := strings.Join(typeName, ".")
      +	if g.file.Package != nil {
      +		extName = *g.file.Package + "." + extName
      +	}
      +
      +	g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{")
      +	g.In()
      +	g.P("ExtendedType: (", extendedType, ")(nil),")
      +	g.P("ExtensionType: (", fieldType, ")(nil),")
      +	g.P("Field: ", field.Number, ",")
      +	g.P(`Name: "`, extName, `",`)
      +	g.P("Tag: ", tag, ",")
      +
      +	g.Out()
      +	g.P("}")
      +	g.P()
      +
      +	if mset {
      +		// Generate a bit more code to register with message_set.go.
      +		g.addInitf("%s.RegisterMessageSetType((%s)(nil), %d, %q)", g.Pkg["proto"], fieldType, *field.Number, extName)
      +	}
      +
      +	g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""})
      +}
      +
      +func (g *Generator) generateInitFunction() {
      +	for _, enum := range g.file.enum {
      +		g.generateEnumRegistration(enum)
      +	}
      +	for _, d := range g.file.desc {
      +		for _, ext := range d.ext {
      +			g.generateExtensionRegistration(ext)
      +		}
      +	}
      +	for _, ext := range g.file.ext {
      +		g.generateExtensionRegistration(ext)
      +	}
      +	if len(g.init) == 0 {
      +		return
      +	}
      +	g.P("func init() {")
      +	g.In()
      +	for _, l := range g.init {
      +		g.P(l)
      +	}
      +	g.Out()
      +	g.P("}")
      +	g.init = nil
      +}
      +
      +func (g *Generator) generateFileDescriptor(file *FileDescriptor) {
      +	// Make a copy and trim source_code_info data.
      +	// TODO: Trim this more when we know exactly what we need.
      +	pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto)
      +	pb.SourceCodeInfo = nil
      +
      +	b, err := proto.Marshal(pb)
      +	if err != nil {
      +		g.Fail(err.Error())
      +	}
      +
      +	var buf bytes.Buffer
      +	w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
      +	w.Write(b)
      +	w.Close()
      +	b = buf.Bytes()
      +
      +	v := fmt.Sprintf("fileDescriptor%d", file.index)
      +	g.P()
      +	g.P("var ", v, " = []byte{")
      +	g.In()
      +	g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto")
      +	for len(b) > 0 {
      +		n := 16
      +		if n > len(b) {
      +			n = len(b)
      +		}
      +
      +		s := ""
      +		for _, c := range b[:n] {
      +			s += fmt.Sprintf("0x%02x,", c)
      +		}
      +		g.P(s)
      +
      +		b = b[n:]
      +	}
      +	g.Out()
      +	g.P("}")
      +}
      +
      +func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) {
+	// We always print the full (proto-world) package name here.
      +	pkg := enum.File().GetPackage()
      +	if pkg != "" {
      +		pkg += "."
      +	}
      +	// The full type name
      +	typeName := enum.TypeName()
      +	// The full type name, CamelCased.
      +	ccTypeName := CamelCaseSlice(typeName)
      +	g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName)
      +}
      +
      +func (g *Generator) generateExtensionRegistration(ext *ExtensionDescriptor) {
      +	g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName())
      +}
      +
      +// And now lots of helper functions.
      +
      +// Is c an ASCII lower-case letter?
      +func isASCIILower(c byte) bool {
      +	return 'a' <= c && c <= 'z'
      +}
      +
      +// Is c an ASCII digit?
      +func isASCIIDigit(c byte) bool {
      +	return '0' <= c && c <= '9'
      +}
      +
      +// CamelCase returns the CamelCased name.
      +// If there is an interior underscore followed by a lower case letter,
      +// drop the underscore and convert the letter to upper case.
      +// There is a remote possibility of this rewrite causing a name collision,
      +// but it's so remote we're prepared to pretend it's nonexistent - since the
      +// C++ generator lowercases names, it's extremely unlikely to have two fields
      +// with different capitalizations.
      +// In short, _my_field_name_2 becomes XMyFieldName_2.
      +func CamelCase(s string) string {
      +	if s == "" {
      +		return ""
      +	}
      +	t := make([]byte, 0, 32)
      +	i := 0
      +	if s[0] == '_' {
      +		// Need a capital letter; drop the '_'.
      +		t = append(t, 'X')
      +		i++
      +	}
      +	// Invariant: if the next letter is lower case, it must be converted
      +	// to upper case.
      +	// That is, we process a word at a time, where words are marked by _ or
      +	// upper case letter. Digits are treated as words.
      +	for ; i < len(s); i++ {
      +		c := s[i]
      +		if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) {
      +			continue // Skip the underscore in s.
      +		}
      +		if isASCIIDigit(c) {
      +			t = append(t, c)
      +			continue
      +		}
      +		// Assume we have a letter now - if not, it's a bogus identifier.
      +		// The next word is a sequence of characters that must start upper case.
      +		if isASCIILower(c) {
      +			c ^= ' ' // Make it a capital letter.
      +		}
      +		t = append(t, c) // Guaranteed not lower case.
      +		// Accept lower case sequence that follows.
      +		for i+1 < len(s) && isASCIILower(s[i+1]) {
      +			i++
      +			t = append(t, s[i])
      +		}
      +	}
      +	return string(t)
      +}
      +
      +// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to
      +// be joined with "_".
      +func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) }
      +
      +// dottedSlice turns a sliced name into a dotted name.
      +func dottedSlice(elem []string) string { return strings.Join(elem, ".") }
      +
      +// Is this field optional?
      +func isOptional(field *descriptor.FieldDescriptorProto) bool {
      +	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL
      +}
      +
      +// Is this field required?
      +func isRequired(field *descriptor.FieldDescriptorProto) bool {
      +	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED
      +}
      +
      +// Is this field repeated?
      +func isRepeated(field *descriptor.FieldDescriptorProto) bool {
      +	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
      +}
      +
      +// badToUnderscore is the mapping function used to generate Go names from package names,
      +// which can be dotted in the input .proto file.  It replaces non-identifier characters such as
      +// dot or dash with underscore.
      +func badToUnderscore(r rune) rune {
      +	if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
      +		return r
      +	}
      +	return '_'
      +}
      +
      +// baseName returns the last path element of the name, with the last dotted suffix removed.
      +func baseName(name string) string {
      +	// First, find the last element
      +	if i := strings.LastIndex(name, "/"); i >= 0 {
      +		name = name[i+1:]
      +	}
      +	// Now drop the suffix
      +	if i := strings.LastIndex(name, "."); i >= 0 {
      +		name = name[0:i]
      +	}
      +	return name
      +}
      +
      +// The SourceCodeInfo message describes the location of elements of a parsed
      +// .proto file by way of a "path", which is a sequence of integers that
      +// describe the route from a FileDescriptorProto to the relevant submessage.
      +// The path alternates between a field number of a repeated field, and an index
      +// into that repeated field. The constants below define the field numbers that
      +// are used.
      +//
      +// See descriptor.proto for more information about this.
      +const (
      +	// tag numbers in FileDescriptorProto
      +	packagePath = 2 // package
      +	messagePath = 4 // message_type
      +	enumPath    = 5 // enum_type
      +	// tag numbers in DescriptorProto
      +	messageFieldPath   = 2 // field
      +	messageMessagePath = 3 // nested_type
      +	messageEnumPath    = 4 // enum_type
      +	messageOneofPath   = 8 // oneof_decl
      +	// tag numbers in EnumDescriptorProto
      +	enumValuePath = 2 // value
      +)
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go
      new file mode 100644
      index 00000000..a5ebc853
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go
      @@ -0,0 +1,85 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2013 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +package generator
      +
      +import (
      +	"testing"
      +
      +	"github.com/golang/protobuf/protoc-gen-go/descriptor"
      +)
      +
      +func TestCamelCase(t *testing.T) {
      +	tests := []struct {
      +		in, want string
      +	}{
      +		{"one", "One"},
      +		{"one_two", "OneTwo"},
      +		{"_my_field_name_2", "XMyFieldName_2"},
      +		{"Something_Capped", "Something_Capped"},
      +		{"my_Name", "My_Name"},
      +		{"OneTwo", "OneTwo"},
      +		{"_", "X"},
      +		{"_a_", "XA_"},
      +	}
      +	for _, tc := range tests {
      +		if got := CamelCase(tc.in); got != tc.want {
      +			t.Errorf("CamelCase(%q) = %q, want %q", tc.in, got, tc.want)
      +		}
      +	}
      +}
      +
      +func TestGoPackageOption(t *testing.T) {
      +	tests := []struct {
      +		in           string
      +		impPath, pkg string
      +		ok           bool
      +	}{
      +		{"", "", "", false},
      +		{"foo", "", "foo", true},
      +		{"github.com/golang/bar", "github.com/golang/bar", "bar", true},
      +		{"github.com/golang/bar;baz", "github.com/golang/bar", "baz", true},
      +	}
      +	for _, tc := range tests {
      +		d := &FileDescriptor{
      +			FileDescriptorProto: &descriptor.FileDescriptorProto{
      +				Options: &descriptor.FileOptions{
      +					GoPackage: &tc.in,
      +				},
      +			},
      +		}
      +		impPath, pkg, ok := d.goPackageOption()
      +		if impPath != tc.impPath || pkg != tc.pkg || ok != tc.ok {
      +			t.Errorf("go_package = %q => (%q, %q, %t), want (%q, %q, %t)", tc.in,
      +				impPath, pkg, ok, tc.impPath, tc.pkg, tc.ok)
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
      new file mode 100644
      index 00000000..a11e07bc
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
      @@ -0,0 +1,456 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2015 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +// Package grpc outputs gRPC service descriptions in Go code.
      +// It runs as a plugin for the Go protocol buffer compiler plugin.
      +// It is linked in to protoc-gen-go.
      +package grpc
      +
      +import (
      +	"fmt"
      +	"path"
      +	"strconv"
      +	"strings"
      +
      +	pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
      +	"github.com/golang/protobuf/protoc-gen-go/generator"
      +)
      +
      +// generatedCodeVersion indicates a version of the generated code.
      +// It is incremented whenever an incompatibility between the generated code and
      +// the grpc package is introduced; the generated code references
      +// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion).
      +const generatedCodeVersion = 1
      +
      +// Paths for packages used by code generated in this file,
      +// relative to the import_prefix of the generator.Generator.
      +const (
      +	contextPkgPath = "golang.org/x/net/context"
      +	grpcPkgPath    = "google.golang.org/grpc"
      +)
      +
      +func init() {
      +	generator.RegisterPlugin(new(grpc))
      +}
      +
      +// grpc is an implementation of the Go protocol buffer compiler's
      +// plugin architecture.  It generates bindings for gRPC support.
      +type grpc struct {
      +	gen *generator.Generator
      +}
      +
      +// Name returns the name of this plugin, "grpc".
      +func (g *grpc) Name() string {
      +	return "grpc"
      +}
      +
      +// The names for packages imported in the generated code.
      +// They may vary from the final path component of the import path
      +// if the name is used by other packages.
      +var (
      +	contextPkg string
      +	grpcPkg    string
      +)
      +
      +// Init initializes the plugin.
      +func (g *grpc) Init(gen *generator.Generator) {
      +	g.gen = gen
      +	contextPkg = generator.RegisterUniquePackageName("context", nil)
      +	grpcPkg = generator.RegisterUniquePackageName("grpc", nil)
      +}
      +
      +// Given a type name defined in a .proto, return its object.
      +// Also record that we're using it, to guarantee the associated import.
      +func (g *grpc) objectNamed(name string) generator.Object {
      +	g.gen.RecordTypeUse(name)
      +	return g.gen.ObjectNamed(name)
      +}
      +
      +// Given a type name defined in a .proto, return its name as we will print it.
      +func (g *grpc) typeName(str string) string {
      +	return g.gen.TypeName(g.objectNamed(str))
      +}
      +
      +// P forwards to g.gen.P.
      +func (g *grpc) P(args ...interface{}) { g.gen.P(args...) }
      +
      +// Generate generates code for the services in the given file.
      +func (g *grpc) Generate(file *generator.FileDescriptor) {
      +	if len(file.FileDescriptorProto.Service) == 0 {
      +		return
      +	}
      +
      +	g.P("// Reference imports to suppress errors if they are not otherwise used.")
      +	g.P("var _ ", contextPkg, ".Context")
      +	g.P("var _ ", grpcPkg, ".ClientConn")
      +	g.P()
      +
      +	// Assert version compatibility.
      +	g.P("// This is a compile-time assertion to ensure that this generated file")
      +	g.P("// is compatible with the grpc package it is being compiled against.")
      +	g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion)
      +	g.P()
      +
      +	for i, service := range file.FileDescriptorProto.Service {
      +		g.generateService(file, service, i)
      +	}
      +}
      +
      +// GenerateImports generates the import declaration for this file.
      +func (g *grpc) GenerateImports(file *generator.FileDescriptor) {
      +	if len(file.FileDescriptorProto.Service) == 0 {
      +		return
      +	}
      +	g.P("import (")
      +	g.P(contextPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, contextPkgPath)))
      +	g.P(grpcPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, grpcPkgPath)))
      +	g.P(")")
      +	g.P()
      +}
      +
      +// reservedClientName records whether a client name is reserved on the client side.
      +var reservedClientName = map[string]bool{
      +// TODO: do we need any in gRPC?
      +}
      +
      +func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] }
      +
      +// generateService generates all the code for the named service.
      +func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) {
      +	path := fmt.Sprintf("6,%d", index) // 6 means service.
      +
      +	origServName := service.GetName()
      +	fullServName := origServName
      +	if pkg := file.GetPackage(); pkg != "" {
      +		fullServName = pkg + "." + fullServName
      +	}
      +	servName := generator.CamelCase(origServName)
      +
      +	g.P()
      +	g.P("// Client API for ", servName, " service")
      +	g.P()
      +
      +	// Client interface.
      +	g.P("type ", servName, "Client interface {")
      +	for i, method := range service.Method {
      +		g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
      +		g.P(g.generateClientSignature(servName, method))
      +	}
      +	g.P("}")
      +	g.P()
      +
      +	// Client structure.
      +	g.P("type ", unexport(servName), "Client struct {")
      +	g.P("cc *", grpcPkg, ".ClientConn")
      +	g.P("}")
      +	g.P()
      +
      +	// NewClient factory.
      +	g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {")
      +	g.P("return &", unexport(servName), "Client{cc}")
      +	g.P("}")
      +	g.P()
      +
      +	var methodIndex, streamIndex int
      +	serviceDescVar := "_" + servName + "_serviceDesc"
      +	// Client method implementations.
      +	for _, method := range service.Method {
      +		var descExpr string
      +		if !method.GetServerStreaming() && !method.GetClientStreaming() {
      +			// Unary RPC method
      +			descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex)
      +			methodIndex++
      +		} else {
      +			// Streaming RPC method
      +			descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex)
      +			streamIndex++
      +		}
      +		g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr)
      +	}
      +
      +	g.P("// Server API for ", servName, " service")
      +	g.P()
      +
      +	// Server interface.
      +	serverType := servName + "Server"
      +	g.P("type ", serverType, " interface {")
      +	for i, method := range service.Method {
      +		g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
      +		g.P(g.generateServerSignature(servName, method))
      +	}
      +	g.P("}")
      +	g.P()
      +
      +	// Server registration.
      +	g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {")
      +	g.P("s.RegisterService(&", serviceDescVar, `, srv)`)
      +	g.P("}")
      +	g.P()
      +
      +	// Server handler implementations.
      +	var handlerNames []string
      +	for _, method := range service.Method {
      +		hname := g.generateServerMethod(servName, method)
      +		handlerNames = append(handlerNames, hname)
      +	}
      +
      +	// Service descriptor.
      +	g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {")
      +	g.P("ServiceName: ", strconv.Quote(fullServName), ",")
      +	g.P("HandlerType: (*", serverType, ")(nil),")
      +	g.P("Methods: []", grpcPkg, ".MethodDesc{")
      +	for i, method := range service.Method {
      +		if method.GetServerStreaming() || method.GetClientStreaming() {
      +			continue
      +		}
      +		g.P("{")
      +		g.P("MethodName: ", strconv.Quote(method.GetName()), ",")
      +		g.P("Handler: ", handlerNames[i], ",")
      +		g.P("},")
      +	}
      +	g.P("},")
      +	g.P("Streams: []", grpcPkg, ".StreamDesc{")
      +	for i, method := range service.Method {
      +		if !method.GetServerStreaming() && !method.GetClientStreaming() {
      +			continue
      +		}
      +		g.P("{")
      +		g.P("StreamName: ", strconv.Quote(method.GetName()), ",")
      +		g.P("Handler: ", handlerNames[i], ",")
      +		if method.GetServerStreaming() {
      +			g.P("ServerStreams: true,")
      +		}
      +		if method.GetClientStreaming() {
      +			g.P("ClientStreams: true,")
      +		}
      +		g.P("},")
      +	}
      +	g.P("},")
      +	g.P("}")
      +	g.P()
      +}
      +
      +// generateClientSignature returns the client-side signature for a method.
      +func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string {
      +	origMethName := method.GetName()
      +	methName := generator.CamelCase(origMethName)
      +	if reservedClientName[methName] {
      +		methName += "_"
      +	}
      +	reqArg := ", in *" + g.typeName(method.GetInputType())
      +	if method.GetClientStreaming() {
      +		reqArg = ""
      +	}
      +	respName := "*" + g.typeName(method.GetOutputType())
      +	if method.GetServerStreaming() || method.GetClientStreaming() {
      +		respName = servName + "_" + generator.CamelCase(origMethName) + "Client"
      +	}
      +	return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName)
      +}
      +
      +func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) {
      +	sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName())
      +	methName := generator.CamelCase(method.GetName())
      +	inType := g.typeName(method.GetInputType())
      +	outType := g.typeName(method.GetOutputType())
      +
      +	g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{")
      +	if !method.GetServerStreaming() && !method.GetClientStreaming() {
      +		g.P("out := new(", outType, ")")
      +		// TODO: Pass descExpr to Invoke.
      +		g.P("err := ", grpcPkg, `.Invoke(ctx, "`, sname, `", in, out, c.cc, opts...)`)
      +		g.P("if err != nil { return nil, err }")
      +		g.P("return out, nil")
      +		g.P("}")
      +		g.P()
      +		return
      +	}
      +	streamType := unexport(servName) + methName + "Client"
      +	g.P("stream, err := ", grpcPkg, ".NewClientStream(ctx, ", descExpr, `, c.cc, "`, sname, `", opts...)`)
      +	g.P("if err != nil { return nil, err }")
      +	g.P("x := &", streamType, "{stream}")
      +	if !method.GetClientStreaming() {
      +		g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }")
      +		g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
      +	}
      +	g.P("return x, nil")
      +	g.P("}")
      +	g.P()
      +
      +	genSend := method.GetClientStreaming()
      +	genRecv := method.GetServerStreaming()
      +	genCloseAndRecv := !method.GetServerStreaming()
      +
      +	// Stream auxiliary types and methods.
      +	g.P("type ", servName, "_", methName, "Client interface {")
      +	if genSend {
      +		g.P("Send(*", inType, ") error")
      +	}
      +	if genRecv {
      +		g.P("Recv() (*", outType, ", error)")
      +	}
      +	if genCloseAndRecv {
      +		g.P("CloseAndRecv() (*", outType, ", error)")
      +	}
      +	g.P(grpcPkg, ".ClientStream")
      +	g.P("}")
      +	g.P()
      +
      +	g.P("type ", streamType, " struct {")
      +	g.P(grpcPkg, ".ClientStream")
      +	g.P("}")
      +	g.P()
      +
      +	if genSend {
      +		g.P("func (x *", streamType, ") Send(m *", inType, ") error {")
      +		g.P("return x.ClientStream.SendMsg(m)")
      +		g.P("}")
      +		g.P()
      +	}
      +	if genRecv {
      +		g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {")
      +		g.P("m := new(", outType, ")")
      +		g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
      +		g.P("return m, nil")
      +		g.P("}")
      +		g.P()
      +	}
      +	if genCloseAndRecv {
      +		g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {")
      +		g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
      +		g.P("m := new(", outType, ")")
      +		g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
      +		g.P("return m, nil")
      +		g.P("}")
      +		g.P()
      +	}
      +}
      +
      +// generateServerSignature returns the server-side signature for a method.
      +func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string {
      +	origMethName := method.GetName()
      +	methName := generator.CamelCase(origMethName)
      +	if reservedClientName[methName] {
      +		methName += "_"
      +	}
      +
      +	var reqArgs []string
      +	ret := "error"
      +	if !method.GetServerStreaming() && !method.GetClientStreaming() {
      +		reqArgs = append(reqArgs, contextPkg+".Context")
      +		ret = "(*" + g.typeName(method.GetOutputType()) + ", error)"
      +	}
      +	if !method.GetClientStreaming() {
      +		reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType()))
      +	}
      +	if method.GetServerStreaming() || method.GetClientStreaming() {
      +		reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server")
      +	}
      +
      +	return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
      +}
      +
      +func (g *grpc) generateServerMethod(servName string, method *pb.MethodDescriptorProto) string {
      +	methName := generator.CamelCase(method.GetName())
      +	hname := fmt.Sprintf("_%s_%s_Handler", servName, methName)
      +	inType := g.typeName(method.GetInputType())
      +	outType := g.typeName(method.GetOutputType())
      +
      +	if !method.GetServerStreaming() && !method.GetClientStreaming() {
      +		g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error) (interface{}, error) {")
      +		g.P("in := new(", inType, ")")
      +		g.P("if err := dec(in); err != nil { return nil, err }")
      +		g.P("out, err := srv.(", servName, "Server).", methName, "(ctx, in)")
      +		g.P("if err != nil { return nil, err }")
      +		g.P("return out, nil")
      +		g.P("}")
      +		g.P()
      +		return hname
      +	}
      +	streamType := unexport(servName) + methName + "Server"
      +	g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {")
      +	if !method.GetClientStreaming() {
      +		g.P("m := new(", inType, ")")
      +		g.P("if err := stream.RecvMsg(m); err != nil { return err }")
      +		g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})")
      +	} else {
      +		g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})")
      +	}
      +	g.P("}")
      +	g.P()
      +
      +	genSend := method.GetServerStreaming()
      +	genSendAndClose := !method.GetServerStreaming()
      +	genRecv := method.GetClientStreaming()
      +
      +	// Stream auxiliary types and methods.
      +	g.P("type ", servName, "_", methName, "Server interface {")
      +	if genSend {
      +		g.P("Send(*", outType, ") error")
      +	}
      +	if genSendAndClose {
      +		g.P("SendAndClose(*", outType, ") error")
      +	}
      +	if genRecv {
      +		g.P("Recv() (*", inType, ", error)")
      +	}
      +	g.P(grpcPkg, ".ServerStream")
      +	g.P("}")
      +	g.P()
      +
      +	g.P("type ", streamType, " struct {")
      +	g.P(grpcPkg, ".ServerStream")
      +	g.P("}")
      +	g.P()
      +
      +	if genSend {
      +		g.P("func (x *", streamType, ") Send(m *", outType, ") error {")
      +		g.P("return x.ServerStream.SendMsg(m)")
      +		g.P("}")
      +		g.P()
      +	}
      +	if genSendAndClose {
      +		g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {")
      +		g.P("return x.ServerStream.SendMsg(m)")
      +		g.P("}")
      +		g.P()
      +	}
      +	if genRecv {
      +		g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {")
      +		g.P("m := new(", inType, ")")
      +		g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }")
      +		g.P("return m, nil")
      +		g.P("}")
      +		g.P()
      +	}
      +
      +	return hname
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go
      new file mode 100644
      index 00000000..532a5500
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go
      @@ -0,0 +1,34 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2015 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +package main
      +
      +import _ "github.com/golang/protobuf/protoc-gen-go/grpc"
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go
      new file mode 100644
      index 00000000..8e2486de
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go
      @@ -0,0 +1,98 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate
      +// Go code.  Run it by building this program and putting it in your path with
      +// the name
      +// 	protoc-gen-go
      +// That word 'go' at the end becomes part of the option string set for the
      +// protocol compiler, so once the protocol compiler (protoc) is installed
      +// you can run
      +// 	protoc --go_out=output_directory input_directory/file.proto
      +// to generate Go bindings for the protocol defined by file.proto.
      +// With that input, the output will be written to
      +// 	output_directory/file.pb.go
      +//
      +// The generated code is documented in the package comment for
      +// the library.
      +//
      +// See the README and documentation for protocol buffers to learn more:
      +// 	https://developers.google.com/protocol-buffers/
      +package main
      +
      +import (
      +	"io/ioutil"
      +	"os"
      +
      +	"github.com/golang/protobuf/proto"
      +	"github.com/golang/protobuf/protoc-gen-go/generator"
      +)
      +
      +func main() {
      +	// Begin by allocating a generator. The request and response structures are stored there
      +	// so we can do error handling easily - the response structure contains the field to
      +	// report failure.
      +	g := generator.New()
      +
      +	data, err := ioutil.ReadAll(os.Stdin)
      +	if err != nil {
      +		g.Error(err, "reading input")
      +	}
      +
      +	if err := proto.Unmarshal(data, g.Request); err != nil {
      +		g.Error(err, "parsing input proto")
      +	}
      +
      +	if len(g.Request.FileToGenerate) == 0 {
      +		g.Fail("no files to generate")
      +	}
      +
      +	g.CommandLineParameters(g.Request.GetParameter())
      +
      +	// Create a wrapped version of the Descriptors and EnumDescriptors that
      +	// point to the file that defines them.
      +	g.WrapTypes()
      +
      +	g.SetPackageNames()
      +	g.BuildTypeNameMap()
      +
      +	g.GenerateAllFiles()
      +
      +	// Send back the results.
      +	data, err = proto.Marshal(g.Response)
      +	if err != nil {
      +		g.Error(err, "failed to marshal output proto")
      +	}
      +	_, err = os.Stdout.Write(data)
      +	if err != nil {
      +		g.Error(err, "failed to write output proto")
      +	}
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile
      new file mode 100644
      index 00000000..eb41f20d
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile
      @@ -0,0 +1,45 @@
      +# Go support for Protocol Buffers - Google's data interchange format
      +#
      +# Copyright 2010 The Go Authors.  All rights reserved.
      +# https://github.com/golang/protobuf
      +#
      +# Redistribution and use in source and binary forms, with or without
      +# modification, are permitted provided that the following conditions are
      +# met:
      +#
      +#     * Redistributions of source code must retain the above copyright
      +# notice, this list of conditions and the following disclaimer.
      +#     * Redistributions in binary form must reproduce the above
      +# copyright notice, this list of conditions and the following disclaimer
      +# in the documentation and/or other materials provided with the
      +# distribution.
      +#     * Neither the name of Google Inc. nor the names of its
      +# contributors may be used to endorse or promote products derived from
      +# this software without specific prior written permission.
      +#
      +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +# Not stored here, but plugin.proto is in https://github.com/google/protobuf/
      +# at src/google/protobuf/compiler/plugin.proto
      +# Also we need to fix an import.
      +regenerate:
      +	echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
      +	protoc --go_out=Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor:. \
      +		-I$(HOME)/src/protobuf/src $(HOME)/src/protobuf/src/google/protobuf/compiler/plugin.proto && \
      +		mv google/protobuf/compiler/plugin.pb.go $(GOPATH)/src/github.com/golang/protobuf/protoc-gen-go/plugin
      +
      +restore:
      +	cp plugin.pb.golden plugin.pb.go
      +
      +preserve:
      +	cp plugin.pb.go plugin.pb.golden
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
      new file mode 100644
      index 00000000..faa81583
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
      @@ -0,0 +1,225 @@
      +// Code generated by protoc-gen-go.
      +// source: google/protobuf/compiler/plugin.proto
      +// DO NOT EDIT!
      +
      +/*
      +Package plugin_go is a generated protocol buffer package.
      +
      +It is generated from these files:
      +	google/protobuf/compiler/plugin.proto
      +
      +It has these top-level messages:
      +	CodeGeneratorRequest
      +	CodeGeneratorResponse
      +*/
      +package plugin_go
      +
      +import proto "github.com/golang/protobuf/proto"
      +import fmt "fmt"
      +import math "math"
      +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
      +
      +// Reference imports to suppress errors if they are not otherwise used.
      +var _ = proto.Marshal
      +var _ = fmt.Errorf
      +var _ = math.Inf
      +
      +// This is a compile-time assertion to ensure that this generated file
      +// is compatible with the proto package it is being compiled against.
      +const _ = proto.ProtoPackageIsVersion1
      +
      +// An encoded CodeGeneratorRequest is written to the plugin's stdin.
      +type CodeGeneratorRequest struct {
      +	// The .proto files that were explicitly listed on the command-line.  The
      +	// code generator should generate code only for these files.  Each file's
      +	// descriptor will be included in proto_file, below.
      +	FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
      +	// The generator parameter passed on the command-line.
      +	Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
      +	// FileDescriptorProtos for all files in files_to_generate and everything
      +	// they import.  The files will appear in topological order, so each file
      +	// appears before any file that imports it.
      +	//
      +	// protoc guarantees that all proto_files will be written after
      +	// the fields above, even though this is not technically guaranteed by the
      +	// protobuf wire format.  This theoretically could allow a plugin to stream
      +	// in the FileDescriptorProtos and handle them one by one rather than read
      +	// the entire set into memory at once.  However, as of this writing, this
      +	// is not similarly optimized on protoc's end -- it will store all fields in
      +	// memory at once before sending them to the plugin.
      +	ProtoFile        []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
      +	XXX_unrecognized []byte                                 `json:"-"`
      +}
      +
      +func (m *CodeGeneratorRequest) Reset()                    { *m = CodeGeneratorRequest{} }
      +func (m *CodeGeneratorRequest) String() string            { return proto.CompactTextString(m) }
      +func (*CodeGeneratorRequest) ProtoMessage()               {}
      +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
      +
      +func (m *CodeGeneratorRequest) GetFileToGenerate() []string {
      +	if m != nil {
      +		return m.FileToGenerate
      +	}
      +	return nil
      +}
      +
      +func (m *CodeGeneratorRequest) GetParameter() string {
      +	if m != nil && m.Parameter != nil {
      +		return *m.Parameter
      +	}
      +	return ""
      +}
      +
      +func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto {
      +	if m != nil {
      +		return m.ProtoFile
      +	}
      +	return nil
      +}
      +
      +// The plugin writes an encoded CodeGeneratorResponse to stdout.
      +type CodeGeneratorResponse struct {
      +	// Error message.  If non-empty, code generation failed.  The plugin process
      +	// should exit with status code zero even if it reports an error in this way.
      +	//
      +	// This should be used to indicate errors in .proto files which prevent the
      +	// code generator from generating correct code.  Errors which indicate a
      +	// problem in protoc itself -- such as the input CodeGeneratorRequest being
      +	// unparseable -- should be reported by writing a message to stderr and
      +	// exiting with a non-zero status code.
      +	Error            *string                       `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
      +	File             []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
      +	XXX_unrecognized []byte                        `json:"-"`
      +}
      +
      +func (m *CodeGeneratorResponse) Reset()                    { *m = CodeGeneratorResponse{} }
      +func (m *CodeGeneratorResponse) String() string            { return proto.CompactTextString(m) }
      +func (*CodeGeneratorResponse) ProtoMessage()               {}
      +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
      +
      +func (m *CodeGeneratorResponse) GetError() string {
      +	if m != nil && m.Error != nil {
      +		return *m.Error
      +	}
      +	return ""
      +}
      +
      +func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
      +	if m != nil {
      +		return m.File
      +	}
      +	return nil
      +}
      +
      +// Represents a single generated file.
      +type CodeGeneratorResponse_File struct {
      +	// The file name, relative to the output directory.  The name must not
      +	// contain "." or ".." components and must be relative, not be absolute (so,
      +	// the file cannot lie outside the output directory).  "/" must be used as
      +	// the path separator, not "\".
      +	//
      +	// If the name is omitted, the content will be appended to the previous
      +	// file.  This allows the generator to break large files into small chunks,
      +	// and allows the generated text to be streamed back to protoc so that large
      +	// files need not reside completely in memory at one time.  Note that as of
      +	// this writing protoc does not optimize for this -- it will read the entire
      +	// CodeGeneratorResponse before writing files to disk.
      +	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
      +	// If non-empty, indicates that the named file should already exist, and the
      +	// content here is to be inserted into that file at a defined insertion
      +	// point.  This feature allows a code generator to extend the output
      +	// produced by another code generator.  The original generator may provide
      +	// insertion points by placing special annotations in the file that look
      +	// like:
      +	//   @@protoc_insertion_point(NAME)
      +	// The annotation can have arbitrary text before and after it on the line,
      +	// which allows it to be placed in a comment.  NAME should be replaced with
      +	// an identifier naming the point -- this is what other generators will use
      +	// as the insertion_point.  Code inserted at this point will be placed
      +	// immediately above the line containing the insertion point (thus multiple
      +	// insertions to the same point will come out in the order they were added).
      +	// The double-@ is intended to make it unlikely that the generated code
      +	// could contain things that look like insertion points by accident.
      +	//
      +	// For example, the C++ code generator places the following line in the
      +	// .pb.h files that it generates:
      +	//   // @@protoc_insertion_point(namespace_scope)
      +	// This line appears within the scope of the file's package namespace, but
      +	// outside of any particular class.  Another plugin can then specify the
      +	// insertion_point "namespace_scope" to generate additional classes or
      +	// other declarations that should be placed in this scope.
      +	//
      +	// Note that if the line containing the insertion point begins with
      +	// whitespace, the same whitespace will be added to every line of the
      +	// inserted text.  This is useful for languages like Python, where
      +	// indentation matters.  In these languages, the insertion point comment
      +	// should be indented the same amount as any inserted code will need to be
      +	// in order to work correctly in that context.
      +	//
      +	// The code generator that generates the initial file and the one which
      +	// inserts into it must both run as part of a single invocation of protoc.
      +	// Code generators are executed in the order in which they appear on the
      +	// command line.
      +	//
      +	// If |insertion_point| is present, |name| must also be present.
      +	InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
      +	// The file contents.
      +	Content          *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
      +	XXX_unrecognized []byte  `json:"-"`
      +}
      +
      +func (m *CodeGeneratorResponse_File) Reset()                    { *m = CodeGeneratorResponse_File{} }
      +func (m *CodeGeneratorResponse_File) String() string            { return proto.CompactTextString(m) }
      +func (*CodeGeneratorResponse_File) ProtoMessage()               {}
      +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
      +
      +func (m *CodeGeneratorResponse_File) GetName() string {
      +	if m != nil && m.Name != nil {
      +		return *m.Name
      +	}
      +	return ""
      +}
      +
      +func (m *CodeGeneratorResponse_File) GetInsertionPoint() string {
      +	if m != nil && m.InsertionPoint != nil {
      +		return *m.InsertionPoint
      +	}
      +	return ""
      +}
      +
      +func (m *CodeGeneratorResponse_File) GetContent() string {
      +	if m != nil && m.Content != nil {
      +		return *m.Content
      +	}
      +	return ""
      +}
      +
      +func init() {
      +	proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest")
      +	proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse")
      +	proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File")
      +}
      +
      +var fileDescriptor0 = []byte{
      +	// 311 bytes of a gzipped FileDescriptorProto
      +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0xd1, 0x4a, 0xfb, 0x30,
      +	0x14, 0xc6, 0xe9, 0xff, 0x3f, 0x91, 0x1d, 0x65, 0x93, 0x30, 0xa1, 0x8c, 0x5d, 0x94, 0xa1, 0xb8,
      +	0xab, 0x14, 0x44, 0xf0, 0x7e, 0x13, 0xf5, 0xb2, 0x14, 0xaf, 0x04, 0x29, 0xb5, 0x3b, 0x2b, 0x81,
      +	0x2e, 0x27, 0xa6, 0xe9, 0x13, 0xf9, 0x4e, 0x3e, 0x8f, 0x49, 0xda, 0x4e, 0x29, 0xee, 0xaa, 0x3d,
      +	0xdf, 0xf9, 0xe5, 0x3b, 0x5f, 0x72, 0xe0, 0xba, 0x24, 0x2a, 0x2b, 0x8c, 0x95, 0x26, 0x43, 0xef,
      +	0xcd, 0x2e, 0x2e, 0x68, 0xaf, 0x44, 0x85, 0x3a, 0x56, 0x55, 0x53, 0x0a, 0xc9, 0x7d, 0x83, 0x85,
      +	0x2d, 0xc6, 0x7b, 0x8c, 0xf7, 0xd8, 0x3c, 0x1a, 0x1a, 0x6c, 0xb1, 0x2e, 0xb4, 0x50, 0x86, 0x74,
      +	0x4b, 0x2f, 0x3f, 0x03, 0x98, 0x6d, 0x68, 0x8b, 0x4f, 0x28, 0x51, 0xe7, 0x56, 0x4f, 0xf1, 0xa3,
      +	0xc1, 0xda, 0xb0, 0x15, 0x5c, 0xec, 0xac, 0x47, 0x66, 0x28, 0x2b, 0xdb, 0x1e, 0x86, 0x41, 0xf4,
      +	0x7f, 0x35, 0x4e, 0x27, 0x4e, 0x7f, 0xa1, 0xee, 0x04, 0xb2, 0x05, 0x8c, 0x55, 0xae, 0xf3, 0x3d,
      +	0x1a, 0xd4, 0xe1, 0xbf, 0x28, 0xb0, 0xc8, 0x8f, 0xc0, 0x36, 0x00, 0x7e, 0x52, 0xe6, 0x4e, 0x85,
      +	0x53, 0xeb, 0x70, 0x76, 0x7b, 0xc5, 0x87, 0x89, 0x1f, 0x6d, 0xf3, 0xe1, 0x90, 0x2d, 0x71, 0xb2,
      +	0x35, 0x71, 0x1f, 0xd7, 0x59, 0x7e, 0x05, 0x70, 0x39, 0x48, 0x59, 0x2b, 0x92, 0x35, 0xb2, 0x19,
      +	0x9c, 0xa0, 0xd6, 0xa4, 0x6d, 0x36, 0x37, 0xb8, 0x2d, 0xd8, 0x33, 0x8c, 0x7e, 0x8d, 0xbb, 0xe3,
      +	0xc7, 0x1e, 0x88, 0xff, 0x69, 0xea, 0xd3, 0xa4, 0xde, 0x61, 0xfe, 0x06, 0x23, 0x57, 0x31, 0x06,
      +	0x23, 0x69, 0x6f, 0xd4, 0x8d, 0xf1, 0xff, 0xec, 0x06, 0xa6, 0xc2, 0xe2, 0xda, 0x08, 0x92, 0x99,
      +	0x22, 0x21, 0x4d, 0x77, 0xfd, 0xc9, 0x41, 0x4e, 0x9c, 0xca, 0x42, 0x38, 0x2d, 0x48, 0x1a, 0xb4,
      +	0xc0, 0xd4, 0x03, 0x7d, 0xb9, 0xbe, 0x87, 0x85, 0xcd, 0x72, 0x34, 0xdf, 0xfa, 0x3c, 0xf1, 0x8b,
      +	0xf6, 0x0f, 0x52, 0xbf, 0x8e, 0xdb, 0xb5, 0x67, 0x25, 0x7d, 0x07, 0x00, 0x00, 0xff, 0xff, 0x83,
      +	0x7b, 0x5c, 0x7c, 0x1b, 0x02, 0x00, 0x00,
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
      new file mode 100644
      index 00000000..8953d0ff
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
      @@ -0,0 +1,83 @@
      +// Code generated by protoc-gen-go.
      +// source: google/protobuf/compiler/plugin.proto
      +// DO NOT EDIT!
      +
      +package google_protobuf_compiler
      +
      +import proto "github.com/golang/protobuf/proto"
      +import "math"
      +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
      +
      +// Reference proto and math imports to suppress error if they are not otherwise used.
      +var _ = proto.GetString
      +var _ = math.Inf
      +
      +type CodeGeneratorRequest struct {
      +	FileToGenerate   []string                               `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
      +	Parameter        *string                                `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
      +	ProtoFile        []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
      +	XXX_unrecognized []byte                                 `json:"-"`
      +}
      +
      +func (this *CodeGeneratorRequest) Reset()         { *this = CodeGeneratorRequest{} }
      +func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
      +func (*CodeGeneratorRequest) ProtoMessage()       {}
      +
      +func (this *CodeGeneratorRequest) GetParameter() string {
      +	if this != nil && this.Parameter != nil {
      +		return *this.Parameter
      +	}
      +	return ""
      +}
      +
      +type CodeGeneratorResponse struct {
      +	Error            *string                       `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
      +	File             []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
      +	XXX_unrecognized []byte                        `json:"-"`
      +}
      +
      +func (this *CodeGeneratorResponse) Reset()         { *this = CodeGeneratorResponse{} }
      +func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
      +func (*CodeGeneratorResponse) ProtoMessage()       {}
      +
      +func (this *CodeGeneratorResponse) GetError() string {
      +	if this != nil && this.Error != nil {
      +		return *this.Error
      +	}
      +	return ""
      +}
      +
      +type CodeGeneratorResponse_File struct {
      +	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
      +	InsertionPoint   *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
      +	Content          *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
      +	XXX_unrecognized []byte  `json:"-"`
      +}
      +
      +func (this *CodeGeneratorResponse_File) Reset()         { *this = CodeGeneratorResponse_File{} }
      +func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
      +func (*CodeGeneratorResponse_File) ProtoMessage()       {}
      +
      +func (this *CodeGeneratorResponse_File) GetName() string {
      +	if this != nil && this.Name != nil {
      +		return *this.Name
      +	}
      +	return ""
      +}
      +
      +func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
      +	if this != nil && this.InsertionPoint != nil {
      +		return *this.InsertionPoint
      +	}
      +	return ""
      +}
      +
      +func (this *CodeGeneratorResponse_File) GetContent() string {
      +	if this != nil && this.Content != nil {
      +		return *this.Content
      +	}
      +	return ""
      +}
      +
      +func init() {
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile
      new file mode 100644
      index 00000000..a85cc565
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile
      @@ -0,0 +1,72 @@
      +# Go support for Protocol Buffers - Google's data interchange format
      +#
      +# Copyright 2010 The Go Authors.  All rights reserved.
      +# https://github.com/golang/protobuf
      +#
      +# Redistribution and use in source and binary forms, with or without
      +# modification, are permitted provided that the following conditions are
      +# met:
      +#
      +#     * Redistributions of source code must retain the above copyright
      +# notice, this list of conditions and the following disclaimer.
      +#     * Redistributions in binary form must reproduce the above
      +# copyright notice, this list of conditions and the following disclaimer
      +# in the documentation and/or other materials provided with the
      +# distribution.
      +#     * Neither the name of Google Inc. nor the names of its
      +# contributors may be used to endorse or promote products derived from
      +# this software without specific prior written permission.
      +#
      +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +all:
      +	@echo run make test
      +
      +include ../../Make.protobuf
      +
      +test:	golden testbuild
      +
      +#test:	golden testbuild extension_test
      +#	./extension_test
      +#	@echo PASS
      +
      +my_test/test.pb.go: my_test/test.proto
      +	protoc --go_out=Mmulti/multi1.proto=github.com/golang/protobuf/protoc-gen-go/testdata/multi:. $<
      +
      +golden:
      +	make -B my_test/test.pb.go
      +	sed -i '/return.*fileDescriptor/d' my_test/test.pb.go
      +	sed -i '/^var fileDescriptor/,/^}/d' my_test/test.pb.go
      +	gofmt -w my_test/test.pb.go
      +	diff -w my_test/test.pb.go my_test/test.pb.go.golden
      +
      +nuke:	clean
      +
      +testbuild:	regenerate
      +	go test
      +
      +regenerate:
      +	# Invoke protoc once to generate three independent .pb.go files in the same package.
      +	protoc --go_out=. multi/multi{1,2,3}.proto
      +
      +#extension_test:	extension_test.$O
      +#	$(LD) -L. -o $@ $<
      +
      +#multi.a: multi3.pb.$O multi2.pb.$O multi1.pb.$O
      +#	rm -f multi.a
      +#	$(QUOTED_GOBIN)/gopack grc $@ $<
      +
      +#test.pb.go:	imp.pb.go
      +#multi1.pb.go:	multi2.pb.go multi3.pb.go
      +#main.$O: imp.pb.$O test.pb.$O multi.a
      +#extension_test.$O: extension_base.pb.$O extension_extra.pb.$O extension_user.pb.$O
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto
      new file mode 100644
      index 00000000..94acfc1b
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto
      @@ -0,0 +1,46 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto2";
      +
      +package extension_base;
      +
      +message BaseMessage {
      +  optional int32 height = 1;
      +  extensions 4 to 9;
      +  extensions 16 to max;
      +}
      +
      +// Another message that may be extended, using message_set_wire_format.
      +message OldStyleMessage {
      +  option message_set_wire_format = true;
      +  extensions 100 to max;
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto
      new file mode 100644
      index 00000000..fca7f600
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto
      @@ -0,0 +1,38 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2011 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto2";
      +
      +package extension_extra;
      +
      +message ExtraMessage {
      +  optional int32 width = 1;
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go
      new file mode 100644
      index 00000000..86e9c118
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go
      @@ -0,0 +1,210 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +// Test that we can use protocol buffers that use extensions.
      +
      +package testdata
      +
      +/*
      +
      +import (
      +	"bytes"
      +	"regexp"
      +	"testing"
      +
      +	"github.com/golang/protobuf/proto"
      +	base "extension_base.pb"
      +	user "extension_user.pb"
      +)
      +
      +func TestSingleFieldExtension(t *testing.T) {
      +	bm := &base.BaseMessage{
      +		Height: proto.Int32(178),
      +	}
      +
      +	// Use extension within scope of another type.
      +	vol := proto.Uint32(11)
      +	err := proto.SetExtension(bm, user.E_LoudMessage_Volume, vol)
      +	if err != nil {
      +		t.Fatal("Failed setting extension:", err)
      +	}
      +	buf, err := proto.Marshal(bm)
      +	if err != nil {
      +		t.Fatal("Failed encoding message with extension:", err)
      +	}
      +	bm_new := new(base.BaseMessage)
      +	if err := proto.Unmarshal(buf, bm_new); err != nil {
      +		t.Fatal("Failed decoding message with extension:", err)
      +	}
      +	if !proto.HasExtension(bm_new, user.E_LoudMessage_Volume) {
      +		t.Fatal("Decoded message didn't contain extension.")
      +	}
      +	vol_out, err := proto.GetExtension(bm_new, user.E_LoudMessage_Volume)
      +	if err != nil {
      +		t.Fatal("Failed getting extension:", err)
      +	}
      +	if v := vol_out.(*uint32); *v != *vol {
      +		t.Errorf("vol_out = %v, expected %v", *v, *vol)
      +	}
      +	proto.ClearExtension(bm_new, user.E_LoudMessage_Volume)
      +	if proto.HasExtension(bm_new, user.E_LoudMessage_Volume) {
      +		t.Fatal("Failed clearing extension.")
      +	}
      +}
      +
      +func TestMessageExtension(t *testing.T) {
      +	bm := &base.BaseMessage{
      +		Height: proto.Int32(179),
      +	}
      +
      +	// Use extension that is itself a message.
      +	um := &user.UserMessage{
      +		Name: proto.String("Dave"),
      +		Rank: proto.String("Major"),
      +	}
      +	err := proto.SetExtension(bm, user.E_LoginMessage_UserMessage, um)
      +	if err != nil {
      +		t.Fatal("Failed setting extension:", err)
      +	}
      +	buf, err := proto.Marshal(bm)
      +	if err != nil {
      +		t.Fatal("Failed encoding message with extension:", err)
      +	}
      +	bm_new := new(base.BaseMessage)
      +	if err := proto.Unmarshal(buf, bm_new); err != nil {
      +		t.Fatal("Failed decoding message with extension:", err)
      +	}
      +	if !proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) {
      +		t.Fatal("Decoded message didn't contain extension.")
      +	}
      +	um_out, err := proto.GetExtension(bm_new, user.E_LoginMessage_UserMessage)
      +	if err != nil {
      +		t.Fatal("Failed getting extension:", err)
      +	}
      +	if n := um_out.(*user.UserMessage).Name; *n != *um.Name {
      +		t.Errorf("um_out.Name = %q, expected %q", *n, *um.Name)
      +	}
      +	if r := um_out.(*user.UserMessage).Rank; *r != *um.Rank {
      +		t.Errorf("um_out.Rank = %q, expected %q", *r, *um.Rank)
      +	}
      +	proto.ClearExtension(bm_new, user.E_LoginMessage_UserMessage)
      +	if proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) {
      +		t.Fatal("Failed clearing extension.")
      +	}
      +}
      +
      +func TestTopLevelExtension(t *testing.T) {
      +	bm := &base.BaseMessage{
      +		Height: proto.Int32(179),
      +	}
      +
      +	width := proto.Int32(17)
      +	err := proto.SetExtension(bm, user.E_Width, width)
      +	if err != nil {
      +		t.Fatal("Failed setting extension:", err)
      +	}
      +	buf, err := proto.Marshal(bm)
      +	if err != nil {
      +		t.Fatal("Failed encoding message with extension:", err)
      +	}
      +	bm_new := new(base.BaseMessage)
      +	if err := proto.Unmarshal(buf, bm_new); err != nil {
      +		t.Fatal("Failed decoding message with extension:", err)
      +	}
      +	if !proto.HasExtension(bm_new, user.E_Width) {
      +		t.Fatal("Decoded message didn't contain extension.")
      +	}
      +	width_out, err := proto.GetExtension(bm_new, user.E_Width)
      +	if err != nil {
      +		t.Fatal("Failed getting extension:", err)
      +	}
      +	if w := width_out.(*int32); *w != *width {
      +		t.Errorf("width_out = %v, expected %v", *w, *width)
      +	}
      +	proto.ClearExtension(bm_new, user.E_Width)
      +	if proto.HasExtension(bm_new, user.E_Width) {
      +		t.Fatal("Failed clearing extension.")
      +	}
      +}
      +
      +func TestMessageSetWireFormat(t *testing.T) {
      +	osm := new(base.OldStyleMessage)
      +	osp := &user.OldStyleParcel{
      +		Name:   proto.String("Dave"),
      +		Height: proto.Int32(178),
      +	}
      +
      +	err := proto.SetExtension(osm, user.E_OldStyleParcel_MessageSetExtension, osp)
      +	if err != nil {
      +		t.Fatal("Failed setting extension:", err)
      +	}
      +
      +	buf, err := proto.Marshal(osm)
      +	if err != nil {
      +		t.Fatal("Failed encoding message:", err)
      +	}
      +
      +	// Data generated from Python implementation.
      +	expected := []byte{
      +		11, 16, 209, 15, 26, 9, 10, 4, 68, 97, 118, 101, 16, 178, 1, 12,
      +	}
      +
      +	if !bytes.Equal(expected, buf) {
      +		t.Errorf("Encoding mismatch.\nwant %+v\n got %+v", expected, buf)
      +	}
      +
      +	// Check that it is restored correctly.
      +	osm = new(base.OldStyleMessage)
      +	if err := proto.Unmarshal(buf, osm); err != nil {
      +		t.Fatal("Failed decoding message:", err)
      +	}
      +	osp_out, err := proto.GetExtension(osm, user.E_OldStyleParcel_MessageSetExtension)
      +	if err != nil {
      +		t.Fatal("Failed getting extension:", err)
      +	}
      +	osp = osp_out.(*user.OldStyleParcel)
      +	if *osp.Name != "Dave" || *osp.Height != 178 {
      +		t.Errorf("Retrieved extension from decoded message is not correct: %+v", osp)
      +	}
      +}
      +
      +func main() {
      +	// simpler than rigging up gotest
      +	testing.Main(regexp.MatchString, []testing.InternalTest{
      +		{"TestSingleFieldExtension", TestSingleFieldExtension},
      +		{"TestMessageExtension", TestMessageExtension},
      +		{"TestTopLevelExtension", TestTopLevelExtension},
      +	},
      +		[]testing.InternalBenchmark{},
      +		[]testing.InternalExample{})
      +}
      +
      +*/
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto
      new file mode 100644
      index 00000000..ff65873d
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto
      @@ -0,0 +1,100 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto2";
      +
      +import "extension_base.proto";
      +import "extension_extra.proto";
      +
      +package extension_user;
      +
      +message UserMessage {
      +  optional string name = 1;
      +  optional string rank = 2;
      +}
      +
      +// Extend with a message
      +extend extension_base.BaseMessage {
      +  optional UserMessage user_message = 5;
      +}
      +
      +// Extend with a foreign message
      +extend extension_base.BaseMessage {
      +  optional extension_extra.ExtraMessage extra_message = 9;
      +}
      +
      +// Extend with some primitive types
      +extend extension_base.BaseMessage {
      +  optional int32 width = 6;
      +  optional int64 area = 7;
      +}
      +
      +// Extend inside the scope of another type
      +message LoudMessage {
      +  extend extension_base.BaseMessage {
      +    optional uint32 volume = 8;
      +  }
      +  extensions 100 to max;
      +}
      +
      +// Extend inside the scope of another type, using a message.
      +message LoginMessage {
      +  extend extension_base.BaseMessage {
      +    optional UserMessage user_message = 16;
      +  }
      +}
      +
      +// Extend with a repeated field
      +extend extension_base.BaseMessage {
      +  repeated Detail detail = 17;
      +}
      +
      +message Detail {
      +  optional string color = 1;
      +}
      +
      +// An extension of an extension
      +message Announcement {
      +  optional string words = 1;
      +  extend LoudMessage {
      +    optional Announcement loud_ext = 100;
      +  }
      +}
      +
      +// Something that can be put in a message set.
      +message OldStyleParcel {
      +  extend extension_base.OldStyleMessage {
      +    optional OldStyleParcel message_set_extension = 2001;
      +  }
      +
      +  required string name = 1;
      +  optional int32 height = 2;
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto
      new file mode 100644
      index 00000000..b8bc41ac
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto
      @@ -0,0 +1,59 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2015 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto3";
      +
      +package grpc.testing;
      +
      +message SimpleRequest {
      +}
      +
      +message SimpleResponse {
      +}
      +
      +message StreamMsg {
      +}
      +
      +message StreamMsg2 {
      +}
      +
      +service Test {
      +  rpc UnaryCall(SimpleRequest) returns (SimpleResponse);
      +
      +  // This RPC streams from the server only.
      +  rpc Downstream(SimpleRequest) returns (stream StreamMsg);
      +
      +  // This RPC streams from the client.
      +  rpc Upstream(stream StreamMsg) returns (SimpleResponse);
      +
      +  // This one streams in both directions.
      +  rpc Bidi(stream StreamMsg) returns (stream StreamMsg2);
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden
      new file mode 100644
      index 00000000..784a4f86
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden
      @@ -0,0 +1,113 @@
      +// Code generated by protoc-gen-go.
      +// source: imp.proto
      +// DO NOT EDIT!
      +
      +package imp
      +
      +import proto "github.com/golang/protobuf/proto"
      +import "math"
      +import "os"
      +import imp1 "imp2.pb"
      +
      +// Reference proto & math imports to suppress error if they are not otherwise used.
      +var _ = proto.GetString
      +var _ = math.Inf
      +
      +// Types from public import imp2.proto
      +type PubliclyImportedMessage imp1.PubliclyImportedMessage
      +
      +func (this *PubliclyImportedMessage) Reset() { (*imp1.PubliclyImportedMessage)(this).Reset() }
      +func (this *PubliclyImportedMessage) String() string {
      +	return (*imp1.PubliclyImportedMessage)(this).String()
      +}
      +
      +// PubliclyImportedMessage from public import imp.proto
      +
      +type ImportedMessage_Owner int32
      +
      +const (
      +	ImportedMessage_DAVE ImportedMessage_Owner = 1
      +	ImportedMessage_MIKE ImportedMessage_Owner = 2
      +)
      +
      +var ImportedMessage_Owner_name = map[int32]string{
      +	1: "DAVE",
      +	2: "MIKE",
      +}
      +var ImportedMessage_Owner_value = map[string]int32{
      +	"DAVE": 1,
      +	"MIKE": 2,
      +}
      +
      +// NewImportedMessage_Owner is deprecated. Use x.Enum() instead.
      +func NewImportedMessage_Owner(x ImportedMessage_Owner) *ImportedMessage_Owner {
      +	e := ImportedMessage_Owner(x)
      +	return &e
      +}
      +func (x ImportedMessage_Owner) Enum() *ImportedMessage_Owner {
      +	p := new(ImportedMessage_Owner)
      +	*p = x
      +	return p
      +}
      +func (x ImportedMessage_Owner) String() string {
      +	return proto.EnumName(ImportedMessage_Owner_name, int32(x))
      +}
      +
      +type ImportedMessage struct {
      +	Field            *int64           `protobuf:"varint,1,req,name=field" json:"field,omitempty"`
      +	XXX_extensions   map[int32][]byte `json:",omitempty"`
      +	XXX_unrecognized []byte           `json:",omitempty"`
      +}
      +
      +func (this *ImportedMessage) Reset()         { *this = ImportedMessage{} }
      +func (this *ImportedMessage) String() string { return proto.CompactTextString(this) }
      +
      +var extRange_ImportedMessage = []proto.ExtensionRange{
      +	proto.ExtensionRange{90, 100},
      +}
      +
      +func (*ImportedMessage) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_ImportedMessage
      +}
      +func (this *ImportedMessage) ExtensionMap() map[int32][]byte {
      +	if this.XXX_extensions == nil {
      +		this.XXX_extensions = make(map[int32][]byte)
      +	}
      +	return this.XXX_extensions
      +}
      +
      +type ImportedExtendable struct {
      +	XXX_extensions   map[int32][]byte `json:",omitempty"`
      +	XXX_unrecognized []byte           `json:",omitempty"`
      +}
      +
      +func (this *ImportedExtendable) Reset()         { *this = ImportedExtendable{} }
      +func (this *ImportedExtendable) String() string { return proto.CompactTextString(this) }
      +
      +func (this *ImportedExtendable) Marshal() ([]byte, error) {
      +	return proto.MarshalMessageSet(this.ExtensionMap())
      +}
      +func (this *ImportedExtendable) Unmarshal(buf []byte) error {
      +	return proto.UnmarshalMessageSet(buf, this.ExtensionMap())
      +}
      +// ensure ImportedExtendable satisfies proto.Marshaler and proto.Unmarshaler
      +var _ proto.Marshaler = (*ImportedExtendable)(nil)
      +var _ proto.Unmarshaler = (*ImportedExtendable)(nil)
      +
      +var extRange_ImportedExtendable = []proto.ExtensionRange{
      +	proto.ExtensionRange{100, 536870911},
      +}
      +
      +func (*ImportedExtendable) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_ImportedExtendable
      +}
      +func (this *ImportedExtendable) ExtensionMap() map[int32][]byte {
      +	if this.XXX_extensions == nil {
      +		this.XXX_extensions = make(map[int32][]byte)
      +	}
      +	return this.XXX_extensions
      +}
      +
      +func init() {
      +	proto.RegisterEnum("imp.ImportedMessage_Owner", ImportedMessage_Owner_name, ImportedMessage_Owner_value)
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto
      new file mode 100644
      index 00000000..156e078d
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto
      @@ -0,0 +1,70 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto2";
      +
      +package imp;
      +
      +import "imp2.proto";
      +import "imp3.proto";
      +
      +message ImportedMessage {
      +  required int64 field = 1;
      +
      +  // The forwarded getters for these fields are fiddly to get right.
      +  optional ImportedMessage2 local_msg = 2;
      +  optional ForeignImportedMessage foreign_msg = 3;  // in imp3.proto
      +  optional Owner enum_field = 4;
      +  oneof union {
      +    int32 state = 9;
      +  }
      +
      +  repeated string name = 5;
      +  repeated Owner boss = 6;
      +  repeated ImportedMessage2 memo = 7;
      +
      +  map<string, ImportedMessage2> msg_map = 8;
      +
      +  enum Owner {
      +    DAVE = 1;
      +    MIKE = 2;
      +  }
      +
      +  extensions 90 to 100;
      +}
      +
      +message ImportedMessage2 {
      +}
      +
      +message ImportedExtendable {
      +  option message_set_wire_format = true;
      +  extensions 100 to max;
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto
      new file mode 100644
      index 00000000..3bb0632b
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto
      @@ -0,0 +1,43 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2011 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto2";
      +
      +package imp;
      +
      +message PubliclyImportedMessage {
      +  optional int64 field = 1;
      +}
      +
      +enum PubliclyImportedEnum {
      +  GLASSES = 1;
      +  HAIR = 2;
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto
      new file mode 100644
      index 00000000..58fc7598
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto
      @@ -0,0 +1,38 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2012 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto2";
      +
      +package imp;
      +
      +message ForeignImportedMessage {
      +  optional string tuber = 1;
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go
      new file mode 100644
      index 00000000..f9b5ccf2
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go
      @@ -0,0 +1,46 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +// A simple binary to link together the protocol buffers in this test.
      +
      +package testdata
      +
      +import (
      +	"testing"
      +
      +	mytestpb "./my_test"
      +	multipb "github.com/golang/protobuf/protoc-gen-go/testdata/multi"
      +)
      +
      +func TestLink(t *testing.T) {
      +	_ = &multipb.Multi1{}
      +	_ = &mytestpb.Request{}
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto
      new file mode 100644
      index 00000000..0da6e0af
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto
      @@ -0,0 +1,44 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto2";
      +
      +import "multi/multi2.proto";
      +import "multi/multi3.proto";
      +
      +package multitest;
      +
      +message Multi1 {
      +  required Multi2 multi2 = 1;
      +  optional Multi2.Color color = 2;
      +  optional Multi3.HatType hat_type = 3;
      +}
      +
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto
      new file mode 100644
      index 00000000..e6bfc71b
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto
      @@ -0,0 +1,46 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto2";
      +
      +package multitest;
      +
      +message Multi2 {
      +  required int32 required_value = 1;
      +
      +  enum Color {
      +    BLUE = 1;
      +    GREEN = 2;
      +    RED = 3;
      +  };
      +  optional Color color = 2;
      +}
      +
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto
      new file mode 100644
      index 00000000..146c255b
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto
      @@ -0,0 +1,43 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto2";
      +
      +package multitest;
      +
      +message Multi3 {
      +  enum HatType {
      +    FEDORA = 1;
      +    FEZ = 2;
      +  };
      +  optional HatType hat_type = 1;
      +}
      +
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go
      new file mode 100644
      index 00000000..997743be
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go
      @@ -0,0 +1,882 @@
      +// Code generated by protoc-gen-go.
      +// source: my_test/test.proto
      +// DO NOT EDIT!
      +
      +/*
      +Package my_test is a generated protocol buffer package.
      +
      +This package holds interesting messages.
      +
      +It is generated from these files:
      +	my_test/test.proto
      +
      +It has these top-level messages:
      +	Request
      +	Reply
      +	OtherBase
      +	ReplyExtensions
      +	OtherReplyExtensions
      +	OldReply
      +	Communique
      +*/
      +package my_test
      +
      +import proto "github.com/golang/protobuf/proto"
      +import fmt "fmt"
      +import math "math"
      +import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi"
      +
      +// Reference imports to suppress errors if they are not otherwise used.
      +var _ = proto.Marshal
      +var _ = fmt.Errorf
      +var _ = math.Inf
      +
      +// This is a compile-time assertion to ensure that this generated file
      +// is compatible with the proto package it is being compiled against.
      +const _ = proto.ProtoPackageIsVersion1
      +
      +type HatType int32
      +
      +const (
      +	// deliberately skipping 0
      +	HatType_FEDORA HatType = 1
      +	HatType_FEZ    HatType = 2
      +)
      +
      +var HatType_name = map[int32]string{
      +	1: "FEDORA",
      +	2: "FEZ",
      +}
      +var HatType_value = map[string]int32{
      +	"FEDORA": 1,
      +	"FEZ":    2,
      +}
      +
      +func (x HatType) Enum() *HatType {
      +	p := new(HatType)
      +	*p = x
      +	return p
      +}
      +func (x HatType) String() string {
      +	return proto.EnumName(HatType_name, int32(x))
      +}
      +func (x *HatType) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType")
      +	if err != nil {
      +		return err
      +	}
      +	*x = HatType(value)
      +	return nil
      +}
      +
      +// This enum represents days of the week.
      +type Days int32
      +
      +const (
      +	Days_MONDAY  Days = 1
      +	Days_TUESDAY Days = 2
      +	Days_LUNDI   Days = 1
      +)
      +
      +var Days_name = map[int32]string{
      +	1: "MONDAY",
      +	2: "TUESDAY",
      +	// Duplicate value: 1: "LUNDI",
      +}
      +var Days_value = map[string]int32{
      +	"MONDAY":  1,
      +	"TUESDAY": 2,
      +	"LUNDI":   1,
      +}
      +
      +func (x Days) Enum() *Days {
      +	p := new(Days)
      +	*p = x
      +	return p
      +}
      +func (x Days) String() string {
      +	return proto.EnumName(Days_name, int32(x))
      +}
      +func (x *Days) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days")
      +	if err != nil {
      +		return err
      +	}
      +	*x = Days(value)
      +	return nil
      +}
      +
      +type Request_Color int32
      +
      +const (
      +	Request_RED   Request_Color = 0
      +	Request_GREEN Request_Color = 1
      +	Request_BLUE  Request_Color = 2
      +)
      +
      +var Request_Color_name = map[int32]string{
      +	0: "RED",
      +	1: "GREEN",
      +	2: "BLUE",
      +}
      +var Request_Color_value = map[string]int32{
      +	"RED":   0,
      +	"GREEN": 1,
      +	"BLUE":  2,
      +}
      +
      +func (x Request_Color) Enum() *Request_Color {
      +	p := new(Request_Color)
      +	*p = x
      +	return p
      +}
      +func (x Request_Color) String() string {
      +	return proto.EnumName(Request_Color_name, int32(x))
      +}
      +func (x *Request_Color) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color")
      +	if err != nil {
      +		return err
      +	}
      +	*x = Request_Color(value)
      +	return nil
      +}
      +
      +type Reply_Entry_Game int32
      +
      +const (
      +	Reply_Entry_FOOTBALL Reply_Entry_Game = 1
      +	Reply_Entry_TENNIS   Reply_Entry_Game = 2
      +)
      +
      +var Reply_Entry_Game_name = map[int32]string{
      +	1: "FOOTBALL",
      +	2: "TENNIS",
      +}
      +var Reply_Entry_Game_value = map[string]int32{
      +	"FOOTBALL": 1,
      +	"TENNIS":   2,
      +}
      +
      +func (x Reply_Entry_Game) Enum() *Reply_Entry_Game {
      +	p := new(Reply_Entry_Game)
      +	*p = x
      +	return p
      +}
      +func (x Reply_Entry_Game) String() string {
      +	return proto.EnumName(Reply_Entry_Game_name, int32(x))
      +}
      +func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game")
      +	if err != nil {
      +		return err
      +	}
      +	*x = Reply_Entry_Game(value)
      +	return nil
      +}
      +
      +// This is a message that might be sent somewhere.
      +type Request struct {
      +	Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"`
      +	//  optional imp.ImportedMessage imported_message = 2;
      +	Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"`
      +	Hat *HatType       `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"`
      +	//  optional imp.ImportedMessage.Owner owner = 6;
      +	Deadline  *float32           `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"`
      +	Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"`
      +	// This is a map field. It will generate map[int32]string.
      +	NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
      +	// This is a map field whose value type is a message.
      +	MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
      +	Reset_     *int32           `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"`
      +	// This field should not conflict with any getters.
      +	GetKey_          *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"`
      +	XXX_unrecognized []byte  `json:"-"`
      +}
      +
      +func (m *Request) Reset()         { *m = Request{} }
      +func (m *Request) String() string { return proto.CompactTextString(m) }
      +func (*Request) ProtoMessage()    {}
      +
      +const Default_Request_Hat HatType = HatType_FEDORA
      +
      +var Default_Request_Deadline float32 = float32(math.Inf(1))
      +
      +func (m *Request) GetKey() []int64 {
      +	if m != nil {
      +		return m.Key
      +	}
      +	return nil
      +}
      +
      +func (m *Request) GetHue() Request_Color {
      +	if m != nil && m.Hue != nil {
      +		return *m.Hue
      +	}
      +	return Request_RED
      +}
      +
      +func (m *Request) GetHat() HatType {
      +	if m != nil && m.Hat != nil {
      +		return *m.Hat
      +	}
      +	return Default_Request_Hat
      +}
      +
      +func (m *Request) GetDeadline() float32 {
      +	if m != nil && m.Deadline != nil {
      +		return *m.Deadline
      +	}
      +	return Default_Request_Deadline
      +}
      +
      +func (m *Request) GetSomegroup() *Request_SomeGroup {
      +	if m != nil {
      +		return m.Somegroup
      +	}
      +	return nil
      +}
      +
      +func (m *Request) GetNameMapping() map[int32]string {
      +	if m != nil {
      +		return m.NameMapping
      +	}
      +	return nil
      +}
      +
      +func (m *Request) GetMsgMapping() map[int64]*Reply {
      +	if m != nil {
      +		return m.MsgMapping
      +	}
      +	return nil
      +}
      +
      +func (m *Request) GetReset_() int32 {
      +	if m != nil && m.Reset_ != nil {
      +		return *m.Reset_
      +	}
      +	return 0
      +}
      +
      +func (m *Request) GetGetKey_() string {
      +	if m != nil && m.GetKey_ != nil {
      +		return *m.GetKey_
      +	}
      +	return ""
      +}
      +
      +type Request_SomeGroup struct {
      +	GroupField       *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"`
      +	XXX_unrecognized []byte `json:"-"`
      +}
      +
      +func (m *Request_SomeGroup) Reset()         { *m = Request_SomeGroup{} }
      +func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) }
      +func (*Request_SomeGroup) ProtoMessage()    {}
      +
      +func (m *Request_SomeGroup) GetGroupField() int32 {
      +	if m != nil && m.GroupField != nil {
      +		return *m.GroupField
      +	}
      +	return 0
      +}
      +
      +type Reply struct {
      +	Found            []*Reply_Entry            `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"`
      +	CompactKeys      []int32                   `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"`
      +	XXX_extensions   map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized []byte                    `json:"-"`
      +}
      +
      +func (m *Reply) Reset()         { *m = Reply{} }
      +func (m *Reply) String() string { return proto.CompactTextString(m) }
      +func (*Reply) ProtoMessage()    {}
      +
      +var extRange_Reply = []proto.ExtensionRange{
      +	{100, 536870911},
      +}
      +
      +func (*Reply) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_Reply
      +}
      +func (m *Reply) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +func (m *Reply) GetFound() []*Reply_Entry {
      +	if m != nil {
      +		return m.Found
      +	}
      +	return nil
      +}
      +
      +func (m *Reply) GetCompactKeys() []int32 {
      +	if m != nil {
      +		return m.CompactKeys
      +	}
      +	return nil
      +}
      +
      +type Reply_Entry struct {
      +	KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"`
      +	Value                         *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"`
      +	XMyFieldName_2                *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=myFieldName2" json:"_my_field_name_2,omitempty"`
      +	XXX_unrecognized              []byte `json:"-"`
      +}
      +
      +func (m *Reply_Entry) Reset()         { *m = Reply_Entry{} }
      +func (m *Reply_Entry) String() string { return proto.CompactTextString(m) }
      +func (*Reply_Entry) ProtoMessage()    {}
      +
      +const Default_Reply_Entry_Value int64 = 7
      +
      +func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 {
      +	if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil {
      +		return *m.KeyThatNeeds_1234Camel_CasIng
      +	}
      +	return 0
      +}
      +
      +func (m *Reply_Entry) GetValue() int64 {
      +	if m != nil && m.Value != nil {
      +		return *m.Value
      +	}
      +	return Default_Reply_Entry_Value
      +}
      +
      +func (m *Reply_Entry) GetXMyFieldName_2() int64 {
      +	if m != nil && m.XMyFieldName_2 != nil {
      +		return *m.XMyFieldName_2
      +	}
      +	return 0
      +}
      +
      +type OtherBase struct {
      +	Name             *string                   `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
      +	XXX_extensions   map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized []byte                    `json:"-"`
      +}
      +
      +func (m *OtherBase) Reset()         { *m = OtherBase{} }
      +func (m *OtherBase) String() string { return proto.CompactTextString(m) }
      +func (*OtherBase) ProtoMessage()    {}
      +
      +var extRange_OtherBase = []proto.ExtensionRange{
      +	{100, 536870911},
      +}
      +
      +func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_OtherBase
      +}
      +func (m *OtherBase) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +func (m *OtherBase) GetName() string {
      +	if m != nil && m.Name != nil {
      +		return *m.Name
      +	}
      +	return ""
      +}
      +
      +type ReplyExtensions struct {
      +	XXX_unrecognized []byte `json:"-"`
      +}
      +
      +func (m *ReplyExtensions) Reset()         { *m = ReplyExtensions{} }
      +func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) }
      +func (*ReplyExtensions) ProtoMessage()    {}
      +
      +var E_ReplyExtensions_Time = &proto.ExtensionDesc{
      +	ExtendedType:  (*Reply)(nil),
      +	ExtensionType: (*float64)(nil),
      +	Field:         101,
      +	Name:          "my.test.ReplyExtensions.time",
      +	Tag:           "fixed64,101,opt,name=time",
      +}
      +
      +var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{
      +	ExtendedType:  (*Reply)(nil),
      +	ExtensionType: (*ReplyExtensions)(nil),
      +	Field:         105,
      +	Name:          "my.test.ReplyExtensions.carrot",
      +	Tag:           "bytes,105,opt,name=carrot",
      +}
      +
      +var E_ReplyExtensions_Donut = &proto.ExtensionDesc{
      +	ExtendedType:  (*OtherBase)(nil),
      +	ExtensionType: (*ReplyExtensions)(nil),
      +	Field:         101,
      +	Name:          "my.test.ReplyExtensions.donut",
      +	Tag:           "bytes,101,opt,name=donut",
      +}
      +
      +type OtherReplyExtensions struct {
      +	Key              *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"`
      +	XXX_unrecognized []byte `json:"-"`
      +}
      +
      +func (m *OtherReplyExtensions) Reset()         { *m = OtherReplyExtensions{} }
      +func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) }
      +func (*OtherReplyExtensions) ProtoMessage()    {}
      +
      +func (m *OtherReplyExtensions) GetKey() int32 {
      +	if m != nil && m.Key != nil {
      +		return *m.Key
      +	}
      +	return 0
      +}
      +
      +type OldReply struct {
      +	XXX_extensions   map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized []byte                    `json:"-"`
      +}
      +
      +func (m *OldReply) Reset()         { *m = OldReply{} }
      +func (m *OldReply) String() string { return proto.CompactTextString(m) }
      +func (*OldReply) ProtoMessage()    {}
      +
      +func (m *OldReply) Marshal() ([]byte, error) {
      +	return proto.MarshalMessageSet(m.ExtensionMap())
      +}
      +func (m *OldReply) Unmarshal(buf []byte) error {
      +	return proto.UnmarshalMessageSet(buf, m.ExtensionMap())
      +}
      +func (m *OldReply) MarshalJSON() ([]byte, error) {
      +	return proto.MarshalMessageSetJSON(m.XXX_extensions)
      +}
      +func (m *OldReply) UnmarshalJSON(buf []byte) error {
      +	return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions)
      +}
      +
      +// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler
      +var _ proto.Marshaler = (*OldReply)(nil)
      +var _ proto.Unmarshaler = (*OldReply)(nil)
      +
      +var extRange_OldReply = []proto.ExtensionRange{
      +	{100, 2147483646},
      +}
      +
      +func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_OldReply
      +}
      +func (m *OldReply) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +type Communique struct {
      +	MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"`
      +	// This is a oneof, called "union".
      +	//
      +	// Types that are valid to be assigned to Union:
      +	//	*Communique_Number
      +	//	*Communique_Name
      +	//	*Communique_Data
      +	//	*Communique_TempC
      +	//	*Communique_Height
      +	//	*Communique_Today
      +	//	*Communique_Maybe
      +	//	*Communique_Delta_
      +	//	*Communique_Msg
      +	//	*Communique_Somegroup
      +	Union            isCommunique_Union `protobuf_oneof:"union"`
      +	XXX_unrecognized []byte             `json:"-"`
      +}
      +
      +func (m *Communique) Reset()         { *m = Communique{} }
      +func (m *Communique) String() string { return proto.CompactTextString(m) }
      +func (*Communique) ProtoMessage()    {}
      +
      +type isCommunique_Union interface {
      +	isCommunique_Union()
      +}
      +
      +type Communique_Number struct {
      +	Number int32 `protobuf:"varint,5,opt,name=number,oneof"`
      +}
      +type Communique_Name struct {
      +	Name string `protobuf:"bytes,6,opt,name=name,oneof"`
      +}
      +type Communique_Data struct {
      +	Data []byte `protobuf:"bytes,7,opt,name=data,oneof"`
      +}
      +type Communique_TempC struct {
      +	TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"`
      +}
      +type Communique_Height struct {
      +	Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"`
      +}
      +type Communique_Today struct {
      +	Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"`
      +}
      +type Communique_Maybe struct {
      +	Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"`
      +}
      +type Communique_Delta_ struct {
      +	Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"`
      +}
      +type Communique_Msg struct {
      +	Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"`
      +}
      +type Communique_Somegroup struct {
      +	Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"`
      +}
      +
      +func (*Communique_Number) isCommunique_Union()    {}
      +func (*Communique_Name) isCommunique_Union()      {}
      +func (*Communique_Data) isCommunique_Union()      {}
      +func (*Communique_TempC) isCommunique_Union()     {}
      +func (*Communique_Height) isCommunique_Union()    {}
      +func (*Communique_Today) isCommunique_Union()     {}
      +func (*Communique_Maybe) isCommunique_Union()     {}
      +func (*Communique_Delta_) isCommunique_Union()    {}
      +func (*Communique_Msg) isCommunique_Union()       {}
      +func (*Communique_Somegroup) isCommunique_Union() {}
      +
      +func (m *Communique) GetUnion() isCommunique_Union {
      +	if m != nil {
      +		return m.Union
      +	}
      +	return nil
      +}
      +
      +func (m *Communique) GetMakeMeCry() bool {
      +	if m != nil && m.MakeMeCry != nil {
      +		return *m.MakeMeCry
      +	}
      +	return false
      +}
      +
      +func (m *Communique) GetNumber() int32 {
      +	if x, ok := m.GetUnion().(*Communique_Number); ok {
      +		return x.Number
      +	}
      +	return 0
      +}
      +
      +func (m *Communique) GetName() string {
      +	if x, ok := m.GetUnion().(*Communique_Name); ok {
      +		return x.Name
      +	}
      +	return ""
      +}
      +
      +func (m *Communique) GetData() []byte {
      +	if x, ok := m.GetUnion().(*Communique_Data); ok {
      +		return x.Data
      +	}
      +	return nil
      +}
      +
      +func (m *Communique) GetTempC() float64 {
      +	if x, ok := m.GetUnion().(*Communique_TempC); ok {
      +		return x.TempC
      +	}
      +	return 0
      +}
      +
      +func (m *Communique) GetHeight() float32 {
      +	if x, ok := m.GetUnion().(*Communique_Height); ok {
      +		return x.Height
      +	}
      +	return 0
      +}
      +
      +func (m *Communique) GetToday() Days {
      +	if x, ok := m.GetUnion().(*Communique_Today); ok {
      +		return x.Today
      +	}
      +	return Days_MONDAY
      +}
      +
      +func (m *Communique) GetMaybe() bool {
      +	if x, ok := m.GetUnion().(*Communique_Maybe); ok {
      +		return x.Maybe
      +	}
      +	return false
      +}
      +
      +func (m *Communique) GetDelta() int32 {
      +	if x, ok := m.GetUnion().(*Communique_Delta_); ok {
      +		return x.Delta
      +	}
      +	return 0
      +}
      +
      +func (m *Communique) GetMsg() *Reply {
      +	if x, ok := m.GetUnion().(*Communique_Msg); ok {
      +		return x.Msg
      +	}
      +	return nil
      +}
      +
      +func (m *Communique) GetSomegroup() *Communique_SomeGroup {
      +	if x, ok := m.GetUnion().(*Communique_Somegroup); ok {
      +		return x.Somegroup
      +	}
      +	return nil
      +}
      +
      +// XXX_OneofFuncs is for the internal use of the proto package.
      +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
      +	return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{
      +		(*Communique_Number)(nil),
      +		(*Communique_Name)(nil),
      +		(*Communique_Data)(nil),
      +		(*Communique_TempC)(nil),
      +		(*Communique_Height)(nil),
      +		(*Communique_Today)(nil),
      +		(*Communique_Maybe)(nil),
      +		(*Communique_Delta_)(nil),
      +		(*Communique_Msg)(nil),
      +		(*Communique_Somegroup)(nil),
      +	}
      +}
      +
      +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
      +	m := msg.(*Communique)
      +	// union
      +	switch x := m.Union.(type) {
      +	case *Communique_Number:
      +		b.EncodeVarint(5<<3 | proto.WireVarint)
      +		b.EncodeVarint(uint64(x.Number))
      +	case *Communique_Name:
      +		b.EncodeVarint(6<<3 | proto.WireBytes)
      +		b.EncodeStringBytes(x.Name)
      +	case *Communique_Data:
      +		b.EncodeVarint(7<<3 | proto.WireBytes)
      +		b.EncodeRawBytes(x.Data)
      +	case *Communique_TempC:
      +		b.EncodeVarint(8<<3 | proto.WireFixed64)
      +		b.EncodeFixed64(math.Float64bits(x.TempC))
      +	case *Communique_Height:
      +		b.EncodeVarint(9<<3 | proto.WireFixed32)
      +		b.EncodeFixed32(uint64(math.Float32bits(x.Height)))
      +	case *Communique_Today:
      +		b.EncodeVarint(10<<3 | proto.WireVarint)
      +		b.EncodeVarint(uint64(x.Today))
      +	case *Communique_Maybe:
      +		t := uint64(0)
      +		if x.Maybe {
      +			t = 1
      +		}
      +		b.EncodeVarint(11<<3 | proto.WireVarint)
      +		b.EncodeVarint(t)
      +	case *Communique_Delta_:
      +		b.EncodeVarint(12<<3 | proto.WireVarint)
      +		b.EncodeZigzag32(uint64(x.Delta))
      +	case *Communique_Msg:
      +		b.EncodeVarint(13<<3 | proto.WireBytes)
      +		if err := b.EncodeMessage(x.Msg); err != nil {
      +			return err
      +		}
      +	case *Communique_Somegroup:
      +		b.EncodeVarint(14<<3 | proto.WireStartGroup)
      +		if err := b.Marshal(x.Somegroup); err != nil {
      +			return err
      +		}
      +		b.EncodeVarint(14<<3 | proto.WireEndGroup)
      +	case nil:
      +	default:
      +		return fmt.Errorf("Communique.Union has unexpected type %T", x)
      +	}
      +	return nil
      +}
      +
      +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
      +	m := msg.(*Communique)
      +	switch tag {
      +	case 5: // union.number
      +		if wire != proto.WireVarint {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeVarint()
      +		m.Union = &Communique_Number{int32(x)}
      +		return true, err
      +	case 6: // union.name
      +		if wire != proto.WireBytes {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeStringBytes()
      +		m.Union = &Communique_Name{x}
      +		return true, err
      +	case 7: // union.data
      +		if wire != proto.WireBytes {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeRawBytes(true)
      +		m.Union = &Communique_Data{x}
      +		return true, err
      +	case 8: // union.temp_c
      +		if wire != proto.WireFixed64 {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeFixed64()
      +		m.Union = &Communique_TempC{math.Float64frombits(x)}
      +		return true, err
      +	case 9: // union.height
      +		if wire != proto.WireFixed32 {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeFixed32()
      +		m.Union = &Communique_Height{math.Float32frombits(uint32(x))}
      +		return true, err
      +	case 10: // union.today
      +		if wire != proto.WireVarint {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeVarint()
      +		m.Union = &Communique_Today{Days(x)}
      +		return true, err
      +	case 11: // union.maybe
      +		if wire != proto.WireVarint {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeVarint()
      +		m.Union = &Communique_Maybe{x != 0}
      +		return true, err
      +	case 12: // union.delta
      +		if wire != proto.WireVarint {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeZigzag32()
      +		m.Union = &Communique_Delta_{int32(x)}
      +		return true, err
      +	case 13: // union.msg
      +		if wire != proto.WireBytes {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		msg := new(Reply)
      +		err := b.DecodeMessage(msg)
      +		m.Union = &Communique_Msg{msg}
      +		return true, err
      +	case 14: // union.somegroup
      +		if wire != proto.WireStartGroup {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		msg := new(Communique_SomeGroup)
      +		err := b.DecodeGroup(msg)
      +		m.Union = &Communique_Somegroup{msg}
      +		return true, err
      +	default:
      +		return false, nil
      +	}
      +}
      +
      +func _Communique_OneofSizer(msg proto.Message) (n int) {
      +	m := msg.(*Communique)
      +	// union
      +	switch x := m.Union.(type) {
      +	case *Communique_Number:
      +		n += proto.SizeVarint(5<<3 | proto.WireVarint)
      +		n += proto.SizeVarint(uint64(x.Number))
      +	case *Communique_Name:
      +		n += proto.SizeVarint(6<<3 | proto.WireBytes)
      +		n += proto.SizeVarint(uint64(len(x.Name)))
      +		n += len(x.Name)
      +	case *Communique_Data:
      +		n += proto.SizeVarint(7<<3 | proto.WireBytes)
      +		n += proto.SizeVarint(uint64(len(x.Data)))
      +		n += len(x.Data)
      +	case *Communique_TempC:
      +		n += proto.SizeVarint(8<<3 | proto.WireFixed64)
      +		n += 8
      +	case *Communique_Height:
      +		n += proto.SizeVarint(9<<3 | proto.WireFixed32)
      +		n += 4
      +	case *Communique_Today:
      +		n += proto.SizeVarint(10<<3 | proto.WireVarint)
      +		n += proto.SizeVarint(uint64(x.Today))
      +	case *Communique_Maybe:
      +		n += proto.SizeVarint(11<<3 | proto.WireVarint)
      +		n += 1
      +	case *Communique_Delta_:
      +		n += proto.SizeVarint(12<<3 | proto.WireVarint)
      +		n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31))))
      +	case *Communique_Msg:
      +		s := proto.Size(x.Msg)
      +		n += proto.SizeVarint(13<<3 | proto.WireBytes)
      +		n += proto.SizeVarint(uint64(s))
      +		n += s
      +	case *Communique_Somegroup:
      +		n += proto.SizeVarint(14<<3 | proto.WireStartGroup)
      +		n += proto.Size(x.Somegroup)
      +		n += proto.SizeVarint(14<<3 | proto.WireEndGroup)
      +	case nil:
      +	default:
      +		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
      +	}
      +	return n
      +}
      +
      +type Communique_SomeGroup struct {
      +	Member           *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"`
      +	XXX_unrecognized []byte  `json:"-"`
      +}
      +
      +func (m *Communique_SomeGroup) Reset()         { *m = Communique_SomeGroup{} }
      +func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) }
      +func (*Communique_SomeGroup) ProtoMessage()    {}
      +
      +func (m *Communique_SomeGroup) GetMember() string {
      +	if m != nil && m.Member != nil {
      +		return *m.Member
      +	}
      +	return ""
      +}
      +
      +type Communique_Delta struct {
      +	XXX_unrecognized []byte `json:"-"`
      +}
      +
      +func (m *Communique_Delta) Reset()         { *m = Communique_Delta{} }
      +func (m *Communique_Delta) String() string { return proto.CompactTextString(m) }
      +func (*Communique_Delta) ProtoMessage()    {}
      +
      +var E_Tag = &proto.ExtensionDesc{
      +	ExtendedType:  (*Reply)(nil),
      +	ExtensionType: (*string)(nil),
      +	Field:         103,
      +	Name:          "my.test.tag",
      +	Tag:           "bytes,103,opt,name=tag",
      +}
      +
      +var E_Donut = &proto.ExtensionDesc{
      +	ExtendedType:  (*Reply)(nil),
      +	ExtensionType: (*OtherReplyExtensions)(nil),
      +	Field:         106,
      +	Name:          "my.test.donut",
      +	Tag:           "bytes,106,opt,name=donut",
      +}
      +
      +func init() {
      +	proto.RegisterType((*Request)(nil), "my.test.Request")
      +	proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup")
      +	proto.RegisterType((*Reply)(nil), "my.test.Reply")
      +	proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry")
      +	proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase")
      +	proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions")
      +	proto.RegisterType((*OtherReplyExtensions)(nil), "my.test.OtherReplyExtensions")
      +	proto.RegisterType((*OldReply)(nil), "my.test.OldReply")
      +	proto.RegisterType((*Communique)(nil), "my.test.Communique")
      +	proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup")
      +	proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta")
      +	proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value)
      +	proto.RegisterEnum("my.test.Days", Days_name, Days_value)
      +	proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value)
      +	proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value)
      +	proto.RegisterExtension(E_ReplyExtensions_Time)
      +	proto.RegisterExtension(E_ReplyExtensions_Carrot)
      +	proto.RegisterExtension(E_ReplyExtensions_Donut)
      +	proto.RegisterExtension(E_Tag)
      +	proto.RegisterExtension(E_Donut)
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
      new file mode 100644
      index 00000000..997743be
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
      @@ -0,0 +1,882 @@
      +// Code generated by protoc-gen-go.
      +// source: my_test/test.proto
      +// DO NOT EDIT!
      +
      +/*
      +Package my_test is a generated protocol buffer package.
      +
      +This package holds interesting messages.
      +
      +It is generated from these files:
      +	my_test/test.proto
      +
      +It has these top-level messages:
      +	Request
      +	Reply
      +	OtherBase
      +	ReplyExtensions
      +	OtherReplyExtensions
      +	OldReply
      +	Communique
      +*/
      +package my_test
      +
      +import proto "github.com/golang/protobuf/proto"
      +import fmt "fmt"
      +import math "math"
      +import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi"
      +
      +// Reference imports to suppress errors if they are not otherwise used.
      +var _ = proto.Marshal
      +var _ = fmt.Errorf
      +var _ = math.Inf
      +
      +// This is a compile-time assertion to ensure that this generated file
      +// is compatible with the proto package it is being compiled against.
      +const _ = proto.ProtoPackageIsVersion1
      +
      +type HatType int32
      +
      +const (
      +	// deliberately skipping 0
      +	HatType_FEDORA HatType = 1
      +	HatType_FEZ    HatType = 2
      +)
      +
      +var HatType_name = map[int32]string{
      +	1: "FEDORA",
      +	2: "FEZ",
      +}
      +var HatType_value = map[string]int32{
      +	"FEDORA": 1,
      +	"FEZ":    2,
      +}
      +
      +func (x HatType) Enum() *HatType {
      +	p := new(HatType)
      +	*p = x
      +	return p
      +}
      +func (x HatType) String() string {
      +	return proto.EnumName(HatType_name, int32(x))
      +}
      +func (x *HatType) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType")
      +	if err != nil {
      +		return err
      +	}
      +	*x = HatType(value)
      +	return nil
      +}
      +
      +// This enum represents days of the week.
      +type Days int32
      +
      +const (
      +	Days_MONDAY  Days = 1
      +	Days_TUESDAY Days = 2
      +	Days_LUNDI   Days = 1
      +)
      +
      +var Days_name = map[int32]string{
      +	1: "MONDAY",
      +	2: "TUESDAY",
      +	// Duplicate value: 1: "LUNDI",
      +}
      +var Days_value = map[string]int32{
      +	"MONDAY":  1,
      +	"TUESDAY": 2,
      +	"LUNDI":   1,
      +}
      +
      +func (x Days) Enum() *Days {
      +	p := new(Days)
      +	*p = x
      +	return p
      +}
      +func (x Days) String() string {
      +	return proto.EnumName(Days_name, int32(x))
      +}
      +func (x *Days) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days")
      +	if err != nil {
      +		return err
      +	}
      +	*x = Days(value)
      +	return nil
      +}
      +
      +type Request_Color int32
      +
      +const (
      +	Request_RED   Request_Color = 0
      +	Request_GREEN Request_Color = 1
      +	Request_BLUE  Request_Color = 2
      +)
      +
      +var Request_Color_name = map[int32]string{
      +	0: "RED",
      +	1: "GREEN",
      +	2: "BLUE",
      +}
      +var Request_Color_value = map[string]int32{
      +	"RED":   0,
      +	"GREEN": 1,
      +	"BLUE":  2,
      +}
      +
      +func (x Request_Color) Enum() *Request_Color {
      +	p := new(Request_Color)
      +	*p = x
      +	return p
      +}
      +func (x Request_Color) String() string {
      +	return proto.EnumName(Request_Color_name, int32(x))
      +}
      +func (x *Request_Color) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color")
      +	if err != nil {
      +		return err
      +	}
      +	*x = Request_Color(value)
      +	return nil
      +}
      +
      +type Reply_Entry_Game int32
      +
      +const (
      +	Reply_Entry_FOOTBALL Reply_Entry_Game = 1
      +	Reply_Entry_TENNIS   Reply_Entry_Game = 2
      +)
      +
      +var Reply_Entry_Game_name = map[int32]string{
      +	1: "FOOTBALL",
      +	2: "TENNIS",
      +}
      +var Reply_Entry_Game_value = map[string]int32{
      +	"FOOTBALL": 1,
      +	"TENNIS":   2,
      +}
      +
      +func (x Reply_Entry_Game) Enum() *Reply_Entry_Game {
      +	p := new(Reply_Entry_Game)
      +	*p = x
      +	return p
      +}
      +func (x Reply_Entry_Game) String() string {
      +	return proto.EnumName(Reply_Entry_Game_name, int32(x))
      +}
      +func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error {
      +	value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game")
      +	if err != nil {
      +		return err
      +	}
      +	*x = Reply_Entry_Game(value)
      +	return nil
      +}
      +
      +// This is a message that might be sent somewhere.
      +type Request struct {
      +	Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"`
      +	//  optional imp.ImportedMessage imported_message = 2;
      +	Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"`
      +	Hat *HatType       `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"`
      +	//  optional imp.ImportedMessage.Owner owner = 6;
      +	Deadline  *float32           `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"`
      +	Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"`
      +	// This is a map field. It will generate map[int32]string.
      +	NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
      +	// This is a map field whose value type is a message.
      +	MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
      +	Reset_     *int32           `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"`
      +	// This field should not conflict with any getters.
      +	GetKey_          *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"`
      +	XXX_unrecognized []byte  `json:"-"`
      +}
      +
      +func (m *Request) Reset()         { *m = Request{} }
      +func (m *Request) String() string { return proto.CompactTextString(m) }
      +func (*Request) ProtoMessage()    {}
      +
      +const Default_Request_Hat HatType = HatType_FEDORA
      +
      +var Default_Request_Deadline float32 = float32(math.Inf(1))
      +
      +func (m *Request) GetKey() []int64 {
      +	if m != nil {
      +		return m.Key
      +	}
      +	return nil
      +}
      +
      +func (m *Request) GetHue() Request_Color {
      +	if m != nil && m.Hue != nil {
      +		return *m.Hue
      +	}
      +	return Request_RED
      +}
      +
      +func (m *Request) GetHat() HatType {
      +	if m != nil && m.Hat != nil {
      +		return *m.Hat
      +	}
      +	return Default_Request_Hat
      +}
      +
      +func (m *Request) GetDeadline() float32 {
      +	if m != nil && m.Deadline != nil {
      +		return *m.Deadline
      +	}
      +	return Default_Request_Deadline
      +}
      +
      +func (m *Request) GetSomegroup() *Request_SomeGroup {
      +	if m != nil {
      +		return m.Somegroup
      +	}
      +	return nil
      +}
      +
      +func (m *Request) GetNameMapping() map[int32]string {
      +	if m != nil {
      +		return m.NameMapping
      +	}
      +	return nil
      +}
      +
      +func (m *Request) GetMsgMapping() map[int64]*Reply {
      +	if m != nil {
      +		return m.MsgMapping
      +	}
      +	return nil
      +}
      +
      +func (m *Request) GetReset_() int32 {
      +	if m != nil && m.Reset_ != nil {
      +		return *m.Reset_
      +	}
      +	return 0
      +}
      +
      +func (m *Request) GetGetKey_() string {
      +	if m != nil && m.GetKey_ != nil {
      +		return *m.GetKey_
      +	}
      +	return ""
      +}
      +
      +type Request_SomeGroup struct {
      +	GroupField       *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"`
      +	XXX_unrecognized []byte `json:"-"`
      +}
      +
      +func (m *Request_SomeGroup) Reset()         { *m = Request_SomeGroup{} }
      +func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) }
      +func (*Request_SomeGroup) ProtoMessage()    {}
      +
      +func (m *Request_SomeGroup) GetGroupField() int32 {
      +	if m != nil && m.GroupField != nil {
      +		return *m.GroupField
      +	}
      +	return 0
      +}
      +
      +type Reply struct {
      +	Found            []*Reply_Entry            `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"`
      +	CompactKeys      []int32                   `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"`
      +	XXX_extensions   map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized []byte                    `json:"-"`
      +}
      +
      +func (m *Reply) Reset()         { *m = Reply{} }
      +func (m *Reply) String() string { return proto.CompactTextString(m) }
      +func (*Reply) ProtoMessage()    {}
      +
      +var extRange_Reply = []proto.ExtensionRange{
      +	{100, 536870911},
      +}
      +
      +func (*Reply) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_Reply
      +}
      +func (m *Reply) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +func (m *Reply) GetFound() []*Reply_Entry {
      +	if m != nil {
      +		return m.Found
      +	}
      +	return nil
      +}
      +
      +func (m *Reply) GetCompactKeys() []int32 {
      +	if m != nil {
      +		return m.CompactKeys
      +	}
      +	return nil
      +}
      +
      +type Reply_Entry struct {
      +	KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"`
      +	Value                         *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"`
      +	XMyFieldName_2                *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=myFieldName2" json:"_my_field_name_2,omitempty"`
      +	XXX_unrecognized              []byte `json:"-"`
      +}
      +
      +func (m *Reply_Entry) Reset()         { *m = Reply_Entry{} }
      +func (m *Reply_Entry) String() string { return proto.CompactTextString(m) }
      +func (*Reply_Entry) ProtoMessage()    {}
      +
      +const Default_Reply_Entry_Value int64 = 7
      +
      +func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 {
      +	if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil {
      +		return *m.KeyThatNeeds_1234Camel_CasIng
      +	}
      +	return 0
      +}
      +
      +func (m *Reply_Entry) GetValue() int64 {
      +	if m != nil && m.Value != nil {
      +		return *m.Value
      +	}
      +	return Default_Reply_Entry_Value
      +}
      +
      +func (m *Reply_Entry) GetXMyFieldName_2() int64 {
      +	if m != nil && m.XMyFieldName_2 != nil {
      +		return *m.XMyFieldName_2
      +	}
      +	return 0
      +}
      +
      +type OtherBase struct {
      +	Name             *string                   `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
      +	XXX_extensions   map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized []byte                    `json:"-"`
      +}
      +
      +func (m *OtherBase) Reset()         { *m = OtherBase{} }
      +func (m *OtherBase) String() string { return proto.CompactTextString(m) }
      +func (*OtherBase) ProtoMessage()    {}
      +
      +var extRange_OtherBase = []proto.ExtensionRange{
      +	{100, 536870911},
      +}
      +
      +func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_OtherBase
      +}
      +func (m *OtherBase) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +func (m *OtherBase) GetName() string {
      +	if m != nil && m.Name != nil {
      +		return *m.Name
      +	}
      +	return ""
      +}
      +
      +type ReplyExtensions struct {
      +	XXX_unrecognized []byte `json:"-"`
      +}
      +
      +func (m *ReplyExtensions) Reset()         { *m = ReplyExtensions{} }
      +func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) }
      +func (*ReplyExtensions) ProtoMessage()    {}
      +
      +var E_ReplyExtensions_Time = &proto.ExtensionDesc{
      +	ExtendedType:  (*Reply)(nil),
      +	ExtensionType: (*float64)(nil),
      +	Field:         101,
      +	Name:          "my.test.ReplyExtensions.time",
      +	Tag:           "fixed64,101,opt,name=time",
      +}
      +
      +var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{
      +	ExtendedType:  (*Reply)(nil),
      +	ExtensionType: (*ReplyExtensions)(nil),
      +	Field:         105,
      +	Name:          "my.test.ReplyExtensions.carrot",
      +	Tag:           "bytes,105,opt,name=carrot",
      +}
      +
      +var E_ReplyExtensions_Donut = &proto.ExtensionDesc{
      +	ExtendedType:  (*OtherBase)(nil),
      +	ExtensionType: (*ReplyExtensions)(nil),
      +	Field:         101,
      +	Name:          "my.test.ReplyExtensions.donut",
      +	Tag:           "bytes,101,opt,name=donut",
      +}
      +
      +type OtherReplyExtensions struct {
      +	Key              *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"`
      +	XXX_unrecognized []byte `json:"-"`
      +}
      +
      +func (m *OtherReplyExtensions) Reset()         { *m = OtherReplyExtensions{} }
      +func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) }
      +func (*OtherReplyExtensions) ProtoMessage()    {}
      +
      +func (m *OtherReplyExtensions) GetKey() int32 {
      +	if m != nil && m.Key != nil {
      +		return *m.Key
      +	}
      +	return 0
      +}
      +
      +type OldReply struct {
      +	XXX_extensions   map[int32]proto.Extension `json:"-"`
      +	XXX_unrecognized []byte                    `json:"-"`
      +}
      +
      +func (m *OldReply) Reset()         { *m = OldReply{} }
      +func (m *OldReply) String() string { return proto.CompactTextString(m) }
      +func (*OldReply) ProtoMessage()    {}
      +
      +func (m *OldReply) Marshal() ([]byte, error) {
      +	return proto.MarshalMessageSet(m.ExtensionMap())
      +}
      +func (m *OldReply) Unmarshal(buf []byte) error {
      +	return proto.UnmarshalMessageSet(buf, m.ExtensionMap())
      +}
      +func (m *OldReply) MarshalJSON() ([]byte, error) {
      +	return proto.MarshalMessageSetJSON(m.XXX_extensions)
      +}
      +func (m *OldReply) UnmarshalJSON(buf []byte) error {
      +	return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions)
      +}
      +
      +// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler
      +var _ proto.Marshaler = (*OldReply)(nil)
      +var _ proto.Unmarshaler = (*OldReply)(nil)
      +
      +var extRange_OldReply = []proto.ExtensionRange{
      +	{100, 2147483646},
      +}
      +
      +func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange {
      +	return extRange_OldReply
      +}
      +func (m *OldReply) ExtensionMap() map[int32]proto.Extension {
      +	if m.XXX_extensions == nil {
      +		m.XXX_extensions = make(map[int32]proto.Extension)
      +	}
      +	return m.XXX_extensions
      +}
      +
      +type Communique struct {
      +	MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"`
      +	// This is a oneof, called "union".
      +	//
      +	// Types that are valid to be assigned to Union:
      +	//	*Communique_Number
      +	//	*Communique_Name
      +	//	*Communique_Data
      +	//	*Communique_TempC
      +	//	*Communique_Height
      +	//	*Communique_Today
      +	//	*Communique_Maybe
      +	//	*Communique_Delta_
      +	//	*Communique_Msg
      +	//	*Communique_Somegroup
      +	Union            isCommunique_Union `protobuf_oneof:"union"`
      +	XXX_unrecognized []byte             `json:"-"`
      +}
      +
      +func (m *Communique) Reset()         { *m = Communique{} }
      +func (m *Communique) String() string { return proto.CompactTextString(m) }
      +func (*Communique) ProtoMessage()    {}
      +
      +type isCommunique_Union interface {
      +	isCommunique_Union()
      +}
      +
      +type Communique_Number struct {
      +	Number int32 `protobuf:"varint,5,opt,name=number,oneof"`
      +}
      +type Communique_Name struct {
      +	Name string `protobuf:"bytes,6,opt,name=name,oneof"`
      +}
      +type Communique_Data struct {
      +	Data []byte `protobuf:"bytes,7,opt,name=data,oneof"`
      +}
      +type Communique_TempC struct {
      +	TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"`
      +}
      +type Communique_Height struct {
      +	Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"`
      +}
      +type Communique_Today struct {
      +	Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"`
      +}
      +type Communique_Maybe struct {
      +	Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"`
      +}
      +type Communique_Delta_ struct {
      +	Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"`
      +}
      +type Communique_Msg struct {
      +	Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"`
      +}
      +type Communique_Somegroup struct {
      +	Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"`
      +}
      +
      +func (*Communique_Number) isCommunique_Union()    {}
      +func (*Communique_Name) isCommunique_Union()      {}
      +func (*Communique_Data) isCommunique_Union()      {}
      +func (*Communique_TempC) isCommunique_Union()     {}
      +func (*Communique_Height) isCommunique_Union()    {}
      +func (*Communique_Today) isCommunique_Union()     {}
      +func (*Communique_Maybe) isCommunique_Union()     {}
      +func (*Communique_Delta_) isCommunique_Union()    {}
      +func (*Communique_Msg) isCommunique_Union()       {}
      +func (*Communique_Somegroup) isCommunique_Union() {}
      +
      +func (m *Communique) GetUnion() isCommunique_Union {
      +	if m != nil {
      +		return m.Union
      +	}
      +	return nil
      +}
      +
      +func (m *Communique) GetMakeMeCry() bool {
      +	if m != nil && m.MakeMeCry != nil {
      +		return *m.MakeMeCry
      +	}
      +	return false
      +}
      +
      +func (m *Communique) GetNumber() int32 {
      +	if x, ok := m.GetUnion().(*Communique_Number); ok {
      +		return x.Number
      +	}
      +	return 0
      +}
      +
      +func (m *Communique) GetName() string {
      +	if x, ok := m.GetUnion().(*Communique_Name); ok {
      +		return x.Name
      +	}
      +	return ""
      +}
      +
      +func (m *Communique) GetData() []byte {
      +	if x, ok := m.GetUnion().(*Communique_Data); ok {
      +		return x.Data
      +	}
      +	return nil
      +}
      +
      +func (m *Communique) GetTempC() float64 {
      +	if x, ok := m.GetUnion().(*Communique_TempC); ok {
      +		return x.TempC
      +	}
      +	return 0
      +}
      +
      +func (m *Communique) GetHeight() float32 {
      +	if x, ok := m.GetUnion().(*Communique_Height); ok {
      +		return x.Height
      +	}
      +	return 0
      +}
      +
      +func (m *Communique) GetToday() Days {
      +	if x, ok := m.GetUnion().(*Communique_Today); ok {
      +		return x.Today
      +	}
      +	return Days_MONDAY
      +}
      +
      +func (m *Communique) GetMaybe() bool {
      +	if x, ok := m.GetUnion().(*Communique_Maybe); ok {
      +		return x.Maybe
      +	}
      +	return false
      +}
      +
      +func (m *Communique) GetDelta() int32 {
      +	if x, ok := m.GetUnion().(*Communique_Delta_); ok {
      +		return x.Delta
      +	}
      +	return 0
      +}
      +
      +func (m *Communique) GetMsg() *Reply {
      +	if x, ok := m.GetUnion().(*Communique_Msg); ok {
      +		return x.Msg
      +	}
      +	return nil
      +}
      +
      +func (m *Communique) GetSomegroup() *Communique_SomeGroup {
      +	if x, ok := m.GetUnion().(*Communique_Somegroup); ok {
      +		return x.Somegroup
      +	}
      +	return nil
      +}
      +
      +// XXX_OneofFuncs is for the internal use of the proto package.
      +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
      +	return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{
      +		(*Communique_Number)(nil),
      +		(*Communique_Name)(nil),
      +		(*Communique_Data)(nil),
      +		(*Communique_TempC)(nil),
      +		(*Communique_Height)(nil),
      +		(*Communique_Today)(nil),
      +		(*Communique_Maybe)(nil),
      +		(*Communique_Delta_)(nil),
      +		(*Communique_Msg)(nil),
      +		(*Communique_Somegroup)(nil),
      +	}
      +}
      +
      +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
      +	m := msg.(*Communique)
      +	// union
      +	switch x := m.Union.(type) {
      +	case *Communique_Number:
      +		b.EncodeVarint(5<<3 | proto.WireVarint)
      +		b.EncodeVarint(uint64(x.Number))
      +	case *Communique_Name:
      +		b.EncodeVarint(6<<3 | proto.WireBytes)
      +		b.EncodeStringBytes(x.Name)
      +	case *Communique_Data:
      +		b.EncodeVarint(7<<3 | proto.WireBytes)
      +		b.EncodeRawBytes(x.Data)
      +	case *Communique_TempC:
      +		b.EncodeVarint(8<<3 | proto.WireFixed64)
      +		b.EncodeFixed64(math.Float64bits(x.TempC))
      +	case *Communique_Height:
      +		b.EncodeVarint(9<<3 | proto.WireFixed32)
      +		b.EncodeFixed32(uint64(math.Float32bits(x.Height)))
      +	case *Communique_Today:
      +		b.EncodeVarint(10<<3 | proto.WireVarint)
      +		b.EncodeVarint(uint64(x.Today))
      +	case *Communique_Maybe:
      +		t := uint64(0)
      +		if x.Maybe {
      +			t = 1
      +		}
      +		b.EncodeVarint(11<<3 | proto.WireVarint)
      +		b.EncodeVarint(t)
      +	case *Communique_Delta_:
      +		b.EncodeVarint(12<<3 | proto.WireVarint)
      +		b.EncodeZigzag32(uint64(x.Delta))
      +	case *Communique_Msg:
      +		b.EncodeVarint(13<<3 | proto.WireBytes)
      +		if err := b.EncodeMessage(x.Msg); err != nil {
      +			return err
      +		}
      +	case *Communique_Somegroup:
      +		b.EncodeVarint(14<<3 | proto.WireStartGroup)
      +		if err := b.Marshal(x.Somegroup); err != nil {
      +			return err
      +		}
      +		b.EncodeVarint(14<<3 | proto.WireEndGroup)
      +	case nil:
      +	default:
      +		return fmt.Errorf("Communique.Union has unexpected type %T", x)
      +	}
      +	return nil
      +}
      +
      +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
      +	m := msg.(*Communique)
      +	switch tag {
      +	case 5: // union.number
      +		if wire != proto.WireVarint {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeVarint()
      +		m.Union = &Communique_Number{int32(x)}
      +		return true, err
      +	case 6: // union.name
      +		if wire != proto.WireBytes {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeStringBytes()
      +		m.Union = &Communique_Name{x}
      +		return true, err
      +	case 7: // union.data
      +		if wire != proto.WireBytes {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeRawBytes(true)
      +		m.Union = &Communique_Data{x}
      +		return true, err
      +	case 8: // union.temp_c
      +		if wire != proto.WireFixed64 {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeFixed64()
      +		m.Union = &Communique_TempC{math.Float64frombits(x)}
      +		return true, err
      +	case 9: // union.height
      +		if wire != proto.WireFixed32 {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeFixed32()
      +		m.Union = &Communique_Height{math.Float32frombits(uint32(x))}
      +		return true, err
      +	case 10: // union.today
      +		if wire != proto.WireVarint {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeVarint()
      +		m.Union = &Communique_Today{Days(x)}
      +		return true, err
      +	case 11: // union.maybe
      +		if wire != proto.WireVarint {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeVarint()
      +		m.Union = &Communique_Maybe{x != 0}
      +		return true, err
      +	case 12: // union.delta
      +		if wire != proto.WireVarint {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeZigzag32()
      +		m.Union = &Communique_Delta_{int32(x)}
      +		return true, err
      +	case 13: // union.msg
      +		if wire != proto.WireBytes {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		msg := new(Reply)
      +		err := b.DecodeMessage(msg)
      +		m.Union = &Communique_Msg{msg}
      +		return true, err
      +	case 14: // union.somegroup
      +		if wire != proto.WireStartGroup {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		msg := new(Communique_SomeGroup)
      +		err := b.DecodeGroup(msg)
      +		m.Union = &Communique_Somegroup{msg}
      +		return true, err
      +	default:
      +		return false, nil
      +	}
      +}
      +
      +func _Communique_OneofSizer(msg proto.Message) (n int) {
      +	m := msg.(*Communique)
      +	// union
      +	switch x := m.Union.(type) {
      +	case *Communique_Number:
      +		n += proto.SizeVarint(5<<3 | proto.WireVarint)
      +		n += proto.SizeVarint(uint64(x.Number))
      +	case *Communique_Name:
      +		n += proto.SizeVarint(6<<3 | proto.WireBytes)
      +		n += proto.SizeVarint(uint64(len(x.Name)))
      +		n += len(x.Name)
      +	case *Communique_Data:
      +		n += proto.SizeVarint(7<<3 | proto.WireBytes)
      +		n += proto.SizeVarint(uint64(len(x.Data)))
      +		n += len(x.Data)
      +	case *Communique_TempC:
      +		n += proto.SizeVarint(8<<3 | proto.WireFixed64)
      +		n += 8
      +	case *Communique_Height:
      +		n += proto.SizeVarint(9<<3 | proto.WireFixed32)
      +		n += 4
      +	case *Communique_Today:
      +		n += proto.SizeVarint(10<<3 | proto.WireVarint)
      +		n += proto.SizeVarint(uint64(x.Today))
      +	case *Communique_Maybe:
      +		n += proto.SizeVarint(11<<3 | proto.WireVarint)
      +		n += 1
      +	case *Communique_Delta_:
      +		n += proto.SizeVarint(12<<3 | proto.WireVarint)
      +		n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31))))
      +	case *Communique_Msg:
      +		s := proto.Size(x.Msg)
      +		n += proto.SizeVarint(13<<3 | proto.WireBytes)
      +		n += proto.SizeVarint(uint64(s))
      +		n += s
      +	case *Communique_Somegroup:
      +		n += proto.SizeVarint(14<<3 | proto.WireStartGroup)
      +		n += proto.Size(x.Somegroup)
      +		n += proto.SizeVarint(14<<3 | proto.WireEndGroup)
      +	case nil:
      +	default:
      +		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
      +	}
      +	return n
      +}
      +
      +type Communique_SomeGroup struct {
      +	Member           *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"`
      +	XXX_unrecognized []byte  `json:"-"`
      +}
      +
      +func (m *Communique_SomeGroup) Reset()         { *m = Communique_SomeGroup{} }
      +func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) }
      +func (*Communique_SomeGroup) ProtoMessage()    {}
      +
      +func (m *Communique_SomeGroup) GetMember() string {
      +	if m != nil && m.Member != nil {
      +		return *m.Member
      +	}
      +	return ""
      +}
      +
      +type Communique_Delta struct {
      +	XXX_unrecognized []byte `json:"-"`
      +}
      +
      +func (m *Communique_Delta) Reset()         { *m = Communique_Delta{} }
      +func (m *Communique_Delta) String() string { return proto.CompactTextString(m) }
      +func (*Communique_Delta) ProtoMessage()    {}
      +
      +var E_Tag = &proto.ExtensionDesc{
      +	ExtendedType:  (*Reply)(nil),
      +	ExtensionType: (*string)(nil),
      +	Field:         103,
      +	Name:          "my.test.tag",
      +	Tag:           "bytes,103,opt,name=tag",
      +}
      +
      +var E_Donut = &proto.ExtensionDesc{
      +	ExtendedType:  (*Reply)(nil),
      +	ExtensionType: (*OtherReplyExtensions)(nil),
      +	Field:         106,
      +	Name:          "my.test.donut",
      +	Tag:           "bytes,106,opt,name=donut",
      +}
      +
      +func init() {
      +	proto.RegisterType((*Request)(nil), "my.test.Request")
      +	proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup")
      +	proto.RegisterType((*Reply)(nil), "my.test.Reply")
      +	proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry")
      +	proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase")
      +	proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions")
      +	proto.RegisterType((*OtherReplyExtensions)(nil), "my.test.OtherReplyExtensions")
      +	proto.RegisterType((*OldReply)(nil), "my.test.OldReply")
      +	proto.RegisterType((*Communique)(nil), "my.test.Communique")
      +	proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup")
      +	proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta")
      +	proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value)
      +	proto.RegisterEnum("my.test.Days", Days_name, Days_value)
      +	proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value)
      +	proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value)
      +	proto.RegisterExtension(E_ReplyExtensions_Time)
      +	proto.RegisterExtension(E_ReplyExtensions_Carrot)
      +	proto.RegisterExtension(E_ReplyExtensions_Donut)
      +	proto.RegisterExtension(E_Tag)
      +	proto.RegisterExtension(E_Donut)
      +}
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto
      new file mode 100644
      index 00000000..8e709463
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto
      @@ -0,0 +1,156 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto2";
      +
      +// This package holds interesting messages.
      +package my.test;  // dotted package name
      +
      +//import "imp.proto";
      +import "multi/multi1.proto";  // unused import
      +
      +enum HatType {
      +  // deliberately skipping 0
      +  FEDORA = 1;
      +  FEZ = 2;
      +}
      +
      +// This enum represents days of the week.
      +enum Days {
      +  option allow_alias = true;
      +
      +  MONDAY = 1;
      +  TUESDAY = 2;
      +  LUNDI = 1;  // same value as MONDAY
      +}
      +
      +// This is a message that might be sent somewhere.
      +message Request {
      +  enum Color {
      +    RED = 0;
      +    GREEN = 1;
      +    BLUE = 2;
      +  }
      +  repeated int64 key = 1;
      +//  optional imp.ImportedMessage imported_message = 2;
      +  optional Color hue = 3; // no default
      +  optional HatType hat = 4 [default=FEDORA];
      +//  optional imp.ImportedMessage.Owner owner = 6;
      +  optional float deadline = 7 [default=inf];
      +  optional group SomeGroup = 8 {
      +    optional int32 group_field = 9;
      +  }
      +
      +  // These foreign types are in imp2.proto,
      +  // which is publicly imported by imp.proto.
      +//  optional imp.PubliclyImportedMessage pub = 10;
      +//  optional imp.PubliclyImportedEnum pub_enum = 13 [default=HAIR];
      +
      +
      +  // This is a map field. It will generate map[int32]string.
      +  map<int32, string> name_mapping = 14;
      +  // This is a map field whose value type is a message.
      +  map<sint64, Reply> msg_mapping = 15;
      +
      +  optional int32 reset = 12;
      +  // This field should not conflict with any getters.
      +  optional string get_key = 16;
      +}
      +
      +message Reply {
      +  message Entry {
      +    required int64 key_that_needs_1234camel_CasIng = 1;
      +    optional int64 value = 2 [default=7];
      +    optional int64 _my_field_name_2 = 3;
      +    enum Game {
      +      FOOTBALL = 1;
      +      TENNIS = 2;
      +    }
      +  }
      +  repeated Entry found = 1;
      +  repeated int32 compact_keys = 2 [packed=true];
      +  extensions 100 to max;
      +}
      +
      +message OtherBase {
      +  optional string name = 1;
      +  extensions 100 to max;
      +}
      +
      +message ReplyExtensions {
      +  extend Reply {
      +    optional double time = 101;
      +    optional ReplyExtensions carrot = 105;
      +  }
      +  extend OtherBase {
      +    optional ReplyExtensions donut = 101;
      +  }
      +}
      +
      +message OtherReplyExtensions {
      +  optional int32 key = 1;
      +}
      +
      +// top-level extension
      +extend Reply {
      +  optional string tag = 103;
      +  optional OtherReplyExtensions donut = 106;
      +//  optional imp.ImportedMessage elephant = 107;  // extend with message from another file.
      +}
      +
      +message OldReply {
      +  // Extensions will be encoded in MessageSet wire format.
      +  option message_set_wire_format = true;
      +  extensions 100 to max;
      +}
      +
      +message Communique {
      +  optional bool make_me_cry = 1;
      +
      +  // This is a oneof, called "union".
      +  oneof union {
      +    int32 number = 5;
      +    string name = 6;
      +    bytes data = 7;
      +    double temp_c = 8;
      +    float height = 9;
      +    Days today = 10;
      +    bool maybe = 11;
      +    sint32 delta = 12;  // name will conflict with Delta below
      +    Reply msg = 13;
      +    group SomeGroup = 14 {
      +      optional string member = 15;
      +    }
      +  }
      +
      +  message Delta {}
      +}
      +
      diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto
      new file mode 100644
      index 00000000..c994914e
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto
      @@ -0,0 +1,52 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2014 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto3";
      +
      +package proto3;
      +
      +message Request {
      +  enum Flavour {
      +    SWEET = 0;
      +    SOUR = 1;
      +    UMAMI = 2;
      +    GOPHERLICIOUS = 3;
      +  }
      +  string name = 1;
      +  repeated int64 key = 2;
      +  Flavour taste = 3;
      +  Book book = 4;
      +}
      +
      +message Book {
      +  string title = 1;
      +  bytes raw_data = 2;
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
      new file mode 100644
      index 00000000..89e07ae1
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/any.go
      @@ -0,0 +1,136 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2016 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +package ptypes
      +
      +// This file implements functions to marshal proto.Message to/from
      +// google.protobuf.Any message.
      +
      +import (
      +	"fmt"
      +	"reflect"
      +	"strings"
      +
      +	"github.com/golang/protobuf/proto"
      +	"github.com/golang/protobuf/ptypes/any"
      +)
      +
      +const googleApis = "type.googleapis.com/"
      +
      +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
      +//
      +// Note that regular type assertions should be done using the Is
      +// function. AnyMessageName is provided for less common use cases like filtering a
      +// sequence of Any messages based on a set of allowed message type names.
      +func AnyMessageName(any *any.Any) (string, error) {
      +	slash := strings.LastIndex(any.TypeUrl, "/")
      +	if slash < 0 {
      +		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
      +	}
      +	return any.TypeUrl[slash+1:], nil
      +}
      +
      +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
      +func MarshalAny(pb proto.Message) (*any.Any, error) {
      +	value, err := proto.Marshal(pb)
      +	if err != nil {
      +		return nil, err
      +	}
      +	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
      +}
      +
      +// DynamicAny is a value that can be passed to UnmarshalAny to automatically
      +// allocate a proto.Message for the type specified in a google.protobuf.Any
      +// message. The allocated message is stored in the embedded proto.Message.
      +//
      +// Example:
      +//
      +//   var x ptypes.DynamicAny
      +//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
      +//   fmt.Printf("unmarshaled message: %v", x.Message)
      +type DynamicAny struct {
      +	proto.Message
      +}
      +
      +// Empty returns a new proto.Message of the type specified in a
      +// google.protobuf.Any message. It returns an error if corresponding message
      +// type isn't linked in.
      +func Empty(any *any.Any) (proto.Message, error) {
      +	aname, err := AnyMessageName(any)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	t := proto.MessageType(aname)
      +	if t == nil {
      +		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
      +	}
      +	return reflect.New(t.Elem()).Interface().(proto.Message), nil
      +}
      +
      +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
      +// message and places the decoded result in pb. It returns an error if type of
      +// contents of Any message does not match type of pb message.
      +//
      +// pb can be a proto.Message, or a *DynamicAny.
      +func UnmarshalAny(any *any.Any, pb proto.Message) error {
      +	if d, ok := pb.(*DynamicAny); ok {
      +		if d.Message == nil {
      +			var err error
      +			d.Message, err = Empty(any)
      +			if err != nil {
      +				return err
      +			}
      +		}
      +		return UnmarshalAny(any, d.Message)
      +	}
      +
      +	aname, err := AnyMessageName(any)
      +	if err != nil {
      +		return err
      +	}
      +
      +	mname := proto.MessageName(pb)
      +	if aname != mname {
      +		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
      +	}
      +	return proto.Unmarshal(any.Value, pb)
      +}
      +
      +// Is returns true if any value contains a given message type.
      +func Is(any *any.Any, pb proto.Message) bool {
      +	aname, err := AnyMessageName(any)
      +	if err != nil {
      +		return false
      +	}
      +
      +	return aname == proto.MessageName(pb)
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
      new file mode 100644
      index 00000000..a26093fc
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
      @@ -0,0 +1,111 @@
      +// Code generated by protoc-gen-go.
      +// source: github.com/golang/protobuf/ptypes/any/any.proto
      +// DO NOT EDIT!
      +
      +/*
      +Package any is a generated protocol buffer package.
      +
      +It is generated from these files:
      +	github.com/golang/protobuf/ptypes/any/any.proto
      +
      +It has these top-level messages:
      +	Any
      +*/
      +package any
      +
      +import proto "github.com/golang/protobuf/proto"
      +import fmt "fmt"
      +import math "math"
      +
      +// Reference imports to suppress errors if they are not otherwise used.
      +var _ = proto.Marshal
      +var _ = fmt.Errorf
      +var _ = math.Inf
      +
      +// This is a compile-time assertion to ensure that this generated file
      +// is compatible with the proto package it is being compiled against.
      +const _ = proto.ProtoPackageIsVersion1
      +
      +// `Any` contains an arbitrary serialized message along with a URL
      +// that describes the type of the serialized message.
      +//
      +//
      +// JSON
      +// ====
      +// The JSON representation of an `Any` value uses the regular
      +// representation of the deserialized, embedded message, with an
      +// additional field `@type` which contains the type URL. Example:
      +//
      +//     package google.profile;
      +//     message Person {
      +//       string first_name = 1;
      +//       string last_name = 2;
      +//     }
      +//
      +//     {
      +//       "@type": "type.googleapis.com/google.profile.Person",
      +//       "firstName": <string>,
      +//       "lastName": <string>
      +//     }
      +//
      +// If the embedded message type is well-known and has a custom JSON
      +// representation, that representation will be embedded adding a field
      +// `value` which holds the custom JSON in addition to the `@type`
      +// field. Example (for message [google.protobuf.Duration][]):
      +//
      +//     {
      +//       "@type": "type.googleapis.com/google.protobuf.Duration",
      +//       "value": "1.212s"
      +//     }
      +//
      +type Any struct {
      +	// A URL/resource name whose content describes the type of the
      +	// serialized message.
      +	//
      +	// For URLs which use the schema `http`, `https`, or no schema, the
      +	// following restrictions and interpretations apply:
      +	//
      +	// * If no schema is provided, `https` is assumed.
      +	// * The last segment of the URL's path must represent the fully
      +	//   qualified name of the type (as in `path/google.protobuf.Duration`).
      +	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
      +	//   value in binary format, or produce an error.
      +	// * Applications are allowed to cache lookup results based on the
      +	//   URL, or have them precompiled into a binary to avoid any
      +	//   lookup. Therefore, binary compatibility needs to be preserved
      +	//   on changes to types. (Use versioned type names to manage
      +	//   breaking changes.)
      +	//
      +	// Schemas other than `http`, `https` (or the empty schema) might be
      +	// used with implementation specific semantics.
      +	//
      +	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
      +	// Must be valid serialized data of the above specified type.
      +	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
      +}
      +
      +func (m *Any) Reset()                    { *m = Any{} }
      +func (m *Any) String() string            { return proto.CompactTextString(m) }
      +func (*Any) ProtoMessage()               {}
      +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
      +func (*Any) XXX_WellKnownType() string   { return "Any" }
      +
      +func init() {
      +	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
      +}
      +
      +var fileDescriptor0 = []byte{
      +	// 184 bytes of a gzipped FileDescriptorProto
      +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
      +	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
      +	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
      +	0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
      +	0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
      +	0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
      +	0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0x04, 0x14, 0xe7, 0x09, 0x82, 0x70, 0x9c,
      +	0x8a, 0xb8, 0x84, 0x81, 0x96, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0x01, 0x34, 0x2c, 0x00, 0xc4, 0x09,
      +	0x60, 0x8c, 0x52, 0x25, 0xca, 0x71, 0x0b, 0x18, 0x19, 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, 0xad,
      +	0x62, 0x92, 0x73, 0x87, 0x98, 0x16, 0x00, 0x55, 0xa5, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97,
      +	0x5f, 0x9e, 0x17, 0x02, 0x52, 0x9d, 0xc4, 0x06, 0xd6, 0x6e, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff,
      +	0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00,
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
      new file mode 100644
      index 00000000..9b907566
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
      @@ -0,0 +1,100 @@
      +// Protocol Buffers - Google's data interchange format
      +// Copyright 2008 Google Inc.  All rights reserved.
      +// https://developers.google.com/protocol-buffers/
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto3";
      +
      +package google.protobuf;
      +option go_package = "github.com/golang/protobuf/ptypes/any";
      +
      +option csharp_namespace = "Google.Protobuf.WellKnownTypes";
      +option java_package = "com.google.protobuf";
      +option java_outer_classname = "AnyProto";
      +option java_multiple_files = true;
      +option java_generate_equals_and_hash = true;
      +option objc_class_prefix = "GPB";
      +
      +// `Any` contains an arbitrary serialized message along with a URL
      +// that describes the type of the serialized message.
      +//
      +//
      +// JSON
      +// ====
      +// The JSON representation of an `Any` value uses the regular
      +// representation of the deserialized, embedded message, with an
      +// additional field `@type` which contains the type URL. Example:
      +//
      +//     package google.profile;
      +//     message Person {
      +//       string first_name = 1;
      +//       string last_name = 2;
      +//     }
      +//
      +//     {
      +//       "@type": "type.googleapis.com/google.profile.Person",
      +//       "firstName": <string>,
      +//       "lastName": <string>
      +//     }
      +//
      +// If the embedded message type is well-known and has a custom JSON
      +// representation, that representation will be embedded adding a field
      +// `value` which holds the custom JSON in addition to the `@type`
      +// field. Example (for message [google.protobuf.Duration][]):
      +//
      +//     {
      +//       "@type": "type.googleapis.com/google.protobuf.Duration",
      +//       "value": "1.212s"
      +//     }
      +//
      +message Any {
      +  // A URL/resource name whose content describes the type of the
      +  // serialized message.
      +  //
      +  // For URLs which use the schema `http`, `https`, or no schema, the
      +  // following restrictions and interpretations apply:
      +  //
      +  // * If no schema is provided, `https` is assumed.
      +  // * The last segment of the URL's path must represent the fully
      +  //   qualified name of the type (as in `path/google.protobuf.Duration`).
      +  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
      +  //   value in binary format, or produce an error.
      +  // * Applications are allowed to cache lookup results based on the
      +  //   URL, or have them precompiled into a binary to avoid any
      +  //   lookup. Therefore, binary compatibility needs to be preserved
      +  //   on changes to types. (Use versioned type names to manage
      +  //   breaking changes.)
      +  //
      +  // Schemas other than `http`, `https` (or the empty schema) might be
      +  // used with implementation specific semantics.
      +  //
      +  string type_url = 1;
      +
      +  // Must be valid serialized data of the above specified type.
      +  bytes value = 2;
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/any_test.go b/vendor/github.com/golang/protobuf/ptypes/any_test.go
      new file mode 100644
      index 00000000..ed675b48
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/any_test.go
      @@ -0,0 +1,113 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2016 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +package ptypes
      +
      +import (
      +	"testing"
      +
      +	"github.com/golang/protobuf/proto"
      +	pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
      +	"github.com/golang/protobuf/ptypes/any"
      +)
      +
      +func TestMarshalUnmarshal(t *testing.T) {
      +	orig := &any.Any{Value: []byte("test")}
      +
      +	packed, err := MarshalAny(orig)
      +	if err != nil {
      +		t.Errorf("MarshalAny(%+v): got: _, %v exp: _, nil", orig, err)
      +	}
      +
      +	unpacked := &any.Any{}
      +	err = UnmarshalAny(packed, unpacked)
      +	if err != nil || !proto.Equal(unpacked, orig) {
      +		t.Errorf("got: %v, %+v; want nil, %+v", err, unpacked, orig)
      +	}
      +}
      +
      +func TestIs(t *testing.T) {
      +	a, err := MarshalAny(&pb.FileDescriptorProto{})
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	if Is(a, &pb.DescriptorProto{}) {
      +		t.Error("FileDescriptorProto is not a DescriptorProto, but Is says it is")
      +	}
      +	if !Is(a, &pb.FileDescriptorProto{}) {
      +		t.Error("FileDescriptorProto is indeed a FileDescriptorProto, but Is says it is not")
      +	}
      +}
      +
      +func TestIsDifferentUrlPrefixes(t *testing.T) {
      +	m := &pb.FileDescriptorProto{}
      +	a := &any.Any{TypeUrl: "foo/bar/" + proto.MessageName(m)}
      +	if !Is(a, m) {
      +		t.Errorf("message with type url %q didn't satisfy Is for type %q", a.TypeUrl, proto.MessageName(m))
      +	}
      +}
      +
      +func TestUnmarshalDynamic(t *testing.T) {
      +	want := &pb.FileDescriptorProto{Name: proto.String("foo")}
      +	a, err := MarshalAny(want)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	var got DynamicAny
      +	if err := UnmarshalAny(a, &got); err != nil {
      +		t.Fatal(err)
      +	}
      +	if !proto.Equal(got.Message, want) {
      +		t.Errorf("invalid result from UnmarshalAny, got %q want %q", got.Message, want)
      +	}
      +}
      +
      +func TestEmpty(t *testing.T) {
      +	want := &pb.FileDescriptorProto{}
      +	a, err := MarshalAny(want)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	got, err := Empty(a)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	if !proto.Equal(got, want) {
      +		t.Errorf("unequal empty message, got %q, want %q", got, want)
      +	}
      +
+	// That's a valid type_url for a message which shouldn't be linked into this
+	// test binary. We want an error.
      +	a.TypeUrl = "type.googleapis.com/google.protobuf.FieldMask"
      +	if _, err := Empty(a); err == nil {
      +		t.Errorf("got no error for an attempt to create a message of type %q, which shouldn't be linked in", a.TypeUrl)
      +	}
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
      new file mode 100644
      index 00000000..c0d595da
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
      @@ -0,0 +1,35 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2016 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +/*
      +Package ptypes contains code for interacting with well-known types.
      +*/
      +package ptypes
      diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
      new file mode 100644
      index 00000000..65cb0f8e
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
      @@ -0,0 +1,102 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2016 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +package ptypes
      +
      +// This file implements conversions between google.protobuf.Duration
      +// and time.Duration.
      +
      +import (
      +	"errors"
      +	"fmt"
      +	"time"
      +
      +	durpb "github.com/golang/protobuf/ptypes/duration"
      +)
      +
      +const (
      +	// Range of a durpb.Duration in seconds, as specified in
      +	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
      +	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
      +	minSeconds = -maxSeconds
      +)
      +
      +// validateDuration determines whether the durpb.Duration is valid according to the
      +// definition in google/protobuf/duration.proto. A valid durpb.Duration
      +// may still be too large to fit into a time.Duration (the range of durpb.Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
      +func validateDuration(d *durpb.Duration) error {
      +	if d == nil {
      +		return errors.New("duration: nil Duration")
      +	}
      +	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
      +		return fmt.Errorf("duration: %v: seconds out of range", d)
      +	}
      +	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
      +		return fmt.Errorf("duration: %v: nanos out of range", d)
      +	}
      +	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
      +	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
      +		return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
      +	}
      +	return nil
      +}
      +
      +// Duration converts a durpb.Duration to a time.Duration. Duration
      +// returns an error if the durpb.Duration is invalid or is too large to be
      +// represented in a time.Duration.
      +func Duration(p *durpb.Duration) (time.Duration, error) {
      +	if err := validateDuration(p); err != nil {
      +		return 0, err
      +	}
      +	d := time.Duration(p.Seconds) * time.Second
      +	if int64(d/time.Second) != p.Seconds {
      +		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
      +	}
      +	if p.Nanos != 0 {
      +		d += time.Duration(p.Nanos)
      +		if (d < 0) != (p.Nanos < 0) {
      +			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
      +		}
      +	}
      +	return d, nil
      +}
      +
      +// DurationProto converts a time.Duration to a durpb.Duration.
      +func DurationProto(d time.Duration) *durpb.Duration {
      +	nanos := d.Nanoseconds()
      +	secs := nanos / 1e9
      +	nanos -= secs * 1e9
      +	return &durpb.Duration{
      +		Seconds: secs,
      +		Nanos:   int32(nanos),
      +	}
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
      new file mode 100644
      index 00000000..593c02a9
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
      @@ -0,0 +1,107 @@
      +// Code generated by protoc-gen-go.
      +// source: github.com/golang/protobuf/ptypes/duration/duration.proto
      +// DO NOT EDIT!
      +
      +/*
      +Package duration is a generated protocol buffer package.
      +
      +It is generated from these files:
      +	github.com/golang/protobuf/ptypes/duration/duration.proto
      +
      +It has these top-level messages:
      +	Duration
      +*/
      +package duration
      +
      +import proto "github.com/golang/protobuf/proto"
      +import fmt "fmt"
      +import math "math"
      +
      +// Reference imports to suppress errors if they are not otherwise used.
      +var _ = proto.Marshal
      +var _ = fmt.Errorf
      +var _ = math.Inf
      +
      +// This is a compile-time assertion to ensure that this generated file
      +// is compatible with the proto package it is being compiled against.
      +const _ = proto.ProtoPackageIsVersion1
      +
      +// A Duration represents a signed, fixed-length span of time represented
      +// as a count of seconds and fractions of seconds at nanosecond
      +// resolution. It is independent of any calendar and concepts like "day"
      +// or "month". It is related to Timestamp in that the difference between
      +// two Timestamp values is a Duration and it can be added or subtracted
      +// from a Timestamp. Range is approximately +-10,000 years.
      +//
      +// Example 1: Compute Duration from two Timestamps in pseudo code.
      +//
      +//     Timestamp start = ...;
      +//     Timestamp end = ...;
      +//     Duration duration = ...;
      +//
      +//     duration.seconds = end.seconds - start.seconds;
      +//     duration.nanos = end.nanos - start.nanos;
      +//
      +//     if (duration.seconds < 0 && duration.nanos > 0) {
      +//       duration.seconds += 1;
      +//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
      +//       duration.seconds -= 1;
      +//       duration.nanos += 1000000000;
      +//     }
      +//
      +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
      +//
      +//     Timestamp start = ...;
      +//     Duration duration = ...;
      +//     Timestamp end = ...;
      +//
      +//     end.seconds = start.seconds + duration.seconds;
      +//     end.nanos = start.nanos + duration.nanos;
      +//
      +//     if (end.nanos < 0) {
      +//       end.seconds -= 1;
      +//       end.nanos += 1000000000;
      +//     } else if (end.nanos >= 1000000000) {
      +//       end.seconds += 1;
      +//       end.nanos -= 1000000000;
      +//     }
      +//
      +type Duration struct {
      +	// Signed seconds of the span of time. Must be from -315,576,000,000
      +	// to +315,576,000,000 inclusive.
      +	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
      +	// Signed fractions of a second at nanosecond resolution of the span
      +	// of time. Durations less than one second are represented with a 0
      +	// `seconds` field and a positive or negative `nanos` field. For durations
      +	// of one second or more, a non-zero value for the `nanos` field must be
      +	// of the same sign as the `seconds` field. Must be from -999,999,999
      +	// to +999,999,999 inclusive.
      +	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
      +}
      +
      +func (m *Duration) Reset()                    { *m = Duration{} }
      +func (m *Duration) String() string            { return proto.CompactTextString(m) }
      +func (*Duration) ProtoMessage()               {}
      +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
      +func (*Duration) XXX_WellKnownType() string   { return "Duration" }
      +
      +func init() {
      +	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
      +}
      +
      +var fileDescriptor0 = []byte{
      +	// 187 bytes of a gzipped FileDescriptorProto
      +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
      +	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
      +	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29,
      +	0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3,
      +	0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8,
      +	0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60,
      +	0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0xa0, 0x38, 0x6b, 0x10,
      +	0x84, 0xe3, 0x54, 0xc3, 0x25, 0x0c, 0x74, 0x82, 0x1e, 0x9a, 0x91, 0x4e, 0xbc, 0x30, 0x03, 0x03,
      +	0x40, 0x22, 0x01, 0x8c, 0x51, 0x5a, 0xc4, 0xbb, 0x77, 0x01, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7,
      +	0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0x73, 0x03, 0xa0, 0x4a, 0xf5, 0xc2, 0x53, 0x73, 0x72,
      +	0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x5a, 0x92, 0xd8, 0xc0, 0x66, 0x18, 0x03, 0x02, 0x00,
      +	0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00,
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
      new file mode 100644
      index 00000000..9be52f69
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
      @@ -0,0 +1,97 @@
      +// Protocol Buffers - Google's data interchange format
      +// Copyright 2008 Google Inc.  All rights reserved.
      +// https://developers.google.com/protocol-buffers/
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto3";
      +
      +package google.protobuf;
      +option go_package = "github.com/golang/protobuf/ptypes/duration";
      +
      +option csharp_namespace = "Google.Protobuf.WellKnownTypes";
      +option java_package = "com.google.protobuf";
      +option java_outer_classname = "DurationProto";
      +option java_multiple_files = true;
      +option java_generate_equals_and_hash = true;
      +option objc_class_prefix = "GPB";
      +
      +// A Duration represents a signed, fixed-length span of time represented
      +// as a count of seconds and fractions of seconds at nanosecond
      +// resolution. It is independent of any calendar and concepts like "day"
      +// or "month". It is related to Timestamp in that the difference between
      +// two Timestamp values is a Duration and it can be added or subtracted
      +// from a Timestamp. Range is approximately +-10,000 years.
      +//
      +// Example 1: Compute Duration from two Timestamps in pseudo code.
      +//
      +//     Timestamp start = ...;
      +//     Timestamp end = ...;
      +//     Duration duration = ...;
      +//
      +//     duration.seconds = end.seconds - start.seconds;
      +//     duration.nanos = end.nanos - start.nanos;
      +//
      +//     if (duration.seconds < 0 && duration.nanos > 0) {
      +//       duration.seconds += 1;
      +//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
      +//       duration.seconds -= 1;
      +//       duration.nanos += 1000000000;
      +//     }
      +//
      +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
      +//
      +//     Timestamp start = ...;
      +//     Duration duration = ...;
      +//     Timestamp end = ...;
      +//
      +//     end.seconds = start.seconds + duration.seconds;
      +//     end.nanos = start.nanos + duration.nanos;
      +//
      +//     if (end.nanos < 0) {
      +//       end.seconds -= 1;
      +//       end.nanos += 1000000000;
      +//     } else if (end.nanos >= 1000000000) {
      +//       end.seconds += 1;
      +//       end.nanos -= 1000000000;
      +//     }
      +//
      +message Duration {
      +
      +  // Signed seconds of the span of time. Must be from -315,576,000,000
      +  // to +315,576,000,000 inclusive.
      +  int64 seconds = 1;
      +
      +  // Signed fractions of a second at nanosecond resolution of the span
      +  // of time. Durations less than one second are represented with a 0
      +  // `seconds` field and a positive or negative `nanos` field. For durations
      +  // of one second or more, a non-zero value for the `nanos` field must be
      +  // of the same sign as the `seconds` field. Must be from -999,999,999
      +  // to +999,999,999 inclusive.
      +  int32 nanos = 2;
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/duration_test.go b/vendor/github.com/golang/protobuf/ptypes/duration_test.go
      new file mode 100644
      index 00000000..e761289f
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/duration_test.go
      @@ -0,0 +1,121 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2016 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +package ptypes
      +
      +import (
      +	"math"
      +	"testing"
      +	"time"
      +
      +	"github.com/golang/protobuf/proto"
      +	durpb "github.com/golang/protobuf/ptypes/duration"
      +)
      +
      +const (
      +	minGoSeconds = math.MinInt64 / int64(1e9)
      +	maxGoSeconds = math.MaxInt64 / int64(1e9)
      +)
      +
      +var durationTests = []struct {
      +	proto   *durpb.Duration
      +	isValid bool
      +	inRange bool
      +	dur     time.Duration
      +}{
      +	// The zero duration.
      +	{&durpb.Duration{0, 0}, true, true, 0},
      +	// Some ordinary non-zero durations.
      +	{&durpb.Duration{100, 0}, true, true, 100 * time.Second},
      +	{&durpb.Duration{-100, 0}, true, true, -100 * time.Second},
      +	{&durpb.Duration{100, 987}, true, true, 100*time.Second + 987},
      +	{&durpb.Duration{-100, -987}, true, true, -(100*time.Second + 987)},
      +	// The largest duration representable in Go.
      +	{&durpb.Duration{maxGoSeconds, int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64},
      +	// The smallest duration representable in Go.
      +	{&durpb.Duration{minGoSeconds, int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64},
      +	{nil, false, false, 0},
      +	{&durpb.Duration{-100, 987}, false, false, 0},
      +	{&durpb.Duration{100, -987}, false, false, 0},
      +	{&durpb.Duration{math.MinInt64, 0}, false, false, 0},
      +	{&durpb.Duration{math.MaxInt64, 0}, false, false, 0},
      +	// The largest valid duration.
      +	{&durpb.Duration{maxSeconds, 1e9 - 1}, true, false, 0},
      +	// The smallest valid duration.
      +	{&durpb.Duration{minSeconds, -(1e9 - 1)}, true, false, 0},
      +	// The smallest invalid duration above the valid range.
      +	{&durpb.Duration{maxSeconds + 1, 0}, false, false, 0},
      +	// The largest invalid duration below the valid range.
      +	{&durpb.Duration{minSeconds - 1, -(1e9 - 1)}, false, false, 0},
      +	// One nanosecond past the largest duration representable in Go.
      +	{&durpb.Duration{maxGoSeconds, int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0},
      +	// One nanosecond past the smallest duration representable in Go.
      +	{&durpb.Duration{minGoSeconds, int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0},
      +	// One second past the largest duration representable in Go.
      +	{&durpb.Duration{maxGoSeconds + 1, int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0},
      +	// One second past the smallest duration representable in Go.
      +	{&durpb.Duration{minGoSeconds - 1, int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0},
      +}
      +
      +func TestValidateDuration(t *testing.T) {
      +	for _, test := range durationTests {
      +		err := validateDuration(test.proto)
      +		gotValid := (err == nil)
      +		if gotValid != test.isValid {
      +			t.Errorf("validateDuration(%v) = %t, want %t", test.proto, gotValid, test.isValid)
      +		}
      +	}
      +}
      +
      +func TestDuration(t *testing.T) {
      +	for _, test := range durationTests {
      +		got, err := Duration(test.proto)
      +		gotOK := (err == nil)
      +		wantOK := test.isValid && test.inRange
      +		if gotOK != wantOK {
      +			t.Errorf("Duration(%v) ok = %t, want %t", test.proto, gotOK, wantOK)
      +		}
      +		if err == nil && got != test.dur {
      +			t.Errorf("Duration(%v) = %v, want %v", test.proto, got, test.dur)
      +		}
      +	}
      +}
      +
      +func TestDurationProto(t *testing.T) {
      +	for _, test := range durationTests {
      +		if test.isValid && test.inRange {
      +			got := DurationProto(test.dur)
      +			if !proto.Equal(got, test.proto) {
      +				t.Errorf("DurationProto(%v) = %v, want %v", test.dur, got, test.proto)
      +			}
      +		}
      +	}
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
      new file mode 100644
      index 00000000..bac5e384
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
      @@ -0,0 +1,63 @@
      +// Code generated by protoc-gen-go.
      +// source: github.com/golang/protobuf/ptypes/empty/empty.proto
      +// DO NOT EDIT!
      +
      +/*
      +Package empty is a generated protocol buffer package.
      +
      +It is generated from these files:
      +	github.com/golang/protobuf/ptypes/empty/empty.proto
      +
      +It has these top-level messages:
      +	Empty
      +*/
      +package empty
      +
      +import proto "github.com/golang/protobuf/proto"
      +import fmt "fmt"
      +import math "math"
      +
      +// Reference imports to suppress errors if they are not otherwise used.
      +var _ = proto.Marshal
      +var _ = fmt.Errorf
      +var _ = math.Inf
      +
      +// This is a compile-time assertion to ensure that this generated file
      +// is compatible with the proto package it is being compiled against.
      +const _ = proto.ProtoPackageIsVersion1
      +
      +// A generic empty message that you can re-use to avoid defining duplicated
      +// empty messages in your APIs. A typical example is to use it as the request
      +// or the response type of an API method. For instance:
      +//
      +//     service Foo {
      +//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
      +//     }
      +//
      +// The JSON representation for `Empty` is empty JSON object `{}`.
      +type Empty struct {
      +}
      +
      +func (m *Empty) Reset()                    { *m = Empty{} }
      +func (m *Empty) String() string            { return proto.CompactTextString(m) }
      +func (*Empty) ProtoMessage()               {}
      +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
      +func (*Empty) XXX_WellKnownType() string   { return "Empty" }
      +
      +func init() {
      +	proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
      +}
      +
      +var fileDescriptor0 = []byte{
      +	// 148 bytes of a gzipped FileDescriptorProto
      +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0x4e, 0xcf, 0x2c, 0xc9,
      +	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
      +	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcd,
      +	0x05, 0x32, 0x20, 0xa4, 0x1e, 0x58, 0x4e, 0x88, 0x3f, 0x3d, 0x3f, 0x3f, 0x3d, 0x27, 0x55, 0x0f,
      +	0xa6, 0x52, 0x89, 0x9d, 0x8b, 0xd5, 0x15, 0x24, 0xef, 0x54, 0xc9, 0x25, 0x0c, 0x34, 0x49, 0x0f,
      +	0x4d, 0xde, 0x89, 0x0b, 0x2c, 0x1b, 0x00, 0xe2, 0x06, 0x30, 0x46, 0xa9, 0x13, 0x69, 0xe7, 0x02,
      +	0x46, 0xc6, 0x1f, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43,
      +	0x0c, 0x0d, 0x80, 0x2a, 0xd5, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01,
      +	0x69, 0x49, 0x62, 0x03, 0x9b, 0x61, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xbb, 0xf4, 0x0e,
      +	0xd2, 0x00, 0x00, 0x00,
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
      new file mode 100644
      index 00000000..0c0d2625
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
      @@ -0,0 +1,53 @@
      +// Protocol Buffers - Google's data interchange format
      +// Copyright 2008 Google Inc.  All rights reserved.
      +// https://developers.google.com/protocol-buffers/
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto3";
      +
      +package google.protobuf;
      +option go_package = "github.com/golang/protobuf/ptypes/empty";
      +
      +option csharp_namespace = "Google.Protobuf.WellKnownTypes";
      +option java_package = "com.google.protobuf";
      +option java_outer_classname = "EmptyProto";
      +option java_multiple_files = true;
      +option java_generate_equals_and_hash = true;
      +option objc_class_prefix = "GPB";
      +option cc_enable_arenas = true;
      +
      +// A generic empty message that you can re-use to avoid defining duplicated
      +// empty messages in your APIs. A typical example is to use it as the request
      +// or the response type of an API method. For instance:
      +//
      +//     service Foo {
      +//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
      +//     }
      +//
      +// The JSON representation for `Empty` is empty JSON object `{}`.
      +message Empty {}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh
      new file mode 100755
      index 00000000..48e7cff0
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/regen.sh
      @@ -0,0 +1,72 @@
      +#!/bin/bash -e
      +#
      +# This script fetches and rebuilds the "well-known types" protocol buffers.
      +# To run this you will need protoc and goprotobuf installed;
      +# see https://github.com/golang/protobuf for instructions.
      +# You also need Go and Git installed.
      +
      +PKG=github.com/golang/protobuf/ptypes
      +UPSTREAM=https://github.com/google/protobuf
      +UPSTREAM_SUBDIR=src/google/protobuf
      +PROTO_FILES='
      +  any.proto
      +  duration.proto
      +  empty.proto
      +  struct.proto
      +  timestamp.proto
      +  wrappers.proto
      +'
      +
      +function die() {
      +  echo 1>&2 $*
      +  exit 1
      +}
      +
      +# Sanity check that the right tools are accessible.
      +for tool in go git protoc protoc-gen-go; do
      +  q=$(which $tool) || die "didn't find $tool"
      +  echo 1>&2 "$tool: $q"
      +done
      +
      +tmpdir=$(mktemp -d -t regen-wkt.XXXXXX)
      +trap 'rm -rf $tmpdir' EXIT
      +
      +echo -n 1>&2 "finding package dir... "
      +pkgdir=$(go list -f '{{.Dir}}' $PKG)
      +echo 1>&2 $pkgdir
      +base=$(echo $pkgdir | sed "s,/$PKG\$,,")
      +echo 1>&2 "base: $base"
      +cd $base
      +
      +echo 1>&2 "fetching latest protos... "
      +git clone -q $UPSTREAM $tmpdir
      +# Pass 1: build mapping from upstream filename to our filename.
      +declare -A filename_map
      +for f in $(cd $PKG && find * -name '*.proto'); do
      +  echo -n 1>&2 "looking for latest version of $f... "
      +  up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/)
      +  echo 1>&2 $up
      +  if [ $(echo $up | wc -w) != "1" ]; then
      +    die "not exactly one match"
      +  fi
      +  filename_map[$up]=$f
      +done
      +# Pass 2: copy files, making necessary adjustments.
      +for up in "${!filename_map[@]}"; do
      +  f=${filename_map[$up]}
      +  shortname=$(basename $f | sed 's,\.proto$,,')
      +  cat $tmpdir/$UPSTREAM_SUBDIR/$up |
      +    # Adjust proto package.
      +    # TODO(dsymonds): Remove when the right go_package options are upstream.
      +    sed '/^package /a option go_package = "github.com\/golang\/protobuf\/ptypes\/'${shortname}'";' |
      +    # Unfortunately "package struct" and "package type" don't work.
      +    sed '/option go_package/s,struct",struct;structpb",' |
      +    cat > $PKG/$f
      +done
      +
      +# Run protoc once per package.
      +for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do
      +  echo 1>&2 "* $dir"
      +  protoc --go_out=. $dir/*.proto
      +done
      +echo 1>&2 "All OK"
      diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
      new file mode 100644
      index 00000000..f3d0a4a5
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
      @@ -0,0 +1,376 @@
      +// Code generated by protoc-gen-go.
      +// source: github.com/golang/protobuf/ptypes/struct/struct.proto
      +// DO NOT EDIT!
      +
      +/*
      +Package structpb is a generated protocol buffer package.
      +
      +It is generated from these files:
      +	github.com/golang/protobuf/ptypes/struct/struct.proto
      +
      +It has these top-level messages:
      +	Struct
      +	Value
      +	ListValue
      +*/
      +package structpb
      +
      +import proto "github.com/golang/protobuf/proto"
      +import fmt "fmt"
      +import math "math"
      +
      +// Reference imports to suppress errors if they are not otherwise used.
      +var _ = proto.Marshal
      +var _ = fmt.Errorf
      +var _ = math.Inf
      +
      +// This is a compile-time assertion to ensure that this generated file
      +// is compatible with the proto package it is being compiled against.
      +const _ = proto.ProtoPackageIsVersion1
      +
      +// `NullValue` is a singleton enumeration to represent the null value for the
      +// `Value` type union.
      +//
      +//  The JSON representation for `NullValue` is JSON `null`.
      +type NullValue int32
      +
      +const (
      +	// Null value.
      +	NullValue_NULL_VALUE NullValue = 0
      +)
      +
      +var NullValue_name = map[int32]string{
      +	0: "NULL_VALUE",
      +}
      +var NullValue_value = map[string]int32{
      +	"NULL_VALUE": 0,
      +}
      +
      +func (x NullValue) String() string {
      +	return proto.EnumName(NullValue_name, int32(x))
      +}
      +func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
      +func (NullValue) XXX_WellKnownType() string       { return "NullValue" }
      +
      +// `Struct` represents a structured data value, consisting of fields
      +// which map to dynamically typed values. In some languages, `Struct`
      +// might be supported by a native representation. For example, in
      +// scripting languages like JS a struct is represented as an
      +// object. The details of that representation are described together
      +// with the proto support for the language.
      +//
      +// The JSON representation for `Struct` is JSON object.
      +type Struct struct {
      +	// Map of dynamically typed values.
      +	Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
      +}
      +
      +func (m *Struct) Reset()                    { *m = Struct{} }
      +func (m *Struct) String() string            { return proto.CompactTextString(m) }
      +func (*Struct) ProtoMessage()               {}
      +func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
      +func (*Struct) XXX_WellKnownType() string   { return "Struct" }
      +
      +func (m *Struct) GetFields() map[string]*Value {
      +	if m != nil {
      +		return m.Fields
      +	}
      +	return nil
      +}
      +
      +// `Value` represents a dynamically typed value which can be either
      +// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of those
+// variants; absence of any variant indicates an error.
      +//
      +// The JSON representation for `Value` is JSON value.
      +type Value struct {
      +	// The kind of value.
      +	//
      +	// Types that are valid to be assigned to Kind:
      +	//	*Value_NullValue
      +	//	*Value_NumberValue
      +	//	*Value_StringValue
      +	//	*Value_BoolValue
      +	//	*Value_StructValue
      +	//	*Value_ListValue
      +	Kind isValue_Kind `protobuf_oneof:"kind"`
      +}
      +
      +func (m *Value) Reset()                    { *m = Value{} }
      +func (m *Value) String() string            { return proto.CompactTextString(m) }
      +func (*Value) ProtoMessage()               {}
      +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
      +func (*Value) XXX_WellKnownType() string   { return "Value" }
      +
      +type isValue_Kind interface {
      +	isValue_Kind()
      +}
      +
      +type Value_NullValue struct {
      +	NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"`
      +}
      +type Value_NumberValue struct {
      +	NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"`
      +}
      +type Value_StringValue struct {
      +	StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"`
      +}
      +type Value_BoolValue struct {
      +	BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"`
      +}
      +type Value_StructValue struct {
      +	StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"`
      +}
      +type Value_ListValue struct {
      +	ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"`
      +}
      +
      +func (*Value_NullValue) isValue_Kind()   {}
      +func (*Value_NumberValue) isValue_Kind() {}
      +func (*Value_StringValue) isValue_Kind() {}
      +func (*Value_BoolValue) isValue_Kind()   {}
      +func (*Value_StructValue) isValue_Kind() {}
      +func (*Value_ListValue) isValue_Kind()   {}
      +
      +func (m *Value) GetKind() isValue_Kind {
      +	if m != nil {
      +		return m.Kind
      +	}
      +	return nil
      +}
      +
      +func (m *Value) GetNullValue() NullValue {
      +	if x, ok := m.GetKind().(*Value_NullValue); ok {
      +		return x.NullValue
      +	}
      +	return NullValue_NULL_VALUE
      +}
      +
      +func (m *Value) GetNumberValue() float64 {
      +	if x, ok := m.GetKind().(*Value_NumberValue); ok {
      +		return x.NumberValue
      +	}
      +	return 0
      +}
      +
      +func (m *Value) GetStringValue() string {
      +	if x, ok := m.GetKind().(*Value_StringValue); ok {
      +		return x.StringValue
      +	}
      +	return ""
      +}
      +
      +func (m *Value) GetBoolValue() bool {
      +	if x, ok := m.GetKind().(*Value_BoolValue); ok {
      +		return x.BoolValue
      +	}
      +	return false
      +}
      +
      +func (m *Value) GetStructValue() *Struct {
      +	if x, ok := m.GetKind().(*Value_StructValue); ok {
      +		return x.StructValue
      +	}
      +	return nil
      +}
      +
      +func (m *Value) GetListValue() *ListValue {
      +	if x, ok := m.GetKind().(*Value_ListValue); ok {
      +		return x.ListValue
      +	}
      +	return nil
      +}
      +
      +// XXX_OneofFuncs is for the internal use of the proto package.
      +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
      +	return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{
      +		(*Value_NullValue)(nil),
      +		(*Value_NumberValue)(nil),
      +		(*Value_StringValue)(nil),
      +		(*Value_BoolValue)(nil),
      +		(*Value_StructValue)(nil),
      +		(*Value_ListValue)(nil),
      +	}
      +}
      +
      +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
      +	m := msg.(*Value)
      +	// kind
      +	switch x := m.Kind.(type) {
      +	case *Value_NullValue:
      +		b.EncodeVarint(1<<3 | proto.WireVarint)
      +		b.EncodeVarint(uint64(x.NullValue))
      +	case *Value_NumberValue:
      +		b.EncodeVarint(2<<3 | proto.WireFixed64)
      +		b.EncodeFixed64(math.Float64bits(x.NumberValue))
      +	case *Value_StringValue:
      +		b.EncodeVarint(3<<3 | proto.WireBytes)
      +		b.EncodeStringBytes(x.StringValue)
      +	case *Value_BoolValue:
      +		t := uint64(0)
      +		if x.BoolValue {
      +			t = 1
      +		}
      +		b.EncodeVarint(4<<3 | proto.WireVarint)
      +		b.EncodeVarint(t)
      +	case *Value_StructValue:
      +		b.EncodeVarint(5<<3 | proto.WireBytes)
      +		if err := b.EncodeMessage(x.StructValue); err != nil {
      +			return err
      +		}
      +	case *Value_ListValue:
      +		b.EncodeVarint(6<<3 | proto.WireBytes)
      +		if err := b.EncodeMessage(x.ListValue); err != nil {
      +			return err
      +		}
      +	case nil:
      +	default:
      +		return fmt.Errorf("Value.Kind has unexpected type %T", x)
      +	}
      +	return nil
      +}
      +
      +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
      +	m := msg.(*Value)
      +	switch tag {
      +	case 1: // kind.null_value
      +		if wire != proto.WireVarint {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeVarint()
      +		m.Kind = &Value_NullValue{NullValue(x)}
      +		return true, err
      +	case 2: // kind.number_value
      +		if wire != proto.WireFixed64 {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeFixed64()
      +		m.Kind = &Value_NumberValue{math.Float64frombits(x)}
      +		return true, err
      +	case 3: // kind.string_value
      +		if wire != proto.WireBytes {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeStringBytes()
      +		m.Kind = &Value_StringValue{x}
      +		return true, err
      +	case 4: // kind.bool_value
      +		if wire != proto.WireVarint {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		x, err := b.DecodeVarint()
      +		m.Kind = &Value_BoolValue{x != 0}
      +		return true, err
      +	case 5: // kind.struct_value
      +		if wire != proto.WireBytes {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		msg := new(Struct)
      +		err := b.DecodeMessage(msg)
      +		m.Kind = &Value_StructValue{msg}
      +		return true, err
      +	case 6: // kind.list_value
      +		if wire != proto.WireBytes {
      +			return true, proto.ErrInternalBadWireType
      +		}
      +		msg := new(ListValue)
      +		err := b.DecodeMessage(msg)
      +		m.Kind = &Value_ListValue{msg}
      +		return true, err
      +	default:
      +		return false, nil
      +	}
      +}
      +
      +func _Value_OneofSizer(msg proto.Message) (n int) {
      +	m := msg.(*Value)
      +	// kind
      +	switch x := m.Kind.(type) {
      +	case *Value_NullValue:
      +		n += proto.SizeVarint(1<<3 | proto.WireVarint)
      +		n += proto.SizeVarint(uint64(x.NullValue))
      +	case *Value_NumberValue:
      +		n += proto.SizeVarint(2<<3 | proto.WireFixed64)
      +		n += 8
      +	case *Value_StringValue:
      +		n += proto.SizeVarint(3<<3 | proto.WireBytes)
      +		n += proto.SizeVarint(uint64(len(x.StringValue)))
      +		n += len(x.StringValue)
      +	case *Value_BoolValue:
      +		n += proto.SizeVarint(4<<3 | proto.WireVarint)
      +		n += 1
      +	case *Value_StructValue:
      +		s := proto.Size(x.StructValue)
      +		n += proto.SizeVarint(5<<3 | proto.WireBytes)
      +		n += proto.SizeVarint(uint64(s))
      +		n += s
      +	case *Value_ListValue:
      +		s := proto.Size(x.ListValue)
      +		n += proto.SizeVarint(6<<3 | proto.WireBytes)
      +		n += proto.SizeVarint(uint64(s))
      +		n += s
      +	case nil:
      +	default:
      +		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
      +	}
      +	return n
      +}
      +
      +// `ListValue` is a wrapper around a repeated field of values.
      +//
      +// The JSON representation for `ListValue` is JSON array.
      +type ListValue struct {
      +	// Repeated field of dynamically typed values.
      +	Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
      +}
      +
      +func (m *ListValue) Reset()                    { *m = ListValue{} }
      +func (m *ListValue) String() string            { return proto.CompactTextString(m) }
      +func (*ListValue) ProtoMessage()               {}
      +func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
      +func (*ListValue) XXX_WellKnownType() string   { return "ListValue" }
      +
      +func (m *ListValue) GetValues() []*Value {
      +	if m != nil {
      +		return m.Values
      +	}
      +	return nil
      +}
      +
      +func init() {
      +	proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
      +	proto.RegisterType((*Value)(nil), "google.protobuf.Value")
      +	proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
      +	proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
      +}
      +
      +var fileDescriptor0 = []byte{
      +	// 412 bytes of a gzipped FileDescriptorProto
      +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x92, 0xcf, 0x8b, 0xd3, 0x40,
      +	0x14, 0xc7, 0x3b, 0x49, 0x1b, 0xcc, 0x8b, 0xd4, 0x12, 0x41, 0x4b, 0x05, 0x95, 0xf6, 0x52, 0x44,
      +	0x12, 0xac, 0x08, 0x62, 0xbd, 0x18, 0xa8, 0x15, 0x0c, 0x25, 0x46, 0x5b, 0xc1, 0x4b, 0x69, 0xda,
      +	0x34, 0x86, 0x4e, 0x67, 0x42, 0x7e, 0x28, 0x3d, 0xfa, 0x5f, 0x78, 0x5c, 0xf6, 0xb8, 0xc7, 0xfd,
      +	0x0b, 0x77, 0x7e, 0x24, 0xd9, 0xa5, 0xa5, 0xb0, 0xa7, 0x99, 0xf7, 0x9d, 0xcf, 0xfb, 0xce, 0x7b,
      +	0x6f, 0x06, 0xde, 0x45, 0x71, 0xfe, 0xbb, 0x08, 0xac, 0x35, 0xdd, 0xdb, 0x11, 0xc5, 0x2b, 0x12,
      +	0xd9, 0x49, 0x4a, 0x73, 0x1a, 0x14, 0x5b, 0x3b, 0xc9, 0x0f, 0x49, 0x98, 0xd9, 0x59, 0x9e, 0x16,
      +	0xeb, 0xbc, 0x5c, 0x2c, 0x71, 0x6a, 0x3e, 0x8a, 0x28, 0x8d, 0x70, 0x68, 0x55, 0x6c, 0xff, 0x3f,
      +	0x02, 0xed, 0xbb, 0x20, 0xcc, 0x31, 0x68, 0xdb, 0x38, 0xc4, 0x9b, 0xac, 0x8b, 0x5e, 0xaa, 0x43,
      +	0x63, 0x34, 0xb0, 0x8e, 0x60, 0x4b, 0x82, 0xd6, 0x67, 0x41, 0x4d, 0x48, 0x9e, 0x1e, 0xfc, 0x32,
      +	0xa5, 0xf7, 0x0d, 0x8c, 0x3b, 0xb2, 0xd9, 0x01, 0x75, 0x17, 0x1e, 0x98, 0x11, 0x1a, 0xea, 0x3e,
      +	0xdf, 0x9a, 0xaf, 0xa1, 0xf5, 0x67, 0x85, 0x8b, 0xb0, 0xab, 0x30, 0xcd, 0x18, 0x3d, 0x39, 0x31,
      +	0x5f, 0xf0, 0x53, 0x5f, 0x42, 0x1f, 0x94, 0xf7, 0xa8, 0x7f, 0xad, 0x40, 0x4b, 0x88, 0xac, 0x32,
      +	0x20, 0x05, 0xc6, 0x4b, 0x69, 0xc0, 0x4d, 0xdb, 0xa3, 0xde, 0x89, 0xc1, 0x8c, 0x21, 0x82, 0xff,
      +	0xd2, 0xf0, 0x75, 0x52, 0x05, 0xe6, 0x00, 0x1e, 0x92, 0x62, 0x1f, 0x84, 0xe9, 0xf2, 0xf6, 0x7e,
      +	0xc4, 0x10, 0x43, 0xaa, 0x35, 0xc4, 0xe6, 0x14, 0x93, 0xa8, 0x84, 0x54, 0x5e, 0x38, 0x87, 0xa4,
      +	0x2a, 0xa1, 0x17, 0x00, 0x01, 0xa5, 0x55, 0x19, 0x4d, 0x86, 0x3c, 0xe0, 0x57, 0x71, 0x4d, 0x02,
      +	0x1f, 0x85, 0x0b, 0x1b, 0x51, 0x89, 0xb4, 0x44, 0xab, 0x4f, 0xcf, 0xcc, 0xb1, 0xb4, 0x67, 0xbb,
      +	0xba, 0x4b, 0x1c, 0x67, 0x55, 0xae, 0x26, 0x72, 0x4f, 0xbb, 0x74, 0x19, 0x52, 0x77, 0x89, 0xab,
      +	0xc0, 0xd1, 0xa0, 0xb9, 0x8b, 0xc9, 0xa6, 0x3f, 0x06, 0xbd, 0x26, 0x4c, 0x0b, 0x34, 0x61, 0x56,
      +	0xbd, 0xe8, 0xb9, 0xa1, 0x97, 0xd4, 0xab, 0x67, 0xa0, 0xd7, 0x43, 0x34, 0xdb, 0x00, 0xb3, 0xb9,
      +	0xeb, 0x2e, 0x17, 0x9f, 0xdc, 0xf9, 0xa4, 0xd3, 0x70, 0xfe, 0x21, 0x78, 0xcc, 0x7e, 0xdb, 0xb1,
      +	0x85, 0x63, 0xc8, 0x6e, 0x3c, 0x1e, 0x7b, 0xe8, 0xd7, 0x9b, 0xfb, 0x7e, 0xcc, 0xb1, 0x5c, 0x92,
      +	0xe0, 0x02, 0xa1, 0x4b, 0x45, 0x9d, 0x7a, 0xce, 0x95, 0xf2, 0x7c, 0x2a, 0xcd, 0xbd, 0xaa, 0xbe,
      +	0x9f, 0x21, 0xc6, 0x5f, 0x09, 0xfd, 0x4b, 0x7e, 0xf0, 0xcc, 0x40, 0x13, 0x56, 0x6f, 0x6f, 0x02,
      +	0x00, 0x00, 0xff, 0xff, 0xbc, 0xcf, 0x6d, 0x50, 0xfe, 0x02, 0x00, 0x00,
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
      new file mode 100644
      index 00000000..764be034
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
      @@ -0,0 +1,96 @@
      +// Protocol Buffers - Google's data interchange format
      +// Copyright 2008 Google Inc.  All rights reserved.
      +// https://developers.google.com/protocol-buffers/
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto3";
      +
      +package google.protobuf;
      +option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
      +
      +option csharp_namespace = "Google.Protobuf.WellKnownTypes";
      +option java_package = "com.google.protobuf";
      +option java_outer_classname = "StructProto";
      +option java_multiple_files = true;
      +option java_generate_equals_and_hash = true;
      +option objc_class_prefix = "GPB";
      +
      +
      +// `Struct` represents a structured data value, consisting of fields
      +// which map to dynamically typed values. In some languages, `Struct`
      +// might be supported by a native representation. For example, in
      +// scripting languages like JS a struct is represented as an
      +// object. The details of that representation are described together
      +// with the proto support for the language.
      +//
      +// The JSON representation for `Struct` is JSON object.
      +message Struct {
      +  // Map of dynamically typed values.
      +  map<string, Value> fields = 1;
      +}
      +
      +// `Value` represents a dynamically typed value which can be either
      +// null, a number, a string, a boolean, a recursive struct value, or a
      +// list of values. A producer of value is expected to set one of that
      +// variants, absence of any variant indicates an error.
      +//
      +// The JSON representation for `Value` is JSON value.
      +message Value {
      +  // The kind of value.
      +  oneof kind {
      +    // Represents a null value.
      +    NullValue null_value = 1;
      +    // Represents a double value.
      +    double number_value = 2;
      +    // Represents a string value.
      +    string string_value = 3;
      +    // Represents a boolean value.
      +    bool bool_value = 4;
      +    // Represents a structured value.
      +    Struct struct_value = 5;
      +    // Represents a repeated `Value`.
      +    ListValue list_value = 6;
      +  }
      +}
      +
      +// `NullValue` is a singleton enumeration to represent the null value for the
      +// `Value` type union.
      +//
      +//  The JSON representation for `NullValue` is JSON `null`.
      +enum NullValue {
      +  // Null value.
      +  NULL_VALUE = 0;
      +}
      +
      +// `ListValue` is a wrapper around a repeated field of values.
      +//
      +// The JSON representation for `ListValue` is JSON array.
      +message ListValue {
      +  // Repeated field of dynamically typed values.
      +  repeated Value values = 1;
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
      new file mode 100644
      index 00000000..1b365762
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
      @@ -0,0 +1,125 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2016 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +package ptypes
      +
      +// This file implements operations on google.protobuf.Timestamp.
      +
      +import (
      +	"errors"
      +	"fmt"
      +	"time"
      +
      +	tspb "github.com/golang/protobuf/ptypes/timestamp"
      +)
      +
      +const (
      +	// Seconds field of the earliest valid Timestamp.
      +	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
      +	minValidSeconds = -62135596800
      +	// Seconds field just after the latest valid Timestamp.
      +	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
      +	maxValidSeconds = 253402300800
      +)
      +
      +// validateTimestamp determines whether a Timestamp is valid.
      +// A valid timestamp represents a time in the range
      +// [0001-01-01, 10000-01-01) and has a Nanos field
      +// in the range [0, 1e9).
      +//
      +// If the Timestamp is valid, validateTimestamp returns nil.
      +// Otherwise, it returns an error that describes
      +// the problem.
      +//
      +// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
      +func validateTimestamp(ts *tspb.Timestamp) error {
      +	if ts == nil {
      +		return errors.New("timestamp: nil Timestamp")
      +	}
      +	if ts.Seconds < minValidSeconds {
      +		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
      +	}
      +	if ts.Seconds >= maxValidSeconds {
      +		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
      +	}
      +	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
      +		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
      +	}
      +	return nil
      +}
      +
      +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
      +// It returns an error if the argument is invalid.
      +//
      +// Unlike most Go functions, if Timestamp returns an error, the first return value
      +// is not the zero time.Time. Instead, it is the value obtained from the
      +// time.Unix function when passed the contents of the Timestamp, in the UTC
      +// locale. This may or may not be a meaningful time; many invalid Timestamps
      +// do map to valid time.Times.
      +//
      +// A nil Timestamp returns an error. The first return value in that case is
      +// undefined.
      +func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
      +	// Don't return the zero value on error, because corresponds to a valid
      +	// timestamp. Instead return whatever time.Unix gives us.
      +	var t time.Time
      +	if ts == nil {
      +		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
      +	} else {
      +		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
      +	}
      +	return t, validateTimestamp(ts)
      +}
      +
      +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
      +// It returns an error if the resulting Timestamp is invalid.
      +func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
      +	seconds := t.Unix()
      +	nanos := int32(t.Sub(time.Unix(seconds, 0)))
      +	ts := &tspb.Timestamp{
      +		Seconds: seconds,
      +		Nanos:   nanos,
      +	}
      +	if err := validateTimestamp(ts); err != nil {
      +		return nil, err
      +	}
      +	return ts, nil
      +}
      +
      +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
      +// Timestamps, it returns an error message in parentheses.
      +func TimestampString(ts *tspb.Timestamp) string {
      +	t, err := Timestamp(ts)
      +	if err != nil {
      +		return fmt.Sprintf("(%v)", err)
      +	}
      +	return t.Format(time.RFC3339Nano)
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
      new file mode 100644
      index 00000000..643a70c4
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
      @@ -0,0 +1,120 @@
      +// Code generated by protoc-gen-go.
      +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
      +// DO NOT EDIT!
      +
      +/*
      +Package timestamp is a generated protocol buffer package.
      +
      +It is generated from these files:
      +	github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
      +
      +It has these top-level messages:
      +	Timestamp
      +*/
      +package timestamp
      +
      +import proto "github.com/golang/protobuf/proto"
      +import fmt "fmt"
      +import math "math"
      +
      +// Reference imports to suppress errors if they are not otherwise used.
      +var _ = proto.Marshal
      +var _ = fmt.Errorf
      +var _ = math.Inf
      +
      +// This is a compile-time assertion to ensure that this generated file
      +// is compatible with the proto package it is being compiled against.
      +const _ = proto.ProtoPackageIsVersion1
      +
      +// A Timestamp represents a point in time independent of any time zone
      +// or calendar, represented as seconds and fractions of seconds at
      +// nanosecond resolution in UTC Epoch time. It is encoded using the
      +// Proleptic Gregorian Calendar which extends the Gregorian calendar
      +// backwards to year one. It is encoded assuming all minutes are 60
      +// seconds long, i.e. leap seconds are "smeared" so that no leap second
      +// table is needed for interpretation. Range is from
      +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
      +// By restricting to that range, we ensure that we can convert to
      +// and from  RFC 3339 date strings.
      +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
      +//
      +// Example 1: Compute Timestamp from POSIX `time()`.
      +//
      +//     Timestamp timestamp;
      +//     timestamp.set_seconds(time(NULL));
      +//     timestamp.set_nanos(0);
      +//
      +// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
      +//
      +//     struct timeval tv;
      +//     gettimeofday(&tv, NULL);
      +//
      +//     Timestamp timestamp;
      +//     timestamp.set_seconds(tv.tv_sec);
      +//     timestamp.set_nanos(tv.tv_usec * 1000);
      +//
      +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
      +//
      +//     FILETIME ft;
      +//     GetSystemTimeAsFileTime(&ft);
      +//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
      +//
      +//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
      +//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
      +//     Timestamp timestamp;
      +//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
      +//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
      +//
      +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
      +//
      +//     long millis = System.currentTimeMillis();
      +//
      +//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
      +//         .setNanos((int) ((millis % 1000) * 1000000)).build();
      +//
      +//
      +// Example 5: Compute Timestamp from current time in Python.
      +//
      +//     now = time.time()
      +//     seconds = int(now)
      +//     nanos = int((now - seconds) * 10**9)
      +//     timestamp = Timestamp(seconds=seconds, nanos=nanos)
      +//
      +//
      +type Timestamp struct {
      +	// Represents seconds of UTC time since Unix epoch
      +	// 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to
      +	// 9999-12-31T23:59:59Z inclusive.
      +	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
      +	// Non-negative fractions of a second at nanosecond resolution. Negative
      +	// second values with fractions must still have non-negative nanos values
      +	// that count forward in time. Must be from 0 to 999,999,999
      +	// inclusive.
      +	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
      +}
      +
      +func (m *Timestamp) Reset()                    { *m = Timestamp{} }
      +func (m *Timestamp) String() string            { return proto.CompactTextString(m) }
      +func (*Timestamp) ProtoMessage()               {}
      +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
      +func (*Timestamp) XXX_WellKnownType() string   { return "Timestamp" }
      +
      +func init() {
      +	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
      +}
      +
      +var fileDescriptor0 = []byte{
      +	// 192 bytes of a gzipped FileDescriptorProto
      +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
      +	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
      +	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9,
      +	0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3,
      +	0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24,
      +	0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83,
      +	0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0xa0, 0x38, 0x6b,
      +	0x10, 0x84, 0xe3, 0xd4, 0xc8, 0xc8, 0x25, 0x0c, 0x74, 0x86, 0x1e, 0x9a, 0xa1, 0x4e, 0x7c, 0x70,
      +	0x23, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, 0xda, 0x24, 0x38, 0x7a, 0x01, 0x23, 0xe3, 0x0f, 0x46,
      +	0xc6, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x86, 0x07, 0x40, 0x95,
      +	0xeb, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0xb4, 0x25, 0xb1, 0x81,
      +	0xcd, 0x31, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, 0x00, 0x00,
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
      new file mode 100644
      index 00000000..f02178ee
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
      @@ -0,0 +1,111 @@
      +// Protocol Buffers - Google's data interchange format
      +// Copyright 2008 Google Inc.  All rights reserved.
      +// https://developers.google.com/protocol-buffers/
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +syntax = "proto3";
      +
      +package google.protobuf;
      +option go_package = "github.com/golang/protobuf/ptypes/timestamp";
      +
      +option csharp_namespace = "Google.Protobuf.WellKnownTypes";
      +option cc_enable_arenas = true;
      +option java_package = "com.google.protobuf";
      +option java_outer_classname = "TimestampProto";
      +option java_multiple_files = true;
      +option java_generate_equals_and_hash = true;
      +option objc_class_prefix = "GPB";
      +
      +// A Timestamp represents a point in time independent of any time zone
      +// or calendar, represented as seconds and fractions of seconds at
      +// nanosecond resolution in UTC Epoch time. It is encoded using the
      +// Proleptic Gregorian Calendar which extends the Gregorian calendar
      +// backwards to year one. It is encoded assuming all minutes are 60
      +// seconds long, i.e. leap seconds are "smeared" so that no leap second
      +// table is needed for interpretation. Range is from
      +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
      +// By restricting to that range, we ensure that we can convert to
      +// and from  RFC 3339 date strings.
      +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
      +//
      +// Example 1: Compute Timestamp from POSIX `time()`.
      +//
      +//     Timestamp timestamp;
      +//     timestamp.set_seconds(time(NULL));
      +//     timestamp.set_nanos(0);
      +//
      +// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
      +//
      +//     struct timeval tv;
      +//     gettimeofday(&tv, NULL);
      +//
      +//     Timestamp timestamp;
      +//     timestamp.set_seconds(tv.tv_sec);
      +//     timestamp.set_nanos(tv.tv_usec * 1000);
      +//
      +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
      +//
      +//     FILETIME ft;
      +//     GetSystemTimeAsFileTime(&ft);
      +//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
      +//
      +//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
      +//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
      +//     Timestamp timestamp;
      +//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
      +//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
      +//
      +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
      +//
      +//     long millis = System.currentTimeMillis();
      +//
      +//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
      +//         .setNanos((int) ((millis % 1000) * 1000000)).build();
      +//
      +//
      +// Example 5: Compute Timestamp from current time in Python.
      +//
      +//     now = time.time()
      +//     seconds = int(now)
      +//     nanos = int((now - seconds) * 10**9)
      +//     timestamp = Timestamp(seconds=seconds, nanos=nanos)
      +//
      +//
      +message Timestamp {
      +
      +  // Represents seconds of UTC time since Unix epoch
      +  // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to
      +  // 9999-12-31T23:59:59Z inclusive.
      +  int64 seconds = 1;
      +
      +  // Non-negative fractions of a second at nanosecond resolution. Negative
      +  // second values with fractions must still have non-negative nanos values
      +  // that count forward in time. Must be from 0 to 999,999,999
      +  // inclusive.
      +  int32 nanos = 2;
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go
      new file mode 100644
      index 00000000..114a7f9f
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go
      @@ -0,0 +1,138 @@
      +// Go support for Protocol Buffers - Google's data interchange format
      +//
      +// Copyright 2016 The Go Authors.  All rights reserved.
      +// https://github.com/golang/protobuf
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +package ptypes
      +
      +import (
      +	"math"
      +	"testing"
      +	"time"
      +
      +	"github.com/golang/protobuf/proto"
      +	tspb "github.com/golang/protobuf/ptypes/timestamp"
      +)
      +
      +var tests = []struct {
      +	ts    *tspb.Timestamp
      +	valid bool
      +	t     time.Time
      +}{
      +	// The timestamp representing the Unix epoch date.
      +	{&tspb.Timestamp{0, 0}, true, utcDate(1970, 1, 1)},
      +	// The smallest representable timestamp.
      +	{&tspb.Timestamp{math.MinInt64, math.MinInt32}, false,
      +		time.Unix(math.MinInt64, math.MinInt32).UTC()},
      +	// The smallest representable timestamp with non-negative nanos.
      +	{&tspb.Timestamp{math.MinInt64, 0}, false, time.Unix(math.MinInt64, 0).UTC()},
      +	// The earliest valid timestamp.
      +	{&tspb.Timestamp{minValidSeconds, 0}, true, utcDate(1, 1, 1)},
      +	//"0001-01-01T00:00:00Z"},
      +	// The largest representable timestamp.
      +	{&tspb.Timestamp{math.MaxInt64, math.MaxInt32}, false,
      +		time.Unix(math.MaxInt64, math.MaxInt32).UTC()},
      +	// The largest representable timestamp with nanos in range.
      +	{&tspb.Timestamp{math.MaxInt64, 1e9 - 1}, false,
      +		time.Unix(math.MaxInt64, 1e9-1).UTC()},
      +	// The largest valid timestamp.
      +	{&tspb.Timestamp{maxValidSeconds - 1, 1e9 - 1}, true,
      +		time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)},
      +	// The smallest invalid timestamp that is larger than the valid range.
      +	{&tspb.Timestamp{maxValidSeconds, 0}, false, time.Unix(maxValidSeconds, 0).UTC()},
      +	// A date before the epoch.
      +	{&tspb.Timestamp{-281836800, 0}, true, utcDate(1961, 1, 26)},
      +	// A date after the epoch.
      +	{&tspb.Timestamp{1296000000, 0}, true, utcDate(2011, 1, 26)},
      +	// A date after the epoch, in the middle of the day.
      +	{&tspb.Timestamp{1296012345, 940483}, true,
      +		time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)},
      +}
      +
      +func TestValidateTimestamp(t *testing.T) {
      +	for _, s := range tests {
      +		got := validateTimestamp(s.ts)
      +		if (got == nil) != s.valid {
      +			t.Errorf("validateTimestamp(%v) = %v, want %v", s.ts, got, s.valid)
      +		}
      +	}
      +}
      +
      +func TestTimestamp(t *testing.T) {
      +	for _, s := range tests {
      +		got, err := Timestamp(s.ts)
      +		if (err == nil) != s.valid {
      +			t.Errorf("Timestamp(%v) error = %v, but valid = %t", s.ts, err, s.valid)
      +		} else if s.valid && got != s.t {
      +			t.Errorf("Timestamp(%v) = %v, want %v", s.ts, got, s.t)
      +		}
      +	}
      +	// Special case: a nil Timestamp is an error, but returns the 0 Unix time.
      +	got, err := Timestamp(nil)
      +	want := time.Unix(0, 0).UTC()
      +	if got != want {
      +		t.Errorf("Timestamp(nil) = %v, want %v", got, want)
      +	}
      +	if err == nil {
      +		t.Errorf("Timestamp(nil) error = nil, expected error")
      +	}
      +}
      +
      +func TestTimestampProto(t *testing.T) {
      +	for _, s := range tests {
      +		got, err := TimestampProto(s.t)
      +		if (err == nil) != s.valid {
      +			t.Errorf("TimestampProto(%v) error = %v, but valid = %t", s.t, err, s.valid)
      +		} else if s.valid && !proto.Equal(got, s.ts) {
      +			t.Errorf("TimestampProto(%v) = %v, want %v", s.t, got, s.ts)
      +		}
      +	}
      +	// No corresponding special case here: no time.Time results in a nil Timestamp.
      +}
      +
      +func TestTimestampString(t *testing.T) {
      +	for _, test := range []struct {
      +		ts   *tspb.Timestamp
      +		want string
      +	}{
      +		// Not much testing needed because presumably time.Format is
      +		// well-tested.
      +		{&tspb.Timestamp{0, 0}, "1970-01-01T00:00:00Z"},
      +		{&tspb.Timestamp{minValidSeconds - 1, 0}, "(timestamp: seconds:-62135596801  before 0001-01-01)"},
      +	} {
      +		got := TimestampString(test.ts)
      +		if got != test.want {
      +			t.Errorf("TimestampString(%v) = %q, want %q", test.ts, got, test.want)
      +		}
      +	}
      +}
      +
      +func utcDate(year, month, day int) time.Time {
      +	return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
      new file mode 100644
      index 00000000..bb7caf61
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
      @@ -0,0 +1,194 @@
      +// Code generated by protoc-gen-go.
      +// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
      +// DO NOT EDIT!
      +
      +/*
      +Package wrappers is a generated protocol buffer package.
      +
      +It is generated from these files:
      +	github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
      +
      +It has these top-level messages:
      +	DoubleValue
      +	FloatValue
      +	Int64Value
      +	UInt64Value
      +	Int32Value
      +	UInt32Value
      +	BoolValue
      +	StringValue
      +	BytesValue
      +*/
      +package wrappers
      +
      +import proto "github.com/golang/protobuf/proto"
      +import fmt "fmt"
      +import math "math"
      +
      +// Reference imports to suppress errors if they are not otherwise used.
      +var _ = proto.Marshal
      +var _ = fmt.Errorf
      +var _ = math.Inf
      +
      +// This is a compile-time assertion to ensure that this generated file
      +// is compatible with the proto package it is being compiled against.
      +const _ = proto.ProtoPackageIsVersion1
      +
      +// Wrapper message for `double`.
      +//
      +// The JSON representation for `DoubleValue` is JSON number.
      +type DoubleValue struct {
      +	// The double value.
      +	Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
      +}
      +
      +func (m *DoubleValue) Reset()                    { *m = DoubleValue{} }
      +func (m *DoubleValue) String() string            { return proto.CompactTextString(m) }
      +func (*DoubleValue) ProtoMessage()               {}
      +func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
      +func (*DoubleValue) XXX_WellKnownType() string   { return "DoubleValue" }
      +
      +// Wrapper message for `float`.
      +//
      +// The JSON representation for `FloatValue` is JSON number.
      +type FloatValue struct {
      +	// The float value.
      +	Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"`
      +}
      +
      +func (m *FloatValue) Reset()                    { *m = FloatValue{} }
      +func (m *FloatValue) String() string            { return proto.CompactTextString(m) }
      +func (*FloatValue) ProtoMessage()               {}
      +func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
      +func (*FloatValue) XXX_WellKnownType() string   { return "FloatValue" }
      +
      +// Wrapper message for `int64`.
      +//
      +// The JSON representation for `Int64Value` is JSON string.
      +type Int64Value struct {
      +	// The int64 value.
      +	Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
      +}
      +
      +func (m *Int64Value) Reset()                    { *m = Int64Value{} }
      +func (m *Int64Value) String() string            { return proto.CompactTextString(m) }
      +func (*Int64Value) ProtoMessage()               {}
      +func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
      +func (*Int64Value) XXX_WellKnownType() string   { return "Int64Value" }
      +
      +// Wrapper message for `uint64`.
      +//
      +// The JSON representation for `UInt64Value` is JSON string.
      +type UInt64Value struct {
      +	// The uint64 value.
      +	Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
      +}
      +
      +func (m *UInt64Value) Reset()                    { *m = UInt64Value{} }
      +func (m *UInt64Value) String() string            { return proto.CompactTextString(m) }
      +func (*UInt64Value) ProtoMessage()               {}
      +func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
      +func (*UInt64Value) XXX_WellKnownType() string   { return "UInt64Value" }
      +
      +// Wrapper message for `int32`.
      +//
      +// The JSON representation for `Int32Value` is JSON number.
      +type Int32Value struct {
      +	// The int32 value.
      +	Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
      +}
      +
      +func (m *Int32Value) Reset()                    { *m = Int32Value{} }
      +func (m *Int32Value) String() string            { return proto.CompactTextString(m) }
      +func (*Int32Value) ProtoMessage()               {}
      +func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
      +func (*Int32Value) XXX_WellKnownType() string   { return "Int32Value" }
      +
      +// Wrapper message for `uint32`.
      +//
      +// The JSON representation for `UInt32Value` is JSON number.
      +type UInt32Value struct {
      +	// The uint32 value.
      +	Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
      +}
      +
      +func (m *UInt32Value) Reset()                    { *m = UInt32Value{} }
      +func (m *UInt32Value) String() string            { return proto.CompactTextString(m) }
      +func (*UInt32Value) ProtoMessage()               {}
      +func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
      +func (*UInt32Value) XXX_WellKnownType() string   { return "UInt32Value" }
      +
      +// Wrapper message for `bool`.
      +//
      +// The JSON representation for `BoolValue` is JSON `true` and `false`.
      +type BoolValue struct {
      +	// The bool value.
      +	Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
      +}
      +
      +func (m *BoolValue) Reset()                    { *m = BoolValue{} }
      +func (m *BoolValue) String() string            { return proto.CompactTextString(m) }
      +func (*BoolValue) ProtoMessage()               {}
      +func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
      +func (*BoolValue) XXX_WellKnownType() string   { return "BoolValue" }
      +
      +// Wrapper message for `string`.
      +//
      +// The JSON representation for `StringValue` is JSON string.
      +type StringValue struct {
      +	// The string value.
      +	Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
      +}
      +
      +func (m *StringValue) Reset()                    { *m = StringValue{} }
      +func (m *StringValue) String() string            { return proto.CompactTextString(m) }
      +func (*StringValue) ProtoMessage()               {}
      +func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
      +func (*StringValue) XXX_WellKnownType() string   { return "StringValue" }
      +
      +// Wrapper message for `bytes`.
      +//
      +// The JSON representation for `BytesValue` is JSON string.
      +type BytesValue struct {
      +	// The bytes value.
      +	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
      +}
      +
      +func (m *BytesValue) Reset()                    { *m = BytesValue{} }
      +func (m *BytesValue) String() string            { return proto.CompactTextString(m) }
      +func (*BytesValue) ProtoMessage()               {}
      +func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
      +func (*BytesValue) XXX_WellKnownType() string   { return "BytesValue" }
      +
      +func init() {
      +	proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
      +	proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
      +	proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
      +	proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
      +	proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
      +	proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
      +	proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
      +	proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
      +	proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
      +}
      +
      +var fileDescriptor0 = []byte{
      +	// 258 bytes of a gzipped FileDescriptorProto
      +	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
      +	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
      +	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x2f,
      +	0x4a, 0x2c, 0x28, 0x48, 0x2d, 0x42, 0x30, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, 0xd3,
      +	0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0x94, 0xb9, 0xb8, 0x5d, 0xf2, 0x4b, 0x93, 0x72, 0x52, 0xc3,
      +	0x12, 0x73, 0x4a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x40, 0x0c, 0x09, 0x46, 0x05, 0x46, 0x0d,
      +	0xc6, 0x20, 0x08, 0x47, 0x49, 0x89, 0x8b, 0xcb, 0x2d, 0x27, 0x3f, 0xb1, 0x04, 0x8b, 0x1a, 0x26,
      +	0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x00, 0x2d, 0x0b, 0xc5,
      +	0xa5, 0x88, 0x05, 0xd5, 0x20, 0x63, 0x23, 0x2c, 0x6a, 0x58, 0xd1, 0x0c, 0xc2, 0xaa, 0x88, 0x17,
      +	0xa6, 0x48, 0x91, 0x8b, 0xd3, 0x29, 0x3f, 0x3f, 0x07, 0x8b, 0x12, 0x0e, 0x24, 0x73, 0x82, 0x4b,
      +	0x8a, 0x32, 0xf3, 0xd2, 0xb1, 0x28, 0xe2, 0x44, 0x72, 0x90, 0x53, 0x65, 0x49, 0x6a, 0x31, 0x16,
      +	0x35, 0x3c, 0x50, 0x35, 0x4e, 0xf5, 0x5c, 0xc2, 0xc0, 0xd8, 0xd0, 0x43, 0x0b, 0x5d, 0x27, 0xde,
      +	0x70, 0x68, 0xf0, 0x07, 0x80, 0x44, 0x02, 0x18, 0xa3, 0xb4, 0x88, 0x8f, 0xba, 0x05, 0x8c, 0x8c,
      +	0x3f, 0x18, 0x19, 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0x18, 0x1d,
      +	0x00, 0x55, 0xad, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, 0x17, 0x02, 0xd2, 0x95,
      +	0xc4, 0x06, 0x36, 0xc6, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xdf, 0x64, 0x4b, 0x1c, 0x02,
      +	0x00, 0x00,
      +}
      diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
      new file mode 100644
      index 00000000..f035adce
      --- /dev/null
      +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
      @@ -0,0 +1,119 @@
      +// Protocol Buffers - Google's data interchange format
      +// Copyright 2008 Google Inc.  All rights reserved.
      +// https://developers.google.com/protocol-buffers/
      +//
      +// Redistribution and use in source and binary forms, with or without
      +// modification, are permitted provided that the following conditions are
      +// met:
      +//
      +//     * Redistributions of source code must retain the above copyright
      +// notice, this list of conditions and the following disclaimer.
      +//     * Redistributions in binary form must reproduce the above
      +// copyright notice, this list of conditions and the following disclaimer
      +// in the documentation and/or other materials provided with the
      +// distribution.
      +//     * Neither the name of Google Inc. nor the names of its
      +// contributors may be used to endorse or promote products derived from
      +// this software without specific prior written permission.
      +//
      +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +// Wrappers for primitive (non-message) types. These types are useful
      +// for embedding primitives in the `google.protobuf.Any` type and for places
      +// where we need to distinguish between the absence of a primitive
      +// typed field and its default value.
      +
      +syntax = "proto3";
      +
      +package google.protobuf;
      +option go_package = "github.com/golang/protobuf/ptypes/wrappers";
      +
      +option csharp_namespace = "Google.Protobuf.WellKnownTypes";
      +option cc_enable_arenas = true;
      +option java_package = "com.google.protobuf";
      +option java_outer_classname = "WrappersProto";
      +option java_multiple_files = true;
      +option java_generate_equals_and_hash = true;
      +option objc_class_prefix = "GPB";
      +
      +// Wrapper message for `double`.
      +//
      +// The JSON representation for `DoubleValue` is JSON number.
      +message DoubleValue {
      +  // The double value.
      +  double value = 1;
      +}
      +
      +// Wrapper message for `float`.
      +//
      +// The JSON representation for `FloatValue` is JSON number.
      +message FloatValue {
      +  // The float value.
      +  float value = 1;
      +}
      +
      +// Wrapper message for `int64`.
      +//
      +// The JSON representation for `Int64Value` is JSON string.
      +message Int64Value {
      +  // The int64 value.
      +  int64 value = 1;
      +}
      +
      +// Wrapper message for `uint64`.
      +//
      +// The JSON representation for `UInt64Value` is JSON string.
      +message UInt64Value {
      +  // The uint64 value.
      +  uint64 value = 1;
      +}
      +
      +// Wrapper message for `int32`.
      +//
      +// The JSON representation for `Int32Value` is JSON number.
      +message Int32Value {
      +  // The int32 value.
      +  int32 value = 1;
      +}
      +
      +// Wrapper message for `uint32`.
      +//
      +// The JSON representation for `UInt32Value` is JSON number.
      +message UInt32Value {
      +  // The uint32 value.
      +  uint32 value = 1;
      +}
      +
      +// Wrapper message for `bool`.
      +//
      +// The JSON representation for `BoolValue` is JSON `true` and `false`.
      +message BoolValue {
      +  // The bool value.
      +  bool value = 1;
      +}
      +
      +// Wrapper message for `string`.
      +//
      +// The JSON representation for `StringValue` is JSON string.
      +message StringValue {
      +  // The string value.
      +  string value = 1;
      +}
      +
      +// Wrapper message for `bytes`.
      +//
      +// The JSON representation for `BytesValue` is JSON string.
      +message BytesValue {
      +  // The bytes value.
      +  bytes value = 1;
      +}
      diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
      new file mode 100644
      index 00000000..5f0d1fb6
      --- /dev/null
      +++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
      @@ -0,0 +1,13 @@
      +Copyright 2014 Alan Shreve
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +   http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
      new file mode 100644
      index 00000000..7a950d17
      --- /dev/null
      +++ b/vendor/github.com/inconshreveable/mousetrap/README.md
      @@ -0,0 +1,23 @@
      +# mousetrap
      +
      +mousetrap is a tiny library that answers a single question.
      +
      +On a Windows machine, was the process invoked by someone double clicking on
      +the executable file while browsing in explorer?
      +
      +### Motivation
      +
      +Windows developers unfamiliar with command line tools will often "double-click"
      +the executable for a tool. Because most CLI tools print the help and then exit
      +when invoked without arguments, this is often very frustrating for those users.
      +
      +mousetrap provides a way to detect these invocations so that you can provide
      +more helpful behavior and instructions on how to run the CLI tool. To see what
      +this looks like, both from an organizational and a technical perspective, see
      +https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
      +
      +### The interface
      +
      +The library exposes a single interface:
      +
      +    func StartedByExplorer() (bool)
      diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
      new file mode 100644
      index 00000000..9d2d8a4b
      --- /dev/null
      +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
      @@ -0,0 +1,15 @@
      +// +build !windows
      +
      +package mousetrap
      +
      +// StartedByExplorer returns true if the program was invoked by the user
      +// double-clicking on the executable from explorer.exe
      +//
      +// It is conservative and returns false if any of the internal calls fail.
      +// It does not guarantee that the program was run from a terminal. It only can tell you
      +// whether it was launched from explorer.exe
      +//
      +// On non-Windows platforms, it always returns false.
      +func StartedByExplorer() bool {
      +	return false
      +}
      diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
      new file mode 100644
      index 00000000..336142a5
      --- /dev/null
      +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
      @@ -0,0 +1,98 @@
      +// +build windows
      +// +build !go1.4
      +
      +package mousetrap
      +
      +import (
      +	"fmt"
      +	"os"
      +	"syscall"
      +	"unsafe"
      +)
      +
      +const (
      +	// defined by the Win32 API
      +	th32cs_snapprocess uintptr = 0x2
      +)
      +
      +var (
      +	kernel                   = syscall.MustLoadDLL("kernel32.dll")
      +	CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
      +	Process32First           = kernel.MustFindProc("Process32FirstW")
      +	Process32Next            = kernel.MustFindProc("Process32NextW")
      +)
      +
      +// ProcessEntry32 structure defined by the Win32 API
      +type processEntry32 struct {
      +	dwSize              uint32
      +	cntUsage            uint32
      +	th32ProcessID       uint32
      +	th32DefaultHeapID   int
      +	th32ModuleID        uint32
      +	cntThreads          uint32
      +	th32ParentProcessID uint32
      +	pcPriClassBase      int32
      +	dwFlags             uint32
      +	szExeFile           [syscall.MAX_PATH]uint16
      +}
      +
      +func getProcessEntry(pid int) (pe *processEntry32, err error) {
      +	snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
      +	if snapshot == uintptr(syscall.InvalidHandle) {
      +		err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
      +		return
      +	}
      +	defer syscall.CloseHandle(syscall.Handle(snapshot))
      +
      +	var processEntry processEntry32
      +	processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
      +	ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
      +	if ok == 0 {
      +		err = fmt.Errorf("Process32First: %v", e1)
      +		return
      +	}
      +
      +	for {
      +		if processEntry.th32ProcessID == uint32(pid) {
      +			pe = &processEntry
      +			return
      +		}
      +
      +		ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
      +		if ok == 0 {
      +			err = fmt.Errorf("Process32Next: %v", e1)
      +			return
      +		}
      +	}
      +}
      +
      +func getppid() (pid int, err error) {
      +	pe, err := getProcessEntry(os.Getpid())
      +	if err != nil {
      +		return
      +	}
      +
      +	pid = int(pe.th32ParentProcessID)
      +	return
      +}
      +
      +// StartedByExplorer returns true if the program was invoked by the user double-clicking
      +// on the executable from explorer.exe
      +//
      +// It is conservative and returns false if any of the internal calls fail.
      +// It does not guarantee that the program was run from a terminal. It only can tell you
      +// whether it was launched from explorer.exe
      +func StartedByExplorer() bool {
      +	ppid, err := getppid()
      +	if err != nil {
      +		return false
      +	}
      +
      +	pe, err := getProcessEntry(ppid)
      +	if err != nil {
      +		return false
      +	}
      +
      +	name := syscall.UTF16ToString(pe.szExeFile[:])
      +	return name == "explorer.exe"
      +}
      diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
      new file mode 100644
      index 00000000..9a28e57c
      --- /dev/null
      +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
      @@ -0,0 +1,46 @@
      +// +build windows
      +// +build go1.4
      +
      +package mousetrap
      +
      +import (
      +	"os"
      +	"syscall"
      +	"unsafe"
      +)
      +
      +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
      +	snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
      +	if err != nil {
      +		return nil, err
      +	}
      +	defer syscall.CloseHandle(snapshot)
      +	var procEntry syscall.ProcessEntry32
      +	procEntry.Size = uint32(unsafe.Sizeof(procEntry))
      +	if err = syscall.Process32First(snapshot, &procEntry); err != nil {
      +		return nil, err
      +	}
      +	for {
      +		if procEntry.ProcessID == uint32(pid) {
      +			return &procEntry, nil
      +		}
      +		err = syscall.Process32Next(snapshot, &procEntry)
      +		if err != nil {
      +			return nil, err
      +		}
      +	}
      +}
      +
      +// StartedByExplorer returns true if the program was invoked by the user double-clicking
      +// on the executable from explorer.exe
      +//
      +// It is conservative and returns false if any of the internal calls fail.
      +// It does not guarantee that the program was run from a terminal. It only can tell you
      +// whether it was launched from explorer.exe
      +func StartedByExplorer() bool {
      +	pe, err := getProcessEntry(os.Getppid())
      +	if err != nil {
      +		return false
      +	}
      +	return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
      +}
      diff --git a/vendor/github.com/pmezard/go-difflib/.travis.yml b/vendor/github.com/pmezard/go-difflib/.travis.yml
      new file mode 100644
      index 00000000..90c9c6f9
      --- /dev/null
      +++ b/vendor/github.com/pmezard/go-difflib/.travis.yml
      @@ -0,0 +1,5 @@
      +language: go
      +go:
      +  - 1.5
      +  - tip
      +
      diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE
      new file mode 100644
      index 00000000..c67dad61
      --- /dev/null
      +++ b/vendor/github.com/pmezard/go-difflib/LICENSE
      @@ -0,0 +1,27 @@
      +Copyright (c) 2013, Patrick Mezard
      +All rights reserved.
      +
      +Redistribution and use in source and binary forms, with or without
      +modification, are permitted provided that the following conditions are
      +met:
      +
      +    Redistributions of source code must retain the above copyright
      +notice, this list of conditions and the following disclaimer.
      +    Redistributions in binary form must reproduce the above copyright
      +notice, this list of conditions and the following disclaimer in the
      +documentation and/or other materials provided with the distribution.
      +    The names of its contributors may not be used to endorse or promote
      +products derived from this software without specific prior written
      +permission.
      +
      +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
      +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
      +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
      +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
      +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
      +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
      +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
      +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
      +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      diff --git a/vendor/github.com/pmezard/go-difflib/README.md b/vendor/github.com/pmezard/go-difflib/README.md
      new file mode 100644
      index 00000000..e87f307e
      --- /dev/null
      +++ b/vendor/github.com/pmezard/go-difflib/README.md
      @@ -0,0 +1,50 @@
      +go-difflib
      +==========
      +
      +[![Build Status](https://travis-ci.org/pmezard/go-difflib.png?branch=master)](https://travis-ci.org/pmezard/go-difflib)
      +[![GoDoc](https://godoc.org/github.com/pmezard/go-difflib/difflib?status.svg)](https://godoc.org/github.com/pmezard/go-difflib/difflib)
      +
      +Go-difflib is a partial port of python 3 difflib package. Its main goal
      +was to make unified and context diff available in pure Go, mostly for
      +testing purposes.
      +
      +The following class and functions (and related tests) have be ported:
      +
      +* `SequenceMatcher`
      +* `unified_diff()`
      +* `context_diff()`
      +
      +## Installation
      +
      +```bash
      +$ go get github.com/pmezard/go-difflib/difflib
      +```
      +
      +### Quick Start
      +
      +Diffs are configured with Unified (or ContextDiff) structures, and can
      +be output to an io.Writer or returned as a string.
      +
      +```Go
      +diff := UnifiedDiff{
      +    A:        difflib.SplitLines("foo\nbar\n"),
      +    B:        difflib.SplitLines("foo\nbaz\n"),
      +    FromFile: "Original",
      +    ToFile:   "Current",
      +    Context:  3,
      +}
      +text, _ := GetUnifiedDiffString(diff)
      +fmt.Printf(text)
      +```
      +
      +would output:
      +
      +```
      +--- Original
      ++++ Current
      +@@ -1,3 +1,3 @@
      + foo
      +-bar
      ++baz
      +```
      +
      diff --git a/vendor/github.com/spf13/pflag/verify/all.sh b/vendor/github.com/spf13/pflag/verify/all.sh
      old mode 100644
      new mode 100755
      diff --git a/vendor/github.com/spf13/pflag/verify/gofmt.sh b/vendor/github.com/spf13/pflag/verify/gofmt.sh
      old mode 100644
      new mode 100755
      diff --git a/vendor/github.com/spf13/pflag/verify/golint.sh b/vendor/github.com/spf13/pflag/verify/golint.sh
      old mode 100644
      new mode 100755
      diff --git a/vendor/github.com/stretchr/testify/.gitignore b/vendor/github.com/stretchr/testify/.gitignore
      new file mode 100644
      index 00000000..5aacdb7c
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/.gitignore
      @@ -0,0 +1,24 @@
      +# Compiled Object files, Static and Dynamic libs (Shared Objects)
      +*.o
      +*.a
      +*.so
      +
      +# Folders
      +_obj
      +_test
      +
      +# Architecture specific extensions/prefixes
      +*.[568vq]
      +[568vq].out
      +
      +*.cgo1.go
      +*.cgo2.c
      +_cgo_defun.c
      +_cgo_gotypes.go
      +_cgo_export.*
      +
      +_testmain.go
      +
      +*.exe
      +
      +.DS_Store
      diff --git a/vendor/github.com/stretchr/testify/.travis.yml b/vendor/github.com/stretchr/testify/.travis.yml
      new file mode 100644
      index 00000000..455923ec
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/.travis.yml
      @@ -0,0 +1,15 @@
      +language: go
      +
      +sudo: false
      +
      +go:
      +  - 1.1
      +  - 1.2
      +  - 1.3
      +  - 1.4
      +  - 1.5
      +  - 1.6
      +  - tip
      +
      +script:
      +  - go test -v ./...
      diff --git a/vendor/github.com/stretchr/testify/Godeps/Godeps.json b/vendor/github.com/stretchr/testify/Godeps/Godeps.json
      new file mode 100644
      index 00000000..b206a609
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/Godeps/Godeps.json
      @@ -0,0 +1,21 @@
      +{
      +	"ImportPath": "github.com/stretchr/testify",
      +	"GoVersion": "go1.5",
      +	"Packages": [
      +		"./..."
      +	],
      +	"Deps": [
      +		{
      +			"ImportPath": "github.com/davecgh/go-spew/spew",
      +			"Rev": "2df174808ee097f90d259e432cc04442cf60be21"
      +		},
      +		{
      +			"ImportPath": "github.com/pmezard/go-difflib/difflib",
      +			"Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d"
      +		},
      +		{
      +			"ImportPath": "github.com/stretchr/objx",
      +			"Rev": "cbeaeb16a013161a98496fad62933b1d21786672"
      +		}
      +	]
      +}
      diff --git a/vendor/github.com/stretchr/testify/Godeps/Readme b/vendor/github.com/stretchr/testify/Godeps/Readme
      new file mode 100644
      index 00000000..4cdaa53d
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/Godeps/Readme
      @@ -0,0 +1,5 @@
      +This directory tree is generated automatically by godep.
      +
      +Please do not edit.
      +
      +See https://github.com/tools/godep for more information.
      diff --git a/vendor/github.com/stretchr/testify/LICENCE.txt b/vendor/github.com/stretchr/testify/LICENCE.txt
      new file mode 100644
      index 00000000..473b670a
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/LICENCE.txt
      @@ -0,0 +1,22 @@
      +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
      +
      +Please consider promoting this project if you find it useful.
      +
      +Permission is hereby granted, free of charge, to any person 
      +obtaining a copy of this software and associated documentation 
      +files (the "Software"), to deal in the Software without restriction, 
      +including without limitation the rights to use, copy, modify, merge, 
      +publish, distribute, sublicense, and/or sell copies of the Software, 
      +and to permit persons to whom the Software is furnished to do so, 
      +subject to the following conditions:
      +
      +The above copyright notice and this permission notice shall be included
      +in all copies or substantial portions of the Software.
      +
      +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
      +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 
      +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
      +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, 
      +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT 
      +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
      +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
      diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
      new file mode 100644
      index 00000000..473b670a
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/LICENSE
      @@ -0,0 +1,22 @@
      +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
      +
      +Please consider promoting this project if you find it useful.
      +
      +Permission is hereby granted, free of charge, to any person 
      +obtaining a copy of this software and associated documentation 
      +files (the "Software"), to deal in the Software without restriction, 
      +including without limitation the rights to use, copy, modify, merge, 
      +publish, distribute, sublicense, and/or sell copies of the Software, 
      +and to permit persons to whom the Software is furnished to do so, 
      +subject to the following conditions:
      +
      +The above copyright notice and this permission notice shall be included
      +in all copies or substantial portions of the Software.
      +
      +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
      +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 
      +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
      +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, 
      +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT 
      +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
      +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
      diff --git a/vendor/github.com/stretchr/testify/README.md b/vendor/github.com/stretchr/testify/README.md
      new file mode 100644
      index 00000000..aaf2aa0a
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/README.md
      @@ -0,0 +1,332 @@
      +Testify - Thou Shalt Write Tests
      +================================
      +
      +[![Build Status](https://travis-ci.org/stretchr/testify.svg)](https://travis-ci.org/stretchr/testify)
      +
      +Go code (golang) set of packages that provide many tools for testifying that your code will behave as you intend.
      +
      +Features include:
      +
      +  * [Easy assertions](#assert-package)
      +  * [Mocking](#mock-package)
      +  * [HTTP response trapping](#http-package)
      +  * [Testing suite interfaces and functions](#suite-package)
      +
      +Get started:
      +
      +  * Install testify with [one line of code](#installation), or [update it with another](#staying-up-to-date)
      +  * For an introduction to writing test code in Go, see http://golang.org/doc/code.html#Testing
      +  * Check out the API Documentation http://godoc.org/github.com/stretchr/testify
      +  * To make your testing life easier, check out our other project, [gorc](http://github.com/stretchr/gorc)
      +  * A little about [Test-Driven Development (TDD)](http://en.wikipedia.org/wiki/Test-driven_development)
      +
      +
      +
      +[`assert`](http://godoc.org/github.com/stretchr/testify/assert "API documentation") package
      +-------------------------------------------------------------------------------------------
      +
      +The `assert` package provides some helpful methods that allow you to write better test code in Go.
      +
      +  * Prints friendly, easy to read failure descriptions
      +  * Allows for very readable code
      +  * Optionally annotate each assertion with a message
      +
      +See it in action:
      +
      +```go
      +package yours
      +
      +import (
      +  "testing"
      +  "github.com/stretchr/testify/assert"
      +)
      +
      +func TestSomething(t *testing.T) {
      +
      +  // assert equality
      +  assert.Equal(t, 123, 123, "they should be equal")
      +
      +  // assert inequality
      +  assert.NotEqual(t, 123, 456, "they should not be equal")
      +
      +  // assert for nil (good for errors)
      +  assert.Nil(t, object)
      +
      +  // assert for not nil (good when you expect something)
      +  if assert.NotNil(t, object) {
      +
      +    // now we know that object isn't nil, we are safe to make
      +    // further assertions without causing any errors
      +    assert.Equal(t, "Something", object.Value)
      +
      +  }
      +
      +}
      +```
      +
      +  * Every assert func takes the `testing.T` object as the first argument.  This is how it writes the errors out through the normal `go test` capabilities.
      +  * Every assert func returns a bool indicating whether the assertion was successful or not, this is useful for if you want to go on making further assertions under certain conditions.
      +
      +if you assert many times, use the below:
      +
      +```go
      +package yours
      +
      +import (
      +  "testing"
      +  "github.com/stretchr/testify/assert"
      +)
      +
      +func TestSomething(t *testing.T) {
      +  assert := assert.New(t)
      +
      +  // assert equality
      +  assert.Equal(123, 123, "they should be equal")
      +
      +  // assert inequality
      +  assert.NotEqual(123, 456, "they should not be equal")
      +
      +  // assert for nil (good for errors)
      +  assert.Nil(object)
      +
      +  // assert for not nil (good when you expect something)
      +  if assert.NotNil(object) {
      +
      +    // now we know that object isn't nil, we are safe to make
      +    // further assertions without causing any errors
      +    assert.Equal("Something", object.Value)
      +  }
      +}
      +```
      +
      +[`require`](http://godoc.org/github.com/stretchr/testify/require "API documentation") package
      +---------------------------------------------------------------------------------------------
      +
+The `require` package provides the same global functions as the `assert` package, but instead of returning a boolean result they terminate the current test.
      +
      +See [t.FailNow](http://golang.org/pkg/testing/#T.FailNow) for details.
      +
      +
      +[`http`](http://godoc.org/github.com/stretchr/testify/http "API documentation") package
      +---------------------------------------------------------------------------------------
      +
      +The `http` package contains test objects useful for testing code that relies on the `net/http` package.  Check out the [(deprecated) API documentation for the `http` package](http://godoc.org/github.com/stretchr/testify/http).
      +
      +We recommend you use [httptest](http://golang.org/pkg/net/http/httptest) instead.
      +
      +[`mock`](http://godoc.org/github.com/stretchr/testify/mock "API documentation") package
      +----------------------------------------------------------------------------------------
      +
      +The `mock` package provides a mechanism for easily writing mock objects that can be used in place of real objects when writing test code.
      +
      +An example test function that tests a piece of code that relies on an external object `testObj`, can setup expectations (testify) and assert that they indeed happened:
      +
      +```go
      +package yours
      +
      +import (
      +  "testing"
      +  "github.com/stretchr/testify/mock"
      +)
      +
      +/*
      +  Test objects
      +*/
      +
      +// MyMockedObject is a mocked object that implements an interface
      +// that describes an object that the code I am testing relies on.
      +type MyMockedObject struct{
      +  mock.Mock
      +}
      +
      +// DoSomething is a method on MyMockedObject that implements some interface
      +// and just records the activity, and returns what the Mock object tells it to.
      +//
      +// In the real object, this method would do something useful, but since this
      +// is a mocked object - we're just going to stub it out.
      +//
      +// NOTE: This method is not being tested here, code that uses this object is.
      +func (m *MyMockedObject) DoSomething(number int) (bool, error) {
      +
      +  args := m.Called(number)
      +  return args.Bool(0), args.Error(1)
      +
      +}
      +
      +/*
      +  Actual test functions
      +*/
      +
      +// TestSomething is an example of how to use our test object to
      +// make assertions about some target code we are testing.
      +func TestSomething(t *testing.T) {
      +
      +  // create an instance of our test object
      +  testObj := new(MyMockedObject)
      +
      +  // setup expectations
      +  testObj.On("DoSomething", 123).Return(true, nil)
      +
      +  // call the code we are testing
      +  targetFuncThatDoesSomethingWithObj(testObj)
      +
      +  // assert that the expectations were met
      +  testObj.AssertExpectations(t)
      +
      +}
      +```
      +
      +For more information on how to write mock code, check out the [API documentation for the `mock` package](http://godoc.org/github.com/stretchr/testify/mock).
      +
      +You can use the [mockery tool](http://github.com/vektra/mockery) to autogenerate the mock code against an interface as well, making using mocks much quicker.
      +
      +[`suite`](http://godoc.org/github.com/stretchr/testify/suite "API documentation") package
      +-----------------------------------------------------------------------------------------
      +
      +The `suite` package provides functionality that you might be used to from more common object oriented languages.  With it, you can build a testing suite as a struct, build setup/teardown methods and testing methods on your struct, and run them with 'go test' as per normal.
      +
      +An example suite is shown below:
      +
      +```go
      +// Basic imports
      +import (
      +    "testing"
      +    "github.com/stretchr/testify/assert"
      +    "github.com/stretchr/testify/suite"
      +)
      +
      +// Define the suite, and absorb the built-in basic suite
      +// functionality from testify - including a T() method which
      +// returns the current testing context
      +type ExampleTestSuite struct {
      +    suite.Suite
      +    VariableThatShouldStartAtFive int
      +}
      +
      +// Make sure that VariableThatShouldStartAtFive is set to five
      +// before each test
      +func (suite *ExampleTestSuite) SetupTest() {
      +    suite.VariableThatShouldStartAtFive = 5
      +}
      +
      +// All methods that begin with "Test" are run as tests within a
      +// suite.
      +func (suite *ExampleTestSuite) TestExample() {
      +    assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive)
      +}
      +
      +// In order for 'go test' to run this suite, we need to create
      +// a normal test function and pass our suite to suite.Run
      +func TestExampleTestSuite(t *testing.T) {
      +    suite.Run(t, new(ExampleTestSuite))
      +}
      +```
      +
      +For a more complete example, using all of the functionality provided by the suite package, look at our [example testing suite](https://github.com/stretchr/testify/blob/master/suite/suite_test.go)
      +
      +For more information on writing suites, check out the [API documentation for the `suite` package](http://godoc.org/github.com/stretchr/testify/suite).
      +
      +`Suite` object has assertion methods:
      +
      +```go
      +// Basic imports
      +import (
      +    "testing"
      +    "github.com/stretchr/testify/suite"
      +)
      +
      +// Define the suite, and absorb the built-in basic suite
      +// functionality from testify - including assertion methods.
      +type ExampleTestSuite struct {
      +    suite.Suite
      +    VariableThatShouldStartAtFive int
      +}
      +
      +// Make sure that VariableThatShouldStartAtFive is set to five
      +// before each test
      +func (suite *ExampleTestSuite) SetupTest() {
      +    suite.VariableThatShouldStartAtFive = 5
      +}
      +
      +// All methods that begin with "Test" are run as tests within a
      +// suite.
      +func (suite *ExampleTestSuite) TestExample() {
      +    suite.Equal(suite.VariableThatShouldStartAtFive, 5)
      +}
      +
      +// In order for 'go test' to run this suite, we need to create
      +// a normal test function and pass our suite to suite.Run
      +func TestExampleTestSuite(t *testing.T) {
      +    suite.Run(t, new(ExampleTestSuite))
      +}
      +```
      +
      +------
      +
      +Installation
      +============
      +
      +To install Testify, use `go get`:
      +
      +    * Latest version: go get github.com/stretchr/testify
      +    * Specific version: go get gopkg.in/stretchr/testify.v1
      +
      +This will then make the following packages available to you:
      +
      +    github.com/stretchr/testify/assert
      +    github.com/stretchr/testify/mock
      +    github.com/stretchr/testify/http
      +
      +Import the `testify/assert` package into your code using this template:
      +
      +```go
      +package yours
      +
      +import (
      +  "testing"
      +  "github.com/stretchr/testify/assert"
      +)
      +
      +func TestSomething(t *testing.T) {
      +
      +  assert.True(t, true, "True is true!")
      +
      +}
      +```
      +
      +------
      +
      +Staying up to date
      +==================
      +
      +To update Testify to the latest version, use `go get -u github.com/stretchr/testify`.
      +
      +------
      +
      +Version History
      +===============
      +
      +   * 1.0 - New package versioning strategy adopted.
      +
      +------
      +
      +Contributing
      +============
      +
      +Please feel free to submit issues, fork the repository and send pull requests!
      +
      +When submitting an issue, we ask that you please include a complete test function that demonstrates the issue.  Extra credit for those using Testify to write the test code that demonstrates it.
      +
      +------
      +
      +Licence
      +=======
      +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
      +
      +Please consider promoting this project if you find it useful.
      +
      +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
      +
      +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
      +
      +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
      diff --git a/vendor/github.com/stretchr/testify/_codegen/main.go b/vendor/github.com/stretchr/testify/_codegen/main.go
      new file mode 100644
      index 00000000..328009f8
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/_codegen/main.go
      @@ -0,0 +1,287 @@
      +// This program reads all assertion functions from the assert package and
+// automatically generates the corresponding requires and forwarded assertions
      +
      +package main
      +
      +import (
      +	"bytes"
      +	"flag"
      +	"fmt"
      +	"go/ast"
      +	"go/build"
      +	"go/doc"
      +	"go/importer"
      +	"go/parser"
      +	"go/token"
      +	"go/types"
      +	"io"
      +	"io/ioutil"
      +	"log"
      +	"os"
      +	"path"
      +	"strings"
      +	"text/template"
      +
      +	"github.com/ernesto-jimenez/gogen/imports"
      +)
      +
      +var (
      +	pkg       = flag.String("assert-path", "github.com/stretchr/testify/assert", "Path to the assert package")
      +	outputPkg = flag.String("output-package", "", "package for the resulting code")
      +	tmplFile  = flag.String("template", "", "What file to load the function template from")
      +	out       = flag.String("out", "", "What file to write the source code to")
      +)
      +
      +func main() {
      +	flag.Parse()
      +
      +	scope, docs, err := parsePackageSource(*pkg)
      +	if err != nil {
      +		log.Fatal(err)
      +	}
      +
      +	importer, funcs, err := analyzeCode(scope, docs)
      +	if err != nil {
      +		log.Fatal(err)
      +	}
      +
      +	if err := generateCode(importer, funcs); err != nil {
      +		log.Fatal(err)
      +	}
      +}
      +
      +func generateCode(importer imports.Importer, funcs []testFunc) error {
      +	buff := bytes.NewBuffer(nil)
      +
      +	tmplHead, tmplFunc, err := parseTemplates()
      +	if err != nil {
      +		return err
      +	}
      +
      +	// Generate header
      +	if err := tmplHead.Execute(buff, struct {
      +		Name    string
      +		Imports map[string]string
      +	}{
      +		*outputPkg,
      +		importer.Imports(),
      +	}); err != nil {
      +		return err
      +	}
      +
      +	// Generate funcs
      +	for _, fn := range funcs {
      +		buff.Write([]byte("\n\n"))
      +		if err := tmplFunc.Execute(buff, &fn); err != nil {
      +			return err
      +		}
      +	}
      +
      +	// Write file
      +	output, err := outputFile()
      +	if err != nil {
      +		return err
      +	}
      +	defer output.Close()
      +	_, err = io.Copy(output, buff)
      +	return err
      +}
      +
      +func parseTemplates() (*template.Template, *template.Template, error) {
      +	tmplHead, err := template.New("header").Parse(headerTemplate)
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +	if *tmplFile != "" {
      +		f, err := ioutil.ReadFile(*tmplFile)
      +		if err != nil {
      +			return nil, nil, err
      +		}
      +		funcTemplate = string(f)
      +	}
      +	tmpl, err := template.New("function").Parse(funcTemplate)
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +	return tmplHead, tmpl, nil
      +}
      +
      +func outputFile() (*os.File, error) {
      +	filename := *out
      +	if filename == "-" || (filename == "" && *tmplFile == "") {
      +		return os.Stdout, nil
      +	}
      +	if filename == "" {
      +		filename = strings.TrimSuffix(strings.TrimSuffix(*tmplFile, ".tmpl"), ".go") + ".go"
      +	}
      +	return os.Create(filename)
      +}
      +
      +// analyzeCode takes the types scope and the docs and returns the import
      +// information and information about all the assertion functions.
      +func analyzeCode(scope *types.Scope, docs *doc.Package) (imports.Importer, []testFunc, error) {
      +	testingT := scope.Lookup("TestingT").Type().Underlying().(*types.Interface)
      +
      +	importer := imports.New(*outputPkg)
      +	var funcs []testFunc
      +	// Go through all the top level functions
      +	for _, fdocs := range docs.Funcs {
      +		// Find the function
      +		obj := scope.Lookup(fdocs.Name)
      +
      +		fn, ok := obj.(*types.Func)
      +		if !ok {
      +			continue
      +		}
+		// Check function signature has at least two arguments
      +		sig := fn.Type().(*types.Signature)
      +		if sig.Params().Len() < 2 {
      +			continue
      +		}
      +		// Check first argument is of type testingT
      +		first, ok := sig.Params().At(0).Type().(*types.Named)
      +		if !ok {
      +			continue
      +		}
      +		firstType, ok := first.Underlying().(*types.Interface)
      +		if !ok {
      +			continue
      +		}
      +		if !types.Implements(firstType, testingT) {
      +			continue
      +		}
      +
      +		funcs = append(funcs, testFunc{*outputPkg, fdocs, fn})
      +		importer.AddImportsFrom(sig.Params())
      +	}
      +	return importer, funcs, nil
      +}
      +
+// parsePackageSource returns the types scope and the package documentation from the package
      +func parsePackageSource(pkg string) (*types.Scope, *doc.Package, error) {
      +	pd, err := build.Import(pkg, ".", 0)
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +
      +	fset := token.NewFileSet()
      +	files := make(map[string]*ast.File)
      +	fileList := make([]*ast.File, len(pd.GoFiles))
      +	for i, fname := range pd.GoFiles {
      +		src, err := ioutil.ReadFile(path.Join(pd.SrcRoot, pd.ImportPath, fname))
      +		if err != nil {
      +			return nil, nil, err
      +		}
      +		f, err := parser.ParseFile(fset, fname, src, parser.ParseComments|parser.AllErrors)
      +		if err != nil {
      +			return nil, nil, err
      +		}
      +		files[fname] = f
      +		fileList[i] = f
      +	}
      +
      +	cfg := types.Config{
      +		Importer: importer.Default(),
      +	}
      +	info := types.Info{
      +		Defs: make(map[*ast.Ident]types.Object),
      +	}
      +	tp, err := cfg.Check(pkg, fset, fileList, &info)
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +
      +	scope := tp.Scope()
      +
      +	ap, _ := ast.NewPackage(fset, files, nil, nil)
      +	docs := doc.New(ap, pkg, 0)
      +
      +	return scope, docs, nil
      +}
      +
      +type testFunc struct {
      +	CurrentPkg string
      +	DocInfo    *doc.Func
      +	TypeInfo   *types.Func
      +}
      +
      +func (f *testFunc) Qualifier(p *types.Package) string {
      +	if p == nil || p.Name() == f.CurrentPkg {
      +		return ""
      +	}
      +	return p.Name()
      +}
      +
      +func (f *testFunc) Params() string {
      +	sig := f.TypeInfo.Type().(*types.Signature)
      +	params := sig.Params()
      +	p := ""
      +	comma := ""
      +	to := params.Len()
      +	var i int
      +
      +	if sig.Variadic() {
      +		to--
      +	}
      +	for i = 1; i < to; i++ {
      +		param := params.At(i)
      +		p += fmt.Sprintf("%s%s %s", comma, param.Name(), types.TypeString(param.Type(), f.Qualifier))
      +		comma = ", "
      +	}
      +	if sig.Variadic() {
      +		param := params.At(params.Len() - 1)
      +		p += fmt.Sprintf("%s%s ...%s", comma, param.Name(), types.TypeString(param.Type().(*types.Slice).Elem(), f.Qualifier))
      +	}
      +	return p
      +}
      +
      +func (f *testFunc) ForwardedParams() string {
      +	sig := f.TypeInfo.Type().(*types.Signature)
      +	params := sig.Params()
      +	p := ""
      +	comma := ""
      +	to := params.Len()
      +	var i int
      +
      +	if sig.Variadic() {
      +		to--
      +	}
      +	for i = 1; i < to; i++ {
      +		param := params.At(i)
      +		p += fmt.Sprintf("%s%s", comma, param.Name())
      +		comma = ", "
      +	}
      +	if sig.Variadic() {
      +		param := params.At(params.Len() - 1)
      +		p += fmt.Sprintf("%s%s...", comma, param.Name())
      +	}
      +	return p
      +}
      +
      +func (f *testFunc) Comment() string {
      +	return "// " + strings.Replace(strings.TrimSpace(f.DocInfo.Doc), "\n", "\n// ", -1)
      +}
      +
      +func (f *testFunc) CommentWithoutT(receiver string) string {
      +	search := fmt.Sprintf("assert.%s(t, ", f.DocInfo.Name)
      +	replace := fmt.Sprintf("%s.%s(", receiver, f.DocInfo.Name)
      +	return strings.Replace(f.Comment(), search, replace, -1)
      +}
      +
      +var headerTemplate = `/*
      +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
      +* THIS FILE MUST NOT BE EDITED BY HAND
      +*/
      +
      +package {{.Name}}
      +
      +import (
      +{{range $path, $name := .Imports}}
      +	{{$name}} "{{$path}}"{{end}}
      +)
      +`
      +
      +var funcTemplate = `{{.Comment}}
      +func (fwd *AssertionsForwarder) {{.DocInfo.Name}}({{.Params}}) bool {
      +	return assert.{{.DocInfo.Name}}({{.ForwardedParams}})
      +}`
      diff --git a/vendor/github.com/stretchr/testify/doc.go b/vendor/github.com/stretchr/testify/doc.go
      new file mode 100644
      index 00000000..377d5cc5
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/doc.go
      @@ -0,0 +1,22 @@
      +// Package testify is a set of packages that provide many tools for testifying that your code will behave as you intend.
      +//
      +// testify contains the following packages:
      +//
      +// The assert package provides a comprehensive set of assertion functions that tie in to the Go testing system.
      +//
      +// The http package contains tools to make it easier to test http activity using the Go testing system.
      +//
      +// The mock package provides a system by which it is possible to mock your objects and verify calls are happening as expected.
      +//
      +// The suite package provides a basic structure for using structs as testing suites, and methods on those structs as tests.  It includes setup/teardown functionality in the way of interfaces.
      +package testify
      +
      +// blank imports help docs.
      +import (
      +	// assert package
      +	_ "github.com/stretchr/testify/assert"
      +	// http package
      +	_ "github.com/stretchr/testify/http"
      +	// mock package
      +	_ "github.com/stretchr/testify/mock"
      +)
      diff --git a/vendor/github.com/stretchr/testify/http/doc.go b/vendor/github.com/stretchr/testify/http/doc.go
      new file mode 100644
      index 00000000..695167c6
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/http/doc.go
      @@ -0,0 +1,2 @@
      +// Package http DEPRECATED USE net/http/httptest
      +package http
      diff --git a/vendor/github.com/stretchr/testify/http/test_response_writer.go b/vendor/github.com/stretchr/testify/http/test_response_writer.go
      new file mode 100644
      index 00000000..5c3f813f
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/http/test_response_writer.go
      @@ -0,0 +1,49 @@
      +package http
      +
      +import (
      +	"net/http"
      +)
      +
      +// TestResponseWriter DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead.
      +type TestResponseWriter struct {
      +
      +	// StatusCode is the last int written by the call to WriteHeader(int)
      +	StatusCode int
      +
      +	// Output is a string containing the written bytes using the Write([]byte) func.
      +	Output string
      +
      +	// header is the internal storage of the http.Header object
      +	header http.Header
      +}
      +
      +// Header DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead.
      +func (rw *TestResponseWriter) Header() http.Header {
      +
      +	if rw.header == nil {
      +		rw.header = make(http.Header)
      +	}
      +
      +	return rw.header
      +}
      +
      +// Write DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead.
      +func (rw *TestResponseWriter) Write(bytes []byte) (int, error) {
      +
      +	// assume 200 success if no header has been set
      +	if rw.StatusCode == 0 {
      +		rw.WriteHeader(200)
      +	}
      +
      +	// add these bytes to the output string
      +	rw.Output = rw.Output + string(bytes)
      +
      +	// return normal values
      +	return 0, nil
      +
      +}
      +
      +// WriteHeader DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead.
      +func (rw *TestResponseWriter) WriteHeader(i int) {
      +	rw.StatusCode = i
      +}
      diff --git a/vendor/github.com/stretchr/testify/http/test_round_tripper.go b/vendor/github.com/stretchr/testify/http/test_round_tripper.go
      new file mode 100644
      index 00000000..b1e32f1d
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/http/test_round_tripper.go
      @@ -0,0 +1,17 @@
      +package http
      +
      +import (
      +	"github.com/stretchr/testify/mock"
      +	"net/http"
      +)
      +
      +// TestRoundTripper DEPRECATED USE net/http/httptest
      +type TestRoundTripper struct {
      +	mock.Mock
      +}
      +
      +// RoundTrip DEPRECATED USE net/http/httptest
      +func (t *TestRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
      +	args := t.Called(req)
      +	return args.Get(0).(*http.Response), args.Error(1)
      +}
      diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go
      new file mode 100644
      index 00000000..7324128e
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/mock/doc.go
      @@ -0,0 +1,44 @@
      +// Package mock provides a system by which it is possible to mock your objects
      +// and verify calls are happening as expected.
      +//
      +// Example Usage
      +//
      +// The mock package provides an object, Mock, that tracks activity on another object.  It is usually
      +// embedded into a test object as shown below:
      +//
      +//   type MyTestObject struct {
      +//     // add a Mock object instance
      +//     mock.Mock
      +//
      +//     // other fields go here as normal
      +//   }
      +//
      +// When implementing the methods of an interface, you wire your functions up
      +// to call the Mock.Called(args...) method, and return the appropriate values.
      +//
      +// For example, to mock a method that saves the name and age of a person and returns
      +// the year of their birth or an error, you might write this:
      +//
      +//     func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) {
      +//       args := o.Called(firstname, lastname, age)
      +//       return args.Int(0), args.Error(1)
      +//     }
      +//
      +// The Int, Error and Bool methods are examples of strongly typed getters that take the argument
      +// index position. Given this argument list:
      +//
      +//     (12, true, "Something")
      +//
      +// You could read them out strongly typed like this:
      +//
      +//     args.Int(0)
      +//     args.Bool(1)
      +//     args.String(2)
      +//
      +// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion:
      +//
      +//     return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine)
      +//
      +// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those
      +// cases you should check for nil first.
      +package mock
      diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go
      new file mode 100644
      index 00000000..03cc0f6b
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/mock/mock.go
      @@ -0,0 +1,683 @@
      +package mock
      +
      +import (
      +	"fmt"
      +	"reflect"
      +	"regexp"
      +	"runtime"
      +	"strings"
      +	"sync"
      +	"time"
      +
      +	"github.com/stretchr/objx"
      +	"github.com/stretchr/testify/assert"
      +)
      +
      +// TestingT is an interface wrapper around *testing.T
      +type TestingT interface {
      +	Logf(format string, args ...interface{})
      +	Errorf(format string, args ...interface{})
      +	FailNow()
      +}
      +
      +/*
      +	Call
      +*/
      +
      +// Call represents a method call and is used for setting expectations,
      +// as well as recording activity.
      +type Call struct {
      +	Parent *Mock
      +
      +	// The name of the method that was or will be called.
      +	Method string
      +
      +	// Holds the arguments of the method.
      +	Arguments Arguments
      +
      +	// Holds the arguments that should be returned when
      +	// this method is called.
      +	ReturnArguments Arguments
      +
      +	// The number of times to return the return arguments when setting
      +	// expectations. 0 means to always return the value.
      +	Repeatability int
      +
      +	// Holds a channel that will be used to block the Return until it either
+	// receives a message or is closed. nil means it returns immediately.
      +	WaitFor <-chan time.Time
      +
      +	// Holds a handler used to manipulate arguments content that are passed by
      +	// reference. It's useful when mocking methods such as unmarshalers or
      +	// decoders.
      +	RunFn func(Arguments)
      +}
      +
      +func newCall(parent *Mock, methodName string, methodArguments ...interface{}) *Call {
      +	return &Call{
      +		Parent:          parent,
      +		Method:          methodName,
      +		Arguments:       methodArguments,
      +		ReturnArguments: make([]interface{}, 0),
      +		Repeatability:   0,
      +		WaitFor:         nil,
      +		RunFn:           nil,
      +	}
      +}
      +
      +func (c *Call) lock() {
      +	c.Parent.mutex.Lock()
      +}
      +
      +func (c *Call) unlock() {
      +	c.Parent.mutex.Unlock()
      +}
      +
      +// Return specifies the return arguments for the expectation.
      +//
      +//    Mock.On("DoSomething").Return(errors.New("failed"))
      +func (c *Call) Return(returnArguments ...interface{}) *Call {
      +	c.lock()
      +	defer c.unlock()
      +
      +	c.ReturnArguments = returnArguments
      +
      +	return c
      +}
      +
+// Once indicates that the mock should only return the value once.
      +//
      +//    Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once()
      +func (c *Call) Once() *Call {
      +	return c.Times(1)
      +}
      +
+// Twice indicates that the mock should only return the value twice.
      +//
      +//    Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice()
      +func (c *Call) Twice() *Call {
      +	return c.Times(2)
      +}
      +
+// Times indicates that the mock should only return the indicated number
      +// of times.
      +//
      +//    Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5)
      +func (c *Call) Times(i int) *Call {
      +	c.lock()
      +	defer c.unlock()
      +	c.Repeatability = i
      +	return c
      +}
      +
      +// WaitUntil sets the channel that will block the mock's return until its closed
      +// or a message is received.
      +//
      +//    Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second))
      +func (c *Call) WaitUntil(w <-chan time.Time) *Call {
      +	c.lock()
      +	defer c.unlock()
      +	c.WaitFor = w
      +	return c
      +}
      +
      +// After sets how long to block until the call returns
      +//
      +//    Mock.On("MyMethod", arg1, arg2).After(time.Second)
      +func (c *Call) After(d time.Duration) *Call {
      +	return c.WaitUntil(time.After(d))
      +}
      +
      +// Run sets a handler to be called before returning. It can be used when
      +// mocking a method such as unmarshalers that takes a pointer to a struct and
      +// sets properties in such struct
      +//
+//    Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}")).Return().Run(func(args Arguments) {
      +//    	arg := args.Get(0).(*map[string]interface{})
      +//    	arg["foo"] = "bar"
      +//    })
      +func (c *Call) Run(fn func(Arguments)) *Call {
      +	c.lock()
      +	defer c.unlock()
      +	c.RunFn = fn
      +	return c
      +}
      +
      +// On chains a new expectation description onto the mocked interface. This
      +// allows syntax like.
      +//
      +//    Mock.
      +//       On("MyMethod", 1).Return(nil).
      +//       On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error"))
      +func (c *Call) On(methodName string, arguments ...interface{}) *Call {
      +	return c.Parent.On(methodName, arguments...)
      +}
      +
      +// Mock is the workhorse used to track activity on another object.
      +// For an example of its usage, refer to the "Example Usage" section at the top
      +// of this document.
      +type Mock struct {
      +	// Represents the calls that are expected of
      +	// an object.
      +	ExpectedCalls []*Call
      +
      +	// Holds the calls that were made to this mocked object.
      +	Calls []Call
      +
      +	// TestData holds any data that might be useful for testing.  Testify ignores
      +	// this data completely allowing you to do whatever you like with it.
      +	testData objx.Map
      +
      +	mutex sync.Mutex
      +}
      +
      +// TestData holds any data that might be useful for testing.  Testify ignores
      +// this data completely allowing you to do whatever you like with it.
      +func (m *Mock) TestData() objx.Map {
      +
      +	if m.testData == nil {
      +		m.testData = make(objx.Map)
      +	}
      +
      +	return m.testData
      +}
      +
      +/*
      +	Setting expectations
      +*/
      +
      +// On starts a description of an expectation of the specified method
      +// being called.
      +//
      +//     Mock.On("MyMethod", arg1, arg2)
      +func (m *Mock) On(methodName string, arguments ...interface{}) *Call {
      +	for _, arg := range arguments {
      +		if v := reflect.ValueOf(arg); v.Kind() == reflect.Func {
      +			panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg))
      +		}
      +	}
      +
      +	m.mutex.Lock()
      +	defer m.mutex.Unlock()
      +	c := newCall(m, methodName, arguments...)
      +	m.ExpectedCalls = append(m.ExpectedCalls, c)
      +	return c
      +}
      +
      +// /*
      +// 	Recording and responding to activity
      +// */
      +
      +func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) {
      +	m.mutex.Lock()
      +	defer m.mutex.Unlock()
      +	for i, call := range m.ExpectedCalls {
      +		if call.Method == method && call.Repeatability > -1 {
      +
      +			_, diffCount := call.Arguments.Diff(arguments)
      +			if diffCount == 0 {
      +				return i, call
      +			}
      +
      +		}
      +	}
      +	return -1, nil
      +}
      +
      +func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) {
      +	diffCount := 0
      +	var closestCall *Call
      +
      +	for _, call := range m.expectedCalls() {
      +		if call.Method == method {
      +
      +			_, tempDiffCount := call.Arguments.Diff(arguments)
      +			if tempDiffCount < diffCount || diffCount == 0 {
      +				diffCount = tempDiffCount
      +				closestCall = call
      +			}
      +
      +		}
      +	}
      +
      +	if closestCall == nil {
      +		return false, nil
      +	}
      +
      +	return true, closestCall
      +}
      +
      +func callString(method string, arguments Arguments, includeArgumentValues bool) string {
      +
      +	var argValsString string
      +	if includeArgumentValues {
      +		var argVals []string
      +		for argIndex, arg := range arguments {
      +			argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg))
      +		}
      +		argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t"))
      +	}
      +
      +	return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString)
      +}
      +
      +// Called tells the mock object that a method has been called, and gets an array
+// of arguments to return.  Panics if the call is unexpected (i.e. not preceded by
      +// appropriate .On .Return() calls)
      +// If Call.WaitFor is set, blocks until the channel is closed or receives a message.
      +func (m *Mock) Called(arguments ...interface{}) Arguments {
      +	// get the calling function's name
      +	pc, _, _, ok := runtime.Caller(1)
      +	if !ok {
      +		panic("Couldn't get the caller information")
      +	}
      +	functionPath := runtime.FuncForPC(pc).Name()
      +	//Next four lines are required to use GCCGO function naming conventions.
      +	//For Ex:  github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock
+//uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree
      +	//With GCCGO we need to remove interface information starting from pN<dd>.
      +	re := regexp.MustCompile("\\.pN\\d+_")
      +	if re.MatchString(functionPath) {
      +		functionPath = re.Split(functionPath, -1)[0]
      +	}
      +	parts := strings.Split(functionPath, ".")
      +	functionName := parts[len(parts)-1]
      +
      +	found, call := m.findExpectedCall(functionName, arguments...)
      +
      +	if found < 0 {
      +		// we have to fail here - because we don't know what to do
      +		// as the return arguments.  This is because:
      +		//
      +		//   a) this is a totally unexpected call to this method,
      +		//   b) the arguments are not what was expected, or
      +		//   c) the developer has forgotten to add an accompanying On...Return pair.
      +
      +		closestFound, closestCall := m.findClosestCall(functionName, arguments...)
      +
      +		if closestFound {
      +			panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true)))
      +		} else {
      +			panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo()))
      +		}
      +	} else {
      +		m.mutex.Lock()
      +		switch {
      +		case call.Repeatability == 1:
      +			call.Repeatability = -1
      +
      +		case call.Repeatability > 1:
      +			call.Repeatability--
      +		}
      +		m.mutex.Unlock()
      +	}
      +
      +	// add the call
      +	m.mutex.Lock()
      +	m.Calls = append(m.Calls, *newCall(m, functionName, arguments...))
      +	m.mutex.Unlock()
      +
      +	// block if specified
      +	if call.WaitFor != nil {
      +		<-call.WaitFor
      +	}
      +
      +	if call.RunFn != nil {
      +		call.RunFn(arguments)
      +	}
      +
      +	return call.ReturnArguments
      +}
      +
      +/*
      +	Assertions
      +*/
      +
      +// AssertExpectationsForObjects asserts that everything specified with On and Return
      +// of the specified objects was in fact called as expected.
      +//
      +// Calls may have occurred in any order.
      +func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool {
      +	var success = true
      +	for _, obj := range testObjects {
      +		mockObj := obj.(Mock)
      +		success = success && mockObj.AssertExpectations(t)
      +	}
      +	return success
      +}
      +
      +// AssertExpectations asserts that everything specified with On and Return was
      +// in fact called as expected.  Calls may have occurred in any order.
      +func (m *Mock) AssertExpectations(t TestingT) bool {
      +	var somethingMissing bool
      +	var failedExpectations int
      +
      +	// iterate through each expectation
      +	expectedCalls := m.expectedCalls()
      +	for _, expectedCall := range expectedCalls {
      +		if !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) {
      +			somethingMissing = true
      +			failedExpectations++
      +			t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String())
      +		} else {
      +			m.mutex.Lock()
      +			if expectedCall.Repeatability > 0 {
      +				somethingMissing = true
      +				failedExpectations++
      +			} else {
      +				t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String())
      +			}
      +			m.mutex.Unlock()
      +		}
      +	}
      +
      +	if somethingMissing {
      +		t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo())
      +	}
      +
      +	return !somethingMissing
      +}
      +
      +// AssertNumberOfCalls asserts that the method was called expectedCalls times.
      +func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool {
      +	var actualCalls int
      +	for _, call := range m.calls() {
      +		if call.Method == methodName {
      +			actualCalls++
      +		}
      +	}
      +	return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls))
      +}
      +
      +// AssertCalled asserts that the method was called.
      +func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool {
      +	if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) {
      +		t.Logf("%v", m.expectedCalls())
      +		return false
      +	}
      +	return true
      +}
      +
      +// AssertNotCalled asserts that the method was not called.
      +func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool {
      +	if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) {
      +		t.Logf("%v", m.expectedCalls())
      +		return false
      +	}
      +	return true
      +}
      +
      +func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool {
      +	for _, call := range m.calls() {
      +		if call.Method == methodName {
      +
      +			_, differences := Arguments(expected).Diff(call.Arguments)
      +
      +			if differences == 0 {
      +				// found the expected call
      +				return true
      +			}
      +
      +		}
      +	}
      +	// we didn't find the expected call
      +	return false
      +}
      +
      +func (m *Mock) expectedCalls() []*Call {
      +	m.mutex.Lock()
      +	defer m.mutex.Unlock()
      +	return append([]*Call{}, m.ExpectedCalls...)
      +}
      +
      +func (m *Mock) calls() []Call {
      +	m.mutex.Lock()
      +	defer m.mutex.Unlock()
      +	return append([]Call{}, m.Calls...)
      +}
      +
      +/*
      +	Arguments
      +*/
      +
      +// Arguments holds an array of method arguments or return values.
      +type Arguments []interface{}
      +
      +const (
      +	// Anything is used in Diff and Assert when the argument being tested
      +	// shouldn't be taken into consideration.
      +	Anything string = "mock.Anything"
      +)
      +
      +// AnythingOfTypeArgument is a string that contains the type of an argument
      +// for use when type checking.  Used in Diff and Assert.
      +type AnythingOfTypeArgument string
      +
      +// AnythingOfType returns an AnythingOfTypeArgument object containing the
      +// name of the type to check for.  Used in Diff and Assert.
      +//
      +// For example:
      +//	Assert(t, AnythingOfType("string"), AnythingOfType("int"))
      +func AnythingOfType(t string) AnythingOfTypeArgument {
      +	return AnythingOfTypeArgument(t)
      +}
      +
      +// argumentMatcher performs custom argument matching, returning whether or
      +// not the argument is matched by the expectation fixture function.
      +type argumentMatcher struct {
      +	// fn is a function which accepts one argument, and returns a bool.
      +	fn reflect.Value
      +}
      +
      +func (f argumentMatcher) Matches(argument interface{}) bool {
      +	expectType := f.fn.Type().In(0)
      +
      +	if reflect.TypeOf(argument).AssignableTo(expectType) {
      +		result := f.fn.Call([]reflect.Value{reflect.ValueOf(argument)})
      +		return result[0].Bool()
      +	}
      +	return false
      +}
      +
      +func (f argumentMatcher) String() string {
      +	return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name())
      +}
      +
      +// MatchedBy can be used to match a mock call based on only certain properties
      +// from a complex struct or some calculation. It takes a function that will be
      +// evaluated with the called argument and will return true when there's a match
      +// and false otherwise.
      +//
      +// Example:
      +// m.On("Do", func(req *http.Request) bool { return req.Host == "example.com" })
      +//
      +// |fn|, must be a function accepting a single argument (of the expected type)
      +// which returns a bool. If |fn| doesn't match the required signature,
+// MatchedBy() panics.
      +func MatchedBy(fn interface{}) argumentMatcher {
      +	fnType := reflect.TypeOf(fn)
      +
      +	if fnType.Kind() != reflect.Func {
      +		panic(fmt.Sprintf("assert: arguments: %s is not a func", fn))
      +	}
      +	if fnType.NumIn() != 1 {
      +		panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn))
      +	}
      +	if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool {
      +		panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn))
      +	}
      +
      +	return argumentMatcher{fn: reflect.ValueOf(fn)}
      +}
      +
      +// Get Returns the argument at the specified index.
      +func (args Arguments) Get(index int) interface{} {
      +	if index+1 > len(args) {
      +		panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args)))
      +	}
      +	return args[index]
      +}
      +
      +// Is gets whether the objects match the arguments specified.
      +func (args Arguments) Is(objects ...interface{}) bool {
      +	for i, obj := range args {
      +		if obj != objects[i] {
      +			return false
      +		}
      +	}
      +	return true
      +}
      +
      +// Diff gets a string describing the differences between the arguments
      +// and the specified objects.
      +//
      +// Returns the diff string and number of differences found.
      +func (args Arguments) Diff(objects []interface{}) (string, int) {
      +
      +	var output = "\n"
      +	var differences int
      +
      +	var maxArgCount = len(args)
      +	if len(objects) > maxArgCount {
      +		maxArgCount = len(objects)
      +	}
      +
      +	for i := 0; i < maxArgCount; i++ {
      +		var actual, expected interface{}
      +
      +		if len(objects) <= i {
      +			actual = "(Missing)"
      +		} else {
      +			actual = objects[i]
      +		}
      +
      +		if len(args) <= i {
      +			expected = "(Missing)"
      +		} else {
      +			expected = args[i]
      +		}
      +
      +		if matcher, ok := expected.(argumentMatcher); ok {
      +			if matcher.Matches(actual) {
      +				output = fmt.Sprintf("%s\t%d: \u2705  %s matched by %s\n", output, i, actual, matcher)
      +			} else {
      +				differences++
      +				output = fmt.Sprintf("%s\t%d: \u2705  %s not matched by %s\n", output, i, actual, matcher)
      +			}
      +		} else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() {
      +
      +			// type checking
      +			if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) {
      +				// not match
      +				differences++
      +				output = fmt.Sprintf("%s\t%d: \u274C  type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual)
      +			}
      +
      +		} else {
      +
      +			// normal checking
      +
      +			if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) {
      +				// match
      +				output = fmt.Sprintf("%s\t%d: \u2705  %s == %s\n", output, i, actual, expected)
      +			} else {
      +				// not match
      +				differences++
      +				output = fmt.Sprintf("%s\t%d: \u274C  %s != %s\n", output, i, actual, expected)
      +			}
      +		}
      +
      +	}
      +
      +	if differences == 0 {
      +		return "No differences.", differences
      +	}
      +
      +	return output, differences
      +
      +}
      +
      +// Assert compares the arguments with the specified objects and fails if
      +// they do not exactly match.
      +func (args Arguments) Assert(t TestingT, objects ...interface{}) bool {
      +
      +	// get the differences
      +	diff, diffCount := args.Diff(objects)
      +
      +	if diffCount == 0 {
      +		return true
      +	}
      +
      +	// there are differences... report them...
      +	t.Logf(diff)
      +	t.Errorf("%sArguments do not match.", assert.CallerInfo())
      +
      +	return false
      +
      +}
      +
      +// String gets the argument at the specified index. Panics if there is no argument, or
      +// if the argument is of the wrong type.
      +//
      +// If no index is provided, String() returns a complete string representation
      +// of the arguments.
      +func (args Arguments) String(indexOrNil ...int) string {
      +
      +	if len(indexOrNil) == 0 {
      +		// normal String() method - return a string representation of the args
      +		var argsStr []string
      +		for _, arg := range args {
      +			argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg)))
      +		}
      +		return strings.Join(argsStr, ",")
      +	} else if len(indexOrNil) == 1 {
      +		// Index has been specified - get the argument at that index
      +		var index = indexOrNil[0]
      +		var s string
      +		var ok bool
      +		if s, ok = args.Get(index).(string); !ok {
      +			panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index)))
      +		}
      +		return s
      +	}
      +
      +	panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String.  Must be 0 or 1, not %d", len(indexOrNil)))
      +
      +}
      +
      +// Int gets the argument at the specified index. Panics if there is no argument, or
      +// if the argument is of the wrong type.
      +func (args Arguments) Int(index int) int {
      +	var s int
      +	var ok bool
      +	if s, ok = args.Get(index).(int); !ok {
      +		panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
      +	}
      +	return s
      +}
      +
      +// Error gets the argument at the specified index. Panics if there is no argument, or
      +// if the argument is of the wrong type.
      +func (args Arguments) Error(index int) error {
      +	obj := args.Get(index)
      +	var s error
      +	var ok bool
      +	if obj == nil {
      +		return nil
      +	}
      +	if s, ok = obj.(error); !ok {
      +		panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
      +	}
      +	return s
      +}
      +
      +// Bool gets the argument at the specified index. Panics if there is no argument, or
      +// if the argument is of the wrong type.
      +func (args Arguments) Bool(index int) bool {
      +	var s bool
      +	var ok bool
      +	if s, ok = args.Get(index).(bool); !ok {
      +		panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
      +	}
      +	return s
      +}
      diff --git a/vendor/github.com/stretchr/testify/mock/mock_test.go b/vendor/github.com/stretchr/testify/mock/mock_test.go
      new file mode 100644
      index 00000000..b206faaa
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/mock/mock_test.go
      @@ -0,0 +1,1068 @@
      +package mock
      +
      +import (
      +	"errors"
      +	"github.com/stretchr/testify/assert"
      +	"github.com/stretchr/testify/require"
      +	"testing"
      +	"time"
      +)
      +
      +/*
      +	Test objects
      +*/
      +
      +// ExampleInterface represents an example interface.
      +type ExampleInterface interface {
      +	TheExampleMethod(a, b, c int) (int, error)
      +}
      +
      +// TestExampleImplementation is a test implementation of ExampleInterface
      +type TestExampleImplementation struct {
      +	Mock
      +}
      +
      +func (i *TestExampleImplementation) TheExampleMethod(a, b, c int) (int, error) {
      +	args := i.Called(a, b, c)
      +	return args.Int(0), errors.New("Whoops")
      +}
      +
      +func (i *TestExampleImplementation) TheExampleMethod2(yesorno bool) {
      +	i.Called(yesorno)
      +}
      +
      +type ExampleType struct {
      +	ran bool
      +}
      +
      +func (i *TestExampleImplementation) TheExampleMethod3(et *ExampleType) error {
      +	args := i.Called(et)
      +	return args.Error(0)
      +}
      +
      +func (i *TestExampleImplementation) TheExampleMethodFunc(fn func(string) error) error {
      +	args := i.Called(fn)
      +	return args.Error(0)
      +}
      +
      +func (i *TestExampleImplementation) TheExampleMethodVariadic(a ...int) error {
      +	args := i.Called(a)
      +	return args.Error(0)
      +}
      +
      +func (i *TestExampleImplementation) TheExampleMethodVariadicInterface(a ...interface{}) error {
      +	args := i.Called(a)
      +	return args.Error(0)
      +}
      +
      +type ExampleFuncType func(string) error
      +
      +func (i *TestExampleImplementation) TheExampleMethodFuncType(fn ExampleFuncType) error {
      +	args := i.Called(fn)
      +	return args.Error(0)
      +}
      +
      +/*
      +	Mock
      +*/
      +
      +func Test_Mock_TestData(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	if assert.NotNil(t, mockedService.TestData()) {
      +
      +		mockedService.TestData().Set("something", 123)
      +		assert.Equal(t, 123, mockedService.TestData().Get("something").Data())
      +	}
      +}
      +
      +func Test_Mock_On(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	c := mockedService.On("TheExampleMethod")
      +	assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +	assert.Equal(t, "TheExampleMethod", c.Method)
      +}
      +
      +func Test_Mock_Chained_On(t *testing.T) {
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.
      +		On("TheExampleMethod", 1, 2, 3).
      +		Return(0).
      +		On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).
      +		Return(nil)
      +
      +	expectedCalls := []*Call{
      +		&Call{
      +			Parent:          &mockedService.Mock,
      +			Method:          "TheExampleMethod",
      +			Arguments:       []interface{}{1, 2, 3},
      +			ReturnArguments: []interface{}{0},
      +		},
      +		&Call{
      +			Parent:          &mockedService.Mock,
      +			Method:          "TheExampleMethod3",
      +			Arguments:       []interface{}{AnythingOfType("*mock.ExampleType")},
      +			ReturnArguments: []interface{}{nil},
      +		},
      +	}
      +	assert.Equal(t, expectedCalls, mockedService.ExpectedCalls)
      +}
      +
      +func Test_Mock_On_WithArgs(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	c := mockedService.On("TheExampleMethod", 1, 2, 3, 4)
      +
      +	assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +	assert.Equal(t, "TheExampleMethod", c.Method)
      +	assert.Equal(t, Arguments{1, 2, 3, 4}, c.Arguments)
      +}
      +
      +func Test_Mock_On_WithFuncArg(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	c := mockedService.
      +		On("TheExampleMethodFunc", AnythingOfType("func(string) error")).
      +		Return(nil)
      +
      +	assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +	assert.Equal(t, "TheExampleMethodFunc", c.Method)
      +	assert.Equal(t, 1, len(c.Arguments))
      +	assert.Equal(t, AnythingOfType("func(string) error"), c.Arguments[0])
      +
      +	fn := func(string) error { return nil }
      +
      +	assert.NotPanics(t, func() {
      +		mockedService.TheExampleMethodFunc(fn)
      +	})
      +}
      +
      +func Test_Mock_On_WithIntArgMatcher(t *testing.T) {
      +	var mockedService TestExampleImplementation
      +
      +	mockedService.On("TheExampleMethod",
      +		MatchedBy(func(a int) bool {
      +			return a == 1
      +		}), MatchedBy(func(b int) bool {
      +			return b == 2
      +		}), MatchedBy(func(c int) bool {
      +			return c == 3
      +		})).Return(0, nil)
      +
      +	assert.Panics(t, func() {
      +		mockedService.TheExampleMethod(1, 2, 4)
      +	})
      +	assert.Panics(t, func() {
      +		mockedService.TheExampleMethod(2, 2, 3)
      +	})
      +	assert.NotPanics(t, func() {
      +		mockedService.TheExampleMethod(1, 2, 3)
      +	})
      +}
      +
      +func Test_Mock_On_WithPtrArgMatcher(t *testing.T) {
      +	var mockedService TestExampleImplementation
      +
      +	mockedService.On("TheExampleMethod3",
      +		MatchedBy(func(a *ExampleType) bool { return a.ran == true }),
      +	).Return(nil)
      +
      +	mockedService.On("TheExampleMethod3",
      +		MatchedBy(func(a *ExampleType) bool { return a.ran == false }),
      +	).Return(errors.New("error"))
      +
      +	assert.Equal(t, mockedService.TheExampleMethod3(&ExampleType{true}), nil)
      +	assert.EqualError(t, mockedService.TheExampleMethod3(&ExampleType{false}), "error")
      +}
      +
      +func Test_Mock_On_WithFuncArgMatcher(t *testing.T) {
      +	var mockedService TestExampleImplementation
      +
      +	fixture1, fixture2 := errors.New("fixture1"), errors.New("fixture2")
      +
      +	mockedService.On("TheExampleMethodFunc",
      +		MatchedBy(func(a func(string) error) bool { return a("string") == fixture1 }),
      +	).Return(errors.New("fixture1"))
      +
      +	mockedService.On("TheExampleMethodFunc",
      +		MatchedBy(func(a func(string) error) bool { return a("string") == fixture2 }),
      +	).Return(errors.New("fixture2"))
      +
      +	assert.EqualError(t, mockedService.TheExampleMethodFunc(
      +		func(string) error { return fixture1 }), "fixture1")
      +	assert.EqualError(t, mockedService.TheExampleMethodFunc(
      +		func(string) error { return fixture2 }), "fixture2")
      +}
      +
      +func Test_Mock_On_WithVariadicFunc(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	c := mockedService.
      +		On("TheExampleMethodVariadic", []int{1, 2, 3}).
      +		Return(nil)
      +
      +	assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +	assert.Equal(t, 1, len(c.Arguments))
      +	assert.Equal(t, []int{1, 2, 3}, c.Arguments[0])
      +
      +	assert.NotPanics(t, func() {
      +		mockedService.TheExampleMethodVariadic(1, 2, 3)
      +	})
      +	assert.Panics(t, func() {
      +		mockedService.TheExampleMethodVariadic(1, 2)
      +	})
      +
      +}
      +
      +func Test_Mock_On_WithVariadicFuncWithInterface(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	c := mockedService.On("TheExampleMethodVariadicInterface", []interface{}{1, 2, 3}).
      +		Return(nil)
      +
      +	assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +	assert.Equal(t, 1, len(c.Arguments))
      +	assert.Equal(t, []interface{}{1, 2, 3}, c.Arguments[0])
      +
      +	assert.NotPanics(t, func() {
      +		mockedService.TheExampleMethodVariadicInterface(1, 2, 3)
      +	})
      +	assert.Panics(t, func() {
      +		mockedService.TheExampleMethodVariadicInterface(1, 2)
      +	})
      +
      +}
      +
      +func Test_Mock_On_WithVariadicFuncWithEmptyInterfaceArray(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	var expected []interface{}
      +	c := mockedService.
      +		On("TheExampleMethodVariadicInterface", expected).
      +		Return(nil)
      +
      +	assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +	assert.Equal(t, 1, len(c.Arguments))
      +	assert.Equal(t, expected, c.Arguments[0])
      +
      +	assert.NotPanics(t, func() {
      +		mockedService.TheExampleMethodVariadicInterface()
      +	})
      +	assert.Panics(t, func() {
      +		mockedService.TheExampleMethodVariadicInterface(1, 2)
      +	})
      +
      +}
      +
      +func Test_Mock_On_WithFuncPanics(t *testing.T) {
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	assert.Panics(t, func() {
      +		mockedService.On("TheExampleMethodFunc", func(string) error { return nil })
      +	})
      +}
      +
      +func Test_Mock_On_WithFuncTypeArg(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	c := mockedService.
      +		On("TheExampleMethodFuncType", AnythingOfType("mock.ExampleFuncType")).
      +		Return(nil)
      +
      +	assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +	assert.Equal(t, 1, len(c.Arguments))
      +	assert.Equal(t, AnythingOfType("mock.ExampleFuncType"), c.Arguments[0])
      +
      +	fn := func(string) error { return nil }
      +	assert.NotPanics(t, func() {
      +		mockedService.TheExampleMethodFuncType(fn)
      +	})
      +}
      +
      +func Test_Mock_Return(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	c := mockedService.
      +		On("TheExampleMethod", "A", "B", true).
      +		Return(1, "two", true)
      +
      +	require.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +
      +	call := mockedService.ExpectedCalls[0]
      +
      +	assert.Equal(t, "TheExampleMethod", call.Method)
      +	assert.Equal(t, "A", call.Arguments[0])
      +	assert.Equal(t, "B", call.Arguments[1])
      +	assert.Equal(t, true, call.Arguments[2])
      +	assert.Equal(t, 1, call.ReturnArguments[0])
      +	assert.Equal(t, "two", call.ReturnArguments[1])
      +	assert.Equal(t, true, call.ReturnArguments[2])
      +	assert.Equal(t, 0, call.Repeatability)
      +	assert.Nil(t, call.WaitFor)
      +}
      +
      +func Test_Mock_Return_WaitUntil(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +	ch := time.After(time.Second)
      +
      +	c := mockedService.Mock.
      +		On("TheExampleMethod", "A", "B", true).
      +		WaitUntil(ch).
      +		Return(1, "two", true)
      +
      +	// assert that the call was created
      +	require.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +
      +	call := mockedService.ExpectedCalls[0]
      +
      +	assert.Equal(t, "TheExampleMethod", call.Method)
      +	assert.Equal(t, "A", call.Arguments[0])
      +	assert.Equal(t, "B", call.Arguments[1])
      +	assert.Equal(t, true, call.Arguments[2])
      +	assert.Equal(t, 1, call.ReturnArguments[0])
      +	assert.Equal(t, "two", call.ReturnArguments[1])
      +	assert.Equal(t, true, call.ReturnArguments[2])
      +	assert.Equal(t, 0, call.Repeatability)
      +	assert.Equal(t, ch, call.WaitFor)
      +}
      +
      +func Test_Mock_Return_After(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	c := mockedService.Mock.
      +		On("TheExampleMethod", "A", "B", true).
      +		Return(1, "two", true).
      +		After(time.Second)
      +
      +	require.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +
      +	call := mockedService.Mock.ExpectedCalls[0]
      +
      +	assert.Equal(t, "TheExampleMethod", call.Method)
      +	assert.Equal(t, "A", call.Arguments[0])
      +	assert.Equal(t, "B", call.Arguments[1])
      +	assert.Equal(t, true, call.Arguments[2])
      +	assert.Equal(t, 1, call.ReturnArguments[0])
      +	assert.Equal(t, "two", call.ReturnArguments[1])
      +	assert.Equal(t, true, call.ReturnArguments[2])
      +	assert.Equal(t, 0, call.Repeatability)
      +	assert.NotEqual(t, nil, call.WaitFor)
      +
      +}
      +
      +func Test_Mock_Return_Run(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	fn := func(args Arguments) {
      +		arg := args.Get(0).(*ExampleType)
      +		arg.ran = true
      +	}
      +
      +	c := mockedService.Mock.
      +		On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).
      +		Return(nil).
      +		Run(fn)
      +
      +	require.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +
      +	call := mockedService.Mock.ExpectedCalls[0]
      +
      +	assert.Equal(t, "TheExampleMethod3", call.Method)
      +	assert.Equal(t, AnythingOfType("*mock.ExampleType"), call.Arguments[0])
      +	assert.Equal(t, nil, call.ReturnArguments[0])
      +	assert.Equal(t, 0, call.Repeatability)
      +	assert.NotEqual(t, nil, call.WaitFor)
      +	assert.NotNil(t, call.Run)
      +
      +	et := ExampleType{}
      +	assert.Equal(t, false, et.ran)
      +	mockedService.TheExampleMethod3(&et)
      +	assert.Equal(t, true, et.ran)
      +}
      +
      +func Test_Mock_Return_Run_Out_Of_Order(t *testing.T) {
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +	f := func(args Arguments) {
      +		arg := args.Get(0).(*ExampleType)
      +		arg.ran = true
      +	}
      +
      +	c := mockedService.Mock.
      +		On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).
      +		Run(f).
      +		Return(nil)
      +
      +	require.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +
      +	call := mockedService.Mock.ExpectedCalls[0]
      +
      +	assert.Equal(t, "TheExampleMethod3", call.Method)
      +	assert.Equal(t, AnythingOfType("*mock.ExampleType"), call.Arguments[0])
      +	assert.Equal(t, nil, call.ReturnArguments[0])
      +	assert.Equal(t, 0, call.Repeatability)
      +	assert.NotEqual(t, nil, call.WaitFor)
      +	assert.NotNil(t, call.Run)
      +}
      +
      +func Test_Mock_Return_Once(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	c := mockedService.On("TheExampleMethod", "A", "B", true).
      +		Return(1, "two", true).
      +		Once()
      +
      +	require.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +
      +	call := mockedService.ExpectedCalls[0]
      +
      +	assert.Equal(t, "TheExampleMethod", call.Method)
      +	assert.Equal(t, "A", call.Arguments[0])
      +	assert.Equal(t, "B", call.Arguments[1])
      +	assert.Equal(t, true, call.Arguments[2])
      +	assert.Equal(t, 1, call.ReturnArguments[0])
      +	assert.Equal(t, "two", call.ReturnArguments[1])
      +	assert.Equal(t, true, call.ReturnArguments[2])
      +	assert.Equal(t, 1, call.Repeatability)
      +	assert.Nil(t, call.WaitFor)
      +}
      +
      +func Test_Mock_Return_Twice(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	c := mockedService.
      +		On("TheExampleMethod", "A", "B", true).
      +		Return(1, "two", true).
      +		Twice()
      +
      +	require.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +
      +	call := mockedService.ExpectedCalls[0]
      +
      +	assert.Equal(t, "TheExampleMethod", call.Method)
      +	assert.Equal(t, "A", call.Arguments[0])
      +	assert.Equal(t, "B", call.Arguments[1])
      +	assert.Equal(t, true, call.Arguments[2])
      +	assert.Equal(t, 1, call.ReturnArguments[0])
      +	assert.Equal(t, "two", call.ReturnArguments[1])
      +	assert.Equal(t, true, call.ReturnArguments[2])
      +	assert.Equal(t, 2, call.Repeatability)
      +	assert.Nil(t, call.WaitFor)
      +}
      +
      +func Test_Mock_Return_Times(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	c := mockedService.
      +		On("TheExampleMethod", "A", "B", true).
      +		Return(1, "two", true).
      +		Times(5)
      +
      +	require.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +
      +	call := mockedService.ExpectedCalls[0]
      +
      +	assert.Equal(t, "TheExampleMethod", call.Method)
      +	assert.Equal(t, "A", call.Arguments[0])
      +	assert.Equal(t, "B", call.Arguments[1])
      +	assert.Equal(t, true, call.Arguments[2])
      +	assert.Equal(t, 1, call.ReturnArguments[0])
      +	assert.Equal(t, "two", call.ReturnArguments[1])
      +	assert.Equal(t, true, call.ReturnArguments[2])
      +	assert.Equal(t, 5, call.Repeatability)
      +	assert.Nil(t, call.WaitFor)
      +}
      +
      +func Test_Mock_Return_Nothing(t *testing.T) {
      +
      +	// make a test impl object
      +	var mockedService = new(TestExampleImplementation)
      +
      +	c := mockedService.
      +		On("TheExampleMethod", "A", "B", true).
      +		Return()
      +
      +	require.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
      +
      +	call := mockedService.ExpectedCalls[0]
      +
      +	assert.Equal(t, "TheExampleMethod", call.Method)
      +	assert.Equal(t, "A", call.Arguments[0])
      +	assert.Equal(t, "B", call.Arguments[1])
      +	assert.Equal(t, true, call.Arguments[2])
      +	assert.Equal(t, 0, len(call.ReturnArguments))
      +}
      +
      +func Test_Mock_findExpectedCall(t *testing.T) {
      +
      +	m := new(Mock)
      +	m.On("One", 1).Return("one")
      +	m.On("Two", 2).Return("two")
      +	m.On("Two", 3).Return("three")
      +
      +	f, c := m.findExpectedCall("Two", 3)
      +
      +	if assert.Equal(t, 2, f) {
      +		if assert.NotNil(t, c) {
      +			assert.Equal(t, "Two", c.Method)
      +			assert.Equal(t, 3, c.Arguments[0])
      +			assert.Equal(t, "three", c.ReturnArguments[0])
      +		}
      +	}
      +
      +}
      +
      +func Test_Mock_findExpectedCall_For_Unknown_Method(t *testing.T) {
      +
      +	m := new(Mock)
      +	m.On("One", 1).Return("one")
      +	m.On("Two", 2).Return("two")
      +	m.On("Two", 3).Return("three")
      +
      +	f, _ := m.findExpectedCall("Two")
      +
      +	assert.Equal(t, -1, f)
      +
      +}
      +
      +func Test_Mock_findExpectedCall_Respects_Repeatability(t *testing.T) {
      +
      +	m := new(Mock)
      +	m.On("One", 1).Return("one")
      +	m.On("Two", 2).Return("two").Once()
      +	m.On("Two", 3).Return("three").Twice()
      +	m.On("Two", 3).Return("three").Times(8)
      +
      +	f, c := m.findExpectedCall("Two", 3)
      +
      +	if assert.Equal(t, 2, f) {
      +		if assert.NotNil(t, c) {
      +			assert.Equal(t, "Two", c.Method)
      +			assert.Equal(t, 3, c.Arguments[0])
      +			assert.Equal(t, "three", c.ReturnArguments[0])
      +		}
      +	}
      +
      +}
      +
      +func Test_callString(t *testing.T) {
      +
      +	assert.Equal(t, `Method(int,bool,string)`, callString("Method", []interface{}{1, true, "something"}, false))
      +
      +}
      +
      +func Test_Mock_Called(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.On("Test_Mock_Called", 1, 2, 3).Return(5, "6", true)
      +
      +	returnArguments := mockedService.Called(1, 2, 3)
      +
      +	if assert.Equal(t, 1, len(mockedService.Calls)) {
      +		assert.Equal(t, "Test_Mock_Called", mockedService.Calls[0].Method)
      +		assert.Equal(t, 1, mockedService.Calls[0].Arguments[0])
      +		assert.Equal(t, 2, mockedService.Calls[0].Arguments[1])
      +		assert.Equal(t, 3, mockedService.Calls[0].Arguments[2])
      +	}
      +
      +	if assert.Equal(t, 3, len(returnArguments)) {
      +		assert.Equal(t, 5, returnArguments[0])
      +		assert.Equal(t, "6", returnArguments[1])
      +		assert.Equal(t, true, returnArguments[2])
      +	}
      +
      +}
      +
      +func asyncCall(m *Mock, ch chan Arguments) {
      +	ch <- m.Called(1, 2, 3)
      +}
      +
      +func Test_Mock_Called_blocks(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.Mock.On("asyncCall", 1, 2, 3).Return(5, "6", true).After(2 * time.Millisecond)
      +
      +	ch := make(chan Arguments)
      +
      +	go asyncCall(&mockedService.Mock, ch)
      +
      +	select {
      +	case <-ch:
      +		t.Fatal("should have waited")
      +	case <-time.After(1 * time.Millisecond):
      +	}
      +
      +	returnArguments := <-ch
      +
      +	if assert.Equal(t, 1, len(mockedService.Mock.Calls)) {
      +		assert.Equal(t, "asyncCall", mockedService.Mock.Calls[0].Method)
      +		assert.Equal(t, 1, mockedService.Mock.Calls[0].Arguments[0])
      +		assert.Equal(t, 2, mockedService.Mock.Calls[0].Arguments[1])
      +		assert.Equal(t, 3, mockedService.Mock.Calls[0].Arguments[2])
      +	}
      +
      +	if assert.Equal(t, 3, len(returnArguments)) {
      +		assert.Equal(t, 5, returnArguments[0])
      +		assert.Equal(t, "6", returnArguments[1])
      +		assert.Equal(t, true, returnArguments[2])
      +	}
      +
      +}
      +
      +func Test_Mock_Called_For_Bounded_Repeatability(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.
      +		On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3).
      +		Return(5, "6", true).
      +		Once()
      +	mockedService.
      +		On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3).
      +		Return(-1, "hi", false)
      +
      +	returnArguments1 := mockedService.Called(1, 2, 3)
      +	returnArguments2 := mockedService.Called(1, 2, 3)
      +
      +	if assert.Equal(t, 2, len(mockedService.Calls)) {
      +		assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[0].Method)
      +		assert.Equal(t, 1, mockedService.Calls[0].Arguments[0])
      +		assert.Equal(t, 2, mockedService.Calls[0].Arguments[1])
      +		assert.Equal(t, 3, mockedService.Calls[0].Arguments[2])
      +
      +		assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[1].Method)
      +		assert.Equal(t, 1, mockedService.Calls[1].Arguments[0])
      +		assert.Equal(t, 2, mockedService.Calls[1].Arguments[1])
      +		assert.Equal(t, 3, mockedService.Calls[1].Arguments[2])
      +	}
      +
      +	if assert.Equal(t, 3, len(returnArguments1)) {
      +		assert.Equal(t, 5, returnArguments1[0])
      +		assert.Equal(t, "6", returnArguments1[1])
      +		assert.Equal(t, true, returnArguments1[2])
      +	}
      +
      +	if assert.Equal(t, 3, len(returnArguments2)) {
      +		assert.Equal(t, -1, returnArguments2[0])
      +		assert.Equal(t, "hi", returnArguments2[1])
      +		assert.Equal(t, false, returnArguments2[2])
      +	}
      +
      +}
      +
      +func Test_Mock_Called_For_SetTime_Expectation(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.On("TheExampleMethod", 1, 2, 3).Return(5, "6", true).Times(4)
      +
      +	mockedService.TheExampleMethod(1, 2, 3)
      +	mockedService.TheExampleMethod(1, 2, 3)
      +	mockedService.TheExampleMethod(1, 2, 3)
      +	mockedService.TheExampleMethod(1, 2, 3)
      +	assert.Panics(t, func() {
      +		mockedService.TheExampleMethod(1, 2, 3)
      +	})
      +
      +}
      +
      +func Test_Mock_Called_Unexpected(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	// make sure it panics if no expectation was made
      +	assert.Panics(t, func() {
      +		mockedService.Called(1, 2, 3)
      +	}, "Calling unexpected method should panic")
      +
      +}
      +
      +func Test_AssertExpectationsForObjects_Helper(t *testing.T) {
      +
      +	var mockedService1 = new(TestExampleImplementation)
      +	var mockedService2 = new(TestExampleImplementation)
      +	var mockedService3 = new(TestExampleImplementation)
      +
      +	mockedService1.On("Test_AssertExpectationsForObjects_Helper", 1).Return()
      +	mockedService2.On("Test_AssertExpectationsForObjects_Helper", 2).Return()
      +	mockedService3.On("Test_AssertExpectationsForObjects_Helper", 3).Return()
      +
      +	mockedService1.Called(1)
      +	mockedService2.Called(2)
      +	mockedService3.Called(3)
      +
      +	assert.True(t, AssertExpectationsForObjects(t, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock))
      +
      +}
      +
      +func Test_AssertExpectationsForObjects_Helper_Failed(t *testing.T) {
      +
      +	var mockedService1 = new(TestExampleImplementation)
      +	var mockedService2 = new(TestExampleImplementation)
      +	var mockedService3 = new(TestExampleImplementation)
      +
      +	mockedService1.On("Test_AssertExpectationsForObjects_Helper_Failed", 1).Return()
      +	mockedService2.On("Test_AssertExpectationsForObjects_Helper_Failed", 2).Return()
      +	mockedService3.On("Test_AssertExpectationsForObjects_Helper_Failed", 3).Return()
      +
      +	mockedService1.Called(1)
      +	mockedService3.Called(3)
      +
      +	tt := new(testing.T)
      +	assert.False(t, AssertExpectationsForObjects(tt, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock))
      +
      +}
      +
      +func Test_Mock_AssertExpectations(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.On("Test_Mock_AssertExpectations", 1, 2, 3).Return(5, 6, 7)
      +
      +	tt := new(testing.T)
      +	assert.False(t, mockedService.AssertExpectations(tt))
      +
      +	// make the call now
      +	mockedService.Called(1, 2, 3)
      +
      +	// now assert expectations
      +	assert.True(t, mockedService.AssertExpectations(tt))
      +
      +}
      +
      +func Test_Mock_AssertExpectationsCustomType(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).Return(nil).Once()
      +
      +	tt := new(testing.T)
      +	assert.False(t, mockedService.AssertExpectations(tt))
      +
      +	// make the call now
      +	mockedService.TheExampleMethod3(&ExampleType{})
      +
      +	// now assert expectations
      +	assert.True(t, mockedService.AssertExpectations(tt))
      +
      +}
      +
      +func Test_Mock_AssertExpectations_With_Repeatability(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.On("Test_Mock_AssertExpectations_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Twice()
      +
      +	tt := new(testing.T)
      +	assert.False(t, mockedService.AssertExpectations(tt))
      +
      +	// make the call now
      +	mockedService.Called(1, 2, 3)
      +
      +	assert.False(t, mockedService.AssertExpectations(tt))
      +
      +	mockedService.Called(1, 2, 3)
      +
      +	// now assert expectations
      +	assert.True(t, mockedService.AssertExpectations(tt))
      +
      +}
      +
      +func Test_Mock_TwoCallsWithDifferentArguments(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 1, 2, 3).Return(5, 6, 7)
      +	mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 4, 5, 6).Return(5, 6, 7)
      +
      +	args1 := mockedService.Called(1, 2, 3)
      +	assert.Equal(t, 5, args1.Int(0))
      +	assert.Equal(t, 6, args1.Int(1))
      +	assert.Equal(t, 7, args1.Int(2))
      +
      +	args2 := mockedService.Called(4, 5, 6)
      +	assert.Equal(t, 5, args2.Int(0))
      +	assert.Equal(t, 6, args2.Int(1))
      +	assert.Equal(t, 7, args2.Int(2))
      +
      +}
      +
      +func Test_Mock_AssertNumberOfCalls(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.On("Test_Mock_AssertNumberOfCalls", 1, 2, 3).Return(5, 6, 7)
      +
      +	mockedService.Called(1, 2, 3)
      +	assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 1))
      +
      +	mockedService.Called(1, 2, 3)
      +	assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 2))
      +
      +}
      +
      +func Test_Mock_AssertCalled(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.On("Test_Mock_AssertCalled", 1, 2, 3).Return(5, 6, 7)
      +
      +	mockedService.Called(1, 2, 3)
      +
      +	assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled", 1, 2, 3))
      +
      +}
      +
      +func Test_Mock_AssertCalled_WithAnythingOfTypeArgument(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.
      +		On("Test_Mock_AssertCalled_WithAnythingOfTypeArgument", Anything, Anything, Anything).
      +		Return()
      +
      +	mockedService.Called(1, "two", []uint8("three"))
      +
      +	assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled_WithAnythingOfTypeArgument", AnythingOfType("int"), AnythingOfType("string"), AnythingOfType("[]uint8")))
      +
      +}
      +
      +func Test_Mock_AssertCalled_WithArguments(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.On("Test_Mock_AssertCalled_WithArguments", 1, 2, 3).Return(5, 6, 7)
      +
      +	mockedService.Called(1, 2, 3)
      +
      +	tt := new(testing.T)
      +	assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 1, 2, 3))
      +	assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 2, 3, 4))
      +
      +}
      +
      +func Test_Mock_AssertCalled_WithArguments_With_Repeatability(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Once()
      +	mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4).Return(5, 6, 7).Once()
      +
      +	mockedService.Called(1, 2, 3)
      +	mockedService.Called(2, 3, 4)
      +
      +	tt := new(testing.T)
      +	assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3))
      +	assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4))
      +	assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 3, 4, 5))
      +
      +}
      +
      +func Test_Mock_AssertNotCalled(t *testing.T) {
      +
      +	var mockedService = new(TestExampleImplementation)
      +
      +	mockedService.On("Test_Mock_AssertNotCalled", 1, 2, 3).Return(5, 6, 7)
      +
      +	mockedService.Called(1, 2, 3)
      +
      +	assert.True(t, mockedService.AssertNotCalled(t, "Test_Mock_NotCalled"))
      +
      +}
      +
      +/*
      +	Arguments helper methods
      +*/
      +func Test_Arguments_Get(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", 123, true})
      +
      +	assert.Equal(t, "string", args.Get(0).(string))
      +	assert.Equal(t, 123, args.Get(1).(int))
      +	assert.Equal(t, true, args.Get(2).(bool))
      +
      +}
      +
      +func Test_Arguments_Is(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", 123, true})
      +
      +	assert.True(t, args.Is("string", 123, true))
      +	assert.False(t, args.Is("wrong", 456, false))
      +
      +}
      +
      +func Test_Arguments_Diff(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"Hello World", 123, true})
      +	var diff string
      +	var count int
      +	diff, count = args.Diff([]interface{}{"Hello World", 456, "false"})
      +
      +	assert.Equal(t, 2, count)
      +	assert.Contains(t, diff, `%!s(int=456) != %!s(int=123)`)
      +	assert.Contains(t, diff, `false != %!s(bool=true)`)
      +
      +}
      +
      +func Test_Arguments_Diff_DifferentNumberOfArgs(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", 123, true})
      +	var diff string
      +	var count int
      +	diff, count = args.Diff([]interface{}{"string", 456, "false", "extra"})
      +
      +	assert.Equal(t, 3, count)
      +	assert.Contains(t, diff, `extra != (Missing)`)
      +
      +}
      +
      +func Test_Arguments_Diff_WithAnythingArgument(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", 123, true})
      +	var count int
      +	_, count = args.Diff([]interface{}{"string", Anything, true})
      +
      +	assert.Equal(t, 0, count)
      +
      +}
      +
      +func Test_Arguments_Diff_WithAnythingArgument_InActualToo(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", Anything, true})
      +	var count int
      +	_, count = args.Diff([]interface{}{"string", 123, true})
      +
      +	assert.Equal(t, 0, count)
      +
      +}
      +
      +func Test_Arguments_Diff_WithAnythingOfTypeArgument(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", AnythingOfType("int"), true})
      +	var count int
      +	_, count = args.Diff([]interface{}{"string", 123, true})
      +
      +	assert.Equal(t, 0, count)
      +
      +}
      +
      +func Test_Arguments_Diff_WithAnythingOfTypeArgument_Failing(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", AnythingOfType("string"), true})
      +	var count int
      +	var diff string
      +	diff, count = args.Diff([]interface{}{"string", 123, true})
      +
      +	assert.Equal(t, 1, count)
      +	assert.Contains(t, diff, `string != type int - %!s(int=123)`)
      +
      +}
      +
      +func Test_Arguments_Diff_WithArgMatcher(t *testing.T) {
      +	matchFn := func(a int) bool {
      +		return a == 123
      +	}
      +	var args = Arguments([]interface{}{"string", MatchedBy(matchFn), true})
      +
      +	diff, count := args.Diff([]interface{}{"string", 124, true})
      +	assert.Equal(t, 1, count)
      +	assert.Contains(t, diff, `%!s(int=124) not matched by func(int) bool`)
      +
      +	diff, count = args.Diff([]interface{}{"string", false, true})
      +	assert.Equal(t, 1, count)
      +	assert.Contains(t, diff, `%!s(bool=false) not matched by func(int) bool`)
      +
      +	diff, count = args.Diff([]interface{}{"string", 123, false})
      +	assert.Contains(t, diff, `%!s(int=123) matched by func(int) bool`)
      +
      +	diff, count = args.Diff([]interface{}{"string", 123, true})
      +	assert.Equal(t, 0, count)
      +	assert.Contains(t, diff, `No differences.`)
      +}
      +
      +func Test_Arguments_Assert(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", 123, true})
      +
      +	assert.True(t, args.Assert(t, "string", 123, true))
      +
      +}
      +
      +func Test_Arguments_String_Representation(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", 123, true})
      +	assert.Equal(t, `string,int,bool`, args.String())
      +
      +}
      +
      +func Test_Arguments_String(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", 123, true})
      +	assert.Equal(t, "string", args.String(0))
      +
      +}
      +
      +func Test_Arguments_Error(t *testing.T) {
      +
      +	var err = errors.New("An Error")
      +	var args = Arguments([]interface{}{"string", 123, true, err})
      +	assert.Equal(t, err, args.Error(3))
      +
      +}
      +
      +func Test_Arguments_Error_Nil(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", 123, true, nil})
      +	assert.Equal(t, nil, args.Error(3))
      +
      +}
      +
      +func Test_Arguments_Int(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", 123, true})
      +	assert.Equal(t, 123, args.Int(1))
      +
      +}
      +
      +func Test_Arguments_Bool(t *testing.T) {
      +
      +	var args = Arguments([]interface{}{"string", 123, true})
      +	assert.Equal(t, true, args.Bool(2))
      +
      +}
      diff --git a/vendor/github.com/stretchr/testify/package_test.go b/vendor/github.com/stretchr/testify/package_test.go
      new file mode 100644
      index 00000000..7ac5d6d8
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/package_test.go
      @@ -0,0 +1,12 @@
      +package testify
      +
      +import (
      +	"github.com/stretchr/testify/assert"
      +	"testing"
      +)
      +
      +func TestImports(t *testing.T) {
      +	if assert.Equal(t, 1, 1) != true {
      +		t.Error("Something is wrong.")
      +	}
      +}
      diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go
      new file mode 100644
      index 00000000..169de392
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/require/doc.go
      @@ -0,0 +1,28 @@
      +// Package require implements the same assertions as the `assert` package but
      +// stops test execution when a test fails.
      +//
      +// Example Usage
      +//
      +// The following is a complete example using require in a standard test function:
      +//    import (
      +//      "testing"
      +//      "github.com/stretchr/testify/require"
      +//    )
      +//
      +//    func TestSomething(t *testing.T) {
      +//
      +//      var a string = "Hello"
      +//      var b string = "Hello"
      +//
      +//      require.Equal(t, a, b, "The two words should be the same.")
      +//
      +//    }
      +//
      +// Assertions
      +//
      +// The `require` package have same global functions as in the `assert` package,
      +// but instead of returning a boolean result they call `t.FailNow()`.
      +//
      +// Every assertion function also takes an optional string message as the final argument,
      +// allowing custom error messages to be appended to the message the assertion method outputs.
      +package require
      diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go
      new file mode 100644
      index 00000000..d3c2ab9b
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/require/forward_requirements.go
      @@ -0,0 +1,16 @@
      +package require
      +
      +// Assertions provides assertion methods around the
      +// TestingT interface.
      +type Assertions struct {
      +	t TestingT
      +}
      +
      +// New makes a new Assertions object for the specified TestingT.
      +func New(t TestingT) *Assertions {
      +	return &Assertions{
      +		t: t,
      +	}
      +}
      +
      +//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl
      diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements_test.go b/vendor/github.com/stretchr/testify/require/forward_requirements_test.go
      new file mode 100644
      index 00000000..b120ae3b
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/require/forward_requirements_test.go
      @@ -0,0 +1,385 @@
      +package require
      +
      +import (
      +	"errors"
      +	"testing"
      +	"time"
      +)
      +
      +func TestImplementsWrapper(t *testing.T) {
      +	require := New(t)
      +
      +	require.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject))
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject))
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestIsTypeWrapper(t *testing.T) {
      +	require := New(t)
      +	require.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject))
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject))
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestEqualWrapper(t *testing.T) {
      +	require := New(t)
      +	require.Equal(1, 1)
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.Equal(1, 2)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNotEqualWrapper(t *testing.T) {
      +	require := New(t)
      +	require.NotEqual(1, 2)
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.NotEqual(2, 2)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestExactlyWrapper(t *testing.T) {
      +	require := New(t)
      +
      +	a := float32(1)
      +	b := float32(1)
      +	c := float64(1)
      +
      +	require.Exactly(a, b)
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.Exactly(a, c)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNotNilWrapper(t *testing.T) {
      +	require := New(t)
      +	require.NotNil(t, new(AssertionTesterConformingObject))
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.NotNil(nil)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNilWrapper(t *testing.T) {
      +	require := New(t)
      +	require.Nil(nil)
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.Nil(new(AssertionTesterConformingObject))
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestTrueWrapper(t *testing.T) {
      +	require := New(t)
      +	require.True(true)
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.True(false)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestFalseWrapper(t *testing.T) {
      +	require := New(t)
      +	require.False(false)
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.False(true)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestContainsWrapper(t *testing.T) {
      +	require := New(t)
      +	require.Contains("Hello World", "Hello")
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.Contains("Hello World", "Salut")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNotContainsWrapper(t *testing.T) {
      +	require := New(t)
      +	require.NotContains("Hello World", "Hello!")
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.NotContains("Hello World", "Hello")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestPanicsWrapper(t *testing.T) {
      +	require := New(t)
      +	require.Panics(func() {
      +		panic("Panic!")
      +	})
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.Panics(func() {})
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNotPanicsWrapper(t *testing.T) {
      +	require := New(t)
      +	require.NotPanics(func() {})
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.NotPanics(func() {
      +		panic("Panic!")
      +	})
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNoErrorWrapper(t *testing.T) {
      +	require := New(t)
      +	require.NoError(nil)
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.NoError(errors.New("some error"))
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestErrorWrapper(t *testing.T) {
      +	require := New(t)
      +	require.Error(errors.New("some error"))
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.Error(nil)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestEqualErrorWrapper(t *testing.T) {
      +	require := New(t)
      +	require.EqualError(errors.New("some error"), "some error")
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.EqualError(errors.New("some error"), "Not some error")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestEmptyWrapper(t *testing.T) {
      +	require := New(t)
      +	require.Empty("")
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.Empty("x")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNotEmptyWrapper(t *testing.T) {
      +	require := New(t)
      +	require.NotEmpty("x")
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.NotEmpty("")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestWithinDurationWrapper(t *testing.T) {
      +	require := New(t)
      +	a := time.Now()
      +	b := a.Add(10 * time.Second)
      +
      +	require.WithinDuration(a, b, 15*time.Second)
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.WithinDuration(a, b, 5*time.Second)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestInDeltaWrapper(t *testing.T) {
      +	require := New(t)
      +	require.InDelta(1.001, 1, 0.01)
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.InDelta(1, 2, 0.5)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestZeroWrapper(t *testing.T) {
      +	require := New(t)
      +	require.Zero(0)
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.Zero(1)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNotZeroWrapper(t *testing.T) {
      +	require := New(t)
      +	require.NotZero(1)
      +
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +	mockRequire.NotZero(0)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestJSONEqWrapper_EqualSONString(t *testing.T) {
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +
      +	mockRequire.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`)
      +	if mockT.Failed {
      +		t.Error("Check should pass")
      +	}
      +}
      +
      +func TestJSONEqWrapper_EquivalentButNotEqual(t *testing.T) {
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +
      +	mockRequire.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
      +	if mockT.Failed {
      +		t.Error("Check should pass")
      +	}
      +}
      +
      +func TestJSONEqWrapper_HashOfArraysAndHashes(t *testing.T) {
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +
      +	mockRequire.JSONEq("{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}",
      +		"{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}")
      +	if mockT.Failed {
      +		t.Error("Check should pass")
      +	}
      +}
      +
      +func TestJSONEqWrapper_Array(t *testing.T) {
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +
      +	mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`)
      +	if mockT.Failed {
      +		t.Error("Check should pass")
      +	}
      +}
      +
      +func TestJSONEqWrapper_HashAndArrayNotEquivalent(t *testing.T) {
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +
      +	mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestJSONEqWrapper_HashesNotEquivalent(t *testing.T) {
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +
      +	mockRequire.JSONEq(`{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestJSONEqWrapper_ActualIsNotJSON(t *testing.T) {
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +
      +	mockRequire.JSONEq(`{"foo": "bar"}`, "Not JSON")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestJSONEqWrapper_ExpectedIsNotJSON(t *testing.T) {
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +
      +	mockRequire.JSONEq("Not JSON", `{"foo": "bar", "hello": "world"}`)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestJSONEqWrapper_ExpectedAndActualNotJSON(t *testing.T) {
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +
      +	mockRequire.JSONEq("Not JSON", "Not JSON")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestJSONEqWrapper_ArraysOfDifferentOrder(t *testing.T) {
      +	mockT := new(MockT)
      +	mockRequire := New(mockT)
      +
      +	mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
      new file mode 100644
      index 00000000..1bcfcb0d
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/require/require.go
      @@ -0,0 +1,464 @@
      +/*
      +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
      +* THIS FILE MUST NOT BE EDITED BY HAND
      +*/
      +
      +package require
      +
      +import (
      +
      +	assert "github.com/stretchr/testify/assert"
      +	http "net/http"
      +	url "net/url"
      +	time "time"
      +)
      +
      +
      +// Condition uses a Comparison to assert a complex condition.
      +func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
      +  if !assert.Condition(t, comp, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// Contains asserts that the specified string, list(array, slice...) or map contains the
      +// specified substring or element.
      +// 
      +//    assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
      +//    assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
      +//    assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
      +  if !assert.Contains(t, s, contains, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
      +// a slice or a channel with len == 0.
      +// 
      +//  assert.Empty(t, obj)
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
      +  if !assert.Empty(t, object, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// Equal asserts that two objects are equal.
      +// 
      +//    assert.Equal(t, 123, 123, "123 and 123 should be equal")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
      +  if !assert.Equal(t, expected, actual, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// EqualError asserts that a function returned an error (i.e. not `nil`)
      +// and that it is equal to the provided error.
      +// 
      +//   actualObj, err := SomeFunction()
      +//   if assert.Error(t, err, "An error was expected") {
      +// 	   assert.Equal(t, err, expectedError)
      +//   }
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
      +  if !assert.EqualError(t, theError, errString, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// EqualValues asserts that two objects are equal or convertable to the same types
      +// and equal.
      +// 
      +//    assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
      +  if !assert.EqualValues(t, expected, actual, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// Error asserts that a function returned an error (i.e. not `nil`).
      +// 
      +//   actualObj, err := SomeFunction()
      +//   if assert.Error(t, err, "An error was expected") {
      +// 	   assert.Equal(t, err, expectedError)
      +//   }
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func Error(t TestingT, err error, msgAndArgs ...interface{}) {
      +  if !assert.Error(t, err, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// Exactly asserts that two objects are equal is value and type.
      +// 
      +//    assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
      +  if !assert.Exactly(t, expected, actual, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// Fail reports a failure through
      +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
      +  if !assert.Fail(t, failureMessage, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// FailNow fails test
      +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
      +  if !assert.FailNow(t, failureMessage, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// False asserts that the specified value is false.
      +// 
      +//    assert.False(t, myBool, "myBool should be false")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func False(t TestingT, value bool, msgAndArgs ...interface{}) {
      +  if !assert.False(t, value, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// HTTPBodyContains asserts that a specified handler returns a
      +// body that contains a string.
      +// 
      +//  assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
      +  if !assert.HTTPBodyContains(t, handler, method, url, values, str) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// HTTPBodyNotContains asserts that a specified handler returns a
      +// body that does not contain a string.
      +// 
      +//  assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
      +  if !assert.HTTPBodyNotContains(t, handler, method, url, values, str) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// HTTPError asserts that a specified handler returns an error status code.
      +// 
      +//  assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
      +  if !assert.HTTPError(t, handler, method, url, values) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// HTTPRedirect asserts that a specified handler returns a redirect status code.
      +// 
      +//  assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
      +  if !assert.HTTPRedirect(t, handler, method, url, values) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// HTTPSuccess asserts that a specified handler returns a success status code.
      +// 
      +//  assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
      +  if !assert.HTTPSuccess(t, handler, method, url, values) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// Implements asserts that an object is implemented by the specified interface.
      +// 
      +//    assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
      +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
      +  if !assert.Implements(t, interfaceObject, object, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// InDelta asserts that the two numerals are within delta of each other.
      +// 
      +// 	 assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
      +  if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// InDeltaSlice is the same as InDelta, except it compares two slices.
      +func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
      +  if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// InEpsilon asserts that expected and actual have a relative error less than epsilon
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
      +  if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
      +func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
      +  if !assert.InEpsilonSlice(t, expected, actual, delta, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// IsType asserts that the specified objects are of the same type.
      +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
      +  if !assert.IsType(t, expectedType, object, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// JSONEq asserts that two JSON strings are equivalent.
      +// 
      +//  assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
      +  if !assert.JSONEq(t, expected, actual, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// Len asserts that the specified object has specific length.
      +// Len also fails if the object has a type that len() not accept.
      +// 
      +//    assert.Len(t, mySlice, 3, "The size of slice is not 3")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) {
      +  if !assert.Len(t, object, length, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// Nil asserts that the specified object is nil.
      +// 
      +//    assert.Nil(t, err, "err should be nothing")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
      +  if !assert.Nil(t, object, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// NoError asserts that a function returned no error (i.e. `nil`).
      +// 
      +//   actualObj, err := SomeFunction()
      +//   if assert.NoError(t, err) {
      +// 	   assert.Equal(t, actualObj, expectedObj)
      +//   }
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
      +  if !assert.NoError(t, err, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
      +// specified substring or element.
      +// 
      +//    assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
      +//    assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
      +//    assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
      +  if !assert.NotContains(t, s, contains, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
      +// a slice or a channel with len == 0.
      +// 
      +//  if assert.NotEmpty(t, obj) {
      +//    assert.Equal(t, "two", obj[1])
      +//  }
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
      +  if !assert.NotEmpty(t, object, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// NotEqual asserts that the specified values are NOT equal.
      +// 
      +//    assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
      +  if !assert.NotEqual(t, expected, actual, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// NotNil asserts that the specified object is not nil.
      +// 
      +//    assert.NotNil(t, err, "err should be something")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
      +  if !assert.NotNil(t, object, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
      +// 
      +//   assert.NotPanics(t, func(){
      +//     RemainCalm()
      +//   }, "Calling RemainCalm() should NOT panic")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
      +  if !assert.NotPanics(t, f, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// NotRegexp asserts that a specified regexp does not match a string.
      +// 
      +//  assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
      +//  assert.NotRegexp(t, "^start", "it's not starting")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
      +  if !assert.NotRegexp(t, rx, str, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// NotZero asserts that i is not the zero value for its type and returns the truth.
      +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
      +  if !assert.NotZero(t, i, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// Panics asserts that the code inside the specified PanicTestFunc panics.
      +// 
      +//   assert.Panics(t, func(){
      +//     GoCrazy()
      +//   }, "Calling GoCrazy() should panic")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
      +  if !assert.Panics(t, f, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// Regexp asserts that a specified regexp matches a string.
      +// 
      +//  assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
      +//  assert.Regexp(t, "start...$", "it's not starting")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
      +  if !assert.Regexp(t, rx, str, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// True asserts that the specified value is true.
      +// 
      +//    assert.True(t, myBool, "myBool should be true")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func True(t TestingT, value bool, msgAndArgs ...interface{}) {
      +  if !assert.True(t, value, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// WithinDuration asserts that the two times are within duration delta of each other.
      +// 
      +//   assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
      +  if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      +
      +
      +// Zero asserts that i is the zero value for its type and returns the truth.
      +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
      +  if !assert.Zero(t, i, msgAndArgs...) {
      +    t.FailNow()
      +  }
      +}
      diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl
      new file mode 100644
      index 00000000..ab1b1e9f
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl
      @@ -0,0 +1,6 @@
      +{{.Comment}}
      +func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
      +  if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) {
      +    t.FailNow()
      +  }
      +}
      diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
      new file mode 100644
      index 00000000..58324f10
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/require/require_forward.go
      @@ -0,0 +1,388 @@
      +/*
      +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
      +* THIS FILE MUST NOT BE EDITED BY HAND
      +*/
      +
      +package require
      +
      +import (
      +
      +	assert "github.com/stretchr/testify/assert"
      +	http "net/http"
      +	url "net/url"
      +	time "time"
      +)
      +
      +
      +// Condition uses a Comparison to assert a complex condition.
      +func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) {
      +	Condition(a.t, comp, msgAndArgs...)
      +}
      +
      +
      +// Contains asserts that the specified string, list(array, slice...) or map contains the
      +// specified substring or element.
      +// 
      +//    a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
      +//    a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
      +//    a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
      +	Contains(a.t, s, contains, msgAndArgs...)
      +}
      +
      +
      +// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
      +// a slice or a channel with len == 0.
      +// 
      +//  a.Empty(obj)
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
      +	Empty(a.t, object, msgAndArgs...)
      +}
      +
      +
      +// Equal asserts that two objects are equal.
      +// 
      +//    a.Equal(123, 123, "123 and 123 should be equal")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
      +	Equal(a.t, expected, actual, msgAndArgs...)
      +}
      +
      +
      +// EqualError asserts that a function returned an error (i.e. not `nil`)
      +// and that it is equal to the provided error.
      +// 
      +//   actualObj, err := SomeFunction()
      +//   if assert.Error(t, err, "An error was expected") {
      +// 	   assert.Equal(t, err, expectedError)
      +//   }
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) {
      +	EqualError(a.t, theError, errString, msgAndArgs...)
      +}
      +
      +
      +// EqualValues asserts that two objects are equal or convertable to the same types
      +// and equal.
      +// 
      +//    a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
      +	EqualValues(a.t, expected, actual, msgAndArgs...)
      +}
      +
      +
      +// Error asserts that a function returned an error (i.e. not `nil`).
      +// 
      +//   actualObj, err := SomeFunction()
      +//   if a.Error(err, "An error was expected") {
      +// 	   assert.Equal(t, err, expectedError)
      +//   }
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
      +	Error(a.t, err, msgAndArgs...)
      +}
      +
      +
      +// Exactly asserts that two objects are equal is value and type.
      +// 
      +//    a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
      +	Exactly(a.t, expected, actual, msgAndArgs...)
      +}
      +
      +
      +// Fail reports a failure through
      +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) {
      +	Fail(a.t, failureMessage, msgAndArgs...)
      +}
      +
      +
      +// FailNow fails test
      +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) {
      +	FailNow(a.t, failureMessage, msgAndArgs...)
      +}
      +
      +
      +// False asserts that the specified value is false.
      +// 
      +//    a.False(myBool, "myBool should be false")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) {
      +	False(a.t, value, msgAndArgs...)
      +}
      +
      +
      +// HTTPBodyContains asserts that a specified handler returns a
      +// body that contains a string.
      +// 
      +//  a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
      +	HTTPBodyContains(a.t, handler, method, url, values, str)
      +}
      +
      +
      +// HTTPBodyNotContains asserts that a specified handler returns a
      +// body that does not contain a string.
      +// 
      +//  a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
      +	HTTPBodyNotContains(a.t, handler, method, url, values, str)
      +}
      +
      +
      +// HTTPError asserts that a specified handler returns an error status code.
      +// 
      +//  a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) {
      +	HTTPError(a.t, handler, method, url, values)
      +}
      +
      +
      +// HTTPRedirect asserts that a specified handler returns a redirect status code.
      +// 
      +//  a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) {
      +	HTTPRedirect(a.t, handler, method, url, values)
      +}
      +
      +
      +// HTTPSuccess asserts that a specified handler returns a success status code.
      +// 
      +//  a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) {
      +	HTTPSuccess(a.t, handler, method, url, values)
      +}
      +
      +
      +// Implements asserts that an object is implemented by the specified interface.
      +// 
      +//    a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
      +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
      +	Implements(a.t, interfaceObject, object, msgAndArgs...)
      +}
      +
      +
      +// InDelta asserts that the two numerals are within delta of each other.
      +// 
      +// 	 a.InDelta(math.Pi, (22 / 7.0), 0.01)
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
      +	InDelta(a.t, expected, actual, delta, msgAndArgs...)
      +}
      +
      +
      +// InDeltaSlice is the same as InDelta, except it compares two slices.
      +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
      +	InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
      +}
      +
      +
      +// InEpsilon asserts that expected and actual have a relative error less than epsilon
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
      +	InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
      +}
      +
      +
      +// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
      +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
      +	InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
      +}
      +
      +
      +// IsType asserts that the specified objects are of the same type.
      +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
      +	IsType(a.t, expectedType, object, msgAndArgs...)
      +}
      +
      +
      +// JSONEq asserts that two JSON strings are equivalent.
      +// 
      +//  a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) {
      +	JSONEq(a.t, expected, actual, msgAndArgs...)
      +}
      +
      +
      +// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
      +// 
      +//    a.Len(mySlice, 3, "The size of slice is not 3")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) {
      +	Len(a.t, object, length, msgAndArgs...)
      +}
      +
      +
      +// Nil asserts that the specified object is nil.
      +// 
      +//    a.Nil(err, "err should be nothing")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) {
      +	Nil(a.t, object, msgAndArgs...)
      +}
      +
      +
      +// NoError asserts that a function returned no error (i.e. `nil`).
      +// 
      +//   actualObj, err := SomeFunction()
      +//   if a.NoError(err) {
      +// 	   assert.Equal(t, actualObj, expectedObj)
      +//   }
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) {
      +	NoError(a.t, err, msgAndArgs...)
      +}
      +
      +
      +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
      +// specified substring or element.
      +// 
      +//    a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
      +//    a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
      +//    a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
      +	NotContains(a.t, s, contains, msgAndArgs...)
      +}
      +
      +
      +// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
      +// a slice or a channel with len == 0.
      +// 
      +//  if a.NotEmpty(obj) {
      +//    assert.Equal(t, "two", obj[1])
      +//  }
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
      +	NotEmpty(a.t, object, msgAndArgs...)
      +}
      +
      +
      +// NotEqual asserts that the specified values are NOT equal.
      +// 
      +//    a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
      +	NotEqual(a.t, expected, actual, msgAndArgs...)
      +}
      +
      +
      +// NotNil asserts that the specified object is not nil.
      +// 
      +//    a.NotNil(err, "err should be something")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) {
      +	NotNil(a.t, object, msgAndArgs...)
      +}
      +
      +
      +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
      +// 
      +//   a.NotPanics(func(){
      +//     RemainCalm()
      +//   }, "Calling RemainCalm() should NOT panic")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
      +	NotPanics(a.t, f, msgAndArgs...)
      +}
      +
      +
      +// NotRegexp asserts that a specified regexp does not match a string.
      +// 
      +//  a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
      +//  a.NotRegexp("^start", "it's not starting")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
      +	NotRegexp(a.t, rx, str, msgAndArgs...)
      +}
      +
      +
      +// NotZero asserts that i is not the zero value for its type and returns the truth.
      +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) {
      +	NotZero(a.t, i, msgAndArgs...)
      +}
      +
      +
      +// Panics asserts that the code inside the specified PanicTestFunc panics.
      +// 
      +//   a.Panics(func(){
      +//     GoCrazy()
      +//   }, "Calling GoCrazy() should panic")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
      +	Panics(a.t, f, msgAndArgs...)
      +}
      +
      +
      +// Regexp asserts that a specified regexp matches a string.
      +// 
      +//  a.Regexp(regexp.MustCompile("start"), "it's starting")
      +//  a.Regexp("start...$", "it's not starting")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
      +	Regexp(a.t, rx, str, msgAndArgs...)
      +}
      +
      +
      +// True asserts that the specified value is true.
      +// 
      +//    a.True(myBool, "myBool should be true")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) {
      +	True(a.t, value, msgAndArgs...)
      +}
      +
      +
      +// WithinDuration asserts that the two times are within duration delta of each other.
      +// 
      +//   a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
      +// 
      +// Returns whether the assertion was successful (true) or not (false).
      +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
      +	WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
      +}
      +
      +
      +// Zero asserts that i is the zero value for its type and returns the truth.
      +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) {
      +	Zero(a.t, i, msgAndArgs...)
      +}
      diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl
      new file mode 100644
      index 00000000..b93569e0
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl
      @@ -0,0 +1,4 @@
      +{{.CommentWithoutT "a"}}
      +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) {
      +	{{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
      +}
      diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go
      new file mode 100644
      index 00000000..41147562
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/require/requirements.go
      @@ -0,0 +1,9 @@
      +package require
      +
      +// TestingT is an interface wrapper around *testing.T
      +type TestingT interface {
      +	Errorf(format string, args ...interface{})
      +	FailNow()
      +}
      +
      +//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl
      diff --git a/vendor/github.com/stretchr/testify/require/requirements_test.go b/vendor/github.com/stretchr/testify/require/requirements_test.go
      new file mode 100644
      index 00000000..d2ccc99c
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/require/requirements_test.go
      @@ -0,0 +1,369 @@
      +package require
      +
      +import (
      +	"errors"
      +	"testing"
      +	"time"
      +)
      +
      +// AssertionTesterInterface defines an interface to be used for testing assertion methods
      +type AssertionTesterInterface interface {
      +	TestMethod()
      +}
      +
      +// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface
      +type AssertionTesterConformingObject struct {
      +}
      +
      +func (a *AssertionTesterConformingObject) TestMethod() {
      +}
      +
      +// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface
      +type AssertionTesterNonConformingObject struct {
      +}
      +
      +type MockT struct {
      +	Failed bool
      +}
      +
      +func (t *MockT) FailNow() {
      +	t.Failed = true
      +}
      +
      +func (t *MockT) Errorf(format string, args ...interface{}) {
      +	_, _ = format, args
      +}
      +
      +func TestImplements(t *testing.T) {
      +
      +	Implements(t, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject))
      +
      +	mockT := new(MockT)
      +	Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject))
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestIsType(t *testing.T) {
      +
      +	IsType(t, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject))
      +
      +	mockT := new(MockT)
      +	IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject))
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestEqual(t *testing.T) {
      +
      +	Equal(t, 1, 1)
      +
      +	mockT := new(MockT)
      +	Equal(mockT, 1, 2)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +
      +}
      +
      +func TestNotEqual(t *testing.T) {
      +
      +	NotEqual(t, 1, 2)
      +	mockT := new(MockT)
      +	NotEqual(mockT, 2, 2)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestExactly(t *testing.T) {
      +
      +	a := float32(1)
      +	b := float32(1)
      +	c := float64(1)
      +
      +	Exactly(t, a, b)
      +
      +	mockT := new(MockT)
      +	Exactly(mockT, a, c)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNotNil(t *testing.T) {
      +
      +	NotNil(t, new(AssertionTesterConformingObject))
      +
      +	mockT := new(MockT)
      +	NotNil(mockT, nil)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNil(t *testing.T) {
      +
      +	Nil(t, nil)
      +
      +	mockT := new(MockT)
      +	Nil(mockT, new(AssertionTesterConformingObject))
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestTrue(t *testing.T) {
      +
      +	True(t, true)
      +
      +	mockT := new(MockT)
      +	True(mockT, false)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestFalse(t *testing.T) {
      +
      +	False(t, false)
      +
      +	mockT := new(MockT)
      +	False(mockT, true)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestContains(t *testing.T) {
      +
      +	Contains(t, "Hello World", "Hello")
      +
      +	mockT := new(MockT)
      +	Contains(mockT, "Hello World", "Salut")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNotContains(t *testing.T) {
      +
      +	NotContains(t, "Hello World", "Hello!")
      +
      +	mockT := new(MockT)
      +	NotContains(mockT, "Hello World", "Hello")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestPanics(t *testing.T) {
      +
      +	Panics(t, func() {
      +		panic("Panic!")
      +	})
      +
      +	mockT := new(MockT)
      +	Panics(mockT, func() {})
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNotPanics(t *testing.T) {
      +
      +	NotPanics(t, func() {})
      +
      +	mockT := new(MockT)
      +	NotPanics(mockT, func() {
      +		panic("Panic!")
      +	})
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNoError(t *testing.T) {
      +
      +	NoError(t, nil)
      +
      +	mockT := new(MockT)
      +	NoError(mockT, errors.New("some error"))
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestError(t *testing.T) {
      +
      +	Error(t, errors.New("some error"))
      +
      +	mockT := new(MockT)
      +	Error(mockT, nil)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestEqualError(t *testing.T) {
      +
      +	EqualError(t, errors.New("some error"), "some error")
      +
      +	mockT := new(MockT)
      +	EqualError(mockT, errors.New("some error"), "Not some error")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestEmpty(t *testing.T) {
      +
      +	Empty(t, "")
      +
      +	mockT := new(MockT)
      +	Empty(mockT, "x")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNotEmpty(t *testing.T) {
      +
      +	NotEmpty(t, "x")
      +
      +	mockT := new(MockT)
      +	NotEmpty(mockT, "")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestWithinDuration(t *testing.T) {
      +
      +	a := time.Now()
      +	b := a.Add(10 * time.Second)
      +
      +	WithinDuration(t, a, b, 15*time.Second)
      +
      +	mockT := new(MockT)
      +	WithinDuration(mockT, a, b, 5*time.Second)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestInDelta(t *testing.T) {
      +
      +	InDelta(t, 1.001, 1, 0.01)
      +
      +	mockT := new(MockT)
      +	InDelta(mockT, 1, 2, 0.5)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestZero(t *testing.T) {
      +
      +	Zero(t, "")
      +
      +	mockT := new(MockT)
      +	Zero(mockT, "x")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestNotZero(t *testing.T) {
      +
      +	NotZero(t, "x")
      +
      +	mockT := new(MockT)
      +	NotZero(mockT, "")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestJSONEq_EqualSONString(t *testing.T) {
      +	mockT := new(MockT)
      +	JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`)
      +	if mockT.Failed {
      +		t.Error("Check should pass")
      +	}
      +}
      +
      +func TestJSONEq_EquivalentButNotEqual(t *testing.T) {
      +	mockT := new(MockT)
      +	JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
      +	if mockT.Failed {
      +		t.Error("Check should pass")
      +	}
      +}
      +
      +func TestJSONEq_HashOfArraysAndHashes(t *testing.T) {
      +	mockT := new(MockT)
      +	JSONEq(mockT, "{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}",
      +		"{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}")
      +	if mockT.Failed {
      +		t.Error("Check should pass")
      +	}
      +}
      +
      +func TestJSONEq_Array(t *testing.T) {
      +	mockT := new(MockT)
      +	JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`)
      +	if mockT.Failed {
      +		t.Error("Check should pass")
      +	}
      +}
      +
      +func TestJSONEq_HashAndArrayNotEquivalent(t *testing.T) {
      +	mockT := new(MockT)
      +	JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestJSONEq_HashesNotEquivalent(t *testing.T) {
      +	mockT := new(MockT)
      +	JSONEq(mockT, `{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestJSONEq_ActualIsNotJSON(t *testing.T) {
      +	mockT := new(MockT)
      +	JSONEq(mockT, `{"foo": "bar"}`, "Not JSON")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestJSONEq_ExpectedIsNotJSON(t *testing.T) {
      +	mockT := new(MockT)
      +	JSONEq(mockT, "Not JSON", `{"foo": "bar", "hello": "world"}`)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestJSONEq_ExpectedAndActualNotJSON(t *testing.T) {
      +	mockT := new(MockT)
      +	JSONEq(mockT, "Not JSON", "Not JSON")
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      +
      +func TestJSONEq_ArraysOfDifferentOrder(t *testing.T) {
      +	mockT := new(MockT)
      +	JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`)
      +	if !mockT.Failed {
      +		t.Error("Check should fail")
      +	}
      +}
      diff --git a/vendor/github.com/stretchr/testify/suite/doc.go b/vendor/github.com/stretchr/testify/suite/doc.go
      new file mode 100644
      index 00000000..f91a245d
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/suite/doc.go
      @@ -0,0 +1,65 @@
      +// Package suite contains logic for creating testing suite structs
      +// and running the methods on those structs as tests.  The most useful
      +// piece of this package is that you can create setup/teardown methods
      +// on your testing suites, which will run before/after the whole suite
      +// or individual tests (depending on which interface(s) you
      +// implement).
      +//
      +// A testing suite is usually built by first extending the built-in
      +// suite functionality from suite.Suite in testify.  Alternatively,
      +// you could reproduce that logic on your own if you wanted (you
      +// just need to implement the TestingSuite interface from
      +// suite/interfaces.go).
      +//
      +// After that, you can implement any of the interfaces in
      +// suite/interfaces.go to add setup/teardown functionality to your
      +// suite, and add any methods that start with "Test" to add tests.
      +// Methods that do not match any suite interfaces and do not begin
      +// with "Test" will not be run by testify, and can safely be used as
      +// helper methods.
      +//
      +// Once you've built your testing suite, you need to run the suite
      +// (using suite.Run from testify) inside any function that matches the
      +// identity that "go test" is already looking for (i.e.
      +// func(*testing.T)).
      +//
+// A regular expression to select test suites can be specified via the
+// command-line argument "-run". A regular expression to select the
+// methods of test suites can be specified via the command-line
+// argument "-m". The Suite object has assertion methods.
      +//
      +// A crude example:
      +//     // Basic imports
      +//     import (
      +//         "testing"
      +//         "github.com/stretchr/testify/assert"
      +//         "github.com/stretchr/testify/suite"
      +//     )
      +//
      +//     // Define the suite, and absorb the built-in basic suite
      +//     // functionality from testify - including a T() method which
      +//     // returns the current testing context
      +//     type ExampleTestSuite struct {
      +//         suite.Suite
      +//         VariableThatShouldStartAtFive int
      +//     }
      +//
      +//     // Make sure that VariableThatShouldStartAtFive is set to five
      +//     // before each test
      +//     func (suite *ExampleTestSuite) SetupTest() {
      +//         suite.VariableThatShouldStartAtFive = 5
      +//     }
      +//
      +//     // All methods that begin with "Test" are run as tests within a
      +//     // suite.
      +//     func (suite *ExampleTestSuite) TestExample() {
      +//         assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive)
      +//         suite.Equal(5, suite.VariableThatShouldStartAtFive)
      +//     }
      +//
      +//     // In order for 'go test' to run this suite, we need to create
      +//     // a normal test function and pass our suite to suite.Run
      +//     func TestExampleTestSuite(t *testing.T) {
      +//         suite.Run(t, new(ExampleTestSuite))
      +//     }
      +package suite
      diff --git a/vendor/github.com/stretchr/testify/suite/interfaces.go b/vendor/github.com/stretchr/testify/suite/interfaces.go
      new file mode 100644
      index 00000000..20969472
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/suite/interfaces.go
      @@ -0,0 +1,34 @@
      +package suite
      +
      +import "testing"
      +
      +// TestingSuite can store and return the current *testing.T context
      +// generated by 'go test'.
      +type TestingSuite interface {
      +	T() *testing.T
      +	SetT(*testing.T)
      +}
      +
      +// SetupAllSuite has a SetupSuite method, which will run before the
      +// tests in the suite are run.
      +type SetupAllSuite interface {
      +	SetupSuite()
      +}
      +
      +// SetupTestSuite has a SetupTest method, which will run before each
      +// test in the suite.
      +type SetupTestSuite interface {
      +	SetupTest()
      +}
      +
      +// TearDownAllSuite has a TearDownSuite method, which will run after
      +// all the tests in the suite have been run.
      +type TearDownAllSuite interface {
      +	TearDownSuite()
      +}
      +
      +// TearDownTestSuite has a TearDownTest method, which will run after
      +// each test in the suite.
      +type TearDownTestSuite interface {
      +	TearDownTest()
      +}
      diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go
      new file mode 100644
      index 00000000..f831e251
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/suite/suite.go
      @@ -0,0 +1,115 @@
      +package suite
      +
      +import (
      +	"flag"
      +	"fmt"
      +	"os"
      +	"reflect"
      +	"regexp"
      +	"testing"
      +
      +	"github.com/stretchr/testify/assert"
      +	"github.com/stretchr/testify/require"
      +)
      +
      +var matchMethod = flag.String("m", "", "regular expression to select tests of the suite to run")
      +
      +// Suite is a basic testing suite with methods for storing and
      +// retrieving the current *testing.T context.
      +type Suite struct {
      +	*assert.Assertions
      +	require *require.Assertions
      +	t       *testing.T
      +}
      +
      +// T retrieves the current *testing.T context.
      +func (suite *Suite) T() *testing.T {
      +	return suite.t
      +}
      +
      +// SetT sets the current *testing.T context.
      +func (suite *Suite) SetT(t *testing.T) {
      +	suite.t = t
      +	suite.Assertions = assert.New(t)
      +	suite.require = require.New(t)
      +}
      +
      +// Require returns a require context for suite.
      +func (suite *Suite) Require() *require.Assertions {
      +	if suite.require == nil {
      +		suite.require = require.New(suite.T())
      +	}
      +	return suite.require
      +}
      +
      +// Assert returns an assert context for suite.  Normally, you can call
      +// `suite.NoError(expected, actual)`, but for situations where the embedded
      +// methods are overridden (for example, you might want to override
      +// assert.Assertions with require.Assertions), this method is provided so you
      +// can call `suite.Assert().NoError()`.
      +func (suite *Suite) Assert() *assert.Assertions {
      +	if suite.Assertions == nil {
      +		suite.Assertions = assert.New(suite.T())
      +	}
      +	return suite.Assertions
      +}
      +
      +// Run takes a testing suite and runs all of the tests attached
      +// to it.
      +func Run(t *testing.T, suite TestingSuite) {
      +	suite.SetT(t)
      +
      +	if setupAllSuite, ok := suite.(SetupAllSuite); ok {
      +		setupAllSuite.SetupSuite()
      +	}
      +	defer func() {
      +		if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok {
      +			tearDownAllSuite.TearDownSuite()
      +		}
      +	}()
      +
      +	methodFinder := reflect.TypeOf(suite)
      +	tests := []testing.InternalTest{}
      +	for index := 0; index < methodFinder.NumMethod(); index++ {
      +		method := methodFinder.Method(index)
      +		ok, err := methodFilter(method.Name)
      +		if err != nil {
      +			fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err)
      +			os.Exit(1)
      +		}
      +		if ok {
      +			test := testing.InternalTest{
      +				Name: method.Name,
      +				F: func(t *testing.T) {
      +					parentT := suite.T()
      +					suite.SetT(t)
      +					if setupTestSuite, ok := suite.(SetupTestSuite); ok {
      +						setupTestSuite.SetupTest()
      +					}
      +					defer func() {
      +						if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok {
      +							tearDownTestSuite.TearDownTest()
      +						}
      +						suite.SetT(parentT)
      +					}()
      +					method.Func.Call([]reflect.Value{reflect.ValueOf(suite)})
      +				},
      +			}
      +			tests = append(tests, test)
      +		}
      +	}
      +
      +	if !testing.RunTests(func(_, _ string) (bool, error) { return true, nil },
      +		tests) {
      +		t.Fail()
      +	}
      +}
      +
+// methodFilter reports whether a test method name matches the regular
+// expression specified via the command-line argument -m.
      +func methodFilter(name string) (bool, error) {
      +	if ok, _ := regexp.MatchString("^Test", name); !ok {
      +		return false, nil
      +	}
      +	return regexp.MatchString(*matchMethod, name)
      +}
      diff --git a/vendor/github.com/stretchr/testify/suite/suite_test.go b/vendor/github.com/stretchr/testify/suite/suite_test.go
      new file mode 100644
      index 00000000..c7c4e88f
      --- /dev/null
      +++ b/vendor/github.com/stretchr/testify/suite/suite_test.go
      @@ -0,0 +1,239 @@
      +package suite
      +
      +import (
      +	"errors"
      +	"io/ioutil"
      +	"os"
      +	"testing"
      +
      +	"github.com/stretchr/testify/assert"
      +)
      +
      +// SuiteRequireTwice is intended to test the usage of suite.Require in two
      +// different tests
      +type SuiteRequireTwice struct{ Suite }
      +
      +// TestSuiteRequireTwice checks for regressions of issue #149 where
      +// suite.requirements was not initialised in suite.SetT()
      +// A regression would result on these tests panicking rather than failing.
      +func TestSuiteRequireTwice(t *testing.T) {
      +	ok := testing.RunTests(
      +		func(_, _ string) (bool, error) { return true, nil },
      +		[]testing.InternalTest{{
      +			Name: "TestSuiteRequireTwice",
      +			F: func(t *testing.T) {
      +				suite := new(SuiteRequireTwice)
      +				Run(t, suite)
      +			},
      +		}},
      +	)
      +	assert.Equal(t, false, ok)
      +}
      +
      +func (s *SuiteRequireTwice) TestRequireOne() {
      +	r := s.Require()
      +	r.Equal(1, 2)
      +}
      +
      +func (s *SuiteRequireTwice) TestRequireTwo() {
      +	r := s.Require()
      +	r.Equal(1, 2)
      +}
      +
      +// This suite is intended to store values to make sure that only
      +// testing-suite-related methods are run.  It's also a fully
      +// functional example of a testing suite, using setup/teardown methods
      +// and a helper method that is ignored by testify.  To make this look
      +// more like a real world example, all tests in the suite perform some
      +// type of assertion.
      +type SuiteTester struct {
      +	// Include our basic suite logic.
      +	Suite
      +
      +	// Keep counts of how many times each method is run.
      +	SetupSuiteRunCount    int
      +	TearDownSuiteRunCount int
      +	SetupTestRunCount     int
      +	TearDownTestRunCount  int
      +	TestOneRunCount       int
      +	TestTwoRunCount       int
      +	NonTestMethodRunCount int
      +}
      +
      +type SuiteSkipTester struct {
      +	// Include our basic suite logic.
      +	Suite
      +
      +	// Keep counts of how many times each method is run.
      +	SetupSuiteRunCount    int
      +	TearDownSuiteRunCount int
      +}
      +
      +// The SetupSuite method will be run by testify once, at the very
      +// start of the testing suite, before any tests are run.
      +func (suite *SuiteTester) SetupSuite() {
      +	suite.SetupSuiteRunCount++
      +}
      +
      +func (suite *SuiteSkipTester) SetupSuite() {
      +	suite.SetupSuiteRunCount++
      +	suite.T().Skip()
      +}
      +
      +// The TearDownSuite method will be run by testify once, at the very
      +// end of the testing suite, after all tests have been run.
      +func (suite *SuiteTester) TearDownSuite() {
      +	suite.TearDownSuiteRunCount++
      +}
      +
      +func (suite *SuiteSkipTester) TearDownSuite() {
      +	suite.TearDownSuiteRunCount++
      +}
      +
      +// The SetupTest method will be run before every test in the suite.
      +func (suite *SuiteTester) SetupTest() {
      +	suite.SetupTestRunCount++
      +}
      +
      +// The TearDownTest method will be run after every test in the suite.
      +func (suite *SuiteTester) TearDownTest() {
      +	suite.TearDownTestRunCount++
      +}
      +
      +// Every method in a testing suite that begins with "Test" will be run
      +// as a test.  TestOne is an example of a test.  For the purposes of
      +// this example, we've included assertions in the tests, since most
      +// tests will issue assertions.
      +func (suite *SuiteTester) TestOne() {
      +	beforeCount := suite.TestOneRunCount
      +	suite.TestOneRunCount++
      +	assert.Equal(suite.T(), suite.TestOneRunCount, beforeCount+1)
      +	suite.Equal(suite.TestOneRunCount, beforeCount+1)
      +}
      +
      +// TestTwo is another example of a test.
      +func (suite *SuiteTester) TestTwo() {
      +	beforeCount := suite.TestTwoRunCount
      +	suite.TestTwoRunCount++
      +	assert.NotEqual(suite.T(), suite.TestTwoRunCount, beforeCount)
      +	suite.NotEqual(suite.TestTwoRunCount, beforeCount)
      +}
      +
      +func (suite *SuiteTester) TestSkip() {
      +	suite.T().Skip()
      +}
      +
      +// NonTestMethod does not begin with "Test", so it will not be run by
      +// testify as a test in the suite.  This is useful for creating helper
      +// methods for your tests.
      +func (suite *SuiteTester) NonTestMethod() {
      +	suite.NonTestMethodRunCount++
      +}
      +
      +// TestRunSuite will be run by the 'go test' command, so within it, we
      +// can run our suite using the Run(*testing.T, TestingSuite) function.
      +func TestRunSuite(t *testing.T) {
      +	suiteTester := new(SuiteTester)
      +	Run(t, suiteTester)
      +
      +	// Normally, the test would end here.  The following are simply
      +	// some assertions to ensure that the Run function is working as
      +	// intended - they are not part of the example.
      +
      +	// The suite was only run once, so the SetupSuite and TearDownSuite
      +	// methods should have each been run only once.
      +	assert.Equal(t, suiteTester.SetupSuiteRunCount, 1)
      +	assert.Equal(t, suiteTester.TearDownSuiteRunCount, 1)
      +
      +	// There are three test methods (TestOne, TestTwo, and TestSkip), so
      +	// the SetupTest and TearDownTest methods (which should be run once for
      +	// each test) should have been run three times.
      +	assert.Equal(t, suiteTester.SetupTestRunCount, 3)
      +	assert.Equal(t, suiteTester.TearDownTestRunCount, 3)
      +
      +	// Each test should have been run once.
      +	assert.Equal(t, suiteTester.TestOneRunCount, 1)
      +	assert.Equal(t, suiteTester.TestTwoRunCount, 1)
      +
      +	// Methods that don't match the test method identifier shouldn't
      +	// have been run at all.
      +	assert.Equal(t, suiteTester.NonTestMethodRunCount, 0)
      +
      +	suiteSkipTester := new(SuiteSkipTester)
      +	Run(t, suiteSkipTester)
      +
      +	// The suite was only run once, so the SetupSuite and TearDownSuite
      +	// methods should have each been run only once, even though SetupSuite
      +	// called Skip()
      +	assert.Equal(t, suiteSkipTester.SetupSuiteRunCount, 1)
      +	assert.Equal(t, suiteSkipTester.TearDownSuiteRunCount, 1)
      +
      +}
      +
      +func TestSuiteGetters(t *testing.T) {
      +	suite := new(SuiteTester)
      +	suite.SetT(t)
      +	assert.NotNil(t, suite.Assert())
      +	assert.Equal(t, suite.Assertions, suite.Assert())
      +	assert.NotNil(t, suite.Require())
      +	assert.Equal(t, suite.require, suite.Require())
      +}
      +
      +type SuiteLoggingTester struct {
      +	Suite
      +}
      +
      +func (s *SuiteLoggingTester) TestLoggingPass() {
      +	s.T().Log("TESTLOGPASS")
      +}
      +
      +func (s *SuiteLoggingTester) TestLoggingFail() {
      +	s.T().Log("TESTLOGFAIL")
      +	assert.NotNil(s.T(), nil) // expected to fail
      +}
      +
      +type StdoutCapture struct {
      +	oldStdout *os.File
      +	readPipe  *os.File
      +}
      +
      +func (sc *StdoutCapture) StartCapture() {
      +	sc.oldStdout = os.Stdout
      +	sc.readPipe, os.Stdout, _ = os.Pipe()
      +}
      +
      +func (sc *StdoutCapture) StopCapture() (string, error) {
      +	if sc.oldStdout == nil || sc.readPipe == nil {
      +		return "", errors.New("StartCapture not called before StopCapture")
      +	}
      +	os.Stdout.Close()
      +	os.Stdout = sc.oldStdout
      +	bytes, err := ioutil.ReadAll(sc.readPipe)
      +	if err != nil {
      +		return "", err
      +	}
      +	return string(bytes), nil
      +}
      +
      +func TestSuiteLogging(t *testing.T) {
      +	testT := testing.T{}
      +
      +	suiteLoggingTester := new(SuiteLoggingTester)
      +
      +	capture := StdoutCapture{}
      +	capture.StartCapture()
      +	Run(&testT, suiteLoggingTester)
      +	output, err := capture.StopCapture()
      +
      +	assert.Nil(t, err, "Got an error trying to capture stdout!")
      +
      +	// Failed tests' output is always printed
      +	assert.Contains(t, output, "TESTLOGFAIL")
      +
      +	if testing.Verbose() {
      +		// In verbose mode, output from successful tests is also printed
      +		assert.Contains(t, output, "TESTLOGPASS")
      +	} else {
      +		assert.NotContains(t, output, "TESTLOGPASS")
      +	}
      +}
      diff --git a/vendor/go4.org/.gitignore b/vendor/go4.org/.gitignore
      new file mode 100644
      index 00000000..daf913b1
      --- /dev/null
      +++ b/vendor/go4.org/.gitignore
      @@ -0,0 +1,24 @@
      +# Compiled Object files, Static and Dynamic libs (Shared Objects)
      +*.o
      +*.a
      +*.so
      +
      +# Folders
      +_obj
      +_test
      +
      +# Architecture specific extensions/prefixes
      +*.[568vq]
      +[568vq].out
      +
      +*.cgo1.go
      +*.cgo2.c
      +_cgo_defun.c
      +_cgo_gotypes.go
      +_cgo_export.*
      +
      +_testmain.go
      +
      +*.exe
      +*.test
      +*.prof
      diff --git a/vendor/go4.org/.travis.yml b/vendor/go4.org/.travis.yml
      new file mode 100644
      index 00000000..15a6d8c8
      --- /dev/null
      +++ b/vendor/go4.org/.travis.yml
      @@ -0,0 +1,10 @@
      +go_import_path: go4.org
      +language: go
      +go:
      +  - 1.5
      +  - tip
      +before_install:
      +  - go get -u google.golang.org/cloud/storage
      +  - cd $HOME/gopath/src/google.golang.org/cloud
      +  - git reset --hard 2375e186ca77be721a7c9c7b13a659738a8511d2
      +  - cd $HOME/gopath/src/go4.org
      diff --git a/vendor/go4.org/AUTHORS b/vendor/go4.org/AUTHORS
      new file mode 100644
      index 00000000..d1ad485f
      --- /dev/null
      +++ b/vendor/go4.org/AUTHORS
      @@ -0,0 +1,8 @@
      +# This is the official list of go4 authors for copyright purposes.
      +# This is distinct from the CONTRIBUTORS file, which is the list of
      +# people who have contributed, even if they don't own the copyright on
      +# their work.
      +
      +Mathieu Lonjaret <mathieu.lonjaret@gmail.com>
      +Daniel Theophanes <kardianos@gmail.com>
      +Google
      diff --git a/vendor/go4.org/LICENSE b/vendor/go4.org/LICENSE
      new file mode 100644
      index 00000000..8f71f43f
      --- /dev/null
      +++ b/vendor/go4.org/LICENSE
      @@ -0,0 +1,202 @@
      +                                 Apache License
      +                           Version 2.0, January 2004
      +                        http://www.apache.org/licenses/
      +
      +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
      +
      +   1. Definitions.
      +
      +      "License" shall mean the terms and conditions for use, reproduction,
      +      and distribution as defined by Sections 1 through 9 of this document.
      +
      +      "Licensor" shall mean the copyright owner or entity authorized by
      +      the copyright owner that is granting the License.
      +
      +      "Legal Entity" shall mean the union of the acting entity and all
      +      other entities that control, are controlled by, or are under common
      +      control with that entity. For the purposes of this definition,
      +      "control" means (i) the power, direct or indirect, to cause the
      +      direction or management of such entity, whether by contract or
      +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      +      outstanding shares, or (iii) beneficial ownership of such entity.
      +
      +      "You" (or "Your") shall mean an individual or Legal Entity
      +      exercising permissions granted by this License.
      +
      +      "Source" form shall mean the preferred form for making modifications,
      +      including but not limited to software source code, documentation
      +      source, and configuration files.
      +
      +      "Object" form shall mean any form resulting from mechanical
      +      transformation or translation of a Source form, including but
      +      not limited to compiled object code, generated documentation,
      +      and conversions to other media types.
      +
      +      "Work" shall mean the work of authorship, whether in Source or
      +      Object form, made available under the License, as indicated by a
      +      copyright notice that is included in or attached to the work
      +      (an example is provided in the Appendix below).
      +
      +      "Derivative Works" shall mean any work, whether in Source or Object
      +      form, that is based on (or derived from) the Work and for which the
      +      editorial revisions, annotations, elaborations, or other modifications
      +      represent, as a whole, an original work of authorship. For the purposes
      +      of this License, Derivative Works shall not include works that remain
      +      separable from, or merely link (or bind by name) to the interfaces of,
      +      the Work and Derivative Works thereof.
      +
      +      "Contribution" shall mean any work of authorship, including
      +      the original version of the Work and any modifications or additions
      +      to that Work or Derivative Works thereof, that is intentionally
      +      submitted to Licensor for inclusion in the Work by the copyright owner
      +      or by an individual or Legal Entity authorized to submit on behalf of
      +      the copyright owner. For the purposes of this definition, "submitted"
      +      means any form of electronic, verbal, or written communication sent
      +      to the Licensor or its representatives, including but not limited to
      +      communication on electronic mailing lists, source code control systems,
      +      and issue tracking systems that are managed by, or on behalf of, the
      +      Licensor for the purpose of discussing and improving the Work, but
      +      excluding communication that is conspicuously marked or otherwise
      +      designated in writing by the copyright owner as "Not a Contribution."
      +
      +      "Contributor" shall mean Licensor and any individual or Legal Entity
      +      on behalf of whom a Contribution has been received by Licensor and
      +      subsequently incorporated within the Work.
      +
      +   2. Grant of Copyright License. Subject to the terms and conditions of
      +      this License, each Contributor hereby grants to You a perpetual,
      +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      +      copyright license to reproduce, prepare Derivative Works of,
      +      publicly display, publicly perform, sublicense, and distribute the
      +      Work and such Derivative Works in Source or Object form.
      +
      +   3. Grant of Patent License. Subject to the terms and conditions of
      +      this License, each Contributor hereby grants to You a perpetual,
      +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      +      (except as stated in this section) patent license to make, have made,
      +      use, offer to sell, sell, import, and otherwise transfer the Work,
      +      where such license applies only to those patent claims licensable
      +      by such Contributor that are necessarily infringed by their
      +      Contribution(s) alone or by combination of their Contribution(s)
      +      with the Work to which such Contribution(s) was submitted. If You
      +      institute patent litigation against any entity (including a
      +      cross-claim or counterclaim in a lawsuit) alleging that the Work
      +      or a Contribution incorporated within the Work constitutes direct
      +      or contributory patent infringement, then any patent licenses
      +      granted to You under this License for that Work shall terminate
      +      as of the date such litigation is filed.
      +
      +   4. Redistribution. You may reproduce and distribute copies of the
      +      Work or Derivative Works thereof in any medium, with or without
      +      modifications, and in Source or Object form, provided that You
      +      meet the following conditions:
      +
      +      (a) You must give any other recipients of the Work or
      +          Derivative Works a copy of this License; and
      +
      +      (b) You must cause any modified files to carry prominent notices
      +          stating that You changed the files; and
      +
      +      (c) You must retain, in the Source form of any Derivative Works
      +          that You distribute, all copyright, patent, trademark, and
      +          attribution notices from the Source form of the Work,
      +          excluding those notices that do not pertain to any part of
      +          the Derivative Works; and
      +
      +      (d) If the Work includes a "NOTICE" text file as part of its
      +          distribution, then any Derivative Works that You distribute must
      +          include a readable copy of the attribution notices contained
      +          within such NOTICE file, excluding those notices that do not
      +          pertain to any part of the Derivative Works, in at least one
      +          of the following places: within a NOTICE text file distributed
      +          as part of the Derivative Works; within the Source form or
      +          documentation, if provided along with the Derivative Works; or,
      +          within a display generated by the Derivative Works, if and
      +          wherever such third-party notices normally appear. The contents
      +          of the NOTICE file are for informational purposes only and
      +          do not modify the License. You may add Your own attribution
      +          notices within Derivative Works that You distribute, alongside
      +          or as an addendum to the NOTICE text from the Work, provided
      +          that such additional attribution notices cannot be construed
      +          as modifying the License.
      +
      +      You may add Your own copyright statement to Your modifications and
      +      may provide additional or different license terms and conditions
      +      for use, reproduction, or distribution of Your modifications, or
      +      for any such Derivative Works as a whole, provided Your use,
      +      reproduction, and distribution of the Work otherwise complies with
      +      the conditions stated in this License.
      +
      +   5. Submission of Contributions. Unless You explicitly state otherwise,
      +      any Contribution intentionally submitted for inclusion in the Work
      +      by You to the Licensor shall be under the terms and conditions of
      +      this License, without any additional terms or conditions.
      +      Notwithstanding the above, nothing herein shall supersede or modify
      +      the terms of any separate license agreement you may have executed
      +      with Licensor regarding such Contributions.
      +
      +   6. Trademarks. This License does not grant permission to use the trade
      +      names, trademarks, service marks, or product names of the Licensor,
      +      except as required for reasonable and customary use in describing the
      +      origin of the Work and reproducing the content of the NOTICE file.
      +
      +   7. Disclaimer of Warranty. Unless required by applicable law or
      +      agreed to in writing, Licensor provides the Work (and each
      +      Contributor provides its Contributions) on an "AS IS" BASIS,
      +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      +      implied, including, without limitation, any warranties or conditions
      +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      +      PARTICULAR PURPOSE. You are solely responsible for determining the
      +      appropriateness of using or redistributing the Work and assume any
      +      risks associated with Your exercise of permissions under this License.
      +
      +   8. Limitation of Liability. In no event and under no legal theory,
      +      whether in tort (including negligence), contract, or otherwise,
      +      unless required by applicable law (such as deliberate and grossly
      +      negligent acts) or agreed to in writing, shall any Contributor be
      +      liable to You for damages, including any direct, indirect, special,
      +      incidental, or consequential damages of any character arising as a
      +      result of this License or out of the use or inability to use the
      +      Work (including but not limited to damages for loss of goodwill,
      +      work stoppage, computer failure or malfunction, or any and all
      +      other commercial damages or losses), even if such Contributor
      +      has been advised of the possibility of such damages.
      +
      +   9. Accepting Warranty or Additional Liability. While redistributing
      +      the Work or Derivative Works thereof, You may choose to offer,
      +      and charge a fee for, acceptance of support, warranty, indemnity,
      +      or other liability obligations and/or rights consistent with this
      +      License. However, in accepting such obligations, You may act only
      +      on Your own behalf and on Your sole responsibility, not on behalf
      +      of any other Contributor, and only if You agree to indemnify,
      +      defend, and hold each Contributor harmless for any liability
      +      incurred by, or claims asserted against, such Contributor by reason
      +      of your accepting any such warranty or additional liability.
      +
      +   END OF TERMS AND CONDITIONS
      +
      +   APPENDIX: How to apply the Apache License to your work.
      +
      +      To apply the Apache License to your work, attach the following
      +      boilerplate notice, with the fields enclosed by brackets "{}"
      +      replaced with your own identifying information. (Don't include
      +      the brackets!)  The text should be enclosed in the appropriate
      +      comment syntax for the file format. We also recommend that a
      +      file or class name and description of purpose be included on the
      +      same "printed page" as the copyright notice for easier
      +      identification within third-party archives.
      +
      +   Copyright {yyyy} {name of copyright owner}
      +
      +   Licensed under the Apache License, Version 2.0 (the "License");
      +   you may not use this file except in compliance with the License.
      +   You may obtain a copy of the License at
      +
      +       http://www.apache.org/licenses/LICENSE-2.0
      +
      +   Unless required by applicable law or agreed to in writing, software
      +   distributed under the License is distributed on an "AS IS" BASIS,
      +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +   See the License for the specific language governing permissions and
      +   limitations under the License.
      +
      diff --git a/vendor/go4.org/README.md b/vendor/go4.org/README.md
      new file mode 100644
      index 00000000..33be9c80
      --- /dev/null
      +++ b/vendor/go4.org/README.md
      @@ -0,0 +1,83 @@
      +# go4
      +
      +[![travis badge](https://travis-ci.org/camlistore/go4.svg?branch=master)](https://travis-ci.org/camlistore/go4 "Travis CI")
      +
      +[go4.org](http://go4.org) is a collection of packages for
      +Go programmers.
      +
      +They started out living in [Camlistore](https://camlistore.org)'s repo
      +and elsewhere but they have nothing to do with Camlistore, so we're
      +moving them here.
      +
      +## Details
      +
      +* **single repo**. go4 is a single repo. That means things can be
      +    changed and rearranged globally atomically with ease and
      +    confidence.
      +
      +* **no backwards compatibility**. go4 makes no backwards compatibility
      +    promises. If you want to use go4, vendor it. And next time you
      +    update your vendor tree, update to the latest API if things in go4
      +    changed. The plan is to eventually provide tools to make this
      +    easier.
      +
      +* **forward progress** because we have no backwards compatibility,
      +    it's always okay to change things to make things better. That also
      +    means the bar for contributions is lower. We don't have to get the
      +    API 100% correct in the first commit.
      +
      +* **code review** contributions must be code-reviewed. We're trying
      +    out Gerrithub, to see if we can find a mix of Github Pull Requests
      +    and Gerrit that works well for many people. We'll see.
      +
      +* **CLA compliant** contributors must agree to the Google CLA (the
      +    same as Go itself). This ensures we can move things into Go as
      +    necessary in the future. It also makes lawyers at various
      +    companies happy.  The CLA is **not** a copyright *assignment*; you
      +    retain the copyright on your work. The CLA just says that your
      +    work is open source and you have permission to open source it. See
      +    https://golang.org/doc/contribute.html#tmp_6
      +
      +* **docs, tests, portability** all code should be documented in the
      +    normal Go style, have tests, and be portable to different
      +    operating systems and architectures. We'll try to get builders in
      +    place to help run the tests on different OS/arches. For now we
      +    have Travis at least.
      +
      +## Contributing
      +
      +To add code to go4, send a pull request or push a change to Gerrithub.
      +
      +We assume you already have your $GOPATH set and the go4 code cloned at
      +$GOPATH/src/go4.org. For example:
      +
      +* `git clone https://review.gerrithub.io/camlistore/go4 $GOPATH/src/go4.org`
      +
      +### To push a code review to Gerrithub directly:
      +
      +* Sign in to [http://gerrithub.io](http://gerrithub.io "Gerrithub") with your Github account.
      +
      +* Install the git hook that adds the magic "Change-Id" line to your commit messages:
      +
      +  `curl -o $GOPATH/src/go4.org/.git/hooks/commit-msg https://camlistore.googlesource.com/camlistore/+/master/misc/commit-msg.githook`
      +
      +* make changes
      +
      +* commit (the unit of code review is a single commit identified by the Change-ID, **NOT** a series of commits on a branch)
      +
      +* `git push ssh://$YOUR_GITHUB_USERNAME@review.gerrithub.io:29418/camlistore/go4 HEAD:refs/for/master`
      +
      +### Using Github Pull Requests
      +
      +* send a pull request with a single commit
      +
      +* create a Gerrithub code review at https://review.gerrithub.io/plugins/github-plugin/static/pullrequests.html, selecting the pull request you just created.
      +
      +### Problems contributing?
      +
      +* Please file an issue or contact the [Camlistore mailing list](https://groups.google.com/forum/#!forum/camlistore) for any problems with the above.
      +
      +See [https://review.gerrithub.io/Documentation/user-upload.html](https://review.gerrithub.io/Documentation/user-upload.html) for more generic documentation.
      +
      +(TODO: more docs on Gerrit, integrate git-codereview, etc.)
      +
      diff --git a/vendor/go4.org/bytereplacer/bytereplacer.go b/vendor/go4.org/bytereplacer/bytereplacer.go
      new file mode 100644
      index 00000000..3e756fe2
      --- /dev/null
      +++ b/vendor/go4.org/bytereplacer/bytereplacer.go
      @@ -0,0 +1,286 @@
      +/*
      +Copyright 2015 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package bytereplacer provides a utility for replacing parts of byte slices.
      +package bytereplacer // import "go4.org/bytereplacer"
      +
      +import "bytes"
      +
      +// Replacer replaces a list of strings with replacements.
      +// It is safe for concurrent use by multiple goroutines.
      +type Replacer struct {
      +	r replacer
      +}
      +
      +// replacer is the interface that a replacement algorithm needs to implement.
      +type replacer interface {
      +	// Replace performs all replacements, in-place if possible.
      +	Replace(s []byte) []byte
      +}
      +
      +// New returns a new Replacer from a list of old, new string pairs.
      +// Replacements are performed in order, without overlapping matches.
      +func New(oldnew ...string) *Replacer {
      +	if len(oldnew)%2 == 1 {
      +		panic("bytes.NewReplacer: odd argument count")
      +	}
      +
      +	allNewBytes := true
      +	for i := 0; i < len(oldnew); i += 2 {
      +		if len(oldnew[i]) != 1 {
      +			return &Replacer{r: makeGenericReplacer(oldnew)}
      +		}
      +		if len(oldnew[i+1]) != 1 {
      +			allNewBytes = false
      +		}
      +	}
      +
      +	if allNewBytes {
      +		r := byteReplacer{}
      +		for i := range r {
      +			r[i] = byte(i)
      +		}
      +		// The first occurrence of old->new map takes precedence
      +		// over the others with the same old string.
      +		for i := len(oldnew) - 2; i >= 0; i -= 2 {
      +			o := oldnew[i][0]
      +			n := oldnew[i+1][0]
      +			r[o] = n
      +		}
      +		return &Replacer{r: &r}
      +	}
      +
      +	return &Replacer{r: makeGenericReplacer(oldnew)}
      +}
      +
      +// Replace performs all replacements in-place on s. If the capacity
      +// of s is not sufficient, a new slice is allocated, otherwise Replace
      +// returns s.
      +func (r *Replacer) Replace(s []byte) []byte {
      +	return r.r.Replace(s)
      +}
      +
      +type trieNode struct {
      +	value    []byte
      +	priority int
      +	prefix   []byte
      +	next     *trieNode
      +	table    []*trieNode
      +}
      +
      +func (t *trieNode) add(key, val []byte, priority int, r *genericReplacer) {
      +	if len(key) == 0 {
      +		if t.priority == 0 {
      +			t.value = val
      +			t.priority = priority
      +		}
      +		return
      +	}
      +
      +	if len(t.prefix) > 0 {
      +		// Need to split the prefix among multiple nodes.
      +		var n int // length of the longest common prefix
      +		for ; n < len(t.prefix) && n < len(key); n++ {
      +			if t.prefix[n] != key[n] {
      +				break
      +			}
      +		}
      +		if n == len(t.prefix) {
      +			t.next.add(key[n:], val, priority, r)
      +		} else if n == 0 {
      +			// First byte differs, start a new lookup table here. Looking up
      +			// what is currently t.prefix[0] will lead to prefixNode, and
      +			// looking up key[0] will lead to keyNode.
      +			var prefixNode *trieNode
      +			if len(t.prefix) == 1 {
      +				prefixNode = t.next
      +			} else {
      +				prefixNode = &trieNode{
      +					prefix: t.prefix[1:],
      +					next:   t.next,
      +				}
      +			}
      +			keyNode := new(trieNode)
      +			t.table = make([]*trieNode, r.tableSize)
      +			t.table[r.mapping[t.prefix[0]]] = prefixNode
      +			t.table[r.mapping[key[0]]] = keyNode
      +			t.prefix = nil
      +			t.next = nil
      +			keyNode.add(key[1:], val, priority, r)
      +		} else {
      +			// Insert new node after the common section of the prefix.
      +			next := &trieNode{
      +				prefix: t.prefix[n:],
      +				next:   t.next,
      +			}
      +			t.prefix = t.prefix[:n]
      +			t.next = next
      +			next.add(key[n:], val, priority, r)
      +		}
      +	} else if t.table != nil {
      +		// Insert into existing table.
      +		m := r.mapping[key[0]]
      +		if t.table[m] == nil {
      +			t.table[m] = new(trieNode)
      +		}
      +		t.table[m].add(key[1:], val, priority, r)
      +	} else {
      +		t.prefix = key
      +		t.next = new(trieNode)
      +		t.next.add(nil, val, priority, r)
      +	}
      +}
      +
      +func (r *genericReplacer) lookup(s []byte, ignoreRoot bool) (val []byte, keylen int, found bool) {
      +	// Iterate down the trie to the end, and grab the value and keylen with
      +	// the highest priority.
      +	bestPriority := 0
      +	node := &r.root
      +	n := 0
      +	for node != nil {
      +		if node.priority > bestPriority && !(ignoreRoot && node == &r.root) {
      +			bestPriority = node.priority
      +			val = node.value
      +			keylen = n
      +			found = true
      +		}
      +
      +		if len(s) == 0 {
      +			break
      +		}
      +		if node.table != nil {
      +			index := r.mapping[s[0]]
      +			if int(index) == r.tableSize {
      +				break
      +			}
      +			node = node.table[index]
      +			s = s[1:]
      +			n++
      +		} else if len(node.prefix) > 0 && bytes.HasPrefix(s, node.prefix) {
      +			n += len(node.prefix)
      +			s = s[len(node.prefix):]
      +			node = node.next
      +		} else {
      +			break
      +		}
      +	}
      +	return
      +}
      +
      +// genericReplacer is the fully generic algorithm.
      +// It's used as a fallback when nothing faster can be used.
      +type genericReplacer struct {
      +	root trieNode
      +	// tableSize is the size of a trie node's lookup table. It is the number
      +	// of unique key bytes.
      +	tableSize int
      +	// mapping maps from key bytes to a dense index for trieNode.table.
      +	mapping [256]byte
      +}
      +
      +func makeGenericReplacer(oldnew []string) *genericReplacer {
      +	r := new(genericReplacer)
      +	// Find each byte used, then assign them each an index.
      +	for i := 0; i < len(oldnew); i += 2 {
      +		key := oldnew[i]
      +		for j := 0; j < len(key); j++ {
      +			r.mapping[key[j]] = 1
      +		}
      +	}
      +
      +	for _, b := range r.mapping {
      +		r.tableSize += int(b)
      +	}
      +
      +	var index byte
      +	for i, b := range r.mapping {
      +		if b == 0 {
      +			r.mapping[i] = byte(r.tableSize)
      +		} else {
      +			r.mapping[i] = index
      +			index++
      +		}
      +	}
      +	// Ensure root node uses a lookup table (for performance).
      +	r.root.table = make([]*trieNode, r.tableSize)
      +
      +	for i := 0; i < len(oldnew); i += 2 {
      +		r.root.add([]byte(oldnew[i]), []byte(oldnew[i+1]), len(oldnew)-i, r)
      +	}
      +	return r
      +}
      +
      +func (r *genericReplacer) Replace(s []byte) []byte {
      +	var last int
      +	var prevMatchEmpty bool
      +	dst := s[:0]
      +	grown := false
      +	for i := 0; i <= len(s); {
      +		// Fast path: s[i] is not a prefix of any pattern.
      +		if i != len(s) && r.root.priority == 0 {
      +			index := int(r.mapping[s[i]])
      +			if index == r.tableSize || r.root.table[index] == nil {
      +				i++
      +				continue
      +			}
      +		}
      +
      +		// Ignore the empty match iff the previous loop found the empty match.
      +		val, keylen, match := r.lookup(s[i:], prevMatchEmpty)
      +		prevMatchEmpty = match && keylen == 0
      +		if match {
      +			dst = append(dst, s[last:i]...)
      +			if diff := len(val) - keylen; grown || diff < 0 {
      +				dst = append(dst, val...)
      +				i += keylen
      +			} else if diff <= cap(s)-len(s) {
      +				// The replacement is larger than the original, but can still fit in the original buffer.
      +				copy(s[i+len(val):cap(dst)], s[i+keylen:])
      +				dst = append(dst, val...)
      +				s = s[:len(s)+diff]
      +				i += len(val)
      +			} else {
      +				// The output will grow larger than the original buffer.  Allocate a new one.
      +				grown = true
      +				newDst := make([]byte, len(dst), cap(dst)+diff)
      +				copy(newDst, dst)
      +				dst = newDst
      +
      +				dst = append(dst, val...)
      +				i += keylen
      +			}
      +			last = i
      +			continue
      +		}
      +		i++
      +	}
      +	if last != len(s) {
      +		dst = append(dst, s[last:]...)
      +	}
      +	return dst
      +}
      +
      +// byteReplacer is the implementation that's used when all the "old"
      +// and "new" values are single ASCII bytes.
      +// The array contains replacement bytes indexed by old byte.
      +type byteReplacer [256]byte
      +
      +func (r *byteReplacer) Replace(s []byte) []byte {
      +	for i, b := range s {
      +		s[i] = r[b]
      +	}
      +	return s
      +}
      diff --git a/vendor/go4.org/bytereplacer/bytereplacer_test.go b/vendor/go4.org/bytereplacer/bytereplacer_test.go
      new file mode 100644
      index 00000000..6eeda24a
      --- /dev/null
      +++ b/vendor/go4.org/bytereplacer/bytereplacer_test.go
      @@ -0,0 +1,423 @@
      +/*
      +Copyright 2015 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package bytereplacer
      +
      +import (
      +	"bytes"
      +	"strings"
      +	"testing"
      +)
      +
      +var htmlEscaper = New(
      +	"&", "&amp;",
      +	"<", "&lt;",
      +	">", "&gt;",
      +	`"`, "&quot;",
      +	"'", "&apos;",
      +)
      +
      +var htmlUnescaper = New(
      +	"&amp;", "&",
      +	"&lt;", "<",
      +	"&gt;", ">",
      +	"&quot;", `"`,
      +	"&apos;", "'",
      +)
      +
      +var capitalLetters = New("a", "A", "b", "B")
      +
      +func TestReplacer(t *testing.T) {
      +	type testCase struct {
      +		r       *Replacer
      +		in, out string
      +	}
      +	var testCases []testCase
      +
      +	// str converts 0xff to "\xff". This isn't just string(b) since that converts to UTF-8.
      +	str := func(b byte) string {
      +		return string([]byte{b})
      +	}
      +	var s []string
      +
      +	// inc maps "\x00"->"\x01", ..., "a"->"b", "b"->"c", ..., "\xff"->"\x00".
      +	s = nil
      +	for i := 0; i < 256; i++ {
      +		s = append(s, str(byte(i)), str(byte(i+1)))
      +	}
      +	inc := New(s...)
      +
      +	// Test cases with 1-byte old strings, 1-byte new strings.
      +	testCases = append(testCases,
      +		testCase{capitalLetters, "brad", "BrAd"},
      +		testCase{capitalLetters, strings.Repeat("a", (32<<10)+123), strings.Repeat("A", (32<<10)+123)},
      +		testCase{capitalLetters, "", ""},
      +
      +		testCase{inc, "brad", "csbe"},
      +		testCase{inc, "\x00\xff", "\x01\x00"},
      +		testCase{inc, "", ""},
      +
      +		testCase{New("a", "1", "a", "2"), "brad", "br1d"},
      +	)
      +
      +	// repeat maps "a"->"a", "b"->"bb", "c"->"ccc", ...
      +	s = nil
      +	for i := 0; i < 256; i++ {
      +		n := i + 1 - 'a'
      +		if n < 1 {
      +			n = 1
      +		}
      +		s = append(s, str(byte(i)), strings.Repeat(str(byte(i)), n))
      +	}
      +	repeat := New(s...)
      +
      +	// Test cases with 1-byte old strings, variable length new strings.
      +	testCases = append(testCases,
      +		testCase{htmlEscaper, "No changes", "No changes"},
      +		testCase{htmlEscaper, "I <3 escaping & stuff", "I &lt;3 escaping &amp; stuff"},
      +		testCase{htmlEscaper, "&&&", "&amp;&amp;&amp;"},
      +		testCase{htmlEscaper, "", ""},
      +
      +		testCase{repeat, "brad", "bbrrrrrrrrrrrrrrrrrradddd"},
      +		testCase{repeat, "abba", "abbbba"},
      +		testCase{repeat, "", ""},
      +
      +		testCase{New("a", "11", "a", "22"), "brad", "br11d"},
      +	)
      +
      +	// The remaining test cases have variable length old strings.
      +
      +	testCases = append(testCases,
      +		testCase{htmlUnescaper, "&amp;amp;", "&amp;"},
      +		testCase{htmlUnescaper, "&lt;b&gt;HTML&apos;s neat&lt;/b&gt;", "<b>HTML's neat</b>"},
      +		testCase{htmlUnescaper, "", ""},
      +
      +		testCase{New("a", "1", "a", "2", "xxx", "xxx"), "brad", "br1d"},
      +
      +		testCase{New("a", "1", "aa", "2", "aaa", "3"), "aaaa", "1111"},
      +
      +		testCase{New("aaa", "3", "aa", "2", "a", "1"), "aaaa", "31"},
      +	)
      +
      +	// gen1 has multiple old strings of variable length. There is no
      +	// overall non-empty common prefix, but some pairwise common prefixes.
      +	gen1 := New(
      +		"aaa", "3[aaa]",
      +		"aa", "2[aa]",
      +		"a", "1[a]",
      +		"i", "i",
      +		"longerst", "most long",
      +		"longer", "medium",
      +		"long", "short",
      +		"xx", "xx",
      +		"x", "X",
      +		"X", "Y",
      +		"Y", "Z",
      +	)
      +	testCases = append(testCases,
      +		testCase{gen1, "fooaaabar", "foo3[aaa]b1[a]r"},
      +		testCase{gen1, "long, longerst, longer", "short, most long, medium"},
      +		testCase{gen1, "xxxxx", "xxxxX"},
      +		testCase{gen1, "XiX", "YiY"},
      +		testCase{gen1, "", ""},
      +	)
      +
      +	// gen2 has multiple old strings with no pairwise common prefix.
      +	gen2 := New(
      +		"roses", "red",
      +		"violets", "blue",
      +		"sugar", "sweet",
      +	)
      +	testCases = append(testCases,
      +		testCase{gen2, "roses are red, violets are blue...", "red are red, blue are blue..."},
      +		testCase{gen2, "", ""},
      +	)
      +
      +	// gen3 has multiple old strings with an overall common prefix.
      +	gen3 := New(
      +		"abracadabra", "poof",
      +		"abracadabrakazam", "splat",
      +		"abraham", "lincoln",
      +		"abrasion", "scrape",
      +		"abraham", "isaac",
      +	)
      +	testCases = append(testCases,
      +		testCase{gen3, "abracadabrakazam abraham", "poofkazam lincoln"},
      +		testCase{gen3, "abrasion abracad", "scrape abracad"},
      +		testCase{gen3, "abba abram abrasive", "abba abram abrasive"},
      +		testCase{gen3, "", ""},
      +	)
      +
      +	// foo{1,2,3,4} have multiple old strings with an overall common prefix
      +	// and 1- or 2- byte extensions from the common prefix.
      +	foo1 := New(
      +		"foo1", "A",
      +		"foo2", "B",
      +		"foo3", "C",
      +	)
      +	foo2 := New(
      +		"foo1", "A",
      +		"foo2", "B",
      +		"foo31", "C",
      +		"foo32", "D",
      +	)
      +	foo3 := New(
      +		"foo11", "A",
      +		"foo12", "B",
      +		"foo31", "C",
      +		"foo32", "D",
      +	)
      +	foo4 := New(
      +		"foo12", "B",
      +		"foo32", "D",
      +	)
      +	testCases = append(testCases,
      +		testCase{foo1, "fofoofoo12foo32oo", "fofooA2C2oo"},
      +		testCase{foo1, "", ""},
      +
      +		testCase{foo2, "fofoofoo12foo32oo", "fofooA2Doo"},
      +		testCase{foo2, "", ""},
      +
      +		testCase{foo3, "fofoofoo12foo32oo", "fofooBDoo"},
      +		testCase{foo3, "", ""},
      +
      +		testCase{foo4, "fofoofoo12foo32oo", "fofooBDoo"},
      +		testCase{foo4, "", ""},
      +	)
      +
      +	// genAll maps "\x00\x01\x02...\xfe\xff" to "[all]", amongst other things.
      +	allBytes := make([]byte, 256)
      +	for i := range allBytes {
      +		allBytes[i] = byte(i)
      +	}
      +	allString := string(allBytes)
      +	genAll := New(
      +		allString, "[all]",
      +		"\xff", "[ff]",
      +		"\x00", "[00]",
      +	)
      +	testCases = append(testCases,
      +		testCase{genAll, allString, "[all]"},
      +		testCase{genAll, "a\xff" + allString + "\x00", "a[ff][all][00]"},
      +		testCase{genAll, "", ""},
      +	)
      +
      +	// Test cases with empty old strings.
      +
      +	blankToX1 := New("", "X")
      +	blankToX2 := New("", "X", "", "")
      +	blankHighPriority := New("", "X", "o", "O")
      +	blankLowPriority := New("o", "O", "", "X")
      +	blankNoOp1 := New("", "")
      +	blankNoOp2 := New("", "", "", "A")
      +	blankFoo := New("", "X", "foobar", "R", "foobaz", "Z")
      +	testCases = append(testCases,
      +		testCase{blankToX1, "foo", "XfXoXoX"},
      +		testCase{blankToX1, "", "X"},
      +
      +		testCase{blankToX2, "foo", "XfXoXoX"},
      +		testCase{blankToX2, "", "X"},
      +
      +		testCase{blankHighPriority, "oo", "XOXOX"},
      +		testCase{blankHighPriority, "ii", "XiXiX"},
      +		testCase{blankHighPriority, "oiio", "XOXiXiXOX"},
      +		testCase{blankHighPriority, "iooi", "XiXOXOXiX"},
      +		testCase{blankHighPriority, "", "X"},
      +
      +		testCase{blankLowPriority, "oo", "OOX"},
      +		testCase{blankLowPriority, "ii", "XiXiX"},
      +		testCase{blankLowPriority, "oiio", "OXiXiOX"},
      +		testCase{blankLowPriority, "iooi", "XiOOXiX"},
      +		testCase{blankLowPriority, "", "X"},
      +
      +		testCase{blankNoOp1, "foo", "foo"},
      +		testCase{blankNoOp1, "", ""},
      +
      +		testCase{blankNoOp2, "foo", "foo"},
      +		testCase{blankNoOp2, "", ""},
      +
      +		testCase{blankFoo, "foobarfoobaz", "XRXZX"},
      +		testCase{blankFoo, "foobar-foobaz", "XRX-XZX"},
      +		testCase{blankFoo, "", "X"},
      +	)
      +
      +	// single string replacer
      +
      +	abcMatcher := New("abc", "[match]")
      +
      +	testCases = append(testCases,
      +		testCase{abcMatcher, "", ""},
      +		testCase{abcMatcher, "ab", "ab"},
      +		testCase{abcMatcher, "abc", "[match]"},
      +		testCase{abcMatcher, "abcd", "[match]d"},
      +		testCase{abcMatcher, "cabcabcdabca", "c[match][match]d[match]a"},
      +	)
      +
      +	// Issue 6659 cases (more single string replacer)
      +
      +	noHello := New("Hello", "")
      +	testCases = append(testCases,
      +		testCase{noHello, "Hello", ""},
      +		testCase{noHello, "Hellox", "x"},
      +		testCase{noHello, "xHello", "x"},
      +		testCase{noHello, "xHellox", "xx"},
      +	)
      +
      +	// No-arg test cases.
      +
      +	nop := New()
      +	testCases = append(testCases,
      +		testCase{nop, "abc", "abc"},
      +		testCase{nop, "", ""},
      +	)
      +
      +	// Run the test cases.
      +
      +	for i, tc := range testCases {
      +		{
      +			// Replace with len(in) == cap(in)
      +			in := make([]byte, len(tc.in))
      +			copy(in, tc.in)
      +			if s := string(tc.r.Replace(in)); s != tc.out {
      +				t.Errorf("%d. Replace(%q /* len == cap */) = %q, want %q", i, tc.in, s, tc.out)
      +			}
      +		}
      +
      +		{
      +			// Replace with len(in) < cap(in)
      +			in := make([]byte, len(tc.in), len(tc.in)*2)
      +			copy(in, tc.in)
      +			if s := string(tc.r.Replace(in)); s != tc.out {
      +				t.Errorf("%d. Replace(%q /* len < cap */) = %q, want %q", i, tc.in, s, tc.out)
      +			}
      +		}
      +	}
      +}
      +
      +func BenchmarkGenericNoMatch(b *testing.B) {
      +	str := []byte(strings.Repeat("A", 100) + strings.Repeat("B", 100))
      +	generic := New("a", "A", "b", "B", "12", "123") // varying lengths forces generic
      +	for i := 0; i < b.N; i++ {
      +		generic.Replace(str)
      +	}
      +}
      +
      +func BenchmarkGenericMatch1(b *testing.B) {
      +	str := []byte(strings.Repeat("a", 100) + strings.Repeat("b", 100))
      +	generic := New("a", "A", "b", "B", "12", "123")
      +	for i := 0; i < b.N; i++ {
      +		generic.Replace(str)
      +	}
      +}
      +
      +func BenchmarkGenericMatch2(b *testing.B) {
      +	str := bytes.Repeat([]byte("It&apos;s &lt;b&gt;HTML&lt;/b&gt;!"), 100)
      +	for i := 0; i < b.N; i++ {
      +		htmlUnescaper.Replace(str)
      +	}
      +}
      +
      +func benchmarkSingleString(b *testing.B, pattern, text string) {
      +	r := New(pattern, "[match]")
      +	buf := make([]byte, len(text), len(text)*7)
      +	b.SetBytes(int64(len(text)))
      +	b.ResetTimer()
      +	for i := 0; i < b.N; i++ {
      +		copy(buf, text)
      +		r.Replace(buf)
      +	}
      +}
      +
      +func BenchmarkSingleMaxSkipping(b *testing.B) {
      +	benchmarkSingleString(b, strings.Repeat("b", 25), strings.Repeat("a", 10000))
      +}
      +
      +func BenchmarkSingleLongSuffixFail(b *testing.B) {
      +	benchmarkSingleString(b, "b"+strings.Repeat("a", 500), strings.Repeat("a", 1002))
      +}
      +
      +func BenchmarkSingleMatch(b *testing.B) {
      +	benchmarkSingleString(b, "abcdef", strings.Repeat("abcdefghijklmno", 1000))
      +}
      +
      +func benchmarkReplacer(b *testing.B, r *Replacer, str string) {
      +	buf := make([]byte, len(str))
      +	b.ResetTimer()
      +	for i := 0; i < b.N; i++ {
      +		copy(buf, str)
      +		r.Replace(buf)
      +	}
      +}
      +
      +func BenchmarkByteByteNoMatch(b *testing.B) {
      +	benchmarkReplacer(b, capitalLetters, strings.Repeat("A", 100)+strings.Repeat("B", 100))
      +}
      +
      +func BenchmarkByteByteMatch(b *testing.B) {
      +	benchmarkReplacer(b, capitalLetters, strings.Repeat("a", 100)+strings.Repeat("b", 100))
      +}
      +
      +func BenchmarkByteStringMatch(b *testing.B) {
      +	benchmarkReplacer(b, htmlEscaper, "<"+strings.Repeat("a", 99)+strings.Repeat("b", 99)+">")
      +}
      +
      +func BenchmarkHTMLEscapeNew(b *testing.B) {
      +	benchmarkReplacer(b, htmlEscaper, "I <3 to escape HTML & other text too.")
      +}
      +
      +func BenchmarkHTMLEscapeOld(b *testing.B) {
      +	str := "I <3 to escape HTML & other text too."
      +	buf := make([]byte, len(str))
      +	for i := 0; i < b.N; i++ {
      +		copy(buf, str)
      +		oldHTMLEscape(buf)
      +	}
      +}
      +
      +// The http package's old HTML escaping function in bytes form.
      +func oldHTMLEscape(s []byte) []byte {
      +	s = bytes.Replace(s, []byte("&"), []byte("&amp;"), -1)
      +	s = bytes.Replace(s, []byte("<"), []byte("&lt;"), -1)
      +	s = bytes.Replace(s, []byte(">"), []byte("&gt;"), -1)
      +	s = bytes.Replace(s, []byte(`"`), []byte("&quot;"), -1)
      +	s = bytes.Replace(s, []byte("'"), []byte("&apos;"), -1)
      +	return s
      +}
      +
      +// BenchmarkByteByteReplaces compares byteByteImpl against multiple Replaces.
      +func BenchmarkByteByteReplaces(b *testing.B) {
      +	str := strings.Repeat("a", 100) + strings.Repeat("b", 100)
      +	for i := 0; i < b.N; i++ {
      +		bytes.Replace(bytes.Replace([]byte(str), []byte{'a'}, []byte{'A'}, -1), []byte{'b'}, []byte{'B'}, -1)
      +	}
      +}
      +
      +// BenchmarkByteByteMap compares byteByteImpl against Map.
      +func BenchmarkByteByteMap(b *testing.B) {
      +	str := strings.Repeat("a", 100) + strings.Repeat("b", 100)
      +	fn := func(r rune) rune {
      +		switch r {
      +		case 'a':
      +			return 'A'
      +		case 'b':
      +			return 'B'
      +		}
      +		return r
      +	}
      +	for i := 0; i < b.N; i++ {
      +		bytes.Map(fn, []byte(str))
      +	}
      +}
      diff --git a/vendor/go4.org/cloud/cloudlaunch/cloudlaunch.go b/vendor/go4.org/cloud/cloudlaunch/cloudlaunch.go
      new file mode 100644
      index 00000000..d1ee1c6e
      --- /dev/null
      +++ b/vendor/go4.org/cloud/cloudlaunch/cloudlaunch.go
      @@ -0,0 +1,445 @@
      +/*
      +Copyright 2015 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package cloudlaunch helps binaries run themselves on The Cloud, copying
      +// themselves to GCE.
      +package cloudlaunch // import "go4.org/cloud/cloudlaunch"
      +
      +import (
      +	"encoding/json"
      +	"flag"
      +	"fmt"
      +	"io"
      +	"io/ioutil"
      +	"log"
      +	"net/http"
      +	"os"
      +	"path"
      +	"path/filepath"
      +	"runtime"
      +	"strings"
      +	"time"
      +
      +	"go4.org/cloud/google/gceutil"
      +
      +	"golang.org/x/net/context"
      +	"golang.org/x/oauth2"
      +	"golang.org/x/oauth2/google"
      +	compute "google.golang.org/api/compute/v1"
      +	"google.golang.org/api/googleapi"
      +	storageapi "google.golang.org/api/storage/v1"
      +	"google.golang.org/cloud"
      +	"google.golang.org/cloud/storage"
      +)
      +
      +func readFile(v string) string {
      +	slurp, err := ioutil.ReadFile(v)
      +	if err != nil {
      +		log.Fatalf("Error reading %s: %v", v, err)
      +	}
      +	return strings.TrimSpace(string(slurp))
      +}
      +
      +const baseConfig = `#cloud-config
      +coreos:
      +  update:
      +    group: stable
      +    reboot-strategy: off
      +  units:
      +    - name: $NAME.service
      +      command: start
      +      content: |
      +        [Unit]
      +        Description=$NAME service
      +        After=network.target
      +        
      +        [Service]
      +        Type=simple
      +        ExecStartPre=/bin/sh -c 'mkdir -p /opt/bin && /usr/bin/curl --silent -f -o /opt/bin/$NAME $URL?$(date +%s) && chmod +x /opt/bin/$NAME'
      +        ExecStart=/opt/bin/$NAME
      +        RestartSec=10
      +        Restart=always
      +        StartLimitInterval=0
      +        
      +        [Install]
      +        WantedBy=network-online.target
      +`
      +
      +// RestartPolicy controls whether the binary automatically restarts.
      +type RestartPolicy int
      +
      +const (
      +	RestartOnUpdates RestartPolicy = iota
      +	RestartNever
      +	// TODO: more graceful restarts; make systemd own listening on network sockets,
      +	// don't break connections.
      +)
      +
      +type Config struct {
      +	// Name is the name of a service to run.
      +	// This is the name of the systemd service (without .service)
      +	// and the name of the GCE instance.
      +	Name string
      +
      +	// RestartPolicy controls whether the binary automatically restarts
      +	// on updates. The zero value means automatic.
      +	RestartPolicy RestartPolicy
      +
      +	// BinaryBucket and BinaryObject are the GCS bucket and object
      +	// within that bucket containing the Linux binary to download
      +	// on boot and occasionally run. This binary must be public
      +	// (at least for now).
      +	BinaryBucket string
      +	BinaryObject string // defaults to Name
      +
      +	GCEProjectID string
      +	Zone         string // defaults to us-central1-f
      +	SSD          bool
      +
      +	Scopes []string // any additional scopes
      +
      +	MachineType  string
      +	InstanceName string
      +}
      +
      +// cloudLaunch is a launch of a Config.
      +type cloudLaunch struct {
      +	*Config
      +	oauthClient    *http.Client
      +	computeService *compute.Service
      +}
      +
      +func (c *Config) binaryURL() string {
      +	return "https://storage.googleapis.com/" + c.BinaryBucket + "/" + c.binaryObject()
      +}
      +
      +func (c *Config) instName() string     { return c.Name } // for now
      +func (c *Config) zone() string         { return strDefault(c.Zone, "us-central1-f") }
      +func (c *Config) machineType() string  { return strDefault(c.MachineType, "g1-small") }
      +func (c *Config) binaryObject() string { return strDefault(c.BinaryObject, c.Name) }
      +
      +func (c *Config) projectAPIURL() string {
      +	return "https://www.googleapis.com/compute/v1/projects/" + c.GCEProjectID
      +}
      +func (c *Config) machineTypeURL() string {
      +	return c.projectAPIURL() + "/zones/" + c.zone() + "/machineTypes/" + c.machineType()
      +}
      +
      +func strDefault(a, b string) string {
      +	if a != "" {
      +		return a
      +	}
      +	return b
      +}
      +
      +var (
      +	doLaunch = flag.Bool("cloudlaunch", false, "Deploy or update this binary to the cloud. Must be on Linux, for now.")
      +)
      +
      +func (c *Config) MaybeDeploy() {
      +	flag.Parse()
      +	if !*doLaunch {
      +		go c.restartLoop()
      +		return
      +	}
      +	defer os.Exit(1) // backup, in case we return without Fatal or os.Exit later
      +
      +	if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
      +		log.Fatal("Can only use --cloudlaunch on linux/amd64, for now.")
      +	}
      +
      +	if c.GCEProjectID == "" {
      +		log.Fatal("cloudconfig.GCEProjectID is empty")
      +	}
      +	filename := filepath.Join(os.Getenv("HOME"), "keys", c.GCEProjectID+".key.json")
      +	log.Printf("Using OAuth config from JSON service file: %s", filename)
      +	jwtConf, err := google.JWTConfigFromJSON([]byte(readFile(filename)), append([]string{
      +		storageapi.DevstorageFullControlScope,
      +		compute.ComputeScope,
      +		"https://www.googleapis.com/auth/cloud-platform",
      +	}, c.Scopes...)...)
      +	if err != nil {
      +		log.Fatalf("ConfigFromJSON: %v", err)
      +	}
      +
      +	cl := &cloudLaunch{
      +		Config:      c,
      +		oauthClient: jwtConf.Client(oauth2.NoContext),
      +	}
      +	cl.computeService, _ = compute.New(cl.oauthClient)
      +
      +	cl.uploadBinary()
      +	cl.createInstance()
      +	os.Exit(0)
      +}
      +
      +func (c *Config) restartLoop() {
      +	if c.RestartPolicy == RestartNever {
      +		return
      +	}
      +	url := "https://storage.googleapis.com/" + c.BinaryBucket + "/" + c.binaryObject()
      +	var lastEtag string
      +	for {
      +		res, err := http.Head(url + "?" + fmt.Sprint(time.Now().Unix()))
      +		if err != nil {
      +			log.Printf("Warning: %v", err)
      +			time.Sleep(15 * time.Second)
      +			continue
      +		}
      +		etag := res.Header.Get("Etag")
      +		if etag == "" {
      +			log.Printf("Warning, no ETag in response: %v", res)
      +			time.Sleep(15 * time.Second)
      +			continue
      +		}
      +		if lastEtag != "" && etag != lastEtag {
      +			log.Printf("Binary updated; restarting.")
      +			// TODO: more graceful restart, letting systemd own the network connections.
      +			// Then we can finish up requests here.
      +			os.Exit(0)
      +		}
      +		lastEtag = etag
      +		time.Sleep(15 * time.Second)
      +	}
      +}
      +
      +// uploadBinary uploads the currently-running Linux binary.
      +// It crashes if it fails.
      +func (cl *cloudLaunch) uploadBinary() {
      +	ctx := context.Background()
      +	if cl.BinaryBucket == "" {
      +		log.Fatal("cloudlaunch: Config.BinaryBucket is empty")
      +	}
      +	stoClient, err := storage.NewClient(ctx, cloud.WithBaseHTTP(cl.oauthClient))
      +	if err != nil {
      +		log.Fatal(err)
      +	}
      +	w := stoClient.Bucket(cl.BinaryBucket).Object(cl.binaryObject()).NewWriter(ctx)
      +	if err != nil {
      +		log.Fatal(err)
      +	}
      +	w.ACL = []storage.ACLRule{
      +		// If you don't give the owners access, the web UI seems to
      +		// have a bug and doesn't have access to see that it's public, so
      +		// won't render the "Shared Publicly" link. So we do that, even
      +		// though it's dumb and unnecessary otherwise:
      +		{
      +			Entity: storage.ACLEntity("project-owners-" + cl.GCEProjectID),
      +			Role:   storage.RoleOwner,
      +		},
      +		// Public, so our systemd unit can get it easily:
      +		{
      +			Entity: storage.AllUsers,
      +			Role:   storage.RoleReader,
      +		},
      +	}
      +	w.CacheControl = "no-cache"
      +	selfPath := getSelfPath()
      +	log.Printf("Uploading %q to %v", selfPath, cl.binaryURL())
      +	f, err := os.Open(selfPath)
      +	if err != nil {
      +		log.Fatal(err)
      +	}
      +	defer f.Close()
      +	n, err := io.Copy(w, f)
      +	if err != nil {
      +		log.Fatal(err)
      +	}
      +	if err := w.Close(); err != nil {
      +		log.Fatal(err)
      +	}
      +	log.Printf("Uploaded %d bytes", n)
      +}
      +
      +func getSelfPath() string {
      +	if runtime.GOOS != "linux" {
      +		panic("TODO")
      +	}
      +	v, err := os.Readlink("/proc/self/exe")
      +	if err != nil {
      +		log.Fatal(err)
      +	}
      +	return v
      +}
      +
      +func zoneInRegion(zone, regionURL string) bool {
      +	if zone == "" {
      +		panic("empty zone")
      +	}
      +	if regionURL == "" {
      +		panic("empty regionURL")
      +	}
      +	// zone is like "us-central1-f"
      +	// regionURL is like "https://www.googleapis.com/compute/v1/projects/camlistore-website/regions/us-central1"
      +	region := path.Base(regionURL) // "us-central1"
      +	if region == "" {
      +		panic("empty region")
      +	}
      +	return strings.HasPrefix(zone, region)
      +}
      +
      +// findIP finds an IP address to use, or returns the empty string if none is found.
      +// It tries to find a reserved one in the same region where the name of the reserved IP
      +// is "NAME-ip" and the IP is not in use.
      +func (cl *cloudLaunch) findIP() string {
      +	// Try to find it by name.
      +	aggAddrList, err := cl.computeService.Addresses.AggregatedList(cl.GCEProjectID).Do()
      +	if err != nil {
      +		log.Fatal(err)
      +	}
      +	// https://godoc.org/google.golang.org/api/compute/v1#AddressAggregatedList
      +	var ip string
      +IPLoop:
      +	for _, asl := range aggAddrList.Items {
      +		for _, addr := range asl.Addresses {
      +			log.Printf("  addr: %#v", addr)
      +			if addr.Name == cl.Name+"-ip" && addr.Status == "RESERVED" && zoneInRegion(cl.zone(), addr.Region) {
      +				ip = addr.Address
      +				break IPLoop
      +			}
      +		}
      +	}
      +	return ip
      +}
      +
      +func (cl *cloudLaunch) createInstance() {
      +	inst := cl.lookupInstance()
      +	if inst != nil {
      +		log.Printf("Instance exists; not re-creating.")
      +		return
      +	}
      +
      +	log.Printf("Instance doesn't exist; creating...")
      +
      +	ip := cl.findIP()
      +	log.Printf("Found IP: %v", ip)
      +
      +	cloudConfig := strings.NewReplacer(
      +		"$NAME", cl.Name,
      +		"$URL", cl.binaryURL(),
      +	).Replace(baseConfig)
      +
      +	instance := &compute.Instance{
      +		Name:        cl.instName(),
      +		Description: cl.Name,
      +		MachineType: cl.machineTypeURL(),
      +		Disks:       []*compute.AttachedDisk{cl.instanceDisk()},
      +		Tags: &compute.Tags{
      +			Items: []string{"http-server", "https-server"},
      +		},
      +		Metadata: &compute.Metadata{
      +			Items: []*compute.MetadataItems{
      +				{
      +					Key:   "user-data",
      +					Value: googleapi.String(cloudConfig),
      +				},
      +			},
      +		},
      +		NetworkInterfaces: []*compute.NetworkInterface{
      +			&compute.NetworkInterface{
      +				AccessConfigs: []*compute.AccessConfig{
      +					&compute.AccessConfig{
      +						Type:  "ONE_TO_ONE_NAT",
      +						Name:  "External NAT",
      +						NatIP: ip,
      +					},
      +				},
      +				Network: cl.projectAPIURL() + "/global/networks/default",
      +			},
      +		},
      +		ServiceAccounts: []*compute.ServiceAccount{
      +			{
      +				Email:  "default",
      +				Scopes: cl.Scopes,
      +			},
      +		},
      +	}
      +
      +	log.Printf("Creating instance...")
      +	op, err := cl.computeService.Instances.Insert(cl.GCEProjectID, cl.zone(), instance).Do()
      +	if err != nil {
      +		log.Fatalf("Failed to create instance: %v", err)
      +	}
      +	opName := op.Name
      +	log.Printf("Created. Waiting on operation %v", opName)
      +OpLoop:
      +	for {
      +		time.Sleep(2 * time.Second)
      +		op, err := cl.computeService.ZoneOperations.Get(cl.GCEProjectID, cl.zone(), opName).Do()
      +		if err != nil {
      +			log.Fatalf("Failed to get op %s: %v", opName, err)
      +		}
      +		switch op.Status {
      +		case "PENDING", "RUNNING":
      +			log.Printf("Waiting on operation %v", opName)
      +			continue
      +		case "DONE":
      +			if op.Error != nil {
      +				for _, operr := range op.Error.Errors {
      +					log.Printf("Error: %+v", operr)
      +				}
      +				log.Fatalf("Failed to start.")
      +			}
      +			log.Printf("Success. %+v", op)
      +			break OpLoop
      +		default:
      +			log.Fatalf("Unknown status %q: %+v", op.Status, op)
      +		}
      +	}
      +
      +	inst, err = cl.computeService.Instances.Get(cl.GCEProjectID, cl.zone(), cl.instName()).Do()
      +	if err != nil {
      +		log.Fatalf("Error getting instance after creation: %v", err)
      +	}
      +	ij, _ := json.MarshalIndent(inst, "", "    ")
      +	log.Printf("%s", ij)
      +	log.Printf("Instance created.")
      +	os.Exit(0)
      +}
      +
+// lookupInstance returns nil if the instance doesn't exist.
      +func (cl *cloudLaunch) lookupInstance() *compute.Instance {
      +	inst, err := cl.computeService.Instances.Get(cl.GCEProjectID, cl.zone(), cl.instName()).Do()
      +	if ae, ok := err.(*googleapi.Error); ok && ae.Code == 404 {
      +		return nil
      +	} else if err != nil {
      +		log.Fatalf("Instances.Get: %v", err)
      +	}
      +	return inst
      +}
      +
      +func (cl *cloudLaunch) instanceDisk() *compute.AttachedDisk {
      +	imageURL, err := gceutil.CoreOSImageURL(cl.oauthClient)
      +	if err != nil {
      +		log.Fatalf("error looking up latest CoreOS stable image: %v", err)
      +	}
      +	diskName := cl.instName() + "-coreos-stateless-pd"
      +	var diskType string
      +	if cl.SSD {
      +		diskType = cl.projectAPIURL() + "/zones/" + cl.zone() + "/diskTypes/pd-ssd"
      +	}
      +	return &compute.AttachedDisk{
      +		AutoDelete: true,
      +		Boot:       true,
      +		Type:       "PERSISTENT",
      +		InitializeParams: &compute.AttachedDiskInitializeParams{
      +			DiskName:    diskName,
      +			SourceImage: imageURL,
      +			DiskSizeGb:  50,
      +			DiskType:    diskType,
      +		},
      +	}
      +}
      diff --git a/vendor/go4.org/cloud/google/gceutil/gceutil.go b/vendor/go4.org/cloud/google/gceutil/gceutil.go
      new file mode 100644
      index 00000000..9e5d39fc
      --- /dev/null
      +++ b/vendor/go4.org/cloud/google/gceutil/gceutil.go
      @@ -0,0 +1,110 @@
      +/*
      +Copyright 2015 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package gceutil provides utility functions to help with instances on
      +// Google Compute Engine.
      +package gceutil // import "go4.org/cloud/google/gceutil"
      +
      +import (
      +	"encoding/json"
      +	"errors"
      +	"net/http"
      +	"strings"
      +	"time"
      +
      +	"google.golang.org/api/compute/v1"
      +)
      +
      +// CoreOSImageURL returns the URL of the latest stable CoreOS image for running on Google Compute Engine.
      +func CoreOSImageURL(cl *http.Client) (string, error) {
      +	resp, err := cl.Get("https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images")
      +	if err != nil {
      +		return "", err
      +	}
      +	defer resp.Body.Close()
      +
      +	type coreOSImage struct {
      +		SelfLink          string
      +		CreationTimestamp time.Time
      +		Name              string
      +	}
      +
      +	type coreOSImageList struct {
      +		Items []coreOSImage
      +	}
      +
      +	imageList := &coreOSImageList{}
      +	if err := json.NewDecoder(resp.Body).Decode(imageList); err != nil {
      +		return "", err
      +	}
      +	if imageList == nil || len(imageList.Items) == 0 {
      +		return "", errors.New("no images list in response")
      +	}
      +
      +	imageURL := ""
      +	var max time.Time // latest stable image creation time
      +	for _, v := range imageList.Items {
      +		if !strings.HasPrefix(v.Name, "coreos-stable") {
      +			continue
      +		}
      +		if v.CreationTimestamp.After(max) {
      +			max = v.CreationTimestamp
      +			imageURL = v.SelfLink
      +		}
      +	}
      +	if imageURL == "" {
      +		return "", errors.New("no stable coreOS image found")
      +	}
      +	return imageURL, nil
      +}
      +
      +// InstanceGroupAndManager contains both an InstanceGroup and
      +// its InstanceGroupManager, if any.
      +type InstanceGroupAndManager struct {
      +	Group *compute.InstanceGroup
      +
      +	// Manager is the manager of the Group. It may be nil.
      +	Manager *compute.InstanceGroupManager
      +}
      +
      +// InstanceGroups returns all the instance groups in a project's zone, along
      +// with their associated InstanceGroupManagers.
      +// The returned map is keyed by the instance group identifier URL.
      +func InstanceGroups(svc *compute.Service, proj, zone string) (map[string]InstanceGroupAndManager, error) {
      +	managerList, err := svc.InstanceGroupManagers.List(proj, zone).Do()
      +	if err != nil {
      +		return nil, err
      +	}
      +	if managerList.NextPageToken != "" {
      +		return nil, errors.New("too many managers; pagination not supported")
      +	}
      +	managedBy := make(map[string]*compute.InstanceGroupManager) // instance group URL -> its manager
      +	for _, it := range managerList.Items {
      +		managedBy[it.InstanceGroup] = it
      +	}
      +	groupList, err := svc.InstanceGroups.List(proj, zone).Do()
      +	if err != nil {
      +		return nil, err
      +	}
      +	if groupList.NextPageToken != "" {
      +		return nil, errors.New("too many instance groups; pagination not supported")
      +	}
      +	ret := make(map[string]InstanceGroupAndManager)
      +	for _, it := range groupList.Items {
      +		ret[it.SelfLink] = InstanceGroupAndManager{it, managedBy[it.SelfLink]}
      +	}
      +	return ret, nil
      +}
      diff --git a/vendor/go4.org/cloud/google/gcsutil/storage.go b/vendor/go4.org/cloud/google/gcsutil/storage.go
      new file mode 100644
      index 00000000..b88771c1
      --- /dev/null
      +++ b/vendor/go4.org/cloud/google/gcsutil/storage.go
      @@ -0,0 +1,180 @@
      +/*
      +Copyright 2015 The Go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package gcsutil provides tools for accessing Google Cloud Storage until they can be
      +// completely replaced by google.golang.org/cloud/storage.
      +package gcsutil // import "go4.org/cloud/google/gcsutil"
      +
      +import (
      +	"encoding/xml"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"net/http"
      +	"net/url"
      +	"os"
      +	"strings"
      +
      +	"go4.org/ctxutil"
      +	"golang.org/x/net/context"
      +	"google.golang.org/cloud/storage"
      +)
      +
      +const gsAccessURL = "https://storage.googleapis.com"
      +
      +// An Object holds the name of an object (its bucket and key) within
      +// Google Cloud Storage.
      +type Object struct {
      +	Bucket string
      +	Key    string
      +}
      +
      +func (o *Object) valid() error {
      +	if o == nil {
      +		return errors.New("invalid nil Object")
      +	}
      +	if o.Bucket == "" {
      +		return errors.New("missing required Bucket field in Object")
      +	}
      +	if o.Key == "" {
      +		return errors.New("missing required Key field in Object")
      +	}
      +	return nil
      +}
      +
      +// A SizedObject holds the bucket, key, and size of an object.
      +type SizedObject struct {
      +	Object
      +	Size int64
      +}
      +
      +func (o *Object) String() string {
      +	if o == nil {
      +		return "<nil *Object>"
      +	}
      +	return fmt.Sprintf("%v/%v", o.Bucket, o.Key)
      +}
      +
      +func (so SizedObject) String() string {
      +	return fmt.Sprintf("%v/%v (%vB)", so.Bucket, so.Key, so.Size)
      +}
      +
+// simpleRequest makes a simple body-less Google Cloud Storage request.
      +func simpleRequest(method, url_ string) (*http.Request, error) {
      +	req, err := http.NewRequest(method, url_, nil)
      +	if err != nil {
      +		return nil, err
      +	}
      +	req.Header.Set("x-goog-api-version", "2")
      +	return req, err
      +}
      +
      +// ErrInvalidRange is used when the server has returned http.StatusRequestedRangeNotSatisfiable.
      +var ErrInvalidRange = errors.New("gcsutil: requested range not satisfiable")
      +
      +// GetPartialObject fetches part of a Google Cloud Storage object.
      +// This function relies on the ctx ctxutil.HTTPClient value being set to an OAuth2
      +// authorized and authenticated HTTP client.
      +// If length is negative, the rest of the object is returned.
      +// It returns ErrInvalidRange if the server replies with http.StatusRequestedRangeNotSatisfiable.
      +// The caller must call Close on the returned value.
      +func GetPartialObject(ctx context.Context, obj Object, offset, length int64) (io.ReadCloser, error) {
      +	if offset < 0 {
      +		return nil, errors.New("invalid negative offset")
      +	}
      +	if err := obj.valid(); err != nil {
      +		return nil, err
      +	}
      +
      +	req, err := simpleRequest("GET", gsAccessURL+"/"+obj.Bucket+"/"+obj.Key)
      +	if err != nil {
      +		return nil, err
      +	}
      +	if length >= 0 {
      +		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
      +	} else {
      +		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
      +	}
      +	req.Cancel = ctx.Done()
      +	res, err := ctxutil.Client(ctx).Do(req)
      +	if err != nil {
      +		return nil, fmt.Errorf("GET (offset=%d, length=%d) failed: %v\n", offset, length, err)
      +	}
      +	if res.StatusCode == http.StatusNotFound {
      +		res.Body.Close()
      +		return nil, os.ErrNotExist
      +	}
      +	if !(res.StatusCode == http.StatusPartialContent || (offset == 0 && res.StatusCode == http.StatusOK)) {
      +		res.Body.Close()
      +		if res.StatusCode == http.StatusRequestedRangeNotSatisfiable {
      +			return nil, ErrInvalidRange
      +		}
      +		return nil, fmt.Errorf("GET (offset=%d, length=%d) got failed status: %v\n", offset, length, res.Status)
      +	}
      +
      +	return res.Body, nil
      +}
      +
      +// EnumerateObjects lists the objects in a bucket.
      +// This function relies on the ctx oauth2.HTTPClient value being set to an OAuth2
      +// authorized and authenticated HTTP client.
      +// If after is non-empty, listing will begin with lexically greater object names.
      +// If limit is non-zero, the length of the list will be limited to that number.
      +func EnumerateObjects(ctx context.Context, bucket, after string, limit int) ([]*storage.ObjectAttrs, error) {
      +	// Build url, with query params
      +	var params []string
      +	if after != "" {
      +		params = append(params, "marker="+url.QueryEscape(after))
      +	}
      +	if limit > 0 {
      +		params = append(params, fmt.Sprintf("max-keys=%v", limit))
      +	}
      +	query := ""
      +	if len(params) > 0 {
      +		query = "?" + strings.Join(params, "&")
      +	}
      +
      +	req, err := simpleRequest("GET", gsAccessURL+"/"+bucket+"/"+query)
      +	if err != nil {
      +		return nil, err
      +	}
      +	req.Cancel = ctx.Done()
      +	res, err := ctxutil.Client(ctx).Do(req)
      +	if err != nil {
      +		return nil, err
      +	}
      +	defer res.Body.Close()
      +	if res.StatusCode != http.StatusOK {
      +		return nil, fmt.Errorf("gcsutil: bad enumerate response code: %v", res.Status)
      +	}
      +
      +	var xres struct {
      +		Contents []SizedObject
      +	}
      +	if err = xml.NewDecoder(res.Body).Decode(&xres); err != nil {
      +		return nil, err
      +	}
      +
      +	objAttrs := make([]*storage.ObjectAttrs, len(xres.Contents))
      +	for k, o := range xres.Contents {
      +		objAttrs[k] = &storage.ObjectAttrs{
      +			Name: o.Key,
      +			Size: o.Size,
      +		}
      +	}
      +
      +	return objAttrs, nil
      +}
      diff --git a/vendor/go4.org/ctxutil/ctxutil.go b/vendor/go4.org/ctxutil/ctxutil.go
      new file mode 100644
      index 00000000..f5842a61
      --- /dev/null
      +++ b/vendor/go4.org/ctxutil/ctxutil.go
      @@ -0,0 +1,44 @@
      +/*
      +Copyright 2015 The Go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package ctxutil contains golang.org/x/net/context related utilities.
      +package ctxutil // import "go4.org/ctxutil"
      +
      +import (
      +	"net/http"
      +
      +	"golang.org/x/net/context"
      +	"golang.org/x/oauth2"
      +)
      +
      +// HTTPClient is the context key to use with golang.org/x/net/context's WithValue function
      +// to associate an *http.Client value with a context.
      +//
      +// We use the same value as the oauth2 package (which first introduced this key) rather
      +// than creating a new one and forcing users to possibly set two.
      +var HTTPClient = oauth2.HTTPClient
      +
      +// Client returns the HTTP client to use for the provided context.
      +// If ctx is non-nil and has an associated HTTP client, that client is returned.
      +// Otherwise, http.DefaultClient is returned.
      +func Client(ctx context.Context) *http.Client {
      +	if ctx != nil {
      +		if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
      +			return hc
      +		}
      +	}
      +	return http.DefaultClient
      +}
      diff --git a/vendor/go4.org/errorutil/highlight.go b/vendor/go4.org/errorutil/highlight.go
      index aace6a46..1b1efb0f 100644
      --- a/vendor/go4.org/errorutil/highlight.go
      +++ b/vendor/go4.org/errorutil/highlight.go
      @@ -15,7 +15,7 @@ limitations under the License.
       */
       
       // Package errorutil helps make better error messages.
      -package errorutil
      +package errorutil // import "go4.org/errorutil"
       
       import (
       	"bufio"
      diff --git a/vendor/go4.org/fault/fault.go b/vendor/go4.org/fault/fault.go
      new file mode 100644
      index 00000000..25cbdc7a
      --- /dev/null
      +++ b/vendor/go4.org/fault/fault.go
      @@ -0,0 +1,59 @@
      +/*
      +Copyright 2014 The Go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package fault handles fault injection for testing.
      +package fault // import "go4.org/fault"
      +
      +import (
      +	"errors"
      +	"math/rand"
      +	"os"
      +	"strconv"
      +	"strings"
      +)
      +
      +var fakeErr = errors.New("fake injected error for testing")
      +
      +// An Injector reports whether fake errors should be returned.
      +type Injector struct {
      +	failPercent int
      +}
      +
      +// NewInjector returns a new fault injector with the given name.  The
      +// environment variable "FAULT_" + capital(name) + "_FAIL_PERCENT"
      +// controls the percentage of requests that fail. If undefined or
      +// zero, no requests fail.
      +func NewInjector(name string) *Injector {
      +	var failPercent, _ = strconv.Atoi(os.Getenv("FAULT_" + strings.ToUpper(name) + "_FAIL_PERCENT"))
      +	return &Injector{
      +		failPercent: failPercent,
      +	}
      +}
      +
      +// ShouldFail reports whether a fake error should be returned.
      +func (in *Injector) ShouldFail() bool {
      +	return in.failPercent > 0 && in.failPercent > rand.Intn(100)
      +}
      +
      +// FailErr checks ShouldFail and, if true, assigns a fake error to err
      +// and returns true.
      +func (in *Injector) FailErr(err *error) bool {
      +	if !in.ShouldFail() {
      +		return false
      +	}
      +	*err = fakeErr
      +	return true
      +}
      diff --git a/vendor/go4.org/jsonconfig/eval.go b/vendor/go4.org/jsonconfig/eval.go
      new file mode 100644
      index 00000000..988597d4
      --- /dev/null
      +++ b/vendor/go4.org/jsonconfig/eval.go
      @@ -0,0 +1,321 @@
      +/*
      +Copyright 2011 The go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package jsonconfig
      +
      +import (
      +	"encoding/json"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"log"
      +	"os"
      +	"path/filepath"
      +	"regexp"
      +	"runtime"
      +	"strconv"
      +	"strings"
      +
      +	"go4.org/errorutil"
      +	"go4.org/wkfs"
      +)
      +
      +type stringVector struct {
      +	v []string
      +}
      +
      +func (v *stringVector) Push(s string) {
      +	v.v = append(v.v, s)
      +}
      +
      +func (v *stringVector) Pop() {
      +	v.v = v.v[:len(v.v)-1]
      +}
      +
      +func (v *stringVector) Last() string {
      +	return v.v[len(v.v)-1]
      +}
      +
      +// A File is the type returned by ConfigParser.Open.
      +type File interface {
      +	io.ReadSeeker
      +	io.Closer
      +	Name() string
      +}
      +
      +// ConfigParser specifies the environment for parsing a config file
      +// and evaluating expressions.
      +type ConfigParser struct {
      +	rootJSON Obj
      +
      +	touchedFiles map[string]bool
      +	includeStack stringVector
      +
      +	// Open optionally specifies an opener function.
      +	Open func(filename string) (File, error)
      +
      +	// IncludeDirs optionally specifies where to find the other config files which are child
      +	// objects of this config, if any. Even if nil, the working directory is always searched
      +	// first.
      +	IncludeDirs []string
      +}
      +
      +func (c *ConfigParser) open(filename string) (File, error) {
      +	if c.Open == nil {
      +		return wkfs.Open(filename)
      +	}
      +	return c.Open(filename)
      +}
      +
+// envPattern validates variable names for config _env expressions.
      +var envPattern = regexp.MustCompile(`\$\{[A-Za-z0-9_]+\}`)
      +
      +// ReadFile parses the provided path and returns the config file.
      +// If path is empty, the c.Open function must be defined.
      +func (c *ConfigParser) ReadFile(path string) (Obj, error) {
      +	if path == "" && c.Open == nil {
      +		return nil, errors.New("ReadFile of empty string but Open hook not defined")
      +	}
      +	c.touchedFiles = make(map[string]bool)
      +	var err error
      +	c.rootJSON, err = c.recursiveReadJSON(path)
      +	return c.rootJSON, err
      +}
      +
+// recursiveReadJSON decodes and evaluates a JSON config file, watching for include cycles.
      +func (c *ConfigParser) recursiveReadJSON(configPath string) (decodedObject map[string]interface{}, err error) {
      +	if configPath != "" {
      +		absConfigPath, err := filepath.Abs(configPath)
      +		if err != nil {
      +			return nil, fmt.Errorf("Failed to expand absolute path for %s", configPath)
      +		}
      +		if c.touchedFiles[absConfigPath] {
      +			return nil, fmt.Errorf("ConfigParser include cycle detected reading config: %v",
      +				absConfigPath)
      +		}
      +		c.touchedFiles[absConfigPath] = true
      +
      +		c.includeStack.Push(absConfigPath)
      +		defer c.includeStack.Pop()
      +	}
      +
      +	var f File
      +	if f, err = c.open(configPath); err != nil {
      +		return nil, fmt.Errorf("Failed to open config: %v", err)
      +	}
      +	defer f.Close()
      +
      +	decodedObject = make(map[string]interface{})
      +	dj := json.NewDecoder(f)
      +	if err = dj.Decode(&decodedObject); err != nil {
      +		extra := ""
      +		if serr, ok := err.(*json.SyntaxError); ok {
      +			if _, serr := f.Seek(0, os.SEEK_SET); serr != nil {
      +				log.Fatalf("seek error: %v", serr)
      +			}
      +			line, col, highlight := errorutil.HighlightBytePosition(f, serr.Offset)
      +			extra = fmt.Sprintf(":\nError at line %d, column %d (file offset %d):\n%s",
      +				line, col, serr.Offset, highlight)
      +		}
      +		return nil, fmt.Errorf("error parsing JSON object in config file %s%s\n%v",
      +			f.Name(), extra, err)
      +	}
      +
      +	if err = c.evaluateExpressions(decodedObject, nil, false); err != nil {
      +		return nil, fmt.Errorf("error expanding JSON config expressions in %s:\n%v",
      +			f.Name(), err)
      +	}
      +
      +	return decodedObject, nil
      +}
      +
      +var regFunc = map[string]expanderFunc{}
      +
      +// RegisterFunc registers a new function that may be called from JSON
      +// configs using an array of the form ["_name", arg0, argN...].
      +// The provided name must begin with an underscore.
      +func RegisterFunc(name string, fn func(c *ConfigParser, v []interface{}) (interface{}, error)) {
      +	if len(name) < 2 || !strings.HasPrefix(name, "_") {
      +		panic("illegal name")
      +	}
      +	if _, dup := regFunc[name]; dup {
      +		panic("duplicate registration of " + name)
      +	}
      +	regFunc[name] = fn
      +}
      +
      +type expanderFunc func(c *ConfigParser, v []interface{}) (interface{}, error)
      +
      +func namedExpander(name string) (fn expanderFunc, ok bool) {
      +	switch name {
      +	case "_env":
      +		return (*ConfigParser).expandEnv, true
      +	case "_fileobj":
      +		return (*ConfigParser).expandFile, true
      +	}
      +	fn, ok = regFunc[name]
      +	return
      +}
      +
      +func (c *ConfigParser) evalValue(v interface{}) (interface{}, error) {
      +	sl, ok := v.([]interface{})
      +	if !ok {
      +		return v, nil
      +	}
      +	if name, ok := sl[0].(string); ok {
      +		if expander, ok := namedExpander(name); ok {
      +			newval, err := expander(c, sl[1:])
      +			if err != nil {
      +				return nil, err
      +			}
      +			return newval, nil
      +		}
      +	}
      +	for i, oldval := range sl {
      +		newval, err := c.evalValue(oldval)
      +		if err != nil {
      +			return nil, err
      +		}
      +		sl[i] = newval
      +	}
      +	return v, nil
      +}
      +
      +// CheckTypes parses m and returns an error if it encounters a type or value
      +// that is not supported by this package.
      +func (c *ConfigParser) CheckTypes(m map[string]interface{}) error {
      +	return c.evaluateExpressions(m, nil, true)
      +}
      +
      +// evaluateExpressions parses recursively m, populating it with the values
      +// that are found, unless testOnly is true.
      +func (c *ConfigParser) evaluateExpressions(m map[string]interface{}, seenKeys []string, testOnly bool) error {
      +	for k, ei := range m {
      +		thisPath := append(seenKeys, k)
      +		switch subval := ei.(type) {
      +		case string, bool, float64, nil:
      +			continue
      +		case []interface{}:
      +			if len(subval) == 0 {
      +				continue
      +			}
      +			evaled, err := c.evalValue(subval)
      +			if err != nil {
      +				return fmt.Errorf("%s: value error %v", strings.Join(thisPath, "."), err)
      +			}
      +			if !testOnly {
      +				m[k] = evaled
      +			}
      +		case map[string]interface{}:
      +			if err := c.evaluateExpressions(subval, thisPath, testOnly); err != nil {
      +				return err
      +			}
      +		default:
      +			return fmt.Errorf("%s: unhandled type %T", strings.Join(thisPath, "."), ei)
      +		}
      +	}
      +	return nil
      +}
      +
      +// Permit either:
      +//    ["_env", "VARIABLE"] (required to be set)
      +// or ["_env", "VARIABLE", "default_value"]
      +func (c *ConfigParser) expandEnv(v []interface{}) (interface{}, error) {
      +	hasDefault := false
      +	def := ""
      +	if len(v) < 1 || len(v) > 2 {
      +		return "", fmt.Errorf("_env expansion expected 1 or 2 args, got %d", len(v))
      +	}
      +	s, ok := v[0].(string)
      +	if !ok {
      +		return "", fmt.Errorf("Expected a string after _env expansion; got %#v", v[0])
      +	}
      +	boolDefault, wantsBool := false, false
      +	if len(v) == 2 {
      +		hasDefault = true
      +		switch vdef := v[1].(type) {
      +		case string:
      +			def = vdef
      +		case bool:
      +			wantsBool = true
      +			boolDefault = vdef
      +		default:
      +			return "", fmt.Errorf("Expected default value in %q _env expansion; got %#v", s, v[1])
      +		}
      +	}
      +	var err error
      +	expanded := envPattern.ReplaceAllStringFunc(s, func(match string) string {
      +		envVar := match[2 : len(match)-1]
      +		val := os.Getenv(envVar)
      +		// Special case:
      +		if val == "" && envVar == "USER" && runtime.GOOS == "windows" {
      +			val = os.Getenv("USERNAME")
      +		}
      +		if val == "" {
      +			if hasDefault {
      +				return def
      +			}
      +			err = fmt.Errorf("couldn't expand environment variable %q", envVar)
      +		}
      +		return val
      +	})
      +	if wantsBool {
      +		if expanded == "" {
      +			return boolDefault, nil
      +		}
      +		return strconv.ParseBool(expanded)
      +	}
      +	return expanded, err
      +}
      +
      +func (c *ConfigParser) expandFile(v []interface{}) (exp interface{}, err error) {
      +	if len(v) != 1 {
      +		return "", fmt.Errorf("_file expansion expected 1 arg, got %d", len(v))
      +	}
      +	var incPath string
      +	if incPath, err = c.ConfigFilePath(v[0].(string)); err != nil {
      +		return "", fmt.Errorf("Included config does not exist: %v", v[0])
      +	}
      +	if exp, err = c.recursiveReadJSON(incPath); err != nil {
      +		return "", fmt.Errorf("In file included from %s:\n%v",
      +			c.includeStack.Last(), err)
      +	}
      +	return exp, nil
      +}
      +
      +// ConfigFilePath checks if configFile is found and returns a usable path to it.
      +// It first checks if configFile is an absolute path, or if it's found in the
      +// current working directory. If not, it then checks if configFile is in one of
      +// c.IncludeDirs. It returns an error if configFile is absolute and could not be
      +// statted, or os.ErrNotExist if configFile was not found.
      +func (c *ConfigParser) ConfigFilePath(configFile string) (path string, err error) {
      +	// Try to open as absolute / relative to CWD
      +	_, err = os.Stat(configFile)
      +	if err != nil && filepath.IsAbs(configFile) {
      +		return "", err
      +	}
      +	if err == nil {
      +		return configFile, nil
      +	}
      +
      +	for _, d := range c.IncludeDirs {
      +		if _, err := os.Stat(filepath.Join(d, configFile)); err == nil {
      +			return filepath.Join(d, configFile), nil
      +		}
      +	}
      +
      +	return "", os.ErrNotExist
      +}
      diff --git a/vendor/go4.org/jsonconfig/jsonconfig.go b/vendor/go4.org/jsonconfig/jsonconfig.go
      new file mode 100644
      index 00000000..386dda87
      --- /dev/null
      +++ b/vendor/go4.org/jsonconfig/jsonconfig.go
      @@ -0,0 +1,297 @@
      +/*
      +Copyright 2011 The go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package jsonconfig defines a helper type for JSON objects to be
      +// used for configuration.
      +package jsonconfig // import "go4.org/jsonconfig"
      +
      +import (
      +	"fmt"
      +	"sort"
      +	"strconv"
      +	"strings"
      +)
      +
      +// Obj is a JSON configuration map.
      +type Obj map[string]interface{}
      +
      +// ReadFile reads JSON config data from the specified open file, expanding
      +// all expressions. Use *ConfigParser.ReadFile instead if you
      +// need to set c.IncludeDirs.
      +func ReadFile(configPath string) (Obj, error) {
      +	var c ConfigParser
      +	return c.ReadFile(configPath)
      +}
      +
      +func (jc Obj) RequiredObject(key string) Obj {
      +	return jc.obj(key, false)
      +}
      +
      +func (jc Obj) OptionalObject(key string) Obj {
      +	return jc.obj(key, true)
      +}
      +
      +func (jc Obj) obj(key string, optional bool) Obj {
      +	jc.noteKnownKey(key)
      +	ei, ok := jc[key]
      +	if !ok {
      +		if optional {
      +			return make(Obj)
      +		}
      +		jc.appendError(fmt.Errorf("Missing required config key %q (object)", key))
      +		return make(Obj)
      +	}
      +	m, ok := ei.(map[string]interface{})
      +	if !ok {
      +		jc.appendError(fmt.Errorf("Expected config key %q to be an object, not %T", key, ei))
      +		return make(Obj)
      +	}
      +	return m
      +}
      +
      +func (jc Obj) RequiredString(key string) string {
      +	return jc.string(key, nil)
      +}
      +
      +func (jc Obj) OptionalString(key, def string) string {
      +	return jc.string(key, &def)
      +}
      +
      +func (jc Obj) string(key string, def *string) string {
      +	jc.noteKnownKey(key)
      +	ei, ok := jc[key]
      +	if !ok {
      +		if def != nil {
      +			return *def
      +		}
      +		jc.appendError(fmt.Errorf("Missing required config key %q (string)", key))
      +		return ""
      +	}
      +	s, ok := ei.(string)
      +	if !ok {
      +		jc.appendError(fmt.Errorf("Expected config key %q to be a string", key))
      +		return ""
      +	}
      +	return s
      +}
      +
      +func (jc Obj) RequiredStringOrObject(key string) interface{} {
      +	return jc.stringOrObject(key, true)
      +}
      +
      +func (jc Obj) OptionalStringOrObject(key string) interface{} {
      +	return jc.stringOrObject(key, false)
      +}
      +
      +func (jc Obj) stringOrObject(key string, required bool) interface{} {
      +	jc.noteKnownKey(key)
      +	ei, ok := jc[key]
      +	if !ok {
      +		if !required {
      +			return nil
      +		}
      +		jc.appendError(fmt.Errorf("Missing required config key %q (string or object)", key))
      +		return ""
      +	}
      +	if _, ok := ei.(map[string]interface{}); ok {
      +		return ei
      +	}
      +	if _, ok := ei.(string); ok {
      +		return ei
      +	}
      +	jc.appendError(fmt.Errorf("Expected config key %q to be a string or object", key))
      +	return ""
      +}
      +
      +func (jc Obj) RequiredBool(key string) bool {
      +	return jc.bool(key, nil)
      +}
      +
      +func (jc Obj) OptionalBool(key string, def bool) bool {
      +	return jc.bool(key, &def)
      +}
      +
      +func (jc Obj) bool(key string, def *bool) bool {
      +	jc.noteKnownKey(key)
      +	ei, ok := jc[key]
      +	if !ok {
      +		if def != nil {
      +			return *def
      +		}
      +		jc.appendError(fmt.Errorf("Missing required config key %q (boolean)", key))
      +		return false
      +	}
      +	switch v := ei.(type) {
      +	case bool:
      +		return v
      +	case string:
      +		b, err := strconv.ParseBool(v)
      +		if err != nil {
      +			jc.appendError(fmt.Errorf("Config key %q has bad boolean format %q", key, v))
      +		}
      +		return b
      +	default:
      +		jc.appendError(fmt.Errorf("Expected config key %q to be a boolean", key))
      +		return false
      +	}
      +}
      +
      +func (jc Obj) RequiredInt(key string) int {
      +	return jc.int(key, nil)
      +}
      +
      +func (jc Obj) OptionalInt(key string, def int) int {
      +	return jc.int(key, &def)
      +}
      +
      +func (jc Obj) int(key string, def *int) int {
      +	jc.noteKnownKey(key)
      +	ei, ok := jc[key]
      +	if !ok {
      +		if def != nil {
      +			return *def
      +		}
      +		jc.appendError(fmt.Errorf("Missing required config key %q (integer)", key))
      +		return 0
      +	}
      +	b, ok := ei.(float64)
      +	if !ok {
      +		jc.appendError(fmt.Errorf("Expected config key %q to be a number", key))
      +		return 0
      +	}
      +	return int(b)
      +}
      +
      +func (jc Obj) RequiredInt64(key string) int64 {
      +	return jc.int64(key, nil)
      +}
      +
      +func (jc Obj) OptionalInt64(key string, def int64) int64 {
      +	return jc.int64(key, &def)
      +}
      +
      +func (jc Obj) int64(key string, def *int64) int64 {
      +	jc.noteKnownKey(key)
      +	ei, ok := jc[key]
      +	if !ok {
      +		if def != nil {
      +			return *def
      +		}
      +		jc.appendError(fmt.Errorf("Missing required config key %q (integer)", key))
      +		return 0
      +	}
      +	b, ok := ei.(float64)
      +	if !ok {
      +		jc.appendError(fmt.Errorf("Expected config key %q to be a number", key))
      +		return 0
      +	}
      +	return int64(b)
      +}
      +
      +func (jc Obj) RequiredList(key string) []string {
      +	return jc.requiredList(key, true)
      +}
      +
      +func (jc Obj) OptionalList(key string) []string {
      +	return jc.requiredList(key, false)
      +}
      +
      +func (jc Obj) requiredList(key string, required bool) []string {
      +	jc.noteKnownKey(key)
      +	ei, ok := jc[key]
      +	if !ok {
      +		if required {
      +			jc.appendError(fmt.Errorf("Missing required config key %q (list of strings)", key))
      +		}
      +		return nil
      +	}
      +	eil, ok := ei.([]interface{})
      +	if !ok {
      +		jc.appendError(fmt.Errorf("Expected config key %q to be a list, not %T", key, ei))
      +		return nil
      +	}
      +	sl := make([]string, len(eil))
      +	for i, ei := range eil {
      +		s, ok := ei.(string)
      +		if !ok {
      +			jc.appendError(fmt.Errorf("Expected config key %q index %d to be a string, not %T", key, i, ei))
      +			return nil
      +		}
      +		sl[i] = s
      +	}
      +	return sl
      +}
      +
      +func (jc Obj) noteKnownKey(key string) {
      +	_, ok := jc["_knownkeys"]
      +	if !ok {
      +		jc["_knownkeys"] = make(map[string]bool)
      +	}
      +	jc["_knownkeys"].(map[string]bool)[key] = true
      +}
      +
      +func (jc Obj) appendError(err error) {
      +	ei, ok := jc["_errors"]
      +	if ok {
      +		jc["_errors"] = append(ei.([]error), err)
      +	} else {
      +		jc["_errors"] = []error{err}
      +	}
      +}
      +
      +// UnknownKeys returns the keys from the config that have not yet been discovered by one of the RequiredT or OptionalT calls.
      +func (jc Obj) UnknownKeys() []string {
      +	ei, ok := jc["_knownkeys"]
      +	var known map[string]bool
      +	if ok {
      +		known = ei.(map[string]bool)
      +	}
      +	var unknown []string
      +	for k, _ := range jc {
      +		if ok && known[k] {
      +			continue
      +		}
      +		if strings.HasPrefix(k, "_") {
      +			// Permit keys with a leading underscore as a
      +			// form of comments.
      +			continue
      +		}
      +		unknown = append(unknown, k)
      +	}
      +	sort.Strings(unknown)
      +	return unknown
      +}
      +
      +func (jc Obj) Validate() error {
      +	unknown := jc.UnknownKeys()
      +	for _, k := range unknown {
      +		jc.appendError(fmt.Errorf("Unknown key %q", k))
      +	}
      +
      +	ei, ok := jc["_errors"]
      +	if !ok {
      +		return nil
      +	}
      +	errList := ei.([]error)
      +	if len(errList) == 1 {
      +		return errList[0]
      +	}
      +	strs := make([]string, 0)
      +	for _, v := range errList {
      +		strs = append(strs, v.Error())
      +	}
      +	return fmt.Errorf("Multiple errors: " + strings.Join(strs, ", "))
      +}
      diff --git a/vendor/go4.org/jsonconfig/jsonconfig_test.go b/vendor/go4.org/jsonconfig/jsonconfig_test.go
      new file mode 100644
      index 00000000..8d2a1bd6
      --- /dev/null
      +++ b/vendor/go4.org/jsonconfig/jsonconfig_test.go
      @@ -0,0 +1,114 @@
      +/*
      +Copyright 2011 The go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package jsonconfig
      +
      +import (
      +	"os"
      +	"reflect"
      +	"strings"
      +	"testing"
      +)
      +
      +func testIncludes(configFile string, t *testing.T) {
      +	var c ConfigParser
      +	c.IncludeDirs = []string{"testdata"}
      +	obj, err := c.ReadFile(configFile)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	two := obj.RequiredObject("two")
      +	if err := obj.Validate(); err != nil {
      +		t.Error(err)
      +	}
      +	if g, e := two.RequiredString("key"), "value"; g != e {
      +		t.Errorf("sub object key = %q; want %q", g, e)
      +	}
      +}
      +
      +func TestIncludesCWD(t *testing.T) {
      +	testIncludes("testdata/include1.json", t)
      +}
      +
      +func TestIncludesIncludeDirs(t *testing.T) {
      +	testIncludes("testdata/include1bis.json", t)
      +}
      +
      +func TestIncludeLoop(t *testing.T) {
      +	_, err := ReadFile("testdata/loop1.json")
      +	if err == nil {
      +		t.Fatal("expected an error about import cycles.")
      +	}
      +	if !strings.Contains(err.Error(), "include cycle detected") {
      +		t.Fatalf("expected an error about import cycles; got: %v", err)
      +	}
      +}
      +
      +func TestBoolEnvs(t *testing.T) {
      +	os.Setenv("TEST_EMPTY", "")
      +	os.Setenv("TEST_TRUE", "true")
      +	os.Setenv("TEST_ONE", "1")
      +	os.Setenv("TEST_ZERO", "0")
      +	os.Setenv("TEST_FALSE", "false")
      +	obj, err := ReadFile("testdata/boolenv.json")
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	if str := obj.RequiredString("emptystr"); str != "" {
      +		t.Errorf("str = %q, want empty", str)
      +	}
      +	tests := []struct {
      +		key  string
      +		want bool
      +	}{
      +		{"def_false", false},
      +		{"def_true", true},
      +		{"set_true_def_false", true},
      +		{"set_false_def_true", false},
      +		{"lit_true", true},
      +		{"lit_false", false},
      +		{"one", true},
      +		{"zero", false},
      +	}
      +	for _, tt := range tests {
      +		if v := obj.RequiredBool(tt.key); v != tt.want {
      +			t.Errorf("key %q = %v; want %v", tt.key, v, tt.want)
      +		}
      +	}
      +	if err := obj.Validate(); err != nil {
      +		t.Error(err)
      +	}
      +}
      +
      +func TestListExpansion(t *testing.T) {
      +	os.Setenv("TEST_BAR", "bar")
      +	obj, err := ReadFile("testdata/listexpand.json")
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	s := obj.RequiredString("str")
      +	l := obj.RequiredList("list")
      +	if err := obj.Validate(); err != nil {
      +		t.Error(err)
      +	}
      +	want := []string{"foo", "bar"}
      +	if !reflect.DeepEqual(l, want) {
      +		t.Errorf("got = %#v\nwant = %#v", l, want)
      +	}
      +	if s != "bar" {
      +		t.Errorf("str = %q, want %q", s, "bar")
      +	}
      +}
      diff --git a/vendor/go4.org/jsonconfig/testdata/boolenv.json b/vendor/go4.org/jsonconfig/testdata/boolenv.json
      new file mode 100644
      index 00000000..fe9431eb
      --- /dev/null
      +++ b/vendor/go4.org/jsonconfig/testdata/boolenv.json
      @@ -0,0 +1,11 @@
      +{
      + "emptystr": ["_env", "${TEST_EMPTY}", ""],
      + "def_false": ["_env", "${TEST_EMPTY}", false],
      + "def_true": ["_env", "${TEST_EMPTY}", true],
      + "set_true_def_false": ["_env", "${TEST_TRUE}", false],
      + "set_false_def_true": ["_env", "${TEST_FALSE}", true],
      + "one": ["_env", "${TEST_ONE}"],
      + "zero": ["_env", "${TEST_ZERO}"],
      + "lit_true": true,
      + "lit_false": false
      +}
      diff --git a/vendor/go4.org/jsonconfig/testdata/include1.json b/vendor/go4.org/jsonconfig/testdata/include1.json
      new file mode 100644
      index 00000000..6d8b38e9
      --- /dev/null
      +++ b/vendor/go4.org/jsonconfig/testdata/include1.json
      @@ -0,0 +1,3 @@
      +{
      +  "two": ["_fileobj", "testdata/include2.json"]
      +}
      diff --git a/vendor/go4.org/jsonconfig/testdata/include1bis.json b/vendor/go4.org/jsonconfig/testdata/include1bis.json
      new file mode 100644
      index 00000000..8459f8ee
      --- /dev/null
      +++ b/vendor/go4.org/jsonconfig/testdata/include1bis.json
      @@ -0,0 +1,3 @@
      +{
      +  "two": ["_fileobj", "include2.json"]
      +}
      diff --git a/vendor/go4.org/jsonconfig/testdata/include2.json b/vendor/go4.org/jsonconfig/testdata/include2.json
      new file mode 100644
      index 00000000..7a9e8644
      --- /dev/null
      +++ b/vendor/go4.org/jsonconfig/testdata/include2.json
      @@ -0,0 +1,3 @@
      +{
      +  "key": "value"
      +}
      diff --git a/vendor/go4.org/jsonconfig/testdata/listexpand.json b/vendor/go4.org/jsonconfig/testdata/listexpand.json
      new file mode 100644
      index 00000000..ccabceff
      --- /dev/null
      +++ b/vendor/go4.org/jsonconfig/testdata/listexpand.json
      @@ -0,0 +1,4 @@
      +{
      +  "list": ["foo", ["_env", "${TEST_BAR}"]],
      +  "str": ["_env", "${TEST_BAR}"]
      +}
      diff --git a/vendor/go4.org/jsonconfig/testdata/loop1.json b/vendor/go4.org/jsonconfig/testdata/loop1.json
      new file mode 100644
      index 00000000..215146fd
      --- /dev/null
      +++ b/vendor/go4.org/jsonconfig/testdata/loop1.json
      @@ -0,0 +1,3 @@
      +{
      +  "obj": ["_fileobj", "testdata/loop2.json"]
      +}
      diff --git a/vendor/go4.org/jsonconfig/testdata/loop2.json b/vendor/go4.org/jsonconfig/testdata/loop2.json
      new file mode 100644
      index 00000000..1d270eb4
      --- /dev/null
      +++ b/vendor/go4.org/jsonconfig/testdata/loop2.json
      @@ -0,0 +1,3 @@
      +{
      +  "obj": ["_fileobj", "testdata/loop1.json"]
      +}
      diff --git a/vendor/go4.org/legal/legal.go b/vendor/go4.org/legal/legal.go
      new file mode 100644
      index 00000000..954b143d
      --- /dev/null
      +++ b/vendor/go4.org/legal/legal.go
      @@ -0,0 +1,32 @@
      +/*
      +Copyright 2014 The Go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package legal provides in-process storage for compiled-in licenses.
      +package legal // import "go4.org/legal"
      +
      +var licenses []string
      +
      +// RegisterLicense stores the license text.
      +// It doesn't check whether the text was already present.
      +func RegisterLicense(text string) {
      +	licenses = append(licenses, text)
      +	return
      +}
      +
      +// Licenses returns a slice of the licenses.
      +func Licenses() []string {
      +	return licenses
      +}
      diff --git a/vendor/go4.org/legal/legal_test.go b/vendor/go4.org/legal/legal_test.go
      new file mode 100644
      index 00000000..0c65e9a0
      --- /dev/null
      +++ b/vendor/go4.org/legal/legal_test.go
      @@ -0,0 +1,29 @@
      +/*
      +Copyright 2014 The Go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package legal
      +
      +import (
      +	"testing"
      +)
      +
      +func TestRegisterLicense(t *testing.T) {
      +	initial := len(licenses)
      +	RegisterLicense("dummy")
      +	if initial+1 != len(licenses) {
      +		t.Fatal("didn't add a license")
      +	}
      +}
      diff --git a/vendor/go4.org/lock/.gitignore b/vendor/go4.org/lock/.gitignore
      new file mode 100644
      index 00000000..b25c15b8
      --- /dev/null
      +++ b/vendor/go4.org/lock/.gitignore
      @@ -0,0 +1 @@
      +*~
      diff --git a/vendor/go4.org/lock/lock.go b/vendor/go4.org/lock/lock.go
      new file mode 100644
      index 00000000..3e253628
      --- /dev/null
      +++ b/vendor/go4.org/lock/lock.go
      @@ -0,0 +1,186 @@
      +/*
      +Copyright 2013 The Go Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package lock is a file locking library.
      +package lock // import "go4.org/lock"
      +
      +import (
      +	"encoding/json"
      +	"fmt"
      +	"io"
      +	"os"
      +	"path/filepath"
      +	"sync"
      +)
      +
      +// Lock locks the given file, creating the file if necessary. If the
      +// file already exists, it must have zero size or an error is returned.
      +// The lock is an exclusive lock (a write lock), but locked files
      +// should neither be read from nor written to. Such files should have
      +// zero size and only exist to co-ordinate ownership across processes.
      +//
      +// A nil Closer is returned if an error occurred. Otherwise, close that
      +// Closer to release the lock.
      +//
      +// On Linux, FreeBSD and OSX, a lock has the same semantics as fcntl(2)'s
      +// advisory locks.  In particular, closing any other file descriptor for the
      +// same file will release the lock prematurely.
      +//
      +// Attempting to lock a file that is already locked by the current process
      +// has undefined behavior.
      +//
      +// On other operating systems, lock will fallback to using the presence and
      +// content of a file named name + '.lock' to implement locking behavior.
      +func Lock(name string) (io.Closer, error) {
      +	abs, err := filepath.Abs(name)
      +	if err != nil {
      +		return nil, err
      +	}
      +	lockmu.Lock()
      +	defer lockmu.Unlock()
      +	if locked[abs] {
      +		return nil, fmt.Errorf("file %q already locked", abs)
      +	}
      +
      +	c, err := lockFn(abs)
      +	if err != nil {
      +		return nil, fmt.Errorf("cannot acquire lock: %v", err)
      +	}
      +	locked[abs] = true
      +	return c, nil
      +}
      +
      +var lockFn = lockPortable
      +
      +// lockPortable is a portable version not using fcntl. Doesn't handle crashes as gracefully,
      +// since it can leave stale lock files.
      +func lockPortable(name string) (io.Closer, error) {
      +	fi, err := os.Stat(name)
      +	if err == nil && fi.Size() > 0 {
      +		st := portableLockStatus(name)
      +		switch st {
      +		case statusLocked:
      +			return nil, fmt.Errorf("file %q already locked", name)
      +		case statusStale:
      +			os.Remove(name)
      +		case statusInvalid:
      +			return nil, fmt.Errorf("can't Lock file %q: has invalid contents", name)
      +		}
      +	}
      +	f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_EXCL, 0666)
      +	if err != nil {
      +		return nil, fmt.Errorf("failed to create lock file %s %v", name, err)
      +	}
      +	if err := json.NewEncoder(f).Encode(&pidLockMeta{OwnerPID: os.Getpid()}); err != nil {
      +		return nil, fmt.Errorf("cannot write owner pid: %v", err)
      +	}
      +	return &unlocker{
      +		f:        f,
      +		abs:      name,
      +		portable: true,
      +	}, nil
      +}
      +
      +type lockStatus int
      +
      +const (
      +	statusInvalid lockStatus = iota
      +	statusLocked
      +	statusUnlocked
      +	statusStale
      +)
      +
      +type pidLockMeta struct {
      +	OwnerPID int
      +}
      +
      +func portableLockStatus(path string) lockStatus {
      +	f, err := os.Open(path)
      +	if err != nil {
      +		return statusUnlocked
      +	}
      +	defer f.Close()
      +	var meta pidLockMeta
      +	if json.NewDecoder(f).Decode(&meta) != nil {
      +		return statusInvalid
      +	}
      +	if meta.OwnerPID == 0 {
      +		return statusInvalid
      +	}
      +	p, err := os.FindProcess(meta.OwnerPID)
      +	if err != nil {
      +		// e.g. on Windows
      +		return statusStale
      +	}
+	// On unix, os.FindProcess always succeeds, so we have to send
      +	// it a signal to see if it's alive.
      +	if signalZero != nil {
      +		if p.Signal(signalZero) != nil {
      +			return statusStale
      +		}
      +	}
      +	return statusLocked
      +}
      +
      +var signalZero os.Signal // nil or set by lock_sigzero.go
      +
      +var (
      +	lockmu sync.Mutex
      +	locked = map[string]bool{} // abs path -> true
      +)
      +
      +type unlocker struct {
      +	portable bool
      +	f        *os.File
      +	abs      string
      +	// once guards the close method call.
      +	once sync.Once
      +	// err holds the error returned by Close.
      +	err error
      +}
      +
      +func (u *unlocker) Close() error {
      +	u.once.Do(u.close)
      +	return u.err
      +}
      +
      +func (u *unlocker) close() {
      +	lockmu.Lock()
      +	defer lockmu.Unlock()
      +	delete(locked, u.abs)
      +
      +	if u.portable {
      +		// In the portable lock implementation, it's
      +		// important to close before removing because
      +		// Windows won't allow us to remove an open
      +		// file.
      +		if err := u.f.Close(); err != nil {
      +			u.err = err
      +		}
      +		if err := os.Remove(u.abs); err != nil {
      +			// Note that if both Close and Remove fail,
      +			// we care more about the latter than the former
      +			// so we'll return that error.
      +			u.err = err
      +		}
      +		return
      +	}
+	// In other implementations, it's nice for us to clean up.
+	// If we do this, though, it needs to be before the
      +	// u.f.Close below.
      +	os.Remove(u.abs)
      +	u.err = u.f.Close()
      +}
      diff --git a/vendor/go4.org/lock/lock_appengine.go b/vendor/go4.org/lock/lock_appengine.go
      new file mode 100644
      index 00000000..ab4cad6a
      --- /dev/null
      +++ b/vendor/go4.org/lock/lock_appengine.go
      @@ -0,0 +1,32 @@
      +// +build appengine
      +
      +/*
      +Copyright 2013 The Go Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package lock
      +
      +import (
      +	"errors"
      +	"io"
      +)
      +
      +func init() {
      +	lockFn = lockAppEngine
      +}
      +
      +func lockAppEngine(name string) (io.Closer, error) {
      +	return nil, errors.New("Lock not available on App Engine")
      +}
      diff --git a/vendor/go4.org/lock/lock_darwin_amd64.go b/vendor/go4.org/lock/lock_darwin_amd64.go
      new file mode 100644
      index 00000000..35f5787b
      --- /dev/null
      +++ b/vendor/go4.org/lock/lock_darwin_amd64.go
      @@ -0,0 +1,67 @@
      +// +build darwin,amd64
      +// +build !appengine
      +
      +/*
      +Copyright 2013 The Go Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package lock
      +
      +import (
      +	"fmt"
      +	"io"
      +	"os"
      +	"syscall"
      +	"unsafe"
      +)
      +
      +func init() {
      +	lockFn = lockFcntl
      +}
      +
      +func lockFcntl(name string) (io.Closer, error) {
      +	fi, err := os.Stat(name)
      +	if err == nil && fi.Size() > 0 {
      +		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
      +	}
      +
      +	f, err := os.Create(name)
      +	if err != nil {
      +		return nil, fmt.Errorf("Lock Create of %s failed: %v", name, err)
      +	}
      +
      +	// This type matches C's "struct flock" defined in /usr/include/sys/fcntl.h.
      +	// TODO: move this into the standard syscall package.
      +	k := struct {
      +		Start  uint64 // sizeof(off_t): 8
      +		Len    uint64 // sizeof(off_t): 8
      +		Pid    uint32 // sizeof(pid_t): 4
      +		Type   uint16 // sizeof(short): 2
      +		Whence uint16 // sizeof(short): 2
      +	}{
      +		Type:   syscall.F_WRLCK,
      +		Whence: uint16(os.SEEK_SET),
      +		Start:  0,
      +		Len:    0, // 0 means to lock the entire file.
      +		Pid:    uint32(os.Getpid()),
      +	}
      +
      +	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
      +	if errno != 0 {
      +		f.Close()
      +		return nil, errno
      +	}
      +	return &unlocker{f: f, abs: name}, nil
      +}
      diff --git a/vendor/go4.org/lock/lock_freebsd.go b/vendor/go4.org/lock/lock_freebsd.go
      new file mode 100644
      index 00000000..ee2767a0
      --- /dev/null
      +++ b/vendor/go4.org/lock/lock_freebsd.go
      @@ -0,0 +1,66 @@
      +/*
      +Copyright 2013 The Go Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package lock
      +
      +import (
      +	"fmt"
      +	"io"
      +	"os"
      +	"syscall"
      +	"unsafe"
      +)
      +
      +func init() {
      +	lockFn = lockFcntl
      +}
      +
      +func lockFcntl(name string) (io.Closer, error) {
      +	fi, err := os.Stat(name)
      +	if err == nil && fi.Size() > 0 {
      +		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
      +	}
      +
      +	f, err := os.Create(name)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	// This type matches C's "struct flock" defined in /usr/include/fcntl.h.
      +	// TODO: move this into the standard syscall package.
      +	k := struct {
      +		Start  int64 /* off_t starting offset */
      +		Len    int64 /* off_t len = 0 means until end of file */
      +		Pid    int32 /* pid_t lock owner */
      +		Type   int16 /* short lock type: read/write, etc. */
      +		Whence int16 /* short type of l_start */
      +		Sysid  int32 /* int   remote system id or zero for local */
      +	}{
      +		Start:  0,
      +		Len:    0, // 0 means to lock the entire file.
      +		Pid:    int32(os.Getpid()),
      +		Type:   syscall.F_WRLCK,
      +		Whence: int16(os.SEEK_SET),
      +		Sysid:  0,
      +	}
      +
      +	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
      +	if errno != 0 {
      +		f.Close()
      +		return nil, errno
      +	}
      +	return &unlocker{f: f, abs: name}, nil
      +}
      diff --git a/vendor/go4.org/lock/lock_linux_amd64.go b/vendor/go4.org/lock/lock_linux_amd64.go
      new file mode 100644
      index 00000000..08b3aae9
      --- /dev/null
      +++ b/vendor/go4.org/lock/lock_linux_amd64.go
      @@ -0,0 +1,67 @@
      +// +build linux,amd64
      +// +build !appengine
      +
      +/*
      +Copyright 2013 The Go Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package lock
      +
      +import (
      +	"fmt"
      +	"io"
      +	"os"
      +	"syscall"
      +	"unsafe"
      +)
      +
      +func init() {
      +	lockFn = lockFcntl
      +}
      +
      +func lockFcntl(name string) (io.Closer, error) {
      +	fi, err := os.Stat(name)
      +	if err == nil && fi.Size() > 0 {
      +		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
      +	}
      +
      +	f, err := os.Create(name)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	// This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h.
      +	// TODO: move this into the standard syscall package.
      +	k := struct {
      +		Type   uint32
      +		Whence uint32
      +		Start  uint64
      +		Len    uint64
      +		Pid    uint32
      +	}{
      +		Type:   syscall.F_WRLCK,
      +		Whence: uint32(os.SEEK_SET),
      +		Start:  0,
      +		Len:    0, // 0 means to lock the entire file.
      +		Pid:    uint32(os.Getpid()),
      +	}
      +
      +	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
      +	if errno != 0 {
      +		f.Close()
      +		return nil, errno
      +	}
      +	return &unlocker{f: f, abs: name}, nil
      +}
      diff --git a/vendor/go4.org/lock/lock_linux_arm.go b/vendor/go4.org/lock/lock_linux_arm.go
      new file mode 100644
      index 00000000..ebf87bd3
      --- /dev/null
      +++ b/vendor/go4.org/lock/lock_linux_arm.go
      @@ -0,0 +1,68 @@
      +// +build linux,arm
      +// +build !appengine
      +
      +/*
      +Copyright 2013 The Go Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package lock
      +
      +import (
      +	"fmt"
      +	"io"
      +	"os"
      +	"syscall"
      +	"unsafe"
      +)
      +
      +func init() {
      +	lockFn = lockFcntl
      +}
      +
      +func lockFcntl(name string) (io.Closer, error) {
      +	fi, err := os.Stat(name)
      +	if err == nil && fi.Size() > 0 {
      +		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
      +	}
      +
      +	f, err := os.Create(name)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	// This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h.
      +	// TODO: move this into the standard syscall package.
      +	k := struct {
      +		Type   uint16
      +		Whence uint16
      +		Start  uint32
      +		Len    uint32
      +		Pid    uint32
      +	}{
      +		Type:   syscall.F_WRLCK,
      +		Whence: uint16(os.SEEK_SET),
      +		Start:  0,
      +		Len:    0, // 0 means to lock the entire file.
      +		Pid:    uint32(os.Getpid()),
      +	}
      +
      +	const F_SETLK = 6 // actual value. syscall package is wrong: golang.org/issue/7059
      +	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(F_SETLK), uintptr(unsafe.Pointer(&k)))
      +	if errno != 0 {
      +		f.Close()
      +		return nil, errno
      +	}
      +	return &unlocker{f: f, abs: name}, nil
      +}
      diff --git a/vendor/go4.org/lock/lock_plan9.go b/vendor/go4.org/lock/lock_plan9.go
      new file mode 100644
      index 00000000..d841c27d
      --- /dev/null
      +++ b/vendor/go4.org/lock/lock_plan9.go
      @@ -0,0 +1,41 @@
      +/*
      +Copyright 2013 The Go Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package lock
      +
      +import (
      +	"fmt"
      +	"io"
      +	"os"
      +)
      +
      +func init() {
      +	lockFn = lockPlan9
      +}
      +
      +func lockPlan9(name string) (io.Closer, error) {
      +	fi, err := os.Stat(name)
      +	if err == nil && fi.Size() > 0 {
      +		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
      +	}
      +
      +	f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644)
      +	if err != nil {
      +		return nil, fmt.Errorf("Lock Create of %s failed: %v", name, err)
      +	}
      +
      +	return &unlocker{f: f, abs: name}, nil
      +}
      diff --git a/vendor/go4.org/lock/lock_sigzero.go b/vendor/go4.org/lock/lock_sigzero.go
      new file mode 100644
      index 00000000..fd3ba2db
      --- /dev/null
      +++ b/vendor/go4.org/lock/lock_sigzero.go
      @@ -0,0 +1,26 @@
      +// +build !appengine
      +// +build linux darwin freebsd openbsd netbsd dragonfly
      +
      +/*
      +Copyright 2013 The Go Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package lock
      +
      +import "syscall"
      +
      +func init() {
      +	signalZero = syscall.Signal(0)
      +}
      diff --git a/vendor/go4.org/lock/lock_test.go b/vendor/go4.org/lock/lock_test.go
      new file mode 100644
      index 00000000..de9c8f87
      --- /dev/null
      +++ b/vendor/go4.org/lock/lock_test.go
      @@ -0,0 +1,222 @@
      +/*
      +Copyright 2013 The Go Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package lock
      +
      +import (
      +	"bufio"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"io/ioutil"
      +	"os"
      +	"os/exec"
      +	"path/filepath"
      +	"strconv"
      +	"testing"
      +)
      +
      +func TestLock(t *testing.T) {
      +	testLock(t, false)
      +}
      +
      +func TestLockPortable(t *testing.T) {
      +	testLock(t, true)
      +}
      +
      +func TestLockInChild(t *testing.T) {
      +	f := os.Getenv("TEST_LOCK_FILE")
      +	if f == "" {
      +		// not child
      +		return
      +	}
      +	lock := Lock
      +	if v, _ := strconv.ParseBool(os.Getenv("TEST_LOCK_PORTABLE")); v {
      +		lock = lockPortable
      +	}
      +
      +	var lk io.Closer
      +	for scan := bufio.NewScanner(os.Stdin); scan.Scan(); {
      +		var err error
      +		switch scan.Text() {
      +		case "lock":
      +			lk, err = lock(f)
      +		case "unlock":
      +			err = lk.Close()
      +			lk = nil
      +		case "exit":
      +			// Simulate a crash, or at least not unlocking the lock.
      +			os.Exit(0)
      +		default:
      +			err = fmt.Errorf("unexpected child command %q", scan.Text())
      +		}
      +		if err != nil {
      +			fmt.Println(err)
      +		} else {
      +			fmt.Println("")
      +		}
      +	}
      +}
      +
      +func testLock(t *testing.T, portable bool) {
      +	lock := Lock
      +	if portable {
      +		lock = lockPortable
      +	}
      +	t.Logf("test lock, portable %v", portable)
      +
      +	td, err := ioutil.TempDir("", "")
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	defer os.RemoveAll(td)
      +
      +	path := filepath.Join(td, "foo.lock")
      +
      +	proc := newChildProc(t, path, portable)
      +	defer proc.kill()
      +
      +	t.Logf("First lock in child")
      +	if err := proc.do("lock"); err != nil {
      +		t.Fatalf("first lock in child process: %v", err)
      +	}
      +
      +	t.Logf("Crash child")
      +	if err := proc.do("exit"); err != nil {
      +		t.Fatalf("crash in child process: %v", err)
      +	}
      +
      +	proc = newChildProc(t, path, portable)
      +	defer proc.kill()
      +
      +	t.Logf("Locking+unlocking in child...")
      +	if err := proc.do("lock"); err != nil {
      +		t.Fatalf("lock in child process after crashing child: %v", err)
      +	}
      +	if err := proc.do("unlock"); err != nil {
      +		t.Fatalf("lock in child process after crashing child: %v", err)
      +	}
      +
      +	t.Logf("Locking in parent...")
      +	lk1, err := lock(path)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	t.Logf("Again in parent...")
      +	_, err = lock(path)
      +	if err == nil {
      +		t.Fatal("expected second lock to fail")
      +	}
      +
      +	t.Logf("Locking in child...")
      +	if err := proc.do("lock"); err == nil {
      +		t.Fatalf("expected lock in child process to fail")
      +	}
      +
      +	t.Logf("Unlocking lock in parent")
      +	if err := lk1.Close(); err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	t.Logf("Trying lock again in child...")
      +	if err := proc.do("lock"); err != nil {
      +		t.Fatal(err)
      +	}
      +	if err := proc.do("unlock"); err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	lk3, err := lock(path)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	lk3.Close()
      +}
      +
      +type childLockCmd struct {
      +	op    string
      +	reply chan<- error
      +}
      +
      +type childProc struct {
      +	proc *os.Process
      +	c    chan childLockCmd
      +}
      +
      +func (c *childProc) kill() {
      +	c.proc.Kill()
      +}
      +
      +func (c *childProc) do(op string) error {
      +	reply := make(chan error)
      +	c.c <- childLockCmd{
      +		op:    op,
      +		reply: reply,
      +	}
      +	return <-reply
      +}
      +
      +func newChildProc(t *testing.T, path string, portable bool) *childProc {
      +	cmd := exec.Command(os.Args[0], "-test.run=LockInChild$")
      +	cmd.Env = []string{"TEST_LOCK_FILE=" + path}
      +	toChild, err := cmd.StdinPipe()
      +	if err != nil {
      +		t.Fatalf("cannot make pipe: %v", err)
      +	}
      +	fromChild, err := cmd.StdoutPipe()
      +	if err != nil {
      +		t.Fatalf("cannot make pipe: %v", err)
      +	}
      +	cmd.Stderr = os.Stderr
      +	if portable {
      +		cmd.Env = append(cmd.Env, "TEST_LOCK_PORTABLE=1")
      +	}
      +	if err := cmd.Start(); err != nil {
      +		t.Fatalf("cannot start child: %v", err)
      +	}
      +	cmdChan := make(chan childLockCmd)
      +	go func() {
      +		defer fromChild.Close()
      +		defer toChild.Close()
      +		inScan := bufio.NewScanner(fromChild)
      +		for c := range cmdChan {
      +			fmt.Fprintln(toChild, c.op)
      +			ok := inScan.Scan()
      +			if c.op == "exit" {
      +				if ok {
      +					c.reply <- errors.New("child did not exit")
      +				} else {
      +					cmd.Wait()
      +					c.reply <- nil
      +				}
      +				break
      +			}
      +			if !ok {
      +				panic("child exited early")
      +			}
      +			if errText := inScan.Text(); errText != "" {
      +				c.reply <- errors.New(errText)
      +			} else {
      +				c.reply <- nil
      +			}
      +		}
      +	}()
      +	return &childProc{
      +		c:    cmdChan,
      +		proc: cmd.Process,
      +	}
      +}
      diff --git a/vendor/go4.org/net/throttle/throttle.go b/vendor/go4.org/net/throttle/throttle.go
      new file mode 100644
      index 00000000..2aa77e1a
      --- /dev/null
      +++ b/vendor/go4.org/net/throttle/throttle.go
      @@ -0,0 +1,137 @@
      +/*
      +Copyright 2012 Google Inc.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package throttle provides a net.Listener that returns
      +// artificially-delayed connections for testing real-world
      +// connectivity.
      +package throttle // import "go4.org/net/throttle"
      +
      +import (
      +	"fmt"
      +	"net"
      +	"sync"
      +	"time"
      +)
      +
      +const unitSize = 1400 // read/write chunk size. ~MTU size.
      +
      +type Rate struct {
      +	KBps    int // or 0, to not rate-limit bandwidth
      +	Latency time.Duration
      +}
      +
      +// byteTime returns the time required for n bytes.
      +func (r Rate) byteTime(n int) time.Duration {
      +	if r.KBps == 0 {
      +		return 0
      +	}
      +	return time.Duration(float64(n)/1024/float64(r.KBps)) * time.Second
      +}
      +
      +type Listener struct {
      +	net.Listener
      +	Down Rate // server Writes to Client
      +	Up   Rate // server Reads from client
      +}
      +
      +func (ln *Listener) Accept() (net.Conn, error) {
      +	c, err := ln.Listener.Accept()
      +	time.Sleep(ln.Up.Latency)
      +	if err != nil {
      +		return nil, err
      +	}
      +	tc := &conn{Conn: c, Down: ln.Down, Up: ln.Up}
      +	tc.start()
      +	return tc, nil
      +}
      +
      +type nErr struct {
      +	n   int
      +	err error
      +}
      +
      +type writeReq struct {
      +	writeAt time.Time
      +	p       []byte
      +	resc    chan nErr
      +}
      +
      +type conn struct {
      +	net.Conn
      +	Down Rate // for reads
      +	Up   Rate // for writes
      +
      +	wchan     chan writeReq
      +	closeOnce sync.Once
      +	closeErr  error
      +}
      +
      +func (c *conn) start() {
      +	c.wchan = make(chan writeReq, 1024)
      +	go c.writeLoop()
      +}
      +
      +func (c *conn) writeLoop() {
      +	for req := range c.wchan {
      +		time.Sleep(req.writeAt.Sub(time.Now()))
      +		var res nErr
      +		for len(req.p) > 0 && res.err == nil {
      +			writep := req.p
      +			if len(writep) > unitSize {
      +				writep = writep[:unitSize]
      +			}
      +			n, err := c.Conn.Write(writep)
      +			time.Sleep(c.Up.byteTime(len(writep)))
      +			res.n += n
      +			res.err = err
      +			req.p = req.p[n:]
      +		}
      +		req.resc <- res
      +	}
      +}
      +
      +func (c *conn) Close() error {
      +	c.closeOnce.Do(func() {
      +		err := c.Conn.Close()
      +		close(c.wchan)
      +		c.closeErr = err
      +	})
      +	return c.closeErr
      +}
      +
      +func (c *conn) Write(p []byte) (n int, err error) {
      +	defer func() {
      +		if e := recover(); e != nil {
      +			n = 0
      +			err = fmt.Errorf("%v", err)
      +			return
      +		}
      +	}()
      +	resc := make(chan nErr, 1)
      +	c.wchan <- writeReq{time.Now().Add(c.Up.Latency), p, resc}
      +	res := <-resc
      +	return res.n, res.err
      +}
      +
      +func (c *conn) Read(p []byte) (n int, err error) {
      +	const max = 1024
      +	if len(p) > max {
      +		p = p[:max]
      +	}
      +	n, err = c.Conn.Read(p)
      +	time.Sleep(c.Down.byteTime(n))
      +	return
      +}
      diff --git a/vendor/go4.org/oauthutil/oauth.go b/vendor/go4.org/oauthutil/oauth.go
      new file mode 100644
      index 00000000..260298d9
      --- /dev/null
      +++ b/vendor/go4.org/oauthutil/oauth.go
      @@ -0,0 +1,121 @@
      +/*
      +Copyright 2015 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package oauthutil contains OAuth 2 related utilities.
      +package oauthutil // import "go4.org/oauthutil"
      +
      +import (
      +	"encoding/json"
      +	"errors"
      +	"fmt"
      +	"time"
      +
      +	"go4.org/wkfs"
      +	"golang.org/x/oauth2"
      +)
      +
      +// TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization
      +// code should be returned in the title bar of the browser, with the page text
      +// prompting the user to copy the code and paste it in the application.
      +const TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob"
      +
      +// ErrNoAuthCode is returned when Token() has not found any valid cached token
      +// and TokenSource does not have an AuthCode for getting a new token.
      +var ErrNoAuthCode = errors.New("oauthutil: unspecified TokenSource.AuthCode")
      +
      +// TokenSource is an implementation of oauth2.TokenSource. It uses CacheFile to store and
      +// reuse the the acquired token, and AuthCode to provide the authorization code that will be
      +// exchanged for a token otherwise.
      +type TokenSource struct {
      +	Config *oauth2.Config
      +
      +	// CacheFile is where the token will be stored JSON-encoded. Any call to Token
      +	// first tries to read a valid token from CacheFile.
      +	CacheFile string
      +
      +	// AuthCode provides the authorization code that Token will exchange for a token.
      +	// It usually is a way to prompt the user for the code. If CacheFile does not provide
      +	// a token and AuthCode is nil, Token returns ErrNoAuthCode.
      +	AuthCode func() string
      +}
      +
      +var errExpiredToken = errors.New("expired token")
      +
      +// cachedToken returns the token saved in cacheFile. It specifically returns
      +// errTokenExpired if the token is expired.
      +func cachedToken(cacheFile string) (*oauth2.Token, error) {
      +	tok := new(oauth2.Token)
      +	tokenData, err := wkfs.ReadFile(cacheFile)
      +	if err != nil {
      +		return nil, err
      +	}
      +	if err = json.Unmarshal(tokenData, tok); err != nil {
      +		return nil, err
      +	}
      +	if !tok.Valid() {
      +		if tok != nil && time.Now().After(tok.Expiry) {
      +			return nil, errExpiredToken
      +		}
      +		return nil, errors.New("invalid token")
      +	}
      +	return tok, nil
      +}
      +
      +// Token first tries to find a valid token in CacheFile, and otherwise uses
      +// Config and AuthCode to fetch a new token. This new token is saved in CacheFile
      +// (if not blank). If CacheFile did not provide a token and AuthCode is nil,
      +// ErrNoAuthCode is returned.
      +func (src TokenSource) Token() (*oauth2.Token, error) {
      +	var tok *oauth2.Token
      +	var err error
      +	if src.CacheFile != "" {
      +		tok, err = cachedToken(src.CacheFile)
      +		if err == nil {
      +			return tok, nil
      +		}
      +		if err != errExpiredToken {
      +			fmt.Printf("Error getting token from %s: %v\n", src.CacheFile, err)
      +		}
      +	}
      +	if src.AuthCode == nil {
      +		return nil, ErrNoAuthCode
      +	}
      +	tok, err = src.Config.Exchange(oauth2.NoContext, src.AuthCode())
      +	if err != nil {
      +		return nil, fmt.Errorf("could not exchange auth code for a token: %v", err)
      +	}
      +	if src.CacheFile == "" {
      +		return tok, nil
      +	}
      +	tokenData, err := json.Marshal(&tok)
      +	if err != nil {
      +		return nil, fmt.Errorf("could not encode token as json: %v", err)
      +	}
      +	if err := wkfs.WriteFile(src.CacheFile, tokenData, 0600); err != nil {
      +		return nil, fmt.Errorf("could not cache token in %v: %v", src.CacheFile, err)
      +	}
      +	return tok, nil
      +}
      +
      +// NewRefreshTokenSource returns a token source that obtains its initial token
      +// based on the provided config and the refresh token.
      +func NewRefreshTokenSource(config *oauth2.Config, refreshToken string) oauth2.TokenSource {
      +	var noInitialToken *oauth2.Token = nil
      +	return oauth2.ReuseTokenSource(noInitialToken, config.TokenSource(
      +		oauth2.NoContext, // TODO: maybe accept a context later.
      +		&oauth2.Token{RefreshToken: refreshToken},
      +	))
      +}
      diff --git a/vendor/go4.org/osutil/exec_plan9.go b/vendor/go4.org/osutil/exec_plan9.go
      new file mode 100644
      index 00000000..8c82a95a
      --- /dev/null
      +++ b/vendor/go4.org/osutil/exec_plan9.go
      @@ -0,0 +1,35 @@
      +// Copyright 2015 The go4 Authors
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// +build plan9
      +
      +package osutil
      +
      +import (
      +	"fmt"
      +	"os"
      +	"path/filepath"
      +	"syscall"
      +)
      +
      +func executable() (string, error) {
      +	fn := fmt.Sprintf("/proc/%d/text", os.Getpid())
      +	f, err := os.Open(fn)
      +	if err != nil {
      +		return "", err
      +	}
      +	defer f.Close()
      +	p, err := syscall.Fd2path(int(f.Fd()))
      +	return filepath.Clean(p), err
      +}
      diff --git a/vendor/go4.org/osutil/exec_procfs.go b/vendor/go4.org/osutil/exec_procfs.go
      new file mode 100644
      index 00000000..062861ba
      --- /dev/null
      +++ b/vendor/go4.org/osutil/exec_procfs.go
      @@ -0,0 +1,42 @@
      +// Copyright 2015 The go4 Authors
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// +build linux netbsd openbsd dragonfly nacl
      +
      +package osutil
      +
      +import (
      +	"errors"
      +	"os"
      +	"path/filepath"
      +	"runtime"
      +)
      +
      +func executable() (string, error) {
      +	var procfn string
      +	switch runtime.GOOS {
      +	default:
      +		return "", errors.New("Executable not implemented for " + runtime.GOOS)
      +	case "linux":
      +		procfn = "/proc/self/exe"
      +	case "netbsd":
      +		procfn = "/proc/curproc/exe"
      +	case "openbsd":
      +		procfn = "/proc/curproc/file"
      +	case "dragonfly":
      +		procfn = "/proc/curproc/file"
      +	}
      +	p, err := os.Readlink(procfn)
      +	return filepath.Clean(p), err
      +}
      diff --git a/vendor/go4.org/osutil/exec_solaris_amd64.go b/vendor/go4.org/osutil/exec_solaris_amd64.go
      new file mode 100644
      index 00000000..1a43157b
      --- /dev/null
      +++ b/vendor/go4.org/osutil/exec_solaris_amd64.go
      @@ -0,0 +1,71 @@
      +// Copyright 2015 The go4 Authors
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// +build amd64,solaris
      +
      +package osutil
      +
      +import (
      +	"os"
      +	"syscall"
      +	"unsafe"
      +)
      +
      +//go:cgo_import_dynamic libc_getexecname getexecname "libc.so"
      +//go:linkname libc_getexecname libc_getexecname
      +
      +var libc_getexecname uintptr
      +
      +func getexecname() (path unsafe.Pointer, err error) {
      +	r0, _, e1 := syscall.Syscall6(uintptr(unsafe.Pointer(&libc_getexecname)), 0, 0, 0, 0, 0, 0)
      +	path = unsafe.Pointer(r0)
      +	if e1 != 0 {
      +		err = syscall.Errno(e1)
      +	}
      +	return
      +}
      +
      +func syscallGetexecname() (path string, err error) {
      +	ptr, err := getexecname()
      +	if err != nil {
      +		return "", err
      +	}
      +	bytes := (*[1 << 29]byte)(ptr)[:]
      +	for i, b := range bytes {
      +		if b == 0 {
      +			return string(bytes[:i]), nil
      +		}
      +	}
      +	panic("unreachable")
      +}
      +
      +var initCwd, initCwdErr = os.Getwd()
      +
      +func executable() (string, error) {
      +	path, err := syscallGetexecname()
      +	if err != nil {
      +		return path, err
      +	}
      +	if len(path) > 0 && path[0] != '/' {
      +		if initCwdErr != nil {
      +			return path, initCwdErr
      +		}
      +		if len(path) > 2 && path[0:2] == "./" {
      +			// skip "./"
      +			path = path[2:]
      +		}
      +		return initCwd + "/" + path, nil
      +	}
      +	return path, nil
      +}
      diff --git a/vendor/go4.org/osutil/exec_sysctl.go b/vendor/go4.org/osutil/exec_sysctl.go
      new file mode 100644
      index 00000000..192b0f04
      --- /dev/null
      +++ b/vendor/go4.org/osutil/exec_sysctl.go
      @@ -0,0 +1,63 @@
      +// Copyright 2015 The go4 Authors
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +// +build freebsd darwin
      +
      +package osutil
      +
      +import (
      +	"os"
      +	"path/filepath"
      +	"runtime"
      +	"syscall"
      +	"unsafe"
      +)
      +
      +var cacheWD, cacheWDErr = os.Getwd()
      +
      +func executable() (string, error) {
      +	var mib [4]int32
      +	switch runtime.GOOS {
      +	case "freebsd":
      +		mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
      +	case "darwin":
      +		mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
      +	}
      +
      +	n := uintptr(0)
      +	// get length
      +	_, _, err := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
      +	if err != 0 {
      +		return "", err
      +	}
      +	if n == 0 { // shouldn't happen
      +		return "", nil
      +	}
      +	buf := make([]byte, n)
      +	_, _, err = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
      +	if err != 0 {
      +		return "", err
      +	}
      +	if n == 0 { // shouldn't happen
      +		return "", nil
      +	}
      +	p := string(buf[:n-1])
      +	if !filepath.IsAbs(p) {
      +		if cacheWDErr != nil {
      +			return p, cacheWDErr
      +		}
      +		p = filepath.Join(cacheWD, filepath.Clean(p))
      +	}
      +	return filepath.EvalSymlinks(p)
      +}
      diff --git a/vendor/go4.org/osutil/exec_test.go b/vendor/go4.org/osutil/exec_test.go
      new file mode 100644
      index 00000000..0761919b
      --- /dev/null
      +++ b/vendor/go4.org/osutil/exec_test.go
      @@ -0,0 +1,94 @@
      +// Copyright 2015 The go4 Authors
      +//
      +// Licensed under the Apache License, Version 2.0 (the "License");
      +// you may not use this file except in compliance with the License.
      +// You may obtain a copy of the License at
      +//
      +//     http://www.apache.org/licenses/LICENSE-2.0
      +//
      +// Unless required by applicable law or agreed to in writing, software
      +// distributed under the License is distributed on an "AS IS" BASIS,
      +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +// See the License for the specific language governing permissions and
      +// limitations under the License.
      +
      +package osutil
      +
      +import (
      +	"fmt"
      +	"os"
      +	osexec "os/exec"
      +	"path/filepath"
      +	"runtime"
      +	"testing"
      +)
      +
      +const executable_EnvVar = "OSTEST_OUTPUT_EXECPATH"
      +
      +func TestExecutable(t *testing.T) {
      +	if runtime.GOOS == "nacl" {
      +		t.Skip()
      +	}
      +	ep, err := Executable()
      +	if err != nil {
      +		switch goos := runtime.GOOS; goos {
      +		case "openbsd": // procfs is not mounted by default
      +			t.Skipf("Executable failed on %s: %v, expected", goos, err)
      +		}
      +		t.Fatalf("Executable failed: %v", err)
      +	}
      +	// we want fn to be of the form "dir/prog"
      +	dir := filepath.Dir(filepath.Dir(ep))
      +	fn, err := filepath.Rel(dir, ep)
      +	if err != nil {
      +		t.Fatalf("filepath.Rel: %v", err)
      +	}
      +	cmd := &osexec.Cmd{}
      +	// make child start with a relative program path
      +	cmd.Dir = dir
      +	cmd.Path = fn
      +	// forge argv[0] for child, so that we can verify we could correctly
      +	// get real path of the executable without influenced by argv[0].
      +	cmd.Args = []string{"-", "-test.run=XXXX"}
      +	cmd.Env = []string{fmt.Sprintf("%s=1", executable_EnvVar)}
      +	out, err := cmd.CombinedOutput()
      +	if err != nil {
      +		t.Fatalf("exec(self) failed: %v", err)
      +	}
      +	outs := string(out)
      +	if !filepath.IsAbs(outs) {
      +		t.Fatalf("Child returned %q, want an absolute path", out)
      +	}
      +	if !sameFile(outs, ep) {
      +		t.Fatalf("Child returned %q, not the same file as %q", out, ep)
      +	}
      +}
      +
      +func sameFile(fn1, fn2 string) bool {
      +	fi1, err := os.Stat(fn1)
      +	if err != nil {
      +		return false
      +	}
      +	fi2, err := os.Stat(fn2)
      +	if err != nil {
      +		return false
      +	}
      +	return os.SameFile(fi1, fi2)
      +}
      +
      +func init() {
      +	if e := os.Getenv(executable_EnvVar); e != "" {
      +		// first chdir to another path
      +		dir := "/"
      +		if runtime.GOOS == "windows" {
      +			dir = filepath.VolumeName(".")
      +		}
      +		os.Chdir(dir)
      +		if ep, err := Executable(); err != nil {
      +			fmt.Fprint(os.Stderr, "ERROR: ", err)
      +		} else {
      +			fmt.Fprint(os.Stderr, ep)
      +		}
      +		os.Exit(0)
      +	}
      +}
      diff --git a/vendor/go4.org/osutil/exec_windows.go b/vendor/go4.org/osutil/exec_windows.go
      new file mode 100644
      index 00000000..e2d73a14
      --- /dev/null
      +++ b/vendor/go4.org/osutil/exec_windows.go
      @@ -0,0 +1,64 @@
      +/*
      +Copyright 2015 The go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package osutil
      +
      +import (
      +	"path/filepath"
      +	"syscall"
      +	"unsafe"
      +)
      +
      +var (
      +	modkernel32            = syscall.MustLoadDLL("kernel32.dll")
      +	procGetModuleFileNameW = modkernel32.MustFindProc("GetModuleFileNameW")
      +)
      +
      +func getModuleFileName(handle syscall.Handle) (string, error) {
      +	n := uint32(1024)
      +	var buf []uint16
      +	for {
      +		buf = make([]uint16, n)
      +		r, err := syscallGetModuleFileName(handle, &buf[0], n)
      +		if err != nil {
      +			return "", err
      +		}
      +		if r < n {
      +			break
      +		}
      +		// r == n means n not big enough
      +		n += 1024
      +	}
      +	return syscall.UTF16ToString(buf), nil
      +}
      +
      +func executable() (string, error) {
      +	p, err := getModuleFileName(0)
      +	return filepath.Clean(p), err
      +}
      +
      +func syscallGetModuleFileName(module syscall.Handle, fn *uint16, len uint32) (n uint32, err error) {
      +	r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(fn)), uintptr(len))
      +	n = uint32(r0)
      +	if n == 0 {
      +		if e1 != 0 {
      +			err = error(e1)
      +		} else {
      +			err = syscall.EINVAL
      +		}
      +	}
      +	return
      +}
      diff --git a/vendor/go4.org/osutil/osutil.go b/vendor/go4.org/osutil/osutil.go
      new file mode 100644
      index 00000000..fc0dda06
      --- /dev/null
      +++ b/vendor/go4.org/osutil/osutil.go
      @@ -0,0 +1,32 @@
      +/*
      +Copyright 2015 The go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package osutil contains os level functions.
      +package osutil // import "go4.org/osutil"
      +
      +// capture executable on package init to work around various os issues if
      +// captured after executable has been renamed.
      +var execPath, execError = executable()
      +
      +// Executable returns the path name for the executable that starts the
      +// current process. The result is the path that was used to start the
      +// current process, but there is no guarantee that the path is still
      +// pointing to the correct executable.
      +//
      +// OpenBSD is currently unsupported.
      +func Executable() (string, error) {
      +	return execPath, execError
      +}
      diff --git a/vendor/go4.org/readerutil/fakeseeker.go b/vendor/go4.org/readerutil/fakeseeker.go
      new file mode 100644
      index 00000000..5e54c68f
      --- /dev/null
      +++ b/vendor/go4.org/readerutil/fakeseeker.go
      @@ -0,0 +1,70 @@
      +/*
      +Copyright 2014 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package readerutil
      +
      +import (
      +	"errors"
      +	"fmt"
      +	"io"
      +	"os"
      +)
      +
      +// fakeSeeker can seek to the ends but any read not at the current
      +// position will fail.
      +type fakeSeeker struct {
      +	r    io.Reader
      +	size int64
      +
      +	fakePos int64
      +	realPos int64
      +}
      +
      +// NewFakeSeeker returns a ReadSeeker that can pretend to Seek (based
      +// on the provided total size of the reader's content), but any reads
      +// will fail if the fake seek position doesn't match reality.
      +func NewFakeSeeker(r io.Reader, size int64) io.ReadSeeker {
      +	return &fakeSeeker{r: r, size: size}
      +}
      +
      +func (fs *fakeSeeker) Seek(offset int64, whence int) (int64, error) {
      +	var newo int64
      +	switch whence {
      +	default:
      +		return 0, errors.New("invalid whence")
      +	case os.SEEK_SET:
      +		newo = offset
      +	case os.SEEK_CUR:
      +		newo = fs.fakePos + offset
      +	case os.SEEK_END:
      +		newo = fs.size + offset
      +	}
      +	if newo < 0 {
      +		return 0, errors.New("negative seek")
      +	}
      +	fs.fakePos = newo
      +	return newo, nil
      +}
      +
      +func (fs *fakeSeeker) Read(p []byte) (n int, err error) {
      +	if fs.fakePos != fs.realPos {
      +		return 0, fmt.Errorf("attempt to read from fake seek offset %d; real offset is %d", fs.fakePos, fs.realPos)
      +	}
      +	n, err = fs.r.Read(p)
      +	fs.fakePos += int64(n)
      +	fs.realPos += int64(n)
      +	return
      +}
      diff --git a/vendor/go4.org/readerutil/fakeseeker_test.go b/vendor/go4.org/readerutil/fakeseeker_test.go
      new file mode 100644
      index 00000000..16f62bda
      --- /dev/null
      +++ b/vendor/go4.org/readerutil/fakeseeker_test.go
      @@ -0,0 +1,55 @@
      +/*
      +Copyright 2014 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package readerutil
      +
      +import (
      +	"os"
      +	"strings"
      +	"testing"
      +)
      +
      +func TestFakeSeeker(t *testing.T) {
      +	rs := NewFakeSeeker(strings.NewReader("foobar"), 6)
      +	if pos, err := rs.Seek(0, os.SEEK_END); err != nil || pos != 6 {
      +		t.Fatalf("SEEK_END = %d, %v; want 6, nil", pos, err)
      +	}
      +	if pos, err := rs.Seek(0, os.SEEK_CUR); err != nil || pos != 6 {
      +		t.Fatalf("SEEK_CUR = %d, %v; want 6, nil", pos, err)
      +	}
      +	if pos, err := rs.Seek(0, os.SEEK_SET); err != nil || pos != 0 {
      +		t.Fatalf("SEEK_SET = %d, %v; want 0, nil", pos, err)
      +	}
      +
      +	buf := make([]byte, 3)
      +	if n, err := rs.Read(buf); n != 3 || err != nil || string(buf) != "foo" {
      +		t.Fatalf("First read = %d, %v (buf = %q); want foo", n, err, buf)
      +	}
      +	if pos, err := rs.Seek(0, os.SEEK_CUR); err != nil || pos != 3 {
      +		t.Fatalf("Seek cur pos after first read = %d, %v; want 3, nil", pos, err)
      +	}
      +	if n, err := rs.Read(buf); n != 3 || err != nil || string(buf) != "bar" {
      +		t.Fatalf("Second read = %d, %v (buf = %q); want foo", n, err, buf)
      +	}
      +
      +	if pos, err := rs.Seek(1, os.SEEK_SET); err != nil || pos != 1 {
      +		t.Fatalf("SEEK_SET = %d, %v; want 1, nil", pos, err)
      +	}
      +	const msg = "attempt to read from fake seek offset"
      +	if _, err := rs.Read(buf); err == nil || !strings.Contains(err.Error(), msg) {
      +		t.Fatalf("bogus Read after seek = %v; want something containing %q", err, msg)
      +	}
      +}
      diff --git a/vendor/go4.org/readerutil/multireaderat.go b/vendor/go4.org/readerutil/multireaderat.go
      new file mode 100644
      index 00000000..33d148c0
      --- /dev/null
      +++ b/vendor/go4.org/readerutil/multireaderat.go
      @@ -0,0 +1,91 @@
      +/*
      +Copyright 2016 The go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package readerutil
      +
      +import (
      +	"io"
      +	"sort"
      +)
      +
      +// NewMultiReaderAt is like io.MultiReader but produces a ReaderAt
      +// (and Size), instead of just a reader.
      +func NewMultiReaderAt(parts ...SizeReaderAt) SizeReaderAt {
      +	m := &multiRA{
      +		parts: make([]offsetAndSource, 0, len(parts)),
      +	}
      +	var off int64
      +	for _, p := range parts {
      +		m.parts = append(m.parts, offsetAndSource{off, p})
      +		off += p.Size()
      +	}
      +	m.size = off
      +	return m
      +}
      +
      +type offsetAndSource struct {
      +	off int64
      +	SizeReaderAt
      +}
      +
      +type multiRA struct {
      +	parts []offsetAndSource
      +	size  int64
      +}
      +
      +func (m *multiRA) Size() int64 { return m.size }
      +
      +func (m *multiRA) ReadAt(p []byte, off int64) (n int, err error) {
      +	wantN := len(p)
      +
      +	// Skip past the requested offset.
      +	skipParts := sort.Search(len(m.parts), func(i int) bool {
      +		// This function returns whether parts[i] will
      +		// contribute any bytes to our output.
      +		part := m.parts[i]
      +		return part.off+part.Size() > off
      +	})
      +	parts := m.parts[skipParts:]
      +
      +	// How far to skip in the first part.
      +	needSkip := off
      +	if len(parts) > 0 {
      +		needSkip -= parts[0].off
      +	}
      +
      +	for len(parts) > 0 && len(p) > 0 {
      +		readP := p
      +		partSize := parts[0].Size()
      +		if int64(len(readP)) > partSize-needSkip {
      +			readP = readP[:partSize-needSkip]
      +		}
      +		pn, err0 := parts[0].ReadAt(readP, needSkip)
      +		if err0 != nil {
      +			return n, err0
      +		}
      +		n += pn
      +		p = p[pn:]
      +		if int64(pn)+needSkip == partSize {
      +			parts = parts[1:]
      +		}
      +		needSkip = 0
      +	}
      +
      +	if n != wantN {
      +		err = io.ErrUnexpectedEOF
      +	}
      +	return
      +}
      diff --git a/vendor/go4.org/readerutil/multireaderat_test.go b/vendor/go4.org/readerutil/multireaderat_test.go
      new file mode 100644
      index 00000000..a736e028
      --- /dev/null
      +++ b/vendor/go4.org/readerutil/multireaderat_test.go
      @@ -0,0 +1,48 @@
      +/*
      +Copyright 2016 The Go4 Authors.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package readerutil
      +
      +import (
      +	"io"
      +	"io/ioutil"
      +	"strings"
      +	"testing"
      +)
      +
      +func TestMultiReaderAt(t *testing.T) {
      +	sra := NewMultiReaderAt(
      +		io.NewSectionReader(strings.NewReader("xaaax"), 1, 3),
      +		io.NewSectionReader(strings.NewReader("xxbbbbxx"), 2, 3),
      +		io.NewSectionReader(strings.NewReader("cccx"), 0, 3),
      +	)
      +	if sra.Size() != 9 {
      +		t.Fatalf("Size = %d; want 9", sra.Size())
      +	}
      +	const full = "aaabbbccc"
      +	for start := 0; start < len(full); start++ {
      +		for end := start; end < len(full); end++ {
      +			want := full[start:end]
      +			got, err := ioutil.ReadAll(io.NewSectionReader(sra, int64(start), int64(end-start)))
      +			if err != nil {
      +				t.Fatal(err)
      +			}
      +			if string(got) != want {
      +				t.Errorf("for start=%d, end=%d: ReadAll = %q; want %q", start, end, got, want)
      +			}
      +		}
      +	}
      +}
      diff --git a/vendor/go4.org/readerutil/readerutil.go b/vendor/go4.org/readerutil/readerutil.go
      new file mode 100644
      index 00000000..61bb2c47
      --- /dev/null
      +++ b/vendor/go4.org/readerutil/readerutil.go
      @@ -0,0 +1,84 @@
      +/*
      +Copyright 2016 The go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package readerutil contains io.Reader types.
      +package readerutil // import "go4.org/readerutil"
      +
      +import (
      +	"expvar"
      +	"io"
      +)
      +
      +// A SizeReaderAt is a ReaderAt with a Size method.
      +//
      +// An io.SectionReader implements SizeReaderAt.
      +type SizeReaderAt interface {
      +	Size() int64
      +	io.ReaderAt
      +}
      +
      +// A ReadSeekCloser can Read, Seek, and Close.
      +type ReadSeekCloser interface {
      +	io.Reader
      +	io.Seeker
      +	io.Closer
      +}
      +
      +type ReaderAtCloser interface {
      +	io.ReaderAt
      +	io.Closer
      +}
      +
      +// TODO(wathiede): make sure all the stat readers work with code that
      +// type asserts ReadFrom/WriteTo.
      +
      +type varStatReader struct {
      +	*expvar.Int
      +	r io.Reader
      +}
      +
      +// NewReaderStats returns an io.Reader that will have the number of bytes
      +// read from r added to v.
      +func NewStatsReader(v *expvar.Int, r io.Reader) io.Reader {
      +	return &varStatReader{v, r}
      +}
      +
      +func (v *varStatReader) Read(p []byte) (int, error) {
      +	n, err := v.r.Read(p)
      +	v.Int.Add(int64(n))
      +	return n, err
      +}
      +
      +type varStatReadSeeker struct {
      +	*expvar.Int
      +	rs io.ReadSeeker
      +}
      +
      +// NewReaderStats returns an io.ReadSeeker that will have the number of bytes
      +// read from rs added to v.
      +func NewStatsReadSeeker(v *expvar.Int, rs io.ReadSeeker) io.ReadSeeker {
      +	return &varStatReadSeeker{v, rs}
      +}
      +
      +func (v *varStatReadSeeker) Read(p []byte) (int, error) {
      +	n, err := v.rs.Read(p)
      +	v.Int.Add(int64(n))
      +	return n, err
      +}
      +
      +func (v *varStatReadSeeker) Seek(offset int64, whence int) (int64, error) {
      +	return v.rs.Seek(offset, whence)
      +}
      diff --git a/vendor/go4.org/readerutil/readerutil_test.go b/vendor/go4.org/readerutil/readerutil_test.go
      new file mode 100644
      index 00000000..991e03c2
      --- /dev/null
      +++ b/vendor/go4.org/readerutil/readerutil_test.go
      @@ -0,0 +1,38 @@
      +/*
      +Copyright 2016 The Go4 Authors.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package readerutil
      +
      +import (
      +	"expvar"
      +	"fmt"
      +	"io"
      +	"io/ioutil"
      +	"strings"
      +)
      +
      +func ExampleNewStatsReader() {
      +	var (
      +		// r is the io.Reader we'd like to count read from.
      +		r  = strings.NewReader("Hello world")
      +		v  = expvar.NewInt("read-bytes")
      +		sw = NewStatsReader(v, r)
      +	)
      +	// Read from the wrapped io.Reader, StatReader will count the bytes.
      +	io.Copy(ioutil.Discard, sw)
      +	fmt.Printf("Read %s bytes\n", v.String())
      +	// Output: Read 11 bytes
      +}
      diff --git a/vendor/go4.org/strutil/intern.go b/vendor/go4.org/strutil/intern.go
      new file mode 100644
      index 00000000..633ebb36
      --- /dev/null
      +++ b/vendor/go4.org/strutil/intern.go
      @@ -0,0 +1,39 @@
      +/*
      +Copyright 2013 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package strutil
      +
      +var internStr = map[string]string{}
      +
      +// RegisterCommonString adds common strings to the interned string
      +// table.  This should be called during init from the main
      +// goroutine, not later at runtime.
      +func RegisterCommonString(s ...string) {
      +	for _, v := range s {
      +		internStr[v] = v
      +	}
      +}
      +
      +// StringFromBytes returns string(v), minimizing copies for common values of v
      +// as previously registered with RegisterCommonString.
      +func StringFromBytes(v []byte) string {
      +	// In Go 1.3, this string conversion in the map lookup does not allocate
      +	// to make a new string. We depend on Go 1.3, so this is always free:
      +	if s, ok := internStr[string(v)]; ok {
      +		return s
      +	}
      +	return string(v)
      +}
      diff --git a/vendor/go4.org/strutil/strconv.go b/vendor/go4.org/strutil/strconv.go
      new file mode 100644
      index 00000000..9d4ccfff
      --- /dev/null
      +++ b/vendor/go4.org/strutil/strconv.go
      @@ -0,0 +1,117 @@
      +/*
      +Copyright 2013 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package strutil
      +
      +import (
      +	"errors"
      +	"strconv"
      +)
      +
      +// ParseUintBytes is like strconv.ParseUint, but using a []byte.
      +func ParseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
      +	var cutoff, maxVal uint64
      +
      +	if bitSize == 0 {
      +		bitSize = int(strconv.IntSize)
      +	}
      +
      +	s0 := s
      +	switch {
      +	case len(s) < 1:
      +		err = strconv.ErrSyntax
      +		goto Error
      +
      +	case 2 <= base && base <= 36:
      +		// valid base; nothing to do
      +
      +	case base == 0:
      +		// Look for octal, hex prefix.
      +		switch {
      +		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
      +			base = 16
      +			s = s[2:]
      +			if len(s) < 1 {
      +				err = strconv.ErrSyntax
      +				goto Error
      +			}
      +		case s[0] == '0':
      +			base = 8
      +		default:
      +			base = 10
      +		}
      +
      +	default:
      +		err = errors.New("invalid base " + strconv.Itoa(base))
      +		goto Error
      +	}
      +
      +	n = 0
      +	cutoff = cutoff64(base)
      +	maxVal = 1<<uint(bitSize) - 1
      +
      +	for i := 0; i < len(s); i++ {
      +		var v byte
      +		d := s[i]
      +		switch {
      +		case '0' <= d && d <= '9':
      +			v = d - '0'
      +		case 'a' <= d && d <= 'z':
      +			v = d - 'a' + 10
      +		case 'A' <= d && d <= 'Z':
      +			v = d - 'A' + 10
      +		default:
      +			n = 0
      +			err = strconv.ErrSyntax
      +			goto Error
      +		}
      +		if int(v) >= base {
      +			n = 0
      +			err = strconv.ErrSyntax
      +			goto Error
      +		}
      +
      +		if n >= cutoff {
      +			// n*base overflows
      +			n = 1<<64 - 1
      +			err = strconv.ErrRange
      +			goto Error
      +		}
      +		n *= uint64(base)
      +
      +		n1 := n + uint64(v)
      +		if n1 < n || n1 > maxVal {
      +			// n+v overflows
      +			n = 1<<64 - 1
      +			err = strconv.ErrRange
      +			goto Error
      +		}
      +		n = n1
      +	}
      +
      +	return n, nil
      +
      +Error:
      +	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
      +}
      +
      +// Return the first number n such that n*base >= 1<<64.
      +func cutoff64(base int) uint64 {
      +	if base < 2 {
      +		return 0
      +	}
      +	return (1<<64-1)/uint64(base) + 1
      +}
      diff --git a/vendor/go4.org/strutil/strutil.go b/vendor/go4.org/strutil/strutil.go
      new file mode 100644
      index 00000000..cab79309
      --- /dev/null
      +++ b/vendor/go4.org/strutil/strutil.go
      @@ -0,0 +1,200 @@
      +/*
      +Copyright 2013 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package strutil contains string and byte processing functions.
      +package strutil // import "go4.org/strutil"
      +
      +import (
      +	"strings"
      +	"unicode"
      +	"unicode/utf8"
      +)
      +
      +// Fork of Go's implementation in pkg/strings/strings.go:
      +// Generic split: splits after each instance of sep,
      +// including sepSave bytes of sep in the subarrays.
      +func genSplit(dst []string, s, sep string, sepSave, n int) []string {
      +	if n == 0 {
      +		return nil
      +	}
      +	if sep == "" {
      +		panic("sep is empty")
      +	}
      +	if n < 0 {
      +		n = strings.Count(s, sep) + 1
      +	}
      +	c := sep[0]
      +	start := 0
      +	na := 0
      +	for i := 0; i+len(sep) <= len(s) && na+1 < n; i++ {
      +		if s[i] == c && (len(sep) == 1 || s[i:i+len(sep)] == sep) {
      +			dst = append(dst, s[start:i+sepSave])
      +			na++
      +			start = i + len(sep)
      +			i += len(sep) - 1
      +		}
      +	}
      +	dst = append(dst, s[start:])
      +	return dst
      +}
      +
      +// AppendSplitN is like strings.SplitN but appends to and returns dst.
      +// Unlike strings.SplitN, an empty separator is not supported.
      +// The count n determines the number of substrings to return:
      +//   n > 0: at most n substrings; the last substring will be the unsplit remainder.
      +//   n == 0: the result is nil (zero substrings)
      +//   n < 0: all substrings
      +func AppendSplitN(dst []string, s, sep string, n int) []string {
      +	return genSplit(dst, s, sep, 0, n)
      +}
      +
      +// equalFoldRune compares a and b runes whether they fold equally.
      +//
      +// The code comes from strings.EqualFold, but shortened to only one rune.
      +func equalFoldRune(sr, tr rune) bool {
      +	if sr == tr {
      +		return true
      +	}
      +	// Make sr < tr to simplify what follows.
      +	if tr < sr {
      +		sr, tr = tr, sr
      +	}
      +	// Fast check for ASCII.
      +	if tr < utf8.RuneSelf && 'A' <= sr && sr <= 'Z' {
      +		// ASCII, and sr is upper case.  tr must be lower case.
      +		if tr == sr+'a'-'A' {
      +			return true
      +		}
      +		return false
      +	}
      +
      +	// General case.  SimpleFold(x) returns the next equivalent rune > x
      +	// or wraps around to smaller values.
      +	r := unicode.SimpleFold(sr)
      +	for r != sr && r < tr {
      +		r = unicode.SimpleFold(r)
      +	}
      +	if r == tr {
      +		return true
      +	}
      +	return false
      +}
      +
      +// HasPrefixFold is like strings.HasPrefix but uses Unicode case-folding.
      +func HasPrefixFold(s, prefix string) bool {
      +	if prefix == "" {
      +		return true
      +	}
      +	for _, pr := range prefix {
      +		if s == "" {
      +			return false
      +		}
      +		// step with s, too
      +		sr, size := utf8.DecodeRuneInString(s)
      +		if sr == utf8.RuneError {
      +			return false
      +		}
      +		s = s[size:]
      +		if !equalFoldRune(sr, pr) {
      +			return false
      +		}
      +	}
      +	return true
      +}
      +
      +// HasSuffixFold is like strings.HasPrefix but uses Unicode case-folding.
      +func HasSuffixFold(s, suffix string) bool {
      +	if suffix == "" {
      +		return true
      +	}
      +	// count the runes and bytes in s, but only till rune count of suffix
      +	bo, so := len(s), len(suffix)
      +	for bo > 0 && so > 0 {
      +		r, size := utf8.DecodeLastRuneInString(s[:bo])
      +		if r == utf8.RuneError {
      +			return false
      +		}
      +		bo -= size
      +
      +		sr, size := utf8.DecodeLastRuneInString(suffix[:so])
      +		if sr == utf8.RuneError {
      +			return false
      +		}
      +		so -= size
      +
      +		if !equalFoldRune(r, sr) {
      +			return false
      +		}
      +	}
      +	return so == 0
      +}
      +
      +// ContainsFold is like strings.Contains but uses Unicode case-folding.
      +func ContainsFold(s, substr string) bool {
      +	if substr == "" {
      +		return true
      +	}
      +	if s == "" {
      +		return false
      +	}
      +	firstRune := rune(substr[0])
      +	if firstRune >= utf8.RuneSelf {
      +		firstRune, _ = utf8.DecodeRuneInString(substr)
      +	}
      +	for i, rune := range s {
      +		if equalFoldRune(rune, firstRune) && HasPrefixFold(s[i:], substr) {
      +			return true
      +		}
      +	}
      +	return false
      +}
      +
      +// IsPlausibleJSON reports whether s likely contains a JSON object, without
      +// actually parsing it. It's meant to be a light heuristic.
      +func IsPlausibleJSON(s string) bool {
      +	return startsWithOpenBrace(s) && endsWithCloseBrace(s)
      +}
      +
      +func isASCIIWhite(b byte) bool { return b == ' ' || b == '\n' || b == '\r' || b == '\t' }
      +
      +func startsWithOpenBrace(s string) bool {
      +	for len(s) > 0 {
      +		switch {
      +		case s[0] == '{':
      +			return true
      +		case isASCIIWhite(s[0]):
      +			s = s[1:]
      +		default:
      +			return false
      +		}
      +	}
      +	return false
      +}
      +
      +func endsWithCloseBrace(s string) bool {
      +	for len(s) > 0 {
      +		last := len(s) - 1
      +		switch {
      +		case s[last] == '}':
      +			return true
      +		case isASCIIWhite(s[last]):
      +			s = s[:last]
      +		default:
      +			return false
      +		}
      +	}
      +	return false
      +}
      diff --git a/vendor/go4.org/strutil/strutil_test.go b/vendor/go4.org/strutil/strutil_test.go
      new file mode 100644
      index 00000000..fa93ee95
      --- /dev/null
      +++ b/vendor/go4.org/strutil/strutil_test.go
      @@ -0,0 +1,230 @@
      +/*
      +Copyright 2013 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package strutil
      +
      +import (
      +	"reflect"
      +	"strings"
      +	"testing"
      +)
      +
      +func TestAppendSplitN(t *testing.T) {
      +	var got []string
      +	tests := []struct {
      +		s, sep string
      +		n      int
      +	}{
      +		{"foo", "|", 1},
      +		{"foo", "|", -1},
      +		{"foo|bar", "|", 1},
      +		{"foo|bar", "|", -1},
      +		{"foo|bar|", "|", 2},
      +		{"foo|bar|", "|", -1},
      +		{"foo|bar|baz", "|", 1},
      +		{"foo|bar|baz", "|", 2},
      +		{"foo|bar|baz", "|", 3},
      +		{"foo|bar|baz", "|", -1},
      +	}
      +	for _, tt := range tests {
      +		want := strings.SplitN(tt.s, tt.sep, tt.n)
      +		got = AppendSplitN(got[:0], tt.s, tt.sep, tt.n)
      +		if !reflect.DeepEqual(want, got) {
      +			t.Errorf("AppendSplitN(%q, %q, %d) = %q; want %q",
      +				tt.s, tt.sep, tt.n, got, want)
      +		}
      +	}
      +}
      +
      +func TestStringFromBytes(t *testing.T) {
      +	for _, s := range []string{"foo", "permanode", "file", "zzzz"} {
      +		got := StringFromBytes([]byte(s))
      +		if got != s {
      +			t.Errorf("StringFromBytes(%q) didn't round-trip; got %q instead", s, got)
      +		}
      +	}
      +}
      +
      +func TestHasPrefixFold(t *testing.T) {
      +	tests := []struct {
      +		s, prefix string
      +		result    bool
      +	}{
      +		{"camli", "CAML", true},
      +		{"CAMLI", "caml", true},
      +		{"cam", "Cam", true},
      +		{"camli", "car", false},
      +		{"caml", "camli", false},
      +		{"Hello, 世界 dasdsa", "HeLlO, 世界", true},
      +		{"Hello, 世界", "HeLlO, 世界-", false},
      +
      +		{"kelvin", "\u212A" + "elvin", true}, // "\u212A" is the Kelvin temperature sign
      +		{"Kelvin", "\u212A" + "elvin", true},
      +		{"kelvin", "\u212A" + "el", true},
      +		{"Kelvin", "\u212A" + "el", true},
      +		{"\u212A" + "elvin", "Kelvin", true},
      +		{"\u212A" + "elvin", "kelvin", true},
      +		{"\u212A" + "elvin", "Kel", true},
      +		{"\u212A" + "elvin", "kel", true},
      +	}
      +	for _, tt := range tests {
      +		r := HasPrefixFold(tt.s, tt.prefix)
      +		if r != tt.result {
      +			t.Errorf("HasPrefixFold(%q, %q) returned %v", tt.s, tt.prefix, r)
      +		}
      +	}
      +}
      +
      +func TestHasSuffixFold(t *testing.T) {
      +	tests := []struct {
      +		s, suffix string
      +		result    bool
      +	}{
      +		{"camli", "AMLI", true},
      +		{"CAMLI", "amli", true},
      +		{"mli", "MLI", true},
      +		{"camli", "ali", false},
      +		{"amli", "camli", false},
      +		{"asas Hello, 世界", "HeLlO, 世界", true},
      +		{"Hello, 世界", "HeLlO, 世界-", false},
      +		{"KkkkKKkelvin", "\u212A" + "elvin", true}, // "\u212A" is the Kelvin temperature sign
      +
      +		{"kelvin", "\u212A" + "elvin", true}, // "\u212A" is the Kelvin temperature sign
      +		{"Kelvin", "\u212A" + "elvin", true},
      +		{"\u212A" + "elvin", "Kelvin", true},
      +		{"\u212A" + "elvin", "kelvin", true},
      +		{"\u212A" + "elvin", "vin", true},
      +		{"\u212A" + "elvin", "viN", true},
      +	}
      +	for _, tt := range tests {
      +		r := HasSuffixFold(tt.s, tt.suffix)
      +		if r != tt.result {
      +			t.Errorf("HasSuffixFold(%q, %q) returned %v", tt.s, tt.suffix, r)
      +		}
      +	}
      +}
      +
      +func TestContainsFold(t *testing.T) {
      +	// TODO: more tests, more languages.
      +	tests := []struct {
      +		s, substr string
      +		result    bool
      +	}{
      +		{"camli", "CAML", true},
      +		{"CAMLI", "caml", true},
      +		{"cam", "Cam", true},
      +		{"мир", "ми", true},
      +		{"МИP", "ми", true},
      +		{"КАМЛИЙСТОР", "камлийс", true},
      +		{"КаМлИйСтОр", "КаМлИйС", true},
      +		{"camli", "car", false},
      +		{"caml", "camli", false},
      +
      +		{"camli", "AMLI", true},
      +		{"CAMLI", "amli", true},
      +		{"mli", "MLI", true},
      +		{"мир", "ир", true},
      +		{"МИP", "ми", true},
      +		{"КАМЛИЙСТОР", "лийстор", true},
      +		{"КаМлИйСтОр", "лИйСтОр", true},
      +		{"мир", "р", true},
      +		{"camli", "ali", false},
      +		{"amli", "camli", false},
      +
      +		{"МИP", "и", true},
      +		{"мир", "и", true},
      +		{"КАМЛИЙСТОР", "лийс", true},
      +		{"КаМлИйСтОр", "лИйС", true},
      +
      +		{"árvíztűrő tükörfúrógép", "árvíztŰrŐ", true},
      +		{"I love ☕", "i love ☕", true},
      +
      +		{"k", "\u212A", true}, // "\u212A" is the Kelvin temperature sign
      +		{"\u212A" + "elvin", "k", true},
      +		{"kelvin", "\u212A" + "elvin", true},
      +		{"Kelvin", "\u212A" + "elvin", true},
      +		{"\u212A" + "elvin", "Kelvin", true},
      +		{"\u212A" + "elvin", "kelvin", true},
      +		{"273.15 kelvin", "\u212A" + "elvin", true},
      +		{"273.15 Kelvin", "\u212A" + "elvin", true},
      +		{"273.15 \u212A" + "elvin", "Kelvin", true},
      +		{"273.15 \u212A" + "elvin", "kelvin", true},
      +	}
      +	for _, tt := range tests {
      +		r := ContainsFold(tt.s, tt.substr)
      +		if r != tt.result {
      +			t.Errorf("ContainsFold(%q, %q) returned %v", tt.s, tt.substr, r)
      +		}
      +	}
      +}
      +
      +func TestIsPlausibleJSON(t *testing.T) {
      +	tests := []struct {
      +		in   string
      +		want bool
      +	}{
      +		{"{}", true},
      +		{" {}", true},
      +		{"{} ", true},
      +		{"\n\r\t {}\t \r \n", true},
      +
      +		{"\n\r\t {x\t \r \n", false},
      +		{"{x", false},
      +		{"x}", false},
      +		{"x", false},
      +		{"", false},
      +	}
      +	for _, tt := range tests {
      +		got := IsPlausibleJSON(tt.in)
      +		if got != tt.want {
      +			t.Errorf("IsPlausibleJSON(%q) = %v; want %v", tt.in, got, tt.want)
      +		}
      +	}
      +}
      +
      +func BenchmarkHasSuffixFoldToLower(tb *testing.B) {
      +	a, b := "camlik", "AMLI\u212A"
      +	for i := 0; i < tb.N; i++ {
      +		if !strings.HasSuffix(strings.ToLower(a), strings.ToLower(b)) {
      +			tb.Fatalf("%q should have the same suffix as %q", a, b)
      +		}
      +	}
      +}
      +func BenchmarkHasSuffixFold(tb *testing.B) {
      +	a, b := "camlik", "AMLI\u212A"
      +	for i := 0; i < tb.N; i++ {
      +		if !HasSuffixFold(a, b) {
      +			tb.Fatalf("%q should have the same suffix as %q", a, b)
      +		}
      +	}
      +}
      +
      +func BenchmarkHasPrefixFoldToLower(tb *testing.B) {
      +	a, b := "kamlistore", "\u212AAMLI"
      +	for i := 0; i < tb.N; i++ {
      +		if !strings.HasPrefix(strings.ToLower(a), strings.ToLower(b)) {
      +			tb.Fatalf("%q should have the same suffix as %q", a, b)
      +		}
      +	}
      +}
      +func BenchmarkHasPrefixFold(tb *testing.B) {
      +	a, b := "kamlistore", "\u212AAMLI"
      +	for i := 0; i < tb.N; i++ {
      +		if !HasPrefixFold(a, b) {
      +			tb.Fatalf("%q should have the same suffix as %q", a, b)
      +		}
      +	}
      +}
      diff --git a/vendor/go4.org/syncutil/gate.go b/vendor/go4.org/syncutil/gate.go
      new file mode 100644
      index 00000000..497c7a5a
      --- /dev/null
      +++ b/vendor/go4.org/syncutil/gate.go
      @@ -0,0 +1,42 @@
      +/*
      +Copyright 2013 Google Inc.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package syncutil provides various concurrency mechanisms.
      +package syncutil
      +
      +// A Gate limits concurrency.
      +type Gate struct {
      +	c chan struct{}
      +}
      +
      +// NewGate returns a new gate that will only permit max operations at once.
      +func NewGate(max int) *Gate {
      +	return &Gate{make(chan struct{}, max)}
      +}
      +
      +// Start starts an operation, blocking until the gate has room.
      +func (g *Gate) Start() {
      +	g.c <- struct{}{}
      +}
      +
      +// Done finishes an operation.
      +func (g *Gate) Done() {
      +	select {
      +	case <-g.c:
      +	default:
      +		panic("Done called more than Start")
      +	}
      +}
      diff --git a/vendor/go4.org/syncutil/group.go b/vendor/go4.org/syncutil/group.go
      new file mode 100644
      index 00000000..dacef4c4
      --- /dev/null
      +++ b/vendor/go4.org/syncutil/group.go
      @@ -0,0 +1,64 @@
      +/*
      +Copyright 2013 Google Inc.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package syncutil
      +
      +import "sync"
      +
      +// A Group is like a sync.WaitGroup and coordinates doing
      +// multiple things at once. Its zero value is ready to use.
      +type Group struct {
      +	wg   sync.WaitGroup
      +	mu   sync.Mutex // guards errs
      +	errs []error
      +}
      +
      +// Go runs fn in its own goroutine, but does not wait for it to complete.
      +// Call Err or Errs to wait for all the goroutines to complete.
      +func (g *Group) Go(fn func() error) {
      +	g.wg.Add(1)
      +	go func() {
      +		defer g.wg.Done()
      +		err := fn()
      +		if err != nil {
      +			g.mu.Lock()
      +			defer g.mu.Unlock()
      +			g.errs = append(g.errs, err)
      +		}
      +	}()
      +}
      +
      +// Wait waits for all the previous calls to Go to complete.
      +func (g *Group) Wait() {
      +	g.wg.Wait()
      +}
      +
      +// Err waits for all previous calls to Go to complete and returns the
      +// first non-nil error, or nil.
      +func (g *Group) Err() error {
      +	g.wg.Wait()
      +	if len(g.errs) > 0 {
      +		return g.errs[0]
      +	}
      +	return nil
      +}
      +
      +// Errs waits for all previous calls to Go to complete and returns
      +// all non-nil errors.
      +func (g *Group) Errs() []error {
      +	g.wg.Wait()
      +	return g.errs
      +}
      diff --git a/vendor/go4.org/syncutil/once.go b/vendor/go4.org/syncutil/once.go
      new file mode 100644
      index 00000000..1123f092
      --- /dev/null
      +++ b/vendor/go4.org/syncutil/once.go
      @@ -0,0 +1,60 @@
      +/*
      +Copyright 2014 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package syncutil
      +
      +import (
      +	"sync"
      +	"sync/atomic"
      +)
      +
      +// A Once will perform a successful action exactly once.
      +//
      +// Unlike a sync.Once, this Once's func returns an error
      +// and is re-armed on failure.
      +type Once struct {
      +	m    sync.Mutex
      +	done uint32
      +}
      +
      +// Do calls the function f if and only if Do has not been invoked
      +// without error for this instance of Once.  In other words, given
      +// 	var once Once
      +// if once.Do(f) is called multiple times, only the first call will
      +// invoke f, even if f has a different value in each invocation unless
      +// f returns an error.  A new instance of Once is required for each
      +// function to execute.
      +//
      +// Do is intended for initialization that must be run exactly once.  Since f
      +// is niladic, it may be necessary to use a function literal to capture the
      +// arguments to a function to be invoked by Do:
      +// 	err := config.once.Do(func() error { return config.init(filename) })
      +func (o *Once) Do(f func() error) error {
      +	if atomic.LoadUint32(&o.done) == 1 {
      +		return nil
      +	}
      +	// Slow-path.
      +	o.m.Lock()
      +	defer o.m.Unlock()
      +	var err error
      +	if o.done == 0 {
      +		err = f()
      +		if err == nil {
      +			atomic.StoreUint32(&o.done, 1)
      +		}
      +	}
      +	return err
      +}
      diff --git a/vendor/go4.org/syncutil/once_test.go b/vendor/go4.org/syncutil/once_test.go
      new file mode 100644
      index 00000000..e321d509
      --- /dev/null
      +++ b/vendor/go4.org/syncutil/once_test.go
      @@ -0,0 +1,57 @@
      +package syncutil
      +
      +import (
      +	"errors"
      +	"testing"
      +)
      +
      +func TestOnce(t *testing.T) {
      +	timesRan := 0
      +	f := func() error {
      +		timesRan++
      +		return nil
      +	}
      +
      +	once := Once{}
      +	grp := Group{}
      +
      +	for i := 0; i < 10; i++ {
      +		grp.Go(func() error { return once.Do(f) })
      +	}
      +
      +	if grp.Err() != nil {
      +		t.Errorf("Expected no errors, got %v", grp.Err())
      +	}
      +
      +	if timesRan != 1 {
      +		t.Errorf("Expected to run one time, ran %d", timesRan)
      +	}
      +}
      +
      +// TestOnceErroring verifies we retry on every error, but stop after
      +// the first success.
      +func TestOnceErroring(t *testing.T) {
      +	timesRan := 0
      +	f := func() error {
      +		timesRan++
      +		if timesRan < 3 {
      +			return errors.New("retry")
      +		}
      +		return nil
      +	}
      +
      +	once := Once{}
      +	grp := Group{}
      +
      +	for i := 0; i < 10; i++ {
      +		grp.Go(func() error { return once.Do(f) })
      +	}
      +
      +	if len(grp.Errs()) != 2 {
      +		t.Errorf("Expected two errors, got %d", len(grp.Errs()))
      +	}
      +
      +	if timesRan != 3 {
      +		t.Errorf("Expected to run two times, ran %d", timesRan)
      +	}
      +}
      diff --git a/vendor/go4.org/syncutil/sem.go b/vendor/go4.org/syncutil/sem.go
      new file mode 100644
      index 00000000..092655ff
      --- /dev/null
      +++ b/vendor/go4.org/syncutil/sem.go
      @@ -0,0 +1,64 @@
      +package syncutil
      +
      +import (
      +	"fmt"
      +	"log"
      +	"sync"
      +)
      +
      +type debugT bool
      +
      +var debug = debugT(false)
      +
      +func (d debugT) Printf(format string, args ...interface{}) {
      +	if bool(d) {
      +		log.Printf(format, args...)
      +	}
      +}
      +
      +// Sem implements a semaphore that can have multiple units acquired/released
      +// at a time.
      +type Sem struct {
      +	c         *sync.Cond // Protects size
      +	max, free int64
      +}
      +
      +// NewSem creates a semaphore with max units available for acquisition.
      +func NewSem(max int64) *Sem {
      +	return &Sem{
      +		c:    sync.NewCond(new(sync.Mutex)),
      +		free: max,
      +		max:  max,
      +	}
      +}
      +
      +// Acquire will deduct n units from the semaphore.  If the deduction would
      +// result in the available units falling below zero, the call will block until
      +// another go routine returns units via a call to Release.  If more units are
      +// requested than the semaphore is configured to hold, error will be non-nil.
      +func (s *Sem) Acquire(n int64) error {
      +	if n > s.max {
      +		return fmt.Errorf("sem: attempt to acquire more units than semaphore size %d > %d", n, s.max)
      +	}
      +	s.c.L.Lock()
      +	defer s.c.L.Unlock()
      +	for {
      +		debug.Printf("Acquire check max %d free %d, n %d", s.max, s.free, n)
      +		if s.free >= n {
      +			s.free -= n
      +			return nil
      +		}
      +		debug.Printf("Acquire Wait max %d free %d, n %d", s.max, s.free, n)
      +		s.c.Wait()
      +	}
      +}
      +
      +// Release will return n units to the semaphore and notify any currently
      +// blocking Acquire calls.
      +func (s *Sem) Release(n int64) {
      +	s.c.L.Lock()
      +	defer s.c.L.Unlock()
      +	debug.Printf("Release max %d free %d, n %d", s.max, s.free, n)
      +	s.free += n
      +	s.c.Broadcast()
      +}
      diff --git a/vendor/go4.org/syncutil/sem_test.go b/vendor/go4.org/syncutil/sem_test.go
      new file mode 100644
      index 00000000..59380e76
      --- /dev/null
      +++ b/vendor/go4.org/syncutil/sem_test.go
      @@ -0,0 +1,33 @@
      +package syncutil_test
      +
      +import (
      +	"testing"
      +
      +	"go4.org/syncutil"
      +)
      +
      +func TestSem(t *testing.T) {
      +	s := syncutil.NewSem(5)
      +
      +	if err := s.Acquire(2); err != nil {
      +		t.Fatal(err)
      +	}
      +	if err := s.Acquire(2); err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	go func() {
      +		s.Release(2)
      +		s.Release(2)
      +	}()
      +	if err := s.Acquire(5); err != nil {
      +		t.Fatal(err)
      +	}
      +}
      +
      +func TestSemErr(t *testing.T) {
      +	s := syncutil.NewSem(5)
      +	if err := s.Acquire(6); err == nil {
      +		t.Fatal("Didn't get expected error for large acquire.")
      +	}
      +}
      diff --git a/vendor/go4.org/syncutil/singleflight/singleflight.go b/vendor/go4.org/syncutil/singleflight/singleflight.go
      new file mode 100644
      index 00000000..ee2e1b3e
      --- /dev/null
      +++ b/vendor/go4.org/syncutil/singleflight/singleflight.go
      @@ -0,0 +1,64 @@
      +/*
      +Copyright 2013 Google Inc.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package singleflight provides a duplicate function call suppression
      +// mechanism.
      +package singleflight // import "go4.org/syncutil/singleflight"
      +
      +import "sync"
      +
      +// call is an in-flight or completed Do call
      +type call struct {
      +	wg  sync.WaitGroup
      +	val interface{}
      +	err error
      +}
      +
      +// Group represents a class of work and forms a namespace in which
      +// units of work can be executed with duplicate suppression.
      +type Group struct {
      +	mu sync.Mutex       // protects m
      +	m  map[string]*call // lazily initialized
      +}
      +
      +// Do executes and returns the results of the given function, making
      +// sure that only one execution is in-flight for a given key at a
      +// time. If a duplicate comes in, the duplicate caller waits for the
      +// original to complete and receives the same results.
      +func (g *Group) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
      +	g.mu.Lock()
      +	if g.m == nil {
      +		g.m = make(map[string]*call)
      +	}
      +	if c, ok := g.m[key]; ok {
      +		g.mu.Unlock()
      +		c.wg.Wait()
      +		return c.val, c.err
      +	}
      +	c := new(call)
      +	c.wg.Add(1)
      +	g.m[key] = c
      +	g.mu.Unlock()
      +
      +	c.val, c.err = fn()
      +	c.wg.Done()
      +
      +	g.mu.Lock()
      +	delete(g.m, key)
      +	g.mu.Unlock()
      +
      +	return c.val, c.err
      +}
      diff --git a/vendor/go4.org/syncutil/singleflight/singleflight_test.go b/vendor/go4.org/syncutil/singleflight/singleflight_test.go
      new file mode 100644
      index 00000000..40edcf30
      --- /dev/null
      +++ b/vendor/go4.org/syncutil/singleflight/singleflight_test.go
      @@ -0,0 +1,85 @@
      +/*
      +Copyright 2013 Google Inc.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package singleflight
      +
      +import (
      +	"errors"
      +	"fmt"
      +	"sync"
      +	"sync/atomic"
      +	"testing"
      +	"time"
      +)
      +
      +func TestDo(t *testing.T) {
      +	var g Group
      +	v, err := g.Do("key", func() (interface{}, error) {
      +		return "bar", nil
      +	})
      +	if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want {
      +		t.Errorf("Do = %v; want %v", got, want)
      +	}
      +	if err != nil {
      +		t.Errorf("Do error = %v", err)
      +	}
      +}
      +
      +func TestDoErr(t *testing.T) {
      +	var g Group
      +	someErr := errors.New("Some error")
      +	v, err := g.Do("key", func() (interface{}, error) {
      +		return nil, someErr
      +	})
      +	if err != someErr {
      +		t.Errorf("Do error = %v; want someErr %v", err, someErr)
      +	}
      +	if v != nil {
      +		t.Errorf("unexpected non-nil value %#v", v)
      +	}
      +}
      +
      +func TestDoDupSuppress(t *testing.T) {
      +	var g Group
      +	c := make(chan string)
      +	var calls int32
      +	fn := func() (interface{}, error) {
      +		atomic.AddInt32(&calls, 1)
      +		return <-c, nil
      +	}
      +
      +	const n = 10
      +	var wg sync.WaitGroup
      +	for i := 0; i < n; i++ {
      +		wg.Add(1)
      +		go func() {
      +			v, err := g.Do("key", fn)
      +			if err != nil {
      +				t.Errorf("Do error: %v", err)
      +			}
      +			if v.(string) != "bar" {
      +				t.Errorf("got %q; want %q", v, "bar")
      +			}
      +			wg.Done()
      +		}()
      +	}
      +	time.Sleep(100 * time.Millisecond) // let goroutines above block
      +	c <- "bar"
      +	wg.Wait()
      +	if got := atomic.LoadInt32(&calls); got != 1 {
      +		t.Errorf("number of calls = %d; want 1", got)
      +	}
      +}
      diff --git a/vendor/go4.org/syncutil/syncdebug/syncdebug.go b/vendor/go4.org/syncutil/syncdebug/syncdebug.go
      new file mode 100644
      index 00000000..da664517
      --- /dev/null
      +++ b/vendor/go4.org/syncutil/syncdebug/syncdebug.go
      @@ -0,0 +1,198 @@
      +/*
      +Copyright 2013 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package syncdebug contains facilities for debugging synchronization
      +// problems.
      +package syncdebug // import "go4.org/syncutil/syncdebug"
      +
      +import (
      +	"bytes"
      +	"fmt"
      +	"log"
      +	"runtime"
      +	"sync"
      +	"sync/atomic"
      +	"time"
      +
      +	"go4.org/strutil"
      +)
      +
// RWMutexTracker is a sync.RWMutex that tracks who owns the current
// exclusive lock. It's used for debugging deadlocks.
type RWMutexTracker struct {
	mu sync.RWMutex // the real lock being instrumented

	// Atomic counters for number waiting and having read and write locks.
	nwaitr int32
	nwaitw int32
	nhaver int32
	nhavew int32 // should always be 0 or 1

	// logOnce starts the per-mutex background state logger the first
	// time Lock or RLock is called.
	logOnce sync.Once

	hmu    sync.Mutex // guards holder and holdr
	holder []byte     // stack of the current exclusive holder, captured in Lock
	holdr  map[int64]bool // goroutines holding read lock
}
      +
      +const stackBufSize = 16 << 20
      +
      +var stackBuf = make(chan []byte, 8)
      +
      +func getBuf() []byte {
      +	select {
      +	case b := <-stackBuf:
      +		return b[:stackBufSize]
      +	default:
      +		return make([]byte, stackBufSize)
      +	}
      +}
      +
      +func putBuf(b []byte) {
      +	select {
      +	case stackBuf <- b:
      +	default:
      +	}
      +}
      +
      +var goroutineSpace = []byte("goroutine ")
      +
      +// GoroutineID returns the current goroutine's ID.
      +// Use of this function is almost always a terrible idea.
      +// It is also very slow.
      +// GoroutineID is intended only for debugging.
      +// In particular, it is used by syncutil.
      +func GoroutineID() int64 {
      +	b := getBuf()
      +	defer putBuf(b)
      +	b = b[:runtime.Stack(b, false)]
      +	// Parse the 4707 out of "goroutine 4707 ["
      +	b = bytes.TrimPrefix(b, goroutineSpace)
      +	i := bytes.IndexByte(b, ' ')
      +	if i < 0 {
      +		panic(fmt.Sprintf("No space found in %q", b))
      +	}
      +	b = b[:i]
      +	n, err := strutil.ParseUintBytes(b, 10, 64)
      +	if err != nil {
      +		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
      +	}
      +	return int64(n)
      +}
      +
      +func (m *RWMutexTracker) startLogger() {
      +	go func() {
      +		var buf bytes.Buffer
      +		for {
      +			time.Sleep(1 * time.Second)
      +			buf.Reset()
      +			m.hmu.Lock()
      +			for gid := range m.holdr {
      +				fmt.Fprintf(&buf, " [%d]", gid)
      +			}
      +			m.hmu.Unlock()
      +			log.Printf("Mutex %p: waitW %d haveW %d   waitR %d haveR %d %s",
      +				m,
      +				atomic.LoadInt32(&m.nwaitw),
      +				atomic.LoadInt32(&m.nhavew),
      +				atomic.LoadInt32(&m.nwaitr),
      +				atomic.LoadInt32(&m.nhaver), buf.Bytes())
      +		}
      +	}()
      +}
      +
// Lock acquires the write lock, maintaining the waiting/holding
// counters and recording the acquirer's stack trace in m.holder.
// NOTE(review): it logs on every acquisition; this type is strictly a
// debugging aid, not for production use.
func (m *RWMutexTracker) Lock() {
	m.logOnce.Do(m.startLogger)
	// Count ourselves as waiting until the underlying lock is ours.
	atomic.AddInt32(&m.nwaitw, 1)
	m.mu.Lock()
	atomic.AddInt32(&m.nwaitw, -1)
	atomic.AddInt32(&m.nhavew, 1)

	// Record our stack as the current exclusive holder (see Holder).
	m.hmu.Lock()
	defer m.hmu.Unlock()
	if len(m.holder) == 0 {
		m.holder = make([]byte, stackBufSize)
	}
	m.holder = m.holder[:runtime.Stack(m.holder[:stackBufSize], false)]
	log.Printf("Lock at %s", string(m.holder))
}
      +
// Unlock releases the write lock, clearing the recorded holder stack
// before dropping the holder count and the underlying lock.
func (m *RWMutexTracker) Unlock() {
	m.hmu.Lock()
	m.holder = nil
	m.hmu.Unlock()

	atomic.AddInt32(&m.nhavew, -1)
	m.mu.Unlock()
}
      +
// RLock acquires a read lock, tracking the acquiring goroutine's ID so
// that recursive read-locking (which can deadlock behind a queued
// writer) is detected and reported.
func (m *RWMutexTracker) RLock() {
	m.logOnce.Do(m.startLogger)
	atomic.AddInt32(&m.nwaitr, 1)

	// Catch read-write-read lock. See if somebody (us? via
	// another goroutine?) already has a read lock, and then
	// somebody else is waiting to write, meaning our second read
	// will deadlock.
	if atomic.LoadInt32(&m.nhaver) > 0 && atomic.LoadInt32(&m.nwaitw) > 0 {
		buf := getBuf()
		buf = buf[:runtime.Stack(buf, false)]
		log.Printf("Potential R-W-R deadlock at: %s", buf)
		putBuf(buf)
	}

	m.mu.RLock()
	atomic.AddInt32(&m.nwaitr, -1)
	atomic.AddInt32(&m.nhaver, 1)

	// Remember this goroutine as a read holder; a second RLock from
	// the same goroutine is treated as fatal.
	gid := GoroutineID()
	m.hmu.Lock()
	defer m.hmu.Unlock()
	if m.holdr == nil {
		m.holdr = make(map[int64]bool)
	}
	if m.holdr[gid] {
		buf := getBuf()
		buf = buf[:runtime.Stack(buf, false)]
		log.Fatalf("Recursive call to RLock: %s", buf)
	}
	m.holdr[gid] = true
}
      +
      +func stack() []byte {
      +	buf := make([]byte, 1024)
      +	return buf[:runtime.Stack(buf, false)]
      +}
      +
// RUnlock releases a read lock and forgets this goroutine's read-holder
// registration (see RLock).
func (m *RWMutexTracker) RUnlock() {
	atomic.AddInt32(&m.nhaver, -1)

	gid := GoroutineID()
	m.hmu.Lock()
	delete(m.holdr, gid)
	m.hmu.Unlock()

	m.mu.RUnlock()
}
      +
      +// Holder returns the stack trace of the current exclusive lock holder's stack
      +// when it acquired the lock (with Lock). It returns the empty string if the lock
      +// is not currently held.
      +func (m *RWMutexTracker) Holder() string {
      +	m.hmu.Lock()
      +	defer m.hmu.Unlock()
      +	return string(m.holder)
      +}
      diff --git a/vendor/go4.org/syncutil/syncdebug/syncdebug_test.go b/vendor/go4.org/syncutil/syncdebug/syncdebug_test.go
      new file mode 100644
      index 00000000..17fb5003
      --- /dev/null
      +++ b/vendor/go4.org/syncutil/syncdebug/syncdebug_test.go
      @@ -0,0 +1,30 @@
      +/*
      +Copyright 2013 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package syncdebug
      +
      +import "testing"
      +
      +func TestGoroutineID(t *testing.T) {
      +	c := make(chan int64, 2)
      +	c <- GoroutineID()
      +	go func() {
      +		c <- GoroutineID()
      +	}()
      +	if a, b := <-c, <-c; a == b {
      +		t.Errorf("both goroutine IDs were %d; expected different", a)
      +	}
      +}
      diff --git a/vendor/go4.org/syncutil/syncutil.go b/vendor/go4.org/syncutil/syncutil.go
      new file mode 100644
      index 00000000..c914c793
      --- /dev/null
      +++ b/vendor/go4.org/syncutil/syncutil.go
      @@ -0,0 +1,18 @@
      +/*
      +Copyright 2014 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package syncutil contains synchronization utilities.
      +package syncutil // import "go4.org/syncutil"
      diff --git a/vendor/go4.org/types/types.go b/vendor/go4.org/types/types.go
      new file mode 100644
      index 00000000..7e474c72
      --- /dev/null
      +++ b/vendor/go4.org/types/types.go
      @@ -0,0 +1,147 @@
      +/*
      +Copyright 2013 Google Inc.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package types provides various common types.
      +package types // import "go4.org/types"
      +
      +import (
      +	"bytes"
      +	"encoding/json"
      +	"fmt"
      +	"io"
      +	"io/ioutil"
      +	"strings"
      +	"sync"
      +	"time"
      +)
      +
// null_b is the JSON "null" literal, shared by Time3339's
// (Un)MarshalJSON for the zero time.
var null_b = []byte("null")

// NopCloser is an io.Closer that does nothing.
var NopCloser io.Closer = CloseFunc(func() error { return nil })

// EmptyBody is a ReadCloser that returns EOF on Read and does nothing
// on Close.
var EmptyBody io.ReadCloser = ioutil.NopCloser(strings.NewReader(""))
      +
      +// Time3339 is a time.Time which encodes to and from JSON
      +// as an RFC 3339 time in UTC.
      +type Time3339 time.Time
      +
      +var (
      +	_ json.Marshaler   = Time3339{}
      +	_ json.Unmarshaler = (*Time3339)(nil)
      +)
      +
      +func (t Time3339) String() string {
      +	return time.Time(t).UTC().Format(time.RFC3339Nano)
      +}
      +
      +func (t Time3339) MarshalJSON() ([]byte, error) {
      +	if t.Time().IsZero() {
      +		return null_b, nil
      +	}
      +	return json.Marshal(t.String())
      +}
      +
      +func (t *Time3339) UnmarshalJSON(b []byte) error {
      +	if bytes.Equal(b, null_b) {
      +		*t = Time3339{}
      +		return nil
      +	}
      +	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
      +		return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b)
      +	}
      +	s := string(b[1 : len(b)-1])
      +	if s == "" {
      +		*t = Time3339{}
      +		return nil
      +	}
      +	tm, err := time.Parse(time.RFC3339Nano, s)
      +	if err != nil {
      +		if strings.HasPrefix(s, "0000-00-00T00:00:00") {
      +			*t = Time3339{}
      +			return nil
      +		}
      +		return err
      +	}
      +	*t = Time3339(tm)
      +	return nil
      +}
      +
      +// ParseTime3339OrZero parses a string in RFC3339 format. If it's invalid,
      +// the zero time value is returned instead.
      +func ParseTime3339OrZero(v string) Time3339 {
      +	t, err := time.Parse(time.RFC3339Nano, v)
      +	if err != nil {
      +		return Time3339{}
      +	}
      +	return Time3339(t)
      +}
      +
      +func ParseTime3339OrNil(v string) *Time3339 {
      +	t, err := time.Parse(time.RFC3339Nano, v)
      +	if err != nil {
      +		return nil
      +	}
      +	tm := Time3339(t)
      +	return &tm
      +}
      +
      +// Time returns the time as a time.Time with slightly less stutter
      +// than a manual conversion.
      +func (t Time3339) Time() time.Time {
      +	return time.Time(t)
      +}
      +
      +// IsZero returns whether the time is Go zero or Unix zero.
      +func (t *Time3339) IsAnyZero() bool {
      +	return t == nil || time.Time(*t).IsZero() || time.Time(*t).Unix() == 0
      +}
      +
      +// ByTime sorts times.
      +type ByTime []time.Time
      +
      +func (s ByTime) Len() int           { return len(s) }
      +func (s ByTime) Less(i, j int) bool { return s[i].Before(s[j]) }
      +func (s ByTime) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
      +
      +// NewOnceCloser returns a Closer wrapping c which only calls Close on c
      +// once. Subsequent calls to Close return nil.
      +func NewOnceCloser(c io.Closer) io.Closer {
      +	return &onceCloser{c: c}
      +}
      +
      +type onceCloser struct {
      +	mu sync.Mutex
      +	c  io.Closer
      +}
      +
      +func (c *onceCloser) Close() error {
      +	c.mu.Lock()
      +	defer c.mu.Unlock()
      +	if c.c == nil {
      +		return nil
      +	}
      +	err := c.c.Close()
      +	c.c = nil
      +	return err
      +}
      +
      +// CloseFunc implements io.Closer with a function.
      +type CloseFunc func() error
      +
      +func (fn CloseFunc) Close() error { return fn() }
      diff --git a/vendor/go4.org/types/types_test.go b/vendor/go4.org/types/types_test.go
      new file mode 100644
      index 00000000..73571b8b
      --- /dev/null
      +++ b/vendor/go4.org/types/types_test.go
      @@ -0,0 +1,103 @@
      +/*
      +Copyright 2013 Google Inc.
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package types
      +
      +import (
      +	"encoding/json"
      +	"strings"
      +	"testing"
      +	"time"
      +)
      +
      +func TestTime3339(t *testing.T) {
      +	tm := time.Unix(123, 456)
      +	t3 := Time3339(tm)
      +	type O struct {
      +		SomeTime Time3339 `json:"someTime"`
      +	}
      +	o := &O{SomeTime: t3}
      +	got, err := json.Marshal(o)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	goodEnc := "{\"someTime\":\"1970-01-01T00:02:03.000000456Z\"}"
      +	if string(got) != goodEnc {
      +		t.Errorf("Encoding wrong.\n Got: %q\nWant: %q", got, goodEnc)
      +	}
      +	ogot := &O{}
      +	err = json.Unmarshal([]byte(goodEnc), ogot)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	if !tm.Equal(ogot.SomeTime.Time()) {
      +		t.Errorf("Unmarshal got time %v; want %v", ogot.SomeTime.Time(), tm)
      +	}
      +}
      +
      +func TestTime3339_Marshal(t *testing.T) {
      +	tests := []struct {
      +		in   time.Time
      +		want string
      +	}{
      +		{time.Time{}, "null"},
      +		{time.Unix(1, 0), `"1970-01-01T00:00:01Z"`},
      +	}
      +	for i, tt := range tests {
      +		got, err := Time3339(tt.in).MarshalJSON()
      +		if err != nil {
      +			t.Errorf("%d. marshal(%v) got error: %v", i, tt.in, err)
      +			continue
      +		}
      +		if string(got) != tt.want {
      +			t.Errorf("%d. marshal(%v) = %q; want %q", i, tt.in, got, tt.want)
      +		}
      +	}
      +}
      +
      +func TestTime3339_empty(t *testing.T) {
      +	tests := []struct {
      +		enc string
      +		z   bool
      +	}{
      +		{enc: "null", z: true},
      +		{enc: `""`, z: true},
      +		{enc: "0000-00-00T00:00:00Z", z: true},
      +		{enc: "0001-01-01T00:00:00Z", z: true},
      +		{enc: "1970-01-01T00:00:00Z", z: true},
      +		{enc: "2001-02-03T04:05:06Z", z: false},
      +		{enc: "2001-02-03T04:05:06+06:00", z: false},
      +		{enc: "2001-02-03T04:05:06-06:00", z: false},
      +		{enc: "2001-02-03T04:05:06.123456789Z", z: false},
      +		{enc: "2001-02-03T04:05:06.123456789+06:00", z: false},
      +		{enc: "2001-02-03T04:05:06.123456789-06:00", z: false},
      +	}
      +	for _, tt := range tests {
      +		var tm Time3339
      +		enc := tt.enc
      +		if strings.Contains(enc, "T") {
      +			enc = "\"" + enc + "\""
      +		}
      +		err := json.Unmarshal([]byte(enc), &tm)
      +		if err != nil {
      +			t.Errorf("unmarshal %q = %v", enc, err)
      +		}
      +		if tm.IsAnyZero() != tt.z {
      +			t.Errorf("unmarshal %q = %v (%d), %v; zero=%v; want %v", tt.enc, tm.Time(), tm.Time().Unix(), err,
      +				!tt.z, tt.z)
      +		}
      +	}
      +}
      diff --git a/vendor/go4.org/wkfs/gcs/gcs.go b/vendor/go4.org/wkfs/gcs/gcs.go
      new file mode 100644
      index 00000000..21f25511
      --- /dev/null
      +++ b/vendor/go4.org/wkfs/gcs/gcs.go
      @@ -0,0 +1,196 @@
      +/*
      +Copyright 2014 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package gcs registers a Google Cloud Storage filesystem at the
      +// well-known /gcs/ filesystem path if the current machine is running
      +// on Google Compute Engine.
      +//
      +// It was initially only meant for small files, and as such, it can only
      +// read files smaller than 1MB for now.
      +package gcs // import "go4.org/wkfs/gcs"
      +
      +import (
      +	"bytes"
      +	"fmt"
      +	"io"
      +	"io/ioutil"
      +	"os"
      +	"path"
      +	"strings"
      +	"time"
      +
      +	"go4.org/wkfs"
      +	"golang.org/x/net/context"
      +	"golang.org/x/oauth2"
      +	"golang.org/x/oauth2/google"
      +	"google.golang.org/cloud"
      +	"google.golang.org/cloud/compute/metadata"
      +	"google.golang.org/cloud/storage"
      +)
      +
      +// Max size for all files read, because we use a bytes.Reader as our file
      +// reader, instead of storage.NewReader. This is because we get all wkfs.File
      +// methods for free by embedding a bytes.Reader. This filesystem was only supposed
      +// to be for configuration data only, so this is ok for now.
      +const maxSize = 1 << 20
      +
      +func init() {
      +	if !metadata.OnGCE() {
      +		return
      +	}
      +	hc, err := google.DefaultClient(oauth2.NoContext)
      +	if err != nil {
      +		registerBrokenFS(fmt.Errorf("could not get http client for context: %v", err))
      +		return
      +	}
      +	projID, err := metadata.ProjectID()
      +	if projID == "" || err != nil {
      +		registerBrokenFS(fmt.Errorf("could not get GCE project ID: %v", err))
      +		return
      +	}
      +	ctx := cloud.NewContext(projID, hc)
      +	sc, err := storage.NewClient(ctx)
      +	if err != nil {
      +		registerBrokenFS(fmt.Errorf("could not get cloud storage client: %v", err))
      +		return
      +	}
      +	wkfs.RegisterFS("/gcs/", &gcsFS{
      +		ctx: ctx,
      +		sc:  sc,
      +	})
      +}
      +
      +type gcsFS struct {
      +	ctx context.Context
      +	sc  *storage.Client
      +	err error // sticky error
      +}
      +
      +func registerBrokenFS(err error) {
      +	wkfs.RegisterFS("/gcs/", &gcsFS{
      +		err: err,
      +	})
      +}
      +
      +func (fs *gcsFS) parseName(name string) (bucket, fileName string, err error) {
      +	if fs.err != nil {
      +		return "", "", fs.err
      +	}
      +	name = strings.TrimPrefix(name, "/gcs/")
      +	i := strings.Index(name, "/")
      +	if i < 0 {
      +		return name, "", nil
      +	}
      +	return name[:i], name[i+1:], nil
      +}
      +
      +// Open opens the named file for reading. It returns an error if the file size
      +// is larger than 1 << 20.
      +func (fs *gcsFS) Open(name string) (wkfs.File, error) {
      +	bucket, fileName, err := fs.parseName(name)
      +	if err != nil {
      +		return nil, err
      +	}
      +	obj := fs.sc.Bucket(bucket).Object(fileName)
      +	attrs, err := obj.Attrs(fs.ctx)
      +	if err != nil {
      +		return nil, err
      +	}
      +	size := attrs.Size
      +	if size > maxSize {
      +		return nil, fmt.Errorf("file %s too large (%d bytes) for /gcs/ filesystem", name, size)
      +	}
      +	rc, err := obj.NewReader(fs.ctx)
      +	if err != nil {
      +		return nil, err
      +	}
      +	defer rc.Close()
      +
      +	slurp, err := ioutil.ReadAll(io.LimitReader(rc, size))
      +	if err != nil {
      +		return nil, err
      +	}
      +	return &file{
      +		name:   name,
      +		Reader: bytes.NewReader(slurp),
      +	}, nil
      +}
      +
      +func (fs *gcsFS) Stat(name string) (os.FileInfo, error) { return fs.Lstat(name) }
      +func (fs *gcsFS) Lstat(name string) (os.FileInfo, error) {
      +	bucket, fileName, err := fs.parseName(name)
      +	if err != nil {
      +		return nil, err
      +	}
      +	attrs, err := fs.sc.Bucket(bucket).Object(fileName).Attrs(fs.ctx)
      +	if err == storage.ErrObjectNotExist {
      +		return nil, os.ErrNotExist
      +	}
      +	if err != nil {
      +		return nil, err
      +	}
      +	return &statInfo{
      +		name: attrs.Name,
      +		size: attrs.Size,
      +	}, nil
      +}
      +
      +func (fs *gcsFS) MkdirAll(path string, perm os.FileMode) error { return nil }
      +
      +func (fs *gcsFS) OpenFile(name string, flag int, perm os.FileMode) (wkfs.FileWriter, error) {
      +	bucket, fileName, err := fs.parseName(name)
      +	if err != nil {
      +		return nil, err
      +	}
      +	switch flag {
      +	case os.O_WRONLY | os.O_CREATE | os.O_EXCL:
      +	case os.O_WRONLY | os.O_CREATE | os.O_TRUNC:
      +	default:
      +		return nil, fmt.Errorf("Unsupported OpenFlag flag mode %d on Google Cloud Storage", flag)
      +	}
      +	if flag&os.O_EXCL != 0 {
      +		if _, err := fs.Stat(name); err == nil {
      +			return nil, os.ErrExist
      +		}
      +	}
      +	// TODO(mpl): consider adding perm to the object's ObjectAttrs.Metadata
      +	return fs.sc.Bucket(bucket).Object(fileName).NewWriter(fs.ctx), nil
      +}
      +
      +type statInfo struct {
      +	name    string
      +	size    int64
      +	isDir   bool
      +	modtime time.Time
      +}
      +
      +func (si *statInfo) IsDir() bool        { return si.isDir }
      +func (si *statInfo) ModTime() time.Time { return si.modtime }
      +func (si *statInfo) Mode() os.FileMode  { return 0644 }
      +func (si *statInfo) Name() string       { return path.Base(si.name) }
      +func (si *statInfo) Size() int64        { return si.size }
      +func (si *statInfo) Sys() interface{}   { return nil }
      +
      +type file struct {
      +	name string
      +	*bytes.Reader
      +}
      +
      +func (*file) Close() error   { return nil }
      +func (f *file) Name() string { return path.Base(f.name) }
      +func (f *file) Stat() (os.FileInfo, error) {
      +	panic("Stat not implemented on /gcs/ files yet")
      +}
      diff --git a/vendor/go4.org/wkfs/gcs/gcs_test.go b/vendor/go4.org/wkfs/gcs/gcs_test.go
      new file mode 100644
      index 00000000..600554f2
      --- /dev/null
      +++ b/vendor/go4.org/wkfs/gcs/gcs_test.go
      @@ -0,0 +1,84 @@
      +/*
      +Copyright 2015 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package gcs
      +
      +import (
      +	"bytes"
      +	"flag"
      +	"io"
      +	"strings"
      +	"testing"
      +
      +	"go4.org/wkfs"
      +	"golang.org/x/net/context"
      +	"google.golang.org/cloud/compute/metadata"
      +	"google.golang.org/cloud/storage"
      +)
      +
      +var flagBucket = flag.String("bucket", "", "Google Cloud Storage bucket where to run the tests. It should be empty.")
      +
      +func TestWriteRead(t *testing.T) {
      +	if !metadata.OnGCE() {
      +		t.Skipf("Not testing on GCE")
      +	}
      +	if *flagBucket == "" {
      +		t.Skipf("No bucket specified")
      +	}
      +	ctx := context.Background()
      +	cl, err := storage.NewClient(ctx)
      +	list, err := cl.Bucket(*flagBucket).List(ctx, nil)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	if len(list.Results) > 0 {
      +		t.Fatalf("Bucket %v is not empty, aborting test.", *flagBucket)
      +	}
      +	filename := "camli-gcs_test.txt"
      +	defer func() {
      +		if err := cl.Bucket(*flagBucket).Object(filename).Delete(ctx); err != nil {
      +			t.Fatalf("error while cleaning up: %v", err)
      +		}
      +	}()
      +
      +	// Write to camli-gcs_test.txt
      +	gcsPath := "/gcs/" + *flagBucket + "/" + filename
      +	f, err := wkfs.Create(gcsPath)
      +	if err != nil {
      +		t.Fatalf("error creating %v: %v", gcsPath, err)
      +	}
      +	data := "Hello World"
      +	if _, err := io.Copy(f, strings.NewReader(data)); err != nil {
      +		t.Fatalf("error writing to %v: %v", gcsPath, err)
      +	}
      +	if err := f.Close(); err != nil {
      +		t.Fatalf("error closing %v: %v", gcsPath, err)
      +	}
      +
      +	// Read back from camli-gcs_test.txt
      +	g, err := wkfs.Open(gcsPath)
      +	if err != nil {
      +		t.Fatalf("error opening %v: %v", gcsPath, err)
      +	}
      +	defer g.Close()
      +	var buf bytes.Buffer
      +	if _, err := io.Copy(&buf, g); err != nil {
      +		t.Fatalf("error reading %v: %v", gcsPath, err)
      +	}
      +	if buf.String() != data {
      +		t.Fatalf("error with %v contents: got %v, wanted %v", gcsPath, buf.String(), data)
      +	}
      +}
      diff --git a/vendor/go4.org/wkfs/wkfs.go b/vendor/go4.org/wkfs/wkfs.go
      new file mode 100644
      index 00000000..f4df062d
      --- /dev/null
      +++ b/vendor/go4.org/wkfs/wkfs.go
      @@ -0,0 +1,132 @@
      +/*
      +Copyright 2014 The Camlistore Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package wkfs implements the pluggable "well-known filesystem" abstraction layer.
      +//
      +// Instead of accessing files directly through the operating system
      +// using os.Open or os.Stat, code should use wkfs.Open or wkfs.Stat,
      +// which first try to intercept paths at well-known top-level
      +// directories representing previously-registered mount types,
      +// otherwise fall through to the operating system paths.
      +//
      +// Example of top-level well-known directories that might be
      +// registered include /gcs/bucket/object for Google Cloud Storage or
      +// /s3/bucket/object for AWS S3.
      +package wkfs // import "go4.org/wkfs"
      +
      +import (
      +	"io"
      +	"io/ioutil"
      +	"os"
      +	"strings"
      +)
      +
      +type File interface {
      +	io.Reader
      +	io.ReaderAt
      +	io.Closer
      +	io.Seeker
      +	Name() string
      +	Stat() (os.FileInfo, error)
      +}
      +
      +type FileWriter interface {
      +	io.Writer
      +	io.Closer
      +}
      +
      +func Open(name string) (File, error)               { return fs(name).Open(name) }
      +func Stat(name string) (os.FileInfo, error)        { return fs(name).Stat(name) }
      +func Lstat(name string) (os.FileInfo, error)       { return fs(name).Lstat(name) }
      +func MkdirAll(path string, perm os.FileMode) error { return fs(path).MkdirAll(path, perm) }
      +func OpenFile(name string, flag int, perm os.FileMode) (FileWriter, error) {
      +	return fs(name).OpenFile(name, flag, perm)
      +}
      +func Create(name string) (FileWriter, error) {
      +	// like os.Create but WRONLY instead of RDWR because we don't
      +	// expose a Reader here.
      +	return OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
      +}
      +
      +func fs(name string) FileSystem {
      +	for pfx, fs := range wkFS {
      +		if strings.HasPrefix(name, pfx) {
      +			return fs
      +		}
      +	}
      +	return osFS{}
      +}
      +
      +type osFS struct{}
      +
      +func (osFS) Open(name string) (File, error)               { return os.Open(name) }
      +func (osFS) Stat(name string) (os.FileInfo, error)        { return os.Stat(name) }
      +func (osFS) Lstat(name string) (os.FileInfo, error)       { return os.Lstat(name) }
      +func (osFS) MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) }
      +func (osFS) OpenFile(name string, flag int, perm os.FileMode) (FileWriter, error) {
      +	return os.OpenFile(name, flag, perm)
      +}
      +
      +type FileSystem interface {
      +	Open(name string) (File, error)
      +	OpenFile(name string, flag int, perm os.FileMode) (FileWriter, error)
      +	Stat(name string) (os.FileInfo, error)
      +	Lstat(name string) (os.FileInfo, error)
      +	MkdirAll(path string, perm os.FileMode) error
      +}
      +
      +// well-known filesystems
      +var wkFS = map[string]FileSystem{}
      +
      +// RegisterFS registers a well-known filesystem. It intercepts
      +// anything beginning with prefix (which must start and end with a
      +// forward slash) and forwards it to fs.
      +func RegisterFS(prefix string, fs FileSystem) {
      +	if !strings.HasPrefix(prefix, "/") || !strings.HasSuffix(prefix, "/") {
      +		panic("bogus prefix: " + prefix)
      +	}
      +	if _, dup := wkFS[prefix]; dup {
      +		panic("duplication registration of " + prefix)
      +	}
      +	wkFS[prefix] = fs
      +}
      +
      +// WriteFile writes data to a file named by filename.
      +// If the file does not exist, WriteFile creates it with permissions perm;
      +// otherwise WriteFile truncates it before writing.
      +func WriteFile(filename string, data []byte, perm os.FileMode) error {
      +	f, err := OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
      +	if err != nil {
      +		return err
      +	}
      +	n, err := f.Write(data)
      +	if err == nil && n < len(data) {
      +		err = io.ErrShortWrite
      +	}
      +	if err1 := f.Close(); err == nil {
      +		err = err1
      +	}
      +	return err
      +}
      +
      +func ReadFile(filename string) ([]byte, error) {
      +	f, err := Open(filename)
      +	if err != nil {
      +		return nil, err
      +	}
      +	defer f.Close()
      +	return ioutil.ReadAll(f)
      +}
      diff --git a/vendor/go4.org/writerutil/writerutil.go b/vendor/go4.org/writerutil/writerutil.go
      new file mode 100644
      index 00000000..5c209cc6
      --- /dev/null
      +++ b/vendor/go4.org/writerutil/writerutil.go
      @@ -0,0 +1,105 @@
      +/*
      +Copyright 2016 The go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +// Package writerutil contains io.Writer types.
      +package writerutil // import "go4.org/writerutil"
      +
      +import (
      +	"bytes"
      +	"strconv"
      +)
      +
      +// PrefixSuffixSaver is an io.Writer which retains the first N bytes
      +// and the last N bytes written to it. The Bytes method reconstructs
      +// it with a pretty error message.
      +// It is copied from os/exec/exec.go of the Go stdlib.
      +type PrefixSuffixSaver struct {
      +	N         int // max size of prefix or suffix
      +	prefix    []byte
      +	suffix    []byte // ring buffer once len(suffix) == N
      +	suffixOff int    // offset to write into suffix
      +	skipped   int64
      +
      +	// TODO(bradfitz): we could keep one large []byte and use part of it for
      +	// the prefix, reserve space for the '... Omitting N bytes ...' message,
      +	// then the ring buffer suffix, and just rearrange the ring buffer
      +	// suffix when Bytes() is called, but it doesn't seem worth it for
      +	// now just for error messages. It's only ~64KB anyway.
      +}
      +
      +func (w *PrefixSuffixSaver) Write(p []byte) (n int, err error) {
      +	lenp := len(p)
      +	p = w.fill(&w.prefix, p)
      +
      +	// Only keep the last w.N bytes of suffix data.
      +	if overage := len(p) - w.N; overage > 0 {
      +		p = p[overage:]
      +		w.skipped += int64(overage)
      +	}
      +	p = w.fill(&w.suffix, p)
      +
      +	// w.suffix is full now if p is non-empty. Overwrite it in a circle.
      +	for len(p) > 0 { // 0, 1, or 2 iterations.
      +		n := copy(w.suffix[w.suffixOff:], p)
      +		p = p[n:]
      +		w.skipped += int64(n)
      +		w.suffixOff += n
      +		if w.suffixOff == w.N {
      +			w.suffixOff = 0
      +		}
      +	}
      +	return lenp, nil
      +}
      +
      +// fill appends up to len(p) bytes of p to *dst, such that *dst does not
      +// grow larger than w.N. It returns the un-appended suffix of p.
      +func (w *PrefixSuffixSaver) fill(dst *[]byte, p []byte) (pRemain []byte) {
      +	if remain := w.N - len(*dst); remain > 0 {
      +		add := minInt(len(p), remain)
      +		*dst = append(*dst, p[:add]...)
      +		p = p[add:]
      +	}
      +	return p
      +}
      +
      +// Bytes returns a slice of the bytes, or a copy of the bytes, retained by w.
      +// If more bytes than could be retained were written to w, it returns a
      +// concatenation of the N first bytes, a message for how many bytes were dropped,
      +// and the N last bytes.
      +func (w *PrefixSuffixSaver) Bytes() []byte {
      +	if w.suffix == nil {
      +		return w.prefix
      +	}
      +	if w.skipped == 0 {
      +		return append(w.prefix, w.suffix...)
      +	}
      +	var buf bytes.Buffer
      +	buf.Grow(len(w.prefix) + len(w.suffix) + 50)
      +	buf.Write(w.prefix)
      +	buf.WriteString("\n... omitting ")
      +	buf.WriteString(strconv.FormatInt(w.skipped, 10))
      +	buf.WriteString(" bytes ...\n")
      +	buf.Write(w.suffix[w.suffixOff:])
      +	buf.Write(w.suffix[:w.suffixOff])
      +	return buf.Bytes()
      +}
      +
      +func minInt(a, b int) int {
      +	if a < b {
      +		return a
      +	}
      +	return b
      +}
      diff --git a/vendor/go4.org/writerutil/writerutil_test.go b/vendor/go4.org/writerutil/writerutil_test.go
      new file mode 100644
      index 00000000..fae647a3
      --- /dev/null
      +++ b/vendor/go4.org/writerutil/writerutil_test.go
      @@ -0,0 +1,73 @@
      +/*
      +Copyright 2016 The go4 Authors
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +     http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +*/
      +
      +package writerutil
      +
      +import (
      +	"io"
      +	"testing"
      +)
      +
      +func TestPrefixSuffixSaver(t *testing.T) {
      +	tests := []struct {
      +		N      int
      +		writes []string
      +		want   string
      +	}{
      +		{
      +			N:      2,
      +			writes: nil,
      +			want:   "",
      +		},
      +		{
      +			N:      2,
      +			writes: []string{"a"},
      +			want:   "a",
      +		},
      +		{
      +			N:      2,
      +			writes: []string{"abc", "d"},
      +			want:   "abcd",
      +		},
      +		{
      +			N:      2,
      +			writes: []string{"abc", "d", "e"},
      +			want:   "ab\n... omitting 1 bytes ...\nde",
      +		},
      +		{
      +			N:      2,
      +			writes: []string{"ab______________________yz"},
      +			want:   "ab\n... omitting 22 bytes ...\nyz",
      +		},
      +		{
      +			N:      2,
      +			writes: []string{"ab_______________________y", "z"},
      +			want:   "ab\n... omitting 23 bytes ...\nyz",
      +		},
      +	}
      +	for i, tt := range tests {
      +		w := &PrefixSuffixSaver{N: tt.N}
      +		for _, s := range tt.writes {
      +			n, err := io.WriteString(w, s)
      +			if err != nil || n != len(s) {
      +				t.Errorf("%d. WriteString(%q) = %v, %v; want %v, %v", i, s, n, err, len(s), nil)
      +			}
      +		}
      +		if got := string(w.Bytes()); got != tt.want {
      +			t.Errorf("%d. Bytes = %q; want %q", i, got, tt.want)
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/.gitattributes b/vendor/golang.org/x/crypto/.gitattributes
      new file mode 100644
      index 00000000..d2f212e5
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/.gitattributes
      @@ -0,0 +1,10 @@
      +# Treat all files in this repo as binary, with no git magic updating
      +# line endings. Windows users contributing to Go will need to use a
      +# modern version of git and editors capable of LF line endings.
      +#
      +# We'll prevent accidental CRLF line endings from entering the repo
      +# via the git-review gofmt checks.
      +#
      +# See golang.org/issue/9281
      +
      +* -text
      diff --git a/vendor/golang.org/x/crypto/.gitignore b/vendor/golang.org/x/crypto/.gitignore
      new file mode 100644
      index 00000000..8339fd61
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/.gitignore
      @@ -0,0 +1,2 @@
      +# Add no patterns to .hgignore except for files generated by the build.
      +last-change
      diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS
      new file mode 100644
      index 00000000..15167cd7
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/AUTHORS
      @@ -0,0 +1,3 @@
      +# This source code refers to The Go Authors for copyright purposes.
      +# The master list of authors is in the main Go distribution,
      +# visible at http://tip.golang.org/AUTHORS.
      diff --git a/vendor/golang.org/x/crypto/CONTRIBUTING.md b/vendor/golang.org/x/crypto/CONTRIBUTING.md
      new file mode 100644
      index 00000000..88dff59b
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/CONTRIBUTING.md
      @@ -0,0 +1,31 @@
      +# Contributing to Go
      +
      +Go is an open source project.
      +
      +It is the work of hundreds of contributors. We appreciate your help!
      +
      +
      +## Filing issues
      +
      +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
      +
      +1. What version of Go are you using (`go version`)?
      +2. What operating system and processor architecture are you using?
      +3. What did you do?
      +4. What did you expect to see?
      +5. What did you see instead?
      +
      +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
      +The gophers there will answer or ask you to file an issue if you've tripped over a bug.
      +
      +## Contributing code
      +
      +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
      +before sending patches.
      +
      +**We do not accept GitHub pull requests**
      +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
      +
      +Unless otherwise noted, the Go source files are distributed under
      +the BSD-style license found in the LICENSE file.
      +
      diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS
      new file mode 100644
      index 00000000..1c4577e9
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/CONTRIBUTORS
      @@ -0,0 +1,3 @@
      +# This source code was written by the Go contributors.
      +# The master list of contributors is in the main Go distribution,
      +# visible at http://tip.golang.org/CONTRIBUTORS.
      diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE
      new file mode 100644
      index 00000000..6a66aea5
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/LICENSE
      @@ -0,0 +1,27 @@
      +Copyright (c) 2009 The Go Authors. All rights reserved.
      +
      +Redistribution and use in source and binary forms, with or without
      +modification, are permitted provided that the following conditions are
      +met:
      +
      +   * Redistributions of source code must retain the above copyright
      +notice, this list of conditions and the following disclaimer.
      +   * Redistributions in binary form must reproduce the above
      +copyright notice, this list of conditions and the following disclaimer
      +in the documentation and/or other materials provided with the
      +distribution.
      +   * Neither the name of Google Inc. nor the names of its
      +contributors may be used to endorse or promote products derived from
      +this software without specific prior written permission.
      +
      +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS
      new file mode 100644
      index 00000000..73309904
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/PATENTS
      @@ -0,0 +1,22 @@
      +Additional IP Rights Grant (Patents)
      +
      +"This implementation" means the copyrightable works distributed by
      +Google as part of the Go project.
      +
      +Google hereby grants to You a perpetual, worldwide, non-exclusive,
      +no-charge, royalty-free, irrevocable (except as stated in this section)
      +patent license to make, have made, use, offer to sell, sell, import,
      +transfer and otherwise run, modify and propagate the contents of this
      +implementation of Go, where such license applies only to those patent
      +claims, both currently owned or controlled by Google and acquired in
      +the future, licensable by Google that are necessarily infringed by this
      +implementation of Go.  This grant does not include claims that would be
      +infringed only as a consequence of further modification of this
      +implementation.  If you or your agent or exclusive licensee institute or
      +order or agree to the institution of patent litigation against any
      +entity (including a cross-claim or counterclaim in a lawsuit) alleging
      +that this implementation of Go or any code incorporated within this
      +implementation of Go constitutes direct or contributory patent
      +infringement, or inducement of patent infringement, then any patent
      +rights granted to you under this License for this implementation of Go
      +shall terminate as of the date such litigation is filed.
      diff --git a/vendor/golang.org/x/crypto/README b/vendor/golang.org/x/crypto/README
      new file mode 100644
      index 00000000..f1e0cbf9
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/README
      @@ -0,0 +1,3 @@
      +This repository holds supplementary Go cryptography libraries.
      +
      +To submit changes to this repository, see http://golang.org/doc/contribute.html.
      diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go
      new file mode 100644
      index 00000000..fc311609
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go
      @@ -0,0 +1,35 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bcrypt
      +
      +import "encoding/base64"
      +
      +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
      +
      +var bcEncoding = base64.NewEncoding(alphabet)
      +
      +func base64Encode(src []byte) []byte {
      +	n := bcEncoding.EncodedLen(len(src))
      +	dst := make([]byte, n)
      +	bcEncoding.Encode(dst, src)
      +	for dst[n-1] == '=' {
      +		n--
      +	}
      +	return dst[:n]
      +}
      +
      +func base64Decode(src []byte) ([]byte, error) {
      +	numOfEquals := 4 - (len(src) % 4)
      +	for i := 0; i < numOfEquals; i++ {
      +		src = append(src, '=')
      +	}
      +
      +	dst := make([]byte, bcEncoding.DecodedLen(len(src)))
      +	n, err := bcEncoding.Decode(dst, src)
      +	if err != nil {
      +		return nil, err
      +	}
      +	return dst[:n], nil
      +}
      diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
      new file mode 100644
      index 00000000..f8b807f9
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
      @@ -0,0 +1,294 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
      +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
      +package bcrypt // import "golang.org/x/crypto/bcrypt"
      +
      +// The code is a port of Provos and Mazières's C implementation.
      +import (
      +	"crypto/rand"
      +	"crypto/subtle"
      +	"errors"
      +	"fmt"
      +	"golang.org/x/crypto/blowfish"
      +	"io"
      +	"strconv"
      +)
      +
      +const (
      +	MinCost     int = 4  // the minimum allowable cost as passed in to GenerateFromPassword
      +	MaxCost     int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
      +	DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
      +)
      +
      +// The error returned from CompareHashAndPassword when a password and hash do
      +// not match.
      +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
      +
      +// The error returned from CompareHashAndPassword when a hash is too short to
      +// be a bcrypt hash.
      +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
      +
      +// The error returned from CompareHashAndPassword when a hash was created with
      +// a bcrypt algorithm newer than this implementation.
      +type HashVersionTooNewError byte
      +
      +func (hv HashVersionTooNewError) Error() string {
      +	return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
      +}
      +
      +// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
      +type InvalidHashPrefixError byte
      +
      +func (ih InvalidHashPrefixError) Error() string {
      +	return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
      +}
      +
      +type InvalidCostError int
      +
      +func (ic InvalidCostError) Error() string {
      +	return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
      +}
      +
      +const (
      +	majorVersion       = '2'
      +	minorVersion       = 'a'
      +	maxSaltSize        = 16
      +	maxCryptedHashSize = 23
      +	encodedSaltSize    = 22
      +	encodedHashSize    = 31
      +	minHashSize        = 59
      +)
      +
      +// magicCipherData is an IV for the 64 Blowfish encryption calls in
      +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
      +var magicCipherData = []byte{
      +	0x4f, 0x72, 0x70, 0x68,
      +	0x65, 0x61, 0x6e, 0x42,
      +	0x65, 0x68, 0x6f, 0x6c,
      +	0x64, 0x65, 0x72, 0x53,
      +	0x63, 0x72, 0x79, 0x44,
      +	0x6f, 0x75, 0x62, 0x74,
      +}
      +
      +type hashed struct {
      +	hash  []byte
      +	salt  []byte
      +	cost  int // allowed range is MinCost to MaxCost
      +	major byte
      +	minor byte
      +}
      +
      +// GenerateFromPassword returns the bcrypt hash of the password at the given
      +// cost. If the cost given is less than MinCost, the cost will be set to
      +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
      +// to compare the returned hashed password with its cleartext version.
      +func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
      +	p, err := newFromPassword(password, cost)
      +	if err != nil {
      +		return nil, err
      +	}
      +	return p.Hash(), nil
      +}
      +
      +// CompareHashAndPassword compares a bcrypt hashed password with its possible
      +// plaintext equivalent. Returns nil on success, or an error on failure.
      +func CompareHashAndPassword(hashedPassword, password []byte) error {
      +	p, err := newFromHash(hashedPassword)
      +	if err != nil {
      +		return err
      +	}
      +
      +	otherHash, err := bcrypt(password, p.cost, p.salt)
      +	if err != nil {
      +		return err
      +	}
      +
      +	otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
      +	if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
      +		return nil
      +	}
      +
      +	return ErrMismatchedHashAndPassword
      +}
      +
      +// Cost returns the hashing cost used to create the given hashed
      +// password. When, in the future, the hashing cost of a password system needs
      +// to be increased in order to adjust for greater computational power, this
      +// function allows one to establish which passwords need to be updated.
      +func Cost(hashedPassword []byte) (int, error) {
      +	p, err := newFromHash(hashedPassword)
      +	if err != nil {
      +		return 0, err
      +	}
      +	return p.cost, nil
      +}
      +
      +func newFromPassword(password []byte, cost int) (*hashed, error) {
      +	if cost < MinCost {
      +		cost = DefaultCost
      +	}
      +	p := new(hashed)
      +	p.major = majorVersion
      +	p.minor = minorVersion
      +
      +	err := checkCost(cost)
      +	if err != nil {
      +		return nil, err
      +	}
      +	p.cost = cost
      +
      +	unencodedSalt := make([]byte, maxSaltSize)
      +	_, err = io.ReadFull(rand.Reader, unencodedSalt)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	p.salt = base64Encode(unencodedSalt)
      +	hash, err := bcrypt(password, p.cost, p.salt)
      +	if err != nil {
      +		return nil, err
      +	}
      +	p.hash = hash
      +	return p, err
      +}
      +
      +func newFromHash(hashedSecret []byte) (*hashed, error) {
      +	if len(hashedSecret) < minHashSize {
      +		return nil, ErrHashTooShort
      +	}
      +	p := new(hashed)
      +	n, err := p.decodeVersion(hashedSecret)
      +	if err != nil {
      +		return nil, err
      +	}
      +	hashedSecret = hashedSecret[n:]
      +	n, err = p.decodeCost(hashedSecret)
      +	if err != nil {
      +		return nil, err
      +	}
      +	hashedSecret = hashedSecret[n:]
      +
      +	// The "+2" is here because we'll have to append at most 2 '=' to the salt
      +	// when base64 decoding it in expensiveBlowfishSetup().
      +	p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
      +	copy(p.salt, hashedSecret[:encodedSaltSize])
      +
      +	hashedSecret = hashedSecret[encodedSaltSize:]
      +	p.hash = make([]byte, len(hashedSecret))
      +	copy(p.hash, hashedSecret)
      +
      +	return p, nil
      +}
      +
      +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
      +	cipherData := make([]byte, len(magicCipherData))
      +	copy(cipherData, magicCipherData)
      +
      +	c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	for i := 0; i < 24; i += 8 {
      +		for j := 0; j < 64; j++ {
      +			c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
      +		}
      +	}
      +
      +	// Bug compatibility with C bcrypt implementations. We only encode 23 of
      +	// the 24 bytes encrypted.
      +	hsh := base64Encode(cipherData[:maxCryptedHashSize])
      +	return hsh, nil
      +}
      +
      +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
      +
      +	csalt, err := base64Decode(salt)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	// Bug compatibility with C bcrypt implementations. They use the trailing
      +	// NULL in the key string during expansion.
      +	ckey := append(key, 0)
      +
      +	c, err := blowfish.NewSaltedCipher(ckey, csalt)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	var i, rounds uint64
      +	rounds = 1 << cost
      +	for i = 0; i < rounds; i++ {
      +		blowfish.ExpandKey(ckey, c)
      +		blowfish.ExpandKey(csalt, c)
      +	}
      +
      +	return c, nil
      +}
      +
      +func (p *hashed) Hash() []byte {
      +	arr := make([]byte, 60)
      +	arr[0] = '$'
      +	arr[1] = p.major
      +	n := 2
      +	if p.minor != 0 {
      +		arr[2] = p.minor
      +		n = 3
      +	}
      +	arr[n] = '$'
      +	n += 1
      +	copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
      +	n += 2
      +	arr[n] = '$'
      +	n += 1
      +	copy(arr[n:], p.salt)
      +	n += encodedSaltSize
      +	copy(arr[n:], p.hash)
      +	n += encodedHashSize
      +	return arr[:n]
      +}
      +
      +func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
      +	if sbytes[0] != '$' {
      +		return -1, InvalidHashPrefixError(sbytes[0])
      +	}
      +	if sbytes[1] > majorVersion {
      +		return -1, HashVersionTooNewError(sbytes[1])
      +	}
      +	p.major = sbytes[1]
      +	n := 3
      +	if sbytes[2] != '$' {
      +		p.minor = sbytes[2]
      +		n++
      +	}
      +	return n, nil
      +}
      +
      +// sbytes should begin where decodeVersion left off.
      +func (p *hashed) decodeCost(sbytes []byte) (int, error) {
      +	cost, err := strconv.Atoi(string(sbytes[0:2]))
      +	if err != nil {
      +		return -1, err
      +	}
      +	err = checkCost(cost)
      +	if err != nil {
      +		return -1, err
      +	}
      +	p.cost = cost
      +	return 3, nil
      +}
      +
      +func (p *hashed) String() string {
      +	return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
      +}
      +
      +func checkCost(cost int) error {
      +	if cost < MinCost || cost > MaxCost {
      +		return InvalidCostError(cost)
      +	}
      +	return nil
      +}
      diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go
      new file mode 100644
      index 00000000..f08a6f5b
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go
      @@ -0,0 +1,226 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bcrypt
      +
      +import (
      +	"bytes"
      +	"fmt"
      +	"testing"
      +)
      +
      +func TestBcryptingIsEasy(t *testing.T) {
      +	pass := []byte("mypassword")
      +	hp, err := GenerateFromPassword(pass, 0)
      +	if err != nil {
      +		t.Fatalf("GenerateFromPassword error: %s", err)
      +	}
      +
      +	if CompareHashAndPassword(hp, pass) != nil {
      +		t.Errorf("%v should hash %s correctly", hp, pass)
      +	}
      +
      +	notPass := "notthepass"
      +	err = CompareHashAndPassword(hp, []byte(notPass))
      +	if err != ErrMismatchedHashAndPassword {
      +		t.Errorf("%v and %s should be mismatched", hp, notPass)
      +	}
      +}
      +
      +func TestBcryptingIsCorrect(t *testing.T) {
      +	pass := []byte("allmine")
      +	salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
      +	expectedHash := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
      +
      +	hash, err := bcrypt(pass, 10, salt)
      +	if err != nil {
      +		t.Fatalf("bcrypt blew up: %v", err)
      +	}
      +	if !bytes.HasSuffix(expectedHash, hash) {
      +		t.Errorf("%v should be the suffix of %v", hash, expectedHash)
      +	}
      +
      +	h, err := newFromHash(expectedHash)
      +	if err != nil {
      +		t.Errorf("Unable to parse %s: %v", string(expectedHash), err)
      +	}
      +
      +	// This is not the safe way to compare these hashes. We do this only for
      +	// testing clarity. Use bcrypt.CompareHashAndPassword()
      +	if err == nil && !bytes.Equal(expectedHash, h.Hash()) {
      +		t.Errorf("Parsed hash %v should equal %v", h.Hash(), expectedHash)
      +	}
      +}
      +
      +func TestVeryShortPasswords(t *testing.T) {
      +	key := []byte("k")
      +	salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
      +	_, err := bcrypt(key, 10, salt)
      +	if err != nil {
      +		t.Errorf("One byte key resulted in error: %s", err)
      +	}
      +}
      +
      +func TestTooLongPasswordsWork(t *testing.T) {
      +	salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
      +	// One byte over the usual 56 byte limit that blowfish has
      +	tooLongPass := []byte("012345678901234567890123456789012345678901234567890123456")
      +	tooLongExpected := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C")
      +	hash, err := bcrypt(tooLongPass, 10, salt)
      +	if err != nil {
      +		t.Fatalf("bcrypt blew up on long password: %v", err)
      +	}
      +	if !bytes.HasSuffix(tooLongExpected, hash) {
      +		t.Errorf("%v should be the suffix of %v", hash, tooLongExpected)
      +	}
      +}
      +
      +type InvalidHashTest struct {
      +	err  error
      +	hash []byte
      +}
      +
      +var invalidTests = []InvalidHashTest{
      +	{ErrHashTooShort, []byte("$2a$10$fooo")},
      +	{ErrHashTooShort, []byte("$2a")},
      +	{HashVersionTooNewError('3'), []byte("$3a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
      +	{InvalidHashPrefixError('%'), []byte("%2a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
      +	{InvalidCostError(32), []byte("$2a$32$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
      +}
      +
      +func TestInvalidHashErrors(t *testing.T) {
      +	check := func(name string, expected, err error) {
      +		if err == nil {
      +			t.Errorf("%s: Should have returned an error", name)
      +		}
      +		if err != nil && err != expected {
      +			t.Errorf("%s gave err %v but should have given %v", name, err, expected)
      +		}
      +	}
      +	for _, iht := range invalidTests {
      +		_, err := newFromHash(iht.hash)
      +		check("newFromHash", iht.err, err)
      +		err = CompareHashAndPassword(iht.hash, []byte("anything"))
      +		check("CompareHashAndPassword", iht.err, err)
      +	}
      +}
      +
      +func TestUnpaddedBase64Encoding(t *testing.T) {
      +	original := []byte{101, 201, 101, 75, 19, 227, 199, 20, 239, 236, 133, 32, 30, 109, 243, 30}
      +	encodedOriginal := []byte("XajjQvNhvvRt5GSeFk1xFe")
      +
      +	encoded := base64Encode(original)
      +
      +	if !bytes.Equal(encodedOriginal, encoded) {
      +		t.Errorf("Encoded %v should have equaled %v", encoded, encodedOriginal)
      +	}
      +
      +	decoded, err := base64Decode(encodedOriginal)
      +	if err != nil {
      +		t.Fatalf("base64Decode blew up: %s", err)
      +	}
      +
      +	if !bytes.Equal(decoded, original) {
      +		t.Errorf("Decoded %v should have equaled %v", decoded, original)
      +	}
      +}
      +
      +func TestCost(t *testing.T) {
      +	suffix := "XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C"
      +	for _, vers := range []string{"2a", "2"} {
      +		for _, cost := range []int{4, 10} {
      +			s := fmt.Sprintf("$%s$%02d$%s", vers, cost, suffix)
      +			h := []byte(s)
      +			actual, err := Cost(h)
      +			if err != nil {
      +				t.Errorf("Cost, error: %s", err)
      +				continue
      +			}
      +			if actual != cost {
      +				t.Errorf("Cost, expected: %d, actual: %d", cost, actual)
      +			}
      +		}
      +	}
      +	_, err := Cost([]byte("$a$a$" + suffix))
      +	if err == nil {
      +		t.Errorf("Cost, malformed but no error returned")
      +	}
      +}
      +
      +func TestCostValidationInHash(t *testing.T) {
      +	if testing.Short() {
      +		return
      +	}
      +
      +	pass := []byte("mypassword")
      +
      +	for c := 0; c < MinCost; c++ {
      +		p, _ := newFromPassword(pass, c)
      +		if p.cost != DefaultCost {
      +			t.Errorf("newFromPassword should default costs below %d to %d, but was %d", MinCost, DefaultCost, p.cost)
      +		}
      +	}
      +
      +	p, _ := newFromPassword(pass, 14)
      +	if p.cost != 14 {
      +		t.Errorf("newFromPassword should default cost to 14, but was %d", p.cost)
      +	}
      +
      +	hp, _ := newFromHash(p.Hash())
      +	if p.cost != hp.cost {
      +		t.Errorf("newFromHash should maintain the cost at %d, but was %d", p.cost, hp.cost)
      +	}
      +
      +	_, err := newFromPassword(pass, 32)
      +	if err == nil {
      +		t.Fatalf("newFromPassword: should return a cost error")
      +	}
      +	if err != InvalidCostError(32) {
      +		t.Errorf("newFromPassword: should return cost error, got %#v", err)
      +	}
      +}
      +
      +func TestCostReturnsWithLeadingZeroes(t *testing.T) {
      +	hp, _ := newFromPassword([]byte("abcdefgh"), 7)
      +	cost := hp.Hash()[4:7]
      +	expected := []byte("07$")
      +
      +	if !bytes.Equal(expected, cost) {
      +		t.Errorf("single digit costs in hash should have leading zeros: was %v instead of %v", cost, expected)
      +	}
      +}
      +
      +func TestMinorNotRequired(t *testing.T) {
      +	noMinorHash := []byte("$2$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
      +	h, err := newFromHash(noMinorHash)
      +	if err != nil {
      +		t.Fatalf("No minor hash blew up: %s", err)
      +	}
      +	if h.minor != 0 {
      +		t.Errorf("Should leave minor version at 0, but was %d", h.minor)
      +	}
      +
      +	if !bytes.Equal(noMinorHash, h.Hash()) {
      +		t.Errorf("Should generate hash %v, but created %v", noMinorHash, h.Hash())
      +	}
      +}
      +
      +func BenchmarkEqual(b *testing.B) {
      +	b.StopTimer()
      +	passwd := []byte("somepasswordyoulike")
      +	hash, _ := GenerateFromPassword(passwd, 10)
      +	b.StartTimer()
      +	for i := 0; i < b.N; i++ {
      +		CompareHashAndPassword(hash, passwd)
      +	}
      +}
      +
      +func BenchmarkGeneration(b *testing.B) {
      +	b.StopTimer()
      +	passwd := []byte("mylongpassword1234")
      +	b.StartTimer()
      +	for i := 0; i < b.N; i++ {
      +		GenerateFromPassword(passwd, 10)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go
      new file mode 100644
      index 00000000..9d80f195
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/blowfish/block.go
      @@ -0,0 +1,159 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package blowfish
      +
      +// getNextWord returns the next big-endian uint32 value from the byte slice
      +// at the given position in a circular manner, updating the position.
      +func getNextWord(b []byte, pos *int) uint32 {
      +	var w uint32
      +	j := *pos
      +	for i := 0; i < 4; i++ {
      +		w = w<<8 | uint32(b[j])
      +		j++
      +		if j >= len(b) {
      +			j = 0
      +		}
      +	}
      +	*pos = j
      +	return w
      +}
      +
      +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
      +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
      +// pi and substitution tables for calls to Encrypt. This is used, primarily,
      +// by the bcrypt package to reuse the Blowfish key schedule during its
      +// set up. It's unlikely that you need to use this directly.
      +func ExpandKey(key []byte, c *Cipher) {
      +	j := 0
      +	for i := 0; i < 18; i++ {
      +		// Using inlined getNextWord for performance.
      +		var d uint32
      +		for k := 0; k < 4; k++ {
      +			d = d<<8 | uint32(key[j])
      +			j++
      +			if j >= len(key) {
      +				j = 0
      +			}
      +		}
      +		c.p[i] ^= d
      +	}
      +
      +	var l, r uint32
      +	for i := 0; i < 18; i += 2 {
      +		l, r = encryptBlock(l, r, c)
      +		c.p[i], c.p[i+1] = l, r
      +	}
      +
      +	for i := 0; i < 256; i += 2 {
      +		l, r = encryptBlock(l, r, c)
      +		c.s0[i], c.s0[i+1] = l, r
      +	}
      +	for i := 0; i < 256; i += 2 {
      +		l, r = encryptBlock(l, r, c)
      +		c.s1[i], c.s1[i+1] = l, r
      +	}
      +	for i := 0; i < 256; i += 2 {
      +		l, r = encryptBlock(l, r, c)
      +		c.s2[i], c.s2[i+1] = l, r
      +	}
      +	for i := 0; i < 256; i += 2 {
      +		l, r = encryptBlock(l, r, c)
      +		c.s3[i], c.s3[i+1] = l, r
      +	}
      +}
      +
      +// This is similar to ExpandKey, but folds the salt during the key
      +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
      +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
      +// and specializing it here is useful.
      +func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
      +	j := 0
      +	for i := 0; i < 18; i++ {
      +		c.p[i] ^= getNextWord(key, &j)
      +	}
      +
      +	j = 0
      +	var l, r uint32
      +	for i := 0; i < 18; i += 2 {
      +		l ^= getNextWord(salt, &j)
      +		r ^= getNextWord(salt, &j)
      +		l, r = encryptBlock(l, r, c)
      +		c.p[i], c.p[i+1] = l, r
      +	}
      +
      +	for i := 0; i < 256; i += 2 {
      +		l ^= getNextWord(salt, &j)
      +		r ^= getNextWord(salt, &j)
      +		l, r = encryptBlock(l, r, c)
      +		c.s0[i], c.s0[i+1] = l, r
      +	}
      +
      +	for i := 0; i < 256; i += 2 {
      +		l ^= getNextWord(salt, &j)
      +		r ^= getNextWord(salt, &j)
      +		l, r = encryptBlock(l, r, c)
      +		c.s1[i], c.s1[i+1] = l, r
      +	}
      +
      +	for i := 0; i < 256; i += 2 {
      +		l ^= getNextWord(salt, &j)
      +		r ^= getNextWord(salt, &j)
      +		l, r = encryptBlock(l, r, c)
      +		c.s2[i], c.s2[i+1] = l, r
      +	}
      +
      +	for i := 0; i < 256; i += 2 {
      +		l ^= getNextWord(salt, &j)
      +		r ^= getNextWord(salt, &j)
      +		l, r = encryptBlock(l, r, c)
      +		c.s3[i], c.s3[i+1] = l, r
      +	}
      +}
      +
      +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
      +	xl, xr := l, r
      +	xl ^= c.p[0]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
      +	xr ^= c.p[17]
      +	return xr, xl
      +}
      +
      +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
      +	xl, xr := l, r
      +	xl ^= c.p[17]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
      +	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
      +	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
      +	xr ^= c.p[0]
      +	return xr, xl
      +}
      diff --git a/vendor/golang.org/x/crypto/blowfish/blowfish_test.go b/vendor/golang.org/x/crypto/blowfish/blowfish_test.go
      new file mode 100644
      index 00000000..7afa1fdf
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/blowfish/blowfish_test.go
      @@ -0,0 +1,274 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package blowfish
      +
      +import "testing"
      +
      +type CryptTest struct {
      +	key []byte
      +	in  []byte
      +	out []byte
      +}
      +
      +// Test vector values are from http://www.schneier.com/code/vectors.txt.
      +var encryptTests = []CryptTest{
      +	{
      +		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
      +		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
      +		[]byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
      +	{
      +		[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
      +		[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
      +		[]byte{0x51, 0x86, 0x6F, 0xD5, 0xB8, 0x5E, 0xCB, 0x8A}},
      +	{
      +		[]byte{0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
      +		[]byte{0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
      +		[]byte{0x7D, 0x85, 0x6F, 0x9A, 0x61, 0x30, 0x63, 0xF2}},
      +	{
      +		[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
      +		[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
      +		[]byte{0x24, 0x66, 0xDD, 0x87, 0x8B, 0x96, 0x3C, 0x9D}},
      +
      +	{
      +		[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
      +		[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
      +		[]byte{0x61, 0xF9, 0xC3, 0x80, 0x22, 0x81, 0xB0, 0x96}},
      +	{
      +		[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
      +		[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
      +		[]byte{0x7D, 0x0C, 0xC6, 0x30, 0xAF, 0xDA, 0x1E, 0xC7}},
      +	{
      +		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
      +		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
      +		[]byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
      +	{
      +		[]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
      +		[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
      +		[]byte{0x0A, 0xCE, 0xAB, 0x0F, 0xC6, 0xA0, 0xA2, 0x8D}},
      +	{
      +		[]byte{0x7C, 0xA1, 0x10, 0x45, 0x4A, 0x1A, 0x6E, 0x57},
      +		[]byte{0x01, 0xA1, 0xD6, 0xD0, 0x39, 0x77, 0x67, 0x42},
      +		[]byte{0x59, 0xC6, 0x82, 0x45, 0xEB, 0x05, 0x28, 0x2B}},
      +	{
      +		[]byte{0x01, 0x31, 0xD9, 0x61, 0x9D, 0xC1, 0x37, 0x6E},
      +		[]byte{0x5C, 0xD5, 0x4C, 0xA8, 0x3D, 0xEF, 0x57, 0xDA},
      +		[]byte{0xB1, 0xB8, 0xCC, 0x0B, 0x25, 0x0F, 0x09, 0xA0}},
      +	{
      +		[]byte{0x07, 0xA1, 0x13, 0x3E, 0x4A, 0x0B, 0x26, 0x86},
      +		[]byte{0x02, 0x48, 0xD4, 0x38, 0x06, 0xF6, 0x71, 0x72},
      +		[]byte{0x17, 0x30, 0xE5, 0x77, 0x8B, 0xEA, 0x1D, 0xA4}},
      +	{
      +		[]byte{0x38, 0x49, 0x67, 0x4C, 0x26, 0x02, 0x31, 0x9E},
      +		[]byte{0x51, 0x45, 0x4B, 0x58, 0x2D, 0xDF, 0x44, 0x0A},
      +		[]byte{0xA2, 0x5E, 0x78, 0x56, 0xCF, 0x26, 0x51, 0xEB}},
      +	{
      +		[]byte{0x04, 0xB9, 0x15, 0xBA, 0x43, 0xFE, 0xB5, 0xB6},
      +		[]byte{0x42, 0xFD, 0x44, 0x30, 0x59, 0x57, 0x7F, 0xA2},
      +		[]byte{0x35, 0x38, 0x82, 0xB1, 0x09, 0xCE, 0x8F, 0x1A}},
      +	{
      +		[]byte{0x01, 0x13, 0xB9, 0x70, 0xFD, 0x34, 0xF2, 0xCE},
      +		[]byte{0x05, 0x9B, 0x5E, 0x08, 0x51, 0xCF, 0x14, 0x3A},
      +		[]byte{0x48, 0xF4, 0xD0, 0x88, 0x4C, 0x37, 0x99, 0x18}},
      +	{
      +		[]byte{0x01, 0x70, 0xF1, 0x75, 0x46, 0x8F, 0xB5, 0xE6},
      +		[]byte{0x07, 0x56, 0xD8, 0xE0, 0x77, 0x47, 0x61, 0xD2},
      +		[]byte{0x43, 0x21, 0x93, 0xB7, 0x89, 0x51, 0xFC, 0x98}},
      +	{
      +		[]byte{0x43, 0x29, 0x7F, 0xAD, 0x38, 0xE3, 0x73, 0xFE},
      +		[]byte{0x76, 0x25, 0x14, 0xB8, 0x29, 0xBF, 0x48, 0x6A},
      +		[]byte{0x13, 0xF0, 0x41, 0x54, 0xD6, 0x9D, 0x1A, 0xE5}},
      +	{
      +		[]byte{0x07, 0xA7, 0x13, 0x70, 0x45, 0xDA, 0x2A, 0x16},
      +		[]byte{0x3B, 0xDD, 0x11, 0x90, 0x49, 0x37, 0x28, 0x02},
      +		[]byte{0x2E, 0xED, 0xDA, 0x93, 0xFF, 0xD3, 0x9C, 0x79}},
      +	{
      +		[]byte{0x04, 0x68, 0x91, 0x04, 0xC2, 0xFD, 0x3B, 0x2F},
      +		[]byte{0x26, 0x95, 0x5F, 0x68, 0x35, 0xAF, 0x60, 0x9A},
      +		[]byte{0xD8, 0x87, 0xE0, 0x39, 0x3C, 0x2D, 0xA6, 0xE3}},
      +	{
      +		[]byte{0x37, 0xD0, 0x6B, 0xB5, 0x16, 0xCB, 0x75, 0x46},
      +		[]byte{0x16, 0x4D, 0x5E, 0x40, 0x4F, 0x27, 0x52, 0x32},
      +		[]byte{0x5F, 0x99, 0xD0, 0x4F, 0x5B, 0x16, 0x39, 0x69}},
      +	{
      +		[]byte{0x1F, 0x08, 0x26, 0x0D, 0x1A, 0xC2, 0x46, 0x5E},
      +		[]byte{0x6B, 0x05, 0x6E, 0x18, 0x75, 0x9F, 0x5C, 0xCA},
      +		[]byte{0x4A, 0x05, 0x7A, 0x3B, 0x24, 0xD3, 0x97, 0x7B}},
      +	{
      +		[]byte{0x58, 0x40, 0x23, 0x64, 0x1A, 0xBA, 0x61, 0x76},
      +		[]byte{0x00, 0x4B, 0xD6, 0xEF, 0x09, 0x17, 0x60, 0x62},
      +		[]byte{0x45, 0x20, 0x31, 0xC1, 0xE4, 0xFA, 0xDA, 0x8E}},
      +	{
      +		[]byte{0x02, 0x58, 0x16, 0x16, 0x46, 0x29, 0xB0, 0x07},
      +		[]byte{0x48, 0x0D, 0x39, 0x00, 0x6E, 0xE7, 0x62, 0xF2},
      +		[]byte{0x75, 0x55, 0xAE, 0x39, 0xF5, 0x9B, 0x87, 0xBD}},
      +	{
      +		[]byte{0x49, 0x79, 0x3E, 0xBC, 0x79, 0xB3, 0x25, 0x8F},
      +		[]byte{0x43, 0x75, 0x40, 0xC8, 0x69, 0x8F, 0x3C, 0xFA},
      +		[]byte{0x53, 0xC5, 0x5F, 0x9C, 0xB4, 0x9F, 0xC0, 0x19}},
      +	{
      +		[]byte{0x4F, 0xB0, 0x5E, 0x15, 0x15, 0xAB, 0x73, 0xA7},
      +		[]byte{0x07, 0x2D, 0x43, 0xA0, 0x77, 0x07, 0x52, 0x92},
      +		[]byte{0x7A, 0x8E, 0x7B, 0xFA, 0x93, 0x7E, 0x89, 0xA3}},
      +	{
      +		[]byte{0x49, 0xE9, 0x5D, 0x6D, 0x4C, 0xA2, 0x29, 0xBF},
      +		[]byte{0x02, 0xFE, 0x55, 0x77, 0x81, 0x17, 0xF1, 0x2A},
      +		[]byte{0xCF, 0x9C, 0x5D, 0x7A, 0x49, 0x86, 0xAD, 0xB5}},
      +	{
      +		[]byte{0x01, 0x83, 0x10, 0xDC, 0x40, 0x9B, 0x26, 0xD6},
      +		[]byte{0x1D, 0x9D, 0x5C, 0x50, 0x18, 0xF7, 0x28, 0xC2},
      +		[]byte{0xD1, 0xAB, 0xB2, 0x90, 0x65, 0x8B, 0xC7, 0x78}},
      +	{
      +		[]byte{0x1C, 0x58, 0x7F, 0x1C, 0x13, 0x92, 0x4F, 0xEF},
      +		[]byte{0x30, 0x55, 0x32, 0x28, 0x6D, 0x6F, 0x29, 0x5A},
      +		[]byte{0x55, 0xCB, 0x37, 0x74, 0xD1, 0x3E, 0xF2, 0x01}},
      +	{
      +		[]byte{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
      +		[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
      +		[]byte{0xFA, 0x34, 0xEC, 0x48, 0x47, 0xB2, 0x68, 0xB2}},
      +	{
      +		[]byte{0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E},
      +		[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
      +		[]byte{0xA7, 0x90, 0x79, 0x51, 0x08, 0xEA, 0x3C, 0xAE}},
      +	{
      +		[]byte{0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE},
      +		[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
      +		[]byte{0xC3, 0x9E, 0x07, 0x2D, 0x9F, 0xAC, 0x63, 0x1D}},
      +	{
      +		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
      +		[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
      +		[]byte{0x01, 0x49, 0x33, 0xE0, 0xCD, 0xAF, 0xF6, 0xE4}},
      +	{
      +		[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
      +		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
      +		[]byte{0xF2, 0x1E, 0x9A, 0x77, 0xB7, 0x1C, 0x49, 0xBC}},
      +	{
      +		[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
      +		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
      +		[]byte{0x24, 0x59, 0x46, 0x88, 0x57, 0x54, 0x36, 0x9A}},
      +	{
      +		[]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
      +		[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
      +		[]byte{0x6B, 0x5C, 0x5A, 0x9C, 0x5D, 0x9E, 0x0A, 0x5A}},
      +}
      +
      +func TestCipherEncrypt(t *testing.T) {
      +	for i, tt := range encryptTests {
      +		c, err := NewCipher(tt.key)
      +		if err != nil {
      +			t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
      +			continue
      +		}
      +		ct := make([]byte, len(tt.out))
      +		c.Encrypt(ct, tt.in)
      +		for j, v := range ct {
      +			if v != tt.out[j] {
      +				t.Errorf("Cipher.Encrypt, test vector #%d: cipher-text[%d] = %#x, expected %#x", i, j, v, tt.out[j])
      +				break
      +			}
      +		}
      +	}
      +}
      +
      +func TestCipherDecrypt(t *testing.T) {
      +	for i, tt := range encryptTests {
      +		c, err := NewCipher(tt.key)
      +		if err != nil {
      +			t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
      +			continue
      +		}
      +		pt := make([]byte, len(tt.in))
      +		c.Decrypt(pt, tt.out)
      +		for j, v := range pt {
      +			if v != tt.in[j] {
      +				t.Errorf("Cipher.Decrypt, test vector #%d: plain-text[%d] = %#x, expected %#x", i, j, v, tt.in[j])
      +				break
      +			}
      +		}
      +	}
      +}
      +
      +func TestSaltedCipherKeyLength(t *testing.T) {
      +	if _, err := NewSaltedCipher(nil, []byte{'a'}); err != KeySizeError(0) {
      +		t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(0))
      +	}
      +
      +	// A 57-byte key. One over the typical blowfish restriction.
      +	key := []byte("012345678901234567890123456789012345678901234567890123456")
      +	if _, err := NewSaltedCipher(key, []byte{'a'}); err != nil {
      +		t.Errorf("NewSaltedCipher with long key, gave error %#v", err)
      +	}
      +}
      +
      +// Test vectors generated with Blowfish from OpenSSH.
      +var saltedVectors = [][8]byte{
      +	{0x0c, 0x82, 0x3b, 0x7b, 0x8d, 0x01, 0x4b, 0x7e},
      +	{0xd1, 0xe1, 0x93, 0xf0, 0x70, 0xa6, 0xdb, 0x12},
      +	{0xfc, 0x5e, 0xba, 0xde, 0xcb, 0xf8, 0x59, 0xad},
      +	{0x8a, 0x0c, 0x76, 0xe7, 0xdd, 0x2c, 0xd3, 0xa8},
      +	{0x2c, 0xcb, 0x7b, 0xee, 0xac, 0x7b, 0x7f, 0xf8},
      +	{0xbb, 0xf6, 0x30, 0x6f, 0xe1, 0x5d, 0x62, 0xbf},
      +	{0x97, 0x1e, 0xc1, 0x3d, 0x3d, 0xe0, 0x11, 0xe9},
      +	{0x06, 0xd7, 0x4d, 0xb1, 0x80, 0xa3, 0xb1, 0x38},
      +	{0x67, 0xa1, 0xa9, 0x75, 0x0e, 0x5b, 0xc6, 0xb4},
      +	{0x51, 0x0f, 0x33, 0x0e, 0x4f, 0x67, 0xd2, 0x0c},
      +	{0xf1, 0x73, 0x7e, 0xd8, 0x44, 0xea, 0xdb, 0xe5},
      +	{0x14, 0x0e, 0x16, 0xce, 0x7f, 0x4a, 0x9c, 0x7b},
      +	{0x4b, 0xfe, 0x43, 0xfd, 0xbf, 0x36, 0x04, 0x47},
      +	{0xb1, 0xeb, 0x3e, 0x15, 0x36, 0xa7, 0xbb, 0xe2},
      +	{0x6d, 0x0b, 0x41, 0xdd, 0x00, 0x98, 0x0b, 0x19},
      +	{0xd3, 0xce, 0x45, 0xce, 0x1d, 0x56, 0xb7, 0xfc},
      +	{0xd9, 0xf0, 0xfd, 0xda, 0xc0, 0x23, 0xb7, 0x93},
      +	{0x4c, 0x6f, 0xa1, 0xe4, 0x0c, 0xa8, 0xca, 0x57},
      +	{0xe6, 0x2f, 0x28, 0xa7, 0x0c, 0x94, 0x0d, 0x08},
      +	{0x8f, 0xe3, 0xf0, 0xb6, 0x29, 0xe3, 0x44, 0x03},
      +	{0xff, 0x98, 0xdd, 0x04, 0x45, 0xb4, 0x6d, 0x1f},
      +	{0x9e, 0x45, 0x4d, 0x18, 0x40, 0x53, 0xdb, 0xef},
      +	{0xb7, 0x3b, 0xef, 0x29, 0xbe, 0xa8, 0x13, 0x71},
      +	{0x02, 0x54, 0x55, 0x41, 0x8e, 0x04, 0xfc, 0xad},
      +	{0x6a, 0x0a, 0xee, 0x7c, 0x10, 0xd9, 0x19, 0xfe},
      +	{0x0a, 0x22, 0xd9, 0x41, 0xcc, 0x23, 0x87, 0x13},
      +	{0x6e, 0xff, 0x1f, 0xff, 0x36, 0x17, 0x9c, 0xbe},
      +	{0x79, 0xad, 0xb7, 0x40, 0xf4, 0x9f, 0x51, 0xa6},
      +	{0x97, 0x81, 0x99, 0xa4, 0xde, 0x9e, 0x9f, 0xb6},
      +	{0x12, 0x19, 0x7a, 0x28, 0xd0, 0xdc, 0xcc, 0x92},
      +	{0x81, 0xda, 0x60, 0x1e, 0x0e, 0xdd, 0x65, 0x56},
      +	{0x7d, 0x76, 0x20, 0xb2, 0x73, 0xc9, 0x9e, 0xee},
      +}
      +
      +func TestSaltedCipher(t *testing.T) {
      +	var key, salt [32]byte
      +	for i := range key {
      +		key[i] = byte(i)
      +		salt[i] = byte(i + 32)
      +	}
      +	for i, v := range saltedVectors {
      +		c, err := NewSaltedCipher(key[:], salt[:i])
      +		if err != nil {
      +			t.Fatal(err)
      +		}
      +		var buf [8]byte
      +		c.Encrypt(buf[:], buf[:])
      +		if v != buf {
      +			t.Errorf("%d: expected %x, got %x", i, v, buf)
      +		}
      +	}
      +}
      +
      +func BenchmarkExpandKeyWithSalt(b *testing.B) {
      +	key := make([]byte, 32)
      +	salt := make([]byte, 16)
      +	c, _ := NewCipher(key)
      +	for i := 0; i < b.N; i++ {
      +		expandKeyWithSalt(key, salt, c)
      +	}
      +}
      +
      +func BenchmarkExpandKey(b *testing.B) {
      +	key := make([]byte, 32)
      +	c, _ := NewCipher(key)
      +	for i := 0; i < b.N; i++ {
      +		ExpandKey(key, c)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go
      new file mode 100644
      index 00000000..542984aa
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go
      @@ -0,0 +1,91 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
      +package blowfish // import "golang.org/x/crypto/blowfish"
      +
      +// The code is a port of Bruce Schneier's C implementation.
      +// See http://www.schneier.com/blowfish.html.
      +
      +import "strconv"
      +
      +// The Blowfish block size in bytes.
      +const BlockSize = 8
      +
      +// A Cipher is an instance of Blowfish encryption using a particular key.
      +type Cipher struct {
      +	p              [18]uint32
      +	s0, s1, s2, s3 [256]uint32
      +}
      +
      +type KeySizeError int
      +
      +func (k KeySizeError) Error() string {
      +	return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
      +}
      +
      +// NewCipher creates and returns a Cipher.
      +// The key argument should be the Blowfish key, from 1 to 56 bytes.
      +func NewCipher(key []byte) (*Cipher, error) {
      +	var result Cipher
      +	if k := len(key); k < 1 || k > 56 {
      +		return nil, KeySizeError(k)
      +	}
      +	initCipher(&result)
      +	ExpandKey(key, &result)
      +	return &result, nil
      +}
      +
      +// NewSaltedCipher creates a returns a Cipher that folds a salt into its key
      +// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
      +// sufficient and desirable. For bcrypt compatiblity, the key can be over 56
      +// bytes.
      +func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
      +	if len(salt) == 0 {
      +		return NewCipher(key)
      +	}
      +	var result Cipher
      +	if k := len(key); k < 1 {
      +		return nil, KeySizeError(k)
      +	}
      +	initCipher(&result)
      +	expandKeyWithSalt(key, salt, &result)
      +	return &result, nil
      +}
      +
      +// BlockSize returns the Blowfish block size, 8 bytes.
      +// It is necessary to satisfy the Block interface in the
      +// package "crypto/cipher".
      +func (c *Cipher) BlockSize() int { return BlockSize }
      +
      +// Encrypt encrypts the 8-byte buffer src using the key k
      +// and stores the result in dst.
      +// Note that for amounts of data larger than a block,
      +// it is not safe to just call Encrypt on successive blocks;
      +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
      +func (c *Cipher) Encrypt(dst, src []byte) {
      +	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
      +	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
      +	l, r = encryptBlock(l, r, c)
      +	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
      +	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
      +}
      +
      +// Decrypt decrypts the 8-byte buffer src using the key k
      +// and stores the result in dst.
      +func (c *Cipher) Decrypt(dst, src []byte) {
      +	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
      +	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
      +	l, r = decryptBlock(l, r, c)
      +	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
      +	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
      +}
      +
      +func initCipher(c *Cipher) {
      +	copy(c.p[0:], p[0:])
      +	copy(c.s0[0:], s0[0:])
      +	copy(c.s1[0:], s1[0:])
      +	copy(c.s2[0:], s2[0:])
      +	copy(c.s3[0:], s3[0:])
      +}
      diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go
      new file mode 100644
      index 00000000..8c5ee4cb
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/blowfish/const.go
      @@ -0,0 +1,199 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// The startup permutation array and substitution boxes.
      +// They are the hexadecimal digits of PI; see:
      +// http://www.schneier.com/code/constants.txt.
      +
      +package blowfish
      +
      +var s0 = [256]uint32{
      +	0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
      +	0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
      +	0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
      +	0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
      +	0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
      +	0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
      +	0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
      +	0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
      +	0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
      +	0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
      +	0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
      +	0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
      +	0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
      +	0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
      +	0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
      +	0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
      +	0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
      +	0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
      +	0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
      +	0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
      +	0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
      +	0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
      +	0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
      +	0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
      +	0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
      +	0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
      +	0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
      +	0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
      +	0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
      +	0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
      +	0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
      +	0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
      +	0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
      +	0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
      +	0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
      +	0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
      +	0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
      +	0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
      +	0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
      +	0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
      +	0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
      +	0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
      +	0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
      +}
      +
      +var s1 = [256]uint32{
      +	0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
      +	0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
      +	0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
      +	0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
      +	0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
      +	0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
      +	0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
      +	0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
      +	0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
      +	0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
      +	0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
      +	0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
      +	0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
      +	0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
      +	0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
      +	0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
      +	0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
      +	0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
      +	0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
      +	0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
      +	0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
      +	0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
      +	0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
      +	0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
      +	0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
      +	0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
      +	0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
      +	0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
      +	0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
      +	0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
      +	0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
      +	0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
      +	0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
      +	0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
      +	0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
      +	0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
      +	0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
      +	0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
      +	0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
      +	0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
      +	0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
      +	0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
      +	0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
      +}
      +
      +var s2 = [256]uint32{
      +	0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
      +	0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
      +	0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
      +	0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
      +	0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
      +	0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
      +	0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
      +	0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
      +	0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
      +	0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
      +	0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
      +	0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
      +	0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
      +	0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
      +	0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
      +	0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
      +	0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
      +	0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
      +	0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
      +	0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
      +	0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
      +	0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
      +	0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
      +	0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
      +	0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
      +	0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
      +	0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
      +	0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
      +	0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
      +	0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
      +	0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
      +	0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
      +	0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
      +	0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
      +	0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
      +	0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
      +	0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
      +	0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
      +	0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
      +	0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
      +	0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
      +	0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
      +	0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
      +}
      +
      +var s3 = [256]uint32{
      +	0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
      +	0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
      +	0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
      +	0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
      +	0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
      +	0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
      +	0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
      +	0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
      +	0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
      +	0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
      +	0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
      +	0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
      +	0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
      +	0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
      +	0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
      +	0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
      +	0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
      +	0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
      +	0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
      +	0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
      +	0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
      +	0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
      +	0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
      +	0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
      +	0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
      +	0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
      +	0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
      +	0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
      +	0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
      +	0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
      +	0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
      +	0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
      +	0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
      +	0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
      +	0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
      +	0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
      +	0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
      +	0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
      +	0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
      +	0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
      +	0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
      +	0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
      +	0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
      +}
      +
      +var p = [18]uint32{
      +	0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
      +	0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
      +	0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
      +}
      diff --git a/vendor/golang.org/x/crypto/bn256/bn256.go b/vendor/golang.org/x/crypto/bn256/bn256.go
      new file mode 100644
      index 00000000..014f8b35
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bn256/bn256.go
      @@ -0,0 +1,404 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package bn256 implements a particular bilinear group at the 128-bit security level.
      +//
      +// Bilinear groups are the basis of many of the new cryptographic protocols
      +// that have been proposed over the past decade. They consist of a triplet of
      +// groups (G₁, G₂ and GT) such that there exists a function e(g₁ˣ,g₂ʸ)=gTˣʸ
      +// (where gₓ is a generator of the respective group). That function is called
      +// a pairing function.
      +//
      +// This package specifically implements the Optimal Ate pairing over a 256-bit
      +// Barreto-Naehrig curve as described in
      +// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible
      +// with the implementation described in that paper.
      +package bn256 // import "golang.org/x/crypto/bn256"
      +
      +import (
      +	"crypto/rand"
      +	"io"
      +	"math/big"
      +)
      +
      +// BUG(agl): this implementation is not constant time.
      +// TODO(agl): keep GF(p²) elements in Mongomery form.
      +
      +// G1 is an abstract cyclic group. The zero value is suitable for use as the
      +// output of an operation, but cannot be used as an input.
      +type G1 struct {
      +	p *curvePoint
      +}
      +
      +// RandomG1 returns x and g₁ˣ where x is a random, non-zero number read from r.
      +func RandomG1(r io.Reader) (*big.Int, *G1, error) {
      +	var k *big.Int
      +	var err error
      +
      +	for {
      +		k, err = rand.Int(r, Order)
      +		if err != nil {
      +			return nil, nil, err
      +		}
      +		if k.Sign() > 0 {
      +			break
      +		}
      +	}
      +
      +	return k, new(G1).ScalarBaseMult(k), nil
      +}
      +
      +func (g *G1) String() string {
      +	return "bn256.G1" + g.p.String()
      +}
      +
      +// ScalarBaseMult sets e to g*k where g is the generator of the group and
      +// then returns e.
      +func (e *G1) ScalarBaseMult(k *big.Int) *G1 {
      +	if e.p == nil {
      +		e.p = newCurvePoint(nil)
      +	}
      +	e.p.Mul(curveGen, k, new(bnPool))
      +	return e
      +}
      +
      +// ScalarMult sets e to a*k and then returns e.
      +func (e *G1) ScalarMult(a *G1, k *big.Int) *G1 {
      +	if e.p == nil {
      +		e.p = newCurvePoint(nil)
      +	}
      +	e.p.Mul(a.p, k, new(bnPool))
      +	return e
      +}
      +
      +// Add sets e to a+b and then returns e.
      +// BUG(agl): this function is not complete: a==b fails.
      +func (e *G1) Add(a, b *G1) *G1 {
      +	if e.p == nil {
      +		e.p = newCurvePoint(nil)
      +	}
      +	e.p.Add(a.p, b.p, new(bnPool))
      +	return e
      +}
      +
      +// Neg sets e to -a and then returns e.
      +func (e *G1) Neg(a *G1) *G1 {
      +	if e.p == nil {
      +		e.p = newCurvePoint(nil)
      +	}
      +	e.p.Negative(a.p)
      +	return e
      +}
      +
      +// Marshal converts n to a byte slice.
      +func (n *G1) Marshal() []byte {
      +	n.p.MakeAffine(nil)
      +
      +	xBytes := new(big.Int).Mod(n.p.x, p).Bytes()
      +	yBytes := new(big.Int).Mod(n.p.y, p).Bytes()
      +
      +	// Each value is a 256-bit number.
      +	const numBytes = 256 / 8
      +
      +	ret := make([]byte, numBytes*2)
      +	copy(ret[1*numBytes-len(xBytes):], xBytes)
      +	copy(ret[2*numBytes-len(yBytes):], yBytes)
      +
      +	return ret
      +}
      +
      +// Unmarshal sets e to the result of converting the output of Marshal back into
      +// a group element and then returns e.
      +func (e *G1) Unmarshal(m []byte) (*G1, bool) {
      +	// Each value is a 256-bit number.
      +	const numBytes = 256 / 8
      +
      +	if len(m) != 2*numBytes {
      +		return nil, false
      +	}
      +
      +	if e.p == nil {
      +		e.p = newCurvePoint(nil)
      +	}
      +
      +	e.p.x.SetBytes(m[0*numBytes : 1*numBytes])
      +	e.p.y.SetBytes(m[1*numBytes : 2*numBytes])
      +
      +	if e.p.x.Sign() == 0 && e.p.y.Sign() == 0 {
      +		// This is the point at infinity.
      +		e.p.y.SetInt64(1)
      +		e.p.z.SetInt64(0)
      +		e.p.t.SetInt64(0)
      +	} else {
      +		e.p.z.SetInt64(1)
      +		e.p.t.SetInt64(1)
      +
      +		if !e.p.IsOnCurve() {
      +			return nil, false
      +		}
      +	}
      +
      +	return e, true
      +}
      +
      +// G2 is an abstract cyclic group. The zero value is suitable for use as the
      +// output of an operation, but cannot be used as an input.
      +type G2 struct {
      +	p *twistPoint
      +}
      +
+// RandomG2 returns x and g₂ˣ where x is a random, non-zero number read from r.
      +func RandomG2(r io.Reader) (*big.Int, *G2, error) {
      +	var k *big.Int
      +	var err error
      +
      +	for {
      +		k, err = rand.Int(r, Order)
      +		if err != nil {
      +			return nil, nil, err
      +		}
      +		if k.Sign() > 0 {
      +			break
      +		}
      +	}
      +
      +	return k, new(G2).ScalarBaseMult(k), nil
      +}
      +
      +func (g *G2) String() string {
      +	return "bn256.G2" + g.p.String()
      +}
      +
      +// ScalarBaseMult sets e to g*k where g is the generator of the group and
+// then returns e.
      +func (e *G2) ScalarBaseMult(k *big.Int) *G2 {
      +	if e.p == nil {
      +		e.p = newTwistPoint(nil)
      +	}
      +	e.p.Mul(twistGen, k, new(bnPool))
      +	return e
      +}
      +
      +// ScalarMult sets e to a*k and then returns e.
      +func (e *G2) ScalarMult(a *G2, k *big.Int) *G2 {
      +	if e.p == nil {
      +		e.p = newTwistPoint(nil)
      +	}
      +	e.p.Mul(a.p, k, new(bnPool))
      +	return e
      +}
      +
      +// Add sets e to a+b and then returns e.
      +// BUG(agl): this function is not complete: a==b fails.
      +func (e *G2) Add(a, b *G2) *G2 {
      +	if e.p == nil {
      +		e.p = newTwistPoint(nil)
      +	}
      +	e.p.Add(a.p, b.p, new(bnPool))
      +	return e
      +}
      +
      +// Marshal converts n into a byte slice.
      +func (n *G2) Marshal() []byte {
      +	n.p.MakeAffine(nil)
      +
      +	xxBytes := new(big.Int).Mod(n.p.x.x, p).Bytes()
      +	xyBytes := new(big.Int).Mod(n.p.x.y, p).Bytes()
      +	yxBytes := new(big.Int).Mod(n.p.y.x, p).Bytes()
      +	yyBytes := new(big.Int).Mod(n.p.y.y, p).Bytes()
      +
      +	// Each value is a 256-bit number.
      +	const numBytes = 256 / 8
      +
      +	ret := make([]byte, numBytes*4)
      +	copy(ret[1*numBytes-len(xxBytes):], xxBytes)
      +	copy(ret[2*numBytes-len(xyBytes):], xyBytes)
      +	copy(ret[3*numBytes-len(yxBytes):], yxBytes)
      +	copy(ret[4*numBytes-len(yyBytes):], yyBytes)
      +
      +	return ret
      +}
      +
      +// Unmarshal sets e to the result of converting the output of Marshal back into
      +// a group element and then returns e.
      +func (e *G2) Unmarshal(m []byte) (*G2, bool) {
      +	// Each value is a 256-bit number.
      +	const numBytes = 256 / 8
      +
      +	if len(m) != 4*numBytes {
      +		return nil, false
      +	}
      +
      +	if e.p == nil {
      +		e.p = newTwistPoint(nil)
      +	}
      +
      +	e.p.x.x.SetBytes(m[0*numBytes : 1*numBytes])
      +	e.p.x.y.SetBytes(m[1*numBytes : 2*numBytes])
      +	e.p.y.x.SetBytes(m[2*numBytes : 3*numBytes])
      +	e.p.y.y.SetBytes(m[3*numBytes : 4*numBytes])
      +
      +	if e.p.x.x.Sign() == 0 &&
      +		e.p.x.y.Sign() == 0 &&
      +		e.p.y.x.Sign() == 0 &&
      +		e.p.y.y.Sign() == 0 {
      +		// This is the point at infinity.
      +		e.p.y.SetOne()
      +		e.p.z.SetZero()
      +		e.p.t.SetZero()
      +	} else {
      +		e.p.z.SetOne()
      +		e.p.t.SetOne()
      +
      +		if !e.p.IsOnCurve() {
      +			return nil, false
      +		}
      +	}
      +
      +	return e, true
      +}
      +
      +// GT is an abstract cyclic group. The zero value is suitable for use as the
      +// output of an operation, but cannot be used as an input.
      +type GT struct {
      +	p *gfP12
      +}
      +
      +func (g *GT) String() string {
      +	return "bn256.GT" + g.p.String()
      +}
      +
      +// ScalarMult sets e to a*k and then returns e.
      +func (e *GT) ScalarMult(a *GT, k *big.Int) *GT {
      +	if e.p == nil {
      +		e.p = newGFp12(nil)
      +	}
      +	e.p.Exp(a.p, k, new(bnPool))
      +	return e
      +}
      +
      +// Add sets e to a+b and then returns e.
      +func (e *GT) Add(a, b *GT) *GT {
      +	if e.p == nil {
      +		e.p = newGFp12(nil)
      +	}
      +	e.p.Mul(a.p, b.p, new(bnPool))
      +	return e
      +}
      +
      +// Neg sets e to -a and then returns e.
      +func (e *GT) Neg(a *GT) *GT {
      +	if e.p == nil {
      +		e.p = newGFp12(nil)
      +	}
      +	e.p.Invert(a.p, new(bnPool))
      +	return e
      +}
      +
      +// Marshal converts n into a byte slice.
      +func (n *GT) Marshal() []byte {
      +	n.p.Minimal()
      +
      +	xxxBytes := n.p.x.x.x.Bytes()
      +	xxyBytes := n.p.x.x.y.Bytes()
      +	xyxBytes := n.p.x.y.x.Bytes()
      +	xyyBytes := n.p.x.y.y.Bytes()
      +	xzxBytes := n.p.x.z.x.Bytes()
      +	xzyBytes := n.p.x.z.y.Bytes()
      +	yxxBytes := n.p.y.x.x.Bytes()
      +	yxyBytes := n.p.y.x.y.Bytes()
      +	yyxBytes := n.p.y.y.x.Bytes()
      +	yyyBytes := n.p.y.y.y.Bytes()
      +	yzxBytes := n.p.y.z.x.Bytes()
      +	yzyBytes := n.p.y.z.y.Bytes()
      +
      +	// Each value is a 256-bit number.
      +	const numBytes = 256 / 8
      +
      +	ret := make([]byte, numBytes*12)
      +	copy(ret[1*numBytes-len(xxxBytes):], xxxBytes)
      +	copy(ret[2*numBytes-len(xxyBytes):], xxyBytes)
      +	copy(ret[3*numBytes-len(xyxBytes):], xyxBytes)
      +	copy(ret[4*numBytes-len(xyyBytes):], xyyBytes)
      +	copy(ret[5*numBytes-len(xzxBytes):], xzxBytes)
      +	copy(ret[6*numBytes-len(xzyBytes):], xzyBytes)
      +	copy(ret[7*numBytes-len(yxxBytes):], yxxBytes)
      +	copy(ret[8*numBytes-len(yxyBytes):], yxyBytes)
      +	copy(ret[9*numBytes-len(yyxBytes):], yyxBytes)
      +	copy(ret[10*numBytes-len(yyyBytes):], yyyBytes)
      +	copy(ret[11*numBytes-len(yzxBytes):], yzxBytes)
      +	copy(ret[12*numBytes-len(yzyBytes):], yzyBytes)
      +
      +	return ret
      +}
      +
      +// Unmarshal sets e to the result of converting the output of Marshal back into
      +// a group element and then returns e.
      +func (e *GT) Unmarshal(m []byte) (*GT, bool) {
      +	// Each value is a 256-bit number.
      +	const numBytes = 256 / 8
      +
      +	if len(m) != 12*numBytes {
      +		return nil, false
      +	}
      +
      +	if e.p == nil {
      +		e.p = newGFp12(nil)
      +	}
      +
      +	e.p.x.x.x.SetBytes(m[0*numBytes : 1*numBytes])
      +	e.p.x.x.y.SetBytes(m[1*numBytes : 2*numBytes])
      +	e.p.x.y.x.SetBytes(m[2*numBytes : 3*numBytes])
      +	e.p.x.y.y.SetBytes(m[3*numBytes : 4*numBytes])
      +	e.p.x.z.x.SetBytes(m[4*numBytes : 5*numBytes])
      +	e.p.x.z.y.SetBytes(m[5*numBytes : 6*numBytes])
      +	e.p.y.x.x.SetBytes(m[6*numBytes : 7*numBytes])
      +	e.p.y.x.y.SetBytes(m[7*numBytes : 8*numBytes])
      +	e.p.y.y.x.SetBytes(m[8*numBytes : 9*numBytes])
      +	e.p.y.y.y.SetBytes(m[9*numBytes : 10*numBytes])
      +	e.p.y.z.x.SetBytes(m[10*numBytes : 11*numBytes])
      +	e.p.y.z.y.SetBytes(m[11*numBytes : 12*numBytes])
      +
      +	return e, true
      +}
      +
      +// Pair calculates an Optimal Ate pairing.
      +func Pair(g1 *G1, g2 *G2) *GT {
      +	return &GT{optimalAte(g2.p, g1.p, new(bnPool))}
      +}
      +
      +// bnPool implements a tiny cache of *big.Int objects that's used to reduce the
      +// number of allocations made during processing.
      +type bnPool struct {
      +	bns   []*big.Int
      +	count int
      +}
      +
      +func (pool *bnPool) Get() *big.Int {
      +	if pool == nil {
      +		return new(big.Int)
      +	}
      +
      +	pool.count++
      +	l := len(pool.bns)
      +	if l == 0 {
      +		return new(big.Int)
      +	}
      +
      +	bn := pool.bns[l-1]
      +	pool.bns = pool.bns[:l-1]
      +	return bn
      +}
      +
      +func (pool *bnPool) Put(bn *big.Int) {
      +	if pool == nil {
      +		return
      +	}
      +	pool.bns = append(pool.bns, bn)
      +	pool.count--
      +}
      +
      +func (pool *bnPool) Count() int {
      +	return pool.count
      +}
      diff --git a/vendor/golang.org/x/crypto/bn256/bn256_test.go b/vendor/golang.org/x/crypto/bn256/bn256_test.go
      new file mode 100644
      index 00000000..1cec3884
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bn256/bn256_test.go
      @@ -0,0 +1,304 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bn256
      +
      +import (
      +	"bytes"
      +	"crypto/rand"
      +	"math/big"
      +	"testing"
      +)
      +
      +func TestGFp2Invert(t *testing.T) {
      +	pool := new(bnPool)
      +
      +	a := newGFp2(pool)
      +	a.x.SetString("23423492374", 10)
      +	a.y.SetString("12934872398472394827398470", 10)
      +
      +	inv := newGFp2(pool)
      +	inv.Invert(a, pool)
      +
      +	b := newGFp2(pool).Mul(inv, a, pool)
      +	if b.x.Int64() != 0 || b.y.Int64() != 1 {
      +		t.Fatalf("bad result for a^-1*a: %s %s", b.x, b.y)
      +	}
      +
      +	a.Put(pool)
      +	b.Put(pool)
      +	inv.Put(pool)
      +
      +	if c := pool.Count(); c > 0 {
      +		t.Errorf("Pool count non-zero: %d\n", c)
      +	}
      +}
      +
      +func isZero(n *big.Int) bool {
      +	return new(big.Int).Mod(n, p).Int64() == 0
      +}
      +
      +func isOne(n *big.Int) bool {
      +	return new(big.Int).Mod(n, p).Int64() == 1
      +}
      +
      +func TestGFp6Invert(t *testing.T) {
      +	pool := new(bnPool)
      +
      +	a := newGFp6(pool)
      +	a.x.x.SetString("239487238491", 10)
      +	a.x.y.SetString("2356249827341", 10)
      +	a.y.x.SetString("082659782", 10)
      +	a.y.y.SetString("182703523765", 10)
      +	a.z.x.SetString("978236549263", 10)
      +	a.z.y.SetString("64893242", 10)
      +
      +	inv := newGFp6(pool)
      +	inv.Invert(a, pool)
      +
      +	b := newGFp6(pool).Mul(inv, a, pool)
      +	if !isZero(b.x.x) ||
      +		!isZero(b.x.y) ||
      +		!isZero(b.y.x) ||
      +		!isZero(b.y.y) ||
      +		!isZero(b.z.x) ||
      +		!isOne(b.z.y) {
      +		t.Fatalf("bad result for a^-1*a: %s", b)
      +	}
      +
      +	a.Put(pool)
      +	b.Put(pool)
      +	inv.Put(pool)
      +
      +	if c := pool.Count(); c > 0 {
      +		t.Errorf("Pool count non-zero: %d\n", c)
      +	}
      +}
      +
      +func TestGFp12Invert(t *testing.T) {
      +	pool := new(bnPool)
      +
      +	a := newGFp12(pool)
      +	a.x.x.x.SetString("239846234862342323958623", 10)
      +	a.x.x.y.SetString("2359862352529835623", 10)
      +	a.x.y.x.SetString("928836523", 10)
      +	a.x.y.y.SetString("9856234", 10)
      +	a.x.z.x.SetString("235635286", 10)
      +	a.x.z.y.SetString("5628392833", 10)
      +	a.y.x.x.SetString("252936598265329856238956532167968", 10)
      +	a.y.x.y.SetString("23596239865236954178968", 10)
      +	a.y.y.x.SetString("95421692834", 10)
      +	a.y.y.y.SetString("236548", 10)
      +	a.y.z.x.SetString("924523", 10)
      +	a.y.z.y.SetString("12954623", 10)
      +
      +	inv := newGFp12(pool)
      +	inv.Invert(a, pool)
      +
      +	b := newGFp12(pool).Mul(inv, a, pool)
      +	if !isZero(b.x.x.x) ||
      +		!isZero(b.x.x.y) ||
      +		!isZero(b.x.y.x) ||
      +		!isZero(b.x.y.y) ||
      +		!isZero(b.x.z.x) ||
      +		!isZero(b.x.z.y) ||
      +		!isZero(b.y.x.x) ||
      +		!isZero(b.y.x.y) ||
      +		!isZero(b.y.y.x) ||
      +		!isZero(b.y.y.y) ||
      +		!isZero(b.y.z.x) ||
      +		!isOne(b.y.z.y) {
      +		t.Fatalf("bad result for a^-1*a: %s", b)
      +	}
      +
      +	a.Put(pool)
      +	b.Put(pool)
      +	inv.Put(pool)
      +
      +	if c := pool.Count(); c > 0 {
      +		t.Errorf("Pool count non-zero: %d\n", c)
      +	}
      +}
      +
      +func TestCurveImpl(t *testing.T) {
      +	pool := new(bnPool)
      +
      +	g := &curvePoint{
      +		pool.Get().SetInt64(1),
      +		pool.Get().SetInt64(-2),
      +		pool.Get().SetInt64(1),
      +		pool.Get().SetInt64(0),
      +	}
      +
      +	x := pool.Get().SetInt64(32498273234)
      +	X := newCurvePoint(pool).Mul(g, x, pool)
      +
      +	y := pool.Get().SetInt64(98732423523)
      +	Y := newCurvePoint(pool).Mul(g, y, pool)
      +
      +	s1 := newCurvePoint(pool).Mul(X, y, pool).MakeAffine(pool)
      +	s2 := newCurvePoint(pool).Mul(Y, x, pool).MakeAffine(pool)
      +
      +	if s1.x.Cmp(s2.x) != 0 ||
      +		s2.x.Cmp(s1.x) != 0 {
      +		t.Errorf("DH points don't match: (%s, %s) (%s, %s)", s1.x, s1.y, s2.x, s2.y)
      +	}
      +
      +	pool.Put(x)
      +	X.Put(pool)
      +	pool.Put(y)
      +	Y.Put(pool)
      +	s1.Put(pool)
      +	s2.Put(pool)
      +	g.Put(pool)
      +
      +	if c := pool.Count(); c > 0 {
      +		t.Errorf("Pool count non-zero: %d\n", c)
      +	}
      +}
      +
      +func TestOrderG1(t *testing.T) {
      +	g := new(G1).ScalarBaseMult(Order)
      +	if !g.p.IsInfinity() {
      +		t.Error("G1 has incorrect order")
      +	}
      +
      +	one := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
      +	g.Add(g, one)
      +	g.p.MakeAffine(nil)
      +	if g.p.x.Cmp(one.p.x) != 0 || g.p.y.Cmp(one.p.y) != 0 {
      +		t.Errorf("1+0 != 1 in G1")
      +	}
      +}
      +
      +func TestOrderG2(t *testing.T) {
      +	g := new(G2).ScalarBaseMult(Order)
      +	if !g.p.IsInfinity() {
      +		t.Error("G2 has incorrect order")
      +	}
      +
      +	one := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
      +	g.Add(g, one)
      +	g.p.MakeAffine(nil)
      +	if g.p.x.x.Cmp(one.p.x.x) != 0 ||
      +		g.p.x.y.Cmp(one.p.x.y) != 0 ||
      +		g.p.y.x.Cmp(one.p.y.x) != 0 ||
      +		g.p.y.y.Cmp(one.p.y.y) != 0 {
      +		t.Errorf("1+0 != 1 in G2")
      +	}
      +}
      +
      +func TestOrderGT(t *testing.T) {
      +	gt := Pair(&G1{curveGen}, &G2{twistGen})
      +	g := new(GT).ScalarMult(gt, Order)
      +	if !g.p.IsOne() {
      +		t.Error("GT has incorrect order")
      +	}
      +}
      +
      +func TestBilinearity(t *testing.T) {
      +	for i := 0; i < 2; i++ {
      +		a, p1, _ := RandomG1(rand.Reader)
      +		b, p2, _ := RandomG2(rand.Reader)
      +		e1 := Pair(p1, p2)
      +
      +		e2 := Pair(&G1{curveGen}, &G2{twistGen})
      +		e2.ScalarMult(e2, a)
      +		e2.ScalarMult(e2, b)
      +
      +		minusE2 := new(GT).Neg(e2)
      +		e1.Add(e1, minusE2)
      +
      +		if !e1.p.IsOne() {
      +			t.Fatalf("bad pairing result: %s", e1)
      +		}
      +	}
      +}
      +
      +func TestG1Marshal(t *testing.T) {
      +	g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
      +	form := g.Marshal()
      +	_, ok := new(G1).Unmarshal(form)
      +	if !ok {
      +		t.Fatalf("failed to unmarshal")
      +	}
      +
      +	g.ScalarBaseMult(Order)
      +	form = g.Marshal()
      +	g2, ok := new(G1).Unmarshal(form)
      +	if !ok {
      +		t.Fatalf("failed to unmarshal ∞")
      +	}
      +	if !g2.p.IsInfinity() {
      +		t.Fatalf("∞ unmarshaled incorrectly")
      +	}
      +}
      +
      +func TestG2Marshal(t *testing.T) {
      +	g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
      +	form := g.Marshal()
      +	_, ok := new(G2).Unmarshal(form)
      +	if !ok {
      +		t.Fatalf("failed to unmarshal")
      +	}
      +
      +	g.ScalarBaseMult(Order)
      +	form = g.Marshal()
      +	g2, ok := new(G2).Unmarshal(form)
      +	if !ok {
      +		t.Fatalf("failed to unmarshal ∞")
      +	}
      +	if !g2.p.IsInfinity() {
      +		t.Fatalf("∞ unmarshaled incorrectly")
      +	}
      +}
      +
      +func TestG1Identity(t *testing.T) {
      +	g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(0))
      +	if !g.p.IsInfinity() {
      +		t.Error("failure")
      +	}
      +}
      +
      +func TestG2Identity(t *testing.T) {
      +	g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(0))
      +	if !g.p.IsInfinity() {
      +		t.Error("failure")
      +	}
      +}
      +
      +func TestTripartiteDiffieHellman(t *testing.T) {
      +	a, _ := rand.Int(rand.Reader, Order)
      +	b, _ := rand.Int(rand.Reader, Order)
      +	c, _ := rand.Int(rand.Reader, Order)
      +
      +	pa, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(a).Marshal())
      +	qa, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(a).Marshal())
      +	pb, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(b).Marshal())
      +	qb, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(b).Marshal())
      +	pc, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(c).Marshal())
      +	qc, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(c).Marshal())
      +
      +	k1 := Pair(pb, qc)
      +	k1.ScalarMult(k1, a)
      +	k1Bytes := k1.Marshal()
      +
      +	k2 := Pair(pc, qa)
      +	k2.ScalarMult(k2, b)
      +	k2Bytes := k2.Marshal()
      +
      +	k3 := Pair(pa, qb)
      +	k3.ScalarMult(k3, c)
      +	k3Bytes := k3.Marshal()
      +
      +	if !bytes.Equal(k1Bytes, k2Bytes) || !bytes.Equal(k2Bytes, k3Bytes) {
      +		t.Errorf("keys didn't agree")
      +	}
      +}
      +
      +func BenchmarkPairing(b *testing.B) {
      +	for i := 0; i < b.N; i++ {
      +		Pair(&G1{curveGen}, &G2{twistGen})
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/bn256/constants.go b/vendor/golang.org/x/crypto/bn256/constants.go
      new file mode 100644
      index 00000000..08ccfdf3
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bn256/constants.go
      @@ -0,0 +1,44 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bn256
      +
      +import (
      +	"math/big"
      +)
      +
      +func bigFromBase10(s string) *big.Int {
      +	n, _ := new(big.Int).SetString(s, 10)
      +	return n
      +}
      +
      +// u is the BN parameter that determines the prime: 1868033³.
      +var u = bigFromBase10("6518589491078791937")
      +
+// p is a prime over which we form a basic field: 36u⁴+36u³+24u²+6u+1.
      +var p = bigFromBase10("65000549695646603732796438742359905742825358107623003571877145026864184071783")
      +
+// Order is the number of elements in both G₁ and G₂: 36u⁴+36u³+18u²+6u+1.
      +var Order = bigFromBase10("65000549695646603732796438742359905742570406053903786389881062969044166799969")
      +
      +// xiToPMinus1Over6 is ξ^((p-1)/6) where ξ = i+3.
      +var xiToPMinus1Over6 = &gfP2{bigFromBase10("8669379979083712429711189836753509758585994370025260553045152614783263110636"), bigFromBase10("19998038925833620163537568958541907098007303196759855091367510456613536016040")}
      +
      +// xiToPMinus1Over3 is ξ^((p-1)/3) where ξ = i+3.
      +var xiToPMinus1Over3 = &gfP2{bigFromBase10("26098034838977895781559542626833399156321265654106457577426020397262786167059"), bigFromBase10("15931493369629630809226283458085260090334794394361662678240713231519278691715")}
      +
      +// xiToPMinus1Over2 is ξ^((p-1)/2) where ξ = i+3.
      +var xiToPMinus1Over2 = &gfP2{bigFromBase10("50997318142241922852281555961173165965672272825141804376761836765206060036244"), bigFromBase10("38665955945962842195025998234511023902832543644254935982879660597356748036009")}
      +
      +// xiToPSquaredMinus1Over3 is ξ^((p²-1)/3) where ξ = i+3.
      +var xiToPSquaredMinus1Over3 = bigFromBase10("65000549695646603727810655408050771481677621702948236658134783353303381437752")
      +
      +// xiTo2PSquaredMinus2Over3 is ξ^((2p²-2)/3) where ξ = i+3 (a cubic root of unity, mod p).
      +var xiTo2PSquaredMinus2Over3 = bigFromBase10("4985783334309134261147736404674766913742361673560802634030")
      +
+// xiToPSquaredMinus1Over6 is ξ^((p²-1)/6) where ξ = i+3 (a cubic root of -1, mod p).
      +var xiToPSquaredMinus1Over6 = bigFromBase10("65000549695646603727810655408050771481677621702948236658134783353303381437753")
      +
      +// xiTo2PMinus2Over3 is ξ^((2p-2)/3) where ξ = i+3.
      +var xiTo2PMinus2Over3 = &gfP2{bigFromBase10("19885131339612776214803633203834694332692106372356013117629940868870585019582"), bigFromBase10("21645619881471562101905880913352894726728173167203616652430647841922248593627")}
      diff --git a/vendor/golang.org/x/crypto/bn256/curve.go b/vendor/golang.org/x/crypto/bn256/curve.go
      new file mode 100644
      index 00000000..55b7063f
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bn256/curve.go
      @@ -0,0 +1,278 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bn256
      +
      +import (
      +	"math/big"
      +)
      +
      +// curvePoint implements the elliptic curve y²=x³+3. Points are kept in
      +// Jacobian form and t=z² when valid. G₁ is the set of points of this curve on
      +// GF(p).
      +type curvePoint struct {
      +	x, y, z, t *big.Int
      +}
      +
      +var curveB = new(big.Int).SetInt64(3)
      +
      +// curveGen is the generator of G₁.
      +var curveGen = &curvePoint{
      +	new(big.Int).SetInt64(1),
      +	new(big.Int).SetInt64(-2),
      +	new(big.Int).SetInt64(1),
      +	new(big.Int).SetInt64(1),
      +}
      +
      +func newCurvePoint(pool *bnPool) *curvePoint {
      +	return &curvePoint{
      +		pool.Get(),
      +		pool.Get(),
      +		pool.Get(),
      +		pool.Get(),
      +	}
      +}
      +
      +func (c *curvePoint) String() string {
      +	c.MakeAffine(new(bnPool))
      +	return "(" + c.x.String() + ", " + c.y.String() + ")"
      +}
      +
      +func (c *curvePoint) Put(pool *bnPool) {
      +	pool.Put(c.x)
      +	pool.Put(c.y)
      +	pool.Put(c.z)
      +	pool.Put(c.t)
      +}
      +
      +func (c *curvePoint) Set(a *curvePoint) {
      +	c.x.Set(a.x)
      +	c.y.Set(a.y)
      +	c.z.Set(a.z)
      +	c.t.Set(a.t)
      +}
      +
      +// IsOnCurve returns true iff c is on the curve where c must be in affine form.
      +func (c *curvePoint) IsOnCurve() bool {
      +	yy := new(big.Int).Mul(c.y, c.y)
      +	xxx := new(big.Int).Mul(c.x, c.x)
      +	xxx.Mul(xxx, c.x)
      +	yy.Sub(yy, xxx)
      +	yy.Sub(yy, curveB)
      +	if yy.Sign() < 0 || yy.Cmp(p) >= 0 {
      +		yy.Mod(yy, p)
      +	}
      +	return yy.Sign() == 0
      +}
      +
      +func (c *curvePoint) SetInfinity() {
      +	c.z.SetInt64(0)
      +}
      +
      +func (c *curvePoint) IsInfinity() bool {
      +	return c.z.Sign() == 0
      +}
      +
      +func (c *curvePoint) Add(a, b *curvePoint, pool *bnPool) {
      +	if a.IsInfinity() {
      +		c.Set(b)
      +		return
      +	}
      +	if b.IsInfinity() {
      +		c.Set(a)
      +		return
      +	}
      +
      +	// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
      +
      +	// Normalize the points by replacing a = [x1:y1:z1] and b = [x2:y2:z2]
      +	// by [u1:s1:z1·z2] and [u2:s2:z1·z2]
+	// where u1 = x1·z2², s1 = y1·z2³ and u2 = x2·z1², s2 = y2·z1³
      +	z1z1 := pool.Get().Mul(a.z, a.z)
      +	z1z1.Mod(z1z1, p)
      +	z2z2 := pool.Get().Mul(b.z, b.z)
      +	z2z2.Mod(z2z2, p)
      +	u1 := pool.Get().Mul(a.x, z2z2)
      +	u1.Mod(u1, p)
      +	u2 := pool.Get().Mul(b.x, z1z1)
      +	u2.Mod(u2, p)
      +
      +	t := pool.Get().Mul(b.z, z2z2)
      +	t.Mod(t, p)
      +	s1 := pool.Get().Mul(a.y, t)
      +	s1.Mod(s1, p)
      +
      +	t.Mul(a.z, z1z1)
      +	t.Mod(t, p)
      +	s2 := pool.Get().Mul(b.y, t)
      +	s2.Mod(s2, p)
      +
      +	// Compute x = (2h)²(s²-u1-u2)
      +	// where s = (s2-s1)/(u2-u1) is the slope of the line through
      +	// (u1,s1) and (u2,s2). The extra factor 2h = 2(u2-u1) comes from the value of z below.
      +	// This is also:
      +	// 4(s2-s1)² - 4h²(u1+u2) = 4(s2-s1)² - 4h³ - 4h²(2u1)
      +	//                        = r² - j - 2v
      +	// with the notations below.
      +	h := pool.Get().Sub(u2, u1)
      +	xEqual := h.Sign() == 0
      +
      +	t.Add(h, h)
      +	// i = 4h²
      +	i := pool.Get().Mul(t, t)
      +	i.Mod(i, p)
      +	// j = 4h³
      +	j := pool.Get().Mul(h, i)
      +	j.Mod(j, p)
      +
      +	t.Sub(s2, s1)
      +	yEqual := t.Sign() == 0
      +	if xEqual && yEqual {
      +		c.Double(a, pool)
      +		return
      +	}
      +	r := pool.Get().Add(t, t)
      +
      +	v := pool.Get().Mul(u1, i)
      +	v.Mod(v, p)
      +
      +	// t4 = 4(s2-s1)²
      +	t4 := pool.Get().Mul(r, r)
      +	t4.Mod(t4, p)
      +	t.Add(v, v)
      +	t6 := pool.Get().Sub(t4, j)
      +	c.x.Sub(t6, t)
      +
      +	// Set y = -(2h)³(s1 + s*(x/4h²-u1))
      +	// This is also
      +	// y = - 2·s1·j - (s2-s1)(2x - 2i·u1) = r(v-x) - 2·s1·j
      +	t.Sub(v, c.x) // t7
      +	t4.Mul(s1, j) // t8
      +	t4.Mod(t4, p)
      +	t6.Add(t4, t4) // t9
      +	t4.Mul(r, t)   // t10
      +	t4.Mod(t4, p)
      +	c.y.Sub(t4, t6)
      +
      +	// Set z = 2(u2-u1)·z1·z2 = 2h·z1·z2
      +	t.Add(a.z, b.z) // t11
      +	t4.Mul(t, t)    // t12
      +	t4.Mod(t4, p)
      +	t.Sub(t4, z1z1) // t13
      +	t4.Sub(t, z2z2) // t14
      +	c.z.Mul(t4, h)
      +	c.z.Mod(c.z, p)
      +
      +	pool.Put(z1z1)
      +	pool.Put(z2z2)
      +	pool.Put(u1)
      +	pool.Put(u2)
      +	pool.Put(t)
      +	pool.Put(s1)
      +	pool.Put(s2)
      +	pool.Put(h)
      +	pool.Put(i)
      +	pool.Put(j)
      +	pool.Put(r)
      +	pool.Put(v)
      +	pool.Put(t4)
      +	pool.Put(t6)
      +}
      +
      +func (c *curvePoint) Double(a *curvePoint, pool *bnPool) {
      +	// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
      +	A := pool.Get().Mul(a.x, a.x)
      +	A.Mod(A, p)
      +	B := pool.Get().Mul(a.y, a.y)
      +	B.Mod(B, p)
      +	C := pool.Get().Mul(B, B)
      +	C.Mod(C, p)
      +
      +	t := pool.Get().Add(a.x, B)
      +	t2 := pool.Get().Mul(t, t)
      +	t2.Mod(t2, p)
      +	t.Sub(t2, A)
      +	t2.Sub(t, C)
      +	d := pool.Get().Add(t2, t2)
      +	t.Add(A, A)
      +	e := pool.Get().Add(t, A)
      +	f := pool.Get().Mul(e, e)
      +	f.Mod(f, p)
      +
      +	t.Add(d, d)
      +	c.x.Sub(f, t)
      +
      +	t.Add(C, C)
      +	t2.Add(t, t)
      +	t.Add(t2, t2)
      +	c.y.Sub(d, c.x)
      +	t2.Mul(e, c.y)
      +	t2.Mod(t2, p)
      +	c.y.Sub(t2, t)
      +
      +	t.Mul(a.y, a.z)
      +	t.Mod(t, p)
      +	c.z.Add(t, t)
      +
      +	pool.Put(A)
      +	pool.Put(B)
      +	pool.Put(C)
      +	pool.Put(t)
      +	pool.Put(t2)
      +	pool.Put(d)
      +	pool.Put(e)
      +	pool.Put(f)
      +}
      +
      +func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int, pool *bnPool) *curvePoint {
      +	sum := newCurvePoint(pool)
      +	sum.SetInfinity()
      +	t := newCurvePoint(pool)
      +
      +	for i := scalar.BitLen(); i >= 0; i-- {
      +		t.Double(sum, pool)
      +		if scalar.Bit(i) != 0 {
      +			sum.Add(t, a, pool)
      +		} else {
      +			sum.Set(t)
      +		}
      +	}
      +
      +	c.Set(sum)
      +	sum.Put(pool)
      +	t.Put(pool)
      +	return c
      +}
      +
      +func (c *curvePoint) MakeAffine(pool *bnPool) *curvePoint {
      +	if words := c.z.Bits(); len(words) == 1 && words[0] == 1 {
      +		return c
      +	}
      +
      +	zInv := pool.Get().ModInverse(c.z, p)
      +	t := pool.Get().Mul(c.y, zInv)
      +	t.Mod(t, p)
      +	zInv2 := pool.Get().Mul(zInv, zInv)
      +	zInv2.Mod(zInv2, p)
      +	c.y.Mul(t, zInv2)
      +	c.y.Mod(c.y, p)
      +	t.Mul(c.x, zInv2)
      +	t.Mod(t, p)
      +	c.x.Set(t)
      +	c.z.SetInt64(1)
      +	c.t.SetInt64(1)
      +
      +	pool.Put(zInv)
      +	pool.Put(t)
      +	pool.Put(zInv2)
      +
      +	return c
      +}
      +
      +func (c *curvePoint) Negative(a *curvePoint) {
      +	c.x.Set(a.x)
      +	c.y.Neg(a.y)
      +	c.z.Set(a.z)
      +	c.t.SetInt64(0)
      +}
      diff --git a/vendor/golang.org/x/crypto/bn256/example_test.go b/vendor/golang.org/x/crypto/bn256/example_test.go
      new file mode 100644
      index 00000000..b2d19807
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bn256/example_test.go
      @@ -0,0 +1,43 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bn256
      +
      +import (
      +	"crypto/rand"
      +)
      +
      +func ExamplePair() {
      +	// This implements the tripartite Diffie-Hellman algorithm from "A One
      +	// Round Protocol for Tripartite Diffie-Hellman", A. Joux.
      +	// http://www.springerlink.com/content/cddc57yyva0hburb/fulltext.pdf
      +
      +	// Each of three parties, a, b and c, generate a private value.
      +	a, _ := rand.Int(rand.Reader, Order)
      +	b, _ := rand.Int(rand.Reader, Order)
      +	c, _ := rand.Int(rand.Reader, Order)
      +
      +	// Then each party calculates g₁ and g₂ times their private value.
      +	pa := new(G1).ScalarBaseMult(a)
      +	qa := new(G2).ScalarBaseMult(a)
      +
      +	pb := new(G1).ScalarBaseMult(b)
      +	qb := new(G2).ScalarBaseMult(b)
      +
      +	pc := new(G1).ScalarBaseMult(c)
      +	qc := new(G2).ScalarBaseMult(c)
      +
      +	// Now each party exchanges its public values with the other two and
      +	// all parties can calculate the shared key.
      +	k1 := Pair(pb, qc)
      +	k1.ScalarMult(k1, a)
      +
      +	k2 := Pair(pc, qa)
      +	k2.ScalarMult(k2, b)
      +
      +	k3 := Pair(pa, qb)
      +	k3.ScalarMult(k3, c)
      +
      +	// k1, k2 and k3 will all be equal.
      +}
      diff --git a/vendor/golang.org/x/crypto/bn256/gfp12.go b/vendor/golang.org/x/crypto/bn256/gfp12.go
      new file mode 100644
      index 00000000..f084eddf
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bn256/gfp12.go
      @@ -0,0 +1,200 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bn256
      +
      +// For details of the algorithms used, see "Multiplication and Squaring on
+// Pairing-Friendly Fields", Devegili et al.
      +// http://eprint.iacr.org/2006/471.pdf.
      +
      +import (
      +	"math/big"
      +)
      +
      +// gfP12 implements the field of size p¹² as a quadratic extension of gfP6
      +// where ω²=τ.
      +type gfP12 struct {
      +	x, y *gfP6 // value is xω + y
      +}
      +
      +func newGFp12(pool *bnPool) *gfP12 {
      +	return &gfP12{newGFp6(pool), newGFp6(pool)}
      +}
      +
      +func (e *gfP12) String() string {
      +	return "(" + e.x.String() + "," + e.y.String() + ")"
      +}
      +
      +func (e *gfP12) Put(pool *bnPool) {
      +	e.x.Put(pool)
      +	e.y.Put(pool)
      +}
      +
      +func (e *gfP12) Set(a *gfP12) *gfP12 {
      +	e.x.Set(a.x)
      +	e.y.Set(a.y)
      +	return e
      +}
      +
      +func (e *gfP12) SetZero() *gfP12 {
      +	e.x.SetZero()
      +	e.y.SetZero()
      +	return e
      +}
      +
      +func (e *gfP12) SetOne() *gfP12 {
      +	e.x.SetZero()
      +	e.y.SetOne()
      +	return e
      +}
      +
      +func (e *gfP12) Minimal() {
      +	e.x.Minimal()
      +	e.y.Minimal()
      +}
      +
      +func (e *gfP12) IsZero() bool {
      +	e.Minimal()
      +	return e.x.IsZero() && e.y.IsZero()
      +}
      +
      +func (e *gfP12) IsOne() bool {
      +	e.Minimal()
      +	return e.x.IsZero() && e.y.IsOne()
      +}
      +
      +func (e *gfP12) Conjugate(a *gfP12) *gfP12 {
      +	e.x.Negative(a.x)
      +	e.y.Set(a.y)
      +	return a
      +}
      +
      +func (e *gfP12) Negative(a *gfP12) *gfP12 {
      +	e.x.Negative(a.x)
      +	e.y.Negative(a.y)
      +	return e
      +}
      +
      +// Frobenius computes (xω+y)^p = x^p ω·ξ^((p-1)/6) + y^p
      +func (e *gfP12) Frobenius(a *gfP12, pool *bnPool) *gfP12 {
      +	e.x.Frobenius(a.x, pool)
      +	e.y.Frobenius(a.y, pool)
      +	e.x.MulScalar(e.x, xiToPMinus1Over6, pool)
      +	return e
      +}
      +
      +// FrobeniusP2 computes (xω+y)^p² = x^p² ω·ξ^((p²-1)/6) + y^p²
      +func (e *gfP12) FrobeniusP2(a *gfP12, pool *bnPool) *gfP12 {
      +	e.x.FrobeniusP2(a.x)
      +	e.x.MulGFP(e.x, xiToPSquaredMinus1Over6)
      +	e.y.FrobeniusP2(a.y)
      +	return e
      +}
      +
      +func (e *gfP12) Add(a, b *gfP12) *gfP12 {
      +	e.x.Add(a.x, b.x)
      +	e.y.Add(a.y, b.y)
      +	return e
      +}
      +
      +func (e *gfP12) Sub(a, b *gfP12) *gfP12 {
      +	e.x.Sub(a.x, b.x)
      +	e.y.Sub(a.y, b.y)
      +	return e
      +}
      +
      +func (e *gfP12) Mul(a, b *gfP12, pool *bnPool) *gfP12 {
      +	tx := newGFp6(pool)
      +	tx.Mul(a.x, b.y, pool)
      +	t := newGFp6(pool)
      +	t.Mul(b.x, a.y, pool)
      +	tx.Add(tx, t)
      +
      +	ty := newGFp6(pool)
      +	ty.Mul(a.y, b.y, pool)
      +	t.Mul(a.x, b.x, pool)
      +	t.MulTau(t, pool)
      +	e.y.Add(ty, t)
      +	e.x.Set(tx)
      +
      +	tx.Put(pool)
      +	ty.Put(pool)
      +	t.Put(pool)
      +	return e
      +}
      +
      +func (e *gfP12) MulScalar(a *gfP12, b *gfP6, pool *bnPool) *gfP12 {
      +	e.x.Mul(e.x, b, pool)
      +	e.y.Mul(e.y, b, pool)
      +	return e
      +}
      +
      +func (c *gfP12) Exp(a *gfP12, power *big.Int, pool *bnPool) *gfP12 {
      +	sum := newGFp12(pool)
      +	sum.SetOne()
      +	t := newGFp12(pool)
      +
      +	for i := power.BitLen() - 1; i >= 0; i-- {
      +		t.Square(sum, pool)
      +		if power.Bit(i) != 0 {
      +			sum.Mul(t, a, pool)
      +		} else {
      +			sum.Set(t)
      +		}
      +	}
      +
      +	c.Set(sum)
      +
      +	sum.Put(pool)
      +	t.Put(pool)
      +
      +	return c
      +}
      +
      +func (e *gfP12) Square(a *gfP12, pool *bnPool) *gfP12 {
      +	// Complex squaring algorithm
      +	v0 := newGFp6(pool)
      +	v0.Mul(a.x, a.y, pool)
      +
      +	t := newGFp6(pool)
      +	t.MulTau(a.x, pool)
      +	t.Add(a.y, t)
      +	ty := newGFp6(pool)
      +	ty.Add(a.x, a.y)
      +	ty.Mul(ty, t, pool)
      +	ty.Sub(ty, v0)
      +	t.MulTau(v0, pool)
      +	ty.Sub(ty, t)
      +
      +	e.y.Set(ty)
      +	e.x.Double(v0)
      +
      +	v0.Put(pool)
      +	t.Put(pool)
      +	ty.Put(pool)
      +
      +	return e
      +}
      +
      +func (e *gfP12) Invert(a *gfP12, pool *bnPool) *gfP12 {
      +	// See "Implementing cryptographic pairings", M. Scott, section 3.2.
      +	// ftp://136.206.11.249/pub/crypto/pairings.pdf
      +	t1 := newGFp6(pool)
      +	t2 := newGFp6(pool)
      +
      +	t1.Square(a.x, pool)
      +	t2.Square(a.y, pool)
      +	t1.MulTau(t1, pool)
      +	t1.Sub(t2, t1)
      +	t2.Invert(t1, pool)
      +
      +	e.x.Negative(a.x)
      +	e.y.Set(a.y)
      +	e.MulScalar(e, t2, pool)
      +
      +	t1.Put(pool)
      +	t2.Put(pool)
      +
      +	return e
      +}
      diff --git a/vendor/golang.org/x/crypto/bn256/gfp2.go b/vendor/golang.org/x/crypto/bn256/gfp2.go
      new file mode 100644
      index 00000000..97f3f1f3
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bn256/gfp2.go
      @@ -0,0 +1,219 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bn256
      +
      +// For details of the algorithms used, see "Multiplication and Squaring on
+// Pairing-Friendly Fields", Devegili et al.
      +// http://eprint.iacr.org/2006/471.pdf.
      +
      +import (
      +	"math/big"
      +)
      +
      +// gfP2 implements a field of size p² as a quadratic extension of the base
      +// field where i²=-1.
      +type gfP2 struct {
      +	x, y *big.Int // value is xi+y.
      +}
      +
      +func newGFp2(pool *bnPool) *gfP2 {
      +	return &gfP2{pool.Get(), pool.Get()}
      +}
      +
      +func (e *gfP2) String() string {
      +	x := new(big.Int).Mod(e.x, p)
      +	y := new(big.Int).Mod(e.y, p)
      +	return "(" + x.String() + "," + y.String() + ")"
      +}
      +
      +func (e *gfP2) Put(pool *bnPool) {
      +	pool.Put(e.x)
      +	pool.Put(e.y)
      +}
      +
      +func (e *gfP2) Set(a *gfP2) *gfP2 {
      +	e.x.Set(a.x)
      +	e.y.Set(a.y)
      +	return e
      +}
      +
      +func (e *gfP2) SetZero() *gfP2 {
      +	e.x.SetInt64(0)
      +	e.y.SetInt64(0)
      +	return e
      +}
      +
      +func (e *gfP2) SetOne() *gfP2 {
      +	e.x.SetInt64(0)
      +	e.y.SetInt64(1)
      +	return e
      +}
      +
      +func (e *gfP2) Minimal() {
      +	if e.x.Sign() < 0 || e.x.Cmp(p) >= 0 {
      +		e.x.Mod(e.x, p)
      +	}
      +	if e.y.Sign() < 0 || e.y.Cmp(p) >= 0 {
      +		e.y.Mod(e.y, p)
      +	}
      +}
      +
      +func (e *gfP2) IsZero() bool {
      +	return e.x.Sign() == 0 && e.y.Sign() == 0
      +}
      +
      +func (e *gfP2) IsOne() bool {
      +	if e.x.Sign() != 0 {
      +		return false
      +	}
      +	words := e.y.Bits()
      +	return len(words) == 1 && words[0] == 1
      +}
      +
      +func (e *gfP2) Conjugate(a *gfP2) *gfP2 {
      +	e.y.Set(a.y)
      +	e.x.Neg(a.x)
      +	return e
      +}
      +
      +func (e *gfP2) Negative(a *gfP2) *gfP2 {
      +	e.x.Neg(a.x)
      +	e.y.Neg(a.y)
      +	return e
      +}
      +
      +func (e *gfP2) Add(a, b *gfP2) *gfP2 {
      +	e.x.Add(a.x, b.x)
      +	e.y.Add(a.y, b.y)
      +	return e
      +}
      +
      +func (e *gfP2) Sub(a, b *gfP2) *gfP2 {
      +	e.x.Sub(a.x, b.x)
      +	e.y.Sub(a.y, b.y)
      +	return e
      +}
      +
      +func (e *gfP2) Double(a *gfP2) *gfP2 {
      +	e.x.Lsh(a.x, 1)
      +	e.y.Lsh(a.y, 1)
      +	return e
      +}
      +
      +func (c *gfP2) Exp(a *gfP2, power *big.Int, pool *bnPool) *gfP2 {
      +	sum := newGFp2(pool)
      +	sum.SetOne()
      +	t := newGFp2(pool)
      +
      +	for i := power.BitLen() - 1; i >= 0; i-- {
      +		t.Square(sum, pool)
      +		if power.Bit(i) != 0 {
      +			sum.Mul(t, a, pool)
      +		} else {
      +			sum.Set(t)
      +		}
      +	}
      +
      +	c.Set(sum)
      +
      +	sum.Put(pool)
      +	t.Put(pool)
      +
      +	return c
      +}
      +
      +// See "Multiplication and Squaring in Pairing-Friendly Fields",
      +// http://eprint.iacr.org/2006/471.pdf
      +func (e *gfP2) Mul(a, b *gfP2, pool *bnPool) *gfP2 {
      +	tx := pool.Get().Mul(a.x, b.y)
      +	t := pool.Get().Mul(b.x, a.y)
      +	tx.Add(tx, t)
      +	tx.Mod(tx, p)
      +
      +	ty := pool.Get().Mul(a.y, b.y)
      +	t.Mul(a.x, b.x)
      +	ty.Sub(ty, t)
      +	e.y.Mod(ty, p)
      +	e.x.Set(tx)
      +
      +	pool.Put(tx)
      +	pool.Put(ty)
      +	pool.Put(t)
      +
      +	return e
      +}
      +
      +func (e *gfP2) MulScalar(a *gfP2, b *big.Int) *gfP2 {
      +	e.x.Mul(a.x, b)
      +	e.y.Mul(a.y, b)
      +	return e
      +}
      +
      +// MulXi sets e=ξa where ξ=i+3 and then returns e.
      +func (e *gfP2) MulXi(a *gfP2, pool *bnPool) *gfP2 {
      +	// (xi+y)(i+3) = (3x+y)i+(3y-x)
      +	tx := pool.Get().Lsh(a.x, 1)
      +	tx.Add(tx, a.x)
      +	tx.Add(tx, a.y)
      +
      +	ty := pool.Get().Lsh(a.y, 1)
      +	ty.Add(ty, a.y)
      +	ty.Sub(ty, a.x)
      +
      +	e.x.Set(tx)
      +	e.y.Set(ty)
      +
      +	pool.Put(tx)
      +	pool.Put(ty)
      +
      +	return e
      +}
      +
      +func (e *gfP2) Square(a *gfP2, pool *bnPool) *gfP2 {
      +	// Complex squaring algorithm:
+	// (xi+y)² = (x+y)(y-x) + 2*i*x*y
      +	t1 := pool.Get().Sub(a.y, a.x)
      +	t2 := pool.Get().Add(a.x, a.y)
      +	ty := pool.Get().Mul(t1, t2)
      +	ty.Mod(ty, p)
      +
      +	t1.Mul(a.x, a.y)
      +	t1.Lsh(t1, 1)
      +
      +	e.x.Mod(t1, p)
      +	e.y.Set(ty)
      +
      +	pool.Put(t1)
      +	pool.Put(t2)
      +	pool.Put(ty)
      +
      +	return e
      +}
      +
      +func (e *gfP2) Invert(a *gfP2, pool *bnPool) *gfP2 {
      +	// See "Implementing cryptographic pairings", M. Scott, section 3.2.
      +	// ftp://136.206.11.249/pub/crypto/pairings.pdf
      +	t := pool.Get()
      +	t.Mul(a.y, a.y)
      +	t2 := pool.Get()
      +	t2.Mul(a.x, a.x)
      +	t.Add(t, t2)
      +
      +	inv := pool.Get()
      +	inv.ModInverse(t, p)
      +
      +	e.x.Neg(a.x)
      +	e.x.Mul(e.x, inv)
      +	e.x.Mod(e.x, p)
      +
      +	e.y.Mul(a.y, inv)
      +	e.y.Mod(e.y, p)
      +
      +	pool.Put(t)
      +	pool.Put(t2)
      +	pool.Put(inv)
      +
      +	return e
      +}
      diff --git a/vendor/golang.org/x/crypto/bn256/gfp6.go b/vendor/golang.org/x/crypto/bn256/gfp6.go
      new file mode 100644
      index 00000000..f98ae782
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bn256/gfp6.go
      @@ -0,0 +1,296 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bn256
      +
      +// For details of the algorithms used, see "Multiplication and Squaring on
+// Pairing-Friendly Fields", Devegili et al.
      +// http://eprint.iacr.org/2006/471.pdf.
      +
      +import (
      +	"math/big"
      +)
      +
      +// gfP6 implements the field of size p⁶ as a cubic extension of gfP2 where τ³=ξ
      +// and ξ=i+3.
      +type gfP6 struct {
      +	x, y, z *gfP2 // value is xτ² + yτ + z
      +}
      +
      +func newGFp6(pool *bnPool) *gfP6 {
      +	return &gfP6{newGFp2(pool), newGFp2(pool), newGFp2(pool)}
      +}
      +
      +func (e *gfP6) String() string {
      +	return "(" + e.x.String() + "," + e.y.String() + "," + e.z.String() + ")"
      +}
      +
      +func (e *gfP6) Put(pool *bnPool) {
      +	e.x.Put(pool)
      +	e.y.Put(pool)
      +	e.z.Put(pool)
      +}
      +
      +func (e *gfP6) Set(a *gfP6) *gfP6 {
      +	e.x.Set(a.x)
      +	e.y.Set(a.y)
      +	e.z.Set(a.z)
      +	return e
      +}
      +
      +func (e *gfP6) SetZero() *gfP6 {
      +	e.x.SetZero()
      +	e.y.SetZero()
      +	e.z.SetZero()
      +	return e
      +}
      +
      +func (e *gfP6) SetOne() *gfP6 {
      +	e.x.SetZero()
      +	e.y.SetZero()
      +	e.z.SetOne()
      +	return e
      +}
      +
      +func (e *gfP6) Minimal() {
      +	e.x.Minimal()
      +	e.y.Minimal()
      +	e.z.Minimal()
      +}
      +
      +func (e *gfP6) IsZero() bool {
      +	return e.x.IsZero() && e.y.IsZero() && e.z.IsZero()
      +}
      +
      +func (e *gfP6) IsOne() bool {
      +	return e.x.IsZero() && e.y.IsZero() && e.z.IsOne()
      +}
      +
      +func (e *gfP6) Negative(a *gfP6) *gfP6 {
      +	e.x.Negative(a.x)
      +	e.y.Negative(a.y)
      +	e.z.Negative(a.z)
      +	return e
      +}
      +
      +func (e *gfP6) Frobenius(a *gfP6, pool *bnPool) *gfP6 {
      +	e.x.Conjugate(a.x)
      +	e.y.Conjugate(a.y)
      +	e.z.Conjugate(a.z)
      +
      +	e.x.Mul(e.x, xiTo2PMinus2Over3, pool)
      +	e.y.Mul(e.y, xiToPMinus1Over3, pool)
      +	return e
      +}
      +
      +// FrobeniusP2 computes (xτ²+yτ+z)^(p²) = xτ^(2p²) + yτ^(p²) + z
      +func (e *gfP6) FrobeniusP2(a *gfP6) *gfP6 {
      +	// τ^(2p²) = τ²τ^(2p²-2) = τ²ξ^((2p²-2)/3)
      +	e.x.MulScalar(a.x, xiTo2PSquaredMinus2Over3)
      +	// τ^(p²) = ττ^(p²-1) = τξ^((p²-1)/3)
      +	e.y.MulScalar(a.y, xiToPSquaredMinus1Over3)
      +	e.z.Set(a.z)
      +	return e
      +}
      +
      +func (e *gfP6) Add(a, b *gfP6) *gfP6 {
      +	e.x.Add(a.x, b.x)
      +	e.y.Add(a.y, b.y)
      +	e.z.Add(a.z, b.z)
      +	return e
      +}
      +
      +func (e *gfP6) Sub(a, b *gfP6) *gfP6 {
      +	e.x.Sub(a.x, b.x)
      +	e.y.Sub(a.y, b.y)
      +	e.z.Sub(a.z, b.z)
      +	return e
      +}
      +
      +func (e *gfP6) Double(a *gfP6) *gfP6 {
      +	e.x.Double(a.x)
      +	e.y.Double(a.y)
      +	e.z.Double(a.z)
      +	return e
      +}
      +
      +func (e *gfP6) Mul(a, b *gfP6, pool *bnPool) *gfP6 {
      +	// "Multiplication and Squaring on Pairing-Friendly Fields"
      +	// Section 4, Karatsuba method.
      +	// http://eprint.iacr.org/2006/471.pdf
      +
      +	v0 := newGFp2(pool)
      +	v0.Mul(a.z, b.z, pool)
      +	v1 := newGFp2(pool)
      +	v1.Mul(a.y, b.y, pool)
      +	v2 := newGFp2(pool)
      +	v2.Mul(a.x, b.x, pool)
      +
      +	t0 := newGFp2(pool)
      +	t0.Add(a.x, a.y)
      +	t1 := newGFp2(pool)
      +	t1.Add(b.x, b.y)
      +	tz := newGFp2(pool)
      +	tz.Mul(t0, t1, pool)
      +
      +	tz.Sub(tz, v1)
      +	tz.Sub(tz, v2)
      +	tz.MulXi(tz, pool)
      +	tz.Add(tz, v0)
      +
      +	t0.Add(a.y, a.z)
      +	t1.Add(b.y, b.z)
      +	ty := newGFp2(pool)
      +	ty.Mul(t0, t1, pool)
      +	ty.Sub(ty, v0)
      +	ty.Sub(ty, v1)
      +	t0.MulXi(v2, pool)
      +	ty.Add(ty, t0)
      +
      +	t0.Add(a.x, a.z)
      +	t1.Add(b.x, b.z)
      +	tx := newGFp2(pool)
      +	tx.Mul(t0, t1, pool)
      +	tx.Sub(tx, v0)
      +	tx.Add(tx, v1)
      +	tx.Sub(tx, v2)
      +
      +	e.x.Set(tx)
      +	e.y.Set(ty)
      +	e.z.Set(tz)
      +
      +	t0.Put(pool)
      +	t1.Put(pool)
      +	tx.Put(pool)
      +	ty.Put(pool)
      +	tz.Put(pool)
      +	v0.Put(pool)
      +	v1.Put(pool)
      +	v2.Put(pool)
      +	return e
      +}
      +
      +func (e *gfP6) MulScalar(a *gfP6, b *gfP2, pool *bnPool) *gfP6 {
      +	e.x.Mul(a.x, b, pool)
      +	e.y.Mul(a.y, b, pool)
      +	e.z.Mul(a.z, b, pool)
      +	return e
      +}
      +
      +func (e *gfP6) MulGFP(a *gfP6, b *big.Int) *gfP6 {
      +	e.x.MulScalar(a.x, b)
      +	e.y.MulScalar(a.y, b)
      +	e.z.MulScalar(a.z, b)
      +	return e
      +}
      +
      +// MulTau computes τ·(aτ²+bτ+c) = bτ²+cτ+aξ
      +func (e *gfP6) MulTau(a *gfP6, pool *bnPool) {
      +	tz := newGFp2(pool)
      +	tz.MulXi(a.x, pool)
      +	ty := newGFp2(pool)
      +	ty.Set(a.y)
      +	e.y.Set(a.z)
      +	e.x.Set(ty)
      +	e.z.Set(tz)
      +	tz.Put(pool)
      +	ty.Put(pool)
      +}
      +
      +func (e *gfP6) Square(a *gfP6, pool *bnPool) *gfP6 {
      +	v0 := newGFp2(pool).Square(a.z, pool)
      +	v1 := newGFp2(pool).Square(a.y, pool)
      +	v2 := newGFp2(pool).Square(a.x, pool)
      +
      +	c0 := newGFp2(pool).Add(a.x, a.y)
      +	c0.Square(c0, pool)
      +	c0.Sub(c0, v1)
      +	c0.Sub(c0, v2)
      +	c0.MulXi(c0, pool)
      +	c0.Add(c0, v0)
      +
      +	c1 := newGFp2(pool).Add(a.y, a.z)
      +	c1.Square(c1, pool)
      +	c1.Sub(c1, v0)
      +	c1.Sub(c1, v1)
      +	xiV2 := newGFp2(pool).MulXi(v2, pool)
      +	c1.Add(c1, xiV2)
      +
      +	c2 := newGFp2(pool).Add(a.x, a.z)
      +	c2.Square(c2, pool)
      +	c2.Sub(c2, v0)
      +	c2.Add(c2, v1)
      +	c2.Sub(c2, v2)
      +
      +	e.x.Set(c2)
      +	e.y.Set(c1)
      +	e.z.Set(c0)
      +
      +	v0.Put(pool)
      +	v1.Put(pool)
      +	v2.Put(pool)
      +	c0.Put(pool)
      +	c1.Put(pool)
      +	c2.Put(pool)
      +	xiV2.Put(pool)
      +
      +	return e
      +}
      +
      +func (e *gfP6) Invert(a *gfP6, pool *bnPool) *gfP6 {
      +	// See "Implementing cryptographic pairings", M. Scott, section 3.2.
      +	// ftp://136.206.11.249/pub/crypto/pairings.pdf
      +
      +	// Here we can give a short explanation of how it works: let j be a cubic root of
      +	// unity in GF(p²) so that 1+j+j²=0.
      +	// Then (xτ² + yτ + z)(xj²τ² + yjτ + z)(xjτ² + yj²τ + z)
      +	// = (xτ² + yτ + z)(Cτ²+Bτ+A)
      +	// = (x³ξ²+y³ξ+z³-3ξxyz) = F is an element of the base field (the norm).
      +	//
      +	// On the other hand (xj²τ² + yjτ + z)(xjτ² + yj²τ + z)
      +	// = τ²(y²-ξxz) + τ(ξx²-yz) + (z²-ξxy)
      +	//
      +	// So that's why A = (z²-ξxy), B = (ξx²-yz), C = (y²-ξxz)
      +	t1 := newGFp2(pool)
      +
      +	A := newGFp2(pool)
      +	A.Square(a.z, pool)
      +	t1.Mul(a.x, a.y, pool)
      +	t1.MulXi(t1, pool)
      +	A.Sub(A, t1)
      +
      +	B := newGFp2(pool)
      +	B.Square(a.x, pool)
      +	B.MulXi(B, pool)
      +	t1.Mul(a.y, a.z, pool)
      +	B.Sub(B, t1)
      +
      +	C := newGFp2(pool)
      +	C.Square(a.y, pool)
      +	t1.Mul(a.x, a.z, pool)
      +	C.Sub(C, t1)
      +
      +	F := newGFp2(pool)
      +	F.Mul(C, a.y, pool)
      +	F.MulXi(F, pool)
      +	t1.Mul(A, a.z, pool)
      +	F.Add(F, t1)
      +	t1.Mul(B, a.x, pool)
      +	t1.MulXi(t1, pool)
      +	F.Add(F, t1)
      +
      +	F.Invert(F, pool)
      +
      +	e.x.Mul(C, F, pool)
      +	e.y.Mul(B, F, pool)
      +	e.z.Mul(A, F, pool)
      +
      +	t1.Put(pool)
      +	A.Put(pool)
      +	B.Put(pool)
      +	C.Put(pool)
      +	F.Put(pool)
      +
      +	return e
      +}
      diff --git a/vendor/golang.org/x/crypto/bn256/optate.go b/vendor/golang.org/x/crypto/bn256/optate.go
      new file mode 100644
      index 00000000..7ae0746e
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bn256/optate.go
      @@ -0,0 +1,395 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bn256
      +
      +func lineFunctionAdd(r, p *twistPoint, q *curvePoint, r2 *gfP2, pool *bnPool) (a, b, c *gfP2, rOut *twistPoint) {
      +	// See the mixed addition algorithm from "Faster Computation of the
      +	// Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf
      +
      +	B := newGFp2(pool).Mul(p.x, r.t, pool)
      +
      +	D := newGFp2(pool).Add(p.y, r.z)
      +	D.Square(D, pool)
      +	D.Sub(D, r2)
      +	D.Sub(D, r.t)
      +	D.Mul(D, r.t, pool)
      +
      +	H := newGFp2(pool).Sub(B, r.x)
      +	I := newGFp2(pool).Square(H, pool)
      +
      +	E := newGFp2(pool).Add(I, I)
      +	E.Add(E, E)
      +
      +	J := newGFp2(pool).Mul(H, E, pool)
      +
      +	L1 := newGFp2(pool).Sub(D, r.y)
      +	L1.Sub(L1, r.y)
      +
      +	V := newGFp2(pool).Mul(r.x, E, pool)
      +
      +	rOut = newTwistPoint(pool)
      +	rOut.x.Square(L1, pool)
      +	rOut.x.Sub(rOut.x, J)
      +	rOut.x.Sub(rOut.x, V)
      +	rOut.x.Sub(rOut.x, V)
      +
      +	rOut.z.Add(r.z, H)
      +	rOut.z.Square(rOut.z, pool)
      +	rOut.z.Sub(rOut.z, r.t)
      +	rOut.z.Sub(rOut.z, I)
      +
      +	t := newGFp2(pool).Sub(V, rOut.x)
      +	t.Mul(t, L1, pool)
      +	t2 := newGFp2(pool).Mul(r.y, J, pool)
      +	t2.Add(t2, t2)
      +	rOut.y.Sub(t, t2)
      +
      +	rOut.t.Square(rOut.z, pool)
      +
      +	t.Add(p.y, rOut.z)
      +	t.Square(t, pool)
      +	t.Sub(t, r2)
      +	t.Sub(t, rOut.t)
      +
      +	t2.Mul(L1, p.x, pool)
      +	t2.Add(t2, t2)
      +	a = newGFp2(pool)
      +	a.Sub(t2, t)
      +
      +	c = newGFp2(pool)
      +	c.MulScalar(rOut.z, q.y)
      +	c.Add(c, c)
      +
      +	b = newGFp2(pool)
      +	b.SetZero()
      +	b.Sub(b, L1)
      +	b.MulScalar(b, q.x)
      +	b.Add(b, b)
      +
      +	B.Put(pool)
      +	D.Put(pool)
      +	H.Put(pool)
      +	I.Put(pool)
      +	E.Put(pool)
      +	J.Put(pool)
      +	L1.Put(pool)
      +	V.Put(pool)
      +	t.Put(pool)
      +	t2.Put(pool)
      +
      +	return
      +}
      +
      +func lineFunctionDouble(r *twistPoint, q *curvePoint, pool *bnPool) (a, b, c *gfP2, rOut *twistPoint) {
      +	// See the doubling algorithm for a=0 from "Faster Computation of the
      +	// Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf
      +
      +	A := newGFp2(pool).Square(r.x, pool)
      +	B := newGFp2(pool).Square(r.y, pool)
      +	C := newGFp2(pool).Square(B, pool)
      +
      +	D := newGFp2(pool).Add(r.x, B)
      +	D.Square(D, pool)
      +	D.Sub(D, A)
      +	D.Sub(D, C)
      +	D.Add(D, D)
      +
      +	E := newGFp2(pool).Add(A, A)
      +	E.Add(E, A)
      +
      +	G := newGFp2(pool).Square(E, pool)
      +
      +	rOut = newTwistPoint(pool)
      +	rOut.x.Sub(G, D)
      +	rOut.x.Sub(rOut.x, D)
      +
      +	rOut.z.Add(r.y, r.z)
      +	rOut.z.Square(rOut.z, pool)
      +	rOut.z.Sub(rOut.z, B)
      +	rOut.z.Sub(rOut.z, r.t)
      +
      +	rOut.y.Sub(D, rOut.x)
      +	rOut.y.Mul(rOut.y, E, pool)
      +	t := newGFp2(pool).Add(C, C)
      +	t.Add(t, t)
      +	t.Add(t, t)
      +	rOut.y.Sub(rOut.y, t)
      +
      +	rOut.t.Square(rOut.z, pool)
      +
      +	t.Mul(E, r.t, pool)
      +	t.Add(t, t)
      +	b = newGFp2(pool)
      +	b.SetZero()
      +	b.Sub(b, t)
      +	b.MulScalar(b, q.x)
      +
      +	a = newGFp2(pool)
      +	a.Add(r.x, E)
      +	a.Square(a, pool)
      +	a.Sub(a, A)
      +	a.Sub(a, G)
      +	t.Add(B, B)
      +	t.Add(t, t)
      +	a.Sub(a, t)
      +
      +	c = newGFp2(pool)
      +	c.Mul(rOut.z, r.t, pool)
      +	c.Add(c, c)
      +	c.MulScalar(c, q.y)
      +
      +	A.Put(pool)
      +	B.Put(pool)
      +	C.Put(pool)
      +	D.Put(pool)
      +	E.Put(pool)
      +	G.Put(pool)
      +	t.Put(pool)
      +
      +	return
      +}
      +
      +func mulLine(ret *gfP12, a, b, c *gfP2, pool *bnPool) {
      +	a2 := newGFp6(pool)
      +	a2.x.SetZero()
      +	a2.y.Set(a)
      +	a2.z.Set(b)
      +	a2.Mul(a2, ret.x, pool)
      +	t3 := newGFp6(pool).MulScalar(ret.y, c, pool)
      +
      +	t := newGFp2(pool)
      +	t.Add(b, c)
      +	t2 := newGFp6(pool)
      +	t2.x.SetZero()
      +	t2.y.Set(a)
      +	t2.z.Set(t)
      +	ret.x.Add(ret.x, ret.y)
      +
      +	ret.y.Set(t3)
      +
      +	ret.x.Mul(ret.x, t2, pool)
      +	ret.x.Sub(ret.x, a2)
      +	ret.x.Sub(ret.x, ret.y)
      +	a2.MulTau(a2, pool)
      +	ret.y.Add(ret.y, a2)
      +
      +	a2.Put(pool)
      +	t3.Put(pool)
      +	t2.Put(pool)
      +	t.Put(pool)
      +}
      +
      +// sixuPlus2NAF is 6u+2 in non-adjacent form.
      +var sixuPlus2NAF = []int8{0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, -1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 1, 0, 0, 0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 1}
      +
      +// miller implements the Miller loop for calculating the Optimal Ate pairing.
      +// See algorithm 1 from http://cryptojedi.org/papers/dclxvi-20100714.pdf
      +func miller(q *twistPoint, p *curvePoint, pool *bnPool) *gfP12 {
      +	ret := newGFp12(pool)
      +	ret.SetOne()
      +
      +	aAffine := newTwistPoint(pool)
      +	aAffine.Set(q)
      +	aAffine.MakeAffine(pool)
      +
      +	bAffine := newCurvePoint(pool)
      +	bAffine.Set(p)
      +	bAffine.MakeAffine(pool)
      +
      +	minusA := newTwistPoint(pool)
      +	minusA.Negative(aAffine, pool)
      +
      +	r := newTwistPoint(pool)
      +	r.Set(aAffine)
      +
      +	r2 := newGFp2(pool)
      +	r2.Square(aAffine.y, pool)
      +
      +	for i := len(sixuPlus2NAF) - 1; i > 0; i-- {
      +		a, b, c, newR := lineFunctionDouble(r, bAffine, pool)
      +		if i != len(sixuPlus2NAF)-1 {
      +			ret.Square(ret, pool)
      +		}
      +
      +		mulLine(ret, a, b, c, pool)
      +		a.Put(pool)
      +		b.Put(pool)
      +		c.Put(pool)
      +		r.Put(pool)
      +		r = newR
      +
      +		switch sixuPlus2NAF[i-1] {
      +		case 1:
      +			a, b, c, newR = lineFunctionAdd(r, aAffine, bAffine, r2, pool)
      +		case -1:
      +			a, b, c, newR = lineFunctionAdd(r, minusA, bAffine, r2, pool)
      +		default:
      +			continue
      +		}
      +
      +		mulLine(ret, a, b, c, pool)
      +		a.Put(pool)
      +		b.Put(pool)
      +		c.Put(pool)
      +		r.Put(pool)
      +		r = newR
      +	}
      +
      +	// In order to calculate Q1 we have to convert q from the sextic twist
      +	// to the full GF(p^12) group, apply the Frobenius there, and convert
      +	// back.
      +	//
      +	// The twist isomorphism is (x', y') -> (xω², yω³). If we consider just
      +	// x for a moment, then after applying the Frobenius, we have x̄ω^(2p)
      +	// where x̄ is the conjugate of x. If we are going to apply the inverse
      +	// isomorphism we need a value with a single coefficient of ω² so we
      +	// rewrite this as x̄ω^(2p-2)ω². ξ⁶ = ω and, due to the construction of
      +	// p, 2p-2 is a multiple of six. Therefore we can rewrite as
      +	// x̄ξ^((p-1)/3)ω² and applying the inverse isomorphism eliminates the
      +	// ω².
      +	//
      +	// A similar argument can be made for the y value.
      +
      +	q1 := newTwistPoint(pool)
      +	q1.x.Conjugate(aAffine.x)
      +	q1.x.Mul(q1.x, xiToPMinus1Over3, pool)
      +	q1.y.Conjugate(aAffine.y)
      +	q1.y.Mul(q1.y, xiToPMinus1Over2, pool)
      +	q1.z.SetOne()
      +	q1.t.SetOne()
      +
      +	// For Q2 we are applying the p² Frobenius. The two conjugations cancel
      +	// out and we are left only with the factors from the isomorphism. In
      +	// the case of x, we end up with a pure number which is why
      +	// xiToPSquaredMinus1Over3 is ∈ GF(p). With y we get a factor of -1. We
      +	// ignore this to end up with -Q2.
      +
      +	minusQ2 := newTwistPoint(pool)
      +	minusQ2.x.MulScalar(aAffine.x, xiToPSquaredMinus1Over3)
      +	minusQ2.y.Set(aAffine.y)
      +	minusQ2.z.SetOne()
      +	minusQ2.t.SetOne()
      +
      +	r2.Square(q1.y, pool)
      +	a, b, c, newR := lineFunctionAdd(r, q1, bAffine, r2, pool)
      +	mulLine(ret, a, b, c, pool)
      +	a.Put(pool)
      +	b.Put(pool)
      +	c.Put(pool)
      +	r.Put(pool)
      +	r = newR
      +
      +	r2.Square(minusQ2.y, pool)
      +	a, b, c, newR = lineFunctionAdd(r, minusQ2, bAffine, r2, pool)
      +	mulLine(ret, a, b, c, pool)
      +	a.Put(pool)
      +	b.Put(pool)
      +	c.Put(pool)
      +	r.Put(pool)
      +	r = newR
      +
      +	aAffine.Put(pool)
      +	bAffine.Put(pool)
      +	minusA.Put(pool)
      +	r.Put(pool)
      +	r2.Put(pool)
      +
      +	return ret
      +}
      +
      +// finalExponentiation computes the (p¹²-1)/Order-th power of an element of
      +// GF(p¹²) to obtain an element of GT (steps 13-15 of algorithm 1 from
      +// http://cryptojedi.org/papers/dclxvi-20100714.pdf)
      +func finalExponentiation(in *gfP12, pool *bnPool) *gfP12 {
      +	t1 := newGFp12(pool)
      +
      +	// This is the p^6-Frobenius
      +	t1.x.Negative(in.x)
      +	t1.y.Set(in.y)
      +
      +	inv := newGFp12(pool)
      +	inv.Invert(in, pool)
      +	t1.Mul(t1, inv, pool)
      +
      +	t2 := newGFp12(pool).FrobeniusP2(t1, pool)
      +	t1.Mul(t1, t2, pool)
      +
      +	fp := newGFp12(pool).Frobenius(t1, pool)
      +	fp2 := newGFp12(pool).FrobeniusP2(t1, pool)
      +	fp3 := newGFp12(pool).Frobenius(fp2, pool)
      +
      +	fu, fu2, fu3 := newGFp12(pool), newGFp12(pool), newGFp12(pool)
      +	fu.Exp(t1, u, pool)
      +	fu2.Exp(fu, u, pool)
      +	fu3.Exp(fu2, u, pool)
      +
      +	y3 := newGFp12(pool).Frobenius(fu, pool)
      +	fu2p := newGFp12(pool).Frobenius(fu2, pool)
      +	fu3p := newGFp12(pool).Frobenius(fu3, pool)
      +	y2 := newGFp12(pool).FrobeniusP2(fu2, pool)
      +
      +	y0 := newGFp12(pool)
      +	y0.Mul(fp, fp2, pool)
      +	y0.Mul(y0, fp3, pool)
      +
      +	y1, y4, y5 := newGFp12(pool), newGFp12(pool), newGFp12(pool)
      +	y1.Conjugate(t1)
      +	y5.Conjugate(fu2)
      +	y3.Conjugate(y3)
      +	y4.Mul(fu, fu2p, pool)
      +	y4.Conjugate(y4)
      +
      +	y6 := newGFp12(pool)
      +	y6.Mul(fu3, fu3p, pool)
      +	y6.Conjugate(y6)
      +
      +	t0 := newGFp12(pool)
      +	t0.Square(y6, pool)
      +	t0.Mul(t0, y4, pool)
      +	t0.Mul(t0, y5, pool)
      +	t1.Mul(y3, y5, pool)
      +	t1.Mul(t1, t0, pool)
      +	t0.Mul(t0, y2, pool)
      +	t1.Square(t1, pool)
      +	t1.Mul(t1, t0, pool)
      +	t1.Square(t1, pool)
      +	t0.Mul(t1, y1, pool)
      +	t1.Mul(t1, y0, pool)
      +	t0.Square(t0, pool)
      +	t0.Mul(t0, t1, pool)
      +
      +	inv.Put(pool)
      +	t1.Put(pool)
      +	t2.Put(pool)
      +	fp.Put(pool)
      +	fp2.Put(pool)
      +	fp3.Put(pool)
      +	fu.Put(pool)
      +	fu2.Put(pool)
      +	fu3.Put(pool)
      +	fu2p.Put(pool)
      +	fu3p.Put(pool)
      +	y0.Put(pool)
      +	y1.Put(pool)
      +	y2.Put(pool)
      +	y3.Put(pool)
      +	y4.Put(pool)
      +	y5.Put(pool)
      +	y6.Put(pool)
      +
      +	return t0
      +}
      +
      +func optimalAte(a *twistPoint, b *curvePoint, pool *bnPool) *gfP12 {
      +	e := miller(a, b, pool)
      +	ret := finalExponentiation(e, pool)
      +	e.Put(pool)
      +
      +	if a.IsInfinity() || b.IsInfinity() {
      +		ret.SetOne()
      +	}
      +
      +	return ret
      +}
      diff --git a/vendor/golang.org/x/crypto/bn256/twist.go b/vendor/golang.org/x/crypto/bn256/twist.go
      new file mode 100644
      index 00000000..4f8b3fed
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/bn256/twist.go
      @@ -0,0 +1,249 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bn256
      +
      +import (
      +	"math/big"
      +)
      +
      +// twistPoint implements the elliptic curve y²=x³+3/ξ over GF(p²). Points are
      +// kept in Jacobian form and t=z² when valid. The group G₂ is the set of
      +// n-torsion points of this curve over GF(p²) (where n = Order)
      +type twistPoint struct {
      +	x, y, z, t *gfP2
      +}
      +
      +var twistB = &gfP2{
      +	bigFromBase10("6500054969564660373279643874235990574282535810762300357187714502686418407178"),
      +	bigFromBase10("45500384786952622612957507119651934019977750675336102500314001518804928850249"),
      +}
      +
      +// twistGen is the generator of group G₂.
      +var twistGen = &twistPoint{
      +	&gfP2{
      +		bigFromBase10("21167961636542580255011770066570541300993051739349375019639421053990175267184"),
      +		bigFromBase10("64746500191241794695844075326670126197795977525365406531717464316923369116492"),
      +	},
      +	&gfP2{
      +		bigFromBase10("20666913350058776956210519119118544732556678129809273996262322366050359951122"),
      +		bigFromBase10("17778617556404439934652658462602675281523610326338642107814333856843981424549"),
      +	},
      +	&gfP2{
      +		bigFromBase10("0"),
      +		bigFromBase10("1"),
      +	},
      +	&gfP2{
      +		bigFromBase10("0"),
      +		bigFromBase10("1"),
      +	},
      +}
      +
      +func newTwistPoint(pool *bnPool) *twistPoint {
      +	return &twistPoint{
      +		newGFp2(pool),
      +		newGFp2(pool),
      +		newGFp2(pool),
      +		newGFp2(pool),
      +	}
      +}
      +
      +func (c *twistPoint) String() string {
      +	return "(" + c.x.String() + ", " + c.y.String() + ", " + c.z.String() + ")"
      +}
      +
      +func (c *twistPoint) Put(pool *bnPool) {
      +	c.x.Put(pool)
      +	c.y.Put(pool)
      +	c.z.Put(pool)
      +	c.t.Put(pool)
      +}
      +
      +func (c *twistPoint) Set(a *twistPoint) {
      +	c.x.Set(a.x)
      +	c.y.Set(a.y)
      +	c.z.Set(a.z)
      +	c.t.Set(a.t)
      +}
      +
      +// IsOnCurve returns true iff c is on the curve where c must be in affine form.
      +func (c *twistPoint) IsOnCurve() bool {
      +	pool := new(bnPool)
      +	yy := newGFp2(pool).Square(c.y, pool)
      +	xxx := newGFp2(pool).Square(c.x, pool)
      +	xxx.Mul(xxx, c.x, pool)
      +	yy.Sub(yy, xxx)
      +	yy.Sub(yy, twistB)
      +	yy.Minimal()
      +	return yy.x.Sign() == 0 && yy.y.Sign() == 0
      +}
      +
      +func (c *twistPoint) SetInfinity() {
      +	c.z.SetZero()
      +}
      +
      +func (c *twistPoint) IsInfinity() bool {
      +	return c.z.IsZero()
      +}
      +
      +func (c *twistPoint) Add(a, b *twistPoint, pool *bnPool) {
      +	// For additional comments, see the same function in curve.go.
      +
      +	if a.IsInfinity() {
      +		c.Set(b)
      +		return
      +	}
      +	if b.IsInfinity() {
      +		c.Set(a)
      +		return
      +	}
      +
      +	// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
      +	z1z1 := newGFp2(pool).Square(a.z, pool)
      +	z2z2 := newGFp2(pool).Square(b.z, pool)
      +	u1 := newGFp2(pool).Mul(a.x, z2z2, pool)
      +	u2 := newGFp2(pool).Mul(b.x, z1z1, pool)
      +
      +	t := newGFp2(pool).Mul(b.z, z2z2, pool)
      +	s1 := newGFp2(pool).Mul(a.y, t, pool)
      +
      +	t.Mul(a.z, z1z1, pool)
      +	s2 := newGFp2(pool).Mul(b.y, t, pool)
      +
      +	h := newGFp2(pool).Sub(u2, u1)
      +	xEqual := h.IsZero()
      +
      +	t.Add(h, h)
      +	i := newGFp2(pool).Square(t, pool)
      +	j := newGFp2(pool).Mul(h, i, pool)
      +
      +	t.Sub(s2, s1)
      +	yEqual := t.IsZero()
      +	if xEqual && yEqual {
      +		c.Double(a, pool)
      +		return
      +	}
      +	r := newGFp2(pool).Add(t, t)
      +
      +	v := newGFp2(pool).Mul(u1, i, pool)
      +
      +	t4 := newGFp2(pool).Square(r, pool)
      +	t.Add(v, v)
      +	t6 := newGFp2(pool).Sub(t4, j)
      +	c.x.Sub(t6, t)
      +
      +	t.Sub(v, c.x)       // t7
      +	t4.Mul(s1, j, pool) // t8
      +	t6.Add(t4, t4)      // t9
      +	t4.Mul(r, t, pool)  // t10
      +	c.y.Sub(t4, t6)
      +
      +	t.Add(a.z, b.z)    // t11
      +	t4.Square(t, pool) // t12
      +	t.Sub(t4, z1z1)    // t13
      +	t4.Sub(t, z2z2)    // t14
      +	c.z.Mul(t4, h, pool)
      +
      +	z1z1.Put(pool)
      +	z2z2.Put(pool)
      +	u1.Put(pool)
      +	u2.Put(pool)
      +	t.Put(pool)
      +	s1.Put(pool)
      +	s2.Put(pool)
      +	h.Put(pool)
      +	i.Put(pool)
      +	j.Put(pool)
      +	r.Put(pool)
      +	v.Put(pool)
      +	t4.Put(pool)
      +	t6.Put(pool)
      +}
      +
      +func (c *twistPoint) Double(a *twistPoint, pool *bnPool) {
      +	// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
      +	A := newGFp2(pool).Square(a.x, pool)
      +	B := newGFp2(pool).Square(a.y, pool)
      +	C := newGFp2(pool).Square(B, pool)
      +
      +	t := newGFp2(pool).Add(a.x, B)
      +	t2 := newGFp2(pool).Square(t, pool)
      +	t.Sub(t2, A)
      +	t2.Sub(t, C)
      +	d := newGFp2(pool).Add(t2, t2)
      +	t.Add(A, A)
      +	e := newGFp2(pool).Add(t, A)
      +	f := newGFp2(pool).Square(e, pool)
      +
      +	t.Add(d, d)
      +	c.x.Sub(f, t)
      +
      +	t.Add(C, C)
      +	t2.Add(t, t)
      +	t.Add(t2, t2)
      +	c.y.Sub(d, c.x)
      +	t2.Mul(e, c.y, pool)
      +	c.y.Sub(t2, t)
      +
      +	t.Mul(a.y, a.z, pool)
      +	c.z.Add(t, t)
      +
      +	A.Put(pool)
      +	B.Put(pool)
      +	C.Put(pool)
      +	t.Put(pool)
      +	t2.Put(pool)
      +	d.Put(pool)
      +	e.Put(pool)
      +	f.Put(pool)
      +}
      +
      +func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int, pool *bnPool) *twistPoint {
      +	sum := newTwistPoint(pool)
      +	sum.SetInfinity()
      +	t := newTwistPoint(pool)
      +
      +	for i := scalar.BitLen(); i >= 0; i-- {
      +		t.Double(sum, pool)
      +		if scalar.Bit(i) != 0 {
      +			sum.Add(t, a, pool)
      +		} else {
      +			sum.Set(t)
      +		}
      +	}
      +
      +	c.Set(sum)
      +	sum.Put(pool)
      +	t.Put(pool)
      +	return c
      +}
      +
      +func (c *twistPoint) MakeAffine(pool *bnPool) *twistPoint {
      +	if c.z.IsOne() {
      +		return c
      +	}
      +
      +	zInv := newGFp2(pool).Invert(c.z, pool)
      +	t := newGFp2(pool).Mul(c.y, zInv, pool)
      +	zInv2 := newGFp2(pool).Square(zInv, pool)
      +	c.y.Mul(t, zInv2, pool)
      +	t.Mul(c.x, zInv2, pool)
      +	c.x.Set(t)
      +	c.z.SetOne()
      +	c.t.SetOne()
      +
      +	zInv.Put(pool)
      +	t.Put(pool)
      +	zInv2.Put(pool)
      +
      +	return c
      +}
      +
      +func (c *twistPoint) Negative(a *twistPoint, pool *bnPool) {
      +	c.x.Set(a.x)
      +	c.y.SetZero()
      +	c.y.Sub(c.y, a.y)
      +	c.z.Set(a.z)
      +	c.t.SetZero()
      +}
      diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go
      index 8c1b299b..0b4af37b 100644
      --- a/vendor/golang.org/x/crypto/cast5/cast5.go
      +++ b/vendor/golang.org/x/crypto/cast5/cast5.go
      @@ -4,7 +4,7 @@
       
       // Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common
       // OpenPGP cipher.
      -package cast5
      +package cast5 // import "golang.org/x/crypto/cast5"
       
       import "errors"
       
      diff --git a/vendor/golang.org/x/crypto/codereview.cfg b/vendor/golang.org/x/crypto/codereview.cfg
      new file mode 100644
      index 00000000..3f8b14b6
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/codereview.cfg
      @@ -0,0 +1 @@
      +issuerepo: golang/go
      diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s
      new file mode 100644
      index 00000000..797f9b05
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.s
      @@ -0,0 +1,20 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This code was translated into a form compatible with 6a from the public
      +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
      +
      +// +build amd64,!gccgo,!appengine
      +
      +DATA ·REDMASK51(SB)/8, $0x0007FFFFFFFFFFFF
      +GLOBL ·REDMASK51(SB), 8, $8
      +
      +DATA ·_121666_213(SB)/8, $996687872
      +GLOBL ·_121666_213(SB), 8, $8
      +
      +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
      +GLOBL ·_2P0(SB), 8, $8
      +
      +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
      +GLOBL ·_2P1234(SB), 8, $8
      diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s
      new file mode 100644
      index 00000000..45484d1b
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s
      @@ -0,0 +1,88 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This code was translated into a form compatible with 6a from the public
      +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
      +
      +// +build amd64,!gccgo,!appengine
      +
      +// func cswap(inout *[5]uint64, v uint64)
      +TEXT ·cswap(SB),7,$0
      +	MOVQ inout+0(FP),DI
      +	MOVQ v+8(FP),SI
      +
      +	CMPQ SI,$1
      +	MOVQ 0(DI),SI
      +	MOVQ 80(DI),DX
      +	MOVQ 8(DI),CX
      +	MOVQ 88(DI),R8
      +	MOVQ SI,R9
      +	CMOVQEQ DX,SI
      +	CMOVQEQ R9,DX
      +	MOVQ CX,R9
      +	CMOVQEQ R8,CX
      +	CMOVQEQ R9,R8
      +	MOVQ SI,0(DI)
      +	MOVQ DX,80(DI)
      +	MOVQ CX,8(DI)
      +	MOVQ R8,88(DI)
      +	MOVQ 16(DI),SI
      +	MOVQ 96(DI),DX
      +	MOVQ 24(DI),CX
      +	MOVQ 104(DI),R8
      +	MOVQ SI,R9
      +	CMOVQEQ DX,SI
      +	CMOVQEQ R9,DX
      +	MOVQ CX,R9
      +	CMOVQEQ R8,CX
      +	CMOVQEQ R9,R8
      +	MOVQ SI,16(DI)
      +	MOVQ DX,96(DI)
      +	MOVQ CX,24(DI)
      +	MOVQ R8,104(DI)
      +	MOVQ 32(DI),SI
      +	MOVQ 112(DI),DX
      +	MOVQ 40(DI),CX
      +	MOVQ 120(DI),R8
      +	MOVQ SI,R9
      +	CMOVQEQ DX,SI
      +	CMOVQEQ R9,DX
      +	MOVQ CX,R9
      +	CMOVQEQ R8,CX
      +	CMOVQEQ R9,R8
      +	MOVQ SI,32(DI)
      +	MOVQ DX,112(DI)
      +	MOVQ CX,40(DI)
      +	MOVQ R8,120(DI)
      +	MOVQ 48(DI),SI
      +	MOVQ 128(DI),DX
      +	MOVQ 56(DI),CX
      +	MOVQ 136(DI),R8
      +	MOVQ SI,R9
      +	CMOVQEQ DX,SI
      +	CMOVQEQ R9,DX
      +	MOVQ CX,R9
      +	CMOVQEQ R8,CX
      +	CMOVQEQ R9,R8
      +	MOVQ SI,48(DI)
      +	MOVQ DX,128(DI)
      +	MOVQ CX,56(DI)
      +	MOVQ R8,136(DI)
      +	MOVQ 64(DI),SI
      +	MOVQ 144(DI),DX
      +	MOVQ 72(DI),CX
      +	MOVQ 152(DI),R8
      +	MOVQ SI,R9
      +	CMOVQEQ DX,SI
      +	CMOVQEQ R9,DX
      +	MOVQ CX,R9
      +	CMOVQEQ R8,CX
      +	CMOVQEQ R9,R8
      +	MOVQ SI,64(DI)
      +	MOVQ DX,144(DI)
      +	MOVQ CX,72(DI)
      +	MOVQ R8,152(DI)
      +	MOVQ DI,AX
      +	MOVQ SI,DX
      +	RET
      diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
      new file mode 100644
      index 00000000..6918c47f
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go
      @@ -0,0 +1,841 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// We have a implementation in amd64 assembly so this code is only run on
      +// non-amd64 platforms. The amd64 assembly does not support gccgo.
      +// +build !amd64 gccgo appengine
      +
      +package curve25519
      +
      +// This code is a port of the public domain, "ref10" implementation of
      +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
      +
      +// fieldElement represents an element of the field GF(2^255 - 19). An element
      +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
      +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
      +// context.
      +type fieldElement [10]int32
      +
      +func feZero(fe *fieldElement) {
      +	for i := range fe {
      +		fe[i] = 0
      +	}
      +}
      +
      +func feOne(fe *fieldElement) {
      +	feZero(fe)
      +	fe[0] = 1
      +}
      +
      +func feAdd(dst, a, b *fieldElement) {
      +	for i := range dst {
      +		dst[i] = a[i] + b[i]
      +	}
      +}
      +
      +func feSub(dst, a, b *fieldElement) {
      +	for i := range dst {
      +		dst[i] = a[i] - b[i]
      +	}
      +}
      +
      +func feCopy(dst, src *fieldElement) {
      +	for i := range dst {
      +		dst[i] = src[i]
      +	}
      +}
      +
      +// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
      +//
      +// Preconditions: b in {0,1}.
      +func feCSwap(f, g *fieldElement, b int32) {
      +	var x fieldElement
      +	b = -b
      +	for i := range x {
      +		x[i] = b & (f[i] ^ g[i])
      +	}
      +
      +	for i := range f {
      +		f[i] ^= x[i]
      +	}
      +	for i := range g {
      +		g[i] ^= x[i]
      +	}
      +}
      +
      +// load3 reads a 24-bit, little-endian value from in.
      +func load3(in []byte) int64 {
      +	var r int64
      +	r = int64(in[0])
      +	r |= int64(in[1]) << 8
      +	r |= int64(in[2]) << 16
      +	return r
      +}
      +
      +// load4 reads a 32-bit, little-endian value from in.
      +func load4(in []byte) int64 {
      +	var r int64
      +	r = int64(in[0])
      +	r |= int64(in[1]) << 8
      +	r |= int64(in[2]) << 16
      +	r |= int64(in[3]) << 24
      +	return r
      +}
      +
      +func feFromBytes(dst *fieldElement, src *[32]byte) {
      +	h0 := load4(src[:])
      +	h1 := load3(src[4:]) << 6
      +	h2 := load3(src[7:]) << 5
      +	h3 := load3(src[10:]) << 3
      +	h4 := load3(src[13:]) << 2
      +	h5 := load4(src[16:])
      +	h6 := load3(src[20:]) << 7
      +	h7 := load3(src[23:]) << 5
      +	h8 := load3(src[26:]) << 4
      +	h9 := load3(src[29:]) << 2
      +
      +	var carry [10]int64
      +	carry[9] = (h9 + 1<<24) >> 25
      +	h0 += carry[9] * 19
      +	h9 -= carry[9] << 25
      +	carry[1] = (h1 + 1<<24) >> 25
      +	h2 += carry[1]
      +	h1 -= carry[1] << 25
      +	carry[3] = (h3 + 1<<24) >> 25
      +	h4 += carry[3]
      +	h3 -= carry[3] << 25
      +	carry[5] = (h5 + 1<<24) >> 25
      +	h6 += carry[5]
      +	h5 -= carry[5] << 25
      +	carry[7] = (h7 + 1<<24) >> 25
      +	h8 += carry[7]
      +	h7 -= carry[7] << 25
      +
      +	carry[0] = (h0 + 1<<25) >> 26
      +	h1 += carry[0]
      +	h0 -= carry[0] << 26
      +	carry[2] = (h2 + 1<<25) >> 26
      +	h3 += carry[2]
      +	h2 -= carry[2] << 26
      +	carry[4] = (h4 + 1<<25) >> 26
      +	h5 += carry[4]
      +	h4 -= carry[4] << 26
      +	carry[6] = (h6 + 1<<25) >> 26
      +	h7 += carry[6]
      +	h6 -= carry[6] << 26
      +	carry[8] = (h8 + 1<<25) >> 26
      +	h9 += carry[8]
      +	h8 -= carry[8] << 26
      +
      +	dst[0] = int32(h0)
      +	dst[1] = int32(h1)
      +	dst[2] = int32(h2)
      +	dst[3] = int32(h3)
      +	dst[4] = int32(h4)
      +	dst[5] = int32(h5)
      +	dst[6] = int32(h6)
      +	dst[7] = int32(h7)
      +	dst[8] = int32(h8)
      +	dst[9] = int32(h9)
      +}
      +
      +// feToBytes marshals h to s.
      +// Preconditions:
      +//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
      +//
      +// Write p=2^255-19; q=floor(h/p).
      +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
      +//
      +// Proof:
      +//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
      +//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
      +//
      +//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
      +//   Then 0<y<1.
      +//
      +//   Write r=h-pq.
      +//   Have 0<=r<=p-1=2^255-20.
      +//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
      +//
      +//   Write x=r+19(2^-255)r+y.
      +//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
      +//
      +//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
      +//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
      +func feToBytes(s *[32]byte, h *fieldElement) {
      +	var carry [10]int32
      +
      +	q := (19*h[9] + (1 << 24)) >> 25
      +	q = (h[0] + q) >> 26
      +	q = (h[1] + q) >> 25
      +	q = (h[2] + q) >> 26
      +	q = (h[3] + q) >> 25
      +	q = (h[4] + q) >> 26
      +	q = (h[5] + q) >> 25
      +	q = (h[6] + q) >> 26
      +	q = (h[7] + q) >> 25
      +	q = (h[8] + q) >> 26
      +	q = (h[9] + q) >> 25
      +
      +	// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
      +	h[0] += 19 * q
      +	// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
      +
      +	carry[0] = h[0] >> 26
      +	h[1] += carry[0]
      +	h[0] -= carry[0] << 26
      +	carry[1] = h[1] >> 25
      +	h[2] += carry[1]
      +	h[1] -= carry[1] << 25
      +	carry[2] = h[2] >> 26
      +	h[3] += carry[2]
      +	h[2] -= carry[2] << 26
      +	carry[3] = h[3] >> 25
      +	h[4] += carry[3]
      +	h[3] -= carry[3] << 25
      +	carry[4] = h[4] >> 26
      +	h[5] += carry[4]
      +	h[4] -= carry[4] << 26
      +	carry[5] = h[5] >> 25
      +	h[6] += carry[5]
      +	h[5] -= carry[5] << 25
      +	carry[6] = h[6] >> 26
      +	h[7] += carry[6]
      +	h[6] -= carry[6] << 26
      +	carry[7] = h[7] >> 25
      +	h[8] += carry[7]
      +	h[7] -= carry[7] << 25
      +	carry[8] = h[8] >> 26
      +	h[9] += carry[8]
      +	h[8] -= carry[8] << 26
      +	carry[9] = h[9] >> 25
      +	h[9] -= carry[9] << 25
      +	// h10 = carry9
      +
      +	// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
      +	// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
      +	// evidently 2^255 h10-2^255 q = 0.
      +	// Goal: Output h[0]+...+2^230 h[9].
      +
      +	s[0] = byte(h[0] >> 0)
      +	s[1] = byte(h[0] >> 8)
      +	s[2] = byte(h[0] >> 16)
      +	s[3] = byte((h[0] >> 24) | (h[1] << 2))
      +	s[4] = byte(h[1] >> 6)
      +	s[5] = byte(h[1] >> 14)
      +	s[6] = byte((h[1] >> 22) | (h[2] << 3))
      +	s[7] = byte(h[2] >> 5)
      +	s[8] = byte(h[2] >> 13)
      +	s[9] = byte((h[2] >> 21) | (h[3] << 5))
      +	s[10] = byte(h[3] >> 3)
      +	s[11] = byte(h[3] >> 11)
      +	s[12] = byte((h[3] >> 19) | (h[4] << 6))
      +	s[13] = byte(h[4] >> 2)
      +	s[14] = byte(h[4] >> 10)
      +	s[15] = byte(h[4] >> 18)
      +	s[16] = byte(h[5] >> 0)
      +	s[17] = byte(h[5] >> 8)
      +	s[18] = byte(h[5] >> 16)
      +	s[19] = byte((h[5] >> 24) | (h[6] << 1))
      +	s[20] = byte(h[6] >> 7)
      +	s[21] = byte(h[6] >> 15)
      +	s[22] = byte((h[6] >> 23) | (h[7] << 3))
      +	s[23] = byte(h[7] >> 5)
      +	s[24] = byte(h[7] >> 13)
      +	s[25] = byte((h[7] >> 21) | (h[8] << 4))
      +	s[26] = byte(h[8] >> 4)
      +	s[27] = byte(h[8] >> 12)
      +	s[28] = byte((h[8] >> 20) | (h[9] << 6))
      +	s[29] = byte(h[9] >> 2)
      +	s[30] = byte(h[9] >> 10)
      +	s[31] = byte(h[9] >> 18)
      +}
      +
      +// feMul calculates h = f * g
      +// Can overlap h with f or g.
      +//
      +// Preconditions:
      +//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
      +//    |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
      +//
      +// Postconditions:
      +//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
      +//
      +// Notes on implementation strategy:
      +//
      +// Using schoolbook multiplication.
      +// Karatsuba would save a little in some cost models.
      +//
      +// Most multiplications by 2 and 19 are 32-bit precomputations;
      +// cheaper than 64-bit postcomputations.
      +//
      +// There is one remaining multiplication by 19 in the carry chain;
      +// one *19 precomputation can be merged into this,
      +// but the resulting data flow is considerably less clean.
      +//
      +// There are 12 carries below.
      +// 10 of them are 2-way parallelizable and vectorizable.
      +// Can get away with 11 carries, but then data flow is much deeper.
      +//
      +// With tighter constraints on inputs can squeeze carries into int32.
      +func feMul(h, f, g *fieldElement) {
      +	f0 := f[0]
      +	f1 := f[1]
      +	f2 := f[2]
      +	f3 := f[3]
      +	f4 := f[4]
      +	f5 := f[5]
      +	f6 := f[6]
      +	f7 := f[7]
      +	f8 := f[8]
      +	f9 := f[9]
      +	g0 := g[0]
      +	g1 := g[1]
      +	g2 := g[2]
      +	g3 := g[3]
      +	g4 := g[4]
      +	g5 := g[5]
      +	g6 := g[6]
      +	g7 := g[7]
      +	g8 := g[8]
      +	g9 := g[9]
      +	g1_19 := 19 * g1 // 1.4*2^29
      +	g2_19 := 19 * g2 // 1.4*2^30; still ok
      +	g3_19 := 19 * g3
      +	g4_19 := 19 * g4
      +	g5_19 := 19 * g5
      +	g6_19 := 19 * g6
      +	g7_19 := 19 * g7
      +	g8_19 := 19 * g8
      +	g9_19 := 19 * g9
      +	f1_2 := 2 * f1
      +	f3_2 := 2 * f3
      +	f5_2 := 2 * f5
      +	f7_2 := 2 * f7
      +	f9_2 := 2 * f9
      +	f0g0 := int64(f0) * int64(g0)
      +	f0g1 := int64(f0) * int64(g1)
      +	f0g2 := int64(f0) * int64(g2)
      +	f0g3 := int64(f0) * int64(g3)
      +	f0g4 := int64(f0) * int64(g4)
      +	f0g5 := int64(f0) * int64(g5)
      +	f0g6 := int64(f0) * int64(g6)
      +	f0g7 := int64(f0) * int64(g7)
      +	f0g8 := int64(f0) * int64(g8)
      +	f0g9 := int64(f0) * int64(g9)
      +	f1g0 := int64(f1) * int64(g0)
      +	f1g1_2 := int64(f1_2) * int64(g1)
      +	f1g2 := int64(f1) * int64(g2)
      +	f1g3_2 := int64(f1_2) * int64(g3)
      +	f1g4 := int64(f1) * int64(g4)
      +	f1g5_2 := int64(f1_2) * int64(g5)
      +	f1g6 := int64(f1) * int64(g6)
      +	f1g7_2 := int64(f1_2) * int64(g7)
      +	f1g8 := int64(f1) * int64(g8)
      +	f1g9_38 := int64(f1_2) * int64(g9_19)
      +	f2g0 := int64(f2) * int64(g0)
      +	f2g1 := int64(f2) * int64(g1)
      +	f2g2 := int64(f2) * int64(g2)
      +	f2g3 := int64(f2) * int64(g3)
      +	f2g4 := int64(f2) * int64(g4)
      +	f2g5 := int64(f2) * int64(g5)
      +	f2g6 := int64(f2) * int64(g6)
      +	f2g7 := int64(f2) * int64(g7)
      +	f2g8_19 := int64(f2) * int64(g8_19)
      +	f2g9_19 := int64(f2) * int64(g9_19)
      +	f3g0 := int64(f3) * int64(g0)
      +	f3g1_2 := int64(f3_2) * int64(g1)
      +	f3g2 := int64(f3) * int64(g2)
      +	f3g3_2 := int64(f3_2) * int64(g3)
      +	f3g4 := int64(f3) * int64(g4)
      +	f3g5_2 := int64(f3_2) * int64(g5)
      +	f3g6 := int64(f3) * int64(g6)
      +	f3g7_38 := int64(f3_2) * int64(g7_19)
      +	f3g8_19 := int64(f3) * int64(g8_19)
      +	f3g9_38 := int64(f3_2) * int64(g9_19)
      +	f4g0 := int64(f4) * int64(g0)
      +	f4g1 := int64(f4) * int64(g1)
      +	f4g2 := int64(f4) * int64(g2)
      +	f4g3 := int64(f4) * int64(g3)
      +	f4g4 := int64(f4) * int64(g4)
      +	f4g5 := int64(f4) * int64(g5)
      +	f4g6_19 := int64(f4) * int64(g6_19)
      +	f4g7_19 := int64(f4) * int64(g7_19)
      +	f4g8_19 := int64(f4) * int64(g8_19)
      +	f4g9_19 := int64(f4) * int64(g9_19)
      +	f5g0 := int64(f5) * int64(g0)
      +	f5g1_2 := int64(f5_2) * int64(g1)
      +	f5g2 := int64(f5) * int64(g2)
      +	f5g3_2 := int64(f5_2) * int64(g3)
      +	f5g4 := int64(f5) * int64(g4)
      +	f5g5_38 := int64(f5_2) * int64(g5_19)
      +	f5g6_19 := int64(f5) * int64(g6_19)
      +	f5g7_38 := int64(f5_2) * int64(g7_19)
      +	f5g8_19 := int64(f5) * int64(g8_19)
      +	f5g9_38 := int64(f5_2) * int64(g9_19)
      +	f6g0 := int64(f6) * int64(g0)
      +	f6g1 := int64(f6) * int64(g1)
      +	f6g2 := int64(f6) * int64(g2)
      +	f6g3 := int64(f6) * int64(g3)
      +	f6g4_19 := int64(f6) * int64(g4_19)
      +	f6g5_19 := int64(f6) * int64(g5_19)
      +	f6g6_19 := int64(f6) * int64(g6_19)
      +	f6g7_19 := int64(f6) * int64(g7_19)
      +	f6g8_19 := int64(f6) * int64(g8_19)
      +	f6g9_19 := int64(f6) * int64(g9_19)
      +	f7g0 := int64(f7) * int64(g0)
      +	f7g1_2 := int64(f7_2) * int64(g1)
      +	f7g2 := int64(f7) * int64(g2)
      +	f7g3_38 := int64(f7_2) * int64(g3_19)
      +	f7g4_19 := int64(f7) * int64(g4_19)
      +	f7g5_38 := int64(f7_2) * int64(g5_19)
      +	f7g6_19 := int64(f7) * int64(g6_19)
      +	f7g7_38 := int64(f7_2) * int64(g7_19)
      +	f7g8_19 := int64(f7) * int64(g8_19)
      +	f7g9_38 := int64(f7_2) * int64(g9_19)
      +	f8g0 := int64(f8) * int64(g0)
      +	f8g1 := int64(f8) * int64(g1)
      +	f8g2_19 := int64(f8) * int64(g2_19)
      +	f8g3_19 := int64(f8) * int64(g3_19)
      +	f8g4_19 := int64(f8) * int64(g4_19)
      +	f8g5_19 := int64(f8) * int64(g5_19)
      +	f8g6_19 := int64(f8) * int64(g6_19)
      +	f8g7_19 := int64(f8) * int64(g7_19)
      +	f8g8_19 := int64(f8) * int64(g8_19)
      +	f8g9_19 := int64(f8) * int64(g9_19)
      +	f9g0 := int64(f9) * int64(g0)
      +	f9g1_38 := int64(f9_2) * int64(g1_19)
      +	f9g2_19 := int64(f9) * int64(g2_19)
      +	f9g3_38 := int64(f9_2) * int64(g3_19)
      +	f9g4_19 := int64(f9) * int64(g4_19)
      +	f9g5_38 := int64(f9_2) * int64(g5_19)
      +	f9g6_19 := int64(f9) * int64(g6_19)
      +	f9g7_38 := int64(f9_2) * int64(g7_19)
      +	f9g8_19 := int64(f9) * int64(g8_19)
      +	f9g9_38 := int64(f9_2) * int64(g9_19)
      +	h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
      +	h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
      +	h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
      +	h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
      +	h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
      +	h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
      +	h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
      +	h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
      +	h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
      +	h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
      +	var carry [10]int64
      +
      +	// |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
      +	//   i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
      +	// |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
      +	//   i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
      +
      +	carry[0] = (h0 + (1 << 25)) >> 26
      +	h1 += carry[0]
      +	h0 -= carry[0] << 26
      +	carry[4] = (h4 + (1 << 25)) >> 26
      +	h5 += carry[4]
      +	h4 -= carry[4] << 26
      +	// |h0| <= 2^25
      +	// |h4| <= 2^25
      +	// |h1| <= 1.51*2^58
      +	// |h5| <= 1.51*2^58
      +
      +	carry[1] = (h1 + (1 << 24)) >> 25
      +	h2 += carry[1]
      +	h1 -= carry[1] << 25
      +	carry[5] = (h5 + (1 << 24)) >> 25
      +	h6 += carry[5]
      +	h5 -= carry[5] << 25
      +	// |h1| <= 2^24; from now on fits into int32
      +	// |h5| <= 2^24; from now on fits into int32
      +	// |h2| <= 1.21*2^59
      +	// |h6| <= 1.21*2^59
      +
      +	carry[2] = (h2 + (1 << 25)) >> 26
      +	h3 += carry[2]
      +	h2 -= carry[2] << 26
      +	carry[6] = (h6 + (1 << 25)) >> 26
      +	h7 += carry[6]
      +	h6 -= carry[6] << 26
      +	// |h2| <= 2^25; from now on fits into int32 unchanged
      +	// |h6| <= 2^25; from now on fits into int32 unchanged
      +	// |h3| <= 1.51*2^58
      +	// |h7| <= 1.51*2^58
      +
      +	carry[3] = (h3 + (1 << 24)) >> 25
      +	h4 += carry[3]
      +	h3 -= carry[3] << 25
      +	carry[7] = (h7 + (1 << 24)) >> 25
      +	h8 += carry[7]
      +	h7 -= carry[7] << 25
      +	// |h3| <= 2^24; from now on fits into int32 unchanged
      +	// |h7| <= 2^24; from now on fits into int32 unchanged
      +	// |h4| <= 1.52*2^33
      +	// |h8| <= 1.52*2^33
      +
      +	carry[4] = (h4 + (1 << 25)) >> 26
      +	h5 += carry[4]
      +	h4 -= carry[4] << 26
      +	carry[8] = (h8 + (1 << 25)) >> 26
      +	h9 += carry[8]
      +	h8 -= carry[8] << 26
      +	// |h4| <= 2^25; from now on fits into int32 unchanged
      +	// |h8| <= 2^25; from now on fits into int32 unchanged
      +	// |h5| <= 1.01*2^24
      +	// |h9| <= 1.51*2^58
      +
      +	carry[9] = (h9 + (1 << 24)) >> 25
      +	h0 += carry[9] * 19
      +	h9 -= carry[9] << 25
      +	// |h9| <= 2^24; from now on fits into int32 unchanged
      +	// |h0| <= 1.8*2^37
      +
      +	carry[0] = (h0 + (1 << 25)) >> 26
      +	h1 += carry[0]
      +	h0 -= carry[0] << 26
      +	// |h0| <= 2^25; from now on fits into int32 unchanged
      +	// |h1| <= 1.01*2^24
      +
      +	h[0] = int32(h0)
      +	h[1] = int32(h1)
      +	h[2] = int32(h2)
      +	h[3] = int32(h3)
      +	h[4] = int32(h4)
      +	h[5] = int32(h5)
      +	h[6] = int32(h6)
      +	h[7] = int32(h7)
      +	h[8] = int32(h8)
      +	h[9] = int32(h9)
      +}
      +
      +// feSquare calculates h = f*f. Can overlap h with f.
      +//
      +// Preconditions:
      +//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
      +//
      +// Postconditions:
      +//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
      +func feSquare(h, f *fieldElement) {
      +	f0 := f[0]
      +	f1 := f[1]
      +	f2 := f[2]
      +	f3 := f[3]
      +	f4 := f[4]
      +	f5 := f[5]
      +	f6 := f[6]
      +	f7 := f[7]
      +	f8 := f[8]
      +	f9 := f[9]
      +	f0_2 := 2 * f0
      +	f1_2 := 2 * f1
      +	f2_2 := 2 * f2
      +	f3_2 := 2 * f3
      +	f4_2 := 2 * f4
      +	f5_2 := 2 * f5
      +	f6_2 := 2 * f6
      +	f7_2 := 2 * f7
      +	f5_38 := 38 * f5 // 1.31*2^30
      +	f6_19 := 19 * f6 // 1.31*2^30
      +	f7_38 := 38 * f7 // 1.31*2^30
      +	f8_19 := 19 * f8 // 1.31*2^30
      +	f9_38 := 38 * f9 // 1.31*2^30
      +	f0f0 := int64(f0) * int64(f0)
      +	f0f1_2 := int64(f0_2) * int64(f1)
      +	f0f2_2 := int64(f0_2) * int64(f2)
      +	f0f3_2 := int64(f0_2) * int64(f3)
      +	f0f4_2 := int64(f0_2) * int64(f4)
      +	f0f5_2 := int64(f0_2) * int64(f5)
      +	f0f6_2 := int64(f0_2) * int64(f6)
      +	f0f7_2 := int64(f0_2) * int64(f7)
      +	f0f8_2 := int64(f0_2) * int64(f8)
      +	f0f9_2 := int64(f0_2) * int64(f9)
      +	f1f1_2 := int64(f1_2) * int64(f1)
      +	f1f2_2 := int64(f1_2) * int64(f2)
      +	f1f3_4 := int64(f1_2) * int64(f3_2)
      +	f1f4_2 := int64(f1_2) * int64(f4)
      +	f1f5_4 := int64(f1_2) * int64(f5_2)
      +	f1f6_2 := int64(f1_2) * int64(f6)
      +	f1f7_4 := int64(f1_2) * int64(f7_2)
      +	f1f8_2 := int64(f1_2) * int64(f8)
      +	f1f9_76 := int64(f1_2) * int64(f9_38)
      +	f2f2 := int64(f2) * int64(f2)
      +	f2f3_2 := int64(f2_2) * int64(f3)
      +	f2f4_2 := int64(f2_2) * int64(f4)
      +	f2f5_2 := int64(f2_2) * int64(f5)
      +	f2f6_2 := int64(f2_2) * int64(f6)
      +	f2f7_2 := int64(f2_2) * int64(f7)
      +	f2f8_38 := int64(f2_2) * int64(f8_19)
      +	f2f9_38 := int64(f2) * int64(f9_38)
      +	f3f3_2 := int64(f3_2) * int64(f3)
      +	f3f4_2 := int64(f3_2) * int64(f4)
      +	f3f5_4 := int64(f3_2) * int64(f5_2)
      +	f3f6_2 := int64(f3_2) * int64(f6)
      +	f3f7_76 := int64(f3_2) * int64(f7_38)
      +	f3f8_38 := int64(f3_2) * int64(f8_19)
      +	f3f9_76 := int64(f3_2) * int64(f9_38)
      +	f4f4 := int64(f4) * int64(f4)
      +	f4f5_2 := int64(f4_2) * int64(f5)
      +	f4f6_38 := int64(f4_2) * int64(f6_19)
      +	f4f7_38 := int64(f4) * int64(f7_38)
      +	f4f8_38 := int64(f4_2) * int64(f8_19)
      +	f4f9_38 := int64(f4) * int64(f9_38)
      +	f5f5_38 := int64(f5) * int64(f5_38)
      +	f5f6_38 := int64(f5_2) * int64(f6_19)
      +	f5f7_76 := int64(f5_2) * int64(f7_38)
      +	f5f8_38 := int64(f5_2) * int64(f8_19)
      +	f5f9_76 := int64(f5_2) * int64(f9_38)
      +	f6f6_19 := int64(f6) * int64(f6_19)
      +	f6f7_38 := int64(f6) * int64(f7_38)
      +	f6f8_38 := int64(f6_2) * int64(f8_19)
      +	f6f9_38 := int64(f6) * int64(f9_38)
      +	f7f7_38 := int64(f7) * int64(f7_38)
      +	f7f8_38 := int64(f7_2) * int64(f8_19)
      +	f7f9_76 := int64(f7_2) * int64(f9_38)
      +	f8f8_19 := int64(f8) * int64(f8_19)
      +	f8f9_38 := int64(f8) * int64(f9_38)
      +	f9f9_38 := int64(f9) * int64(f9_38)
      +	h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
      +	h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
      +	h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
      +	h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
      +	h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
      +	h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
      +	h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
      +	h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
      +	h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
      +	h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
      +	var carry [10]int64
      +
      +	carry[0] = (h0 + (1 << 25)) >> 26
      +	h1 += carry[0]
      +	h0 -= carry[0] << 26
      +	carry[4] = (h4 + (1 << 25)) >> 26
      +	h5 += carry[4]
      +	h4 -= carry[4] << 26
      +
      +	carry[1] = (h1 + (1 << 24)) >> 25
      +	h2 += carry[1]
      +	h1 -= carry[1] << 25
      +	carry[5] = (h5 + (1 << 24)) >> 25
      +	h6 += carry[5]
      +	h5 -= carry[5] << 25
      +
      +	carry[2] = (h2 + (1 << 25)) >> 26
      +	h3 += carry[2]
      +	h2 -= carry[2] << 26
      +	carry[6] = (h6 + (1 << 25)) >> 26
      +	h7 += carry[6]
      +	h6 -= carry[6] << 26
      +
      +	carry[3] = (h3 + (1 << 24)) >> 25
      +	h4 += carry[3]
      +	h3 -= carry[3] << 25
      +	carry[7] = (h7 + (1 << 24)) >> 25
      +	h8 += carry[7]
      +	h7 -= carry[7] << 25
      +
      +	carry[4] = (h4 + (1 << 25)) >> 26
      +	h5 += carry[4]
      +	h4 -= carry[4] << 26
      +	carry[8] = (h8 + (1 << 25)) >> 26
      +	h9 += carry[8]
      +	h8 -= carry[8] << 26
      +
      +	carry[9] = (h9 + (1 << 24)) >> 25
      +	h0 += carry[9] * 19
      +	h9 -= carry[9] << 25
      +
      +	carry[0] = (h0 + (1 << 25)) >> 26
      +	h1 += carry[0]
      +	h0 -= carry[0] << 26
      +
      +	h[0] = int32(h0)
      +	h[1] = int32(h1)
      +	h[2] = int32(h2)
      +	h[3] = int32(h3)
      +	h[4] = int32(h4)
      +	h[5] = int32(h5)
      +	h[6] = int32(h6)
      +	h[7] = int32(h7)
      +	h[8] = int32(h8)
      +	h[9] = int32(h9)
      +}
      +
+// feMul121666 calculates h = f * 121666. Can overlap h with f.
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func feMul121666(h, f *fieldElement) {
+	h0 := int64(f[0]) * 121666 // 121666 = (A+2)/4 with A = 486662, the curve25519 ladder constant
+	h1 := int64(f[1]) * 121666
+	h2 := int64(f[2]) * 121666
+	h3 := int64(f[3]) * 121666
+	h4 := int64(f[4]) * 121666
+	h5 := int64(f[5]) * 121666
+	h6 := int64(f[6]) * 121666
+	h7 := int64(f[7]) * 121666
+	h8 := int64(f[8]) * 121666
+	h9 := int64(f[9]) * 121666
+	var carry [10]int64
+
+	carry[9] = (h9 + (1 << 24)) >> 25 // first pass: carry out of the odd (25-bit) limbs
+	h0 += carry[9] * 19 // 2^255 = 19 mod p, so overflow from h9 folds into h0 scaled by 19
+	h9 -= carry[9] << 25
+	carry[1] = (h1 + (1 << 24)) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[3] = (h3 + (1 << 24)) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[5] = (h5 + (1 << 24)) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+	carry[7] = (h7 + (1 << 24)) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+
+	carry[0] = (h0 + (1 << 25)) >> 26 // second pass: carry out of the even (26-bit) limbs
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[2] = (h2 + (1 << 25)) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[4] = (h4 + (1 << 25)) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[6] = (h6 + (1 << 25)) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+	carry[8] = (h8 + (1 << 25)) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+
+	h[0] = int32(h0) // all limbs are now back within the bounds stated above
+	h[1] = int32(h1)
+	h[2] = int32(h2)
+	h[3] = int32(h3)
+	h[4] = int32(h4)
+	h[5] = int32(h5)
+	h[6] = int32(h6)
+	h[7] = int32(h7)
+	h[8] = int32(h8)
+	h[9] = int32(h9)
+}
      +
+// feInvert sets out = z^-1, computed as z^(p-2) = z^(2^255-21) by Fermat's little theorem.
+func feInvert(out, z *fieldElement) {
+	var t0, t1, t2, t3 fieldElement
+	var i int
+
+	feSquare(&t0, z) // t0 = z^2
+	for i = 1; i < 1; i++ { // loop body never runs; kept to mirror the ref10 addition-chain source
+		feSquare(&t0, &t0)
+	}
+	feSquare(&t1, &t0) // t1 = z^4
+	for i = 1; i < 2; i++ { // one more squaring: t1 = z^8
+		feSquare(&t1, &t1)
+	}
+	feMul(&t1, z, &t1) // t1 = z^9
+	feMul(&t0, &t0, &t1) // t0 = z^11
+	feSquare(&t2, &t0) // t2 = z^22
+	for i = 1; i < 1; i++ { // no-op, see above
+		feSquare(&t2, &t2)
+	}
+	feMul(&t1, &t1, &t2) // t1 = z^31 = z^(2^5-1)
+	feSquare(&t2, &t1)
+	for i = 1; i < 5; i++ { // 5 squarings total: t2 = z^(2^10-2^5)
+		feSquare(&t2, &t2)
+	}
+	feMul(&t1, &t2, &t1) // t1 = z^(2^10-1)
+	feSquare(&t2, &t1)
+	for i = 1; i < 10; i++ { // 10 squarings: t2 = z^(2^20-2^10)
+		feSquare(&t2, &t2)
+	}
+	feMul(&t2, &t2, &t1) // t2 = z^(2^20-1)
+	feSquare(&t3, &t2)
+	for i = 1; i < 20; i++ { // 20 squarings: t3 = z^(2^40-2^20)
+		feSquare(&t3, &t3)
+	}
+	feMul(&t2, &t3, &t2) // t2 = z^(2^40-1)
+	feSquare(&t2, &t2)
+	for i = 1; i < 10; i++ { // 10 squarings: t2 = z^(2^50-2^10)
+		feSquare(&t2, &t2)
+	}
+	feMul(&t1, &t2, &t1) // t1 = z^(2^50-1)
+	feSquare(&t2, &t1)
+	for i = 1; i < 50; i++ { // 50 squarings: t2 = z^(2^100-2^50)
+		feSquare(&t2, &t2)
+	}
+	feMul(&t2, &t2, &t1) // t2 = z^(2^100-1)
+	feSquare(&t3, &t2)
+	for i = 1; i < 100; i++ { // 100 squarings: t3 = z^(2^200-2^100)
+		feSquare(&t3, &t3)
+	}
+	feMul(&t2, &t3, &t2) // t2 = z^(2^200-1)
+	feSquare(&t2, &t2)
+	for i = 1; i < 50; i++ { // 50 squarings: t2 = z^(2^250-2^50)
+		feSquare(&t2, &t2)
+	}
+	feMul(&t1, &t2, &t1) // t1 = z^(2^250-1)
+	feSquare(&t1, &t1)
+	for i = 1; i < 5; i++ { // 5 squarings: t1 = z^(2^255-2^5)
+		feSquare(&t1, &t1)
+	}
+	feMul(out, &t1, &t0) // out = z^(2^255-32) * z^11 = z^(2^255-21) = z^(p-2)
+}
      +
+func scalarMult(out, in, base *[32]byte) { // generic (non-asm) Montgomery-ladder scalar multiplication
+	var e [32]byte
+
+	copy(e[:], in[:])
+	e[0] &= 248 // clamp the scalar: clear the low 3 bits (forces a multiple of the cofactor 8)
+	e[31] &= 127 // clear bit 255
+	e[31] |= 64 // set bit 254 so every scalar walks the same ladder length
+
+	var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
+	feFromBytes(&x1, base)
+	feOne(&x2) // (x2:z2) starts as (1:0); z2's zero value is the projective point at infinity
+	feCopy(&x3, &x1)
+	feOne(&z3) // (x3:z3) starts as the base point (x1:1)
+
+	swap := int32(0)
+	for pos := 254; pos >= 0; pos-- { // scalar bits from most to least significant
+		b := e[pos/8] >> uint(pos&7)
+		b &= 1
+		swap ^= int32(b) // swap only when this bit differs from the previous one
+		feCSwap(&x2, &x3, swap) // constant-time conditional swap (no secret-dependent branches)
+		feCSwap(&z2, &z3, swap)
+		swap = int32(b)
+
+		feSub(&tmp0, &x3, &z3) // combined Montgomery differential double-and-add step
+		feSub(&tmp1, &x2, &z2)
+		feAdd(&x2, &x2, &z2)
+		feAdd(&z2, &x3, &z3)
+		feMul(&z3, &tmp0, &x2)
+		feMul(&z2, &z2, &tmp1)
+		feSquare(&tmp0, &tmp1)
+		feSquare(&tmp1, &x2)
+		feAdd(&x3, &z3, &z2)
+		feSub(&z2, &z3, &z2)
+		feMul(&x2, &tmp1, &tmp0)
+		feSub(&tmp1, &tmp1, &tmp0)
+		feSquare(&z2, &z2)
+		feMul121666(&z3, &tmp1)
+		feSquare(&x3, &x3)
+		feAdd(&tmp0, &tmp0, &z3)
+		feMul(&z3, &x1, &z2)
+		feMul(&z2, &tmp1, &tmp0)
+	}
+
+	feCSwap(&x2, &x3, swap) // undo any swap left pending after the last iteration
+	feCSwap(&z2, &z3, swap)
+
+	feInvert(&z2, &z2) // convert projective (x2:z2) to the affine x coordinate x2/z2
+	feMul(&x2, &x2, &z2)
+	feToBytes(out, &x2)
+}
      diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_test.go b/vendor/golang.org/x/crypto/curve25519/curve25519_test.go
      new file mode 100644
      index 00000000..14b0ee87
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_test.go
      @@ -0,0 +1,29 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package curve25519
      +
      +import (
      +	"fmt"
      +	"testing"
      +)
      +
      +const expectedHex = "89161fde887b2b53de549af483940106ecc114d6982daa98256de23bdf77661a"
      +
+func TestBaseScalarMult(t *testing.T) {
+	var a, b [32]byte
+	in := &a
+	out := &b
+	a[0] = 1 // start from the scalar 1 (little-endian)
+
+	for i := 0; i < 200; i++ { // iterate s -> s*B 200 times, feeding each output back in as the next scalar
+		ScalarBaseMult(out, in)
+		in, out = out, in // swap buffers; after the loop the latest result is in `in`
+	}
+
+	result := fmt.Sprintf("%x", in[:])
+	if result != expectedHex { // known-answer check against the precomputed chain value
+		t.Errorf("incorrect result: got %s, want %s", result, expectedHex)
+	}
+}
      diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go
      new file mode 100644
      index 00000000..ebeea3c2
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/curve25519/doc.go
      @@ -0,0 +1,23 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package curve25519 provides an implementation of scalar multiplication on
      +// the elliptic curve known as curve25519. See http://cr.yp.to/ecdh.html
      +package curve25519 // import "golang.org/x/crypto/curve25519"
      +
+// basePoint is the x coordinate of the generator of the curve (x = 9), in little-endian form.
+var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
      +
+// ScalarMult sets dst to the product in*base where dst and base are the x
+// coordinates of group points and all values are in little-endian form.
+func ScalarMult(dst, in, base *[32]byte) {
+	scalarMult(dst, in, base) // per-platform implementation (build-tagged asm on amd64, generic Go otherwise — verify via build tags)
+}
      +
+// ScalarBaseMult sets dst to the product in*base where dst and base are the x
+// coordinates of group points, base is the standard generator and all values
+// are in little-endian form.
+func ScalarBaseMult(dst, in *[32]byte) {
+	ScalarMult(dst, in, &basePoint) // the generator's x coordinate is 9
+}
      diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
      new file mode 100644
      index 00000000..37599fac
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
      @@ -0,0 +1,94 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This code was translated into a form compatible with 6a from the public
      +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
      +
      +// +build amd64,!gccgo,!appengine
      +
+// func freeze(inout *[5]uint64)
+// freeze reduces a field element in 5-limb radix-2^51 representation to its
+// unique canonical form below p = 2^255-19, in place and branch-free on the value.
+TEXT ·freeze(SB),7,$96-8
+	MOVQ inout+0(FP), DI
+
+	MOVQ SP,R11 // save SP, then align the stack down to a 32-byte boundary
+	MOVQ $31,CX
+	NOTQ CX
+	ANDQ CX,SP
+	ADDQ $32,SP
+
+	MOVQ R11,0(SP) // spill the original SP and callee-saved registers
+	MOVQ R12,8(SP)
+	MOVQ R13,16(SP)
+	MOVQ R14,24(SP)
+	MOVQ R15,32(SP)
+	MOVQ BX,40(SP)
+	MOVQ BP,48(SP)
+	MOVQ 0(DI),SI // load the five 51-bit limbs into SI,DX,CX,R8,R9
+	MOVQ 8(DI),DX
+	MOVQ 16(DI),CX
+	MOVQ 24(DI),R8
+	MOVQ 32(DI),R9
+	MOVQ ·REDMASK51(SB),AX // AX = the 51-bit limb mask (2^51-1)
+	MOVQ AX,R10
+	SUBQ $18,R10 // R10 = mask-18 = 2^51-19, the low limb of p
+	MOVQ $3,R11 // three carry passes fully propagate all limbs
+REDUCELOOP:
+	MOVQ SI,R12
+	SHRQ $51,R12 // carry out of limb 0
+	ANDQ AX,SI
+	ADDQ R12,DX
+	MOVQ DX,R12
+	SHRQ $51,R12
+	ANDQ AX,DX
+	ADDQ R12,CX
+	MOVQ CX,R12
+	SHRQ $51,R12
+	ANDQ AX,CX
+	ADDQ R12,R8
+	MOVQ R8,R12
+	SHRQ $51,R12
+	ANDQ AX,R8
+	ADDQ R12,R9
+	MOVQ R9,R12
+	SHRQ $51,R12
+	ANDQ AX,R9
+	IMUL3Q $19,R12,R12 // 2^255 = 19 mod p: fold the top carry back into limb 0 times 19
+	ADDQ R12,SI
+	SUBQ $1,R11
+	JA REDUCELOOP
+	MOVQ $1,R12 // branch-free test: decide whether one final subtraction of p is needed
+	CMPQ R10,SI
+	CMOVQLT R11,R12 // R11 is 0 here (loop counter ran out), so CMOVs clear R12
+	CMPQ AX,DX
+	CMOVQNE R11,R12
+	CMPQ AX,CX
+	CMOVQNE R11,R12
+	CMPQ AX,R8
+	CMOVQNE R11,R12
+	CMPQ AX,R9
+	CMOVQNE R11,R12
+	NEGQ R12 // R12 becomes an all-ones/all-zeros mask
+	ANDQ R12,AX // masked constants: either subtract p limb-wise or subtract 0
+	ANDQ R12,R10
+	SUBQ R10,SI
+	SUBQ AX,DX
+	SUBQ AX,CX
+	SUBQ AX,R8
+	SUBQ AX,R9
+	MOVQ SI,0(DI) // store the canonical limbs back
+	MOVQ DX,8(DI)
+	MOVQ CX,16(DI)
+	MOVQ R8,24(DI)
+	MOVQ R9,32(DI)
+	MOVQ 0(SP),R11 // restore callee-saved registers and the original SP
+	MOVQ 8(SP),R12
+	MOVQ 16(SP),R13
+	MOVQ 24(SP),R14
+	MOVQ 32(SP),R15
+	MOVQ 40(SP),BX
+	MOVQ 48(SP),BP
+	MOVQ R11,SP
+	MOVQ DI,AX // NOTE(review): AX/DX returns appear vestigial from the SUPERCOP translation
+	MOVQ SI,DX
+	RET
      diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
      new file mode 100644
      index 00000000..3949f9cf
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
      @@ -0,0 +1,1398 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This code was translated into a form compatible with 6a from the public
      +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
      +
      +// +build amd64,!gccgo,!appengine
      +
      +// func ladderstep(inout *[5][5]uint64)
      +TEXT ·ladderstep(SB),0,$384-8
      +	MOVQ inout+0(FP),DI
      +
      +	MOVQ SP,R11
      +	MOVQ $31,CX
      +	NOTQ CX
      +	ANDQ CX,SP
      +	ADDQ $32,SP
      +
      +	MOVQ R11,0(SP)
      +	MOVQ R12,8(SP)
      +	MOVQ R13,16(SP)
      +	MOVQ R14,24(SP)
      +	MOVQ R15,32(SP)
      +	MOVQ BX,40(SP)
      +	MOVQ BP,48(SP)
      +	MOVQ 40(DI),SI
      +	MOVQ 48(DI),DX
      +	MOVQ 56(DI),CX
      +	MOVQ 64(DI),R8
      +	MOVQ 72(DI),R9
      +	MOVQ SI,AX
      +	MOVQ DX,R10
      +	MOVQ CX,R11
      +	MOVQ R8,R12
      +	MOVQ R9,R13
      +	ADDQ ·_2P0(SB),AX
      +	ADDQ ·_2P1234(SB),R10
      +	ADDQ ·_2P1234(SB),R11
      +	ADDQ ·_2P1234(SB),R12
      +	ADDQ ·_2P1234(SB),R13
      +	ADDQ 80(DI),SI
      +	ADDQ 88(DI),DX
      +	ADDQ 96(DI),CX
      +	ADDQ 104(DI),R8
      +	ADDQ 112(DI),R9
      +	SUBQ 80(DI),AX
      +	SUBQ 88(DI),R10
      +	SUBQ 96(DI),R11
      +	SUBQ 104(DI),R12
      +	SUBQ 112(DI),R13
      +	MOVQ SI,56(SP)
      +	MOVQ DX,64(SP)
      +	MOVQ CX,72(SP)
      +	MOVQ R8,80(SP)
      +	MOVQ R9,88(SP)
      +	MOVQ AX,96(SP)
      +	MOVQ R10,104(SP)
      +	MOVQ R11,112(SP)
      +	MOVQ R12,120(SP)
      +	MOVQ R13,128(SP)
      +	MOVQ 96(SP),AX
      +	MULQ 96(SP)
      +	MOVQ AX,SI
      +	MOVQ DX,CX
      +	MOVQ 96(SP),AX
      +	SHLQ $1,AX
      +	MULQ 104(SP)
      +	MOVQ AX,R8
      +	MOVQ DX,R9
      +	MOVQ 96(SP),AX
      +	SHLQ $1,AX
      +	MULQ 112(SP)
      +	MOVQ AX,R10
      +	MOVQ DX,R11
      +	MOVQ 96(SP),AX
      +	SHLQ $1,AX
      +	MULQ 120(SP)
      +	MOVQ AX,R12
      +	MOVQ DX,R13
      +	MOVQ 96(SP),AX
      +	SHLQ $1,AX
      +	MULQ 128(SP)
      +	MOVQ AX,R14
      +	MOVQ DX,R15
      +	MOVQ 104(SP),AX
      +	MULQ 104(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 104(SP),AX
      +	SHLQ $1,AX
      +	MULQ 112(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 104(SP),AX
      +	SHLQ $1,AX
      +	MULQ 120(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 104(SP),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 128(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 112(SP),AX
      +	MULQ 112(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 112(SP),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 120(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 112(SP),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 128(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 120(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 120(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 120(SP),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 128(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 128(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 128(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ ·REDMASK51(SB),DX
      +	SHLQ $13,CX:SI
      +	ANDQ DX,SI
      +	SHLQ $13,R9:R8
      +	ANDQ DX,R8
      +	ADDQ CX,R8
      +	SHLQ $13,R11:R10
      +	ANDQ DX,R10
      +	ADDQ R9,R10
      +	SHLQ $13,R13:R12
      +	ANDQ DX,R12
      +	ADDQ R11,R12
      +	SHLQ $13,R15:R14
      +	ANDQ DX,R14
      +	ADDQ R13,R14
      +	IMUL3Q $19,R15,CX
      +	ADDQ CX,SI
      +	MOVQ SI,CX
      +	SHRQ $51,CX
      +	ADDQ R8,CX
      +	ANDQ DX,SI
      +	MOVQ CX,R8
      +	SHRQ $51,CX
      +	ADDQ R10,CX
      +	ANDQ DX,R8
      +	MOVQ CX,R9
      +	SHRQ $51,CX
      +	ADDQ R12,CX
      +	ANDQ DX,R9
      +	MOVQ CX,AX
      +	SHRQ $51,CX
      +	ADDQ R14,CX
      +	ANDQ DX,AX
      +	MOVQ CX,R10
      +	SHRQ $51,CX
      +	IMUL3Q $19,CX,CX
      +	ADDQ CX,SI
      +	ANDQ DX,R10
      +	MOVQ SI,136(SP)
      +	MOVQ R8,144(SP)
      +	MOVQ R9,152(SP)
      +	MOVQ AX,160(SP)
      +	MOVQ R10,168(SP)
      +	MOVQ 56(SP),AX
      +	MULQ 56(SP)
      +	MOVQ AX,SI
      +	MOVQ DX,CX
      +	MOVQ 56(SP),AX
      +	SHLQ $1,AX
      +	MULQ 64(SP)
      +	MOVQ AX,R8
      +	MOVQ DX,R9
      +	MOVQ 56(SP),AX
      +	SHLQ $1,AX
      +	MULQ 72(SP)
      +	MOVQ AX,R10
      +	MOVQ DX,R11
      +	MOVQ 56(SP),AX
      +	SHLQ $1,AX
      +	MULQ 80(SP)
      +	MOVQ AX,R12
      +	MOVQ DX,R13
      +	MOVQ 56(SP),AX
      +	SHLQ $1,AX
      +	MULQ 88(SP)
      +	MOVQ AX,R14
      +	MOVQ DX,R15
      +	MOVQ 64(SP),AX
      +	MULQ 64(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 64(SP),AX
      +	SHLQ $1,AX
      +	MULQ 72(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 64(SP),AX
      +	SHLQ $1,AX
      +	MULQ 80(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 64(SP),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 88(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 72(SP),AX
      +	MULQ 72(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 72(SP),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 80(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 72(SP),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 88(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 80(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 80(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 80(SP),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 88(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 88(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 88(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ ·REDMASK51(SB),DX
      +	SHLQ $13,CX:SI
      +	ANDQ DX,SI
      +	SHLQ $13,R9:R8
      +	ANDQ DX,R8
      +	ADDQ CX,R8
      +	SHLQ $13,R11:R10
      +	ANDQ DX,R10
      +	ADDQ R9,R10
      +	SHLQ $13,R13:R12
      +	ANDQ DX,R12
      +	ADDQ R11,R12
      +	SHLQ $13,R15:R14
      +	ANDQ DX,R14
      +	ADDQ R13,R14
      +	IMUL3Q $19,R15,CX
      +	ADDQ CX,SI
      +	MOVQ SI,CX
      +	SHRQ $51,CX
      +	ADDQ R8,CX
      +	ANDQ DX,SI
      +	MOVQ CX,R8
      +	SHRQ $51,CX
      +	ADDQ R10,CX
      +	ANDQ DX,R8
      +	MOVQ CX,R9
      +	SHRQ $51,CX
      +	ADDQ R12,CX
      +	ANDQ DX,R9
      +	MOVQ CX,AX
      +	SHRQ $51,CX
      +	ADDQ R14,CX
      +	ANDQ DX,AX
      +	MOVQ CX,R10
      +	SHRQ $51,CX
      +	IMUL3Q $19,CX,CX
      +	ADDQ CX,SI
      +	ANDQ DX,R10
      +	MOVQ SI,176(SP)
      +	MOVQ R8,184(SP)
      +	MOVQ R9,192(SP)
      +	MOVQ AX,200(SP)
      +	MOVQ R10,208(SP)
      +	MOVQ SI,SI
      +	MOVQ R8,DX
      +	MOVQ R9,CX
      +	MOVQ AX,R8
      +	MOVQ R10,R9
      +	ADDQ ·_2P0(SB),SI
      +	ADDQ ·_2P1234(SB),DX
      +	ADDQ ·_2P1234(SB),CX
      +	ADDQ ·_2P1234(SB),R8
      +	ADDQ ·_2P1234(SB),R9
      +	SUBQ 136(SP),SI
      +	SUBQ 144(SP),DX
      +	SUBQ 152(SP),CX
      +	SUBQ 160(SP),R8
      +	SUBQ 168(SP),R9
      +	MOVQ SI,216(SP)
      +	MOVQ DX,224(SP)
      +	MOVQ CX,232(SP)
      +	MOVQ R8,240(SP)
      +	MOVQ R9,248(SP)
      +	MOVQ 120(DI),SI
      +	MOVQ 128(DI),DX
      +	MOVQ 136(DI),CX
      +	MOVQ 144(DI),R8
      +	MOVQ 152(DI),R9
      +	MOVQ SI,AX
      +	MOVQ DX,R10
      +	MOVQ CX,R11
      +	MOVQ R8,R12
      +	MOVQ R9,R13
      +	ADDQ ·_2P0(SB),AX
      +	ADDQ ·_2P1234(SB),R10
      +	ADDQ ·_2P1234(SB),R11
      +	ADDQ ·_2P1234(SB),R12
      +	ADDQ ·_2P1234(SB),R13
      +	ADDQ 160(DI),SI
      +	ADDQ 168(DI),DX
      +	ADDQ 176(DI),CX
      +	ADDQ 184(DI),R8
      +	ADDQ 192(DI),R9
      +	SUBQ 160(DI),AX
      +	SUBQ 168(DI),R10
      +	SUBQ 176(DI),R11
      +	SUBQ 184(DI),R12
      +	SUBQ 192(DI),R13
      +	MOVQ SI,256(SP)
      +	MOVQ DX,264(SP)
      +	MOVQ CX,272(SP)
      +	MOVQ R8,280(SP)
      +	MOVQ R9,288(SP)
      +	MOVQ AX,296(SP)
      +	MOVQ R10,304(SP)
      +	MOVQ R11,312(SP)
      +	MOVQ R12,320(SP)
      +	MOVQ R13,328(SP)
      +	MOVQ 280(SP),SI
      +	IMUL3Q $19,SI,AX
      +	MOVQ AX,336(SP)
      +	MULQ 112(SP)
      +	MOVQ AX,SI
      +	MOVQ DX,CX
      +	MOVQ 288(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MOVQ AX,344(SP)
      +	MULQ 104(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 256(SP),AX
      +	MULQ 96(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 256(SP),AX
      +	MULQ 104(SP)
      +	MOVQ AX,R8
      +	MOVQ DX,R9
      +	MOVQ 256(SP),AX
      +	MULQ 112(SP)
      +	MOVQ AX,R10
      +	MOVQ DX,R11
      +	MOVQ 256(SP),AX
      +	MULQ 120(SP)
      +	MOVQ AX,R12
      +	MOVQ DX,R13
      +	MOVQ 256(SP),AX
      +	MULQ 128(SP)
      +	MOVQ AX,R14
      +	MOVQ DX,R15
      +	MOVQ 264(SP),AX
      +	MULQ 96(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 264(SP),AX
      +	MULQ 104(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 264(SP),AX
      +	MULQ 112(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 264(SP),AX
      +	MULQ 120(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 264(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 128(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 272(SP),AX
      +	MULQ 96(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 272(SP),AX
      +	MULQ 104(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 272(SP),AX
      +	MULQ 112(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 272(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 120(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 272(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 128(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 280(SP),AX
      +	MULQ 96(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 280(SP),AX
      +	MULQ 104(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 336(SP),AX
      +	MULQ 120(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 336(SP),AX
      +	MULQ 128(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 288(SP),AX
      +	MULQ 96(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 344(SP),AX
      +	MULQ 112(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 344(SP),AX
      +	MULQ 120(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 344(SP),AX
      +	MULQ 128(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ ·REDMASK51(SB),DX
      +	SHLQ $13,CX:SI
      +	ANDQ DX,SI
      +	SHLQ $13,R9:R8
      +	ANDQ DX,R8
      +	ADDQ CX,R8
      +	SHLQ $13,R11:R10
      +	ANDQ DX,R10
      +	ADDQ R9,R10
      +	SHLQ $13,R13:R12
      +	ANDQ DX,R12
      +	ADDQ R11,R12
      +	SHLQ $13,R15:R14
      +	ANDQ DX,R14
      +	ADDQ R13,R14
      +	IMUL3Q $19,R15,CX
      +	ADDQ CX,SI
      +	MOVQ SI,CX
      +	SHRQ $51,CX
      +	ADDQ R8,CX
      +	MOVQ CX,R8
      +	SHRQ $51,CX
      +	ANDQ DX,SI
      +	ADDQ R10,CX
      +	MOVQ CX,R9
      +	SHRQ $51,CX
      +	ANDQ DX,R8
      +	ADDQ R12,CX
      +	MOVQ CX,AX
      +	SHRQ $51,CX
      +	ANDQ DX,R9
      +	ADDQ R14,CX
      +	MOVQ CX,R10
      +	SHRQ $51,CX
      +	ANDQ DX,AX
      +	IMUL3Q $19,CX,CX
      +	ADDQ CX,SI
      +	ANDQ DX,R10
      +	MOVQ SI,96(SP)
      +	MOVQ R8,104(SP)
      +	MOVQ R9,112(SP)
      +	MOVQ AX,120(SP)
      +	MOVQ R10,128(SP)
      +	MOVQ 320(SP),SI
      +	IMUL3Q $19,SI,AX
      +	MOVQ AX,256(SP)
      +	MULQ 72(SP)
      +	MOVQ AX,SI
      +	MOVQ DX,CX
      +	MOVQ 328(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MOVQ AX,264(SP)
      +	MULQ 64(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 296(SP),AX
      +	MULQ 56(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 296(SP),AX
      +	MULQ 64(SP)
      +	MOVQ AX,R8
      +	MOVQ DX,R9
      +	MOVQ 296(SP),AX
      +	MULQ 72(SP)
      +	MOVQ AX,R10
      +	MOVQ DX,R11
      +	MOVQ 296(SP),AX
      +	MULQ 80(SP)
      +	MOVQ AX,R12
      +	MOVQ DX,R13
      +	MOVQ 296(SP),AX
      +	MULQ 88(SP)
      +	MOVQ AX,R14
      +	MOVQ DX,R15
      +	MOVQ 304(SP),AX
      +	MULQ 56(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 304(SP),AX
      +	MULQ 64(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 304(SP),AX
      +	MULQ 72(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 304(SP),AX
      +	MULQ 80(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 304(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 88(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 312(SP),AX
      +	MULQ 56(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 312(SP),AX
      +	MULQ 64(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 312(SP),AX
      +	MULQ 72(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 312(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 80(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 312(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 88(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 320(SP),AX
      +	MULQ 56(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 320(SP),AX
      +	MULQ 64(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 256(SP),AX
      +	MULQ 80(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 256(SP),AX
      +	MULQ 88(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 328(SP),AX
      +	MULQ 56(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 264(SP),AX
      +	MULQ 72(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 264(SP),AX
      +	MULQ 80(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 264(SP),AX
      +	MULQ 88(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ ·REDMASK51(SB),DX
      +	SHLQ $13,CX:SI
      +	ANDQ DX,SI
      +	SHLQ $13,R9:R8
      +	ANDQ DX,R8
      +	ADDQ CX,R8
      +	SHLQ $13,R11:R10
      +	ANDQ DX,R10
      +	ADDQ R9,R10
      +	SHLQ $13,R13:R12
      +	ANDQ DX,R12
      +	ADDQ R11,R12
      +	SHLQ $13,R15:R14
      +	ANDQ DX,R14
      +	ADDQ R13,R14
      +	IMUL3Q $19,R15,CX
      +	ADDQ CX,SI
      +	MOVQ SI,CX
      +	SHRQ $51,CX
      +	ADDQ R8,CX
      +	MOVQ CX,R8
      +	SHRQ $51,CX
      +	ANDQ DX,SI
      +	ADDQ R10,CX
      +	MOVQ CX,R9
      +	SHRQ $51,CX
      +	ANDQ DX,R8
      +	ADDQ R12,CX
      +	MOVQ CX,AX
      +	SHRQ $51,CX
      +	ANDQ DX,R9
      +	ADDQ R14,CX
      +	MOVQ CX,R10
      +	SHRQ $51,CX
      +	ANDQ DX,AX
      +	IMUL3Q $19,CX,CX
      +	ADDQ CX,SI
      +	ANDQ DX,R10
      +	MOVQ SI,DX
      +	MOVQ R8,CX
      +	MOVQ R9,R11
      +	MOVQ AX,R12
      +	MOVQ R10,R13
      +	ADDQ ·_2P0(SB),DX
      +	ADDQ ·_2P1234(SB),CX
      +	ADDQ ·_2P1234(SB),R11
      +	ADDQ ·_2P1234(SB),R12
      +	ADDQ ·_2P1234(SB),R13
      +	ADDQ 96(SP),SI
      +	ADDQ 104(SP),R8
      +	ADDQ 112(SP),R9
      +	ADDQ 120(SP),AX
      +	ADDQ 128(SP),R10
      +	SUBQ 96(SP),DX
      +	SUBQ 104(SP),CX
      +	SUBQ 112(SP),R11
      +	SUBQ 120(SP),R12
      +	SUBQ 128(SP),R13
      +	MOVQ SI,120(DI)
      +	MOVQ R8,128(DI)
      +	MOVQ R9,136(DI)
      +	MOVQ AX,144(DI)
      +	MOVQ R10,152(DI)
      +	MOVQ DX,160(DI)
      +	MOVQ CX,168(DI)
      +	MOVQ R11,176(DI)
      +	MOVQ R12,184(DI)
      +	MOVQ R13,192(DI)
      +	MOVQ 120(DI),AX
      +	MULQ 120(DI)
      +	MOVQ AX,SI
      +	MOVQ DX,CX
      +	MOVQ 120(DI),AX
      +	SHLQ $1,AX
      +	MULQ 128(DI)
      +	MOVQ AX,R8
      +	MOVQ DX,R9
      +	MOVQ 120(DI),AX
      +	SHLQ $1,AX
      +	MULQ 136(DI)
      +	MOVQ AX,R10
      +	MOVQ DX,R11
      +	MOVQ 120(DI),AX
      +	SHLQ $1,AX
      +	MULQ 144(DI)
      +	MOVQ AX,R12
      +	MOVQ DX,R13
      +	MOVQ 120(DI),AX
      +	SHLQ $1,AX
      +	MULQ 152(DI)
      +	MOVQ AX,R14
      +	MOVQ DX,R15
      +	MOVQ 128(DI),AX
      +	MULQ 128(DI)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 128(DI),AX
      +	SHLQ $1,AX
      +	MULQ 136(DI)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 128(DI),AX
      +	SHLQ $1,AX
      +	MULQ 144(DI)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 128(DI),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 152(DI)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 136(DI),AX
      +	MULQ 136(DI)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 136(DI),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 144(DI)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 136(DI),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 152(DI)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 144(DI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 144(DI)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 144(DI),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 152(DI)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 152(DI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 152(DI)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ ·REDMASK51(SB),DX
      +	SHLQ $13,CX:SI
      +	ANDQ DX,SI
      +	SHLQ $13,R9:R8
      +	ANDQ DX,R8
      +	ADDQ CX,R8
      +	SHLQ $13,R11:R10
      +	ANDQ DX,R10
      +	ADDQ R9,R10
      +	SHLQ $13,R13:R12
      +	ANDQ DX,R12
      +	ADDQ R11,R12
      +	SHLQ $13,R15:R14
      +	ANDQ DX,R14
      +	ADDQ R13,R14
      +	IMUL3Q $19,R15,CX
      +	ADDQ CX,SI
      +	MOVQ SI,CX
      +	SHRQ $51,CX
      +	ADDQ R8,CX
      +	ANDQ DX,SI
      +	MOVQ CX,R8
      +	SHRQ $51,CX
      +	ADDQ R10,CX
      +	ANDQ DX,R8
      +	MOVQ CX,R9
      +	SHRQ $51,CX
      +	ADDQ R12,CX
      +	ANDQ DX,R9
      +	MOVQ CX,AX
      +	SHRQ $51,CX
      +	ADDQ R14,CX
      +	ANDQ DX,AX
      +	MOVQ CX,R10
      +	SHRQ $51,CX
      +	IMUL3Q $19,CX,CX
      +	ADDQ CX,SI
      +	ANDQ DX,R10
      +	MOVQ SI,120(DI)
      +	MOVQ R8,128(DI)
      +	MOVQ R9,136(DI)
      +	MOVQ AX,144(DI)
      +	MOVQ R10,152(DI)
      +	MOVQ 160(DI),AX
      +	MULQ 160(DI)
      +	MOVQ AX,SI
      +	MOVQ DX,CX
      +	MOVQ 160(DI),AX
      +	SHLQ $1,AX
      +	MULQ 168(DI)
      +	MOVQ AX,R8
      +	MOVQ DX,R9
      +	MOVQ 160(DI),AX
      +	SHLQ $1,AX
      +	MULQ 176(DI)
      +	MOVQ AX,R10
      +	MOVQ DX,R11
      +	MOVQ 160(DI),AX
      +	SHLQ $1,AX
      +	MULQ 184(DI)
      +	MOVQ AX,R12
      +	MOVQ DX,R13
      +	MOVQ 160(DI),AX
      +	SHLQ $1,AX
      +	MULQ 192(DI)
      +	MOVQ AX,R14
      +	MOVQ DX,R15
      +	MOVQ 168(DI),AX
      +	MULQ 168(DI)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 168(DI),AX
      +	SHLQ $1,AX
      +	MULQ 176(DI)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 168(DI),AX
      +	SHLQ $1,AX
      +	MULQ 184(DI)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 168(DI),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 192(DI)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 176(DI),AX
      +	MULQ 176(DI)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 176(DI),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 184(DI)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 176(DI),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 192(DI)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 184(DI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 184(DI)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 184(DI),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 192(DI)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 192(DI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 192(DI)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ ·REDMASK51(SB),DX
      +	SHLQ $13,CX:SI
      +	ANDQ DX,SI
      +	SHLQ $13,R9:R8
      +	ANDQ DX,R8
      +	ADDQ CX,R8
      +	SHLQ $13,R11:R10
      +	ANDQ DX,R10
      +	ADDQ R9,R10
      +	SHLQ $13,R13:R12
      +	ANDQ DX,R12
      +	ADDQ R11,R12
      +	SHLQ $13,R15:R14
      +	ANDQ DX,R14
      +	ADDQ R13,R14
      +	IMUL3Q $19,R15,CX
      +	ADDQ CX,SI
      +	MOVQ SI,CX
      +	SHRQ $51,CX
      +	ADDQ R8,CX
      +	ANDQ DX,SI
      +	MOVQ CX,R8
      +	SHRQ $51,CX
      +	ADDQ R10,CX
      +	ANDQ DX,R8
      +	MOVQ CX,R9
      +	SHRQ $51,CX
      +	ADDQ R12,CX
      +	ANDQ DX,R9
      +	MOVQ CX,AX
      +	SHRQ $51,CX
      +	ADDQ R14,CX
      +	ANDQ DX,AX
      +	MOVQ CX,R10
      +	SHRQ $51,CX
      +	IMUL3Q $19,CX,CX
      +	ADDQ CX,SI
      +	ANDQ DX,R10
      +	MOVQ SI,160(DI)
      +	MOVQ R8,168(DI)
      +	MOVQ R9,176(DI)
      +	MOVQ AX,184(DI)
      +	MOVQ R10,192(DI)
      +	MOVQ 184(DI),SI
      +	IMUL3Q $19,SI,AX
      +	MOVQ AX,56(SP)
      +	MULQ 16(DI)
      +	MOVQ AX,SI
      +	MOVQ DX,CX
      +	MOVQ 192(DI),DX
      +	IMUL3Q $19,DX,AX
      +	MOVQ AX,64(SP)
      +	MULQ 8(DI)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 160(DI),AX
      +	MULQ 0(DI)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 160(DI),AX
      +	MULQ 8(DI)
      +	MOVQ AX,R8
      +	MOVQ DX,R9
      +	MOVQ 160(DI),AX
      +	MULQ 16(DI)
      +	MOVQ AX,R10
      +	MOVQ DX,R11
      +	MOVQ 160(DI),AX
      +	MULQ 24(DI)
      +	MOVQ AX,R12
      +	MOVQ DX,R13
      +	MOVQ 160(DI),AX
      +	MULQ 32(DI)
      +	MOVQ AX,R14
      +	MOVQ DX,R15
      +	MOVQ 168(DI),AX
      +	MULQ 0(DI)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 168(DI),AX
      +	MULQ 8(DI)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 168(DI),AX
      +	MULQ 16(DI)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 168(DI),AX
      +	MULQ 24(DI)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 168(DI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 32(DI)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 176(DI),AX
      +	MULQ 0(DI)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 176(DI),AX
      +	MULQ 8(DI)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 176(DI),AX
      +	MULQ 16(DI)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 176(DI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 24(DI)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 176(DI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 32(DI)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 184(DI),AX
      +	MULQ 0(DI)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 184(DI),AX
      +	MULQ 8(DI)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 56(SP),AX
      +	MULQ 24(DI)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 56(SP),AX
      +	MULQ 32(DI)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 192(DI),AX
      +	MULQ 0(DI)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 64(SP),AX
      +	MULQ 16(DI)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 64(SP),AX
      +	MULQ 24(DI)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 64(SP),AX
      +	MULQ 32(DI)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ ·REDMASK51(SB),DX
      +	SHLQ $13,CX:SI
      +	ANDQ DX,SI
      +	SHLQ $13,R9:R8
      +	ANDQ DX,R8
      +	ADDQ CX,R8
      +	SHLQ $13,R11:R10
      +	ANDQ DX,R10
      +	ADDQ R9,R10
      +	SHLQ $13,R13:R12
      +	ANDQ DX,R12
      +	ADDQ R11,R12
      +	SHLQ $13,R15:R14
      +	ANDQ DX,R14
      +	ADDQ R13,R14
      +	IMUL3Q $19,R15,CX
      +	ADDQ CX,SI
      +	MOVQ SI,CX
      +	SHRQ $51,CX
      +	ADDQ R8,CX
      +	MOVQ CX,R8
      +	SHRQ $51,CX
      +	ANDQ DX,SI
      +	ADDQ R10,CX
      +	MOVQ CX,R9
      +	SHRQ $51,CX
      +	ANDQ DX,R8
      +	ADDQ R12,CX
      +	MOVQ CX,AX
      +	SHRQ $51,CX
      +	ANDQ DX,R9
      +	ADDQ R14,CX
      +	MOVQ CX,R10
      +	SHRQ $51,CX
      +	ANDQ DX,AX
      +	IMUL3Q $19,CX,CX
      +	ADDQ CX,SI
      +	ANDQ DX,R10
      +	MOVQ SI,160(DI)
      +	MOVQ R8,168(DI)
      +	MOVQ R9,176(DI)
      +	MOVQ AX,184(DI)
      +	MOVQ R10,192(DI)
      +	MOVQ 200(SP),SI
      +	IMUL3Q $19,SI,AX
      +	MOVQ AX,56(SP)
      +	MULQ 152(SP)
      +	MOVQ AX,SI
      +	MOVQ DX,CX
      +	MOVQ 208(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MOVQ AX,64(SP)
      +	MULQ 144(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 176(SP),AX
      +	MULQ 136(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 176(SP),AX
      +	MULQ 144(SP)
      +	MOVQ AX,R8
      +	MOVQ DX,R9
      +	MOVQ 176(SP),AX
      +	MULQ 152(SP)
      +	MOVQ AX,R10
      +	MOVQ DX,R11
      +	MOVQ 176(SP),AX
      +	MULQ 160(SP)
      +	MOVQ AX,R12
      +	MOVQ DX,R13
      +	MOVQ 176(SP),AX
      +	MULQ 168(SP)
      +	MOVQ AX,R14
      +	MOVQ DX,R15
      +	MOVQ 184(SP),AX
      +	MULQ 136(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 184(SP),AX
      +	MULQ 144(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 184(SP),AX
      +	MULQ 152(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 184(SP),AX
      +	MULQ 160(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 184(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 168(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 192(SP),AX
      +	MULQ 136(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 192(SP),AX
      +	MULQ 144(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 192(SP),AX
      +	MULQ 152(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 192(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 160(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 192(SP),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 168(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 200(SP),AX
      +	MULQ 136(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 200(SP),AX
      +	MULQ 144(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 56(SP),AX
      +	MULQ 160(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 56(SP),AX
      +	MULQ 168(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 208(SP),AX
      +	MULQ 136(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 64(SP),AX
      +	MULQ 152(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 64(SP),AX
      +	MULQ 160(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 64(SP),AX
      +	MULQ 168(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ ·REDMASK51(SB),DX
      +	SHLQ $13,CX:SI
      +	ANDQ DX,SI
      +	SHLQ $13,R9:R8
      +	ANDQ DX,R8
      +	ADDQ CX,R8
      +	SHLQ $13,R11:R10
      +	ANDQ DX,R10
      +	ADDQ R9,R10
      +	SHLQ $13,R13:R12
      +	ANDQ DX,R12
      +	ADDQ R11,R12
      +	SHLQ $13,R15:R14
      +	ANDQ DX,R14
      +	ADDQ R13,R14
      +	IMUL3Q $19,R15,CX
      +	ADDQ CX,SI
      +	MOVQ SI,CX
      +	SHRQ $51,CX
      +	ADDQ R8,CX
      +	MOVQ CX,R8
      +	SHRQ $51,CX
      +	ANDQ DX,SI
      +	ADDQ R10,CX
      +	MOVQ CX,R9
      +	SHRQ $51,CX
      +	ANDQ DX,R8
      +	ADDQ R12,CX
      +	MOVQ CX,AX
      +	SHRQ $51,CX
      +	ANDQ DX,R9
      +	ADDQ R14,CX
      +	MOVQ CX,R10
      +	SHRQ $51,CX
      +	ANDQ DX,AX
      +	IMUL3Q $19,CX,CX
      +	ADDQ CX,SI
      +	ANDQ DX,R10
      +	MOVQ SI,40(DI)
      +	MOVQ R8,48(DI)
      +	MOVQ R9,56(DI)
      +	MOVQ AX,64(DI)
      +	MOVQ R10,72(DI)
      +	MOVQ 216(SP),AX
      +	MULQ ·_121666_213(SB)
      +	SHRQ $13,AX
      +	MOVQ AX,SI
      +	MOVQ DX,CX
      +	MOVQ 224(SP),AX
      +	MULQ ·_121666_213(SB)
      +	SHRQ $13,AX
      +	ADDQ AX,CX
      +	MOVQ DX,R8
      +	MOVQ 232(SP),AX
      +	MULQ ·_121666_213(SB)
      +	SHRQ $13,AX
      +	ADDQ AX,R8
      +	MOVQ DX,R9
      +	MOVQ 240(SP),AX
      +	MULQ ·_121666_213(SB)
      +	SHRQ $13,AX
      +	ADDQ AX,R9
      +	MOVQ DX,R10
      +	MOVQ 248(SP),AX
      +	MULQ ·_121666_213(SB)
      +	SHRQ $13,AX
      +	ADDQ AX,R10
      +	IMUL3Q $19,DX,DX
      +	ADDQ DX,SI
      +	ADDQ 136(SP),SI
      +	ADDQ 144(SP),CX
      +	ADDQ 152(SP),R8
      +	ADDQ 160(SP),R9
      +	ADDQ 168(SP),R10
      +	MOVQ SI,80(DI)
      +	MOVQ CX,88(DI)
      +	MOVQ R8,96(DI)
      +	MOVQ R9,104(DI)
      +	MOVQ R10,112(DI)
      +	MOVQ 104(DI),SI
      +	IMUL3Q $19,SI,AX
      +	MOVQ AX,56(SP)
      +	MULQ 232(SP)
      +	MOVQ AX,SI
      +	MOVQ DX,CX
      +	MOVQ 112(DI),DX
      +	IMUL3Q $19,DX,AX
      +	MOVQ AX,64(SP)
      +	MULQ 224(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 80(DI),AX
      +	MULQ 216(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 80(DI),AX
      +	MULQ 224(SP)
      +	MOVQ AX,R8
      +	MOVQ DX,R9
      +	MOVQ 80(DI),AX
      +	MULQ 232(SP)
      +	MOVQ AX,R10
      +	MOVQ DX,R11
      +	MOVQ 80(DI),AX
      +	MULQ 240(SP)
      +	MOVQ AX,R12
      +	MOVQ DX,R13
      +	MOVQ 80(DI),AX
      +	MULQ 248(SP)
      +	MOVQ AX,R14
      +	MOVQ DX,R15
      +	MOVQ 88(DI),AX
      +	MULQ 216(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 88(DI),AX
      +	MULQ 224(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 88(DI),AX
      +	MULQ 232(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 88(DI),AX
      +	MULQ 240(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 88(DI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 248(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 96(DI),AX
      +	MULQ 216(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 96(DI),AX
      +	MULQ 224(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 96(DI),AX
      +	MULQ 232(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 96(DI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 240(SP)
      +	ADDQ AX,SI
      +	ADCQ DX,CX
      +	MOVQ 96(DI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 248(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 104(DI),AX
      +	MULQ 216(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 104(DI),AX
      +	MULQ 224(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 56(SP),AX
      +	MULQ 240(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 56(SP),AX
      +	MULQ 248(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 112(DI),AX
      +	MULQ 216(SP)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 64(SP),AX
      +	MULQ 232(SP)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 64(SP),AX
      +	MULQ 240(SP)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 64(SP),AX
      +	MULQ 248(SP)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ ·REDMASK51(SB),DX
      +	SHLQ $13,CX:SI
      +	ANDQ DX,SI
      +	SHLQ $13,R9:R8
      +	ANDQ DX,R8
      +	ADDQ CX,R8
      +	SHLQ $13,R11:R10
      +	ANDQ DX,R10
      +	ADDQ R9,R10
      +	SHLQ $13,R13:R12
      +	ANDQ DX,R12
      +	ADDQ R11,R12
      +	SHLQ $13,R15:R14
      +	ANDQ DX,R14
      +	ADDQ R13,R14
      +	IMUL3Q $19,R15,CX
      +	ADDQ CX,SI
      +	MOVQ SI,CX
      +	SHRQ $51,CX
      +	ADDQ R8,CX
      +	MOVQ CX,R8
      +	SHRQ $51,CX
      +	ANDQ DX,SI
      +	ADDQ R10,CX
      +	MOVQ CX,R9
      +	SHRQ $51,CX
      +	ANDQ DX,R8
      +	ADDQ R12,CX
      +	MOVQ CX,AX
      +	SHRQ $51,CX
      +	ANDQ DX,R9
      +	ADDQ R14,CX
      +	MOVQ CX,R10
      +	SHRQ $51,CX
      +	ANDQ DX,AX
      +	IMUL3Q $19,CX,CX
      +	ADDQ CX,SI
      +	ANDQ DX,R10
      +	MOVQ SI,80(DI)
      +	MOVQ R8,88(DI)
      +	MOVQ R9,96(DI)
      +	MOVQ AX,104(DI)
      +	MOVQ R10,112(DI)
      +	MOVQ 0(SP),R11
      +	MOVQ 8(SP),R12
      +	MOVQ 16(SP),R13
      +	MOVQ 24(SP),R14
      +	MOVQ 32(SP),R15
      +	MOVQ 40(SP),BX
      +	MOVQ 48(SP),BP
      +	MOVQ R11,SP
      +	MOVQ DI,AX
      +	MOVQ SI,DX
      +	RET
      diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
      new file mode 100644
      index 00000000..5822bd53
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
      @@ -0,0 +1,240 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build amd64,!gccgo,!appengine
      +
      +package curve25519
      +
      +// These functions are implemented in the .s files. The names of the functions
      +// in the rest of the file are also taken from the SUPERCOP sources to help
      +// people following along.
      +
      +//go:noescape
      +
      +func cswap(inout *[5]uint64, v uint64)
      +
      +//go:noescape
      +
      +func ladderstep(inout *[5][5]uint64)
      +
      +//go:noescape
      +
      +func freeze(inout *[5]uint64)
      +
      +//go:noescape
      +
      +func mul(dest, a, b *[5]uint64)
      +
      +//go:noescape
      +
      +func square(out, in *[5]uint64)
      +
      +// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
      +func mladder(xr, zr *[5]uint64, s *[32]byte) {
      +	var work [5][5]uint64
      +
      +	work[0] = *xr
      +	setint(&work[1], 1)
      +	setint(&work[2], 0)
      +	work[3] = *xr
      +	setint(&work[4], 1)
      +
      +	j := uint(6)
      +	var prevbit byte
      +
      +	for i := 31; i >= 0; i-- {
      +		for j < 8 {
      +			bit := ((*s)[i] >> j) & 1
      +			swap := bit ^ prevbit
      +			prevbit = bit
      +			cswap(&work[1], uint64(swap))
      +			ladderstep(&work)
      +			j--
      +		}
      +		j = 7
      +	}
      +
      +	*xr = work[1]
      +	*zr = work[2]
      +}
      +
      +func scalarMult(out, in, base *[32]byte) {
      +	var e [32]byte
      +	copy(e[:], (*in)[:])
      +	e[0] &= 248
      +	e[31] &= 127
      +	e[31] |= 64
      +
      +	var t, z [5]uint64
      +	unpack(&t, base)
      +	mladder(&t, &z, &e)
      +	invert(&z, &z)
      +	mul(&t, &t, &z)
      +	pack(out, &t)
      +}
      +
      +func setint(r *[5]uint64, v uint64) {
      +	r[0] = v
      +	r[1] = 0
      +	r[2] = 0
      +	r[3] = 0
      +	r[4] = 0
      +}
      +
      +// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
      +// order.
      +func unpack(r *[5]uint64, x *[32]byte) {
      +	r[0] = uint64(x[0]) |
      +		uint64(x[1])<<8 |
      +		uint64(x[2])<<16 |
      +		uint64(x[3])<<24 |
      +		uint64(x[4])<<32 |
      +		uint64(x[5])<<40 |
      +		uint64(x[6]&7)<<48
      +
      +	r[1] = uint64(x[6])>>3 |
      +		uint64(x[7])<<5 |
      +		uint64(x[8])<<13 |
      +		uint64(x[9])<<21 |
      +		uint64(x[10])<<29 |
      +		uint64(x[11])<<37 |
      +		uint64(x[12]&63)<<45
      +
      +	r[2] = uint64(x[12])>>6 |
      +		uint64(x[13])<<2 |
      +		uint64(x[14])<<10 |
      +		uint64(x[15])<<18 |
      +		uint64(x[16])<<26 |
      +		uint64(x[17])<<34 |
      +		uint64(x[18])<<42 |
      +		uint64(x[19]&1)<<50
      +
      +	r[3] = uint64(x[19])>>1 |
      +		uint64(x[20])<<7 |
      +		uint64(x[21])<<15 |
      +		uint64(x[22])<<23 |
      +		uint64(x[23])<<31 |
      +		uint64(x[24])<<39 |
      +		uint64(x[25]&15)<<47
      +
      +	r[4] = uint64(x[25])>>4 |
      +		uint64(x[26])<<4 |
      +		uint64(x[27])<<12 |
      +		uint64(x[28])<<20 |
      +		uint64(x[29])<<28 |
      +		uint64(x[30])<<36 |
      +		uint64(x[31]&127)<<44
      +}
      +
      +// pack sets out = x where out is the usual, little-endian form of the 5,
      +// 51-bit limbs in x.
      +func pack(out *[32]byte, x *[5]uint64) {
      +	t := *x
      +	freeze(&t)
      +
      +	out[0] = byte(t[0])
      +	out[1] = byte(t[0] >> 8)
      +	out[2] = byte(t[0] >> 16)
      +	out[3] = byte(t[0] >> 24)
      +	out[4] = byte(t[0] >> 32)
      +	out[5] = byte(t[0] >> 40)
      +	out[6] = byte(t[0] >> 48)
      +
      +	out[6] ^= byte(t[1]<<3) & 0xf8
      +	out[7] = byte(t[1] >> 5)
      +	out[8] = byte(t[1] >> 13)
      +	out[9] = byte(t[1] >> 21)
      +	out[10] = byte(t[1] >> 29)
      +	out[11] = byte(t[1] >> 37)
      +	out[12] = byte(t[1] >> 45)
      +
      +	out[12] ^= byte(t[2]<<6) & 0xc0
      +	out[13] = byte(t[2] >> 2)
      +	out[14] = byte(t[2] >> 10)
      +	out[15] = byte(t[2] >> 18)
      +	out[16] = byte(t[2] >> 26)
      +	out[17] = byte(t[2] >> 34)
      +	out[18] = byte(t[2] >> 42)
      +	out[19] = byte(t[2] >> 50)
      +
      +	out[19] ^= byte(t[3]<<1) & 0xfe
      +	out[20] = byte(t[3] >> 7)
      +	out[21] = byte(t[3] >> 15)
      +	out[22] = byte(t[3] >> 23)
      +	out[23] = byte(t[3] >> 31)
      +	out[24] = byte(t[3] >> 39)
      +	out[25] = byte(t[3] >> 47)
      +
      +	out[25] ^= byte(t[4]<<4) & 0xf0
      +	out[26] = byte(t[4] >> 4)
      +	out[27] = byte(t[4] >> 12)
      +	out[28] = byte(t[4] >> 20)
      +	out[29] = byte(t[4] >> 28)
      +	out[30] = byte(t[4] >> 36)
      +	out[31] = byte(t[4] >> 44)
      +}
      +
      +// invert calculates r = x^-1 mod p using Fermat's little theorem.
      +func invert(r *[5]uint64, x *[5]uint64) {
      +	var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
      +
      +	square(&z2, x)        /* 2 */
      +	square(&t, &z2)       /* 4 */
      +	square(&t, &t)        /* 8 */
      +	mul(&z9, &t, x)       /* 9 */
      +	mul(&z11, &z9, &z2)   /* 11 */
      +	square(&t, &z11)      /* 22 */
      +	mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
      +
      +	square(&t, &z2_5_0)      /* 2^6 - 2^1 */
      +	for i := 1; i < 5; i++ { /* 2^20 - 2^10 */
      +		square(&t, &t)
      +	}
      +	mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
      +
      +	square(&t, &z2_10_0)      /* 2^11 - 2^1 */
      +	for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
      +		square(&t, &t)
      +	}
      +	mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
      +
      +	square(&t, &z2_20_0)      /* 2^21 - 2^1 */
      +	for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
      +		square(&t, &t)
      +	}
      +	mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
      +
      +	square(&t, &t)            /* 2^41 - 2^1 */
      +	for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
      +		square(&t, &t)
      +	}
      +	mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
      +
      +	square(&t, &z2_50_0)      /* 2^51 - 2^1 */
      +	for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
      +		square(&t, &t)
      +	}
      +	mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
      +
      +	square(&t, &z2_100_0)      /* 2^101 - 2^1 */
      +	for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
      +		square(&t, &t)
      +	}
      +	mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
      +
      +	square(&t, &t)            /* 2^201 - 2^1 */
      +	for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
      +		square(&t, &t)
      +	}
      +	mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
      +
      +	square(&t, &t) /* 2^251 - 2^1 */
      +	square(&t, &t) /* 2^252 - 2^2 */
      +	square(&t, &t) /* 2^253 - 2^3 */
      +
      +	square(&t, &t) /* 2^254 - 2^4 */
      +
      +	square(&t, &t)   /* 2^255 - 2^5 */
      +	mul(r, &t, &z11) /* 2^255 - 21 */
      +}
      diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
      new file mode 100644
      index 00000000..e48d183e
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
      @@ -0,0 +1,191 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This code was translated into a form compatible with 6a from the public
      +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
      +
      +// +build amd64,!gccgo,!appengine
      +
      +// func mul(dest, a, b *[5]uint64)
      +TEXT ·mul(SB),0,$128-24
      +	MOVQ dest+0(FP), DI
      +	MOVQ a+8(FP), SI
      +	MOVQ b+16(FP), DX
      +
      +	MOVQ SP,R11
      +	MOVQ $31,CX
      +	NOTQ CX
      +	ANDQ CX,SP
      +	ADDQ $32,SP
      +
      +	MOVQ R11,0(SP)
      +	MOVQ R12,8(SP)
      +	MOVQ R13,16(SP)
      +	MOVQ R14,24(SP)
      +	MOVQ R15,32(SP)
      +	MOVQ BX,40(SP)
      +	MOVQ BP,48(SP)
      +	MOVQ DI,56(SP)
      +	MOVQ DX,CX
      +	MOVQ 24(SI),DX
      +	IMUL3Q $19,DX,AX
      +	MOVQ AX,64(SP)
      +	MULQ 16(CX)
      +	MOVQ AX,R8
      +	MOVQ DX,R9
      +	MOVQ 32(SI),DX
      +	IMUL3Q $19,DX,AX
      +	MOVQ AX,72(SP)
      +	MULQ 8(CX)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 0(SI),AX
      +	MULQ 0(CX)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 0(SI),AX
      +	MULQ 8(CX)
      +	MOVQ AX,R10
      +	MOVQ DX,R11
      +	MOVQ 0(SI),AX
      +	MULQ 16(CX)
      +	MOVQ AX,R12
      +	MOVQ DX,R13
      +	MOVQ 0(SI),AX
      +	MULQ 24(CX)
      +	MOVQ AX,R14
      +	MOVQ DX,R15
      +	MOVQ 0(SI),AX
      +	MULQ 32(CX)
      +	MOVQ AX,BX
      +	MOVQ DX,BP
      +	MOVQ 8(SI),AX
      +	MULQ 0(CX)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 8(SI),AX
      +	MULQ 8(CX)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 8(SI),AX
      +	MULQ 16(CX)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 8(SI),AX
      +	MULQ 24(CX)
      +	ADDQ AX,BX
      +	ADCQ DX,BP
      +	MOVQ 8(SI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 32(CX)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 16(SI),AX
      +	MULQ 0(CX)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 16(SI),AX
      +	MULQ 8(CX)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 16(SI),AX
      +	MULQ 16(CX)
      +	ADDQ AX,BX
      +	ADCQ DX,BP
      +	MOVQ 16(SI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 24(CX)
      +	ADDQ AX,R8
      +	ADCQ DX,R9
      +	MOVQ 16(SI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 32(CX)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 24(SI),AX
      +	MULQ 0(CX)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ 24(SI),AX
      +	MULQ 8(CX)
      +	ADDQ AX,BX
      +	ADCQ DX,BP
      +	MOVQ 64(SP),AX
      +	MULQ 24(CX)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 64(SP),AX
      +	MULQ 32(CX)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 32(SI),AX
      +	MULQ 0(CX)
      +	ADDQ AX,BX
      +	ADCQ DX,BP
      +	MOVQ 72(SP),AX
      +	MULQ 16(CX)
      +	ADDQ AX,R10
      +	ADCQ DX,R11
      +	MOVQ 72(SP),AX
      +	MULQ 24(CX)
      +	ADDQ AX,R12
      +	ADCQ DX,R13
      +	MOVQ 72(SP),AX
      +	MULQ 32(CX)
      +	ADDQ AX,R14
      +	ADCQ DX,R15
      +	MOVQ ·REDMASK51(SB),SI
      +	SHLQ $13,R9:R8
      +	ANDQ SI,R8
      +	SHLQ $13,R11:R10
      +	ANDQ SI,R10
      +	ADDQ R9,R10
      +	SHLQ $13,R13:R12
      +	ANDQ SI,R12
      +	ADDQ R11,R12
      +	SHLQ $13,R15:R14
      +	ANDQ SI,R14
      +	ADDQ R13,R14
      +	SHLQ $13,BP:BX
      +	ANDQ SI,BX
      +	ADDQ R15,BX
      +	IMUL3Q $19,BP,DX
      +	ADDQ DX,R8
      +	MOVQ R8,DX
      +	SHRQ $51,DX
      +	ADDQ R10,DX
      +	MOVQ DX,CX
      +	SHRQ $51,DX
      +	ANDQ SI,R8
      +	ADDQ R12,DX
      +	MOVQ DX,R9
      +	SHRQ $51,DX
      +	ANDQ SI,CX
      +	ADDQ R14,DX
      +	MOVQ DX,AX
      +	SHRQ $51,DX
      +	ANDQ SI,R9
      +	ADDQ BX,DX
      +	MOVQ DX,R10
      +	SHRQ $51,DX
      +	ANDQ SI,AX
      +	IMUL3Q $19,DX,DX
      +	ADDQ DX,R8
      +	ANDQ SI,R10
      +	MOVQ R8,0(DI)
      +	MOVQ CX,8(DI)
      +	MOVQ R9,16(DI)
      +	MOVQ AX,24(DI)
      +	MOVQ R10,32(DI)
      +	MOVQ 0(SP),R11
      +	MOVQ 8(SP),R12
      +	MOVQ 16(SP),R13
      +	MOVQ 24(SP),R14
      +	MOVQ 32(SP),R15
      +	MOVQ 40(SP),BX
      +	MOVQ 48(SP),BP
      +	MOVQ R11,SP
      +	MOVQ DI,AX
      +	MOVQ SI,DX
      +	RET
      diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s
      new file mode 100644
      index 00000000..78d1a50d
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/curve25519/square_amd64.s
      @@ -0,0 +1,153 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This code was translated into a form compatible with 6a from the public
      +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
      +
      +// +build amd64,!gccgo,!appengine
      +
      +// func square(out, in *[5]uint64)
      +TEXT ·square(SB),7,$96-16
      +	MOVQ out+0(FP), DI
      +	MOVQ in+8(FP), SI
      +
      +	MOVQ SP,R11
      +	MOVQ $31,CX
      +	NOTQ CX
      +	ANDQ CX,SP
      +	ADDQ $32, SP
      +
      +	MOVQ R11,0(SP)
      +	MOVQ R12,8(SP)
      +	MOVQ R13,16(SP)
      +	MOVQ R14,24(SP)
      +	MOVQ R15,32(SP)
      +	MOVQ BX,40(SP)
      +	MOVQ BP,48(SP)
      +	MOVQ 0(SI),AX
      +	MULQ 0(SI)
      +	MOVQ AX,CX
      +	MOVQ DX,R8
      +	MOVQ 0(SI),AX
      +	SHLQ $1,AX
      +	MULQ 8(SI)
      +	MOVQ AX,R9
      +	MOVQ DX,R10
      +	MOVQ 0(SI),AX
      +	SHLQ $1,AX
      +	MULQ 16(SI)
      +	MOVQ AX,R11
      +	MOVQ DX,R12
      +	MOVQ 0(SI),AX
      +	SHLQ $1,AX
      +	MULQ 24(SI)
      +	MOVQ AX,R13
      +	MOVQ DX,R14
      +	MOVQ 0(SI),AX
      +	SHLQ $1,AX
      +	MULQ 32(SI)
      +	MOVQ AX,R15
      +	MOVQ DX,BX
      +	MOVQ 8(SI),AX
      +	MULQ 8(SI)
      +	ADDQ AX,R11
      +	ADCQ DX,R12
      +	MOVQ 8(SI),AX
      +	SHLQ $1,AX
      +	MULQ 16(SI)
      +	ADDQ AX,R13
      +	ADCQ DX,R14
      +	MOVQ 8(SI),AX
      +	SHLQ $1,AX
      +	MULQ 24(SI)
      +	ADDQ AX,R15
      +	ADCQ DX,BX
      +	MOVQ 8(SI),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 32(SI)
      +	ADDQ AX,CX
      +	ADCQ DX,R8
      +	MOVQ 16(SI),AX
      +	MULQ 16(SI)
      +	ADDQ AX,R15
      +	ADCQ DX,BX
      +	MOVQ 16(SI),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 24(SI)
      +	ADDQ AX,CX
      +	ADCQ DX,R8
      +	MOVQ 16(SI),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 32(SI)
      +	ADDQ AX,R9
      +	ADCQ DX,R10
      +	MOVQ 24(SI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 24(SI)
      +	ADDQ AX,R9
      +	ADCQ DX,R10
      +	MOVQ 24(SI),DX
      +	IMUL3Q $38,DX,AX
      +	MULQ 32(SI)
      +	ADDQ AX,R11
      +	ADCQ DX,R12
      +	MOVQ 32(SI),DX
      +	IMUL3Q $19,DX,AX
      +	MULQ 32(SI)
      +	ADDQ AX,R13
      +	ADCQ DX,R14
      +	MOVQ ·REDMASK51(SB),SI
      +	SHLQ $13,R8:CX
      +	ANDQ SI,CX
      +	SHLQ $13,R10:R9
      +	ANDQ SI,R9
      +	ADDQ R8,R9
      +	SHLQ $13,R12:R11
      +	ANDQ SI,R11
      +	ADDQ R10,R11
      +	SHLQ $13,R14:R13
      +	ANDQ SI,R13
      +	ADDQ R12,R13
      +	SHLQ $13,BX:R15
      +	ANDQ SI,R15
      +	ADDQ R14,R15
      +	IMUL3Q $19,BX,DX
      +	ADDQ DX,CX
      +	MOVQ CX,DX
      +	SHRQ $51,DX
      +	ADDQ R9,DX
      +	ANDQ SI,CX
      +	MOVQ DX,R8
      +	SHRQ $51,DX
      +	ADDQ R11,DX
      +	ANDQ SI,R8
      +	MOVQ DX,R9
      +	SHRQ $51,DX
      +	ADDQ R13,DX
      +	ANDQ SI,R9
      +	MOVQ DX,AX
      +	SHRQ $51,DX
      +	ADDQ R15,DX
      +	ANDQ SI,AX
      +	MOVQ DX,R10
      +	SHRQ $51,DX
      +	IMUL3Q $19,DX,DX
      +	ADDQ DX,CX
      +	ANDQ SI,R10
      +	MOVQ CX,0(DI)
      +	MOVQ R8,8(DI)
      +	MOVQ R9,16(DI)
      +	MOVQ AX,24(DI)
      +	MOVQ R10,32(DI)
      +	MOVQ 0(SP),R11
      +	MOVQ 8(SP),R12
      +	MOVQ 16(SP),R13
      +	MOVQ 24(SP),R14
      +	MOVQ 32(SP),R15
      +	MOVQ 40(SP),BX
      +	MOVQ 48(SP),BP
      +	MOVQ R11,SP
      +	MOVQ DI,AX
      +	MOVQ SI,DX
      +	RET
      diff --git a/vendor/golang.org/x/crypto/hkdf/example_test.go b/vendor/golang.org/x/crypto/hkdf/example_test.go
      new file mode 100644
      index 00000000..df843951
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/hkdf/example_test.go
      @@ -0,0 +1,61 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package hkdf_test
      +
      +import (
      +	"bytes"
      +	"crypto/rand"
      +	"crypto/sha256"
      +	"fmt"
      +	"golang.org/x/crypto/hkdf"
      +	"io"
      +)
      +
      +// Usage example that expands one master key into three other cryptographically
      +// secure keys.
      +func Example_usage() {
      +	// Underlying hash function to use
      +	hash := sha256.New
      +
      +	// Cryptographically secure master key.
      +	master := []byte{0x00, 0x01, 0x02, 0x03} // i.e. NOT this.
      +
      +	// Non secret salt, optional (can be nil)
      +	// Recommended: hash-length sized random
      +	salt := make([]byte, hash().Size())
      +	n, err := io.ReadFull(rand.Reader, salt)
      +	if n != len(salt) || err != nil {
      +		fmt.Println("error:", err)
      +		return
      +	}
      +
      +	// Non secret context specific info, optional (can be nil).
      +	// Note, independent from the master key.
      +	info := []byte{0x03, 0x14, 0x15, 0x92, 0x65}
      +
      +	// Create the key derivation function
      +	hkdf := hkdf.New(hash, master, salt, info)
      +
      +	// Generate the required keys
      +	keys := make([][]byte, 3)
      +	for i := 0; i < len(keys); i++ {
      +		keys[i] = make([]byte, 24)
      +		n, err := io.ReadFull(hkdf, keys[i])
      +		if n != len(keys[i]) || err != nil {
      +			fmt.Println("error:", err)
      +			return
      +		}
      +	}
      +
      +	// Keys should contain 192 bit random keys
      +	for i := 1; i <= len(keys); i++ {
      +		fmt.Printf("Key #%d: %v\n", i, !bytes.Equal(keys[i-1], make([]byte, 24)))
      +	}
      +
      +	// Output:
      +	// Key #1: true
      +	// Key #2: true
      +	// Key #3: true
      +}
      diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go
      new file mode 100644
      index 00000000..5bc24635
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go
      @@ -0,0 +1,75 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation
      +// Function (HKDF) as defined in RFC 5869.
      +//
      +// HKDF is a cryptographic key derivation function (KDF) with the goal of
      +// expanding limited input keying material into one or more cryptographically
      +// strong secret keys.
      +//
      +// RFC 5869: https://tools.ietf.org/html/rfc5869
      +package hkdf // import "golang.org/x/crypto/hkdf"
      +
      +import (
      +	"crypto/hmac"
      +	"errors"
      +	"hash"
      +	"io"
      +)
      +
      +type hkdf struct {
      +	expander hash.Hash
      +	size     int
      +
      +	info    []byte
      +	counter byte
      +
      +	prev  []byte
      +	cache []byte
      +}
      +
      +func (f *hkdf) Read(p []byte) (int, error) {
      +	// Check whether enough data can be generated
      +	need := len(p)
      +	remains := len(f.cache) + int(255-f.counter+1)*f.size
      +	if remains < need {
      +		return 0, errors.New("hkdf: entropy limit reached")
      +	}
      +	// Read from the cache, if enough data is present
      +	n := copy(p, f.cache)
      +	p = p[n:]
      +
      +	// Fill the buffer
      +	for len(p) > 0 {
      +		f.expander.Reset()
      +		f.expander.Write(f.prev)
      +		f.expander.Write(f.info)
      +		f.expander.Write([]byte{f.counter})
      +		f.prev = f.expander.Sum(f.prev[:0])
      +		f.counter++
      +
      +		// Copy the new batch into p
      +		f.cache = f.prev
      +		n = copy(p, f.cache)
      +		p = p[n:]
      +	}
      +	// Save leftovers for next run
      +	f.cache = f.cache[n:]
      +
      +	return need, nil
      +}
      +
      +// New returns a new HKDF using the given hash, the secret keying material to expand
      +// and optional salt and info fields.
      +func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader {
      +	if salt == nil {
      +		salt = make([]byte, hash().Size())
      +	}
      +	extractor := hmac.New(hash, salt)
      +	extractor.Write(secret)
      +	prk := extractor.Sum(nil)
      +
      +	return &hkdf{hmac.New(hash, prk), extractor.Size(), info, 1, nil, nil}
      +}
      diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf_test.go b/vendor/golang.org/x/crypto/hkdf/hkdf_test.go
      new file mode 100644
      index 00000000..cee659bc
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/hkdf/hkdf_test.go
      @@ -0,0 +1,370 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +package hkdf
      +
      +import (
      +	"bytes"
      +	"crypto/md5"
      +	"crypto/sha1"
      +	"crypto/sha256"
      +	"crypto/sha512"
      +	"hash"
      +	"io"
      +	"testing"
      +)
      +
      +type hkdfTest struct {
      +	hash   func() hash.Hash
      +	master []byte
      +	salt   []byte
      +	info   []byte
      +	out    []byte
      +}
      +
      +var hkdfTests = []hkdfTest{
      +	// Tests from RFC 5869
      +	{
      +		sha256.New,
      +		[]byte{
      +			0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
      +			0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
      +			0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
      +		},
      +		[]byte{
      +			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
      +			0x08, 0x09, 0x0a, 0x0b, 0x0c,
      +		},
      +		[]byte{
      +			0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
      +			0xf8, 0xf9,
      +		},
      +		[]byte{
      +			0x3c, 0xb2, 0x5f, 0x25, 0xfa, 0xac, 0xd5, 0x7a,
      +			0x90, 0x43, 0x4f, 0x64, 0xd0, 0x36, 0x2f, 0x2a,
      +			0x2d, 0x2d, 0x0a, 0x90, 0xcf, 0x1a, 0x5a, 0x4c,
      +			0x5d, 0xb0, 0x2d, 0x56, 0xec, 0xc4, 0xc5, 0xbf,
      +			0x34, 0x00, 0x72, 0x08, 0xd5, 0xb8, 0x87, 0x18,
      +			0x58, 0x65,
      +		},
      +	},
      +	{
      +		sha256.New,
      +		[]byte{
      +			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
      +			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
      +			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
      +			0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
      +			0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
      +			0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
      +			0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
      +			0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
      +			0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
      +			0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
      +		},
      +		[]byte{
      +			0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
      +			0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
      +			0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
      +			0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
      +			0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
      +			0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
      +			0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
      +			0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
      +			0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
      +			0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
      +		},
      +		[]byte{
      +			0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
      +			0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
      +			0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
      +			0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
      +			0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
      +			0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
      +			0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
      +			0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
      +			0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
      +			0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
      +		},
      +		[]byte{
      +			0xb1, 0x1e, 0x39, 0x8d, 0xc8, 0x03, 0x27, 0xa1,
      +			0xc8, 0xe7, 0xf7, 0x8c, 0x59, 0x6a, 0x49, 0x34,
      +			0x4f, 0x01, 0x2e, 0xda, 0x2d, 0x4e, 0xfa, 0xd8,
      +			0xa0, 0x50, 0xcc, 0x4c, 0x19, 0xaf, 0xa9, 0x7c,
      +			0x59, 0x04, 0x5a, 0x99, 0xca, 0xc7, 0x82, 0x72,
      +			0x71, 0xcb, 0x41, 0xc6, 0x5e, 0x59, 0x0e, 0x09,
      +			0xda, 0x32, 0x75, 0x60, 0x0c, 0x2f, 0x09, 0xb8,
      +			0x36, 0x77, 0x93, 0xa9, 0xac, 0xa3, 0xdb, 0x71,
      +			0xcc, 0x30, 0xc5, 0x81, 0x79, 0xec, 0x3e, 0x87,
      +			0xc1, 0x4c, 0x01, 0xd5, 0xc1, 0xf3, 0x43, 0x4f,
      +			0x1d, 0x87,
      +		},
      +	},
      +	{
      +		sha256.New,
      +		[]byte{
      +			0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
      +			0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
      +			0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
      +		},
      +		[]byte{},
      +		[]byte{},
      +		[]byte{
      +			0x8d, 0xa4, 0xe7, 0x75, 0xa5, 0x63, 0xc1, 0x8f,
      +			0x71, 0x5f, 0x80, 0x2a, 0x06, 0x3c, 0x5a, 0x31,
      +			0xb8, 0xa1, 0x1f, 0x5c, 0x5e, 0xe1, 0x87, 0x9e,
      +			0xc3, 0x45, 0x4e, 0x5f, 0x3c, 0x73, 0x8d, 0x2d,
      +			0x9d, 0x20, 0x13, 0x95, 0xfa, 0xa4, 0xb6, 0x1a,
      +			0x96, 0xc8,
      +		},
      +	},
      +	{
      +		sha1.New,
      +		[]byte{
      +			0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
      +			0x0b, 0x0b, 0x0b,
      +		},
      +		[]byte{
      +			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
      +			0x08, 0x09, 0x0a, 0x0b, 0x0c,
      +		},
      +		[]byte{
      +			0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
      +			0xf8, 0xf9,
      +		},
      +		[]byte{
      +			0x08, 0x5a, 0x01, 0xea, 0x1b, 0x10, 0xf3, 0x69,
      +			0x33, 0x06, 0x8b, 0x56, 0xef, 0xa5, 0xad, 0x81,
      +			0xa4, 0xf1, 0x4b, 0x82, 0x2f, 0x5b, 0x09, 0x15,
      +			0x68, 0xa9, 0xcd, 0xd4, 0xf1, 0x55, 0xfd, 0xa2,
      +			0xc2, 0x2e, 0x42, 0x24, 0x78, 0xd3, 0x05, 0xf3,
      +			0xf8, 0x96,
      +		},
      +	},
      +	{
      +		sha1.New,
      +		[]byte{
      +			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
      +			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
      +			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
      +			0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
      +			0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
      +			0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
      +			0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
      +			0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
      +			0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
      +			0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
      +		},
      +		[]byte{
      +			0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
      +			0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
      +			0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
      +			0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
      +			0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
      +			0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
      +			0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
      +			0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
      +			0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
      +			0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
      +		},
      +		[]byte{
      +			0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
      +			0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
      +			0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
      +			0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
      +			0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
      +			0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
      +			0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
      +			0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
      +			0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
      +			0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
      +		},
      +		[]byte{
      +			0x0b, 0xd7, 0x70, 0xa7, 0x4d, 0x11, 0x60, 0xf7,
      +			0xc9, 0xf1, 0x2c, 0xd5, 0x91, 0x2a, 0x06, 0xeb,
      +			0xff, 0x6a, 0xdc, 0xae, 0x89, 0x9d, 0x92, 0x19,
      +			0x1f, 0xe4, 0x30, 0x56, 0x73, 0xba, 0x2f, 0xfe,
      +			0x8f, 0xa3, 0xf1, 0xa4, 0xe5, 0xad, 0x79, 0xf3,
      +			0xf3, 0x34, 0xb3, 0xb2, 0x02, 0xb2, 0x17, 0x3c,
      +			0x48, 0x6e, 0xa3, 0x7c, 0xe3, 0xd3, 0x97, 0xed,
      +			0x03, 0x4c, 0x7f, 0x9d, 0xfe, 0xb1, 0x5c, 0x5e,
      +			0x92, 0x73, 0x36, 0xd0, 0x44, 0x1f, 0x4c, 0x43,
      +			0x00, 0xe2, 0xcf, 0xf0, 0xd0, 0x90, 0x0b, 0x52,
      +			0xd3, 0xb4,
      +		},
      +	},
      +	{
      +		sha1.New,
      +		[]byte{
      +			0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
      +			0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
      +			0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
      +		},
      +		[]byte{},
      +		[]byte{},
      +		[]byte{
      +			0x0a, 0xc1, 0xaf, 0x70, 0x02, 0xb3, 0xd7, 0x61,
      +			0xd1, 0xe5, 0x52, 0x98, 0xda, 0x9d, 0x05, 0x06,
      +			0xb9, 0xae, 0x52, 0x05, 0x72, 0x20, 0xa3, 0x06,
      +			0xe0, 0x7b, 0x6b, 0x87, 0xe8, 0xdf, 0x21, 0xd0,
      +			0xea, 0x00, 0x03, 0x3d, 0xe0, 0x39, 0x84, 0xd3,
      +			0x49, 0x18,
      +		},
      +	},
      +	{
      +		sha1.New,
      +		[]byte{
      +			0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
      +			0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
      +			0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
      +		},
      +		nil,
      +		[]byte{},
      +		[]byte{
      +			0x2c, 0x91, 0x11, 0x72, 0x04, 0xd7, 0x45, 0xf3,
      +			0x50, 0x0d, 0x63, 0x6a, 0x62, 0xf6, 0x4f, 0x0a,
      +			0xb3, 0xba, 0xe5, 0x48, 0xaa, 0x53, 0xd4, 0x23,
      +			0xb0, 0xd1, 0xf2, 0x7e, 0xbb, 0xa6, 0xf5, 0xe5,
      +			0x67, 0x3a, 0x08, 0x1d, 0x70, 0xcc, 0xe7, 0xac,
      +			0xfc, 0x48,
      +		},
      +	},
      +}
      +
      +func TestHKDF(t *testing.T) {
      +	for i, tt := range hkdfTests {
      +		hkdf := New(tt.hash, tt.master, tt.salt, tt.info)
      +		out := make([]byte, len(tt.out))
      +
      +		n, err := io.ReadFull(hkdf, out)
      +		if n != len(tt.out) || err != nil {
      +			t.Errorf("test %d: not enough output bytes: %d.", i, n)
      +		}
      +
      +		if !bytes.Equal(out, tt.out) {
      +			t.Errorf("test %d: incorrect output: have %v, need %v.", i, out, tt.out)
      +		}
      +	}
      +}
      +
      +func TestHKDFMultiRead(t *testing.T) {
      +	for i, tt := range hkdfTests {
      +		hkdf := New(tt.hash, tt.master, tt.salt, tt.info)
      +		out := make([]byte, len(tt.out))
      +
      +		for b := 0; b < len(tt.out); b++ {
      +			n, err := io.ReadFull(hkdf, out[b:b+1])
      +			if n != 1 || err != nil {
      +				t.Errorf("test %d.%d: not enough output bytes: have %d, need %d .", i, b, n, len(tt.out))
      +			}
      +		}
      +
      +		if !bytes.Equal(out, tt.out) {
      +			t.Errorf("test %d: incorrect output: have %v, need %v.", i, out, tt.out)
      +		}
      +	}
      +}
      +
      +func TestHKDFLimit(t *testing.T) {
      +	hash := sha1.New
      +	master := []byte{0x00, 0x01, 0x02, 0x03}
      +	info := []byte{}
      +
      +	hkdf := New(hash, master, nil, info)
      +	limit := hash().Size() * 255
      +	out := make([]byte, limit)
      +
      +	// The maximum output bytes should be extractable
      +	n, err := io.ReadFull(hkdf, out)
      +	if n != limit || err != nil {
      +		t.Errorf("not enough output bytes: %d, %v.", n, err)
      +	}
      +
      +	// Reading one more should fail
      +	n, err = io.ReadFull(hkdf, make([]byte, 1))
      +	if n > 0 || err == nil {
      +		t.Errorf("key expansion overflowed: n = %d, err = %v", n, err)
      +	}
      +}
      +
      +func Benchmark16ByteMD5Single(b *testing.B) {
      +	benchmarkHKDFSingle(md5.New, 16, b)
      +}
      +
      +func Benchmark20ByteSHA1Single(b *testing.B) {
      +	benchmarkHKDFSingle(sha1.New, 20, b)
      +}
      +
      +func Benchmark32ByteSHA256Single(b *testing.B) {
      +	benchmarkHKDFSingle(sha256.New, 32, b)
      +}
      +
      +func Benchmark64ByteSHA512Single(b *testing.B) {
      +	benchmarkHKDFSingle(sha512.New, 64, b)
      +}
      +
      +func Benchmark8ByteMD5Stream(b *testing.B) {
      +	benchmarkHKDFStream(md5.New, 8, b)
      +}
      +
      +func Benchmark16ByteMD5Stream(b *testing.B) {
      +	benchmarkHKDFStream(md5.New, 16, b)
      +}
      +
      +func Benchmark8ByteSHA1Stream(b *testing.B) {
      +	benchmarkHKDFStream(sha1.New, 8, b)
      +}
      +
      +func Benchmark20ByteSHA1Stream(b *testing.B) {
      +	benchmarkHKDFStream(sha1.New, 20, b)
      +}
      +
      +func Benchmark8ByteSHA256Stream(b *testing.B) {
      +	benchmarkHKDFStream(sha256.New, 8, b)
      +}
      +
      +func Benchmark32ByteSHA256Stream(b *testing.B) {
      +	benchmarkHKDFStream(sha256.New, 32, b)
      +}
      +
      +func Benchmark8ByteSHA512Stream(b *testing.B) {
      +	benchmarkHKDFStream(sha512.New, 8, b)
      +}
      +
      +func Benchmark64ByteSHA512Stream(b *testing.B) {
      +	benchmarkHKDFStream(sha512.New, 64, b)
      +}
      +
      +func benchmarkHKDFSingle(hasher func() hash.Hash, block int, b *testing.B) {
      +	master := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}
      +	salt := []byte{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17}
      +	info := []byte{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27}
      +	out := make([]byte, block)
      +
      +	b.SetBytes(int64(block))
      +	b.ResetTimer()
      +
      +	for i := 0; i < b.N; i++ {
      +		hkdf := New(hasher, master, salt, info)
      +		io.ReadFull(hkdf, out)
      +	}
      +}
      +
      +func benchmarkHKDFStream(hasher func() hash.Hash, block int, b *testing.B) {
      +	master := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}
      +	salt := []byte{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17}
      +	info := []byte{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27}
      +	out := make([]byte, block)
      +
      +	b.SetBytes(int64(block))
      +	b.ResetTimer()
      +
      +	hkdf := New(hasher, master, salt, info)
      +	for i := 0; i < b.N; i++ {
      +		_, err := io.ReadFull(hkdf, out)
      +		if err != nil {
      +			hkdf = New(hasher, master, salt, info)
      +			i--
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/md4/md4.go b/vendor/golang.org/x/crypto/md4/md4.go
      new file mode 100644
      index 00000000..6d9ba9e5
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/md4/md4.go
      @@ -0,0 +1,118 @@
      +// Copyright 2009 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package md4 implements the MD4 hash algorithm as defined in RFC 1320.
      +package md4 // import "golang.org/x/crypto/md4"
      +
      +import (
      +	"crypto"
      +	"hash"
      +)
      +
      +func init() {
      +	crypto.RegisterHash(crypto.MD4, New)
      +}
      +
      +// The size of an MD4 checksum in bytes.
      +const Size = 16
      +
      +// The blocksize of MD4 in bytes.
      +const BlockSize = 64
      +
      +const (
      +	_Chunk = 64
      +	_Init0 = 0x67452301
      +	_Init1 = 0xEFCDAB89
      +	_Init2 = 0x98BADCFE
      +	_Init3 = 0x10325476
      +)
      +
      +// digest represents the partial evaluation of a checksum.
      +type digest struct {
      +	s   [4]uint32
      +	x   [_Chunk]byte
      +	nx  int
      +	len uint64
      +}
      +
      +func (d *digest) Reset() {
      +	d.s[0] = _Init0
      +	d.s[1] = _Init1
      +	d.s[2] = _Init2
      +	d.s[3] = _Init3
      +	d.nx = 0
      +	d.len = 0
      +}
      +
      +// New returns a new hash.Hash computing the MD4 checksum.
      +func New() hash.Hash {
      +	d := new(digest)
      +	d.Reset()
      +	return d
      +}
      +
      +func (d *digest) Size() int { return Size }
      +
      +func (d *digest) BlockSize() int { return BlockSize }
      +
      +func (d *digest) Write(p []byte) (nn int, err error) {
      +	nn = len(p)
      +	d.len += uint64(nn)
      +	if d.nx > 0 {
      +		n := len(p)
      +		if n > _Chunk-d.nx {
      +			n = _Chunk - d.nx
      +		}
      +		for i := 0; i < n; i++ {
      +			d.x[d.nx+i] = p[i]
      +		}
      +		d.nx += n
      +		if d.nx == _Chunk {
      +			_Block(d, d.x[0:])
      +			d.nx = 0
      +		}
      +		p = p[n:]
      +	}
      +	n := _Block(d, p)
      +	p = p[n:]
      +	if len(p) > 0 {
      +		d.nx = copy(d.x[:], p)
      +	}
      +	return
      +}
      +
      +func (d0 *digest) Sum(in []byte) []byte {
      +	// Make a copy of d0, so that caller can keep writing and summing.
      +	d := new(digest)
      +	*d = *d0
      +
      +	// Padding.  Add a 1 bit and 0 bits until 56 bytes mod 64.
      +	len := d.len
      +	var tmp [64]byte
      +	tmp[0] = 0x80
      +	if len%64 < 56 {
      +		d.Write(tmp[0 : 56-len%64])
      +	} else {
      +		d.Write(tmp[0 : 64+56-len%64])
      +	}
      +
      +	// Length in bits.
      +	len <<= 3
      +	for i := uint(0); i < 8; i++ {
      +		tmp[i] = byte(len >> (8 * i))
      +	}
      +	d.Write(tmp[0:8])
      +
      +	if d.nx != 0 {
      +		panic("d.nx != 0")
      +	}
      +
      +	for _, s := range d.s {
      +		in = append(in, byte(s>>0))
      +		in = append(in, byte(s>>8))
      +		in = append(in, byte(s>>16))
      +		in = append(in, byte(s>>24))
      +	}
      +	return in
      +}
      diff --git a/vendor/golang.org/x/crypto/md4/md4_test.go b/vendor/golang.org/x/crypto/md4/md4_test.go
      new file mode 100644
      index 00000000..b56edd78
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/md4/md4_test.go
      @@ -0,0 +1,71 @@
      +// Copyright 2009 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package md4
      +
      +import (
      +	"fmt"
      +	"io"
      +	"testing"
      +)
      +
      +type md4Test struct {
      +	out string
      +	in  string
      +}
      +
      +var golden = []md4Test{
      +	{"31d6cfe0d16ae931b73c59d7e0c089c0", ""},
      +	{"bde52cb31de33e46245e05fbdbd6fb24", "a"},
      +	{"ec388dd78999dfc7cf4632465693b6bf", "ab"},
      +	{"a448017aaf21d8525fc10ae87aa6729d", "abc"},
      +	{"41decd8f579255c5200f86a4bb3ba740", "abcd"},
      +	{"9803f4a34e8eb14f96adba49064a0c41", "abcde"},
      +	{"804e7f1c2586e50b49ac65db5b645131", "abcdef"},
      +	{"752f4adfe53d1da0241b5bc216d098fc", "abcdefg"},
      +	{"ad9daf8d49d81988590a6f0e745d15dd", "abcdefgh"},
      +	{"1e4e28b05464316b56402b3815ed2dfd", "abcdefghi"},
      +	{"dc959c6f5d6f9e04e4380777cc964b3d", "abcdefghij"},
      +	{"1b5701e265778898ef7de5623bbe7cc0", "Discard medicine more than two years old."},
      +	{"d7f087e090fe7ad4a01cb59dacc9a572", "He who has a shady past knows that nice guys finish last."},
      +	{"a6f8fd6df617c72837592fc3570595c9", "I wouldn't marry him with a ten foot pole."},
      +	{"c92a84a9526da8abc240c05d6b1a1ce0", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"},
      +	{"f6013160c4dcb00847069fee3bb09803", "The days of the digital watch are numbered.  -Tom Stoppard"},
      +	{"2c3bb64f50b9107ed57640fe94bec09f", "Nepal premier won't resign."},
      +	{"45b7d8a32c7806f2f7f897332774d6e4", "For every action there is an equal and opposite government program."},
      +	{"b5b4f9026b175c62d7654bdc3a1cd438", "His money is twice tainted: 'taint yours and 'taint mine."},
      +	{"caf44e80f2c20ce19b5ba1cab766e7bd", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"},
      +	{"191fae6707f496aa54a6bce9f2ecf74d", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"},
      +	{"9ddc753e7a4ccee6081cd1b45b23a834", "size:  a.out:  bad magic"},
      +	{"8d050f55b1cadb9323474564be08a521", "The major problem is with sendmail.  -Mark Horton"},
      +	{"ad6e2587f74c3e3cc19146f6127fa2e3", "Give me a rock, paper and scissors and I will move the world.  CCFestoon"},
      +	{"1d616d60a5fabe85589c3f1566ca7fca", "If the enemy is within range, then so are you."},
      +	{"aec3326a4f496a2ced65a1963f84577f", "It's well we cannot hear the screams/That we create in others' dreams."},
      +	{"77b4fd762d6b9245e61c50bf6ebf118b", "You remind me of a TV show, but that's all right: I watch it anyway."},
      +	{"e8f48c726bae5e516f6ddb1a4fe62438", "C is as portable as Stonehedge!!"},
      +	{"a3a84366e7219e887423b01f9be7166e", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"},
      +	{"a6b7aa35157e984ef5d9b7f32e5fbb52", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction.  Lewis-Randall Rule"},
      +	{"75661f0545955f8f9abeeb17845f3fd6", "How can you write a big system without C++?  -Paul Glick"},
      +}
      +
      +func TestGolden(t *testing.T) {
      +	for i := 0; i < len(golden); i++ {
      +		g := golden[i]
      +		c := New()
      +		for j := 0; j < 3; j++ {
      +			if j < 2 {
      +				io.WriteString(c, g.in)
      +			} else {
      +				io.WriteString(c, g.in[0:len(g.in)/2])
      +				c.Sum(nil)
      +				io.WriteString(c, g.in[len(g.in)/2:])
      +			}
      +			s := fmt.Sprintf("%x", c.Sum(nil))
      +			if s != g.out {
      +				t.Fatalf("md4[%d](%s) = %s want %s", j, g.in, s, g.out)
      +			}
      +			c.Reset()
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/md4/md4block.go b/vendor/golang.org/x/crypto/md4/md4block.go
      new file mode 100644
      index 00000000..3fed475f
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/md4/md4block.go
      @@ -0,0 +1,89 @@
      +// Copyright 2009 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// MD4 block step.
      +// In its own file so that a faster assembly or C version
      +// can be substituted easily.
      +
      +package md4
      +
      +var shift1 = []uint{3, 7, 11, 19}
      +var shift2 = []uint{3, 5, 9, 13}
      +var shift3 = []uint{3, 9, 11, 15}
      +
      +var xIndex2 = []uint{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15}
      +var xIndex3 = []uint{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15}
      +
      +func _Block(dig *digest, p []byte) int {
      +	a := dig.s[0]
      +	b := dig.s[1]
      +	c := dig.s[2]
      +	d := dig.s[3]
      +	n := 0
      +	var X [16]uint32
      +	for len(p) >= _Chunk {
      +		aa, bb, cc, dd := a, b, c, d
      +
      +		j := 0
      +		for i := 0; i < 16; i++ {
      +			X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
      +			j += 4
      +		}
      +
      +		// If this needs to be made faster in the future,
      +		// the usual trick is to unroll each of these
      +		// loops by a factor of 4; that lets you replace
      +		// the shift[] lookups with constants and,
      +		// with suitable variable renaming in each
      +		// unrolled body, delete the a, b, c, d = d, a, b, c
      +		// (or you can let the optimizer do the renaming).
      +		//
      +		// The index variables are uint so that % by a power
      +		// of two can be optimized easily by a compiler.
      +
      +		// Round 1.
      +		for i := uint(0); i < 16; i++ {
      +			x := i
      +			s := shift1[i%4]
      +			f := ((c ^ d) & b) ^ d
      +			a += f + X[x]
      +			a = a<<s | a>>(32-s)
      +			a, b, c, d = d, a, b, c
      +		}
      +
      +		// Round 2.
      +		for i := uint(0); i < 16; i++ {
      +			x := xIndex2[i]
      +			s := shift2[i%4]
      +			g := (b & c) | (b & d) | (c & d)
      +			a += g + X[x] + 0x5a827999
      +			a = a<<s | a>>(32-s)
      +			a, b, c, d = d, a, b, c
      +		}
      +
      +		// Round 3.
      +		for i := uint(0); i < 16; i++ {
      +			x := xIndex3[i]
      +			s := shift3[i%4]
      +			h := b ^ c ^ d
      +			a += h + X[x] + 0x6ed9eba1
      +			a = a<<s | a>>(32-s)
      +			a, b, c, d = d, a, b, c
      +		}
      +
      +		a += aa
      +		b += bb
      +		c += cc
      +		d += dd
      +
      +		p = p[_Chunk:]
      +		n += _Chunk
      +	}
      +
      +	dig.s[0] = a
      +	dig.s[1] = b
      +	dig.s[2] = c
      +	dig.s[3] = d
      +	return n
      +}
      diff --git a/vendor/golang.org/x/crypto/nacl/box/box.go b/vendor/golang.org/x/crypto/nacl/box/box.go
      new file mode 100644
      index 00000000..ca48a6db
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/nacl/box/box.go
      @@ -0,0 +1,85 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +/*
      +Package box authenticates and encrypts messages using public-key cryptography.
      +
      +Box uses Curve25519, XSalsa20 and Poly1305 to encrypt and authenticate
      +messages. The length of messages is not hidden.
      +
      +It is the caller's responsibility to ensure the uniqueness of nonces—for
      +example, by using nonce 1 for the first message, nonce 2 for the second
      +message, etc. Nonces are long enough that randomly generated nonces have
      +negligible risk of collision.
      +
      +This package is interoperable with NaCl: http://nacl.cr.yp.to/box.html.
      +*/
      +package box // import "golang.org/x/crypto/nacl/box"
      +
      +import (
      +	"golang.org/x/crypto/curve25519"
      +	"golang.org/x/crypto/nacl/secretbox"
      +	"golang.org/x/crypto/salsa20/salsa"
      +	"io"
      +)
      +
      +// Overhead is the number of bytes of overhead when boxing a message.
      +const Overhead = secretbox.Overhead
      +
      +// GenerateKey generates a new public/private key pair suitable for use with
      +// Seal and Open.
      +func GenerateKey(rand io.Reader) (publicKey, privateKey *[32]byte, err error) {
      +	publicKey = new([32]byte)
      +	privateKey = new([32]byte)
      +	_, err = io.ReadFull(rand, privateKey[:])
      +	if err != nil {
      +		publicKey = nil
      +		privateKey = nil
      +		return
      +	}
      +
      +	curve25519.ScalarBaseMult(publicKey, privateKey)
      +	return
      +}
      +
      +var zeros [16]byte
      +
      +// Precompute calculates the shared key between peersPublicKey and privateKey
      +// and writes it to sharedKey. The shared key can be used with
      +// OpenAfterPrecomputation and SealAfterPrecomputation to speed up processing
      +// when using the same pair of keys repeatedly.
      +func Precompute(sharedKey, peersPublicKey, privateKey *[32]byte) {
      +	curve25519.ScalarMult(sharedKey, privateKey, peersPublicKey)
      +	salsa.HSalsa20(sharedKey, &zeros, sharedKey, &salsa.Sigma)
      +}
      +
      +// Seal appends an encrypted and authenticated copy of message to out, which
      +// will be Overhead bytes longer than the original and must not overlap. The
      +// nonce must be unique for each distinct message for a given pair of keys.
      +func Seal(out, message []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) []byte {
      +	var sharedKey [32]byte
      +	Precompute(&sharedKey, peersPublicKey, privateKey)
      +	return secretbox.Seal(out, message, nonce, &sharedKey)
      +}
      +
      +// SealAfterPrecomputation performs the same actions as Seal, but takes a
      +// shared key as generated by Precompute.
      +func SealAfterPrecomputation(out, message []byte, nonce *[24]byte, sharedKey *[32]byte) []byte {
      +	return secretbox.Seal(out, message, nonce, sharedKey)
      +}
      +
      +// Open authenticates and decrypts a box produced by Seal and appends the
      +// message to out, which must not overlap box. The output will be Overhead
      +// bytes smaller than box.
      +func Open(out, box []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) ([]byte, bool) {
      +	var sharedKey [32]byte
      +	Precompute(&sharedKey, peersPublicKey, privateKey)
      +	return secretbox.Open(out, box, nonce, &sharedKey)
      +}
      +
      +// OpenAfterPrecomputation performs the same actions as Open, but takes a
      +// shared key as generated by Precompute.
      +func OpenAfterPrecomputation(out, box []byte, nonce *[24]byte, sharedKey *[32]byte) ([]byte, bool) {
      +	return secretbox.Open(out, box, nonce, sharedKey)
      +}
      diff --git a/vendor/golang.org/x/crypto/nacl/box/box_test.go b/vendor/golang.org/x/crypto/nacl/box/box_test.go
      new file mode 100644
      index 00000000..481ade28
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/nacl/box/box_test.go
      @@ -0,0 +1,78 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package box
      +
      +import (
      +	"bytes"
      +	"crypto/rand"
      +	"encoding/hex"
      +	"testing"
      +
      +	"golang.org/x/crypto/curve25519"
      +)
      +
      +func TestSealOpen(t *testing.T) {
      +	publicKey1, privateKey1, _ := GenerateKey(rand.Reader)
      +	publicKey2, privateKey2, _ := GenerateKey(rand.Reader)
      +
      +	if *privateKey1 == *privateKey2 {
      +		t.Fatalf("private keys are equal!")
      +	}
      +	if *publicKey1 == *publicKey2 {
      +		t.Fatalf("public keys are equal!")
      +	}
      +	message := []byte("test message")
      +	var nonce [24]byte
      +
      +	box := Seal(nil, message, &nonce, publicKey1, privateKey2)
      +	opened, ok := Open(nil, box, &nonce, publicKey2, privateKey1)
      +	if !ok {
      +		t.Fatalf("failed to open box")
      +	}
      +
      +	if !bytes.Equal(opened, message) {
      +		t.Fatalf("got %x, want %x", opened, message)
      +	}
      +
      +	for i := range box {
      +		box[i] ^= 0x40
      +		_, ok := Open(nil, box, &nonce, publicKey2, privateKey1)
      +		if ok {
      +			t.Fatalf("opened box with byte %d corrupted", i)
      +		}
      +		box[i] ^= 0x40
      +	}
      +}
      +
      +func TestBox(t *testing.T) {
      +	var privateKey1, privateKey2 [32]byte
      +	for i := range privateKey1[:] {
      +		privateKey1[i] = 1
      +	}
      +	for i := range privateKey2[:] {
      +		privateKey2[i] = 2
      +	}
      +
      +	var publicKey1 [32]byte
      +	curve25519.ScalarBaseMult(&publicKey1, &privateKey1)
      +	var message [64]byte
      +	for i := range message[:] {
      +		message[i] = 3
      +	}
      +
      +	var nonce [24]byte
      +	for i := range nonce[:] {
      +		nonce[i] = 4
      +	}
      +
      +	box := Seal(nil, message[:], &nonce, &publicKey1, &privateKey2)
      +
      +	// expected was generated using the C implementation of NaCl.
      +	expected, _ := hex.DecodeString("78ea30b19d2341ebbdba54180f821eec265cf86312549bea8a37652a8bb94f07b78a73ed1708085e6ddd0e943bbdeb8755079a37eb31d86163ce241164a47629c0539f330b4914cd135b3855bc2a2dfc")
      +
      +	if !bytes.Equal(box, expected) {
      +		t.Fatalf("box didn't match, got\n%x\n, expected\n%x", box, expected)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
      new file mode 100644
      index 00000000..dbf31bbf
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
      @@ -0,0 +1,149 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +/*
      +Package secretbox encrypts and authenticates small messages.
      +
      +Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with
      +secret-key cryptography. The length of messages is not hidden.
      +
      +It is the caller's responsibility to ensure the uniqueness of nonces—for
      +example, by using nonce 1 for the first message, nonce 2 for the second
      +message, etc. Nonces are long enough that randomly generated nonces have
      +negligible risk of collision.
      +
      +This package is interoperable with NaCl: http://nacl.cr.yp.to/secretbox.html.
      +*/
      +package secretbox // import "golang.org/x/crypto/nacl/secretbox"
      +
      +import (
      +	"golang.org/x/crypto/poly1305"
      +	"golang.org/x/crypto/salsa20/salsa"
      +)
      +
      +// Overhead is the number of bytes of overhead when boxing a message.
      +const Overhead = poly1305.TagSize
      +
      +// setup produces a sub-key and Salsa20 counter given a nonce and key.
      +func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
      +	// We use XSalsa20 for encryption so first we need to generate a
      +	// key and nonce with HSalsa20.
      +	var hNonce [16]byte
      +	copy(hNonce[:], nonce[:])
      +	salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
      +
      +	// The final 8 bytes of the original nonce form the new nonce.
      +	copy(counter[:], nonce[16:])
      +}
      +
      +// sliceForAppend takes a slice and a requested number of bytes. It returns a
      +// slice with the contents of the given slice followed by that many bytes and a
      +// second slice that aliases into it and contains only the extra bytes. If the
      +// original slice has sufficient capacity then no allocation is performed.
      +func sliceForAppend(in []byte, n int) (head, tail []byte) {
      +	if total := len(in) + n; cap(in) >= total {
      +		head = in[:total]
      +	} else {
      +		head = make([]byte, total)
      +		copy(head, in)
      +	}
      +	tail = head[len(in):]
      +	return
      +}
      +
      +// Seal appends an encrypted and authenticated copy of message to out, which
      +// must not overlap message. The key and nonce pair must be unique for each
      +// distinct message and the output will be Overhead bytes longer than message.
      +func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte {
      +	var subKey [32]byte
      +	var counter [16]byte
      +	setup(&subKey, &counter, nonce, key)
      +
      +	// The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
      +	// Salsa20 works with 64-byte blocks, we also generate 32 bytes of
      +	// keystream as a side effect.
      +	var firstBlock [64]byte
      +	salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
      +
      +	var poly1305Key [32]byte
      +	copy(poly1305Key[:], firstBlock[:])
      +
      +	ret, out := sliceForAppend(out, len(message)+poly1305.TagSize)
      +
      +	// We XOR up to 32 bytes of message with the keystream generated from
      +	// the first block.
      +	firstMessageBlock := message
      +	if len(firstMessageBlock) > 32 {
      +		firstMessageBlock = firstMessageBlock[:32]
      +	}
      +
      +	tagOut := out
      +	out = out[poly1305.TagSize:]
      +	for i, x := range firstMessageBlock {
      +		out[i] = firstBlock[32+i] ^ x
      +	}
      +	message = message[len(firstMessageBlock):]
      +	ciphertext := out
      +	out = out[len(firstMessageBlock):]
      +
      +	// Now encrypt the rest.
      +	counter[8] = 1
      +	salsa.XORKeyStream(out, message, &counter, &subKey)
      +
      +	var tag [poly1305.TagSize]byte
      +	poly1305.Sum(&tag, ciphertext, &poly1305Key)
      +	copy(tagOut, tag[:])
      +
      +	return ret
      +}
      +
      +// Open authenticates and decrypts a box produced by Seal and appends the
      +// message to out, which must not overlap box. The output will be Overhead
      +// bytes smaller than box.
      +func Open(out []byte, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
      +	if len(box) < Overhead {
      +		return nil, false
      +	}
      +
      +	var subKey [32]byte
      +	var counter [16]byte
      +	setup(&subKey, &counter, nonce, key)
      +
      +	// The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
      +	// Salsa20 works with 64-byte blocks, we also generate 32 bytes of
      +	// keystream as a side effect.
      +	var firstBlock [64]byte
      +	salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
      +
      +	var poly1305Key [32]byte
      +	copy(poly1305Key[:], firstBlock[:])
      +	var tag [poly1305.TagSize]byte
      +	copy(tag[:], box)
      +
      +	if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) {
      +		return nil, false
      +	}
      +
      +	ret, out := sliceForAppend(out, len(box)-Overhead)
      +
      +	// We XOR up to 32 bytes of box with the keystream generated from
      +	// the first block.
      +	box = box[Overhead:]
      +	firstMessageBlock := box
      +	if len(firstMessageBlock) > 32 {
      +		firstMessageBlock = firstMessageBlock[:32]
      +	}
      +	for i, x := range firstMessageBlock {
      +		out[i] = firstBlock[32+i] ^ x
      +	}
      +
      +	box = box[len(firstMessageBlock):]
      +	out = out[len(firstMessageBlock):]
      +
      +	// Now decrypt the rest.
      +	counter[8] = 1
      +	salsa.XORKeyStream(out, box, &counter, &subKey)
      +
      +	return ret, true
      +}
      diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox_test.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox_test.go
      new file mode 100644
      index 00000000..664dc152
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox_test.go
      @@ -0,0 +1,91 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package secretbox
      +
      +import (
      +	"bytes"
      +	"crypto/rand"
      +	"encoding/hex"
      +	"testing"
      +)
      +
      +func TestSealOpen(t *testing.T) {
      +	var key [32]byte
      +	var nonce [24]byte
      +
      +	rand.Reader.Read(key[:])
      +	rand.Reader.Read(nonce[:])
      +
      +	var box, opened []byte
      +
      +	for msgLen := 0; msgLen < 128; msgLen += 17 {
      +		message := make([]byte, msgLen)
      +		rand.Reader.Read(message)
      +
      +		box = Seal(box[:0], message, &nonce, &key)
      +		var ok bool
      +		opened, ok = Open(opened[:0], box, &nonce, &key)
      +		if !ok {
      +			t.Errorf("%d: failed to open box", msgLen)
      +			continue
      +		}
      +
      +		if !bytes.Equal(opened, message) {
      +			t.Errorf("%d: got %x, expected %x", msgLen, opened, message)
      +			continue
      +		}
      +	}
      +
      +	for i := range box {
      +		box[i] ^= 0x20
      +		_, ok := Open(opened[:0], box, &nonce, &key)
      +		if ok {
      +			t.Errorf("box was opened after corrupting byte %d", i)
      +		}
      +		box[i] ^= 0x20
      +	}
      +}
      +
      +func TestSecretBox(t *testing.T) {
      +	var key [32]byte
      +	var nonce [24]byte
      +	var message [64]byte
      +
      +	for i := range key[:] {
      +		key[i] = 1
      +	}
      +	for i := range nonce[:] {
      +		nonce[i] = 2
      +	}
      +	for i := range message[:] {
      +		message[i] = 3
      +	}
      +
      +	box := Seal(nil, message[:], &nonce, &key)
      +	// expected was generated using the C implementation of NaCl.
      +	expected, _ := hex.DecodeString("8442bc313f4626f1359e3b50122b6ce6fe66ddfe7d39d14e637eb4fd5b45beadab55198df6ab5368439792a23c87db70acb6156dc5ef957ac04f6276cf6093b84be77ff0849cc33e34b7254d5a8f65ad")
      +
      +	if !bytes.Equal(box, expected) {
      +		t.Fatalf("box didn't match, got\n%x\n, expected\n%x", box, expected)
      +	}
      +}
      +
      +func TestAppend(t *testing.T) {
      +	var key [32]byte
      +	var nonce [24]byte
      +	var message [8]byte
      +
      +	out := make([]byte, 4)
      +	box := Seal(out, message[:], &nonce, &key)
      +	if !bytes.Equal(box[:4], out[:4]) {
      +		t.Fatalf("Seal didn't correctly append")
      +	}
      +
      +	out = make([]byte, 4, 100)
      +	box = Seal(out, message[:], &nonce, &key)
      +	if !bytes.Equal(box[:4], out[:4]) {
      +		t.Fatalf("Seal didn't correctly append with sufficient capacity.")
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go
      new file mode 100644
      index 00000000..ea61cf49
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go
      @@ -0,0 +1,673 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses
      +// are signed messages attesting to the validity of a certificate for a small
      +// period of time. This is used to manage revocation for X.509 certificates.
      +package ocsp // import "golang.org/x/crypto/ocsp"
      +
      +import (
      +	"crypto"
      +	"crypto/ecdsa"
      +	"crypto/elliptic"
      +	"crypto/rand"
      +	"crypto/rsa"
      +	"crypto/sha1"
      +	"crypto/x509"
      +	"crypto/x509/pkix"
      +	"encoding/asn1"
      +	"errors"
      +	"math/big"
      +	"strconv"
      +	"time"
      +)
      +
      +var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1})
      +
      +// ResponseStatus contains the result of an OCSP request. See
      +// https://tools.ietf.org/html/rfc6960#section-2.3
      +type ResponseStatus int
      +
      +const (
      +	Success           ResponseStatus = 0
      +	Malformed         ResponseStatus = 1
      +	InternalError     ResponseStatus = 2
      +	TryLater          ResponseStatus = 3
+	// Status code four is unused in OCSP. See
      +	// https://tools.ietf.org/html/rfc6960#section-4.2.1
      +	SignatureRequired ResponseStatus = 5
      +	Unauthorized      ResponseStatus = 6
      +)
      +
      +func (r ResponseStatus) String() string {
      +	switch r {
      +	case Success:
      +		return "success"
      +	case Malformed:
      +		return "malformed"
      +	case InternalError:
      +		return "internal error"
      +	case TryLater:
      +		return "try later"
      +	case SignatureRequired:
      +		return "signature required"
      +	case Unauthorized:
      +		return "unauthorized"
      +	default:
      +		return "unknown OCSP status: " + strconv.Itoa(int(r))
      +	}
      +}
      +
+// ResponseError is an error that may be returned by ParseResponse to indicate
+// that the response itself is an error, not just that it indicates that a
+// certificate is revoked, unknown, etc.
      +type ResponseError struct {
      +	Status ResponseStatus
      +}
      +
      +func (r ResponseError) Error() string {
      +	return "ocsp: error from server: " + r.Status.String()
      +}
      +
      +// These are internal structures that reflect the ASN.1 structure of an OCSP
      +// response. See RFC 2560, section 4.2.
      +
      +type certID struct {
      +	HashAlgorithm pkix.AlgorithmIdentifier
      +	NameHash      []byte
      +	IssuerKeyHash []byte
      +	SerialNumber  *big.Int
      +}
      +
      +// https://tools.ietf.org/html/rfc2560#section-4.1.1
      +type ocspRequest struct {
      +	TBSRequest tbsRequest
      +}
      +
      +type tbsRequest struct {
      +	Version       int              `asn1:"explicit,tag:0,default:0,optional"`
      +	RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"`
      +	RequestList   []request
      +}
      +
      +type request struct {
      +	Cert certID
      +}
      +
      +type responseASN1 struct {
      +	Status   asn1.Enumerated
      +	Response responseBytes `asn1:"explicit,tag:0,optional"`
      +}
      +
      +type responseBytes struct {
      +	ResponseType asn1.ObjectIdentifier
      +	Response     []byte
      +}
      +
      +type basicResponse struct {
      +	TBSResponseData    responseData
      +	SignatureAlgorithm pkix.AlgorithmIdentifier
      +	Signature          asn1.BitString
      +	Certificates       []asn1.RawValue `asn1:"explicit,tag:0,optional"`
      +}
      +
      +type responseData struct {
      +	Raw              asn1.RawContent
      +	Version          int           `asn1:"optional,default:1,explicit,tag:0"`
      +	RawResponderName asn1.RawValue `asn1:"optional,explicit,tag:1"`
      +	KeyHash          []byte        `asn1:"optional,explicit,tag:2"`
      +	ProducedAt       time.Time     `asn1:"generalized"`
      +	Responses        []singleResponse
      +}
      +
      +type singleResponse struct {
      +	CertID           certID
      +	Good             asn1.Flag        `asn1:"tag:0,optional"`
      +	Revoked          revokedInfo      `asn1:"tag:1,optional"`
      +	Unknown          asn1.Flag        `asn1:"tag:2,optional"`
      +	ThisUpdate       time.Time        `asn1:"generalized"`
      +	NextUpdate       time.Time        `asn1:"generalized,explicit,tag:0,optional"`
      +	SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"`
      +}
      +
      +type revokedInfo struct {
      +	RevocationTime time.Time       `asn1:"generalized"`
      +	Reason         asn1.Enumerated `asn1:"explicit,tag:0,optional"`
      +}
      +
      +var (
      +	oidSignatureMD2WithRSA      = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
      +	oidSignatureMD5WithRSA      = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
      +	oidSignatureSHA1WithRSA     = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
      +	oidSignatureSHA256WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
      +	oidSignatureSHA384WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
      +	oidSignatureSHA512WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
      +	oidSignatureDSAWithSHA1     = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
      +	oidSignatureDSAWithSHA256   = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2}
      +	oidSignatureECDSAWithSHA1   = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
      +	oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
      +	oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
      +	oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
      +)
      +
      +var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{
      +	crypto.SHA1:   asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}),
      +	crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}),
      +	crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}),
      +	crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}),
      +}
      +
      +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
      +var signatureAlgorithmDetails = []struct {
      +	algo       x509.SignatureAlgorithm
      +	oid        asn1.ObjectIdentifier
      +	pubKeyAlgo x509.PublicKeyAlgorithm
      +	hash       crypto.Hash
      +}{
      +	{x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */},
      +	{x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5},
      +	{x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
      +	{x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256},
      +	{x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384},
      +	{x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512},
      +	{x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1},
      +	{x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256},
      +	{x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1},
      +	{x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256},
      +	{x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384},
      +	{x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512},
      +}
      +
      +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
      +func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) {
      +	var pubType x509.PublicKeyAlgorithm
      +
      +	switch pub := pub.(type) {
      +	case *rsa.PublicKey:
      +		pubType = x509.RSA
      +		hashFunc = crypto.SHA256
      +		sigAlgo.Algorithm = oidSignatureSHA256WithRSA
      +		sigAlgo.Parameters = asn1.RawValue{
      +			Tag: 5,
      +		}
      +
      +	case *ecdsa.PublicKey:
      +		pubType = x509.ECDSA
      +
      +		switch pub.Curve {
      +		case elliptic.P224(), elliptic.P256():
      +			hashFunc = crypto.SHA256
      +			sigAlgo.Algorithm = oidSignatureECDSAWithSHA256
      +		case elliptic.P384():
      +			hashFunc = crypto.SHA384
      +			sigAlgo.Algorithm = oidSignatureECDSAWithSHA384
      +		case elliptic.P521():
      +			hashFunc = crypto.SHA512
      +			sigAlgo.Algorithm = oidSignatureECDSAWithSHA512
      +		default:
      +			err = errors.New("x509: unknown elliptic curve")
      +		}
      +
      +	default:
      +		err = errors.New("x509: only RSA and ECDSA keys supported")
      +	}
      +
      +	if err != nil {
      +		return
      +	}
      +
      +	if requestedSigAlgo == 0 {
      +		return
      +	}
      +
      +	found := false
      +	for _, details := range signatureAlgorithmDetails {
      +		if details.algo == requestedSigAlgo {
      +			if details.pubKeyAlgo != pubType {
      +				err = errors.New("x509: requested SignatureAlgorithm does not match private key type")
      +				return
      +			}
      +			sigAlgo.Algorithm, hashFunc = details.oid, details.hash
      +			if hashFunc == 0 {
      +				err = errors.New("x509: cannot sign with hash function requested")
      +				return
      +			}
      +			found = true
      +			break
      +		}
      +	}
      +
      +	if !found {
      +		err = errors.New("x509: unknown SignatureAlgorithm")
      +	}
      +
      +	return
      +}
      +
      +// TODO(agl): this is taken from crypto/x509 and so should probably be exported
      +// from crypto/x509 or crypto/x509/pkix.
      +func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm {
      +	for _, details := range signatureAlgorithmDetails {
      +		if oid.Equal(details.oid) {
      +			return details.algo
      +		}
      +	}
      +	return x509.UnknownSignatureAlgorithm
      +}
      +
      +// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form.
      +func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash {
      +	for hash, oid := range hashOIDs {
      +		if oid.Equal(target) {
      +			return hash
      +		}
      +	}
      +	return crypto.Hash(0)
      +}
      +
      +// This is the exposed reflection of the internal OCSP structures.
      +
      +// The status values that can be expressed in OCSP.  See RFC 6960.
      +const (
      +	// Good means that the certificate is valid.
      +	Good = iota
      +	// Revoked means that the certificate has been deliberately revoked.
      +	Revoked
      +	// Unknown means that the OCSP responder doesn't know about the certificate.
      +	Unknown
      +	// ServerFailed is unused and was never used (see
      +	// https://go-review.googlesource.com/#/c/18944). ParseResponse will
      +	// return a ResponseError when an error response is parsed.
      +	ServerFailed
      +)
      +
      +// The enumerated reasons for revoking a certificate.  See RFC 5280.
      +const (
      +	Unspecified          = iota
      +	KeyCompromise        = iota
      +	CACompromise         = iota
      +	AffiliationChanged   = iota
      +	Superseded           = iota
      +	CessationOfOperation = iota
      +	CertificateHold      = iota
      +	_                    = iota
      +	RemoveFromCRL        = iota
      +	PrivilegeWithdrawn   = iota
      +	AACompromise         = iota
      +)
      +
      +// Request represents an OCSP request. See RFC 6960.
      +type Request struct {
      +	HashAlgorithm  crypto.Hash
      +	IssuerNameHash []byte
      +	IssuerKeyHash  []byte
      +	SerialNumber   *big.Int
      +}
      +
      +// Response represents an OCSP response containing a single SingleResponse. See
      +// RFC 6960.
      +type Response struct {
      +	// Status is one of {Good, Revoked, Unknown}
      +	Status                                        int
      +	SerialNumber                                  *big.Int
      +	ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time
      +	RevocationReason                              int
      +	Certificate                                   *x509.Certificate
      +	// TBSResponseData contains the raw bytes of the signed response. If
      +	// Certificate is nil then this can be used to verify Signature.
      +	TBSResponseData    []byte
      +	Signature          []byte
      +	SignatureAlgorithm x509.SignatureAlgorithm
      +
      +	// Extensions contains raw X.509 extensions from the singleExtensions field
      +	// of the OCSP response. When parsing certificates, this can be used to
      +	// extract non-critical extensions that are not parsed by this package. When
      +	// marshaling OCSP responses, the Extensions field is ignored, see
      +	// ExtraExtensions.
      +	Extensions []pkix.Extension
      +
      +	// ExtraExtensions contains extensions to be copied, raw, into any marshaled
      +	// OCSP response (in the singleExtensions field). Values override any
      +	// extensions that would otherwise be produced based on the other fields. The
      +	// ExtraExtensions field is not populated when parsing certificates, see
      +	// Extensions.
      +	ExtraExtensions []pkix.Extension
      +}
      +
      +// These are pre-serialized error responses for the various non-success codes
      +// defined by OCSP. The Unauthorized code in particular can be used by an OCSP
      +// responder that supports only pre-signed responses as a response to requests
      +// for certificates with unknown status. See RFC 5019.
      +var (
      +	MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01}
      +	InternalErrorErrorResponse    = []byte{0x30, 0x03, 0x0A, 0x01, 0x02}
      +	TryLaterErrorResponse         = []byte{0x30, 0x03, 0x0A, 0x01, 0x03}
      +	SigRequredErrorResponse       = []byte{0x30, 0x03, 0x0A, 0x01, 0x05}
      +	UnauthorizedErrorResponse     = []byte{0x30, 0x03, 0x0A, 0x01, 0x06}
      +)
      +
      +// CheckSignatureFrom checks that the signature in resp is a valid signature
      +// from issuer. This should only be used if resp.Certificate is nil. Otherwise,
      +// the OCSP response contained an intermediate certificate that created the
      +// signature. That signature is checked by ParseResponse and only
      +// resp.Certificate remains to be validated.
      +func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error {
      +	return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature)
      +}
      +
      +// ParseError results from an invalid OCSP response.
      +type ParseError string
      +
      +func (p ParseError) Error() string {
      +	return string(p)
      +}
      +
      +// ParseRequest parses an OCSP request in DER form. It only supports
      +// requests for a single certificate. Signed requests are not supported.
      +// If a request includes a signature, it will result in a ParseError.
      +func ParseRequest(bytes []byte) (*Request, error) {
      +	var req ocspRequest
      +	rest, err := asn1.Unmarshal(bytes, &req)
      +	if err != nil {
      +		return nil, err
      +	}
      +	if len(rest) > 0 {
      +		return nil, ParseError("trailing data in OCSP request")
      +	}
      +
      +	if len(req.TBSRequest.RequestList) == 0 {
      +		return nil, ParseError("OCSP request contains no request body")
      +	}
      +	innerRequest := req.TBSRequest.RequestList[0]
      +
      +	hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm)
      +	if hashFunc == crypto.Hash(0) {
      +		return nil, ParseError("OCSP request uses unknown hash function")
      +	}
      +
      +	return &Request{
      +		HashAlgorithm:  hashFunc,
      +		IssuerNameHash: innerRequest.Cert.NameHash,
      +		IssuerKeyHash:  innerRequest.Cert.IssuerKeyHash,
      +		SerialNumber:   innerRequest.Cert.SerialNumber,
      +	}, nil
      +}
      +
      +// ParseResponse parses an OCSP response in DER form. It only supports
      +// responses for a single certificate. If the response contains a certificate
      +// then the signature over the response is checked. If issuer is not nil then
      +// it will be used to validate the signature or embedded certificate.
      +//
      +// Invalid signatures or parse failures will result in a ParseError. Error
      +// responses will result in a ResponseError.
      +func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) {
      +	var resp responseASN1
      +	rest, err := asn1.Unmarshal(bytes, &resp)
      +	if err != nil {
      +		return nil, err
      +	}
      +	if len(rest) > 0 {
      +		return nil, ParseError("trailing data in OCSP response")
      +	}
      +
      +	if status := ResponseStatus(resp.Status); status != Success {
      +		return nil, ResponseError{status}
      +	}
      +
      +	if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) {
      +		return nil, ParseError("bad OCSP response type")
      +	}
      +
      +	var basicResp basicResponse
      +	rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	if len(basicResp.Certificates) > 1 {
      +		return nil, ParseError("OCSP response contains bad number of certificates")
      +	}
      +
      +	if len(basicResp.TBSResponseData.Responses) != 1 {
      +		return nil, ParseError("OCSP response contains bad number of responses")
      +	}
      +
      +	ret := &Response{
      +		TBSResponseData:    basicResp.TBSResponseData.Raw,
      +		Signature:          basicResp.Signature.RightAlign(),
      +		SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm),
      +	}
      +
      +	if len(basicResp.Certificates) > 0 {
      +		ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes)
      +		if err != nil {
      +			return nil, err
      +		}
      +
      +		if err := ret.CheckSignatureFrom(ret.Certificate); err != nil {
      +			return nil, ParseError("bad OCSP signature")
      +		}
      +
      +		if issuer != nil {
      +			if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil {
      +				return nil, ParseError("bad signature on embedded certificate")
      +			}
      +		}
      +	} else if issuer != nil {
      +		if err := ret.CheckSignatureFrom(issuer); err != nil {
      +			return nil, ParseError("bad OCSP signature")
      +		}
      +	}
      +
      +	r := basicResp.TBSResponseData.Responses[0]
      +
      +	for _, ext := range r.SingleExtensions {
      +		if ext.Critical {
      +			return nil, ParseError("unsupported critical extension")
      +		}
      +	}
      +	ret.Extensions = r.SingleExtensions
      +
      +	ret.SerialNumber = r.CertID.SerialNumber
      +
      +	switch {
      +	case bool(r.Good):
      +		ret.Status = Good
      +	case bool(r.Unknown):
      +		ret.Status = Unknown
      +	default:
      +		ret.Status = Revoked
      +		ret.RevokedAt = r.Revoked.RevocationTime
      +		ret.RevocationReason = int(r.Revoked.Reason)
      +	}
      +
      +	ret.ProducedAt = basicResp.TBSResponseData.ProducedAt
      +	ret.ThisUpdate = r.ThisUpdate
      +	ret.NextUpdate = r.NextUpdate
      +
      +	return ret, nil
      +}
      +
      +// RequestOptions contains options for constructing OCSP requests.
      +type RequestOptions struct {
      +	// Hash contains the hash function that should be used when
      +	// constructing the OCSP request. If zero, SHA-1 will be used.
      +	Hash crypto.Hash
      +}
      +
      +func (opts *RequestOptions) hash() crypto.Hash {
      +	if opts == nil || opts.Hash == 0 {
      +		// SHA-1 is nearly universally used in OCSP.
      +		return crypto.SHA1
      +	}
      +	return opts.Hash
      +}
      +
      +// CreateRequest returns a DER-encoded, OCSP request for the status of cert. If
      +// opts is nil then sensible defaults are used.
      +func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) {
      +	hashFunc := opts.hash()
      +
      +	// OCSP seems to be the only place where these raw hash identifiers are
      +	// used. I took the following from
      +	// http://msdn.microsoft.com/en-us/library/ff635603.aspx
      +	var hashOID asn1.ObjectIdentifier
      +	hashOID, ok := hashOIDs[hashFunc]
      +	if !ok {
      +		return nil, x509.ErrUnsupportedAlgorithm
      +	}
      +
      +	if !hashFunc.Available() {
      +		return nil, x509.ErrUnsupportedAlgorithm
      +	}
      +	h := opts.hash().New()
      +
      +	var publicKeyInfo struct {
      +		Algorithm pkix.AlgorithmIdentifier
      +		PublicKey asn1.BitString
      +	}
      +	if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
      +		return nil, err
      +	}
      +
      +	h.Write(publicKeyInfo.PublicKey.RightAlign())
      +	issuerKeyHash := h.Sum(nil)
      +
      +	h.Reset()
      +	h.Write(issuer.RawSubject)
      +	issuerNameHash := h.Sum(nil)
      +
      +	return asn1.Marshal(ocspRequest{
      +		tbsRequest{
      +			Version: 0,
      +			RequestList: []request{
      +				{
      +					Cert: certID{
      +						pkix.AlgorithmIdentifier{
      +							Algorithm:  hashOID,
      +							Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
      +						},
      +						issuerNameHash,
      +						issuerKeyHash,
      +						cert.SerialNumber,
      +					},
      +				},
      +			},
      +		},
      +	})
      +}
      +
      +// CreateResponse returns a DER-encoded OCSP response with the specified contents.
      +// The fields in the response are populated as follows:
      +//
      +// The responder cert is used to populate the ResponderName field, and the certificate
      +// itself is provided alongside the OCSP response signature.
      +//
+// The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields.
      +// (SHA-1 is used for the hash function; this is not configurable.)
      +//
      +// The template is used to populate the SerialNumber, RevocationStatus, RevokedAt,
      +// RevocationReason, ThisUpdate, and NextUpdate fields.
      +//
      +// The ProducedAt date is automatically set to the current date, to the nearest minute.
      +func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) {
      +	var publicKeyInfo struct {
      +		Algorithm pkix.AlgorithmIdentifier
      +		PublicKey asn1.BitString
      +	}
      +	if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
      +		return nil, err
      +	}
      +
      +	h := sha1.New()
      +	h.Write(publicKeyInfo.PublicKey.RightAlign())
      +	issuerKeyHash := h.Sum(nil)
      +
      +	h.Reset()
      +	h.Write(issuer.RawSubject)
      +	issuerNameHash := h.Sum(nil)
      +
      +	innerResponse := singleResponse{
      +		CertID: certID{
      +			HashAlgorithm: pkix.AlgorithmIdentifier{
      +				Algorithm:  hashOIDs[crypto.SHA1],
      +				Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
      +			},
      +			NameHash:      issuerNameHash,
      +			IssuerKeyHash: issuerKeyHash,
      +			SerialNumber:  template.SerialNumber,
      +		},
      +		ThisUpdate:       template.ThisUpdate.UTC(),
      +		NextUpdate:       template.NextUpdate.UTC(),
      +		SingleExtensions: template.ExtraExtensions,
      +	}
      +
      +	switch template.Status {
      +	case Good:
      +		innerResponse.Good = true
      +	case Unknown:
      +		innerResponse.Unknown = true
      +	case Revoked:
      +		innerResponse.Revoked = revokedInfo{
      +			RevocationTime: template.RevokedAt.UTC(),
      +			Reason:         asn1.Enumerated(template.RevocationReason),
      +		}
      +	}
      +
      +	responderName := asn1.RawValue{
      +		Class:      2, // context-specific
      +		Tag:        1, // explicit tag
      +		IsCompound: true,
      +		Bytes:      responderCert.RawSubject,
      +	}
      +	tbsResponseData := responseData{
      +		Version:          0,
      +		RawResponderName: responderName,
      +		ProducedAt:       time.Now().Truncate(time.Minute).UTC(),
      +		Responses:        []singleResponse{innerResponse},
      +	}
      +
      +	tbsResponseDataDER, err := asn1.Marshal(tbsResponseData)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	responseHash := hashFunc.New()
      +	responseHash.Write(tbsResponseDataDER)
      +	signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	response := basicResponse{
      +		TBSResponseData:    tbsResponseData,
      +		SignatureAlgorithm: signatureAlgorithm,
      +		Signature: asn1.BitString{
      +			Bytes:     signature,
      +			BitLength: 8 * len(signature),
      +		},
      +	}
      +	if template.Certificate != nil {
      +		response.Certificates = []asn1.RawValue{
      +			asn1.RawValue{FullBytes: template.Certificate.Raw},
      +		}
      +	}
      +	responseDER, err := asn1.Marshal(response)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return asn1.Marshal(responseASN1{
      +		Status: asn1.Enumerated(Success),
      +		Response: responseBytes{
      +			ResponseType: idPKIXOCSPBasic,
      +			Response:     responseDER,
      +		},
      +	})
      +}
      diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp_test.go b/vendor/golang.org/x/crypto/ocsp/ocsp_test.go
      new file mode 100644
      index 00000000..33868497
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ocsp/ocsp_test.go
      @@ -0,0 +1,584 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ocsp
      +
      +import (
      +	"bytes"
      +	"crypto"
      +	"crypto/sha1"
      +	"crypto/x509"
      +	"crypto/x509/pkix"
      +	"encoding/asn1"
      +	"encoding/hex"
      +	"math/big"
      +	"reflect"
      +	"testing"
      +	"time"
      +)
      +
      +func TestOCSPDecode(t *testing.T) {
      +	responseBytes, _ := hex.DecodeString(ocspResponseHex)
      +	resp, err := ParseResponse(responseBytes, nil)
      +	if err != nil {
      +		t.Error(err)
      +	}
      +
      +	expected := Response{
      +		Status:           Good,
      +		SerialNumber:     big.NewInt(0x1d0fa),
      +		RevocationReason: Unspecified,
      +		ThisUpdate:       time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC),
      +		NextUpdate:       time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC),
      +	}
      +
      +	if !reflect.DeepEqual(resp.ThisUpdate, expected.ThisUpdate) {
      +		t.Errorf("resp.ThisUpdate: got %d, want %d", resp.ThisUpdate, expected.ThisUpdate)
      +	}
      +
      +	if !reflect.DeepEqual(resp.NextUpdate, expected.NextUpdate) {
      +		t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, expected.NextUpdate)
      +	}
      +
      +	if resp.Status != expected.Status {
      +		t.Errorf("resp.Status: got %d, want %d", resp.Status, expected.Status)
      +	}
      +
      +	if resp.SerialNumber.Cmp(expected.SerialNumber) != 0 {
      +		t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, expected.SerialNumber)
      +	}
      +
      +	if resp.RevocationReason != expected.RevocationReason {
      +		t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, expected.RevocationReason)
      +	}
      +}
      +
      +func TestOCSPDecodeWithoutCert(t *testing.T) {
      +	responseBytes, _ := hex.DecodeString(ocspResponseWithoutCertHex)
      +	_, err := ParseResponse(responseBytes, nil)
      +	if err != nil {
      +		t.Error(err)
      +	}
      +}
      +
      +func TestOCSPDecodeWithExtensions(t *testing.T) {
      +	responseBytes, _ := hex.DecodeString(ocspResponseWithCriticalExtensionHex)
      +	_, err := ParseResponse(responseBytes, nil)
      +	if err == nil {
      +		t.Error(err)
      +	}
      +
      +	responseBytes, _ = hex.DecodeString(ocspResponseWithExtensionHex)
      +	response, err := ParseResponse(responseBytes, nil)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	if len(response.Extensions) != 1 {
      +		t.Errorf("len(response.Extensions): got %v, want %v", len(response.Extensions), 1)
      +	}
      +
      +	extensionBytes := response.Extensions[0].Value
      +	expectedBytes, _ := hex.DecodeString(ocspExtensionValueHex)
      +	if !bytes.Equal(extensionBytes, expectedBytes) {
      +		t.Errorf("response.Extensions[0]: got %x, want %x", extensionBytes, expectedBytes)
      +	}
      +}
      +
      +func TestOCSPSignature(t *testing.T) {
      +	issuerCert, _ := hex.DecodeString(startComHex)
      +	issuer, err := x509.ParseCertificate(issuerCert)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	response, _ := hex.DecodeString(ocspResponseHex)
      +	if _, err := ParseResponse(response, issuer); err != nil {
      +		t.Error(err)
      +	}
      +}
      +
      +func TestOCSPRequest(t *testing.T) {
      +	leafCert, _ := hex.DecodeString(leafCertHex)
      +	cert, err := x509.ParseCertificate(leafCert)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	issuerCert, _ := hex.DecodeString(issuerCertHex)
      +	issuer, err := x509.ParseCertificate(issuerCert)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	request, err := CreateRequest(cert, issuer, nil)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	expectedBytes, _ := hex.DecodeString(ocspRequestHex)
      +	if !bytes.Equal(request, expectedBytes) {
      +		t.Errorf("request: got %x, wanted %x", request, expectedBytes)
      +	}
      +
      +	decodedRequest, err := ParseRequest(expectedBytes)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	if decodedRequest.HashAlgorithm != crypto.SHA1 {
      +		t.Errorf("request.HashAlgorithm: got %v, want %v", decodedRequest.HashAlgorithm, crypto.SHA1)
      +	}
      +
      +	var publicKeyInfo struct {
      +		Algorithm pkix.AlgorithmIdentifier
      +		PublicKey asn1.BitString
      +	}
      +	_, err = asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	h := sha1.New()
      +	h.Write(publicKeyInfo.PublicKey.RightAlign())
      +	issuerKeyHash := h.Sum(nil)
      +
      +	h.Reset()
      +	h.Write(issuer.RawSubject)
      +	issuerNameHash := h.Sum(nil)
      +
      +	if got := decodedRequest.IssuerKeyHash; !bytes.Equal(got, issuerKeyHash) {
      +		t.Errorf("request.IssuerKeyHash: got %x, want %x", got, issuerKeyHash)
      +	}
      +
      +	if got := decodedRequest.IssuerNameHash; !bytes.Equal(got, issuerNameHash) {
      +		t.Errorf("request.IssuerKeyHash: got %x, want %x", got, issuerNameHash)
      +	}
      +
      +	if got := decodedRequest.SerialNumber; got.Cmp(cert.SerialNumber) != 0 {
      +		t.Errorf("request.SerialNumber: got %x, want %x", got, cert.SerialNumber)
      +	}
      +}
      +
      +func TestOCSPResponse(t *testing.T) {
      +	leafCert, _ := hex.DecodeString(leafCertHex)
      +	leaf, err := x509.ParseCertificate(leafCert)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	issuerCert, _ := hex.DecodeString(issuerCertHex)
      +	issuer, err := x509.ParseCertificate(issuerCert)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	responderCert, _ := hex.DecodeString(responderCertHex)
      +	responder, err := x509.ParseCertificate(responderCert)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	responderPrivateKeyDER, _ := hex.DecodeString(responderPrivateKeyHex)
      +	responderPrivateKey, err := x509.ParsePKCS1PrivateKey(responderPrivateKeyDER)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	extensionBytes, _ := hex.DecodeString(ocspExtensionValueHex)
      +	extensions := []pkix.Extension{
      +		pkix.Extension{
      +			Id:       ocspExtensionOID,
      +			Critical: false,
      +			Value:    extensionBytes,
      +		},
      +	}
      +
      +	producedAt := time.Now().Truncate(time.Minute)
      +	thisUpdate := time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC)
      +	nextUpdate := time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC)
      +	template := Response{
      +		Status:           Revoked,
      +		SerialNumber:     leaf.SerialNumber,
      +		ThisUpdate:       thisUpdate,
      +		NextUpdate:       nextUpdate,
      +		RevokedAt:        thisUpdate,
      +		RevocationReason: KeyCompromise,
      +		Certificate:      responder,
      +		ExtraExtensions:  extensions,
      +	}
      +
      +	responseBytes, err := CreateResponse(issuer, responder, template, responderPrivateKey)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	resp, err := ParseResponse(responseBytes, nil)
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	if !reflect.DeepEqual(resp.ThisUpdate, template.ThisUpdate) {
      +		t.Errorf("resp.ThisUpdate: got %d, want %d", resp.ThisUpdate, template.ThisUpdate)
      +	}
      +
      +	if !reflect.DeepEqual(resp.NextUpdate, template.NextUpdate) {
      +		t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, template.NextUpdate)
      +	}
      +
      +	if !reflect.DeepEqual(resp.RevokedAt, template.RevokedAt) {
      +		t.Errorf("resp.RevokedAt: got %d, want %d", resp.RevokedAt, template.RevokedAt)
      +	}
      +
      +	if !reflect.DeepEqual(resp.Extensions, template.ExtraExtensions) {
      +		t.Errorf("resp.Extensions: got %v, want %v", resp.Extensions, template.ExtraExtensions)
      +	}
      +
      +	if !resp.ProducedAt.Equal(producedAt) {
      +		t.Errorf("resp.ProducedAt: got %d, want %d", resp.ProducedAt, producedAt)
      +	}
      +
      +	if resp.Status != template.Status {
      +		t.Errorf("resp.Status: got %d, want %d", resp.Status, template.Status)
      +	}
      +
      +	if resp.SerialNumber.Cmp(template.SerialNumber) != 0 {
      +		t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, template.SerialNumber)
      +	}
      +
      +	if resp.RevocationReason != template.RevocationReason {
      +		t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, template.RevocationReason)
      +	}
      +}
      +
      +func TestErrorResponse(t *testing.T) {
      +	responseBytes, _ := hex.DecodeString(errorResponseHex)
      +	_, err := ParseResponse(responseBytes, nil)
      +
      +	respErr, ok := err.(ResponseError)
      +	if !ok {
      +		t.Fatalf("expected ResponseError from ParseResponse but got %#v", err)
      +	}
      +	if respErr.Status != Malformed {
      +		t.Fatalf("expected Malformed status from ParseResponse but got %d", respErr.Status)
      +	}
      +}
      +
      +// This OCSP response was taken from Thawte's public OCSP responder.
      +// To recreate:
      +//   $ openssl s_client -tls1 -showcerts -servername www.google.com -connect www.google.com:443
      +// Copy and paste the first certificate into /tmp/cert.crt and the second into
      +// /tmp/intermediate.crt
      +//   $ openssl ocsp -issuer /tmp/intermediate.crt -cert /tmp/cert.crt -url http://ocsp.thawte.com -resp_text -respout /tmp/ocsp.der
      +// Then hex encode the result:
      +//   $ python -c 'print file("/tmp/ocsp.der", "r").read().encode("hex")'
      +
      +const ocspResponseHex = "308206bc0a0100a08206b5308206b106092b0601050507300101048206a23082069e3081" +
      +	"c9a14e304c310b300906035504061302494c31163014060355040a130d5374617274436f" +
      +	"6d204c74642e312530230603550403131c5374617274436f6d20436c6173732031204f43" +
      +	"5350205369676e6572180f32303130303730373137333531375a30663064303c30090605" +
      +	"2b0e03021a050004146568874f40750f016a3475625e1f5c93e5a26d580414eb4234d098" +
      +	"b0ab9ff41b6b08f7cc642eef0e2c45020301d0fa8000180f323031303037303731353031" +
      +	"30355aa011180f32303130303730373138333531375a300d06092a864886f70d01010505" +
      +	"000382010100ab557ff070d1d7cebbb5f0ec91a15c3fed22eb2e1b8244f1b84545f013a4" +
      +	"fb46214c5e3fbfbebb8a56acc2b9db19f68fd3c3201046b3824d5ba689f99864328710cb" +
      +	"467195eb37d84f539e49f859316b32964dc3e47e36814ce94d6c56dd02733b1d0802f7ff" +
      +	"4eebdbbd2927dcf580f16cbc290f91e81b53cb365e7223f1d6e20a88ea064104875e0145" +
      +	"672b20fc14829d51ca122f5f5d77d3ad6c83889c55c7dc43680ba2fe3cef8b05dbcabdc0" +
      +	"d3e09aaf9725597f8c858c2fa38c0d6aed2e6318194420dd1a1137445d13e1c97ab47896" +
      +	"17a4e08925f46f867b72e3a4dc1f08cb870b2b0717f7207faa0ac512e628a029aba7457a" +
      +	"e63dcf3281e2162d9349a08204ba308204b6308204b23082039aa003020102020101300d" +
      +	"06092a864886f70d010105050030818c310b300906035504061302494c31163014060355" +
      +	"040a130d5374617274436f6d204c74642e312b3029060355040b13225365637572652044" +
      +	"69676974616c204365727469666963617465205369676e696e6731383036060355040313" +
      +	"2f5374617274436f6d20436c6173732031205072696d61727920496e7465726d65646961" +
      +	"746520536572766572204341301e170d3037313032353030323330365a170d3132313032" +
      +	"333030323330365a304c310b300906035504061302494c31163014060355040a130d5374" +
      +	"617274436f6d204c74642e312530230603550403131c5374617274436f6d20436c617373" +
      +	"2031204f435350205369676e657230820122300d06092a864886f70d0101010500038201" +
      +	"0f003082010a0282010100b9561b4c45318717178084e96e178df2255e18ed8d8ecc7c2b" +
      +	"7b51a6c1c2e6bf0aa3603066f132fe10ae97b50e99fa24b83fc53dd2777496387d14e1c3" +
      +	"a9b6a4933e2ac12413d085570a95b8147414a0bc007c7bcf222446ef7f1a156d7ea1c577" +
      +	"fc5f0facdfd42eb0f5974990cb2f5cefebceef4d1bdc7ae5c1075c5a99a93171f2b0845b" +
      +	"4ff0864e973fcfe32f9d7511ff87a3e943410c90a4493a306b6944359340a9ca96f02b66" +
      +	"ce67f028df2980a6aaee8d5d5d452b8b0eb93f923cc1e23fcccbdbe7ffcb114d08fa7a6a" +
      +	"3c404f825d1a0e715935cf623a8c7b59670014ed0622f6089a9447a7a19010f7fe58f841" +
      +	"29a2765ea367824d1c3bb2fda308530203010001a382015c30820158300c0603551d1301" +
      +	"01ff04023000300b0603551d0f0404030203a8301e0603551d250417301506082b060105" +
      +	"0507030906092b0601050507300105301d0603551d0e0416041445e0a36695414c5dd449" +
      +	"bc00e33cdcdbd2343e173081a80603551d230481a030819d8014eb4234d098b0ab9ff41b" +
      +	"6b08f7cc642eef0e2c45a18181a47f307d310b300906035504061302494c311630140603" +
      +	"55040a130d5374617274436f6d204c74642e312b3029060355040b132253656375726520" +
      +	"4469676974616c204365727469666963617465205369676e696e67312930270603550403" +
      +	"13205374617274436f6d2043657274696669636174696f6e20417574686f726974798201" +
      +	"0a30230603551d12041c301a8618687474703a2f2f7777772e737461727473736c2e636f" +
      +	"6d2f302c06096086480186f842010d041f161d5374617274436f6d205265766f63617469" +
      +	"6f6e20417574686f72697479300d06092a864886f70d01010505000382010100182d2215" +
      +	"8f0fc0291324fa8574c49bb8ff2835085adcbf7b7fc4191c397ab6951328253fffe1e5ec" +
      +	"2a7da0d50fca1a404e6968481366939e666c0a6209073eca57973e2fefa9ed1718e8176f" +
      +	"1d85527ff522c08db702e3b2b180f1cbff05d98128252cf0f450f7dd2772f4188047f19d" +
      +	"c85317366f94bc52d60f453a550af58e308aaab00ced33040b62bf37f5b1ab2a4f7f0f80" +
      +	"f763bf4d707bc8841d7ad9385ee2a4244469260b6f2bf085977af9074796048ecc2f9d48" +
      +	"a1d24ce16e41a9941568fec5b42771e118f16c106a54ccc339a4b02166445a167902e75e" +
      +	"6d8620b0825dcd18a069b90fd851d10fa8effd409deec02860d26d8d833f304b10669b42"
      +
      +const startComHex = "308206343082041ca003020102020118300d06092a864886f70d0101050500307d310b30" +
      +	"0906035504061302494c31163014060355040a130d5374617274436f6d204c74642e312b" +
      +	"3029060355040b1322536563757265204469676974616c20436572746966696361746520" +
      +	"5369676e696e6731293027060355040313205374617274436f6d20436572746966696361" +
      +	"74696f6e20417574686f72697479301e170d3037313032343230353431375a170d313731" +
      +	"3032343230353431375a30818c310b300906035504061302494c31163014060355040a13" +
      +	"0d5374617274436f6d204c74642e312b3029060355040b13225365637572652044696769" +
      +	"74616c204365727469666963617465205369676e696e67313830360603550403132f5374" +
      +	"617274436f6d20436c6173732031205072696d61727920496e7465726d65646961746520" +
      +	"53657276657220434130820122300d06092a864886f70d01010105000382010f00308201" +
      +	"0a0282010100b689c6acef09527807ac9263d0f44418188480561f91aee187fa3250b4d3" +
      +	"4706f0e6075f700e10f71dc0ce103634855a0f92ac83c6ac58523fba38e8fce7a724e240" +
      +	"a60876c0926e9e2a6d4d3f6e61200adb59ded27d63b33e46fefa215118d7cd30a6ed076e" +
      +	"3b7087b4f9faebee823c056f92f7a4dc0a301e9373fe07cad75f809d225852ae06da8b87" +
      +	"2369b0e42ad8ea83d2bdf371db705a280faf5a387045123f304dcd3baf17e50fcba0a95d" +
      +	"48aab16150cb34cd3c5cc30be810c08c9bf0030362feb26c3e720eee1c432ac9480e5739" +
      +	"c43121c810c12c87fe5495521f523c31129b7fe7c0a0a559d5e28f3ef0d5a8e1d77031a9" +
      +	"c4b3cfaf6d532f06f4a70203010001a38201ad308201a9300f0603551d130101ff040530" +
      +	"030101ff300e0603551d0f0101ff040403020106301d0603551d0e04160414eb4234d098" +
      +	"b0ab9ff41b6b08f7cc642eef0e2c45301f0603551d230418301680144e0bef1aa4405ba5" +
      +	"17698730ca346843d041aef2306606082b06010505070101045a3058302706082b060105" +
      +	"05073001861b687474703a2f2f6f6373702e737461727473736c2e636f6d2f6361302d06" +
      +	"082b060105050730028621687474703a2f2f7777772e737461727473736c2e636f6d2f73" +
      +	"667363612e637274305b0603551d1f045430523027a025a0238621687474703a2f2f7777" +
      +	"772e737461727473736c2e636f6d2f73667363612e63726c3027a025a023862168747470" +
      +	"3a2f2f63726c2e737461727473736c2e636f6d2f73667363612e63726c3081800603551d" +
      +	"20047930773075060b2b0601040181b5370102013066302e06082b060105050702011622" +
      +	"687474703a2f2f7777772e737461727473736c2e636f6d2f706f6c6963792e7064663034" +
      +	"06082b060105050702011628687474703a2f2f7777772e737461727473736c2e636f6d2f" +
      +	"696e7465726d6564696174652e706466300d06092a864886f70d01010505000382020100" +
      +	"2109493ea5886ee00b8b48da314d8ff75657a2e1d36257e9b556f38545753be5501f048b" +
      +	"e6a05a3ee700ae85d0fbff200364cbad02e1c69172f8a34dd6dee8cc3fa18aa2e37c37a7" +
      +	"c64f8f35d6f4d66e067bdd21d9cf56ffcb302249fe8904f385e5aaf1e71fe875904dddf9" +
      +	"46f74234f745580c110d84b0c6da5d3ef9019ee7e1da5595be741c7bfc4d144fac7e5547" +
      +	"7d7bf4a50d491e95e8f712c1ccff76a62547d0f37535be97b75816ebaa5c786fec5330af" +
      +	"ea044dcca902e3f0b60412f630b1113d904e5664d7dc3c435f7339ef4baf87ebf6fe6888" +
      +	"4472ead207c669b0c1a18bef1749d761b145485f3b2021e95bb2ccf4d7e931f50b15613b" +
      +	"7a94e3ebd9bc7f94ae6ae3626296a8647cb887f399327e92a252bebbf865cfc9f230fc8b" +
      +	"c1c2a696d75f89e15c3480f58f47072fb491bfb1a27e5f4b5ad05b9f248605515a690365" +
      +	"434971c5e06f94346bf61bd8a9b04c7e53eb8f48dfca33b548fa364a1a53a6330cd089cd" +
      +	"4915cd89313c90c072d7654b52358a461144b93d8e2865a63e799e5c084429adb035112e" +
      +	"214eb8d2e7103e5d8483b3c3c2e4d2c6fd094b7409ddf1b3d3193e800da20b19f038e7c5" +
      +	"c2afe223db61e29d5c6e2089492e236ab262c145b49faf8ba7f1223bf87de290d07a19fb" +
      +	"4a4ce3d27d5f4a8303ed27d6239e6b8db459a2d9ef6c8229dd75193c3f4c108defbb7527" +
      +	"d2ae83a7a8ce5ba7"
      +
      +const ocspResponseWithoutCertHex = "308201d40a0100a08201cd308201c906092b0601050507300101048201ba3082" +
      +	"01b630819fa2160414884451ff502a695e2d88f421bad90cf2cecbea7c180f3230313330" +
      +	"3631383037323434335a30743072304a300906052b0e03021a0500041448b60d38238df8" +
      +	"456e4ee5843ea394111802979f0414884451ff502a695e2d88f421bad90cf2cecbea7c02" +
      +	"1100f78b13b946fc9635d8ab49de9d2148218000180f3230313330363138303732343433" +
      +	"5aa011180f32303133303632323037323434335a300d06092a864886f70d010105050003" +
      +	"82010100103e18b3d297a5e7a6c07a4fc52ac46a15c0eba96f3be17f0ffe84de5b8c8e05" +
      +	"5a8f577586a849dc4abd6440eb6fedde4622451e2823c1cbf3558b4e8184959c9fe96eff" +
      +	"8bc5f95866c58c6d087519faabfdae37e11d9874f1bc0db292208f645dd848185e4dd38b" +
      +	"6a8547dfa7b74d514a8470015719064d35476b95bebb03d4d2845c5ca15202d2784878f2" +
      +	"0f904c24f09736f044609e9c271381713400e563023d212db422236440c6f377bbf24b2b" +
      +	"9e7dec8698e36a8df68b7592ad3489fb2937afb90eb85d2aa96b81c94c25057dbd4759d9" +
      +	"20a1a65c7f0b6427a224b3c98edd96b9b61f706099951188b0289555ad30a216fb774651" +
      +	"5a35fca2e054dfa8"
      +
      +// PKIX nonce extension
      +var ocspExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 2}
      +var ocspExtensionValueHex = "0403000000"
      +
      +const ocspResponseWithCriticalExtensionHex = "308204fe0a0100a08204f7308204f306092b0601050507300101048204e4308204e03081" +
      +	"dba003020100a11b3019311730150603550403130e4f43535020526573706f6e64657218" +
      +	"0f32303136303130343137303130305a3081a53081a23049300906052b0e03021a050004" +
      +	"14c0fe0278fc99188891b3f212e9c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b21317" +
      +	"7e6f8d157cd4f60210017f77deb3bcbb235d44ccc7dba62e72a116180f32303130303730" +
      +	"373135303130355aa0030a0101180f32303130303730373135303130355aa011180f3230" +
      +	"3130303730373138333531375aa1193017301506092b06010505073001020101ff040504" +
      +	"03000000300d06092a864886f70d01010b0500038201010031c730ca60a7a0d92d8e4010" +
      +	"911b469de95b4d27e89de6537552436237967694f76f701cf6b45c932bd308bca4a8d092" +
      +	"5c604ba94796903091d9e6c000178e72c1f0a24a277dd262835af5d17d3f9d7869606c9f" +
      +	"e7c8e708a41645699895beee38bfa63bb46296683761c5d1d65439b8ab868dc3017c9eeb" +
      +	"b70b82dbf3a31c55b457d48bb9e82b335ed49f445042eaf606b06a3e0639824924c89c63" +
      +	"eccddfe85e6694314138b2536f5e15e07085d0f6e26d4b2f8244bab0d70de07283ac6384" +
      +	"a0501fc3dea7cf0adfd4c7f34871080900e252ddc403e3f0265f2a704af905d3727504ed" +
      +	"28f3214a219d898a022463c78439799ca81c8cbafdbcec34ea937cd6a08202ea308202e6" +
      +	"308202e2308201caa003020102020101300d06092a864886f70d01010b05003019311730" +
      +	"150603550403130e4f43535020526573706f6e646572301e170d31353031333031353530" +
      +	"33335a170d3136303133303135353033335a3019311730150603550403130e4f43535020" +
      +	"526573706f6e64657230820122300d06092a864886f70d01010105000382010f00308201" +
      +	"0a0282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616e" +
      +	"c5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbc" +
      +	"bec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b72" +
      +	"3350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b898" +
      +	"9ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d" +
      +	"285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e6" +
      +	"55b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31" +
      +	"a77dcf920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030" +
      +	"130603551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d" +
      +	"06092a864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab8612" +
      +	"31c15fd5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d2288" +
      +	"9064f4aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f3267" +
      +	"09dce52c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156" +
      +	"d67156e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff5" +
      +	"9e2005d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf9" +
      +	"66705de17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d9" +
      +	"3a25439a94299a65a709756c7a3e568be049d5c38839"
      +
      +const ocspResponseWithExtensionHex = "308204fb0a0100a08204f4308204f006092b0601050507300101048204e1308204dd3081" +
      +	"d8a003020100a11b3019311730150603550403130e4f43535020526573706f6e64657218" +
      +	"0f32303136303130343136353930305a3081a230819f3049300906052b0e03021a050004" +
      +	"14c0fe0278fc99188891b3f212e9c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b21317" +
      +	"7e6f8d157cd4f60210017f77deb3bcbb235d44ccc7dba62e72a116180f32303130303730" +
      +	"373135303130355aa0030a0101180f32303130303730373135303130355aa011180f3230" +
      +	"3130303730373138333531375aa1163014301206092b0601050507300102040504030000" +
      +	"00300d06092a864886f70d01010b05000382010100c09a33e0b2324c852421bb83f85ac9" +
      +	"9113f5426012bd2d2279a8166e9241d18a33c870894250622ffc7ed0c4601b16d624f90b" +
      +	"779265442cdb6868cf40ab304ab4b66e7315ed02cf663b1601d1d4751772b31bc299db23" +
      +	"9aebac78ed6797c06ed815a7a8d18d63cfbb609cafb47ec2e89e37db255216eb09307848" +
      +	"d01be0a3e943653c78212b96ff524b74c9ec456b17cdfb950cc97645c577b2e09ff41dde" +
      +	"b03afb3adaa381cc0f7c1d95663ef22a0f72f2c45613ae8e2b2d1efc96e8463c7d1d8a1d" +
      +	"7e3b35df8fe73a301fc3f804b942b2b3afa337ff105fc1462b7b1c1d75eb4566c8665e59" +
      +	"f80393b0adbf8004ff6c3327ed34f007cb4a3348a7d55e06e3a08202ea308202e6308202" +
      +	"e2308201caa003020102020101300d06092a864886f70d01010b05003019311730150603" +
      +	"550403130e4f43535020526573706f6e646572301e170d3135303133303135353033335a" +
      +	"170d3136303133303135353033335a3019311730150603550403130e4f43535020526573" +
      +	"706f6e64657230820122300d06092a864886f70d01010105000382010f003082010a0282" +
      +	"010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616ec5265b" +
      +	"56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbcbec75a" +
      +	"70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b723350f0" +
      +	"a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b8989ad0f6" +
      +	"3aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d285b6a" +
      +	"04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e655b104" +
      +	"9a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31a77dcf" +
      +	"920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030130603" +
      +	"551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d06092a" +
      +	"864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab861231c15f" +
      +	"d5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d22889064f4" +
      +	"aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f326709dce5" +
      +	"2c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156d67156" +
      +	"e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff59e2005" +
      +	"d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf966705d" +
      +	"e17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d93a2543" +
      +	"9a94299a65a709756c7a3e568be049d5c38839"
      +
      +const ocspRequestHex = "3051304f304d304b3049300906052b0e03021a05000414c0fe0278fc99188891b3f212e9" +
      +	"c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b213177e6f8d157cd4f60210017f77deb3" +
      +	"bcbb235d44ccc7dba62e72"
      +
      +const leafCertHex = "308203c830820331a0030201020210017f77deb3bcbb235d44ccc7dba62e72300d06092a" +
      +	"864886f70d01010505003081ba311f301d060355040a1316566572695369676e20547275" +
      +	"7374204e6574776f726b31173015060355040b130e566572695369676e2c20496e632e31" +
      +	"333031060355040b132a566572695369676e20496e7465726e6174696f6e616c20536572" +
      +	"766572204341202d20436c617373203331493047060355040b13407777772e7665726973" +
      +	"69676e2e636f6d2f43505320496e636f72702e6279205265662e204c494142494c495459" +
      +	"204c54442e286329393720566572695369676e301e170d3132303632313030303030305a" +
      +	"170d3133313233313233353935395a3068310b3009060355040613025553311330110603" +
      +	"550408130a43616c69666f726e6961311230100603550407130950616c6f20416c746f31" +
      +	"173015060355040a130e46616365626f6f6b2c20496e632e311730150603550403140e2a" +
      +	"2e66616365626f6f6b2e636f6d30819f300d06092a864886f70d010101050003818d0030" +
      +	"818902818100ae94b171e2deccc1693e051063240102e0689ae83c39b6b3e74b97d48d7b" +
      +	"23689100b0b496ee62f0e6d356bcf4aa0f50643402f5d1766aa972835a7564723f39bbef" +
      +	"5290ded9bcdbf9d3d55dfad23aa03dc604c54d29cf1d4b3bdbd1a809cfae47b44c7eae17" +
      +	"c5109bee24a9cf4a8d911bb0fd0415ae4c3f430aa12a557e2ae10203010001a382011e30" +
      +	"82011a30090603551d130402300030440603551d20043d303b3039060b6086480186f845" +
      +	"01071703302a302806082b06010505070201161c68747470733a2f2f7777772e76657269" +
      +	"7369676e2e636f6d2f727061303c0603551d1f043530333031a02fa02d862b687474703a" +
      +	"2f2f535652496e746c2d63726c2e766572697369676e2e636f6d2f535652496e746c2e63" +
      +	"726c301d0603551d250416301406082b0601050507030106082b06010505070302300b06" +
      +	"03551d0f0404030205a0303406082b0601050507010104283026302406082b0601050507" +
      +	"30018618687474703a2f2f6f6373702e766572697369676e2e636f6d30270603551d1104" +
      +	"20301e820e2a2e66616365626f6f6b2e636f6d820c66616365626f6f6b2e636f6d300d06" +
      +	"092a864886f70d0101050500038181005b6c2b75f8ed30aa51aad36aba595e555141951f" +
      +	"81a53b447910ac1f76ff78fc2781616b58f3122afc1c87010425e9ed43df1a7ba6498060" +
      +	"67e2688af03db58c7df4ee03309a6afc247ccb134dc33e54c6bc1d5133a532a73273b1d7" +
      +	"9cadc08e7e1a83116d34523340b0305427a21742827c98916698ee7eaf8c3bdd71700817"
      +
      +const issuerCertHex = "30820383308202eca003020102021046fcebbab4d02f0f926098233f93078f300d06092a" +
      +	"864886f70d0101050500305f310b300906035504061302555331173015060355040a130e" +
      +	"566572695369676e2c20496e632e31373035060355040b132e436c617373203320507562" +
      +	"6c6963205072696d6172792043657274696669636174696f6e20417574686f7269747930" +
      +	"1e170d3937303431373030303030305a170d3136313032343233353935395a3081ba311f" +
      +	"301d060355040a1316566572695369676e205472757374204e6574776f726b3117301506" +
      +	"0355040b130e566572695369676e2c20496e632e31333031060355040b132a5665726953" +
      +	"69676e20496e7465726e6174696f6e616c20536572766572204341202d20436c61737320" +
      +	"3331493047060355040b13407777772e766572697369676e2e636f6d2f43505320496e63" +
      +	"6f72702e6279205265662e204c494142494c495459204c54442e28632939372056657269" +
      +	"5369676e30819f300d06092a864886f70d010101050003818d0030818902818100d88280" +
      +	"e8d619027d1f85183925a2652be1bfd405d3bce6363baaf04c6c5bb6e7aa3c734555b2f1" +
      +	"bdea9742ed9a340a15d4a95cf54025ddd907c132b2756cc4cabba3fe56277143aa63f530" +
      +	"3e9328e5faf1093bf3b74d4e39f75c495ab8c11dd3b28afe70309542cbfe2b518b5a3c3a" +
      +	"f9224f90b202a7539c4f34e7ab04b27b6f0203010001a381e33081e0300f0603551d1304" +
      +	"0830060101ff02010030440603551d20043d303b3039060b6086480186f8450107010130" +
      +	"2a302806082b06010505070201161c68747470733a2f2f7777772e766572697369676e2e" +
      +	"636f6d2f43505330340603551d25042d302b06082b0601050507030106082b0601050507" +
      +	"030206096086480186f8420401060a6086480186f845010801300b0603551d0f04040302" +
      +	"0106301106096086480186f842010104040302010630310603551d1f042a30283026a024" +
      +	"a0228620687474703a2f2f63726c2e766572697369676e2e636f6d2f706361332e63726c" +
      +	"300d06092a864886f70d010105050003818100408e4997968a73dd8e4def3e61b7caa062" +
      +	"adf40e0abb753de26ed82cc7bff4b98c369bcaa2d09c724639f6a682036511c4bcbf2da6" +
      +	"f5d93b0ab598fab378b91ef22b4c62d5fdb27a1ddf33fd73f9a5d82d8c2aead1fcb028b6" +
      +	"e94948134b838a1b487b24f738de6f4154b8ab576b06dfc7a2d4a9f6f136628088f28b75" +
      +	"d68071"
      +
      +// Key and certificate for the OCSP responder were not taken from the Thawte
      +// responder, since CreateResponse requires that we have the private key.
      +// Instead, they were generated randomly.
      +const responderPrivateKeyHex = "308204a40201000282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef" +
      +	"1099f0f6616ec5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df" +
      +	"1701dc6ccfbcbec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074f" +
      +	"fde8a99d5b723350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14" +
      +	"c9fc0f27b8989ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa7" +
      +	"7e7332971c7d285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f" +
      +	"1290bafd97e655b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb9" +
      +	"6222b12ace31a77dcf920334dc94581b02030100010282010100bcf0b93d7238bda329a8" +
      +	"72e7149f61bcb37c154330ccb3f42a85c9002c2e2bdea039d77d8581cd19bed94078794e" +
      +	"56293d601547fc4bf6a2f9002fe5772b92b21b254403b403585e3130cc99ccf08f0ef81a" +
      +	"575b38f597ba4660448b54f44bfbb97072b5a2bf043bfeca828cf7741d13698e3f38162b" +
      +	"679faa646b82abd9a72c5c7d722c5fc577a76d2c2daac588accad18516d1bbad10b0dfa2" +
      +	"05cfe246b59e28608a43942e1b71b0c80498075121de5b900d727c31c42c78cf1db5c0aa" +
      +	"5b491e10ea4ed5c0962aaf2ae025dd81fa4ce490d9d6b4a4465411d8e542fc88617e5695" +
      +	"1aa4fc8ea166f2b4d0eb89ef17f2b206bd5f1014bf8fe0e71fe62f2cccf102818100f2dc" +
      +	"ddf878d553286daad68bac4070a82ffec3dc4666a2750f47879eec913f91836f1d976b60" +
      +	"daf9356e078446dafab5bd2e489e5d64f8572ba24a4ba4f3729b5e106c4dd831cc2497a7" +
      +	"e6c7507df05cb64aeb1bbc81c1e340d58b5964cf39cff84ea30c29ec5d3f005ee1362698" +
      +	"07395037955955655292c3e85f6187fa1f9502818100f4a33c102630840705f8c778a47b" +
      +	"87e8da31e68809af981ac5e5999cf1551685d761cdf0d6520361b99aebd5777a940fa64d" +
      +	"327c09fa63746fbb3247ec73a86edf115f1fe5c83598db803881ade71c33c6e956118345" +
      +	"497b98b5e07bb5be75971465ec78f2f9467e1b74956ca9d4c7c3e314e742a72d8b33889c" +
      +	"6c093a466cef0281801d3df0d02124766dd0be98349b19eb36a508c4e679e793ba0a8bef" +
      +	"4d786888c1e9947078b1ea28938716677b4ad8c5052af12eb73ac194915264a913709a0b" +
      +	"7b9f98d4a18edd781a13d49899f91c20dbd8eb2e61d991ba19b5cdc08893f5cb9d39e5a6" +
      +	"0629ea16d426244673b1b3ee72bd30e41fac8395acac40077403de5efd028180050731dd" +
      +	"d71b1a2b96c8d538ba90bb6b62c8b1c74c03aae9a9f59d21a7a82b0d572ef06fa9c807bf" +
      +	"c373d6b30d809c7871df96510c577421d9860c7383fda0919ece19996b3ca13562159193" +
      +	"c0c246471e287f975e8e57034e5136aaf44254e2650def3d51292474c515b1588969112e" +
      +	"0a85cc77073e9d64d2c2fc497844284b02818100d71d63eabf416cf677401ebf965f8314" +
      +	"120b568a57dd3bd9116c629c40dc0c6948bab3a13cc544c31c7da40e76132ef5dd3f7534" +
      +	"45a635930c74326ae3df0edd1bfb1523e3aa259873ac7cf1ac31151ec8f37b528c275622" +
      +	"48f99b8bed59fd4da2576aa6ee20d93a684900bf907e80c66d6e2261ae15e55284b4ed9d" +
      +	"6bdaa059"
      +
      +const responderCertHex = "308202e2308201caa003020102020101300d06092a864886f70d01010b05003019311730" +
      +	"150603550403130e4f43535020526573706f6e646572301e170d31353031333031353530" +
      +	"33335a170d3136303133303135353033335a3019311730150603550403130e4f43535020" +
      +	"526573706f6e64657230820122300d06092a864886f70d01010105000382010f00308201" +
      +	"0a0282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616e" +
      +	"c5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbc" +
      +	"bec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b72" +
      +	"3350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b898" +
      +	"9ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d" +
      +	"285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e6" +
      +	"55b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31" +
      +	"a77dcf920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030" +
      +	"130603551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d" +
      +	"06092a864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab8612" +
      +	"31c15fd5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d2288" +
      +	"9064f4aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f3267" +
      +	"09dce52c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156" +
      +	"d67156e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff5" +
      +	"9e2005d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf9" +
      +	"66705de17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d9" +
      +	"3a25439a94299a65a709756c7a3e568be049d5c38839"
      +
      +const errorResponseHex = "30030a0101"
      diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go
      index e8df91af..592d1864 100644
      --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go
      +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go
      @@ -4,7 +4,7 @@
       
       // Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
       // very similar to PEM except that it has an additional CRC checksum.
      -package armor
      +package armor // import "golang.org/x/crypto/openpgp/armor"
       
       import (
       	"bufio"
      diff --git a/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go
      index 256cefa1..6454d22c 100644
      --- a/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go
      +++ b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go
      @@ -7,7 +7,7 @@
       //
       // Clearsigned messages are cryptographically signed, but the contents of the
       // message are kept in plaintext so that it can be read without special tools.
      -package clearsign
      +package clearsign // import "golang.org/x/crypto/openpgp/clearsign"
       
       import (
       	"bufio"
      diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
      index a553bdee..73f4fe37 100644
      --- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
      +++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
      @@ -10,7 +10,7 @@
       // This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
       // unsuitable for other protocols. RSA should be used in preference in any
       // case.
      -package elgamal
      +package elgamal // import "golang.org/x/crypto/openpgp/elgamal"
       
       import (
       	"crypto/rand"
      diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go
      index 6c4c2f17..eb0550b2 100644
      --- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go
      +++ b/vendor/golang.org/x/crypto/openpgp/errors/errors.go
      @@ -3,7 +3,7 @@
       // license that can be found in the LICENSE file.
       
       // Package errors contains common error types for the OpenPGP packages.
      -package errors
      +package errors // import "golang.org/x/crypto/openpgp/errors"
       
       import (
       	"strconv"
      diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go
      index 78a0f5ef..e2bde111 100644
      --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go
      +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go
      @@ -4,7 +4,7 @@
       
       // Package packet implements parsing and serialization of OpenPGP packets, as
       // specified in RFC 4880.
      -package packet
      +package packet // import "golang.org/x/crypto/openpgp/packet"
       
       import (
       	"bufio"
      diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go
      index 1cbddfcd..dfffc398 100644
      --- a/vendor/golang.org/x/crypto/openpgp/read.go
      +++ b/vendor/golang.org/x/crypto/openpgp/read.go
      @@ -3,7 +3,7 @@
       // license that can be found in the LICENSE file.
       
       // Package openpgp implements high level operations on OpenPGP messages.
      -package openpgp
      +package openpgp // import "golang.org/x/crypto/openpgp"
       
       import (
       	"crypto"
      diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
      index 54164214..0e8641ed 100644
      --- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
      +++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
      @@ -4,7 +4,7 @@
       
       // Package s2k implements the various OpenPGP string-to-key transforms as
       // specified in RFC 4800 section 3.7.1.
      -package s2k
      +package s2k // import "golang.org/x/crypto/openpgp/s2k"
       
       import (
       	"crypto"
      diff --git a/vendor/golang.org/x/crypto/otr/libotr_test_helper.c b/vendor/golang.org/x/crypto/otr/libotr_test_helper.c
      new file mode 100644
      index 00000000..b3ca072d
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/otr/libotr_test_helper.c
      @@ -0,0 +1,197 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This code can be compiled and used to test the otr package against libotr.
      +// See otr_test.go.
      +
      +// +build ignore
      +
      +#include <stdio.h>
      +#include <stdlib.h>
      +#include <unistd.h>
      +
      +#include <proto.h>
      +#include <message.h>
      +#include <privkey.h>
      +
      +static int g_session_established = 0;
      +
      +OtrlPolicy policy(void *opdata, ConnContext *context) {
      +  return OTRL_POLICY_ALWAYS;
      +}
      +
      +int is_logged_in(void *opdata, const char *accountname, const char *protocol,
      +                 const char *recipient) {
      +  return 1;
      +}
      +
      +void inject_message(void *opdata, const char *accountname, const char *protocol,
      +                    const char *recipient, const char *message) {
      +  printf("%s\n", message);
      +  fflush(stdout);
      +  fprintf(stderr, "libotr helper sent: %s\n", message);
      +}
      +
      +void update_context_list(void *opdata) {}
      +
      +void new_fingerprint(void *opdata, OtrlUserState us, const char *accountname,
      +                     const char *protocol, const char *username,
      +                     unsigned char fingerprint[20]) {
      +  fprintf(stderr, "NEW FINGERPRINT\n");
      +  g_session_established = 1;
      +}
      +
      +void write_fingerprints(void *opdata) {}
      +
      +void gone_secure(void *opdata, ConnContext *context) {}
      +
      +void gone_insecure(void *opdata, ConnContext *context) {}
      +
      +void still_secure(void *opdata, ConnContext *context, int is_reply) {}
      +
      +int max_message_size(void *opdata, ConnContext *context) { return 99999; }
      +
      +const char *account_name(void *opdata, const char *account,
      +                         const char *protocol) {
      +  return "ACCOUNT";
      +}
      +
      +void account_name_free(void *opdata, const char *account_name) {}
      +
      +const char *error_message(void *opdata, ConnContext *context,
      +                          OtrlErrorCode err_code) {
      +  return "ERR";
      +}
      +
      +void error_message_free(void *opdata, const char *msg) {}
      +
      +void resent_msg_prefix_free(void *opdata, const char *prefix) {}
      +
      +void handle_smp_event(void *opdata, OtrlSMPEvent smp_event,
      +                      ConnContext *context, unsigned short progress_event,
      +                      char *question) {}
      +
      +void handle_msg_event(void *opdata, OtrlMessageEvent msg_event,
      +                      ConnContext *context, const char *message,
      +                      gcry_error_t err) {
      +  fprintf(stderr, "msg event: %d %s\n", msg_event, message);
      +}
      +
      +OtrlMessageAppOps uiops = {
      +    policy,
      +    NULL,
      +    is_logged_in,
      +    inject_message,
      +    update_context_list,
      +    new_fingerprint,
      +    write_fingerprints,
      +    gone_secure,
      +    gone_insecure,
      +    still_secure,
      +    max_message_size,
      +    account_name,
      +    account_name_free,
      +    NULL, /* received_symkey */
      +    error_message,
      +    error_message_free,
      +    NULL, /* resent_msg_prefix */
      +    resent_msg_prefix_free,
      +    handle_smp_event,
      +    handle_msg_event,
      +    NULL /* create_instag */,
      +    NULL /* convert_msg */,
      +    NULL /* convert_free */,
      +    NULL /* timer_control */,
      +};
      +
      +static const char kPrivateKeyData[] =
      +    "(privkeys (account (name \"account\") (protocol proto) (private-key (dsa "
      +    "(p "
      +    "#00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F"
      +    "30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E"
      +    "5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB"
      +    "8C031D3561FECEE72EBB4A090D450A9B7A857#) (q "
      +    "#00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#) (g "
      +    "#535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F"
      +    "1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F"
      +    "6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57"
      +    "597766A2F9CE3857D7ACE3E1E3BC1FC6F26#) (y "
      +    "#0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF"
      +    "2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93"
      +    "454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A"
      +    "3C0FF501E3DC673B76D7BABF349009B6ECF#) (x "
      +    "#14D0345A3562C480A039E3C72764F72D79043216#)))))\n";
      +
      +int main() {
      +  OTRL_INIT;
      +
      +  // We have to write the private key information to a file because the libotr
      +  // API demands a filename to read from.
      +  const char *tmpdir = "/tmp";
      +  if (getenv("TMP")) {
      +    tmpdir = getenv("TMP");
      +  }
      +
      +  char private_key_file[256];
      +  snprintf(private_key_file, sizeof(private_key_file),
      +           "%s/libotr_test_helper_privatekeys-XXXXXX", tmpdir);
      +  int fd = mkstemp(private_key_file);
      +  if (fd == -1) {
      +    perror("creating temp file");
      +  }
      +  write(fd, kPrivateKeyData, sizeof(kPrivateKeyData) - 1);
      +  close(fd);
      +
      +  OtrlUserState userstate = otrl_userstate_create();
      +  otrl_privkey_read(userstate, private_key_file);
      +  unlink(private_key_file);
      +
      +  fprintf(stderr, "libotr helper started\n");
      +
      +  char buf[4096];
      +
      +  for (;;) {
      +    char *message = fgets(buf, sizeof(buf), stdin);
      +    if (strlen(message) == 0) {
      +      break;
      +    }
      +    message[strlen(message) - 1] = 0;
      +    fprintf(stderr, "libotr helper got: %s\n", message);
      +
      +    char *newmessage = NULL;
      +    OtrlTLV *tlvs;
      +    int ignore_message = otrl_message_receiving(
      +        userstate, &uiops, NULL, "account", "proto", "peer", message,
      +        &newmessage, &tlvs, NULL, NULL, NULL);
      +    if (tlvs) {
      +      otrl_tlv_free(tlvs);
      +    }
      +
      +    if (newmessage != NULL) {
      +      fprintf(stderr, "libotr got: %s\n", newmessage);
      +      otrl_message_free(newmessage);
      +
      +      gcry_error_t err;
      +      char *newmessage = NULL;
      +
      +      err = otrl_message_sending(userstate, &uiops, NULL, "account", "proto",
      +                                 "peer", 0, "test message", NULL, &newmessage,
      +                                 OTRL_FRAGMENT_SEND_SKIP, NULL, NULL, NULL);
      +      if (newmessage == NULL) {
      +        fprintf(stderr, "libotr didn't encrypt message\n");
      +        return 1;
      +      }
      +      write(1, newmessage, strlen(newmessage));
      +      write(1, "\n", 1);
      +      fprintf(stderr, "libotr sent: %s\n", newmessage);
      +      otrl_message_free(newmessage);
      +
      +      g_session_established = 0;
      +      write(1, "?OTRv2?\n", 8);
      +      fprintf(stderr, "libotr sent: ?OTRv2\n");
      +    }
      +  }
      +
      +  return 0;
      +}
      diff --git a/vendor/golang.org/x/crypto/otr/otr.go b/vendor/golang.org/x/crypto/otr/otr.go
      new file mode 100644
      index 00000000..549be116
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/otr/otr.go
      @@ -0,0 +1,1408 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package otr implements the Off The Record protocol as specified in
      +// http://www.cypherpunks.ca/otr/Protocol-v2-3.1.0.html
      +package otr // import "golang.org/x/crypto/otr"
      +
      +import (
      +	"bytes"
      +	"crypto/aes"
      +	"crypto/cipher"
      +	"crypto/dsa"
      +	"crypto/hmac"
      +	"crypto/rand"
      +	"crypto/sha1"
      +	"crypto/sha256"
      +	"crypto/subtle"
      +	"encoding/base64"
      +	"encoding/hex"
      +	"errors"
      +	"hash"
      +	"io"
      +	"math/big"
      +	"strconv"
      +)
      +
      +// SecurityChange describes a change in the security state of a Conversation.
      +type SecurityChange int
      +
      +const (
      +	NoChange SecurityChange = iota
      +	// NewKeys indicates that a key exchange has completed. This occurs
      +	// when a conversation first becomes encrypted, and when the keys are
      +	// renegotiated within an encrypted conversation.
      +	NewKeys
      +	// SMPSecretNeeded indicates that the peer has started an
      +	// authentication and that we need to supply a secret. Call SMPQuestion
      +	// to get the optional, human readable challenge and then Authenticate
      +	// to supply the matching secret.
      +	SMPSecretNeeded
      +	// SMPComplete indicates that an authentication completed. The identity
      +	// of the peer has now been confirmed.
      +	SMPComplete
      +	// SMPFailed indicates that an authentication failed.
      +	SMPFailed
      +	// ConversationEnded indicates that the peer ended the secure
      +	// conversation.
      +	ConversationEnded
      +)
      +
      +// QueryMessage can be sent to a peer to start an OTR conversation.
      +var QueryMessage = "?OTRv2?"
      +
      +// ErrorPrefix can be used to make an OTR error by appending an error message
      +// to it.
      +var ErrorPrefix = "?OTR Error:"
      +
      +var (
      +	fragmentPartSeparator = []byte(",")
      +	fragmentPrefix        = []byte("?OTR,")
      +	msgPrefix             = []byte("?OTR:")
      +	queryMarker           = []byte("?OTR")
      +)
      +
      +// isQuery attempts to parse an OTR query from msg and returns the greatest
      +// common version, or 0 if msg is not an OTR query.
      +func isQuery(msg []byte) (greatestCommonVersion int) {
      +	pos := bytes.Index(msg, queryMarker)
      +	if pos == -1 {
      +		return 0
      +	}
      +	for i, c := range msg[pos+len(queryMarker):] {
      +		if i == 0 {
      +			if c == '?' {
      +				// Indicates support for version 1, but we don't
      +				// implement that.
      +				continue
      +			}
      +
      +			if c != 'v' {
      +				// Invalid message
      +				return 0
      +			}
      +
      +			continue
      +		}
      +
      +		if c == '?' {
      +			// End of message
      +			return
      +		}
      +
      +		if c == ' ' || c == '\t' {
      +			// Probably an invalid message
      +			return 0
      +		}
      +
      +		if c == '2' {
      +			greatestCommonVersion = 2
      +		}
      +	}
      +
      +	return 0
      +}
      +
      +const (
      +	statePlaintext = iota
      +	stateEncrypted
      +	stateFinished
      +)
      +
      +const (
      +	authStateNone = iota
      +	authStateAwaitingDHKey
      +	authStateAwaitingRevealSig
      +	authStateAwaitingSig
      +)
      +
      +const (
      +	msgTypeDHCommit  = 2
      +	msgTypeData      = 3
      +	msgTypeDHKey     = 10
      +	msgTypeRevealSig = 17
      +	msgTypeSig       = 18
      +)
      +
      +const (
      +	// If the requested fragment size is less than this, it will be ignored.
      +	minFragmentSize = 18
      +	// Messages are padded to a multiple of this number of bytes.
      +	paddingGranularity = 256
      +	// The number of bytes in a Diffie-Hellman private value (320-bits).
      +	dhPrivateBytes = 40
      +	// The number of bytes needed to represent an element of the DSA
      +	// subgroup (160-bits).
      +	dsaSubgroupBytes = 20
      +	// The number of bytes of the MAC that are sent on the wire (160-bits).
      +	macPrefixBytes = 20
      +)
      +
      +// These are the global, common group parameters for OTR.
      +var (
      +	p       *big.Int // group prime
      +	g       *big.Int // group generator
      +	q       *big.Int // group order
      +	pMinus2 *big.Int
      +)
      +
      +func init() {
      +	p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", 16)
      +	q, _ = new(big.Int).SetString("7FFFFFFFFFFFFFFFE487ED5110B4611A62633145C06E0E68948127044533E63A0105DF531D89CD9128A5043CC71A026EF7CA8CD9E69D218D98158536F92F8A1BA7F09AB6B6A8E122F242DABB312F3F637A262174D31BF6B585FFAE5B7A035BF6F71C35FDAD44CFD2D74F9208BE258FF324943328F6722D9EE1003E5C50B1DF82CC6D241B0E2AE9CD348B1FD47E9267AFC1B2AE91EE51D6CB0E3179AB1042A95DCF6A9483B84B4B36B3861AA7255E4C0278BA36046511B993FFFFFFFFFFFFFFFF", 16)
      +	g = new(big.Int).SetInt64(2)
      +	pMinus2 = new(big.Int).Sub(p, g)
      +}
      +
      +// Conversation represents a relation with a peer. The zero value is a valid
      +// Conversation, although PrivateKey must be set.
      +//
      +// When communicating with a peer, all inbound messages should be passed to
      +// Conversation.Receive and all outbound messages to Conversation.Send. The
      +// Conversation will take care of maintaining the encryption state and
      +// negotiating encryption as needed.
      +type Conversation struct {
      +	// PrivateKey contains the private key to use to sign key exchanges.
      +	PrivateKey *PrivateKey
      +
      +	// Rand can be set to override the entropy source. Otherwise,
      +	// crypto/rand will be used.
      +	Rand io.Reader
      +	// If FragmentSize is set, all messages produced by Receive and Send
      +	// will be fragmented into messages of, at most, this number of bytes.
      +	FragmentSize int
      +
      +	// Once Receive has returned NewKeys once, the following fields are
      +	// valid.
      +	SSID           [8]byte
      +	TheirPublicKey PublicKey
      +
      +	state, authState int
      +
      +	r       [16]byte
      +	x, y    *big.Int
      +	gx, gy  *big.Int
      +	gxBytes []byte
      +	digest  [sha256.Size]byte
      +
      +	revealKeys, sigKeys akeKeys
      +
      +	myKeyId         uint32
      +	myCurrentDHPub  *big.Int
      +	myCurrentDHPriv *big.Int
      +	myLastDHPub     *big.Int
      +	myLastDHPriv    *big.Int
      +
      +	theirKeyId        uint32
      +	theirCurrentDHPub *big.Int
      +	theirLastDHPub    *big.Int
      +
      +	keySlots [4]keySlot
      +
      +	myCounter    [8]byte
      +	theirLastCtr [8]byte
      +	oldMACs      []byte
      +
      +	k, n int // fragment state
      +	frag []byte
      +
      +	smp smpState
      +}
      +
      +// A keySlot contains key material for a specific (their keyid, my keyid) pair.
      +type keySlot struct {
      +	// used is true if this slot is valid. If false, it's free for reuse.
      +	used                   bool
      +	theirKeyId             uint32
      +	myKeyId                uint32
      +	sendAESKey, recvAESKey []byte
      +	sendMACKey, recvMACKey []byte
      +	theirLastCtr           [8]byte
      +}
      +
      +// akeKeys are generated during key exchange. There's one set for the reveal
      +// signature message and another for the signature message. In the protocol
      +// spec the latter are indicated with a prime mark.
      +type akeKeys struct {
      +	c      [16]byte
      +	m1, m2 [32]byte
      +}
      +
      +func (c *Conversation) rand() io.Reader {
      +	if c.Rand != nil {
      +		return c.Rand
      +	}
      +	return rand.Reader
      +}
      +
      +func (c *Conversation) randMPI(buf []byte) *big.Int {
      +	_, err := io.ReadFull(c.rand(), buf)
      +	if err != nil {
      +		panic("otr: short read from random source")
      +	}
      +
      +	return new(big.Int).SetBytes(buf)
      +}
      +
      +// tlv represents the type-length value from the protocol.
      +type tlv struct {
      +	typ, length uint16
      +	data        []byte
      +}
      +
      +const (
      +	tlvTypePadding          = 0
      +	tlvTypeDisconnected     = 1
      +	tlvTypeSMP1             = 2
      +	tlvTypeSMP2             = 3
      +	tlvTypeSMP3             = 4
      +	tlvTypeSMP4             = 5
      +	tlvTypeSMPAbort         = 6
      +	tlvTypeSMP1WithQuestion = 7
      +)
      +
      +// Receive handles a message from a peer. It returns a human readable message,
      +// an indicator of whether that message was encrypted, a hint about the
      +// encryption state and zero or more messages to send back to the peer.
      +// These messages do not need to be passed to Send before transmission.
      +func (c *Conversation) Receive(in []byte) (out []byte, encrypted bool, change SecurityChange, toSend [][]byte, err error) {
      +	if bytes.HasPrefix(in, fragmentPrefix) {
      +		in, err = c.processFragment(in)
      +		if in == nil || err != nil {
      +			return
      +		}
      +	}
      +
      +	if bytes.HasPrefix(in, msgPrefix) && in[len(in)-1] == '.' {
      +		in = in[len(msgPrefix) : len(in)-1]
      +	} else if version := isQuery(in); version > 0 {
      +		c.authState = authStateAwaitingDHKey
      +		c.reset()
      +		toSend = c.encode(c.generateDHCommit())
      +		return
      +	} else {
      +		// plaintext message
      +		out = in
      +		return
      +	}
      +
      +	msg := make([]byte, base64.StdEncoding.DecodedLen(len(in)))
      +	msgLen, err := base64.StdEncoding.Decode(msg, in)
      +	if err != nil {
      +		err = errors.New("otr: invalid base64 encoding in message")
      +		return
      +	}
      +	msg = msg[:msgLen]
      +
      +	// The first two bytes are the protocol version (2)
      +	if len(msg) < 3 || msg[0] != 0 || msg[1] != 2 {
      +		err = errors.New("otr: invalid OTR message")
      +		return
      +	}
      +
      +	msgType := int(msg[2])
      +	msg = msg[3:]
      +
      +	switch msgType {
      +	case msgTypeDHCommit:
      +		switch c.authState {
      +		case authStateNone:
      +			c.authState = authStateAwaitingRevealSig
      +			if err = c.processDHCommit(msg); err != nil {
      +				return
      +			}
      +			c.reset()
      +			toSend = c.encode(c.generateDHKey())
      +			return
      +		case authStateAwaitingDHKey:
      +			// This is a 'SYN-crossing'. The greater digest wins.
      +			var cmp int
      +			if cmp, err = c.compareToDHCommit(msg); err != nil {
      +				return
      +			}
      +			if cmp > 0 {
      +				// We win. Retransmit DH commit.
      +				toSend = c.encode(c.serializeDHCommit())
      +				return
      +			} else {
      +				// They win. We forget about our DH commit.
      +				c.authState = authStateAwaitingRevealSig
      +				if err = c.processDHCommit(msg); err != nil {
      +					return
      +				}
      +				c.reset()
      +				toSend = c.encode(c.generateDHKey())
      +				return
      +			}
      +		case authStateAwaitingRevealSig:
      +			if err = c.processDHCommit(msg); err != nil {
      +				return
      +			}
      +			toSend = c.encode(c.serializeDHKey())
      +		case authStateAwaitingSig:
      +			if err = c.processDHCommit(msg); err != nil {
      +				return
      +			}
      +			c.reset()
      +			toSend = c.encode(c.generateDHKey())
      +			c.authState = authStateAwaitingRevealSig
      +		default:
      +			panic("bad state")
      +		}
      +	case msgTypeDHKey:
      +		switch c.authState {
      +		case authStateAwaitingDHKey:
      +			var isSame bool
      +			if isSame, err = c.processDHKey(msg); err != nil {
      +				return
      +			}
      +			if isSame {
      +				err = errors.New("otr: unexpected duplicate DH key")
      +				return
      +			}
      +			toSend = c.encode(c.generateRevealSig())
      +			c.authState = authStateAwaitingSig
      +		case authStateAwaitingSig:
      +			var isSame bool
      +			if isSame, err = c.processDHKey(msg); err != nil {
      +				return
      +			}
      +			if isSame {
      +				toSend = c.encode(c.serializeDHKey())
      +			}
      +		}
      +	case msgTypeRevealSig:
      +		if c.authState != authStateAwaitingRevealSig {
      +			return
      +		}
      +		if err = c.processRevealSig(msg); err != nil {
      +			return
      +		}
      +		toSend = c.encode(c.generateSig())
      +		c.authState = authStateNone
      +		c.state = stateEncrypted
      +		change = NewKeys
      +	case msgTypeSig:
      +		if c.authState != authStateAwaitingSig {
      +			return
      +		}
      +		if err = c.processSig(msg); err != nil {
      +			return
      +		}
      +		c.authState = authStateNone
      +		c.state = stateEncrypted
      +		change = NewKeys
      +	case msgTypeData:
      +		if c.state != stateEncrypted {
      +			err = errors.New("otr: encrypted message received without encrypted session established")
      +			return
      +		}
      +		var tlvs []tlv
      +		out, tlvs, err = c.processData(msg)
      +		encrypted = true
      +
      +	EachTLV:
      +		for _, inTLV := range tlvs {
      +			switch inTLV.typ {
      +			case tlvTypeDisconnected:
      +				change = ConversationEnded
      +				c.state = stateFinished
      +				break EachTLV
      +			case tlvTypeSMP1, tlvTypeSMP2, tlvTypeSMP3, tlvTypeSMP4, tlvTypeSMPAbort, tlvTypeSMP1WithQuestion:
      +				var reply tlv
      +				var complete bool
      +				reply, complete, err = c.processSMP(inTLV)
      +				if err == smpSecretMissingError {
      +					err = nil
      +					change = SMPSecretNeeded
      +					c.smp.saved = &inTLV
      +					return
      +				}
      +				if err == smpFailureError {
      +					err = nil
      +					change = SMPFailed
      +				} else if complete {
      +					change = SMPComplete
      +				}
      +				if reply.typ != 0 {
      +					toSend = c.encode(c.generateData(nil, &reply))
      +				}
      +				break EachTLV
      +			default:
      +				// skip unknown TLVs
      +			}
      +		}
      +	default:
      +		err = errors.New("otr: unknown message type " + strconv.Itoa(msgType))
      +	}
      +
      +	return
      +}
      +
      +// Send takes a human readable message from the local user, possibly encrypts
      +// it and returns zero one or more messages to send to the peer.
      +func (c *Conversation) Send(msg []byte) ([][]byte, error) {
      +	switch c.state {
      +	case statePlaintext:
      +		return [][]byte{msg}, nil
      +	case stateEncrypted:
      +		return c.encode(c.generateData(msg, nil)), nil
      +	case stateFinished:
      +		return nil, errors.New("otr: cannot send message because secure conversation has finished")
      +	}
      +
      +	return nil, errors.New("otr: cannot send message in current state")
      +}
      +
      +// SMPQuestion returns the human readable challenge question from the peer.
      +// It's only valid after Receive has returned SMPSecretNeeded.
      +func (c *Conversation) SMPQuestion() string {
      +	return c.smp.question
      +}
      +
+// Authenticate begins an authentication with the peer. Authentication involves
+// an optional challenge message and a shared secret. The authentication
+// proceeds until either Receive returns SMPComplete, SMPSecretNeeded (which
+// indicates that a new authentication is happening and thus this one was
+// aborted) or SMPFailed.
+func (c *Conversation) Authenticate(question string, mutualSecret []byte) (toSend [][]byte, err error) {
+	if c.state != stateEncrypted {
+		err = errors.New("otr: can't authenticate a peer without a secure conversation established")
+		return
+	}
+
+	// If the peer already sent the first SMP message (saved while waiting for
+	// our secret), answer it now rather than starting a fresh exchange.
+	if c.smp.saved != nil {
+		c.calcSMPSecret(mutualSecret, false /* they started it */)
+
+		var out tlv
+		var complete bool
+		out, complete, err = c.processSMP(*c.smp.saved)
+		if complete {
+			// Invariant: SMP needs several round trips; completing on the
+			// very first message indicates a programming error.
+			panic("SMP completed on the first message")
+		}
+		c.smp.saved = nil
+		if out.typ != 0 {
+			toSend = c.encode(c.generateData(nil, &out))
+		}
+		return
+	}
+
+	// Otherwise we are the initiator: derive the secret and send the opening
+	// SMP messages, each wrapped in its own encrypted data message.
+	c.calcSMPSecret(mutualSecret, true /* we started it */)
+	outs := c.startSMP(question)
+	for _, out := range outs {
+		toSend = append(toSend, c.encode(c.generateData(nil, &out))...)
+	}
+	return
+}
      +
+// End ends a secure conversation by generating a termination message for
+// the peer and switches to unencrypted communication.
+func (c *Conversation) End() (toSend [][]byte) {
+	switch c.state {
+	case statePlaintext:
+		// Nothing to tear down.
+		return nil
+	case stateEncrypted:
+		c.state = statePlaintext
+		// Notify the peer via a TLV-only data message before dropping out.
+		return c.encode(c.generateData(nil, &tlv{typ: tlvTypeDisconnected}))
+	case stateFinished:
+		// Peer already ended the session; just reset our state.
+		c.state = statePlaintext
+		return nil
+	}
+	panic("unreachable")
+}
      +
+// IsEncrypted returns true if a message passed to Send would be encrypted
+// before transmission. This result remains valid until the next call to
+// Receive or End, which may change the state of the Conversation.
+func (c *Conversation) IsEncrypted() bool {
+	return c.state == stateEncrypted
+}
      +
+// fragmentError is returned for any malformed OTR fragment.
+var fragmentError = errors.New("otr: invalid OTR fragment")
+
+// processFragment processes a fragmented OTR message and possibly returns a
+// complete message. Fragmented messages look like "?OTR,k,n,msg," where k is
+// the fragment number (starting from 1), n is the number of fragments in this
+// message and msg is a substring of the base64 encoded message.
+func (c *Conversation) processFragment(in []byte) (out []byte, err error) {
+	in = in[len(fragmentPrefix):] // remove "?OTR,"
+	parts := bytes.Split(in, fragmentPartSeparator)
+	// Expect exactly k, n, msg and an empty part after the trailing comma.
+	if len(parts) != 4 || len(parts[3]) != 0 {
+		return nil, fragmentError
+	}
+
+	k, err := strconv.Atoi(string(parts[0]))
+	if err != nil {
+		return nil, fragmentError
+	}
+
+	n, err := strconv.Atoi(string(parts[1]))
+	if err != nil {
+		return nil, fragmentError
+	}
+
+	if k < 1 || n < 1 || k > n {
+		return nil, fragmentError
+	}
+
+	if k == 1 {
+		// First fragment: restart reassembly, keeping c.frag's capacity.
+		c.frag = append(c.frag[:0], parts[2]...)
+		c.k, c.n = k, n
+	} else if n == c.n && k == c.k+1 {
+		// Next in-order fragment of the current message.
+		c.frag = append(c.frag, parts[2]...)
+		c.k++
+	} else {
+		// Out-of-order or mismatched fragment: discard the partial message.
+		c.frag = c.frag[:0]
+		c.n, c.k = 0, 0
+	}
+
+	if c.n > 0 && c.k == c.n {
+		// All fragments received; reset counters and hand back the payload.
+		c.n, c.k = 0, 0
+		return c.frag, nil
+	}
+
+	return nil, nil
+}
      +
+// generateDHCommit starts the AKE: it picks a fresh DH private value x,
+// computes g^x, and returns a DH commit message containing AES-CTR(r, g^x)
+// plus SHA256(g^x). r is revealed later in the reveal-signature message.
+func (c *Conversation) generateDHCommit() []byte {
+	_, err := io.ReadFull(c.rand(), c.r[:])
+	if err != nil {
+		panic("otr: short read from random source")
+	}
+
+	var xBytes [dhPrivateBytes]byte
+	c.x = c.randMPI(xBytes[:])
+	c.gx = new(big.Int).Exp(g, c.x, p)
+	c.gy = nil
+	c.gxBytes = appendMPI(nil, c.gx)
+
+	// Commitment hash over the *plaintext* serialization of g^x.
+	h := sha256.New()
+	h.Write(c.gxBytes)
+	h.Sum(c.digest[:0])
+
+	aesCipher, err := aes.NewCipher(c.r[:])
+	if err != nil {
+		panic(err.Error())
+	}
+
+	// Encrypt g^x in place with AES-CTR under key r and a zero IV.
+	var iv [aes.BlockSize]byte
+	ctr := cipher.NewCTR(aesCipher, iv[:])
+	ctr.XORKeyStream(c.gxBytes, c.gxBytes)
+
+	return c.serializeDHCommit()
+}
      +
+// serializeDHCommit wire-encodes the DH commit message from the encrypted
+// g^x bytes and the commitment digest prepared by generateDHCommit.
+func (c *Conversation) serializeDHCommit() []byte {
+	var ret []byte
+	ret = appendU16(ret, 2) // protocol version
+	ret = append(ret, msgTypeDHCommit)
+	ret = appendData(ret, c.gxBytes)
+	ret = appendData(ret, c.digest[:])
+	return ret
+}
      +
+// processDHCommit parses a peer's DH commit message, storing the encrypted
+// g^x and the commitment digest for later verification in processRevealSig.
+func (c *Conversation) processDHCommit(in []byte) error {
+	var ok1, ok2 bool
+	c.gxBytes, in, ok1 = getData(in)
+	digest, in, ok2 := getData(in)
+	if !ok1 || !ok2 || len(in) > 0 {
+		return errors.New("otr: corrupt DH commit message")
+	}
+	copy(c.digest[:], digest)
+	return nil
+}
      +
+// compareToDHCommit compares our stored commitment digest against the digest
+// in an incoming DH commit message. It is used to break ties when both sides
+// send DH commits simultaneously (the higher digest wins).
+func (c *Conversation) compareToDHCommit(in []byte) (int, error) {
+	_, in, ok1 := getData(in)
+	digest, in, ok2 := getData(in)
+	if !ok1 || !ok2 || len(in) > 0 {
+		return 0, errors.New("otr: corrupt DH commit message")
+	}
+	return bytes.Compare(c.digest[:], digest), nil
+}
      +
+// generateDHKey picks our DH private value y, computes g^y and returns the
+// serialized DH key message (the response to a DH commit).
+func (c *Conversation) generateDHKey() []byte {
+	var yBytes [dhPrivateBytes]byte
+	c.y = c.randMPI(yBytes[:])
+	c.gy = new(big.Int).Exp(g, c.y, p)
+	return c.serializeDHKey()
+}
      +
+// serializeDHKey wire-encodes the DH key message carrying g^y.
+func (c *Conversation) serializeDHKey() []byte {
+	var ret []byte
+	ret = appendU16(ret, 2) // protocol version
+	ret = append(ret, msgTypeDHKey)
+	ret = appendMPI(ret, c.gy)
+	return ret
+}
      +
+// processDHKey parses a peer's DH key message and stores g^y after a range
+// check. If we already hold a g^y (e.g. a retransmission), isSame reports
+// whether the new value matches rather than overwriting it.
+func (c *Conversation) processDHKey(in []byte) (isSame bool, err error) {
+	gy, in, ok := getMPI(in)
+	if !ok {
+		err = errors.New("otr: corrupt DH key message")
+		return
+	}
+	// Reject degenerate public values: require g <= gy <= p-2.
+	if gy.Cmp(g) < 0 || gy.Cmp(pMinus2) > 0 {
+		err = errors.New("otr: DH value out of range")
+		return
+	}
+	if c.gy != nil {
+		isSame = c.gy.Cmp(gy) == 0
+		return
+	}
+	c.gy = gy
+	return
+}
      +
+// generateEncryptedSignature builds the encrypted-signature payload shared by
+// the reveal-signature and signature AKE messages: it MACs (with keys.m1) the
+// DH values, our public key and key id, signs that MAC, encrypts everything
+// with AES-CTR under keys.c, and returns the length-prefixed ciphertext plus
+// an HMAC (keys.m2) over it. xFirst selects the g^x/g^y ordering, which
+// differs between the two message types.
+func (c *Conversation) generateEncryptedSignature(keys *akeKeys, xFirst bool) ([]byte, []byte) {
+	var xb []byte
+	xb = c.PrivateKey.PublicKey.Serialize(xb)
+
+	var verifyData []byte
+	if xFirst {
+		verifyData = appendMPI(verifyData, c.gx)
+		verifyData = appendMPI(verifyData, c.gy)
+	} else {
+		verifyData = appendMPI(verifyData, c.gy)
+		verifyData = appendMPI(verifyData, c.gx)
+	}
+	verifyData = append(verifyData, xb...)
+	verifyData = appendU32(verifyData, c.myKeyId)
+
+	mac := hmac.New(sha256.New, keys.m1[:])
+	mac.Write(verifyData)
+	mb := mac.Sum(nil)
+
+	// The signed blob is: our public key || key id || DSA signature over mb.
+	xb = appendU32(xb, c.myKeyId)
+	xb = append(xb, c.PrivateKey.Sign(c.rand(), mb)...)
+
+	aesCipher, err := aes.NewCipher(keys.c[:])
+	if err != nil {
+		panic(err.Error())
+	}
+	var iv [aes.BlockSize]byte
+	ctr := cipher.NewCTR(aesCipher, iv[:])
+	ctr.XORKeyStream(xb, xb)
+
+	// The outer MAC covers the length-prefixed ciphertext.
+	mac = hmac.New(sha256.New, keys.m2[:])
+	encryptedSig := appendData(nil, xb)
+	mac.Write(encryptedSig)
+
+	return encryptedSig, mac.Sum(nil)
+}
      +
+// generateRevealSig completes our side of the AKE after receiving the peer's
+// DH key: it derives the session keys from s = (g^y)^x, reveals r (the AES
+// key that encrypted our earlier DH commit) and attaches our encrypted
+// signature. It also promotes g^x/x to the current data-message DH key pair.
+func (c *Conversation) generateRevealSig() []byte {
+	s := new(big.Int).Exp(c.gy, c.x, p)
+	c.calcAKEKeys(s)
+	c.myKeyId++
+
+	encryptedSig, mac := c.generateEncryptedSignature(&c.revealKeys, true /* gx comes first */)
+
+	c.myCurrentDHPub = c.gx
+	c.myCurrentDHPriv = c.x
+	c.rotateDHKeys()
+	incCounter(&c.myCounter)
+
+	var ret []byte
+	ret = appendU16(ret, 2)
+	ret = append(ret, msgTypeRevealSig)
+	ret = appendData(ret, c.r[:])
+	ret = append(ret, encryptedSig...)
+	// Only the first 20 bytes of the HMAC-SHA256 go on the wire.
+	ret = append(ret, mac[:20]...)
+	return ret
+}
      +
+// processEncryptedSig verifies and decrypts the encrypted-signature payload of
+// a reveal-signature or signature message: it checks the outer MAC (keys.m2),
+// decrypts with keys.c, parses the peer's public key and key id, and verifies
+// the inner DSA signature over the keys.m1 MAC of the exchange parameters.
+// On success it records the peer's key id and resets their counter.
+func (c *Conversation) processEncryptedSig(encryptedSig, theirMAC []byte, keys *akeKeys, xFirst bool) error {
+	mac := hmac.New(sha256.New, keys.m2[:])
+	mac.Write(appendData(nil, encryptedSig))
+	myMAC := mac.Sum(nil)[:20]
+
+	// Constant-time comparison to avoid leaking MAC bytes via timing.
+	if len(myMAC) != len(theirMAC) || subtle.ConstantTimeCompare(myMAC, theirMAC) == 0 {
+		return errors.New("bad signature MAC in encrypted signature")
+	}
+
+	aesCipher, err := aes.NewCipher(keys.c[:])
+	if err != nil {
+		panic(err.Error())
+	}
+	var iv [aes.BlockSize]byte
+	ctr := cipher.NewCTR(aesCipher, iv[:])
+	// Decrypt in place (CTR mode is its own inverse).
+	ctr.XORKeyStream(encryptedSig, encryptedSig)
+
+	sig := encryptedSig
+	sig, ok1 := c.TheirPublicKey.Parse(sig)
+	keyId, sig, ok2 := getU32(sig)
+	if !ok1 || !ok2 {
+		return errors.New("otr: corrupt encrypted signature")
+	}
+
+	// Rebuild the exact byte string the peer MACed and signed.
+	var verifyData []byte
+	if xFirst {
+		verifyData = appendMPI(verifyData, c.gx)
+		verifyData = appendMPI(verifyData, c.gy)
+	} else {
+		verifyData = appendMPI(verifyData, c.gy)
+		verifyData = appendMPI(verifyData, c.gx)
+	}
+	verifyData = c.TheirPublicKey.Serialize(verifyData)
+	verifyData = appendU32(verifyData, keyId)
+
+	mac = hmac.New(sha256.New, keys.m1[:])
+	mac.Write(verifyData)
+	mb := mac.Sum(nil)
+
+	sig, ok1 = c.TheirPublicKey.Verify(mb, sig)
+	if !ok1 {
+		return errors.New("bad signature in encrypted signature")
+	}
+	// Any bytes left after the signature indicate a malformed payload.
+	if len(sig) > 0 {
+		return errors.New("corrupt encrypted signature")
+	}
+
+	c.theirKeyId = keyId
+	zero(c.theirLastCtr[:])
+	return nil
+}
      +
+// processRevealSig handles the peer's reveal-signature message: it uses the
+// revealed key r to decrypt the g^x committed to earlier, checks the
+// commitment digest, derives the AKE keys from s = (g^x)^y and verifies the
+// attached encrypted signature.
+func (c *Conversation) processRevealSig(in []byte) error {
+	r, in, ok1 := getData(in)
+	encryptedSig, in, ok2 := getData(in)
+	theirMAC := in
+	if !ok1 || !ok2 || len(theirMAC) != 20 {
+		return errors.New("otr: corrupt reveal signature message")
+	}
+
+	aesCipher, err := aes.NewCipher(r)
+	if err != nil {
+		return errors.New("otr: cannot create AES cipher from reveal signature message: " + err.Error())
+	}
+	var iv [aes.BlockSize]byte
+	ctr := cipher.NewCTR(aesCipher, iv[:])
+	// Decrypt the stored DH-commit payload in place.
+	ctr.XORKeyStream(c.gxBytes, c.gxBytes)
+	h := sha256.New()
+	h.Write(c.gxBytes)
+	digest := h.Sum(nil)
+	// The decrypted g^x must hash to the digest committed to earlier.
+	if len(digest) != len(c.digest) || subtle.ConstantTimeCompare(digest, c.digest[:]) == 0 {
+		return errors.New("otr: bad commit MAC in reveal signature message")
+	}
+	var rest []byte
+	c.gx, rest, ok1 = getMPI(c.gxBytes)
+	if !ok1 || len(rest) > 0 {
+		return errors.New("otr: gx corrupt after decryption")
+	}
+	// Reject degenerate DH values: require g <= gx <= p-2.
+	if c.gx.Cmp(g) < 0 || c.gx.Cmp(pMinus2) > 0 {
+		return errors.New("otr: DH value out of range")
+	}
+	s := new(big.Int).Exp(c.gx, c.y, p)
+	c.calcAKEKeys(s)
+
+	if err := c.processEncryptedSig(encryptedSig, theirMAC, &c.revealKeys, true /* gx comes first */); err != nil {
+		return errors.New("otr: in reveal signature message: " + err.Error())
+	}
+
+	c.theirCurrentDHPub = c.gx
+	c.theirLastDHPub = nil
+
+	return nil
+}
      +
+// generateSig builds the final signature message of the AKE (the response to
+// a reveal-signature message) and promotes g^y/y to the current data-message
+// DH key pair.
+func (c *Conversation) generateSig() []byte {
+	c.myKeyId++
+
+	encryptedSig, mac := c.generateEncryptedSignature(&c.sigKeys, false /* gy comes first */)
+
+	c.myCurrentDHPub = c.gy
+	c.myCurrentDHPriv = c.y
+	c.rotateDHKeys()
+	incCounter(&c.myCounter)
+
+	var ret []byte
+	ret = appendU16(ret, 2)
+	ret = append(ret, msgTypeSig)
+	ret = append(ret, encryptedSig...)
+	ret = append(ret, mac[:macPrefixBytes]...)
+	return ret
+}
      +
+// processSig handles the peer's signature message, verifying the encrypted
+// signature with the sig-message key set and recording their DH public key.
+func (c *Conversation) processSig(in []byte) error {
+	encryptedSig, in, ok1 := getData(in)
+	theirMAC := in
+	if !ok1 || len(theirMAC) != macPrefixBytes {
+		return errors.New("otr: corrupt signature message")
+	}
+
+	if err := c.processEncryptedSig(encryptedSig, theirMAC, &c.sigKeys, false /* gy comes first */); err != nil {
+		return errors.New("otr: in signature message: " + err.Error())
+	}
+
+	c.theirCurrentDHPub = c.gy
+	c.theirLastDHPub = nil
+
+	return nil
+}
      +
+// rotateDHKeys retires our previous DH key pair and generates a fresh one.
+// MAC keys of slots that used the retired key id are queued in c.oldMACs so
+// they can be publicly revealed in the next data message, per the OTR spec.
+func (c *Conversation) rotateDHKeys() {
+	// evict slots using our retired key id
+	for i := range c.keySlots {
+		slot := &c.keySlots[i]
+		if slot.used && slot.myKeyId == c.myKeyId-1 {
+			slot.used = false
+			c.oldMACs = append(c.oldMACs, slot.recvMACKey...)
+		}
+	}
+
+	c.myLastDHPriv = c.myCurrentDHPriv
+	c.myLastDHPub = c.myCurrentDHPub
+
+	var xBytes [dhPrivateBytes]byte
+	c.myCurrentDHPriv = c.randMPI(xBytes[:])
+	c.myCurrentDHPub = new(big.Int).Exp(g, c.myCurrentDHPriv, p)
+	c.myKeyId++
+}
      +
      +func (c *Conversation) processData(in []byte) (out []byte, tlvs []tlv, err error) {
      +	origIn := in
      +	flags, in, ok1 := getU8(in)
      +	theirKeyId, in, ok2 := getU32(in)
      +	myKeyId, in, ok3 := getU32(in)
      +	y, in, ok4 := getMPI(in)
      +	counter, in, ok5 := getNBytes(in, 8)
      +	encrypted, in, ok6 := getData(in)
      +	macedData := origIn[:len(origIn)-len(in)]
      +	theirMAC, in, ok7 := getNBytes(in, macPrefixBytes)
      +	_, in, ok8 := getData(in)
      +	if !ok1 || !ok2 || !ok3 || !ok4 || !ok5 || !ok6 || !ok7 || !ok8 || len(in) > 0 {
      +		err = errors.New("otr: corrupt data message")
      +		return
      +	}
      +
      +	ignoreErrors := flags&1 != 0
      +
      +	slot, err := c.calcDataKeys(myKeyId, theirKeyId)
      +	if err != nil {
      +		if ignoreErrors {
      +			err = nil
      +		}
      +		return
      +	}
      +
      +	mac := hmac.New(sha1.New, slot.recvMACKey)
      +	mac.Write([]byte{0, 2, 3})
      +	mac.Write(macedData)
      +	myMAC := mac.Sum(nil)
      +	if len(myMAC) != len(theirMAC) || subtle.ConstantTimeCompare(myMAC, theirMAC) == 0 {
      +		if !ignoreErrors {
      +			err = errors.New("otr: bad MAC on data message")
      +		}
      +		return
      +	}
      +
      +	if bytes.Compare(counter, slot.theirLastCtr[:]) <= 0 {
      +		err = errors.New("otr: counter regressed")
      +		return
      +	}
      +	copy(slot.theirLastCtr[:], counter)
      +
      +	var iv [aes.BlockSize]byte
      +	copy(iv[:], counter)
      +	aesCipher, err := aes.NewCipher(slot.recvAESKey)
      +	if err != nil {
      +		panic(err.Error())
      +	}
      +	ctr := cipher.NewCTR(aesCipher, iv[:])
      +	ctr.XORKeyStream(encrypted, encrypted)
      +	decrypted := encrypted
      +
      +	if myKeyId == c.myKeyId {
      +		c.rotateDHKeys()
      +	}
      +	if theirKeyId == c.theirKeyId {
      +		// evict slots using their retired key id
      +		for i := range c.keySlots {
      +			slot := &c.keySlots[i]
      +			if slot.used && slot.theirKeyId == theirKeyId-1 {
      +				slot.used = false
      +				c.oldMACs = append(c.oldMACs, slot.recvMACKey...)
      +			}
      +		}
      +
      +		c.theirLastDHPub = c.theirCurrentDHPub
      +		c.theirKeyId++
      +		c.theirCurrentDHPub = y
      +	}
      +
      +	if nulPos := bytes.IndexByte(decrypted, 0); nulPos >= 0 {
      +		out = decrypted[:nulPos]
      +		tlvData := decrypted[nulPos+1:]
      +		for len(tlvData) > 0 {
      +			var t tlv
      +			var ok1, ok2, ok3 bool
      +
      +			t.typ, tlvData, ok1 = getU16(tlvData)
      +			t.length, tlvData, ok2 = getU16(tlvData)
      +			t.data, tlvData, ok3 = getNBytes(tlvData, int(t.length))
      +			if !ok1 || !ok2 || !ok3 {
      +				err = errors.New("otr: corrupt tlv data")
      +			}
      +			tlvs = append(tlvs, t)
      +		}
      +	} else {
      +		out = decrypted
      +	}
      +
      +	return
      +}
      +
+// generateData builds an encrypted data message carrying msg and, optionally,
+// one extra TLV. The plaintext is NUL-terminated, padded to a multiple of
+// paddingGranularity with a padding TLV, encrypted with AES-CTR, MACed with
+// HMAC-SHA1, and any retired MAC keys queued in c.oldMACs are revealed.
+func (c *Conversation) generateData(msg []byte, extra *tlv) []byte {
+	// Always send under our previous key id; the peer rotates on receipt.
+	slot, err := c.calcDataKeys(c.myKeyId-1, c.theirKeyId)
+	if err != nil {
+		panic("otr: failed to generate sending keys: " + err.Error())
+	}
+
+	var plaintext []byte
+	plaintext = append(plaintext, msg...)
+	plaintext = append(plaintext, 0)
+
+	// +4 accounts for the padding TLV's own type and length fields.
+	padding := paddingGranularity - ((len(plaintext) + 4) % paddingGranularity)
+	plaintext = appendU16(plaintext, tlvTypePadding)
+	plaintext = appendU16(plaintext, uint16(padding))
+	for i := 0; i < padding; i++ {
+		plaintext = append(plaintext, 0)
+	}
+
+	if extra != nil {
+		plaintext = appendU16(plaintext, extra.typ)
+		plaintext = appendU16(plaintext, uint16(len(extra.data)))
+		plaintext = append(plaintext, extra.data...)
+	}
+
+	encrypted := make([]byte, len(plaintext))
+
+	// The CTR IV is the 8-byte message counter, zero-extended to a block.
+	var iv [aes.BlockSize]byte
+	copy(iv[:], c.myCounter[:])
+	aesCipher, err := aes.NewCipher(slot.sendAESKey)
+	if err != nil {
+		panic(err.Error())
+	}
+	ctr := cipher.NewCTR(aesCipher, iv[:])
+	ctr.XORKeyStream(encrypted, plaintext)
+
+	var ret []byte
+	ret = appendU16(ret, 2)
+	ret = append(ret, msgTypeData)
+	ret = append(ret, 0 /* flags */)
+	ret = appendU32(ret, c.myKeyId-1)
+	ret = appendU32(ret, c.theirKeyId)
+	ret = appendMPI(ret, c.myCurrentDHPub)
+	ret = append(ret, c.myCounter[:]...)
+	ret = appendData(ret, encrypted)
+
+	// MAC covers the whole header and ciphertext; only a 20-byte prefix is sent.
+	mac := hmac.New(sha1.New, slot.sendMACKey)
+	mac.Write(ret)
+	ret = append(ret, mac.Sum(nil)[:macPrefixBytes]...)
+	// Reveal MAC keys of retired slots, as the protocol requires.
+	ret = appendData(ret, c.oldMACs)
+	c.oldMACs = nil
+	incCounter(&c.myCounter)
+
+	return ret
+}
      +
+// incCounter increments an 8-byte big-endian counter in place, propagating
+// the carry from the least significant byte upward.
+func incCounter(counter *[8]byte) {
+	for i := 7; i >= 0; i-- {
+		counter[i]++
+		if counter[i] > 0 {
+			// No wrap-around at this byte, so no further carry.
+			break
+		}
+	}
+}
      +
      +// calcDataKeys computes the keys used to encrypt a data message given the key
      +// IDs.
      +func (c *Conversation) calcDataKeys(myKeyId, theirKeyId uint32) (slot *keySlot, err error) {
      +	// Check for a cache hit.
      +	for i := range c.keySlots {
      +		slot = &c.keySlots[i]
      +		if slot.used && slot.theirKeyId == theirKeyId && slot.myKeyId == myKeyId {
      +			return
      +		}
      +	}
      +
      +	// Find an empty slot to write into.
      +	slot = nil
      +	for i := range c.keySlots {
      +		if !c.keySlots[i].used {
      +			slot = &c.keySlots[i]
      +			break
      +		}
      +	}
      +	if slot == nil {
      +		return nil, errors.New("otr: internal error: no more key slots")
      +	}
      +
      +	var myPriv, myPub, theirPub *big.Int
      +
      +	if myKeyId == c.myKeyId {
      +		myPriv = c.myCurrentDHPriv
      +		myPub = c.myCurrentDHPub
      +	} else if myKeyId == c.myKeyId-1 {
      +		myPriv = c.myLastDHPriv
      +		myPub = c.myLastDHPub
      +	} else {
      +		err = errors.New("otr: peer requested keyid " + strconv.FormatUint(uint64(myKeyId), 10) + " when I'm on " + strconv.FormatUint(uint64(c.myKeyId), 10))
      +		return
      +	}
      +
      +	if theirKeyId == c.theirKeyId {
      +		theirPub = c.theirCurrentDHPub
      +	} else if theirKeyId == c.theirKeyId-1 && c.theirLastDHPub != nil {
      +		theirPub = c.theirLastDHPub
      +	} else {
      +		err = errors.New("otr: peer requested keyid " + strconv.FormatUint(uint64(myKeyId), 10) + " when they're on " + strconv.FormatUint(uint64(c.myKeyId), 10))
      +		return
      +	}
      +
      +	var sendPrefixByte, recvPrefixByte [1]byte
      +
      +	if myPub.Cmp(theirPub) > 0 {
      +		// we're the high end
      +		sendPrefixByte[0], recvPrefixByte[0] = 1, 2
      +	} else {
      +		// we're the low end
      +		sendPrefixByte[0], recvPrefixByte[0] = 2, 1
      +	}
      +
      +	s := new(big.Int).Exp(theirPub, myPriv, p)
      +	sBytes := appendMPI(nil, s)
      +
      +	h := sha1.New()
      +	h.Write(sendPrefixByte[:])
      +	h.Write(sBytes)
      +	slot.sendAESKey = h.Sum(slot.sendAESKey[:0])[:16]
      +
      +	h.Reset()
      +	h.Write(slot.sendAESKey)
      +	slot.sendMACKey = h.Sum(slot.sendMACKey[:0])
      +
      +	h.Reset()
      +	h.Write(recvPrefixByte[:])
      +	h.Write(sBytes)
      +	slot.recvAESKey = h.Sum(slot.recvAESKey[:0])[:16]
      +
      +	h.Reset()
      +	h.Write(slot.recvAESKey)
      +	slot.recvMACKey = h.Sum(slot.recvMACKey[:0])
      +
      +	slot.theirKeyId = theirKeyId
      +	slot.myKeyId = myKeyId
      +	slot.used = true
      +
      +	zero(slot.theirLastCtr[:])
      +	return
      +}
      +
+// calcAKEKeys derives the session identifier and the two AKE key sets
+// (reveal-signature and signature) from the shared DH secret s, using
+// SHA256(prefix || MPI(s)) with prefix bytes 0..5 per the OTR spec.
+func (c *Conversation) calcAKEKeys(s *big.Int) {
+	mpi := appendMPI(nil, s)
+	h := sha256.New()
+
+	var cBytes [32]byte
+	// Prefix 0 yields the SSID (only its first len(SSID) bytes are kept).
+	hashWithPrefix(c.SSID[:], 0, mpi, h)
+
+	// Prefix 1 yields 32 bytes split into the two 16-byte AES keys.
+	hashWithPrefix(cBytes[:], 1, mpi, h)
+	copy(c.revealKeys.c[:], cBytes[:16])
+	copy(c.sigKeys.c[:], cBytes[16:])
+
+	hashWithPrefix(c.revealKeys.m1[:], 2, mpi, h)
+	hashWithPrefix(c.revealKeys.m2[:], 3, mpi, h)
+	hashWithPrefix(c.sigKeys.m1[:], 4, mpi, h)
+	hashWithPrefix(c.sigKeys.m2[:], 5, mpi, h)
+}
      +
+// hashWithPrefix writes hash(prefix || in) into out, truncating the digest
+// when out is shorter than the hash size. h is reset before use, so the same
+// hash.Hash can be reused across calls.
+func hashWithPrefix(out []byte, prefix byte, in []byte, h hash.Hash) {
+	h.Reset()
+	var p [1]byte
+	p[0] = prefix
+	h.Write(p[:])
+	h.Write(in)
+	if len(out) == h.Size() {
+		// Sum into out directly, avoiding an extra allocation.
+		h.Sum(out[:0])
+	} else {
+		digest := h.Sum(nil)
+		copy(out, digest)
+	}
+}
      +
+// encode base64-encodes a binary OTR message with the "?OTR:" prefix and "."
+// suffix, then splits it into "?OTR,k,n,chunk," fragments when FragmentSize
+// is set and the encoded message exceeds it.
+func (c *Conversation) encode(msg []byte) [][]byte {
+	b64 := make([]byte, base64.StdEncoding.EncodedLen(len(msg))+len(msgPrefix)+1)
+	base64.StdEncoding.Encode(b64[len(msgPrefix):], msg)
+	copy(b64, msgPrefix)
+	b64[len(b64)-1] = '.'
+
+	if c.FragmentSize < minFragmentSize || len(b64) <= c.FragmentSize {
+		// We can encode this in a single fragment.
+		return [][]byte{b64}
+	}
+
+	// We have to fragment this message.
+	var ret [][]byte
+	bytesPerFragment := c.FragmentSize - minFragmentSize
+	// NOTE(review): this is (n+d)/d rather than the usual ceiling (n+d-1)/d,
+	// so an exact multiple yields one trailing empty fragment. This matches
+	// upstream behavior and is accepted by processFragment — confirm before
+	// changing.
+	numFragments := (len(b64) + bytesPerFragment) / bytesPerFragment
+
+	for i := 0; i < numFragments; i++ {
+		frag := []byte("?OTR," + strconv.Itoa(i+1) + "," + strconv.Itoa(numFragments) + ",")
+		todo := bytesPerFragment
+		if todo > len(b64) {
+			todo = len(b64)
+		}
+		frag = append(frag, b64[:todo]...)
+		b64 = b64[todo:]
+		frag = append(frag, ',')
+		ret = append(ret, frag)
+	}
+
+	return ret
+}
      +
+// reset clears per-session key material: our key id counter and all cached
+// data-message key slots.
+func (c *Conversation) reset() {
+	c.myKeyId = 0
+
+	for i := range c.keySlots {
+		c.keySlots[i].used = false
+	}
+}
      +
+// PublicKey represents an OTR (DSA) public key.
+type PublicKey struct {
+	dsa.PublicKey
+}
      +
+// Parse reads an OTR wire-format public key (type tag 0 followed by the DSA
+// parameters P, Q, G, Y as MPIs) from in, returning the unconsumed remainder
+// and whether parsing succeeded.
+func (pk *PublicKey) Parse(in []byte) ([]byte, bool) {
+	var ok bool
+	var pubKeyType uint16
+
+	// Only key type 0 (DSA) is defined by the OTR v2 wire format.
+	if pubKeyType, in, ok = getU16(in); !ok || pubKeyType != 0 {
+		return nil, false
+	}
+	if pk.P, in, ok = getMPI(in); !ok {
+		return nil, false
+	}
+	if pk.Q, in, ok = getMPI(in); !ok {
+		return nil, false
+	}
+	if pk.G, in, ok = getMPI(in); !ok {
+		return nil, false
+	}
+	if pk.Y, in, ok = getMPI(in); !ok {
+		return nil, false
+	}
+
+	return in, true
+}
      +
+// Serialize appends the OTR wire-format encoding of the public key (type tag
+// 0 and the DSA parameters as MPIs) to in and returns the result.
+func (pk *PublicKey) Serialize(in []byte) []byte {
+	in = appendU16(in, 0)
+	in = appendMPI(in, pk.P)
+	in = appendMPI(in, pk.Q)
+	in = appendMPI(in, pk.G)
+	in = appendMPI(in, pk.Y)
+	return in
+}
      +
+// Fingerprint returns the 20-byte, binary fingerprint of the PublicKey.
+// It is SHA-1 over the serialized key with the 2-byte type tag omitted.
+func (pk *PublicKey) Fingerprint() []byte {
+	b := pk.Serialize(nil)
+	h := sha1.New()
+	h.Write(b[2:])
+	return h.Sum(nil)
+}
      +
      +func (pk *PublicKey) Verify(hashed, sig []byte) ([]byte, bool) {
      +	if len(sig) != 2*dsaSubgroupBytes {
      +		return nil, false
      +	}
      +	r := new(big.Int).SetBytes(sig[:dsaSubgroupBytes])
      +	s := new(big.Int).SetBytes(sig[dsaSubgroupBytes:])
      +	ok := dsa.Verify(&pk.PublicKey, hashed, r, s)
      +	return sig[dsaSubgroupBytes*2:], ok
      +}
      +
+// PrivateKey represents an OTR (DSA) private key, embedding its PublicKey.
+type PrivateKey struct {
+	PublicKey
+	dsa.PrivateKey
+}
      +
+// Sign produces a raw DSA signature over hashed: r and s each zero-padded to
+// dsaSubgroupBytes and concatenated. It panics on signing failure, which only
+// occurs for a broken random source or invalid key.
+func (priv *PrivateKey) Sign(rand io.Reader, hashed []byte) []byte {
+	r, s, err := dsa.Sign(rand, &priv.PrivateKey, hashed)
+	if err != nil {
+		panic(err.Error())
+	}
+	rBytes := r.Bytes()
+	sBytes := s.Bytes()
+	if len(rBytes) > dsaSubgroupBytes || len(sBytes) > dsaSubgroupBytes {
+		panic("DSA signature too large")
+	}
+
+	// Left-pad each value with zeros into its fixed-width field.
+	out := make([]byte, 2*dsaSubgroupBytes)
+	copy(out[dsaSubgroupBytes-len(rBytes):], rBytes)
+	copy(out[len(out)-len(sBytes):], sBytes)
+	return out
+}
      +
+// Serialize appends the wire-format private key (public part followed by the
+// secret exponent X as an MPI) to in and returns the result.
+func (priv *PrivateKey) Serialize(in []byte) []byte {
+	in = priv.PublicKey.Serialize(in)
+	in = appendMPI(in, priv.PrivateKey.X)
+	return in
+}
      +
+// Parse reads a wire-format private key (public part then X), mirroring
+// Serialize, and returns the unconsumed remainder and success.
+func (priv *PrivateKey) Parse(in []byte) ([]byte, bool) {
+	in, ok := priv.PublicKey.Parse(in)
+	if !ok {
+		return in, ok
+	}
+	// Keep the embedded dsa.PrivateKey's public half in sync.
+	priv.PrivateKey.PublicKey = priv.PublicKey.PublicKey
+	priv.PrivateKey.X, in, ok = getMPI(in)
+	return in, ok
+}
      +
+// Generate creates a fresh DSA key with L=1024, N=160 parameters (the sizes
+// the OTR v2 protocol specifies), panicking on generator failure.
+func (priv *PrivateKey) Generate(rand io.Reader) {
+	if err := dsa.GenerateParameters(&priv.PrivateKey.PublicKey.Parameters, rand, dsa.L1024N160); err != nil {
+		panic(err.Error())
+	}
+	if err := dsa.GenerateKey(&priv.PrivateKey, rand); err != nil {
+		panic(err.Error())
+	}
+	priv.PublicKey.PublicKey = priv.PrivateKey.PublicKey
+}
      +
+// notHex reports whether r is not an ASCII hexadecimal digit. It is used with
+// bytes.IndexFunc to find the end of a hex run in Import.
+func notHex(r rune) bool {
+	if r >= '0' && r <= '9' ||
+		r >= 'a' && r <= 'f' ||
+		r >= 'A' && r <= 'F' {
+		return false
+	}
+
+	return true
+}
      +
+// Import parses the contents of a libotr private key file. It extracts the
+// five hex MPIs (p, q, g, y, x) in order and reports success only if the key
+// is internally consistent (g^x mod p == y).
+func (priv *PrivateKey) Import(in []byte) bool {
+	// In libotr's S-expression format each MPI value is introduced by " #".
+	mpiStart := []byte(" #")
+
+	mpis := make([]*big.Int, 5)
+
+	for i := 0; i < len(mpis); i++ {
+		start := bytes.Index(in, mpiStart)
+		if start == -1 {
+			return false
+		}
+		in = in[start+len(mpiStart):]
+		end := bytes.IndexFunc(in, notHex)
+		if end == -1 {
+			return false
+		}
+		hexBytes := in[:end]
+		in = in[end:]
+
+		// Hex strings must encode whole bytes.
+		if len(hexBytes)&1 != 0 {
+			return false
+		}
+
+		mpiBytes := make([]byte, len(hexBytes)/2)
+		if _, err := hex.Decode(mpiBytes, hexBytes); err != nil {
+			return false
+		}
+
+		mpis[i] = new(big.Int).SetBytes(mpiBytes)
+	}
+
+	// libotr stores the values in the order p, q, g, y, x.
+	priv.PrivateKey.P = mpis[0]
+	priv.PrivateKey.Q = mpis[1]
+	priv.PrivateKey.G = mpis[2]
+	priv.PrivateKey.Y = mpis[3]
+	priv.PrivateKey.X = mpis[4]
+	priv.PublicKey.PublicKey = priv.PrivateKey.PublicKey
+
+	// Sanity check: the public value must match the private exponent.
+	a := new(big.Int).Exp(priv.PrivateKey.G, priv.PrivateKey.X, priv.PrivateKey.P)
+	return a.Cmp(priv.PrivateKey.Y) == 0
+}
      +
+// The get* helpers below parse OTR wire-format primitives from the front of a
+// byte slice. Each returns the parsed value, the unconsumed remainder and a
+// success flag; on failure the input slice is returned unchanged.
+
+// getU8 reads a single byte.
+func getU8(in []byte) (uint8, []byte, bool) {
+	if len(in) < 1 {
+		return 0, in, false
+	}
+	return in[0], in[1:], true
+}
+
+// getU16 reads a big-endian 16-bit integer.
+func getU16(in []byte) (uint16, []byte, bool) {
+	if len(in) < 2 {
+		return 0, in, false
+	}
+	r := uint16(in[0])<<8 | uint16(in[1])
+	return r, in[2:], true
+}
+
+// getU32 reads a big-endian 32-bit integer.
+func getU32(in []byte) (uint32, []byte, bool) {
+	if len(in) < 4 {
+		return 0, in, false
+	}
+	r := uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3])
+	return r, in[4:], true
+}
+
+// getMPI reads a length-prefixed big-endian bignum (OTR "MPI").
+func getMPI(in []byte) (*big.Int, []byte, bool) {
+	l, in, ok := getU32(in)
+	if !ok || uint32(len(in)) < l {
+		return nil, in, false
+	}
+	r := new(big.Int).SetBytes(in[:l])
+	return r, in[l:], true
+}
+
+// getData reads a length-prefixed byte string (OTR "DATA").
+func getData(in []byte) ([]byte, []byte, bool) {
+	l, in, ok := getU32(in)
+	if !ok || uint32(len(in)) < l {
+		return nil, in, false
+	}
+	return in[:l], in[l:], true
+}
+
+// getNBytes reads exactly n raw bytes.
+func getNBytes(in []byte, n int) ([]byte, []byte, bool) {
+	if len(in) < n {
+		return nil, in, false
+	}
+	return in[:n], in[n:], true
+}
      +
+// The append* helpers below are the serialization counterparts of the get*
+// parsers: each appends an OTR wire-format primitive to out and returns it.
+
+// appendU16 appends a big-endian 16-bit integer.
+func appendU16(out []byte, v uint16) []byte {
+	out = append(out, byte(v>>8), byte(v))
+	return out
+}
+
+// appendU32 appends a big-endian 32-bit integer.
+func appendU32(out []byte, v uint32) []byte {
+	out = append(out, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+	return out
+}
+
+// appendData appends a length-prefixed byte string (OTR "DATA").
+func appendData(out, v []byte) []byte {
+	out = appendU32(out, uint32(len(v)))
+	out = append(out, v...)
+	return out
+}
+
+// appendMPI appends a length-prefixed big-endian bignum (OTR "MPI").
+func appendMPI(out []byte, v *big.Int) []byte {
+	vBytes := v.Bytes()
+	out = appendU32(out, uint32(len(vBytes)))
+	out = append(out, vBytes...)
+	return out
+}
+
+// appendMPIs appends each of the given bignums as an MPI, in order.
+func appendMPIs(out []byte, mpis ...*big.Int) []byte {
+	for _, mpi := range mpis {
+		out = appendMPI(out, mpi)
+	}
+	return out
+}
      +
+// zero overwrites b with zero bytes, used to wipe retired key material.
+func zero(b []byte) {
+	for i := range b {
+		b[i] = 0
+	}
+}
      diff --git a/vendor/golang.org/x/crypto/otr/otr_test.go b/vendor/golang.org/x/crypto/otr/otr_test.go
      new file mode 100644
      index 00000000..cfcd062b
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/otr/otr_test.go
      @@ -0,0 +1,470 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package otr
      +
      +import (
      +	"bufio"
      +	"bytes"
      +	"crypto/rand"
      +	"encoding/hex"
      +	"math/big"
      +	"os"
      +	"os/exec"
      +	"testing"
      +)
      +
+// isQueryTests maps candidate query strings to the protocol version isQuery
+// is expected to report; 0 means "not a supported OTR query".
+var isQueryTests = []struct {
+	msg             string
+	expectedVersion int
+}{
+	{"foo", 0},
+	{"?OtR", 0},
+	{"?OtR?", 0},
+	{"?OTR?", 0},
+	{"?OTRv?", 0},
+	{"?OTRv1?", 0},
+	{"?OTR?v1?", 0},
+	{"?OTR?v?", 0},
+	{"?OTR?v2?", 2},
+	{"?OTRv2?", 2},
+	{"?OTRv23?", 2},
+	{"?OTRv23 ?", 0},
+}
      +
+// TestIsQuery checks isQuery's version detection against the table above.
+func TestIsQuery(t *testing.T) {
+	for i, test := range isQueryTests {
+		version := isQuery([]byte(test.msg))
+		if version != test.expectedVersion {
+			t.Errorf("#%d: got %d, want %d", i, version, test.expectedVersion)
+		}
+	}
+}
      +
      +var alicePrivateKeyHex = "000000000080c81c2cb2eb729b7e6fd48e975a932c638b3a9055478583afa46755683e30102447f6da2d8bec9f386bbb5da6403b0040fee8650b6ab2d7f32c55ab017ae9b6aec8c324ab5844784e9a80e194830d548fb7f09a0410df2c4d5c8bc2b3e9ad484e65412be689cf0834694e0839fb2954021521ffdffb8f5c32c14dbf2020b3ce7500000014da4591d58def96de61aea7b04a8405fe1609308d000000808ddd5cb0b9d66956e3dea5a915d9aba9d8a6e7053b74dadb2fc52f9fe4e5bcc487d2305485ed95fed026ad93f06ebb8c9e8baf693b7887132c7ffdd3b0f72f4002ff4ed56583ca7c54458f8c068ca3e8a4dfa309d1dd5d34e2a4b68e6f4338835e5e0fb4317c9e4c7e4806dafda3ef459cd563775a586dd91b1319f72621bf3f00000080b8147e74d8c45e6318c37731b8b33b984a795b3653c2cd1d65cc99efe097cb7eb2fa49569bab5aab6e8a1c261a27d0f7840a5e80b317e6683042b59b6dceca2879c6ffc877a465be690c15e4a42f9a7588e79b10faac11b1ce3741fcef7aba8ce05327a2c16d279ee1b3d77eb783fb10e3356caa25635331e26dd42b8396c4d00000001420bec691fea37ecea58a5c717142f0b804452f57"
      +
      +var aliceFingerprintHex = "0bb01c360424522e94ee9c346ce877a1a4288b2f"
      +
      +var bobPrivateKeyHex = "000000000080a5138eb3d3eb9c1d85716faecadb718f87d31aaed1157671d7fee7e488f95e8e0ba60ad449ec732710a7dec5190f7182af2e2f98312d98497221dff160fd68033dd4f3a33b7c078d0d9f66e26847e76ca7447d4bab35486045090572863d9e4454777f24d6706f63e02548dfec2d0a620af37bbc1d24f884708a212c343b480d00000014e9c58f0ea21a5e4dfd9f44b6a9f7f6a9961a8fa9000000803c4d111aebd62d3c50c2889d420a32cdf1e98b70affcc1fcf44d59cca2eb019f6b774ef88153fb9b9615441a5fe25ea2d11b74ce922ca0232bd81b3c0fcac2a95b20cb6e6c0c5c1ace2e26f65dc43c751af0edbb10d669890e8ab6beea91410b8b2187af1a8347627a06ecea7e0f772c28aae9461301e83884860c9b656c722f0000008065af8625a555ea0e008cd04743671a3cda21162e83af045725db2eb2bb52712708dc0cc1a84c08b3649b88a966974bde27d8612c2861792ec9f08786a246fcadd6d8d3a81a32287745f309238f47618c2bd7612cb8b02d940571e0f30b96420bcd462ff542901b46109b1e5ad6423744448d20a57818a8cbb1647d0fea3b664e0000001440f9f2eb554cb00d45a5826b54bfa419b6980e48"
      +
      +func TestKeySerialization(t *testing.T) {
      +	var priv PrivateKey
      +	alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex)
      +	rest, ok := priv.Parse(alicePrivateKey)
      +	if !ok {
      +		t.Error("failed to parse private key")
      +	}
      +	if len(rest) > 0 {
      +		t.Error("data remaining after parsing private key")
      +	}
      +
      +	out := priv.Serialize(nil)
      +	if !bytes.Equal(alicePrivateKey, out) {
      +		t.Errorf("serialization (%x) is not equal to original (%x)", out, alicePrivateKey)
      +	}
      +
      +	aliceFingerprint, _ := hex.DecodeString(aliceFingerprintHex)
      +	fingerprint := priv.PublicKey.Fingerprint()
      +	if !bytes.Equal(aliceFingerprint, fingerprint) {
      +		t.Errorf("fingerprint (%x) is not equal to expected value (%x)", fingerprint, aliceFingerprint)
      +	}
      +}
      +
      +const libOTRPrivateKey = `(privkeys
      + (account
      +(name "foo@example.com")
      +(protocol prpl-jabber)
      +(private-key 
      + (dsa 
      +  (p #00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB8C031D3561FECEE72EBB4A090D450A9B7A857#)
      +  (q #00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#)
      +  (g #535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57597766A2F9CE3857D7ACE3E1E3BC1FC6F26#)
      +  (y #0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A3C0FF501E3DC673B76D7BABF349009B6ECF#)
      +  (x #14D0345A3562C480A039E3C72764F72D79043216#)
      +  )
      + )
      + )
      +)`
      +
      +func TestParseLibOTRPrivateKey(t *testing.T) {
      +	var priv PrivateKey
      +
      +	if !priv.Import([]byte(libOTRPrivateKey)) {
      +		t.Fatalf("Failed to import sample private key")
      +	}
      +}
      +
      +func TestSignVerify(t *testing.T) {
      +	var priv PrivateKey
      +	alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex)
      +	_, ok := priv.Parse(alicePrivateKey)
      +	if !ok {
      +		t.Error("failed to parse private key")
      +	}
      +
      +	var msg [32]byte
      +	rand.Reader.Read(msg[:])
      +
      +	sig := priv.Sign(rand.Reader, msg[:])
      +	rest, ok := priv.PublicKey.Verify(msg[:], sig)
      +	if !ok {
      +		t.Errorf("signature (%x) of %x failed to verify", sig, msg[:])
      +	} else if len(rest) > 0 {
      +		t.Error("signature data remains after verification")
      +	}
      +
      +	sig[10] ^= 80
      +	_, ok = priv.PublicKey.Verify(msg[:], sig)
      +	if ok {
      +		t.Errorf("corrupted signature (%x) of %x verified", sig, msg[:])
      +	}
      +}
      +
      +func setupConversation(t *testing.T) (alice, bob *Conversation) {
      +	alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex)
      +	bobPrivateKey, _ := hex.DecodeString(bobPrivateKeyHex)
      +
      +	alice, bob = new(Conversation), new(Conversation)
      +
      +	alice.PrivateKey = new(PrivateKey)
      +	bob.PrivateKey = new(PrivateKey)
      +	alice.PrivateKey.Parse(alicePrivateKey)
      +	bob.PrivateKey.Parse(bobPrivateKey)
      +	alice.FragmentSize = 100
      +	bob.FragmentSize = 100
      +
      +	if alice.IsEncrypted() {
      +		t.Error("Alice believes that the conversation is secure before we've started")
      +	}
      +	if bob.IsEncrypted() {
      +		t.Error("Bob believes that the conversation is secure before we've started")
      +	}
      +
      +	performHandshake(t, alice, bob)
      +	return alice, bob
      +}
      +
      +func performHandshake(t *testing.T, alice, bob *Conversation) {
      +	var alicesMessage, bobsMessage [][]byte
      +	var out []byte
      +	var aliceChange, bobChange SecurityChange
      +	var err error
      +	alicesMessage = append(alicesMessage, []byte(QueryMessage))
      +
      +	for round := 0; len(alicesMessage) > 0 || len(bobsMessage) > 0; round++ {
      +		bobsMessage = nil
      +		for i, msg := range alicesMessage {
      +			out, _, bobChange, bobsMessage, err = bob.Receive(msg)
      +			if len(out) > 0 {
      +				t.Errorf("Bob generated output during key exchange, round %d, message %d", round, i)
      +			}
      +			if err != nil {
      +				t.Fatalf("Bob returned an error, round %d, message %d (%x): %s", round, i, msg, err)
      +			}
      +			if len(bobsMessage) > 0 && i != len(alicesMessage)-1 {
      +				t.Errorf("Bob produced output while processing a fragment, round %d, message %d", round, i)
      +			}
      +		}
      +
      +		alicesMessage = nil
      +		for i, msg := range bobsMessage {
      +			out, _, aliceChange, alicesMessage, err = alice.Receive(msg)
      +			if len(out) > 0 {
      +				t.Errorf("Alice generated output during key exchange, round %d, message %d", round, i)
      +			}
      +			if err != nil {
      +				t.Fatalf("Alice returned an error, round %d, message %d (%x): %s", round, i, msg, err)
      +			}
      +			if len(alicesMessage) > 0 && i != len(bobsMessage)-1 {
      +				t.Errorf("Alice produced output while processing a fragment, round %d, message %d", round, i)
      +			}
      +		}
      +	}
      +
      +	if aliceChange != NewKeys {
      +		t.Errorf("Alice terminated without signaling new keys")
      +	}
      +	if bobChange != NewKeys {
      +		t.Errorf("Bob terminated without signaling new keys")
      +	}
      +
      +	if !bytes.Equal(alice.SSID[:], bob.SSID[:]) {
      +		t.Errorf("Session identifiers don't match. Alice has %x, Bob has %x", alice.SSID[:], bob.SSID[:])
      +	}
      +
      +	if !alice.IsEncrypted() {
      +		t.Error("Alice doesn't believe that the conversation is secure")
      +	}
      +	if !bob.IsEncrypted() {
      +		t.Error("Bob doesn't believe that the conversation is secure")
      +	}
      +}
      +
      +const (
      +	firstRoundTrip = iota
      +	subsequentRoundTrip
      +	noMACKeyCheck
      +)
      +
      +func roundTrip(t *testing.T, alice, bob *Conversation, message []byte, macKeyCheck int) {
      +	alicesMessage, err := alice.Send(message)
      +	if err != nil {
      +		t.Errorf("Error from Alice sending message: %s", err)
      +	}
      +
      +	if len(alice.oldMACs) != 0 {
      +		t.Errorf("Alice has not revealed all MAC keys")
      +	}
      +
      +	for i, msg := range alicesMessage {
      +		out, encrypted, _, _, err := bob.Receive(msg)
      +
      +		if err != nil {
      +			t.Errorf("Error generated while processing test message: %s", err.Error())
      +		}
      +		if len(out) > 0 {
      +			if i != len(alicesMessage)-1 {
      +				t.Fatal("Bob produced a message while processing a fragment of Alice's")
      +			}
      +			if !encrypted {
      +				t.Errorf("Message was not marked as encrypted")
      +			}
      +			if !bytes.Equal(out, message) {
      +				t.Errorf("Message corrupted: got %x, want %x", out, message)
      +			}
      +		}
      +	}
      +
      +	switch macKeyCheck {
      +	case firstRoundTrip:
      +		if len(bob.oldMACs) != 0 {
      +			t.Errorf("Bob should not have MAC keys to reveal")
      +		}
      +	case subsequentRoundTrip:
      +		if len(bob.oldMACs) != 40 {
      +			t.Errorf("Bob has %d bytes of MAC keys to reveal, but should have 40", len(bob.oldMACs))
      +		}
      +	}
      +
      +	bobsMessage, err := bob.Send(message)
      +	if err != nil {
      +		t.Errorf("Error from Bob sending message: %s", err)
      +	}
      +
      +	if len(bob.oldMACs) != 0 {
      +		t.Errorf("Bob has not revealed all MAC keys")
      +	}
      +
      +	for i, msg := range bobsMessage {
      +		out, encrypted, _, _, err := alice.Receive(msg)
      +
      +		if err != nil {
      +			t.Errorf("Error generated while processing test message: %s", err.Error())
      +		}
      +		if len(out) > 0 {
      +			if i != len(bobsMessage)-1 {
      +				t.Fatal("Alice produced a message while processing a fragment of Bob's")
      +			}
      +			if !encrypted {
      +				t.Errorf("Message was not marked as encrypted")
      +			}
      +			if !bytes.Equal(out, message) {
      +				t.Errorf("Message corrupted: got %x, want %x", out, message)
      +			}
      +		}
      +	}
      +
      +	switch macKeyCheck {
      +	case firstRoundTrip:
      +		if len(alice.oldMACs) != 20 {
      +			t.Errorf("Alice has %d bytes of MAC keys to reveal, but should have 20", len(alice.oldMACs))
      +		}
      +	case subsequentRoundTrip:
      +		if len(alice.oldMACs) != 40 {
      +			t.Errorf("Alice has %d bytes of MAC keys to reveal, but should have 40", len(alice.oldMACs))
      +		}
      +	}
      +}
      +
      +func TestConversation(t *testing.T) {
      +	alice, bob := setupConversation(t)
      +
      +	var testMessages = [][]byte{
      +		[]byte("hello"), []byte("bye"),
      +	}
      +
      +	roundTripType := firstRoundTrip
      +
      +	for _, testMessage := range testMessages {
      +		roundTrip(t, alice, bob, testMessage, roundTripType)
      +		roundTripType = subsequentRoundTrip
      +	}
      +}
      +
      +func TestGoodSMP(t *testing.T) {
      +	var alice, bob Conversation
      +
      +	alice.smp.secret = new(big.Int).SetInt64(42)
      +	bob.smp.secret = alice.smp.secret
      +
      +	var alicesMessages, bobsMessages []tlv
      +	var aliceComplete, bobComplete bool
      +	var err error
      +	var out tlv
      +
      +	alicesMessages = alice.startSMP("")
      +	for round := 0; len(alicesMessages) > 0 || len(bobsMessages) > 0; round++ {
      +		bobsMessages = bobsMessages[:0]
      +		for i, msg := range alicesMessages {
      +			out, bobComplete, err = bob.processSMP(msg)
      +			if err != nil {
      +				t.Errorf("Error from Bob in round %d: %s", round, err)
      +			}
      +			if bobComplete && i != len(alicesMessages)-1 {
      +				t.Errorf("Bob returned a completed signal before processing all of Alice's messages in round %d", round)
      +			}
      +			if out.typ != 0 {
      +				bobsMessages = append(bobsMessages, out)
      +			}
      +		}
      +
      +		alicesMessages = alicesMessages[:0]
      +		for i, msg := range bobsMessages {
      +			out, aliceComplete, err = alice.processSMP(msg)
      +			if err != nil {
      +				t.Errorf("Error from Alice in round %d: %s", round, err)
      +			}
      +			if aliceComplete && i != len(bobsMessages)-1 {
      +				t.Errorf("Alice returned a completed signal before processing all of Bob's messages in round %d", round)
      +			}
      +			if out.typ != 0 {
      +				alicesMessages = append(alicesMessages, out)
      +			}
      +		}
      +	}
      +
      +	if !aliceComplete || !bobComplete {
      +		t.Errorf("SMP completed without both sides reporting success: alice: %v, bob: %v\n", aliceComplete, bobComplete)
      +	}
      +}
      +
      +func TestBadSMP(t *testing.T) {
      +	var alice, bob Conversation
      +
      +	alice.smp.secret = new(big.Int).SetInt64(42)
      +	bob.smp.secret = new(big.Int).SetInt64(43)
      +
      +	var alicesMessages, bobsMessages []tlv
      +
      +	alicesMessages = alice.startSMP("")
      +	for round := 0; len(alicesMessages) > 0 || len(bobsMessages) > 0; round++ {
      +		bobsMessages = bobsMessages[:0]
      +		for _, msg := range alicesMessages {
      +			out, complete, _ := bob.processSMP(msg)
      +			if complete {
      +				t.Errorf("Bob signaled completion in round %d", round)
      +			}
      +			if out.typ != 0 {
      +				bobsMessages = append(bobsMessages, out)
      +			}
      +		}
      +
      +		alicesMessages = alicesMessages[:0]
      +		for _, msg := range bobsMessages {
      +			out, complete, _ := alice.processSMP(msg)
      +			if complete {
      +				t.Errorf("Alice signaled completion in round %d", round)
      +			}
      +			if out.typ != 0 {
      +				alicesMessages = append(alicesMessages, out)
      +			}
      +		}
      +	}
      +}
      +
      +func TestRehandshaking(t *testing.T) {
      +	alice, bob := setupConversation(t)
      +	roundTrip(t, alice, bob, []byte("test"), firstRoundTrip)
      +	roundTrip(t, alice, bob, []byte("test 2"), subsequentRoundTrip)
      +	roundTrip(t, alice, bob, []byte("test 3"), subsequentRoundTrip)
      +	roundTrip(t, alice, bob, []byte("test 4"), subsequentRoundTrip)
      +	roundTrip(t, alice, bob, []byte("test 5"), subsequentRoundTrip)
      +	roundTrip(t, alice, bob, []byte("test 6"), subsequentRoundTrip)
      +	roundTrip(t, alice, bob, []byte("test 7"), subsequentRoundTrip)
      +	roundTrip(t, alice, bob, []byte("test 8"), subsequentRoundTrip)
      +	performHandshake(t, alice, bob)
      +	roundTrip(t, alice, bob, []byte("test"), noMACKeyCheck)
      +	roundTrip(t, alice, bob, []byte("test 2"), noMACKeyCheck)
      +}
      +
      +func TestAgainstLibOTR(t *testing.T) {
      +	// This test requires otr.c.test to be built as /tmp/a.out.
+	// If enabled, this test runs forever performing OTR handshakes in a
      +	// loop.
      +	return
      +
      +	alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex)
      +	var alice Conversation
      +	alice.PrivateKey = new(PrivateKey)
      +	alice.PrivateKey.Parse(alicePrivateKey)
      +
      +	cmd := exec.Command("/tmp/a.out")
      +	cmd.Stderr = os.Stderr
      +
      +	out, err := cmd.StdinPipe()
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	defer out.Close()
      +	stdout, err := cmd.StdoutPipe()
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	in := bufio.NewReader(stdout)
      +
      +	if err := cmd.Start(); err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	out.Write([]byte(QueryMessage))
      +	out.Write([]byte("\n"))
      +	var expectedText = []byte("test message")
      +
      +	for {
      +		line, isPrefix, err := in.ReadLine()
      +		if isPrefix {
      +			t.Fatal("line from subprocess too long")
      +		}
      +		if err != nil {
      +			t.Fatal(err)
      +		}
      +		text, encrypted, change, alicesMessage, err := alice.Receive(line)
      +		if err != nil {
      +			t.Fatal(err)
      +		}
      +		for _, msg := range alicesMessage {
      +			out.Write(msg)
      +			out.Write([]byte("\n"))
      +		}
      +		if change == NewKeys {
      +			alicesMessage, err := alice.Send([]byte("Go -> libotr test message"))
      +			if err != nil {
      +				t.Fatalf("error sending message: %s", err.Error())
      +			} else {
      +				for _, msg := range alicesMessage {
      +					out.Write(msg)
      +					out.Write([]byte("\n"))
      +				}
      +			}
      +		}
      +		if len(text) > 0 {
      +			if !bytes.Equal(text, expectedText) {
      +				t.Fatalf("expected %x, but got %x", expectedText, text)
      +			}
      +			if !encrypted {
      +				t.Fatal("message wasn't encrypted")
      +			}
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/otr/smp.go b/vendor/golang.org/x/crypto/otr/smp.go
      new file mode 100644
      index 00000000..dc6de4ee
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/otr/smp.go
      @@ -0,0 +1,572 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This file implements the Socialist Millionaires Protocol as described in
      +// http://www.cypherpunks.ca/otr/Protocol-v2-3.1.0.html. The protocol
      +// specification is required in order to understand this code and, where
      +// possible, the variable names in the code match up with the spec.
      +
      +package otr
      +
      +import (
      +	"bytes"
      +	"crypto/sha256"
      +	"errors"
      +	"hash"
      +	"math/big"
      +)
      +
      +type smpFailure string
      +
      +func (s smpFailure) Error() string {
      +	return string(s)
      +}
      +
      +var smpFailureError = smpFailure("otr: SMP protocol failed")
      +var smpSecretMissingError = smpFailure("otr: mutual secret needed")
      +
      +const smpVersion = 1
      +
      +const (
      +	smpState1 = iota
      +	smpState2
      +	smpState3
      +	smpState4
      +)
      +
      +type smpState struct {
      +	state                  int
      +	a2, a3, b2, b3, pb, qb *big.Int
      +	g2a, g3a               *big.Int
      +	g2, g3                 *big.Int
      +	g3b, papb, qaqb, ra    *big.Int
      +	saved                  *tlv
      +	secret                 *big.Int
      +	question               string
      +}
      +
      +func (c *Conversation) startSMP(question string) (tlvs []tlv) {
      +	if c.smp.state != smpState1 {
      +		tlvs = append(tlvs, c.generateSMPAbort())
      +	}
      +	tlvs = append(tlvs, c.generateSMP1(question))
      +	c.smp.question = ""
      +	c.smp.state = smpState2
      +	return
      +}
      +
      +func (c *Conversation) resetSMP() {
      +	c.smp.state = smpState1
      +	c.smp.secret = nil
      +	c.smp.question = ""
      +}
      +
      +func (c *Conversation) processSMP(in tlv) (out tlv, complete bool, err error) {
      +	data := in.data
      +
      +	switch in.typ {
      +	case tlvTypeSMPAbort:
      +		if c.smp.state != smpState1 {
      +			err = smpFailureError
      +		}
      +		c.resetSMP()
      +		return
      +	case tlvTypeSMP1WithQuestion:
      +		// We preprocess this into a SMP1 message.
      +		nulPos := bytes.IndexByte(data, 0)
      +		if nulPos == -1 {
      +			err = errors.New("otr: SMP message with question didn't contain a NUL byte")
      +			return
      +		}
      +		c.smp.question = string(data[:nulPos])
      +		data = data[nulPos+1:]
      +	}
      +
      +	numMPIs, data, ok := getU32(data)
      +	if !ok || numMPIs > 20 {
      +		err = errors.New("otr: corrupt SMP message")
      +		return
      +	}
      +
      +	mpis := make([]*big.Int, numMPIs)
      +	for i := range mpis {
      +		var ok bool
      +		mpis[i], data, ok = getMPI(data)
      +		if !ok {
      +			err = errors.New("otr: corrupt SMP message")
      +			return
      +		}
      +	}
      +
      +	switch in.typ {
      +	case tlvTypeSMP1, tlvTypeSMP1WithQuestion:
      +		if c.smp.state != smpState1 {
      +			c.resetSMP()
      +			out = c.generateSMPAbort()
      +			return
      +		}
      +		if c.smp.secret == nil {
      +			err = smpSecretMissingError
      +			return
      +		}
      +		if err = c.processSMP1(mpis); err != nil {
      +			return
      +		}
      +		c.smp.state = smpState3
      +		out = c.generateSMP2()
      +	case tlvTypeSMP2:
      +		if c.smp.state != smpState2 {
      +			c.resetSMP()
      +			out = c.generateSMPAbort()
      +			return
      +		}
      +		if out, err = c.processSMP2(mpis); err != nil {
      +			out = c.generateSMPAbort()
      +			return
      +		}
      +		c.smp.state = smpState4
      +	case tlvTypeSMP3:
      +		if c.smp.state != smpState3 {
      +			c.resetSMP()
      +			out = c.generateSMPAbort()
      +			return
      +		}
      +		if out, err = c.processSMP3(mpis); err != nil {
      +			return
      +		}
      +		c.smp.state = smpState1
      +		c.smp.secret = nil
      +		complete = true
      +	case tlvTypeSMP4:
      +		if c.smp.state != smpState4 {
      +			c.resetSMP()
      +			out = c.generateSMPAbort()
      +			return
      +		}
      +		if err = c.processSMP4(mpis); err != nil {
      +			out = c.generateSMPAbort()
      +			return
      +		}
      +		c.smp.state = smpState1
      +		c.smp.secret = nil
      +		complete = true
      +	default:
      +		panic("unknown SMP message")
      +	}
      +
      +	return
      +}
      +
      +func (c *Conversation) calcSMPSecret(mutualSecret []byte, weStarted bool) {
      +	h := sha256.New()
      +	h.Write([]byte{smpVersion})
      +	if weStarted {
      +		h.Write(c.PrivateKey.PublicKey.Fingerprint())
      +		h.Write(c.TheirPublicKey.Fingerprint())
      +	} else {
      +		h.Write(c.TheirPublicKey.Fingerprint())
      +		h.Write(c.PrivateKey.PublicKey.Fingerprint())
      +	}
      +	h.Write(c.SSID[:])
      +	h.Write(mutualSecret)
      +	c.smp.secret = new(big.Int).SetBytes(h.Sum(nil))
      +}
      +
      +func (c *Conversation) generateSMP1(question string) tlv {
      +	var randBuf [16]byte
      +	c.smp.a2 = c.randMPI(randBuf[:])
      +	c.smp.a3 = c.randMPI(randBuf[:])
      +	g2a := new(big.Int).Exp(g, c.smp.a2, p)
      +	g3a := new(big.Int).Exp(g, c.smp.a3, p)
      +	h := sha256.New()
      +
      +	r2 := c.randMPI(randBuf[:])
      +	r := new(big.Int).Exp(g, r2, p)
      +	c2 := new(big.Int).SetBytes(hashMPIs(h, 1, r))
      +	d2 := new(big.Int).Mul(c.smp.a2, c2)
      +	d2.Sub(r2, d2)
      +	d2.Mod(d2, q)
      +	if d2.Sign() < 0 {
      +		d2.Add(d2, q)
      +	}
      +
      +	r3 := c.randMPI(randBuf[:])
      +	r.Exp(g, r3, p)
      +	c3 := new(big.Int).SetBytes(hashMPIs(h, 2, r))
      +	d3 := new(big.Int).Mul(c.smp.a3, c3)
      +	d3.Sub(r3, d3)
      +	d3.Mod(d3, q)
      +	if d3.Sign() < 0 {
      +		d3.Add(d3, q)
      +	}
      +
      +	var ret tlv
      +	if len(question) > 0 {
      +		ret.typ = tlvTypeSMP1WithQuestion
      +		ret.data = append(ret.data, question...)
      +		ret.data = append(ret.data, 0)
      +	} else {
      +		ret.typ = tlvTypeSMP1
      +	}
      +	ret.data = appendU32(ret.data, 6)
      +	ret.data = appendMPIs(ret.data, g2a, c2, d2, g3a, c3, d3)
      +	return ret
      +}
      +
      +func (c *Conversation) processSMP1(mpis []*big.Int) error {
      +	if len(mpis) != 6 {
      +		return errors.New("otr: incorrect number of arguments in SMP1 message")
      +	}
      +	g2a := mpis[0]
      +	c2 := mpis[1]
      +	d2 := mpis[2]
      +	g3a := mpis[3]
      +	c3 := mpis[4]
      +	d3 := mpis[5]
      +	h := sha256.New()
      +
      +	r := new(big.Int).Exp(g, d2, p)
      +	s := new(big.Int).Exp(g2a, c2, p)
      +	r.Mul(r, s)
      +	r.Mod(r, p)
      +	t := new(big.Int).SetBytes(hashMPIs(h, 1, r))
      +	if c2.Cmp(t) != 0 {
      +		return errors.New("otr: ZKP c2 incorrect in SMP1 message")
      +	}
      +	r.Exp(g, d3, p)
      +	s.Exp(g3a, c3, p)
      +	r.Mul(r, s)
      +	r.Mod(r, p)
      +	t.SetBytes(hashMPIs(h, 2, r))
      +	if c3.Cmp(t) != 0 {
      +		return errors.New("otr: ZKP c3 incorrect in SMP1 message")
      +	}
      +
      +	c.smp.g2a = g2a
      +	c.smp.g3a = g3a
      +	return nil
      +}
      +
      +func (c *Conversation) generateSMP2() tlv {
      +	var randBuf [16]byte
      +	b2 := c.randMPI(randBuf[:])
      +	c.smp.b3 = c.randMPI(randBuf[:])
      +	r2 := c.randMPI(randBuf[:])
      +	r3 := c.randMPI(randBuf[:])
      +	r4 := c.randMPI(randBuf[:])
      +	r5 := c.randMPI(randBuf[:])
      +	r6 := c.randMPI(randBuf[:])
      +
      +	g2b := new(big.Int).Exp(g, b2, p)
      +	g3b := new(big.Int).Exp(g, c.smp.b3, p)
      +
      +	r := new(big.Int).Exp(g, r2, p)
      +	h := sha256.New()
      +	c2 := new(big.Int).SetBytes(hashMPIs(h, 3, r))
      +	d2 := new(big.Int).Mul(b2, c2)
      +	d2.Sub(r2, d2)
      +	d2.Mod(d2, q)
      +	if d2.Sign() < 0 {
      +		d2.Add(d2, q)
      +	}
      +
      +	r.Exp(g, r3, p)
      +	c3 := new(big.Int).SetBytes(hashMPIs(h, 4, r))
      +	d3 := new(big.Int).Mul(c.smp.b3, c3)
      +	d3.Sub(r3, d3)
      +	d3.Mod(d3, q)
      +	if d3.Sign() < 0 {
      +		d3.Add(d3, q)
      +	}
      +
      +	c.smp.g2 = new(big.Int).Exp(c.smp.g2a, b2, p)
      +	c.smp.g3 = new(big.Int).Exp(c.smp.g3a, c.smp.b3, p)
      +	c.smp.pb = new(big.Int).Exp(c.smp.g3, r4, p)
      +	c.smp.qb = new(big.Int).Exp(g, r4, p)
      +	r.Exp(c.smp.g2, c.smp.secret, p)
      +	c.smp.qb.Mul(c.smp.qb, r)
      +	c.smp.qb.Mod(c.smp.qb, p)
      +
      +	s := new(big.Int)
      +	s.Exp(c.smp.g2, r6, p)
      +	r.Exp(g, r5, p)
      +	s.Mul(r, s)
      +	s.Mod(s, p)
      +	r.Exp(c.smp.g3, r5, p)
      +	cp := new(big.Int).SetBytes(hashMPIs(h, 5, r, s))
      +
      +	// D5 = r5 - r4 cP mod q and D6 = r6 - y cP mod q
      +
      +	s.Mul(r4, cp)
      +	r.Sub(r5, s)
      +	d5 := new(big.Int).Mod(r, q)
      +	if d5.Sign() < 0 {
      +		d5.Add(d5, q)
      +	}
      +
      +	s.Mul(c.smp.secret, cp)
      +	r.Sub(r6, s)
      +	d6 := new(big.Int).Mod(r, q)
      +	if d6.Sign() < 0 {
      +		d6.Add(d6, q)
      +	}
      +
      +	var ret tlv
      +	ret.typ = tlvTypeSMP2
      +	ret.data = appendU32(ret.data, 11)
      +	ret.data = appendMPIs(ret.data, g2b, c2, d2, g3b, c3, d3, c.smp.pb, c.smp.qb, cp, d5, d6)
      +	return ret
      +}
      +
      +func (c *Conversation) processSMP2(mpis []*big.Int) (out tlv, err error) {
      +	if len(mpis) != 11 {
      +		err = errors.New("otr: incorrect number of arguments in SMP2 message")
      +		return
      +	}
      +	g2b := mpis[0]
      +	c2 := mpis[1]
      +	d2 := mpis[2]
      +	g3b := mpis[3]
      +	c3 := mpis[4]
      +	d3 := mpis[5]
      +	pb := mpis[6]
      +	qb := mpis[7]
      +	cp := mpis[8]
      +	d5 := mpis[9]
      +	d6 := mpis[10]
      +	h := sha256.New()
      +
      +	r := new(big.Int).Exp(g, d2, p)
      +	s := new(big.Int).Exp(g2b, c2, p)
      +	r.Mul(r, s)
      +	r.Mod(r, p)
      +	s.SetBytes(hashMPIs(h, 3, r))
      +	if c2.Cmp(s) != 0 {
      +		err = errors.New("otr: ZKP c2 failed in SMP2 message")
      +		return
      +	}
      +
      +	r.Exp(g, d3, p)
      +	s.Exp(g3b, c3, p)
      +	r.Mul(r, s)
      +	r.Mod(r, p)
      +	s.SetBytes(hashMPIs(h, 4, r))
      +	if c3.Cmp(s) != 0 {
      +		err = errors.New("otr: ZKP c3 failed in SMP2 message")
      +		return
      +	}
      +
      +	c.smp.g2 = new(big.Int).Exp(g2b, c.smp.a2, p)
      +	c.smp.g3 = new(big.Int).Exp(g3b, c.smp.a3, p)
      +
      +	r.Exp(g, d5, p)
      +	s.Exp(c.smp.g2, d6, p)
      +	r.Mul(r, s)
      +	s.Exp(qb, cp, p)
      +	r.Mul(r, s)
      +	r.Mod(r, p)
      +
      +	s.Exp(c.smp.g3, d5, p)
      +	t := new(big.Int).Exp(pb, cp, p)
      +	s.Mul(s, t)
      +	s.Mod(s, p)
      +	t.SetBytes(hashMPIs(h, 5, s, r))
      +	if cp.Cmp(t) != 0 {
      +		err = errors.New("otr: ZKP cP failed in SMP2 message")
      +		return
      +	}
      +
      +	var randBuf [16]byte
      +	r4 := c.randMPI(randBuf[:])
      +	r5 := c.randMPI(randBuf[:])
      +	r6 := c.randMPI(randBuf[:])
      +	r7 := c.randMPI(randBuf[:])
      +
      +	pa := new(big.Int).Exp(c.smp.g3, r4, p)
      +	r.Exp(c.smp.g2, c.smp.secret, p)
      +	qa := new(big.Int).Exp(g, r4, p)
      +	qa.Mul(qa, r)
      +	qa.Mod(qa, p)
      +
      +	r.Exp(g, r5, p)
      +	s.Exp(c.smp.g2, r6, p)
      +	r.Mul(r, s)
      +	r.Mod(r, p)
      +
      +	s.Exp(c.smp.g3, r5, p)
      +	cp.SetBytes(hashMPIs(h, 6, s, r))
      +
      +	r.Mul(r4, cp)
      +	d5 = new(big.Int).Sub(r5, r)
      +	d5.Mod(d5, q)
      +	if d5.Sign() < 0 {
      +		d5.Add(d5, q)
      +	}
      +
      +	r.Mul(c.smp.secret, cp)
      +	d6 = new(big.Int).Sub(r6, r)
      +	d6.Mod(d6, q)
      +	if d6.Sign() < 0 {
      +		d6.Add(d6, q)
      +	}
      +
      +	r.ModInverse(qb, p)
      +	qaqb := new(big.Int).Mul(qa, r)
      +	qaqb.Mod(qaqb, p)
      +
      +	ra := new(big.Int).Exp(qaqb, c.smp.a3, p)
      +	r.Exp(qaqb, r7, p)
      +	s.Exp(g, r7, p)
      +	cr := new(big.Int).SetBytes(hashMPIs(h, 7, s, r))
      +
      +	r.Mul(c.smp.a3, cr)
      +	d7 := new(big.Int).Sub(r7, r)
      +	d7.Mod(d7, q)
      +	if d7.Sign() < 0 {
      +		d7.Add(d7, q)
      +	}
      +
      +	c.smp.g3b = g3b
      +	c.smp.qaqb = qaqb
      +
      +	r.ModInverse(pb, p)
      +	c.smp.papb = new(big.Int).Mul(pa, r)
      +	c.smp.papb.Mod(c.smp.papb, p)
      +	c.smp.ra = ra
      +
      +	out.typ = tlvTypeSMP3
      +	out.data = appendU32(out.data, 8)
      +	out.data = appendMPIs(out.data, pa, qa, cp, d5, d6, ra, cr, d7)
      +	return
      +}
      +
      +func (c *Conversation) processSMP3(mpis []*big.Int) (out tlv, err error) {
      +	if len(mpis) != 8 {
      +		err = errors.New("otr: incorrect number of arguments in SMP3 message")
      +		return
      +	}
      +	pa := mpis[0]
      +	qa := mpis[1]
      +	cp := mpis[2]
      +	d5 := mpis[3]
      +	d6 := mpis[4]
      +	ra := mpis[5]
      +	cr := mpis[6]
      +	d7 := mpis[7]
      +	h := sha256.New()
      +
      +	r := new(big.Int).Exp(g, d5, p)
      +	s := new(big.Int).Exp(c.smp.g2, d6, p)
      +	r.Mul(r, s)
      +	s.Exp(qa, cp, p)
      +	r.Mul(r, s)
      +	r.Mod(r, p)
      +
      +	s.Exp(c.smp.g3, d5, p)
      +	t := new(big.Int).Exp(pa, cp, p)
      +	s.Mul(s, t)
      +	s.Mod(s, p)
      +	t.SetBytes(hashMPIs(h, 6, s, r))
      +	if t.Cmp(cp) != 0 {
      +		err = errors.New("otr: ZKP cP failed in SMP3 message")
      +		return
      +	}
      +
      +	r.ModInverse(c.smp.qb, p)
      +	qaqb := new(big.Int).Mul(qa, r)
      +	qaqb.Mod(qaqb, p)
      +
      +	r.Exp(qaqb, d7, p)
      +	s.Exp(ra, cr, p)
      +	r.Mul(r, s)
      +	r.Mod(r, p)
      +
      +	s.Exp(g, d7, p)
      +	t.Exp(c.smp.g3a, cr, p)
      +	s.Mul(s, t)
      +	s.Mod(s, p)
      +	t.SetBytes(hashMPIs(h, 7, s, r))
      +	if t.Cmp(cr) != 0 {
      +		err = errors.New("otr: ZKP cR failed in SMP3 message")
      +		return
      +	}
      +
      +	var randBuf [16]byte
      +	r7 := c.randMPI(randBuf[:])
      +	rb := new(big.Int).Exp(qaqb, c.smp.b3, p)
      +
      +	r.Exp(qaqb, r7, p)
      +	s.Exp(g, r7, p)
      +	cr = new(big.Int).SetBytes(hashMPIs(h, 8, s, r))
      +
      +	r.Mul(c.smp.b3, cr)
      +	d7 = new(big.Int).Sub(r7, r)
      +	d7.Mod(d7, q)
      +	if d7.Sign() < 0 {
      +		d7.Add(d7, q)
      +	}
      +
      +	out.typ = tlvTypeSMP4
      +	out.data = appendU32(out.data, 3)
      +	out.data = appendMPIs(out.data, rb, cr, d7)
      +
      +	r.ModInverse(c.smp.pb, p)
      +	r.Mul(pa, r)
      +	r.Mod(r, p)
      +	s.Exp(ra, c.smp.b3, p)
      +	if r.Cmp(s) != 0 {
      +		err = smpFailureError
      +	}
      +
      +	return
      +}
      +
      +func (c *Conversation) processSMP4(mpis []*big.Int) error {
      +	if len(mpis) != 3 {
      +		return errors.New("otr: incorrect number of arguments in SMP4 message")
      +	}
      +	rb := mpis[0]
      +	cr := mpis[1]
      +	d7 := mpis[2]
      +	h := sha256.New()
      +
      +	r := new(big.Int).Exp(c.smp.qaqb, d7, p)
      +	s := new(big.Int).Exp(rb, cr, p)
      +	r.Mul(r, s)
      +	r.Mod(r, p)
      +
      +	s.Exp(g, d7, p)
      +	t := new(big.Int).Exp(c.smp.g3b, cr, p)
      +	s.Mul(s, t)
      +	s.Mod(s, p)
      +	t.SetBytes(hashMPIs(h, 8, s, r))
      +	if t.Cmp(cr) != 0 {
      +		return errors.New("otr: ZKP cR failed in SMP4 message")
      +	}
      +
      +	r.Exp(rb, c.smp.a3, p)
      +	if r.Cmp(c.smp.papb) != 0 {
      +		return smpFailureError
      +	}
      +
      +	return nil
      +}
      +
      +func (c *Conversation) generateSMPAbort() tlv {
      +	return tlv{typ: tlvTypeSMPAbort}
      +}
      +
      +func hashMPIs(h hash.Hash, magic byte, mpis ...*big.Int) []byte {
      +	if h != nil {
      +		h.Reset()
      +	} else {
      +		h = sha256.New()
      +	}
      +
      +	h.Write([]byte{magic})
      +	for _, mpi := range mpis {
      +		h.Write(appendMPI(nil, mpi))
      +	}
      +	return h.Sum(nil)
      +}
      diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
      new file mode 100644
      index 00000000..593f6530
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
      @@ -0,0 +1,77 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +/*
      +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
      +2898 / PKCS #5 v2.0.
      +
      +A key derivation function is useful when encrypting data based on a password
      +or any other not-fully-random data. It uses a pseudorandom function to derive
      +a secure encryption key based on the password.
      +
      +While v2.0 of the standard defines only one pseudorandom function to use,
      +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
      +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
      +choose, you can pass the `New` functions from the different SHA packages to
      +pbkdf2.Key.
      +*/
      +package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
      +
      +import (
      +	"crypto/hmac"
      +	"hash"
      +)
      +
      +// Key derives a key from the password, salt and iteration count, returning a
      +// []byte of length keylen that can be used as cryptographic key. The key is
      +// derived based on the method described as PBKDF2 with the HMAC variant using
      +// the supplied hash function.
      +//
      +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
      +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
      +// doing:
      +//
      +// 	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
      +//
      +// Remember to get a good random salt. At least 8 bytes is recommended by the
      +// RFC.
      +//
      +// Using a higher iteration count will increase the cost of an exhaustive
      +// search but will also make derivation proportionally slower.
      +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
      +	prf := hmac.New(h, password)
      +	hashLen := prf.Size()
      +	numBlocks := (keyLen + hashLen - 1) / hashLen
      +
      +	var buf [4]byte
      +	dk := make([]byte, 0, numBlocks*hashLen)
      +	U := make([]byte, hashLen)
      +	for block := 1; block <= numBlocks; block++ {
      +		// N.B.: || means concatenation, ^ means XOR
      +		// for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
      +		// U_1 = PRF(password, salt || uint(i))
      +		prf.Reset()
      +		prf.Write(salt)
      +		buf[0] = byte(block >> 24)
      +		buf[1] = byte(block >> 16)
      +		buf[2] = byte(block >> 8)
      +		buf[3] = byte(block)
      +		prf.Write(buf[:4])
      +		dk = prf.Sum(dk)
      +		T := dk[len(dk)-hashLen:]
      +		copy(U, T)
      +
      +		// U_n = PRF(password, U_(n-1))
      +		for n := 2; n <= iter; n++ {
      +			prf.Reset()
      +			prf.Write(U)
      +			U = U[:0]
      +			U = prf.Sum(U)
      +			for x := range U {
      +				T[x] ^= U[x]
      +			}
      +		}
      +	}
      +	return dk[:keyLen]
      +}
      diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2_test.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2_test.go
      new file mode 100644
      index 00000000..13792406
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2_test.go
      @@ -0,0 +1,157 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package pbkdf2
      +
      +import (
      +	"bytes"
      +	"crypto/sha1"
      +	"crypto/sha256"
      +	"hash"
      +	"testing"
      +)
      +
      +type testVector struct {
      +	password string
      +	salt     string
      +	iter     int
      +	output   []byte
      +}
      +
      +// Test vectors from RFC 6070, http://tools.ietf.org/html/rfc6070
      +var sha1TestVectors = []testVector{
      +	{
      +		"password",
      +		"salt",
      +		1,
      +		[]byte{
      +			0x0c, 0x60, 0xc8, 0x0f, 0x96, 0x1f, 0x0e, 0x71,
      +			0xf3, 0xa9, 0xb5, 0x24, 0xaf, 0x60, 0x12, 0x06,
      +			0x2f, 0xe0, 0x37, 0xa6,
      +		},
      +	},
      +	{
      +		"password",
      +		"salt",
      +		2,
      +		[]byte{
      +			0xea, 0x6c, 0x01, 0x4d, 0xc7, 0x2d, 0x6f, 0x8c,
      +			0xcd, 0x1e, 0xd9, 0x2a, 0xce, 0x1d, 0x41, 0xf0,
      +			0xd8, 0xde, 0x89, 0x57,
      +		},
      +	},
      +	{
      +		"password",
      +		"salt",
      +		4096,
      +		[]byte{
      +			0x4b, 0x00, 0x79, 0x01, 0xb7, 0x65, 0x48, 0x9a,
      +			0xbe, 0xad, 0x49, 0xd9, 0x26, 0xf7, 0x21, 0xd0,
      +			0x65, 0xa4, 0x29, 0xc1,
      +		},
      +	},
      +	// // This one takes too long
      +	// {
      +	// 	"password",
      +	// 	"salt",
      +	// 	16777216,
      +	// 	[]byte{
      +	// 		0xee, 0xfe, 0x3d, 0x61, 0xcd, 0x4d, 0xa4, 0xe4,
      +	// 		0xe9, 0x94, 0x5b, 0x3d, 0x6b, 0xa2, 0x15, 0x8c,
      +	// 		0x26, 0x34, 0xe9, 0x84,
      +	// 	},
      +	// },
      +	{
      +		"passwordPASSWORDpassword",
      +		"saltSALTsaltSALTsaltSALTsaltSALTsalt",
      +		4096,
      +		[]byte{
      +			0x3d, 0x2e, 0xec, 0x4f, 0xe4, 0x1c, 0x84, 0x9b,
      +			0x80, 0xc8, 0xd8, 0x36, 0x62, 0xc0, 0xe4, 0x4a,
      +			0x8b, 0x29, 0x1a, 0x96, 0x4c, 0xf2, 0xf0, 0x70,
      +			0x38,
      +		},
      +	},
      +	{
      +		"pass\000word",
      +		"sa\000lt",
      +		4096,
      +		[]byte{
      +			0x56, 0xfa, 0x6a, 0xa7, 0x55, 0x48, 0x09, 0x9d,
      +			0xcc, 0x37, 0xd7, 0xf0, 0x34, 0x25, 0xe0, 0xc3,
      +		},
      +	},
      +}
      +
      +// Test vectors from
      +// http://stackoverflow.com/questions/5130513/pbkdf2-hmac-sha2-test-vectors
      +var sha256TestVectors = []testVector{
      +	{
      +		"password",
      +		"salt",
      +		1,
      +		[]byte{
      +			0x12, 0x0f, 0xb6, 0xcf, 0xfc, 0xf8, 0xb3, 0x2c,
      +			0x43, 0xe7, 0x22, 0x52, 0x56, 0xc4, 0xf8, 0x37,
      +			0xa8, 0x65, 0x48, 0xc9,
      +		},
      +	},
      +	{
      +		"password",
      +		"salt",
      +		2,
      +		[]byte{
      +			0xae, 0x4d, 0x0c, 0x95, 0xaf, 0x6b, 0x46, 0xd3,
      +			0x2d, 0x0a, 0xdf, 0xf9, 0x28, 0xf0, 0x6d, 0xd0,
      +			0x2a, 0x30, 0x3f, 0x8e,
      +		},
      +	},
      +	{
      +		"password",
      +		"salt",
      +		4096,
      +		[]byte{
      +			0xc5, 0xe4, 0x78, 0xd5, 0x92, 0x88, 0xc8, 0x41,
      +			0xaa, 0x53, 0x0d, 0xb6, 0x84, 0x5c, 0x4c, 0x8d,
      +			0x96, 0x28, 0x93, 0xa0,
      +		},
      +	},
      +	{
      +		"passwordPASSWORDpassword",
      +		"saltSALTsaltSALTsaltSALTsaltSALTsalt",
      +		4096,
      +		[]byte{
      +			0x34, 0x8c, 0x89, 0xdb, 0xcb, 0xd3, 0x2b, 0x2f,
      +			0x32, 0xd8, 0x14, 0xb8, 0x11, 0x6e, 0x84, 0xcf,
      +			0x2b, 0x17, 0x34, 0x7e, 0xbc, 0x18, 0x00, 0x18,
      +			0x1c,
      +		},
      +	},
      +	{
      +		"pass\000word",
      +		"sa\000lt",
      +		4096,
      +		[]byte{
      +			0x89, 0xb6, 0x9d, 0x05, 0x16, 0xf8, 0x29, 0x89,
      +			0x3c, 0x69, 0x62, 0x26, 0x65, 0x0a, 0x86, 0x87,
      +		},
      +	},
      +}
      +
      +func testHash(t *testing.T, h func() hash.Hash, hashName string, vectors []testVector) {
      +	for i, v := range vectors {
      +		o := Key([]byte(v.password), []byte(v.salt), v.iter, len(v.output), h)
      +		if !bytes.Equal(o, v.output) {
      +			t.Errorf("%s %d: expected %x, got %x", hashName, i, v.output, o)
      +		}
      +	}
      +}
      +
      +func TestWithHMACSHA1(t *testing.T) {
      +	testHash(t, sha1.New, "SHA1", sha1TestVectors)
      +}
      +
      +func TestWithHMACSHA256(t *testing.T) {
      +	testHash(t, sha256.New, "SHA256", sha256TestVectors)
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go
      new file mode 100644
      index 00000000..284d2a68
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go
      @@ -0,0 +1,50 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package pkcs12
      +
      +import (
      +	"errors"
      +	"unicode/utf16"
      +)
      +
      +// bmpString returns s encoded in UCS-2 with a zero terminator.
      +func bmpString(s string) ([]byte, error) {
      +	// References:
      +	// https://tools.ietf.org/html/rfc7292#appendix-B.1
      +	// http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
      +	//  - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes
      +	//	  EncodeRune returns 0xfffd if the rune does not need special encoding
      +	//  - the above RFC provides the info that BMPStrings are NULL terminated.
      +
      +	ret := make([]byte, 0, 2*len(s)+2)
      +
      +	for _, r := range s {
      +		if t, _ := utf16.EncodeRune(r); t != 0xfffd {
      +			return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2")
      +		}
      +		ret = append(ret, byte(r/256), byte(r%256))
      +	}
      +
      +	return append(ret, 0, 0), nil
      +}
      +
      +func decodeBMPString(bmpString []byte) (string, error) {
      +	if len(bmpString)%2 != 0 {
      +		return "", errors.New("pkcs12: odd-length BMP string")
      +	}
      +
      +	// strip terminator if present
      +	if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
      +		bmpString = bmpString[:l-2]
      +	}
      +
      +	s := make([]uint16, 0, len(bmpString)/2)
      +	for len(bmpString) > 0 {
      +		s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
      +		bmpString = bmpString[2:]
      +	}
      +
      +	return string(utf16.Decode(s)), nil
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string_test.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string_test.go
      new file mode 100644
      index 00000000..7fca55f4
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/bmp-string_test.go
      @@ -0,0 +1,63 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package pkcs12
      +
      +import (
      +	"bytes"
      +	"encoding/hex"
      +	"testing"
      +)
      +
      +var bmpStringTests = []struct {
      +	in          string
      +	expectedHex string
      +	shouldFail  bool
      +}{
      +	{"", "0000", false},
      +	// Example from https://tools.ietf.org/html/rfc7292#appendix-B.
      +	{"Beavis", "0042006500610076006900730000", false},
      +	// Some characters from the "Letterlike Symbols Unicode block".
      +	{"\u2115 - Double-struck N", "21150020002d00200044006f00750062006c0065002d00730074007200750063006b0020004e0000", false},
      +	// any character outside the BMP should trigger an error.
      +	{"\U0001f000 East wind (Mahjong)", "", true},
      +}
      +
      +func TestBMPString(t *testing.T) {
      +	for i, test := range bmpStringTests {
      +		expected, err := hex.DecodeString(test.expectedHex)
      +		if err != nil {
      +			t.Fatalf("#%d: failed to decode expectation", i)
      +		}
      +
      +		out, err := bmpString(test.in)
      +		if err == nil && test.shouldFail {
      +			t.Errorf("#%d: expected to fail, but produced %x", i, out)
      +			continue
      +		}
      +
      +		if err != nil && !test.shouldFail {
      +			t.Errorf("#%d: failed unexpectedly: %s", i, err)
      +			continue
      +		}
      +
      +		if !test.shouldFail {
      +			if !bytes.Equal(out, expected) {
      +				t.Errorf("#%d: expected %s, got %x", i, test.expectedHex, out)
      +				continue
      +			}
      +
      +			roundTrip, err := decodeBMPString(out)
      +			if err != nil {
      +				t.Errorf("#%d: decoding output gave an error: %s", i, err)
      +				continue
      +			}
      +
      +			if roundTrip != test.in {
      +				t.Errorf("#%d: decoding output resulted in %q, but it should have been %q", i, roundTrip, test.in)
      +				continue
      +			}
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go
      new file mode 100644
      index 00000000..4bd4470e
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go
      @@ -0,0 +1,131 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package pkcs12
      +
      +import (
      +	"bytes"
      +	"crypto/cipher"
      +	"crypto/des"
      +	"crypto/x509/pkix"
      +	"encoding/asn1"
      +	"errors"
      +
      +	"golang.org/x/crypto/pkcs12/internal/rc2"
      +)
      +
      +var (
      +	oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
      +	oidPBEWithSHAAnd40BitRC2CBC      = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6})
      +)
      +
      +// pbeCipher is an abstraction of a PKCS#12 cipher.
      +type pbeCipher interface {
      +	// create returns a cipher.Block given a key.
      +	create(key []byte) (cipher.Block, error)
      +	// deriveKey returns a key derived from the given password and salt.
      +	deriveKey(salt, password []byte, iterations int) []byte
      +	// deriveKey returns an IV derived from the given password and salt.
      +	deriveIV(salt, password []byte, iterations int) []byte
      +}
      +
      +type shaWithTripleDESCBC struct{}
      +
      +func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) {
      +	return des.NewTripleDESCipher(key)
      +}
      +
      +func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte {
      +	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24)
      +}
      +
      +func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte {
      +	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
      +}
      +
      +type shaWith40BitRC2CBC struct{}
      +
      +func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) {
      +	return rc2.New(key, len(key)*8)
      +}
      +
      +func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte {
      +	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5)
      +}
      +
      +func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte {
      +	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
      +}
      +
      +type pbeParams struct {
      +	Salt       []byte
      +	Iterations int
      +}
      +
      +func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) {
      +	var cipherType pbeCipher
      +
      +	switch {
      +	case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC):
      +		cipherType = shaWithTripleDESCBC{}
      +	case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC):
      +		cipherType = shaWith40BitRC2CBC{}
      +	default:
      +		return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported")
      +	}
      +
      +	var params pbeParams
      +	if err := unmarshal(algorithm.Parameters.FullBytes, &params); err != nil {
      +		return nil, 0, err
      +	}
      +
      +	key := cipherType.deriveKey(params.Salt, password, params.Iterations)
      +	iv := cipherType.deriveIV(params.Salt, password, params.Iterations)
      +
      +	block, err := cipherType.create(key)
      +	if err != nil {
      +		return nil, 0, err
      +	}
      +
      +	return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil
      +}
      +
      +func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) {
      +	cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	encrypted := info.Data()
      +	if len(encrypted) == 0 {
      +		return nil, errors.New("pkcs12: empty encrypted data")
      +	}
      +	if len(encrypted)%blockSize != 0 {
      +		return nil, errors.New("pkcs12: input is not a multiple of the block size")
      +	}
      +	decrypted = make([]byte, len(encrypted))
      +	cbc.CryptBlocks(decrypted, encrypted)
      +
      +	psLen := int(decrypted[len(decrypted)-1])
      +	if psLen == 0 || psLen > blockSize {
      +		return nil, ErrDecryption
      +	}
      +
      +	if len(decrypted) < psLen {
      +		return nil, ErrDecryption
      +	}
      +	ps := decrypted[len(decrypted)-psLen:]
      +	decrypted = decrypted[:len(decrypted)-psLen]
      +	if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 {
      +		return nil, ErrDecryption
      +	}
      +
      +	return
      +}
      +
      +// decryptable abstracts a object that contains ciphertext.
      +type decryptable interface {
      +	Algorithm() pkix.AlgorithmIdentifier
      +	Data() []byte
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto_test.go b/vendor/golang.org/x/crypto/pkcs12/crypto_test.go
      new file mode 100644
      index 00000000..eb4dae8f
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/crypto_test.go
      @@ -0,0 +1,125 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package pkcs12
      +
      +import (
      +	"bytes"
      +	"crypto/x509/pkix"
      +	"encoding/asn1"
      +	"testing"
      +)
      +
      +var sha1WithTripleDES = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
      +
      +func TestPbDecrypterFor(t *testing.T) {
      +	params, _ := asn1.Marshal(pbeParams{
      +		Salt:       []byte{1, 2, 3, 4, 5, 6, 7, 8},
      +		Iterations: 2048,
      +	})
      +	alg := pkix.AlgorithmIdentifier{
      +		Algorithm: asn1.ObjectIdentifier([]int{1, 2, 3}),
      +		Parameters: asn1.RawValue{
      +			FullBytes: params,
      +		},
      +	}
      +
      +	pass, _ := bmpString("Sesame open")
      +
      +	_, _, err := pbDecrypterFor(alg, pass)
      +	if _, ok := err.(NotImplementedError); !ok {
      +		t.Errorf("expected not implemented error, got: %T %s", err, err)
      +	}
      +
      +	alg.Algorithm = sha1WithTripleDES
      +	cbc, blockSize, err := pbDecrypterFor(alg, pass)
      +	if err != nil {
      +		t.Errorf("unexpected error from pbDecrypterFor %v", err)
      +	}
      +	if blockSize != 8 {
      +		t.Errorf("unexpected block size %d, wanted 8", blockSize)
      +	}
      +
      +	plaintext := []byte{1, 2, 3, 4, 5, 6, 7, 8}
      +	expectedCiphertext := []byte{185, 73, 135, 249, 137, 1, 122, 247}
      +	ciphertext := make([]byte, len(plaintext))
      +	cbc.CryptBlocks(ciphertext, plaintext)
      +
      +	if bytes.Compare(ciphertext, expectedCiphertext) != 0 {
      +		t.Errorf("bad ciphertext, got %x but wanted %x", ciphertext, expectedCiphertext)
      +	}
      +}
      +
      +var pbDecryptTests = []struct {
      +	in            []byte
      +	expected      []byte
      +	expectedError error
      +}{
      +	{
      +		[]byte("\x33\x73\xf3\x9f\xda\x49\xae\xfc\xa0\x9a\xdf\x5a\x58\xa0\xea\x46"), // 7 padding bytes
      +		[]byte("A secret!"),
      +		nil,
      +	},
      +	{
      +		[]byte("\x33\x73\xf3\x9f\xda\x49\xae\xfc\x96\x24\x2f\x71\x7e\x32\x3f\xe7"), // 8 padding bytes
      +		[]byte("A secret"),
      +		nil,
      +	},
      +	{
      +		[]byte("\x35\x0c\xc0\x8d\xab\xa9\x5d\x30\x7f\x9a\xec\x6a\xd8\x9b\x9c\xd9"), // 9 padding bytes, incorrect
      +		nil,
      +		ErrDecryption,
      +	},
      +	{
      +		[]byte("\xb2\xf9\x6e\x06\x60\xae\x20\xcf\x08\xa0\x7b\xd9\x6b\x20\xef\x41"), // incorrect padding bytes: [ ... 0x04 0x02 ]
      +		nil,
      +		ErrDecryption,
      +	},
      +}
      +
      +func TestPbDecrypt(t *testing.T) {
      +	for i, test := range pbDecryptTests {
      +		decryptable := testDecryptable{
      +			data: test.in,
      +			algorithm: pkix.AlgorithmIdentifier{
      +				Algorithm: sha1WithTripleDES,
      +				Parameters: pbeParams{
      +					Salt:       []byte("\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8"),
      +					Iterations: 4096,
      +				}.RawASN1(),
      +			},
      +		}
      +		password, _ := bmpString("sesame")
      +
      +		plaintext, err := pbDecrypt(decryptable, password)
      +		if err != test.expectedError {
      +			t.Errorf("#%d: got error %q, but wanted %q", i, err, test.expectedError)
      +			continue
      +		}
      +
      +		if !bytes.Equal(plaintext, test.expected) {
      +			t.Errorf("#%d: got %x, but wanted %x", i, plaintext, test.expected)
      +		}
      +	}
      +}
      +
      +type testDecryptable struct {
      +	data      []byte
      +	algorithm pkix.AlgorithmIdentifier
      +}
      +
      +func (d testDecryptable) Algorithm() pkix.AlgorithmIdentifier { return d.algorithm }
      +func (d testDecryptable) Data() []byte                        { return d.data }
      +
      +func (params pbeParams) RawASN1() (raw asn1.RawValue) {
      +	asn1Bytes, err := asn1.Marshal(params)
      +	if err != nil {
      +		panic(err)
      +	}
      +	_, err = asn1.Unmarshal(asn1Bytes, &raw)
      +	if err != nil {
      +		panic(err)
      +	}
      +	return
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/errors.go b/vendor/golang.org/x/crypto/pkcs12/errors.go
      new file mode 100644
      index 00000000..7377ce6f
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/errors.go
      @@ -0,0 +1,23 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package pkcs12
      +
      +import "errors"
      +
      +var (
      +	// ErrDecryption represents a failure to decrypt the input.
      +	ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding")
      +
      +	// ErrIncorrectPassword is returned when an incorrect password is detected.
      +	// Usually, P12/PFX data is signed to be able to verify the password.
      +	ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect")
      +)
      +
      +// NotImplementedError indicates that the input is not currently supported.
      +type NotImplementedError string
      +
      +func (e NotImplementedError) Error() string {
      +	return "pkcs12: " + string(e)
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go
      new file mode 100644
      index 00000000..3347f338
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go
      @@ -0,0 +1,27 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package rc2
      +
      +import (
      +	"testing"
      +)
      +
      +func BenchmarkEncrypt(b *testing.B) {
      +	r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64)
      +	b.ResetTimer()
      +	var src [8]byte
      +	for i := 0; i < b.N; i++ {
      +		r.Encrypt(src[:], src[:])
      +	}
      +}
      +
      +func BenchmarkDecrypt(b *testing.B) {
      +	r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64)
      +	b.ResetTimer()
      +	var src [8]byte
      +	for i := 0; i < b.N; i++ {
      +		r.Decrypt(src[:], src[:])
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
      new file mode 100644
      index 00000000..8c709025
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
      @@ -0,0 +1,274 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package rc2 implements the RC2 cipher
      +/*
      +https://www.ietf.org/rfc/rfc2268.txt
      +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf
      +
      +This code is licensed under the MIT license.
      +*/
      +package rc2
      +
      +import (
      +	"crypto/cipher"
      +	"encoding/binary"
      +)
      +
      +// The rc2 block size in bytes
      +const BlockSize = 8
      +
      +type rc2Cipher struct {
      +	k [64]uint16
      +}
      +
      +// New returns a new rc2 cipher with the given key and effective key length t1
      +func New(key []byte, t1 int) (cipher.Block, error) {
      +	// TODO(dgryski): error checking for key length
      +	return &rc2Cipher{
      +		k: expandKey(key, t1),
      +	}, nil
      +}
      +
      +func (*rc2Cipher) BlockSize() int { return BlockSize }
      +
      +var piTable = [256]byte{
      +	0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d,
      +	0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2,
      +	0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32,
      +	0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82,
      +	0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc,
      +	0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26,
      +	0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03,
      +	0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7,
      +	0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a,
      +	0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec,
      +	0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39,
      +	0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31,
      +	0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9,
      +	0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9,
      +	0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e,
      +	0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad,
      +}
      +
      +func expandKey(key []byte, t1 int) [64]uint16 {
      +
      +	l := make([]byte, 128)
      +	copy(l, key)
      +
      +	var t = len(key)
      +	var t8 = (t1 + 7) / 8
      +	var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8))))
      +
      +	for i := len(key); i < 128; i++ {
      +		l[i] = piTable[l[i-1]+l[uint8(i-t)]]
      +	}
      +
      +	l[128-t8] = piTable[l[128-t8]&tm]
      +
      +	for i := 127 - t8; i >= 0; i-- {
      +		l[i] = piTable[l[i+1]^l[i+t8]]
      +	}
      +
      +	var k [64]uint16
      +
      +	for i := range k {
      +		k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256
      +	}
      +
      +	return k
      +}
      +
      +func rotl16(x uint16, b uint) uint16 {
      +	return (x >> (16 - b)) | (x << b)
      +}
      +
      +func (c *rc2Cipher) Encrypt(dst, src []byte) {
      +
      +	r0 := binary.LittleEndian.Uint16(src[0:])
      +	r1 := binary.LittleEndian.Uint16(src[2:])
      +	r2 := binary.LittleEndian.Uint16(src[4:])
      +	r3 := binary.LittleEndian.Uint16(src[6:])
      +
      +	var j int
      +
      +	for j <= 16 {
      +		// mix r0
      +		r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
      +		r0 = rotl16(r0, 1)
      +		j++
      +
      +		// mix r1
      +		r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
      +		r1 = rotl16(r1, 2)
      +		j++
      +
      +		// mix r2
      +		r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
      +		r2 = rotl16(r2, 3)
      +		j++
      +
      +		// mix r3
      +		r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
      +		r3 = rotl16(r3, 5)
      +		j++
      +
      +	}
      +
      +	r0 = r0 + c.k[r3&63]
      +	r1 = r1 + c.k[r0&63]
      +	r2 = r2 + c.k[r1&63]
      +	r3 = r3 + c.k[r2&63]
      +
      +	for j <= 40 {
      +
      +		// mix r0
      +		r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
      +		r0 = rotl16(r0, 1)
      +		j++
      +
      +		// mix r1
      +		r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
      +		r1 = rotl16(r1, 2)
      +		j++
      +
      +		// mix r2
      +		r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
      +		r2 = rotl16(r2, 3)
      +		j++
      +
      +		// mix r3
      +		r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
      +		r3 = rotl16(r3, 5)
      +		j++
      +
      +	}
      +
      +	r0 = r0 + c.k[r3&63]
      +	r1 = r1 + c.k[r0&63]
      +	r2 = r2 + c.k[r1&63]
      +	r3 = r3 + c.k[r2&63]
      +
      +	for j <= 60 {
      +
      +		// mix r0
      +		r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
      +		r0 = rotl16(r0, 1)
      +		j++
      +
      +		// mix r1
      +		r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
      +		r1 = rotl16(r1, 2)
      +		j++
      +
      +		// mix r2
      +		r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
      +		r2 = rotl16(r2, 3)
      +		j++
      +
      +		// mix r3
      +		r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
      +		r3 = rotl16(r3, 5)
      +		j++
      +	}
      +
      +	binary.LittleEndian.PutUint16(dst[0:], r0)
      +	binary.LittleEndian.PutUint16(dst[2:], r1)
      +	binary.LittleEndian.PutUint16(dst[4:], r2)
      +	binary.LittleEndian.PutUint16(dst[6:], r3)
      +}
      +
      +func (c *rc2Cipher) Decrypt(dst, src []byte) {
      +
      +	r0 := binary.LittleEndian.Uint16(src[0:])
      +	r1 := binary.LittleEndian.Uint16(src[2:])
      +	r2 := binary.LittleEndian.Uint16(src[4:])
      +	r3 := binary.LittleEndian.Uint16(src[6:])
      +
      +	j := 63
      +
      +	for j >= 44 {
      +		// unmix r3
      +		r3 = rotl16(r3, 16-5)
      +		r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
      +		j--
      +
      +		// unmix r2
      +		r2 = rotl16(r2, 16-3)
      +		r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
      +		j--
      +
      +		// unmix r1
      +		r1 = rotl16(r1, 16-2)
      +		r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
      +		j--
      +
      +		// unmix r0
      +		r0 = rotl16(r0, 16-1)
      +		r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
      +		j--
      +	}
      +
      +	r3 = r3 - c.k[r2&63]
      +	r2 = r2 - c.k[r1&63]
      +	r1 = r1 - c.k[r0&63]
      +	r0 = r0 - c.k[r3&63]
      +
      +	for j >= 20 {
      +		// unmix r3
      +		r3 = rotl16(r3, 16-5)
      +		r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
      +		j--
      +
      +		// unmix r2
      +		r2 = rotl16(r2, 16-3)
      +		r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
      +		j--
      +
      +		// unmix r1
      +		r1 = rotl16(r1, 16-2)
      +		r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
      +		j--
      +
      +		// unmix r0
      +		r0 = rotl16(r0, 16-1)
      +		r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
      +		j--
      +
      +	}
      +
      +	r3 = r3 - c.k[r2&63]
      +	r2 = r2 - c.k[r1&63]
      +	r1 = r1 - c.k[r0&63]
      +	r0 = r0 - c.k[r3&63]
      +
      +	for j >= 0 {
      +
      +		// unmix r3
      +		r3 = rotl16(r3, 16-5)
      +		r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
      +		j--
      +
      +		// unmix r2
      +		r2 = rotl16(r2, 16-3)
      +		r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
      +		j--
      +
      +		// unmix r1
      +		r1 = rotl16(r1, 16-2)
      +		r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
      +		j--
      +
      +		// unmix r0
      +		r0 = rotl16(r0, 16-1)
      +		r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
      +		j--
      +
      +	}
      +
      +	binary.LittleEndian.PutUint16(dst[0:], r0)
      +	binary.LittleEndian.PutUint16(dst[2:], r1)
      +	binary.LittleEndian.PutUint16(dst[4:], r2)
      +	binary.LittleEndian.PutUint16(dst[6:], r3)
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go
      new file mode 100644
      index 00000000..8a49dfaf
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go
      @@ -0,0 +1,93 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package rc2
      +
      +import (
      +	"bytes"
      +	"encoding/hex"
      +	"testing"
      +)
      +
      +func TestEncryptDecrypt(t *testing.T) {
      +
      +	// TODO(dgryski): add the rest of the test vectors from the RFC
      +	var tests = []struct {
      +		key    string
      +		plain  string
      +		cipher string
      +		t1     int
      +	}{
      +		{
      +			"0000000000000000",
      +			"0000000000000000",
      +			"ebb773f993278eff",
      +			63,
      +		},
      +		{
      +			"ffffffffffffffff",
      +			"ffffffffffffffff",
      +			"278b27e42e2f0d49",
      +			64,
      +		},
      +		{
      +			"3000000000000000",
      +			"1000000000000001",
      +			"30649edf9be7d2c2",
      +			64,
      +		},
      +		{
      +			"88",
      +			"0000000000000000",
      +			"61a8a244adacccf0",
      +			64,
      +		},
      +		{
      +			"88bca90e90875a",
      +			"0000000000000000",
      +			"6ccf4308974c267f",
      +			64,
      +		},
      +		{
      +			"88bca90e90875a7f0f79c384627bafb2",
      +			"0000000000000000",
      +			"1a807d272bbe5db1",
      +			64,
      +		},
      +		{
      +			"88bca90e90875a7f0f79c384627bafb2",
      +			"0000000000000000",
      +			"2269552ab0f85ca6",
      +			128,
      +		},
      +		{
      +			"88bca90e90875a7f0f79c384627bafb216f80a6f85920584c42fceb0be255daf1e",
      +			"0000000000000000",
      +			"5b78d3a43dfff1f1",
      +			129,
      +		},
      +	}
      +
      +	for _, tt := range tests {
      +		k, _ := hex.DecodeString(tt.key)
      +		p, _ := hex.DecodeString(tt.plain)
      +		c, _ := hex.DecodeString(tt.cipher)
      +
      +		b, _ := New(k, tt.t1)
      +
      +		var dst [8]byte
      +
      +		b.Encrypt(dst[:], p)
      +
      +		if !bytes.Equal(dst[:], c) {
      +			t.Errorf("encrypt failed: got % 2x wanted % 2x\n", dst, c)
      +		}
      +
      +		b.Decrypt(dst[:], c)
      +
      +		if !bytes.Equal(dst[:], p) {
      +			t.Errorf("decrypt failed: got % 2x wanted % 2x\n", dst, p)
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/mac.go b/vendor/golang.org/x/crypto/pkcs12/mac.go
      new file mode 100644
      index 00000000..5f38aa7d
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/mac.go
      @@ -0,0 +1,45 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package pkcs12
      +
      +import (
      +	"crypto/hmac"
      +	"crypto/sha1"
      +	"crypto/x509/pkix"
      +	"encoding/asn1"
      +)
      +
      +type macData struct {
      +	Mac        digestInfo
      +	MacSalt    []byte
      +	Iterations int `asn1:"optional,default:1"`
      +}
      +
      +// from PKCS#7:
      +type digestInfo struct {
      +	Algorithm pkix.AlgorithmIdentifier
      +	Digest    []byte
      +}
      +
      +var (
      +	oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26})
      +)
      +
      +func verifyMac(macData *macData, message, password []byte) error {
      +	if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) {
      +		return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String())
      +	}
      +
      +	key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20)
      +
      +	mac := hmac.New(sha1.New, key)
      +	mac.Write(message)
      +	expectedMAC := mac.Sum(nil)
      +
      +	if !hmac.Equal(macData.Mac.Digest, expectedMAC) {
      +		return ErrIncorrectPassword
      +	}
      +	return nil
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/mac_test.go b/vendor/golang.org/x/crypto/pkcs12/mac_test.go
      new file mode 100644
      index 00000000..1ed4ff21
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/mac_test.go
      @@ -0,0 +1,42 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package pkcs12
      +
      +import (
      +	"encoding/asn1"
      +	"testing"
      +)
      +
      +func TestVerifyMac(t *testing.T) {
      +	td := macData{
      +		Mac: digestInfo{
      +			Digest: []byte{0x18, 0x20, 0x3d, 0xff, 0x1e, 0x16, 0xf4, 0x92, 0xf2, 0xaf, 0xc8, 0x91, 0xa9, 0xba, 0xd6, 0xca, 0x9d, 0xee, 0x51, 0x93},
      +		},
      +		MacSalt:    []byte{1, 2, 3, 4, 5, 6, 7, 8},
      +		Iterations: 2048,
      +	}
      +
      +	message := []byte{11, 12, 13, 14, 15}
      +	password, _ := bmpString("")
      +
      +	td.Mac.Algorithm.Algorithm = asn1.ObjectIdentifier([]int{1, 2, 3})
      +	err := verifyMac(&td, message, password)
      +	if _, ok := err.(NotImplementedError); !ok {
      +		t.Errorf("err: %v", err)
      +	}
      +
      +	td.Mac.Algorithm.Algorithm = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26})
      +	err = verifyMac(&td, message, password)
      +	if err != ErrIncorrectPassword {
      +		t.Errorf("Expected incorrect password, got err: %v", err)
      +	}
      +
      +	password, _ = bmpString("Sesame open")
      +	err = verifyMac(&td, message, password)
      +	if err != nil {
      +		t.Errorf("err: %v", err)
      +	}
      +
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go
      new file mode 100644
      index 00000000..5c419d41
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go
      @@ -0,0 +1,170 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package pkcs12
      +
      +import (
      +	"bytes"
      +	"crypto/sha1"
      +	"math/big"
      +)
      +
      +var (
      +	one = big.NewInt(1)
      +)
      +
      +// sha1Sum returns the SHA-1 hash of in.
      +func sha1Sum(in []byte) []byte {
      +	sum := sha1.Sum(in)
      +	return sum[:]
      +}
      +
      +// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of
      +// repeats of pattern.
      +func fillWithRepeats(pattern []byte, v int) []byte {
      +	if len(pattern) == 0 {
      +		return nil
      +	}
      +	outputLen := v * ((len(pattern) + v - 1) / v)
      +	return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen]
      +}
      +
      +func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) {
      +	// implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments
      +
      +	//    Let H be a hash function built around a compression function f:
      +
      +	//       Z_2^u x Z_2^v -> Z_2^u
      +
      +	//    (that is, H has a chaining variable and output of length u bits, and
      +	//    the message input to the compression function of H is v bits).  The
      +	//    values for u and v are as follows:
      +
      +	//            HASH FUNCTION     VALUE u        VALUE v
      +	//              MD2, MD5          128            512
      +	//                SHA-1           160            512
      +	//               SHA-224          224            512
      +	//               SHA-256          256            512
      +	//               SHA-384          384            1024
      +	//               SHA-512          512            1024
      +	//             SHA-512/224        224            1024
      +	//             SHA-512/256        256            1024
      +
      +	//    Furthermore, let r be the iteration count.
      +
      +	//    We assume here that u and v are both multiples of 8, as are the
      +	//    lengths of the password and salt strings (which we denote by p and s,
      +	//    respectively) and the number n of pseudorandom bits required.  In
      +	//    addition, u and v are of course non-zero.
      +
      +	//    For information on security considerations for MD5 [19], see [25] and
      +	//    [1], and on those for MD2, see [18].
      +
      +	//    The following procedure can be used to produce pseudorandom bits for
      +	//    a particular "purpose" that is identified by a byte called "ID".
      +	//    This standard specifies 3 different values for the ID byte:
      +
      +	//    1.  If ID=1, then the pseudorandom bits being produced are to be used
      +	//        as key material for performing encryption or decryption.
      +
      +	//    2.  If ID=2, then the pseudorandom bits being produced are to be used
      +	//        as an IV (Initial Value) for encryption or decryption.
      +
      +	//    3.  If ID=3, then the pseudorandom bits being produced are to be used
      +	//        as an integrity key for MACing.
      +
      +	//    1.  Construct a string, D (the "diversifier"), by concatenating v/8
      +	//        copies of ID.
      +	var D []byte
      +	for i := 0; i < v; i++ {
      +		D = append(D, ID)
      +	}
      +
      +	//    2.  Concatenate copies of the salt together to create a string S of
      +	//        length v(ceiling(s/v)) bits (the final copy of the salt may be
      +	//        truncated to create S).  Note that if the salt is the empty
      +	//        string, then so is S.
      +
      +	S := fillWithRepeats(salt, v)
      +
      +	//    3.  Concatenate copies of the password together to create a string P
      +	//        of length v(ceiling(p/v)) bits (the final copy of the password
      +	//        may be truncated to create P).  Note that if the password is the
      +	//        empty string, then so is P.
      +
      +	P := fillWithRepeats(password, v)
      +
      +	//    4.  Set I=S||P to be the concatenation of S and P.
      +	I := append(S, P...)
      +
      +	//    5.  Set c=ceiling(n/u).
      +	c := (size + u - 1) / u
      +
      +	//    6.  For i=1, 2, ..., c, do the following:
      +	A := make([]byte, c*20)
      +	var IjBuf []byte
      +	for i := 0; i < c; i++ {
      +		//        A.  Set A2=H^r(D||I). (i.e., the r-th hash of D||1,
      +		//            H(H(H(... H(D||I))))
      +		Ai := hash(append(D, I...))
      +		for j := 1; j < r; j++ {
      +			Ai = hash(Ai)
      +		}
      +		copy(A[i*20:], Ai[:])
      +
      +		if i < c-1 { // skip on last iteration
      +			// B.  Concatenate copies of Ai to create a string B of length v
      +			//     bits (the final copy of Ai may be truncated to create B).
      +			var B []byte
      +			for len(B) < v {
      +				B = append(B, Ai[:]...)
      +			}
      +			B = B[:v]
      +
      +			// C.  Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit
      +			//     blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by
      +			//     setting I_j=(I_j+B+1) mod 2^v for each j.
      +			{
      +				Bbi := new(big.Int).SetBytes(B)
      +				Ij := new(big.Int)
      +
      +				for j := 0; j < len(I)/v; j++ {
      +					Ij.SetBytes(I[j*v : (j+1)*v])
      +					Ij.Add(Ij, Bbi)
      +					Ij.Add(Ij, one)
      +					Ijb := Ij.Bytes()
      +					// We expect Ijb to be exactly v bytes,
      +					// if it is longer or shorter we must
      +					// adjust it accordingly.
      +					if len(Ijb) > v {
      +						Ijb = Ijb[len(Ijb)-v:]
      +					}
      +					if len(Ijb) < v {
      +						if IjBuf == nil {
      +							IjBuf = make([]byte, v)
      +						}
      +						bytesShort := v - len(Ijb)
      +						for i := 0; i < bytesShort; i++ {
      +							IjBuf[i] = 0
      +						}
      +						copy(IjBuf[bytesShort:], Ijb)
      +						Ijb = IjBuf
      +					}
      +					copy(I[j*v:(j+1)*v], Ijb)
      +				}
      +			}
      +		}
      +	}
      +	//    7.  Concatenate A_1, A_2, ..., A_c together to form a pseudorandom
      +	//        bit string, A.
      +
      +	//    8.  Use the first n bits of A as the output of this entire process.
      +	return A[:size]
      +
      +	//    If the above process is being used to generate a DES key, the process
      +	//    should be used to create 64 random bits, and the key's parity bits
      +	//    should be set after the 64 bits have been produced.  Similar concerns
      +	//    hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any
      +	//    similar keys with parity bits "built into them".
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/pbkdf_test.go b/vendor/golang.org/x/crypto/pkcs12/pbkdf_test.go
      new file mode 100644
      index 00000000..262037d7
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/pbkdf_test.go
      @@ -0,0 +1,34 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package pkcs12
      +
      +import (
      +	"bytes"
      +	"testing"
      +)
      +
      +func TestThatPBKDFWorksCorrectlyForLongKeys(t *testing.T) {
      +	cipherInfo := shaWithTripleDESCBC{}
      +
      +	salt := []byte("\xff\xff\xff\xff\xff\xff\xff\xff")
      +	password, _ := bmpString("sesame")
      +	key := cipherInfo.deriveKey(salt, password, 2048)
      +
      +	if expected := []byte("\x7c\xd9\xfd\x3e\x2b\x3b\xe7\x69\x1a\x44\xe3\xbe\xf0\xf9\xea\x0f\xb9\xb8\x97\xd4\xe3\x25\xd9\xd1"); bytes.Compare(key, expected) != 0 {
      +		t.Fatalf("expected key '%x', but found '%x'", expected, key)
      +	}
      +}
      +
      +func TestThatPBKDFHandlesLeadingZeros(t *testing.T) {
      +	// This test triggers a case where I_j (in step 6C) ends up with leading zero
      +	// byte, meaning that len(Ijb) < v (leading zeros get stripped by big.Int).
      +	// This was previously causing bug whereby certain inputs would break the
      +	// derivation and produce the wrong output.
      +	key := pbkdf(sha1Sum, 20, 64, []byte("\xf3\x7e\x05\xb5\x18\x32\x4b\x4b"), []byte("\x00\x00"), 2048, 1, 24)
      +	expected := []byte("\x00\xf7\x59\xff\x47\xd1\x4d\xd0\x36\x65\xd5\x94\x3c\xb3\xc4\xa3\x9a\x25\x55\xc0\x2a\xed\x66\xe1")
      +	if bytes.Compare(key, expected) != 0 {
      +		t.Fatalf("expected key '%x', but found '%x'", expected, key)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go
      new file mode 100644
      index 00000000..ad6341e6
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go
      @@ -0,0 +1,342 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package pkcs12 implements some of PKCS#12.
      +//
      +// This implementation is distilled from https://tools.ietf.org/html/rfc7292
      +// and referenced documents. It is intended for decoding P12/PFX-stored
      +// certificates and keys for use with the crypto/tls package.
      +package pkcs12
      +
      +import (
      +	"crypto/ecdsa"
      +	"crypto/rsa"
      +	"crypto/x509"
      +	"crypto/x509/pkix"
      +	"encoding/asn1"
      +	"encoding/hex"
      +	"encoding/pem"
      +	"errors"
      +)
      +
      +var (
      +	oidDataContentType          = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1})
      +	oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6})
      +
      +	oidFriendlyName     = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20})
      +	oidLocalKeyID       = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21})
      +	oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1})
      +)
      +
      +type pfxPdu struct {
      +	Version  int
      +	AuthSafe contentInfo
      +	MacData  macData `asn1:"optional"`
      +}
      +
      +type contentInfo struct {
      +	ContentType asn1.ObjectIdentifier
      +	Content     asn1.RawValue `asn1:"tag:0,explicit,optional"`
      +}
      +
      +type encryptedData struct {
      +	Version              int
      +	EncryptedContentInfo encryptedContentInfo
      +}
      +
      +type encryptedContentInfo struct {
      +	ContentType                asn1.ObjectIdentifier
      +	ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
      +	EncryptedContent           []byte `asn1:"tag:0,optional"`
      +}
      +
      +func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier {
      +	return i.ContentEncryptionAlgorithm
      +}
      +
      +func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent }
      +
      +type safeBag struct {
      +	Id         asn1.ObjectIdentifier
      +	Value      asn1.RawValue     `asn1:"tag:0,explicit"`
      +	Attributes []pkcs12Attribute `asn1:"set,optional"`
      +}
      +
      +type pkcs12Attribute struct {
      +	Id    asn1.ObjectIdentifier
      +	Value asn1.RawValue `asn1:"set"`
      +}
      +
      +type encryptedPrivateKeyInfo struct {
      +	AlgorithmIdentifier pkix.AlgorithmIdentifier
      +	EncryptedData       []byte
      +}
      +
      +func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier {
      +	return i.AlgorithmIdentifier
      +}
      +
      +func (i encryptedPrivateKeyInfo) Data() []byte {
      +	return i.EncryptedData
      +}
      +
      +// PEM block types
      +const (
      +	certificateType = "CERTIFICATE"
      +	privateKeyType  = "PRIVATE KEY"
      +)
      +
      +// unmarshal calls asn1.Unmarshal, but also returns an error if there is any
      +// trailing data after unmarshaling.
      +func unmarshal(in []byte, out interface{}) error {
      +	trailing, err := asn1.Unmarshal(in, out)
      +	if err != nil {
      +		return err
      +	}
      +	if len(trailing) != 0 {
      +		return errors.New("pkcs12: trailing data found")
      +	}
      +	return nil
      +}
      +
      +// ConvertToPEM converts all "safe bags" contained in pfxData to PEM blocks.
      +func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) {
      +	encodedPassword, err := bmpString(password)
      +	if err != nil {
      +		return nil, ErrIncorrectPassword
      +	}
      +
      +	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
      +
      +	blocks := make([]*pem.Block, 0, len(bags))
      +	for _, bag := range bags {
      +		block, err := convertBag(&bag, encodedPassword)
      +		if err != nil {
      +			return nil, err
      +		}
      +		blocks = append(blocks, block)
      +	}
      +
      +	return blocks, nil
      +}
      +
      +func convertBag(bag *safeBag, password []byte) (*pem.Block, error) {
      +	block := &pem.Block{
      +		Headers: make(map[string]string),
      +	}
      +
      +	for _, attribute := range bag.Attributes {
      +		k, v, err := convertAttribute(&attribute)
      +		if err != nil {
      +			return nil, err
      +		}
      +		block.Headers[k] = v
      +	}
      +
      +	switch {
      +	case bag.Id.Equal(oidCertBag):
      +		block.Type = certificateType
      +		certsData, err := decodeCertBag(bag.Value.Bytes)
      +		if err != nil {
      +			return nil, err
      +		}
      +		block.Bytes = certsData
      +	case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
      +		block.Type = privateKeyType
      +
      +		key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password)
      +		if err != nil {
      +			return nil, err
      +		}
      +
      +		switch key := key.(type) {
      +		case *rsa.PrivateKey:
      +			block.Bytes = x509.MarshalPKCS1PrivateKey(key)
      +		case *ecdsa.PrivateKey:
      +			block.Bytes, err = x509.MarshalECPrivateKey(key)
      +			if err != nil {
      +				return nil, err
      +			}
      +		default:
      +			return nil, errors.New("found unknown private key type in PKCS#8 wrapping")
      +		}
      +	default:
      +		return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String())
      +	}
      +	return block, nil
      +}
      +
      +func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) {
      +	isString := false
      +
      +	switch {
      +	case attribute.Id.Equal(oidFriendlyName):
      +		key = "friendlyName"
      +		isString = true
      +	case attribute.Id.Equal(oidLocalKeyID):
      +		key = "localKeyId"
      +	case attribute.Id.Equal(oidMicrosoftCSPName):
      +		// This key is chosen to match OpenSSL.
      +		key = "Microsoft CSP Name"
      +		isString = true
      +	default:
      +		return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String())
      +	}
      +
      +	if isString {
      +		if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil {
      +			return "", "", err
      +		}
      +		if value, err = decodeBMPString(attribute.Value.Bytes); err != nil {
      +			return "", "", err
      +		}
      +	} else {
      +		var id []byte
      +		if err := unmarshal(attribute.Value.Bytes, &id); err != nil {
      +			return "", "", err
      +		}
      +		value = hex.EncodeToString(id)
      +	}
      +
      +	return key, value, nil
      +}
      +
      +// Decode extracts a certificate and private key from pfxData. This function
      +// assumes that there is only one certificate and only one private key in the
      +// pfxData.
      +func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) {
      +	encodedPassword, err := bmpString(password)
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +
      +	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +
      +	if len(bags) != 2 {
      +		err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU")
      +		return
      +	}
      +
      +	for _, bag := range bags {
      +		switch {
      +		case bag.Id.Equal(oidCertBag):
      +			if certificate != nil {
      +				err = errors.New("pkcs12: expected exactly one certificate bag")
      +			}
      +
      +			certsData, err := decodeCertBag(bag.Value.Bytes)
      +			if err != nil {
      +				return nil, nil, err
      +			}
      +			certs, err := x509.ParseCertificates(certsData)
      +			if err != nil {
      +				return nil, nil, err
      +			}
      +			if len(certs) != 1 {
      +				err = errors.New("pkcs12: expected exactly one certificate in the certBag")
      +				return nil, nil, err
      +			}
      +			certificate = certs[0]
      +
      +		case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
      +			if privateKey != nil {
      +				err = errors.New("pkcs12: expected exactly one key bag")
      +			}
      +
      +			if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil {
      +				return nil, nil, err
      +			}
      +		}
      +	}
      +
      +	if certificate == nil {
      +		return nil, nil, errors.New("pkcs12: certificate missing")
      +	}
      +	if privateKey == nil {
      +		return nil, nil, errors.New("pkcs12: private key missing")
      +	}
      +
      +	return
      +}
      +
      +func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) {
      +	pfx := new(pfxPdu)
      +	if err := unmarshal(p12Data, pfx); err != nil {
      +		return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error())
      +	}
      +
      +	if pfx.Version != 3 {
      +		return nil, nil, NotImplementedError("can only decode v3 PFX PDU's")
      +	}
      +
      +	if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) {
      +		return nil, nil, NotImplementedError("only password-protected PFX is implemented")
      +	}
      +
      +	// unmarshal the explicit bytes in the content for type 'data'
      +	if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil {
      +		return nil, nil, err
      +	}
      +
      +	if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 {
      +		return nil, nil, errors.New("pkcs12: no MAC in data")
      +	}
      +
      +	if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil {
      +		if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 {
      +			// some implementations use an empty byte array
      +			// for the empty string password try one more
      +			// time with empty-empty password
      +			password = nil
      +			err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password)
      +		}
      +		if err != nil {
      +			return nil, nil, err
      +		}
      +	}
      +
      +	var authenticatedSafe []contentInfo
      +	if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil {
      +		return nil, nil, err
      +	}
      +
      +	if len(authenticatedSafe) != 2 {
      +		return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe")
      +	}
      +
      +	for _, ci := range authenticatedSafe {
      +		var data []byte
      +
      +		switch {
      +		case ci.ContentType.Equal(oidDataContentType):
      +			if err := unmarshal(ci.Content.Bytes, &data); err != nil {
      +				return nil, nil, err
      +			}
      +		case ci.ContentType.Equal(oidEncryptedDataContentType):
      +			var encryptedData encryptedData
      +			if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil {
      +				return nil, nil, err
      +			}
      +			if encryptedData.Version != 0 {
      +				return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported")
      +			}
      +			if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil {
      +				return nil, nil, err
      +			}
      +		default:
      +			return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe")
      +		}
      +
      +		var safeContents []safeBag
      +		if err := unmarshal(data, &safeContents); err != nil {
      +			return nil, nil, err
      +		}
      +		bags = append(bags, safeContents...)
      +	}
      +
      +	return bags, password, nil
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12_test.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12_test.go
      new file mode 100644
      index 00000000..14dd2a6c
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12_test.go
      @@ -0,0 +1,138 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package pkcs12
      +
      +import (
      +	"crypto/rsa"
      +	"crypto/tls"
      +	"encoding/base64"
      +	"encoding/pem"
      +	"testing"
      +)
      +
      +func TestPfx(t *testing.T) {
      +	for commonName, base64P12 := range testdata {
      +		p12, _ := base64.StdEncoding.DecodeString(base64P12)
      +
      +		priv, cert, err := Decode(p12, "")
      +		if err != nil {
      +			t.Fatal(err)
      +		}
      +
      +		if err := priv.(*rsa.PrivateKey).Validate(); err != nil {
      +			t.Errorf("error while validating private key: %v", err)
      +		}
      +
      +		if cert.Subject.CommonName != commonName {
      +			t.Errorf("expected common name to be %q, but found %q", commonName, cert.Subject.CommonName)
      +		}
      +	}
      +}
      +
      +func TestPEM(t *testing.T) {
      +	for commonName, base64P12 := range testdata {
      +		p12, _ := base64.StdEncoding.DecodeString(base64P12)
      +
      +		blocks, err := ToPEM(p12, "")
      +		if err != nil {
      +			t.Fatalf("error while converting to PEM: %s", err)
      +		}
      +
      +		var pemData []byte
      +		for _, b := range blocks {
      +			pemData = append(pemData, pem.EncodeToMemory(b)...)
      +		}
      +
      +		cert, err := tls.X509KeyPair(pemData, pemData)
      +		if err != nil {
      +			t.Errorf("err while converting to key pair: %v", err)
      +		}
      +		config := tls.Config{
      +			Certificates: []tls.Certificate{cert},
      +		}
      +		config.BuildNameToCertificate()
      +
      +		if _, exists := config.NameToCertificate[commonName]; !exists {
      +			t.Errorf("did not find our cert in PEM?: %v", config.NameToCertificate)
      +		}
      +	}
      +}
      +
      +func ExampleToPEM() {
      +	p12, _ := base64.StdEncoding.DecodeString(`MIIJzgIBAzCCCZQGCS ... CA+gwggPk==`)
      +
      +	blocks, err := ToPEM(p12, "password")
      +	if err != nil {
      +		panic(err)
      +	}
      +
      +	var pemData []byte
      +	for _, b := range blocks {
      +		pemData = append(pemData, pem.EncodeToMemory(b)...)
      +	}
      +
      +	// then use PEM data for tls to construct tls certificate:
      +	cert, err := tls.X509KeyPair(pemData, pemData)
      +	if err != nil {
      +		panic(err)
      +	}
      +
      +	config := &tls.Config{
      +		Certificates: []tls.Certificate{cert},
      +	}
      +
      +	_ = config
      +}
      +
      +var testdata = map[string]string{
      +	// 'null' password test case
      +	"Windows Azure Tools": `MIIKDAIBAzCCCcwGCSqGSIb3DQEHAaCCCb0Eggm5MIIJtTCCBe4GCSqGSIb3DQEHAaCCBd8EggXbMIIF1zCCBdMGCyqGSIb3DQEMCgECoIIE7jCCBOowHAYKKoZIhvcNAQwBAzAOBAhStUNnlTGV+gICB9AEggTIJ81JIossF6boFWpPtkiQRPtI6DW6e9QD4/WvHAVrM2bKdpMzSMsCML5NyuddANTKHBVq00Jc9keqGNAqJPKkjhSUebzQFyhe0E1oI9T4zY5UKr/I8JclOeccH4QQnsySzYUG2SnniXnQ+JrG3juetli7EKth9h6jLc6xbubPadY5HMB3wL/eG/kJymiXwU2KQ9Mgd4X6jbcV+NNCE/8jbZHvSTCPeYTJIjxfeX61Sj5kFKUCzERbsnpyevhY3X0eYtEDezZQarvGmXtMMdzf8HJHkWRdk9VLDLgjk8uiJif/+X4FohZ37ig0CpgC2+dP4DGugaZZ51hb8tN9GeCKIsrmWogMXDIVd0OACBp/EjJVmFB6y0kUCXxUE0TZt0XA1tjAGJcjDUpBvTntZjPsnH/4ZySy+s2d9OOhJ6pzRQBRm360TzkFdSwk9DLiLdGfv4pwMMu/vNGBlqjP/1sQtj+jprJiD1sDbCl4AdQZVoMBQHadF2uSD4/o17XG/Ci0r2h6Htc2yvZMAbEY4zMjjIn2a+vqIxD6onexaek1R3zbkS9j19D6EN9EWn8xgz80YRCyW65znZk8xaIhhvlU/mg7sTxeyuqroBZNcq6uDaQTehDpyH7bY2l4zWRpoj10a6JfH2q5shYz8Y6UZC/kOTfuGqbZDNZWro/9pYquvNNW0M847E5t9bsf9VkAAMHRGBbWoVoU9VpI0UnoXSfvpOo+aXa2DSq5sHHUTVY7A9eov3z5IqT+pligx11xcs+YhDWcU8di3BTJisohKvv5Y8WSkm/rloiZd4ig269k0jTRk1olP/vCksPli4wKG2wdsd5o42nX1yL7mFfXocOANZbB+5qMkiwdyoQSk+Vq+C8nAZx2bbKhUq2MbrORGMzOe0Hh0x2a0PeObycN1Bpyv7Mp3ZI9h5hBnONKCnqMhtyQHUj/nNvbJUnDVYNfoOEqDiEqqEwB7YqWzAKz8KW0OIqdlM8uiQ4JqZZlFllnWJUfaiDrdFM3lYSnFQBkzeVlts6GpDOOBjCYd7dcCNS6kq6pZC6p6HN60Twu0JnurZD6RT7rrPkIGE8vAenFt4iGe/yF52fahCSY8Ws4K0UTwN7bAS+4xRHVCWvE8sMRZsRCHizb5laYsVrPZJhE6+hux6OBb6w8kwPYXc+ud5v6UxawUWgt6uPwl8mlAtU9Z7Miw4Nn/wtBkiLL/ke1UI1gqJtcQXgHxx6mzsjh41+nAgTvdbsSEyU6vfOmxGj3Rwc1eOrIhJUqn5YjOWfzzsz/D5DzWKmwXIwdspt1p+u+kol1N3f2wT9fKPnd/RGCb4g/1hc3Aju4DQYgGY782l89CEEdalpQ/35bQczMFk6Fje12HykakWEXd/bGm9Unh82gH84USiRpeOfQvBDYoqEyrY3zkFZzBjhDqa+jEcAj41tcGx47oSfDq3iVYCdL7HSIjtnyEktVXd7mISZLoMt20JACFcMw+mrbjlug+eU7o2GR7T+LwtOp/p4LZqyLa7oQJDwde1BNZtm3TCK2P1mW94QDL0nDUps5KLtr1DaZXEkRbjSJub2ZE9WqDHyU3KA8G84Tq/rN1IoNu/if45jacyPje1Npj9IftUZSP22nV7HMwZtwQ4P4MYHRMBMGCSqGSIb3DQEJFTEGBAQBAAAAMFsGCSqGSIb3DQEJFDFOHkwAewBCADQAQQA0AEYARQBCADAALQBBADEAOABBAC0ANAA0AEIAQgAtAEIANQBGADIALQA0ADkAMQBFAEYAMQA1ADIAQgBBADEANgB9MF0GCSsGAQQBgjcRATFQ
Hk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAG8AZgB0AHcAYQByAGUAIABLAGUAeQAgAFMAdABvAHIAYQBnAGUAIABQAHIAbwB2AGkAZABlAHIwggO/BgkqhkiG9w0BBwagggOwMIIDrAIBADCCA6UGCSqGSIb3DQEHATAcBgoqhkiG9w0BDAEGMA4ECEBk5ZAYpu0WAgIH0ICCA3hik4mQFGpw9Ha8TQPtk+j2jwWdxfF0+sTk6S8PTsEfIhB7wPltjiCK92Uv2tCBQnodBUmatIfkpnRDEySmgmdglmOCzj204lWAMRs94PoALGn3JVBXbO1vIDCbAPOZ7Z0Hd0/1t2hmk8v3//QJGUg+qr59/4y/MuVfIg4qfkPcC2QSvYWcK3oTf6SFi5rv9B1IOWFgN5D0+C+x/9Lb/myPYX+rbOHrwtJ4W1fWKoz9g7wwmGFA9IJ2DYGuH8ifVFbDFT1Vcgsvs8arSX7oBsJVW0qrP7XkuDRe3EqCmKW7rBEwYrFznhxZcRDEpMwbFoSvgSIZ4XhFY9VKYglT+JpNH5iDceYEBOQL4vBLpxNUk3l5jKaBNxVa14AIBxq18bVHJ+STInhLhad4u10v/Xbx7wIL3f9DX1yLAkPrpBYbNHS2/ew6H/ySDJnoIDxkw2zZ4qJ+qUJZ1S0lbZVG+VT0OP5uF6tyOSpbMlcGkdl3z254n6MlCrTifcwkzscysDsgKXaYQw06rzrPW6RDub+t+hXzGny799fS9jhQMLDmOggaQ7+LA4oEZsfT89HLMWxJYDqjo3gIfjciV2mV54R684qLDS+AO09U49e6yEbwGlq8lpmO/pbXCbpGbB1b3EomcQbxdWxW2WEkkEd/VBn81K4M3obmywwXJkw+tPXDXfBmzzaqqCR+onMQ5ME1nMkY8ybnfoCc1bDIupjVWsEL2Wvq752RgI6KqzVNr1ew1IdqV5AWN2fOfek+0vi3Jd9FHF3hx8JMwjJL9dZsETV5kHtYJtE7wJ23J68BnCt2eI0GEuwXcCf5EdSKN/xXCTlIokc4Qk/gzRdIZsvcEJ6B1lGovKG54X4IohikqTjiepjbsMWj38yxDmK3mtENZ9ci8FPfbbvIEcOCZIinuY3qFUlRSbx7VUerEoV1IP3clUwexVQo4lHFee2jd7ocWsdSqSapW7OWUupBtDzRkqVhE7tGria+i1W2d6YLlJ21QTjyapWJehAMO637OdbJCCzDs1cXbodRRE7bsP492ocJy8OX66rKdhYbg8srSFNKdb3pF3UDNbN9jhI/t8iagRhNBhlQtTr1me2E/c86Q18qcRXl4bcXTt6acgCeffK6Y26LcVlrgjlD33AEYRRUeyC+rpxbT0aMjdFderlndKRIyG23mSp0HaUwNzAfMAcGBSsOAwIaBBRlviCbIyRrhIysg2dc/KbLFTc2vQQUg4rfwHMM4IKYRD/fsd1x6dda+wQ=`,
      +	// empty string password test case
      +	"testing@example.com": `MIIJzgIBAzCCCZQGCSqGSIb3DQEHAaCCCYUEggmBMIIJfTCCA/cGCSqGSIb3DQEHBqCCA+gwggPk
      +AgEAMIID3QYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYwDgQIIszfRGqcmPcCAggAgIIDsOZ9Eg1L
      +s5Wx8JhYoV3HAL4aRnkAWvTYB5NISZOgSgIQTssmt/3A7134dibTmaT/93LikkL3cTKLnQzJ4wDf
      +YZ1bprpVJvUqz+HFT79m27bP9zYXFrvxWBJbxjYKTSjQMgz+h8LAEpXXGajCmxMJ1oCOtdXkhhzc
      +LdZN6SAYgtmtyFnCdMEDskSggGuLb3fw84QEJ/Sj6FAULXunW/CPaS7Ce0TMsKmNU/jfFWj3yXXw
      +ro0kwjKiVLpVFlnBlHo2OoVU7hmkm59YpGhLgS7nxLD3n7nBroQ0ID1+8R01NnV9XLGoGzxMm1te
      +6UyTCkr5mj+kEQ8EP1Ys7g/TC411uhVWySMt/rcpkx7Vz1r9kYEAzJpONAfr6cuEVkPKrxpq4Fh0
      +2fzlKBky0i/hrfIEUmngh+ERHUb/Mtv/fkv1j5w9suESbhsMLLiCXAlsP1UWMX+3bNizi3WVMEts
      +FM2k9byn+p8IUD/A8ULlE4kEaWeoc+2idkCNQkLGuIdGUXUFVm58se0auUkVRoRJx8x4CkMesT8j
      +b1H831W66YRWoEwwDQp2kK1lA2vQXxdVHWlFevMNxJeromLzj3ayiaFrfByeUXhR2S+Hpm+c0yNR
      +4UVU9WED2kacsZcpRm9nlEa5sr28mri5JdBrNa/K02OOhvKCxr5ZGmbOVzUQKla2z4w+Ku9k8POm
      +dfDNU/fGx1b5hcFWtghXe3msWVsSJrQihnN6q1ughzNiYZlJUGcHdZDRtiWwCFI0bR8h/Dmg9uO9
      +4rawQQrjIRT7B8yF3UbkZyAqs8Ppb1TsMeNPHh1rxEfGVQknh/48ouJYsmtbnzugTUt3mJCXXiL+
      +XcPMV6bBVAUu4aaVKSmg9+yJtY4/VKv10iw88ktv29fViIdBe3t6l/oPuvQgbQ8dqf4T8w0l/uKZ
      +9lS1Na9jfT1vCoS7F5TRi+tmyj1vL5kr/amEIW6xKEP6oeAMvCMtbPAzVEj38zdJ1R22FfuIBxkh
      +f0Zl7pdVbmzRxl/SBx9iIBJSqAvcXItiT0FIj8HxQ+0iZKqMQMiBuNWJf5pYOLWGrIyntCWwHuaQ
      +wrx0sTGuEL9YXLEAsBDrsvzLkx/56E4INGZFrH8G7HBdW6iGqb22IMI4GHltYSyBRKbB0gadYTyv
      +abPEoqww8o7/85aPSzOTJ/53ozD438Q+d0u9SyDuOb60SzCD/zPuCEd78YgtXJwBYTuUNRT27FaM
      +3LGMX8Hz+6yPNRnmnA2XKPn7dx/IlaqAjIs8MIIFfgYJKoZIhvcNAQcBoIIFbwSCBWswggVnMIIF
      +YwYLKoZIhvcNAQwKAQKgggTuMIIE6jAcBgoqhkiG9w0BDAEDMA4ECJr0cClYqOlcAgIIAASCBMhe
      +OQSiP2s0/46ONXcNeVAkz2ksW3u/+qorhSiskGZ0b3dFa1hhgBU2Q7JVIkc4Hf7OXaT1eVQ8oqND
      +uhqsNz83/kqYo70+LS8Hocj49jFgWAKrf/yQkdyP1daHa2yzlEw4mkpqOfnIORQHvYCa8nEApspZ
      +wVu8y6WVuLHKU67mel7db2xwstQp7PRuSAYqGjTfAylElog8ASdaqqYbYIrCXucF8iF9oVgmb/Qo
      +xrXshJ9aSLO4MuXlTPELmWgj07AXKSb90FKNihE+y0bWb9LPVFY1Sly3AX9PfrtkSXIZwqW3phpv
      +MxGxQl/R6mr1z+hlTfY9Wdpb5vlKXPKA0L0Rt8d2pOesylFi6esJoS01QgP1kJILjbrV731kvDc0
      +Jsd+Oxv4BMwA7ClG8w1EAOInc/GrV1MWFGw/HeEqj3CZ/l/0jv9bwkbVeVCiIhoL6P6lVx9pXq4t
      +KZ0uKg/tk5TVJmG2vLcMLvezD0Yk3G2ZOMrywtmskrwoF7oAUpO9e87szoH6fEvUZlkDkPVW1NV4
      +cZk3DBSQiuA3VOOg8qbo/tx/EE3H59P0axZWno2GSB0wFPWd1aj+b//tJEJHaaNR6qPRj4IWj9ru
      +Qbc8eRAcVWleHg8uAehSvUXlFpyMQREyrnpvMGddpiTC8N4UMrrBRhV7+UbCOWhxPCbItnInBqgl
      +1JpSZIP7iUtsIMdu3fEC2cdbXMTRul+4rdzUR7F9OaezV3jjvcAbDvgbK1CpyC+MJ1Mxm/iTgk9V
      +iUArydhlR8OniN84GyGYoYCW9O/KUwb6ASmeFOu/msx8x6kAsSQHIkKqMKv0TUR3kZnkxUvdpBGP
      +KTl4YCTvNGX4dYALBqrAETRDhua2KVBD/kEttDHwBNVbN2xi81+Mc7ml461aADfk0c66R/m2sjHB
      +2tN9+wG12OIWFQjL6wF/UfJMYamxx2zOOExiId29Opt57uYiNVLOO4ourPewHPeH0u8Gz35aero7
      +lkt7cZAe1Q0038JUuE/QGlnK4lESK9UkSIQAjSaAlTsrcfwtQxB2EjoOoLhwH5mvxUEmcNGNnXUc
      +9xj3M5BD3zBz3Ft7G3YMMDwB1+zC2l+0UG0MGVjMVaeoy32VVNvxgX7jk22OXG1iaOB+PY9kdk+O
      +X+52BGSf/rD6X0EnqY7XuRPkMGgjtpZeAYxRQnFtCZgDY4wYheuxqSSpdF49yNczSPLkgB3CeCfS
      ++9NTKN7aC6hBbmW/8yYh6OvSiCEwY0lFS/T+7iaVxr1loE4zI1y/FFp4Pe1qfLlLttVlkygga2UU
      +SCunTQ8UB/M5IXWKkhMOO11dP4niWwb39Y7pCWpau7mwbXOKfRPX96cgHnQJK5uG+BesDD1oYnX0
      +6frN7FOnTSHKruRIwuI8KnOQ/I+owmyz71wiv5LMQt+yM47UrEjB/EZa5X8dpEwOZvkdqL7utcyo
      +l0XH5kWMXdW856LL/FYftAqJIDAmtX1TXF/rbP6mPyN/IlDC0gjP84Uzd/a2UyTIWr+wk49Ek3vQ
      +/uDamq6QrwAxVmNh5Tset5Vhpc1e1kb7mRMZIzxSP8JcTuYd45oFKi98I8YjvueHVZce1g7OudQP
      +SbFQoJvdT46iBg1TTatlltpOiH2mFaxWVS0xYjAjBgkqhkiG9w0BCRUxFgQUdA9eVqvETX4an/c8
      +p8SsTugkit8wOwYJKoZIhvcNAQkUMS4eLABGAHIAaQBlAG4AZABsAHkAIABuAGEAbQBlACAAZgBv
      +AHIAIABjAGUAcgB0MDEwITAJBgUrDgMCGgUABBRFsNz3Zd1O1GI8GTuFwCWuDOjEEwQIuBEfIcAy
      +HQ8CAggA`,
      +}
      diff --git a/vendor/golang.org/x/crypto/pkcs12/safebags.go b/vendor/golang.org/x/crypto/pkcs12/safebags.go
      new file mode 100644
      index 00000000..def1f7b9
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/pkcs12/safebags.go
      @@ -0,0 +1,57 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package pkcs12
      +
      +import (
      +	"crypto/x509"
      +	"encoding/asn1"
      +	"errors"
      +)
      +
      +var (
      +	// see https://tools.ietf.org/html/rfc7292#appendix-D
      +	oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1})
      +	oidPKCS8ShroundedKeyBag    = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2})
      +	oidCertBag                 = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3})
      +)
      +
      +type certBag struct {
      +	Id   asn1.ObjectIdentifier
      +	Data []byte `asn1:"tag:0,explicit"`
      +}
      +
      +func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) {
      +	pkinfo := new(encryptedPrivateKeyInfo)
      +	if err = unmarshal(asn1Data, pkinfo); err != nil {
      +		return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error())
      +	}
      +
      +	pkData, err := pbDecrypt(pkinfo, password)
      +	if err != nil {
      +		return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error())
      +	}
      +
      +	ret := new(asn1.RawValue)
      +	if err = unmarshal(pkData, ret); err != nil {
      +		return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error())
      +	}
      +
      +	if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil {
      +		return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error())
      +	}
      +
      +	return privateKey, nil
      +}
      +
      +func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) {
      +	bag := new(certBag)
      +	if err := unmarshal(asn1Data, bag); err != nil {
      +		return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error())
      +	}
      +	if !bag.Id.Equal(oidCertTypeX509Certificate) {
      +		return nil, NotImplementedError("only X509 certificates are supported")
      +	}
      +	return bag.Data, nil
      +}
      diff --git a/vendor/golang.org/x/crypto/poly1305/const_amd64.s b/vendor/golang.org/x/crypto/poly1305/const_amd64.s
      new file mode 100644
      index 00000000..8e861f33
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/poly1305/const_amd64.s
      @@ -0,0 +1,45 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This code was translated into a form compatible with 6a from the public
      +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
      +
      +// +build amd64,!gccgo,!appengine
      +
      +DATA ·SCALE(SB)/8, $0x37F4000000000000
      +GLOBL ·SCALE(SB), 8, $8
      +DATA ·TWO32(SB)/8, $0x41F0000000000000
      +GLOBL ·TWO32(SB), 8, $8
      +DATA ·TWO64(SB)/8, $0x43F0000000000000
      +GLOBL ·TWO64(SB), 8, $8
      +DATA ·TWO96(SB)/8, $0x45F0000000000000
      +GLOBL ·TWO96(SB), 8, $8
      +DATA ·ALPHA32(SB)/8, $0x45E8000000000000
      +GLOBL ·ALPHA32(SB), 8, $8
      +DATA ·ALPHA64(SB)/8, $0x47E8000000000000
      +GLOBL ·ALPHA64(SB), 8, $8
      +DATA ·ALPHA96(SB)/8, $0x49E8000000000000
      +GLOBL ·ALPHA96(SB), 8, $8
      +DATA ·ALPHA130(SB)/8, $0x4C08000000000000
      +GLOBL ·ALPHA130(SB), 8, $8
      +DATA ·DOFFSET0(SB)/8, $0x4330000000000000
      +GLOBL ·DOFFSET0(SB), 8, $8
      +DATA ·DOFFSET1(SB)/8, $0x4530000000000000
      +GLOBL ·DOFFSET1(SB), 8, $8
      +DATA ·DOFFSET2(SB)/8, $0x4730000000000000
      +GLOBL ·DOFFSET2(SB), 8, $8
      +DATA ·DOFFSET3(SB)/8, $0x4930000000000000
      +GLOBL ·DOFFSET3(SB), 8, $8
      +DATA ·DOFFSET3MINUSTWO128(SB)/8, $0x492FFFFE00000000
      +GLOBL ·DOFFSET3MINUSTWO128(SB), 8, $8
      +DATA ·HOFFSET0(SB)/8, $0x43300001FFFFFFFB
      +GLOBL ·HOFFSET0(SB), 8, $8
      +DATA ·HOFFSET1(SB)/8, $0x45300001FFFFFFFE
      +GLOBL ·HOFFSET1(SB), 8, $8
      +DATA ·HOFFSET2(SB)/8, $0x47300001FFFFFFFE
      +GLOBL ·HOFFSET2(SB), 8, $8
      +DATA ·HOFFSET3(SB)/8, $0x49300003FFFFFFFE
      +GLOBL ·HOFFSET3(SB), 8, $8
      +DATA ·ROUNDING(SB)/2, $0x137f
      +GLOBL ·ROUNDING(SB), 8, $2
      diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go
      new file mode 100644
      index 00000000..4a5f826f
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go
      @@ -0,0 +1,32 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +/*
      +Package poly1305 implements Poly1305 one-time message authentication code as specified in http://cr.yp.to/mac/poly1305-20050329.pdf.
      +
      +Poly1305 is a fast, one-time authentication function. It is infeasible for an
      +attacker to generate an authenticator for a message without the key. However, a
      +key must only be used for a single message. Authenticating two different
      +messages with the same key allows an attacker to forge authenticators for other
      +messages with the same key.
      +
      +Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
      +used with a fixed key in order to generate one-time keys from an nonce.
      +However, in this package AES isn't used and the one-time key is specified
      +directly.
      +*/
      +package poly1305 // import "golang.org/x/crypto/poly1305"
      +
      +import "crypto/subtle"
      +
      +// TagSize is the size, in bytes, of a poly1305 authenticator.
      +const TagSize = 16
      +
      +// Verify returns true if mac is a valid authenticator for m with the given
      +// key.
      +func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
      +	var tmp [16]byte
      +	Sum(&tmp, m, key)
      +	return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
      +}
      diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305_amd64.s b/vendor/golang.org/x/crypto/poly1305/poly1305_amd64.s
      new file mode 100644
      index 00000000..f8d4ee92
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/poly1305/poly1305_amd64.s
      @@ -0,0 +1,497 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This code was translated into a form compatible with 6a from the public
      +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
      +
      +// +build amd64,!gccgo,!appengine
      +
      +// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]key)
      +TEXT ·poly1305(SB),0,$224-32
      +	MOVQ out+0(FP),DI
      +	MOVQ m+8(FP),SI
      +	MOVQ mlen+16(FP),DX
      +	MOVQ key+24(FP),CX
      +
      +	MOVQ SP,R11
      +	MOVQ $31,R9
      +	NOTQ R9
      +	ANDQ R9,SP
      +	ADDQ $32,SP
      +
      +	MOVQ R11,32(SP)
      +	MOVQ R12,40(SP)
      +	MOVQ R13,48(SP)
      +	MOVQ R14,56(SP)
      +	MOVQ R15,64(SP)
      +	MOVQ BX,72(SP)
      +	MOVQ BP,80(SP)
      +	FLDCW ·ROUNDING(SB)
      +	MOVL 0(CX),R8
      +	MOVL 4(CX),R9
      +	MOVL 8(CX),AX
      +	MOVL 12(CX),R10
      +	MOVQ DI,88(SP)
      +	MOVQ CX,96(SP)
      +	MOVL $0X43300000,108(SP)
      +	MOVL $0X45300000,116(SP)
      +	MOVL $0X47300000,124(SP)
      +	MOVL $0X49300000,132(SP)
      +	ANDL $0X0FFFFFFF,R8
      +	ANDL $0X0FFFFFFC,R9
      +	ANDL $0X0FFFFFFC,AX
      +	ANDL $0X0FFFFFFC,R10
      +	MOVL R8,104(SP)
      +	MOVL R9,112(SP)
      +	MOVL AX,120(SP)
      +	MOVL R10,128(SP)
      +	FMOVD 104(SP), F0
      +	FSUBD ·DOFFSET0(SB), F0
      +	FMOVD 112(SP), F0
      +	FSUBD ·DOFFSET1(SB), F0
      +	FMOVD 120(SP), F0
      +	FSUBD ·DOFFSET2(SB), F0
      +	FMOVD 128(SP), F0
      +	FSUBD ·DOFFSET3(SB), F0
      +	FXCHD F0, F3
      +	FMOVDP F0, 136(SP)
      +	FXCHD F0, F1
      +	FMOVD F0, 144(SP)
      +	FMULD ·SCALE(SB), F0
      +	FMOVDP F0, 152(SP)
      +	FMOVD F0, 160(SP)
      +	FMULD ·SCALE(SB), F0
      +	FMOVDP F0, 168(SP)
      +	FMOVD F0, 176(SP)
      +	FMULD ·SCALE(SB), F0
      +	FMOVDP F0, 184(SP)
      +	FLDZ
      +	FLDZ
      +	FLDZ
      +	FLDZ
      +	CMPQ DX,$16
      +	JB ADDATMOST15BYTES
      +	INITIALATLEAST16BYTES:
      +	MOVL 12(SI),DI
      +	MOVL 8(SI),CX
      +	MOVL 4(SI),R8
      +	MOVL 0(SI),R9
      +	MOVL DI,128(SP)
      +	MOVL CX,120(SP)
      +	MOVL R8,112(SP)
      +	MOVL R9,104(SP)
      +	ADDQ $16,SI
      +	SUBQ $16,DX
      +	FXCHD F0, F3
      +	FADDD 128(SP), F0
      +	FSUBD ·DOFFSET3MINUSTWO128(SB), F0
      +	FXCHD F0, F1
      +	FADDD 112(SP), F0
      +	FSUBD ·DOFFSET1(SB), F0
      +	FXCHD F0, F2
      +	FADDD 120(SP), F0
      +	FSUBD ·DOFFSET2(SB), F0
      +	FXCHD F0, F3
      +	FADDD 104(SP), F0
      +	FSUBD ·DOFFSET0(SB), F0
      +	CMPQ DX,$16
      +	JB MULTIPLYADDATMOST15BYTES
      +	MULTIPLYADDATLEAST16BYTES:
      +	MOVL 12(SI),DI
      +	MOVL 8(SI),CX
      +	MOVL 4(SI),R8
      +	MOVL 0(SI),R9
      +	MOVL DI,128(SP)
      +	MOVL CX,120(SP)
      +	MOVL R8,112(SP)
      +	MOVL R9,104(SP)
      +	ADDQ $16,SI
      +	SUBQ $16,DX
      +	FMOVD ·ALPHA130(SB), F0
      +	FADDD F2,F0
      +	FSUBD ·ALPHA130(SB), F0
      +	FSUBD F0,F2
      +	FMULD ·SCALE(SB), F0
      +	FMOVD ·ALPHA32(SB), F0
      +	FADDD F2,F0
      +	FSUBD ·ALPHA32(SB), F0
      +	FSUBD F0,F2
      +	FXCHD F0, F2
      +	FADDDP F0,F1
      +	FMOVD ·ALPHA64(SB), F0
      +	FADDD F4,F0
      +	FSUBD ·ALPHA64(SB), F0
      +	FSUBD F0,F4
      +	FMOVD ·ALPHA96(SB), F0
      +	FADDD F6,F0
      +	FSUBD ·ALPHA96(SB), F0
      +	FSUBD F0,F6
      +	FXCHD F0, F6
      +	FADDDP F0,F1
      +	FXCHD F0, F3
      +	FADDDP F0,F5
      +	FXCHD F0, F3
      +	FADDDP F0,F1
      +	FMOVD 176(SP), F0
      +	FMULD F3,F0
      +	FMOVD 160(SP), F0
      +	FMULD F4,F0
      +	FMOVD 144(SP), F0
      +	FMULD F5,F0
      +	FMOVD 136(SP), F0
      +	FMULDP F0,F6
      +	FMOVD 160(SP), F0
      +	FMULD F4,F0
      +	FADDDP F0,F3
      +	FMOVD 144(SP), F0
      +	FMULD F4,F0
      +	FADDDP F0,F2
      +	FMOVD 136(SP), F0
      +	FMULD F4,F0
      +	FADDDP F0,F1
      +	FMOVD 184(SP), F0
      +	FMULDP F0,F4
      +	FXCHD F0, F3
      +	FADDDP F0,F5
      +	FMOVD 144(SP), F0
      +	FMULD F4,F0
      +	FADDDP F0,F2
      +	FMOVD 136(SP), F0
      +	FMULD F4,F0
      +	FADDDP F0,F1
      +	FMOVD 184(SP), F0
      +	FMULD F4,F0
      +	FADDDP F0,F3
      +	FMOVD 168(SP), F0
      +	FMULDP F0,F4
      +	FXCHD F0, F3
      +	FADDDP F0,F4
      +	FMOVD 136(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F1
      +	FXCHD F0, F3
      +	FMOVD 184(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F3
      +	FXCHD F0, F1
      +	FMOVD 168(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F1
      +	FMOVD 152(SP), F0
      +	FMULDP F0,F5
      +	FXCHD F0, F4
      +	FADDDP F0,F1
      +	CMPQ DX,$16
      +	FXCHD F0, F2
      +	FMOVD 128(SP), F0
      +	FSUBD ·DOFFSET3MINUSTWO128(SB), F0
      +	FADDDP F0,F1
      +	FXCHD F0, F1
      +	FMOVD 120(SP), F0
      +	FSUBD ·DOFFSET2(SB), F0
      +	FADDDP F0,F1
      +	FXCHD F0, F3
      +	FMOVD 112(SP), F0
      +	FSUBD ·DOFFSET1(SB), F0
      +	FADDDP F0,F1
      +	FXCHD F0, F2
      +	FMOVD 104(SP), F0
      +	FSUBD ·DOFFSET0(SB), F0
      +	FADDDP F0,F1
      +	JAE MULTIPLYADDATLEAST16BYTES
      +	MULTIPLYADDATMOST15BYTES:
      +	FMOVD ·ALPHA130(SB), F0
      +	FADDD F2,F0
      +	FSUBD ·ALPHA130(SB), F0
      +	FSUBD F0,F2
      +	FMULD ·SCALE(SB), F0
      +	FMOVD ·ALPHA32(SB), F0
      +	FADDD F2,F0
      +	FSUBD ·ALPHA32(SB), F0
      +	FSUBD F0,F2
      +	FMOVD ·ALPHA64(SB), F0
      +	FADDD F5,F0
      +	FSUBD ·ALPHA64(SB), F0
      +	FSUBD F0,F5
      +	FMOVD ·ALPHA96(SB), F0
      +	FADDD F7,F0
      +	FSUBD ·ALPHA96(SB), F0
      +	FSUBD F0,F7
      +	FXCHD F0, F7
      +	FADDDP F0,F1
      +	FXCHD F0, F5
      +	FADDDP F0,F1
      +	FXCHD F0, F3
      +	FADDDP F0,F5
      +	FADDDP F0,F1
      +	FMOVD 176(SP), F0
      +	FMULD F1,F0
      +	FMOVD 160(SP), F0
      +	FMULD F2,F0
      +	FMOVD 144(SP), F0
      +	FMULD F3,F0
      +	FMOVD 136(SP), F0
      +	FMULDP F0,F4
      +	FMOVD 160(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F3
      +	FMOVD 144(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F2
      +	FMOVD 136(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F1
      +	FMOVD 184(SP), F0
      +	FMULDP F0,F5
      +	FXCHD F0, F4
      +	FADDDP F0,F3
      +	FMOVD 144(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F2
      +	FMOVD 136(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F1
      +	FMOVD 184(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F4
      +	FMOVD 168(SP), F0
      +	FMULDP F0,F5
      +	FXCHD F0, F4
      +	FADDDP F0,F2
      +	FMOVD 136(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F1
      +	FMOVD 184(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F4
      +	FMOVD 168(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F3
      +	FMOVD 152(SP), F0
      +	FMULDP F0,F5
      +	FXCHD F0, F4
      +	FADDDP F0,F1
      +	ADDATMOST15BYTES:
      +	CMPQ DX,$0
      +	JE NOMOREBYTES
      +	MOVL $0,0(SP)
      +	MOVL $0, 4 (SP)
      +	MOVL $0, 8 (SP)
      +	MOVL $0, 12 (SP)
      +	LEAQ 0(SP),DI
      +	MOVQ DX,CX
      +	REP; MOVSB
      +	MOVB $1,0(DI)
      +	MOVL  12 (SP),DI
      +	MOVL  8 (SP),SI
      +	MOVL  4 (SP),DX
      +	MOVL 0(SP),CX
      +	MOVL DI,128(SP)
      +	MOVL SI,120(SP)
      +	MOVL DX,112(SP)
      +	MOVL CX,104(SP)
      +	FXCHD F0, F3
      +	FADDD 128(SP), F0
      +	FSUBD ·DOFFSET3(SB), F0
      +	FXCHD F0, F2
      +	FADDD 120(SP), F0
      +	FSUBD ·DOFFSET2(SB), F0
      +	FXCHD F0, F1
      +	FADDD 112(SP), F0
      +	FSUBD ·DOFFSET1(SB), F0
      +	FXCHD F0, F3
      +	FADDD 104(SP), F0
      +	FSUBD ·DOFFSET0(SB), F0
      +	FMOVD ·ALPHA130(SB), F0
      +	FADDD F3,F0
      +	FSUBD ·ALPHA130(SB), F0
      +	FSUBD F0,F3
      +	FMULD ·SCALE(SB), F0
      +	FMOVD ·ALPHA32(SB), F0
      +	FADDD F2,F0
      +	FSUBD ·ALPHA32(SB), F0
      +	FSUBD F0,F2
      +	FMOVD ·ALPHA64(SB), F0
      +	FADDD F6,F0
      +	FSUBD ·ALPHA64(SB), F0
      +	FSUBD F0,F6
      +	FMOVD ·ALPHA96(SB), F0
      +	FADDD F5,F0
      +	FSUBD ·ALPHA96(SB), F0
      +	FSUBD F0,F5
      +	FXCHD F0, F4
      +	FADDDP F0,F3
      +	FXCHD F0, F6
      +	FADDDP F0,F1
      +	FXCHD F0, F3
      +	FADDDP F0,F5
      +	FXCHD F0, F3
      +	FADDDP F0,F1
      +	FMOVD 176(SP), F0
      +	FMULD F3,F0
      +	FMOVD 160(SP), F0
      +	FMULD F4,F0
      +	FMOVD 144(SP), F0
      +	FMULD F5,F0
      +	FMOVD 136(SP), F0
      +	FMULDP F0,F6
      +	FMOVD 160(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F3
      +	FMOVD 144(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F2
      +	FMOVD 136(SP), F0
      +	FMULD F5,F0
      +	FADDDP F0,F1
      +	FMOVD 184(SP), F0
      +	FMULDP F0,F5
      +	FXCHD F0, F4
      +	FADDDP F0,F5
      +	FMOVD 144(SP), F0
      +	FMULD F6,F0
      +	FADDDP F0,F2
      +	FMOVD 136(SP), F0
      +	FMULD F6,F0
      +	FADDDP F0,F1
      +	FMOVD 184(SP), F0
      +	FMULD F6,F0
      +	FADDDP F0,F4
      +	FMOVD 168(SP), F0
      +	FMULDP F0,F6
      +	FXCHD F0, F5
      +	FADDDP F0,F4
      +	FMOVD 136(SP), F0
      +	FMULD F2,F0
      +	FADDDP F0,F1
      +	FMOVD 184(SP), F0
      +	FMULD F2,F0
      +	FADDDP F0,F5
      +	FMOVD 168(SP), F0
      +	FMULD F2,F0
      +	FADDDP F0,F3
      +	FMOVD 152(SP), F0
      +	FMULDP F0,F2
      +	FXCHD F0, F1
      +	FADDDP F0,F3
      +	FXCHD F0, F3
      +	FXCHD F0, F2
      +	NOMOREBYTES:
      +	MOVL $0,R10
      +	FMOVD ·ALPHA130(SB), F0
      +	FADDD F4,F0
      +	FSUBD ·ALPHA130(SB), F0
      +	FSUBD F0,F4
      +	FMULD ·SCALE(SB), F0
      +	FMOVD ·ALPHA32(SB), F0
      +	FADDD F2,F0
      +	FSUBD ·ALPHA32(SB), F0
      +	FSUBD F0,F2
      +	FMOVD ·ALPHA64(SB), F0
      +	FADDD F4,F0
      +	FSUBD ·ALPHA64(SB), F0
      +	FSUBD F0,F4
      +	FMOVD ·ALPHA96(SB), F0
      +	FADDD F6,F0
      +	FSUBD ·ALPHA96(SB), F0
      +	FXCHD F0, F6
      +	FSUBD F6,F0
      +	FXCHD F0, F4
      +	FADDDP F0,F3
      +	FXCHD F0, F4
      +	FADDDP F0,F1
      +	FXCHD F0, F2
      +	FADDDP F0,F3
      +	FXCHD F0, F4
      +	FADDDP F0,F3
      +	FXCHD F0, F3
      +	FADDD ·HOFFSET0(SB), F0
      +	FXCHD F0, F3
      +	FADDD ·HOFFSET1(SB), F0
      +	FXCHD F0, F1
      +	FADDD ·HOFFSET2(SB), F0
      +	FXCHD F0, F2
      +	FADDD ·HOFFSET3(SB), F0
      +	FXCHD F0, F3
      +	FMOVDP F0, 104(SP)
      +	FMOVDP F0, 112(SP)
      +	FMOVDP F0, 120(SP)
      +	FMOVDP F0, 128(SP)
      +	MOVL 108(SP),DI
      +	ANDL $63,DI
      +	MOVL 116(SP),SI
      +	ANDL $63,SI
      +	MOVL 124(SP),DX
      +	ANDL $63,DX
      +	MOVL 132(SP),CX
      +	ANDL $63,CX
      +	MOVL 112(SP),R8
      +	ADDL DI,R8
      +	MOVQ R8,112(SP)
      +	MOVL 120(SP),DI
      +	ADCL SI,DI
      +	MOVQ DI,120(SP)
      +	MOVL 128(SP),DI
      +	ADCL DX,DI
      +	MOVQ DI,128(SP)
      +	MOVL R10,DI
      +	ADCL CX,DI
      +	MOVQ DI,136(SP)
      +	MOVQ $5,DI
      +	MOVL 104(SP),SI
      +	ADDL SI,DI
      +	MOVQ DI,104(SP)
      +	MOVL R10,DI
      +	MOVQ 112(SP),DX
      +	ADCL DX,DI
      +	MOVQ DI,112(SP)
      +	MOVL R10,DI
      +	MOVQ 120(SP),CX
      +	ADCL CX,DI
      +	MOVQ DI,120(SP)
      +	MOVL R10,DI
      +	MOVQ 128(SP),R8
      +	ADCL R8,DI
      +	MOVQ DI,128(SP)
      +	MOVQ $0XFFFFFFFC,DI
      +	MOVQ 136(SP),R9
      +	ADCL R9,DI
      +	SARL $16,DI
      +	MOVQ DI,R9
      +	XORL $0XFFFFFFFF,R9
      +	ANDQ DI,SI
      +	MOVQ 104(SP),AX
      +	ANDQ R9,AX
      +	ORQ AX,SI
      +	ANDQ DI,DX
      +	MOVQ 112(SP),AX
      +	ANDQ R9,AX
      +	ORQ AX,DX
      +	ANDQ DI,CX
      +	MOVQ 120(SP),AX
      +	ANDQ R9,AX
      +	ORQ AX,CX
      +	ANDQ DI,R8
      +	MOVQ 128(SP),DI
      +	ANDQ R9,DI
      +	ORQ DI,R8
      +	MOVQ 88(SP),DI
      +	MOVQ 96(SP),R9
      +	ADDL 16(R9),SI
      +	ADCL 20(R9),DX
      +	ADCL 24(R9),CX
      +	ADCL 28(R9),R8
      +	MOVL SI,0(DI)
      +	MOVL DX,4(DI)
      +	MOVL CX,8(DI)
      +	MOVL R8,12(DI)
      +	MOVQ 32(SP),R11
      +	MOVQ 40(SP),R12
      +	MOVQ 48(SP),R13
      +	MOVQ 56(SP),R14
      +	MOVQ 64(SP),R15
      +	MOVQ 72(SP),BX
      +	MOVQ 80(SP),BP
      +	MOVQ R11,SP
      +	RET
      diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305_arm.s b/vendor/golang.org/x/crypto/poly1305/poly1305_arm.s
      new file mode 100644
      index 00000000..c1538674
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/poly1305/poly1305_arm.s
      @@ -0,0 +1,379 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This code was translated into a form compatible with 5a from the public
      +// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
      +
      +// +build arm,!gccgo,!appengine
      +
      +DATA poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
      +DATA poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
      +DATA poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
      +DATA poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
      +DATA poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
      +GLOBL poly1305_init_constants_armv6<>(SB), 8, $20
      +
      +// Warning: the linker may use R11 to synthesize certain instructions. Please
      +// take care and verify that no synthetic instructions use it.
      +
      +TEXT poly1305_init_ext_armv6<>(SB),4,$-4
      +  MOVM.DB.W [R4-R11], (R13)
      +  MOVM.IA.W (R1), [R2-R5]
      +  MOVW $poly1305_init_constants_armv6<>(SB), R7
      +  MOVW R2, R8
      +  MOVW R2>>26, R9
      +  MOVW R3>>20, g
      +  MOVW R4>>14, R11
      +  MOVW R5>>8, R12
      +  ORR R3<<6, R9, R9
      +  ORR R4<<12, g, g
      +  ORR R5<<18, R11, R11
      +  MOVM.IA (R7), [R2-R6]
      +  AND R8, R2, R2
      +  AND R9, R3, R3
      +  AND g, R4, R4
      +  AND R11, R5, R5
      +  AND R12, R6, R6
      +  MOVM.IA.W [R2-R6], (R0)
      +  EOR R2, R2, R2
      +  EOR R3, R3, R3
      +  EOR R4, R4, R4
      +  EOR R5, R5, R5
      +  EOR R6, R6, R6
      +  MOVM.IA.W [R2-R6], (R0)
      +  MOVM.IA.W (R1), [R2-R5]
      +  MOVM.IA [R2-R6], (R0)
      +  MOVM.IA.W (R13), [R4-R11]
      +  RET
      +
      +#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
      +  MOVBU (offset+0)(Rsrc), Rtmp; \
      +  MOVBU Rtmp, (offset+0)(Rdst); \
      +  MOVBU (offset+1)(Rsrc), Rtmp; \
      +  MOVBU Rtmp, (offset+1)(Rdst); \
      +  MOVBU (offset+2)(Rsrc), Rtmp; \
      +  MOVBU Rtmp, (offset+2)(Rdst); \
      +  MOVBU (offset+3)(Rsrc), Rtmp; \
      +  MOVBU Rtmp, (offset+3)(Rdst)
      +
      +TEXT poly1305_blocks_armv6<>(SB),4,$-4
      +  MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13)
      +  SUB $128, R13
      +  MOVW R0, 36(R13)
      +  MOVW R1, 40(R13)
      +  MOVW R2, 44(R13)
      +  MOVW R1, R14
      +  MOVW R2, R12
      +  MOVW 56(R0), R8
      +  WORD $0xe1180008 // TST R8, R8 not working see issue 5921
      +  EOR R6, R6, R6
      +  MOVW.EQ $(1<<24), R6
      +  MOVW R6, 32(R13)
      +  ADD $64, R13, g
      +  MOVM.IA (R0), [R0-R9]
      +  MOVM.IA [R0-R4], (g)
      +  CMP $16, R12
      +  BLO poly1305_blocks_armv6_done
      +poly1305_blocks_armv6_mainloop:
      +  WORD $0xe31e0003 // TST R14, #3 not working see issue 5921
      +  BEQ poly1305_blocks_armv6_mainloop_aligned
      +  ADD $48, R13, g
      +  MOVW_UNALIGNED(R14, g, R0, 0)
      +  MOVW_UNALIGNED(R14, g, R0, 4)
      +  MOVW_UNALIGNED(R14, g, R0, 8)
      +  MOVW_UNALIGNED(R14, g, R0, 12)
      +  MOVM.IA (g), [R0-R3]
      +  ADD $16, R14
      +  B poly1305_blocks_armv6_mainloop_loaded
      +poly1305_blocks_armv6_mainloop_aligned:
      +  MOVM.IA.W (R14), [R0-R3]
      +poly1305_blocks_armv6_mainloop_loaded:
      +  MOVW R0>>26, g
      +  MOVW R1>>20, R11
      +  MOVW R2>>14, R12
      +  MOVW R14, 40(R13)
      +  MOVW R3>>8, R4
      +  ORR R1<<6, g, g
      +  ORR R2<<12, R11, R11
      +  ORR R3<<18, R12, R12
      +  BIC $0xfc000000, R0, R0
      +  BIC $0xfc000000, g, g
      +  MOVW 32(R13), R3
      +  BIC $0xfc000000, R11, R11
      +  BIC $0xfc000000, R12, R12
      +  ADD R0, R5, R5
      +  ADD g, R6, R6
      +  ORR R3, R4, R4
      +  ADD R11, R7, R7
      +  ADD $64, R13, R14
      +  ADD R12, R8, R8
      +  ADD R4, R9, R9
      +  MOVM.IA (R14), [R0-R4]
      +  MULLU R4, R5, (R11, g)
      +  MULLU R3, R5, (R14, R12)
      +  MULALU R3, R6, (R11, g)
      +  MULALU R2, R6, (R14, R12)
      +  MULALU R2, R7, (R11, g)
      +  MULALU R1, R7, (R14, R12)
      +  ADD R4<<2, R4, R4
      +  ADD R3<<2, R3, R3
      +  MULALU R1, R8, (R11, g)
      +  MULALU R0, R8, (R14, R12)
      +  MULALU R0, R9, (R11, g)
      +  MULALU R4, R9, (R14, R12)
      +  MOVW g, 24(R13)
      +  MOVW R11, 28(R13)
      +  MOVW R12, 16(R13)
      +  MOVW R14, 20(R13)
      +  MULLU R2, R5, (R11, g)
      +  MULLU R1, R5, (R14, R12)
      +  MULALU R1, R6, (R11, g)
      +  MULALU R0, R6, (R14, R12)
      +  MULALU R0, R7, (R11, g)
      +  MULALU R4, R7, (R14, R12)
      +  ADD R2<<2, R2, R2
      +  ADD R1<<2, R1, R1
      +  MULALU R4, R8, (R11, g)
      +  MULALU R3, R8, (R14, R12)
      +  MULALU R3, R9, (R11, g)
      +  MULALU R2, R9, (R14, R12)
      +  MOVW g, 8(R13)
      +  MOVW R11, 12(R13)
      +  MOVW R12, 0(R13)
      +  MOVW R14, w+4(SP)
      +  MULLU R0, R5, (R11, g)
      +  MULALU R4, R6, (R11, g)
      +  MULALU R3, R7, (R11, g)
      +  MULALU R2, R8, (R11, g)
      +  MULALU R1, R9, (R11, g)
      +  MOVM.IA (R13), [R0-R7]
      +  MOVW g>>26, R12
      +  MOVW R4>>26, R14
      +  ORR R11<<6, R12, R12
      +  ORR R5<<6, R14, R14
      +  BIC $0xfc000000, g, g
      +  BIC $0xfc000000, R4, R4
      +  ADD.S R12, R0, R0
      +  ADC $0, R1, R1
      +  ADD.S R14, R6, R6
      +  ADC $0, R7, R7
      +  MOVW R0>>26, R12
      +  MOVW R6>>26, R14
      +  ORR R1<<6, R12, R12
      +  ORR R7<<6, R14, R14
      +  BIC $0xfc000000, R0, R0
      +  BIC $0xfc000000, R6, R6
      +  ADD R14<<2, R14, R14
      +  ADD.S R12, R2, R2
      +  ADC $0, R3, R3
      +  ADD R14, g, g
      +  MOVW R2>>26, R12
      +  MOVW g>>26, R14
      +  ORR R3<<6, R12, R12
      +  BIC $0xfc000000, g, R5
      +  BIC $0xfc000000, R2, R7
      +  ADD R12, R4, R4
      +  ADD R14, R0, R0
      +  MOVW R4>>26, R12
      +  BIC $0xfc000000, R4, R8
      +  ADD R12, R6, R9
      +  MOVW w+44(SP), R12
      +  MOVW w+40(SP), R14
      +  MOVW R0, R6
      +  CMP $32, R12
      +  SUB $16, R12, R12
      +  MOVW R12, 44(R13)
      +  BHS poly1305_blocks_armv6_mainloop
      +poly1305_blocks_armv6_done:
      +  MOVW 36(R13), R12
      +  MOVW R5, 20(R12)
      +  MOVW R6, 24(R12)
      +  MOVW R7, 28(R12)
      +  MOVW R8, 32(R12)
      +  MOVW R9, 36(R12)
      +  ADD $128, R13, R13
      +  MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14]
      +  RET
      +
      +#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
      +  MOVBU.P 1(Rsrc), Rtmp; \
      +  MOVBU.P Rtmp, 1(Rdst); \
      +  MOVBU.P 1(Rsrc), Rtmp; \
      +  MOVBU.P Rtmp, 1(Rdst)
      +
      +#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
      +  MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
      +  MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
      +
      +TEXT poly1305_finish_ext_armv6<>(SB),4,$-4
      +  MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13)
      +  SUB $16, R13, R13
      +  MOVW R0, R5
      +  MOVW R1, R6
      +  MOVW R2, R7
      +  MOVW R3, R8
      +  AND.S R2, R2, R2
      +  BEQ poly1305_finish_ext_armv6_noremaining
      +  EOR R0, R0
      +  MOVW R13, R9
      +  MOVW R0, 0(R13)
      +  MOVW R0, 4(R13)
      +  MOVW R0, 8(R13)
      +  MOVW R0, 12(R13)
      +  WORD $0xe3110003 // TST R1, #3 not working see issue 5921
      +  BEQ poly1305_finish_ext_armv6_aligned
      +  WORD $0xe3120008 // TST R2, #8 not working see issue 5921
      +  BEQ poly1305_finish_ext_armv6_skip8
      +  MOVWP_UNALIGNED(R1, R9, g)
      +  MOVWP_UNALIGNED(R1, R9, g)
      +poly1305_finish_ext_armv6_skip8:
      +  WORD $0xe3120004 // TST $4, R2 not working see issue 5921
      +  BEQ poly1305_finish_ext_armv6_skip4
      +  MOVWP_UNALIGNED(R1, R9, g)
      +poly1305_finish_ext_armv6_skip4:
      +  WORD $0xe3120002 // TST $2, R2 not working see issue 5921
      +  BEQ poly1305_finish_ext_armv6_skip2
      +  MOVHUP_UNALIGNED(R1, R9, g)
      +  B poly1305_finish_ext_armv6_skip2
      +poly1305_finish_ext_armv6_aligned:
      +  WORD $0xe3120008 // TST R2, #8 not working see issue 5921
      +  BEQ poly1305_finish_ext_armv6_skip8_aligned
      +  MOVM.IA.W (R1), [g-R11]
      +  MOVM.IA.W [g-R11], (R9)
      +poly1305_finish_ext_armv6_skip8_aligned:
      +  WORD $0xe3120004 // TST $4, R2 not working see issue 5921
      +  BEQ poly1305_finish_ext_armv6_skip4_aligned
      +  MOVW.P 4(R1), g
      +  MOVW.P g, 4(R9)
      +poly1305_finish_ext_armv6_skip4_aligned:
      +  WORD $0xe3120002 // TST $2, R2 not working see issue 5921
      +  BEQ poly1305_finish_ext_armv6_skip2
      +  MOVHU.P 2(R1), g
      +  MOVH.P g, 2(R9)
      +poly1305_finish_ext_armv6_skip2:
      +  WORD $0xe3120001 // TST $1, R2 not working see issue 5921
      +  BEQ poly1305_finish_ext_armv6_skip1
      +  MOVBU.P 1(R1), g
      +  MOVBU.P g, 1(R9)
      +poly1305_finish_ext_armv6_skip1:
      +  MOVW $1, R11
      +  MOVBU R11, 0(R9)
      +  MOVW R11, 56(R5)
      +  MOVW R5, R0
      +  MOVW R13, R1
      +  MOVW $16, R2
      +  BL poly1305_blocks_armv6<>(SB)
      +poly1305_finish_ext_armv6_noremaining:
      +  MOVW 20(R5), R0
      +  MOVW 24(R5), R1
      +  MOVW 28(R5), R2
      +  MOVW 32(R5), R3
      +  MOVW 36(R5), R4
      +  MOVW R4>>26, R12
      +  BIC $0xfc000000, R4, R4
      +  ADD R12<<2, R12, R12
      +  ADD R12, R0, R0
      +  MOVW R0>>26, R12
      +  BIC $0xfc000000, R0, R0
      +  ADD R12, R1, R1
      +  MOVW R1>>26, R12
      +  BIC $0xfc000000, R1, R1
      +  ADD R12, R2, R2
      +  MOVW R2>>26, R12
      +  BIC $0xfc000000, R2, R2
      +  ADD R12, R3, R3
      +  MOVW R3>>26, R12
      +  BIC $0xfc000000, R3, R3
      +  ADD R12, R4, R4
      +  ADD $5, R0, R6
      +  MOVW R6>>26, R12
      +  BIC $0xfc000000, R6, R6
      +  ADD R12, R1, R7
      +  MOVW R7>>26, R12
      +  BIC $0xfc000000, R7, R7
      +  ADD R12, R2, g
      +  MOVW g>>26, R12
      +  BIC $0xfc000000, g, g
      +  ADD R12, R3, R11
      +  MOVW $-(1<<26), R12
      +  ADD R11>>26, R12, R12
      +  BIC $0xfc000000, R11, R11
      +  ADD R12, R4, R14
      +  MOVW R14>>31, R12
      +  SUB $1, R12
      +  AND R12, R6, R6
      +  AND R12, R7, R7
      +  AND R12, g, g
      +  AND R12, R11, R11
      +  AND R12, R14, R14
      +  MVN R12, R12
      +  AND R12, R0, R0
      +  AND R12, R1, R1
      +  AND R12, R2, R2
      +  AND R12, R3, R3
      +  AND R12, R4, R4
      +  ORR R6, R0, R0
      +  ORR R7, R1, R1
      +  ORR g, R2, R2
      +  ORR R11, R3, R3
      +  ORR R14, R4, R4
      +  ORR R1<<26, R0, R0
      +  MOVW R1>>6, R1
      +  ORR R2<<20, R1, R1
      +  MOVW R2>>12, R2
      +  ORR R3<<14, R2, R2
      +  MOVW R3>>18, R3
      +  ORR R4<<8, R3, R3
      +  MOVW 40(R5), R6
      +  MOVW 44(R5), R7
      +  MOVW 48(R5), g
      +  MOVW 52(R5), R11
      +  ADD.S R6, R0, R0
      +  ADC.S R7, R1, R1
      +  ADC.S g, R2, R2
      +  ADC.S R11, R3, R3
      +  MOVM.IA [R0-R3], (R8)
      +  MOVW R5, R12
      +  EOR R0, R0, R0
      +  EOR R1, R1, R1
      +  EOR R2, R2, R2
      +  EOR R3, R3, R3
      +  EOR R4, R4, R4
      +  EOR R5, R5, R5
      +  EOR R6, R6, R6
      +  EOR R7, R7, R7
      +  MOVM.IA.W [R0-R7], (R12)
      +  MOVM.IA [R0-R7], (R12)
      +  ADD $16, R13, R13
      +  MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14]
      +  RET
      +
+// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
+TEXT ·poly1305_auth_armv6(SB),0,$280-16
+  MOVW  out+0(FP), R4
+  MOVW  m+4(FP), R5
+  MOVW  mlen+8(FP), R6
+  MOVW  key+12(FP), R7
+
+  MOVW R13, R8                        // save SP so it can be restored before RET
+  BIC $63, R13                        // round SP down to a 64-byte boundary
+  SUB $64, R13, R13                   // reserve 64 aligned bytes for the hash state
+  MOVW  R13, R0
+  MOVW  R7, R1
+  BL poly1305_init_ext_armv6<>(SB)    // init(state=R0, key=R1)
+  BIC.S $15, R6, R2                   // R2 = mlen &^ 15: bytes in whole 16-byte blocks (sets Z)
+  BEQ poly1305_auth_armv6_noblocks    // no full blocks: go straight to finish
+  MOVW R13, R0
+  MOVW R5, R1
+  ADD R2, R5, R5                      // advance m past the full blocks
+  SUB R2, R6, R6                      // mlen -= bytes consumed by full blocks
+  BL poly1305_blocks_armv6<>(SB)      // blocks(state=R0, m=R1, len=R2)
+poly1305_auth_armv6_noblocks:
+  MOVW R13, R0
+  MOVW R5, R1
+  MOVW R6, R2
+  MOVW R4, R3
+  BL poly1305_finish_ext_armv6<>(SB)  // finish(state, remaining m, remaining len, out)
+  MOVW R8, R13                        // restore the original SP
+  RET
      diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305_test.go b/vendor/golang.org/x/crypto/poly1305/poly1305_test.go
      new file mode 100644
      index 00000000..b3e92310
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/poly1305/poly1305_test.go
      @@ -0,0 +1,86 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package poly1305
      +
      +import (
      +	"bytes"
      +	"testing"
      +	"unsafe"
      +)
      +
+var testData = []struct {
+	in, k, correct []byte // message, 32-byte key, expected 16-byte tag
+}{
+	{
+		[]byte("Hello world!"),
+		[]byte("this is 32-byte key for Poly1305"),
+		[]byte{0xa6, 0xf7, 0x45, 0x00, 0x8f, 0x81, 0xc9, 0x16, 0xa2, 0x0d, 0xcc, 0x74, 0xee, 0xf2, 0xb2, 0xf0},
+	},
+	{
+		make([]byte, 32), // all-zero message, exactly two blocks
+		[]byte("this is 32-byte key for Poly1305"),
+		[]byte{0x49, 0xec, 0x78, 0x09, 0x0e, 0x48, 0x1e, 0xc6, 0xc2, 0x6b, 0x33, 0xb9, 0x1c, 0xcc, 0x03, 0x07},
+	},
+	{
+		make([]byte, 2007), // all-zero message: many full blocks plus a partial block
+		[]byte("this is 32-byte key for Poly1305"),
+		[]byte{0xda, 0x84, 0xbc, 0xab, 0x02, 0x67, 0x6c, 0x38, 0xcd, 0xb0, 0x15, 0x60, 0x42, 0x74, 0xc2, 0xaa},
+	},
+	{
+		make([]byte, 2007),
+		make([]byte, 32), // an all-zero key must yield an all-zero tag
+		make([]byte, 16),
+	},
+}
      +
+func testSum(t *testing.T, unaligned bool) { // checks Sum against every vector in testData
+	var out [16]byte
+	var key [32]byte
+
+	for i, v := range testData {
+		in := v.in
+		if unaligned {
+			in = unalignBytes(in) // exercise the non-word-aligned input path
+		}
+		copy(key[:], v.k)
+		Sum(&out, in, &key)
+		if !bytes.Equal(out[:], v.correct) {
+			t.Errorf("%d: expected %x, got %x", i, v.correct, out[:])
+		}
+	}
+}
+
+func TestSum(t *testing.T)          { testSum(t, false) }
+func TestSumUnaligned(t *testing.T) { testSum(t, true) }
      +
+func benchmark(b *testing.B, size int, unaligned bool) { // times Sum over a size-byte zero message
+	var out [16]byte
+	var key [32]byte
+	in := make([]byte, size)
+	if unaligned {
+		in = unalignBytes(in)
+	}
+	b.SetBytes(int64(len(in))) // report per-byte throughput
+	b.ResetTimer()             // exclude the setup above from the measurement
+	for i := 0; i < b.N; i++ {
+		Sum(&out, in, &key)
+	}
+}
+
+func Benchmark64(b *testing.B)          { benchmark(b, 64, false) }
+func Benchmark1K(b *testing.B)          { benchmark(b, 1024, false) }
+func Benchmark64Unaligned(b *testing.B) { benchmark(b, 64, true) }
+func Benchmark1KUnaligned(b *testing.B) { benchmark(b, 1024, true) }
      +
+func unalignBytes(in []byte) []byte { // returns a copy of in whose first byte is NOT uint32-aligned
+	out := make([]byte, len(in)+1)
+	if uintptr(unsafe.Pointer(&out[0]))&(unsafe.Alignof(uint32(0))-1) == 0 {
+		out = out[1:] // allocation came back aligned: skip one byte to misalign it
+	} else {
+		out = out[:len(in)] // already misaligned: just trim the spare byte
+	}
+	copy(out, in)
+	return out
+}
      diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go
      new file mode 100644
      index 00000000..6775c703
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go
      @@ -0,0 +1,24 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build amd64,!gccgo,!appengine
      +
      +package poly1305
      +
+// This function is implemented in poly1305_amd64.s. NOTE(review): the blank line between //go:noescape and the declaration below may keep the directive from attaching to poly1305 — confirm against the toolchain's compiler-directive rules.
+
+//go:noescape
+
+func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+
+// Sum generates an authenticator for m using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+	var mPtr *byte // stays nil for an empty message (avoids indexing an empty slice)
+	if len(m) > 0 {
+		mPtr = &m[0]
+	}
+	poly1305(out, mPtr, uint64(len(m)), key)
+}
      diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/vendor/golang.org/x/crypto/poly1305/sum_arm.go
      new file mode 100644
      index 00000000..50b979c2
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/poly1305/sum_arm.go
      @@ -0,0 +1,24 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build arm,!gccgo,!appengine
      +
      +package poly1305
      +
+// This function is implemented in poly1305_arm.s. NOTE(review): the blank line between //go:noescape and the declaration below may keep the directive from attaching to poly1305_auth_armv6 — confirm against the toolchain's compiler-directive rules.
+
+//go:noescape
+
+func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
+
+// Sum generates an authenticator for m using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+	var mPtr *byte // stays nil for an empty message (avoids indexing an empty slice)
+	if len(m) > 0 {
+		mPtr = &m[0]
+	}
+	poly1305_auth_armv6(out, mPtr, uint32(len(m)), key)
+}
      diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ref.go b/vendor/golang.org/x/crypto/poly1305/sum_ref.go
      new file mode 100644
      index 00000000..0b24fc78
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/poly1305/sum_ref.go
      @@ -0,0 +1,1531 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build !amd64,!arm gccgo appengine
      +
      +package poly1305
      +
      +// Based on original, public domain implementation from NaCl by D. J.
      +// Bernstein.
      +
      +import "math"
      +
+const ( // splitting/reduction constants for the floating-point implementation: alphaN = 1.5 * 2^(52+N)
+	alpham80 = 0.00000000558793544769287109375 // 1.5 * 2^-28
+	alpham48 = 24.0                            // 1.5 * 2^4
+	alpham16 = 103079215104.0                  // 1.5 * 2^36
+	alpha0   = 6755399441055744.0              // 1.5 * 2^52
+	alpha18  = 1770887431076116955136.0        // 1.5 * 2^70
+	alpha32  = 29014219670751100192948224.0    // 1.5 * 2^84
+	alpha50  = 7605903601369376408980219232256.0 // 1.5 * 2^102
+	alpha64  = 124615124604835863084731911901282304.0 // 1.5 * 2^116
+	alpha82  = 32667107224410092492483962313449748299776.0 // 1.5 * 2^134
+	alpha96  = 535217884764734955396857238543560676143529984.0 // 1.5 * 2^148
+	alpha112 = 35076039295941670036888435985190792471742381031424.0 // 1.5 * 2^164
+	alpha130 = 9194973245195333150150082162901855101712434733101613056.0 // 1.5 * 2^182
+	scale    = 0.0000000000000000000000000000000000000036734198463196484624023016788195177431833298649127735047148490821200539357960224151611328125 // 5 * 2^-130: folds the top limb back mod p = 2^130 - 5
+	offset0  = 6755408030990331.0 // alpha0 + 2^33 - 5; the four offsets together subtract p = 2^130 - 5
+	offset1  = 29014256564239239022116864.0 // alpha32 + 2^65 - 2^33
+	offset2  = 124615283061160854719918951570079744.0 // alpha64 + 2^97 - 2^65
+	offset3  = 535219245894202480694386063513315216128475136.0 // alpha96 + 2^130 - 2^97
+)
      +
      +// Sum generates an authenticator for m using a one-time key and puts the
      +// 16-byte result into out. Authenticating two different messages with the same
      +// key allows an attacker to forge messages at will.
      +func Sum(out *[16]byte, m []byte, key *[32]byte) {
      +	r := key
      +	s := key[16:]
      +	var (
      +		y7        float64
      +		y6        float64
      +		y1        float64
      +		y0        float64
      +		y5        float64
      +		y4        float64
      +		x7        float64
      +		x6        float64
      +		x1        float64
      +		x0        float64
      +		y3        float64
      +		y2        float64
      +		x5        float64
      +		r3lowx0   float64
      +		x4        float64
      +		r0lowx6   float64
      +		x3        float64
      +		r3highx0  float64
      +		x2        float64
      +		r0highx6  float64
      +		r0lowx0   float64
      +		sr1lowx6  float64
      +		r0highx0  float64
      +		sr1highx6 float64
      +		sr3low    float64
      +		r1lowx0   float64
      +		sr2lowx6  float64
      +		r1highx0  float64
      +		sr2highx6 float64
      +		r2lowx0   float64
      +		sr3lowx6  float64
      +		r2highx0  float64
      +		sr3highx6 float64
      +		r1highx4  float64
      +		r1lowx4   float64
      +		r0highx4  float64
      +		r0lowx4   float64
      +		sr3highx4 float64
      +		sr3lowx4  float64
      +		sr2highx4 float64
      +		sr2lowx4  float64
      +		r0lowx2   float64
      +		r0highx2  float64
      +		r1lowx2   float64
      +		r1highx2  float64
      +		r2lowx2   float64
      +		r2highx2  float64
      +		sr3lowx2  float64
      +		sr3highx2 float64
      +		z0        float64
      +		z1        float64
      +		z2        float64
      +		z3        float64
      +		m0        int64
      +		m1        int64
      +		m2        int64
      +		m3        int64
      +		m00       uint32
      +		m01       uint32
      +		m02       uint32
      +		m03       uint32
      +		m10       uint32
      +		m11       uint32
      +		m12       uint32
      +		m13       uint32
      +		m20       uint32
      +		m21       uint32
      +		m22       uint32
      +		m23       uint32
      +		m30       uint32
      +		m31       uint32
      +		m32       uint32
      +		m33       uint64
      +		lbelow2   int32
      +		lbelow3   int32
      +		lbelow4   int32
      +		lbelow5   int32
      +		lbelow6   int32
      +		lbelow7   int32
      +		lbelow8   int32
      +		lbelow9   int32
      +		lbelow10  int32
      +		lbelow11  int32
      +		lbelow12  int32
      +		lbelow13  int32
      +		lbelow14  int32
      +		lbelow15  int32
      +		s00       uint32
      +		s01       uint32
      +		s02       uint32
      +		s03       uint32
      +		s10       uint32
      +		s11       uint32
      +		s12       uint32
      +		s13       uint32
      +		s20       uint32
      +		s21       uint32
      +		s22       uint32
      +		s23       uint32
      +		s30       uint32
      +		s31       uint32
      +		s32       uint32
      +		s33       uint32
      +		bits32    uint64
      +		f         uint64
      +		f0        uint64
      +		f1        uint64
      +		f2        uint64
      +		f3        uint64
      +		f4        uint64
      +		g         uint64
      +		g0        uint64
      +		g1        uint64
      +		g2        uint64
      +		g3        uint64
      +		g4        uint64
      +	)
      +
      +	var p int32
      +
      +	l := int32(len(m))
      +
      +	r00 := uint32(r[0])
      +
      +	r01 := uint32(r[1])
      +
      +	r02 := uint32(r[2])
      +	r0 := int64(2151)
      +
      +	r03 := uint32(r[3])
      +	r03 &= 15
      +	r0 <<= 51
      +
      +	r10 := uint32(r[4])
      +	r10 &= 252
      +	r01 <<= 8
      +	r0 += int64(r00)
      +
      +	r11 := uint32(r[5])
      +	r02 <<= 16
      +	r0 += int64(r01)
      +
      +	r12 := uint32(r[6])
      +	r03 <<= 24
      +	r0 += int64(r02)
      +
      +	r13 := uint32(r[7])
      +	r13 &= 15
      +	r1 := int64(2215)
      +	r0 += int64(r03)
      +
      +	d0 := r0
      +	r1 <<= 51
      +	r2 := int64(2279)
      +
      +	r20 := uint32(r[8])
      +	r20 &= 252
      +	r11 <<= 8
      +	r1 += int64(r10)
      +
      +	r21 := uint32(r[9])
      +	r12 <<= 16
      +	r1 += int64(r11)
      +
      +	r22 := uint32(r[10])
      +	r13 <<= 24
      +	r1 += int64(r12)
      +
      +	r23 := uint32(r[11])
      +	r23 &= 15
      +	r2 <<= 51
      +	r1 += int64(r13)
      +
      +	d1 := r1
      +	r21 <<= 8
      +	r2 += int64(r20)
      +
      +	r30 := uint32(r[12])
      +	r30 &= 252
      +	r22 <<= 16
      +	r2 += int64(r21)
      +
      +	r31 := uint32(r[13])
      +	r23 <<= 24
      +	r2 += int64(r22)
      +
      +	r32 := uint32(r[14])
      +	r2 += int64(r23)
      +	r3 := int64(2343)
      +
      +	d2 := r2
      +	r3 <<= 51
      +
      +	r33 := uint32(r[15])
      +	r33 &= 15
      +	r31 <<= 8
      +	r3 += int64(r30)
      +
      +	r32 <<= 16
      +	r3 += int64(r31)
      +
      +	r33 <<= 24
      +	r3 += int64(r32)
      +
      +	r3 += int64(r33)
      +	h0 := alpha32 - alpha32
      +
      +	d3 := r3
      +	h1 := alpha32 - alpha32
      +
      +	h2 := alpha32 - alpha32
      +
      +	h3 := alpha32 - alpha32
      +
      +	h4 := alpha32 - alpha32
      +
      +	r0low := math.Float64frombits(uint64(d0))
      +	h5 := alpha32 - alpha32
      +
      +	r1low := math.Float64frombits(uint64(d1))
      +	h6 := alpha32 - alpha32
      +
      +	r2low := math.Float64frombits(uint64(d2))
      +	h7 := alpha32 - alpha32
      +
      +	r0low -= alpha0
      +
      +	r1low -= alpha32
      +
      +	r2low -= alpha64
      +
      +	r0high := r0low + alpha18
      +
      +	r3low := math.Float64frombits(uint64(d3))
      +
      +	r1high := r1low + alpha50
      +	sr1low := scale * r1low
      +
      +	r2high := r2low + alpha82
      +	sr2low := scale * r2low
      +
      +	r0high -= alpha18
      +	r0high_stack := r0high
      +
      +	r3low -= alpha96
      +
      +	r1high -= alpha50
      +	r1high_stack := r1high
      +
      +	sr1high := sr1low + alpham80
      +
      +	r0low -= r0high
      +
      +	r2high -= alpha82
      +	sr3low = scale * r3low
      +
      +	sr2high := sr2low + alpham48
      +
      +	r1low -= r1high
      +	r1low_stack := r1low
      +
      +	sr1high -= alpham80
      +	sr1high_stack := sr1high
      +
      +	r2low -= r2high
      +	r2low_stack := r2low
      +
      +	sr2high -= alpham48
      +	sr2high_stack := sr2high
      +
      +	r3high := r3low + alpha112
      +	r0low_stack := r0low
      +
      +	sr1low -= sr1high
      +	sr1low_stack := sr1low
      +
      +	sr3high := sr3low + alpham16
      +	r2high_stack := r2high
      +
      +	sr2low -= sr2high
      +	sr2low_stack := sr2low
      +
      +	r3high -= alpha112
      +	r3high_stack := r3high
      +
      +	sr3high -= alpham16
      +	sr3high_stack := sr3high
      +
      +	r3low -= r3high
      +	r3low_stack := r3low
      +
      +	sr3low -= sr3high
      +	sr3low_stack := sr3low
      +
      +	if l < 16 {
      +		goto addatmost15bytes
      +	}
      +
      +	m00 = uint32(m[p+0])
      +	m0 = 2151
      +
      +	m0 <<= 51
      +	m1 = 2215
      +	m01 = uint32(m[p+1])
      +
      +	m1 <<= 51
      +	m2 = 2279
      +	m02 = uint32(m[p+2])
      +
      +	m2 <<= 51
      +	m3 = 2343
      +	m03 = uint32(m[p+3])
      +
      +	m10 = uint32(m[p+4])
      +	m01 <<= 8
      +	m0 += int64(m00)
      +
      +	m11 = uint32(m[p+5])
      +	m02 <<= 16
      +	m0 += int64(m01)
      +
      +	m12 = uint32(m[p+6])
      +	m03 <<= 24
      +	m0 += int64(m02)
      +
      +	m13 = uint32(m[p+7])
      +	m3 <<= 51
      +	m0 += int64(m03)
      +
      +	m20 = uint32(m[p+8])
      +	m11 <<= 8
      +	m1 += int64(m10)
      +
      +	m21 = uint32(m[p+9])
      +	m12 <<= 16
      +	m1 += int64(m11)
      +
      +	m22 = uint32(m[p+10])
      +	m13 <<= 24
      +	m1 += int64(m12)
      +
      +	m23 = uint32(m[p+11])
      +	m1 += int64(m13)
      +
      +	m30 = uint32(m[p+12])
      +	m21 <<= 8
      +	m2 += int64(m20)
      +
      +	m31 = uint32(m[p+13])
      +	m22 <<= 16
      +	m2 += int64(m21)
      +
      +	m32 = uint32(m[p+14])
      +	m23 <<= 24
      +	m2 += int64(m22)
      +
      +	m33 = uint64(m[p+15])
      +	m2 += int64(m23)
      +
      +	d0 = m0
      +	m31 <<= 8
      +	m3 += int64(m30)
      +
      +	d1 = m1
      +	m32 <<= 16
      +	m3 += int64(m31)
      +
      +	d2 = m2
      +	m33 += 256
      +
      +	m33 <<= 24
      +	m3 += int64(m32)
      +
      +	m3 += int64(m33)
      +	d3 = m3
      +
      +	p += 16
      +	l -= 16
      +
      +	z0 = math.Float64frombits(uint64(d0))
      +
      +	z1 = math.Float64frombits(uint64(d1))
      +
      +	z2 = math.Float64frombits(uint64(d2))
      +
      +	z3 = math.Float64frombits(uint64(d3))
      +
      +	z0 -= alpha0
      +
      +	z1 -= alpha32
      +
      +	z2 -= alpha64
      +
      +	z3 -= alpha96
      +
      +	h0 += z0
      +
      +	h1 += z1
      +
      +	h3 += z2
      +
      +	h5 += z3
      +
      +	if l < 16 {
      +		goto multiplyaddatmost15bytes
      +	}
      +
      +multiplyaddatleast16bytes:
      +
      +	m2 = 2279
      +	m20 = uint32(m[p+8])
      +	y7 = h7 + alpha130
      +
      +	m2 <<= 51
      +	m3 = 2343
      +	m21 = uint32(m[p+9])
      +	y6 = h6 + alpha130
      +
      +	m3 <<= 51
      +	m0 = 2151
      +	m22 = uint32(m[p+10])
      +	y1 = h1 + alpha32
      +
      +	m0 <<= 51
      +	m1 = 2215
      +	m23 = uint32(m[p+11])
      +	y0 = h0 + alpha32
      +
      +	m1 <<= 51
      +	m30 = uint32(m[p+12])
      +	y7 -= alpha130
      +
      +	m21 <<= 8
      +	m2 += int64(m20)
      +	m31 = uint32(m[p+13])
      +	y6 -= alpha130
      +
      +	m22 <<= 16
      +	m2 += int64(m21)
      +	m32 = uint32(m[p+14])
      +	y1 -= alpha32
      +
      +	m23 <<= 24
      +	m2 += int64(m22)
      +	m33 = uint64(m[p+15])
      +	y0 -= alpha32
      +
      +	m2 += int64(m23)
      +	m00 = uint32(m[p+0])
      +	y5 = h5 + alpha96
      +
      +	m31 <<= 8
      +	m3 += int64(m30)
      +	m01 = uint32(m[p+1])
      +	y4 = h4 + alpha96
      +
      +	m32 <<= 16
      +	m02 = uint32(m[p+2])
      +	x7 = h7 - y7
      +	y7 *= scale
      +
      +	m33 += 256
      +	m03 = uint32(m[p+3])
      +	x6 = h6 - y6
      +	y6 *= scale
      +
      +	m33 <<= 24
      +	m3 += int64(m31)
      +	m10 = uint32(m[p+4])
      +	x1 = h1 - y1
      +
      +	m01 <<= 8
      +	m3 += int64(m32)
      +	m11 = uint32(m[p+5])
      +	x0 = h0 - y0
      +
      +	m3 += int64(m33)
      +	m0 += int64(m00)
      +	m12 = uint32(m[p+6])
      +	y5 -= alpha96
      +
      +	m02 <<= 16
      +	m0 += int64(m01)
      +	m13 = uint32(m[p+7])
      +	y4 -= alpha96
      +
      +	m03 <<= 24
      +	m0 += int64(m02)
      +	d2 = m2
      +	x1 += y7
      +
      +	m0 += int64(m03)
      +	d3 = m3
      +	x0 += y6
      +
      +	m11 <<= 8
      +	m1 += int64(m10)
      +	d0 = m0
      +	x7 += y5
      +
      +	m12 <<= 16
      +	m1 += int64(m11)
      +	x6 += y4
      +
      +	m13 <<= 24
      +	m1 += int64(m12)
      +	y3 = h3 + alpha64
      +
      +	m1 += int64(m13)
      +	d1 = m1
      +	y2 = h2 + alpha64
      +
      +	x0 += x1
      +
      +	x6 += x7
      +
      +	y3 -= alpha64
      +	r3low = r3low_stack
      +
      +	y2 -= alpha64
      +	r0low = r0low_stack
      +
      +	x5 = h5 - y5
      +	r3lowx0 = r3low * x0
      +	r3high = r3high_stack
      +
      +	x4 = h4 - y4
      +	r0lowx6 = r0low * x6
      +	r0high = r0high_stack
      +
      +	x3 = h3 - y3
      +	r3highx0 = r3high * x0
      +	sr1low = sr1low_stack
      +
      +	x2 = h2 - y2
      +	r0highx6 = r0high * x6
      +	sr1high = sr1high_stack
      +
      +	x5 += y3
      +	r0lowx0 = r0low * x0
      +	r1low = r1low_stack
      +
      +	h6 = r3lowx0 + r0lowx6
      +	sr1lowx6 = sr1low * x6
      +	r1high = r1high_stack
      +
      +	x4 += y2
      +	r0highx0 = r0high * x0
      +	sr2low = sr2low_stack
      +
      +	h7 = r3highx0 + r0highx6
      +	sr1highx6 = sr1high * x6
      +	sr2high = sr2high_stack
      +
      +	x3 += y1
      +	r1lowx0 = r1low * x0
      +	r2low = r2low_stack
      +
      +	h0 = r0lowx0 + sr1lowx6
      +	sr2lowx6 = sr2low * x6
      +	r2high = r2high_stack
      +
      +	x2 += y0
      +	r1highx0 = r1high * x0
      +	sr3low = sr3low_stack
      +
      +	h1 = r0highx0 + sr1highx6
      +	sr2highx6 = sr2high * x6
      +	sr3high = sr3high_stack
      +
      +	x4 += x5
      +	r2lowx0 = r2low * x0
      +	z2 = math.Float64frombits(uint64(d2))
      +
      +	h2 = r1lowx0 + sr2lowx6
      +	sr3lowx6 = sr3low * x6
      +
      +	x2 += x3
      +	r2highx0 = r2high * x0
      +	z3 = math.Float64frombits(uint64(d3))
      +
      +	h3 = r1highx0 + sr2highx6
      +	sr3highx6 = sr3high * x6
      +
      +	r1highx4 = r1high * x4
      +	z2 -= alpha64
      +
      +	h4 = r2lowx0 + sr3lowx6
      +	r1lowx4 = r1low * x4
      +
      +	r0highx4 = r0high * x4
      +	z3 -= alpha96
      +
      +	h5 = r2highx0 + sr3highx6
      +	r0lowx4 = r0low * x4
      +
      +	h7 += r1highx4
      +	sr3highx4 = sr3high * x4
      +
      +	h6 += r1lowx4
      +	sr3lowx4 = sr3low * x4
      +
      +	h5 += r0highx4
      +	sr2highx4 = sr2high * x4
      +
      +	h4 += r0lowx4
      +	sr2lowx4 = sr2low * x4
      +
      +	h3 += sr3highx4
      +	r0lowx2 = r0low * x2
      +
      +	h2 += sr3lowx4
      +	r0highx2 = r0high * x2
      +
      +	h1 += sr2highx4
      +	r1lowx2 = r1low * x2
      +
      +	h0 += sr2lowx4
      +	r1highx2 = r1high * x2
      +
      +	h2 += r0lowx2
      +	r2lowx2 = r2low * x2
      +
      +	h3 += r0highx2
      +	r2highx2 = r2high * x2
      +
      +	h4 += r1lowx2
      +	sr3lowx2 = sr3low * x2
      +
      +	h5 += r1highx2
      +	sr3highx2 = sr3high * x2
      +
      +	p += 16
      +	l -= 16
      +	h6 += r2lowx2
      +
      +	h7 += r2highx2
      +
      +	z1 = math.Float64frombits(uint64(d1))
      +	h0 += sr3lowx2
      +
      +	z0 = math.Float64frombits(uint64(d0))
      +	h1 += sr3highx2
      +
      +	z1 -= alpha32
      +
      +	z0 -= alpha0
      +
      +	h5 += z3
      +
      +	h3 += z2
      +
      +	h1 += z1
      +
      +	h0 += z0
      +
      +	if l >= 16 {
      +		goto multiplyaddatleast16bytes
      +	}
      +
      +multiplyaddatmost15bytes:
      +
      +	y7 = h7 + alpha130
      +
      +	y6 = h6 + alpha130
      +
      +	y1 = h1 + alpha32
      +
      +	y0 = h0 + alpha32
      +
      +	y7 -= alpha130
      +
      +	y6 -= alpha130
      +
      +	y1 -= alpha32
      +
      +	y0 -= alpha32
      +
      +	y5 = h5 + alpha96
      +
      +	y4 = h4 + alpha96
      +
      +	x7 = h7 - y7
      +	y7 *= scale
      +
      +	x6 = h6 - y6
      +	y6 *= scale
      +
      +	x1 = h1 - y1
      +
      +	x0 = h0 - y0
      +
      +	y5 -= alpha96
      +
      +	y4 -= alpha96
      +
      +	x1 += y7
      +
      +	x0 += y6
      +
      +	x7 += y5
      +
      +	x6 += y4
      +
      +	y3 = h3 + alpha64
      +
      +	y2 = h2 + alpha64
      +
      +	x0 += x1
      +
      +	x6 += x7
      +
      +	y3 -= alpha64
      +	r3low = r3low_stack
      +
      +	y2 -= alpha64
      +	r0low = r0low_stack
      +
      +	x5 = h5 - y5
      +	r3lowx0 = r3low * x0
      +	r3high = r3high_stack
      +
      +	x4 = h4 - y4
      +	r0lowx6 = r0low * x6
      +	r0high = r0high_stack
      +
      +	x3 = h3 - y3
      +	r3highx0 = r3high * x0
      +	sr1low = sr1low_stack
      +
      +	x2 = h2 - y2
      +	r0highx6 = r0high * x6
      +	sr1high = sr1high_stack
      +
      +	x5 += y3
      +	r0lowx0 = r0low * x0
      +	r1low = r1low_stack
      +
      +	h6 = r3lowx0 + r0lowx6
      +	sr1lowx6 = sr1low * x6
      +	r1high = r1high_stack
      +
      +	x4 += y2
      +	r0highx0 = r0high * x0
      +	sr2low = sr2low_stack
      +
      +	h7 = r3highx0 + r0highx6
      +	sr1highx6 = sr1high * x6
      +	sr2high = sr2high_stack
      +
      +	x3 += y1
      +	r1lowx0 = r1low * x0
      +	r2low = r2low_stack
      +
      +	h0 = r0lowx0 + sr1lowx6
      +	sr2lowx6 = sr2low * x6
      +	r2high = r2high_stack
      +
      +	x2 += y0
      +	r1highx0 = r1high * x0
      +	sr3low = sr3low_stack
      +
      +	h1 = r0highx0 + sr1highx6
      +	sr2highx6 = sr2high * x6
      +	sr3high = sr3high_stack
      +
      +	x4 += x5
      +	r2lowx0 = r2low * x0
      +
      +	h2 = r1lowx0 + sr2lowx6
      +	sr3lowx6 = sr3low * x6
      +
      +	x2 += x3
      +	r2highx0 = r2high * x0
      +
      +	h3 = r1highx0 + sr2highx6
      +	sr3highx6 = sr3high * x6
      +
      +	r1highx4 = r1high * x4
      +
      +	h4 = r2lowx0 + sr3lowx6
      +	r1lowx4 = r1low * x4
      +
      +	r0highx4 = r0high * x4
      +
      +	h5 = r2highx0 + sr3highx6
      +	r0lowx4 = r0low * x4
      +
      +	h7 += r1highx4
      +	sr3highx4 = sr3high * x4
      +
      +	h6 += r1lowx4
      +	sr3lowx4 = sr3low * x4
      +
      +	h5 += r0highx4
      +	sr2highx4 = sr2high * x4
      +
      +	h4 += r0lowx4
      +	sr2lowx4 = sr2low * x4
      +
      +	h3 += sr3highx4
      +	r0lowx2 = r0low * x2
      +
      +	h2 += sr3lowx4
      +	r0highx2 = r0high * x2
      +
      +	h1 += sr2highx4
      +	r1lowx2 = r1low * x2
      +
      +	h0 += sr2lowx4
      +	r1highx2 = r1high * x2
      +
      +	h2 += r0lowx2
      +	r2lowx2 = r2low * x2
      +
      +	h3 += r0highx2
      +	r2highx2 = r2high * x2
      +
      +	h4 += r1lowx2
      +	sr3lowx2 = sr3low * x2
      +
      +	h5 += r1highx2
      +	sr3highx2 = sr3high * x2
      +
      +	h6 += r2lowx2
      +
      +	h7 += r2highx2
      +
      +	h0 += sr3lowx2
      +
      +	h1 += sr3highx2
      +
      +addatmost15bytes:
      +
      +	if l == 0 {
      +		goto nomorebytes
      +	}
      +
      +	lbelow2 = l - 2
      +
      +	lbelow3 = l - 3
      +
      +	lbelow2 >>= 31
      +	lbelow4 = l - 4
      +
      +	m00 = uint32(m[p+0])
      +	lbelow3 >>= 31
      +	p += lbelow2
      +
      +	m01 = uint32(m[p+1])
      +	lbelow4 >>= 31
      +	p += lbelow3
      +
      +	m02 = uint32(m[p+2])
      +	p += lbelow4
      +	m0 = 2151
      +
      +	m03 = uint32(m[p+3])
      +	m0 <<= 51
      +	m1 = 2215
      +
      +	m0 += int64(m00)
      +	m01 &^= uint32(lbelow2)
      +
      +	m02 &^= uint32(lbelow3)
      +	m01 -= uint32(lbelow2)
      +
      +	m01 <<= 8
      +	m03 &^= uint32(lbelow4)
      +
      +	m0 += int64(m01)
      +	lbelow2 -= lbelow3
      +
      +	m02 += uint32(lbelow2)
      +	lbelow3 -= lbelow4
      +
      +	m02 <<= 16
      +	m03 += uint32(lbelow3)
      +
      +	m03 <<= 24
      +	m0 += int64(m02)
      +
      +	m0 += int64(m03)
      +	lbelow5 = l - 5
      +
      +	lbelow6 = l - 6
      +	lbelow7 = l - 7
      +
      +	lbelow5 >>= 31
      +	lbelow8 = l - 8
      +
      +	lbelow6 >>= 31
      +	p += lbelow5
      +
      +	m10 = uint32(m[p+4])
      +	lbelow7 >>= 31
      +	p += lbelow6
      +
      +	m11 = uint32(m[p+5])
      +	lbelow8 >>= 31
      +	p += lbelow7
      +
      +	m12 = uint32(m[p+6])
      +	m1 <<= 51
      +	p += lbelow8
      +
      +	m13 = uint32(m[p+7])
      +	m10 &^= uint32(lbelow5)
      +	lbelow4 -= lbelow5
      +
      +	m10 += uint32(lbelow4)
      +	lbelow5 -= lbelow6
      +
      +	m11 &^= uint32(lbelow6)
      +	m11 += uint32(lbelow5)
      +
      +	m11 <<= 8
      +	m1 += int64(m10)
      +
      +	m1 += int64(m11)
      +	m12 &^= uint32(lbelow7)
      +
      +	lbelow6 -= lbelow7
      +	m13 &^= uint32(lbelow8)
      +
      +	m12 += uint32(lbelow6)
      +	lbelow7 -= lbelow8
      +
      +	m12 <<= 16
      +	m13 += uint32(lbelow7)
      +
      +	m13 <<= 24
      +	m1 += int64(m12)
      +
      +	m1 += int64(m13)
      +	m2 = 2279
      +
      +	lbelow9 = l - 9
      +	m3 = 2343
      +
      +	lbelow10 = l - 10
      +	lbelow11 = l - 11
      +
      +	lbelow9 >>= 31
      +	lbelow12 = l - 12
      +
      +	lbelow10 >>= 31
      +	p += lbelow9
      +
      +	m20 = uint32(m[p+8])
      +	lbelow11 >>= 31
      +	p += lbelow10
      +
      +	m21 = uint32(m[p+9])
      +	lbelow12 >>= 31
      +	p += lbelow11
      +
      +	m22 = uint32(m[p+10])
      +	m2 <<= 51
      +	p += lbelow12
      +
      +	m23 = uint32(m[p+11])
      +	m20 &^= uint32(lbelow9)
      +	lbelow8 -= lbelow9
      +
      +	m20 += uint32(lbelow8)
      +	lbelow9 -= lbelow10
      +
      +	m21 &^= uint32(lbelow10)
      +	m21 += uint32(lbelow9)
      +
      +	m21 <<= 8
      +	m2 += int64(m20)
      +
      +	m2 += int64(m21)
      +	m22 &^= uint32(lbelow11)
      +
      +	lbelow10 -= lbelow11
      +	m23 &^= uint32(lbelow12)
      +
      +	m22 += uint32(lbelow10)
      +	lbelow11 -= lbelow12
      +
      +	m22 <<= 16
      +	m23 += uint32(lbelow11)
      +
      +	m23 <<= 24
      +	m2 += int64(m22)
      +
      +	m3 <<= 51
      +	lbelow13 = l - 13
      +
      +	lbelow13 >>= 31
      +	lbelow14 = l - 14
      +
      +	lbelow14 >>= 31
      +	p += lbelow13
      +	lbelow15 = l - 15
      +
      +	m30 = uint32(m[p+12])
      +	lbelow15 >>= 31
      +	p += lbelow14
      +
      +	m31 = uint32(m[p+13])
      +	p += lbelow15
      +	m2 += int64(m23)
      +
      +	m32 = uint32(m[p+14])
      +	m30 &^= uint32(lbelow13)
      +	lbelow12 -= lbelow13
      +
      +	m30 += uint32(lbelow12)
      +	lbelow13 -= lbelow14
      +
      +	m3 += int64(m30)
      +	m31 &^= uint32(lbelow14)
      +
      +	m31 += uint32(lbelow13)
      +	m32 &^= uint32(lbelow15)
      +
      +	m31 <<= 8
      +	lbelow14 -= lbelow15
      +
      +	m3 += int64(m31)
      +	m32 += uint32(lbelow14)
      +	d0 = m0
      +
      +	m32 <<= 16
      +	m33 = uint64(lbelow15 + 1)
      +	d1 = m1
      +
      +	m33 <<= 24
      +	m3 += int64(m32)
      +	d2 = m2
      +
      +	m3 += int64(m33)
      +	d3 = m3
      +
      +	z3 = math.Float64frombits(uint64(d3))
      +
      +	z2 = math.Float64frombits(uint64(d2))
      +
      +	z1 = math.Float64frombits(uint64(d1))
      +
      +	z0 = math.Float64frombits(uint64(d0))
      +
      +	z3 -= alpha96
      +
      +	z2 -= alpha64
      +
      +	z1 -= alpha32
      +
      +	z0 -= alpha0
      +
      +	h5 += z3
      +
      +	h3 += z2
      +
      +	h1 += z1
      +
      +	h0 += z0
      +
      +	y7 = h7 + alpha130
      +
      +	y6 = h6 + alpha130
      +
      +	y1 = h1 + alpha32
      +
      +	y0 = h0 + alpha32
      +
      +	y7 -= alpha130
      +
      +	y6 -= alpha130
      +
      +	y1 -= alpha32
      +
      +	y0 -= alpha32
      +
      +	y5 = h5 + alpha96
      +
      +	y4 = h4 + alpha96
      +
      +	x7 = h7 - y7
      +	y7 *= scale
      +
      +	x6 = h6 - y6
      +	y6 *= scale
      +
      +	x1 = h1 - y1
      +
      +	x0 = h0 - y0
      +
      +	y5 -= alpha96
      +
      +	y4 -= alpha96
      +
      +	x1 += y7
      +
      +	x0 += y6
      +
      +	x7 += y5
      +
      +	x6 += y4
      +
      +	y3 = h3 + alpha64
      +
      +	y2 = h2 + alpha64
      +
      +	x0 += x1
      +
      +	x6 += x7
      +
      +	y3 -= alpha64
      +	r3low = r3low_stack
      +
      +	y2 -= alpha64
      +	r0low = r0low_stack
      +
      +	x5 = h5 - y5
      +	r3lowx0 = r3low * x0
      +	r3high = r3high_stack
      +
      +	x4 = h4 - y4
      +	r0lowx6 = r0low * x6
      +	r0high = r0high_stack
      +
      +	x3 = h3 - y3
      +	r3highx0 = r3high * x0
      +	sr1low = sr1low_stack
      +
      +	x2 = h2 - y2
      +	r0highx6 = r0high * x6
      +	sr1high = sr1high_stack
      +
      +	x5 += y3
      +	r0lowx0 = r0low * x0
      +	r1low = r1low_stack
      +
      +	h6 = r3lowx0 + r0lowx6
      +	sr1lowx6 = sr1low * x6
      +	r1high = r1high_stack
      +
      +	x4 += y2
      +	r0highx0 = r0high * x0
      +	sr2low = sr2low_stack
      +
      +	h7 = r3highx0 + r0highx6
      +	sr1highx6 = sr1high * x6
      +	sr2high = sr2high_stack
      +
      +	x3 += y1
      +	r1lowx0 = r1low * x0
      +	r2low = r2low_stack
      +
      +	h0 = r0lowx0 + sr1lowx6
      +	sr2lowx6 = sr2low * x6
      +	r2high = r2high_stack
      +
      +	x2 += y0
      +	r1highx0 = r1high * x0
      +	sr3low = sr3low_stack
      +
      +	h1 = r0highx0 + sr1highx6
      +	sr2highx6 = sr2high * x6
      +	sr3high = sr3high_stack
      +
      +	x4 += x5
      +	r2lowx0 = r2low * x0
      +
      +	h2 = r1lowx0 + sr2lowx6
      +	sr3lowx6 = sr3low * x6
      +
      +	x2 += x3
      +	r2highx0 = r2high * x0
      +
      +	h3 = r1highx0 + sr2highx6
      +	sr3highx6 = sr3high * x6
      +
      +	r1highx4 = r1high * x4
      +
      +	h4 = r2lowx0 + sr3lowx6
      +	r1lowx4 = r1low * x4
      +
      +	r0highx4 = r0high * x4
      +
      +	h5 = r2highx0 + sr3highx6
      +	r0lowx4 = r0low * x4
      +
      +	h7 += r1highx4
      +	sr3highx4 = sr3high * x4
      +
      +	h6 += r1lowx4
      +	sr3lowx4 = sr3low * x4
      +
      +	h5 += r0highx4
      +	sr2highx4 = sr2high * x4
      +
      +	h4 += r0lowx4
      +	sr2lowx4 = sr2low * x4
      +
      +	h3 += sr3highx4
      +	r0lowx2 = r0low * x2
      +
      +	h2 += sr3lowx4
      +	r0highx2 = r0high * x2
      +
      +	h1 += sr2highx4
      +	r1lowx2 = r1low * x2
      +
      +	h0 += sr2lowx4
      +	r1highx2 = r1high * x2
      +
      +	h2 += r0lowx2
      +	r2lowx2 = r2low * x2
      +
      +	h3 += r0highx2
      +	r2highx2 = r2high * x2
      +
      +	h4 += r1lowx2
      +	sr3lowx2 = sr3low * x2
      +
      +	h5 += r1highx2
      +	sr3highx2 = sr3high * x2
      +
      +	h6 += r2lowx2
      +
      +	h7 += r2highx2
      +
      +	h0 += sr3lowx2
      +
      +	h1 += sr3highx2
      +
      +nomorebytes:
      +
      +	y7 = h7 + alpha130
      +
      +	y0 = h0 + alpha32
      +
      +	y1 = h1 + alpha32
      +
      +	y2 = h2 + alpha64
      +
      +	y7 -= alpha130
      +
      +	y3 = h3 + alpha64
      +
      +	y4 = h4 + alpha96
      +
      +	y5 = h5 + alpha96
      +
      +	x7 = h7 - y7
      +	y7 *= scale
      +
      +	y0 -= alpha32
      +
      +	y1 -= alpha32
      +
      +	y2 -= alpha64
      +
      +	h6 += x7
      +
      +	y3 -= alpha64
      +
      +	y4 -= alpha96
      +
      +	y5 -= alpha96
      +
      +	y6 = h6 + alpha130
      +
      +	x0 = h0 - y0
      +
      +	x1 = h1 - y1
      +
      +	x2 = h2 - y2
      +
      +	y6 -= alpha130
      +
      +	x0 += y7
      +
      +	x3 = h3 - y3
      +
      +	x4 = h4 - y4
      +
      +	x5 = h5 - y5
      +
      +	x6 = h6 - y6
      +
      +	y6 *= scale
      +
      +	x2 += y0
      +
      +	x3 += y1
      +
      +	x4 += y2
      +
      +	x0 += y6
      +
      +	x5 += y3
      +
      +	x6 += y4
      +
      +	x2 += x3
      +
      +	x0 += x1
      +
      +	x4 += x5
      +
      +	x6 += y5
      +
      +	x2 += offset1
      +	d1 = int64(math.Float64bits(x2))
      +
      +	x0 += offset0
      +	d0 = int64(math.Float64bits(x0))
      +
      +	x4 += offset2
      +	d2 = int64(math.Float64bits(x4))
      +
      +	x6 += offset3
      +	d3 = int64(math.Float64bits(x6))
      +
      +	f0 = uint64(d0)
      +
      +	f1 = uint64(d1)
      +	bits32 = math.MaxUint64
      +
      +	f2 = uint64(d2)
      +	bits32 >>= 32
      +
      +	f3 = uint64(d3)
      +	f = f0 >> 32
      +
      +	f0 &= bits32
      +	f &= 255
      +
      +	f1 += f
      +	g0 = f0 + 5
      +
      +	g = g0 >> 32
      +	g0 &= bits32
      +
      +	f = f1 >> 32
      +	f1 &= bits32
      +
      +	f &= 255
      +	g1 = f1 + g
      +
      +	g = g1 >> 32
      +	f2 += f
      +
      +	f = f2 >> 32
      +	g1 &= bits32
      +
      +	f2 &= bits32
      +	f &= 255
      +
      +	f3 += f
      +	g2 = f2 + g
      +
      +	g = g2 >> 32
      +	g2 &= bits32
      +
      +	f4 = f3 >> 32
      +	f3 &= bits32
      +
      +	f4 &= 255
      +	g3 = f3 + g
      +
      +	g = g3 >> 32
      +	g3 &= bits32
      +
      +	g4 = f4 + g
      +
      +	g4 = g4 - 4
      +	s00 = uint32(s[0])
      +
      +	f = uint64(int64(g4) >> 63)
      +	s01 = uint32(s[1])
      +
      +	f0 &= f
      +	g0 &^= f
      +	s02 = uint32(s[2])
      +
      +	f1 &= f
      +	f0 |= g0
      +	s03 = uint32(s[3])
      +
      +	g1 &^= f
      +	f2 &= f
      +	s10 = uint32(s[4])
      +
      +	f3 &= f
      +	g2 &^= f
      +	s11 = uint32(s[5])
      +
      +	g3 &^= f
      +	f1 |= g1
      +	s12 = uint32(s[6])
      +
      +	f2 |= g2
      +	f3 |= g3
      +	s13 = uint32(s[7])
      +
      +	s01 <<= 8
      +	f0 += uint64(s00)
      +	s20 = uint32(s[8])
      +
      +	s02 <<= 16
      +	f0 += uint64(s01)
      +	s21 = uint32(s[9])
      +
      +	s03 <<= 24
      +	f0 += uint64(s02)
      +	s22 = uint32(s[10])
      +
      +	s11 <<= 8
      +	f1 += uint64(s10)
      +	s23 = uint32(s[11])
      +
      +	s12 <<= 16
      +	f1 += uint64(s11)
      +	s30 = uint32(s[12])
      +
      +	s13 <<= 24
      +	f1 += uint64(s12)
      +	s31 = uint32(s[13])
      +
      +	f0 += uint64(s03)
      +	f1 += uint64(s13)
      +	s32 = uint32(s[14])
      +
      +	s21 <<= 8
      +	f2 += uint64(s20)
      +	s33 = uint32(s[15])
      +
      +	s22 <<= 16
      +	f2 += uint64(s21)
      +
      +	s23 <<= 24
      +	f2 += uint64(s22)
      +
      +	s31 <<= 8
      +	f3 += uint64(s30)
      +
      +	s32 <<= 16
      +	f3 += uint64(s31)
      +
      +	s33 <<= 24
      +	f3 += uint64(s32)
      +
      +	f2 += uint64(s23)
      +	f3 += uint64(s33)
      +
      +	out[0] = byte(f0)
      +	f0 >>= 8
      +	out[1] = byte(f0)
      +	f0 >>= 8
      +	out[2] = byte(f0)
      +	f0 >>= 8
      +	out[3] = byte(f0)
      +	f0 >>= 8
      +	f1 += f0
      +
      +	out[4] = byte(f1)
      +	f1 >>= 8
      +	out[5] = byte(f1)
      +	f1 >>= 8
      +	out[6] = byte(f1)
      +	f1 >>= 8
      +	out[7] = byte(f1)
      +	f1 >>= 8
      +	f2 += f1
      +
      +	out[8] = byte(f2)
      +	f2 >>= 8
      +	out[9] = byte(f2)
      +	f2 >>= 8
      +	out[10] = byte(f2)
      +	f2 >>= 8
      +	out[11] = byte(f2)
      +	f2 >>= 8
      +	f3 += f2
      +
      +	out[12] = byte(f3)
      +	f3 >>= 8
      +	out[13] = byte(f3)
      +	f3 >>= 8
      +	out[14] = byte(f3)
      +	f3 >>= 8
      +	out[15] = byte(f3)
      +}
      diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go
      new file mode 100644
      index 00000000..6c6e8423
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go
      @@ -0,0 +1,120 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package ripemd160 implements the RIPEMD-160 hash algorithm.
      +package ripemd160 // import "golang.org/x/crypto/ripemd160"
      +
      +// RIPEMD-160 is designed by by Hans Dobbertin, Antoon Bosselaers, and Bart
      +// Preneel with specifications available at:
      +// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf.
      +
      +import (
      +	"crypto"
      +	"hash"
      +)
      +
      +func init() {
      +	crypto.RegisterHash(crypto.RIPEMD160, New)
      +}
      +
      +// The size of the checksum in bytes.
      +const Size = 20
      +
      +// The block size of the hash algorithm in bytes.
      +const BlockSize = 64
      +
      +const (
      +	_s0 = 0x67452301
      +	_s1 = 0xefcdab89
      +	_s2 = 0x98badcfe
      +	_s3 = 0x10325476
      +	_s4 = 0xc3d2e1f0
      +)
      +
      +// digest represents the partial evaluation of a checksum.
      +type digest struct {
      +	s  [5]uint32       // running context
      +	x  [BlockSize]byte // temporary buffer
      +	nx int             // index into x
      +	tc uint64          // total count of bytes processed
      +}
      +
      +func (d *digest) Reset() {
      +	d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4
      +	d.nx = 0
      +	d.tc = 0
      +}
      +
      +// New returns a new hash.Hash computing the checksum.
      +func New() hash.Hash {
      +	result := new(digest)
      +	result.Reset()
      +	return result
      +}
      +
      +func (d *digest) Size() int { return Size }
      +
      +func (d *digest) BlockSize() int { return BlockSize }
      +
      +func (d *digest) Write(p []byte) (nn int, err error) {
      +	nn = len(p)
      +	d.tc += uint64(nn)
      +	if d.nx > 0 {
      +		n := len(p)
      +		if n > BlockSize-d.nx {
      +			n = BlockSize - d.nx
      +		}
      +		for i := 0; i < n; i++ {
      +			d.x[d.nx+i] = p[i]
      +		}
      +		d.nx += n
      +		if d.nx == BlockSize {
      +			_Block(d, d.x[0:])
      +			d.nx = 0
      +		}
      +		p = p[n:]
      +	}
      +	n := _Block(d, p)
      +	p = p[n:]
      +	if len(p) > 0 {
      +		d.nx = copy(d.x[:], p)
      +	}
      +	return
      +}
      +
      +func (d0 *digest) Sum(in []byte) []byte {
      +	// Make a copy of d0 so that caller can keep writing and summing.
      +	d := *d0
      +
      +	// Padding.  Add a 1 bit and 0 bits until 56 bytes mod 64.
      +	tc := d.tc
      +	var tmp [64]byte
      +	tmp[0] = 0x80
      +	if tc%64 < 56 {
      +		d.Write(tmp[0 : 56-tc%64])
      +	} else {
      +		d.Write(tmp[0 : 64+56-tc%64])
      +	}
      +
      +	// Length in bits.
      +	tc <<= 3
      +	for i := uint(0); i < 8; i++ {
      +		tmp[i] = byte(tc >> (8 * i))
      +	}
      +	d.Write(tmp[0:8])
      +
      +	if d.nx != 0 {
      +		panic("d.nx != 0")
      +	}
      +
      +	var digest [Size]byte
      +	for i, s := range d.s {
      +		digest[i*4] = byte(s)
      +		digest[i*4+1] = byte(s >> 8)
      +		digest[i*4+2] = byte(s >> 16)
      +		digest[i*4+3] = byte(s >> 24)
      +	}
      +
      +	return append(in, digest[:]...)
      +}
      diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160_test.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160_test.go
      new file mode 100644
      index 00000000..5df1b259
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160_test.go
      @@ -0,0 +1,64 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ripemd160
      +
      +// Test vectors are from:
      +// http://homes.esat.kuleuven.be/~bosselae/ripemd160.html
      +
      +import (
      +	"fmt"
      +	"io"
      +	"testing"
      +)
      +
      +type mdTest struct {
      +	out string
      +	in  string
      +}
      +
      +var vectors = [...]mdTest{
      +	{"9c1185a5c5e9fc54612808977ee8f548b2258d31", ""},
      +	{"0bdc9d2d256b3ee9daae347be6f4dc835a467ffe", "a"},
      +	{"8eb208f7e05d987a9b044a8e98c6b087f15a0bfc", "abc"},
      +	{"5d0689ef49d2fae572b881b123a85ffa21595f36", "message digest"},
      +	{"f71c27109c692c1b56bbdceb5b9d2865b3708dbc", "abcdefghijklmnopqrstuvwxyz"},
      +	{"12a053384a9c0c88e405a06c27dcf49ada62eb2b", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
      +	{"b0e20b6e3116640286ed3a87a5713079b21f5189", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"},
      +	{"9b752e45573d4b39f4dbd3323cab82bf63326bfb", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"},
      +}
      +
      +func TestVectors(t *testing.T) {
      +	for i := 0; i < len(vectors); i++ {
      +		tv := vectors[i]
      +		md := New()
      +		for j := 0; j < 3; j++ {
      +			if j < 2 {
      +				io.WriteString(md, tv.in)
      +			} else {
      +				io.WriteString(md, tv.in[0:len(tv.in)/2])
      +				md.Sum(nil)
      +				io.WriteString(md, tv.in[len(tv.in)/2:])
      +			}
      +			s := fmt.Sprintf("%x", md.Sum(nil))
      +			if s != tv.out {
      +				t.Fatalf("RIPEMD-160[%d](%s) = %s, expected %s", j, tv.in, s, tv.out)
      +			}
      +			md.Reset()
      +		}
      +	}
      +}
      +
      +func TestMillionA(t *testing.T) {
      +	md := New()
      +	for i := 0; i < 100000; i++ {
      +		io.WriteString(md, "aaaaaaaaaa")
      +	}
      +	out := "52783243c1697bdbe16d37f97f68f08325dc1528"
      +	s := fmt.Sprintf("%x", md.Sum(nil))
      +	if s != out {
      +		t.Fatalf("RIPEMD-160 (1 million 'a') = %s, expected %s", s, out)
      +	}
      +	md.Reset()
      +}
      diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go
      new file mode 100644
      index 00000000..7bc8e6c4
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go
      @@ -0,0 +1,161 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// RIPEMD-160 block step.
      +// In its own file so that a faster assembly or C version
      +// can be substituted easily.
      +
      +package ripemd160
      +
      +// work buffer indices and roll amounts for one line
      +var _n = [80]uint{
      +	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
      +	7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8,
      +	3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12,
      +	1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2,
      +	4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13,
      +}
      +
      +var _r = [80]uint{
      +	11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8,
      +	7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12,
      +	11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5,
      +	11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12,
      +	9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6,
      +}
      +
      +// same for the other parallel one
      +var n_ = [80]uint{
      +	5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12,
      +	6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2,
      +	15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13,
      +	8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14,
      +	12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11,
      +}
      +
      +var r_ = [80]uint{
      +	8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6,
      +	9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11,
      +	9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5,
      +	15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8,
      +	8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11,
      +}
      +
      +func _Block(md *digest, p []byte) int {
      +	n := 0
      +	var x [16]uint32
      +	var alpha, beta uint32
      +	for len(p) >= BlockSize {
      +		a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4]
      +		aa, bb, cc, dd, ee := a, b, c, d, e
      +		j := 0
      +		for i := 0; i < 16; i++ {
      +			x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
      +			j += 4
      +		}
      +
      +		// round 1
      +		i := 0
      +		for i < 16 {
      +			alpha = a + (b ^ c ^ d) + x[_n[i]]
      +			s := _r[i]
      +			alpha = (alpha<<s | alpha>>(32-s)) + e
      +			beta = c<<10 | c>>22
      +			a, b, c, d, e = e, alpha, b, beta, d
      +
      +			// parallel line
      +			alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6
      +			s = r_[i]
      +			alpha = (alpha<<s | alpha>>(32-s)) + ee
      +			beta = cc<<10 | cc>>22
      +			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
      +
      +			i++
      +		}
      +
      +		// round 2
      +		for i < 32 {
      +			alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999
      +			s := _r[i]
      +			alpha = (alpha<<s | alpha>>(32-s)) + e
      +			beta = c<<10 | c>>22
      +			a, b, c, d, e = e, alpha, b, beta, d
      +
      +			// parallel line
      +			alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124
      +			s = r_[i]
      +			alpha = (alpha<<s | alpha>>(32-s)) + ee
      +			beta = cc<<10 | cc>>22
      +			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
      +
      +			i++
      +		}
      +
      +		// round 3
      +		for i < 48 {
      +			alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1
      +			s := _r[i]
      +			alpha = (alpha<<s | alpha>>(32-s)) + e
      +			beta = c<<10 | c>>22
      +			a, b, c, d, e = e, alpha, b, beta, d
      +
      +			// parallel line
      +			alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3
      +			s = r_[i]
      +			alpha = (alpha<<s | alpha>>(32-s)) + ee
      +			beta = cc<<10 | cc>>22
      +			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
      +
      +			i++
      +		}
      +
      +		// round 4
      +		for i < 64 {
      +			alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc
      +			s := _r[i]
      +			alpha = (alpha<<s | alpha>>(32-s)) + e
      +			beta = c<<10 | c>>22
      +			a, b, c, d, e = e, alpha, b, beta, d
      +
      +			// parallel line
      +			alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9
      +			s = r_[i]
      +			alpha = (alpha<<s | alpha>>(32-s)) + ee
      +			beta = cc<<10 | cc>>22
      +			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
      +
      +			i++
      +		}
      +
      +		// round 5
      +		for i < 80 {
      +			alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e
      +			s := _r[i]
      +			alpha = (alpha<<s | alpha>>(32-s)) + e
      +			beta = c<<10 | c>>22
      +			a, b, c, d, e = e, alpha, b, beta, d
      +
      +			// parallel line
      +			alpha = aa + (bb ^ cc ^ dd) + x[n_[i]]
      +			s = r_[i]
      +			alpha = (alpha<<s | alpha>>(32-s)) + ee
      +			beta = cc<<10 | cc>>22
      +			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
      +
      +			i++
      +		}
      +
      +		// combine results
      +		dd += c + md.s[1]
      +		md.s[1] = md.s[2] + d + ee
      +		md.s[2] = md.s[3] + e + aa
      +		md.s[3] = md.s[4] + a + bb
      +		md.s[4] = md.s[0] + b + cc
      +		md.s[0] = dd
      +
      +		p = p[BlockSize:]
      +		n += BlockSize
      +	}
      +	return n
      +}
      diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
      new file mode 100644
      index 00000000..4c96147c
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
      @@ -0,0 +1,144 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package salsa provides low-level access to functions in the Salsa family.
      +package salsa // import "golang.org/x/crypto/salsa20/salsa"
      +
      +// Sigma is the Salsa20 constant for 256-bit keys.
      +var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
      +
      +// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte
      +// key k, and 16-byte constant c, and puts the result into the 32-byte array
      +// out.
      +func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
      +	x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
      +	x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
      +	x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
      +	x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
      +	x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
      +	x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
      +	x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
      +	x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
      +	x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
      +	x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
      +	x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
      +	x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
      +	x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
      +	x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
      +	x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
      +	x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
      +
      +	for i := 0; i < 20; i += 2 {
      +		u := x0 + x12
      +		x4 ^= u<<7 | u>>(32-7)
      +		u = x4 + x0
      +		x8 ^= u<<9 | u>>(32-9)
      +		u = x8 + x4
      +		x12 ^= u<<13 | u>>(32-13)
      +		u = x12 + x8
      +		x0 ^= u<<18 | u>>(32-18)
      +
      +		u = x5 + x1
      +		x9 ^= u<<7 | u>>(32-7)
      +		u = x9 + x5
      +		x13 ^= u<<9 | u>>(32-9)
      +		u = x13 + x9
      +		x1 ^= u<<13 | u>>(32-13)
      +		u = x1 + x13
      +		x5 ^= u<<18 | u>>(32-18)
      +
      +		u = x10 + x6
      +		x14 ^= u<<7 | u>>(32-7)
      +		u = x14 + x10
      +		x2 ^= u<<9 | u>>(32-9)
      +		u = x2 + x14
      +		x6 ^= u<<13 | u>>(32-13)
      +		u = x6 + x2
      +		x10 ^= u<<18 | u>>(32-18)
      +
      +		u = x15 + x11
      +		x3 ^= u<<7 | u>>(32-7)
      +		u = x3 + x15
      +		x7 ^= u<<9 | u>>(32-9)
      +		u = x7 + x3
      +		x11 ^= u<<13 | u>>(32-13)
      +		u = x11 + x7
      +		x15 ^= u<<18 | u>>(32-18)
      +
      +		u = x0 + x3
      +		x1 ^= u<<7 | u>>(32-7)
      +		u = x1 + x0
      +		x2 ^= u<<9 | u>>(32-9)
      +		u = x2 + x1
      +		x3 ^= u<<13 | u>>(32-13)
      +		u = x3 + x2
      +		x0 ^= u<<18 | u>>(32-18)
      +
      +		u = x5 + x4
      +		x6 ^= u<<7 | u>>(32-7)
      +		u = x6 + x5
      +		x7 ^= u<<9 | u>>(32-9)
      +		u = x7 + x6
      +		x4 ^= u<<13 | u>>(32-13)
      +		u = x4 + x7
      +		x5 ^= u<<18 | u>>(32-18)
      +
      +		u = x10 + x9
      +		x11 ^= u<<7 | u>>(32-7)
      +		u = x11 + x10
      +		x8 ^= u<<9 | u>>(32-9)
      +		u = x8 + x11
      +		x9 ^= u<<13 | u>>(32-13)
      +		u = x9 + x8
      +		x10 ^= u<<18 | u>>(32-18)
      +
      +		u = x15 + x14
      +		x12 ^= u<<7 | u>>(32-7)
      +		u = x12 + x15
      +		x13 ^= u<<9 | u>>(32-9)
      +		u = x13 + x12
      +		x14 ^= u<<13 | u>>(32-13)
      +		u = x14 + x13
      +		x15 ^= u<<18 | u>>(32-18)
      +	}
      +	out[0] = byte(x0)
      +	out[1] = byte(x0 >> 8)
      +	out[2] = byte(x0 >> 16)
      +	out[3] = byte(x0 >> 24)
      +
      +	out[4] = byte(x5)
      +	out[5] = byte(x5 >> 8)
      +	out[6] = byte(x5 >> 16)
      +	out[7] = byte(x5 >> 24)
      +
      +	out[8] = byte(x10)
      +	out[9] = byte(x10 >> 8)
      +	out[10] = byte(x10 >> 16)
      +	out[11] = byte(x10 >> 24)
      +
      +	out[12] = byte(x15)
      +	out[13] = byte(x15 >> 8)
      +	out[14] = byte(x15 >> 16)
      +	out[15] = byte(x15 >> 24)
      +
      +	out[16] = byte(x6)
      +	out[17] = byte(x6 >> 8)
      +	out[18] = byte(x6 >> 16)
      +	out[19] = byte(x6 >> 24)
      +
      +	out[20] = byte(x7)
      +	out[21] = byte(x7 >> 8)
      +	out[22] = byte(x7 >> 16)
      +	out[23] = byte(x7 >> 24)
      +
      +	out[24] = byte(x8)
      +	out[25] = byte(x8 >> 8)
      +	out[26] = byte(x8 >> 16)
      +	out[27] = byte(x8 >> 24)
      +
      +	out[28] = byte(x9)
      +	out[29] = byte(x9 >> 8)
      +	out[30] = byte(x9 >> 16)
      +	out[31] = byte(x9 >> 24)
      +}
      diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s
      new file mode 100644
      index 00000000..6e1df963
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s
      @@ -0,0 +1,902 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build amd64,!appengine,!gccgo
      +
      +// This code was translated into a form compatible with 6a from the public
      +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
      +
      +// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
      +TEXT ·salsa2020XORKeyStream(SB),0,$512-40
      +	MOVQ out+0(FP),DI
      +	MOVQ in+8(FP),SI
      +	MOVQ n+16(FP),DX
      +	MOVQ nonce+24(FP),CX
      +	MOVQ key+32(FP),R8
      +
      +	MOVQ SP,R11
      +	MOVQ $31,R9
      +	NOTQ R9
      +	ANDQ R9,SP
      +	ADDQ $32,SP
      +
      +	MOVQ R11,352(SP)
      +	MOVQ R12,360(SP)
      +	MOVQ R13,368(SP)
      +	MOVQ R14,376(SP)
      +	MOVQ R15,384(SP)
      +	MOVQ BX,392(SP)
      +	MOVQ BP,400(SP)
      +	MOVQ DX,R9
      +	MOVQ CX,DX
      +	MOVQ R8,R10
      +	CMPQ R9,$0
      +	JBE DONE
      +	START:
      +	MOVL 20(R10),CX
      +	MOVL 0(R10),R8
      +	MOVL 0(DX),AX
      +	MOVL 16(R10),R11
      +	MOVL CX,0(SP)
      +	MOVL R8, 4 (SP)
      +	MOVL AX, 8 (SP)
      +	MOVL R11, 12 (SP)
      +	MOVL 8(DX),CX
      +	MOVL 24(R10),R8
      +	MOVL 4(R10),AX
      +	MOVL 4(DX),R11
      +	MOVL CX,16(SP)
      +	MOVL R8, 20 (SP)
      +	MOVL AX, 24 (SP)
      +	MOVL R11, 28 (SP)
      +	MOVL 12(DX),CX
      +	MOVL 12(R10),DX
      +	MOVL 28(R10),R8
      +	MOVL 8(R10),AX
      +	MOVL DX,32(SP)
      +	MOVL CX, 36 (SP)
      +	MOVL R8, 40 (SP)
      +	MOVL AX, 44 (SP)
      +	MOVQ $1634760805,DX
      +	MOVQ $857760878,CX
      +	MOVQ $2036477234,R8
      +	MOVQ $1797285236,AX
      +	MOVL DX,48(SP)
      +	MOVL CX, 52 (SP)
      +	MOVL R8, 56 (SP)
      +	MOVL AX, 60 (SP)
      +	CMPQ R9,$256
      +	JB BYTESBETWEEN1AND255
      +	MOVOA 48(SP),X0
      +	PSHUFL $0X55,X0,X1
      +	PSHUFL $0XAA,X0,X2
      +	PSHUFL $0XFF,X0,X3
      +	PSHUFL $0X00,X0,X0
      +	MOVOA X1,64(SP)
      +	MOVOA X2,80(SP)
      +	MOVOA X3,96(SP)
      +	MOVOA X0,112(SP)
      +	MOVOA 0(SP),X0
      +	PSHUFL $0XAA,X0,X1
      +	PSHUFL $0XFF,X0,X2
      +	PSHUFL $0X00,X0,X3
      +	PSHUFL $0X55,X0,X0
      +	MOVOA X1,128(SP)
      +	MOVOA X2,144(SP)
      +	MOVOA X3,160(SP)
      +	MOVOA X0,176(SP)
      +	MOVOA 16(SP),X0
      +	PSHUFL $0XFF,X0,X1
      +	PSHUFL $0X55,X0,X2
      +	PSHUFL $0XAA,X0,X0
      +	MOVOA X1,192(SP)
      +	MOVOA X2,208(SP)
      +	MOVOA X0,224(SP)
      +	MOVOA 32(SP),X0
      +	PSHUFL $0X00,X0,X1
      +	PSHUFL $0XAA,X0,X2
      +	PSHUFL $0XFF,X0,X0
      +	MOVOA X1,240(SP)
      +	MOVOA X2,256(SP)
      +	MOVOA X0,272(SP)
      +	BYTESATLEAST256:
      +	MOVL 16(SP),DX
      +	MOVL  36 (SP),CX
      +	MOVL DX,288(SP)
      +	MOVL CX,304(SP)
      +	ADDQ $1,DX
      +	SHLQ $32,CX
      +	ADDQ CX,DX
      +	MOVQ DX,CX
      +	SHRQ $32,CX
      +	MOVL DX, 292 (SP)
      +	MOVL CX, 308 (SP)
      +	ADDQ $1,DX
      +	SHLQ $32,CX
      +	ADDQ CX,DX
      +	MOVQ DX,CX
      +	SHRQ $32,CX
      +	MOVL DX, 296 (SP)
      +	MOVL CX, 312 (SP)
      +	ADDQ $1,DX
      +	SHLQ $32,CX
      +	ADDQ CX,DX
      +	MOVQ DX,CX
      +	SHRQ $32,CX
      +	MOVL DX, 300 (SP)
      +	MOVL CX, 316 (SP)
      +	ADDQ $1,DX
      +	SHLQ $32,CX
      +	ADDQ CX,DX
      +	MOVQ DX,CX
      +	SHRQ $32,CX
      +	MOVL DX,16(SP)
      +	MOVL CX, 36 (SP)
      +	MOVQ R9,408(SP)
      +	MOVQ $20,DX
      +	MOVOA 64(SP),X0
      +	MOVOA 80(SP),X1
      +	MOVOA 96(SP),X2
      +	MOVOA 256(SP),X3
      +	MOVOA 272(SP),X4
      +	MOVOA 128(SP),X5
      +	MOVOA 144(SP),X6
      +	MOVOA 176(SP),X7
      +	MOVOA 192(SP),X8
      +	MOVOA 208(SP),X9
      +	MOVOA 224(SP),X10
      +	MOVOA 304(SP),X11
      +	MOVOA 112(SP),X12
      +	MOVOA 160(SP),X13
      +	MOVOA 240(SP),X14
      +	MOVOA 288(SP),X15
      +	MAINLOOP1:
      +	MOVOA X1,320(SP)
      +	MOVOA X2,336(SP)
      +	MOVOA X13,X1
      +	PADDL X12,X1
      +	MOVOA X1,X2
      +	PSLLL $7,X1
      +	PXOR X1,X14
      +	PSRLL $25,X2
      +	PXOR X2,X14
      +	MOVOA X7,X1
      +	PADDL X0,X1
      +	MOVOA X1,X2
      +	PSLLL $7,X1
      +	PXOR X1,X11
      +	PSRLL $25,X2
      +	PXOR X2,X11
      +	MOVOA X12,X1
      +	PADDL X14,X1
      +	MOVOA X1,X2
      +	PSLLL $9,X1
      +	PXOR X1,X15
      +	PSRLL $23,X2
      +	PXOR X2,X15
      +	MOVOA X0,X1
      +	PADDL X11,X1
      +	MOVOA X1,X2
      +	PSLLL $9,X1
      +	PXOR X1,X9
      +	PSRLL $23,X2
      +	PXOR X2,X9
      +	MOVOA X14,X1
      +	PADDL X15,X1
      +	MOVOA X1,X2
      +	PSLLL $13,X1
      +	PXOR X1,X13
      +	PSRLL $19,X2
      +	PXOR X2,X13
      +	MOVOA X11,X1
      +	PADDL X9,X1
      +	MOVOA X1,X2
      +	PSLLL $13,X1
      +	PXOR X1,X7
      +	PSRLL $19,X2
      +	PXOR X2,X7
      +	MOVOA X15,X1
      +	PADDL X13,X1
      +	MOVOA X1,X2
      +	PSLLL $18,X1
      +	PXOR X1,X12
      +	PSRLL $14,X2
      +	PXOR X2,X12
      +	MOVOA 320(SP),X1
      +	MOVOA X12,320(SP)
      +	MOVOA X9,X2
      +	PADDL X7,X2
      +	MOVOA X2,X12
      +	PSLLL $18,X2
      +	PXOR X2,X0
      +	PSRLL $14,X12
      +	PXOR X12,X0
      +	MOVOA X5,X2
      +	PADDL X1,X2
      +	MOVOA X2,X12
      +	PSLLL $7,X2
      +	PXOR X2,X3
      +	PSRLL $25,X12
      +	PXOR X12,X3
      +	MOVOA 336(SP),X2
      +	MOVOA X0,336(SP)
      +	MOVOA X6,X0
      +	PADDL X2,X0
      +	MOVOA X0,X12
      +	PSLLL $7,X0
      +	PXOR X0,X4
      +	PSRLL $25,X12
      +	PXOR X12,X4
      +	MOVOA X1,X0
      +	PADDL X3,X0
      +	MOVOA X0,X12
      +	PSLLL $9,X0
      +	PXOR X0,X10
      +	PSRLL $23,X12
      +	PXOR X12,X10
      +	MOVOA X2,X0
      +	PADDL X4,X0
      +	MOVOA X0,X12
      +	PSLLL $9,X0
      +	PXOR X0,X8
      +	PSRLL $23,X12
      +	PXOR X12,X8
      +	MOVOA X3,X0
      +	PADDL X10,X0
      +	MOVOA X0,X12
      +	PSLLL $13,X0
      +	PXOR X0,X5
      +	PSRLL $19,X12
      +	PXOR X12,X5
      +	MOVOA X4,X0
      +	PADDL X8,X0
      +	MOVOA X0,X12
      +	PSLLL $13,X0
      +	PXOR X0,X6
      +	PSRLL $19,X12
      +	PXOR X12,X6
      +	MOVOA X10,X0
      +	PADDL X5,X0
      +	MOVOA X0,X12
      +	PSLLL $18,X0
      +	PXOR X0,X1
      +	PSRLL $14,X12
      +	PXOR X12,X1
      +	MOVOA 320(SP),X0
      +	MOVOA X1,320(SP)
      +	MOVOA X4,X1
      +	PADDL X0,X1
      +	MOVOA X1,X12
      +	PSLLL $7,X1
      +	PXOR X1,X7
      +	PSRLL $25,X12
      +	PXOR X12,X7
      +	MOVOA X8,X1
      +	PADDL X6,X1
      +	MOVOA X1,X12
      +	PSLLL $18,X1
      +	PXOR X1,X2
      +	PSRLL $14,X12
      +	PXOR X12,X2
      +	MOVOA 336(SP),X12
      +	MOVOA X2,336(SP)
      +	MOVOA X14,X1
      +	PADDL X12,X1
      +	MOVOA X1,X2
      +	PSLLL $7,X1
      +	PXOR X1,X5
      +	PSRLL $25,X2
      +	PXOR X2,X5
      +	MOVOA X0,X1
      +	PADDL X7,X1
      +	MOVOA X1,X2
      +	PSLLL $9,X1
      +	PXOR X1,X10
      +	PSRLL $23,X2
      +	PXOR X2,X10
      +	MOVOA X12,X1
      +	PADDL X5,X1
      +	MOVOA X1,X2
      +	PSLLL $9,X1
      +	PXOR X1,X8
      +	PSRLL $23,X2
      +	PXOR X2,X8
      +	MOVOA X7,X1
      +	PADDL X10,X1
      +	MOVOA X1,X2
      +	PSLLL $13,X1
      +	PXOR X1,X4
      +	PSRLL $19,X2
      +	PXOR X2,X4
      +	MOVOA X5,X1
      +	PADDL X8,X1
      +	MOVOA X1,X2
      +	PSLLL $13,X1
      +	PXOR X1,X14
      +	PSRLL $19,X2
      +	PXOR X2,X14
      +	MOVOA X10,X1
      +	PADDL X4,X1
      +	MOVOA X1,X2
      +	PSLLL $18,X1
      +	PXOR X1,X0
      +	PSRLL $14,X2
      +	PXOR X2,X0
      +	MOVOA 320(SP),X1
      +	MOVOA X0,320(SP)
      +	MOVOA X8,X0
      +	PADDL X14,X0
      +	MOVOA X0,X2
      +	PSLLL $18,X0
      +	PXOR X0,X12
      +	PSRLL $14,X2
      +	PXOR X2,X12
      +	MOVOA X11,X0
      +	PADDL X1,X0
      +	MOVOA X0,X2
      +	PSLLL $7,X0
      +	PXOR X0,X6
      +	PSRLL $25,X2
      +	PXOR X2,X6
      +	MOVOA 336(SP),X2
      +	MOVOA X12,336(SP)
      +	MOVOA X3,X0
      +	PADDL X2,X0
      +	MOVOA X0,X12
      +	PSLLL $7,X0
      +	PXOR X0,X13
      +	PSRLL $25,X12
      +	PXOR X12,X13
      +	MOVOA X1,X0
      +	PADDL X6,X0
      +	MOVOA X0,X12
      +	PSLLL $9,X0
      +	PXOR X0,X15
      +	PSRLL $23,X12
      +	PXOR X12,X15
      +	MOVOA X2,X0
      +	PADDL X13,X0
      +	MOVOA X0,X12
      +	PSLLL $9,X0
      +	PXOR X0,X9
      +	PSRLL $23,X12
      +	PXOR X12,X9
      +	MOVOA X6,X0
      +	PADDL X15,X0
      +	MOVOA X0,X12
      +	PSLLL $13,X0
      +	PXOR X0,X11
      +	PSRLL $19,X12
      +	PXOR X12,X11
      +	MOVOA X13,X0
      +	PADDL X9,X0
      +	MOVOA X0,X12
      +	PSLLL $13,X0
      +	PXOR X0,X3
      +	PSRLL $19,X12
      +	PXOR X12,X3
      +	MOVOA X15,X0
      +	PADDL X11,X0
      +	MOVOA X0,X12
      +	PSLLL $18,X0
      +	PXOR X0,X1
      +	PSRLL $14,X12
      +	PXOR X12,X1
      +	MOVOA X9,X0
      +	PADDL X3,X0
      +	MOVOA X0,X12
      +	PSLLL $18,X0
      +	PXOR X0,X2
      +	PSRLL $14,X12
      +	PXOR X12,X2
      +	MOVOA 320(SP),X12
      +	MOVOA 336(SP),X0
      +	SUBQ $2,DX
      +	JA MAINLOOP1
      +	PADDL 112(SP),X12
      +	PADDL 176(SP),X7
      +	PADDL 224(SP),X10
      +	PADDL 272(SP),X4
      +	MOVD X12,DX
      +	MOVD X7,CX
      +	MOVD X10,R8
      +	MOVD X4,R9
      +	PSHUFL $0X39,X12,X12
      +	PSHUFL $0X39,X7,X7
      +	PSHUFL $0X39,X10,X10
      +	PSHUFL $0X39,X4,X4
      +	XORL 0(SI),DX
      +	XORL 4(SI),CX
      +	XORL 8(SI),R8
      +	XORL 12(SI),R9
      +	MOVL DX,0(DI)
      +	MOVL CX,4(DI)
      +	MOVL R8,8(DI)
      +	MOVL R9,12(DI)
      +	MOVD X12,DX
      +	MOVD X7,CX
      +	MOVD X10,R8
      +	MOVD X4,R9
      +	PSHUFL $0X39,X12,X12
      +	PSHUFL $0X39,X7,X7
      +	PSHUFL $0X39,X10,X10
      +	PSHUFL $0X39,X4,X4
      +	XORL 64(SI),DX
      +	XORL 68(SI),CX
      +	XORL 72(SI),R8
      +	XORL 76(SI),R9
      +	MOVL DX,64(DI)
      +	MOVL CX,68(DI)
      +	MOVL R8,72(DI)
      +	MOVL R9,76(DI)
      +	MOVD X12,DX
      +	MOVD X7,CX
      +	MOVD X10,R8
      +	MOVD X4,R9
      +	PSHUFL $0X39,X12,X12
      +	PSHUFL $0X39,X7,X7
      +	PSHUFL $0X39,X10,X10
      +	PSHUFL $0X39,X4,X4
      +	XORL 128(SI),DX
      +	XORL 132(SI),CX
      +	XORL 136(SI),R8
      +	XORL 140(SI),R9
      +	MOVL DX,128(DI)
      +	MOVL CX,132(DI)
      +	MOVL R8,136(DI)
      +	MOVL R9,140(DI)
      +	MOVD X12,DX
      +	MOVD X7,CX
      +	MOVD X10,R8
      +	MOVD X4,R9
      +	XORL 192(SI),DX
      +	XORL 196(SI),CX
      +	XORL 200(SI),R8
      +	XORL 204(SI),R9
      +	MOVL DX,192(DI)
      +	MOVL CX,196(DI)
      +	MOVL R8,200(DI)
      +	MOVL R9,204(DI)
      +	PADDL 240(SP),X14
      +	PADDL 64(SP),X0
      +	PADDL 128(SP),X5
      +	PADDL 192(SP),X8
      +	MOVD X14,DX
      +	MOVD X0,CX
      +	MOVD X5,R8
      +	MOVD X8,R9
      +	PSHUFL $0X39,X14,X14
      +	PSHUFL $0X39,X0,X0
      +	PSHUFL $0X39,X5,X5
      +	PSHUFL $0X39,X8,X8
      +	XORL 16(SI),DX
      +	XORL 20(SI),CX
      +	XORL 24(SI),R8
      +	XORL 28(SI),R9
      +	MOVL DX,16(DI)
      +	MOVL CX,20(DI)
      +	MOVL R8,24(DI)
      +	MOVL R9,28(DI)
      +	MOVD X14,DX
      +	MOVD X0,CX
      +	MOVD X5,R8
      +	MOVD X8,R9
      +	PSHUFL $0X39,X14,X14
      +	PSHUFL $0X39,X0,X0
      +	PSHUFL $0X39,X5,X5
      +	PSHUFL $0X39,X8,X8
      +	XORL 80(SI),DX
      +	XORL 84(SI),CX
      +	XORL 88(SI),R8
      +	XORL 92(SI),R9
      +	MOVL DX,80(DI)
      +	MOVL CX,84(DI)
      +	MOVL R8,88(DI)
      +	MOVL R9,92(DI)
      +	MOVD X14,DX
      +	MOVD X0,CX
      +	MOVD X5,R8
      +	MOVD X8,R9
      +	PSHUFL $0X39,X14,X14
      +	PSHUFL $0X39,X0,X0
      +	PSHUFL $0X39,X5,X5
      +	PSHUFL $0X39,X8,X8
      +	XORL 144(SI),DX
      +	XORL 148(SI),CX
      +	XORL 152(SI),R8
      +	XORL 156(SI),R9
      +	MOVL DX,144(DI)
      +	MOVL CX,148(DI)
      +	MOVL R8,152(DI)
      +	MOVL R9,156(DI)
      +	MOVD X14,DX
      +	MOVD X0,CX
      +	MOVD X5,R8
      +	MOVD X8,R9
      +	XORL 208(SI),DX
      +	XORL 212(SI),CX
      +	XORL 216(SI),R8
      +	XORL 220(SI),R9
      +	MOVL DX,208(DI)
      +	MOVL CX,212(DI)
      +	MOVL R8,216(DI)
      +	MOVL R9,220(DI)
      +	PADDL 288(SP),X15
      +	PADDL 304(SP),X11
      +	PADDL 80(SP),X1
      +	PADDL 144(SP),X6
      +	MOVD X15,DX
      +	MOVD X11,CX
      +	MOVD X1,R8
      +	MOVD X6,R9
      +	PSHUFL $0X39,X15,X15
      +	PSHUFL $0X39,X11,X11
      +	PSHUFL $0X39,X1,X1
      +	PSHUFL $0X39,X6,X6
      +	XORL 32(SI),DX
      +	XORL 36(SI),CX
      +	XORL 40(SI),R8
      +	XORL 44(SI),R9
      +	MOVL DX,32(DI)
      +	MOVL CX,36(DI)
      +	MOVL R8,40(DI)
      +	MOVL R9,44(DI)
      +	MOVD X15,DX
      +	MOVD X11,CX
      +	MOVD X1,R8
      +	MOVD X6,R9
      +	PSHUFL $0X39,X15,X15
      +	PSHUFL $0X39,X11,X11
      +	PSHUFL $0X39,X1,X1
      +	PSHUFL $0X39,X6,X6
      +	XORL 96(SI),DX
      +	XORL 100(SI),CX
      +	XORL 104(SI),R8
      +	XORL 108(SI),R9
      +	MOVL DX,96(DI)
      +	MOVL CX,100(DI)
      +	MOVL R8,104(DI)
      +	MOVL R9,108(DI)
      +	MOVD X15,DX
      +	MOVD X11,CX
      +	MOVD X1,R8
      +	MOVD X6,R9
      +	PSHUFL $0X39,X15,X15
      +	PSHUFL $0X39,X11,X11
      +	PSHUFL $0X39,X1,X1
      +	PSHUFL $0X39,X6,X6
      +	XORL 160(SI),DX
      +	XORL 164(SI),CX
      +	XORL 168(SI),R8
      +	XORL 172(SI),R9
      +	MOVL DX,160(DI)
      +	MOVL CX,164(DI)
      +	MOVL R8,168(DI)
      +	MOVL R9,172(DI)
      +	MOVD X15,DX
      +	MOVD X11,CX
      +	MOVD X1,R8
      +	MOVD X6,R9
      +	XORL 224(SI),DX
      +	XORL 228(SI),CX
      +	XORL 232(SI),R8
      +	XORL 236(SI),R9
      +	MOVL DX,224(DI)
      +	MOVL CX,228(DI)
      +	MOVL R8,232(DI)
      +	MOVL R9,236(DI)
      +	PADDL 160(SP),X13
      +	PADDL 208(SP),X9
      +	PADDL 256(SP),X3
      +	PADDL 96(SP),X2
      +	MOVD X13,DX
      +	MOVD X9,CX
      +	MOVD X3,R8
      +	MOVD X2,R9
      +	PSHUFL $0X39,X13,X13
      +	PSHUFL $0X39,X9,X9
      +	PSHUFL $0X39,X3,X3
      +	PSHUFL $0X39,X2,X2
      +	XORL 48(SI),DX
      +	XORL 52(SI),CX
      +	XORL 56(SI),R8
      +	XORL 60(SI),R9
      +	MOVL DX,48(DI)
      +	MOVL CX,52(DI)
      +	MOVL R8,56(DI)
      +	MOVL R9,60(DI)
      +	MOVD X13,DX
      +	MOVD X9,CX
      +	MOVD X3,R8
      +	MOVD X2,R9
      +	PSHUFL $0X39,X13,X13
      +	PSHUFL $0X39,X9,X9
      +	PSHUFL $0X39,X3,X3
      +	PSHUFL $0X39,X2,X2
      +	XORL 112(SI),DX
      +	XORL 116(SI),CX
      +	XORL 120(SI),R8
      +	XORL 124(SI),R9
      +	MOVL DX,112(DI)
      +	MOVL CX,116(DI)
      +	MOVL R8,120(DI)
      +	MOVL R9,124(DI)
      +	MOVD X13,DX
      +	MOVD X9,CX
      +	MOVD X3,R8
      +	MOVD X2,R9
      +	PSHUFL $0X39,X13,X13
      +	PSHUFL $0X39,X9,X9
      +	PSHUFL $0X39,X3,X3
      +	PSHUFL $0X39,X2,X2
      +	XORL 176(SI),DX
      +	XORL 180(SI),CX
      +	XORL 184(SI),R8
      +	XORL 188(SI),R9
      +	MOVL DX,176(DI)
      +	MOVL CX,180(DI)
      +	MOVL R8,184(DI)
      +	MOVL R9,188(DI)
      +	MOVD X13,DX
      +	MOVD X9,CX
      +	MOVD X3,R8
      +	MOVD X2,R9
      +	XORL 240(SI),DX
      +	XORL 244(SI),CX
      +	XORL 248(SI),R8
      +	XORL 252(SI),R9
      +	MOVL DX,240(DI)
      +	MOVL CX,244(DI)
      +	MOVL R8,248(DI)
      +	MOVL R9,252(DI)
      +	MOVQ 408(SP),R9
      +	SUBQ $256,R9
      +	ADDQ $256,SI
      +	ADDQ $256,DI
      +	CMPQ R9,$256
      +	JAE BYTESATLEAST256
      +	CMPQ R9,$0
      +	JBE DONE
      +	BYTESBETWEEN1AND255:
      +	CMPQ R9,$64
      +	JAE NOCOPY
      +	MOVQ DI,DX
      +	LEAQ 416(SP),DI
      +	MOVQ R9,CX
      +	REP; MOVSB
      +	LEAQ 416(SP),DI
      +	LEAQ 416(SP),SI
      +	NOCOPY:
      +	MOVQ R9,408(SP)
      +	MOVOA 48(SP),X0
      +	MOVOA 0(SP),X1
      +	MOVOA 16(SP),X2
      +	MOVOA 32(SP),X3
      +	MOVOA X1,X4
      +	MOVQ $20,CX
      +	MAINLOOP2:
      +	PADDL X0,X4
      +	MOVOA X0,X5
      +	MOVOA X4,X6
      +	PSLLL $7,X4
      +	PSRLL $25,X6
      +	PXOR X4,X3
      +	PXOR X6,X3
      +	PADDL X3,X5
      +	MOVOA X3,X4
      +	MOVOA X5,X6
      +	PSLLL $9,X5
      +	PSRLL $23,X6
      +	PXOR X5,X2
      +	PSHUFL $0X93,X3,X3
      +	PXOR X6,X2
      +	PADDL X2,X4
      +	MOVOA X2,X5
      +	MOVOA X4,X6
      +	PSLLL $13,X4
      +	PSRLL $19,X6
      +	PXOR X4,X1
      +	PSHUFL $0X4E,X2,X2
      +	PXOR X6,X1
      +	PADDL X1,X5
      +	MOVOA X3,X4
      +	MOVOA X5,X6
      +	PSLLL $18,X5
      +	PSRLL $14,X6
      +	PXOR X5,X0
      +	PSHUFL $0X39,X1,X1
      +	PXOR X6,X0
      +	PADDL X0,X4
      +	MOVOA X0,X5
      +	MOVOA X4,X6
      +	PSLLL $7,X4
      +	PSRLL $25,X6
      +	PXOR X4,X1
      +	PXOR X6,X1
      +	PADDL X1,X5
      +	MOVOA X1,X4
      +	MOVOA X5,X6
      +	PSLLL $9,X5
      +	PSRLL $23,X6
      +	PXOR X5,X2
      +	PSHUFL $0X93,X1,X1
      +	PXOR X6,X2
      +	PADDL X2,X4
      +	MOVOA X2,X5
      +	MOVOA X4,X6
      +	PSLLL $13,X4
      +	PSRLL $19,X6
      +	PXOR X4,X3
      +	PSHUFL $0X4E,X2,X2
      +	PXOR X6,X3
      +	PADDL X3,X5
      +	MOVOA X1,X4
      +	MOVOA X5,X6
      +	PSLLL $18,X5
      +	PSRLL $14,X6
      +	PXOR X5,X0
      +	PSHUFL $0X39,X3,X3
      +	PXOR X6,X0
      +	PADDL X0,X4
      +	MOVOA X0,X5
      +	MOVOA X4,X6
      +	PSLLL $7,X4
      +	PSRLL $25,X6
      +	PXOR X4,X3
      +	PXOR X6,X3
      +	PADDL X3,X5
      +	MOVOA X3,X4
      +	MOVOA X5,X6
      +	PSLLL $9,X5
      +	PSRLL $23,X6
      +	PXOR X5,X2
      +	PSHUFL $0X93,X3,X3
      +	PXOR X6,X2
      +	PADDL X2,X4
      +	MOVOA X2,X5
      +	MOVOA X4,X6
      +	PSLLL $13,X4
      +	PSRLL $19,X6
      +	PXOR X4,X1
      +	PSHUFL $0X4E,X2,X2
      +	PXOR X6,X1
      +	PADDL X1,X5
      +	MOVOA X3,X4
      +	MOVOA X5,X6
      +	PSLLL $18,X5
      +	PSRLL $14,X6
      +	PXOR X5,X0
      +	PSHUFL $0X39,X1,X1
      +	PXOR X6,X0
      +	PADDL X0,X4
      +	MOVOA X0,X5
      +	MOVOA X4,X6
      +	PSLLL $7,X4
      +	PSRLL $25,X6
      +	PXOR X4,X1
      +	PXOR X6,X1
      +	PADDL X1,X5
      +	MOVOA X1,X4
      +	MOVOA X5,X6
      +	PSLLL $9,X5
      +	PSRLL $23,X6
      +	PXOR X5,X2
      +	PSHUFL $0X93,X1,X1
      +	PXOR X6,X2
      +	PADDL X2,X4
      +	MOVOA X2,X5
      +	MOVOA X4,X6
      +	PSLLL $13,X4
      +	PSRLL $19,X6
      +	PXOR X4,X3
      +	PSHUFL $0X4E,X2,X2
      +	PXOR X6,X3
      +	SUBQ $4,CX
      +	PADDL X3,X5
      +	MOVOA X1,X4
      +	MOVOA X5,X6
      +	PSLLL $18,X5
      +	PXOR X7,X7
      +	PSRLL $14,X6
      +	PXOR X5,X0
      +	PSHUFL $0X39,X3,X3
      +	PXOR X6,X0
      +	JA MAINLOOP2
      +	PADDL 48(SP),X0
      +	PADDL 0(SP),X1
      +	PADDL 16(SP),X2
      +	PADDL 32(SP),X3
      +	MOVD X0,CX
      +	MOVD X1,R8
      +	MOVD X2,R9
      +	MOVD X3,AX
      +	PSHUFL $0X39,X0,X0
      +	PSHUFL $0X39,X1,X1
      +	PSHUFL $0X39,X2,X2
      +	PSHUFL $0X39,X3,X3
      +	XORL 0(SI),CX
      +	XORL 48(SI),R8
      +	XORL 32(SI),R9
      +	XORL 16(SI),AX
      +	MOVL CX,0(DI)
      +	MOVL R8,48(DI)
      +	MOVL R9,32(DI)
      +	MOVL AX,16(DI)
      +	MOVD X0,CX
      +	MOVD X1,R8
      +	MOVD X2,R9
      +	MOVD X3,AX
      +	PSHUFL $0X39,X0,X0
      +	PSHUFL $0X39,X1,X1
      +	PSHUFL $0X39,X2,X2
      +	PSHUFL $0X39,X3,X3
      +	XORL 20(SI),CX
      +	XORL 4(SI),R8
      +	XORL 52(SI),R9
      +	XORL 36(SI),AX
      +	MOVL CX,20(DI)
      +	MOVL R8,4(DI)
      +	MOVL R9,52(DI)
      +	MOVL AX,36(DI)
      +	MOVD X0,CX
      +	MOVD X1,R8
      +	MOVD X2,R9
      +	MOVD X3,AX
      +	PSHUFL $0X39,X0,X0
      +	PSHUFL $0X39,X1,X1
      +	PSHUFL $0X39,X2,X2
      +	PSHUFL $0X39,X3,X3
      +	XORL 40(SI),CX
      +	XORL 24(SI),R8
      +	XORL 8(SI),R9
      +	XORL 56(SI),AX
      +	MOVL CX,40(DI)
      +	MOVL R8,24(DI)
      +	MOVL R9,8(DI)
      +	MOVL AX,56(DI)
      +	MOVD X0,CX
      +	MOVD X1,R8
      +	MOVD X2,R9
      +	MOVD X3,AX
      +	XORL 60(SI),CX
      +	XORL 44(SI),R8
      +	XORL 28(SI),R9
      +	XORL 12(SI),AX
      +	MOVL CX,60(DI)
      +	MOVL R8,44(DI)
      +	MOVL R9,28(DI)
      +	MOVL AX,12(DI)
      +	MOVQ 408(SP),R9
      +	MOVL 16(SP),CX
      +	MOVL  36 (SP),R8
      +	ADDQ $1,CX
      +	SHLQ $32,R8
      +	ADDQ R8,CX
      +	MOVQ CX,R8
      +	SHRQ $32,R8
      +	MOVL CX,16(SP)
      +	MOVL R8, 36 (SP)
      +	CMPQ R9,$64
      +	JA BYTESATLEAST65
      +	JAE BYTESATLEAST64
      +	MOVQ DI,SI
      +	MOVQ DX,DI
      +	MOVQ R9,CX
      +	REP; MOVSB
      +	BYTESATLEAST64:
      +	DONE:
      +	MOVQ 352(SP),R11
      +	MOVQ 360(SP),R12
      +	MOVQ 368(SP),R13
      +	MOVQ 376(SP),R14
      +	MOVQ 384(SP),R15
      +	MOVQ 392(SP),BX
      +	MOVQ 400(SP),BP
      +	MOVQ R11,SP
      +	RET
      +	BYTESATLEAST65:
      +	SUBQ $64,R9
      +	ADDQ $64,DI
      +	ADDQ $64,SI
      +	JMP BYTESBETWEEN1AND255
      diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
      new file mode 100644
      index 00000000..9bfc0927
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
      @@ -0,0 +1,199 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package salsa
      +
      +// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
      +// the result into the 64-byte array out. The input and output may be the same array.
      +func Core208(out *[64]byte, in *[64]byte) {
      +	j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
      +	j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
      +	j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
      +	j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
      +	j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24
      +	j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24
      +	j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24
      +	j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24
      +	j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24
      +	j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24
      +	j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24
      +	j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24
      +	j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24
      +	j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24
      +	j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24
      +	j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24
      +
      +	x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
      +	x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
      +
      +	for i := 0; i < 8; i += 2 {
      +		u := x0 + x12
      +		x4 ^= u<<7 | u>>(32-7)
      +		u = x4 + x0
      +		x8 ^= u<<9 | u>>(32-9)
      +		u = x8 + x4
      +		x12 ^= u<<13 | u>>(32-13)
      +		u = x12 + x8
      +		x0 ^= u<<18 | u>>(32-18)
      +
      +		u = x5 + x1
      +		x9 ^= u<<7 | u>>(32-7)
      +		u = x9 + x5
      +		x13 ^= u<<9 | u>>(32-9)
      +		u = x13 + x9
      +		x1 ^= u<<13 | u>>(32-13)
      +		u = x1 + x13
      +		x5 ^= u<<18 | u>>(32-18)
      +
      +		u = x10 + x6
      +		x14 ^= u<<7 | u>>(32-7)
      +		u = x14 + x10
      +		x2 ^= u<<9 | u>>(32-9)
      +		u = x2 + x14
      +		x6 ^= u<<13 | u>>(32-13)
      +		u = x6 + x2
      +		x10 ^= u<<18 | u>>(32-18)
      +
      +		u = x15 + x11
      +		x3 ^= u<<7 | u>>(32-7)
      +		u = x3 + x15
      +		x7 ^= u<<9 | u>>(32-9)
      +		u = x7 + x3
      +		x11 ^= u<<13 | u>>(32-13)
      +		u = x11 + x7
      +		x15 ^= u<<18 | u>>(32-18)
      +
      +		u = x0 + x3
      +		x1 ^= u<<7 | u>>(32-7)
      +		u = x1 + x0
      +		x2 ^= u<<9 | u>>(32-9)
      +		u = x2 + x1
      +		x3 ^= u<<13 | u>>(32-13)
      +		u = x3 + x2
      +		x0 ^= u<<18 | u>>(32-18)
      +
      +		u = x5 + x4
      +		x6 ^= u<<7 | u>>(32-7)
      +		u = x6 + x5
      +		x7 ^= u<<9 | u>>(32-9)
      +		u = x7 + x6
      +		x4 ^= u<<13 | u>>(32-13)
      +		u = x4 + x7
      +		x5 ^= u<<18 | u>>(32-18)
      +
      +		u = x10 + x9
      +		x11 ^= u<<7 | u>>(32-7)
      +		u = x11 + x10
      +		x8 ^= u<<9 | u>>(32-9)
      +		u = x8 + x11
      +		x9 ^= u<<13 | u>>(32-13)
      +		u = x9 + x8
      +		x10 ^= u<<18 | u>>(32-18)
      +
      +		u = x15 + x14
      +		x12 ^= u<<7 | u>>(32-7)
      +		u = x12 + x15
      +		x13 ^= u<<9 | u>>(32-9)
      +		u = x13 + x12
      +		x14 ^= u<<13 | u>>(32-13)
      +		u = x14 + x13
      +		x15 ^= u<<18 | u>>(32-18)
      +	}
      +	x0 += j0
      +	x1 += j1
      +	x2 += j2
      +	x3 += j3
      +	x4 += j4
      +	x5 += j5
      +	x6 += j6
      +	x7 += j7
      +	x8 += j8
      +	x9 += j9
      +	x10 += j10
      +	x11 += j11
      +	x12 += j12
      +	x13 += j13
      +	x14 += j14
      +	x15 += j15
      +
      +	out[0] = byte(x0)
      +	out[1] = byte(x0 >> 8)
      +	out[2] = byte(x0 >> 16)
      +	out[3] = byte(x0 >> 24)
      +
      +	out[4] = byte(x1)
      +	out[5] = byte(x1 >> 8)
      +	out[6] = byte(x1 >> 16)
      +	out[7] = byte(x1 >> 24)
      +
      +	out[8] = byte(x2)
      +	out[9] = byte(x2 >> 8)
      +	out[10] = byte(x2 >> 16)
      +	out[11] = byte(x2 >> 24)
      +
      +	out[12] = byte(x3)
      +	out[13] = byte(x3 >> 8)
      +	out[14] = byte(x3 >> 16)
      +	out[15] = byte(x3 >> 24)
      +
      +	out[16] = byte(x4)
      +	out[17] = byte(x4 >> 8)
      +	out[18] = byte(x4 >> 16)
      +	out[19] = byte(x4 >> 24)
      +
      +	out[20] = byte(x5)
      +	out[21] = byte(x5 >> 8)
      +	out[22] = byte(x5 >> 16)
      +	out[23] = byte(x5 >> 24)
      +
      +	out[24] = byte(x6)
      +	out[25] = byte(x6 >> 8)
      +	out[26] = byte(x6 >> 16)
      +	out[27] = byte(x6 >> 24)
      +
      +	out[28] = byte(x7)
      +	out[29] = byte(x7 >> 8)
      +	out[30] = byte(x7 >> 16)
      +	out[31] = byte(x7 >> 24)
      +
      +	out[32] = byte(x8)
      +	out[33] = byte(x8 >> 8)
      +	out[34] = byte(x8 >> 16)
      +	out[35] = byte(x8 >> 24)
      +
      +	out[36] = byte(x9)
      +	out[37] = byte(x9 >> 8)
      +	out[38] = byte(x9 >> 16)
      +	out[39] = byte(x9 >> 24)
      +
      +	out[40] = byte(x10)
      +	out[41] = byte(x10 >> 8)
      +	out[42] = byte(x10 >> 16)
      +	out[43] = byte(x10 >> 24)
      +
      +	out[44] = byte(x11)
      +	out[45] = byte(x11 >> 8)
      +	out[46] = byte(x11 >> 16)
      +	out[47] = byte(x11 >> 24)
      +
      +	out[48] = byte(x12)
      +	out[49] = byte(x12 >> 8)
      +	out[50] = byte(x12 >> 16)
      +	out[51] = byte(x12 >> 24)
      +
      +	out[52] = byte(x13)
      +	out[53] = byte(x13 >> 8)
      +	out[54] = byte(x13 >> 16)
      +	out[55] = byte(x13 >> 24)
      +
      +	out[56] = byte(x14)
      +	out[57] = byte(x14 >> 8)
      +	out[58] = byte(x14 >> 16)
      +	out[59] = byte(x14 >> 24)
      +
      +	out[60] = byte(x15)
      +	out[61] = byte(x15 >> 8)
      +	out[62] = byte(x15 >> 16)
      +	out[63] = byte(x15 >> 24)
      +}
      diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
      new file mode 100644
      index 00000000..903c7858
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
      @@ -0,0 +1,23 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build amd64,!appengine,!gccgo
      +
      +package salsa
      +
      +// This function is implemented in salsa2020_amd64.s.
      +
      +//go:noescape
      +
      +func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
      +
      +// XORKeyStream crypts bytes from in to out using the given key and counters.
      +// In and out may be the same slice but otherwise should not overlap. Counter
      +// contains the raw salsa20 counter bytes (both nonce and block counter).
      +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
      +	if len(in) == 0 {
      +		return
      +	}
      +	salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0])
      +}
      diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
      new file mode 100644
      index 00000000..95f8ca5b
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
      @@ -0,0 +1,234 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build !amd64 appengine gccgo
      +
      +package salsa
      +
      +const rounds = 20
      +
      +// core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
      +// and 16-byte constant c, and puts the result into 64-byte array out.
      +func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
      +	j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
      +	j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
      +	j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
      +	j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
      +	j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
      +	j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
      +	j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
      +	j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
      +	j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
      +	j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
      +	j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
      +	j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
      +	j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
      +	j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
      +	j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
      +	j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
      +
      +	x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
      +	x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
      +
      +	for i := 0; i < rounds; i += 2 {
      +		u := x0 + x12
      +		x4 ^= u<<7 | u>>(32-7)
      +		u = x4 + x0
      +		x8 ^= u<<9 | u>>(32-9)
      +		u = x8 + x4
      +		x12 ^= u<<13 | u>>(32-13)
      +		u = x12 + x8
      +		x0 ^= u<<18 | u>>(32-18)
      +
      +		u = x5 + x1
      +		x9 ^= u<<7 | u>>(32-7)
      +		u = x9 + x5
      +		x13 ^= u<<9 | u>>(32-9)
      +		u = x13 + x9
      +		x1 ^= u<<13 | u>>(32-13)
      +		u = x1 + x13
      +		x5 ^= u<<18 | u>>(32-18)
      +
      +		u = x10 + x6
      +		x14 ^= u<<7 | u>>(32-7)
      +		u = x14 + x10
      +		x2 ^= u<<9 | u>>(32-9)
      +		u = x2 + x14
      +		x6 ^= u<<13 | u>>(32-13)
      +		u = x6 + x2
      +		x10 ^= u<<18 | u>>(32-18)
      +
      +		u = x15 + x11
      +		x3 ^= u<<7 | u>>(32-7)
      +		u = x3 + x15
      +		x7 ^= u<<9 | u>>(32-9)
      +		u = x7 + x3
      +		x11 ^= u<<13 | u>>(32-13)
      +		u = x11 + x7
      +		x15 ^= u<<18 | u>>(32-18)
      +
      +		u = x0 + x3
      +		x1 ^= u<<7 | u>>(32-7)
      +		u = x1 + x0
      +		x2 ^= u<<9 | u>>(32-9)
      +		u = x2 + x1
      +		x3 ^= u<<13 | u>>(32-13)
      +		u = x3 + x2
      +		x0 ^= u<<18 | u>>(32-18)
      +
      +		u = x5 + x4
      +		x6 ^= u<<7 | u>>(32-7)
      +		u = x6 + x5
      +		x7 ^= u<<9 | u>>(32-9)
      +		u = x7 + x6
      +		x4 ^= u<<13 | u>>(32-13)
      +		u = x4 + x7
      +		x5 ^= u<<18 | u>>(32-18)
      +
      +		u = x10 + x9
      +		x11 ^= u<<7 | u>>(32-7)
      +		u = x11 + x10
      +		x8 ^= u<<9 | u>>(32-9)
      +		u = x8 + x11
      +		x9 ^= u<<13 | u>>(32-13)
      +		u = x9 + x8
      +		x10 ^= u<<18 | u>>(32-18)
      +
      +		u = x15 + x14
      +		x12 ^= u<<7 | u>>(32-7)
      +		u = x12 + x15
      +		x13 ^= u<<9 | u>>(32-9)
      +		u = x13 + x12
      +		x14 ^= u<<13 | u>>(32-13)
      +		u = x14 + x13
      +		x15 ^= u<<18 | u>>(32-18)
      +	}
      +	x0 += j0
      +	x1 += j1
      +	x2 += j2
      +	x3 += j3
      +	x4 += j4
      +	x5 += j5
      +	x6 += j6
      +	x7 += j7
      +	x8 += j8
      +	x9 += j9
      +	x10 += j10
      +	x11 += j11
      +	x12 += j12
      +	x13 += j13
      +	x14 += j14
      +	x15 += j15
      +
      +	out[0] = byte(x0)
      +	out[1] = byte(x0 >> 8)
      +	out[2] = byte(x0 >> 16)
      +	out[3] = byte(x0 >> 24)
      +
      +	out[4] = byte(x1)
      +	out[5] = byte(x1 >> 8)
      +	out[6] = byte(x1 >> 16)
      +	out[7] = byte(x1 >> 24)
      +
      +	out[8] = byte(x2)
      +	out[9] = byte(x2 >> 8)
      +	out[10] = byte(x2 >> 16)
      +	out[11] = byte(x2 >> 24)
      +
      +	out[12] = byte(x3)
      +	out[13] = byte(x3 >> 8)
      +	out[14] = byte(x3 >> 16)
      +	out[15] = byte(x3 >> 24)
      +
      +	out[16] = byte(x4)
      +	out[17] = byte(x4 >> 8)
      +	out[18] = byte(x4 >> 16)
      +	out[19] = byte(x4 >> 24)
      +
      +	out[20] = byte(x5)
      +	out[21] = byte(x5 >> 8)
      +	out[22] = byte(x5 >> 16)
      +	out[23] = byte(x5 >> 24)
      +
      +	out[24] = byte(x6)
      +	out[25] = byte(x6 >> 8)
      +	out[26] = byte(x6 >> 16)
      +	out[27] = byte(x6 >> 24)
      +
      +	out[28] = byte(x7)
      +	out[29] = byte(x7 >> 8)
      +	out[30] = byte(x7 >> 16)
      +	out[31] = byte(x7 >> 24)
      +
      +	out[32] = byte(x8)
      +	out[33] = byte(x8 >> 8)
      +	out[34] = byte(x8 >> 16)
      +	out[35] = byte(x8 >> 24)
      +
      +	out[36] = byte(x9)
      +	out[37] = byte(x9 >> 8)
      +	out[38] = byte(x9 >> 16)
      +	out[39] = byte(x9 >> 24)
      +
      +	out[40] = byte(x10)
      +	out[41] = byte(x10 >> 8)
      +	out[42] = byte(x10 >> 16)
      +	out[43] = byte(x10 >> 24)
      +
      +	out[44] = byte(x11)
      +	out[45] = byte(x11 >> 8)
      +	out[46] = byte(x11 >> 16)
      +	out[47] = byte(x11 >> 24)
      +
      +	out[48] = byte(x12)
      +	out[49] = byte(x12 >> 8)
      +	out[50] = byte(x12 >> 16)
      +	out[51] = byte(x12 >> 24)
      +
      +	out[52] = byte(x13)
      +	out[53] = byte(x13 >> 8)
      +	out[54] = byte(x13 >> 16)
      +	out[55] = byte(x13 >> 24)
      +
      +	out[56] = byte(x14)
      +	out[57] = byte(x14 >> 8)
      +	out[58] = byte(x14 >> 16)
      +	out[59] = byte(x14 >> 24)
      +
      +	out[60] = byte(x15)
      +	out[61] = byte(x15 >> 8)
      +	out[62] = byte(x15 >> 16)
      +	out[63] = byte(x15 >> 24)
      +}
      +
      +// XORKeyStream crypts bytes from in to out using the given key and counters.
      +// In and out may be the same slice but otherwise should not overlap. Counter
      +// contains the raw salsa20 counter bytes (both nonce and block counter).
      +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
      +	var block [64]byte
      +	var counterCopy [16]byte
      +	copy(counterCopy[:], counter[:])
      +
      +	for len(in) >= 64 {
      +		core(&block, &counterCopy, key, &Sigma)
      +		for i, x := range block {
      +			out[i] = in[i] ^ x
      +		}
      +		u := uint32(1)
      +		for i := 8; i < 16; i++ {
      +			u += uint32(counterCopy[i])
      +			counterCopy[i] = byte(u)
      +			u >>= 8
      +		}
      +		in = in[64:]
      +		out = out[64:]
      +	}
      +
      +	if len(in) > 0 {
      +		core(&block, &counterCopy, key, &Sigma)
      +		for i, v := range in {
      +			out[i] = v ^ block[i]
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa_test.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa_test.go
      new file mode 100644
      index 00000000..f8cecd9e
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa_test.go
      @@ -0,0 +1,35 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package salsa
      +
      +import "testing"
      +
      +func TestCore208(t *testing.T) {
      +	in := [64]byte{
      +		0x7e, 0x87, 0x9a, 0x21, 0x4f, 0x3e, 0xc9, 0x86,
      +		0x7c, 0xa9, 0x40, 0xe6, 0x41, 0x71, 0x8f, 0x26,
      +		0xba, 0xee, 0x55, 0x5b, 0x8c, 0x61, 0xc1, 0xb5,
      +		0x0d, 0xf8, 0x46, 0x11, 0x6d, 0xcd, 0x3b, 0x1d,
      +		0xee, 0x24, 0xf3, 0x19, 0xdf, 0x9b, 0x3d, 0x85,
      +		0x14, 0x12, 0x1e, 0x4b, 0x5a, 0xc5, 0xaa, 0x32,
      +		0x76, 0x02, 0x1d, 0x29, 0x09, 0xc7, 0x48, 0x29,
      +		0xed, 0xeb, 0xc6, 0x8d, 0xb8, 0xb8, 0xc2, 0x5e}
      +
      +	out := [64]byte{
      +		0xa4, 0x1f, 0x85, 0x9c, 0x66, 0x08, 0xcc, 0x99,
      +		0x3b, 0x81, 0xca, 0xcb, 0x02, 0x0c, 0xef, 0x05,
      +		0x04, 0x4b, 0x21, 0x81, 0xa2, 0xfd, 0x33, 0x7d,
      +		0xfd, 0x7b, 0x1c, 0x63, 0x96, 0x68, 0x2f, 0x29,
      +		0xb4, 0x39, 0x31, 0x68, 0xe3, 0xc9, 0xe6, 0xbc,
      +		0xfe, 0x6b, 0xc5, 0xb7, 0xa0, 0x6d, 0x96, 0xba,
      +		0xe4, 0x24, 0xcc, 0x10, 0x2c, 0x91, 0x74, 0x5c,
      +		0x24, 0xad, 0x67, 0x3d, 0xc7, 0x61, 0x8f, 0x81,
      +	}
      +
      +	Core208(&in, &in)
      +	if in != out {
      +		t.Errorf("expected %x, got %x", out, in)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/salsa20/salsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa20.go
      new file mode 100644
      index 00000000..fde9846b
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/salsa20/salsa20.go
      @@ -0,0 +1,54 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +/*
      +Package salsa20 implements the Salsa20 stream cipher as specified in http://cr.yp.to/snuffle/spec.pdf.
      +
      +Salsa20 differs from many other stream ciphers in that it is message orientated
      +rather than byte orientated. Keystream blocks are not preserved between calls,
      +therefore each side must encrypt/decrypt data with the same segmentation.
      +
      +Another aspect of this difference is that part of the counter is exposed as
+a nonce in each call. Encrypting two different messages with the same (key,
      +nonce) pair leads to trivial plaintext recovery. This is analogous to
      +encrypting two different messages with the same key with a traditional stream
      +cipher.
      +
      +This package also implements XSalsa20: a version of Salsa20 with a 24-byte
      +nonce as specified in http://cr.yp.to/snuffle/xsalsa-20081128.pdf. Simply
      +passing a 24-byte slice as the nonce triggers XSalsa20.
      +*/
      +package salsa20 // import "golang.org/x/crypto/salsa20"
      +
      +// TODO(agl): implement XORKeyStream12 and XORKeyStream8 - the reduced round variants of Salsa20.
      +
      +import (
      +	"golang.org/x/crypto/salsa20/salsa"
      +)
      +
      +// XORKeyStream crypts bytes from in to out using the given key and nonce. In
      +// and out may be the same slice but otherwise should not overlap. Nonce must
      +// be either 8 or 24 bytes long.
      +func XORKeyStream(out, in []byte, nonce []byte, key *[32]byte) {
      +	if len(out) < len(in) {
      +		in = in[:len(out)]
      +	}
      +
      +	var subNonce [16]byte
      +
      +	if len(nonce) == 24 {
      +		var subKey [32]byte
      +		var hNonce [16]byte
      +		copy(hNonce[:], nonce[:16])
      +		salsa.HSalsa20(&subKey, &hNonce, key, &salsa.Sigma)
      +		copy(subNonce[:], nonce[16:])
      +		key = &subKey
      +	} else if len(nonce) == 8 {
      +		copy(subNonce[:], nonce[:])
      +	} else {
      +		panic("salsa20: nonce must be 8 or 24 bytes")
      +	}
      +
      +	salsa.XORKeyStream(out, in, &subNonce, key)
      +}
      diff --git a/vendor/golang.org/x/crypto/salsa20/salsa20_test.go b/vendor/golang.org/x/crypto/salsa20/salsa20_test.go
      new file mode 100644
      index 00000000..0ef3328e
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/salsa20/salsa20_test.go
      @@ -0,0 +1,139 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package salsa20
      +
      +import (
      +	"bytes"
      +	"encoding/hex"
      +	"testing"
      +)
      +
      +func fromHex(s string) []byte {
      +	ret, err := hex.DecodeString(s)
      +	if err != nil {
      +		panic(err)
      +	}
      +	return ret
      +}
      +
      +// testVectors was taken from set 6 of the ECRYPT test vectors:
      +// http://www.ecrypt.eu.org/stream/svn/viewcvs.cgi/ecrypt/trunk/submissions/salsa20/full/verified.test-vectors?logsort=rev&rev=210&view=markup
      +var testVectors = []struct {
      +	key      []byte
      +	iv       []byte
      +	numBytes int
      +	xor      []byte
      +}{
      +	{
      +		fromHex("0053A6F94C9FF24598EB3E91E4378ADD3083D6297CCF2275C81B6EC11467BA0D"),
      +		fromHex("0D74DB42A91077DE"),
      +		131072,
      +		fromHex("C349B6A51A3EC9B712EAED3F90D8BCEE69B7628645F251A996F55260C62EF31FD6C6B0AEA94E136C9D984AD2DF3578F78E457527B03A0450580DD874F63B1AB9"),
      +	},
      +	{
      +		fromHex("0558ABFE51A4F74A9DF04396E93C8FE23588DB2E81D4277ACD2073C6196CBF12"),
      +		fromHex("167DE44BB21980E7"),
      +		131072,
      +		fromHex("C3EAAF32836BACE32D04E1124231EF47E101367D6305413A0EEB07C60698A2876E4D031870A739D6FFDDD208597AFF0A47AC17EDB0167DD67EBA84F1883D4DFD"),
      +	},
      +	{
      +		fromHex("0A5DB00356A9FC4FA2F5489BEE4194E73A8DE03386D92C7FD22578CB1E71C417"),
      +		fromHex("1F86ED54BB2289F0"),
      +		131072,
      +		fromHex("3CD23C3DC90201ACC0CF49B440B6C417F0DC8D8410A716D5314C059E14B1A8D9A9FB8EA3D9C8DAE12B21402F674AA95C67B1FC514E994C9D3F3A6E41DFF5BBA6"),
      +	},
      +	{
      +		fromHex("0F62B5085BAE0154A7FA4DA0F34699EC3F92E5388BDE3184D72A7DD02376C91C"),
      +		fromHex("288FF65DC42B92F9"),
      +		131072,
      +		fromHex("E00EBCCD70D69152725F9987982178A2E2E139C7BCBE04CA8A0E99E318D9AB76F988C8549F75ADD790BA4F81C176DA653C1A043F11A958E169B6D2319F4EEC1A"),
      +	},
      +}
      +
      +func TestSalsa20(t *testing.T) {
      +	var inBuf, outBuf []byte
      +	var key [32]byte
      +
      +	for i, test := range testVectors {
      +		if test.numBytes%64 != 0 {
      +			t.Errorf("#%d: numBytes is not a multiple of 64", i)
      +			continue
      +		}
      +
      +		if test.numBytes > len(inBuf) {
      +			inBuf = make([]byte, test.numBytes)
      +			outBuf = make([]byte, test.numBytes)
      +		}
      +		in := inBuf[:test.numBytes]
      +		out := outBuf[:test.numBytes]
      +		copy(key[:], test.key)
      +		XORKeyStream(out, in, test.iv, &key)
      +
      +		var xor [64]byte
      +		for len(out) > 0 {
      +			for i := 0; i < 64; i++ {
      +				xor[i] ^= out[i]
      +			}
      +			out = out[64:]
      +		}
      +
      +		if !bytes.Equal(xor[:], test.xor) {
      +			t.Errorf("#%d: bad result", i)
      +		}
      +	}
      +}
      +
      +var xSalsa20TestData = []struct {
      +	in, nonce, key, out []byte
      +}{
      +	{
      +		[]byte("Hello world!"),
      +		[]byte("24-byte nonce for xsalsa"),
      +		[]byte("this is 32-byte key for xsalsa20"),
      +		[]byte{0x00, 0x2d, 0x45, 0x13, 0x84, 0x3f, 0xc2, 0x40, 0xc4, 0x01, 0xe5, 0x41},
      +	},
      +	{
      +		make([]byte, 64),
      +		[]byte("24-byte nonce for xsalsa"),
      +		[]byte("this is 32-byte key for xsalsa20"),
      +		[]byte{0x48, 0x48, 0x29, 0x7f, 0xeb, 0x1f, 0xb5, 0x2f, 0xb6,
      +			0x6d, 0x81, 0x60, 0x9b, 0xd5, 0x47, 0xfa, 0xbc, 0xbe, 0x70,
      +			0x26, 0xed, 0xc8, 0xb5, 0xe5, 0xe4, 0x49, 0xd0, 0x88, 0xbf,
      +			0xa6, 0x9c, 0x08, 0x8f, 0x5d, 0x8d, 0xa1, 0xd7, 0x91, 0x26,
      +			0x7c, 0x2c, 0x19, 0x5a, 0x7f, 0x8c, 0xae, 0x9c, 0x4b, 0x40,
      +			0x50, 0xd0, 0x8c, 0xe6, 0xd3, 0xa1, 0x51, 0xec, 0x26, 0x5f,
      +			0x3a, 0x58, 0xe4, 0x76, 0x48},
      +	},
      +}
      +
      +func TestXSalsa20(t *testing.T) {
      +	var key [32]byte
      +
      +	for i, test := range xSalsa20TestData {
      +		out := make([]byte, len(test.in))
      +		copy(key[:], test.key)
      +		XORKeyStream(out, test.in, test.nonce, &key)
      +		if !bytes.Equal(out, test.out) {
      +			t.Errorf("%d: expected %x, got %x", i, test.out, out)
      +		}
      +	}
      +}
      +
      +var (
      +	keyArray [32]byte
      +	key      = &keyArray
      +	nonce    [8]byte
      +	msg      = make([]byte, 1<<10)
      +)
      +
      +func BenchmarkXOR1K(b *testing.B) {
      +	b.StopTimer()
      +	out := make([]byte, 1024)
      +	b.StartTimer()
      +	for i := 0; i < b.N; i++ {
      +		XORKeyStream(out, msg[:1024], nonce[:], key)
      +	}
      +	b.SetBytes(1024)
      +}
      diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go
      new file mode 100644
      index 00000000..dc0124b1
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go
      @@ -0,0 +1,243 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package scrypt implements the scrypt key derivation function as defined in
      +// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard
      +// Functions" (http://www.tarsnap.com/scrypt/scrypt.pdf).
      +package scrypt // import "golang.org/x/crypto/scrypt"
      +
      +import (
      +	"crypto/sha256"
      +	"errors"
      +
      +	"golang.org/x/crypto/pbkdf2"
      +)
      +
      +const maxInt = int(^uint(0) >> 1)
      +
      +// blockCopy copies n numbers from src into dst.
      +func blockCopy(dst, src []uint32, n int) {
      +	copy(dst, src[:n])
      +}
      +
      +// blockXOR XORs numbers from dst with n numbers from src.
      +func blockXOR(dst, src []uint32, n int) {
      +	for i, v := range src[:n] {
      +		dst[i] ^= v
      +	}
      +}
      +
      +// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in,
+// and puts the result into both tmp and out.
      +func salsaXOR(tmp *[16]uint32, in, out []uint32) {
      +	w0 := tmp[0] ^ in[0]
      +	w1 := tmp[1] ^ in[1]
      +	w2 := tmp[2] ^ in[2]
      +	w3 := tmp[3] ^ in[3]
      +	w4 := tmp[4] ^ in[4]
      +	w5 := tmp[5] ^ in[5]
      +	w6 := tmp[6] ^ in[6]
      +	w7 := tmp[7] ^ in[7]
      +	w8 := tmp[8] ^ in[8]
      +	w9 := tmp[9] ^ in[9]
      +	w10 := tmp[10] ^ in[10]
      +	w11 := tmp[11] ^ in[11]
      +	w12 := tmp[12] ^ in[12]
      +	w13 := tmp[13] ^ in[13]
      +	w14 := tmp[14] ^ in[14]
      +	w15 := tmp[15] ^ in[15]
      +
      +	x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8
      +	x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15
      +
      +	for i := 0; i < 8; i += 2 {
      +		u := x0 + x12
      +		x4 ^= u<<7 | u>>(32-7)
      +		u = x4 + x0
      +		x8 ^= u<<9 | u>>(32-9)
      +		u = x8 + x4
      +		x12 ^= u<<13 | u>>(32-13)
      +		u = x12 + x8
      +		x0 ^= u<<18 | u>>(32-18)
      +
      +		u = x5 + x1
      +		x9 ^= u<<7 | u>>(32-7)
      +		u = x9 + x5
      +		x13 ^= u<<9 | u>>(32-9)
      +		u = x13 + x9
      +		x1 ^= u<<13 | u>>(32-13)
      +		u = x1 + x13
      +		x5 ^= u<<18 | u>>(32-18)
      +
      +		u = x10 + x6
      +		x14 ^= u<<7 | u>>(32-7)
      +		u = x14 + x10
      +		x2 ^= u<<9 | u>>(32-9)
      +		u = x2 + x14
      +		x6 ^= u<<13 | u>>(32-13)
      +		u = x6 + x2
      +		x10 ^= u<<18 | u>>(32-18)
      +
      +		u = x15 + x11
      +		x3 ^= u<<7 | u>>(32-7)
      +		u = x3 + x15
      +		x7 ^= u<<9 | u>>(32-9)
      +		u = x7 + x3
      +		x11 ^= u<<13 | u>>(32-13)
      +		u = x11 + x7
      +		x15 ^= u<<18 | u>>(32-18)
      +
      +		u = x0 + x3
      +		x1 ^= u<<7 | u>>(32-7)
      +		u = x1 + x0
      +		x2 ^= u<<9 | u>>(32-9)
      +		u = x2 + x1
      +		x3 ^= u<<13 | u>>(32-13)
      +		u = x3 + x2
      +		x0 ^= u<<18 | u>>(32-18)
      +
      +		u = x5 + x4
      +		x6 ^= u<<7 | u>>(32-7)
      +		u = x6 + x5
      +		x7 ^= u<<9 | u>>(32-9)
      +		u = x7 + x6
      +		x4 ^= u<<13 | u>>(32-13)
      +		u = x4 + x7
      +		x5 ^= u<<18 | u>>(32-18)
      +
      +		u = x10 + x9
      +		x11 ^= u<<7 | u>>(32-7)
      +		u = x11 + x10
      +		x8 ^= u<<9 | u>>(32-9)
      +		u = x8 + x11
      +		x9 ^= u<<13 | u>>(32-13)
      +		u = x9 + x8
      +		x10 ^= u<<18 | u>>(32-18)
      +
      +		u = x15 + x14
      +		x12 ^= u<<7 | u>>(32-7)
      +		u = x12 + x15
      +		x13 ^= u<<9 | u>>(32-9)
      +		u = x13 + x12
      +		x14 ^= u<<13 | u>>(32-13)
      +		u = x14 + x13
      +		x15 ^= u<<18 | u>>(32-18)
      +	}
      +	x0 += w0
      +	x1 += w1
      +	x2 += w2
      +	x3 += w3
      +	x4 += w4
      +	x5 += w5
      +	x6 += w6
      +	x7 += w7
      +	x8 += w8
      +	x9 += w9
      +	x10 += w10
      +	x11 += w11
      +	x12 += w12
      +	x13 += w13
      +	x14 += w14
      +	x15 += w15
      +
      +	out[0], tmp[0] = x0, x0
      +	out[1], tmp[1] = x1, x1
      +	out[2], tmp[2] = x2, x2
      +	out[3], tmp[3] = x3, x3
      +	out[4], tmp[4] = x4, x4
      +	out[5], tmp[5] = x5, x5
      +	out[6], tmp[6] = x6, x6
      +	out[7], tmp[7] = x7, x7
      +	out[8], tmp[8] = x8, x8
      +	out[9], tmp[9] = x9, x9
      +	out[10], tmp[10] = x10, x10
      +	out[11], tmp[11] = x11, x11
      +	out[12], tmp[12] = x12, x12
      +	out[13], tmp[13] = x13, x13
      +	out[14], tmp[14] = x14, x14
      +	out[15], tmp[15] = x15, x15
      +}
      +
      +func blockMix(tmp *[16]uint32, in, out []uint32, r int) {
      +	blockCopy(tmp[:], in[(2*r-1)*16:], 16)
      +	for i := 0; i < 2*r; i += 2 {
      +		salsaXOR(tmp, in[i*16:], out[i*8:])
      +		salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:])
      +	}
      +}
      +
      +func integer(b []uint32, r int) uint64 {
      +	j := (2*r - 1) * 16
      +	return uint64(b[j]) | uint64(b[j+1])<<32
      +}
      +
      +func smix(b []byte, r, N int, v, xy []uint32) {
      +	var tmp [16]uint32
      +	x := xy
      +	y := xy[32*r:]
      +
      +	j := 0
      +	for i := 0; i < 32*r; i++ {
      +		x[i] = uint32(b[j]) | uint32(b[j+1])<<8 | uint32(b[j+2])<<16 | uint32(b[j+3])<<24
      +		j += 4
      +	}
      +	for i := 0; i < N; i += 2 {
      +		blockCopy(v[i*(32*r):], x, 32*r)
      +		blockMix(&tmp, x, y, r)
      +
      +		blockCopy(v[(i+1)*(32*r):], y, 32*r)
      +		blockMix(&tmp, y, x, r)
      +	}
      +	for i := 0; i < N; i += 2 {
      +		j := int(integer(x, r) & uint64(N-1))
      +		blockXOR(x, v[j*(32*r):], 32*r)
      +		blockMix(&tmp, x, y, r)
      +
      +		j = int(integer(y, r) & uint64(N-1))
      +		blockXOR(y, v[j*(32*r):], 32*r)
      +		blockMix(&tmp, y, x, r)
      +	}
      +	j = 0
      +	for _, v := range x[:32*r] {
      +		b[j+0] = byte(v >> 0)
      +		b[j+1] = byte(v >> 8)
      +		b[j+2] = byte(v >> 16)
      +		b[j+3] = byte(v >> 24)
      +		j += 4
      +	}
      +}
      +
      +// Key derives a key from the password, salt, and cost parameters, returning
      +// a byte slice of length keyLen that can be used as cryptographic key.
      +//
      +// N is a CPU/memory cost parameter, which must be a power of two greater than 1.
      +// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the
      +// limits, the function returns a nil byte slice and an error.
      +//
      +// For example, you can get a derived key for e.g. AES-256 (which needs a
      +// 32-byte key) by doing:
      +//
      +//      dk := scrypt.Key([]byte("some password"), salt, 16384, 8, 1, 32)
      +//
      +// The recommended parameters for interactive logins as of 2009 are N=16384,
      +// r=8, p=1. They should be increased as memory latency and CPU parallelism
      +// increases. Remember to get a good random salt.
      +func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) {
      +	if N <= 1 || N&(N-1) != 0 {
      +		return nil, errors.New("scrypt: N must be > 1 and a power of 2")
      +	}
      +	if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r {
      +		return nil, errors.New("scrypt: parameters are too large")
      +	}
      +
      +	xy := make([]uint32, 64*r)
      +	v := make([]uint32, 32*N*r)
      +	b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New)
      +
      +	for i := 0; i < p; i++ {
      +		smix(b[i*128*r:], r, N, v, xy)
      +	}
      +
      +	return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil
      +}
      diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt_test.go b/vendor/golang.org/x/crypto/scrypt/scrypt_test.go
      new file mode 100644
      index 00000000..e096c3a3
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/scrypt/scrypt_test.go
      @@ -0,0 +1,160 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package scrypt
      +
      +import (
      +	"bytes"
      +	"testing"
      +)
      +
      +type testVector struct {
      +	password string
      +	salt     string
      +	N, r, p  int
      +	output   []byte
      +}
      +
      +var good = []testVector{
      +	{
      +		"password",
      +		"salt",
      +		2, 10, 10,
      +		[]byte{
      +			0x48, 0x2c, 0x85, 0x8e, 0x22, 0x90, 0x55, 0xe6, 0x2f,
      +			0x41, 0xe0, 0xec, 0x81, 0x9a, 0x5e, 0xe1, 0x8b, 0xdb,
      +			0x87, 0x25, 0x1a, 0x53, 0x4f, 0x75, 0xac, 0xd9, 0x5a,
      +			0xc5, 0xe5, 0xa, 0xa1, 0x5f,
      +		},
      +	},
      +	{
      +		"password",
      +		"salt",
      +		16, 100, 100,
      +		[]byte{
      +			0x88, 0xbd, 0x5e, 0xdb, 0x52, 0xd1, 0xdd, 0x0, 0x18,
      +			0x87, 0x72, 0xad, 0x36, 0x17, 0x12, 0x90, 0x22, 0x4e,
      +			0x74, 0x82, 0x95, 0x25, 0xb1, 0x8d, 0x73, 0x23, 0xa5,
      +			0x7f, 0x91, 0x96, 0x3c, 0x37,
      +		},
      +	},
      +	{
      +		"this is a long \000 password",
      +		"and this is a long \000 salt",
      +		16384, 8, 1,
      +		[]byte{
      +			0xc3, 0xf1, 0x82, 0xee, 0x2d, 0xec, 0x84, 0x6e, 0x70,
      +			0xa6, 0x94, 0x2f, 0xb5, 0x29, 0x98, 0x5a, 0x3a, 0x09,
      +			0x76, 0x5e, 0xf0, 0x4c, 0x61, 0x29, 0x23, 0xb1, 0x7f,
      +			0x18, 0x55, 0x5a, 0x37, 0x07, 0x6d, 0xeb, 0x2b, 0x98,
      +			0x30, 0xd6, 0x9d, 0xe5, 0x49, 0x26, 0x51, 0xe4, 0x50,
      +			0x6a, 0xe5, 0x77, 0x6d, 0x96, 0xd4, 0x0f, 0x67, 0xaa,
      +			0xee, 0x37, 0xe1, 0x77, 0x7b, 0x8a, 0xd5, 0xc3, 0x11,
      +			0x14, 0x32, 0xbb, 0x3b, 0x6f, 0x7e, 0x12, 0x64, 0x40,
      +			0x18, 0x79, 0xe6, 0x41, 0xae,
      +		},
      +	},
      +	{
      +		"p",
      +		"s",
      +		2, 1, 1,
      +		[]byte{
      +			0x48, 0xb0, 0xd2, 0xa8, 0xa3, 0x27, 0x26, 0x11, 0x98,
      +			0x4c, 0x50, 0xeb, 0xd6, 0x30, 0xaf, 0x52,
      +		},
      +	},
      +
      +	{
      +		"",
      +		"",
      +		16, 1, 1,
      +		[]byte{
      +			0x77, 0xd6, 0x57, 0x62, 0x38, 0x65, 0x7b, 0x20, 0x3b,
      +			0x19, 0xca, 0x42, 0xc1, 0x8a, 0x04, 0x97, 0xf1, 0x6b,
      +			0x48, 0x44, 0xe3, 0x07, 0x4a, 0xe8, 0xdf, 0xdf, 0xfa,
      +			0x3f, 0xed, 0xe2, 0x14, 0x42, 0xfc, 0xd0, 0x06, 0x9d,
      +			0xed, 0x09, 0x48, 0xf8, 0x32, 0x6a, 0x75, 0x3a, 0x0f,
      +			0xc8, 0x1f, 0x17, 0xe8, 0xd3, 0xe0, 0xfb, 0x2e, 0x0d,
      +			0x36, 0x28, 0xcf, 0x35, 0xe2, 0x0c, 0x38, 0xd1, 0x89,
      +			0x06,
      +		},
      +	},
      +	{
      +		"password",
      +		"NaCl",
      +		1024, 8, 16,
      +		[]byte{
      +			0xfd, 0xba, 0xbe, 0x1c, 0x9d, 0x34, 0x72, 0x00, 0x78,
      +			0x56, 0xe7, 0x19, 0x0d, 0x01, 0xe9, 0xfe, 0x7c, 0x6a,
      +			0xd7, 0xcb, 0xc8, 0x23, 0x78, 0x30, 0xe7, 0x73, 0x76,
      +			0x63, 0x4b, 0x37, 0x31, 0x62, 0x2e, 0xaf, 0x30, 0xd9,
      +			0x2e, 0x22, 0xa3, 0x88, 0x6f, 0xf1, 0x09, 0x27, 0x9d,
      +			0x98, 0x30, 0xda, 0xc7, 0x27, 0xaf, 0xb9, 0x4a, 0x83,
      +			0xee, 0x6d, 0x83, 0x60, 0xcb, 0xdf, 0xa2, 0xcc, 0x06,
      +			0x40,
      +		},
      +	},
      +	{
      +		"pleaseletmein", "SodiumChloride",
      +		16384, 8, 1,
      +		[]byte{
      +			0x70, 0x23, 0xbd, 0xcb, 0x3a, 0xfd, 0x73, 0x48, 0x46,
      +			0x1c, 0x06, 0xcd, 0x81, 0xfd, 0x38, 0xeb, 0xfd, 0xa8,
      +			0xfb, 0xba, 0x90, 0x4f, 0x8e, 0x3e, 0xa9, 0xb5, 0x43,
      +			0xf6, 0x54, 0x5d, 0xa1, 0xf2, 0xd5, 0x43, 0x29, 0x55,
      +			0x61, 0x3f, 0x0f, 0xcf, 0x62, 0xd4, 0x97, 0x05, 0x24,
      +			0x2a, 0x9a, 0xf9, 0xe6, 0x1e, 0x85, 0xdc, 0x0d, 0x65,
      +			0x1e, 0x40, 0xdf, 0xcf, 0x01, 0x7b, 0x45, 0x57, 0x58,
      +			0x87,
      +		},
      +	},
      +	/*
      +		// Disabled: needs 1 GiB RAM and takes too long for a simple test.
      +		{
      +			"pleaseletmein", "SodiumChloride",
      +			1048576, 8, 1,
      +			[]byte{
      +				0x21, 0x01, 0xcb, 0x9b, 0x6a, 0x51, 0x1a, 0xae, 0xad,
      +				0xdb, 0xbe, 0x09, 0xcf, 0x70, 0xf8, 0x81, 0xec, 0x56,
      +				0x8d, 0x57, 0x4a, 0x2f, 0xfd, 0x4d, 0xab, 0xe5, 0xee,
      +				0x98, 0x20, 0xad, 0xaa, 0x47, 0x8e, 0x56, 0xfd, 0x8f,
      +				0x4b, 0xa5, 0xd0, 0x9f, 0xfa, 0x1c, 0x6d, 0x92, 0x7c,
      +				0x40, 0xf4, 0xc3, 0x37, 0x30, 0x40, 0x49, 0xe8, 0xa9,
      +				0x52, 0xfb, 0xcb, 0xf4, 0x5c, 0x6f, 0xa7, 0x7a, 0x41,
      +				0xa4,
      +			},
      +		},
      +	*/
      +}
      +
      +var bad = []testVector{
      +	{"p", "s", 0, 1, 1, nil},                    // N == 0
      +	{"p", "s", 1, 1, 1, nil},                    // N == 1
      +	{"p", "s", 7, 8, 1, nil},                    // N is not power of 2
      +	{"p", "s", 16, maxInt / 2, maxInt / 2, nil}, // p * r too large
      +}
      +
      +func TestKey(t *testing.T) {
      +	for i, v := range good {
      +		k, err := Key([]byte(v.password), []byte(v.salt), v.N, v.r, v.p, len(v.output))
      +		if err != nil {
      +			t.Errorf("%d: got unexpected error: %s", i, err)
      +		}
      +		if !bytes.Equal(k, v.output) {
      +			t.Errorf("%d: expected %x, got %x", i, v.output, k)
      +		}
      +	}
      +	for i, v := range bad {
      +		_, err := Key([]byte(v.password), []byte(v.salt), v.N, v.r, v.p, 32)
      +		if err == nil {
      +			t.Errorf("%d: expected error, got nil", i)
      +		}
      +	}
      +}
      +
      +func BenchmarkKey(b *testing.B) {
      +	for i := 0; i < b.N; i++ {
      +		Key([]byte("password"), []byte("salt"), 16384, 8, 1, 64)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go
      new file mode 100644
      index 00000000..a0ee3ae7
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/sha3/doc.go
      @@ -0,0 +1,66 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package sha3 implements the SHA-3 fixed-output-length hash functions and
      +// the SHAKE variable-output-length hash functions defined by FIPS-202.
      +//
      +// Both types of hash function use the "sponge" construction and the Keccak
      +// permutation. For a detailed specification see http://keccak.noekeon.org/
      +//
      +//
      +// Guidance
      +//
      +// If you aren't sure what function you need, use SHAKE256 with at least 64
      +// bytes of output. The SHAKE instances are faster than the SHA3 instances;
      +// the latter have to allocate memory to conform to the hash.Hash interface.
      +//
      +// If you need a secret-key MAC (message authentication code), prepend the
      +// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
      +// output.
      +//
      +//
      +// Security strengths
      +//
      +// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
      +// strength against preimage attacks of x bits. Since they only produce "x"
      +// bits of output, their collision-resistance is only "x/2" bits.
      +//
      +// The SHAKE-256 and -128 functions have a generic security strength of 256 and
      +// 128 bits against all attacks, provided that at least 2x bits of their output
      +// is used.  Requesting more than 64 or 32 bytes of output, respectively, does
      +// not increase the collision-resistance of the SHAKE functions.
      +//
      +//
      +// The sponge construction
      +//
      +// A sponge builds a pseudo-random function from a public pseudo-random
      +// permutation, by applying the permutation to a state of "rate + capacity"
      +// bytes, but hiding "capacity" of the bytes.
      +//
      +// A sponge starts out with a zero state. To hash an input using a sponge, up
      +// to "rate" bytes of the input are XORed into the sponge's state. The sponge
      +// is then "full" and the permutation is applied to "empty" it. This process is
      +// repeated until all the input has been "absorbed". The input is then padded.
+// The digest is "squeezed" from the sponge in the same way, except that
+// output is copied out instead of input being XORed in.
      +//
      +// A sponge is parameterized by its generic security strength, which is equal
      +// to half its capacity; capacity + rate is equal to the permutation's width.
      +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
      +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
      +//
      +//
      +// Recommendations
      +//
      +// The SHAKE functions are recommended for most new uses. They can produce
      +// output of arbitrary length. SHAKE256, with an output length of at least
      +// 64 bytes, provides 256-bit security against all attacks.  The Keccak team
      +// recommends it for most applications upgrading from SHA2-512. (NIST chose a
      +// much stronger, but much slower, sponge instance for SHA3-512.)
      +//
      +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
      +// They produce output of the same length, with the same security strengths
      +// against all attacks. This means, in particular, that SHA3-256 only has
      +// 128-bit collision resistance, because its output length is 32 bytes.
      +package sha3 // import "golang.org/x/crypto/sha3"
      diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go
      new file mode 100644
      index 00000000..2b51cf4e
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/sha3/hashes.go
      @@ -0,0 +1,65 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package sha3
      +
      +// This file provides functions for creating instances of the SHA-3
      +// and SHAKE hash functions, as well as utility functions for hashing
      +// bytes.
      +
      +import (
      +	"hash"
      +)
      +
      +// New224 creates a new SHA3-224 hash.
      +// Its generic security strength is 224 bits against preimage attacks,
      +// and 112 bits against collision attacks.
      +func New224() hash.Hash { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} }
      +
      +// New256 creates a new SHA3-256 hash.
      +// Its generic security strength is 256 bits against preimage attacks,
      +// and 128 bits against collision attacks.
      +func New256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x06} }
      +
      +// New384 creates a new SHA3-384 hash.
      +// Its generic security strength is 384 bits against preimage attacks,
      +// and 192 bits against collision attacks.
      +func New384() hash.Hash { return &state{rate: 104, outputLen: 48, dsbyte: 0x06} }
      +
      +// New512 creates a new SHA3-512 hash.
      +// Its generic security strength is 512 bits against preimage attacks,
      +// and 256 bits against collision attacks.
      +func New512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} }
      +
      +// Sum224 returns the SHA3-224 digest of the data.
      +func Sum224(data []byte) (digest [28]byte) {
      +	h := New224()
      +	h.Write(data)
      +	h.Sum(digest[:0])
      +	return
      +}
      +
      +// Sum256 returns the SHA3-256 digest of the data.
      +func Sum256(data []byte) (digest [32]byte) {
      +	h := New256()
      +	h.Write(data)
      +	h.Sum(digest[:0])
      +	return
      +}
      +
      +// Sum384 returns the SHA3-384 digest of the data.
      +func Sum384(data []byte) (digest [48]byte) {
      +	h := New384()
      +	h.Write(data)
      +	h.Sum(digest[:0])
      +	return
      +}
      +
      +// Sum512 returns the SHA3-512 digest of the data.
      +func Sum512(data []byte) (digest [64]byte) {
      +	h := New512()
      +	h.Write(data)
      +	h.Sum(digest[:0])
      +	return
      +}
      diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go
      new file mode 100644
      index 00000000..13e7058f
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go
      @@ -0,0 +1,410 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package sha3
      +
      +// rc stores the round constants for use in the ι step.
      +var rc = [24]uint64{
      +	0x0000000000000001,
      +	0x0000000000008082,
      +	0x800000000000808A,
      +	0x8000000080008000,
      +	0x000000000000808B,
      +	0x0000000080000001,
      +	0x8000000080008081,
      +	0x8000000000008009,
      +	0x000000000000008A,
      +	0x0000000000000088,
      +	0x0000000080008009,
      +	0x000000008000000A,
      +	0x000000008000808B,
      +	0x800000000000008B,
      +	0x8000000000008089,
      +	0x8000000000008003,
      +	0x8000000000008002,
      +	0x8000000000000080,
      +	0x000000000000800A,
      +	0x800000008000000A,
      +	0x8000000080008081,
      +	0x8000000000008080,
      +	0x0000000080000001,
      +	0x8000000080008008,
      +}
      +
      +// keccakF1600 applies the Keccak permutation to a 1600b-wide
      +// state represented as a slice of 25 uint64s.
      +func keccakF1600(a *[25]uint64) {
      +	// Implementation translated from Keccak-inplace.c
      +	// in the keccak reference code.
      +	var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
      +
      +	for i := 0; i < 24; i += 4 {
      +		// Combines the 5 steps in each round into 2 steps.
      +		// Unrolls 4 rounds per loop and spreads some steps across rounds.
      +
      +		// Round 1
      +		bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
      +		bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
      +		bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
      +		bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
      +		bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
      +		d0 = bc4 ^ (bc1<<1 | bc1>>63)
      +		d1 = bc0 ^ (bc2<<1 | bc2>>63)
      +		d2 = bc1 ^ (bc3<<1 | bc3>>63)
      +		d3 = bc2 ^ (bc4<<1 | bc4>>63)
      +		d4 = bc3 ^ (bc0<<1 | bc0>>63)
      +
      +		bc0 = a[0] ^ d0
      +		t = a[6] ^ d1
      +		bc1 = t<<44 | t>>(64-44)
      +		t = a[12] ^ d2
      +		bc2 = t<<43 | t>>(64-43)
      +		t = a[18] ^ d3
      +		bc3 = t<<21 | t>>(64-21)
      +		t = a[24] ^ d4
      +		bc4 = t<<14 | t>>(64-14)
      +		a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
      +		a[6] = bc1 ^ (bc3 &^ bc2)
      +		a[12] = bc2 ^ (bc4 &^ bc3)
      +		a[18] = bc3 ^ (bc0 &^ bc4)
      +		a[24] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[10] ^ d0
      +		bc2 = t<<3 | t>>(64-3)
      +		t = a[16] ^ d1
      +		bc3 = t<<45 | t>>(64-45)
      +		t = a[22] ^ d2
      +		bc4 = t<<61 | t>>(64-61)
      +		t = a[3] ^ d3
      +		bc0 = t<<28 | t>>(64-28)
      +		t = a[9] ^ d4
      +		bc1 = t<<20 | t>>(64-20)
      +		a[10] = bc0 ^ (bc2 &^ bc1)
      +		a[16] = bc1 ^ (bc3 &^ bc2)
      +		a[22] = bc2 ^ (bc4 &^ bc3)
      +		a[3] = bc3 ^ (bc0 &^ bc4)
      +		a[9] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[20] ^ d0
      +		bc4 = t<<18 | t>>(64-18)
      +		t = a[1] ^ d1
      +		bc0 = t<<1 | t>>(64-1)
      +		t = a[7] ^ d2
      +		bc1 = t<<6 | t>>(64-6)
      +		t = a[13] ^ d3
      +		bc2 = t<<25 | t>>(64-25)
      +		t = a[19] ^ d4
      +		bc3 = t<<8 | t>>(64-8)
      +		a[20] = bc0 ^ (bc2 &^ bc1)
      +		a[1] = bc1 ^ (bc3 &^ bc2)
      +		a[7] = bc2 ^ (bc4 &^ bc3)
      +		a[13] = bc3 ^ (bc0 &^ bc4)
      +		a[19] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[5] ^ d0
      +		bc1 = t<<36 | t>>(64-36)
      +		t = a[11] ^ d1
      +		bc2 = t<<10 | t>>(64-10)
      +		t = a[17] ^ d2
      +		bc3 = t<<15 | t>>(64-15)
      +		t = a[23] ^ d3
      +		bc4 = t<<56 | t>>(64-56)
      +		t = a[4] ^ d4
      +		bc0 = t<<27 | t>>(64-27)
      +		a[5] = bc0 ^ (bc2 &^ bc1)
      +		a[11] = bc1 ^ (bc3 &^ bc2)
      +		a[17] = bc2 ^ (bc4 &^ bc3)
      +		a[23] = bc3 ^ (bc0 &^ bc4)
      +		a[4] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[15] ^ d0
      +		bc3 = t<<41 | t>>(64-41)
      +		t = a[21] ^ d1
      +		bc4 = t<<2 | t>>(64-2)
      +		t = a[2] ^ d2
      +		bc0 = t<<62 | t>>(64-62)
      +		t = a[8] ^ d3
      +		bc1 = t<<55 | t>>(64-55)
      +		t = a[14] ^ d4
      +		bc2 = t<<39 | t>>(64-39)
      +		a[15] = bc0 ^ (bc2 &^ bc1)
      +		a[21] = bc1 ^ (bc3 &^ bc2)
      +		a[2] = bc2 ^ (bc4 &^ bc3)
      +		a[8] = bc3 ^ (bc0 &^ bc4)
      +		a[14] = bc4 ^ (bc1 &^ bc0)
      +
      +		// Round 2
      +		bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
      +		bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
      +		bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
      +		bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
      +		bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
      +		d0 = bc4 ^ (bc1<<1 | bc1>>63)
      +		d1 = bc0 ^ (bc2<<1 | bc2>>63)
      +		d2 = bc1 ^ (bc3<<1 | bc3>>63)
      +		d3 = bc2 ^ (bc4<<1 | bc4>>63)
      +		d4 = bc3 ^ (bc0<<1 | bc0>>63)
      +
      +		bc0 = a[0] ^ d0
      +		t = a[16] ^ d1
      +		bc1 = t<<44 | t>>(64-44)
      +		t = a[7] ^ d2
      +		bc2 = t<<43 | t>>(64-43)
      +		t = a[23] ^ d3
      +		bc3 = t<<21 | t>>(64-21)
      +		t = a[14] ^ d4
      +		bc4 = t<<14 | t>>(64-14)
      +		a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
      +		a[16] = bc1 ^ (bc3 &^ bc2)
      +		a[7] = bc2 ^ (bc4 &^ bc3)
      +		a[23] = bc3 ^ (bc0 &^ bc4)
      +		a[14] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[20] ^ d0
      +		bc2 = t<<3 | t>>(64-3)
      +		t = a[11] ^ d1
      +		bc3 = t<<45 | t>>(64-45)
      +		t = a[2] ^ d2
      +		bc4 = t<<61 | t>>(64-61)
      +		t = a[18] ^ d3
      +		bc0 = t<<28 | t>>(64-28)
      +		t = a[9] ^ d4
      +		bc1 = t<<20 | t>>(64-20)
      +		a[20] = bc0 ^ (bc2 &^ bc1)
      +		a[11] = bc1 ^ (bc3 &^ bc2)
      +		a[2] = bc2 ^ (bc4 &^ bc3)
      +		a[18] = bc3 ^ (bc0 &^ bc4)
      +		a[9] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[15] ^ d0
      +		bc4 = t<<18 | t>>(64-18)
      +		t = a[6] ^ d1
      +		bc0 = t<<1 | t>>(64-1)
      +		t = a[22] ^ d2
      +		bc1 = t<<6 | t>>(64-6)
      +		t = a[13] ^ d3
      +		bc2 = t<<25 | t>>(64-25)
      +		t = a[4] ^ d4
      +		bc3 = t<<8 | t>>(64-8)
      +		a[15] = bc0 ^ (bc2 &^ bc1)
      +		a[6] = bc1 ^ (bc3 &^ bc2)
      +		a[22] = bc2 ^ (bc4 &^ bc3)
      +		a[13] = bc3 ^ (bc0 &^ bc4)
      +		a[4] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[10] ^ d0
      +		bc1 = t<<36 | t>>(64-36)
      +		t = a[1] ^ d1
      +		bc2 = t<<10 | t>>(64-10)
      +		t = a[17] ^ d2
      +		bc3 = t<<15 | t>>(64-15)
      +		t = a[8] ^ d3
      +		bc4 = t<<56 | t>>(64-56)
      +		t = a[24] ^ d4
      +		bc0 = t<<27 | t>>(64-27)
      +		a[10] = bc0 ^ (bc2 &^ bc1)
      +		a[1] = bc1 ^ (bc3 &^ bc2)
      +		a[17] = bc2 ^ (bc4 &^ bc3)
      +		a[8] = bc3 ^ (bc0 &^ bc4)
      +		a[24] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[5] ^ d0
      +		bc3 = t<<41 | t>>(64-41)
      +		t = a[21] ^ d1
      +		bc4 = t<<2 | t>>(64-2)
      +		t = a[12] ^ d2
      +		bc0 = t<<62 | t>>(64-62)
      +		t = a[3] ^ d3
      +		bc1 = t<<55 | t>>(64-55)
      +		t = a[19] ^ d4
      +		bc2 = t<<39 | t>>(64-39)
      +		a[5] = bc0 ^ (bc2 &^ bc1)
      +		a[21] = bc1 ^ (bc3 &^ bc2)
      +		a[12] = bc2 ^ (bc4 &^ bc3)
      +		a[3] = bc3 ^ (bc0 &^ bc4)
      +		a[19] = bc4 ^ (bc1 &^ bc0)
      +
      +		// Round 3
      +		bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
      +		bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
      +		bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
      +		bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
      +		bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
      +		d0 = bc4 ^ (bc1<<1 | bc1>>63)
      +		d1 = bc0 ^ (bc2<<1 | bc2>>63)
      +		d2 = bc1 ^ (bc3<<1 | bc3>>63)
      +		d3 = bc2 ^ (bc4<<1 | bc4>>63)
      +		d4 = bc3 ^ (bc0<<1 | bc0>>63)
      +
      +		bc0 = a[0] ^ d0
      +		t = a[11] ^ d1
      +		bc1 = t<<44 | t>>(64-44)
      +		t = a[22] ^ d2
      +		bc2 = t<<43 | t>>(64-43)
      +		t = a[8] ^ d3
      +		bc3 = t<<21 | t>>(64-21)
      +		t = a[19] ^ d4
      +		bc4 = t<<14 | t>>(64-14)
      +		a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
      +		a[11] = bc1 ^ (bc3 &^ bc2)
      +		a[22] = bc2 ^ (bc4 &^ bc3)
      +		a[8] = bc3 ^ (bc0 &^ bc4)
      +		a[19] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[15] ^ d0
      +		bc2 = t<<3 | t>>(64-3)
      +		t = a[1] ^ d1
      +		bc3 = t<<45 | t>>(64-45)
      +		t = a[12] ^ d2
      +		bc4 = t<<61 | t>>(64-61)
      +		t = a[23] ^ d3
      +		bc0 = t<<28 | t>>(64-28)
      +		t = a[9] ^ d4
      +		bc1 = t<<20 | t>>(64-20)
      +		a[15] = bc0 ^ (bc2 &^ bc1)
      +		a[1] = bc1 ^ (bc3 &^ bc2)
      +		a[12] = bc2 ^ (bc4 &^ bc3)
      +		a[23] = bc3 ^ (bc0 &^ bc4)
      +		a[9] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[5] ^ d0
      +		bc4 = t<<18 | t>>(64-18)
      +		t = a[16] ^ d1
      +		bc0 = t<<1 | t>>(64-1)
      +		t = a[2] ^ d2
      +		bc1 = t<<6 | t>>(64-6)
      +		t = a[13] ^ d3
      +		bc2 = t<<25 | t>>(64-25)
      +		t = a[24] ^ d4
      +		bc3 = t<<8 | t>>(64-8)
      +		a[5] = bc0 ^ (bc2 &^ bc1)
      +		a[16] = bc1 ^ (bc3 &^ bc2)
      +		a[2] = bc2 ^ (bc4 &^ bc3)
      +		a[13] = bc3 ^ (bc0 &^ bc4)
      +		a[24] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[20] ^ d0
      +		bc1 = t<<36 | t>>(64-36)
      +		t = a[6] ^ d1
      +		bc2 = t<<10 | t>>(64-10)
      +		t = a[17] ^ d2
      +		bc3 = t<<15 | t>>(64-15)
      +		t = a[3] ^ d3
      +		bc4 = t<<56 | t>>(64-56)
      +		t = a[14] ^ d4
      +		bc0 = t<<27 | t>>(64-27)
      +		a[20] = bc0 ^ (bc2 &^ bc1)
      +		a[6] = bc1 ^ (bc3 &^ bc2)
      +		a[17] = bc2 ^ (bc4 &^ bc3)
      +		a[3] = bc3 ^ (bc0 &^ bc4)
      +		a[14] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[10] ^ d0
      +		bc3 = t<<41 | t>>(64-41)
      +		t = a[21] ^ d1
      +		bc4 = t<<2 | t>>(64-2)
      +		t = a[7] ^ d2
      +		bc0 = t<<62 | t>>(64-62)
      +		t = a[18] ^ d3
      +		bc1 = t<<55 | t>>(64-55)
      +		t = a[4] ^ d4
      +		bc2 = t<<39 | t>>(64-39)
      +		a[10] = bc0 ^ (bc2 &^ bc1)
      +		a[21] = bc1 ^ (bc3 &^ bc2)
      +		a[7] = bc2 ^ (bc4 &^ bc3)
      +		a[18] = bc3 ^ (bc0 &^ bc4)
      +		a[4] = bc4 ^ (bc1 &^ bc0)
      +
      +		// Round 4
      +		bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
      +		bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
      +		bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
      +		bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
      +		bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
      +		d0 = bc4 ^ (bc1<<1 | bc1>>63)
      +		d1 = bc0 ^ (bc2<<1 | bc2>>63)
      +		d2 = bc1 ^ (bc3<<1 | bc3>>63)
      +		d3 = bc2 ^ (bc4<<1 | bc4>>63)
      +		d4 = bc3 ^ (bc0<<1 | bc0>>63)
      +
      +		bc0 = a[0] ^ d0
      +		t = a[1] ^ d1
      +		bc1 = t<<44 | t>>(64-44)
      +		t = a[2] ^ d2
      +		bc2 = t<<43 | t>>(64-43)
      +		t = a[3] ^ d3
      +		bc3 = t<<21 | t>>(64-21)
      +		t = a[4] ^ d4
      +		bc4 = t<<14 | t>>(64-14)
      +		a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
      +		a[1] = bc1 ^ (bc3 &^ bc2)
      +		a[2] = bc2 ^ (bc4 &^ bc3)
      +		a[3] = bc3 ^ (bc0 &^ bc4)
      +		a[4] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[5] ^ d0
      +		bc2 = t<<3 | t>>(64-3)
      +		t = a[6] ^ d1
      +		bc3 = t<<45 | t>>(64-45)
      +		t = a[7] ^ d2
      +		bc4 = t<<61 | t>>(64-61)
      +		t = a[8] ^ d3
      +		bc0 = t<<28 | t>>(64-28)
      +		t = a[9] ^ d4
      +		bc1 = t<<20 | t>>(64-20)
      +		a[5] = bc0 ^ (bc2 &^ bc1)
      +		a[6] = bc1 ^ (bc3 &^ bc2)
      +		a[7] = bc2 ^ (bc4 &^ bc3)
      +		a[8] = bc3 ^ (bc0 &^ bc4)
      +		a[9] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[10] ^ d0
      +		bc4 = t<<18 | t>>(64-18)
      +		t = a[11] ^ d1
      +		bc0 = t<<1 | t>>(64-1)
      +		t = a[12] ^ d2
      +		bc1 = t<<6 | t>>(64-6)
      +		t = a[13] ^ d3
      +		bc2 = t<<25 | t>>(64-25)
      +		t = a[14] ^ d4
      +		bc3 = t<<8 | t>>(64-8)
      +		a[10] = bc0 ^ (bc2 &^ bc1)
      +		a[11] = bc1 ^ (bc3 &^ bc2)
      +		a[12] = bc2 ^ (bc4 &^ bc3)
      +		a[13] = bc3 ^ (bc0 &^ bc4)
      +		a[14] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[15] ^ d0
      +		bc1 = t<<36 | t>>(64-36)
      +		t = a[16] ^ d1
      +		bc2 = t<<10 | t>>(64-10)
      +		t = a[17] ^ d2
      +		bc3 = t<<15 | t>>(64-15)
      +		t = a[18] ^ d3
      +		bc4 = t<<56 | t>>(64-56)
      +		t = a[19] ^ d4
      +		bc0 = t<<27 | t>>(64-27)
      +		a[15] = bc0 ^ (bc2 &^ bc1)
      +		a[16] = bc1 ^ (bc3 &^ bc2)
      +		a[17] = bc2 ^ (bc4 &^ bc3)
      +		a[18] = bc3 ^ (bc0 &^ bc4)
      +		a[19] = bc4 ^ (bc1 &^ bc0)
      +
      +		t = a[20] ^ d0
      +		bc3 = t<<41 | t>>(64-41)
      +		t = a[21] ^ d1
      +		bc4 = t<<2 | t>>(64-2)
      +		t = a[22] ^ d2
      +		bc0 = t<<62 | t>>(64-62)
      +		t = a[23] ^ d3
      +		bc1 = t<<55 | t>>(64-55)
      +		t = a[24] ^ d4
      +		bc2 = t<<39 | t>>(64-39)
      +		a[20] = bc0 ^ (bc2 &^ bc1)
      +		a[21] = bc1 ^ (bc3 &^ bc2)
      +		a[22] = bc2 ^ (bc4 &^ bc3)
      +		a[23] = bc3 ^ (bc0 &^ bc4)
      +		a[24] = bc4 ^ (bc1 &^ bc0)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go
      new file mode 100644
      index 00000000..3cf6a22e
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/sha3/register.go
      @@ -0,0 +1,18 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build go1.4
      +
      +package sha3
      +
      +import (
      +	"crypto"
      +)
      +
      +func init() {
      +	crypto.RegisterHash(crypto.SHA3_224, New224)
      +	crypto.RegisterHash(crypto.SHA3_256, New256)
      +	crypto.RegisterHash(crypto.SHA3_384, New384)
      +	crypto.RegisterHash(crypto.SHA3_512, New512)
      +}
      diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go
      new file mode 100644
      index 00000000..c8fd31cb
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/sha3/sha3.go
      @@ -0,0 +1,193 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package sha3
      +
      +// spongeDirection indicates the direction bytes are flowing through the sponge.
      +type spongeDirection int
      +
      +const (
      +	// spongeAbsorbing indicates that the sponge is absorbing input.
      +	spongeAbsorbing spongeDirection = iota
      +	// spongeSqueezing indicates that the sponge is being squeezed.
      +	spongeSqueezing
      +)
      +
      +const (
      +	// maxRate is the maximum size of the internal buffer. SHAKE-256
      +	// currently needs the largest buffer.
      +	maxRate = 168
      +)
      +
      +type state struct {
      +	// Generic sponge components.
      +	a    [25]uint64 // main state of the hash
      +	buf  []byte     // points into storage
      +	rate int        // the number of bytes of state to use
      +
      +	// dsbyte contains the "domain separation" bits and the first bit of
      +	// the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
      +	// SHA-3 and SHAKE functions by appending bitstrings to the message.
      +	// Using a little-endian bit-ordering convention, these are "01" for SHA-3
      +	// and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
      +	// padding rule from section 5.1 is applied to pad the message to a multiple
      +	// of the rate, which involves adding a "1" bit, zero or more "0" bits, and
      +	// a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
      +	// giving 00000110b (0x06) and 00011111b (0x1f).
      +	// [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
      +	//     "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
      +	//      Extendable-Output Functions (May 2014)"
      +	dsbyte  byte
      +	storage [maxRate]byte
      +
      +	// Specific to SHA-3 and SHAKE.
      +	fixedOutput bool            // whether this is a fixed-ouput-length instance
      +	outputLen   int             // the default output size in bytes
      +	state       spongeDirection // whether the sponge is absorbing or squeezing
      +}
      +
      +// BlockSize returns the rate of sponge underlying this hash function.
      +func (d *state) BlockSize() int { return d.rate }
      +
      +// Size returns the output size of the hash function in bytes.
      +func (d *state) Size() int { return d.outputLen }
      +
      +// Reset clears the internal state by zeroing the sponge state and
      +// the byte buffer, and setting Sponge.state to absorbing.
      +func (d *state) Reset() {
      +	// Zero the permutation's state.
      +	for i := range d.a {
      +		d.a[i] = 0
      +	}
      +	d.state = spongeAbsorbing
      +	d.buf = d.storage[:0]
      +}
      +
      +func (d *state) clone() *state {
      +	ret := *d
      +	if ret.state == spongeAbsorbing {
      +		ret.buf = ret.storage[:len(ret.buf)]
      +	} else {
      +		ret.buf = ret.storage[d.rate-cap(d.buf) : d.rate]
      +	}
      +
      +	return &ret
      +}
      +
      +// permute applies the KeccakF-1600 permutation. It handles
      +// any input-output buffering.
      +func (d *state) permute() {
      +	switch d.state {
      +	case spongeAbsorbing:
      +		// If we're absorbing, we need to xor the input into the state
      +		// before applying the permutation.
      +		xorIn(d, d.buf)
      +		d.buf = d.storage[:0]
      +		keccakF1600(&d.a)
      +	case spongeSqueezing:
      +		// If we're squeezing, we need to apply the permutatin before
      +		// copying more output.
      +		keccakF1600(&d.a)
      +		d.buf = d.storage[:d.rate]
      +		copyOut(d, d.buf)
      +	}
      +}
      +
      +// pads appends the domain separation bits in dsbyte, applies
      +// the multi-bitrate 10..1 padding rule, and permutes the state.
      +func (d *state) padAndPermute(dsbyte byte) {
      +	if d.buf == nil {
      +		d.buf = d.storage[:0]
      +	}
      +	// Pad with this instance's domain-separator bits. We know that there's
      +	// at least one byte of space in d.buf because, if it were full,
      +	// permute would have been called to empty it. dsbyte also contains the
      +	// first one bit for the padding. See the comment in the state struct.
      +	d.buf = append(d.buf, dsbyte)
      +	zerosStart := len(d.buf)
      +	d.buf = d.storage[:d.rate]
      +	for i := zerosStart; i < d.rate; i++ {
      +		d.buf[i] = 0
      +	}
      +	// This adds the final one bit for the padding. Because of the way that
      +	// bits are numbered from the LSB upwards, the final bit is the MSB of
      +	// the last byte.
      +	d.buf[d.rate-1] ^= 0x80
      +	// Apply the permutation
      +	d.permute()
      +	d.state = spongeSqueezing
      +	d.buf = d.storage[:d.rate]
      +	copyOut(d, d.buf)
      +}
      +
      +// Write absorbs more data into the hash's state. It produces an error
      +// if more data is written to the ShakeHash after writing
      +func (d *state) Write(p []byte) (written int, err error) {
      +	if d.state != spongeAbsorbing {
      +		panic("sha3: write to sponge after read")
      +	}
      +	if d.buf == nil {
      +		d.buf = d.storage[:0]
      +	}
      +	written = len(p)
      +
      +	for len(p) > 0 {
      +		if len(d.buf) == 0 && len(p) >= d.rate {
      +			// The fast path; absorb a full "rate" bytes of input and apply the permutation.
      +			xorIn(d, p[:d.rate])
      +			p = p[d.rate:]
      +			keccakF1600(&d.a)
      +		} else {
      +			// The slow path; buffer the input until we can fill the sponge, and then xor it in.
      +			todo := d.rate - len(d.buf)
      +			if todo > len(p) {
      +				todo = len(p)
      +			}
      +			d.buf = append(d.buf, p[:todo]...)
      +			p = p[todo:]
      +
      +			// If the sponge is full, apply the permutation.
      +			if len(d.buf) == d.rate {
      +				d.permute()
      +			}
      +		}
      +	}
      +
      +	return
      +}
      +
      +// Read squeezes an arbitrary number of bytes from the sponge.
      +func (d *state) Read(out []byte) (n int, err error) {
      +	// If we're still absorbing, pad and apply the permutation.
      +	if d.state == spongeAbsorbing {
      +		d.padAndPermute(d.dsbyte)
      +	}
      +
      +	n = len(out)
      +
      +	// Now, do the squeezing.
      +	for len(out) > 0 {
      +		n := copy(out, d.buf)
      +		d.buf = d.buf[n:]
      +		out = out[n:]
      +
      +		// Apply the permutation if we've squeezed the sponge dry.
      +		if len(d.buf) == 0 {
      +			d.permute()
      +		}
      +	}
      +
      +	return
      +}
      +
      +// Sum applies padding to the hash state and then squeezes out the desired
      +// number of output bytes.
      +func (d *state) Sum(in []byte) []byte {
      +	// Make a copy of the original hash so that caller can keep writing
      +	// and summing.
      +	dup := d.clone()
      +	hash := make([]byte, dup.outputLen)
      +	dup.Read(hash)
      +	return append(in, hash...)
      +}
      diff --git a/vendor/golang.org/x/crypto/sha3/sha3_test.go b/vendor/golang.org/x/crypto/sha3/sha3_test.go
      new file mode 100644
      index 00000000..caf72f27
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/sha3/sha3_test.go
      @@ -0,0 +1,306 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package sha3
      +
      +// Tests include all the ShortMsgKATs provided by the Keccak team at
      +// https://github.com/gvanas/KeccakCodePackage
      +//
      +// They only include the zero-bit case of the bitwise testvectors
      +// published by NIST in the draft of FIPS-202.
      +
      +import (
      +	"bytes"
      +	"compress/flate"
      +	"encoding/hex"
      +	"encoding/json"
      +	"hash"
      +	"os"
      +	"strings"
      +	"testing"
      +)
      +
      +const (
      +	testString  = "brekeccakkeccak koax koax"
      +	katFilename = "testdata/keccakKats.json.deflate"
      +)
      +
      +// Internal-use instances of SHAKE used to test against KATs.
      +func newHashShake128() hash.Hash {
      +	return &state{rate: 168, dsbyte: 0x1f, outputLen: 512}
      +}
      +func newHashShake256() hash.Hash {
      +	return &state{rate: 136, dsbyte: 0x1f, outputLen: 512}
      +}
      +
      +// testDigests contains functions returning hash.Hash instances
      +// with output-length equal to the KAT length for both SHA-3 and
      +// SHAKE instances.
      +var testDigests = map[string]func() hash.Hash{
      +	"SHA3-224": New224,
      +	"SHA3-256": New256,
      +	"SHA3-384": New384,
      +	"SHA3-512": New512,
      +	"SHAKE128": newHashShake128,
      +	"SHAKE256": newHashShake256,
      +}
      +
      +// testShakes contains functions that return ShakeHash instances for
      +// testing the ShakeHash-specific interface.
      +var testShakes = map[string]func() ShakeHash{
      +	"SHAKE128": NewShake128,
      +	"SHAKE256": NewShake256,
      +}
      +
      +// decodeHex converts a hex-encoded string into a raw byte string.
      +func decodeHex(s string) []byte {
      +	b, err := hex.DecodeString(s)
      +	if err != nil {
      +		panic(err)
      +	}
      +	return b
      +}
      +
      +// structs used to marshal JSON test-cases.
      +type KeccakKats struct {
      +	Kats map[string][]struct {
      +		Digest  string `json:"digest"`
      +		Length  int64  `json:"length"`
      +		Message string `json:"message"`
      +	}
      +}
      +
      +func testUnalignedAndGeneric(t *testing.T, testf func(impl string)) {
      +	xorInOrig, copyOutOrig := xorIn, copyOut
      +	xorIn, copyOut = xorInGeneric, copyOutGeneric
      +	testf("generic")
      +	if xorImplementationUnaligned != "generic" {
      +		xorIn, copyOut = xorInUnaligned, copyOutUnaligned
      +		testf("unaligned")
      +	}
      +	xorIn, copyOut = xorInOrig, copyOutOrig
      +}
      +
      +// TestKeccakKats tests the SHA-3 and Shake implementations against all the
      +// ShortMsgKATs from https://github.com/gvanas/KeccakCodePackage
      +// (The testvectors are stored in keccakKats.json.deflate due to their length.)
      +func TestKeccakKats(t *testing.T) {
      +	testUnalignedAndGeneric(t, func(impl string) {
      +		// Read the KATs.
      +		deflated, err := os.Open(katFilename)
      +		if err != nil {
      +			t.Errorf("error opening %s: %s", katFilename, err)
      +		}
      +		file := flate.NewReader(deflated)
      +		dec := json.NewDecoder(file)
      +		var katSet KeccakKats
      +		err = dec.Decode(&katSet)
      +		if err != nil {
      +			t.Errorf("error decoding KATs: %s", err)
      +		}
      +
      +		// Do the KATs.
      +		for functionName, kats := range katSet.Kats {
      +			d := testDigests[functionName]()
      +			for _, kat := range kats {
      +				d.Reset()
      +				in, err := hex.DecodeString(kat.Message)
      +				if err != nil {
      +					t.Errorf("error decoding KAT: %s", err)
      +				}
      +				d.Write(in[:kat.Length/8])
      +				got := strings.ToUpper(hex.EncodeToString(d.Sum(nil)))
      +				if got != kat.Digest {
      +					t.Errorf("function=%s, implementation=%s, length=%d\nmessage:\n  %s\ngot:\n  %s\nwanted:\n %s",
      +						functionName, impl, kat.Length, kat.Message, got, kat.Digest)
      +					t.Logf("wanted %+v", kat)
      +					t.FailNow()
      +				}
      +				continue
      +			}
      +		}
      +	})
      +}
      +
      +// TestUnalignedWrite tests that writing data in an arbitrary pattern with
      +// small input buffers.
      +func testUnalignedWrite(t *testing.T) {
      +	testUnalignedAndGeneric(t, func(impl string) {
      +		buf := sequentialBytes(0x10000)
      +		for alg, df := range testDigests {
      +			d := df()
      +			d.Reset()
      +			d.Write(buf)
      +			want := d.Sum(nil)
      +			d.Reset()
      +			for i := 0; i < len(buf); {
      +				// Cycle through offsets which make a 137 byte sequence.
      +				// Because 137 is prime this sequence should exercise all corner cases.
      +				offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1}
      +				for _, j := range offsets {
      +					if v := len(buf) - i; v < j {
      +						j = v
      +					}
      +					d.Write(buf[i : i+j])
      +					i += j
      +				}
      +			}
      +			got := d.Sum(nil)
      +			if !bytes.Equal(got, want) {
      +				t.Errorf("Unaligned writes, implementation=%s, alg=%s\ngot %q, want %q", impl, alg, got, want)
      +			}
      +		}
      +	})
      +}
      +
      +// TestAppend checks that appending works when reallocation is necessary.
      +func TestAppend(t *testing.T) {
      +	testUnalignedAndGeneric(t, func(impl string) {
      +		d := New224()
      +
      +		for capacity := 2; capacity <= 66; capacity += 64 {
      +			// The first time around the loop, Sum will have to reallocate.
      +			// The second time, it will not.
      +			buf := make([]byte, 2, capacity)
      +			d.Reset()
      +			d.Write([]byte{0xcc})
      +			buf = d.Sum(buf)
      +			expected := "0000DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
      +			if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
      +				t.Errorf("got %s, want %s", got, expected)
      +			}
      +		}
      +	})
      +}
      +
      +// TestAppendNoRealloc tests that appending works when no reallocation is necessary.
      +func TestAppendNoRealloc(t *testing.T) {
      +	testUnalignedAndGeneric(t, func(impl string) {
      +		buf := make([]byte, 1, 200)
      +		d := New224()
      +		d.Write([]byte{0xcc})
      +		buf = d.Sum(buf)
      +		expected := "00DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
      +		if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
      +			t.Errorf("%s: got %s, want %s", impl, got, expected)
      +		}
      +	})
      +}
      +
      +// TestSqueezing checks that squeezing the full output a single time produces
      +// the same output as repeatedly squeezing the instance.
      +func TestSqueezing(t *testing.T) {
      +	testUnalignedAndGeneric(t, func(impl string) {
      +		for functionName, newShakeHash := range testShakes {
      +			d0 := newShakeHash()
      +			d0.Write([]byte(testString))
      +			ref := make([]byte, 32)
      +			d0.Read(ref)
      +
      +			d1 := newShakeHash()
      +			d1.Write([]byte(testString))
      +			var multiple []byte
      +			for _ = range ref {
      +				one := make([]byte, 1)
      +				d1.Read(one)
      +				multiple = append(multiple, one...)
      +			}
      +			if !bytes.Equal(ref, multiple) {
      +				t.Errorf("%s (%s): squeezing %d bytes one at a time failed", functionName, impl, len(ref))
      +			}
      +		}
      +	})
      +}
      +
      +// sequentialBytes produces a buffer of size consecutive bytes 0x00, 0x01, ..., used for testing.
      +func sequentialBytes(size int) []byte {
      +	result := make([]byte, size)
      +	for i := range result {
      +		result[i] = byte(i)
      +	}
      +	return result
      +}
      +
      +// BenchmarkPermutationFunction measures the speed of the permutation function
      +// with no input data.
      +func BenchmarkPermutationFunction(b *testing.B) {
      +	b.SetBytes(int64(200))
      +	var lanes [25]uint64
      +	for i := 0; i < b.N; i++ {
      +		keccakF1600(&lanes)
      +	}
      +}
      +
      +// benchmarkHash tests the speed to hash num buffers of buflen each.
      +func benchmarkHash(b *testing.B, h hash.Hash, size, num int) {
      +	b.StopTimer()
      +	h.Reset()
      +	data := sequentialBytes(size)
      +	b.SetBytes(int64(size * num))
      +	b.StartTimer()
      +
      +	var state []byte
      +	for i := 0; i < b.N; i++ {
      +		for j := 0; j < num; j++ {
      +			h.Write(data)
      +		}
      +		state = h.Sum(state[:0])
      +	}
      +	b.StopTimer()
      +	h.Reset()
      +}
      +
      +// benchmarkShake is specialized to the Shake instances, which don't
      +// require a copy on reading output.
      +func benchmarkShake(b *testing.B, h ShakeHash, size, num int) {
      +	b.StopTimer()
      +	h.Reset()
      +	data := sequentialBytes(size)
      +	d := make([]byte, 32)
      +
      +	b.SetBytes(int64(size * num))
      +	b.StartTimer()
      +
      +	for i := 0; i < b.N; i++ {
      +		h.Reset()
      +		for j := 0; j < num; j++ {
      +			h.Write(data)
      +		}
      +		h.Read(d)
      +	}
      +}
      +
      +func BenchmarkSha3_512_MTU(b *testing.B) { benchmarkHash(b, New512(), 1350, 1) }
      +func BenchmarkSha3_384_MTU(b *testing.B) { benchmarkHash(b, New384(), 1350, 1) }
      +func BenchmarkSha3_256_MTU(b *testing.B) { benchmarkHash(b, New256(), 1350, 1) }
      +func BenchmarkSha3_224_MTU(b *testing.B) { benchmarkHash(b, New224(), 1350, 1) }
      +
      +func BenchmarkShake128_MTU(b *testing.B)  { benchmarkShake(b, NewShake128(), 1350, 1) }
      +func BenchmarkShake256_MTU(b *testing.B)  { benchmarkShake(b, NewShake256(), 1350, 1) }
      +func BenchmarkShake256_16x(b *testing.B)  { benchmarkShake(b, NewShake256(), 16, 1024) }
      +func BenchmarkShake256_1MiB(b *testing.B) { benchmarkShake(b, NewShake256(), 1024, 1024) }
      +
      +func BenchmarkSha3_512_1MiB(b *testing.B) { benchmarkHash(b, New512(), 1024, 1024) }
      +
      +func Example_sum() {
      +	buf := []byte("some data to hash")
      +	// A hash needs to be 64 bytes long to have 256-bit collision resistance.
      +	h := make([]byte, 64)
      +	// Compute a 64-byte hash of buf and put it in h.
      +	ShakeSum256(h, buf)
      +}
      +
      +func Example_mac() {
      +	k := []byte("this is a secret key; you should generate a strong random key that's at least 32 bytes long")
      +	buf := []byte("and this is some data to authenticate")
      +	// A MAC with 32 bytes of output has 256-bit security strength -- if you use at least a 32-byte-long key.
      +	h := make([]byte, 32)
      +	d := NewShake256()
      +	// Write the key into the hash.
      +	d.Write(k)
      +	// Now write the data.
      +	d.Write(buf)
      +	// Read 32 bytes of output from the hash into h.
      +	d.Read(h)
      +}
      diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go
      new file mode 100644
      index 00000000..841f9860
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/sha3/shake.go
      @@ -0,0 +1,60 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package sha3
      +
      +// This file defines the ShakeHash interface, and provides
      +// functions for creating SHAKE instances, as well as utility
      +// functions for hashing bytes to arbitrary-length output.
      +
      +import (
      +	"io"
      +)
      +
      +// ShakeHash defines the interface to hash functions that
      +// support arbitrary-length output.
      +type ShakeHash interface {
      +	// Write absorbs more data into the hash's state. It panics if input is
      +	// written to it after output has been read from it.
      +	io.Writer
      +
      +	// Read reads more output from the hash; reading affects the hash's
      +	// state. (ShakeHash.Read is thus very different from Hash.Sum)
      +	// It never returns an error.
      +	io.Reader
      +
      +	// Clone returns a copy of the ShakeHash in its current state.
      +	Clone() ShakeHash
      +
      +	// Reset resets the ShakeHash to its initial state.
      +	Reset()
      +}
      +
      +func (d *state) Clone() ShakeHash {
      +	return d.clone()
      +}
      +
      +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
      +// Its generic security strength is 128 bits against all attacks if at
      +// least 32 bytes of its output are used.
      +func NewShake128() ShakeHash { return &state{rate: 168, dsbyte: 0x1f} }
      +
      +// NewShake256 creates a new SHAKE128 variable-output-length ShakeHash.
      +// Its generic security strength is 256 bits against all attacks if
      +// at least 64 bytes of its output are used.
      +func NewShake256() ShakeHash { return &state{rate: 136, dsbyte: 0x1f} }
      +
      +// ShakeSum128 writes an arbitrary-length digest of data into hash.
      +func ShakeSum128(hash, data []byte) {
      +	h := NewShake128()
      +	h.Write(data)
      +	h.Read(hash)
      +}
      +
      +// ShakeSum256 writes an arbitrary-length digest of data into hash.
      +func ShakeSum256(hash, data []byte) {
      +	h := NewShake256()
      +	h.Write(data)
      +	h.Read(hash)
      +}
      diff --git a/vendor/golang.org/x/crypto/sha3/testdata/keccakKats.json.deflate b/vendor/golang.org/x/crypto/sha3/testdata/keccakKats.json.deflate
      new file mode 100644
      index 00000000..62e85ae2
      Binary files /dev/null and b/vendor/golang.org/x/crypto/sha3/testdata/keccakKats.json.deflate differ
      diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go
      new file mode 100644
      index 00000000..d622979c
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/sha3/xor.go
      @@ -0,0 +1,16 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build !amd64,!386 appengine
      +
      +package sha3
      +
      +var (
      +	xorIn            = xorInGeneric
      +	copyOut          = copyOutGeneric
      +	xorInUnaligned   = xorInGeneric
      +	copyOutUnaligned = copyOutGeneric
      +)
      +
      +const xorImplementationUnaligned = "generic"
      diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go
      new file mode 100644
      index 00000000..fd35f02e
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/sha3/xor_generic.go
      @@ -0,0 +1,28 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package sha3
      +
      +import "encoding/binary"
      +
      +// xorInGeneric xors the bytes in buf into the state; it
      +// makes no non-portable assumptions about memory layout
      +// or alignment.
      +func xorInGeneric(d *state, buf []byte) {
      +	n := len(buf) / 8
      +
      +	for i := 0; i < n; i++ {
      +		a := binary.LittleEndian.Uint64(buf)
      +		d.a[i] ^= a
      +		buf = buf[8:]
      +	}
      +}
      +
      +// copyOutGeneric copies ulint64s to a byte buffer.
      +func copyOutGeneric(d *state, b []byte) {
      +	for i := 0; len(b) >= 8; i++ {
      +		binary.LittleEndian.PutUint64(b, d.a[i])
      +		b = b[8:]
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go
      new file mode 100644
      index 00000000..c7851a1d
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go
      @@ -0,0 +1,58 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build amd64 386
      +// +build !appengine
      +
      +package sha3
      +
      +import "unsafe"
      +
      +func xorInUnaligned(d *state, buf []byte) {
      +	bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))
      +	n := len(buf)
      +	if n >= 72 {
      +		d.a[0] ^= bw[0]
      +		d.a[1] ^= bw[1]
      +		d.a[2] ^= bw[2]
      +		d.a[3] ^= bw[3]
      +		d.a[4] ^= bw[4]
      +		d.a[5] ^= bw[5]
      +		d.a[6] ^= bw[6]
      +		d.a[7] ^= bw[7]
      +		d.a[8] ^= bw[8]
      +	}
      +	if n >= 104 {
      +		d.a[9] ^= bw[9]
      +		d.a[10] ^= bw[10]
      +		d.a[11] ^= bw[11]
      +		d.a[12] ^= bw[12]
      +	}
      +	if n >= 136 {
      +		d.a[13] ^= bw[13]
      +		d.a[14] ^= bw[14]
      +		d.a[15] ^= bw[15]
      +		d.a[16] ^= bw[16]
      +	}
      +	if n >= 144 {
      +		d.a[17] ^= bw[17]
      +	}
      +	if n >= 168 {
      +		d.a[18] ^= bw[18]
      +		d.a[19] ^= bw[19]
      +		d.a[20] ^= bw[20]
      +	}
      +}
      +
      +func copyOutUnaligned(d *state, buf []byte) {
      +	ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
      +	copy(buf, ab[:])
      +}
      +
      +var (
      +	xorIn   = xorInUnaligned
      +	copyOut = copyOutUnaligned
      +)
      +
      +const xorImplementationUnaligned = "unaligned"
      diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go
      new file mode 100644
      index 00000000..2cb9248e
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go
      @@ -0,0 +1,616 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package agent implements the ssh-agent protocol, and provides both
      +// a client and a server. The client can talk to a standard ssh-agent
      +// that uses UNIX sockets, and one could implement an alternative
      +// ssh-agent process using the sample server.
      +//
      +// References:
      +//  [PROTOCOL.agent]:    http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent?rev=HEAD
      +package agent // import "golang.org/x/crypto/ssh/agent"
      +
      +import (
      +	"bytes"
      +	"crypto/dsa"
      +	"crypto/ecdsa"
      +	"crypto/elliptic"
      +	"crypto/rsa"
      +	"encoding/base64"
      +	"encoding/binary"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"math/big"
      +	"sync"
      +
      +	"golang.org/x/crypto/ssh"
      +)
      +
      +// Agent represents the capabilities of an ssh-agent.
      +type Agent interface {
      +	// List returns the identities known to the agent.
      +	List() ([]*Key, error)
      +
      +	// Sign has the agent sign the data using a protocol 2 key as defined
      +	// in [PROTOCOL.agent] section 2.6.2.
      +	Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error)
      +
      +	// Add adds a private key to the agent.
      +	Add(key AddedKey) error
      +
      +	// Remove removes all identities with the given public key.
      +	Remove(key ssh.PublicKey) error
      +
      +	// RemoveAll removes all identities.
      +	RemoveAll() error
      +
      +	// Lock locks the agent. Sign and Remove will fail, and List will empty an empty list.
      +	Lock(passphrase []byte) error
      +
      +	// Unlock undoes the effect of Lock
      +	Unlock(passphrase []byte) error
      +
      +	// Signers returns signers for all the known keys.
      +	Signers() ([]ssh.Signer, error)
      +}
      +
      +// AddedKey describes an SSH key to be added to an Agent.
      +type AddedKey struct {
      +	// PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey or
      +	// *ecdsa.PrivateKey, which will be inserted into the agent.
      +	PrivateKey interface{}
      +	// Certificate, if not nil, is communicated to the agent and will be
      +	// stored with the key.
      +	Certificate *ssh.Certificate
      +	// Comment is an optional, free-form string.
      +	Comment string
      +	// LifetimeSecs, if not zero, is the number of seconds that the
      +	// agent will store the key for.
      +	LifetimeSecs uint32
      +	// ConfirmBeforeUse, if true, requests that the agent confirm with the
      +	// user before each use of this key.
      +	ConfirmBeforeUse bool
      +}
      +
      +// See [PROTOCOL.agent], section 3.
      +const (
      +	agentRequestV1Identities = 1
      +
      +	// 3.2 Requests from client to agent for protocol 2 key operations
      +	agentAddIdentity         = 17
      +	agentRemoveIdentity      = 18
      +	agentRemoveAllIdentities = 19
      +	agentAddIdConstrained    = 25
      +
      +	// 3.3 Key-type independent requests from client to agent
      +	agentAddSmartcardKey            = 20
      +	agentRemoveSmartcardKey         = 21
      +	agentLock                       = 22
      +	agentUnlock                     = 23
      +	agentAddSmartcardKeyConstrained = 26
      +
      +	// 3.7 Key constraint identifiers
      +	agentConstrainLifetime = 1
      +	agentConstrainConfirm  = 2
      +)
      +
      +// maxAgentResponseBytes is the maximum agent reply size that is accepted. This
      +// is a sanity check, not a limit in the spec.
      +const maxAgentResponseBytes = 16 << 20
      +
      +// Agent messages:
      +// These structures mirror the wire format of the corresponding ssh agent
      +// messages found in [PROTOCOL.agent].
      +
      +// 3.4 Generic replies from agent to client
      +const agentFailure = 5
      +
      +type failureAgentMsg struct{}
      +
      +const agentSuccess = 6
      +
      +type successAgentMsg struct{}
      +
      +// See [PROTOCOL.agent], section 2.5.2.
      +const agentRequestIdentities = 11
      +
      +type requestIdentitiesAgentMsg struct{}
      +
      +// See [PROTOCOL.agent], section 2.5.2.
      +const agentIdentitiesAnswer = 12
      +
      +type identitiesAnswerAgentMsg struct {
      +	NumKeys uint32 `sshtype:"12"`
      +	Keys    []byte `ssh:"rest"`
      +}
      +
      +// See [PROTOCOL.agent], section 2.6.2.
      +const agentSignRequest = 13
      +
      +type signRequestAgentMsg struct {
      +	KeyBlob []byte `sshtype:"13"`
      +	Data    []byte
      +	Flags   uint32
      +}
      +
      +// See [PROTOCOL.agent], section 2.6.2.
      +
      +// 3.6 Replies from agent to client for protocol 2 key operations
      +const agentSignResponse = 14
      +
      +type signResponseAgentMsg struct {
      +	SigBlob []byte `sshtype:"14"`
      +}
      +
      +type publicKey struct {
      +	Format string
      +	Rest   []byte `ssh:"rest"`
      +}
      +
      +// Key represents a protocol 2 public key as defined in
      +// [PROTOCOL.agent], section 2.5.2.
      +type Key struct {
      +	Format  string
      +	Blob    []byte
      +	Comment string
      +}
      +
      +func clientErr(err error) error {
      +	return fmt.Errorf("agent: client error: %v", err)
      +}
      +
      +// String returns the storage form of an agent key with the format, base64
      +// encoded serialized key, and the comment if it is not empty.
      +func (k *Key) String() string {
      +	s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob)
      +
      +	if k.Comment != "" {
      +		s += " " + k.Comment
      +	}
      +
      +	return s
      +}
      +
      +// Type returns the public key type.
      +func (k *Key) Type() string {
      +	return k.Format
      +}
      +
      +// Marshal returns key blob to satisfy the ssh.PublicKey interface.
      +func (k *Key) Marshal() []byte {
      +	return k.Blob
      +}
      +
      +// Verify satisfies the ssh.PublicKey interface, but is not
      +// implemented for agent keys.
      +func (k *Key) Verify(data []byte, sig *ssh.Signature) error {
      +	return errors.New("agent: agent key does not know how to verify")
      +}
      +
      +type wireKey struct {
      +	Format string
      +	Rest   []byte `ssh:"rest"`
      +}
      +
      +func parseKey(in []byte) (out *Key, rest []byte, err error) {
      +	var record struct {
      +		Blob    []byte
      +		Comment string
      +		Rest    []byte `ssh:"rest"`
      +	}
      +
      +	if err := ssh.Unmarshal(in, &record); err != nil {
      +		return nil, nil, err
      +	}
      +
      +	var wk wireKey
      +	if err := ssh.Unmarshal(record.Blob, &wk); err != nil {
      +		return nil, nil, err
      +	}
      +
      +	return &Key{
      +		Format:  wk.Format,
      +		Blob:    record.Blob,
      +		Comment: record.Comment,
      +	}, record.Rest, nil
      +}
      +
      +// client is a client for an ssh-agent process.
      +type client struct {
      +	// conn is typically a *net.UnixConn
      +	conn io.ReadWriter
      +	// mu is used to prevent concurrent access to the agent
      +	mu sync.Mutex
      +}
      +
      +// NewClient returns an Agent that talks to an ssh-agent process over
      +// the given connection.
      +func NewClient(rw io.ReadWriter) Agent {
      +	return &client{conn: rw}
      +}
      +
      +// call sends an RPC to the agent. On success, the reply is
      +// unmarshaled into reply and replyType is set to the first byte of
      +// the reply, which contains the type of the message.
      +func (c *client) call(req []byte) (reply interface{}, err error) {
      +	c.mu.Lock()
      +	defer c.mu.Unlock()
      +
      +	msg := make([]byte, 4+len(req))
      +	binary.BigEndian.PutUint32(msg, uint32(len(req)))
      +	copy(msg[4:], req)
      +	if _, err = c.conn.Write(msg); err != nil {
      +		return nil, clientErr(err)
      +	}
      +
      +	var respSizeBuf [4]byte
      +	if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil {
      +		return nil, clientErr(err)
      +	}
      +	respSize := binary.BigEndian.Uint32(respSizeBuf[:])
      +	if respSize > maxAgentResponseBytes {
      +		return nil, clientErr(err)
      +	}
      +
      +	buf := make([]byte, respSize)
      +	if _, err = io.ReadFull(c.conn, buf); err != nil {
      +		return nil, clientErr(err)
      +	}
      +	reply, err = unmarshal(buf)
      +	if err != nil {
      +		return nil, clientErr(err)
      +	}
      +	return reply, err
      +}
      +
      +func (c *client) simpleCall(req []byte) error {
      +	resp, err := c.call(req)
      +	if err != nil {
      +		return err
      +	}
      +	if _, ok := resp.(*successAgentMsg); ok {
      +		return nil
      +	}
      +	return errors.New("agent: failure")
      +}
      +
      +func (c *client) RemoveAll() error {
      +	return c.simpleCall([]byte{agentRemoveAllIdentities})
      +}
      +
      +func (c *client) Remove(key ssh.PublicKey) error {
      +	req := ssh.Marshal(&agentRemoveIdentityMsg{
      +		KeyBlob: key.Marshal(),
      +	})
      +	return c.simpleCall(req)
      +}
      +
      +func (c *client) Lock(passphrase []byte) error {
      +	req := ssh.Marshal(&agentLockMsg{
      +		Passphrase: passphrase,
      +	})
      +	return c.simpleCall(req)
      +}
      +
      +func (c *client) Unlock(passphrase []byte) error {
      +	req := ssh.Marshal(&agentUnlockMsg{
      +		Passphrase: passphrase,
      +	})
      +	return c.simpleCall(req)
      +}
      +
      +// List returns the identities known to the agent.
      +func (c *client) List() ([]*Key, error) {
      +	// see [PROTOCOL.agent] section 2.5.2.
      +	req := []byte{agentRequestIdentities}
      +
      +	msg, err := c.call(req)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	switch msg := msg.(type) {
      +	case *identitiesAnswerAgentMsg:
      +		if msg.NumKeys > maxAgentResponseBytes/8 {
      +			return nil, errors.New("agent: too many keys in agent reply")
      +		}
      +		keys := make([]*Key, msg.NumKeys)
      +		data := msg.Keys
      +		for i := uint32(0); i < msg.NumKeys; i++ {
      +			var key *Key
      +			var err error
      +			if key, data, err = parseKey(data); err != nil {
      +				return nil, err
      +			}
      +			keys[i] = key
      +		}
      +		return keys, nil
      +	case *failureAgentMsg:
      +		return nil, errors.New("agent: failed to list keys")
      +	}
      +	panic("unreachable")
      +}
      +
      +// Sign has the agent sign the data using a protocol 2 key as defined
      +// in [PROTOCOL.agent] section 2.6.2.
      +func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
      +	req := ssh.Marshal(signRequestAgentMsg{
      +		KeyBlob: key.Marshal(),
      +		Data:    data,
      +	})
      +
      +	msg, err := c.call(req)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	switch msg := msg.(type) {
      +	case *signResponseAgentMsg:
      +		var sig ssh.Signature
      +		if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil {
      +			return nil, err
      +		}
      +
      +		return &sig, nil
      +	case *failureAgentMsg:
      +		return nil, errors.New("agent: failed to sign challenge")
      +	}
      +	panic("unreachable")
      +}
      +
      +// unmarshal parses an agent message in packet, returning the parsed
      +// form and the message type of packet.
      +func unmarshal(packet []byte) (interface{}, error) {
      +	if len(packet) < 1 {
      +		return nil, errors.New("agent: empty packet")
      +	}
      +	var msg interface{}
      +	switch packet[0] {
      +	case agentFailure:
      +		return new(failureAgentMsg), nil
      +	case agentSuccess:
      +		return new(successAgentMsg), nil
      +	case agentIdentitiesAnswer:
      +		msg = new(identitiesAnswerAgentMsg)
      +	case agentSignResponse:
      +		msg = new(signResponseAgentMsg)
      +	default:
      +		return nil, fmt.Errorf("agent: unknown type tag %d", packet[0])
      +	}
      +	if err := ssh.Unmarshal(packet, msg); err != nil {
      +		return nil, err
      +	}
      +	return msg, nil
      +}
      +
      +type rsaKeyMsg struct {
      +	Type        string `sshtype:"17"`
      +	N           *big.Int
      +	E           *big.Int
      +	D           *big.Int
      +	Iqmp        *big.Int // IQMP = Inverse Q Mod P
      +	P           *big.Int
      +	Q           *big.Int
      +	Comments    string
      +	Constraints []byte `ssh:"rest"`
      +}
      +
      +type dsaKeyMsg struct {
      +	Type        string `sshtype:"17"`
      +	P           *big.Int
      +	Q           *big.Int
      +	G           *big.Int
      +	Y           *big.Int
      +	X           *big.Int
      +	Comments    string
      +	Constraints []byte `ssh:"rest"`
      +}
      +
      +type ecdsaKeyMsg struct {
      +	Type        string `sshtype:"17"`
      +	Curve       string
      +	KeyBytes    []byte
      +	D           *big.Int
      +	Comments    string
      +	Constraints []byte `ssh:"rest"`
      +}
      +
      +// Insert adds a private key to the agent.
      +func (c *client) insertKey(s interface{}, comment string, constraints []byte) error {
      +	var req []byte
      +	switch k := s.(type) {
      +	case *rsa.PrivateKey:
      +		if len(k.Primes) != 2 {
      +			return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes))
      +		}
      +		k.Precompute()
      +		req = ssh.Marshal(rsaKeyMsg{
      +			Type:        ssh.KeyAlgoRSA,
      +			N:           k.N,
      +			E:           big.NewInt(int64(k.E)),
      +			D:           k.D,
      +			Iqmp:        k.Precomputed.Qinv,
      +			P:           k.Primes[0],
      +			Q:           k.Primes[1],
      +			Comments:    comment,
      +			Constraints: constraints,
      +		})
      +	case *dsa.PrivateKey:
      +		req = ssh.Marshal(dsaKeyMsg{
      +			Type:        ssh.KeyAlgoDSA,
      +			P:           k.P,
      +			Q:           k.Q,
      +			G:           k.G,
      +			Y:           k.Y,
      +			X:           k.X,
      +			Comments:    comment,
      +			Constraints: constraints,
      +		})
      +	case *ecdsa.PrivateKey:
      +		nistID := fmt.Sprintf("nistp%d", k.Params().BitSize)
      +		req = ssh.Marshal(ecdsaKeyMsg{
      +			Type:        "ecdsa-sha2-" + nistID,
      +			Curve:       nistID,
      +			KeyBytes:    elliptic.Marshal(k.Curve, k.X, k.Y),
      +			D:           k.D,
      +			Comments:    comment,
      +			Constraints: constraints,
      +		})
      +	default:
      +		return fmt.Errorf("agent: unsupported key type %T", s)
      +	}
      +
      +	// if constraints are present then the message type needs to be changed.
      +	if len(constraints) != 0 {
      +		req[0] = agentAddIdConstrained
      +	}
      +
      +	resp, err := c.call(req)
      +	if err != nil {
      +		return err
      +	}
      +	if _, ok := resp.(*successAgentMsg); ok {
      +		return nil
      +	}
      +	return errors.New("agent: failure")
      +}
      +
      +type rsaCertMsg struct {
      +	Type        string `sshtype:"17"`
      +	CertBytes   []byte
      +	D           *big.Int
      +	Iqmp        *big.Int // IQMP = Inverse Q Mod P
      +	P           *big.Int
      +	Q           *big.Int
      +	Comments    string
      +	Constraints []byte `ssh:"rest"`
      +}
      +
      +type dsaCertMsg struct {
      +	Type        string `sshtype:"17"`
      +	CertBytes   []byte
      +	X           *big.Int
      +	Comments    string
      +	Constraints []byte `ssh:"rest"`
      +}
      +
      +type ecdsaCertMsg struct {
      +	Type        string `sshtype:"17"`
      +	CertBytes   []byte
      +	D           *big.Int
      +	Comments    string
      +	Constraints []byte `ssh:"rest"`
      +}
      +
      +// Insert adds a private key to the agent. If a certificate is given,
      +// that certificate is added instead as public key.
      +func (c *client) Add(key AddedKey) error {
      +	var constraints []byte
      +
      +	if secs := key.LifetimeSecs; secs != 0 {
      +		constraints = append(constraints, agentConstrainLifetime)
      +
      +		var secsBytes [4]byte
      +		binary.BigEndian.PutUint32(secsBytes[:], secs)
      +		constraints = append(constraints, secsBytes[:]...)
      +	}
      +
      +	if key.ConfirmBeforeUse {
      +		constraints = append(constraints, agentConstrainConfirm)
      +	}
      +
      +	if cert := key.Certificate; cert == nil {
      +		return c.insertKey(key.PrivateKey, key.Comment, constraints)
      +	} else {
      +		return c.insertCert(key.PrivateKey, cert, key.Comment, constraints)
      +	}
      +}
      +
      +func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error {
      +	var req []byte
      +	switch k := s.(type) {
      +	case *rsa.PrivateKey:
      +		if len(k.Primes) != 2 {
      +			return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes))
      +		}
      +		k.Precompute()
      +		req = ssh.Marshal(rsaCertMsg{
      +			Type:        cert.Type(),
      +			CertBytes:   cert.Marshal(),
      +			D:           k.D,
      +			Iqmp:        k.Precomputed.Qinv,
      +			P:           k.Primes[0],
      +			Q:           k.Primes[1],
      +			Comments:    comment,
      +			Constraints: constraints,
      +		})
      +	case *dsa.PrivateKey:
      +		req = ssh.Marshal(dsaCertMsg{
      +			Type:      cert.Type(),
      +			CertBytes: cert.Marshal(),
      +			X:         k.X,
      +			Comments:  comment,
      +		})
      +	case *ecdsa.PrivateKey:
      +		req = ssh.Marshal(ecdsaCertMsg{
      +			Type:      cert.Type(),
      +			CertBytes: cert.Marshal(),
      +			D:         k.D,
      +			Comments:  comment,
      +		})
      +	default:
      +		return fmt.Errorf("agent: unsupported key type %T", s)
      +	}
      +
      +	// if constraints are present then the message type needs to be changed.
      +	if len(constraints) != 0 {
      +		req[0] = agentAddIdConstrained
      +	}
      +
      +	signer, err := ssh.NewSignerFromKey(s)
      +	if err != nil {
      +		return err
      +	}
      +	if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 {
      +		return errors.New("agent: signer and cert have different public key")
      +	}
      +
      +	resp, err := c.call(req)
      +	if err != nil {
      +		return err
      +	}
      +	if _, ok := resp.(*successAgentMsg); ok {
      +		return nil
      +	}
      +	return errors.New("agent: failure")
      +}
      +
      +// Signers provides a callback for client authentication.
      +func (c *client) Signers() ([]ssh.Signer, error) {
      +	keys, err := c.List()
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	var result []ssh.Signer
      +	for _, k := range keys {
      +		result = append(result, &agentKeyringSigner{c, k})
      +	}
      +	return result, nil
      +}
      +
      +type agentKeyringSigner struct {
      +	agent *client
      +	pub   ssh.PublicKey
      +}
      +
      +func (s *agentKeyringSigner) PublicKey() ssh.PublicKey {
      +	return s.pub
      +}
      +
      +func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) {
      +	// The agent has its own entropy source, so the rand argument is ignored.
      +	return s.agent.Sign(s.pub, data)
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/agent/client_test.go b/vendor/golang.org/x/crypto/ssh/agent/client_test.go
      new file mode 100644
      index 00000000..ec7198d5
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/agent/client_test.go
      @@ -0,0 +1,287 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package agent
      +
      +import (
      +	"bytes"
      +	"crypto/rand"
      +	"errors"
      +	"net"
      +	"os"
      +	"os/exec"
      +	"path/filepath"
      +	"strconv"
      +	"testing"
      +
      +	"golang.org/x/crypto/ssh"
      +)
      +
      +// startAgent executes ssh-agent, and returns a Agent interface to it.
      +func startAgent(t *testing.T) (client Agent, socket string, cleanup func()) {
      +	if testing.Short() {
      +		// ssh-agent is not always available, and the key
      +		// types supported vary by platform.
      +		t.Skip("skipping test due to -short")
      +	}
      +
      +	bin, err := exec.LookPath("ssh-agent")
      +	if err != nil {
      +		t.Skip("could not find ssh-agent")
      +	}
      +
      +	cmd := exec.Command(bin, "-s")
      +	out, err := cmd.Output()
      +	if err != nil {
      +		t.Fatalf("cmd.Output: %v", err)
      +	}
      +
      +	/* Output looks like:
      +
      +		   SSH_AUTH_SOCK=/tmp/ssh-P65gpcqArqvH/agent.15541; export SSH_AUTH_SOCK;
      +	           SSH_AGENT_PID=15542; export SSH_AGENT_PID;
      +	           echo Agent pid 15542;
      +	*/
      +	fields := bytes.Split(out, []byte(";"))
      +	line := bytes.SplitN(fields[0], []byte("="), 2)
      +	line[0] = bytes.TrimLeft(line[0], "\n")
      +	if string(line[0]) != "SSH_AUTH_SOCK" {
      +		t.Fatalf("could not find key SSH_AUTH_SOCK in %q", fields[0])
      +	}
      +	socket = string(line[1])
      +
      +	line = bytes.SplitN(fields[2], []byte("="), 2)
      +	line[0] = bytes.TrimLeft(line[0], "\n")
      +	if string(line[0]) != "SSH_AGENT_PID" {
      +		t.Fatalf("could not find key SSH_AGENT_PID in %q", fields[2])
      +	}
      +	pidStr := line[1]
      +	pid, err := strconv.Atoi(string(pidStr))
      +	if err != nil {
      +		t.Fatalf("Atoi(%q): %v", pidStr, err)
      +	}
      +
      +	conn, err := net.Dial("unix", string(socket))
      +	if err != nil {
      +		t.Fatalf("net.Dial: %v", err)
      +	}
      +
      +	ac := NewClient(conn)
      +	return ac, socket, func() {
      +		proc, _ := os.FindProcess(pid)
      +		if proc != nil {
      +			proc.Kill()
      +		}
      +		conn.Close()
      +		os.RemoveAll(filepath.Dir(socket))
      +	}
      +}
      +
      +func testAgent(t *testing.T, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) {
      +	agent, _, cleanup := startAgent(t)
      +	defer cleanup()
      +
      +	testAgentInterface(t, agent, key, cert, lifetimeSecs)
      +}
      +
      +func testAgentInterface(t *testing.T, agent Agent, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) {
      +	signer, err := ssh.NewSignerFromKey(key)
      +	if err != nil {
      +		t.Fatalf("NewSignerFromKey(%T): %v", key, err)
      +	}
      +	// The agent should start up empty.
      +	if keys, err := agent.List(); err != nil {
      +		t.Fatalf("RequestIdentities: %v", err)
      +	} else if len(keys) > 0 {
      +		t.Fatalf("got %d keys, want 0: %v", len(keys), keys)
      +	}
      +
      +	// Attempt to insert the key, with certificate if specified.
      +	var pubKey ssh.PublicKey
      +	if cert != nil {
      +		err = agent.Add(AddedKey{
      +			PrivateKey:   key,
      +			Certificate:  cert,
      +			Comment:      "comment",
      +			LifetimeSecs: lifetimeSecs,
      +		})
      +		pubKey = cert
      +	} else {
      +		err = agent.Add(AddedKey{PrivateKey: key, Comment: "comment", LifetimeSecs: lifetimeSecs})
      +		pubKey = signer.PublicKey()
      +	}
      +	if err != nil {
      +		t.Fatalf("insert(%T): %v", key, err)
      +	}
      +
      +	// Did the key get inserted successfully?
      +	if keys, err := agent.List(); err != nil {
      +		t.Fatalf("List: %v", err)
      +	} else if len(keys) != 1 {
      +		t.Fatalf("got %v, want 1 key", keys)
      +	} else if keys[0].Comment != "comment" {
      +		t.Fatalf("key comment: got %v, want %v", keys[0].Comment, "comment")
      +	} else if !bytes.Equal(keys[0].Blob, pubKey.Marshal()) {
      +		t.Fatalf("key mismatch")
      +	}
      +
      +	// Can the agent make a valid signature?
      +	data := []byte("hello")
      +	sig, err := agent.Sign(pubKey, data)
      +	if err != nil {
      +		t.Fatalf("Sign(%s): %v", pubKey.Type(), err)
      +	}
      +
      +	if err := pubKey.Verify(data, sig); err != nil {
      +		t.Fatalf("Verify(%s): %v", pubKey.Type(), err)
      +	}
      +}
      +
      +func TestAgent(t *testing.T) {
      +	for _, keyType := range []string{"rsa", "dsa", "ecdsa"} {
      +		testAgent(t, testPrivateKeys[keyType], nil, 0)
      +	}
      +}
      +
      +func TestCert(t *testing.T) {
      +	cert := &ssh.Certificate{
      +		Key:         testPublicKeys["rsa"],
      +		ValidBefore: ssh.CertTimeInfinity,
      +		CertType:    ssh.UserCert,
      +	}
      +	cert.SignCert(rand.Reader, testSigners["ecdsa"])
      +
      +	testAgent(t, testPrivateKeys["rsa"], cert, 0)
      +}
      +
      +func TestConstraints(t *testing.T) {
      +	testAgent(t, testPrivateKeys["rsa"], nil, 3600 /* lifetime in seconds */)
      +}
      +
      +// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and
      +// therefore is buffered (net.Pipe deadlocks if both sides start with
      +// a write.)
      +func netPipe() (net.Conn, net.Conn, error) {
      +	listener, err := net.Listen("tcp", "127.0.0.1:0")
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +	defer listener.Close()
      +	c1, err := net.Dial("tcp", listener.Addr().String())
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +
      +	c2, err := listener.Accept()
      +	if err != nil {
      +		c1.Close()
      +		return nil, nil, err
      +	}
      +
      +	return c1, c2, nil
      +}
      +
      +func TestAuth(t *testing.T) {
      +	a, b, err := netPipe()
      +	if err != nil {
      +		t.Fatalf("netPipe: %v", err)
      +	}
      +
      +	defer a.Close()
      +	defer b.Close()
      +
      +	agent, _, cleanup := startAgent(t)
      +	defer cleanup()
      +
      +	if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment"}); err != nil {
      +		t.Errorf("Add: %v", err)
      +	}
      +
      +	serverConf := ssh.ServerConfig{}
      +	serverConf.AddHostKey(testSigners["rsa"])
      +	serverConf.PublicKeyCallback = func(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
      +		if bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) {
      +			return nil, nil
      +		}
      +
      +		return nil, errors.New("pubkey rejected")
      +	}
      +
      +	go func() {
      +		conn, _, _, err := ssh.NewServerConn(a, &serverConf)
      +		if err != nil {
      +			t.Fatalf("Server: %v", err)
      +		}
      +		conn.Close()
      +	}()
      +
      +	conf := ssh.ClientConfig{}
      +	conf.Auth = append(conf.Auth, ssh.PublicKeysCallback(agent.Signers))
      +	conn, _, _, err := ssh.NewClientConn(b, "", &conf)
      +	if err != nil {
      +		t.Fatalf("NewClientConn: %v", err)
      +	}
      +	conn.Close()
      +}
      +
      +func TestLockClient(t *testing.T) {
      +	agent, _, cleanup := startAgent(t)
      +	defer cleanup()
      +	testLockAgent(agent, t)
      +}
      +
      +func testLockAgent(agent Agent, t *testing.T) {
      +	if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment 1"}); err != nil {
      +		t.Errorf("Add: %v", err)
      +	}
      +	if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["dsa"], Comment: "comment dsa"}); err != nil {
      +		t.Errorf("Add: %v", err)
      +	}
      +	if keys, err := agent.List(); err != nil {
      +		t.Errorf("List: %v", err)
      +	} else if len(keys) != 2 {
      +		t.Errorf("Want 2 keys, got %v", keys)
      +	}
      +
      +	passphrase := []byte("secret")
      +	if err := agent.Lock(passphrase); err != nil {
      +		t.Errorf("Lock: %v", err)
      +	}
      +
      +	if keys, err := agent.List(); err != nil {
      +		t.Errorf("List: %v", err)
      +	} else if len(keys) != 0 {
      +		t.Errorf("Want 0 keys, got %v", keys)
      +	}
      +
      +	signer, _ := ssh.NewSignerFromKey(testPrivateKeys["rsa"])
      +	if _, err := agent.Sign(signer.PublicKey(), []byte("hello")); err == nil {
      +		t.Fatalf("Sign did not fail")
      +	}
      +
      +	if err := agent.Remove(signer.PublicKey()); err == nil {
      +		t.Fatalf("Remove did not fail")
      +	}
      +
      +	if err := agent.RemoveAll(); err == nil {
      +		t.Fatalf("RemoveAll did not fail")
      +	}
      +
      +	if err := agent.Unlock(nil); err == nil {
      +		t.Errorf("Unlock with wrong passphrase succeeded")
      +	}
      +	if err := agent.Unlock(passphrase); err != nil {
      +		t.Errorf("Unlock: %v", err)
      +	}
      +
      +	if err := agent.Remove(signer.PublicKey()); err != nil {
      +		t.Fatalf("Remove: %v", err)
      +	}
      +
      +	if keys, err := agent.List(); err != nil {
      +		t.Errorf("List: %v", err)
      +	} else if len(keys) != 1 {
      +		t.Errorf("Want 1 keys, got %v", keys)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/agent/example_test.go b/vendor/golang.org/x/crypto/ssh/agent/example_test.go
      new file mode 100644
      index 00000000..c1130f77
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/agent/example_test.go
      @@ -0,0 +1,40 @@
      +// Copyright 2016 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package agent_test
      +
      +import (
      +	"log"
      +	"os"
      +	"net"
      +
      +        "golang.org/x/crypto/ssh"
      +        "golang.org/x/crypto/ssh/agent"
      +)
      +
      +func ExampleClientAgent() {
      +	// ssh-agent has a UNIX socket under $SSH_AUTH_SOCK
      +	socket := os.Getenv("SSH_AUTH_SOCK")
      +        conn, err := net.Dial("unix", socket)
      +        if err != nil {
      +                log.Fatalf("net.Dial: %v", err)
      +        }
      +	agentClient := agent.NewClient(conn)
      +	config := &ssh.ClientConfig{
      +		User: "username",
      +		Auth: []ssh.AuthMethod{
      +			// Use a callback rather than PublicKeys
      +			// so we only consult the agent once the remote server
      +			// wants it.
      +			ssh.PublicKeysCallback(agentClient.Signers),
      +		},
      +	}
      +
      +	sshc, err := ssh.Dial("tcp", "localhost:22", config)
      +	if err != nil {
      +		log.Fatalf("Dial: %v", err)
      +	}
      +	// .. use sshc
      +	sshc.Close()
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/agent/forward.go b/vendor/golang.org/x/crypto/ssh/agent/forward.go
      new file mode 100644
      index 00000000..fd24ba90
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/agent/forward.go
      @@ -0,0 +1,103 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package agent
      +
      +import (
      +	"errors"
      +	"io"
      +	"net"
      +	"sync"
      +
      +	"golang.org/x/crypto/ssh"
      +)
      +
      +// RequestAgentForwarding sets up agent forwarding for the session.
      +// ForwardToAgent or ForwardToRemote should be called to route
      +// the authentication requests.
      +func RequestAgentForwarding(session *ssh.Session) error {
      +	ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil)
      +	if err != nil {
      +		return err
      +	}
      +	if !ok {
      +		return errors.New("forwarding request denied")
      +	}
      +	return nil
      +}
      +
      +// ForwardToAgent routes authentication requests to the given keyring.
      +func ForwardToAgent(client *ssh.Client, keyring Agent) error {
      +	channels := client.HandleChannelOpen(channelType)
      +	if channels == nil {
      +		return errors.New("agent: already have handler for " + channelType)
      +	}
      +
      +	go func() {
      +		for ch := range channels {
      +			channel, reqs, err := ch.Accept()
      +			if err != nil {
      +				continue
      +			}
      +			go ssh.DiscardRequests(reqs)
      +			go func() {
      +				ServeAgent(keyring, channel)
      +				channel.Close()
      +			}()
      +		}
      +	}()
      +	return nil
      +}
      +
      +const channelType = "auth-agent@openssh.com"
      +
      +// ForwardToRemote routes authentication requests to the ssh-agent
      +// process serving on the given unix socket.
      +func ForwardToRemote(client *ssh.Client, addr string) error {
      +	channels := client.HandleChannelOpen(channelType)
      +	if channels == nil {
      +		return errors.New("agent: already have handler for " + channelType)
      +	}
      +	conn, err := net.Dial("unix", addr)
      +	if err != nil {
      +		return err
      +	}
      +	conn.Close()
      +
      +	go func() {
      +		for ch := range channels {
      +			channel, reqs, err := ch.Accept()
      +			if err != nil {
      +				continue
      +			}
      +			go ssh.DiscardRequests(reqs)
      +			go forwardUnixSocket(channel, addr)
      +		}
      +	}()
      +	return nil
      +}
      +
      +func forwardUnixSocket(channel ssh.Channel, addr string) {
      +	conn, err := net.Dial("unix", addr)
      +	if err != nil {
      +		return
      +	}
      +
      +	var wg sync.WaitGroup
      +	wg.Add(2)
      +	go func() {
      +		io.Copy(conn, channel)
      +		conn.(*net.UnixConn).CloseWrite()
      +		wg.Done()
      +	}()
      +	go func() {
      +		io.Copy(channel, conn)
      +		channel.CloseWrite()
      +		wg.Done()
      +	}()
      +
      +	wg.Wait()
      +	conn.Close()
      +	channel.Close()
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go
      new file mode 100644
      index 00000000..12ffa82b
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go
      @@ -0,0 +1,184 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package agent
      +
      +import (
      +	"bytes"
      +	"crypto/rand"
      +	"crypto/subtle"
      +	"errors"
      +	"fmt"
      +	"sync"
      +
      +	"golang.org/x/crypto/ssh"
      +)
      +
      +type privKey struct {
      +	signer  ssh.Signer
      +	comment string
      +}
      +
      +type keyring struct {
      +	mu   sync.Mutex
      +	keys []privKey
      +
      +	locked     bool
      +	passphrase []byte
      +}
      +
      +var errLocked = errors.New("agent: locked")
      +
      +// NewKeyring returns an Agent that holds keys in memory.  It is safe
      +// for concurrent use by multiple goroutines.
      +func NewKeyring() Agent {
      +	return &keyring{}
      +}
      +
      +// RemoveAll removes all identities.
      +func (r *keyring) RemoveAll() error {
      +	r.mu.Lock()
      +	defer r.mu.Unlock()
      +	if r.locked {
      +		return errLocked
      +	}
      +
      +	r.keys = nil
      +	return nil
      +}
      +
      +// Remove removes all identities with the given public key.
      +func (r *keyring) Remove(key ssh.PublicKey) error {
      +	r.mu.Lock()
      +	defer r.mu.Unlock()
      +	if r.locked {
      +		return errLocked
      +	}
      +
      +	want := key.Marshal()
      +	found := false
      +	for i := 0; i < len(r.keys); {
      +		if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) {
      +			found = true
      +			r.keys[i] = r.keys[len(r.keys)-1]
      +			r.keys = r.keys[:len(r.keys)-1]
      +			continue
      +		} else {
      +			i++
      +		}
      +	}
      +
      +	if !found {
      +		return errors.New("agent: key not found")
      +	}
      +	return nil
      +}
      +
+// Lock locks the agent. Sign and Remove will fail, and List will return an empty list.
      +func (r *keyring) Lock(passphrase []byte) error {
      +	r.mu.Lock()
      +	defer r.mu.Unlock()
      +	if r.locked {
      +		return errLocked
      +	}
      +
      +	r.locked = true
      +	r.passphrase = passphrase
      +	return nil
      +}
      +
      +// Unlock undoes the effect of Lock
      +func (r *keyring) Unlock(passphrase []byte) error {
      +	r.mu.Lock()
      +	defer r.mu.Unlock()
      +	if !r.locked {
      +		return errors.New("agent: not locked")
      +	}
      +	if len(passphrase) != len(r.passphrase) || 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) {
      +		return fmt.Errorf("agent: incorrect passphrase")
      +	}
      +
      +	r.locked = false
      +	r.passphrase = nil
      +	return nil
      +}
      +
      +// List returns the identities known to the agent.
      +func (r *keyring) List() ([]*Key, error) {
      +	r.mu.Lock()
      +	defer r.mu.Unlock()
      +	if r.locked {
      +		// section 2.7: locked agents return empty.
      +		return nil, nil
      +	}
      +
      +	var ids []*Key
      +	for _, k := range r.keys {
      +		pub := k.signer.PublicKey()
      +		ids = append(ids, &Key{
      +			Format:  pub.Type(),
      +			Blob:    pub.Marshal(),
      +			Comment: k.comment})
      +	}
      +	return ids, nil
      +}
      +
+// Add adds a private key to the keyring. If a certificate
      +// is given, that certificate is added as public key. Note that
      +// any constraints given are ignored.
      +func (r *keyring) Add(key AddedKey) error {
      +	r.mu.Lock()
      +	defer r.mu.Unlock()
      +	if r.locked {
      +		return errLocked
      +	}
      +	signer, err := ssh.NewSignerFromKey(key.PrivateKey)
      +
      +	if err != nil {
      +		return err
      +	}
      +
      +	if cert := key.Certificate; cert != nil {
      +		signer, err = ssh.NewCertSigner(cert, signer)
      +		if err != nil {
      +			return err
      +		}
      +	}
      +
      +	r.keys = append(r.keys, privKey{signer, key.Comment})
      +
      +	return nil
      +}
      +
      +// Sign returns a signature for the data.
      +func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
      +	r.mu.Lock()
      +	defer r.mu.Unlock()
      +	if r.locked {
      +		return nil, errLocked
      +	}
      +
      +	wanted := key.Marshal()
      +	for _, k := range r.keys {
      +		if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) {
      +			return k.signer.Sign(rand.Reader, data)
      +		}
      +	}
      +	return nil, errors.New("not found")
      +}
      +
      +// Signers returns signers for all the known keys.
      +func (r *keyring) Signers() ([]ssh.Signer, error) {
      +	r.mu.Lock()
      +	defer r.mu.Unlock()
      +	if r.locked {
      +		return nil, errLocked
      +	}
      +
      +	s := make([]ssh.Signer, 0, len(r.keys))
      +	for _, k := range r.keys {
      +		s = append(s, k.signer)
      +	}
      +	return s, nil
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring_test.go b/vendor/golang.org/x/crypto/ssh/agent/keyring_test.go
      new file mode 100644
      index 00000000..7f059057
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring_test.go
      @@ -0,0 +1,78 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package agent
      +
      +import (
      +	"testing"
      +)
      +
      +func addTestKey(t *testing.T, a Agent, keyName string) {
      +	err := a.Add(AddedKey{
      +		PrivateKey: testPrivateKeys[keyName],
      +		Comment:    keyName,
      +	})
      +	if err != nil {
      +		t.Fatalf("failed to add key %q: %v", keyName, err)
      +	}
      +}
      +
      +func removeTestKey(t *testing.T, a Agent, keyName string) {
      +	err := a.Remove(testPublicKeys[keyName])
      +	if err != nil {
      +		t.Fatalf("failed to remove key %q: %v", keyName, err)
      +	}
      +}
      +
      +func validateListedKeys(t *testing.T, a Agent, expectedKeys []string) {
      +	listedKeys, err := a.List()
      +	if err != nil {
      +		t.Fatalf("failed to list keys: %v", err)
      +		return
      +	}
      +	actualKeys := make(map[string]bool)
      +	for _, key := range listedKeys {
      +		actualKeys[key.Comment] = true
      +	}
      +
      +	matchedKeys := make(map[string]bool)
      +	for _, expectedKey := range expectedKeys {
      +		if !actualKeys[expectedKey] {
      +			t.Fatalf("expected key %q, but was not found", expectedKey)
      +		} else {
      +			matchedKeys[expectedKey] = true
      +		}
      +	}
      +
      +	for actualKey := range actualKeys {
      +		if !matchedKeys[actualKey] {
      +			t.Fatalf("key %q was found, but was not expected", actualKey)
      +		}
      +	}
      +}
      +
      +func TestKeyringAddingAndRemoving(t *testing.T) {
      +	keyNames := []string{"dsa", "ecdsa", "rsa", "user"}
      +
      +	// add all test private keys
      +	k := NewKeyring()
      +	for _, keyName := range keyNames {
      +		addTestKey(t, k, keyName)
      +	}
      +	validateListedKeys(t, k, keyNames)
      +
      +	// remove a key in the middle
      +	keyToRemove := keyNames[1]
      +	keyNames = append(keyNames[:1], keyNames[2:]...)
      +
      +	removeTestKey(t, k, keyToRemove)
      +	validateListedKeys(t, k, keyNames)
      +
      +	// remove all keys
      +	err := k.RemoveAll()
      +	if err != nil {
      +		t.Fatalf("failed to remove all keys: %v", err)
      +	}
      +	validateListedKeys(t, k, []string{})
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go
      new file mode 100644
      index 00000000..b21a2018
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/agent/server.go
      @@ -0,0 +1,209 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package agent
      +
      +import (
      +	"crypto/rsa"
      +	"encoding/binary"
      +	"fmt"
      +	"io"
      +	"log"
      +	"math/big"
      +
      +	"golang.org/x/crypto/ssh"
      +)
      +
      +// Server wraps an Agent and uses it to implement the agent side of
+// the ssh-agent wire protocol.
      +type server struct {
      +	agent Agent
      +}
      +
      +func (s *server) processRequestBytes(reqData []byte) []byte {
      +	rep, err := s.processRequest(reqData)
      +	if err != nil {
      +		if err != errLocked {
      +			// TODO(hanwen): provide better logging interface?
      +			log.Printf("agent %d: %v", reqData[0], err)
      +		}
      +		return []byte{agentFailure}
      +	}
      +
      +	if err == nil && rep == nil {
      +		return []byte{agentSuccess}
      +	}
      +
      +	return ssh.Marshal(rep)
      +}
      +
      +func marshalKey(k *Key) []byte {
      +	var record struct {
      +		Blob    []byte
      +		Comment string
      +	}
      +	record.Blob = k.Marshal()
      +	record.Comment = k.Comment
      +
      +	return ssh.Marshal(&record)
      +}
      +
      +type agentV1IdentityMsg struct {
      +	Numkeys uint32 `sshtype:"2"`
      +}
      +
      +type agentRemoveIdentityMsg struct {
      +	KeyBlob []byte `sshtype:"18"`
      +}
      +
      +type agentLockMsg struct {
      +	Passphrase []byte `sshtype:"22"`
      +}
      +
      +type agentUnlockMsg struct {
      +	Passphrase []byte `sshtype:"23"`
      +}
      +
      +func (s *server) processRequest(data []byte) (interface{}, error) {
      +	switch data[0] {
      +	case agentRequestV1Identities:
      +		return &agentV1IdentityMsg{0}, nil
      +	case agentRemoveIdentity:
      +		var req agentRemoveIdentityMsg
      +		if err := ssh.Unmarshal(data, &req); err != nil {
      +			return nil, err
      +		}
      +
      +		var wk wireKey
      +		if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil {
      +			return nil, err
      +		}
      +
      +		return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob})
      +
      +	case agentRemoveAllIdentities:
      +		return nil, s.agent.RemoveAll()
      +
      +	case agentLock:
      +		var req agentLockMsg
      +		if err := ssh.Unmarshal(data, &req); err != nil {
      +			return nil, err
      +		}
      +
      +		return nil, s.agent.Lock(req.Passphrase)
      +
      +	case agentUnlock:
      +		var req agentLockMsg
      +		if err := ssh.Unmarshal(data, &req); err != nil {
      +			return nil, err
      +		}
      +		return nil, s.agent.Unlock(req.Passphrase)
      +
      +	case agentSignRequest:
      +		var req signRequestAgentMsg
      +		if err := ssh.Unmarshal(data, &req); err != nil {
      +			return nil, err
      +		}
      +
      +		var wk wireKey
      +		if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil {
      +			return nil, err
      +		}
      +
      +		k := &Key{
      +			Format: wk.Format,
      +			Blob:   req.KeyBlob,
      +		}
      +
      +		sig, err := s.agent.Sign(k, req.Data) //  TODO(hanwen): flags.
      +		if err != nil {
      +			return nil, err
      +		}
      +		return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil
      +	case agentRequestIdentities:
      +		keys, err := s.agent.List()
      +		if err != nil {
      +			return nil, err
      +		}
      +
      +		rep := identitiesAnswerAgentMsg{
      +			NumKeys: uint32(len(keys)),
      +		}
      +		for _, k := range keys {
      +			rep.Keys = append(rep.Keys, marshalKey(k)...)
      +		}
      +		return rep, nil
      +	case agentAddIdentity:
      +		return nil, s.insertIdentity(data)
      +	}
      +
      +	return nil, fmt.Errorf("unknown opcode %d", data[0])
      +}
      +
      +func (s *server) insertIdentity(req []byte) error {
      +	var record struct {
      +		Type string `sshtype:"17"`
      +		Rest []byte `ssh:"rest"`
      +	}
      +	if err := ssh.Unmarshal(req, &record); err != nil {
      +		return err
      +	}
      +
      +	switch record.Type {
      +	case ssh.KeyAlgoRSA:
      +		var k rsaKeyMsg
      +		if err := ssh.Unmarshal(req, &k); err != nil {
      +			return err
      +		}
      +
      +		priv := rsa.PrivateKey{
      +			PublicKey: rsa.PublicKey{
      +				E: int(k.E.Int64()),
      +				N: k.N,
      +			},
      +			D:      k.D,
      +			Primes: []*big.Int{k.P, k.Q},
      +		}
      +		priv.Precompute()
      +
      +		return s.agent.Add(AddedKey{PrivateKey: &priv, Comment: k.Comments})
      +	}
      +	return fmt.Errorf("not implemented: %s", record.Type)
      +}
      +
      +// ServeAgent serves the agent protocol on the given connection. It
      +// returns when an I/O error occurs.
      +func ServeAgent(agent Agent, c io.ReadWriter) error {
      +	s := &server{agent}
      +
      +	var length [4]byte
      +	for {
      +		if _, err := io.ReadFull(c, length[:]); err != nil {
      +			return err
      +		}
      +		l := binary.BigEndian.Uint32(length[:])
      +		if l > maxAgentResponseBytes {
      +			// We also cap requests.
      +			return fmt.Errorf("agent: request too large: %d", l)
      +		}
      +
      +		req := make([]byte, l)
      +		if _, err := io.ReadFull(c, req); err != nil {
      +			return err
      +		}
      +
      +		repData := s.processRequestBytes(req)
      +		if len(repData) > maxAgentResponseBytes {
      +			return fmt.Errorf("agent: reply too large: %d bytes", len(repData))
      +		}
      +
      +		binary.BigEndian.PutUint32(length[:], uint32(len(repData)))
      +		if _, err := c.Write(length[:]); err != nil {
      +			return err
      +		}
      +		if _, err := c.Write(repData); err != nil {
      +			return err
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/agent/server_test.go b/vendor/golang.org/x/crypto/ssh/agent/server_test.go
      new file mode 100644
      index 00000000..ef0ab293
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/agent/server_test.go
      @@ -0,0 +1,77 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package agent
      +
      +import (
      +	"testing"
      +
      +	"golang.org/x/crypto/ssh"
      +)
      +
      +func TestServer(t *testing.T) {
      +	c1, c2, err := netPipe()
      +	if err != nil {
      +		t.Fatalf("netPipe: %v", err)
      +	}
      +	defer c1.Close()
      +	defer c2.Close()
      +	client := NewClient(c1)
      +
      +	go ServeAgent(NewKeyring(), c2)
      +
      +	testAgentInterface(t, client, testPrivateKeys["rsa"], nil, 0)
      +}
      +
      +func TestLockServer(t *testing.T) {
      +	testLockAgent(NewKeyring(), t)
      +}
      +
      +func TestSetupForwardAgent(t *testing.T) {
      +	a, b, err := netPipe()
      +	if err != nil {
      +		t.Fatalf("netPipe: %v", err)
      +	}
      +
      +	defer a.Close()
      +	defer b.Close()
      +
      +	_, socket, cleanup := startAgent(t)
      +	defer cleanup()
      +
      +	serverConf := ssh.ServerConfig{
      +		NoClientAuth: true,
      +	}
      +	serverConf.AddHostKey(testSigners["rsa"])
      +	incoming := make(chan *ssh.ServerConn, 1)
      +	go func() {
      +		conn, _, _, err := ssh.NewServerConn(a, &serverConf)
      +		if err != nil {
      +			t.Fatalf("Server: %v", err)
      +		}
      +		incoming <- conn
      +	}()
      +
      +	conf := ssh.ClientConfig{}
      +	conn, chans, reqs, err := ssh.NewClientConn(b, "", &conf)
      +	if err != nil {
      +		t.Fatalf("NewClientConn: %v", err)
      +	}
      +	client := ssh.NewClient(conn, chans, reqs)
      +
      +	if err := ForwardToRemote(client, socket); err != nil {
      +		t.Fatalf("SetupForwardAgent: %v", err)
      +	}
      +
      +	server := <-incoming
      +	ch, reqs, err := server.OpenChannel(channelType, nil)
      +	if err != nil {
      +		t.Fatalf("OpenChannel(%q): %v", channelType, err)
      +	}
      +	go ssh.DiscardRequests(reqs)
      +
      +	agentClient := NewClient(ch)
      +	testAgentInterface(t, agentClient, testPrivateKeys["rsa"], nil, 0)
      +	conn.Close()
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/agent/testdata_test.go b/vendor/golang.org/x/crypto/ssh/agent/testdata_test.go
      new file mode 100644
      index 00000000..b7a8781e
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/agent/testdata_test.go
      @@ -0,0 +1,64 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places:
      +// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three
      +// instances.
      +
      +package agent
      +
      +import (
      +	"crypto/rand"
      +	"fmt"
      +
      +	"golang.org/x/crypto/ssh"
      +	"golang.org/x/crypto/ssh/testdata"
      +)
      +
      +var (
      +	testPrivateKeys map[string]interface{}
      +	testSigners     map[string]ssh.Signer
      +	testPublicKeys  map[string]ssh.PublicKey
      +)
      +
      +func init() {
      +	var err error
      +
      +	n := len(testdata.PEMBytes)
      +	testPrivateKeys = make(map[string]interface{}, n)
      +	testSigners = make(map[string]ssh.Signer, n)
      +	testPublicKeys = make(map[string]ssh.PublicKey, n)
      +	for t, k := range testdata.PEMBytes {
      +		testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k)
      +		if err != nil {
      +			panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
      +		}
      +		testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t])
      +		if err != nil {
      +			panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
      +		}
      +		testPublicKeys[t] = testSigners[t].PublicKey()
      +	}
      +
      +	// Create a cert and sign it for use in tests.
      +	testCert := &ssh.Certificate{
      +		Nonce:           []byte{},                       // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
      +		ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
      +		ValidAfter:      0,                              // unix epoch
      +		ValidBefore:     ssh.CertTimeInfinity,           // The end of currently representable time.
      +		Reserved:        []byte{},                       // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
      +		Key:             testPublicKeys["ecdsa"],
      +		SignatureKey:    testPublicKeys["rsa"],
      +		Permissions: ssh.Permissions{
      +			CriticalOptions: map[string]string{},
      +			Extensions:      map[string]string{},
      +		},
      +	}
      +	testCert.SignCert(rand.Reader, testSigners["rsa"])
      +	testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
      +	testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"])
      +	if err != nil {
      +		panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/benchmark_test.go b/vendor/golang.org/x/crypto/ssh/benchmark_test.go
      new file mode 100644
      index 00000000..d9f7eb9b
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/benchmark_test.go
      @@ -0,0 +1,122 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"errors"
      +	"io"
      +	"net"
      +	"testing"
      +)
      +
      +type server struct {
      +	*ServerConn
      +	chans <-chan NewChannel
      +}
      +
      +func newServer(c net.Conn, conf *ServerConfig) (*server, error) {
      +	sconn, chans, reqs, err := NewServerConn(c, conf)
      +	if err != nil {
      +		return nil, err
      +	}
      +	go DiscardRequests(reqs)
      +	return &server{sconn, chans}, nil
      +}
      +
      +func (s *server) Accept() (NewChannel, error) {
      +	n, ok := <-s.chans
      +	if !ok {
      +		return nil, io.EOF
      +	}
      +	return n, nil
      +}
      +
      +func sshPipe() (Conn, *server, error) {
      +	c1, c2, err := netPipe()
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +
      +	clientConf := ClientConfig{
      +		User: "user",
      +	}
      +	serverConf := ServerConfig{
      +		NoClientAuth: true,
      +	}
      +	serverConf.AddHostKey(testSigners["ecdsa"])
      +	done := make(chan *server, 1)
      +	go func() {
      +		server, err := newServer(c2, &serverConf)
      +		if err != nil {
      +			done <- nil
      +		}
      +		done <- server
      +	}()
      +
      +	client, _, reqs, err := NewClientConn(c1, "", &clientConf)
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +
      +	server := <-done
      +	if server == nil {
      +		return nil, nil, errors.New("server handshake failed.")
      +	}
      +	go DiscardRequests(reqs)
      +
      +	return client, server, nil
      +}
      +
      +func BenchmarkEndToEnd(b *testing.B) {
      +	b.StopTimer()
      +
      +	client, server, err := sshPipe()
      +	if err != nil {
      +		b.Fatalf("sshPipe: %v", err)
      +	}
      +
      +	defer client.Close()
      +	defer server.Close()
      +
      +	size := (1 << 20)
      +	input := make([]byte, size)
      +	output := make([]byte, size)
      +	b.SetBytes(int64(size))
      +	done := make(chan int, 1)
      +
      +	go func() {
      +		newCh, err := server.Accept()
      +		if err != nil {
      +			b.Fatalf("Client: %v", err)
      +		}
      +		ch, incoming, err := newCh.Accept()
      +		go DiscardRequests(incoming)
      +		for i := 0; i < b.N; i++ {
      +			if _, err := io.ReadFull(ch, output); err != nil {
      +				b.Fatalf("ReadFull: %v", err)
      +			}
      +		}
      +		ch.Close()
      +		done <- 1
      +	}()
      +
      +	ch, in, err := client.OpenChannel("speed", nil)
      +	if err != nil {
      +		b.Fatalf("OpenChannel: %v", err)
      +	}
      +	go DiscardRequests(in)
      +
      +	b.ResetTimer()
      +	b.StartTimer()
      +	for i := 0; i < b.N; i++ {
      +		if _, err := ch.Write(input); err != nil {
      +			b.Fatalf("WriteFull: %v", err)
      +		}
      +	}
      +	ch.Close()
      +	b.StopTimer()
      +
      +	<-done
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go
      new file mode 100644
      index 00000000..6931b511
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/buffer.go
      @@ -0,0 +1,98 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"io"
      +	"sync"
      +)
      +
      +// buffer provides a linked list buffer for data exchange
      +// between producer and consumer. Theoretically the buffer is
      +// of unlimited capacity as it does no allocation of its own.
      +type buffer struct {
      +	// protects concurrent access to head, tail and closed
      +	*sync.Cond
      +
      +	head *element // the buffer that will be read first
      +	tail *element // the buffer that will be read last
      +
      +	closed bool
      +}
      +
      +// An element represents a single link in a linked list.
      +type element struct {
      +	buf  []byte
      +	next *element
      +}
      +
      +// newBuffer returns an empty buffer that is not closed.
      +func newBuffer() *buffer {
      +	e := new(element)
      +	b := &buffer{
      +		Cond: newCond(),
      +		head: e,
      +		tail: e,
      +	}
      +	return b
      +}
      +
      +// write makes buf available for Read to receive.
      +// buf must not be modified after the call to write.
      +func (b *buffer) write(buf []byte) {
      +	b.Cond.L.Lock()
      +	e := &element{buf: buf}
      +	b.tail.next = e
      +	b.tail = e
      +	b.Cond.Signal()
      +	b.Cond.L.Unlock()
      +}
      +
      +// eof closes the buffer. Reads from the buffer once all
      +// the data has been consumed will receive os.EOF.
      +func (b *buffer) eof() error {
      +	b.Cond.L.Lock()
      +	b.closed = true
      +	b.Cond.Signal()
      +	b.Cond.L.Unlock()
      +	return nil
      +}
      +
      +// Read reads data from the internal buffer in buf.  Reads will block
      +// if no data is available, or until the buffer is closed.
      +func (b *buffer) Read(buf []byte) (n int, err error) {
      +	b.Cond.L.Lock()
      +	defer b.Cond.L.Unlock()
      +
      +	for len(buf) > 0 {
      +		// if there is data in b.head, copy it
      +		if len(b.head.buf) > 0 {
      +			r := copy(buf, b.head.buf)
      +			buf, b.head.buf = buf[r:], b.head.buf[r:]
      +			n += r
      +			continue
      +		}
      +		// if there is a next buffer, make it the head
      +		if len(b.head.buf) == 0 && b.head != b.tail {
      +			b.head = b.head.next
      +			continue
      +		}
      +
      +		// if at least one byte has been copied, return
      +		if n > 0 {
      +			break
      +		}
      +
      +		// if nothing was read, and there is nothing outstanding
      +		// check to see if the buffer is closed.
      +		if b.closed {
      +			err = io.EOF
      +			break
      +		}
      +		// out of buffers, wait for producer
      +		b.Cond.Wait()
      +	}
      +	return
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/buffer_test.go b/vendor/golang.org/x/crypto/ssh/buffer_test.go
      new file mode 100644
      index 00000000..d5781cb3
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/buffer_test.go
      @@ -0,0 +1,87 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"io"
      +	"testing"
      +)
      +
      +var alphabet = []byte("abcdefghijklmnopqrstuvwxyz")
      +
      +func TestBufferReadwrite(t *testing.T) {
      +	b := newBuffer()
      +	b.write(alphabet[:10])
      +	r, _ := b.Read(make([]byte, 10))
      +	if r != 10 {
      +		t.Fatalf("Expected written == read == 10, written: 10, read %d", r)
      +	}
      +
      +	b = newBuffer()
      +	b.write(alphabet[:5])
      +	r, _ = b.Read(make([]byte, 10))
      +	if r != 5 {
      +		t.Fatalf("Expected written == read == 5, written: 5, read %d", r)
      +	}
      +
      +	b = newBuffer()
      +	b.write(alphabet[:10])
      +	r, _ = b.Read(make([]byte, 5))
      +	if r != 5 {
      +		t.Fatalf("Expected written == 10, read == 5, written: 10, read %d", r)
      +	}
      +
      +	b = newBuffer()
      +	b.write(alphabet[:5])
      +	b.write(alphabet[5:15])
      +	r, _ = b.Read(make([]byte, 10))
      +	r2, _ := b.Read(make([]byte, 10))
      +	if r != 10 || r2 != 5 || 15 != r+r2 {
      +		t.Fatal("Expected written == read == 15")
      +	}
      +}
      +
      +func TestBufferClose(t *testing.T) {
      +	b := newBuffer()
      +	b.write(alphabet[:10])
      +	b.eof()
      +	_, err := b.Read(make([]byte, 5))
      +	if err != nil {
      +		t.Fatal("expected read of 5 to not return EOF")
      +	}
      +	b = newBuffer()
      +	b.write(alphabet[:10])
      +	b.eof()
      +	r, err := b.Read(make([]byte, 5))
      +	r2, err2 := b.Read(make([]byte, 10))
      +	if r != 5 || r2 != 5 || err != nil || err2 != nil {
      +		t.Fatal("expected reads of 5 and 5")
      +	}
      +
      +	b = newBuffer()
      +	b.write(alphabet[:10])
      +	b.eof()
      +	r, err = b.Read(make([]byte, 5))
      +	r2, err2 = b.Read(make([]byte, 10))
      +	r3, err3 := b.Read(make([]byte, 10))
      +	if r != 5 || r2 != 5 || r3 != 0 || err != nil || err2 != nil || err3 != io.EOF {
      +		t.Fatal("expected reads of 5 and 5 and 0, with EOF")
      +	}
      +
      +	b = newBuffer()
      +	b.write(make([]byte, 5))
      +	b.write(make([]byte, 10))
      +	b.eof()
      +	r, err = b.Read(make([]byte, 9))
      +	r2, err2 = b.Read(make([]byte, 3))
      +	r3, err3 = b.Read(make([]byte, 3))
      +	r4, err4 := b.Read(make([]byte, 10))
      +	if err != nil || err2 != nil || err3 != nil || err4 != io.EOF {
      +		t.Fatalf("Expected EOF on forth read only, err=%v, err2=%v, err3=%v, err4=%v", err, err2, err3, err4)
      +	}
      +	if r != 9 || r2 != 3 || r3 != 3 || r4 != 0 {
      +		t.Fatal("Expected written == read == 15", r, r2, r3, r4)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go
      new file mode 100644
      index 00000000..38577003
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/certs.go
      @@ -0,0 +1,501 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bytes"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"net"
      +	"sort"
      +	"time"
      +)
      +
      +// These constants from [PROTOCOL.certkeys] represent the algorithm names
      +// for certificate types supported by this package.
      +const (
      +	CertAlgoRSAv01      = "ssh-rsa-cert-v01@openssh.com"
      +	CertAlgoDSAv01      = "ssh-dss-cert-v01@openssh.com"
      +	CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
      +	CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
      +	CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
      +)
      +
      +// Certificate types distinguish between host and user
      +// certificates. The values can be set in the CertType field of
      +// Certificate.
      +const (
      +	UserCert = 1
      +	HostCert = 2
      +)
      +
      +// Signature represents a cryptographic signature.
      +type Signature struct {
      +	Format string
      +	Blob   []byte
      +}
      +
      +// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that
      +// a certificate does not expire.
      +const CertTimeInfinity = 1<<64 - 1
      +
+// A Certificate represents an OpenSSH certificate as defined in
      +// [PROTOCOL.certkeys]?rev=1.8.
      +type Certificate struct {
      +	Nonce           []byte
      +	Key             PublicKey
      +	Serial          uint64
      +	CertType        uint32
      +	KeyId           string
      +	ValidPrincipals []string
      +	ValidAfter      uint64
      +	ValidBefore     uint64
      +	Permissions
      +	Reserved     []byte
      +	SignatureKey PublicKey
      +	Signature    *Signature
      +}
      +
      +// genericCertData holds the key-independent part of the certificate data.
+// Overall, certificates contain a nonce, public key fields and
      +// key-independent fields.
      +type genericCertData struct {
      +	Serial          uint64
      +	CertType        uint32
      +	KeyId           string
      +	ValidPrincipals []byte
      +	ValidAfter      uint64
      +	ValidBefore     uint64
      +	CriticalOptions []byte
      +	Extensions      []byte
      +	Reserved        []byte
      +	SignatureKey    []byte
      +	Signature       []byte
      +}
      +
      +func marshalStringList(namelist []string) []byte {
      +	var to []byte
      +	for _, name := range namelist {
      +		s := struct{ N string }{name}
      +		to = append(to, Marshal(&s)...)
      +	}
      +	return to
      +}
      +
      +type optionsTuple struct {
      +	Key   string
      +	Value []byte
      +}
      +
      +type optionsTupleValue struct {
      +	Value string
      +}
      +
      +// serialize a map of critical options or extensions
      +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
      +// we need two length prefixes for a non-empty string value
      +func marshalTuples(tups map[string]string) []byte {
      +	keys := make([]string, 0, len(tups))
      +	for key := range tups {
      +		keys = append(keys, key)
      +	}
      +	sort.Strings(keys)
      +
      +	var ret []byte
      +	for _, key := range keys {
      +		s := optionsTuple{Key: key}
      +		if value := tups[key]; len(value) > 0 {
      +			s.Value = Marshal(&optionsTupleValue{value})
      +		}
      +		ret = append(ret, Marshal(&s)...)
      +	}
      +	return ret
      +}
      +
      +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
      +// we need two length prefixes for a non-empty option value
      +func parseTuples(in []byte) (map[string]string, error) {
      +	tups := map[string]string{}
      +	var lastKey string
      +	var haveLastKey bool
      +
      +	for len(in) > 0 {
      +		var key, val, extra []byte
      +		var ok bool
      +
      +		if key, in, ok = parseString(in); !ok {
      +			return nil, errShortRead
      +		}
      +		keyStr := string(key)
      +		// according to [PROTOCOL.certkeys], the names must be in
      +		// lexical order.
      +		if haveLastKey && keyStr <= lastKey {
      +			return nil, fmt.Errorf("ssh: certificate options are not in lexical order")
      +		}
      +		lastKey, haveLastKey = keyStr, true
      +		// the next field is a data field, which if non-empty has a string embedded
      +		if val, in, ok = parseString(in); !ok {
      +			return nil, errShortRead
      +		}
      +		if len(val) > 0 {
      +			val, extra, ok = parseString(val)
      +			if !ok {
      +				return nil, errShortRead
      +			}
      +			if len(extra) > 0 {
      +				return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value")
      +			}
      +			tups[keyStr] = string(val)
      +		} else {
      +			tups[keyStr] = ""
      +		}
      +	}
      +	return tups, nil
      +}
      +
      +func parseCert(in []byte, privAlgo string) (*Certificate, error) {
      +	nonce, rest, ok := parseString(in)
      +	if !ok {
      +		return nil, errShortRead
      +	}
      +
      +	key, rest, err := parsePubKey(rest, privAlgo)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	var g genericCertData
      +	if err := Unmarshal(rest, &g); err != nil {
      +		return nil, err
      +	}
      +
      +	c := &Certificate{
      +		Nonce:       nonce,
      +		Key:         key,
      +		Serial:      g.Serial,
      +		CertType:    g.CertType,
      +		KeyId:       g.KeyId,
      +		ValidAfter:  g.ValidAfter,
      +		ValidBefore: g.ValidBefore,
      +	}
      +
      +	for principals := g.ValidPrincipals; len(principals) > 0; {
      +		principal, rest, ok := parseString(principals)
      +		if !ok {
      +			return nil, errShortRead
      +		}
      +		c.ValidPrincipals = append(c.ValidPrincipals, string(principal))
      +		principals = rest
      +	}
      +
      +	c.CriticalOptions, err = parseTuples(g.CriticalOptions)
      +	if err != nil {
      +		return nil, err
      +	}
      +	c.Extensions, err = parseTuples(g.Extensions)
      +	if err != nil {
      +		return nil, err
      +	}
      +	c.Reserved = g.Reserved
      +	k, err := ParsePublicKey(g.SignatureKey)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	c.SignatureKey = k
      +	c.Signature, rest, ok = parseSignatureBody(g.Signature)
      +	if !ok || len(rest) > 0 {
      +		return nil, errors.New("ssh: signature parse error")
      +	}
      +
      +	return c, nil
      +}
      +
      +type openSSHCertSigner struct {
      +	pub    *Certificate
      +	signer Signer
      +}
      +
      +// NewCertSigner returns a Signer that signs with the given Certificate, whose
      +// private key is held by signer. It returns an error if the public key in cert
      +// doesn't match the key used by signer.
      +func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
      +	if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 {
      +		return nil, errors.New("ssh: signer and cert have different public key")
      +	}
      +
      +	return &openSSHCertSigner{cert, signer}, nil
      +}
      +
      +func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
      +	return s.signer.Sign(rand, data)
      +}
      +
      +func (s *openSSHCertSigner) PublicKey() PublicKey {
      +	return s.pub
      +}
      +
      +const sourceAddressCriticalOption = "source-address"
      +
      +// CertChecker does the work of verifying a certificate. Its methods
      +// can be plugged into ClientConfig.HostKeyCallback and
      +// ServerConfig.PublicKeyCallback. For the CertChecker to work,
      +// minimally, the IsAuthority callback should be set.
      +type CertChecker struct {
      +	// SupportedCriticalOptions lists the CriticalOptions that the
      +	// server application layer understands. These are only used
      +	// for user certificates.
      +	SupportedCriticalOptions []string
      +
      +	// IsAuthority should return true if the key is recognized as
      +	// an authority. This allows for certificates to be signed by other
      +	// certificates.
      +	IsAuthority func(auth PublicKey) bool
      +
      +	// Clock is used for verifying time stamps. If nil, time.Now
      +	// is used.
      +	Clock func() time.Time
      +
      +	// UserKeyFallback is called when CertChecker.Authenticate encounters a
      +	// public key that is not a certificate. It must implement validation
      +	// of user keys or else, if nil, all such keys are rejected.
      +	UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
      +
      +	// HostKeyFallback is called when CertChecker.CheckHostKey encounters a
      +	// public key that is not a certificate. It must implement host key
      +	// validation or else, if nil, all such keys are rejected.
      +	HostKeyFallback func(addr string, remote net.Addr, key PublicKey) error
      +
      +	// IsRevoked is called for each certificate so that revocation checking
      +	// can be implemented. It should return true if the given certificate
      +	// is revoked and false otherwise. If nil, no certificates are
      +	// considered to have been revoked.
      +	IsRevoked func(cert *Certificate) bool
      +}
      +
      +// CheckHostKey checks a host key certificate. This method can be
      +// plugged into ClientConfig.HostKeyCallback.
      +func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error {
      +	cert, ok := key.(*Certificate)
      +	if !ok {
      +		if c.HostKeyFallback != nil {
      +			return c.HostKeyFallback(addr, remote, key)
      +		}
      +		return errors.New("ssh: non-certificate host key")
      +	}
      +	if cert.CertType != HostCert {
      +		return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType)
      +	}
      +
      +	return c.CheckCert(addr, cert)
      +}
      +
      +// Authenticate checks a user certificate. Authenticate can be used as
      +// a value for ServerConfig.PublicKeyCallback.
      +func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) {
      +	cert, ok := pubKey.(*Certificate)
      +	if !ok {
      +		if c.UserKeyFallback != nil {
      +			return c.UserKeyFallback(conn, pubKey)
      +		}
      +		return nil, errors.New("ssh: normal key pairs not accepted")
      +	}
      +
      +	if cert.CertType != UserCert {
      +		return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType)
      +	}
      +
      +	if err := c.CheckCert(conn.User(), cert); err != nil {
      +		return nil, err
      +	}
      +
      +	return &cert.Permissions, nil
      +}
      +
      +// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and
      +// the signature of the certificate.
      +func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
      +	if c.IsRevoked != nil && c.IsRevoked(cert) {
      +		return fmt.Errorf("ssh: certicate serial %d revoked", cert.Serial)
      +	}
      +
      +	for opt, _ := range cert.CriticalOptions {
      +		// sourceAddressCriticalOption will be enforced by
      +		// serverAuthenticate
      +		if opt == sourceAddressCriticalOption {
      +			continue
      +		}
      +
      +		found := false
      +		for _, supp := range c.SupportedCriticalOptions {
      +			if supp == opt {
      +				found = true
      +				break
      +			}
      +		}
      +		if !found {
      +			return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt)
      +		}
      +	}
      +
      +	if len(cert.ValidPrincipals) > 0 {
      +		// By default, certs are valid for all users/hosts.
      +		found := false
      +		for _, p := range cert.ValidPrincipals {
      +			if p == principal {
      +				found = true
      +				break
      +			}
      +		}
      +		if !found {
      +			return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals)
      +		}
      +	}
      +
      +	if !c.IsAuthority(cert.SignatureKey) {
      +		return fmt.Errorf("ssh: certificate signed by unrecognized authority")
      +	}
      +
      +	clock := c.Clock
      +	if clock == nil {
      +		clock = time.Now
      +	}
      +
      +	unixNow := clock().Unix()
      +	if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) {
      +		return fmt.Errorf("ssh: cert is not yet valid")
      +	}
      +	if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) {
      +		return fmt.Errorf("ssh: cert has expired")
      +	}
      +	if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil {
      +		return fmt.Errorf("ssh: certificate signature does not verify")
      +	}
      +
      +	return nil
      +}
      +
      +// SignCert sets c.SignatureKey to the authority's public key and stores a
      +// Signature, by authority, in the certificate.
      +func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
      +	c.Nonce = make([]byte, 32)
      +	if _, err := io.ReadFull(rand, c.Nonce); err != nil {
      +		return err
      +	}
      +	c.SignatureKey = authority.PublicKey()
      +
      +	sig, err := authority.Sign(rand, c.bytesForSigning())
      +	if err != nil {
      +		return err
      +	}
      +	c.Signature = sig
      +	return nil
      +}
      +
      +var certAlgoNames = map[string]string{
      +	KeyAlgoRSA:      CertAlgoRSAv01,
      +	KeyAlgoDSA:      CertAlgoDSAv01,
      +	KeyAlgoECDSA256: CertAlgoECDSA256v01,
      +	KeyAlgoECDSA384: CertAlgoECDSA384v01,
      +	KeyAlgoECDSA521: CertAlgoECDSA521v01,
      +}
      +
      +// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
      +// Panics if a non-certificate algorithm is passed.
      +func certToPrivAlgo(algo string) string {
      +	for privAlgo, pubAlgo := range certAlgoNames {
      +		if pubAlgo == algo {
      +			return privAlgo
      +		}
      +	}
      +	panic("unknown cert algorithm")
      +}
      +
      +func (cert *Certificate) bytesForSigning() []byte {
      +	c2 := *cert
      +	c2.Signature = nil
      +	out := c2.Marshal()
      +	// Drop trailing signature length.
      +	return out[:len(out)-4]
      +}
      +
      +// Marshal serializes c into OpenSSH's wire format. It is part of the
      +// PublicKey interface.
      +func (c *Certificate) Marshal() []byte {
      +	generic := genericCertData{
      +		Serial:          c.Serial,
      +		CertType:        c.CertType,
      +		KeyId:           c.KeyId,
      +		ValidPrincipals: marshalStringList(c.ValidPrincipals),
      +		ValidAfter:      uint64(c.ValidAfter),
      +		ValidBefore:     uint64(c.ValidBefore),
      +		CriticalOptions: marshalTuples(c.CriticalOptions),
      +		Extensions:      marshalTuples(c.Extensions),
      +		Reserved:        c.Reserved,
      +		SignatureKey:    c.SignatureKey.Marshal(),
      +	}
      +	if c.Signature != nil {
      +		generic.Signature = Marshal(c.Signature)
      +	}
      +	genericBytes := Marshal(&generic)
      +	keyBytes := c.Key.Marshal()
      +	_, keyBytes, _ = parseString(keyBytes)
      +	prefix := Marshal(&struct {
      +		Name  string
      +		Nonce []byte
      +		Key   []byte `ssh:"rest"`
      +	}{c.Type(), c.Nonce, keyBytes})
      +
      +	result := make([]byte, 0, len(prefix)+len(genericBytes))
      +	result = append(result, prefix...)
      +	result = append(result, genericBytes...)
      +	return result
      +}
      +
      +// Type returns the key name. It is part of the PublicKey interface.
      +func (c *Certificate) Type() string {
      +	algo, ok := certAlgoNames[c.Key.Type()]
      +	if !ok {
      +		panic("unknown cert key type")
      +	}
      +	return algo
      +}
      +
      +// Verify verifies a signature against the certificate's public
      +// key. It is part of the PublicKey interface.
      +func (c *Certificate) Verify(data []byte, sig *Signature) error {
      +	return c.Key.Verify(data, sig)
      +}
      +
      +func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) {
      +	format, in, ok := parseString(in)
      +	if !ok {
      +		return
      +	}
      +
      +	out = &Signature{
      +		Format: string(format),
      +	}
      +
      +	if out.Blob, in, ok = parseString(in); !ok {
      +		return
      +	}
      +
      +	return out, in, ok
      +}
      +
      +func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) {
      +	sigBytes, rest, ok := parseString(in)
      +	if !ok {
      +		return
      +	}
      +
      +	out, trailing, ok := parseSignatureBody(sigBytes)
      +	if !ok || len(trailing) > 0 {
      +		return nil, nil, false
      +	}
      +	return
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/certs_test.go b/vendor/golang.org/x/crypto/ssh/certs_test.go
      new file mode 100644
      index 00000000..c5f2e533
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/certs_test.go
      @@ -0,0 +1,216 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bytes"
      +	"crypto/rand"
      +	"reflect"
      +	"testing"
      +	"time"
      +)
      +
      +// Cert generated by ssh-keygen 6.0p1 Debian-4.
      +// % ssh-keygen -s ca-key -I test user-key
      +const exampleSSHCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgb1srW/W3ZDjYAO45xLYAwzHBDLsJ4Ux6ICFIkTjb1LEAAAADAQABAAAAYQCkoR51poH0wE8w72cqSB8Sszx+vAhzcMdCO0wqHTj7UNENHWEXGrU0E0UQekD7U+yhkhtoyjbPOVIP7hNa6aRk/ezdh/iUnCIt4Jt1v3Z1h1P+hA4QuYFMHNB+rmjPwAcAAAAAAAAAAAAAAAEAAAAEdGVzdAAAAAAAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAAHcAAAAHc3NoLXJzYQAAAAMBAAEAAABhANFS2kaktpSGc+CcmEKPyw9mJC4nZKxHKTgLVZeaGbFZOvJTNzBspQHdy7Q1uKSfktxpgjZnksiu/tFF9ngyY2KFoc+U88ya95IZUycBGCUbBQ8+bhDtw/icdDGQD5WnUwAAAG8AAAAHc3NoLXJzYQAAAGC8Y9Z2LQKhIhxf52773XaWrXdxP0t3GBVo4A10vUWiYoAGepr6rQIoGGXFxT4B9Gp+nEBJjOwKDXPrAevow0T9ca8gZN+0ykbhSrXLE5Ao48rqr3zP4O1/9P7e6gp0gw8=`
      +
      +func TestParseCert(t *testing.T) {
      +	authKeyBytes := []byte(exampleSSHCert)
      +
      +	key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes)
      +	if err != nil {
      +		t.Fatalf("ParseAuthorizedKey: %v", err)
      +	}
      +	if len(rest) > 0 {
      +		t.Errorf("rest: got %q, want empty", rest)
      +	}
      +
      +	if _, ok := key.(*Certificate); !ok {
      +		t.Fatalf("got %v (%T), want *Certificate", key, key)
      +	}
      +
      +	marshaled := MarshalAuthorizedKey(key)
      +	// Before comparison, remove the trailing newline that
      +	// MarshalAuthorizedKey adds.
      +	marshaled = marshaled[:len(marshaled)-1]
      +	if !bytes.Equal(authKeyBytes, marshaled) {
      +		t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes)
      +	}
      +}
      +
      +// Cert generated by ssh-keygen OpenSSH_6.8p1 OS X 10.10.3
      +// % ssh-keygen -s ca -I testcert -O source-address=192.168.1.0/24 -O force-command=/bin/sleep user.pub
      +// user.pub key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMN
      +// Critical Options:
      +//         force-command /bin/sleep
      +//         source-address 192.168.1.0/24
      +// Extensions:
      +//         permit-X11-forwarding
      +//         permit-agent-forwarding
      +//         permit-port-forwarding
      +//         permit-pty
      +//         permit-user-rc
      +const exampleSSHCertWithOptions = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgDyysCJY0XrO1n03EeRRoITnTPdjENFmWDs9X58PP3VUAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMNAAAAAAAAAAAAAAABAAAACHRlc3RjZXJ0AAAAAAAAAAAAAAAA//////////8AAABLAAAADWZvcmNlLWNvbW1hbmQAAAAOAAAACi9iaW4vc2xlZXAAAAAOc291cmNlLWFkZHJlc3MAAAASAAAADjE5Mi4xNjguMS4wLzI0AAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEAwU+c5ui5A8+J/CFpjW8wCa52bEODA808WWQDCSuTG/eMXNf59v9Y8Pk0F1E9dGCosSNyVcB/hacUrc6He+i97+HJCyKavBsE6GDxrjRyxYqAlfcOXi/IVmaUGiO8OQ39d4GHrjToInKvExSUeleQyH4Y4/e27T/pILAqPFL3fyrvMLT5qU9QyIt6zIpa7GBP5+urouNavMprV3zsfIqNBbWypinOQAw823a5wN+zwXnhZrgQiHZ/USG09Y6k98y1dTVz8YHlQVR4D3lpTAsKDKJ5hCH9WU4fdf+lU8OyNGaJ/vz0XNqxcToe1l4numLTnaoSuH89pHryjqurB7lJKwAAAQ8AAAAHc3NoLXJzYQAAAQCaHvUIoPL1zWUHIXLvu96/HU1s/i4CAW2IIEuGgxCUCiFj6vyTyYtgxQxcmbfZf6eaITlS6XJZa7Qq4iaFZh75C1DXTX8labXhRSD4E2t//AIP9MC1rtQC5xo6FmbQ+BoKcDskr+mNACcbRSxs3IL3bwCfWDnIw2WbVox9ZdcthJKk4UoCW4ix4QwdHw7zlddlz++fGEEVhmTbll1SUkycGApPFBsAYRTMupUJcYPIeReBI/m8XfkoMk99bV8ZJQTAd7OekHY2/48Ff53jLmyDjP7kNw1F8OaPtkFs6dGJXta4krmaekPy87j+35In5hFj7yoOqvSbmYUkeX70/GGQ`
      +
      +func TestParseCertWithOptions(t *testing.T) {
      +	opts := map[string]string{
      +		"source-address": "192.168.1.0/24",
      +		"force-command":  "/bin/sleep",
      +	}
      +	exts := map[string]string{
      +		"permit-X11-forwarding":   "",
      +		"permit-agent-forwarding": "",
      +		"permit-port-forwarding":  "",
      +		"permit-pty":              "",
      +		"permit-user-rc":          "",
      +	}
      +	authKeyBytes := []byte(exampleSSHCertWithOptions)
      +
      +	key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes)
      +	if err != nil {
      +		t.Fatalf("ParseAuthorizedKey: %v", err)
      +	}
      +	if len(rest) > 0 {
      +		t.Errorf("rest: got %q, want empty", rest)
      +	}
      +	cert, ok := key.(*Certificate)
      +	if !ok {
      +		t.Fatalf("got %v (%T), want *Certificate", key, key)
      +	}
      +	if !reflect.DeepEqual(cert.CriticalOptions, opts) {
      +		t.Errorf("unexpected critical options - got %v, want %v", cert.CriticalOptions, opts)
      +	}
      +	if !reflect.DeepEqual(cert.Extensions, exts) {
      +		t.Errorf("unexpected Extensions - got %v, want %v", cert.Extensions, exts)
      +	}
      +	marshaled := MarshalAuthorizedKey(key)
      +	// Before comparison, remove the trailing newline that
      +	// MarshalAuthorizedKey adds.
      +	marshaled = marshaled[:len(marshaled)-1]
      +	if !bytes.Equal(authKeyBytes, marshaled) {
      +		t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes)
      +	}
      +}
      +
      +func TestValidateCert(t *testing.T) {
      +	key, _, _, _, err := ParseAuthorizedKey([]byte(exampleSSHCert))
      +	if err != nil {
      +		t.Fatalf("ParseAuthorizedKey: %v", err)
      +	}
      +	validCert, ok := key.(*Certificate)
      +	if !ok {
      +		t.Fatalf("got %v (%T), want *Certificate", key, key)
      +	}
      +	checker := CertChecker{}
      +	checker.IsAuthority = func(k PublicKey) bool {
      +		return bytes.Equal(k.Marshal(), validCert.SignatureKey.Marshal())
      +	}
      +
      +	if err := checker.CheckCert("user", validCert); err != nil {
      +		t.Errorf("Unable to validate certificate: %v", err)
      +	}
      +	invalidCert := &Certificate{
      +		Key:          testPublicKeys["rsa"],
      +		SignatureKey: testPublicKeys["ecdsa"],
      +		ValidBefore:  CertTimeInfinity,
      +		Signature:    &Signature{},
      +	}
      +	if err := checker.CheckCert("user", invalidCert); err == nil {
      +		t.Error("Invalid cert signature passed validation")
      +	}
      +}
      +
      +func TestValidateCertTime(t *testing.T) {
      +	cert := Certificate{
      +		ValidPrincipals: []string{"user"},
      +		Key:             testPublicKeys["rsa"],
      +		ValidAfter:      50,
      +		ValidBefore:     100,
      +	}
      +
      +	cert.SignCert(rand.Reader, testSigners["ecdsa"])
      +
      +	for ts, ok := range map[int64]bool{
      +		25:  false,
      +		50:  true,
      +		99:  true,
      +		100: false,
      +		125: false,
      +	} {
      +		checker := CertChecker{
      +			Clock: func() time.Time { return time.Unix(ts, 0) },
      +		}
      +		checker.IsAuthority = func(k PublicKey) bool {
      +			return bytes.Equal(k.Marshal(),
      +				testPublicKeys["ecdsa"].Marshal())
      +		}
      +
      +		if v := checker.CheckCert("user", &cert); (v == nil) != ok {
      +			t.Errorf("Authenticate(%d): %v", ts, v)
      +		}
      +	}
      +}
      +
      +// TODO(hanwen): tests for
      +//
      +// host keys:
      +// * fallbacks
      +
      +func TestHostKeyCert(t *testing.T) {
      +	cert := &Certificate{
      +		ValidPrincipals: []string{"hostname", "hostname.domain"},
      +		Key:             testPublicKeys["rsa"],
      +		ValidBefore:     CertTimeInfinity,
      +		CertType:        HostCert,
      +	}
      +	cert.SignCert(rand.Reader, testSigners["ecdsa"])
      +
      +	checker := &CertChecker{
      +		IsAuthority: func(p PublicKey) bool {
      +			return bytes.Equal(testPublicKeys["ecdsa"].Marshal(), p.Marshal())
      +		},
      +	}
      +
      +	certSigner, err := NewCertSigner(cert, testSigners["rsa"])
      +	if err != nil {
      +		t.Errorf("NewCertSigner: %v", err)
      +	}
      +
      +	for _, name := range []string{"hostname", "otherhost"} {
      +		c1, c2, err := netPipe()
      +		if err != nil {
      +			t.Fatalf("netPipe: %v", err)
      +		}
      +		defer c1.Close()
      +		defer c2.Close()
      +
      +		errc := make(chan error)
      +
      +		go func() {
      +			conf := ServerConfig{
      +				NoClientAuth: true,
      +			}
      +			conf.AddHostKey(certSigner)
      +			_, _, _, err := NewServerConn(c1, &conf)
      +			errc <- err
      +		}()
      +
      +		config := &ClientConfig{
      +			User:            "user",
      +			HostKeyCallback: checker.CheckHostKey,
      +		}
      +		_, _, _, err = NewClientConn(c2, name, config)
      +
      +		succeed := name == "hostname"
      +		if (err == nil) != succeed {
      +			t.Fatalf("NewClientConn(%q): %v", name, err)
      +		}
      +
      +		err = <-errc
      +		if (err == nil) != succeed {
      +			t.Fatalf("NewServerConn(%q): %v", name, err)
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go
      new file mode 100644
      index 00000000..5403c7e4
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/channel.go
      @@ -0,0 +1,631 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"encoding/binary"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"log"
      +	"sync"
      +)
      +
      +const (
      +	minPacketLength = 9
      +	// channelMaxPacket contains the maximum number of bytes that will be
      +	// sent in a single packet. As per RFC 4253, section 6.1, 32k is also
      +	// the minimum.
      +	channelMaxPacket = 1 << 15
      +	// We follow OpenSSH here.
      +	channelWindowSize = 64 * channelMaxPacket
      +)
      +
      +// NewChannel represents an incoming request to a channel. It must either be
      +// accepted for use by calling Accept, or rejected by calling Reject.
      +type NewChannel interface {
      +	// Accept accepts the channel creation request. It returns the Channel
      +	// and a Go channel containing SSH requests. The Go channel must be
      +	// serviced otherwise the Channel will hang.
      +	Accept() (Channel, <-chan *Request, error)
      +
      +	// Reject rejects the channel creation request. After calling
      +	// this, no other methods on the Channel may be called.
      +	Reject(reason RejectionReason, message string) error
      +
      +	// ChannelType returns the type of the channel, as supplied by the
      +	// client.
      +	ChannelType() string
      +
      +	// ExtraData returns the arbitrary payload for this channel, as supplied
      +	// by the client. This data is specific to the channel type.
      +	ExtraData() []byte
      +}
      +
      +// A Channel is an ordered, reliable, flow-controlled, duplex stream
      +// that is multiplexed over an SSH connection.
      +type Channel interface {
      +	// Read reads up to len(data) bytes from the channel.
      +	Read(data []byte) (int, error)
      +
      +	// Write writes len(data) bytes to the channel.
      +	Write(data []byte) (int, error)
      +
      +	// Close signals end of channel use. No data may be sent after this
      +	// call.
      +	Close() error
      +
      +	// CloseWrite signals the end of sending in-band
      +	// data. Requests may still be sent, and the other side may
      +	// still send data
      +	CloseWrite() error
      +
      +	// SendRequest sends a channel request.  If wantReply is true,
      +	// it will wait for a reply and return the result as a
      +	// boolean, otherwise the return value will be false. Channel
      +	// requests are out-of-band messages so they may be sent even
      +	// if the data stream is closed or blocked by flow control.
      +	SendRequest(name string, wantReply bool, payload []byte) (bool, error)
      +
      +	// Stderr returns an io.ReadWriter that writes to this channel
      +	// with the extended data type set to stderr. Stderr may
      +	// safely be read and written from a different goroutine than
      +	// Read and Write respectively.
      +	Stderr() io.ReadWriter
      +}
      +
      +// Request is a request sent outside of the normal stream of
      +// data. Requests can either be specific to an SSH channel, or they
      +// can be global.
      +type Request struct {
      +	Type      string
      +	WantReply bool
      +	Payload   []byte
      +
      +	ch  *channel
      +	mux *mux
      +}
      +
      +// Reply sends a response to a request. It must be called for all requests
      +// where WantReply is true and is a no-op otherwise. The payload argument is
      +// ignored for replies to channel-specific requests.
      +func (r *Request) Reply(ok bool, payload []byte) error {
      +	if !r.WantReply {
      +		return nil
      +	}
      +
      +	if r.ch == nil {
      +		return r.mux.ackRequest(ok, payload)
      +	}
      +
      +	return r.ch.ackRequest(ok)
      +}
      +
      +// RejectionReason is an enumeration used when rejecting channel creation
      +// requests. See RFC 4254, section 5.1.
      +type RejectionReason uint32
      +
      +const (
      +	Prohibited RejectionReason = iota + 1
      +	ConnectionFailed
      +	UnknownChannelType
      +	ResourceShortage
      +)
      +
      +// String converts the rejection reason to human readable form.
      +func (r RejectionReason) String() string {
      +	switch r {
      +	case Prohibited:
      +		return "administratively prohibited"
      +	case ConnectionFailed:
      +		return "connect failed"
      +	case UnknownChannelType:
      +		return "unknown channel type"
      +	case ResourceShortage:
      +		return "resource shortage"
      +	}
      +	return fmt.Sprintf("unknown reason %d", int(r))
      +}
      +
      +func min(a uint32, b int) uint32 {
      +	if a < uint32(b) {
      +		return a
      +	}
      +	return uint32(b)
      +}
      +
      +type channelDirection uint8
      +
      +const (
      +	channelInbound channelDirection = iota
      +	channelOutbound
      +)
      +
      +// channel is an implementation of the Channel interface that works
      +// with the mux class.
      +type channel struct {
      +	// R/O after creation
      +	chanType          string
      +	extraData         []byte
      +	localId, remoteId uint32
      +
      +	// maxIncomingPayload and maxRemotePayload are the maximum
      +	// payload sizes of normal and extended data packets for
      +	// receiving and sending, respectively. The wire packet will
      +	// be 9 or 13 bytes larger (excluding encryption overhead).
      +	maxIncomingPayload uint32
      +	maxRemotePayload   uint32
      +
      +	mux *mux
      +
      +	// decided is set to true if an accept or reject message has been sent
      +	// (for outbound channels) or received (for inbound channels).
      +	decided bool
      +
      +	// direction contains either channelOutbound, for channels created
      +	// locally, or channelInbound, for channels created by the peer.
      +	direction channelDirection
      +
      +	// Pending internal channel messages.
      +	msg chan interface{}
      +
      +	// Since requests have no ID, there can be only one request
      +	// with WantReply=true outstanding.  This lock is held by a
      +	// goroutine that has such an outgoing request pending.
      +	sentRequestMu sync.Mutex
      +
      +	incomingRequests chan *Request
      +
      +	sentEOF bool
      +
      +	// thread-safe data
      +	remoteWin  window
      +	pending    *buffer
      +	extPending *buffer
      +
      +	// windowMu protects myWindow, the flow-control window.
      +	windowMu sync.Mutex
      +	myWindow uint32
      +
      +	// writeMu serializes calls to mux.conn.writePacket() and
      +	// protects sentClose and packetPool. This mutex must be
      +	// different from windowMu, as writePacket can block if there
      +	// is a key exchange pending.
      +	writeMu   sync.Mutex
      +	sentClose bool
      +
      +	// packetPool has a buffer for each extended channel ID to
      +	// save allocations during writes.
      +	packetPool map[uint32][]byte
      +}
      +
      +// writePacket sends a packet. If the packet is a channel close, it updates
      +// sentClose. This method takes the lock c.writeMu.
      +func (c *channel) writePacket(packet []byte) error {
      +	c.writeMu.Lock()
      +	if c.sentClose {
      +		c.writeMu.Unlock()
      +		return io.EOF
      +	}
      +	c.sentClose = (packet[0] == msgChannelClose)
      +	err := c.mux.conn.writePacket(packet)
      +	c.writeMu.Unlock()
      +	return err
      +}
      +
      +func (c *channel) sendMessage(msg interface{}) error {
      +	if debugMux {
      +		log.Printf("send %d: %#v", c.mux.chanList.offset, msg)
      +	}
      +
      +	p := Marshal(msg)
      +	binary.BigEndian.PutUint32(p[1:], c.remoteId)
      +	return c.writePacket(p)
      +}
      +
      +// WriteExtended writes data to a specific extended stream. These streams are
      +// used, for example, for stderr.
      +func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) {
      +	if c.sentEOF {
      +		return 0, io.EOF
      +	}
      +	// 1 byte message type, 4 bytes remoteId, 4 bytes data length
      +	opCode := byte(msgChannelData)
      +	headerLength := uint32(9)
      +	if extendedCode > 0 {
      +		headerLength += 4
      +		opCode = msgChannelExtendedData
      +	}
      +
      +	c.writeMu.Lock()
      +	packet := c.packetPool[extendedCode]
      +	// We don't remove the buffer from packetPool, so
      +	// WriteExtended calls from different goroutines will be
      +	// flagged as errors by the race detector.
      +	c.writeMu.Unlock()
      +
      +	for len(data) > 0 {
      +		space := min(c.maxRemotePayload, len(data))
      +		if space, err = c.remoteWin.reserve(space); err != nil {
      +			return n, err
      +		}
      +		if want := headerLength + space; uint32(cap(packet)) < want {
      +			packet = make([]byte, want)
      +		} else {
      +			packet = packet[:want]
      +		}
      +
      +		todo := data[:space]
      +
      +		packet[0] = opCode
      +		binary.BigEndian.PutUint32(packet[1:], c.remoteId)
      +		if extendedCode > 0 {
      +			binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode))
      +		}
      +		binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo)))
      +		copy(packet[headerLength:], todo)
      +		if err = c.writePacket(packet); err != nil {
      +			return n, err
      +		}
      +
      +		n += len(todo)
      +		data = data[len(todo):]
      +	}
      +
      +	c.writeMu.Lock()
      +	c.packetPool[extendedCode] = packet
      +	c.writeMu.Unlock()
      +
      +	return n, err
      +}
      +
      +func (c *channel) handleData(packet []byte) error {
      +	headerLen := 9
      +	isExtendedData := packet[0] == msgChannelExtendedData
      +	if isExtendedData {
      +		headerLen = 13
      +	}
      +	if len(packet) < headerLen {
      +		// malformed data packet
      +		return parseError(packet[0])
      +	}
      +
      +	var extended uint32
      +	if isExtendedData {
      +		extended = binary.BigEndian.Uint32(packet[5:])
      +	}
      +
      +	length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen])
      +	if length == 0 {
      +		return nil
      +	}
      +	if length > c.maxIncomingPayload {
      +		// TODO(hanwen): should send Disconnect?
      +		return errors.New("ssh: incoming packet exceeds maximum payload size")
      +	}
      +
      +	data := packet[headerLen:]
      +	if length != uint32(len(data)) {
      +		return errors.New("ssh: wrong packet length")
      +	}
      +
      +	c.windowMu.Lock()
      +	if c.myWindow < length {
      +		c.windowMu.Unlock()
      +		// TODO(hanwen): should send Disconnect with reason?
      +		return errors.New("ssh: remote side wrote too much")
      +	}
      +	c.myWindow -= length
      +	c.windowMu.Unlock()
      +
      +	if extended == 1 {
      +		c.extPending.write(data)
      +	} else if extended > 0 {
      +		// discard other extended data.
      +	} else {
      +		c.pending.write(data)
      +	}
      +	return nil
      +}
      +
      +func (c *channel) adjustWindow(n uint32) error {
      +	c.windowMu.Lock()
      +	// Since myWindow is managed on our side, and can never exceed
      +	// the initial window setting, we don't worry about overflow.
      +	c.myWindow += uint32(n)
      +	c.windowMu.Unlock()
      +	return c.sendMessage(windowAdjustMsg{
      +		AdditionalBytes: uint32(n),
      +	})
      +}
      +
      +func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) {
      +	switch extended {
      +	case 1:
      +		n, err = c.extPending.Read(data)
      +	case 0:
      +		n, err = c.pending.Read(data)
      +	default:
      +		return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended)
      +	}
      +
      +	if n > 0 {
      +		err = c.adjustWindow(uint32(n))
      +		// sendWindowAdjust can return io.EOF if the remote
      +		// peer has closed the connection, however we want to
      +		// defer forwarding io.EOF to the caller of Read until
      +		// the buffer has been drained.
      +		if n > 0 && err == io.EOF {
      +			err = nil
      +		}
      +	}
      +
      +	return n, err
      +}
      +
      +func (c *channel) close() {
      +	c.pending.eof()
      +	c.extPending.eof()
      +	close(c.msg)
      +	close(c.incomingRequests)
      +	c.writeMu.Lock()
      +	// This is not necesary for a normal channel teardown, but if
      +	// there was another error, it is.
      +	c.sentClose = true
      +	c.writeMu.Unlock()
      +	// Unblock writers.
      +	c.remoteWin.close()
      +}
      +
      +// responseMessageReceived is called when a success or failure message is
      +// received on a channel to check that such a message is reasonable for the
      +// given channel.
      +func (c *channel) responseMessageReceived() error {
      +	if c.direction == channelInbound {
      +		return errors.New("ssh: channel response message received on inbound channel")
      +	}
      +	if c.decided {
      +		return errors.New("ssh: duplicate response received for channel")
      +	}
      +	c.decided = true
      +	return nil
      +}
      +
      +func (c *channel) handlePacket(packet []byte) error {
      +	switch packet[0] {
      +	case msgChannelData, msgChannelExtendedData:
      +		return c.handleData(packet)
      +	case msgChannelClose:
      +		c.sendMessage(channelCloseMsg{PeersId: c.remoteId})
      +		c.mux.chanList.remove(c.localId)
      +		c.close()
      +		return nil
      +	case msgChannelEOF:
      +		// RFC 4254 is mute on how EOF affects dataExt messages but
      +		// it is logical to signal EOF at the same time.
      +		c.extPending.eof()
      +		c.pending.eof()
      +		return nil
      +	}
      +
      +	decoded, err := decode(packet)
      +	if err != nil {
      +		return err
      +	}
      +
      +	switch msg := decoded.(type) {
      +	case *channelOpenFailureMsg:
      +		if err := c.responseMessageReceived(); err != nil {
      +			return err
      +		}
      +		c.mux.chanList.remove(msg.PeersId)
      +		c.msg <- msg
      +	case *channelOpenConfirmMsg:
      +		if err := c.responseMessageReceived(); err != nil {
      +			return err
      +		}
      +		if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
      +			return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize)
      +		}
      +		c.remoteId = msg.MyId
      +		c.maxRemotePayload = msg.MaxPacketSize
      +		c.remoteWin.add(msg.MyWindow)
      +		c.msg <- msg
      +	case *windowAdjustMsg:
      +		if !c.remoteWin.add(msg.AdditionalBytes) {
      +			return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes)
      +		}
      +	case *channelRequestMsg:
      +		req := Request{
      +			Type:      msg.Request,
      +			WantReply: msg.WantReply,
      +			Payload:   msg.RequestSpecificData,
      +			ch:        c,
      +		}
      +
      +		c.incomingRequests <- &req
      +	default:
      +		c.msg <- msg
      +	}
      +	return nil
      +}
      +
      +func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel {
      +	ch := &channel{
      +		remoteWin:        window{Cond: newCond()},
      +		myWindow:         channelWindowSize,
      +		pending:          newBuffer(),
      +		extPending:       newBuffer(),
      +		direction:        direction,
      +		incomingRequests: make(chan *Request, 16),
      +		msg:              make(chan interface{}, 16),
      +		chanType:         chanType,
      +		extraData:        extraData,
      +		mux:              m,
      +		packetPool:       make(map[uint32][]byte),
      +	}
      +	ch.localId = m.chanList.add(ch)
      +	return ch
      +}
      +
      +var errUndecided = errors.New("ssh: must Accept or Reject channel")
      +var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once")
      +
      +type extChannel struct {
      +	code uint32
      +	ch   *channel
      +}
      +
      +func (e *extChannel) Write(data []byte) (n int, err error) {
      +	return e.ch.WriteExtended(data, e.code)
      +}
      +
      +func (e *extChannel) Read(data []byte) (n int, err error) {
      +	return e.ch.ReadExtended(data, e.code)
      +}
      +
      +func (c *channel) Accept() (Channel, <-chan *Request, error) {
      +	if c.decided {
      +		return nil, nil, errDecidedAlready
      +	}
      +	c.maxIncomingPayload = channelMaxPacket
      +	confirm := channelOpenConfirmMsg{
      +		PeersId:       c.remoteId,
      +		MyId:          c.localId,
      +		MyWindow:      c.myWindow,
      +		MaxPacketSize: c.maxIncomingPayload,
      +	}
      +	c.decided = true
      +	if err := c.sendMessage(confirm); err != nil {
      +		return nil, nil, err
      +	}
      +
      +	return c, c.incomingRequests, nil
      +}
      +
      +func (ch *channel) Reject(reason RejectionReason, message string) error {
      +	if ch.decided {
      +		return errDecidedAlready
      +	}
      +	reject := channelOpenFailureMsg{
      +		PeersId:  ch.remoteId,
      +		Reason:   reason,
      +		Message:  message,
      +		Language: "en",
      +	}
      +	ch.decided = true
      +	return ch.sendMessage(reject)
      +}
      +
      +func (ch *channel) Read(data []byte) (int, error) {
      +	if !ch.decided {
      +		return 0, errUndecided
      +	}
      +	return ch.ReadExtended(data, 0)
      +}
      +
      +func (ch *channel) Write(data []byte) (int, error) {
      +	if !ch.decided {
      +		return 0, errUndecided
      +	}
      +	return ch.WriteExtended(data, 0)
      +}
      +
      +func (ch *channel) CloseWrite() error {
      +	if !ch.decided {
      +		return errUndecided
      +	}
      +	ch.sentEOF = true
      +	return ch.sendMessage(channelEOFMsg{
      +		PeersId: ch.remoteId})
      +}
      +
      +func (ch *channel) Close() error {
      +	if !ch.decided {
      +		return errUndecided
      +	}
      +
      +	return ch.sendMessage(channelCloseMsg{
      +		PeersId: ch.remoteId})
      +}
      +
      +// Extended returns an io.ReadWriter that sends and receives data on the given,
      +// SSH extended stream. Such streams are used, for example, for stderr.
      +func (ch *channel) Extended(code uint32) io.ReadWriter {
      +	if !ch.decided {
      +		return nil
      +	}
      +	return &extChannel{code, ch}
      +}
      +
      +func (ch *channel) Stderr() io.ReadWriter {
      +	return ch.Extended(1)
      +}
      +
      +func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
      +	if !ch.decided {
      +		return false, errUndecided
      +	}
      +
      +	if wantReply {
      +		ch.sentRequestMu.Lock()
      +		defer ch.sentRequestMu.Unlock()
      +	}
      +
      +	msg := channelRequestMsg{
      +		PeersId:             ch.remoteId,
      +		Request:             name,
      +		WantReply:           wantReply,
      +		RequestSpecificData: payload,
      +	}
      +
      +	if err := ch.sendMessage(msg); err != nil {
      +		return false, err
      +	}
      +
      +	if wantReply {
      +		m, ok := (<-ch.msg)
      +		if !ok {
      +			return false, io.EOF
      +		}
      +		switch m.(type) {
      +		case *channelRequestFailureMsg:
      +			return false, nil
      +		case *channelRequestSuccessMsg:
      +			return true, nil
      +		default:
      +			return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m)
      +		}
      +	}
      +
      +	return false, nil
      +}
      +
      +// ackRequest either sends an ack or nack to the channel request.
      +func (ch *channel) ackRequest(ok bool) error {
      +	if !ch.decided {
      +		return errUndecided
      +	}
      +
      +	var msg interface{}
      +	if !ok {
      +		msg = channelRequestFailureMsg{
      +			PeersId: ch.remoteId,
      +		}
      +	} else {
      +		msg = channelRequestSuccessMsg{
      +			PeersId: ch.remoteId,
      +		}
      +	}
      +	return ch.sendMessage(msg)
      +}
      +
      +func (ch *channel) ChannelType() string {
      +	return ch.chanType
      +}
      +
      +func (ch *channel) ExtraData() []byte {
      +	return ch.extraData
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go
      new file mode 100644
      index 00000000..2732963f
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/cipher.go
      @@ -0,0 +1,552 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"crypto/aes"
      +	"crypto/cipher"
      +	"crypto/rc4"
      +	"crypto/subtle"
      +	"encoding/binary"
      +	"errors"
      +	"fmt"
      +	"hash"
      +	"io"
      +	"io/ioutil"
      +)
      +
      +const (
      +	packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher.
      +
      +	// RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations
      +	// MUST be able to process (plus a few more kilobytes for padding and mac). The RFC
      +	// indicates implementations SHOULD be able to handle larger packet sizes, but then
      +	// waffles on about reasonable limits.
      +	//
      +	// OpenSSH caps their maxPacket at 256kB so we choose to do
      +	// the same. maxPacket is also used to ensure that uint32
      +	// length fields do not overflow, so it should remain well
      +	// below 4G.
      +	maxPacket = 256 * 1024
      +)
      +
      +// noneCipher implements cipher.Stream and provides no encryption. It is used
      +// by the transport before the first key-exchange.
      +type noneCipher struct{}
      +
      +func (c noneCipher) XORKeyStream(dst, src []byte) {
      +	copy(dst, src)
      +}
      +
      +func newAESCTR(key, iv []byte) (cipher.Stream, error) {
      +	c, err := aes.NewCipher(key)
      +	if err != nil {
      +		return nil, err
      +	}
      +	return cipher.NewCTR(c, iv), nil
      +}
      +
      +func newRC4(key, iv []byte) (cipher.Stream, error) {
      +	return rc4.NewCipher(key)
      +}
      +
      +type streamCipherMode struct {
      +	keySize    int
      +	ivSize     int
      +	skip       int
      +	createFunc func(key, iv []byte) (cipher.Stream, error)
      +}
      +
      +func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) {
      +	if len(key) < c.keySize {
      +		panic("ssh: key length too small for cipher")
      +	}
      +	if len(iv) < c.ivSize {
      +		panic("ssh: iv too small for cipher")
      +	}
      +
      +	stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize])
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	var streamDump []byte
      +	if c.skip > 0 {
      +		streamDump = make([]byte, 512)
      +	}
      +
      +	for remainingToDump := c.skip; remainingToDump > 0; {
      +		dumpThisTime := remainingToDump
      +		if dumpThisTime > len(streamDump) {
      +			dumpThisTime = len(streamDump)
      +		}
      +		stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
      +		remainingToDump -= dumpThisTime
      +	}
      +
      +	return stream, nil
      +}
      +
      +// cipherModes documents properties of supported ciphers. Ciphers not included
      +// are not supported and will not be negotiated, even if explicitly requested in
      +// ClientConfig.Crypto.Ciphers.
      +var cipherModes = map[string]*streamCipherMode{
      +	// Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms
      +	// are defined in the order specified in the RFC.
      +	"aes128-ctr": {16, aes.BlockSize, 0, newAESCTR},
      +	"aes192-ctr": {24, aes.BlockSize, 0, newAESCTR},
      +	"aes256-ctr": {32, aes.BlockSize, 0, newAESCTR},
      +
      +	// Ciphers from RFC4345, which introduces security-improved arcfour ciphers.
      +	// They are defined in the order specified in the RFC.
      +	"arcfour128": {16, 0, 1536, newRC4},
      +	"arcfour256": {32, 0, 1536, newRC4},
      +
      +	// Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
      +	// Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
      +	// RC4) has problems with weak keys, and should be used with caution."
      +	// RFC4345 introduces improved versions of Arcfour.
      +	"arcfour": {16, 0, 0, newRC4},
      +
      +	// AES-GCM is not a stream cipher, so it is constructed with a
      +	// special case. If we add any more non-stream ciphers, we
      +	// should invest a cleaner way to do this.
      +	gcmCipherID: {16, 12, 0, nil},
      +
      +	// CBC mode is insecure and so is not included in the default config.
      +	// (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
      +	// needed, it's possible to specify a custom Config to enable it.
      +	// You should expect that an active attacker can recover plaintext if
      +	// you do.
      +	aes128cbcID: {16, aes.BlockSize, 0, nil},
      +}
      +
      +// prefixLen is the length of the packet prefix that contains the packet length
      +// and number of padding bytes.
      +const prefixLen = 5
      +
      +// streamPacketCipher is a packetCipher using a stream cipher.
      +type streamPacketCipher struct {
      +	mac    hash.Hash
      +	cipher cipher.Stream
      +
      +	// The following members are to avoid per-packet allocations.
      +	prefix      [prefixLen]byte
      +	seqNumBytes [4]byte
      +	padding     [2 * packetSizeMultiple]byte
      +	packetData  []byte
      +	macResult   []byte
      +}
      +
      +// readPacket reads and decrypt a single packet from the reader argument.
      +func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
      +	if _, err := io.ReadFull(r, s.prefix[:]); err != nil {
      +		return nil, err
      +	}
      +
      +	s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
      +	length := binary.BigEndian.Uint32(s.prefix[0:4])
      +	paddingLength := uint32(s.prefix[4])
      +
      +	var macSize uint32
      +	if s.mac != nil {
      +		s.mac.Reset()
      +		binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
      +		s.mac.Write(s.seqNumBytes[:])
      +		s.mac.Write(s.prefix[:])
      +		macSize = uint32(s.mac.Size())
      +	}
      +
      +	if length <= paddingLength+1 {
      +		return nil, errors.New("ssh: invalid packet length, packet too small")
      +	}
      +
      +	if length > maxPacket {
      +		return nil, errors.New("ssh: invalid packet length, packet too large")
      +	}
      +
      +	// the maxPacket check above ensures that length-1+macSize
      +	// does not overflow.
      +	if uint32(cap(s.packetData)) < length-1+macSize {
      +		s.packetData = make([]byte, length-1+macSize)
      +	} else {
      +		s.packetData = s.packetData[:length-1+macSize]
      +	}
      +
      +	if _, err := io.ReadFull(r, s.packetData); err != nil {
      +		return nil, err
      +	}
      +	mac := s.packetData[length-1:]
      +	data := s.packetData[:length-1]
      +	s.cipher.XORKeyStream(data, data)
      +
      +	if s.mac != nil {
      +		s.mac.Write(data)
      +		s.macResult = s.mac.Sum(s.macResult[:0])
      +		if subtle.ConstantTimeCompare(s.macResult, mac) != 1 {
      +			return nil, errors.New("ssh: MAC failure")
      +		}
      +	}
      +
      +	return s.packetData[:length-paddingLength-1], nil
      +}
      +
      +// writePacket encrypts and sends a packet of data to the writer argument
      +func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
      +	if len(packet) > maxPacket {
      +		return errors.New("ssh: packet too large")
      +	}
      +
      +	paddingLength := packetSizeMultiple - (prefixLen+len(packet))%packetSizeMultiple
      +	if paddingLength < 4 {
      +		paddingLength += packetSizeMultiple
      +	}
      +
      +	length := len(packet) + 1 + paddingLength
      +	binary.BigEndian.PutUint32(s.prefix[:], uint32(length))
      +	s.prefix[4] = byte(paddingLength)
      +	padding := s.padding[:paddingLength]
      +	if _, err := io.ReadFull(rand, padding); err != nil {
      +		return err
      +	}
      +
      +	if s.mac != nil {
      +		s.mac.Reset()
      +		binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
      +		s.mac.Write(s.seqNumBytes[:])
      +		s.mac.Write(s.prefix[:])
      +		s.mac.Write(packet)
      +		s.mac.Write(padding)
      +	}
      +
      +	s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
      +	s.cipher.XORKeyStream(packet, packet)
      +	s.cipher.XORKeyStream(padding, padding)
      +
      +	if _, err := w.Write(s.prefix[:]); err != nil {
      +		return err
      +	}
      +	if _, err := w.Write(packet); err != nil {
      +		return err
      +	}
      +	if _, err := w.Write(padding); err != nil {
      +		return err
      +	}
      +
      +	if s.mac != nil {
      +		s.macResult = s.mac.Sum(s.macResult[:0])
      +		if _, err := w.Write(s.macResult); err != nil {
      +			return err
      +		}
      +	}
      +
      +	return nil
      +}
      +
      +type gcmCipher struct {
      +	aead   cipher.AEAD
      +	prefix [4]byte
      +	iv     []byte
      +	buf    []byte
      +}
      +
      +func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) {
      +	c, err := aes.NewCipher(key)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	aead, err := cipher.NewGCM(c)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return &gcmCipher{
      +		aead: aead,
      +		iv:   iv,
      +	}, nil
      +}
      +
      +const gcmTagSize = 16
      +
      +func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
      +	// Pad out to multiple of 16 bytes. This is different from the
      +	// stream cipher because that encrypts the length too.
      +	padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple)
      +	if padding < 4 {
      +		padding += packetSizeMultiple
      +	}
      +
      +	length := uint32(len(packet) + int(padding) + 1)
      +	binary.BigEndian.PutUint32(c.prefix[:], length)
      +	if _, err := w.Write(c.prefix[:]); err != nil {
      +		return err
      +	}
      +
      +	if cap(c.buf) < int(length) {
      +		c.buf = make([]byte, length)
      +	} else {
      +		c.buf = c.buf[:length]
      +	}
      +
      +	c.buf[0] = padding
      +	copy(c.buf[1:], packet)
      +	if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil {
      +		return err
      +	}
      +	c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:])
      +	if _, err := w.Write(c.buf); err != nil {
      +		return err
      +	}
      +	c.incIV()
      +
      +	return nil
      +}
      +
      +func (c *gcmCipher) incIV() {
      +	for i := 4 + 7; i >= 4; i-- {
      +		c.iv[i]++
      +		if c.iv[i] != 0 {
      +			break
      +		}
      +	}
      +}
      +
      +func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
      +	if _, err := io.ReadFull(r, c.prefix[:]); err != nil {
      +		return nil, err
      +	}
      +	length := binary.BigEndian.Uint32(c.prefix[:])
      +	if length > maxPacket {
      +		return nil, errors.New("ssh: max packet length exceeded.")
      +	}
      +
      +	if cap(c.buf) < int(length+gcmTagSize) {
      +		c.buf = make([]byte, length+gcmTagSize)
      +	} else {
      +		c.buf = c.buf[:length+gcmTagSize]
      +	}
      +
      +	if _, err := io.ReadFull(r, c.buf); err != nil {
      +		return nil, err
      +	}
      +
      +	plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:])
      +	if err != nil {
      +		return nil, err
      +	}
      +	c.incIV()
      +
      +	padding := plain[0]
      +	if padding < 4 || padding >= 20 {
      +		return nil, fmt.Errorf("ssh: illegal padding %d", padding)
      +	}
      +
      +	if int(padding+1) >= len(plain) {
      +		return nil, fmt.Errorf("ssh: padding %d too large", padding)
      +	}
      +	plain = plain[1 : length-uint32(padding)]
      +	return plain, nil
      +}
      +
      +// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1
      +type cbcCipher struct {
      +	mac       hash.Hash
      +	macSize   uint32
      +	decrypter cipher.BlockMode
      +	encrypter cipher.BlockMode
      +
      +	// The following members are to avoid per-packet allocations.
      +	seqNumBytes [4]byte
      +	packetData  []byte
      +	macResult   []byte
      +
      +	// Amount of data we should still read to hide which
      +	// verification error triggered.
      +	oracleCamouflage uint32
      +}
      +
      +func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
      +	c, err := aes.NewCipher(key)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	cbc := &cbcCipher{
      +		mac:        macModes[algs.MAC].new(macKey),
      +		decrypter:  cipher.NewCBCDecrypter(c, iv),
      +		encrypter:  cipher.NewCBCEncrypter(c, iv),
      +		packetData: make([]byte, 1024),
      +	}
      +	if cbc.mac != nil {
      +		cbc.macSize = uint32(cbc.mac.Size())
      +	}
      +
      +	return cbc, nil
      +}
      +
      +func maxUInt32(a, b int) uint32 {
      +	if a > b {
      +		return uint32(a)
      +	}
      +	return uint32(b)
      +}
      +
      +const (
      +	cbcMinPacketSizeMultiple = 8
      +	cbcMinPacketSize         = 16
      +	cbcMinPaddingSize        = 4
      +)
      +
      +// cbcError represents a verification error that may leak information.
      +type cbcError string
      +
      +func (e cbcError) Error() string { return string(e) }
      +
      +func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
      +	p, err := c.readPacketLeaky(seqNum, r)
      +	if err != nil {
      +		if _, ok := err.(cbcError); ok {
      +			// Verification error: read a fixed amount of
      +			// data, to make distinguishing between
      +			// failing MAC and failing length check more
      +			// difficult.
      +			io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage))
      +		}
      +	}
      +	return p, err
      +}
      +
      +func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) {
      +	blockSize := c.decrypter.BlockSize()
      +
      +	// Read the header, which will include some of the subsequent data in the
      +	// case of block ciphers - this is copied back to the payload later.
      +	// How many bytes of payload/padding will be read with this first read.
      +	firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize)
      +	firstBlock := c.packetData[:firstBlockLength]
      +	if _, err := io.ReadFull(r, firstBlock); err != nil {
      +		return nil, err
      +	}
      +
      +	c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength
      +
      +	c.decrypter.CryptBlocks(firstBlock, firstBlock)
      +	length := binary.BigEndian.Uint32(firstBlock[:4])
      +	if length > maxPacket {
      +		return nil, cbcError("ssh: packet too large")
      +	}
      +	if length+4 < maxUInt32(cbcMinPacketSize, blockSize) {
      +		// The minimum size of a packet is 16 (or the cipher block size, whichever
      +		// is larger) bytes.
      +		return nil, cbcError("ssh: packet too small")
      +	}
      +	// The length of the packet (including the length field but not the MAC) must
      +	// be a multiple of the block size or 8, whichever is larger.
      +	if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 {
      +		return nil, cbcError("ssh: invalid packet length multiple")
      +	}
      +
      +	paddingLength := uint32(firstBlock[4])
      +	if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 {
      +		return nil, cbcError("ssh: invalid packet length")
      +	}
      +
      +	// Positions within the c.packetData buffer:
      +	macStart := 4 + length
      +	paddingStart := macStart - paddingLength
      +
      +	// Entire packet size, starting before length, ending at end of mac.
      +	entirePacketSize := macStart + c.macSize
      +
      +	// Ensure c.packetData is large enough for the entire packet data.
      +	if uint32(cap(c.packetData)) < entirePacketSize {
      +		// Still need to upsize and copy, but this should be rare at runtime, only
      +		// on upsizing the packetData buffer.
      +		c.packetData = make([]byte, entirePacketSize)
      +		copy(c.packetData, firstBlock)
      +	} else {
      +		c.packetData = c.packetData[:entirePacketSize]
      +	}
      +
      +	if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil {
      +		return nil, err
      +	} else {
      +		c.oracleCamouflage -= uint32(n)
      +	}
      +
      +	remainingCrypted := c.packetData[firstBlockLength:macStart]
      +	c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted)
      +
      +	mac := c.packetData[macStart:]
      +	if c.mac != nil {
      +		c.mac.Reset()
      +		binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
      +		c.mac.Write(c.seqNumBytes[:])
      +		c.mac.Write(c.packetData[:macStart])
      +		c.macResult = c.mac.Sum(c.macResult[:0])
      +		if subtle.ConstantTimeCompare(c.macResult, mac) != 1 {
      +			return nil, cbcError("ssh: MAC failure")
      +		}
      +	}
      +
      +	return c.packetData[prefixLen:paddingStart], nil
      +}
      +
      +func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
      +	effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize())
      +
      +	// Length of encrypted portion of the packet (header, payload, padding).
      +	// Enforce minimum padding and packet size.
      +	encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize)
      +	// Enforce block size.
      +	encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize
      +
      +	length := encLength - 4
      +	paddingLength := int(length) - (1 + len(packet))
      +
      +	// Overall buffer contains: header, payload, padding, mac.
      +	// Space for the MAC is reserved in the capacity but not the slice length.
      +	bufferSize := encLength + c.macSize
      +	if uint32(cap(c.packetData)) < bufferSize {
      +		c.packetData = make([]byte, encLength, bufferSize)
      +	} else {
      +		c.packetData = c.packetData[:encLength]
      +	}
      +
      +	p := c.packetData
      +
      +	// Packet header.
      +	binary.BigEndian.PutUint32(p, length)
      +	p = p[4:]
      +	p[0] = byte(paddingLength)
      +
      +	// Payload.
      +	p = p[1:]
      +	copy(p, packet)
      +
      +	// Padding.
      +	p = p[len(packet):]
      +	if _, err := io.ReadFull(rand, p); err != nil {
      +		return err
      +	}
      +
      +	if c.mac != nil {
      +		c.mac.Reset()
      +		binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
      +		c.mac.Write(c.seqNumBytes[:])
      +		c.mac.Write(c.packetData)
      +		// The MAC is now appended into the capacity reserved for it earlier.
      +		c.packetData = c.mac.Sum(c.packetData)
      +	}
      +
      +	c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength])
      +
      +	if _, err := w.Write(c.packetData); err != nil {
      +		return err
      +	}
      +
      +	return nil
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/cipher_test.go b/vendor/golang.org/x/crypto/ssh/cipher_test.go
      new file mode 100644
      index 00000000..54b92b6e
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/cipher_test.go
      @@ -0,0 +1,127 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bytes"
      +	"crypto"
      +	"crypto/aes"
      +	"crypto/rand"
      +	"testing"
      +)
      +
      +func TestDefaultCiphersExist(t *testing.T) {
      +	for _, cipherAlgo := range supportedCiphers {
      +		if _, ok := cipherModes[cipherAlgo]; !ok {
      +			t.Errorf("default cipher %q is unknown", cipherAlgo)
      +		}
      +	}
      +}
      +
      +func TestPacketCiphers(t *testing.T) {
+	// Still test the aes128cbc cipher although it's commented out.
      +	cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil}
      +	defer delete(cipherModes, aes128cbcID)
      +
      +	for cipher := range cipherModes {
      +		kr := &kexResult{Hash: crypto.SHA1}
      +		algs := directionAlgorithms{
      +			Cipher:      cipher,
      +			MAC:         "hmac-sha1",
      +			Compression: "none",
      +		}
      +		client, err := newPacketCipher(clientKeys, algs, kr)
      +		if err != nil {
      +			t.Errorf("newPacketCipher(client, %q): %v", cipher, err)
      +			continue
      +		}
      +		server, err := newPacketCipher(clientKeys, algs, kr)
      +		if err != nil {
      +			t.Errorf("newPacketCipher(client, %q): %v", cipher, err)
      +			continue
      +		}
      +
      +		want := "bla bla"
      +		input := []byte(want)
      +		buf := &bytes.Buffer{}
      +		if err := client.writePacket(0, buf, rand.Reader, input); err != nil {
      +			t.Errorf("writePacket(%q): %v", cipher, err)
      +			continue
      +		}
      +
      +		packet, err := server.readPacket(0, buf)
      +		if err != nil {
      +			t.Errorf("readPacket(%q): %v", cipher, err)
      +			continue
      +		}
      +
      +		if string(packet) != want {
      +			t.Errorf("roundtrip(%q): got %q, want %q", cipher, packet, want)
      +		}
      +	}
      +}
      +
      +func TestCBCOracleCounterMeasure(t *testing.T) {
      +	cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil}
      +	defer delete(cipherModes, aes128cbcID)
      +
      +	kr := &kexResult{Hash: crypto.SHA1}
      +	algs := directionAlgorithms{
      +		Cipher:      aes128cbcID,
      +		MAC:         "hmac-sha1",
      +		Compression: "none",
      +	}
      +	client, err := newPacketCipher(clientKeys, algs, kr)
      +	if err != nil {
      +		t.Fatalf("newPacketCipher(client): %v", err)
      +	}
      +
      +	want := "bla bla"
      +	input := []byte(want)
      +	buf := &bytes.Buffer{}
      +	if err := client.writePacket(0, buf, rand.Reader, input); err != nil {
      +		t.Errorf("writePacket: %v", err)
      +	}
      +
      +	packetSize := buf.Len()
      +	buf.Write(make([]byte, 2*maxPacket))
      +
      +	// We corrupt each byte, but this usually will only test the
      +	// 'packet too large' or 'MAC failure' cases.
      +	lastRead := -1
      +	for i := 0; i < packetSize; i++ {
      +		server, err := newPacketCipher(clientKeys, algs, kr)
      +		if err != nil {
      +			t.Fatalf("newPacketCipher(client): %v", err)
      +		}
      +
      +		fresh := &bytes.Buffer{}
      +		fresh.Write(buf.Bytes())
      +		fresh.Bytes()[i] ^= 0x01
      +
      +		before := fresh.Len()
      +		_, err = server.readPacket(0, fresh)
      +		if err == nil {
      +			t.Errorf("corrupt byte %d: readPacket succeeded ", i)
      +			continue
      +		}
      +		if _, ok := err.(cbcError); !ok {
      +			t.Errorf("corrupt byte %d: got %v (%T), want cbcError", i, err, err)
      +			continue
      +		}
      +
      +		after := fresh.Len()
      +		bytesRead := before - after
      +		if bytesRead < maxPacket {
      +			t.Errorf("corrupt byte %d: read %d bytes, want more than %d", i, bytesRead, maxPacket)
      +			continue
      +		}
      +
      +		if i > 0 && bytesRead != lastRead {
      +			t.Errorf("corrupt byte %d: read %d bytes, want %d bytes read", i, bytesRead, lastRead)
      +		}
      +		lastRead = bytesRead
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go
      new file mode 100644
      index 00000000..0b9fbe50
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/client.go
      @@ -0,0 +1,213 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"errors"
      +	"fmt"
      +	"net"
      +	"sync"
      +)
      +
      +// Client implements a traditional SSH client that supports shells,
      +// subprocesses, port forwarding and tunneled dialing.
      +type Client struct {
      +	Conn
      +
      +	forwards        forwardList // forwarded tcpip connections from the remote side
      +	mu              sync.Mutex
      +	channelHandlers map[string]chan NewChannel
      +}
      +
      +// HandleChannelOpen returns a channel on which NewChannel requests
      +// for the given type are sent. If the type already is being handled,
      +// nil is returned. The channel is closed when the connection is closed.
      +func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel {
      +	c.mu.Lock()
      +	defer c.mu.Unlock()
      +	if c.channelHandlers == nil {
      +		// The SSH channel has been closed.
      +		c := make(chan NewChannel)
      +		close(c)
      +		return c
      +	}
      +
      +	ch := c.channelHandlers[channelType]
      +	if ch != nil {
      +		return nil
      +	}
      +
      +	ch = make(chan NewChannel, 16)
      +	c.channelHandlers[channelType] = ch
      +	return ch
      +}
      +
      +// NewClient creates a Client on top of the given connection.
      +func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client {
      +	conn := &Client{
      +		Conn:            c,
      +		channelHandlers: make(map[string]chan NewChannel, 1),
      +	}
      +
      +	go conn.handleGlobalRequests(reqs)
      +	go conn.handleChannelOpens(chans)
      +	go func() {
      +		conn.Wait()
      +		conn.forwards.closeAll()
      +	}()
      +	go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip"))
      +	return conn
      +}
      +
      +// NewClientConn establishes an authenticated SSH connection using c
      +// as the underlying transport.  The Request and NewChannel channels
      +// must be serviced or the connection will hang.
      +func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) {
      +	fullConf := *config
      +	fullConf.SetDefaults()
      +	conn := &connection{
      +		sshConn: sshConn{conn: c},
      +	}
      +
      +	if err := conn.clientHandshake(addr, &fullConf); err != nil {
      +		c.Close()
      +		return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err)
      +	}
      +	conn.mux = newMux(conn.transport)
      +	return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil
      +}
      +
      +// clientHandshake performs the client side key exchange. See RFC 4253 Section
      +// 7.
      +func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error {
      +	if config.ClientVersion != "" {
      +		c.clientVersion = []byte(config.ClientVersion)
      +	} else {
      +		c.clientVersion = []byte(packageVersion)
      +	}
      +	var err error
      +	c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion)
      +	if err != nil {
      +		return err
      +	}
      +
      +	c.transport = newClientTransport(
      +		newTransport(c.sshConn.conn, config.Rand, true /* is client */),
      +		c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr())
      +	if err := c.transport.requestKeyChange(); err != nil {
      +		return err
      +	}
      +
      +	if packet, err := c.transport.readPacket(); err != nil {
      +		return err
      +	} else if packet[0] != msgNewKeys {
      +		return unexpectedMessageError(msgNewKeys, packet[0])
      +	}
      +
      +	// We just did the key change, so the session ID is established.
      +	c.sessionID = c.transport.getSessionID()
      +
      +	return c.clientAuthenticate(config)
      +}
      +
      +// verifyHostKeySignature verifies the host key obtained in the key
      +// exchange.
      +func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
      +	sig, rest, ok := parseSignatureBody(result.Signature)
      +	if len(rest) > 0 || !ok {
      +		return errors.New("ssh: signature parse error")
      +	}
      +
      +	return hostKey.Verify(result.H, sig)
      +}
      +
      +// NewSession opens a new Session for this client. (A session is a remote
      +// execution of a program.)
      +func (c *Client) NewSession() (*Session, error) {
      +	ch, in, err := c.OpenChannel("session", nil)
      +	if err != nil {
      +		return nil, err
      +	}
      +	return newSession(ch, in)
      +}
      +
      +func (c *Client) handleGlobalRequests(incoming <-chan *Request) {
      +	for r := range incoming {
      +		// This handles keepalive messages and matches
      +		// the behaviour of OpenSSH.
      +		r.Reply(false, nil)
      +	}
      +}
      +
+// handleChannelOpens handles channel open messages from the remote side.
      +func (c *Client) handleChannelOpens(in <-chan NewChannel) {
      +	for ch := range in {
      +		c.mu.Lock()
      +		handler := c.channelHandlers[ch.ChannelType()]
      +		c.mu.Unlock()
      +
      +		if handler != nil {
      +			handler <- ch
      +		} else {
      +			ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType()))
      +		}
      +	}
      +
      +	c.mu.Lock()
      +	for _, ch := range c.channelHandlers {
      +		close(ch)
      +	}
      +	c.channelHandlers = nil
      +	c.mu.Unlock()
      +}
      +
      +// Dial starts a client connection to the given SSH server. It is a
      +// convenience function that connects to the given network address,
      +// initiates the SSH handshake, and then sets up a Client.  For access
      +// to incoming channels and requests, use net.Dial with NewClientConn
      +// instead.
      +func Dial(network, addr string, config *ClientConfig) (*Client, error) {
      +	conn, err := net.Dial(network, addr)
      +	if err != nil {
      +		return nil, err
      +	}
      +	c, chans, reqs, err := NewClientConn(conn, addr, config)
      +	if err != nil {
      +		return nil, err
      +	}
      +	return NewClient(c, chans, reqs), nil
      +}
      +
      +// A ClientConfig structure is used to configure a Client. It must not be
      +// modified after having been passed to an SSH function.
      +type ClientConfig struct {
      +	// Config contains configuration that is shared between clients and
      +	// servers.
      +	Config
      +
      +	// User contains the username to authenticate as.
      +	User string
      +
      +	// Auth contains possible authentication methods to use with the
      +	// server. Only the first instance of a particular RFC 4252 method will
      +	// be used during authentication.
      +	Auth []AuthMethod
      +
      +	// HostKeyCallback, if not nil, is called during the cryptographic
      +	// handshake to validate the server's host key. A nil HostKeyCallback
      +	// implies that all host keys are accepted.
      +	HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
      +
      +	// ClientVersion contains the version identification string that will
      +	// be used for the connection. If empty, a reasonable default is used.
      +	ClientVersion string
      +
      +	// HostKeyAlgorithms lists the key types that the client will
      +	// accept from the server as host key, in order of
      +	// preference. If empty, a reasonable default is used. Any
      +	// string returned from PublicKey.Type method may be used, or
      +	// any of the CertAlgoXxxx and KeyAlgoXxxx constants.
      +	HostKeyAlgorithms []string
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
      new file mode 100644
      index 00000000..e15be3ef
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go
      @@ -0,0 +1,441 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bytes"
      +	"errors"
      +	"fmt"
      +	"io"
      +)
      +
      +// clientAuthenticate authenticates with the remote server. See RFC 4252.
      +func (c *connection) clientAuthenticate(config *ClientConfig) error {
      +	// initiate user auth session
      +	if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil {
      +		return err
      +	}
      +	packet, err := c.transport.readPacket()
      +	if err != nil {
      +		return err
      +	}
      +	var serviceAccept serviceAcceptMsg
      +	if err := Unmarshal(packet, &serviceAccept); err != nil {
      +		return err
      +	}
      +
      +	// during the authentication phase the client first attempts the "none" method
      +	// then any untried methods suggested by the server.
      +	tried := make(map[string]bool)
      +	var lastMethods []string
      +	for auth := AuthMethod(new(noneAuth)); auth != nil; {
      +		ok, methods, err := auth.auth(c.transport.getSessionID(), config.User, c.transport, config.Rand)
      +		if err != nil {
      +			return err
      +		}
      +		if ok {
      +			// success
      +			return nil
      +		}
      +		tried[auth.method()] = true
      +		if methods == nil {
      +			methods = lastMethods
      +		}
      +		lastMethods = methods
      +
      +		auth = nil
      +
      +	findNext:
      +		for _, a := range config.Auth {
      +			candidateMethod := a.method()
      +			if tried[candidateMethod] {
      +				continue
      +			}
      +			for _, meth := range methods {
      +				if meth == candidateMethod {
      +					auth = a
      +					break findNext
      +				}
      +			}
      +		}
      +	}
      +	return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried))
      +}
      +
      +func keys(m map[string]bool) []string {
      +	s := make([]string, 0, len(m))
      +
      +	for key := range m {
      +		s = append(s, key)
      +	}
      +	return s
      +}
      +
      +// An AuthMethod represents an instance of an RFC 4252 authentication method.
      +type AuthMethod interface {
      +	// auth authenticates user over transport t.
      +	// Returns true if authentication is successful.
      +	// If authentication is not successful, a []string of alternative
      +	// method names is returned. If the slice is nil, it will be ignored
      +	// and the previous set of possible methods will be reused.
      +	auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error)
      +
      +	// method returns the RFC 4252 method name.
      +	method() string
      +}
      +
      +// "none" authentication, RFC 4252 section 5.2.
      +type noneAuth int
      +
      +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
      +	if err := c.writePacket(Marshal(&userAuthRequestMsg{
      +		User:    user,
      +		Service: serviceSSH,
      +		Method:  "none",
      +	})); err != nil {
      +		return false, nil, err
      +	}
      +
      +	return handleAuthResponse(c)
      +}
      +
      +func (n *noneAuth) method() string {
      +	return "none"
      +}
      +
      +// passwordCallback is an AuthMethod that fetches the password through
      +// a function call, e.g. by prompting the user.
      +type passwordCallback func() (password string, err error)
      +
      +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
      +	type passwordAuthMsg struct {
      +		User     string `sshtype:"50"`
      +		Service  string
      +		Method   string
      +		Reply    bool
      +		Password string
      +	}
      +
      +	pw, err := cb()
      +	// REVIEW NOTE: is there a need to support skipping a password attempt?
      +	// The program may only find out that the user doesn't have a password
      +	// when prompting.
      +	if err != nil {
      +		return false, nil, err
      +	}
      +
      +	if err := c.writePacket(Marshal(&passwordAuthMsg{
      +		User:     user,
      +		Service:  serviceSSH,
      +		Method:   cb.method(),
      +		Reply:    false,
      +		Password: pw,
      +	})); err != nil {
      +		return false, nil, err
      +	}
      +
      +	return handleAuthResponse(c)
      +}
      +
      +func (cb passwordCallback) method() string {
      +	return "password"
      +}
      +
      +// Password returns an AuthMethod using the given password.
      +func Password(secret string) AuthMethod {
      +	return passwordCallback(func() (string, error) { return secret, nil })
      +}
      +
      +// PasswordCallback returns an AuthMethod that uses a callback for
      +// fetching a password.
      +func PasswordCallback(prompt func() (secret string, err error)) AuthMethod {
      +	return passwordCallback(prompt)
      +}
      +
      +type publickeyAuthMsg struct {
      +	User    string `sshtype:"50"`
      +	Service string
      +	Method  string
      +	// HasSig indicates to the receiver packet that the auth request is signed and
      +	// should be used for authentication of the request.
      +	HasSig   bool
      +	Algoname string
      +	PubKey   []byte
      +	// Sig is tagged with "rest" so Marshal will exclude it during
      +	// validateKey
      +	Sig []byte `ssh:"rest"`
      +}
      +
      +// publicKeyCallback is an AuthMethod that uses a set of key
      +// pairs for authentication.
      +type publicKeyCallback func() ([]Signer, error)
      +
      +func (cb publicKeyCallback) method() string {
      +	return "publickey"
      +}
      +
      +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
      +	// Authentication is performed in two stages. The first stage sends an
      +	// enquiry to test if each key is acceptable to the remote. The second
      +	// stage attempts to authenticate with the valid keys obtained in the
      +	// first stage.
      +
      +	signers, err := cb()
      +	if err != nil {
      +		return false, nil, err
      +	}
      +	var validKeys []Signer
      +	for _, signer := range signers {
      +		if ok, err := validateKey(signer.PublicKey(), user, c); ok {
      +			validKeys = append(validKeys, signer)
      +		} else {
      +			if err != nil {
      +				return false, nil, err
      +			}
      +		}
      +	}
      +
      +	// methods that may continue if this auth is not successful.
      +	var methods []string
      +	for _, signer := range validKeys {
      +		pub := signer.PublicKey()
      +
      +		pubKey := pub.Marshal()
      +		sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{
      +			User:    user,
      +			Service: serviceSSH,
      +			Method:  cb.method(),
      +		}, []byte(pub.Type()), pubKey))
      +		if err != nil {
      +			return false, nil, err
      +		}
      +
      +		// manually wrap the serialized signature in a string
      +		s := Marshal(sign)
      +		sig := make([]byte, stringLength(len(s)))
      +		marshalString(sig, s)
      +		msg := publickeyAuthMsg{
      +			User:     user,
      +			Service:  serviceSSH,
      +			Method:   cb.method(),
      +			HasSig:   true,
      +			Algoname: pub.Type(),
      +			PubKey:   pubKey,
      +			Sig:      sig,
      +		}
      +		p := Marshal(&msg)
      +		if err := c.writePacket(p); err != nil {
      +			return false, nil, err
      +		}
      +		var success bool
      +		success, methods, err = handleAuthResponse(c)
      +		if err != nil {
      +			return false, nil, err
      +		}
      +		if success {
      +			return success, methods, err
      +		}
      +	}
      +	return false, methods, nil
      +}
      +
      +// validateKey validates the key provided is acceptable to the server.
      +func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
      +	pubKey := key.Marshal()
      +	msg := publickeyAuthMsg{
      +		User:     user,
      +		Service:  serviceSSH,
      +		Method:   "publickey",
      +		HasSig:   false,
      +		Algoname: key.Type(),
      +		PubKey:   pubKey,
      +	}
      +	if err := c.writePacket(Marshal(&msg)); err != nil {
      +		return false, err
      +	}
      +
      +	return confirmKeyAck(key, c)
      +}
      +
      +func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
      +	pubKey := key.Marshal()
      +	algoname := key.Type()
      +
      +	for {
      +		packet, err := c.readPacket()
      +		if err != nil {
      +			return false, err
      +		}
      +		switch packet[0] {
      +		case msgUserAuthBanner:
      +			// TODO(gpaul): add callback to present the banner to the user
      +		case msgUserAuthPubKeyOk:
      +			var msg userAuthPubKeyOkMsg
      +			if err := Unmarshal(packet, &msg); err != nil {
      +				return false, err
      +			}
      +			if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
      +				return false, nil
      +			}
      +			return true, nil
      +		case msgUserAuthFailure:
      +			return false, nil
      +		default:
      +			return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
      +		}
      +	}
      +}
      +
      +// PublicKeys returns an AuthMethod that uses the given key
      +// pairs.
      +func PublicKeys(signers ...Signer) AuthMethod {
      +	return publicKeyCallback(func() ([]Signer, error) { return signers, nil })
      +}
      +
      +// PublicKeysCallback returns an AuthMethod that runs the given
      +// function to obtain a list of key pairs.
      +func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod {
      +	return publicKeyCallback(getSigners)
      +}
      +
      +// handleAuthResponse returns whether the preceding authentication request succeeded
      +// along with a list of remaining authentication methods to try next and
      +// an error if an unexpected response was received.
      +func handleAuthResponse(c packetConn) (bool, []string, error) {
      +	for {
      +		packet, err := c.readPacket()
      +		if err != nil {
      +			return false, nil, err
      +		}
      +
      +		switch packet[0] {
      +		case msgUserAuthBanner:
      +			// TODO: add callback to present the banner to the user
      +		case msgUserAuthFailure:
      +			var msg userAuthFailureMsg
      +			if err := Unmarshal(packet, &msg); err != nil {
      +				return false, nil, err
      +			}
      +			return false, msg.Methods, nil
      +		case msgUserAuthSuccess:
      +			return true, nil, nil
      +		case msgDisconnect:
      +			return false, nil, io.EOF
      +		default:
      +			return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
      +		}
      +	}
      +}
      +
      +// KeyboardInteractiveChallenge should print questions, optionally
      +// disabling echoing (e.g. for passwords), and return all the answers.
      +// Challenge may be called multiple times in a single session. After
      +// successful authentication, the server may send a challenge with no
      +// questions, for which the user and instruction messages should be
      +// printed.  RFC 4256 section 3.3 details how the UI should behave for
      +// both CLI and GUI environments.
      +type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
      +
      +// KeyboardInteractive returns a AuthMethod using a prompt/response
      +// sequence controlled by the server.
      +func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod {
      +	return challenge
      +}
      +
      +func (cb KeyboardInteractiveChallenge) method() string {
      +	return "keyboard-interactive"
      +}
      +
      +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
      +	type initiateMsg struct {
      +		User       string `sshtype:"50"`
      +		Service    string
      +		Method     string
      +		Language   string
      +		Submethods string
      +	}
      +
      +	if err := c.writePacket(Marshal(&initiateMsg{
      +		User:    user,
      +		Service: serviceSSH,
      +		Method:  "keyboard-interactive",
      +	})); err != nil {
      +		return false, nil, err
      +	}
      +
      +	for {
      +		packet, err := c.readPacket()
      +		if err != nil {
      +			return false, nil, err
      +		}
      +
+		// like handleAuthResponse, but with fewer options.
      +		switch packet[0] {
      +		case msgUserAuthBanner:
      +			// TODO: Print banners during userauth.
      +			continue
      +		case msgUserAuthInfoRequest:
      +			// OK
      +		case msgUserAuthFailure:
      +			var msg userAuthFailureMsg
      +			if err := Unmarshal(packet, &msg); err != nil {
      +				return false, nil, err
      +			}
      +			return false, msg.Methods, nil
      +		case msgUserAuthSuccess:
      +			return true, nil, nil
      +		default:
      +			return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
      +		}
      +
      +		var msg userAuthInfoRequestMsg
      +		if err := Unmarshal(packet, &msg); err != nil {
      +			return false, nil, err
      +		}
      +
      +		// Manually unpack the prompt/echo pairs.
      +		rest := msg.Prompts
      +		var prompts []string
      +		var echos []bool
      +		for i := 0; i < int(msg.NumPrompts); i++ {
      +			prompt, r, ok := parseString(rest)
      +			if !ok || len(r) == 0 {
      +				return false, nil, errors.New("ssh: prompt format error")
      +			}
      +			prompts = append(prompts, string(prompt))
      +			echos = append(echos, r[0] != 0)
      +			rest = r[1:]
      +		}
      +
      +		if len(rest) != 0 {
      +			return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
      +		}
      +
      +		answers, err := cb(msg.User, msg.Instruction, prompts, echos)
      +		if err != nil {
      +			return false, nil, err
      +		}
      +
      +		if len(answers) != len(prompts) {
      +			return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback")
      +		}
      +		responseLength := 1 + 4
      +		for _, a := range answers {
      +			responseLength += stringLength(len(a))
      +		}
      +		serialized := make([]byte, responseLength)
      +		p := serialized
      +		p[0] = msgUserAuthInfoResponse
      +		p = p[1:]
      +		p = marshalUint32(p, uint32(len(answers)))
      +		for _, a := range answers {
      +			p = marshalString(p, []byte(a))
      +		}
      +
      +		if err := c.writePacket(serialized); err != nil {
      +			return false, nil, err
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/client_auth_test.go b/vendor/golang.org/x/crypto/ssh/client_auth_test.go
      new file mode 100644
      index 00000000..2ea44624
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/client_auth_test.go
      @@ -0,0 +1,393 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bytes"
      +	"crypto/rand"
      +	"errors"
      +	"fmt"
      +	"strings"
      +	"testing"
      +)
      +
+// keyboardInteractive is a test helper that answers keyboard-interactive
+// prompts from a fixed question->answer map.
+type keyboardInteractive map[string]string
+
+// Challenge satisfies the KeyboardInteractiveChallenge signature; unknown
+// questions yield the empty string.
+func (cr keyboardInteractive) Challenge(user string, instruction string, questions []string, echos []bool) ([]string, error) {
+	var answers []string
+	for _, q := range questions {
+		answers = append(answers, cr[q])
+	}
+	return answers, nil
+}
      +
+// reused internally by tests
+var clientPassword = "tiger"
+
+// tryAuth runs a handshake with a given config against an SSH server
+// with config serverConfig. It returns the client-side handshake error
+// (nil on successful authentication). The server accepts user "testuser"
+// via password, the test RSA public key, certificates signed by the test
+// ECDSA key, and a fixed keyboard-interactive exchange.
+func tryAuth(t *testing.T, config *ClientConfig) error {
+	c1, c2, err := netPipe()
+	if err != nil {
+		t.Fatalf("netPipe: %v", err)
+	}
+	defer c1.Close()
+	defer c2.Close()
+
+	certChecker := CertChecker{
+		IsAuthority: func(k PublicKey) bool {
+			// Only the test ECDSA key may sign user certificates.
+			return bytes.Equal(k.Marshal(), testPublicKeys["ecdsa"].Marshal())
+		},
+		UserKeyFallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) {
+			if conn.User() == "testuser" && bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) {
+				return nil, nil
+			}
+
+			return nil, fmt.Errorf("pubkey for %q not acceptable", conn.User())
+		},
+		IsRevoked: func(c *Certificate) bool {
+			// Serial 666 is used by tests to exercise revocation.
+			return c.Serial == 666
+		},
+	}
+
+	serverConfig := &ServerConfig{
+		PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) {
+			if conn.User() == "testuser" && string(pass) == clientPassword {
+				return nil, nil
+			}
+			return nil, errors.New("password auth failed")
+		},
+		PublicKeyCallback: certChecker.Authenticate,
+		KeyboardInteractiveCallback: func(conn ConnMetadata, challenge KeyboardInteractiveChallenge) (*Permissions, error) {
+			ans, err := challenge("user",
+				"instruction",
+				[]string{"question1", "question2"},
+				[]bool{true, true})
+			if err != nil {
+				return nil, err
+			}
+			ok := conn.User() == "testuser" && ans[0] == "answer1" && ans[1] == "answer2"
+			if ok {
+				// A prompt-less challenge delivers a final informational message.
+				challenge("user", "motd", nil, nil)
+				return nil, nil
+			}
+			return nil, errors.New("keyboard-interactive failed")
+		},
+		AuthLogCallback: func(conn ConnMetadata, method string, err error) {
+			t.Logf("user %q, method %q: %v", conn.User(), method, err)
+		},
+	}
+	serverConfig.AddHostKey(testSigners["rsa"])
+
+	go newServer(c1, serverConfig)
+	_, _, _, err = NewClientConn(c2, "", config)
+	return err
+}
      +
+// TestClientAuthPublicKey checks that the acceptable RSA key authenticates.
+func TestClientAuthPublicKey(t *testing.T) {
+	config := &ClientConfig{
+		User: "testuser",
+		Auth: []AuthMethod{
+			PublicKeys(testSigners["rsa"]),
+		},
+	}
+	if err := tryAuth(t, config); err != nil {
+		t.Fatalf("unable to dial remote side: %s", err)
+	}
+}
+
+// TestAuthMethodPassword checks that the correct password authenticates.
+func TestAuthMethodPassword(t *testing.T) {
+	config := &ClientConfig{
+		User: "testuser",
+		Auth: []AuthMethod{
+			Password(clientPassword),
+		},
+	}
+
+	if err := tryAuth(t, config); err != nil {
+		t.Fatalf("unable to dial remote side: %s", err)
+	}
+}
+
+// TestAuthMethodFallback checks that auth methods are tried in order:
+// the password callback must not fire when public-key auth succeeds first.
+func TestAuthMethodFallback(t *testing.T) {
+	var passwordCalled bool
+	config := &ClientConfig{
+		User: "testuser",
+		Auth: []AuthMethod{
+			PublicKeys(testSigners["rsa"]),
+			PasswordCallback(
+				func() (string, error) {
+					passwordCalled = true
+					return "WRONG", nil
+				}),
+		},
+	}
+
+	if err := tryAuth(t, config); err != nil {
+		t.Fatalf("unable to dial remote side: %s", err)
+	}
+
+	if passwordCalled {
+		t.Errorf("password auth tried before public-key auth.")
+	}
+}
+
+// TestAuthMethodWrongPassword checks that a failed password attempt
+// falls through to the next method (public key) and still succeeds.
+func TestAuthMethodWrongPassword(t *testing.T) {
+	config := &ClientConfig{
+		User: "testuser",
+		Auth: []AuthMethod{
+			Password("wrong"),
+			PublicKeys(testSigners["rsa"]),
+		},
+	}
+
+	if err := tryAuth(t, config); err != nil {
+		t.Fatalf("unable to dial remote side: %s", err)
+	}
+}
+
+// TestAuthMethodKeyboardInteractive checks a successful
+// keyboard-interactive exchange with the expected answers.
+func TestAuthMethodKeyboardInteractive(t *testing.T) {
+	answers := keyboardInteractive(map[string]string{
+		"question1": "answer1",
+		"question2": "answer2",
+	})
+	config := &ClientConfig{
+		User: "testuser",
+		Auth: []AuthMethod{
+			KeyboardInteractive(answers.Challenge),
+		},
+	}
+
+	if err := tryAuth(t, config); err != nil {
+		t.Fatalf("unable to dial remote side: %s", err)
+	}
+}
+
+// TestAuthMethodWrongKeyboardInteractive checks that wrong answers are
+// rejected.
+func TestAuthMethodWrongKeyboardInteractive(t *testing.T) {
+	answers := keyboardInteractive(map[string]string{
+		"question1": "answer1",
+		"question2": "WRONG",
+	})
+	config := &ClientConfig{
+		User: "testuser",
+		Auth: []AuthMethod{
+			KeyboardInteractive(answers.Challenge),
+		},
+	}
+
+	if err := tryAuth(t, config); err == nil {
+		t.Fatalf("wrong answers should not have authenticated with KeyboardInteractive")
+	}
+}
+
+// the mock server will only authenticate ssh-rsa keys
+func TestAuthMethodInvalidPublicKey(t *testing.T) {
+	config := &ClientConfig{
+		User: "testuser",
+		Auth: []AuthMethod{
+			PublicKeys(testSigners["dsa"]),
+		},
+	}
+
+	if err := tryAuth(t, config); err == nil {
+		t.Fatalf("dsa private key should not have authenticated with rsa public key")
+	}
+}
+
+// the client should authenticate with the second key
+func TestAuthMethodRSAandDSA(t *testing.T) {
+	config := &ClientConfig{
+		User: "testuser",
+		Auth: []AuthMethod{
+			PublicKeys(testSigners["dsa"], testSigners["rsa"]),
+		},
+	}
+	if err := tryAuth(t, config); err != nil {
+		t.Fatalf("client could not authenticate with rsa key: %v", err)
+	}
+}
      +
+// TestClientHMAC checks that authentication succeeds with each supported
+// MAC algorithm pinned individually.
+func TestClientHMAC(t *testing.T) {
+	for _, mac := range supportedMACs {
+		config := &ClientConfig{
+			User: "testuser",
+			Auth: []AuthMethod{
+				PublicKeys(testSigners["rsa"]),
+			},
+			Config: Config{
+				MACs: []string{mac},
+			},
+		}
+		if err := tryAuth(t, config); err != nil {
+			t.Fatalf("client could not authenticate with mac algo %s: %v", mac, err)
+		}
+	}
+}
+
+// issue 4285.
+func TestClientUnsupportedCipher(t *testing.T) {
+	config := &ClientConfig{
+		User: "testuser",
+		Auth: []AuthMethod{
+			PublicKeys(),
+		},
+		Config: Config{
+			Ciphers: []string{"aes128-cbc"}, // not currently supported
+		},
+	}
+	if err := tryAuth(t, config); err == nil {
+		t.Errorf("expected no ciphers in common")
+	}
+}
+
+// TestClientUnsupportedKex checks that a kex algorithm the library does
+// not implement fails negotiation with a "common algorithm" error.
+func TestClientUnsupportedKex(t *testing.T) {
+	config := &ClientConfig{
+		User: "testuser",
+		Auth: []AuthMethod{
+			PublicKeys(),
+		},
+		Config: Config{
+			KeyExchanges: []string{"diffie-hellman-group-exchange-sha256"}, // not currently supported
+		},
+	}
+	if err := tryAuth(t, config); err == nil || !strings.Contains(err.Error(), "common algorithm") {
+		t.Errorf("got %v, expected 'common algorithm'", err)
+	}
+}
      +
+// TestClientLoginCert exercises certificate-based login: a valid cert,
+// then a sequence of mutations (corrupted signature, revoked serial,
+// wrong signing key, wrong cert type, principals, critical options)
+// each re-signed and retried against the server's CertChecker.
+func TestClientLoginCert(t *testing.T) {
+	cert := &Certificate{
+		Key:         testPublicKeys["rsa"],
+		ValidBefore: CertTimeInfinity,
+		CertType:    UserCert,
+	}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	certSigner, err := NewCertSigner(cert, testSigners["rsa"])
+	if err != nil {
+		t.Fatalf("NewCertSigner: %v", err)
+	}
+
+	clientConfig := &ClientConfig{
+		User: "user",
+	}
+	clientConfig.Auth = append(clientConfig.Auth, PublicKeys(certSigner))
+
+	t.Log("should succeed")
+	if err := tryAuth(t, clientConfig); err != nil {
+		t.Errorf("cert login failed: %v", err)
+	}
+
+	t.Log("corrupted signature")
+	cert.Signature.Blob[0]++
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("cert login passed with corrupted sig")
+	}
+
+	t.Log("revoked")
+	// Serial 666 is what tryAuth's IsRevoked callback rejects.
+	cert.Serial = 666
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("revoked cert login succeeded")
+	}
+	cert.Serial = 1
+
+	t.Log("sign with wrong key")
+	cert.SignCert(rand.Reader, testSigners["dsa"])
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("cert login passed with non-authoritive key")
+	}
+
+	t.Log("host cert")
+	cert.CertType = HostCert
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("cert login passed with wrong type")
+	}
+	cert.CertType = UserCert
+
+	t.Log("principal specified")
+	cert.ValidPrincipals = []string{"user"}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err != nil {
+		t.Errorf("cert login failed: %v", err)
+	}
+
+	t.Log("wrong principal specified")
+	cert.ValidPrincipals = []string{"fred"}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("cert login passed with wrong principal")
+	}
+	cert.ValidPrincipals = nil
+
+	t.Log("added critical option")
+	cert.CriticalOptions = map[string]string{"root-access": "yes"}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("cert login passed with unrecognized critical option")
+	}
+
+	t.Log("allowed source address")
+	cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42/24"}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err != nil {
+		t.Errorf("cert login with source-address failed: %v", err)
+	}
+
+	t.Log("disallowed source address")
+	cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42"}
+	cert.SignCert(rand.Reader, testSigners["ecdsa"])
+	if err := tryAuth(t, clientConfig); err == nil {
+		t.Errorf("cert login with source-address succeeded")
+	}
+}
      +
+// testPermissionsPassing checks that the *Permissions value returned by
+// PublicKeyCallback is (or is not) propagated onto the server connection,
+// depending on withPermissions.
+func testPermissionsPassing(withPermissions bool, t *testing.T) {
+	serverConfig := &ServerConfig{
+		PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) {
+			// User "nopermissions" authenticates but gets a nil
+			// Permissions; everyone else gets a non-nil one.
+			if conn.User() == "nopermissions" {
+				return nil, nil
+			} else {
+				return &Permissions{}, nil
+			}
+		},
+	}
+	serverConfig.AddHostKey(testSigners["rsa"])
+
+	clientConfig := &ClientConfig{
+		Auth: []AuthMethod{
+			PublicKeys(testSigners["rsa"]),
+		},
+	}
+	if withPermissions {
+		clientConfig.User = "permissions"
+	} else {
+		clientConfig.User = "nopermissions"
+	}
+
+	c1, c2, err := netPipe()
+	if err != nil {
+		t.Fatalf("netPipe: %v", err)
+	}
+	defer c1.Close()
+	defer c2.Close()
+
+	go NewClientConn(c2, "", clientConfig)
+	serverConn, err := newServer(c1, serverConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if p := serverConn.Permissions; (p != nil) != withPermissions {
+		t.Fatalf("withPermissions is %t, but Permissions object is %#v", withPermissions, p)
+	}
+}
+
+// TestPermissionsPassing checks the non-nil Permissions case.
+func TestPermissionsPassing(t *testing.T) {
+	testPermissionsPassing(true, t)
+}
+
+// TestNoPermissionsPassing checks the nil Permissions case.
+func TestNoPermissionsPassing(t *testing.T) {
+	testPermissionsPassing(false, t)
+}
      diff --git a/vendor/golang.org/x/crypto/ssh/client_test.go b/vendor/golang.org/x/crypto/ssh/client_test.go
      new file mode 100644
      index 00000000..1fe790cb
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/client_test.go
      @@ -0,0 +1,39 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"net"
      +	"testing"
      +)
      +
+// testClientVersion starts a client handshake over a net.Pipe and checks
+// that the version string the client sends on the wire equals expected.
+func testClientVersion(t *testing.T, config *ClientConfig, expected string) {
+	clientConn, serverConn := net.Pipe()
+	defer clientConn.Close()
+	receivedVersion := make(chan string, 1)
+	go func() {
+		// Read only the version exchange, then hang up; the handshake
+		// error on the client side is irrelevant to this test.
+		version, err := readVersion(serverConn)
+		if err != nil {
+			receivedVersion <- ""
+		} else {
+			receivedVersion <- string(version)
+		}
+		serverConn.Close()
+	}()
+	NewClientConn(clientConn, "", config)
+	actual := <-receivedVersion
+	if actual != expected {
+		t.Fatalf("got %s; want %s", actual, expected)
+	}
+}
+
+// TestCustomClientVersion checks that ClientVersion overrides the default.
+func TestCustomClientVersion(t *testing.T) {
+	version := "Test-Client-Version-0.0"
+	testClientVersion(t, &ClientConfig{ClientVersion: version}, version)
+}
+
+// TestDefaultClientVersion checks that the package default is sent when
+// ClientVersion is unset.
+func TestDefaultClientVersion(t *testing.T) {
+	testClientVersion(t, &ClientConfig{}, packageVersion)
+}
      diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go
      new file mode 100644
      index 00000000..9fc739e1
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/common.go
      @@ -0,0 +1,354 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"crypto"
      +	"crypto/rand"
      +	"fmt"
      +	"io"
      +	"sync"
      +
      +	_ "crypto/sha1"
      +	_ "crypto/sha256"
      +	_ "crypto/sha512"
      +)
      +
+// These are string constants in the SSH protocol.
+const (
+	compressionNone = "none"
+	serviceUserAuth = "ssh-userauth"
+	serviceSSH      = "ssh-connection"
+)
+
+// supportedCiphers specifies the supported ciphers in preference order.
+var supportedCiphers = []string{
+	"aes128-ctr", "aes192-ctr", "aes256-ctr",
+	"aes128-gcm@openssh.com",
+	"arcfour256", "arcfour128",
+}
+
+// supportedKexAlgos specifies the supported key-exchange algorithms in
+// preference order.
+var supportedKexAlgos = []string{
+	kexAlgoCurve25519SHA256,
+	// P384 and P521 are not constant-time yet, but since we don't
+	// reuse ephemeral keys, using them for ECDH should be OK.
+	kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
+	kexAlgoDH14SHA1, kexAlgoDH1SHA1,
+}
+
+// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e.
+// methods of authenticating servers) in preference order. Certificate
+// algorithms come first so that certs are preferred over plain keys.
+var supportedHostKeyAlgos = []string{
+	CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
+	CertAlgoECDSA384v01, CertAlgoECDSA521v01,
+
+	KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
+	KeyAlgoRSA, KeyAlgoDSA,
+}
+
+// supportedMACs specifies a default set of MAC algorithms in preference order.
+// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
+// because they have reached the end of their useful life.
+var supportedMACs = []string{
+	"hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
+}
+
+var supportedCompressions = []string{compressionNone}
+
+// hashFuncs keeps the mapping of supported algorithms to their respective
+// hashes needed for signature verification.
+var hashFuncs = map[string]crypto.Hash{
+	KeyAlgoRSA:          crypto.SHA1,
+	KeyAlgoDSA:          crypto.SHA1,
+	KeyAlgoECDSA256:     crypto.SHA256,
+	KeyAlgoECDSA384:     crypto.SHA384,
+	KeyAlgoECDSA521:     crypto.SHA512,
+	CertAlgoRSAv01:      crypto.SHA1,
+	CertAlgoDSAv01:      crypto.SHA1,
+	CertAlgoECDSA256v01: crypto.SHA256,
+	CertAlgoECDSA384v01: crypto.SHA384,
+	CertAlgoECDSA521v01: crypto.SHA512,
+}
      +
+// unexpectedMessageError results when the SSH message that we received didn't
+// match what we wanted.
+func unexpectedMessageError(expected, got uint8) error {
+	return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected)
+}
+
+// parseError results from a malformed SSH message.
+func parseError(tag uint8) error {
+	return fmt.Errorf("ssh: parse error in message type %d", tag)
+}
+
+// findCommon returns the first algorithm in the client's preference list
+// that the server also offers, per RFC 4253 section 7.1 negotiation.
+// what names the algorithm category and is used only in the error message.
+func findCommon(what string, client []string, server []string) (common string, err error) {
+	for _, c := range client {
+		for _, s := range server {
+			if c == s {
+				return c, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
+}
+
+// directionAlgorithms records the negotiated algorithms for one direction
+// (client-to-server or server-to-client) of the connection.
+type directionAlgorithms struct {
+	Cipher      string
+	MAC         string
+	Compression string
+}
+
+// algorithms holds the complete outcome of algorithm negotiation.
+type algorithms struct {
+	kex     string
+	hostKey string
+	w       directionAlgorithms // write (outgoing) direction
+	r       directionAlgorithms // read (incoming) direction
+}
      +
+// findAgreedAlgorithms negotiates every algorithm category from the two
+// sides' KEXINIT messages, preferring the client's ordering. It returns an
+// error naming the first category with no common algorithm.
+func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
+	result := &algorithms{}
+
+	result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
+	if err != nil {
+		return
+	}
+
+	result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
+	if err != nil {
+		return
+	}
+
+	result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
+	if err != nil {
+		return
+	}
+
+	result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
+	if err != nil {
+		return
+	}
+
+	result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
+	if err != nil {
+		return
+	}
+
+	result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
+	if err != nil {
+		return
+	}
+
+	result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
+	if err != nil {
+		return
+	}
+
+	result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
+	if err != nil {
+		return
+	}
+
+	return result, nil
+}
      +
+// If rekeythreshold is too small, we can't make any progress sending
+// stuff.
+const minRekeyThreshold uint64 = 256
+
+// Config contains configuration data common to both ServerConfig and
+// ClientConfig. Zero-valued fields are filled in by SetDefaults.
+type Config struct {
+	// Rand provides the source of entropy for cryptographic
+	// primitives. If Rand is nil, the cryptographic random reader
+	// in package crypto/rand will be used.
+	Rand io.Reader
+
+	// The maximum number of bytes sent or received after which a
+	// new key is negotiated. It must be at least 256. If
+	// unspecified, 1 gigabyte is used.
+	RekeyThreshold uint64
+
+	// The allowed key exchanges algorithms. If unspecified then a
+	// default set of algorithms is used.
+	KeyExchanges []string
+
+	// The allowed cipher algorithms. If unspecified then a sensible
+	// default is used.
+	Ciphers []string
+
+	// The allowed MAC algorithms. If unspecified then a sensible default
+	// is used.
+	MACs []string
+}
      +
+// SetDefaults sets sensible values for unset fields in config. This is
+// exported for testing: Configs passed to SSH functions are copied and have
+// default values set automatically.
+func (c *Config) SetDefaults() {
+	if c.Rand == nil {
+		c.Rand = rand.Reader
+	}
+	if c.Ciphers == nil {
+		c.Ciphers = supportedCiphers
+	}
+	var ciphers []string
+	for _, c := range c.Ciphers {
+		if cipherModes[c] != nil {
+			// reject the cipher if we have no cipherModes definition
+			ciphers = append(ciphers, c)
+		}
+	}
+	// Keep only ciphers this package can actually instantiate.
+	c.Ciphers = ciphers
+
+	if c.KeyExchanges == nil {
+		c.KeyExchanges = supportedKexAlgos
+	}
+
+	if c.MACs == nil {
+		c.MACs = supportedMACs
+	}
+
+	if c.RekeyThreshold == 0 {
+		// RFC 4253, section 9 suggests rekeying after 1G.
+		c.RekeyThreshold = 1 << 30
+	}
+	if c.RekeyThreshold < minRekeyThreshold {
+		c.RekeyThreshold = minRekeyThreshold
+	}
+}
      +
+// buildDataSignedForAuth returns the data that is signed in order to prove
+// possession of a private key. See RFC 4252, section 7.
+// The session identifier binds the signature to this connection.
+func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
+	data := struct {
+		Session []byte
+		Type    byte
+		User    string
+		Service string
+		Method  string
+		Sign    bool
+		Algo    []byte
+		PubKey  []byte
+	}{
+		sessionId,
+		msgUserAuthRequest,
+		req.User,
+		req.Service,
+		req.Method,
+		true,
+		algo,
+		pubKey,
+	}
+	return Marshal(data)
+}
      +
+// appendU16 appends n to buf in big-endian (network) byte order.
+func appendU16(buf []byte, n uint16) []byte {
+	return append(buf, byte(n>>8), byte(n))
+}
+
+// appendU32 appends n to buf in big-endian byte order.
+func appendU32(buf []byte, n uint32) []byte {
+	return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
+}
+
+// appendU64 appends n to buf in big-endian byte order.
+func appendU64(buf []byte, n uint64) []byte {
+	return append(buf,
+		byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32),
+		byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
+}
+
+// appendInt appends n as a 32-bit big-endian value.
+func appendInt(buf []byte, n int) []byte {
+	return appendU32(buf, uint32(n))
+}
+
+// appendString appends s as an SSH string: uint32 length then the bytes.
+func appendString(buf []byte, s string) []byte {
+	buf = appendU32(buf, uint32(len(s)))
+	buf = append(buf, s...)
+	return buf
+}
+
+// appendBool appends b as a single SSH boolean byte (0 or 1).
+func appendBool(buf []byte, b bool) []byte {
+	if b {
+		return append(buf, 1)
+	}
+	return append(buf, 0)
+}
      +
      +// window represents the buffer available to clients
      +// wishing to write to a channel.
      +type window struct {
      +	*sync.Cond
      +	win          uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1
      +	writeWaiters int
      +	closed       bool
      +}
      +
      +// add adds win to the amount of window available
      +// for consumers.
      +func (w *window) add(win uint32) bool {
      +	// a zero sized window adjust is a noop.
      +	if win == 0 {
      +		return true
      +	}
      +	w.L.Lock()
      +	if w.win+win < win {
      +		w.L.Unlock()
      +		return false
      +	}
      +	w.win += win
      +	// It is unusual that multiple goroutines would be attempting to reserve
      +	// window space, but not guaranteed. Use broadcast to notify all waiters
      +	// that additional window is available.
      +	w.Broadcast()
      +	w.L.Unlock()
      +	return true
      +}
      +
      +// close sets the window to closed, so all reservations fail
      +// immediately.
      +func (w *window) close() {
      +	w.L.Lock()
      +	w.closed = true
      +	w.Broadcast()
      +	w.L.Unlock()
      +}
      +
      +// reserve reserves win from the available window capacity.
      +// If no capacity remains, reserve will block. reserve may
      +// return less than requested.
      +func (w *window) reserve(win uint32) (uint32, error) {
      +	var err error
      +	w.L.Lock()
      +	w.writeWaiters++
      +	w.Broadcast()
      +	for w.win == 0 && !w.closed {
      +		w.Wait()
      +	}
      +	w.writeWaiters--
      +	if w.win < win {
      +		win = w.win
      +	}
      +	w.win -= win
      +	if w.closed {
      +		err = io.EOF
      +	}
      +	w.L.Unlock()
      +	return win, err
      +}
      +
      +// waitWriterBlocked waits until some goroutine is blocked for further
      +// writes. It is used in tests only.
      +func (w *window) waitWriterBlocked() {
      +	w.Cond.L.Lock()
      +	for w.writeWaiters == 0 {
      +		w.Cond.Wait()
      +	}
      +	w.Cond.L.Unlock()
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go
      new file mode 100644
      index 00000000..979d919e
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/connection.go
      @@ -0,0 +1,144 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"fmt"
      +	"net"
      +)
      +
+// OpenChannelError is returned if the other side rejects an
+// OpenChannel request.
+type OpenChannelError struct {
+	Reason  RejectionReason
+	Message string
+}
+
+// Error implements the error interface.
+func (e *OpenChannelError) Error() string {
+	return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
+}
+
+// ConnMetadata holds metadata for the connection.
+type ConnMetadata interface {
+	// User returns the user ID for this connection.
+	// It is empty if no authentication is used.
+	User() string
+
+	// SessionID returns the session hash, also denoted by H.
+	SessionID() []byte
+
+	// ClientVersion returns the client's version string as hashed
+	// into the session ID.
+	ClientVersion() []byte
+
+	// ServerVersion returns the server's version string as hashed
+	// into the session ID.
+	ServerVersion() []byte
+
+	// RemoteAddr returns the remote address for this connection.
+	RemoteAddr() net.Addr
+
+	// LocalAddr returns the local address for this connection.
+	LocalAddr() net.Addr
+}
+
+// Conn represents an SSH connection for both server and client roles.
+// Conn is the basis for implementing an application layer, such
+// as ClientConn, which implements the traditional shell access for
+// clients.
+type Conn interface {
+	ConnMetadata
+
+	// SendRequest sends a global request, and returns the
+	// reply. If wantReply is true, it returns the response status
+	// and payload. See also RFC4254, section 4.
+	SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
+
+	// OpenChannel tries to open an channel. If the request is
+	// rejected, it returns *OpenChannelError. On success it returns
+	// the SSH Channel and a Go channel for incoming, out-of-band
+	// requests. The Go channel must be serviced, or the
+	// connection will hang.
+	OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
+
+	// Close closes the underlying network connection
+	Close() error
+
+	// Wait blocks until the connection has shut down, and returns the
+	// error causing the shutdown.
+	Wait() error
+
+	// TODO(hanwen): consider exposing:
+	//   RequestKeyChange
+	//   Disconnect
+}
+
+// DiscardRequests consumes and rejects all requests from the
+// passed-in channel. It returns when the channel is closed.
+func DiscardRequests(in <-chan *Request) {
+	for req := range in {
+		if req.WantReply {
+			req.Reply(false, nil)
+		}
+	}
+}
      +
+// A connection represents an incoming connection.
+type connection struct {
+	transport *handshakeTransport
+	sshConn
+
+	// The connection protocol.
+	*mux
+}
+
+// Close closes the underlying network connection.
+func (c *connection) Close() error {
+	return c.sshConn.conn.Close()
+}
+
+// sshconn provides net.Conn metadata, but disallows direct reads and
+// writes.
+type sshConn struct {
+	conn net.Conn
+
+	user          string
+	sessionID     []byte
+	clientVersion []byte
+	serverVersion []byte
+}
+
+// dup returns a copy of src so callers cannot mutate internal state.
+func dup(src []byte) []byte {
+	dst := make([]byte, len(src))
+	copy(dst, src)
+	return dst
+}
+
+// User implements ConnMetadata.
+func (c *sshConn) User() string {
+	return c.user
+}
+
+// RemoteAddr implements ConnMetadata.
+func (c *sshConn) RemoteAddr() net.Addr {
+	return c.conn.RemoteAddr()
+}
+
+// Close closes the underlying net.Conn.
+func (c *sshConn) Close() error {
+	return c.conn.Close()
+}
+
+// LocalAddr implements ConnMetadata.
+func (c *sshConn) LocalAddr() net.Addr {
+	return c.conn.LocalAddr()
+}
+
+// SessionID returns a copy of the session hash H.
+func (c *sshConn) SessionID() []byte {
+	return dup(c.sessionID)
+}
+
+// ClientVersion returns a copy of the client's version string.
+func (c *sshConn) ClientVersion() []byte {
+	return dup(c.clientVersion)
+}
+
+// ServerVersion returns a copy of the server's version string.
+func (c *sshConn) ServerVersion() []byte {
+	return dup(c.serverVersion)
+}
      diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
      new file mode 100644
      index 00000000..d6be8946
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/doc.go
      @@ -0,0 +1,18 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +/*
      +Package ssh implements an SSH client and server.
      +
      +SSH is a transport security protocol, an authentication protocol and a
      +family of application protocols. The most typical application level
      +protocol is a remote shell and this is specifically implemented.  However,
      +the multiplexed nature of SSH is exposed to users that wish to support
      +others.
      +
      +References:
      +  [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
      +  [SSH-PARAMETERS]:    http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
      +*/
      +package ssh // import "golang.org/x/crypto/ssh"
      diff --git a/vendor/golang.org/x/crypto/ssh/example_test.go b/vendor/golang.org/x/crypto/ssh/example_test.go
      new file mode 100644
      index 00000000..dfd9dcab
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/example_test.go
      @@ -0,0 +1,211 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh_test
      +
      +import (
      +	"bytes"
      +	"fmt"
      +	"io/ioutil"
      +	"log"
      +	"net"
      +	"net/http"
      +
      +	"golang.org/x/crypto/ssh"
      +	"golang.org/x/crypto/ssh/terminal"
      +)
      +
      +func ExampleNewServerConn() {
      +	// An SSH server is represented by a ServerConfig, which holds
      +	// certificate details and handles authentication of ServerConns.
      +	config := &ssh.ServerConfig{
      +		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
      +			// Should use constant-time compare (or better, salt+hash) in
      +			// a production setting.
      +			if c.User() == "testuser" && string(pass) == "tiger" {
      +				return nil, nil
      +			}
      +			return nil, fmt.Errorf("password rejected for %q", c.User())
      +		},
      +	}
      +
      +	privateBytes, err := ioutil.ReadFile("id_rsa")
      +	if err != nil {
      +		panic("Failed to load private key")
      +	}
      +
      +	private, err := ssh.ParsePrivateKey(privateBytes)
      +	if err != nil {
      +		panic("Failed to parse private key")
      +	}
      +
      +	config.AddHostKey(private)
      +
      +	// Once a ServerConfig has been configured, connections can be
      +	// accepted.
      +	listener, err := net.Listen("tcp", "0.0.0.0:2022")
      +	if err != nil {
      +		panic("failed to listen for connection")
      +	}
      +	nConn, err := listener.Accept()
      +	if err != nil {
      +		panic("failed to accept incoming connection")
      +	}
      +
      +	// Before use, a handshake must be performed on the incoming
      +	// net.Conn.
      +	_, chans, reqs, err := ssh.NewServerConn(nConn, config)
      +	if err != nil {
      +		panic("failed to handshake")
      +	}
      +	// The incoming Request channel must be serviced.
      +	go ssh.DiscardRequests(reqs)
      +
      +	// Service the incoming Channel channel.
      +	for newChannel := range chans {
      +		// Channels have a type, depending on the application level
      +		// protocol intended. In the case of a shell, the type is
      +		// "session" and ServerShell may be used to present a simple
      +		// terminal interface.
      +		if newChannel.ChannelType() != "session" {
      +			newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
      +			continue
      +		}
      +		channel, requests, err := newChannel.Accept()
      +		if err != nil {
      +			panic("could not accept channel.")
      +		}
      +
      +		// Sessions have out-of-band requests such as "shell",
      +		// "pty-req" and "env".  Here we handle only the
      +		// "shell" request.
      +		go func(in <-chan *ssh.Request) {
      +			for req := range in {
      +				ok := false
      +				switch req.Type {
      +				case "shell":
      +					ok = true
      +					if len(req.Payload) > 0 {
      +						// We don't accept any
      +						// commands, only the
      +						// default shell.
      +						ok = false
      +					}
      +				}
      +				req.Reply(ok, nil)
      +			}
      +		}(requests)
      +
      +		term := terminal.NewTerminal(channel, "> ")
      +
      +		go func() {
      +			defer channel.Close()
      +			for {
      +				line, err := term.ReadLine()
      +				if err != nil {
      +					break
      +				}
      +				fmt.Println(line)
      +			}
      +		}()
      +	}
      +}
      +
      +func ExampleDial() {
      +	// An SSH client is represented with a ClientConn. Currently only
      +	// the "password" authentication method is supported.
      +	//
      +	// To authenticate with the remote server you must pass at least one
      +	// implementation of AuthMethod via the Auth field in ClientConfig.
      +	config := &ssh.ClientConfig{
      +		User: "username",
      +		Auth: []ssh.AuthMethod{
      +			ssh.Password("yourpassword"),
      +		},
      +	}
      +	client, err := ssh.Dial("tcp", "yourserver.com:22", config)
      +	if err != nil {
      +		panic("Failed to dial: " + err.Error())
      +	}
      +
      +	// Each ClientConn can support multiple interactive sessions,
      +	// represented by a Session.
      +	session, err := client.NewSession()
      +	if err != nil {
      +		panic("Failed to create session: " + err.Error())
      +	}
      +	defer session.Close()
      +
      +	// Once a Session is created, you can execute a single command on
      +	// the remote side using the Run method.
      +	var b bytes.Buffer
      +	session.Stdout = &b
      +	if err := session.Run("/usr/bin/whoami"); err != nil {
      +		panic("Failed to run: " + err.Error())
      +	}
      +	fmt.Println(b.String())
      +}
      +
      +func ExampleClient_Listen() {
      +	config := &ssh.ClientConfig{
      +		User: "username",
      +		Auth: []ssh.AuthMethod{
      +			ssh.Password("password"),
      +		},
      +	}
      +	// Dial your ssh server.
      +	conn, err := ssh.Dial("tcp", "localhost:22", config)
      +	if err != nil {
      +		log.Fatalf("unable to connect: %s", err)
      +	}
      +	defer conn.Close()
      +
      +	// Request the remote side to open port 8080 on all interfaces.
      +	l, err := conn.Listen("tcp", "0.0.0.0:8080")
      +	if err != nil {
      +		log.Fatalf("unable to register tcp forward: %v", err)
      +	}
      +	defer l.Close()
      +
      +	// Serve HTTP with your SSH server acting as a reverse proxy.
      +	http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
      +		fmt.Fprintf(resp, "Hello world!\n")
      +	}))
      +}
      +
      +func ExampleSession_RequestPty() {
      +	// Create client config
      +	config := &ssh.ClientConfig{
      +		User: "username",
      +		Auth: []ssh.AuthMethod{
      +			ssh.Password("password"),
      +		},
      +	}
      +	// Connect to ssh server
      +	conn, err := ssh.Dial("tcp", "localhost:22", config)
      +	if err != nil {
      +		log.Fatalf("unable to connect: %s", err)
      +	}
      +	defer conn.Close()
      +	// Create a session
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		log.Fatalf("unable to create session: %s", err)
      +	}
      +	defer session.Close()
      +	// Set up terminal modes
      +	modes := ssh.TerminalModes{
      +		ssh.ECHO:          0,     // disable echoing
      +		ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
      +		ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
      +	}
      +	// Request pseudo terminal
      +	if err := session.RequestPty("xterm", 80, 40, modes); err != nil {
      +		log.Fatalf("request for pseudo terminal failed: %s", err)
      +	}
      +	// Start remote shell
      +	if err := session.Shell(); err != nil {
      +		log.Fatalf("failed to start shell: %s", err)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
      new file mode 100644
      index 00000000..1c54f758
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/handshake.go
      @@ -0,0 +1,412 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"crypto/rand"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"log"
      +	"net"
      +	"sync"
      +)
      +
      +// debugHandshake, if set, prints messages sent and received.  Key
      +// exchange messages are printed as if DH were used, so the debug
      +// messages are wrong when using ECDH.
      +const debugHandshake = false
      +
      +// keyingTransport is a packet based transport that supports key
      +// changes. It need not be thread-safe. It should pass through
      +// msgNewKeys in both directions.
      +type keyingTransport interface {
      +	packetConn
      +
      +	// prepareKeyChange sets up a key change. The key change for a
      +	// direction will be effected if a msgNewKeys message is sent
      +	// or received.
      +	prepareKeyChange(*algorithms, *kexResult) error
      +
      +	// getSessionID returns the session ID. prepareKeyChange must
      +	// have been called once.
      +	getSessionID() []byte
      +}
      +
      +// rekeyingTransport is the interface of handshakeTransport that we
      +// (internally) expose to ClientConn and ServerConn.
      +type rekeyingTransport interface {
      +	packetConn
      +
      +	// requestKeyChange asks the remote side to change keys. All
      +	// writes are blocked until the key change succeeds, which is
      +	// signaled by reading a msgNewKeys.
      +	requestKeyChange() error
      +
      +	// getSessionID returns the session ID. This is only valid
      +	// after the first key change has completed.
      +	getSessionID() []byte
      +}
      +
      +// handshakeTransport implements rekeying on top of a keyingTransport
      +// and offers a thread-safe writePacket() interface.
      +type handshakeTransport struct {
      +	conn   keyingTransport
      +	config *Config
      +
      +	serverVersion []byte
      +	clientVersion []byte
      +
      +	// hostKeys is non-empty if we are the server. In that case,
      +	// it contains all host keys that can be used to sign the
      +	// connection.
      +	hostKeys []Signer
      +
      +	// hostKeyAlgorithms is non-empty if we are the client. In that case,
      +	// we accept these key types from the server as host key.
      +	hostKeyAlgorithms []string
      +
      +	// On read error, incoming is closed, and readError is set.
      +	incoming  chan []byte
      +	readError error
      +
      +	// data for host key checking
      +	hostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
      +	dialAddress     string
      +	remoteAddr      net.Addr
      +
      +	readSinceKex uint64
      +
      +	// Protects the writing side of the connection
      +	mu              sync.Mutex
      +	cond            *sync.Cond
      +	sentInitPacket  []byte
      +	sentInitMsg     *kexInitMsg
      +	writtenSinceKex uint64
      +	writeError      error
      +}
      +
      +func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport {
      +	t := &handshakeTransport{
      +		conn:          conn,
      +		serverVersion: serverVersion,
      +		clientVersion: clientVersion,
      +		incoming:      make(chan []byte, 16),
      +		config:        config,
      +	}
      +	t.cond = sync.NewCond(&t.mu)
      +	return t
      +}
      +
      +func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport {
      +	t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
      +	t.dialAddress = dialAddr
      +	t.remoteAddr = addr
      +	t.hostKeyCallback = config.HostKeyCallback
      +	if config.HostKeyAlgorithms != nil {
      +		t.hostKeyAlgorithms = config.HostKeyAlgorithms
      +	} else {
      +		t.hostKeyAlgorithms = supportedHostKeyAlgos
      +	}
      +	go t.readLoop()
      +	return t
      +}
      +
      +func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport {
      +	t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
      +	t.hostKeys = config.hostKeys
      +	go t.readLoop()
      +	return t
      +}
      +
      +func (t *handshakeTransport) getSessionID() []byte {
      +	return t.conn.getSessionID()
      +}
      +
      +func (t *handshakeTransport) id() string {
      +	if len(t.hostKeys) > 0 {
      +		return "server"
      +	}
      +	return "client"
      +}
      +
      +func (t *handshakeTransport) readPacket() ([]byte, error) {
      +	p, ok := <-t.incoming
      +	if !ok {
      +		return nil, t.readError
      +	}
      +	return p, nil
      +}
      +
      +func (t *handshakeTransport) readLoop() {
      +	for {
      +		p, err := t.readOnePacket()
      +		if err != nil {
      +			t.readError = err
      +			close(t.incoming)
      +			break
      +		}
      +		if p[0] == msgIgnore || p[0] == msgDebug {
      +			continue
      +		}
      +		t.incoming <- p
      +	}
      +
      +	// If we can't read, declare the writing part dead too.
      +	t.mu.Lock()
      +	defer t.mu.Unlock()
      +	if t.writeError == nil {
      +		t.writeError = t.readError
      +	}
      +	t.cond.Broadcast()
      +}
      +
      +func (t *handshakeTransport) readOnePacket() ([]byte, error) {
      +	if t.readSinceKex > t.config.RekeyThreshold {
      +		if err := t.requestKeyChange(); err != nil {
      +			return nil, err
      +		}
      +	}
      +
      +	p, err := t.conn.readPacket()
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	t.readSinceKex += uint64(len(p))
      +	if debugHandshake {
      +		msg, err := decode(p)
      +		log.Printf("%s got %T %v (%v)", t.id(), msg, msg, err)
      +	}
      +	if p[0] != msgKexInit {
      +		return p, nil
      +	}
      +	err = t.enterKeyExchange(p)
      +
      +	t.mu.Lock()
      +	if err != nil {
      +		// drop connection
      +		t.conn.Close()
      +		t.writeError = err
      +	}
      +
      +	if debugHandshake {
      +		log.Printf("%s exited key exchange, err %v", t.id(), err)
      +	}
      +
      +	// Unblock writers.
      +	t.sentInitMsg = nil
      +	t.sentInitPacket = nil
      +	t.cond.Broadcast()
      +	t.writtenSinceKex = 0
      +	t.mu.Unlock()
      +
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	t.readSinceKex = 0
      +	return []byte{msgNewKeys}, nil
      +}
      +
      +// sendKexInit sends a key change message, and returns the message
      +// that was sent. After initiating the key change, all writes will be
      +// blocked until the change is done, and a failed key change will
      +// close the underlying transport. This function is safe for
      +// concurrent use by multiple goroutines.
      +func (t *handshakeTransport) sendKexInit() (*kexInitMsg, []byte, error) {
      +	t.mu.Lock()
      +	defer t.mu.Unlock()
      +	return t.sendKexInitLocked()
      +}
      +
      +func (t *handshakeTransport) requestKeyChange() error {
      +	_, _, err := t.sendKexInit()
      +	return err
      +}
      +
      +// sendKexInitLocked sends a key change message. t.mu must be locked
      +// while this happens.
      +func (t *handshakeTransport) sendKexInitLocked() (*kexInitMsg, []byte, error) {
      +	// kexInits may be sent either in response to the other side,
      +	// or because our side wants to initiate a key change, so we
      +	// may have already sent a kexInit. In that case, don't send a
      +	// second kexInit.
      +	if t.sentInitMsg != nil {
      +		return t.sentInitMsg, t.sentInitPacket, nil
      +	}
      +	msg := &kexInitMsg{
      +		KexAlgos:                t.config.KeyExchanges,
      +		CiphersClientServer:     t.config.Ciphers,
      +		CiphersServerClient:     t.config.Ciphers,
      +		MACsClientServer:        t.config.MACs,
      +		MACsServerClient:        t.config.MACs,
      +		CompressionClientServer: supportedCompressions,
      +		CompressionServerClient: supportedCompressions,
      +	}
      +	io.ReadFull(rand.Reader, msg.Cookie[:])
      +
      +	if len(t.hostKeys) > 0 {
      +		for _, k := range t.hostKeys {
      +			msg.ServerHostKeyAlgos = append(
      +				msg.ServerHostKeyAlgos, k.PublicKey().Type())
      +		}
      +	} else {
      +		msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
      +	}
      +	packet := Marshal(msg)
      +
      +	// writePacket destroys the contents, so save a copy.
      +	packetCopy := make([]byte, len(packet))
      +	copy(packetCopy, packet)
      +
      +	if err := t.conn.writePacket(packetCopy); err != nil {
      +		return nil, nil, err
      +	}
      +
      +	t.sentInitMsg = msg
      +	t.sentInitPacket = packet
      +	return msg, packet, nil
      +}
      +
      +func (t *handshakeTransport) writePacket(p []byte) error {
      +	t.mu.Lock()
      +	defer t.mu.Unlock()
      +
      +	if t.writtenSinceKex > t.config.RekeyThreshold {
      +		t.sendKexInitLocked()
      +	}
      +	for t.sentInitMsg != nil && t.writeError == nil {
      +		t.cond.Wait()
      +	}
      +	if t.writeError != nil {
      +		return t.writeError
      +	}
      +	t.writtenSinceKex += uint64(len(p))
      +
      +	switch p[0] {
      +	case msgKexInit:
      +		return errors.New("ssh: only handshakeTransport can send kexInit")
      +	case msgNewKeys:
      +		return errors.New("ssh: only handshakeTransport can send newKeys")
      +	default:
      +		return t.conn.writePacket(p)
      +	}
      +}
      +
      +func (t *handshakeTransport) Close() error {
      +	return t.conn.Close()
      +}
      +
      +// enterKeyExchange runs the key exchange.
      +func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
      +	if debugHandshake {
      +		log.Printf("%s entered key exchange", t.id())
      +	}
      +	myInit, myInitPacket, err := t.sendKexInit()
      +	if err != nil {
      +		return err
      +	}
      +
      +	otherInit := &kexInitMsg{}
      +	if err := Unmarshal(otherInitPacket, otherInit); err != nil {
      +		return err
      +	}
      +
      +	magics := handshakeMagics{
      +		clientVersion: t.clientVersion,
      +		serverVersion: t.serverVersion,
      +		clientKexInit: otherInitPacket,
      +		serverKexInit: myInitPacket,
      +	}
      +
      +	clientInit := otherInit
      +	serverInit := myInit
      +	if len(t.hostKeys) == 0 {
      +		clientInit = myInit
      +		serverInit = otherInit
      +
      +		magics.clientKexInit = myInitPacket
      +		magics.serverKexInit = otherInitPacket
      +	}
      +
      +	algs, err := findAgreedAlgorithms(clientInit, serverInit)
      +	if err != nil {
      +		return err
      +	}
      +
      +	// We don't send FirstKexFollows, but we handle receiving it.
      +	if otherInit.FirstKexFollows && algs.kex != otherInit.KexAlgos[0] {
      +		// other side sent a kex message for the wrong algorithm,
      +		// which we have to ignore.
      +		if _, err := t.conn.readPacket(); err != nil {
      +			return err
      +		}
      +	}
      +
      +	kex, ok := kexAlgoMap[algs.kex]
      +	if !ok {
      +		return fmt.Errorf("ssh: unexpected key exchange algorithm %v", algs.kex)
      +	}
      +
      +	var result *kexResult
      +	if len(t.hostKeys) > 0 {
      +		result, err = t.server(kex, algs, &magics)
      +	} else {
      +		result, err = t.client(kex, algs, &magics)
      +	}
      +
      +	if err != nil {
      +		return err
      +	}
      +
      +	t.conn.prepareKeyChange(algs, result)
      +	if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
      +		return err
      +	}
      +	if packet, err := t.conn.readPacket(); err != nil {
      +		return err
      +	} else if packet[0] != msgNewKeys {
      +		return unexpectedMessageError(msgNewKeys, packet[0])
      +	}
      +	return nil
      +}
      +
      +func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
      +	var hostKey Signer
      +	for _, k := range t.hostKeys {
      +		if algs.hostKey == k.PublicKey().Type() {
      +			hostKey = k
      +		}
      +	}
      +
      +	r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey)
      +	return r, err
      +}
      +
      +func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
      +	result, err := kex.Client(t.conn, t.config.Rand, magics)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	hostKey, err := ParsePublicKey(result.HostKey)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	if err := verifyHostKeySignature(hostKey, result); err != nil {
      +		return nil, err
      +	}
      +
      +	if t.hostKeyCallback != nil {
      +		err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey)
      +		if err != nil {
      +			return nil, err
      +		}
      +	}
      +
      +	return result, nil
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/handshake_test.go b/vendor/golang.org/x/crypto/ssh/handshake_test.go
      new file mode 100644
      index 00000000..b86d369c
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/handshake_test.go
      @@ -0,0 +1,415 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bytes"
      +	"crypto/rand"
      +	"errors"
      +	"fmt"
      +	"net"
      +	"runtime"
      +	"strings"
      +	"sync"
      +	"testing"
      +)
      +
      +type testChecker struct {
      +	calls []string
      +}
      +
      +func (t *testChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error {
      +	if dialAddr == "bad" {
      +		return fmt.Errorf("dialAddr is bad")
      +	}
      +
      +	if tcpAddr, ok := addr.(*net.TCPAddr); !ok || tcpAddr == nil {
      +		return fmt.Errorf("testChecker: got %T want *net.TCPAddr", addr)
      +	}
      +
      +	t.calls = append(t.calls, fmt.Sprintf("%s %v %s %x", dialAddr, addr, key.Type(), key.Marshal()))
      +
      +	return nil
      +}
      +
      +// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and
      +// therefore is buffered (net.Pipe deadlocks if both sides start with
      +// a write.)
      +func netPipe() (net.Conn, net.Conn, error) {
      +	listener, err := net.Listen("tcp", "127.0.0.1:0")
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +	defer listener.Close()
      +	c1, err := net.Dial("tcp", listener.Addr().String())
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +
      +	c2, err := listener.Accept()
      +	if err != nil {
      +		c1.Close()
      +		return nil, nil, err
      +	}
      +
      +	return c1, c2, nil
      +}
      +
      +func handshakePair(clientConf *ClientConfig, addr string) (client *handshakeTransport, server *handshakeTransport, err error) {
      +	a, b, err := netPipe()
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +
      +	trC := newTransport(a, rand.Reader, true)
      +	trS := newTransport(b, rand.Reader, false)
      +	clientConf.SetDefaults()
      +
      +	v := []byte("version")
      +	client = newClientTransport(trC, v, v, clientConf, addr, a.RemoteAddr())
      +
      +	serverConf := &ServerConfig{}
      +	serverConf.AddHostKey(testSigners["ecdsa"])
      +	serverConf.AddHostKey(testSigners["rsa"])
      +	serverConf.SetDefaults()
      +	server = newServerTransport(trS, v, v, serverConf)
      +
      +	return client, server, nil
      +}
      +
      +func TestHandshakeBasic(t *testing.T) {
      +	if runtime.GOOS == "plan9" {
      +		t.Skip("see golang.org/issue/7237")
      +	}
      +	checker := &testChecker{}
      +	trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr")
      +	if err != nil {
      +		t.Fatalf("handshakePair: %v", err)
      +	}
      +
      +	defer trC.Close()
      +	defer trS.Close()
      +
      +	go func() {
      +		// Client writes a bunch of stuff, and does a key
      +		// change in the middle. This should not confuse the
      +		// handshake in progress
      +		for i := 0; i < 10; i++ {
      +			p := []byte{msgRequestSuccess, byte(i)}
      +			if err := trC.writePacket(p); err != nil {
      +				t.Fatalf("sendPacket: %v", err)
      +			}
      +			if i == 5 {
      +				// halfway through, we request a key change.
      +				_, _, err := trC.sendKexInit()
      +				if err != nil {
      +					t.Fatalf("sendKexInit: %v", err)
      +				}
      +			}
      +		}
      +		trC.Close()
      +	}()
      +
      +	// Server checks that client messages come in cleanly
      +	i := 0
      +	for {
      +		p, err := trS.readPacket()
      +		if err != nil {
      +			break
      +		}
      +		if p[0] == msgNewKeys {
      +			continue
      +		}
      +		want := []byte{msgRequestSuccess, byte(i)}
      +		if bytes.Compare(p, want) != 0 {
      +			t.Errorf("message %d: got %q, want %q", i, p, want)
      +		}
      +		i++
      +	}
      +	if i != 10 {
      +		t.Errorf("received %d messages, want 10.", i)
      +	}
      +
      +	// If all went well, we registered exactly 1 key change.
      +	if len(checker.calls) != 1 {
      +		t.Fatalf("got %d host key checks, want 1", len(checker.calls))
      +	}
      +
      +	pub := testSigners["ecdsa"].PublicKey()
      +	want := fmt.Sprintf("%s %v %s %x", "addr", trC.remoteAddr, pub.Type(), pub.Marshal())
      +	if want != checker.calls[0] {
      +		t.Errorf("got %q want %q for host key check", checker.calls[0], want)
      +	}
      +}
      +
      +func TestHandshakeError(t *testing.T) {
      +	checker := &testChecker{}
      +	trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "bad")
      +	if err != nil {
      +		t.Fatalf("handshakePair: %v", err)
      +	}
      +	defer trC.Close()
      +	defer trS.Close()
      +
      +	// send a packet
      +	packet := []byte{msgRequestSuccess, 42}
      +	if err := trC.writePacket(packet); err != nil {
      +		t.Errorf("writePacket: %v", err)
      +	}
      +
      +	// Now request a key change.
      +	_, _, err = trC.sendKexInit()
      +	if err != nil {
      +		t.Errorf("sendKexInit: %v", err)
      +	}
      +
      +	// the key change will fail, and afterwards we can't write.
      +	if err := trC.writePacket([]byte{msgRequestSuccess, 43}); err == nil {
      +		t.Errorf("writePacket after botched rekey succeeded.")
      +	}
      +
      +	readback, err := trS.readPacket()
      +	if err != nil {
      +		t.Fatalf("server closed too soon: %v", err)
      +	}
      +	if bytes.Compare(readback, packet) != 0 {
      +		t.Errorf("got %q want %q", readback, packet)
      +	}
      +	readback, err = trS.readPacket()
      +	if err == nil {
      +		t.Errorf("got a message %q after failed key change", readback)
      +	}
      +}
      +
      +func TestHandshakeTwice(t *testing.T) {
      +	checker := &testChecker{}
      +	trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr")
      +	if err != nil {
      +		t.Fatalf("handshakePair: %v", err)
      +	}
      +
      +	defer trC.Close()
      +	defer trS.Close()
      +
      +	// send a packet
      +	packet := make([]byte, 5)
      +	packet[0] = msgRequestSuccess
      +	if err := trC.writePacket(packet); err != nil {
      +		t.Errorf("writePacket: %v", err)
      +	}
      +
      +	// Now request a key change.
      +	_, _, err = trC.sendKexInit()
      +	if err != nil {
      +		t.Errorf("sendKexInit: %v", err)
      +	}
      +
      +	// Send another packet. Use a fresh one, since writePacket destroys.
      +	packet = make([]byte, 5)
      +	packet[0] = msgRequestSuccess
      +	if err := trC.writePacket(packet); err != nil {
      +		t.Errorf("writePacket: %v", err)
      +	}
      +
      +	// 2nd key change.
      +	_, _, err = trC.sendKexInit()
      +	if err != nil {
      +		t.Errorf("sendKexInit: %v", err)
      +	}
      +
      +	packet = make([]byte, 5)
      +	packet[0] = msgRequestSuccess
      +	if err := trC.writePacket(packet); err != nil {
      +		t.Errorf("writePacket: %v", err)
      +	}
      +
      +	packet = make([]byte, 5)
      +	packet[0] = msgRequestSuccess
      +	for i := 0; i < 5; i++ {
      +		msg, err := trS.readPacket()
      +		if err != nil {
      +			t.Fatalf("server closed too soon: %v", err)
      +		}
      +		if msg[0] == msgNewKeys {
      +			continue
      +		}
      +
      +		if bytes.Compare(msg, packet) != 0 {
      +			t.Errorf("packet %d: got %q want %q", i, msg, packet)
      +		}
      +	}
      +	if len(checker.calls) != 2 {
      +		t.Errorf("got %d key changes, want 2", len(checker.calls))
      +	}
      +}
      +
      +func TestHandshakeAutoRekeyWrite(t *testing.T) {
      +	checker := &testChecker{}
      +	clientConf := &ClientConfig{HostKeyCallback: checker.Check}
      +	clientConf.RekeyThreshold = 500
      +	trC, trS, err := handshakePair(clientConf, "addr")
      +	if err != nil {
      +		t.Fatalf("handshakePair: %v", err)
      +	}
      +	defer trC.Close()
      +	defer trS.Close()
      +
      +	for i := 0; i < 5; i++ {
      +		packet := make([]byte, 251)
      +		packet[0] = msgRequestSuccess
      +		if err := trC.writePacket(packet); err != nil {
      +			t.Errorf("writePacket: %v", err)
      +		}
      +	}
      +
      +	j := 0
      +	for ; j < 5; j++ {
      +		_, err := trS.readPacket()
      +		if err != nil {
      +			break
      +		}
      +	}
      +
      +	if j != 5 {
      +		t.Errorf("got %d, want 5 messages", j)
      +	}
      +
      +	if len(checker.calls) != 2 {
      +		t.Errorf("got %d key changes, wanted 2", len(checker.calls))
      +	}
      +}
      +
      +type syncChecker struct {
      +	called chan int
      +}
      +
      +func (t *syncChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error {
      +	t.called <- 1
      +	return nil
      +}
      +
      +func TestHandshakeAutoRekeyRead(t *testing.T) {
      +	sync := &syncChecker{make(chan int, 2)}
      +	clientConf := &ClientConfig{
      +		HostKeyCallback: sync.Check,
      +	}
      +	clientConf.RekeyThreshold = 500
      +
      +	trC, trS, err := handshakePair(clientConf, "addr")
      +	if err != nil {
      +		t.Fatalf("handshakePair: %v", err)
      +	}
      +	defer trC.Close()
      +	defer trS.Close()
      +
      +	packet := make([]byte, 501)
      +	packet[0] = msgRequestSuccess
      +	if err := trS.writePacket(packet); err != nil {
      +		t.Fatalf("writePacket: %v", err)
      +	}
      +	// While we read out the packet, a key change will be
      +	// initiated.
      +	if _, err := trC.readPacket(); err != nil {
      +		t.Fatalf("readPacket(client): %v", err)
      +	}
      +
      +	<-sync.called
      +}
      +
      +// errorKeyingTransport generates errors after a given number of
      +// read/write operations.
      +type errorKeyingTransport struct {
      +	packetConn
      +	readLeft, writeLeft int
      +}
      +
      +func (n *errorKeyingTransport) prepareKeyChange(*algorithms, *kexResult) error {
      +	return nil
      +}
      +func (n *errorKeyingTransport) getSessionID() []byte {
      +	return nil
      +}
      +
      +func (n *errorKeyingTransport) writePacket(packet []byte) error {
      +	if n.writeLeft == 0 {
      +		n.Close()
      +		return errors.New("barf")
      +	}
      +
      +	n.writeLeft--
      +	return n.packetConn.writePacket(packet)
      +}
      +
      +func (n *errorKeyingTransport) readPacket() ([]byte, error) {
      +	if n.readLeft == 0 {
      +		n.Close()
      +		return nil, errors.New("barf")
      +	}
      +
      +	n.readLeft--
      +	return n.packetConn.readPacket()
      +}
      +
      +func TestHandshakeErrorHandlingRead(t *testing.T) {
      +	for i := 0; i < 20; i++ {
      +		testHandshakeErrorHandlingN(t, i, -1)
      +	}
      +}
      +
      +func TestHandshakeErrorHandlingWrite(t *testing.T) {
      +	for i := 0; i < 20; i++ {
      +		testHandshakeErrorHandlingN(t, -1, i)
      +	}
      +}
      +
      +// testHandshakeErrorHandlingN runs handshakes, injecting errors. If
      +// handshakeTransport deadlocks, the go runtime will detect it and
      +// panic.
      +func testHandshakeErrorHandlingN(t *testing.T, readLimit, writeLimit int) {
      +	msg := Marshal(&serviceRequestMsg{strings.Repeat("x", int(minRekeyThreshold)/4)})
      +
      +	a, b := memPipe()
      +	defer a.Close()
      +	defer b.Close()
      +
      +	key := testSigners["ecdsa"]
      +	serverConf := Config{RekeyThreshold: minRekeyThreshold}
      +	serverConf.SetDefaults()
      +	serverConn := newHandshakeTransport(&errorKeyingTransport{a, readLimit, writeLimit}, &serverConf, []byte{'a'}, []byte{'b'})
      +	serverConn.hostKeys = []Signer{key}
      +	go serverConn.readLoop()
      +
      +	clientConf := Config{RekeyThreshold: 10 * minRekeyThreshold}
      +	clientConf.SetDefaults()
      +	clientConn := newHandshakeTransport(&errorKeyingTransport{b, -1, -1}, &clientConf, []byte{'a'}, []byte{'b'})
      +	clientConn.hostKeyAlgorithms = []string{key.PublicKey().Type()}
      +	go clientConn.readLoop()
      +
      +	var wg sync.WaitGroup
      +	wg.Add(4)
      +
      +	for _, hs := range []packetConn{serverConn, clientConn} {
      +		go func(c packetConn) {
      +			for {
      +				err := c.writePacket(msg)
      +				if err != nil {
      +					break
      +				}
      +			}
      +			wg.Done()
      +		}(hs)
      +		go func(c packetConn) {
      +			for {
      +				_, err := c.readPacket()
      +				if err != nil {
      +					break
      +				}
      +			}
      +			wg.Done()
      +		}(hs)
      +	}
      +
      +	wg.Wait()
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go
      new file mode 100644
      index 00000000..3ec603c0
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/kex.go
      @@ -0,0 +1,526 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"crypto"
      +	"crypto/ecdsa"
      +	"crypto/elliptic"
      +	"crypto/rand"
      +	"crypto/subtle"
      +	"errors"
      +	"io"
      +	"math/big"
      +
      +	"golang.org/x/crypto/curve25519"
      +)
      +
      +const (
      +	kexAlgoDH1SHA1          = "diffie-hellman-group1-sha1"
      +	kexAlgoDH14SHA1         = "diffie-hellman-group14-sha1"
      +	kexAlgoECDH256          = "ecdh-sha2-nistp256"
      +	kexAlgoECDH384          = "ecdh-sha2-nistp384"
      +	kexAlgoECDH521          = "ecdh-sha2-nistp521"
      +	kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org"
      +)
      +
      +// kexResult captures the outcome of a key exchange.
      +type kexResult struct {
      +	// Session hash. See also RFC 4253, section 8.
      +	H []byte
      +
      +	// Shared secret. See also RFC 4253, section 8.
      +	K []byte
      +
      +	// Host key as hashed into H.
      +	HostKey []byte
      +
      +	// Signature of H.
      +	Signature []byte
      +
      +	// A cryptographic hash function that matches the security
      +	// level of the key exchange algorithm. It is used for
      +	// calculating H, and for deriving keys from H and K.
      +	Hash crypto.Hash
      +
      +	// The session ID, which is the first H computed. This is used
      +	// to signal data inside transport.
      +	SessionID []byte
      +}
      +
      +// handshakeMagics contains data that is always included in the
      +// session hash.
      +type handshakeMagics struct {
      +	clientVersion, serverVersion []byte
      +	clientKexInit, serverKexInit []byte
      +}
      +
      +func (m *handshakeMagics) write(w io.Writer) {
      +	writeString(w, m.clientVersion)
      +	writeString(w, m.serverVersion)
      +	writeString(w, m.clientKexInit)
      +	writeString(w, m.serverKexInit)
      +}
      +
      +// kexAlgorithm abstracts different key exchange algorithms.
      +type kexAlgorithm interface {
      +	// Server runs server-side key agreement, signing the result
      +	// with a hostkey.
      +	Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error)
      +
      +	// Client runs the client-side key agreement. Caller is
      +	// responsible for verifying the host key signature.
      +	Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error)
      +}
      +
      +// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
      +type dhGroup struct {
      +	g, p *big.Int
      +}
      +
      +func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
      +	if theirPublic.Sign() <= 0 || theirPublic.Cmp(group.p) >= 0 {
      +		return nil, errors.New("ssh: DH parameter out of bounds")
      +	}
      +	return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil
      +}
      +
      +func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
      +	hashFunc := crypto.SHA1
      +
      +	x, err := rand.Int(randSource, group.p)
      +	if err != nil {
      +		return nil, err
      +	}
      +	X := new(big.Int).Exp(group.g, x, group.p)
      +	kexDHInit := kexDHInitMsg{
      +		X: X,
      +	}
      +	if err := c.writePacket(Marshal(&kexDHInit)); err != nil {
      +		return nil, err
      +	}
      +
      +	packet, err := c.readPacket()
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	var kexDHReply kexDHReplyMsg
      +	if err = Unmarshal(packet, &kexDHReply); err != nil {
      +		return nil, err
      +	}
      +
      +	kInt, err := group.diffieHellman(kexDHReply.Y, x)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	h := hashFunc.New()
      +	magics.write(h)
      +	writeString(h, kexDHReply.HostKey)
      +	writeInt(h, X)
      +	writeInt(h, kexDHReply.Y)
      +	K := make([]byte, intLength(kInt))
      +	marshalInt(K, kInt)
      +	h.Write(K)
      +
      +	return &kexResult{
      +		H:         h.Sum(nil),
      +		K:         K,
      +		HostKey:   kexDHReply.HostKey,
      +		Signature: kexDHReply.Signature,
      +		Hash:      crypto.SHA1,
      +	}, nil
      +}
      +
      +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
      +	hashFunc := crypto.SHA1
      +	packet, err := c.readPacket()
      +	if err != nil {
      +		return
      +	}
      +	var kexDHInit kexDHInitMsg
      +	if err = Unmarshal(packet, &kexDHInit); err != nil {
      +		return
      +	}
      +
      +	y, err := rand.Int(randSource, group.p)
      +	if err != nil {
      +		return
      +	}
      +
      +	Y := new(big.Int).Exp(group.g, y, group.p)
      +	kInt, err := group.diffieHellman(kexDHInit.X, y)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	hostKeyBytes := priv.PublicKey().Marshal()
      +
      +	h := hashFunc.New()
      +	magics.write(h)
      +	writeString(h, hostKeyBytes)
      +	writeInt(h, kexDHInit.X)
      +	writeInt(h, Y)
      +
      +	K := make([]byte, intLength(kInt))
      +	marshalInt(K, kInt)
      +	h.Write(K)
      +
      +	H := h.Sum(nil)
      +
      +	// H is already a hash, but the hostkey signing will apply its
      +	// own key-specific hash algorithm.
      +	sig, err := signAndMarshal(priv, randSource, H)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	kexDHReply := kexDHReplyMsg{
      +		HostKey:   hostKeyBytes,
      +		Y:         Y,
      +		Signature: sig,
      +	}
      +	packet = Marshal(&kexDHReply)
      +
      +	err = c.writePacket(packet)
      +	return &kexResult{
      +		H:         H,
      +		K:         K,
      +		HostKey:   hostKeyBytes,
      +		Signature: sig,
      +		Hash:      crypto.SHA1,
      +	}, nil
      +}
      +
      +// ecdh performs Elliptic Curve Diffie-Hellman key exchange as
      +// described in RFC 5656, section 4.
      +type ecdh struct {
      +	curve elliptic.Curve
      +}
      +
      +func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
      +	ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	kexInit := kexECDHInitMsg{
      +		ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y),
      +	}
      +
      +	serialized := Marshal(&kexInit)
      +	if err := c.writePacket(serialized); err != nil {
      +		return nil, err
      +	}
      +
      +	packet, err := c.readPacket()
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	var reply kexECDHReplyMsg
      +	if err = Unmarshal(packet, &reply); err != nil {
      +		return nil, err
      +	}
      +
      +	x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	// generate shared secret
      +	secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes())
      +
      +	h := ecHash(kex.curve).New()
      +	magics.write(h)
      +	writeString(h, reply.HostKey)
      +	writeString(h, kexInit.ClientPubKey)
      +	writeString(h, reply.EphemeralPubKey)
      +	K := make([]byte, intLength(secret))
      +	marshalInt(K, secret)
      +	h.Write(K)
      +
      +	return &kexResult{
      +		H:         h.Sum(nil),
      +		K:         K,
      +		HostKey:   reply.HostKey,
      +		Signature: reply.Signature,
      +		Hash:      ecHash(kex.curve),
      +	}, nil
      +}
      +
      +// unmarshalECKey parses and checks an EC key.
      +func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) {
      +	x, y = elliptic.Unmarshal(curve, pubkey)
      +	if x == nil {
      +		return nil, nil, errors.New("ssh: elliptic.Unmarshal failure")
      +	}
      +	if !validateECPublicKey(curve, x, y) {
      +		return nil, nil, errors.New("ssh: public key not on curve")
      +	}
      +	return x, y, nil
      +}
      +
      +// validateECPublicKey checks that the point is a valid public key for
      +// the given curve. See [SEC1], 3.2.2
      +func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool {
      +	if x.Sign() == 0 && y.Sign() == 0 {
      +		return false
      +	}
      +
      +	if x.Cmp(curve.Params().P) >= 0 {
      +		return false
      +	}
      +
      +	if y.Cmp(curve.Params().P) >= 0 {
      +		return false
      +	}
      +
      +	if !curve.IsOnCurve(x, y) {
      +		return false
      +	}
      +
      +	// We don't check if N * PubKey == 0, since
      +	//
      +	// - the NIST curves have cofactor = 1, so this is implicit.
      +	// (We don't foresee an implementation that supports non NIST
      +	// curves)
      +	//
      +	// - for ephemeral keys, we don't need to worry about small
      +	// subgroup attacks.
      +	return true
      +}
      +
      +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
      +	packet, err := c.readPacket()
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	var kexECDHInit kexECDHInitMsg
      +	if err = Unmarshal(packet, &kexECDHInit); err != nil {
      +		return nil, err
      +	}
      +
      +	clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	// We could cache this key across multiple users/multiple
      +	// connection attempts, but the benefit is small. OpenSSH
      +	// generates a new key for each incoming connection.
      +	ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	hostKeyBytes := priv.PublicKey().Marshal()
      +
      +	serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y)
      +
      +	// generate shared secret
      +	secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes())
      +
      +	h := ecHash(kex.curve).New()
      +	magics.write(h)
      +	writeString(h, hostKeyBytes)
      +	writeString(h, kexECDHInit.ClientPubKey)
      +	writeString(h, serializedEphKey)
      +
      +	K := make([]byte, intLength(secret))
      +	marshalInt(K, secret)
      +	h.Write(K)
      +
      +	H := h.Sum(nil)
      +
      +	// H is already a hash, but the hostkey signing will apply its
      +	// own key-specific hash algorithm.
      +	sig, err := signAndMarshal(priv, rand, H)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	reply := kexECDHReplyMsg{
      +		EphemeralPubKey: serializedEphKey,
      +		HostKey:         hostKeyBytes,
      +		Signature:       sig,
      +	}
      +
      +	serialized := Marshal(&reply)
      +	if err := c.writePacket(serialized); err != nil {
      +		return nil, err
      +	}
      +
      +	return &kexResult{
      +		H:         H,
      +		K:         K,
      +		HostKey:   reply.HostKey,
      +		Signature: sig,
      +		Hash:      ecHash(kex.curve),
      +	}, nil
      +}
      +
      +var kexAlgoMap = map[string]kexAlgorithm{}
      +
      +func init() {
      +	// This is the group called diffie-hellman-group1-sha1 in RFC
      +	// 4253 and Oakley Group 2 in RFC 2409.
      +	p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16)
      +	kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
      +		g: new(big.Int).SetInt64(2),
      +		p: p,
      +	}
      +
      +	// This is the group called diffie-hellman-group14-sha1 in RFC
      +	// 4253 and Oakley Group 14 in RFC 3526.
      +	p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
      +
      +	kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
      +		g: new(big.Int).SetInt64(2),
      +		p: p,
      +	}
      +
      +	kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
      +	kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
      +	kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}
      +	kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{}
      +}
      +
      +// curve25519sha256 implements the curve25519-sha256@libssh.org key
      +// agreement protocol, as described in
      +// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt
      +type curve25519sha256 struct{}
      +
      +type curve25519KeyPair struct {
      +	priv [32]byte
      +	pub  [32]byte
      +}
      +
      +func (kp *curve25519KeyPair) generate(rand io.Reader) error {
      +	if _, err := io.ReadFull(rand, kp.priv[:]); err != nil {
      +		return err
      +	}
      +	curve25519.ScalarBaseMult(&kp.pub, &kp.priv)
      +	return nil
      +}
      +
      +// curve25519Zeros is just an array of 32 zero bytes so that we have something
      +// convenient to compare against in order to reject curve25519 points with the
      +// wrong order.
      +var curve25519Zeros [32]byte
      +
      +func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
      +	var kp curve25519KeyPair
      +	if err := kp.generate(rand); err != nil {
      +		return nil, err
      +	}
      +	if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil {
      +		return nil, err
      +	}
      +
      +	packet, err := c.readPacket()
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	var reply kexECDHReplyMsg
      +	if err = Unmarshal(packet, &reply); err != nil {
      +		return nil, err
      +	}
      +	if len(reply.EphemeralPubKey) != 32 {
      +		return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
      +	}
      +
      +	var servPub, secret [32]byte
      +	copy(servPub[:], reply.EphemeralPubKey)
      +	curve25519.ScalarMult(&secret, &kp.priv, &servPub)
      +	if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
      +		return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
      +	}
      +
      +	h := crypto.SHA256.New()
      +	magics.write(h)
      +	writeString(h, reply.HostKey)
      +	writeString(h, kp.pub[:])
      +	writeString(h, reply.EphemeralPubKey)
      +
      +	kInt := new(big.Int).SetBytes(secret[:])
      +	K := make([]byte, intLength(kInt))
      +	marshalInt(K, kInt)
      +	h.Write(K)
      +
      +	return &kexResult{
      +		H:         h.Sum(nil),
      +		K:         K,
      +		HostKey:   reply.HostKey,
      +		Signature: reply.Signature,
      +		Hash:      crypto.SHA256,
      +	}, nil
      +}
      +
      +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
      +	packet, err := c.readPacket()
      +	if err != nil {
      +		return
      +	}
      +	var kexInit kexECDHInitMsg
      +	if err = Unmarshal(packet, &kexInit); err != nil {
      +		return
      +	}
      +
      +	if len(kexInit.ClientPubKey) != 32 {
      +		return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
      +	}
      +
      +	var kp curve25519KeyPair
      +	if err := kp.generate(rand); err != nil {
      +		return nil, err
      +	}
      +
      +	var clientPub, secret [32]byte
      +	copy(clientPub[:], kexInit.ClientPubKey)
      +	curve25519.ScalarMult(&secret, &kp.priv, &clientPub)
      +	if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
      +		return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
      +	}
      +
      +	hostKeyBytes := priv.PublicKey().Marshal()
      +
      +	h := crypto.SHA256.New()
      +	magics.write(h)
      +	writeString(h, hostKeyBytes)
      +	writeString(h, kexInit.ClientPubKey)
      +	writeString(h, kp.pub[:])
      +
      +	kInt := new(big.Int).SetBytes(secret[:])
      +	K := make([]byte, intLength(kInt))
      +	marshalInt(K, kInt)
      +	h.Write(K)
      +
      +	H := h.Sum(nil)
      +
      +	sig, err := signAndMarshal(priv, rand, H)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	reply := kexECDHReplyMsg{
      +		EphemeralPubKey: kp.pub[:],
      +		HostKey:         hostKeyBytes,
      +		Signature:       sig,
      +	}
      +	if err := c.writePacket(Marshal(&reply)); err != nil {
      +		return nil, err
      +	}
      +	return &kexResult{
      +		H:         H,
      +		K:         K,
      +		HostKey:   hostKeyBytes,
      +		Signature: sig,
      +		Hash:      crypto.SHA256,
      +	}, nil
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/kex_test.go b/vendor/golang.org/x/crypto/ssh/kex_test.go
      new file mode 100644
      index 00000000..12ca0acd
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/kex_test.go
      @@ -0,0 +1,50 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +// Key exchange tests.
      +
      +import (
      +	"crypto/rand"
      +	"reflect"
      +	"testing"
      +)
      +
      +func TestKexes(t *testing.T) {
      +	type kexResultErr struct {
      +		result *kexResult
      +		err    error
      +	}
      +
      +	for name, kex := range kexAlgoMap {
      +		a, b := memPipe()
      +
      +		s := make(chan kexResultErr, 1)
      +		c := make(chan kexResultErr, 1)
      +		var magics handshakeMagics
      +		go func() {
      +			r, e := kex.Client(a, rand.Reader, &magics)
      +			a.Close()
      +			c <- kexResultErr{r, e}
      +		}()
      +		go func() {
      +			r, e := kex.Server(b, rand.Reader, &magics, testSigners["ecdsa"])
      +			b.Close()
      +			s <- kexResultErr{r, e}
      +		}()
      +
      +		clientRes := <-c
      +		serverRes := <-s
      +		if clientRes.err != nil {
      +			t.Errorf("client: %v", clientRes.err)
      +		}
      +		if serverRes.err != nil {
      +			t.Errorf("server: %v", serverRes.err)
      +		}
      +		if !reflect.DeepEqual(clientRes.result, serverRes.result) {
      +			t.Errorf("kex %q: mismatch %#v, %#v", name, clientRes.result, serverRes.result)
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
      new file mode 100644
      index 00000000..cfc970b2
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/keys.go
      @@ -0,0 +1,720 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bytes"
      +	"crypto"
      +	"crypto/dsa"
      +	"crypto/ecdsa"
      +	"crypto/elliptic"
      +	"crypto/rsa"
      +	"crypto/x509"
      +	"encoding/asn1"
      +	"encoding/base64"
      +	"encoding/pem"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"math/big"
      +	"strings"
      +)
      +
      +// These constants represent the algorithm names for key types supported by this
      +// package.
      +const (
      +	KeyAlgoRSA      = "ssh-rsa"
      +	KeyAlgoDSA      = "ssh-dss"
      +	KeyAlgoECDSA256 = "ecdsa-sha2-nistp256"
      +	KeyAlgoECDSA384 = "ecdsa-sha2-nistp384"
      +	KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
      +)
      +
      +// parsePubKey parses a public key of the given algorithm.
      +// Use ParsePublicKey for keys with prepended algorithm.
      +func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) {
      +	switch algo {
      +	case KeyAlgoRSA:
      +		return parseRSA(in)
      +	case KeyAlgoDSA:
      +		return parseDSA(in)
      +	case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
      +		return parseECDSA(in)
      +	case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
      +		cert, err := parseCert(in, certToPrivAlgo(algo))
      +		if err != nil {
      +			return nil, nil, err
      +		}
      +		return cert, nil, nil
      +	}
      +	return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", err)
      +}
      +
      +// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format
      +// (see sshd(8) manual page) once the options and key type fields have been
      +// removed.
      +func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) {
      +	in = bytes.TrimSpace(in)
      +
      +	i := bytes.IndexAny(in, " \t")
      +	if i == -1 {
      +		i = len(in)
      +	}
      +	base64Key := in[:i]
      +
      +	key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key)))
      +	n, err := base64.StdEncoding.Decode(key, base64Key)
      +	if err != nil {
      +		return nil, "", err
      +	}
      +	key = key[:n]
      +	out, err = ParsePublicKey(key)
      +	if err != nil {
      +		return nil, "", err
      +	}
      +	comment = string(bytes.TrimSpace(in[i:]))
      +	return out, comment, nil
      +}
      +
      +// ParseKnownHosts parses an entry in the format of the known_hosts file.
      +//
      +// The known_hosts format is documented in the sshd(8) manual page. This
      +// function will parse a single entry from in. On successful return, marker
      +// will contain the optional marker value (i.e. "cert-authority" or "revoked")
      +// or else be empty, hosts will contain the hosts that this entry matches,
      +// pubKey will contain the public key and comment will contain any trailing
      +// comment at the end of the line. See the sshd(8) manual page for the various
      +// forms that a host string can take.
      +//
      +// The unparsed remainder of the input will be returned in rest. This function
      +// can be called repeatedly to parse multiple entries.
      +//
      +// If no entries were found in the input then err will be io.EOF. Otherwise a
      +// non-nil err value indicates a parse error.
      +func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) {
      +	for len(in) > 0 {
      +		end := bytes.IndexByte(in, '\n')
      +		if end != -1 {
      +			rest = in[end+1:]
      +			in = in[:end]
      +		} else {
      +			rest = nil
      +		}
      +
      +		end = bytes.IndexByte(in, '\r')
      +		if end != -1 {
      +			in = in[:end]
      +		}
      +
      +		in = bytes.TrimSpace(in)
      +		if len(in) == 0 || in[0] == '#' {
      +			in = rest
      +			continue
      +		}
      +
      +		i := bytes.IndexAny(in, " \t")
      +		if i == -1 {
      +			in = rest
      +			continue
      +		}
      +
+		// Strip out the beginning of the known_hosts key.
      +		// This is either an optional marker or a (set of) hostname(s).
      +		keyFields := bytes.Fields(in)
      +		if len(keyFields) < 3 || len(keyFields) > 5 {
      +			return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data")
      +		}
      +
      +		// keyFields[0] is either "@cert-authority", "@revoked" or a comma separated
      +		// list of hosts
      +		marker := ""
      +		if keyFields[0][0] == '@' {
      +			marker = string(keyFields[0][1:])
      +			keyFields = keyFields[1:]
      +		}
      +
      +		hosts := string(keyFields[0])
      +		// keyFields[1] contains the key type (e.g. “ssh-rsa”).
      +		// However, that information is duplicated inside the
      +		// base64-encoded key and so is ignored here.
      +
      +		key := bytes.Join(keyFields[2:], []byte(" "))
      +		if pubKey, comment, err = parseAuthorizedKey(key); err != nil {
      +			return "", nil, nil, "", nil, err
      +		}
      +
      +		return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil
      +	}
      +
      +	return "", nil, nil, "", nil, io.EOF
      +}
      +
+// ParseAuthorizedKey parses a public key from an authorized_keys
+// file used in OpenSSH according to the sshd(8) manual page.
      +func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
      +	for len(in) > 0 {
      +		end := bytes.IndexByte(in, '\n')
      +		if end != -1 {
      +			rest = in[end+1:]
      +			in = in[:end]
      +		} else {
      +			rest = nil
      +		}
      +
      +		end = bytes.IndexByte(in, '\r')
      +		if end != -1 {
      +			in = in[:end]
      +		}
      +
      +		in = bytes.TrimSpace(in)
      +		if len(in) == 0 || in[0] == '#' {
      +			in = rest
      +			continue
      +		}
      +
      +		i := bytes.IndexAny(in, " \t")
      +		if i == -1 {
      +			in = rest
      +			continue
      +		}
      +
      +		if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
      +			return out, comment, options, rest, nil
      +		}
      +
      +		// No key type recognised. Maybe there's an options field at
      +		// the beginning.
      +		var b byte
      +		inQuote := false
      +		var candidateOptions []string
      +		optionStart := 0
      +		for i, b = range in {
      +			isEnd := !inQuote && (b == ' ' || b == '\t')
      +			if (b == ',' && !inQuote) || isEnd {
      +				if i-optionStart > 0 {
      +					candidateOptions = append(candidateOptions, string(in[optionStart:i]))
      +				}
      +				optionStart = i + 1
      +			}
      +			if isEnd {
      +				break
      +			}
      +			if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) {
      +				inQuote = !inQuote
      +			}
      +		}
      +		for i < len(in) && (in[i] == ' ' || in[i] == '\t') {
      +			i++
      +		}
      +		if i == len(in) {
      +			// Invalid line: unmatched quote
      +			in = rest
      +			continue
      +		}
      +
      +		in = in[i:]
      +		i = bytes.IndexAny(in, " \t")
      +		if i == -1 {
      +			in = rest
      +			continue
      +		}
      +
      +		if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
      +			options = candidateOptions
      +			return out, comment, options, rest, nil
      +		}
      +
      +		in = rest
      +		continue
      +	}
      +
      +	return nil, "", nil, nil, errors.New("ssh: no key found")
      +}
      +
      +// ParsePublicKey parses an SSH public key formatted for use in
      +// the SSH wire protocol according to RFC 4253, section 6.6.
      +func ParsePublicKey(in []byte) (out PublicKey, err error) {
      +	algo, in, ok := parseString(in)
      +	if !ok {
      +		return nil, errShortRead
      +	}
      +	var rest []byte
      +	out, rest, err = parsePubKey(in, string(algo))
      +	if len(rest) > 0 {
      +		return nil, errors.New("ssh: trailing junk in public key")
      +	}
      +
      +	return out, err
      +}
      +
      +// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH
      +// authorized_keys file. The return value ends with newline.
      +func MarshalAuthorizedKey(key PublicKey) []byte {
      +	b := &bytes.Buffer{}
      +	b.WriteString(key.Type())
      +	b.WriteByte(' ')
      +	e := base64.NewEncoder(base64.StdEncoding, b)
      +	e.Write(key.Marshal())
      +	e.Close()
      +	b.WriteByte('\n')
      +	return b.Bytes()
      +}
      +
      +// PublicKey is an abstraction of different types of public keys.
      +type PublicKey interface {
      +	// Type returns the key's type, e.g. "ssh-rsa".
      +	Type() string
      +
      +	// Marshal returns the serialized key data in SSH wire format,
      +	// with the name prefix.
      +	Marshal() []byte
      +
      +	// Verify that sig is a signature on the given data using this
      +	// key. This function will hash the data appropriately first.
      +	Verify(data []byte, sig *Signature) error
      +}
      +
      +// A Signer can create signatures that verify against a public key.
      +type Signer interface {
      +	// PublicKey returns an associated PublicKey instance.
      +	PublicKey() PublicKey
      +
      +	// Sign returns raw signature for the given data. This method
      +	// will apply the hash specified for the keytype to the data.
      +	Sign(rand io.Reader, data []byte) (*Signature, error)
      +}
      +
      +type rsaPublicKey rsa.PublicKey
      +
      +func (r *rsaPublicKey) Type() string {
      +	return "ssh-rsa"
      +}
      +
      +// parseRSA parses an RSA key according to RFC 4253, section 6.6.
      +func parseRSA(in []byte) (out PublicKey, rest []byte, err error) {
      +	var w struct {
      +		E    *big.Int
      +		N    *big.Int
      +		Rest []byte `ssh:"rest"`
      +	}
      +	if err := Unmarshal(in, &w); err != nil {
      +		return nil, nil, err
      +	}
      +
      +	if w.E.BitLen() > 24 {
      +		return nil, nil, errors.New("ssh: exponent too large")
      +	}
      +	e := w.E.Int64()
      +	if e < 3 || e&1 == 0 {
      +		return nil, nil, errors.New("ssh: incorrect exponent")
      +	}
      +
      +	var key rsa.PublicKey
      +	key.E = int(e)
      +	key.N = w.N
      +	return (*rsaPublicKey)(&key), w.Rest, nil
      +}
      +
      +func (r *rsaPublicKey) Marshal() []byte {
      +	e := new(big.Int).SetInt64(int64(r.E))
      +	wirekey := struct {
      +		Name string
      +		E    *big.Int
      +		N    *big.Int
      +	}{
      +		KeyAlgoRSA,
      +		e,
      +		r.N,
      +	}
      +	return Marshal(&wirekey)
      +}
      +
      +func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
      +	if sig.Format != r.Type() {
      +		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
      +	}
      +	h := crypto.SHA1.New()
      +	h.Write(data)
      +	digest := h.Sum(nil)
      +	return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob)
      +}
      +
      +type dsaPublicKey dsa.PublicKey
      +
      +func (r *dsaPublicKey) Type() string {
      +	return "ssh-dss"
      +}
      +
      +// parseDSA parses an DSA key according to RFC 4253, section 6.6.
      +func parseDSA(in []byte) (out PublicKey, rest []byte, err error) {
      +	var w struct {
      +		P, Q, G, Y *big.Int
      +		Rest       []byte `ssh:"rest"`
      +	}
      +	if err := Unmarshal(in, &w); err != nil {
      +		return nil, nil, err
      +	}
      +
      +	key := &dsaPublicKey{
      +		Parameters: dsa.Parameters{
      +			P: w.P,
      +			Q: w.Q,
      +			G: w.G,
      +		},
      +		Y: w.Y,
      +	}
      +	return key, w.Rest, nil
      +}
      +
      +func (k *dsaPublicKey) Marshal() []byte {
      +	w := struct {
      +		Name       string
      +		P, Q, G, Y *big.Int
      +	}{
      +		k.Type(),
      +		k.P,
      +		k.Q,
      +		k.G,
      +		k.Y,
      +	}
      +
      +	return Marshal(&w)
      +}
      +
      +func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
      +	if sig.Format != k.Type() {
      +		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
      +	}
      +	h := crypto.SHA1.New()
      +	h.Write(data)
      +	digest := h.Sum(nil)
      +
      +	// Per RFC 4253, section 6.6,
      +	// The value for 'dss_signature_blob' is encoded as a string containing
      +	// r, followed by s (which are 160-bit integers, without lengths or
      +	// padding, unsigned, and in network byte order).
      +	// For DSS purposes, sig.Blob should be exactly 40 bytes in length.
      +	if len(sig.Blob) != 40 {
      +		return errors.New("ssh: DSA signature parse error")
      +	}
      +	r := new(big.Int).SetBytes(sig.Blob[:20])
      +	s := new(big.Int).SetBytes(sig.Blob[20:])
      +	if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) {
      +		return nil
      +	}
      +	return errors.New("ssh: signature did not verify")
      +}
      +
      +type dsaPrivateKey struct {
      +	*dsa.PrivateKey
      +}
      +
      +func (k *dsaPrivateKey) PublicKey() PublicKey {
      +	return (*dsaPublicKey)(&k.PrivateKey.PublicKey)
      +}
      +
      +func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
      +	h := crypto.SHA1.New()
      +	h.Write(data)
      +	digest := h.Sum(nil)
      +	r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	sig := make([]byte, 40)
      +	rb := r.Bytes()
      +	sb := s.Bytes()
      +
      +	copy(sig[20-len(rb):20], rb)
      +	copy(sig[40-len(sb):], sb)
      +
      +	return &Signature{
      +		Format: k.PublicKey().Type(),
      +		Blob:   sig,
      +	}, nil
      +}
      +
      +type ecdsaPublicKey ecdsa.PublicKey
      +
      +func (key *ecdsaPublicKey) Type() string {
      +	return "ecdsa-sha2-" + key.nistID()
      +}
      +
      +func (key *ecdsaPublicKey) nistID() string {
      +	switch key.Params().BitSize {
      +	case 256:
      +		return "nistp256"
      +	case 384:
      +		return "nistp384"
      +	case 521:
      +		return "nistp521"
      +	}
      +	panic("ssh: unsupported ecdsa key size")
      +}
      +
      +func supportedEllipticCurve(curve elliptic.Curve) bool {
      +	return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521()
      +}
      +
      +// ecHash returns the hash to match the given elliptic curve, see RFC
      +// 5656, section 6.2.1
      +func ecHash(curve elliptic.Curve) crypto.Hash {
      +	bitSize := curve.Params().BitSize
      +	switch {
      +	case bitSize <= 256:
      +		return crypto.SHA256
      +	case bitSize <= 384:
      +		return crypto.SHA384
      +	}
      +	return crypto.SHA512
      +}
      +
      +// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1.
      +func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) {
      +	var w struct {
      +		Curve    string
      +		KeyBytes []byte
      +		Rest     []byte `ssh:"rest"`
      +	}
      +
      +	if err := Unmarshal(in, &w); err != nil {
      +		return nil, nil, err
      +	}
      +
      +	key := new(ecdsa.PublicKey)
      +
      +	switch w.Curve {
      +	case "nistp256":
      +		key.Curve = elliptic.P256()
      +	case "nistp384":
      +		key.Curve = elliptic.P384()
      +	case "nistp521":
      +		key.Curve = elliptic.P521()
      +	default:
      +		return nil, nil, errors.New("ssh: unsupported curve")
      +	}
      +
      +	key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes)
      +	if key.X == nil || key.Y == nil {
      +		return nil, nil, errors.New("ssh: invalid curve point")
      +	}
      +	return (*ecdsaPublicKey)(key), w.Rest, nil
      +}
      +
      +func (key *ecdsaPublicKey) Marshal() []byte {
      +	// See RFC 5656, section 3.1.
      +	keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y)
      +	w := struct {
      +		Name string
      +		ID   string
      +		Key  []byte
      +	}{
      +		key.Type(),
      +		key.nistID(),
      +		keyBytes,
      +	}
      +
      +	return Marshal(&w)
      +}
      +
      +func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
      +	if sig.Format != key.Type() {
      +		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
      +	}
      +
      +	h := ecHash(key.Curve).New()
      +	h.Write(data)
      +	digest := h.Sum(nil)
      +
      +	// Per RFC 5656, section 3.1.2,
      +	// The ecdsa_signature_blob value has the following specific encoding:
      +	//    mpint    r
      +	//    mpint    s
      +	var ecSig struct {
      +		R *big.Int
      +		S *big.Int
      +	}
      +
      +	if err := Unmarshal(sig.Blob, &ecSig); err != nil {
      +		return err
      +	}
      +
      +	if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) {
      +		return nil
      +	}
      +	return errors.New("ssh: signature did not verify")
      +}
      +
      +// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey,
      +// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding
      +// Signer instance. ECDSA keys must use P-256, P-384 or P-521.
      +func NewSignerFromKey(key interface{}) (Signer, error) {
      +	switch key := key.(type) {
      +	case crypto.Signer:
      +		return NewSignerFromSigner(key)
      +	case *dsa.PrivateKey:
      +		return &dsaPrivateKey{key}, nil
      +	default:
      +		return nil, fmt.Errorf("ssh: unsupported key type %T", key)
      +	}
      +}
      +
      +type wrappedSigner struct {
      +	signer crypto.Signer
      +	pubKey PublicKey
      +}
      +
      +// NewSignerFromSigner takes any crypto.Signer implementation and
      +// returns a corresponding Signer interface. This can be used, for
      +// example, with keys kept in hardware modules.
      +func NewSignerFromSigner(signer crypto.Signer) (Signer, error) {
      +	pubKey, err := NewPublicKey(signer.Public())
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return &wrappedSigner{signer, pubKey}, nil
      +}
      +
      +func (s *wrappedSigner) PublicKey() PublicKey {
      +	return s.pubKey
      +}
      +
      +func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
      +	var hashFunc crypto.Hash
      +
      +	switch key := s.pubKey.(type) {
      +	case *rsaPublicKey, *dsaPublicKey:
      +		hashFunc = crypto.SHA1
      +	case *ecdsaPublicKey:
      +		hashFunc = ecHash(key.Curve)
      +	default:
      +		return nil, fmt.Errorf("ssh: unsupported key type %T", key)
      +	}
      +
      +	h := hashFunc.New()
      +	h.Write(data)
      +	digest := h.Sum(nil)
      +
      +	signature, err := s.signer.Sign(rand, digest, hashFunc)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	// crypto.Signer.Sign is expected to return an ASN.1-encoded signature
      +	// for ECDSA and DSA, but that's not the encoding expected by SSH, so
      +	// re-encode.
      +	switch s.pubKey.(type) {
      +	case *ecdsaPublicKey, *dsaPublicKey:
      +		type asn1Signature struct {
      +			R, S *big.Int
      +		}
      +		asn1Sig := new(asn1Signature)
      +		_, err := asn1.Unmarshal(signature, asn1Sig)
      +		if err != nil {
      +			return nil, err
      +		}
      +
      +		switch s.pubKey.(type) {
      +		case *ecdsaPublicKey:
      +			signature = Marshal(asn1Sig)
      +
      +		case *dsaPublicKey:
      +			signature = make([]byte, 40)
      +			r := asn1Sig.R.Bytes()
      +			s := asn1Sig.S.Bytes()
      +			copy(signature[20-len(r):20], r)
      +			copy(signature[40-len(s):40], s)
      +		}
      +	}
      +
      +	return &Signature{
      +		Format: s.pubKey.Type(),
      +		Blob:   signature,
      +	}, nil
      +}
      +
      +// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or
      +// any other crypto.Signer and returns a corresponding Signer instance. ECDSA
      +// keys must use P-256, P-384 or P-521.
      +func NewPublicKey(key interface{}) (PublicKey, error) {
      +	switch key := key.(type) {
      +	case *rsa.PublicKey:
      +		return (*rsaPublicKey)(key), nil
      +	case *ecdsa.PublicKey:
      +		if !supportedEllipticCurve(key.Curve) {
      +			return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported.")
      +		}
      +		return (*ecdsaPublicKey)(key), nil
      +	case *dsa.PublicKey:
      +		return (*dsaPublicKey)(key), nil
      +	default:
      +		return nil, fmt.Errorf("ssh: unsupported key type %T", key)
      +	}
      +}
      +
      +// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports
      +// the same keys as ParseRawPrivateKey.
      +func ParsePrivateKey(pemBytes []byte) (Signer, error) {
      +	key, err := ParseRawPrivateKey(pemBytes)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return NewSignerFromKey(key)
      +}
      +
      +// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
      +// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
      +func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
      +	block, _ := pem.Decode(pemBytes)
      +	if block == nil {
      +		return nil, errors.New("ssh: no key found")
      +	}
      +
      +	switch block.Type {
      +	case "RSA PRIVATE KEY":
      +		return x509.ParsePKCS1PrivateKey(block.Bytes)
      +	case "EC PRIVATE KEY":
      +		return x509.ParseECPrivateKey(block.Bytes)
      +	case "DSA PRIVATE KEY":
      +		return ParseDSAPrivateKey(block.Bytes)
      +	default:
      +		return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
      +	}
      +}
      +
      +// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as
      +// specified by the OpenSSL DSA man page.
      +func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
      +	var k struct {
      +		Version int
      +		P       *big.Int
      +		Q       *big.Int
      +		G       *big.Int
      +		Priv    *big.Int
      +		Pub     *big.Int
      +	}
      +	rest, err := asn1.Unmarshal(der, &k)
      +	if err != nil {
      +		return nil, errors.New("ssh: failed to parse DSA key: " + err.Error())
      +	}
      +	if len(rest) > 0 {
      +		return nil, errors.New("ssh: garbage after DSA key")
      +	}
      +
      +	return &dsa.PrivateKey{
      +		PublicKey: dsa.PublicKey{
      +			Parameters: dsa.Parameters{
      +				P: k.P,
      +				Q: k.Q,
      +				G: k.G,
      +			},
      +			Y: k.Priv,
      +		},
      +		X: k.Pub,
      +	}, nil
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/keys_test.go b/vendor/golang.org/x/crypto/ssh/keys_test.go
      new file mode 100644
      index 00000000..27569473
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/keys_test.go
      @@ -0,0 +1,437 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bytes"
      +	"crypto/dsa"
      +	"crypto/ecdsa"
      +	"crypto/elliptic"
      +	"crypto/rand"
      +	"crypto/rsa"
      +	"encoding/base64"
      +	"fmt"
      +	"reflect"
      +	"strings"
      +	"testing"
      +
      +	"golang.org/x/crypto/ssh/testdata"
      +)
      +
      +func rawKey(pub PublicKey) interface{} {
      +	switch k := pub.(type) {
      +	case *rsaPublicKey:
      +		return (*rsa.PublicKey)(k)
      +	case *dsaPublicKey:
      +		return (*dsa.PublicKey)(k)
      +	case *ecdsaPublicKey:
      +		return (*ecdsa.PublicKey)(k)
      +	case *Certificate:
      +		return k
      +	}
      +	panic("unknown key type")
      +}
      +
      +func TestKeyMarshalParse(t *testing.T) {
      +	for _, priv := range testSigners {
      +		pub := priv.PublicKey()
      +		roundtrip, err := ParsePublicKey(pub.Marshal())
      +		if err != nil {
      +			t.Errorf("ParsePublicKey(%T): %v", pub, err)
      +		}
      +
      +		k1 := rawKey(pub)
      +		k2 := rawKey(roundtrip)
      +
      +		if !reflect.DeepEqual(k1, k2) {
      +			t.Errorf("got %#v in roundtrip, want %#v", k2, k1)
      +		}
      +	}
      +}
      +
      +func TestUnsupportedCurves(t *testing.T) {
      +	raw, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
      +	if err != nil {
      +		t.Fatalf("GenerateKey: %v", err)
      +	}
      +
      +	if _, err = NewSignerFromKey(raw); err == nil || !strings.Contains(err.Error(), "only P-256") {
      +		t.Fatalf("NewPrivateKey should not succeed with P-224, got: %v", err)
      +	}
      +
      +	if _, err = NewPublicKey(&raw.PublicKey); err == nil || !strings.Contains(err.Error(), "only P-256") {
      +		t.Fatalf("NewPublicKey should not succeed with P-224, got: %v", err)
      +	}
      +}
      +
      +func TestNewPublicKey(t *testing.T) {
      +	for _, k := range testSigners {
      +		raw := rawKey(k.PublicKey())
      +		// Skip certificates, as NewPublicKey does not support them.
      +		if _, ok := raw.(*Certificate); ok {
      +			continue
      +		}
      +		pub, err := NewPublicKey(raw)
      +		if err != nil {
      +			t.Errorf("NewPublicKey(%#v): %v", raw, err)
      +		}
      +		if !reflect.DeepEqual(k.PublicKey(), pub) {
      +			t.Errorf("NewPublicKey(%#v) = %#v, want %#v", raw, pub, k.PublicKey())
      +		}
      +	}
      +}
      +
      +func TestKeySignVerify(t *testing.T) {
      +	for _, priv := range testSigners {
      +		pub := priv.PublicKey()
      +
      +		data := []byte("sign me")
      +		sig, err := priv.Sign(rand.Reader, data)
      +		if err != nil {
      +			t.Fatalf("Sign(%T): %v", priv, err)
      +		}
      +
      +		if err := pub.Verify(data, sig); err != nil {
      +			t.Errorf("publicKey.Verify(%T): %v", priv, err)
      +		}
      +		sig.Blob[5]++
      +		if err := pub.Verify(data, sig); err == nil {
      +			t.Errorf("publicKey.Verify on broken sig did not fail")
      +		}
      +	}
      +}
      +
      +func TestParseRSAPrivateKey(t *testing.T) {
      +	key := testPrivateKeys["rsa"]
      +
      +	rsa, ok := key.(*rsa.PrivateKey)
      +	if !ok {
      +		t.Fatalf("got %T, want *rsa.PrivateKey", rsa)
      +	}
      +
      +	if err := rsa.Validate(); err != nil {
      +		t.Errorf("Validate: %v", err)
      +	}
      +}
      +
      +func TestParseECPrivateKey(t *testing.T) {
      +	key := testPrivateKeys["ecdsa"]
      +
      +	ecKey, ok := key.(*ecdsa.PrivateKey)
      +	if !ok {
      +		t.Fatalf("got %T, want *ecdsa.PrivateKey", ecKey)
      +	}
      +
      +	if !validateECPublicKey(ecKey.Curve, ecKey.X, ecKey.Y) {
      +		t.Fatalf("public key does not validate.")
      +	}
      +}
      +
      +func TestParseDSA(t *testing.T) {
      +	// We actually exercise the ParsePrivateKey codepath here, as opposed to
      +	// using the ParseRawPrivateKey+NewSignerFromKey path that testdata_test.go
      +	// uses.
      +	s, err := ParsePrivateKey(testdata.PEMBytes["dsa"])
      +	if err != nil {
      +		t.Fatalf("ParsePrivateKey returned error: %s", err)
      +	}
      +
      +	data := []byte("sign me")
      +	sig, err := s.Sign(rand.Reader, data)
      +	if err != nil {
      +		t.Fatalf("dsa.Sign: %v", err)
      +	}
      +
      +	if err := s.PublicKey().Verify(data, sig); err != nil {
      +		t.Errorf("Verify failed: %v", err)
      +	}
      +}
      +
      +// Tests for authorized_keys parsing.
      +
      +// getTestKey returns a public key, and its base64 encoding.
      +func getTestKey() (PublicKey, string) {
      +	k := testPublicKeys["rsa"]
      +
      +	b := &bytes.Buffer{}
      +	e := base64.NewEncoder(base64.StdEncoding, b)
      +	e.Write(k.Marshal())
      +	e.Close()
      +
      +	return k, b.String()
      +}
      +
      +func TestMarshalParsePublicKey(t *testing.T) {
      +	pub, pubSerialized := getTestKey()
      +	line := fmt.Sprintf("%s %s user@host", pub.Type(), pubSerialized)
      +
      +	authKeys := MarshalAuthorizedKey(pub)
      +	actualFields := strings.Fields(string(authKeys))
      +	if len(actualFields) == 0 {
      +		t.Fatalf("failed authKeys: %v", authKeys)
      +	}
      +
      +	// drop the comment
      +	expectedFields := strings.Fields(line)[0:2]
      +
      +	if !reflect.DeepEqual(actualFields, expectedFields) {
      +		t.Errorf("got %v, expected %v", actualFields, expectedFields)
      +	}
      +
      +	actPub, _, _, _, err := ParseAuthorizedKey([]byte(line))
      +	if err != nil {
      +		t.Fatalf("cannot parse %v: %v", line, err)
      +	}
      +	if !reflect.DeepEqual(actPub, pub) {
      +		t.Errorf("got %v, expected %v", actPub, pub)
      +	}
      +}
      +
      +type authResult struct {
      +	pubKey   PublicKey
      +	options  []string
      +	comments string
      +	rest     string
      +	ok       bool
      +}
      +
      +func testAuthorizedKeys(t *testing.T, authKeys []byte, expected []authResult) {
      +	rest := authKeys
      +	var values []authResult
      +	for len(rest) > 0 {
      +		var r authResult
      +		var err error
      +		r.pubKey, r.comments, r.options, rest, err = ParseAuthorizedKey(rest)
      +		r.ok = (err == nil)
      +		t.Log(err)
      +		r.rest = string(rest)
      +		values = append(values, r)
      +	}
      +
      +	if !reflect.DeepEqual(values, expected) {
      +		t.Errorf("got %#v, expected %#v", values, expected)
      +	}
      +}
      +
      +func TestAuthorizedKeyBasic(t *testing.T) {
      +	pub, pubSerialized := getTestKey()
      +	line := "ssh-rsa " + pubSerialized + " user@host"
      +	testAuthorizedKeys(t, []byte(line),
      +		[]authResult{
      +			{pub, nil, "user@host", "", true},
      +		})
      +}
      +
      +func TestAuth(t *testing.T) {
      +	pub, pubSerialized := getTestKey()
      +	authWithOptions := []string{
      +		`# comments to ignore before any keys...`,
      +		``,
      +		`env="HOME=/home/root",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`,
      +		`# comments to ignore, along with a blank line`,
      +		``,
      +		`env="HOME=/home/root2" ssh-rsa ` + pubSerialized + ` user2@host2`,
      +		``,
      +		`# more comments, plus a invalid entry`,
      +		`ssh-rsa data-that-will-not-parse user@host3`,
      +	}
      +	for _, eol := range []string{"\n", "\r\n"} {
      +		authOptions := strings.Join(authWithOptions, eol)
      +		rest2 := strings.Join(authWithOptions[3:], eol)
      +		rest3 := strings.Join(authWithOptions[6:], eol)
      +		testAuthorizedKeys(t, []byte(authOptions), []authResult{
      +			{pub, []string{`env="HOME=/home/root"`, "no-port-forwarding"}, "user@host", rest2, true},
      +			{pub, []string{`env="HOME=/home/root2"`}, "user2@host2", rest3, true},
      +			{nil, nil, "", "", false},
      +		})
      +	}
      +}
      +
      +func TestAuthWithQuotedSpaceInEnv(t *testing.T) {
      +	pub, pubSerialized := getTestKey()
      +	authWithQuotedSpaceInEnv := []byte(`env="HOME=/home/root dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`)
      +	testAuthorizedKeys(t, []byte(authWithQuotedSpaceInEnv), []authResult{
      +		{pub, []string{`env="HOME=/home/root dir"`, "no-port-forwarding"}, "user@host", "", true},
      +	})
      +}
      +
      +func TestAuthWithQuotedCommaInEnv(t *testing.T) {
      +	pub, pubSerialized := getTestKey()
      +	authWithQuotedCommaInEnv := []byte(`env="HOME=/home/root,dir",no-port-forwarding ssh-rsa ` + pubSerialized + `   user@host`)
      +	testAuthorizedKeys(t, []byte(authWithQuotedCommaInEnv), []authResult{
      +		{pub, []string{`env="HOME=/home/root,dir"`, "no-port-forwarding"}, "user@host", "", true},
      +	})
      +}
      +
      +func TestAuthWithQuotedQuoteInEnv(t *testing.T) {
      +	pub, pubSerialized := getTestKey()
      +	authWithQuotedQuoteInEnv := []byte(`env="HOME=/home/\"root dir",no-port-forwarding` + "\t" + `ssh-rsa` + "\t" + pubSerialized + `   user@host`)
      +	authWithDoubleQuotedQuote := []byte(`no-port-forwarding,env="HOME=/home/ \"root dir\"" ssh-rsa ` + pubSerialized + "\t" + `user@host`)
      +	testAuthorizedKeys(t, []byte(authWithQuotedQuoteInEnv), []authResult{
      +		{pub, []string{`env="HOME=/home/\"root dir"`, "no-port-forwarding"}, "user@host", "", true},
      +	})
      +
      +	testAuthorizedKeys(t, []byte(authWithDoubleQuotedQuote), []authResult{
      +		{pub, []string{"no-port-forwarding", `env="HOME=/home/ \"root dir\""`}, "user@host", "", true},
      +	})
      +}
      +
      +func TestAuthWithInvalidSpace(t *testing.T) {
      +	_, pubSerialized := getTestKey()
      +	authWithInvalidSpace := []byte(`env="HOME=/home/root dir", no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host
      +#more to follow but still no valid keys`)
      +	testAuthorizedKeys(t, []byte(authWithInvalidSpace), []authResult{
      +		{nil, nil, "", "", false},
      +	})
      +}
      +
      +func TestAuthWithMissingQuote(t *testing.T) {
      +	pub, pubSerialized := getTestKey()
      +	authWithMissingQuote := []byte(`env="HOME=/home/root,no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host
      +env="HOME=/home/root",shared-control ssh-rsa ` + pubSerialized + ` user@host`)
      +
      +	testAuthorizedKeys(t, []byte(authWithMissingQuote), []authResult{
      +		{pub, []string{`env="HOME=/home/root"`, `shared-control`}, "user@host", "", true},
      +	})
      +}
      +
      +func TestInvalidEntry(t *testing.T) {
      +	authInvalid := []byte(`ssh-rsa`)
      +	_, _, _, _, err := ParseAuthorizedKey(authInvalid)
      +	if err == nil {
      +		t.Errorf("got valid entry for %q", authInvalid)
      +	}
      +}
      +
      +var knownHostsParseTests = []struct {
      +	input     string
      +	err       string
      +
      +	marker   string
      +	comment  string
      +	hosts    []string
      +	rest     string
      +} {
      +	{
      +		"",
      +		"EOF",
      +
      +		"", "", nil, "",
      +	},
      +	{
      +		"# Just a comment",
      +		"EOF",
      +
      +		"", "", nil, "",
      +	},
      +	{
      +		"   \t   ",
      +		"EOF",
      +
      +		"", "", nil, "",
      +	},
      +	{
      +		"localhost ssh-rsa {RSAPUB}",
      +		"",
      +
      +		"", "", []string{"localhost"}, "",
      +	},
      +	{
      +		"localhost\tssh-rsa {RSAPUB}",
      +		"",
      +
      +		"", "", []string{"localhost"}, "",
      +	},
      +	{
      +		"localhost\tssh-rsa {RSAPUB}\tcomment comment",
      +		"",
      +
      +		"", "comment comment", []string{"localhost"}, "",
      +	},
      +	{
      +		"localhost\tssh-rsa {RSAPUB}\tcomment comment\n",
      +		"",
      +
      +		"", "comment comment", []string{"localhost"}, "",
      +	},
      +	{
      +		"localhost\tssh-rsa {RSAPUB}\tcomment comment\r\n",
      +		"",
      +
      +		"", "comment comment", []string{"localhost"}, "",
      +	},
      +	{
      +		"localhost\tssh-rsa {RSAPUB}\tcomment comment\r\nnext line",
      +		"",
      +
      +		"", "comment comment", []string{"localhost"}, "next line",
      +	},
      +	{
      +		"localhost,[host2:123]\tssh-rsa {RSAPUB}\tcomment comment",
      +		"",
      +
      +		"", "comment comment", []string{"localhost","[host2:123]"}, "",
      +	},
      +	{
      +		"@marker \tlocalhost,[host2:123]\tssh-rsa {RSAPUB}",
      +		"",
      +
      +		"marker", "", []string{"localhost","[host2:123]"}, "",
      +	},
      +	{
      +		"@marker \tlocalhost,[host2:123]\tssh-rsa aabbccdd",
      +		"short read",
      +
      +		"", "", nil, "",
      +	},
      +}
      +
      +func TestKnownHostsParsing(t *testing.T) {
      +	rsaPub, rsaPubSerialized := getTestKey()
      +
      +	for i, test := range knownHostsParseTests {
      +		var expectedKey PublicKey
      +		const rsaKeyToken = "{RSAPUB}"
      +
      +		input := test.input
      +		if strings.Contains(input, rsaKeyToken) {
      +			expectedKey = rsaPub
      +			input = strings.Replace(test.input, rsaKeyToken, rsaPubSerialized, -1)
      +		}
      +
      +		marker, hosts, pubKey, comment, rest, err := ParseKnownHosts([]byte(input))
      +		if err != nil {
      +			if len(test.err) == 0 {
      +				t.Errorf("#%d: unexpectedly failed with %q", i, err)
      +			} else if !strings.Contains(err.Error(), test.err) {
      +				t.Errorf("#%d: expected error containing %q, but got %q", i, test.err, err)
      +			}
      +			continue
      +		} else if len(test.err) != 0 {
      +			t.Errorf("#%d: succeeded but expected error including %q", i, test.err)
      +			continue
      +		}
      +
      +		if !reflect.DeepEqual(expectedKey, pubKey) {
      +			t.Errorf("#%d: expected key %#v, but got %#v", i, expectedKey, pubKey)
      +		}
      +
      +		if marker != test.marker {
      +			t.Errorf("#%d: expected marker %q, but got %q", i, test.marker, marker)
      +		}
      +
      +		if comment != test.comment {
      +			t.Errorf("#%d: expected comment %q, but got %q", i, test.comment, comment)
      +		}
      +
      +		if !reflect.DeepEqual(test.hosts, hosts) {
      +			t.Errorf("#%d: expected hosts %#v, but got %#v", i, test.hosts, hosts)
      +		}
      +
      +		if rest := string(rest); rest != test.rest {
      +			t.Errorf("#%d: expected remaining input to be %q, but got %q", i, test.rest, rest)
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go
      new file mode 100644
      index 00000000..07744ad6
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/mac.go
      @@ -0,0 +1,57 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +// Message authentication support
      +
      +import (
      +	"crypto/hmac"
      +	"crypto/sha1"
      +	"crypto/sha256"
      +	"hash"
      +)
      +
      +type macMode struct {
      +	keySize int
      +	new     func(key []byte) hash.Hash
      +}
      +
      +// truncatingMAC wraps around a hash.Hash and truncates the output digest to
      +// a given size.
      +type truncatingMAC struct {
      +	length int
      +	hmac   hash.Hash
      +}
      +
      +func (t truncatingMAC) Write(data []byte) (int, error) {
      +	return t.hmac.Write(data)
      +}
      +
      +func (t truncatingMAC) Sum(in []byte) []byte {
      +	out := t.hmac.Sum(in)
      +	return out[:len(in)+t.length]
      +}
      +
      +func (t truncatingMAC) Reset() {
      +	t.hmac.Reset()
      +}
      +
      +func (t truncatingMAC) Size() int {
      +	return t.length
      +}
      +
      +func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
      +
      +var macModes = map[string]*macMode{
      +	"hmac-sha2-256": {32, func(key []byte) hash.Hash {
      +		return hmac.New(sha256.New, key)
      +	}},
      +	"hmac-sha1": {20, func(key []byte) hash.Hash {
      +		return hmac.New(sha1.New, key)
      +	}},
      +	"hmac-sha1-96": {20, func(key []byte) hash.Hash {
      +		return truncatingMAC{12, hmac.New(sha1.New, key)}
      +	}},
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/mempipe_test.go b/vendor/golang.org/x/crypto/ssh/mempipe_test.go
      new file mode 100644
      index 00000000..8697cd61
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/mempipe_test.go
      @@ -0,0 +1,110 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"io"
      +	"sync"
      +	"testing"
      +)
      +
      +// An in-memory packetConn. It is safe to call Close and writePacket
      +// from different goroutines.
      +type memTransport struct {
      +	eof     bool
      +	pending [][]byte
      +	write   *memTransport
      +	sync.Mutex
      +	*sync.Cond
      +}
      +
      +func (t *memTransport) readPacket() ([]byte, error) {
      +	t.Lock()
      +	defer t.Unlock()
      +	for {
      +		if len(t.pending) > 0 {
      +			r := t.pending[0]
      +			t.pending = t.pending[1:]
      +			return r, nil
      +		}
      +		if t.eof {
      +			return nil, io.EOF
      +		}
      +		t.Cond.Wait()
      +	}
      +}
      +
      +func (t *memTransport) closeSelf() error {
      +	t.Lock()
      +	defer t.Unlock()
      +	if t.eof {
      +		return io.EOF
      +	}
      +	t.eof = true
      +	t.Cond.Broadcast()
      +	return nil
      +}
      +
      +func (t *memTransport) Close() error {
      +	err := t.write.closeSelf()
      +	t.closeSelf()
      +	return err
      +}
      +
      +func (t *memTransport) writePacket(p []byte) error {
      +	t.write.Lock()
      +	defer t.write.Unlock()
      +	if t.write.eof {
      +		return io.EOF
      +	}
      +	c := make([]byte, len(p))
      +	copy(c, p)
      +	t.write.pending = append(t.write.pending, c)
      +	t.write.Cond.Signal()
      +	return nil
      +}
      +
      +func memPipe() (a, b packetConn) {
      +	t1 := memTransport{}
      +	t2 := memTransport{}
      +	t1.write = &t2
      +	t2.write = &t1
      +	t1.Cond = sync.NewCond(&t1.Mutex)
      +	t2.Cond = sync.NewCond(&t2.Mutex)
      +	return &t1, &t2
      +}
      +
      +func TestMemPipe(t *testing.T) {
      +	a, b := memPipe()
      +	if err := a.writePacket([]byte{42}); err != nil {
      +		t.Fatalf("writePacket: %v", err)
      +	}
      +	if err := a.Close(); err != nil {
      +		t.Fatal("Close: ", err)
      +	}
      +	p, err := b.readPacket()
      +	if err != nil {
      +		t.Fatal("readPacket: ", err)
      +	}
      +	if len(p) != 1 || p[0] != 42 {
      +		t.Fatalf("got %v, want {42}", p)
      +	}
      +	p, err = b.readPacket()
      +	if err != io.EOF {
      +		t.Fatalf("got %v, %v, want EOF", p, err)
      +	}
      +}
      +
      +func TestDoubleClose(t *testing.T) {
      +	a, _ := memPipe()
      +	err := a.Close()
      +	if err != nil {
      +		t.Errorf("Close: %v", err)
      +	}
      +	err = a.Close()
      +	if err != io.EOF {
      +		t.Errorf("expect EOF on double close.")
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go
      new file mode 100644
      index 00000000..eaf61066
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/messages.go
      @@ -0,0 +1,725 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bytes"
      +	"encoding/binary"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"math/big"
      +	"reflect"
      +	"strconv"
      +)
      +
      +// These are SSH message type numbers. They are scattered around several
      +// documents but many were taken from [SSH-PARAMETERS].
      +const (
      +	msgIgnore        = 2
      +	msgUnimplemented = 3
      +	msgDebug         = 4
      +	msgNewKeys       = 21
      +
      +	// Standard authentication messages
      +	msgUserAuthSuccess = 52
      +	msgUserAuthBanner  = 53
      +)
      +
      +// SSH messages:
      +//
      +// These structures mirror the wire format of the corresponding SSH messages.
      +// They are marshaled using reflection with the marshal and unmarshal functions
      +// in this file. The only wrinkle is that a final member of type []byte with a
      +// ssh tag of "rest" receives the remainder of a packet when unmarshaling.
      +
      +// See RFC 4253, section 11.1.
      +const msgDisconnect = 1
      +
      +// disconnectMsg is the message that signals a disconnect. It is also
      +// the error type returned from mux.Wait()
      +type disconnectMsg struct {
      +	Reason   uint32 `sshtype:"1"`
      +	Message  string
      +	Language string
      +}
      +
      +func (d *disconnectMsg) Error() string {
      +	return fmt.Sprintf("ssh: disconnect reason %d: %s", d.Reason, d.Message)
      +}
      +
      +// See RFC 4253, section 7.1.
      +const msgKexInit = 20
      +
      +type kexInitMsg struct {
      +	Cookie                  [16]byte `sshtype:"20"`
      +	KexAlgos                []string
      +	ServerHostKeyAlgos      []string
      +	CiphersClientServer     []string
      +	CiphersServerClient     []string
      +	MACsClientServer        []string
      +	MACsServerClient        []string
      +	CompressionClientServer []string
      +	CompressionServerClient []string
      +	LanguagesClientServer   []string
      +	LanguagesServerClient   []string
      +	FirstKexFollows         bool
      +	Reserved                uint32
      +}
      +
      +// See RFC 4253, section 8.
      +
      +// Diffie-Helman
      +const msgKexDHInit = 30
      +
      +type kexDHInitMsg struct {
      +	X *big.Int `sshtype:"30"`
      +}
      +
      +const msgKexECDHInit = 30
      +
      +type kexECDHInitMsg struct {
      +	ClientPubKey []byte `sshtype:"30"`
      +}
      +
      +const msgKexECDHReply = 31
      +
      +type kexECDHReplyMsg struct {
      +	HostKey         []byte `sshtype:"31"`
      +	EphemeralPubKey []byte
      +	Signature       []byte
      +}
      +
      +const msgKexDHReply = 31
      +
      +type kexDHReplyMsg struct {
      +	HostKey   []byte `sshtype:"31"`
      +	Y         *big.Int
      +	Signature []byte
      +}
      +
      +// See RFC 4253, section 10.
      +const msgServiceRequest = 5
      +
      +type serviceRequestMsg struct {
      +	Service string `sshtype:"5"`
      +}
      +
      +// See RFC 4253, section 10.
      +const msgServiceAccept = 6
      +
      +type serviceAcceptMsg struct {
      +	Service string `sshtype:"6"`
      +}
      +
      +// See RFC 4252, section 5.
      +const msgUserAuthRequest = 50
      +
      +type userAuthRequestMsg struct {
      +	User    string `sshtype:"50"`
      +	Service string
      +	Method  string
      +	Payload []byte `ssh:"rest"`
      +}
      +
      +// See RFC 4252, section 5.1
      +const msgUserAuthFailure = 51
      +
      +type userAuthFailureMsg struct {
      +	Methods        []string `sshtype:"51"`
      +	PartialSuccess bool
      +}
      +
      +// See RFC 4256, section 3.2
      +const msgUserAuthInfoRequest = 60
      +const msgUserAuthInfoResponse = 61
      +
      +type userAuthInfoRequestMsg struct {
      +	User               string `sshtype:"60"`
      +	Instruction        string
      +	DeprecatedLanguage string
      +	NumPrompts         uint32
      +	Prompts            []byte `ssh:"rest"`
      +}
      +
      +// See RFC 4254, section 5.1.
      +const msgChannelOpen = 90
      +
      +type channelOpenMsg struct {
      +	ChanType         string `sshtype:"90"`
      +	PeersId          uint32
      +	PeersWindow      uint32
      +	MaxPacketSize    uint32
      +	TypeSpecificData []byte `ssh:"rest"`
      +}
      +
      +const msgChannelExtendedData = 95
      +const msgChannelData = 94
      +
      +// See RFC 4254, section 5.1.
      +const msgChannelOpenConfirm = 91
      +
      +type channelOpenConfirmMsg struct {
      +	PeersId          uint32 `sshtype:"91"`
      +	MyId             uint32
      +	MyWindow         uint32
      +	MaxPacketSize    uint32
      +	TypeSpecificData []byte `ssh:"rest"`
      +}
      +
      +// See RFC 4254, section 5.1.
      +const msgChannelOpenFailure = 92
      +
      +type channelOpenFailureMsg struct {
      +	PeersId  uint32 `sshtype:"92"`
      +	Reason   RejectionReason
      +	Message  string
      +	Language string
      +}
      +
      +const msgChannelRequest = 98
      +
      +type channelRequestMsg struct {
      +	PeersId             uint32 `sshtype:"98"`
      +	Request             string
      +	WantReply           bool
      +	RequestSpecificData []byte `ssh:"rest"`
      +}
      +
      +// See RFC 4254, section 5.4.
      +const msgChannelSuccess = 99
      +
      +type channelRequestSuccessMsg struct {
      +	PeersId uint32 `sshtype:"99"`
      +}
      +
      +// See RFC 4254, section 5.4.
      +const msgChannelFailure = 100
      +
      +type channelRequestFailureMsg struct {
      +	PeersId uint32 `sshtype:"100"`
      +}
      +
      +// See RFC 4254, section 5.3
      +const msgChannelClose = 97
      +
      +type channelCloseMsg struct {
      +	PeersId uint32 `sshtype:"97"`
      +}
      +
      +// See RFC 4254, section 5.3
      +const msgChannelEOF = 96
      +
      +type channelEOFMsg struct {
      +	PeersId uint32 `sshtype:"96"`
      +}
      +
      +// See RFC 4254, section 4
      +const msgGlobalRequest = 80
      +
      +type globalRequestMsg struct {
      +	Type      string `sshtype:"80"`
      +	WantReply bool
      +	Data      []byte `ssh:"rest"`
      +}
      +
      +// See RFC 4254, section 4
      +const msgRequestSuccess = 81
      +
      +type globalRequestSuccessMsg struct {
      +	Data []byte `ssh:"rest" sshtype:"81"`
      +}
      +
      +// See RFC 4254, section 4
      +const msgRequestFailure = 82
      +
      +type globalRequestFailureMsg struct {
      +	Data []byte `ssh:"rest" sshtype:"82"`
      +}
      +
      +// See RFC 4254, section 5.2
      +const msgChannelWindowAdjust = 93
      +
      +type windowAdjustMsg struct {
      +	PeersId         uint32 `sshtype:"93"`
      +	AdditionalBytes uint32
      +}
      +
      +// See RFC 4252, section 7
      +const msgUserAuthPubKeyOk = 60
      +
      +type userAuthPubKeyOkMsg struct {
      +	Algo   string `sshtype:"60"`
      +	PubKey []byte
      +}
      +
      +// typeTag returns the type byte for the given type. The type should
      +// be struct.
      +func typeTag(structType reflect.Type) byte {
      +	var tag byte
      +	var tagStr string
      +	tagStr = structType.Field(0).Tag.Get("sshtype")
      +	i, err := strconv.Atoi(tagStr)
      +	if err == nil {
      +		tag = byte(i)
      +	}
      +	return tag
      +}
      +
      +func fieldError(t reflect.Type, field int, problem string) error {
      +	if problem != "" {
      +		problem = ": " + problem
      +	}
      +	return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem)
      +}
      +
      +var errShortRead = errors.New("ssh: short read")
      +
      +// Unmarshal parses data in SSH wire format into a structure. The out
      +// argument should be a pointer to struct. If the first member of the
      +// struct has the "sshtype" tag set to a number in decimal, the packet
      +// must start that number.  In case of error, Unmarshal returns a
      +// ParseError or UnexpectedMessageError.
      +func Unmarshal(data []byte, out interface{}) error {
      +	v := reflect.ValueOf(out).Elem()
      +	structType := v.Type()
      +	expectedType := typeTag(structType)
      +	if len(data) == 0 {
      +		return parseError(expectedType)
      +	}
      +	if expectedType > 0 {
      +		if data[0] != expectedType {
      +			return unexpectedMessageError(expectedType, data[0])
      +		}
      +		data = data[1:]
      +	}
      +
      +	var ok bool
      +	for i := 0; i < v.NumField(); i++ {
      +		field := v.Field(i)
      +		t := field.Type()
      +		switch t.Kind() {
      +		case reflect.Bool:
      +			if len(data) < 1 {
      +				return errShortRead
      +			}
      +			field.SetBool(data[0] != 0)
      +			data = data[1:]
      +		case reflect.Array:
      +			if t.Elem().Kind() != reflect.Uint8 {
      +				return fieldError(structType, i, "array of unsupported type")
      +			}
      +			if len(data) < t.Len() {
      +				return errShortRead
      +			}
      +			for j, n := 0, t.Len(); j < n; j++ {
      +				field.Index(j).Set(reflect.ValueOf(data[j]))
      +			}
      +			data = data[t.Len():]
      +		case reflect.Uint64:
      +			var u64 uint64
      +			if u64, data, ok = parseUint64(data); !ok {
      +				return errShortRead
      +			}
      +			field.SetUint(u64)
      +		case reflect.Uint32:
      +			var u32 uint32
      +			if u32, data, ok = parseUint32(data); !ok {
      +				return errShortRead
      +			}
      +			field.SetUint(uint64(u32))
      +		case reflect.Uint8:
      +			if len(data) < 1 {
      +				return errShortRead
      +			}
      +			field.SetUint(uint64(data[0]))
      +			data = data[1:]
      +		case reflect.String:
      +			var s []byte
      +			if s, data, ok = parseString(data); !ok {
      +				return fieldError(structType, i, "")
      +			}
      +			field.SetString(string(s))
      +		case reflect.Slice:
      +			switch t.Elem().Kind() {
      +			case reflect.Uint8:
      +				if structType.Field(i).Tag.Get("ssh") == "rest" {
      +					field.Set(reflect.ValueOf(data))
      +					data = nil
      +				} else {
      +					var s []byte
      +					if s, data, ok = parseString(data); !ok {
      +						return errShortRead
      +					}
      +					field.Set(reflect.ValueOf(s))
      +				}
      +			case reflect.String:
      +				var nl []string
      +				if nl, data, ok = parseNameList(data); !ok {
      +					return errShortRead
      +				}
      +				field.Set(reflect.ValueOf(nl))
      +			default:
      +				return fieldError(structType, i, "slice of unsupported type")
      +			}
      +		case reflect.Ptr:
      +			if t == bigIntType {
      +				var n *big.Int
      +				if n, data, ok = parseInt(data); !ok {
      +					return errShortRead
      +				}
      +				field.Set(reflect.ValueOf(n))
      +			} else {
      +				return fieldError(structType, i, "pointer to unsupported type")
      +			}
      +		default:
      +			return fieldError(structType, i, "unsupported type")
      +		}
      +	}
      +
      +	if len(data) != 0 {
      +		return parseError(expectedType)
      +	}
      +
      +	return nil
      +}
      +
      +// Marshal serializes the message in msg to SSH wire format.  The msg
      +// argument should be a struct or pointer to struct. If the first
      +// member has the "sshtype" tag set to a number in decimal, that
      +// number is prepended to the result. If the last of member has the
      +// "ssh" tag set to "rest", its contents are appended to the output.
      +func Marshal(msg interface{}) []byte {
      +	out := make([]byte, 0, 64)
      +	return marshalStruct(out, msg)
      +}
      +
      +func marshalStruct(out []byte, msg interface{}) []byte {
      +	v := reflect.Indirect(reflect.ValueOf(msg))
      +	msgType := typeTag(v.Type())
      +	if msgType > 0 {
      +		out = append(out, msgType)
      +	}
      +
      +	for i, n := 0, v.NumField(); i < n; i++ {
      +		field := v.Field(i)
      +		switch t := field.Type(); t.Kind() {
      +		case reflect.Bool:
      +			var v uint8
      +			if field.Bool() {
      +				v = 1
      +			}
      +			out = append(out, v)
      +		case reflect.Array:
      +			if t.Elem().Kind() != reflect.Uint8 {
      +				panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface()))
      +			}
      +			for j, l := 0, t.Len(); j < l; j++ {
      +				out = append(out, uint8(field.Index(j).Uint()))
      +			}
      +		case reflect.Uint32:
      +			out = appendU32(out, uint32(field.Uint()))
      +		case reflect.Uint64:
      +			out = appendU64(out, uint64(field.Uint()))
      +		case reflect.Uint8:
      +			out = append(out, uint8(field.Uint()))
      +		case reflect.String:
      +			s := field.String()
      +			out = appendInt(out, len(s))
      +			out = append(out, s...)
      +		case reflect.Slice:
      +			switch t.Elem().Kind() {
      +			case reflect.Uint8:
      +				if v.Type().Field(i).Tag.Get("ssh") != "rest" {
      +					out = appendInt(out, field.Len())
      +				}
      +				out = append(out, field.Bytes()...)
      +			case reflect.String:
      +				offset := len(out)
      +				out = appendU32(out, 0)
      +				if n := field.Len(); n > 0 {
      +					for j := 0; j < n; j++ {
      +						f := field.Index(j)
      +						if j != 0 {
      +							out = append(out, ',')
      +						}
      +						out = append(out, f.String()...)
      +					}
      +					// overwrite length value
      +					binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4))
      +				}
      +			default:
      +				panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface()))
      +			}
      +		case reflect.Ptr:
      +			if t == bigIntType {
      +				var n *big.Int
      +				nValue := reflect.ValueOf(&n)
      +				nValue.Elem().Set(field)
      +				needed := intLength(n)
      +				oldLength := len(out)
      +
      +				if cap(out)-len(out) < needed {
      +					newOut := make([]byte, len(out), 2*(len(out)+needed))
      +					copy(newOut, out)
      +					out = newOut
      +				}
      +				out = out[:oldLength+needed]
      +				marshalInt(out[oldLength:], n)
      +			} else {
      +				panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface()))
      +			}
      +		}
      +	}
      +
      +	return out
      +}
      +
      +var bigOne = big.NewInt(1)
      +
      +func parseString(in []byte) (out, rest []byte, ok bool) {
      +	if len(in) < 4 {
      +		return
      +	}
      +	length := binary.BigEndian.Uint32(in)
      +	in = in[4:]
      +	if uint32(len(in)) < length {
      +		return
      +	}
      +	out = in[:length]
      +	rest = in[length:]
      +	ok = true
      +	return
      +}
      +
      +var (
      +	comma         = []byte{','}
      +	emptyNameList = []string{}
      +)
      +
      +func parseNameList(in []byte) (out []string, rest []byte, ok bool) {
      +	contents, rest, ok := parseString(in)
      +	if !ok {
      +		return
      +	}
      +	if len(contents) == 0 {
      +		out = emptyNameList
      +		return
      +	}
      +	parts := bytes.Split(contents, comma)
      +	out = make([]string, len(parts))
      +	for i, part := range parts {
      +		out[i] = string(part)
      +	}
      +	return
      +}
      +
      +func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) {
      +	contents, rest, ok := parseString(in)
      +	if !ok {
      +		return
      +	}
      +	out = new(big.Int)
      +
      +	if len(contents) > 0 && contents[0]&0x80 == 0x80 {
      +		// This is a negative number
      +		notBytes := make([]byte, len(contents))
      +		for i := range notBytes {
      +			notBytes[i] = ^contents[i]
      +		}
      +		out.SetBytes(notBytes)
      +		out.Add(out, bigOne)
      +		out.Neg(out)
      +	} else {
      +		// Positive number
      +		out.SetBytes(contents)
      +	}
      +	ok = true
      +	return
      +}
      +
      +func parseUint32(in []byte) (uint32, []byte, bool) {
      +	if len(in) < 4 {
      +		return 0, nil, false
      +	}
      +	return binary.BigEndian.Uint32(in), in[4:], true
      +}
      +
      +func parseUint64(in []byte) (uint64, []byte, bool) {
      +	if len(in) < 8 {
      +		return 0, nil, false
      +	}
      +	return binary.BigEndian.Uint64(in), in[8:], true
      +}
      +
      +func intLength(n *big.Int) int {
      +	length := 4 /* length bytes */
      +	if n.Sign() < 0 {
      +		nMinus1 := new(big.Int).Neg(n)
      +		nMinus1.Sub(nMinus1, bigOne)
      +		bitLen := nMinus1.BitLen()
      +		if bitLen%8 == 0 {
      +			// The number will need 0xff padding
      +			length++
      +		}
      +		length += (bitLen + 7) / 8
      +	} else if n.Sign() == 0 {
      +		// A zero is the zero length string
      +	} else {
      +		bitLen := n.BitLen()
      +		if bitLen%8 == 0 {
      +			// The number will need 0x00 padding
      +			length++
      +		}
      +		length += (bitLen + 7) / 8
      +	}
      +
      +	return length
      +}
      +
      +func marshalUint32(to []byte, n uint32) []byte {
      +	binary.BigEndian.PutUint32(to, n)
      +	return to[4:]
      +}
      +
      +func marshalUint64(to []byte, n uint64) []byte {
      +	binary.BigEndian.PutUint64(to, n)
      +	return to[8:]
      +}
      +
      +func marshalInt(to []byte, n *big.Int) []byte {
      +	lengthBytes := to
      +	to = to[4:]
      +	length := 0
      +
      +	if n.Sign() < 0 {
      +		// A negative number has to be converted to two's-complement
      +		// form. So we'll subtract 1 and invert. If the
      +		// most-significant-bit isn't set then we'll need to pad the
      +		// beginning with 0xff in order to keep the number negative.
      +		nMinus1 := new(big.Int).Neg(n)
      +		nMinus1.Sub(nMinus1, bigOne)
      +		bytes := nMinus1.Bytes()
      +		for i := range bytes {
      +			bytes[i] ^= 0xff
      +		}
      +		if len(bytes) == 0 || bytes[0]&0x80 == 0 {
      +			to[0] = 0xff
      +			to = to[1:]
      +			length++
      +		}
      +		nBytes := copy(to, bytes)
      +		to = to[nBytes:]
      +		length += nBytes
      +	} else if n.Sign() == 0 {
      +		// A zero is the zero length string
      +	} else {
      +		bytes := n.Bytes()
      +		if len(bytes) > 0 && bytes[0]&0x80 != 0 {
      +			// We'll have to pad this with a 0x00 in order to
      +			// stop it looking like a negative number.
      +			to[0] = 0
      +			to = to[1:]
      +			length++
      +		}
      +		nBytes := copy(to, bytes)
      +		to = to[nBytes:]
      +		length += nBytes
      +	}
      +
      +	lengthBytes[0] = byte(length >> 24)
      +	lengthBytes[1] = byte(length >> 16)
      +	lengthBytes[2] = byte(length >> 8)
      +	lengthBytes[3] = byte(length)
      +	return to
      +}
      +
      +func writeInt(w io.Writer, n *big.Int) {
      +	length := intLength(n)
      +	buf := make([]byte, length)
      +	marshalInt(buf, n)
      +	w.Write(buf)
      +}
      +
      +func writeString(w io.Writer, s []byte) {
      +	var lengthBytes [4]byte
      +	lengthBytes[0] = byte(len(s) >> 24)
      +	lengthBytes[1] = byte(len(s) >> 16)
      +	lengthBytes[2] = byte(len(s) >> 8)
      +	lengthBytes[3] = byte(len(s))
      +	w.Write(lengthBytes[:])
      +	w.Write(s)
      +}
      +
      +func stringLength(n int) int {
      +	return 4 + n
      +}
      +
      +func marshalString(to []byte, s []byte) []byte {
      +	to[0] = byte(len(s) >> 24)
      +	to[1] = byte(len(s) >> 16)
      +	to[2] = byte(len(s) >> 8)
      +	to[3] = byte(len(s))
      +	to = to[4:]
      +	copy(to, s)
      +	return to[len(s):]
      +}
      +
      +var bigIntType = reflect.TypeOf((*big.Int)(nil))
      +
      +// Decode a packet into its corresponding message.
      +func decode(packet []byte) (interface{}, error) {
      +	var msg interface{}
      +	switch packet[0] {
      +	case msgDisconnect:
      +		msg = new(disconnectMsg)
      +	case msgServiceRequest:
      +		msg = new(serviceRequestMsg)
      +	case msgServiceAccept:
      +		msg = new(serviceAcceptMsg)
      +	case msgKexInit:
      +		msg = new(kexInitMsg)
      +	case msgKexDHInit:
      +		msg = new(kexDHInitMsg)
      +	case msgKexDHReply:
      +		msg = new(kexDHReplyMsg)
      +	case msgUserAuthRequest:
      +		msg = new(userAuthRequestMsg)
      +	case msgUserAuthFailure:
      +		msg = new(userAuthFailureMsg)
      +	case msgUserAuthPubKeyOk:
      +		msg = new(userAuthPubKeyOkMsg)
      +	case msgGlobalRequest:
      +		msg = new(globalRequestMsg)
      +	case msgRequestSuccess:
      +		msg = new(globalRequestSuccessMsg)
      +	case msgRequestFailure:
      +		msg = new(globalRequestFailureMsg)
      +	case msgChannelOpen:
      +		msg = new(channelOpenMsg)
      +	case msgChannelOpenConfirm:
      +		msg = new(channelOpenConfirmMsg)
      +	case msgChannelOpenFailure:
      +		msg = new(channelOpenFailureMsg)
      +	case msgChannelWindowAdjust:
      +		msg = new(windowAdjustMsg)
      +	case msgChannelEOF:
      +		msg = new(channelEOFMsg)
      +	case msgChannelClose:
      +		msg = new(channelCloseMsg)
      +	case msgChannelRequest:
      +		msg = new(channelRequestMsg)
      +	case msgChannelSuccess:
      +		msg = new(channelRequestSuccessMsg)
      +	case msgChannelFailure:
      +		msg = new(channelRequestFailureMsg)
      +	default:
      +		return nil, unexpectedMessageError(0, packet[0])
      +	}
      +	if err := Unmarshal(packet, msg); err != nil {
      +		return nil, err
      +	}
      +	return msg, nil
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/messages_test.go b/vendor/golang.org/x/crypto/ssh/messages_test.go
      new file mode 100644
      index 00000000..955b5127
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/messages_test.go
      @@ -0,0 +1,254 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bytes"
      +	"math/big"
      +	"math/rand"
      +	"reflect"
      +	"testing"
      +	"testing/quick"
      +)
      +
      +var intLengthTests = []struct {
      +	val, length int
      +}{
      +	{0, 4 + 0},
      +	{1, 4 + 1},
      +	{127, 4 + 1},
      +	{128, 4 + 2},
      +	{-1, 4 + 1},
      +}
      +
      +func TestIntLength(t *testing.T) {
      +	for _, test := range intLengthTests {
      +		v := new(big.Int).SetInt64(int64(test.val))
      +		length := intLength(v)
      +		if length != test.length {
      +			t.Errorf("For %d, got length %d but expected %d", test.val, length, test.length)
      +		}
      +	}
      +}
      +
      +type msgAllTypes struct {
      +	Bool    bool `sshtype:"21"`
      +	Array   [16]byte
      +	Uint64  uint64
      +	Uint32  uint32
      +	Uint8   uint8
      +	String  string
      +	Strings []string
      +	Bytes   []byte
      +	Int     *big.Int
      +	Rest    []byte `ssh:"rest"`
      +}
      +
      +func (t *msgAllTypes) Generate(rand *rand.Rand, size int) reflect.Value {
      +	m := &msgAllTypes{}
      +	m.Bool = rand.Intn(2) == 1
      +	randomBytes(m.Array[:], rand)
      +	m.Uint64 = uint64(rand.Int63n(1<<63 - 1))
      +	m.Uint32 = uint32(rand.Intn((1 << 31) - 1))
      +	m.Uint8 = uint8(rand.Intn(1 << 8))
      +	m.String = string(m.Array[:])
      +	m.Strings = randomNameList(rand)
      +	m.Bytes = m.Array[:]
      +	m.Int = randomInt(rand)
      +	m.Rest = m.Array[:]
      +	return reflect.ValueOf(m)
      +}
      +
      +func TestMarshalUnmarshal(t *testing.T) {
      +	rand := rand.New(rand.NewSource(0))
      +	iface := &msgAllTypes{}
      +	ty := reflect.ValueOf(iface).Type()
      +
      +	n := 100
      +	if testing.Short() {
      +		n = 5
      +	}
      +	for j := 0; j < n; j++ {
      +		v, ok := quick.Value(ty, rand)
      +		if !ok {
      +			t.Errorf("failed to create value")
      +			break
      +		}
      +
      +		m1 := v.Elem().Interface()
      +		m2 := iface
      +
      +		marshaled := Marshal(m1)
      +		if err := Unmarshal(marshaled, m2); err != nil {
      +			t.Errorf("Unmarshal %#v: %s", m1, err)
      +			break
      +		}
      +
      +		if !reflect.DeepEqual(v.Interface(), m2) {
      +			t.Errorf("got: %#v\nwant:%#v\n%x", m2, m1, marshaled)
      +			break
      +		}
      +	}
      +}
      +
      +func TestUnmarshalEmptyPacket(t *testing.T) {
      +	var b []byte
      +	var m channelRequestSuccessMsg
      +	if err := Unmarshal(b, &m); err == nil {
      +		t.Fatalf("unmarshal of empty slice succeeded")
      +	}
      +}
      +
      +func TestUnmarshalUnexpectedPacket(t *testing.T) {
      +	type S struct {
      +		I uint32 `sshtype:"43"`
      +		S string
      +		B bool
      +	}
      +
      +	s := S{11, "hello", true}
      +	packet := Marshal(s)
      +	packet[0] = 42
      +	roundtrip := S{}
      +	err := Unmarshal(packet, &roundtrip)
      +	if err == nil {
      +		t.Fatal("expected error, not nil")
      +	}
      +}
      +
      +func TestMarshalPtr(t *testing.T) {
      +	s := struct {
      +		S string
      +	}{"hello"}
      +
      +	m1 := Marshal(s)
      +	m2 := Marshal(&s)
      +	if !bytes.Equal(m1, m2) {
      +		t.Errorf("got %q, want %q for marshaled pointer", m2, m1)
      +	}
      +}
      +
      +func TestBareMarshalUnmarshal(t *testing.T) {
      +	type S struct {
      +		I uint32
      +		S string
      +		B bool
      +	}
      +
      +	s := S{42, "hello", true}
      +	packet := Marshal(s)
      +	roundtrip := S{}
      +	Unmarshal(packet, &roundtrip)
      +
      +	if !reflect.DeepEqual(s, roundtrip) {
      +		t.Errorf("got %#v, want %#v", roundtrip, s)
      +	}
      +}
      +
      +func TestBareMarshal(t *testing.T) {
      +	type S2 struct {
      +		I uint32
      +	}
      +	s := S2{42}
      +	packet := Marshal(s)
      +	i, rest, ok := parseUint32(packet)
      +	if len(rest) > 0 || !ok {
      +		t.Errorf("parseInt(%q): parse error", packet)
      +	}
      +	if i != s.I {
      +		t.Errorf("got %d, want %d", i, s.I)
      +	}
      +}
      +
      +func TestUnmarshalShortKexInitPacket(t *testing.T) {
      +	// This used to panic.
      +	// Issue 11348
      +	packet := []byte{0x14, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xff, 0xff, 0xff, 0xff}
      +	kim := &kexInitMsg{}
      +	if err := Unmarshal(packet, kim); err == nil {
      +		t.Error("truncated packet unmarshaled without error")
      +	}
      +}
      +
      +func randomBytes(out []byte, rand *rand.Rand) {
      +	for i := 0; i < len(out); i++ {
      +		out[i] = byte(rand.Int31())
      +	}
      +}
      +
      +func randomNameList(rand *rand.Rand) []string {
      +	ret := make([]string, rand.Int31()&15)
      +	for i := range ret {
      +		s := make([]byte, 1+(rand.Int31()&15))
      +		for j := range s {
      +			s[j] = 'a' + uint8(rand.Int31()&15)
      +		}
      +		ret[i] = string(s)
      +	}
      +	return ret
      +}
      +
      +func randomInt(rand *rand.Rand) *big.Int {
      +	return new(big.Int).SetInt64(int64(int32(rand.Uint32())))
      +}
      +
      +func (*kexInitMsg) Generate(rand *rand.Rand, size int) reflect.Value {
      +	ki := &kexInitMsg{}
      +	randomBytes(ki.Cookie[:], rand)
      +	ki.KexAlgos = randomNameList(rand)
      +	ki.ServerHostKeyAlgos = randomNameList(rand)
      +	ki.CiphersClientServer = randomNameList(rand)
      +	ki.CiphersServerClient = randomNameList(rand)
      +	ki.MACsClientServer = randomNameList(rand)
      +	ki.MACsServerClient = randomNameList(rand)
      +	ki.CompressionClientServer = randomNameList(rand)
      +	ki.CompressionServerClient = randomNameList(rand)
      +	ki.LanguagesClientServer = randomNameList(rand)
      +	ki.LanguagesServerClient = randomNameList(rand)
      +	if rand.Int31()&1 == 1 {
      +		ki.FirstKexFollows = true
      +	}
      +	return reflect.ValueOf(ki)
      +}
      +
      +func (*kexDHInitMsg) Generate(rand *rand.Rand, size int) reflect.Value {
      +	dhi := &kexDHInitMsg{}
      +	dhi.X = randomInt(rand)
      +	return reflect.ValueOf(dhi)
      +}
      +
      +var (
      +	_kexInitMsg   = new(kexInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface()
      +	_kexDHInitMsg = new(kexDHInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface()
      +
      +	_kexInit   = Marshal(_kexInitMsg)
      +	_kexDHInit = Marshal(_kexDHInitMsg)
      +)
      +
      +func BenchmarkMarshalKexInitMsg(b *testing.B) {
      +	for i := 0; i < b.N; i++ {
      +		Marshal(_kexInitMsg)
      +	}
      +}
      +
      +func BenchmarkUnmarshalKexInitMsg(b *testing.B) {
      +	m := new(kexInitMsg)
      +	for i := 0; i < b.N; i++ {
      +		Unmarshal(_kexInit, m)
      +	}
      +}
      +
      +func BenchmarkMarshalKexDHInitMsg(b *testing.B) {
      +	for i := 0; i < b.N; i++ {
      +		Marshal(_kexDHInitMsg)
      +	}
      +}
      +
      +func BenchmarkUnmarshalKexDHInitMsg(b *testing.B) {
      +	m := new(kexDHInitMsg)
      +	for i := 0; i < b.N; i++ {
      +		Unmarshal(_kexDHInit, m)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go
      new file mode 100644
      index 00000000..321880ad
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/mux.go
      @@ -0,0 +1,356 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"encoding/binary"
      +	"fmt"
      +	"io"
      +	"log"
      +	"sync"
      +	"sync/atomic"
      +)
      +
      +// debugMux, if set, causes messages in the connection protocol to be
      +// logged.
      +const debugMux = false
      +
      +// chanList is a thread safe channel list.
      +type chanList struct {
      +	// protects concurrent access to chans
      +	sync.Mutex
      +
      +	// chans are indexed by the local id of the channel, which the
      +	// other side should send in the PeersId field.
      +	chans []*channel
      +
      +	// This is a debugging aid: it offsets all IDs by this
      +	// amount. This helps distinguish otherwise identical
      +	// server/client muxes
      +	offset uint32
      +}
      +
      +// Assigns a channel ID to the given channel.
      +func (c *chanList) add(ch *channel) uint32 {
      +	c.Lock()
      +	defer c.Unlock()
      +	for i := range c.chans {
      +		if c.chans[i] == nil {
      +			c.chans[i] = ch
      +			return uint32(i) + c.offset
      +		}
      +	}
      +	c.chans = append(c.chans, ch)
      +	return uint32(len(c.chans)-1) + c.offset
      +}
      +
      +// getChan returns the channel for the given ID.
      +func (c *chanList) getChan(id uint32) *channel {
      +	id -= c.offset
      +
      +	c.Lock()
      +	defer c.Unlock()
      +	if id < uint32(len(c.chans)) {
      +		return c.chans[id]
      +	}
      +	return nil
      +}
      +
      +func (c *chanList) remove(id uint32) {
      +	id -= c.offset
      +	c.Lock()
      +	if id < uint32(len(c.chans)) {
      +		c.chans[id] = nil
      +	}
      +	c.Unlock()
      +}
      +
      +// dropAll forgets all channels it knows, returning them in a slice.
      +func (c *chanList) dropAll() []*channel {
      +	c.Lock()
      +	defer c.Unlock()
      +	var r []*channel
      +
      +	for _, ch := range c.chans {
      +		if ch == nil {
      +			continue
      +		}
      +		r = append(r, ch)
      +	}
      +	c.chans = nil
      +	return r
      +}
      +
      +// mux represents the state for the SSH connection protocol, which
      +// multiplexes many channels onto a single packet transport.
      +type mux struct {
      +	conn     packetConn
      +	chanList chanList
      +
      +	incomingChannels chan NewChannel
      +
      +	globalSentMu     sync.Mutex
      +	globalResponses  chan interface{}
      +	incomingRequests chan *Request
      +
      +	errCond *sync.Cond
      +	err     error
      +}
      +
      +// When debugging, each new chanList instantiation has a different
      +// offset.
      +var globalOff uint32
      +
      +func (m *mux) Wait() error {
      +	m.errCond.L.Lock()
      +	defer m.errCond.L.Unlock()
      +	for m.err == nil {
      +		m.errCond.Wait()
      +	}
      +	return m.err
      +}
      +
      +// newMux returns a mux that runs over the given connection.
      +func newMux(p packetConn) *mux {
      +	m := &mux{
      +		conn:             p,
      +		incomingChannels: make(chan NewChannel, 16),
      +		globalResponses:  make(chan interface{}, 1),
      +		incomingRequests: make(chan *Request, 16),
      +		errCond:          newCond(),
      +	}
      +	if debugMux {
      +		m.chanList.offset = atomic.AddUint32(&globalOff, 1)
      +	}
      +
      +	go m.loop()
      +	return m
      +}
      +
      +func (m *mux) sendMessage(msg interface{}) error {
      +	p := Marshal(msg)
      +	return m.conn.writePacket(p)
      +}
      +
      +func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) {
      +	if wantReply {
      +		m.globalSentMu.Lock()
      +		defer m.globalSentMu.Unlock()
      +	}
      +
      +	if err := m.sendMessage(globalRequestMsg{
      +		Type:      name,
      +		WantReply: wantReply,
      +		Data:      payload,
      +	}); err != nil {
      +		return false, nil, err
      +	}
      +
      +	if !wantReply {
      +		return false, nil, nil
      +	}
      +
      +	msg, ok := <-m.globalResponses
      +	if !ok {
      +		return false, nil, io.EOF
      +	}
      +	switch msg := msg.(type) {
      +	case *globalRequestFailureMsg:
      +		return false, msg.Data, nil
      +	case *globalRequestSuccessMsg:
      +		return true, msg.Data, nil
      +	default:
      +		return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg)
      +	}
      +}
      +
      +// ackRequest must be called after processing a global request that
      +// has WantReply set.
      +func (m *mux) ackRequest(ok bool, data []byte) error {
      +	if ok {
      +		return m.sendMessage(globalRequestSuccessMsg{Data: data})
      +	}
      +	return m.sendMessage(globalRequestFailureMsg{Data: data})
      +}
      +
      +// TODO(hanwen): Disconnect is a transport layer message. We should
      +// probably send and receive Disconnect somewhere in the transport
      +// code.
      +
      +// Disconnect sends a disconnect message.
      +func (m *mux) Disconnect(reason uint32, message string) error {
      +	return m.sendMessage(disconnectMsg{
      +		Reason:  reason,
      +		Message: message,
      +	})
      +}
      +
      +func (m *mux) Close() error {
      +	return m.conn.Close()
      +}
      +
      +// loop runs the connection machine. It will process packets until an
      +// error is encountered. To synchronize on loop exit, use mux.Wait.
      +func (m *mux) loop() {
      +	var err error
      +	for err == nil {
      +		err = m.onePacket()
      +	}
      +
      +	for _, ch := range m.chanList.dropAll() {
      +		ch.close()
      +	}
      +
      +	close(m.incomingChannels)
      +	close(m.incomingRequests)
      +	close(m.globalResponses)
      +
      +	m.conn.Close()
      +
      +	m.errCond.L.Lock()
      +	m.err = err
      +	m.errCond.Broadcast()
      +	m.errCond.L.Unlock()
      +
      +	if debugMux {
      +		log.Println("loop exit", err)
      +	}
      +}
      +
      +// onePacket reads and processes one packet.
      +func (m *mux) onePacket() error {
      +	packet, err := m.conn.readPacket()
      +	if err != nil {
      +		return err
      +	}
      +
      +	if debugMux {
      +		if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData {
      +			log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet))
      +		} else {
      +			p, _ := decode(packet)
      +			log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet))
      +		}
      +	}
      +
      +	switch packet[0] {
      +	case msgNewKeys:
      +		// Ignore notification of key change.
      +		return nil
      +	case msgDisconnect:
      +		return m.handleDisconnect(packet)
      +	case msgChannelOpen:
      +		return m.handleChannelOpen(packet)
      +	case msgGlobalRequest, msgRequestSuccess, msgRequestFailure:
      +		return m.handleGlobalPacket(packet)
      +	}
      +
      +	// assume a channel packet.
      +	if len(packet) < 5 {
      +		return parseError(packet[0])
      +	}
      +	id := binary.BigEndian.Uint32(packet[1:])
      +	ch := m.chanList.getChan(id)
      +	if ch == nil {
      +		return fmt.Errorf("ssh: invalid channel %d", id)
      +	}
      +
      +	return ch.handlePacket(packet)
      +}
      +
      +func (m *mux) handleDisconnect(packet []byte) error {
      +	var d disconnectMsg
      +	if err := Unmarshal(packet, &d); err != nil {
      +		return err
      +	}
      +
      +	if debugMux {
      +		log.Printf("caught disconnect: %v", d)
      +	}
      +	return &d
      +}
      +
      +func (m *mux) handleGlobalPacket(packet []byte) error {
      +	msg, err := decode(packet)
      +	if err != nil {
      +		return err
      +	}
      +
      +	switch msg := msg.(type) {
      +	case *globalRequestMsg:
      +		m.incomingRequests <- &Request{
      +			Type:      msg.Type,
      +			WantReply: msg.WantReply,
      +			Payload:   msg.Data,
      +			mux:       m,
      +		}
      +	case *globalRequestSuccessMsg, *globalRequestFailureMsg:
      +		m.globalResponses <- msg
      +	default:
      +		panic(fmt.Sprintf("not a global message %#v", msg))
      +	}
      +
      +	return nil
      +}
      +
      +// handleChannelOpen schedules a channel to be Accept()ed.
      +func (m *mux) handleChannelOpen(packet []byte) error {
      +	var msg channelOpenMsg
      +	if err := Unmarshal(packet, &msg); err != nil {
      +		return err
      +	}
      +
      +	if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
      +		failMsg := channelOpenFailureMsg{
      +			PeersId:  msg.PeersId,
      +			Reason:   ConnectionFailed,
      +			Message:  "invalid request",
      +			Language: "en_US.UTF-8",
      +		}
      +		return m.sendMessage(failMsg)
      +	}
      +
      +	c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData)
      +	c.remoteId = msg.PeersId
      +	c.maxRemotePayload = msg.MaxPacketSize
      +	c.remoteWin.add(msg.PeersWindow)
      +	m.incomingChannels <- c
      +	return nil
      +}
      +
      +func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) {
      +	ch, err := m.openChannel(chanType, extra)
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +
      +	return ch, ch.incomingRequests, nil
      +}
      +
      +func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
      +	ch := m.newChannel(chanType, channelOutbound, extra)
      +
      +	ch.maxIncomingPayload = channelMaxPacket
      +
      +	open := channelOpenMsg{
      +		ChanType:         chanType,
      +		PeersWindow:      ch.myWindow,
      +		MaxPacketSize:    ch.maxIncomingPayload,
      +		TypeSpecificData: extra,
      +		PeersId:          ch.localId,
      +	}
      +	if err := m.sendMessage(open); err != nil {
      +		return nil, err
      +	}
      +
      +	switch msg := (<-ch.msg).(type) {
      +	case *channelOpenConfirmMsg:
      +		return ch, nil
      +	case *channelOpenFailureMsg:
      +		return nil, &OpenChannelError{msg.Reason, msg.Message}
      +	default:
      +		return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/mux_test.go b/vendor/golang.org/x/crypto/ssh/mux_test.go
      new file mode 100644
      index 00000000..52303896
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/mux_test.go
      @@ -0,0 +1,525 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"io"
      +	"io/ioutil"
      +	"sync"
      +	"testing"
      +)
      +
      +func muxPair() (*mux, *mux) {
      +	a, b := memPipe()
      +
      +	s := newMux(a)
      +	c := newMux(b)
      +
      +	return s, c
      +}
      +
+// Returns both ends of a channel, and the mux for the 2nd
      +// channel.
      +func channelPair(t *testing.T) (*channel, *channel, *mux) {
      +	c, s := muxPair()
      +
      +	res := make(chan *channel, 1)
      +	go func() {
      +		newCh, ok := <-s.incomingChannels
      +		if !ok {
      +			t.Fatalf("No incoming channel")
      +		}
      +		if newCh.ChannelType() != "chan" {
      +			t.Fatalf("got type %q want chan", newCh.ChannelType())
      +		}
      +		ch, _, err := newCh.Accept()
      +		if err != nil {
      +			t.Fatalf("Accept %v", err)
      +		}
      +		res <- ch.(*channel)
      +	}()
      +
      +	ch, err := c.openChannel("chan", nil)
      +	if err != nil {
      +		t.Fatalf("OpenChannel: %v", err)
      +	}
      +
      +	return <-res, ch, c
      +}
      +
      +// Test that stderr and stdout can be addressed from different
      +// goroutines. This is intended for use with the race detector.
      +func TestMuxChannelExtendedThreadSafety(t *testing.T) {
      +	writer, reader, mux := channelPair(t)
      +	defer writer.Close()
      +	defer reader.Close()
      +	defer mux.Close()
      +
      +	var wr, rd sync.WaitGroup
      +	magic := "hello world"
      +
      +	wr.Add(2)
      +	go func() {
      +		io.WriteString(writer, magic)
      +		wr.Done()
      +	}()
      +	go func() {
      +		io.WriteString(writer.Stderr(), magic)
      +		wr.Done()
      +	}()
      +
      +	rd.Add(2)
      +	go func() {
      +		c, err := ioutil.ReadAll(reader)
      +		if string(c) != magic {
      +			t.Fatalf("stdout read got %q, want %q (error %s)", c, magic, err)
      +		}
      +		rd.Done()
      +	}()
      +	go func() {
      +		c, err := ioutil.ReadAll(reader.Stderr())
      +		if string(c) != magic {
      +			t.Fatalf("stderr read got %q, want %q (error %s)", c, magic, err)
      +		}
      +		rd.Done()
      +	}()
      +
      +	wr.Wait()
      +	writer.CloseWrite()
      +	rd.Wait()
      +}
      +
      +func TestMuxReadWrite(t *testing.T) {
      +	s, c, mux := channelPair(t)
      +	defer s.Close()
      +	defer c.Close()
      +	defer mux.Close()
      +
      +	magic := "hello world"
      +	magicExt := "hello stderr"
      +	go func() {
      +		_, err := s.Write([]byte(magic))
      +		if err != nil {
      +			t.Fatalf("Write: %v", err)
      +		}
      +		_, err = s.Extended(1).Write([]byte(magicExt))
      +		if err != nil {
      +			t.Fatalf("Write: %v", err)
      +		}
      +		err = s.Close()
      +		if err != nil {
      +			t.Fatalf("Close: %v", err)
      +		}
      +	}()
      +
      +	var buf [1024]byte
      +	n, err := c.Read(buf[:])
      +	if err != nil {
      +		t.Fatalf("server Read: %v", err)
      +	}
      +	got := string(buf[:n])
      +	if got != magic {
      +		t.Fatalf("server: got %q want %q", got, magic)
      +	}
      +
      +	n, err = c.Extended(1).Read(buf[:])
      +	if err != nil {
      +		t.Fatalf("server Read: %v", err)
      +	}
      +
      +	got = string(buf[:n])
      +	if got != magicExt {
      +		t.Fatalf("server: got %q want %q", got, magic)
      +	}
      +}
      +
      +func TestMuxChannelOverflow(t *testing.T) {
      +	reader, writer, mux := channelPair(t)
      +	defer reader.Close()
      +	defer writer.Close()
      +	defer mux.Close()
      +
      +	wDone := make(chan int, 1)
      +	go func() {
      +		if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
      +			t.Errorf("could not fill window: %v", err)
      +		}
      +		writer.Write(make([]byte, 1))
      +		wDone <- 1
      +	}()
      +	writer.remoteWin.waitWriterBlocked()
      +
      +	// Send 1 byte.
      +	packet := make([]byte, 1+4+4+1)
      +	packet[0] = msgChannelData
      +	marshalUint32(packet[1:], writer.remoteId)
      +	marshalUint32(packet[5:], uint32(1))
      +	packet[9] = 42
      +
      +	if err := writer.mux.conn.writePacket(packet); err != nil {
      +		t.Errorf("could not send packet")
      +	}
      +	if _, err := reader.SendRequest("hello", true, nil); err == nil {
      +		t.Errorf("SendRequest succeeded.")
      +	}
      +	<-wDone
      +}
      +
      +func TestMuxChannelCloseWriteUnblock(t *testing.T) {
      +	reader, writer, mux := channelPair(t)
      +	defer reader.Close()
      +	defer writer.Close()
      +	defer mux.Close()
      +
      +	wDone := make(chan int, 1)
      +	go func() {
      +		if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
      +			t.Errorf("could not fill window: %v", err)
      +		}
      +		if _, err := writer.Write(make([]byte, 1)); err != io.EOF {
      +			t.Errorf("got %v, want EOF for unblock write", err)
      +		}
      +		wDone <- 1
      +	}()
      +
      +	writer.remoteWin.waitWriterBlocked()
      +	reader.Close()
      +	<-wDone
      +}
      +
      +func TestMuxConnectionCloseWriteUnblock(t *testing.T) {
      +	reader, writer, mux := channelPair(t)
      +	defer reader.Close()
      +	defer writer.Close()
      +	defer mux.Close()
      +
      +	wDone := make(chan int, 1)
      +	go func() {
      +		if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
      +			t.Errorf("could not fill window: %v", err)
      +		}
      +		if _, err := writer.Write(make([]byte, 1)); err != io.EOF {
      +			t.Errorf("got %v, want EOF for unblock write", err)
      +		}
      +		wDone <- 1
      +	}()
      +
      +	writer.remoteWin.waitWriterBlocked()
      +	mux.Close()
      +	<-wDone
      +}
      +
      +func TestMuxReject(t *testing.T) {
      +	client, server := muxPair()
      +	defer server.Close()
      +	defer client.Close()
      +
      +	go func() {
      +		ch, ok := <-server.incomingChannels
      +		if !ok {
      +			t.Fatalf("Accept")
      +		}
      +		if ch.ChannelType() != "ch" || string(ch.ExtraData()) != "extra" {
      +			t.Fatalf("unexpected channel: %q, %q", ch.ChannelType(), ch.ExtraData())
      +		}
      +		ch.Reject(RejectionReason(42), "message")
      +	}()
      +
      +	ch, err := client.openChannel("ch", []byte("extra"))
      +	if ch != nil {
      +		t.Fatal("openChannel not rejected")
      +	}
      +
      +	ocf, ok := err.(*OpenChannelError)
      +	if !ok {
      +		t.Errorf("got %#v want *OpenChannelError", err)
      +	} else if ocf.Reason != 42 || ocf.Message != "message" {
      +		t.Errorf("got %#v, want {Reason: 42, Message: %q}", ocf, "message")
      +	}
      +
      +	want := "ssh: rejected: unknown reason 42 (message)"
      +	if err.Error() != want {
      +		t.Errorf("got %q, want %q", err.Error(), want)
      +	}
      +}
      +
      +func TestMuxChannelRequest(t *testing.T) {
      +	client, server, mux := channelPair(t)
      +	defer server.Close()
      +	defer client.Close()
      +	defer mux.Close()
      +
      +	var received int
      +	var wg sync.WaitGroup
      +	wg.Add(1)
      +	go func() {
      +		for r := range server.incomingRequests {
      +			received++
      +			r.Reply(r.Type == "yes", nil)
      +		}
      +		wg.Done()
      +	}()
      +	_, err := client.SendRequest("yes", false, nil)
      +	if err != nil {
      +		t.Fatalf("SendRequest: %v", err)
      +	}
      +	ok, err := client.SendRequest("yes", true, nil)
      +	if err != nil {
      +		t.Fatalf("SendRequest: %v", err)
      +	}
      +
      +	if !ok {
      +		t.Errorf("SendRequest(yes): %v", ok)
      +
      +	}
      +
      +	ok, err = client.SendRequest("no", true, nil)
      +	if err != nil {
      +		t.Fatalf("SendRequest: %v", err)
      +	}
      +	if ok {
      +		t.Errorf("SendRequest(no): %v", ok)
      +
      +	}
      +
      +	client.Close()
      +	wg.Wait()
      +
      +	if received != 3 {
      +		t.Errorf("got %d requests, want %d", received, 3)
      +	}
      +}
      +
      +func TestMuxGlobalRequest(t *testing.T) {
      +	clientMux, serverMux := muxPair()
      +	defer serverMux.Close()
      +	defer clientMux.Close()
      +
      +	var seen bool
      +	go func() {
      +		for r := range serverMux.incomingRequests {
      +			seen = seen || r.Type == "peek"
      +			if r.WantReply {
      +				err := r.Reply(r.Type == "yes",
      +					append([]byte(r.Type), r.Payload...))
      +				if err != nil {
      +					t.Errorf("AckRequest: %v", err)
      +				}
      +			}
      +		}
      +	}()
      +
      +	_, _, err := clientMux.SendRequest("peek", false, nil)
      +	if err != nil {
      +		t.Errorf("SendRequest: %v", err)
      +	}
      +
      +	ok, data, err := clientMux.SendRequest("yes", true, []byte("a"))
      +	if !ok || string(data) != "yesa" || err != nil {
      +		t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v",
      +			ok, data, err)
      +	}
      +	if ok, data, err := clientMux.SendRequest("yes", true, []byte("a")); !ok || string(data) != "yesa" || err != nil {
      +		t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v",
      +			ok, data, err)
      +	}
      +
      +	if ok, data, err := clientMux.SendRequest("no", true, []byte("a")); ok || string(data) != "noa" || err != nil {
      +		t.Errorf("SendRequest(\"no\", true, \"a\"): %v %v %v",
      +			ok, data, err)
      +	}
      +
      +	clientMux.Disconnect(0, "")
      +	if !seen {
      +		t.Errorf("never saw 'peek' request")
      +	}
      +}
      +
      +func TestMuxGlobalRequestUnblock(t *testing.T) {
      +	clientMux, serverMux := muxPair()
      +	defer serverMux.Close()
      +	defer clientMux.Close()
      +
      +	result := make(chan error, 1)
      +	go func() {
      +		_, _, err := clientMux.SendRequest("hello", true, nil)
      +		result <- err
      +	}()
      +
      +	<-serverMux.incomingRequests
      +	serverMux.conn.Close()
      +	err := <-result
      +
      +	if err != io.EOF {
      +		t.Errorf("want EOF, got %v", io.EOF)
      +	}
      +}
      +
      +func TestMuxChannelRequestUnblock(t *testing.T) {
      +	a, b, connB := channelPair(t)
      +	defer a.Close()
      +	defer b.Close()
      +	defer connB.Close()
      +
      +	result := make(chan error, 1)
      +	go func() {
      +		_, err := a.SendRequest("hello", true, nil)
      +		result <- err
      +	}()
      +
      +	<-b.incomingRequests
      +	connB.conn.Close()
      +	err := <-result
      +
      +	if err != io.EOF {
      +		t.Errorf("want EOF, got %v", err)
      +	}
      +}
      +
      +func TestMuxDisconnect(t *testing.T) {
      +	a, b := muxPair()
      +	defer a.Close()
      +	defer b.Close()
      +
      +	go func() {
      +		for r := range b.incomingRequests {
      +			r.Reply(true, nil)
      +		}
      +	}()
      +
      +	a.Disconnect(42, "whatever")
      +	ok, _, err := a.SendRequest("hello", true, nil)
      +	if ok || err == nil {
      +		t.Errorf("got reply after disconnecting")
      +	}
      +	err = b.Wait()
      +	if d, ok := err.(*disconnectMsg); !ok || d.Reason != 42 {
      +		t.Errorf("got %#v, want disconnectMsg{Reason:42}", err)
      +	}
      +}
      +
      +func TestMuxCloseChannel(t *testing.T) {
      +	r, w, mux := channelPair(t)
      +	defer mux.Close()
      +	defer r.Close()
      +	defer w.Close()
      +
      +	result := make(chan error, 1)
      +	go func() {
      +		var b [1024]byte
      +		_, err := r.Read(b[:])
      +		result <- err
      +	}()
      +	if err := w.Close(); err != nil {
      +		t.Errorf("w.Close: %v", err)
      +	}
      +
      +	if _, err := w.Write([]byte("hello")); err != io.EOF {
      +		t.Errorf("got err %v, want io.EOF after Close", err)
      +	}
      +
      +	if err := <-result; err != io.EOF {
      +		t.Errorf("got %v (%T), want io.EOF", err, err)
      +	}
      +}
      +
      +func TestMuxCloseWriteChannel(t *testing.T) {
      +	r, w, mux := channelPair(t)
      +	defer mux.Close()
      +
      +	result := make(chan error, 1)
      +	go func() {
      +		var b [1024]byte
      +		_, err := r.Read(b[:])
      +		result <- err
      +	}()
      +	if err := w.CloseWrite(); err != nil {
      +		t.Errorf("w.CloseWrite: %v", err)
      +	}
      +
      +	if _, err := w.Write([]byte("hello")); err != io.EOF {
      +		t.Errorf("got err %v, want io.EOF after CloseWrite", err)
      +	}
      +
      +	if err := <-result; err != io.EOF {
      +		t.Errorf("got %v (%T), want io.EOF", err, err)
      +	}
      +}
      +
      +func TestMuxInvalidRecord(t *testing.T) {
      +	a, b := muxPair()
      +	defer a.Close()
      +	defer b.Close()
      +
      +	packet := make([]byte, 1+4+4+1)
      +	packet[0] = msgChannelData
      +	marshalUint32(packet[1:], 29348723 /* invalid channel id */)
      +	marshalUint32(packet[5:], 1)
      +	packet[9] = 42
      +
      +	a.conn.writePacket(packet)
      +	go a.SendRequest("hello", false, nil)
      +	// 'a' wrote an invalid packet, so 'b' has exited.
      +	req, ok := <-b.incomingRequests
      +	if ok {
      +		t.Errorf("got request %#v after receiving invalid packet", req)
      +	}
      +}
      +
      +func TestZeroWindowAdjust(t *testing.T) {
      +	a, b, mux := channelPair(t)
      +	defer a.Close()
      +	defer b.Close()
      +	defer mux.Close()
      +
      +	go func() {
      +		io.WriteString(a, "hello")
      +		// bogus adjust.
      +		a.sendMessage(windowAdjustMsg{})
      +		io.WriteString(a, "world")
      +		a.Close()
      +	}()
      +
      +	want := "helloworld"
      +	c, _ := ioutil.ReadAll(b)
      +	if string(c) != want {
      +		t.Errorf("got %q want %q", c, want)
      +	}
      +}
      +
      +func TestMuxMaxPacketSize(t *testing.T) {
      +	a, b, mux := channelPair(t)
      +	defer a.Close()
      +	defer b.Close()
      +	defer mux.Close()
      +
      +	large := make([]byte, a.maxRemotePayload+1)
      +	packet := make([]byte, 1+4+4+1+len(large))
      +	packet[0] = msgChannelData
      +	marshalUint32(packet[1:], a.remoteId)
      +	marshalUint32(packet[5:], uint32(len(large)))
      +	packet[9] = 42
      +
      +	if err := a.mux.conn.writePacket(packet); err != nil {
      +		t.Errorf("could not send packet")
      +	}
      +
      +	go a.SendRequest("hello", false, nil)
      +
      +	_, ok := <-b.incomingRequests
      +	if ok {
      +		t.Errorf("connection still alive after receiving large packet.")
      +	}
      +}
      +
      +// Don't ship code with debug=true.
      +func TestDebug(t *testing.T) {
      +	if debugMux {
      +		t.Error("mux debug switched on")
      +	}
      +	if debugHandshake {
      +		t.Error("handshake debug switched on")
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
      new file mode 100644
      index 00000000..4781eb78
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/server.go
      @@ -0,0 +1,495 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bytes"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"net"
      +)
      +
      +// The Permissions type holds fine-grained permissions that are
      +// specific to a user or a specific authentication method for a
      +// user. Permissions, except for "source-address", must be enforced in
      +// the server application layer, after successful authentication. The
      +// Permissions are passed on in ServerConn so a server implementation
      +// can honor them.
      +type Permissions struct {
      +	// Critical options restrict default permissions. Common
      +	// restrictions are "source-address" and "force-command". If
      +	// the server cannot enforce the restriction, or does not
      +	// recognize it, the user should not authenticate.
      +	CriticalOptions map[string]string
      +
      +	// Extensions are extra functionality that the server may
      +	// offer on authenticated connections. Common extensions are
      +	// "permit-agent-forwarding", "permit-X11-forwarding". Lack of
      +	// support for an extension does not preclude authenticating a
      +	// user.
      +	Extensions map[string]string
      +}
      +
      +// ServerConfig holds server specific configuration data.
      +type ServerConfig struct {
      +	// Config contains configuration shared between client and server.
      +	Config
      +
      +	hostKeys []Signer
      +
      +	// NoClientAuth is true if clients are allowed to connect without
      +	// authenticating.
      +	NoClientAuth bool
      +
      +	// PasswordCallback, if non-nil, is called when a user
      +	// attempts to authenticate using a password.
      +	PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error)
      +
      +	// PublicKeyCallback, if non-nil, is called when a client attempts public
      +	// key authentication. It must return true if the given public key is
      +	// valid for the given user. For example, see CertChecker.Authenticate.
      +	PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
      +
      +	// KeyboardInteractiveCallback, if non-nil, is called when
      +	// keyboard-interactive authentication is selected (RFC
      +	// 4256). The client object's Challenge function should be
      +	// used to query the user. The callback may offer multiple
      +	// Challenge rounds. To avoid information leaks, the client
      +	// should be presented a challenge even if the user is
      +	// unknown.
      +	KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error)
      +
      +	// AuthLogCallback, if non-nil, is called to log all authentication
      +	// attempts.
      +	AuthLogCallback func(conn ConnMetadata, method string, err error)
      +
      +	// ServerVersion is the version identification string to announce in
      +	// the public handshake.
      +	// If empty, a reasonable default is used.
      +	// Note that RFC 4253 section 4.2 requires that this string start with
      +	// "SSH-2.0-".
      +	ServerVersion string
      +}
      +
      +// AddHostKey adds a private key as a host key. If an existing host
      +// key exists with the same algorithm, it is overwritten. Each server
      +// config must have at least one host key.
      +func (s *ServerConfig) AddHostKey(key Signer) {
      +	for i, k := range s.hostKeys {
      +		if k.PublicKey().Type() == key.PublicKey().Type() {
      +			s.hostKeys[i] = key
      +			return
      +		}
      +	}
      +
      +	s.hostKeys = append(s.hostKeys, key)
      +}
      +
      +// cachedPubKey contains the results of querying whether a public key is
      +// acceptable for a user.
      +type cachedPubKey struct {
      +	user       string
      +	pubKeyData []byte
      +	result     error
      +	perms      *Permissions
      +}
      +
      +const maxCachedPubKeys = 16
      +
      +// pubKeyCache caches tests for public keys.  Since SSH clients
      +// will query whether a public key is acceptable before attempting to
      +// authenticate with it, we end up with duplicate queries for public
      +// key validity.  The cache only applies to a single ServerConn.
      +type pubKeyCache struct {
      +	keys []cachedPubKey
      +}
      +
      +// get returns the result for a given user/algo/key tuple.
      +func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
      +	for _, k := range c.keys {
      +		if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) {
      +			return k, true
      +		}
      +	}
      +	return cachedPubKey{}, false
      +}
      +
      +// add adds the given tuple to the cache.
      +func (c *pubKeyCache) add(candidate cachedPubKey) {
      +	if len(c.keys) < maxCachedPubKeys {
      +		c.keys = append(c.keys, candidate)
      +	}
      +}
      +
      +// ServerConn is an authenticated SSH connection, as seen from the
      +// server
      +type ServerConn struct {
      +	Conn
      +
      +	// If the succeeding authentication callback returned a
      +	// non-nil Permissions pointer, it is stored here.
      +	Permissions *Permissions
      +}
      +
      +// NewServerConn starts a new SSH server with c as the underlying
      +// transport.  It starts with a handshake and, if the handshake is
      +// unsuccessful, it closes the connection and returns an error.  The
      +// Request and NewChannel channels must be serviced, or the connection
      +// will hang.
      +func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) {
      +	fullConf := *config
      +	fullConf.SetDefaults()
      +	s := &connection{
      +		sshConn: sshConn{conn: c},
      +	}
      +	perms, err := s.serverHandshake(&fullConf)
      +	if err != nil {
      +		c.Close()
      +		return nil, nil, nil, err
      +	}
      +	return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil
      +}
      +
      +// signAndMarshal signs the data with the appropriate algorithm,
      +// and serializes the result in SSH wire format.
      +func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) {
      +	sig, err := k.Sign(rand, data)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return Marshal(sig), nil
      +}
      +
      +// handshake performs key exchange and user authentication.
      +func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) {
      +	if len(config.hostKeys) == 0 {
      +		return nil, errors.New("ssh: server has no host keys")
      +	}
      +
      +	if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil {
      +		return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
      +	}
      +
      +	if config.ServerVersion != "" {
      +		s.serverVersion = []byte(config.ServerVersion)
      +	} else {
      +		s.serverVersion = []byte(packageVersion)
      +	}
      +	var err error
      +	s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */)
      +	s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config)
      +
      +	if err := s.transport.requestKeyChange(); err != nil {
      +		return nil, err
      +	}
      +
      +	if packet, err := s.transport.readPacket(); err != nil {
      +		return nil, err
      +	} else if packet[0] != msgNewKeys {
      +		return nil, unexpectedMessageError(msgNewKeys, packet[0])
      +	}
      +
      +	// We just did the key change, so the session ID is established.
      +	s.sessionID = s.transport.getSessionID()
      +
      +	var packet []byte
      +	if packet, err = s.transport.readPacket(); err != nil {
      +		return nil, err
      +	}
      +
      +	var serviceRequest serviceRequestMsg
      +	if err = Unmarshal(packet, &serviceRequest); err != nil {
      +		return nil, err
      +	}
      +	if serviceRequest.Service != serviceUserAuth {
      +		return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating")
      +	}
      +	serviceAccept := serviceAcceptMsg{
      +		Service: serviceUserAuth,
      +	}
      +	if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil {
      +		return nil, err
      +	}
      +
      +	perms, err := s.serverAuthenticate(config)
      +	if err != nil {
      +		return nil, err
      +	}
      +	s.mux = newMux(s.transport)
      +	return perms, err
      +}
      +
      +func isAcceptableAlgo(algo string) bool {
      +	switch algo {
      +	case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
      +		CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
      +		return true
      +	}
      +	return false
      +}
      +
      +func checkSourceAddress(addr net.Addr, sourceAddr string) error {
      +	if addr == nil {
      +		return errors.New("ssh: no address known for client, but source-address match required")
      +	}
      +
      +	tcpAddr, ok := addr.(*net.TCPAddr)
      +	if !ok {
      +		return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr)
      +	}
      +
      +	if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
      +		if bytes.Equal(allowedIP, tcpAddr.IP) {
      +			return nil
      +		}
      +	} else {
      +		_, ipNet, err := net.ParseCIDR(sourceAddr)
      +		if err != nil {
      +			return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err)
      +		}
      +
      +		if ipNet.Contains(tcpAddr.IP) {
      +			return nil
      +		}
      +	}
      +
      +	return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
      +}
      +
      +func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
      +	var err error
      +	var cache pubKeyCache
      +	var perms *Permissions
      +
      +userAuthLoop:
      +	for {
      +		var userAuthReq userAuthRequestMsg
      +		if packet, err := s.transport.readPacket(); err != nil {
      +			return nil, err
      +		} else if err = Unmarshal(packet, &userAuthReq); err != nil {
      +			return nil, err
      +		}
      +
      +		if userAuthReq.Service != serviceSSH {
      +			return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
      +		}
      +
      +		s.user = userAuthReq.User
      +		perms = nil
      +		authErr := errors.New("no auth passed yet")
      +
      +		switch userAuthReq.Method {
      +		case "none":
      +			if config.NoClientAuth {
      +				s.user = ""
      +				authErr = nil
      +			}
      +		case "password":
      +			if config.PasswordCallback == nil {
      +				authErr = errors.New("ssh: password auth not configured")
      +				break
      +			}
      +			payload := userAuthReq.Payload
      +			if len(payload) < 1 || payload[0] != 0 {
      +				return nil, parseError(msgUserAuthRequest)
      +			}
      +			payload = payload[1:]
      +			password, payload, ok := parseString(payload)
      +			if !ok || len(payload) > 0 {
      +				return nil, parseError(msgUserAuthRequest)
      +			}
      +
      +			perms, authErr = config.PasswordCallback(s, password)
      +		case "keyboard-interactive":
      +			if config.KeyboardInteractiveCallback == nil {
+				authErr = errors.New("ssh: keyboard-interactive auth not configured")
      +				break
      +			}
      +
      +			prompter := &sshClientKeyboardInteractive{s}
      +			perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge)
      +		case "publickey":
      +			if config.PublicKeyCallback == nil {
      +				authErr = errors.New("ssh: publickey auth not configured")
      +				break
      +			}
      +			payload := userAuthReq.Payload
      +			if len(payload) < 1 {
      +				return nil, parseError(msgUserAuthRequest)
      +			}
      +			isQuery := payload[0] == 0
      +			payload = payload[1:]
      +			algoBytes, payload, ok := parseString(payload)
      +			if !ok {
      +				return nil, parseError(msgUserAuthRequest)
      +			}
      +			algo := string(algoBytes)
      +			if !isAcceptableAlgo(algo) {
      +				authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
      +				break
      +			}
      +
      +			pubKeyData, payload, ok := parseString(payload)
      +			if !ok {
      +				return nil, parseError(msgUserAuthRequest)
      +			}
      +
      +			pubKey, err := ParsePublicKey(pubKeyData)
      +			if err != nil {
      +				return nil, err
      +			}
      +
      +			candidate, ok := cache.get(s.user, pubKeyData)
      +			if !ok {
      +				candidate.user = s.user
      +				candidate.pubKeyData = pubKeyData
      +				candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey)
      +				if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" {
      +					candidate.result = checkSourceAddress(
      +						s.RemoteAddr(),
      +						candidate.perms.CriticalOptions[sourceAddressCriticalOption])
      +				}
      +				cache.add(candidate)
      +			}
      +
      +			if isQuery {
      +				// The client can query if the given public key
      +				// would be okay.
      +				if len(payload) > 0 {
      +					return nil, parseError(msgUserAuthRequest)
      +				}
      +
      +				if candidate.result == nil {
      +					okMsg := userAuthPubKeyOkMsg{
      +						Algo:   algo,
      +						PubKey: pubKeyData,
      +					}
      +					if err = s.transport.writePacket(Marshal(&okMsg)); err != nil {
      +						return nil, err
      +					}
      +					continue userAuthLoop
      +				}
      +				authErr = candidate.result
      +			} else {
      +				sig, payload, ok := parseSignature(payload)
      +				if !ok || len(payload) > 0 {
      +					return nil, parseError(msgUserAuthRequest)
      +				}
      +				// Ensure the public key algo and signature algo
      +				// are supported.  Compare the private key
      +				// algorithm name that corresponds to algo with
      +				// sig.Format.  This is usually the same, but
      +				// for certs, the names differ.
      +				if !isAcceptableAlgo(sig.Format) {
      +					break
      +				}
      +				signedData := buildDataSignedForAuth(s.transport.getSessionID(), userAuthReq, algoBytes, pubKeyData)
      +
      +				if err := pubKey.Verify(signedData, sig); err != nil {
      +					return nil, err
      +				}
      +
      +				authErr = candidate.result
      +				perms = candidate.perms
      +			}
      +		default:
      +			authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method)
      +		}
      +
      +		if config.AuthLogCallback != nil {
      +			config.AuthLogCallback(s, userAuthReq.Method, authErr)
      +		}
      +
      +		if authErr == nil {
      +			break userAuthLoop
      +		}
      +
      +		var failureMsg userAuthFailureMsg
      +		if config.PasswordCallback != nil {
      +			failureMsg.Methods = append(failureMsg.Methods, "password")
      +		}
      +		if config.PublicKeyCallback != nil {
      +			failureMsg.Methods = append(failureMsg.Methods, "publickey")
      +		}
      +		if config.KeyboardInteractiveCallback != nil {
      +			failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive")
      +		}
      +
      +		if len(failureMsg.Methods) == 0 {
      +			return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
      +		}
      +
      +		if err = s.transport.writePacket(Marshal(&failureMsg)); err != nil {
      +			return nil, err
      +		}
      +	}
      +
      +	if err = s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil {
      +		return nil, err
      +	}
      +	return perms, nil
      +}
      +
      +// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by
      +// asking the client on the other side of a ServerConn.
      +type sshClientKeyboardInteractive struct {
      +	*connection
      +}
      +
      +func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
      +	if len(questions) != len(echos) {
      +		return nil, errors.New("ssh: echos and questions must have equal length")
      +	}
      +
      +	var prompts []byte
      +	for i := range questions {
      +		prompts = appendString(prompts, questions[i])
      +		prompts = appendBool(prompts, echos[i])
      +	}
      +
      +	if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{
      +		Instruction: instruction,
      +		NumPrompts:  uint32(len(questions)),
      +		Prompts:     prompts,
      +	})); err != nil {
      +		return nil, err
      +	}
      +
      +	packet, err := c.transport.readPacket()
      +	if err != nil {
      +		return nil, err
      +	}
      +	if packet[0] != msgUserAuthInfoResponse {
      +		return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0])
      +	}
      +	packet = packet[1:]
      +
      +	n, packet, ok := parseUint32(packet)
      +	if !ok || int(n) != len(questions) {
      +		return nil, parseError(msgUserAuthInfoResponse)
      +	}
      +
      +	for i := uint32(0); i < n; i++ {
      +		ans, rest, ok := parseString(packet)
      +		if !ok {
      +			return nil, parseError(msgUserAuthInfoResponse)
      +		}
      +
      +		answers = append(answers, string(ans))
      +		packet = rest
      +	}
      +	if len(packet) != 0 {
      +		return nil, errors.New("ssh: junk at end of message")
      +	}
      +
      +	return answers, nil
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go
      new file mode 100644
      index 00000000..fd10cd1a
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/session.go
      @@ -0,0 +1,605 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +// Session implements an interactive session described in
      +// "RFC 4254, section 6".
      +
      +import (
      +	"bytes"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"io/ioutil"
      +	"sync"
      +)
      +
      +type Signal string
      +
      +// POSIX signals as listed in RFC 4254 Section 6.10.
      +const (
      +	SIGABRT Signal = "ABRT"
      +	SIGALRM Signal = "ALRM"
      +	SIGFPE  Signal = "FPE"
      +	SIGHUP  Signal = "HUP"
      +	SIGILL  Signal = "ILL"
      +	SIGINT  Signal = "INT"
      +	SIGKILL Signal = "KILL"
      +	SIGPIPE Signal = "PIPE"
      +	SIGQUIT Signal = "QUIT"
      +	SIGSEGV Signal = "SEGV"
      +	SIGTERM Signal = "TERM"
      +	SIGUSR1 Signal = "USR1"
      +	SIGUSR2 Signal = "USR2"
      +)
      +
      +var signals = map[Signal]int{
      +	SIGABRT: 6,
      +	SIGALRM: 14,
      +	SIGFPE:  8,
      +	SIGHUP:  1,
      +	SIGILL:  4,
      +	SIGINT:  2,
      +	SIGKILL: 9,
      +	SIGPIPE: 13,
      +	SIGQUIT: 3,
      +	SIGSEGV: 11,
      +	SIGTERM: 15,
      +}
      +
      +type TerminalModes map[uint8]uint32
      +
      +// POSIX terminal mode flags as listed in RFC 4254 Section 8.
      +const (
      +	tty_OP_END    = 0
      +	VINTR         = 1
      +	VQUIT         = 2
      +	VERASE        = 3
      +	VKILL         = 4
      +	VEOF          = 5
      +	VEOL          = 6
      +	VEOL2         = 7
      +	VSTART        = 8
      +	VSTOP         = 9
      +	VSUSP         = 10
      +	VDSUSP        = 11
      +	VREPRINT      = 12
      +	VWERASE       = 13
      +	VLNEXT        = 14
      +	VFLUSH        = 15
      +	VSWTCH        = 16
      +	VSTATUS       = 17
      +	VDISCARD      = 18
      +	IGNPAR        = 30
      +	PARMRK        = 31
      +	INPCK         = 32
      +	ISTRIP        = 33
      +	INLCR         = 34
      +	IGNCR         = 35
      +	ICRNL         = 36
      +	IUCLC         = 37
      +	IXON          = 38
      +	IXANY         = 39
      +	IXOFF         = 40
      +	IMAXBEL       = 41
      +	ISIG          = 50
      +	ICANON        = 51
      +	XCASE         = 52
      +	ECHO          = 53
      +	ECHOE         = 54
      +	ECHOK         = 55
      +	ECHONL        = 56
      +	NOFLSH        = 57
      +	TOSTOP        = 58
      +	IEXTEN        = 59
      +	ECHOCTL       = 60
      +	ECHOKE        = 61
      +	PENDIN        = 62
      +	OPOST         = 70
      +	OLCUC         = 71
      +	ONLCR         = 72
      +	OCRNL         = 73
      +	ONOCR         = 74
      +	ONLRET        = 75
      +	CS7           = 90
      +	CS8           = 91
      +	PARENB        = 92
      +	PARODD        = 93
      +	TTY_OP_ISPEED = 128
      +	TTY_OP_OSPEED = 129
      +)
      +
      +// A Session represents a connection to a remote command or shell.
      +type Session struct {
      +	// Stdin specifies the remote process's standard input.
      +	// If Stdin is nil, the remote process reads from an empty
      +	// bytes.Buffer.
      +	Stdin io.Reader
      +
      +	// Stdout and Stderr specify the remote process's standard
      +	// output and error.
      +	//
      +	// If either is nil, Run connects the corresponding file
      +	// descriptor to an instance of ioutil.Discard. There is a
      +	// fixed amount of buffering that is shared for the two streams.
      +	// If either blocks it may eventually cause the remote
      +	// command to block.
      +	Stdout io.Writer
      +	Stderr io.Writer
      +
      +	ch        Channel // the channel backing this session
      +	started   bool    // true once Start, Run or Shell is invoked.
      +	copyFuncs []func() error
      +	errors    chan error // one send per copyFunc
      +
      +	// true if pipe method is active
      +	stdinpipe, stdoutpipe, stderrpipe bool
      +
      +	// stdinPipeWriter is non-nil if StdinPipe has not been called
      +	// and Stdin was specified by the user; it is the write end of
      +	// a pipe connecting Session.Stdin to the stdin channel.
      +	stdinPipeWriter io.WriteCloser
      +
      +	exitStatus chan error
      +}
      +
      +// SendRequest sends an out-of-band channel request on the SSH channel
      +// underlying the session.
      +func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
      +	return s.ch.SendRequest(name, wantReply, payload)
      +}
      +
      +func (s *Session) Close() error {
      +	return s.ch.Close()
      +}
      +
      +// RFC 4254 Section 6.4.
      +type setenvRequest struct {
      +	Name  string
      +	Value string
      +}
      +
      +// Setenv sets an environment variable that will be applied to any
      +// command executed by Shell or Run.
      +func (s *Session) Setenv(name, value string) error {
      +	msg := setenvRequest{
      +		Name:  name,
      +		Value: value,
      +	}
      +	ok, err := s.ch.SendRequest("env", true, Marshal(&msg))
      +	if err == nil && !ok {
      +		err = errors.New("ssh: setenv failed")
      +	}
      +	return err
      +}
      +
      +// RFC 4254 Section 6.2.
      +type ptyRequestMsg struct {
      +	Term     string
      +	Columns  uint32
      +	Rows     uint32
      +	Width    uint32
      +	Height   uint32
      +	Modelist string
      +}
      +
      +// RequestPty requests the association of a pty with the session on the remote host.
      +func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error {
      +	var tm []byte
      +	for k, v := range termmodes {
      +		kv := struct {
      +			Key byte
      +			Val uint32
      +		}{k, v}
      +
      +		tm = append(tm, Marshal(&kv)...)
      +	}
      +	tm = append(tm, tty_OP_END)
      +	req := ptyRequestMsg{
      +		Term:     term,
      +		Columns:  uint32(w),
      +		Rows:     uint32(h),
      +		Width:    uint32(w * 8),
      +		Height:   uint32(h * 8),
      +		Modelist: string(tm),
      +	}
      +	ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req))
      +	if err == nil && !ok {
      +		err = errors.New("ssh: pty-req failed")
      +	}
      +	return err
      +}
      +
      +// RFC 4254 Section 6.5.
      +type subsystemRequestMsg struct {
      +	Subsystem string
      +}
      +
      +// RequestSubsystem requests the association of a subsystem with the session on the remote host.
      +// A subsystem is a predefined command that runs in the background when the ssh session is initiated
      +func (s *Session) RequestSubsystem(subsystem string) error {
      +	msg := subsystemRequestMsg{
      +		Subsystem: subsystem,
      +	}
      +	ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg))
      +	if err == nil && !ok {
      +		err = errors.New("ssh: subsystem request failed")
      +	}
      +	return err
      +}
      +
      +// RFC 4254 Section 6.9.
      +type signalMsg struct {
      +	Signal string
      +}
      +
      +// Signal sends the given signal to the remote process.
      +// sig is one of the SIG* constants.
      +func (s *Session) Signal(sig Signal) error {
      +	msg := signalMsg{
      +		Signal: string(sig),
      +	}
      +
      +	_, err := s.ch.SendRequest("signal", false, Marshal(&msg))
      +	return err
      +}
      +
      +// RFC 4254 Section 6.5.
      +type execMsg struct {
      +	Command string
      +}
      +
      +// Start runs cmd on the remote host. Typically, the remote
      +// server passes cmd to the shell for interpretation.
      +// A Session only accepts one call to Run, Start or Shell.
      +func (s *Session) Start(cmd string) error {
      +	if s.started {
      +		return errors.New("ssh: session already started")
      +	}
      +	req := execMsg{
      +		Command: cmd,
      +	}
      +
      +	ok, err := s.ch.SendRequest("exec", true, Marshal(&req))
      +	if err == nil && !ok {
      +		err = fmt.Errorf("ssh: command %v failed", cmd)
      +	}
      +	if err != nil {
      +		return err
      +	}
      +	return s.start()
      +}
      +
      +// Run runs cmd on the remote host. Typically, the remote
      +// server passes cmd to the shell for interpretation.
      +// A Session only accepts one call to Run, Start, Shell, Output,
      +// or CombinedOutput.
      +//
      +// The returned error is nil if the command runs, has no problems
      +// copying stdin, stdout, and stderr, and exits with a zero exit
      +// status.
      +//
      +// If the command fails to run or doesn't complete successfully, the
      +// error is of type *ExitError. Other error types may be
      +// returned for I/O problems.
      +func (s *Session) Run(cmd string) error {
      +	err := s.Start(cmd)
      +	if err != nil {
      +		return err
      +	}
      +	return s.Wait()
      +}
      +
      +// Output runs cmd on the remote host and returns its standard output.
      +func (s *Session) Output(cmd string) ([]byte, error) {
      +	if s.Stdout != nil {
      +		return nil, errors.New("ssh: Stdout already set")
      +	}
      +	var b bytes.Buffer
      +	s.Stdout = &b
      +	err := s.Run(cmd)
      +	return b.Bytes(), err
      +}
      +
      +type singleWriter struct {
      +	b  bytes.Buffer
      +	mu sync.Mutex
      +}
      +
      +func (w *singleWriter) Write(p []byte) (int, error) {
      +	w.mu.Lock()
      +	defer w.mu.Unlock()
      +	return w.b.Write(p)
      +}
      +
      +// CombinedOutput runs cmd on the remote host and returns its combined
      +// standard output and standard error.
      +func (s *Session) CombinedOutput(cmd string) ([]byte, error) {
      +	if s.Stdout != nil {
      +		return nil, errors.New("ssh: Stdout already set")
      +	}
      +	if s.Stderr != nil {
      +		return nil, errors.New("ssh: Stderr already set")
      +	}
      +	var b singleWriter
      +	s.Stdout = &b
      +	s.Stderr = &b
      +	err := s.Run(cmd)
      +	return b.b.Bytes(), err
      +}
      +
      +// Shell starts a login shell on the remote host. A Session only
      +// accepts one call to Run, Start, Shell, Output, or CombinedOutput.
      +func (s *Session) Shell() error {
      +	if s.started {
      +		return errors.New("ssh: session already started")
      +	}
      +
      +	ok, err := s.ch.SendRequest("shell", true, nil)
      +	if err == nil && !ok {
      +		return errors.New("ssh: could not start shell")
      +	}
      +	if err != nil {
      +		return err
      +	}
      +	return s.start()
      +}
      +
      +func (s *Session) start() error {
      +	s.started = true
      +
      +	type F func(*Session)
      +	for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} {
      +		setupFd(s)
      +	}
      +
      +	s.errors = make(chan error, len(s.copyFuncs))
      +	for _, fn := range s.copyFuncs {
      +		go func(fn func() error) {
      +			s.errors <- fn()
      +		}(fn)
      +	}
      +	return nil
      +}
      +
      +// Wait waits for the remote command to exit.
      +//
      +// The returned error is nil if the command runs, has no problems
      +// copying stdin, stdout, and stderr, and exits with a zero exit
      +// status.
      +//
      +// If the command fails to run or doesn't complete successfully, the
      +// error is of type *ExitError. Other error types may be
      +// returned for I/O problems.
      +func (s *Session) Wait() error {
      +	if !s.started {
      +		return errors.New("ssh: session not started")
      +	}
      +	waitErr := <-s.exitStatus
      +
      +	if s.stdinPipeWriter != nil {
      +		s.stdinPipeWriter.Close()
      +	}
      +	var copyError error
      +	for _ = range s.copyFuncs {
      +		if err := <-s.errors; err != nil && copyError == nil {
      +			copyError = err
      +		}
      +	}
      +	if waitErr != nil {
      +		return waitErr
      +	}
      +	return copyError
      +}
      +
      +func (s *Session) wait(reqs <-chan *Request) error {
      +	wm := Waitmsg{status: -1}
      +	// Wait for msg channel to be closed before returning.
      +	for msg := range reqs {
      +		switch msg.Type {
      +		case "exit-status":
      +			d := msg.Payload
      +			wm.status = int(d[0])<<24 | int(d[1])<<16 | int(d[2])<<8 | int(d[3])
      +		case "exit-signal":
      +			var sigval struct {
      +				Signal     string
      +				CoreDumped bool
      +				Error      string
      +				Lang       string
      +			}
      +			if err := Unmarshal(msg.Payload, &sigval); err != nil {
      +				return err
      +			}
      +
      +			// Must sanitize strings?
      +			wm.signal = sigval.Signal
      +			wm.msg = sigval.Error
      +			wm.lang = sigval.Lang
      +		default:
      +			// This handles keepalives and matches
      +			// OpenSSH's behaviour.
      +			if msg.WantReply {
      +				msg.Reply(false, nil)
      +			}
      +		}
      +	}
      +	if wm.status == 0 {
      +		return nil
      +	}
      +	if wm.status == -1 {
      +		// exit-status was never sent from server
      +		if wm.signal == "" {
      +			return errors.New("wait: remote command exited without exit status or exit signal")
      +		}
      +		wm.status = 128
      +		if _, ok := signals[Signal(wm.signal)]; ok {
      +			wm.status += signals[Signal(wm.signal)]
      +		}
      +	}
      +	return &ExitError{wm}
      +}
      +
      +func (s *Session) stdin() {
      +	if s.stdinpipe {
      +		return
      +	}
      +	var stdin io.Reader
      +	if s.Stdin == nil {
      +		stdin = new(bytes.Buffer)
      +	} else {
      +		r, w := io.Pipe()
      +		go func() {
      +			_, err := io.Copy(w, s.Stdin)
      +			w.CloseWithError(err)
      +		}()
      +		stdin, s.stdinPipeWriter = r, w
      +	}
      +	s.copyFuncs = append(s.copyFuncs, func() error {
      +		_, err := io.Copy(s.ch, stdin)
      +		if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF {
      +			err = err1
      +		}
      +		return err
      +	})
      +}
      +
      +func (s *Session) stdout() {
      +	if s.stdoutpipe {
      +		return
      +	}
      +	if s.Stdout == nil {
      +		s.Stdout = ioutil.Discard
      +	}
      +	s.copyFuncs = append(s.copyFuncs, func() error {
      +		_, err := io.Copy(s.Stdout, s.ch)
      +		return err
      +	})
      +}
      +
      +func (s *Session) stderr() {
      +	if s.stderrpipe {
      +		return
      +	}
      +	if s.Stderr == nil {
      +		s.Stderr = ioutil.Discard
      +	}
      +	s.copyFuncs = append(s.copyFuncs, func() error {
      +		_, err := io.Copy(s.Stderr, s.ch.Stderr())
      +		return err
      +	})
      +}
      +
      +// sessionStdin reroutes Close to CloseWrite.
      +type sessionStdin struct {
      +	io.Writer
      +	ch Channel
      +}
      +
      +func (s *sessionStdin) Close() error {
      +	return s.ch.CloseWrite()
      +}
      +
      +// StdinPipe returns a pipe that will be connected to the
      +// remote command's standard input when the command starts.
      +func (s *Session) StdinPipe() (io.WriteCloser, error) {
      +	if s.Stdin != nil {
      +		return nil, errors.New("ssh: Stdin already set")
      +	}
      +	if s.started {
      +		return nil, errors.New("ssh: StdinPipe after process started")
      +	}
      +	s.stdinpipe = true
      +	return &sessionStdin{s.ch, s.ch}, nil
      +}
      +
      +// StdoutPipe returns a pipe that will be connected to the
      +// remote command's standard output when the command starts.
      +// There is a fixed amount of buffering that is shared between
      +// stdout and stderr streams. If the StdoutPipe reader is
      +// not serviced fast enough it may eventually cause the
      +// remote command to block.
      +func (s *Session) StdoutPipe() (io.Reader, error) {
      +	if s.Stdout != nil {
      +		return nil, errors.New("ssh: Stdout already set")
      +	}
      +	if s.started {
      +		return nil, errors.New("ssh: StdoutPipe after process started")
      +	}
      +	s.stdoutpipe = true
      +	return s.ch, nil
      +}
      +
      +// StderrPipe returns a pipe that will be connected to the
      +// remote command's standard error when the command starts.
      +// There is a fixed amount of buffering that is shared between
      +// stdout and stderr streams. If the StderrPipe reader is
      +// not serviced fast enough it may eventually cause the
      +// remote command to block.
      +func (s *Session) StderrPipe() (io.Reader, error) {
      +	if s.Stderr != nil {
      +		return nil, errors.New("ssh: Stderr already set")
      +	}
      +	if s.started {
      +		return nil, errors.New("ssh: StderrPipe after process started")
      +	}
      +	s.stderrpipe = true
      +	return s.ch.Stderr(), nil
      +}
      +
      +// newSession returns a new interactive session on the remote host.
      +func newSession(ch Channel, reqs <-chan *Request) (*Session, error) {
      +	s := &Session{
      +		ch: ch,
      +	}
      +	s.exitStatus = make(chan error, 1)
      +	go func() {
      +		s.exitStatus <- s.wait(reqs)
      +	}()
      +
      +	return s, nil
      +}
      +
      +// An ExitError reports unsuccessful completion of a remote command.
      +type ExitError struct {
      +	Waitmsg
      +}
      +
      +func (e *ExitError) Error() string {
      +	return e.Waitmsg.String()
      +}
      +
      +// Waitmsg stores the information about an exited remote command
      +// as reported by Wait.
      +type Waitmsg struct {
      +	status int
      +	signal string
      +	msg    string
      +	lang   string
      +}
      +
      +// ExitStatus returns the exit status of the remote command.
      +func (w Waitmsg) ExitStatus() int {
      +	return w.status
      +}
      +
      +// Signal returns the exit signal of the remote command if
      +// it was terminated violently.
      +func (w Waitmsg) Signal() string {
      +	return w.signal
      +}
      +
      +// Msg returns the exit message given by the remote command
      +func (w Waitmsg) Msg() string {
      +	return w.msg
      +}
      +
      +// Lang returns the language tag. See RFC 3066
      +func (w Waitmsg) Lang() string {
      +	return w.lang
      +}
      +
      +func (w Waitmsg) String() string {
      +	return fmt.Sprintf("Process exited with: %v. Reason was: %v (%v)", w.status, w.msg, w.signal)
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/session_test.go b/vendor/golang.org/x/crypto/ssh/session_test.go
      new file mode 100644
      index 00000000..f7f0f764
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/session_test.go
      @@ -0,0 +1,774 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +// Session tests.
      +
      +import (
      +	"bytes"
      +	crypto_rand "crypto/rand"
      +	"errors"
      +	"io"
      +	"io/ioutil"
      +	"math/rand"
      +	"net"
      +	"testing"
      +
      +	"golang.org/x/crypto/ssh/terminal"
      +)
      +
      +type serverType func(Channel, <-chan *Request, *testing.T)
      +
      +// dial constructs a new test server and returns a *ClientConn.
      +func dial(handler serverType, t *testing.T) *Client {
      +	c1, c2, err := netPipe()
      +	if err != nil {
      +		t.Fatalf("netPipe: %v", err)
      +	}
      +
      +	go func() {
      +		defer c1.Close()
      +		conf := ServerConfig{
      +			NoClientAuth: true,
      +		}
      +		conf.AddHostKey(testSigners["rsa"])
      +
      +		_, chans, reqs, err := NewServerConn(c1, &conf)
      +		if err != nil {
      +			t.Fatalf("Unable to handshake: %v", err)
      +		}
      +		go DiscardRequests(reqs)
      +
      +		for newCh := range chans {
      +			if newCh.ChannelType() != "session" {
      +				newCh.Reject(UnknownChannelType, "unknown channel type")
      +				continue
      +			}
      +
      +			ch, inReqs, err := newCh.Accept()
      +			if err != nil {
      +				t.Errorf("Accept: %v", err)
      +				continue
      +			}
      +			go func() {
      +				handler(ch, inReqs, t)
      +			}()
      +		}
      +	}()
      +
      +	config := &ClientConfig{
      +		User: "testuser",
      +	}
      +
      +	conn, chans, reqs, err := NewClientConn(c2, "", config)
      +	if err != nil {
      +		t.Fatalf("unable to dial remote side: %v", err)
      +	}
      +
      +	return NewClient(conn, chans, reqs)
      +}
      +
      +// Test a simple string is returned to session.Stdout.
      +func TestSessionShell(t *testing.T) {
      +	conn := dial(shellHandler, t)
      +	defer conn.Close()
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("Unable to request new session: %v", err)
      +	}
      +	defer session.Close()
      +	stdout := new(bytes.Buffer)
      +	session.Stdout = stdout
      +	if err := session.Shell(); err != nil {
      +		t.Fatalf("Unable to execute command: %s", err)
      +	}
      +	if err := session.Wait(); err != nil {
      +		t.Fatalf("Remote command did not exit cleanly: %v", err)
      +	}
      +	actual := stdout.String()
      +	if actual != "golang" {
      +		t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual)
      +	}
      +}
      +
      +// TODO(dfc) add support for Std{in,err}Pipe when the Server supports it.
      +
      +// Test a simple string is returned via StdoutPipe.
      +func TestSessionStdoutPipe(t *testing.T) {
      +	conn := dial(shellHandler, t)
      +	defer conn.Close()
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("Unable to request new session: %v", err)
      +	}
      +	defer session.Close()
      +	stdout, err := session.StdoutPipe()
      +	if err != nil {
      +		t.Fatalf("Unable to request StdoutPipe(): %v", err)
      +	}
      +	var buf bytes.Buffer
      +	if err := session.Shell(); err != nil {
      +		t.Fatalf("Unable to execute command: %v", err)
      +	}
      +	done := make(chan bool, 1)
      +	go func() {
      +		if _, err := io.Copy(&buf, stdout); err != nil {
      +			t.Errorf("Copy of stdout failed: %v", err)
      +		}
      +		done <- true
      +	}()
      +	if err := session.Wait(); err != nil {
      +		t.Fatalf("Remote command did not exit cleanly: %v", err)
      +	}
      +	<-done
      +	actual := buf.String()
      +	if actual != "golang" {
      +		t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual)
      +	}
      +}
      +
      +// Test that a simple string is returned via the Output helper,
      +// and that stderr is discarded.
      +func TestSessionOutput(t *testing.T) {
      +	conn := dial(fixedOutputHandler, t)
      +	defer conn.Close()
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("Unable to request new session: %v", err)
      +	}
      +	defer session.Close()
      +
      +	buf, err := session.Output("") // cmd is ignored by fixedOutputHandler
      +	if err != nil {
      +		t.Error("Remote command did not exit cleanly:", err)
      +	}
      +	w := "this-is-stdout."
      +	g := string(buf)
      +	if g != w {
      +		t.Error("Remote command did not return expected string:")
      +		t.Logf("want %q", w)
      +		t.Logf("got  %q", g)
      +	}
      +}
      +
      +// Test that both stdout and stderr are returned
      +// via the CombinedOutput helper.
      +func TestSessionCombinedOutput(t *testing.T) {
      +	conn := dial(fixedOutputHandler, t)
      +	defer conn.Close()
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("Unable to request new session: %v", err)
      +	}
      +	defer session.Close()
      +
      +	buf, err := session.CombinedOutput("") // cmd is ignored by fixedOutputHandler
      +	if err != nil {
      +		t.Error("Remote command did not exit cleanly:", err)
      +	}
      +	const stdout = "this-is-stdout."
      +	const stderr = "this-is-stderr."
      +	g := string(buf)
      +	if g != stdout+stderr && g != stderr+stdout {
      +		t.Error("Remote command did not return expected string:")
      +		t.Logf("want %q, or %q", stdout+stderr, stderr+stdout)
      +		t.Logf("got  %q", g)
      +	}
      +}
      +
      +// Test non-0 exit status is returned correctly.
      +func TestExitStatusNonZero(t *testing.T) {
      +	conn := dial(exitStatusNonZeroHandler, t)
      +	defer conn.Close()
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("Unable to request new session: %v", err)
      +	}
      +	defer session.Close()
      +	if err := session.Shell(); err != nil {
      +		t.Fatalf("Unable to execute command: %v", err)
      +	}
      +	err = session.Wait()
      +	if err == nil {
      +		t.Fatalf("expected command to fail but it didn't")
      +	}
      +	e, ok := err.(*ExitError)
      +	if !ok {
      +		t.Fatalf("expected *ExitError but got %T", err)
      +	}
      +	if e.ExitStatus() != 15 {
      +		t.Fatalf("expected command to exit with 15 but got %v", e.ExitStatus())
      +	}
      +}
      +
      +// Test 0 exit status is returned correctly.
      +func TestExitStatusZero(t *testing.T) {
      +	conn := dial(exitStatusZeroHandler, t)
      +	defer conn.Close()
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("Unable to request new session: %v", err)
      +	}
      +	defer session.Close()
      +
      +	if err := session.Shell(); err != nil {
      +		t.Fatalf("Unable to execute command: %v", err)
      +	}
      +	err = session.Wait()
      +	if err != nil {
      +		t.Fatalf("expected nil but got %v", err)
      +	}
      +}
      +
      +// Test exit signal and status are both returned correctly.
      +func TestExitSignalAndStatus(t *testing.T) {
      +	conn := dial(exitSignalAndStatusHandler, t)
      +	defer conn.Close()
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("Unable to request new session: %v", err)
      +	}
      +	defer session.Close()
      +	if err := session.Shell(); err != nil {
      +		t.Fatalf("Unable to execute command: %v", err)
      +	}
      +	err = session.Wait()
      +	if err == nil {
      +		t.Fatalf("expected command to fail but it didn't")
      +	}
      +	e, ok := err.(*ExitError)
      +	if !ok {
      +		t.Fatalf("expected *ExitError but got %T", err)
      +	}
      +	if e.Signal() != "TERM" || e.ExitStatus() != 15 {
      +		t.Fatalf("expected command to exit with signal TERM and status 15 but got signal %s and status %v", e.Signal(), e.ExitStatus())
      +	}
      +}
      +
      +// Test exit signal and status are both returned correctly.
      +func TestKnownExitSignalOnly(t *testing.T) {
      +	conn := dial(exitSignalHandler, t)
      +	defer conn.Close()
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("Unable to request new session: %v", err)
      +	}
      +	defer session.Close()
      +	if err := session.Shell(); err != nil {
      +		t.Fatalf("Unable to execute command: %v", err)
      +	}
      +	err = session.Wait()
      +	if err == nil {
      +		t.Fatalf("expected command to fail but it didn't")
      +	}
      +	e, ok := err.(*ExitError)
      +	if !ok {
      +		t.Fatalf("expected *ExitError but got %T", err)
      +	}
      +	if e.Signal() != "TERM" || e.ExitStatus() != 143 {
      +		t.Fatalf("expected command to exit with signal TERM and status 143 but got signal %s and status %v", e.Signal(), e.ExitStatus())
      +	}
      +}
      +
      +// Test exit signal and status are both returned correctly.
      +func TestUnknownExitSignal(t *testing.T) {
      +	conn := dial(exitSignalUnknownHandler, t)
      +	defer conn.Close()
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("Unable to request new session: %v", err)
      +	}
      +	defer session.Close()
      +	if err := session.Shell(); err != nil {
      +		t.Fatalf("Unable to execute command: %v", err)
      +	}
      +	err = session.Wait()
      +	if err == nil {
      +		t.Fatalf("expected command to fail but it didn't")
      +	}
      +	e, ok := err.(*ExitError)
      +	if !ok {
      +		t.Fatalf("expected *ExitError but got %T", err)
      +	}
      +	if e.Signal() != "SYS" || e.ExitStatus() != 128 {
      +		t.Fatalf("expected command to exit with signal SYS and status 128 but got signal %s and status %v", e.Signal(), e.ExitStatus())
      +	}
      +}
      +
      +// Test WaitMsg is not returned if the channel closes abruptly.
      +func TestExitWithoutStatusOrSignal(t *testing.T) {
      +	conn := dial(exitWithoutSignalOrStatus, t)
      +	defer conn.Close()
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("Unable to request new session: %v", err)
      +	}
      +	defer session.Close()
      +	if err := session.Shell(); err != nil {
      +		t.Fatalf("Unable to execute command: %v", err)
      +	}
      +	err = session.Wait()
      +	if err == nil {
      +		t.Fatalf("expected command to fail but it didn't")
      +	}
      +	_, ok := err.(*ExitError)
      +	if ok {
      +		// you can't actually test for errors.errorString
      +		// because it's not exported.
      +		t.Fatalf("expected *errorString but got %T", err)
      +	}
      +}
      +
      +// windowTestBytes is the number of bytes that we'll send to the SSH server.
      +const windowTestBytes = 16000 * 200
      +
      +// TestServerWindow writes random data to the server. The server is expected to echo
      +// the same data back, which is compared against the original.
      +func TestServerWindow(t *testing.T) {
      +	origBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes))
      +	io.CopyN(origBuf, crypto_rand.Reader, windowTestBytes)
      +	origBytes := origBuf.Bytes()
      +
      +	conn := dial(echoHandler, t)
      +	defer conn.Close()
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	defer session.Close()
      +	result := make(chan []byte)
      +
      +	go func() {
      +		defer close(result)
      +		echoedBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes))
      +		serverStdout, err := session.StdoutPipe()
      +		if err != nil {
      +			t.Errorf("StdoutPipe failed: %v", err)
      +			return
      +		}
      +		n, err := copyNRandomly("stdout", echoedBuf, serverStdout, windowTestBytes)
      +		if err != nil && err != io.EOF {
      +			t.Errorf("Read only %d bytes from server, expected %d: %v", n, windowTestBytes, err)
      +		}
      +		result <- echoedBuf.Bytes()
      +	}()
      +
      +	serverStdin, err := session.StdinPipe()
      +	if err != nil {
      +		t.Fatalf("StdinPipe failed: %v", err)
      +	}
      +	written, err := copyNRandomly("stdin", serverStdin, origBuf, windowTestBytes)
      +	if err != nil {
      +		t.Fatalf("failed to copy origBuf to serverStdin: %v", err)
      +	}
      +	if written != windowTestBytes {
      +		t.Fatalf("Wrote only %d of %d bytes to server", written, windowTestBytes)
      +	}
      +
      +	echoedBytes := <-result
      +
      +	if !bytes.Equal(origBytes, echoedBytes) {
      +		t.Fatalf("Echoed buffer differed from original, orig %d, echoed %d", len(origBytes), len(echoedBytes))
      +	}
      +}
      +
      +// Verify the client can handle a keepalive packet from the server.
      +func TestClientHandlesKeepalives(t *testing.T) {
      +	conn := dial(channelKeepaliveSender, t)
      +	defer conn.Close()
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	defer session.Close()
      +	if err := session.Shell(); err != nil {
      +		t.Fatalf("Unable to execute command: %v", err)
      +	}
      +	err = session.Wait()
      +	if err != nil {
      +		t.Fatalf("expected nil but got: %v", err)
      +	}
      +}
      +
      +type exitStatusMsg struct {
      +	Status uint32
      +}
      +
      +type exitSignalMsg struct {
      +	Signal     string
      +	CoreDumped bool
      +	Errmsg     string
      +	Lang       string
      +}
      +
      +func handleTerminalRequests(in <-chan *Request) {
      +	for req := range in {
      +		ok := false
      +		switch req.Type {
      +		case "shell":
      +			ok = true
      +			if len(req.Payload) > 0 {
      +				// We don't accept any commands, only the default shell.
      +				ok = false
      +			}
      +		case "env":
      +			ok = true
      +		}
      +		req.Reply(ok, nil)
      +	}
      +}
      +
      +func newServerShell(ch Channel, in <-chan *Request, prompt string) *terminal.Terminal {
      +	term := terminal.NewTerminal(ch, prompt)
      +	go handleTerminalRequests(in)
      +	return term
      +}
      +
      +func exitStatusZeroHandler(ch Channel, in <-chan *Request, t *testing.T) {
      +	defer ch.Close()
      +	// this string is returned to stdout
      +	shell := newServerShell(ch, in, "> ")
      +	readLine(shell, t)
      +	sendStatus(0, ch, t)
      +}
      +
      +func exitStatusNonZeroHandler(ch Channel, in <-chan *Request, t *testing.T) {
      +	defer ch.Close()
      +	shell := newServerShell(ch, in, "> ")
      +	readLine(shell, t)
      +	sendStatus(15, ch, t)
      +}
      +
      +func exitSignalAndStatusHandler(ch Channel, in <-chan *Request, t *testing.T) {
      +	defer ch.Close()
      +	shell := newServerShell(ch, in, "> ")
      +	readLine(shell, t)
      +	sendStatus(15, ch, t)
      +	sendSignal("TERM", ch, t)
      +}
      +
      +func exitSignalHandler(ch Channel, in <-chan *Request, t *testing.T) {
      +	defer ch.Close()
      +	shell := newServerShell(ch, in, "> ")
      +	readLine(shell, t)
      +	sendSignal("TERM", ch, t)
      +}
      +
      +func exitSignalUnknownHandler(ch Channel, in <-chan *Request, t *testing.T) {
      +	defer ch.Close()
      +	shell := newServerShell(ch, in, "> ")
      +	readLine(shell, t)
      +	sendSignal("SYS", ch, t)
      +}
      +
      +func exitWithoutSignalOrStatus(ch Channel, in <-chan *Request, t *testing.T) {
      +	defer ch.Close()
      +	shell := newServerShell(ch, in, "> ")
      +	readLine(shell, t)
      +}
      +
      +func shellHandler(ch Channel, in <-chan *Request, t *testing.T) {
      +	defer ch.Close()
      +	// this string is returned to stdout
      +	shell := newServerShell(ch, in, "golang")
      +	readLine(shell, t)
      +	sendStatus(0, ch, t)
      +}
      +
      +// Ignores the command, writes fixed strings to stderr and stdout.
      +// Strings are "this-is-stdout." and "this-is-stderr.".
      +func fixedOutputHandler(ch Channel, in <-chan *Request, t *testing.T) {
      +	defer ch.Close()
      +	_, err := ch.Read(nil)
      +
      +	req, ok := <-in
      +	if !ok {
      +		t.Fatalf("error: expected channel request, got: %#v", err)
      +		return
      +	}
      +
      +	// ignore request, always send some text
      +	req.Reply(true, nil)
      +
      +	_, err = io.WriteString(ch, "this-is-stdout.")
      +	if err != nil {
      +		t.Fatalf("error writing on server: %v", err)
      +	}
      +	_, err = io.WriteString(ch.Stderr(), "this-is-stderr.")
      +	if err != nil {
      +		t.Fatalf("error writing on server: %v", err)
      +	}
      +	sendStatus(0, ch, t)
      +}
      +
      +func readLine(shell *terminal.Terminal, t *testing.T) {
      +	if _, err := shell.ReadLine(); err != nil && err != io.EOF {
      +		t.Errorf("unable to read line: %v", err)
      +	}
      +}
      +
      +func sendStatus(status uint32, ch Channel, t *testing.T) {
      +	msg := exitStatusMsg{
      +		Status: status,
      +	}
      +	if _, err := ch.SendRequest("exit-status", false, Marshal(&msg)); err != nil {
      +		t.Errorf("unable to send status: %v", err)
      +	}
      +}
      +
      +func sendSignal(signal string, ch Channel, t *testing.T) {
      +	sig := exitSignalMsg{
      +		Signal:     signal,
      +		CoreDumped: false,
      +		Errmsg:     "Process terminated",
      +		Lang:       "en-GB-oed",
      +	}
      +	if _, err := ch.SendRequest("exit-signal", false, Marshal(&sig)); err != nil {
      +		t.Errorf("unable to send signal: %v", err)
      +	}
      +}
      +
      +func discardHandler(ch Channel, t *testing.T) {
      +	defer ch.Close()
      +	io.Copy(ioutil.Discard, ch)
      +}
      +
      +func echoHandler(ch Channel, in <-chan *Request, t *testing.T) {
      +	defer ch.Close()
      +	if n, err := copyNRandomly("echohandler", ch, ch, windowTestBytes); err != nil {
      +		t.Errorf("short write, wrote %d, expected %d: %v ", n, windowTestBytes, err)
      +	}
      +}
      +
      +// copyNRandomly copies n bytes from src to dst. It uses a variable, and random,
      +// buffer size to exercise more code paths.
      +func copyNRandomly(title string, dst io.Writer, src io.Reader, n int) (int, error) {
      +	var (
      +		buf       = make([]byte, 32*1024)
      +		written   int
      +		remaining = n
      +	)
      +	for remaining > 0 {
      +		l := rand.Intn(1 << 15)
      +		if remaining < l {
      +			l = remaining
      +		}
      +		nr, er := src.Read(buf[:l])
      +		nw, ew := dst.Write(buf[:nr])
      +		remaining -= nw
      +		written += nw
      +		if ew != nil {
      +			return written, ew
      +		}
      +		if nr != nw {
      +			return written, io.ErrShortWrite
      +		}
      +		if er != nil && er != io.EOF {
      +			return written, er
      +		}
      +	}
      +	return written, nil
      +}
      +
      +func channelKeepaliveSender(ch Channel, in <-chan *Request, t *testing.T) {
      +	defer ch.Close()
      +	shell := newServerShell(ch, in, "> ")
      +	readLine(shell, t)
      +	if _, err := ch.SendRequest("keepalive@openssh.com", true, nil); err != nil {
      +		t.Errorf("unable to send channel keepalive request: %v", err)
      +	}
      +	sendStatus(0, ch, t)
      +}
      +
      +func TestClientWriteEOF(t *testing.T) {
      +	conn := dial(simpleEchoHandler, t)
      +	defer conn.Close()
      +
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	defer session.Close()
      +	stdin, err := session.StdinPipe()
      +	if err != nil {
      +		t.Fatalf("StdinPipe failed: %v", err)
      +	}
      +	stdout, err := session.StdoutPipe()
      +	if err != nil {
      +		t.Fatalf("StdoutPipe failed: %v", err)
      +	}
      +
      +	data := []byte(`0000`)
      +	_, err = stdin.Write(data)
      +	if err != nil {
      +		t.Fatalf("Write failed: %v", err)
      +	}
      +	stdin.Close()
      +
      +	res, err := ioutil.ReadAll(stdout)
      +	if err != nil {
      +		t.Fatalf("Read failed: %v", err)
      +	}
      +
      +	if !bytes.Equal(data, res) {
      +		t.Fatalf("Read differed from write, wrote: %v, read: %v", data, res)
      +	}
      +}
      +
      +func simpleEchoHandler(ch Channel, in <-chan *Request, t *testing.T) {
      +	defer ch.Close()
      +	data, err := ioutil.ReadAll(ch)
      +	if err != nil {
      +		t.Errorf("handler read error: %v", err)
      +	}
      +	_, err = ch.Write(data)
      +	if err != nil {
      +		t.Errorf("handler write error: %v", err)
      +	}
      +}
      +
      +func TestSessionID(t *testing.T) {
      +	c1, c2, err := netPipe()
      +	if err != nil {
      +		t.Fatalf("netPipe: %v", err)
      +	}
      +	defer c1.Close()
      +	defer c2.Close()
      +
      +	serverID := make(chan []byte, 1)
      +	clientID := make(chan []byte, 1)
      +
      +	serverConf := &ServerConfig{
      +		NoClientAuth: true,
      +	}
      +	serverConf.AddHostKey(testSigners["ecdsa"])
      +	clientConf := &ClientConfig{
      +		User: "user",
      +	}
      +
      +	go func() {
      +		conn, chans, reqs, err := NewServerConn(c1, serverConf)
      +		if err != nil {
      +			t.Fatalf("server handshake: %v", err)
      +		}
      +		serverID <- conn.SessionID()
      +		go DiscardRequests(reqs)
      +		for ch := range chans {
      +			ch.Reject(Prohibited, "")
      +		}
      +	}()
      +
      +	go func() {
      +		conn, chans, reqs, err := NewClientConn(c2, "", clientConf)
      +		if err != nil {
      +			t.Fatalf("client handshake: %v", err)
      +		}
      +		clientID <- conn.SessionID()
      +		go DiscardRequests(reqs)
      +		for ch := range chans {
      +			ch.Reject(Prohibited, "")
      +		}
      +	}()
      +
      +	s := <-serverID
      +	c := <-clientID
      +	if bytes.Compare(s, c) != 0 {
      +		t.Errorf("server session ID (%x) != client session ID (%x)", s, c)
      +	} else if len(s) == 0 {
      +		t.Errorf("client and server SessionID were empty.")
      +	}
      +}
      +
      +type noReadConn struct {
      +	readSeen bool
      +	net.Conn
      +}
      +
      +func (c *noReadConn) Close() error {
      +	return nil
      +}
      +
      +func (c *noReadConn) Read(b []byte) (int, error) {
      +	c.readSeen = true
      +	return 0, errors.New("noReadConn error")
      +}
      +
      +func TestInvalidServerConfiguration(t *testing.T) {
      +	c1, c2, err := netPipe()
      +	if err != nil {
      +		t.Fatalf("netPipe: %v", err)
      +	}
      +	defer c1.Close()
      +	defer c2.Close()
      +
      +	serveConn := noReadConn{Conn: c1}
      +	serverConf := &ServerConfig{}
      +
      +	NewServerConn(&serveConn, serverConf)
      +	if serveConn.readSeen {
      +		t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing host key")
      +	}
      +
      +	serverConf.AddHostKey(testSigners["ecdsa"])
      +
      +	NewServerConn(&serveConn, serverConf)
      +	if serveConn.readSeen {
      +		t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing authentication method")
      +	}
      +}
      +
      +func TestHostKeyAlgorithms(t *testing.T) {
      +	serverConf := &ServerConfig{
      +		NoClientAuth: true,
      +	}
      +	serverConf.AddHostKey(testSigners["rsa"])
      +	serverConf.AddHostKey(testSigners["ecdsa"])
      +
      +	connect := func(clientConf *ClientConfig, want string) {
      +		var alg string
      +		clientConf.HostKeyCallback = func(h string, a net.Addr, key PublicKey) error {
      +			alg = key.Type()
      +			return nil
      +		}
      +		c1, c2, err := netPipe()
      +		if err != nil {
      +			t.Fatalf("netPipe: %v", err)
      +		}
      +		defer c1.Close()
      +		defer c2.Close()
      +
      +		go NewServerConn(c1, serverConf)
      +		_, _, _, err = NewClientConn(c2, "", clientConf)
      +		if err != nil {
      +			t.Fatalf("NewClientConn: %v", err)
      +		}
      +		if alg != want {
      +			t.Errorf("selected key algorithm %s, want %s", alg, want)
      +		}
      +	}
      +
      +	// By default, we get the preferred algorithm, which is ECDSA 256.
      +
      +	clientConf := &ClientConfig{}
      +	connect(clientConf, KeyAlgoECDSA256)
      +
      +	// Client asks for RSA explicitly.
      +	clientConf.HostKeyAlgorithms = []string{KeyAlgoRSA}
      +	connect(clientConf, KeyAlgoRSA)
      +
      +	c1, c2, err := netPipe()
      +	if err != nil {
      +		t.Fatalf("netPipe: %v", err)
      +	}
      +	defer c1.Close()
      +	defer c2.Close()
      +
      +	go NewServerConn(c1, serverConf)
      +	clientConf.HostKeyAlgorithms = []string{"nonexistent-hostkey-algo"}
      +	_, _, _, err = NewClientConn(c2, "", clientConf)
      +	if err == nil {
      +		t.Fatal("succeeded connecting with unknown hostkey algorithm")
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go
      new file mode 100644
      index 00000000..6151241f
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go
      @@ -0,0 +1,407 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"errors"
      +	"fmt"
      +	"io"
      +	"math/rand"
      +	"net"
      +	"strconv"
      +	"strings"
      +	"sync"
      +	"time"
      +)
      +
      +// Listen requests the remote peer open a listening socket on
      +// addr. Incoming connections will be available by calling Accept on
      +// the returned net.Listener. The listener must be serviced, or the
      +// SSH connection may hang.
      +func (c *Client) Listen(n, addr string) (net.Listener, error) {
      +	laddr, err := net.ResolveTCPAddr(n, addr)
      +	if err != nil {
      +		return nil, err
      +	}
      +	return c.ListenTCP(laddr)
      +}
      +
      +// Automatic port allocation is broken with OpenSSH before 6.0. See
      +// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017.  In
      +// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0,
      +// rather than the actual port number. This means you can never open
      +// two different listeners with auto allocated ports. We work around
      +// this by trying explicit ports until we succeed.
      +
      +const openSSHPrefix = "OpenSSH_"
      +
      +var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano()))
      +
      +// isBrokenOpenSSHVersion returns true if the given version string
      +// specifies a version of OpenSSH that is known to have a bug in port
      +// forwarding.
      +func isBrokenOpenSSHVersion(versionStr string) bool {
      +	i := strings.Index(versionStr, openSSHPrefix)
      +	if i < 0 {
      +		return false
      +	}
      +	i += len(openSSHPrefix)
      +	j := i
      +	for ; j < len(versionStr); j++ {
      +		if versionStr[j] < '0' || versionStr[j] > '9' {
      +			break
      +		}
      +	}
      +	version, _ := strconv.Atoi(versionStr[i:j])
      +	return version < 6
      +}
      +
      +// autoPortListenWorkaround simulates automatic port allocation by
      +// trying random ports repeatedly.
      +func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) {
      +	var sshListener net.Listener
      +	var err error
      +	const tries = 10
      +	for i := 0; i < tries; i++ {
      +		addr := *laddr
      +		addr.Port = 1024 + portRandomizer.Intn(60000)
      +		sshListener, err = c.ListenTCP(&addr)
      +		if err == nil {
      +			laddr.Port = addr.Port
      +			return sshListener, err
      +		}
      +	}
      +	return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err)
      +}
      +
      +// RFC 4254 7.1
      +type channelForwardMsg struct {
      +	addr  string
      +	rport uint32
      +}
      +
      +// ListenTCP requests the remote peer open a listening socket
      +// on laddr. Incoming connections will be available by calling
      +// Accept on the returned net.Listener.
      +func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
      +	if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) {
      +		return c.autoPortListenWorkaround(laddr)
      +	}
      +
      +	m := channelForwardMsg{
      +		laddr.IP.String(),
      +		uint32(laddr.Port),
      +	}
      +	// send message
      +	ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m))
      +	if err != nil {
      +		return nil, err
      +	}
      +	if !ok {
      +		return nil, errors.New("ssh: tcpip-forward request denied by peer")
      +	}
      +
      +	// If the original port was 0, then the remote side will
      +	// supply a real port number in the response.
      +	if laddr.Port == 0 {
      +		var p struct {
      +			Port uint32
      +		}
      +		if err := Unmarshal(resp, &p); err != nil {
      +			return nil, err
      +		}
      +		laddr.Port = int(p.Port)
      +	}
      +
      +	// Register this forward, using the port number we obtained.
      +	ch := c.forwards.add(*laddr)
      +
      +	return &tcpListener{laddr, c, ch}, nil
      +}
      +
      +// forwardList stores a mapping between remote
      +// forward requests and the tcpListeners.
      +type forwardList struct {
      +	sync.Mutex
      +	entries []forwardEntry
      +}
      +
      +// forwardEntry represents an established mapping of a laddr on a
      +// remote ssh server to a channel connected to a tcpListener.
      +type forwardEntry struct {
      +	laddr net.TCPAddr
      +	c     chan forward
      +}
      +
      +// forward represents an incoming forwarded tcpip connection. The
      +// arguments to add/remove/lookup should be address as specified in
      +// the original forward-request.
      +type forward struct {
      +	newCh NewChannel   // the ssh client channel underlying this forward
      +	raddr *net.TCPAddr // the raddr of the incoming connection
      +}
      +
      +func (l *forwardList) add(addr net.TCPAddr) chan forward {
      +	l.Lock()
      +	defer l.Unlock()
      +	f := forwardEntry{
      +		addr,
      +		make(chan forward, 1),
      +	}
      +	l.entries = append(l.entries, f)
      +	return f.c
      +}
      +
      +// See RFC 4254, section 7.2
      +type forwardedTCPPayload struct {
      +	Addr       string
      +	Port       uint32
      +	OriginAddr string
      +	OriginPort uint32
      +}
      +
      +// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr.
      +func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) {
      +	if port == 0 || port > 65535 {
      +		return nil, fmt.Errorf("ssh: port number out of range: %d", port)
      +	}
      +	ip := net.ParseIP(string(addr))
      +	if ip == nil {
      +		return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr)
      +	}
      +	return &net.TCPAddr{IP: ip, Port: int(port)}, nil
      +}
      +
      +func (l *forwardList) handleChannels(in <-chan NewChannel) {
      +	for ch := range in {
      +		var payload forwardedTCPPayload
      +		if err := Unmarshal(ch.ExtraData(), &payload); err != nil {
      +			ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error())
      +			continue
      +		}
      +
      +		// RFC 4254 section 7.2 specifies that incoming
      +		// addresses should list the address, in string
      +		// format. It is implied that this should be an IP
      +		// address, as it would be impossible to connect to it
      +		// otherwise.
      +		laddr, err := parseTCPAddr(payload.Addr, payload.Port)
      +		if err != nil {
      +			ch.Reject(ConnectionFailed, err.Error())
      +			continue
      +		}
      +		raddr, err := parseTCPAddr(payload.OriginAddr, payload.OriginPort)
      +		if err != nil {
      +			ch.Reject(ConnectionFailed, err.Error())
      +			continue
      +		}
      +
      +		if ok := l.forward(*laddr, *raddr, ch); !ok {
      +			// Section 7.2, implementations MUST reject spurious incoming
      +			// connections.
      +			ch.Reject(Prohibited, "no forward for address")
      +			continue
      +		}
      +	}
      +}
      +
      +// remove removes the forward entry, and the channel feeding its
      +// listener.
      +func (l *forwardList) remove(addr net.TCPAddr) {
      +	l.Lock()
      +	defer l.Unlock()
      +	for i, f := range l.entries {
      +		if addr.IP.Equal(f.laddr.IP) && addr.Port == f.laddr.Port {
      +			l.entries = append(l.entries[:i], l.entries[i+1:]...)
      +			close(f.c)
      +			return
      +		}
      +	}
      +}
      +
      +// closeAll closes and clears all forwards.
      +func (l *forwardList) closeAll() {
      +	l.Lock()
      +	defer l.Unlock()
      +	for _, f := range l.entries {
      +		close(f.c)
      +	}
      +	l.entries = nil
      +}
      +
      +func (l *forwardList) forward(laddr, raddr net.TCPAddr, ch NewChannel) bool {
      +	l.Lock()
      +	defer l.Unlock()
      +	for _, f := range l.entries {
      +		if laddr.IP.Equal(f.laddr.IP) && laddr.Port == f.laddr.Port {
      +			f.c <- forward{ch, &raddr}
      +			return true
      +		}
      +	}
      +	return false
      +}
      +
      +type tcpListener struct {
      +	laddr *net.TCPAddr
      +
      +	conn *Client
      +	in   <-chan forward
      +}
      +
      +// Accept waits for and returns the next connection to the listener.
      +func (l *tcpListener) Accept() (net.Conn, error) {
      +	s, ok := <-l.in
      +	if !ok {
      +		return nil, io.EOF
      +	}
      +	ch, incoming, err := s.newCh.Accept()
      +	if err != nil {
      +		return nil, err
      +	}
      +	go DiscardRequests(incoming)
      +
      +	return &tcpChanConn{
      +		Channel: ch,
      +		laddr:   l.laddr,
      +		raddr:   s.raddr,
      +	}, nil
      +}
      +
      +// Close closes the listener.
      +func (l *tcpListener) Close() error {
      +	m := channelForwardMsg{
      +		l.laddr.IP.String(),
      +		uint32(l.laddr.Port),
      +	}
      +
      +	// this also closes the listener.
      +	l.conn.forwards.remove(*l.laddr)
      +	ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m))
      +	if err == nil && !ok {
      +		err = errors.New("ssh: cancel-tcpip-forward failed")
      +	}
      +	return err
      +}
      +
      +// Addr returns the listener's network address.
      +func (l *tcpListener) Addr() net.Addr {
      +	return l.laddr
      +}
      +
      +// Dial initiates a connection to the addr from the remote host.
      +// The resulting connection has a zero LocalAddr() and RemoteAddr().
      +func (c *Client) Dial(n, addr string) (net.Conn, error) {
      +	// Parse the address into host and numeric port.
      +	host, portString, err := net.SplitHostPort(addr)
      +	if err != nil {
      +		return nil, err
      +	}
      +	port, err := strconv.ParseUint(portString, 10, 16)
      +	if err != nil {
      +		return nil, err
      +	}
      +	// Use a zero address for local and remote address.
      +	zeroAddr := &net.TCPAddr{
      +		IP:   net.IPv4zero,
      +		Port: 0,
      +	}
      +	ch, err := c.dial(net.IPv4zero.String(), 0, host, int(port))
      +	if err != nil {
      +		return nil, err
      +	}
      +	return &tcpChanConn{
      +		Channel: ch,
      +		laddr:   zeroAddr,
      +		raddr:   zeroAddr,
      +	}, nil
      +}
      +
      +// DialTCP connects to the remote address raddr on the network net,
      +// which must be "tcp", "tcp4", or "tcp6".  If laddr is not nil, it is used
      +// as the local address for the connection.
      +func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) {
      +	if laddr == nil {
      +		laddr = &net.TCPAddr{
      +			IP:   net.IPv4zero,
      +			Port: 0,
      +		}
      +	}
      +	ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port)
      +	if err != nil {
      +		return nil, err
      +	}
      +	return &tcpChanConn{
      +		Channel: ch,
      +		laddr:   laddr,
      +		raddr:   raddr,
      +	}, nil
      +}
      +
      +// RFC 4254 7.2
      +type channelOpenDirectMsg struct {
      +	raddr string
      +	rport uint32
      +	laddr string
      +	lport uint32
      +}
      +
      +func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) {
      +	msg := channelOpenDirectMsg{
      +		raddr: raddr,
      +		rport: uint32(rport),
      +		laddr: laddr,
      +		lport: uint32(lport),
      +	}
      +	ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg))
      +	if err != nil {
      +		return nil, err
      +	}
      +	go DiscardRequests(in)
      +	return ch, err
      +}
      +
      +type tcpChan struct {
      +	Channel // the backing channel
      +}
      +
      +// tcpChanConn fulfills the net.Conn interface without
      +// the tcpChan having to hold laddr or raddr directly.
      +type tcpChanConn struct {
      +	Channel
      +	laddr, raddr net.Addr
      +}
      +
      +// LocalAddr returns the local network address.
      +func (t *tcpChanConn) LocalAddr() net.Addr {
      +	return t.laddr
      +}
      +
      +// RemoteAddr returns the remote network address.
      +func (t *tcpChanConn) RemoteAddr() net.Addr {
      +	return t.raddr
      +}
      +
      +// SetDeadline sets the read and write deadlines associated
      +// with the connection.
      +func (t *tcpChanConn) SetDeadline(deadline time.Time) error {
      +	if err := t.SetReadDeadline(deadline); err != nil {
      +		return err
      +	}
      +	return t.SetWriteDeadline(deadline)
      +}
      +
      +// SetReadDeadline sets the read deadline.
      +// A zero value for t means Read will not time out.
      +// After the deadline, the error from Read will implement net.Error
      +// with Timeout() == true.
      +func (t *tcpChanConn) SetReadDeadline(deadline time.Time) error {
      +	return errors.New("ssh: tcpChan: deadline not supported")
      +}
      +
      +// SetWriteDeadline exists to satisfy the net.Conn interface
      +// but is not implemented by this type.  It always returns an error.
      +func (t *tcpChanConn) SetWriteDeadline(deadline time.Time) error {
      +	return errors.New("ssh: tcpChan: deadline not supported")
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/tcpip_test.go b/vendor/golang.org/x/crypto/ssh/tcpip_test.go
      new file mode 100644
      index 00000000..f1265cb4
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/tcpip_test.go
      @@ -0,0 +1,20 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"testing"
      +)
      +
      +func TestAutoPortListenBroken(t *testing.T) {
      +	broken := "SSH-2.0-OpenSSH_5.9hh11"
      +	works := "SSH-2.0-OpenSSH_6.1"
      +	if !isBrokenOpenSSHVersion(broken) {
      +		t.Errorf("version %q not marked as broken", broken)
      +	}
      +	if isBrokenOpenSSHVersion(works) {
      +		t.Errorf("version %q marked as broken", works)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
      new file mode 100644
      index 00000000..741eeb13
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
      @@ -0,0 +1,892 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package terminal
      +
      +import (
      +	"bytes"
      +	"io"
      +	"sync"
      +	"unicode/utf8"
      +)
      +
      +// EscapeCodes contains escape sequences that can be written to the terminal in
      +// order to achieve different styles of text.
      +type EscapeCodes struct {
      +	// Foreground colors
      +	Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte
      +
      +	// Reset all attributes
      +	Reset []byte
      +}
      +
      +var vt100EscapeCodes = EscapeCodes{
      +	Black:   []byte{keyEscape, '[', '3', '0', 'm'},
      +	Red:     []byte{keyEscape, '[', '3', '1', 'm'},
      +	Green:   []byte{keyEscape, '[', '3', '2', 'm'},
      +	Yellow:  []byte{keyEscape, '[', '3', '3', 'm'},
      +	Blue:    []byte{keyEscape, '[', '3', '4', 'm'},
      +	Magenta: []byte{keyEscape, '[', '3', '5', 'm'},
      +	Cyan:    []byte{keyEscape, '[', '3', '6', 'm'},
      +	White:   []byte{keyEscape, '[', '3', '7', 'm'},
      +
      +	Reset: []byte{keyEscape, '[', '0', 'm'},
      +}
      +
      +// Terminal contains the state for running a VT100 terminal that is capable of
      +// reading lines of input.
      +type Terminal struct {
      +	// AutoCompleteCallback, if non-null, is called for each keypress with
      +	// the full input line and the current position of the cursor (in
      +	// bytes, as an index into |line|). If it returns ok=false, the key
      +	// press is processed normally. Otherwise it returns a replacement line
      +	// and the new cursor position.
      +	AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)
      +
      +	// Escape contains a pointer to the escape codes for this terminal.
      +	// It's always a valid pointer, although the escape codes themselves
      +	// may be empty if the terminal doesn't support them.
      +	Escape *EscapeCodes
      +
      +	// lock protects the terminal and the state in this object from
      +	// concurrent processing of a key press and a Write() call.
      +	lock sync.Mutex
      +
      +	c      io.ReadWriter
      +	prompt []rune
      +
      +	// line is the current line being entered.
      +	line []rune
      +	// pos is the logical position of the cursor in line
      +	pos int
      +	// echo is true if local echo is enabled
      +	echo bool
      +	// pasteActive is true iff there is a bracketed paste operation in
      +	// progress.
      +	pasteActive bool
      +
      +	// cursorX contains the current X value of the cursor where the left
      +	// edge is 0. cursorY contains the row number where the first row of
      +	// the current line is 0.
      +	cursorX, cursorY int
      +	// maxLine is the greatest value of cursorY so far.
      +	maxLine int
      +
      +	termWidth, termHeight int
      +
      +	// outBuf contains the terminal data to be sent.
      +	outBuf []byte
      +	// remainder contains the remainder of any partial key sequences after
      +	// a read. It aliases into inBuf.
      +	remainder []byte
      +	inBuf     [256]byte
      +
      +	// history contains previously entered commands so that they can be
      +	// accessed with the up and down keys.
      +	history stRingBuffer
      +	// historyIndex stores the currently accessed history entry, where zero
      +	// means the immediately previous entry.
      +	historyIndex int
      +	// When navigating up and down the history it's possible to return to
      +	// the incomplete, initial line. That value is stored in
      +	// historyPending.
      +	historyPending string
      +}
      +
      +// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is
      +// a local terminal, that terminal must first have been put into raw mode.
      +// prompt is a string that is written at the start of each input line (i.e.
      +// "> ").
      +func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
      +	return &Terminal{
      +		Escape:       &vt100EscapeCodes,
      +		c:            c,
      +		prompt:       []rune(prompt),
      +		termWidth:    80,
      +		termHeight:   24,
      +		echo:         true,
      +		historyIndex: -1,
      +	}
      +}
      +
      +const (
      +	keyCtrlD     = 4
      +	keyCtrlU     = 21
      +	keyEnter     = '\r'
      +	keyEscape    = 27
      +	keyBackspace = 127
      +	keyUnknown   = 0xd800 /* UTF-16 surrogate area */ + iota
      +	keyUp
      +	keyDown
      +	keyLeft
      +	keyRight
      +	keyAltLeft
      +	keyAltRight
      +	keyHome
      +	keyEnd
      +	keyDeleteWord
      +	keyDeleteLine
      +	keyClearScreen
      +	keyPasteStart
      +	keyPasteEnd
      +)
      +
      +var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}
      +var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'}
      +
      +// bytesToKey tries to parse a key sequence from b. If successful, it returns
      +// the key and the remainder of the input. Otherwise it returns utf8.RuneError.
      +func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
      +	if len(b) == 0 {
      +		return utf8.RuneError, nil
      +	}
      +
      +	if !pasteActive {
      +		switch b[0] {
      +		case 1: // ^A
      +			return keyHome, b[1:]
      +		case 5: // ^E
      +			return keyEnd, b[1:]
      +		case 8: // ^H
      +			return keyBackspace, b[1:]
      +		case 11: // ^K
      +			return keyDeleteLine, b[1:]
      +		case 12: // ^L
      +			return keyClearScreen, b[1:]
      +		case 23: // ^W
      +			return keyDeleteWord, b[1:]
      +		}
      +	}
      +
      +	if b[0] != keyEscape {
      +		if !utf8.FullRune(b) {
      +			return utf8.RuneError, b
      +		}
      +		r, l := utf8.DecodeRune(b)
      +		return r, b[l:]
      +	}
      +
      +	if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' {
      +		switch b[2] {
      +		case 'A':
      +			return keyUp, b[3:]
      +		case 'B':
      +			return keyDown, b[3:]
      +		case 'C':
      +			return keyRight, b[3:]
      +		case 'D':
      +			return keyLeft, b[3:]
      +		case 'H':
      +			return keyHome, b[3:]
      +		case 'F':
      +			return keyEnd, b[3:]
      +		}
      +	}
      +
      +	if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
      +		switch b[5] {
      +		case 'C':
      +			return keyAltRight, b[6:]
      +		case 'D':
      +			return keyAltLeft, b[6:]
      +		}
      +	}
      +
      +	if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) {
      +		return keyPasteStart, b[6:]
      +	}
      +
      +	if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) {
      +		return keyPasteEnd, b[6:]
      +	}
      +
      +	// If we get here then we have a key that we don't recognise, or a
      +	// partial sequence. It's not clear how one should find the end of a
      +	// sequence without knowing them all, but it seems that [a-zA-Z~] only
      +	// appears at the end of a sequence.
      +	for i, c := range b[0:] {
      +		if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' {
      +			return keyUnknown, b[i+1:]
      +		}
      +	}
      +
      +	return utf8.RuneError, b
      +}
      +
      +// queue appends data to the end of t.outBuf
      +func (t *Terminal) queue(data []rune) {
      +	t.outBuf = append(t.outBuf, []byte(string(data))...)
      +}
      +
      +var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}
      +var space = []rune{' '}
      +
      +func isPrintable(key rune) bool {
      +	isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
      +	return key >= 32 && !isInSurrogateArea
      +}
      +
      +// moveCursorToPos appends data to t.outBuf which will move the cursor to the
      +// given, logical position in the text.
      +func (t *Terminal) moveCursorToPos(pos int) {
      +	if !t.echo {
      +		return
      +	}
      +
      +	x := visualLength(t.prompt) + pos
      +	y := x / t.termWidth
      +	x = x % t.termWidth
      +
      +	up := 0
      +	if y < t.cursorY {
      +		up = t.cursorY - y
      +	}
      +
      +	down := 0
      +	if y > t.cursorY {
      +		down = y - t.cursorY
      +	}
      +
      +	left := 0
      +	if x < t.cursorX {
      +		left = t.cursorX - x
      +	}
      +
      +	right := 0
      +	if x > t.cursorX {
      +		right = x - t.cursorX
      +	}
      +
      +	t.cursorX = x
      +	t.cursorY = y
      +	t.move(up, down, left, right)
      +}
      +
      +func (t *Terminal) move(up, down, left, right int) {
      +	movement := make([]rune, 3*(up+down+left+right))
      +	m := movement
      +	for i := 0; i < up; i++ {
      +		m[0] = keyEscape
      +		m[1] = '['
      +		m[2] = 'A'
      +		m = m[3:]
      +	}
      +	for i := 0; i < down; i++ {
      +		m[0] = keyEscape
      +		m[1] = '['
      +		m[2] = 'B'
      +		m = m[3:]
      +	}
      +	for i := 0; i < left; i++ {
      +		m[0] = keyEscape
      +		m[1] = '['
      +		m[2] = 'D'
      +		m = m[3:]
      +	}
      +	for i := 0; i < right; i++ {
      +		m[0] = keyEscape
      +		m[1] = '['
      +		m[2] = 'C'
      +		m = m[3:]
      +	}
      +
      +	t.queue(movement)
      +}
      +
      +func (t *Terminal) clearLineToRight() {
      +	op := []rune{keyEscape, '[', 'K'}
      +	t.queue(op)
      +}
      +
      +const maxLineLength = 4096
      +
      +func (t *Terminal) setLine(newLine []rune, newPos int) {
      +	if t.echo {
      +		t.moveCursorToPos(0)
      +		t.writeLine(newLine)
      +		for i := len(newLine); i < len(t.line); i++ {
      +			t.writeLine(space)
      +		}
      +		t.moveCursorToPos(newPos)
      +	}
      +	t.line = newLine
      +	t.pos = newPos
      +}
      +
      +func (t *Terminal) advanceCursor(places int) {
      +	t.cursorX += places
      +	t.cursorY += t.cursorX / t.termWidth
      +	if t.cursorY > t.maxLine {
      +		t.maxLine = t.cursorY
      +	}
      +	t.cursorX = t.cursorX % t.termWidth
      +
      +	if places > 0 && t.cursorX == 0 {
      +		// Normally terminals will advance the current position
      +		// when writing a character. But that doesn't happen
      +		// for the last character in a line. However, when
      +		// writing a character (except a new line) that causes
      +		// a line wrap, the position will be advanced two
      +		// places.
      +		//
      +		// So, if we are stopping at the end of a line, we
      +		// need to write a newline so that our cursor can be
      +		// advanced to the next line.
      +		t.outBuf = append(t.outBuf, '\n')
      +	}
      +}
      +
      +func (t *Terminal) eraseNPreviousChars(n int) {
      +	if n == 0 {
      +		return
      +	}
      +
      +	if t.pos < n {
      +		n = t.pos
      +	}
      +	t.pos -= n
      +	t.moveCursorToPos(t.pos)
      +
      +	copy(t.line[t.pos:], t.line[n+t.pos:])
      +	t.line = t.line[:len(t.line)-n]
      +	if t.echo {
      +		t.writeLine(t.line[t.pos:])
      +		for i := 0; i < n; i++ {
      +			t.queue(space)
      +		}
      +		t.advanceCursor(n)
      +		t.moveCursorToPos(t.pos)
      +	}
      +}
      +
      +// countToLeftWord returns then number of characters from the cursor to the
      +// start of the previous word.
      +func (t *Terminal) countToLeftWord() int {
      +	if t.pos == 0 {
      +		return 0
      +	}
      +
      +	pos := t.pos - 1
      +	for pos > 0 {
      +		if t.line[pos] != ' ' {
      +			break
      +		}
      +		pos--
      +	}
      +	for pos > 0 {
      +		if t.line[pos] == ' ' {
      +			pos++
      +			break
      +		}
      +		pos--
      +	}
      +
      +	return t.pos - pos
      +}
      +
      +// countToRightWord returns then number of characters from the cursor to the
      +// start of the next word.
      +func (t *Terminal) countToRightWord() int {
      +	pos := t.pos
      +	for pos < len(t.line) {
      +		if t.line[pos] == ' ' {
      +			break
      +		}
      +		pos++
      +	}
      +	for pos < len(t.line) {
      +		if t.line[pos] != ' ' {
      +			break
      +		}
      +		pos++
      +	}
      +	return pos - t.pos
      +}
      +
      +// visualLength returns the number of visible glyphs in s.
      +func visualLength(runes []rune) int {
      +	inEscapeSeq := false
      +	length := 0
      +
      +	for _, r := range runes {
      +		switch {
      +		case inEscapeSeq:
      +			if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') {
      +				inEscapeSeq = false
      +			}
      +		case r == '\x1b':
      +			inEscapeSeq = true
      +		default:
      +			length++
      +		}
      +	}
      +
      +	return length
      +}
      +
      +// handleKey processes the given key and, optionally, returns a line of text
      +// that the user has entered.
      +func (t *Terminal) handleKey(key rune) (line string, ok bool) {
      +	if t.pasteActive && key != keyEnter {
      +		t.addKeyToLine(key)
      +		return
      +	}
      +
      +	switch key {
      +	case keyBackspace:
      +		if t.pos == 0 {
      +			return
      +		}
      +		t.eraseNPreviousChars(1)
      +	case keyAltLeft:
      +		// move left by a word.
      +		t.pos -= t.countToLeftWord()
      +		t.moveCursorToPos(t.pos)
      +	case keyAltRight:
      +		// move right by a word.
      +		t.pos += t.countToRightWord()
      +		t.moveCursorToPos(t.pos)
      +	case keyLeft:
      +		if t.pos == 0 {
      +			return
      +		}
      +		t.pos--
      +		t.moveCursorToPos(t.pos)
      +	case keyRight:
      +		if t.pos == len(t.line) {
      +			return
      +		}
      +		t.pos++
      +		t.moveCursorToPos(t.pos)
      +	case keyHome:
      +		if t.pos == 0 {
      +			return
      +		}
      +		t.pos = 0
      +		t.moveCursorToPos(t.pos)
      +	case keyEnd:
      +		if t.pos == len(t.line) {
      +			return
      +		}
      +		t.pos = len(t.line)
      +		t.moveCursorToPos(t.pos)
      +	case keyUp:
      +		entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)
      +		if !ok {
      +			return "", false
      +		}
      +		if t.historyIndex == -1 {
      +			t.historyPending = string(t.line)
      +		}
      +		t.historyIndex++
      +		runes := []rune(entry)
      +		t.setLine(runes, len(runes))
      +	case keyDown:
      +		switch t.historyIndex {
      +		case -1:
      +			return
      +		case 0:
      +			runes := []rune(t.historyPending)
      +			t.setLine(runes, len(runes))
      +			t.historyIndex--
      +		default:
      +			entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)
      +			if ok {
      +				t.historyIndex--
      +				runes := []rune(entry)
      +				t.setLine(runes, len(runes))
      +			}
      +		}
      +	case keyEnter:
      +		t.moveCursorToPos(len(t.line))
      +		t.queue([]rune("\r\n"))
      +		line = string(t.line)
      +		ok = true
      +		t.line = t.line[:0]
      +		t.pos = 0
      +		t.cursorX = 0
      +		t.cursorY = 0
      +		t.maxLine = 0
      +	case keyDeleteWord:
      +		// Delete zero or more spaces and then one or more characters.
      +		t.eraseNPreviousChars(t.countToLeftWord())
      +	case keyDeleteLine:
      +		// Delete everything from the current cursor position to the
      +		// end of line.
      +		for i := t.pos; i < len(t.line); i++ {
      +			t.queue(space)
      +			t.advanceCursor(1)
      +		}
      +		t.line = t.line[:t.pos]
      +		t.moveCursorToPos(t.pos)
      +	case keyCtrlD:
      +		// Erase the character under the current position.
      +		// The EOF case when the line is empty is handled in
      +		// readLine().
      +		if t.pos < len(t.line) {
      +			t.pos++
      +			t.eraseNPreviousChars(1)
      +		}
      +	case keyCtrlU:
      +		t.eraseNPreviousChars(t.pos)
      +	case keyClearScreen:
      +		// Erases the screen and moves the cursor to the home position.
      +		t.queue([]rune("\x1b[2J\x1b[H"))
      +		t.queue(t.prompt)
      +		t.cursorX, t.cursorY = 0, 0
      +		t.advanceCursor(visualLength(t.prompt))
      +		t.setLine(t.line, t.pos)
      +	default:
      +		if t.AutoCompleteCallback != nil {
      +			prefix := string(t.line[:t.pos])
      +			suffix := string(t.line[t.pos:])
      +
      +			t.lock.Unlock()
      +			newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key)
      +			t.lock.Lock()
      +
      +			if completeOk {
      +				t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos]))
      +				return
      +			}
      +		}
      +		if !isPrintable(key) {
      +			return
      +		}
      +		if len(t.line) == maxLineLength {
      +			return
      +		}
      +		t.addKeyToLine(key)
      +	}
      +	return
      +}
      +
      +// addKeyToLine inserts the given key at the current position in the current
      +// line.
      +func (t *Terminal) addKeyToLine(key rune) {
      +	if len(t.line) == cap(t.line) {
      +		newLine := make([]rune, len(t.line), 2*(1+len(t.line)))
      +		copy(newLine, t.line)
      +		t.line = newLine
      +	}
      +	t.line = t.line[:len(t.line)+1]
      +	copy(t.line[t.pos+1:], t.line[t.pos:])
      +	t.line[t.pos] = key
      +	if t.echo {
      +		t.writeLine(t.line[t.pos:])
      +	}
      +	t.pos++
      +	t.moveCursorToPos(t.pos)
      +}
      +
      +func (t *Terminal) writeLine(line []rune) {
      +	for len(line) != 0 {
      +		remainingOnLine := t.termWidth - t.cursorX
      +		todo := len(line)
      +		if todo > remainingOnLine {
      +			todo = remainingOnLine
      +		}
      +		t.queue(line[:todo])
      +		t.advanceCursor(visualLength(line[:todo]))
      +		line = line[todo:]
      +	}
      +}
      +
      +func (t *Terminal) Write(buf []byte) (n int, err error) {
      +	t.lock.Lock()
      +	defer t.lock.Unlock()
      +
      +	if t.cursorX == 0 && t.cursorY == 0 {
      +		// This is the easy case: there's nothing on the screen that we
      +		// have to move out of the way.
      +		return t.c.Write(buf)
      +	}
      +
      +	// We have a prompt and possibly user input on the screen. We
      +	// have to clear it first.
      +	t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */)
      +	t.cursorX = 0
      +	t.clearLineToRight()
      +
      +	for t.cursorY > 0 {
      +		t.move(1 /* up */, 0, 0, 0)
      +		t.cursorY--
      +		t.clearLineToRight()
      +	}
      +
      +	if _, err = t.c.Write(t.outBuf); err != nil {
      +		return
      +	}
      +	t.outBuf = t.outBuf[:0]
      +
      +	if n, err = t.c.Write(buf); err != nil {
      +		return
      +	}
      +
      +	t.writeLine(t.prompt)
      +	if t.echo {
      +		t.writeLine(t.line)
      +	}
      +
      +	t.moveCursorToPos(t.pos)
      +
      +	if _, err = t.c.Write(t.outBuf); err != nil {
      +		return
      +	}
      +	t.outBuf = t.outBuf[:0]
      +	return
      +}
      +
      +// ReadPassword temporarily changes the prompt and reads a password, without
      +// echo, from the terminal.
      +func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
      +	t.lock.Lock()
      +	defer t.lock.Unlock()
      +
      +	oldPrompt := t.prompt
      +	t.prompt = []rune(prompt)
      +	t.echo = false
      +
      +	line, err = t.readLine()
      +
      +	t.prompt = oldPrompt
      +	t.echo = true
      +
      +	return
      +}
      +
      +// ReadLine returns a line of input from the terminal.
      +func (t *Terminal) ReadLine() (line string, err error) {
      +	t.lock.Lock()
      +	defer t.lock.Unlock()
      +
      +	return t.readLine()
      +}
      +
      +func (t *Terminal) readLine() (line string, err error) {
      +	// t.lock must be held at this point
      +
      +	if t.cursorX == 0 && t.cursorY == 0 {
      +		t.writeLine(t.prompt)
      +		t.c.Write(t.outBuf)
      +		t.outBuf = t.outBuf[:0]
      +	}
      +
      +	lineIsPasted := t.pasteActive
      +
      +	for {
      +		rest := t.remainder
      +		lineOk := false
      +		for !lineOk {
      +			var key rune
      +			key, rest = bytesToKey(rest, t.pasteActive)
      +			if key == utf8.RuneError {
      +				break
      +			}
      +			if !t.pasteActive {
      +				if key == keyCtrlD {
      +					if len(t.line) == 0 {
      +						return "", io.EOF
      +					}
      +				}
      +				if key == keyPasteStart {
      +					t.pasteActive = true
      +					if len(t.line) == 0 {
      +						lineIsPasted = true
      +					}
      +					continue
      +				}
      +			} else if key == keyPasteEnd {
      +				t.pasteActive = false
      +				continue
      +			}
      +			if !t.pasteActive {
      +				lineIsPasted = false
      +			}
      +			line, lineOk = t.handleKey(key)
      +		}
      +		if len(rest) > 0 {
      +			n := copy(t.inBuf[:], rest)
      +			t.remainder = t.inBuf[:n]
      +		} else {
      +			t.remainder = nil
      +		}
      +		t.c.Write(t.outBuf)
      +		t.outBuf = t.outBuf[:0]
      +		if lineOk {
      +			if t.echo {
      +				t.historyIndex = -1
      +				t.history.Add(line)
      +			}
      +			if lineIsPasted {
      +				err = ErrPasteIndicator
      +			}
      +			return
      +		}
      +
      +		// t.remainder is a slice at the beginning of t.inBuf
      +		// containing a partial key sequence
      +		readBuf := t.inBuf[len(t.remainder):]
      +		var n int
      +
      +		t.lock.Unlock()
      +		n, err = t.c.Read(readBuf)
      +		t.lock.Lock()
      +
      +		if err != nil {
      +			return
      +		}
      +
      +		t.remainder = t.inBuf[:n+len(t.remainder)]
      +	}
      +
      +	panic("unreachable") // for Go 1.0.
      +}
      +
      +// SetPrompt sets the prompt to be used when reading subsequent lines.
      +func (t *Terminal) SetPrompt(prompt string) {
      +	t.lock.Lock()
      +	defer t.lock.Unlock()
      +
      +	t.prompt = []rune(prompt)
      +}
      +
      +func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) {
      +	// Move cursor to column zero at the start of the line.
      +	t.move(t.cursorY, 0, t.cursorX, 0)
      +	t.cursorX, t.cursorY = 0, 0
      +	t.clearLineToRight()
      +	for t.cursorY < numPrevLines {
      +		// Move down a line
      +		t.move(0, 1, 0, 0)
      +		t.cursorY++
      +		t.clearLineToRight()
      +	}
      +	// Move back to beginning.
      +	t.move(t.cursorY, 0, 0, 0)
      +	t.cursorX, t.cursorY = 0, 0
      +
      +	t.queue(t.prompt)
      +	t.advanceCursor(visualLength(t.prompt))
      +	t.writeLine(t.line)
      +	t.moveCursorToPos(t.pos)
      +}
      +
      +func (t *Terminal) SetSize(width, height int) error {
      +	t.lock.Lock()
      +	defer t.lock.Unlock()
      +
      +	if width == 0 {
      +		width = 1
      +	}
      +
      +	oldWidth := t.termWidth
      +	t.termWidth, t.termHeight = width, height
      +
      +	switch {
      +	case width == oldWidth:
      +		// If the width didn't change then nothing else needs to be
      +		// done.
      +		return nil
      +	case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0:
      +		// If there is nothing on current line and no prompt printed,
      +		// just do nothing
      +		return nil
      +	case width < oldWidth:
      +		// Some terminals (e.g. xterm) will truncate lines that were
      +		// too long when shinking. Others, (e.g. gnome-terminal) will
      +		// attempt to wrap them. For the former, repainting t.maxLine
      +		// works great, but that behaviour goes badly wrong in the case
      +		// of the latter because they have doubled every full line.
      +
      +		// We assume that we are working on a terminal that wraps lines
      +		// and adjust the cursor position based on every previous line
      +		// wrapping and turning into two. This causes the prompt on
      +		// xterms to move upwards, which isn't great, but it avoids a
      +		// huge mess with gnome-terminal.
      +		if t.cursorX >= t.termWidth {
      +			t.cursorX = t.termWidth - 1
      +		}
      +		t.cursorY *= 2
      +		t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2)
      +	case width > oldWidth:
      +		// If the terminal expands then our position calculations will
      +		// be wrong in the future because we think the cursor is
      +		// |t.pos| chars into the string, but there will be a gap at
      +		// the end of any wrapped line.
      +		//
      +		// But the position will actually be correct until we move, so
      +		// we can move back to the beginning and repaint everything.
      +		t.clearAndRepaintLinePlusNPrevious(t.maxLine)
      +	}
      +
      +	_, err := t.c.Write(t.outBuf)
      +	t.outBuf = t.outBuf[:0]
      +	return err
      +}
      +
      +type pasteIndicatorError struct{}
      +
      +func (pasteIndicatorError) Error() string {
      +	return "terminal: ErrPasteIndicator not correctly handled"
      +}
      +
      +// ErrPasteIndicator may be returned from ReadLine as the error, in addition
      +// to valid line data. It indicates that bracketed paste mode is enabled and
      +// that the returned line consists only of pasted data. Programs may wish to
      +// interpret pasted data more literally than typed data.
      +var ErrPasteIndicator = pasteIndicatorError{}
      +
      +// SetBracketedPasteMode requests that the terminal bracket paste operations
      +// with markers. Not all terminals support this but, if it is supported, then
      +// enabling this mode will stop any autocomplete callback from running due to
      +// pastes. Additionally, any lines that are completely pasted will be returned
      +// from ReadLine with the error set to ErrPasteIndicator.
      +func (t *Terminal) SetBracketedPasteMode(on bool) {
      +	if on {
      +		io.WriteString(t.c, "\x1b[?2004h")
      +	} else {
      +		io.WriteString(t.c, "\x1b[?2004l")
      +	}
      +}
      +
      +// stRingBuffer is a ring buffer of strings.
      +type stRingBuffer struct {
      +	// entries contains max elements.
      +	entries []string
      +	max     int
      +	// head contains the index of the element most recently added to the ring.
      +	head int
      +	// size contains the number of elements in the ring.
      +	size int
      +}
      +
      +func (s *stRingBuffer) Add(a string) {
      +	if s.entries == nil {
      +		const defaultNumEntries = 100
      +		s.entries = make([]string, defaultNumEntries)
      +		s.max = defaultNumEntries
      +	}
      +
      +	s.head = (s.head + 1) % s.max
      +	s.entries[s.head] = a
      +	if s.size < s.max {
      +		s.size++
      +	}
      +}
      +
      +// NthPreviousEntry returns the value passed to the nth previous call to Add.
      +// If n is zero then the immediately prior value is returned, if one, then the
      +// next most recent, and so on. If such an element doesn't exist then ok is
      +// false.
      +func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
      +	if n >= s.size {
      +		return "", false
      +	}
      +	index := s.head - n
      +	if index < 0 {
      +		index += s.max
      +	}
      +	return s.entries[index], true
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
      new file mode 100644
      index 00000000..a663fe41
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
      @@ -0,0 +1,269 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package terminal
      +
      +import (
      +	"io"
      +	"testing"
      +)
      +
      +type MockTerminal struct {
      +	toSend       []byte
      +	bytesPerRead int
      +	received     []byte
      +}
      +
      +func (c *MockTerminal) Read(data []byte) (n int, err error) {
      +	n = len(data)
      +	if n == 0 {
      +		return
      +	}
      +	if n > len(c.toSend) {
      +		n = len(c.toSend)
      +	}
      +	if n == 0 {
      +		return 0, io.EOF
      +	}
      +	if c.bytesPerRead > 0 && n > c.bytesPerRead {
      +		n = c.bytesPerRead
      +	}
      +	copy(data, c.toSend[:n])
      +	c.toSend = c.toSend[n:]
      +	return
      +}
      +
      +func (c *MockTerminal) Write(data []byte) (n int, err error) {
      +	c.received = append(c.received, data...)
      +	return len(data), nil
      +}
      +
      +func TestClose(t *testing.T) {
      +	c := &MockTerminal{}
      +	ss := NewTerminal(c, "> ")
      +	line, err := ss.ReadLine()
      +	if line != "" {
      +		t.Errorf("Expected empty line but got: %s", line)
      +	}
      +	if err != io.EOF {
      +		t.Errorf("Error should have been EOF but got: %s", err)
      +	}
      +}
      +
      +var keyPressTests = []struct {
      +	in             string
      +	line           string
      +	err            error
      +	throwAwayLines int
      +}{
      +	{
      +		err: io.EOF,
      +	},
      +	{
      +		in:   "\r",
      +		line: "",
      +	},
      +	{
      +		in:   "foo\r",
      +		line: "foo",
      +	},
      +	{
      +		in:   "a\x1b[Cb\r", // right
      +		line: "ab",
      +	},
      +	{
      +		in:   "a\x1b[Db\r", // left
      +		line: "ba",
      +	},
      +	{
      +		in:   "a\177b\r", // backspace
      +		line: "b",
      +	},
      +	{
      +		in: "\x1b[A\r", // up
      +	},
      +	{
      +		in: "\x1b[B\r", // down
      +	},
      +	{
      +		in:   "line\x1b[A\x1b[B\r", // up then down
      +		line: "line",
      +	},
      +	{
      +		in:             "line1\rline2\x1b[A\r", // recall previous line.
      +		line:           "line1",
      +		throwAwayLines: 1,
      +	},
      +	{
      +		// recall two previous lines and append.
      +		in:             "line1\rline2\rline3\x1b[A\x1b[Axxx\r",
      +		line:           "line1xxx",
      +		throwAwayLines: 2,
      +	},
      +	{
      +		// Ctrl-A to move to beginning of line followed by ^K to kill
      +		// line.
      +		in:   "a b \001\013\r",
      +		line: "",
      +	},
      +	{
      +		// Ctrl-A to move to beginning of line, Ctrl-E to move to end,
      +		// finally ^K to kill nothing.
      +		in:   "a b \001\005\013\r",
      +		line: "a b ",
      +	},
      +	{
      +		in:   "\027\r",
      +		line: "",
      +	},
      +	{
      +		in:   "a\027\r",
      +		line: "",
      +	},
      +	{
      +		in:   "a \027\r",
      +		line: "",
      +	},
      +	{
      +		in:   "a b\027\r",
      +		line: "a ",
      +	},
      +	{
      +		in:   "a b \027\r",
      +		line: "a ",
      +	},
      +	{
      +		in:   "one two thr\x1b[D\027\r",
      +		line: "one two r",
      +	},
      +	{
      +		in:   "\013\r",
      +		line: "",
      +	},
      +	{
      +		in:   "a\013\r",
      +		line: "a",
      +	},
      +	{
      +		in:   "ab\x1b[D\013\r",
      +		line: "a",
      +	},
      +	{
      +		in:   "Ξεσκεπάζω\r",
      +		line: "Ξεσκεπάζω",
      +	},
      +	{
      +		in:             "£\r\x1b[A\177\r", // non-ASCII char, enter, up, backspace.
      +		line:           "",
      +		throwAwayLines: 1,
      +	},
      +	{
      +		in:             "£\r££\x1b[A\x1b[B\177\r", // non-ASCII char, enter, 2x non-ASCII, up, down, backspace, enter.
      +		line:           "£",
      +		throwAwayLines: 1,
      +	},
      +	{
      +		// Ctrl-D at the end of the line should be ignored.
      +		in:   "a\004\r",
      +		line: "a",
      +	},
      +	{
      +		// a, b, left, Ctrl-D should erase the b.
      +		in:   "ab\x1b[D\004\r",
      +		line: "a",
      +	},
      +	{
      +		// a, b, c, d, left, left, ^U should erase to the beginning of
      +		// the line.
      +		in:   "abcd\x1b[D\x1b[D\025\r",
      +		line: "cd",
      +	},
      +	{
      +		// Bracketed paste mode: control sequences should be returned
      +		// verbatim in paste mode.
      +		in:   "abc\x1b[200~de\177f\x1b[201~\177\r",
      +		line: "abcde\177",
      +	},
      +	{
      +		// Enter in bracketed paste mode should still work.
      +		in:             "abc\x1b[200~d\refg\x1b[201~h\r",
      +		line:           "efgh",
      +		throwAwayLines: 1,
      +	},
      +	{
      +		// Lines consisting entirely of pasted data should be indicated as such.
      +		in:   "\x1b[200~a\r",
      +		line: "a",
      +		err:  ErrPasteIndicator,
      +	},
      +}
      +
      +func TestKeyPresses(t *testing.T) {
      +	for i, test := range keyPressTests {
      +		for j := 1; j < len(test.in); j++ {
      +			c := &MockTerminal{
      +				toSend:       []byte(test.in),
      +				bytesPerRead: j,
      +			}
      +			ss := NewTerminal(c, "> ")
      +			for k := 0; k < test.throwAwayLines; k++ {
      +				_, err := ss.ReadLine()
      +				if err != nil {
      +					t.Errorf("Throwaway line %d from test %d resulted in error: %s", k, i, err)
      +				}
      +			}
      +			line, err := ss.ReadLine()
      +			if line != test.line {
      +				t.Errorf("Line resulting from test %d (%d bytes per read) was '%s', expected '%s'", i, j, line, test.line)
      +				break
      +			}
      +			if err != test.err {
      +				t.Errorf("Error resulting from test %d (%d bytes per read) was '%v', expected '%v'", i, j, err, test.err)
      +				break
      +			}
      +		}
      +	}
      +}
      +
      +func TestPasswordNotSaved(t *testing.T) {
      +	c := &MockTerminal{
      +		toSend:       []byte("password\r\x1b[A\r"),
      +		bytesPerRead: 1,
      +	}
      +	ss := NewTerminal(c, "> ")
      +	pw, _ := ss.ReadPassword("> ")
      +	if pw != "password" {
      +		t.Fatalf("failed to read password, got %s", pw)
      +	}
      +	line, _ := ss.ReadLine()
      +	if len(line) > 0 {
      +		t.Fatalf("password was saved in history")
      +	}
      +}
      +
      +var setSizeTests = []struct {
      +	width, height int
      +}{
      +	{40, 13},
      +	{80, 24},
      +	{132, 43},
      +}
      +
      +func TestTerminalSetSize(t *testing.T) {
      +	for _, setSize := range setSizeTests {
      +		c := &MockTerminal{
      +			toSend:       []byte("password\r\x1b[A\r"),
      +			bytesPerRead: 1,
      +		}
      +		ss := NewTerminal(c, "> ")
      +		ss.SetSize(setSize.width, setSize.height)
      +		pw, _ := ss.ReadPassword("Password: ")
      +		if pw != "password" {
      +			t.Fatalf("failed to read password, got %s", pw)
      +		}
      +		if string(c.received) != "Password: \r\n" {
      +			t.Errorf("failed to set the temporary prompt expected %q, got %q", "Password: ", c.received)
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go
      new file mode 100644
      index 00000000..598e3df7
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go
      @@ -0,0 +1,128 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
      +
      +// Package terminal provides support functions for dealing with terminals, as
      +// commonly found on UNIX systems.
      +//
      +// Putting a terminal into raw mode is the most common requirement:
      +//
      +// 	oldState, err := terminal.MakeRaw(0)
      +// 	if err != nil {
      +// 	        panic(err)
      +// 	}
      +// 	defer terminal.Restore(0, oldState)
      +package terminal // import "golang.org/x/crypto/ssh/terminal"
      +
      +import (
      +	"io"
      +	"syscall"
      +	"unsafe"
      +)
      +
      +// State contains the state of a terminal.
      +type State struct {
      +	termios syscall.Termios
      +}
      +
      +// IsTerminal returns true if the given file descriptor is a terminal.
      +func IsTerminal(fd int) bool {
      +	var termios syscall.Termios
      +	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
      +	return err == 0
      +}
      +
      +// MakeRaw put the terminal connected to the given file descriptor into raw
      +// mode and returns the previous state of the terminal so that it can be
      +// restored.
      +func MakeRaw(fd int) (*State, error) {
      +	var oldState State
      +	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
      +		return nil, err
      +	}
      +
      +	newState := oldState.termios
      +	newState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF
      +	newState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG
      +	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
      +		return nil, err
      +	}
      +
      +	return &oldState, nil
      +}
      +
      +// GetState returns the current state of a terminal which may be useful to
      +// restore the terminal after a signal.
      +func GetState(fd int) (*State, error) {
      +	var oldState State
      +	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
      +		return nil, err
      +	}
      +
      +	return &oldState, nil
      +}
      +
      +// Restore restores the terminal connected to the given file descriptor to a
      +// previous state.
      +func Restore(fd int, state *State) error {
      +	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
      +	return err
      +}
      +
      +// GetSize returns the dimensions of the given terminal.
      +func GetSize(fd int) (width, height int, err error) {
      +	var dimensions [4]uint16
      +
      +	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
      +		return -1, -1, err
      +	}
      +	return int(dimensions[1]), int(dimensions[0]), nil
      +}
      +
      +// ReadPassword reads a line of input from a terminal without local echo.  This
      +// is commonly used for inputting passwords and other sensitive data. The slice
      +// returned does not include the \n.
      +func ReadPassword(fd int) ([]byte, error) {
      +	var oldState syscall.Termios
      +	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 {
      +		return nil, err
      +	}
      +
      +	newState := oldState
      +	newState.Lflag &^= syscall.ECHO
      +	newState.Lflag |= syscall.ICANON | syscall.ISIG
      +	newState.Iflag |= syscall.ICRNL
      +	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
      +		return nil, err
      +	}
      +
      +	defer func() {
      +		syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0)
      +	}()
      +
      +	var buf [16]byte
      +	var ret []byte
      +	for {
      +		n, err := syscall.Read(fd, buf[:])
      +		if err != nil {
      +			return nil, err
      +		}
      +		if n == 0 {
      +			if len(ret) == 0 {
      +				return nil, io.EOF
      +			}
      +			break
      +		}
      +		if buf[n-1] == '\n' {
      +			n--
      +		}
      +		ret = append(ret, buf[:n]...)
      +		if n < len(buf) {
      +			break
      +		}
      +	}
      +
      +	return ret, nil
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
      new file mode 100644
      index 00000000..9c1ffd14
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
      @@ -0,0 +1,12 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build darwin dragonfly freebsd netbsd openbsd
      +
      +package terminal
      +
      +import "syscall"
      +
      +const ioctlReadTermios = syscall.TIOCGETA
      +const ioctlWriteTermios = syscall.TIOCSETA
      diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
      new file mode 100644
      index 00000000..5883b22d
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
      @@ -0,0 +1,11 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package terminal
      +
      +// These constants are declared here, rather than importing
      +// them from the syscall package as some syscall packages, even
      +// on linux, for example gccgo, do not declare them.
      +const ioctlReadTermios = 0x5401  // syscall.TCGETS
      +const ioctlWriteTermios = 0x5402 // syscall.TCSETS
      diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
      new file mode 100644
      index 00000000..2dd6c3d9
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
      @@ -0,0 +1,174 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build windows
      +
      +// Package terminal provides support functions for dealing with terminals, as
      +// commonly found on UNIX systems.
      +//
      +// Putting a terminal into raw mode is the most common requirement:
      +//
      +// 	oldState, err := terminal.MakeRaw(0)
      +// 	if err != nil {
      +// 	        panic(err)
      +// 	}
      +// 	defer terminal.Restore(0, oldState)
      +package terminal
      +
      +import (
      +	"io"
      +	"syscall"
      +	"unsafe"
      +)
      +
      +const (
      +	enableLineInput       = 2
      +	enableEchoInput       = 4
      +	enableProcessedInput  = 1
      +	enableWindowInput     = 8
      +	enableMouseInput      = 16
      +	enableInsertMode      = 32
      +	enableQuickEditMode   = 64
      +	enableExtendedFlags   = 128
      +	enableAutoPosition    = 256
      +	enableProcessedOutput = 1
      +	enableWrapAtEolOutput = 2
      +)
      +
      +var kernel32 = syscall.NewLazyDLL("kernel32.dll")
      +
      +var (
      +	procGetConsoleMode             = kernel32.NewProc("GetConsoleMode")
      +	procSetConsoleMode             = kernel32.NewProc("SetConsoleMode")
      +	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
      +)
      +
      +type (
      +	short int16
      +	word  uint16
      +
      +	coord struct {
      +		x short
      +		y short
      +	}
      +	smallRect struct {
      +		left   short
      +		top    short
      +		right  short
      +		bottom short
      +	}
      +	consoleScreenBufferInfo struct {
      +		size              coord
      +		cursorPosition    coord
      +		attributes        word
      +		window            smallRect
      +		maximumWindowSize coord
      +	}
      +)
      +
      +type State struct {
      +	mode uint32
      +}
      +
      +// IsTerminal returns true if the given file descriptor is a terminal.
      +func IsTerminal(fd int) bool {
      +	var st uint32
      +	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
      +	return r != 0 && e == 0
      +}
      +
      +// MakeRaw put the terminal connected to the given file descriptor into raw
      +// mode and returns the previous state of the terminal so that it can be
      +// restored.
      +func MakeRaw(fd int) (*State, error) {
      +	var st uint32
      +	_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
      +	if e != 0 {
      +		return nil, error(e)
      +	}
      +	st &^= (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
      +	_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
      +	if e != 0 {
      +		return nil, error(e)
      +	}
      +	return &State{st}, nil
      +}
      +
      +// GetState returns the current state of a terminal which may be useful to
      +// restore the terminal after a signal.
      +func GetState(fd int) (*State, error) {
      +	var st uint32
      +	_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
      +	if e != 0 {
      +		return nil, error(e)
      +	}
      +	return &State{st}, nil
      +}
      +
      +// Restore restores the terminal connected to the given file descriptor to a
      +// previous state.
      +func Restore(fd int, state *State) error {
      +	_, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
      +	return err
      +}
      +
      +// GetSize returns the dimensions of the given terminal.
      +func GetSize(fd int) (width, height int, err error) {
      +	var info consoleScreenBufferInfo
      +	_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
      +	if e != 0 {
      +		return 0, 0, error(e)
      +	}
      +	return int(info.size.x), int(info.size.y), nil
      +}
      +
      +// ReadPassword reads a line of input from a terminal without local echo.  This
      +// is commonly used for inputting passwords and other sensitive data. The slice
      +// returned does not include the \n.
      +func ReadPassword(fd int) ([]byte, error) {
      +	var st uint32
      +	_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
      +	if e != 0 {
      +		return nil, error(e)
      +	}
      +	old := st
      +
      +	st &^= (enableEchoInput)
      +	st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
      +	_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
      +	if e != 0 {
      +		return nil, error(e)
      +	}
      +
      +	defer func() {
      +		syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
      +	}()
      +
      +	var buf [16]byte
      +	var ret []byte
      +	for {
      +		n, err := syscall.Read(syscall.Handle(fd), buf[:])
      +		if err != nil {
      +			return nil, err
      +		}
      +		if n == 0 {
      +			if len(ret) == 0 {
      +				return nil, io.EOF
      +			}
      +			break
      +		}
      +		if buf[n-1] == '\n' {
      +			n--
      +		}
      +		if n > 0 && buf[n-1] == '\r' {
      +			n--
      +		}
      +		ret = append(ret, buf[:n]...)
      +		if n < len(buf) {
      +			break
      +		}
      +	}
      +
      +	return ret, nil
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go
      new file mode 100644
      index 00000000..f481253c
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go
      @@ -0,0 +1,59 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build darwin dragonfly freebsd linux netbsd openbsd
      +
      +package test
      +
      +import (
      +	"bytes"
      +	"testing"
      +
      +	"golang.org/x/crypto/ssh"
      +	"golang.org/x/crypto/ssh/agent"
      +)
      +
      +func TestAgentForward(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conn := server.Dial(clientConfig())
      +	defer conn.Close()
      +
      +	keyring := agent.NewKeyring()
      +	if err := keyring.Add(agent.AddedKey{PrivateKey: testPrivateKeys["dsa"]}); err != nil {
      +		t.Fatalf("Error adding key: %s", err)
      +	}
      +	if err := keyring.Add(agent.AddedKey{
      +		PrivateKey:       testPrivateKeys["dsa"],
      +		ConfirmBeforeUse: true,
      +		LifetimeSecs:     3600,
      +	}); err != nil {
      +		t.Fatalf("Error adding key with constraints: %s", err)
      +	}
      +	pub := testPublicKeys["dsa"]
      +
      +	sess, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("NewSession: %v", err)
      +	}
      +	if err := agent.RequestAgentForwarding(sess); err != nil {
      +		t.Fatalf("RequestAgentForwarding: %v", err)
      +	}
      +
      +	if err := agent.ForwardToAgent(conn, keyring); err != nil {
      +		t.Fatalf("SetupForwardKeyring: %v", err)
      +	}
      +	out, err := sess.CombinedOutput("ssh-add -L")
      +	if err != nil {
      +		t.Fatalf("running ssh-add: %v, out %s", err, out)
      +	}
      +	key, _, _, _, err := ssh.ParseAuthorizedKey(out)
      +	if err != nil {
      +		t.Fatalf("ParseAuthorizedKey(%q): %v", out, err)
      +	}
      +
      +	if !bytes.Equal(key.Marshal(), pub.Marshal()) {
      +		t.Fatalf("got key %s, want %s", ssh.MarshalAuthorizedKey(key), ssh.MarshalAuthorizedKey(pub))
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/test/cert_test.go b/vendor/golang.org/x/crypto/ssh/test/cert_test.go
      new file mode 100644
      index 00000000..364790f1
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/test/cert_test.go
      @@ -0,0 +1,47 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build darwin dragonfly freebsd linux netbsd openbsd
      +
      +package test
      +
      +import (
      +	"crypto/rand"
      +	"testing"
      +
      +	"golang.org/x/crypto/ssh"
      +)
      +
      +func TestCertLogin(t *testing.T) {
      +	s := newServer(t)
      +	defer s.Shutdown()
      +
      +	// Use a key different from the default.
      +	clientKey := testSigners["dsa"]
      +	caAuthKey := testSigners["ecdsa"]
      +	cert := &ssh.Certificate{
      +		Key:             clientKey.PublicKey(),
      +		ValidPrincipals: []string{username()},
      +		CertType:        ssh.UserCert,
      +		ValidBefore:     ssh.CertTimeInfinity,
      +	}
      +	if err := cert.SignCert(rand.Reader, caAuthKey); err != nil {
      +		t.Fatalf("SetSignature: %v", err)
      +	}
      +
      +	certSigner, err := ssh.NewCertSigner(cert, clientKey)
      +	if err != nil {
      +		t.Fatalf("NewCertSigner: %v", err)
      +	}
      +
      +	conf := &ssh.ClientConfig{
      +		User: username(),
      +	}
      +	conf.Auth = append(conf.Auth, ssh.PublicKeys(certSigner))
      +	client, err := s.TryDial(conf)
      +	if err != nil {
      +		t.Fatalf("TryDial: %v", err)
      +	}
      +	client.Close()
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/test/doc.go b/vendor/golang.org/x/crypto/ssh/test/doc.go
      new file mode 100644
      index 00000000..3f9b3346
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/test/doc.go
      @@ -0,0 +1,7 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This package contains integration tests for the
      +// golang.org/x/crypto/ssh package.
      +package test // import "golang.org/x/crypto/ssh/test"
      diff --git a/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go
      new file mode 100644
      index 00000000..877a88cd
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go
      @@ -0,0 +1,160 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build darwin dragonfly freebsd linux netbsd openbsd
      +
      +package test
      +
      +import (
      +	"bytes"
      +	"io"
      +	"io/ioutil"
      +	"math/rand"
      +	"net"
      +	"testing"
      +	"time"
      +)
      +
      +func TestPortForward(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conn := server.Dial(clientConfig())
      +	defer conn.Close()
      +
      +	sshListener, err := conn.Listen("tcp", "localhost:0")
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	go func() {
      +		sshConn, err := sshListener.Accept()
      +		if err != nil {
      +			t.Fatalf("listen.Accept failed: %v", err)
      +		}
      +
      +		_, err = io.Copy(sshConn, sshConn)
      +		if err != nil && err != io.EOF {
      +			t.Fatalf("ssh client copy: %v", err)
      +		}
      +		sshConn.Close()
      +	}()
      +
      +	forwardedAddr := sshListener.Addr().String()
      +	tcpConn, err := net.Dial("tcp", forwardedAddr)
      +	if err != nil {
      +		t.Fatalf("TCP dial failed: %v", err)
      +	}
      +
      +	readChan := make(chan []byte)
      +	go func() {
      +		data, _ := ioutil.ReadAll(tcpConn)
      +		readChan <- data
      +	}()
      +
      +	// Invent some data.
      +	data := make([]byte, 100*1000)
      +	for i := range data {
      +		data[i] = byte(i % 255)
      +	}
      +
      +	var sent []byte
      +	for len(sent) < 1000*1000 {
      +		// Send random sized chunks
      +		m := rand.Intn(len(data))
      +		n, err := tcpConn.Write(data[:m])
      +		if err != nil {
      +			break
      +		}
      +		sent = append(sent, data[:n]...)
      +	}
      +	if err := tcpConn.(*net.TCPConn).CloseWrite(); err != nil {
      +		t.Errorf("tcpConn.CloseWrite: %v", err)
      +	}
      +
      +	read := <-readChan
      +
      +	if len(sent) != len(read) {
      +		t.Fatalf("got %d bytes, want %d", len(read), len(sent))
      +	}
      +	if bytes.Compare(sent, read) != 0 {
      +		t.Fatalf("read back data does not match")
      +	}
      +
      +	if err := sshListener.Close(); err != nil {
      +		t.Fatalf("sshListener.Close: %v", err)
      +	}
      +
      +	// Check that the forward disappeared.
      +	tcpConn, err = net.Dial("tcp", forwardedAddr)
      +	if err == nil {
      +		tcpConn.Close()
      +		t.Errorf("still listening to %s after closing", forwardedAddr)
      +	}
      +}
      +
      +func TestAcceptClose(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conn := server.Dial(clientConfig())
      +
      +	sshListener, err := conn.Listen("tcp", "localhost:0")
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	quit := make(chan error, 1)
      +	go func() {
      +		for {
      +			c, err := sshListener.Accept()
      +			if err != nil {
      +				quit <- err
      +				break
      +			}
      +			c.Close()
      +		}
      +	}()
      +	sshListener.Close()
      +
      +	select {
      +	case <-time.After(1 * time.Second):
      +		t.Errorf("timeout: listener did not close.")
      +	case err := <-quit:
      +		t.Logf("quit as expected (error %v)", err)
      +	}
      +}
      +
      +// Check that listeners exit if the underlying client transport dies.
      +func TestPortForwardConnectionClose(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conn := server.Dial(clientConfig())
      +
      +	sshListener, err := conn.Listen("tcp", "localhost:0")
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +
      +	quit := make(chan error, 1)
      +	go func() {
      +		for {
      +			c, err := sshListener.Accept()
      +			if err != nil {
      +				quit <- err
      +				break
      +			}
      +			c.Close()
      +		}
      +	}()
      +
      +	// It would be even nicer if we closed the server side, but it
      +	// is more involved as the fd for that side is dup()ed.
      +	server.clientConn.Close()
      +
      +	select {
      +	case <-time.After(1 * time.Second):
      +		t.Errorf("timeout: listener did not close.")
      +	case err := <-quit:
      +		t.Logf("quit as expected (error %v)", err)
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/test/session_test.go b/vendor/golang.org/x/crypto/ssh/test/session_test.go
      new file mode 100644
      index 00000000..c0e714ba
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/test/session_test.go
      @@ -0,0 +1,340 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build !windows
      +
      +package test
      +
      +// Session functional tests.
      +
      +import (
      +	"bytes"
      +	"errors"
      +	"io"
      +	"strings"
      +	"testing"
      +
      +	"golang.org/x/crypto/ssh"
      +)
      +
      +func TestRunCommandSuccess(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conn := server.Dial(clientConfig())
      +	defer conn.Close()
      +
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("session failed: %v", err)
      +	}
      +	defer session.Close()
      +	err = session.Run("true")
      +	if err != nil {
      +		t.Fatalf("session failed: %v", err)
      +	}
      +}
      +
      +func TestHostKeyCheck(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +
      +	conf := clientConfig()
      +	hostDB := hostKeyDB()
      +	conf.HostKeyCallback = hostDB.Check
      +
      +	// change the keys.
      +	hostDB.keys[ssh.KeyAlgoRSA][25]++
      +	hostDB.keys[ssh.KeyAlgoDSA][25]++
      +	hostDB.keys[ssh.KeyAlgoECDSA256][25]++
      +
      +	conn, err := server.TryDial(conf)
      +	if err == nil {
      +		conn.Close()
      +		t.Fatalf("dial should have failed.")
      +	} else if !strings.Contains(err.Error(), "host key mismatch") {
      +		t.Fatalf("'host key mismatch' not found in %v", err)
      +	}
      +}
      +
      +func TestRunCommandStdin(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conn := server.Dial(clientConfig())
      +	defer conn.Close()
      +
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("session failed: %v", err)
      +	}
      +	defer session.Close()
      +
      +	r, w := io.Pipe()
      +	defer r.Close()
      +	defer w.Close()
      +	session.Stdin = r
      +
      +	err = session.Run("true")
      +	if err != nil {
      +		t.Fatalf("session failed: %v", err)
      +	}
      +}
      +
      +func TestRunCommandStdinError(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conn := server.Dial(clientConfig())
      +	defer conn.Close()
      +
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("session failed: %v", err)
      +	}
      +	defer session.Close()
      +
      +	r, w := io.Pipe()
      +	defer r.Close()
      +	session.Stdin = r
      +	pipeErr := errors.New("closing write end of pipe")
      +	w.CloseWithError(pipeErr)
      +
      +	err = session.Run("true")
      +	if err != pipeErr {
      +		t.Fatalf("expected %v, found %v", pipeErr, err)
      +	}
      +}
      +
      +func TestRunCommandFailed(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conn := server.Dial(clientConfig())
      +	defer conn.Close()
      +
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("session failed: %v", err)
      +	}
      +	defer session.Close()
      +	err = session.Run(`bash -c "kill -9 $$"`)
      +	if err == nil {
      +		t.Fatalf("session succeeded: %v", err)
      +	}
      +}
      +
      +func TestRunCommandWeClosed(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conn := server.Dial(clientConfig())
      +	defer conn.Close()
      +
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("session failed: %v", err)
      +	}
      +	err = session.Shell()
      +	if err != nil {
      +		t.Fatalf("shell failed: %v", err)
      +	}
      +	err = session.Close()
      +	if err != nil {
      +		t.Fatalf("shell failed: %v", err)
      +	}
      +}
      +
      +func TestFuncLargeRead(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conn := server.Dial(clientConfig())
      +	defer conn.Close()
      +
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("unable to create new session: %s", err)
      +	}
      +
      +	stdout, err := session.StdoutPipe()
      +	if err != nil {
      +		t.Fatalf("unable to acquire stdout pipe: %s", err)
      +	}
      +
      +	err = session.Start("dd if=/dev/urandom bs=2048 count=1024")
      +	if err != nil {
      +		t.Fatalf("unable to execute remote command: %s", err)
      +	}
      +
      +	buf := new(bytes.Buffer)
      +	n, err := io.Copy(buf, stdout)
      +	if err != nil {
      +		t.Fatalf("error reading from remote stdout: %s", err)
      +	}
      +
      +	if n != 2048*1024 {
      +		t.Fatalf("Expected %d bytes but read only %d from remote command", 2048, n)
      +	}
      +}
      +
      +func TestKeyChange(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conf := clientConfig()
      +	hostDB := hostKeyDB()
      +	conf.HostKeyCallback = hostDB.Check
      +	conf.RekeyThreshold = 1024
      +	conn := server.Dial(conf)
      +	defer conn.Close()
      +
      +	for i := 0; i < 4; i++ {
      +		session, err := conn.NewSession()
      +		if err != nil {
      +			t.Fatalf("unable to create new session: %s", err)
      +		}
      +
      +		stdout, err := session.StdoutPipe()
      +		if err != nil {
      +			t.Fatalf("unable to acquire stdout pipe: %s", err)
      +		}
      +
      +		err = session.Start("dd if=/dev/urandom bs=1024 count=1")
      +		if err != nil {
      +			t.Fatalf("unable to execute remote command: %s", err)
      +		}
      +		buf := new(bytes.Buffer)
      +		n, err := io.Copy(buf, stdout)
      +		if err != nil {
      +			t.Fatalf("error reading from remote stdout: %s", err)
      +		}
      +
      +		want := int64(1024)
      +		if n != want {
      +			t.Fatalf("Expected %d bytes but read only %d from remote command", want, n)
      +		}
      +	}
      +
      +	if changes := hostDB.checkCount; changes < 4 {
      +		t.Errorf("got %d key changes, want 4", changes)
      +	}
      +}
      +
      +func TestInvalidTerminalMode(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conn := server.Dial(clientConfig())
      +	defer conn.Close()
      +
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("session failed: %v", err)
      +	}
      +	defer session.Close()
      +
      +	if err = session.RequestPty("vt100", 80, 40, ssh.TerminalModes{255: 1984}); err == nil {
      +		t.Fatalf("req-pty failed: successful request with invalid mode")
      +	}
      +}
      +
      +func TestValidTerminalMode(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	conn := server.Dial(clientConfig())
      +	defer conn.Close()
      +
      +	session, err := conn.NewSession()
      +	if err != nil {
      +		t.Fatalf("session failed: %v", err)
      +	}
      +	defer session.Close()
      +
      +	stdout, err := session.StdoutPipe()
      +	if err != nil {
      +		t.Fatalf("unable to acquire stdout pipe: %s", err)
      +	}
      +
      +	stdin, err := session.StdinPipe()
      +	if err != nil {
      +		t.Fatalf("unable to acquire stdin pipe: %s", err)
      +	}
      +
      +	tm := ssh.TerminalModes{ssh.ECHO: 0}
      +	if err = session.RequestPty("xterm", 80, 40, tm); err != nil {
      +		t.Fatalf("req-pty failed: %s", err)
      +	}
      +
      +	err = session.Shell()
      +	if err != nil {
      +		t.Fatalf("session failed: %s", err)
      +	}
      +
      +	stdin.Write([]byte("stty -a && exit\n"))
      +
      +	var buf bytes.Buffer
      +	if _, err := io.Copy(&buf, stdout); err != nil {
      +		t.Fatalf("reading failed: %s", err)
      +	}
      +
      +	if sttyOutput := buf.String(); !strings.Contains(sttyOutput, "-echo ") {
      +		t.Fatalf("terminal mode failure: expected -echo in stty output, got %s", sttyOutput)
      +	}
      +}
      +
      +func TestCiphers(t *testing.T) {
      +	var config ssh.Config
      +	config.SetDefaults()
      +	cipherOrder := config.Ciphers
+	// If this cipher is commented out in cipher.go it will not be tested;
+	// the connection will fall back to the next available cipher instead.
      +	cipherOrder = append(cipherOrder, "aes128-cbc")
      +
      +	for _, ciph := range cipherOrder {
      +		server := newServer(t)
      +		defer server.Shutdown()
      +		conf := clientConfig()
      +		conf.Ciphers = []string{ciph}
+		// Don't fail if sshd doesn't have the cipher.
      +		conf.Ciphers = append(conf.Ciphers, cipherOrder...)
      +		conn, err := server.TryDial(conf)
      +		if err == nil {
      +			conn.Close()
      +		} else {
      +			t.Fatalf("failed for cipher %q", ciph)
      +		}
      +	}
      +}
      +
      +func TestMACs(t *testing.T) {
      +	var config ssh.Config
      +	config.SetDefaults()
      +	macOrder := config.MACs
      +
      +	for _, mac := range macOrder {
      +		server := newServer(t)
      +		defer server.Shutdown()
      +		conf := clientConfig()
      +		conf.MACs = []string{mac}
+		// Don't fail if sshd doesn't have the MAC.
      +		conf.MACs = append(conf.MACs, macOrder...)
      +		if conn, err := server.TryDial(conf); err == nil {
      +			conn.Close()
      +		} else {
      +			t.Fatalf("failed for MAC %q", mac)
      +		}
      +	}
      +}
      +
      +func TestKeyExchanges(t *testing.T) {
      +	var config ssh.Config
      +	config.SetDefaults()
      +	kexOrder := config.KeyExchanges
      +	for _, kex := range kexOrder {
      +		server := newServer(t)
      +		defer server.Shutdown()
      +		conf := clientConfig()
+		// Don't fail if sshd doesn't have the kex.
      +		conf.KeyExchanges = append([]string{kex}, kexOrder...)
      +		conn, err := server.TryDial(conf)
      +		if err == nil {
      +			conn.Close()
      +		} else {
      +			t.Errorf("failed for kex %q", kex)
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/test/tcpip_test.go b/vendor/golang.org/x/crypto/ssh/test/tcpip_test.go
      new file mode 100644
      index 00000000..a2eb9358
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/test/tcpip_test.go
      @@ -0,0 +1,46 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build !windows
      +
      +package test
      +
      +// direct-tcpip functional tests
      +
      +import (
      +	"io"
      +	"net"
      +	"testing"
      +)
      +
      +func TestDial(t *testing.T) {
      +	server := newServer(t)
      +	defer server.Shutdown()
      +	sshConn := server.Dial(clientConfig())
      +	defer sshConn.Close()
      +
      +	l, err := net.Listen("tcp", "127.0.0.1:0")
      +	if err != nil {
      +		t.Fatalf("Listen: %v", err)
      +	}
      +	defer l.Close()
      +
      +	go func() {
      +		for {
      +			c, err := l.Accept()
      +			if err != nil {
      +				break
      +			}
      +
      +			io.WriteString(c, c.RemoteAddr().String())
      +			c.Close()
      +		}
      +	}()
      +
      +	conn, err := sshConn.Dial("tcp", l.Addr().String())
      +	if err != nil {
      +		t.Fatalf("Dial: %v", err)
      +	}
      +	defer conn.Close()
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go
      new file mode 100644
      index 00000000..f1fc50b2
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go
      @@ -0,0 +1,261 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build darwin dragonfly freebsd linux netbsd openbsd plan9
      +
      +package test
      +
      +// functional test harness for unix.
      +
      +import (
      +	"bytes"
      +	"fmt"
      +	"io/ioutil"
      +	"log"
      +	"net"
      +	"os"
      +	"os/exec"
      +	"os/user"
      +	"path/filepath"
      +	"testing"
      +	"text/template"
      +
      +	"golang.org/x/crypto/ssh"
      +	"golang.org/x/crypto/ssh/testdata"
      +)
      +
      +const sshd_config = `
      +Protocol 2
      +HostKey {{.Dir}}/id_rsa
      +HostKey {{.Dir}}/id_dsa
      +HostKey {{.Dir}}/id_ecdsa
      +Pidfile {{.Dir}}/sshd.pid
      +#UsePrivilegeSeparation no
      +KeyRegenerationInterval 3600
      +ServerKeyBits 768
      +SyslogFacility AUTH
      +LogLevel DEBUG2
      +LoginGraceTime 120
      +PermitRootLogin no
      +StrictModes no
      +RSAAuthentication yes
      +PubkeyAuthentication yes
      +AuthorizedKeysFile	{{.Dir}}/id_user.pub
      +TrustedUserCAKeys {{.Dir}}/id_ecdsa.pub
      +IgnoreRhosts yes
      +RhostsRSAAuthentication no
      +HostbasedAuthentication no
      +`
      +
      +var configTmpl = template.Must(template.New("").Parse(sshd_config))
      +
      +type server struct {
      +	t          *testing.T
      +	cleanup    func() // executed during Shutdown
      +	configfile string
      +	cmd        *exec.Cmd
      +	output     bytes.Buffer // holds stderr from sshd process
      +
      +	// Client half of the network connection.
      +	clientConn net.Conn
      +}
      +
      +func username() string {
      +	var username string
      +	if user, err := user.Current(); err == nil {
      +		username = user.Username
      +	} else {
      +		// user.Current() currently requires cgo. If an error is
      +		// returned attempt to get the username from the environment.
      +		log.Printf("user.Current: %v; falling back on $USER", err)
      +		username = os.Getenv("USER")
      +	}
      +	if username == "" {
      +		panic("Unable to get username")
      +	}
      +	return username
      +}
      +
      +type storedHostKey struct {
      +	// keys map from an algorithm string to binary key data.
      +	keys map[string][]byte
      +
      +	// checkCount counts the Check calls. Used for testing
      +	// rekeying.
      +	checkCount int
      +}
      +
      +func (k *storedHostKey) Add(key ssh.PublicKey) {
      +	if k.keys == nil {
      +		k.keys = map[string][]byte{}
      +	}
      +	k.keys[key.Type()] = key.Marshal()
      +}
      +
      +func (k *storedHostKey) Check(addr string, remote net.Addr, key ssh.PublicKey) error {
      +	k.checkCount++
      +	algo := key.Type()
      +
      +	if k.keys == nil || bytes.Compare(key.Marshal(), k.keys[algo]) != 0 {
      +		return fmt.Errorf("host key mismatch. Got %q, want %q", key, k.keys[algo])
      +	}
      +	return nil
      +}
      +
      +func hostKeyDB() *storedHostKey {
      +	keyChecker := &storedHostKey{}
      +	keyChecker.Add(testPublicKeys["ecdsa"])
      +	keyChecker.Add(testPublicKeys["rsa"])
      +	keyChecker.Add(testPublicKeys["dsa"])
      +	return keyChecker
      +}
      +
      +func clientConfig() *ssh.ClientConfig {
      +	config := &ssh.ClientConfig{
      +		User: username(),
      +		Auth: []ssh.AuthMethod{
      +			ssh.PublicKeys(testSigners["user"]),
      +		},
      +		HostKeyCallback: hostKeyDB().Check,
      +	}
      +	return config
      +}
      +
      +// unixConnection creates two halves of a connected net.UnixConn.  It
      +// is used for connecting the Go SSH client with sshd without opening
      +// ports.
      +func unixConnection() (*net.UnixConn, *net.UnixConn, error) {
      +	dir, err := ioutil.TempDir("", "unixConnection")
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +	defer os.Remove(dir)
      +
      +	addr := filepath.Join(dir, "ssh")
      +	listener, err := net.Listen("unix", addr)
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +	defer listener.Close()
      +	c1, err := net.Dial("unix", addr)
      +	if err != nil {
      +		return nil, nil, err
      +	}
      +
      +	c2, err := listener.Accept()
      +	if err != nil {
      +		c1.Close()
      +		return nil, nil, err
      +	}
      +
      +	return c1.(*net.UnixConn), c2.(*net.UnixConn), nil
      +}
      +
      +func (s *server) TryDial(config *ssh.ClientConfig) (*ssh.Client, error) {
      +	sshd, err := exec.LookPath("sshd")
      +	if err != nil {
      +		s.t.Skipf("skipping test: %v", err)
      +	}
      +
      +	c1, c2, err := unixConnection()
      +	if err != nil {
      +		s.t.Fatalf("unixConnection: %v", err)
      +	}
      +
      +	s.cmd = exec.Command(sshd, "-f", s.configfile, "-i", "-e")
      +	f, err := c2.File()
      +	if err != nil {
      +		s.t.Fatalf("UnixConn.File: %v", err)
      +	}
      +	defer f.Close()
      +	s.cmd.Stdin = f
      +	s.cmd.Stdout = f
      +	s.cmd.Stderr = &s.output
      +	if err := s.cmd.Start(); err != nil {
      +		s.t.Fail()
      +		s.Shutdown()
      +		s.t.Fatalf("s.cmd.Start: %v", err)
      +	}
      +	s.clientConn = c1
      +	conn, chans, reqs, err := ssh.NewClientConn(c1, "", config)
      +	if err != nil {
      +		return nil, err
      +	}
      +	return ssh.NewClient(conn, chans, reqs), nil
      +}
      +
      +func (s *server) Dial(config *ssh.ClientConfig) *ssh.Client {
      +	conn, err := s.TryDial(config)
      +	if err != nil {
      +		s.t.Fail()
      +		s.Shutdown()
      +		s.t.Fatalf("ssh.Client: %v", err)
      +	}
      +	return conn
      +}
      +
      +func (s *server) Shutdown() {
      +	if s.cmd != nil && s.cmd.Process != nil {
      +		// Don't check for errors; if it fails it's most
      +		// likely "os: process already finished", and we don't
      +		// care about that. Use os.Interrupt, so child
      +		// processes are killed too.
      +		s.cmd.Process.Signal(os.Interrupt)
      +		s.cmd.Wait()
      +	}
      +	if s.t.Failed() {
      +		// log any output from sshd process
      +		s.t.Logf("sshd: %s", s.output.String())
      +	}
      +	s.cleanup()
      +}
      +
      +func writeFile(path string, contents []byte) {
      +	f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600)
      +	if err != nil {
      +		panic(err)
      +	}
      +	defer f.Close()
      +	if _, err := f.Write(contents); err != nil {
      +		panic(err)
      +	}
      +}
      +
      +// newServer returns a new mock ssh server.
      +func newServer(t *testing.T) *server {
      +	if testing.Short() {
      +		t.Skip("skipping test due to -short")
      +	}
      +	dir, err := ioutil.TempDir("", "sshtest")
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	f, err := os.Create(filepath.Join(dir, "sshd_config"))
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	err = configTmpl.Execute(f, map[string]string{
      +		"Dir": dir,
      +	})
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	f.Close()
      +
      +	for k, v := range testdata.PEMBytes {
      +		filename := "id_" + k
      +		writeFile(filepath.Join(dir, filename), v)
      +		writeFile(filepath.Join(dir, filename+".pub"), ssh.MarshalAuthorizedKey(testPublicKeys[k]))
      +	}
      +
      +	return &server{
      +		t:          t,
      +		configfile: f.Name(),
      +		cleanup: func() {
      +			if err := os.RemoveAll(dir); err != nil {
      +				t.Error(err)
      +			}
      +		},
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/test/testdata_test.go b/vendor/golang.org/x/crypto/ssh/test/testdata_test.go
      new file mode 100644
      index 00000000..ae48c751
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/test/testdata_test.go
      @@ -0,0 +1,64 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places:
      +// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three
      +// instances.
      +
      +package test
      +
      +import (
      +	"crypto/rand"
      +	"fmt"
      +
      +	"golang.org/x/crypto/ssh"
      +	"golang.org/x/crypto/ssh/testdata"
      +)
      +
      +var (
      +	testPrivateKeys map[string]interface{}
      +	testSigners     map[string]ssh.Signer
      +	testPublicKeys  map[string]ssh.PublicKey
      +)
      +
      +func init() {
      +	var err error
      +
      +	n := len(testdata.PEMBytes)
      +	testPrivateKeys = make(map[string]interface{}, n)
      +	testSigners = make(map[string]ssh.Signer, n)
      +	testPublicKeys = make(map[string]ssh.PublicKey, n)
      +	for t, k := range testdata.PEMBytes {
      +		testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k)
      +		if err != nil {
      +			panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
      +		}
      +		testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t])
      +		if err != nil {
      +			panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
      +		}
      +		testPublicKeys[t] = testSigners[t].PublicKey()
      +	}
      +
      +	// Create a cert and sign it for use in tests.
      +	testCert := &ssh.Certificate{
      +		Nonce:           []byte{},                       // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
      +		ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
      +		ValidAfter:      0,                              // unix epoch
      +		ValidBefore:     ssh.CertTimeInfinity,           // The end of currently representable time.
      +		Reserved:        []byte{},                       // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
      +		Key:             testPublicKeys["ecdsa"],
      +		SignatureKey:    testPublicKeys["rsa"],
      +		Permissions: ssh.Permissions{
      +			CriticalOptions: map[string]string{},
      +			Extensions:      map[string]string{},
      +		},
      +	}
      +	testCert.SignCert(rand.Reader, testSigners["rsa"])
      +	testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
      +	testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"])
      +	if err != nil {
      +		panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/testdata/doc.go b/vendor/golang.org/x/crypto/ssh/testdata/doc.go
      new file mode 100644
      index 00000000..fcae47ca
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/testdata/doc.go
      @@ -0,0 +1,8 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This package contains test data shared between the various subpackages of
      +// the golang.org/x/crypto/ssh package. Under no circumstance should
      +// this data be used for production code.
      +package testdata // import "golang.org/x/crypto/ssh/testdata"
      diff --git a/vendor/golang.org/x/crypto/ssh/testdata/keys.go b/vendor/golang.org/x/crypto/ssh/testdata/keys.go
      new file mode 100644
      index 00000000..5ff1c0e0
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/testdata/keys.go
      @@ -0,0 +1,43 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package testdata
      +
      +var PEMBytes = map[string][]byte{
      +	"dsa": []byte(`-----BEGIN DSA PRIVATE KEY-----
      +MIIBuwIBAAKBgQD6PDSEyXiI9jfNs97WuM46MSDCYlOqWw80ajN16AohtBncs1YB
      +lHk//dQOvCYOsYaE+gNix2jtoRjwXhDsc25/IqQbU1ahb7mB8/rsaILRGIbA5WH3
      +EgFtJmXFovDz3if6F6TzvhFpHgJRmLYVR8cqsezL3hEZOvvs2iH7MorkxwIVAJHD
      +nD82+lxh2fb4PMsIiaXudAsBAoGAQRf7Q/iaPRn43ZquUhd6WwvirqUj+tkIu6eV
      +2nZWYmXLlqFQKEy4Tejl7Wkyzr2OSYvbXLzo7TNxLKoWor6ips0phYPPMyXld14r
      +juhT24CrhOzuLMhDduMDi032wDIZG4Y+K7ElU8Oufn8Sj5Wge8r6ANmmVgmFfynr
      +FhdYCngCgYEA3ucGJ93/Mx4q4eKRDxcWD3QzWyqpbRVRRV1Vmih9Ha/qC994nJFz
      +DQIdjxDIT2Rk2AGzMqFEB68Zc3O+Wcsmz5eWWzEwFxaTwOGWTyDqsDRLm3fD+QYj
      +nOwuxb0Kce+gWI8voWcqC9cyRm09jGzu2Ab3Bhtpg8JJ8L7gS3MRZK4CFEx4UAfY
      +Fmsr0W6fHB9nhS4/UXM8
      +-----END DSA PRIVATE KEY-----
      +`),
      +	"ecdsa": []byte(`-----BEGIN EC PRIVATE KEY-----
      +MHcCAQEEINGWx0zo6fhJ/0EAfrPzVFyFC9s18lBt3cRoEDhS3ARooAoGCCqGSM49
      +AwEHoUQDQgAEi9Hdw6KvZcWxfg2IDhA7UkpDtzzt6ZqJXSsFdLd+Kx4S3Sx4cVO+
      +6/ZOXRnPmNAlLUqjShUsUBBngG0u2fqEqA==
      +-----END EC PRIVATE KEY-----
      +`),
      +	"rsa": []byte(`-----BEGIN RSA PRIVATE KEY-----
      +MIIBOwIBAAJBALdGZxkXDAjsYk10ihwU6Id2KeILz1TAJuoq4tOgDWxEEGeTrcld
      +r/ZwVaFzjWzxaf6zQIJbfaSEAhqD5yo72+sCAwEAAQJBAK8PEVU23Wj8mV0QjwcJ
      +tZ4GcTUYQL7cF4+ezTCE9a1NrGnCP2RuQkHEKxuTVrxXt+6OF15/1/fuXnxKjmJC
      +nxkCIQDaXvPPBi0c7vAxGwNY9726x01/dNbHCE0CBtcotobxpwIhANbbQbh3JHVW
      +2haQh4fAG5mhesZKAGcxTyv4mQ7uMSQdAiAj+4dzMpJWdSzQ+qGHlHMIBvVHLkqB
      +y2VdEyF7DPCZewIhAI7GOI/6LDIFOvtPo6Bj2nNmyQ1HU6k/LRtNIXi4c9NJAiAr
      +rrxx26itVhJmcvoUhOjwuzSlP2bE5VHAvkGB352YBg==
      +-----END RSA PRIVATE KEY-----
      +`),
      +	"user": []byte(`-----BEGIN EC PRIVATE KEY-----
      +MHcCAQEEILYCAeq8f7V4vSSypRw7pxy8yz3V5W4qg8kSC3zJhqpQoAoGCCqGSM49
      +AwEHoUQDQgAEYcO2xNKiRUYOLEHM7VYAp57HNyKbOdYtHD83Z4hzNPVC4tM5mdGD
      +PLL8IEwvYu2wq+lpXfGQnNMbzYf9gspG0w==
      +-----END EC PRIVATE KEY-----
      +`),
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/testdata_test.go b/vendor/golang.org/x/crypto/ssh/testdata_test.go
      new file mode 100644
      index 00000000..f2828c1b
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/testdata_test.go
      @@ -0,0 +1,63 @@
      +// Copyright 2014 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places:
      +// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three
      +// instances.
      +
      +package ssh
      +
      +import (
      +	"crypto/rand"
      +	"fmt"
      +
      +	"golang.org/x/crypto/ssh/testdata"
      +)
      +
      +var (
      +	testPrivateKeys map[string]interface{}
      +	testSigners     map[string]Signer
      +	testPublicKeys  map[string]PublicKey
      +)
      +
      +func init() {
      +	var err error
      +
      +	n := len(testdata.PEMBytes)
      +	testPrivateKeys = make(map[string]interface{}, n)
      +	testSigners = make(map[string]Signer, n)
      +	testPublicKeys = make(map[string]PublicKey, n)
      +	for t, k := range testdata.PEMBytes {
      +		testPrivateKeys[t], err = ParseRawPrivateKey(k)
      +		if err != nil {
      +			panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
      +		}
      +		testSigners[t], err = NewSignerFromKey(testPrivateKeys[t])
      +		if err != nil {
      +			panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
      +		}
      +		testPublicKeys[t] = testSigners[t].PublicKey()
      +	}
      +
      +	// Create a cert and sign it for use in tests.
      +	testCert := &Certificate{
      +		Nonce:           []byte{},                       // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
      +		ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
      +		ValidAfter:      0,                              // unix epoch
      +		ValidBefore:     CertTimeInfinity,               // The end of currently representable time.
      +		Reserved:        []byte{},                       // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
      +		Key:             testPublicKeys["ecdsa"],
      +		SignatureKey:    testPublicKeys["rsa"],
      +		Permissions: Permissions{
      +			CriticalOptions: map[string]string{},
      +			Extensions:      map[string]string{},
      +		},
      +	}
      +	testCert.SignCert(rand.Reader, testSigners["rsa"])
      +	testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
      +	testSigners["cert"], err = NewCertSigner(testCert, testSigners["ecdsa"])
      +	if err != nil {
      +		panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go
      new file mode 100644
      index 00000000..8351d378
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/transport.go
      @@ -0,0 +1,332 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bufio"
      +	"errors"
      +	"io"
      +)
      +
      +const (
      +	gcmCipherID = "aes128-gcm@openssh.com"
      +	aes128cbcID = "aes128-cbc"
      +)
      +
      +// packetConn represents a transport that implements packet based
      +// operations.
      +type packetConn interface {
      +	// Encrypt and send a packet of data to the remote peer.
      +	writePacket(packet []byte) error
      +
      +	// Read a packet from the connection
      +	readPacket() ([]byte, error)
      +
      +	// Close closes the write-side of the connection.
      +	Close() error
      +}
      +
      +// transport is the keyingTransport that implements the SSH packet
      +// protocol.
      +type transport struct {
      +	reader connectionState
      +	writer connectionState
      +
      +	bufReader *bufio.Reader
      +	bufWriter *bufio.Writer
      +	rand      io.Reader
      +
      +	io.Closer
      +
      +	// Initial H used for the session ID. Once assigned this does
      +	// not change, even during subsequent key exchanges.
      +	sessionID []byte
      +}
      +
      +// getSessionID returns the ID of the SSH connection. The return value
      +// should not be modified.
      +func (t *transport) getSessionID() []byte {
      +	if t.sessionID == nil {
      +		panic("session ID not set yet")
      +	}
      +	return t.sessionID
      +}
      +
      +// packetCipher represents a combination of SSH encryption/MAC
      +// protocol.  A single instance should be used for one direction only.
      +type packetCipher interface {
      +	// writePacket encrypts the packet and writes it to w. The
      +	// contents of the packet are generally scrambled.
      +	writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error
      +
      +	// readPacket reads and decrypts a packet of data. The
      +	// returned packet may be overwritten by future calls of
      +	// readPacket.
      +	readPacket(seqnum uint32, r io.Reader) ([]byte, error)
      +}
      +
      +// connectionState represents one side (read or write) of the
      +// connection. This is necessary because each direction has its own
      +// keys, and can even have its own algorithms
      +type connectionState struct {
      +	packetCipher
      +	seqNum           uint32
      +	dir              direction
      +	pendingKeyChange chan packetCipher
      +}
      +
      +// prepareKeyChange sets up key material for a keychange. The key changes in
      +// both directions are triggered by reading and writing a msgNewKey packet
      +// respectively.
      +func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error {
      +	if t.sessionID == nil {
      +		t.sessionID = kexResult.H
      +	}
      +
      +	kexResult.SessionID = t.sessionID
      +
      +	if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil {
      +		return err
      +	} else {
      +		t.reader.pendingKeyChange <- ciph
      +	}
      +
      +	if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil {
      +		return err
      +	} else {
      +		t.writer.pendingKeyChange <- ciph
      +	}
      +
      +	return nil
      +}
      +
      +// Read and decrypt next packet.
      +func (t *transport) readPacket() ([]byte, error) {
      +	return t.reader.readPacket(t.bufReader)
      +}
      +
      +func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
      +	packet, err := s.packetCipher.readPacket(s.seqNum, r)
      +	s.seqNum++
      +	if err == nil && len(packet) == 0 {
      +		err = errors.New("ssh: zero length packet")
      +	}
      +
      +	if len(packet) > 0 && packet[0] == msgNewKeys {
      +		select {
      +		case cipher := <-s.pendingKeyChange:
      +			s.packetCipher = cipher
      +		default:
      +			return nil, errors.New("ssh: got bogus newkeys message.")
      +		}
      +	}
      +
      +	// The packet may point to an internal buffer, so copy the
      +	// packet out here.
      +	fresh := make([]byte, len(packet))
      +	copy(fresh, packet)
      +
      +	return fresh, err
      +}
      +
      +func (t *transport) writePacket(packet []byte) error {
      +	return t.writer.writePacket(t.bufWriter, t.rand, packet)
      +}
      +
      +func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
      +	changeKeys := len(packet) > 0 && packet[0] == msgNewKeys
      +
      +	err := s.packetCipher.writePacket(s.seqNum, w, rand, packet)
      +	if err != nil {
      +		return err
      +	}
      +	if err = w.Flush(); err != nil {
      +		return err
      +	}
      +	s.seqNum++
      +	if changeKeys {
      +		select {
      +		case cipher := <-s.pendingKeyChange:
      +			s.packetCipher = cipher
      +		default:
      +			panic("ssh: no key material for msgNewKeys")
      +		}
      +	}
      +	return err
      +}
      +
      +func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport {
      +	t := &transport{
      +		bufReader: bufio.NewReader(rwc),
      +		bufWriter: bufio.NewWriter(rwc),
      +		rand:      rand,
      +		reader: connectionState{
      +			packetCipher:     &streamPacketCipher{cipher: noneCipher{}},
      +			pendingKeyChange: make(chan packetCipher, 1),
      +		},
      +		writer: connectionState{
      +			packetCipher:     &streamPacketCipher{cipher: noneCipher{}},
      +			pendingKeyChange: make(chan packetCipher, 1),
      +		},
      +		Closer: rwc,
      +	}
      +	if isClient {
      +		t.reader.dir = serverKeys
      +		t.writer.dir = clientKeys
      +	} else {
      +		t.reader.dir = clientKeys
      +		t.writer.dir = serverKeys
      +	}
      +
      +	return t
      +}
      +
      +type direction struct {
      +	ivTag     []byte
      +	keyTag    []byte
      +	macKeyTag []byte
      +}
      +
      +var (
      +	serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}}
      +	clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
      +)
      +
      +// generateKeys generates key material for IV, MAC and encryption.
      +func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) {
      +	cipherMode := cipherModes[algs.Cipher]
      +	macMode := macModes[algs.MAC]
      +
      +	iv = make([]byte, cipherMode.ivSize)
      +	key = make([]byte, cipherMode.keySize)
      +	macKey = make([]byte, macMode.keySize)
      +
      +	generateKeyMaterial(iv, d.ivTag, kex)
      +	generateKeyMaterial(key, d.keyTag, kex)
      +	generateKeyMaterial(macKey, d.macKeyTag, kex)
      +	return
      +}
      +
+// newPacketCipher returns a packetCipher with cipher and MAC keys derived
+// from kex.K, kex.H and sessionId, as described in RFC 4253, section 6.4.
+// direction should either be serverKeys (to setup server->client keys) or clientKeys (for client->server keys).
      +func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
      +	iv, key, macKey := generateKeys(d, algs, kex)
      +
      +	if algs.Cipher == gcmCipherID {
      +		return newGCMCipher(iv, key, macKey)
      +	}
      +
      +	if algs.Cipher == aes128cbcID {
      +		return newAESCBCCipher(iv, key, macKey, algs)
      +	}
      +
      +	c := &streamPacketCipher{
      +		mac: macModes[algs.MAC].new(macKey),
      +	}
      +	c.macResult = make([]byte, c.mac.Size())
      +
      +	var err error
      +	c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	return c, nil
      +}
      +
      +// generateKeyMaterial fills out with key material generated from tag, K, H
      +// and sessionId, as specified in RFC 4253, section 7.2.
      +func generateKeyMaterial(out, tag []byte, r *kexResult) {
      +	var digestsSoFar []byte
      +
      +	h := r.Hash.New()
      +	for len(out) > 0 {
      +		h.Reset()
      +		h.Write(r.K)
      +		h.Write(r.H)
      +
      +		if len(digestsSoFar) == 0 {
      +			h.Write(tag)
      +			h.Write(r.SessionID)
      +		} else {
      +			h.Write(digestsSoFar)
      +		}
      +
      +		digest := h.Sum(nil)
      +		n := copy(out, digest)
      +		out = out[n:]
      +		if len(out) > 0 {
      +			digestsSoFar = append(digestsSoFar, digest...)
      +		}
      +	}
      +}
      +
      +const packageVersion = "SSH-2.0-Go"
      +
+// exchangeVersions sends our version line and returns the other side's
+// version line. The versionLine string should be US ASCII, start with
+// "SSH-2.0-", and should not include a newline.
      +func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) {
      +	// Contrary to the RFC, we do not ignore lines that don't
      +	// start with "SSH-2.0-" to make the library usable with
      +	// nonconforming servers.
      +	for _, c := range versionLine {
      +		// The spec disallows non US-ASCII chars, and
      +		// specifically forbids null chars.
      +		if c < 32 {
      +			return nil, errors.New("ssh: junk character in version line")
      +		}
      +	}
      +	if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil {
      +		return
      +	}
      +
      +	them, err = readVersion(rw)
      +	return them, err
      +}
      +
+// maxVersionStringBytes is the maximum number of bytes that we'll
+// accept as a version string. RFC 4253 section 4.2 limits this to
+// 255 chars.
      +const maxVersionStringBytes = 255
      +
      +// Read version string as specified by RFC 4253, section 4.2.
      +func readVersion(r io.Reader) ([]byte, error) {
      +	versionString := make([]byte, 0, 64)
      +	var ok bool
      +	var buf [1]byte
      +
      +	for len(versionString) < maxVersionStringBytes {
      +		_, err := io.ReadFull(r, buf[:])
      +		if err != nil {
      +			return nil, err
      +		}
      +		// The RFC says that the version should be terminated with \r\n
      +		// but several SSH servers actually only send a \n.
      +		if buf[0] == '\n' {
      +			ok = true
      +			break
      +		}
      +
      +		// non ASCII chars are disallowed, but we are lenient,
      +		// since Go doesn't use null-terminated strings.
      +
      +		// The RFC allows a comment after a space, however,
      +		// all of it (version and comments) goes into the
      +		// session hash.
      +		versionString = append(versionString, buf[0])
      +	}
      +
      +	if !ok {
      +		return nil, errors.New("ssh: overflow reading version string")
      +	}
      +
      +	// There might be a '\r' on the end which we should remove.
      +	if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' {
      +		versionString = versionString[:len(versionString)-1]
      +	}
      +	return versionString, nil
      +}
      diff --git a/vendor/golang.org/x/crypto/ssh/transport_test.go b/vendor/golang.org/x/crypto/ssh/transport_test.go
      new file mode 100644
      index 00000000..92d83abf
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/ssh/transport_test.go
      @@ -0,0 +1,109 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package ssh
      +
      +import (
      +	"bytes"
      +	"crypto/rand"
      +	"encoding/binary"
      +	"strings"
      +	"testing"
      +)
      +
      +func TestReadVersion(t *testing.T) {
      +	longversion := strings.Repeat("SSH-2.0-bla", 50)[:253]
      +	cases := map[string]string{
      +		"SSH-2.0-bla\r\n":    "SSH-2.0-bla",
      +		"SSH-2.0-bla\n":      "SSH-2.0-bla",
      +		longversion + "\r\n": longversion,
      +	}
      +
      +	for in, want := range cases {
      +		result, err := readVersion(bytes.NewBufferString(in))
      +		if err != nil {
      +			t.Errorf("readVersion(%q): %s", in, err)
      +		}
      +		got := string(result)
      +		if got != want {
      +			t.Errorf("got %q, want %q", got, want)
      +		}
      +	}
      +}
      +
      +func TestReadVersionError(t *testing.T) {
      +	longversion := strings.Repeat("SSH-2.0-bla", 50)[:253]
      +	cases := []string{
      +		longversion + "too-long\r\n",
      +	}
      +	for _, in := range cases {
      +		if _, err := readVersion(bytes.NewBufferString(in)); err == nil {
      +			t.Errorf("readVersion(%q) should have failed", in)
      +		}
      +	}
      +}
      +
      +func TestExchangeVersionsBasic(t *testing.T) {
      +	v := "SSH-2.0-bla"
      +	buf := bytes.NewBufferString(v + "\r\n")
      +	them, err := exchangeVersions(buf, []byte("xyz"))
      +	if err != nil {
      +		t.Errorf("exchangeVersions: %v", err)
      +	}
      +
      +	if want := "SSH-2.0-bla"; string(them) != want {
      +		t.Errorf("got %q want %q for our version", them, want)
      +	}
      +}
      +
      +func TestExchangeVersions(t *testing.T) {
      +	cases := []string{
      +		"not\x000allowed",
      +		"not allowed\n",
      +	}
      +	for _, c := range cases {
      +		buf := bytes.NewBufferString("SSH-2.0-bla\r\n")
      +		if _, err := exchangeVersions(buf, []byte(c)); err == nil {
      +			t.Errorf("exchangeVersions(%q): should have failed", c)
      +		}
      +	}
      +}
      +
      +type closerBuffer struct {
      +	bytes.Buffer
      +}
      +
      +func (b *closerBuffer) Close() error {
      +	return nil
      +}
      +
      +func TestTransportMaxPacketWrite(t *testing.T) {
      +	buf := &closerBuffer{}
      +	tr := newTransport(buf, rand.Reader, true)
      +	huge := make([]byte, maxPacket+1)
      +	err := tr.writePacket(huge)
      +	if err == nil {
      +		t.Errorf("transport accepted write for a huge packet.")
      +	}
      +}
      +
      +func TestTransportMaxPacketReader(t *testing.T) {
      +	var header [5]byte
      +	huge := make([]byte, maxPacket+128)
      +	binary.BigEndian.PutUint32(header[0:], uint32(len(huge)))
      +	// padding.
      +	header[4] = 0
      +
      +	buf := &closerBuffer{}
      +	buf.Write(header[:])
      +	buf.Write(huge)
      +
      +	tr := newTransport(buf, rand.Reader, true)
      +	_, err := tr.readPacket()
      +	if err == nil {
      +		t.Errorf("transport succeeded reading huge packet.")
      +	} else if !strings.Contains(err.Error(), "large") {
      +		t.Errorf("got %q, should mention %q", err.Error(), "large")
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/tea/cipher.go b/vendor/golang.org/x/crypto/tea/cipher.go
      new file mode 100644
      index 00000000..9c13d12a
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/tea/cipher.go
      @@ -0,0 +1,109 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
+
+// Package tea implements the TEA algorithm, as defined in Needham and
+// Wheeler's 1994 technical report, “TEA, a Tiny Encryption Algorithm”.
+// See http://www.cix.co.uk/~klockstone/tea.pdf for details. TEA operates
+// on 64-bit blocks with a 128-bit key.
+package tea
+
      +import (
      +	"crypto/cipher"
      +	"encoding/binary"
      +	"errors"
      +)
      +
      +const (
      +	// BlockSize is the size of a TEA block, in bytes.
      +	BlockSize = 8
      +
      +	// KeySize is the size of a TEA key, in bytes.
      +	KeySize = 16
      +
      +	// delta is the TEA key schedule constant.
      +	delta = 0x9e3779b9
      +
      +	// numRounds is the standard number of rounds in TEA.
      +	numRounds = 64
      +)
      +
      +// tea is an instance of the TEA cipher with a particular key.
      +type tea struct {
      +	key    [16]byte
      +	rounds int
      +}
      +
      +// NewCipher returns an instance of the TEA cipher with the standard number of
      +// rounds. The key argument must be 16 bytes long.
      +func NewCipher(key []byte) (cipher.Block, error) {
      +	return NewCipherWithRounds(key, numRounds)
      +}
      +
      +// NewCipherWithRounds returns an instance of the TEA cipher with a given
      +// number of rounds, which must be even. The key argument must be 16 bytes
      +// long.
      +func NewCipherWithRounds(key []byte, rounds int) (cipher.Block, error) {
      +	if len(key) != 16 {
      +		return nil, errors.New("tea: incorrect key size")
      +	}
      +
      +	if rounds&1 != 0 {
      +		return nil, errors.New("tea: odd number of rounds specified")
      +	}
      +
      +	c := &tea{
      +		rounds: rounds,
      +	}
      +	copy(c.key[:], key)
      +
      +	return c, nil
      +}
      +
      +// BlockSize returns the TEA block size, which is eight bytes. It is necessary
      +// to satisfy the Block interface in the package "crypto/cipher".
      +func (*tea) BlockSize() int {
      +	return BlockSize
      +}
      +
      +// Encrypt encrypts the 8 byte buffer src using the key in t and stores the
      +// result in dst. Note that for amounts of data larger than a block, it is not
      +// safe to just call Encrypt on successive blocks; instead, use an encryption
      +// mode like CBC (see crypto/cipher/cbc.go).
      +func (t *tea) Encrypt(dst, src []byte) {
      +	e := binary.BigEndian
      +	v0, v1 := e.Uint32(src), e.Uint32(src[4:])
      +	k0, k1, k2, k3 := e.Uint32(t.key[0:]), e.Uint32(t.key[4:]), e.Uint32(t.key[8:]), e.Uint32(t.key[12:])
      +
      +	sum := uint32(0)
      +	delta := uint32(delta)
      +
      +	for i := 0; i < t.rounds/2; i++ {
      +		sum += delta
      +		v0 += ((v1 << 4) + k0) ^ (v1 + sum) ^ ((v1 >> 5) + k1)
      +		v1 += ((v0 << 4) + k2) ^ (v0 + sum) ^ ((v0 >> 5) + k3)
      +	}
      +
      +	e.PutUint32(dst, v0)
      +	e.PutUint32(dst[4:], v1)
      +}
      +
      +// Decrypt decrypts the 8 byte buffer src using the key in t and stores the
      +// result in dst.
      +func (t *tea) Decrypt(dst, src []byte) {
      +	e := binary.BigEndian
      +	v0, v1 := e.Uint32(src), e.Uint32(src[4:])
      +	k0, k1, k2, k3 := e.Uint32(t.key[0:]), e.Uint32(t.key[4:]), e.Uint32(t.key[8:]), e.Uint32(t.key[12:])
      +
      +	delta := uint32(delta)
      +	sum := delta * uint32(t.rounds/2) // in general, sum = delta * n
      +
      +	for i := 0; i < t.rounds/2; i++ {
      +		v1 -= ((v0 << 4) + k2) ^ (v0 + sum) ^ ((v0 >> 5) + k3)
      +		v0 -= ((v1 << 4) + k0) ^ (v1 + sum) ^ ((v1 >> 5) + k1)
      +		sum -= delta
      +	}
      +
      +	e.PutUint32(dst, v0)
      +	e.PutUint32(dst[4:], v1)
      +}
      diff --git a/vendor/golang.org/x/crypto/tea/tea_test.go b/vendor/golang.org/x/crypto/tea/tea_test.go
      new file mode 100644
      index 00000000..eb98d1e0
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/tea/tea_test.go
      @@ -0,0 +1,93 @@
      +// Copyright 2015 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package tea
      +
      +import (
      +	"bytes"
      +	"testing"
      +)
      +
      +// A sample test key for when we just want to initialize a cipher
      +var testKey = []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF}
      +
      +// Test that the block size for tea is correct
      +func TestBlocksize(t *testing.T) {
      +	c, err := NewCipher(testKey)
      +	if err != nil {
      +		t.Fatalf("NewCipher returned error: %s", err)
      +	}
      +
      +	if result := c.BlockSize(); result != BlockSize {
      +		t.Errorf("cipher.BlockSize returned %d, but expected %d", result, BlockSize)
      +	}
      +}
      +
      +// Test that invalid key sizes return an error
      +func TestInvalidKeySize(t *testing.T) {
      +	var key [KeySize + 1]byte
      +
      +	if _, err := NewCipher(key[:]); err == nil {
      +		t.Errorf("invalid key size %d didn't result in an error.", len(key))
      +	}
      +
      +	if _, err := NewCipher(key[:KeySize-1]); err == nil {
      +		t.Errorf("invalid key size %d didn't result in an error.", KeySize-1)
      +	}
      +}
      +
      +// Test Vectors
      +type teaTest struct {
      +	rounds     int
      +	key        []byte
      +	plaintext  []byte
      +	ciphertext []byte
      +}
      +
      +var teaTests = []teaTest{
      +	// These were sourced from https://github.com/froydnj/ironclad/blob/master/testing/test-vectors/tea.testvec
      +	{
      +		numRounds,
      +		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
      +		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
      +		[]byte{0x41, 0xea, 0x3a, 0x0a, 0x94, 0xba, 0xa9, 0x40},
      +	},
      +	{
      +		numRounds,
      +		[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
      +		[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
      +		[]byte{0x31, 0x9b, 0xbe, 0xfb, 0x01, 0x6a, 0xbd, 0xb2},
      +	},
      +	{
      +		16,
      +		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
      +		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
      +		[]byte{0xed, 0x28, 0x5d, 0xa1, 0x45, 0x5b, 0x33, 0xc1},
      +	},
      +}
      +
      +// Test encryption
      +func TestCipherEncrypt(t *testing.T) {
      +	// Test encryption with standard 64 rounds
      +	for i, test := range teaTests {
      +		c, err := NewCipherWithRounds(test.key, test.rounds)
      +		if err != nil {
      +			t.Fatalf("#%d: NewCipher returned error: %s", i, err)
      +		}
      +
      +		var ciphertext [BlockSize]byte
      +		c.Encrypt(ciphertext[:], test.plaintext)
      +
      +		if !bytes.Equal(ciphertext[:], test.ciphertext) {
      +			t.Errorf("#%d: incorrect ciphertext. Got %x, wanted %x", i, ciphertext, test.ciphertext)
      +		}
      +
      +		var plaintext2 [BlockSize]byte
      +		c.Decrypt(plaintext2[:], ciphertext[:])
      +
      +		if !bytes.Equal(plaintext2[:], test.plaintext) {
      +			t.Errorf("#%d: incorrect plaintext. Got %x, wanted %x", i, plaintext2, test.plaintext)
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/crypto/twofish/twofish.go b/vendor/golang.org/x/crypto/twofish/twofish.go
      new file mode 100644
      index 00000000..376fa0ec
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/twofish/twofish.go
      @@ -0,0 +1,342 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package twofish implements Bruce Schneier's Twofish encryption algorithm.
      +package twofish // import "golang.org/x/crypto/twofish"
      +
      +// Twofish is defined in http://www.schneier.com/paper-twofish-paper.pdf [TWOFISH]
      +
      +// This code is a port of the LibTom C implementation.
      +// See http://libtom.org/?page=features&newsitems=5&whatfile=crypt.
      +// LibTomCrypt is free for all purposes under the public domain.
      +// It was heavily inspired by the go blowfish package.
      +
      +import "strconv"
      +
      +// BlockSize is the constant block size of Twofish.
      +const BlockSize = 16
      +
      +const mdsPolynomial = 0x169 // x^8 + x^6 + x^5 + x^3 + 1, see [TWOFISH] 4.2
      +const rsPolynomial = 0x14d  // x^8 + x^6 + x^3 + x^2 + 1, see [TWOFISH] 4.3
      +
      +// A Cipher is an instance of Twofish encryption using a particular key.
      +type Cipher struct {
      +	s [4][256]uint32
      +	k [40]uint32
      +}
      +
      +type KeySizeError int
      +
      +func (k KeySizeError) Error() string {
      +	return "crypto/twofish: invalid key size " + strconv.Itoa(int(k))
      +}
      +
      +// NewCipher creates and returns a Cipher.
      +// The key argument should be the Twofish key, 16, 24 or 32 bytes.
      +func NewCipher(key []byte) (*Cipher, error) {
      +	keylen := len(key)
      +
      +	if keylen != 16 && keylen != 24 && keylen != 32 {
      +		return nil, KeySizeError(keylen)
      +	}
      +
      +	// k is the number of 64 bit words in key
      +	k := keylen / 8
      +
      +	// Create the S[..] words
      +	var S [4 * 4]byte
      +	for i := 0; i < k; i++ {
      +		// Computes [y0 y1 y2 y3] = rs . [x0 x1 x2 x3 x4 x5 x6 x7]
      +		for j, rsRow := range rs {
      +			for k, rsVal := range rsRow {
      +				S[4*i+j] ^= gfMult(key[8*i+k], rsVal, rsPolynomial)
      +			}
      +		}
      +	}
      +
      +	// Calculate subkeys
      +	c := new(Cipher)
      +	var tmp [4]byte
      +	for i := byte(0); i < 20; i++ {
      +		// A = h(p * 2x, Me)
      +		for j := range tmp {
      +			tmp[j] = 2 * i
      +		}
      +		A := h(tmp[:], key, 0)
      +
      +		// B = rolc(h(p * (2x + 1), Mo), 8)
      +		for j := range tmp {
      +			tmp[j] = 2*i + 1
      +		}
      +		B := h(tmp[:], key, 1)
      +		B = rol(B, 8)
      +
      +		c.k[2*i] = A + B
      +
      +		// K[2i+1] = (A + 2B) <<< 9
      +		c.k[2*i+1] = rol(2*B+A, 9)
      +	}
      +
      +	// Calculate sboxes
      +	switch k {
      +	case 2:
      +		for i := range c.s[0] {
      +			c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][byte(i)]^S[0]]^S[4]], 0)
      +			c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][byte(i)]^S[1]]^S[5]], 1)
      +			c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][byte(i)]^S[2]]^S[6]], 2)
      +			c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][byte(i)]^S[3]]^S[7]], 3)
      +		}
      +	case 3:
      +		for i := range c.s[0] {
      +			c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]], 0)
      +			c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[1]]^S[5]]^S[9]], 1)
      +			c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]], 2)
      +			c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[3]]^S[7]]^S[11]], 3)
      +		}
      +	default:
      +		for i := range c.s[0] {
      +			c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]]^S[12]], 0)
      +			c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[1]]^S[5]]^S[9]]^S[13]], 1)
      +			c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]]^S[14]], 2)
      +			c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][sbox[1][byte(i)]^S[3]]^S[7]]^S[11]]^S[15]], 3)
      +		}
      +	}
      +
      +	return c, nil
      +}
      +
      +// BlockSize returns the Twofish block size, 16 bytes.
      +func (c *Cipher) BlockSize() int { return BlockSize }
      +
      +// store32l stores src in dst in little-endian form.
      +func store32l(dst []byte, src uint32) {
      +	dst[0] = byte(src)
      +	dst[1] = byte(src >> 8)
      +	dst[2] = byte(src >> 16)
      +	dst[3] = byte(src >> 24)
      +	return
      +}
      +
      +// load32l reads a little-endian uint32 from src.
      +func load32l(src []byte) uint32 {
      +	return uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24
      +}
      +
      +// rol returns x after a left circular rotation of y bits.
      +func rol(x, y uint32) uint32 {
      +	return (x << (y & 31)) | (x >> (32 - (y & 31)))
      +}
      +
      +// ror returns x after a right circular rotation of y bits.
      +func ror(x, y uint32) uint32 {
      +	return (x >> (y & 31)) | (x << (32 - (y & 31)))
      +}
      +
      +// The RS matrix. See [TWOFISH] 4.3
      +var rs = [4][8]byte{
      +	{0x01, 0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E},
      +	{0xA4, 0x56, 0x82, 0xF3, 0x1E, 0xC6, 0x68, 0xE5},
      +	{0x02, 0xA1, 0xFC, 0xC1, 0x47, 0xAE, 0x3D, 0x19},
      +	{0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E, 0x03},
      +}
      +
      +// sbox tables
      +var sbox = [2][256]byte{
      +	{
      +		0xa9, 0x67, 0xb3, 0xe8, 0x04, 0xfd, 0xa3, 0x76, 0x9a, 0x92, 0x80, 0x78, 0xe4, 0xdd, 0xd1, 0x38,
      +		0x0d, 0xc6, 0x35, 0x98, 0x18, 0xf7, 0xec, 0x6c, 0x43, 0x75, 0x37, 0x26, 0xfa, 0x13, 0x94, 0x48,
      +		0xf2, 0xd0, 0x8b, 0x30, 0x84, 0x54, 0xdf, 0x23, 0x19, 0x5b, 0x3d, 0x59, 0xf3, 0xae, 0xa2, 0x82,
      +		0x63, 0x01, 0x83, 0x2e, 0xd9, 0x51, 0x9b, 0x7c, 0xa6, 0xeb, 0xa5, 0xbe, 0x16, 0x0c, 0xe3, 0x61,
      +		0xc0, 0x8c, 0x3a, 0xf5, 0x73, 0x2c, 0x25, 0x0b, 0xbb, 0x4e, 0x89, 0x6b, 0x53, 0x6a, 0xb4, 0xf1,
      +		0xe1, 0xe6, 0xbd, 0x45, 0xe2, 0xf4, 0xb6, 0x66, 0xcc, 0x95, 0x03, 0x56, 0xd4, 0x1c, 0x1e, 0xd7,
      +		0xfb, 0xc3, 0x8e, 0xb5, 0xe9, 0xcf, 0xbf, 0xba, 0xea, 0x77, 0x39, 0xaf, 0x33, 0xc9, 0x62, 0x71,
      +		0x81, 0x79, 0x09, 0xad, 0x24, 0xcd, 0xf9, 0xd8, 0xe5, 0xc5, 0xb9, 0x4d, 0x44, 0x08, 0x86, 0xe7,
      +		0xa1, 0x1d, 0xaa, 0xed, 0x06, 0x70, 0xb2, 0xd2, 0x41, 0x7b, 0xa0, 0x11, 0x31, 0xc2, 0x27, 0x90,
      +		0x20, 0xf6, 0x60, 0xff, 0x96, 0x5c, 0xb1, 0xab, 0x9e, 0x9c, 0x52, 0x1b, 0x5f, 0x93, 0x0a, 0xef,
      +		0x91, 0x85, 0x49, 0xee, 0x2d, 0x4f, 0x8f, 0x3b, 0x47, 0x87, 0x6d, 0x46, 0xd6, 0x3e, 0x69, 0x64,
      +		0x2a, 0xce, 0xcb, 0x2f, 0xfc, 0x97, 0x05, 0x7a, 0xac, 0x7f, 0xd5, 0x1a, 0x4b, 0x0e, 0xa7, 0x5a,
      +		0x28, 0x14, 0x3f, 0x29, 0x88, 0x3c, 0x4c, 0x02, 0xb8, 0xda, 0xb0, 0x17, 0x55, 0x1f, 0x8a, 0x7d,
      +		0x57, 0xc7, 0x8d, 0x74, 0xb7, 0xc4, 0x9f, 0x72, 0x7e, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34,
      +		0x6e, 0x50, 0xde, 0x68, 0x65, 0xbc, 0xdb, 0xf8, 0xc8, 0xa8, 0x2b, 0x40, 0xdc, 0xfe, 0x32, 0xa4,
      +		0xca, 0x10, 0x21, 0xf0, 0xd3, 0x5d, 0x0f, 0x00, 0x6f, 0x9d, 0x36, 0x42, 0x4a, 0x5e, 0xc1, 0xe0,
      +	},
      +	{
      +		0x75, 0xf3, 0xc6, 0xf4, 0xdb, 0x7b, 0xfb, 0xc8, 0x4a, 0xd3, 0xe6, 0x6b, 0x45, 0x7d, 0xe8, 0x4b,
      +		0xd6, 0x32, 0xd8, 0xfd, 0x37, 0x71, 0xf1, 0xe1, 0x30, 0x0f, 0xf8, 0x1b, 0x87, 0xfa, 0x06, 0x3f,
      +		0x5e, 0xba, 0xae, 0x5b, 0x8a, 0x00, 0xbc, 0x9d, 0x6d, 0xc1, 0xb1, 0x0e, 0x80, 0x5d, 0xd2, 0xd5,
      +		0xa0, 0x84, 0x07, 0x14, 0xb5, 0x90, 0x2c, 0xa3, 0xb2, 0x73, 0x4c, 0x54, 0x92, 0x74, 0x36, 0x51,
      +		0x38, 0xb0, 0xbd, 0x5a, 0xfc, 0x60, 0x62, 0x96, 0x6c, 0x42, 0xf7, 0x10, 0x7c, 0x28, 0x27, 0x8c,
      +		0x13, 0x95, 0x9c, 0xc7, 0x24, 0x46, 0x3b, 0x70, 0xca, 0xe3, 0x85, 0xcb, 0x11, 0xd0, 0x93, 0xb8,
      +		0xa6, 0x83, 0x20, 0xff, 0x9f, 0x77, 0xc3, 0xcc, 0x03, 0x6f, 0x08, 0xbf, 0x40, 0xe7, 0x2b, 0xe2,
      +		0x79, 0x0c, 0xaa, 0x82, 0x41, 0x3a, 0xea, 0xb9, 0xe4, 0x9a, 0xa4, 0x97, 0x7e, 0xda, 0x7a, 0x17,
      +		0x66, 0x94, 0xa1, 0x1d, 0x3d, 0xf0, 0xde, 0xb3, 0x0b, 0x72, 0xa7, 0x1c, 0xef, 0xd1, 0x53, 0x3e,
      +		0x8f, 0x33, 0x26, 0x5f, 0xec, 0x76, 0x2a, 0x49, 0x81, 0x88, 0xee, 0x21, 0xc4, 0x1a, 0xeb, 0xd9,
      +		0xc5, 0x39, 0x99, 0xcd, 0xad, 0x31, 0x8b, 0x01, 0x18, 0x23, 0xdd, 0x1f, 0x4e, 0x2d, 0xf9, 0x48,
      +		0x4f, 0xf2, 0x65, 0x8e, 0x78, 0x5c, 0x58, 0x19, 0x8d, 0xe5, 0x98, 0x57, 0x67, 0x7f, 0x05, 0x64,
      +		0xaf, 0x63, 0xb6, 0xfe, 0xf5, 0xb7, 0x3c, 0xa5, 0xce, 0xe9, 0x68, 0x44, 0xe0, 0x4d, 0x43, 0x69,
      +		0x29, 0x2e, 0xac, 0x15, 0x59, 0xa8, 0x0a, 0x9e, 0x6e, 0x47, 0xdf, 0x34, 0x35, 0x6a, 0xcf, 0xdc,
      +		0x22, 0xc9, 0xc0, 0x9b, 0x89, 0xd4, 0xed, 0xab, 0x12, 0xa2, 0x0d, 0x52, 0xbb, 0x02, 0x2f, 0xa9,
      +		0xd7, 0x61, 0x1e, 0xb4, 0x50, 0x04, 0xf6, 0xc2, 0x16, 0x25, 0x86, 0x56, 0x55, 0x09, 0xbe, 0x91,
      +	},
      +}
      +
      +// gfMult returns a·b in GF(2^8)/p
      +func gfMult(a, b byte, p uint32) byte {
      +	B := [2]uint32{0, uint32(b)}
      +	P := [2]uint32{0, p}
      +	var result uint32
      +
      +	// branchless GF multiplier
      +	for i := 0; i < 7; i++ {
      +		result ^= B[a&1]
      +		a >>= 1
      +		B[1] = P[B[1]>>7] ^ (B[1] << 1)
      +	}
      +	result ^= B[a&1]
      +	return byte(result)
      +}
      +
      +// mdsColumnMult calculates y{col} where [y0 y1 y2 y3] = MDS · [x0]
      +func mdsColumnMult(in byte, col int) uint32 {
      +	mul01 := in
      +	mul5B := gfMult(in, 0x5B, mdsPolynomial)
      +	mulEF := gfMult(in, 0xEF, mdsPolynomial)
      +
      +	switch col {
      +	case 0:
      +		return uint32(mul01) | uint32(mul5B)<<8 | uint32(mulEF)<<16 | uint32(mulEF)<<24
      +	case 1:
      +		return uint32(mulEF) | uint32(mulEF)<<8 | uint32(mul5B)<<16 | uint32(mul01)<<24
      +	case 2:
      +		return uint32(mul5B) | uint32(mulEF)<<8 | uint32(mul01)<<16 | uint32(mulEF)<<24
      +	case 3:
      +		return uint32(mul5B) | uint32(mul01)<<8 | uint32(mulEF)<<16 | uint32(mul5B)<<24
      +	}
      +
      +	panic("unreachable")
      +}
      +
      +// h implements the S-box generation function. See [TWOFISH] 4.3.5
      +func h(in, key []byte, offset int) uint32 {
      +	var y [4]byte
      +	for x := range y {
      +		y[x] = in[x]
      +	}
      +	switch len(key) / 8 {
      +	case 4:
      +		y[0] = sbox[1][y[0]] ^ key[4*(6+offset)+0]
      +		y[1] = sbox[0][y[1]] ^ key[4*(6+offset)+1]
      +		y[2] = sbox[0][y[2]] ^ key[4*(6+offset)+2]
      +		y[3] = sbox[1][y[3]] ^ key[4*(6+offset)+3]
      +		fallthrough
      +	case 3:
      +		y[0] = sbox[1][y[0]] ^ key[4*(4+offset)+0]
      +		y[1] = sbox[1][y[1]] ^ key[4*(4+offset)+1]
      +		y[2] = sbox[0][y[2]] ^ key[4*(4+offset)+2]
      +		y[3] = sbox[0][y[3]] ^ key[4*(4+offset)+3]
      +		fallthrough
      +	case 2:
      +		y[0] = sbox[1][sbox[0][sbox[0][y[0]]^key[4*(2+offset)+0]]^key[4*(0+offset)+0]]
      +		y[1] = sbox[0][sbox[0][sbox[1][y[1]]^key[4*(2+offset)+1]]^key[4*(0+offset)+1]]
      +		y[2] = sbox[1][sbox[1][sbox[0][y[2]]^key[4*(2+offset)+2]]^key[4*(0+offset)+2]]
      +		y[3] = sbox[0][sbox[1][sbox[1][y[3]]^key[4*(2+offset)+3]]^key[4*(0+offset)+3]]
      +	}
      +	// [y0 y1 y2 y3] = MDS . [x0 x1 x2 x3]
      +	var mdsMult uint32
      +	for i := range y {
      +		mdsMult ^= mdsColumnMult(y[i], i)
      +	}
      +	return mdsMult
      +}
      +
+// Encrypt encrypts a 16-byte block from src to dst, which may overlap.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
+func (c *Cipher) Encrypt(dst, src []byte) {
+	// Per-cipher S-box tables (c.s is populated during key setup).
+	S1 := c.s[0]
+	S2 := c.s[1]
+	S3 := c.s[2]
+	S4 := c.s[3]
+
+	// Load input as four little-endian 32-bit words.
+	ia := load32l(src[0:4])
+	ib := load32l(src[4:8])
+	ic := load32l(src[8:12])
+	id := load32l(src[12:16])
+
+	// Pre-whitening
+	ia ^= c.k[0]
+	ib ^= c.k[1]
+	ic ^= c.k[2]
+	id ^= c.k[3]
+
+	// 16 rounds, two per iteration.
+	for i := 0; i < 8; i++ {
+		// Four round subkeys consumed by this pair of rounds.
+		k := c.k[8+i*4 : 12+i*4]
+		// Note: binary ^ and + have equal precedence in Go and
+		// associate left-to-right, so t1 below parses as
+		// (S1^S2^S3^S4) + t2 — the whole S-box combination plus t2.
+		t2 := S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)]
+		t1 := S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2
+		ic = ror(ic^(t1+k[0]), 1)
+		id = rol(id, 1) ^ (t2 + t1 + k[1])
+
+		t2 = S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)]
+		t1 = S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2
+		ia = ror(ia^(t1+k[2]), 1)
+		ib = rol(ib, 1) ^ (t2 + t1 + k[3])
+	}
+
+	// Output with "undo last swap"
+	ta := ic ^ c.k[4]
+	tb := id ^ c.k[5]
+	tc := ia ^ c.k[6]
+	td := ib ^ c.k[7]
+
+	store32l(dst[0:4], ta)
+	store32l(dst[4:8], tb)
+	store32l(dst[8:12], tc)
+	store32l(dst[12:16], td)
+}
      +
+// Decrypt decrypts a 16-byte block from src to dst, which may overlap.
+func (c *Cipher) Decrypt(dst, src []byte) {
+	// Per-cipher S-box tables (c.s is populated during key setup).
+	S1 := c.s[0]
+	S2 := c.s[1]
+	S3 := c.s[2]
+	S4 := c.s[3]
+
+	// Load input
+	ta := load32l(src[0:4])
+	tb := load32l(src[4:8])
+	tc := load32l(src[8:12])
+	td := load32l(src[12:16])
+
+	// Undo final swap
+	ia := tc ^ c.k[6]
+	ib := td ^ c.k[7]
+	ic := ta ^ c.k[4]
+	id := tb ^ c.k[5]
+
+	// 16 rounds in reverse order, two per iteration.
+	for i := 8; i > 0; i-- {
+		k := c.k[4+i*4 : 8+i*4]
+		// As in Encrypt: ^ and + share precedence and associate left,
+		// so t1 is (S1^S2^S3^S4) + t2.
+		t2 := S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)]
+		t1 := S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2
+		ia = rol(ia, 1) ^ (t1 + k[2])
+		ib = ror(ib^(t2+t1+k[3]), 1)
+
+		t2 = S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)]
+		t1 = S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2
+		ic = rol(ic, 1) ^ (t1 + k[0])
+		id = ror(id^(t2+t1+k[1]), 1)
+	}
+
+	// Undo pre-whitening
+	ia ^= c.k[0]
+	ib ^= c.k[1]
+	ic ^= c.k[2]
+	id ^= c.k[3]
+
+	store32l(dst[0:4], ia)
+	store32l(dst[4:8], ib)
+	store32l(dst[8:12], ic)
+	store32l(dst[12:16], id)
+}
      diff --git a/vendor/golang.org/x/crypto/twofish/twofish_test.go b/vendor/golang.org/x/crypto/twofish/twofish_test.go
      new file mode 100644
      index 00000000..303081f3
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/twofish/twofish_test.go
      @@ -0,0 +1,129 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package twofish
      +
      +import (
      +	"bytes"
      +	"testing"
      +)
      +
+// qbox holds the 4-bit lookup tables that genSbox uses to regenerate
+// the package's fixed sbox values for verification in TestSbox.
+var qbox = [2][4][16]byte{
+	{
+		{0x8, 0x1, 0x7, 0xD, 0x6, 0xF, 0x3, 0x2, 0x0, 0xB, 0x5, 0x9, 0xE, 0xC, 0xA, 0x4},
+		{0xE, 0xC, 0xB, 0x8, 0x1, 0x2, 0x3, 0x5, 0xF, 0x4, 0xA, 0x6, 0x7, 0x0, 0x9, 0xD},
+		{0xB, 0xA, 0x5, 0xE, 0x6, 0xD, 0x9, 0x0, 0xC, 0x8, 0xF, 0x3, 0x2, 0x4, 0x7, 0x1},
+		{0xD, 0x7, 0xF, 0x4, 0x1, 0x2, 0x6, 0xE, 0x9, 0xB, 0x3, 0x0, 0x8, 0x5, 0xC, 0xA},
+	},
+	{
+		{0x2, 0x8, 0xB, 0xD, 0xF, 0x7, 0x6, 0xE, 0x3, 0x1, 0x9, 0x4, 0x0, 0xA, 0xC, 0x5},
+		{0x1, 0xE, 0x2, 0xB, 0x4, 0xC, 0x3, 0x7, 0x6, 0xD, 0xA, 0x5, 0xF, 0x9, 0x0, 0x8},
+		{0x4, 0xC, 0x7, 0x5, 0x1, 0x6, 0x9, 0xA, 0x0, 0xE, 0xD, 0x8, 0x2, 0xB, 0x3, 0xF},
+		{0xB, 0x9, 0x5, 0x1, 0xC, 0x3, 0xD, 0xE, 0x6, 0x4, 0x7, 0xF, 0x2, 0x0, 0x8, 0xA},
+	},
+}
      +
+// genSbox generates the variable sbox
+// It recomputes entry x of the fixed sbox for permutation qi by
+// splitting x into two nibbles and running two rounds of qbox lookups.
+func genSbox(qi int, x byte) byte {
+	// a0 = high nibble, b0 = low nibble of x.
+	a0, b0 := x/16, x%16
+	for i := 0; i < 2; i++ {
+		a1 := a0 ^ b0
+		// (b0<<3 | b0>>1) & 15 — mix of the low nibble with itself,
+		// combined with a0 before the table lookup below.
+		b1 := (a0 ^ ((b0 << 3) | (b0 >> 1)) ^ (a0 << 3)) & 15
+		a0 = qbox[qi][2*i][a1]
+		b0 = qbox[qi][2*i+1][b1]
+	}
+	// Recombine the nibbles, low lookup in the high position.
+	return (b0 << 4) + a0
+}
      +
+// TestSbox verifies that every entry of the package's fixed sbox tables
+// matches the value regenerated from the qbox permutation tables.
+func TestSbox(t *testing.T) {
+	for n := range sbox {
+		for m := range sbox[n] {
+			if genSbox(n, byte(m)) != sbox[n][m] {
+				t.Errorf("#%d|%d: sbox value = %d want %d", n, m, sbox[n][m], genSbox(n, byte(m)))
+			}
+		}
+	}
+}
      +
+// testVectors pairs a Twofish key (16, 24, or 32 bytes — all three key
+// sizes are covered) with one plaintext block (dec) and its expected
+// ciphertext (enc).
+var testVectors = []struct {
+	key []byte
+	dec []byte
+	enc []byte
+}{
+	// These tests are extracted from LibTom
+	{
+		[]byte{0x9F, 0x58, 0x9F, 0x5C, 0xF6, 0x12, 0x2C, 0x32, 0xB6, 0xBF, 0xEC, 0x2F, 0x2A, 0xE8, 0xC3, 0x5A},
+		[]byte{0xD4, 0x91, 0xDB, 0x16, 0xE7, 0xB1, 0xC3, 0x9E, 0x86, 0xCB, 0x08, 0x6B, 0x78, 0x9F, 0x54, 0x19},
+		[]byte{0x01, 0x9F, 0x98, 0x09, 0xDE, 0x17, 0x11, 0x85, 0x8F, 0xAA, 0xC3, 0xA3, 0xBA, 0x20, 0xFB, 0xC3},
+	},
+	{
+		[]byte{0x88, 0xB2, 0xB2, 0x70, 0x6B, 0x10, 0x5E, 0x36, 0xB4, 0x46, 0xBB, 0x6D, 0x73, 0x1A, 0x1E, 0x88,
+			0xEF, 0xA7, 0x1F, 0x78, 0x89, 0x65, 0xBD, 0x44},
+		[]byte{0x39, 0xDA, 0x69, 0xD6, 0xBA, 0x49, 0x97, 0xD5, 0x85, 0xB6, 0xDC, 0x07, 0x3C, 0xA3, 0x41, 0xB2},
+		[]byte{0x18, 0x2B, 0x02, 0xD8, 0x14, 0x97, 0xEA, 0x45, 0xF9, 0xDA, 0xAC, 0xDC, 0x29, 0x19, 0x3A, 0x65},
+	},
+	{
+		[]byte{0xD4, 0x3B, 0xB7, 0x55, 0x6E, 0xA3, 0x2E, 0x46, 0xF2, 0xA2, 0x82, 0xB7, 0xD4, 0x5B, 0x4E, 0x0D,
+			0x57, 0xFF, 0x73, 0x9D, 0x4D, 0xC9, 0x2C, 0x1B, 0xD7, 0xFC, 0x01, 0x70, 0x0C, 0xC8, 0x21, 0x6F},
+		[]byte{0x90, 0xAF, 0xE9, 0x1B, 0xB2, 0x88, 0x54, 0x4F, 0x2C, 0x32, 0xDC, 0x23, 0x9B, 0x26, 0x35, 0xE6},
+		[]byte{0x6C, 0xB4, 0x56, 0x1C, 0x40, 0xBF, 0x0A, 0x97, 0x05, 0x93, 0x1C, 0xB6, 0xD4, 0x08, 0xE7, 0xFA},
+	},
+	// These tests are derived from http://www.schneier.com/code/ecb_ival.txt
+	{
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		[]byte{0x9F, 0x58, 0x9F, 0x5C, 0xF6, 0x12, 0x2C, 0x32, 0xB6, 0xBF, 0xEC, 0x2F, 0x2A, 0xE8, 0xC3, 0x5A},
+	},
+	{
+		[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
+			0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+		},
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		[]byte{0xCF, 0xD1, 0xD2, 0xE5, 0xA9, 0xBE, 0x9C, 0xDF, 0x50, 0x1F, 0x13, 0xB8, 0x92, 0xBD, 0x22, 0x48},
+	},
+	{
+		[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
+			0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF,
+		},
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		[]byte{0x37, 0x52, 0x7B, 0xE0, 0x05, 0x23, 0x34, 0xB8, 0x9F, 0x0C, 0xFC, 0xCA, 0xE8, 0x7C, 0xFA, 0x20},
+	},
+}
      +
+// TestCipher checks each test vector round-trips (encrypt then decrypt)
+// and that 1000 repeated encryptions are exactly undone by 1000 decryptions.
+func TestCipher(t *testing.T) {
+	for n, tt := range testVectors {
+		// Test if the plaintext (dec) is encrypts to the given
+		// ciphertext (enc) using the given key. Test also if enc can
+		// be decrypted again into dec.
+		c, err := NewCipher(tt.key)
+		if err != nil {
+			t.Errorf("#%d: NewCipher: %v", n, err)
+			// Aborts the remaining vectors as well, not just this one.
+			return
+		}
+
+		buf := make([]byte, 16)
+		c.Encrypt(buf, tt.dec)
+		if !bytes.Equal(buf, tt.enc) {
+			t.Errorf("#%d: encrypt = %x want %x", n, buf, tt.enc)
+		}
+		c.Decrypt(buf, tt.enc)
+		if !bytes.Equal(buf, tt.dec) {
+			t.Errorf("#%d: decrypt = %x want %x", n, buf, tt.dec)
+		}
+
+		// Test that 16 zero bytes, encrypted 1000 times then decrypted
+		// 1000 times results in zero bytes again.
+		zero := make([]byte, 16)
+		buf = make([]byte, 16)
+		for i := 0; i < 1000; i++ {
+			c.Encrypt(buf, buf)
+		}
+		for i := 0; i < 1000; i++ {
+			c.Decrypt(buf, buf)
+		}
+		if !bytes.Equal(buf, zero) {
+			t.Errorf("#%d: encrypt/decrypt 1000: have %x want %x", n, buf, zero)
+		}
+	}
+}
      diff --git a/vendor/golang.org/x/crypto/xtea/block.go b/vendor/golang.org/x/crypto/xtea/block.go
      new file mode 100644
      index 00000000..bf5d2459
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/xtea/block.go
      @@ -0,0 +1,66 @@
      +// Copyright 2009 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +/*
      +	Implementation adapted from Needham and Wheeler's paper:
      +	http://www.cix.co.uk/~klockstone/xtea.pdf
      +
      +	A precalculated look up table is used during encryption/decryption for values that are based purely on the key.
      +*/
      +
      +package xtea
      +
+// XTEA is based on 64 rounds.
+const numRounds = 64
+
+// blockToUint32 reads an 8 byte slice into two uint32s.
+// The block is treated as big endian.
+func blockToUint32(src []byte) (uint32, uint32) {
+	// r0 = bytes 0-3, r1 = bytes 4-7, most-significant byte first.
+	r0 := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+	r1 := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+	return r0, r1
+}
      +
+// uint32ToBlock writes two uint32s into an 8 byte data block.
+// Values are written as big endian.
+// It is the inverse of blockToUint32.
+func uint32ToBlock(v0, v1 uint32, dst []byte) {
+	dst[0] = byte(v0 >> 24)
+	dst[1] = byte(v0 >> 16)
+	dst[2] = byte(v0 >> 8)
+	dst[3] = byte(v0)
+	dst[4] = byte(v1 >> 24)
+	dst[5] = byte(v1 >> 16)
+	dst[6] = byte(v1 >> 8)
+	// byte(v1 >> 0) is equivalent to byte(v1); written for symmetry.
+	dst[7] = byte(v1 >> 0)
+}
      +
+// encryptBlock encrypts a single 8 byte block using XTEA.
+// It consumes c.table (the precomputed sum/key terms from initCipher)
+// in increasing index order.
+func encryptBlock(c *Cipher, dst, src []byte) {
+	v0, v1 := blockToUint32(src)
+
+	// Two rounds of XTEA applied per loop
+	for i := 0; i < numRounds; {
+		v0 += ((v1<<4 ^ v1>>5) + v1) ^ c.table[i]
+		i++
+		v1 += ((v0<<4 ^ v0>>5) + v0) ^ c.table[i]
+		i++
+	}
+
+	uint32ToBlock(v0, v1, dst)
+}
      +
+// decryptBlock decrypt a single 8 byte block using XTEA.
+// It mirrors encryptBlock exactly: the table is consumed in decreasing
+// index order and the additions are replaced with subtractions.
+func decryptBlock(c *Cipher, dst, src []byte) {
+	v0, v1 := blockToUint32(src)
+
+	// Two rounds of XTEA applied per loop
+	for i := numRounds; i > 0; {
+		i--
+		v1 -= ((v0<<4 ^ v0>>5) + v0) ^ c.table[i]
+		i--
+		v0 -= ((v1<<4 ^ v1>>5) + v1) ^ c.table[i]
+	}
+
+	uint32ToBlock(v0, v1, dst)
+}
      diff --git a/vendor/golang.org/x/crypto/xtea/cipher.go b/vendor/golang.org/x/crypto/xtea/cipher.go
      new file mode 100644
      index 00000000..108b4263
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/xtea/cipher.go
      @@ -0,0 +1,82 @@
      +// Copyright 2009 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package xtea implements XTEA encryption, as defined in Needham and Wheeler's
      +// 1997 technical report, "Tea extensions."
      +package xtea // import "golang.org/x/crypto/xtea"
      +
      +// For details, see http://www.cix.co.uk/~klockstone/xtea.pdf
      +
      +import "strconv"
      +
+// The XTEA block size in bytes.
+const BlockSize = 8
+
+// A Cipher is an instance of an XTEA cipher using a particular key.
+// table contains a series of precalculated values that are used each round.
+type Cipher struct {
+	// Per-round values derived from the key; filled in by initCipher.
+	table [64]uint32
+}
      +
+// KeySizeError is returned by NewCipher when the key is not exactly
+// 16 bytes; its value is the rejected key length.
+type KeySizeError int
+
+// Error implements the error interface.
+func (k KeySizeError) Error() string {
+	return "crypto/xtea: invalid key size " + strconv.Itoa(int(k))
+}
      +
+// NewCipher creates and returns a new Cipher.
+// The key argument should be the XTEA key.
+// XTEA only supports 128 bit (16 byte) keys.
+func NewCipher(key []byte) (*Cipher, error) {
+	k := len(key)
+	// Only a 16-byte key is accepted; any other length is a KeySizeError.
+	switch k {
+	default:
+		return nil, KeySizeError(k)
+	case 16:
+		break
+	}
+
+	c := new(Cipher)
+	initCipher(c, key)
+
+	return c, nil
+}
      +
+// BlockSize returns the XTEA block size, 8 bytes.
+// It is necessary to satisfy the Block interface in the
+// package "crypto/cipher".
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// Encrypt encrypts the 8 byte buffer src using the key and stores the result in dst.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
+func (c *Cipher) Encrypt(dst, src []byte) { encryptBlock(c, dst, src) }
+
+// Decrypt decrypts the 8 byte buffer src using the key and stores the result in dst.
+func (c *Cipher) Decrypt(dst, src []byte) { decryptBlock(c, dst, src) }
      +
+// initCipher initializes the cipher context by creating a look up table
+// of precalculated values that are based on the key.
+func initCipher(c *Cipher, key []byte) {
+	// Load the key into four uint32s
+	// (big endian, matching blockToUint32).
+	var k [4]uint32
+	for i := 0; i < len(k); i++ {
+		j := i << 2 // Multiply by 4
+		k[i] = uint32(key[j+0])<<24 | uint32(key[j+1])<<16 | uint32(key[j+2])<<8 | uint32(key[j+3])
+	}
+
+	// Precalculate the table
+	// delta is the XTEA key-schedule constant.
+	const delta = 0x9E3779B9
+	var sum uint32 = 0
+
+	// Two rounds of XTEA applied per loop
+	// Even entries select a key word by the low bits of sum, odd
+	// entries by bits 11-12 of the advanced sum.
+	for i := 0; i < numRounds; {
+		c.table[i] = sum + k[sum&3]
+		i++
+		sum += delta
+		c.table[i] = sum + k[(sum>>11)&3]
+		i++
+	}
+}
      diff --git a/vendor/golang.org/x/crypto/xtea/xtea_test.go b/vendor/golang.org/x/crypto/xtea/xtea_test.go
      new file mode 100644
      index 00000000..be711bf5
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/xtea/xtea_test.go
      @@ -0,0 +1,229 @@
      +// Copyright 2009 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package xtea
      +
      +import (
      +	"testing"
      +)
      +
+// A sample test key for when we just want to initialize a cipher
+var testKey = []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF}
+
+// Test that the block size for XTEA is correct
+// Checks both the exported constant and the BlockSize method.
+func TestBlocksize(t *testing.T) {
+	if BlockSize != 8 {
+		t.Errorf("BlockSize constant - expected 8, got %d", BlockSize)
+		return
+	}
+
+	c, err := NewCipher(testKey)
+	if err != nil {
+		t.Errorf("NewCipher(%d bytes) = %s", len(testKey), err)
+		return
+	}
+
+	result := c.BlockSize()
+	if result != 8 {
+		t.Errorf("BlockSize function - expected 8, got %d", result)
+		return
+	}
+}
      +
+// A series of test values to confirm that the Cipher.table array was initialized correctly
+// (the expected initCipher output for testKey).
+var testTable = []uint32{
+	0x00112233, 0x6B1568B8, 0xE28CE030, 0xC5089E2D, 0xC5089E2D, 0x1EFBD3A2, 0xA7845C2A, 0x78EF0917,
+	0x78EF0917, 0x172682D0, 0x5B6AC714, 0x822AC955, 0x3DE68511, 0xDC1DFECA, 0x2062430E, 0x3611343F,
+	0xF1CCEFFB, 0x900469B4, 0xD448ADF8, 0x2E3BE36D, 0xB6C46BF5, 0x994029F2, 0x994029F2, 0xF3335F67,
+	0x6AAAD6DF, 0x4D2694DC, 0x4D2694DC, 0xEB5E0E95, 0x2FA252D9, 0x4551440A, 0x121E10D6, 0xB0558A8F,
+	0xE388BDC3, 0x0A48C004, 0xC6047BC0, 0x643BF579, 0xA88039BD, 0x02736F32, 0x8AFBF7BA, 0x5C66A4A7,
+	0x5C66A4A7, 0xC76AEB2C, 0x3EE262A4, 0x215E20A1, 0x215E20A1, 0x7B515616, 0x03D9DE9E, 0x1988CFCF,
+	0xD5448B8B, 0x737C0544, 0xB7C04988, 0xDE804BC9, 0x9A3C0785, 0x3873813E, 0x7CB7C582, 0xD6AAFAF7,
+	0x4E22726F, 0x309E306C, 0x309E306C, 0x8A9165E1, 0x1319EE69, 0xF595AC66, 0xF595AC66, 0x4F88E1DB,
+}
+
+// Test that the cipher context is initialized correctly
+func TestCipherInit(t *testing.T) {
+	c, err := NewCipher(testKey)
+	if err != nil {
+		t.Errorf("NewCipher(%d bytes) = %s", len(testKey), err)
+		return
+	}
+
+	for i := 0; i < len(c.table); i++ {
+		if c.table[i] != testTable[i] {
+			// Report only the first mismatch.
+			t.Errorf("NewCipher() failed to initialize Cipher.table[%d] correctly. Expected %08X, got %08X", i, testTable[i], c.table[i])
+			break
+		}
+	}
+}
      +
+// Test that invalid key sizes return an error
+// (XTEA accepts exactly 16 bytes; see NewCipher).
+func TestInvalidKeySize(t *testing.T) {
+	// Test a long key (32 bytes)
+	key := []byte{
+		0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF,
+		0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF,
+	}
+
+	_, err := NewCipher(key)
+	if err == nil {
+		t.Errorf("Invalid key size %d didn't result in an error.", len(key))
+	}
+
+	// Test a short key (8 bytes)
+	key = []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77}
+
+	_, err = NewCipher(key)
+	if err == nil {
+		t.Errorf("Invalid key size %d didn't result in an error.", len(key))
+	}
+}
      +
+// Test that we can correctly decode some bytes we have encoded
+func TestEncodeDecode(t *testing.T) {
+	original := []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}
+	// input aliases original; Encrypt writes into the separate output slice.
+	input := original
+	output := make([]byte, BlockSize)
+
+	c, err := NewCipher(testKey)
+	if err != nil {
+		t.Errorf("NewCipher(%d bytes) = %s", len(testKey), err)
+		return
+	}
+
+	// Encrypt the input block
+	c.Encrypt(output, input)
+
+	// Check that the output does not match the input
+	// (a degenerate cipher that copies its input would pass the
+	// round-trip check below, so catch that case here).
+	differs := false
+	for i := 0; i < len(input); i++ {
+		if output[i] != input[i] {
+			differs = true
+			break
+		}
+	}
+	if differs == false {
+		t.Error("Cipher.Encrypt: Failed to encrypt the input block.")
+		return
+	}
+
+	// Decrypt the block we just encrypted
+	input = output
+	output = make([]byte, BlockSize)
+	c.Decrypt(output, input)
+
+	// Check that the output from decrypt matches our initial input
+	for i := 0; i < len(input); i++ {
+		if output[i] != original[i] {
+			t.Errorf("Decrypted byte %d differed. Expected %02X, got %02X\n", i, original[i], output[i])
+			return
+		}
+	}
+}
      +
+// Test Vectors
+// CryptTest holds one 16-byte key with a plaintext/ciphertext block pair.
+type CryptTest struct {
+	key        []byte
+	plainText  []byte
+	cipherText []byte
+}
+
+// CryptTests is shared by TestCipherEncrypt and TestCipherDecrypt.
+var CryptTests = []CryptTest{
+	// These were sourced from http://www.freemedialibrary.com/index.php/XTEA_test_vectors
+	{
+		[]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f},
+		[]byte{0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48},
+		[]byte{0x49, 0x7d, 0xf3, 0xd0, 0x72, 0x61, 0x2c, 0xb5},
+	},
+	{
+		[]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f},
+		[]byte{0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41},
+		[]byte{0xe7, 0x8f, 0x2d, 0x13, 0x74, 0x43, 0x41, 0xd8},
+	},
+	{
+		[]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f},
+		[]byte{0x5a, 0x5b, 0x6e, 0x27, 0x89, 0x48, 0xd7, 0x7f},
+		[]byte{0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41},
+	},
+	{
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		[]byte{0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48},
+		[]byte{0xa0, 0x39, 0x05, 0x89, 0xf8, 0xb8, 0xef, 0xa5},
+	},
+	{
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		[]byte{0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41},
+		[]byte{0xed, 0x23, 0x37, 0x5a, 0x82, 0x1a, 0x8c, 0x2d},
+	},
+	{
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		[]byte{0x70, 0xe1, 0x22, 0x5d, 0x6e, 0x4e, 0x76, 0x55},
+		[]byte{0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41},
+	},
+
+	// These vectors are from http://wiki.secondlife.com/wiki/XTEA_Strong_Encryption_Implementation#Bouncy_Castle_C.23_API
+	{
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		[]byte{0xDE, 0xE9, 0xD4, 0xD8, 0xF7, 0x13, 0x1E, 0xD9},
+	},
+	{
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		[]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
+		[]byte{0x06, 0x5C, 0x1B, 0x89, 0x75, 0xC6, 0xA8, 0x16},
+	},
+	{
+		[]byte{0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78, 0x23, 0x45, 0x67, 0x89, 0x34, 0x56, 0x78, 0x9A},
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		[]byte{0x1F, 0xF9, 0xA0, 0x26, 0x1A, 0xC6, 0x42, 0x64},
+	},
+	{
+		[]byte{0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78, 0x23, 0x45, 0x67, 0x89, 0x34, 0x56, 0x78, 0x9A},
+		[]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
+		[]byte{0x8C, 0x67, 0x15, 0x5B, 0x2E, 0xF9, 0x1E, 0xAD},
+	},
+}
      +
+// Test encryption
+// Checks every CryptTests vector encrypts to its expected ciphertext.
+func TestCipherEncrypt(t *testing.T) {
+	for i, tt := range CryptTests {
+		c, err := NewCipher(tt.key)
+		if err != nil {
+			t.Errorf("NewCipher(%d bytes), vector %d = %s", len(tt.key), i, err)
+			continue
+		}
+
+		out := make([]byte, len(tt.plainText))
+		c.Encrypt(out, tt.plainText)
+
+		for j := 0; j < len(out); j++ {
+			if out[j] != tt.cipherText[j] {
+				// Report only the first differing byte per vector.
+				t.Errorf("Cipher.Encrypt %d: out[%d] = %02X, expected %02X", i, j, out[j], tt.cipherText[j])
+				break
+			}
+		}
+	}
+}
+
+// Test decryption
+// Checks every CryptTests vector decrypts back to its plaintext.
+func TestCipherDecrypt(t *testing.T) {
+	for i, tt := range CryptTests {
+		c, err := NewCipher(tt.key)
+		if err != nil {
+			t.Errorf("NewCipher(%d bytes), vector %d = %s", len(tt.key), i, err)
+			continue
+		}
+
+		out := make([]byte, len(tt.cipherText))
+		c.Decrypt(out, tt.cipherText)
+
+		for j := 0; j < len(out); j++ {
+			if out[j] != tt.plainText[j] {
+				// Report only the first differing byte per vector.
+				t.Errorf("Cipher.Decrypt %d: out[%d] = %02X, expected %02X", i, j, out[j], tt.plainText[j])
+				break
+			}
+		}
+	}
+}
      diff --git a/vendor/golang.org/x/crypto/xts/xts.go b/vendor/golang.org/x/crypto/xts/xts.go
      new file mode 100644
      index 00000000..c9a283b2
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/xts/xts.go
      @@ -0,0 +1,138 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package xts implements the XTS cipher mode as specified in IEEE P1619/D16.
      +//
      +// XTS mode is typically used for disk encryption, which presents a number of
      +// novel problems that make more common modes inapplicable. The disk is
      +// conceptually an array of sectors and we must be able to encrypt and decrypt
      +// a sector in isolation. However, an attacker must not be able to transpose
      +// two sectors of plaintext by transposing their ciphertext.
      +//
      +// XTS wraps a block cipher with Rogaway's XEX mode in order to build a
      +// tweakable block cipher. This allows each sector to have a unique tweak and
      +// effectively create a unique key for each sector.
      +//
      +// XTS does not provide any authentication. An attacker can manipulate the
      +// ciphertext and randomise a block (16 bytes) of the plaintext.
      +//
      +// (Note: this package does not implement ciphertext-stealing so sectors must
      +// be a multiple of 16 bytes.)
      +package xts // import "golang.org/x/crypto/xts"
      +
      +import (
      +	"crypto/cipher"
      +	"errors"
      +)
      +
+// Cipher contains an expanded key structure. It doesn't contain mutable state
+// and therefore can be used concurrently.
+type Cipher struct {
+	// k1 encrypts/decrypts the data blocks; k2 encrypts the sector tweak.
+	k1, k2 cipher.Block
+}
+
+// blockSize is the block size that the underlying cipher must have. XTS is
+// only defined for 16-byte ciphers.
+const blockSize = 16
      +
+// NewCipher creates a Cipher given a function for creating the underlying
+// block cipher (which must have a block size of 16 bytes). The key must be
+// twice the length of the underlying cipher's key.
+func NewCipher(cipherFunc func([]byte) (cipher.Block, error), key []byte) (c *Cipher, err error) {
+	c = new(Cipher)
+	// First half of key drives k1 (data), second half drives k2 (tweak).
+	if c.k1, err = cipherFunc(key[:len(key)/2]); err != nil {
+		return
+	}
+	c.k2, err = cipherFunc(key[len(key)/2:])
+
+	// NOTE(review): only k1's block size is verified (k2 is assumed to
+	// match since both come from the same cipherFunc), and a block-size
+	// failure here overwrites any error from the k2 call above.
+	if c.k1.BlockSize() != blockSize {
+		err = errors.New("xts: cipher does not have a block size of 16")
+	}
+
+	return
+}
      +
+// Encrypt encrypts a sector of plaintext and puts the result into ciphertext.
+// Plaintext and ciphertext may be the same slice but should not overlap.
+// Sectors must be a multiple of 16 bytes and less than 2²⁴ bytes.
+func (c *Cipher) Encrypt(ciphertext, plaintext []byte, sectorNum uint64) {
+	if len(ciphertext) < len(plaintext) {
+		panic("xts: ciphertext is smaller than plaintext")
+	}
+	if len(plaintext)%blockSize != 0 {
+		panic("xts: plaintext is not a multiple of the block size")
+	}
+
+	// The initial tweak is the sector number, little-endian, in the low
+	// 8 bytes (high 8 bytes zero), encrypted once with k2.
+	var tweak [blockSize]byte
+	for i := 0; i < 8; i++ {
+		tweak[i] = byte(sectorNum)
+		sectorNum >>= 8
+	}
+
+	c.k2.Encrypt(tweak[:], tweak[:])
+
+	for i := 0; i < len(plaintext); i += blockSize {
+		// XEX per block: XOR tweak, encrypt with k1, XOR tweak again.
+		for j := 0; j < blockSize; j++ {
+			ciphertext[i+j] = plaintext[i+j] ^ tweak[j]
+		}
+		c.k1.Encrypt(ciphertext[i:], ciphertext[i:])
+		for j := 0; j < blockSize; j++ {
+			ciphertext[i+j] ^= tweak[j]
+		}
+
+		// Advance the tweak for the next block.
+		mul2(&tweak)
+	}
+}
      +
+// Decrypt decrypts a sector of ciphertext and puts the result into plaintext.
+// Plaintext and ciphertext may be the same slice but should not overlap.
+// Sectors must be a multiple of 16 bytes and less than 2²⁴ bytes.
+func (c *Cipher) Decrypt(plaintext, ciphertext []byte, sectorNum uint64) {
+	if len(plaintext) < len(ciphertext) {
+		panic("xts: plaintext is smaller than ciphertext")
+	}
+	if len(ciphertext)%blockSize != 0 {
+		panic("xts: ciphertext is not a multiple of the block size")
+	}
+
+	// Same tweak derivation as Encrypt: the tweak is always *encrypted*
+	// with k2, even when decrypting data.
+	var tweak [blockSize]byte
+	for i := 0; i < 8; i++ {
+		tweak[i] = byte(sectorNum)
+		sectorNum >>= 8
+	}
+
+	c.k2.Encrypt(tweak[:], tweak[:])
+
+	for i := 0; i < len(plaintext); i += blockSize {
+		// XEX per block: XOR tweak, decrypt with k1, XOR tweak again.
+		for j := 0; j < blockSize; j++ {
+			plaintext[i+j] = ciphertext[i+j] ^ tweak[j]
+		}
+		c.k1.Decrypt(plaintext[i:], plaintext[i:])
+		for j := 0; j < blockSize; j++ {
+			plaintext[i+j] ^= tweak[j]
+		}
+
+		// Advance the tweak for the next block.
+		mul2(&tweak)
+	}
+}
      +
+// mul2 multiplies tweak by 2 in GF(2¹²⁸) with an irreducible polynomial of
+// x¹²⁸ + x⁷ + x² + x + 1.
+func mul2(tweak *[blockSize]byte) {
+	var carryIn byte
+	// tweak[0] holds the low-order bits: the carry propagates from
+	// tweak[j] into tweak[j+1] (little-endian bit order).
+	for j := range tweak {
+		carryOut := tweak[j] >> 7
+		tweak[j] = (tweak[j] << 1) + carryIn
+		carryIn = carryOut
+	}
+	if carryIn != 0 {
+		// If we have a carry bit then we need to subtract a multiple
+		// of the irreducible polynomial (x¹²⁸ + x⁷ + x² + x + 1).
+		// By dropping the carry bit, we're subtracting the x^128 term
+		// so all that remains is to subtract x⁷ + x² + x + 1.
+		// Subtraction (and addition) in this representation is just
+		// XOR.
+		tweak[0] ^= 1<<7 | 1<<2 | 1<<1 | 1
+	}
+}
      diff --git a/vendor/golang.org/x/crypto/xts/xts_test.go b/vendor/golang.org/x/crypto/xts/xts_test.go
      new file mode 100644
      index 00000000..7a5e9fad
      --- /dev/null
      +++ b/vendor/golang.org/x/crypto/xts/xts_test.go
      @@ -0,0 +1,85 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package xts
      +
      +import (
      +	"bytes"
      +	"crypto/aes"
      +	"encoding/hex"
      +	"testing"
      +)
      +
      +// These test vectors have been taken from IEEE P1619/D16, Annex B.
      +var xtsTestVectors = []struct {
      +	key        string
      +	sector     uint64
      +	plaintext  string
      +	ciphertext string
      +}{
      +	{
      +		"0000000000000000000000000000000000000000000000000000000000000000",
      +		0,
      +		"0000000000000000000000000000000000000000000000000000000000000000",
      +		"917cf69ebd68b2ec9b9fe9a3eadda692cd43d2f59598ed858c02c2652fbf922e",
      +	}, {
      +		"1111111111111111111111111111111122222222222222222222222222222222",
      +		0x3333333333,
      +		"4444444444444444444444444444444444444444444444444444444444444444",
      +		"c454185e6a16936e39334038acef838bfb186fff7480adc4289382ecd6d394f0",
      +	}, {
      +		"fffefdfcfbfaf9f8f7f6f5f4f3f2f1f022222222222222222222222222222222",
      +		0x3333333333,
      +		"4444444444444444444444444444444444444444444444444444444444444444",
      +		"af85336b597afc1a900b2eb21ec949d292df4c047e0b21532186a5971a227a89",
      +	}, {
      +		"2718281828459045235360287471352631415926535897932384626433832795",
      +		0,
      +		"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
      +		"27a7479befa1d476489f308cd4cfa6e2a96e4bbe3208ff25287dd3819616e89cc78cf7f5e543445f8333d8fa7f56000005279fa5d8b5e4ad40e736ddb4d35412328063fd2aab53e5ea1e0a9f332500a5df9487d07a5c92cc512c8866c7e860ce93fdf166a24912b422976146ae20ce846bb7dc9ba94a767aaef20c0d61ad02655ea92dc4c4e41a8952c651d33174be51a10c421110e6d81588ede82103a252d8a750e8768defffed9122810aaeb99f9172af82b604dc4b8e51bcb08235a6f4341332e4ca60482a4ba1a03b3e65008fc5da76b70bf1690db4eae29c5f1badd03c5ccf2a55d705ddcd86d449511ceb7ec30bf12b1fa35b913f9f747a8afd1b130e94bff94effd01a91735ca1726acd0b197c4e5b03393697e126826fb6bbde8ecc1e08298516e2c9ed03ff3c1b7860f6de76d4cecd94c8119855ef5297ca67e9f3e7ff72b1e99785ca0a7e7720c5b36dc6d72cac9574c8cbbc2f801e23e56fd344b07f22154beba0f08ce8891e643ed995c94d9a69c9f1b5f499027a78572aeebd74d20cc39881c213ee770b1010e4bea718846977ae119f7a023ab58cca0ad752afe656bb3c17256a9f6e9bf19fdd5a38fc82bbe872c5539edb609ef4f79c203ebb140f2e583cb2ad15b4aa5b655016a8449277dbd477ef2c8d6c017db738b18deb4a427d1923ce3ff262735779a418f20a282df920147beabe421ee5319d0568",
      +	}, {
      +		"2718281828459045235360287471352631415926535897932384626433832795",
      +		1,
      +		"27a7479befa1d476489f308cd4cfa6e2a96e4bbe3208ff25287dd3819616e89cc78cf7f5e543445f8333d8fa7f56000005279fa5d8b5e4ad40e736ddb4d35412328063fd2aab53e5ea1e0a9f332500a5df9487d07a5c92cc512c8866c7e860ce93fdf166a24912b422976146ae20ce846bb7dc9ba94a767aaef20c0d61ad02655ea92dc4c4e41a8952c651d33174be51a10c421110e6d81588ede82103a252d8a750e8768defffed9122810aaeb99f9172af82b604dc4b8e51bcb08235a6f4341332e4ca60482a4ba1a03b3e65008fc5da76b70bf1690db4eae29c5f1badd03c5ccf2a55d705ddcd86d449511ceb7ec30bf12b1fa35b913f9f747a8afd1b130e94bff94effd01a91735ca1726acd0b197c4e5b03393697e126826fb6bbde8ecc1e08298516e2c9ed03ff3c1b7860f6de76d4cecd94c8119855ef5297ca67e9f3e7ff72b1e99785ca0a7e7720c5b36dc6d72cac9574c8cbbc2f801e23e56fd344b07f22154beba0f08ce8891e643ed995c94d9a69c9f1b5f499027a78572aeebd74d20cc39881c213ee770b1010e4bea718846977ae119f7a023ab58cca0ad752afe656bb3c17256a9f6e9bf19fdd5a38fc82bbe872c5539edb609ef4f79c203ebb140f2e583cb2ad15b4aa5b655016a8449277dbd477ef2c8d6c017db738b18deb4a427d1923ce3ff262735779a418f20a282df920147beabe421ee5319d0568",
      +		"264d3ca8512194fec312c8c9891f279fefdd608d0c027b60483a3fa811d65ee59d52d9e40ec5672d81532b38b6b089ce951f0f9c35590b8b978d175213f329bb1c2fd30f2f7f30492a61a532a79f51d36f5e31a7c9a12c286082ff7d2394d18f783e1a8e72c722caaaa52d8f065657d2631fd25bfd8e5baad6e527d763517501c68c5edc3cdd55435c532d7125c8614deed9adaa3acade5888b87bef641c4c994c8091b5bcd387f3963fb5bc37aa922fbfe3df4e5b915e6eb514717bdd2a74079a5073f5c4bfd46adf7d282e7a393a52579d11a028da4d9cd9c77124f9648ee383b1ac763930e7162a8d37f350b2f74b8472cf09902063c6b32e8c2d9290cefbd7346d1c779a0df50edcde4531da07b099c638e83a755944df2aef1aa31752fd323dcb710fb4bfbb9d22b925bc3577e1b8949e729a90bbafeacf7f7879e7b1147e28ba0bae940db795a61b15ecf4df8db07b824bb062802cc98a9545bb2aaeed77cb3fc6db15dcd7d80d7d5bc406c4970a3478ada8899b329198eb61c193fb6275aa8ca340344a75a862aebe92eee1ce032fd950b47d7704a3876923b4ad62844bf4a09c4dbe8b4397184b7471360c9564880aedddb9baa4af2e75394b08cd32ff479c57a07d3eab5d54de5f9738b8d27f27a9f0ab11799d7b7ffefb2704c95c6ad12c39f1e867a4b7b1d7818a4b753dfd2a89ccb45e001a03a867b187f225dd",
      +	}, {
      +		"27182818284590452353602874713526624977572470936999595749669676273141592653589793238462643383279502884197169399375105820974944592",
      +		0xff,
      +		"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
      +		"1c3b3a102f770386e4836c99e370cf9bea00803f5e482357a4ae12d414a3e63b5d31e276f8fe4a8d66b317f9ac683f44680a86ac35adfc3345befecb4bb188fd5776926c49a3095eb108fd1098baec70aaa66999a72a82f27d848b21d4a741b0c5cd4d5fff9dac89aeba122961d03a757123e9870f8acf1000020887891429ca2a3e7a7d7df7b10355165c8b9a6d0a7de8b062c4500dc4cd120c0f7418dae3d0b5781c34803fa75421c790dfe1de1834f280d7667b327f6c8cd7557e12ac3a0f93ec05c52e0493ef31a12d3d9260f79a289d6a379bc70c50841473d1a8cc81ec583e9645e07b8d9670655ba5bbcfecc6dc3966380ad8fecb17b6ba02469a020a84e18e8f84252070c13e9f1f289be54fbc481457778f616015e1327a02b140f1505eb309326d68378f8374595c849d84f4c333ec4423885143cb47bd71c5edae9be69a2ffeceb1bec9de244fbe15992b11b77c040f12bd8f6a975a44a0f90c29a9abc3d4d893927284c58754cce294529f8614dcd2aba991925fedc4ae74ffac6e333b93eb4aff0479da9a410e4450e0dd7ae4c6e2910900575da401fc07059f645e8b7e9bfdef33943054ff84011493c27b3429eaedb4ed5376441a77ed43851ad77f16f541dfd269d50d6a5f14fb0aab1cbb4c1550be97f7ab4066193c4caa773dad38014bd2092fa755c824bb5e54c4f36ffda9fcea70b9c6e693e148c151",
      +	},
      +}
      +
      +func fromHex(s string) []byte {
      +	ret, err := hex.DecodeString(s)
      +	if err != nil {
      +		panic("xts: invalid hex in test")
      +	}
      +	return ret
      +}
      +
      +func TestXTS(t *testing.T) {
      +	for i, test := range xtsTestVectors {
      +		c, err := NewCipher(aes.NewCipher, fromHex(test.key))
      +		if err != nil {
      +			t.Errorf("#%d: failed to create cipher: %s", i, err)
      +			continue
      +		}
      +		plaintext := fromHex(test.plaintext)
      +		ciphertext := make([]byte, len(plaintext))
      +		c.Encrypt(ciphertext, plaintext, test.sector)
      +
      +		expectedCiphertext := fromHex(test.ciphertext)
      +		if !bytes.Equal(ciphertext, expectedCiphertext) {
      +			t.Errorf("#%d: encrypted failed, got: %x, want: %x", i, ciphertext, expectedCiphertext)
      +			continue
      +		}
      +
      +		decrypted := make([]byte, len(ciphertext))
      +		c.Decrypt(decrypted, ciphertext, test.sector)
      +		if !bytes.Equal(decrypted, plaintext) {
      +			t.Errorf("#%d: decryption failed, got: %x, want: %x", i, decrypted, plaintext)
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/net/.gitattributes b/vendor/golang.org/x/net/.gitattributes
      new file mode 100644
      index 00000000..d2f212e5
      --- /dev/null
      +++ b/vendor/golang.org/x/net/.gitattributes
      @@ -0,0 +1,10 @@
      +# Treat all files in this repo as binary, with no git magic updating
      +# line endings. Windows users contributing to Go will need to use a
      +# modern version of git and editors capable of LF line endings.
      +#
      +# We'll prevent accidental CRLF line endings from entering the repo
      +# via the git-review gofmt checks.
      +#
      +# See golang.org/issue/9281
      +
      +* -text
      diff --git a/vendor/golang.org/x/net/.gitignore b/vendor/golang.org/x/net/.gitignore
      new file mode 100644
      index 00000000..8339fd61
      --- /dev/null
      +++ b/vendor/golang.org/x/net/.gitignore
      @@ -0,0 +1,2 @@
      +# Add no patterns to .hgignore except for files generated by the build.
      +last-change
      diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS
      new file mode 100644
      index 00000000..15167cd7
      --- /dev/null
      +++ b/vendor/golang.org/x/net/AUTHORS
      @@ -0,0 +1,3 @@
      +# This source code refers to The Go Authors for copyright purposes.
      +# The master list of authors is in the main Go distribution,
      +# visible at http://tip.golang.org/AUTHORS.
      diff --git a/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/golang.org/x/net/CONTRIBUTING.md
      new file mode 100644
      index 00000000..88dff59b
      --- /dev/null
      +++ b/vendor/golang.org/x/net/CONTRIBUTING.md
      @@ -0,0 +1,31 @@
      +# Contributing to Go
      +
      +Go is an open source project.
      +
      +It is the work of hundreds of contributors. We appreciate your help!
      +
      +
      +## Filing issues
      +
      +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
      +
      +1. What version of Go are you using (`go version`)?
      +2. What operating system and processor architecture are you using?
      +3. What did you do?
      +4. What did you expect to see?
      +5. What did you see instead?
      +
      +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
      +The gophers there will answer or ask you to file an issue if you've tripped over a bug.
      +
      +## Contributing code
      +
      +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
      +before sending patches.
      +
      +**We do not accept GitHub pull requests**
      +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
      +
      +Unless otherwise noted, the Go source files are distributed under
      +the BSD-style license found in the LICENSE file.
      +
      diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS
      new file mode 100644
      index 00000000..1c4577e9
      --- /dev/null
      +++ b/vendor/golang.org/x/net/CONTRIBUTORS
      @@ -0,0 +1,3 @@
      +# This source code was written by the Go contributors.
      +# The master list of contributors is in the main Go distribution,
      +# visible at http://tip.golang.org/CONTRIBUTORS.
      diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
      new file mode 100644
      index 00000000..6a66aea5
      --- /dev/null
      +++ b/vendor/golang.org/x/net/LICENSE
      @@ -0,0 +1,27 @@
      +Copyright (c) 2009 The Go Authors. All rights reserved.
      +
      +Redistribution and use in source and binary forms, with or without
      +modification, are permitted provided that the following conditions are
      +met:
      +
      +   * Redistributions of source code must retain the above copyright
      +notice, this list of conditions and the following disclaimer.
      +   * Redistributions in binary form must reproduce the above
      +copyright notice, this list of conditions and the following disclaimer
      +in the documentation and/or other materials provided with the
      +distribution.
      +   * Neither the name of Google Inc. nor the names of its
      +contributors may be used to endorse or promote products derived from
      +this software without specific prior written permission.
      +
      +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
      +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
      +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
      +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
      +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
      +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
      new file mode 100644
      index 00000000..73309904
      --- /dev/null
      +++ b/vendor/golang.org/x/net/PATENTS
      @@ -0,0 +1,22 @@
      +Additional IP Rights Grant (Patents)
      +
      +"This implementation" means the copyrightable works distributed by
      +Google as part of the Go project.
      +
      +Google hereby grants to You a perpetual, worldwide, non-exclusive,
      +no-charge, royalty-free, irrevocable (except as stated in this section)
      +patent license to make, have made, use, offer to sell, sell, import,
      +transfer and otherwise run, modify and propagate the contents of this
      +implementation of Go, where such license applies only to those patent
      +claims, both currently owned or controlled by Google and acquired in
      +the future, licensable by Google that are necessarily infringed by this
      +implementation of Go.  This grant does not include claims that would be
      +infringed only as a consequence of further modification of this
      +implementation.  If you or your agent or exclusive licensee institute or
      +order or agree to the institution of patent litigation against any
      +entity (including a cross-claim or counterclaim in a lawsuit) alleging
      +that this implementation of Go or any code incorporated within this
      +implementation of Go constitutes direct or contributory patent
      +infringement, or inducement of patent infringement, then any patent
      +rights granted to you under this License for this implementation of Go
      +shall terminate as of the date such litigation is filed.
      diff --git a/vendor/golang.org/x/net/README b/vendor/golang.org/x/net/README
      new file mode 100644
      index 00000000..6b13d8e5
      --- /dev/null
      +++ b/vendor/golang.org/x/net/README
      @@ -0,0 +1,3 @@
      +This repository holds supplementary Go networking libraries.
      +
      +To submit changes to this repository, see http://golang.org/doc/contribute.html.
      diff --git a/vendor/golang.org/x/net/bpf/asm.go b/vendor/golang.org/x/net/bpf/asm.go
      new file mode 100644
      index 00000000..15e21b18
      --- /dev/null
      +++ b/vendor/golang.org/x/net/bpf/asm.go
      @@ -0,0 +1,41 @@
      +// Copyright 2016 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bpf
      +
      +import "fmt"
      +
      +// Assemble converts insts into raw instructions suitable for loading
      +// into a BPF virtual machine.
      +//
      +// Currently, no optimization is attempted, the assembled program flow
      +// is exactly as provided.
      +func Assemble(insts []Instruction) ([]RawInstruction, error) {
      +	ret := make([]RawInstruction, len(insts))
      +	var err error
      +	for i, inst := range insts {
      +		ret[i], err = inst.Assemble()
      +		if err != nil {
      +			return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err)
      +		}
      +	}
      +	return ret, nil
      +}
      +
      +// Disassemble attempts to parse raw back into
      +// Instructions. Unrecognized RawInstructions are assumed to be an
      +// extension not implemented by this package, and are passed through
      +// unchanged to the output. The allDecoded value reports whether insts
      +// contains no RawInstructions.
      +func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) {
      +	insts = make([]Instruction, len(raw))
      +	allDecoded = true
      +	for i, r := range raw {
      +		insts[i] = r.Disassemble()
      +		if _, ok := insts[i].(RawInstruction); ok {
      +			allDecoded = false
      +		}
      +	}
      +	return insts, allDecoded
      +}
      diff --git a/vendor/golang.org/x/net/bpf/constants.go b/vendor/golang.org/x/net/bpf/constants.go
      new file mode 100644
      index 00000000..2c8bbab7
      --- /dev/null
      +++ b/vendor/golang.org/x/net/bpf/constants.go
      @@ -0,0 +1,215 @@
      +// Copyright 2016 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bpf
      +
      +// A Register is a register of the BPF virtual machine.
      +type Register uint16
      +
      +const (
      +	// RegA is the accumulator register. RegA is always the
      +	// destination register of ALU operations.
      +	RegA Register = iota
      +	// RegX is the indirection register, used by LoadIndirect
      +	// operations.
      +	RegX
      +)
      +
      +// An ALUOp is an arithmetic or logic operation.
      +type ALUOp uint16
      +
      +// ALU binary operation types.
      +const (
      +	ALUOpAdd ALUOp = iota << 4
      +	ALUOpSub
      +	ALUOpMul
      +	ALUOpDiv
      +	ALUOpOr
      +	ALUOpAnd
      +	ALUOpShiftLeft
      +	ALUOpShiftRight
      +	aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type.
      +	ALUOpMod
      +	ALUOpXor
      +)
      +
      +// A JumpTest is a comparison operator used in conditional jumps.
      +type JumpTest uint16
      +
      +// Supported operators for conditional jumps.
      +const (
      +	// K == A
      +	JumpEqual JumpTest = iota
      +	// K != A
      +	JumpNotEqual
      +	// K > A
      +	JumpGreaterThan
      +	// K < A
      +	JumpLessThan
      +	// K >= A
      +	JumpGreaterOrEqual
      +	// K <= A
      +	JumpLessOrEqual
      +	// K & A != 0
      +	JumpBitsSet
      +	// K & A == 0
      +	JumpBitsNotSet
      +)
      +
      +// An Extension is a function call provided by the kernel that
      +// performs advanced operations that are expensive or impossible
      +// within the BPF virtual machine.
      +//
      +// Extensions are only implemented by the Linux kernel.
      +//
      +// TODO: should we prune this list? Some of these extensions seem
      +// either broken or near-impossible to use correctly, whereas other
      +// (len, random, ifindex) are quite useful.
      +type Extension int
      +
      +// Extension functions available in the Linux kernel.
      +const (
      +	// ExtLen returns the length of the packet.
      +	ExtLen Extension = 1
      +	// ExtProto returns the packet's L3 protocol type.
      +	ExtProto = 0
      +	// ExtType returns the packet's type (skb->pkt_type in the kernel)
      +	//
      +	// TODO: better documentation. How nice an API do we want to
      +	// provide for these esoteric extensions?
      +	ExtType = 4
      +	// ExtPayloadOffset returns the offset of the packet payload, or
      +	// the first protocol header that the kernel does not know how to
      +	// parse.
      +	ExtPayloadOffset = 52
      +	// ExtInterfaceIndex returns the index of the interface on which
      +	// the packet was received.
      +	ExtInterfaceIndex = 8
      +	// ExtNetlinkAttr returns the netlink attribute of type X at
      +	// offset A.
      +	ExtNetlinkAttr = 12
      +	// ExtNetlinkAttrNested returns the nested netlink attribute of
      +	// type X at offset A.
      +	ExtNetlinkAttrNested = 16
      +	// ExtMark returns the packet's mark value.
      +	ExtMark = 20
      +	// ExtQueue returns the packet's assigned hardware queue.
      +	ExtQueue = 24
      +	// ExtLinkLayerType returns the packet's hardware address type
      +	// (e.g. Ethernet, Infiniband).
      +	ExtLinkLayerType = 28
      +	// ExtRXHash returns the packets receive hash.
      +	//
      +	// TODO: figure out what this rxhash actually is.
      +	ExtRXHash = 32
      +	// ExtCPUID returns the ID of the CPU processing the current
      +	// packet.
      +	ExtCPUID = 36
      +	// ExtVLANTag returns the packet's VLAN tag.
      +	ExtVLANTag = 44
      +	// ExtVLANTagPresent returns non-zero if the packet has a VLAN
      +	// tag.
      +	//
      +	// TODO: I think this might be a lie: it reads bit 0x1000 of the
      +	// VLAN header, which changed meaning in recent revisions of the
      +	// spec - this extension may now return meaningless information.
      +	ExtVLANTagPresent = 48
      +	// ExtVLANProto returns 0x8100 if the frame has a VLAN header,
      +	// 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some
      +	// other value if no VLAN information is present.
      +	ExtVLANProto = 60
      +	// ExtRand returns a uniformly random uint32.
      +	ExtRand = 56
      +)
      +
      +// The following gives names to various bit patterns used in opcode construction.
      +
      +const (
      +	opMaskCls uint16 = 0x7
      +	// opClsLoad masks
      +	opMaskLoadDest  = 0x01
      +	opMaskLoadWidth = 0x18
      +	opMaskLoadMode  = 0xe0
      +	// opClsALU
      +	opMaskOperandSrc = 0x08
      +	opMaskOperator   = 0xf0
      +	// opClsJump
      +	opMaskJumpConst = 0x0f
      +	opMaskJumpCond  = 0xf0
      +)
      +
      +const (
      +	// +---------------+-----------------+---+---+---+
      +	// | AddrMode (3b) | LoadWidth (2b)  | 0 | 0 | 0 |
      +	// +---------------+-----------------+---+---+---+
      +	opClsLoadA uint16 = iota
      +	// +---------------+-----------------+---+---+---+
      +	// | AddrMode (3b) | LoadWidth (2b)  | 0 | 0 | 1 |
      +	// +---------------+-----------------+---+---+---+
      +	opClsLoadX
      +	// +---+---+---+---+---+---+---+---+
      +	// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
      +	// +---+---+---+---+---+---+---+---+
      +	opClsStoreA
      +	// +---+---+---+---+---+---+---+---+
      +	// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
      +	// +---+---+---+---+---+---+---+---+
      +	opClsStoreX
      +	// +---------------+-----------------+---+---+---+
      +	// | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 |
      +	// +---------------+-----------------+---+---+---+
      +	opClsALU
      +	// +-----------------------------+---+---+---+---+
      +	// |      TestOperator (4b)      | 0 | 1 | 0 | 1 |
      +	// +-----------------------------+---+---+---+---+
      +	opClsJump
      +	// +---+-------------------------+---+---+---+---+
      +	// | 0 | 0 | 0 |   RetSrc (1b)   | 0 | 1 | 1 | 0 |
      +	// +---+-------------------------+---+---+---+---+
      +	opClsReturn
      +	// +---+-------------------------+---+---+---+---+
      +	// | 0 | 0 | 0 |  TXAorTAX (1b)  | 0 | 1 | 1 | 1 |
      +	// +---+-------------------------+---+---+---+---+
      +	opClsMisc
      +)
      +
      +const (
      +	opAddrModeImmediate uint16 = iota << 5
      +	opAddrModeAbsolute
      +	opAddrModeIndirect
      +	opAddrModeScratch
      +	opAddrModePacketLen // actually an extension, not an addressing mode.
      +	opAddrModeMemShift
      +)
      +
      +const (
      +	opLoadWidth4 uint16 = iota << 3
      +	opLoadWidth2
      +	opLoadWidth1
      +)
      +
      +// Operator defined by ALUOp*
      +
      +const (
      +	opALUSrcConstant uint16 = iota << 3
      +	opALUSrcX
      +)
      +
      +const (
      +	opJumpAlways = iota << 4
      +	opJumpEqual
      +	opJumpGT
      +	opJumpGE
      +	opJumpSet
      +)
      +
      +const (
      +	opRetSrcConstant uint16 = iota << 4
      +	opRetSrcA
      +)
      +
      +const (
      +	opMiscTAX = 0x00
      +	opMiscTXA = 0x80
      +)
      diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go
      new file mode 100644
      index 00000000..bf2564b7
      --- /dev/null
      +++ b/vendor/golang.org/x/net/bpf/doc.go
      @@ -0,0 +1,81 @@
      +// Copyright 2016 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +/*
      +
      +Package bpf implements marshaling and unmarshaling of programs for the
      +Berkeley Packet Filter virtual machine.
      +
      +BPF's main use is to specify a packet filter for network taps, so that
      +the kernel doesn't have to expensively copy every packet it sees to
      +userspace. However, it's been repurposed to other areas where running
      +user code in-kernel is needed. For example, Linux's seccomp uses BPF
      +to apply security policies to system calls. For simplicity, this
      +documentation refers only to packets, but other uses of BPF have their
      +own data payloads.
      +
      +BPF programs run in a restricted virtual machine. It has almost no
      +access to kernel functions, and while conditional branches are
      +allowed, they can only jump forwards, to guarantee that there are no
      +infinite loops.
      +
      +The virtual machine
      +
      +The BPF VM is an accumulator machine. Its main register, called
      +register A, is an implicit source and destination in all arithmetic
      +and logic operations. The machine also has 16 scratch registers for
      +temporary storage, and an indirection register (register X) for
      +indirect memory access. All registers are 32 bits wide.
      +
      +Each run of a BPF program is given one packet, which is placed in the
      +VM's read-only "main memory". LoadAbsolute and LoadIndirect
      +instructions can fetch up to 32 bits at a time into register A for
      +examination.
      +
      +The goal of a BPF program is to produce and return a verdict (uint32),
      +which tells the kernel what to do with the packet. In the context of
      +packet filtering, the returned value is the number of bytes of the
      +packet to forward to userspace, or 0 to ignore the packet. Other
      +contexts like seccomp define their own return values.
      +
      +In order to simplify programs, attempts to read past the end of the
      +packet terminate the program execution with a verdict of 0 (ignore
      +packet). This means that the vast majority of BPF programs don't need
      +to do any explicit bounds checking.
      +
      +In addition to the bytes of the packet, some BPF programs have access
      +to extensions, which are essentially calls to kernel utility
      +functions. Currently, the only extensions supported by this package
      +are the Linux packet filter extensions.
      +
      +Examples
      +
      +This packet filter selects all ARP packets.
      +
      +	bpf.Assemble([]bpf.Instruction{
      +		// Load "EtherType" field from the ethernet header.
      +		bpf.LoadAbsolute{Off: 12, Size: 2},
      +		// Skip over the next instruction if EtherType is not ARP.
      +		bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
      +		// Verdict is "send up to 4k of the packet to userspace."
      +		bpf.RetConstant{Val: 4096},
      +		// Verdict is "ignore packet."
      +		bpf.RetConstant{Val: 0},
      +	})
      +
      +This packet filter captures a random 1% sample of traffic.
      +
      +	bpf.Assemble([]bpf.Instruction{
      +		// Get a 32-bit random number from the Linux kernel.
      +		bpf.LoadExtension{Num: bpf.ExtRand},
      +		// 1% dice roll?
      +		bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2^32/100, SkipFalse: 1},
      +		// Capture.
      +		bpf.RetConstant{Val: 4096},
      +		// Ignore.
      +		bpf.RetConstant{Val: 0},
      +	})
      +
      +*/
      +package bpf // import "golang.org/x/net/bpf"
      diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go
      new file mode 100644
      index 00000000..68ae6f54
      --- /dev/null
      +++ b/vendor/golang.org/x/net/bpf/instructions.go
      @@ -0,0 +1,434 @@
      +// Copyright 2016 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bpf
      +
      +import "fmt"
      +
      +// An Instruction is one instruction executed by the BPF virtual
      +// machine.
      +type Instruction interface {
      +	// Assemble assembles the Instruction into a RawInstruction.
      +	Assemble() (RawInstruction, error)
      +}
      +
      +// A RawInstruction is a raw BPF virtual machine instruction.
      +type RawInstruction struct {
      +	// Operation to execute.
      +	Op uint16
      +	// For conditional jump instructions, the number of instructions
      +	// to skip if the condition is true/false.
      +	Jt uint8
      +	Jf uint8
      +	// Constant parameter. The meaning depends on the Op.
      +	K uint32
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil }
      +
      +// Disassemble parses ri into an Instruction and returns it. If ri is
      +// not recognized by this package, ri itself is returned.
      +func (ri RawInstruction) Disassemble() Instruction {
      +	switch ri.Op & opMaskCls {
      +	case opClsLoadA, opClsLoadX:
      +		reg := Register(ri.Op & opMaskLoadDest)
      +		sz := 0
      +		switch ri.Op & opMaskLoadWidth {
      +		case opLoadWidth4:
      +			sz = 4
      +		case opLoadWidth2:
      +			sz = 2
      +		case opLoadWidth1:
      +			sz = 1
      +		default:
      +			return ri
      +		}
      +		switch ri.Op & opMaskLoadMode {
      +		case opAddrModeImmediate:
      +			if sz != 4 {
      +				return ri
      +			}
      +			return LoadConstant{Dst: reg, Val: ri.K}
      +		case opAddrModeScratch:
      +			if sz != 4 || ri.K > 15 {
      +				return ri
      +			}
      +			return LoadScratch{Dst: reg, N: int(ri.K)}
      +		case opAddrModeAbsolute:
      +			return LoadAbsolute{Size: sz, Off: ri.K}
      +		case opAddrModeIndirect:
      +			return LoadIndirect{Size: sz, Off: ri.K}
      +		case opAddrModePacketLen:
      +			if sz != 4 {
      +				return ri
      +			}
      +			return LoadExtension{Num: ExtLen}
      +		case opAddrModeMemShift:
      +			return LoadMemShift{Off: ri.K}
      +		default:
      +			return ri
      +		}
      +
      +	case opClsStoreA:
      +		if ri.Op != opClsStoreA || ri.K > 15 {
      +			return ri
      +		}
      +		return StoreScratch{Src: RegA, N: int(ri.K)}
      +
      +	case opClsStoreX:
      +		if ri.Op != opClsStoreX || ri.K > 15 {
      +			return ri
      +		}
      +		return StoreScratch{Src: RegX, N: int(ri.K)}
      +
      +	case opClsALU:
      +		switch op := ALUOp(ri.Op & opMaskOperator); op {
      +		case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor:
      +			if ri.Op&opMaskOperandSrc != 0 {
      +				return ALUOpX{Op: op}
      +			}
      +			return ALUOpConstant{Op: op, Val: ri.K}
      +		case aluOpNeg:
      +			return NegateA{}
      +		default:
      +			return ri
      +		}
      +
      +	case opClsJump:
      +		if ri.Op&opMaskJumpConst != opClsJump {
      +			return ri
      +		}
      +		switch ri.Op & opMaskJumpCond {
      +		case opJumpAlways:
      +			return Jump{Skip: ri.K}
      +		case opJumpEqual:
      +			return JumpIf{
      +				Cond:      JumpEqual,
      +				Val:       ri.K,
      +				SkipTrue:  ri.Jt,
      +				SkipFalse: ri.Jf,
      +			}
      +		case opJumpGT:
      +			return JumpIf{
      +				Cond:      JumpGreaterThan,
      +				Val:       ri.K,
      +				SkipTrue:  ri.Jt,
      +				SkipFalse: ri.Jf,
      +			}
      +		case opJumpGE:
      +			return JumpIf{
      +				Cond:      JumpGreaterOrEqual,
      +				Val:       ri.K,
      +				SkipTrue:  ri.Jt,
      +				SkipFalse: ri.Jf,
      +			}
      +		case opJumpSet:
      +			return JumpIf{
      +				Cond:      JumpBitsSet,
      +				Val:       ri.K,
      +				SkipTrue:  ri.Jt,
      +				SkipFalse: ri.Jf,
      +			}
      +		default:
      +			return ri
      +		}
      +
      +	case opClsReturn:
      +		switch ri.Op {
      +		case opClsReturn | opRetSrcA:
      +			return RetA{}
      +		case opClsReturn | opRetSrcConstant:
      +			return RetConstant{Val: ri.K}
      +		default:
      +			return ri
      +		}
      +
      +	case opClsMisc:
      +		switch ri.Op {
      +		case opClsMisc | opMiscTAX:
      +			return TAX{}
      +		case opClsMisc | opMiscTXA:
      +			return TXA{}
      +		default:
      +			return ri
      +		}
      +
      +	default:
      +		panic("unreachable") // switch is exhaustive on the bit pattern
      +	}
      +}
      +
      +// LoadConstant loads Val into register Dst.
      +type LoadConstant struct {
      +	Dst Register
      +	Val uint32
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a LoadConstant) Assemble() (RawInstruction, error) {
      +	return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val)
      +}
      +
      +// LoadScratch loads scratch[N] into register Dst.
      +type LoadScratch struct {
      +	Dst Register
      +	N   int // 0-15
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a LoadScratch) Assemble() (RawInstruction, error) {
      +	if a.N < 0 || a.N > 15 {
      +		return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
      +	}
      +	return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N))
      +}
      +
      +// LoadAbsolute loads packet[Off:Off+Size] as an integer value into
      +// register A.
      +type LoadAbsolute struct {
      +	Off  uint32
      +	Size int // 1, 2 or 4
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a LoadAbsolute) Assemble() (RawInstruction, error) {
      +	return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off)
      +}
      +
      +// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value
      +// into register A.
      +type LoadIndirect struct {
      +	Off  uint32
      +	Size int // 1, 2 or 4
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a LoadIndirect) Assemble() (RawInstruction, error) {
      +	return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off)
      +}
      +
      +// LoadMemShift multiplies the first 4 bits of the byte at packet[Off]
      +// by 4 and stores the result in register X.
      +//
      +// This instruction is mainly useful to load into X the length of an
      +// IPv4 packet header in a single instruction, rather than have to do
      +// the arithmetic on the header's first byte by hand.
      +type LoadMemShift struct {
      +	Off uint32
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a LoadMemShift) Assemble() (RawInstruction, error) {
      +	return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off)
      +}
      +
      +// LoadExtension invokes a linux-specific extension and stores the
      +// result in register A.
      +type LoadExtension struct {
      +	Num Extension
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a LoadExtension) Assemble() (RawInstruction, error) {
      +	if a.Num == ExtLen {
      +		return assembleLoad(RegA, 4, opAddrModePacketLen, 0)
      +	}
      +	return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(-0x1000+a.Num))
      +}
      +
      +// StoreScratch stores register Src into scratch[N].
      +type StoreScratch struct {
      +	Src Register
      +	N   int // 0-15
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a StoreScratch) Assemble() (RawInstruction, error) {
      +	if a.N < 0 || a.N > 15 {
      +		return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
      +	}
      +	var op uint16
      +	switch a.Src {
      +	case RegA:
      +		op = opClsStoreA
      +	case RegX:
      +		op = opClsStoreX
      +	default:
      +		return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src)
      +	}
      +
      +	return RawInstruction{
      +		Op: op,
      +		K:  uint32(a.N),
      +	}, nil
      +}
      +
      +// ALUOpConstant executes A = A <Op> Val.
      +type ALUOpConstant struct {
      +	Op  ALUOp
      +	Val uint32
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a ALUOpConstant) Assemble() (RawInstruction, error) {
      +	return RawInstruction{
      +		Op: opClsALU | opALUSrcConstant | uint16(a.Op),
      +		K:  a.Val,
      +	}, nil
      +}
      +
      +// ALUOpX executes A = A <Op> X
      +type ALUOpX struct {
      +	Op ALUOp
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a ALUOpX) Assemble() (RawInstruction, error) {
      +	return RawInstruction{
      +		Op: opClsALU | opALUSrcX | uint16(a.Op),
      +	}, nil
      +}
      +
      +// NegateA executes A = -A.
      +type NegateA struct{}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a NegateA) Assemble() (RawInstruction, error) {
      +	return RawInstruction{
      +		Op: opClsALU | uint16(aluOpNeg),
      +	}, nil
      +}
      +
      +// Jump skips the following Skip instructions in the program.
      +type Jump struct {
      +	Skip uint32
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a Jump) Assemble() (RawInstruction, error) {
      +	return RawInstruction{
      +		Op: opClsJump | opJumpAlways,
      +		K:  a.Skip,
      +	}, nil
      +}
      +
      +// JumpIf skips the following Skip instructions in the program if A
      +// <Cond> Val is true.
      +type JumpIf struct {
      +	Cond      JumpTest
      +	Val       uint32
      +	SkipTrue  uint8
      +	SkipFalse uint8
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a JumpIf) Assemble() (RawInstruction, error) {
      +	var (
      +		cond uint16
      +		flip bool
      +	)
      +	switch a.Cond {
      +	case JumpEqual:
      +		cond = opJumpEqual
      +	case JumpNotEqual:
      +		cond, flip = opJumpEqual, true
      +	case JumpGreaterThan:
      +		cond = opJumpGT
      +	case JumpLessThan:
      +		cond, flip = opJumpGE, true
      +	case JumpGreaterOrEqual:
      +		cond = opJumpGE
      +	case JumpLessOrEqual:
      +		cond, flip = opJumpGT, true
      +	case JumpBitsSet:
      +		cond = opJumpSet
      +	case JumpBitsNotSet:
      +		cond, flip = opJumpSet, true
      +	default:
      +		return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", a.Cond)
      +	}
      +	jt, jf := a.SkipTrue, a.SkipFalse
      +	if flip {
      +		jt, jf = jf, jt
      +	}
      +	return RawInstruction{
      +		Op: opClsJump | cond,
      +		Jt: jt,
      +		Jf: jf,
      +		K:  a.Val,
      +	}, nil
      +}
      +
      +// RetA exits the BPF program, returning the value of register A.
      +type RetA struct{}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a RetA) Assemble() (RawInstruction, error) {
      +	return RawInstruction{
      +		Op: opClsReturn | opRetSrcA,
      +	}, nil
      +}
      +
      +// RetConstant exits the BPF program, returning a constant value.
      +type RetConstant struct {
      +	Val uint32
      +}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a RetConstant) Assemble() (RawInstruction, error) {
      +	return RawInstruction{
      +		Op: opClsReturn | opRetSrcConstant,
      +		K:  a.Val,
      +	}, nil
      +}
      +
      +// TXA copies the value of register X to register A.
      +type TXA struct{}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a TXA) Assemble() (RawInstruction, error) {
      +	return RawInstruction{
      +		Op: opClsMisc | opMiscTXA,
      +	}, nil
      +}
      +
      +// TAX copies the value of register A to register X.
      +type TAX struct{}
      +
      +// Assemble implements the Instruction Assemble method.
      +func (a TAX) Assemble() (RawInstruction, error) {
      +	return RawInstruction{
      +		Op: opClsMisc | opMiscTAX,
      +	}, nil
      +}
      +
      +func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) {
      +	var (
      +		cls uint16
      +		sz  uint16
      +	)
      +	switch dst {
      +	case RegA:
      +		cls = opClsLoadA
      +	case RegX:
      +		cls = opClsLoadX
      +	default:
      +		return RawInstruction{}, fmt.Errorf("invalid target register %v", dst)
      +	}
      +	switch loadSize {
      +	case 1:
      +		sz = opLoadWidth1
      +	case 2:
      +		sz = opLoadWidth2
      +	case 4:
      +		sz = opLoadWidth4
      +	default:
      +		return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz)
      +	}
      +	return RawInstruction{
      +		Op: cls | sz | mode,
      +		K:  k,
      +	}, nil
      +}
      diff --git a/vendor/golang.org/x/net/bpf/instructions_test.go b/vendor/golang.org/x/net/bpf/instructions_test.go
      new file mode 100644
      index 00000000..833d1e17
      --- /dev/null
      +++ b/vendor/golang.org/x/net/bpf/instructions_test.go
      @@ -0,0 +1,184 @@
      +// Copyright 2016 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package bpf
      +
      +import (
      +	"io/ioutil"
      +	"reflect"
      +	"strconv"
      +	"strings"
      +	"testing"
      +)
      +
      +// This is a direct translation of the program in
      +// testdata/all_instructions.txt.
      +var allInstructions = []Instruction{
      +	LoadConstant{Dst: RegA, Val: 42},
      +	LoadConstant{Dst: RegX, Val: 42},
      +
      +	LoadScratch{Dst: RegA, N: 3},
      +	LoadScratch{Dst: RegX, N: 3},
      +
      +	LoadAbsolute{Off: 42, Size: 1},
      +	LoadAbsolute{Off: 42, Size: 2},
      +	LoadAbsolute{Off: 42, Size: 4},
      +
      +	LoadIndirect{Off: 42, Size: 1},
      +	LoadIndirect{Off: 42, Size: 2},
      +	LoadIndirect{Off: 42, Size: 4},
      +
      +	LoadMemShift{Off: 42},
      +
      +	LoadExtension{Num: ExtLen},
      +	LoadExtension{Num: ExtProto},
      +	LoadExtension{Num: ExtType},
      +	LoadExtension{Num: ExtRand},
      +
      +	StoreScratch{Src: RegA, N: 3},
      +	StoreScratch{Src: RegX, N: 3},
      +
      +	ALUOpConstant{Op: ALUOpAdd, Val: 42},
      +	ALUOpConstant{Op: ALUOpSub, Val: 42},
      +	ALUOpConstant{Op: ALUOpMul, Val: 42},
      +	ALUOpConstant{Op: ALUOpDiv, Val: 42},
      +	ALUOpConstant{Op: ALUOpOr, Val: 42},
      +	ALUOpConstant{Op: ALUOpAnd, Val: 42},
      +	ALUOpConstant{Op: ALUOpShiftLeft, Val: 42},
      +	ALUOpConstant{Op: ALUOpShiftRight, Val: 42},
      +	ALUOpConstant{Op: ALUOpMod, Val: 42},
      +	ALUOpConstant{Op: ALUOpXor, Val: 42},
      +
      +	ALUOpX{Op: ALUOpAdd},
      +	ALUOpX{Op: ALUOpSub},
      +	ALUOpX{Op: ALUOpMul},
      +	ALUOpX{Op: ALUOpDiv},
      +	ALUOpX{Op: ALUOpOr},
      +	ALUOpX{Op: ALUOpAnd},
      +	ALUOpX{Op: ALUOpShiftLeft},
      +	ALUOpX{Op: ALUOpShiftRight},
      +	ALUOpX{Op: ALUOpMod},
      +	ALUOpX{Op: ALUOpXor},
      +
      +	NegateA{},
      +
      +	Jump{Skip: 10},
      +	JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9},
      +	JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8},
      +	JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7},
      +	JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6},
      +	JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5},
      +	JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4},
      +	JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3},
      +
      +	TAX{},
      +	TXA{},
      +
      +	RetA{},
      +	RetConstant{Val: 42},
      +}
      +var allInstructionsExpected = "testdata/all_instructions.bpf"
      +
      +// Check that we produce the same output as the canonical bpf_asm
      +// linux kernel tool.
      +func TestInterop(t *testing.T) {
      +	out, err := Assemble(allInstructions)
      +	if err != nil {
      +		t.Fatalf("assembly of allInstructions program failed: %s", err)
      +	}
      +	t.Logf("Assembled program is %d instructions long", len(out))
      +
      +	bs, err := ioutil.ReadFile(allInstructionsExpected)
      +	if err != nil {
      +		t.Fatalf("reading %s: %s", allInstructionsExpected, err)
      +	}
      +	// First statement is the number of statements, last statement is
      +	// empty. We just ignore both and rely on slice length.
      +	stmts := strings.Split(string(bs), ",")
      +	if len(stmts)-2 != len(out) {
      +		t.Fatalf("test program lengths don't match: %s has %d, Go implementation has %d", allInstructionsExpected, len(stmts)-2, len(allInstructions))
      +	}
      +
      +	for i, stmt := range stmts[1 : len(stmts)-2] {
      +		nums := strings.Split(stmt, " ")
      +		if len(nums) != 4 {
      +			t.Fatalf("malformed instruction %d in %s: %s", i+1, allInstructionsExpected, stmt)
      +		}
      +
      +		actual := out[i]
      +
      +		op, err := strconv.ParseUint(nums[0], 10, 16)
      +		if err != nil {
      +			t.Fatalf("malformed opcode %s in instruction %d of %s", nums[0], i+1, allInstructionsExpected)
      +		}
      +		if actual.Op != uint16(op) {
      +			t.Errorf("opcode mismatch on instruction %d (%#v): got 0x%02x, want 0x%02x", i+1, allInstructions[i], actual.Op, op)
      +		}
      +
      +		jt, err := strconv.ParseUint(nums[1], 10, 8)
      +		if err != nil {
      +			t.Fatalf("malformed jt offset %s in instruction %d of %s", nums[1], i+1, allInstructionsExpected)
      +		}
      +		if actual.Jt != uint8(jt) {
      +			t.Errorf("jt mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jt, jt)
      +		}
      +
      +		jf, err := strconv.ParseUint(nums[2], 10, 8)
      +		if err != nil {
      +			t.Fatalf("malformed jf offset %s in instruction %d of %s", nums[2], i+1, allInstructionsExpected)
      +		}
      +		if actual.Jf != uint8(jf) {
      +			t.Errorf("jf mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jf, jf)
      +		}
      +
      +		k, err := strconv.ParseUint(nums[3], 10, 32)
      +		if err != nil {
      +			t.Fatalf("malformed constant %s in instruction %d of %s", nums[3], i+1, allInstructionsExpected)
      +		}
      +		if actual.K != uint32(k) {
      +			t.Errorf("constant mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.K, k)
      +		}
      +	}
      +}
      +
      +// Check that assembly and disassembly match each other.
      +//
      +// Because we offer "fake" jump conditions that don't appear in the
      +// machine code, disassembly won't be a 1:1 match with the original
      +// source, although the behavior will be identical. However,
      +// reassembling the disassembly should produce an identical program.
      +func TestAsmDisasm(t *testing.T) {
      +	prog1, err := Assemble(allInstructions)
      +	if err != nil {
      +		t.Fatalf("assembly of allInstructions program failed: %s", err)
      +	}
      +	t.Logf("Assembled program is %d instructions long", len(prog1))
      +
      +	src, allDecoded := Disassemble(prog1)
      +	if !allDecoded {
      +		t.Errorf("Disassemble(Assemble(allInstructions)) produced unrecognized instructions:")
      +		for i, inst := range src {
      +			if r, ok := inst.(RawInstruction); ok {
      +				t.Logf("  insn %d, %#v --> %#v", i+1, allInstructions[i], r)
      +			}
      +		}
      +	}
      +
      +	prog2, err := Assemble(src)
      +	if err != nil {
      +		t.Fatalf("assembly of Disassemble(Assemble(allInstructions)) failed: %s", err)
      +	}
      +
      +	if len(prog2) != len(prog1) {
      +		t.Fatalf("disassembly changed program size: %d insns before, %d insns after", len(prog1), len(prog2))
      +	}
      +	if !reflect.DeepEqual(prog1, prog2) {
      +		t.Errorf("program mutated by disassembly:")
      +		for i := range prog2 {
      +			if !reflect.DeepEqual(prog1[i], prog2[i]) {
      +				t.Logf("  insn %d, s: %#v, p1: %#v, p2: %#v", i+1, allInstructions[i], prog1[i], prog2[i])
      +			}
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf b/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf
      new file mode 100644
      index 00000000..f8714406
      --- /dev/null
      +++ b/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf
      @@ -0,0 +1 @@
      +50,0 0 0 42,1 0 0 42,96 0 0 3,97 0 0 3,48 0 0 42,40 0 0 42,32 0 0 42,80 0 0 42,72 0 0 42,64 0 0 42,177 0 0 42,128 0 0 0,32 0 0 4294963200,32 0 0 4294963204,32 0 0 4294963256,2 0 0 3,3 0 0 3,4 0 0 42,20 0 0 42,36 0 0 42,52 0 0 42,68 0 0 42,84 0 0 42,100 0 0 42,116 0 0 42,148 0 0 42,164 0 0 42,12 0 0 0,28 0 0 0,44 0 0 0,60 0 0 0,76 0 0 0,92 0 0 0,108 0 0 0,124 0 0 0,156 0 0 0,172 0 0 0,132 0 0 0,5 0 0 10,21 8 9 42,21 0 8 42,53 0 7 42,37 0 6 42,37 4 5 42,53 3 4 42,69 2 3 42,7 0 0 0,135 0 0 0,22 0 0 0,6 0 0 0,
      diff --git a/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt b/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt
      new file mode 100644
      index 00000000..30455015
      --- /dev/null
      +++ b/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt
      @@ -0,0 +1,79 @@
      +# This filter is compiled to all_instructions.bpf by the `bpf_asm`
      +# tool, which can be found in the linux kernel source tree under
      +# tools/net.
      +
      +# Load immediate
      +ld #42
      +ldx #42
      +
      +# Load scratch
      +ld M[3]
      +ldx M[3]
      +
      +# Load absolute
      +ldb [42]
      +ldh [42]
      +ld [42]
      +
      +# Load indirect
      +ldb [x + 42]
      +ldh [x + 42]
      +ld [x + 42]
      +
      +# Load IPv4 header length
      +ldx 4*([42]&0xf)
      +
      +# Run extension function
      +ld #len
      +ld #proto
      +ld #type
      +ld #rand
      +
      +# Store scratch
      +st M[3]
      +stx M[3]
      +
      +# A <op> constant
      +add #42
      +sub #42
      +mul #42
      +div #42
      +or #42
      +and #42
      +lsh #42
      +rsh #42
      +mod #42
      +xor #42
      +
      +# A <op> X
      +add x
      +sub x
      +mul x
      +div x
      +or x
      +and x
      +lsh x
      +rsh x
      +mod x
      +xor x
      +
      +# !A
      +neg
      +
      +# Jumps
      +ja end
      +jeq #42,prev,end
      +jne #42,end
      +jlt #42,end
      +jle #42,end
      +jgt #42,prev,end
      +jge #42,prev,end
      +jset #42,prev,end
      +
      +# Register transfers
      +tax
      +txa
      +
      +# Returns
      +prev: ret a
      +end: ret #42
      diff --git a/vendor/golang.org/x/net/codereview.cfg b/vendor/golang.org/x/net/codereview.cfg
      new file mode 100644
      index 00000000..3f8b14b6
      --- /dev/null
      +++ b/vendor/golang.org/x/net/codereview.cfg
      @@ -0,0 +1 @@
      +issuerepo: golang/go
      diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
      index 56efb95b..7350678d 100644
      --- a/vendor/golang.org/x/net/context/context.go
      +++ b/vendor/golang.org/x/net/context/context.go
      @@ -34,7 +34,7 @@
       //
       // See http://blog.golang.org/context for example code for a server that uses
       // Contexts.
      -package context
      +package context // import "golang.org/x/net/context"
       
       import "time"
       
      diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
      index e35860a7..a7ed8d81 100644
      --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
      +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
      @@ -3,7 +3,7 @@
       // license that can be found in the LICENSE file.
       
       // Package ctxhttp provides helper functions for performing context-aware HTTP requests.
      -package ctxhttp
      +package ctxhttp // import "golang.org/x/net/context/ctxhttp"
       
       import (
       	"io"
      diff --git a/vendor/golang.org/x/net/dict/dict.go b/vendor/golang.org/x/net/dict/dict.go
      new file mode 100644
      index 00000000..58fef89e
      --- /dev/null
      +++ b/vendor/golang.org/x/net/dict/dict.go
      @@ -0,0 +1,210 @@
      +// Copyright 2010 The Go Authors.  All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package dict implements the Dictionary Server Protocol
      +// as defined in RFC 2229.
      +package dict // import "golang.org/x/net/dict"
      +
      +import (
      +	"net/textproto"
      +	"strconv"
      +	"strings"
      +)
      +
      +// A Client represents a client connection to a dictionary server.
      +type Client struct {
      +	text *textproto.Conn
      +}
      +
      +// Dial returns a new client connected to a dictionary server at
      +// addr on the given network.
      +func Dial(network, addr string) (*Client, error) {
      +	text, err := textproto.Dial(network, addr)
      +	if err != nil {
      +		return nil, err
      +	}
      +	_, _, err = text.ReadCodeLine(220)
      +	if err != nil {
      +		text.Close()
      +		return nil, err
      +	}
      +	return &Client{text: text}, nil
      +}
      +
      +// Close closes the connection to the dictionary server.
      +func (c *Client) Close() error {
      +	return c.text.Close()
      +}
      +
      +// A Dict represents a dictionary available on the server.
      +type Dict struct {
      +	Name string // short name of dictionary
      +	Desc string // long description
      +}
      +
      +// Dicts returns a list of the dictionaries available on the server.
      +func (c *Client) Dicts() ([]Dict, error) {
      +	id, err := c.text.Cmd("SHOW DB")
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	c.text.StartResponse(id)
      +	defer c.text.EndResponse(id)
      +
      +	_, _, err = c.text.ReadCodeLine(110)
      +	if err != nil {
      +		return nil, err
      +	}
      +	lines, err := c.text.ReadDotLines()
      +	if err != nil {
      +		return nil, err
      +	}
      +	_, _, err = c.text.ReadCodeLine(250)
      +
      +	dicts := make([]Dict, len(lines))
      +	for i := range dicts {
      +		d := &dicts[i]
      +		a, _ := fields(lines[i])
      +		if len(a) < 2 {
      +			return nil, textproto.ProtocolError("invalid dictionary: " + lines[i])
      +		}
      +		d.Name = a[0]
      +		d.Desc = a[1]
      +	}
      +	return dicts, err
      +}
      +
      +// A Defn represents a definition.
      +type Defn struct {
      +	Dict Dict   // Dict where definition was found
      +	Word string // Word being defined
      +	Text []byte // Definition text, typically multiple lines
      +}
      +
      +// Define requests the definition of the given word.
      +// The argument dict names the dictionary to use,
      +// the Name field of a Dict returned by Dicts.
      +//
      +// The special dictionary name "*" means to look in all the
      +// server's dictionaries.
      +// The special dictionary name "!" means to look in all the
      +// server's dictionaries in turn, stopping after finding the word
      +// in one of them.
      +func (c *Client) Define(dict, word string) ([]*Defn, error) {
      +	id, err := c.text.Cmd("DEFINE %s %q", dict, word)
      +	if err != nil {
      +		return nil, err
      +	}
      +
      +	c.text.StartResponse(id)
      +	defer c.text.EndResponse(id)
      +
      +	_, line, err := c.text.ReadCodeLine(150)
      +	if err != nil {
      +		return nil, err
      +	}
      +	a, _ := fields(line)
      +	if len(a) < 1 {
      +		return nil, textproto.ProtocolError("malformed response: " + line)
      +	}
      +	n, err := strconv.Atoi(a[0])
      +	if err != nil {
      +		return nil, textproto.ProtocolError("invalid definition count: " + a[0])
      +	}
      +	def := make([]*Defn, n)
      +	for i := 0; i < n; i++ {
      +		_, line, err = c.text.ReadCodeLine(151)
      +		if err != nil {
      +			return nil, err
      +		}
      +		a, _ := fields(line)
      +		if len(a) < 3 {
      +			// skip it, to keep protocol in sync
      +			i--
      +			n--
      +			def = def[0:n]
      +			continue
      +		}
      +		d := &Defn{Word: a[0], Dict: Dict{a[1], a[2]}}
      +		d.Text, err = c.text.ReadDotBytes()
      +		if err != nil {
      +			return nil, err
      +		}
      +		def[i] = d
      +	}
      +	_, _, err = c.text.ReadCodeLine(250)
      +	return def, err
      +}
      +
      +// Fields returns the fields in s.
      +// Fields are space separated unquoted words
      +// or quoted with single or double quote.
      +func fields(s string) ([]string, error) {
      +	var v []string
      +	i := 0
      +	for {
      +		for i < len(s) && (s[i] == ' ' || s[i] == '\t') {
      +			i++
      +		}
      +		if i >= len(s) {
      +			break
      +		}
      +		if s[i] == '"' || s[i] == '\'' {
      +			q := s[i]
      +			// quoted string
      +			var j int
      +			for j = i + 1; ; j++ {
      +				if j >= len(s) {
      +					return nil, textproto.ProtocolError("malformed quoted string")
      +				}
      +				if s[j] == '\\' {
      +					j++
      +					continue
      +				}
      +				if s[j] == q {
      +					j++
      +					break
      +				}
      +			}
      +			v = append(v, unquote(s[i+1:j-1]))
      +			i = j
      +		} else {
      +			// atom
      +			var j int
      +			for j = i; j < len(s); j++ {
      +				if s[j] == ' ' || s[j] == '\t' || s[j] == '\\' || s[j] == '"' || s[j] == '\'' {
      +					break
      +				}
      +			}
      +			v = append(v, s[i:j])
      +			i = j
      +		}
      +		if i < len(s) {
      +			c := s[i]
      +			if c != ' ' && c != '\t' {
      +				return nil, textproto.ProtocolError("quotes not on word boundaries")
      +			}
      +		}
      +	}
      +	return v, nil
      +}
      +
      +func unquote(s string) string {
      +	if strings.Index(s, "\\") < 0 {
      +		return s
      +	}
      +	b := []byte(s)
      +	w := 0
      +	for r := 0; r < len(b); r++ {
      +		c := b[r]
      +		if c == '\\' {
      +			r++
      +			c = b[r]
      +		}
      +		b[w] = c
      +		w++
      +	}
      +	return string(b[0:w])
      +}
      diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go
      new file mode 100644
      index 00000000..cd0a8ac1
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/atom/atom.go
      @@ -0,0 +1,78 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package atom provides integer codes (also known as atoms) for a fixed set of
      +// frequently occurring HTML strings: tag names and attribute keys such as "p"
      +// and "id".
      +//
      +// Sharing an atom's name between all elements with the same tag can result in
      +// fewer string allocations when tokenizing and parsing HTML. Integer
      +// comparisons are also generally faster than string comparisons.
      +//
      +// The value of an atom's particular code is not guaranteed to stay the same
      +// between versions of this package. Neither is any ordering guaranteed:
      +// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
      +// be dense. The only guarantees are that e.g. looking up "div" will yield
      +// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
      +package atom // import "golang.org/x/net/html/atom"
      +
      +// Atom is an integer code for a string. The zero value maps to "".
      +type Atom uint32
      +
      +// String returns the atom's name.
      +func (a Atom) String() string {
      +	start := uint32(a >> 8)
      +	n := uint32(a & 0xff)
      +	if start+n > uint32(len(atomText)) {
      +		return ""
      +	}
      +	return atomText[start : start+n]
      +}
      +
      +func (a Atom) string() string {
      +	return atomText[a>>8 : a>>8+a&0xff]
      +}
      +
      +// fnv computes the FNV hash with an arbitrary starting value h.
      +func fnv(h uint32, s []byte) uint32 {
      +	for i := range s {
      +		h ^= uint32(s[i])
      +		h *= 16777619
      +	}
      +	return h
      +}
      +
      +func match(s string, t []byte) bool {
      +	for i, c := range t {
      +		if s[i] != c {
      +			return false
      +		}
      +	}
      +	return true
      +}
      +
      +// Lookup returns the atom whose name is s. It returns zero if there is no
      +// such atom. The lookup is case sensitive.
      +func Lookup(s []byte) Atom {
      +	if len(s) == 0 || len(s) > maxAtomLen {
      +		return 0
      +	}
      +	h := fnv(hash0, s)
      +	if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
      +		return a
      +	}
      +	if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
      +		return a
      +	}
      +	return 0
      +}
      +
      +// String returns a string whose contents are equal to s. In that sense, it is
      +// equivalent to string(s) but may be more efficient.
      +func String(s []byte) string {
      +	if a := Lookup(s); a != 0 {
      +		return a.String()
      +	}
      +	return string(s)
      +}
      diff --git a/vendor/golang.org/x/net/html/atom/atom_test.go b/vendor/golang.org/x/net/html/atom/atom_test.go
      new file mode 100644
      index 00000000..6e33704d
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/atom/atom_test.go
      @@ -0,0 +1,109 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package atom
      +
      +import (
      +	"sort"
      +	"testing"
      +)
      +
      +func TestKnown(t *testing.T) {
      +	for _, s := range testAtomList {
      +		if atom := Lookup([]byte(s)); atom.String() != s {
      +			t.Errorf("Lookup(%q) = %#x (%q)", s, uint32(atom), atom.String())
      +		}
      +	}
      +}
      +
      +func TestHits(t *testing.T) {
      +	for _, a := range table {
      +		if a == 0 {
      +			continue
      +		}
      +		got := Lookup([]byte(a.String()))
      +		if got != a {
      +			t.Errorf("Lookup(%q) = %#x, want %#x", a.String(), uint32(got), uint32(a))
      +		}
      +	}
      +}
      +
      +func TestMisses(t *testing.T) {
      +	testCases := []string{
      +		"",
      +		"\x00",
      +		"\xff",
      +		"A",
      +		"DIV",
      +		"Div",
      +		"dIV",
      +		"aa",
      +		"a\x00",
      +		"ab",
      +		"abb",
      +		"abbr0",
      +		"abbr ",
      +		" abbr",
      +		" a",
      +		"acceptcharset",
      +		"acceptCharset",
      +		"accept_charset",
      +		"h0",
      +		"h1h2",
      +		"h7",
      +		"onClick",
      +		"λ",
      +		// The following string has the same hash (0xa1d7fab7) as "onmouseover".
      +		"\x00\x00\x00\x00\x00\x50\x18\xae\x38\xd0\xb7",
      +	}
      +	for _, tc := range testCases {
      +		got := Lookup([]byte(tc))
      +		if got != 0 {
      +			t.Errorf("Lookup(%q): got %d, want 0", tc, got)
      +		}
      +	}
      +}
      +
      +func TestForeignObject(t *testing.T) {
      +	const (
      +		afo = Foreignobject
      +		afO = ForeignObject
      +		sfo = "foreignobject"
      +		sfO = "foreignObject"
      +	)
      +	if got := Lookup([]byte(sfo)); got != afo {
      +		t.Errorf("Lookup(%q): got %#v, want %#v", sfo, got, afo)
      +	}
      +	if got := Lookup([]byte(sfO)); got != afO {
      +		t.Errorf("Lookup(%q): got %#v, want %#v", sfO, got, afO)
      +	}
      +	if got := afo.String(); got != sfo {
      +		t.Errorf("Atom(%#v).String(): got %q, want %q", afo, got, sfo)
      +	}
      +	if got := afO.String(); got != sfO {
      +		t.Errorf("Atom(%#v).String(): got %q, want %q", afO, got, sfO)
      +	}
      +}
      +
      +func BenchmarkLookup(b *testing.B) {
      +	sortedTable := make([]string, 0, len(table))
      +	for _, a := range table {
      +		if a != 0 {
      +			sortedTable = append(sortedTable, a.String())
      +		}
      +	}
      +	sort.Strings(sortedTable)
      +
      +	x := make([][]byte, 1000)
      +	for i := range x {
      +		x[i] = []byte(sortedTable[i%len(sortedTable)])
      +	}
      +
      +	b.ResetTimer()
      +	for i := 0; i < b.N; i++ {
      +		for _, s := range x {
      +			Lookup(s)
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go
      new file mode 100644
      index 00000000..6bfa8660
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/atom/gen.go
      @@ -0,0 +1,648 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// +build ignore
      +
      +package main
      +
      +// This program generates table.go and table_test.go.
      +// Invoke as
      +//
      +//	go run gen.go |gofmt >table.go
      +//	go run gen.go -test |gofmt >table_test.go
      +
      +import (
      +	"flag"
      +	"fmt"
      +	"math/rand"
      +	"os"
      +	"sort"
      +	"strings"
      +)
      +
      +// identifier converts s to a Go exported identifier.
      +// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
      +func identifier(s string) string {
      +	b := make([]byte, 0, len(s))
      +	cap := true
      +	for _, c := range s {
      +		if c == '-' {
      +			cap = true
      +			continue
      +		}
      +		if cap && 'a' <= c && c <= 'z' {
      +			c -= 'a' - 'A'
      +		}
      +		cap = false
      +		b = append(b, byte(c))
      +	}
      +	return string(b)
      +}
      +
      +var test = flag.Bool("test", false, "generate table_test.go")
      +
      +func main() {
      +	flag.Parse()
      +
      +	var all []string
      +	all = append(all, elements...)
      +	all = append(all, attributes...)
      +	all = append(all, eventHandlers...)
      +	all = append(all, extra...)
      +	sort.Strings(all)
      +
      +	if *test {
      +		fmt.Printf("// generated by go run gen.go -test; DO NOT EDIT\n\n")
      +		fmt.Printf("package atom\n\n")
      +		fmt.Printf("var testAtomList = []string{\n")
      +		for _, s := range all {
      +			fmt.Printf("\t%q,\n", s)
      +		}
      +		fmt.Printf("}\n")
      +		return
      +	}
      +
      +	// uniq - lists have dups
      +	// compute max len too
      +	maxLen := 0
      +	w := 0
      +	for _, s := range all {
      +		if w == 0 || all[w-1] != s {
      +			if maxLen < len(s) {
      +				maxLen = len(s)
      +			}
      +			all[w] = s
      +			w++
      +		}
      +	}
      +	all = all[:w]
      +
      +	// Find hash that minimizes table size.
      +	var best *table
      +	for i := 0; i < 1000000; i++ {
      +		if best != nil && 1<<(best.k-1) < len(all) {
      +			break
      +		}
      +		h := rand.Uint32()
      +		for k := uint(0); k <= 16; k++ {
      +			if best != nil && k >= best.k {
      +				break
      +			}
      +			var t table
      +			if t.init(h, k, all) {
      +				best = &t
      +				break
      +			}
      +		}
      +	}
      +	if best == nil {
      +		fmt.Fprintf(os.Stderr, "failed to construct string table\n")
      +		os.Exit(1)
      +	}
      +
      +	// Lay out strings, using overlaps when possible.
      +	layout := append([]string{}, all...)
      +
      +	// Remove strings that are substrings of other strings
      +	for changed := true; changed; {
      +		changed = false
      +		for i, s := range layout {
      +			if s == "" {
      +				continue
      +			}
      +			for j, t := range layout {
      +				if i != j && t != "" && strings.Contains(s, t) {
      +					changed = true
      +					layout[j] = ""
      +				}
      +			}
      +		}
      +	}
      +
      +	// Join strings where one suffix matches another prefix.
      +	for {
      +		// Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
      +		// maximizing overlap length k.
      +		besti := -1
      +		bestj := -1
      +		bestk := 0
      +		for i, s := range layout {
      +			if s == "" {
      +				continue
      +			}
      +			for j, t := range layout {
      +				if i == j {
      +					continue
      +				}
      +				for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
      +					if s[len(s)-k:] == t[:k] {
      +						besti = i
      +						bestj = j
      +						bestk = k
      +					}
      +				}
      +			}
      +		}
      +		if bestk > 0 {
      +			layout[besti] += layout[bestj][bestk:]
      +			layout[bestj] = ""
      +			continue
      +		}
      +		break
      +	}
      +
      +	text := strings.Join(layout, "")
      +
      +	atom := map[string]uint32{}
      +	for _, s := range all {
      +		off := strings.Index(text, s)
      +		if off < 0 {
      +			panic("lost string " + s)
      +		}
      +		atom[s] = uint32(off<<8 | len(s))
      +	}
      +
      +	// Generate the Go code.
      +	fmt.Printf("// generated by go run gen.go; DO NOT EDIT\n\n")
      +	fmt.Printf("package atom\n\nconst (\n")
      +	for _, s := range all {
      +		fmt.Printf("\t%s Atom = %#x\n", identifier(s), atom[s])
      +	}
      +	fmt.Printf(")\n\n")
      +
      +	fmt.Printf("const hash0 = %#x\n\n", best.h0)
      +	fmt.Printf("const maxAtomLen = %d\n\n", maxLen)
      +
      +	fmt.Printf("var table = [1<<%d]Atom{\n", best.k)
      +	for i, s := range best.tab {
      +		if s == "" {
      +			continue
      +		}
      +		fmt.Printf("\t%#x: %#x, // %s\n", i, atom[s], s)
      +	}
      +	fmt.Printf("}\n")
      +	datasize := (1 << best.k) * 4
      +
      +	fmt.Printf("const atomText =\n")
      +	textsize := len(text)
      +	for len(text) > 60 {
      +		fmt.Printf("\t%q +\n", text[:60])
      +		text = text[60:]
      +	}
      +	fmt.Printf("\t%q\n\n", text)
      +
      +	fmt.Fprintf(os.Stderr, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
      +}
      +
      +type byLen []string
      +
      +func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
      +func (x byLen) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
      +func (x byLen) Len() int           { return len(x) }
      +
      +// fnv computes the FNV hash with an arbitrary starting value h.
      +func fnv(h uint32, s string) uint32 {
      +	for i := 0; i < len(s); i++ {
      +		h ^= uint32(s[i])
      +		h *= 16777619
      +	}
      +	return h
      +}
      +
      +// A table represents an attempt at constructing the lookup table.
      +// The lookup table uses cuckoo hashing, meaning that each string
      +// can be found in one of two positions.
      +type table struct {
      +	h0   uint32
      +	k    uint
      +	mask uint32
      +	tab  []string
      +}
      +
      +// hash returns the two hashes for s.
      +func (t *table) hash(s string) (h1, h2 uint32) {
      +	h := fnv(t.h0, s)
      +	h1 = h & t.mask
      +	h2 = (h >> 16) & t.mask
      +	return
      +}
      +
      +// init initializes the table with the given parameters.
      +// h0 is the initial hash value,
      +// k is the number of bits of hash value to use, and
      +// x is the list of strings to store in the table.
      +// init returns false if the table cannot be constructed.
      +func (t *table) init(h0 uint32, k uint, x []string) bool {
      +	t.h0 = h0
      +	t.k = k
      +	t.tab = make([]string, 1<<k)
      +	t.mask = 1<<k - 1
      +	for _, s := range x {
      +		if !t.insert(s) {
      +			return false
      +		}
      +	}
      +	return true
      +}
      +
      +// insert inserts s in the table.
      +func (t *table) insert(s string) bool {
      +	h1, h2 := t.hash(s)
      +	if t.tab[h1] == "" {
      +		t.tab[h1] = s
      +		return true
      +	}
      +	if t.tab[h2] == "" {
      +		t.tab[h2] = s
      +		return true
      +	}
      +	if t.push(h1, 0) {
      +		t.tab[h1] = s
      +		return true
      +	}
      +	if t.push(h2, 0) {
      +		t.tab[h2] = s
      +		return true
      +	}
      +	return false
      +}
      +
      +// push attempts to push aside the entry in slot i.
      +func (t *table) push(i uint32, depth int) bool {
      +	if depth > len(t.tab) {
      +		return false
      +	}
      +	s := t.tab[i]
      +	h1, h2 := t.hash(s)
      +	j := h1 + h2 - i
      +	if t.tab[j] != "" && !t.push(j, depth+1) {
      +		return false
      +	}
      +	t.tab[j] = s
      +	return true
      +}
      +
      +// The lists of element names and attribute keys were taken from
      +// https://html.spec.whatwg.org/multipage/indices.html#index
      +// as of the "HTML Living Standard - Last Updated 21 February 2015" version.
      +
      +var elements = []string{
      +	"a",
      +	"abbr",
      +	"address",
      +	"area",
      +	"article",
      +	"aside",
      +	"audio",
      +	"b",
      +	"base",
      +	"bdi",
      +	"bdo",
      +	"blockquote",
      +	"body",
      +	"br",
      +	"button",
      +	"canvas",
      +	"caption",
      +	"cite",
      +	"code",
      +	"col",
      +	"colgroup",
      +	"command",
      +	"data",
      +	"datalist",
      +	"dd",
      +	"del",
      +	"details",
      +	"dfn",
      +	"dialog",
      +	"div",
      +	"dl",
      +	"dt",
      +	"em",
      +	"embed",
      +	"fieldset",
      +	"figcaption",
      +	"figure",
      +	"footer",
      +	"form",
      +	"h1",
      +	"h2",
      +	"h3",
      +	"h4",
      +	"h5",
      +	"h6",
      +	"head",
      +	"header",
      +	"hgroup",
      +	"hr",
      +	"html",
      +	"i",
      +	"iframe",
      +	"img",
      +	"input",
      +	"ins",
      +	"kbd",
      +	"keygen",
      +	"label",
      +	"legend",
      +	"li",
      +	"link",
      +	"map",
      +	"mark",
      +	"menu",
      +	"menuitem",
      +	"meta",
      +	"meter",
      +	"nav",
      +	"noscript",
      +	"object",
      +	"ol",
      +	"optgroup",
      +	"option",
      +	"output",
      +	"p",
      +	"param",
      +	"pre",
      +	"progress",
      +	"q",
      +	"rp",
      +	"rt",
      +	"ruby",
      +	"s",
      +	"samp",
      +	"script",
      +	"section",
      +	"select",
      +	"small",
      +	"source",
      +	"span",
      +	"strong",
      +	"style",
      +	"sub",
      +	"summary",
      +	"sup",
      +	"table",
      +	"tbody",
      +	"td",
      +	"template",
      +	"textarea",
      +	"tfoot",
      +	"th",
      +	"thead",
      +	"time",
      +	"title",
      +	"tr",
      +	"track",
      +	"u",
      +	"ul",
      +	"var",
      +	"video",
      +	"wbr",
      +}
      +
      +// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
      +
      +var attributes = []string{
      +	"abbr",
      +	"accept",
      +	"accept-charset",
      +	"accesskey",
      +	"action",
      +	"alt",
      +	"async",
      +	"autocomplete",
      +	"autofocus",
      +	"autoplay",
      +	"challenge",
      +	"charset",
      +	"checked",
      +	"cite",
      +	"class",
      +	"cols",
      +	"colspan",
      +	"command",
      +	"content",
      +	"contenteditable",
      +	"contextmenu",
      +	"controls",
      +	"coords",
      +	"crossorigin",
      +	"data",
      +	"datetime",
      +	"default",
      +	"defer",
      +	"dir",
      +	"dirname",
      +	"disabled",
      +	"download",
      +	"draggable",
      +	"dropzone",
      +	"enctype",
      +	"for",
      +	"form",
      +	"formaction",
      +	"formenctype",
      +	"formmethod",
      +	"formnovalidate",
      +	"formtarget",
      +	"headers",
      +	"height",
      +	"hidden",
      +	"high",
      +	"href",
      +	"hreflang",
      +	"http-equiv",
      +	"icon",
      +	"id",
      +	"inputmode",
      +	"ismap",
      +	"itemid",
      +	"itemprop",
      +	"itemref",
      +	"itemscope",
      +	"itemtype",
      +	"keytype",
      +	"kind",
      +	"label",
      +	"lang",
      +	"list",
      +	"loop",
      +	"low",
      +	"manifest",
      +	"max",
      +	"maxlength",
      +	"media",
      +	"mediagroup",
      +	"method",
      +	"min",
      +	"minlength",
      +	"multiple",
      +	"muted",
      +	"name",
      +	"novalidate",
      +	"open",
      +	"optimum",
      +	"pattern",
      +	"ping",
      +	"placeholder",
      +	"poster",
      +	"preload",
      +	"radiogroup",
      +	"readonly",
      +	"rel",
      +	"required",
      +	"reversed",
      +	"rows",
      +	"rowspan",
      +	"sandbox",
      +	"spellcheck",
      +	"scope",
      +	"scoped",
      +	"seamless",
      +	"selected",
      +	"shape",
      +	"size",
      +	"sizes",
      +	"sortable",
      +	"sorted",
      +	"span",
      +	"src",
      +	"srcdoc",
      +	"srclang",
      +	"start",
      +	"step",
      +	"style",
      +	"tabindex",
      +	"target",
      +	"title",
      +	"translate",
      +	"type",
      +	"typemustmatch",
      +	"usemap",
      +	"value",
      +	"width",
      +	"wrap",
      +}
      +
      +var eventHandlers = []string{
      +	"onabort",
      +	"onautocomplete",
      +	"onautocompleteerror",
      +	"onafterprint",
      +	"onbeforeprint",
      +	"onbeforeunload",
      +	"onblur",
      +	"oncancel",
      +	"oncanplay",
      +	"oncanplaythrough",
      +	"onchange",
      +	"onclick",
      +	"onclose",
      +	"oncontextmenu",
      +	"oncuechange",
      +	"ondblclick",
      +	"ondrag",
      +	"ondragend",
      +	"ondragenter",
      +	"ondragleave",
      +	"ondragover",
      +	"ondragstart",
      +	"ondrop",
      +	"ondurationchange",
      +	"onemptied",
      +	"onended",
      +	"onerror",
      +	"onfocus",
      +	"onhashchange",
      +	"oninput",
      +	"oninvalid",
      +	"onkeydown",
      +	"onkeypress",
      +	"onkeyup",
      +	"onlanguagechange",
      +	"onload",
      +	"onloadeddata",
      +	"onloadedmetadata",
      +	"onloadstart",
      +	"onmessage",
      +	"onmousedown",
      +	"onmousemove",
      +	"onmouseout",
      +	"onmouseover",
      +	"onmouseup",
      +	"onmousewheel",
      +	"onoffline",
      +	"ononline",
      +	"onpagehide",
      +	"onpageshow",
      +	"onpause",
      +	"onplay",
      +	"onplaying",
      +	"onpopstate",
      +	"onprogress",
      +	"onratechange",
      +	"onreset",
      +	"onresize",
      +	"onscroll",
      +	"onseeked",
      +	"onseeking",
      +	"onselect",
      +	"onshow",
      +	"onsort",
      +	"onstalled",
      +	"onstorage",
      +	"onsubmit",
      +	"onsuspend",
      +	"ontimeupdate",
      +	"ontoggle",
      +	"onunload",
      +	"onvolumechange",
      +	"onwaiting",
      +}
      +
      +// extra are ad-hoc values not covered by any of the lists above.
      +var extra = []string{
      +	"align",
      +	"annotation",
      +	"annotation-xml",
      +	"applet",
      +	"basefont",
      +	"bgsound",
      +	"big",
      +	"blink",
      +	"center",
      +	"color",
      +	"desc",
      +	"face",
      +	"font",
      +	"foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
      +	"foreignobject",
      +	"frame",
      +	"frameset",
      +	"image",
      +	"isindex",
      +	"listing",
      +	"malignmark",
      +	"marquee",
      +	"math",
      +	"mglyph",
      +	"mi",
      +	"mn",
      +	"mo",
      +	"ms",
      +	"mtext",
      +	"nobr",
      +	"noembed",
      +	"noframes",
      +	"plaintext",
      +	"prompt",
      +	"public",
      +	"spacer",
      +	"strike",
      +	"svg",
      +	"system",
      +	"tt",
      +	"xmp",
      +}
      diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go
      new file mode 100644
      index 00000000..2605ba31
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/atom/table.go
      @@ -0,0 +1,713 @@
      +// generated by go run gen.go; DO NOT EDIT
      +
      +package atom
      +
      +const (
      +	A                   Atom = 0x1
      +	Abbr                Atom = 0x4
      +	Accept              Atom = 0x2106
      +	AcceptCharset       Atom = 0x210e
      +	Accesskey           Atom = 0x3309
      +	Action              Atom = 0x1f606
      +	Address             Atom = 0x4f307
      +	Align               Atom = 0x1105
      +	Alt                 Atom = 0x4503
      +	Annotation          Atom = 0x1670a
      +	AnnotationXml       Atom = 0x1670e
      +	Applet              Atom = 0x2b306
      +	Area                Atom = 0x2fa04
      +	Article             Atom = 0x38807
      +	Aside               Atom = 0x8305
      +	Async               Atom = 0x7b05
      +	Audio               Atom = 0xa605
      +	Autocomplete        Atom = 0x1fc0c
      +	Autofocus           Atom = 0xb309
      +	Autoplay            Atom = 0xce08
      +	B                   Atom = 0x101
      +	Base                Atom = 0xd604
      +	Basefont            Atom = 0xd608
      +	Bdi                 Atom = 0x1a03
      +	Bdo                 Atom = 0xe703
      +	Bgsound             Atom = 0x11807
      +	Big                 Atom = 0x12403
      +	Blink               Atom = 0x12705
      +	Blockquote          Atom = 0x12c0a
      +	Body                Atom = 0x2f04
      +	Br                  Atom = 0x202
      +	Button              Atom = 0x13606
      +	Canvas              Atom = 0x7f06
      +	Caption             Atom = 0x1bb07
      +	Center              Atom = 0x5b506
      +	Challenge           Atom = 0x21f09
      +	Charset             Atom = 0x2807
      +	Checked             Atom = 0x32807
      +	Cite                Atom = 0x3c804
      +	Class               Atom = 0x4de05
      +	Code                Atom = 0x14904
      +	Col                 Atom = 0x15003
      +	Colgroup            Atom = 0x15008
      +	Color               Atom = 0x15d05
      +	Cols                Atom = 0x16204
      +	Colspan             Atom = 0x16207
      +	Command             Atom = 0x17507
      +	Content             Atom = 0x42307
      +	Contenteditable     Atom = 0x4230f
      +	Contextmenu         Atom = 0x3310b
      +	Controls            Atom = 0x18808
      +	Coords              Atom = 0x19406
      +	Crossorigin         Atom = 0x19f0b
      +	Data                Atom = 0x44a04
      +	Datalist            Atom = 0x44a08
      +	Datetime            Atom = 0x23c08
      +	Dd                  Atom = 0x26702
      +	Default             Atom = 0x8607
      +	Defer               Atom = 0x14b05
      +	Del                 Atom = 0x3ef03
      +	Desc                Atom = 0x4db04
      +	Details             Atom = 0x4807
      +	Dfn                 Atom = 0x6103
      +	Dialog              Atom = 0x1b06
      +	Dir                 Atom = 0x6903
      +	Dirname             Atom = 0x6907
      +	Disabled            Atom = 0x10c08
      +	Div                 Atom = 0x11303
      +	Dl                  Atom = 0x11e02
      +	Download            Atom = 0x40008
      +	Draggable           Atom = 0x17b09
      +	Dropzone            Atom = 0x39108
      +	Dt                  Atom = 0x50902
      +	Em                  Atom = 0x6502
      +	Embed               Atom = 0x6505
      +	Enctype             Atom = 0x21107
      +	Face                Atom = 0x5b304
      +	Fieldset            Atom = 0x1b008
      +	Figcaption          Atom = 0x1b80a
      +	Figure              Atom = 0x1cc06
      +	Font                Atom = 0xda04
      +	Footer              Atom = 0x8d06
      +	For                 Atom = 0x1d803
      +	ForeignObject       Atom = 0x1d80d
      +	Foreignobject       Atom = 0x1e50d
      +	Form                Atom = 0x1f204
      +	Formaction          Atom = 0x1f20a
      +	Formenctype         Atom = 0x20d0b
      +	Formmethod          Atom = 0x2280a
      +	Formnovalidate      Atom = 0x2320e
      +	Formtarget          Atom = 0x2470a
      +	Frame               Atom = 0x9a05
      +	Frameset            Atom = 0x9a08
      +	H1                  Atom = 0x26e02
      +	H2                  Atom = 0x29402
      +	H3                  Atom = 0x2a702
      +	H4                  Atom = 0x2e902
      +	H5                  Atom = 0x2f302
      +	H6                  Atom = 0x50b02
      +	Head                Atom = 0x2d504
      +	Header              Atom = 0x2d506
      +	Headers             Atom = 0x2d507
      +	Height              Atom = 0x25106
      +	Hgroup              Atom = 0x25906
      +	Hidden              Atom = 0x26506
      +	High                Atom = 0x26b04
      +	Hr                  Atom = 0x27002
      +	Href                Atom = 0x27004
      +	Hreflang            Atom = 0x27008
      +	Html                Atom = 0x25504
      +	HttpEquiv           Atom = 0x2780a
      +	I                   Atom = 0x601
      +	Icon                Atom = 0x42204
      +	Id                  Atom = 0x8502
      +	Iframe              Atom = 0x29606
      +	Image               Atom = 0x29c05
      +	Img                 Atom = 0x2a103
      +	Input               Atom = 0x3e805
      +	Inputmode           Atom = 0x3e809
      +	Ins                 Atom = 0x1a803
      +	Isindex             Atom = 0x2a907
      +	Ismap               Atom = 0x2b005
      +	Itemid              Atom = 0x33c06
      +	Itemprop            Atom = 0x3c908
      +	Itemref             Atom = 0x5ad07
      +	Itemscope           Atom = 0x2b909
      +	Itemtype            Atom = 0x2c308
      +	Kbd                 Atom = 0x1903
      +	Keygen              Atom = 0x3906
      +	Keytype             Atom = 0x53707
      +	Kind                Atom = 0x10904
      +	Label               Atom = 0xf005
      +	Lang                Atom = 0x27404
      +	Legend              Atom = 0x18206
      +	Li                  Atom = 0x1202
      +	Link                Atom = 0x12804
      +	List                Atom = 0x44e04
      +	Listing             Atom = 0x44e07
      +	Loop                Atom = 0xf404
      +	Low                 Atom = 0x11f03
      +	Malignmark          Atom = 0x100a
      +	Manifest            Atom = 0x5f108
      +	Map                 Atom = 0x2b203
      +	Mark                Atom = 0x1604
      +	Marquee             Atom = 0x2cb07
      +	Math                Atom = 0x2d204
      +	Max                 Atom = 0x2e103
      +	Maxlength           Atom = 0x2e109
      +	Media               Atom = 0x6e05
      +	Mediagroup          Atom = 0x6e0a
      +	Menu                Atom = 0x33804
      +	Menuitem            Atom = 0x33808
      +	Meta                Atom = 0x45d04
      +	Meter               Atom = 0x24205
      +	Method              Atom = 0x22c06
      +	Mglyph              Atom = 0x2a206
      +	Mi                  Atom = 0x2eb02
      +	Min                 Atom = 0x2eb03
      +	Minlength           Atom = 0x2eb09
      +	Mn                  Atom = 0x23502
      +	Mo                  Atom = 0x3ed02
      +	Ms                  Atom = 0x2bc02
      +	Mtext               Atom = 0x2f505
      +	Multiple            Atom = 0x30308
      +	Muted               Atom = 0x30b05
      +	Name                Atom = 0x6c04
      +	Nav                 Atom = 0x3e03
      +	Nobr                Atom = 0x5704
      +	Noembed             Atom = 0x6307
      +	Noframes            Atom = 0x9808
      +	Noscript            Atom = 0x3d208
      +	Novalidate          Atom = 0x2360a
      +	Object              Atom = 0x1ec06
      +	Ol                  Atom = 0xc902
      +	Onabort             Atom = 0x13a07
      +	Onafterprint        Atom = 0x1c00c
      +	Onautocomplete      Atom = 0x1fa0e
      +	Onautocompleteerror Atom = 0x1fa13
      +	Onbeforeprint       Atom = 0x6040d
      +	Onbeforeunload      Atom = 0x4e70e
      +	Onblur              Atom = 0xaa06
      +	Oncancel            Atom = 0xe908
      +	Oncanplay           Atom = 0x28509
      +	Oncanplaythrough    Atom = 0x28510
      +	Onchange            Atom = 0x3a708
      +	Onclick             Atom = 0x31007
      +	Onclose             Atom = 0x31707
      +	Oncontextmenu       Atom = 0x32f0d
      +	Oncuechange         Atom = 0x3420b
      +	Ondblclick          Atom = 0x34d0a
      +	Ondrag              Atom = 0x35706
      +	Ondragend           Atom = 0x35709
      +	Ondragenter         Atom = 0x3600b
      +	Ondragleave         Atom = 0x36b0b
      +	Ondragover          Atom = 0x3760a
      +	Ondragstart         Atom = 0x3800b
      +	Ondrop              Atom = 0x38f06
      +	Ondurationchange    Atom = 0x39f10
      +	Onemptied           Atom = 0x39609
      +	Onended             Atom = 0x3af07
      +	Onerror             Atom = 0x3b607
      +	Onfocus             Atom = 0x3bd07
      +	Onhashchange        Atom = 0x3da0c
      +	Oninput             Atom = 0x3e607
      +	Oninvalid           Atom = 0x3f209
      +	Onkeydown           Atom = 0x3fb09
      +	Onkeypress          Atom = 0x4080a
      +	Onkeyup             Atom = 0x41807
      +	Onlanguagechange    Atom = 0x43210
      +	Onload              Atom = 0x44206
      +	Onloadeddata        Atom = 0x4420c
      +	Onloadedmetadata    Atom = 0x45510
      +	Onloadstart         Atom = 0x46b0b
      +	Onmessage           Atom = 0x47609
      +	Onmousedown         Atom = 0x47f0b
      +	Onmousemove         Atom = 0x48a0b
      +	Onmouseout          Atom = 0x4950a
      +	Onmouseover         Atom = 0x4a20b
      +	Onmouseup           Atom = 0x4ad09
      +	Onmousewheel        Atom = 0x4b60c
      +	Onoffline           Atom = 0x4c209
      +	Ononline            Atom = 0x4cb08
      +	Onpagehide          Atom = 0x4d30a
      +	Onpageshow          Atom = 0x4fe0a
      +	Onpause             Atom = 0x50d07
      +	Onplay              Atom = 0x51706
      +	Onplaying           Atom = 0x51709
      +	Onpopstate          Atom = 0x5200a
      +	Onprogress          Atom = 0x52a0a
      +	Onratechange        Atom = 0x53e0c
      +	Onreset             Atom = 0x54a07
      +	Onresize            Atom = 0x55108
      +	Onscroll            Atom = 0x55f08
      +	Onseeked            Atom = 0x56708
      +	Onseeking           Atom = 0x56f09
      +	Onselect            Atom = 0x57808
      +	Onshow              Atom = 0x58206
      +	Onsort              Atom = 0x58b06
      +	Onstalled           Atom = 0x59509
      +	Onstorage           Atom = 0x59e09
      +	Onsubmit            Atom = 0x5a708
      +	Onsuspend           Atom = 0x5bb09
      +	Ontimeupdate        Atom = 0xdb0c
      +	Ontoggle            Atom = 0x5c408
      +	Onunload            Atom = 0x5cc08
      +	Onvolumechange      Atom = 0x5d40e
      +	Onwaiting           Atom = 0x5e209
      +	Open                Atom = 0x3cf04
      +	Optgroup            Atom = 0xf608
      +	Optimum             Atom = 0x5eb07
      +	Option              Atom = 0x60006
      +	Output              Atom = 0x49c06
      +	P                   Atom = 0xc01
      +	Param               Atom = 0xc05
      +	Pattern             Atom = 0x5107
      +	Ping                Atom = 0x7704
      +	Placeholder         Atom = 0xc30b
      +	Plaintext           Atom = 0xfd09
      +	Poster              Atom = 0x15706
      +	Pre                 Atom = 0x25e03
      +	Preload             Atom = 0x25e07
      +	Progress            Atom = 0x52c08
      +	Prompt              Atom = 0x5fa06
      +	Public              Atom = 0x41e06
      +	Q                   Atom = 0x13101
      +	Radiogroup          Atom = 0x30a
      +	Readonly            Atom = 0x2fb08
      +	Rel                 Atom = 0x25f03
      +	Required            Atom = 0x1d008
      +	Reversed            Atom = 0x5a08
      +	Rows                Atom = 0x9204
      +	Rowspan             Atom = 0x9207
      +	Rp                  Atom = 0x1c602
      +	Rt                  Atom = 0x13f02
      +	Ruby                Atom = 0xaf04
      +	S                   Atom = 0x2c01
      +	Samp                Atom = 0x4e04
      +	Sandbox             Atom = 0xbb07
      +	Scope               Atom = 0x2bd05
      +	Scoped              Atom = 0x2bd06
      +	Script              Atom = 0x3d406
      +	Seamless            Atom = 0x31c08
      +	Section             Atom = 0x4e207
      +	Select              Atom = 0x57a06
      +	Selected            Atom = 0x57a08
      +	Shape               Atom = 0x4f905
      +	Size                Atom = 0x55504
      +	Sizes               Atom = 0x55505
      +	Small               Atom = 0x18f05
      +	Sortable            Atom = 0x58d08
      +	Sorted              Atom = 0x19906
      +	Source              Atom = 0x1aa06
      +	Spacer              Atom = 0x2db06
      +	Span                Atom = 0x9504
      +	Spellcheck          Atom = 0x3230a
      +	Src                 Atom = 0x3c303
      +	Srcdoc              Atom = 0x3c306
      +	Srclang             Atom = 0x41107
      +	Start               Atom = 0x38605
      +	Step                Atom = 0x5f704
      +	Strike              Atom = 0x53306
      +	Strong              Atom = 0x55906
      +	Style               Atom = 0x61105
      +	Sub                 Atom = 0x5a903
      +	Summary             Atom = 0x61607
      +	Sup                 Atom = 0x61d03
      +	Svg                 Atom = 0x62003
      +	System              Atom = 0x62306
      +	Tabindex            Atom = 0x46308
      +	Table               Atom = 0x42d05
      +	Target              Atom = 0x24b06
      +	Tbody               Atom = 0x2e05
      +	Td                  Atom = 0x4702
      +	Template            Atom = 0x62608
      +	Textarea            Atom = 0x2f608
      +	Tfoot               Atom = 0x8c05
      +	Th                  Atom = 0x22e02
      +	Thead               Atom = 0x2d405
      +	Time                Atom = 0xdd04
      +	Title               Atom = 0xa105
      +	Tr                  Atom = 0x10502
      +	Track               Atom = 0x10505
      +	Translate           Atom = 0x14009
      +	Tt                  Atom = 0x5302
      +	Type                Atom = 0x21404
      +	Typemustmatch       Atom = 0x2140d
      +	U                   Atom = 0xb01
      +	Ul                  Atom = 0x8a02
      +	Usemap              Atom = 0x51106
      +	Value               Atom = 0x4005
      +	Var                 Atom = 0x11503
      +	Video               Atom = 0x28105
      +	Wbr                 Atom = 0x12103
      +	Width               Atom = 0x50705
      +	Wrap                Atom = 0x58704
      +	Xmp                 Atom = 0xc103
      +)
      +
      +const hash0 = 0xc17da63e
      +
      +const maxAtomLen = 19
      +
      +var table = [1 << 9]Atom{
      +	0x1:   0x48a0b, // onmousemove
      +	0x2:   0x5e209, // onwaiting
      +	0x3:   0x1fa13, // onautocompleteerror
      +	0x4:   0x5fa06, // prompt
      +	0x7:   0x5eb07, // optimum
      +	0x8:   0x1604,  // mark
      +	0xa:   0x5ad07, // itemref
      +	0xb:   0x4fe0a, // onpageshow
      +	0xc:   0x57a06, // select
      +	0xd:   0x17b09, // draggable
      +	0xe:   0x3e03,  // nav
      +	0xf:   0x17507, // command
      +	0x11:  0xb01,   // u
      +	0x14:  0x2d507, // headers
      +	0x15:  0x44a08, // datalist
      +	0x17:  0x4e04,  // samp
      +	0x1a:  0x3fb09, // onkeydown
      +	0x1b:  0x55f08, // onscroll
      +	0x1c:  0x15003, // col
      +	0x20:  0x3c908, // itemprop
      +	0x21:  0x2780a, // http-equiv
      +	0x22:  0x61d03, // sup
      +	0x24:  0x1d008, // required
      +	0x2b:  0x25e07, // preload
      +	0x2c:  0x6040d, // onbeforeprint
      +	0x2d:  0x3600b, // ondragenter
      +	0x2e:  0x50902, // dt
      +	0x2f:  0x5a708, // onsubmit
      +	0x30:  0x27002, // hr
      +	0x31:  0x32f0d, // oncontextmenu
      +	0x33:  0x29c05, // image
      +	0x34:  0x50d07, // onpause
      +	0x35:  0x25906, // hgroup
      +	0x36:  0x7704,  // ping
      +	0x37:  0x57808, // onselect
      +	0x3a:  0x11303, // div
      +	0x3b:  0x1fa0e, // onautocomplete
      +	0x40:  0x2eb02, // mi
      +	0x41:  0x31c08, // seamless
      +	0x42:  0x2807,  // charset
      +	0x43:  0x8502,  // id
      +	0x44:  0x5200a, // onpopstate
      +	0x45:  0x3ef03, // del
      +	0x46:  0x2cb07, // marquee
      +	0x47:  0x3309,  // accesskey
      +	0x49:  0x8d06,  // footer
      +	0x4a:  0x44e04, // list
      +	0x4b:  0x2b005, // ismap
      +	0x51:  0x33804, // menu
      +	0x52:  0x2f04,  // body
      +	0x55:  0x9a08,  // frameset
      +	0x56:  0x54a07, // onreset
      +	0x57:  0x12705, // blink
      +	0x58:  0xa105,  // title
      +	0x59:  0x38807, // article
      +	0x5b:  0x22e02, // th
      +	0x5d:  0x13101, // q
      +	0x5e:  0x3cf04, // open
      +	0x5f:  0x2fa04, // area
      +	0x61:  0x44206, // onload
      +	0x62:  0xda04,  // font
      +	0x63:  0xd604,  // base
      +	0x64:  0x16207, // colspan
      +	0x65:  0x53707, // keytype
      +	0x66:  0x11e02, // dl
      +	0x68:  0x1b008, // fieldset
      +	0x6a:  0x2eb03, // min
      +	0x6b:  0x11503, // var
      +	0x6f:  0x2d506, // header
      +	0x70:  0x13f02, // rt
      +	0x71:  0x15008, // colgroup
      +	0x72:  0x23502, // mn
      +	0x74:  0x13a07, // onabort
      +	0x75:  0x3906,  // keygen
      +	0x76:  0x4c209, // onoffline
      +	0x77:  0x21f09, // challenge
      +	0x78:  0x2b203, // map
      +	0x7a:  0x2e902, // h4
      +	0x7b:  0x3b607, // onerror
      +	0x7c:  0x2e109, // maxlength
      +	0x7d:  0x2f505, // mtext
      +	0x7e:  0xbb07,  // sandbox
      +	0x7f:  0x58b06, // onsort
      +	0x80:  0x100a,  // malignmark
      +	0x81:  0x45d04, // meta
      +	0x82:  0x7b05,  // async
      +	0x83:  0x2a702, // h3
      +	0x84:  0x26702, // dd
      +	0x85:  0x27004, // href
      +	0x86:  0x6e0a,  // mediagroup
      +	0x87:  0x19406, // coords
      +	0x88:  0x41107, // srclang
      +	0x89:  0x34d0a, // ondblclick
      +	0x8a:  0x4005,  // value
      +	0x8c:  0xe908,  // oncancel
      +	0x8e:  0x3230a, // spellcheck
      +	0x8f:  0x9a05,  // frame
      +	0x91:  0x12403, // big
      +	0x94:  0x1f606, // action
      +	0x95:  0x6903,  // dir
      +	0x97:  0x2fb08, // readonly
      +	0x99:  0x42d05, // table
      +	0x9a:  0x61607, // summary
      +	0x9b:  0x12103, // wbr
      +	0x9c:  0x30a,   // radiogroup
      +	0x9d:  0x6c04,  // name
      +	0x9f:  0x62306, // system
      +	0xa1:  0x15d05, // color
      +	0xa2:  0x7f06,  // canvas
      +	0xa3:  0x25504, // html
      +	0xa5:  0x56f09, // onseeking
      +	0xac:  0x4f905, // shape
      +	0xad:  0x25f03, // rel
      +	0xae:  0x28510, // oncanplaythrough
      +	0xaf:  0x3760a, // ondragover
      +	0xb0:  0x62608, // template
      +	0xb1:  0x1d80d, // foreignObject
      +	0xb3:  0x9204,  // rows
      +	0xb6:  0x44e07, // listing
      +	0xb7:  0x49c06, // output
      +	0xb9:  0x3310b, // contextmenu
      +	0xbb:  0x11f03, // low
      +	0xbc:  0x1c602, // rp
      +	0xbd:  0x5bb09, // onsuspend
      +	0xbe:  0x13606, // button
      +	0xbf:  0x4db04, // desc
      +	0xc1:  0x4e207, // section
      +	0xc2:  0x52a0a, // onprogress
      +	0xc3:  0x59e09, // onstorage
      +	0xc4:  0x2d204, // math
      +	0xc5:  0x4503,  // alt
      +	0xc7:  0x8a02,  // ul
      +	0xc8:  0x5107,  // pattern
      +	0xc9:  0x4b60c, // onmousewheel
      +	0xca:  0x35709, // ondragend
      +	0xcb:  0xaf04,  // ruby
      +	0xcc:  0xc01,   // p
      +	0xcd:  0x31707, // onclose
      +	0xce:  0x24205, // meter
      +	0xcf:  0x11807, // bgsound
      +	0xd2:  0x25106, // height
      +	0xd4:  0x101,   // b
      +	0xd5:  0x2c308, // itemtype
      +	0xd8:  0x1bb07, // caption
      +	0xd9:  0x10c08, // disabled
      +	0xdb:  0x33808, // menuitem
      +	0xdc:  0x62003, // svg
      +	0xdd:  0x18f05, // small
      +	0xde:  0x44a04, // data
      +	0xe0:  0x4cb08, // ononline
      +	0xe1:  0x2a206, // mglyph
      +	0xe3:  0x6505,  // embed
      +	0xe4:  0x10502, // tr
      +	0xe5:  0x46b0b, // onloadstart
      +	0xe7:  0x3c306, // srcdoc
      +	0xeb:  0x5c408, // ontoggle
      +	0xed:  0xe703,  // bdo
      +	0xee:  0x4702,  // td
      +	0xef:  0x8305,  // aside
      +	0xf0:  0x29402, // h2
      +	0xf1:  0x52c08, // progress
      +	0xf2:  0x12c0a, // blockquote
      +	0xf4:  0xf005,  // label
      +	0xf5:  0x601,   // i
      +	0xf7:  0x9207,  // rowspan
      +	0xfb:  0x51709, // onplaying
      +	0xfd:  0x2a103, // img
      +	0xfe:  0xf608,  // optgroup
      +	0xff:  0x42307, // content
      +	0x101: 0x53e0c, // onratechange
      +	0x103: 0x3da0c, // onhashchange
      +	0x104: 0x4807,  // details
      +	0x106: 0x40008, // download
      +	0x109: 0x14009, // translate
      +	0x10b: 0x4230f, // contenteditable
      +	0x10d: 0x36b0b, // ondragleave
      +	0x10e: 0x2106,  // accept
      +	0x10f: 0x57a08, // selected
      +	0x112: 0x1f20a, // formaction
      +	0x113: 0x5b506, // center
      +	0x115: 0x45510, // onloadedmetadata
      +	0x116: 0x12804, // link
      +	0x117: 0xdd04,  // time
      +	0x118: 0x19f0b, // crossorigin
      +	0x119: 0x3bd07, // onfocus
      +	0x11a: 0x58704, // wrap
      +	0x11b: 0x42204, // icon
      +	0x11d: 0x28105, // video
      +	0x11e: 0x4de05, // class
      +	0x121: 0x5d40e, // onvolumechange
      +	0x122: 0xaa06,  // onblur
      +	0x123: 0x2b909, // itemscope
      +	0x124: 0x61105, // style
      +	0x127: 0x41e06, // public
      +	0x129: 0x2320e, // formnovalidate
      +	0x12a: 0x58206, // onshow
      +	0x12c: 0x51706, // onplay
      +	0x12d: 0x3c804, // cite
      +	0x12e: 0x2bc02, // ms
      +	0x12f: 0xdb0c,  // ontimeupdate
      +	0x130: 0x10904, // kind
      +	0x131: 0x2470a, // formtarget
      +	0x135: 0x3af07, // onended
      +	0x136: 0x26506, // hidden
      +	0x137: 0x2c01,  // s
      +	0x139: 0x2280a, // formmethod
      +	0x13a: 0x3e805, // input
      +	0x13c: 0x50b02, // h6
      +	0x13d: 0xc902,  // ol
      +	0x13e: 0x3420b, // oncuechange
      +	0x13f: 0x1e50d, // foreignobject
      +	0x143: 0x4e70e, // onbeforeunload
      +	0x144: 0x2bd05, // scope
      +	0x145: 0x39609, // onemptied
      +	0x146: 0x14b05, // defer
      +	0x147: 0xc103,  // xmp
      +	0x148: 0x39f10, // ondurationchange
      +	0x149: 0x1903,  // kbd
      +	0x14c: 0x47609, // onmessage
      +	0x14d: 0x60006, // option
      +	0x14e: 0x2eb09, // minlength
      +	0x14f: 0x32807, // checked
      +	0x150: 0xce08,  // autoplay
      +	0x152: 0x202,   // br
      +	0x153: 0x2360a, // novalidate
      +	0x156: 0x6307,  // noembed
      +	0x159: 0x31007, // onclick
      +	0x15a: 0x47f0b, // onmousedown
      +	0x15b: 0x3a708, // onchange
      +	0x15e: 0x3f209, // oninvalid
      +	0x15f: 0x2bd06, // scoped
      +	0x160: 0x18808, // controls
      +	0x161: 0x30b05, // muted
      +	0x162: 0x58d08, // sortable
      +	0x163: 0x51106, // usemap
      +	0x164: 0x1b80a, // figcaption
      +	0x165: 0x35706, // ondrag
      +	0x166: 0x26b04, // high
      +	0x168: 0x3c303, // src
      +	0x169: 0x15706, // poster
      +	0x16b: 0x1670e, // annotation-xml
      +	0x16c: 0x5f704, // step
      +	0x16d: 0x4,     // abbr
      +	0x16e: 0x1b06,  // dialog
      +	0x170: 0x1202,  // li
      +	0x172: 0x3ed02, // mo
      +	0x175: 0x1d803, // for
      +	0x176: 0x1a803, // ins
      +	0x178: 0x55504, // size
      +	0x179: 0x43210, // onlanguagechange
      +	0x17a: 0x8607,  // default
      +	0x17b: 0x1a03,  // bdi
      +	0x17c: 0x4d30a, // onpagehide
      +	0x17d: 0x6907,  // dirname
      +	0x17e: 0x21404, // type
      +	0x17f: 0x1f204, // form
      +	0x181: 0x28509, // oncanplay
      +	0x182: 0x6103,  // dfn
      +	0x183: 0x46308, // tabindex
      +	0x186: 0x6502,  // em
      +	0x187: 0x27404, // lang
      +	0x189: 0x39108, // dropzone
      +	0x18a: 0x4080a, // onkeypress
      +	0x18b: 0x23c08, // datetime
      +	0x18c: 0x16204, // cols
      +	0x18d: 0x1,     // a
      +	0x18e: 0x4420c, // onloadeddata
      +	0x190: 0xa605,  // audio
      +	0x192: 0x2e05,  // tbody
      +	0x193: 0x22c06, // method
      +	0x195: 0xf404,  // loop
      +	0x196: 0x29606, // iframe
      +	0x198: 0x2d504, // head
      +	0x19e: 0x5f108, // manifest
      +	0x19f: 0xb309,  // autofocus
      +	0x1a0: 0x14904, // code
      +	0x1a1: 0x55906, // strong
      +	0x1a2: 0x30308, // multiple
      +	0x1a3: 0xc05,   // param
      +	0x1a6: 0x21107, // enctype
      +	0x1a7: 0x5b304, // face
      +	0x1a8: 0xfd09,  // plaintext
      +	0x1a9: 0x26e02, // h1
      +	0x1aa: 0x59509, // onstalled
      +	0x1ad: 0x3d406, // script
      +	0x1ae: 0x2db06, // spacer
      +	0x1af: 0x55108, // onresize
      +	0x1b0: 0x4a20b, // onmouseover
      +	0x1b1: 0x5cc08, // onunload
      +	0x1b2: 0x56708, // onseeked
      +	0x1b4: 0x2140d, // typemustmatch
      +	0x1b5: 0x1cc06, // figure
      +	0x1b6: 0x4950a, // onmouseout
      +	0x1b7: 0x25e03, // pre
      +	0x1b8: 0x50705, // width
      +	0x1b9: 0x19906, // sorted
      +	0x1bb: 0x5704,  // nobr
      +	0x1be: 0x5302,  // tt
      +	0x1bf: 0x1105,  // align
      +	0x1c0: 0x3e607, // oninput
      +	0x1c3: 0x41807, // onkeyup
      +	0x1c6: 0x1c00c, // onafterprint
      +	0x1c7: 0x210e,  // accept-charset
      +	0x1c8: 0x33c06, // itemid
      +	0x1c9: 0x3e809, // inputmode
      +	0x1cb: 0x53306, // strike
      +	0x1cc: 0x5a903, // sub
      +	0x1cd: 0x10505, // track
      +	0x1ce: 0x38605, // start
      +	0x1d0: 0xd608,  // basefont
      +	0x1d6: 0x1aa06, // source
      +	0x1d7: 0x18206, // legend
      +	0x1d8: 0x2d405, // thead
      +	0x1da: 0x8c05,  // tfoot
      +	0x1dd: 0x1ec06, // object
      +	0x1de: 0x6e05,  // media
      +	0x1df: 0x1670a, // annotation
      +	0x1e0: 0x20d0b, // formenctype
      +	0x1e2: 0x3d208, // noscript
      +	0x1e4: 0x55505, // sizes
      +	0x1e5: 0x1fc0c, // autocomplete
      +	0x1e6: 0x9504,  // span
      +	0x1e7: 0x9808,  // noframes
      +	0x1e8: 0x24b06, // target
      +	0x1e9: 0x38f06, // ondrop
      +	0x1ea: 0x2b306, // applet
      +	0x1ec: 0x5a08,  // reversed
      +	0x1f0: 0x2a907, // isindex
      +	0x1f3: 0x27008, // hreflang
      +	0x1f5: 0x2f302, // h5
      +	0x1f6: 0x4f307, // address
      +	0x1fa: 0x2e103, // max
      +	0x1fb: 0xc30b,  // placeholder
      +	0x1fc: 0x2f608, // textarea
      +	0x1fe: 0x4ad09, // onmouseup
      +	0x1ff: 0x3800b, // ondragstart
      +}
      +
      +const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" +
      +	"genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" +
      +	"ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" +
      +	"utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" +
      +	"labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" +
      +	"blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" +
      +	"nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" +
      +	"originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" +
      +	"bjectforeignobjectformactionautocompleteerrorformenctypemust" +
      +	"matchallengeformmethodformnovalidatetimeterformtargetheightm" +
      +	"lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" +
      +	"h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" +
      +	"eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" +
      +	"utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" +
      +	"hangeondblclickondragendondragenterondragleaveondragoverondr" +
      +	"agstarticleondropzonemptiedondurationchangeonendedonerroronf" +
      +	"ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" +
      +	"nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" +
      +	"uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" +
      +	"rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" +
      +	"ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" +
      +	"oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" +
      +	"teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" +
      +	"ollonseekedonseekingonselectedonshowraponsortableonstalledon" +
      +	"storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" +
      +	"changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" +
      +	"mmarysupsvgsystemplate"
      diff --git a/vendor/golang.org/x/net/html/atom/table_test.go b/vendor/golang.org/x/net/html/atom/table_test.go
      new file mode 100644
      index 00000000..0f2ecce4
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/atom/table_test.go
      @@ -0,0 +1,351 @@
      +// generated by go run gen.go -test; DO NOT EDIT
      +
      +package atom
      +
      +var testAtomList = []string{
      +	"a",
      +	"abbr",
      +	"abbr",
      +	"accept",
      +	"accept-charset",
      +	"accesskey",
      +	"action",
      +	"address",
      +	"align",
      +	"alt",
      +	"annotation",
      +	"annotation-xml",
      +	"applet",
      +	"area",
      +	"article",
      +	"aside",
      +	"async",
      +	"audio",
      +	"autocomplete",
      +	"autofocus",
      +	"autoplay",
      +	"b",
      +	"base",
      +	"basefont",
      +	"bdi",
      +	"bdo",
      +	"bgsound",
      +	"big",
      +	"blink",
      +	"blockquote",
      +	"body",
      +	"br",
      +	"button",
      +	"canvas",
      +	"caption",
      +	"center",
      +	"challenge",
      +	"charset",
      +	"checked",
      +	"cite",
      +	"cite",
      +	"class",
      +	"code",
      +	"col",
      +	"colgroup",
      +	"color",
      +	"cols",
      +	"colspan",
      +	"command",
      +	"command",
      +	"content",
      +	"contenteditable",
      +	"contextmenu",
      +	"controls",
      +	"coords",
      +	"crossorigin",
      +	"data",
      +	"data",
      +	"datalist",
      +	"datetime",
      +	"dd",
      +	"default",
      +	"defer",
      +	"del",
      +	"desc",
      +	"details",
      +	"dfn",
      +	"dialog",
      +	"dir",
      +	"dirname",
      +	"disabled",
      +	"div",
      +	"dl",
      +	"download",
      +	"draggable",
      +	"dropzone",
      +	"dt",
      +	"em",
      +	"embed",
      +	"enctype",
      +	"face",
      +	"fieldset",
      +	"figcaption",
      +	"figure",
      +	"font",
      +	"footer",
      +	"for",
      +	"foreignObject",
      +	"foreignobject",
      +	"form",
      +	"form",
      +	"formaction",
      +	"formenctype",
      +	"formmethod",
      +	"formnovalidate",
      +	"formtarget",
      +	"frame",
      +	"frameset",
      +	"h1",
      +	"h2",
      +	"h3",
      +	"h4",
      +	"h5",
      +	"h6",
      +	"head",
      +	"header",
      +	"headers",
      +	"height",
      +	"hgroup",
      +	"hidden",
      +	"high",
      +	"hr",
      +	"href",
      +	"hreflang",
      +	"html",
      +	"http-equiv",
      +	"i",
      +	"icon",
      +	"id",
      +	"iframe",
      +	"image",
      +	"img",
      +	"input",
      +	"inputmode",
      +	"ins",
      +	"isindex",
      +	"ismap",
      +	"itemid",
      +	"itemprop",
      +	"itemref",
      +	"itemscope",
      +	"itemtype",
      +	"kbd",
      +	"keygen",
      +	"keytype",
      +	"kind",
      +	"label",
      +	"label",
      +	"lang",
      +	"legend",
      +	"li",
      +	"link",
      +	"list",
      +	"listing",
      +	"loop",
      +	"low",
      +	"malignmark",
      +	"manifest",
      +	"map",
      +	"mark",
      +	"marquee",
      +	"math",
      +	"max",
      +	"maxlength",
      +	"media",
      +	"mediagroup",
      +	"menu",
      +	"menuitem",
      +	"meta",
      +	"meter",
      +	"method",
      +	"mglyph",
      +	"mi",
      +	"min",
      +	"minlength",
      +	"mn",
      +	"mo",
      +	"ms",
      +	"mtext",
      +	"multiple",
      +	"muted",
      +	"name",
      +	"nav",
      +	"nobr",
      +	"noembed",
      +	"noframes",
      +	"noscript",
      +	"novalidate",
      +	"object",
      +	"ol",
      +	"onabort",
      +	"onafterprint",
      +	"onautocomplete",
      +	"onautocompleteerror",
      +	"onbeforeprint",
      +	"onbeforeunload",
      +	"onblur",
      +	"oncancel",
      +	"oncanplay",
      +	"oncanplaythrough",
      +	"onchange",
      +	"onclick",
      +	"onclose",
      +	"oncontextmenu",
      +	"oncuechange",
      +	"ondblclick",
      +	"ondrag",
      +	"ondragend",
      +	"ondragenter",
      +	"ondragleave",
      +	"ondragover",
      +	"ondragstart",
      +	"ondrop",
      +	"ondurationchange",
      +	"onemptied",
      +	"onended",
      +	"onerror",
      +	"onfocus",
      +	"onhashchange",
      +	"oninput",
      +	"oninvalid",
      +	"onkeydown",
      +	"onkeypress",
      +	"onkeyup",
      +	"onlanguagechange",
      +	"onload",
      +	"onloadeddata",
      +	"onloadedmetadata",
      +	"onloadstart",
      +	"onmessage",
      +	"onmousedown",
      +	"onmousemove",
      +	"onmouseout",
      +	"onmouseover",
      +	"onmouseup",
      +	"onmousewheel",
      +	"onoffline",
      +	"ononline",
      +	"onpagehide",
      +	"onpageshow",
      +	"onpause",
      +	"onplay",
      +	"onplaying",
      +	"onpopstate",
      +	"onprogress",
      +	"onratechange",
      +	"onreset",
      +	"onresize",
      +	"onscroll",
      +	"onseeked",
      +	"onseeking",
      +	"onselect",
      +	"onshow",
      +	"onsort",
      +	"onstalled",
      +	"onstorage",
      +	"onsubmit",
      +	"onsuspend",
      +	"ontimeupdate",
      +	"ontoggle",
      +	"onunload",
      +	"onvolumechange",
      +	"onwaiting",
      +	"open",
      +	"optgroup",
      +	"optimum",
      +	"option",
      +	"output",
      +	"p",
      +	"param",
      +	"pattern",
      +	"ping",
      +	"placeholder",
      +	"plaintext",
      +	"poster",
      +	"pre",
      +	"preload",
      +	"progress",
      +	"prompt",
      +	"public",
      +	"q",
      +	"radiogroup",
      +	"readonly",
      +	"rel",
      +	"required",
      +	"reversed",
      +	"rows",
      +	"rowspan",
      +	"rp",
      +	"rt",
      +	"ruby",
      +	"s",
      +	"samp",
      +	"sandbox",
      +	"scope",
      +	"scoped",
      +	"script",
      +	"seamless",
      +	"section",
      +	"select",
      +	"selected",
      +	"shape",
      +	"size",
      +	"sizes",
      +	"small",
      +	"sortable",
      +	"sorted",
      +	"source",
      +	"spacer",
      +	"span",
      +	"span",
      +	"spellcheck",
      +	"src",
      +	"srcdoc",
      +	"srclang",
      +	"start",
      +	"step",
      +	"strike",
      +	"strong",
      +	"style",
      +	"style",
      +	"sub",
      +	"summary",
      +	"sup",
      +	"svg",
      +	"system",
      +	"tabindex",
      +	"table",
      +	"target",
      +	"tbody",
      +	"td",
      +	"template",
      +	"textarea",
      +	"tfoot",
      +	"th",
      +	"thead",
      +	"time",
      +	"title",
      +	"title",
      +	"tr",
      +	"track",
      +	"translate",
      +	"tt",
      +	"type",
      +	"typemustmatch",
      +	"u",
      +	"ul",
      +	"usemap",
      +	"value",
      +	"var",
      +	"video",
      +	"wbr",
      +	"width",
      +	"wrap",
      +	"xmp",
      +}
      diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go
      new file mode 100644
      index 00000000..13bed159
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/charset/charset.go
      @@ -0,0 +1,257 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// Package charset provides common text encodings for HTML documents.
      +//
      +// The mapping from encoding labels to encodings is defined at
      +// https://encoding.spec.whatwg.org/.
      +package charset // import "golang.org/x/net/html/charset"
      +
      +import (
      +	"bytes"
      +	"fmt"
      +	"io"
      +	"mime"
      +	"strings"
      +	"unicode/utf8"
      +
      +	"golang.org/x/net/html"
      +	"golang.org/x/text/encoding"
      +	"golang.org/x/text/encoding/charmap"
      +	"golang.org/x/text/encoding/htmlindex"
      +	"golang.org/x/text/transform"
      +)
      +
      +// Lookup returns the encoding with the specified label, and its canonical
      +// name. It returns nil and the empty string if label is not one of the
      +// standard encodings for HTML. Matching is case-insensitive and ignores
      +// leading and trailing whitespace. Encoders will use HTML escape sequences for
      +// runes that are not supported by the character set.
      +func Lookup(label string) (e encoding.Encoding, name string) {
      +	e, err := htmlindex.Get(label)
      +	if err != nil {
      +		return nil, ""
      +	}
      +	name, _ = htmlindex.Name(e)
      +	return &htmlEncoding{e}, name
      +}
      +
      +type htmlEncoding struct{ encoding.Encoding }
      +
      +func (h *htmlEncoding) NewEncoder() *encoding.Encoder {
      +	// HTML requires a non-terminating legacy encoder. We use HTML escapes to
      +	// substitute unsupported code points.
      +	return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder())
      +}
      +
      +// DetermineEncoding determines the encoding of an HTML document by examining
      +// up to the first 1024 bytes of content and the declared Content-Type.
      +//
      +// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding
      +func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) {
      +	if len(content) > 1024 {
      +		content = content[:1024]
      +	}
      +
      +	for _, b := range boms {
      +		if bytes.HasPrefix(content, b.bom) {
      +			e, name = Lookup(b.enc)
      +			return e, name, true
      +		}
      +	}
      +
      +	if _, params, err := mime.ParseMediaType(contentType); err == nil {
      +		if cs, ok := params["charset"]; ok {
      +			if e, name = Lookup(cs); e != nil {
      +				return e, name, true
      +			}
      +		}
      +	}
      +
      +	if len(content) > 0 {
      +		e, name = prescan(content)
      +		if e != nil {
      +			return e, name, false
      +		}
      +	}
      +
      +	// Try to detect UTF-8.
      +	// First eliminate any partial rune at the end.
      +	for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
      +		b := content[i]
      +		if b < 0x80 {
      +			break
      +		}
      +		if utf8.RuneStart(b) {
      +			content = content[:i]
      +			break
      +		}
      +	}
      +	hasHighBit := false
      +	for _, c := range content {
      +		if c >= 0x80 {
      +			hasHighBit = true
      +			break
      +		}
      +	}
      +	if hasHighBit && utf8.Valid(content) {
      +		return encoding.Nop, "utf-8", false
      +	}
      +
      +	// TODO: change default depending on user's locale?
      +	return charmap.Windows1252, "windows-1252", false
      +}
      +
      +// NewReader returns an io.Reader that converts the content of r to UTF-8.
      +// It calls DetermineEncoding to find out what r's encoding is.
      +func NewReader(r io.Reader, contentType string) (io.Reader, error) {
      +	preview := make([]byte, 1024)
      +	n, err := io.ReadFull(r, preview)
      +	switch {
      +	case err == io.ErrUnexpectedEOF:
      +		preview = preview[:n]
      +		r = bytes.NewReader(preview)
      +	case err != nil:
      +		return nil, err
      +	default:
      +		r = io.MultiReader(bytes.NewReader(preview), r)
      +	}
      +
      +	if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop {
      +		r = transform.NewReader(r, e.NewDecoder())
      +	}
      +	return r, nil
      +}
      +
      +// NewReaderLabel returns a reader that converts from the specified charset to
      +// UTF-8. It uses Lookup to find the encoding that corresponds to label, and
      +// returns an error if Lookup returns nil. It is suitable for use as
      +// encoding/xml.Decoder's CharsetReader function.
      +func NewReaderLabel(label string, input io.Reader) (io.Reader, error) {
      +	e, _ := Lookup(label)
      +	if e == nil {
      +		return nil, fmt.Errorf("unsupported charset: %q", label)
      +	}
      +	return transform.NewReader(input, e.NewDecoder()), nil
      +}
      +
      +func prescan(content []byte) (e encoding.Encoding, name string) {
      +	z := html.NewTokenizer(bytes.NewReader(content))
      +	for {
      +		switch z.Next() {
      +		case html.ErrorToken:
      +			return nil, ""
      +
      +		case html.StartTagToken, html.SelfClosingTagToken:
      +			tagName, hasAttr := z.TagName()
      +			if !bytes.Equal(tagName, []byte("meta")) {
      +				continue
      +			}
      +			attrList := make(map[string]bool)
      +			gotPragma := false
      +
      +			const (
      +				dontKnow = iota
      +				doNeedPragma
      +				doNotNeedPragma
      +			)
      +			needPragma := dontKnow
      +
      +			name = ""
      +			e = nil
      +			for hasAttr {
      +				var key, val []byte
      +				key, val, hasAttr = z.TagAttr()
      +				ks := string(key)
      +				if attrList[ks] {
      +					continue
      +				}
      +				attrList[ks] = true
      +				for i, c := range val {
      +					if 'A' <= c && c <= 'Z' {
      +						val[i] = c + 0x20
      +					}
      +				}
      +
      +				switch ks {
      +				case "http-equiv":
      +					if bytes.Equal(val, []byte("content-type")) {
      +						gotPragma = true
      +					}
      +
      +				case "content":
      +					if e == nil {
      +						name = fromMetaElement(string(val))
      +						if name != "" {
      +							e, name = Lookup(name)
      +							if e != nil {
      +								needPragma = doNeedPragma
      +							}
      +						}
      +					}
      +
      +				case "charset":
      +					e, name = Lookup(string(val))
      +					needPragma = doNotNeedPragma
      +				}
      +			}
      +
      +			if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
      +				continue
      +			}
      +
      +			if strings.HasPrefix(name, "utf-16") {
      +				name = "utf-8"
      +				e = encoding.Nop
      +			}
      +
      +			if e != nil {
      +				return e, name
      +			}
      +		}
      +	}
      +}
      +
      +func fromMetaElement(s string) string {
      +	for s != "" {
      +		csLoc := strings.Index(s, "charset")
      +		if csLoc == -1 {
      +			return ""
      +		}
      +		s = s[csLoc+len("charset"):]
      +		s = strings.TrimLeft(s, " \t\n\f\r")
      +		if !strings.HasPrefix(s, "=") {
      +			continue
      +		}
      +		s = s[1:]
      +		s = strings.TrimLeft(s, " \t\n\f\r")
      +		if s == "" {
      +			return ""
      +		}
      +		if q := s[0]; q == '"' || q == '\'' {
      +			s = s[1:]
      +			closeQuote := strings.IndexRune(s, rune(q))
      +			if closeQuote == -1 {
      +				return ""
      +			}
      +			return s[:closeQuote]
      +		}
      +
      +		end := strings.IndexAny(s, "; \t\n\f\r")
      +		if end == -1 {
      +			end = len(s)
      +		}
      +		return s[:end]
      +	}
      +	return ""
      +}
      +
      +var boms = []struct {
      +	bom []byte
      +	enc string
      +}{
      +	{[]byte{0xfe, 0xff}, "utf-16be"},
      +	{[]byte{0xff, 0xfe}, "utf-16le"},
      +	{[]byte{0xef, 0xbb, 0xbf}, "utf-8"},
      +}
      diff --git a/vendor/golang.org/x/net/html/charset/charset_test.go b/vendor/golang.org/x/net/html/charset/charset_test.go
      new file mode 100644
      index 00000000..e4e7d86b
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/charset/charset_test.go
      @@ -0,0 +1,237 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package charset
      +
      +import (
      +	"bytes"
      +	"encoding/xml"
      +	"io/ioutil"
      +	"runtime"
      +	"strings"
      +	"testing"
      +
      +	"golang.org/x/text/transform"
      +)
      +
      +func transformString(t transform.Transformer, s string) (string, error) {
      +	r := transform.NewReader(strings.NewReader(s), t)
      +	b, err := ioutil.ReadAll(r)
      +	return string(b), err
      +}
      +
      +type testCase struct {
      +	utf8, other, otherEncoding string
      +}
      +
      +// testCases for encoding and decoding.
      +var testCases = []testCase{
      +	{"Résumé", "Résumé", "utf8"},
      +	{"Résumé", "R\xe9sum\xe9", "latin1"},
      +	{"これは漢字です。", "S0\x8c0o0\"oW[g0Y0\x020", "UTF-16LE"},
      +	{"これは漢字です。", "0S0\x8c0oo\"[W0g0Y0\x02", "UTF-16BE"},
      +	{"Hello, world", "Hello, world", "ASCII"},
      +	{"Gdańsk", "Gda\xf1sk", "ISO-8859-2"},
      +	{"Ââ Čč Đđ Ŋŋ Õõ Šš Žž Åå Ää", "\xc2\xe2 \xc8\xe8 \xa9\xb9 \xaf\xbf \xd5\xf5 \xaa\xba \xac\xbc \xc5\xe5 \xc4\xe4", "ISO-8859-10"},
      +	{"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "ISO-8859-11"},
      +	{"latviešu", "latvie\xf0u", "ISO-8859-13"},
      +	{"Seònaid", "Se\xf2naid", "ISO-8859-14"},
      +	{"€1 is cheap", "\xa41 is cheap", "ISO-8859-15"},
      +	{"românește", "rom\xe2ne\xbate", "ISO-8859-16"},
      +	{"nutraĵo", "nutra\xbco", "ISO-8859-3"},
      +	{"Kalâdlit", "Kal\xe2dlit", "ISO-8859-4"},
      +	{"русский", "\xe0\xe3\xe1\xe1\xda\xd8\xd9", "ISO-8859-5"},
      +	{"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "ISO-8859-7"},
      +	{"Kağan", "Ka\xf0an", "ISO-8859-9"},
      +	{"Résumé", "R\x8esum\x8e", "macintosh"},
      +	{"Gdańsk", "Gda\xf1sk", "windows-1250"},
      +	{"русский", "\xf0\xf3\xf1\xf1\xea\xe8\xe9", "windows-1251"},
      +	{"Résumé", "R\xe9sum\xe9", "windows-1252"},
      +	{"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "windows-1253"},
      +	{"Kağan", "Ka\xf0an", "windows-1254"},
      +	{"עִבְרִית", "\xf2\xc4\xe1\xc0\xf8\xc4\xe9\xfa", "windows-1255"},
      +	{"العربية", "\xc7\xe1\xda\xd1\xc8\xed\xc9", "windows-1256"},
      +	{"latviešu", "latvie\xf0u", "windows-1257"},
      +	{"Việt", "Vi\xea\xf2t", "windows-1258"},
      +	{"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "windows-874"},
      +	{"русский", "\xd2\xd5\xd3\xd3\xcb\xc9\xca", "KOI8-R"},
      +	{"українська", "\xd5\xcb\xd2\xc1\xa7\xce\xd3\xd8\xcb\xc1", "KOI8-U"},
      +	{"Hello 常用國字標準字體表", "Hello \xb1`\xa5\u03b0\xea\xa6r\xbc\u0437\u01e6r\xc5\xe9\xaa\xed", "big5"},
      +	{"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gbk"},
      +	{"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gb18030"},
      +	{"עִבְרִית", "\x81\x30\xfb\x30\x81\x30\xf6\x34\x81\x30\xf9\x33\x81\x30\xf6\x30\x81\x30\xfb\x36\x81\x30\xf6\x34\x81\x30\xfa\x31\x81\x30\xfb\x38", "gb18030"},
      +	{"㧯", "\x82\x31\x89\x38", "gb18030"},
      +	{"これは漢字です。", "\x82\xb1\x82\xea\x82\xcd\x8a\xbf\x8e\x9a\x82\xc5\x82\xb7\x81B", "SJIS"},
      +	{"Hello, 世界!", "Hello, \x90\xa2\x8aE!", "SJIS"},
      +	{"イウエオカ", "\xb2\xb3\xb4\xb5\xb6", "SJIS"},
      +	{"これは漢字です。", "\xa4\xb3\xa4\xec\xa4\u03f4\xc1\xbb\xfa\xa4\u01e4\xb9\xa1\xa3", "EUC-JP"},
      +	{"Hello, 世界!", "Hello, \x1b$B@$3&\x1b(B!", "ISO-2022-JP"},
      +	{"다음과 같은 조건을 따라야 합니다: 저작자표시", "\xb4\xd9\xc0\xbd\xb0\xfa \xb0\xb0\xc0\xba \xc1\xb6\xb0\xc7\xc0\xbb \xb5\xfb\xb6\xf3\xbe\xdf \xc7մϴ\xd9: \xc0\xfa\xc0\xdb\xc0\xdaǥ\xbd\xc3", "EUC-KR"},
      +}
      +
      +func TestDecode(t *testing.T) {
      +	testCases := append(testCases, []testCase{
      +		// Replace multi-byte maximum subpart of ill-formed subsequence with
      +		// single replacement character (WhatWG requirement).
      +		{"Rés\ufffdumé", "Rés\xe1\x80umé", "utf8"},
      +	}...)
      +	for _, tc := range testCases {
      +		e, _ := Lookup(tc.otherEncoding)
      +		if e == nil {
      +			t.Errorf("%s: not found", tc.otherEncoding)
      +			continue
      +		}
      +		s, err := transformString(e.NewDecoder(), tc.other)
      +		if err != nil {
      +			t.Errorf("%s: decode %q: %v", tc.otherEncoding, tc.other, err)
      +			continue
      +		}
      +		if s != tc.utf8 {
      +			t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.utf8)
      +		}
      +	}
      +}
      +
      +func TestEncode(t *testing.T) {
      +	testCases := append(testCases, []testCase{
      +		// Use Go-style replacement.
      +		{"Rés\xe1\x80umé", "Rés\ufffd\ufffdumé", "utf8"},
      +		// U+0144 LATIN SMALL LETTER N WITH ACUTE not supported by encoding.
      +		{"Gdańsk", "Gda&#324;sk", "ISO-8859-11"},
      +		{"\ufffd", "&#65533;", "ISO-8859-11"},
      +		{"a\xe1\x80b", "a&#65533;&#65533;b", "ISO-8859-11"},
      +	}...)
      +	for _, tc := range testCases {
      +		e, _ := Lookup(tc.otherEncoding)
      +		if e == nil {
      +			t.Errorf("%s: not found", tc.otherEncoding)
      +			continue
      +		}
      +		s, err := transformString(e.NewEncoder(), tc.utf8)
      +		if err != nil {
      +			t.Errorf("%s: encode %q: %s", tc.otherEncoding, tc.utf8, err)
      +			continue
      +		}
      +		if s != tc.other {
      +			t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.other)
      +		}
      +	}
      +}
      +
      +var sniffTestCases = []struct {
      +	filename, declared, want string
      +}{
      +	{"HTTP-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
      +	{"UTF-16LE-BOM.html", "", "utf-16le"},
      +	{"UTF-16BE-BOM.html", "", "utf-16be"},
      +	{"meta-content-attribute.html", "text/html", "iso-8859-15"},
      +	{"meta-charset-attribute.html", "text/html", "iso-8859-15"},
      +	{"No-encoding-declaration.html", "text/html", "utf-8"},
      +	{"HTTP-vs-UTF-8-BOM.html", "text/html; charset=iso-8859-15", "utf-8"},
      +	{"HTTP-vs-meta-content.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
      +	{"HTTP-vs-meta-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
      +	{"UTF-8-BOM-vs-meta-content.html", "text/html", "utf-8"},
      +	{"UTF-8-BOM-vs-meta-charset.html", "text/html", "utf-8"},
      +}
      +
      +func TestSniff(t *testing.T) {
      +	switch runtime.GOOS {
      +	case "nacl": // platforms that don't permit direct file system access
      +		t.Skipf("not supported on %q", runtime.GOOS)
      +	}
      +
      +	for _, tc := range sniffTestCases {
      +		content, err := ioutil.ReadFile("testdata/" + tc.filename)
      +		if err != nil {
      +			t.Errorf("%s: error reading file: %v", tc.filename, err)
      +			continue
      +		}
      +
      +		_, name, _ := DetermineEncoding(content, tc.declared)
      +		if name != tc.want {
      +			t.Errorf("%s: got %q, want %q", tc.filename, name, tc.want)
      +			continue
      +		}
      +	}
      +}
      +
      +func TestReader(t *testing.T) {
      +	switch runtime.GOOS {
      +	case "nacl": // platforms that don't permit direct file system access
      +		t.Skipf("not supported on %q", runtime.GOOS)
      +	}
      +
      +	for _, tc := range sniffTestCases {
      +		content, err := ioutil.ReadFile("testdata/" + tc.filename)
      +		if err != nil {
      +			t.Errorf("%s: error reading file: %v", tc.filename, err)
      +			continue
      +		}
      +
      +		r, err := NewReader(bytes.NewReader(content), tc.declared)
      +		if err != nil {
      +			t.Errorf("%s: error creating reader: %v", tc.filename, err)
      +			continue
      +		}
      +
      +		got, err := ioutil.ReadAll(r)
      +		if err != nil {
      +			t.Errorf("%s: error reading from charset.NewReader: %v", tc.filename, err)
      +			continue
      +		}
      +
      +		e, _ := Lookup(tc.want)
      +		want, err := ioutil.ReadAll(transform.NewReader(bytes.NewReader(content), e.NewDecoder()))
      +		if err != nil {
      +			t.Errorf("%s: error decoding with hard-coded charset name: %v", tc.filename, err)
      +			continue
      +		}
      +
      +		if !bytes.Equal(got, want) {
      +			t.Errorf("%s: got %q, want %q", tc.filename, got, want)
      +			continue
      +		}
      +	}
      +}
      +
      +var metaTestCases = []struct {
      +	meta, want string
      +}{
      +	{"", ""},
      +	{"text/html", ""},
      +	{"text/html; charset utf-8", ""},
      +	{"text/html; charset=latin-2", "latin-2"},
      +	{"text/html; charset; charset = utf-8", "utf-8"},
      +	{`charset="big5"`, "big5"},
      +	{"charset='shift_jis'", "shift_jis"},
      +}
      +
      +func TestFromMeta(t *testing.T) {
      +	for _, tc := range metaTestCases {
      +		got := fromMetaElement(tc.meta)
      +		if got != tc.want {
      +			t.Errorf("%q: got %q, want %q", tc.meta, got, tc.want)
      +		}
      +	}
      +}
      +
      +func TestXML(t *testing.T) {
      +	const s = "<?xml version=\"1.0\" encoding=\"windows-1252\"?><a><Word>r\xe9sum\xe9</Word></a>"
      +
      +	d := xml.NewDecoder(strings.NewReader(s))
      +	d.CharsetReader = NewReaderLabel
      +
      +	var a struct {
      +		Word string
      +	}
      +	err := d.Decode(&a)
      +	if err != nil {
      +		t.Fatalf("Decode: %v", err)
      +	}
      +
      +	want := "résumé"
      +	if a.Word != want {
      +		t.Errorf("got %q, want %q", a.Word, want)
      +	}
      +}
      diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html
      new file mode 100644
      index 00000000..9915fa0e
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html
      @@ -0,0 +1,48 @@
      +<!DOCTYPE html>
      +<html  lang="en" >
      +<head>
      +  <title>HTTP charset</title>
      +<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
      +<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
      +<link rel="stylesheet" type="text/css" href="./generatedtests.css">
      +<script src="http://w3c-test.org/resources/testharness.js"></script>
      +<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
      +<meta name='flags' content='http'>
      +<meta name="assert" content="The character encoding of a page can be set using the HTTP header charset declaration.">
      +<style type='text/css'>
      +.test div { width: 50px; }</style>
      +<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
      +</head>
      +<body>
      +<p class='title'>HTTP charset</p>
      +
      +
      +<div id='log'></div>
      +
      +
      +<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
      +
      +
      +
      +
      +
      +<div class='description'>
      +<p class="assertion" title="Assertion">The character encoding of a page can be set using the HTTP header charset declaration.</p>
      +<div class="notes"><p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p><p>The only character encoding declaration for this HTML file is in the HTTP header, which sets the encoding to ISO 8859-15.</p></p>
      +</div>
      +</div>
      +<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-003">Next test</a></div><div class="doctype">HTML5</div>
      +<p class="jump">the-input-byte-stream-001<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-001" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
      +<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
      +				<li>The test is read from a server that supports HTTP.</li></ul></div>
      +</div>
      +<script>
      +test(function() {
      +assert_equals(document.getElementById('box').offsetWidth, 100);
      +}, " ");
      +</script>
      +
      +</body>
      +</html>
      +
      +
      diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html
      new file mode 100644
      index 00000000..26e5d8b4
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html
      @@ -0,0 +1,48 @@
      +<!DOCTYPE html>
      +<html  lang="en" >
      +<head>
      +  <title>HTTP vs UTF-8 BOM</title>
      +<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
      +<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
      +<link rel="stylesheet" type="text/css" href="./generatedtests.css">
      +<script src="http://w3c-test.org/resources/testharness.js"></script>
      +<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
      +<meta name='flags' content='http'>
      +<meta name="assert" content="A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.">
      +<style type='text/css'>
      +.test div { width: 50px; }</style>
      +<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
      +</head>
      +<body>
      +<p class='title'>HTTP vs UTF-8 BOM</p>
      +
      +
      +<div id='log'></div>
      +
      +
      +<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
      +
      +
      +
      +
      +
      +<div class='description'>
      +<p class="assertion" title="Assertion">A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.</p>
      +<div class="notes"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p><p>If the test is unsuccessful, the characters &#x00EF;&#x00BB;&#x00BF; should appear at the top of the page.  These represent the bytes that make up the UTF-8 signature when encountered in the ISO 8859-15 encoding.</p></p>
      +</div>
      +</div>
      +<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-022">Next test</a></div><div class="doctype">HTML5</div>
      +<p class="jump">the-input-byte-stream-034<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-034" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
      +<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
      +				<li>The test is read from a server that supports HTTP.</li></ul></div>
      +</div>
      +<script>
      +test(function() {
      +assert_equals(document.getElementById('box').offsetWidth, 100);
      +}, " ");
      +</script>
      +
      +</body>
      +</html>
      +
      +
      diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html
      new file mode 100644
      index 00000000..2f07e951
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html
      @@ -0,0 +1,49 @@
      +<!DOCTYPE html>
      +<html  lang="en" >
      +<head>
      + <meta charset="iso-8859-1" > <title>HTTP vs meta charset</title>
      +<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
      +<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
      +<link rel="stylesheet" type="text/css" href="./generatedtests.css">
      +<script src="http://w3c-test.org/resources/testharness.js"></script>
      +<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
      +<meta name='flags' content='http'>
      +<meta name="assert" content="The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.">
      +<style type='text/css'>
      +.test div { width: 50px; }.test div { width: 90px; }
      +</style>
      +<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
      +</head>
      +<body>
      +<p class='title'>HTTP vs meta charset</p>
      +
      +
      +<div id='log'></div>
      +
      +
      +<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
      +
      +
      +
      +
      +
      +<div class='description'>
      +<p class="assertion" title="Assertion">The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.</p>
      +<div class="notes"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-1.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
      +</div>
      +</div>
      +<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-037">Next test</a></div><div class="doctype">HTML5</div>
      +<p class="jump">the-input-byte-stream-018<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-018" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
      +<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
      +				<li>The test is read from a server that supports HTTP.</li></ul></div>
      +</div>
      +<script>
      +test(function() {
      +assert_equals(document.getElementById('box').offsetWidth, 100);
      +}, " ");
      +</script>
      +
      +</body>
      +</html>
      +
      +
      diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html
      new file mode 100644
      index 00000000..6853cdde
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html
      @@ -0,0 +1,49 @@
      +<!DOCTYPE html>
      +<html  lang="en" >
      +<head>
      + <meta http-equiv="content-type" content="text/html;charset=iso-8859-1" > <title>HTTP vs meta content</title>
      +<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
      +<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
      +<link rel="stylesheet" type="text/css" href="./generatedtests.css">
      +<script src="http://w3c-test.org/resources/testharness.js"></script>
      +<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
      +<meta name='flags' content='http'>
      +<meta name="assert" content="The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.">
      +<style type='text/css'>
      +.test div { width: 50px; }.test div { width: 90px; }
      +</style>
      +<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
      +</head>
      +<body>
      +<p class='title'>HTTP vs meta content</p>
      +
      +
      +<div id='log'></div>
      +
      +
      +<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
      +
      +
      +
      +
      +
      +<div class='description'>
      +<p class="assertion" title="Assertion">The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.</p>
      +<div class="notes"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-1.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
      +</div>
      +</div>
      +<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-018">Next test</a></div><div class="doctype">HTML5</div>
      +<p class="jump">the-input-byte-stream-016<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-016" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
      +<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
      +				<li>The test is read from a server that supports HTTP.</li></ul></div>
      +</div>
      +<script>
      +test(function() {
      +assert_equals(document.getElementById('box').offsetWidth, 100);
      +}, " ");
      +</script>
      +
      +</body>
      +</html>
      +
      +
      diff --git a/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html
      new file mode 100644
      index 00000000..612e26c6
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html
      @@ -0,0 +1,47 @@
      +<!DOCTYPE html>
      +<html  lang="en" >
      +<head>
      +  <title>No encoding declaration</title>
      +<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
      +<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
      +<link rel="stylesheet" type="text/css" href="./generatedtests.css">
      +<script src="http://w3c-test.org/resources/testharness.js"></script>
      +<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
      +<meta name='flags' content='http'>
      +<meta name="assert" content="A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.">
      +<style type='text/css'>
      +.test div { width: 50px; }</style>
      +<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
      +</head>
      +<body>
      +<p class='title'>No encoding declaration</p>
      +
      +
      +<div id='log'></div>
      +
      +
      +<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
      +
      +
      +
      +
      +
      +<div class='description'>
      +<p class="assertion" title="Assertion">A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.</p>
      +<div class="notes"><p><p>The test on this page contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p></p>
      +</div>
      +</div>
      +<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-034">Next test</a></div><div class="doctype">HTML5</div>
      +<p class="jump">the-input-byte-stream-015<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-015" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
      +<div class='prereq'>Assumptions: <ul><li>The test is read from a server that supports HTTP.</li></ul></div>
      +</div>
      +<script>
      +test(function() {
      +assert_equals(document.getElementById('box').offsetWidth, 100);
      +}, " ");
      +</script>
      +
      +</body>
      +</html>
      +
      +
      diff --git a/vendor/golang.org/x/net/html/charset/testdata/README b/vendor/golang.org/x/net/html/charset/testdata/README
      new file mode 100644
      index 00000000..38ef0f9f
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/charset/testdata/README
      @@ -0,0 +1,9 @@
      +These test cases come from
      +http://www.w3.org/International/tests/repository/html5/the-input-byte-stream/results-basics
      +
      +Distributed under both the W3C Test Suite License
      +(http://www.w3.org/Consortium/Legal/2008/04-testsuite-license)
      +and the W3C 3-clause BSD License
      +(http://www.w3.org/Consortium/Legal/2008/03-bsd-license).
      +To contribute to a W3C Test Suite, see the policies and contribution
      +forms (http://www.w3.org/2004/10/27-testcases).
      diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html
      new file mode 100644
      index 00000000..3abf7a93
      Binary files /dev/null and b/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html differ
      diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html
      new file mode 100644
      index 00000000..76254c98
      Binary files /dev/null and b/vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html differ
      diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html
      new file mode 100644
      index 00000000..83de4333
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html
      @@ -0,0 +1,49 @@
      +<!DOCTYPE html>
      +<html  lang="en" >
      +<head>
      + <meta charset="iso-8859-15"> <title>UTF-8 BOM vs meta charset</title>
      +<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
      +<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
      +<link rel="stylesheet" type="text/css" href="./generatedtests.css">
      +<script src="http://w3c-test.org/resources/testharness.js"></script>
      +<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
      +<meta name='flags' content='http'>
      +<meta name="assert" content="A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.">
      +<style type='text/css'>
      +.test div { width: 50px; }.test div { width: 90px; }
      +</style>
      +<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
      +</head>
      +<body>
      +<p class='title'>UTF-8 BOM vs meta charset</p>
      +
      +
      +<div id='log'></div>
      +
      +
      +<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
      +
      +
      +
      +
      +
      +<div class='description'>
      +<p class="assertion" title="Assertion">A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.</p>
      +<div class="notes"><p><p>The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p></p>
      +</div>
      +</div>
      +<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-024">Next test</a></div><div class="doctype">HTML5</div>
      +<p class="jump">the-input-byte-stream-038<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-038" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
      +<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
      +				<li>The test is read from a server that supports HTTP.</li></ul></div>
      +</div>
      +<script>
      +test(function() {
      +assert_equals(document.getElementById('box').offsetWidth, 100);
      +}, " ");
      +</script>
      +
      +</body>
      +</html>
      +
      +
      diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html
      new file mode 100644
      index 00000000..501aac2d
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html
      @@ -0,0 +1,48 @@
      +<!DOCTYPE html>
      +<html  lang="en" >
      +<head>
      + <meta http-equiv="content-type" content="text/html; charset=iso-8859-15"> <title>UTF-8 BOM vs meta content</title>
      +<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
      +<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
      +<link rel="stylesheet" type="text/css" href="./generatedtests.css">
      +<script src="http://w3c-test.org/resources/testharness.js"></script>
      +<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
      +<meta name='flags' content='http'>
      +<meta name="assert" content="A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.">
      +<style type='text/css'>
      +.test div { width: 50px; }</style>
      +<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
      +</head>
      +<body>
      +<p class='title'>UTF-8 BOM vs meta content</p>
      +
      +
      +<div id='log'></div>
      +
      +
      +<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
      +
      +
      +
      +
      +
      +<div class='description'>
      +<p class="assertion" title="Assertion">A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.</p>
      +<div class="notes"><p><p>The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p></p>
      +</div>
      +</div>
      +<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-038">Next test</a></div><div class="doctype">HTML5</div>
      +<p class="jump">the-input-byte-stream-037<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-037" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
      +<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
      +				<li>The test is read from a server that supports HTTP.</li></ul></div>
      +</div>
      +<script>
      +test(function() {
      +assert_equals(document.getElementById('box').offsetWidth, 100);
      +}, " ");
      +</script>
      +
      +</body>
      +</html>
      +
      +
      diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html
      new file mode 100644
      index 00000000..2d7d25ab
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html
      @@ -0,0 +1,48 @@
      +<!DOCTYPE html>
      +<html  lang="en" >
      +<head>
      + <meta charset="iso-8859-15"> <title>meta charset attribute</title>
      +<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
      +<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
      +<link rel="stylesheet" type="text/css" href="./generatedtests.css">
      +<script src="http://w3c-test.org/resources/testharness.js"></script>
      +<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
      +<meta name='flags' content='http'>
      +<meta name="assert" content="The character encoding of the page can be set by a meta element with charset attribute.">
      +<style type='text/css'>
      +.test div { width: 50px; }</style>
      +<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
      +</head>
      +<body>
      +<p class='title'>meta charset attribute</p>
      +
      +
      +<div id='log'></div>
      +
      +
      +<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
      +
      +
      +
      +
      +
      +<div class='description'>
      +<p class="assertion" title="Assertion">The character encoding of the page can be set by a meta element with charset attribute.</p>
      +<div class="notes"><p><p>The only character encoding declaration for this HTML file is in the charset attribute of the meta element, which declares the encoding to be ISO 8859-15.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
      +</div>
      +</div>
      +<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-015">Next test</a></div><div class="doctype">HTML5</div>
      +<p class="jump">the-input-byte-stream-009<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-009" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
      +<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
      +				<li>The test is read from a server that supports HTTP.</li></ul></div>
      +</div>
      +<script>
      +test(function() {
      +assert_equals(document.getElementById('box').offsetWidth, 100);
      +}, " ");
      +</script>
      +
      +</body>
      +</html>
      +
      +
      diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html
      new file mode 100644
      index 00000000..1c3f228e
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html
      @@ -0,0 +1,48 @@
      +<!DOCTYPE html>
      +<html  lang="en" >
      +<head>
      + <meta http-equiv="content-type" content="text/html; charset=iso-8859-15"> <title>meta content attribute</title>
      +<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
      +<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
      +<link rel="stylesheet" type="text/css" href="./generatedtests.css">
      +<script src="http://w3c-test.org/resources/testharness.js"></script>
      +<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
      +<meta name='flags' content='http'>
      +<meta name="assert" content="The character encoding of the page can be set by a meta element with http-equiv and content attributes.">
      +<style type='text/css'>
      +.test div { width: 50px; }</style>
      +<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
      +</head>
      +<body>
      +<p class='title'>meta content attribute</p>
      +
      +
      +<div id='log'></div>
      +
      +
      +<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
      +
      +
      +
      +
      +
      +<div class='description'>
      +<p class="assertion" title="Assertion">The character encoding of the page can be set by a meta element with http-equiv and content attributes.</p>
      +<div class="notes"><p><p>The only character encoding declaration for this HTML file is in the content attribute of the meta element, which declares the encoding to be ISO 8859-15.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
      +</div>
      +</div>
      +<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-009">Next test</a></div><div class="doctype">HTML5</div>
      +<p class="jump">the-input-byte-stream-007<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-007" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
      +<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
      +				<li>The test is read from a server that supports HTTP.</li></ul></div>
      +</div>
      +<script>
      +test(function() {
      +assert_equals(document.getElementById('box').offsetWidth, 100);
      +}, " ");
      +</script>
      +
      +</body>
      +</html>
      +
      +
      diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go
      new file mode 100644
      index 00000000..52f651ff
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/const.go
      @@ -0,0 +1,102 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +// Section 12.2.3.2 of the HTML5 specification says "The following elements
      +// have varying levels of special parsing rules".
      +// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
      +var isSpecialElementMap = map[string]bool{
      +	"address":    true,
      +	"applet":     true,
      +	"area":       true,
      +	"article":    true,
      +	"aside":      true,
      +	"base":       true,
      +	"basefont":   true,
      +	"bgsound":    true,
      +	"blockquote": true,
      +	"body":       true,
      +	"br":         true,
      +	"button":     true,
      +	"caption":    true,
      +	"center":     true,
      +	"col":        true,
      +	"colgroup":   true,
      +	"dd":         true,
      +	"details":    true,
      +	"dir":        true,
      +	"div":        true,
      +	"dl":         true,
      +	"dt":         true,
      +	"embed":      true,
      +	"fieldset":   true,
      +	"figcaption": true,
      +	"figure":     true,
      +	"footer":     true,
      +	"form":       true,
      +	"frame":      true,
      +	"frameset":   true,
      +	"h1":         true,
      +	"h2":         true,
      +	"h3":         true,
      +	"h4":         true,
      +	"h5":         true,
      +	"h6":         true,
      +	"head":       true,
      +	"header":     true,
      +	"hgroup":     true,
      +	"hr":         true,
      +	"html":       true,
      +	"iframe":     true,
      +	"img":        true,
      +	"input":      true,
      +	"isindex":    true,
      +	"li":         true,
      +	"link":       true,
      +	"listing":    true,
      +	"marquee":    true,
      +	"menu":       true,
      +	"meta":       true,
      +	"nav":        true,
      +	"noembed":    true,
      +	"noframes":   true,
      +	"noscript":   true,
      +	"object":     true,
      +	"ol":         true,
      +	"p":          true,
      +	"param":      true,
      +	"plaintext":  true,
      +	"pre":        true,
      +	"script":     true,
      +	"section":    true,
      +	"select":     true,
      +	"source":     true,
      +	"style":      true,
      +	"summary":    true,
      +	"table":      true,
      +	"tbody":      true,
      +	"td":         true,
      +	"template":   true,
      +	"textarea":   true,
      +	"tfoot":      true,
      +	"th":         true,
      +	"thead":      true,
      +	"title":      true,
      +	"tr":         true,
      +	"track":      true,
      +	"ul":         true,
      +	"wbr":        true,
      +	"xmp":        true,
      +}
      +
      +func isSpecialElement(element *Node) bool {
      +	switch element.Namespace {
      +	case "", "html":
      +		return isSpecialElementMap[element.Data]
      +	case "svg":
      +		return element.Data == "foreignObject"
      +	}
      +	return false
      +}
      diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go
      new file mode 100644
      index 00000000..94f49687
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/doc.go
      @@ -0,0 +1,106 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +/*
      +Package html implements an HTML5-compliant tokenizer and parser.
      +
      +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
      +caller's responsibility to ensure that r provides UTF-8 encoded HTML.
      +
      +	z := html.NewTokenizer(r)
      +
      +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
      +which parses the next token and returns its type, or an error:
      +
      +	for {
      +		tt := z.Next()
      +		if tt == html.ErrorToken {
      +			// ...
      +			return ...
      +		}
      +		// Process the current token.
      +	}
      +
      +There are two APIs for retrieving the current token. The high-level API is to
      +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
      +allow optionally calling Raw after Next but before Token, Text, TagName, or
      +TagAttr. In EBNF notation, the valid call sequence per token is:
      +
      +	Next {Raw} [ Token | Text | TagName {TagAttr} ]
      +
      +Token returns an independent data structure that completely describes a token.
      +Entities (such as "&lt;") are unescaped, tag names and attribute keys are
      +lower-cased, and attributes are collected into a []Attribute. For example:
      +
      +	for {
      +		if z.Next() == html.ErrorToken {
      +			// Returning io.EOF indicates success.
      +			return z.Err()
      +		}
      +		emitToken(z.Token())
      +	}
      +
      +The low-level API performs fewer allocations and copies, but the contents of
      +the []byte values returned by Text, TagName and TagAttr may change on the next
      +call to Next. For example, to extract an HTML page's anchor text:
      +
      +	depth := 0
      +	for {
      +		tt := z.Next()
      +		switch tt {
      +		case ErrorToken:
      +			return z.Err()
      +		case TextToken:
      +			if depth > 0 {
      +				// emitBytes should copy the []byte it receives,
      +				// if it doesn't process it immediately.
      +				emitBytes(z.Text())
      +			}
      +		case StartTagToken, EndTagToken:
      +			tn, _ := z.TagName()
      +			if len(tn) == 1 && tn[0] == 'a' {
      +				if tt == StartTagToken {
      +					depth++
      +				} else {
      +					depth--
      +				}
      +			}
      +		}
      +	}
      +
      +Parsing is done by calling Parse with an io.Reader, which returns the root of
      +the parse tree (the document element) as a *Node. It is the caller's
      +responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
      +example, to process each anchor node in depth-first order:
      +
      +	doc, err := html.Parse(r)
      +	if err != nil {
      +		// ...
      +	}
      +	var f func(*html.Node)
      +	f = func(n *html.Node) {
      +		if n.Type == html.ElementNode && n.Data == "a" {
      +			// Do something with n...
      +		}
      +		for c := n.FirstChild; c != nil; c = c.NextSibling {
      +			f(c)
      +		}
      +	}
      +	f(doc)
      +
      +The relevant specifications include:
      +https://html.spec.whatwg.org/multipage/syntax.html and
      +https://html.spec.whatwg.org/multipage/syntax.html#tokenization
      +*/
      +package html // import "golang.org/x/net/html"
      +
      +// The tokenization algorithm implemented by this package is not a line-by-line
      +// transliteration of the relatively verbose state-machine in the WHATWG
      +// specification. A more direct approach is used instead, where the program
      +// counter implies the state, such as whether it is tokenizing a tag or a text
      +// node. Specification compliance is verified by checking expected and actual
      +// outputs over a test suite rather than aiming for algorithmic fidelity.
      +
      +// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
      +// TODO(nigeltao): How does parsing interact with a JavaScript engine?
      diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go
      new file mode 100644
      index 00000000..c484e5a9
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/doctype.go
      @@ -0,0 +1,156 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +import (
      +	"strings"
      +)
      +
      +// parseDoctype parses the data from a DoctypeToken into a name,
      +// public identifier, and system identifier. It returns a Node whose Type
      +// is DoctypeNode, whose Data is the name, and which has attributes
      +// named "system" and "public" for the two identifiers if they were present.
      +// quirks is whether the document should be parsed in "quirks mode".
      +func parseDoctype(s string) (n *Node, quirks bool) {
      +	n = &Node{Type: DoctypeNode}
      +
      +	// Find the name.
      +	space := strings.IndexAny(s, whitespace)
      +	if space == -1 {
      +		space = len(s)
      +	}
      +	n.Data = s[:space]
      +	// The comparison to "html" is case-sensitive.
      +	if n.Data != "html" {
      +		quirks = true
      +	}
      +	n.Data = strings.ToLower(n.Data)
      +	s = strings.TrimLeft(s[space:], whitespace)
      +
      +	if len(s) < 6 {
      +		// It can't start with "PUBLIC" or "SYSTEM".
      +		// Ignore the rest of the string.
      +		return n, quirks || s != ""
      +	}
      +
      +	key := strings.ToLower(s[:6])
      +	s = s[6:]
      +	for key == "public" || key == "system" {
      +		s = strings.TrimLeft(s, whitespace)
      +		if s == "" {
      +			break
      +		}
      +		quote := s[0]
      +		if quote != '"' && quote != '\'' {
      +			break
      +		}
      +		s = s[1:]
      +		q := strings.IndexRune(s, rune(quote))
      +		var id string
      +		if q == -1 {
      +			id = s
      +			s = ""
      +		} else {
      +			id = s[:q]
      +			s = s[q+1:]
      +		}
      +		n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
      +		if key == "public" {
      +			key = "system"
      +		} else {
      +			key = ""
      +		}
      +	}
      +
      +	if key != "" || s != "" {
      +		quirks = true
      +	} else if len(n.Attr) > 0 {
      +		if n.Attr[0].Key == "public" {
      +			public := strings.ToLower(n.Attr[0].Val)
      +			switch public {
      +			case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
      +				quirks = true
      +			default:
      +				for _, q := range quirkyIDs {
      +					if strings.HasPrefix(public, q) {
      +						quirks = true
      +						break
      +					}
      +				}
      +			}
      +			// The following two public IDs only cause quirks mode if there is no system ID.
      +			if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
      +				strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
      +				quirks = true
      +			}
      +		}
      +		if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
      +			strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
      +			quirks = true
      +		}
      +	}
      +
      +	return n, quirks
      +}
      +
      +// quirkyIDs is a list of public doctype identifiers that cause a document
      +// to be interpreted in quirks mode. The identifiers should be in lower case.
      +var quirkyIDs = []string{
      +	"+//silmaril//dtd html pro v0r11 19970101//",
      +	"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
      +	"-//as//dtd html 3.0 aswedit + extensions//",
      +	"-//ietf//dtd html 2.0 level 1//",
      +	"-//ietf//dtd html 2.0 level 2//",
      +	"-//ietf//dtd html 2.0 strict level 1//",
      +	"-//ietf//dtd html 2.0 strict level 2//",
      +	"-//ietf//dtd html 2.0 strict//",
      +	"-//ietf//dtd html 2.0//",
      +	"-//ietf//dtd html 2.1e//",
      +	"-//ietf//dtd html 3.0//",
      +	"-//ietf//dtd html 3.2 final//",
      +	"-//ietf//dtd html 3.2//",
      +	"-//ietf//dtd html 3//",
      +	"-//ietf//dtd html level 0//",
      +	"-//ietf//dtd html level 1//",
      +	"-//ietf//dtd html level 2//",
      +	"-//ietf//dtd html level 3//",
      +	"-//ietf//dtd html strict level 0//",
      +	"-//ietf//dtd html strict level 1//",
      +	"-//ietf//dtd html strict level 2//",
      +	"-//ietf//dtd html strict level 3//",
      +	"-//ietf//dtd html strict//",
      +	"-//ietf//dtd html//",
      +	"-//metrius//dtd metrius presentational//",
      +	"-//microsoft//dtd internet explorer 2.0 html strict//",
      +	"-//microsoft//dtd internet explorer 2.0 html//",
      +	"-//microsoft//dtd internet explorer 2.0 tables//",
      +	"-//microsoft//dtd internet explorer 3.0 html strict//",
      +	"-//microsoft//dtd internet explorer 3.0 html//",
      +	"-//microsoft//dtd internet explorer 3.0 tables//",
      +	"-//netscape comm. corp.//dtd html//",
      +	"-//netscape comm. corp.//dtd strict html//",
      +	"-//o'reilly and associates//dtd html 2.0//",
      +	"-//o'reilly and associates//dtd html extended 1.0//",
      +	"-//o'reilly and associates//dtd html extended relaxed 1.0//",
      +	"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
      +	"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
      +	"-//spyglass//dtd html 2.0 extended//",
      +	"-//sq//dtd html 2.0 hotmetal + extensions//",
      +	"-//sun microsystems corp.//dtd hotjava html//",
      +	"-//sun microsystems corp.//dtd hotjava strict html//",
      +	"-//w3c//dtd html 3 1995-03-24//",
      +	"-//w3c//dtd html 3.2 draft//",
      +	"-//w3c//dtd html 3.2 final//",
      +	"-//w3c//dtd html 3.2//",
      +	"-//w3c//dtd html 3.2s draft//",
      +	"-//w3c//dtd html 4.0 frameset//",
      +	"-//w3c//dtd html 4.0 transitional//",
      +	"-//w3c//dtd html experimental 19960712//",
      +	"-//w3c//dtd html experimental 970421//",
      +	"-//w3c//dtd w3 html//",
      +	"-//w3o//dtd w3 html 3.0//",
      +	"-//webtechs//dtd mozilla html 2.0//",
      +	"-//webtechs//dtd mozilla html//",
      +}
      diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go
      new file mode 100644
      index 00000000..a50c04c6
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/entity.go
      @@ -0,0 +1,2253 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +// All entities that do not end with ';' are 6 or fewer bytes long.
      +const longestEntityWithoutSemicolon = 6
      +
      +// entity is a map from HTML entity names to their values. The semicolon matters:
      +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references
      +// lists both "amp" and "amp;" as two separate entries.
      +//
      +// Note that the HTML5 list is larger than the HTML4 list at
      +// http://www.w3.org/TR/html4/sgml/entities.html
      +var entity = map[string]rune{
      +	"AElig;":                           '\U000000C6',
      +	"AMP;":                             '\U00000026',
      +	"Aacute;":                          '\U000000C1',
      +	"Abreve;":                          '\U00000102',
      +	"Acirc;":                           '\U000000C2',
      +	"Acy;":                             '\U00000410',
      +	"Afr;":                             '\U0001D504',
      +	"Agrave;":                          '\U000000C0',
      +	"Alpha;":                           '\U00000391',
      +	"Amacr;":                           '\U00000100',
      +	"And;":                             '\U00002A53',
      +	"Aogon;":                           '\U00000104',
      +	"Aopf;":                            '\U0001D538',
      +	"ApplyFunction;":                   '\U00002061',
      +	"Aring;":                           '\U000000C5',
      +	"Ascr;":                            '\U0001D49C',
      +	"Assign;":                          '\U00002254',
      +	"Atilde;":                          '\U000000C3',
      +	"Auml;":                            '\U000000C4',
      +	"Backslash;":                       '\U00002216',
      +	"Barv;":                            '\U00002AE7',
      +	"Barwed;":                          '\U00002306',
      +	"Bcy;":                             '\U00000411',
      +	"Because;":                         '\U00002235',
      +	"Bernoullis;":                      '\U0000212C',
      +	"Beta;":                            '\U00000392',
      +	"Bfr;":                             '\U0001D505',
      +	"Bopf;":                            '\U0001D539',
      +	"Breve;":                           '\U000002D8',
      +	"Bscr;":                            '\U0000212C',
      +	"Bumpeq;":                          '\U0000224E',
      +	"CHcy;":                            '\U00000427',
      +	"COPY;":                            '\U000000A9',
      +	"Cacute;":                          '\U00000106',
      +	"Cap;":                             '\U000022D2',
      +	"CapitalDifferentialD;":            '\U00002145',
      +	"Cayleys;":                         '\U0000212D',
      +	"Ccaron;":                          '\U0000010C',
      +	"Ccedil;":                          '\U000000C7',
      +	"Ccirc;":                           '\U00000108',
      +	"Cconint;":                         '\U00002230',
      +	"Cdot;":                            '\U0000010A',
      +	"Cedilla;":                         '\U000000B8',
      +	"CenterDot;":                       '\U000000B7',
      +	"Cfr;":                             '\U0000212D',
      +	"Chi;":                             '\U000003A7',
      +	"CircleDot;":                       '\U00002299',
      +	"CircleMinus;":                     '\U00002296',
      +	"CirclePlus;":                      '\U00002295',
      +	"CircleTimes;":                     '\U00002297',
      +	"ClockwiseContourIntegral;":        '\U00002232',
      +	"CloseCurlyDoubleQuote;":           '\U0000201D',
      +	"CloseCurlyQuote;":                 '\U00002019',
      +	"Colon;":                           '\U00002237',
      +	"Colone;":                          '\U00002A74',
      +	"Congruent;":                       '\U00002261',
      +	"Conint;":                          '\U0000222F',
      +	"ContourIntegral;":                 '\U0000222E',
      +	"Copf;":                            '\U00002102',
      +	"Coproduct;":                       '\U00002210',
      +	"CounterClockwiseContourIntegral;": '\U00002233',
      +	"Cross;":                    '\U00002A2F',
      +	"Cscr;":                     '\U0001D49E',
      +	"Cup;":                      '\U000022D3',
      +	"CupCap;":                   '\U0000224D',
      +	"DD;":                       '\U00002145',
      +	"DDotrahd;":                 '\U00002911',
      +	"DJcy;":                     '\U00000402',
      +	"DScy;":                     '\U00000405',
      +	"DZcy;":                     '\U0000040F',
      +	"Dagger;":                   '\U00002021',
      +	"Darr;":                     '\U000021A1',
      +	"Dashv;":                    '\U00002AE4',
      +	"Dcaron;":                   '\U0000010E',
      +	"Dcy;":                      '\U00000414',
      +	"Del;":                      '\U00002207',
      +	"Delta;":                    '\U00000394',
      +	"Dfr;":                      '\U0001D507',
      +	"DiacriticalAcute;":         '\U000000B4',
      +	"DiacriticalDot;":           '\U000002D9',
      +	"DiacriticalDoubleAcute;":   '\U000002DD',
      +	"DiacriticalGrave;":         '\U00000060',
      +	"DiacriticalTilde;":         '\U000002DC',
      +	"Diamond;":                  '\U000022C4',
      +	"DifferentialD;":            '\U00002146',
      +	"Dopf;":                     '\U0001D53B',
      +	"Dot;":                      '\U000000A8',
      +	"DotDot;":                   '\U000020DC',
      +	"DotEqual;":                 '\U00002250',
      +	"DoubleContourIntegral;":    '\U0000222F',
      +	"DoubleDot;":                '\U000000A8',
      +	"DoubleDownArrow;":          '\U000021D3',
      +	"DoubleLeftArrow;":          '\U000021D0',
      +	"DoubleLeftRightArrow;":     '\U000021D4',
      +	"DoubleLeftTee;":            '\U00002AE4',
      +	"DoubleLongLeftArrow;":      '\U000027F8',
      +	"DoubleLongLeftRightArrow;": '\U000027FA',
      +	"DoubleLongRightArrow;":     '\U000027F9',
      +	"DoubleRightArrow;":         '\U000021D2',
      +	"DoubleRightTee;":           '\U000022A8',
      +	"DoubleUpArrow;":            '\U000021D1',
      +	"DoubleUpDownArrow;":        '\U000021D5',
      +	"DoubleVerticalBar;":        '\U00002225',
      +	"DownArrow;":                '\U00002193',
      +	"DownArrowBar;":             '\U00002913',
      +	"DownArrowUpArrow;":         '\U000021F5',
      +	"DownBreve;":                '\U00000311',
      +	"DownLeftRightVector;":      '\U00002950',
      +	"DownLeftTeeVector;":        '\U0000295E',
      +	"DownLeftVector;":           '\U000021BD',
      +	"DownLeftVectorBar;":        '\U00002956',
      +	"DownRightTeeVector;":       '\U0000295F',
      +	"DownRightVector;":          '\U000021C1',
      +	"DownRightVectorBar;":       '\U00002957',
      +	"DownTee;":                  '\U000022A4',
      +	"DownTeeArrow;":             '\U000021A7',
      +	"Downarrow;":                '\U000021D3',
      +	"Dscr;":                     '\U0001D49F',
      +	"Dstrok;":                   '\U00000110',
      +	"ENG;":                      '\U0000014A',
      +	"ETH;":                      '\U000000D0',
      +	"Eacute;":                   '\U000000C9',
      +	"Ecaron;":                   '\U0000011A',
      +	"Ecirc;":                    '\U000000CA',
      +	"Ecy;":                      '\U0000042D',
      +	"Edot;":                     '\U00000116',
      +	"Efr;":                      '\U0001D508',
      +	"Egrave;":                   '\U000000C8',
      +	"Element;":                  '\U00002208',
      +	"Emacr;":                    '\U00000112',
      +	"EmptySmallSquare;":         '\U000025FB',
      +	"EmptyVerySmallSquare;":     '\U000025AB',
      +	"Eogon;":                    '\U00000118',
      +	"Eopf;":                     '\U0001D53C',
      +	"Epsilon;":                  '\U00000395',
      +	"Equal;":                    '\U00002A75',
      +	"EqualTilde;":               '\U00002242',
      +	"Equilibrium;":              '\U000021CC',
      +	"Escr;":                     '\U00002130',
      +	"Esim;":                     '\U00002A73',
      +	"Eta;":                      '\U00000397',
      +	"Euml;":                     '\U000000CB',
      +	"Exists;":                   '\U00002203',
      +	"ExponentialE;":             '\U00002147',
      +	"Fcy;":                      '\U00000424',
      +	"Ffr;":                      '\U0001D509',
      +	"FilledSmallSquare;":        '\U000025FC',
      +	"FilledVerySmallSquare;":    '\U000025AA',
      +	"Fopf;":                     '\U0001D53D',
      +	"ForAll;":                   '\U00002200',
      +	"Fouriertrf;":               '\U00002131',
      +	"Fscr;":                     '\U00002131',
      +	"GJcy;":                     '\U00000403',
      +	"GT;":                       '\U0000003E',
      +	"Gamma;":                    '\U00000393',
      +	"Gammad;":                   '\U000003DC',
      +	"Gbreve;":                   '\U0000011E',
      +	"Gcedil;":                   '\U00000122',
      +	"Gcirc;":                    '\U0000011C',
      +	"Gcy;":                      '\U00000413',
      +	"Gdot;":                     '\U00000120',
      +	"Gfr;":                      '\U0001D50A',
      +	"Gg;":                       '\U000022D9',
      +	"Gopf;":                     '\U0001D53E',
      +	"GreaterEqual;":             '\U00002265',
      +	"GreaterEqualLess;":         '\U000022DB',
      +	"GreaterFullEqual;":         '\U00002267',
      +	"GreaterGreater;":           '\U00002AA2',
      +	"GreaterLess;":              '\U00002277',
      +	"GreaterSlantEqual;":        '\U00002A7E',
      +	"GreaterTilde;":             '\U00002273',
      +	"Gscr;":                     '\U0001D4A2',
      +	"Gt;":                       '\U0000226B',
      +	"HARDcy;":                   '\U0000042A',
      +	"Hacek;":                    '\U000002C7',
      +	"Hat;":                      '\U0000005E',
      +	"Hcirc;":                    '\U00000124',
      +	"Hfr;":                      '\U0000210C',
      +	"HilbertSpace;":             '\U0000210B',
      +	"Hopf;":                     '\U0000210D',
      +	"HorizontalLine;":           '\U00002500',
      +	"Hscr;":                     '\U0000210B',
      +	"Hstrok;":                   '\U00000126',
      +	"HumpDownHump;":             '\U0000224E',
      +	"HumpEqual;":                '\U0000224F',
      +	"IEcy;":                     '\U00000415',
      +	"IJlig;":                    '\U00000132',
      +	"IOcy;":                     '\U00000401',
      +	"Iacute;":                   '\U000000CD',
      +	"Icirc;":                    '\U000000CE',
      +	"Icy;":                      '\U00000418',
      +	"Idot;":                     '\U00000130',
      +	"Ifr;":                      '\U00002111',
      +	"Igrave;":                   '\U000000CC',
      +	"Im;":                       '\U00002111',
      +	"Imacr;":                    '\U0000012A',
      +	"ImaginaryI;":               '\U00002148',
      +	"Implies;":                  '\U000021D2',
      +	"Int;":                      '\U0000222C',
      +	"Integral;":                 '\U0000222B',
      +	"Intersection;":             '\U000022C2',
      +	"InvisibleComma;":           '\U00002063',
      +	"InvisibleTimes;":           '\U00002062',
      +	"Iogon;":                    '\U0000012E',
      +	"Iopf;":                     '\U0001D540',
      +	"Iota;":                     '\U00000399',
      +	"Iscr;":                     '\U00002110',
      +	"Itilde;":                   '\U00000128',
      +	"Iukcy;":                    '\U00000406',
      +	"Iuml;":                     '\U000000CF',
      +	"Jcirc;":                    '\U00000134',
      +	"Jcy;":                      '\U00000419',
      +	"Jfr;":                      '\U0001D50D',
      +	"Jopf;":                     '\U0001D541',
      +	"Jscr;":                     '\U0001D4A5',
      +	"Jsercy;":                   '\U00000408',
      +	"Jukcy;":                    '\U00000404',
      +	"KHcy;":                     '\U00000425',
      +	"KJcy;":                     '\U0000040C',
      +	"Kappa;":                    '\U0000039A',
      +	"Kcedil;":                   '\U00000136',
      +	"Kcy;":                      '\U0000041A',
      +	"Kfr;":                      '\U0001D50E',
      +	"Kopf;":                     '\U0001D542',
      +	"Kscr;":                     '\U0001D4A6',
      +	"LJcy;":                     '\U00000409',
      +	"LT;":                       '\U0000003C',
      +	"Lacute;":                   '\U00000139',
      +	"Lambda;":                   '\U0000039B',
      +	"Lang;":                     '\U000027EA',
      +	"Laplacetrf;":               '\U00002112',
      +	"Larr;":                     '\U0000219E',
      +	"Lcaron;":                   '\U0000013D',
      +	"Lcedil;":                   '\U0000013B',
      +	"Lcy;":                      '\U0000041B',
      +	"LeftAngleBracket;":         '\U000027E8',
      +	"LeftArrow;":                '\U00002190',
      +	"LeftArrowBar;":             '\U000021E4',
      +	"LeftArrowRightArrow;":      '\U000021C6',
      +	"LeftCeiling;":              '\U00002308',
      +	"LeftDoubleBracket;":        '\U000027E6',
      +	"LeftDownTeeVector;":        '\U00002961',
      +	"LeftDownVector;":           '\U000021C3',
      +	"LeftDownVectorBar;":        '\U00002959',
      +	"LeftFloor;":                '\U0000230A',
      +	"LeftRightArrow;":           '\U00002194',
      +	"LeftRightVector;":          '\U0000294E',
      +	"LeftTee;":                  '\U000022A3',
      +	"LeftTeeArrow;":             '\U000021A4',
      +	"LeftTeeVector;":            '\U0000295A',
      +	"LeftTriangle;":             '\U000022B2',
      +	"LeftTriangleBar;":          '\U000029CF',
      +	"LeftTriangleEqual;":        '\U000022B4',
      +	"LeftUpDownVector;":         '\U00002951',
      +	"LeftUpTeeVector;":          '\U00002960',
      +	"LeftUpVector;":             '\U000021BF',
      +	"LeftUpVectorBar;":          '\U00002958',
      +	"LeftVector;":               '\U000021BC',
      +	"LeftVectorBar;":            '\U00002952',
      +	"Leftarrow;":                '\U000021D0',
      +	"Leftrightarrow;":           '\U000021D4',
      +	"LessEqualGreater;":         '\U000022DA',
      +	"LessFullEqual;":            '\U00002266',
      +	"LessGreater;":              '\U00002276',
      +	"LessLess;":                 '\U00002AA1',
      +	"LessSlantEqual;":           '\U00002A7D',
      +	"LessTilde;":                '\U00002272',
      +	"Lfr;":                      '\U0001D50F',
      +	"Ll;":                       '\U000022D8',
      +	"Lleftarrow;":               '\U000021DA',
      +	"Lmidot;":                   '\U0000013F',
      +	"LongLeftArrow;":            '\U000027F5',
      +	"LongLeftRightArrow;":       '\U000027F7',
      +	"LongRightArrow;":           '\U000027F6',
      +	"Longleftarrow;":            '\U000027F8',
      +	"Longleftrightarrow;":       '\U000027FA',
      +	"Longrightarrow;":           '\U000027F9',
      +	"Lopf;":                     '\U0001D543',
      +	"LowerLeftArrow;":           '\U00002199',
      +	"LowerRightArrow;":          '\U00002198',
      +	"Lscr;":                     '\U00002112',
      +	"Lsh;":                      '\U000021B0',
      +	"Lstrok;":                   '\U00000141',
      +	"Lt;":                       '\U0000226A',
      +	"Map;":                      '\U00002905',
      +	"Mcy;":                      '\U0000041C',
      +	"MediumSpace;":              '\U0000205F',
      +	"Mellintrf;":                '\U00002133',
      +	"Mfr;":                      '\U0001D510',
      +	"MinusPlus;":                '\U00002213',
      +	"Mopf;":                     '\U0001D544',
      +	"Mscr;":                     '\U00002133',
      +	"Mu;":                       '\U0000039C',
      +	"NJcy;":                     '\U0000040A',
      +	"Nacute;":                   '\U00000143',
      +	"Ncaron;":                   '\U00000147',
      +	"Ncedil;":                   '\U00000145',
      +	"Ncy;":                      '\U0000041D',
      +	"NegativeMediumSpace;":      '\U0000200B',
      +	"NegativeThickSpace;":       '\U0000200B',
      +	"NegativeThinSpace;":        '\U0000200B',
      +	"NegativeVeryThinSpace;":    '\U0000200B',
      +	"NestedGreaterGreater;":     '\U0000226B',
      +	"NestedLessLess;":           '\U0000226A',
      +	"NewLine;":                  '\U0000000A',
      +	"Nfr;":                      '\U0001D511',
      +	"NoBreak;":                  '\U00002060',
      +	"NonBreakingSpace;":         '\U000000A0',
      +	"Nopf;":                     '\U00002115',
      +	"Not;":                      '\U00002AEC',
      +	"NotCongruent;":             '\U00002262',
      +	"NotCupCap;":                '\U0000226D',
      +	"NotDoubleVerticalBar;":     '\U00002226',
      +	"NotElement;":               '\U00002209',
      +	"NotEqual;":                 '\U00002260',
      +	"NotExists;":                '\U00002204',
      +	"NotGreater;":               '\U0000226F',
      +	"NotGreaterEqual;":          '\U00002271',
      +	"NotGreaterLess;":           '\U00002279',
      +	"NotGreaterTilde;":          '\U00002275',
      +	"NotLeftTriangle;":          '\U000022EA',
      +	"NotLeftTriangleEqual;":     '\U000022EC',
      +	"NotLess;":                  '\U0000226E',
      +	"NotLessEqual;":             '\U00002270',
      +	"NotLessGreater;":           '\U00002278',
      +	"NotLessTilde;":             '\U00002274',
      +	"NotPrecedes;":              '\U00002280',
      +	"NotPrecedesSlantEqual;":    '\U000022E0',
      +	"NotReverseElement;":        '\U0000220C',
      +	"NotRightTriangle;":         '\U000022EB',
      +	"NotRightTriangleEqual;":    '\U000022ED',
      +	"NotSquareSubsetEqual;":     '\U000022E2',
      +	"NotSquareSupersetEqual;":   '\U000022E3',
      +	"NotSubsetEqual;":           '\U00002288',
      +	"NotSucceeds;":              '\U00002281',
      +	"NotSucceedsSlantEqual;":    '\U000022E1',
      +	"NotSupersetEqual;":         '\U00002289',
      +	"NotTilde;":                 '\U00002241',
      +	"NotTildeEqual;":            '\U00002244',
      +	"NotTildeFullEqual;":        '\U00002247',
      +	"NotTildeTilde;":            '\U00002249',
      +	"NotVerticalBar;":           '\U00002224',
      +	"Nscr;":                     '\U0001D4A9',
      +	"Ntilde;":                   '\U000000D1',
      +	"Nu;":                       '\U0000039D',
      +	"OElig;":                    '\U00000152',
      +	"Oacute;":                   '\U000000D3',
      +	"Ocirc;":                    '\U000000D4',
      +	"Ocy;":                      '\U0000041E',
      +	"Odblac;":                   '\U00000150',
      +	"Ofr;":                      '\U0001D512',
      +	"Ograve;":                   '\U000000D2',
      +	"Omacr;":                    '\U0000014C',
      +	"Omega;":                    '\U000003A9',
      +	"Omicron;":                  '\U0000039F',
      +	"Oopf;":                     '\U0001D546',
      +	"OpenCurlyDoubleQuote;":     '\U0000201C',
      +	"OpenCurlyQuote;":           '\U00002018',
      +	"Or;":                       '\U00002A54',
      +	"Oscr;":                     '\U0001D4AA',
      +	"Oslash;":                   '\U000000D8',
      +	"Otilde;":                   '\U000000D5',
      +	"Otimes;":                   '\U00002A37',
      +	"Ouml;":                     '\U000000D6',
      +	"OverBar;":                  '\U0000203E',
      +	"OverBrace;":                '\U000023DE',
      +	"OverBracket;":              '\U000023B4',
      +	"OverParenthesis;":          '\U000023DC',
      +	"PartialD;":                 '\U00002202',
      +	"Pcy;":                      '\U0000041F',
      +	"Pfr;":                      '\U0001D513',
      +	"Phi;":                      '\U000003A6',
      +	"Pi;":                       '\U000003A0',
      +	"PlusMinus;":                '\U000000B1',
      +	"Poincareplane;":            '\U0000210C',
      +	"Popf;":                     '\U00002119',
      +	"Pr;":                       '\U00002ABB',
      +	"Precedes;":                 '\U0000227A',
      +	"PrecedesEqual;":            '\U00002AAF',
      +	"PrecedesSlantEqual;":       '\U0000227C',
      +	"PrecedesTilde;":            '\U0000227E',
      +	"Prime;":                    '\U00002033',
      +	"Product;":                  '\U0000220F',
      +	"Proportion;":               '\U00002237',
      +	"Proportional;":             '\U0000221D',
      +	"Pscr;":                     '\U0001D4AB',
      +	"Psi;":                      '\U000003A8',
      +	"QUOT;":                     '\U00000022',
      +	"Qfr;":                      '\U0001D514',
      +	"Qopf;":                     '\U0000211A',
      +	"Qscr;":                     '\U0001D4AC',
      +	"RBarr;":                    '\U00002910',
      +	"REG;":                      '\U000000AE',
      +	"Racute;":                   '\U00000154',
      +	"Rang;":                     '\U000027EB',
      +	"Rarr;":                     '\U000021A0',
      +	"Rarrtl;":                   '\U00002916',
      +	"Rcaron;":                   '\U00000158',
      +	"Rcedil;":                   '\U00000156',
      +	"Rcy;":                      '\U00000420',
      +	"Re;":                       '\U0000211C',
      +	"ReverseElement;":           '\U0000220B',
      +	"ReverseEquilibrium;":       '\U000021CB',
      +	"ReverseUpEquilibrium;":     '\U0000296F',
      +	"Rfr;":                      '\U0000211C',
      +	"Rho;":                      '\U000003A1',
      +	"RightAngleBracket;":        '\U000027E9',
      +	"RightArrow;":               '\U00002192',
      +	"RightArrowBar;":            '\U000021E5',
      +	"RightArrowLeftArrow;":      '\U000021C4',
      +	"RightCeiling;":             '\U00002309',
      +	"RightDoubleBracket;":       '\U000027E7',
      +	"RightDownTeeVector;":       '\U0000295D',
      +	"RightDownVector;":          '\U000021C2',
      +	"RightDownVectorBar;":       '\U00002955',
      +	"RightFloor;":               '\U0000230B',
      +	"RightTee;":                 '\U000022A2',
      +	"RightTeeArrow;":            '\U000021A6',
      +	"RightTeeVector;":           '\U0000295B',
      +	"RightTriangle;":            '\U000022B3',
      +	"RightTriangleBar;":         '\U000029D0',
      +	"RightTriangleEqual;":       '\U000022B5',
      +	"RightUpDownVector;":        '\U0000294F',
      +	"RightUpTeeVector;":         '\U0000295C',
      +	"RightUpVector;":            '\U000021BE',
      +	"RightUpVectorBar;":         '\U00002954',
      +	"RightVector;":              '\U000021C0',
      +	"RightVectorBar;":           '\U00002953',
      +	"Rightarrow;":               '\U000021D2',
      +	"Ropf;":                     '\U0000211D',
      +	"RoundImplies;":             '\U00002970',
      +	"Rrightarrow;":              '\U000021DB',
      +	"Rscr;":                     '\U0000211B',
      +	"Rsh;":                      '\U000021B1',
      +	"RuleDelayed;":              '\U000029F4',
      +	"SHCHcy;":                   '\U00000429',
      +	"SHcy;":                     '\U00000428',
      +	"SOFTcy;":                   '\U0000042C',
      +	"Sacute;":                   '\U0000015A',
      +	"Sc;":                       '\U00002ABC',
      +	"Scaron;":                   '\U00000160',
      +	"Scedil;":                   '\U0000015E',
      +	"Scirc;":                    '\U0000015C',
      +	"Scy;":                      '\U00000421',
      +	"Sfr;":                      '\U0001D516',
      +	"ShortDownArrow;":           '\U00002193',
      +	"ShortLeftArrow;":           '\U00002190',
      +	"ShortRightArrow;":          '\U00002192',
      +	"ShortUpArrow;":             '\U00002191',
      +	"Sigma;":                    '\U000003A3',
      +	"SmallCircle;":              '\U00002218',
      +	"Sopf;":                     '\U0001D54A',
      +	"Sqrt;":                     '\U0000221A',
      +	"Square;":                   '\U000025A1',
      +	"SquareIntersection;":       '\U00002293',
      +	"SquareSubset;":             '\U0000228F',
      +	"SquareSubsetEqual;":        '\U00002291',
      +	"SquareSuperset;":           '\U00002290',
      +	"SquareSupersetEqual;":      '\U00002292',
      +	"SquareUnion;":              '\U00002294',
      +	"Sscr;":                     '\U0001D4AE',
      +	"Star;":                     '\U000022C6',
      +	"Sub;":                      '\U000022D0',
      +	"Subset;":                   '\U000022D0',
      +	"SubsetEqual;":              '\U00002286',
      +	"Succeeds;":                 '\U0000227B',
      +	"SucceedsEqual;":            '\U00002AB0',
      +	"SucceedsSlantEqual;":       '\U0000227D',
      +	"SucceedsTilde;":            '\U0000227F',
      +	"SuchThat;":                 '\U0000220B',
      +	"Sum;":                      '\U00002211',
      +	"Sup;":                      '\U000022D1',
      +	"Superset;":                 '\U00002283',
      +	"SupersetEqual;":            '\U00002287',
      +	"Supset;":                   '\U000022D1',
      +	"THORN;":                    '\U000000DE',
      +	"TRADE;":                    '\U00002122',
      +	"TSHcy;":                    '\U0000040B',
      +	"TScy;":                     '\U00000426',
      +	"Tab;":                      '\U00000009',
      +	"Tau;":                      '\U000003A4',
      +	"Tcaron;":                   '\U00000164',
      +	"Tcedil;":                   '\U00000162',
      +	"Tcy;":                      '\U00000422',
      +	"Tfr;":                      '\U0001D517',
      +	"Therefore;":                '\U00002234',
      +	"Theta;":                    '\U00000398',
      +	"ThinSpace;":                '\U00002009',
      +	"Tilde;":                    '\U0000223C',
      +	"TildeEqual;":               '\U00002243',
      +	"TildeFullEqual;":           '\U00002245',
      +	"TildeTilde;":               '\U00002248',
      +	"Topf;":                     '\U0001D54B',
      +	"TripleDot;":                '\U000020DB',
      +	"Tscr;":                     '\U0001D4AF',
      +	"Tstrok;":                   '\U00000166',
      +	"Uacute;":                   '\U000000DA',
      +	"Uarr;":                     '\U0000219F',
      +	"Uarrocir;":                 '\U00002949',
      +	"Ubrcy;":                    '\U0000040E',
      +	"Ubreve;":                   '\U0000016C',
      +	"Ucirc;":                    '\U000000DB',
      +	"Ucy;":                      '\U00000423',
      +	"Udblac;":                   '\U00000170',
      +	"Ufr;":                      '\U0001D518',
      +	"Ugrave;":                   '\U000000D9',
      +	"Umacr;":                    '\U0000016A',
      +	"UnderBar;":                 '\U0000005F',
      +	"UnderBrace;":               '\U000023DF',
      +	"UnderBracket;":             '\U000023B5',
      +	"UnderParenthesis;":         '\U000023DD',
      +	"Union;":                    '\U000022C3',
      +	"UnionPlus;":                '\U0000228E',
      +	"Uogon;":                    '\U00000172',
      +	"Uopf;":                     '\U0001D54C',
      +	"UpArrow;":                  '\U00002191',
      +	"UpArrowBar;":               '\U00002912',
      +	"UpArrowDownArrow;":         '\U000021C5',
      +	"UpDownArrow;":              '\U00002195',
      +	"UpEquilibrium;":            '\U0000296E',
      +	"UpTee;":                    '\U000022A5',
      +	"UpTeeArrow;":               '\U000021A5',
      +	"Uparrow;":                  '\U000021D1',
      +	"Updownarrow;":              '\U000021D5',
      +	"UpperLeftArrow;":           '\U00002196',
      +	"UpperRightArrow;":          '\U00002197',
      +	"Upsi;":                     '\U000003D2',
      +	"Upsilon;":                  '\U000003A5',
      +	"Uring;":                    '\U0000016E',
      +	"Uscr;":                     '\U0001D4B0',
      +	"Utilde;":                   '\U00000168',
      +	"Uuml;":                     '\U000000DC',
      +	"VDash;":                    '\U000022AB',
      +	"Vbar;":                     '\U00002AEB',
      +	"Vcy;":                      '\U00000412',
      +	"Vdash;":                    '\U000022A9',
      +	"Vdashl;":                   '\U00002AE6',
      +	"Vee;":                      '\U000022C1',
      +	"Verbar;":                   '\U00002016',
      +	"Vert;":                     '\U00002016',
      +	"VerticalBar;":              '\U00002223',
      +	"VerticalLine;":             '\U0000007C',
      +	"VerticalSeparator;":        '\U00002758',
      +	"VerticalTilde;":            '\U00002240',
      +	"VeryThinSpace;":            '\U0000200A',
      +	"Vfr;":                      '\U0001D519',
      +	"Vopf;":                     '\U0001D54D',
      +	"Vscr;":                     '\U0001D4B1',
      +	"Vvdash;":                   '\U000022AA',
      +	"Wcirc;":                    '\U00000174',
      +	"Wedge;":                    '\U000022C0',
      +	"Wfr;":                      '\U0001D51A',
      +	"Wopf;":                     '\U0001D54E',
      +	"Wscr;":                     '\U0001D4B2',
      +	"Xfr;":                      '\U0001D51B',
      +	"Xi;":                       '\U0000039E',
      +	"Xopf;":                     '\U0001D54F',
      +	"Xscr;":                     '\U0001D4B3',
      +	"YAcy;":                     '\U0000042F',
      +	"YIcy;":                     '\U00000407',
      +	"YUcy;":                     '\U0000042E',
      +	"Yacute;":                   '\U000000DD',
      +	"Ycirc;":                    '\U00000176',
      +	"Ycy;":                      '\U0000042B',
      +	"Yfr;":                      '\U0001D51C',
      +	"Yopf;":                     '\U0001D550',
      +	"Yscr;":                     '\U0001D4B4',
      +	"Yuml;":                     '\U00000178',
      +	"ZHcy;":                     '\U00000416',
      +	"Zacute;":                   '\U00000179',
      +	"Zcaron;":                   '\U0000017D',
      +	"Zcy;":                      '\U00000417',
      +	"Zdot;":                     '\U0000017B',
      +	"ZeroWidthSpace;":           '\U0000200B',
      +	"Zeta;":                     '\U00000396',
      +	"Zfr;":                      '\U00002128',
      +	"Zopf;":                     '\U00002124',
      +	"Zscr;":                     '\U0001D4B5',
      +	"aacute;":                   '\U000000E1',
      +	"abreve;":                   '\U00000103',
      +	"ac;":                       '\U0000223E',
      +	"acd;":                      '\U0000223F',
      +	"acirc;":                    '\U000000E2',
      +	"acute;":                    '\U000000B4',
      +	"acy;":                      '\U00000430',
      +	"aelig;":                    '\U000000E6',
      +	"af;":                       '\U00002061',
      +	"afr;":                      '\U0001D51E',
      +	"agrave;":                   '\U000000E0',
      +	"alefsym;":                  '\U00002135',
      +	"aleph;":                    '\U00002135',
      +	"alpha;":                    '\U000003B1',
      +	"amacr;":                    '\U00000101',
      +	"amalg;":                    '\U00002A3F',
      +	"amp;":                      '\U00000026',
      +	"and;":                      '\U00002227',
      +	"andand;":                   '\U00002A55',
      +	"andd;":                     '\U00002A5C',
      +	"andslope;":                 '\U00002A58',
      +	"andv;":                     '\U00002A5A',
      +	"ang;":                      '\U00002220',
      +	"ange;":                     '\U000029A4',
      +	"angle;":                    '\U00002220',
      +	"angmsd;":                   '\U00002221',
      +	"angmsdaa;":                 '\U000029A8',
      +	"angmsdab;":                 '\U000029A9',
      +	"angmsdac;":                 '\U000029AA',
      +	"angmsdad;":                 '\U000029AB',
      +	"angmsdae;":                 '\U000029AC',
      +	"angmsdaf;":                 '\U000029AD',
      +	"angmsdag;":                 '\U000029AE',
      +	"angmsdah;":                 '\U000029AF',
      +	"angrt;":                    '\U0000221F',
      +	"angrtvb;":                  '\U000022BE',
      +	"angrtvbd;":                 '\U0000299D',
      +	"angsph;":                   '\U00002222',
      +	"angst;":                    '\U000000C5',
      +	"angzarr;":                  '\U0000237C',
      +	"aogon;":                    '\U00000105',
      +	"aopf;":                     '\U0001D552',
      +	"ap;":                       '\U00002248',
      +	"apE;":                      '\U00002A70',
      +	"apacir;":                   '\U00002A6F',
      +	"ape;":                      '\U0000224A',
      +	"apid;":                     '\U0000224B',
      +	"apos;":                     '\U00000027',
      +	"approx;":                   '\U00002248',
      +	"approxeq;":                 '\U0000224A',
      +	"aring;":                    '\U000000E5',
      +	"ascr;":                     '\U0001D4B6',
      +	"ast;":                      '\U0000002A',
      +	"asymp;":                    '\U00002248',
      +	"asympeq;":                  '\U0000224D',
      +	"atilde;":                   '\U000000E3',
      +	"auml;":                     '\U000000E4',
      +	"awconint;":                 '\U00002233',
      +	"awint;":                    '\U00002A11',
      +	"bNot;":                     '\U00002AED',
      +	"backcong;":                 '\U0000224C',
      +	"backepsilon;":              '\U000003F6',
      +	"backprime;":                '\U00002035',
      +	"backsim;":                  '\U0000223D',
      +	"backsimeq;":                '\U000022CD',
      +	"barvee;":                   '\U000022BD',
      +	"barwed;":                   '\U00002305',
      +	"barwedge;":                 '\U00002305',
      +	"bbrk;":                     '\U000023B5',
      +	"bbrktbrk;":                 '\U000023B6',
      +	"bcong;":                    '\U0000224C',
      +	"bcy;":                      '\U00000431',
      +	"bdquo;":                    '\U0000201E',
      +	"becaus;":                   '\U00002235',
      +	"because;":                  '\U00002235',
      +	"bemptyv;":                  '\U000029B0',
      +	"bepsi;":                    '\U000003F6',
      +	"bernou;":                   '\U0000212C',
      +	"beta;":                     '\U000003B2',
      +	"beth;":                     '\U00002136',
      +	"between;":                  '\U0000226C',
      +	"bfr;":                      '\U0001D51F',
      +	"bigcap;":                   '\U000022C2',
      +	"bigcirc;":                  '\U000025EF',
      +	"bigcup;":                   '\U000022C3',
      +	"bigodot;":                  '\U00002A00',
      +	"bigoplus;":                 '\U00002A01',
      +	"bigotimes;":                '\U00002A02',
      +	"bigsqcup;":                 '\U00002A06',
      +	"bigstar;":                  '\U00002605',
      +	"bigtriangledown;":          '\U000025BD',
      +	"bigtriangleup;":            '\U000025B3',
      +	"biguplus;":                 '\U00002A04',
      +	"bigvee;":                   '\U000022C1',
      +	"bigwedge;":                 '\U000022C0',
      +	"bkarow;":                   '\U0000290D',
      +	"blacklozenge;":             '\U000029EB',
      +	"blacksquare;":              '\U000025AA',
      +	"blacktriangle;":            '\U000025B4',
      +	"blacktriangledown;":        '\U000025BE',
      +	"blacktriangleleft;":        '\U000025C2',
      +	"blacktriangleright;":       '\U000025B8',
      +	"blank;":                    '\U00002423',
      +	"blk12;":                    '\U00002592',
      +	"blk14;":                    '\U00002591',
      +	"blk34;":                    '\U00002593',
      +	"block;":                    '\U00002588',
      +	"bnot;":                     '\U00002310',
      +	"bopf;":                     '\U0001D553',
      +	"bot;":                      '\U000022A5',
      +	"bottom;":                   '\U000022A5',
      +	"bowtie;":                   '\U000022C8',
      +	"boxDL;":                    '\U00002557',
      +	"boxDR;":                    '\U00002554',
      +	"boxDl;":                    '\U00002556',
      +	"boxDr;":                    '\U00002553',
      +	"boxH;":                     '\U00002550',
      +	"boxHD;":                    '\U00002566',
      +	"boxHU;":                    '\U00002569',
      +	"boxHd;":                    '\U00002564',
      +	"boxHu;":                    '\U00002567',
      +	"boxUL;":                    '\U0000255D',
      +	"boxUR;":                    '\U0000255A',
      +	"boxUl;":                    '\U0000255C',
      +	"boxUr;":                    '\U00002559',
      +	"boxV;":                     '\U00002551',
      +	"boxVH;":                    '\U0000256C',
      +	"boxVL;":                    '\U00002563',
      +	"boxVR;":                    '\U00002560',
      +	"boxVh;":                    '\U0000256B',
      +	"boxVl;":                    '\U00002562',
      +	"boxVr;":                    '\U0000255F',
      +	"boxbox;":                   '\U000029C9',
      +	"boxdL;":                    '\U00002555',
      +	"boxdR;":                    '\U00002552',
      +	"boxdl;":                    '\U00002510',
      +	"boxdr;":                    '\U0000250C',
      +	"boxh;":                     '\U00002500',
      +	"boxhD;":                    '\U00002565',
      +	"boxhU;":                    '\U00002568',
      +	"boxhd;":                    '\U0000252C',
      +	"boxhu;":                    '\U00002534',
      +	"boxminus;":                 '\U0000229F',
      +	"boxplus;":                  '\U0000229E',
      +	"boxtimes;":                 '\U000022A0',
      +	"boxuL;":                    '\U0000255B',
      +	"boxuR;":                    '\U00002558',
      +	"boxul;":                    '\U00002518',
      +	"boxur;":                    '\U00002514',
      +	"boxv;":                     '\U00002502',
      +	"boxvH;":                    '\U0000256A',
      +	"boxvL;":                    '\U00002561',
      +	"boxvR;":                    '\U0000255E',
      +	"boxvh;":                    '\U0000253C',
      +	"boxvl;":                    '\U00002524',
      +	"boxvr;":                    '\U0000251C',
      +	"bprime;":                   '\U00002035',
      +	"breve;":                    '\U000002D8',
      +	"brvbar;":                   '\U000000A6',
      +	"bscr;":                     '\U0001D4B7',
      +	"bsemi;":                    '\U0000204F',
      +	"bsim;":                     '\U0000223D',
      +	"bsime;":                    '\U000022CD',
      +	"bsol;":                     '\U0000005C',
      +	"bsolb;":                    '\U000029C5',
      +	"bsolhsub;":                 '\U000027C8',
      +	"bull;":                     '\U00002022',
      +	"bullet;":                   '\U00002022',
      +	"bump;":                     '\U0000224E',
      +	"bumpE;":                    '\U00002AAE',
      +	"bumpe;":                    '\U0000224F',
      +	"bumpeq;":                   '\U0000224F',
      +	"cacute;":                   '\U00000107',
      +	"cap;":                      '\U00002229',
      +	"capand;":                   '\U00002A44',
      +	"capbrcup;":                 '\U00002A49',
      +	"capcap;":                   '\U00002A4B',
      +	"capcup;":                   '\U00002A47',
      +	"capdot;":                   '\U00002A40',
      +	"caret;":                    '\U00002041',
      +	"caron;":                    '\U000002C7',
      +	"ccaps;":                    '\U00002A4D',
      +	"ccaron;":                   '\U0000010D',
      +	"ccedil;":                   '\U000000E7',
      +	"ccirc;":                    '\U00000109',
      +	"ccups;":                    '\U00002A4C',
      +	"ccupssm;":                  '\U00002A50',
      +	"cdot;":                     '\U0000010B',
      +	"cedil;":                    '\U000000B8',
      +	"cemptyv;":                  '\U000029B2',
      +	"cent;":                     '\U000000A2',
      +	"centerdot;":                '\U000000B7',
      +	"cfr;":                      '\U0001D520',
      +	"chcy;":                     '\U00000447',
      +	"check;":                    '\U00002713',
      +	"checkmark;":                '\U00002713',
      +	"chi;":                      '\U000003C7',
      +	"cir;":                      '\U000025CB',
      +	"cirE;":                     '\U000029C3',
      +	"circ;":                     '\U000002C6',
      +	"circeq;":                   '\U00002257',
      +	"circlearrowleft;":          '\U000021BA',
      +	"circlearrowright;":         '\U000021BB',
      +	"circledR;":                 '\U000000AE',
      +	"circledS;":                 '\U000024C8',
      +	"circledast;":               '\U0000229B',
      +	"circledcirc;":              '\U0000229A',
      +	"circleddash;":              '\U0000229D',
      +	"cire;":                     '\U00002257',
      +	"cirfnint;":                 '\U00002A10',
      +	"cirmid;":                   '\U00002AEF',
      +	"cirscir;":                  '\U000029C2',
      +	"clubs;":                    '\U00002663',
      +	"clubsuit;":                 '\U00002663',
      +	"colon;":                    '\U0000003A',
      +	"colone;":                   '\U00002254',
      +	"coloneq;":                  '\U00002254',
      +	"comma;":                    '\U0000002C',
      +	"commat;":                   '\U00000040',
      +	"comp;":                     '\U00002201',
      +	"compfn;":                   '\U00002218',
      +	"complement;":               '\U00002201',
      +	"complexes;":                '\U00002102',
      +	"cong;":                     '\U00002245',
      +	"congdot;":                  '\U00002A6D',
      +	"conint;":                   '\U0000222E',
      +	"copf;":                     '\U0001D554',
      +	"coprod;":                   '\U00002210',
      +	"copy;":                     '\U000000A9',
      +	"copysr;":                   '\U00002117',
      +	"crarr;":                    '\U000021B5',
      +	"cross;":                    '\U00002717',
      +	"cscr;":                     '\U0001D4B8',
      +	"csub;":                     '\U00002ACF',
      +	"csube;":                    '\U00002AD1',
      +	"csup;":                     '\U00002AD0',
      +	"csupe;":                    '\U00002AD2',
      +	"ctdot;":                    '\U000022EF',
      +	"cudarrl;":                  '\U00002938',
      +	"cudarrr;":                  '\U00002935',
      +	"cuepr;":                    '\U000022DE',
      +	"cuesc;":                    '\U000022DF',
      +	"cularr;":                   '\U000021B6',
      +	"cularrp;":                  '\U0000293D',
      +	"cup;":                      '\U0000222A',
      +	"cupbrcap;":                 '\U00002A48',
      +	"cupcap;":                   '\U00002A46',
      +	"cupcup;":                   '\U00002A4A',
      +	"cupdot;":                   '\U0000228D',
      +	"cupor;":                    '\U00002A45',
      +	"curarr;":                   '\U000021B7',
      +	"curarrm;":                  '\U0000293C',
      +	"curlyeqprec;":              '\U000022DE',
      +	"curlyeqsucc;":              '\U000022DF',
      +	"curlyvee;":                 '\U000022CE',
      +	"curlywedge;":               '\U000022CF',
      +	"curren;":                   '\U000000A4',
      +	"curvearrowleft;":           '\U000021B6',
      +	"curvearrowright;":          '\U000021B7',
      +	"cuvee;":                    '\U000022CE',
      +	"cuwed;":                    '\U000022CF',
      +	"cwconint;":                 '\U00002232',
      +	"cwint;":                    '\U00002231',
      +	"cylcty;":                   '\U0000232D',
      +	"dArr;":                     '\U000021D3',
      +	"dHar;":                     '\U00002965',
      +	"dagger;":                   '\U00002020',
      +	"daleth;":                   '\U00002138',
      +	"darr;":                     '\U00002193',
      +	"dash;":                     '\U00002010',
      +	"dashv;":                    '\U000022A3',
      +	"dbkarow;":                  '\U0000290F',
      +	"dblac;":                    '\U000002DD',
      +	"dcaron;":                   '\U0000010F',
      +	"dcy;":                      '\U00000434',
      +	"dd;":                       '\U00002146',
      +	"ddagger;":                  '\U00002021',
      +	"ddarr;":                    '\U000021CA',
      +	"ddotseq;":                  '\U00002A77',
      +	"deg;":                      '\U000000B0',
      +	"delta;":                    '\U000003B4',
      +	"demptyv;":                  '\U000029B1',
      +	"dfisht;":                   '\U0000297F',
      +	"dfr;":                      '\U0001D521',
      +	"dharl;":                    '\U000021C3',
      +	"dharr;":                    '\U000021C2',
      +	"diam;":                     '\U000022C4',
      +	"diamond;":                  '\U000022C4',
      +	"diamondsuit;":              '\U00002666',
      +	"diams;":                    '\U00002666',
      +	"die;":                      '\U000000A8',
      +	"digamma;":                  '\U000003DD',
      +	"disin;":                    '\U000022F2',
      +	"div;":                      '\U000000F7',
      +	"divide;":                   '\U000000F7',
      +	"divideontimes;":            '\U000022C7',
      +	"divonx;":                   '\U000022C7',
      +	"djcy;":                     '\U00000452',
      +	"dlcorn;":                   '\U0000231E',
      +	"dlcrop;":                   '\U0000230D',
      +	"dollar;":                   '\U00000024',
      +	"dopf;":                     '\U0001D555',
      +	"dot;":                      '\U000002D9',
      +	"doteq;":                    '\U00002250',
      +	"doteqdot;":                 '\U00002251',
      +	"dotminus;":                 '\U00002238',
      +	"dotplus;":                  '\U00002214',
      +	"dotsquare;":                '\U000022A1',
      +	"doublebarwedge;":           '\U00002306',
      +	"downarrow;":                '\U00002193',
      +	"downdownarrows;":           '\U000021CA',
      +	"downharpoonleft;":          '\U000021C3',
      +	"downharpoonright;":         '\U000021C2',
      +	"drbkarow;":                 '\U00002910',
      +	"drcorn;":                   '\U0000231F',
      +	"drcrop;":                   '\U0000230C',
      +	"dscr;":                     '\U0001D4B9',
      +	"dscy;":                     '\U00000455',
      +	"dsol;":                     '\U000029F6',
      +	"dstrok;":                   '\U00000111',
      +	"dtdot;":                    '\U000022F1',
      +	"dtri;":                     '\U000025BF',
      +	"dtrif;":                    '\U000025BE',
      +	"duarr;":                    '\U000021F5',
      +	"duhar;":                    '\U0000296F',
      +	"dwangle;":                  '\U000029A6',
      +	"dzcy;":                     '\U0000045F',
      +	"dzigrarr;":                 '\U000027FF',
      +	"eDDot;":                    '\U00002A77',
      +	"eDot;":                     '\U00002251',
      +	"eacute;":                   '\U000000E9',
      +	"easter;":                   '\U00002A6E',
      +	"ecaron;":                   '\U0000011B',
      +	"ecir;":                     '\U00002256',
      +	"ecirc;":                    '\U000000EA',
      +	"ecolon;":                   '\U00002255',
      +	"ecy;":                      '\U0000044D',
      +	"edot;":                     '\U00000117',
      +	"ee;":                       '\U00002147',
      +	"efDot;":                    '\U00002252',
      +	"efr;":                      '\U0001D522',
      +	"eg;":                       '\U00002A9A',
      +	"egrave;":                   '\U000000E8',
      +	"egs;":                      '\U00002A96',
      +	"egsdot;":                   '\U00002A98',
      +	"el;":                       '\U00002A99',
      +	"elinters;":                 '\U000023E7',
      +	"ell;":                      '\U00002113',
      +	"els;":                      '\U00002A95',
      +	"elsdot;":                   '\U00002A97',
      +	"emacr;":                    '\U00000113',
      +	"empty;":                    '\U00002205',
      +	"emptyset;":                 '\U00002205',
      +	"emptyv;":                   '\U00002205',
      +	"emsp;":                     '\U00002003',
      +	"emsp13;":                   '\U00002004',
      +	"emsp14;":                   '\U00002005',
      +	"eng;":                      '\U0000014B',
      +	"ensp;":                     '\U00002002',
      +	"eogon;":                    '\U00000119',
      +	"eopf;":                     '\U0001D556',
      +	"epar;":                     '\U000022D5',
      +	"eparsl;":                   '\U000029E3',
      +	"eplus;":                    '\U00002A71',
      +	"epsi;":                     '\U000003B5',
      +	"epsilon;":                  '\U000003B5',
      +	"epsiv;":                    '\U000003F5',
      +	"eqcirc;":                   '\U00002256',
      +	"eqcolon;":                  '\U00002255',
      +	"eqsim;":                    '\U00002242',
      +	"eqslantgtr;":               '\U00002A96',
      +	"eqslantless;":              '\U00002A95',
      +	"equals;":                   '\U0000003D',
      +	"equest;":                   '\U0000225F',
      +	"equiv;":                    '\U00002261',
      +	"equivDD;":                  '\U00002A78',
      +	"eqvparsl;":                 '\U000029E5',
      +	"erDot;":                    '\U00002253',
      +	"erarr;":                    '\U00002971',
      +	"escr;":                     '\U0000212F',
      +	"esdot;":                    '\U00002250',
      +	"esim;":                     '\U00002242',
      +	"eta;":                      '\U000003B7',
      +	"eth;":                      '\U000000F0',
      +	"euml;":                     '\U000000EB',
      +	"euro;":                     '\U000020AC',
      +	"excl;":                     '\U00000021',
      +	"exist;":                    '\U00002203',
      +	"expectation;":              '\U00002130',
      +	"exponentiale;":             '\U00002147',
      +	"fallingdotseq;":            '\U00002252',
      +	"fcy;":                      '\U00000444',
      +	"female;":                   '\U00002640',
      +	"ffilig;":                   '\U0000FB03',
      +	"fflig;":                    '\U0000FB00',
      +	"ffllig;":                   '\U0000FB04',
      +	"ffr;":                      '\U0001D523',
      +	"filig;":                    '\U0000FB01',
      +	"flat;":                     '\U0000266D',
      +	"fllig;":                    '\U0000FB02',
      +	"fltns;":                    '\U000025B1',
      +	"fnof;":                     '\U00000192',
      +	"fopf;":                     '\U0001D557',
      +	"forall;":                   '\U00002200',
      +	"fork;":                     '\U000022D4',
      +	"forkv;":                    '\U00002AD9',
      +	"fpartint;":                 '\U00002A0D',
      +	"frac12;":                   '\U000000BD',
      +	"frac13;":                   '\U00002153',
      +	"frac14;":                   '\U000000BC',
      +	"frac15;":                   '\U00002155',
      +	"frac16;":                   '\U00002159',
      +	"frac18;":                   '\U0000215B',
      +	"frac23;":                   '\U00002154',
      +	"frac25;":                   '\U00002156',
      +	"frac34;":                   '\U000000BE',
      +	"frac35;":                   '\U00002157',
      +	"frac38;":                   '\U0000215C',
      +	"frac45;":                   '\U00002158',
      +	"frac56;":                   '\U0000215A',
      +	"frac58;":                   '\U0000215D',
      +	"frac78;":                   '\U0000215E',
      +	"frasl;":                    '\U00002044',
      +	"frown;":                    '\U00002322',
      +	"fscr;":                     '\U0001D4BB',
      +	"gE;":                       '\U00002267',
      +	"gEl;":                      '\U00002A8C',
      +	"gacute;":                   '\U000001F5',
      +	"gamma;":                    '\U000003B3',
      +	"gammad;":                   '\U000003DD',
      +	"gap;":                      '\U00002A86',
      +	"gbreve;":                   '\U0000011F',
      +	"gcirc;":                    '\U0000011D',
      +	"gcy;":                      '\U00000433',
      +	"gdot;":                     '\U00000121',
      +	"ge;":                       '\U00002265',
      +	"gel;":                      '\U000022DB',
      +	"geq;":                      '\U00002265',
      +	"geqq;":                     '\U00002267',
      +	"geqslant;":                 '\U00002A7E',
      +	"ges;":                      '\U00002A7E',
      +	"gescc;":                    '\U00002AA9',
      +	"gesdot;":                   '\U00002A80',
      +	"gesdoto;":                  '\U00002A82',
      +	"gesdotol;":                 '\U00002A84',
      +	"gesles;":                   '\U00002A94',
      +	"gfr;":                      '\U0001D524',
      +	"gg;":                       '\U0000226B',
      +	"ggg;":                      '\U000022D9',
      +	"gimel;":                    '\U00002137',
      +	"gjcy;":                     '\U00000453',
      +	"gl;":                       '\U00002277',
      +	"glE;":                      '\U00002A92',
      +	"gla;":                      '\U00002AA5',
      +	"glj;":                      '\U00002AA4',
      +	"gnE;":                      '\U00002269',
      +	"gnap;":                     '\U00002A8A',
      +	"gnapprox;":                 '\U00002A8A',
      +	"gne;":                      '\U00002A88',
      +	"gneq;":                     '\U00002A88',
      +	"gneqq;":                    '\U00002269',
      +	"gnsim;":                    '\U000022E7',
      +	"gopf;":                     '\U0001D558',
      +	"grave;":                    '\U00000060',
      +	"gscr;":                     '\U0000210A',
      +	"gsim;":                     '\U00002273',
      +	"gsime;":                    '\U00002A8E',
      +	"gsiml;":                    '\U00002A90',
      +	"gt;":                       '\U0000003E',
      +	"gtcc;":                     '\U00002AA7',
      +	"gtcir;":                    '\U00002A7A',
      +	"gtdot;":                    '\U000022D7',
      +	"gtlPar;":                   '\U00002995',
      +	"gtquest;":                  '\U00002A7C',
      +	"gtrapprox;":                '\U00002A86',
      +	"gtrarr;":                   '\U00002978',
      +	"gtrdot;":                   '\U000022D7',
      +	"gtreqless;":                '\U000022DB',
      +	"gtreqqless;":               '\U00002A8C',
      +	"gtrless;":                  '\U00002277',
      +	"gtrsim;":                   '\U00002273',
      +	"hArr;":                     '\U000021D4',
      +	"hairsp;":                   '\U0000200A',
      +	"half;":                     '\U000000BD',
      +	"hamilt;":                   '\U0000210B',
      +	"hardcy;":                   '\U0000044A',
      +	"harr;":                     '\U00002194',
      +	"harrcir;":                  '\U00002948',
      +	"harrw;":                    '\U000021AD',
      +	"hbar;":                     '\U0000210F',
      +	"hcirc;":                    '\U00000125',
      +	"hearts;":                   '\U00002665',
      +	"heartsuit;":                '\U00002665',
      +	"hellip;":                   '\U00002026',
      +	"hercon;":                   '\U000022B9',
      +	"hfr;":                      '\U0001D525',
      +	"hksearow;":                 '\U00002925',
      +	"hkswarow;":                 '\U00002926',
      +	"hoarr;":                    '\U000021FF',
      +	"homtht;":                   '\U0000223B',
      +	"hookleftarrow;":            '\U000021A9',
      +	"hookrightarrow;":           '\U000021AA',
      +	"hopf;":                     '\U0001D559',
      +	"horbar;":                   '\U00002015',
      +	"hscr;":                     '\U0001D4BD',
      +	"hslash;":                   '\U0000210F',
      +	"hstrok;":                   '\U00000127',
      +	"hybull;":                   '\U00002043',
      +	"hyphen;":                   '\U00002010',
      +	"iacute;":                   '\U000000ED',
      +	"ic;":                       '\U00002063',
      +	"icirc;":                    '\U000000EE',
      +	"icy;":                      '\U00000438',
      +	"iecy;":                     '\U00000435',
      +	"iexcl;":                    '\U000000A1',
      +	"iff;":                      '\U000021D4',
      +	"ifr;":                      '\U0001D526',
      +	"igrave;":                   '\U000000EC',
      +	"ii;":                       '\U00002148',
      +	"iiiint;":                   '\U00002A0C',
      +	"iiint;":                    '\U0000222D',
      +	"iinfin;":                   '\U000029DC',
      +	"iiota;":                    '\U00002129',
      +	"ijlig;":                    '\U00000133',
      +	"imacr;":                    '\U0000012B',
      +	"image;":                    '\U00002111',
      +	"imagline;":                 '\U00002110',
      +	"imagpart;":                 '\U00002111',
      +	"imath;":                    '\U00000131',
      +	"imof;":                     '\U000022B7',
      +	"imped;":                    '\U000001B5',
      +	"in;":                       '\U00002208',
      +	"incare;":                   '\U00002105',
      +	"infin;":                    '\U0000221E',
      +	"infintie;":                 '\U000029DD',
      +	"inodot;":                   '\U00000131',
      +	"int;":                      '\U0000222B',
      +	"intcal;":                   '\U000022BA',
      +	"integers;":                 '\U00002124',
      +	"intercal;":                 '\U000022BA',
      +	"intlarhk;":                 '\U00002A17',
      +	"intprod;":                  '\U00002A3C',
      +	"iocy;":                     '\U00000451',
      +	"iogon;":                    '\U0000012F',
      +	"iopf;":                     '\U0001D55A',
      +	"iota;":                     '\U000003B9',
      +	"iprod;":                    '\U00002A3C',
      +	"iquest;":                   '\U000000BF',
      +	"iscr;":                     '\U0001D4BE',
      +	"isin;":                     '\U00002208',
      +	"isinE;":                    '\U000022F9',
      +	"isindot;":                  '\U000022F5',
      +	"isins;":                    '\U000022F4',
      +	"isinsv;":                   '\U000022F3',
      +	"isinv;":                    '\U00002208',
      +	"it;":                       '\U00002062',
      +	"itilde;":                   '\U00000129',
      +	"iukcy;":                    '\U00000456',
      +	"iuml;":                     '\U000000EF',
      +	"jcirc;":                    '\U00000135',
      +	"jcy;":                      '\U00000439',
      +	"jfr;":                      '\U0001D527',
      +	"jmath;":                    '\U00000237',
      +	"jopf;":                     '\U0001D55B',
      +	"jscr;":                     '\U0001D4BF',
      +	"jsercy;":                   '\U00000458',
      +	"jukcy;":                    '\U00000454',
      +	"kappa;":                    '\U000003BA',
      +	"kappav;":                   '\U000003F0',
      +	"kcedil;":                   '\U00000137',
      +	"kcy;":                      '\U0000043A',
      +	"kfr;":                      '\U0001D528',
      +	"kgreen;":                   '\U00000138',
      +	"khcy;":                     '\U00000445',
      +	"kjcy;":                     '\U0000045C',
      +	"kopf;":                     '\U0001D55C',
      +	"kscr;":                     '\U0001D4C0',
      +	"lAarr;":                    '\U000021DA',
      +	"lArr;":                     '\U000021D0',
      +	"lAtail;":                   '\U0000291B',
      +	"lBarr;":                    '\U0000290E',
      +	"lE;":                       '\U00002266',
      +	"lEg;":                      '\U00002A8B',
      +	"lHar;":                     '\U00002962',
      +	"lacute;":                   '\U0000013A',
      +	"laemptyv;":                 '\U000029B4',
      +	"lagran;":                   '\U00002112',
      +	"lambda;":                   '\U000003BB',
      +	"lang;":                     '\U000027E8',
      +	"langd;":                    '\U00002991',
      +	"langle;":                   '\U000027E8',
      +	"lap;":                      '\U00002A85',
      +	"laquo;":                    '\U000000AB',
      +	"larr;":                     '\U00002190',
      +	"larrb;":                    '\U000021E4',
      +	"larrbfs;":                  '\U0000291F',
      +	"larrfs;":                   '\U0000291D',
      +	"larrhk;":                   '\U000021A9',
      +	"larrlp;":                   '\U000021AB',
      +	"larrpl;":                   '\U00002939',
      +	"larrsim;":                  '\U00002973',
      +	"larrtl;":                   '\U000021A2',
      +	"lat;":                      '\U00002AAB',
      +	"latail;":                   '\U00002919',
      +	"late;":                     '\U00002AAD',
      +	"lbarr;":                    '\U0000290C',
      +	"lbbrk;":                    '\U00002772',
      +	"lbrace;":                   '\U0000007B',
      +	"lbrack;":                   '\U0000005B',
      +	"lbrke;":                    '\U0000298B',
      +	"lbrksld;":                  '\U0000298F',
      +	"lbrkslu;":                  '\U0000298D',
      +	"lcaron;":                   '\U0000013E',
      +	"lcedil;":                   '\U0000013C',
      +	"lceil;":                    '\U00002308',
      +	"lcub;":                     '\U0000007B',
      +	"lcy;":                      '\U0000043B',
      +	"ldca;":                     '\U00002936',
      +	"ldquo;":                    '\U0000201C',
      +	"ldquor;":                   '\U0000201E',
      +	"ldrdhar;":                  '\U00002967',
      +	"ldrushar;":                 '\U0000294B',
      +	"ldsh;":                     '\U000021B2',
      +	"le;":                       '\U00002264',
      +	"leftarrow;":                '\U00002190',
      +	"leftarrowtail;":            '\U000021A2',
      +	"leftharpoondown;":          '\U000021BD',
      +	"leftharpoonup;":            '\U000021BC',
      +	"leftleftarrows;":           '\U000021C7',
      +	"leftrightarrow;":           '\U00002194',
      +	"leftrightarrows;":          '\U000021C6',
      +	"leftrightharpoons;":        '\U000021CB',
      +	"leftrightsquigarrow;":      '\U000021AD',
      +	"leftthreetimes;":           '\U000022CB',
      +	"leg;":                      '\U000022DA',
      +	"leq;":                      '\U00002264',
      +	"leqq;":                     '\U00002266',
      +	"leqslant;":                 '\U00002A7D',
      +	"les;":                      '\U00002A7D',
      +	"lescc;":                    '\U00002AA8',
      +	"lesdot;":                   '\U00002A7F',
      +	"lesdoto;":                  '\U00002A81',
      +	"lesdotor;":                 '\U00002A83',
      +	"lesges;":                   '\U00002A93',
      +	"lessapprox;":               '\U00002A85',
      +	"lessdot;":                  '\U000022D6',
      +	"lesseqgtr;":                '\U000022DA',
      +	"lesseqqgtr;":               '\U00002A8B',
      +	"lessgtr;":                  '\U00002276',
      +	"lesssim;":                  '\U00002272',
      +	"lfisht;":                   '\U0000297C',
      +	"lfloor;":                   '\U0000230A',
      +	"lfr;":                      '\U0001D529',
      +	"lg;":                       '\U00002276',
      +	"lgE;":                      '\U00002A91',
      +	"lhard;":                    '\U000021BD',
      +	"lharu;":                    '\U000021BC',
      +	"lharul;":                   '\U0000296A',
      +	"lhblk;":                    '\U00002584',
      +	"ljcy;":                     '\U00000459',
      +	"ll;":                       '\U0000226A',
      +	"llarr;":                    '\U000021C7',
      +	"llcorner;":                 '\U0000231E',
      +	"llhard;":                   '\U0000296B',
      +	"lltri;":                    '\U000025FA',
      +	"lmidot;":                   '\U00000140',
      +	"lmoust;":                   '\U000023B0',
      +	"lmoustache;":               '\U000023B0',
      +	"lnE;":                      '\U00002268',
      +	"lnap;":                     '\U00002A89',
      +	"lnapprox;":                 '\U00002A89',
      +	"lne;":                      '\U00002A87',
      +	"lneq;":                     '\U00002A87',
      +	"lneqq;":                    '\U00002268',
      +	"lnsim;":                    '\U000022E6',
      +	"loang;":                    '\U000027EC',
      +	"loarr;":                    '\U000021FD',
      +	"lobrk;":                    '\U000027E6',
      +	"longleftarrow;":            '\U000027F5',
      +	"longleftrightarrow;":       '\U000027F7',
      +	"longmapsto;":               '\U000027FC',
      +	"longrightarrow;":           '\U000027F6',
      +	"looparrowleft;":            '\U000021AB',
      +	"looparrowright;":           '\U000021AC',
      +	"lopar;":                    '\U00002985',
      +	"lopf;":                     '\U0001D55D',
      +	"loplus;":                   '\U00002A2D',
      +	"lotimes;":                  '\U00002A34',
      +	"lowast;":                   '\U00002217',
      +	"lowbar;":                   '\U0000005F',
      +	"loz;":                      '\U000025CA',
      +	"lozenge;":                  '\U000025CA',
      +	"lozf;":                     '\U000029EB',
      +	"lpar;":                     '\U00000028',
      +	"lparlt;":                   '\U00002993',
      +	"lrarr;":                    '\U000021C6',
      +	"lrcorner;":                 '\U0000231F',
      +	"lrhar;":                    '\U000021CB',
      +	"lrhard;":                   '\U0000296D',
      +	"lrm;":                      '\U0000200E',
      +	"lrtri;":                    '\U000022BF',
      +	"lsaquo;":                   '\U00002039',
      +	"lscr;":                     '\U0001D4C1',
      +	"lsh;":                      '\U000021B0',
      +	"lsim;":                     '\U00002272',
      +	"lsime;":                    '\U00002A8D',
      +	"lsimg;":                    '\U00002A8F',
      +	"lsqb;":                     '\U0000005B',
      +	"lsquo;":                    '\U00002018',
      +	"lsquor;":                   '\U0000201A',
      +	"lstrok;":                   '\U00000142',
      +	"lt;":                       '\U0000003C',
      +	"ltcc;":                     '\U00002AA6',
      +	"ltcir;":                    '\U00002A79',
      +	"ltdot;":                    '\U000022D6',
      +	"lthree;":                   '\U000022CB',
      +	"ltimes;":                   '\U000022C9',
      +	"ltlarr;":                   '\U00002976',
      +	"ltquest;":                  '\U00002A7B',
      +	"ltrPar;":                   '\U00002996',
      +	"ltri;":                     '\U000025C3',
      +	"ltrie;":                    '\U000022B4',
      +	"ltrif;":                    '\U000025C2',
      +	"lurdshar;":                 '\U0000294A',
      +	"luruhar;":                  '\U00002966',
      +	"mDDot;":                    '\U0000223A',
      +	"macr;":                     '\U000000AF',
      +	"male;":                     '\U00002642',
      +	"malt;":                     '\U00002720',
      +	"maltese;":                  '\U00002720',
      +	"map;":                      '\U000021A6',
      +	"mapsto;":                   '\U000021A6',
      +	"mapstodown;":               '\U000021A7',
      +	"mapstoleft;":               '\U000021A4',
      +	"mapstoup;":                 '\U000021A5',
      +	"marker;":                   '\U000025AE',
      +	"mcomma;":                   '\U00002A29',
      +	"mcy;":                      '\U0000043C',
      +	"mdash;":                    '\U00002014',
      +	"measuredangle;":            '\U00002221',
      +	"mfr;":                      '\U0001D52A',
      +	"mho;":                      '\U00002127',
      +	"micro;":                    '\U000000B5',
      +	"mid;":                      '\U00002223',
      +	"midast;":                   '\U0000002A',
      +	"midcir;":                   '\U00002AF0',
      +	"middot;":                   '\U000000B7',
      +	"minus;":                    '\U00002212',
      +	"minusb;":                   '\U0000229F',
      +	"minusd;":                   '\U00002238',
      +	"minusdu;":                  '\U00002A2A',
      +	"mlcp;":                     '\U00002ADB',
      +	"mldr;":                     '\U00002026',
      +	"mnplus;":                   '\U00002213',
      +	"models;":                   '\U000022A7',
      +	"mopf;":                     '\U0001D55E',
      +	"mp;":                       '\U00002213',
      +	"mscr;":                     '\U0001D4C2',
      +	"mstpos;":                   '\U0000223E',
      +	"mu;":                       '\U000003BC',
      +	"multimap;":                 '\U000022B8',
      +	"mumap;":                    '\U000022B8',
      +	"nLeftarrow;":               '\U000021CD',
      +	"nLeftrightarrow;":          '\U000021CE',
      +	"nRightarrow;":              '\U000021CF',
      +	"nVDash;":                   '\U000022AF',
      +	"nVdash;":                   '\U000022AE',
      +	"nabla;":                    '\U00002207',
      +	"nacute;":                   '\U00000144',
      +	"nap;":                      '\U00002249',
      +	"napos;":                    '\U00000149',
      +	"napprox;":                  '\U00002249',
      +	"natur;":                    '\U0000266E',
      +	"natural;":                  '\U0000266E',
      +	"naturals;":                 '\U00002115',
      +	"nbsp;":                     '\U000000A0',
      +	"ncap;":                     '\U00002A43',
      +	"ncaron;":                   '\U00000148',
      +	"ncedil;":                   '\U00000146',
      +	"ncong;":                    '\U00002247',
      +	"ncup;":                     '\U00002A42',
      +	"ncy;":                      '\U0000043D',
      +	"ndash;":                    '\U00002013',
      +	"ne;":                       '\U00002260',
      +	"neArr;":                    '\U000021D7',
      +	"nearhk;":                   '\U00002924',
      +	"nearr;":                    '\U00002197',
      +	"nearrow;":                  '\U00002197',
      +	"nequiv;":                   '\U00002262',
      +	"nesear;":                   '\U00002928',
      +	"nexist;":                   '\U00002204',
      +	"nexists;":                  '\U00002204',
      +	"nfr;":                      '\U0001D52B',
      +	"nge;":                      '\U00002271',
      +	"ngeq;":                     '\U00002271',
      +	"ngsim;":                    '\U00002275',
      +	"ngt;":                      '\U0000226F',
      +	"ngtr;":                     '\U0000226F',
      +	"nhArr;":                    '\U000021CE',
      +	"nharr;":                    '\U000021AE',
      +	"nhpar;":                    '\U00002AF2',
      +	"ni;":                       '\U0000220B',
      +	"nis;":                      '\U000022FC',
      +	"nisd;":                     '\U000022FA',
      +	"niv;":                      '\U0000220B',
      +	"njcy;":                     '\U0000045A',
      +	"nlArr;":                    '\U000021CD',
      +	"nlarr;":                    '\U0000219A',
      +	"nldr;":                     '\U00002025',
      +	"nle;":                      '\U00002270',
      +	"nleftarrow;":               '\U0000219A',
      +	"nleftrightarrow;":          '\U000021AE',
      +	"nleq;":                     '\U00002270',
      +	"nless;":                    '\U0000226E',
      +	"nlsim;":                    '\U00002274',
      +	"nlt;":                      '\U0000226E',
      +	"nltri;":                    '\U000022EA',
      +	"nltrie;":                   '\U000022EC',
      +	"nmid;":                     '\U00002224',
      +	"nopf;":                     '\U0001D55F',
      +	"not;":                      '\U000000AC',
      +	"notin;":                    '\U00002209',
      +	"notinva;":                  '\U00002209',
      +	"notinvb;":                  '\U000022F7',
      +	"notinvc;":                  '\U000022F6',
      +	"notni;":                    '\U0000220C',
      +	"notniva;":                  '\U0000220C',
      +	"notnivb;":                  '\U000022FE',
      +	"notnivc;":                  '\U000022FD',
      +	"npar;":                     '\U00002226',
      +	"nparallel;":                '\U00002226',
      +	"npolint;":                  '\U00002A14',
      +	"npr;":                      '\U00002280',
      +	"nprcue;":                   '\U000022E0',
      +	"nprec;":                    '\U00002280',
      +	"nrArr;":                    '\U000021CF',
      +	"nrarr;":                    '\U0000219B',
      +	"nrightarrow;":              '\U0000219B',
      +	"nrtri;":                    '\U000022EB',
      +	"nrtrie;":                   '\U000022ED',
      +	"nsc;":                      '\U00002281',
      +	"nsccue;":                   '\U000022E1',
      +	"nscr;":                     '\U0001D4C3',
      +	"nshortmid;":                '\U00002224',
      +	"nshortparallel;":           '\U00002226',
      +	"nsim;":                     '\U00002241',
      +	"nsime;":                    '\U00002244',
      +	"nsimeq;":                   '\U00002244',
      +	"nsmid;":                    '\U00002224',
      +	"nspar;":                    '\U00002226',
      +	"nsqsube;":                  '\U000022E2',
      +	"nsqsupe;":                  '\U000022E3',
      +	"nsub;":                     '\U00002284',
      +	"nsube;":                    '\U00002288',
      +	"nsubseteq;":                '\U00002288',
      +	"nsucc;":                    '\U00002281',
      +	"nsup;":                     '\U00002285',
      +	"nsupe;":                    '\U00002289',
      +	"nsupseteq;":                '\U00002289',
      +	"ntgl;":                     '\U00002279',
      +	"ntilde;":                   '\U000000F1',
      +	"ntlg;":                     '\U00002278',
      +	"ntriangleleft;":            '\U000022EA',
      +	"ntrianglelefteq;":          '\U000022EC',
      +	"ntriangleright;":           '\U000022EB',
      +	"ntrianglerighteq;":         '\U000022ED',
      +	"nu;":                       '\U000003BD',
      +	"num;":                      '\U00000023',
      +	"numero;":                   '\U00002116',
      +	"numsp;":                    '\U00002007',
      +	"nvDash;":                   '\U000022AD',
      +	"nvHarr;":                   '\U00002904',
      +	"nvdash;":                   '\U000022AC',
      +	"nvinfin;":                  '\U000029DE',
      +	"nvlArr;":                   '\U00002902',
      +	"nvrArr;":                   '\U00002903',
      +	"nwArr;":                    '\U000021D6',
      +	"nwarhk;":                   '\U00002923',
      +	"nwarr;":                    '\U00002196',
      +	"nwarrow;":                  '\U00002196',
      +	"nwnear;":                   '\U00002927',
      +	"oS;":                       '\U000024C8',
      +	"oacute;":                   '\U000000F3',
      +	"oast;":                     '\U0000229B',
      +	"ocir;":                     '\U0000229A',
      +	"ocirc;":                    '\U000000F4',
      +	"ocy;":                      '\U0000043E',
      +	"odash;":                    '\U0000229D',
      +	"odblac;":                   '\U00000151',
      +	"odiv;":                     '\U00002A38',
      +	"odot;":                     '\U00002299',
      +	"odsold;":                   '\U000029BC',
      +	"oelig;":                    '\U00000153',
      +	"ofcir;":                    '\U000029BF',
      +	"ofr;":                      '\U0001D52C',
      +	"ogon;":                     '\U000002DB',
      +	"ograve;":                   '\U000000F2',
      +	"ogt;":                      '\U000029C1',
      +	"ohbar;":                    '\U000029B5',
      +	"ohm;":                      '\U000003A9',
      +	"oint;":                     '\U0000222E',
      +	"olarr;":                    '\U000021BA',
      +	"olcir;":                    '\U000029BE',
      +	"olcross;":                  '\U000029BB',
      +	"oline;":                    '\U0000203E',
      +	"olt;":                      '\U000029C0',
      +	"omacr;":                    '\U0000014D',
      +	"omega;":                    '\U000003C9',
      +	"omicron;":                  '\U000003BF',
      +	"omid;":                     '\U000029B6',
      +	"ominus;":                   '\U00002296',
      +	"oopf;":                     '\U0001D560',
      +	"opar;":                     '\U000029B7',
      +	"operp;":                    '\U000029B9',
      +	"oplus;":                    '\U00002295',
      +	"or;":                       '\U00002228',
      +	"orarr;":                    '\U000021BB',
      +	"ord;":                      '\U00002A5D',
      +	"order;":                    '\U00002134',
      +	"orderof;":                  '\U00002134',
      +	"ordf;":                     '\U000000AA',
      +	"ordm;":                     '\U000000BA',
      +	"origof;":                   '\U000022B6',
      +	"oror;":                     '\U00002A56',
      +	"orslope;":                  '\U00002A57',
      +	"orv;":                      '\U00002A5B',
      +	"oscr;":                     '\U00002134',
      +	"oslash;":                   '\U000000F8',
      +	"osol;":                     '\U00002298',
      +	"otilde;":                   '\U000000F5',
      +	"otimes;":                   '\U00002297',
      +	"otimesas;":                 '\U00002A36',
      +	"ouml;":                     '\U000000F6',
      +	"ovbar;":                    '\U0000233D',
      +	"par;":                      '\U00002225',
      +	"para;":                     '\U000000B6',
      +	"parallel;":                 '\U00002225',
      +	"parsim;":                   '\U00002AF3',
      +	"parsl;":                    '\U00002AFD',
      +	"part;":                     '\U00002202',
      +	"pcy;":                      '\U0000043F',
      +	"percnt;":                   '\U00000025',
      +	"period;":                   '\U0000002E',
      +	"permil;":                   '\U00002030',
      +	"perp;":                     '\U000022A5',
      +	"pertenk;":                  '\U00002031',
      +	"pfr;":                      '\U0001D52D',
      +	"phi;":                      '\U000003C6',
      +	"phiv;":                     '\U000003D5',
      +	"phmmat;":                   '\U00002133',
      +	"phone;":                    '\U0000260E',
      +	"pi;":                       '\U000003C0',
      +	"pitchfork;":                '\U000022D4',
      +	"piv;":                      '\U000003D6',
      +	"planck;":                   '\U0000210F',
      +	"planckh;":                  '\U0000210E',
      +	"plankv;":                   '\U0000210F',
      +	"plus;":                     '\U0000002B',
      +	"plusacir;":                 '\U00002A23',
      +	"plusb;":                    '\U0000229E',
      +	"pluscir;":                  '\U00002A22',
      +	"plusdo;":                   '\U00002214',
      +	"plusdu;":                   '\U00002A25',
      +	"pluse;":                    '\U00002A72',
      +	"plusmn;":                   '\U000000B1',
      +	"plussim;":                  '\U00002A26',
      +	"plustwo;":                  '\U00002A27',
      +	"pm;":                       '\U000000B1',
      +	"pointint;":                 '\U00002A15',
      +	"popf;":                     '\U0001D561',
      +	"pound;":                    '\U000000A3',
      +	"pr;":                       '\U0000227A',
      +	"prE;":                      '\U00002AB3',
      +	"prap;":                     '\U00002AB7',
      +	"prcue;":                    '\U0000227C',
      +	"pre;":                      '\U00002AAF',
      +	"prec;":                     '\U0000227A',
      +	"precapprox;":               '\U00002AB7',
      +	"preccurlyeq;":              '\U0000227C',
      +	"preceq;":                   '\U00002AAF',
      +	"precnapprox;":              '\U00002AB9',
      +	"precneqq;":                 '\U00002AB5',
      +	"precnsim;":                 '\U000022E8',
      +	"precsim;":                  '\U0000227E',
      +	"prime;":                    '\U00002032',
      +	"primes;":                   '\U00002119',
      +	"prnE;":                     '\U00002AB5',
      +	"prnap;":                    '\U00002AB9',
      +	"prnsim;":                   '\U000022E8',
      +	"prod;":                     '\U0000220F',
      +	"profalar;":                 '\U0000232E',
      +	"profline;":                 '\U00002312',
      +	"profsurf;":                 '\U00002313',
      +	"prop;":                     '\U0000221D',
      +	"propto;":                   '\U0000221D',
      +	"prsim;":                    '\U0000227E',
      +	"prurel;":                   '\U000022B0',
      +	"pscr;":                     '\U0001D4C5',
      +	"psi;":                      '\U000003C8',
      +	"puncsp;":                   '\U00002008',
      +	"qfr;":                      '\U0001D52E',
      +	"qint;":                     '\U00002A0C',
      +	"qopf;":                     '\U0001D562',
      +	"qprime;":                   '\U00002057',
      +	"qscr;":                     '\U0001D4C6',
      +	"quaternions;":              '\U0000210D',
      +	"quatint;":                  '\U00002A16',
      +	"quest;":                    '\U0000003F',
      +	"questeq;":                  '\U0000225F',
      +	"quot;":                     '\U00000022',
      +	"rAarr;":                    '\U000021DB',
      +	"rArr;":                     '\U000021D2',
      +	"rAtail;":                   '\U0000291C',
      +	"rBarr;":                    '\U0000290F',
      +	"rHar;":                     '\U00002964',
      +	"racute;":                   '\U00000155',
      +	"radic;":                    '\U0000221A',
      +	"raemptyv;":                 '\U000029B3',
      +	"rang;":                     '\U000027E9',
      +	"rangd;":                    '\U00002992',
      +	"range;":                    '\U000029A5',
      +	"rangle;":                   '\U000027E9',
      +	"raquo;":                    '\U000000BB',
      +	"rarr;":                     '\U00002192',
      +	"rarrap;":                   '\U00002975',
      +	"rarrb;":                    '\U000021E5',
      +	"rarrbfs;":                  '\U00002920',
      +	"rarrc;":                    '\U00002933',
      +	"rarrfs;":                   '\U0000291E',
      +	"rarrhk;":                   '\U000021AA',
      +	"rarrlp;":                   '\U000021AC',
      +	"rarrpl;":                   '\U00002945',
      +	"rarrsim;":                  '\U00002974',
      +	"rarrtl;":                   '\U000021A3',
      +	"rarrw;":                    '\U0000219D',
      +	"ratail;":                   '\U0000291A',
      +	"ratio;":                    '\U00002236',
      +	"rationals;":                '\U0000211A',
      +	"rbarr;":                    '\U0000290D',
      +	"rbbrk;":                    '\U00002773',
      +	"rbrace;":                   '\U0000007D',
      +	"rbrack;":                   '\U0000005D',
      +	"rbrke;":                    '\U0000298C',
      +	"rbrksld;":                  '\U0000298E',
      +	"rbrkslu;":                  '\U00002990',
      +	"rcaron;":                   '\U00000159',
      +	"rcedil;":                   '\U00000157',
      +	"rceil;":                    '\U00002309',
      +	"rcub;":                     '\U0000007D',
      +	"rcy;":                      '\U00000440',
      +	"rdca;":                     '\U00002937',
      +	"rdldhar;":                  '\U00002969',
      +	"rdquo;":                    '\U0000201D',
      +	"rdquor;":                   '\U0000201D',
      +	"rdsh;":                     '\U000021B3',
      +	"real;":                     '\U0000211C',
      +	"realine;":                  '\U0000211B',
      +	"realpart;":                 '\U0000211C',
      +	"reals;":                    '\U0000211D',
      +	"rect;":                     '\U000025AD',
      +	"reg;":                      '\U000000AE',
      +	"rfisht;":                   '\U0000297D',
      +	"rfloor;":                   '\U0000230B',
      +	"rfr;":                      '\U0001D52F',
      +	"rhard;":                    '\U000021C1',
      +	"rharu;":                    '\U000021C0',
      +	"rharul;":                   '\U0000296C',
      +	"rho;":                      '\U000003C1',
      +	"rhov;":                     '\U000003F1',
      +	"rightarrow;":               '\U00002192',
      +	"rightarrowtail;":           '\U000021A3',
      +	"rightharpoondown;":         '\U000021C1',
      +	"rightharpoonup;":           '\U000021C0',
      +	"rightleftarrows;":          '\U000021C4',
      +	"rightleftharpoons;":        '\U000021CC',
      +	"rightrightarrows;":         '\U000021C9',
      +	"rightsquigarrow;":          '\U0000219D',
      +	"rightthreetimes;":          '\U000022CC',
      +	"ring;":                     '\U000002DA',
      +	"risingdotseq;":             '\U00002253',
      +	"rlarr;":                    '\U000021C4',
      +	"rlhar;":                    '\U000021CC',
      +	"rlm;":                      '\U0000200F',
      +	"rmoust;":                   '\U000023B1',
      +	"rmoustache;":               '\U000023B1',
      +	"rnmid;":                    '\U00002AEE',
      +	"roang;":                    '\U000027ED',
      +	"roarr;":                    '\U000021FE',
      +	"robrk;":                    '\U000027E7',
      +	"ropar;":                    '\U00002986',
      +	"ropf;":                     '\U0001D563',
      +	"roplus;":                   '\U00002A2E',
      +	"rotimes;":                  '\U00002A35',
      +	"rpar;":                     '\U00000029',
      +	"rpargt;":                   '\U00002994',
      +	"rppolint;":                 '\U00002A12',
      +	"rrarr;":                    '\U000021C9',
      +	"rsaquo;":                   '\U0000203A',
      +	"rscr;":                     '\U0001D4C7',
      +	"rsh;":                      '\U000021B1',
      +	"rsqb;":                     '\U0000005D',
      +	"rsquo;":                    '\U00002019',
      +	"rsquor;":                   '\U00002019',
      +	"rthree;":                   '\U000022CC',
      +	"rtimes;":                   '\U000022CA',
      +	"rtri;":                     '\U000025B9',
      +	"rtrie;":                    '\U000022B5',
      +	"rtrif;":                    '\U000025B8',
      +	"rtriltri;":                 '\U000029CE',
      +	"ruluhar;":                  '\U00002968',
      +	"rx;":                       '\U0000211E',
      +	"sacute;":                   '\U0000015B',
      +	"sbquo;":                    '\U0000201A',
      +	"sc;":                       '\U0000227B',
      +	"scE;":                      '\U00002AB4',
      +	"scap;":                     '\U00002AB8',
      +	"scaron;":                   '\U00000161',
      +	"sccue;":                    '\U0000227D',
      +	"sce;":                      '\U00002AB0',
      +	"scedil;":                   '\U0000015F',
      +	"scirc;":                    '\U0000015D',
      +	"scnE;":                     '\U00002AB6',
      +	"scnap;":                    '\U00002ABA',
      +	"scnsim;":                   '\U000022E9',
      +	"scpolint;":                 '\U00002A13',
      +	"scsim;":                    '\U0000227F',
      +	"scy;":                      '\U00000441',
      +	"sdot;":                     '\U000022C5',
      +	"sdotb;":                    '\U000022A1',
      +	"sdote;":                    '\U00002A66',
      +	"seArr;":                    '\U000021D8',
      +	"searhk;":                   '\U00002925',
      +	"searr;":                    '\U00002198',
      +	"searrow;":                  '\U00002198',
      +	"sect;":                     '\U000000A7',
      +	"semi;":                     '\U0000003B',
      +	"seswar;":                   '\U00002929',
      +	"setminus;":                 '\U00002216',
      +	"setmn;":                    '\U00002216',
      +	"sext;":                     '\U00002736',
      +	"sfr;":                      '\U0001D530',
      +	"sfrown;":                   '\U00002322',
      +	"sharp;":                    '\U0000266F',
      +	"shchcy;":                   '\U00000449',
      +	"shcy;":                     '\U00000448',
      +	"shortmid;":                 '\U00002223',
      +	"shortparallel;":            '\U00002225',
      +	"shy;":                      '\U000000AD',
      +	"sigma;":                    '\U000003C3',
      +	"sigmaf;":                   '\U000003C2',
      +	"sigmav;":                   '\U000003C2',
      +	"sim;":                      '\U0000223C',
      +	"simdot;":                   '\U00002A6A',
      +	"sime;":                     '\U00002243',
      +	"simeq;":                    '\U00002243',
      +	"simg;":                     '\U00002A9E',
      +	"simgE;":                    '\U00002AA0',
      +	"siml;":                     '\U00002A9D',
      +	"simlE;":                    '\U00002A9F',
      +	"simne;":                    '\U00002246',
      +	"simplus;":                  '\U00002A24',
      +	"simrarr;":                  '\U00002972',
      +	"slarr;":                    '\U00002190',
      +	"smallsetminus;":            '\U00002216',
      +	"smashp;":                   '\U00002A33',
      +	"smeparsl;":                 '\U000029E4',
      +	"smid;":                     '\U00002223',
      +	"smile;":                    '\U00002323',
      +	"smt;":                      '\U00002AAA',
      +	"smte;":                     '\U00002AAC',
      +	"softcy;":                   '\U0000044C',
      +	"sol;":                      '\U0000002F',
      +	"solb;":                     '\U000029C4',
      +	"solbar;":                   '\U0000233F',
      +	"sopf;":                     '\U0001D564',
      +	"spades;":                   '\U00002660',
      +	"spadesuit;":                '\U00002660',
      +	"spar;":                     '\U00002225',
      +	"sqcap;":                    '\U00002293',
      +	"sqcup;":                    '\U00002294',
      +	"sqsub;":                    '\U0000228F',
      +	"sqsube;":                   '\U00002291',
      +	"sqsubset;":                 '\U0000228F',
      +	"sqsubseteq;":               '\U00002291',
      +	"sqsup;":                    '\U00002290',
      +	"sqsupe;":                   '\U00002292',
      +	"sqsupset;":                 '\U00002290',
      +	"sqsupseteq;":               '\U00002292',
      +	"squ;":                      '\U000025A1',
      +	"square;":                   '\U000025A1',
      +	"squarf;":                   '\U000025AA',
      +	"squf;":                     '\U000025AA',
      +	"srarr;":                    '\U00002192',
      +	"sscr;":                     '\U0001D4C8',
      +	"ssetmn;":                   '\U00002216',
      +	"ssmile;":                   '\U00002323',
      +	"sstarf;":                   '\U000022C6',
      +	"star;":                     '\U00002606',
      +	"starf;":                    '\U00002605',
      +	"straightepsilon;":          '\U000003F5',
      +	"straightphi;":              '\U000003D5',
      +	"strns;":                    '\U000000AF',
      +	"sub;":                      '\U00002282',
      +	"subE;":                     '\U00002AC5',
      +	"subdot;":                   '\U00002ABD',
      +	"sube;":                     '\U00002286',
      +	"subedot;":                  '\U00002AC3',
      +	"submult;":                  '\U00002AC1',
      +	"subnE;":                    '\U00002ACB',
      +	"subne;":                    '\U0000228A',
      +	"subplus;":                  '\U00002ABF',
      +	"subrarr;":                  '\U00002979',
      +	"subset;":                   '\U00002282',
      +	"subseteq;":                 '\U00002286',
      +	"subseteqq;":                '\U00002AC5',
      +	"subsetneq;":                '\U0000228A',
      +	"subsetneqq;":               '\U00002ACB',
      +	"subsim;":                   '\U00002AC7',
      +	"subsub;":                   '\U00002AD5',
      +	"subsup;":                   '\U00002AD3',
      +	"succ;":                     '\U0000227B',
      +	"succapprox;":               '\U00002AB8',
      +	"succcurlyeq;":              '\U0000227D',
      +	"succeq;":                   '\U00002AB0',
      +	"succnapprox;":              '\U00002ABA',
      +	"succneqq;":                 '\U00002AB6',
      +	"succnsim;":                 '\U000022E9',
      +	"succsim;":                  '\U0000227F',
      +	"sum;":                      '\U00002211',
      +	"sung;":                     '\U0000266A',
      +	"sup;":                      '\U00002283',
      +	"sup1;":                     '\U000000B9',
      +	"sup2;":                     '\U000000B2',
      +	"sup3;":                     '\U000000B3',
      +	"supE;":                     '\U00002AC6',
      +	"supdot;":                   '\U00002ABE',
      +	"supdsub;":                  '\U00002AD8',
      +	"supe;":                     '\U00002287',
      +	"supedot;":                  '\U00002AC4',
      +	"suphsol;":                  '\U000027C9',
      +	"suphsub;":                  '\U00002AD7',
      +	"suplarr;":                  '\U0000297B',
      +	"supmult;":                  '\U00002AC2',
      +	"supnE;":                    '\U00002ACC',
      +	"supne;":                    '\U0000228B',
      +	"supplus;":                  '\U00002AC0',
      +	"supset;":                   '\U00002283',
      +	"supseteq;":                 '\U00002287',
      +	"supseteqq;":                '\U00002AC6',
      +	"supsetneq;":                '\U0000228B',
      +	"supsetneqq;":               '\U00002ACC',
      +	"supsim;":                   '\U00002AC8',
      +	"supsub;":                   '\U00002AD4',
      +	"supsup;":                   '\U00002AD6',
      +	"swArr;":                    '\U000021D9',
      +	"swarhk;":                   '\U00002926',
      +	"swarr;":                    '\U00002199',
      +	"swarrow;":                  '\U00002199',
      +	"swnwar;":                   '\U0000292A',
      +	"szlig;":                    '\U000000DF',
      +	"target;":                   '\U00002316',
      +	"tau;":                      '\U000003C4',
      +	"tbrk;":                     '\U000023B4',
      +	"tcaron;":                   '\U00000165',
      +	"tcedil;":                   '\U00000163',
      +	"tcy;":                      '\U00000442',
      +	"tdot;":                     '\U000020DB',
      +	"telrec;":                   '\U00002315',
      +	"tfr;":                      '\U0001D531',
      +	"there4;":                   '\U00002234',
      +	"therefore;":                '\U00002234',
      +	"theta;":                    '\U000003B8',
      +	"thetasym;":                 '\U000003D1',
      +	"thetav;":                   '\U000003D1',
      +	"thickapprox;":              '\U00002248',
      +	"thicksim;":                 '\U0000223C',
      +	"thinsp;":                   '\U00002009',
      +	"thkap;":                    '\U00002248',
      +	"thksim;":                   '\U0000223C',
      +	"thorn;":                    '\U000000FE',
      +	"tilde;":                    '\U000002DC',
      +	"times;":                    '\U000000D7',
      +	"timesb;":                   '\U000022A0',
      +	"timesbar;":                 '\U00002A31',
      +	"timesd;":                   '\U00002A30',
      +	"tint;":                     '\U0000222D',
      +	"toea;":                     '\U00002928',
      +	"top;":                      '\U000022A4',
      +	"topbot;":                   '\U00002336',
      +	"topcir;":                   '\U00002AF1',
      +	"topf;":                     '\U0001D565',
      +	"topfork;":                  '\U00002ADA',
      +	"tosa;":                     '\U00002929',
      +	"tprime;":                   '\U00002034',
      +	"trade;":                    '\U00002122',
      +	"triangle;":                 '\U000025B5',
      +	"triangledown;":             '\U000025BF',
      +	"triangleleft;":             '\U000025C3',
      +	"trianglelefteq;":           '\U000022B4',
      +	"triangleq;":                '\U0000225C',
      +	"triangleright;":            '\U000025B9',
      +	"trianglerighteq;":          '\U000022B5',
      +	"tridot;":                   '\U000025EC',
      +	"trie;":                     '\U0000225C',
      +	"triminus;":                 '\U00002A3A',
      +	"triplus;":                  '\U00002A39',
      +	"trisb;":                    '\U000029CD',
      +	"tritime;":                  '\U00002A3B',
      +	"trpezium;":                 '\U000023E2',
      +	"tscr;":                     '\U0001D4C9',
      +	"tscy;":                     '\U00000446',
      +	"tshcy;":                    '\U0000045B',
      +	"tstrok;":                   '\U00000167',
      +	"twixt;":                    '\U0000226C',
      +	"twoheadleftarrow;":         '\U0000219E',
      +	"twoheadrightarrow;":        '\U000021A0',
      +	"uArr;":                     '\U000021D1',
      +	"uHar;":                     '\U00002963',
      +	"uacute;":                   '\U000000FA',
      +	"uarr;":                     '\U00002191',
      +	"ubrcy;":                    '\U0000045E',
      +	"ubreve;":                   '\U0000016D',
      +	"ucirc;":                    '\U000000FB',
      +	"ucy;":                      '\U00000443',
      +	"udarr;":                    '\U000021C5',
      +	"udblac;":                   '\U00000171',
      +	"udhar;":                    '\U0000296E',
      +	"ufisht;":                   '\U0000297E',
      +	"ufr;":                      '\U0001D532',
      +	"ugrave;":                   '\U000000F9',
      +	"uharl;":                    '\U000021BF',
      +	"uharr;":                    '\U000021BE',
      +	"uhblk;":                    '\U00002580',
      +	"ulcorn;":                   '\U0000231C',
      +	"ulcorner;":                 '\U0000231C',
      +	"ulcrop;":                   '\U0000230F',
      +	"ultri;":                    '\U000025F8',
      +	"umacr;":                    '\U0000016B',
      +	"uml;":                      '\U000000A8',
      +	"uogon;":                    '\U00000173',
      +	"uopf;":                     '\U0001D566',
      +	"uparrow;":                  '\U00002191',
      +	"updownarrow;":              '\U00002195',
      +	"upharpoonleft;":            '\U000021BF',
      +	"upharpoonright;":           '\U000021BE',
      +	"uplus;":                    '\U0000228E',
      +	"upsi;":                     '\U000003C5',
      +	"upsih;":                    '\U000003D2',
      +	"upsilon;":                  '\U000003C5',
      +	"upuparrows;":               '\U000021C8',
      +	"urcorn;":                   '\U0000231D',
      +	"urcorner;":                 '\U0000231D',
      +	"urcrop;":                   '\U0000230E',
      +	"uring;":                    '\U0000016F',
      +	"urtri;":                    '\U000025F9',
      +	"uscr;":                     '\U0001D4CA',
      +	"utdot;":                    '\U000022F0',
      +	"utilde;":                   '\U00000169',
      +	"utri;":                     '\U000025B5',
      +	"utrif;":                    '\U000025B4',
      +	"uuarr;":                    '\U000021C8',
      +	"uuml;":                     '\U000000FC',
      +	"uwangle;":                  '\U000029A7',
      +	"vArr;":                     '\U000021D5',
      +	"vBar;":                     '\U00002AE8',
      +	"vBarv;":                    '\U00002AE9',
      +	"vDash;":                    '\U000022A8',
      +	"vangrt;":                   '\U0000299C',
      +	"varepsilon;":               '\U000003F5',
      +	"varkappa;":                 '\U000003F0',
      +	"varnothing;":               '\U00002205',
      +	"varphi;":                   '\U000003D5',
      +	"varpi;":                    '\U000003D6',
      +	"varpropto;":                '\U0000221D',
      +	"varr;":                     '\U00002195',
      +	"varrho;":                   '\U000003F1',
      +	"varsigma;":                 '\U000003C2',
      +	"vartheta;":                 '\U000003D1',
      +	"vartriangleleft;":          '\U000022B2',
      +	"vartriangleright;":         '\U000022B3',
      +	"vcy;":                      '\U00000432',
      +	"vdash;":                    '\U000022A2',
      +	"vee;":                      '\U00002228',
      +	"veebar;":                   '\U000022BB',
      +	"veeeq;":                    '\U0000225A',
      +	"vellip;":                   '\U000022EE',
      +	"verbar;":                   '\U0000007C',
      +	"vert;":                     '\U0000007C',
      +	"vfr;":                      '\U0001D533',
      +	"vltri;":                    '\U000022B2',
      +	"vopf;":                     '\U0001D567',
      +	"vprop;":                    '\U0000221D',
      +	"vrtri;":                    '\U000022B3',
      +	"vscr;":                     '\U0001D4CB',
      +	"vzigzag;":                  '\U0000299A',
      +	"wcirc;":                    '\U00000175',
      +	"wedbar;":                   '\U00002A5F',
      +	"wedge;":                    '\U00002227',
      +	"wedgeq;":                   '\U00002259',
      +	"weierp;":                   '\U00002118',
      +	"wfr;":                      '\U0001D534',
      +	"wopf;":                     '\U0001D568',
      +	"wp;":                       '\U00002118',
      +	"wr;":                       '\U00002240',
      +	"wreath;":                   '\U00002240',
      +	"wscr;":                     '\U0001D4CC',
      +	"xcap;":                     '\U000022C2',
      +	"xcirc;":                    '\U000025EF',
      +	"xcup;":                     '\U000022C3',
      +	"xdtri;":                    '\U000025BD',
      +	"xfr;":                      '\U0001D535',
      +	"xhArr;":                    '\U000027FA',
      +	"xharr;":                    '\U000027F7',
      +	"xi;":                       '\U000003BE',
      +	"xlArr;":                    '\U000027F8',
      +	"xlarr;":                    '\U000027F5',
      +	"xmap;":                     '\U000027FC',
      +	"xnis;":                     '\U000022FB',
      +	"xodot;":                    '\U00002A00',
      +	"xopf;":                     '\U0001D569',
      +	"xoplus;":                   '\U00002A01',
      +	"xotime;":                   '\U00002A02',
      +	"xrArr;":                    '\U000027F9',
      +	"xrarr;":                    '\U000027F6',
      +	"xscr;":                     '\U0001D4CD',
      +	"xsqcup;":                   '\U00002A06',
      +	"xuplus;":                   '\U00002A04',
      +	"xutri;":                    '\U000025B3',
      +	"xvee;":                     '\U000022C1',
      +	"xwedge;":                   '\U000022C0',
      +	"yacute;":                   '\U000000FD',
      +	"yacy;":                     '\U0000044F',
      +	"ycirc;":                    '\U00000177',
      +	"ycy;":                      '\U0000044B',
      +	"yen;":                      '\U000000A5',
      +	"yfr;":                      '\U0001D536',
      +	"yicy;":                     '\U00000457',
      +	"yopf;":                     '\U0001D56A',
      +	"yscr;":                     '\U0001D4CE',
      +	"yucy;":                     '\U0000044E',
      +	"yuml;":                     '\U000000FF',
      +	"zacute;":                   '\U0000017A',
      +	"zcaron;":                   '\U0000017E',
      +	"zcy;":                      '\U00000437',
      +	"zdot;":                     '\U0000017C',
      +	"zeetrf;":                   '\U00002128',
      +	"zeta;":                     '\U000003B6',
      +	"zfr;":                      '\U0001D537',
      +	"zhcy;":                     '\U00000436',
      +	"zigrarr;":                  '\U000021DD',
      +	"zopf;":                     '\U0001D56B',
      +	"zscr;":                     '\U0001D4CF',
      +	"zwj;":                      '\U0000200D',
      +	"zwnj;":                     '\U0000200C',
      +	"AElig":                     '\U000000C6',
      +	"AMP":                       '\U00000026',
      +	"Aacute":                    '\U000000C1',
      +	"Acirc":                     '\U000000C2',
      +	"Agrave":                    '\U000000C0',
      +	"Aring":                     '\U000000C5',
      +	"Atilde":                    '\U000000C3',
      +	"Auml":                      '\U000000C4',
      +	"COPY":                      '\U000000A9',
      +	"Ccedil":                    '\U000000C7',
      +	"ETH":                       '\U000000D0',
      +	"Eacute":                    '\U000000C9',
      +	"Ecirc":                     '\U000000CA',
      +	"Egrave":                    '\U000000C8',
      +	"Euml":                      '\U000000CB',
      +	"GT":                        '\U0000003E',
      +	"Iacute":                    '\U000000CD',
      +	"Icirc":                     '\U000000CE',
      +	"Igrave":                    '\U000000CC',
      +	"Iuml":                      '\U000000CF',
      +	"LT":                        '\U0000003C',
      +	"Ntilde":                    '\U000000D1',
      +	"Oacute":                    '\U000000D3',
      +	"Ocirc":                     '\U000000D4',
      +	"Ograve":                    '\U000000D2',
      +	"Oslash":                    '\U000000D8',
      +	"Otilde":                    '\U000000D5',
      +	"Ouml":                      '\U000000D6',
      +	"QUOT":                      '\U00000022',
      +	"REG":                       '\U000000AE',
      +	"THORN":                     '\U000000DE',
      +	"Uacute":                    '\U000000DA',
      +	"Ucirc":                     '\U000000DB',
      +	"Ugrave":                    '\U000000D9',
      +	"Uuml":                      '\U000000DC',
      +	"Yacute":                    '\U000000DD',
      +	"aacute":                    '\U000000E1',
      +	"acirc":                     '\U000000E2',
      +	"acute":                     '\U000000B4',
      +	"aelig":                     '\U000000E6',
      +	"agrave":                    '\U000000E0',
      +	"amp":                       '\U00000026',
      +	"aring":                     '\U000000E5',
      +	"atilde":                    '\U000000E3',
      +	"auml":                      '\U000000E4',
      +	"brvbar":                    '\U000000A6',
      +	"ccedil":                    '\U000000E7',
      +	"cedil":                     '\U000000B8',
      +	"cent":                      '\U000000A2',
      +	"copy":                      '\U000000A9',
      +	"curren":                    '\U000000A4',
      +	"deg":                       '\U000000B0',
      +	"divide":                    '\U000000F7',
      +	"eacute":                    '\U000000E9',
      +	"ecirc":                     '\U000000EA',
      +	"egrave":                    '\U000000E8',
      +	"eth":                       '\U000000F0',
      +	"euml":                      '\U000000EB',
      +	"frac12":                    '\U000000BD',
      +	"frac14":                    '\U000000BC',
      +	"frac34":                    '\U000000BE',
      +	"gt":                        '\U0000003E',
      +	"iacute":                    '\U000000ED',
      +	"icirc":                     '\U000000EE',
      +	"iexcl":                     '\U000000A1',
      +	"igrave":                    '\U000000EC',
      +	"iquest":                    '\U000000BF',
      +	"iuml":                      '\U000000EF',
      +	"laquo":                     '\U000000AB',
      +	"lt":                        '\U0000003C',
      +	"macr":                      '\U000000AF',
      +	"micro":                     '\U000000B5',
      +	"middot":                    '\U000000B7',
      +	"nbsp":                      '\U000000A0',
      +	"not":                       '\U000000AC',
      +	"ntilde":                    '\U000000F1',
      +	"oacute":                    '\U000000F3',
      +	"ocirc":                     '\U000000F4',
      +	"ograve":                    '\U000000F2',
      +	"ordf":                      '\U000000AA',
      +	"ordm":                      '\U000000BA',
      +	"oslash":                    '\U000000F8',
      +	"otilde":                    '\U000000F5',
      +	"ouml":                      '\U000000F6',
      +	"para":                      '\U000000B6',
      +	"plusmn":                    '\U000000B1',
      +	"pound":                     '\U000000A3',
      +	"quot":                      '\U00000022',
      +	"raquo":                     '\U000000BB',
      +	"reg":                       '\U000000AE',
      +	"sect":                      '\U000000A7',
      +	"shy":                       '\U000000AD',
      +	"sup1":                      '\U000000B9',
      +	"sup2":                      '\U000000B2',
      +	"sup3":                      '\U000000B3',
      +	"szlig":                     '\U000000DF',
      +	"thorn":                     '\U000000FE',
      +	"times":                     '\U000000D7',
      +	"uacute":                    '\U000000FA',
      +	"ucirc":                     '\U000000FB',
      +	"ugrave":                    '\U000000F9',
      +	"uml":                       '\U000000A8',
      +	"uuml":                      '\U000000FC',
      +	"yacute":                    '\U000000FD',
      +	"yen":                       '\U000000A5',
      +	"yuml":                      '\U000000FF',
      +}
      +
      +// HTML entities that are two unicode codepoints.
      +var entity2 = map[string][2]rune{
      +	// TODO(nigeltao): Handle replacements that are wider than their names.
      +	// "nLt;":                     {'\u226A', '\u20D2'},
      +	// "nGt;":                     {'\u226B', '\u20D2'},
      +	"NotEqualTilde;":           {'\u2242', '\u0338'},
      +	"NotGreaterFullEqual;":     {'\u2267', '\u0338'},
      +	"NotGreaterGreater;":       {'\u226B', '\u0338'},
      +	"NotGreaterSlantEqual;":    {'\u2A7E', '\u0338'},
      +	"NotHumpDownHump;":         {'\u224E', '\u0338'},
      +	"NotHumpEqual;":            {'\u224F', '\u0338'},
      +	"NotLeftTriangleBar;":      {'\u29CF', '\u0338'},
      +	"NotLessLess;":             {'\u226A', '\u0338'},
      +	"NotLessSlantEqual;":       {'\u2A7D', '\u0338'},
      +	"NotNestedGreaterGreater;": {'\u2AA2', '\u0338'},
      +	"NotNestedLessLess;":       {'\u2AA1', '\u0338'},
      +	"NotPrecedesEqual;":        {'\u2AAF', '\u0338'},
      +	"NotRightTriangleBar;":     {'\u29D0', '\u0338'},
      +	"NotSquareSubset;":         {'\u228F', '\u0338'},
      +	"NotSquareSuperset;":       {'\u2290', '\u0338'},
      +	"NotSubset;":               {'\u2282', '\u20D2'},
      +	"NotSucceedsEqual;":        {'\u2AB0', '\u0338'},
      +	"NotSucceedsTilde;":        {'\u227F', '\u0338'},
      +	"NotSuperset;":             {'\u2283', '\u20D2'},
      +	"ThickSpace;":              {'\u205F', '\u200A'},
      +	"acE;":                     {'\u223E', '\u0333'},
      +	"bne;":                     {'\u003D', '\u20E5'},
      +	"bnequiv;":                 {'\u2261', '\u20E5'},
      +	"caps;":                    {'\u2229', '\uFE00'},
      +	"cups;":                    {'\u222A', '\uFE00'},
      +	"fjlig;":                   {'\u0066', '\u006A'},
      +	"gesl;":                    {'\u22DB', '\uFE00'},
      +	"gvertneqq;":               {'\u2269', '\uFE00'},
      +	"gvnE;":                    {'\u2269', '\uFE00'},
      +	"lates;":                   {'\u2AAD', '\uFE00'},
      +	"lesg;":                    {'\u22DA', '\uFE00'},
      +	"lvertneqq;":               {'\u2268', '\uFE00'},
      +	"lvnE;":                    {'\u2268', '\uFE00'},
      +	"nGg;":                     {'\u22D9', '\u0338'},
      +	"nGtv;":                    {'\u226B', '\u0338'},
      +	"nLl;":                     {'\u22D8', '\u0338'},
      +	"nLtv;":                    {'\u226A', '\u0338'},
      +	"nang;":                    {'\u2220', '\u20D2'},
      +	"napE;":                    {'\u2A70', '\u0338'},
      +	"napid;":                   {'\u224B', '\u0338'},
      +	"nbump;":                   {'\u224E', '\u0338'},
      +	"nbumpe;":                  {'\u224F', '\u0338'},
      +	"ncongdot;":                {'\u2A6D', '\u0338'},
      +	"nedot;":                   {'\u2250', '\u0338'},
      +	"nesim;":                   {'\u2242', '\u0338'},
      +	"ngE;":                     {'\u2267', '\u0338'},
      +	"ngeqq;":                   {'\u2267', '\u0338'},
      +	"ngeqslant;":               {'\u2A7E', '\u0338'},
      +	"nges;":                    {'\u2A7E', '\u0338'},
      +	"nlE;":                     {'\u2266', '\u0338'},
      +	"nleqq;":                   {'\u2266', '\u0338'},
      +	"nleqslant;":               {'\u2A7D', '\u0338'},
      +	"nles;":                    {'\u2A7D', '\u0338'},
      +	"notinE;":                  {'\u22F9', '\u0338'},
      +	"notindot;":                {'\u22F5', '\u0338'},
      +	"nparsl;":                  {'\u2AFD', '\u20E5'},
      +	"npart;":                   {'\u2202', '\u0338'},
      +	"npre;":                    {'\u2AAF', '\u0338'},
      +	"npreceq;":                 {'\u2AAF', '\u0338'},
      +	"nrarrc;":                  {'\u2933', '\u0338'},
      +	"nrarrw;":                  {'\u219D', '\u0338'},
      +	"nsce;":                    {'\u2AB0', '\u0338'},
      +	"nsubE;":                   {'\u2AC5', '\u0338'},
      +	"nsubset;":                 {'\u2282', '\u20D2'},
      +	"nsubseteqq;":              {'\u2AC5', '\u0338'},
      +	"nsucceq;":                 {'\u2AB0', '\u0338'},
      +	"nsupE;":                   {'\u2AC6', '\u0338'},
      +	"nsupset;":                 {'\u2283', '\u20D2'},
      +	"nsupseteqq;":              {'\u2AC6', '\u0338'},
      +	"nvap;":                    {'\u224D', '\u20D2'},
      +	"nvge;":                    {'\u2265', '\u20D2'},
      +	"nvgt;":                    {'\u003E', '\u20D2'},
      +	"nvle;":                    {'\u2264', '\u20D2'},
      +	"nvlt;":                    {'\u003C', '\u20D2'},
      +	"nvltrie;":                 {'\u22B4', '\u20D2'},
      +	"nvrtrie;":                 {'\u22B5', '\u20D2'},
      +	"nvsim;":                   {'\u223C', '\u20D2'},
      +	"race;":                    {'\u223D', '\u0331'},
      +	"smtes;":                   {'\u2AAC', '\uFE00'},
      +	"sqcaps;":                  {'\u2293', '\uFE00'},
      +	"sqcups;":                  {'\u2294', '\uFE00'},
      +	"varsubsetneq;":            {'\u228A', '\uFE00'},
      +	"varsubsetneqq;":           {'\u2ACB', '\uFE00'},
      +	"varsupsetneq;":            {'\u228B', '\uFE00'},
      +	"varsupsetneqq;":           {'\u2ACC', '\uFE00'},
      +	"vnsub;":                   {'\u2282', '\u20D2'},
      +	"vnsup;":                   {'\u2283', '\u20D2'},
      +	"vsubnE;":                  {'\u2ACB', '\uFE00'},
      +	"vsubne;":                  {'\u228A', '\uFE00'},
      +	"vsupnE;":                  {'\u2ACC', '\uFE00'},
      +	"vsupne;":                  {'\u228B', '\uFE00'},
      +}
      diff --git a/vendor/golang.org/x/net/html/entity_test.go b/vendor/golang.org/x/net/html/entity_test.go
      new file mode 100644
      index 00000000..b53f866f
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/entity_test.go
      @@ -0,0 +1,29 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +import (
      +	"testing"
      +	"unicode/utf8"
      +)
      +
      +func TestEntityLength(t *testing.T) {
      +	// We verify that the length of UTF-8 encoding of each value is <= 1 + len(key).
      +	// The +1 comes from the leading "&". This property implies that the length of
      +	// unescaped text is <= the length of escaped text.
      +	for k, v := range entity {
      +		if 1+len(k) < utf8.RuneLen(v) {
      +			t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v))
      +		}
      +		if len(k) > longestEntityWithoutSemicolon && k[len(k)-1] != ';' {
      +			t.Errorf("entity name %s is %d characters, but longestEntityWithoutSemicolon=%d", k, len(k), longestEntityWithoutSemicolon)
      +		}
      +	}
      +	for k, v := range entity2 {
      +		if 1+len(k) < utf8.RuneLen(v[0])+utf8.RuneLen(v[1]) {
      +			t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v[0]) + string(v[1]))
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go
      new file mode 100644
      index 00000000..d8561396
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/escape.go
      @@ -0,0 +1,258 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +import (
      +	"bytes"
      +	"strings"
      +	"unicode/utf8"
      +)
      +
      +// These replacements permit compatibility with old numeric entities that
      +// assumed Windows-1252 encoding.
      +// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
      +var replacementTable = [...]rune{
      +	'\u20AC', // First entry is what 0x80 should be replaced with.
      +	'\u0081',
      +	'\u201A',
      +	'\u0192',
      +	'\u201E',
      +	'\u2026',
      +	'\u2020',
      +	'\u2021',
      +	'\u02C6',
      +	'\u2030',
      +	'\u0160',
      +	'\u2039',
      +	'\u0152',
      +	'\u008D',
      +	'\u017D',
      +	'\u008F',
      +	'\u0090',
      +	'\u2018',
      +	'\u2019',
      +	'\u201C',
      +	'\u201D',
      +	'\u2022',
      +	'\u2013',
      +	'\u2014',
      +	'\u02DC',
      +	'\u2122',
      +	'\u0161',
      +	'\u203A',
      +	'\u0153',
      +	'\u009D',
      +	'\u017E',
      +	'\u0178', // Last entry is 0x9F.
      +	// 0x00->'\uFFFD' is handled programmatically.
      +	// 0x0D->'\u000D' is a no-op.
      +}
      +
      +// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
      +// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
      +// Precondition: b[src] == '&' && dst <= src.
      +// attribute should be true if parsing an attribute value.
      +func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
      +	// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
      +
      +	// i starts at 1 because we already know that s[0] == '&'.
      +	i, s := 1, b[src:]
      +
      +	if len(s) <= 1 {
      +		b[dst] = b[src]
      +		return dst + 1, src + 1
      +	}
      +
      +	if s[i] == '#' {
      +		if len(s) <= 3 { // We need to have at least "&#.".
      +			b[dst] = b[src]
      +			return dst + 1, src + 1
      +		}
      +		i++
      +		c := s[i]
      +		hex := false
      +		if c == 'x' || c == 'X' {
      +			hex = true
      +			i++
      +		}
      +
      +		x := '\x00'
      +		for i < len(s) {
      +			c = s[i]
      +			i++
      +			if hex {
      +				if '0' <= c && c <= '9' {
      +					x = 16*x + rune(c) - '0'
      +					continue
      +				} else if 'a' <= c && c <= 'f' {
      +					x = 16*x + rune(c) - 'a' + 10
      +					continue
      +				} else if 'A' <= c && c <= 'F' {
      +					x = 16*x + rune(c) - 'A' + 10
      +					continue
      +				}
      +			} else if '0' <= c && c <= '9' {
      +				x = 10*x + rune(c) - '0'
      +				continue
      +			}
      +			if c != ';' {
      +				i--
      +			}
      +			break
      +		}
      +
      +		if i <= 3 { // No characters matched.
      +			b[dst] = b[src]
      +			return dst + 1, src + 1
      +		}
      +
      +		if 0x80 <= x && x <= 0x9F {
      +			// Replace characters from Windows-1252 with UTF-8 equivalents.
      +			x = replacementTable[x-0x80]
      +		} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
      +			// Replace invalid characters with the replacement character.
      +			x = '\uFFFD'
      +		}
      +
      +		return dst + utf8.EncodeRune(b[dst:], x), src + i
      +	}
      +
      +	// Consume the maximum number of characters possible, with the
      +	// consumed characters matching one of the named references.
      +
      +	for i < len(s) {
      +		c := s[i]
      +		i++
      +		// Lower-cased characters are more common in entities, so we check for them first.
      +		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
      +			continue
      +		}
      +		if c != ';' {
      +			i--
      +		}
      +		break
      +	}
      +
      +	entityName := string(s[1:i])
      +	if entityName == "" {
      +		// No-op.
      +	} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
      +		// No-op.
      +	} else if x := entity[entityName]; x != 0 {
      +		return dst + utf8.EncodeRune(b[dst:], x), src + i
      +	} else if x := entity2[entityName]; x[0] != 0 {
      +		dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
      +		return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
      +	} else if !attribute {
      +		maxLen := len(entityName) - 1
      +		if maxLen > longestEntityWithoutSemicolon {
      +			maxLen = longestEntityWithoutSemicolon
      +		}
      +		for j := maxLen; j > 1; j-- {
      +			if x := entity[entityName[:j]]; x != 0 {
      +				return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
      +			}
      +		}
      +	}
      +
      +	dst1, src1 = dst+i, src+i
      +	copy(b[dst:dst1], b[src:src1])
      +	return dst1, src1
      +}
      +
      +// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
      +// attribute should be true if parsing an attribute value.
      +func unescape(b []byte, attribute bool) []byte {
      +	for i, c := range b {
      +		if c == '&' {
      +			dst, src := unescapeEntity(b, i, i, attribute)
      +			for src < len(b) {
      +				c := b[src]
      +				if c == '&' {
      +					dst, src = unescapeEntity(b, dst, src, attribute)
      +				} else {
      +					b[dst] = c
      +					dst, src = dst+1, src+1
      +				}
      +			}
      +			return b[0:dst]
      +		}
      +	}
      +	return b
      +}
      +
      +// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
      +func lower(b []byte) []byte {
      +	for i, c := range b {
      +		if 'A' <= c && c <= 'Z' {
      +			b[i] = c + 'a' - 'A'
      +		}
      +	}
      +	return b
      +}
      +
      +const escapedChars = "&'<>\"\r"
      +
      +func escape(w writer, s string) error {
      +	i := strings.IndexAny(s, escapedChars)
      +	for i != -1 {
      +		if _, err := w.WriteString(s[:i]); err != nil {
      +			return err
      +		}
      +		var esc string
      +		switch s[i] {
      +		case '&':
      +			esc = "&amp;"
      +		case '\'':
      +			// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
      +			esc = "&#39;"
      +		case '<':
      +			esc = "&lt;"
      +		case '>':
      +			esc = "&gt;"
      +		case '"':
      +			// "&#34;" is shorter than "&quot;".
      +			esc = "&#34;"
      +		case '\r':
      +			esc = "&#13;"
      +		default:
      +			panic("unrecognized escape character")
      +		}
      +		s = s[i+1:]
      +		if _, err := w.WriteString(esc); err != nil {
      +			return err
      +		}
      +		i = strings.IndexAny(s, escapedChars)
      +	}
      +	_, err := w.WriteString(s)
      +	return err
      +}
      +
      +// EscapeString escapes special characters like "<" to become "&lt;". It
      +// escapes only five such characters: <, >, &, ' and ".
      +// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
      +// always true.
      +func EscapeString(s string) string {
      +	if strings.IndexAny(s, escapedChars) == -1 {
      +		return s
      +	}
      +	var buf bytes.Buffer
      +	escape(&buf, s)
      +	return buf.String()
      +}
      +
      +// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
      +// larger range of entities than EscapeString escapes. For example, "&aacute;"
      +// unescapes to "á", as does "&#225;" and "&xE1;".
      +// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
      +// always true.
      +func UnescapeString(s string) string {
      +	for _, c := range s {
      +		if c == '&' {
      +			return string(unescape([]byte(s), false))
      +		}
      +	}
      +	return s
      +}
      diff --git a/vendor/golang.org/x/net/html/escape_test.go b/vendor/golang.org/x/net/html/escape_test.go
      new file mode 100644
      index 00000000..b405d4b4
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/escape_test.go
      @@ -0,0 +1,97 @@
      +// Copyright 2013 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +import "testing"
      +
      +type unescapeTest struct {
      +	// A short description of the test case.
      +	desc string
      +	// The HTML text.
      +	html string
      +	// The unescaped text.
      +	unescaped string
      +}
      +
      +var unescapeTests = []unescapeTest{
      +	// Handle no entities.
      +	{
      +		"copy",
      +		"A\ttext\nstring",
      +		"A\ttext\nstring",
      +	},
      +	// Handle simple named entities.
      +	{
      +		"simple",
      +		"&amp; &gt; &lt;",
      +		"& > <",
      +	},
      +	// Handle hitting the end of the string.
      +	{
      +		"stringEnd",
      +		"&amp &amp",
      +		"& &",
      +	},
      +	// Handle entities with two codepoints.
      +	{
      +		"multiCodepoint",
      +		"text &gesl; blah",
      +		"text \u22db\ufe00 blah",
      +	},
      +	// Handle decimal numeric entities.
      +	{
      +		"decimalEntity",
      +		"Delta = &#916; ",
      +		"Delta = Δ ",
      +	},
      +	// Handle hexadecimal numeric entities.
      +	{
      +		"hexadecimalEntity",
      +		"Lambda = &#x3bb; = &#X3Bb ",
      +		"Lambda = λ = λ ",
      +	},
      +	// Handle numeric early termination.
      +	{
      +		"numericEnds",
      +		"&# &#x &#128;43 &copy = &#169f = &#xa9",
      +		"&# &#x €43 © = ©f = ©",
      +	},
      +	// Handle numeric ISO-8859-1 entity replacements.
      +	{
      +		"numericReplacements",
      +		"Footnote&#x87;",
      +		"Footnote‡",
      +	},
      +}
      +
      +func TestUnescape(t *testing.T) {
      +	for _, tt := range unescapeTests {
      +		unescaped := UnescapeString(tt.html)
      +		if unescaped != tt.unescaped {
      +			t.Errorf("TestUnescape %s: want %q, got %q", tt.desc, tt.unescaped, unescaped)
      +		}
      +	}
      +}
      +
      +func TestUnescapeEscape(t *testing.T) {
      +	ss := []string{
      +		``,
      +		`abc def`,
      +		`a & b`,
      +		`a&amp;b`,
      +		`a &amp b`,
      +		`&quot;`,
      +		`"`,
      +		`"<&>"`,
      +		`&quot;&lt;&amp;&gt;&quot;`,
      +		`3&5==1 && 0<1, "0&lt;1", a+acute=&aacute;`,
      +		`The special characters are: <, >, &, ' and "`,
      +	}
      +	for _, s := range ss {
      +		if got := UnescapeString(EscapeString(s)); got != s {
      +			t.Errorf("got %q want %q", got, s)
      +		}
      +	}
      +}
      diff --git a/vendor/golang.org/x/net/html/example_test.go b/vendor/golang.org/x/net/html/example_test.go
      new file mode 100644
      index 00000000..0b06ed77
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/example_test.go
      @@ -0,0 +1,40 @@
      +// Copyright 2012 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +// This example demonstrates parsing HTML data and walking the resulting tree.
      +package html_test
      +
      +import (
      +	"fmt"
      +	"log"
      +	"strings"
      +
      +	"golang.org/x/net/html"
      +)
      +
      +func ExampleParse() {
      +	s := `<p>Links:</p><ul><li><a href="foo">Foo</a><li><a href="/bar/baz">BarBaz</a></ul>`
      +	doc, err := html.Parse(strings.NewReader(s))
      +	if err != nil {
      +		log.Fatal(err)
      +	}
      +	var f func(*html.Node)
      +	f = func(n *html.Node) {
      +		if n.Type == html.ElementNode && n.Data == "a" {
      +			for _, a := range n.Attr {
      +				if a.Key == "href" {
      +					fmt.Println(a.Val)
      +					break
      +				}
      +			}
      +		}
      +		for c := n.FirstChild; c != nil; c = c.NextSibling {
      +			f(c)
      +		}
      +	}
      +	f(doc)
      +	// Output:
      +	// foo
      +	// /bar/baz
      +}
      diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go
      new file mode 100644
      index 00000000..d3b38440
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/foreign.go
      @@ -0,0 +1,226 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +import (
      +	"strings"
      +)
      +
      +func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
      +	for i := range aa {
      +		if newName, ok := nameMap[aa[i].Key]; ok {
      +			aa[i].Key = newName
      +		}
      +	}
      +}
      +
      +func adjustForeignAttributes(aa []Attribute) {
      +	for i, a := range aa {
      +		if a.Key == "" || a.Key[0] != 'x' {
      +			continue
      +		}
      +		switch a.Key {
      +		case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
      +			"xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
      +			j := strings.Index(a.Key, ":")
      +			aa[i].Namespace = a.Key[:j]
      +			aa[i].Key = a.Key[j+1:]
      +		}
      +	}
      +}
      +
      +func htmlIntegrationPoint(n *Node) bool {
      +	if n.Type != ElementNode {
      +		return false
      +	}
      +	switch n.Namespace {
      +	case "math":
      +		if n.Data == "annotation-xml" {
      +			for _, a := range n.Attr {
      +				if a.Key == "encoding" {
      +					val := strings.ToLower(a.Val)
      +					if val == "text/html" || val == "application/xhtml+xml" {
      +						return true
      +					}
      +				}
      +			}
      +		}
      +	case "svg":
      +		switch n.Data {
      +		case "desc", "foreignObject", "title":
      +			return true
      +		}
      +	}
      +	return false
      +}
      +
      +func mathMLTextIntegrationPoint(n *Node) bool {
      +	if n.Namespace != "math" {
      +		return false
      +	}
      +	switch n.Data {
      +	case "mi", "mo", "mn", "ms", "mtext":
      +		return true
      +	}
      +	return false
      +}
      +
      +// Section 12.2.5.5.
      +var breakout = map[string]bool{
      +	"b":          true,
      +	"big":        true,
      +	"blockquote": true,
      +	"body":       true,
      +	"br":         true,
      +	"center":     true,
      +	"code":       true,
      +	"dd":         true,
      +	"div":        true,
      +	"dl":         true,
      +	"dt":         true,
      +	"em":         true,
      +	"embed":      true,
      +	"h1":         true,
      +	"h2":         true,
      +	"h3":         true,
      +	"h4":         true,
      +	"h5":         true,
      +	"h6":         true,
      +	"head":       true,
      +	"hr":         true,
      +	"i":          true,
      +	"img":        true,
      +	"li":         true,
      +	"listing":    true,
      +	"menu":       true,
      +	"meta":       true,
      +	"nobr":       true,
      +	"ol":         true,
      +	"p":          true,
      +	"pre":        true,
      +	"ruby":       true,
      +	"s":          true,
      +	"small":      true,
      +	"span":       true,
      +	"strong":     true,
      +	"strike":     true,
      +	"sub":        true,
      +	"sup":        true,
      +	"table":      true,
      +	"tt":         true,
      +	"u":          true,
      +	"ul":         true,
      +	"var":        true,
      +}
      +
      +// Section 12.2.5.5.
      +var svgTagNameAdjustments = map[string]string{
      +	"altglyph":            "altGlyph",
      +	"altglyphdef":         "altGlyphDef",
      +	"altglyphitem":        "altGlyphItem",
      +	"animatecolor":        "animateColor",
      +	"animatemotion":       "animateMotion",
      +	"animatetransform":    "animateTransform",
      +	"clippath":            "clipPath",
      +	"feblend":             "feBlend",
      +	"fecolormatrix":       "feColorMatrix",
      +	"fecomponenttransfer": "feComponentTransfer",
      +	"fecomposite":         "feComposite",
      +	"feconvolvematrix":    "feConvolveMatrix",
      +	"fediffuselighting":   "feDiffuseLighting",
      +	"fedisplacementmap":   "feDisplacementMap",
      +	"fedistantlight":      "feDistantLight",
      +	"feflood":             "feFlood",
      +	"fefunca":             "feFuncA",
      +	"fefuncb":             "feFuncB",
      +	"fefuncg":             "feFuncG",
      +	"fefuncr":             "feFuncR",
      +	"fegaussianblur":      "feGaussianBlur",
      +	"feimage":             "feImage",
      +	"femerge":             "feMerge",
      +	"femergenode":         "feMergeNode",
      +	"femorphology":        "feMorphology",
      +	"feoffset":            "feOffset",
      +	"fepointlight":        "fePointLight",
      +	"fespecularlighting":  "feSpecularLighting",
      +	"fespotlight":         "feSpotLight",
      +	"fetile":              "feTile",
      +	"feturbulence":        "feTurbulence",
      +	"foreignobject":       "foreignObject",
      +	"glyphref":            "glyphRef",
      +	"lineargradient":      "linearGradient",
      +	"radialgradient":      "radialGradient",
      +	"textpath":            "textPath",
      +}
      +
      +// Section 12.2.5.1
      +var mathMLAttributeAdjustments = map[string]string{
      +	"definitionurl": "definitionURL",
      +}
      +
      +var svgAttributeAdjustments = map[string]string{
      +	"attributename":             "attributeName",
      +	"attributetype":             "attributeType",
      +	"basefrequency":             "baseFrequency",
      +	"baseprofile":               "baseProfile",
      +	"calcmode":                  "calcMode",
      +	"clippathunits":             "clipPathUnits",
      +	"contentscripttype":         "contentScriptType",
      +	"contentstyletype":          "contentStyleType",
      +	"diffuseconstant":           "diffuseConstant",
      +	"edgemode":                  "edgeMode",
      +	"externalresourcesrequired": "externalResourcesRequired",
      +	"filterres":                 "filterRes",
      +	"filterunits":               "filterUnits",
      +	"glyphref":                  "glyphRef",
      +	"gradienttransform":         "gradientTransform",
      +	"gradientunits":             "gradientUnits",
      +	"kernelmatrix":              "kernelMatrix",
      +	"kernelunitlength":          "kernelUnitLength",
      +	"keypoints":                 "keyPoints",
      +	"keysplines":                "keySplines",
      +	"keytimes":                  "keyTimes",
      +	"lengthadjust":              "lengthAdjust",
      +	"limitingconeangle":         "limitingConeAngle",
      +	"markerheight":              "markerHeight",
      +	"markerunits":               "markerUnits",
      +	"markerwidth":               "markerWidth",
      +	"maskcontentunits":          "maskContentUnits",
      +	"maskunits":                 "maskUnits",
      +	"numoctaves":                "numOctaves",
      +	"pathlength":                "pathLength",
      +	"patterncontentunits":       "patternContentUnits",
      +	"patterntransform":          "patternTransform",
      +	"patternunits":              "patternUnits",
      +	"pointsatx":                 "pointsAtX",
      +	"pointsaty":                 "pointsAtY",
      +	"pointsatz":                 "pointsAtZ",
      +	"preservealpha":             "preserveAlpha",
      +	"preserveaspectratio":       "preserveAspectRatio",
      +	"primitiveunits":            "primitiveUnits",
      +	"refx":                      "refX",
      +	"refy":                      "refY",
      +	"repeatcount":               "repeatCount",
      +	"repeatdur":                 "repeatDur",
      +	"requiredextensions":        "requiredExtensions",
      +	"requiredfeatures":          "requiredFeatures",
      +	"specularconstant":          "specularConstant",
      +	"specularexponent":          "specularExponent",
      +	"spreadmethod":              "spreadMethod",
      +	"startoffset":               "startOffset",
      +	"stddeviation":              "stdDeviation",
      +	"stitchtiles":               "stitchTiles",
      +	"surfacescale":              "surfaceScale",
      +	"systemlanguage":            "systemLanguage",
      +	"tablevalues":               "tableValues",
      +	"targetx":                   "targetX",
      +	"targety":                   "targetY",
      +	"textlength":                "textLength",
      +	"viewbox":                   "viewBox",
      +	"viewtarget":                "viewTarget",
      +	"xchannelselector":          "xChannelSelector",
      +	"ychannelselector":          "yChannelSelector",
      +	"zoomandpan":                "zoomAndPan",
      +}
      diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go
      new file mode 100644
      index 00000000..26b657ae
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/node.go
      @@ -0,0 +1,193 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +import (
      +	"golang.org/x/net/html/atom"
      +)
      +
      +// A NodeType is the type of a Node.
      +type NodeType uint32
      +
      +const (
      +	ErrorNode NodeType = iota
      +	TextNode
      +	DocumentNode
      +	ElementNode
      +	CommentNode
      +	DoctypeNode
      +	scopeMarkerNode
      +)
      +
      +// Section 12.2.3.3 says "scope markers are inserted when entering applet
      +// elements, buttons, object elements, marquees, table cells, and table
      +// captions, and are used to prevent formatting from 'leaking'".
      +var scopeMarker = Node{Type: scopeMarkerNode}
      +
      +// A Node consists of a NodeType and some Data (tag name for element nodes,
      +// content for text) and are part of a tree of Nodes. Element nodes may also
      +// have a Namespace and contain a slice of Attributes. Data is unescaped, so
      +// that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
      +// is the atom for Data, or zero if Data is not a known tag name.
      +//
      +// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
      +// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
      +// "svg" is short for "http://www.w3.org/2000/svg".
      +type Node struct {
      +	Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
      +
      +	Type      NodeType
      +	DataAtom  atom.Atom
      +	Data      string
      +	Namespace string
      +	Attr      []Attribute
      +}
      +
+// InsertBefore inserts newChild as a child of n, immediately before oldChild
+// in the sequence of n's children. oldChild may be nil, in which case newChild
+// is appended to the end of n's children.
+//
+// It will panic if newChild already has a parent or siblings.
+func (n *Node) InsertBefore(newChild, oldChild *Node) {
+	if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
+		panic("html: InsertBefore called for an attached child Node")
+	}
+	// prev and next bracket the insertion point; next == nil means append.
+	var prev, next *Node
+	if oldChild != nil {
+		prev, next = oldChild.PrevSibling, oldChild
+	} else {
+		prev = n.LastChild
+	}
+	if prev != nil {
+		prev.NextSibling = newChild
+	} else {
+		n.FirstChild = newChild
+	}
+	if next != nil {
+		next.PrevSibling = newChild
+	} else {
+		n.LastChild = newChild
+	}
+	newChild.Parent = n
+	newChild.PrevSibling = prev
+	newChild.NextSibling = next
+}
+
+// AppendChild adds a node c as a child of n.
+//
+// It will panic if c already has a parent or siblings.
+func (n *Node) AppendChild(c *Node) {
+	if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
+		panic("html: AppendChild called for an attached child Node")
+	}
+	last := n.LastChild
+	if last != nil {
+		last.NextSibling = c
+	} else {
+		// n had no children; c becomes both first and last child.
+		n.FirstChild = c
+	}
+	n.LastChild = c
+	c.Parent = n
+	c.PrevSibling = last
+}
+
+// RemoveChild removes a node c that is a child of n. Afterwards, c will have
+// no parent and no siblings.
+//
+// It will panic if c's parent is not n.
+func (n *Node) RemoveChild(c *Node) {
+	if c.Parent != n {
+		panic("html: RemoveChild called for a non-child Node")
+	}
+	if n.FirstChild == c {
+		n.FirstChild = c.NextSibling
+	}
+	if c.NextSibling != nil {
+		c.NextSibling.PrevSibling = c.PrevSibling
+	}
+	if n.LastChild == c {
+		n.LastChild = c.PrevSibling
+	}
+	if c.PrevSibling != nil {
+		c.PrevSibling.NextSibling = c.NextSibling
+	}
+	// Fully detach c so it can later be re-inserted elsewhere.
+	c.Parent = nil
+	c.PrevSibling = nil
+	c.NextSibling = nil
+}
+
+// reparentChildren reparents all of src's child nodes to dst.
+func reparentChildren(dst, src *Node) {
+	for {
+		child := src.FirstChild
+		if child == nil {
+			break
+		}
+		src.RemoveChild(child)
+		dst.AppendChild(child)
+	}
+}
+
+// clone returns a new node with the same type, data and attributes.
+// The clone has no parent, no siblings and no children.
+func (n *Node) clone() *Node {
+	m := &Node{
+		Type:     n.Type,
+		DataAtom: n.DataAtom,
+		Data:     n.Data,
+		Attr:     make([]Attribute, len(n.Attr)),
+	}
+	// Attr is deep-copied so later mutation of n.Attr cannot affect the clone.
+	copy(m.Attr, n.Attr)
+	return m
+}
      +
+// nodeStack is a stack of nodes.
+type nodeStack []*Node
+
+// pop pops the stack. It will panic if s is empty.
+func (s *nodeStack) pop() *Node {
+	i := len(*s)
+	n := (*s)[i-1]
+	*s = (*s)[:i-1]
+	return n
+}
+
+// top returns the most recently pushed node, or nil if s is empty.
+func (s *nodeStack) top() *Node {
+	if i := len(*s); i > 0 {
+		return (*s)[i-1]
+	}
+	return nil
+}
+
+// index returns the index of the top-most occurrence of n in the stack, or -1
+// if n is not present.
+func (s *nodeStack) index(n *Node) int {
+	for i := len(*s) - 1; i >= 0; i-- {
+		if (*s)[i] == n {
+			return i
+		}
+	}
+	return -1
+}
+
+// insert inserts a node at the given index.
+func (s *nodeStack) insert(i int, n *Node) {
+	// Grow by one, shift elements [i:] up, then write n into the gap.
+	(*s) = append(*s, nil)
+	copy((*s)[i+1:], (*s)[i:])
+	(*s)[i] = n
+}
+
+// remove removes a node from the stack. It is a no-op if n is not present.
+func (s *nodeStack) remove(n *Node) {
+	i := s.index(n)
+	if i == -1 {
+		return
+	}
+	copy((*s)[i:], (*s)[i+1:])
+	j := len(*s) - 1
+	// Zero the vacated tail slot so the slice does not retain the *Node.
+	(*s)[j] = nil
+	*s = (*s)[:j]
+}
      diff --git a/vendor/golang.org/x/net/html/node_test.go b/vendor/golang.org/x/net/html/node_test.go
      new file mode 100644
      index 00000000..471102f3
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/node_test.go
      @@ -0,0 +1,146 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +import (
      +	"fmt"
      +)
      +
+// checkTreeConsistency checks that a node and its descendants are all
+// consistent in their parent/child/sibling relationships.
+func checkTreeConsistency(n *Node) error {
+	return checkTreeConsistency1(n, 0)
+}
+
+// checkTreeConsistency1 checks n, then recurses into each child, carrying the
+// recursion depth so that a cyclic "tree" is reported instead of looping.
+func checkTreeConsistency1(n *Node, depth int) error {
+	// A legitimate tree this deep is implausible; treat it as a cycle.
+	if depth == 1e4 {
+		return fmt.Errorf("html: tree looks like it contains a cycle")
+	}
+	if err := checkNodeConsistency(n); err != nil {
+		return err
+	}
+	for c := n.FirstChild; c != nil; c = c.NextSibling {
+		if err := checkTreeConsistency1(c, depth+1); err != nil {
+			return err
+		}
+	}
+	return nil
+}
      +
+// checkNodeConsistency checks that a node's parent/child/sibling relationships
+// are consistent.
+func checkNodeConsistency(n *Node) error {
+	if n == nil {
+		return nil
+	}
+
+	// Bounded walks over the parent chain and both sibling lists: hitting the
+	// cap almost certainly means a pointer cycle rather than a huge document.
+	nParent := 0
+	for p := n.Parent; p != nil; p = p.Parent {
+		nParent++
+		if nParent == 1e4 {
+			return fmt.Errorf("html: parent list looks like an infinite loop")
+		}
+	}
+
+	nForward := 0
+	for c := n.FirstChild; c != nil; c = c.NextSibling {
+		nForward++
+		if nForward == 1e6 {
+			return fmt.Errorf("html: forward list of children looks like an infinite loop")
+		}
+		if c.Parent != n {
+			return fmt.Errorf("html: inconsistent child/parent relationship")
+		}
+	}
+
+	nBackward := 0
+	for c := n.LastChild; c != nil; c = c.PrevSibling {
+		nBackward++
+		if nBackward == 1e6 {
+			return fmt.Errorf("html: backward list of children looks like an infinite loop")
+		}
+		if c.Parent != n {
+			return fmt.Errorf("html: inconsistent child/parent relationship")
+		}
+	}
+
+	// The parent must be distinct from n and all of n's immediate relatives,
+	// and must actually list n among its children.
+	if n.Parent != nil {
+		if n.Parent == n {
+			return fmt.Errorf("html: inconsistent parent relationship")
+		}
+		if n.Parent == n.FirstChild {
+			return fmt.Errorf("html: inconsistent parent/first relationship")
+		}
+		if n.Parent == n.LastChild {
+			return fmt.Errorf("html: inconsistent parent/last relationship")
+		}
+		if n.Parent == n.PrevSibling {
+			return fmt.Errorf("html: inconsistent parent/prev relationship")
+		}
+		if n.Parent == n.NextSibling {
+			return fmt.Errorf("html: inconsistent parent/next relationship")
+		}
+
+		parentHasNAsAChild := false
+		for c := n.Parent.FirstChild; c != nil; c = c.NextSibling {
+			if c == n {
+				parentHasNAsAChild = true
+				break
+			}
+		}
+		if !parentHasNAsAChild {
+			return fmt.Errorf("html: inconsistent parent/child relationship")
+		}
+	}
+
+	if n.PrevSibling != nil && n.PrevSibling.NextSibling != n {
+		return fmt.Errorf("html: inconsistent prev/next relationship")
+	}
+	if n.NextSibling != nil && n.NextSibling.PrevSibling != n {
+		return fmt.Errorf("html: inconsistent next/prev relationship")
+	}
+
+	if (n.FirstChild == nil) != (n.LastChild == nil) {
+		return fmt.Errorf("html: inconsistent first/last relationship")
+	}
+	if n.FirstChild != nil && n.FirstChild == n.LastChild {
+		// We have a sole child.
+		if n.FirstChild.PrevSibling != nil || n.FirstChild.NextSibling != nil {
+			return fmt.Errorf("html: inconsistent sole child's sibling relationship")
+		}
+	}
+
+	// Cross-check the forward and backward child lists: they must contain
+	// exactly the same set of nodes, with matching endpoints.
+	seen := map[*Node]bool{}
+
+	var last *Node
+	for c := n.FirstChild; c != nil; c = c.NextSibling {
+		if seen[c] {
+			return fmt.Errorf("html: inconsistent repeated child")
+		}
+		seen[c] = true
+		last = c
+	}
+	if last != n.LastChild {
+		return fmt.Errorf("html: inconsistent last relationship")
+	}
+
+	var first *Node
+	for c := n.LastChild; c != nil; c = c.PrevSibling {
+		if !seen[c] {
+			return fmt.Errorf("html: inconsistent missing child")
+		}
+		delete(seen, c)
+		first = c
+	}
+	if first != n.FirstChild {
+		return fmt.Errorf("html: inconsistent first relationship")
+	}
+
+	if len(seen) != 0 {
+		return fmt.Errorf("html: inconsistent forwards/backwards child list")
+	}
+
+	return nil
+}
      diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
      new file mode 100644
      index 00000000..be4b2bf5
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/parse.go
      @@ -0,0 +1,2094 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +import (
      +	"errors"
      +	"fmt"
      +	"io"
      +	"strings"
      +
      +	a "golang.org/x/net/html/atom"
      +)
      +
+// A parser implements the HTML5 parsing algorithm:
+// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction
+type parser struct {
+	// tokenizer provides the tokens for the parser.
+	tokenizer *Tokenizer
+	// tok is the most recently read token.
+	tok Token
+	// Self-closing tags like <hr/> are treated as start tags, except that
+	// hasSelfClosingToken is set while they are being processed.
+	hasSelfClosingToken bool
+	// doc is the document root element.
+	doc *Node
+	// The stack of open elements (section 12.2.3.2) and active formatting
+	// elements (section 12.2.3.3).
+	oe, afe nodeStack
+	// Element pointers (section 12.2.3.4).
+	head, form *Node
+	// Other parsing state flags (section 12.2.3.5).
+	scripting, framesetOK bool
+	// im is the current insertion mode.
+	im insertionMode
+	// originalIM is the insertion mode to go back to after completing a text
+	// or inTableText insertion mode.
+	originalIM insertionMode
+	// fosterParenting is whether new elements should be inserted according to
+	// the foster parenting rules (section 12.2.5.3).
+	fosterParenting bool
+	// quirks is whether the parser is operating in "quirks mode."
+	quirks bool
+	// fragment is whether the parser is parsing an HTML fragment.
+	fragment bool
+	// context is the context element when parsing an HTML fragment
+	// (section 12.4).
+	context *Node
+}
+
+// top returns the current node: the top of the stack of open elements, or the
+// document root when the stack is empty.
+func (p *parser) top() *Node {
+	if n := p.oe.top(); n != nil {
+		return n
+	}
+	return p.doc
+}
      +
+// Stop tags for use in popUntil. These come from section 12.2.3.2.
+// Keyed by element namespace: "" is HTML, plus MathML and SVG entries.
+var (
+	defaultScopeStopTags = map[string][]a.Atom{
+		"":     {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template},
+		"math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext},
+		"svg":  {a.Desc, a.ForeignObject, a.Title},
+	}
+)
+
+// scope selects one of the "has an element in ... scope" variants of
+// section 12.2.3.2 used by popUntil and friends.
+type scope int
+
+const (
+	defaultScope scope = iota
+	listItemScope
+	buttonScope
+	tableScope
+	tableRowScope
+	tableBodyScope
+	selectScope
+)
      +
+// popUntil pops the stack of open elements at the highest element whose tag
+// is in matchTags, provided there is no higher element in the scope's stop
+// tags (as defined in section 12.2.3.2). It returns whether or not there was
+// such an element. If there was not, popUntil leaves the stack unchanged.
+//
+// For example, the set of stop tags for table scope is: "html", "table". If
+// the stack was:
+// ["html", "body", "font", "table", "b", "i", "u"]
+// then popUntil(tableScope, "font") would return false, but
+// popUntil(tableScope, "i") would return true and the stack would become:
+// ["html", "body", "font", "table", "b"]
+//
+// If an element's tag is in both the stop tags and matchTags, then the stack
+// will be popped and the function returns true (provided, of course, there was
+// no higher element in the stack that was also in the stop tags). For example,
+// popUntil(tableScope, "table") returns true and leaves:
+// ["html", "body", "font"]
+func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool {
+	if i := p.indexOfElementInScope(s, matchTags...); i != -1 {
+		p.oe = p.oe[:i]
+		return true
+	}
+	return false
+}
+
+// indexOfElementInScope returns the index in p.oe of the highest element whose
+// tag is in matchTags that is in scope. If no matching element is in scope, it
+// returns -1.
+func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		tagAtom := p.oe[i].DataAtom
+		// Only HTML-namespace elements can match by tag or act as the
+		// per-scope stop tags handled in this inner switch.
+		if p.oe[i].Namespace == "" {
+			for _, t := range matchTags {
+				if t == tagAtom {
+					return i
+				}
+			}
+			switch s {
+			case defaultScope:
+				// No-op.
+			case listItemScope:
+				if tagAtom == a.Ol || tagAtom == a.Ul {
+					return -1
+				}
+			case buttonScope:
+				if tagAtom == a.Button {
+					return -1
+				}
+			case tableScope:
+				if tagAtom == a.Html || tagAtom == a.Table {
+					return -1
+				}
+			case selectScope:
+				if tagAtom != a.Optgroup && tagAtom != a.Option {
+					return -1
+				}
+			default:
+				panic("unreachable")
+			}
+		}
+		// Namespaced (MathML/SVG) elements can still stop the default-like
+		// scopes, via the per-namespace entries in defaultScopeStopTags.
+		switch s {
+		case defaultScope, listItemScope, buttonScope:
+			for _, t := range defaultScopeStopTags[p.oe[i].Namespace] {
+				if t == tagAtom {
+					return -1
+				}
+			}
+		}
+	}
+	return -1
+}
+
+// elementInScope is like popUntil, except that it doesn't modify the stack of
+// open elements.
+func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool {
+	return p.indexOfElementInScope(s, matchTags...) != -1
+}
      +
+// clearStackToContext pops elements off the stack of open elements until a
+// scope-defined element is found.
+func (p *parser) clearStackToContext(s scope) {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		tagAtom := p.oe[i].DataAtom
+		switch s {
+		case tableScope:
+			if tagAtom == a.Html || tagAtom == a.Table {
+				p.oe = p.oe[:i+1]
+				return
+			}
+		case tableRowScope:
+			if tagAtom == a.Html || tagAtom == a.Tr {
+				p.oe = p.oe[:i+1]
+				return
+			}
+		case tableBodyScope:
+			if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead {
+				p.oe = p.oe[:i+1]
+				return
+			}
+		default:
+			panic("unreachable")
+		}
+	}
+}
+
+// generateImpliedEndTags pops nodes off the stack of open elements as long as
+// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt.
+// If exceptions are specified, nodes with that name will not be popped off.
+func (p *parser) generateImpliedEndTags(exceptions ...string) {
+	var i int
+loop:
+	for i = len(p.oe) - 1; i >= 0; i-- {
+		n := p.oe[i]
+		if n.Type == ElementNode {
+			switch n.DataAtom {
+			case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt:
+				for _, except := range exceptions {
+					if n.Data == except {
+						break loop
+					}
+				}
+				continue
+			}
+		}
+		break
+	}
+
+	// i is the last element to keep; everything above it is popped.
+	p.oe = p.oe[:i+1]
+}
      +
+// addChild adds a child node n to the top element, and pushes n onto the stack
+// of open elements if it is an element node.
+func (p *parser) addChild(n *Node) {
+	if p.shouldFosterParent() {
+		p.fosterParent(n)
+	} else {
+		p.top().AppendChild(n)
+	}
+
+	if n.Type == ElementNode {
+		p.oe = append(p.oe, n)
+	}
+}
+
+// shouldFosterParent returns whether the next node to be added should be
+// foster parented.
+func (p *parser) shouldFosterParent() bool {
+	// Only applies when foster parenting is on and the current node is one of
+	// the table-related elements.
+	if p.fosterParenting {
+		switch p.top().DataAtom {
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			return true
+		}
+	}
+	return false
+}
+
+// fosterParent adds a child node according to the foster parenting rules.
+// Section 12.2.5.3, "foster parenting".
+func (p *parser) fosterParent(n *Node) {
+	var table, parent, prev *Node
+	var i int
+	// Find the last <table> on the stack of open elements, if any.
+	for i = len(p.oe) - 1; i >= 0; i-- {
+		if p.oe[i].DataAtom == a.Table {
+			table = p.oe[i]
+			break
+		}
+	}
+
+	if table == nil {
+		// The foster parent is the html element.
+		parent = p.oe[0]
+	} else {
+		parent = table.Parent
+	}
+	if parent == nil {
+		// The table is detached; use the element just below it on the stack.
+		parent = p.oe[i-1]
+	}
+
+	if table != nil {
+		prev = table.PrevSibling
+	} else {
+		prev = parent.LastChild
+	}
+	// Merge adjacent text nodes rather than inserting a second one.
+	if prev != nil && prev.Type == TextNode && n.Type == TextNode {
+		prev.Data += n.Data
+		return
+	}
+
+	parent.InsertBefore(n, table)
+}
      +
+// addText adds text to the preceding node if it is a text node, or else it
+// calls addChild with a new text node.
+func (p *parser) addText(text string) {
+	if text == "" {
+		return
+	}
+
+	if p.shouldFosterParent() {
+		p.fosterParent(&Node{
+			Type: TextNode,
+			Data: text,
+		})
+		return
+	}
+
+	// Coalesce with a trailing text node when possible.
+	t := p.top()
+	if n := t.LastChild; n != nil && n.Type == TextNode {
+		n.Data += text
+		return
+	}
+	p.addChild(&Node{
+		Type: TextNode,
+		Data: text,
+	})
+}
+
+// addElement adds a child element based on the current token.
+func (p *parser) addElement() {
+	p.addChild(&Node{
+		Type:     ElementNode,
+		DataAtom: p.tok.DataAtom,
+		Data:     p.tok.Data,
+		Attr:     p.tok.Attr,
+	})
+}
      +
+// Section 12.2.3.3.
+//
+// addFormattingElement adds an element for the current token and pushes it
+// onto the list of active formatting elements, evicting older duplicates.
+func (p *parser) addFormattingElement() {
+	tagAtom, attr := p.tok.DataAtom, p.tok.Attr
+	p.addElement()
+
+	// Implement the Noah's Ark clause, but with three per family instead of two.
+	identicalElements := 0
+findIdenticalElements:
+	for i := len(p.afe) - 1; i >= 0; i-- {
+		n := p.afe[i]
+		if n.Type == scopeMarkerNode {
+			break
+		}
+		if n.Type != ElementNode {
+			continue
+		}
+		if n.Namespace != "" {
+			continue
+		}
+		if n.DataAtom != tagAtom {
+			continue
+		}
+		if len(n.Attr) != len(attr) {
+			continue
+		}
+	compareAttributes:
+		for _, t0 := range n.Attr {
+			for _, t1 := range attr {
+				if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val {
+					// Found a match for this attribute, continue with the next attribute.
+					continue compareAttributes
+				}
+			}
+			// If we get here, there is no attribute that matches a.
+			// Therefore the element is not identical to the new one.
+			continue findIdenticalElements
+		}
+
+		identicalElements++
+		if identicalElements >= 3 {
+			p.afe.remove(n)
+		}
+	}
+
+	p.afe = append(p.afe, p.top())
+}
+
+// Section 12.2.3.3.
+//
+// clearActiveFormattingElements pops entries up to and including the most
+// recent scope marker (or empties the list).
+func (p *parser) clearActiveFormattingElements() {
+	for {
+		n := p.afe.pop()
+		if len(p.afe) == 0 || n.Type == scopeMarkerNode {
+			return
+		}
+	}
+}
+
+// Section 12.2.3.3.
+//
+// reconstructActiveFormattingElements re-opens formatting elements that are
+// in the active list but no longer on the stack of open elements.
+func (p *parser) reconstructActiveFormattingElements() {
+	n := p.afe.top()
+	if n == nil {
+		return
+	}
+	if n.Type == scopeMarkerNode || p.oe.index(n) != -1 {
+		return
+	}
+	// Walk back to the last entry that is a scope marker or is still open.
+	i := len(p.afe) - 1
+	for n.Type != scopeMarkerNode && p.oe.index(n) == -1 {
+		if i == 0 {
+			i = -1
+			break
+		}
+		i--
+		n = p.afe[i]
+	}
+	// Clone and re-insert every entry after that point, replacing each list
+	// entry with its clone.
+	for {
+		i++
+		clone := p.afe[i].clone()
+		p.addChild(clone)
+		p.afe[i] = clone
+		if i == len(p.afe)-1 {
+			break
+		}
+	}
+}
+
+// Section 12.2.4.
+func (p *parser) acknowledgeSelfClosingTag() {
+	p.hasSelfClosingToken = false
+}
      +
+// An insertion mode (section 12.2.3.1) is the state transition function from
+// a particular state in the HTML5 parser's state machine. It updates the
+// parser's fields depending on parser.tok (where ErrorToken means EOF).
+// It returns whether the token was consumed.
+type insertionMode func(*parser) bool
+
+// setOriginalIM sets the insertion mode to return to after completing a text or
+// inTableText insertion mode.
+// Section 12.2.3.1, "using the rules for".
+func (p *parser) setOriginalIM() {
+	if p.originalIM != nil {
+		panic("html: bad parser state: originalIM was set twice")
+	}
+	p.originalIM = p.im
+}
+
+// Section 12.2.3.1, "reset the insertion mode".
+func (p *parser) resetInsertionMode() {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		n := p.oe[i]
+		// When fragment parsing, the context element stands in for the
+		// bottom-most stack entry.
+		if i == 0 && p.context != nil {
+			n = p.context
+		}
+
+		switch n.DataAtom {
+		case a.Select:
+			p.im = inSelectIM
+		case a.Td, a.Th:
+			p.im = inCellIM
+		case a.Tr:
+			p.im = inRowIM
+		case a.Tbody, a.Thead, a.Tfoot:
+			p.im = inTableBodyIM
+		case a.Caption:
+			p.im = inCaptionIM
+		case a.Colgroup:
+			p.im = inColumnGroupIM
+		case a.Table:
+			p.im = inTableIM
+		case a.Head:
+			// NOTE(review): <head> maps to inBodyIM here, not inHeadIM —
+			// matches this vintage of upstream x/net/html; confirm against
+			// the spec before changing.
+			p.im = inBodyIM
+		case a.Body:
+			p.im = inBodyIM
+		case a.Frameset:
+			p.im = inFramesetIM
+		case a.Html:
+			p.im = beforeHeadIM
+		default:
+			continue
+		}
+		return
+	}
+	p.im = inBodyIM
+}
+
+// whitespace is the set of characters the parser treats as HTML whitespace.
+const whitespace = " \t\r\n\f"
      +
+// Section 12.2.5.4.1.
+func initialIM(p *parser) bool {
+	switch p.tok.Type {
+	case TextToken:
+		p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
+		if len(p.tok.Data) == 0 {
+			// It was all whitespace, so ignore it.
+			return true
+		}
+	case CommentToken:
+		p.doc.AppendChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+		return true
+	case DoctypeToken:
+		n, quirks := parseDoctype(p.tok.Data)
+		p.doc.AppendChild(n)
+		p.quirks = quirks
+		p.im = beforeHTMLIM
+		return true
+	}
+	// Content arrived before any doctype: enter quirks mode and reprocess the
+	// token in the "before html" insertion mode.
+	p.quirks = true
+	p.im = beforeHTMLIM
+	return false
+}
+
+// Section 12.2.5.4.2.
+func beforeHTMLIM(p *parser) bool {
+	switch p.tok.Type {
+	case DoctypeToken:
+		// Ignore the token.
+		return true
+	case TextToken:
+		p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
+		if len(p.tok.Data) == 0 {
+			// It was all whitespace, so ignore it.
+			return true
+		}
+	case StartTagToken:
+		if p.tok.DataAtom == a.Html {
+			p.addElement()
+			p.im = beforeHeadIM
+			return true
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Head, a.Body, a.Html, a.Br:
+			p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
+			return false
+		default:
+			// Ignore the token.
+			return true
+		}
+	case CommentToken:
+		p.doc.AppendChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+		return true
+	}
+	// Anything else: synthesize <html> and reprocess the current token.
+	p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
+	return false
+}
      +
+// Section 12.2.5.4.3.
+func beforeHeadIM(p *parser) bool {
+	switch p.tok.Type {
+	case TextToken:
+		p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
+		if len(p.tok.Data) == 0 {
+			// It was all whitespace, so ignore it.
+			return true
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Head:
+			p.addElement()
+			// Record the head element pointer (section 12.2.3.4).
+			p.head = p.top()
+			p.im = inHeadIM
+			return true
+		case a.Html:
+			return inBodyIM(p)
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Head, a.Body, a.Html, a.Br:
+			p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
+			return false
+		default:
+			// Ignore the token.
+			return true
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+		return true
+	case DoctypeToken:
+		// Ignore the token.
+		return true
+	}
+
+	// Anything else: synthesize <head> and reprocess the current token.
+	p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
+	return false
+}
      +
+// Section 12.2.5.4.4.
+func inHeadIM(p *parser) bool {
+	switch p.tok.Type {
+	case TextToken:
+		s := strings.TrimLeft(p.tok.Data, whitespace)
+		if len(s) < len(p.tok.Data) {
+			// Add the initial whitespace to the current node.
+			p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
+			if s == "" {
+				return true
+			}
+			p.tok.Data = s
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			return inBodyIM(p)
+		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta:
+			// Void elements: add, then pop immediately.
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			return true
+		case a.Script, a.Title, a.Noscript, a.Noframes, a.Style:
+			// Raw-text elements: switch to the text insertion mode and
+			// remember where to return afterwards.
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+			return true
+		case a.Head:
+			// Ignore the token.
+			return true
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Head:
+			n := p.oe.pop()
+			if n.DataAtom != a.Head {
+				panic("html: bad parser state: <head> element not found, in the in-head insertion mode")
+			}
+			p.im = afterHeadIM
+			return true
+		case a.Body, a.Html, a.Br:
+			p.parseImpliedToken(EndTagToken, a.Head, a.Head.String())
+			return false
+		default:
+			// Ignore the token.
+			return true
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+		return true
+	case DoctypeToken:
+		// Ignore the token.
+		return true
+	}
+
+	// Anything else: act as if </head> was seen, then reprocess the token.
+	p.parseImpliedToken(EndTagToken, a.Head, a.Head.String())
+	return false
+}
      +
+// Section 12.2.5.4.6.
+func afterHeadIM(p *parser) bool {
+	switch p.tok.Type {
+	case TextToken:
+		s := strings.TrimLeft(p.tok.Data, whitespace)
+		if len(s) < len(p.tok.Data) {
+			// Add the initial whitespace to the current node.
+			p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
+			if s == "" {
+				return true
+			}
+			p.tok.Data = s
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			return inBodyIM(p)
+		case a.Body:
+			p.addElement()
+			p.framesetOK = false
+			p.im = inBodyIM
+			return true
+		case a.Frameset:
+			p.addElement()
+			p.im = inFramesetIM
+			return true
+		case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
+			// Misplaced head-only tag: temporarily re-push the head element
+			// and process with the in-head rules, then remove it again.
+			p.oe = append(p.oe, p.head)
+			defer p.oe.remove(p.head)
+			return inHeadIM(p)
+		case a.Head:
+			// Ignore the token.
+			return true
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Body, a.Html, a.Br:
+			// Drop down to creating an implied <body> tag.
+		default:
+			// Ignore the token.
+			return true
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+		return true
+	case DoctypeToken:
+		// Ignore the token.
+		return true
+	}
+
+	// Anything else: synthesize <body> and reprocess the current token.
+	p.parseImpliedToken(StartTagToken, a.Body, a.Body.String())
+	p.framesetOK = true
+	return false
+}
      +
+// copyAttributes copies attributes of src not found on dst to dst.
+// Existing attributes on dst win; only new keys are appended.
+func copyAttributes(dst *Node, src Token) {
+	if len(src.Attr) == 0 {
+		return
+	}
+	attr := map[string]string{}
+	for _, t := range dst.Attr {
+		attr[t.Key] = t.Val
+	}
+	for _, t := range src.Attr {
+		if _, ok := attr[t.Key]; !ok {
+			dst.Attr = append(dst.Attr, t)
+			attr[t.Key] = t.Val
+		}
+	}
+}
      +
      +// Section 12.2.5.4.7.
      +func inBodyIM(p *parser) bool {
      +	switch p.tok.Type {
      +	case TextToken:
      +		d := p.tok.Data
      +		switch n := p.oe.top(); n.DataAtom {
      +		case a.Pre, a.Listing:
      +			if n.FirstChild == nil {
      +				// Ignore a newline at the start of a <pre> block.
      +				if d != "" && d[0] == '\r' {
      +					d = d[1:]
      +				}
      +				if d != "" && d[0] == '\n' {
      +					d = d[1:]
      +				}
      +			}
      +		}
      +		d = strings.Replace(d, "\x00", "", -1)
      +		if d == "" {
      +			return true
      +		}
      +		p.reconstructActiveFormattingElements()
      +		p.addText(d)
      +		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
      +			// There were non-whitespace characters inserted.
      +			p.framesetOK = false
      +		}
      +	case StartTagToken:
      +		switch p.tok.DataAtom {
      +		case a.Html:
      +			copyAttributes(p.oe[0], p.tok)
      +		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
      +			return inHeadIM(p)
      +		case a.Body:
      +			if len(p.oe) >= 2 {
      +				body := p.oe[1]
      +				if body.Type == ElementNode && body.DataAtom == a.Body {
      +					p.framesetOK = false
      +					copyAttributes(body, p.tok)
      +				}
      +			}
      +		case a.Frameset:
      +			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
      +				// Ignore the token.
      +				return true
      +			}
      +			body := p.oe[1]
      +			if body.Parent != nil {
      +				body.Parent.RemoveChild(body)
      +			}
      +			p.oe = p.oe[:1]
      +			p.addElement()
      +			p.im = inFramesetIM
      +			return true
      +		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
      +			p.popUntil(buttonScope, a.P)
      +			p.addElement()
      +		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
      +			p.popUntil(buttonScope, a.P)
      +			switch n := p.top(); n.DataAtom {
      +			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
      +				p.oe.pop()
      +			}
      +			p.addElement()
      +		case a.Pre, a.Listing:
      +			p.popUntil(buttonScope, a.P)
      +			p.addElement()
      +			// The newline, if any, will be dealt with by the TextToken case.
      +			p.framesetOK = false
      +		case a.Form:
      +			if p.form == nil {
      +				p.popUntil(buttonScope, a.P)
      +				p.addElement()
      +				p.form = p.top()
      +			}
      +		case a.Li:
      +			p.framesetOK = false
      +			for i := len(p.oe) - 1; i >= 0; i-- {
      +				node := p.oe[i]
      +				switch node.DataAtom {
      +				case a.Li:
      +					p.oe = p.oe[:i]
      +				case a.Address, a.Div, a.P:
      +					continue
      +				default:
      +					if !isSpecialElement(node) {
      +						continue
      +					}
      +				}
      +				break
      +			}
      +			p.popUntil(buttonScope, a.P)
      +			p.addElement()
      +		case a.Dd, a.Dt:
      +			p.framesetOK = false
      +			for i := len(p.oe) - 1; i >= 0; i-- {
      +				node := p.oe[i]
      +				switch node.DataAtom {
      +				case a.Dd, a.Dt:
      +					p.oe = p.oe[:i]
      +				case a.Address, a.Div, a.P:
      +					continue
      +				default:
      +					if !isSpecialElement(node) {
      +						continue
      +					}
      +				}
      +				break
      +			}
      +			p.popUntil(buttonScope, a.P)
      +			p.addElement()
      +		case a.Plaintext:
      +			p.popUntil(buttonScope, a.P)
      +			p.addElement()
      +		case a.Button:
      +			p.popUntil(defaultScope, a.Button)
      +			p.reconstructActiveFormattingElements()
      +			p.addElement()
      +			p.framesetOK = false
      +		case a.A:
      +			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
      +				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
      +					p.inBodyEndTagFormatting(a.A)
      +					p.oe.remove(n)
      +					p.afe.remove(n)
      +					break
      +				}
      +			}
      +			p.reconstructActiveFormattingElements()
      +			p.addFormattingElement()
      +		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
      +			p.reconstructActiveFormattingElements()
      +			p.addFormattingElement()
      +		case a.Nobr:
      +			p.reconstructActiveFormattingElements()
      +			if p.elementInScope(defaultScope, a.Nobr) {
      +				p.inBodyEndTagFormatting(a.Nobr)
      +				p.reconstructActiveFormattingElements()
      +			}
      +			p.addFormattingElement()
      +		case a.Applet, a.Marquee, a.Object:
      +			p.reconstructActiveFormattingElements()
      +			p.addElement()
      +			p.afe = append(p.afe, &scopeMarker)
      +			p.framesetOK = false
      +		case a.Table:
      +			if !p.quirks {
      +				p.popUntil(buttonScope, a.P)
      +			}
      +			p.addElement()
      +			p.framesetOK = false
      +			p.im = inTableIM
      +			return true
      +		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
      +			p.reconstructActiveFormattingElements()
      +			p.addElement()
      +			p.oe.pop()
      +			p.acknowledgeSelfClosingTag()
      +			if p.tok.DataAtom == a.Input {
      +				for _, t := range p.tok.Attr {
      +					if t.Key == "type" {
      +						if strings.ToLower(t.Val) == "hidden" {
      +							// Skip setting framesetOK = false
      +							return true
      +						}
      +					}
      +				}
      +			}
      +			p.framesetOK = false
      +		case a.Param, a.Source, a.Track:
      +			p.addElement()
      +			p.oe.pop()
      +			p.acknowledgeSelfClosingTag()
      +		case a.Hr:
      +			p.popUntil(buttonScope, a.P)
      +			p.addElement()
      +			p.oe.pop()
      +			p.acknowledgeSelfClosingTag()
      +			p.framesetOK = false
      +		case a.Image:
      +			p.tok.DataAtom = a.Img
      +			p.tok.Data = a.Img.String()
      +			return false
      +		case a.Isindex:
      +			if p.form != nil {
      +				// Ignore the token.
      +				return true
      +			}
      +			action := ""
      +			prompt := "This is a searchable index. Enter search keywords: "
      +			attr := []Attribute{{Key: "name", Val: "isindex"}}
      +			for _, t := range p.tok.Attr {
      +				switch t.Key {
      +				case "action":
      +					action = t.Val
      +				case "name":
      +					// Ignore the attribute.
      +				case "prompt":
      +					prompt = t.Val
      +				default:
      +					attr = append(attr, t)
      +				}
      +			}
      +			p.acknowledgeSelfClosingTag()
      +			p.popUntil(buttonScope, a.P)
      +			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
      +			if action != "" {
      +				p.form.Attr = []Attribute{{Key: "action", Val: action}}
      +			}
      +			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
      +			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
      +			p.addText(prompt)
      +			p.addChild(&Node{
      +				Type:     ElementNode,
      +				DataAtom: a.Input,
      +				Data:     a.Input.String(),
      +				Attr:     attr,
      +			})
      +			p.oe.pop()
      +			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
      +			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
      +			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
      +		case a.Textarea:
      +			p.addElement()
      +			p.setOriginalIM()
      +			p.framesetOK = false
      +			p.im = textIM
      +		case a.Xmp:
      +			p.popUntil(buttonScope, a.P)
      +			p.reconstructActiveFormattingElements()
      +			p.framesetOK = false
      +			p.addElement()
      +			p.setOriginalIM()
      +			p.im = textIM
      +		case a.Iframe:
      +			p.framesetOK = false
      +			p.addElement()
      +			p.setOriginalIM()
      +			p.im = textIM
      +		case a.Noembed, a.Noscript:
      +			p.addElement()
      +			p.setOriginalIM()
      +			p.im = textIM
      +		case a.Select:
      +			p.reconstructActiveFormattingElements()
      +			p.addElement()
      +			p.framesetOK = false
      +			p.im = inSelectIM
      +			return true
      +		case a.Optgroup, a.Option:
      +			if p.top().DataAtom == a.Option {
      +				p.oe.pop()
      +			}
      +			p.reconstructActiveFormattingElements()
      +			p.addElement()
      +		case a.Rp, a.Rt:
      +			if p.elementInScope(defaultScope, a.Ruby) {
      +				p.generateImpliedEndTags()
      +			}
      +			p.addElement()
      +		case a.Math, a.Svg:
      +			p.reconstructActiveFormattingElements()
      +			if p.tok.DataAtom == a.Math {
      +				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
      +			} else {
      +				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
      +			}
      +			adjustForeignAttributes(p.tok.Attr)
      +			p.addElement()
      +			p.top().Namespace = p.tok.Data
      +			if p.hasSelfClosingToken {
      +				p.oe.pop()
      +				p.acknowledgeSelfClosingTag()
      +			}
      +			return true
      +		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
      +			// Ignore the token.
      +		default:
      +			p.reconstructActiveFormattingElements()
      +			p.addElement()
      +		}
      +	case EndTagToken:
      +		switch p.tok.DataAtom {
      +		case a.Body:
      +			if p.elementInScope(defaultScope, a.Body) {
      +				p.im = afterBodyIM
      +			}
      +		case a.Html:
      +			if p.elementInScope(defaultScope, a.Body) {
      +				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
      +				return false
      +			}
      +			return true
      +		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
      +			p.popUntil(defaultScope, p.tok.DataAtom)
      +		case a.Form:
      +			node := p.form
      +			p.form = nil
      +			i := p.indexOfElementInScope(defaultScope, a.Form)
      +			if node == nil || i == -1 || p.oe[i] != node {
      +				// Ignore the token.
      +				return true
      +			}
      +			p.generateImpliedEndTags()
      +			p.oe.remove(node)
      +		case a.P:
      +			if !p.elementInScope(buttonScope, a.P) {
      +				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
      +			}
      +			p.popUntil(buttonScope, a.P)
      +		case a.Li:
      +			p.popUntil(listItemScope, a.Li)
      +		case a.Dd, a.Dt:
      +			p.popUntil(defaultScope, p.tok.DataAtom)
      +		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
      +			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
      +		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
      +			p.inBodyEndTagFormatting(p.tok.DataAtom)
      +		case a.Applet, a.Marquee, a.Object:
      +			if p.popUntil(defaultScope, p.tok.DataAtom) {
      +				p.clearActiveFormattingElements()
      +			}
      +		case a.Br:
      +			p.tok.Type = StartTagToken
      +			return false
      +		default:
      +			p.inBodyEndTagOther(p.tok.DataAtom)
      +		}
      +	case CommentToken:
      +		p.addChild(&Node{
      +			Type: CommentNode,
      +			Data: p.tok.Data,
      +		})
      +	}
      +
      +	return true
      +}
      +
+// inBodyEndTagFormatting handles an end tag (such as </a> or </b>) for a
+// formatting element while in the "in body" insertion mode. The formatting
+// element may be mis-nested, which is why the full adoption agency algorithm
+// is required rather than a simple stack pop.
+func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
+	// This is the "adoption agency" algorithm, described at
+	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
+
+	// TODO: this is a fairly literal line-by-line translation of that algorithm.
+	// Once the code successfully parses the comprehensive test suite, we should
+	// refactor this code to be more idiomatic.
+
+	// Steps 1-4. The outer loop.
+	for i := 0; i < 8; i++ {
+		// Step 5. Find the formatting element.
+		var formattingElement *Node
+		for j := len(p.afe) - 1; j >= 0; j-- {
+			if p.afe[j].Type == scopeMarkerNode {
+				break
+			}
+			if p.afe[j].DataAtom == tagAtom {
+				formattingElement = p.afe[j]
+				break
+			}
+		}
+		if formattingElement == nil {
+			// No matching active formatting element: fall back to the
+			// "any other end tag" steps.
+			p.inBodyEndTagOther(tagAtom)
+			return
+		}
+		feIndex := p.oe.index(formattingElement)
+		if feIndex == -1 {
+			// The formatting element is no longer on the stack of open
+			// elements; drop the stale entry from the active list.
+			p.afe.remove(formattingElement)
+			return
+		}
+		if !p.elementInScope(defaultScope, tagAtom) {
+			// Ignore the tag.
+			return
+		}
+
+		// Steps 9-10. Find the furthest block.
+		var furthestBlock *Node
+		for _, e := range p.oe[feIndex:] {
+			if isSpecialElement(e) {
+				furthestBlock = e
+				break
+			}
+		}
+		if furthestBlock == nil {
+			// No special element above the formatting element: simply pop
+			// everything up to and including it and finish.
+			e := p.oe.pop()
+			for e != formattingElement {
+				e = p.oe.pop()
+			}
+			p.afe.remove(e)
+			return
+		}
+
+		// Steps 11-12. Find the common ancestor and bookmark node.
+		commonAncestor := p.oe[feIndex-1]
+		bookmark := p.afe.index(formattingElement)
+
+		// Step 13. The inner loop. Find the lastNode to reparent.
+		lastNode := furthestBlock
+		node := furthestBlock
+		x := p.oe.index(node)
+		// Steps 13.1-13.2
+		for j := 0; j < 3; j++ {
+			// Step 13.3.
+			x--
+			node = p.oe[x]
+			// Step 13.4 - 13.5.
+			if p.afe.index(node) == -1 {
+				p.oe.remove(node)
+				continue
+			}
+			// Step 13.6.
+			if node == formattingElement {
+				break
+			}
+			// Step 13.7. Replace node with a clone in both lists so that
+			// the original can keep its existing children.
+			clone := node.clone()
+			p.afe[p.afe.index(node)] = clone
+			p.oe[p.oe.index(node)] = clone
+			node = clone
+			// Step 13.8.
+			if lastNode == furthestBlock {
+				bookmark = p.afe.index(node) + 1
+			}
+			// Step 13.9.
+			if lastNode.Parent != nil {
+				lastNode.Parent.RemoveChild(lastNode)
+			}
+			node.AppendChild(lastNode)
+			// Step 13.10.
+			lastNode = node
+		}
+
+		// Step 14. Reparent lastNode to the common ancestor,
+		// or for misnested table nodes, to the foster parent.
+		if lastNode.Parent != nil {
+			lastNode.Parent.RemoveChild(lastNode)
+		}
+		switch commonAncestor.DataAtom {
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			p.fosterParent(lastNode)
+		default:
+			commonAncestor.AppendChild(lastNode)
+		}
+
+		// Steps 15-17. Reparent nodes from the furthest block's children
+		// to a clone of the formatting element.
+		clone := formattingElement.clone()
+		reparentChildren(clone, furthestBlock)
+		furthestBlock.AppendChild(clone)
+
+		// Step 18. Fix up the list of active formatting elements.
+		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
+			// Move the bookmark with the rest of the list.
+			bookmark--
+		}
+		p.afe.remove(formattingElement)
+		p.afe.insert(bookmark, clone)
+
+		// Step 19. Fix up the stack of open elements.
+		p.oe.remove(formattingElement)
+		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
+	}
+}
      +
+// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
+// "Any other end tag" handling from 12.2.5.5 The rules for parsing tokens in foreign content
+// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
+func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
+	// Walk the stack of open elements from the top down.
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		if p.oe[i].DataAtom == tagAtom {
+			// Pop the matching element and everything above it.
+			p.oe = p.oe[:i]
+			break
+		}
+		// A special element blocks the search: the end tag is ignored.
+		if isSpecialElement(p.oe[i]) {
+			break
+		}
+	}
+}
      +
+// Section 12.2.5.4.8.
+// textIM is the "text" insertion mode, used while inside raw-text style
+// elements such as <textarea>. The boolean result reports whether the
+// token was consumed (false means reprocess it in the new insertion mode).
+func textIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		p.oe.pop()
+	case TextToken:
+		d := p.tok.Data
+		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
+			// Ignore a newline at the start of a <textarea> block.
+			if d != "" && d[0] == '\r' {
+				d = d[1:]
+			}
+			if d != "" && d[0] == '\n' {
+				d = d[1:]
+			}
+		}
+		if d == "" {
+			return true
+		}
+		p.addText(d)
+		return true
+	case EndTagToken:
+		p.oe.pop()
+	}
+	// Restore the insertion mode that was active before the text element.
+	p.im = p.originalIM
+	p.originalIM = nil
+	return p.tok.Type == EndTagToken
+}
      +
+// Section 12.2.5.4.9.
+// inTableIM is the "in table" insertion mode: it routes table-structure tags
+// (<caption>, <colgroup>, <tbody>, <tr>, ...) to their own insertion modes
+// and handles content that appears directly inside a <table>.
+func inTableIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		// Stop parsing.
+		return true
+	case TextToken:
+		p.tok.Data = strings.Replace(p.tok.Data, "\x00", "", -1)
+		switch p.oe.top().DataAtom {
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			// Pure whitespace is added in place; anything else falls
+			// through to the foster-parenting default at the bottom.
+			if strings.Trim(p.tok.Data, whitespace) == "" {
+				p.addText(p.tok.Data)
+				return true
+			}
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Caption:
+			p.clearStackToContext(tableScope)
+			p.afe = append(p.afe, &scopeMarker)
+			p.addElement()
+			p.im = inCaptionIM
+			return true
+		case a.Colgroup:
+			p.clearStackToContext(tableScope)
+			p.addElement()
+			p.im = inColumnGroupIM
+			return true
+		case a.Col:
+			// A bare <col> implies a <colgroup>; reprocess the token there.
+			p.parseImpliedToken(StartTagToken, a.Colgroup, a.Colgroup.String())
+			return false
+		case a.Tbody, a.Tfoot, a.Thead:
+			p.clearStackToContext(tableScope)
+			p.addElement()
+			p.im = inTableBodyIM
+			return true
+		case a.Td, a.Th, a.Tr:
+			// A bare cell or row implies a <tbody>; reprocess the token there.
+			p.parseImpliedToken(StartTagToken, a.Tbody, a.Tbody.String())
+			return false
+		case a.Table:
+			// A nested <table> implicitly closes the current one.
+			if p.popUntil(tableScope, a.Table) {
+				p.resetInsertionMode()
+				return false
+			}
+			// Ignore the token.
+			return true
+		case a.Style, a.Script:
+			return inHeadIM(p)
+		case a.Input:
+			for _, t := range p.tok.Attr {
+				if t.Key == "type" && strings.ToLower(t.Val) == "hidden" {
+					p.addElement()
+					p.oe.pop()
+					return true
+				}
+			}
+			// Otherwise drop down to the default action.
+		case a.Form:
+			if p.form != nil {
+				// Ignore the token.
+				return true
+			}
+			p.addElement()
+			p.form = p.oe.pop()
+		case a.Select:
+			p.reconstructActiveFormattingElements()
+			switch p.top().DataAtom {
+			case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+				p.fosterParenting = true
+			}
+			p.addElement()
+			p.fosterParenting = false
+			p.framesetOK = false
+			p.im = inSelectInTableIM
+			return true
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Table:
+			if p.popUntil(tableScope, a.Table) {
+				p.resetInsertionMode()
+				return true
+			}
+			// Ignore the token.
+			return true
+		case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+			// Ignore the token.
+			return true
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+		return true
+	case DoctypeToken:
+		// Ignore the token.
+		return true
+	}
+
+	// Anything else is processed with the "in body" rules, with foster
+	// parenting enabled so misplaced content is re-parented out of the table.
+	p.fosterParenting = true
+	defer func() { p.fosterParenting = false }()
+
+	return inBodyIM(p)
+}
      +
+// Section 12.2.5.4.11.
+// inCaptionIM is the "in caption" insertion mode, active inside a table
+// <caption>. Table-structure tags implicitly close the caption.
+func inCaptionIM(p *parser) bool {
+	switch p.tok.Type {
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Thead, a.Tr:
+			// These tags implicitly end the caption; reprocess in table mode.
+			if p.popUntil(tableScope, a.Caption) {
+				p.clearActiveFormattingElements()
+				p.im = inTableIM
+				return false
+			} else {
+				// Ignore the token.
+				return true
+			}
+		case a.Select:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+			p.im = inSelectInTableIM
+			return true
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Caption:
+			if p.popUntil(tableScope, a.Caption) {
+				p.clearActiveFormattingElements()
+				p.im = inTableIM
+			}
+			return true
+		case a.Table:
+			// </table> closes the caption first, then is reprocessed.
+			if p.popUntil(tableScope, a.Caption) {
+				p.clearActiveFormattingElements()
+				p.im = inTableIM
+				return false
+			} else {
+				// Ignore the token.
+				return true
+			}
+		case a.Body, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+			// Ignore the token.
+			return true
+		}
+	}
+	// Anything else is handled by the "in body" rules.
+	return inBodyIM(p)
+}
      +
+// Section 12.2.5.4.12.
+// inColumnGroupIM is the "in column group" insertion mode, active inside a
+// <colgroup> element.
+func inColumnGroupIM(p *parser) bool {
+	switch p.tok.Type {
+	case TextToken:
+		s := strings.TrimLeft(p.tok.Data, whitespace)
+		if len(s) < len(p.tok.Data) {
+			// Add the initial whitespace to the current node.
+			p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
+			if s == "" {
+				return true
+			}
+			// Non-whitespace remains; fall through to the default below.
+			p.tok.Data = s
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+		return true
+	case DoctypeToken:
+		// Ignore the token.
+		return true
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			return inBodyIM(p)
+		case a.Col:
+			// <col> is a void element: add it and pop immediately.
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			return true
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Colgroup:
+			if p.oe.top().DataAtom != a.Html {
+				p.oe.pop()
+				p.im = inTableIM
+			}
+			return true
+		case a.Col:
+			// Ignore the token.
+			return true
+		}
+	}
+	// Anything else implicitly closes the <colgroup> and is reprocessed
+	// in the "in table" insertion mode.
+	if p.oe.top().DataAtom != a.Html {
+		p.oe.pop()
+		p.im = inTableIM
+		return false
+	}
+	return true
+}
      +
+// Section 12.2.5.4.13.
+// inTableBodyIM is the "in table body" insertion mode, active inside
+// <tbody>, <thead> or <tfoot>.
+func inTableBodyIM(p *parser) bool {
+	switch p.tok.Type {
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Tr:
+			p.clearStackToContext(tableBodyScope)
+			p.addElement()
+			p.im = inRowIM
+			return true
+		case a.Td, a.Th:
+			// A bare cell implies a <tr>; reprocess the token there.
+			p.parseImpliedToken(StartTagToken, a.Tr, a.Tr.String())
+			return false
+		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead:
+			// These tags implicitly close the current table section.
+			if p.popUntil(tableScope, a.Tbody, a.Thead, a.Tfoot) {
+				p.im = inTableIM
+				return false
+			}
+			// Ignore the token.
+			return true
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Tbody, a.Tfoot, a.Thead:
+			if p.elementInScope(tableScope, p.tok.DataAtom) {
+				p.clearStackToContext(tableBodyScope)
+				p.oe.pop()
+				p.im = inTableIM
+			}
+			return true
+		case a.Table:
+			if p.popUntil(tableScope, a.Tbody, a.Thead, a.Tfoot) {
+				p.im = inTableIM
+				return false
+			}
+			// Ignore the token.
+			return true
+		case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Td, a.Th, a.Tr:
+			// Ignore the token.
+			return true
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+		return true
+	}
+
+	// Anything else is handled by the "in table" rules.
+	return inTableIM(p)
+}
      +
+// Section 12.2.5.4.14.
+// inRowIM is the "in row" insertion mode, active inside a <tr> element.
+func inRowIM(p *parser) bool {
+	switch p.tok.Type {
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Td, a.Th:
+			p.clearStackToContext(tableRowScope)
+			p.addElement()
+			// Mark the active formatting elements so the cell is isolated.
+			p.afe = append(p.afe, &scopeMarker)
+			p.im = inCellIM
+			return true
+		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			// These tags implicitly close the current row.
+			if p.popUntil(tableScope, a.Tr) {
+				p.im = inTableBodyIM
+				return false
+			}
+			// Ignore the token.
+			return true
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Tr:
+			if p.popUntil(tableScope, a.Tr) {
+				p.im = inTableBodyIM
+				return true
+			}
+			// Ignore the token.
+			return true
+		case a.Table:
+			if p.popUntil(tableScope, a.Tr) {
+				p.im = inTableBodyIM
+				return false
+			}
+			// Ignore the token.
+			return true
+		case a.Tbody, a.Tfoot, a.Thead:
+			if p.elementInScope(tableScope, p.tok.DataAtom) {
+				// Close the row first, then reprocess the section end tag.
+				p.parseImpliedToken(EndTagToken, a.Tr, a.Tr.String())
+				return false
+			}
+			// Ignore the token.
+			return true
+		case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Td, a.Th:
+			// Ignore the token.
+			return true
+		}
+	}
+
+	// Anything else is handled by the "in table" rules.
+	return inTableIM(p)
+}
      +
+// Section 12.2.5.4.15.
+// inCellIM is the "in cell" insertion mode, active inside a <td> or <th>.
+func inCellIM(p *parser) bool {
+	switch p.tok.Type {
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+			if p.popUntil(tableScope, a.Td, a.Th) {
+				// Close the cell and reprocess.
+				p.clearActiveFormattingElements()
+				p.im = inRowIM
+				return false
+			}
+			// Ignore the token.
+			return true
+		case a.Select:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+			p.im = inSelectInTableIM
+			return true
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Td, a.Th:
+			if !p.popUntil(tableScope, p.tok.DataAtom) {
+				// Ignore the token.
+				return true
+			}
+			p.clearActiveFormattingElements()
+			p.im = inRowIM
+			return true
+		case a.Body, a.Caption, a.Col, a.Colgroup, a.Html:
+			// Ignore the token.
+			return true
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			if !p.elementInScope(tableScope, p.tok.DataAtom) {
+				// Ignore the token.
+				return true
+			}
+			// Close the cell and reprocess.
+			p.popUntil(tableScope, a.Td, a.Th)
+			p.clearActiveFormattingElements()
+			p.im = inRowIM
+			return false
+		}
+	}
+	// Anything else is handled by the "in body" rules.
+	return inBodyIM(p)
+}
      +
+// Section 12.2.5.4.16.
+// inSelectIM is the "in select" insertion mode, active inside a <select>
+// element; only <option> and <optgroup> content is meaningful here.
+func inSelectIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		// Stop parsing.
+		return true
+	case TextToken:
+		p.addText(strings.Replace(p.tok.Data, "\x00", "", -1))
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			return inBodyIM(p)
+		case a.Option:
+			// A new <option> implicitly closes an open one.
+			if p.top().DataAtom == a.Option {
+				p.oe.pop()
+			}
+			p.addElement()
+		case a.Optgroup:
+			// A new <optgroup> implicitly closes any open <option> and <optgroup>.
+			if p.top().DataAtom == a.Option {
+				p.oe.pop()
+			}
+			if p.top().DataAtom == a.Optgroup {
+				p.oe.pop()
+			}
+			p.addElement()
+		case a.Select:
+			// A nested <select> start tag acts as an end tag.
+			p.tok.Type = EndTagToken
+			return false
+		case a.Input, a.Keygen, a.Textarea:
+			if p.elementInScope(selectScope, a.Select) {
+				p.parseImpliedToken(EndTagToken, a.Select, a.Select.String())
+				return false
+			}
+			// In order to properly ignore <textarea>, we need to change the tokenizer mode.
+			p.tokenizer.NextIsNotRawText()
+			// Ignore the token.
+			return true
+		case a.Script:
+			return inHeadIM(p)
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Option:
+			if p.top().DataAtom == a.Option {
+				p.oe.pop()
+			}
+		case a.Optgroup:
+			// </optgroup> also closes a trailing <option> inside it.
+			i := len(p.oe) - 1
+			if p.oe[i].DataAtom == a.Option {
+				i--
+			}
+			if p.oe[i].DataAtom == a.Optgroup {
+				p.oe = p.oe[:i]
+			}
+		case a.Select:
+			if p.popUntil(selectScope, a.Select) {
+				p.resetInsertionMode()
+			}
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	case DoctypeToken:
+		// Ignore the token.
+		return true
+	}
+
+	return true
+}
      +
+// Section 12.2.5.4.17.
+// inSelectInTableIM is the "in select in table" insertion mode: like
+// inSelectIM, but table-structure tags force the <select> closed.
+func inSelectInTableIM(p *parser) bool {
+	switch p.tok.Type {
+	case StartTagToken, EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Caption, a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr, a.Td, a.Th:
+			if p.tok.Type == StartTagToken || p.elementInScope(tableScope, p.tok.DataAtom) {
+				// Close the <select> and reprocess the table token.
+				p.parseImpliedToken(EndTagToken, a.Select, a.Select.String())
+				return false
+			} else {
+				// Ignore the token.
+				return true
+			}
+		}
+	}
+	// All other tokens are handled by the plain "in select" rules.
+	return inSelectIM(p)
+}
      +
+// Section 12.2.5.4.18.
+// afterBodyIM is the "after body" insertion mode, entered after </body>.
+func afterBodyIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		// Stop parsing.
+		return true
+	case TextToken:
+		s := strings.TrimLeft(p.tok.Data, whitespace)
+		if len(s) == 0 {
+			// It was all whitespace.
+			return inBodyIM(p)
+		}
+	case StartTagToken:
+		if p.tok.DataAtom == a.Html {
+			return inBodyIM(p)
+		}
+	case EndTagToken:
+		if p.tok.DataAtom == a.Html {
+			if !p.fragment {
+				p.im = afterAfterBodyIM
+			}
+			return true
+		}
+	case CommentToken:
+		// The comment is attached to the <html> element.
+		if len(p.oe) < 1 || p.oe[0].DataAtom != a.Html {
+			panic("html: bad parser state: <html> element not found, in the after-body insertion mode")
+		}
+		p.oe[0].AppendChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+		return true
+	}
+	// Anything else is a parse error: switch back to "in body" and reprocess.
+	p.im = inBodyIM
+	return false
+}
      +
+// Section 12.2.5.4.19.
+// inFramesetIM is the "in frameset" insertion mode, active inside a
+// <frameset> element.
+func inFramesetIM(p *parser) bool {
+	switch p.tok.Type {
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	case TextToken:
+		// Ignore all text but whitespace.
+		s := strings.Map(func(c rune) rune {
+			switch c {
+			case ' ', '\t', '\n', '\f', '\r':
+				return c
+			}
+			return -1
+		}, p.tok.Data)
+		if s != "" {
+			p.addText(s)
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			return inBodyIM(p)
+		case a.Frameset:
+			p.addElement()
+		case a.Frame:
+			// <frame> is a void element: add it and pop immediately.
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+		case a.Noframes:
+			return inHeadIM(p)
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Frameset:
+			if p.oe.top().DataAtom != a.Html {
+				p.oe.pop()
+				// Leaving the outermost frameset moves to "after frameset".
+				if p.oe.top().DataAtom != a.Frameset {
+					p.im = afterFramesetIM
+					return true
+				}
+			}
+		}
+	default:
+		// Ignore the token.
+	}
+	return true
+}
      +
+// Section 12.2.5.4.20.
+// afterFramesetIM is the "after frameset" insertion mode, entered after the
+// outermost </frameset>.
+func afterFramesetIM(p *parser) bool {
+	switch p.tok.Type {
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	case TextToken:
+		// Ignore all text but whitespace.
+		s := strings.Map(func(c rune) rune {
+			switch c {
+			case ' ', '\t', '\n', '\f', '\r':
+				return c
+			}
+			return -1
+		}, p.tok.Data)
+		if s != "" {
+			p.addText(s)
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			return inBodyIM(p)
+		case a.Noframes:
+			return inHeadIM(p)
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			p.im = afterAfterFramesetIM
+			return true
+		}
+	default:
+		// Ignore the token.
+	}
+	return true
+}
      +
+// Section 12.2.5.4.21.
+// afterAfterBodyIM is the "after after body" insertion mode, entered after
+// </html> following the body; comments here attach to the Document itself.
+func afterAfterBodyIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		// Stop parsing.
+		return true
+	case TextToken:
+		s := strings.TrimLeft(p.tok.Data, whitespace)
+		if len(s) == 0 {
+			// It was all whitespace.
+			return inBodyIM(p)
+		}
+	case StartTagToken:
+		if p.tok.DataAtom == a.Html {
+			return inBodyIM(p)
+		}
+	case CommentToken:
+		// Comments after the document go directly on the document node.
+		p.doc.AppendChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+		return true
+	case DoctypeToken:
+		return inBodyIM(p)
+	}
+	// Anything else is a parse error: switch back to "in body" and reprocess.
+	p.im = inBodyIM
+	return false
+}
      +
+// Section 12.2.5.4.22.
+// afterAfterFramesetIM is the "after after frameset" insertion mode, entered
+// after </html> following a frameset document.
+func afterAfterFramesetIM(p *parser) bool {
+	switch p.tok.Type {
+	case CommentToken:
+		// Comments after the document go directly on the document node.
+		p.doc.AppendChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	case TextToken:
+		// Ignore all text but whitespace.
+		s := strings.Map(func(c rune) rune {
+			switch c {
+			case ' ', '\t', '\n', '\f', '\r':
+				return c
+			}
+			return -1
+		}, p.tok.Data)
+		if s != "" {
+			p.tok.Data = s
+			return inBodyIM(p)
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			return inBodyIM(p)
+		case a.Noframes:
+			return inHeadIM(p)
+		}
+	case DoctypeToken:
+		return inBodyIM(p)
+	default:
+		// Ignore the token.
+	}
+	return true
+}
      +
+// whitespaceOrNUL is the whitespace set extended with the NUL byte; it is
+// used by parseForeignContent when deciding whether text leaves the
+// framesetOK flag set.
+const whitespaceOrNUL = whitespace + "\x00"
      +
+// Section 12.2.5.5. The rules for parsing tokens in foreign content
+// (SVG and MathML subtrees).
+func parseForeignContent(p *parser) bool {
+	switch p.tok.Type {
+	case TextToken:
+		if p.framesetOK {
+			p.framesetOK = strings.TrimLeft(p.tok.Data, whitespaceOrNUL) == ""
+		}
+		// NUL bytes are replaced with U+FFFD before insertion.
+		p.tok.Data = strings.Replace(p.tok.Data, "\x00", "\ufffd", -1)
+		p.addText(p.tok.Data)
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	case StartTagToken:
+		// Certain start tags (and <font> with particular attributes) break
+		// out of foreign content back into regular HTML parsing.
+		b := breakout[p.tok.Data]
+		if p.tok.DataAtom == a.Font {
+		loop:
+			for _, attr := range p.tok.Attr {
+				switch attr.Key {
+				case "color", "face", "size":
+					b = true
+					break loop
+				}
+			}
+		}
+		if b {
+			// Pop back to the nearest HTML node or integration point, then
+			// reprocess the token under the current insertion mode.
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				n := p.oe[i]
+				if n.Namespace == "" || htmlIntegrationPoint(n) || mathMLTextIntegrationPoint(n) {
+					p.oe = p.oe[:i+1]
+					break
+				}
+			}
+			return false
+		}
+		switch p.top().Namespace {
+		case "math":
+			adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+		case "svg":
+			// Adjust SVG tag names. The tokenizer lower-cases tag names, but
+			// SVG wants e.g. "foreignObject" with a capital second "O".
+			if x := svgTagNameAdjustments[p.tok.Data]; x != "" {
+				p.tok.DataAtom = a.Lookup([]byte(x))
+				p.tok.Data = x
+			}
+			adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+		default:
+			panic("html: bad parser state: unexpected namespace")
+		}
+		adjustForeignAttributes(p.tok.Attr)
+		// The new element inherits the namespace of the current node.
+		namespace := p.top().Namespace
+		p.addElement()
+		p.top().Namespace = namespace
+		if namespace != "" {
+			// Don't let the tokenizer go into raw text mode in foreign content
+			// (e.g. in an SVG <title> tag).
+			p.tokenizer.NextIsNotRawText()
+		}
+		if p.hasSelfClosingToken {
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+		}
+	case EndTagToken:
+		for i := len(p.oe) - 1; i >= 0; i-- {
+			if p.oe[i].Namespace == "" {
+				// Reached an HTML node: let the current insertion mode
+				// handle this end tag instead.
+				return p.im(p)
+			}
+			if strings.EqualFold(p.oe[i].Data, p.tok.Data) {
+				p.oe = p.oe[:i]
+				break
+			}
+		}
+		return true
+	default:
+		// Ignore the token.
+	}
+	return true
+}
      +
+// Section 12.2.5. inForeignContent reports whether the current token should
+// be processed by the foreign-content rules (parseForeignContent) rather
+// than by the current insertion mode.
+func (p *parser) inForeignContent() bool {
+	if len(p.oe) == 0 {
+		return false
+	}
+	n := p.oe[len(p.oe)-1]
+	if n.Namespace == "" {
+		// The current node is a regular HTML element.
+		return false
+	}
+	if mathMLTextIntegrationPoint(n) {
+		if p.tok.Type == StartTagToken && p.tok.DataAtom != a.Mglyph && p.tok.DataAtom != a.Malignmark {
+			return false
+		}
+		if p.tok.Type == TextToken {
+			return false
+		}
+	}
+	if n.Namespace == "math" && n.DataAtom == a.AnnotationXml && p.tok.Type == StartTagToken && p.tok.DataAtom == a.Svg {
+		return false
+	}
+	if htmlIntegrationPoint(n) && (p.tok.Type == StartTagToken || p.tok.Type == TextToken) {
+		return false
+	}
+	if p.tok.Type == ErrorToken {
+		return false
+	}
+	return true
+}
      +
+// parseImpliedToken parses a token as though it had appeared in the parser's
+// input. The real current token and its self-closing flag are saved,
+// restored afterwards, so the caller's token is untouched.
+func (p *parser) parseImpliedToken(t TokenType, dataAtom a.Atom, data string) {
+	realToken, selfClosing := p.tok, p.hasSelfClosingToken
+	p.tok = Token{
+		Type:     t,
+		DataAtom: dataAtom,
+		Data:     data,
+	}
+	p.hasSelfClosingToken = false
+	p.parseCurrentToken()
+	p.tok, p.hasSelfClosingToken = realToken, selfClosing
+}
      +
+// parseCurrentToken runs the current token through the parsing routines
+// until it is consumed.
+func (p *parser) parseCurrentToken() {
+	// A self-closing tag is treated as a start tag plus a flag.
+	if p.tok.Type == SelfClosingTagToken {
+		p.hasSelfClosingToken = true
+		p.tok.Type = StartTagToken
+	}
+
+	// Each pass dispatches to either the foreign-content rules or the
+	// current insertion mode; a false return means "reprocess the token".
+	consumed := false
+	for !consumed {
+		if p.inForeignContent() {
+			consumed = parseForeignContent(p)
+		} else {
+			consumed = p.im(p)
+		}
+	}
+
+	if p.hasSelfClosingToken {
+		// This is a parse error, but ignore it.
+		p.hasSelfClosingToken = false
+	}
+}
      +
+// parse drives the tokenizer-to-tree-construction loop until EOF, returning
+// any non-EOF tokenizer error.
+func (p *parser) parse() error {
+	// Iterate until EOF. Any other error will cause an early return.
+	var err error
+	for err != io.EOF {
+		// CDATA sections are allowed only in foreign content.
+		n := p.oe.top()
+		p.tokenizer.AllowCDATA(n != nil && n.Namespace != "")
+		// Read and parse the next token.
+		p.tokenizer.Next()
+		p.tok = p.tokenizer.Token()
+		if p.tok.Type == ErrorToken {
+			err = p.tokenizer.Err()
+			if err != nil && err != io.EOF {
+				return err
+			}
+		}
+		// Note that the EOF ErrorToken is still parsed once (so the tree
+		// can be finalized) before the loop condition stops iteration.
+		p.parseCurrentToken()
+	}
+	return nil
+}
      +
+// Parse returns the parse tree for the HTML from the given Reader.
+// The input is assumed to be UTF-8 encoded.
+func Parse(r io.Reader) (*Node, error) {
+	// Parsing starts in the "initial" insertion mode with scripting
+	// enabled and framesetOK set.
+	p := &parser{
+		tokenizer: NewTokenizer(r),
+		doc: &Node{
+			Type: DocumentNode,
+		},
+		scripting:  true,
+		framesetOK: true,
+		im:         initialIM,
+	}
+	err := p.parse()
+	if err != nil {
+		return nil, err
+	}
+	return p.doc, nil
+}
      +
+// ParseFragment parses a fragment of HTML and returns the nodes that were
+// found. If the fragment is the InnerHTML for an existing element, pass that
+// element in context.
+func ParseFragment(r io.Reader, context *Node) ([]*Node, error) {
+	contextTag := ""
+	if context != nil {
+		if context.Type != ElementNode {
+			return nil, errors.New("html: ParseFragment of non-element Node")
+		}
+		// The next check isn't just context.DataAtom.String() == context.Data because
+		// it is valid to pass an element whose tag isn't a known atom. For example,
+		// DataAtom == 0 and Data = "tagfromthefuture" is perfectly consistent.
+		if context.DataAtom != a.Lookup([]byte(context.Data)) {
+			return nil, fmt.Errorf("html: inconsistent Node: DataAtom=%q, Data=%q", context.DataAtom, context.Data)
+		}
+		contextTag = context.DataAtom.String()
+	}
+	p := &parser{
+		tokenizer: NewTokenizerFragment(r, contextTag),
+		doc: &Node{
+			Type: DocumentNode,
+		},
+		scripting: true,
+		fragment:  true,
+		context:   context,
+	}
+
+	// Fragment parsing always happens beneath a synthetic <html> root.
+	root := &Node{
+		Type:     ElementNode,
+		DataAtom: a.Html,
+		Data:     a.Html.String(),
+	}
+	p.doc.AppendChild(root)
+	p.oe = nodeStack{root}
+	p.resetInsertionMode()
+
+	// Inherit the nearest enclosing <form> element, if any, from context.
+	for n := context; n != nil; n = n.Parent {
+		if n.Type == ElementNode && n.DataAtom == a.Form {
+			p.form = n
+			break
+		}
+	}
+
+	err := p.parse()
+	if err != nil {
+		return nil, err
+	}
+
+	parent := p.doc
+	if context != nil {
+		parent = root
+	}
+
+	// Detach the parsed children from their parent before returning them.
+	var result []*Node
+	for c := parent.FirstChild; c != nil; {
+		next := c.NextSibling
+		parent.RemoveChild(c)
+		result = append(result, c)
+		c = next
+	}
+	return result, nil
+}
      diff --git a/vendor/golang.org/x/net/html/parse_test.go b/vendor/golang.org/x/net/html/parse_test.go
      new file mode 100644
      index 00000000..7e47d11b
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/parse_test.go
      @@ -0,0 +1,388 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +import (
      +	"bufio"
      +	"bytes"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"io/ioutil"
      +	"os"
      +	"path/filepath"
      +	"runtime"
      +	"sort"
      +	"strings"
      +	"testing"
      +
      +	"golang.org/x/net/html/atom"
      +)
      +
+// readParseTest reads a single test case from r. The format is the WebKit
+// .dat format: a "#data" section holding the input HTML, an "#errors"
+// section (skipped), an optional "#document-fragment" naming a context
+// element, and a "#document" dump of the expected parse tree.
+func readParseTest(r *bufio.Reader) (text, want, context string, err error) {
+	line, err := r.ReadSlice('\n')
+	if err != nil {
+		return "", "", "", err
+	}
+	var b []byte
+
+	// Read the HTML.
+	if string(line) != "#data\n" {
+		return "", "", "", fmt.Errorf(`got %q want "#data\n"`, line)
+	}
+	for {
+		line, err = r.ReadSlice('\n')
+		if err != nil {
+			return "", "", "", err
+		}
+		if line[0] == '#' {
+			break
+		}
+		b = append(b, line...)
+	}
+	text = strings.TrimSuffix(string(b), "\n")
+	b = b[:0]
+
+	// Skip the error list.
+	if string(line) != "#errors\n" {
+		return "", "", "", fmt.Errorf(`got %q want "#errors\n"`, line)
+	}
+	for {
+		line, err = r.ReadSlice('\n')
+		if err != nil {
+			return "", "", "", err
+		}
+		if line[0] == '#' {
+			break
+		}
+	}
+
+	if string(line) == "#document-fragment\n" {
+		line, err = r.ReadSlice('\n')
+		if err != nil {
+			return "", "", "", err
+		}
+		context = strings.TrimSpace(string(line))
+		line, err = r.ReadSlice('\n')
+		if err != nil {
+			return "", "", "", err
+		}
+	}
+
+	// Read the dump of what the parse tree should be.
+	if string(line) != "#document\n" {
+		return "", "", "", fmt.Errorf(`got %q want "#document\n"`, line)
+	}
+	// inQuote tracks whether we are inside a multi-line quoted text node,
+	// in which case a blank line does not terminate the dump.
+	inQuote := false
+	for {
+		line, err = r.ReadSlice('\n')
+		if err != nil && err != io.EOF {
+			return "", "", "", err
+		}
+		trimmed := bytes.Trim(line, "| \n")
+		if len(trimmed) > 0 {
+			if line[0] == '|' && trimmed[0] == '"' {
+				inQuote = true
+			}
+			if trimmed[len(trimmed)-1] == '"' && !(line[0] == '|' && len(trimmed) == 1) {
+				inQuote = false
+			}
+		}
+		if len(line) == 0 || len(line) == 1 && line[0] == '\n' && !inQuote {
+			break
+		}
+		b = append(b, line...)
+	}
+	return text, string(b), context, nil
+}
      +
+// dumpIndent writes the "| " prefix followed by two spaces per nesting
+// level, matching the test files' #document dump format.
+func dumpIndent(w io.Writer, level int) {
+	io.WriteString(w, "| ")
+	for i := 0; i < level; i++ {
+		io.WriteString(w, "  ")
+	}
+}
      +
+// sortedAttributes implements sort.Interface over a []Attribute, ordering by
+// namespace and then by key, so that dumped attribute order is deterministic.
+type sortedAttributes []Attribute
+
+func (a sortedAttributes) Len() int {
+	return len(a)
+}
+
+func (a sortedAttributes) Less(i, j int) bool {
+	if a[i].Namespace != a[j].Namespace {
+		return a[i].Namespace < a[j].Namespace
+	}
+	return a[i].Key < a[j].Key
+}
+
+func (a sortedAttributes) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
      +
+// dumpLevel writes the subtree rooted at n to w in the test files'
+// #document format, indented by level, recursing into children at level+1.
+// It returns an error for node types that should never appear in a dump.
+func dumpLevel(w io.Writer, n *Node, level int) error {
+	dumpIndent(w, level)
+	switch n.Type {
+	case ErrorNode:
+		return errors.New("unexpected ErrorNode")
+	case DocumentNode:
+		return errors.New("unexpected DocumentNode")
+	case ElementNode:
+		if n.Namespace != "" {
+			fmt.Fprintf(w, "<%s %s>", n.Namespace, n.Data)
+		} else {
+			fmt.Fprintf(w, "<%s>", n.Data)
+		}
+		// Attributes are sorted so the dump is deterministic.
+		attr := sortedAttributes(n.Attr)
+		sort.Sort(attr)
+		for _, a := range attr {
+			io.WriteString(w, "\n")
+			dumpIndent(w, level+1)
+			if a.Namespace != "" {
+				fmt.Fprintf(w, `%s %s="%s"`, a.Namespace, a.Key, a.Val)
+			} else {
+				fmt.Fprintf(w, `%s="%s"`, a.Key, a.Val)
+			}
+		}
+	case TextNode:
+		fmt.Fprintf(w, `"%s"`, n.Data)
+	case CommentNode:
+		fmt.Fprintf(w, "<!-- %s -->", n.Data)
+	case DoctypeNode:
+		fmt.Fprintf(w, "<!DOCTYPE %s", n.Data)
+		if n.Attr != nil {
+			// The public and system identifiers are carried as attributes.
+			var p, s string
+			for _, a := range n.Attr {
+				switch a.Key {
+				case "public":
+					p = a.Val
+				case "system":
+					s = a.Val
+				}
+			}
+			if p != "" || s != "" {
+				fmt.Fprintf(w, ` "%s"`, p)
+				fmt.Fprintf(w, ` "%s"`, s)
+			}
+		}
+		io.WriteString(w, ">")
+	case scopeMarkerNode:
+		return errors.New("unexpected scopeMarkerNode")
+	default:
+		return errors.New("unknown node type")
+	}
+	io.WriteString(w, "\n")
+	for c := n.FirstChild; c != nil; c = c.NextSibling {
+		if err := dumpLevel(w, c, level+1); err != nil {
+			return err
+		}
+	}
+	return nil
+}
      +
+// dump returns the #document-format dump of the children of n, or the empty
+// string when n is nil or has no children.
+func dump(n *Node) (string, error) {
+	if n == nil || n.FirstChild == nil {
+		return "", nil
+	}
+	var b bytes.Buffer
+	for c := n.FirstChild; c != nil; c = c.NextSibling {
+		if err := dumpLevel(&b, c, 0); err != nil {
+			return "", err
+		}
+	}
+	return b.String(), nil
+}
      +
+// testDataDir is the directory holding the WebKit .dat test files that
+// TestParser globs over.
+const testDataDir = "testdata/webkit/"
      +
      +func TestParser(t *testing.T) {
      +	testFiles, err := filepath.Glob(testDataDir + "*.dat")
      +	if err != nil {
      +		t.Fatal(err)
      +	}
      +	for _, tf := range testFiles {
      +		f, err := os.Open(tf)
      +		if err != nil {
      +			t.Fatal(err)
      +		}
      +		defer f.Close()
      +		r := bufio.NewReader(f)
      +
      +		for i := 0; ; i++ {
      +			text, want, context, err := readParseTest(r)
      +			if err == io.EOF {
      +				break
      +			}
      +			if err != nil {
      +				t.Fatal(err)
      +			}
      +
      +			err = testParseCase(text, want, context)
      +
      +			if err != nil {
      +				t.Errorf("%s test #%d %q, %s", tf, i, text, err)
      +			}
      +		}
      +	}
      +}
      +
+// testParseCase tests one test case from the test files. If the test does not
+// pass, it returns an error that explains the failure.
+// text is the HTML to be parsed, want is a dump of the correct parse tree,
+// and context is the name of the context node, if any.
+func testParseCase(text, want, context string) (err error) {
+	// Convert any panic inside the parser into a returned error, so a
+	// single bad case doesn't abort the whole test binary.
+	defer func() {
+		if x := recover(); x != nil {
+			switch e := x.(type) {
+			case error:
+				err = e
+			default:
+				err = fmt.Errorf("%v", e)
+			}
+		}
+	}()
+
+	var doc *Node
+	if context == "" {
+		doc, err = Parse(strings.NewReader(text))
+		if err != nil {
+			return err
+		}
+	} else {
+		// Fragment case: parse under a synthetic context element, then
+		// collect the resulting nodes beneath a fresh Document.
+		contextNode := &Node{
+			Type:     ElementNode,
+			DataAtom: atom.Lookup([]byte(context)),
+			Data:     context,
+		}
+		nodes, err := ParseFragment(strings.NewReader(text), contextNode)
+		if err != nil {
+			return err
+		}
+		doc = &Node{
+			Type: DocumentNode,
+		}
+		for _, n := range nodes {
+			doc.AppendChild(n)
+		}
+	}
+
+	if err := checkTreeConsistency(doc); err != nil {
+		return err
+	}
+
+	got, err := dump(doc)
+	if err != nil {
+		return err
+	}
+	// Compare the parsed tree to the #document section.
+	if got != want {
+		return fmt.Errorf("got vs want:\n----\n%s----\n%s----", got, want)
+	}
+
+	if renderTestBlacklist[text] || context != "" {
+		return nil
+	}
+
+	// Check that rendering and re-parsing results in an identical tree.
+	pr, pw := io.Pipe()
+	go func() {
+		pw.CloseWithError(Render(pw, doc))
+	}()
+	doc1, err := Parse(pr)
+	if err != nil {
+		return err
+	}
+	got1, err := dump(doc1)
+	if err != nil {
+		return err
+	}
+	if got != got1 {
+		return fmt.Errorf("got vs got1:\n----\n%s----\n%s----", got, got1)
+	}
+
+	return nil
+}
      +
+// Some test inputs result in parse trees that are not 'well-formed' despite
+// following the HTML5 recovery algorithms. Rendering and re-parsing such a
+// tree will not result in an exact clone of that tree. We blacklist such
+// inputs from the render test.
+var renderTestBlacklist = map[string]bool{
+	// The second <a> will be reparented to the first <table>'s parent. This
+	// results in an <a> whose parent is an <a>, which is not 'well-formed'.
+	`<a><table><td><a><table></table><a></tr><a></table><b>X</b>C<a>Y`: true,
+	// The same thing with a <p>:
+	`<p><table></p>`: true,
+	// More cases of <a> being reparented:
+	`<a href="blah">aba<table><a href="foo">br<tr><td></td></tr>x</table>aoe`: true,
+	`<a><table><a></table><p><a><div><a>`:                                     true,
+	`<a><table><td><a><table></table><a></tr><a></table><a>`:                  true,
+	// A similar reparenting situation involving <nobr>:
+	`<!DOCTYPE html><body><b><nobr>1<table><nobr></b><i><nobr>2<nobr></i>3`: true,
+	// A <plaintext> element is reparented, putting it before a table.
+	// A <plaintext> element can't have anything after it in HTML.
+	`<table><plaintext><td>`:                                   true,
+	`<!doctype html><table><plaintext></plaintext>`:            true,
+	`<!doctype html><table><tbody><plaintext></plaintext>`:     true,
+	`<!doctype html><table><tbody><tr><plaintext></plaintext>`: true,
+	// A form inside a table inside a form doesn't work either.
+	`<!doctype html><form><table></form><form></table></form>`: true,
+	// A script that ends at EOF may escape its own closing tag when rendered.
+	`<!doctype html><script><!--<script `:          true,
+	`<!doctype html><script><!--<script <`:         true,
+	`<!doctype html><script><!--<script <a`:        true,
+	`<!doctype html><script><!--<script </`:        true,
+	`<!doctype html><script><!--<script </s`:       true,
+	`<!doctype html><script><!--<script </script`:  true,
+	`<!doctype html><script><!--<script </scripta`: true,
+	`<!doctype html><script><!--<script -`:         true,
+	`<!doctype html><script><!--<script -a`:        true,
+	`<!doctype html><script><!--<script -<`:        true,
+	`<!doctype html><script><!--<script --`:        true,
+	`<!doctype html><script><!--<script --a`:       true,
+	`<!doctype html><script><!--<script --<`:       true,
+	`<script><!--<script `:                         true,
+	`<script><!--<script <a`:                       true,
+	`<script><!--<script </script`:                 true,
+	`<script><!--<script </scripta`:                true,
+	`<script><!--<script -`:                        true,
+	`<script><!--<script -a`:                       true,
+	`<script><!--<script --`:                       true,
+	`<script><!--<script --a`:                      true,
+	`<script><!--<script <`:                        true,
+	`<script><!--<script </`:                       true,
+	`<script><!--<script </s`:                      true,
+	// Reconstructing the active formatting elements results in a <plaintext>
+	// element that contains an <a> element.
+	`<!doctype html><p><a><plaintext>b`: true,
+}
      +
+// TestNodeConsistency checks that ParseFragment rejects a context node whose
+// DataAtom and Data disagree.
+func TestNodeConsistency(t *testing.T) {
+	// inconsistentNode is a Node whose DataAtom and Data do not agree.
+	inconsistentNode := &Node{
+		Type:     ElementNode,
+		DataAtom: atom.Frameset,
+		Data:     "table",
+	}
+	_, err := ParseFragment(strings.NewReader("<p>hello</p>"), inconsistentNode)
+	if err == nil {
+		t.Errorf("got nil error, want non-nil")
+	}
+}
      +
+// BenchmarkParser measures parsing throughput over testdata/go1.html;
+// SetBytes lets the framework report MB/s.
+func BenchmarkParser(b *testing.B) {
+	buf, err := ioutil.ReadFile("testdata/go1.html")
+	if err != nil {
+		b.Fatalf("could not read testdata/go1.html: %v", err)
+	}
+	b.SetBytes(int64(len(buf)))
+	// Start from a clean heap and exclude the setup above from the timing.
+	runtime.GC()
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		Parse(bytes.NewBuffer(buf))
+	}
+}
      diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go
      new file mode 100644
      index 00000000..d34564f4
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/render.go
      @@ -0,0 +1,271 @@
      +// Copyright 2011 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +import (
      +	"bufio"
      +	"errors"
      +	"fmt"
      +	"io"
      +	"strings"
      +)
      +
+// writer is the set of write methods the render routines use; a bufio.Writer
+// satisfies it, which is what Render wraps plain io.Writers in.
+type writer interface {
+	io.Writer
+	io.ByteWriter
+	WriteString(string) (int, error)
+}
      +
+// Render renders the parse tree n to the given writer.
+//
+// Rendering is done on a 'best effort' basis: calling Parse on the output of
+// Render will always result in something similar to the original tree, but it
+// is not necessarily an exact clone unless the original tree was 'well-formed'.
+// 'Well-formed' is not easily specified; the HTML5 specification is
+// complicated.
+//
+// Calling Parse on arbitrary input typically results in a 'well-formed' parse
+// tree. However, it is possible for Parse to yield a 'badly-formed' parse tree.
+// For example, in a 'well-formed' parse tree, no <a> element is a child of
+// another <a> element: parsing "<a><a>" results in two sibling elements.
+// Similarly, in a 'well-formed' parse tree, no <a> element is a child of a
+// <table> element: parsing "<p><table><a>" results in a <p> with two sibling
+// children; the <a> is reparented to the <table>'s parent. However, calling
+// Parse on "<a><table><a>" does not return an error, but the result has an <a>
+// element with an <a> child, and is therefore not 'well-formed'.
+//
+// Programmatically constructed trees are typically also 'well-formed', but it
+// is possible to construct a tree that looks innocuous but, when rendered and
+// re-parsed, results in a different tree. A simple example is that a solitary
+// text node would become a tree containing <html>, <head> and <body> elements.
+// Another example is that the programmatic equivalent of "a<head>b</head>c"
+// becomes "<html><head><head/><body>abc</body></html>".
+func Render(w io.Writer, n *Node) error {
+	// Fast path: w already provides the methods render needs.
+	if x, ok := w.(writer); ok {
+		return render(x, n)
+	}
+	// Otherwise wrap it in a bufio.Writer, and remember to flush.
+	buf := bufio.NewWriter(w)
+	if err := render(buf, n); err != nil {
+		return err
+	}
+	return buf.Flush()
+}
      +
+// plaintextAbort is returned from render1 when a <plaintext> element
+// has been rendered. No more end tags should be rendered after that.
+var plaintextAbort = errors.New("html: internal error (plaintext abort)")
+
+// render renders n to w, translating the internal plaintextAbort sentinel
+// into a nil (successful) result.
+func render(w writer, n *Node) error {
+	err := render1(w, n)
+	if err == plaintextAbort {
+		err = nil
+	}
+	return err
+}
      +
+// render1 does the recursive work of rendering the subtree rooted at n to w.
+// It may return the sentinel plaintextAbort after emitting a <plaintext>
+// element, since nothing may follow one.
+func render1(w writer, n *Node) error {
+	// Render non-element nodes; these are the easy cases.
+	switch n.Type {
+	case ErrorNode:
+		return errors.New("html: cannot render an ErrorNode node")
+	case TextNode:
+		return escape(w, n.Data)
+	case DocumentNode:
+		for c := n.FirstChild; c != nil; c = c.NextSibling {
+			if err := render1(w, c); err != nil {
+				return err
+			}
+		}
+		return nil
+	case ElementNode:
+		// No-op.
+	case CommentNode:
+		if _, err := w.WriteString("<!--"); err != nil {
+			return err
+		}
+		if _, err := w.WriteString(n.Data); err != nil {
+			return err
+		}
+		if _, err := w.WriteString("-->"); err != nil {
+			return err
+		}
+		return nil
+	case DoctypeNode:
+		if _, err := w.WriteString("<!DOCTYPE "); err != nil {
+			return err
+		}
+		if _, err := w.WriteString(n.Data); err != nil {
+			return err
+		}
+		if n.Attr != nil {
+			// The doctype's public/system identifiers are attributes.
+			var p, s string
+			for _, a := range n.Attr {
+				switch a.Key {
+				case "public":
+					p = a.Val
+				case "system":
+					s = a.Val
+				}
+			}
+			if p != "" {
+				if _, err := w.WriteString(" PUBLIC "); err != nil {
+					return err
+				}
+				if err := writeQuoted(w, p); err != nil {
+					return err
+				}
+				if s != "" {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+					if err := writeQuoted(w, s); err != nil {
+						return err
+					}
+				}
+			} else if s != "" {
+				if _, err := w.WriteString(" SYSTEM "); err != nil {
+					return err
+				}
+				if err := writeQuoted(w, s); err != nil {
+					return err
+				}
+			}
+		}
+		return w.WriteByte('>')
+	default:
+		return errors.New("html: unknown node type")
+	}
+
+	// Render the <xxx> opening tag.
+	if err := w.WriteByte('<'); err != nil {
+		return err
+	}
+	if _, err := w.WriteString(n.Data); err != nil {
+		return err
+	}
+	for _, a := range n.Attr {
+		if err := w.WriteByte(' '); err != nil {
+			return err
+		}
+		if a.Namespace != "" {
+			if _, err := w.WriteString(a.Namespace); err != nil {
+				return err
+			}
+			if err := w.WriteByte(':'); err != nil {
+				return err
+			}
+		}
+		if _, err := w.WriteString(a.Key); err != nil {
+			return err
+		}
+		if _, err := w.WriteString(`="`); err != nil {
+			return err
+		}
+		if err := escape(w, a.Val); err != nil {
+			return err
+		}
+		if err := w.WriteByte('"'); err != nil {
+			return err
+		}
+	}
+	if voidElements[n.Data] {
+		// Void elements are rendered self-closing and may have no children.
+		if n.FirstChild != nil {
+			return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
+		}
+		_, err := w.WriteString("/>")
+		return err
+	}
+	if err := w.WriteByte('>'); err != nil {
+		return err
+	}
+
+	// Add initial newline where there is danger of a newline being ignored.
+	if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
+		switch n.Data {
+		case "pre", "listing", "textarea":
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Render any child nodes. Raw-text elements emit their text children
+	// without escaping.
+	switch n.Data {
+	case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
+		for c := n.FirstChild; c != nil; c = c.NextSibling {
+			if c.Type == TextNode {
+				if _, err := w.WriteString(c.Data); err != nil {
+					return err
+				}
+			} else {
+				if err := render1(w, c); err != nil {
+					return err
+				}
+			}
+		}
+		if n.Data == "plaintext" {
+			// Don't render anything else. <plaintext> must be the
+			// last element in the file, with no closing tag.
+			return plaintextAbort
+		}
+	default:
+		for c := n.FirstChild; c != nil; c = c.NextSibling {
+			if err := render1(w, c); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Render the </xxx> closing tag.
+	if _, err := w.WriteString("</"); err != nil {
+		return err
+	}
+	if _, err := w.WriteString(n.Data); err != nil {
+		return err
+	}
+	return w.WriteByte('>')
+}
      +
      +// writeQuoted writes s to w surrounded by quotes. Normally it will use double
      +// quotes, but if s contains a double quote, it will use single quotes.
      +// It is used for writing the identifiers in a doctype declaration.
      +// In valid HTML, they can't contain both types of quotes.
      +func writeQuoted(w writer, s string) error {
      +	var q byte = '"'
      +	if strings.Contains(s, `"`) {
      +		q = '\''
      +	}
      +	if err := w.WriteByte(q); err != nil {
      +		return err
      +	}
      +	if _, err := w.WriteString(s); err != nil {
      +		return err
      +	}
      +	if err := w.WriteByte(q); err != nil {
      +		return err
      +	}
      +	return nil
      +}
      +
+// Section 12.1.2, "Elements", gives this list of void elements. Void elements
+// are those that can't have any contents. render1 emits them as
+// self-closing ("<br/>") and reports an error if one has child nodes.
+var voidElements = map[string]bool{
+	"area":    true,
+	"base":    true,
+	"br":      true,
+	"col":     true,
+	"command": true,
+	"embed":   true,
+	"hr":      true,
+	"img":     true,
+	"input":   true,
+	"keygen":  true,
+	"link":    true,
+	"meta":    true,
+	"param":   true,
+	"source":  true,
+	"track":   true,
+	"wbr":     true,
+}
      diff --git a/vendor/golang.org/x/net/html/render_test.go b/vendor/golang.org/x/net/html/render_test.go
      new file mode 100644
      index 00000000..11da54b3
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/render_test.go
      @@ -0,0 +1,156 @@
      +// Copyright 2010 The Go Authors. All rights reserved.
      +// Use of this source code is governed by a BSD-style
      +// license that can be found in the LICENSE file.
      +
      +package html
      +
      +import (
      +	"bytes"
      +	"testing"
      +)
      +
      +func TestRenderer(t *testing.T) {
      +	nodes := [...]*Node{
      +		0: {
      +			Type: ElementNode,
      +			Data: "html",
      +		},
      +		1: {
      +			Type: ElementNode,
      +			Data: "head",
      +		},
      +		2: {
      +			Type: ElementNode,
      +			Data: "body",
      +		},
      +		3: {
      +			Type: TextNode,
      +			Data: "0<1",
      +		},
      +		4: {
      +			Type: ElementNode,
      +			Data: "p",
      +			Attr: []Attribute{
      +				{
      +					Key: "id",
      +					Val: "A",
      +				},
      +				{
      +					Key: "foo",
      +					Val: `abc"def`,
      +				},
      +			},
      +		},
      +		5: {
      +			Type: TextNode,
      +			Data: "2",
      +		},
      +		6: {
      +			Type: ElementNode,
      +			Data: "b",
      +			Attr: []Attribute{
      +				{
      +					Key: "empty",
      +					Val: "",
      +				},
      +			},
      +		},
      +		7: {
      +			Type: TextNode,
      +			Data: "3",
      +		},
      +		8: {
      +			Type: ElementNode,
      +			Data: "i",
      +			Attr: []Attribute{
      +				{
      +					Key: "backslash",
      +					Val: `\`,
      +				},
      +			},
      +		},
      +		9: {
      +			Type: TextNode,
      +			Data: "&4",
      +		},
      +		10: {
      +			Type: TextNode,
      +			Data: "5",
      +		},
      +		11: {
      +			Type: ElementNode,
      +			Data: "blockquote",
      +		},
      +		12: {
      +			Type: ElementNode,
      +			Data: "br",
      +		},
      +		13: {
      +			Type: TextNode,
      +			Data: "6",
      +		},
      +	}
      +
      +	// Build a tree out of those nodes, based on a textual representation.
      +	// Only the ".\t"s are significant. The trailing HTML-like text is
      +	// just commentary. The "0:" prefixes are for easy cross-reference with
      +	// the nodes array.
      +	treeAsText := [...]string{
      +		0: `<html>`,
      +		1: `.	<head>`,
      +		2: `.	<body>`,
      +		3: `.	.	"0&lt;1"`,
      +		4: `.	.	<p id="A" foo="abc&#34;def">`,
      +		5: `.	.	.	"2"`,
      +		6: `.	.	.	<b empty="">`,
      +		7: `.	.	.	.	"3"`,
      +		8: `.	.	.	<i backslash="\">`,
      +		9: `.	.	.	.	"&amp;4"`,
      +		10: `.	.	"5"`,
      +		11: `.	.	<blockquote>`,
      +		12: `.	.	<br>`,
      +		13: `.	.	"6"`,
      +	}
      +	if len(nodes) != len(treeAsText) {
      +		t.Fatal("len(nodes) != len(treeAsText)")
      +	}
      +	var stack [8]*Node
      +	for i, line := range treeAsText {
      +		level := 0
      +		for line[0] == '.' {
      +			// Strip a leading ".\t".
      +			line = line[2:]
      +			level++
      +		}
      +		n := nodes[i]
      +		if level == 0 {
      +			if stack[0] != nil {
      +				t.Fatal("multiple root nodes")
      +			}
      +			stack[0] = n
      +		} else {
      +			stack[level-1].AppendChild(n)
      +			stack[level] = n
      +			for i := level + 1; i < len(stack); i++ {
      +				stack[i] = nil
      +			}
      +		}
      +		// At each stage of tree construction, we check all nodes for consistency.
      +		for j, m := range nodes {
      +			if err := checkNodeConsistency(m); err != nil {
      +				t.Fatalf("i=%d, j=%d: %v", i, j, err)
      +			}
      +		}
      +	}
      +
      +	want := `<html><head></head><body>0&lt;1<p id="A" foo="abc&#34;def">` +
      +		`2<b empty="">3</b><i backslash="\">&amp;4</i></p>` +
      +		`5<blockquote></blockquote><br/>6</body></html>`
      +	b := new(bytes.Buffer)
      +	if err := Render(b, nodes[0]); err != nil {
      +		t.Fatal(err)
      +	}
      +	if got := b.String(); got != want {
      +		t.Errorf("got vs want:\n%s\n%s\n", got, want)
      +	}
      +}
      diff --git a/vendor/golang.org/x/net/html/testdata/go1.html b/vendor/golang.org/x/net/html/testdata/go1.html
      new file mode 100644
      index 00000000..d238257c
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/go1.html
      @@ -0,0 +1,2237 @@
      +<!DOCTYPE html>
      +<html>
      +<head>
      +<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
      +
      +  <title>Go 1 Release Notes - The Go Programming Language</title>
      +
      +<link type="text/css" rel="stylesheet" href="/doc/style.css">
      +<script type="text/javascript" src="/doc/godocs.js"></script>
      +
      +<link rel="search" type="application/opensearchdescription+xml" title="godoc" href="/opensearch.xml" />
      +
      +<script type="text/javascript">
      +var _gaq = _gaq || [];
      +_gaq.push(["_setAccount", "UA-11222381-2"]);
      +_gaq.push(["_trackPageview"]);
      +</script>
      +</head>
      +<body>
      +
      +<div id="topbar"><div class="container wide">
      +
      +<form method="GET" action="/search">
      +<div id="menu">
      +<a href="/doc/">Documents</a>
      +<a href="/ref/">References</a>
      +<a href="/pkg/">Packages</a>
      +<a href="/project/">The Project</a>
      +<a href="/help/">Help</a>
      +<input type="text" id="search" name="q" class="inactive" value="Search">
      +</div>
      +<div id="heading"><a href="/">The Go Programming Language</a></div>
      +</form>
      +
      +</div></div>
      +
      +<div id="page" class="wide">
      +
      +
      +  <div id="plusone"><g:plusone size="small" annotation="none"></g:plusone></div>
      +  <h1>Go 1 Release Notes</h1>
      +
      +
      +
      +
      +<div id="nav"></div>
      +
      +
      +
      +
      +<h2 id="introduction">Introduction to Go 1</h2>
      +
      +<p>
      +Go version 1, Go 1 for short, defines a language and a set of core libraries
      +that provide a stable foundation for creating reliable products, projects, and
      +publications.
      +</p>
      +
      +<p>
      +The driving motivation for Go 1 is stability for its users. People should be able to
      +write Go programs and expect that they will continue to compile and run without
      +change, on a time scale of years, including in production environments such as
      +Google App Engine. Similarly, people should be able to write books about Go, be
      +able to say which version of Go the book is describing, and have that version
      +number still be meaningful much later.
      +</p>
      +
      +<p>
      +Code that compiles in Go 1 should, with few exceptions, continue to compile and
      +run throughout the lifetime of that version, even as we issue updates and bug
      +fixes such as Go version 1.1, 1.2, and so on. Other than critical fixes, changes
      +made to the language and library for subsequent releases of Go 1 may
      +add functionality but will not break existing Go 1 programs.
      +<a href="go1compat.html">The Go 1 compatibility document</a>
      +explains the compatibility guidelines in more detail.
      +</p>
      +
      +<p>
      +Go 1 is a representation of Go as it used today, not a wholesale rethinking of
      +the language. We avoided designing new features and instead focused on cleaning
      +up problems and inconsistencies and improving portability. There are a number
      +changes to the Go language and packages that we had considered for some time and
      +prototyped but not released primarily because they are significant and
      +backwards-incompatible. Go 1 was an opportunity to get them out, which is
      +helpful for the long term, but also means that Go 1 introduces incompatibilities
      +for old programs. Fortunately, the <code>go</code> <code>fix</code> tool can
      +automate much of the work needed to bring programs up to the Go 1 standard.
      +</p>
      +
      +<p>
      +This document outlines the major changes in Go 1 that will affect programmers
      +updating existing code; its reference point is the prior release, r60 (tagged as
      +r60.3). It also explains how to update code from r60 to run under Go 1.
      +</p>
      +
      +<h2 id="language">Changes to the language</h2>
      +
      +<h3 id="append">Append</h3>
      +
      +<p>
      +The <code>append</code> predeclared variadic function makes it easy to grow a slice
      +by adding elements to the end.
      +A common use is to add bytes to the end of a byte slice when generating output.
      +However, <code>append</code> did not provide a way to append a string to a <code>[]byte</code>,
      +which is another common case.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/greeting := ..byte/` `/append.*hello/`}}
      +-->    greeting := []byte{}
      +    greeting = append(greeting, []byte(&#34;hello &#34;)...)</pre>
      +
      +<p>
      +By analogy with the similar property of <code>copy</code>, Go 1
      +permits a string to be appended (byte-wise) directly to a byte
      +slice, reducing the friction between strings and byte slices.
      +The conversion is no longer necessary:
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/append.*world/`}}
      +-->    greeting = append(greeting, &#34;world&#34;...)</pre>
      +
      +<p>
      +<em>Updating</em>:
      +This is a new feature, so existing code needs no changes.
      +</p>
      +
      +<h3 id="close">Close</h3>
      +
      +<p>
      +The <code>close</code> predeclared function provides a mechanism
      +for a sender to signal that no more values will be sent.
      +It is important to the implementation of <code>for</code> <code>range</code>
      +loops over channels and is helpful in other situations.
      +Partly by design and partly because of race conditions that can occur otherwise,
      +it is intended for use only by the goroutine sending on the channel,
      +not by the goroutine receiving data.
      +However, before Go 1 there was no compile-time checking that <code>close</code>
      +was being used correctly.
      +</p>
      +
      +<p>
      +To close this gap, at least in part, Go 1 disallows <code>close</code> on receive-only channels.
      +Attempting to close such a channel is a compile-time error.
      +</p>
      +
      +<pre>
      +    var c chan int
      +    var csend chan&lt;- int = c
      +    var crecv &lt;-chan int = c
      +    close(c)     // legal
      +    close(csend) // legal
      +    close(crecv) // illegal
      +</pre>
      +
      +<p>
      +<em>Updating</em>:
      +Existing code that attempts to close a receive-only channel was
      +erroneous even before Go 1 and should be fixed.  The compiler will
      +now reject such code.
      +</p>
      +
      +<h3 id="literals">Composite literals</h3>
      +
      +<p>
      +In Go 1, a composite literal of array, slice, or map type can elide the
      +type specification for the elements' initializers if they are of pointer type.
      +All four of the initializations in this example are legal; the last one was illegal before Go 1.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/type Date struct/` `/STOP/`}}
      +-->    type Date struct {
      +        month string
      +        day   int
      +    }
      +    <span class="comment">// Struct values, fully qualified; always legal.</span>
      +    holiday1 := []Date{
      +        Date{&#34;Feb&#34;, 14},
      +        Date{&#34;Nov&#34;, 11},
      +        Date{&#34;Dec&#34;, 25},
      +    }
      +    <span class="comment">// Struct values, type name elided; always legal.</span>
      +    holiday2 := []Date{
      +        {&#34;Feb&#34;, 14},
      +        {&#34;Nov&#34;, 11},
      +        {&#34;Dec&#34;, 25},
      +    }
      +    <span class="comment">// Pointers, fully qualified, always legal.</span>
      +    holiday3 := []*Date{
      +        &amp;Date{&#34;Feb&#34;, 14},
      +        &amp;Date{&#34;Nov&#34;, 11},
      +        &amp;Date{&#34;Dec&#34;, 25},
      +    }
      +    <span class="comment">// Pointers, type name elided; legal in Go 1.</span>
      +    holiday4 := []*Date{
      +        {&#34;Feb&#34;, 14},
      +        {&#34;Nov&#34;, 11},
      +        {&#34;Dec&#34;, 25},
      +    }</pre>
      +
      +<p>
      +<em>Updating</em>:
      +This change has no effect on existing code, but the command
      +<code>gofmt</code> <code>-s</code> applied to existing source
      +will, among other things, elide explicit element types wherever permitted.
      +</p>
      +
      +
      +<h3 id="init">Goroutines during init</h3>
      +
      +<p>
      +The old language defined that <code>go</code> statements executed during initialization created goroutines but that they did not begin to run until initialization of the entire program was complete.
      +This introduced clumsiness in many places and, in effect, limited the utility
      +of the <code>init</code> construct:
      +if it was possible for another package to use the library during initialization, the library
      +was forced to avoid goroutines.
      +This design was done for reasons of simplicity and safety but,
      +as our confidence in the language grew, it seemed unnecessary.
      +Running goroutines during initialization is no more complex or unsafe than running them during normal execution.
      +</p>
      +
      +<p>
      +In Go 1, code that uses goroutines can be called from
      +<code>init</code> routines and global initialization expressions
      +without introducing a deadlock.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/PackageGlobal/` `/^}/`}}
      +-->var PackageGlobal int
      +
      +func init() {
      +    c := make(chan int)
      +    go initializationFunction(c)
      +    PackageGlobal = &lt;-c
      +}</pre>
      +
      +<p>
      +<em>Updating</em>:
      +This is a new feature, so existing code needs no changes,
      +although it's possible that code that depends on goroutines not starting before <code>main</code> will break.
      +There was no such code in the standard repository.
      +</p>
      +
      +<h3 id="rune">The rune type</h3>
      +
      +<p>
      +The language spec allows the <code>int</code> type to be 32 or 64 bits wide, but current implementations set <code>int</code> to 32 bits even on 64-bit platforms.
      +It would be preferable to have <code>int</code> be 64 bits on 64-bit platforms.
      +(There are important consequences for indexing large slices.)
      +However, this change would waste space when processing Unicode characters with
      +the old language because the <code>int</code> type was also used to hold Unicode code points: each code point would waste an extra 32 bits of storage if <code>int</code> grew from 32 bits to 64.
      +</p>
      +
      +<p>
      +To make changing to 64-bit <code>int</code> feasible,
      +Go 1 introduces a new basic type, <code>rune</code>, to represent
      +individual Unicode code points.
      +It is an alias for <code>int32</code>, analogous to <code>byte</code>
      +as an alias for <code>uint8</code>.
      +</p>
      +
      +<p>
      +Character literals such as <code>'a'</code>, <code>'語'</code>, and <code>'\u0345'</code>
      +now have default type <code>rune</code>,
      +analogous to <code>1.0</code> having default type <code>float64</code>.
      +A variable initialized to a character constant will therefore
      +have type <code>rune</code> unless otherwise specified.
      +</p>
      +
      +<p>
      +Libraries have been updated to use <code>rune</code> rather than <code>int</code>
      +when appropriate. For instance, the functions <code>unicode.ToLower</code> and
      +relatives now take and return a <code>rune</code>.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/STARTRUNE/` `/ENDRUNE/`}}
      +-->    delta := &#39;δ&#39; <span class="comment">// delta has type rune.</span>
      +    var DELTA rune
      +    DELTA = unicode.ToUpper(delta)
      +    epsilon := unicode.ToLower(DELTA + 1)
      +    if epsilon != &#39;δ&#39;+1 {
      +        log.Fatal(&#34;inconsistent casing for Greek&#34;)
      +    }</pre>
      +
      +<p>
      +<em>Updating</em>:
      +Most source code will be unaffected by this because the type inference from
      +<code>:=</code> initializers introduces the new type silently, and it propagates
      +from there.
      +Some code may get type errors that a trivial conversion will resolve.
      +</p>
      +
      +<h3 id="error">The error type</h3>
      +
      +<p>
      +Go 1 introduces a new built-in type, <code>error</code>, which has the following definition:
      +</p>
      +
      +<pre>
      +    type error interface {
      +        Error() string
      +    }
      +</pre>
      +
      +<p>
      +Since the consequences of this type are all in the package library,
      +it is discussed <a href="#errors">below</a>.
      +</p>
      +
      +<h3 id="delete">Deleting from maps</h3>
      +
      +<p>
      +In the old language, to delete the entry with key <code>k</code> from map <code>m</code>, one wrote the statement,
      +</p>
      +
      +<pre>
      +    m[k] = value, false
      +</pre>
      +
      +<p>
      +This syntax was a peculiar special case, the only two-to-one assignment.
      +It required passing a value (usually ignored) that is evaluated but discarded,
      +plus a boolean that was nearly always the constant <code>false</code>.
      +It did the job but was odd and a point of contention.
      +</p>
      +
      +<p>
      +In Go 1, that syntax has gone; instead there is a new built-in
      +function, <code>delete</code>.  The call
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/delete\(m, k\)/`}}
      +-->    delete(m, k)</pre>
      +
      +<p>
      +will delete the map entry retrieved by the expression <code>m[k]</code>.
      +There is no return value. Deleting a non-existent entry is a no-op.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will convert expressions of the form <code>m[k] = value,
      +false</code> into <code>delete(m, k)</code> when it is clear that
      +the ignored value can be safely discarded from the program and
      +<code>false</code> refers to the predefined boolean constant.
      +The fix tool
      +will flag other uses of the syntax for inspection by the programmer.
      +</p>
      +
      +<h3 id="iteration">Iterating in maps</h3>
      +
      +<p>
      +The old language specification did not define the order of iteration for maps,
      +and in practice it differed across hardware platforms.
      +This caused tests that iterated over maps to be fragile and non-portable, with the
      +unpleasant property that a test might always pass on one machine but break on another.
      +</p>
      +
      +<p>
      +In Go 1, the order in which elements are visited when iterating
      +over a map using a <code>for</code> <code>range</code> statement
      +is defined to be unpredictable, even if the same loop is run multiple
      +times with the same map.
      +Code should not assume that the elements are visited in any particular order.
      +</p>
      +
      +<p>
      +This change means that code that depends on iteration order is very likely to break early and be fixed long before it becomes a problem.
      +Just as important, it allows the map implementation to ensure better map balancing even when programs are using range loops to select an element from a map.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/Sunday/` `/^	}/`}}
      +-->    m := map[string]int{&#34;Sunday&#34;: 0, &#34;Monday&#34;: 1}
      +    for name, value := range m {
      +        <span class="comment">// This loop should not assume Sunday will be visited first.</span>
      +        f(name, value)
      +    }</pre>
      +
      +<p>
      +<em>Updating</em>:
      +This is one change where tools cannot help.  Most existing code
      +will be unaffected, but some programs may break or misbehave; we
      +recommend manual checking of all range statements over maps to
      +verify they do not depend on iteration order. There were a few such
      +examples in the standard repository; they have been fixed.
      +Note that it was already incorrect to depend on the iteration order, which
      +was unspecified. This change codifies the unpredictability.
      +</p>
      +
      +<h3 id="multiple_assignment">Multiple assignment</h3>
      +
      +<p>
      +The language specification has long guaranteed that in assignments
      +the right-hand-side expressions are all evaluated before any left-hand-side expressions are assigned.
      +To guarantee predictable behavior,
      +Go 1 refines the specification further.
      +</p>
      +
      +<p>
      +If the left-hand side of the assignment
      +statement contains expressions that require evaluation, such as
      +function calls or array indexing operations, these will all be done
      +using the usual left-to-right rule before any variables are assigned
      +their value.  Once everything is evaluated, the actual assignments
      +proceed in left-to-right order.
      +</p>
      +
      +<p>
      +These examples illustrate the behavior.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/sa :=/` `/then sc.0. = 2/`}}
      +-->    sa := []int{1, 2, 3}
      +    i := 0
      +    i, sa[i] = 1, 2 <span class="comment">// sets i = 1, sa[0] = 2</span>
      +
      +    sb := []int{1, 2, 3}
      +    j := 0
      +    sb[j], j = 2, 1 <span class="comment">// sets sb[0] = 2, j = 1</span>
      +
      +    sc := []int{1, 2, 3}
      +    sc[0], sc[0] = 1, 2 <span class="comment">// sets sc[0] = 1, then sc[0] = 2 (so sc[0] = 2 at end)</span></pre>
      +
      +<p>
      +<em>Updating</em>:
      +This is one change where tools cannot help, but breakage is unlikely.
      +No code in the standard repository was broken by this change, and code
      +that depended on the previous unspecified behavior was already incorrect.
      +</p>
      +
      +<h3 id="shadowing">Returns and shadowed variables</h3>
      +
      +<p>
      +A common mistake is to use <code>return</code> (without arguments) after an assignment to a variable that has the same name as a result variable but is not the same variable.
      +This situation is called <em>shadowing</em>: the result variable has been shadowed by another variable with the same name declared in an inner scope.
      +</p>
      +
      +<p>
      +In functions with named return values,
      +the Go 1 compilers disallow return statements without arguments if any of the named return values is shadowed at the point of the return statement.
      +(It isn't part of the specification, because this is one area we are still exploring;
      +the situation is analogous to the compilers rejecting functions that do not end with an explicit return statement.)
      +</p>
      +
      +<p>
      +This function implicitly returns a shadowed return value and will be rejected by the compiler:
      +</p>
      +
      +<pre>
      +    func Bug() (i, j, k int) {
      +        for i = 0; i &lt; 5; i++ {
      +            for j := 0; j &lt; 5; j++ { // Redeclares j.
      +                k += i*j
      +                if k > 100 {
      +                    return // Rejected: j is shadowed here.
      +                }
      +            }
      +        }
      +        return // OK: j is not shadowed here.
      +    }
      +</pre>
      +
      +<p>
      +<em>Updating</em>:
      +Code that shadows return values in this way will be rejected by the compiler and will need to be fixed by hand.
      +The few cases that arose in the standard repository were mostly bugs.
      +</p>
      +
      +<h3 id="unexported">Copying structs with unexported fields</h3>
      +
      +<p>
      +The old language did not allow a package to make a copy of a struct value containing unexported fields belonging to a different package.
      +There was, however, a required exception for a method receiver;
      +also, the implementations of <code>copy</code> and <code>append</code> have never honored the restriction.
      +</p>
      +
      +<p>
      +Go 1 will allow packages to copy struct values containing unexported fields from other packages.
      +Besides resolving the inconsistency,
      +this change admits a new kind of API: a package can return an opaque value without resorting to a pointer or interface.
      +The new implementations of <code>time.Time</code> and
      +<code>reflect.Value</code> are examples of types taking advantage of this new property.
      +</p>
      +
      +<p>
      +As an example, if package <code>p</code> includes the definitions,
      +</p>
      +
      +<pre>
      +    type Struct struct {
      +        Public int
      +        secret int
      +    }
      +    func NewStruct(a int) Struct {  // Note: not a pointer.
      +        return Struct{a, f(a)}
      +    }
      +    func (s Struct) String() string {
      +        return fmt.Sprintf("{%d (secret %d)}", s.Public, s.secret)
      +    }
      +</pre>
      +
      +<p>
      +a package that imports <code>p</code> can assign and copy values of type
      +<code>p.Struct</code> at will.
      +Behind the scenes the unexported fields will be assigned and copied just
      +as if they were exported,
      +but the client code will never be aware of them. The code
      +</p>
      +
      +<pre>
      +    import "p"
      +
      +    myStruct := p.NewStruct(23)
      +    copyOfMyStruct := myStruct
      +    fmt.Println(myStruct, copyOfMyStruct)
      +</pre>
      +
      +<p>
      +will show that the secret field of the struct has been copied to the new value.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +This is a new feature, so existing code needs no changes.
      +</p>
      +
      +<h3 id="equality">Equality</h3>
      +
      +<p>
      +Before Go 1, the language did not define equality on struct and array values.
      +This meant,
      +among other things, that structs and arrays could not be used as map keys.
      +On the other hand, Go did define equality on function and map values.
      +Function equality was problematic in the presence of closures
      +(when are two closures equal?)
      +while map equality compared pointers, not the maps' content, which was usually
      +not what the user would want.
      +</p>
      +
      +<p>
      +Go 1 addressed these issues.
      +First, structs and arrays can be compared for equality and inequality
      +(<code>==</code> and <code>!=</code>),
      +and therefore be used as map keys,
      +provided they are composed from elements for which equality is also defined,
      +using element-wise comparison.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/type Day struct/` `/Printf/`}}
      +-->    type Day struct {
      +        long  string
      +        short string
      +    }
      +    Christmas := Day{&#34;Christmas&#34;, &#34;XMas&#34;}
      +    Thanksgiving := Day{&#34;Thanksgiving&#34;, &#34;Turkey&#34;}
      +    holiday := map[Day]bool{
      +        Christmas:    true,
      +        Thanksgiving: true,
      +    }
      +    fmt.Printf(&#34;Christmas is a holiday: %t\n&#34;, holiday[Christmas])</pre>
      +
      +<p>
      +Second, Go 1 removes the definition of equality for function values,
      +except for comparison with <code>nil</code>.
      +Finally, map equality is gone too, also except for comparison with <code>nil</code>.
      +</p>
      +
      +<p>
      +Note that equality is still undefined for slices, for which the
      +calculation is in general infeasible.  Also note that the ordered
      +comparison operators (<code>&lt;</code> <code>&lt;=</code>
      +<code>&gt;</code> <code>&gt;=</code>) are still undefined for
      +structs and arrays.
      +
      +<p>
      +<em>Updating</em>:
      +Struct and array equality is a new feature, so existing code needs no changes.
      +Existing code that depends on function or map equality will be
      +rejected by the compiler and will need to be fixed by hand.
      +Few programs will be affected, but the fix may require some
      +redesign.
      +</p>
      +
      +<h2 id="packages">The package hierarchy</h2>
      +
      +<p>
      +Go 1 addresses many deficiencies in the old standard library and
      +cleans up a number of packages, making them more internally consistent
      +and portable.
      +</p>
      +
      +<p>
      +This section describes how the packages have been rearranged in Go 1.
      +Some have moved, some have been renamed, some have been deleted.
      +New packages are described in later sections.
      +</p>
      +
      +<h3 id="hierarchy">The package hierarchy</h3>
      +
      +<p>
      +Go 1 has a rearranged package hierarchy that groups related items
      +into subdirectories. For instance, <code>utf8</code> and
      +<code>utf16</code> now occupy subdirectories of <code>unicode</code>.
      +Also, <a href="#subrepo">some packages</a> have moved into
      +subrepositories of
      +<a href="http://code.google.com/p/go"><code>code.google.com/p/go</code></a>
      +while <a href="#deleted">others</a> have been deleted outright.
      +</p>
      +
      +<table class="codetable" frame="border" summary="Moved packages">
      +<colgroup align="left" width="60%"></colgroup>
      +<colgroup align="left" width="40%"></colgroup>
      +<tr>
      +<th align="left">Old path</th>
      +<th align="left">New path</th>
      +</tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>asn1</td> <td>encoding/asn1</td></tr>
      +<tr><td>csv</td> <td>encoding/csv</td></tr>
      +<tr><td>gob</td> <td>encoding/gob</td></tr>
      +<tr><td>json</td> <td>encoding/json</td></tr>
      +<tr><td>xml</td> <td>encoding/xml</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>exp/template/html</td> <td>html/template</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>big</td> <td>math/big</td></tr>
      +<tr><td>cmath</td> <td>math/cmplx</td></tr>
      +<tr><td>rand</td> <td>math/rand</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>http</td> <td>net/http</td></tr>
      +<tr><td>http/cgi</td> <td>net/http/cgi</td></tr>
      +<tr><td>http/fcgi</td> <td>net/http/fcgi</td></tr>
      +<tr><td>http/httptest</td> <td>net/http/httptest</td></tr>
      +<tr><td>http/pprof</td> <td>net/http/pprof</td></tr>
      +<tr><td>mail</td> <td>net/mail</td></tr>
      +<tr><td>rpc</td> <td>net/rpc</td></tr>
      +<tr><td>rpc/jsonrpc</td> <td>net/rpc/jsonrpc</td></tr>
      +<tr><td>smtp</td> <td>net/smtp</td></tr>
      +<tr><td>url</td> <td>net/url</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>exec</td> <td>os/exec</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>scanner</td> <td>text/scanner</td></tr>
      +<tr><td>tabwriter</td> <td>text/tabwriter</td></tr>
      +<tr><td>template</td> <td>text/template</td></tr>
      +<tr><td>template/parse</td> <td>text/template/parse</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>utf8</td> <td>unicode/utf8</td></tr>
      +<tr><td>utf16</td> <td>unicode/utf16</td></tr>
      +</table>
      +
      +<p>
      +Note that the package names for the old <code>cmath</code> and
      +<code>exp/template/html</code> packages have changed to <code>cmplx</code>
      +and <code>template</code>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will update all imports and package renames for packages that
      +remain inside the standard repository.  Programs that import packages
      +that are no longer in the standard repository will need to be edited
      +by hand.
      +</p>
      +
      +<h3 id="exp">The package tree exp</h3>
      +
      +<p>
      +Because they are not standardized, the packages under the <code>exp</code> directory will not be available in the
      +standard Go 1 release distributions, although they will be available in source code form
      +in <a href="http://code.google.com/p/go/">the repository</a> for
      +developers who wish to use them.
      +</p>
      +
      +<p>
      +Several packages have moved under <code>exp</code> at the time of Go 1's release:
      +</p>
      +
      +<ul>
      +<li><code>ebnf</code></li>
      +<li><code>html</code><sup>&#8224;</sup></li>
      +<li><code>go/types</code></li>
      +</ul>
      +
      +<p>
      +(<sup>&#8224;</sup>The <code>EscapeString</code> and <code>UnescapeString</code> types remain
      +in package <code>html</code>.)
      +</p>
      +
      +<p>
      +All these packages are available under the same names, with the prefix <code>exp/</code>: <code>exp/ebnf</code> etc.
      +</p>
      +
      +<p>
      +Also, the <code>utf8.String</code> type has been moved to its own package, <code>exp/utf8string</code>.
      +</p>
      +
      +<p>
      +Finally, the <code>gotype</code> command now resides in <code>exp/gotype</code>, while
      +<code>ebnflint</code> is now in <code>exp/ebnflint</code>.
      +If they are installed, they now reside in <code>$GOROOT/bin/tool</code>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Code that uses packages in <code>exp</code> will need to be updated by hand,
      +or else compiled from an installation that has <code>exp</code> available.
      +The <code>go</code> <code>fix</code> tool or the compiler will complain about such uses.
      +</p>
      +
      +<h3 id="old">The package tree old</h3>
      +
      +<p>
      +Because they are deprecated, the packages under the <code>old</code> directory will not be available in the
      +standard Go 1 release distributions, although they will be available in source code form for
      +developers who wish to use them.
      +</p>
      +
      +<p>
      +The packages in their new locations are:
      +</p>
      +
      +<ul>
      +<li><code>old/netchan</code></li>
      +<li><code>old/regexp</code></li>
      +<li><code>old/template</code></li>
      +</ul>
      +
      +<p>
      +<em>Updating</em>:
      +Code that uses packages now in <code>old</code> will need to be updated by hand,
      +or else compiled from an installation that has <code>old</code> available.
      +The <code>go</code> <code>fix</code> tool will warn about such uses.
      +</p>
      +
      +<h3 id="deleted">Deleted packages</h3>
      +
      +<p>
      +Go 1 deletes several packages outright:
      +</p>
      +
      +<ul>
      +<li><code>container/vector</code></li>
      +<li><code>exp/datafmt</code></li>
      +<li><code>go/typechecker</code></li>
      +<li><code>try</code></li>
      +</ul>
      +
      +<p>
      +and also the command <code>gotry</code>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Code that uses <code>container/vector</code> should be updated to use
      +slices directly.  See
      +<a href="http://code.google.com/p/go-wiki/wiki/SliceTricks">the Go
      +Language Community Wiki</a> for some suggestions.
      +Code that uses the other packages (there should be almost zero) will need to be rethought.
      +</p>
      +
      +<h3 id="subrepo">Packages moving to subrepositories</h3>
      +
      +<p>
      +Go 1 has moved a number of packages into other repositories, usually sub-repositories of
      +<a href="http://code.google.com/p/go/">the main Go repository</a>.
+This table lists the old and new import paths:
+</p>
+
      +<table class="codetable" frame="border" summary="Sub-repositories">
      +<colgroup align="left" width="40%"></colgroup>
      +<colgroup align="left" width="60%"></colgroup>
      +<tr>
      +<th align="left">Old</th>
      +<th align="left">New</th>
      +</tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
+<tr><td>crypto/bcrypt</td> <td>code.google.com/p/go.crypto/bcrypt</td></tr>
+<tr><td>crypto/blowfish</td> <td>code.google.com/p/go.crypto/blowfish</td></tr>
+<tr><td>crypto/cast5</td> <td>code.google.com/p/go.crypto/cast5</td></tr>
+<tr><td>crypto/md4</td> <td>code.google.com/p/go.crypto/md4</td></tr>
+<tr><td>crypto/ocsp</td> <td>code.google.com/p/go.crypto/ocsp</td></tr>
+<tr><td>crypto/openpgp</td> <td>code.google.com/p/go.crypto/openpgp</td></tr>
+<tr><td>crypto/openpgp/armor</td> <td>code.google.com/p/go.crypto/openpgp/armor</td></tr>
+<tr><td>crypto/openpgp/elgamal</td> <td>code.google.com/p/go.crypto/openpgp/elgamal</td></tr>
+<tr><td>crypto/openpgp/errors</td> <td>code.google.com/p/go.crypto/openpgp/errors</td></tr>
+<tr><td>crypto/openpgp/packet</td> <td>code.google.com/p/go.crypto/openpgp/packet</td></tr>
+<tr><td>crypto/openpgp/s2k</td> <td>code.google.com/p/go.crypto/openpgp/s2k</td></tr>
+<tr><td>crypto/ripemd160</td> <td>code.google.com/p/go.crypto/ripemd160</td></tr>
+<tr><td>crypto/twofish</td> <td>code.google.com/p/go.crypto/twofish</td></tr>
+<tr><td>crypto/xtea</td> <td>code.google.com/p/go.crypto/xtea</td></tr>
+<tr><td>exp/ssh</td> <td>code.google.com/p/go.crypto/ssh</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
+<tr><td>image/bmp</td> <td>code.google.com/p/go.image/bmp</td></tr>
+<tr><td>image/tiff</td> <td>code.google.com/p/go.image/tiff</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
+<tr><td>net/dict</td> <td>code.google.com/p/go.net/dict</td></tr>
+<tr><td>net/websocket</td> <td>code.google.com/p/go.net/websocket</td></tr>
+<tr><td>exp/spdy</td> <td>code.google.com/p/go.net/spdy</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
+<tr><td>encoding/git85</td> <td>code.google.com/p/go.codereview/git85</td></tr>
+<tr><td>patch</td> <td>code.google.com/p/go.codereview/patch</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
+<tr><td>exp/wingui</td> <td>code.google.com/p/gowingui</td></tr>
      +</table>
      +
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will update imports of these packages to use the new import paths.
      +Installations that depend on these packages will need to install them using
      +a <code>go get</code> command.
      +</p>
      +
      +<h2 id="major">Major changes to the library</h2>
      +
      +<p>
      +This section describes significant changes to the core libraries, the ones that
      +affect the most programs.
      +</p>
      +
      +<h3 id="errors">The error type and errors package</h3>
      +
      +<p>
      +The placement of <code>os.Error</code> in package <code>os</code> is mostly historical: errors first came up when implementing package <code>os</code>, and they seemed system-related at the time.
      +Since then it has become clear that errors are more fundamental than the operating system.  For example, it would be nice to use <code>Errors</code> in packages that <code>os</code> depends on, like <code>syscall</code>.
      +Also, having <code>Error</code> in <code>os</code> introduces many dependencies on <code>os</code> that would otherwise not exist.
      +</p>
      +
      +<p>
      +Go 1 solves these problems by introducing a built-in <code>error</code> interface type and a separate <code>errors</code> package (analogous to <code>bytes</code> and <code>strings</code>) that contains utility functions.
      +It replaces <code>os.NewError</code> with
      +<a href="/pkg/errors/#New"><code>errors.New</code></a>,
      +giving errors a more central place in the environment.
      +</p>
      +
      +<p>
+So that the widely-used <code>String</code> method does not cause accidental satisfaction
+of the <code>error</code> interface, the <code>error</code> interface instead uses
+the name <code>Error</code> for that method:
      +</p>
      +
      +<pre>
      +    type error interface {
      +        Error() string
      +    }
      +</pre>
      +
      +<p>
      +The <code>fmt</code> library automatically invokes <code>Error</code>, as it already
      +does for <code>String</code>, for easy printing of error values.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/START ERROR EXAMPLE/` `/END ERROR EXAMPLE/`}}
      +-->type SyntaxError struct {
      +    File    string
      +    Line    int
      +    Message string
      +}
      +
      +func (se *SyntaxError) Error() string {
      +    return fmt.Sprintf(&#34;%s:%d: %s&#34;, se.File, se.Line, se.Message)
      +}</pre>
      +
      +<p>
      +All standard packages have been updated to use the new interface; the old <code>os.Error</code> is gone.
      +</p>
      +
      +<p>
      +A new package, <a href="/pkg/errors/"><code>errors</code></a>, contains the function
      +</p>
      +
      +<pre>
      +func New(text string) error
      +</pre>
      +
      +<p>
      +to turn a string into an error. It replaces the old <code>os.NewError</code>.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/ErrSyntax/`}}
      +-->    var ErrSyntax = errors.New(&#34;syntax error&#34;)</pre>
      +		
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
      +Code that defines error types with a <code>String</code> method will need to be updated
      +by hand to rename the methods to <code>Error</code>.
      +</p>
      +
      +<h3 id="errno">System call errors</h3>
      +
      +<p>
      +The old <code>syscall</code> package, which predated <code>os.Error</code>
      +(and just about everything else),
      +returned errors as <code>int</code> values.
      +In turn, the <code>os</code> package forwarded many of these errors, such
      +as <code>EINVAL</code>, but using a different set of errors on each platform.
      +This behavior was unpleasant and unportable.
      +</p>
      +
      +<p>
      +In Go 1, the
      +<a href="/pkg/syscall/"><code>syscall</code></a>
      +package instead returns an <code>error</code> for system call errors.
      +On Unix, the implementation is done by a
      +<a href="/pkg/syscall/#Errno"><code>syscall.Errno</code></a> type
      +that satisfies <code>error</code> and replaces the old <code>os.Errno</code>.
      +</p>
      +
      +<p>
+The changes affecting <code>os.EINVAL</code> and relatives are
+described <a href="#os">elsewhere</a>.
+</p>
      +
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
      +Regardless, most code should use the <code>os</code> package
      +rather than <code>syscall</code> and so will be unaffected.
      +</p>
      +
      +<h3 id="time">Time</h3>
      +
      +<p>
      +Time is always a challenge to support well in a programming language.
      +The old Go <code>time</code> package had <code>int64</code> units, no
      +real type safety,
      +and no distinction between absolute times and durations.
      +</p>
      +
      +<p>
      +One of the most sweeping changes in the Go 1 library is therefore a
      +complete redesign of the
      +<a href="/pkg/time/"><code>time</code></a> package.
      +Instead of an integer number of nanoseconds as an <code>int64</code>,
      +and a separate <code>*time.Time</code> type to deal with human
      +units such as hours and years,
      +there are now two fundamental types:
      +<a href="/pkg/time/#Time"><code>time.Time</code></a>
      +(a value, so the <code>*</code> is gone), which represents a moment in time;
      +and <a href="/pkg/time/#Duration"><code>time.Duration</code></a>,
      +which represents an interval.
      +Both have nanosecond resolution.
      +A <code>Time</code> can represent any time into the ancient
      +past and remote future, while a <code>Duration</code> can
      +span plus or minus only about 290 years.
      +There are methods on these types, plus a number of helpful
      +predefined constant durations such as <code>time.Second</code>.
      +</p>
      +
      +<p>
      +Among the new methods are things like
      +<a href="/pkg/time/#Time.Add"><code>Time.Add</code></a>,
      +which adds a <code>Duration</code> to a <code>Time</code>, and
      +<a href="/pkg/time/#Time.Sub"><code>Time.Sub</code></a>,
      +which subtracts two <code>Times</code> to yield a <code>Duration</code>.
      +</p>
      +
      +<p>
      +The most important semantic change is that the Unix epoch (Jan 1, 1970) is now
      +relevant only for those functions and methods that mention Unix:
      +<a href="/pkg/time/#Unix"><code>time.Unix</code></a>
      +and the <a href="/pkg/time/#Time.Unix"><code>Unix</code></a>
      +and <a href="/pkg/time/#Time.UnixNano"><code>UnixNano</code></a> methods
      +of the <code>Time</code> type.
      +In particular,
      +<a href="/pkg/time/#Now"><code>time.Now</code></a>
      +returns a <code>time.Time</code> value rather than, in the old
      +API, an integer nanosecond count since the Unix epoch.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/sleepUntil/` `/^}/`}}
      +--><span class="comment">// sleepUntil sleeps until the specified time. It returns immediately if it&#39;s too late.</span>
      +func sleepUntil(wakeup time.Time) {
      +    now := time.Now() <span class="comment">// A Time.</span>
      +    if !wakeup.After(now) {
      +        return
      +    }
      +    delta := wakeup.Sub(now) <span class="comment">// A Duration.</span>
      +    fmt.Printf(&#34;Sleeping for %.3fs\n&#34;, delta.Seconds())
      +    time.Sleep(delta)
      +}</pre>
      +
      +<p>
      +The new types, methods, and constants have been propagated through
      +all the standard packages that use time, such as <code>os</code> and
      +its representation of file time stamps.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +The <code>go</code> <code>fix</code> tool will update many uses of the old <code>time</code> package to use the new
      +types and methods, although it does not replace values such as <code>1e9</code>
      +representing nanoseconds per second.
      +Also, because of type changes in some of the values that arise,
      +some of the expressions rewritten by the fix tool may require
      +further hand editing; in such cases the rewrite will include
      +the correct function or method for the old functionality, but
      +may have the wrong type or require further analysis.
      +</p>
      +
      +<h2 id="minor">Minor changes to the library</h2>
      +
      +<p>
      +This section describes smaller changes, such as those to less commonly
      +used packages or that affect
      +few programs beyond the need to run <code>go</code> <code>fix</code>.
      +This category includes packages that are new in Go 1.
      +Collectively they improve portability, regularize behavior, and
      +make the interfaces more modern and Go-like.
      +</p>
      +
      +<h3 id="archive_zip">The archive/zip package</h3>
      +
      +<p>
      +In Go 1, <a href="/pkg/archive/zip/#Writer"><code>*zip.Writer</code></a> no
      +longer has a <code>Write</code> method. Its presence was a mistake.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +What little code is affected will be caught by the compiler and must be updated by hand.
      +</p>
      +
      +<h3 id="bufio">The bufio package</h3>
      +
      +<p>
      +In Go 1, <a href="/pkg/bufio/#NewReaderSize"><code>bufio.NewReaderSize</code></a>
      +and
      +<a href="/pkg/bufio/#NewWriterSize"><code>bufio.NewWriterSize</code></a>
      +functions no longer return an error for invalid sizes.
      +If the argument size is too small or invalid, it is adjusted.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will update calls that assign the error to _.
      +Calls that aren't fixed will be caught by the compiler and must be updated by hand.
      +</p>
      +
      +<h3 id="compress">The compress/flate, compress/gzip and compress/zlib packages</h3>
      +
      +<p>
      +In Go 1, the <code>NewWriterXxx</code> functions in
      +<a href="/pkg/compress/flate"><code>compress/flate</code></a>,
      +<a href="/pkg/compress/gzip"><code>compress/gzip</code></a> and
      +<a href="/pkg/compress/zlib"><code>compress/zlib</code></a>
      +all return <code>(*Writer, error)</code> if they take a compression level,
      +and <code>*Writer</code> otherwise. Package <code>gzip</code>'s
      +<code>Compressor</code> and <code>Decompressor</code> types have been renamed
      +to <code>Writer</code> and <code>Reader</code>. Package <code>flate</code>'s
      +<code>WrongValueError</code> type has been removed.
      +</p>
      +
      +<p>
+<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will update old names and calls that assign the error to _.
      +Calls that aren't fixed will be caught by the compiler and must be updated by hand.
      +</p>
      +
      +<h3 id="crypto_aes_des">The crypto/aes and crypto/des packages</h3>
      +
      +<p>
      +In Go 1, the <code>Reset</code> method has been removed. Go does not guarantee
      +that memory is not copied and therefore this method was misleading.
      +</p>
      +
      +<p>
      +The cipher-specific types <code>*aes.Cipher</code>, <code>*des.Cipher</code>,
      +and <code>*des.TripleDESCipher</code> have been removed in favor of
      +<code>cipher.Block</code>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Remove the calls to Reset. Replace uses of the specific cipher types with
      +cipher.Block.
      +</p>
      +
      +<h3 id="crypto_elliptic">The crypto/elliptic package</h3>
      +
      +<p>
      +In Go 1, <a href="/pkg/crypto/elliptic/#Curve"><code>elliptic.Curve</code></a>
      +has been made an interface to permit alternative implementations. The curve
      +parameters have been moved to the
      +<a href="/pkg/crypto/elliptic/#CurveParams"><code>elliptic.CurveParams</code></a>
      +structure.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Existing users of <code>*elliptic.Curve</code> will need to change to
      +simply <code>elliptic.Curve</code>. Calls to <code>Marshal</code>,
      +<code>Unmarshal</code> and <code>GenerateKey</code> are now functions
      +in <code>crypto/elliptic</code> that take an <code>elliptic.Curve</code>
      +as their first argument.
      +</p>
      +
      +<h3 id="crypto_hmac">The crypto/hmac package</h3>
      +
      +<p>
      +In Go 1, the hash-specific functions, such as <code>hmac.NewMD5</code>, have
      +been removed from <code>crypto/hmac</code>. Instead, <code>hmac.New</code> takes
      +a function that returns a <code>hash.Hash</code>, such as <code>md5.New</code>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will perform the needed changes.
      +</p>
      +
      +<h3 id="crypto_x509">The crypto/x509 package</h3>
      +
      +<p>
      +In Go 1, the
      +<a href="/pkg/crypto/x509/#CreateCertificate"><code>CreateCertificate</code></a>
      +and
      +<a href="/pkg/crypto/x509/#CreateCRL"><code>CreateCRL</code></a>
      +functions in <code>crypto/x509</code> have been altered to take an
      +<code>interface{}</code> where they previously took a <code>*rsa.PublicKey</code>
      +or <code>*rsa.PrivateKey</code>. This will allow other public key algorithms
      +to be implemented in the future.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +No changes will be needed.
      +</p>
      +
      +<h3 id="encoding_binary">The encoding/binary package</h3>
      +
      +<p>
      +In Go 1, the <code>binary.TotalSize</code> function has been replaced by
      +<a href="/pkg/encoding/binary/#Size"><code>Size</code></a>,
      +which takes an <code>interface{}</code> argument rather than
      +a <code>reflect.Value</code>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +What little code is affected will be caught by the compiler and must be updated by hand.
      +</p>
      +
      +<h3 id="encoding_xml">The encoding/xml package</h3>
      +
      +<p>
      +In Go 1, the <a href="/pkg/encoding/xml/"><code>xml</code></a> package
      +has been brought closer in design to the other marshaling packages such
      +as <a href="/pkg/encoding/gob/"><code>encoding/gob</code></a>.
      +</p>
      +
      +<p>
      +The old <code>Parser</code> type is renamed
      +<a href="/pkg/encoding/xml/#Decoder"><code>Decoder</code></a> and has a new
      +<a href="/pkg/encoding/xml/#Decoder.Decode"><code>Decode</code></a> method. An
      +<a href="/pkg/encoding/xml/#Encoder"><code>Encoder</code></a> type was also introduced.
      +</p>
      +
      +<p>
      +The functions <a href="/pkg/encoding/xml/#Marshal"><code>Marshal</code></a>
      +and <a href="/pkg/encoding/xml/#Unmarshal"><code>Unmarshal</code></a>
      +work with <code>[]byte</code> values now. To work with streams,
      +use the new <a href="/pkg/encoding/xml/#Encoder"><code>Encoder</code></a>
      +and <a href="/pkg/encoding/xml/#Decoder"><code>Decoder</code></a> types.
      +</p>
      +
      +<p>
      +When marshaling or unmarshaling values, the format of supported flags in
      +field tags has changed to be closer to the
      +<a href="/pkg/encoding/json"><code>json</code></a> package
      +(<code>`xml:"name,flag"`</code>). The matching done between field tags, field
      +names, and the XML attribute and element names is now case-sensitive.
      +The <code>XMLName</code> field tag, if present, must also match the name
      +of the XML element being marshaled.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will update most uses of the package except for some calls to
      +<code>Unmarshal</code>. Special care must be taken with field tags,
      +since the fix tool will not update them and if not fixed by hand they will
      +misbehave silently in some cases. For example, the old
      +<code>"attr"</code> is now written <code>",attr"</code> while plain
      +<code>"attr"</code> remains valid but with a different meaning.
      +</p>
      +
      +<h3 id="expvar">The expvar package</h3>
      +
      +<p>
+In Go 1, the <code>RemoveAll</code> function has been removed.
+The <code>Iter</code> function and <code>Iter</code> method on <code>*Map</code> have
+been replaced by
      +<a href="/pkg/expvar/#Do"><code>Do</code></a>
      +and
      +<a href="/pkg/expvar/#Map.Do"><code>(*Map).Do</code></a>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Most code using <code>expvar</code> will not need changing. The rare code that used
      +<code>Iter</code> can be updated to pass a closure to <code>Do</code> to achieve the same effect.
      +</p>
      +
      +<h3 id="flag">The flag package</h3>
      +
      +<p>
      +In Go 1, the interface <a href="/pkg/flag/#Value"><code>flag.Value</code></a> has changed slightly.
      +The <code>Set</code> method now returns an <code>error</code> instead of
      +a <code>bool</code> to indicate success or failure.
      +</p>
      +
      +<p>
      +There is also a new kind of flag, <code>Duration</code>, to support argument
      +values specifying time intervals.
      +Values for such flags must be given units, just as <code>time.Duration</code>
      +formats them: <code>10s</code>, <code>1h30m</code>, etc.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/timeout/`}}
      +-->var timeout = flag.Duration(&#34;timeout&#34;, 30*time.Second, &#34;how long to wait for completion&#34;)</pre>
      +
      +<p>
      +<em>Updating</em>:
      +Programs that implement their own flags will need minor manual fixes to update their
      +<code>Set</code> methods.
      +The <code>Duration</code> flag is new and affects no existing code.
      +</p>
      +
      +
      +<h3 id="go">The go/* packages</h3>
      +
      +<p>
      +Several packages under <code>go</code> have slightly revised APIs.
      +</p>
      +
      +<p>
      +A concrete <code>Mode</code> type was introduced for configuration mode flags
      +in the packages
      +<a href="/pkg/go/scanner/"><code>go/scanner</code></a>,
      +<a href="/pkg/go/parser/"><code>go/parser</code></a>,
      +<a href="/pkg/go/printer/"><code>go/printer</code></a>, and
      +<a href="/pkg/go/doc/"><code>go/doc</code></a>.
      +</p>
      +
      +<p>
      +The modes <code>AllowIllegalChars</code> and <code>InsertSemis</code> have been removed
      +from the <a href="/pkg/go/scanner/"><code>go/scanner</code></a> package. They were mostly
+useful for scanning text other than Go source files. Instead, the
      +<a href="/pkg/text/scanner/"><code>text/scanner</code></a> package should be used
      +for that purpose.
      +</p>
      +
      +<p>
      +The <a href="/pkg/go/scanner/#ErrorHandler"><code>ErrorHandler</code></a> provided
      +to the scanner's <a href="/pkg/go/scanner/#Scanner.Init"><code>Init</code></a> method is
      +now simply a function rather than an interface. The <code>ErrorVector</code> type has
      +been removed in favor of the (existing) <a href="/pkg/go/scanner/#ErrorList"><code>ErrorList</code></a>
      +type, and the <code>ErrorVector</code> methods have been migrated. Instead of embedding
      +an <code>ErrorVector</code> in a client of the scanner, now a client should maintain
      +an <code>ErrorList</code>.
      +</p>
      +
      +<p>
      +The set of parse functions provided by the <a href="/pkg/go/parser/"><code>go/parser</code></a>
      +package has been reduced to the primary parse function
      +<a href="/pkg/go/parser/#ParseFile"><code>ParseFile</code></a>, and a couple of
      +convenience functions <a href="/pkg/go/parser/#ParseDir"><code>ParseDir</code></a>
      +and <a href="/pkg/go/parser/#ParseExpr"><code>ParseExpr</code></a>.
      +</p>
      +
      +<p>
      +The <a href="/pkg/go/printer/"><code>go/printer</code></a> package supports an additional
      +configuration mode <a href="/pkg/go/printer/#Mode"><code>SourcePos</code></a>;
      +if set, the printer will emit <code>//line</code> comments such that the generated
      +output contains the original source code position information. The new type
      +<a href="/pkg/go/printer/#CommentedNode"><code>CommentedNode</code></a> can be
      +used to provide comments associated with an arbitrary
      +<a href="/pkg/go/ast/#Node"><code>ast.Node</code></a> (until now only
      +<a href="/pkg/go/ast/#File"><code>ast.File</code></a> carried comment information).
      +</p>
      +
      +<p>
      +The type names of the <a href="/pkg/go/doc/"><code>go/doc</code></a> package have been
      +streamlined by removing the <code>Doc</code> suffix: <code>PackageDoc</code>
      +is now <code>Package</code>, <code>ValueDoc</code> is <code>Value</code>, etc.
      +Also, all types now consistently have a <code>Name</code> field (or <code>Names</code>,
      +in the case of type <code>Value</code>) and <code>Type.Factories</code> has become
      +<code>Type.Funcs</code>.
      +Instead of calling <code>doc.NewPackageDoc(pkg, importpath)</code>,
      +documentation for a package is created with:
      +</p>
      +
      +<pre>
      +    doc.New(pkg, importpath, mode)
      +</pre>
      +
      +<p>
      +where the new <code>mode</code> parameter specifies the operation mode:
      +if set to <a href="/pkg/go/doc/#AllDecls"><code>AllDecls</code></a>, all declarations
      +(not just exported ones) are considered.
      +The function <code>NewFileDoc</code> was removed, and the function
      +<code>CommentText</code> has become the method
      +<a href="/pkg/go/ast/#Text"><code>Text</code></a> of
      +<a href="/pkg/go/ast/#CommentGroup"><code>ast.CommentGroup</code></a>.
      +</p>
      +
      +<p>
      +In package <a href="/pkg/go/token/"><code>go/token</code></a>, the
      +<a href="/pkg/go/token/#FileSet"><code>token.FileSet</code></a> method <code>Files</code>
      +(which originally returned a channel of <code>*token.File</code>s) has been replaced
      +with the iterator <a href="/pkg/go/token/#FileSet.Iterate"><code>Iterate</code></a> that
      +accepts a function argument instead.
      +</p>
      +
      +<p>
      +In package <a href="/pkg/go/build/"><code>go/build</code></a>, the API
      +has been nearly completely replaced.
      +The package still computes Go package information
      +but it does not run the build: the <code>Cmd</code> and <code>Script</code>
      +types are gone.
      +(To build code, use the new
      +<a href="/cmd/go/"><code>go</code></a> command instead.)
      +The <code>DirInfo</code> type is now named
      +<a href="/pkg/go/build/#Package"><code>Package</code></a>.
      +<code>FindTree</code> and <code>ScanDir</code> are replaced by
      +<a href="/pkg/go/build/#Import"><code>Import</code></a>
      +and
      +<a href="/pkg/go/build/#ImportDir"><code>ImportDir</code></a>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Code that uses packages in <code>go</code> will have to be updated by hand; the
      +compiler will reject incorrect uses. Templates used in conjunction with any of the
      +<code>go/doc</code> types may need manual fixes; the renamed fields will lead
      +to run-time errors.
      +</p>
      +
      +<h3 id="hash">The hash package</h3>
      +
      +<p>
      +In Go 1, the definition of <a href="/pkg/hash/#Hash"><code>hash.Hash</code></a> includes
      +a new method, <code>BlockSize</code>.  This new method is used primarily in the
      +cryptographic libraries.
      +</p>
      +
      +<p>
      +The <code>Sum</code> method of the
      +<a href="/pkg/hash/#Hash"><code>hash.Hash</code></a> interface now takes a
      +<code>[]byte</code> argument, to which the hash value will be appended.
      +The previous behavior can be recreated by adding a <code>nil</code> argument to the call.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Existing implementations of <code>hash.Hash</code> will need to add a
      +<code>BlockSize</code> method.  Hashes that process the input one byte at
      +a time can implement <code>BlockSize</code> to return 1.
      +Running <code>go</code> <code>fix</code> will update calls to the <code>Sum</code> methods of the various
      +implementations of <code>hash.Hash</code>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Since the package's functionality is new, no updating is necessary.
      +</p>
      +
      +<h3 id="http">The http package</h3>
      +
      +<p>
      +In Go 1 the <a href="/pkg/net/http/"><code>http</code></a> package is refactored,
      +putting some of the utilities into a
      +<a href="/pkg/net/http/httputil/"><code>httputil</code></a> subdirectory.
      +These pieces are only rarely needed by HTTP clients.
      +The affected items are:
      +</p>
      +
      +<ul>
      +<li>ClientConn</li>
      +<li>DumpRequest</li>
      +<li>DumpRequestOut</li>
      +<li>DumpResponse</li>
      +<li>NewChunkedReader</li>
      +<li>NewChunkedWriter</li>
      +<li>NewClientConn</li>
      +<li>NewProxyClientConn</li>
      +<li>NewServerConn</li>
      +<li>NewSingleHostReverseProxy</li>
      +<li>ReverseProxy</li>
      +<li>ServerConn</li>
      +</ul>
      +
      +<p>
      +The <code>Request.RawURL</code> field has been removed; it was a
      +historical artifact.
      +</p>
      +
      +<p>
      +The <code>Handle</code> and <code>HandleFunc</code>
      +functions, and the similarly-named methods of <code>ServeMux</code>,
      +now panic if an attempt is made to register the same pattern twice.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will update the few programs that are affected except for
      +uses of <code>RawURL</code>, which must be fixed by hand.
      +</p>
      +
      +<h3 id="image">The image package</h3>
      +
      +<p>
      +The <a href="/pkg/image/"><code>image</code></a> package has had a number of
      +minor changes, rearrangements and renamings.
      +</p>
      +
      +<p>
      +Most of the color handling code has been moved into its own package,
      +<a href="/pkg/image/color/"><code>image/color</code></a>.
      +For the elements that moved, a symmetry arises; for instance,
      +each pixel of an
      +<a href="/pkg/image/#RGBA"><code>image.RGBA</code></a>
      +is a
      +<a href="/pkg/image/color/#RGBA"><code>color.RGBA</code></a>.
      +</p>
      +
      +<p>
      +The old <code>image/ycbcr</code> package has been folded, with some
      +renamings, into the
      +<a href="/pkg/image/"><code>image</code></a>
      +and
      +<a href="/pkg/image/color/"><code>image/color</code></a>
      +packages.
      +</p>
      +
      +<p>
      +The old <code>image.ColorImage</code> type is still in the <code>image</code>
      +package but has been renamed
      +<a href="/pkg/image/#Uniform"><code>image.Uniform</code></a>,
      +while <code>image.Tiled</code> has been removed.
      +</p>
      +
      +<p>
      +This table lists the renamings.
      +</p>
      +
      +<table class="codetable" frame="border" summary="image renames">
      +<colgroup align="left" width="50%"></colgroup>
      +<colgroup align="left" width="50%"></colgroup>
      +<tr>
      +<th align="left">Old</th>
      +<th align="left">New</th>
      +</tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>image.Color</td> <td>color.Color</td></tr>
      +<tr><td>image.ColorModel</td> <td>color.Model</td></tr>
      +<tr><td>image.ColorModelFunc</td> <td>color.ModelFunc</td></tr>
      +<tr><td>image.PalettedColorModel</td> <td>color.Palette</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>image.RGBAColor</td> <td>color.RGBA</td></tr>
      +<tr><td>image.RGBA64Color</td> <td>color.RGBA64</td></tr>
      +<tr><td>image.NRGBAColor</td> <td>color.NRGBA</td></tr>
      +<tr><td>image.NRGBA64Color</td> <td>color.NRGBA64</td></tr>
      +<tr><td>image.AlphaColor</td> <td>color.Alpha</td></tr>
      +<tr><td>image.Alpha16Color</td> <td>color.Alpha16</td></tr>
      +<tr><td>image.GrayColor</td> <td>color.Gray</td></tr>
      +<tr><td>image.Gray16Color</td> <td>color.Gray16</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>image.RGBAColorModel</td> <td>color.RGBAModel</td></tr>
      +<tr><td>image.RGBA64ColorModel</td> <td>color.RGBA64Model</td></tr>
      +<tr><td>image.NRGBAColorModel</td> <td>color.NRGBAModel</td></tr>
      +<tr><td>image.NRGBA64ColorModel</td> <td>color.NRGBA64Model</td></tr>
      +<tr><td>image.AlphaColorModel</td> <td>color.AlphaModel</td></tr>
      +<tr><td>image.Alpha16ColorModel</td> <td>color.Alpha16Model</td></tr>
      +<tr><td>image.GrayColorModel</td> <td>color.GrayModel</td></tr>
      +<tr><td>image.Gray16ColorModel</td> <td>color.Gray16Model</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>ycbcr.RGBToYCbCr</td> <td>color.RGBToYCbCr</td></tr>
      +<tr><td>ycbcr.YCbCrToRGB</td> <td>color.YCbCrToRGB</td></tr>
      +<tr><td>ycbcr.YCbCrColorModel</td> <td>color.YCbCrModel</td></tr>
      +<tr><td>ycbcr.YCbCrColor</td> <td>color.YCbCr</td></tr>
      +<tr><td>ycbcr.YCbCr</td> <td>image.YCbCr</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>ycbcr.SubsampleRatio444</td> <td>image.YCbCrSubsampleRatio444</td></tr>
      +<tr><td>ycbcr.SubsampleRatio422</td> <td>image.YCbCrSubsampleRatio422</td></tr>
      +<tr><td>ycbcr.SubsampleRatio420</td> <td>image.YCbCrSubsampleRatio420</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>image.ColorImage</td> <td>image.Uniform</td></tr>
      +</table>
      +
      +<p>
      +The image package's <code>New</code> functions
      +(<a href="/pkg/image/#NewRGBA"><code>NewRGBA</code></a>,
      +<a href="/pkg/image/#NewRGBA64"><code>NewRGBA64</code></a>, etc.)
      +take an <a href="/pkg/image/#Rectangle"><code>image.Rectangle</code></a> as an argument
      +instead of four integers.
      +</p>
      +
      +<p>
      +Finally, there are new predefined <code>color.Color</code> variables
      +<a href="/pkg/image/color/#Black"><code>color.Black</code></a>,
      +<a href="/pkg/image/color/#White"><code>color.White</code></a>,
      +<a href="/pkg/image/color/#Opaque"><code>color.Opaque</code></a>
      +and
      +<a href="/pkg/image/color/#Transparent"><code>color.Transparent</code></a>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
      +</p>
      +
      +<h3 id="log_syslog">The log/syslog package</h3>
      +
      +<p>
      +In Go 1, the <a href="/pkg/log/syslog/#NewLogger"><code>syslog.NewLogger</code></a>
      +function returns an error as well as a <code>log.Logger</code>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +What little code is affected will be caught by the compiler and must be updated by hand.
      +</p>
      +
      +<h3 id="mime">The mime package</h3>
      +
      +<p>
+In Go 1, the <a href="/pkg/mime/#FormatMediaType"><code>FormatMediaType</code></a> function
+of the <code>mime</code> package has been simplified to make it
      +consistent with
      +<a href="/pkg/mime/#ParseMediaType"><code>ParseMediaType</code></a>.
      +It now takes <code>"text/html"</code> rather than <code>"text"</code> and <code>"html"</code>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +What little code is affected will be caught by the compiler and must be updated by hand.
      +</p>
      +
      +<h3 id="net">The net package</h3>
      +
      +<p>
      +In Go 1, the various <code>SetTimeout</code>,
      +<code>SetReadTimeout</code>, and <code>SetWriteTimeout</code> methods
      +have been replaced with
      +<a href="/pkg/net/#IPConn.SetDeadline"><code>SetDeadline</code></a>,
      +<a href="/pkg/net/#IPConn.SetReadDeadline"><code>SetReadDeadline</code></a>, and
      +<a href="/pkg/net/#IPConn.SetWriteDeadline"><code>SetWriteDeadline</code></a>,
      +respectively.  Rather than taking a timeout value in nanoseconds that
      +apply to any activity on the connection, the new methods set an
      +absolute deadline (as a <code>time.Time</code> value) after which
      +reads and writes will time out and no longer block.
      +</p>
      +
      +<p>
      +There are also new functions
      +<a href="/pkg/net/#DialTimeout"><code>net.DialTimeout</code></a>
      +to simplify timing out dialing a network address and
      +<a href="/pkg/net/#ListenMulticastUDP"><code>net.ListenMulticastUDP</code></a>
      +to allow multicast UDP to listen concurrently across multiple listeners.
      +The <code>net.ListenMulticastUDP</code> function replaces the old
      +<code>JoinGroup</code> and <code>LeaveGroup</code> methods.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Code that uses the old methods will fail to compile and must be updated by hand.
      +The semantic change makes it difficult for the fix tool to update automatically.
      +</p>
      +
      +<h3 id="os">The os package</h3>
      +
      +<p>
      +The <code>Time</code> function has been removed; callers should use
      +the <a href="/pkg/time/#Time"><code>Time</code></a> type from the
      +<code>time</code> package.
      +</p>
      +
      +<p>
      +The <code>Exec</code> function has been removed; callers should use
      +<code>Exec</code> from the <code>syscall</code> package, where available.
      +</p>
      +
      +<p>
      +The <code>ShellExpand</code> function has been renamed to <a
      +href="/pkg/os/#ExpandEnv"><code>ExpandEnv</code></a>.
      +</p>
      +
      +<p>
      +The <a href="/pkg/os/#NewFile"><code>NewFile</code></a> function
      +now takes a <code>uintptr</code> fd, instead of an <code>int</code>.
      +The <a href="/pkg/os/#File.Fd"><code>Fd</code></a> method on files now
      +also returns a <code>uintptr</code>.
      +</p>
      +
      +<p>
      +There are no longer error constants such as <code>EINVAL</code>
      +in the <code>os</code> package, since the set of values varied with
      +the underlying operating system. There are new portable functions like
      +<a href="/pkg/os/#IsPermission"><code>IsPermission</code></a>
      +to test common error properties, plus a few new error values
      +with more Go-like names, such as
      +<a href="/pkg/os/#ErrPermission"><code>ErrPermission</code></a>
      +and
      +<a href="/pkg/os/#ErrNoEnv"><code>ErrNoEnv</code></a>.
      +</p>
      +
      +<p>
      +The <code>Getenverror</code> function has been removed. To distinguish
      +between a non-existent environment variable and an empty string,
      +use <a href="/pkg/os/#Environ"><code>os.Environ</code></a> or
      +<a href="/pkg/syscall/#Getenv"><code>syscall.Getenv</code></a>.
      +</p>
      +
      +
      +<p>
      +The <a href="/pkg/os/#Process.Wait"><code>Process.Wait</code></a> method has
      +dropped its option argument and the associated constants are gone
      +from the package.
      +Also, the function <code>Wait</code> is gone; only the method of
      +the <code>Process</code> type persists.
      +</p>
      +
      +<p>
      +The <code>Waitmsg</code> type returned by
      +<a href="/pkg/os/#Process.Wait"><code>Process.Wait</code></a>
      +has been replaced with a more portable
      +<a href="/pkg/os/#ProcessState"><code>ProcessState</code></a>
      +type with accessor methods to recover information about the
      +process.
      +Because of changes to <code>Wait</code>, the <code>ProcessState</code>
      +value always describes an exited process.
      +Portability concerns simplified the interface in other ways, but the values returned by the
      +<a href="/pkg/os/#ProcessState.Sys"><code>ProcessState.Sys</code></a> and
      +<a href="/pkg/os/#ProcessState.SysUsage"><code>ProcessState.SysUsage</code></a>
      +methods can be type-asserted to underlying system-specific data structures such as
      +<a href="/pkg/syscall/#WaitStatus"><code>syscall.WaitStatus</code></a> and
      +<a href="/pkg/syscall/#Rusage"><code>syscall.Rusage</code></a> on Unix.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will drop a zero argument to <code>Process.Wait</code>.
      +All other changes will be caught by the compiler and must be updated by hand.
      +</p>
      +
      +<h4 id="os_fileinfo">The os.FileInfo type</h4>
      +
      +<p>
      +Go 1 redefines the <a href="/pkg/os/#FileInfo"><code>os.FileInfo</code></a> type,
      +changing it from a struct to an interface:
      +</p>
      +
      +<pre>
      +    type FileInfo interface {
      +        Name() string       // base name of the file
      +        Size() int64        // length in bytes
      +        Mode() FileMode     // file mode bits
      +        ModTime() time.Time // modification time
      +        IsDir() bool        // abbreviation for Mode().IsDir()
      +        Sys() interface{}   // underlying data source (can return nil)
      +    }
      +</pre>
      +
      +<p>
      +The file mode information has been moved into a subtype called
      +<a href="/pkg/os/#FileMode"><code>os.FileMode</code></a>,
      +a simple integer type with <code>IsDir</code>, <code>Perm</code>, and <code>String</code>
      +methods.
      +</p>
      +
      +<p>
      +The system-specific details of file modes and properties such as (on Unix)
      +i-number have been removed from <code>FileInfo</code> altogether.
      +Instead, each operating system's <code>os</code> package provides an
      +implementation of the <code>FileInfo</code> interface, which
      +has a <code>Sys</code> method that returns the
      +system-specific representation of file metadata.
      +For instance, to discover the i-number of a file on a Unix system, unpack
      +the <code>FileInfo</code> like this:
      +</p>
      +
      +<pre>
      +    fi, err := os.Stat("hello.go")
      +    if err != nil {
      +        log.Fatal(err)
      +    }
      +    // Check that it's a Unix file.
      +    unixStat, ok := fi.Sys().(*syscall.Stat_t)
      +    if !ok {
      +        log.Fatal("hello.go: not a Unix file")
      +    }
      +    fmt.Printf("file i-number: %d\n", unixStat.Ino)
      +</pre>
      +
      +<p>
      +Assuming (which is unwise) that <code>"hello.go"</code> is a Unix file,
      +the i-number expression could be contracted to
      +</p>
      +
      +<pre>
      +    fi.Sys().(*syscall.Stat_t).Ino
      +</pre>
      +
      +<p>
      +The vast majority of uses of <code>FileInfo</code> need only the methods
      +of the standard interface.
      +</p>
      +
      +<p>
      +The <code>os</code> package no longer contains wrappers for the POSIX errors
      +such as <code>ENOENT</code>.
      +For the few programs that need to verify particular error conditions, there are
      +now the boolean functions
      +<a href="/pkg/os/#IsExist"><code>IsExist</code></a>,
      +<a href="/pkg/os/#IsNotExist"><code>IsNotExist</code></a>
      +and
      +<a href="/pkg/os/#IsPermission"><code>IsPermission</code></a>.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/os\.Open/` `/}/`}}
      +-->    f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
      +    if os.IsExist(err) {
      +        log.Printf(&#34;%s already exists&#34;, name)
      +    }</pre>
      +
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will update code that uses the old equivalent of the current <code>os.FileInfo</code>
      +and <code>os.FileMode</code> API.
      +Code that needs system-specific file details will need to be updated by hand.
      +Code that uses the old POSIX error values from the <code>os</code> package
      +will fail to compile and will also need to be updated by hand.
      +</p>
      +
      +<h3 id="os_signal">The os/signal package</h3>
      +
      +<p>
      +The <code>os/signal</code> package in Go 1 replaces the
      +<code>Incoming</code> function, which returned a channel
      +that received all incoming signals,
      +with the selective <code>Notify</code> function, which asks
      +for delivery of specific signals on an existing channel.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Code must be updated by hand.
      +A literal translation of
      +</p>
      +<pre>
      +c := signal.Incoming()
      +</pre>
      +<p>
      +is
      +</p>
      +<pre>
      +c := make(chan os.Signal)
      +signal.Notify(c) // ask for all signals
      +</pre>
      +<p>
      +but most code should list the specific signals it wants to handle instead:
      +</p>
      +<pre>
      +c := make(chan os.Signal)
      +signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT)
      +</pre>
      +
      +<h3 id="path_filepath">The path/filepath package</h3>
      +
      +<p>
      +In Go 1, the <a href="/pkg/path/filepath/#Walk"><code>Walk</code></a> function of the
      +<code>path/filepath</code> package
      +has been changed to take a function value of type
      +<a href="/pkg/path/filepath/#WalkFunc"><code>WalkFunc</code></a>
      +instead of a <code>Visitor</code> interface value.
      +<code>WalkFunc</code> unifies the handling of both files and directories.
      +</p>
      +
      +<pre>
      +    type WalkFunc func(path string, info os.FileInfo, err error) error
      +</pre>
      +
      +<p>
      +The <code>WalkFunc</code> function will be called even for files or directories that could not be opened;
      +in such cases the error argument will describe the failure.
+If a directory's contents are to be skipped,
+the function should return the value <a href="/pkg/path/filepath/#variables"><code>filepath.SkipDir</code></a>.
+</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/STARTWALK/` `/ENDWALK/`}}
      +-->    markFn := func(path string, info os.FileInfo, err error) error {
      +        if path == &#34;pictures&#34; { <span class="comment">// Will skip walking of directory pictures and its contents.</span>
      +            return filepath.SkipDir
      +        }
      +        if err != nil {
      +            return err
      +        }
      +        log.Println(path)
      +        return nil
      +    }
      +    err := filepath.Walk(&#34;.&#34;, markFn)
      +    if err != nil {
      +        log.Fatal(err)
      +    }</pre>
      +
      +<p>
      +<em>Updating</em>:
      +The change simplifies most code but has subtle consequences, so affected programs
      +will need to be updated by hand.
      +The compiler will catch code using the old interface.
      +</p>
      +
      +<h3 id="regexp">The regexp package</h3>
      +
      +<p>
      +The <a href="/pkg/regexp/"><code>regexp</code></a> package has been rewritten.
      +It has the same interface but the specification of the regular expressions
      +it supports has changed from the old "egrep" form to that of
      +<a href="http://code.google.com/p/re2/">RE2</a>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Code that uses the package should have its regular expressions checked by hand.
      +</p>
      +
      +<h3 id="runtime">The runtime package</h3>
      +
      +<p>
      +In Go 1, much of the API exported by package
      +<code>runtime</code> has been removed in favor of
      +functionality provided by other packages.
      +Code using the <code>runtime.Type</code> interface
      +or its specific concrete type implementations should
      +now use package <a href="/pkg/reflect/"><code>reflect</code></a>.
      +Code using <code>runtime.Semacquire</code> or <code>runtime.Semrelease</code>
      +should use channels or the abstractions in package <a href="/pkg/sync/"><code>sync</code></a>.
      +The <code>runtime.Alloc</code>, <code>runtime.Free</code>,
      +and <code>runtime.Lookup</code> functions, an unsafe API created for
      +debugging the memory allocator, have no replacement.
      +</p>
      +
      +<p>
      +Before, <code>runtime.MemStats</code> was a global variable holding
      +statistics about memory allocation, and calls to <code>runtime.UpdateMemStats</code>
      +ensured that it was up to date.
      +In Go 1, <code>runtime.MemStats</code> is a struct type, and code should use
      +<a href="/pkg/runtime/#ReadMemStats"><code>runtime.ReadMemStats</code></a>
      +to obtain the current statistics.
      +</p>
      +
      +<p>
      +The package adds a new function,
      +<a href="/pkg/runtime/#NumCPU"><code>runtime.NumCPU</code></a>, that returns the number of CPUs available
      +for parallel execution, as reported by the operating system kernel.
      +Its value can inform the setting of <code>GOMAXPROCS</code>.
      +The <code>runtime.Cgocalls</code> and <code>runtime.Goroutines</code> functions
      +have been renamed to <code>runtime.NumCgoCall</code> and <code>runtime.NumGoroutine</code>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will update code for the function renamings.
      +Other code will need to be updated by hand.
      +</p>
      +
      +<h3 id="strconv">The strconv package</h3>
      +
      +<p>
      +In Go 1, the
      +<a href="/pkg/strconv/"><code>strconv</code></a>
+package has been significantly reworked to make it more Go-like and less C-like,
+although <code>Atoi</code> lives on (it's similar to
+<code>int(ParseInt(x, 10, 0))</code>), as does
+<code>Itoa(x)</code> (<code>FormatInt(int64(x), 10)</code>).
      +There are also new variants of some of the functions that append to byte slices rather than
      +return strings, to allow control over allocation.
      +</p>
      +
      +<p>
      +This table summarizes the renamings; see the
      +<a href="/pkg/strconv/">package documentation</a>
      +for full details.
      +</p>
      +
      +<table class="codetable" frame="border" summary="strconv renames">
      +<colgroup align="left" width="50%"></colgroup>
      +<colgroup align="left" width="50%"></colgroup>
      +<tr>
      +<th align="left">Old call</th>
      +<th align="left">New call</th>
      +</tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>Atob(x)</td> <td>ParseBool(x)</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>Atof32(x)</td> <td>ParseFloat(x, 32)§</td></tr>
      +<tr><td>Atof64(x)</td> <td>ParseFloat(x, 64)</td></tr>
      +<tr><td>AtofN(x, n)</td> <td>ParseFloat(x, n)</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>Atoi(x)</td> <td>Atoi(x)</td></tr>
      +<tr><td>Atoi(x)</td> <td>ParseInt(x, 10, 0)§</td></tr>
      +<tr><td>Atoi64(x)</td> <td>ParseInt(x, 10, 64)</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>Atoui(x)</td> <td>ParseUint(x, 10, 0)§</td></tr>
      +<tr><td>Atoui64(x)</td> <td>ParseUint(x, 10, 64)</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>Btoi64(x, b)</td> <td>ParseInt(x, b, 64)</td></tr>
      +<tr><td>Btoui64(x, b)</td> <td>ParseUint(x, b, 64)</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>Btoa(x)</td> <td>FormatBool(x)</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>Ftoa32(x, f, p)</td> <td>FormatFloat(float64(x), f, p, 32)</td></tr>
      +<tr><td>Ftoa64(x, f, p)</td> <td>FormatFloat(x, f, p, 64)</td></tr>
      +<tr><td>FtoaN(x, f, p, n)</td> <td>FormatFloat(x, f, p, n)</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>Itoa(x)</td> <td>Itoa(x)</td></tr>
      +<tr><td>Itoa(x)</td> <td>FormatInt(int64(x), 10)</td></tr>
      +<tr><td>Itoa64(x)</td> <td>FormatInt(x, 10)</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>Itob(x, b)</td> <td>FormatInt(int64(x), b)</td></tr>
      +<tr><td>Itob64(x, b)</td> <td>FormatInt(x, b)</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>Uitoa(x)</td> <td>FormatUint(uint64(x), 10)</td></tr>
      +<tr><td>Uitoa64(x)</td> <td>FormatUint(x, 10)</td></tr>
      +<tr>
      +<td colspan="2"><hr></td>
      +</tr>
      +<tr><td>Uitob(x, b)</td> <td>FormatUint(uint64(x), b)</td></tr>
      +<tr><td>Uitob64(x, b)</td> <td>FormatUint(x, b)</td></tr>
      +</table>
      +		
      +<p>
      +<em>Updating</em>:
      +Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
      +<br>
      +§ <code>Atoi</code> persists but <code>Atoui</code> and <code>Atof32</code> do not, so
      +they may require
      +a cast that must be added by hand; the <code>go</code> <code>fix</code> tool will warn about it.
      +</p>
      +
      +
      +<h3 id="templates">The template packages</h3>
      +
      +<p>
      +The <code>template</code> and <code>exp/template/html</code> packages have moved to 
      +<a href="/pkg/text/template/"><code>text/template</code></a> and
      +<a href="/pkg/html/template/"><code>html/template</code></a>.
      +More significant, the interface to these packages has been simplified.
      +The template language is the same, but the concept of "template set" is gone
      +and the functions and methods of the packages have changed accordingly,
      +often by elimination.
      +</p>
      +
      +<p>
      +Instead of sets, a <code>Template</code> object
      +may contain multiple named template definitions,
      +in effect constructing
      +name spaces for template invocation.
      +A template can invoke any other template associated with it, but only those
      +templates associated with it.
      +The simplest way to associate templates is to parse them together, something
      +made easier with the new structure of the packages.
      +</p>
      +
      +<p>
+<em>Updating</em>:
+The imports will be updated by the fix tool.
+Single-template uses will otherwise be largely unaffected.
+Code that uses multiple templates in concert will need to be updated by hand.
      +The <a href="/pkg/text/template/#examples">examples</a> in
      +the documentation for <code>text/template</code> can provide guidance.
      +</p>
      +
      +<h3 id="testing">The testing package</h3>
      +
      +<p>
      +The testing package has a type, <code>B</code>, passed as an argument to benchmark functions.
      +In Go 1, <code>B</code> has new methods, analogous to those of <code>T</code>, enabling
      +logging and failure reporting.
      +</p>
      +
      +<pre><!--{{code "/doc/progs/go1.go" `/func.*Benchmark/` `/^}/`}}
      +-->func BenchmarkSprintf(b *testing.B) {
      +    <span class="comment">// Verify correctness before running benchmark.</span>
      +    b.StopTimer()
      +    got := fmt.Sprintf(&#34;%x&#34;, 23)
      +    const expect = &#34;17&#34;
      +    if expect != got {
      +        b.Fatalf(&#34;expected %q; got %q&#34;, expect, got)
      +    }
      +    b.StartTimer()
      +    for i := 0; i &lt; b.N; i++ {
      +        fmt.Sprintf(&#34;%x&#34;, 23)
      +    }
      +}</pre>
      +
      +<p>
      +<em>Updating</em>:
      +Existing code is unaffected, although benchmarks that use <code>println</code>
      +or <code>panic</code> should be updated to use the new methods.
      +</p>
      +
      +<h3 id="testing_script">The testing/script package</h3>
      +
      +<p>
      +The testing/script package has been deleted. It was a dreg.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +No code is likely to be affected.
      +</p>
      +
      +<h3 id="unsafe">The unsafe package</h3>
      +
      +<p>
      +In Go 1, the functions
      +<code>unsafe.Typeof</code>, <code>unsafe.Reflect</code>,
      +<code>unsafe.Unreflect</code>, <code>unsafe.New</code>, and
      +<code>unsafe.NewArray</code> have been removed;
      +they duplicated safer functionality provided by
      +package <a href="/pkg/reflect/"><code>reflect</code></a>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Code using these functions must be rewritten to use
      +package <a href="/pkg/reflect/"><code>reflect</code></a>.
      +The changes to <a href="http://code.google.com/p/go/source/detail?r=2646dc956207">encoding/gob</a> and the <a href="http://code.google.com/p/goprotobuf/source/detail?r=5340ad310031">protocol buffer library</a>
      +may be helpful as examples.
      +</p>
      +
      +<h3 id="url">The url package</h3>
      +
      +<p>
      +In Go 1 several fields from the <a href="/pkg/net/url/#URL"><code>url.URL</code></a> type
      +were removed or replaced.
      +</p>
      +
      +<p>
      +The <a href="/pkg/net/url/#URL.String"><code>String</code></a> method now
      +predictably rebuilds an encoded URL string using all of <code>URL</code>'s
      +fields as necessary. The resulting string will also no longer have
      +passwords escaped.
      +</p>
      +
      +<p>
      +The <code>Raw</code> field has been removed. In most cases the <code>String</code>
      +method may be used in its place.
      +</p>
      +
      +<p>
+The old <code>RawUserinfo</code> field is replaced by the <code>User</code>
+field, of type <a href="/pkg/net/url/#Userinfo"><code>*url.Userinfo</code></a>.
+Values of this type may be created using the new <a href="/pkg/net/url/#User"><code>url.User</code></a>
+and <a href="/pkg/net/url/#UserPassword"><code>url.UserPassword</code></a>
+functions. The <code>EscapeUserinfo</code> and <code>UnescapeUserinfo</code>
      +functions are also gone.
      +</p>
      +
      +<p>
      +The <code>RawAuthority</code> field has been removed. The same information is
      +available in the <code>Host</code> and <code>User</code> fields.
      +</p>
      +
      +<p>
      +The <code>RawPath</code> field and the <code>EncodedPath</code> method have
      +been removed. The path information in rooted URLs (with a slash following the
      +schema) is now available only in decoded form in the <code>Path</code> field.
      +Occasionally, the encoded data may be required to obtain information that
      +was lost in the decoding process. These cases must be handled by accessing
      +the data the URL was built from.
      +</p>
      +
      +<p>
      +URLs with non-rooted paths, such as <code>"mailto:dev@golang.org?subject=Hi"</code>,
      +are also handled differently. The <code>OpaquePath</code> boolean field has been
      +removed and a new <code>Opaque</code> string field introduced to hold the encoded
      +path for such URLs. In Go 1, the cited URL parses as:
      +</p>
      +
      +<pre>
      +    URL{
      +        Scheme: "mailto",
      +        Opaque: "dev@golang.org",
      +        RawQuery: "subject=Hi",
      +    }
      +</pre>
      +
      +<p>
      +A new <a href="/pkg/net/url/#URL.RequestURI"><code>RequestURI</code></a> method was
      +added to <code>URL</code>.
      +</p>
      +
      +<p>
      +The <code>ParseWithReference</code> function has been renamed to <code>ParseWithFragment</code>.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Code that uses the old fields will fail to compile and must be updated by hand.
      +The semantic changes make it difficult for the fix tool to update automatically.
      +</p>
      +
      +<h2 id="cmd_go">The go command</h2>
      +
      +<p>
      +Go 1 introduces the <a href="/cmd/go/">go command</a>, a tool for fetching,
      +building, and installing Go packages and commands. The <code>go</code> command
      +does away with makefiles, instead using Go source code to find dependencies and
      +determine build conditions. Most existing Go programs will no longer require
      +makefiles to be built.
      +</p>
      +
      +<p>
      +See <a href="/doc/code.html">How to Write Go Code</a> for a primer on the
      +<code>go</code> command and the <a href="/cmd/go/">go command documentation</a>
      +for the full details.
      +</p>
      +
      +<p>
      +<em>Updating</em>:
      +Projects that depend on the Go project's old makefile-based build
      +infrastructure (<code>Make.pkg</code>, <code>Make.cmd</code>, and so on) should
      +switch to using the <code>go</code> command for building Go code and, if
      +necessary, rewrite their makefiles to perform any auxiliary build tasks.
      +</p>
      +
      +<h2 id="cmd_cgo">The cgo command</h2>
      +
      +<p>
      +In Go 1, the <a href="/cmd/cgo">cgo command</a>
      +uses a different <code>_cgo_export.h</code>
      +file, which is generated for packages containing <code>//export</code> lines.
      +The <code>_cgo_export.h</code> file now begins with the C preamble comment,
      +so that exported function definitions can use types defined there.
      +This has the effect of compiling the preamble multiple times, so a
      +package using <code>//export</code> must not put function definitions
      +or variable initializations in the C preamble.
      +</p>
      +
      +<h2 id="releases">Packaged releases</h2>
      +
      +<p>
      +One of the most significant changes associated with Go 1 is the availability
      +of prepackaged, downloadable distributions.
      +They are available for many combinations of architecture and operating system
      +(including Windows) and the list will grow.
      +Installation details are described on the
      +<a href="/doc/install">Getting Started</a> page, while
      +the distributions themselves are listed on the
+<a href="http://code.google.com/p/go/downloads/list">downloads page</a>.
+</p>
      +
      +</div>
      +
      +<div id="footer">
      +Build version go1.0.1.<br>
      +A link <a href="http://code.google.com/policies.html#restrictions">noted</a>,
      +and then, coming up on the very next line, we will
      +find yet another link, link 3.0 if you will,
      +after a few more words <a href="/LINK">link text</a>.<br>
      +<a href="/doc/tos.html">Terms of Service</a> | 
      +<a href="http://www.google.com/intl/en/privacy/privacy-policy.html">Privacy Policy</a>
      +</div>
      +
      +<script type="text/javascript">
      +(function() {
      +  var ga = document.createElement("script"); ga.type = "text/javascript"; ga.async = true;
      +  ga.src = ("https:" == document.location.protocol ? "https://ssl" : "http://www") + ".google-analytics.com/ga.js";
      +  var s = document.getElementsByTagName("script")[0]; s.parentNode.insertBefore(ga, s);
      +})();
      +</script>
      +</body>
      +<script type="text/javascript">
      +  (function() {
      +    var po = document.createElement('script'); po.type = 'text/javascript'; po.async = true;
      +    po.src = 'https://apis.google.com/js/plusone.js';
      +    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(po, s);
      +  })();
      +</script>
      +</html>
      +
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/README b/vendor/golang.org/x/net/html/testdata/webkit/README
      new file mode 100644
      index 00000000..9b4c2d8b
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/README
      @@ -0,0 +1,28 @@
      +The *.dat files in this directory are copied from The WebKit Open Source
      +Project, specifically $WEBKITROOT/LayoutTests/html5lib/resources.
      +WebKit is licensed under a BSD style license.
      +http://webkit.org/coding/bsd-license.html says:
      +
      +Copyright (C) 2009 Apple Inc. All rights reserved.
      +
      +Redistribution and use in source and binary forms, with or without
      +modification, are permitted provided that the following conditions are met:
      +
      +1. Redistributions of source code must retain the above copyright notice,
      +this list of conditions and the following disclaimer.
      +
      +2. Redistributions in binary form must reproduce the above copyright notice,
      +this list of conditions and the following disclaimer in the documentation
      +and/or other materials provided with the distribution.
      +
      +THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND ANY
      +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
      +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
      +DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
      +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
      +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
      +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
      +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
      +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/adoption01.dat b/vendor/golang.org/x/net/html/testdata/webkit/adoption01.dat
      new file mode 100644
      index 00000000..787e1b01
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/adoption01.dat
      @@ -0,0 +1,194 @@
      +#data
      +<a><p></a></p>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|     <p>
      +|       <a>
      +
      +#data
      +<a>1<p>2</a>3</p>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       "1"
      +|     <p>
      +|       <a>
      +|         "2"
      +|       "3"
      +
      +#data
      +<a>1<button>2</a>3</button>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       "1"
      +|     <button>
      +|       <a>
      +|         "2"
      +|       "3"
      +
      +#data
      +<a>1<b>2</a>3</b>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       "1"
      +|       <b>
      +|         "2"
      +|     <b>
      +|       "3"
      +
      +#data
      +<a>1<div>2<div>3</a>4</div>5</div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       "1"
      +|     <div>
      +|       <a>
      +|         "2"
      +|       <div>
      +|         <a>
      +|           "3"
      +|         "4"
      +|       "5"
      +
      +#data
      +<table><a>1<p>2</a>3</p>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       "1"
      +|     <p>
      +|       <a>
      +|         "2"
      +|       "3"
      +|     <table>
      +
      +#data
      +<b><b><a><p></a>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <b>
      +|         <a>
      +|         <p>
      +|           <a>
      +
      +#data
      +<b><a><b><p></a>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <a>
      +|         <b>
      +|       <b>
      +|         <p>
      +|           <a>
      +
      +#data
      +<a><b><b><p></a>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       <b>
      +|         <b>
      +|     <b>
      +|       <b>
      +|         <p>
      +|           <a>
      +
      +#data
      +<p>1<s id="A">2<b id="B">3</p>4</s>5</b>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       "1"
      +|       <s>
      +|         id="A"
      +|         "2"
      +|         <b>
      +|           id="B"
      +|           "3"
      +|     <s>
      +|       id="A"
      +|       <b>
      +|         id="B"
      +|         "4"
      +|     <b>
      +|       id="B"
      +|       "5"
      +
      +#data
      +<table><a>1<td>2</td>3</table>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       "1"
      +|     <a>
      +|       "3"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             "2"
      +
      +#data
      +<table>A<td>B</td>C</table>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "AC"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             "B"
      +
      +#data
      +<a><svg><tr><input></a>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       <svg svg>
      +|         <svg tr>
      +|           <svg input>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/adoption02.dat b/vendor/golang.org/x/net/html/testdata/webkit/adoption02.dat
      new file mode 100644
      index 00000000..d18151b4
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/adoption02.dat
      @@ -0,0 +1,31 @@
      +#data
      +<b>1<i>2<p>3</b>4
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       "1"
      +|       <i>
      +|         "2"
      +|     <i>
      +|       <p>
      +|         <b>
      +|           "3"
      +|         "4"
      +
      +#data
      +<a><div><style></style><address><a>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|     <div>
      +|       <a>
      +|         <style>
      +|       <address>
      +|         <a>
      +|         <a>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/comments01.dat b/vendor/golang.org/x/net/html/testdata/webkit/comments01.dat
      new file mode 100644
      index 00000000..44f18768
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/comments01.dat
      @@ -0,0 +1,135 @@
      +#data
      +FOO<!-- BAR -->BAZ
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <!--  BAR  -->
      +|     "BAZ"
      +
      +#data
      +FOO<!-- BAR --!>BAZ
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <!--  BAR  -->
      +|     "BAZ"
      +
      +#data
      +FOO<!-- BAR --   >BAZ
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <!--  BAR --   >BAZ -->
      +
      +#data
      +FOO<!-- BAR -- <QUX> -- MUX -->BAZ
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <!--  BAR -- <QUX> -- MUX  -->
      +|     "BAZ"
      +
      +#data
      +FOO<!-- BAR -- <QUX> -- MUX --!>BAZ
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <!--  BAR -- <QUX> -- MUX  -->
      +|     "BAZ"
      +
      +#data
      +FOO<!-- BAR -- <QUX> -- MUX -- >BAZ
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <!--  BAR -- <QUX> -- MUX -- >BAZ -->
      +
      +#data
      +FOO<!---->BAZ
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <!--  -->
      +|     "BAZ"
      +
      +#data
      +FOO<!--->BAZ
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <!--  -->
      +|     "BAZ"
      +
      +#data
      +FOO<!-->BAZ
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <!--  -->
      +|     "BAZ"
      +
      +#data
      +<?xml version="1.0">Hi
      +#errors
      +#document
      +| <!-- ?xml version="1.0" -->
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hi"
      +
      +#data
      +<?xml version="1.0">
      +#errors
      +#document
      +| <!-- ?xml version="1.0" -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<?xml version
      +#errors
      +#document
      +| <!-- ?xml version -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +FOO<!----->BAZ
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <!-- - -->
      +|     "BAZ"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/doctype01.dat b/vendor/golang.org/x/net/html/testdata/webkit/doctype01.dat
      new file mode 100644
      index 00000000..ae457328
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/doctype01.dat
      @@ -0,0 +1,370 @@
      +#data
      +<!DOCTYPE html>Hello
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!dOctYpE HtMl>Hello
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPEhtml>Hello
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE>Hello
      +#errors
      +#document
      +| <!DOCTYPE >
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE >Hello
      +#errors
      +#document
      +| <!DOCTYPE >
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato>Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato >Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato taco>Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato taco "ddd>Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato sYstEM>Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato sYstEM    >Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE   potato       sYstEM  ggg>Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato SYSTEM taco  >Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato SYSTEM 'taco"'>Hello
      +#errors
      +#document
      +| <!DOCTYPE potato "" "taco"">
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato SYSTEM "taco">Hello
      +#errors
      +#document
      +| <!DOCTYPE potato "" "taco">
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato SYSTEM "tai'co">Hello
      +#errors
      +#document
      +| <!DOCTYPE potato "" "tai'co">
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato SYSTEMtaco "ddd">Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato grass SYSTEM taco>Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato pUbLIc>Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato pUbLIc >Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato pUbLIcgoof>Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato PUBLIC goof>Hello
      +#errors
      +#document
      +| <!DOCTYPE potato>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato PUBLIC "go'of">Hello
      +#errors
      +#document
      +| <!DOCTYPE potato "go'of" "">
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato PUBLIC 'go'of'>Hello
      +#errors
      +#document
      +| <!DOCTYPE potato "go" "">
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato PUBLIC 'go:hh   of' >Hello
      +#errors
      +#document
      +| <!DOCTYPE potato "go:hh   of" "">
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE potato PUBLIC "W3C-//dfdf" SYSTEM ggg>Hello
      +#errors
      +#document
      +| <!DOCTYPE potato "W3C-//dfdf" "">
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
      +   "http://www.w3.org/TR/html4/strict.dtd">Hello
      +#errors
      +#document
      +| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE ...>Hello
      +#errors
      +#document
      +| <!DOCTYPE ...>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Hello"
      +
      +#data
      +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
      +"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
      +#errors
      +#document
      +| <!DOCTYPE html "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN"
      +"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">
      +#errors
      +#document
      +| <!DOCTYPE html "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE root-element [SYSTEM OR PUBLIC FPI] "uri" [ 
      +<!-- internal declarations -->
      +]>
      +#errors
      +#document
      +| <!DOCTYPE root-element>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "]>"
      +
      +#data
      +<!DOCTYPE html PUBLIC
      +  "-//WAPFORUM//DTD XHTML Mobile 1.0//EN"
      +    "http://www.wapforum.org/DTD/xhtml-mobile10.dtd">
      +#errors
      +#document
      +| <!DOCTYPE html "-//WAPFORUM//DTD XHTML Mobile 1.0//EN" "http://www.wapforum.org/DTD/xhtml-mobile10.dtd">
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE HTML SYSTEM "http://www.w3.org/DTD/HTML4-strict.dtd"><body><b>Mine!</b></body>
      +#errors
      +#document
      +| <!DOCTYPE html "" "http://www.w3.org/DTD/HTML4-strict.dtd">
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       "Mine!"
      +
      +#data
      +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN""http://www.w3.org/TR/html4/strict.dtd">
      +#errors
      +#document
      +| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"'http://www.w3.org/TR/html4/strict.dtd'>
      +#errors
      +#document
      +| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE HTML PUBLIC"-//W3C//DTD HTML 4.01//EN"'http://www.w3.org/TR/html4/strict.dtd'>
      +#errors
      +#document
      +| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE HTML PUBLIC'-//W3C//DTD HTML 4.01//EN''http://www.w3.org/TR/html4/strict.dtd'>
      +#errors
      +#document
      +| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
      +| <html>
      +|   <head>
      +|   <body>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/entities01.dat b/vendor/golang.org/x/net/html/testdata/webkit/entities01.dat
      new file mode 100644
      index 00000000..c8073b78
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/entities01.dat
      @@ -0,0 +1,603 @@
      +#data
      +FOO&gt;BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO>BAR"
      +
      +#data
      +FOO&gtBAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO>BAR"
      +
      +#data
      +FOO&gt BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO> BAR"
      +
      +#data
      +FOO&gt;;;BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO>;;BAR"
      +
      +#data
      +I'm &notit; I tell you
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "I'm ¬it; I tell you"
      +
      +#data
      +I'm &notin; I tell you
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "I'm ∉ I tell you"
      +
      +#data
      +FOO& BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO& BAR"
      +
      +#data
      +FOO&<BAR>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO&"
      +|     <bar>
      +
      +#data
      +FOO&&&&gt;BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO&&&>BAR"
      +
      +#data
      +FOO&#41;BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO)BAR"
      +
      +#data
      +FOO&#x41;BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOABAR"
      +
      +#data
      +FOO&#X41;BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOABAR"
      +
      +#data
      +FOO&#BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO&#BAR"
      +
      +#data
      +FOO&#ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO&#ZOO"
      +
      +#data
      +FOO&#xBAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOºR"
      +
      +#data
      +FOO&#xZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO&#xZOO"
      +
      +#data
      +FOO&#XZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO&#XZOO"
      +
      +#data
      +FOO&#41BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO)BAR"
      +
      +#data
      +FOO&#x41BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO䆺R"
      +
      +#data
      +FOO&#x41ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOAZOO"
      +
      +#data
      +FOO&#x0000;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO�ZOO"
      +
      +#data
      +FOO&#x0078;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOxZOO"
      +
      +#data
      +FOO&#x0079;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOyZOO"
      +
      +#data
      +FOO&#x0080;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO€ZOO"
      +
      +#data
      +FOO&#x0081;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOZOO"
      +
      +#data
      +FOO&#x0082;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO‚ZOO"
      +
      +#data
      +FOO&#x0083;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOƒZOO"
      +
      +#data
      +FOO&#x0084;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO„ZOO"
      +
      +#data
      +FOO&#x0085;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO…ZOO"
      +
      +#data
      +FOO&#x0086;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO†ZOO"
      +
      +#data
      +FOO&#x0087;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO‡ZOO"
      +
      +#data
      +FOO&#x0088;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOˆZOO"
      +
      +#data
      +FOO&#x0089;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO‰ZOO"
      +
      +#data
      +FOO&#x008A;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOŠZOO"
      +
      +#data
      +FOO&#x008B;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO‹ZOO"
      +
      +#data
      +FOO&#x008C;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOŒZOO"
      +
      +#data
      +FOO&#x008D;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOZOO"
      +
      +#data
      +FOO&#x008E;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOŽZOO"
      +
      +#data
      +FOO&#x008F;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOZOO"
      +
      +#data
      +FOO&#x0090;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOZOO"
      +
      +#data
      +FOO&#x0091;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO‘ZOO"
      +
      +#data
      +FOO&#x0092;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO’ZOO"
      +
      +#data
      +FOO&#x0093;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO“ZOO"
      +
      +#data
      +FOO&#x0094;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO”ZOO"
      +
      +#data
      +FOO&#x0095;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO•ZOO"
      +
      +#data
      +FOO&#x0096;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO–ZOO"
      +
      +#data
      +FOO&#x0097;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO—ZOO"
      +
      +#data
      +FOO&#x0098;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO˜ZOO"
      +
      +#data
      +FOO&#x0099;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO™ZOO"
      +
      +#data
      +FOO&#x009A;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOšZOO"
      +
      +#data
      +FOO&#x009B;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO›ZOO"
      +
      +#data
      +FOO&#x009C;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOœZOO"
      +
      +#data
      +FOO&#x009D;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOZOO"
      +
      +#data
      +FOO&#x009E;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOžZOO"
      +
      +#data
      +FOO&#x009F;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOŸZOO"
      +
      +#data
      +FOO&#x00A0;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO ZOO"
      +
      +#data
      +FOO&#xD7FF;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO퟿ZOO"
      +
      +#data
      +FOO&#xD800;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO�ZOO"
      +
      +#data
      +FOO&#xD801;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO�ZOO"
      +
      +#data
      +FOO&#xDFFE;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO�ZOO"
      +
      +#data
      +FOO&#xDFFF;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO�ZOO"
      +
      +#data
      +FOO&#xE000;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOOZOO"
      +
      +#data
      +FOO&#x10FFFE;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO􏿾ZOO"
      +
      +#data
      +FOO&#x1087D4;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO􈟔ZOO"
      +
      +#data
      +FOO&#x10FFFF;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO􏿿ZOO"
      +
      +#data
      +FOO&#x110000;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO�ZOO"
      +
      +#data
      +FOO&#xFFFFFF;ZOO
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO�ZOO"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/entities02.dat b/vendor/golang.org/x/net/html/testdata/webkit/entities02.dat
      new file mode 100644
      index 00000000..e2fb42a0
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/entities02.dat
      @@ -0,0 +1,249 @@
      +#data
      +<div bar="ZZ&gt;YY"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ>YY"
      +
      +#data
      +<div bar="ZZ&"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ&"
      +
      +#data
      +<div bar='ZZ&'></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ&"
      +
      +#data
      +<div bar=ZZ&></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ&"
      +
      +#data
      +<div bar="ZZ&gt=YY"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ&gt=YY"
      +
      +#data
      +<div bar="ZZ&gt0YY"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ&gt0YY"
      +
      +#data
      +<div bar="ZZ&gt9YY"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ&gt9YY"
      +
      +#data
      +<div bar="ZZ&gtaYY"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ&gtaYY"
      +
      +#data
      +<div bar="ZZ&gtZYY"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ&gtZYY"
      +
      +#data
      +<div bar="ZZ&gt YY"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ> YY"
      +
      +#data
      +<div bar="ZZ&gt"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ>"
      +
      +#data
      +<div bar='ZZ&gt'></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ>"
      +
      +#data
      +<div bar=ZZ&gt></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ>"
      +
      +#data
      +<div bar="ZZ&pound_id=23"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ£_id=23"
      +
      +#data
      +<div bar="ZZ&prod_id=23"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ&prod_id=23"
      +
      +#data
      +<div bar="ZZ&pound;_id=23"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ£_id=23"
      +
      +#data
      +<div bar="ZZ&prod;_id=23"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ∏_id=23"
      +
      +#data
      +<div bar="ZZ&pound=23"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ&pound=23"
      +
      +#data
      +<div bar="ZZ&prod=23"></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       bar="ZZ&prod=23"
      +
      +#data
      +<div>ZZ&pound_id=23</div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       "ZZ£_id=23"
      +
      +#data
      +<div>ZZ&prod_id=23</div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       "ZZ&prod_id=23"
      +
      +#data
      +<div>ZZ&pound;_id=23</div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       "ZZ£_id=23"
      +
      +#data
      +<div>ZZ&prod;_id=23</div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       "ZZ∏_id=23"
      +
      +#data
      +<div>ZZ&pound=23</div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       "ZZ£=23"
      +
      +#data
      +<div>ZZ&prod=23</div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       "ZZ&prod=23"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/html5test-com.dat b/vendor/golang.org/x/net/html/testdata/webkit/html5test-com.dat
      new file mode 100644
      index 00000000..d7cb71db
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/html5test-com.dat
      @@ -0,0 +1,246 @@
      +#data
      +<div<div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div<div>
      +
      +#data
      +<div foo<bar=''>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       foo<bar=""
      +
      +#data
      +<div foo=`bar`>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       foo="`bar`"
      +
      +#data
      +<div \"foo=''>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       \"foo=""
      +
      +#data
      +<a href='\nbar'></a>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       href="\nbar"
      +
      +#data
      +<!DOCTYPE html>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +&lang;&rang;
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "⟨⟩"
      +
      +#data
      +&apos;
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "'"
      +
      +#data
      +&ImaginaryI;
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "ⅈ"
      +
      +#data
      +&Kopf;
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "𝕂"
      +
      +#data
      +&notinva;
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "∉"
      +
      +#data
      +<?import namespace="foo" implementation="#bar">
      +#errors
      +#document
      +| <!-- ?import namespace="foo" implementation="#bar" -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!--foo--bar-->
      +#errors
      +#document
      +| <!-- foo--bar -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<![CDATA[x]]>
      +#errors
      +#document
      +| <!-- [CDATA[x]] -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<textarea><!--</textarea>--></textarea>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "<!--"
      +|     "-->"
      +
      +#data
      +<textarea><!--</textarea>-->
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "<!--"
      +|     "-->"
      +
      +#data
      +<style><!--</style>--></style>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<style><!--</style>-->
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<ul><li>A </li> <li>B</li></ul>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <ul>
      +|       <li>
      +|         "A "
      +|       " "
      +|       <li>
      +|         "B"
      +
      +#data
      +<table><form><input type=hidden><input></form><div></div></table>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <input>
      +|     <div>
      +|     <table>
      +|       <form>
      +|       <input>
      +|         type="hidden"
      +
      +#data
      +<i>A<b>B<p></i>C</b>D
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <i>
      +|       "A"
      +|       <b>
      +|         "B"
      +|     <b>
      +|     <p>
      +|       <b>
      +|         <i>
      +|         "C"
      +|       "D"
      +
      +#data
      +<div></div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +
      +#data
      +<svg></svg>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +
      +#data
      +<math></math>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/inbody01.dat b/vendor/golang.org/x/net/html/testdata/webkit/inbody01.dat
      new file mode 100644
      index 00000000..3f2bd374
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/inbody01.dat
      @@ -0,0 +1,43 @@
      +#data
      +<button>1</foo>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <button>
      +|       "1"
      +
      +#data
      +<foo>1<p>2</foo>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <foo>
      +|       "1"
      +|       <p>
      +|         "2"
      +
      +#data
      +<dd>1</foo>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <dd>
      +|       "1"
      +
      +#data
      +<foo>1<dd>2</foo>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <foo>
      +|       "1"
      +|       <dd>
      +|         "2"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/isindex.dat b/vendor/golang.org/x/net/html/testdata/webkit/isindex.dat
      new file mode 100644
      index 00000000..88325ffe
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/isindex.dat
      @@ -0,0 +1,40 @@
      +#data
      +<isindex>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <form>
      +|       <hr>
      +|       <label>
      +|         "This is a searchable index. Enter search keywords: "
      +|         <input>
      +|           name="isindex"
      +|       <hr>
      +
      +#data
      +<isindex name="A" action="B" prompt="C" foo="D">
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <form>
      +|       action="B"
      +|       <hr>
      +|       <label>
      +|         "C"
      +|         <input>
      +|           foo="D"
      +|           name="isindex"
      +|       <hr>
      +
      +#data
      +<form><isindex>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <form>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.dat b/vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.dat
      new file mode 100644
      index 00000000..a5ebb1eb
      Binary files /dev/null and b/vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.dat differ
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat b/vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat
      new file mode 100644
      index 00000000..5a920846
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat
      @@ -0,0 +1,52 @@
      +#data
      +<input type="hidden"><frameset>
      +#errors
      +21: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
      +31: “frameset” start tag seen.
      +31: End of file seen and there were open elements.
      +#document
      +| <html>
      +|   <head>
      +|   <frameset>
      +
      +#data
      +<!DOCTYPE html><table><caption><svg>foo</table>bar
      +#errors
      +47: End tag “table” did not match the name of the current open element (“svg”).
      +47: “table” closed but “caption” was still open.
      +47: End tag “table” seen, but there were open elements.
      +36: Unclosed element “svg”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <caption>
      +|         <svg svg>
      +|           "foo"
      +|     "bar"
      +
      +#data
      +<table><tr><td><svg><desc><td></desc><circle>
      +#errors
      +7: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
      +30: A table cell was implicitly closed, but there were open elements.
      +26: Unclosed element “desc”.
      +20: Unclosed element “svg”.
      +37: Stray end tag “desc”.
      +45: End of file seen and there were open elements.
      +45: Unclosed element “circle”.
      +7: Unclosed element “table”.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <svg svg>
      +|               <svg desc>
      +|           <td>
      +|             <circle>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.dat b/vendor/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.dat
      new file mode 100644
      index 00000000..04cc11fb
      Binary files /dev/null and b/vendor/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.dat differ
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/scriptdata01.dat b/vendor/golang.org/x/net/html/testdata/webkit/scriptdata01.dat
      new file mode 100644
      index 00000000..76b67f4b
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/scriptdata01.dat
      @@ -0,0 +1,308 @@
      +#data
      +FOO<script>'Hello'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       "'Hello'"
      +|     "BAR"
      +
      +#data
      +FOO<script></script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|     "BAR"
      +
      +#data
      +FOO<script></script >BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|     "BAR"
      +
      +#data
      +FOO<script></script/>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|     "BAR"
      +
      +#data
      +FOO<script></script/ >BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|     "BAR"
      +
      +#data
      +FOO<script type="text/plain"></scriptx>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       type="text/plain"
      +|       "</scriptx>BAR"
      +
      +#data
      +FOO<script></script foo=">" dd>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|     "BAR"
      +
      +#data
      +FOO<script>'<'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       "'<'"
      +|     "BAR"
      +
      +#data
      +FOO<script>'<!'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       "'<!'"
      +|     "BAR"
      +
      +#data
      +FOO<script>'<!-'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       "'<!-'"
      +|     "BAR"
      +
      +#data
      +FOO<script>'<!--'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       "'<!--'"
      +|     "BAR"
      +
      +#data
      +FOO<script>'<!---'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       "'<!---'"
      +|     "BAR"
      +
      +#data
      +FOO<script>'<!-->'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       "'<!-->'"
      +|     "BAR"
      +
      +#data
      +FOO<script>'<!-->'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       "'<!-->'"
      +|     "BAR"
      +
      +#data
      +FOO<script>'<!-- potato'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       "'<!-- potato'"
      +|     "BAR"
      +
      +#data
      +FOO<script>'<!-- <sCrIpt'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       "'<!-- <sCrIpt'"
      +|     "BAR"
      +
      +#data
      +FOO<script type="text/plain">'<!-- <sCrIpt>'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       type="text/plain"
      +|       "'<!-- <sCrIpt>'</script>BAR"
      +
      +#data
      +FOO<script type="text/plain">'<!-- <sCrIpt> -'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       type="text/plain"
      +|       "'<!-- <sCrIpt> -'</script>BAR"
      +
      +#data
      +FOO<script type="text/plain">'<!-- <sCrIpt> --'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       type="text/plain"
      +|       "'<!-- <sCrIpt> --'</script>BAR"
      +
      +#data
      +FOO<script>'<!-- <sCrIpt> -->'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       "'<!-- <sCrIpt> -->'"
      +|     "BAR"
      +
      +#data
      +FOO<script type="text/plain">'<!-- <sCrIpt> --!>'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       type="text/plain"
      +|       "'<!-- <sCrIpt> --!>'</script>BAR"
      +
      +#data
      +FOO<script type="text/plain">'<!-- <sCrIpt> -- >'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       type="text/plain"
      +|       "'<!-- <sCrIpt> -- >'</script>BAR"
      +
      +#data
      +FOO<script type="text/plain">'<!-- <sCrIpt '</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       type="text/plain"
      +|       "'<!-- <sCrIpt '</script>BAR"
      +
      +#data
      +FOO<script type="text/plain">'<!-- <sCrIpt/'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       type="text/plain"
      +|       "'<!-- <sCrIpt/'</script>BAR"
      +
      +#data
      +FOO<script type="text/plain">'<!-- <sCrIpt\'</script>BAR
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       type="text/plain"
      +|       "'<!-- <sCrIpt\'"
      +|     "BAR"
      +
      +#data
      +FOO<script type="text/plain">'<!-- <sCrIpt/'</script>BAR</script>QUX
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "FOO"
      +|     <script>
      +|       type="text/plain"
      +|       "'<!-- <sCrIpt/'</script>BAR"
      +|     "QUX"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat b/vendor/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat
      new file mode 100644
      index 00000000..4e08d0e8
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat
      @@ -0,0 +1,15 @@
      +#data
      +<p><b id="A"><script>document.getElementById("A").id = "B"</script></p>TEXT</b>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <b>
      +|         id="B"
      +|         <script>
      +|           "document.getElementById("A").id = "B""
      +|     <b>
      +|       id="A"
      +|       "TEXT"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat b/vendor/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat
      new file mode 100644
      index 00000000..ef4a41ca
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat
      @@ -0,0 +1,28 @@
      +#data
      +1<script>document.write("2")</script>3
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "1"
      +|     <script>
      +|       "document.write("2")"
      +|     "23"
      +
      +#data
      +1<script>document.write("<script>document.write('2')</scr"+ "ipt><script>document.write('3')</scr" + "ipt>")</script>4
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "1"
      +|     <script>
      +|       "document.write("<script>document.write('2')</scr"+ "ipt><script>document.write('3')</scr" + "ipt>")"
      +|     <script>
      +|       "document.write('2')"
      +|     "2"
      +|     <script>
      +|       "document.write('3')"
      +|     "34"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tables01.dat b/vendor/golang.org/x/net/html/testdata/webkit/tables01.dat
      new file mode 100644
      index 00000000..c4b47e48
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tables01.dat
      @@ -0,0 +1,212 @@
      +#data
      +<table><th>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <th>
      +
      +#data
      +<table><td>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +
      +#data
      +<table><col foo='bar'>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <colgroup>
      +|         <col>
      +|           foo="bar"
      +
      +#data
      +<table><colgroup></html>foo
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "foo"
      +|     <table>
      +|       <colgroup>
      +
      +#data
      +<table></table><p>foo
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|     <p>
      +|       "foo"
      +
      +#data
      +<table></body></caption></col></colgroup></html></tbody></td></tfoot></th></thead></tr><td>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +
      +#data
      +<table><select><option>3</select></table>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <option>
      +|         "3"
      +|     <table>
      +
      +#data
      +<table><select><table></table></select></table>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|     <table>
      +|     <table>
      +
      +#data
      +<table><select></table>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|     <table>
      +
      +#data
      +<table><select><option>A<tr><td>B</td></tr></table>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <option>
      +|         "A"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             "B"
      +
      +#data
      +<table><td></body></caption></col></colgroup></html>foo
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             "foo"
      +
      +#data
      +<table><td>A</table>B
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             "A"
      +|     "B"
      +
      +#data
      +<table><tr><caption>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|       <caption>
      +
      +#data
      +<table><tr></body></caption></col></colgroup></html></td></th><td>foo
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             "foo"
      +
      +#data
      +<table><td><tr>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|         <tr>
      +
      +#data
      +<table><td><button><td>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <button>
      +|           <td>
      +
      +#data
      +<table><tr><td><svg><desc><td>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <svg svg>
      +|               <svg desc>
      +|           <td>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests1.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests1.dat
      new file mode 100644
      index 00000000..cbf8bdda
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests1.dat
      @@ -0,0 +1,1952 @@
      +#data
      +Test
      +#errors
      +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Test"
      +
      +#data
      +<p>One<p>Two
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       "One"
      +|     <p>
      +|       "Two"
      +
      +#data
      +Line1<br>Line2<br>Line3<br>Line4
      +#errors
      +Line: 1 Col: 5 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Line1"
      +|     <br>
      +|     "Line2"
      +|     <br>
      +|     "Line3"
      +|     <br>
      +|     "Line4"
      +
      +#data
      +<html>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<head>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<body>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (body). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<html><head>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<html><head></head>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<html><head></head><body>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<html><head></head><body></body>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<html><head><body></body></html>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<html><head></body></html>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
      +Line: 1 Col: 19 Unexpected end tag (body).
      +Line: 1 Col: 26 Unexpected end tag (html).
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<html><head><body></html>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<html><body></html>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<body></html>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (body). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<head></html>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
      +Line: 1 Col: 13 Unexpected end tag (html). Ignored.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +</head>
      +#errors
      +Line: 1 Col: 7 Unexpected end tag (head). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +</body>
      +#errors
      +Line: 1 Col: 7 Unexpected end tag (body). Expected DOCTYPE.
      +Line: 1 Col: 7 Unexpected end tag (body) after the (implied) root element.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +</html>
      +#errors
      +Line: 1 Col: 7 Unexpected end tag (html). Expected DOCTYPE.
      +Line: 1 Col: 7 Unexpected end tag (html) after the (implied) root element.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<b><table><td><i></table>
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
      +Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
      +Line: 1 Col: 25 Got table cell end tag (td) while required end tags are missing.
      +Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <table>
      +|         <tbody>
      +|           <tr>
      +|             <td>
      +|               <i>
      +
      +#data
      +<b><table><td></b><i></table>X
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
      +Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
      +Line: 1 Col: 18 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 29 Got table cell end tag (td) while required end tags are missing.
      +Line: 1 Col: 30 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <table>
      +|         <tbody>
      +|           <tr>
      +|             <td>
      +|               <i>
      +|       "X"
      +
      +#data
      +<h1>Hello<h2>World
      +#errors
      +4: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
      +13: Heading cannot be a child of another heading.
      +18: End of file seen and there were open elements.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <h1>
      +|       "Hello"
      +|     <h2>
      +|       "World"
      +
      +#data
      +<a><p>X<a>Y</a>Z</p></a>
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
      +Line: 1 Col: 10 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 10 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 24 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|     <p>
      +|       <a>
      +|         "X"
      +|       <a>
      +|         "Y"
      +|       "Z"
      +
      +#data
      +<b><button>foo</b>bar
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
      +Line: 1 Col: 15 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|     <button>
      +|       <b>
      +|         "foo"
      +|       "bar"
      +
      +#data
      +<!DOCTYPE html><span><button>foo</span>bar
      +#errors
      +39: End tag “span” seen but there were unclosed elements.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <span>
      +|       <button>
      +|         "foobar"
      +
      +#data
      +<p><b><div><marquee></p></b></div>X
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected end tag (p). Ignored.
      +Line: 1 Col: 24 Unexpected end tag (p). Ignored.
      +Line: 1 Col: 28 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 34 End tag (div) seen too early. Expected other end tag.
      +Line: 1 Col: 35 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <b>
      +|     <div>
      +|       <b>
      +|         <marquee>
      +|           <p>
      +|           "X"
      +
      +#data
      +<script><div></script></div><title><p></title><p><p>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 28 Unexpected end tag (div). Ignored.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<div>"
      +|     <title>
      +|       "<p>"
      +|   <body>
      +|     <p>
      +|     <p>
      +
      +#data
      +<!--><div>--<!-->
      +#errors
      +Line: 1 Col: 5 Incorrect comment.
      +Line: 1 Col: 10 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 17 Incorrect comment.
      +Line: 1 Col: 17 Expected closing tag. Unexpected end of file.
      +#document
      +| <!--  -->
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       "--"
      +|       <!--  -->
      +
      +#data
      +<p><hr></p>
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected end tag (p). Ignored.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <hr>
      +|     <p>
      +
      +#data
      +<select><b><option><select><option></b></select>X
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (select). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected start tag token (b) in the select phase. Ignored.
      +Line: 1 Col: 27 Unexpected select start tag in the select phase treated as select end tag.
      +Line: 1 Col: 39 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 48 Unexpected end tag (select). Ignored.
      +Line: 1 Col: 49 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <option>
      +|     <option>
      +|       "X"
      +
      +#data
      +<a><table><td><a><table></table><a></tr><a></table><b>X</b>C<a>Y
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
      +Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
      +Line: 1 Col: 35 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 40 Got table cell end tag (td) while required end tags are missing.
      +Line: 1 Col: 43 Unexpected start tag (a) in table context caused voodoo mode.
      +Line: 1 Col: 43 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 43 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 51 Unexpected implied end tag (a) in the table phase.
      +Line: 1 Col: 63 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 64 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       <a>
      +|       <table>
      +|         <tbody>
      +|           <tr>
      +|             <td>
      +|               <a>
      +|                 <table>
      +|               <a>
      +|     <a>
      +|       <b>
      +|         "X"
      +|       "C"
      +|     <a>
      +|       "Y"
      +
      +#data
      +<a X>0<b>1<a Y>2
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (a). Expected DOCTYPE.
      +Line: 1 Col: 15 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 15 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 16 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       x=""
      +|       "0"
      +|       <b>
      +|         "1"
      +|     <b>
      +|       <a>
      +|         y=""
      +|         "2"
      +
      +#data
      +<!-----><font><div>hello<table>excite!<b>me!<th><i>please!</tr><!--X-->
      +#errors
      +Line: 1 Col: 7 Unexpected '-' after '--' found in comment.
      +Line: 1 Col: 14 Unexpected start tag (font). Expected DOCTYPE.
      +Line: 1 Col: 38 Unexpected non-space characters in table context caused voodoo mode.
      +Line: 1 Col: 41 Unexpected start tag (b) in table context caused voodoo mode.
      +Line: 1 Col: 48 Unexpected implied end tag (b) in the table phase.
      +Line: 1 Col: 48 Unexpected table cell start tag (th) in the table body phase.
      +Line: 1 Col: 63 Got table cell end tag (th) while required end tags are missing.
      +Line: 1 Col: 71 Unexpected end of file. Expected table content.
      +#document
      +| <!-- - -->
      +| <html>
      +|   <head>
      +|   <body>
      +|     <font>
      +|       <div>
      +|         "helloexcite!"
      +|         <b>
      +|           "me!"
      +|         <table>
      +|           <tbody>
      +|             <tr>
      +|               <th>
      +|                 <i>
      +|                   "please!"
      +|             <!-- X -->
      +
      +#data
      +<!DOCTYPE html><li>hello<li>world<ul>how<li>do</ul>you</body><!--do-->
      +#errors
      +Line: 1 Col: 61 Unexpected end tag (li). Missing end tag (body).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <li>
      +|       "hello"
      +|     <li>
      +|       "world"
      +|       <ul>
      +|         "how"
      +|         <li>
      +|           "do"
      +|       "you"
      +|   <!-- do -->
      +
      +#data
      +<!DOCTYPE html>A<option>B<optgroup>C<select>D</option>E
      +#errors
      +Line: 1 Col: 54 Unexpected end tag (option) in the select phase. Ignored.
      +Line: 1 Col: 55 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "A"
      +|     <option>
      +|       "B"
      +|     <optgroup>
      +|       "C"
      +|       <select>
      +|         "DE"
      +
      +#data
      +<
      +#errors
      +Line: 1 Col: 1 Expected tag name. Got something else instead
      +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "<"
      +
      +#data
      +<#
      +#errors
      +Line: 1 Col: 1 Expected tag name. Got something else instead
      +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "<#"
      +
      +#data
      +</
      +#errors
      +Line: 1 Col: 2 Expected closing tag. Unexpected end of file.
      +Line: 1 Col: 2 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "</"
      +
      +#data
      +</#
      +#errors
      +Line: 1 Col: 2 Expected closing tag. Unexpected character '#' found.
      +Line: 1 Col: 3 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <!-- # -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<?
      +#errors
      +Line: 1 Col: 1 Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)
      +Line: 1 Col: 2 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <!-- ? -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<?#
      +#errors
      +Line: 1 Col: 1 Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)
      +Line: 1 Col: 3 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <!-- ?# -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!
      +#errors
      +Line: 1 Col: 2 Expected '--' or 'DOCTYPE'. Not found.
      +Line: 1 Col: 2 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <!--  -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!#
      +#errors
      +Line: 1 Col: 3 Expected '--' or 'DOCTYPE'. Not found.
      +Line: 1 Col: 3 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <!-- # -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<?COMMENT?>
      +#errors
      +Line: 1 Col: 1 Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)
      +Line: 1 Col: 11 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <!-- ?COMMENT? -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!COMMENT>
      +#errors
      +Line: 1 Col: 2 Expected '--' or 'DOCTYPE'. Not found.
      +Line: 1 Col: 10 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <!-- COMMENT -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +</ COMMENT >
      +#errors
      +Line: 1 Col: 2 Expected closing tag. Unexpected character ' ' found.
      +Line: 1 Col: 12 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <!--  COMMENT  -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<?COM--MENT?>
      +#errors
      +Line: 1 Col: 1 Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)
      +Line: 1 Col: 13 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <!-- ?COM--MENT? -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!COM--MENT>
      +#errors
      +Line: 1 Col: 2 Expected '--' or 'DOCTYPE'. Not found.
      +Line: 1 Col: 12 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <!-- COM--MENT -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +</ COM--MENT >
      +#errors
      +Line: 1 Col: 2 Expected closing tag. Unexpected character ' ' found.
      +Line: 1 Col: 14 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <!--  COM--MENT  -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><style> EOF
      +#errors
      +Line: 1 Col: 26 Unexpected end of file. Expected end tag (style).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <style>
      +|       " EOF"
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><script> <!-- </script> --> </script> EOF
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       " <!-- "
      +|     " "
      +|   <body>
      +|     "-->  EOF"
      +
      +#data
      +<b><p></b>TEST
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
      +Line: 1 Col: 10 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|     <p>
      +|       <b>
      +|       "TEST"
      +
      +#data
      +<p id=a><b><p id=b></b>TEST
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (p). Expected DOCTYPE.
      +Line: 1 Col: 19 Unexpected end tag (p). Ignored.
      +Line: 1 Col: 23 End tag (b) violates step 1, paragraph 2 of the adoption agency algorithm.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       id="a"
      +|       <b>
      +|     <p>
      +|       id="b"
      +|       "TEST"
      +
      +#data
      +<b id=a><p><b id=b></p></b>TEST
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (b). Expected DOCTYPE.
      +Line: 1 Col: 23 Unexpected end tag (p). Ignored.
      +Line: 1 Col: 27 End tag (b) violates step 1, paragraph 2 of the adoption agency algorithm.
      +Line: 1 Col: 31 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       id="a"
      +|       <p>
      +|         <b>
      +|           id="b"
      +|       "TEST"
      +
      +#data
      +<!DOCTYPE html><title>U-test</title><body><div><p>Test<u></p></div></body>
      +#errors
      +Line: 1 Col: 61 Unexpected end tag (p). Ignored.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <title>
      +|       "U-test"
      +|   <body>
      +|     <div>
      +|       <p>
      +|         "Test"
      +|         <u>
      +
      +#data
      +<!DOCTYPE html><font><table></font></table></font>
      +#errors
      +Line: 1 Col: 35 Unexpected end tag (font) in table context caused voodoo mode.
      +Line: 1 Col: 35 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <font>
      +|       <table>
      +
      +#data
      +<font><p>hello<b>cruel</font>world
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (font). Expected DOCTYPE.
      +Line: 1 Col: 29 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 29 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 34 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <font>
      +|     <p>
      +|       <font>
      +|         "hello"
      +|         <b>
      +|           "cruel"
      +|       <b>
      +|         "world"
      +
      +#data
      +<b>Test</i>Test
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
      +Line: 1 Col: 11 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 15 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       "TestTest"
      +
      +#data
      +<b>A<cite>B<div>C
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
      +Line: 1 Col: 17 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       "A"
      +|       <cite>
      +|         "B"
      +|         <div>
      +|           "C"
      +
      +#data
      +<b>A<cite>B<div>C</cite>D
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
      +Line: 1 Col: 24 Unexpected end tag (cite). Ignored.
      +Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       "A"
      +|       <cite>
      +|         "B"
      +|         <div>
      +|           "CD"
      +
      +#data
      +<b>A<cite>B<div>C</b>D
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
      +Line: 1 Col: 21 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 22 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       "A"
      +|       <cite>
      +|         "B"
      +|     <div>
      +|       <b>
      +|         "C"
      +|       "D"
      +
      +#data
      +
      +#errors
      +Line: 1 Col: 0 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<DIV>
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 5 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +
      +#data
      +<DIV> abc
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 9 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc"
      +
      +#data
      +<DIV> abc <B>
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 13 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc "
      +|       <b>
      +
      +#data
      +<DIV> abc <B> def
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 17 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc "
      +|       <b>
      +|         " def"
      +
      +#data
      +<DIV> abc <B> def <I>
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 21 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc "
      +|       <b>
      +|         " def "
      +|         <i>
      +
      +#data
      +<DIV> abc <B> def <I> ghi
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc "
      +|       <b>
      +|         " def "
      +|         <i>
      +|           " ghi"
      +
      +#data
      +<DIV> abc <B> def <I> ghi <P>
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 29 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc "
      +|       <b>
      +|         " def "
      +|         <i>
      +|           " ghi "
      +|           <p>
      +
      +#data
      +<DIV> abc <B> def <I> ghi <P> jkl
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 33 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc "
      +|       <b>
      +|         " def "
      +|         <i>
      +|           " ghi "
      +|           <p>
      +|             " jkl"
      +
      +#data
      +<DIV> abc <B> def <I> ghi <P> jkl </B>
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 38 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc "
      +|       <b>
      +|         " def "
      +|         <i>
      +|           " ghi "
      +|       <i>
      +|         <p>
      +|           <b>
      +|             " jkl "
      +
      +#data
      +<DIV> abc <B> def <I> ghi <P> jkl </B> mno
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 42 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc "
      +|       <b>
      +|         " def "
      +|         <i>
      +|           " ghi "
      +|       <i>
      +|         <p>
      +|           <b>
      +|             " jkl "
      +|           " mno"
      +
      +#data
      +<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I>
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 47 End tag (i) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 47 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc "
      +|       <b>
      +|         " def "
      +|         <i>
      +|           " ghi "
      +|       <i>
      +|       <p>
      +|         <i>
      +|           <b>
      +|             " jkl "
      +|           " mno "
      +
      +#data
      +<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I> pqr
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 47 End tag (i) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 51 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc "
      +|       <b>
      +|         " def "
      +|         <i>
      +|           " ghi "
      +|       <i>
      +|       <p>
      +|         <i>
      +|           <b>
      +|             " jkl "
      +|           " mno "
      +|         " pqr"
      +
      +#data
      +<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I> pqr </P>
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 47 End tag (i) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 56 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc "
      +|       <b>
      +|         " def "
      +|         <i>
      +|           " ghi "
      +|       <i>
      +|       <p>
      +|         <i>
      +|           <b>
      +|             " jkl "
      +|           " mno "
      +|         " pqr "
      +
      +#data
      +<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I> pqr </P> stu
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
      +Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 47 End tag (i) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 60 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       " abc "
      +|       <b>
      +|         " def "
      +|         <i>
      +|           " ghi "
      +|       <i>
      +|       <p>
      +|         <i>
      +|           <b>
      +|             " jkl "
      +|           " mno "
      +|         " pqr "
      +|       " stu"
      +
      +#data
      +<test attribute---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------->
      +#errors
      +Line: 1 Col: 1040 Unexpected start tag (test). Expected DOCTYPE.
      +Line: 1 Col: 1040 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <test>
      +|       attribute----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------=""
      +
      +#data
      +<a href="blah">aba<table><a href="foo">br<tr><td></td></tr>x</table>aoe
      +#errors
      +Line: 1 Col: 15 Unexpected start tag (a). Expected DOCTYPE.
      +Line: 1 Col: 39 Unexpected start tag (a) in table context caused voodoo mode.
      +Line: 1 Col: 39 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 39 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 45 Unexpected implied end tag (a) in the table phase.
      +Line: 1 Col: 68 Unexpected implied end tag (a) in the table phase.
      +Line: 1 Col: 71 Expected closing tag. Unexpected end of file.
      +
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       href="blah"
      +|       "aba"
      +|       <a>
      +|         href="foo"
      +|         "br"
      +|       <a>
      +|         href="foo"
      +|         "x"
      +|       <table>
      +|         <tbody>
      +|           <tr>
      +|             <td>
      +|     <a>
      +|       href="foo"
      +|       "aoe"
      +
      +#data
      +<a href="blah">aba<table><tr><td><a href="foo">br</td></tr>x</table>aoe
      +#errors
      +Line: 1 Col: 15 Unexpected start tag (a). Expected DOCTYPE.
      +Line: 1 Col: 54 Got table cell end tag (td) while required end tags are missing.
      +Line: 1 Col: 60 Unexpected non-space characters in table context caused voodoo mode.
      +Line: 1 Col: 71 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       href="blah"
      +|       "abax"
      +|       <table>
      +|         <tbody>
      +|           <tr>
      +|             <td>
      +|               <a>
      +|                 href="foo"
      +|                 "br"
      +|       "aoe"
      +
      +#data
      +<table><a href="blah">aba<tr><td><a href="foo">br</td></tr>x</table>aoe
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
      +Line: 1 Col: 22 Unexpected start tag (a) in table context caused voodoo mode.
      +Line: 1 Col: 29 Unexpected implied end tag (a) in the table phase.
      +Line: 1 Col: 54 Got table cell end tag (td) while required end tags are missing.
      +Line: 1 Col: 68 Unexpected implied end tag (a) in the table phase.
      +Line: 1 Col: 71 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       href="blah"
      +|       "aba"
      +|     <a>
      +|       href="blah"
      +|       "x"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <a>
      +|               href="foo"
      +|               "br"
      +|     <a>
      +|       href="blah"
      +|       "aoe"
      +
      +#data
      +<a href=a>aa<marquee>aa<a href=b>bb</marquee>aa
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (a). Expected DOCTYPE.
      +Line: 1 Col: 45 End tag (marquee) seen too early. Expected other end tag.
      +Line: 1 Col: 47 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       href="a"
      +|       "aa"
      +|       <marquee>
      +|         "aa"
      +|         <a>
      +|           href="b"
      +|           "bb"
      +|       "aa"
      +
      +#data
      +<wbr><strike><code></strike><code><strike></code>
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (wbr). Expected DOCTYPE.
      +Line: 1 Col: 28 End tag (strike) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 49 Unexpected end tag (code). Ignored.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <wbr>
      +|     <strike>
      +|       <code>
      +|     <code>
      +|       <code>
      +|         <strike>
      +
      +#data
      +<!DOCTYPE html><spacer>foo
      +#errors
      +26: End of file seen and there were open elements.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <spacer>
      +|       "foo"
      +
      +#data
      +<title><meta></title><link><title><meta></title>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <title>
      +|       "<meta>"
      +|     <link>
      +|     <title>
      +|       "<meta>"
      +|   <body>
      +
      +#data
      +<style><!--</style><meta><script>--><link></script>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +Line: 1 Col: 51 Unexpected end of file. Expected end tag (style).
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--"
      +|     <meta>
      +|     <script>
      +|       "--><link>"
      +|   <body>
      +
      +#data
      +<head><meta></head><link>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
      +Line: 1 Col: 25 Unexpected start tag (link) that can be in head. Moved.
      +#document
      +| <html>
      +|   <head>
      +|     <meta>
      +|     <link>
      +|   <body>
      +
      +#data
      +<table><tr><tr><td><td><span><th><span>X</table>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
      +Line: 1 Col: 33 Got table cell end tag (td) while required end tags are missing.
      +Line: 1 Col: 48 Got table cell end tag (th) while required end tags are missing.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|         <tr>
      +|           <td>
      +|           <td>
      +|             <span>
      +|           <th>
      +|             <span>
      +|               "X"
      +
      +#data
      +<body><body><base><link><meta><title><p></title><body><p></body>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (body). Expected DOCTYPE.
      +Line: 1 Col: 12 Unexpected start tag (body).
      +Line: 1 Col: 54 Unexpected start tag (body).
      +Line: 1 Col: 64 Unexpected end tag (p). Missing end tag (body).
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <base>
      +|     <link>
      +|     <meta>
      +|     <title>
      +|       "<p>"
      +|     <p>
      +
      +#data
      +<textarea><p></textarea>
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "<p>"
      +
      +#data
      +<p><image></p>
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
      +Line: 1 Col: 10 Unexpected start tag (image). Treated as img.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <img>
      +
      +#data
      +<a><table><a></table><p><a><div><a>
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
      +Line: 1 Col: 13 Unexpected start tag (a) in table context caused voodoo mode.
      +Line: 1 Col: 13 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 13 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 21 Unexpected end tag (table). Expected end tag (a).
      +Line: 1 Col: 27 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 27 End tag (a) violates step 1, paragraph 2 of the adoption agency algorithm.
      +Line: 1 Col: 32 Unexpected end tag (p). Ignored.
      +Line: 1 Col: 35 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 35 End tag (a) violates step 1, paragraph 2 of the adoption agency algorithm.
      +Line: 1 Col: 35 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       <a>
      +|       <table>
      +|     <p>
      +|       <a>
      +|     <div>
      +|       <a>
      +
      +#data
      +<head></p><meta><p>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
      +Line: 1 Col: 10 Unexpected end tag (p). Ignored.
      +#document
      +| <html>
      +|   <head>
      +|     <meta>
      +|   <body>
      +|     <p>
      +
      +#data
      +<head></html><meta><p>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
      +Line: 1 Col: 19 Unexpected start tag (meta).
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <meta>
      +|     <p>
      +
      +#data
      +<b><table><td><i></table>
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
      +Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
      +Line: 1 Col: 25 Got table cell end tag (td) while required end tags are missing.
      +Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <table>
      +|         <tbody>
      +|           <tr>
      +|             <td>
      +|               <i>
      +
      +#data
      +<b><table><td></b><i></table>
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
      +Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
      +Line: 1 Col: 18 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 29 Got table cell end tag (td) while required end tags are missing.
      +Line: 1 Col: 29 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <table>
      +|         <tbody>
      +|           <tr>
      +|             <td>
      +|               <i>
      +
      +#data
      +<h1><h2>
      +#errors
      +4: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
      +8: Heading cannot be a child of another heading.
      +8: End of file seen and there were open elements.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <h1>
      +|     <h2>
      +
      +#data
      +<a><p><a></a></p></a>
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
      +Line: 1 Col: 9 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 9 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 21 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|     <p>
      +|       <a>
      +|       <a>
      +
      +#data
      +<b><button></b></button></b>
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
      +Line: 1 Col: 15 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|     <button>
      +|       <b>
      +
      +#data
      +<p><b><div><marquee></p></b></div>
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected end tag (p). Ignored.
      +Line: 1 Col: 24 Unexpected end tag (p). Ignored.
      +Line: 1 Col: 28 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 34 End tag (div) seen too early. Expected other end tag.
      +Line: 1 Col: 34 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <b>
      +|     <div>
      +|       <b>
      +|         <marquee>
      +|           <p>
      +
      +#data
      +<script></script></div><title></title><p><p>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 23 Unexpected end tag (div). Ignored.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|     <title>
      +|   <body>
      +|     <p>
      +|     <p>
      +
      +#data
      +<p><hr></p>
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected end tag (p). Ignored.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <hr>
      +|     <p>
      +
      +#data
      +<select><b><option><select><option></b></select>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (select). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected start tag token (b) in the select phase. Ignored.
      +Line: 1 Col: 27 Unexpected select start tag in the select phase treated as select end tag.
      +Line: 1 Col: 39 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 48 Unexpected end tag (select). Ignored.
      +Line: 1 Col: 48 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <option>
      +|     <option>
      +
      +#data
      +<html><head><title></title><body></body></html>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <title>
      +|   <body>
      +
      +#data
      +<a><table><td><a><table></table><a></tr><a></table><a>
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
      +Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
      +Line: 1 Col: 35 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 40 Got table cell end tag (td) while required end tags are missing.
      +Line: 1 Col: 43 Unexpected start tag (a) in table context caused voodoo mode.
      +Line: 1 Col: 43 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 43 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 51 Unexpected implied end tag (a) in the table phase.
      +Line: 1 Col: 54 Unexpected start tag (a) implies end tag (a).
      +Line: 1 Col: 54 End tag (a) violates step 1, paragraph 2 of the adoption agency algorithm.
      +Line: 1 Col: 54 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       <a>
      +|       <table>
      +|         <tbody>
      +|           <tr>
      +|             <td>
      +|               <a>
      +|                 <table>
      +|               <a>
      +|     <a>
      +
      +#data
      +<ul><li></li><div><li></div><li><li><div><li><address><li><b><em></b><li></ul>
      +#errors
      +Line: 1 Col: 4 Unexpected start tag (ul). Expected DOCTYPE.
      +Line: 1 Col: 45 Missing end tag (div, li).
      +Line: 1 Col: 58 Missing end tag (address, li).
      +Line: 1 Col: 69 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <ul>
      +|       <li>
      +|       <div>
      +|         <li>
      +|       <li>
      +|       <li>
      +|         <div>
      +|       <li>
      +|         <address>
      +|       <li>
      +|         <b>
      +|           <em>
      +|       <li>
      +
      +#data
      +<ul><li><ul></li><li>a</li></ul></li></ul>
      +#errors
      +XXX: fix me
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <ul>
      +|       <li>
      +|         <ul>
      +|           <li>
      +|             "a"
      +
      +#data
      +<frameset><frame><frameset><frame></frameset><noframes></noframes></frameset>
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <frameset>
      +|     <frame>
      +|     <frameset>
      +|       <frame>
      +|     <noframes>
      +
      +#data
      +<h1><table><td><h3></table><h3></h1>
      +#errors
      +4: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
      +15: “td” start tag in table body.
      +27: Unclosed elements.
      +31: Heading cannot be a child of another heading.
      +36: End tag “h1” seen but there were unclosed elements.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <h1>
      +|       <table>
      +|         <tbody>
      +|           <tr>
      +|             <td>
      +|               <h3>
      +|     <h3>
      +
      +#data
      +<table><colgroup><col><colgroup><col><col><col><colgroup><col><col><thead><tr><td></table>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <colgroup>
      +|         <col>
      +|       <colgroup>
      +|         <col>
      +|         <col>
      +|         <col>
      +|       <colgroup>
      +|         <col>
      +|         <col>
      +|       <thead>
      +|         <tr>
      +|           <td>
      +
      +#data
      +<table><col><tbody><col><tr><col><td><col></table><col>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
      +Line: 1 Col: 37 Unexpected table cell start tag (td) in the table body phase.
      +Line: 1 Col: 55 Unexpected start tag col. Ignored.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <colgroup>
      +|         <col>
      +|       <tbody>
      +|       <colgroup>
      +|         <col>
      +|       <tbody>
      +|         <tr>
      +|       <colgroup>
      +|         <col>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|       <colgroup>
      +|         <col>
      +
      +#data
      +<table><colgroup><tbody><colgroup><tr><colgroup><td><colgroup></table><colgroup>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
      +Line: 1 Col: 52 Unexpected table cell start tag (td) in the table body phase.
      +Line: 1 Col: 80 Unexpected start tag colgroup. Ignored.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <colgroup>
      +|       <tbody>
      +|       <colgroup>
      +|       <tbody>
      +|         <tr>
      +|       <colgroup>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|       <colgroup>
      +
      +#data
      +</strong></b></em></i></u></strike></s></blink></tt></pre></big></small></font></select></h1></h2></h3></h4></h5></h6></body></br></a></img></title></span></style></script></table></th></td></tr></frame></area></link></param></hr></input></col></base></meta></basefont></bgsound></embed></spacer></p></dd></dt></caption></colgroup></tbody></tfoot></thead></address></blockquote></center></dir></div></dl></fieldset></listing></menu></ol></ul></li></nobr></wbr></form></button></marquee></object></html></frameset></head></iframe></image></isindex></noembed></noframes></noscript></optgroup></option></plaintext></textarea>
      +#errors
      +Line: 1 Col: 9 Unexpected end tag (strong). Expected DOCTYPE.
      +Line: 1 Col: 9 Unexpected end tag (strong) after the (implied) root element.
      +Line: 1 Col: 13 Unexpected end tag (b) after the (implied) root element.
      +Line: 1 Col: 18 Unexpected end tag (em) after the (implied) root element.
      +Line: 1 Col: 22 Unexpected end tag (i) after the (implied) root element.
      +Line: 1 Col: 26 Unexpected end tag (u) after the (implied) root element.
      +Line: 1 Col: 35 Unexpected end tag (strike) after the (implied) root element.
      +Line: 1 Col: 39 Unexpected end tag (s) after the (implied) root element.
      +Line: 1 Col: 47 Unexpected end tag (blink) after the (implied) root element.
      +Line: 1 Col: 52 Unexpected end tag (tt) after the (implied) root element.
      +Line: 1 Col: 58 Unexpected end tag (pre) after the (implied) root element.
      +Line: 1 Col: 64 Unexpected end tag (big) after the (implied) root element.
      +Line: 1 Col: 72 Unexpected end tag (small) after the (implied) root element.
      +Line: 1 Col: 79 Unexpected end tag (font) after the (implied) root element.
      +Line: 1 Col: 88 Unexpected end tag (select) after the (implied) root element.
      +Line: 1 Col: 93 Unexpected end tag (h1) after the (implied) root element.
      +Line: 1 Col: 98 Unexpected end tag (h2) after the (implied) root element.
      +Line: 1 Col: 103 Unexpected end tag (h3) after the (implied) root element.
      +Line: 1 Col: 108 Unexpected end tag (h4) after the (implied) root element.
      +Line: 1 Col: 113 Unexpected end tag (h5) after the (implied) root element.
      +Line: 1 Col: 118 Unexpected end tag (h6) after the (implied) root element.
      +Line: 1 Col: 125 Unexpected end tag (body) after the (implied) root element.
      +Line: 1 Col: 130 Unexpected end tag (br). Treated as br element.
      +Line: 1 Col: 134 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 140 This element (img) has no end tag.
      +Line: 1 Col: 148 Unexpected end tag (title). Ignored.
      +Line: 1 Col: 155 Unexpected end tag (span). Ignored.
      +Line: 1 Col: 163 Unexpected end tag (style). Ignored.
      +Line: 1 Col: 172 Unexpected end tag (script). Ignored.
      +Line: 1 Col: 180 Unexpected end tag (table). Ignored.
      +Line: 1 Col: 185 Unexpected end tag (th). Ignored.
      +Line: 1 Col: 190 Unexpected end tag (td). Ignored.
      +Line: 1 Col: 195 Unexpected end tag (tr). Ignored.
      +Line: 1 Col: 203 This element (frame) has no end tag.
      +Line: 1 Col: 210 This element (area) has no end tag.
      +Line: 1 Col: 217 Unexpected end tag (link). Ignored.
      +Line: 1 Col: 225 This element (param) has no end tag.
      +Line: 1 Col: 230 This element (hr) has no end tag.
      +Line: 1 Col: 238 This element (input) has no end tag.
      +Line: 1 Col: 244 Unexpected end tag (col). Ignored.
      +Line: 1 Col: 251 Unexpected end tag (base). Ignored.
      +Line: 1 Col: 258 Unexpected end tag (meta). Ignored.
      +Line: 1 Col: 269 This element (basefont) has no end tag.
      +Line: 1 Col: 279 This element (bgsound) has no end tag.
      +Line: 1 Col: 287 This element (embed) has no end tag.
      +Line: 1 Col: 296 This element (spacer) has no end tag.
      +Line: 1 Col: 300 Unexpected end tag (p). Ignored.
      +Line: 1 Col: 305 End tag (dd) seen too early. Expected other end tag.
      +Line: 1 Col: 310 End tag (dt) seen too early. Expected other end tag.
      +Line: 1 Col: 320 Unexpected end tag (caption). Ignored.
      +Line: 1 Col: 331 Unexpected end tag (colgroup). Ignored.
      +Line: 1 Col: 339 Unexpected end tag (tbody). Ignored.
      +Line: 1 Col: 347 Unexpected end tag (tfoot). Ignored.
      +Line: 1 Col: 355 Unexpected end tag (thead). Ignored.
      +Line: 1 Col: 365 End tag (address) seen too early. Expected other end tag.
      +Line: 1 Col: 378 End tag (blockquote) seen too early. Expected other end tag.
      +Line: 1 Col: 387 End tag (center) seen too early. Expected other end tag.
      +Line: 1 Col: 393 Unexpected end tag (dir). Ignored.
      +Line: 1 Col: 399 End tag (div) seen too early. Expected other end tag.
      +Line: 1 Col: 404 End tag (dl) seen too early. Expected other end tag.
      +Line: 1 Col: 415 End tag (fieldset) seen too early. Expected other end tag.
      +Line: 1 Col: 425 End tag (listing) seen too early. Expected other end tag.
      +Line: 1 Col: 432 End tag (menu) seen too early. Expected other end tag.
      +Line: 1 Col: 437 End tag (ol) seen too early. Expected other end tag.
      +Line: 1 Col: 442 End tag (ul) seen too early. Expected other end tag.
      +Line: 1 Col: 447 End tag (li) seen too early. Expected other end tag.
      +Line: 1 Col: 454 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 460 This element (wbr) has no end tag.
      +Line: 1 Col: 476 End tag (button) seen too early. Expected other end tag.
      +Line: 1 Col: 486 End tag (marquee) seen too early. Expected other end tag.
      +Line: 1 Col: 495 End tag (object) seen too early. Expected other end tag.
      +Line: 1 Col: 513 Unexpected end tag (html). Ignored.
      +Line: 1 Col: 513 Unexpected end tag (frameset). Ignored.
      +Line: 1 Col: 520 Unexpected end tag (head). Ignored.
      +Line: 1 Col: 529 Unexpected end tag (iframe). Ignored.
      +Line: 1 Col: 537 This element (image) has no end tag.
      +Line: 1 Col: 547 This element (isindex) has no end tag.
      +Line: 1 Col: 557 Unexpected end tag (noembed). Ignored.
      +Line: 1 Col: 568 Unexpected end tag (noframes). Ignored.
      +Line: 1 Col: 579 Unexpected end tag (noscript). Ignored.
      +Line: 1 Col: 590 Unexpected end tag (optgroup). Ignored.
      +Line: 1 Col: 599 Unexpected end tag (option). Ignored.
      +Line: 1 Col: 611 Unexpected end tag (plaintext). Ignored.
      +Line: 1 Col: 622 Unexpected end tag (textarea). Ignored.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <br>
      +|     <p>
      +
      +#data
      +<table><tr></strong></b></em></i></u></strike></s></blink></tt></pre></big></small></font></select></h1></h2></h3></h4></h5></h6></body></br></a></img></title></span></style></script></table></th></td></tr></frame></area></link></param></hr></input></col></base></meta></basefont></bgsound></embed></spacer></p></dd></dt></caption></colgroup></tbody></tfoot></thead></address></blockquote></center></dir></div></dl></fieldset></listing></menu></ol></ul></li></nobr></wbr></form></button></marquee></object></html></frameset></head></iframe></image></isindex></noembed></noframes></noscript></optgroup></option></plaintext></textarea>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
      +Line: 1 Col: 20 Unexpected end tag (strong) in table context caused voodoo mode.
      +Line: 1 Col: 20 End tag (strong) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 24 Unexpected end tag (b) in table context caused voodoo mode.
      +Line: 1 Col: 24 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 29 Unexpected end tag (em) in table context caused voodoo mode.
      +Line: 1 Col: 29 End tag (em) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 33 Unexpected end tag (i) in table context caused voodoo mode.
      +Line: 1 Col: 33 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 37 Unexpected end tag (u) in table context caused voodoo mode.
      +Line: 1 Col: 37 End tag (u) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 46 Unexpected end tag (strike) in table context caused voodoo mode.
      +Line: 1 Col: 46 End tag (strike) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 50 Unexpected end tag (s) in table context caused voodoo mode.
      +Line: 1 Col: 50 End tag (s) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 58 Unexpected end tag (blink) in table context caused voodoo mode.
      +Line: 1 Col: 58 Unexpected end tag (blink). Ignored.
      +Line: 1 Col: 63 Unexpected end tag (tt) in table context caused voodoo mode.
      +Line: 1 Col: 63 End tag (tt) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 69 Unexpected end tag (pre) in table context caused voodoo mode.
      +Line: 1 Col: 69 End tag (pre) seen too early. Expected other end tag.
      +Line: 1 Col: 75 Unexpected end tag (big) in table context caused voodoo mode.
      +Line: 1 Col: 75 End tag (big) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 83 Unexpected end tag (small) in table context caused voodoo mode.
      +Line: 1 Col: 83 End tag (small) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 90 Unexpected end tag (font) in table context caused voodoo mode.
      +Line: 1 Col: 90 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 99 Unexpected end tag (select) in table context caused voodoo mode.
      +Line: 1 Col: 99 Unexpected end tag (select). Ignored.
      +Line: 1 Col: 104 Unexpected end tag (h1) in table context caused voodoo mode.
      +Line: 1 Col: 104 End tag (h1) seen too early. Expected other end tag.
      +Line: 1 Col: 109 Unexpected end tag (h2) in table context caused voodoo mode.
      +Line: 1 Col: 109 End tag (h2) seen too early. Expected other end tag.
      +Line: 1 Col: 114 Unexpected end tag (h3) in table context caused voodoo mode.
      +Line: 1 Col: 114 End tag (h3) seen too early. Expected other end tag.
      +Line: 1 Col: 119 Unexpected end tag (h4) in table context caused voodoo mode.
      +Line: 1 Col: 119 End tag (h4) seen too early. Expected other end tag.
      +Line: 1 Col: 124 Unexpected end tag (h5) in table context caused voodoo mode.
      +Line: 1 Col: 124 End tag (h5) seen too early. Expected other end tag.
      +Line: 1 Col: 129 Unexpected end tag (h6) in table context caused voodoo mode.
      +Line: 1 Col: 129 End tag (h6) seen too early. Expected other end tag.
      +Line: 1 Col: 136 Unexpected end tag (body) in the table row phase. Ignored.
      +Line: 1 Col: 141 Unexpected end tag (br) in table context caused voodoo mode.
      +Line: 1 Col: 141 Unexpected end tag (br). Treated as br element.
      +Line: 1 Col: 145 Unexpected end tag (a) in table context caused voodoo mode.
      +Line: 1 Col: 145 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 151 Unexpected end tag (img) in table context caused voodoo mode.
      +Line: 1 Col: 151 This element (img) has no end tag.
      +Line: 1 Col: 159 Unexpected end tag (title) in table context caused voodoo mode.
      +Line: 1 Col: 159 Unexpected end tag (title). Ignored.
      +Line: 1 Col: 166 Unexpected end tag (span) in table context caused voodoo mode.
      +Line: 1 Col: 166 Unexpected end tag (span). Ignored.
      +Line: 1 Col: 174 Unexpected end tag (style) in table context caused voodoo mode.
      +Line: 1 Col: 174 Unexpected end tag (style). Ignored.
      +Line: 1 Col: 183 Unexpected end tag (script) in table context caused voodoo mode.
      +Line: 1 Col: 183 Unexpected end tag (script). Ignored.
      +Line: 1 Col: 196 Unexpected end tag (th). Ignored.
      +Line: 1 Col: 201 Unexpected end tag (td). Ignored.
      +Line: 1 Col: 206 Unexpected end tag (tr). Ignored.
      +Line: 1 Col: 214 This element (frame) has no end tag.
      +Line: 1 Col: 221 This element (area) has no end tag.
      +Line: 1 Col: 228 Unexpected end tag (link). Ignored.
      +Line: 1 Col: 236 This element (param) has no end tag.
      +Line: 1 Col: 241 This element (hr) has no end tag.
      +Line: 1 Col: 249 This element (input) has no end tag.
      +Line: 1 Col: 255 Unexpected end tag (col). Ignored.
      +Line: 1 Col: 262 Unexpected end tag (base). Ignored.
      +Line: 1 Col: 269 Unexpected end tag (meta). Ignored.
      +Line: 1 Col: 280 This element (basefont) has no end tag.
      +Line: 1 Col: 290 This element (bgsound) has no end tag.
      +Line: 1 Col: 298 This element (embed) has no end tag.
      +Line: 1 Col: 307 This element (spacer) has no end tag.
      +Line: 1 Col: 311 Unexpected end tag (p). Ignored.
      +Line: 1 Col: 316 End tag (dd) seen too early. Expected other end tag.
      +Line: 1 Col: 321 End tag (dt) seen too early. Expected other end tag.
      +Line: 1 Col: 331 Unexpected end tag (caption). Ignored.
      +Line: 1 Col: 342 Unexpected end tag (colgroup). Ignored.
      +Line: 1 Col: 350 Unexpected end tag (tbody). Ignored.
      +Line: 1 Col: 358 Unexpected end tag (tfoot). Ignored.
      +Line: 1 Col: 366 Unexpected end tag (thead). Ignored.
      +Line: 1 Col: 376 End tag (address) seen too early. Expected other end tag.
      +Line: 1 Col: 389 End tag (blockquote) seen too early. Expected other end tag.
      +Line: 1 Col: 398 End tag (center) seen too early. Expected other end tag.
      +Line: 1 Col: 404 Unexpected end tag (dir). Ignored.
      +Line: 1 Col: 410 End tag (div) seen too early. Expected other end tag.
      +Line: 1 Col: 415 End tag (dl) seen too early. Expected other end tag.
      +Line: 1 Col: 426 End tag (fieldset) seen too early. Expected other end tag.
      +Line: 1 Col: 436 End tag (listing) seen too early. Expected other end tag.
      +Line: 1 Col: 443 End tag (menu) seen too early. Expected other end tag.
      +Line: 1 Col: 448 End tag (ol) seen too early. Expected other end tag.
      +Line: 1 Col: 453 End tag (ul) seen too early. Expected other end tag.
      +Line: 1 Col: 458 End tag (li) seen too early. Expected other end tag.
      +Line: 1 Col: 465 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm.
      +Line: 1 Col: 471 This element (wbr) has no end tag.
      +Line: 1 Col: 487 End tag (button) seen too early. Expected other end tag.
      +Line: 1 Col: 497 End tag (marquee) seen too early. Expected other end tag.
      +Line: 1 Col: 506 End tag (object) seen too early. Expected other end tag.
      +Line: 1 Col: 524 Unexpected end tag (html). Ignored.
      +Line: 1 Col: 524 Unexpected end tag (frameset). Ignored.
      +Line: 1 Col: 531 Unexpected end tag (head). Ignored.
      +Line: 1 Col: 540 Unexpected end tag (iframe). Ignored.
      +Line: 1 Col: 548 This element (image) has no end tag.
      +Line: 1 Col: 558 This element (isindex) has no end tag.
      +Line: 1 Col: 568 Unexpected end tag (noembed). Ignored.
      +Line: 1 Col: 579 Unexpected end tag (noframes). Ignored.
      +Line: 1 Col: 590 Unexpected end tag (noscript). Ignored.
      +Line: 1 Col: 601 Unexpected end tag (optgroup). Ignored.
      +Line: 1 Col: 610 Unexpected end tag (option). Ignored.
      +Line: 1 Col: 622 Unexpected end tag (plaintext). Ignored.
      +Line: 1 Col: 633 Unexpected end tag (textarea). Ignored.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <br>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|     <p>
      +
      +#data
      +<frameset>
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
      +Line: 1 Col: 10 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <frameset>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat
      new file mode 100644
      index 00000000..4f8df86f
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat
      @@ -0,0 +1,799 @@
      +#data
      +<!DOCTYPE html><svg></svg>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +
      +#data
      +<!DOCTYPE html><svg></svg><![CDATA[a]]>
      +#errors
      +29: Bogus comment
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|     <!-- [CDATA[a]] -->
      +
      +#data
      +<!DOCTYPE html><body><svg></svg>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +
      +#data
      +<!DOCTYPE html><body><select><svg></svg></select>
      +#errors
      +35: Stray “svg” start tag.
      +42: Stray end tag “svg”
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +
      +#data
      +<!DOCTYPE html><body><select><option><svg></svg></option></select>
      +#errors
      +43: Stray “svg” start tag.
      +50: Stray end tag “svg”
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <option>
      +
      +#data
      +<!DOCTYPE html><body><table><svg></svg></table>
      +#errors
      +34: Start tag “svg” seen in “table”.
      +41: Stray end tag “svg”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|     <table>
      +
      +#data
      +<!DOCTYPE html><body><table><svg><g>foo</g></svg></table>
      +#errors
      +34: Start tag “svg” seen in “table”.
      +46: Stray end tag “g”.
      +53: Stray end tag “svg”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg g>
      +|         "foo"
      +|     <table>
      +
      +#data
      +<!DOCTYPE html><body><table><svg><g>foo</g><g>bar</g></svg></table>
      +#errors
      +34: Start tag “svg” seen in “table”.
      +46: Stray end tag “g”.
      +58: Stray end tag “g”.
      +65: Stray end tag “svg”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg g>
      +|         "foo"
      +|       <svg g>
      +|         "bar"
      +|     <table>
      +
      +#data
      +<!DOCTYPE html><body><table><tbody><svg><g>foo</g><g>bar</g></svg></tbody></table>
      +#errors
      +41: Start tag “svg” seen in “table”.
      +53: Stray end tag “g”.
      +65: Stray end tag “g”.
      +72: Stray end tag “svg”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg g>
      +|         "foo"
      +|       <svg g>
      +|         "bar"
      +|     <table>
      +|       <tbody>
      +
      +#data
      +<!DOCTYPE html><body><table><tbody><tr><svg><g>foo</g><g>bar</g></svg></tr></tbody></table>
      +#errors
      +45: Start tag “svg” seen in “table”.
      +57: Stray end tag “g”.
      +69: Stray end tag “g”.
      +76: Stray end tag “svg”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg g>
      +|         "foo"
      +|       <svg g>
      +|         "bar"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +
      +#data
      +<!DOCTYPE html><body><table><tbody><tr><td><svg><g>foo</g><g>bar</g></svg></td></tr></tbody></table>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <svg svg>
      +|               <svg g>
      +|                 "foo"
      +|               <svg g>
      +|                 "bar"
      +
      +#data
      +<!DOCTYPE html><body><table><tbody><tr><td><svg><g>foo</g><g>bar</g></svg><p>baz</td></tr></tbody></table>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <svg svg>
      +|               <svg g>
      +|                 "foo"
      +|               <svg g>
      +|                 "bar"
      +|             <p>
      +|               "baz"
      +
      +#data
      +<!DOCTYPE html><body><table><caption><svg><g>foo</g><g>bar</g></svg><p>baz</caption></table>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <caption>
      +|         <svg svg>
      +|           <svg g>
      +|             "foo"
      +|           <svg g>
      +|             "bar"
      +|         <p>
      +|           "baz"
      +
      +#data
      +<!DOCTYPE html><body><table><caption><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
      +#errors
      +70: HTML start tag “p” in a foreign namespace context.
      +81: “table” closed but “caption” was still open.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <caption>
      +|         <svg svg>
      +|           <svg g>
      +|             "foo"
      +|           <svg g>
      +|             "bar"
      +|         <p>
      +|           "baz"
      +|     <p>
      +|       "quux"
      +
      +#data
      +<!DOCTYPE html><body><table><caption><svg><g>foo</g><g>bar</g>baz</table><p>quux
      +#errors
      +78: “table” closed but “caption” was still open.
      +78: Unclosed elements on stack.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <caption>
      +|         <svg svg>
      +|           <svg g>
      +|             "foo"
      +|           <svg g>
      +|             "bar"
      +|           "baz"
      +|     <p>
      +|       "quux"
      +
      +#data
      +<!DOCTYPE html><body><table><colgroup><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
      +#errors
      +44: Start tag “svg” seen in “table”.
      +56: Stray end tag “g”.
      +68: Stray end tag “g”.
      +71: HTML start tag “p” in a foreign namespace context.
      +71: Start tag “p” seen in “table”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg g>
      +|         "foo"
      +|       <svg g>
      +|         "bar"
      +|     <p>
      +|       "baz"
      +|     <table>
      +|       <colgroup>
      +|     <p>
      +|       "quux"
      +
      +#data
      +<!DOCTYPE html><body><table><tr><td><select><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
      +#errors
      +50: Stray “svg” start tag.
      +54: Stray “g” start tag.
      +62: Stray end tag “g”
      +66: Stray “g” start tag.
      +74: Stray end tag “g”
      +77: Stray “p” start tag.
      +88: “table” end tag with “select” open.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <select>
      +|               "foobarbaz"
      +|     <p>
      +|       "quux"
      +
      +#data
      +<!DOCTYPE html><body><table><select><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
      +#errors
      +36: Start tag “select” seen in “table”.
      +42: Stray “svg” start tag.
      +46: Stray “g” start tag.
      +54: Stray end tag “g”
      +58: Stray “g” start tag.
      +66: Stray end tag “g”
      +69: Stray “p” start tag.
      +80: “table” end tag with “select” open.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       "foobarbaz"
      +|     <table>
      +|     <p>
      +|       "quux"
      +
      +#data
      +<!DOCTYPE html><body></body></html><svg><g>foo</g><g>bar</g><p>baz
      +#errors
      +41: Stray “svg” start tag.
      +68: HTML start tag “p” in a foreign namespace context.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg g>
      +|         "foo"
      +|       <svg g>
      +|         "bar"
      +|     <p>
      +|       "baz"
      +
      +#data
      +<!DOCTYPE html><body></body><svg><g>foo</g><g>bar</g><p>baz
      +#errors
      +34: Stray “svg” start tag.
      +61: HTML start tag “p” in a foreign namespace context.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg g>
      +|         "foo"
      +|       <svg g>
      +|         "bar"
      +|     <p>
      +|       "baz"
      +
      +#data
      +<!DOCTYPE html><frameset><svg><g></g><g></g><p><span>
      +#errors
      +31: Stray “svg” start tag.
      +35: Stray “g” start tag.
      +40: Stray end tag “g”
      +44: Stray “g” start tag.
      +49: Stray end tag “g”
      +52: Stray “p” start tag.
      +58: Stray “span” start tag.
      +58: End of file seen and there were open elements.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +
      +#data
      +<!DOCTYPE html><frameset></frameset><svg><g></g><g></g><p><span>
      +#errors
      +42: Stray “svg” start tag.
      +46: Stray “g” start tag.
      +51: Stray end tag “g”
      +55: Stray “g” start tag.
      +60: Stray end tag “g”
      +63: Stray “p” start tag.
      +69: Stray “span” start tag.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +
      +#data
      +<!DOCTYPE html><body xlink:href=foo><svg xlink:href=foo></svg>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     xlink:href="foo"
      +|     <svg svg>
      +|       xlink href="foo"
      +
      +#data
      +<!DOCTYPE html><body xlink:href=foo xml:lang=en><svg><g xml:lang=en xlink:href=foo></g></svg>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     xlink:href="foo"
      +|     xml:lang="en"
      +|     <svg svg>
      +|       <svg g>
      +|         xlink href="foo"
      +|         xml lang="en"
      +
      +#data
      +<!DOCTYPE html><body xlink:href=foo xml:lang=en><svg><g xml:lang=en xlink:href=foo /></svg>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     xlink:href="foo"
      +|     xml:lang="en"
      +|     <svg svg>
      +|       <svg g>
      +|         xlink href="foo"
      +|         xml lang="en"
      +
      +#data
      +<!DOCTYPE html><body xlink:href=foo xml:lang=en><svg><g xml:lang=en xlink:href=foo />bar</svg>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     xlink:href="foo"
      +|     xml:lang="en"
      +|     <svg svg>
      +|       <svg g>
      +|         xlink href="foo"
      +|         xml lang="en"
      +|       "bar"
      +
      +#data
      +<svg></path>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +
      +#data
      +<div><svg></div>a
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       <svg svg>
      +|     "a"
      +
      +#data
      +<div><svg><path></div>a
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       <svg svg>
      +|         <svg path>
      +|     "a"
      +
      +#data
      +<div><svg><path></svg><path>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       <svg svg>
      +|         <svg path>
      +|       <path>
      +
      +#data
      +<div><svg><path><foreignObject><math></div>a
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       <svg svg>
      +|         <svg path>
      +|           <svg foreignObject>
      +|             <math math>
      +|               "a"
      +
      +#data
      +<div><svg><path><foreignObject><p></div>a
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       <svg svg>
      +|         <svg path>
      +|           <svg foreignObject>
      +|             <p>
      +|               "a"
      +
      +#data
      +<!DOCTYPE html><svg><desc><div><svg><ul>a
      +#errors
      +40: HTML start tag “ul” in a foreign namespace context.
      +41: End of file in a foreign namespace context.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg desc>
      +|         <div>
      +|           <svg svg>
      +|           <ul>
      +|             "a"
      +
      +#data
      +<!DOCTYPE html><svg><desc><svg><ul>a
      +#errors
      +35: HTML start tag “ul” in a foreign namespace context.
      +36: End of file in a foreign namespace context.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg desc>
      +|         <svg svg>
      +|         <ul>
      +|           "a"
      +
      +#data
      +<!DOCTYPE html><p><svg><desc><p>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <svg svg>
      +|         <svg desc>
      +|           <p>
      +
      +#data
      +<!DOCTYPE html><p><svg><title><p>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <svg svg>
      +|         <svg title>
      +|           <p>
      +
      +#data
      +<div><svg><path><foreignObject><p></foreignObject><p>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       <svg svg>
      +|         <svg path>
      +|           <svg foreignObject>
      +|             <p>
      +|             <p>
      +
      +#data
      +<math><mi><div><object><div><span></span></div></object></div></mi><mi>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math mi>
      +|         <div>
      +|           <object>
      +|             <div>
      +|               <span>
      +|       <math mi>
      +
      +#data
      +<math><mi><svg><foreignObject><div><div></div></div></foreignObject></svg></mi><mi>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math mi>
      +|         <svg svg>
      +|           <svg foreignObject>
      +|             <div>
      +|               <div>
      +|       <math mi>
      +
      +#data
      +<svg><script></script><path>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg script>
      +|       <svg path>
      +
      +#data
      +<table><svg></svg><tr>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +
      +#data
      +<math><mi><mglyph>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math mi>
      +|         <math mglyph>
      +
      +#data
      +<math><mi><malignmark>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math mi>
      +|         <math malignmark>
      +
      +#data
      +<math><mo><mglyph>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math mo>
      +|         <math mglyph>
      +
      +#data
      +<math><mo><malignmark>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math mo>
      +|         <math malignmark>
      +
      +#data
      +<math><mn><mglyph>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math mn>
      +|         <math mglyph>
      +
      +#data
      +<math><mn><malignmark>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math mn>
      +|         <math malignmark>
      +
      +#data
      +<math><ms><mglyph>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math ms>
      +|         <math mglyph>
      +
      +#data
      +<math><ms><malignmark>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math ms>
      +|         <math malignmark>
      +
      +#data
      +<math><mtext><mglyph>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math mtext>
      +|         <math mglyph>
      +
      +#data
      +<math><mtext><malignmark>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math mtext>
      +|         <math malignmark>
      +
      +#data
      +<math><annotation-xml><svg></svg></annotation-xml><mi>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math annotation-xml>
      +|         <svg svg>
      +|       <math mi>
      +
      +#data
      +<math><annotation-xml><svg><foreignObject><div><math><mi></mi></math><span></span></div></foreignObject><path></path></svg></annotation-xml><mi>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math annotation-xml>
      +|         <svg svg>
      +|           <svg foreignObject>
      +|             <div>
      +|               <math math>
      +|                 <math mi>
      +|               <span>
      +|           <svg path>
      +|       <math mi>
      +
      +#data
      +<math><annotation-xml><svg><foreignObject><math><mi><svg></svg></mi><mo></mo></math><span></span></foreignObject><path></path></svg></annotation-xml><mi>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math annotation-xml>
      +|         <svg svg>
      +|           <svg foreignObject>
      +|             <math math>
      +|               <math mi>
      +|                 <svg svg>
      +|               <math mo>
      +|             <span>
      +|           <svg path>
      +|       <math mi>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat
      new file mode 100644
      index 00000000..638cde47
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat
      @@ -0,0 +1,482 @@
      +#data
      +<!DOCTYPE html><body><svg attributeName='' attributeType='' baseFrequency='' baseProfile='' calcMode='' clipPathUnits='' contentScriptType='' contentStyleType='' diffuseConstant='' edgeMode='' externalResourcesRequired='' filterRes='' filterUnits='' glyphRef='' gradientTransform='' gradientUnits='' kernelMatrix='' kernelUnitLength='' keyPoints='' keySplines='' keyTimes='' lengthAdjust='' limitingConeAngle='' markerHeight='' markerUnits='' markerWidth='' maskContentUnits='' maskUnits='' numOctaves='' pathLength='' patternContentUnits='' patternTransform='' patternUnits='' pointsAtX='' pointsAtY='' pointsAtZ='' preserveAlpha='' preserveAspectRatio='' primitiveUnits='' refX='' refY='' repeatCount='' repeatDur='' requiredExtensions='' requiredFeatures='' specularConstant='' specularExponent='' spreadMethod='' startOffset='' stdDeviation='' stitchTiles='' surfaceScale='' systemLanguage='' tableValues='' targetX='' targetY='' textLength='' viewBox='' viewTarget='' xChannelSelector='' yChannelSelector='' zoomAndPan=''></svg>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       attributeName=""
      +|       attributeType=""
      +|       baseFrequency=""
      +|       baseProfile=""
      +|       calcMode=""
      +|       clipPathUnits=""
      +|       contentScriptType=""
      +|       contentStyleType=""
      +|       diffuseConstant=""
      +|       edgeMode=""
      +|       externalResourcesRequired=""
      +|       filterRes=""
      +|       filterUnits=""
      +|       glyphRef=""
      +|       gradientTransform=""
      +|       gradientUnits=""
      +|       kernelMatrix=""
      +|       kernelUnitLength=""
      +|       keyPoints=""
      +|       keySplines=""
      +|       keyTimes=""
      +|       lengthAdjust=""
      +|       limitingConeAngle=""
      +|       markerHeight=""
      +|       markerUnits=""
      +|       markerWidth=""
      +|       maskContentUnits=""
      +|       maskUnits=""
      +|       numOctaves=""
      +|       pathLength=""
      +|       patternContentUnits=""
      +|       patternTransform=""
      +|       patternUnits=""
      +|       pointsAtX=""
      +|       pointsAtY=""
      +|       pointsAtZ=""
      +|       preserveAlpha=""
      +|       preserveAspectRatio=""
      +|       primitiveUnits=""
      +|       refX=""
      +|       refY=""
      +|       repeatCount=""
      +|       repeatDur=""
      +|       requiredExtensions=""
      +|       requiredFeatures=""
      +|       specularConstant=""
      +|       specularExponent=""
      +|       spreadMethod=""
      +|       startOffset=""
      +|       stdDeviation=""
      +|       stitchTiles=""
      +|       surfaceScale=""
      +|       systemLanguage=""
      +|       tableValues=""
      +|       targetX=""
      +|       targetY=""
      +|       textLength=""
      +|       viewBox=""
      +|       viewTarget=""
      +|       xChannelSelector=""
      +|       yChannelSelector=""
      +|       zoomAndPan=""
      +
      +#data
      +<!DOCTYPE html><BODY><SVG ATTRIBUTENAME='' ATTRIBUTETYPE='' BASEFREQUENCY='' BASEPROFILE='' CALCMODE='' CLIPPATHUNITS='' CONTENTSCRIPTTYPE='' CONTENTSTYLETYPE='' DIFFUSECONSTANT='' EDGEMODE='' EXTERNALRESOURCESREQUIRED='' FILTERRES='' FILTERUNITS='' GLYPHREF='' GRADIENTTRANSFORM='' GRADIENTUNITS='' KERNELMATRIX='' KERNELUNITLENGTH='' KEYPOINTS='' KEYSPLINES='' KEYTIMES='' LENGTHADJUST='' LIMITINGCONEANGLE='' MARKERHEIGHT='' MARKERUNITS='' MARKERWIDTH='' MASKCONTENTUNITS='' MASKUNITS='' NUMOCTAVES='' PATHLENGTH='' PATTERNCONTENTUNITS='' PATTERNTRANSFORM='' PATTERNUNITS='' POINTSATX='' POINTSATY='' POINTSATZ='' PRESERVEALPHA='' PRESERVEASPECTRATIO='' PRIMITIVEUNITS='' REFX='' REFY='' REPEATCOUNT='' REPEATDUR='' REQUIREDEXTENSIONS='' REQUIREDFEATURES='' SPECULARCONSTANT='' SPECULAREXPONENT='' SPREADMETHOD='' STARTOFFSET='' STDDEVIATION='' STITCHTILES='' SURFACESCALE='' SYSTEMLANGUAGE='' TABLEVALUES='' TARGETX='' TARGETY='' TEXTLENGTH='' VIEWBOX='' VIEWTARGET='' XCHANNELSELECTOR='' YCHANNELSELECTOR='' ZOOMANDPAN=''></SVG>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       attributeName=""
      +|       attributeType=""
      +|       baseFrequency=""
      +|       baseProfile=""
      +|       calcMode=""
      +|       clipPathUnits=""
      +|       contentScriptType=""
      +|       contentStyleType=""
      +|       diffuseConstant=""
      +|       edgeMode=""
      +|       externalResourcesRequired=""
      +|       filterRes=""
      +|       filterUnits=""
      +|       glyphRef=""
      +|       gradientTransform=""
      +|       gradientUnits=""
      +|       kernelMatrix=""
      +|       kernelUnitLength=""
      +|       keyPoints=""
      +|       keySplines=""
      +|       keyTimes=""
      +|       lengthAdjust=""
      +|       limitingConeAngle=""
      +|       markerHeight=""
      +|       markerUnits=""
      +|       markerWidth=""
      +|       maskContentUnits=""
      +|       maskUnits=""
      +|       numOctaves=""
      +|       pathLength=""
      +|       patternContentUnits=""
      +|       patternTransform=""
      +|       patternUnits=""
      +|       pointsAtX=""
      +|       pointsAtY=""
      +|       pointsAtZ=""
      +|       preserveAlpha=""
      +|       preserveAspectRatio=""
      +|       primitiveUnits=""
      +|       refX=""
      +|       refY=""
      +|       repeatCount=""
      +|       repeatDur=""
      +|       requiredExtensions=""
      +|       requiredFeatures=""
      +|       specularConstant=""
      +|       specularExponent=""
      +|       spreadMethod=""
      +|       startOffset=""
      +|       stdDeviation=""
      +|       stitchTiles=""
      +|       surfaceScale=""
      +|       systemLanguage=""
      +|       tableValues=""
      +|       targetX=""
      +|       targetY=""
      +|       textLength=""
      +|       viewBox=""
      +|       viewTarget=""
      +|       xChannelSelector=""
      +|       yChannelSelector=""
      +|       zoomAndPan=""
      +
      +#data
      +<!DOCTYPE html><body><svg attributename='' attributetype='' basefrequency='' baseprofile='' calcmode='' clippathunits='' contentscripttype='' contentstyletype='' diffuseconstant='' edgemode='' externalresourcesrequired='' filterres='' filterunits='' glyphref='' gradienttransform='' gradientunits='' kernelmatrix='' kernelunitlength='' keypoints='' keysplines='' keytimes='' lengthadjust='' limitingconeangle='' markerheight='' markerunits='' markerwidth='' maskcontentunits='' maskunits='' numoctaves='' pathlength='' patterncontentunits='' patterntransform='' patternunits='' pointsatx='' pointsaty='' pointsatz='' preservealpha='' preserveaspectratio='' primitiveunits='' refx='' refy='' repeatcount='' repeatdur='' requiredextensions='' requiredfeatures='' specularconstant='' specularexponent='' spreadmethod='' startoffset='' stddeviation='' stitchtiles='' surfacescale='' systemlanguage='' tablevalues='' targetx='' targety='' textlength='' viewbox='' viewtarget='' xchannelselector='' ychannelselector='' zoomandpan=''></svg>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       attributeName=""
      +|       attributeType=""
      +|       baseFrequency=""
      +|       baseProfile=""
      +|       calcMode=""
      +|       clipPathUnits=""
      +|       contentScriptType=""
      +|       contentStyleType=""
      +|       diffuseConstant=""
      +|       edgeMode=""
      +|       externalResourcesRequired=""
      +|       filterRes=""
      +|       filterUnits=""
      +|       glyphRef=""
      +|       gradientTransform=""
      +|       gradientUnits=""
      +|       kernelMatrix=""
      +|       kernelUnitLength=""
      +|       keyPoints=""
      +|       keySplines=""
      +|       keyTimes=""
      +|       lengthAdjust=""
      +|       limitingConeAngle=""
      +|       markerHeight=""
      +|       markerUnits=""
      +|       markerWidth=""
      +|       maskContentUnits=""
      +|       maskUnits=""
      +|       numOctaves=""
      +|       pathLength=""
      +|       patternContentUnits=""
      +|       patternTransform=""
      +|       patternUnits=""
      +|       pointsAtX=""
      +|       pointsAtY=""
      +|       pointsAtZ=""
      +|       preserveAlpha=""
      +|       preserveAspectRatio=""
      +|       primitiveUnits=""
      +|       refX=""
      +|       refY=""
      +|       repeatCount=""
      +|       repeatDur=""
      +|       requiredExtensions=""
      +|       requiredFeatures=""
      +|       specularConstant=""
      +|       specularExponent=""
      +|       spreadMethod=""
      +|       startOffset=""
      +|       stdDeviation=""
      +|       stitchTiles=""
      +|       surfaceScale=""
      +|       systemLanguage=""
      +|       tableValues=""
      +|       targetX=""
      +|       targetY=""
      +|       textLength=""
      +|       viewBox=""
      +|       viewTarget=""
      +|       xChannelSelector=""
      +|       yChannelSelector=""
      +|       zoomAndPan=""
      +
      +#data
      +<!DOCTYPE html><body><math attributeName='' attributeType='' baseFrequency='' baseProfile='' calcMode='' clipPathUnits='' contentScriptType='' contentStyleType='' diffuseConstant='' edgeMode='' externalResourcesRequired='' filterRes='' filterUnits='' glyphRef='' gradientTransform='' gradientUnits='' kernelMatrix='' kernelUnitLength='' keyPoints='' keySplines='' keyTimes='' lengthAdjust='' limitingConeAngle='' markerHeight='' markerUnits='' markerWidth='' maskContentUnits='' maskUnits='' numOctaves='' pathLength='' patternContentUnits='' patternTransform='' patternUnits='' pointsAtX='' pointsAtY='' pointsAtZ='' preserveAlpha='' preserveAspectRatio='' primitiveUnits='' refX='' refY='' repeatCount='' repeatDur='' requiredExtensions='' requiredFeatures='' specularConstant='' specularExponent='' spreadMethod='' startOffset='' stdDeviation='' stitchTiles='' surfaceScale='' systemLanguage='' tableValues='' targetX='' targetY='' textLength='' viewBox='' viewTarget='' xChannelSelector='' yChannelSelector='' zoomAndPan=''></math>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       attributename=""
      +|       attributetype=""
      +|       basefrequency=""
      +|       baseprofile=""
      +|       calcmode=""
      +|       clippathunits=""
      +|       contentscripttype=""
      +|       contentstyletype=""
      +|       diffuseconstant=""
      +|       edgemode=""
      +|       externalresourcesrequired=""
      +|       filterres=""
      +|       filterunits=""
      +|       glyphref=""
      +|       gradienttransform=""
      +|       gradientunits=""
      +|       kernelmatrix=""
      +|       kernelunitlength=""
      +|       keypoints=""
      +|       keysplines=""
      +|       keytimes=""
      +|       lengthadjust=""
      +|       limitingconeangle=""
      +|       markerheight=""
      +|       markerunits=""
      +|       markerwidth=""
      +|       maskcontentunits=""
      +|       maskunits=""
      +|       numoctaves=""
      +|       pathlength=""
      +|       patterncontentunits=""
      +|       patterntransform=""
      +|       patternunits=""
      +|       pointsatx=""
      +|       pointsaty=""
      +|       pointsatz=""
      +|       preservealpha=""
      +|       preserveaspectratio=""
      +|       primitiveunits=""
      +|       refx=""
      +|       refy=""
      +|       repeatcount=""
      +|       repeatdur=""
      +|       requiredextensions=""
      +|       requiredfeatures=""
      +|       specularconstant=""
      +|       specularexponent=""
      +|       spreadmethod=""
      +|       startoffset=""
      +|       stddeviation=""
      +|       stitchtiles=""
      +|       surfacescale=""
      +|       systemlanguage=""
      +|       tablevalues=""
      +|       targetx=""
      +|       targety=""
      +|       textlength=""
      +|       viewbox=""
      +|       viewtarget=""
      +|       xchannelselector=""
      +|       ychannelselector=""
      +|       zoomandpan=""
      +
      +#data
      +<!DOCTYPE html><body><svg><altGlyph /><altGlyphDef /><altGlyphItem /><animateColor /><animateMotion /><animateTransform /><clipPath /><feBlend /><feColorMatrix /><feComponentTransfer /><feComposite /><feConvolveMatrix /><feDiffuseLighting /><feDisplacementMap /><feDistantLight /><feFlood /><feFuncA /><feFuncB /><feFuncG /><feFuncR /><feGaussianBlur /><feImage /><feMerge /><feMergeNode /><feMorphology /><feOffset /><fePointLight /><feSpecularLighting /><feSpotLight /><feTile /><feTurbulence /><foreignObject /><glyphRef /><linearGradient /><radialGradient /><textPath /></svg>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg altGlyph>
      +|       <svg altGlyphDef>
      +|       <svg altGlyphItem>
      +|       <svg animateColor>
      +|       <svg animateMotion>
      +|       <svg animateTransform>
      +|       <svg clipPath>
      +|       <svg feBlend>
      +|       <svg feColorMatrix>
      +|       <svg feComponentTransfer>
      +|       <svg feComposite>
      +|       <svg feConvolveMatrix>
      +|       <svg feDiffuseLighting>
      +|       <svg feDisplacementMap>
      +|       <svg feDistantLight>
      +|       <svg feFlood>
      +|       <svg feFuncA>
      +|       <svg feFuncB>
      +|       <svg feFuncG>
      +|       <svg feFuncR>
      +|       <svg feGaussianBlur>
      +|       <svg feImage>
      +|       <svg feMerge>
      +|       <svg feMergeNode>
      +|       <svg feMorphology>
      +|       <svg feOffset>
      +|       <svg fePointLight>
      +|       <svg feSpecularLighting>
      +|       <svg feSpotLight>
      +|       <svg feTile>
      +|       <svg feTurbulence>
      +|       <svg foreignObject>
      +|       <svg glyphRef>
      +|       <svg linearGradient>
      +|       <svg radialGradient>
      +|       <svg textPath>
      +
      +#data
      +<!DOCTYPE html><body><svg><altglyph /><altglyphdef /><altglyphitem /><animatecolor /><animatemotion /><animatetransform /><clippath /><feblend /><fecolormatrix /><fecomponenttransfer /><fecomposite /><feconvolvematrix /><fediffuselighting /><fedisplacementmap /><fedistantlight /><feflood /><fefunca /><fefuncb /><fefuncg /><fefuncr /><fegaussianblur /><feimage /><femerge /><femergenode /><femorphology /><feoffset /><fepointlight /><fespecularlighting /><fespotlight /><fetile /><feturbulence /><foreignobject /><glyphref /><lineargradient /><radialgradient /><textpath /></svg>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg altGlyph>
      +|       <svg altGlyphDef>
      +|       <svg altGlyphItem>
      +|       <svg animateColor>
      +|       <svg animateMotion>
      +|       <svg animateTransform>
      +|       <svg clipPath>
      +|       <svg feBlend>
      +|       <svg feColorMatrix>
      +|       <svg feComponentTransfer>
      +|       <svg feComposite>
      +|       <svg feConvolveMatrix>
      +|       <svg feDiffuseLighting>
      +|       <svg feDisplacementMap>
      +|       <svg feDistantLight>
      +|       <svg feFlood>
      +|       <svg feFuncA>
      +|       <svg feFuncB>
      +|       <svg feFuncG>
      +|       <svg feFuncR>
      +|       <svg feGaussianBlur>
      +|       <svg feImage>
      +|       <svg feMerge>
      +|       <svg feMergeNode>
      +|       <svg feMorphology>
      +|       <svg feOffset>
      +|       <svg fePointLight>
      +|       <svg feSpecularLighting>
      +|       <svg feSpotLight>
      +|       <svg feTile>
      +|       <svg feTurbulence>
      +|       <svg foreignObject>
      +|       <svg glyphRef>
      +|       <svg linearGradient>
      +|       <svg radialGradient>
      +|       <svg textPath>
      +
      +#data
      +<!DOCTYPE html><BODY><SVG><ALTGLYPH /><ALTGLYPHDEF /><ALTGLYPHITEM /><ANIMATECOLOR /><ANIMATEMOTION /><ANIMATETRANSFORM /><CLIPPATH /><FEBLEND /><FECOLORMATRIX /><FECOMPONENTTRANSFER /><FECOMPOSITE /><FECONVOLVEMATRIX /><FEDIFFUSELIGHTING /><FEDISPLACEMENTMAP /><FEDISTANTLIGHT /><FEFLOOD /><FEFUNCA /><FEFUNCB /><FEFUNCG /><FEFUNCR /><FEGAUSSIANBLUR /><FEIMAGE /><FEMERGE /><FEMERGENODE /><FEMORPHOLOGY /><FEOFFSET /><FEPOINTLIGHT /><FESPECULARLIGHTING /><FESPOTLIGHT /><FETILE /><FETURBULENCE /><FOREIGNOBJECT /><GLYPHREF /><LINEARGRADIENT /><RADIALGRADIENT /><TEXTPATH /></SVG>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg altGlyph>
      +|       <svg altGlyphDef>
      +|       <svg altGlyphItem>
      +|       <svg animateColor>
      +|       <svg animateMotion>
      +|       <svg animateTransform>
      +|       <svg clipPath>
      +|       <svg feBlend>
      +|       <svg feColorMatrix>
      +|       <svg feComponentTransfer>
      +|       <svg feComposite>
      +|       <svg feConvolveMatrix>
      +|       <svg feDiffuseLighting>
      +|       <svg feDisplacementMap>
      +|       <svg feDistantLight>
      +|       <svg feFlood>
      +|       <svg feFuncA>
      +|       <svg feFuncB>
      +|       <svg feFuncG>
      +|       <svg feFuncR>
      +|       <svg feGaussianBlur>
      +|       <svg feImage>
      +|       <svg feMerge>
      +|       <svg feMergeNode>
      +|       <svg feMorphology>
      +|       <svg feOffset>
      +|       <svg fePointLight>
      +|       <svg feSpecularLighting>
      +|       <svg feSpotLight>
      +|       <svg feTile>
      +|       <svg feTurbulence>
      +|       <svg foreignObject>
      +|       <svg glyphRef>
      +|       <svg linearGradient>
      +|       <svg radialGradient>
      +|       <svg textPath>
      +
      +#data
      +<!DOCTYPE html><body><math><altGlyph /><altGlyphDef /><altGlyphItem /><animateColor /><animateMotion /><animateTransform /><clipPath /><feBlend /><feColorMatrix /><feComponentTransfer /><feComposite /><feConvolveMatrix /><feDiffuseLighting /><feDisplacementMap /><feDistantLight /><feFlood /><feFuncA /><feFuncB /><feFuncG /><feFuncR /><feGaussianBlur /><feImage /><feMerge /><feMergeNode /><feMorphology /><feOffset /><fePointLight /><feSpecularLighting /><feSpotLight /><feTile /><feTurbulence /><foreignObject /><glyphRef /><linearGradient /><radialGradient /><textPath /></math>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math altglyph>
      +|       <math altglyphdef>
      +|       <math altglyphitem>
      +|       <math animatecolor>
      +|       <math animatemotion>
      +|       <math animatetransform>
      +|       <math clippath>
      +|       <math feblend>
      +|       <math fecolormatrix>
      +|       <math fecomponenttransfer>
      +|       <math fecomposite>
      +|       <math feconvolvematrix>
      +|       <math fediffuselighting>
      +|       <math fedisplacementmap>
      +|       <math fedistantlight>
      +|       <math feflood>
      +|       <math fefunca>
      +|       <math fefuncb>
      +|       <math fefuncg>
      +|       <math fefuncr>
      +|       <math fegaussianblur>
      +|       <math feimage>
      +|       <math femerge>
      +|       <math femergenode>
      +|       <math femorphology>
      +|       <math feoffset>
      +|       <math fepointlight>
      +|       <math fespecularlighting>
      +|       <math fespotlight>
      +|       <math fetile>
      +|       <math feturbulence>
      +|       <math foreignobject>
      +|       <math glyphref>
      +|       <math lineargradient>
      +|       <math radialgradient>
      +|       <math textpath>
      +
      +#data
      +<!DOCTYPE html><body><svg><solidColor /></svg>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg solidcolor>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat
      new file mode 100644
      index 00000000..63107d27
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat
      @@ -0,0 +1,62 @@
      +#data
      +<!DOCTYPE html><body><p>foo<math><mtext><i>baz</i></mtext><annotation-xml><svg><desc><b>eggs</b></desc><g><foreignObject><P>spam<TABLE><tr><td><img></td></table></foreignObject></g><g>quux</g></svg></annotation-xml></math>bar
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       "foo"
      +|       <math math>
      +|         <math mtext>
      +|           <i>
      +|             "baz"
      +|         <math annotation-xml>
      +|           <svg svg>
      +|             <svg desc>
      +|               <b>
      +|                 "eggs"
      +|             <svg g>
      +|               <svg foreignObject>
      +|                 <p>
      +|                   "spam"
      +|                 <table>
      +|                   <tbody>
      +|                     <tr>
      +|                       <td>
      +|                         <img>
      +|             <svg g>
      +|               "quux"
      +|       "bar"
      +
      +#data
      +<!DOCTYPE html><body>foo<math><mtext><i>baz</i></mtext><annotation-xml><svg><desc><b>eggs</b></desc><g><foreignObject><P>spam<TABLE><tr><td><img></td></table></foreignObject></g><g>quux</g></svg></annotation-xml></math>bar
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "foo"
      +|     <math math>
      +|       <math mtext>
      +|         <i>
      +|           "baz"
      +|       <math annotation-xml>
      +|         <svg svg>
      +|           <svg desc>
      +|             <b>
      +|               "eggs"
      +|           <svg g>
      +|             <svg foreignObject>
      +|               <p>
      +|                 "spam"
      +|               <table>
      +|                 <tbody>
      +|                   <tr>
      +|                     <td>
      +|                       <img>
      +|           <svg g>
      +|             "quux"
      +|     "bar"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat
      new file mode 100644
      index 00000000..b8713f88
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat
      @@ -0,0 +1,74 @@
      +#data
      +<!DOCTYPE html><html><body><xyz:abc></xyz:abc>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <xyz:abc>
      +
      +#data
      +<!DOCTYPE html><html><body><xyz:abc></xyz:abc><span></span>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <xyz:abc>
      +|     <span>
      +
      +#data
      +<!DOCTYPE html><html><html abc:def=gh><xyz:abc></xyz:abc>
      +#errors
      +15: Unexpected start tag html
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   abc:def="gh"
      +|   <head>
      +|   <body>
      +|     <xyz:abc>
      +
      +#data
      +<!DOCTYPE html><html xml:lang=bar><html xml:lang=foo>
      +#errors
      +15: Unexpected start tag html
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   xml:lang="bar"
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><html 123=456>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   123="456"
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><html 123=456><html 789=012>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   123="456"
      +|   789="012"
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><html><body 789=012>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     789="012"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat
      new file mode 100644
      index 00000000..6ce1c0d1
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat
      @@ -0,0 +1,208 @@
      +#data
      +<!DOCTYPE html><p><b><i><u></p> <p>X
      +#errors
      +Line: 1 Col: 31 Unexpected end tag (p). Ignored.
      +Line: 1 Col: 36 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <b>
      +|         <i>
      +|           <u>
      +|     <b>
      +|       <i>
      +|         <u>
      +|           " "
      +|           <p>
      +|             "X"
      +
      +#data
      +<p><b><i><u></p>
      +<p>X
      +#errors
      +Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
      +Line: 1 Col: 16 Unexpected end tag (p). Ignored.
      +Line: 2 Col: 4 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <b>
      +|         <i>
      +|           <u>
      +|     <b>
      +|       <i>
      +|         <u>
      +|           "
      +"
      +|           <p>
      +|             "X"
      +
      +#data
      +<!doctype html></html> <head>
      +#errors
      +Line: 1 Col: 22 Unexpected end tag (html) after the (implied) root element.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     " "
      +
      +#data
      +<!doctype html></body><meta>
      +#errors
      +Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <meta>
      +
      +#data
      +<html></html><!-- foo -->
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
      +Line: 1 Col: 13 Unexpected end tag (html) after the (implied) root element.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +| <!--  foo  -->
      +
      +#data
      +<!doctype html></body><title>X</title>
      +#errors
      +Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <title>
      +|       "X"
      +
      +#data
      +<!doctype html><table> X<meta></table>
      +#errors
      +Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode.
      +Line: 1 Col: 30 Unexpected start tag (meta) in table context caused voodoo mode.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     " X"
      +|     <meta>
      +|     <table>
      +
      +#data
      +<!doctype html><table> x</table>
      +#errors
      +Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     " x"
      +|     <table>
      +
      +#data
      +<!doctype html><table> x </table>
      +#errors
      +Line: 1 Col: 25 Unexpected non-space characters in table context caused voodoo mode.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     " x "
      +|     <table>
      +
      +#data
      +<!doctype html><table><tr> x</table>
      +#errors
      +Line: 1 Col: 28 Unexpected non-space characters in table context caused voodoo mode.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     " x"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +
      +#data
      +<!doctype html><table>X<style> <tr>x </style> </table>
      +#errors
      +Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "X"
      +|     <table>
      +|       <style>
      +|         " <tr>x "
      +|       " "
      +
      +#data
      +<!doctype html><div><table><a>foo</a> <tr><td>bar</td> </tr></table></div>
      +#errors
      +Line: 1 Col: 30 Unexpected start tag (a) in table context caused voodoo mode.
      +Line: 1 Col: 37 Unexpected end tag (a) in table context caused voodoo mode.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       <a>
      +|         "foo"
      +|       <table>
      +|         " "
      +|         <tbody>
      +|           <tr>
      +|             <td>
      +|               "bar"
      +|             " "
      +
      +#data
      +<frame></frame></frame><frameset><frame><frameset><frame></frameset><noframes></frameset><noframes>
      +#errors
      +6: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
      +13: Stray start tag “frame”.
      +21: Stray end tag “frame”.
      +29: Stray end tag “frame”.
      +39: “frameset” start tag after “body” already open.
      +105: End of file seen inside an [R]CDATA element.
      +105: End of file seen and there were open elements.
      +XXX: These errors are wrong, please fix me!
      +#document
      +| <html>
      +|   <head>
      +|   <frameset>
      +|     <frame>
      +|     <frameset>
      +|       <frame>
      +|     <noframes>
      +|       "</frameset><noframes>"
      +
      +#data
      +<!DOCTYPE html><object></html>
      +#errors
      +1: Expected closing tag. Unexpected end of file
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <object>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat
      new file mode 100644
      index 00000000..c8ef66f0
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat
      @@ -0,0 +1,2299 @@
      +#data
      +<!doctype html><script>
      +#errors
      +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|   <body>
      +
      +#data
      +<!doctype html><script>a
      +#errors
      +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "a"
      +|   <body>
      +
      +#data
      +<!doctype html><script><
      +#errors
      +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<"
      +|   <body>
      +
      +#data
      +<!doctype html><script></
      +#errors
      +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</"
      +|   <body>
      +
      +#data
      +<!doctype html><script></S
      +#errors
      +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</S"
      +|   <body>
      +
      +#data
      +<!doctype html><script></SC
      +#errors
      +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</SC"
      +|   <body>
      +
      +#data
      +<!doctype html><script></SCR
      +#errors
      +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</SCR"
      +|   <body>
      +
      +#data
      +<!doctype html><script></SCRI
      +#errors
      +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</SCRI"
      +|   <body>
      +
      +#data
      +<!doctype html><script></SCRIP
      +#errors
      +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</SCRIP"
      +|   <body>
      +
      +#data
      +<!doctype html><script></SCRIPT
      +#errors
      +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</SCRIPT"
      +|   <body>
      +
      +#data
      +<!doctype html><script></SCRIPT 
      +#errors
      +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|   <body>
      +
      +#data
      +<!doctype html><script></s
      +#errors
      +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</s"
      +|   <body>
      +
      +#data
      +<!doctype html><script></sc
      +#errors
      +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</sc"
      +|   <body>
      +
      +#data
      +<!doctype html><script></scr
      +#errors
      +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</scr"
      +|   <body>
      +
      +#data
      +<!doctype html><script></scri
      +#errors
      +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</scri"
      +|   <body>
      +
      +#data
      +<!doctype html><script></scrip
      +#errors
      +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</scrip"
      +|   <body>
      +
      +#data
      +<!doctype html><script></script
      +#errors
      +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</script"
      +|   <body>
      +
      +#data
      +<!doctype html><script></script 
      +#errors
      +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|   <body>
      +
      +#data
      +<!doctype html><script><!
      +#errors
      +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!a
      +#errors
      +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!a"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!-
      +#errors
      +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!-"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!-a
      +#errors
      +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!-a"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--
      +#errors
      +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--a
      +#errors
      +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--a"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<
      +#errors
      +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<a
      +#errors
      +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<a"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--</
      +#errors
      +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--</"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--</script
      +#errors
      +Line: 1 Col: 35 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--</script"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--</script 
      +#errors
      +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<s
      +#errors
      +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<s"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script
      +#errors
      +Line: 1 Col: 34 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script 
      +#errors
      +Line: 1 Col: 35 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script "
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script <
      +#errors
      +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script <"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script <a
      +#errors
      +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script <a"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </
      +#errors
      +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </s
      +#errors
      +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </s"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </script
      +#errors
      +Line: 1 Col: 43 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </scripta
      +#errors
      +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </scripta"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </script 
      +#errors
      +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script "
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </script>
      +#errors
      +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script>"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </script/
      +#errors
      +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script/"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </script <
      +#errors
      +Line: 1 Col: 45 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script <"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </script <a
      +#errors
      +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script <a"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </script </
      +#errors
      +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script </"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </script </script
      +#errors
      +Line: 1 Col: 52 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script </script"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </script </script 
      +#errors
      +Line: 1 Col: 53 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script "
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </script </script/
      +#errors
      +Line: 1 Col: 53 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script "
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script </script </script>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script "
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script -
      +#errors
      +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script -a
      +#errors
      +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -a"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script -<
      +#errors
      +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -<"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script --
      +#errors
      +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script --"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script --a
      +#errors
      +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script --a"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script --<
      +#errors
      +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script --<"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script -->
      +#errors
      +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -->"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script --><
      +#errors
      +Line: 1 Col: 39 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script --><"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script --></
      +#errors
      +Line: 1 Col: 40 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script --></"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script --></script
      +#errors
      +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script --></script"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script --></script 
      +#errors
      +Line: 1 Col: 47 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -->"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script --></script/
      +#errors
      +Line: 1 Col: 47 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -->"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script --></script>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -->"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script><\/script>--></script>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script><\/script>-->"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script></scr'+'ipt>--></script>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></scr'+'ipt>-->"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script></script><script></script></script>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></script><script></script>"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script></script><script></script>--><!--</script>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></script><script></script>--><!--"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script></script><script></script>-- ></script>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></script><script></script>-- >"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script></script><script></script>- -></script>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></script><script></script>- ->"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script></script><script></script>- - ></script>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></script><script></script>- - >"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script></script><script></script>-></script>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></script><script></script>->"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<script>--!></script>X
      +#errors
      +Line: 1 Col: 49 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script>--!></script>X"
      +|   <body>
      +
      +#data
      +<!doctype html><script><!--<scr'+'ipt></script>--></script>
      +#errors
      +Line: 1 Col: 59 Unexpected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<scr'+'ipt>"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<!doctype html><script><!--<script></scr'+'ipt></script>X
      +#errors
      +Line: 1 Col: 57 Unexpected end of file. Expected end tag (script).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></scr'+'ipt></script>X"
      +|   <body>
      +
      +#data
      +<!doctype html><style><!--<style></style>--></style>
      +#errors
      +Line: 1 Col: 52 Unexpected end tag (style).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--<style>"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<!doctype html><style><!--</style>X
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--"
      +|   <body>
      +|     "X"
      +
      +#data
      +<!doctype html><style><!--...</style>...--></style>
      +#errors
      +Line: 1 Col: 51 Unexpected end tag (style).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--..."
      +|   <body>
      +|     "...-->"
      +
      +#data
      +<!doctype html><style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>"
      +|   <body>
      +|     "X"
      +
      +#data
      +<!doctype html><style><!--...<style><!--...--!></style>--></style>
      +#errors
      +Line: 1 Col: 66 Unexpected end tag (style).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--...<style><!--...--!>"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<!doctype html><style><!--...</style><!-- --><style>@import ...</style>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--..."
      +|     <!--   -->
      +|     <style>
      +|       "@import ..."
      +|   <body>
      +
      +#data
      +<!doctype html><style>...<style><!--...</style><!-- --></style>
      +#errors
      +Line: 1 Col: 63 Unexpected end tag (style).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <style>
      +|       "...<style><!--..."
      +|     <!--   -->
      +|   <body>
      +
      +#data
      +<!doctype html><style>...<!--[if IE]><style>...</style>X
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <style>
      +|       "...<!--[if IE]><style>..."
      +|   <body>
      +|     "X"
      +
      +#data
      +<!doctype html><title><!--<title></title>--></title>
      +#errors
      +Line: 1 Col: 52 Unexpected end tag (title).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <title>
      +|       "<!--<title>"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<!doctype html><title>&lt;/title></title>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <title>
      +|       "</title>"
      +|   <body>
      +
      +#data
      +<!doctype html><title>foo/title><link></head><body>X
      +#errors
      +Line: 1 Col: 52 Unexpected end of file. Expected end tag (title).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <title>
      +|       "foo/title><link></head><body>X"
      +|   <body>
      +
      +#data
      +<!doctype html><noscript><!--<noscript></noscript>--></noscript>
      +#errors
      +Line: 1 Col: 64 Unexpected end tag (noscript).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <noscript>
      +|       "<!--<noscript>"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<!doctype html><noscript><!--</noscript>X<noscript>--></noscript>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <noscript>
      +|       "<!--"
      +|   <body>
      +|     "X"
      +|     <noscript>
      +|       "-->"
      +
      +#data
      +<!doctype html><noscript><iframe></noscript>X
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <noscript>
      +|       "<iframe>"
      +|   <body>
      +|     "X"
      +
      +#data
      +<!doctype html><noframes><!--<noframes></noframes>--></noframes>
      +#errors
      +Line: 1 Col: 64 Unexpected end tag (noframes).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <noframes>
      +|       "<!--<noframes>"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<!doctype html><noframes><body><script><!--...</script></body></noframes></html>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <noframes>
      +|       "<body><script><!--...</script></body>"
      +|   <body>
      +
      +#data
      +<!doctype html><textarea><!--<textarea></textarea>--></textarea>
      +#errors
      +Line: 1 Col: 64 Unexpected end tag (textarea).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "<!--<textarea>"
      +|     "-->"
      +
      +#data
      +<!doctype html><textarea>&lt;/textarea></textarea>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "</textarea>"
      +
      +#data
      +<!doctype html><textarea>&lt;</textarea>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "<"
      +
      +#data
      +<!doctype html><textarea>a&lt;b</textarea>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "a<b"
      +
      +#data
      +<!doctype html><iframe><!--<iframe></iframe>--></iframe>
      +#errors
      +Line: 1 Col: 56 Unexpected end tag (iframe).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <iframe>
      +|       "<!--<iframe>"
      +|     "-->"
      +
      +#data
      +<!doctype html><iframe>...<!--X->...<!--/X->...</iframe>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <iframe>
      +|       "...<!--X->...<!--/X->..."
      +
      +#data
      +<!doctype html><xmp><!--<xmp></xmp>--></xmp>
      +#errors
      +Line: 1 Col: 44 Unexpected end tag (xmp).
      +#document
      +| 
      +| 
      +|   
      +|   
      +|     
      +|       "<!--<xmp>"
      +|     "-->"
      +
      +#data
      +<!doctype html><noembed><!--<noembed></noembed>--></noembed>
      +#errors
      +Line: 1 Col: 60 Unexpected end tag (noembed).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <noembed>
      +|       "<!--<noembed>"
      +|     "-->"
      +
      +#data
      +<script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 8 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|   <body>
      +
      +#data
      +<script>a
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 9 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "a"
      +|   <body>
      +
      +#data
      +<script><
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 9 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<"
      +|   <body>
      +
      +#data
      +<script></
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 10 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</"
      +|   <body>
      +
      +#data
      +<script></S
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</S"
      +|   <body>
      +
      +#data
      +<script></SC
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</SC"
      +|   <body>
      +
      +#data
      +<script></SCR
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</SCR"
      +|   <body>
      +
      +#data
      +<script></SCRI
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</SCRI"
      +|   <body>
      +
      +#data
      +<script></SCRIP
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 15 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</SCRIP"
      +|   <body>
      +
      +#data
      +<script></SCRIPT
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 16 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</SCRIPT"
      +|   <body>
      +
      +#data
      +<script></SCRIPT 
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 17 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|   <body>
      +
      +#data
      +<script></s
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</s"
      +|   <body>
      +
      +#data
      +<script></sc
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</sc"
      +|   <body>
      +
      +#data
      +<script></scr
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</scr"
      +|   <body>
      +
      +#data
      +<script></scri
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</scri"
      +|   <body>
      +
      +#data
      +<script></scrip
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 15 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</scrip"
      +|   <body>
      +
      +#data
      +<script></script
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 16 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</script"
      +|   <body>
      +
      +#data
      +<script></script 
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 17 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|   <body>
      +
      +#data
      +<script><!
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 10 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!"
      +|   <body>
      +
      +#data
      +<script><!a
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!a"
      +|   <body>
      +
      +#data
      +<script><!-
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!-"
      +|   <body>
      +
      +#data
      +<script><!-a
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!-a"
      +|   <body>
      +
      +#data
      +<script><!--
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--"
      +|   <body>
      +
      +#data
      +<script><!--a
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--a"
      +|   <body>
      +
      +#data
      +<script><!--<
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<"
      +|   <body>
      +
      +#data
      +<script><!--<a
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<a"
      +|   <body>
      +
      +#data
      +<script><!--</
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--</"
      +|   <body>
      +
      +#data
      +<script><!--</script
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 20 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--</script"
      +|   <body>
      +
      +#data
      +<script><!--</script 
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--"
      +|   <body>
      +
      +#data
      +<script><!--<s
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<s"
      +|   <body>
      +
      +#data
      +<script><!--<script
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 19 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script"
      +|   <body>
      +
      +#data
      +<script><!--<script 
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 20 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script "
      +|   <body>
      +
      +#data
      +<script><!--<script <
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script <"
      +|   <body>
      +
      +#data
      +<script><!--<script <a
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script <a"
      +|   <body>
      +
      +#data
      +<script><!--<script </
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </"
      +|   <body>
      +
      +#data
      +<script><!--<script </s
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </s"
      +|   <body>
      +
      +#data
      +<script><!--<script </script
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script"
      +|   <body>
      +
      +#data
      +<script><!--<script </scripta
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </scripta"
      +|   <body>
      +
      +#data
      +<script><!--<script </script 
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script "
      +|   <body>
      +
      +#data
      +<script><!--<script </script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script>"
      +|   <body>
      +
      +#data
      +<script><!--<script </script/
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script/"
      +|   <body>
      +
      +#data
      +<script><!--<script </script <
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script <"
      +|   <body>
      +
      +#data
      +<script><!--<script </script <a
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script <a"
      +|   <body>
      +
      +#data
      +<script><!--<script </script </
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script </"
      +|   <body>
      +
      +#data
      +<script><!--<script </script </script
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script </script"
      +|   <body>
      +
      +#data
      +<script><!--<script </script </script 
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script "
      +|   <body>
      +
      +#data
      +<script><!--<script </script </script/
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script "
      +|   <body>
      +
      +#data
      +<script><!--<script </script </script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script </script "
      +|   <body>
      +
      +#data
      +<script><!--<script -
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -"
      +|   <body>
      +
      +#data
      +<script><!--<script -a
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -a"
      +|   <body>
      +
      +#data
      +<script><!--<script --
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script --"
      +|   <body>
      +
      +#data
      +<script><!--<script --a
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script --a"
      +|   <body>
      +
      +#data
      +<script><!--<script -->
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -->"
      +|   <body>
      +
      +#data
      +<script><!--<script --><
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script --><"
      +|   <body>
      +
      +#data
      +<script><!--<script --></
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script --></"
      +|   <body>
      +
      +#data
      +<script><!--<script --></script
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script --></script"
      +|   <body>
      +
      +#data
      +<script><!--<script --></script 
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -->"
      +|   <body>
      +
      +#data
      +<script><!--<script --></script/
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -->"
      +|   <body>
      +
      +#data
      +<script><!--<script --></script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script -->"
      +|   <body>
      +
      +#data
      +<script><!--<script><\/script>--></script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script><\/script>-->"
      +|   <body>
      +
      +#data
      +<script><!--<script></scr'+'ipt>--></script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></scr'+'ipt>-->"
      +|   <body>
      +
      +#data
      +<script><!--<script></script><script></script></script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></script><script></script>"
      +|   <body>
      +
      +#data
      +<script><!--<script></script><script></script>--><!--</script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></script><script></script>--><!--"
      +|   <body>
      +
      +#data
      +<script><!--<script></script><script></script>-- ></script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></script><script></script>-- >"
      +|   <body>
      +
      +#data
      +<script><!--<script></script><script></script>- -></script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></script><script></script>- ->"
      +|   <body>
      +
      +#data
      +<script><!--<script></script><script></script>- - ></script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></script><script></script>- - >"
      +|   <body>
      +
      +#data
      +<script><!--<script></script><script></script>-></script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></script><script></script>->"
      +|   <body>
      +
      +#data
      +<script><!--<script>--!></script>X
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 34 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script>--!></script>X"
      +|   <body>
      +
      +#data
      +<script><!--<scr'+'ipt></script>--></script>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 44 Unexpected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<scr'+'ipt>"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<script><!--<script></scr'+'ipt></script>X
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 42 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "<!--<script></scr'+'ipt></script>X"
      +|   <body>
      +
      +#data
      +<style><!--<style></style>--></style>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +Line: 1 Col: 37 Unexpected end tag (style).
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--<style>"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<style><!--</style>X
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--"
      +|   <body>
      +|     "X"
      +
      +#data
      +<style><!--...</style>...--></style>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +Line: 1 Col: 36 Unexpected end tag (style).
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--..."
      +|   <body>
      +|     "...-->"
      +
      +#data
      +<style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>"
      +|   <body>
      +|     "X"
      +
      +#data
      +<style><!--...<style><!--...--!></style>--></style>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +Line: 1 Col: 51 Unexpected end tag (style).
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--...<style><!--...--!>"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<style><!--...</style><!-- --><style>@import ...</style>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       "<!--..."
      +|     <!--   -->
      +|     <style>
      +|       "@import ..."
      +|   <body>
      +
      +#data
      +<style>...<style><!--...</style><!-- --></style>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +Line: 1 Col: 48 Unexpected end tag (style).
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       "...<style><!--..."
      +|     <!--   -->
      +|   <body>
      +
      +#data
      +<style>...<!--[if IE]><style>...</style>X
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       "...<!--[if IE]><style>..."
      +|   <body>
      +|     "X"
      +
      +#data
      +<title><!--<title></title>--></title>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
      +Line: 1 Col: 37 Unexpected end tag (title).
      +#document
      +| <html>
      +|   <head>
      +|     <title>
      +|       "<!--<title>"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<title>&lt;/title></title>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <title>
      +|       "</title>"
      +|   <body>
      +
      +#data
      +<title>foo/title><link></head><body>X
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
      +Line: 1 Col: 37 Unexpected end of file. Expected end tag (title).
      +#document
      +| <html>
      +|   <head>
      +|     <title>
      +|       "foo/title><link></head><body>X"
      +|   <body>
      +
      +#data
      +<noscript><!--<noscript></noscript>--></noscript>
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
      +Line: 1 Col: 49 Unexpected end tag (noscript).
      +#document
      +| <html>
      +|   <head>
      +|     <noscript>
      +|       "<!--<noscript>"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<noscript><!--</noscript>X<noscript>--></noscript>
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <noscript>
      +|       "<!--"
      +|   <body>
      +|     "X"
      +|     <noscript>
      +|       "-->"
      +
      +#data
      +<noscript><iframe></noscript>X
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <noscript>
      +|       "<iframe>"
      +|   <body>
      +|     "X"
      +
      +#data
      +<noframes><!--<noframes></noframes>--></noframes>
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE.
      +Line: 1 Col: 49 Unexpected end tag (noframes).
      +#document
      +| <html>
      +|   <head>
      +|     <noframes>
      +|       "<!--<noframes>"
      +|   <body>
      +|     "-->"
      +
      +#data
      +<noframes><body><script><!--...</script></body></noframes></html>
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <noframes>
      +|       "<body><script><!--...</script></body>"
      +|   <body>
      +
      +#data
      +<textarea><!--<textarea></textarea>--></textarea>
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
      +Line: 1 Col: 49 Unexpected end tag (textarea).
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "<!--<textarea>"
      +|     "-->"
      +
      +#data
      +<textarea>&lt;/textarea></textarea>
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "</textarea>"
      +
      +#data
      +<iframe><!--<iframe></iframe>--></iframe>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
      +Line: 1 Col: 41 Unexpected end tag (iframe).
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <iframe>
      +|       "<!--<iframe>"
      +|     "-->"
      +
      +#data
      +<iframe>...<!--X->...<!--/X->...</iframe>
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <iframe>
      +|       "...<!--X->...<!--/X->..."
      +
      +#data
      +<xmp><!--<xmp>-->
      +#errors
      +Line: 1 Col: 5 Unexpected start tag (xmp). Expected DOCTYPE.
      +Line: 1 Col: 29 Unexpected end tag (xmp).
      +#document
      +| 
      +|   
      +|   
      +|     
      +|       "<!--<xmp>"
      +|     "-->"
      +
      +#data
      +<noembed><!--<noembed></noembed>--></noembed>
      +#errors
      +Line: 1 Col: 9 Unexpected start tag (noembed). Expected DOCTYPE.
      +Line: 1 Col: 45 Unexpected end tag (noembed).
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <noembed>
      +|       "<!--<noembed>"
      +|     "-->"
      +
      +#data
      +<!doctype html><table>
      +
      +#errors
      +Line 2 Col 0 Unexpected end of file. Expected table content.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       "
      +"
      +
      +#data
      +<!doctype html><table><td><span><font></span><span>
      +#errors
      +Line 1 Col 26 Unexpected table cell start tag (td) in the table body phase.
      +Line 1 Col 45 Unexpected end tag (span).
      +Line 1 Col 51 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <span>
      +|               <font>
      +|             <font>
      +|               <span>
      +
      +#data
      +<!doctype html><form><table></form><form></table></form>
      +#errors
      +35: Stray end tag “form”.
      +41: Start tag “form” seen in “table”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <form>
      +|       <table>
      +|         <form>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat
      new file mode 100644
      index 00000000..7b555f88
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat
      @@ -0,0 +1,153 @@
      +#data
      +<!doctype html><table><tbody><select><tr>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +
      +#data
      +<!doctype html><table><tr><select><td>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +
      +#data
      +<!doctype html><table><tr><td><select><td>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <select>
      +|           <td>
      +
      +#data
      +<!doctype html><table><tr><th><select><td>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <th>
      +|             <select>
      +|           <td>
      +
      +#data
      +<!doctype html><table><caption><select><tr>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <caption>
      +|         <select>
      +|       <tbody>
      +|         <tr>
      +
      +#data
      +<!doctype html><select><tr>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +
      +#data
      +<!doctype html><select><td>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +
      +#data
      +<!doctype html><select><th>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +
      +#data
      +<!doctype html><select><tbody>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +
      +#data
      +<!doctype html><select><thead>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +
      +#data
      +<!doctype html><select><tfoot>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +
      +#data
      +<!doctype html><select><caption>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +
      +#data
      +<!doctype html><table><tr></table>a
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|     "a"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat
      new file mode 100644
      index 00000000..680e1f06
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat
      @@ -0,0 +1,269 @@
      +#data
      +<!doctype html><plaintext></plaintext>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <plaintext>
      +|       "</plaintext>"
      +
      +#data
      +<!doctype html><table><plaintext></plaintext>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <plaintext>
      +|       "</plaintext>"
      +|     <table>
      +
      +#data
      +<!doctype html><table><tbody><plaintext></plaintext>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <plaintext>
      +|       "</plaintext>"
      +|     <table>
      +|       <tbody>
      +
      +#data
      +<!doctype html><table><tbody><tr><plaintext></plaintext>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <plaintext>
      +|       "</plaintext>"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +
      +#data
      +<!doctype html><table><tbody><tr><plaintext></plaintext>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <plaintext>
      +|       "</plaintext>"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +
      +#data
      +<!doctype html><table><td><plaintext></plaintext>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <plaintext>
      +|               "</plaintext>"
      +
      +#data
      +<!doctype html><table><caption><plaintext></plaintext>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <caption>
      +|         <plaintext>
      +|           "</plaintext>"
      +
      +#data
      +<!doctype html><table><tr><style></script></style>abc
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "abc"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <style>
      +|             "</script>"
      +
      +#data
      +<!doctype html><table><tr><script></style></script>abc
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "abc"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <script>
      +|             "</style>"
      +
      +#data
      +<!doctype html><table><caption><style></script></style>abc
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <caption>
      +|         <style>
      +|           "</script>"
      +|         "abc"
      +
      +#data
      +<!doctype html><table><td><style></script></style>abc
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <style>
      +|               "</script>"
      +|             "abc"
      +
      +#data
      +<!doctype html><select><script></style></script>abc
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <script>
      +|         "</style>"
      +|       "abc"
      +
      +#data
      +<!doctype html><table><select><script></style></script>abc
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <script>
      +|         "</style>"
      +|       "abc"
      +|     <table>
      +
      +#data
      +<!doctype html><table><tr><select><script></style></script>abc
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <script>
      +|         "</style>"
      +|       "abc"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +
      +#data
      +<!doctype html><frameset></frameset><noframes>abc
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +|   <noframes>
      +|     "abc"
      +
      +#data
      +<!doctype html><frameset></frameset><noframes>abc</noframes><!--abc-->
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +|   <noframes>
      +|     "abc"
      +|   <!-- abc -->
      +
      +#data
      +<!doctype html><frameset></frameset></html><noframes>abc
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +|   <noframes>
      +|     "abc"
      +
      +#data
      +<!doctype html><frameset></frameset></html><noframes>abc</noframes><!--abc-->
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +|   <noframes>
      +|     "abc"
      +| <!-- abc -->
      +
      +#data
      +<!doctype html><table><tr></tbody><tfoot>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|       <tfoot>
      +
      +#data
      +<!doctype html><table><td><svg></svg>abc<td>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <svg svg>
      +|             "abc"
      +|           <td>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat
      new file mode 100644
      index 00000000..0d62f5a5
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat
      @@ -0,0 +1,1237 @@
      +#data
      +<!doctype html><math><mn DefinitionUrl="foo">
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math mn>
      +|         definitionURL="foo"
      +
      +#data
      +<!doctype html><html></p><!--foo-->
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <!-- foo -->
      +|   <head>
      +|   <body>
      +
      +#data
      +<!doctype html><head></head></p><!--foo-->
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <!-- foo -->
      +|   <body>
      +
      +#data
      +<!doctype html><body><p><pre>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <pre>
      +
      +#data
      +<!doctype html><body><p><listing>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <listing>
      +
      +#data
      +<!doctype html><p><plaintext>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <plaintext>
      +
      +#data
      +<!doctype html><p><h1>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <h1>
      +
      +#data
      +<!doctype html><form><isindex>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <form>
      +
      +#data
      +<!doctype html><isindex action="POST">
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <form>
      +|       action="POST"
      +|       <hr>
      +|       <label>
      +|         "This is a searchable index. Enter search keywords: "
      +|         <input>
      +|           name="isindex"
      +|       <hr>
      +
      +#data
      +<!doctype html><isindex prompt="this is isindex">
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <form>
      +|       <hr>
      +|       <label>
      +|         "this is isindex"
      +|         <input>
      +|           name="isindex"
      +|       <hr>
      +
      +#data
      +<!doctype html><isindex type="hidden">
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <form>
      +|       <hr>
      +|       <label>
      +|         "This is a searchable index. Enter search keywords: "
      +|         <input>
      +|           name="isindex"
      +|           type="hidden"
      +|       <hr>
      +
      +#data
      +<!doctype html><isindex name="foo">
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <form>
      +|       <hr>
      +|       <label>
      +|         "This is a searchable index. Enter search keywords: "
      +|         <input>
      +|           name="isindex"
      +|       <hr>
      +
      +#data
      +<!doctype html><ruby><p><rp>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <ruby>
      +|       <p>
      +|       <rp>
      +
      +#data
      +<!doctype html><ruby><div><span><rp>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <ruby>
      +|       <div>
      +|         <span>
      +|           <rp>
      +
      +#data
      +<!doctype html><ruby><div><p><rp>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <ruby>
      +|       <div>
      +|         <p>
      +|         <rp>
      +
      +#data
      +<!doctype html><ruby><p><rt>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <ruby>
      +|       <p>
      +|       <rt>
      +
      +#data
      +<!doctype html><ruby><div><span><rt>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <ruby>
      +|       <div>
      +|         <span>
      +|           <rt>
      +
      +#data
      +<!doctype html><ruby><div><p><rt>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <ruby>
      +|       <div>
      +|         <p>
      +|         <rt>
      +
      +#data
      +<!doctype html><math/><foo>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|     <foo>
      +
      +#data
      +<!doctype html><svg/><foo>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|     <foo>
      +
      +#data
      +<!doctype html><div></body><!--foo-->
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|   <!-- foo -->
      +
      +#data
      +<!doctype html><h1><div><h3><span></h1>foo
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <h1>
      +|       <div>
      +|         <h3>
      +|           <span>
      +|         "foo"
      +
      +#data
      +<!doctype html><p></h3>foo
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       "foo"
      +
      +#data
      +<!doctype html><h3><li>abc</h2>foo
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <h3>
      +|       <li>
      +|         "abc"
      +|     "foo"
      +
      +#data
      +<!doctype html><table>abc<!--foo-->
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "abc"
      +|     <table>
      +|       <!-- foo -->
      +
      +#data
      +<!doctype html><table>  <!--foo-->
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       "  "
      +|       <!-- foo -->
      +
      +#data
      +<!doctype html><table> b <!--foo-->
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     " b "
      +|     <table>
      +|       <!-- foo -->
      +
      +#data
      +<!doctype html><select><option><option>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <option>
      +|       <option>
      +
      +#data
      +<!doctype html><select><option></optgroup>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <option>
      +
      +#data
      +<!doctype html><select><option></optgroup>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <option>
      +
      +#data
      +<!doctype html><p><math><mi><p><h1>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <math math>
      +|         <math mi>
      +|           <p>
      +|           <h1>
      +
      +#data
      +<!doctype html><p><math><mo><p><h1>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <math math>
      +|         <math mo>
      +|           <p>
      +|           <h1>
      +
      +#data
      +<!doctype html><p><math><mn><p><h1>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <math math>
      +|         <math mn>
      +|           <p>
      +|           <h1>
      +
      +#data
      +<!doctype html><p><math><ms><p><h1>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <math math>
      +|         <math ms>
      +|           <p>
      +|           <h1>
      +
      +#data
      +<!doctype html><p><math><mtext><p><h1>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <math math>
      +|         <math mtext>
      +|           <p>
      +|           <h1>
      +
      +#data
      +<!doctype html><frameset></noframes>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +
      +#data
      +<!doctype html><html c=d><body></html><html a=b>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   a="b"
      +|   c="d"
      +|   <head>
      +|   <body>
      +
      +#data
      +<!doctype html><html c=d><frameset></frameset></html><html a=b>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   a="b"
      +|   c="d"
      +|   <head>
      +|   <frameset>
      +
      +#data
      +<!doctype html><html><frameset></frameset></html><!--foo-->
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +| <!-- foo -->
      +
      +#data
      +<!doctype html><html><frameset></frameset></html>  
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +|   "  "
      +
      +#data
      +<!doctype html><html><frameset></frameset></html>abc
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +
      +#data
      +<!doctype html><html><frameset></frameset></html><p>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +
      +#data
      +<!doctype html><html><frameset></frameset></html></p>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +
      +#data
      +<html><frameset></frameset></html><!doctype html>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <frameset>
      +
      +#data
      +<!doctype html><body><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!doctype html><p><frameset><frame>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +|     <frame>
      +
      +#data
      +<!doctype html><p>a<frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       "a"
      +
      +#data
      +<!doctype html><p> <frameset><frame>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +|     <frame>
      +
      +#data
      +<!doctype html><pre><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <pre>
      +
      +#data
      +<!doctype html><listing><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <listing>
      +
      +#data
      +<!doctype html><li><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <li>
      +
      +#data
      +<!doctype html><dd><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <dd>
      +
      +#data
      +<!doctype html><dt><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <dt>
      +
      +#data
      +<!doctype html><button><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <button>
      +
      +#data
      +<!doctype html><applet><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <applet>
      +
      +#data
      +<!doctype html><marquee><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <marquee>
      +
      +#data
      +<!doctype html><object><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <object>
      +
      +#data
      +<!doctype html><table><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +
      +#data
      +<!doctype html><area><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <area>
      +
      +#data
      +<!doctype html><basefont><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <basefont>
      +|   <frameset>
      +
      +#data
      +<!doctype html><bgsound><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <bgsound>
      +|   <frameset>
      +
      +#data
      +<!doctype html><br><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <br>
      +
      +#data
      +<!doctype html><embed><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <embed>
      +
      +#data
      +<!doctype html><img><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <img>
      +
      +#data
      +<!doctype html><input><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <input>
      +
      +#data
      +<!doctype html><keygen><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <keygen>
      +
      +#data
      +<!doctype html><wbr><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <wbr>
      +
      +#data
      +<!doctype html><hr><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <hr>
      +
      +#data
      +<!doctype html><textarea></textarea><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +
      +#data
      +<!doctype html><xmp>
      +#errors
      +#document
+| <!DOCTYPE html>
+| <html>
+|   <head>
+|   <body>
+|     <xmp>
      +
      +#data
      +<!doctype html><iframe></iframe><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <iframe>
      +
      +#data
      +<!doctype html><select></select><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +
      +#data
      +<!doctype html><svg></svg><frameset><frame>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +|     <frame>
      +
      +#data
      +<!doctype html><math></math><frameset><frame>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +|     <frame>
      +
      +#data
      +<!doctype html><svg><foreignObject><div> <frameset><frame>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +|     <frame>
      +
      +#data
      +<!doctype html><svg>a</svg><frameset><frame>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "a"
      +
      +#data
      +<!doctype html><svg> </svg><frameset><frame>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +|     <frame>
      +
      +#data
      +<html>aaa<frameset></frameset>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "aaa"
      +
      +#data
      +<html> a <frameset></frameset>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "a "
      +
      +#data
      +<!doctype html><div><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +
      +#data
      +<!doctype html><div><body><frameset>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +
      +#data
      +<!doctype html><p><math></p>a
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <math math>
      +|     "a"
      +
      +#data
      +<!doctype html><p><math><mn><span></p>a
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <math math>
      +|         <math mn>
      +|           <span>
      +|             <p>
      +|             "a"
      +
      +#data
      +<!doctype html><math></html>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +
      +#data
      +<!doctype html><meta charset="ascii">
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <meta>
      +|       charset="ascii"
      +|   <body>
      +
      +#data
      +<!doctype html><meta http-equiv="content-type" content="text/html;charset=ascii">
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <meta>
      +|       content="text/html;charset=ascii"
      +|       http-equiv="content-type"
      +|   <body>
      +
      +#data
      +<!doctype html><head><!--aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa--><meta charset="utf8">
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <!-- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa -->
      +|     <meta>
      +|       charset="utf8"
      +|   <body>
      +
      +#data
      +<!doctype html><html a=b><head></head><html c=d>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   a="b"
      +|   c="d"
      +|   <head>
      +|   <body>
      +
      +#data
      +<!doctype html><image/>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <img>
      +
      +#data
      +<!doctype html>a<i>b<table>c<b>d</i>e</b>f
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "a"
      +|     <i>
      +|       "bc"
      +|       <b>
      +|         "de"
      +|       "f"
      +|       <table>
      +
      +#data
      +<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <i>
      +|       "a"
      +|       <b>
      +|         "b"
      +|     <b>
      +|     <div>
      +|       <b>
      +|         <i>
      +|           "c"
      +|           <a>
      +|             "d"
      +|         <a>
      +|           "e"
      +|       <a>
      +|         "f"
      +|     <table>
      +
      +#data
      +<!doctype html><i>a<b>b<div>c<a>d</i>e</b>f
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <i>
      +|       "a"
      +|       <b>
      +|         "b"
      +|     <b>
      +|     <div>
      +|       <b>
      +|         <i>
      +|           "c"
      +|           <a>
      +|             "d"
      +|         <a>
      +|           "e"
      +|       <a>
      +|         "f"
      +
      +#data
      +<!doctype html><table><i>a<b>b<div>c</i>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <i>
      +|       "a"
      +|       <b>
      +|         "b"
      +|     <b>
      +|       <div>
      +|         <i>
      +|           "c"
      +|     <table>
      +
      +#data
      +<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <i>
      +|       "a"
      +|       <b>
      +|         "b"
      +|     <b>
      +|     <div>
      +|       <b>
      +|         <i>
      +|           "c"
      +|           <a>
      +|             "d"
      +|         <a>
      +|           "e"
      +|       <a>
      +|         "f"
      +|     <table>
      +
      +#data
      +<!doctype html><table><i>a<div>b<tr>c<b>d</i>e
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <i>
      +|       "a"
      +|       <div>
      +|         "b"
      +|     <i>
      +|       "c"
      +|       <b>
      +|         "d"
      +|     <b>
      +|       "e"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +
      +#data
      +<!doctype html><table><td><table><i>a<div>b<b>c</i>d
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <i>
      +|               "a"
      +|             <div>
      +|               <i>
      +|                 "b"
      +|                 <b>
      +|                   "c"
      +|               <b>
      +|                 "d"
      +|             <table>
      +
      +#data
      +<!doctype html><body><bgsound>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <bgsound>
      +
      +#data
      +<!doctype html><body><basefont>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <basefont>
      +
      +#data
      +<!doctype html><a><b></a><basefont>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       <b>
      +|     <basefont>
      +
      +#data
      +<!doctype html><a><b></a><bgsound>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       <b>
      +|     <bgsound>
      +
      +#data
      +<!doctype html><figcaption><article></figcaption>a
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <figcaption>
      +|       <article>
      +|     "a"
      +
      +#data
      +<!doctype html><summary><article></summary>a
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <summary>
      +|       <article>
      +|     "a"
      +
      +#data
      +<!doctype html><p><a><plaintext>b
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <a>
      +|     <plaintext>
      +|       <a>
      +|         "b"
      +
      +#data
      +<!DOCTYPE html><div>a<a></div>b<p>c</p>d
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       "a"
      +|       <a>
      +|     <a>
      +|       "b"
      +|       <p>
      +|         "c"
      +|       "d"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat
      new file mode 100644
      index 00000000..60d85922
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat
      @@ -0,0 +1,763 @@
      +#data
      +<!DOCTYPE html>Test
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "Test"
      +
      +#data
      +<textarea>test</div>test
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
      +Line: 1 Col: 24 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "test</div>test"
      +
      +#data
      +<table><td>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase.
      +Line: 1 Col: 11 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +
      +#data
      +<table><td>test</tbody></table>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             "test"
      +
      +#data
      +<frame>test
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (frame). Expected DOCTYPE.
      +Line: 1 Col: 7 Unexpected start tag frame. Ignored.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "test"
      +
      +#data
      +<!DOCTYPE html><frameset>test
      +#errors
      +Line: 1 Col: 29 Unepxected characters in the frameset phase. Characters ignored.
      +Line: 1 Col: 29 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +
      +#data
      +<!DOCTYPE html><frameset><!DOCTYPE html>
      +#errors
      +Line: 1 Col: 40 Unexpected DOCTYPE. Ignored.
      +Line: 1 Col: 40 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <frameset>
      +
      +#data
      +<!DOCTYPE html><font><p><b>test</font>
      +#errors
      +Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <font>
      +|     <p>
      +|       <font>
      +|         <b>
      +|           "test"
      +
      +#data
      +<!DOCTYPE html><dt><div><dd>
      +#errors
      +Line: 1 Col: 28 Missing end tag (div, dt).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <dt>
      +|       <div>
      +|     <dd>
      +
      +#data
      +<script></x
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       "</x"
      +|   <body>
      +
      +#data
      +<table><plaintext><td>
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
      +Line: 1 Col: 18 Unexpected start tag (plaintext) in table context caused voodoo mode.
      +Line: 1 Col: 22 Unexpected end of file. Expected table content.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <plaintext>
      +|       "<td>"
      +|     <table>
      +
      +#data
      +<plaintext></plaintext>
      +#errors
      +Line: 1 Col: 11 Unexpected start tag (plaintext). Expected DOCTYPE.
      +Line: 1 Col: 23 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <plaintext>
      +|       "</plaintext>"
      +
      +#data
      +<!DOCTYPE html><table><tr>TEST
      +#errors
      +Line: 1 Col: 30 Unexpected non-space characters in table context caused voodoo mode.
      +Line: 1 Col: 30 Unexpected end of file. Expected table content.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "TEST"
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +
      +#data
      +<!DOCTYPE html><body t1=1><body t2=2><body t3=3 t4=4>
      +#errors
      +Line: 1 Col: 37 Unexpected start tag (body).
      +Line: 1 Col: 53 Unexpected start tag (body).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     t1="1"
      +|     t2="2"
      +|     t3="3"
      +|     t4="4"
      +
      +#data
      +</b test
      +#errors
      +Line: 1 Col: 8 Unexpected end of file in attribute name.
      +Line: 1 Col: 8 End tag contains unexpected attributes.
      +Line: 1 Col: 8 Unexpected end tag (b). Expected DOCTYPE.
      +Line: 1 Col: 8 Unexpected end tag (b) after the (implied) root element.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html></b test<b &=&amp>X
      +#errors
      +Line: 1 Col: 32 Named entity didn't end with ';'.
      +Line: 1 Col: 33 End tag contains unexpected attributes.
      +Line: 1 Col: 33 Unexpected end tag (b) after the (implied) root element.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "X"
      +
      +#data
      +<!doctypehtml><scrIPt type=text/x-foobar;baz>X</SCRipt
      +#errors
      +Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
      +Line: 1 Col: 54 Unexpected end of file in the tag name.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       type="text/x-foobar;baz"
      +|       "X</SCRipt"
      +|   <body>
      +
      +#data
      +&
      +#errors
      +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "&"
      +
      +#data
      +&#
      +#errors
      +Line: 1 Col: 1 Numeric entity expected. Got end of file instead.
      +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "&#"
      +
      +#data
      +&#X
      +#errors
      +Line: 1 Col: 3 Numeric entity expected but none found.
      +Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "&#X"
      +
      +#data
      +&#x
      +#errors
      +Line: 1 Col: 3 Numeric entity expected but none found.
      +Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "&#x"
      +
      +#data
      +&#45
      +#errors
      +Line: 1 Col: 4 Numeric entity didn't end with ';'.
      +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "-"
      +
      +#data
      +&x-test
      +#errors
      +Line: 1 Col: 1 Named entity expected. Got none.
      +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "&x-test"
      +
      +#data
      +<!doctypehtml><p><li>
      +#errors
      +Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <li>
      +
      +#data
      +<!doctypehtml><p><dt>
      +#errors
      +Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <dt>
      +
      +#data
      +<!doctypehtml><p><dd>
      +#errors
      +Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <dd>
      +
      +#data
      +<!doctypehtml><p><form>
      +#errors
      +Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
      +Line: 1 Col: 23 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <form>
      +
      +#data
      +<!DOCTYPE html><p></P>X
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     "X"
      +
      +#data
      +&AMP
      +#errors
      +Line: 1 Col: 4 Named entity didn't end with ';'.
      +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "&"
      +
      +#data
      +&AMp;
      +#errors
      +Line: 1 Col: 1 Named entity expected. Got none.
      +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "&AMp;"
      +
      +#data
      +<!DOCTYPE html><html><head></head><body><thisISasillyTESTelementNameToMakeSureCrazyTagNamesArePARSEDcorrectLY>
      +#errors
      +Line: 1 Col: 110 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <thisisasillytestelementnametomakesurecrazytagnamesareparsedcorrectly>
      +
      +#data
      +<!DOCTYPE html>X</body>X
      +#errors
      +Line: 1 Col: 24 Unexpected non-space characters in the after body phase.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "XX"
      +
      +#data
      +<!DOCTYPE html><!-- X
      +#errors
      +Line: 1 Col: 21 Unexpected end of file in comment.
      +#document
      +| <!DOCTYPE html>
      +| <!--  X -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><table><caption>test TEST</caption><td>test
      +#errors
      +Line: 1 Col: 54 Unexpected table cell start tag (td) in the table body phase.
      +Line: 1 Col: 58 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <caption>
      +|         "test TEST"
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             "test"
      +
      +#data
      +<!DOCTYPE html><select><option><optgroup>
      +#errors
      +Line: 1 Col: 41 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <option>
      +|       <optgroup>
      +
      +#data
      +<!DOCTYPE html><select><optgroup><option></optgroup><option><select><option>
      +#errors
      +Line: 1 Col: 68 Unexpected select start tag in the select phase treated as select end tag.
      +Line: 1 Col: 76 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <optgroup>
      +|         <option>
      +|       <option>
      +|     <option>
      +
      +#data
      +<!DOCTYPE html><select><optgroup><option><optgroup>
      +#errors
      +Line: 1 Col: 51 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <optgroup>
      +|         <option>
      +|       <optgroup>
      +
      +#data
      +<!DOCTYPE html><datalist><option>foo</datalist>bar
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <datalist>
      +|       <option>
      +|         "foo"
      +|     "bar"
      +
      +#data
      +<!DOCTYPE html><font><input><input></font>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <font>
      +|       <input>
      +|       <input>
      +
      +#data
      +<!DOCTYPE html><!-- XXX - XXX -->
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <!--  XXX - XXX  -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><!-- XXX - XXX
      +#errors
      +Line: 1 Col: 29 Unexpected end of file in comment (-)
      +#document
      +| <!DOCTYPE html>
      +| <!--  XXX - XXX -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><!-- XXX - XXX - XXX -->
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <!--  XXX - XXX - XXX  -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<isindex test=x name=x>
      +#errors
      +Line: 1 Col: 23 Unexpected start tag (isindex). Expected DOCTYPE.
      +Line: 1 Col: 23 Unexpected start tag isindex. Don't use it!
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <form>
      +|       <hr>
      +|       <label>
      +|         "This is a searchable index. Enter search keywords: "
      +|         <input>
      +|           name="isindex"
      +|           test="x"
      +|       <hr>
      +
      +#data
      +test
      +test
      +#errors
      +Line: 2 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     "test
      +test"
      +
      +#data
      +<!DOCTYPE html><body><title>test</body></title>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <title>
      +|       "test</body>"
      +
      +#data
      +<!DOCTYPE html><body><title>X</title><meta name=z><link rel=foo><style>
      +x { content:"</style" } </style>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <title>
      +|       "X"
      +|     <meta>
      +|       name="z"
      +|     <link>
      +|       rel="foo"
      +|     <style>
      +|       "
      +x { content:"</style" } "
      +
      +#data
      +<!DOCTYPE html><select><optgroup></optgroup></select>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <select>
      +|       <optgroup>
      +
      +#data
      + 
      + 
      +#errors
      +Line: 2 Col: 1 Unexpected End of file. Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html>  <html>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><script>
      +</script>  <title>x</title>  </head>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <script>
      +|       "
      +"
      +|     "  "
      +|     <title>
      +|       "x"
      +|     "  "
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><html><body><html id=x>
      +#errors
      +Line: 1 Col: 38 html needs to be the first start tag.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   id="x"
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html>X</body><html id="x">
      +#errors
      +Line: 1 Col: 36 Unexpected start tag token (html) in the after body phase.
      +Line: 1 Col: 36 html needs to be the first start tag.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   id="x"
      +|   <head>
      +|   <body>
      +|     "X"
      +
      +#data
      +<!DOCTYPE html><head><html id=x>
      +#errors
      +Line: 1 Col: 32 html needs to be the first start tag.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   id="x"
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html>X</html>X
      +#errors
      +Line: 1 Col: 24 Unexpected non-space characters in the after body phase.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "XX"
      +
      +#data
      +<!DOCTYPE html>X</html> 
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "X "
      +
      +#data
      +<!DOCTYPE html>X</html><p>X
      +#errors
      +Line: 1 Col: 26 Unexpected start tag (p).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "X"
      +|     <p>
      +|       "X"
      +
      +#data
      +<!DOCTYPE html>X<p/x/y/z>
      +#errors
      +Line: 1 Col: 19 Expected a > after the /.
      +Line: 1 Col: 21 Solidus (/) incorrectly placed in tag.
      +Line: 1 Col: 23 Solidus (/) incorrectly placed in tag.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "X"
      +|     <p>
      +|       x=""
      +|       y=""
      +|       z=""
      +
      +#data
      +<!DOCTYPE html><!--x--
      +#errors
      +Line: 1 Col: 22 Unexpected end of file in comment (--).
      +#document
      +| <!DOCTYPE html>
      +| <!-- x -->
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><table><tr><td></p></table>
      +#errors
      +Line: 1 Col: 34 Unexpected end tag (p). Ignored.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <p>
      +
      +#data
      +<!DOCTYPE <!DOCTYPE HTML>><!--<!--x-->-->
      +#errors
      +Line: 1 Col: 20 Expected space or '>'. Got ''
      +Line: 1 Col: 25 Erroneous DOCTYPE.
      +Line: 1 Col: 35 Unexpected character in comment found.
      +#document
      +| <!DOCTYPE <!doctype>
      +| <html>
      +|   <head>
      +|   <body>
      +|     ">"
      +|     <!-- <!--x -->
      +|     "-->"
      +
      +#data
      +<!doctype html><div><form></form><div></div></div>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       <form>
      +|       <div>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests20.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests20.dat
      new file mode 100644
      index 00000000..6bd82560
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests20.dat
      @@ -0,0 +1,455 @@
      +#data
      +<!doctype html><p><button><button>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|       <button>
      +
      +#data
      +<!doctype html><p><button><address>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <address>
      +
      +#data
      +<!doctype html><p><button><blockquote>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <blockquote>
      +
      +#data
      +<!doctype html><p><button><menu>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <menu>
      +
      +#data
      +<!doctype html><p><button><p>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <p>
      +
      +#data
      +<!doctype html><p><button><ul>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <ul>
      +
      +#data
      +<!doctype html><p><button><h1>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <h1>
      +
      +#data
      +<!doctype html><p><button><h6>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <h6>
      +
      +#data
      +<!doctype html><p><button><listing>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <listing>
      +
      +#data
      +<!doctype html><p><button><pre>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <pre>
      +
      +#data
      +<!doctype html><p><button><form>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <form>
      +
      +#data
      +<!doctype html><p><button><li>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <li>
      +
      +#data
      +<!doctype html><p><button><dd>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <dd>
      +
      +#data
      +<!doctype html><p><button><dt>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <dt>
      +
      +#data
      +<!doctype html><p><button><plaintext>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <plaintext>
      +
      +#data
      +<!doctype html><p><button><table>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <table>
      +
      +#data
      +<!doctype html><p><button><hr>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <hr>
      +
      +#data
      +<!doctype html><p><button><xmp>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <xmp>
      +
      +#data
      +<!doctype html><p><button></p>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <button>
      +|         <p>
      +
      +#data
      +<!doctype html><address><button></address>a
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <address>
      +|       <button>
      +|     "a"
      +
      +#data
      +<!doctype html><address><button></address>a
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <address>
      +|       <button>
      +|     "a"
      +
      +#data
      +<p><table></p>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <p>
      +|       <table>
      +
      +#data
      +<!doctype html><svg>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +
      +#data
      +<!doctype html><p><figcaption>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <figcaption>
      +
      +#data
      +<!doctype html><p><summary>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <summary>
      +
      +#data
      +<!doctype html><form><table><form>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <form>
      +|       <table>
      +
      +#data
      +<!doctype html><table><form><form>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <form>
      +
      +#data
      +<!doctype html><table><form></table><form>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <form>
      +
      +#data
      +<!doctype html><svg><foreignObject><p>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg foreignObject>
      +|         <p>
      +
      +#data
      +<!doctype html><svg><title>abc
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg title>
      +|         "abc"
      +
      +#data
      +<option><span><option>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <option>
      +|       <span>
      +|         <option>
      +
      +#data
      +<option><option>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <option>
      +|     <option>
      +
      +#data
      +<math><annotation-xml><div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math annotation-xml>
      +|     <div>
      +
      +#data
      +<math><annotation-xml encoding="application/svg+xml"><div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math annotation-xml>
      +|         encoding="application/svg+xml"
      +|     <div>
      +
      +#data
      +<math><annotation-xml encoding="application/xhtml+xml"><div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math annotation-xml>
      +|         encoding="application/xhtml+xml"
      +|         <div>
      +
      +#data
      +<math><annotation-xml encoding="aPPlication/xhtmL+xMl"><div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math annotation-xml>
      +|         encoding="aPPlication/xhtmL+xMl"
      +|         <div>
      +
      +#data
      +<math><annotation-xml encoding="text/html"><div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math annotation-xml>
      +|         encoding="text/html"
      +|         <div>
      +
      +#data
      +<math><annotation-xml encoding="Text/htmL"><div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math annotation-xml>
      +|         encoding="Text/htmL"
      +|         <div>
      +
      +#data
      +<math><annotation-xml encoding=" text/html "><div>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math annotation-xml>
      +|         encoding=" text/html "
      +|     <div>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests21.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests21.dat
      new file mode 100644
      index 00000000..1260ec03
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests21.dat
      @@ -0,0 +1,221 @@
      +#data
      +<svg><![CDATA[foo]]>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "foo"
      +
      +#data
      +<math><![CDATA[foo]]>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       "foo"
      +
      +#data
      +<div><![CDATA[foo]]>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       <!-- [CDATA[foo]] -->
      +
      +#data
      +<svg><![CDATA[foo
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "foo"
      +
      +#data
      +<svg><![CDATA[foo
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "foo"
      +
      +#data
      +<svg><![CDATA[
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +
      +#data
      +<svg><![CDATA[]]>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +
      +#data
      +<svg><![CDATA[]] >]]>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "]] >"
      +
      +#data
      +<svg><![CDATA[]] >]]>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "]] >"
      +
      +#data
      +<svg><![CDATA[]]
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "]]"
      +
      +#data
      +<svg><![CDATA[]
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "]"
      +
      +#data
      +<svg><![CDATA[]>a
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "]>a"
      +
      +#data
      +<svg><foreignObject><div><![CDATA[foo]]>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg foreignObject>
      +|         <div>
      +|           <!-- [CDATA[foo]] -->
      +
      +#data
      +<svg><![CDATA[<svg>]]>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "<svg>"
      +
      +#data
      +<svg><![CDATA[</svg>a]]>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "</svg>a"
      +
      +#data
      +<svg><![CDATA[<svg>a
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "<svg>a"
      +
      +#data
      +<svg><![CDATA[</svg>a
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "</svg>a"
      +
      +#data
      +<svg><![CDATA[<svg>]]><path>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "<svg>"
      +|       <svg path>
      +
      +#data
      +<svg><![CDATA[<svg>]]></path>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "<svg>"
      +
      +#data
      +<svg><![CDATA[<svg>]]><!--path-->
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "<svg>"
      +|       <!-- path -->
      +
      +#data
      +<svg><![CDATA[<svg>]]>path
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "<svg>path"
      +
      +#data
      +<svg><![CDATA[<!--svg-->]]>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       "<!--svg-->"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests22.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests22.dat
      new file mode 100644
      index 00000000..aab27b2e
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests22.dat
      @@ -0,0 +1,157 @@
      +#data
      +<a><b><big><em><strong><div>X</a>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       <b>
      +|         <big>
      +|           <em>
      +|             <strong>
      +|     <big>
      +|       <em>
      +|         <strong>
      +|           <div>
      +|             <a>
      +|               "X"
      +
      +#data
      +<a><b><div id=1><div id=2><div id=3><div id=4><div id=5><div id=6><div id=7><div id=8>A</a>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       <b>
      +|     <b>
      +|       <div>
      +|         id="1"
      +|         <a>
      +|         <div>
      +|           id="2"
      +|           <a>
      +|           <div>
      +|             id="3"
      +|             <a>
      +|             <div>
      +|               id="4"
      +|               <a>
      +|               <div>
      +|                 id="5"
      +|                 <a>
      +|                 <div>
      +|                   id="6"
      +|                   <a>
      +|                   <div>
      +|                     id="7"
      +|                     <a>
      +|                     <div>
      +|                       id="8"
      +|                       <a>
      +|                         "A"
      +
      +#data
      +<a><b><div id=1><div id=2><div id=3><div id=4><div id=5><div id=6><div id=7><div id=8><div id=9>A</a>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       <b>
      +|     <b>
      +|       <div>
      +|         id="1"
      +|         <a>
      +|         <div>
      +|           id="2"
      +|           <a>
      +|           <div>
      +|             id="3"
      +|             <a>
      +|             <div>
      +|               id="4"
      +|               <a>
      +|               <div>
      +|                 id="5"
      +|                 <a>
      +|                 <div>
      +|                   id="6"
      +|                   <a>
      +|                   <div>
      +|                     id="7"
      +|                     <a>
      +|                     <div>
      +|                       id="8"
      +|                       <a>
      +|                         <div>
      +|                           id="9"
      +|                           "A"
      +
      +#data
      +<a><b><div id=1><div id=2><div id=3><div id=4><div id=5><div id=6><div id=7><div id=8><div id=9><div id=10>A</a>
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       <b>
      +|     <b>
      +|       <div>
      +|         id="1"
      +|         <a>
      +|         <div>
      +|           id="2"
      +|           <a>
      +|           <div>
      +|             id="3"
      +|             <a>
      +|             <div>
      +|               id="4"
      +|               <a>
      +|               <div>
      +|                 id="5"
      +|                 <a>
      +|                 <div>
      +|                   id="6"
      +|                   <a>
      +|                   <div>
      +|                     id="7"
      +|                     <a>
      +|                     <div>
      +|                       id="8"
      +|                       <a>
      +|                         <div>
      +|                           id="9"
      +|                           <div>
      +|                             id="10"
      +|                             "A"
      +
      +#data
      +<cite><b><cite><i><cite><i><cite><i><div>X</b>TEST
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (cite). Expected DOCTYPE.
      +Line: 1 Col: 46 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
      +Line: 1 Col: 50 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <cite>
      +|       <b>
      +|         <cite>
      +|           <i>
      +|             <cite>
      +|               <i>
      +|                 <cite>
      +|                   <i>
      +|       <i>
      +|         <i>
      +|           <div>
      +|             <b>
      +|               "X"
      +|             "TEST"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests23.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests23.dat
      new file mode 100644
      index 00000000..34d2a73f
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests23.dat
      @@ -0,0 +1,155 @@
      +#data
      +<p><font size=4><font color=red><font size=4><font size=4><font size=4><font size=4><font size=4><font color=red><p>X
      +#errors
      +3: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
      +116: Unclosed elements.
      +117: End of file seen and there were open elements.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <font>
      +|         size="4"
      +|         <font>
      +|           color="red"
      +|           <font>
      +|             size="4"
      +|             <font>
      +|               size="4"
      +|               <font>
      +|                 size="4"
      +|                 <font>
      +|                   size="4"
      +|                   <font>
      +|                     size="4"
      +|                     <font>
      +|                       color="red"
      +|     <p>
      +|       <font>
      +|         color="red"
      +|         <font>
      +|           size="4"
      +|           <font>
      +|             size="4"
      +|             <font>
      +|               size="4"
      +|               <font>
      +|                 color="red"
      +|                 "X"
      +
      +#data
      +<p><font size=4><font size=4><font size=4><font size=4><p>X
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <font>
      +|         size="4"
      +|         <font>
      +|           size="4"
      +|           <font>
      +|             size="4"
      +|             <font>
      +|               size="4"
      +|     <p>
      +|       <font>
      +|         size="4"
      +|         <font>
      +|           size="4"
      +|           <font>
      +|             size="4"
      +|             "X"
      +
      +#data
      +<p><font size=4><font size=4><font size=4><font size="5"><font size=4><p>X
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <font>
      +|         size="4"
      +|         <font>
      +|           size="4"
      +|           <font>
      +|             size="4"
      +|             <font>
      +|               size="5"
      +|               <font>
      +|                 size="4"
      +|     <p>
      +|       <font>
      +|         size="4"
      +|         <font>
      +|           size="4"
      +|           <font>
      +|             size="5"
      +|             <font>
      +|               size="4"
      +|               "X"
      +
      +#data
      +<p><font size=4 id=a><font size=4 id=b><font size=4><font size=4><p>X
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <font>
      +|         id="a"
      +|         size="4"
      +|         <font>
      +|           id="b"
      +|           size="4"
      +|           <font>
      +|             size="4"
      +|             <font>
      +|               size="4"
      +|     <p>
      +|       <font>
      +|         id="a"
      +|         size="4"
      +|         <font>
      +|           id="b"
      +|           size="4"
      +|           <font>
      +|             size="4"
      +|             <font>
      +|               size="4"
      +|               "X"
      +
      +#data
      +<p><b id=a><b id=a><b id=a><b><object><b id=a><b id=a>X</object><p>Y
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <b>
      +|         id="a"
      +|         <b>
      +|           id="a"
      +|           <b>
      +|             id="a"
      +|             <b>
      +|               <object>
      +|                 <b>
      +|                   id="a"
      +|                   <b>
      +|                     id="a"
      +|                     "X"
      +|     <p>
      +|       <b>
      +|         id="a"
      +|         <b>
      +|           id="a"
      +|           <b>
      +|             id="a"
      +|             <b>
      +|               "Y"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests24.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests24.dat
      new file mode 100644
      index 00000000..f6dc7eb4
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests24.dat
      @@ -0,0 +1,79 @@
      +#data
      +<!DOCTYPE html>&NotEqualTilde;
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "≂̸"
      +
      +#data
      +<!DOCTYPE html>&NotEqualTilde;A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "≂̸A"
      +
      +#data
      +<!DOCTYPE html>&ThickSpace;
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "  "
      +
      +#data
      +<!DOCTYPE html>&ThickSpace;A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "  A"
      +
      +#data
      +<!DOCTYPE html>&NotSubset;
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "⊂⃒"
      +
      +#data
      +<!DOCTYPE html>&NotSubset;A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "⊂⃒A"
      +
      +#data
      +<!DOCTYPE html>&Gopf;
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "𝔾"
      +
      +#data
      +<!DOCTYPE html>&Gopf;A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "𝔾A"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests25.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests25.dat
      new file mode 100644
      index 00000000..00de7295
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests25.dat
      @@ -0,0 +1,219 @@
      +#data
      +<!DOCTYPE html><body><foo>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <foo>
      +|       "A"
      +
      +#data
      +<!DOCTYPE html><body><area>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <area>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><base>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <base>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><basefont>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <basefont>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><bgsound>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <bgsound>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><br>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <br>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><col>A
      +#errors
      +26: Stray start tag “col”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><command>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <command>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><embed>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <embed>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><frame>A
      +#errors
      +26: Stray start tag “frame”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><hr>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <hr>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><img>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <img>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><input>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <input>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><keygen>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <keygen>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><link>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <link>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><meta>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <meta>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><param>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <param>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><source>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <source>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><track>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <track>
      +|     "A"
      +
      +#data
      +<!DOCTYPE html><body><wbr>A
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <wbr>
      +|     "A"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests26.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests26.dat
      new file mode 100644
      index 00000000..fae11ffd
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests26.dat
      @@ -0,0 +1,313 @@
      +#data
      +<!DOCTYPE html><body><a href='#1'><nobr>1<nobr></a><br><a href='#2'><nobr>2<nobr></a><br><a href='#3'><nobr>3<nobr></a>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <a>
      +|       href="#1"
      +|       <nobr>
      +|         "1"
      +|       <nobr>
      +|     <nobr>
      +|       <br>
      +|       <a>
      +|         href="#2"
      +|     <a>
      +|       href="#2"
      +|       <nobr>
      +|         "2"
      +|       <nobr>
      +|     <nobr>
      +|       <br>
      +|       <a>
      +|         href="#3"
      +|     <a>
      +|       href="#3"
      +|       <nobr>
      +|         "3"
      +|       <nobr>
      +
      +#data
      +<!DOCTYPE html><body><b><nobr>1<nobr></b><i><nobr>2<nobr></i>3
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <nobr>
      +|         "1"
      +|       <nobr>
      +|     <nobr>
      +|       <i>
      +|     <i>
      +|       <nobr>
      +|         "2"
      +|       <nobr>
      +|     <nobr>
      +|       "3"
      +
      +#data
      +<!DOCTYPE html><body><b><nobr>1<table><nobr></b><i><nobr>2<nobr></i>3
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <nobr>
      +|         "1"
      +|         <nobr>
      +|           <i>
      +|         <i>
      +|           <nobr>
      +|             "2"
      +|           <nobr>
      +|         <nobr>
      +|           "3"
      +|         <table>
      +
      +#data
      +<!DOCTYPE html><body><b><nobr>1<table><tr><td><nobr></b><i><nobr>2<nobr></i>3
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <nobr>
      +|         "1"
      +|         <table>
      +|           <tbody>
      +|             <tr>
      +|               <td>
      +|                 <nobr>
      +|                   <i>
      +|                 <i>
      +|                   <nobr>
      +|                     "2"
      +|                   <nobr>
      +|                 <nobr>
      +|                   "3"
      +
      +#data
      +<!DOCTYPE html><body><b><nobr>1<div><nobr></b><i><nobr>2<nobr></i>3
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <nobr>
      +|         "1"
      +|     <div>
      +|       <b>
      +|         <nobr>
      +|         <nobr>
      +|       <nobr>
      +|         <i>
      +|       <i>
      +|         <nobr>
      +|           "2"
      +|         <nobr>
      +|       <nobr>
      +|         "3"
      +
      +#data
      +<!DOCTYPE html><body><b><nobr>1<nobr></b><div><i><nobr>2<nobr></i>3
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <nobr>
      +|         "1"
      +|       <nobr>
      +|     <div>
      +|       <nobr>
      +|         <i>
      +|       <i>
      +|         <nobr>
      +|           "2"
      +|         <nobr>
      +|       <nobr>
      +|         "3"
      +
      +#data
      +<!DOCTYPE html><body><b><nobr>1<nobr><ins></b><i><nobr>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <nobr>
      +|         "1"
      +|       <nobr>
      +|         <ins>
      +|     <nobr>
      +|       <i>
      +|     <i>
      +|       <nobr>
      +
      +#data
      +<!DOCTYPE html><body><b><nobr>1<ins><nobr></b><i>2
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       <nobr>
      +|         "1"
      +|         <ins>
      +|       <nobr>
      +|     <nobr>
      +|       <i>
      +|         "2"
      +
      +#data
      +<!DOCTYPE html><body><b>1<nobr></b><i><nobr>2</i>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <b>
      +|       "1"
      +|       <nobr>
      +|     <nobr>
      +|       <i>
      +|     <i>
      +|       <nobr>
      +|         "2"
      +
      +#data
      +<p><code x</code></p>
      +
      +#errors
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <code>
      +|         code=""
      +|         x<=""
      +|     <code>
      +|       code=""
      +|       x<=""
      +|       "
      +"
      +
      +#data
      +<!DOCTYPE html><svg><foreignObject><p><i></p>a
      +#errors
      +45: End tag “p” seen, but there were open elements.
      +41: Unclosed element “i”.
      +46: End of file seen and there were open elements.
      +35: Unclosed element “foreignObject”.
      +20: Unclosed element “svg”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <svg svg>
      +|       <svg foreignObject>
      +|         <p>
      +|           <i>
      +|         <i>
      +|           "a"
      +
      +#data
      +<!DOCTYPE html><table><tr><td><svg><foreignObject><p><i></p>a
      +#errors
      +56: End tag “p” seen, but there were open elements.
      +52: Unclosed element “i”.
      +57: End of file seen and there were open elements.
      +46: Unclosed element “foreignObject”.
      +31: Unclosed element “svg”.
      +22: Unclosed element “table”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <svg svg>
      +|               <svg foreignObject>
      +|                 <p>
      +|                   <i>
      +|                 <i>
      +|                   "a"
      +
      +#data
      +<!DOCTYPE html><math><mtext><p><i></p>a
      +#errors
      +38: End tag “p” seen, but there were open elements.
      +34: Unclosed element “i”.
      +39: End of file in a foreign namespace context.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <math math>
      +|       <math mtext>
      +|         <p>
      +|           <i>
      +|         <i>
      +|           "a"
      +
      +#data
      +<!DOCTYPE html><table><tr><td><math><mtext><p><i></p>a
      +#errors
      +53: End tag “p” seen, but there were open elements.
      +49: Unclosed element “i”.
      +54: End of file in a foreign namespace context.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <table>
      +|       <tbody>
      +|         <tr>
      +|           <td>
      +|             <math math>
      +|               <math mtext>
      +|                 <p>
      +|                   <i>
      +|                 <i>
      +|                   "a"
      +
      +#data
      +<!DOCTYPE html><body><div><!/div>a
      +#errors
      +29: Bogus comment.
      +34: End of file seen and there were open elements.
      +26: Unclosed element “div”.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <div>
      +|       <!-- /div -->
      +|       "a"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests3.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests3.dat
      new file mode 100644
      index 00000000..38dc501b
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests3.dat
      @@ -0,0 +1,305 @@
      +#data
      +<head></head><style></style>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
      +Line: 1 Col: 20 Unexpected start tag (style) that can be in head. Moved.
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|   <body>
      +
      +#data
      +<head></head><script></script>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
      +Line: 1 Col: 21 Unexpected start tag (script) that can be in head. Moved.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|   <body>
      +
      +#data
      +<head></head><!-- --><style></style><!-- --><script></script>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
      +Line: 1 Col: 28 Unexpected start tag (style) that can be in head. Moved.
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|     <script>
      +|   <!--   -->
      +|   <!--   -->
      +|   <body>
      +
      +#data
      +<head></head><!-- -->x<style></style><!-- --><script></script>
      +#errors
      +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <!--   -->
      +|   <body>
      +|     "x"
      +|     <style>
      +|     <!--   -->
      +|     <script>
      +
      +#data
      +<!DOCTYPE html><html><head></head><body><pre>
      +</pre></body></html>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <pre>
      +
      +#data
      +<!DOCTYPE html><html><head></head><body><pre>
      +foo</pre></body></html>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <pre>
      +|       "foo"
      +
      +#data
      +<!DOCTYPE html><html><head></head><body><pre>
      +
      +foo</pre></body></html>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <pre>
      +|       "
      +foo"
      +
      +#data
      +<!DOCTYPE html><html><head></head><body><pre>
      +foo
      +</pre></body></html>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <pre>
      +|       "foo
      +"
      +
      +#data
      +<!DOCTYPE html><html><head></head><body><pre>x</pre><span>
      +</span></body></html>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <pre>
      +|       "x"
      +|     <span>
      +|       "
      +"
      +
      +#data
      +<!DOCTYPE html><html><head></head><body><pre>x
      +y</pre></body></html>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <pre>
      +|       "x
      +y"
      +
      +#data
      +<!DOCTYPE html><html><head></head><body><pre>x<div>
      +y</pre></body></html>
      +#errors
      +Line: 2 Col: 7 End tag (pre) seen too early. Expected other end tag.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <pre>
      +|       "x"
      +|       <div>
      +|         "
      +y"
      +
      +#data
      +<!DOCTYPE html><pre>&#x0a;&#x0a;A</pre>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <pre>
      +|       "
      +A"
      +
      +#data
      +<!DOCTYPE html><HTML><META><HEAD></HEAD></HTML>
      +#errors
      +Line: 1 Col: 33 Unexpected start tag head in existing head. Ignored.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|     <meta>
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><HTML><HEAD><head></HEAD></HTML>
      +#errors
      +Line: 1 Col: 33 Unexpected start tag head in existing head. Ignored.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +
      +#data
      +<textarea>foo<span>bar</span><i>baz
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
      +Line: 1 Col: 35 Expected closing tag. Unexpected end of file.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "foo<span>bar</span><i>baz"
      +
      +#data
      +<title>foo<span>bar</em><i>baz
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
      +Line: 1 Col: 30 Unexpected end of file. Expected end tag (title).
      +#document
      +| <html>
      +|   <head>
      +|     <title>
      +|       "foo<span>bar</em><i>baz"
      +|   <body>
      +
      +#data
      +<!DOCTYPE html><textarea>
      +</textarea>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +
      +#data
      +<!DOCTYPE html><textarea>
      +foo</textarea>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "foo"
      +
      +#data
      +<!DOCTYPE html><textarea>
      +
      +foo</textarea>
      +#errors
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       "
      +foo"
      +
      +#data
      +<!DOCTYPE html><html><head></head><body><ul><li><div><p><li></ul></body></html>
      +#errors
      +Line: 1 Col: 60 Missing end tag (div, li).
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <ul>
      +|       <li>
      +|         <div>
      +|           <p>
      +|       <li>
      +
      +#data
      +<!doctype html><nobr><nobr><nobr>
      +#errors
      +Line: 1 Col: 27 Unexpected start tag (nobr) implies end tag (nobr).
      +Line: 1 Col: 33 Unexpected start tag (nobr) implies end tag (nobr).
      +Line: 1 Col: 33 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <nobr>
      +|     <nobr>
      +|     <nobr>
      +
      +#data
      +<!doctype html><nobr><nobr></nobr><nobr>
      +#errors
      +Line: 1 Col: 27 Unexpected start tag (nobr) implies end tag (nobr).
      +Line: 1 Col: 40 Expected closing tag. Unexpected end of file.
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <nobr>
      +|     <nobr>
      +|     <nobr>
      +
      +#data
      +<!doctype html><html><body><p><table></table></body></html>
      +#errors
      +Not known
      +#document
      +| <!DOCTYPE html>
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|     <table>
      +
      +#data
      +<p><table></table>
      +#errors
      +Not known
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <p>
      +|       <table>
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests4.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests4.dat
      new file mode 100644
      index 00000000..3c506326
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests4.dat
      @@ -0,0 +1,59 @@
      +#data
      +direct div content
      +#errors
      +#document-fragment
      +div
      +#document
      +| "direct div content"
      +
      +#data
      +direct textarea content
      +#errors
      +#document-fragment
      +textarea
      +#document
      +| "direct textarea content"
      +
      +#data
      +textarea content with <em>pseudo</em> <foo>markup
      +#errors
      +#document-fragment
      +textarea
      +#document
      +| "textarea content with <em>pseudo</em> <foo>markup"
      +
      +#data
      +this is &#x0043;DATA inside a <style> element
      +#errors
      +#document-fragment
      +style
      +#document
      +| "this is &#x0043;DATA inside a <style> element"
      +
      +#data
      +</plaintext>
      +#errors
      +#document-fragment
      +plaintext
      +#document
      +| "</plaintext>"
      +
      +#data
      +setting html's innerHTML
      +#errors
      +Line: 1 Col: 24 Unexpected EOF in inner html mode.
      +#document-fragment
      +html
      +#document
      +| <head>
      +| <body>
      +|   "setting html's innerHTML"
      +
      +#data
      +<title>setting head's innerHTML</title>
      +#errors
      +#document-fragment
      +head
      +#document
      +| <title>
      +|   "setting head's innerHTML"
      diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests5.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests5.dat
      new file mode 100644
      index 00000000..d7b5128a
      --- /dev/null
      +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests5.dat
      @@ -0,0 +1,191 @@
      +#data
      +<style> <!-- </style>x
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +Line: 1 Col: 22 Unexpected end of file. Expected end tag (style).
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       " <!-- "
      +|   <body>
      +|     "x"
      +
      +#data
      +<style> <!-- </style> --> </style>x
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       " <!-- "
      +|     " "
      +|   <body>
      +|     "--> x"
      +
      +#data
      +<style> <!--> </style>x
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       " <!--> "
      +|   <body>
      +|     "x"
      +
      +#data
      +<style> <!---> </style>x
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       " <!---> "
      +|   <body>
      +|     "x"
      +
      +#data
      +<iframe> <!---> </iframe>x
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <iframe>
      +|       " <!---> "
      +|     "x"
      +
      +#data
      +<iframe> <!--- </iframe>->x</iframe> --> </iframe>x
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <iframe>
      +|       " <!--- "
      +|     "->x --> x"
      +
      +#data
      +<script> <!-- </script> --> </script>x
      +#errors
      +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <script>
      +|       " <!-- "
      +|     " "
      +|   <body>
      +|     "--> x"
      +
      +#data
      +<title> <!-- </title> --> </title>x
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <title>
      +|       " <!-- "
      +|     " "
      +|   <body>
      +|     "--> x"
      +
      +#data
      +<textarea> <!--- </textarea>->x</textarea> --> </textarea>x
      +#errors
      +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|   <body>
      +|     <textarea>
      +|       " <!--- "
      +|     "->x --> x"
      +
      +#data
      +<style> <!</-- </style>x
      +#errors
      +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
      +#document
      +| <html>
      +|   <head>
      +|     <style>
      +|       " <!</-- "
      +|   <body>
      +|     "x"
      +
      +#data
      +<p><xmp>
      +#errors
      +XXX: Unknown
      +#document
      +| 
      +|   
      +|   
      +|     

      +|

      + +#data +<xmp> <!-- > --> +#errors +Line: 1 Col: 5 Unexpected start tag (xmp). Expected DOCTYPE. +#document +| +| +| +| +| " <!-- > --> " + +#data +<title>&amp;</title> +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +#document +| <html> +| <head> +| <title> +| "&" +| <body> + +#data +<title><!--&amp;--></title> +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +#document +| <html> +| <head> +| <title> +| "<!--&-->" +| <body> + +#data +<title><!--</title> +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +Line: 1 Col: 19 Unexpected end of file. Expected end tag (title). +#document +| <html> +| <head> +| <title> +| "<!--" +| <body> + +#data +<noscript><!--</noscript>--></noscript> +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +#document +| <html> +| <head> +| <noscript> +| "<!--" +| <body> +| "-->" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests6.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests6.dat new file mode 100644 index 00000000..f28ece4f --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests6.dat @@ -0,0 +1,663 @@ +#data +<!doctype html></head> <head> +#errors +Line: 1 Col: 29 Unexpected start tag head. Ignored. +#document +| <!DOCTYPE html> +| <html> +| <head> +| " " +| <body> + +#data +<!doctype html><form><div></form><div> +#errors +33: End tag "form" seen but there were unclosed elements. +38: End of file seen and there were open elements. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <div> +| <div> + +#data +<!doctype html><title>&amp;</title> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <title> +| "&" +| <body> + +#data +<!doctype html><title><!--&amp;--></title> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <title> +| "<!--&-->" +| <body> + +#data +<!doctype> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. 
+Line: 1 Col: 10 Unexpected > character. Expected DOCTYPE name. +Line: 1 Col: 10 Erroneous DOCTYPE. +#document +| <!DOCTYPE > +| <html> +| <head> +| <body> + +#data +<!---x +#errors +Line: 1 Col: 6 Unexpected end of file in comment. +Line: 1 Col: 6 Unexpected End of file. Expected DOCTYPE. +#document +| <!-- -x --> +| <html> +| <head> +| <body> + +#data +<body> +<div> +#errors +Line: 1 Col: 6 Unexpected start tag (body). +Line: 2 Col: 5 Expected closing tag. Unexpected end of file. +#document-fragment +div +#document +| " +" +| <div> + +#data +<frameset></frameset> +foo +#errors +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. +Line: 2 Col: 3 Unexpected non-space characters in the after frameset phase. Ignored. +#document +| <html> +| <head> +| <frameset> +| " +" + +#data +<frameset></frameset> +<noframes> +#errors +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. +Line: 2 Col: 10 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <frameset> +| " +" +| <noframes> + +#data +<frameset></frameset> +<div> +#errors +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. +Line: 2 Col: 5 Unexpected start tag (div) in the after frameset phase. Ignored. +#document +| <html> +| <head> +| <frameset> +| " +" + +#data +<frameset></frameset> +</html> +#errors +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. +#document +| <html> +| <head> +| <frameset> +| " +" + +#data +<frameset></frameset> +</div> +#errors +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. +Line: 2 Col: 6 Unexpected end tag (div) in the after frameset phase. Ignored. +#document +| <html> +| <head> +| <frameset> +| " +" + +#data +<form><form> +#errors +Line: 1 Col: 6 Unexpected start tag (form). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected start tag (form). +Line: 1 Col: 12 Expected closing tag. Unexpected end of file. 
+#document +| <html> +| <head> +| <body> +| <form> + +#data +<button><button> +#errors +Line: 1 Col: 8 Unexpected start tag (button). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected start tag (button) implies end tag (button). +Line: 1 Col: 16 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <button> +| <button> + +#data +<table><tr><td></th> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end tag (th). Ignored. +Line: 1 Col: 20 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> + +#data +<table><caption><td> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end tag (td). Ignored. +Line: 1 Col: 20 Unexpected table cell start tag (td) in the table body phase. +Line: 1 Col: 20 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <table> +| <caption> +| <tbody> +| <tr> +| <td> + +#data +<table><caption><div> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 21 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <table> +| <caption> +| <div> + +#data +</caption><div> +#errors +Line: 1 Col: 10 Unexpected end tag (caption). Ignored. +Line: 1 Col: 15 Expected closing tag. Unexpected end of file. +#document-fragment +caption +#document +| <div> + +#data +<table><caption><div></caption> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end tag (caption). Missing end tag (div). +Line: 1 Col: 31 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| <table> +| <caption> +| <div> + +#data +<table><caption></table> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 24 Unexpected end table tag in caption. 
Generates implied end caption. +#document +| <html> +| <head> +| <body> +| <table> +| <caption> + +#data +</table><div> +#errors +Line: 1 Col: 8 Unexpected end table tag in caption. Generates implied end caption. +Line: 1 Col: 8 Unexpected end tag (caption). Ignored. +Line: 1 Col: 13 Expected closing tag. Unexpected end of file. +#document-fragment +caption +#document +| <div> + +#data +<table><caption></body></col></colgroup></html></tbody></td></tfoot></th></thead></tr> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end tag (body). Ignored. +Line: 1 Col: 29 Unexpected end tag (col). Ignored. +Line: 1 Col: 40 Unexpected end tag (colgroup). Ignored. +Line: 1 Col: 47 Unexpected end tag (html). Ignored. +Line: 1 Col: 55 Unexpected end tag (tbody). Ignored. +Line: 1 Col: 60 Unexpected end tag (td). Ignored. +Line: 1 Col: 68 Unexpected end tag (tfoot). Ignored. +Line: 1 Col: 73 Unexpected end tag (th). Ignored. +Line: 1 Col: 81 Unexpected end tag (thead). Ignored. +Line: 1 Col: 86 Unexpected end tag (tr). Ignored. +Line: 1 Col: 86 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <table> +| <caption> + +#data +<table><caption><div></div> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 27 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <table> +| <caption> +| <div> + +#data +<table><tr><td></body></caption></col></colgroup></html> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end tag (body). Ignored. +Line: 1 Col: 32 Unexpected end tag (caption). Ignored. +Line: 1 Col: 38 Unexpected end tag (col). Ignored. +Line: 1 Col: 49 Unexpected end tag (colgroup). Ignored. +Line: 1 Col: 56 Unexpected end tag (html). Ignored. +Line: 1 Col: 56 Expected closing tag. Unexpected end of file. 
+#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> + +#data +</table></tbody></tfoot></thead></tr><div> +#errors +Line: 1 Col: 8 Unexpected end tag (table). Ignored. +Line: 1 Col: 16 Unexpected end tag (tbody). Ignored. +Line: 1 Col: 24 Unexpected end tag (tfoot). Ignored. +Line: 1 Col: 32 Unexpected end tag (thead). Ignored. +Line: 1 Col: 37 Unexpected end tag (tr). Ignored. +Line: 1 Col: 42 Expected closing tag. Unexpected end of file. +#document-fragment +td +#document +| <div> + +#data +<table><colgroup>foo +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 20 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| "foo" +| <table> +| <colgroup> + +#data +foo<col> +#errors +Line: 1 Col: 3 Unexpected end tag (colgroup). Ignored. +#document-fragment +colgroup +#document +| <col> + +#data +<table><colgroup></col> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 23 This element (col) has no end tag. +Line: 1 Col: 23 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <table> +| <colgroup> + +#data +<frameset><div> +#errors +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. +Line: 1 Col: 15 Unexpected start tag token (div) in the frameset phase. Ignored. +Line: 1 Col: 15 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <frameset> + +#data +</frameset><frame> +#errors +Line: 1 Col: 11 Unexpected end tag token (frameset) in the frameset phase (innerHTML). +#document-fragment +frameset +#document +| <frame> + +#data +<frameset></div> +#errors +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end tag token (div) in the frameset phase. Ignored. +Line: 1 Col: 16 Expected closing tag. Unexpected end of file. 
+#document +| <html> +| <head> +| <frameset> + +#data +</body><div> +#errors +Line: 1 Col: 7 Unexpected end tag (body). Ignored. +Line: 1 Col: 12 Expected closing tag. Unexpected end of file. +#document-fragment +body +#document +| <div> + +#data +<table><tr><div> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected start tag (div) in table context caused voodoo mode. +Line: 1 Col: 16 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| <div> +| <table> +| <tbody> +| <tr> + +#data +</tr><td> +#errors +Line: 1 Col: 5 Unexpected end tag (tr). Ignored. +#document-fragment +tr +#document +| <td> + +#data +</tbody></tfoot></thead><td> +#errors +Line: 1 Col: 8 Unexpected end tag (tbody). Ignored. +Line: 1 Col: 16 Unexpected end tag (tfoot). Ignored. +Line: 1 Col: 24 Unexpected end tag (thead). Ignored. +#document-fragment +tr +#document +| <td> + +#data +<table><tr><div><td> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected start tag (div) in table context caused voodoo mode. +Line: 1 Col: 20 Unexpected implied end tag (div) in the table row phase. +Line: 1 Col: 20 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <div> +| <table> +| <tbody> +| <tr> +| <td> + +#data +<caption><col><colgroup><tbody><tfoot><thead><tr> +#errors +Line: 1 Col: 9 Unexpected start tag (caption). +Line: 1 Col: 14 Unexpected start tag (col). +Line: 1 Col: 24 Unexpected start tag (colgroup). +Line: 1 Col: 31 Unexpected start tag (tbody). +Line: 1 Col: 38 Unexpected start tag (tfoot). +Line: 1 Col: 45 Unexpected start tag (thead). +Line: 1 Col: 49 Unexpected end of file. Expected table content. +#document-fragment +tbody +#document +| <tr> + +#data +<table><tbody></thead> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end tag (thead) in the table body phase. Ignored. 
+Line: 1 Col: 22 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> + +#data +</table><tr> +#errors +Line: 1 Col: 8 Unexpected end tag (table). Ignored. +Line: 1 Col: 12 Unexpected end of file. Expected table content. +#document-fragment +tbody +#document +| <tr> + +#data +<table><tbody></body></caption></col></colgroup></html></td></th></tr> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end tag (body) in the table body phase. Ignored. +Line: 1 Col: 31 Unexpected end tag (caption) in the table body phase. Ignored. +Line: 1 Col: 37 Unexpected end tag (col) in the table body phase. Ignored. +Line: 1 Col: 48 Unexpected end tag (colgroup) in the table body phase. Ignored. +Line: 1 Col: 55 Unexpected end tag (html) in the table body phase. Ignored. +Line: 1 Col: 60 Unexpected end tag (td) in the table body phase. Ignored. +Line: 1 Col: 65 Unexpected end tag (th) in the table body phase. Ignored. +Line: 1 Col: 70 Unexpected end tag (tr) in the table body phase. Ignored. +Line: 1 Col: 70 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> + +#data +<table><tbody></div> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end tag (div) in table context caused voodoo mode. +Line: 1 Col: 20 End tag (div) seen too early. Expected other end tag. +Line: 1 Col: 20 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> + +#data +<table><table> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected start tag (table) implies end tag (table). +Line: 1 Col: 14 Unexpected end of file. Expected table content. 
+#document +| <html> +| <head> +| <body> +| <table> +| <table> + +#data +<table></body></caption></col></colgroup></html></tbody></td></tfoot></th></thead></tr> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end tag (body). Ignored. +Line: 1 Col: 24 Unexpected end tag (caption). Ignored. +Line: 1 Col: 30 Unexpected end tag (col). Ignored. +Line: 1 Col: 41 Unexpected end tag (colgroup). Ignored. +Line: 1 Col: 48 Unexpected end tag (html). Ignored. +Line: 1 Col: 56 Unexpected end tag (tbody). Ignored. +Line: 1 Col: 61 Unexpected end tag (td). Ignored. +Line: 1 Col: 69 Unexpected end tag (tfoot). Ignored. +Line: 1 Col: 74 Unexpected end tag (th). Ignored. +Line: 1 Col: 82 Unexpected end tag (thead). Ignored. +Line: 1 Col: 87 Unexpected end tag (tr). Ignored. +Line: 1 Col: 87 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| <table> + +#data +</table><tr> +#errors +Line: 1 Col: 8 Unexpected end tag (table). Ignored. +Line: 1 Col: 12 Unexpected end of file. Expected table content. +#document-fragment +table +#document +| <tbody> +| <tr> + +#data +<body></body></html> +#errors +Line: 1 Col: 20 Unexpected html end tag in inner html mode. +Line: 1 Col: 20 Unexpected EOF in inner html mode. +#document-fragment +html +#document +| <head> +| <body> + +#data +<html><frameset></frameset></html> +#errors +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE. +#document +| <html> +| <head> +| <frameset> +| " " + +#data +<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"><html></html> +#errors +Line: 1 Col: 50 Erroneous DOCTYPE. +Line: 1 Col: 63 Unexpected end tag (html) after the (implied) root element. +#document +| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" ""> +| <html> +| <head> +| <body> + +#data +<param><frameset></frameset> +#errors +Line: 1 Col: 7 Unexpected start tag (param). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected start tag (frameset). 
+#document +| <html> +| <head> +| <frameset> + +#data +<source><frameset></frameset> +#errors +Line: 1 Col: 7 Unexpected start tag (source). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected start tag (frameset). +#document +| <html> +| <head> +| <frameset> + +#data +<track><frameset></frameset> +#errors +Line: 1 Col: 7 Unexpected start tag (track). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected start tag (frameset). +#document +| <html> +| <head> +| <frameset> + +#data +</html><frameset></frameset> +#errors +7: End tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”. +17: Stray “frameset” start tag. +17: “frameset” start tag seen. +#document +| <html> +| <head> +| <frameset> + +#data +</body><frameset></frameset> +#errors +7: End tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”. +17: Stray “frameset” start tag. +17: “frameset” start tag seen. +#document +| <html> +| <head> +| <frameset> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests7.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests7.dat new file mode 100644 index 00000000..f5193c66 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests7.dat @@ -0,0 +1,390 @@ +#data +<!doctype html><body><title>X</title> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <title> +| "X" + +#data +<!doctype html><table><title>X</title></table> +#errors +Line: 1 Col: 29 Unexpected start tag (title) in table context caused voodoo mode. +Line: 1 Col: 38 Unexpected end tag (title) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <title> +| "X" +| <table> + +#data +<!doctype html><head></head><title>X</title> +#errors +Line: 1 Col: 35 Unexpected start tag (title) that can be in head. Moved. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <title> +| "X" +| <body> + +#data +<!doctype html></head><title>X</title> +#errors +Line: 1 Col: 29 Unexpected start tag (title) that can be in head. Moved. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <title> +| "X" +| <body> + +#data +<!doctype html><table><meta></table> +#errors +Line: 1 Col: 28 Unexpected start tag (meta) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <meta> +| <table> + +#data +<!doctype html><table>X<tr><td><table> <meta></table></table> +#errors +Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 45 Unexpected start tag (meta) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "X" +| <table> +| <tbody> +| <tr> +| <td> +| <meta> +| <table> +| " " + +#data +<!doctype html><html> <head> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!doctype html> <head> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!doctype html><table><style> <tr>x </style> </table> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <style> +| " <tr>x " +| " " + +#data +<!doctype html><table><TBODY><script> <tr>x </script> </table> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <script> +| " <tr>x " +| " " + +#data +<!doctype html><p><applet><p>X</p></applet> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <applet> +| <p> +| "X" + +#data +<!doctype html><listing> +X</listing> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <listing> +| "X" + +#data +<!doctype html><select><input>X +#errors +Line: 1 Col: 30 Unexpected input start tag in the select phase. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <input> +| "X" + +#data +<!doctype html><select><select>X +#errors +Line: 1 Col: 31 Unexpected select start tag in the select phase treated as select end tag. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| "X" + +#data +<!doctype html><table><input type=hidDEN></table> +#errors +Line: 1 Col: 41 Unexpected input with type hidden in table context. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <input> +| type="hidDEN" + +#data +<!doctype html><table>X<input type=hidDEN></table> +#errors +Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "X" +| <table> +| <input> +| type="hidDEN" + +#data +<!doctype html><table> <input type=hidDEN></table> +#errors +Line: 1 Col: 43 Unexpected input with type hidden in table context. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| " " +| <input> +| type="hidDEN" + +#data +<!doctype html><table> <input type='hidDEN'></table> +#errors +Line: 1 Col: 45 Unexpected input with type hidden in table context. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| " " +| <input> +| type="hidDEN" + +#data +<!doctype html><table><input type=" hidden"><input type=hidDEN></table> +#errors +Line: 1 Col: 44 Unexpected start tag (input) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <input> +| type=" hidden" +| <table> +| <input> +| type="hidDEN" + +#data +<!doctype html><table><select>X<tr> +#errors +Line: 1 Col: 30 Unexpected start tag (select) in table context caused voodoo mode. +Line: 1 Col: 35 Unexpected table element start tag (trs) in the select in table phase. +Line: 1 Col: 35 Unexpected end of file. Expected table content. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| "X" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><select>X</select> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| "X" + +#data +<!DOCTYPE hTmL><html></html> +#errors +Line: 1 Col: 28 Unexpected end tag (html) after the (implied) root element. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE HTML><html></html> +#errors +Line: 1 Col: 28 Unexpected end tag (html) after the (implied) root element. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<body>X</body></body> +#errors +Line: 1 Col: 21 Unexpected end tag token (body) in the after body phase. +Line: 1 Col: 21 Unexpected EOF in inner html mode. +#document-fragment +html +#document +| <head> +| <body> +| "X" + +#data +<div><p>a</x> b +#errors +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end tag (x). Ignored. +Line: 1 Col: 15 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <div> +| <p> +| "a b" + +#data +<table><tr><td><code></code> </table> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. 
+#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <code> +| " " + +#data +<table><b><tr><td>aaa</td></tr>bbb</table>ccc +#errors +XXX: Fix me +#document +| <html> +| <head> +| <body> +| <b> +| <b> +| "bbb" +| <table> +| <tbody> +| <tr> +| <td> +| "aaa" +| <b> +| "ccc" + +#data +A<table><tr> B</tr> B</table> +#errors +XXX: Fix me +#document +| <html> +| <head> +| <body> +| "A B B" +| <table> +| <tbody> +| <tr> + +#data +A<table><tr> B</tr> </em>C</table> +#errors +XXX: Fix me +#document +| <html> +| <head> +| <body> +| "A BC" +| <table> +| <tbody> +| <tr> +| " " + +#data +<select><keygen> +#errors +Not known +#document +| <html> +| <head> +| <body> +| <select> +| <keygen> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests8.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests8.dat new file mode 100644 index 00000000..90e6c919 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests8.dat @@ -0,0 +1,148 @@ +#data +<div> +<div></div> +</span>x +#errors +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE. +Line: 3 Col: 7 Unexpected end tag (span). Ignored. +Line: 3 Col: 8 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <div> +| " +" +| <div> +| " +x" + +#data +<div>x<div></div> +</span>x +#errors +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE. +Line: 2 Col: 7 Unexpected end tag (span). Ignored. +Line: 2 Col: 8 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <div> +| "x" +| <div> +| " +x" + +#data +<div>x<div></div>x</span>x +#errors +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE. +Line: 1 Col: 25 Unexpected end tag (span). Ignored. +Line: 1 Col: 26 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <div> +| "x" +| <div> +| "xx" + +#data +<div>x<div></div>y</span>z +#errors +Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE. 
+Line: 1 Col: 25 Unexpected end tag (span). Ignored. +Line: 1 Col: 26 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <div> +| "x" +| <div> +| "yz" + +#data +<table><div>x<div></div>x</span>x +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected start tag (div) in table context caused voodoo mode. +Line: 1 Col: 18 Unexpected start tag (div) in table context caused voodoo mode. +Line: 1 Col: 24 Unexpected end tag (div) in table context caused voodoo mode. +Line: 1 Col: 32 Unexpected end tag (span) in table context caused voodoo mode. +Line: 1 Col: 32 Unexpected end tag (span). Ignored. +Line: 1 Col: 33 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| <div> +| "x" +| <div> +| "xx" +| <table> + +#data +x<table>x +#errors +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +Line: 1 Col: 9 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 9 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| "xx" +| <table> + +#data +x<table><table>x +#errors +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +Line: 1 Col: 15 Unexpected start tag (table) implies end tag (table). +Line: 1 Col: 16 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 16 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| "x" +| <table> +| "x" +| <table> + +#data +<b>a<div></div><div></b>y +#errors +Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE. +Line: 1 Col: 24 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm. +Line: 1 Col: 25 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <b> +| "a" +| <div> +| <div> +| <b> +| "y" + +#data +<a><div><p></a> +#errors +Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE. 
+Line: 1 Col: 15 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm. +Line: 1 Col: 15 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm. +Line: 1 Col: 15 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <a> +| <div> +| <a> +| <p> +| <a> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests9.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests9.dat new file mode 100644 index 00000000..554e27ae --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests9.dat @@ -0,0 +1,457 @@ +#data +<!DOCTYPE html><math></math> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> + +#data +<!DOCTYPE html><body><math></math> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> + +#data +<!DOCTYPE html><math><mi> +#errors +25: End of file in a foreign namespace context. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mi> + +#data +<!DOCTYPE html><math><annotation-xml><svg><u> +#errors +45: HTML start tag “u” in a foreign namespace context. +45: End of file seen and there were open elements. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math annotation-xml> +| <svg svg> +| <u> + +#data +<!DOCTYPE html><body><select><math></math></select> +#errors +Line: 1 Col: 35 Unexpected start tag token (math) in the select phase. Ignored. +Line: 1 Col: 42 Unexpected end tag (math) in the select phase. Ignored. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!DOCTYPE html><body><select><option><math></math></option></select> +#errors +Line: 1 Col: 43 Unexpected start tag token (math) in the select phase. Ignored. +Line: 1 Col: 50 Unexpected end tag (math) in the select phase. Ignored. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> + +#data +<!DOCTYPE html><body><table><math></math></table> +#errors +Line: 1 Col: 34 Unexpected start tag (math) in table context caused voodoo mode. +Line: 1 Col: 41 Unexpected end tag (math) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <table> + +#data +<!DOCTYPE html><body><table><math><mi>foo</mi></math></table> +#errors +Line: 1 Col: 34 Unexpected start tag (math) in table context caused voodoo mode. +Line: 1 Col: 46 Unexpected end tag (mi) in table context caused voodoo mode. +Line: 1 Col: 53 Unexpected end tag (math) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mi> +| "foo" +| <table> + +#data +<!DOCTYPE html><body><table><math><mi>foo</mi><mi>bar</mi></math></table> +#errors +Line: 1 Col: 34 Unexpected start tag (math) in table context caused voodoo mode. +Line: 1 Col: 46 Unexpected end tag (mi) in table context caused voodoo mode. +Line: 1 Col: 58 Unexpected end tag (mi) in table context caused voodoo mode. +Line: 1 Col: 65 Unexpected end tag (math) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mi> +| "foo" +| <math mi> +| "bar" +| <table> + +#data +<!DOCTYPE html><body><table><tbody><math><mi>foo</mi><mi>bar</mi></math></tbody></table> +#errors +Line: 1 Col: 41 Unexpected start tag (math) in table context caused voodoo mode. +Line: 1 Col: 53 Unexpected end tag (mi) in table context caused voodoo mode. +Line: 1 Col: 65 Unexpected end tag (mi) in table context caused voodoo mode. +Line: 1 Col: 72 Unexpected end tag (math) in table context caused voodoo mode. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mi> +| "foo" +| <math mi> +| "bar" +| <table> +| <tbody> + +#data +<!DOCTYPE html><body><table><tbody><tr><math><mi>foo</mi><mi>bar</mi></math></tr></tbody></table> +#errors +Line: 1 Col: 45 Unexpected start tag (math) in table context caused voodoo mode. +Line: 1 Col: 57 Unexpected end tag (mi) in table context caused voodoo mode. +Line: 1 Col: 69 Unexpected end tag (mi) in table context caused voodoo mode. +Line: 1 Col: 76 Unexpected end tag (math) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mi> +| "foo" +| <math mi> +| "bar" +| <table> +| <tbody> +| <tr> + +#data +<!DOCTYPE html><body><table><tbody><tr><td><math><mi>foo</mi><mi>bar</mi></math></td></tr></tbody></table> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <math math> +| <math mi> +| "foo" +| <math mi> +| "bar" + +#data +<!DOCTYPE html><body><table><tbody><tr><td><math><mi>foo</mi><mi>bar</mi></math><p>baz</td></tr></tbody></table> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <math math> +| <math mi> +| "foo" +| <math mi> +| "bar" +| <p> +| "baz" + +#data +<!DOCTYPE html><body><table><caption><math><mi>foo</mi><mi>bar</mi></math><p>baz</caption></table> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <math math> +| <math mi> +| "foo" +| <math mi> +| "bar" +| <p> +| "baz" + +#data +<!DOCTYPE html><body><table><caption><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux +#errors +Line: 1 Col: 70 HTML start tag "p" in a foreign namespace context. +Line: 1 Col: 81 Unexpected end table tag in caption. Generates implied end caption. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <math math> +| <math mi> +| "foo" +| <math mi> +| "bar" +| <p> +| "baz" +| <p> +| "quux" + +#data +<!DOCTYPE html><body><table><caption><math><mi>foo</mi><mi>bar</mi>baz</table><p>quux +#errors +Line: 1 Col: 78 Unexpected end table tag in caption. Generates implied end caption. +Line: 1 Col: 78 Unexpected end tag (caption). Missing end tag (math). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <math math> +| <math mi> +| "foo" +| <math mi> +| "bar" +| "baz" +| <p> +| "quux" + +#data +<!DOCTYPE html><body><table><colgroup><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux +#errors +Line: 1 Col: 44 Unexpected start tag (math) in table context caused voodoo mode. +Line: 1 Col: 56 Unexpected end tag (mi) in table context caused voodoo mode. +Line: 1 Col: 68 Unexpected end tag (mi) in table context caused voodoo mode. +Line: 1 Col: 71 HTML start tag "p" in a foreign namespace context. +Line: 1 Col: 71 Unexpected start tag (p) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mi> +| "foo" +| <math mi> +| "bar" +| <p> +| "baz" +| <table> +| <colgroup> +| <p> +| "quux" + +#data +<!DOCTYPE html><body><table><tr><td><select><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux +#errors +Line: 1 Col: 50 Unexpected start tag token (math) in the select phase. Ignored. +Line: 1 Col: 54 Unexpected start tag token (mi) in the select phase. Ignored. +Line: 1 Col: 62 Unexpected end tag (mi) in the select phase. Ignored. +Line: 1 Col: 66 Unexpected start tag token (mi) in the select phase. Ignored. +Line: 1 Col: 74 Unexpected end tag (mi) in the select phase. Ignored. +Line: 1 Col: 77 Unexpected start tag token (p) in the select phase. Ignored. +Line: 1 Col: 88 Unexpected table element end tag (tables) in the select in table phase. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <select> +| "foobarbaz" +| <p> +| "quux" + +#data +<!DOCTYPE html><body><table><select><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux +#errors +Line: 1 Col: 36 Unexpected start tag (select) in table context caused voodoo mode. +Line: 1 Col: 42 Unexpected start tag token (math) in the select phase. Ignored. +Line: 1 Col: 46 Unexpected start tag token (mi) in the select phase. Ignored. +Line: 1 Col: 54 Unexpected end tag (mi) in the select phase. Ignored. +Line: 1 Col: 58 Unexpected start tag token (mi) in the select phase. Ignored. +Line: 1 Col: 66 Unexpected end tag (mi) in the select phase. Ignored. +Line: 1 Col: 69 Unexpected start tag token (p) in the select phase. Ignored. +Line: 1 Col: 80 Unexpected table element end tag (tables) in the select in table phase. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| "foobarbaz" +| <table> +| <p> +| "quux" + +#data +<!DOCTYPE html><body></body></html><math><mi>foo</mi><mi>bar</mi><p>baz +#errors +Line: 1 Col: 41 Unexpected start tag (math). +Line: 1 Col: 68 HTML start tag "p" in a foreign namespace context. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mi> +| "foo" +| <math mi> +| "bar" +| <p> +| "baz" + +#data +<!DOCTYPE html><body></body><math><mi>foo</mi><mi>bar</mi><p>baz +#errors +Line: 1 Col: 34 Unexpected start tag token (math) in the after body phase. +Line: 1 Col: 61 HTML start tag "p" in a foreign namespace context. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mi> +| "foo" +| <math mi> +| "bar" +| <p> +| "baz" + +#data +<!DOCTYPE html><frameset><math><mi></mi><mi></mi><p><span> +#errors +Line: 1 Col: 31 Unexpected start tag token (math) in the frameset phase. Ignored. +Line: 1 Col: 35 Unexpected start tag token (mi) in the frameset phase. Ignored. 
+Line: 1 Col: 40 Unexpected end tag token (mi) in the frameset phase. Ignored. +Line: 1 Col: 44 Unexpected start tag token (mi) in the frameset phase. Ignored. +Line: 1 Col: 49 Unexpected end tag token (mi) in the frameset phase. Ignored. +Line: 1 Col: 52 Unexpected start tag token (p) in the frameset phase. Ignored. +Line: 1 Col: 58 Unexpected start tag token (span) in the frameset phase. Ignored. +Line: 1 Col: 58 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!DOCTYPE html><frameset></frameset><math><mi></mi><mi></mi><p><span> +#errors +Line: 1 Col: 42 Unexpected start tag (math) in the after frameset phase. Ignored. +Line: 1 Col: 46 Unexpected start tag (mi) in the after frameset phase. Ignored. +Line: 1 Col: 51 Unexpected end tag (mi) in the after frameset phase. Ignored. +Line: 1 Col: 55 Unexpected start tag (mi) in the after frameset phase. Ignored. +Line: 1 Col: 60 Unexpected end tag (mi) in the after frameset phase. Ignored. +Line: 1 Col: 63 Unexpected start tag (p) in the after frameset phase. Ignored. +Line: 1 Col: 69 Unexpected start tag (span) in the after frameset phase. Ignored. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!DOCTYPE html><body xlink:href=foo><math xlink:href=foo></math> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| xlink:href="foo" +| <math math> +| xlink href="foo" + +#data +<!DOCTYPE html><body xlink:href=foo xml:lang=en><math><mi xml:lang=en xlink:href=foo></mi></math> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| xlink:href="foo" +| xml:lang="en" +| <math math> +| <math mi> +| xlink href="foo" +| xml lang="en" + +#data +<!DOCTYPE html><body xlink:href=foo xml:lang=en><math><mi xml:lang=en xlink:href=foo /></math> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| xlink:href="foo" +| xml:lang="en" +| <math math> +| <math mi> +| xlink href="foo" +| xml lang="en" + +#data +<!DOCTYPE html><body xlink:href=foo xml:lang=en><math><mi xml:lang=en xlink:href=foo />bar</math> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| xlink:href="foo" +| xml:lang="en" +| <math math> +| <math mi> +| xlink href="foo" +| xml lang="en" +| "bar" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat new file mode 100644 index 00000000..6c78661e --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat @@ -0,0 +1,741 @@ +#data +<body><span> +#errors +#document-fragment +body +#document +| <span> + +#data +<span><body> +#errors +#document-fragment +body +#document +| <span> + +#data +<span><body> +#errors +#document-fragment +div +#document +| <span> + +#data +<body><span> +#errors +#document-fragment +html +#document +| <head> +| <body> +| <span> + +#data +<frameset><span> +#errors +#document-fragment +body +#document +| <span> + +#data +<span><frameset> +#errors +#document-fragment +body +#document +| <span> + +#data +<span><frameset> +#errors +#document-fragment +div +#document +| <span> + +#data 
+<frameset><span> +#errors +#document-fragment +html +#document +| <head> +| <frameset> + +#data +<table><tr> +#errors +#document-fragment +table +#document +| <tbody> +| <tr> + +#data +</table><tr> +#errors +#document-fragment +table +#document +| <tbody> +| <tr> + +#data +<a> +#errors +#document-fragment +table +#document +| <a> + +#data +<a> +#errors +#document-fragment +table +#document +| <a> + +#data +<a><caption>a +#errors +#document-fragment +table +#document +| <a> +| <caption> +| "a" + +#data +<a><colgroup><col> +#errors +#document-fragment +table +#document +| <a> +| <colgroup> +| <col> + +#data +<a><tbody><tr> +#errors +#document-fragment +table +#document +| <a> +| <tbody> +| <tr> + +#data +<a><tfoot><tr> +#errors +#document-fragment +table +#document +| <a> +| <tfoot> +| <tr> + +#data +<a><thead><tr> +#errors +#document-fragment +table +#document +| <a> +| <thead> +| <tr> + +#data +<a><tr> +#errors +#document-fragment +table +#document +| <a> +| <tbody> +| <tr> + +#data +<a><th> +#errors +#document-fragment +table +#document +| <a> +| <tbody> +| <tr> +| <th> + +#data +<a><td> +#errors +#document-fragment +table +#document +| <a> +| <tbody> +| <tr> +| <td> + +#data +<table></table><tbody> +#errors +#document-fragment +caption +#document +| <table> + +#data +</table><span> +#errors +#document-fragment +caption +#document +| <span> + +#data +<span></table> +#errors +#document-fragment +caption +#document +| <span> + +#data +</caption><span> +#errors +#document-fragment +caption +#document +| <span> + +#data +<span></caption><span> +#errors +#document-fragment +caption +#document +| <span> +| <span> + +#data +<span><caption><span> +#errors +#document-fragment +caption +#document +| <span> +| <span> + +#data +<span><col><span> +#errors +#document-fragment +caption +#document +| <span> +| <span> + +#data +<span><colgroup><span> +#errors +#document-fragment +caption +#document +| <span> +| <span> + +#data +<span><html><span> +#errors +#document-fragment 
+caption +#document +| <span> +| <span> + +#data +<span><tbody><span> +#errors +#document-fragment +caption +#document +| <span> +| <span> + +#data +<span><td><span> +#errors +#document-fragment +caption +#document +| <span> +| <span> + +#data +<span><tfoot><span> +#errors +#document-fragment +caption +#document +| <span> +| <span> + +#data +<span><thead><span> +#errors +#document-fragment +caption +#document +| <span> +| <span> + +#data +<span><th><span> +#errors +#document-fragment +caption +#document +| <span> +| <span> + +#data +<span><tr><span> +#errors +#document-fragment +caption +#document +| <span> +| <span> + +#data +<span></table><span> +#errors +#document-fragment +caption +#document +| <span> +| <span> + +#data +</colgroup><col> +#errors +#document-fragment +colgroup +#document +| <col> + +#data +<a><col> +#errors +#document-fragment +colgroup +#document +| <col> + +#data +<caption><a> +#errors +#document-fragment +tbody +#document +| <a> + +#data +<col><a> +#errors +#document-fragment +tbody +#document +| <a> + +#data +<colgroup><a> +#errors +#document-fragment +tbody +#document +| <a> + +#data +<tbody><a> +#errors +#document-fragment +tbody +#document +| <a> + +#data +<tfoot><a> +#errors +#document-fragment +tbody +#document +| <a> + +#data +<thead><a> +#errors +#document-fragment +tbody +#document +| <a> + +#data +</table><a> +#errors +#document-fragment +tbody +#document +| <a> + +#data +<a><tr> +#errors +#document-fragment +tbody +#document +| <a> +| <tr> + +#data +<a><td> +#errors +#document-fragment +tbody +#document +| <a> +| <tr> +| <td> + +#data +<a><td> +#errors +#document-fragment +tbody +#document +| <a> +| <tr> +| <td> + +#data +<a><td> +#errors +#document-fragment +tbody +#document +| <a> +| <tr> +| <td> + +#data +<td><table><tbody><a><tr> +#errors +#document-fragment +tbody +#document +| <tr> +| <td> +| <a> +| <table> +| <tbody> +| <tr> + +#data +</tr><td> +#errors +#document-fragment +tr +#document +| <td> + +#data 
+<td><table><a><tr></tr><tr> +#errors +#document-fragment +tr +#document +| <td> +| <a> +| <table> +| <tbody> +| <tr> +| <tr> + +#data +<caption><td> +#errors +#document-fragment +tr +#document +| <td> + +#data +<col><td> +#errors +#document-fragment +tr +#document +| <td> + +#data +<colgroup><td> +#errors +#document-fragment +tr +#document +| <td> + +#data +<tbody><td> +#errors +#document-fragment +tr +#document +| <td> + +#data +<tfoot><td> +#errors +#document-fragment +tr +#document +| <td> + +#data +<thead><td> +#errors +#document-fragment +tr +#document +| <td> + +#data +<tr><td> +#errors +#document-fragment +tr +#document +| <td> + +#data +</table><td> +#errors +#document-fragment +tr +#document +| <td> + +#data +<td><table></table><td> +#errors +#document-fragment +tr +#document +| <td> +| <table> +| <td> + +#data +<td><table></table><td> +#errors +#document-fragment +tr +#document +| <td> +| <table> +| <td> + +#data +<caption><a> +#errors +#document-fragment +td +#document +| <a> + +#data +<col><a> +#errors +#document-fragment +td +#document +| <a> + +#data +<colgroup><a> +#errors +#document-fragment +td +#document +| <a> + +#data +<tbody><a> +#errors +#document-fragment +td +#document +| <a> + +#data +<tfoot><a> +#errors +#document-fragment +td +#document +| <a> + +#data +<th><a> +#errors +#document-fragment +td +#document +| <a> + +#data +<thead><a> +#errors +#document-fragment +td +#document +| <a> + +#data +<tr><a> +#errors +#document-fragment +td +#document +| <a> + +#data +</table><a> +#errors +#document-fragment +td +#document +| <a> + +#data +</tbody><a> +#errors +#document-fragment +td +#document +| <a> + +#data +</td><a> +#errors +#document-fragment +td +#document +| <a> + +#data +</tfoot><a> +#errors +#document-fragment +td +#document +| <a> + +#data +</thead><a> +#errors +#document-fragment +td +#document +| <a> + +#data +</th><a> +#errors +#document-fragment +td +#document +| <a> + +#data +</tr><a> +#errors +#document-fragment +td +#document +| 
<a> + +#data +<table><td><td> +#errors +#document-fragment +td +#document +| <table> +| <tbody> +| <tr> +| <td> +| <td> + +#data +</select><option> +#errors +#document-fragment +select +#document +| <option> + +#data +<input><option> +#errors +#document-fragment +select +#document +| <option> + +#data +<keygen><option> +#errors +#document-fragment +select +#document +| <option> + +#data +<textarea><option> +#errors +#document-fragment +select +#document +| <option> + +#data +</html><!--abc--> +#errors +#document-fragment +html +#document +| <head> +| <body> +| <!-- abc --> + +#data +</frameset><frame> +#errors +#document-fragment +frameset +#document +| <frame> + +#data +#errors +#document-fragment +html +#document +| <head> +| <body> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tricky01.dat b/vendor/golang.org/x/net/html/testdata/webkit/tricky01.dat new file mode 100644 index 00000000..08419924 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tricky01.dat @@ -0,0 +1,261 @@ +#data +<b><p>Bold </b> Not bold</p> +Also not bold. +#errors +#document +| <html> +| <head> +| <body> +| <b> +| <p> +| <b> +| "Bold " +| " Not bold" +| " +Also not bold." + +#data +<html> +<font color=red><i>Italic and Red<p>Italic and Red </font> Just italic.</p> Italic only.</i> Plain +<p>I should not be red. <font color=red>Red. <i>Italic and red.</p> +<p>Italic and red. </i> Red.</font> I should not be red.</p> +<b>Bold <i>Bold and italic</b> Only Italic </i> Plain +#errors +#document +| <html> +| <head> +| <body> +| <font> +| color="red" +| <i> +| "Italic and Red" +| <i> +| <p> +| <font> +| color="red" +| "Italic and Red " +| " Just italic." +| " Italic only." +| " Plain +" +| <p> +| "I should not be red. " +| <font> +| color="red" +| "Red. " +| <i> +| "Italic and red." +| <font> +| color="red" +| <i> +| " +" +| <p> +| <font> +| color="red" +| <i> +| "Italic and red. " +| " Red." +| " I should not be red." 
+| " +" +| <b> +| "Bold " +| <i> +| "Bold and italic" +| <i> +| " Only Italic " +| " Plain" + +#data +<html><body> +<p><font size="7">First paragraph.</p> +<p>Second paragraph.</p></font> +<b><p><i>Bold and Italic</b> Italic</p> +#errors +#document +| <html> +| <head> +| <body> +| " +" +| <p> +| <font> +| size="7" +| "First paragraph." +| <font> +| size="7" +| " +" +| <p> +| "Second paragraph." +| " +" +| <b> +| <p> +| <b> +| <i> +| "Bold and Italic" +| <i> +| " Italic" + +#data +<html> +<dl> +<dt><b>Boo +<dd>Goo? +</dl> +</html> +#errors +#document +| <html> +| <head> +| <body> +| <dl> +| " +" +| <dt> +| <b> +| "Boo +" +| <dd> +| <b> +| "Goo? +" +| <b> +| " +" + +#data +<html><body> +<label><a><div>Hello<div>World</div></a></label> +</body></html> +#errors +#document +| <html> +| <head> +| <body> +| " +" +| <label> +| <a> +| <div> +| <a> +| "Hello" +| <div> +| "World" +| " +" + +#data +<table><center> <font>a</center> <img> <tr><td> </td> </tr> </table> +#errors +#document +| <html> +| <head> +| <body> +| <center> +| " " +| <font> +| "a" +| <font> +| <img> +| " " +| <table> +| " " +| <tbody> +| <tr> +| <td> +| " " +| " " +| " " + +#data +<table><tr><p><a><p>You should see this text. +#errors +#document +| <html> +| <head> +| <body> +| <p> +| <a> +| <p> +| <a> +| "You should see this text." +| <table> +| <tbody> +| <tr> + +#data +<TABLE> +<TR> +<CENTER><CENTER><TD></TD></TR><TR> +<FONT> +<TABLE><tr></tr></TABLE> +</P> +<a></font><font></a> +This page contains an insanely badly-nested tag sequence. +#errors +#document +| <html> +| <head> +| <body> +| <center> +| <center> +| <font> +| " +" +| <table> +| " +" +| <tbody> +| <tr> +| " +" +| <td> +| <tr> +| " +" +| <table> +| <tbody> +| <tr> +| <font> +| " +" +| <p> +| " +" +| <a> +| <a> +| <font> +| <font> +| " +This page contains an insanely badly-nested tag sequence." 
+ +#data +<html> +<body> +<b><nobr><div>This text is in a div inside a nobr</nobr>More text that should not be in the nobr, i.e., the +nobr should have closed the div inside it implicitly. </b><pre>A pre tag outside everything else.</pre> +</body> +</html> +#errors +#document +| <html> +| <head> +| <body> +| " +" +| <b> +| <nobr> +| <div> +| <b> +| <nobr> +| "This text is in a div inside a nobr" +| "More text that should not be in the nobr, i.e., the +nobr should have closed the div inside it implicitly. " +| <pre> +| "A pre tag outside everything else." +| " + +" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/webkit01.dat b/vendor/golang.org/x/net/html/testdata/webkit/webkit01.dat new file mode 100644 index 00000000..9d425e99 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/webkit01.dat @@ -0,0 +1,610 @@ +#data +Test +#errors +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "Test" + +#data +<div></div> +#errors +#document +| <html> +| <head> +| <body> +| <div> + +#data +<div>Test</div> +#errors +#document +| <html> +| <head> +| <body> +| <div> +| "Test" + +#data +<di +#errors +#document +| <html> +| <head> +| <body> + +#data +<div>Hello</div> +<script> +console.log("PASS"); +</script> +<div>Bye</div> +#errors +#document +| <html> +| <head> +| <body> +| <div> +| "Hello" +| " +" +| <script> +| " +console.log("PASS"); +" +| " +" +| <div> +| "Bye" + +#data +<div foo="bar">Hello</div> +#errors +#document +| <html> +| <head> +| <body> +| <div> +| foo="bar" +| "Hello" + +#data +<div>Hello</div> +<script> +console.log("FOO<span>BAR</span>BAZ"); +</script> +<div>Bye</div> +#errors +#document +| <html> +| <head> +| <body> +| <div> +| "Hello" +| " +" +| <script> +| " +console.log("FOO<span>BAR</span>BAZ"); +" +| " +" +| <div> +| "Bye" + +#data +<foo bar="baz"></foo><potato quack="duck"></potato> +#errors +#document +| <html> +| <head> +| <body> +| <foo> +| bar="baz" +| <potato> +| 
quack="duck" + +#data +<foo bar="baz"><potato quack="duck"></potato></foo> +#errors +#document +| <html> +| <head> +| <body> +| <foo> +| bar="baz" +| <potato> +| quack="duck" + +#data +<foo></foo bar="baz"><potato></potato quack="duck"> +#errors +#document +| <html> +| <head> +| <body> +| <foo> +| <potato> + +#data +</ tttt> +#errors +#document +| <!-- tttt --> +| <html> +| <head> +| <body> + +#data +<div FOO ><img><img></div> +#errors +#document +| <html> +| <head> +| <body> +| <div> +| foo="" +| <img> +| <img> + +#data +<p>Test</p<p>Test2</p> +#errors +#document +| <html> +| <head> +| <body> +| <p> +| "TestTest2" + +#data +<rdar://problem/6869687> +#errors +#document +| <html> +| <head> +| <body> +| <rdar:> +| 6869687="" +| problem="" + +#data +<A>test< /A> +#errors +#document +| <html> +| <head> +| <body> +| <a> +| "test< /A>" + +#data +&lt; +#errors +#document +| <html> +| <head> +| <body> +| "<" + +#data +<body foo='bar'><body foo='baz' yo='mama'> +#errors +#document +| <html> +| <head> +| <body> +| foo="bar" +| yo="mama" + +#data +<body></br foo="bar"></body> +#errors +#document +| <html> +| <head> +| <body> +| <br> + +#data +<bdy><br foo="bar"></body> +#errors +#document +| <html> +| <head> +| <body> +| <bdy> +| <br> +| foo="bar" + +#data +<body></body></br foo="bar"> +#errors +#document +| <html> +| <head> +| <body> +| <br> + +#data +<bdy></body><br foo="bar"> +#errors +#document +| <html> +| <head> +| <body> +| <bdy> +| <br> +| foo="bar" + +#data +<html><body></body></html><!-- Hi there --> +#errors +#document +| <html> +| <head> +| <body> +| <!-- Hi there --> + +#data +<html><body></body></html>x<!-- Hi there --> +#errors +#document +| <html> +| <head> +| <body> +| "x" +| <!-- Hi there --> + +#data +<html><body></body></html>x<!-- Hi there --></html><!-- Again --> +#errors +#document +| <html> +| <head> +| <body> +| "x" +| <!-- Hi there --> +| <!-- Again --> + +#data +<html><body></body></html>x<!-- Hi there --></body></html><!-- Again --> +#errors 
+#document +| <html> +| <head> +| <body> +| "x" +| <!-- Hi there --> +| <!-- Again --> + +#data +<html><body><ruby><div><rp>xx</rp></div></ruby></body></html> +#errors +#document +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <rp> +| "xx" + +#data +<html><body><ruby><div><rt>xx</rt></div></ruby></body></html> +#errors +#document +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <rt> +| "xx" + +#data +<html><frameset><!--1--><noframes>A</noframes><!--2--></frameset><!--3--><noframes>B</noframes><!--4--></html><!--5--><noframes>C</noframes><!--6--> +#errors +#document +| <html> +| <head> +| <frameset> +| <!-- 1 --> +| <noframes> +| "A" +| <!-- 2 --> +| <!-- 3 --> +| <noframes> +| "B" +| <!-- 4 --> +| <noframes> +| "C" +| <!-- 5 --> +| <!-- 6 --> + +#data +<select><option>A<select><option>B<select><option>C<select><option>D<select><option>E<select><option>F<select><option>G<select> +#errors +#document +| <html> +| <head> +| <body> +| <select> +| <option> +| "A" +| <option> +| "B" +| <select> +| <option> +| "C" +| <option> +| "D" +| <select> +| <option> +| "E" +| <option> +| "F" +| <select> +| <option> +| "G" + +#data +<dd><dd><dt><dt><dd><li><li> +#errors +#document +| <html> +| <head> +| <body> +| <dd> +| <dd> +| <dt> +| <dt> +| <dd> +| <li> +| <li> + +#data +<div><b></div><div><nobr>a<nobr> +#errors +#document +| <html> +| <head> +| <body> +| <div> +| <b> +| <div> +| <b> +| <nobr> +| "a" +| <nobr> + +#data +<head></head> +<body></body> +#errors +#document +| <html> +| <head> +| " +" +| <body> + +#data +<head></head> <style></style>ddd +#errors +#document +| <html> +| <head> +| <style> +| " " +| <body> +| "ddd" + +#data +<kbd><table></kbd><col><select><tr> +#errors +#document +| <html> +| <head> +| <body> +| <kbd> +| <select> +| <table> +| <colgroup> +| <col> +| <tbody> +| <tr> + +#data +<kbd><table></kbd><col><select><tr></table><div> +#errors +#document +| <html> +| <head> +| <body> +| <kbd> +| <select> +| <table> +| <colgroup> +| <col> +| <tbody> +| <tr> +| 
<div> + +#data +<a><li><style></style><title></title></a> +#errors +#document +| <html> +| <head> +| <body> +| <a> +| <li> +| <a> +| <style> +| <title> + +#data +<font></p><p><meta><title></title></font> +#errors +#document +| <html> +| <head> +| <body> +| <font> +| <p> +| <p> +| <font> +| <meta> +| <title> + +#data +<a><center><title></title><a> +#errors +#document +| <html> +| <head> +| <body> +| <a> +| <center> +| <a> +| <title> +| <a> + +#data +<svg><title><div> +#errors +#document +| <html> +| <head> +| <body> +| <svg svg> +| <svg title> +| <div> + +#data +<svg><title><rect><div> +#errors +#document +| <html> +| <head> +| <body> +| <svg svg> +| <svg title> +| <rect> +| <div> + +#data +<svg><title><svg><div> +#errors +#document +| <html> +| <head> +| <body> +| <svg svg> +| <svg title> +| <svg svg> +| <div> + +#data +<img <="" FAIL> +#errors +#document +| <html> +| <head> +| <body> +| <img> +| <="" +| fail="" + +#data +<ul><li><div id='foo'/>A</li><li>B<div>C</div></li></ul> +#errors +#document +| <html> +| <head> +| <body> +| <ul> +| <li> +| <div> +| id="foo" +| "A" +| <li> +| "B" +| <div> +| "C" + +#data +<svg><em><desc></em> +#errors +#document +| <html> +| <head> +| <body> +| <svg svg> +| <em> +| <desc> + +#data +<table><tr><td><svg><desc><td></desc><circle> +#errors +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <svg svg> +| <svg desc> +| <td> +| <circle> + +#data +<svg><tfoot></mi><td> +#errors +#document +| <html> +| <head> +| <body> +| <svg svg> +| <svg tfoot> +| <svg td> + +#data +<math><mrow><mrow><mn>1</mn></mrow><mi>a</mi></mrow></math> +#errors +#document +| <html> +| <head> +| <body> +| <math math> +| <math mrow> +| <math mrow> +| <math mn> +| "1" +| <math mi> +| "a" + +#data +<!doctype html><input type="hidden"><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><input type="button"><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| 
<body> +| <input> +| type="button" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/webkit02.dat b/vendor/golang.org/x/net/html/testdata/webkit/webkit02.dat new file mode 100644 index 00000000..905783d3 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/webkit02.dat @@ -0,0 +1,159 @@ +#data +<foo bar=qux/> +#errors +#document +| <html> +| <head> +| <body> +| <foo> +| bar="qux/" + +#data +<p id="status"><noscript><strong>A</strong></noscript><span>B</span></p> +#errors +#document +| <html> +| <head> +| <body> +| <p> +| id="status" +| <noscript> +| "<strong>A</strong>" +| <span> +| "B" + +#data +<div><sarcasm><div></div></sarcasm></div> +#errors +#document +| <html> +| <head> +| <body> +| <div> +| <sarcasm> +| <div> + +#data +<html><body><img src="" border="0" alt="><div>A</div></body></html> +#errors +#document +| <html> +| <head> +| <body> + +#data +<table><td></tbody>A +#errors +#document +| <html> +| <head> +| <body> +| "A" +| <table> +| <tbody> +| <tr> +| <td> + +#data +<table><td></thead>A +#errors +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| "A" + +#data +<table><td></tfoot>A +#errors +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| "A" + +#data +<table><thead><td></tbody>A +#errors +#document +| <html> +| <head> +| <body> +| <table> +| <thead> +| <tr> +| <td> +| "A" + +#data +<legend>test</legend> +#errors +#document +| <html> +| <head> +| <body> +| <legend> +| "test" + +#data +<table><input> +#errors +#document +| <html> +| <head> +| <body> +| <input> +| <table> + +#data +<b><em><dcell><postfield><postfield><postfield><postfield><missing_glyph><missing_glyph><missing_glyph><missing_glyph><hkern><aside></b></em> +#errors +#document-fragment +div +#document +| <b> +| <em> +| <dcell> +| <postfield> +| <postfield> +| <postfield> +| <postfield> +| <missing_glyph> +| <missing_glyph> +| <missing_glyph> +| <missing_glyph> +| <hkern> +| <aside> +| <em> +| <b> + +#data 
+<isindex action="x"> +#errors +#document-fragment +table +#document +| <form> +| action="x" +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| <hr> + +#data +<option><XH<optgroup></optgroup> +#errors +#document-fragment +select +#document +| <option> diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go new file mode 100644 index 00000000..893e272a --- /dev/null +++ b/vendor/golang.org/x/net/html/token.go @@ -0,0 +1,1219 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "errors" + "io" + "strconv" + "strings" + + "golang.org/x/net/html/atom" +) + +// A TokenType is the type of a Token. +type TokenType uint32 + +const ( + // ErrorToken means that an error occurred during tokenization. + ErrorToken TokenType = iota + // TextToken means a text node. + TextToken + // A StartTagToken looks like <a>. + StartTagToken + // An EndTagToken looks like </a>. + EndTagToken + // A SelfClosingTagToken tag looks like <br/>. + SelfClosingTagToken + // A CommentToken looks like <!--x-->. + CommentToken + // A DoctypeToken looks like <!DOCTYPE x> + DoctypeToken +) + +// ErrBufferExceeded means that the buffering limit was exceeded. +var ErrBufferExceeded = errors.New("max buffer exceeded") + +// String returns a string representation of the TokenType. +func (t TokenType) String() string { + switch t { + case ErrorToken: + return "Error" + case TextToken: + return "Text" + case StartTagToken: + return "StartTag" + case EndTagToken: + return "EndTag" + case SelfClosingTagToken: + return "SelfClosingTag" + case CommentToken: + return "Comment" + case DoctypeToken: + return "Doctype" + } + return "Invalid(" + strconv.Itoa(int(t)) + ")" +} + +// An Attribute is an attribute namespace-key-value triple. 
Namespace is +// non-empty for foreign attributes like xlink, Key is alphabetic (and hence +// does not contain escapable characters like '&', '<' or '>'), and Val is +// unescaped (it looks like "a<b" rather than "a&lt;b"). +// +// Namespace is only used by the parser, not the tokenizer. +type Attribute struct { + Namespace, Key, Val string +} + +// A Token consists of a TokenType and some Data (tag name for start and end +// tags, content for text, comments and doctypes). A tag Token may also contain +// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b" +// rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or +// zero if Data is not a known tag name. +type Token struct { + Type TokenType + DataAtom atom.Atom + Data string + Attr []Attribute +} + +// tagString returns a string representation of a tag Token's Data and Attr. +func (t Token) tagString() string { + if len(t.Attr) == 0 { + return t.Data + } + buf := bytes.NewBufferString(t.Data) + for _, a := range t.Attr { + buf.WriteByte(' ') + buf.WriteString(a.Key) + buf.WriteString(`="`) + escape(buf, a.Val) + buf.WriteByte('"') + } + return buf.String() +} + +// String returns a string representation of the Token. +func (t Token) String() string { + switch t.Type { + case ErrorToken: + return "" + case TextToken: + return EscapeString(t.Data) + case StartTagToken: + return "<" + t.tagString() + ">" + case EndTagToken: + return "</" + t.tagString() + ">" + case SelfClosingTagToken: + return "<" + t.tagString() + "/>" + case CommentToken: + return "<!--" + t.Data + "-->" + case DoctypeToken: + return "<!DOCTYPE " + t.Data + ">" + } + return "Invalid(" + strconv.Itoa(int(t.Type)) + ")" +} + +// span is a range of bytes in a Tokenizer's buffer. The start is inclusive, +// the end is exclusive. +type span struct { + start, end int +} + +// A Tokenizer returns a stream of HTML Tokens. +type Tokenizer struct { + // r is the source of the HTML text. 
+ r io.Reader + // tt is the TokenType of the current token. + tt TokenType + // err is the first error encountered during tokenization. It is possible + // for tt != Error && err != nil to hold: this means that Next returned a + // valid token but the subsequent Next call will return an error token. + // For example, if the HTML text input was just "plain", then the first + // Next call would set z.err to io.EOF but return a TextToken, and all + // subsequent Next calls would return an ErrorToken. + // err is never reset. Once it becomes non-nil, it stays non-nil. + err error + // readErr is the error returned by the io.Reader r. It is separate from + // err because it is valid for an io.Reader to return (n int, err1 error) + // such that n > 0 && err1 != nil, and callers should always process the + // n > 0 bytes before considering the error err1. + readErr error + // buf[raw.start:raw.end] holds the raw bytes of the current token. + // buf[raw.end:] is buffered input that will yield future tokens. + raw span + buf []byte + // maxBuf limits the data buffered in buf. A value of 0 means unlimited. + maxBuf int + // buf[data.start:data.end] holds the raw bytes of the current token's data: + // a text token's text, a tag token's tag name, etc. + data span + // pendingAttr is the attribute key and value currently being tokenized. + // When complete, pendingAttr is pushed onto attr. nAttrReturned is + // incremented on each call to TagAttr. + pendingAttr [2]span + attr [][2]span + nAttrReturned int + // rawTag is the "script" in "</script>" that closes the next token. If + // non-empty, the subsequent call to Next will return a raw or RCDATA text + // token: one that treats "<p>" as text instead of an element. + // rawTag's contents are lower-cased. + rawTag string + // textIsRaw is whether the current text token's data is not escaped. 
+ textIsRaw bool + // convertNUL is whether NUL bytes in the current token's data should + // be converted into \ufffd replacement characters. + convertNUL bool + // allowCDATA is whether CDATA sections are allowed in the current context. + allowCDATA bool +} + +// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as +// the text "foo". The default value is false, which means to recognize it as +// a bogus comment "<!-- [CDATA[foo]] -->" instead. +// +// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and +// only if tokenizing foreign content, such as MathML and SVG. However, +// tracking foreign-contentness is difficult to do purely in the tokenizer, +// as opposed to the parser, due to HTML integration points: an <svg> element +// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to- +// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the +// responsibility of the user of a tokenizer to call AllowCDATA as appropriate. +// In practice, if using the tokenizer without caring whether MathML or SVG +// CDATA is text or comments, such as tokenizing HTML to find all the anchor +// text, it is acceptable to ignore this responsibility. +func (z *Tokenizer) AllowCDATA(allowCDATA bool) { + z.allowCDATA = allowCDATA +} + +// NextIsNotRawText instructs the tokenizer that the next token should not be +// considered as 'raw text'. Some elements, such as script and title elements, +// normally require the next token after the opening tag to be 'raw text' that +// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>" +// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and +// an end tag token for "</title>". There are no distinct start tag or end tag +// tokens for the "<b>" and "</b>". +// +// This tokenizer implementation will generally look for raw text at the right +// times. 
Strictly speaking, an HTML5 compliant tokenizer should not look for +// raw text if in foreign content: <title> generally needs raw text, but a +// <title> inside an <svg> does not. Another example is that a <textarea> +// generally needs raw text, but a <textarea> is not allowed as an immediate +// child of a <select>; in normal parsing, a <textarea> implies </select>, but +// one cannot close the implicit element when parsing a <select>'s InnerHTML. +// Similarly to AllowCDATA, tracking the correct moment to override raw-text- +// ness is difficult to do purely in the tokenizer, as opposed to the parser. +// For strict compliance with the HTML5 tokenization algorithm, it is the +// responsibility of the user of a tokenizer to call NextIsNotRawText as +// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this +// responsibility for basic usage. +// +// Note that this 'raw text' concept is different from the one offered by the +// Tokenizer.Raw method. +func (z *Tokenizer) NextIsNotRawText() { + z.rawTag = "" +} + +// Err returns the error associated with the most recent ErrorToken token. +// This is typically io.EOF, meaning the end of tokenization. +func (z *Tokenizer) Err() error { + if z.tt != ErrorToken { + return nil + } + return z.err +} + +// readByte returns the next byte from the input stream, doing a buffered read +// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte +// slice that holds all the bytes read so far for the current token. +// It sets z.err if the underlying reader returns an error. +// Pre-condition: z.err == nil. +func (z *Tokenizer) readByte() byte { + if z.raw.end >= len(z.buf) { + // Our buffer is exhausted and we have to read from z.r. Check if the + // previous read resulted in an error. + if z.readErr != nil { + z.err = z.readErr + return 0 + } + // We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. 
If the length + // z.raw.end - z.raw.start is more than half the capacity of z.buf, then we + // allocate a new buffer before the copy. + c := cap(z.buf) + d := z.raw.end - z.raw.start + var buf1 []byte + if 2*d > c { + buf1 = make([]byte, d, 2*c) + } else { + buf1 = z.buf[:d] + } + copy(buf1, z.buf[z.raw.start:z.raw.end]) + if x := z.raw.start; x != 0 { + // Adjust the data/attr spans to refer to the same contents after the copy. + z.data.start -= x + z.data.end -= x + z.pendingAttr[0].start -= x + z.pendingAttr[0].end -= x + z.pendingAttr[1].start -= x + z.pendingAttr[1].end -= x + for i := range z.attr { + z.attr[i][0].start -= x + z.attr[i][0].end -= x + z.attr[i][1].start -= x + z.attr[i][1].end -= x + } + } + z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d] + // Now that we have copied the live bytes to the start of the buffer, + // we read from z.r into the remainder. + var n int + n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)]) + if n == 0 { + z.err = z.readErr + return 0 + } + z.buf = buf1[:d+n] + } + x := z.buf[z.raw.end] + z.raw.end++ + if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf { + z.err = ErrBufferExceeded + return 0 + } + return x +} + +// Buffered returns a slice containing data buffered but not yet tokenized. +func (z *Tokenizer) Buffered() []byte { + return z.buf[z.raw.end:] +} + +// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil). +// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil) +// too many times in succession. +func readAtLeastOneByte(r io.Reader, b []byte) (int, error) { + for i := 0; i < 100; i++ { + n, err := r.Read(b) + if n != 0 || err != nil { + return n, err + } + } + return 0, io.ErrNoProgress +} + +// skipWhiteSpace skips past any white space. +func (z *Tokenizer) skipWhiteSpace() { + if z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + // No-op. 
+ default: + z.raw.end-- + return + } + } +} + +// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and +// is typically something like "script" or "textarea". +func (z *Tokenizer) readRawOrRCDATA() { + if z.rawTag == "script" { + z.readScript() + z.textIsRaw = true + z.rawTag = "" + return + } +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + c = z.readByte() + if z.err != nil { + break loop + } + if c != '/' { + continue loop + } + if z.readRawEndTag() || z.err != nil { + break loop + } + } + z.data.end = z.raw.end + // A textarea's or title's RCDATA can contain escaped entities. + z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title" + z.rawTag = "" +} + +// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag. +// If it succeeds, it backs up the input position to reconsume the tag and +// returns true. Otherwise it returns false. The opening "</" has already been +// consumed. +func (z *Tokenizer) readRawEndTag() bool { + for i := 0; i < len(z.rawTag); i++ { + c := z.readByte() + if z.err != nil { + return false + } + if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') { + z.raw.end-- + return false + } + } + c := z.readByte() + if z.err != nil { + return false + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // The 3 is 2 for the leading "</" plus 1 for the trailing character c. + z.raw.end -= 3 + len(z.rawTag) + return true + } + z.raw.end-- + return false +} + +// readScript reads until the next </script> tag, following the byzantine +// rules for escaping/hiding the closing tag. 
+func (z *Tokenizer) readScript() { + defer func() { + z.data.end = z.raw.end + }() + var c byte + +scriptData: + c = z.readByte() + if z.err != nil { + return + } + if c == '<' { + goto scriptDataLessThanSign + } + goto scriptData + +scriptDataLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '/': + goto scriptDataEndTagOpen + case '!': + goto scriptDataEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptData + +scriptDataEscapeStart: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapeStartDash + } + z.raw.end-- + goto scriptData + +scriptDataEscapeStartDash: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapedDashDash + } + z.raw.end-- + goto scriptData + +scriptDataEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataEscaped + +scriptDataEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataEscapedEndTagOpen + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + goto scriptDataDoubleEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEscapedEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptDataEscaped + +scriptDataDoubleEscapeStart: + z.raw.end-- + for i := 0; i < len("script"); i++ { + c 
= z.readByte() + if z.err != nil { + return + } + if c != "script"[i] && c != "SCRIPT"[i] { + z.raw.end-- + goto scriptDataEscaped + } + } + c = z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + goto scriptDataDoubleEscaped + } + z.raw.end-- + goto scriptDataEscaped + +scriptDataDoubleEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataDoubleEscapeEnd + } + z.raw.end-- + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapeEnd: + if z.readRawEndTag() { + z.raw.end += len("</script>") + goto scriptDataEscaped + } + if z.err != nil { + return + } + goto scriptDataDoubleEscaped +} + +// readComment reads the next comment token starting with "<!--". The opening +// "<!--" has already been consumed. +func (z *Tokenizer) readComment() { + z.data.start = z.raw.end + defer func() { + if z.data.end < z.data.start { + // It's a comment with no data, like <!-->. + z.data.end = z.data.start + } + }() + for dashCount := 2; ; { + c := z.readByte() + if z.err != nil { + // Ignore up to two dashes at EOF. 
+ if dashCount > 2 { + dashCount = 2 + } + z.data.end = z.raw.end - dashCount + return + } + switch c { + case '-': + dashCount++ + continue + case '>': + if dashCount >= 2 { + z.data.end = z.raw.end - len("-->") + return + } + case '!': + if dashCount >= 2 { + c = z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + if c == '>' { + z.data.end = z.raw.end - len("--!>") + return + } + } + } + dashCount = 0 + } +} + +// readUntilCloseAngle reads until the next ">". +func (z *Tokenizer) readUntilCloseAngle() { + z.data.start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + if c == '>' { + z.data.end = z.raw.end - len(">") + return + } + } +} + +// readMarkupDeclaration reads the next token starting with "<!". It might be +// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or +// "<!a bogus comment". The opening "<!" has already been consumed. +func (z *Tokenizer) readMarkupDeclaration() TokenType { + z.data.start = z.raw.end + var c [2]byte + for i := 0; i < 2; i++ { + c[i] = z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return CommentToken + } + } + if c[0] == '-' && c[1] == '-' { + z.readComment() + return CommentToken + } + z.raw.end -= 2 + if z.readDoctype() { + return DoctypeToken + } + if z.allowCDATA && z.readCDATA() { + z.convertNUL = true + return TextToken + } + // It's a bogus comment. + z.readUntilCloseAngle() + return CommentToken +} + +// readDoctype attempts to read a doctype declaration and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readDoctype() bool { + const s = "DOCTYPE" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] && c != s[i]+('a'-'A') { + // Back up to read the fragment of "DOCTYPE" again. 
+ z.raw.end = z.data.start + return false + } + } + if z.skipWhiteSpace(); z.err != nil { + z.data.start = z.raw.end + z.data.end = z.raw.end + return true + } + z.readUntilCloseAngle() + return true +} + +// readCDATA attempts to read a CDATA section and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readCDATA() bool { + const s = "[CDATA[" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] { + // Back up to read the fragment of "[CDATA[" again. + z.raw.end = z.data.start + return false + } + } + z.data.start = z.raw.end + brackets := 0 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return true + } + switch c { + case ']': + brackets++ + case '>': + if brackets >= 2 { + z.data.end = z.raw.end - len("]]>") + return true + } + brackets = 0 + default: + brackets = 0 + } + } +} + +// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end] +// case-insensitively matches any element of ss. +func (z *Tokenizer) startTagIn(ss ...string) bool { +loop: + for _, s := range ss { + if z.data.end-z.data.start != len(s) { + continue loop + } + for i := 0; i < len(s); i++ { + c := z.buf[z.data.start+i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + if c != s[i] { + continue loop + } + } + return true + } + return false +} + +// readStartTag reads the next start tag token. The opening "<a" has already +// been consumed, where 'a' means anything in [A-Za-z]. +func (z *Tokenizer) readStartTag() TokenType { + z.readTag(true) + if z.err != nil { + return ErrorToken + } + // Several tags flag the tokenizer's next token as raw. 
+ c, raw := z.buf[z.data.start], false + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + switch c { + case 'i': + raw = z.startTagIn("iframe") + case 'n': + raw = z.startTagIn("noembed", "noframes", "noscript") + case 'p': + raw = z.startTagIn("plaintext") + case 's': + raw = z.startTagIn("script", "style") + case 't': + raw = z.startTagIn("textarea", "title") + case 'x': + raw = z.startTagIn("xmp") + } + if raw { + z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end])) + } + // Look for a self-closing token like "<br/>". + if z.err == nil && z.buf[z.raw.end-2] == '/' { + return SelfClosingTagToken + } + return StartTagToken +} + +// readTag reads the next tag token and its attributes. If saveAttr, those +// attributes are saved in z.attr, otherwise z.attr is set to an empty slice. +// The opening "<a" or "</a" has already been consumed, where 'a' means anything +// in [A-Za-z]. +func (z *Tokenizer) readTag(saveAttr bool) { + z.attr = z.attr[:0] + z.nAttrReturned = 0 + // Read the tag name and attribute key/value pairs. + z.readTagName() + if z.skipWhiteSpace(); z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil || c == '>' { + break + } + z.raw.end-- + z.readTagAttrKey() + z.readTagAttrVal() + // Save pendingAttr if saveAttr and that attribute has a non-empty key. + if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end { + z.attr = append(z.attr, z.pendingAttr) + } + if z.skipWhiteSpace(); z.err != nil { + break + } + } +} + +// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end) +// is positioned such that the first byte of the tag name (the "d" in "<div") +// has already been consumed. 
+func (z *Tokenizer) readTagName() { + z.data.start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.data.end = z.raw.end - 1 + return + case '/', '>': + z.raw.end-- + z.data.end = z.raw.end + return + } + } +} + +// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>". +// Precondition: z.err == nil. +func (z *Tokenizer) readTagAttrKey() { + z.pendingAttr[0].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[0].end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/': + z.pendingAttr[0].end = z.raw.end - 1 + return + case '=', '>': + z.raw.end-- + z.pendingAttr[0].end = z.raw.end + return + } + } +} + +// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>". +func (z *Tokenizer) readTagAttrVal() { + z.pendingAttr[1].start = z.raw.end + z.pendingAttr[1].end = z.raw.end + if z.skipWhiteSpace(); z.err != nil { + return + } + c := z.readByte() + if z.err != nil { + return + } + if c != '=' { + z.raw.end-- + return + } + if z.skipWhiteSpace(); z.err != nil { + return + } + quote := z.readByte() + if z.err != nil { + return + } + switch quote { + case '>': + z.raw.end-- + return + + case '\'', '"': + z.pendingAttr[1].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + if c == quote { + z.pendingAttr[1].end = z.raw.end - 1 + return + } + } + + default: + z.pendingAttr[1].start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.pendingAttr[1].end = z.raw.end - 1 + return + case '>': + z.raw.end-- + z.pendingAttr[1].end = z.raw.end + return + } + } + } +} + +// Next scans the next token and returns its type. 
+func (z *Tokenizer) Next() TokenType { + z.raw.start = z.raw.end + z.data.start = z.raw.end + z.data.end = z.raw.end + if z.err != nil { + z.tt = ErrorToken + return z.tt + } + if z.rawTag != "" { + if z.rawTag == "plaintext" { + // Read everything up to EOF. + for z.err == nil { + z.readByte() + } + z.data.end = z.raw.end + z.textIsRaw = true + } else { + z.readRawOrRCDATA() + } + if z.data.end > z.data.start { + z.tt = TextToken + z.convertNUL = true + return z.tt + } + } + z.textIsRaw = false + z.convertNUL = false + +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + + // Check if the '<' we have just read is part of a tag, comment + // or doctype. If not, it's part of the accumulated text token. + c = z.readByte() + if z.err != nil { + break loop + } + var tokenType TokenType + switch { + case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z': + tokenType = StartTagToken + case c == '/': + tokenType = EndTagToken + case c == '!' || c == '?': + // We use CommentToken to mean any of "<!--actual comments-->", + // "<!DOCTYPE declarations>" and "<?xml processing instructions?>". + tokenType = CommentToken + default: + // Reconsume the current character. + z.raw.end-- + continue + } + + // We have a non-text token, but we might have accumulated some text + // before that. If so, we return the text first, and return the non- + // text token on the subsequent call to Next. + if x := z.raw.end - len("<a"); z.raw.start < x { + z.raw.end = x + z.data.end = x + z.tt = TextToken + return z.tt + } + switch tokenType { + case StartTagToken: + z.tt = z.readStartTag() + return z.tt + case EndTagToken: + c = z.readByte() + if z.err != nil { + break loop + } + if c == '>' { + // "</>" does not generate a token at all. Generate an empty comment + // to allow passthrough clients to pick up the data using Raw. + // Reset the tokenizer state and start again. 
+ z.tt = CommentToken + return z.tt + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + z.readTag(false) + if z.err != nil { + z.tt = ErrorToken + } else { + z.tt = EndTagToken + } + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + case CommentToken: + if c == '!' { + z.tt = z.readMarkupDeclaration() + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + } + } + if z.raw.start < z.raw.end { + z.data.end = z.raw.end + z.tt = TextToken + return z.tt + } + z.tt = ErrorToken + return z.tt +} + +// Raw returns the unmodified text of the current token. Calling Next, Token, +// Text, TagName or TagAttr may change the contents of the returned slice. +func (z *Tokenizer) Raw() []byte { + return z.buf[z.raw.start:z.raw.end] +} + +// convertNewlines converts "\r" and "\r\n" in s to "\n". +// The conversion happens in place, but the resulting slice may be shorter. +func convertNewlines(s []byte) []byte { + for i, c := range s { + if c != '\r' { + continue + } + + src := i + 1 + if src >= len(s) || s[src] != '\n' { + s[i] = '\n' + continue + } + + dst := i + for src < len(s) { + if s[src] == '\r' { + if src+1 < len(s) && s[src+1] == '\n' { + src++ + } + s[dst] = '\n' + } else { + s[dst] = s[src] + } + src++ + dst++ + } + return s[:dst] + } + return s +} + +var ( + nul = []byte("\x00") + replacement = []byte("\ufffd") +) + +// Text returns the unescaped text of a text, comment or doctype token. The +// contents of the returned slice may change on the next call to Next. 
+func (z *Tokenizer) Text() []byte { + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + s = convertNewlines(s) + if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) { + s = bytes.Replace(s, nul, replacement, -1) + } + if !z.textIsRaw { + s = unescape(s, false) + } + return s + } + return nil +} + +// TagName returns the lower-cased name of a tag token (the `img` out of +// `<IMG SRC="foo">`) and whether the tag has attributes. +// The contents of the returned slice may change on the next call to Next. +func (z *Tokenizer) TagName() (name []byte, hasAttr bool) { + if z.data.start < z.data.end { + switch z.tt { + case StartTagToken, EndTagToken, SelfClosingTagToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + return lower(s), z.nAttrReturned < len(z.attr) + } + } + return nil, false +} + +// TagAttr returns the lower-cased key and unescaped value of the next unparsed +// attribute for the current tag token and whether there are more attributes. +// The contents of the returned slices may change on the next call to Next. +func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) { + if z.nAttrReturned < len(z.attr) { + switch z.tt { + case StartTagToken, SelfClosingTagToken: + x := z.attr[z.nAttrReturned] + z.nAttrReturned++ + key = z.buf[x[0].start:x[0].end] + val = z.buf[x[1].start:x[1].end] + return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr) + } + } + return nil, nil, false +} + +// Token returns the next Token. The result's Data and Attr values remain valid +// after subsequent Next calls. 
+func (z *Tokenizer) Token() Token { + t := Token{Type: z.tt} + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + t.Data = string(z.Text()) + case StartTagToken, SelfClosingTagToken, EndTagToken: + name, moreAttr := z.TagName() + for moreAttr { + var key, val []byte + key, val, moreAttr = z.TagAttr() + t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)}) + } + if a := atom.Lookup(name); a != 0 { + t.DataAtom, t.Data = a, a.String() + } else { + t.DataAtom, t.Data = 0, string(name) + } + } + return t +} + +// SetMaxBuf sets a limit on the amount of data buffered during tokenization. +// A value of 0 means unlimited. +func (z *Tokenizer) SetMaxBuf(n int) { + z.maxBuf = n +} + +// NewTokenizer returns a new HTML Tokenizer for the given Reader. +// The input is assumed to be UTF-8 encoded. +func NewTokenizer(r io.Reader) *Tokenizer { + return NewTokenizerFragment(r, "") +} + +// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for +// tokenizing an existing element's InnerHTML fragment. contextTag is that +// element's tag, such as "div" or "iframe". +// +// For example, how the InnerHTML "a<b" is tokenized depends on whether it is +// for a <p> tag or a <script> tag. +// +// The input is assumed to be UTF-8 encoded. +func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer { + z := &Tokenizer{ + r: r, + buf: make([]byte, 0, 4096), + } + if contextTag != "" { + switch s := strings.ToLower(contextTag); s { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp": + z.rawTag = s + } + } + return z +} diff --git a/vendor/golang.org/x/net/html/token_test.go b/vendor/golang.org/x/net/html/token_test.go new file mode 100644 index 00000000..20221c32 --- /dev/null +++ b/vendor/golang.org/x/net/html/token_test.go @@ -0,0 +1,748 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "io" + "io/ioutil" + "reflect" + "runtime" + "strings" + "testing" +) + +type tokenTest struct { + // A short description of the test case. + desc string + // The HTML to parse. + html string + // The string representations of the expected tokens, joined by '$'. + golden string +} + +var tokenTests = []tokenTest{ + { + "empty", + "", + "", + }, + // A single text node. The tokenizer should not break text nodes on whitespace, + // nor should it normalize whitespace within a text node. + { + "text", + "foo bar", + "foo bar", + }, + // An entity. + { + "entity", + "one &lt; two", + "one &lt; two", + }, + // A start, self-closing and end tag. The tokenizer does not care if the start + // and end tokens don't match; that is the job of the parser. + { + "tags", + "<a>b<c/>d</e>", + "<a>$b$<c/>$d$</e>", + }, + // Angle brackets that aren't a tag. + { + "not a tag #0", + "<", + "&lt;", + }, + { + "not a tag #1", + "</", + "&lt;/", + }, + { + "not a tag #2", + "</>", + "<!---->", + }, + { + "not a tag #3", + "a</>b", + "a$<!---->$b", + }, + { + "not a tag #4", + "</ >", + "<!-- -->", + }, + { + "not a tag #5", + "</.", + "<!--.-->", + }, + { + "not a tag #6", + "</.>", + "<!--.-->", + }, + { + "not a tag #7", + "a < b", + "a &lt; b", + }, + { + "not a tag #8", + "<.>", + "&lt;.&gt;", + }, + { + "not a tag #9", + "a<<<b>>>c", + "a&lt;&lt;$<b>$&gt;&gt;c", + }, + { + "not a tag #10", + "if x<0 and y < 0 then x*y>0", + "if x&lt;0 and y &lt; 0 then x*y&gt;0", + }, + { + "not a tag #11", + "<<p>", + "&lt;$<p>", + }, + // EOF in a tag name. + { + "tag name eof #0", + "<a", + "", + }, + { + "tag name eof #1", + "<a ", + "", + }, + { + "tag name eof #2", + "a<b", + "a", + }, + { + "tag name eof #3", + "<a><b", + "<a>", + }, + { + "tag name eof #4", + `<a x`, + ``, + }, + // Some malformed tags that are missing a '>'. 
+ { + "malformed tag #0", + `<p</p>`, + `<p< p="">`, + }, + { + "malformed tag #1", + `<p </p>`, + `<p <="" p="">`, + }, + { + "malformed tag #2", + `<p id`, + ``, + }, + { + "malformed tag #3", + `<p id=`, + ``, + }, + { + "malformed tag #4", + `<p id=>`, + `<p id="">`, + }, + { + "malformed tag #5", + `<p id=0`, + ``, + }, + { + "malformed tag #6", + `<p id=0</p>`, + `<p id="0&lt;/p">`, + }, + { + "malformed tag #7", + `<p id="0</p>`, + ``, + }, + { + "malformed tag #8", + `<p id="0"</p>`, + `<p id="0" <="" p="">`, + }, + { + "malformed tag #9", + `<p></p id`, + `<p>`, + }, + // Raw text and RCDATA. + { + "basic raw text", + "<script><a></b></script>", + "<script>$&lt;a&gt;&lt;/b&gt;$</script>", + }, + { + "unfinished script end tag", + "<SCRIPT>a</SCR", + "<script>$a&lt;/SCR", + }, + { + "broken script end tag", + "<SCRIPT>a</SCR ipt>", + "<script>$a&lt;/SCR ipt&gt;", + }, + { + "EOF in script end tag", + "<SCRIPT>a</SCRipt", + "<script>$a&lt;/SCRipt", + }, + { + "scriptx end tag", + "<SCRIPT>a</SCRiptx", + "<script>$a&lt;/SCRiptx", + }, + { + "' ' completes script end tag", + "<SCRIPT>a</SCRipt ", + "<script>$a", + }, + { + "'>' completes script end tag", + "<SCRIPT>a</SCRipt>", + "<script>$a$</script>", + }, + { + "self-closing script end tag", + "<SCRIPT>a</SCRipt/>", + "<script>$a$</script>", + }, + { + "nested script tag", + "<SCRIPT>a</SCRipt<script>", + "<script>$a&lt;/SCRipt&lt;script&gt;", + }, + { + "script end tag after unfinished", + "<SCRIPT>a</SCRipt</script>", + "<script>$a&lt;/SCRipt$</script>", + }, + { + "script/style mismatched tags", + "<script>a</style>", + "<script>$a&lt;/style&gt;", + }, + { + "style element with entity", + "<style>&apos;", + "<style>$&amp;apos;", + }, + { + "textarea with tag", + "<textarea><div></textarea>", + "<textarea>$&lt;div&gt;$</textarea>", + }, + { + "title with tag and entity", + "<title><b>K&amp;R C</b></title>", + "<title>$&lt;b&gt;K&amp;R C&lt;/b&gt;$</title>", + }, + // DOCTYPE tests. 
+ { + "Proper DOCTYPE", + "<!DOCTYPE html>", + "<!DOCTYPE html>", + }, + { + "DOCTYPE with no space", + "<!doctypehtml>", + "<!DOCTYPE html>", + }, + { + "DOCTYPE with two spaces", + "<!doctype html>", + "<!DOCTYPE html>", + }, + { + "looks like DOCTYPE but isn't", + "<!DOCUMENT html>", + "<!--DOCUMENT html-->", + }, + { + "DOCTYPE at EOF", + "<!DOCtype", + "<!DOCTYPE >", + }, + // XML processing instructions. + { + "XML processing instruction", + "<?xml?>", + "<!--?xml?-->", + }, + // Comments. + { + "comment0", + "abc<b><!-- skipme --></b>def", + "abc$<b>$<!-- skipme -->$</b>$def", + }, + { + "comment1", + "a<!-->z", + "a$<!---->$z", + }, + { + "comment2", + "a<!--->z", + "a$<!---->$z", + }, + { + "comment3", + "a<!--x>-->z", + "a$<!--x>-->$z", + }, + { + "comment4", + "a<!--x->-->z", + "a$<!--x->-->$z", + }, + { + "comment5", + "a<!>z", + "a$<!---->$z", + }, + { + "comment6", + "a<!->z", + "a$<!----->$z", + }, + { + "comment7", + "a<!---<>z", + "a$<!---<>z-->", + }, + { + "comment8", + "a<!--z", + "a$<!--z-->", + }, + { + "comment9", + "a<!--z-", + "a$<!--z-->", + }, + { + "comment10", + "a<!--z--", + "a$<!--z-->", + }, + { + "comment11", + "a<!--z---", + "a$<!--z--->", + }, + { + "comment12", + "a<!--z----", + "a$<!--z---->", + }, + { + "comment13", + "a<!--x--!>z", + "a$<!--x-->$z", + }, + // An attribute with a backslash. + { + "backslash", + `<p id="a\"b">`, + `<p id="a\" b"="">`, + }, + // Entities, tag name and attribute key lower-casing, and whitespace + // normalization within a tag. + { + "tricky", + "<p \t\n iD=\"a&quot;B\" foo=\"bar\"><EM>te&lt;&amp;;xt</em></p>", + `<p id="a&#34;B" foo="bar">$<em>$te&lt;&amp;;xt$</em>$</p>`, + }, + // A nonexistent entity. Tokenizing and converting back to a string should + // escape the "&" to become "&amp;". 
+ { + "noSuchEntity", + `<a b="c&noSuchEntity;d">&lt;&alsoDoesntExist;&`, + `<a b="c&amp;noSuchEntity;d">$&lt;&amp;alsoDoesntExist;&amp;`, + }, + { + "entity without semicolon", + `&notit;&notin;<a b="q=z&amp=5&notice=hello&not;=world">`, + `¬it;∉$<a b="q=z&amp;amp=5&amp;notice=hello¬=world">`, + }, + { + "entity with digits", + "&frac12;", + "½", + }, + // Attribute tests: + // http://dev.w3.org/html5/pf-summary/Overview.html#attributes + { + "Empty attribute", + `<input disabled FOO>`, + `<input disabled="" foo="">`, + }, + { + "Empty attribute, whitespace", + `<input disabled FOO >`, + `<input disabled="" foo="">`, + }, + { + "Unquoted attribute value", + `<input value=yes FOO=BAR>`, + `<input value="yes" foo="BAR">`, + }, + { + "Unquoted attribute value, spaces", + `<input value = yes FOO = BAR>`, + `<input value="yes" foo="BAR">`, + }, + { + "Unquoted attribute value, trailing space", + `<input value=yes FOO=BAR >`, + `<input value="yes" foo="BAR">`, + }, + { + "Single-quoted attribute value", + `<input value='yes' FOO='BAR'>`, + `<input value="yes" foo="BAR">`, + }, + { + "Single-quoted attribute value, trailing space", + `<input value='yes' FOO='BAR' >`, + `<input value="yes" foo="BAR">`, + }, + { + "Double-quoted attribute value", + `<input value="I'm an attribute" FOO="BAR">`, + `<input value="I&#39;m an attribute" foo="BAR">`, + }, + { + "Attribute name characters", + `<meta http-equiv="content-type">`, + `<meta http-equiv="content-type">`, + }, + { + "Mixed attributes", + `a<P V="0 1" w='2' X=3 y>z`, + `a$<p v="0 1" w="2" x="3" y="">$z`, + }, + { + "Attributes with a solitary single quote", + `<p id=can't><p id=won't>`, + `<p id="can&#39;t">$<p id="won&#39;t">`, + }, +} + +func TestTokenizer(t *testing.T) { +loop: + for _, tt := range tokenTests { + z := NewTokenizer(strings.NewReader(tt.html)) + if tt.golden != "" { + for i, s := range strings.Split(tt.golden, "$") { + if z.Next() == ErrorToken { + t.Errorf("%s token %d: want %q got error %v", tt.desc, 
i, s, z.Err()) + continue loop + } + actual := z.Token().String() + if s != actual { + t.Errorf("%s token %d: want %q got %q", tt.desc, i, s, actual) + continue loop + } + } + } + z.Next() + if z.Err() != io.EOF { + t.Errorf("%s: want EOF got %q", tt.desc, z.Err()) + } + } +} + +func TestMaxBuffer(t *testing.T) { + // Exceeding the maximum buffer size generates ErrBufferExceeded. + z := NewTokenizer(strings.NewReader("<" + strings.Repeat("t", 10))) + z.SetMaxBuf(5) + tt := z.Next() + if got, want := tt, ErrorToken; got != want { + t.Fatalf("token type: got: %v want: %v", got, want) + } + if got, want := z.Err(), ErrBufferExceeded; got != want { + t.Errorf("error type: got: %v want: %v", got, want) + } + if got, want := string(z.Raw()), "<tttt"; got != want { + t.Fatalf("buffered before overflow: got: %q want: %q", got, want) + } +} + +func TestMaxBufferReconstruction(t *testing.T) { + // Exceeding the maximum buffer size at any point while tokenizing permits + // reconstructing the original input. +tests: + for _, test := range tokenTests { + for maxBuf := 1; ; maxBuf++ { + r := strings.NewReader(test.html) + z := NewTokenizer(r) + z.SetMaxBuf(maxBuf) + var tokenized bytes.Buffer + for { + tt := z.Next() + tokenized.Write(z.Raw()) + if tt == ErrorToken { + if err := z.Err(); err != io.EOF && err != ErrBufferExceeded { + t.Errorf("%s: unexpected error: %v", test.desc, err) + } + break + } + } + // Anything tokenized along with untokenized input or data left in the reader. + assembled, err := ioutil.ReadAll(io.MultiReader(&tokenized, bytes.NewReader(z.Buffered()), r)) + if err != nil { + t.Errorf("%s: ReadAll: %v", test.desc, err) + continue tests + } + if got, want := string(assembled), test.html; got != want { + t.Errorf("%s: reassembled html:\n got: %q\nwant: %q", test.desc, got, want) + continue tests + } + // EOF indicates that we completed tokenization and hence found the max + // maxBuf that generates ErrBufferExceeded, so continue to the next test. 
+ if z.Err() == io.EOF { + break + } + } // buffer sizes + } // tests +} + +func TestPassthrough(t *testing.T) { + // Accumulating the raw output for each parse event should reconstruct the + // original input. + for _, test := range tokenTests { + z := NewTokenizer(strings.NewReader(test.html)) + var parsed bytes.Buffer + for { + tt := z.Next() + parsed.Write(z.Raw()) + if tt == ErrorToken { + break + } + } + if got, want := parsed.String(), test.html; got != want { + t.Errorf("%s: parsed output:\n got: %q\nwant: %q", test.desc, got, want) + } + } +} + +func TestBufAPI(t *testing.T) { + s := "0<a>1</a>2<b>3<a>4<a>5</a>6</b>7</a>8<a/>9" + z := NewTokenizer(bytes.NewBufferString(s)) + var result bytes.Buffer + depth := 0 +loop: + for { + tt := z.Next() + switch tt { + case ErrorToken: + if z.Err() != io.EOF { + t.Error(z.Err()) + } + break loop + case TextToken: + if depth > 0 { + result.Write(z.Text()) + } + case StartTagToken, EndTagToken: + tn, _ := z.TagName() + if len(tn) == 1 && tn[0] == 'a' { + if tt == StartTagToken { + depth++ + } else { + depth-- + } + } + } + } + u := "14567" + v := string(result.Bytes()) + if u != v { + t.Errorf("TestBufAPI: want %q got %q", u, v) + } +} + +func TestConvertNewlines(t *testing.T) { + testCases := map[string]string{ + "Mac\rDOS\r\nUnix\n": "Mac\nDOS\nUnix\n", + "Unix\nMac\rDOS\r\n": "Unix\nMac\nDOS\n", + "DOS\r\nDOS\r\nDOS\r\n": "DOS\nDOS\nDOS\n", + "": "", + "\n": "\n", + "\n\r": "\n\n", + "\r": "\n", + "\r\n": "\n", + "\r\n\n": "\n\n", + "\r\n\r": "\n\n", + "\r\n\r\n": "\n\n", + "\r\r": "\n\n", + "\r\r\n": "\n\n", + "\r\r\n\n": "\n\n\n", + "\r\r\r\n": "\n\n\n", + "\r \n": "\n \n", + "xyz": "xyz", + } + for in, want := range testCases { + if got := string(convertNewlines([]byte(in))); got != want { + t.Errorf("input %q: got %q, want %q", in, got, want) + } + } +} + +func TestReaderEdgeCases(t *testing.T) { + const s = "<p>An io.Reader can return (0, nil) or (n, io.EOF).</p>" + testCases := []io.Reader{ + 
&zeroOneByteReader{s: s}, + &eofStringsReader{s: s}, + &stuckReader{}, + } + for i, tc := range testCases { + got := []TokenType{} + z := NewTokenizer(tc) + for { + tt := z.Next() + if tt == ErrorToken { + break + } + got = append(got, tt) + } + if err := z.Err(); err != nil && err != io.EOF { + if err != io.ErrNoProgress { + t.Errorf("i=%d: %v", i, err) + } + continue + } + want := []TokenType{ + StartTagToken, + TextToken, + EndTagToken, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("i=%d: got %v, want %v", i, got, want) + continue + } + } +} + +// zeroOneByteReader is like a strings.Reader that alternates between +// returning 0 bytes and 1 byte at a time. +type zeroOneByteReader struct { + s string + n int +} + +func (r *zeroOneByteReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + if len(r.s) == 0 { + return 0, io.EOF + } + r.n++ + if r.n%2 != 0 { + return 0, nil + } + p[0], r.s = r.s[0], r.s[1:] + return 1, nil +} + +// eofStringsReader is like a strings.Reader but can return an (n, err) where +// n > 0 && err != nil. +type eofStringsReader struct { + s string +} + +func (r *eofStringsReader) Read(p []byte) (int, error) { + n := copy(p, r.s) + r.s = r.s[n:] + if r.s != "" { + return n, nil + } + return n, io.EOF +} + +// stuckReader is an io.Reader that always returns no data and no error. 
+type stuckReader struct{} + +func (*stuckReader) Read(p []byte) (int, error) { + return 0, nil +} + +const ( + rawLevel = iota + lowLevel + highLevel +) + +func benchmarkTokenizer(b *testing.B, level int) { + buf, err := ioutil.ReadFile("testdata/go1.html") + if err != nil { + b.Fatalf("could not read testdata/go1.html: %v", err) + } + b.SetBytes(int64(len(buf))) + runtime.GC() + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + z := NewTokenizer(bytes.NewBuffer(buf)) + for { + tt := z.Next() + if tt == ErrorToken { + if err := z.Err(); err != nil && err != io.EOF { + b.Fatalf("tokenizer error: %v", err) + } + break + } + switch level { + case rawLevel: + // Calling z.Raw just returns the raw bytes of the token. It does + // not unescape &lt; to <, or lower-case tag names and attribute keys. + z.Raw() + case lowLevel: + // Caling z.Text, z.TagName and z.TagAttr returns []byte values + // whose contents may change on the next call to z.Next. + switch tt { + case TextToken, CommentToken, DoctypeToken: + z.Text() + case StartTagToken, SelfClosingTagToken: + _, more := z.TagName() + for more { + _, _, more = z.TagAttr() + } + case EndTagToken: + z.TagName() + } + case highLevel: + // Calling z.Token converts []byte values to strings whose validity + // extend beyond the next call to z.Next. + z.Token() + } + } + } +} + +func BenchmarkRawLevelTokenizer(b *testing.B) { benchmarkTokenizer(b, rawLevel) } +func BenchmarkLowLevelTokenizer(b *testing.B) { benchmarkTokenizer(b, lowLevel) } +func BenchmarkHighLevelTokenizer(b *testing.B) { benchmarkTokenizer(b, highLevel) } diff --git a/vendor/golang.org/x/net/icmp/dstunreach.go b/vendor/golang.org/x/net/icmp/dstunreach.go new file mode 100644 index 00000000..75db991d --- /dev/null +++ b/vendor/golang.org/x/net/icmp/dstunreach.go @@ -0,0 +1,41 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp + +// A DstUnreach represents an ICMP destination unreachable message +// body. +type DstUnreach struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *DstUnreach) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DstUnreach) Marshal(proto int) ([]byte, error) { + return marshalMultipartMessageBody(proto, p.Data, p.Extensions) +} + +// parseDstUnreach parses b as an ICMP destination unreachable message +// body. +func parseDstUnreach(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &DstUnreach{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/echo.go b/vendor/golang.org/x/net/icmp/echo.go new file mode 100644 index 00000000..dd551811 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/echo.go @@ -0,0 +1,45 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// An Echo represents an ICMP echo request or reply message body. +type Echo struct { + ID int // identifier + Seq int // sequence number + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *Echo) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. 
+func (p *Echo) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq)) + copy(b[4:], p.Data) + return b, nil +} + +// parseEcho parses b as an ICMP echo request or reply message body. +func parseEcho(proto int, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/endpoint.go b/vendor/golang.org/x/net/icmp/endpoint.go new file mode 100644 index 00000000..a68bfb01 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/endpoint.go @@ -0,0 +1,113 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + "runtime" + "syscall" + "time" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var _ net.PacketConn = &PacketConn{} + +// A PacketConn represents a packet network endpoint that uses either +// ICMPv4 or ICMPv6. +type PacketConn struct { + c net.PacketConn + p4 *ipv4.PacketConn + p6 *ipv6.PacketConn +} + +func (c *PacketConn) ok() bool { return c != nil && c.c != nil } + +// IPv4PacketConn returns the ipv4.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv4. +func (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn { + if !c.ok() { + return nil + } + return c.p4 +} + +// IPv6PacketConn returns the ipv6.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv6. +func (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn { + if !c.ok() { + return nil + } + return c.p6 +} + +// ReadFrom reads an ICMP message from the connection. 
+func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) { + if !c.ok() { + return 0, nil, syscall.EINVAL + } + // Please be informed that ipv4.NewPacketConn enables + // IP_STRIPHDR option by default on Darwin. + // See golang.org/issue/9395 for further information. + if runtime.GOOS == "darwin" && c.p4 != nil { + n, _, peer, err := c.p4.ReadFrom(b) + return n, peer, err + } + return c.c.ReadFrom(b) +} + +// WriteTo writes the ICMP message b to dst. +// Dst must be net.UDPAddr when c is a non-privileged +// datagram-oriented ICMP endpoint. Otherwise it must be net.IPAddr. +func (c *PacketConn) WriteTo(b []byte, dst net.Addr) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.c.WriteTo(b, dst) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.Close() +} + +// LocalAddr returns the local network address. +func (c *PacketConn) LocalAddr() net.Addr { + if !c.ok() { + return nil + } + return c.c.LocalAddr() +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetWriteDeadline(t) +} diff --git a/vendor/golang.org/x/net/icmp/example_test.go b/vendor/golang.org/x/net/icmp/example_test.go new file mode 100644 index 00000000..1df4cecc --- /dev/null +++ b/vendor/golang.org/x/net/icmp/example_test.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "log" + "net" + "os" + "runtime" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" +) + +func ExamplePacketConn_nonPrivilegedPing() { + switch runtime.GOOS { + case "darwin": + case "linux": + log.Println("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + log.Println("not supported on", runtime.GOOS) + return + } + + c, err := icmp.ListenPacket("udp6", "fe80::1%en0") + if err != nil { + log.Fatal(err) + } + defer c.Close() + + wm := icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: 1, + Data: []byte("HELLO-R-U-THERE"), + }, + } + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + if _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP("ff02::1"), Zone: "en0"}); err != nil { + log.Fatal(err) + } + + rb := make([]byte, 1500) + n, peer, err := c.ReadFrom(rb) + if err != nil { + log.Fatal(err) + } + rm, err := icmp.ParseMessage(58, rb[:n]) + if err != nil { + log.Fatal(err) + } + switch rm.Type { + case ipv6.ICMPTypeEchoReply: + log.Printf("got reflection from %v", peer) + default: + log.Printf("got %+v; want echo reply", rm) + } +} diff --git a/vendor/golang.org/x/net/icmp/extension.go b/vendor/golang.org/x/net/icmp/extension.go new file mode 100644 index 00000000..402a7514 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// An Extension represents an ICMP extension. +type Extension interface { + // Len returns the length of ICMP extension. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP extension. 
+ // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Marshal(proto int) ([]byte, error) +} + +const extensionVersion = 2 + +func validExtensionHeader(b []byte) bool { + v := int(b[0]&0xf0) >> 4 + s := binary.BigEndian.Uint16(b[2:4]) + if s != 0 { + s = checksum(b) + } + if v != extensionVersion || s != 0 { + return false + } + return true +} + +// parseExtensions parses b as a list of ICMP extensions. +// The length attribute l must be the length attribute field in +// received icmp messages. +// +// It will return a list of ICMP extensions and an adjusted length +// attribute that represents the length of the padded original +// datagram field. Otherwise, it returns an error. +func parseExtensions(b []byte, l int) ([]Extension, int, error) { + // Still a lot of non-RFC 4884 compliant implementations are + // out there. Set the length attribute l to 128 when it looks + // inappropriate for backwards compatibility. + // + // A minimal extension at least requires 8 octets; 4 octets + // for an extension header, and 4 octets for a single object + // header. + // + // See RFC 4884 for further information. 
+ if 128 > l || l+8 > len(b) { + l = 128 + } + if l+8 > len(b) { + return nil, -1, errNoExtension + } + if !validExtensionHeader(b[l:]) { + if l == 128 { + return nil, -1, errNoExtension + } + l = 128 + if !validExtensionHeader(b[l:]) { + return nil, -1, errNoExtension + } + } + var exts []Extension + for b = b[l+4:]; len(b) >= 4; { + ol := int(binary.BigEndian.Uint16(b[:2])) + if 4 > ol || ol > len(b) { + break + } + switch b[2] { + case classMPLSLabelStack: + ext, err := parseMPLSLabelStack(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + case classInterfaceInfo: + ext, err := parseInterfaceInfo(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + } + b = b[ol:] + } + return exts, l, nil +} diff --git a/vendor/golang.org/x/net/icmp/extension_test.go b/vendor/golang.org/x/net/icmp/extension_test.go new file mode 100644 index 00000000..0b3f7b9e --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension_test.go @@ -0,0 +1,259 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp + +import ( + "net" + "reflect" + "testing" + + "golang.org/x/net/internal/iana" +) + +var marshalAndParseExtensionTests = []struct { + proto int + hdr []byte + obj []byte + exts []Extension +}{ + // MPLS label stack with no label + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x01, 0x01, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + }, + }, + }, + // MPLS label stack with a single label + { + proto: iana.ProtocolIPv6ICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x08, 0x01, 0x01, + 0x03, 0xe8, 0xe9, 0xff, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + // MPLS label stack with multiple labels + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x0c, 0x01, 0x01, + 0x03, 0xe8, 0xde, 0xfe, + 0x03, 0xe8, 0xe1, 0xff, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16013, + TC: 0x7, + S: false, + TTL: 254, + }, + { + Label: 16014, + TC: 0, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + // Interface information with no attribute + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x02, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + }, + }, + }, + // Interface information with ifIndex and name + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x10, 0x02, 0x0a, + 0x00, 0x00, 0x00, 0x10, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0a, + 
Interface: &net.Interface{ + Index: 16, + Name: "en101", + }, + }, + }, + }, + // Interface information with ifIndex, IPAddr, name and MTU + { + proto: iana.ProtocolIPv6ICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x28, 0x02, 0x0f, + 0x00, 0x00, 0x00, 0x0f, + 0x00, 0x02, 0x00, 0x00, + 0xfe, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + }, +} + +func TestMarshalAndParseExtension(t *testing.T) { + for i, tt := range marshalAndParseExtensionTests { + for j, ext := range tt.exts { + var err error + var b []byte + switch ext := ext.(type) { + case *MPLSLabelStack: + b, err = ext.Marshal(tt.proto) + if err != nil { + t.Errorf("#%v/%v: %v", i, j, err) + continue + } + case *InterfaceInfo: + b, err = ext.Marshal(tt.proto) + if err != nil { + t.Errorf("#%v/%v: %v", i, j, err) + continue + } + } + if !reflect.DeepEqual(b, tt.obj) { + t.Errorf("#%v/%v: got %#v; want %#v", i, j, b, tt.obj) + continue + } + } + + for j, wire := range []struct { + data []byte // original datagram + inlattr int // length of padded original datagram, a hint + outlattr int // length of padded original datagram, a want + err error + }{ + {nil, 0, -1, errNoExtension}, + {make([]byte, 127), 128, -1, errNoExtension}, + + {make([]byte, 128), 127, -1, errNoExtension}, + {make([]byte, 128), 128, -1, errNoExtension}, + {make([]byte, 128), 129, -1, errNoExtension}, + + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 127, 128, nil}, + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 128, 128, nil}, + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 129, 
128, nil}, + + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 511, -1, errNoExtension}, + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 512, 512, nil}, + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 513, -1, errNoExtension}, + } { + exts, l, err := parseExtensions(wire.data, wire.inlattr) + if err != wire.err { + t.Errorf("#%v/%v: got %v; want %v", i, j, err, wire.err) + continue + } + if wire.err != nil { + continue + } + if l != wire.outlattr { + t.Errorf("#%v/%v: got %v; want %v", i, j, l, wire.outlattr) + } + if !reflect.DeepEqual(exts, tt.exts) { + for j, ext := range exts { + switch ext := ext.(type) { + case *MPLSLabelStack: + want := tt.exts[j].(*MPLSLabelStack) + t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) + case *InterfaceInfo: + want := tt.exts[j].(*InterfaceInfo) + t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) + } + } + continue + } + } + } +} + +var parseInterfaceNameTests = []struct { + b []byte + error +}{ + {[]byte{0, 'e', 'n', '0'}, errInvalidExtension}, + {[]byte{4, 'e', 'n', '0'}, nil}, + {[]byte{7, 'e', 'n', '0', 0xff, 0xff, 0xff, 0xff}, errInvalidExtension}, + {[]byte{8, 'e', 'n', '0', 0xff, 0xff, 0xff}, errMessageTooShort}, +} + +func TestParseInterfaceName(t *testing.T) { + ifi := InterfaceInfo{Interface: &net.Interface{}} + for i, tt := range parseInterfaceNameTests { + if _, err := ifi.parseName(tt.b); err != tt.error { + t.Errorf("#%d: got %v; want %v", i, err, tt.error) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/helper.go b/vendor/golang.org/x/net/icmp/helper.go new file mode 100644 index 00000000..6c4e633b --- /dev/null +++ b/vendor/golang.org/x/net/icmp/helper.go @@ -0,0 +1,27 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. + freebsdVersion uint32 + + nativeEndian binary.ByteOrder +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = binary.LittleEndian + } else { + nativeEndian = binary.BigEndian + } +} diff --git a/vendor/golang.org/x/net/icmp/helper_posix.go b/vendor/golang.org/x/net/icmp/helper_posix.go new file mode 100644 index 00000000..398fd388 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/helper_posix.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "strconv" + "syscall" +) + +func sockaddr(family int, address string) (syscall.Sockaddr, error) { + switch family { + case syscall.AF_INET: + a, err := net.ResolveIPAddr("ip4", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv4zero + } + if a.IP = a.IP.To4(); a.IP == nil { + return nil, net.InvalidAddrError("non-ipv4 address") + } + sa := &syscall.SockaddrInet4{} + copy(sa.Addr[:], a.IP) + return sa, nil + case syscall.AF_INET6: + a, err := net.ResolveIPAddr("ip6", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv6unspecified + } + if a.IP.Equal(net.IPv4zero) { + a.IP = net.IPv6unspecified + } + if a.IP = a.IP.To16(); a.IP == nil || a.IP.To4() != nil { + return nil, net.InvalidAddrError("non-ipv6 address") + } + sa := &syscall.SockaddrInet6{ZoneId: zoneToUint32(a.Zone)} + copy(sa.Addr[:], a.IP) + return sa, nil + default: + return nil, net.InvalidAddrError("unexpected family") + } +} + +func zoneToUint32(zone string) uint32 { + if zone == "" { + return 0 + } + if ifi, err := net.InterfaceByName(zone); err 
== nil { + return uint32(ifi.Index) + } + n, err := strconv.Atoi(zone) + if err != nil { + return 0 + } + return uint32(n) +} + +func last(s string, b byte) int { + i := len(s) + for i--; i >= 0; i-- { + if s[i] == b { + break + } + } + return i +} diff --git a/vendor/golang.org/x/net/icmp/interface.go b/vendor/golang.org/x/net/icmp/interface.go new file mode 100644 index 00000000..78b5b98b --- /dev/null +++ b/vendor/golang.org/x/net/icmp/interface.go @@ -0,0 +1,236 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "strings" + + "golang.org/x/net/internal/iana" +) + +const ( + classInterfaceInfo = 2 + + afiIPv4 = 1 + afiIPv6 = 2 +) + +const ( + attrMTU = 1 << iota + attrName + attrIPAddr + attrIfIndex +) + +// An InterfaceInfo represents interface and next-hop identification. +type InterfaceInfo struct { + Class int // extension object class number + Type int // extension object sub-type + Interface *net.Interface + Addr *net.IPAddr +} + +func (ifi *InterfaceInfo) nameLen() int { + if len(ifi.Interface.Name) > 63 { + return 64 + } + l := 1 + len(ifi.Interface.Name) + return (l + 3) &^ 3 +} + +func (ifi *InterfaceInfo) attrsAndLen(proto int) (attrs, l int) { + l = 4 + if ifi.Interface != nil && ifi.Interface.Index > 0 { + attrs |= attrIfIndex + l += 4 + if len(ifi.Interface.Name) > 0 { + attrs |= attrName + l += ifi.nameLen() + } + if ifi.Interface.MTU > 0 { + attrs |= attrMTU + l += 4 + } + } + if ifi.Addr != nil { + switch proto { + case iana.ProtocolICMP: + if ifi.Addr.IP.To4() != nil { + attrs |= attrIPAddr + l += 4 + net.IPv4len + } + case iana.ProtocolIPv6ICMP: + if ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + attrs |= attrIPAddr + l += 4 + net.IPv6len + } + } + } + return +} + +// Len implements the Len method of Extension interface. 
+func (ifi *InterfaceInfo) Len(proto int) int { + _, l := ifi.attrsAndLen(proto) + return l +} + +// Marshal implements the Marshal method of Extension interface. +func (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) { + attrs, l := ifi.attrsAndLen(proto) + b := make([]byte, l) + if err := ifi.marshal(proto, b, attrs, l); err != nil { + return nil, err + } + return b, nil +} + +func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error { + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classInterfaceInfo, byte(ifi.Type) + for b = b[4:]; len(b) > 0 && attrs != 0; { + switch { + case attrs&attrIfIndex != 0: + b = ifi.marshalIfIndex(proto, b) + attrs &^= attrIfIndex + case attrs&attrIPAddr != 0: + b = ifi.marshalIPAddr(proto, b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b = ifi.marshalName(proto, b) + attrs &^= attrName + case attrs&attrMTU != 0: + b = ifi.marshalMTU(proto, b) + attrs &^= attrMTU + } + } + return nil +} + +func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte { + switch proto { + case iana.ProtocolICMP: + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv4)) + copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4()) + b = b[4+net.IPv4len:] + case iana.ProtocolIPv6ICMP: + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv6)) + copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16()) + b = b[4+net.IPv6len:] + } + return b +} + +func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + afi := int(binary.BigEndian.Uint16(b[:2])) + b = b[4:] + switch afi { + case afiIPv4: + if len(b) < net.IPv4len { + 
return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv4len) + copy(ifi.Addr.IP, b[:net.IPv4len]) + b = b[net.IPv4len:] + case afiIPv6: + if len(b) < net.IPv6len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv6len) + copy(ifi.Addr.IP, b[:net.IPv6len]) + b = b[net.IPv6len:] + } + return b, nil +} + +func (ifi *InterfaceInfo) marshalName(proto int, b []byte) []byte { + l := byte(ifi.nameLen()) + b[0] = l + copy(b[1:], []byte(ifi.Interface.Name)) + return b[l:] +} + +func (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) { + if 4 > len(b) || len(b) < int(b[0]) { + return nil, errMessageTooShort + } + l := int(b[0]) + if l%4 != 0 || 4 > l || l > 64 { + return nil, errInvalidExtension + } + var name [63]byte + copy(name[:], b[1:l]) + ifi.Interface.Name = strings.Trim(string(name[:]), "\000") + return b[l:], nil +} + +func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func parseInterfaceInfo(b []byte) (Extension, error) { + ifi := &InterfaceInfo{ + Class: int(b[2]), + Type: int(b[3]), + } + if ifi.Type&(attrIfIndex|attrName|attrMTU) != 0 { + ifi.Interface = &net.Interface{} + } + if ifi.Type&attrIPAddr != 0 { + ifi.Addr = &net.IPAddr{} + } + attrs := ifi.Type & (attrIfIndex | attrIPAddr | attrName | attrMTU) + for b = b[4:]; len(b) > 0 && attrs != 0; { + var err error + switch { + case attrs&attrIfIndex != 0: + b, err = ifi.parseIfIndex(b) + attrs &^= attrIfIndex + case attrs&attrIPAddr != 0: + b, err = ifi.parseIPAddr(b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b, err = ifi.parseName(b) + attrs &^= attrName + case attrs&attrMTU != 0: + b, err = ifi.parseMTU(b) + attrs &^= attrMTU + } + if err != nil { + return 
nil, err + } + } + if ifi.Interface != nil && ifi.Interface.Name != "" && ifi.Addr != nil && ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + ifi.Addr.Zone = ifi.Interface.Name + } + return ifi, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4.go b/vendor/golang.org/x/net/icmp/ipv4.go new file mode 100644 index 00000000..729ddc97 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "runtime" + + "golang.org/x/net/ipv4" +) + +// ParseIPv4Header parses b as an IPv4 header of ICMP error message +// invoking packet, which is contained in ICMP error message. +func ParseIPv4Header(b []byte) (*ipv4.Header, error) { + if len(b) < ipv4.HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return nil, errBufferTooShort + } + h := &ipv4.Header{ + Version: int(b[0] >> 4), + Len: hdrlen, + TOS: int(b[1]), + ID: int(binary.BigEndian.Uint16(b[4:6])), + FragOff: int(binary.BigEndian.Uint16(b[6:8])), + TTL: int(b[8]), + Protocol: int(b[9]), + Checksum: int(binary.BigEndian.Uint16(b[10:12])), + Src: net.IPv4(b[12], b[13], b[14], b[15]), + Dst: net.IPv4(b[16], b[17], b[18], b[19]), + } + switch runtime.GOOS { + case "darwin": + h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + case "freebsd": + if freebsdVersion >= 1000000 { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } else { + h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } + h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + if hdrlen-ipv4.HeaderLen > 0 { + h.Options = make([]byte, hdrlen-ipv4.HeaderLen) + copy(h.Options, b[ipv4.HeaderLen:]) + } + return h, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4_test.go 
b/vendor/golang.org/x/net/icmp/ipv4_test.go new file mode 100644 index 00000000..b05c6973 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4_test.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/ipv4" +) + +var ( + wireHeaderFromKernel = [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + } + wireHeaderFromTradBSDKernel = [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + } + // TODO(mikio): Add platform dependent wire header formats when + // we support new platforms. + + testHeader = &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: ipv4.DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + } +) + +func TestParseIPv4Header(t *testing.T) { + var wh []byte + switch runtime.GOOS { + case "darwin": + wh = wireHeaderFromTradBSDKernel[:] + case "freebsd": + if freebsdVersion >= 1000000 { + wh = wireHeaderFromKernel[:] + } else { + wh = wireHeaderFromTradBSDKernel[:] + } + default: + wh = wireHeaderFromKernel[:] + } + h, err := ParseIPv4Header(wh) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, testHeader) { + t.Fatalf("got %#v; want %#v", h, testHeader) + } +} diff --git a/vendor/golang.org/x/net/icmp/ipv6.go b/vendor/golang.org/x/net/icmp/ipv6.go new file mode 100644 index 00000000..58eaa77d --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv6.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + + "golang.org/x/net/internal/iana" +) + +const ipv6PseudoHeaderLen = 2*net.IPv6len + 8 + +// IPv6PseudoHeader returns an IPv6 pseudo header for checksum +// calculation. +func IPv6PseudoHeader(src, dst net.IP) []byte { + b := make([]byte, ipv6PseudoHeaderLen) + copy(b, src.To16()) + copy(b[net.IPv6len:], dst.To16()) + b[len(b)-1] = byte(iana.ProtocolIPv6ICMP) + return b +} diff --git a/vendor/golang.org/x/net/icmp/listen_posix.go b/vendor/golang.org/x/net/icmp/listen_posix.go new file mode 100644 index 00000000..b9f26079 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_posix.go @@ -0,0 +1,98 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "os" + "runtime" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows to read, write a few +// limited ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. 
+// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + var family, proto int + switch network { + case "udp4": + family, proto = syscall.AF_INET, iana.ProtocolICMP + case "udp6": + family, proto = syscall.AF_INET6, iana.ProtocolIPv6ICMP + default: + i := last(network, ':') + switch network[:i] { + case "ip4": + proto = iana.ProtocolICMP + case "ip6": + proto = iana.ProtocolIPv6ICMP + } + } + var cerr error + var c net.PacketConn + switch family { + case syscall.AF_INET, syscall.AF_INET6: + s, err := syscall.Socket(family, syscall.SOCK_DGRAM, proto) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + defer syscall.Close(s) + if runtime.GOOS == "darwin" && family == syscall.AF_INET { + if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil { + return nil, os.NewSyscallError("setsockopt", err) + } + } + sa, err := sockaddr(family, address) + if err != nil { + return nil, err + } + if err := syscall.Bind(s, sa); err != nil { + return nil, os.NewSyscallError("bind", err) + } + f := os.NewFile(uintptr(s), "datagram-oriented icmp") + defer f.Close() + c, cerr = net.FilePacketConn(f) + default: + c, cerr = net.ListenPacket(network, address) + } + if cerr != nil { + return nil, cerr + } + switch proto { + case iana.ProtocolICMP: + return &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil + case iana.ProtocolIPv6ICMP: + return &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil + default: + return &PacketConn{c: c}, nil + } +} diff --git a/vendor/golang.org/x/net/icmp/listen_stub.go b/vendor/golang.org/x/net/icmp/listen_stub.go new file mode 100644 index 00000000..668728d1 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_stub.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package icmp + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows to read, write a few +// limited ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. +// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + return nil, errOpNoSupport +} diff --git a/vendor/golang.org/x/net/icmp/message.go b/vendor/golang.org/x/net/icmp/message.go new file mode 100644 index 00000000..42d6df2c --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message.go @@ -0,0 +1,150 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package icmp provides basic functions for the manipulation of +// messages used in the Internet Control Message Protocols, +// ICMPv4 and ICMPv6. +// +// ICMPv4 and ICMPv6 are defined in RFC 792 and RFC 4443. +// Multi-part message support for ICMP is defined in RFC 4884. +// ICMP extensions for MPLS are defined in RFC 4950. +// ICMP extensions for interface and next-hop identification are +// defined in RFC 5837. 
+package icmp // import "golang.org/x/net/icmp" + +import ( + "encoding/binary" + "errors" + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var ( + errMessageTooShort = errors.New("message too short") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errOpNoSupport = errors.New("operation not supported") + errNoExtension = errors.New("no extension") + errInvalidExtension = errors.New("invalid extension") +) + +func checksum(b []byte) uint16 { + csumcv := len(b) - 1 // checksum coverage + s := uint32(0) + for i := 0; i < csumcv; i += 2 { + s += uint32(b[i+1])<<8 | uint32(b[i]) + } + if csumcv&1 == 0 { + s += uint32(b[csumcv]) + } + s = s>>16 + s&0xffff + s = s + s>>16 + return ^uint16(s) +} + +// A Type represents an ICMP message type. +type Type interface { + Protocol() int +} + +// A Message represents an ICMP message. +type Message struct { + Type Type // type, either ipv4.ICMPType or ipv6.ICMPType + Code int // code + Checksum int // checksum + Body MessageBody // body +} + +// Marshal returns the binary encoding of the ICMP message m. +// +// For an ICMPv4 message, the returned message always contains the +// calculated checksum field. +// +// For an ICMPv6 message, the returned message contains the calculated +// checksum field when psh is not nil, otherwise the kernel will +// compute the checksum field during the message transmission. +// When psh is not nil, it must be the pseudo header for IPv6. +func (m *Message) Marshal(psh []byte) ([]byte, error) { + var mtype int + switch typ := m.Type.(type) { + case ipv4.ICMPType: + mtype = int(typ) + case ipv6.ICMPType: + mtype = int(typ) + default: + return nil, syscall.EINVAL + } + b := []byte{byte(mtype), byte(m.Code), 0, 0} + if m.Type.Protocol() == iana.ProtocolIPv6ICMP && psh != nil { + b = append(psh, b...) 
+ } + if m.Body != nil && m.Body.Len(m.Type.Protocol()) != 0 { + mb, err := m.Body.Marshal(m.Type.Protocol()) + if err != nil { + return nil, err + } + b = append(b, mb...) + } + if m.Type.Protocol() == iana.ProtocolIPv6ICMP { + if psh == nil { // cannot calculate checksum here + return b, nil + } + off, l := 2*net.IPv6len, len(b)-len(psh) + binary.BigEndian.PutUint32(b[off:off+4], uint32(l)) + } + s := checksum(b) + // Place checksum back in header; using ^= avoids the + // assumption the checksum bytes are zero. + b[len(psh)+2] ^= byte(s) + b[len(psh)+3] ^= byte(s >> 8) + return b[len(psh):], nil +} + +var parseFns = map[Type]func(int, []byte) (MessageBody, error){ + ipv4.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv4.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv4.ICMPTypeParameterProblem: parseParamProb, + + ipv4.ICMPTypeEcho: parseEcho, + ipv4.ICMPTypeEchoReply: parseEcho, + + ipv6.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv6.ICMPTypePacketTooBig: parsePacketTooBig, + ipv6.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv6.ICMPTypeParameterProblem: parseParamProb, + + ipv6.ICMPTypeEchoRequest: parseEcho, + ipv6.ICMPTypeEchoReply: parseEcho, +} + +// ParseMessage parses b as an ICMP message. +// Proto must be either the ICMPv4 or ICMPv6 protocol number. 
+func ParseMessage(proto int, b []byte) (*Message, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + var err error + m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))} + switch proto { + case iana.ProtocolICMP: + m.Type = ipv4.ICMPType(b[0]) + case iana.ProtocolIPv6ICMP: + m.Type = ipv6.ICMPType(b[0]) + default: + return nil, syscall.EINVAL + } + if fn, ok := parseFns[m.Type]; !ok { + m.Body, err = parseDefaultMessageBody(proto, b[4:]) + } else { + m.Body, err = fn(proto, b[4:]) + } + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/icmp/message_test.go b/vendor/golang.org/x/net/icmp/message_test.go new file mode 100644 index 00000000..5d2605f8 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message_test.go @@ -0,0 +1,134 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "net" + "reflect" + "testing" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var marshalAndParseMessageForIPv4Tests = []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv4.ICMPTypePhoturis, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, +} + +func TestMarshalAndParseMessageForIPv4(t *testing.T) { + for i, tt := range marshalAndParseMessageForIPv4Tests { + b, 
err := tt.Marshal(nil) + if err != nil { + t.Fatal(err) + } + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + if !reflect.DeepEqual(m.Body, tt.Body) { + t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) + } + } +} + +var marshalAndParseMessageForIPv6Tests = []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypePacketTooBig, Code: 0, + Body: &icmp.PacketTooBig{ + MTU: 1<<16 - 1, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv6.ICMPTypeDuplicateAddressConfirmation, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, +} + +func TestMarshalAndParseMessageForIPv6(t *testing.T) { + pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) + for i, tt := range marshalAndParseMessageForIPv6Tests { + for _, psh := range [][]byte{pshicmp, nil} { + b, err := tt.Marshal(psh) + if err != nil { + t.Fatal(err) + } + m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + if !reflect.DeepEqual(m.Body, tt.Body) { + t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) + } + } + } +} diff --git a/vendor/golang.org/x/net/icmp/messagebody.go b/vendor/golang.org/x/net/icmp/messagebody.go new file mode 100644 index 00000000..2121a17b --- 
/dev/null +++ b/vendor/golang.org/x/net/icmp/messagebody.go @@ -0,0 +1,41 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A MessageBody represents an ICMP message body. +type MessageBody interface { + // Len returns the length of ICMP message body. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP message body. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Marshal(proto int) ([]byte, error) +} + +// A DefaultMessageBody represents the default message body. +type DefaultMessageBody struct { + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *DefaultMessageBody) Len(proto int) int { + if p == nil { + return 0 + } + return len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DefaultMessageBody) Marshal(proto int) ([]byte, error) { + return p.Data, nil +} + +// parseDefaultMessageBody parses b as an ICMP message body. +func parseDefaultMessageBody(proto int, b []byte) (MessageBody, error) { + p := &DefaultMessageBody{Data: make([]byte, len(b))} + copy(p.Data, b) + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/mpls.go b/vendor/golang.org/x/net/icmp/mpls.go new file mode 100644 index 00000000..c3149174 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/mpls.go @@ -0,0 +1,77 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A MPLSLabel represents a MPLS label stack entry. 
+type MPLSLabel struct { + Label int // label value + TC int // traffic class; formerly experimental use + S bool // bottom of stack + TTL int // time to live +} + +const ( + classMPLSLabelStack = 1 + typeIncomingMPLSLabelStack = 1 +) + +// A MPLSLabelStack represents a MPLS label stack. +type MPLSLabelStack struct { + Class int // extension object class number + Type int // extension object sub-type + Labels []MPLSLabel +} + +// Len implements the Len method of Extension interface. +func (ls *MPLSLabelStack) Len(proto int) int { + return 4 + (4 * len(ls.Labels)) +} + +// Marshal implements the Marshal method of Extension interface. +func (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) { + b := make([]byte, ls.Len(proto)) + if err := ls.marshal(proto, b); err != nil { + return nil, err + } + return b, nil +} + +func (ls *MPLSLabelStack) marshal(proto int, b []byte) error { + l := ls.Len(proto) + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack + off := 4 + for _, ll := range ls.Labels { + b[off], b[off+1], b[off+2] = byte(ll.Label>>12), byte(ll.Label>>4&0xff), byte(ll.Label<<4&0xf0) + b[off+2] |= byte(ll.TC << 1 & 0x0e) + if ll.S { + b[off+2] |= 0x1 + } + b[off+3] = byte(ll.TTL) + off += 4 + } + return nil +} + +func parseMPLSLabelStack(b []byte) (Extension, error) { + ls := &MPLSLabelStack{ + Class: int(b[2]), + Type: int(b[3]), + } + for b = b[4:]; len(b) >= 4; b = b[4:] { + ll := MPLSLabel{ + Label: int(b[0])<<12 | int(b[1])<<4 | int(b[2])>>4, + TC: int(b[2]&0x0e) >> 1, + TTL: int(b[3]), + } + if b[2]&0x1 != 0 { + ll.S = true + } + ls.Labels = append(ls.Labels, ll) + } + return ls, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart.go b/vendor/golang.org/x/net/icmp/multipart.go new file mode 100644 index 00000000..f2713566 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart.go @@ -0,0 +1,109 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "golang.org/x/net/internal/iana" + +// multipartMessageBodyDataLen takes b as an original datagram and +// exts as extensions, and returns a required length for message body +// and a required length for a padded original datagram in wire +// format. +func multipartMessageBodyDataLen(proto int, b []byte, exts []Extension) (bodyLen, dataLen int) { + for _, ext := range exts { + bodyLen += ext.Len(proto) + } + if bodyLen > 0 { + dataLen = multipartMessageOrigDatagramLen(proto, b) + bodyLen += 4 // length of extension header + } else { + dataLen = len(b) + } + bodyLen += dataLen + return bodyLen, dataLen +} + +// multipartMessageOrigDatagramLen takes b as an original datagram, +// and returns a required length for a padded original datagram in wire +// format. +func multipartMessageOrigDatagramLen(proto int, b []byte) int { + roundup := func(b []byte, align int) int { + // According to RFC 4884, the padded original datagram + // field must contain at least 128 octets. + if len(b) < 128 { + return 128 + } + r := len(b) + return (r + align - 1) & ^(align - 1) + } + switch proto { + case iana.ProtocolICMP: + return roundup(b, 4) + case iana.ProtocolIPv6ICMP: + return roundup(b, 8) + default: + return len(b) + } +} + +// marshalMultipartMessageBody takes data as an original datagram and +// exts as extensions, and returns a binary encoding of message body. +// It can be used for non-multipart message bodies when exts is nil. 
+func marshalMultipartMessageBody(proto int, data []byte, exts []Extension) ([]byte, error) { + bodyLen, dataLen := multipartMessageBodyDataLen(proto, data, exts) + b := make([]byte, 4+bodyLen) + copy(b[4:], data) + off := dataLen + 4 + if len(exts) > 0 { + b[dataLen+4] = byte(extensionVersion << 4) + off += 4 // length of object header + for _, ext := range exts { + switch ext := ext.(type) { + case *MPLSLabelStack: + if err := ext.marshal(proto, b[off:]); err != nil { + return nil, err + } + off += ext.Len(proto) + case *InterfaceInfo: + attrs, l := ext.attrsAndLen(proto) + if err := ext.marshal(proto, b[off:], attrs, l); err != nil { + return nil, err + } + off += ext.Len(proto) + } + } + s := checksum(b[dataLen+4:]) + b[dataLen+4+2] ^= byte(s) + b[dataLen+4+3] ^= byte(s >> 8) + switch proto { + case iana.ProtocolICMP: + b[1] = byte(dataLen / 4) + case iana.ProtocolIPv6ICMP: + b[0] = byte(dataLen / 8) + } + } + return b, nil +} + +// parseMultipartMessageBody parses b as either a non-multipart +// message body or a multipart message body. +func parseMultipartMessageBody(proto int, b []byte) ([]byte, []Extension, error) { + var l int + switch proto { + case iana.ProtocolICMP: + l = 4 * int(b[1]) + case iana.ProtocolIPv6ICMP: + l = 8 * int(b[0]) + } + if len(b) == 4 { + return nil, nil, nil + } + exts, l, err := parseExtensions(b[4:], l) + if err != nil { + l = len(b) - 4 + } + data := make([]byte, l) + copy(data, b[4:]) + return data, exts, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart_test.go b/vendor/golang.org/x/net/icmp/multipart_test.go new file mode 100644 index 00000000..966ccb8d --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart_test.go @@ -0,0 +1,442 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp_test + +import ( + "fmt" + "net" + "reflect" + "testing" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var marshalAndParseMultipartMessageForIPv4Tests = []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 2).To4(), + }, + }, + }, + }, + }, +} + +func 
TestMarshalAndParseMultipartMessageForIPv4(t *testing.T) { + for i, tt := range marshalAndParseMultipartMessageForIPv4Tests { + b, err := tt.Marshal(nil) + if err != nil { + t.Fatal(err) + } + if b[5] != 32 { + t.Errorf("#%v: got %v; want 32", i, b[5]) + } + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + switch m.Type { + case ipv4.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv4.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv4.ICMPTypeParameterProblem: + got, want := m.Body.(*icmp.ParamProb), tt.Body.(*icmp.ParamProb) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + } + } +} + +var marshalAndParseMultipartMessageForIPv6Tests = []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + 
}, + }, + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en102", + }, + }, + }, + }, + }, +} + +func TestMarshalAndParseMultipartMessageForIPv6(t *testing.T) { + pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) + for i, tt := range marshalAndParseMultipartMessageForIPv6Tests { + for _, psh := range [][]byte{pshicmp, nil} { + b, err := tt.Marshal(psh) + if err != nil { + t.Fatal(err) + } + if b[4] != 16 { + t.Errorf("#%v: got %v; want 16", i, b[4]) + } + m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + switch m.Type { + case ipv6.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv6.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + } + } + } +} + 
+func dumpExtensions(i int, gotExts, wantExts []icmp.Extension) string { + var s string + for j, got := range gotExts { + switch got := got.(type) { + case *icmp.MPLSLabelStack: + want := wantExts[j].(*icmp.MPLSLabelStack) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%v/%v: got %#v; want %#v\n", i, j, got, want) + } + case *icmp.InterfaceInfo: + want := wantExts[j].(*icmp.InterfaceInfo) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%v/%v: got %#v, %#v, %#v; want %#v, %#v, %#v\n", i, j, got, got.Interface, got.Addr, want, want.Interface, want.Addr) + } + } + } + return s[:len(s)-1] +} + +var multipartMessageBodyLenTests = []struct { + proto int + in icmp.MessageBody + out int +}{ + { + iana.ProtocolICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // [pointer, unused] and original datagram + }, + + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload, original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 132, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + + { + 
iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.PacketTooBig{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // mtu and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // pointer and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 127), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 136, // [length, unused], extension header, object header, object payload and original datagram + }, +} + +func TestMultipartMessageBodyLen(t *testing.T) { + for i, tt := range multipartMessageBodyLenTests { + if out := tt.in.Len(tt.proto); out != tt.out { + t.Errorf("#%d: got %d; want %d", i, out, tt.out) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/packettoobig.go b/vendor/golang.org/x/net/icmp/packettoobig.go new file mode 100644 index 00000000..a1c9df7b --- /dev/null +++ b/vendor/golang.org/x/net/icmp/packettoobig.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A PacketTooBig represents an ICMP packet too big message body. +type PacketTooBig struct { + MTU int // maximum transmission unit of the nexthop link + Data []byte // data, known as original datagram field +} + +// Len implements the Len method of MessageBody interface. +func (p *PacketTooBig) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *PacketTooBig) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint32(b[:4], uint32(p.MTU)) + copy(b[4:], p.Data) + return b, nil +} + +// parsePacketTooBig parses b as an ICMP packet too big message body. +func parsePacketTooBig(proto int, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/paramprob.go b/vendor/golang.org/x/net/icmp/paramprob.go new file mode 100644 index 00000000..0a2548da --- /dev/null +++ b/vendor/golang.org/x/net/icmp/paramprob.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "golang.org/x/net/internal/iana" +) + +// A ParamProb represents an ICMP parameter problem message body. +type ParamProb struct { + Pointer uintptr // offset within the data where the error was detected + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. 
+func (p *ParamProb) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *ParamProb) Marshal(proto int) ([]byte, error) { + if proto == iana.ProtocolIPv6ICMP { + b := make([]byte, p.Len(proto)) + binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer)) + copy(b[4:], p.Data) + return b, nil + } + b, err := marshalMultipartMessageBody(proto, p.Data, p.Extensions) + if err != nil { + return nil, err + } + b[0] = byte(p.Pointer) + return b, nil +} + +// parseParamProb parses b as an ICMP parameter problem message body. +func parseParamProb(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &ParamProb{} + if proto == iana.ProtocolIPv6ICMP { + p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4])) + p.Data = make([]byte, len(b)-4) + copy(p.Data, b[4:]) + return p, nil + } + p.Pointer = uintptr(b[0]) + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/ping_test.go b/vendor/golang.org/x/net/icmp/ping_test.go new file mode 100644 index 00000000..4ec26928 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ping_test.go @@ -0,0 +1,166 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp_test + +import ( + "errors" + "fmt" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +func googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) { + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + return nil, err + } + netaddr := func(ip net.IP) (net.Addr, error) { + switch c.LocalAddr().(type) { + case *net.UDPAddr: + return &net.UDPAddr{IP: ip}, nil + case *net.IPAddr: + return &net.IPAddr{IP: ip}, nil + default: + return nil, errors.New("neither UDPAddr nor IPAddr") + } + } + for _, ip := range ips { + switch protocol { + case iana.ProtocolICMP: + if ip.To4() != nil { + return netaddr(ip) + } + case iana.ProtocolIPv6ICMP: + if ip.To16() != nil && ip.To4() == nil { + return netaddr(ip) + } + } + } + return nil, errors.New("no A or AAAA record") +} + +type pingTest struct { + network, address string + protocol int + mtype icmp.Type +} + +var nonPrivilegedPingTests = []pingTest{ + {"udp4", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, + + {"udp6", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, +} + +func TestNonPrivilegedPing(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + for i, tt := range nonPrivilegedPingTests { + if err := doPing(tt, i); err != nil { + t.Error(err) + } + } +} + +var privilegedPingTests = []pingTest{ + {"ip4:icmp", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, + + {"ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, +} + +func TestPrivilegedPing(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); 
!ok { + t.Skip(m) + } + + for i, tt := range privilegedPingTests { + if err := doPing(tt, i); err != nil { + t.Error(err) + } + } +} + +func doPing(tt pingTest, seq int) error { + c, err := icmp.ListenPacket(tt.network, tt.address) + if err != nil { + return err + } + defer c.Close() + + dst, err := googleAddr(c, tt.protocol) + if err != nil { + return err + } + + if tt.network != "udp6" && tt.protocol == iana.ProtocolIPv6ICMP { + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeDestinationUnreachable) + f.Accept(ipv6.ICMPTypePacketTooBig) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeParameterProblem) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := c.IPv6PacketConn().SetICMPFilter(&f); err != nil { + return err + } + } + + wm := icmp.Message{ + Type: tt.mtype, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: 1 << uint(seq), + Data: []byte("HELLO-R-U-THERE"), + }, + } + wb, err := wm.Marshal(nil) + if err != nil { + return err + } + if n, err := c.WriteTo(wb, dst); err != nil { + return err + } else if n != len(wb) { + return fmt.Errorf("got %v; want %v", n, len(wb)) + } + + rb := make([]byte, 1500) + if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + return err + } + n, peer, err := c.ReadFrom(rb) + if err != nil { + return err + } + rm, err := icmp.ParseMessage(tt.protocol, rb[:n]) + if err != nil { + return err + } + switch rm.Type { + case ipv4.ICMPTypeEchoReply, ipv6.ICMPTypeEchoReply: + return nil + default: + return fmt.Errorf("got %+v from %v; want echo reply", rm, peer) + } +} diff --git a/vendor/golang.org/x/net/icmp/sys_freebsd.go b/vendor/golang.org/x/net/icmp/sys_freebsd.go new file mode 100644 index 00000000..c75f3dda --- /dev/null +++ b/vendor/golang.org/x/net/icmp/sys_freebsd.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp + +import "syscall" + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") +} diff --git a/vendor/golang.org/x/net/icmp/timeexceeded.go b/vendor/golang.org/x/net/icmp/timeexceeded.go new file mode 100644 index 00000000..344e1584 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/timeexceeded.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A TimeExceeded represents an ICMP time exceeded message body. +type TimeExceeded struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *TimeExceeded) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *TimeExceeded) Marshal(proto int) ([]byte, error) { + return marshalMultipartMessageBody(proto, p.Data, p.Extensions) +} + +// parseTimeExceeded parses b as an ICMP time exceeded message body. +func parseTimeExceeded(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &TimeExceeded{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 00000000..3daa8979 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package idna implements IDNA2008 (Internationalized Domain Names for +// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and +// RFC 5894. +package idna // import "golang.org/x/net/idna" + +import ( + "strings" + "unicode/utf8" +) + +// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or +// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11 + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". +func ToASCII(s string) (string, error) { + if ascii(s) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if !ascii(label) { + a, err := encode(acePrefix, label) + if err != nil { + return "", err + } + labels[i] = a + } + } + return strings.Join(labels, "."), nil +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". +func ToUnicode(s string) (string, error) { + if !strings.Contains(s, acePrefix) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if strings.HasPrefix(label, acePrefix) { + u, err := decode(label[len(acePrefix):]) + if err != nil { + return "", err + } + labels[i] = u + } + } + return strings.Join(labels, "."), nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/idna_test.go b/vendor/golang.org/x/net/idna/idna_test.go new file mode 100644 index 00000000..b1bc6fa2 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna_test.go @@ -0,0 +1,43 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +import ( + "testing" +) + +var idnaTestCases = [...]struct { + ascii, unicode string +}{ + // Labels. + {"books", "books"}, + {"xn--bcher-kva", "bücher"}, + + // Domains. + {"foo--xn--bar.org", "foo--xn--bar.org"}, + {"golang.org", "golang.org"}, + {"example.xn--p1ai", "example.рф"}, + {"xn--czrw28b.tw", "商業.tw"}, + {"www.xn--mller-kva.de", "www.müller.de"}, +} + +func TestIDNA(t *testing.T) { + for _, tc := range idnaTestCases { + if a, err := ToASCII(tc.unicode); err != nil { + t.Errorf("ToASCII(%q): %v", tc.unicode, err) + } else if a != tc.ascii { + t.Errorf("ToASCII(%q): got %q, want %q", tc.unicode, a, tc.ascii) + } + + if u, err := ToUnicode(tc.ascii); err != nil { + t.Errorf("ToUnicode(%q): %v", tc.ascii, err) + } else if u != tc.unicode { + t.Errorf("ToUnicode(%q): got %q, want %q", tc.ascii, u, tc.unicode) + } + } +} + +// TODO(nigeltao): test errors, once we've specified when ToASCII and ToUnicode +// return errors. diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 00000000..92e733f6 --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,200 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "fmt" + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +// decode decodes a string as specified in section 6.2. 
+func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
+func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. 
+func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/idna/punycode_test.go b/vendor/golang.org/x/net/idna/punycode_test.go new file mode 100644 index 00000000..bfec81de --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode_test.go @@ -0,0 +1,198 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +import ( + "strings" + "testing" +) + +var punycodeTestCases = [...]struct { + s, encoded string +}{ + {"", ""}, + {"-", "--"}, + {"-a", "-a-"}, + {"-a-", "-a--"}, + {"a", "a-"}, + {"a-", "a--"}, + {"a-b", "a-b-"}, + {"books", "books-"}, + {"bücher", "bcher-kva"}, + {"Hello世界", "Hello-ck1hg65u"}, + {"ü", "tda"}, + {"üý", "tdac"}, + + // The test cases below come from RFC 3492 section 7.1 with Errata 3026. + { + // (A) Arabic (Egyptian). + "\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" + + "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F", + "egbpdaj6bu4bxfgehfvwxn", + }, + { + // (B) Chinese (simplified). + "\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587", + "ihqwcrb4cv8a8dqg056pqjye", + }, + { + // (C) Chinese (traditional). + "\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587", + "ihqwctvzc91f659drss3x8bo0yb", + }, + { + // (D) Czech. + "\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" + + "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" + + "\u0065\u0073\u006B\u0079", + "Proprostnemluvesky-uyb24dma41a", + }, + { + // (E) Hebrew. 
+ "\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" + + "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" + + "\u05D1\u05E8\u05D9\u05EA", + "4dbcagdahymbxekheh6e0a7fei0b", + }, + { + // (F) Hindi (Devanagari). + "\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" + + "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" + + "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" + + "\u0939\u0948\u0902", + "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd", + }, + { + // (G) Japanese (kanji and hiragana). + "\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" + + "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B", + "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa", + }, + { + // (H) Korean (Hangul syllables). + "\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" + + "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" + + "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C", + "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" + + "psd879ccm6fea98c", + }, + { + // (I) Russian (Cyrillic). + "\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" + + "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" + + "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" + + "\u0438", + "b1abfaaepdrnnbgefbadotcwatmq2g4l", + }, + { + // (J) Spanish. + "\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" + + "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" + + "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" + + "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" + + "\u0061\u00F1\u006F\u006C", + "PorqunopuedensimplementehablarenEspaol-fmd56a", + }, + { + // (K) Vietnamese. + "\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" + + "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" + + "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" + + "\u0056\u0069\u1EC7\u0074", + "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g", + }, + { + // (L) 3<nen>B<gumi><kinpachi><sensei>. 
+ "\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F", + "3B-ww4c5e180e575a65lsy2b", + }, + { + // (M) <amuro><namie>-with-SUPER-MONKEYS. + "\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" + + "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" + + "\u004F\u004E\u004B\u0045\u0059\u0053", + "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n", + }, + { + // (N) Hello-Another-Way-<sorezore><no><basho>. + "\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" + + "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" + + "\u305D\u308C\u305E\u308C\u306E\u5834\u6240", + "Hello-Another-Way--fc4qua05auwb3674vfr0b", + }, + { + // (O) <hitotsu><yane><no><shita>2. + "\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032", + "2-u9tlzr9756bt3uc0v", + }, + { + // (P) Maji<de>Koi<suru>5<byou><mae> + "\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" + + "\u308B\u0035\u79D2\u524D", + "MajiKoi5-783gue6qz075azm5e", + }, + { + // (Q) <pafii>de<runba> + "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", + "de-jg4avhby1noc0d", + }, + { + // (R) <sono><supiido><de> + "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", + "d9juau41awczczp", + }, + { + // (S) -> $1.00 <- + "\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" + + "\u003C\u002D", + "-> $1.00 <--", + }, +} + +func TestPunycode(t *testing.T) { + for _, tc := range punycodeTestCases { + if got, err := decode(tc.encoded); err != nil { + t.Errorf("decode(%q): %v", tc.encoded, err) + } else if got != tc.s { + t.Errorf("decode(%q): got %q, want %q", tc.encoded, got, tc.s) + } + + if got, err := encode("", tc.s); err != nil { + t.Errorf(`encode("", %q): %v`, tc.s, err) + } else if got != tc.encoded { + t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded) + } + } +} + +var punycodeErrorTestCases = [...]string{ + "decode -", // A sole '-' is invalid. + "decode foo\x00bar", // '\x00' is not in [0-9A-Za-z]. + "decode foo#bar", // '#' is not in [0-9A-Za-z]. 
+ "decode foo\u00A3bar", // '\u00A3' is not in [0-9A-Za-z]. + "decode 9", // "9a" decodes to codepoint \u00A3; "9" is truncated. + "decode 99999a", // "99999a" decodes to codepoint \U0048A3C1, which is > \U0010FFFF. + "decode 9999999999a", // "9999999999a" overflows the int32 calculation. + + "encode " + strings.Repeat("x", 65536) + "\uff00", // int32 overflow. +} + +func TestPunycodeErrors(t *testing.T) { + for _, tc := range punycodeErrorTestCases { + var err error + switch { + case strings.HasPrefix(tc, "decode "): + _, err = decode(tc[7:]) + case strings.HasPrefix(tc, "encode "): + _, err = encode("", tc[7:]) + } + if err == nil { + if len(tc) > 256 { + tc = tc[:100] + "..." + tc[len(tc)-100:] + } + t.Errorf("no error for %s", tc) + } + } +} diff --git a/vendor/golang.org/x/net/internal/iana/const.go b/vendor/golang.org/x/net/internal/iana/const.go new file mode 100644 index 00000000..3438a27c --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/const.go @@ -0,0 +1,180 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). 
+package iana // import "golang.org/x/net/internal/iana" + +// Differentiated Services Field Codepoints (DSCP), Updated: 2013-06-25 +const ( + DiffServCS0 = 0x0 // CS0 + DiffServCS1 = 0x20 // CS1 + DiffServCS2 = 0x40 // CS2 + DiffServCS3 = 0x60 // CS3 + DiffServCS4 = 0x80 // CS4 + DiffServCS5 = 0xa0 // CS5 + DiffServCS6 = 0xc0 // CS6 + DiffServCS7 = 0xe0 // CS7 + DiffServAF11 = 0x28 // AF11 + DiffServAF12 = 0x30 // AF12 + DiffServAF13 = 0x38 // AF13 + DiffServAF21 = 0x48 // AF21 + DiffServAF22 = 0x50 // AF22 + DiffServAF23 = 0x58 // AF23 + DiffServAF31 = 0x68 // AF31 + DiffServAF32 = 0x70 // AF32 + DiffServAF33 = 0x78 // AF33 + DiffServAF41 = 0x88 // AF41 + DiffServAF42 = 0x90 // AF42 + DiffServAF43 = 0x98 // AF43 + DiffServEFPHB = 0xb8 // EF PHB + DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT +) + +// IPv4 TOS Byte and IPv6 Traffic Class Octet, Updated: 2001-09-06 +const ( + NotECNTransport = 0x0 // Not-ECT (Not ECN-Capable Transport) + ECNTransport1 = 0x1 // ECT(1) (ECN-Capable Transport(1)) + ECNTransport0 = 0x2 // ECT(0) (ECN-Capable Transport(0)) + CongestionExperienced = 0x3 // CE (Congestion Experienced) +) + +// Protocol Numbers, Updated: 2015-10-06 +const ( + ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number + ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option + ProtocolICMP = 1 // Internet Control Message + ProtocolIGMP = 2 // Internet Group Management + ProtocolGGP = 3 // Gateway-to-Gateway + ProtocolIPv4 = 4 // IPv4 encapsulation + ProtocolST = 5 // Stream + ProtocolTCP = 6 // Transmission Control + ProtocolCBT = 7 // CBT + ProtocolEGP = 8 // Exterior Gateway Protocol + ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP) + ProtocolBBNRCCMON = 10 // BBN RCC Monitoring + ProtocolNVPII = 11 // Network Voice Protocol + ProtocolPUP = 12 // PUP + ProtocolEMCON = 14 // EMCON + ProtocolXNET = 15 // Cross Net Debugger + ProtocolCHAOS = 16 // Chaos + ProtocolUDP = 17 // User Datagram + ProtocolMUX = 18 // Multiplexing + ProtocolDCNMEAS 
= 19 // DCN Measurement Subsystems + ProtocolHMP = 20 // Host Monitoring + ProtocolPRM = 21 // Packet Radio Measurement + ProtocolXNSIDP = 22 // XEROX NS IDP + ProtocolTRUNK1 = 23 // Trunk-1 + ProtocolTRUNK2 = 24 // Trunk-2 + ProtocolLEAF1 = 25 // Leaf-1 + ProtocolLEAF2 = 26 // Leaf-2 + ProtocolRDP = 27 // Reliable Data Protocol + ProtocolIRTP = 28 // Internet Reliable Transaction + ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4 + ProtocolNETBLT = 30 // Bulk Data Transfer Protocol + ProtocolMFENSP = 31 // MFE Network Services Protocol + ProtocolMERITINP = 32 // MERIT Internodal Protocol + ProtocolDCCP = 33 // Datagram Congestion Control Protocol + Protocol3PC = 34 // Third Party Connect Protocol + ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol + ProtocolXTP = 36 // XTP + ProtocolDDP = 37 // Datagram Delivery Protocol + ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto + ProtocolTPPP = 39 // TP++ Transport Protocol + ProtocolIL = 40 // IL Transport Protocol + ProtocolIPv6 = 41 // IPv6 encapsulation + ProtocolSDRP = 42 // Source Demand Routing Protocol + ProtocolIPv6Route = 43 // Routing Header for IPv6 + ProtocolIPv6Frag = 44 // Fragment Header for IPv6 + ProtocolIDRP = 45 // Inter-Domain Routing Protocol + ProtocolRSVP = 46 // Reservation Protocol + ProtocolGRE = 47 // Generic Routing Encapsulation + ProtocolDSR = 48 // Dynamic Source Routing Protocol + ProtocolBNA = 49 // BNA + ProtocolESP = 50 // Encap Security Payload + ProtocolAH = 51 // Authentication Header + ProtocolINLSP = 52 // Integrated Net Layer Security TUBA + ProtocolNARP = 54 // NBMA Address Resolution Protocol + ProtocolMOBILE = 55 // IP Mobility + ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management + ProtocolSKIP = 57 // SKIP + ProtocolIPv6ICMP = 58 // ICMP for IPv6 + ProtocolIPv6NoNxt = 59 // No Next Header for IPv6 + ProtocolIPv6Opts = 60 // Destination Options for IPv6 + ProtocolCFTP = 62 // CFTP + ProtocolSATEXPAK = 64 // SATNET 
and Backroom EXPAK + ProtocolKRYPTOLAN = 65 // Kryptolan + ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol + ProtocolIPPC = 67 // Internet Pluribus Packet Core + ProtocolSATMON = 69 // SATNET Monitoring + ProtocolVISA = 70 // VISA Protocol + ProtocolIPCV = 71 // Internet Packet Core Utility + ProtocolCPNX = 72 // Computer Protocol Network Executive + ProtocolCPHB = 73 // Computer Protocol Heart Beat + ProtocolWSN = 74 // Wang Span Network + ProtocolPVP = 75 // Packet Video Protocol + ProtocolBRSATMON = 76 // Backroom SATNET Monitoring + ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary + ProtocolWBMON = 78 // WIDEBAND Monitoring + ProtocolWBEXPAK = 79 // WIDEBAND EXPAK + ProtocolISOIP = 80 // ISO Internet Protocol + ProtocolVMTP = 81 // VMTP + ProtocolSECUREVMTP = 82 // SECURE-VMTP + ProtocolVINES = 83 // VINES + ProtocolTTP = 84 // Transaction Transport Protocol + ProtocolIPTM = 84 // Internet Protocol Traffic Manager + ProtocolNSFNETIGP = 85 // NSFNET-IGP + ProtocolDGP = 86 // Dissimilar Gateway Protocol + ProtocolTCF = 87 // TCF + ProtocolEIGRP = 88 // EIGRP + ProtocolOSPFIGP = 89 // OSPFIGP + ProtocolSpriteRPC = 90 // Sprite RPC Protocol + ProtocolLARP = 91 // Locus Address Resolution Protocol + ProtocolMTP = 92 // Multicast Transport Protocol + ProtocolAX25 = 93 // AX.25 Frames + ProtocolIPIP = 94 // IP-within-IP Encapsulation Protocol + ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro. 
+ ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation + ProtocolENCAP = 98 // Encapsulation Header + ProtocolGMTP = 100 // GMTP + ProtocolIFMP = 101 // Ipsilon Flow Management Protocol + ProtocolPNNI = 102 // PNNI over IP + ProtocolPIM = 103 // Protocol Independent Multicast + ProtocolARIS = 104 // ARIS + ProtocolSCPS = 105 // SCPS + ProtocolQNX = 106 // QNX + ProtocolAN = 107 // Active Networks + ProtocolIPComp = 108 // IP Payload Compression Protocol + ProtocolSNP = 109 // Sitara Networks Protocol + ProtocolCompaqPeer = 110 // Compaq Peer Protocol + ProtocolIPXinIP = 111 // IPX in IP + ProtocolVRRP = 112 // Virtual Router Redundancy Protocol + ProtocolPGM = 113 // PGM Reliable Transport Protocol + ProtocolL2TP = 115 // Layer Two Tunneling Protocol + ProtocolDDX = 116 // D-II Data Exchange (DDX) + ProtocolIATP = 117 // Interactive Agent Transfer Protocol + ProtocolSTP = 118 // Schedule Transfer Protocol + ProtocolSRP = 119 // SpectraLink Radio Protocol + ProtocolUTI = 120 // UTI + ProtocolSMP = 121 // Simple Message Protocol + ProtocolPTP = 123 // Performance Transparency Protocol + ProtocolISIS = 124 // ISIS over IPv4 + ProtocolFIRE = 125 // FIRE + ProtocolCRTP = 126 // Combat Radio Transport Protocol + ProtocolCRUDP = 127 // Combat Radio User Datagram + ProtocolSSCOPMCE = 128 // SSCOPMCE + ProtocolIPLT = 129 // IPLT + ProtocolSPS = 130 // Secure Packet Shield + ProtocolPIPE = 131 // Private IP Encapsulation within IP + ProtocolSCTP = 132 // Stream Control Transmission Protocol + ProtocolFC = 133 // Fibre Channel + ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE + ProtocolMobilityHeader = 135 // Mobility Header + ProtocolUDPLite = 136 // UDPLite + ProtocolMPLSinIP = 137 // MPLS-in-IP + ProtocolMANET = 138 // MANET Protocols + ProtocolHIP = 139 // Host Identity Protocol + ProtocolShim6 = 140 // Shim6 Protocol + ProtocolWESP = 141 // Wrapped Encapsulating Security Payload + ProtocolROHC = 142 // Robust Header Compression + ProtocolReserved = 255 // Reserved +) 
diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go new file mode 100644 index 00000000..2d8c07ca --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/gen.go @@ -0,0 +1,293 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates internet protocol constants and tables by +// reading IANA protocol registries. +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" +) + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "http://www.iana.org/assignments/dscp-registry/dscp-registry.xml", + parseDSCPRegistry, + }, + { + "http://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml", + parseTOSTCByte, + }, + { + "http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", + parseProtocolNumbers, + }, +} + +func main() { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") + fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url) + os.Exit(1) + } + if err := r.parse(&bb, resp.Body); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } 
+ if err := ioutil.WriteFile("const.go", b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func parseDSCPRegistry(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var dr dscpRegistry + if err := dec.Decode(&dr); err != nil { + return err + } + drs := dr.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated) + fmt.Fprintf(w, "const (\n") + for _, dr := range drs { + fmt.Fprintf(w, "DiffServ%s = %#x", dr.Name, dr.Value) + fmt.Fprintf(w, "// %s\n", dr.OrigName) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type dscpRegistry struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` + RegTitle string `xml:"registry>title"` + PoolRecords []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>record"` + Records []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>registry>record"` +} + +type canonDSCPRecord struct { + OrigName string + Name string + Value int +} + +func (drr *dscpRegistry) escape() []canonDSCPRecord { + drs := make([]canonDSCPRecord, len(drr.Records)) + sr := strings.NewReplacer( + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, dr := range drr.Records { + s := strings.TrimSpace(dr.Name) + drs[i].OrigName = s + drs[i].Name = sr.Replace(s) + n, err := strconv.ParseUint(dr.Space, 2, 8) + if err != nil { + continue + } + drs[i].Value = int(n) << 2 + } + return drs +} + +func parseTOSTCByte(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var ttb tosTCByte + if err := dec.Decode(&ttb); err != nil { + return err + } + trs := ttb.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", ttb.Title, ttb.Updated) + fmt.Fprintf(w, "const (\n") + for _, tr := range trs { + fmt.Fprintf(w, "%s = %#x", tr.Keyword, tr.Value) + fmt.Fprintf(w, "// %s\n", tr.OrigKeyword) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type tosTCByte struct { + XMLName 
xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` + RegTitle string `xml:"registry>title"` + Records []struct { + Binary string `xml:"binary"` + Keyword string `xml:"keyword"` + } `xml:"registry>record"` +} + +type canonTOSTCByteRecord struct { + OrigKeyword string + Keyword string + Value int +} + +func (ttb *tosTCByte) escape() []canonTOSTCByteRecord { + trs := make([]canonTOSTCByteRecord, len(ttb.Records)) + sr := strings.NewReplacer( + "Capable", "", + "(", "", + ")", "", + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, tr := range ttb.Records { + s := strings.TrimSpace(tr.Keyword) + trs[i].OrigKeyword = s + ss := strings.Split(s, " ") + if len(ss) > 1 { + trs[i].Keyword = strings.Join(ss[1:], " ") + } else { + trs[i].Keyword = ss[0] + } + trs[i].Keyword = sr.Replace(trs[i].Keyword) + n, err := strconv.ParseUint(tr.Binary, 2, 8) + if err != nil { + continue + } + trs[i].Value = int(n) + } + return trs +} + +func parseProtocolNumbers(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var pn protocolNumbers + if err := dec.Decode(&pn); err != nil { + return err + } + prs := pn.escape() + prs = append([]canonProtocolRecord{{ + Name: "IP", + Descr: "IPv4 encapsulation, pseudo protocol number", + Value: 0, + }}, prs...) 
+ fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value) + s := pr.Descr + if s == "" { + s = pr.OrigName + } + fmt.Fprintf(w, "// %s\n", s) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type protocolNumbers struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + RegTitle string `xml:"registry>title"` + Note string `xml:"registry>note"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + Descr string `xml:"description"` + } `xml:"registry>record"` +} + +type canonProtocolRecord struct { + OrigName string + Name string + Descr string + Value int +} + +func (pn *protocolNumbers) escape() []canonProtocolRecord { + prs := make([]canonProtocolRecord, len(pn.Records)) + sr := strings.NewReplacer( + "-in-", "in", + "-within-", "within", + "-over-", "over", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range pn.Records { + if strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "deprecated") { + continue + } + prs[i].OrigName = pr.Name + s := strings.TrimSpace(pr.Name) + switch pr.Name { + case "ISIS over IPv4": + prs[i].Name = "ISIS" + case "manet": + prs[i].Name = "MANET" + default: + prs[i].Name = sr.Replace(s) + } + ss := strings.Split(pr.Descr, "\n") + for i := range ss { + ss[i] = strings.TrimSpace(ss[i]) + } + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/internal/nettest/error_posix.go b/vendor/golang.org/x/net/internal/nettest/error_posix.go new file mode 100644 index 00000000..963ed996 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/error_posix.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package nettest + +import ( + "os" + "syscall" +) + +func protocolNotSupported(err error) bool { + switch err := err.(type) { + case syscall.Errno: + switch err { + case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: + return true + } + case *os.SyscallError: + switch err := err.Err.(type) { + case syscall.Errno: + switch err { + case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: + return true + } + } + } + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/error_stub.go b/vendor/golang.org/x/net/internal/nettest/error_stub.go new file mode 100644 index 00000000..3c74d812 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/error_stub.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package nettest + +func protocolNotSupported(err error) bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/interface.go b/vendor/golang.org/x/net/internal/nettest/interface.go new file mode 100644 index 00000000..53ae13a9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/interface.go @@ -0,0 +1,94 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import "net" + +// IsMulticastCapable reports whether ifi is an IP multicast-capable +// network interface. Network must be "ip", "ip4" or "ip6". 
+func IsMulticastCapable(network string, ifi *net.Interface) (net.IP, bool) { + switch network { + case "ip", "ip4", "ip6": + default: + return nil, false + } + if ifi == nil || ifi.Flags&net.FlagUp == 0 || ifi.Flags&net.FlagMulticast == 0 { + return nil, false + } + return hasRoutableIP(network, ifi) +} + +// RoutedInterface returns a network interface that can route IP +// traffic and satisfies flags. It returns nil when an appropriate +// network interface is not found. Network must be "ip", "ip4" or +// "ip6". +func RoutedInterface(network string, flags net.Flags) *net.Interface { + switch network { + case "ip", "ip4", "ip6": + default: + return nil + } + ift, err := net.Interfaces() + if err != nil { + return nil + } + for _, ifi := range ift { + if ifi.Flags&flags != flags { + continue + } + if _, ok := hasRoutableIP(network, &ifi); !ok { + continue + } + return &ifi + } + return nil +} + +func hasRoutableIP(network string, ifi *net.Interface) (net.IP, bool) { + ifat, err := ifi.Addrs() + if err != nil { + return nil, false + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := routableIP(network, ifa.IP); ip != nil { + return ip, true + } + case *net.IPNet: + if ip := routableIP(network, ifa.IP); ip != nil { + return ip, true + } + } + } + return nil, false +} + +func routableIP(network string, ip net.IP) net.IP { + if !ip.IsLoopback() && !ip.IsLinkLocalUnicast() && !ip.IsGlobalUnicast() { + return nil + } + switch network { + case "ip4": + if ip := ip.To4(); ip != nil { + return ip + } + case "ip6": + if ip.IsLoopback() { // addressing scope of the loopback address depends on each implementation + return nil + } + if ip := ip.To16(); ip != nil && ip.To4() == nil { + return ip + } + default: + if ip := ip.To4(); ip != nil { + return ip + } + if ip := ip.To16(); ip != nil { + return ip + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit.go 
b/vendor/golang.org/x/net/internal/nettest/rlimit.go new file mode 100644 index 00000000..bb34aec0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/rlimit.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +const defaultMaxOpenFiles = 256 + +// MaxOpenFiles returns the maximum number of open files for the +// caller's process. +func MaxOpenFiles() int { return maxOpenFiles() } diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit_stub.go b/vendor/golang.org/x/net/internal/nettest/rlimit_stub.go new file mode 100644 index 00000000..102bef93 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/rlimit_stub.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package nettest + +func maxOpenFiles() int { return defaultMaxOpenFiles } diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit_unix.go b/vendor/golang.org/x/net/internal/nettest/rlimit_unix.go new file mode 100644 index 00000000..eb4312ce --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/rlimit_unix.go @@ -0,0 +1,17 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package nettest + +import "syscall" + +func maxOpenFiles() int { + var rlim syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil { + return defaultMaxOpenFiles + } + return int(rlim.Cur) +} diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit_windows.go b/vendor/golang.org/x/net/internal/nettest/rlimit_windows.go new file mode 100644 index 00000000..de927b56 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/rlimit_windows.go @@ -0,0 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +func maxOpenFiles() int { return 4 * defaultMaxOpenFiles /* actually it's 16581375 */ } diff --git a/vendor/golang.org/x/net/internal/nettest/stack.go b/vendor/golang.org/x/net/internal/nettest/stack.go new file mode 100644 index 00000000..e07c015f --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/stack.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nettest provides utilities for IP testing. +package nettest // import "golang.org/x/net/internal/nettest" + +import "net" + +// SupportsIPv4 reports whether the platform supports IPv4 networking +// functionality. +func SupportsIPv4() bool { + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err != nil { + return false + } + ln.Close() + return true +} + +// SupportsIPv6 reports whether the platform supports IPv6 networking +// functionality. +func SupportsIPv6() bool { + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + return false + } + ln.Close() + return true +} + +// ProtocolNotSupported reports whether err is a protocol not +// supported error. 
+func ProtocolNotSupported(err error) bool { + return protocolNotSupported(err) +} diff --git a/vendor/golang.org/x/net/internal/nettest/stack_stub.go b/vendor/golang.org/x/net/internal/nettest/stack_stub.go new file mode 100644 index 00000000..1b5fde1a --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/stack_stub.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package nettest + +import ( + "fmt" + "runtime" +) + +// SupportsRawIPSocket reports whether the platform supports raw IP +// sockets. +func SupportsRawIPSocket() (string, bool) { + return fmt.Sprintf("not supported on %s", runtime.GOOS), false +} diff --git a/vendor/golang.org/x/net/internal/nettest/stack_unix.go b/vendor/golang.org/x/net/internal/nettest/stack_unix.go new file mode 100644 index 00000000..af89229f --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/stack_unix.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package nettest + +import ( + "fmt" + "os" + "runtime" +) + +// SupportsRawIPSocket reports whether the platform supports raw IP +// sockets. +func SupportsRawIPSocket() (string, bool) { + if os.Getuid() != 0 { + return fmt.Sprintf("must be root on %s", runtime.GOOS), false + } + return "", true +} diff --git a/vendor/golang.org/x/net/internal/nettest/stack_windows.go b/vendor/golang.org/x/net/internal/nettest/stack_windows.go new file mode 100644 index 00000000..a21f4993 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/stack_windows.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import ( + "fmt" + "runtime" + "syscall" +) + +// SupportsRawIPSocket reports whether the platform supports raw IP +// sockets. +func SupportsRawIPSocket() (string, bool) { + // From http://msdn.microsoft.com/en-us/library/windows/desktop/ms740548.aspx: + // Note: To use a socket of type SOCK_RAW requires administrative privileges. + // Users running Winsock applications that use raw sockets must be a member of + // the Administrators group on the local computer, otherwise raw socket calls + // will fail with an error code of WSAEACCES. On Windows Vista and later, access + // for raw sockets is enforced at socket creation. In earlier versions of Windows, + // access for raw sockets is enforced during other socket operations. + s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, 0) + if err == syscall.WSAEACCES { + return fmt.Sprintf("no access to raw socket allowed on %s", runtime.GOOS), false + } + if err != nil { + return err.Error(), false + } + syscall.Closesocket(s) + return "", true +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go index 3f90b730..1119f344 100644 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package timeseries implements a time series structure for stats collection. -package timeseries +package timeseries // import "golang.org/x/net/internal/timeseries" import ( "fmt" diff --git a/vendor/golang.org/x/net/ipv4/control.go b/vendor/golang.org/x/net/ipv4/control.go new file mode 100644 index 00000000..8cadfd7f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control.go @@ -0,0 +1,70 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "fmt" + "net" + "sync" +) + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +type ControlFlags uint + +const ( + FlagTTL ControlFlags = 1 << iota // pass the TTL on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet +) + +// A ControlMessage represents per packet basis IP-level socket options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn or RawConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn or RawConn allows to send the options + // to the protocol stack. + // + TTL int // time-to-live, receiving only + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "<nil>" + } + return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex) +} + +// Ancillary data socket options +const ( + ctlTTL = iota // header field + ctlSrc // header field + ctlDst // header field + ctlInterface // inbound or outbound interface + ctlPacketInfo // inbound or outbound packet path + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. 
+type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go new file mode 100644 index 00000000..33d8bc8b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func marshalDst(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIP + m.Type = sysIP_RECVDSTADDR + m.SetLen(syscall.CmsgLen(net.IPv4len)) + return b[syscall.CmsgSpace(net.IPv4len):] +} + +func parseDst(cm *ControlMessage, b []byte) { + cm.Dst = b[:net.IPv4len] +} + +func marshalInterface(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIP + m.Type = sysIP_RECVIF + m.SetLen(syscall.CmsgLen(syscall.SizeofSockaddrDatalink)) + return b[syscall.CmsgSpace(syscall.SizeofSockaddrDatalink):] +} + +func parseInterface(cm *ControlMessage, b []byte) { + sadl := (*syscall.SockaddrDatalink)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(sadl.Index) +} diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go new file mode 100644 index 00000000..444782f3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin linux + +package ipv4 + +import ( + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIP + m.Type = sysIP_PKTINFO + m.SetLen(syscall.CmsgLen(sysSizeofInetPktinfo)) + if cm != nil { + pi := (*sysInetPktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + if ip := cm.Src.To4(); ip != nil { + copy(pi.Spec_dst[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return b[syscall.CmsgSpace(sysSizeofInetPktinfo):] +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*sysInetPktinfo)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(pi.Ifindex) + cm.Dst = pi.Addr[:] +} diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go new file mode 100644 index 00000000..4d850719 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv4 + +func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} + +func newControlMessage(opt *rawOpt) []byte { + return nil +} + +func parseControlMessage(b []byte) (*ControlMessage, error) { + return nil, errOpNoSupport +} + +func marshalControlMessage(cm *ControlMessage) []byte { + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go new file mode 100644 index 00000000..3000c52e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -0,0 +1,164 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv4 + +import ( + "os" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if cf&FlagTTL != 0 && sockOpts[ssoReceiveTTL].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceiveTTL], boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTTL) + } else { + opt.clear(FlagTTL) + } + } + if sockOpts[ssoPacketInfo].name > 0 { + if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { + if err := setInt(fd, &sockOpts[ssoPacketInfo], boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & (FlagSrc | FlagDst | FlagInterface)) + } else { + opt.clear(cf & (FlagSrc | FlagDst | FlagInterface)) + } + } + } else { + if cf&FlagDst != 0 && sockOpts[ssoReceiveDst].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceiveDst], boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagDst) + } else { + opt.clear(FlagDst) + } + } + if cf&FlagInterface != 0 && sockOpts[ssoReceiveInterface].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceiveInterface], boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagInterface) + } else { + opt.clear(FlagInterface) + } + } + } + return nil +} + +func newControlMessage(opt *rawOpt) (oob []byte) { + opt.RLock() + var l int + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlTTL].length) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlDst].length) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlInterface].length) + } + } + if l > 0 { + oob = make([]byte, l) + b := oob + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + 
b = ctlOpts[ctlTTL].marshal(b, nil) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + b = ctlOpts[ctlPacketInfo].marshal(b, nil) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + b = ctlOpts[ctlDst].marshal(b, nil) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + b = ctlOpts[ctlInterface].marshal(b, nil) + } + } + } + opt.RUnlock() + return +} + +func parseControlMessage(b []byte) (*ControlMessage, error) { + if len(b) == 0 { + return nil, nil + } + cmsgs, err := syscall.ParseSocketControlMessage(b) + if err != nil { + return nil, os.NewSyscallError("parse socket control message", err) + } + cm := &ControlMessage{} + for _, m := range cmsgs { + if m.Header.Level != iana.ProtocolIP { + continue + } + switch int(m.Header.Type) { + case ctlOpts[ctlTTL].name: + ctlOpts[ctlTTL].parse(cm, m.Data[:]) + case ctlOpts[ctlDst].name: + ctlOpts[ctlDst].parse(cm, m.Data[:]) + case ctlOpts[ctlInterface].name: + ctlOpts[ctlInterface].parse(cm, m.Data[:]) + case ctlOpts[ctlPacketInfo].name: + ctlOpts[ctlPacketInfo].parse(cm, m.Data[:]) + } + } + return cm, nil +} + +func marshalControlMessage(cm *ControlMessage) (oob []byte) { + if cm == nil { + return nil + } + var l int + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { + pktinfo = true + l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) + } + if l > 0 { + oob = make([]byte, l) + b := oob + if pktinfo { + b = ctlOpts[ctlPacketInfo].marshal(b, cm) + } + } + return +} + +func marshalTTL(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIP + m.Type = sysIP_RECVTTL + m.SetLen(syscall.CmsgLen(1)) + return b[syscall.CmsgSpace(1):] +} + +func parseTTL(cm *ControlMessage, b []byte) { + cm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0]))) +} diff --git a/vendor/golang.org/x/net/ipv4/control_windows.go 
b/vendor/golang.org/x/net/ipv4/control_windows.go new file mode 100644 index 00000000..800f6377 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_windows.go @@ -0,0 +1,27 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "syscall" + +func setControlMessage(fd syscall.Handle, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} + +func newControlMessage(opt *rawOpt) []byte { + // TODO(mikio): implement this + return nil +} + +func parseControlMessage(b []byte) (*ControlMessage, error) { + // TODO(mikio): implement this + return nil, syscall.EWINDOWS +} + +func marshalControlMessage(cm *ControlMessage) []byte { + // TODO(mikio): implement this + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/golang.org/x/net/ipv4/defs_darwin.go new file mode 100644 index 00000000..731d56a7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_darwin.go @@ -0,0 +1,77 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_STRIPHDR = C.IP_STRIPHDR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq + sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn + sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sysSizeofGroupReq = C.sizeof_struct_group_req + sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sysSockaddrStorage C.struct_sockaddr_storage + +type sysSockaddrInet C.struct_sockaddr_in + +type sysInetPktinfo 
C.struct_in_pktinfo + +type sysIPMreq C.struct_ip_mreq + +type sysIPMreqn C.struct_ip_mreqn + +type sysIPMreqSource C.struct_ip_mreq_source + +type sysGroupReq C.struct_group_req + +type sysGroupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go new file mode 100644 index 00000000..08e3b855 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type sysIPMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/golang.org/x/net/ipv4/defs_freebsd.go new file mode 100644 index 00000000..f12ca327 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_freebsd.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_SENDSRCADDR = C.IP_SENDSRCADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_ONESBCAST = C.IP_ONESBCAST + sysIP_BINDANY = C.IP_BINDANY + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_MINTTL = C.IP_MINTTL + sysIP_DONTFRAG = C.IP_DONTFRAG + sysIP_RECVTOS = C.IP_RECVTOS + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq + sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn + sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sysSizeofGroupReq = C.sizeof_struct_group_req + sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sysSockaddrStorage C.struct_sockaddr_storage + +type sysSockaddrInet C.struct_sockaddr_in + +type sysIPMreq C.struct_ip_mreq + +type sysIPMreqn 
C.struct_ip_mreqn + +type sysIPMreqSource C.struct_ip_mreq_source + +type sysGroupReq C.struct_group_req + +type sysGroupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/golang.org/x/net/ipv4/defs_linux.go new file mode 100644 index 00000000..fdba148a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_linux.go @@ -0,0 +1,111 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <time.h> + +#include <linux/errqueue.h> +#include <linux/icmp.h> +#include <linux/in.h> +*/ +import "C" + +const ( + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_PKTOPTIONS = C.IP_PKTOPTIONS + sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER + sysIP_RECVERR = C.IP_RECVERR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_RECVTOS = C.IP_RECVTOS + sysIP_MTU = C.IP_MTU + sysIP_FREEBIND = C.IP_FREEBIND + sysIP_TRANSPARENT = C.IP_TRANSPARENT + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR + sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR + sysIP_MINTTL = C.IP_MINTTL + sysIP_NODEFRAG = C.IP_NODEFRAG + sysIP_UNICAST_IF = C.IP_UNICAST_IF + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_MSFILTER = C.IP_MSFILTER + sysMCAST_JOIN_GROUP = 
C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL + + //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT + //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT + //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO + //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE + //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE + //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT + + sysICMP_FILTER = C.ICMP_FILTER + + sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE + sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL + sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP + sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6 + sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS + sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING + + sysSizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo + sysSizeofSockExtendedErr = C.sizeof_struct_sock_extended_err + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq + sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn + sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sysSizeofGroupReq = C.sizeof_struct_group_req + sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sysSizeofICMPFilter = C.sizeof_struct_icmp_filter +) + +type sysKernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sysSockaddrInet C.struct_sockaddr_in + +type sysInetPktinfo C.struct_in_pktinfo + +type sysSockExtendedErr C.struct_sock_extended_err + +type sysIPMreq C.struct_ip_mreq + +type sysIPMreqn C.struct_ip_mreqn + +type sysIPMreqSource C.struct_ip_mreq_source + +type sysGroupReq C.struct_group_req + +type sysGroupSourceReq C.struct_group_source_req + +type sysICMPFilter 
C.struct_icmp_filter diff --git a/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/golang.org/x/net/ipv4/defs_netbsd.go new file mode 100644 index 00000000..8642354f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_netbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type sysIPMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/golang.org/x/net/ipv4/defs_openbsd.go new file mode 100644 index 00000000..8642354f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_openbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type sysIPMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/golang.org/x/net/ipv4/defs_solaris.go new file mode 100644 index 00000000..bb74afa4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVSLLA = C.IP_RECVSLLA + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_NEXTHOP = C.IP_NEXTHOP + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + sysIP_DONTFRAG = C.IP_DONTFRAG + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC + sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL + sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + + sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq + sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source +) + +type sysInetPktinfo C.struct_in_pktinfo + +type sysIPMreq C.struct_ip_mreq + +type sysIPMreqSource C.struct_ip_mreq_source diff --git a/vendor/golang.org/x/net/ipv4/dgramopt_posix.go b/vendor/golang.org/x/net/ipv4/dgramopt_posix.go new file mode 100644 index 00000000..103c4f6d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/dgramopt_posix.go @@ -0,0 +1,251 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// MulticastTTL returns the time-to-live field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastTTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return 0, err + } + return getInt(fd, &sockOpts[ssoMulticastTTL]) +} + +// SetMulticastTTL sets the time-to-live field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoMulticastTTL], ttl) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return nil, err + } + return getInterface(fd, &sockOpts[ssoMulticastInterface]) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInterface(fd, &sockOpts[ssoMulticastInterface], ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return false, err + } + on, err := getInt(fd, &sockOpts[ssoMulticastLoopback]) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. 
+func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoMulticastLoopback], boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return setGroup(fd, &sockOpts[ssoJoinGroup], ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return setGroup(fd, &sockOpts[ssoLeaveGroup], ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. 
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoJoinSourceGroup], ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoLeaveSourceGroup], ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoBlockSourceGroup], ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. 
+func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoUnblockSourceGroup], ifi, grp, src) +} + +// ICMPFilter returns an ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return nil, err + } + return getICMPFilter(fd, &sockOpts[ssoICMPFilter]) +} + +// SetICMPFilter deploys the ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setICMPFilter(fd, &sockOpts[ssoICMPFilter], f) +} diff --git a/vendor/golang.org/x/net/ipv4/dgramopt_stub.go b/vendor/golang.org/x/net/ipv4/dgramopt_stub.go new file mode 100644 index 00000000..b74df693 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/dgramopt_stub.go @@ -0,0 +1,106 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv4 + +import "net" + +// MulticastTTL returns the time-to-live field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastTTL() (int, error) { + return 0, errOpNoSupport +} + +// SetMulticastTTL sets the time-to-live field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastTTL(ttl int) error { + return errOpNoSupport +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. 
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + return nil, errOpNoSupport +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + return errOpNoSupport +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + return false, errOpNoSupport +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + return errOpNoSupport +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + return errOpNoSupport +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + return errOpNoSupport +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. 
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// ICMPFilter returns an ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +// SetICMPFilter deploys the ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go new file mode 100644 index 00000000..9a79badf --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -0,0 +1,242 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv4 implements IP-level socket options for the Internet +// Protocol version 4. +// +// The package provides IP-level socket options that allow +// manipulation of IPv4 facilities. +// +// The IPv4 protocol and basic host requirements for IPv4 are defined +// in RFC 791 and RFC 1122. 
+// Host extensions for multicasting and socket interface extensions +// for multicast source filters are defined in RFC 1112 and RFC 3678. +// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC +// 3376. +// Source-specific multicast is defined in RFC 4607. +// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv4 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, ipv4.Conn is used to set the type-of-service field on +// the IPv4 header for each packet. +// +// ln, err := net.Listen("tcp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv4.NewConn(c).SetTOS(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPconn which are created as network connections that use the +// IPv4 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.IPv4(224, 0, 0, 250) +// +// First, an application listens to an appropriate address with an +// appropriate service port. 
+// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv4 and Ethernet. +// +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of ipv4.PacketConn is used to enable control +// message transmissons. +// +// if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, cm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if cm.Dst.IsMulticast() { +// if cm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. 
+// +// p.SetTOS(0x0) +// p.SetTTL(16) +// if _, err := p.WriteTo(data, nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// for _, ifi := range []*net.Interface{en0, en1} { +// if err := p.SetMulticastInterface(ifi); err != nil { +// // error handling +// } +// p.SetMulticastTTL(2) +// if _, err := p.WriteTo(data, nil, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn or RawConn may join multiple +// multicast groups. For example, a UDP listener with port 1024 might +// join two different groups across over two different network +// interfaces by using: +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. 
+// +// c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv4.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// p2 := ipv4.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn or RawConn on IGMPv3 supported +// platform is able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)} +// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)}) +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. 
+// +// exclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on IGMPv3 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// IGMPv1 or IGMPv2 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv4 // import "golang.org/x/net/ipv4" diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go new file mode 100644 index 00000000..bc45bf05 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/endpoint.go @@ -0,0 +1,187 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "time" +) + +// A Conn represents a network endpoint that uses the IPv4 transport. +// It is used to control basic IP-level socket options such as TOS and +// TTL. +type Conn struct { + genericOpt +} + +type genericOpt struct { + net.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + return &Conn{ + genericOpt: genericOpt{Conn: c}, + } +} + +// A PacketConn represents a packet network endpoint that uses the +// IPv4 transport. It is used to control several IP-level socket +// options including multicasting. 
It also provides datagram based +// network I/O methods specific to the IPv4 and higher layer protocols +// such as UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + net.PacketConn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.PacketConn != nil } + +// SetControlMessage sets the per packet IP-level socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + fd, err := c.payloadHandler.sysfd() + if err != nil { + return err + } + return setControlMessage(fd, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. 
+func NewPacketConn(c net.PacketConn) *PacketConn { + p := &PacketConn{ + genericOpt: genericOpt{Conn: c.(net.Conn)}, + dgramOpt: dgramOpt{PacketConn: c}, + payloadHandler: payloadHandler{PacketConn: c}, + } + if _, ok := c.(*net.IPConn); ok && sockOpts[ssoStripHeader].name > 0 { + if fd, err := p.payloadHandler.sysfd(); err == nil { + setInt(fd, &sockOpts[ssoStripHeader], boolint(true)) + } + } + return p +} + +// A RawConn represents a packet network endpoint that uses the IPv4 +// transport. It is used to control several IP-level socket options +// including IPv4 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv4 and higher layer +// protocols that handle IPv4 datagram directly such as OSPF, GRE. +type RawConn struct { + genericOpt + dgramOpt + packetHandler +} + +// SetControlMessage sets the per packet IP-level socket options. +func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + fd, err := c.packetHandler.sysfd() + if err != nil { + return err + } + return setControlMessage(fd, &c.packetHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *RawConn) SetDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.c.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *RawConn) SetReadDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.c.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *RawConn) SetWriteDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.c.SetWriteDeadline(t) +} + +// Close closes the endpoint. 
+func (c *RawConn) Close() error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.c.Close() +} + +// NewRawConn returns a new RawConn using c as its underlying +// transport. +func NewRawConn(c net.PacketConn) (*RawConn, error) { + r := &RawConn{ + genericOpt: genericOpt{Conn: c.(net.Conn)}, + dgramOpt: dgramOpt{PacketConn: c}, + packetHandler: packetHandler{c: c.(*net.IPConn)}, + } + fd, err := r.packetHandler.sysfd() + if err != nil { + return nil, err + } + if err := setInt(fd, &sockOpts[ssoHeaderPrepend], boolint(true)); err != nil { + return nil, err + } + return r, nil +} diff --git a/vendor/golang.org/x/net/ipv4/example_test.go b/vendor/golang.org/x/net/ipv4/example_test.go new file mode 100644 index 00000000..4f5e2f31 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/example_test.go @@ -0,0 +1,224 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "fmt" + "log" + "net" + "os" + "runtime" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" +) + +func ExampleConn_markingTCP() { + ln, err := net.Listen("tcp", "0.0.0.0:1024") + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + for { + c, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + if c.RemoteAddr().(*net.TCPAddr).IP.To4() != nil { + p := ipv4.NewConn(c) + if err := p.SetTOS(0x28); err != nil { // DSCP AF11 + log.Fatal(err) + } + if err := p.SetTTL(128); err != nil { + log.Fatal(err) + } + } + if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { + log.Fatal(err) + } + }(c) + } +} + +func ExamplePacketConn_servingOneShotMulticastDNS() { + c, err := net.ListenPacket("udp4", "0.0.0.0:5353") // mDNS over UDP + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + mDNSLinkLocal := net.UDPAddr{IP: net.IPv4(224, 0, 0, 251)} + if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &mDNSLinkLocal) + if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { + log.Fatal(err) + } + + b := make([]byte, 1500) + for { + _, cm, peer, err := p.ReadFrom(b) + if err != nil { + log.Fatal(err) + } + if !cm.Dst.IsMulticast() || !cm.Dst.Equal(mDNSLinkLocal.IP) { + continue + } + answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this + if _, err := p.WriteTo(answers, nil, peer); err != nil { + log.Fatal(err) + } + } +} + +func ExamplePacketConn_tracingIPPacketRoute() { + // Tracing an IP packet route to www.google.com. 
+ + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + log.Fatal(err) + } + var dst net.IPAddr + for _, ip := range ips { + if ip.To4() != nil { + dst.IP = ip + fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) + break + } + } + if dst.IP == nil { + log.Fatal("no A record found") + } + + c, err := net.ListenPacket("ip4:1", "0.0.0.0") // ICMP for IPv4 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + + if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagSrc|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil { + log.Fatal(err) + } + wm := icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + } + + rb := make([]byte, 1500) + for i := 1; i <= 64; i++ { // up to 64 hops + wm.Body.(*icmp.Echo).Seq = i + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + if err := p.SetTTL(i); err != nil { + log.Fatal(err) + } + + // In the real world usually there are several + // multiple traffic-engineered paths for each hop. + // You may need to probe a few times to each hop. + begin := time.Now() + if _, err := p.WriteTo(wb, nil, &dst); err != nil { + log.Fatal(err) + } + if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + log.Fatal(err) + } + n, cm, peer, err := p.ReadFrom(rb) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + fmt.Printf("%v\t*\n", i) + continue + } + log.Fatal(err) + } + rm, err := icmp.ParseMessage(1, rb[:n]) + if err != nil { + log.Fatal(err) + } + rtt := time.Since(begin) + + // In the real world you need to determine whether the + // received message is yours using ControlMessage.Src, + // ControlMessage.Dst, icmp.Echo.ID and icmp.Echo.Seq. 
+ switch rm.Type { + case ipv4.ICMPTypeTimeExceeded: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) + case ipv4.ICMPTypeEchoReply: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) + return + default: + log.Printf("unknown ICMP message: %+v\n", rm) + } + } +} + +func ExampleRawConn_advertisingOSPFHello() { + c, err := net.ListenPacket("ip4:89", "0.0.0.0") // OSPF for IPv4 + if err != nil { + log.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + log.Fatal(err) + } + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + allSPFRouters := net.IPAddr{IP: net.IPv4(224, 0, 0, 5)} + if err := r.JoinGroup(en0, &allSPFRouters); err != nil { + log.Fatal(err) + } + defer r.LeaveGroup(en0, &allSPFRouters) + + hello := make([]byte, 24) // fake hello data, you need to implement this + ospf := make([]byte, 24) // fake ospf header, you need to implement this + ospf[0] = 2 // version 2 + ospf[1] = 1 // hello packet + ospf = append(ospf, hello...) + iph := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 0xc0, // DSCP CS6 + TotalLen: ipv4.HeaderLen + len(ospf), + TTL: 1, + Protocol: 89, + Dst: allSPFRouters.IP.To4(), + } + + var cm *ipv4.ControlMessage + switch runtime.GOOS { + case "darwin", "linux": + cm = &ipv4.ControlMessage{IfIndex: en0.Index} + default: + if err := r.SetMulticastInterface(en0); err != nil { + log.Fatal(err) + } + } + if err := r.WriteTo(iph, ospf, cm); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go new file mode 100644 index 00000000..0bc7c155 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/gen.go @@ -0,0 +1,208 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + // The ipv4 package still supports go1.2, and so we need to + // take care of additional platforms in go1.3 and above for + // working with go1.2. 
+ switch { + case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris": + b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv4\n"), 1) + case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le"): + b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv4\n"), 1) + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", + parseICMPv4Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "package ipv4\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv4Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv4Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + 
fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigDescr) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv4Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv4ParamRecord struct { + OrigDescr string + Descr string + Value int +} + +func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Descr, "Reserved") || + strings.Contains(pr.Descr, "Unassigned") || + strings.Contains(pr.Descr, "Deprecated") || + strings.Contains(pr.Descr, "Experiment") || + strings.Contains(pr.Descr, "experiment") { + continue + } + ss := strings.Split(pr.Descr, "\n") + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + s := strings.TrimSpace(prs[i].Descr) + prs[i].OrigDescr = s + prs[i].Descr = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git 
a/vendor/golang.org/x/net/ipv4/genericopt_posix.go b/vendor/golang.org/x/net/ipv4/genericopt_posix.go new file mode 100644 index 00000000..fefa0be3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/genericopt_posix.go @@ -0,0 +1,59 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd windows + +package ipv4 + +import "syscall" + +// TOS returns the type-of-service field value for outgoing packets. +func (c *genericOpt) TOS() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return 0, err + } + return getInt(fd, &sockOpts[ssoTOS]) +} + +// SetTOS sets the type-of-service field value for future outgoing +// packets. +func (c *genericOpt) SetTOS(tos int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoTOS], tos) +} + +// TTL returns the time-to-live field value for outgoing packets. +func (c *genericOpt) TTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return 0, err + } + return getInt(fd, &sockOpts[ssoTTL]) +} + +// SetTTL sets the time-to-live field value for future outgoing +// packets. +func (c *genericOpt) SetTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoTTL], ttl) +} diff --git a/vendor/golang.org/x/net/ipv4/genericopt_stub.go b/vendor/golang.org/x/net/ipv4/genericopt_stub.go new file mode 100644 index 00000000..1817badb --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/genericopt_stub.go @@ -0,0 +1,29 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 solaris + +package ipv4 + +// TOS returns the type-of-service field value for outgoing packets. +func (c *genericOpt) TOS() (int, error) { + return 0, errOpNoSupport +} + +// SetTOS sets the type-of-service field value for future outgoing +// packets. +func (c *genericOpt) SetTOS(tos int) error { + return errOpNoSupport +} + +// TTL returns the time-to-live field value for outgoing packets. +func (c *genericOpt) TTL() (int, error) { + return 0, errOpNoSupport +} + +// SetTTL sets the time-to-live field value for future outgoing +// packets. +func (c *genericOpt) SetTTL(ttl int) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go new file mode 100644 index 00000000..363d9c21 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -0,0 +1,132 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "encoding/binary" + "fmt" + "net" + "runtime" + "syscall" +) + +const ( + Version = 4 // protocol version + HeaderLen = 20 // header length without extension headers + maxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields +) + +type HeaderFlags int + +const ( + MoreFragments HeaderFlags = 1 << iota // more fragments flag + DontFragment // don't fragment flag +) + +// A Header represents an IPv4 header. 
+type Header struct { + Version int // protocol version + Len int // header length + TOS int // type-of-service + TotalLen int // packet total length + ID int // identification + Flags HeaderFlags // flags + FragOff int // fragment offset + TTL int // time-to-live + Protocol int // next protocol + Checksum int // checksum + Src net.IP // source address + Dst net.IP // destination address + Options []byte // options, extension headers +} + +func (h *Header) String() string { + if h == nil { + return "<nil>" + } + return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst) +} + +// Marshal returns the binary encoding of the IPv4 header h. +func (h *Header) Marshal() ([]byte, error) { + if h == nil { + return nil, syscall.EINVAL + } + if h.Len < HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := HeaderLen + len(h.Options) + b := make([]byte, hdrlen) + b[0] = byte(Version<<4 | (hdrlen >> 2 & 0x0f)) + b[1] = byte(h.TOS) + flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) + switch runtime.GOOS { + case "darwin", "dragonfly", "freebsd", "netbsd": + nativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + nativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + default: + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + binary.BigEndian.PutUint16(b[4:6], uint16(h.ID)) + b[8] = byte(h.TTL) + b[9] = byte(h.Protocol) + binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum)) + if ip := h.Src.To4(); ip != nil { + copy(b[12:16], ip[:net.IPv4len]) + } + if ip := h.Dst.To4(); ip != nil { + copy(b[16:20], ip[:net.IPv4len]) + } else { + return nil, errMissingAddress + } + if len(h.Options) > 0 { + copy(b[HeaderLen:], h.Options) + } + return b, nil +} + +// ParseHeader parses b as an IPv4 header. 
+func ParseHeader(b []byte) (*Header, error) { + if len(b) < HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return nil, errBufferTooShort + } + h := &Header{ + Version: int(b[0] >> 4), + Len: hdrlen, + TOS: int(b[1]), + ID: int(binary.BigEndian.Uint16(b[4:6])), + TTL: int(b[8]), + Protocol: int(b[9]), + Checksum: int(binary.BigEndian.Uint16(b[10:12])), + Src: net.IPv4(b[12], b[13], b[14], b[15]), + Dst: net.IPv4(b[16], b[17], b[18], b[19]), + } + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(nativeEndian.Uint16(b[6:8])) + case "freebsd": + h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + if freebsdVersion < 1000000 { + h.TotalLen += hdrlen + } + h.FragOff = int(nativeEndian.Uint16(b[6:8])) + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + if hdrlen-HeaderLen > 0 { + h.Options = make([]byte, hdrlen-HeaderLen) + copy(h.Options, b[HeaderLen:]) + } + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv4/header_test.go b/vendor/golang.org/x/net/ipv4/header_test.go new file mode 100644 index 00000000..ac89358c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header_test.go @@ -0,0 +1,119 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "bytes" + "net" + "reflect" + "runtime" + "strings" + "testing" +) + +var ( + wireHeaderFromKernel = [HeaderLen]byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + } + wireHeaderToKernel = [HeaderLen]byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + } + wireHeaderFromTradBSDKernel = [HeaderLen]byte{ + 0x45, 0x01, 0xdb, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + } + wireHeaderFromFreeBSD10Kernel = [HeaderLen]byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + } + wireHeaderToTradBSDKernel = [HeaderLen]byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + } + // TODO(mikio): Add platform dependent wire header formats when + // we support new platforms. 
+ + testHeader = &Header{ + Version: Version, + Len: HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + } +) + +func TestMarshalHeader(t *testing.T) { + b, err := testHeader.Marshal() + if err != nil { + t.Fatal(err) + } + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = wireHeaderToTradBSDKernel[:] + case "freebsd": + if freebsdVersion < 1000000 { + wh = wireHeaderToTradBSDKernel[:] + } else { + wh = wireHeaderFromFreeBSD10Kernel[:] + } + default: + wh = wireHeaderToKernel[:] + } + if !bytes.Equal(b, wh) { + t.Fatalf("got %#v; want %#v", b, wh) + } +} + +func TestParseHeader(t *testing.T) { + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = wireHeaderFromTradBSDKernel[:] + case "freebsd": + if freebsdVersion < 1000000 { + wh = wireHeaderFromTradBSDKernel[:] + } else { + wh = wireHeaderFromFreeBSD10Kernel[:] + } + default: + wh = wireHeaderFromKernel[:] + } + h, err := ParseHeader(wh) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, testHeader) { + t.Fatalf("got %#v; want %#v", h, testHeader) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) + } +} diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go new file mode 100644 index 00000000..acecfd0d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -0,0 +1,59 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "encoding/binary" + "errors" + "net" + "unsafe" +) + +var ( + errMissingAddress = errors.New("missing address") + errMissingHeader = errors.New("missing header") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") + errNoSuchMulticastInterface = errors.New("no such multicast interface") + + // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. + freebsdVersion uint32 + + nativeEndian binary.ByteOrder +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = binary.LittleEndian + } else { + nativeEndian = binary.BigEndian + } +} + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP4(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/helper_stub.go b/vendor/golang.org/x/net/ipv4/helper_stub.go new file mode 100644 index 00000000..dc2120cf --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper_stub.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 solaris + +package ipv4 + +func (c *genericOpt) sysfd() (int, error) { + return 0, errOpNoSupport +} + +func (c *dgramOpt) sysfd() (int, error) { + return 0, errOpNoSupport +} + +func (c *payloadHandler) sysfd() (int, error) { + return 0, errOpNoSupport +} + +func (c *packetHandler) sysfd() (int, error) { + return 0, errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/helper_unix.go b/vendor/golang.org/x/net/ipv4/helper_unix.go new file mode 100644 index 00000000..345ca7dc --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper_unix.go @@ -0,0 +1,50 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv4 + +import ( + "net" + "reflect" +) + +func (c *genericOpt) sysfd() (int, error) { + switch p := c.Conn.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + return sysfd(p) + } + return 0, errInvalidConnType +} + +func (c *dgramOpt) sysfd() (int, error) { + switch p := c.PacketConn.(type) { + case *net.UDPConn, *net.IPConn: + return sysfd(p.(net.Conn)) + } + return 0, errInvalidConnType +} + +func (c *payloadHandler) sysfd() (int, error) { + return sysfd(c.PacketConn.(net.Conn)) +} + +func (c *packetHandler) sysfd() (int, error) { + return sysfd(c.c) +} + +func sysfd(c net.Conn) (int, error) { + cv := reflect.ValueOf(c) + switch ce := cv.Elem(); ce.Kind() { + case reflect.Struct: + netfd := ce.FieldByName("conn").FieldByName("fd") + switch fe := netfd.Elem(); fe.Kind() { + case reflect.Struct: + fd := fe.FieldByName("sysfd") + return int(fd.Int()), nil + } + } + return 0, errInvalidConnType +} diff --git a/vendor/golang.org/x/net/ipv4/helper_windows.go b/vendor/golang.org/x/net/ipv4/helper_windows.go new file mode 100644 index 00000000..322b2a5e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper_windows.go @@ -0,0 +1,49 @@ +// Copyright 2012 
The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "reflect" + "syscall" +) + +func (c *genericOpt) sysfd() (syscall.Handle, error) { + switch p := c.Conn.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + return sysfd(p) + } + return syscall.InvalidHandle, errInvalidConnType +} + +func (c *dgramOpt) sysfd() (syscall.Handle, error) { + switch p := c.PacketConn.(type) { + case *net.UDPConn, *net.IPConn: + return sysfd(p.(net.Conn)) + } + return syscall.InvalidHandle, errInvalidConnType +} + +func (c *payloadHandler) sysfd() (syscall.Handle, error) { + return sysfd(c.PacketConn.(net.Conn)) +} + +func (c *packetHandler) sysfd() (syscall.Handle, error) { + return sysfd(c.c) +} + +func sysfd(c net.Conn) (syscall.Handle, error) { + cv := reflect.ValueOf(c) + switch ce := cv.Elem(); ce.Kind() { + case reflect.Struct: + netfd := ce.FieldByName("conn").FieldByName("fd") + switch fe := netfd.Elem(); fe.Kind() { + case reflect.Struct: + fd := fe.FieldByName("sysfd") + return syscall.Handle(fd.Uint()), nil + } + } + return syscall.InvalidHandle, errInvalidConnType +} diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go new file mode 100644 index 00000000..be10c948 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/iana.go @@ -0,0 +1,34 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package ipv4 + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +const ( + ICMPTypeEchoReply ICMPType = 0 // Echo Reply + ICMPTypeDestinationUnreachable ICMPType = 3 // Destination Unreachable + ICMPTypeRedirect ICMPType = 5 // Redirect + ICMPTypeEcho ICMPType = 8 // Echo + ICMPTypeRouterAdvertisement ICMPType = 9 // Router Advertisement + ICMPTypeRouterSolicitation ICMPType = 10 // Router Solicitation + ICMPTypeTimeExceeded ICMPType = 11 // Time Exceeded + 
ICMPTypeParameterProblem ICMPType = 12 // Parameter Problem + ICMPTypeTimestamp ICMPType = 13 // Timestamp + ICMPTypeTimestampReply ICMPType = 14 // Timestamp Reply + ICMPTypePhoturis ICMPType = 40 // Photuris +) + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +var icmpTypes = map[ICMPType]string{ + 0: "echo reply", + 3: "destination unreachable", + 5: "redirect", + 8: "echo", + 9: "router advertisement", + 10: "router solicitation", + 11: "time exceeded", + 12: "parameter problem", + 13: "timestamp", + 14: "timestamp reply", + 40: "photuris", +} diff --git a/vendor/golang.org/x/net/ipv4/icmp.go b/vendor/golang.org/x/net/ipv4/icmp.go new file mode 100644 index 00000000..dbd05cff --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/iana" + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "<nil>" + } + return s +} + +// Protocol returns the ICMPv4 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 2460 defines a reasonable role model and it works not +// only for IPv6 but IPv4. A node means a device that implements IP. +// A router means a node that forwards IP packets not explicitly +// addressed to itself, and a host means a node that is not a router. +type ICMPFilter struct { + sysICMPFilter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. 
+func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_linux.go b/vendor/golang.org/x/net/ipv4/icmp_linux.go new file mode 100644 index 00000000..c9122533 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_linux.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +func (f *sysICMPFilter) accept(typ ICMPType) { + f.Data &^= 1 << (uint32(typ) & 31) +} + +func (f *sysICMPFilter) block(typ ICMPType) { + f.Data |= 1 << (uint32(typ) & 31) +} + +func (f *sysICMPFilter) setAll(block bool) { + if block { + f.Data = 1<<32 - 1 + } else { + f.Data = 0 + } +} + +func (f *sysICMPFilter) willBlock(typ ICMPType) bool { + return f.Data&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go new file mode 100644 index 00000000..9ee9b6a3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !linux + +package ipv4 + +const sysSizeofICMPFilter = 0x0 + +type sysICMPFilter struct { +} + +func (f *sysICMPFilter) accept(typ ICMPType) { +} + +func (f *sysICMPFilter) block(typ ICMPType) { +} + +func (f *sysICMPFilter) setAll(block bool) { +} + +func (f *sysICMPFilter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_test.go b/vendor/golang.org/x/net/ipv4/icmp_test.go new file mode 100644 index 00000000..3324b54d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_test.go @@ -0,0 +1,95 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var icmpStringTests = []struct { + in ipv4.ICMPType + out string +}{ + {ipv4.ICMPTypeDestinationUnreachable, "destination unreachable"}, + + {256, "<nil>"}, +} + +func TestICMPString(t *testing.T) { + for _, tt := range icmpStringTests { + s := tt.in.String() + if s != tt.out { + t.Errorf("got %s; want %s", s, tt.out) + } + } +} + +func TestICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "linux": + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + var f ipv4.ICMPFilter + for _, toggle := range []bool{false, true} { + f.SetAll(toggle) + for _, typ := range []ipv4.ICMPType{ + ipv4.ICMPTypeDestinationUnreachable, + ipv4.ICMPTypeEchoReply, + ipv4.ICMPTypeTimeExceeded, + ipv4.ICMPTypeParameterProblem, + } { + f.Accept(typ) + if f.WillBlock(typ) { + t.Errorf("ipv4.ICMPFilter.Set(%v, false) failed", typ) + } + f.Block(typ) + if !f.WillBlock(typ) { + t.Errorf("ipv4.ICMPFilter.Set(%v, true) failed", typ) + } + } + } +} + +func TestSetICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "linux": + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := 
nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + + var f ipv4.ICMPFilter + f.SetAll(true) + f.Accept(ipv4.ICMPTypeEcho) + f.Accept(ipv4.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + kf, err := p.ICMPFilter() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(kf, &f) { + t.Fatalf("got %#v; want %#v", kf, f) + } +} diff --git a/vendor/golang.org/x/net/ipv4/mocktransponder_test.go b/vendor/golang.org/x/net/ipv4/mocktransponder_test.go new file mode 100644 index 00000000..e55aaee9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/mocktransponder_test.go @@ -0,0 +1,21 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "testing" +) + +func acceptor(t *testing.T, ln net.Listener, done chan<- bool) { + defer func() { done <- true }() + + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + c.Close() +} diff --git a/vendor/golang.org/x/net/ipv4/multicast_test.go b/vendor/golang.org/x/net/ipv4/multicast_test.go new file mode 100644 index 00000000..d2bcf853 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicast_test.go @@ -0,0 +1,330 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var packetConnReadWriteMulticastUDPTests = []struct { + addr string + grp, src *net.UDPAddr +}{ + {"224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {"232.0.1.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastUDPTests { + c, err := net.ListenPacket("udp4", tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + grp := *tt.grp + grp.Port = c.LocalAddr().(*net.UDPAddr).Port + p := ipv4.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, &grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, &grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := 
range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + p.SetMulticastTTL(i + 1) + if n, err := p.WriteTo(wb, nil, &grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } + } +} + +var packetConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, tt.grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, 
tt.grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + p.SetMulticastTTL(i + 1) + if n, err := p.WriteTo(wb, nil, tt.grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + if err != nil { + t.Fatal(err) + } + switch { + case m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 + case m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 + default: + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } + } +} + +var rawConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestRawConnReadWriteMulticastICMP(t *testing.T) { + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := 
nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range rawConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + if tt.src == nil { + if err := r.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer r.LeaveGroup(ifi, tt.grp) + } else { + if err := r.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer r.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := r.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := r.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := r.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := r.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + wh := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: i + 1, + TotalLen: ipv4.HeaderLen + len(wb), + Protocol: 1, + Dst: tt.grp.IP, + } + if err := r.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := r.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + 
r.SetMulticastTTL(i + 1) + if err := r.WriteTo(wh, wb, nil); err != nil { + t.Fatal(err) + } + rb := make([]byte, ipv4.HeaderLen+128) + if rh, b, _, err := r.ReadFrom(rb); err != nil { + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + switch { + case (rh.Dst.IsLoopback() || rh.Dst.IsLinkLocalUnicast() || rh.Dst.IsGlobalUnicast()) && m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 + case rh.Dst.IsMulticast() && m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 + default: + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicastlistener_test.go b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go new file mode 100644 index 00000000..e342bf1d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go @@ -0,0 +1,249 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var udpMultipleGroupListenerTests = []net.Addr{ + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, // see RFC 4727 + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}, + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, +} + +func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c, err := net.ListenPacket("udp4", "0.0.0.0:0") // wildcard address with no reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } +} + +func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c1.Close() + + c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + var ps [2]*ipv4.PacketConn + ps[0] = ipv4.NewPacketConn(c1) + ps[1] = ipv4.NewPacketConn(c2) + var mift 
[]*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + for _, p := range ps { + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + for _, p := range ps { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } + } +} + +func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + type ml struct { + c *ipv4.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip4", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("udp4", ip.String()+":"+"1024") // unicast address with non-reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPSingleRawConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // wildcard address + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + gaddr := 
net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + if err := r.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := r.LeaveGroup(ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPPerInterfaceSingleRawConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + type ml struct { + c *ipv4.RawConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip4", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("ip4:253", ip.String()) // unicast address + if err != nil { + t.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + if err := r.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{r, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go new file mode 100644 index 00000000..c76dbe4d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go @@ -0,0 +1,195 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var packetConnMulticastSocketOptionTests = []struct { + net, proto, addr string + grp, src net.Addr +}{ + {"udp4", "", "224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, nil}, // see RFC 4727 + {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 + + {"udp4", "", "232.0.0.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 249)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 + {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnMulticastSocketOptionTests { + if tt.net == "ip4" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, p, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) + } + } +} + +var rawConnMulticastSocketOptionTests = []struct { + grp, src net.Addr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestRawConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + 
ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range rawConnMulticastSocketOptionTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, r, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, r, ifi, tt.grp, tt.src) + } + } +} + +type testIPv4MulticastConn interface { + MulticastTTL() (int, error) + SetMulticastTTL(ttl int) error + MulticastLoopback() (bool, error) + SetMulticastLoopback(bool) error + JoinGroup(*net.Interface, net.Addr) error + LeaveGroup(*net.Interface, net.Addr) error + JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error +} + +func testMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp net.Addr) { + const ttl = 255 + if err := c.SetMulticastTTL(ttl); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastTTL(); err != nil { + t.Error(err) + return + } else if v != ttl { + t.Errorf("got %v; want %v", v, ttl) + return + } + + for _, toggle := range []bool{true, false} { + if err := c.SetMulticastLoopback(toggle); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastLoopback(); err != nil { + t.Error(err) + return + } else if v != toggle { + t.Errorf("got %v; want %v", v, toggle) + return + } + } + + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} + +func testSourceSpecificMulticastSocketOptions(t *testing.T, c 
testIPv4MulticastConn, ifi *net.Interface, grp, src net.Addr) { + // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + return + } + t.Error(err) + return + } + if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go new file mode 100644 index 00000000..09864314 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" +) + +// A packetHandler represents the IPv4 datagram handler. +type packetHandler struct { + c *net.IPConn + rawOpt +} + +func (c *packetHandler) ok() bool { return c != nil && c.c != nil } + +// ReadFrom reads an IPv4 datagram from the endpoint c, copying the +// datagram into b. It returns the received datagram as the IPv4 +// header h, the payload p and the control message cm. 
+func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + if !c.ok() { + return nil, nil, nil, syscall.EINVAL + } + oob := newControlMessage(&c.rawOpt) + n, oobn, _, src, err := c.c.ReadMsgIP(b, oob) + if err != nil { + return nil, nil, nil, err + } + var hs []byte + if hs, p, err = slicePacket(b[:n]); err != nil { + return nil, nil, nil, err + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, err + } + if cm, err = parseControlMessage(oob[:oobn]); err != nil { + return nil, nil, nil, err + } + if src != nil && cm != nil { + cm.Src = src.IP + } + return +} + +func slicePacket(b []byte) (h, p []byte, err error) { + if len(b) < HeaderLen { + return nil, nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + return b[:hdrlen], b[hdrlen:], nil +} + +// WriteTo writes an IPv4 datagram through the endpoint c, copying the +// datagram from the IPv4 header h and the payload p. The control +// message cm allows the datagram path and the outgoing interface to be +// specified. Currently only Darwin and Linux support this. The cm +// may be nil if control of the outgoing datagram is not required. 
+// +// The IPv4 header h must contain appropriate fields that include: +// +// Version = ipv4.Version +// Len = <must be specified> +// TOS = <must be specified> +// TotalLen = <must be specified> +// ID = platform sets an appropriate value if ID is zero +// FragOff = <must be specified> +// TTL = <must be specified> +// Protocol = <must be specified> +// Checksum = platform sets an appropriate value if Checksum is zero +// Src = platform sets an appropriate value if Src is nil +// Dst = <must be specified> +// Options = optional +func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { + if !c.ok() { + return syscall.EINVAL + } + oob := marshalControlMessage(cm) + wh, err := h.Marshal() + if err != nil { + return err + } + dst := &net.IPAddr{} + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + wh = append(wh, p...) + _, _, err = c.c.WriteMsgIP(wh, oob, dst) + return err +} diff --git a/vendor/golang.org/x/net/ipv4/payload.go b/vendor/golang.org/x/net/ipv4/payload.go new file mode 100644 index 00000000..d7698cbd --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload.go @@ -0,0 +1,15 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "net" + +// A payloadHandler represents the IPv4 datagram payload handler. +type payloadHandler struct { + net.PacketConn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go new file mode 100644 index 00000000..d358fc3a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -0,0 +1,81 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !plan9,!solaris,!windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + oob := newControlMessage(&c.rawOpt) + var oobn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + if sockOpts[ssoStripHeader].name > 0 { + if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil { + return 0, nil, nil, err + } + } else { + nb := make([]byte, maxHeaderLen+len(b)) + if n, oobn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { + return 0, nil, nil, err + } + hdrlen := int(nb[0]&0x0f) << 2 + copy(b, nb[hdrlen:]) + n -= hdrlen + } + default: + return 0, nil, nil, errInvalidConnType + } + if cm, err = parseControlMessage(oob[:oobn]); err != nil { + return 0, nil, nil, err + } + if cm != nil { + cm.Src = netAddrToIP4(src) + } + return +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. 
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + oob := marshalControlMessage(cm) + if dst == nil { + return 0, errMissingAddress + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, errInvalidConnType + } + if err != nil { + return 0, err + } + return +} diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go new file mode 100644 index 00000000..d128c9c2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build plan9 solaris windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. 
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_test.go b/vendor/golang.org/x/net/ipv4/readwrite_test.go new file mode 100644 index 00000000..247d06c1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_test.go @@ -0,0 +1,174 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "bytes" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func benchmarkUDPListener() (net.PacketConn, net.Addr, error) { + c, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + return nil, nil, err + } + dst, err := net.ResolveUDPAddr("udp4", c.LocalAddr().String()) + if err != nil { + c.Close() + return nil, nil, err + } + return c, dst, nil +} + +func BenchmarkReadWriteNetUDP(b *testing.B) { + c, dst, err := benchmarkUDPListener() + if err != nil { + b.Fatal(err) + } + defer c.Close() + + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchmarkReadWriteNetUDP(b, c, wb, rb, dst) + } +} + +func benchmarkReadWriteNetUDP(b *testing.B, c net.PacketConn, wb, rb []byte, dst net.Addr) { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } +} + +func BenchmarkReadWriteIPv4UDP(b *testing.B) { + c, dst, err := benchmarkUDPListener() + if err != nil { + b.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + defer p.Close() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + ifi := nettest.RoutedInterface("ip4", 
net.FlagUp|net.FlagLoopback) + + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchmarkReadWriteIPv4UDP(b, p, wb, rb, dst, ifi) + } +} + +func benchmarkReadWriteIPv4UDP(b *testing.B, p *ipv4.PacketConn, wb, rb []byte, dst net.Addr, ifi *net.Interface) { + cm := ipv4.ControlMessage{TTL: 1} + if ifi != nil { + cm.IfIndex = ifi.Index + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } else if n != len(wb) { + b.Fatalf("got %v; want %v", n, len(wb)) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } +} + +func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + dst, err := net.ResolveUDPAddr("udp4", c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + rb := make([]byte, 128) + if n, cm, _, err := p.ReadFrom(rb); err != nil { + t.Error(err) + return + } else if !bytes.Equal(rb[:n], wb) { + t.Errorf("got %v; want %v", rb[:n], wb) + return + } else { + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + } + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := 
p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Error(err) + return + } else if n != len(wb) { + t.Errorf("short write: %v", n) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt.go b/vendor/golang.org/x/net/ipv4/sockopt.go new file mode 100644 index 00000000..ace37d30 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt.go @@ -0,0 +1,46 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +// Sticky socket options +const ( + ssoTOS = iota // header field for unicast packet + ssoTTL // header field for unicast packet + ssoMulticastTTL // header field for multicast packet + ssoMulticastInterface // outbound interface for multicast packet + ssoMulticastLoopback // loopback for multicast packet + ssoReceiveTTL // header field on received packet + ssoReceiveDst // header field on received packet + ssoReceiveInterface // inbound interface on received packet + ssoPacketInfo // incbound or outbound packet path + ssoHeaderPrepend // ipv4 header prepend + ssoStripHeader // strip ipv4 header + ssoICMPFilter // icmp filter + ssoJoinGroup // any-source multicast + ssoLeaveGroup // any-source multicast + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoMax +) + +// Sticky socket option value types +const ( + ssoTypeByte = iota + 1 + ssoTypeInt + ssoTypeInterface + ssoTypeICMPFilter + ssoTypeIPMreq + ssoTypeIPMreqn + ssoTypeGroupReq + 
ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky socket option. +type sockOpt struct { + name int // option name, must be equal or greater than 1 + typ int // option value type, must be equal or greater than 1 +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go new file mode 100644 index 00000000..4a6aa78e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go @@ -0,0 +1,83 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd windows + +package ipv4 + +import "net" + +func setIPMreqInterface(mreq *sysIPMreq, ifi *net.Interface) error { + if ifi == nil { + return nil + } + ifat, err := ifi.Addrs() + if err != nil { + return err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + } + } + return errNoSuchInterface +} + +func netIP4ToInterface(ip net.IP) (*net.Interface, error) { + ift, err := net.Interfaces() + if err != nil { + return nil, err + } + for _, ifi := range ift { + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + case *net.IPNet: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + } + } + } + return nil, errNoSuchInterface +} + +func netInterfaceToIP4(ifi *net.Interface) (net.IP, error) { + if ifi == nil { + return net.IPv4zero.To4(), nil + } + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + 
case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + } + } + return nil, errNoSuchInterface +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go new file mode 100644 index 00000000..45551528 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!windows + +package ipv4 + +import "net" + +func setsockoptIPMreq(fd, name int, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func getsockoptInterface(fd, name int) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func setsockoptInterface(fd, name int, ifi *net.Interface) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go new file mode 100644 index 00000000..7b5c3290 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go @@ -0,0 +1,46 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv4 + +import ( + "net" + "os" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func setsockoptIPMreq(fd, name int, ifi *net.Interface, grp net.IP) error { + mreq := sysIPMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreq), sysSizeofIPMreq)) +} + +func getsockoptInterface(fd, name int) (*net.Interface, error) { + var b [4]byte + l := uint32(4) + if err := getsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func setsockoptInterface(fd, name int, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), uint32(4))) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go new file mode 100644 index 00000000..431930df --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go @@ -0,0 +1,45 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "os" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func setsockoptIPMreq(fd syscall.Handle, name int, ifi *net.Interface, grp net.IP) error { + mreq := sysIPMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, iana.ProtocolIP, int32(name), (*byte)(unsafe.Pointer(&mreq)), int32(sysSizeofIPMreq))) +} + +func getsockoptInterface(fd syscall.Handle, name int) (*net.Interface, error) { + var b [4]byte + l := int32(4) + if err := syscall.Getsockopt(fd, iana.ProtocolIP, int32(name), (*byte)(unsafe.Pointer(&b[0])), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func setsockoptInterface(fd syscall.Handle, name int, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, iana.ProtocolIP, int32(name), (*byte)(unsafe.Pointer(&b[0])), 4)) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go new file mode 100644 index 00000000..332f403e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!freebsd,!linux,!windows + +package ipv4 + +import "net" + +func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func setsockoptIPMreqn(fd, name int, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go new file mode 100644 index 00000000..1f2b9a14 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux + +package ipv4 + +import ( + "net" + "os" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { + var mreqn sysIPMreqn + l := uint32(sysSizeofIPMreqn) + if err := getsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + if mreqn.Ifindex == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(mreqn.Ifindex)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func setsockoptIPMreqn(fd, name int, ifi *net.Interface, grp net.IP) error { + var mreqn sysIPMreqn + if ifi != nil { + mreqn.Ifindex = int32(ifi.Index) + } + if grp != nil { + mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]} + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), sysSizeofIPMreqn)) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go new file mode 100644 index 00000000..85465244 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux + +package ipv4 + +import "net" + +func setsockoptGroupReq(fd, name int, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func setsockoptGroupSourceReq(fd, name int, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go b/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go new file mode 100644 index 00000000..0a672b6a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go @@ -0,0 +1,61 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux + +package ipv4 + +import ( + "net" + "os" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +var freebsd32o64 bool + +func setsockoptGroupReq(fd, name int, ifi *net.Interface, grp net.IP) error { + var gr sysGroupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var p unsafe.Pointer + var l uint32 + if freebsd32o64 { + var d [sysSizeofGroupReq + 4]byte + s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + p = unsafe.Pointer(&d[0]) + l = sysSizeofGroupReq + 4 + } else { + p = unsafe.Pointer(&gr) + l = sysSizeofGroupReq + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, p, l)) +} + +func setsockoptGroupSourceReq(fd, name int, ifi *net.Interface, grp, src net.IP) error { + var gsr sysGroupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var p unsafe.Pointer + var l uint32 + if freebsd32o64 { + var d [sysSizeofGroupSourceReq + 4]byte + s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + p = unsafe.Pointer(&d[0]) + l = 
sysSizeofGroupSourceReq + 4 + } else { + p = unsafe.Pointer(&gsr) + l = sysSizeofGroupSourceReq + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, p, l)) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go new file mode 100644 index 00000000..9d19f5df --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -0,0 +1,11 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv4 + +func setInt(fd int, opt *sockOpt, v int) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_unix.go b/vendor/golang.org/x/net/ipv4/sockopt_unix.go new file mode 100644 index 00000000..f7acc6b9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_unix.go @@ -0,0 +1,122 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv4 + +import ( + "net" + "os" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func getInt(fd int, opt *sockOpt) (int, error) { + if opt.name < 1 || (opt.typ != ssoTypeByte && opt.typ != ssoTypeInt) { + return 0, errOpNoSupport + } + var i int32 + var b byte + p := unsafe.Pointer(&i) + l := uint32(4) + if opt.typ == ssoTypeByte { + p = unsafe.Pointer(&b) + l = 1 + } + if err := getsockopt(fd, iana.ProtocolIP, opt.name, p, &l); err != nil { + return 0, os.NewSyscallError("getsockopt", err) + } + if opt.typ == ssoTypeByte { + return int(b), nil + } + return int(i), nil +} + +func setInt(fd int, opt *sockOpt, v int) error { + if opt.name < 1 || (opt.typ != ssoTypeByte && opt.typ != ssoTypeInt) { + return errOpNoSupport + } + i := int32(v) + var b byte + p := unsafe.Pointer(&i) + l := uint32(4) + if opt.typ == ssoTypeByte { + b = byte(v) + p = unsafe.Pointer(&b) + l = 1 + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, opt.name, p, l)) +} + +func getInterface(fd int, opt *sockOpt) (*net.Interface, error) { + if opt.name < 1 { + return nil, errOpNoSupport + } + switch opt.typ { + case ssoTypeInterface: + return getsockoptInterface(fd, opt.name) + case ssoTypeIPMreqn: + return getsockoptIPMreqn(fd, opt.name) + default: + return nil, errOpNoSupport + } +} + +func setInterface(fd int, opt *sockOpt, ifi *net.Interface) error { + if opt.name < 1 { + return errOpNoSupport + } + switch opt.typ { + case ssoTypeInterface: + return setsockoptInterface(fd, opt.name, ifi) + case ssoTypeIPMreqn: + return setsockoptIPMreqn(fd, opt.name, ifi, nil) + default: + return errOpNoSupport + } +} + +func getICMPFilter(fd int, opt *sockOpt) (*ICMPFilter, error) { + if opt.name < 1 || opt.typ != ssoTypeICMPFilter { + return nil, errOpNoSupport + } + var f ICMPFilter + l := uint32(sysSizeofICMPFilter) + if err := getsockopt(fd, iana.ProtocolReserved, opt.name, 
unsafe.Pointer(&f.sysICMPFilter), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + return &f, nil +} + +func setICMPFilter(fd int, opt *sockOpt, f *ICMPFilter) error { + if opt.name < 1 || opt.typ != ssoTypeICMPFilter { + return errOpNoSupport + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolReserved, opt.name, unsafe.Pointer(&f.sysICMPFilter), sysSizeofICMPFilter)) +} + +func setGroup(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + if opt.name < 1 { + return errOpNoSupport + } + switch opt.typ { + case ssoTypeIPMreq: + return setsockoptIPMreq(fd, opt.name, ifi, grp) + case ssoTypeIPMreqn: + return setsockoptIPMreqn(fd, opt.name, ifi, grp) + case ssoTypeGroupReq: + return setsockoptGroupReq(fd, opt.name, ifi, grp) + default: + return errOpNoSupport + } +} + +func setSourceGroup(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { + if opt.name < 1 || opt.typ != ssoTypeGroupSourceReq { + return errOpNoSupport + } + return setsockoptGroupSourceReq(fd, opt.name, ifi, grp, src) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_windows.go b/vendor/golang.org/x/net/ipv4/sockopt_windows.go new file mode 100644 index 00000000..c4c2441e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_windows.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "os" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func getInt(fd syscall.Handle, opt *sockOpt) (int, error) { + if opt.name < 1 || opt.typ != ssoTypeInt { + return 0, errOpNoSupport + } + var i int32 + l := int32(4) + if err := syscall.Getsockopt(fd, iana.ProtocolIP, int32(opt.name), (*byte)(unsafe.Pointer(&i)), &l); err != nil { + return 0, os.NewSyscallError("getsockopt", err) + } + return int(i), nil +} + +func setInt(fd syscall.Handle, opt *sockOpt, v int) error { + if opt.name < 1 || opt.typ != ssoTypeInt { + return errOpNoSupport + } + i := int32(v) + return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, iana.ProtocolIP, int32(opt.name), (*byte)(unsafe.Pointer(&i)), 4)) +} + +func getInterface(fd syscall.Handle, opt *sockOpt) (*net.Interface, error) { + if opt.name < 1 || opt.typ != ssoTypeInterface { + return nil, errOpNoSupport + } + return getsockoptInterface(fd, opt.name) +} + +func setInterface(fd syscall.Handle, opt *sockOpt, ifi *net.Interface) error { + if opt.name < 1 || opt.typ != ssoTypeInterface { + return errOpNoSupport + } + return setsockoptInterface(fd, opt.name, ifi) +} + +func getICMPFilter(fd syscall.Handle, opt *sockOpt) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func setICMPFilter(fd syscall.Handle, opt *sockOpt, f *ICMPFilter) error { + return errOpNoSupport +} + +func setGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + if opt.name < 1 || opt.typ != ssoTypeIPMreq { + return errOpNoSupport + } + return setsockoptIPMreq(fd, opt.name, ifi, grp) +} + +func setSourceGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { + // TODO(mikio): implement this + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go new file mode 100644 index 00000000..203033db --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -0,0 +1,34 @@ +// 
Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly netbsd + +package ipv4 + +import ( + "net" + "syscall" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTOS: {sysIP_TOS, ssoTypeInt}, + ssoTTL: {sysIP_TTL, ssoTypeInt}, + ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, + ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, + ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, + ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, + ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, + ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, + ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_darwin.go b/vendor/golang.org/x/net/ipv4/sys_darwin.go new file mode 100644 index 00000000..b5f5bd51 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_darwin.go @@ -0,0 +1,96 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTOS: {sysIP_TOS, ssoTypeInt}, + ssoTTL: {sysIP_TTL, ssoTypeInt}, + ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, + ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, + ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, + ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, + ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, + ssoStripHeader: {sysIP_STRIPHDR, ssoTypeInt}, + ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, + ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + osver, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + var i int + for i = range osver { + if osver[i] == '.' { + break + } + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11.0.0). But it looks like + // those features require OS X 10.8 (Darwin 12.0.0) and above. + // See http://support.apple.com/kb/HT1633. 
+ if i > 2 || i == 2 && osver[0] >= '1' && osver[1] >= '2' { + ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO + ctlOpts[ctlPacketInfo].length = sysSizeofInetPktinfo + ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo + ctlOpts[ctlPacketInfo].parse = parsePacketInfo + sockOpts[ssoPacketInfo].name = sysIP_RECVPKTINFO + sockOpts[ssoPacketInfo].typ = ssoTypeInt + sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn + sockOpts[ssoJoinGroup].name = sysMCAST_JOIN_GROUP + sockOpts[ssoJoinGroup].typ = ssoTypeGroupReq + sockOpts[ssoLeaveGroup].name = sysMCAST_LEAVE_GROUP + sockOpts[ssoLeaveGroup].typ = ssoTypeGroupReq + sockOpts[ssoJoinSourceGroup].name = sysMCAST_JOIN_SOURCE_GROUP + sockOpts[ssoJoinSourceGroup].typ = ssoTypeGroupSourceReq + sockOpts[ssoLeaveSourceGroup].name = sysMCAST_LEAVE_SOURCE_GROUP + sockOpts[ssoLeaveSourceGroup].typ = ssoTypeGroupSourceReq + sockOpts[ssoBlockSourceGroup].name = sysMCAST_BLOCK_SOURCE + sockOpts[ssoBlockSourceGroup].typ = ssoTypeGroupSourceReq + sockOpts[ssoUnblockSourceGroup].name = sysMCAST_UNBLOCK_SOURCE + sockOpts[ssoUnblockSourceGroup].typ = ssoTypeGroupSourceReq + } +} + +func (pi *sysInetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *sysGroupReq) setGroup(grp net.IP) { + sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Pad_cgo_0[0])) + sa.Len = sysSizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Pad_cgo_0[0])) + sa.Len = sysSizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Pad_cgo_1[0])) + sa.Len = sysSizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/golang.org/x/net/ipv4/sys_freebsd.go new file mode 100644 index 00000000..163ff9a7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -0,0 +1,73 @@ +// 
Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTOS: {sysIP_TOS, ssoTypeInt}, + ssoTTL: {sysIP_TTL, ssoTypeInt}, + ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, + ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, + ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, + ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, + ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, + ssoJoinGroup: {sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, + ssoLeaveGroup: {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, + ssoJoinSourceGroup: {sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + } +) + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") + if freebsdVersion >= 1000000 { + sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn + } + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (gr *sysGroupReq) setGroup(grp net.IP) { + sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Len = sysSizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr 
*sysGroupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Len = sysSizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Len = sysSizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_linux.go b/vendor/golang.org/x/net/ipv4/sys_linux.go new file mode 100644 index 00000000..73e0d462 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_linux.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sysSizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTOS: {sysIP_TOS, ssoTypeInt}, + ssoTTL: {sysIP_TTL, ssoTypeInt}, + ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeInt}, + ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeIPMreqn}, + ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, + ssoPacketInfo: {sysIP_PKTINFO, ssoTypeInt}, + ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, + ssoICMPFilter: {sysICMP_FILTER, ssoTypeICMPFilter}, + ssoJoinGroup: {sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, + ssoLeaveGroup: {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, + ssoJoinSourceGroup: {sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + } +) + +func (pi *sysInetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (gr *sysGroupReq) setGroup(grp net.IP) { + sa := 
(*sysSockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_openbsd.go b/vendor/golang.org/x/net/ipv4/sys_openbsd.go new file mode 100644 index 00000000..d78083a2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_openbsd.go @@ -0,0 +1,32 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTOS: {sysIP_TOS, ssoTypeInt}, + ssoTTL: {sysIP_TTL, ssoTypeInt}, + ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, + ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeByte}, + ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, + ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, + ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, + ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, + ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, + ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go new file mode 100644 index 00000000..c8e55cbc --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv4 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = [ssoMax]sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv4/sys_windows.go b/vendor/golang.org/x/net/ipv4/sys_windows.go new file mode 100644 index 00000000..466489fe --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_windows.go @@ -0,0 +1,61 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +const ( + // See ws2tcpip.h. + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_DONTFRAGMENT = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0xf + sysIP_DROP_SOURCE_MEMBERSHIP = 0x10 + sysIP_PKTINFO = 0x13 + + sysSizeofInetPktinfo = 0x8 + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqSource = 0xc +) + +type sysInetPktinfo struct { + Addr [4]byte + Ifindex int32 +} + +type sysIPMreq struct { + Multiaddr [4]byte + Interface [4]byte +} + +type sysIPMreqSource struct { + Multiaddr [4]byte + Sourceaddr [4]byte + Interface [4]byte +} + +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = [ssoMax]sockOpt{ + ssoTOS: {sysIP_TOS, ssoTypeInt}, + ssoTTL: {sysIP_TTL, ssoTypeInt}, + ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeInt}, + ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, + ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, + ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + } +) + +func (pi *sysInetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} diff --git a/vendor/golang.org/x/net/ipv4/syscall_linux_386.go 
b/vendor/golang.org/x/net/ipv4/syscall_linux_386.go new file mode 100644 index 00000000..07a3a282 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/syscall_linux_386.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "syscall" + "unsafe" +) + +const ( + sysGETSOCKOPT = 0xf + sysSETSOCKOPT = 0xe +) + +func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno) + +func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { + if _, errno := socketcall(sysGETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { + return error(errno) + } + return nil +} + +func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { + if _, errno := socketcall(sysSETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/syscall_unix.go b/vendor/golang.org/x/net/ipv4/syscall_unix.go new file mode 100644 index 00000000..88a41b0c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/syscall_unix.go @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux,!386 netbsd openbsd + +package ipv4 + +import ( + "syscall" + "unsafe" +) + +func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { + if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { + return error(errno) + } + return nil +} + +func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { + if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/thunk_linux_386.s b/vendor/golang.org/x/net/ipv4/thunk_linux_386.s new file mode 100644 index 00000000..daa78bc0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/thunk_linux_386.s @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.2 + +TEXT ·socketcall(SB),4,$0-36 + JMP syscall·socketcall(SB) diff --git a/vendor/golang.org/x/net/ipv4/unicast_test.go b/vendor/golang.org/x/net/ipv4/unicast_test.go new file mode 100644 index 00000000..9c632cd8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/unicast_test.go @@ -0,0 +1,246 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func TestPacketConnReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveUDPAddr("udp4", c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + p := ipv4.NewPacketConn(c) + defer p.Close() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + p.SetTTL(i + 1) + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, nil, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } +} + +func TestPacketConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == 
nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + p := ipv4.NewPacketConn(c) + defer p.Close() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + p.SetTTL(i + 1) + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, nil, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + loop: + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + if err != nil { + t.Fatal(err) + } + if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { + // On Linux we must handle own sent packets. 
+ goto loop + } + if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } +} + +func TestRawConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + wh := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: i + 1, + TotalLen: ipv4.HeaderLen + len(wb), + TTL: i + 1, + Protocol: 1, + Dst: dst.IP, + } + if err := r.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := r.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if err := r.WriteTo(wh, wb, nil); err != nil { + t.Fatal(err) + } + rb := make([]byte, ipv4.HeaderLen+128) + loop: + if err := r.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if _, b, _, err := r.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on 
receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { + // On Linux we must handle own sent packets. + goto loop + } + if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go new file mode 100644 index 00000000..25606f21 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go @@ -0,0 +1,139 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func TestConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go acceptor(t, ln, done) + + c, err := net.Dial("tcp4", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv4.NewConn(c)) + + <-done +} + +var packetConnUnicastSocketOptionTests = []struct { + net, proto, addr string +}{ + {"udp4", "", "127.0.0.1:0"}, + {"ip4", ":icmp", "127.0.0.1"}, +} + +func TestPacketConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris": 
+ t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnUnicastSocketOptionTests { + if tt.net == "ip4" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv4.NewPacketConn(c)) + } +} + +func TestRawConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + + testUnicastSocketOptions(t, r) +} + +type testIPv4UnicastConn interface { + TOS() (int, error) + SetTOS(int) error + TTL() (int, error) + SetTTL(int) error +} + +func testUnicastSocketOptions(t *testing.T, c testIPv4UnicastConn) { + tos := iana.DiffServCS0 | iana.NotECNTransport + switch runtime.GOOS { + case "windows": + // IP_TOS option is supported on Windows 8 and beyond. 
+ t.Skipf("not supported on %s", runtime.GOOS) + } + + if err := c.SetTOS(tos); err != nil { + t.Fatal(err) + } + if v, err := c.TOS(); err != nil { + t.Fatal(err) + } else if v != tos { + t.Fatalf("got %v; want %v", v, tos) + } + const ttl = 255 + if err := c.SetTTL(ttl); err != nil { + t.Fatal(err) + } + if v, err := c.TTL(); err != nil { + t.Fatal(err) + } else if v != ttl { + t.Fatalf("got %v; want %v", v, ttl) + } +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/golang.org/x/net/ipv4/zsys_darwin.go new file mode 100644 index 00000000..087c6390 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -0,0 +1,99 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_STRIPHDR = 0x17 + sysIP_RECVTTL = 0x18 + sysIP_BOUND_IF = 0x19 + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_MULTICAST_IFINDEX = 0x42 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type 
sysSockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sysInetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go new file mode 100644 index 00000000..f5c9ccec --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -0,0 +1,33 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +// +build dragonfly + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x41 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sysSizeofIPMreq = 0x8 +) + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go new file mode 100644 index 00000000..6fd67e1e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 
0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysGroupReq struct { + Interface uint32 + Group sysSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysSockaddrStorage + Source sysSockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go new file mode 100644 index 
00000000..ebac6d79 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage +} + +type 
sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage + Source sysSockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go new file mode 100644 index 00000000..ebac6d79 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + 
Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage + Source sysSockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go new file mode 100644 index 00000000..fc7a9ebf --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -0,0 +1,130 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + 
sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go new file mode 100644 index 00000000..e324b81b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -0,0 +1,132 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 
0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 
+ Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go new file mode 100644 index 00000000..fc7a9ebf --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -0,0 +1,130 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + 
sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go new file mode 100644 index 00000000..ce4194a6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -0,0 +1,134 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,arm64 + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + 
sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + 
Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go new file mode 100644 index 00000000..94116bfa --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -0,0 +1,134 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,mips64 + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 
+ + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go new file mode 100644 index 00000000..698d7db3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -0,0 +1,134 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,mips64le + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL 
= 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type 
sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go new file mode 100644 index 00000000..9fe5ee2b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -0,0 +1,134 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,ppc64 + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 
0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go new file mode 100644 index 00000000..3891f54e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -0,0 +1,134 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,ppc64le + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + 
sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* 
in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go new file mode 100644 index 00000000..8a440eb6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x17 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sysSizeofIPMreq = 0x8 +) + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go new file mode 100644 index 00000000..fd522b57 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x1e + sysIP_RECVTTL = 0x1f + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sysSizeofIPMreq = 0x8 +) + +type sysIPMreq struct { 
+ Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/golang.org/x/net/ipv4/zsys_solaris.go new file mode 100644 index 00000000..d7c23349 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -0,0 +1,60 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +// +build solaris + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x9 + sysIP_RECVSLLA = 0xa + sysIP_RECVTTL = 0xb + sysIP_NEXTHOP = 0x19 + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + sysIP_DONTFRAG = 0x1b + sysIP_BOUND_IF = 0x41 + sysIP_UNSPEC_SRC = 0x42 + sysIP_BROADCAST_TTL = 0x43 + sysIP_DHCPINIT_IF = 0x45 + + sysIP_MULTICAST_IF = 0x10 + sysIP_MULTICAST_TTL = 0x11 + sysIP_MULTICAST_LOOP = 0x12 + sysIP_ADD_MEMBERSHIP = 0x13 + sysIP_DROP_MEMBERSHIP = 0x14 + sysIP_BLOCK_SOURCE = 0x15 + sysIP_UNBLOCK_SOURCE = 0x16 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x17 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x18 + + sysSizeofInetPktinfo = 0xc + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqSource = 0xc +) + +type sysInetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv6/control.go b/vendor/golang.org/x/net/ipv6/control.go new file mode 100644 index 00000000..b7362aae --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control.go @@ -0,0 +1,85 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "fmt" + "net" + "sync" +) + +// Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the +// former still support RFC 2292 only. Please be aware that almost +// all protocol implementations prohibit using a combination of RFC +// 2292 and RFC 3542 for some practical reasons. + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +// A ControlFlags represents per packet basis IP-level socket option +// control flags. +type ControlFlags uint + +const ( + FlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet + FlagHopLimit // pass the hop limit on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet + FlagPathMTU // pass the path MTU on the received packet path +) + +const flagPacketInfo = FlagDst | FlagInterface + +// A ControlMessage represents per packet basis IP-level socket +// options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn allows to send the options to the + // protocol stack. 
+ // + TrafficClass int // traffic class, must be 1 <= value <= 255 when specifying + HopLimit int // hop limit, must be 1 <= value <= 255 when specifying + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying + NextHop net.IP // next hop address, specifying only + MTU int // path MTU, receiving only +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "<nil>" + } + return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU) +} + +// Ancillary data socket options +const ( + ctlTrafficClass = iota // header field + ctlHopLimit // header field + ctlPacketInfo // inbound or outbound packet path + ctlNextHop // nexthop + ctlPathMTU // path mtu + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go new file mode 100644 index 00000000..80ec2e2f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin + +package ipv6 + +import ( + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_2292HOPLIMIT + m.SetLen(syscall.CmsgLen(4)) + if cm != nil { + data := b[syscall.CmsgLen(0):] + nativeEndian.PutUint32(data[:4], uint32(cm.HopLimit)) + } + return b[syscall.CmsgSpace(4):] +} + +func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_2292PKTINFO + m.SetLen(syscall.CmsgLen(sysSizeofInet6Pktinfo)) + if cm != nil { + pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return b[syscall.CmsgSpace(sysSizeofInet6Pktinfo):] +} + +func marshal2292NextHop(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_2292NEXTHOP + m.SetLen(syscall.CmsgLen(sysSizeofSockaddrInet6)) + if cm != nil { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return b[syscall.CmsgSpace(sysSizeofSockaddrInet6):] +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go new file mode 100644 index 00000000..f344d16d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -0,0 +1,99 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv6 + +import ( + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_TCLASS + m.SetLen(syscall.CmsgLen(4)) + if cm != nil { + data := b[syscall.CmsgLen(0):] + nativeEndian.PutUint32(data[:4], uint32(cm.TrafficClass)) + } + return b[syscall.CmsgSpace(4):] +} + +func parseTrafficClass(cm *ControlMessage, b []byte) { + cm.TrafficClass = int(nativeEndian.Uint32(b[:4])) +} + +func marshalHopLimit(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_HOPLIMIT + m.SetLen(syscall.CmsgLen(4)) + if cm != nil { + data := b[syscall.CmsgLen(0):] + nativeEndian.PutUint32(data[:4], uint32(cm.HopLimit)) + } + return b[syscall.CmsgSpace(4):] +} + +func parseHopLimit(cm *ControlMessage, b []byte) { + cm.HopLimit = int(nativeEndian.Uint32(b[:4])) +} + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_PKTINFO + m.SetLen(syscall.CmsgLen(sysSizeofInet6Pktinfo)) + if cm != nil { + pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return b[syscall.CmsgSpace(sysSizeofInet6Pktinfo):] +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[0])) + cm.Dst = pi.Addr[:] + cm.IfIndex = int(pi.Ifindex) +} + +func marshalNextHop(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_NEXTHOP + m.SetLen(syscall.CmsgLen(sysSizeofSockaddrInet6)) + if cm != nil { + sa := 
(*sysSockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return b[syscall.CmsgSpace(sysSizeofSockaddrInet6):] +} + +func parseNextHop(cm *ControlMessage, b []byte) { +} + +func marshalPathMTU(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_PATHMTU + m.SetLen(syscall.CmsgLen(sysSizeofIPv6Mtuinfo)) + return b[syscall.CmsgSpace(sysSizeofIPv6Mtuinfo):] +} + +func parsePathMTU(cm *ControlMessage, b []byte) { + mi := (*sysIPv6Mtuinfo)(unsafe.Pointer(&b[0])) + cm.Dst = mi.Addr.Addr[:] + cm.IfIndex = int(mi.Addr.Scope_id) + cm.MTU = int(mi.Mtu) +} diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go new file mode 100644 index 00000000..2fecf7e5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv6 + +func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} + +func newControlMessage(opt *rawOpt) (oob []byte) { + return nil +} + +func parseControlMessage(b []byte) (*ControlMessage, error) { + return nil, errOpNoSupport +} + +func marshalControlMessage(cm *ControlMessage) (oob []byte) { + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go new file mode 100644 index 00000000..2af5beb4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -0,0 +1,166 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv6 + +import ( + "os" + "syscall" + + "golang.org/x/net/internal/iana" +) + +func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if cf&FlagTrafficClass != 0 && sockOpts[ssoReceiveTrafficClass].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceiveTrafficClass], boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTrafficClass) + } else { + opt.clear(FlagTrafficClass) + } + } + if cf&FlagHopLimit != 0 && sockOpts[ssoReceiveHopLimit].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceiveHopLimit], boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagHopLimit) + } else { + opt.clear(FlagHopLimit) + } + } + if cf&flagPacketInfo != 0 && sockOpts[ssoReceivePacketInfo].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceivePacketInfo], boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & flagPacketInfo) + } else { + opt.clear(cf & flagPacketInfo) + } + } + if cf&FlagPathMTU != 0 && sockOpts[ssoReceivePathMTU].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceivePathMTU], boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagPathMTU) + } else { + opt.clear(FlagPathMTU) + } + } + return nil +} + +func newControlMessage(opt *rawOpt) (oob []byte) { + opt.RLock() + var l int + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlTrafficClass].length) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlHopLimit].length) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlPathMTU].length) + } + if l > 0 { + oob = make([]byte, l) + b := oob + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { 
+ b = ctlOpts[ctlTrafficClass].marshal(b, nil) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + b = ctlOpts[ctlHopLimit].marshal(b, nil) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + b = ctlOpts[ctlPacketInfo].marshal(b, nil) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + b = ctlOpts[ctlPathMTU].marshal(b, nil) + } + } + opt.RUnlock() + return +} + +func parseControlMessage(b []byte) (*ControlMessage, error) { + if len(b) == 0 { + return nil, nil + } + cmsgs, err := syscall.ParseSocketControlMessage(b) + if err != nil { + return nil, os.NewSyscallError("parse socket control message", err) + } + cm := &ControlMessage{} + for _, m := range cmsgs { + if m.Header.Level != iana.ProtocolIPv6 { + continue + } + switch int(m.Header.Type) { + case ctlOpts[ctlTrafficClass].name: + ctlOpts[ctlTrafficClass].parse(cm, m.Data[:]) + case ctlOpts[ctlHopLimit].name: + ctlOpts[ctlHopLimit].parse(cm, m.Data[:]) + case ctlOpts[ctlPacketInfo].name: + ctlOpts[ctlPacketInfo].parse(cm, m.Data[:]) + case ctlOpts[ctlPathMTU].name: + ctlOpts[ctlPathMTU].parse(cm, m.Data[:]) + } + } + return cm, nil +} + +func marshalControlMessage(cm *ControlMessage) (oob []byte) { + if cm == nil { + return + } + var l int + tclass := false + if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { + tclass = true + l += syscall.CmsgSpace(ctlOpts[ctlTrafficClass].length) + } + hoplimit := false + if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { + hoplimit = true + l += syscall.CmsgSpace(ctlOpts[ctlHopLimit].length) + } + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { + pktinfo = true + l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) + } + nexthop := false + if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { + nexthop = true + l += syscall.CmsgSpace(ctlOpts[ctlNextHop].length) + } + if l > 0 { + oob = 
make([]byte, l) + b := oob + if tclass { + b = ctlOpts[ctlTrafficClass].marshal(b, cm) + } + if hoplimit { + b = ctlOpts[ctlHopLimit].marshal(b, cm) + } + if pktinfo { + b = ctlOpts[ctlPacketInfo].marshal(b, cm) + } + if nexthop { + b = ctlOpts[ctlNextHop].marshal(b, cm) + } + } + return +} diff --git a/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/golang.org/x/net/ipv6/control_windows.go new file mode 100644 index 00000000..72fdc1b0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_windows.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "syscall" + +func setControlMessage(fd syscall.Handle, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} + +func newControlMessage(opt *rawOpt) (oob []byte) { + // TODO(mikio): implement this + return nil +} + +func parseControlMessage(b []byte) (*ControlMessage, error) { + // TODO(mikio): implement this + return nil, syscall.EWINDOWS +} + +func marshalControlMessage(cm *ControlMessage) (oob []byte) { + // TODO(mikio): implement this + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/golang.org/x/net/ipv6/defs_darwin.go new file mode 100644 index 00000000..4c7f476a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_darwin.go @@ -0,0 +1,112 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#define __APPLE_USE_RFC_3542 +#include <netinet/in.h> +#include <netinet/icmp6.h> +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + 
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sysSizeofGroupReq = C.sizeof_struct_group_req + sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysSockaddrStorage C.struct_sockaddr_storage + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysICMPv6Filter C.struct_icmp6_filter + +type sysGroupReq C.struct_group_req + +type sysGroupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go new file mode 100644 index 00000000..c72487ce --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go @@ -0,0 +1,84 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include <sys/param.h> +#include <sys/socket.h> + +#include <netinet/in.h> +#include <netinet/icmp6.h> +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type 
sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/golang.org/x/net/ipv6/defs_freebsd.go new file mode 100644 index 00000000..de199ec6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_freebsd.go @@ -0,0 +1,105 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include <sys/param.h> +#include <sys/socket.h> + +#include <netinet/in.h> +#include <netinet/icmp6.h> +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + 
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_BINDANY = C.IPV6_BINDANY + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sysSizeofGroupReq = C.sizeof_struct_group_req + sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysSockaddrStorage C.struct_sockaddr_storage + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysGroupReq C.struct_group_req + +type sysGroupSourceReq C.struct_group_source_req + +type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/golang.org/x/net/ipv6/defs_linux.go new file mode 100644 index 00000000..d83abce3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_linux.go @@ -0,0 +1,136 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/ipv6.h> +#include <linux/icmpv6.h> +*/ +import "C" + +const ( + sysIPV6_ADDRFORM = C.IPV6_ADDRFORM + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_FLOWINFO = C.IPV6_FLOWINFO + + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP + sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT + sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER + sysIPV6_MTU = C.IPV6_MTU + sysIPV6_RECVERR = C.IPV6_RECVERR + sysIPV6_V6ONLY = C.IPV6_V6ONLY + sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST + sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST + + //sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT + //sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT + //sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO + //sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE + //sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE + //sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT + + sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR + sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + 
sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES + + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + + sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT + + sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR + sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR + sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT + sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF + + sysICMPV6_FILTER = C.ICMPV6_FILTER + + sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK + sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS + sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS + sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY + + sysSizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sysSizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sysSizeofGroupReq = C.sizeof_struct_group_req + 
sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysKernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6FlowlabelReq C.struct_in6_flowlabel_req + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysGroupReq C.struct_group_req + +type sysGroupSourceReq C.struct_group_source_req + +type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/golang.org/x/net/ipv6/defs_netbsd.go new file mode 100644 index 00000000..7bd09e8e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_netbsd.go @@ -0,0 +1,80 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include <sys/param.h> +#include <sys/socket.h> + +#include <netinet/in.h> +#include <netinet/icmp6.h> +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + 
sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/vendor/golang.org/x/net/ipv6/defs_openbsd.go new file mode 100644 index 00000000..6796d9b2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_openbsd.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include <sys/param.h> +#include <sys/socket.h> + +#include <netinet/in.h> +#include <netinet/icmp6.h> +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL + sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL + sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL + sysIPSEC6_OUTSA = C.IPSEC6_OUTSA + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_PIPEX = C.IPV6_PIPEX + + sysIPV6_RTABLE = C.IPV6_RTABLE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = 
C.sizeof_struct_ip6_mtuinfo + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/golang.org/x/net/ipv6/defs_solaris.go new file mode 100644 index 00000000..972b1712 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_solaris.go @@ -0,0 +1,96 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include <netinet/in.h> +#include <netinet/icmp6.h> +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + + sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_SEC_OPT = C.IPV6_SEC_OPT + sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + 
sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + + sysIPV6_PREFER_SRC_MIPMASK = C.IPV6_PREFER_SRC_MIPMASK + sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT + sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK + sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT + sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK + sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT + + sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK + + sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC + + sysICMP6_FILTER = C.ICMP6_FILTER + + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/dgramopt_posix.go b/vendor/golang.org/x/net/ipv6/dgramopt_posix.go new file mode 100644 index 00000000..93ff2f1a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/dgramopt_posix.go @@ -0,0 +1,288 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// MulticastHopLimit returns the hop limit field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastHopLimit() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return 0, err + } + return getInt(fd, &sockOpts[ssoMulticastHopLimit]) +} + +// SetMulticastHopLimit sets the hop limit field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoMulticastHopLimit], hoplim) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return nil, err + } + return getInterface(fd, &sockOpts[ssoMulticastInterface]) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInterface(fd, &sockOpts[ssoMulticastInterface], ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return false, err + } + on, err := getInt(fd, &sockOpts[ssoMulticastLoopback]) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. 
+func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoMulticastLoopback], boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return setGroup(fd, &sockOpts[ssoJoinGroup], ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return setGroup(fd, &sockOpts[ssoLeaveGroup], ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. 
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoJoinSourceGroup], ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoLeaveSourceGroup], ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoBlockSourceGroup], ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. 
+func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoUnblockSourceGroup], ifi, grp, src) +} + +// Checksum reports whether the kernel will compute, store or verify a +// checksum for both incoming and outgoing packets. If on is true, it +// returns an offset in bytes into the data of where the checksum +// field is located. +func (c *dgramOpt) Checksum() (on bool, offset int, err error) { + if !c.ok() { + return false, 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return false, 0, err + } + offset, err = getInt(fd, &sockOpts[ssoChecksum]) + if err != nil { + return false, 0, err + } + if offset < 0 { + return false, 0, nil + } + return true, offset, nil +} + +// SetChecksum enables the kernel checksum processing. If on is ture, +// the offset should be an offset in bytes into the data of where the +// checksum field is located. +func (c *dgramOpt) SetChecksum(on bool, offset int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + if !on { + offset = -1 + } + return setInt(fd, &sockOpts[ssoChecksum], offset) +} + +// ICMPFilter returns an ICMP filter. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return nil, err + } + return getICMPFilter(fd, &sockOpts[ssoICMPFilter]) +} + +// SetICMPFilter deploys the ICMP filter. 
+func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setICMPFilter(fd, &sockOpts[ssoICMPFilter], f) +} diff --git a/vendor/golang.org/x/net/ipv6/dgramopt_stub.go b/vendor/golang.org/x/net/ipv6/dgramopt_stub.go new file mode 100644 index 00000000..fb067fb2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/dgramopt_stub.go @@ -0,0 +1,119 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv6 + +import "net" + +// MulticastHopLimit returns the hop limit field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastHopLimit() (int, error) { + return 0, errOpNoSupport +} + +// SetMulticastHopLimit sets the hop limit field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { + return errOpNoSupport +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + return nil, errOpNoSupport +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + return errOpNoSupport +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + return false, errOpNoSupport +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + return errOpNoSupport +} + +// JoinGroup joins the group address group on the interface ifi. 
+// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + return errOpNoSupport +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + return errOpNoSupport +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. 
+func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// Checksum reports whether the kernel will compute, store or verify a +// checksum for both incoming and outgoing packets. If on is true, it +// returns an offset in bytes into the data of where the checksum +// field is located. +func (c *dgramOpt) Checksum() (on bool, offset int, err error) { + return false, 0, errOpNoSupport +} + +// SetChecksum enables the kernel checksum processing. If on is ture, +// the offset should be an offset in bytes into the data of where the +// checksum field is located. +func (c *dgramOpt) SetChecksum(on bool, offset int) error { + return errOpNoSupport +} + +// ICMPFilter returns an ICMP filter. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +// SetICMPFilter deploys the ICMP filter. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go new file mode 100644 index 00000000..dd13aa21 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -0,0 +1,240 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv6 implements IP-level socket options for the Internet +// Protocol version 6. +// +// The package provides IP-level socket options that allow +// manipulation of IPv6 facilities. +// +// The IPv6 protocol is defined in RFC 2460. +// Basic and advanced socket interface extensions are defined in RFC +// 3493 and RFC 3542. +// Socket interface extensions for multicast source filters are +// defined in RFC 3678. +// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810. +// Source-specific multicast is defined in RFC 4607. 
+// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv6 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, ipv6.Conn is used to set the traffic class field on the +// IPv6 header for each packet. +// +// ln, err := net.Listen("tcp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPconn which are created as network connections that use the +// IPv6 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.ParseIP("ff02::114") +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. 
Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv6 and Ethernet. +// +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of ipv6.PacketConn is used to enable control +// message transmissons. +// +// if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, rcm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if rcm.Dst.IsMulticast() { +// if rcm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. +// +// p.SetTrafficClass(0x0) +// p.SetHopLimit(16) +// if _, err := p.WriteTo(data[:n], nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1} +// for _, ifi := range []*net.Interface{en0, en1} { +// wcm.IfIndex = ifi.Index +// if _, err := p.WriteTo(data[:n], &wcm, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn may join multiple multicast +// groups. 
For example, a UDP listener with port 1024 might join two +// different groups across over two different network interfaces by +// using: +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv6.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// p2 := ipv6.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. 
+// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff01::114")}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn on MLDv2 supported platform is +// able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.ParseIP("ff32::8000:9")} +// ssmsource := net.UDPAddr{IP: net.ParseIP("fe80::cafe")} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. +// +// exclsource := net.UDPAddr{IP: net.ParseIP("fe80::dead")} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on MLDv2 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// MLDv1 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. 
+package ipv6 // import "golang.org/x/net/ipv6" diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go new file mode 100644 index 00000000..966eaa89 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -0,0 +1,123 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "time" +) + +// A Conn represents a network endpoint that uses IPv6 transport. +// It allows to set basic IP-level socket options such as traffic +// class and hop limit. +type Conn struct { + genericOpt +} + +type genericOpt struct { + net.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// PathMTU returns a path MTU value for the destination associated +// with the endpoint. +func (c *Conn) PathMTU() (int, error) { + if !c.genericOpt.ok() { + return 0, syscall.EINVAL + } + fd, err := c.genericOpt.sysfd() + if err != nil { + return 0, err + } + _, mtu, err := getMTUInfo(fd, &sockOpts[ssoPathMTU]) + if err != nil { + return 0, err + } + return mtu, nil +} + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + return &Conn{ + genericOpt: genericOpt{Conn: c}, + } +} + +// A PacketConn represents a packet network endpoint that uses IPv6 +// transport. It is used to control several IP-level socket options +// including IPv6 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv6 and higher layer +// protocols such as OSPF, GRE, and UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + net.PacketConn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.PacketConn != nil } + +// SetControlMessage allows to receive the per packet basis IP-level +// socket options. 
+func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + fd, err := c.payloadHandler.sysfd() + if err != nil { + return err + } + return setControlMessage(fd, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + return &PacketConn{ + genericOpt: genericOpt{Conn: c.(net.Conn)}, + dgramOpt: dgramOpt{PacketConn: c}, + payloadHandler: payloadHandler{PacketConn: c}, + } +} diff --git a/vendor/golang.org/x/net/ipv6/example_test.go b/vendor/golang.org/x/net/ipv6/example_test.go new file mode 100644 index 00000000..e761aa2a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/example_test.go @@ -0,0 +1,216 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "fmt" + "log" + "net" + "os" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" +) + +func ExampleConn_markingTCP() { + ln, err := net.Listen("tcp", "[::]:1024") + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + for { + c, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + if c.RemoteAddr().(*net.TCPAddr).IP.To16() != nil && c.RemoteAddr().(*net.TCPAddr).IP.To4() == nil { + p := ipv6.NewConn(c) + if err := p.SetTrafficClass(0x28); err != nil { // DSCP AF11 + log.Fatal(err) + } + if err := p.SetHopLimit(128); err != nil { + log.Fatal(err) + } + } + if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { + log.Fatal(err) + } + }(c) + } +} + +func ExamplePacketConn_servingOneShotMulticastDNS() { + c, err := net.ListenPacket("udp6", "[::]:5353") // mDNS over UDP + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + mDNSLinkLocal := net.UDPAddr{IP: net.ParseIP("ff02::fb")} + if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &mDNSLinkLocal) + if err := p.SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { + log.Fatal(err) + } + + var wcm ipv6.ControlMessage + b := make([]byte, 1500) + for { + _, rcm, peer, err := p.ReadFrom(b) + if err != nil { + log.Fatal(err) + } + if !rcm.Dst.IsMulticast() || !rcm.Dst.Equal(mDNSLinkLocal.IP) { + continue + } + wcm.IfIndex = rcm.IfIndex + answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this + if _, err := p.WriteTo(answers, &wcm, peer); err != nil { + log.Fatal(err) + } + } +} + +func ExamplePacketConn_tracingIPPacketRoute() { + // Tracing an IP packet route to www.google.com. 
+ + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + log.Fatal(err) + } + var dst net.IPAddr + for _, ip := range ips { + if ip.To16() != nil && ip.To4() == nil { + dst.IP = ip + fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) + break + } + } + if dst.IP == nil { + log.Fatal("no AAAA record found") + } + + c, err := net.ListenPacket("ip6:58", "::") // ICMP for IPv6 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagSrc|ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { + log.Fatal(err) + } + wm := icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + } + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + log.Fatal(err) + } + + var wcm ipv6.ControlMessage + rb := make([]byte, 1500) + for i := 1; i <= 64; i++ { // up to 64 hops + wm.Body.(*icmp.Echo).Seq = i + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + + // In the real world usually there are several + // multiple traffic-engineered paths for each hop. + // You may need to probe a few times to each hop. 
+ begin := time.Now() + wcm.HopLimit = i + if _, err := p.WriteTo(wb, &wcm, &dst); err != nil { + log.Fatal(err) + } + if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + log.Fatal(err) + } + n, rcm, peer, err := p.ReadFrom(rb) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + fmt.Printf("%v\t*\n", i) + continue + } + log.Fatal(err) + } + rm, err := icmp.ParseMessage(58, rb[:n]) + if err != nil { + log.Fatal(err) + } + rtt := time.Since(begin) + + // In the real world you need to determine whether the + // received message is yours using ControlMessage.Src, + // ControlMesage.Dst, icmp.Echo.ID and icmp.Echo.Seq. + switch rm.Type { + case ipv6.ICMPTypeTimeExceeded: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm) + case ipv6.ICMPTypeEchoReply: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm) + return + } + } +} + +func ExamplePacketConn_advertisingOSPFHello() { + c, err := net.ListenPacket("ip6:89", "::") // OSPF for IPv6 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + allSPFRouters := net.IPAddr{IP: net.ParseIP("ff02::5")} + if err := p.JoinGroup(en0, &allSPFRouters); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &allSPFRouters) + + hello := make([]byte, 24) // fake hello data, you need to implement this + ospf := make([]byte, 16) // fake ospf header, you need to implement this + ospf[0] = 3 // version 3 + ospf[1] = 1 // hello packet + ospf = append(ospf, hello...) 
+ if err := p.SetChecksum(true, 12); err != nil { + log.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: 0xc0, // DSCP CS6 + HopLimit: 1, + IfIndex: en0.Index, + } + if _, err := p.WriteTo(ospf, &cm, &allSPFRouters); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go new file mode 100644 index 00000000..3924b476 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/gen.go @@ -0,0 +1,208 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + // The ipv6 package still supports go1.2, and so we need to + // take care of additional platforms in go1.3 and above for + // working with go1.2. 
+ switch { + case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris": + b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv6\n"), 1) + case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le"): + b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv6\n"), 1) + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "http://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", + parseICMPv6Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "package ipv6\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv6Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv6Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, 
icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigName) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv6Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv6ParamRecord struct { + OrigName string + Name string + Value int +} + +func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Name, "Reserved") || + strings.Contains(pr.Name, "Unassigned") || + strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "Experiment") || + strings.Contains(pr.Name, "experiment") { + continue + } + ss := strings.Split(pr.Name, "\n") + if len(ss) > 1 { + prs[i].Name = strings.Join(ss, " ") + } else { + prs[i].Name = ss[0] + } + s := strings.TrimSpace(prs[i].Name) + prs[i].OrigName = s + prs[i].Name = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git 
a/vendor/golang.org/x/net/ipv6/genericopt_posix.go b/vendor/golang.org/x/net/ipv6/genericopt_posix.go new file mode 100644 index 00000000..dd77a016 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/genericopt_posix.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd windows + +package ipv6 + +import "syscall" + +// TrafficClass returns the traffic class field value for outgoing +// packets. +func (c *genericOpt) TrafficClass() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return 0, err + } + return getInt(fd, &sockOpts[ssoTrafficClass]) +} + +// SetTrafficClass sets the traffic class field value for future +// outgoing packets. +func (c *genericOpt) SetTrafficClass(tclass int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoTrafficClass], tclass) +} + +// HopLimit returns the hop limit field value for outgoing packets. +func (c *genericOpt) HopLimit() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return 0, err + } + return getInt(fd, &sockOpts[ssoHopLimit]) +} + +// SetHopLimit sets the hop limit field value for future outgoing +// packets. +func (c *genericOpt) SetHopLimit(hoplim int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoHopLimit], hoplim) +} diff --git a/vendor/golang.org/x/net/ipv6/genericopt_stub.go b/vendor/golang.org/x/net/ipv6/genericopt_stub.go new file mode 100644 index 00000000..f5c37224 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/genericopt_stub.go @@ -0,0 +1,30 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv6 + +// TrafficClass returns the traffic class field value for outgoing +// packets. +func (c *genericOpt) TrafficClass() (int, error) { + return 0, errOpNoSupport +} + +// SetTrafficClass sets the traffic class field value for future +// outgoing packets. +func (c *genericOpt) SetTrafficClass(tclass int) error { + return errOpNoSupport +} + +// HopLimit returns the hop limit field value for outgoing packets. +func (c *genericOpt) HopLimit() (int, error) { + return 0, errOpNoSupport +} + +// SetHopLimit sets the hop limit field value for future outgoing +// packets. +func (c *genericOpt) SetHopLimit(hoplim int) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/header.go b/vendor/golang.org/x/net/ipv6/header.go new file mode 100644 index 00000000..e05cb08b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "encoding/binary" + "fmt" + "net" +) + +const ( + Version = 6 // protocol version + HeaderLen = 40 // header length +) + +// A Header represents an IPv6 base header. +type Header struct { + Version int // protocol version + TrafficClass int // traffic class + FlowLabel int // flow label + PayloadLen int // payload length + NextHeader int // next header + HopLimit int // hop limit + Src net.IP // source address + Dst net.IP // destination address +} + +func (h *Header) String() string { + if h == nil { + return "<nil>" + } + return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst) +} + +// ParseHeader parses b as an IPv6 base header. 
+func ParseHeader(b []byte) (*Header, error) { + if len(b) < HeaderLen { + return nil, errHeaderTooShort + } + h := &Header{ + Version: int(b[0]) >> 4, + TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4, + FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]), + PayloadLen: int(binary.BigEndian.Uint16(b[4:6])), + NextHeader: int(b[6]), + HopLimit: int(b[7]), + } + h.Src = make(net.IP, net.IPv6len) + copy(h.Src, b[8:24]) + h.Dst = make(net.IP, net.IPv6len) + copy(h.Dst, b[24:40]) + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv6/header_test.go b/vendor/golang.org/x/net/ipv6/header_test.go new file mode 100644 index 00000000..ca11dc23 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header_test.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "reflect" + "strings" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv6" +) + +var ( + wireHeaderFromKernel = [ipv6.HeaderLen]byte{ + 0x69, 0x8b, 0xee, 0xf1, + 0xca, 0xfe, 0x2c, 0x01, + 0x20, 0x01, 0x0d, 0xb8, + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, + 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + } + + testHeader = &ipv6.Header{ + Version: ipv6.Version, + TrafficClass: iana.DiffServAF43, + FlowLabel: 0xbeef1, + PayloadLen: 0xcafe, + NextHeader: iana.ProtocolIPv6Frag, + HopLimit: 1, + Src: net.ParseIP("2001:db8:1::1"), + Dst: net.ParseIP("2001:db8:2::1"), + } +) + +func TestParseHeader(t *testing.T) { + h, err := ipv6.ParseHeader(wireHeaderFromKernel[:]) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, testHeader) { + t.Fatalf("got %#v; want %#v", h, testHeader) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) + } +} diff --git 
a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go new file mode 100644 index 00000000..53b99990 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -0,0 +1,53 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "encoding/binary" + "errors" + "net" + "unsafe" +) + +var ( + errMissingAddress = errors.New("missing address") + errHeaderTooShort = errors.New("header too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") + + nativeEndian binary.ByteOrder +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = binary.LittleEndian + } else { + nativeEndian = binary.BigEndian + } +} + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP16(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/helper_stub.go b/vendor/golang.org/x/net/ipv6/helper_stub.go new file mode 100644 index 00000000..20354ab2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper_stub.go @@ -0,0 +1,19 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 solaris + +package ipv6 + +func (c *genericOpt) sysfd() (int, error) { + return 0, errOpNoSupport +} + +func (c *dgramOpt) sysfd() (int, error) { + return 0, errOpNoSupport +} + +func (c *payloadHandler) sysfd() (int, error) { + return 0, errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/helper_unix.go b/vendor/golang.org/x/net/ipv6/helper_unix.go new file mode 100644 index 00000000..92868ed2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper_unix.go @@ -0,0 +1,46 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv6 + +import ( + "net" + "reflect" +) + +func (c *genericOpt) sysfd() (int, error) { + switch p := c.Conn.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + return sysfd(p) + } + return 0, errInvalidConnType +} + +func (c *dgramOpt) sysfd() (int, error) { + switch p := c.PacketConn.(type) { + case *net.UDPConn, *net.IPConn: + return sysfd(p.(net.Conn)) + } + return 0, errInvalidConnType +} + +func (c *payloadHandler) sysfd() (int, error) { + return sysfd(c.PacketConn.(net.Conn)) +} + +func sysfd(c net.Conn) (int, error) { + cv := reflect.ValueOf(c) + switch ce := cv.Elem(); ce.Kind() { + case reflect.Struct: + nfd := ce.FieldByName("conn").FieldByName("fd") + switch fe := nfd.Elem(); fe.Kind() { + case reflect.Struct: + fd := fe.FieldByName("sysfd") + return int(fd.Int()), nil + } + } + return 0, errInvalidConnType +} diff --git a/vendor/golang.org/x/net/ipv6/helper_windows.go b/vendor/golang.org/x/net/ipv6/helper_windows.go new file mode 100644 index 00000000..28c401b5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper_windows.go @@ -0,0 +1,45 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "reflect" + "syscall" +) + +func (c *genericOpt) sysfd() (syscall.Handle, error) { + switch p := c.Conn.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + return sysfd(p) + } + return syscall.InvalidHandle, errInvalidConnType +} + +func (c *dgramOpt) sysfd() (syscall.Handle, error) { + switch p := c.PacketConn.(type) { + case *net.UDPConn, *net.IPConn: + return sysfd(p.(net.Conn)) + } + return syscall.InvalidHandle, errInvalidConnType +} + +func (c *payloadHandler) sysfd() (syscall.Handle, error) { + return sysfd(c.PacketConn.(net.Conn)) +} + +func sysfd(c net.Conn) (syscall.Handle, error) { + cv := reflect.ValueOf(c) + switch ce := cv.Elem(); ce.Kind() { + case reflect.Struct: + netfd := ce.FieldByName("conn").FieldByName("fd") + switch fe := netfd.Elem(); fe.Kind() { + case reflect.Struct: + fd := fe.FieldByName("sysfd") + return syscall.Handle(fd.Uint()), nil + } + } + return syscall.InvalidHandle, errInvalidConnType +} diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go new file mode 100644 index 00000000..3c6214fb --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/iana.go @@ -0,0 +1,82 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package ipv6 + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +const ( + ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable + ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big + ICMPTypeTimeExceeded ICMPType = 3 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 4 // Parameter Problem + ICMPTypeEchoRequest ICMPType = 128 // Echo Request + ICMPTypeEchoReply ICMPType = 129 // Echo Reply + ICMPTypeMulticastListenerQuery ICMPType = 130 // Multicast Listener Query + ICMPTypeMulticastListenerReport ICMPType = 131 // Multicast Listener Report + ICMPTypeMulticastListenerDone ICMPType = 132 // Multicast Listener Done + ICMPTypeRouterSolicitation ICMPType = 133 // 
Router Solicitation + ICMPTypeRouterAdvertisement ICMPType = 134 // Router Advertisement + ICMPTypeNeighborSolicitation ICMPType = 135 // Neighbor Solicitation + ICMPTypeNeighborAdvertisement ICMPType = 136 // Neighbor Advertisement + ICMPTypeRedirect ICMPType = 137 // Redirect Message + ICMPTypeRouterRenumbering ICMPType = 138 // Router Renumbering + ICMPTypeNodeInformationQuery ICMPType = 139 // ICMP Node Information Query + ICMPTypeNodeInformationResponse ICMPType = 140 // ICMP Node Information Response + ICMPTypeInverseNeighborDiscoverySolicitation ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message + ICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message + ICMPTypeVersion2MulticastListenerReport ICMPType = 143 // Version 2 Multicast Listener Report + ICMPTypeHomeAgentAddressDiscoveryRequest ICMPType = 144 // Home Agent Address Discovery Request Message + ICMPTypeHomeAgentAddressDiscoveryReply ICMPType = 145 // Home Agent Address Discovery Reply Message + ICMPTypeMobilePrefixSolicitation ICMPType = 146 // Mobile Prefix Solicitation + ICMPTypeMobilePrefixAdvertisement ICMPType = 147 // Mobile Prefix Advertisement + ICMPTypeCertificationPathSolicitation ICMPType = 148 // Certification Path Solicitation Message + ICMPTypeCertificationPathAdvertisement ICMPType = 149 // Certification Path Advertisement Message + ICMPTypeMulticastRouterAdvertisement ICMPType = 151 // Multicast Router Advertisement + ICMPTypeMulticastRouterSolicitation ICMPType = 152 // Multicast Router Solicitation + ICMPTypeMulticastRouterTermination ICMPType = 153 // Multicast Router Termination + ICMPTypeFMIPv6 ICMPType = 154 // FMIPv6 Messages + ICMPTypeRPLControl ICMPType = 155 // RPL Control Message + ICMPTypeILNPv6LocatorUpdate ICMPType = 156 // ILNPv6 Locator Update Message + ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request + ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate 
Address Confirmation + ICMPTypeMPLControl ICMPType = 159 // MPL Control Message +) + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +var icmpTypes = map[ICMPType]string{ + 1: "destination unreachable", + 2: "packet too big", + 3: "time exceeded", + 4: "parameter problem", + 128: "echo request", + 129: "echo reply", + 130: "multicast listener query", + 131: "multicast listener report", + 132: "multicast listener done", + 133: "router solicitation", + 134: "router advertisement", + 135: "neighbor solicitation", + 136: "neighbor advertisement", + 137: "redirect message", + 138: "router renumbering", + 139: "icmp node information query", + 140: "icmp node information response", + 141: "inverse neighbor discovery solicitation message", + 142: "inverse neighbor discovery advertisement message", + 143: "version 2 multicast listener report", + 144: "home agent address discovery request message", + 145: "home agent address discovery reply message", + 146: "mobile prefix solicitation", + 147: "mobile prefix advertisement", + 148: "certification path solicitation message", + 149: "certification path advertisement message", + 151: "multicast router advertisement", + 152: "multicast router solicitation", + 153: "multicast router termination", + 154: "fmipv6 messages", + 155: "rpl control message", + 156: "ilnpv6 locator update message", + 157: "duplicate address request", + 158: "duplicate address confirmation", + 159: "mpl control message", +} diff --git a/vendor/golang.org/x/net/ipv6/icmp.go b/vendor/golang.org/x/net/ipv6/icmp.go new file mode 100644 index 00000000..a2de65a0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/iana" + +// An ICMPType represents a type of ICMP message. 
+type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "<nil>" + } + return s +} + +// Protocol returns the ICMPv6 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolIPv6ICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 2460 defines a reasonable role model. A node means a +// device that implements IP. A router means a node that forwards IP +// packets not explicitly addressed to itself, and a host means a node +// that is not a router. +type ICMPFilter struct { + sysICMPv6Filter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go new file mode 100644 index 00000000..30e3ce42 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv6 + +func (f *sysICMPv6Filter) accept(typ ICMPType) { + f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *sysICMPv6Filter) block(typ ICMPType) { + f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *sysICMPv6Filter) setAll(block bool) { + for i := range f.Filt { + if block { + f.Filt[i] = 0 + } else { + f.Filt[i] = 1<<32 - 1 + } + } +} + +func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { + return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_linux.go b/vendor/golang.org/x/net/ipv6/icmp_linux.go new file mode 100644 index 00000000..a67ecf69 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_linux.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *sysICMPv6Filter) accept(typ ICMPType) { + f.Data[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *sysICMPv6Filter) block(typ ICMPType) { + f.Data[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *sysICMPv6Filter) setAll(block bool) { + for i := range f.Data { + if block { + f.Data[i] = 1<<32 - 1 + } else { + f.Data[i] = 0 + } + } +} + +func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { + return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/vendor/golang.org/x/net/ipv6/icmp_solaris.go new file mode 100644 index 00000000..a942f354 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_solaris.go @@ -0,0 +1,24 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package ipv6 + +func (f *sysICMPv6Filter) accept(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) block(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) setAll(block bool) { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { + // TODO(mikio): implement this + return false +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go new file mode 100644 index 00000000..c1263eca --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package ipv6 + +type sysICMPv6Filter struct { +} + +func (f *sysICMPv6Filter) accept(typ ICMPType) { +} + +func (f *sysICMPv6Filter) block(typ ICMPType) { +} + +func (f *sysICMPv6Filter) setAll(block bool) { +} + +func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_test.go b/vendor/golang.org/x/net/ipv6/icmp_test.go new file mode 100644 index 00000000..e192d6d8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_test.go @@ -0,0 +1,96 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var icmpStringTests = []struct { + in ipv6.ICMPType + out string +}{ + {ipv6.ICMPTypeDestinationUnreachable, "destination unreachable"}, + + {256, "<nil>"}, +} + +func TestICMPString(t *testing.T) { + for _, tt := range icmpStringTests { + s := tt.in.String() + if s != tt.out { + t.Errorf("got %s; want %s", s, tt.out) + } + } +} + +func TestICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + var f ipv6.ICMPFilter + for _, toggle := range []bool{false, true} { + f.SetAll(toggle) + for _, typ := range []ipv6.ICMPType{ + ipv6.ICMPTypeDestinationUnreachable, + ipv6.ICMPTypeEchoReply, + ipv6.ICMPTypeNeighborSolicitation, + ipv6.ICMPTypeDuplicateAddressConfirmation, + } { + f.Accept(typ) + if f.WillBlock(typ) { + t.Errorf("ipv6.ICMPFilter.Set(%v, false) failed", typ) + } + f.Block(typ) + if !f.WillBlock(typ) { + t.Errorf("ipv6.ICMPFilter.Set(%v, true) failed", typ) + } + } + } +} + +func TestSetICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoRequest) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + kf, err := p.ICMPFilter() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(kf, &f) { + t.Fatalf("got %#v; want %#v", kf, f) + } +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_windows.go b/vendor/golang.org/x/net/ipv6/icmp_windows.go new 
file mode 100644 index 00000000..9dcfb810 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_windows.go @@ -0,0 +1,26 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +type sysICMPv6Filter struct { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) accept(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) block(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) setAll(block bool) { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { + // TODO(mikio): implement this + return false +} diff --git a/vendor/golang.org/x/net/ipv6/mocktransponder_test.go b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go new file mode 100644 index 00000000..d587922a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go @@ -0,0 +1,32 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "testing" +) + +func connector(t *testing.T, network, addr string, done chan<- bool) { + defer func() { done <- true }() + + c, err := net.Dial(network, addr) + if err != nil { + t.Error(err) + return + } + c.Close() +} + +func acceptor(t *testing.T, ln net.Listener, done chan<- bool) { + defer func() { done <- true }() + + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + c.Close() +} diff --git a/vendor/golang.org/x/net/ipv6/multicast_test.go b/vendor/golang.org/x/net/ipv6/multicast_test.go new file mode 100644 index 00000000..a3a8979d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicast_test.go @@ -0,0 +1,260 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var packetConnReadWriteMulticastUDPTests = []struct { + addr string + grp, src *net.UDPAddr +}{ + {"[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + + {"[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastUDP(t *testing.T) { + switch runtime.GOOS { + case "freebsd": // due to a bug on loopback marking + // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065. + t.Skipf("not supported on %s", runtime.GOOS) + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastUDPTests { + c, err := net.ListenPacket("udp6", tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + grp := *tt.grp + grp.Port = c.LocalAddr().(*net.UDPAddr).Port + p := ipv6.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, &grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, &grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if 
_, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + IfIndex: ifi.Index, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + cm.HopLimit = i + 1 + if n, err := p.WriteTo(wb, &cm, &grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatal(err) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } + } +} + +var packetConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastICMP(t *testing.T) { + switch runtime.GOOS { + case "freebsd": // due to a bug on loopback marking + // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065. 
+ t.Skipf("not supported on %s", runtime.GOOS) + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip6:ipv6-icmp", "::") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, tt.grp.IP) + p := ipv6.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, tt.grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + IfIndex: ifi.Index, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + + var psh []byte + for i, toggle := range []bool{true, false, true} { + if toggle { + psh = nil + if err := 
p.SetChecksum(true, 2); err != nil { + t.Fatal(err) + } + } else { + psh = pshicmp + // Some platforms never allow to + // disable the kernel checksum + // processing. + p.SetChecksum(false, -1) + } + wb, err := (&icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(psh) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + cm.HopLimit = i + 1 + if n, err := p.WriteTo(wb, &cm, tt.grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { + t.Fatal(err) + } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/multicastlistener_test.go b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go new file mode 100644 index 00000000..9711f751 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go @@ -0,0 +1,246 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var udpMultipleGroupListenerTests = []net.Addr{ + &net.UDPAddr{IP: net.ParseIP("ff02::114")}, // see RFC 4727 + &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}, + &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}, +} + +func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c, err := net.ListenPacket("udp6", "[::]:0") // wildcard address with non-reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } +} + +func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c1, err := net.ListenPacket("udp6", "[ff02::]:1024") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c1.Close() + + c2, err := net.ListenPacket("udp6", "[ff02::]:1024") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + var ps [2]*ipv6.PacketConn + ps[0] = ipv6.NewPacketConn(c1) + ps[1] = ipv6.NewPacketConn(c2) + var mift []*net.Interface 
+ + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + for _, p := range ps { + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + for _, p := range ps { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } + } +} + +func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + type ml struct { + c *ipv6.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip6", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("udp6", fmt.Sprintf("[%s%%%s]:1024", ip.String(), ifi.Name)) // unicast address with non-reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::") // wildcard address + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + gaddr := net.IPAddr{IP: 
net.ParseIP("ff02::114")} // see RFC 4727 + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "darwin", "dragonfly", "openbsd": // platforms that return fe80::1%lo0: bind: can't assign requested address + t.Skipf("not supported on %s", runtime.GOOS) + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + type ml struct { + c *ipv6.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip6", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("ip6:ipv6-icmp", fmt.Sprintf("%s%%%s", ip.String(), ifi.Name)) // unicast address + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go new file mode 100644 index 00000000..fe0e6e1b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go @@ -0,0 +1,157 @@ +// Copyright 2013 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var packetConnMulticastSocketOptionTests = []struct { + net, proto, addr string + grp, src net.Addr +}{ + {"udp6", "", "[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff02::115")}, nil}, // see RFC 4727 + + {"udp6", "", "[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 + {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff30::8000:2")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnMulticastSocketOptionTests { + if tt.net == "ip6" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, p, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) + } + } +} + +type testIPv6MulticastConn interface { + MulticastHopLimit() (int, error) + SetMulticastHopLimit(ttl int) error + MulticastLoopback() (bool, error) + SetMulticastLoopback(bool) error + JoinGroup(*net.Interface, net.Addr) error + LeaveGroup(*net.Interface, net.Addr) error + 
JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error +} + +func testMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp net.Addr) { + const hoplim = 255 + if err := c.SetMulticastHopLimit(hoplim); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastHopLimit(); err != nil { + t.Error(err) + return + } else if v != hoplim { + t.Errorf("got %v; want %v", v, hoplim) + return + } + + for _, toggle := range []bool{true, false} { + if err := c.SetMulticastLoopback(toggle); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastLoopback(); err != nil { + t.Error(err) + return + } else if v != toggle { + t.Errorf("got %v; want %v", v, toggle) + return + } + } + + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} + +func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp, src net.Addr) { + // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + return + } + t.Error(err) + return + } + if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if 
err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} diff --git a/vendor/golang.org/x/net/ipv6/payload.go b/vendor/golang.org/x/net/ipv6/payload.go new file mode 100644 index 00000000..529b20bc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload.go @@ -0,0 +1,15 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "net" + +// A payloadHandler represents the IPv6 datagram payload handler. +type payloadHandler struct { + net.PacketConn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go new file mode 100644 index 00000000..8e90d324 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -0,0 +1,70 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. 
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + oob := newControlMessage(&c.rawOpt) + var oobn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil { + return 0, nil, nil, err + } + default: + return 0, nil, nil, errInvalidConnType + } + if cm, err = parseControlMessage(oob[:oobn]); err != nil { + return 0, nil, nil, err + } + if cm != nil { + cm.Src = netAddrToIP16(src) + } + return +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + oob := marshalControlMessage(cm) + if dst == nil { + return 0, errMissingAddress + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, errInvalidConnType + } + if err != nil { + return 0, err + } + return +} diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go new file mode 100644 index 00000000..499204d0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -0,0 +1,41 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_test.go b/vendor/golang.org/x/net/ipv6/readwrite_test.go new file mode 100644 index 00000000..8c8c6fde --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_test.go @@ -0,0 +1,189 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "bytes" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func benchmarkUDPListener() (net.PacketConn, net.Addr, error) { + c, err := net.ListenPacket("udp6", "[::1]:0") + if err != nil { + return nil, nil, err + } + dst, err := net.ResolveUDPAddr("udp6", c.LocalAddr().String()) + if err != nil { + c.Close() + return nil, nil, err + } + return c, dst, nil +} + +func BenchmarkReadWriteNetUDP(b *testing.B) { + if !supportsIPv6 { + b.Skip("ipv6 is not supported") + } + + c, dst, err := benchmarkUDPListener() + if err != nil { + b.Fatal(err) + } + defer c.Close() + + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchmarkReadWriteNetUDP(b, c, wb, rb, dst) + } +} + +func benchmarkReadWriteNetUDP(b *testing.B, c net.PacketConn, wb, rb []byte, dst net.Addr) { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } +} + +func BenchmarkReadWriteIPv6UDP(b *testing.B) { + if !supportsIPv6 { + b.Skip("ipv6 is not supported") + } + + c, dst, err := benchmarkUDPListener() + if err != nil { + b.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchmarkReadWriteIPv6UDP(b, p, wb, rb, dst, ifi) + } +} + +func benchmarkReadWriteIPv6UDP(b *testing.B, p *ipv6.PacketConn, wb, rb []byte, dst net.Addr, ifi *net.Interface) { + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + } + 
if ifi != nil { + cm.IfIndex = ifi.Index + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } else if n != len(wb) { + b.Fatalf("got %v; want %v", n, len(wb)) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } +} + +func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + c, err := net.ListenPacket("udp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst, err := net.ResolveUDPAddr("udp6", c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + wb := []byte("HELLO-R-U-THERE") + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + rb := make([]byte, 128) + if n, cm, _, err := p.ReadFrom(rb); err != nil { + t.Error(err) + return + } else if !bytes.Equal(rb[:n], wb) { + t.Errorf("got %v; want %v", rb[:n], wb) + return + } else { + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + } + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Error(err) + return + } else if n != len(wb) { + t.Errorf("got %v; want %v", 
n, len(wb)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt.go b/vendor/golang.org/x/net/ipv6/sockopt.go new file mode 100644 index 00000000..f0cfc2f9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt.go @@ -0,0 +1,46 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +// Sticky socket options +const ( + ssoTrafficClass = iota // header field for unicast packet, RFC 3542 + ssoHopLimit // header field for unicast packet, RFC 3493 + ssoMulticastInterface // outbound interface for multicast packet, RFC 3493 + ssoMulticastHopLimit // header field for multicast packet, RFC 3493 + ssoMulticastLoopback // loopback for multicast packet, RFC 3493 + ssoReceiveTrafficClass // header field on received packet, RFC 3542 + ssoReceiveHopLimit // header field on received packet, RFC 2292 or 3542 + ssoReceivePacketInfo // incbound or outbound packet path, RFC 2292 or 3542 + ssoReceivePathMTU // path mtu, RFC 3542 + ssoPathMTU // path mtu, RFC 3542 + ssoChecksum // packet checksum, RFC 2292 or 3542 + ssoICMPFilter // icmp filter, RFC 2292 or 3542 + ssoJoinGroup // any-source multicast, RFC 3493 + ssoLeaveGroup // any-source multicast, RFC 3493 + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoMax +) + +// Sticky socket option value types +const ( + ssoTypeInt = iota + 1 + ssoTypeInterface + ssoTypeICMPFilter + ssoTypeMTUInfo + ssoTypeIPMreq + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky 
socket option. +type sockOpt struct { + level int // option level + name int // option name, must be equal or greater than 1 + typ int // option value type, must be equal or greater than 1 +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go b/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go new file mode 100644 index 00000000..b7fd4fe6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go @@ -0,0 +1,22 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv6 + +import ( + "net" + "os" + "unsafe" +) + +func setsockoptIPMreq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + var mreq sysIPv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&mreq), sysSizeofIPv6Mreq)) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go b/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go new file mode 100644 index 00000000..c03c7313 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go @@ -0,0 +1,21 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "os" + "syscall" + "unsafe" +) + +func setsockoptIPMreq(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + var mreq sysIPv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&mreq)), sysSizeofIPv6Mreq)) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go new file mode 100644 index 00000000..7732e49f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux + +package ipv6 + +import "net" + +func setsockoptGroupReq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func setsockoptGroupSourceReq(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go b/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go new file mode 100644 index 00000000..a36a7e03 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go @@ -0,0 +1,59 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin freebsd linux + +package ipv6 + +import ( + "net" + "os" + "unsafe" +) + +var freebsd32o64 bool + +func setsockoptGroupReq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + var gr sysGroupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var p unsafe.Pointer + var l uint32 + if freebsd32o64 { + var d [sysSizeofGroupReq + 4]byte + s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + p = unsafe.Pointer(&d[0]) + l = sysSizeofGroupReq + 4 + } else { + p = unsafe.Pointer(&gr) + l = sysSizeofGroupReq + } + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, p, l)) +} + +func setsockoptGroupSourceReq(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { + var gsr sysGroupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var p unsafe.Pointer + var l uint32 + if freebsd32o64 { + var d [sysSizeofGroupSourceReq + 4]byte + s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + p = unsafe.Pointer(&d[0]) + l = sysSizeofGroupSourceReq + 4 + } else { + p = unsafe.Pointer(&gsr) + l = sysSizeofGroupSourceReq + } + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, p, l)) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go new file mode 100644 index 00000000..b8dacfde --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 solaris + +package ipv6 + +import "net" + +func getMTUInfo(fd int, opt *sockOpt) (*net.Interface, int, error) { + return nil, 0, errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_test.go b/vendor/golang.org/x/net/ipv6/sockopt_test.go new file mode 100644 index 00000000..9c219031 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_test.go @@ -0,0 +1,133 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var supportsIPv6 bool = nettest.SupportsIPv6() + +func TestConnInitiatorPathMTU(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go acceptor(t, ln, done) + + c, err := net.Dial("tcp6", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_PATHMTU option + t.Logf("not supported on %s", runtime.GOOS) + default: + t.Fatal(err) + } + } else { + t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) + } + + <-done +} + +func TestConnResponderPathMTU(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go connector(t, "tcp6", 
ln.Addr().String(), done) + + c, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_PATHMTU option + t.Logf("not supported on %s", runtime.GOOS) + default: + t.Fatal(err) + } + } else { + t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) + } + + <-done +} + +func TestPacketConnChecksum(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolOSPFIGP), "::") // OSPF for IPv6 + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + offset := 12 // see RFC 5340 + + for _, toggle := range []bool{false, true} { + if err := p.SetChecksum(toggle, offset); err != nil { + if toggle { + t.Fatalf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err) + } else { + // Some platforms never allow to disable the kernel + // checksum processing. + t.Logf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err) + } + } + if on, offset, err := p.Checksum(); err != nil { + t.Fatal(err) + } else { + t.Logf("kernel checksum processing enabled=%v, offset=%v", on, offset) + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_unix.go b/vendor/golang.org/x/net/ipv6/sockopt_unix.go new file mode 100644 index 00000000..7115b18e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_unix.go @@ -0,0 +1,122 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv6 + +import ( + "net" + "os" + "unsafe" +) + +func getInt(fd int, opt *sockOpt) (int, error) { + if opt.name < 1 || opt.typ != ssoTypeInt { + return 0, errOpNoSupport + } + var i int32 + l := uint32(4) + if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil { + return 0, os.NewSyscallError("getsockopt", err) + } + return int(i), nil +} + +func setInt(fd int, opt *sockOpt, v int) error { + if opt.name < 1 || opt.typ != ssoTypeInt { + return errOpNoSupport + } + i := int32(v) + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), 4)) +} + +func getInterface(fd int, opt *sockOpt) (*net.Interface, error) { + if opt.name < 1 || opt.typ != ssoTypeInterface { + return nil, errOpNoSupport + } + var i int32 + l := uint32(4) + if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + if i == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(i)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func setInterface(fd int, opt *sockOpt, ifi *net.Interface) error { + if opt.name < 1 || opt.typ != ssoTypeInterface { + return errOpNoSupport + } + var i int32 + if ifi != nil { + i = int32(ifi.Index) + } + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), 4)) +} + +func getICMPFilter(fd int, opt *sockOpt) (*ICMPFilter, error) { + if opt.name < 1 || opt.typ != ssoTypeICMPFilter { + return nil, errOpNoSupport + } + var f ICMPFilter + l := uint32(sysSizeofICMPv6Filter) + if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&f.sysICMPv6Filter), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + return &f, nil +} + +func setICMPFilter(fd int, opt *sockOpt, f *ICMPFilter) error { + if opt.name < 1 || opt.typ != ssoTypeICMPFilter { + return errOpNoSupport + } + 
return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&f.sysICMPv6Filter), sysSizeofICMPv6Filter)) +} + +func getMTUInfo(fd int, opt *sockOpt) (*net.Interface, int, error) { + if opt.name < 1 || opt.typ != ssoTypeMTUInfo { + return nil, 0, errOpNoSupport + } + var mi sysIPv6Mtuinfo + l := uint32(sysSizeofIPv6Mtuinfo) + if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&mi), &l); err != nil { + return nil, 0, os.NewSyscallError("getsockopt", err) + } + if mi.Addr.Scope_id == 0 { + return nil, int(mi.Mtu), nil + } + ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) + if err != nil { + return nil, 0, err + } + return ifi, int(mi.Mtu), nil +} + +func setGroup(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + if opt.name < 1 { + return errOpNoSupport + } + switch opt.typ { + case ssoTypeIPMreq: + return setsockoptIPMreq(fd, opt, ifi, grp) + case ssoTypeGroupReq: + return setsockoptGroupReq(fd, opt, ifi, grp) + default: + return errOpNoSupport + } +} + +func setSourceGroup(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { + if opt.name < 1 || opt.typ != ssoTypeGroupSourceReq { + return errOpNoSupport + } + return setsockoptGroupSourceReq(fd, opt, ifi, grp, src) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_windows.go b/vendor/golang.org/x/net/ipv6/sockopt_windows.go new file mode 100644 index 00000000..32c73b72 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_windows.go @@ -0,0 +1,86 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "os" + "syscall" + "unsafe" +) + +func getInt(fd syscall.Handle, opt *sockOpt) (int, error) { + if opt.name < 1 || opt.typ != ssoTypeInt { + return 0, errOpNoSupport + } + var i int32 + l := int32(4) + if err := syscall.Getsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), &l); err != nil { + return 0, os.NewSyscallError("getsockopt", err) + } + return int(i), nil +} + +func setInt(fd syscall.Handle, opt *sockOpt, v int) error { + if opt.name < 1 || opt.typ != ssoTypeInt { + return errOpNoSupport + } + i := int32(v) + return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), 4)) +} + +func getInterface(fd syscall.Handle, opt *sockOpt) (*net.Interface, error) { + if opt.name < 1 || opt.typ != ssoTypeInterface { + return nil, errOpNoSupport + } + var i int32 + l := int32(4) + if err := syscall.Getsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + if i == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(i)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func setInterface(fd syscall.Handle, opt *sockOpt, ifi *net.Interface) error { + if opt.name < 1 || opt.typ != ssoTypeInterface { + return errOpNoSupport + } + var i int32 + if ifi != nil { + i = int32(ifi.Index) + } + return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), 4)) +} + +func getICMPFilter(fd syscall.Handle, opt *sockOpt) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func setICMPFilter(fd syscall.Handle, opt *sockOpt, f *ICMPFilter) error { + return errOpNoSupport +} + +func getMTUInfo(fd syscall.Handle, opt *sockOpt) (*net.Interface, int, error) { + return nil, 0, errOpNoSupport +} + +func setGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp net.IP) 
error { + if opt.name < 1 || opt.typ != ssoTypeIPMreq { + return errOpNoSupport + } + return setsockoptIPMreq(fd, opt, ifi, grp) +} + +func setSourceGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { + // TODO(mikio): implement this + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go new file mode 100644 index 00000000..0ee43e6d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -0,0 +1,56 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly netbsd openbsd + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sysSizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}, + ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, + ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, + ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}, + ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}, + ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}, + ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}, + ssoPathMTU: 
{iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}, + ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt}, + ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter}, + ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq}, + ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq}, + } +) + +func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *sysInet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *sysIPv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_darwin.go b/vendor/golang.org/x/net/ipv6/sys_darwin.go new file mode 100644 index 00000000..c263f08d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_darwin.go @@ -0,0 +1,133 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlHopLimit: {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_2292PKTINFO, sysSizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, + ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, + ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, ssoTypeInt}, + ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_2292PKTINFO, ssoTypeInt}, + ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt}, + ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter}, + ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq}, + ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + osver, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + var i int + for i = range osver { + if osver[i] == '.' { + break + } + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11.0.0). But it looks like + // those features require OS X 10.8 (Darwin 12.0.0) and above. + // See http://support.apple.com/kb/HT1633. 
+ if i > 2 || i == 2 && osver[0] >= '1' && osver[1] >= '2' { + ctlOpts[ctlTrafficClass].name = sysIPV6_TCLASS + ctlOpts[ctlTrafficClass].length = 4 + ctlOpts[ctlTrafficClass].marshal = marshalTrafficClass + ctlOpts[ctlTrafficClass].parse = parseTrafficClass + ctlOpts[ctlHopLimit].name = sysIPV6_HOPLIMIT + ctlOpts[ctlHopLimit].marshal = marshalHopLimit + ctlOpts[ctlPacketInfo].name = sysIPV6_PKTINFO + ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo + ctlOpts[ctlNextHop].name = sysIPV6_NEXTHOP + ctlOpts[ctlNextHop].length = sysSizeofSockaddrInet6 + ctlOpts[ctlNextHop].marshal = marshalNextHop + ctlOpts[ctlNextHop].parse = parseNextHop + ctlOpts[ctlPathMTU].name = sysIPV6_PATHMTU + ctlOpts[ctlPathMTU].length = sysSizeofIPv6Mtuinfo + ctlOpts[ctlPathMTU].marshal = marshalPathMTU + ctlOpts[ctlPathMTU].parse = parsePathMTU + sockOpts[ssoTrafficClass].level = iana.ProtocolIPv6 + sockOpts[ssoTrafficClass].name = sysIPV6_TCLASS + sockOpts[ssoTrafficClass].typ = ssoTypeInt + sockOpts[ssoReceiveTrafficClass].level = iana.ProtocolIPv6 + sockOpts[ssoReceiveTrafficClass].name = sysIPV6_RECVTCLASS + sockOpts[ssoReceiveTrafficClass].typ = ssoTypeInt + sockOpts[ssoReceiveHopLimit].name = sysIPV6_RECVHOPLIMIT + sockOpts[ssoReceivePacketInfo].name = sysIPV6_RECVPKTINFO + sockOpts[ssoReceivePathMTU].level = iana.ProtocolIPv6 + sockOpts[ssoReceivePathMTU].name = sysIPV6_RECVPATHMTU + sockOpts[ssoReceivePathMTU].typ = ssoTypeInt + sockOpts[ssoPathMTU].level = iana.ProtocolIPv6 + sockOpts[ssoPathMTU].name = sysIPV6_PATHMTU + sockOpts[ssoPathMTU].typ = ssoTypeMTUInfo + sockOpts[ssoJoinGroup].name = sysMCAST_JOIN_GROUP + sockOpts[ssoJoinGroup].typ = ssoTypeGroupReq + sockOpts[ssoLeaveGroup].name = sysMCAST_LEAVE_GROUP + sockOpts[ssoLeaveGroup].typ = ssoTypeGroupReq + sockOpts[ssoJoinSourceGroup].level = iana.ProtocolIPv6 + sockOpts[ssoJoinSourceGroup].name = sysMCAST_JOIN_SOURCE_GROUP + sockOpts[ssoJoinSourceGroup].typ = ssoTypeGroupSourceReq + sockOpts[ssoLeaveSourceGroup].level = 
iana.ProtocolIPv6 + sockOpts[ssoLeaveSourceGroup].name = sysMCAST_LEAVE_SOURCE_GROUP + sockOpts[ssoLeaveSourceGroup].typ = ssoTypeGroupSourceReq + sockOpts[ssoBlockSourceGroup].level = iana.ProtocolIPv6 + sockOpts[ssoBlockSourceGroup].name = sysMCAST_BLOCK_SOURCE + sockOpts[ssoBlockSourceGroup].typ = ssoTypeGroupSourceReq + sockOpts[ssoUnblockSourceGroup].level = iana.ProtocolIPv6 + sockOpts[ssoUnblockSourceGroup].name = sysMCAST_UNBLOCK_SOURCE + sockOpts[ssoUnblockSourceGroup].typ = ssoTypeGroupSourceReq + } +} + +func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *sysInet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *sysIPv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *sysGroupReq) setGroup(grp net.IP) { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Pad_cgo_0[0])) + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Pad_cgo_0[0])) + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Pad_cgo_1[0])) + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/vendor/golang.org/x/net/ipv6/sys_freebsd.go new file mode 100644 index 00000000..5527001f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_freebsd.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sysSizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}, + ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, + ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, + ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}, + ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}, + ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}, + ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}, + ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}, + ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt}, + ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter}, + ssoJoinGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, + ssoLeaveGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, + ssoJoinSourceGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {iana.ProtocolIPv6, 
sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + } +) + +func init() { + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *sysInet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *sysIPv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *sysGroupReq) setGroup(grp net.IP) { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_linux.go b/vendor/golang.org/x/net/ipv6/sys_linux.go new file mode 100644 index 00000000..fd7d5b18 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_linux.go @@ -0,0 +1,72 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}, + ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, + ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, + ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}, + ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}, + ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}, + ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}, + ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}, + ssoChecksum: {iana.ProtocolReserved, sysIPV6_CHECKSUM, ssoTypeInt}, + ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMPV6_FILTER, ssoTypeICMPFilter}, + ssoJoinGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, + ssoLeaveGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, + ssoJoinSourceGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + } +) + +func (sa *sysSockaddrInet6) setSockaddr(ip 
net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *sysInet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *sysIPv6Mreq) setIfindex(i int) { + mreq.Ifindex = int32(i) +} + +func (gr *sysGroupReq) setGroup(grp net.IP) { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go new file mode 100644 index 00000000..ead0f4d1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv6 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = [ssoMax]sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv6/sys_windows.go b/vendor/golang.org/x/net/ipv6/sys_windows.go new file mode 100644 index 00000000..fda87573 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_windows.go @@ -0,0 +1,63 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" +) + +const ( + // See ws2tcpip.h. 
+ sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PKTINFO = 0x13 + + sysSizeofSockaddrInet6 = 0x1c + + sysSizeofIPv6Mreq = 0x14 +) + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = [ssoMax]sockOpt{ + ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, + ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, + ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, + ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq}, + ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq}, + } +) + +func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (mreq *sysIPv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/syscall_linux_386.go b/vendor/golang.org/x/net/ipv6/syscall_linux_386.go new file mode 100644 index 00000000..64a3c665 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/syscall_linux_386.go @@ -0,0 +1,31 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "syscall" + "unsafe" +) + +const ( + sysGETSOCKOPT = 0xf + sysSETSOCKOPT = 0xe +) + +func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno) + +func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { + if _, errno := socketcall(sysGETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { + return error(errno) + } + return nil +} + +func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { + if _, errno := socketcall(sysSETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/syscall_unix.go b/vendor/golang.org/x/net/ipv6/syscall_unix.go new file mode 100644 index 00000000..925fd2fb --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/syscall_unix.go @@ -0,0 +1,26 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux,!386 netbsd openbsd + +package ipv6 + +import ( + "syscall" + "unsafe" +) + +func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { + if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { + return error(errno) + } + return nil +} + +func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { + if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/thunk_linux_386.s b/vendor/golang.org/x/net/ipv6/thunk_linux_386.s new file mode 100644 index 00000000..daa78bc0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/thunk_linux_386.s @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.2 + +TEXT ·socketcall(SB),4,$0-36 + JMP syscall·socketcall(SB) diff --git a/vendor/golang.org/x/net/ipv6/unicast_test.go b/vendor/golang.org/x/net/ipv6/unicast_test.go new file mode 100644 index 00000000..db5b08a2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/unicast_test.go @@ -0,0 +1,182 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func TestPacketConnReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + c, err := net.ListenPacket("udp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst, err := net.ResolveUDPAddr("udp6", c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + cm.HopLimit = i + 1 + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } +} + +func TestPacketConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", 
"solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst, err := net.ResolveIPAddr("ip6", "::1") + if err != nil { + t.Fatal(err) + } + + pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, dst.IP) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + + var psh []byte + for i, toggle := range []bool{true, false, true} { + if toggle { + psh = nil + if err := p.SetChecksum(true, 2); err != nil { + t.Fatal(err) + } + } else { + psh = pshicmp + // Some platforms never allow to disable the + // kernel checksum processing. 
+ p.SetChecksum(false, -1) + } + wb, err := (&icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(psh) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + cm.HopLimit = i + 1 + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { + t.Fatal(err) + } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go new file mode 100644 index 00000000..7bb2e440 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go @@ -0,0 +1,111 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func TestConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go acceptor(t, ln, done) + + c, err := net.Dial("tcp6", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv6.NewConn(c)) + + <-done +} + +var packetConnUnicastSocketOptionTests = []struct { + net, proto, addr string +}{ + {"udp6", "", "[::1]:0"}, + {"ip6", ":ipv6-icmp", "::1"}, +} + +func TestPacketConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnUnicastSocketOptionTests { + if tt.net == "ip6" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv6.NewPacketConn(c)) + } +} + +type testIPv6UnicastConn interface { + TrafficClass() (int, error) + SetTrafficClass(int) error + HopLimit() (int, error) + SetHopLimit(int) error +} + +func testUnicastSocketOptions(t *testing.T, c testIPv6UnicastConn) { + tclass := iana.DiffServCS0 | iana.NotECNTransport + if err := c.SetTrafficClass(tclass); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_TCLASS option + t.Logf("not supported on %s", runtime.GOOS) + goto next + } + t.Fatal(err) + } + if v, err := c.TrafficClass(); err != nil { + 
t.Fatal(err) + } else if v != tclass { + t.Fatalf("got %v; want %v", v, tclass) + } + +next: + hoplim := 255 + if err := c.SetHopLimit(hoplim); err != nil { + t.Fatal(err) + } + if v, err := c.HopLimit(); err != nil { + t.Fatal(err) + } else if v != hoplim { + t.Fatalf("got %v; want %v", v, hoplim) + } +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/golang.org/x/net/ipv6/zsys_darwin.go new file mode 100644 index 00000000..cb044b03 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -0,0 +1,131 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + sysIPV6_2292PKTINFO = 0x13 + sysIPV6_2292HOPLIMIT = 0x14 + sysIPV6_2292NEXTHOP = 0x15 + sysIPV6_2292HOPOPTS = 0x16 + sysIPV6_2292DSTOPTS = 0x17 + sysIPV6_2292RTHDR = 0x18 + + sysIPV6_2292PKTOPTIONS = 0x19 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RECVTCLASS = 0x23 + sysIPV6_TCLASS = 0x24 + + sysIPV6_RTHDRDSTOPTS = 0x39 + + sysIPV6_RECVPKTINFO = 0x3d + + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_MSFILTER = 0x4a + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_BOUND_IF = 0x7d + + 
sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go new file mode 100644 index 00000000..5a03ab73 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -0,0 +1,90 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +// +build dragonfly + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 
0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go new file mode 100644 index 00000000..4ace96f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -0,0 +1,122 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 
0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysGroupReq struct { + Interface uint32 + Group sysSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysSockaddrStorage + Source sysSockaddrStorage +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go new file mode 100644 index 00000000..4a62c2d5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 
+ sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysGroupReq struct { 
+ Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage + Source sysSockaddrStorage +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go new file mode 100644 index 00000000..4a62c2d5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + 
sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage + Source sysSockaddrStorage +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go new file mode 100644 index 00000000..27279292 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -0,0 +1,152 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + 
sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label 
uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go new file mode 100644 index 00000000..2f742e95 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -0,0 +1,154 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + 
sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source 
sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go new file mode 100644 index 00000000..27279292 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -0,0 +1,152 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 
+ sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go new file mode 100644 index 00000000..ab104645 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -0,0 +1,156 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,arm64 + +package ipv6 + 
+const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 
+ sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go new file mode 100644 index 00000000..ec8ce157 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -0,0 +1,156 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,mips64 + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 
0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage 
struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go new file mode 100644 index 00000000..2341ae67 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -0,0 +1,156 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,mips64le + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + 
sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type 
sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go new file mode 100644 index 00000000..b99b8a51 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -0,0 +1,156 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,ppc64 + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + 
sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group 
sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go new file mode 100644 index 00000000..992b56e2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -0,0 +1,156 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,ppc64le + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + 
sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go new file mode 100644 
index 00000000..d6ec88e3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -0,0 +1,84 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go new file mode 100644 index 00000000..3e080b78 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs 
defs_openbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTH_LEVEL = 0x35 + sysIPV6_ESP_TRANS_LEVEL = 0x36 + sysIPV6_ESP_NETWORK_LEVEL = 0x37 + sysIPSEC6_OUTSA = 0x38 + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + sysIPV6_IPCOMP_LEVEL = 0x3c + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + sysIPV6_PIPEX = 0x3f + + sysIPV6_RTABLE = 0x1021 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/golang.org/x/net/ipv6/zsys_solaris.go new file mode 100644 index 00000000..cdf00c25 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -0,0 +1,105 @@ +// Created 
by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +// +build solaris + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x5 + sysIPV6_MULTICAST_IF = 0x6 + sysIPV6_MULTICAST_HOPS = 0x7 + sysIPV6_MULTICAST_LOOP = 0x8 + sysIPV6_JOIN_GROUP = 0x9 + sysIPV6_LEAVE_GROUP = 0xa + + sysIPV6_PKTINFO = 0xb + + sysIPV6_HOPLIMIT = 0xc + sysIPV6_NEXTHOP = 0xd + sysIPV6_HOPOPTS = 0xe + sysIPV6_DSTOPTS = 0xf + + sysIPV6_RTHDR = 0x10 + sysIPV6_RTHDRDSTOPTS = 0x11 + + sysIPV6_RECVPKTINFO = 0x12 + sysIPV6_RECVHOPLIMIT = 0x13 + sysIPV6_RECVHOPOPTS = 0x14 + + sysIPV6_RECVRTHDR = 0x16 + + sysIPV6_RECVRTHDRDSTOPTS = 0x17 + + sysIPV6_CHECKSUM = 0x18 + sysIPV6_RECVTCLASS = 0x19 + sysIPV6_USE_MIN_MTU = 0x20 + sysIPV6_DONTFRAG = 0x21 + sysIPV6_SEC_OPT = 0x22 + sysIPV6_SRC_PREFERENCES = 0x23 + sysIPV6_RECVPATHMTU = 0x24 + sysIPV6_PATHMTU = 0x25 + sysIPV6_TCLASS = 0x26 + sysIPV6_V6ONLY = 0x27 + + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_PREFER_SRC_HOME = 0x1 + sysIPV6_PREFER_SRC_COA = 0x2 + sysIPV6_PREFER_SRC_PUBLIC = 0x4 + sysIPV6_PREFER_SRC_TMP = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x10 + sysIPV6_PREFER_SRC_CGA = 0x20 + + sysIPV6_PREFER_SRC_MIPMASK = 0x3 + sysIPV6_PREFER_SRC_MIPDEFAULT = 0x1 + sysIPV6_PREFER_SRC_TMPMASK = 0xc + sysIPV6_PREFER_SRC_TMPDEFAULT = 0x4 + sysIPV6_PREFER_SRC_CGAMASK = 0x30 + sysIPV6_PREFER_SRC_CGADEFAULT = 0x10 + + sysIPV6_PREFER_SRC_MASK = 0x3f + + sysIPV6_PREFER_SRC_DEFAULT = 0x15 + + sysIPV6_BOUND_IF = 0x41 + sysIPV6_UNSPEC_SRC = 0x42 + + sysICMP6_FILTER = 0x1 + + sysSizeofSockaddrInet6 = 0x20 + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x24 + + sysSizeofIPv6Mreq = 0x14 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { 
+ Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysICMPv6Filter struct { + X__icmp6_filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go new file mode 100644 index 00000000..b317ba2e --- /dev/null +++ b/vendor/golang.org/x/net/netutil/listen.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package netutil provides network utility functions, complementing the more +// common ones in the net package. +package netutil // import "golang.org/x/net/netutil" + +import ( + "net" + "sync" +) + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. +func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{l, make(chan struct{}, n)} +} + +type limitListener struct { + net.Listener + sem chan struct{} +} + +func (l *limitListener) acquire() { l.sem <- struct{}{} } +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + l.acquire() + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} diff --git a/vendor/golang.org/x/net/netutil/listen_test.go b/vendor/golang.org/x/net/netutil/listen_test.go new file mode 100644 index 00000000..c1a3d552 --- /dev/null +++ b/vendor/golang.org/x/net/netutil/listen_test.go @@ -0,0 +1,101 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package netutil + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/internal/nettest" +) + +func TestLimitListener(t *testing.T) { + const max = 5 + attempts := (nettest.MaxOpenFiles() - max) / 2 + if attempts > 256 { // maximum length of accept queue is 128 by default + attempts = 256 + } + + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + l = LimitListener(l, max) + + var open int32 + go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if n := atomic.AddInt32(&open, 1); n > max { + t.Errorf("%d open connections, want <= %d", n, max) + } + defer atomic.AddInt32(&open, -1) + time.Sleep(10 * time.Millisecond) + fmt.Fprint(w, "some body") + })) + + var wg sync.WaitGroup + var failed int32 + for i := 0; i < attempts; i++ { + wg.Add(1) + go func() { + defer wg.Done() + c := http.Client{Timeout: 3 * time.Second} + r, err := c.Get("http://" + l.Addr().String()) + if err != nil { + t.Log(err) + atomic.AddInt32(&failed, 1) + return + } + defer r.Body.Close() + io.Copy(ioutil.Discard, r.Body) + }() + } + wg.Wait() + + // We expect some Gets to fail as the kernel's accept queue is filled, + // but most should succeed. + if int(failed) >= attempts/2 { + t.Errorf("%d requests failed within %d attempts", failed, attempts) + } +} + +type errorListener struct { + net.Listener +} + +func (errorListener) Accept() (net.Conn, error) { + return nil, errFake +} + +var errFake = errors.New("fake error from errorListener") + +// This used to hang. 
+func TestLimitListenerError(t *testing.T) { + donec := make(chan bool, 1) + go func() { + const n = 2 + ll := LimitListener(errorListener{}, n) + for i := 0; i < n+1; i++ { + _, err := ll.Accept() + if err != errFake { + t.Fatalf("Accept error = %v; want errFake", err) + } + } + donec <- true + }() + select { + case <-donec: + case <-time.After(5 * time.Second): + t.Fatal("timeout. deadlock?") + } +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 00000000..4c5ad88b --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,18 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" +) + +type direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var Direct = direct{} + +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 00000000..f540b196 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,140 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the hostname +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. 
+func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone "example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a hostname +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. 
Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a hostname that will use the bypass proxy. +func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/per_host_test.go b/vendor/golang.org/x/net/proxy/per_host_test.go new file mode 100644 index 00000000..a7d80957 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host_test.go @@ -0,0 +1,55 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proxy + +import ( + "errors" + "net" + "reflect" + "testing" +) + +type recordingProxy struct { + addrs []string +} + +func (r *recordingProxy) Dial(network, addr string) (net.Conn, error) { + r.addrs = append(r.addrs, addr) + return nil, errors.New("recordingProxy") +} + +func TestPerHost(t *testing.T) { + var def, bypass recordingProxy + perHost := NewPerHost(&def, &bypass) + perHost.AddFromString("localhost,*.zone,127.0.0.1,10.0.0.1/8,1000::/16") + + expectedDef := []string{ + "example.com:123", + "1.2.3.4:123", + "[1001::]:123", + } + expectedBypass := []string{ + "localhost:123", + "zone:123", + "foo.zone:123", + "127.0.0.1:123", + "10.1.2.3:123", + "[1000::]:123", + } + + for _, addr := range expectedDef { + perHost.Dial("tcp", addr) + } + for _, addr := range expectedBypass { + perHost.Dial("tcp", addr) + } + + if !reflect.DeepEqual(expectedDef, def.addrs) { + t.Errorf("Hosts which went to the default proxy didn't match. Got %v, want %v", def.addrs, expectedDef) + } + if !reflect.DeepEqual(expectedBypass, bypass.addrs) { + t.Errorf("Hosts which went to the bypass proxy didn't match. Got %v, want %v", bypass.addrs, expectedBypass) + } +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 00000000..78a8b7be --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,94 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" +) + +// A Dialer is a means to establish a connection. +type Dialer interface { + // Dial connects to the given address via the proxy. 
+ Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func FromEnvironment() Dialer { + allProxy := os.Getenv("all_proxy") + if len(allProxy) == 0 { + return Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return Direct + } + proxy, err := FromURL(proxyURL, Direct) + if err != nil { + return Direct + } + + noProxy := os.Getenv("no_proxy") + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. 
+ if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} diff --git a/vendor/golang.org/x/net/proxy/proxy_test.go b/vendor/golang.org/x/net/proxy/proxy_test.go new file mode 100644 index 00000000..c19a5c06 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy_test.go @@ -0,0 +1,142 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "io" + "net" + "net/url" + "strconv" + "sync" + "testing" +) + +func TestFromURL(t *testing.T) { + endSystem, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer endSystem.Close() + gateway, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer gateway.Close() + + var wg sync.WaitGroup + wg.Add(1) + go socks5Gateway(t, gateway, endSystem, socks5Domain, &wg) + + url, err := url.Parse("socks5://user:password@" + gateway.Addr().String()) + if err != nil { + t.Fatalf("url.Parse failed: %v", err) + } + proxy, err := FromURL(url, Direct) + if err != nil { + t.Fatalf("FromURL failed: %v", err) + } + _, port, err := net.SplitHostPort(endSystem.Addr().String()) + if err != nil { + t.Fatalf("net.SplitHostPort failed: %v", err) + } + if c, err := proxy.Dial("tcp", "localhost:"+port); err != nil { + t.Fatalf("FromURL.Dial failed: %v", err) + } else { + c.Close() + } + + wg.Wait() +} + +func TestSOCKS5(t *testing.T) { + endSystem, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer endSystem.Close() + gateway, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer gateway.Close() + + var wg sync.WaitGroup + wg.Add(1) + go socks5Gateway(t, gateway, endSystem, socks5IP4, 
&wg) + + proxy, err := SOCKS5("tcp", gateway.Addr().String(), nil, Direct) + if err != nil { + t.Fatalf("SOCKS5 failed: %v", err) + } + if c, err := proxy.Dial("tcp", endSystem.Addr().String()); err != nil { + t.Fatalf("SOCKS5.Dial failed: %v", err) + } else { + c.Close() + } + + wg.Wait() +} + +func socks5Gateway(t *testing.T, gateway, endSystem net.Listener, typ byte, wg *sync.WaitGroup) { + defer wg.Done() + + c, err := gateway.Accept() + if err != nil { + t.Errorf("net.Listener.Accept failed: %v", err) + return + } + defer c.Close() + + b := make([]byte, 32) + var n int + if typ == socks5Domain { + n = 4 + } else { + n = 3 + } + if _, err := io.ReadFull(c, b[:n]); err != nil { + t.Errorf("io.ReadFull failed: %v", err) + return + } + if _, err := c.Write([]byte{socks5Version, socks5AuthNone}); err != nil { + t.Errorf("net.Conn.Write failed: %v", err) + return + } + if typ == socks5Domain { + n = 16 + } else { + n = 10 + } + if _, err := io.ReadFull(c, b[:n]); err != nil { + t.Errorf("io.ReadFull failed: %v", err) + return + } + if b[0] != socks5Version || b[1] != socks5Connect || b[2] != 0x00 || b[3] != typ { + t.Errorf("got an unexpected packet: %#02x %#02x %#02x %#02x", b[0], b[1], b[2], b[3]) + return + } + if typ == socks5Domain { + copy(b[:5], []byte{socks5Version, 0x00, 0x00, socks5Domain, 9}) + b = append(b, []byte("localhost")...) + } else { + copy(b[:4], []byte{socks5Version, 0x00, 0x00, socks5IP4}) + } + host, port, err := net.SplitHostPort(endSystem.Addr().String()) + if err != nil { + t.Errorf("net.SplitHostPort failed: %v", err) + return + } + b = append(b, []byte(net.ParseIP(host).To4())...) + p, err := strconv.Atoi(port) + if err != nil { + t.Errorf("strconv.Atoi failed: %v", err) + return + } + b = append(b, []byte{byte(p >> 8), byte(p)}...) 
+ if _, err := c.Write(b); err != nil { + t.Errorf("net.Conn.Write failed: %v", err) + return + } +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 00000000..9b962823 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,210 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "errors" + "io" + "net" + "strconv" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928. +func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) { + s := &socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type socks5 struct { + user, password string + network, addr string + forward Dialer +} + +const socks5Version = 5 + +const ( + socks5AuthNone = 0 + socks5AuthPassword = 2 +) + +const socks5Connect = 1 + +const ( + socks5IP4 = 1 + socks5Domain = 3 + socks5IP6 = 4 +) + +var socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the network net via the SOCKS5 proxy. 
+func (s *socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + closeConn := &conn + defer func() { + if closeConn != nil { + (*closeConn).Close() + } + }() + + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return nil, errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return nil, errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return nil, errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + if buf[1] == socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) 
+ + if _, err := conn.Write(buf); err != nil { + return nil, errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return nil, errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, socks5IP4) + ip = ip4 + } else { + buf = append(buf, socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return nil, errors.New("proxy: destination hostname too long: " + host) + } + buf = append(buf, socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return nil, errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return nil, errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(socks5Errors) { + failure = socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case socks5IP4: + bytesToDiscard = net.IPv4len + case socks5IP6: + bytesToDiscard = net.IPv6len + case socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return nil, errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return nil, 
errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return nil, errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return nil, errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + closeConn = nil + return conn, nil +} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go new file mode 100644 index 00000000..5c8d7b5f --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/gen.go @@ -0,0 +1,663 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This program generates table.go and table_test.go. +// Invoke as: +// +// go run gen.go -version "xxx" >table.go +// go run gen.go -version "xxx" -test >table_test.go +// +// Pass -v to print verbose progress information. +// +// The version is derived from information found at +// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat +// +// To fetch a particular git revision, such as 5c70ccd250, pass +// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io" + "net/http" + "os" + "regexp" + "sort" + "strings" + + "golang.org/x/net/idna" +) + +const ( + // These sum of these four values must be no greater than 32. 
+ nodesBitsChildren = 9 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + // These sum of these four values must be no greater than 32. + childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +var ( + maxChildren int + maxTextOffset int + maxTextLength int + maxHi uint32 + maxLo uint32 +) + +func max(a, b int) int { + if a < b { + return b + } + return a +} + +func u32max(a, b uint32) uint32 { + if a < b { + return b + } + return a +} + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 + numNodeType = 3 +) + +func nodeTypeStr(n int) string { + switch n { + case nodeTypeNormal: + return "+" + case nodeTypeException: + return "!" + case nodeTypeParentOnly: + return "o" + } + panic("unreachable") +} + +var ( + labelEncoding = map[string]uint32{} + labelsList = []string{} + labelsMap = map[string]bool{} + rules = []string{} + + // validSuffix is used to check that the entries in the public suffix list + // are in canonical form (after Punycode encoding). Specifically, capital + // letters are not allowed. + validSuffix = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) + + subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") + url = flag.String("url", + "https://publicsuffix.org/list/effective_tld_names.dat", + "URL of the publicsuffix.org list. 
If empty, stdin is read instead") + v = flag.Bool("v", false, "verbose output (to stderr)") + version = flag.String("version", "", "the effective_tld_names.dat version") + test = flag.Bool("test", false, "generate table_test.go") +) + +func main() { + if err := main1(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main1() error { + flag.Parse() + if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { + return fmt.Errorf("not enough bits to encode the nodes table") + } + if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { + return fmt.Errorf("not enough bits to encode the children table") + } + if *version == "" { + return fmt.Errorf("-version was not specified") + } + var r io.Reader = os.Stdin + if *url != "" { + res, err := http.Get(*url) + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) + } + r = res.Body + defer res.Body.Close() + } + + var root node + icann := false + buf := new(bytes.Buffer) + br := bufio.NewReader(r) + for { + s, err := br.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return err + } + s = strings.TrimSpace(s) + if strings.Contains(s, "BEGIN ICANN DOMAINS") { + icann = true + continue + } + if strings.Contains(s, "END ICANN DOMAINS") { + icann = false + continue + } + if s == "" || strings.HasPrefix(s, "//") { + continue + } + s, err = idna.ToASCII(s) + if err != nil { + return err + } + if !validSuffix.MatchString(s) { + return fmt.Errorf("bad publicsuffix.org list data: %q", s) + } + + if *subset { + switch { + case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): + case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): + case s == "ao" || strings.HasSuffix(s, ".ao"): + case s == "ar" || strings.HasSuffix(s, ".ar"): + case s == "arpa" || strings.HasSuffix(s, ".arpa"): + case s == "cy" || strings.HasSuffix(s, ".cy"): + case s == "dyndns.org" || 
strings.HasSuffix(s, ".dyndns.org"): + case s == "jp": + case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): + case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): + case s == "om" || strings.HasSuffix(s, ".om"): + case s == "uk" || strings.HasSuffix(s, ".uk"): + case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): + case s == "tw" || strings.HasSuffix(s, ".tw"): + case s == "zw" || strings.HasSuffix(s, ".zw"): + case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): + // xn--p1ai is Russian-Cyrillic "рф". + default: + continue + } + } + + rules = append(rules, s) + + nt, wildcard := nodeTypeNormal, false + switch { + case strings.HasPrefix(s, "*."): + s, nt = s[2:], nodeTypeParentOnly + wildcard = true + case strings.HasPrefix(s, "!"): + s, nt = s[1:], nodeTypeException + } + labels := strings.Split(s, ".") + for n, i := &root, len(labels)-1; i >= 0; i-- { + label := labels[i] + n = n.child(label) + if i == 0 { + if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { + n.nodeType = nt + } + n.icann = n.icann && icann + n.wildcard = n.wildcard || wildcard + } + labelsMap[label] = true + } + } + labelsList = make([]string, 0, len(labelsMap)) + for label := range labelsMap { + labelsList = append(labelsList, label) + } + sort.Strings(labelsList) + + p := printReal + if *test { + p = printTest + } + if err := p(buf, &root); err != nil { + return err + } + + b, err := format.Source(buf.Bytes()) + if err != nil { + return err + } + _, err = os.Stdout.Write(b) + return err +} + +func printTest(w io.Writer, n *node) error { + fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") + fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") + for _, rule := range rules { + fmt.Fprintf(w, "%q,\n", rule) + } + fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") + if err := n.walk(w, printNodeLabel); err != nil { + return err + } + fmt.Fprintf(w, "}\n") + return nil +} + +func printReal(w io.Writer, n *node) error { + const 
header = `// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = %q + +const ( + nodesBitsChildren = %d + nodesBitsICANN = %d + nodesBitsTextOffset = %d + nodesBitsTextLength = %d + + childrenBitsWildcard = %d + childrenBitsNodeType = %d + childrenBitsHi = %d + childrenBitsLo = %d +) + +const ( + nodeTypeNormal = %d + nodeTypeException = %d + nodeTypeParentOnly = %d +) + +// numTLD is the number of top level domains. +const numTLD = %d + +` + fmt.Fprintf(w, header, *version, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, + nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) + + text := combineText(labelsList) + if text == "" { + return fmt.Errorf("internal error: makeText returned no text") + } + for _, label := range labelsList { + offset, length := strings.Index(text, label), len(label) + if offset < 0 { + return fmt.Errorf("internal error: could not find %q in text %q", label, text) + } + maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) + if offset >= 1<<nodesBitsTextOffset { + return fmt.Errorf("text offset %d is too large, or nodeBitsTextOffset is too small", offset) + } + if length >= 1<<nodesBitsTextLength { + return fmt.Errorf("text length %d is too large, or nodeBitsTextLength is too small", length) + } + labelEncoding[label] = uint32(offset)<<nodesBitsTextLength | uint32(length) + } + fmt.Fprintf(w, "// Text is the combined text of all labels.\nconst text = ") + for len(text) > 0 { + n, plus := len(text), "" + if n > 64 { + n, plus = 64, " +" + } + fmt.Fprintf(w, "%q%s\n", text[:n], plus) + text = text[n:] + } + + if err := n.walk(w, assignIndexes); err != nil { + return err + } + + fmt.Fprintf(w, ` + +// nodes is the list of nodes. 
Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// In the //-comment after each node's data, the nodes indexes of the children +// are formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] children index +// [%2d bits] ICANN bit +// [%2d bits] text index +// [%2d bits] text length +var nodes = [...]uint32{ +`, + 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) + if err := n.walk(w, printNode); err != nil { + return err + } + fmt.Fprintf(w, `} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. 
+// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] wildcard bit +// [%2d bits] node type +// [%2d bits] high nodes index (exclusive) of children +// [%2d bits] low nodes index (inclusive) of children +var children=[...]uint32{ +`, + 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) + for i, c := range childrenEncoding { + s := "---------------" + lo := c & (1<<childrenBitsLo - 1) + hi := (c >> childrenBitsLo) & (1<<childrenBitsHi - 1) + if lo != hi { + s = fmt.Sprintf("n0x%04x-n0x%04x", lo, hi) + } + nodeType := int(c>>(childrenBitsLo+childrenBitsHi)) & (1<<childrenBitsNodeType - 1) + wildcard := c>>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 + fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", + c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) + } + fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<<nodesBitsChildren-1) + fmt.Fprintf(w, "// max text offset %d (capacity %d)\n", maxTextOffset, 1<<nodesBitsTextOffset-1) + fmt.Fprintf(w, "// max text length %d (capacity %d)\n", maxTextLength, 1<<nodesBitsTextLength-1) + fmt.Fprintf(w, "// max hi %d (capacity %d)\n", maxHi, 1<<childrenBitsHi-1) + fmt.Fprintf(w, "// max lo %d (capacity %d)\n", maxLo, 1<<childrenBitsLo-1) + return nil +} + +type node struct { + label string + nodeType int + icann bool + wildcard bool + // nodesIndex and childrenIndex are the index of this node in the nodes + // and the index of its children offset/length in the children arrays. + nodesIndex, childrenIndex int + // firstChild is the index of this node's first child, or zero if this + // node has no children. + firstChild int + // children are the node's children, in strictly increasing node label order. 
+ children []*node +} + +func (n *node) walk(w io.Writer, f func(w1 io.Writer, n1 *node) error) error { + if err := f(w, n); err != nil { + return err + } + for _, c := range n.children { + if err := c.walk(w, f); err != nil { + return err + } + } + return nil +} + +// child returns the child of n with the given label. The child is created if +// it did not exist beforehand. +func (n *node) child(label string) *node { + for _, c := range n.children { + if c.label == label { + return c + } + } + c := &node{ + label: label, + nodeType: nodeTypeParentOnly, + icann: true, + } + n.children = append(n.children, c) + sort.Sort(byLabel(n.children)) + return c +} + +type byLabel []*node + +func (b byLabel) Len() int { return len(b) } +func (b byLabel) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byLabel) Less(i, j int) bool { return b[i].label < b[j].label } + +var nextNodesIndex int + +// childrenEncoding are the encoded entries in the generated children array. +// All these pre-defined entries have no children. +var childrenEncoding = []uint32{ + 0 << (childrenBitsLo + childrenBitsHi), // Without wildcard bit, nodeTypeNormal. + 1 << (childrenBitsLo + childrenBitsHi), // Without wildcard bit, nodeTypeException. + 2 << (childrenBitsLo + childrenBitsHi), // Without wildcard bit, nodeTypeParentOnly. + 4 << (childrenBitsLo + childrenBitsHi), // With wildcard bit, nodeTypeNormal. + 5 << (childrenBitsLo + childrenBitsHi), // With wildcard bit, nodeTypeException. + 6 << (childrenBitsLo + childrenBitsHi), // With wildcard bit, nodeTypeParentOnly. +} + +var firstCallToAssignIndexes = true + +func assignIndexes(w io.Writer, n *node) error { + if len(n.children) != 0 { + // Assign nodesIndex. + n.firstChild = nextNodesIndex + for _, c := range n.children { + c.nodesIndex = nextNodesIndex + nextNodesIndex++ + } + + // The root node's children is implicit. + if firstCallToAssignIndexes { + firstCallToAssignIndexes = false + return nil + } + + // Assign childrenIndex. 
+ maxChildren = max(maxChildren, len(childrenEncoding)) + if len(childrenEncoding) >= 1<<nodesBitsChildren { + return fmt.Errorf("children table size %d is too large, or nodeBitsChildren is too small", len(childrenEncoding)) + } + n.childrenIndex = len(childrenEncoding) + lo := uint32(n.firstChild) + hi := lo + uint32(len(n.children)) + maxLo, maxHi = u32max(maxLo, lo), u32max(maxHi, hi) + if lo >= 1<<childrenBitsLo { + return fmt.Errorf("children lo %d is too large, or childrenBitsLo is too small", lo) + } + if hi >= 1<<childrenBitsHi { + return fmt.Errorf("children hi %d is too large, or childrenBitsHi is too small", hi) + } + enc := hi<<childrenBitsLo | lo + enc |= uint32(n.nodeType) << (childrenBitsLo + childrenBitsHi) + if n.wildcard { + enc |= 1 << (childrenBitsLo + childrenBitsHi + childrenBitsNodeType) + } + childrenEncoding = append(childrenEncoding, enc) + } else { + n.childrenIndex = n.nodeType + if n.wildcard { + n.childrenIndex += numNodeType + } + } + return nil +} + +func printNode(w io.Writer, n *node) error { + for _, c := range n.children { + s := "---------------" + if len(c.children) != 0 { + s = fmt.Sprintf("n0x%04x-n0x%04x", c.firstChild, c.firstChild+len(c.children)) + } + encoding := labelEncoding[c.label] + if c.icann { + encoding |= 1 << (nodesBitsTextLength + nodesBitsTextOffset) + } + encoding |= uint32(c.childrenIndex) << (nodesBitsTextLength + nodesBitsTextOffset + nodesBitsICANN) + fmt.Fprintf(w, "0x%08x, // n0x%04x c0x%04x (%s)%s %s %s %s\n", + encoding, c.nodesIndex, c.childrenIndex, s, wildcardStr(c.wildcard), + nodeTypeStr(c.nodeType), icannStr(c.icann), c.label, + ) + } + return nil +} + +func printNodeLabel(w io.Writer, n *node) error { + for _, c := range n.children { + fmt.Fprintf(w, "%q,\n", c.label) + } + return nil +} + +func icannStr(icann bool) string { + if icann { + return "I" + } + return " " +} + +func wildcardStr(wildcard bool) string { + if wildcard { + return "*" + } + return " " +} + +// combineText combines all 
the strings in labelsList to form one giant string. +// Overlapping strings will be merged: "arpa" and "parliament" could yield +// "arparliament". +func combineText(labelsList []string) string { + beforeLength := 0 + for _, s := range labelsList { + beforeLength += len(s) + } + + text := crush(removeSubstrings(labelsList)) + if *v { + fmt.Fprintf(os.Stderr, "crushed %d bytes to become %d bytes\n", beforeLength, len(text)) + } + return text +} + +type byLength []string + +func (s byLength) Len() int { return len(s) } +func (s byLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byLength) Less(i, j int) bool { return len(s[i]) < len(s[j]) } + +// removeSubstrings returns a copy of its input with any strings removed +// that are substrings of other provided strings. +func removeSubstrings(input []string) []string { + // Make a copy of input. + ss := append(make([]string, 0, len(input)), input...) + sort.Sort(byLength(ss)) + + for i, shortString := range ss { + // For each string, only consider strings higher than it in sort order, i.e. + // of equal length or greater. + for _, longString := range ss[i+1:] { + if strings.Contains(longString, shortString) { + ss[i] = "" + break + } + } + } + + // Remove the empty strings. + sort.Strings(ss) + for len(ss) > 0 && ss[0] == "" { + ss = ss[1:] + } + return ss +} + +// crush combines a list of strings, taking advantage of overlaps. It returns a +// single string that contains each input string as a substring. 
+func crush(ss []string) string { + maxLabelLen := 0 + for _, s := range ss { + if maxLabelLen < len(s) { + maxLabelLen = len(s) + } + } + + for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { + prefixes := makePrefixMap(ss, prefixLen) + for i, s := range ss { + if len(s) <= prefixLen { + continue + } + mergeLabel(ss, i, prefixLen, prefixes) + } + } + + return strings.Join(ss, "") +} + +// mergeLabel merges the label at ss[i] with the first available matching label +// in prefixMap, where the last "prefixLen" characters in ss[i] match the first +// "prefixLen" characters in the matching label. +// It will merge ss[i] repeatedly until no more matches are available. +// All matching labels merged into ss[i] are replaced by "". +func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { + s := ss[i] + suffix := s[len(s)-prefixLen:] + for _, j := range prefixes[suffix] { + // Empty strings mean "already used." Also avoid merging with self. + if ss[j] == "" || i == j { + continue + } + if *v { + fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", + prefixLen, i, j, ss[i], ss[j], suffix) + } + ss[i] += ss[j][prefixLen:] + ss[j] = "" + // ss[i] has a new suffix, so merge again if possible. + // Note: we only have to merge again at the same prefix length. Shorter + // prefix lengths will be handled in the next iteration of crush's for loop. + // Can there be matches for longer prefix lengths, introduced by the merge? + // I believe that any such matches would by necessity have been eliminated + // during substring removal or merged at a higher prefix length. For + // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" + // would yield "abcde", which could be merged with "bcdef." However, in + // practice "cde" would already have been elimintated by removeSubstrings. + mergeLabel(ss, i, prefixLen, prefixes) + return + } +} + +// prefixMap maps from a prefix to a list of strings containing that prefix. 
The +// list of strings is represented as indexes into a slice of strings stored +// elsewhere. +type prefixMap map[string][]int + +// makePrefixMap constructs a prefixMap from a slice of strings. +func makePrefixMap(ss []string, prefixLen int) prefixMap { + prefixes := make(prefixMap) + for i, s := range ss { + // We use < rather than <= because if a label matches on a prefix equal to + // its full length, that's actually a substring match handled by + // removeSubstrings. + if prefixLen < len(s) { + prefix := s[:prefixLen] + prefixes[prefix] = append(prefixes[prefix], i) + } + } + + return prefixes +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 00000000..9419ca99 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,133 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package publicsuffix provides a public suffix list based on data from +// http://publicsuffix.org/. A public suffix is one under which Internet users +// can directly register names. +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. + +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. 
+// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is privately +// managed. For example, foo.org and foo.co.uk are ICANN domains, +// foo.dyndns.org and foo.blogspot.co.uk are private domains. +// +// Use cases for distinguishing ICANN domains like foo.com from private +// domains like foo.appspot.com can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + lo, hi := uint32(0), uint32(numTLD) + s, suffix, wildcard := domain, len(domain), false +loop: + for { + dot := strings.LastIndex(s, ".") + if wildcard { + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) + icann = u&(1<<nodesBitsICANN-1) != 0 + u >>= nodesBitsICANN + u = children[u&(1<<nodesBitsChildren-1)] + lo = u & (1<<childrenBitsLo - 1) + u >>= childrenBitsLo + hi = u & (1<<childrenBitsHi - 1) + u >>= childrenBitsHi + switch u & (1<<childrenBitsNodeType - 1) { + case nodeTypeNormal: + suffix = 1 + dot + case nodeTypeException: + suffix = 1 + len(s) + break loop + } + u >>= childrenBitsNodeType + wildcard = u&(1<<childrenBitsWildcard-1) != 0 + + if dot == -1 { + break + } + s = s[:dot] + } + if suffix == len(domain) { + // If no rules match, the prevailing rule is "*". + return domain[1+strings.LastIndex(domain, "."):], icann + } + return domain[suffix:], icann +} + +const notFound uint32 = 1<<32 - 1 + +// find returns the index of the node in the range [lo, hi) whose label equals +// label, or notFound if there is no such node. The range is assumed to be in +// strictly increasing node label order. 
+func find(label string, lo, hi uint32) uint32 { + for lo < hi { + mid := lo + (hi-lo)/2 + s := nodeLabel(mid) + if s < label { + lo = mid + 1 + } else if s == label { + return mid + } else { + hi = mid + } + } + return notFound +} + +// nodeLabel returns the label for the i'th node. +func nodeLabel(i uint32) string { + x := nodes[i] + length := x & (1<<nodesBitsTextLength - 1) + x >>= nodesBitsTextLength + offset := x & (1<<nodesBitsTextOffset - 1) + return text[offset : offset+length] +} + +// EffectiveTLDPlusOne returns the effective top level domain plus one more +// label. For example, the eTLD+1 for "foo.bar.golang.org" is "golang.org". +func EffectiveTLDPlusOne(domain string) (string, error) { + suffix, _ := PublicSuffix(domain) + if len(domain) <= len(suffix) { + return "", fmt.Errorf("publicsuffix: cannot derive eTLD+1 for domain %q", domain) + } + i := len(domain) - len(suffix) - 1 + if domain[i] != '.' { + return "", fmt.Errorf("publicsuffix: invalid public suffix %q for domain %q", suffix, domain) + } + return domain[1+strings.LastIndex(domain[:i], "."):], nil +} diff --git a/vendor/golang.org/x/net/publicsuffix/list_test.go b/vendor/golang.org/x/net/publicsuffix/list_test.go new file mode 100644 index 00000000..a08e64ea --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list_test.go @@ -0,0 +1,416 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package publicsuffix + +import ( + "sort" + "strings" + "testing" +) + +func TestNodeLabel(t *testing.T) { + for i, want := range nodeLabels { + got := nodeLabel(uint32(i)) + if got != want { + t.Errorf("%d: got %q, want %q", i, got, want) + } + } +} + +func TestFind(t *testing.T) { + testCases := []string{ + "", + "a", + "a0", + "aaaa", + "ao", + "ap", + "ar", + "aro", + "arp", + "arpa", + "arpaa", + "arpb", + "az", + "b", + "b0", + "ba", + "z", + "zu", + "zv", + "zw", + "zx", + "zy", + "zz", + "zzzz", + } + for _, tc := range testCases { + got := find(tc, 0, numTLD) + want := notFound + for i := uint32(0); i < numTLD; i++ { + if tc == nodeLabel(i) { + want = i + break + } + } + if got != want { + t.Errorf("%q: got %d, want %d", tc, got, want) + } + } +} + +func TestICANN(t *testing.T) { + testCases := map[string]bool{ + "foo.org": true, + "foo.co.uk": true, + "foo.dyndns.org": false, + "foo.go.dyndns.org": false, + "foo.blogspot.co.uk": false, + "foo.intranet": false, + } + for domain, want := range testCases { + _, got := PublicSuffix(domain) + if got != want { + t.Errorf("%q: got %v, want %v", domain, got, want) + } + } +} + +var publicSuffixTestCases = []struct { + domain, want string +}{ + // Empty string. 
+ {"", ""}, + + // The .ao rules are: + // ao + // ed.ao + // gv.ao + // og.ao + // co.ao + // pb.ao + // it.ao + {"ao", "ao"}, + {"www.ao", "ao"}, + {"pb.ao", "pb.ao"}, + {"www.pb.ao", "pb.ao"}, + {"www.xxx.yyy.zzz.pb.ao", "pb.ao"}, + + // The .ar rules are: + // ar + // com.ar + // edu.ar + // gob.ar + // gov.ar + // int.ar + // mil.ar + // net.ar + // org.ar + // tur.ar + // blogspot.com.ar + {"ar", "ar"}, + {"www.ar", "ar"}, + {"nic.ar", "ar"}, + {"www.nic.ar", "ar"}, + {"com.ar", "com.ar"}, + {"www.com.ar", "com.ar"}, + {"blogspot.com.ar", "blogspot.com.ar"}, + {"www.blogspot.com.ar", "blogspot.com.ar"}, + {"www.xxx.yyy.zzz.blogspot.com.ar", "blogspot.com.ar"}, + {"logspot.com.ar", "com.ar"}, + {"zlogspot.com.ar", "com.ar"}, + {"zblogspot.com.ar", "com.ar"}, + + // The .arpa rules are: + // arpa + // e164.arpa + // in-addr.arpa + // ip6.arpa + // iris.arpa + // uri.arpa + // urn.arpa + {"arpa", "arpa"}, + {"www.arpa", "arpa"}, + {"urn.arpa", "urn.arpa"}, + {"www.urn.arpa", "urn.arpa"}, + {"www.xxx.yyy.zzz.urn.arpa", "urn.arpa"}, + + // The relevant {kobe,kyoto}.jp rules are: + // jp + // *.kobe.jp + // !city.kobe.jp + // kyoto.jp + // ide.kyoto.jp + {"jp", "jp"}, + {"kobe.jp", "jp"}, + {"c.kobe.jp", "c.kobe.jp"}, + {"b.c.kobe.jp", "c.kobe.jp"}, + {"a.b.c.kobe.jp", "c.kobe.jp"}, + {"city.kobe.jp", "kobe.jp"}, + {"www.city.kobe.jp", "kobe.jp"}, + {"kyoto.jp", "kyoto.jp"}, + {"test.kyoto.jp", "kyoto.jp"}, + {"ide.kyoto.jp", "ide.kyoto.jp"}, + {"b.ide.kyoto.jp", "ide.kyoto.jp"}, + {"a.b.ide.kyoto.jp", "ide.kyoto.jp"}, + + // The .tw rules are: + // tw + // edu.tw + // gov.tw + // mil.tw + // com.tw + // net.tw + // org.tw + // idv.tw + // game.tw + // ebiz.tw + // club.tw + // 網路.tw (xn--zf0ao64a.tw) + // 組織.tw (xn--uc0atv.tw) + // 商業.tw (xn--czrw28b.tw) + // blogspot.tw + {"tw", "tw"}, + {"aaa.tw", "tw"}, + {"www.aaa.tw", "tw"}, + {"xn--czrw28b.aaa.tw", "tw"}, + {"edu.tw", "edu.tw"}, + {"www.edu.tw", "edu.tw"}, + {"xn--czrw28b.edu.tw", "edu.tw"}, + 
{"xn--czrw28b.tw", "xn--czrw28b.tw"}, + {"www.xn--czrw28b.tw", "xn--czrw28b.tw"}, + {"xn--uc0atv.xn--czrw28b.tw", "xn--czrw28b.tw"}, + {"xn--kpry57d.tw", "tw"}, + + // The .uk rules are: + // uk + // ac.uk + // co.uk + // gov.uk + // ltd.uk + // me.uk + // net.uk + // nhs.uk + // org.uk + // plc.uk + // police.uk + // *.sch.uk + // blogspot.co.uk + {"uk", "uk"}, + {"aaa.uk", "uk"}, + {"www.aaa.uk", "uk"}, + {"mod.uk", "uk"}, + {"www.mod.uk", "uk"}, + {"sch.uk", "uk"}, + {"mod.sch.uk", "mod.sch.uk"}, + {"www.sch.uk", "www.sch.uk"}, + {"blogspot.co.uk", "blogspot.co.uk"}, + {"blogspot.nic.uk", "uk"}, + {"blogspot.sch.uk", "blogspot.sch.uk"}, + + // The .рф rules are + // рф (xn--p1ai) + {"xn--p1ai", "xn--p1ai"}, + {"aaa.xn--p1ai", "xn--p1ai"}, + {"www.xxx.yyy.xn--p1ai", "xn--p1ai"}, + + // The .zw rules are: + // *.zw + {"zw", "zw"}, + {"www.zw", "www.zw"}, + {"zzz.zw", "zzz.zw"}, + {"www.zzz.zw", "zzz.zw"}, + {"www.xxx.yyy.zzz.zw", "zzz.zw"}, + + // There are no .nosuchtld rules. + {"nosuchtld", "nosuchtld"}, + {"foo.nosuchtld", "nosuchtld"}, + {"bar.foo.nosuchtld", "nosuchtld"}, +} + +func BenchmarkPublicSuffix(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, tc := range publicSuffixTestCases { + List.PublicSuffix(tc.domain) + } + } +} + +func TestPublicSuffix(t *testing.T) { + for _, tc := range publicSuffixTestCases { + got := List.PublicSuffix(tc.domain) + if got != tc.want { + t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want) + } + } +} + +func TestSlowPublicSuffix(t *testing.T) { + for _, tc := range publicSuffixTestCases { + got := slowPublicSuffix(tc.domain) + if got != tc.want { + t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want) + } + } +} + +// slowPublicSuffix implements the canonical (but O(number of rules)) public +// suffix algorithm described at http://publicsuffix.org/list/. +// +// 1. Match domain against all rules and take note of the matching ones. +// 2. If no rules match, the prevailing rule is "*". +// 3. 
If more than one rule matches, the prevailing rule is the one which is an exception rule. +// 4. If there is no matching exception rule, the prevailing rule is the one with the most labels. +// 5. If the prevailing rule is a exception rule, modify it by removing the leftmost label. +// 6. The public suffix is the set of labels from the domain which directly match the labels of the prevailing rule (joined by dots). +// 7. The registered or registrable domain is the public suffix plus one additional label. +// +// This function returns the public suffix, not the registrable domain, and so +// it stops after step 6. +func slowPublicSuffix(domain string) string { + match := func(rulePart, domainPart string) bool { + switch rulePart[0] { + case '*': + return true + case '!': + return rulePart[1:] == domainPart + } + return rulePart == domainPart + } + + domainParts := strings.Split(domain, ".") + var matchingRules [][]string + +loop: + for _, rule := range rules { + ruleParts := strings.Split(rule, ".") + if len(domainParts) < len(ruleParts) { + continue + } + for i := range ruleParts { + rulePart := ruleParts[len(ruleParts)-1-i] + domainPart := domainParts[len(domainParts)-1-i] + if !match(rulePart, domainPart) { + continue loop + } + } + matchingRules = append(matchingRules, ruleParts) + } + if len(matchingRules) == 0 { + matchingRules = append(matchingRules, []string{"*"}) + } else { + sort.Sort(byPriority(matchingRules)) + } + prevailing := matchingRules[0] + if prevailing[0][0] == '!' { + prevailing = prevailing[1:] + } + if prevailing[0][0] == '*' { + replaced := domainParts[len(domainParts)-len(prevailing)] + prevailing = append([]string{replaced}, prevailing[1:]...) + } + return strings.Join(prevailing, ".") +} + +type byPriority [][]string + +func (b byPriority) Len() int { return len(b) } +func (b byPriority) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byPriority) Less(i, j int) bool { + if b[i][0][0] == '!' { + return true + } + if b[j][0][0] == '!' 
{ + return false + } + return len(b[i]) > len(b[j]) +} + +// eTLDPlusOneTestCases come from +// https://github.com/publicsuffix/list/blob/master/tests/test_psl.txt +var eTLDPlusOneTestCases = []struct { + domain, want string +}{ + // Empty input. + {"", ""}, + // Unlisted TLD. + {"example", ""}, + {"example.example", "example.example"}, + {"b.example.example", "example.example"}, + {"a.b.example.example", "example.example"}, + // TLD with only 1 rule. + {"biz", ""}, + {"domain.biz", "domain.biz"}, + {"b.domain.biz", "domain.biz"}, + {"a.b.domain.biz", "domain.biz"}, + // TLD with some 2-level rules. + {"com", ""}, + {"example.com", "example.com"}, + {"b.example.com", "example.com"}, + {"a.b.example.com", "example.com"}, + {"uk.com", ""}, + {"example.uk.com", "example.uk.com"}, + {"b.example.uk.com", "example.uk.com"}, + {"a.b.example.uk.com", "example.uk.com"}, + {"test.ac", "test.ac"}, + // TLD with only 1 (wildcard) rule. + {"mm", ""}, + {"c.mm", ""}, + {"b.c.mm", "b.c.mm"}, + {"a.b.c.mm", "b.c.mm"}, + // More complex TLD. + {"jp", ""}, + {"test.jp", "test.jp"}, + {"www.test.jp", "test.jp"}, + {"ac.jp", ""}, + {"test.ac.jp", "test.ac.jp"}, + {"www.test.ac.jp", "test.ac.jp"}, + {"kyoto.jp", ""}, + {"test.kyoto.jp", "test.kyoto.jp"}, + {"ide.kyoto.jp", ""}, + {"b.ide.kyoto.jp", "b.ide.kyoto.jp"}, + {"a.b.ide.kyoto.jp", "b.ide.kyoto.jp"}, + {"c.kobe.jp", ""}, + {"b.c.kobe.jp", "b.c.kobe.jp"}, + {"a.b.c.kobe.jp", "b.c.kobe.jp"}, + {"city.kobe.jp", "city.kobe.jp"}, + {"www.city.kobe.jp", "city.kobe.jp"}, + // TLD with a wildcard rule and exceptions. + {"ck", ""}, + {"test.ck", ""}, + {"b.test.ck", "b.test.ck"}, + {"a.b.test.ck", "b.test.ck"}, + {"www.ck", "www.ck"}, + {"www.www.ck", "www.ck"}, + // US K12. 
+ {"us", ""}, + {"test.us", "test.us"}, + {"www.test.us", "test.us"}, + {"ak.us", ""}, + {"test.ak.us", "test.ak.us"}, + {"www.test.ak.us", "test.ak.us"}, + {"k12.ak.us", ""}, + {"test.k12.ak.us", "test.k12.ak.us"}, + {"www.test.k12.ak.us", "test.k12.ak.us"}, + // Punycoded IDN labels + {"xn--85x722f.com.cn", "xn--85x722f.com.cn"}, + {"xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, + {"www.xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, + {"shishi.xn--55qx5d.cn", "shishi.xn--55qx5d.cn"}, + {"xn--55qx5d.cn", ""}, + {"xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, + {"www.xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, + {"shishi.xn--fiqs8s", "shishi.xn--fiqs8s"}, + {"xn--fiqs8s", ""}, +} + +func TestEffectiveTLDPlusOne(t *testing.T) { + for _, tc := range eTLDPlusOneTestCases { + got, _ := EffectiveTLDPlusOne(tc.domain) + if got != tc.want { + t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want) + } + } +} diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go new file mode 100644 index 00000000..ebcb1395 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/table.go @@ -0,0 +1,8786 @@ +// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = "publicsuffix.org's public_suffix_list.dat, git revision bade64c (2016-03-01)" + +const ( + nodesBitsChildren = 9 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 +) + +// numTLD is the number of top level domains. +const numTLD = 1545 + +// Text is the combined text of all labels. 
+const text = "bievatmallorcadaquesanfranciscotlandupontarioceanographiquebifuk" + + "agawalmartateshinanomachintaijinuyamanouchikuhokuryugasakitashio" + + "barabihorologyusuharabikedagestangebilbaogakievenesangoddabillus" + + "trationikkoebenhavnikolaeverbankashiwarabiomutashinainvestmentsa" + + "njotateyamabirdartcenterprisesakikonaircraftraeumtgeradealstahau" + + "gesundurbanamexeterbirkenesoddtangenovaravennaharimalvikashiwaza" + + "kiyokawarabirthplacebjarkoyusuisservicesannanikonantanangerbjerk" + + "reimmobilieninohelplfinancialipetskasukabedzin-the-bandaioiraseb" + + "astopologyeongnamegawakembuchikumagayagawakkanaibetsubamericanfa" + + "milydscloudappspotenzachpomorskienebakkeshibechambagriculturenne" + + "budapest-a-la-masioninomiyakonojoshkar-olayangroupaleostrowiecar" + + "toonartdecoffeedbackasumigaurawa-mazowszextraspace-to-rentalstom" + + "akomaibarabjugnirasakis-a-candidateblockbusternidurhamburgliwice" + + "bloombergbauernrtatsunostrowwlkpmglobalashovhachinoheguris-a-cat" + + "ererbluedatingloboehringerikebmoattachmentsannohelsinkitahiroshi" + + "marshallstatebankasuyakutiabmsanokaszubyuudmurtiabmwegroweibolza" + + "nore-og-uvdalivornobnpparibaselburglogoweirbomloansantabarbarabo" + + "ndvrdnsantacruzsantafedexhibitionishiazais-a-celticsfanishigotpa" + + "ntheonishiharabonnishiizunazukis-a-chefarsundwgloppenzaogashimad" + + "achicagobododgemologicallyngenglandyndns-homednsanukis-a-conserv" + + "ativefsncfailomzansimagicasadelamonedavvesiidazaifudaigodoesntex" + + "istanbullensakerbookingmbhartiffanynysafetysfjordyndns-ip6bootsa" + + "otomeldalorenskogminakamichigangwonishikatakazakis-a-cpadoval-da" + + "ostavalleyuzawaboschaefflerdalotenkawabostikatowicebostonakijins" + + "ekikogentingmodenakasatsunairtrafficaseihichisobetsuitairabotani" + + "calgardenishikatsuragithubusercontentattoolsztynsettlersapodhale" + + "vangerbotanicgardenishikawazukanazawabotanyuzhno-sakhalinskatsus" + + "hikabeeldengeluidyndns-mailotteboutiquebecngmxboxenapponazure-mo" + 
+ "bilebozentsujiiebradescorporationishimerabrandywinevalleybrasilj" + + "an-mayenishinomiyashironobresciabrindisibenikebristolgalsacebrit" + + "ishcolumbialowiezagannakadomari-elasticbeanstalkatsuyamasfjorden" + + "ishinoomotegotsukisosakitagatakamatsukawabroadcastlebtimnetzgora" + + "broadwaybroke-itaxihuanishinoshimatta-varjjatgorybrokerrypropert" + + "iesapporobronnoysundyndns-office-on-the-webcambridgestonewspaper" + + "brothermesaverdefensejnybrumunddalottokigawabrunelblagdenesnaase" + + "ralingenkainanaejrietisalatinabenogatachikawakayamagadancebetsuk" + + "ubabia-goracleaningatlantagajobojis-a-cubicle-slavellinowtvallea" + + "ostavernishiokoppegardyndns-picsaratovalled-aostavropolicebrusse" + + "lsardegnamsskoganeis-a-democratjeldsundyndns-remotegildeskalmyki" + + "abruxellesardiniabryanskjakdnepropetrovskiervaapsteiermarkaufeni" + + "shitosashimizunaminamiashigarabryneustarhubalestrandabergamoarek" + + "ehimejibestadishakotankarmoyokozembroideryomitanobninskarpaczela" + + "dz-1buskerudinewhampshirechtrainingretakamoriokamchatkameokameya" + + "mashinatsukigatakanabeatsarlouvrepairbusantiquest-a-la-maisondre" + + "-landebusinessebykleclercasertaishinomakikuchikuseikarugapartmen" + + "tsarpsborgrimstadyndns-serverbaniabuzenishiwakis-a-designerbuzzg" + + "orzeleccollegersundyndns-weberlincolnissandnessjoenissayokoshiba" + + "hikariwanumataketomisatokuyamatteledatabaseballooningripebwfashi" + + "onissedalovegaskimitsubatamicabbottjmaxxxfinitybzhitomirkutskjer" + + "voyagecloudfunctionsaudacntkmaxxn--11b4c3dcolognewmexicoldwarmia" + + "miastaplesauheradcolonialwilliamsburguideventsavannahgacoloradop" + + "lateaudiocolumbusheycommunitysnesaves-the-whalessandria-trani-ba" + + "rletta-andriatranibarlettaandriacomobaracomparemarkerryhotelsavo" + + "naplesaxocompute-1computerhistoryofscience-fictioncomsecuritysva" + + "rdoharuhrcondoshichinohedmarkhangelskypescaravantaaconferencecon" + + "structionconsuladollsbschokoladenconsultanthropologyconsultingvo" + + 
"llutskddielddanuorrikuzentakatajirissagaeroclubmedecincinnationw" + + "idealerimo-i-ranadexchangeiseiyoichiropracticbcn-north-1contactm" + + "palmspringsakercontemporaryarteducationalchikugojomedicaltanisse" + + "ttaiwanairguardcontractorskenconventureshinodesashibetsuikimobet" + + "suliguriacookingchannelveruminamibosogndaluxembourguitarscholars" + + "hipschooluxurycoolkuszgradcoopocznorthwesternmutualuzerncopenhag" + + "encyclopedicdn77-sslattumetlifeinsurancecorsicagliaridagawarszaw" + + "ashingtondclkfhskhabarovskhakassiacorvettemasekharkivguccipriani" + + "igataitogitsuldalvivano-frankivskharkovalledaostakkofuelcosenzam" + + "amibuilderschulexuslivinghistorycostumedio-campidano-mediocampid" + + "anomediocouncilcouponschwarzgwangjuifminamidaitomangotembaixadac" + + "ourseschweizippodlasiellakasamatsudovre-eikercq-acranbrookuwanal" + + "yticsciencecentersciencehistorycreditcardcreditunioncremonashoro" + + "kanaiecrewiiheyaizuwakamatsubushikusakadogawacricketrzyncrimeacr" + + "otonewportlligatewaycrowncrscientistor-elvdalcruisescjohnsoncuis" + + "inellajollamericanexpressexyzjcbnlculturalcentertainmentoyokawac" + + "uneocupcakecxn--1ck2e1balsanagochihayaakasakawaharaumakeupowiath" + + "letajimabariakepnordkappgjesdalillyonabaruconnectarnobrzegjovika" + + "ruizawaugustowadaegubs3-ap-southeast-2cymruovatoyonakagyokutoshi" + + "macyouthdfcbankhersonfilateliafilminamiechizenfinalfinancefinear" + + "tsettsurfastlyfinlandfinnoyfirebaseappamperedchefauskedsmokorset" + + "agayaseljordfirenzefirestonextdirectoryfirmdalegoldpointelligenc" + + "efishingolfbsbxn--1ctwolominamatamayukis-a-geekhmelnitskiyamashi" + + "kefitjarqhachiojiyahikobeautydalfitnessettlementoyookarasjohkami" + + "noyamatsuris-a-greenfjalerflickragerotikaluganskhmelnytskyivalle" + + "e-aosteroyflightsevastopolezajskhvalleeaosteigenflirumansionseve" + + "nassisicilyfloguchikuzenfloraflorencefloridafloristanohatakaharu" + + "ssiafloromskoguovdageaidnulminamifuranoflowersewildlifestyleflsm" + + 
"idthruhereggio-emilia-romagnakanotoddenflynnhubalsfjordiskstatio" + + "naustdalimanowarudaukraanghke164fndfolldalfoodnetworkangerfor-be" + + "tter-thandafor-ourfor-somedizinhistorischesfranziskanerimamatera" + + "mochizukirafor-theaterforexrothachirogatakanezawaforgotdnshangri" + + "langevagrarboretumbriaforli-cesena-forlicesenaforlikes-piedmontb" + + "lancomeeresharis-a-gurulsandoyforsaleikangerforsandasuolodingenf" + + "ortmissoulan-udell-ogliastrakhanawawilliamhillfortworthadanotoga" + + "waforuminamiiselectoyosatotalfosnesharpanamafotoyotaris-a-hard-w" + + "orkerfoxn--1lqs03nfreiburgushikamifuranotaireshawaiijimarylandfr" + + "eightcmwinbaltimore-og-romsdalimitedunetflixilimoliserniaurskog-" + + "holandroverhalla-speziaetnagahamaroygardendoftheinternetcimdbala" + + "tinordre-landds3-ap-northeast-2freseniusdecorativeartshellaspezi" + + "afribourgxn--1lqs71dfriuli-v-giuliafriuli-ve-giuliafriuli-vegiul" + + "iafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv" + + "-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriulive" + + "neziagiuliafriulivgiuliafrlfroganshimokawafrognfrolandfrom-akreh" + + "amnfrom-alfrom-arfrom-azlgzpanasonicheltenham-radio-operaunitele" + + "markautokeinofrom-canonoichikawamisatodayfrom-collectionfrom-cto" + + "yotomiyazakis-a-hunterfrom-dchelyabinskodjeffersonisshinguernsey" + + "from-dellogliastraderfrom-flandershimokitayamafrom-gaulardalfrom" + + "-higashiagatsumagoirmitakeharafrom-iafrom-idfrom-ilfrom-incheonf" + + "rom-kshimonitayanagivestbytomaritimekeepingfrom-kyknetoyotsukaid" + + "ownloadfrom-lanbibaidarfrom-manxn--1qqw23afrom-mdfrom-meetoyoura" + + "from-microsoftbanklabudhabikinokawabarthadselfipirangafrom-mnfro" + + "m-modalenfrom-mshimonosekikawafrom-mtnfrom-nchernigovernmentjome" + + "morialucaniafrom-ndfrom-nexusgardenfrom-nhktoystre-slidrettozawa" + + "from-njcparaglidingfrom-nminamiizukamitondabayashiogamagoriziafr" + + "om-nvanylvenicefrom-nyfrom-ohkurafrom-oketogurafrom-orfrom-pader" + + 
"bornfrom-pratohmaoris-a-knightozsdefrom-ris-a-landscaperugiafrom" + + "-schoenbrunnfrom-sdnipropetrovskmpspbambleborkarumaifarmsteadivt" + + "asvuodnakaiwamizawaustevollavangenativeamericanantiques3-eu-cent" + + "ral-1from-tnfrom-txn--2m4a15efrom-utazuerichardlikescandyndns-at" + + "-homedepotaruis-a-lawyerfrom-vadsochildrensgardenfrom-vtranbyfro" + + "m-wafrom-wielunnerfrom-wvaolbia-tempio-olbiatempioolbialystokkem" + + "erovodkagoshimaintenancefrom-wyfrosinonefrostalowa-wolawafroyaha" + + "badajozorahkkeravjudygarlandfstcgrouparisor-fronfujiiderafujikaw" + + "aguchikonefujiminohtawaramotoineppugliafujinomiyadafujiokayamarb" + + "urgfujisatoshonairportland-4-salernogiessengerdalaskanittedallas" + + "alleaseeklogesquarezzoologyfujisawafujishiroishidakabiratoridelm" + + "enhorstalbanshimosuwalkis-a-liberalfujitsurugashimarinefujixerox" + + "n--30rr7yfujiyoshidafukayabeardubaiduckdnsdojoburgfukuchiyamadaf" + + "ukudominichernihivanovosibirskydivingrondarfukuis-a-libertarianf" + + "ukumitsubishigakirkeneshimotsukefukuokazakirovogradoyfukuroishik" + + "arikaturindalfukusakiryuohaebaruminamimakis-a-linux-useranishiar" + + "itabashikaoizumizakitaurayasudafukuyamagatakahashimamakisarazure" + + "websiteshikagamiishibukawafunabashiriuchinadafunagatakahatakaish" + + "imoichinosekigaharafunahashikamiamakusatsumasendaisennangonoheji" + + "s-a-llamarylhursteinkjerusalembetsukuis-a-musicianfundaciofuoisk" + + "ujukuriyamarcheaparliamentrani-andria-barletta-trani-andriafuoss" + + "koczowindmillfurnitureggiocalabriafurubiraquarelleasingleshimots" + + "umafurudonostiafurukawairtelecityeatshinichinanfusodegaurafussai" + + "kishiwadafutabayamaguchinomigawafutboldlygoingnowhere-for-morego" + + "ntrailroadfuttsurugiminamiminowafvgfyis-a-nascarfanfylkesbiblack" + + "fridayfyresdalhannovareserveftparocherkasyzrankoshigayaltaikis-a" + + "-painteractivegarsheis-a-patsfanhanyuzenhapmirhappoulvikokonoeha" + + "reidsbergenharstadharvestcelebrationhasamarahasaminami-alpssells" + + 
"-for-unzenhashbanghasudahasvikolobrzegyptianpachigasakidsmynaspe" + + "rschlesischesurancehatogayahoooshikamaishimofusartshinkamigotoyo" + + "hashimotomobellunordreisa-geekomaganehatoyamazakitahatakaokamiki" + + "tayamatotakadahatsukaichiharahattfjelldalhayashimamotobuildingha" + + "zuminobusells-itraniandriabarlettatraniandriahbofagehembygdsforb" + + "undhemneshinshinotsurgeonshalloffamelhustkamisunagawahemsedalher" + + "okussldheroyhgtvarggatranoyhigashichichibungotakadatsunanjoetsuw" + + "anouchikujogaszkoladbrokesennumamurogawalterhigashihiroshimanehi" + + "gashiizumozakitakamiizumisanofiatransportrapaniimimatakatoris-a-" + + "personaltrainerhigashikagawahigashikagurasoedahigashikawakitaaik" + + "itakatakarazukamikoaniikappulawyhigashikurumeguroroskoleirvikoma" + + "kiyosatokamachippubetsubetsugaruhigashimatsushimarugame-hostingh" + + "igashimatsuyamakitaakitadaitoigawahigashimurayamalatvuopmidoris-" + + "a-photographerokuapparshinshirohigashinarusellsyourhomegoodshint" + + "okushimahigashinehigashiomihachimanchesterhigashiosakasayamamoto" + + "rcycleshintomikasaharahigashishirakawamatakasagooglecodespotrave" + + "lchannelhigashisumiyoshikawaminamiaikitakyushuaiahigashitsunowru" + + "zhgorodoyhigashiurausukitamidsundhigashiyamatokoriyamanakakogawa" + + "higashiyodogawahigashiyoshinogaris-a-playerhiraizumisatohnoshooh" + + "irakatashinagawahiranais-a-republicancerresearchaeologicaliforni" + + "ahirarahiratsukagawahirayaitakasakitamotosumitakaginankokubunjis" + + "-a-rockstarachowicehisayamanashiibaghdadultravelersinsurancehist" + + "orichouseshinyoshitomiokaniepcehitachiomiyaginowaniihamatamakawa" + + "jimaritimodellinghitachiotagopartis-a-socialistmeindianapolis-a-" + + "bloggerhitoyoshimifunehitradinghjartdalhjelmelandholeckobierzyce" + + "holidayhomeipartnershiojirishirifujiedahomelinuxn--32vp30hagebos" + + "tadhomesensembokukitanakagusukumoduminamiogunicomcastresistanceh" + + "omeunixn--3bst00minamisanrikubetsupplyhondahonefosshioyameloyali" + + 
"stockholmestrandhoneywellhongorgehonjyoitakashimarumorimachidaho" + + "rnindalhorseminehortendofinternetrdhoteleshirahamatonbetsurgeryh" + + "otmailhoyangerhoylandetroitskomatsushimashikiyosemitehumanitiesh" + + "irakoenighurdalhurumajis-a-soxfanhyllestadhyogoris-a-studentalhy" + + "ugawarahyundaiwafunejgorajlchitachinakagawatchandclockazimierz-d" + + "olnyjlljmpartshishikuis-an-actorjnjelenia-gorajoyokaichibahcavuo" + + "tnagaraholtalenjpmorganichitosetogakushimotoganewjerseyjpnchloej" + + "prshisognejuniperjurkristiansandcatshisuifuettertdasnetzwindowsh" + + "itaramakristiansundkrodsheradkrokstadelvaldaostarostwodzislawinn" + + "ershizukuishimogosenkryminamitanekumatorinokumejimasudakumenanyo" + + "kkaichirurgiens-dentisteshizuokanoyakagekunisakis-an-entertainer" + + "kunitachiarailwaykunitomigusukumamotoyamassa-carrara-massacarrar" + + "amassabunkyonanaoshimageandsoundandvisionkunneppupartykunstsamml" + + "ungkunstunddesignkuokgroupasadenamsosnowiechocolatelevisionrwhal" + + "ingrongausdaluccapebretonamiasakuchinotsuchiurakawassamukawatari" + + "cohdavvenjargamvikazokureitrentino-stirolkurgankurobelaudiblebes" + + "byglandkurogimilitarykuroisoftwarendalenugkuromatsunais-bykurota" + + "kikawasakis-certifiedekakegawakurskomonokushirogawakustanais-fou" + + "ndationkusupersportrentino-sud-tirolkutchanelkutnokuzbassnillfjo" + + "rdkuzumakis-gonekvafjordkvalsundkvamlidlugolekagaminord-aurdalvd" + + "alipayufuchukotkafjordkvanangenkvinesdalkvinnheradkviteseidskogk" + + "vitsoykwpspjelkavikomorotsukamishihoronobeokaminokawanishiaizuba" + + "ngekyotobetsupplieshoujis-into-animeiwamaseratis-a-therapistoiak" + + "yowariasahikawamishimatsumotofukemissileksvikongsbergmisugitokon" + + "amegatakayamatsunomitourismolanciamitoyoakemiuramiyazumiyotamano" + + "mjondalenmlbarclaycards3-us-west-1monmouthaibarakitagawamonsterm" + + "onticellolmontrealestatefarmequipmentrentino-sudtirolmonza-brian" + + "zaporizhzhekinannestadmonza-e-della-brianzaporizhzhiamonzabrianz" + + 
"apposlombardiamondshowtimemerckongsvingermonzaebrianzaramonzaede" + + "llabrianzamoparachutingmordoviajessheiminamiuonumatsumaebashimod" + + "atemoriyamatsusakahoginozawaonsenmoriyoshiokamitsuemormoneymoroy" + + "amatsushigemortgagemoscowiostrolekaneyamaxunjargamoseushistorymo" + + "sjoenmoskeneshriramsterdambulanceomossienarashinomosvikoninjamis" + + "onmoviemovistargardmtpccwitdkonskowolancashirehabmermtranakayama" + + "tsuuramuenstermugithubcloudusercontentrentino-sued-tirolmuikamog" + + "awamukochikushinonsenergymulhouservebbsigdalmultichoicemunakatan" + + "emuncieszynmuosattemupassagensimbirskonsulatrobeermurmanskonyvel" + + "oftrentino-s-tirollagrigentomologyeonggiehtavuoatnagaivuotnagaok" + + "akyotambabydgoszczecinemailmurotorcraftrentino-suedtirolmusashim" + + "urayamatsuzakis-leetrentino-a-adigemusashinoharamuseetrentinoa-a" + + "digemuseumverenigingmutsuzawamutuellelmyokohamamatsudamypetsimpl" + + "e-urlmyphotoshibahccavuotnagareyamaizurubtsovskiptveterinairebun" + + "goonomichinomiyakemytis-a-bookkeeperminamiyamashirokawanabelgoro" + + "deophiladelphiaareadmyblogsitephilatelyphilipsyphoenixn--3e0b707" + + "ephotographysiopiagetmyipassenger-associationpictetrentinoaadige" + + "pictureslupskooris-an-actresshiraois-a-techietis-a-teacherkassym" + + "antechnologypiemontepilotsmolenskopervikommunalforbundpinkoryola" + + "sitepioneerpippupiszpittsburghofedjejuegoshikiminokamoenairlineb" + + "raskaunbieidsvollpiwatepizzapkosaigawaplanetariuminanoplantation" + + "plantsnoasaitamatsukuris-lostre-toteneis-an-accountantshiranukan" + + "makiwakunigamihamadaplatformincommbankomvuxn--3ds443gplaystation" + + "plazaplchofunatorientexpressasayamaplombardyndns-at-workinggroup" + + "aviancapetownplumbingotvbarclays3-us-west-2plusterpmnpodzonepohl" + + "pokerpokrovskosakaerodromegallupinbarcelonagasakijobservercellie" + + "rneues3-us-gov-west-1politiendapolkowicepoltavalle-aostathellewi" + + "smillerpomorzeszowithgoogleapisa-hockeynutrentinoalto-adigeporde" + + 
"nonepornporsangerporsangugeporsgrunnanpoznanpraxis-a-bruinsfansn" + + "zprdpreservationpresidioprgmrprimelbourneprincipeprivneprochowic" + + "eproductionsokanraprofermobilyprogressivenneslaskerrylogisticsok" + + "ndalprojectrentinoaltoadigepromombetsupportrentinos-tirolpropert" + + "yprotectionprudentialpruszkowithyoutubeneventochiokinoshimalselv" + + "endrellprzeworskogptzpvtrentinostirolpwchonanbugattipschmidtre-g" + + "auldalucernepzqldqponqslgbtrentinosud-tirolqvchoseiroumuenchenst" + + "orfjordstpetersburgstreamurskinderoystudiostudyndns-freemasonryo" + + "kamikawanehonbetsurutaharastuff-4-salestuttgartrentinosuedtirols" + + "urnadalsurreysusakis-slickomforbananarepublicargodaddynathomebui" + + "ltarumizusawaustinnaturalhistorymuseumcentereviewskrakowebhopage" + + "frontappagespeedmobilizerobihirosakikamijimagroks-thisamitsukeis" + + "enbahnasushiobaraeroportalabamagasakishimabarackmaze-burggfarmer" + + "seinewyorkshireggio-calabriabruzzoologicalvinklein-addrammenuern" + + "bergdyniabogadocscbg12000susonosuzakanumazurysuzukanzakiwiensuzu" + + "kis-uberleetrentino-aadigesvalbardudinkakamigaharasveiosvelvikos" + + "himizumakiyosumykolaivaroysvizzeraswedenswidnicapitalonewholland" + + "swiebodzindianmarketingswiftcoverisignswinoujscienceandhistorysw" + + "isshikis-very-badaddjamalborkdalsxn--3oq18vl8pn36atuscanytushuis" + + "sier-justicetuvalle-daostavangervestnesopotrentinosudtirolvestre" + + "-slidreamhostersor-odalvestre-totennishiawakuravestvagoyvevelsta" + + "dvibo-valentiavibovalentiavideovillaskoyabearalvahkihokumakogeni" + + "waizumiotsukumiyamazonawsabaerobaticketsor-varangervinnicarbonia" + + "-iglesias-carboniaiglesiascarboniavinnytsiavipsinaappfizervirgin" + + "iavirtualvirtuelvisakatakinouevistaprintuitrentottoris-very-evil" + + "lageviterboltrevisohughesolognevivoldavladikavkazanvladimirvladi" + + "vostokaizukarasuyamazoevlogvolkenkunderseaportroandinosaurepbody" + + "ndns-blogdnsolundbeckoseis-an-anarchistoricalsocietyvolkswagents" + + 
"orfoldvologdanskostromahachijorpelandvolvolgogradvolyngdalvorone" + + "zhytomyrvossevangenvotevotingvotoursorreisahayakawakamiichikaise" + + "is-saveducatorahimeshimakanegasakinkobayashikshacknetnedalvrnwor" + + "se-thangglidingwowiwatsukiyonowritesthisblogspotrogstadwroclawlo" + + "clawekosugewtchoshibuyachiyodawtferrarawuozuwwworldwzmiuwajimaxn" + + "--4gq48lf9jeonnamerikawauexn--4it168dxn--4it797kotouraxn--4pvxso" + + "rtlandxn--54b7fta0cchromediaxn--55qw42gxn--55qx5dxn--5js045dxn--" + + "5rtp49chryslerxn--5rtq34kouhokutamakis-an-artistjohnxn--5su34j93" + + "6bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--" + + "7t0a264chungbukazunoxn--80adxhksoruminnesotaketakatsukis-into-ca" + + "rshiraokannamiharuxn--80ao21axn--80aqecdr1axn--80asehdbarefootba" + + "llangenoamishirasatobishimalopolskanlandivttasvuotnakamagayachts" + + "akuraibigawaustraliaisondriodejaneirochesterhcloudcontrolledigit" + + "alaziobirakunedre-eikereportarantomsk-uralsk12xn--80aswgxn--80au" + + "dnedalnxn--8ltr62kounosunndalxn--8pvr4uxn--8y0a063axn--90a3acade" + + "mydroboatsaritsynologyeongbukouyamashikokuchuoxn--90aishobaraomo" + + "riguchiharagusaarlandxn--90azhair-surveillancexn--9dbhblg6diethn" + + "ologyxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byan" + + "agawaxn--asky-iraxn--aurskog-hland-jnbargainstitutelefonicafeder" + + "ationaval-d-aosta-valleyonagoyaustrheimatunduhrennesoyekaterinbu" + + "rgjemnes3-eu-west-1xn--avery-yuasakegawaxn--b-5gaxn--b4w605ferdx" + + "n--bck1b9a5dre4chungnamdalseidfjordyroyrvikingrossetouchijiwadel" + + "tajimicrolightingroundhandlingroznyxn--bdddj-mrabdxn--bearalvhki" + + "-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr" + + "-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyanaizuxn--bjddar-ptamb" + + "oversaillesooxn--blt-elaborxn--bmlo-graingerxn--bod-2naroyxn--br" + + "nny-wuaccident-investigationjukudoyamaceratabuseat-band-campania" + + "mallamadridvagsoyericssonlineat-urlxn--brnnysund-m8accident-prev" + + 
"entionxn--brum-voagatromsakakinokiaxn--btsfjord-9zaxn--c1avgxn--" + + "c2br7gxn--c3s14minternationalfirearmshowaxn--cck2b3barreauctiona" + + "vigationavuotnakhodkanagawauthordalandroidiscountyumenaturalscie" + + "ncesnaturelles3-external-1xn--cg4bkis-very-goodhandsonxn--ciqpnx" + + "n--clchc0ea0b2g2a9gcdn77-securecipesaro-urbino-pesarourbinopesar" + + "omaniwakuratelekommunikationxn--comunicaes-v6a2oxn--correios-e-t" + + "elecomunicaes-ghc29axn--czr694barrel-of-knowledgeometre-experts-" + + "comptablesakyotanabellevuelosangelesjaguarchitecturealtychyattor" + + "neyagawalbrzycharternopilawalesundiyonaguniversityoriikasaokamio" + + "kamiminersalangenayoroceanographicsalondonetskashibatakasugaibmd" + + "npalacemergencyberlevagangaviikanonjiinetatamotorsaltdalindasiau" + + "tomotivecodyn-o-saurlandes3-external-2xn--czrs0tromsojavald-aost" + + "arnbergxn--czru2dxn--czrw28barrell-of-knowledgeorgeorgiautoscana" + + "daejeonbukariyakumoldebinagisoccertificationaturbruksgymnaturhis" + + "torisches3-fips-us-gov-west-1xn--d1acj3bashkiriaveroykenvironmen" + + "talconservationatuurwetenschappenaumburgjerdrumckinseyokosukarel" + + "iancebinosegawasmatartanddesignieznorddalavagiske12xn--d1alfarom" + + "eoxn--d1atrusteexn--d5qv7z876churchaseljeepilepsydneyxn--davvenj" + + "rga-y4axn--djrs72d6uyxn--djty4kouzushimasoyxn--dnna-grajewolters" + + "kluwerxn--drbak-wuaxn--dyry-iraxn--eckvdtc9dxn--efvn9southcaroli" + + "nazawaxn--efvy88hakatanotteroyxn--ehqz56nxn--elqq16hakodatevaksd" + + "alxn--estv75gxn--eveni-0qa01gaxn--f6qx53axn--fct429kozagawaxn--f" + + "hbeiarnxn--finny-yuaxn--fiq228c5hsouthwestfalenxn--fiq64basilica" + + "taniavocatanzaroweddingjerstadotsuruokamakurazakisofukushimarnar" + + "dalillesandefjordiscoveryggeelvinckarlsoyokotebizenakaniikawatan" + + "agurasnesoddenmarkets3-ap-southeast-1kappleangaviikadenaamesjevu" + + "emielnoboribetsucks3-ap-northeast-1xn--fiqs8sowaxn--fiqz9spreadb" + + "ettingxn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351ex" + + 
"n--fpcrj9c3dxn--frde-grandrapidspydebergxn--frna-woaraisaijosoyr" + + "ovigorlicexn--frya-hraxn--fzc2c9e2chuvashiaxn--fzys8d69uvgmailxn" + + "--g2xx48circlegallocuscountryestateofdelawarecreationxn--gckr3f0" + + "ferrarittogokasells-for-lesscrapper-sitexn--gecrj9circuscultured" + + "umbrellahppiacenzakopanerairforcechirealtorlandxn--ggaviika-8ya4" + + "7hakonexn--gildeskl-g0axn--givuotna-8yandexn--3pxu8kotohiradomai" + + "nsureisenxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-ve" + + "ry-nicexn--gmqw5axn--h-2fairwindsrlxn--h1aeghakubankmshinjournal" + + "ismailillehammerfest-mon-blogueurovisionxn--h2brj9citadeliverybn" + + "ikahokutogliattiresaskatchewanggouvicenzaxn--hbmer-xqaxn--hcesuo" + + "lo-7ya35basketballfinanz-2xn--hery-iraxn--hgebostad-g3axn--hmmrf" + + "easta-s4acctrverranzanxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxa" + + "xn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b" + + "1a6a2exn--imr513nxn--indery-fyaotsurgutsiracusaitokyotangovtrysi" + + "lkoshunantokashikizunokunimilanoxn--io0a7is-very-sweetrentino-al" + + "to-adigexn--j1aeferreroticampobassociatescrappingujolsterxn--j1a" + + "mhakuis-a-nurseoullensvanguardxn--j6w193gxn--jlq61u9w7batochigif" + + "tsalvadordalibabaikaliszczytnordlandrangedalindesnesalzburgladel" + + "oittenrightathomeftpaccessamegawavoues3-sa-east-1xn--jlster-byar" + + "oslavlaanderenxn--jrpeland-54axn--jvr189misakis-into-cartoonshir" + + "atakahagivingxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn" + + "--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5" + + "xn--42c2d9axn--koluokta-7ya57hakusandiegoodyearthagakhanamigawax" + + "n--kprw13dxn--kpry57dxn--kpu716fetsundxn--kput3is-with-thebandoo" + + "mdnsaliascolipicenord-odalxn--krager-gyasakaiminatoyakokamisatoh" + + "obby-sitexasdaburyatiaarpharmaciensirdalxn--kranghke-b0axn--krds" + + "herad-m8axn--krehamn-dxaxn--krjohka-hwab49jetztrentino-altoadige" + + "xn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasugisleofmandalxn--kvn" + + 
"angen-k0axn--l-1faitheguardianquanconagawakuyabukicks-assediciti" + + "cateringebudejjuedischesapeakebayernurembergriwataraidyndns-work" + + "shoppdalowiczest-le-patrondheimperiaxn--l1accentureklamborghinii" + + "zaxn--laheadju-7yasuokaratexn--langevg-jxaxn--lcvr32dxn--ldingen" + + "-q1axn--leagaviika-52batsfjordrivelandrobaknoluoktainaikawachina" + + "ganoharamcoalaheadjudaicaaarborteaches-yogasawaracingroks-theatr" + + "eemersongdalenviknakanojohanamakinoharaxastronomydstvedestrandgc" + + "ahcesuolocalhistoryazannefrankfurtargets-itargi234xn--lesund-hua" + + "xn--lgbbat1ad8jevnakerxn--lgrd-poacoachampionshiphopenair-traffi" + + "c-controlleyxn--lhppi-xqaxn--linds-pramericanartulansolutionsola" + + "rssonxn--lns-qlanxessrtrentinosued-tirolxn--loabt-0qaxn--lrdal-s" + + "raxn--lrenskog-54axn--lt-liacivilaviationxn--lten-granexn--lury-" + + "iraxn--mely-iraxn--merker-kuaxn--mgb2ddesrvdonskosherbrookegawax" + + "n--mgb9awbfgulenxn--mgba3a3ejtunesomaxn--mgba3a4f16axn--mgba3a4f" + + "ranamizuholdingsmileirfjordxn--mgba7c0bbn0axn--mgbaakc7dvfidelit" + + "yxn--mgbaam7a8haldenxn--mgbab2bdxn--mgbai9a5eva00bauhausposts-an" + + "d-telecommunicationsnasadoes-itveronagasukemrxn--mgbai9azgqp6jew" + + "elryxn--mgbayh7gpaduaxn--mgbb9fbpobanazawaxn--mgbbh1a71exn--mgbc" + + "0a9azcgxn--mgbca7dzdoxn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgb" + + "i4ecexposedxn--mgbpl2fhvalerxn--mgbqly7c0a67fbcivilisationxn--mg" + + "bqly7cvafredrikstadtvstoragexn--mgbt3dhdxn--mgbtf8flekkefjordxn-" + + "-mgbtx2bbcarrierxn--mgbx4cd0abbvieeexn--mix082fidonnakamuratakah" + + "amannortonsbergunmarriottoyonezawaxn--mix891fieldxn--mjndalen-64" + + "axn--mk0axindustriesteamfamberkeleyxn--mk1bu44civilizationxn--mk" + + "ru45issmarterthanyouxn--mlatvuopmi-s4axn--mli-tlapyatigorskozaki" + + "s-an-engineeringxn--mlselv-iuaxn--moreke-juaxn--mori-qsakuhokkai" + + "dontexisteingeekpnxn--mosjen-eyatominamiawajikiwchiryukyuragifue" + + "fukihaborokunohealthcareersarufutsunomiyawakasaikaitakoelniyodog" + + 
"awaxn--mot-tlaquilancasterxn--mre-og-romsdal-qqbbtatarstanflatan" + + "gerxn--msy-ula0halsaintlouis-a-anarchistoireggioemiliaromagnakat" + + "ombetsumidatlantichernivtsiciliaxn--mtta-vrjjat-k7afamilycompany" + + "civilwarmanagementjxjaworznoxn--muost-0qaxn--mxtq1misasaguris-in" + + "to-gamessinashikitchenxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--45brj9" + + "choyodobashichikashukujitawaraxn--nit225kppspiegelxn--nmesjevuem" + + "ie-tcbajddarchaeologyxn--nnx388axn--nodessakuragawaxn--nqv7fs00e" + + "maxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservegame" + + "-serverdalxn--nvuotna-hwaxn--nyqy26axn--o1achattanooganorilsklep" + + "pharmacyslingxn--o3cw4hammarfeastafricamagichernovtsykkylvenetoe" + + "iheijis-a-doctorayxn--od0algxn--od0aq3bbvacationswatch-and-clock" + + "erxn--ogbpf8flesbergxn--oppegrd-ixaxn--ostery-fyatsukaratsuginam" + + "ikatagamihoboleslawieclaimsassaris-a-financialadvisor-aurdaluroy" + + "xn--osyro-wuaxn--p1acfdxn--p1aixn--pbt977clickchristiansburgrpal" + + "ermomasvuotnakatsugawaxn--pgbs0dhlxn--porsgu-sta26figuerestauran" + + "toyonoxn--pssu33lxn--pssy2uxn--q9jyb4clinicatholicasinorfolkebib" + + "lefrakkestadyndns-wikindlegnicamerakershus-east-1xn--qcka1pmcdon" + + "aldstordalxn--qqqt11misawaxn--qxamusementurystykarasjoksnesomnar" + + "itakurashikis-not-certifiedogawarabikomaezakirunoshiroomuraxn--r" + + "ady-iraxn--rdal-poaxn--rde-ularvikrasnodarxn--rdy-0nabarixn--ren" + + "nesy-v1axn--rhkkervju-01aflakstadaokagakibichuoxn--rholt-mragowo" + + "odsidexn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5narusawa" + + "xn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byatsushiroxn" + + "--rny31hamurakamigoriginshinjukumanoxn--rovu88bentleyukuhashimoj" + + "iitatebayashijonawatextileitungsenfshostrodawaraxn--rros-granvin" + + "dafjordxn--rskog-uuaxn--rst-0narutokorozawaxn--rsta-francaisehar" + + "axn--ryken-vuaxn--ryrvik-byawaraxn--s-1fareastcoastaldefencexn--" + + "s9brj9cliniquenoharaxn--sandnessjen-ogbizhevskrasnoyarskommunexn" + + 
"--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-gratangenxn--skierv-" + + "utazaskvolloabathsbclintonoshoesatxn--0trq7p7nnxn--skjervy-v1axn" + + "--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5narviikananporov" + + "noxn--slt-elabourxn--smla-hraxn--smna-gratis-a-bulls-fanxn--snas" + + "e-nraxn--sndre-land-0cbremangerxn--snes-poaxn--snsa-roaxn--sr-au" + + "rdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbeppubol" + + "ognagatorockartuzyurihonjournalistjordalshalsenhsamnangerxn--srf" + + "old-byawatahamaxn--srreisa-q1axn--srum-grazxn--stfold-9xaxn--stj" + + "rdal-s1axn--stjrdalshalsen-sqberndunloppacificartierxn--stre-tot" + + "en-zcbstorenburgxn--t60b56axn--tckweatherchannelxn--tiq49xqyjewi" + + "shartgalleryxn--tjme-hraxn--tn0agrinetbankzxn--tnsberg-q1axn--to" + + "r131oxn--trany-yuaxn--trgstad-r1axn--trna-woaxn--troms-zuaxn--ty" + + "svr-vraxn--uc0atversicherungxn--uc0ay4axn--uist22hangoutsystemsc" + + "loudcontrolapparmaxn--uisz3gxn--unjrga-rtaobaokinawashirosatobam" + + "agazinemurorangeologyxn--unup4yxn--uuwu58axn--vads-jraxn--vard-j" + + "raxn--vegrshei-c0axn--vermgensberater-ctbeskidynaliascoli-piceno" + + "rd-frontierxn--vermgensberatung-pwbestbuyshousesamsclubindalinka" + + "shiharaxn--vestvgy-ixa6oxn--vg-yiabcgxn--vgan-qoaxn--vgsy-qoa0jf" + + "komitamamuraxn--vgu402clothingruexn--vhquvestfoldxn--vler-qoaxn-" + + "-vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861betainaboxfor" + + "deatnuorogersvpalanaklodzkodairaxn--w4r85el8fhu5dnraxn--w4rs40lx" + + "n--wcvs22dxn--wgbh1cloudfrontdoorxn--wgbl6axn--xhq521bielawallon" + + "ieruchomoscienceandindustrynikiiyamanobeauxartsandcraftsamsungla" + + "ssassinationalheritagematsubarakawagoepostfoldnavyatkakudamatsue" + + "psonyoursidegreevje-og-hornnesandvikcoromantovalle-d-aostatoilin" + + "zainfinitinfoggiaxn--xkc2al3hye2axn--xkc2dl3a5ee0hannanmokuizumo" + + "dernxn--y9a3aquariumisconfusedxn--yer-znarvikredstonexn--yfro4i6" + + "7oxn--ygarden-p1axn--ygbi2ammxn--45q11christmasakikugawatchesase" + + 
"boknowsitallukowhoswhokksundynv6xn--ystre-slidre-ujbiellaakesvue" + + "mieleccexn--zbx025dxn--zf0ao64axn--zf0avxn--4gbriminingxn--zfr16" + + "4bieszczadygeyachimataipeigersundunsagamiharaxperiaxz" + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// In the //-comment after each node's data, the nodes indexes of the children +// are formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. +// +// The layout within the uint32, from MSB to LSB, is: +// [ 1 bits] unused +// [ 9 bits] children index +// [ 1 bits] ICANN bit +// [15 bits] text index +// [ 6 bits] text length +var nodes = [...]uint32{ + 0x00355603, // n0x0000 c0x0000 (---------------) + I aaa + 0x0034d544, // n0x0001 c0x0000 (---------------) + I aarp + 0x0026b886, // n0x0002 c0x0000 (---------------) + I abarth + 0x00230743, // n0x0003 c0x0000 (---------------) + I abb + 0x00230746, // n0x0004 c0x0000 (---------------) + I abbott + 0x00365706, // n0x0005 c0x0000 (---------------) + I abbvie + 0x00399843, // n0x0006 c0x0000 (---------------) + I abc + 0x0031f144, // n0x0007 c0x0000 (---------------) + I able + 0x002ee207, // n0x0008 c0x0000 (---------------) + I abogado + 0x0026b4c8, // n0x0009 c0x0000 (---------------) + I abudhabi + 0x01a01542, // n0x000a c0x0006 (n0x0609-n0x060f) + I ac + 0x0030aec7, // n0x000b c0x0000 (---------------) + I academy + 0x00352a89, // n0x000c c0x0000 (---------------) + I accenture + 0x002d9b0a, // n0x000d c0x0000 (---------------) + I accountant + 0x002d9b0b, // n0x000e c0x0000 (---------------) + I accountants + 0x00232d83, // n0x000f c0x0000 (---------------) + I aco + 0x0028a206, // n0x0010 c0x0000 
(---------------) + I active + 0x0023b505, // n0x0011 c0x0000 (---------------) + I actor + 0x01e00342, // n0x0012 c0x0007 (n0x060f-n0x0610) + I ad + 0x00212f84, // n0x0013 c0x0000 (---------------) + I adac + 0x0026ba03, // n0x0014 c0x0000 (---------------) + I ads + 0x002a1985, // n0x0015 c0x0000 (---------------) + I adult + 0x022035c2, // n0x0016 c0x0008 (n0x0610-n0x0618) + I ae + 0x0024a403, // n0x0017 c0x0000 (---------------) + I aeg + 0x026389c4, // n0x0018 c0x0009 (n0x0618-n0x066f) + I aero + 0x0025e585, // n0x0019 c0x0000 (---------------) + I aetna + 0x02a04a42, // n0x001a c0x000a (n0x066f-n0x0674) + I af + 0x0036ec8e, // n0x001b c0x0000 (---------------) + I afamilycompany + 0x00252703, // n0x001c c0x0000 (---------------) + I afl + 0x00375846, // n0x001d c0x0000 (---------------) + I africa + 0x0037584b, // n0x001e c0x0000 (---------------) + I africamagic + 0x02e01002, // n0x001f c0x000b (n0x0674-n0x0679) + I ag + 0x0034ac47, // n0x0020 c0x0000 (---------------) + I agakhan + 0x0023df86, // n0x0021 c0x0000 (---------------) + I agency + 0x032016c2, // n0x0022 c0x000c (n0x0679-n0x067d) + I ai + 0x00214d03, // n0x0023 c0x0000 (---------------) + I aig + 0x00214d04, // n0x0024 c0x0000 (---------------) + I aigo + 0x0022b886, // n0x0025 c0x0000 (---------------) + I airbus + 0x00338648, // n0x0026 c0x0000 (---------------) + I airforce + 0x00286586, // n0x0027 c0x0000 (---------------) + I airtel + 0x00227644, // n0x0028 c0x0000 (---------------) + I akdn + 0x036001c2, // n0x0029 c0x000d (n0x067d-n0x0684) + I al + 0x00328e49, // n0x002a c0x0000 (---------------) + I alfaromeo + 0x00345287, // n0x002b c0x0000 (---------------) + I alibaba + 0x002bc006, // n0x002c c0x0000 (---------------) + I alipay + 0x0033e3c9, // n0x002d c0x0000 (---------------) + I allfinanz + 0x0020f148, // n0x002e c0x0000 (---------------) + I allstate + 0x00213584, // n0x002f c0x0000 (---------------) + I ally + 0x0021dd86, // n0x0030 c0x0000 (---------------) + I alsace + 
0x0020be86, // n0x0031 c0x0000 (---------------) + I alstom + 0x03a01882, // n0x0032 c0x000e (n0x0684-n0x0685) + I am + 0x0024728f, // n0x0033 c0x0000 (---------------) + I americanexpress + 0x00208d8e, // n0x0034 c0x0000 (---------------) + I americanfamily + 0x002052c4, // n0x0035 c0x0000 (---------------) + I amex + 0x00367585, // n0x0036 c0x0000 (---------------) + I amfam + 0x00230645, // n0x0037 c0x0000 (---------------) + I amica + 0x002c8449, // n0x0038 c0x0000 (---------------) + I amsterdam + 0x00243f09, // n0x0039 c0x0000 (---------------) + I analytics + 0x0031a8c7, // n0x003a c0x0000 (---------------) + I android + 0x00350706, // n0x003b c0x0000 (---------------) + I anquan + 0x00256b43, // n0x003c c0x0000 (---------------) + I anz + 0x03e029c2, // n0x003d c0x000f (n0x0685-n0x068b) + I ao + 0x00275643, // n0x003e c0x0000 (---------------) + I aol + 0x0022ce0a, // n0x003f c0x0000 (---------------) + I apartments + 0x002092c3, // n0x0040 c0x0000 (---------------) + I app + 0x00331985, // n0x0041 c0x0000 (---------------) + I apple + 0x002003c2, // n0x0042 c0x0000 (---------------) + I aq + 0x00285a49, // n0x0043 c0x0000 (---------------) + I aquarelle + 0x04200a42, // n0x0044 c0x0010 (n0x068b-n0x0694) + I ar + 0x00202044, // n0x0045 c0x0000 (---------------) + I arab + 0x00355146, // n0x0046 c0x0000 (---------------) + I aramco + 0x002fb805, // n0x0047 c0x0000 (---------------) + I archi + 0x00348744, // n0x0048 c0x0000 (---------------) + I army + 0x04a29dc4, // n0x0049 c0x0012 (n0x0695-n0x069b) + I arpa + 0x0023a6c4, // n0x004a c0x0000 (---------------) + I arte + 0x04e01d42, // n0x004b c0x0013 (n0x069b-n0x069c) + I as + 0x0034d284, // n0x004c c0x0000 (---------------) + I asda + 0x00322ec4, // n0x004d c0x0000 (---------------) + I asia + 0x003437ca, // n0x004e c0x0000 (---------------) + I associates + 0x05200102, // n0x004f c0x0014 (n0x069c-n0x06a3) + I at + 0x00248f47, // n0x0050 c0x0000 (---------------) + I athleta + 0x0031fec8, // n0x0051 c0x0000 
(---------------) + I attorney + 0x05a04f82, // n0x0052 c0x0016 (n0x06a4-n0x06b6) + I au + 0x00319e07, // n0x0053 c0x0000 (---------------) + I auction + 0x00233104, // n0x0054 c0x0000 (---------------) + I audi + 0x002b7d87, // n0x0055 c0x0000 (---------------) + I audible + 0x00233105, // n0x0056 c0x0000 (---------------) + I audio + 0x0035fd47, // n0x0057 c0x0000 (---------------) + I auspost + 0x0031a686, // n0x0058 c0x0000 (---------------) + I author + 0x00265104, // n0x0059 c0x0000 (---------------) + I auto + 0x00324dc5, // n0x005a c0x0000 (---------------) + I autos + 0x002dc007, // n0x005b c0x0000 (---------------) + I avianca + 0x06a01082, // n0x005c c0x001a (n0x06c4-n0x06c5) + I aw + 0x002f6583, // n0x005d c0x0000 (---------------) + I aws + 0x00220402, // n0x005e c0x0000 (---------------) + I ax + 0x00356983, // n0x005f c0x0000 (---------------) + I axa + 0x06e05f42, // n0x0060 c0x001b (n0x06c5-n0x06d1) + I az + 0x0021be05, // n0x0061 c0x0000 (---------------) + I azure + 0x07202002, // n0x0062 c0x001c (n0x06d1-n0x06dc) + I ba + 0x002ce1c4, // n0x0063 c0x0000 (---------------) + I baby + 0x0027c805, // n0x0064 c0x0000 (---------------) + I baidu + 0x00205207, // n0x0065 c0x0000 (---------------) + I banamex + 0x002e97ce, // n0x0066 c0x0000 (---------------) + I bananarepublic + 0x00207cc4, // n0x0067 c0x0000 (---------------) + I band + 0x00203704, // n0x0068 c0x0000 (---------------) + I bank + 0x00202003, // n0x0069 c0x0000 (---------------) + I bar + 0x002dd889, // n0x006a c0x0000 (---------------) + I barcelona + 0x002c12cb, // n0x006b c0x0000 (---------------) + I barclaycard + 0x002dc608, // n0x006c c0x0000 (---------------) + I barclays + 0x00306d88, // n0x006d c0x0000 (---------------) + I barefoot + 0x0030e948, // n0x006e c0x0000 (---------------) + I bargains + 0x0022f9c8, // n0x006f c0x0000 (---------------) + I baseball + 0x0033e20a, // n0x0070 c0x0000 (---------------) + I basketball + 0x0035fc47, // n0x0071 c0x0000 (---------------) + I 
bauhaus + 0x00351786, // n0x0072 c0x0000 (---------------) + I bayern + 0x07630782, // n0x0073 c0x001d (n0x06dc-n0x06e6) + I bb + 0x003651c3, // n0x0074 c0x0000 (---------------) + I bbc + 0x0036cbc3, // n0x0075 c0x0000 (---------------) + I bbt + 0x00376984, // n0x0076 c0x0000 (---------------) + I bbva + 0x00399883, // n0x0077 c0x0000 (---------------) + I bcg + 0x00239b43, // n0x0078 c0x0000 (---------------) + I bcn + 0x017129c2, // n0x0079 c0x0005 (---------------)* o I bd + 0x07a03302, // n0x007a c0x001e (n0x06e6-n0x06e8) + I be + 0x0022b505, // n0x007b c0x0000 (---------------) + I beats + 0x0024f5c6, // n0x007c c0x0000 (---------------) + I beauty + 0x002ccb84, // n0x007d c0x0000 (---------------) + I beer + 0x00383b87, // n0x007e c0x0000 (---------------) + I bentley + 0x0022e6c6, // n0x007f c0x0000 (---------------) + I berlin + 0x002291c4, // n0x0080 c0x0000 (---------------) + I best + 0x00398887, // n0x0081 c0x0000 (---------------) + I bestbuy + 0x00208c03, // n0x0082 c0x0000 (---------------) + I bet + 0x07f5d242, // n0x0083 c0x001f (n0x06e8-n0x06e9) + I bf + 0x082ee482, // n0x0084 c0x0020 (n0x06e9-n0x070e) + I bg + 0x08615602, // n0x0085 c0x0021 (n0x070e-n0x0713) + I bh + 0x00215606, // n0x0086 c0x0000 (---------------) + I bharti + 0x08a00002, // n0x0087 c0x0022 (n0x0713-n0x0718) + I bi + 0x0037bf45, // n0x0088 c0x0000 (---------------) + I bible + 0x00313f03, // n0x0089 c0x0000 (---------------) + I bid + 0x00202544, // n0x008a c0x0000 (---------------) + I bike + 0x002dc444, // n0x008b c0x0000 (---------------) + I bing + 0x002dc445, // n0x008c c0x0000 (---------------) + I bingo + 0x00203a03, // n0x008d c0x0000 (---------------) + I bio + 0x08f30b83, // n0x008e c0x0023 (n0x0718-n0x0720) + I biz + 0x09206502, // n0x008f c0x0024 (n0x0720-n0x0724) + I bj + 0x00288ec5, // n0x0090 c0x0000 (---------------) + I black + 0x00288ecb, // n0x0091 c0x0000 (---------------) + I blackfriday + 0x00258fc6, // n0x0092 c0x0000 (---------------) + I blanco + 
0x0020c8cb, // n0x0093 c0x0000 (---------------) + I blockbuster + 0x002a4004, // n0x0094 c0x0000 (---------------) + I blog + 0x0020d009, // n0x0095 c0x0000 (---------------) + I bloomberg + 0x0020e104, // n0x0096 c0x0000 (---------------) + I blue + 0x0960e742, // n0x0097 c0x0025 (n0x0724-n0x0729) + I bm + 0x0020f6c3, // n0x0098 c0x0000 (---------------) + I bms + 0x0020fc43, // n0x0099 c0x0000 (---------------) + I bmw + 0x016104c2, // n0x009a c0x0005 (---------------)* o I bn + 0x002477c3, // n0x009b c0x0000 (---------------) + I bnl + 0x002104ca, // n0x009c c0x0000 (---------------) + I bnpparibas + 0x09a0e402, // n0x009d c0x0026 (n0x0729-n0x0732) + I bo + 0x0030b145, // n0x009e c0x0000 (---------------) + I boats + 0x0020e40a, // n0x009f c0x0000 (---------------) + I boehringer + 0x00290b84, // n0x00a0 c0x0000 (---------------) + I bofa + 0x00210ac3, // n0x00a1 c0x0000 (---------------) + I bom + 0x00210f84, // n0x00a2 c0x0000 (---------------) + I bond + 0x00215403, // n0x00a3 c0x0000 (---------------) + I boo + 0x00215404, // n0x00a4 c0x0000 (---------------) + I book + 0x00215407, // n0x00a5 c0x0000 (---------------) + I booking + 0x00215e85, // n0x00a6 c0x0000 (---------------) + I boots + 0x002173c5, // n0x00a7 c0x0000 (---------------) + I bosch + 0x00217986, // n0x00a8 c0x0000 (---------------) + I bostik + 0x00217cc6, // n0x00a9 c0x0000 (---------------) + I boston + 0x00218e83, // n0x00aa c0x0000 (---------------) + I bot + 0x0021b7c8, // n0x00ab c0x0000 (---------------) + I boutique + 0x0021bb83, // n0x00ac c0x0000 (---------------) + I box + 0x09e1c402, // n0x00ad c0x0027 (n0x0732-n0x0778) + I br + 0x0021c408, // n0x00ae c0x0000 (---------------) + I bradesco + 0x00221b8b, // n0x00af c0x0000 (---------------) + I bridgestone + 0x00220008, // n0x00b0 c0x0000 (---------------) + I broadway + 0x00220c46, // n0x00b1 c0x0000 (---------------) + I broker + 0x00222007, // n0x00b2 c0x0000 (---------------) + I brother + 0x00225e88, // n0x00b3 c0x0000 
(---------------) + I brussels + 0x0a637542, // n0x00b4 c0x0029 (n0x0779-n0x077e) + I bs + 0x0aa1fd02, // n0x00b5 c0x002a (n0x077e-n0x0783) + I bt + 0x0020a008, // n0x00b6 c0x0000 (---------------) + I budapest + 0x002e5b47, // n0x00b7 c0x0000 (---------------) + I bugatti + 0x002410c5, // n0x00b8 c0x0000 (---------------) + I build + 0x002410c8, // n0x00b9 c0x0000 (---------------) + I builders + 0x0022c188, // n0x00ba c0x0000 (---------------) + I business + 0x003004c3, // n0x00bb c0x0000 (---------------) + I buy + 0x0022dec4, // n0x00bc c0x0000 (---------------) + I buzz + 0x00365782, // n0x00bd c0x0000 (---------------) + I bv + 0x0ae2fe42, // n0x00be c0x002b (n0x0783-n0x0785) + I bw + 0x0b20f982, // n0x00bf c0x002c (n0x0785-n0x0789) + I by + 0x0ba30bc2, // n0x00c0 c0x002e (n0x078a-n0x0790) + I bz + 0x00230bc3, // n0x00c1 c0x0000 (---------------) + I bzh + 0x0be00302, // n0x00c2 c0x002f (n0x0790-n0x07a1) + I ca + 0x00230703, // n0x00c3 c0x0000 (---------------) + I cab + 0x0030ee44, // n0x00c4 c0x0000 (---------------) + I cafe + 0x00213543, // n0x00c5 c0x0000 (---------------) + I cal + 0x00213544, // n0x00c6 c0x0000 (---------------) + I call + 0x002ed9cb, // n0x00c7 c0x0000 (---------------) + I calvinklein + 0x0037c746, // n0x00c8 c0x0000 (---------------) + I camera + 0x00241ac4, // n0x00c9 c0x0000 (---------------) + I camp + 0x0029f88e, // n0x00ca c0x0000 (---------------) + I cancerresearch + 0x00265485, // n0x00cb c0x0000 (---------------) + I canon + 0x002dc148, // n0x00cc c0x0000 (---------------) + I capetown + 0x002f0b87, // n0x00cd c0x0000 (---------------) + I capital + 0x002f0b8a, // n0x00ce c0x0000 (---------------) + I capitalone + 0x0020af43, // n0x00cf c0x0000 (---------------) + I car + 0x00236a47, // n0x00d0 c0x0000 (---------------) + I caravan + 0x002c1485, // n0x00d1 c0x0000 (---------------) + I cards + 0x0036b504, // n0x00d2 c0x0000 (---------------) + I care + 0x0036b506, // n0x00d3 c0x0000 (---------------) + I career + 
0x0036b507, // n0x00d4 c0x0000 (---------------) + I careers + 0x00305f84, // n0x00d5 c0x0000 (---------------) + I cars + 0x00390b47, // n0x00d6 c0x0000 (---------------) + I cartier + 0x00214604, // n0x00d7 c0x0000 (---------------) + I casa + 0x002188c4, // n0x00d8 c0x0000 (---------------) + I case + 0x002188c6, // n0x00d9 c0x0000 (---------------) + I caseih + 0x002c99c4, // n0x00da c0x0000 (---------------) + I cash + 0x0037bc46, // n0x00db c0x0000 (---------------) + I casino + 0x0020df43, // n0x00dc c0x0000 (---------------) + I cat + 0x00351008, // n0x00dd c0x0000 (---------------) + I catering + 0x0037ba88, // n0x00de c0x0000 (---------------) + I catholic + 0x0024b283, // n0x00df c0x0000 (---------------) + I cba + 0x00247783, // n0x00e0 c0x0000 (---------------) + I cbn + 0x0038c544, // n0x00e1 c0x0000 (---------------) + I cbre + 0x00391103, // n0x00e2 c0x0000 (---------------) + I cbs + 0x0c22e182, // n0x00e3 c0x0030 (n0x07a1-n0x07a5) + I cc + 0x0c63e2c2, // n0x00e4 c0x0031 (n0x07a5-n0x07a6) + I cd + 0x00206483, // n0x00e5 c0x0000 (---------------) + I ceb + 0x00204486, // n0x00e6 c0x0000 (---------------) + I center + 0x002c87c3, // n0x00e7 c0x0000 (---------------) + I ceo + 0x002e61c4, // n0x00e8 c0x0000 (---------------) + I cern + 0x0ca14202, // n0x00e9 c0x0032 (n0x07a6-n0x07a7) + I cf + 0x00214203, // n0x00ea c0x0000 (---------------) + I cfa + 0x00379503, // n0x00eb c0x0000 (---------------) + I cfd + 0x0021a302, // n0x00ec c0x0000 (---------------) + I cg + 0x0ce01582, // n0x00ed c0x0033 (n0x07a7-n0x07a8) + I ch + 0x002ba9c6, // n0x00ee c0x0000 (---------------) + I chanel + 0x0023c407, // n0x00ef c0x0000 (---------------) + I channel + 0x00329845, // n0x00f0 c0x0000 (---------------) + I chase + 0x0022ac44, // n0x00f1 c0x0000 (---------------) + I chat + 0x00284305, // n0x00f2 c0x0000 (---------------) + I cheap + 0x00201587, // n0x00f3 c0x0000 (---------------) + I chintai + 0x002aee85, // n0x00f4 c0x0000 (---------------) + I chloe + 
0x003a3989, // n0x00f5 c0x0000 (---------------) + I christmas + 0x003024c6, // n0x00f6 c0x0000 (---------------) + I chrome + 0x00303188, // n0x00f7 c0x0000 (---------------) + I chrysler + 0x00329746, // n0x00f8 c0x0000 (---------------) + I church + 0x0d200682, // n0x00f9 c0x0034 (n0x07a8-n0x07b7) + I ci + 0x0023fe08, // n0x00fa c0x0000 (---------------) + I cipriani + 0x00336206, // n0x00fb c0x0000 (---------------) + I circle + 0x00200685, // n0x00fc c0x0000 (---------------) + I cisco + 0x0033cc87, // n0x00fd c0x0000 (---------------) + I citadel + 0x00350f04, // n0x00fe c0x0000 (---------------) + I citi + 0x00350f05, // n0x00ff c0x0000 (---------------) + I citic + 0x00286744, // n0x0100 c0x0000 (---------------) + I city + 0x00286748, // n0x0101 c0x0000 (---------------) + I cityeats + 0x0d60b482, // n0x0102 c0x0035 (n0x07b7-n0x07b8)* o I ck + 0x0da09182, // n0x0103 c0x0036 (n0x07b8-n0x07bd) + I cl + 0x00378546, // n0x0104 c0x0000 (---------------) + I claims + 0x002242c8, // n0x0105 c0x0000 (---------------) + I cleaning + 0x00379a45, // n0x0106 c0x0000 (---------------) + I click + 0x0037b946, // n0x0107 c0x0000 (---------------) + I clinic + 0x00387188, // n0x0108 c0x0000 (---------------) + I clinique + 0x0039a588, // n0x0109 c0x0000 (---------------) + I clothing + 0x00209185, // n0x010a c0x0000 (---------------) + I cloud + 0x00238ac4, // n0x010b c0x0000 (---------------) + I club + 0x00238ac7, // n0x010c c0x0000 (---------------) + I clubmed + 0x0de5d142, // n0x010d c0x0037 (n0x07bd-n0x07c1) + I cm + 0x0e21ba42, // n0x010e c0x0038 (n0x07c1-n0x07ee) + I cn + 0x0fa00742, // n0x010f c0x003e (n0x07f3-n0x0800) + I co + 0x00358885, // n0x0110 c0x0000 (---------------) + I coach + 0x0029bd05, // n0x0111 c0x0000 (---------------) + I codes + 0x0020b246, // n0x0112 c0x0000 (---------------) + I coffee + 0x0022e1c7, // n0x0113 c0x0000 (---------------) + I college + 0x00231a87, // n0x0114 c0x0000 (---------------) + I cologne + 0x10233503, // n0x0115 c0x0040 
(n0x0801-n0x08d7) + I com + 0x002a6b87, // n0x0116 c0x0000 (---------------) + I comcast + 0x002da788, // n0x0117 c0x0000 (---------------) + I commbank + 0x00233509, // n0x0118 c0x0000 (---------------) + I community + 0x0036ee47, // n0x0119 c0x0000 (---------------) + I company + 0x00234a07, // n0x011a c0x0000 (---------------) + I compare + 0x00235488, // n0x011b c0x0000 (---------------) + I computer + 0x00235c86, // n0x011c c0x0000 (---------------) + I comsec + 0x00236246, // n0x011d c0x0000 (---------------) + I condos + 0x00236f4c, // n0x011e c0x0000 (---------------) + I construction + 0x00237d0a, // n0x011f c0x0000 (---------------) + I consulting + 0x00239e07, // n0x0120 c0x0000 (---------------) + I contact + 0x0023b3cb, // n0x0121 c0x0000 (---------------) + I contractors + 0x0023c247, // n0x0122 c0x0000 (---------------) + I cooking + 0x0023c24e, // n0x0123 c0x0000 (---------------) + I cookingchannel + 0x0023d384, // n0x0124 c0x0000 (---------------) + I cool + 0x0023d684, // n0x0125 c0x0000 (---------------) + I coop + 0x0023ea07, // n0x0126 c0x0000 (---------------) + I corsica + 0x00336587, // n0x0127 c0x0000 (---------------) + I country + 0x002423c6, // n0x0128 c0x0000 (---------------) + I coupon + 0x002423c7, // n0x0129 c0x0000 (---------------) + I coupons + 0x00242fc7, // n0x012a c0x0000 (---------------) + I courses + 0x126049c2, // n0x012b c0x0049 (n0x08fe-n0x0905) + I cr + 0x002447c6, // n0x012c c0x0000 (---------------) + I credit + 0x002447ca, // n0x012d c0x0000 (---------------) + I creditcard + 0x00244a4b, // n0x012e c0x0000 (---------------) + I creditunion + 0x00245b47, // n0x012f c0x0000 (---------------) + I cricket + 0x00246505, // n0x0130 c0x0000 (---------------) + I crown + 0x00246643, // n0x0131 c0x0000 (---------------) + I crs + 0x00246b46, // n0x0132 c0x0000 (---------------) + I cruise + 0x00246b47, // n0x0133 c0x0000 (---------------) + I cruises + 0x002440c3, // n0x0134 c0x0000 (---------------) + I csc + 0x12a09d82, // 
n0x0135 c0x004a (n0x0905-n0x090b) + I cu + 0x00246f0a, // n0x0136 c0x0000 (---------------) + I cuisinella + 0x12f53bc2, // n0x0137 c0x004b (n0x090b-n0x090c) + I cv + 0x132c95c2, // n0x0138 c0x004c (n0x090c-n0x0910) + I cw + 0x136482c2, // n0x0139 c0x004d (n0x0910-n0x0912) + I cx + 0x13a3e082, // n0x013a c0x004e (n0x0912-n0x091f) o I cy + 0x0024a985, // n0x013b c0x0000 (---------------) + I cymru + 0x0024b084, // n0x013c c0x0000 (---------------) + I cyou + 0x14229ec2, // n0x013d c0x0050 (n0x0920-n0x0922) + I cz + 0x0034d305, // n0x013e c0x0000 (---------------) + I dabur + 0x002a1943, // n0x013f c0x0000 (---------------) + I dad + 0x00223d45, // n0x0140 c0x0000 (---------------) + I dance + 0x0020c7c4, // n0x0141 c0x0000 (---------------) + I date + 0x0020e206, // n0x0142 c0x0000 (---------------) + I dating + 0x00292c46, // n0x0143 c0x0000 (---------------) + I datsun + 0x00265983, // n0x0144 c0x0000 (---------------) + I day + 0x0023f244, // n0x0145 c0x0000 (---------------) + I dclk + 0x0025f303, // n0x0146 c0x0000 (---------------) + I dds + 0x14604d82, // n0x0147 c0x0051 (n0x0922-n0x092a) + I de + 0x00204d84, // n0x0148 c0x0000 (---------------) + I deal + 0x00239046, // n0x0149 c0x0000 (---------------) + I dealer + 0x00204d85, // n0x014a c0x0000 (---------------) + I deals + 0x003a0286, // n0x014b c0x0000 (---------------) + I degree + 0x0033cd88, // n0x014c c0x0000 (---------------) + I delivery + 0x0025a384, // n0x014d c0x0000 (---------------) + I dell + 0x00345f48, // n0x014e c0x0000 (---------------) + I deloitte + 0x00311f45, // n0x014f c0x0000 (---------------) + I delta + 0x002265c8, // n0x0150 c0x0000 (---------------) + I democrat + 0x002abe06, // n0x0151 c0x0000 (---------------) + I dental + 0x002b2407, // n0x0152 c0x0000 (---------------) + I dentist + 0x0022dcc4, // n0x0153 c0x0000 (---------------) + I desi + 0x0022dcc6, // n0x0154 c0x0000 (---------------) + I design + 0x002329c3, // n0x0155 c0x0000 (---------------) + I dev + 0x0037a7c3, // 
n0x0156 c0x0000 (---------------) + I dhl + 0x002c42c8, // n0x0157 c0x0000 (---------------) + I diamonds + 0x0030ce84, // n0x0158 c0x0000 (---------------) + I diet + 0x00308ec7, // n0x0159 c0x0000 (---------------) + I digital + 0x0024d786, // n0x015a c0x0000 (---------------) + I direct + 0x0024d789, // n0x015b c0x0000 (---------------) + I directory + 0x0031aa48, // n0x015c c0x0000 (---------------) + I discount + 0x00330408, // n0x015d c0x0000 (---------------) + I discover + 0x00229304, // n0x015e c0x0000 (---------------) + I dish + 0x00320843, // n0x015f c0x0000 (---------------) + I diy + 0x00266a02, // n0x0160 c0x0000 (---------------) + I dj + 0x14a494c2, // n0x0161 c0x0052 (n0x092a-n0x092b) + I dk + 0x14e0fa82, // n0x0162 c0x0053 (n0x092b-n0x0930) + I dm + 0x00321fc3, // n0x0163 c0x0000 (---------------) + I dnp + 0x15213282, // n0x0164 c0x0054 (n0x0930-n0x093a) + I do + 0x002ee344, // n0x0165 c0x0000 (---------------) + I docs + 0x00213285, // n0x0166 c0x0000 (---------------) + I dodge + 0x002459c3, // n0x0167 c0x0000 (---------------) + I dog + 0x00236044, // n0x0168 c0x0000 (---------------) + I doha + 0x00339ec7, // n0x0169 c0x0000 (---------------) + I domains + 0x0032f743, // n0x016a c0x0000 (---------------) + I dot + 0x00269fc8, // n0x016b c0x0000 (---------------) + I download + 0x00354745, // n0x016c c0x0000 (---------------) + I drive + 0x00356c44, // n0x016d c0x0000 (---------------) + I dstv + 0x00364483, // n0x016e c0x0000 (---------------) + I dtv + 0x0027c785, // n0x016f c0x0000 (---------------) + I dubai + 0x0027c8c4, // n0x0170 c0x0000 (---------------) + I duck + 0x00390846, // n0x0171 c0x0000 (---------------) + I dunlop + 0x003a6804, // n0x0172 c0x0000 (---------------) + I duns + 0x002008c6, // n0x0173 c0x0000 (---------------) + I dupont + 0x00205146, // n0x0174 c0x0000 (---------------) + I durban + 0x00317284, // n0x0175 c0x0000 (---------------) + I dvag + 0x00212b03, // n0x0176 c0x0000 (---------------) + I dwg + 0x15607a82, 
// n0x0177 c0x0055 (n0x093a-n0x0942) + I dz + 0x0034ab05, // n0x0178 c0x0000 (---------------) + I earth + 0x0022b543, // n0x0179 c0x0000 (---------------) + I eat + 0x15a09b02, // n0x017a c0x0056 (n0x0942-n0x094e) + I ec + 0x002b94c5, // n0x017b c0x0000 (---------------) + I edeka + 0x0023a783, // n0x017c c0x0000 (---------------) + I edu + 0x0023a789, // n0x017d c0x0000 (---------------) + I education + 0x15e0b342, // n0x017e c0x0057 (n0x094e-n0x0958) + I ee + 0x16608442, // n0x017f c0x0059 (n0x0959-n0x0962) + I eg + 0x002ce585, // n0x0180 c0x0000 (---------------) + I email + 0x002c4646, // n0x0181 c0x0000 (---------------) + I emerck + 0x00356047, // n0x0182 c0x0000 (---------------) + I emerson + 0x002cb4c6, // n0x0183 c0x0000 (---------------) + I energy + 0x00369148, // n0x0184 c0x0000 (---------------) + I engineer + 0x0036914b, // n0x0185 c0x0000 (---------------) + I engineering + 0x002044cb, // n0x0186 c0x0000 (---------------) + I enterprises + 0x0039f945, // n0x0187 c0x0000 (---------------) + I epost + 0x0039ffc5, // n0x0188 c0x0000 (---------------) + I epson + 0x002c2709, // n0x0189 c0x0000 (---------------) + I equipment + 0x01603682, // n0x018a c0x0005 (---------------)* o I er + 0x00317448, // n0x018b c0x0000 (---------------) + I ericsson + 0x0020cb04, // n0x018c c0x0000 (---------------) + I erni + 0x16e00482, // n0x018d c0x005b (n0x0963-n0x0968) + I es + 0x0027a303, // n0x018e c0x0000 (---------------) + I esq + 0x002c2486, // n0x018f c0x0000 (---------------) + I estate + 0x0028d2c8, // n0x0190 c0x0000 (---------------) + I esurance + 0x176053c2, // n0x0191 c0x005d (n0x0969-n0x0971) + I et + 0x002234c8, // n0x0192 c0x0000 (---------------) + I etisalat + 0x00204b82, // n0x0193 c0x0000 (---------------) + I eu + 0x0033c78a, // n0x0194 c0x0000 (---------------) + I eurovision + 0x00228883, // n0x0195 c0x0000 (---------------) + I eus + 0x00232a06, // n0x0196 c0x0000 (---------------) + I events + 0x00203608, // n0x0197 c0x0000 (---------------) 
+ I everbank + 0x00239488, // n0x0198 c0x0000 (---------------) + I exchange + 0x0031ee06, // n0x0199 c0x0000 (---------------) + I expert + 0x00363107, // n0x019a c0x0000 (---------------) + I exposed + 0x00247487, // n0x019b c0x0000 (---------------) + I express + 0x0020ba0a, // n0x019c c0x0000 (---------------) + I extraspace + 0x00290c04, // n0x019d c0x0000 (---------------) + I fage + 0x00214244, // n0x019e c0x0000 (---------------) + I fail + 0x0033b609, // n0x019f c0x0000 (---------------) + I fairwinds + 0x00350405, // n0x01a0 c0x0000 (---------------) + I faith + 0x00208f86, // n0x01a1 c0x0000 (---------------) + I family + 0x00211d03, // n0x01a2 c0x0000 (---------------) + I fan + 0x002e0ec4, // n0x01a3 c0x0000 (---------------) + I fans + 0x00271d04, // n0x01a4 c0x0000 (---------------) + I farm + 0x002ece87, // n0x01a5 c0x0000 (---------------) + I farmers + 0x0022fec7, // n0x01a6 c0x0000 (---------------) + I fashion + 0x0024c204, // n0x01a7 c0x0000 (---------------) + I fast + 0x00211505, // n0x01a8 c0x0000 (---------------) + I fedex + 0x0020b308, // n0x01a9 c0x0000 (---------------) + I feedback + 0x00337007, // n0x01aa c0x0000 (---------------) + I ferrari + 0x00343407, // n0x01ab c0x0000 (---------------) + I ferrero + 0x17a07502, // n0x01ac c0x005e (n0x0971-n0x0974) + I fi + 0x00294744, // n0x01ad c0x0000 (---------------) + I fiat + 0x0035ee48, // n0x01ae c0x0000 (---------------) + I fidelity + 0x00365b84, // n0x01af c0x0000 (---------------) + I fido + 0x0024b784, // n0x01b0 c0x0000 (---------------) + I film + 0x0024bb85, // n0x01b1 c0x0000 (---------------) + I final + 0x0024bcc7, // n0x01b2 c0x0000 (---------------) + I finance + 0x00207509, // n0x01b3 c0x0000 (---------------) + I financial + 0x0024c6c4, // n0x01b4 c0x0000 (---------------) + I fire + 0x0024d4c9, // n0x01b5 c0x0000 (---------------) + I firestone + 0x0024d9c8, // n0x01b6 c0x0000 (---------------) + I firmdale + 0x0024e044, // n0x01b7 c0x0000 (---------------) + I fish + 
0x0024e047, // n0x01b8 c0x0000 (---------------) + I fishing + 0x0024f083, // n0x01b9 c0x0000 (---------------) + I fit + 0x0024f807, // n0x01ba c0x0000 (---------------) + I fitness + 0x01615b02, // n0x01bb c0x0005 (---------------)* o I fj + 0x01799fc2, // n0x01bc c0x0005 (---------------)* o I fk + 0x00250686, // n0x01bd c0x0000 (---------------) + I flickr + 0x00251287, // n0x01be c0x0000 (---------------) + I flights + 0x00251c04, // n0x01bf c0x0000 (---------------) + I flir + 0x00252b07, // n0x01c0 c0x0000 (---------------) + I florist + 0x002539c7, // n0x01c1 c0x0000 (---------------) + I flowers + 0x00253f08, // n0x01c2 c0x0000 (---------------) + I flsmidth + 0x002549c3, // n0x01c3 c0x0000 (---------------) + I fly + 0x00242902, // n0x01c4 c0x0000 (---------------) + I fm + 0x002558c2, // n0x01c5 c0x0000 (---------------) + I fo + 0x00255a83, // n0x01c6 c0x0000 (---------------) + I foo + 0x00255a8b, // n0x01c7 c0x0000 (---------------) + I foodnetwork + 0x00306e88, // n0x01c8 c0x0000 (---------------) + I football + 0x0039bf44, // n0x01c9 c0x0000 (---------------) + I ford + 0x00257585, // n0x01ca c0x0000 (---------------) + I forex + 0x00259787, // n0x01cb c0x0000 (---------------) + I forsale + 0x0025b085, // n0x01cc c0x0000 (---------------) + I forum + 0x002b9f4a, // n0x01cd c0x0000 (---------------) + I foundation + 0x0025c143, // n0x01ce c0x0000 (---------------) + I fox + 0x17e00582, // n0x01cf c0x005f (n0x0974-n0x098c) + I fr + 0x002e7d04, // n0x01d0 c0x0000 (---------------) + I free + 0x0025f7c9, // n0x01d1 c0x0000 (---------------) + I fresenius + 0x00263603, // n0x01d2 c0x0000 (---------------) + I frl + 0x002636c7, // n0x01d3 c0x0000 (---------------) + I frogans + 0x0039d609, // n0x01d4 c0x0000 (---------------) + I frontdoor + 0x003980c8, // n0x01d5 c0x0000 (---------------) + I frontier + 0x00204a83, // n0x01d6 c0x0000 (---------------) + I ftr + 0x0027b8c7, // n0x01d7 c0x0000 (---------------) + I fujitsu + 0x0027bdc9, // n0x01d8 c0x0000 
(---------------) + I fujixerox + 0x002312c3, // n0x01d9 c0x0000 (---------------) + I fun + 0x00283c84, // n0x01da c0x0000 (---------------) + I fund + 0x00285349, // n0x01db c0x0000 (---------------) + I furniture + 0x00287806, // n0x01dc c0x0000 (---------------) + I futbol + 0x002888c3, // n0x01dd c0x0000 (---------------) + I fyi + 0x00201042, // n0x01de c0x0000 (---------------) + I ga + 0x0021dd43, // n0x01df c0x0000 (---------------) + I gal + 0x00392147, // n0x01e0 c0x0000 (---------------) + I gallery + 0x00336385, // n0x01e1 c0x0000 (---------------) + I gallo + 0x002dd686, // n0x01e2 c0x0000 (---------------) + I gallup + 0x00297cc4, // n0x01e3 c0x0000 (---------------) + I game + 0x003700c5, // n0x01e4 c0x0000 (---------------) + I games + 0x0022cdc3, // n0x01e5 c0x0000 (---------------) + I gap + 0x002190c6, // n0x01e6 c0x0000 (---------------) + I garden + 0x0020d202, // n0x01e7 c0x0000 (---------------) + I gb + 0x00387944, // n0x01e8 c0x0000 (---------------) + I gbiz + 0x00222d42, // n0x01e9 c0x0000 (---------------) + I gd + 0x002fb203, // n0x01ea c0x0000 (---------------) + I gdn + 0x182026c2, // n0x01eb c0x0060 (n0x098c-n0x0993) + I ge + 0x002534c3, // n0x01ec c0x0000 (---------------) + I gea + 0x00218144, // n0x01ed c0x0000 (---------------) + I gent + 0x00218147, // n0x01ee c0x0000 (---------------) + I genting + 0x00324b46, // n0x01ef c0x0000 (---------------) + I george + 0x00269a82, // n0x01f0 c0x0000 (---------------) + I gf + 0x18654282, // n0x01f1 c0x0061 (n0x0993-n0x0996) + I gg + 0x00330644, // n0x01f2 c0x0000 (---------------) + I ggee + 0x18a41602, // n0x01f3 c0x0062 (n0x0996-n0x099b) + I gh + 0x18e134c2, // n0x01f4 c0x0063 (n0x099b-n0x09a1) + I gi + 0x00344f44, // n0x01f5 c0x0000 (---------------) + I gift + 0x00344f45, // n0x01f6 c0x0000 (---------------) + I gifts + 0x00269485, // n0x01f7 c0x0000 (---------------) + I gives + 0x003481c6, // n0x01f8 c0x0000 (---------------) + I giving + 0x1920ce42, // n0x01f9 c0x0064 
(n0x09a1-n0x09a6) + I gl + 0x00345e85, // n0x01fa c0x0000 (---------------) + I glade + 0x0039ef45, // n0x01fb c0x0000 (---------------) + I glass + 0x00285d83, // n0x01fc c0x0000 (---------------) + I gle + 0x0020d846, // n0x01fd c0x0000 (---------------) + I global + 0x0020e345, // n0x01fe c0x0000 (---------------) + I globo + 0x00215582, // n0x01ff c0x0000 (---------------) + I gm + 0x00335e45, // n0x0200 c0x0000 (---------------) + I gmail + 0x00215584, // n0x0201 c0x0000 (---------------) + I gmbh + 0x002182c3, // n0x0202 c0x0000 (---------------) + I gmo + 0x0021bac3, // n0x0203 c0x0000 (---------------) + I gmx + 0x19608342, // n0x0204 c0x0065 (n0x09a6-n0x09ac) + I gn + 0x002e9bc7, // n0x0205 c0x0000 (---------------) + I godaddy + 0x0024dbc4, // n0x0206 c0x0000 (---------------) + I gold + 0x0024dbc9, // n0x0207 c0x0000 (---------------) + I goldpoint + 0x0024e1c4, // n0x0208 c0x0000 (---------------) + I golf + 0x00299dc3, // n0x0209 c0x0000 (---------------) + I goo + 0x0031bac9, // n0x020a c0x0000 (---------------) + I goodhands + 0x0034a9c8, // n0x020b c0x0000 (---------------) + I goodyear + 0x0029bb84, // n0x020c c0x0000 (---------------) + I goog + 0x0029bb86, // n0x020d c0x0000 (---------------) + I google + 0x002a36c3, // n0x020e c0x0000 (---------------) + I gop + 0x00211ec3, // n0x020f c0x0000 (---------------) + I got + 0x002dc504, // n0x0210 c0x0000 (---------------) + I gotv + 0x0026cc83, // n0x0211 c0x0000 (---------------) + I gov + 0x19adad02, // n0x0212 c0x0066 (n0x09ac-n0x09b2) + I gp + 0x003010c2, // n0x0213 c0x0000 (---------------) + I gq + 0x19e00c82, // n0x0214 c0x0067 (n0x09b2-n0x09b8) + I gr + 0x00315908, // n0x0215 c0x0000 (---------------) + I grainger + 0x003216c8, // n0x0216 c0x0000 (---------------) + I graphics + 0x0038b986, // n0x0217 c0x0000 (---------------) + I gratis + 0x002503c5, // n0x0218 c0x0000 (---------------) + I green + 0x0022fd05, // n0x0219 c0x0000 (---------------) + I gripe + 0x0020ab45, // n0x021a c0x0000 
(---------------) + I group + 0x0023a242, // n0x021b c0x0000 (---------------) + I gs + 0x1a23f142, // n0x021c c0x0068 (n0x09b8-n0x09bf) + I gt + 0x0160dd42, // n0x021d c0x0005 (---------------)* o I gu + 0x00350588, // n0x021e c0x0000 (---------------) + I guardian + 0x0023fd45, // n0x021f c0x0000 (---------------) + I gucci + 0x002e05c4, // n0x0220 c0x0000 (---------------) + I guge + 0x00232905, // n0x0221 c0x0000 (---------------) + I guide + 0x0023cc87, // n0x0222 c0x0000 (---------------) + I guitars + 0x002594c4, // n0x0223 c0x0000 (---------------) + I guru + 0x002167c2, // n0x0224 c0x0000 (---------------) + I gw + 0x1a602302, // n0x0225 c0x0069 (n0x09bf-n0x09c5) + I gy + 0x0030c744, // n0x0226 c0x0000 (---------------) + I hair + 0x0020ccc7, // n0x0227 c0x0000 (---------------) + I hamburg + 0x00394c47, // n0x0228 c0x0000 (---------------) + I hangout + 0x0035fd04, // n0x0229 c0x0000 (---------------) + I haus + 0x00290b43, // n0x022a c0x0000 (---------------) + I hbo + 0x0024b1c4, // n0x022b c0x0000 (---------------) + I hdfc + 0x0024b1c8, // n0x022c c0x0000 (---------------) + I hdfcbank + 0x0036b386, // n0x022d c0x0000 (---------------) + I health + 0x0036b38a, // n0x022e c0x0000 (---------------) + I healthcare + 0x002073c4, // n0x022f c0x0000 (---------------) + I help + 0x0020ebc8, // n0x0230 c0x0000 (---------------) + I helsinki + 0x00254184, // n0x0231 c0x0000 (---------------) + I here + 0x00222106, // n0x0232 c0x0000 (---------------) + I hermes + 0x00292304, // n0x0233 c0x0000 (---------------) + I hgtv + 0x00358b86, // n0x0234 c0x0000 (---------------) + I hiphop + 0x002ebcc9, // n0x0235 c0x0000 (---------------) + I hisamitsu + 0x002a27c7, // n0x0236 c0x0000 (---------------) + I hitachi + 0x0027d3c3, // n0x0237 c0x0000 (---------------) + I hiv + 0x1aa0a882, // n0x0238 c0x006a (n0x09c5-n0x09dd) + I hk + 0x0026da43, // n0x0239 c0x0000 (---------------) + I hkt + 0x0020e942, // n0x023a c0x0000 (---------------) + I hm + 0x1ae1ab42, // n0x023b 
c0x006b (n0x09dd-n0x09e3) + I hn + 0x002df846, // n0x023c c0x0000 (---------------) + I hockey + 0x0035e208, // n0x023d c0x0000 (---------------) + I holdings + 0x002a5007, // n0x023e c0x0000 (---------------) + I holiday + 0x00274049, // n0x023f c0x0000 (---------------) + I homedepot + 0x00299cc9, // n0x0240 c0x0000 (---------------) + I homegoods + 0x002a60c5, // n0x0241 c0x0000 (---------------) + I homes + 0x002a60c9, // n0x0242 c0x0000 (---------------) + I homesense + 0x002a7985, // n0x0243 c0x0000 (---------------) + I honda + 0x002a8409, // n0x0244 c0x0000 (---------------) + I honeywell + 0x002a91c5, // n0x0245 c0x0000 (---------------) + I horse + 0x00297e04, // n0x0246 c0x0000 (---------------) + I host + 0x00297e07, // n0x0247 c0x0000 (---------------) + I hosting + 0x00234dc3, // n0x0248 c0x0000 (---------------) + I hot + 0x002a98c7, // n0x0249 c0x0000 (---------------) + I hoteles + 0x002a9fc7, // n0x024a c0x0000 (---------------) + I hotmail + 0x002a2105, // n0x024b c0x0000 (---------------) + I house + 0x002a1343, // n0x024c c0x0000 (---------------) + I how + 0x1b20e4c2, // n0x024d c0x006c (n0x09e3-n0x09e8) + I hr + 0x00389404, // n0x024e c0x0000 (---------------) + I hsbc + 0x1b62a682, // n0x024f c0x006d (n0x09e8-n0x09f9) + I ht + 0x0025d0c3, // n0x0250 c0x0000 (---------------) + I htc + 0x1ba195c2, // n0x0251 c0x006e (n0x09f9-n0x0a19) + I hu + 0x002f94c6, // n0x0252 c0x0000 (---------------) + I hughes + 0x0031fe45, // n0x0253 c0x0000 (---------------) + I hyatt + 0x002ac1c7, // n0x0254 c0x0000 (---------------) + I hyundai + 0x00321f03, // n0x0255 c0x0000 (---------------) + I ibm + 0x00239ac4, // n0x0256 c0x0000 (---------------) + I icbc + 0x00206903, // n0x0257 c0x0000 (---------------) + I ice + 0x00209d43, // n0x0258 c0x0000 (---------------) + I icu + 0x1be0c782, // n0x0259 c0x006f (n0x0a19-n0x0a24) + I id + 0x1c600042, // n0x025a c0x0071 (n0x0a25-n0x0a27) + I ie + 0x00365804, // n0x025b c0x0000 (---------------) + I ieee + 0x002428c3, 
// n0x025c c0x0000 (---------------) + I ifm + 0x00322905, // n0x025d c0x0000 (---------------) + I iinet + 0x00322745, // n0x025e c0x0000 (---------------) + I ikano + 0x1ca02902, // n0x025f c0x0072 (n0x0a27-n0x0a2f) + I il + 0x1d205c42, // n0x0260 c0x0074 (n0x0a30-n0x0a37) + I im + 0x00256dc6, // n0x0261 c0x0000 (---------------) + I imamat + 0x0025ee44, // n0x0262 c0x0000 (---------------) + I imdb + 0x00207084, // n0x0263 c0x0000 (---------------) + I immo + 0x0020708a, // n0x0264 c0x0000 (---------------) + I immobilien + 0x1da013c2, // n0x0265 c0x0076 (n0x0a39-n0x0a46) + I in + 0x0036728a, // n0x0266 c0x0000 (---------------) + I industries + 0x003a1088, // n0x0267 c0x0000 (---------------) + I infiniti + 0x1dfa1244, // n0x0268 c0x0077 (n0x0a46-n0x0a50) + I info + 0x0020e2c3, // n0x0269 c0x0000 (---------------) + I ing + 0x0020ecc3, // n0x026a c0x0000 (---------------) + I ink + 0x0030ea89, // n0x026b c0x0000 (---------------) + I institute + 0x0023e7c9, // n0x026c c0x0000 (---------------) + I insurance + 0x00339fc6, // n0x026d c0x0000 (---------------) + I insure + 0x1e201603, // n0x026e c0x0078 (n0x0a50-n0x0a51) + I int + 0x0024dd45, // n0x026f c0x0000 (---------------) + I intel + 0x0031940d, // n0x0270 c0x0000 (---------------) + I international + 0x002f8946, // n0x0271 c0x0000 (---------------) + I intuit + 0x00203d0b, // n0x0272 c0x0000 (---------------) + I investments + 0x1e600ac2, // n0x0273 c0x0079 (n0x0a51-n0x0a57) + I io + 0x0026bb88, // n0x0274 c0x0000 (---------------) + I ipiranga + 0x1ea00dc2, // n0x0275 c0x007a (n0x0a57-n0x0a5d) + I iq + 0x1ee04302, // n0x0276 c0x007b (n0x0a5d-n0x0a66) + I ir + 0x002a5605, // n0x0277 c0x0000 (---------------) + I irish + 0x1f2006c2, // n0x0278 c0x007c (n0x0a66-n0x0a6e) + I is + 0x0025b307, // n0x0279 c0x0000 (---------------) + I iselect + 0x0033c007, // n0x027a c0x0000 (---------------) + I ismaili + 0x00215003, // n0x027b c0x0000 (---------------) + I ist + 0x00215008, // n0x027c c0x0000 (---------------) 
+ I istanbul + 0x1f601e42, // n0x027d c0x007d (n0x0a6e-n0x0bdf) + I it + 0x002804c4, // n0x027e c0x0000 (---------------) + I itau + 0x00360743, // n0x027f c0x0000 (---------------) + I itv + 0x00323145, // n0x0280 c0x0000 (---------------) + I iveco + 0x0036ab83, // n0x0281 c0x0000 (---------------) + I iwc + 0x0031f906, // n0x0282 c0x0000 (---------------) + I jaguar + 0x00323d44, // n0x0283 c0x0000 (---------------) + I java + 0x00247743, // n0x0284 c0x0000 (---------------) + I jcb + 0x0026e183, // n0x0285 c0x0000 (---------------) + I jcp + 0x1fa06f02, // n0x0286 c0x007e (n0x0bdf-n0x0be2) + I je + 0x003299c4, // n0x0287 c0x0000 (---------------) + I jeep + 0x0034ea85, // n0x0288 c0x0000 (---------------) + I jetzt + 0x00360f47, // n0x0289 c0x0000 (---------------) + I jewelry + 0x00278d43, // n0x028a c0x0000 (---------------) + I jio + 0x002ac643, // n0x028b c0x0000 (---------------) + I jlc + 0x002ad103, // n0x028c c0x0000 (---------------) + I jll + 0x016308c2, // n0x028d c0x0005 (---------------)* o I jm + 0x002ad1c3, // n0x028e c0x0000 (---------------) + I jmp + 0x002ad803, // n0x028f c0x0000 (---------------) + I jnj + 0x1fe04042, // n0x0290 c0x007f (n0x0be2-n0x0bea) + I jo + 0x002ddc44, // n0x0291 c0x0000 (---------------) + I jobs + 0x0027cb06, // n0x0292 c0x0000 (---------------) + I joburg + 0x00204043, // n0x0293 c0x0000 (---------------) + I jot + 0x002adb83, // n0x0294 c0x0000 (---------------) + I joy + 0x202ae3c2, // n0x0295 c0x0080 (n0x0bea-n0x0c59) + I jp + 0x002ae3c8, // n0x0296 c0x0000 (---------------) + I jpmorgan + 0x002aefc4, // n0x0297 c0x0000 (---------------) + I jprs + 0x002d7906, // n0x0298 c0x0000 (---------------) + I juegos + 0x002af287, // n0x0299 c0x0000 (---------------) + I juniper + 0x00227e46, // n0x029a c0x0000 (---------------) + I kaufen + 0x00238144, // n0x029b c0x0000 (---------------) + I kddi + 0x2de025c2, // n0x029c c0x00b7 (n0x12ed-n0x12ee)* o I ke + 0x00234c8b, // n0x029d c0x0000 (---------------) + I kerryhotels 
+ 0x002e2c0e, // n0x029e c0x0000 (---------------) + I kerrylogistics + 0x00220d0f, // n0x029f c0x0000 (---------------) + I kerryproperties + 0x0023f303, // n0x02a0 c0x0000 (---------------) + I kfh + 0x2e6b5502, // n0x02a1 c0x00b9 (n0x12ef-n0x12f5) + I kg + 0x0161acc2, // n0x02a2 c0x0005 (---------------)* o I kh + 0x2ea01e02, // n0x02a3 c0x00ba (n0x12f5-n0x12fc) + I ki + 0x00226f83, // n0x02a4 c0x0000 (---------------) + I kia + 0x002303c3, // n0x02a5 c0x0000 (---------------) + I kim + 0x002e7706, // n0x02a6 c0x0000 (---------------) + I kinder + 0x0037c506, // n0x02a7 c0x0000 (---------------) + I kindle + 0x003703c7, // n0x02a8 c0x0000 (---------------) + I kitchen + 0x002eed84, // n0x02a9 c0x0000 (---------------) + I kiwi + 0x2ee316c2, // n0x02aa c0x00bb (n0x12fc-n0x130d) + I km + 0x2f269c82, // n0x02ab c0x00bc (n0x130d-n0x1311) + I kn + 0x0036bd45, // n0x02ac c0x0000 (---------------) + I koeln + 0x002aa707, // n0x02ad c0x0000 (---------------) + I komatsu + 0x0035cbc6, // n0x02ae c0x0000 (---------------) + I kosher + 0x2f60d782, // n0x02af c0x00bd (n0x1311-n0x1317) + I kp + 0x0020d784, // n0x02b0 c0x0000 (---------------) + I kpmg + 0x0036a3c3, // n0x02b1 c0x0000 (---------------) + I kpn + 0x2fa06fc2, // n0x02b2 c0x00be (n0x1317-n0x1335) + I kr + 0x0034df03, // n0x02b3 c0x0000 (---------------) + I krd + 0x003a2b04, // n0x02b4 c0x0000 (---------------) + I kred + 0x002b5449, // n0x02b5 c0x0000 (---------------) + I kuokgroup + 0x016bd182, // n0x02b6 c0x0005 (---------------)* o I kw + 0x2fe36902, // n0x02b7 c0x00bf (n0x1335-n0x133a) + I ky + 0x00269c06, // n0x02b8 c0x0000 (---------------) + I kyknet + 0x002be0c5, // n0x02b9 c0x0000 (---------------) + I kyoto + 0x30392a42, // n0x02ba c0x00c0 (n0x133a-n0x1340) + I kz + 0x30600802, // n0x02bb c0x00c1 (n0x1340-n0x1349) + I la + 0x0033aa87, // n0x02bc c0x0000 (---------------) + I lacaixa + 0x00293449, // n0x02bd c0x0000 (---------------) + I ladbrokes + 0x00352d0b, // n0x02be c0x0000 (---------------) + I 
lamborghini + 0x00247245, // n0x02bf c0x0000 (---------------) + I lamer + 0x0036c449, // n0x02c0 c0x0000 (---------------) + I lancaster + 0x002c0706, // n0x02c1 c0x0000 (---------------) + I lancia + 0x00259007, // n0x02c2 c0x0000 (---------------) + I lancome + 0x00200804, // n0x02c3 c0x0000 (---------------) + I land + 0x0025e089, // n0x02c4 c0x0000 (---------------) + I landrover + 0x0035a387, // n0x02c5 c0x0000 (---------------) + I lanxess + 0x00279f47, // n0x02c6 c0x0000 (---------------) + I lasalle + 0x00223603, // n0x02c7 c0x0000 (---------------) + I lat + 0x0025ef86, // n0x02c8 c0x0000 (---------------) + I latino + 0x002cca47, // n0x02c9 c0x0000 (---------------) + I latrobe + 0x00274483, // n0x02ca c0x0000 (---------------) + I law + 0x00274486, // n0x02cb c0x0000 (---------------) + I lawyer + 0x30a02942, // n0x02cc c0x00c2 (n0x1349-n0x134e) + I lb + 0x30e3aa02, // n0x02cd c0x00c3 (n0x134e-n0x1354) + I lc + 0x00226843, // n0x02ce c0x0000 (---------------) + I lds + 0x0027a085, // n0x02cf c0x0000 (---------------) + I lease + 0x0022c487, // n0x02d0 c0x0000 (---------------) + I leclerc + 0x0037c006, // n0x02d1 c0x0000 (---------------) + I lefrak + 0x00336305, // n0x02d2 c0x0000 (---------------) + I legal + 0x0024db44, // n0x02d3 c0x0000 (---------------) + I lego + 0x00241385, // n0x02d4 c0x0000 (---------------) + I lexus + 0x002e65c4, // n0x02d5 c0x0000 (---------------) + I lgbt + 0x31207202, // n0x02d6 c0x00c4 (n0x1354-n0x1355) + I li + 0x00308447, // n0x02d7 c0x0000 (---------------) + I liaison + 0x002bb904, // n0x02d8 c0x0000 (---------------) + I lidl + 0x0023e6c4, // n0x02d9 c0x0000 (---------------) + I life + 0x0023e6cd, // n0x02da c0x0000 (---------------) + I lifeinsurance + 0x00253cc9, // n0x02db c0x0000 (---------------) + I lifestyle + 0x00312248, // n0x02dc c0x0000 (---------------) + I lighting + 0x00258c44, // n0x02dd c0x0000 (---------------) + I like + 0x00249785, // n0x02de c0x0000 (---------------) + I lilly + 0x0025d747, // 
n0x02df c0x0000 (---------------) + I limited + 0x0025db44, // n0x02e0 c0x0000 (---------------) + I limo + 0x0022e787, // n0x02e1 c0x0000 (---------------) + I lincoln + 0x00345ac5, // n0x02e2 c0x0000 (---------------) + I linde + 0x00398ec4, // n0x02e3 c0x0000 (---------------) + I link + 0x002d3a85, // n0x02e4 c0x0000 (---------------) + I lipsy + 0x002622c4, // n0x02e5 c0x0000 (---------------) + I live + 0x002414c6, // n0x02e6 c0x0000 (---------------) + I living + 0x0025da45, // n0x02e7 c0x0000 (---------------) + I lixil + 0x3160d742, // n0x02e8 c0x00c5 (n0x1355-n0x1364) + I lk + 0x00210b84, // n0x02e9 c0x0000 (---------------) + I loan + 0x00210b85, // n0x02ea c0x0000 (---------------) + I loans + 0x00376f06, // n0x02eb c0x0000 (---------------) + I locker + 0x00336445, // n0x02ec c0x0000 (---------------) + I locus + 0x002ccfc4, // n0x02ed c0x0000 (---------------) + I loft + 0x002c21c3, // n0x02ee c0x0000 (---------------) + I lol + 0x00321906, // n0x02ef c0x0000 (---------------) + I london + 0x0021b685, // n0x02f0 c0x0000 (---------------) + I lotte + 0x00222845, // n0x02f1 c0x0000 (---------------) + I lotto + 0x00230204, // n0x02f2 c0x0000 (---------------) + I love + 0x00207443, // n0x02f3 c0x0000 (---------------) + I lpl + 0x0020744c, // n0x02f4 c0x0000 (---------------) + I lplfinancial + 0x31a88142, // n0x02f5 c0x00c6 (n0x1364-n0x1369) + I lr + 0x31e04e42, // n0x02f6 c0x00c7 (n0x1369-n0x136b) + I ls + 0x32209e02, // n0x02f7 c0x00c8 (n0x136b-n0x136d) + I lt + 0x00322cc3, // n0x02f8 c0x0000 (---------------) + I ltd + 0x00322cc4, // n0x02f9 c0x0000 (---------------) + I ltda + 0x32602f42, // n0x02fa c0x00c9 (n0x136d-n0x136e) + I lu + 0x002fb348, // n0x02fb c0x0000 (---------------) + I lundbeck + 0x002dd745, // n0x02fc c0x0000 (---------------) + I lupin + 0x0023ca44, // n0x02fd c0x0000 (---------------) + I luxe + 0x0023d206, // n0x02fe c0x0000 (---------------) + I luxury + 0x32a05d02, // n0x02ff c0x00ca (n0x136e-n0x1377) + I lv + 0x32e09082, // 
n0x0300 c0x00cb (n0x1377-n0x1380) + I ly + 0x33200182, // n0x0301 c0x00cc (n0x1380-n0x1386) + I ma + 0x00375105, // n0x0302 c0x0000 (---------------) + I macys + 0x00317146, // n0x0303 c0x0000 (---------------) + I madrid + 0x00271c44, // n0x0304 c0x0000 (---------------) + I maif + 0x0022bdc6, // n0x0305 c0x0000 (---------------) + I maison + 0x00248d06, // n0x0306 c0x0000 (---------------) + I makeup + 0x002018c3, // n0x0307 c0x0000 (---------------) + I man + 0x0036f20a, // n0x0308 c0x0000 (---------------) + I management + 0x00242c05, // n0x0309 c0x0000 (---------------) + I mango + 0x002f1386, // n0x030a c0x0000 (---------------) + I market + 0x002f1389, // n0x030b c0x0000 (---------------) + I marketing + 0x00331387, // n0x030c c0x0000 (---------------) + I markets + 0x00366448, // n0x030d c0x0000 (---------------) + I marriott + 0x0020f009, // n0x030e c0x0000 (---------------) + I marshalls + 0x002be9c8, // n0x030f c0x0000 (---------------) + I maserati + 0x0022f706, // n0x0310 c0x0000 (---------------) + I mattel + 0x00209c03, // n0x0311 c0x0000 (---------------) + I mba + 0x3362ac02, // n0x0312 c0x00cd (n0x1386-n0x1388) + I mc + 0x0037cec3, // n0x0313 c0x0000 (---------------) + I mcd + 0x0037cec9, // n0x0314 c0x0000 (---------------) + I mcdonalds + 0x00327b88, // n0x0315 c0x0000 (---------------) + I mckinsey + 0x33a4da82, // n0x0316 c0x00ce (n0x1388-n0x1389) + I md + 0x33e03e82, // n0x0317 c0x00cf (n0x1389-n0x1396) + I me + 0x00213ac3, // n0x0318 c0x0000 (---------------) + I med + 0x003025c5, // n0x0319 c0x0000 (---------------) + I media + 0x0026ad84, // n0x031a c0x0000 (---------------) + I meet + 0x002e1809, // n0x031b c0x0000 (---------------) + I melbourne + 0x002c4604, // n0x031c c0x0000 (---------------) + I meme + 0x0026cf88, // n0x031d c0x0000 (---------------) + I memorial + 0x00203e83, // n0x031e c0x0000 (---------------) + I men + 0x002ede44, // n0x031f c0x0000 (---------------) + I menu + 0x0022adc3, // n0x0320 c0x0000 (---------------) + 
I meo + 0x0023e607, // n0x0321 c0x0000 (---------------) + I metlife + 0x3420d802, // n0x0322 c0x00d0 (n0x1396-n0x139f) + I mg + 0x0025aa42, // n0x0323 c0x0000 (---------------) + I mh + 0x00231f45, // n0x0324 c0x0000 (---------------) + I miami + 0x0026b149, // n0x0325 c0x0000 (---------------) + I microsoft + 0x00209003, // n0x0326 c0x0000 (---------------) + I mil + 0x0027d144, // n0x0327 c0x0000 (---------------) + I mini + 0x003193c4, // n0x0328 c0x0000 (---------------) + I mint + 0x00229ac3, // n0x0329 c0x0000 (---------------) + I mit + 0x0027e0ca, // n0x032a c0x0000 (---------------) + I mitsubishi + 0x34767142, // n0x032b c0x00d1 (n0x139f-n0x13a7) + I mk + 0x34a10b42, // n0x032c c0x00d2 (n0x13a7-n0x13ae) + I ml + 0x002c1243, // n0x032d c0x0000 (---------------) + I mlb + 0x00369503, // n0x032e c0x0000 (---------------) + I mls + 0x016070c2, // n0x032f c0x0005 (---------------)* o I mm + 0x00375603, // n0x0330 c0x0000 (---------------) + I mma + 0x34e1fdc2, // n0x0331 c0x00d3 (n0x13ae-n0x13b2) + I mn + 0x0021fdc4, // n0x0332 c0x0000 (---------------) + I mnet + 0x35207102, // n0x0333 c0x00d4 (n0x13b2-n0x13b7) + I mo + 0x35607104, // n0x0334 c0x00d5 (n0x13b7-n0x13b8) + I mobi + 0x002e2606, // n0x0335 c0x0000 (---------------) + I mobily + 0x0026c084, // n0x0336 c0x0000 (---------------) + I moda + 0x002d7d03, // n0x0337 c0x0000 (---------------) + I moe + 0x00282043, // n0x0338 c0x0000 (---------------) + I moi + 0x002e3783, // n0x0339 c0x0000 (---------------) + I mom + 0x00244dc6, // n0x033a c0x0000 (---------------) + I monash + 0x002c6d85, // n0x033b c0x0000 (---------------) + I money + 0x002c1e07, // n0x033c c0x0000 (---------------) + I monster + 0x00258ec9, // n0x033d c0x0000 (---------------) + I montblanc + 0x002c5285, // n0x033e c0x0000 (---------------) + I mopar + 0x002c6cc6, // n0x033f c0x0000 (---------------) + I mormon + 0x002c72c8, // n0x0340 c0x0000 (---------------) + I mortgage + 0x002c74c6, // n0x0341 c0x0000 (---------------) + I 
moscow + 0x00278644, // n0x0342 c0x0000 (---------------) + I moto + 0x0029af0b, // n0x0343 c0x0000 (---------------) + I motorcycles + 0x002c9083, // n0x0344 c0x0000 (---------------) + I mov + 0x002c9085, // n0x0345 c0x0000 (---------------) + I movie + 0x002c91c8, // n0x0346 c0x0000 (---------------) + I movistar + 0x0022a482, // n0x0347 c0x0000 (---------------) + I mp + 0x0033ad82, // n0x0348 c0x0000 (---------------) + I mq + 0x35a4aa02, // n0x0349 c0x00d6 (n0x13b8-n0x13ba) + I mr + 0x35e0f702, // n0x034a c0x00d7 (n0x13ba-n0x13bf) + I ms + 0x0025d643, // n0x034b c0x0000 (---------------) + I msd + 0x36204c02, // n0x034c c0x00d8 (n0x13bf-n0x13c3) + I mt + 0x0026c8c3, // n0x034d c0x0000 (---------------) + I mtn + 0x002c94c4, // n0x034e c0x0000 (---------------) + I mtpc + 0x002c9d03, // n0x034f c0x0000 (---------------) + I mtr + 0x36a03ac2, // n0x0350 c0x00da (n0x13c4-n0x13cb) + I mu + 0x002cbb0b, // n0x0351 c0x0000 (---------------) + I multichoice + 0x36ed0106, // n0x0352 c0x00db (n0x13cb-n0x15ef) + I museum + 0x0023db46, // n0x0353 c0x0000 (---------------) + I mutual + 0x002d0748, // n0x0354 c0x0000 (---------------) + I mutuelle + 0x372b7382, // n0x0355 c0x00dc (n0x15ef-n0x15fd) + I mv + 0x3760fc82, // n0x0356 c0x00dd (n0x15fd-n0x1608) + I mw + 0x37a1bb02, // n0x0357 c0x00de (n0x1608-n0x160e) + I mx + 0x37e26f02, // n0x0358 c0x00df (n0x160e-n0x1616) + I my + 0x38214382, // n0x0359 c0x00e0 (n0x1616-n0x1617)* o I mz + 0x0021438b, // n0x035a c0x0000 (---------------) + I mzansimagic + 0x38601402, // n0x035b c0x00e1 (n0x1617-n0x1628) + I na + 0x00223703, // n0x035c c0x0000 (---------------) + I nab + 0x002393c5, // n0x035d c0x0000 (---------------) + I nadex + 0x0030f646, // n0x035e c0x0000 (---------------) + I nagoya + 0x38a05284, // n0x035f c0x00e2 (n0x1628-n0x162a) + I name + 0x0028cec7, // n0x0360 c0x0000 (---------------) + I naspers + 0x00238e4a, // n0x0361 c0x0000 (---------------) + I nationwide + 0x002ea486, // n0x0362 c0x0000 (---------------) + I 
natura + 0x0039fb84, // n0x0363 c0x0000 (---------------) + I navy + 0x0025d243, // n0x0364 c0x0000 (---------------) + I nba + 0x39600642, // n0x0365 c0x00e5 (n0x162c-n0x162d) + I nc + 0x00202c02, // n0x0366 c0x0000 (---------------) + I ne + 0x00249b43, // n0x0367 c0x0000 (---------------) + I nec + 0x39a1fe03, // n0x0368 c0x00e6 (n0x162d-n0x1663) + I net + 0x003928c7, // n0x0369 c0x0000 (---------------) + I netbank + 0x0025d947, // n0x036a c0x0000 (---------------) + I netflix + 0x00255b87, // n0x036b c0x0000 (---------------) + I network + 0x00228847, // n0x036c c0x0000 (---------------) + I neustar + 0x00221dc3, // n0x036d c0x0000 (---------------) + I new + 0x002f0d8a, // n0x036e c0x0000 (---------------) + I newholland + 0x00221dc4, // n0x036f c0x0000 (---------------) + I news + 0x0024d684, // n0x0370 c0x0000 (---------------) + I next + 0x0024d68a, // n0x0371 c0x0000 (---------------) + I nextdirect + 0x0026d605, // n0x0372 c0x0000 (---------------) + I nexus + 0x3ae00542, // n0x0373 c0x00eb (n0x166b-n0x1675) + I nf + 0x00251bc3, // n0x0374 c0x0000 (---------------) + I nfl + 0x3b202802, // n0x0375 c0x00ec (n0x1675-n0x167f) + I ng + 0x00202d03, // n0x0376 c0x0000 (---------------) + I ngo + 0x0026da03, // n0x0377 c0x0000 (---------------) + I nhk + 0x3ba03182, // n0x0378 c0x00ee (n0x1680-n0x168e) o I ni + 0x002a6b04, // n0x0379 c0x0000 (---------------) + I nico + 0x0021da84, // n0x037a c0x0000 (---------------) + I nike + 0x00206b05, // n0x037b c0x0000 (---------------) + I nikon + 0x002c8e05, // n0x037c c0x0000 (---------------) + I ninja + 0x0022e906, // n0x037d c0x0000 (---------------) + I nissan + 0x0022ec86, // n0x037e c0x0000 (---------------) + I nissay + 0x3be47802, // n0x037f c0x00ef (n0x168e-n0x1691) + I nl + 0x3c200c02, // n0x0380 c0x00f0 (n0x1691-n0x1967) + I no + 0x00318785, // n0x0381 c0x0000 (---------------) + I nokia + 0x0023d852, // n0x0382 c0x0000 (---------------) + I northwesternmutual + 0x00366106, // n0x0383 c0x0000 
(---------------) + I norton + 0x00224d83, // n0x0384 c0x0000 (---------------) + I now + 0x0029cec6, // n0x0385 c0x0000 (---------------) + I nowruz + 0x00224d85, // n0x0386 c0x0000 (---------------) + I nowtv + 0x01610502, // n0x0387 c0x0005 (---------------)* o I np + 0x4460d382, // n0x0388 c0x0111 (n0x198f-n0x1996) + I nr + 0x002e23c3, // n0x0389 c0x0000 (---------------) + I nra + 0x002b5e83, // n0x038a c0x0000 (---------------) + I nrw + 0x00373b03, // n0x038b c0x0000 (---------------) + I ntt + 0x44a017c2, // n0x038c c0x0112 (n0x1996-n0x1999) + I nu + 0x0036ef83, // n0x038d c0x0000 (---------------) + I nyc + 0x44e094c2, // n0x038e c0x0113 (n0x1999-n0x19a9) + I nz + 0x00207143, // n0x038f c0x0000 (---------------) + I obi + 0x002ddc88, // n0x0390 c0x0000 (---------------) + I observer + 0x0020b283, // n0x0391 c0x0000 (---------------) + I off + 0x00221686, // n0x0392 c0x0000 (---------------) + I office + 0x00395b47, // n0x0393 c0x0000 (---------------) + I okinawa + 0x0020a9c6, // n0x0394 c0x0000 (---------------) + I olayan + 0x0020a9cb, // n0x0395 c0x0000 (---------------) + I olayangroup + 0x0039fac7, // n0x0396 c0x0000 (---------------) + I oldnavy + 0x00389204, // n0x0397 c0x0000 (---------------) + I ollo + 0x456014c2, // n0x0398 c0x0115 (n0x19aa-n0x19b3) + I om + 0x002dd5c5, // n0x0399 c0x0000 (---------------) + I omega + 0x00214843, // n0x039a c0x0000 (---------------) + I one + 0x002082c3, // n0x039b c0x0000 (---------------) + I ong + 0x003175c3, // n0x039c c0x0000 (---------------) + I onl + 0x003175c6, // n0x039d c0x0000 (---------------) + I online + 0x003a008a, // n0x039e c0x0000 (---------------) + I onyourside + 0x0028d703, // n0x039f c0x0000 (---------------) + I ooo + 0x0023de44, // n0x03a0 c0x0000 (---------------) + I open + 0x00224206, // n0x03a1 c0x0000 (---------------) + I oracle + 0x00396286, // n0x03a2 c0x0000 (---------------) + I orange + 0x45a2d1c3, // n0x03a3 c0x0116 (n0x19b3-n0x19f0) + I org + 0x002ae487, // n0x03a4 c0x0000 
(---------------) + I organic + 0x002db3cd, // n0x03a5 c0x0000 (---------------) + I orientexpress + 0x00383487, // n0x03a6 c0x0000 (---------------) + I origins + 0x0029ac45, // n0x03a7 c0x0000 (---------------) + I osaka + 0x00269e06, // n0x03a8 c0x0000 (---------------) + I otsuka + 0x0021b6c3, // n0x03a9 c0x0000 (---------------) + I ott + 0x0020da83, // n0x03aa c0x0000 (---------------) + I ovh + 0x4720ac42, // n0x03ab c0x011c (n0x1a2d-n0x1a38) + I pa + 0x002eaf04, // n0x03ac c0x0000 (---------------) + I page + 0x0024c94c, // n0x03ad c0x0000 (---------------) + I pamperedchef + 0x002646c9, // n0x03ae c0x0000 (---------------) + I panasonic + 0x00338507, // n0x03af c0x0000 (---------------) + I panerai + 0x00277905, // n0x03b0 c0x0000 (---------------) + I paris + 0x002994c4, // n0x03b1 c0x0000 (---------------) + I pars + 0x002a5308, // n0x03b2 c0x0000 (---------------) + I partners + 0x002ad245, // n0x03b3 c0x0000 (---------------) + I parts + 0x002b4c45, // n0x03b4 c0x0000 (---------------) + I party + 0x002cc549, // n0x03b5 c0x0000 (---------------) + I passagens + 0x002bc0c3, // n0x03b6 c0x0000 (---------------) + I pay + 0x002bc0c4, // n0x03b7 c0x0000 (---------------) + I payu + 0x002c9544, // n0x03b8 c0x0000 (---------------) + I pccw + 0x47607782, // n0x03b9 c0x011d (n0x1a38-n0x1a40) + I pe + 0x00207783, // n0x03ba c0x0000 (---------------) + I pet + 0x47af7d02, // n0x03bb c0x011e (n0x1a40-n0x1a43) + I pf + 0x002f7d06, // n0x03bc c0x0000 (---------------) + I pfizer + 0x016495c2, // n0x03bd c0x0005 (---------------)* o I pg + 0x47e00d42, // n0x03be c0x011f (n0x1a43-n0x1a4b) + I ph + 0x00375008, // n0x03bf c0x0000 (---------------) + I pharmacy + 0x002d39c7, // n0x03c0 c0x0000 (---------------) + I philips + 0x00299085, // n0x03c1 c0x0000 (---------------) + I photo + 0x002d404b, // n0x03c2 c0x0000 (---------------) + I photography + 0x002d11c6, // n0x03c3 c0x0000 (---------------) + I photos + 0x002d4246, // n0x03c4 c0x0000 (---------------) + I 
physio + 0x002d43c6, // n0x03c5 c0x0000 (---------------) + I piaget + 0x00225704, // n0x03c6 c0x0000 (---------------) + I pics + 0x002d4b46, // n0x03c7 c0x0000 (---------------) + I pictet + 0x002d5008, // n0x03c8 c0x0000 (---------------) + I pictures + 0x00241b83, // n0x03c9 c0x0000 (---------------) + I pid + 0x002699c3, // n0x03ca c0x0000 (---------------) + I pin + 0x002699c4, // n0x03cb c0x0000 (---------------) + I ping + 0x002d6d84, // n0x03cc c0x0000 (---------------) + I pink + 0x002d7107, // n0x03cd c0x0000 (---------------) + I pioneer + 0x002d85c5, // n0x03ce c0x0000 (---------------) + I pizza + 0x482d8702, // n0x03cf c0x0120 (n0x1a4b-n0x1a59) + I pk + 0x486063c2, // n0x03d0 c0x0121 (n0x1a59-n0x1afe) + I pl + 0x002063c5, // n0x03d1 c0x0000 (---------------) + I place + 0x0029e944, // n0x03d2 c0x0000 (---------------) + I play + 0x002dad4b, // n0x03d3 c0x0000 (---------------) + I playstation + 0x002dc348, // n0x03d4 c0x0000 (---------------) + I plumbing + 0x002dcac4, // n0x03d5 c0x0000 (---------------) + I plus + 0x0020d7c2, // n0x03d6 c0x0000 (---------------) + I pm + 0x48e493c2, // n0x03d7 c0x0123 (n0x1b2d-n0x1b32) + I pn + 0x002aee03, // n0x03d8 c0x0000 (---------------) + I pnc + 0x002dcf04, // n0x03d9 c0x0000 (---------------) + I pohl + 0x002dd005, // n0x03da c0x0000 (---------------) + I poker + 0x002de547, // n0x03db c0x0000 (---------------) + I politie + 0x002e0104, // n0x03dc c0x0000 (---------------) + I porn + 0x0035fe04, // n0x03dd c0x0000 (---------------) + I post + 0x49204602, // n0x03de c0x0124 (n0x1b32-n0x1b3f) + I pr + 0x003598c9, // n0x03df c0x0000 (---------------) + I pramerica + 0x002e0b05, // n0x03e0 c0x0000 (---------------) + I praxi + 0x00247505, // n0x03e1 c0x0000 (---------------) + I press + 0x002e1745, // n0x03e2 c0x0000 (---------------) + I prime + 0x49620e43, // n0x03e3 c0x0125 (n0x1b3f-n0x1b4a) + I pro + 0x002e2044, // n0x03e4 c0x0000 (---------------) + I prod + 0x002e204b, // n0x03e5 c0x0000 (---------------) 
+ I productions + 0x002e2484, // n0x03e6 c0x0000 (---------------) + I prof + 0x002e278b, // n0x03e7 c0x0000 (---------------) + I progressive + 0x002e36c5, // n0x03e8 c0x0000 (---------------) + I promo + 0x00220e4a, // n0x03e9 c0x0000 (---------------) + I properties + 0x002e3e48, // n0x03ea c0x0000 (---------------) + I property + 0x002e404a, // n0x03eb c0x0000 (---------------) + I protection + 0x002e42c3, // n0x03ec c0x0000 (---------------) + I pru + 0x002e42ca, // n0x03ed c0x0000 (---------------) + I prudential + 0x49a09342, // n0x03ee c0x0126 (n0x1b4a-n0x1b51) + I ps + 0x49e8c9c2, // n0x03ef c0x0127 (n0x1b51-n0x1b5a) + I pt + 0x00297403, // n0x03f0 c0x0000 (---------------) + I pub + 0x4a2e5942, // n0x03f1 c0x0128 (n0x1b5a-n0x1b60) + I pw + 0x002e5943, // n0x03f2 c0x0000 (---------------) + I pwc + 0x4a734802, // n0x03f3 c0x0129 (n0x1b60-n0x1b67) + I py + 0x4ab14682, // n0x03f4 c0x012a (n0x1b67-n0x1b70) + I qa + 0x002e6444, // n0x03f5 c0x0000 (---------------) + I qpon + 0x0021b906, // n0x03f6 c0x0000 (---------------) + I quebec + 0x0022bb05, // n0x03f7 c0x0000 (---------------) + I quest + 0x002e6ac3, // n0x03f8 c0x0000 (---------------) + I qvc + 0x00355bc6, // n0x03f9 c0x0000 (---------------) + I racing + 0x00351c84, // n0x03fa c0x0000 (---------------) + I raid + 0x4ae07002, // n0x03fb c0x012b (n0x1b70-n0x1b74) + I re + 0x002d3404, // n0x03fc c0x0000 (---------------) + I read + 0x002c238a, // n0x03fd c0x0000 (---------------) + I realestate + 0x00338907, // n0x03fe c0x0000 (---------------) + I realtor + 0x0031fc86, // n0x03ff c0x0000 (---------------) + I realty + 0x0031c747, // n0x0400 c0x0000 (---------------) + I recipes + 0x00244803, // n0x0401 c0x0000 (---------------) + I red + 0x003a2b48, // n0x0402 c0x0000 (---------------) + I redstone + 0x00337f4b, // n0x0403 c0x0000 (---------------) + I redumbrella + 0x002c9b05, // n0x0404 c0x0000 (---------------) + I rehab + 0x0033a0c5, // n0x0405 c0x0000 (---------------) + I reise + 0x0033a0c6, // 
n0x0406 c0x0000 (---------------) + I reisen + 0x002b75c4, // n0x0407 c0x0000 (---------------) + I reit + 0x00327f48, // n0x0408 c0x0000 (---------------) + I reliance + 0x00209ec3, // n0x0409 c0x0000 (---------------) + I ren + 0x0020bd84, // n0x040a c0x0000 (---------------) + I rent + 0x0020bd87, // n0x040b c0x0000 (---------------) + I rentals + 0x0022b7c6, // n0x040c c0x0000 (---------------) + I repair + 0x00309586, // n0x040d c0x0000 (---------------) + I report + 0x0029f6ca, // n0x040e c0x0000 (---------------) + I republican + 0x0024d544, // n0x040f c0x0000 (---------------) + I rest + 0x0037adca, // n0x0410 c0x0000 (---------------) + I restaurant + 0x002eaac6, // n0x0411 c0x0000 (---------------) + I review + 0x002eaac7, // n0x0412 c0x0000 (---------------) + I reviews + 0x00257607, // n0x0413 c0x0000 (---------------) + I rexroth + 0x00273a04, // n0x0414 c0x0000 (---------------) + I rich + 0x00273a09, // n0x0415 c0x0000 (---------------) + I richardli + 0x002b6f85, // n0x0416 c0x0000 (---------------) + I ricoh + 0x0034618b, // n0x0417 c0x0000 (---------------) + I rightathome + 0x00257f83, // n0x0418 c0x0000 (---------------) + I ril + 0x00200a83, // n0x0419 c0x0000 (---------------) + I rio + 0x0022fd43, // n0x041a c0x0000 (---------------) + I rip + 0x002684c4, // n0x041b c0x0000 (---------------) + I rmit + 0x4b202202, // n0x041c c0x012c (n0x1b74-n0x1b80) + I ro + 0x00289806, // n0x041d c0x0000 (---------------) + I rocher + 0x002a10c5, // n0x041e c0x0000 (---------------) + I rocks + 0x002d2f85, // n0x041f c0x0000 (---------------) + I rodeo + 0x0039c1c6, // n0x0420 c0x0000 (---------------) + I rogers + 0x0037ed04, // n0x0421 c0x0000 (---------------) + I room + 0x4b609702, // n0x0422 c0x012d (n0x1b80-n0x1b87) + I rs + 0x0039c2c4, // n0x0423 c0x0000 (---------------) + I rsvp + 0x4ba11302, // n0x0424 c0x012e (n0x1b87-n0x1c0a) + I ru + 0x00236144, // n0x0425 c0x0000 (---------------) + I ruhr + 0x00222b43, // n0x0426 c0x0000 (---------------) + I 
run + 0x4beb5ec2, // n0x0427 c0x012f (n0x1c0a-n0x1c13) + I rw + 0x003274c3, // n0x0428 c0x0000 (---------------) + I rwe + 0x0036acc6, // n0x0429 c0x0000 (---------------) + I ryukyu + 0x4c2004c2, // n0x042a c0x0130 (n0x1c13-n0x1c1b) + I sa + 0x0030c348, // n0x042b c0x0000 (---------------) + I saarland + 0x00215944, // n0x042c c0x0000 (---------------) + I safe + 0x00215946, // n0x042d c0x0000 (---------------) + I safety + 0x00307fc6, // n0x042e c0x0000 (---------------) + I sakura + 0x00259844, // n0x042f c0x0000 (---------------) + I sale + 0x00321885, // n0x0430 c0x0000 (---------------) + I salon + 0x00398bc8, // n0x0431 c0x0000 (---------------) + I samsclub + 0x0039edc7, // n0x0432 c0x0000 (---------------) + I samsung + 0x003a0747, // n0x0433 c0x0000 (---------------) + I sandvik + 0x003a074f, // n0x0434 c0x0000 (---------------) + I sandvikcoromant + 0x00294646, // n0x0435 c0x0000 (---------------) + I sanofi + 0x00219dc3, // n0x0436 c0x0000 (---------------) + I sap + 0x00219dc4, // n0x0437 c0x0000 (---------------) + I sapo + 0x0022b604, // n0x0438 c0x0000 (---------------) + I sarl + 0x00228143, // n0x0439 c0x0000 (---------------) + I sas + 0x00222244, // n0x043a c0x0000 (---------------) + I save + 0x00235144, // n0x043b c0x0000 (---------------) + I saxo + 0x4c62d142, // n0x043c c0x0131 (n0x1c1b-n0x1c20) + I sb + 0x00288e03, // n0x043d c0x0000 (---------------) + I sbi + 0x00237503, // n0x043e c0x0000 (---------------) + I sbs + 0x4ca00702, // n0x043f c0x0132 (n0x1c20-n0x1c25) + I sc + 0x00236a03, // n0x0440 c0x0000 (---------------) + I sca + 0x002ee403, // n0x0441 c0x0000 (---------------) + I scb + 0x0021744a, // n0x0442 c0x0000 (---------------) + I schaeffler + 0x002e5d47, // n0x0443 c0x0000 (---------------) + I schmidt + 0x0023ce0c, // n0x0444 c0x0000 (---------------) + I scholarships + 0x0023d0c6, // n0x0445 c0x0000 (---------------) + I school + 0x00241286, // n0x0446 c0x0000 (---------------) + I schule + 0x00242547, // n0x0447 c0x0000 
(---------------) + I schwarz + 0x002358c7, // n0x0448 c0x0000 (---------------) + I science + 0x00246cc9, // n0x0449 c0x0000 (---------------) + I scjohnson + 0x0021c544, // n0x044a c0x0000 (---------------) + I scor + 0x00200704, // n0x044b c0x0000 (---------------) + I scot + 0x4ce496c2, // n0x044c c0x0133 (n0x1c25-n0x1c2d) + I sd + 0x4d2046c2, // n0x044d c0x0134 (n0x1c2d-n0x1c56) + I se + 0x00316b84, // n0x044e c0x0000 (---------------) + I seat + 0x0031c646, // n0x044f c0x0000 (---------------) + I secure + 0x00235d48, // n0x0450 c0x0000 (---------------) + I security + 0x0027a144, // n0x0451 c0x0000 (---------------) + I seek + 0x0025b346, // n0x0452 c0x0000 (---------------) + I select + 0x002cb485, // n0x0453 c0x0000 (---------------) + I sener + 0x00206808, // n0x0454 c0x0000 (---------------) + I services + 0x002046c3, // n0x0455 c0x0000 (---------------) + I ses + 0x00251f05, // n0x0456 c0x0000 (---------------) + I seven + 0x00253b43, // n0x0457 c0x0000 (---------------) + I sew + 0x00247603, // n0x0458 c0x0000 (---------------) + I sex + 0x00247604, // n0x0459 c0x0000 (---------------) + I sexy + 0x00256a83, // n0x045a c0x0000 (---------------) + I sfr + 0x4d66d702, // n0x045b c0x0135 (n0x1c56-n0x1c5d) + I sg + 0x4da01342, // n0x045c c0x0136 (n0x1c5d-n0x1c64) + I sh + 0x00257e49, // n0x045d c0x0000 (---------------) + I shangrila + 0x0025b885, // n0x045e c0x0000 (---------------) + I sharp + 0x0025cb44, // n0x045f c0x0000 (---------------) + I shaw + 0x0025fd45, // n0x0460 c0x0000 (---------------) + I shell + 0x00211884, // n0x0461 c0x0000 (---------------) + I shia + 0x002fea47, // n0x0462 c0x0000 (---------------) + I shiksha + 0x003896c5, // n0x0463 c0x0000 (---------------) + I shoes + 0x002be486, // n0x0464 c0x0000 (---------------) + I shouji + 0x002c4484, // n0x0465 c0x0000 (---------------) + I show + 0x002c4488, // n0x0466 c0x0000 (---------------) + I showtime + 0x002c8307, // n0x0467 c0x0000 (---------------) + I shriram + 0x4de0a402, // 
n0x0468 c0x0137 (n0x1c64-n0x1c65) + I si + 0x00341f84, // n0x0469 c0x0000 (---------------) + I silk + 0x002f7b84, // n0x046a c0x0000 (---------------) + I sina + 0x00285cc7, // n0x046b c0x0000 (---------------) + I singles + 0x002810c4, // n0x046c c0x0000 (---------------) + I site + 0x0022eb82, // n0x046d c0x0000 (---------------) + I sj + 0x4e207842, // n0x046e c0x0138 (n0x1c65-n0x1c66) + I sk + 0x00209743, // n0x046f c0x0000 (---------------) + I ski + 0x002e76c4, // n0x0470 c0x0000 (---------------) + I skin + 0x002368c3, // n0x0471 c0x0000 (---------------) + I sky + 0x002368c5, // n0x0472 c0x0000 (---------------) + I skype + 0x4e624b82, // n0x0473 c0x0139 (n0x1c66-n0x1c6b) + I sl + 0x00375205, // n0x0474 c0x0000 (---------------) + I sling + 0x0024cdc2, // n0x0475 c0x0000 (---------------) + I sm + 0x00368185, // n0x0476 c0x0000 (---------------) + I smart + 0x0035e3c5, // n0x0477 c0x0000 (---------------) + I smile + 0x4ea14182, // n0x0478 c0x013a (n0x1c6b-n0x1c73) + I sn + 0x00214184, // n0x0479 c0x0000 (---------------) + I sncf + 0x4ee05682, // n0x047a c0x013b (n0x1c73-n0x1c76) + I so + 0x00325706, // n0x047b c0x0000 (---------------) + I soccer + 0x002a3986, // n0x047c c0x0000 (---------------) + I social + 0x0026b288, // n0x047d c0x0000 (---------------) + I softbank + 0x002b8688, // n0x047e c0x0000 (---------------) + I software + 0x002f9444, // n0x047f c0x0000 (---------------) + I sohu + 0x00359f05, // n0x0480 c0x0000 (---------------) + I solar + 0x00359d09, // n0x0481 c0x0000 (---------------) + I solutions + 0x00356144, // n0x0482 c0x0000 (---------------) + I song + 0x003a0044, // n0x0483 c0x0000 (---------------) + I sony + 0x002bd0c3, // n0x0484 c0x0000 (---------------) + I soy + 0x0020bb45, // n0x0485 c0x0000 (---------------) + I space + 0x00371ac7, // n0x0486 c0x0000 (---------------) + I spiegel + 0x00209384, // n0x0487 c0x0000 (---------------) + I spot + 0x00332e4d, // n0x0488 c0x0000 (---------------) + I spreadbetting + 0x0033b802, 
// n0x0489 c0x0000 (---------------) + I sr + 0x0033b803, // n0x048a c0x0000 (---------------) + I srl + 0x0035a503, // n0x048b c0x0000 (---------------) + I srt + 0x4f202742, // n0x048c c0x013c (n0x1c76-n0x1c82) + I st + 0x00380745, // n0x048d c0x0000 (---------------) + I stada + 0x002320c7, // n0x048e c0x0000 (---------------) + I staples + 0x00228904, // n0x048f c0x0000 (---------------) + I star + 0x00228907, // n0x0490 c0x0000 (---------------) + I starhub + 0x0020f209, // n0x0491 c0x0000 (---------------) + I statebank + 0x002c24c9, // n0x0492 c0x0000 (---------------) + I statefarm + 0x003a0dc7, // n0x0493 c0x0000 (---------------) + I statoil + 0x00277743, // n0x0494 c0x0000 (---------------) + I stc + 0x00277748, // n0x0495 c0x0000 (---------------) + I stcgroup + 0x002a8009, // n0x0496 c0x0000 (---------------) + I stockholm + 0x00364547, // n0x0497 c0x0000 (---------------) + I storage + 0x00391185, // n0x0498 c0x0000 (---------------) + I store + 0x002e74c6, // n0x0499 c0x0000 (---------------) + I stream + 0x002e7906, // n0x049a c0x0000 (---------------) + I studio + 0x002e7a85, // n0x049b c0x0000 (---------------) + I study + 0x00253dc5, // n0x049c c0x0000 (---------------) + I style + 0x4f6023c2, // n0x049d c0x013d (n0x1c82-n0x1ca2) + I su + 0x00332385, // n0x049e c0x0000 (---------------) + I sucks + 0x002ba24a, // n0x049f c0x0000 (---------------) + I supersport + 0x002be2c8, // n0x04a0 c0x0000 (---------------) + I supplies + 0x002a7806, // n0x04a1 c0x0000 (---------------) + I supply + 0x002e3907, // n0x04a2 c0x0000 (---------------) + I support + 0x0024c144, // n0x04a3 c0x0000 (---------------) + I surf + 0x002a9e07, // n0x04a4 c0x0000 (---------------) + I surgery + 0x002eef06, // n0x04a5 c0x0000 (---------------) + I suzuki + 0x4fa35f42, // n0x04a6 c0x013e (n0x1ca2-n0x1ca7) + I sv + 0x00376c06, // n0x04a7 c0x0000 (---------------) + I swatch + 0x002f15ca, // n0x04a8 c0x0000 (---------------) + I swiftcover + 0x002f1f85, // n0x04a9 c0x0000 
(---------------) + I swiss + 0x4fef2802, // n0x04aa c0x013f (n0x1ca7-n0x1ca8) + I sx + 0x50289a02, // n0x04ab c0x0140 (n0x1ca8-n0x1cae) + I sy + 0x00329bc6, // n0x04ac c0x0000 (---------------) + I sydney + 0x002d5f48, // n0x04ad c0x0000 (---------------) + I symantec + 0x00394e07, // n0x04ae c0x0000 (---------------) + I systems + 0x5060b982, // n0x04af c0x0141 (n0x1cae-n0x1cb1) + I sz + 0x00210d43, // n0x04b0 c0x0000 (---------------) + I tab + 0x003a6506, // n0x04b1 c0x0000 (---------------) + I taipei + 0x0021eb04, // n0x04b2 c0x0000 (---------------) + I talk + 0x00395a06, // n0x04b3 c0x0000 (---------------) + I taobao + 0x00357846, // n0x04b4 c0x0000 (---------------) + I target + 0x00322a0a, // n0x04b5 c0x0000 (---------------) + I tatamotors + 0x0036cc45, // n0x04b6 c0x0000 (---------------) + I tatar + 0x00219906, // n0x04b7 c0x0000 (---------------) + I tattoo + 0x002203c3, // n0x04b8 c0x0000 (---------------) + I tax + 0x002203c4, // n0x04b9 c0x0000 (---------------) + I taxi + 0x00204442, // n0x04ba c0x0000 (---------------) + I tc + 0x0025edc3, // n0x04bb c0x0000 (---------------) + I tci + 0x50a0b182, // n0x04bc c0x0142 (n0x1cb1-n0x1cb2) + I td + 0x002c9683, // n0x04bd c0x0000 (---------------) + I tdk + 0x00367504, // n0x04be c0x0000 (---------------) + I team + 0x002d59c4, // n0x04bf c0x0000 (---------------) + I tech + 0x002d608a, // n0x04c0 c0x0000 (---------------) + I technology + 0x0022f7c3, // n0x04c1 c0x0000 (---------------) + I tel + 0x00286648, // n0x04c2 c0x0000 (---------------) + I telecity + 0x0030ec4a, // n0x04c3 c0x0000 (---------------) + I telefonica + 0x0023fa07, // n0x04c4 c0x0000 (---------------) + I temasek + 0x002f4806, // n0x04c5 c0x0000 (---------------) + I tennis + 0x0032ce44, // n0x04c6 c0x0000 (---------------) + I teva + 0x0025d9c2, // n0x04c7 c0x0000 (---------------) + I tf + 0x00204c42, // n0x04c8 c0x0000 (---------------) + I tg + 0x50e06342, // n0x04c9 c0x0143 (n0x1cb2-n0x1cb9) + I th + 0x0024b183, // n0x04ca 
c0x0000 (---------------) + I thd + 0x002573c7, // n0x04cb c0x0000 (---------------) + I theater + 0x00355e87, // n0x04cc c0x0000 (---------------) + I theatre + 0x003504cb, // n0x04cd c0x0000 (---------------) + I theguardian + 0x0034d4c4, // n0x04ce c0x0000 (---------------) + I tiaa + 0x002f6847, // n0x04cf c0x0000 (---------------) + I tickets + 0x002de646, // n0x04d0 c0x0000 (---------------) + I tienda + 0x00215707, // n0x04d1 c0x0000 (---------------) + I tiffany + 0x002e5c84, // n0x04d2 c0x0000 (---------------) + I tips + 0x0033d385, // n0x04d3 c0x0000 (---------------) + I tires + 0x002b7905, // n0x04d4 c0x0000 (---------------) + I tirol + 0x51226782, // n0x04d5 c0x0144 (n0x1cb9-n0x1cc8) + I tj + 0x00230886, // n0x04d6 c0x0000 (---------------) + I tjmaxx + 0x0036f443, // n0x04d7 c0x0000 (---------------) + I tjx + 0x0022ad02, // n0x04d8 c0x0000 (---------------) + I tk + 0x00231686, // n0x04d9 c0x0000 (---------------) + I tkmaxx + 0x516007c2, // n0x04da c0x0145 (n0x1cc8-n0x1cc9) + I tl + 0x51a00142, // n0x04db c0x0146 (n0x1cc9-n0x1cd1) + I tm + 0x00200145, // n0x04dc c0x0000 (---------------) + I tmall + 0x51e4f882, // n0x04dd c0x0147 (n0x1cd1-n0x1ce5) + I tn + 0x52208082, // n0x04de c0x0148 (n0x1ce5-n0x1ceb) + I to + 0x00265905, // n0x04df c0x0000 (---------------) + I today + 0x00341c05, // n0x04e0 c0x0000 (---------------) + I tokyo + 0x002199c5, // n0x04e1 c0x0000 (---------------) + I tools + 0x00208083, // n0x04e2 c0x0000 (---------------) + I top + 0x00376345, // n0x04e3 c0x0000 (---------------) + I toray + 0x002d1287, // n0x04e4 c0x0000 (---------------) + I toshiba + 0x0025b605, // n0x04e5 c0x0000 (---------------) + I total + 0x002fd7c5, // n0x04e6 c0x0000 (---------------) + I tours + 0x002dc244, // n0x04e7 c0x0000 (---------------) + I town + 0x0025bb86, // n0x04e8 c0x0000 (---------------) + I toyota + 0x0026dac4, // n0x04e9 c0x0000 (---------------) + I toys + 0x52603002, // n0x04ea c0x0149 (n0x1ceb-n0x1d00) + I tr + 0x002673c5, // 
n0x04eb c0x0000 (---------------) + I trade + 0x002a4607, // n0x04ec c0x0000 (---------------) + I trading + 0x0022a6c8, // n0x04ed c0x0000 (---------------) + I training + 0x0029bec6, // n0x04ee c0x0000 (---------------) + I travel + 0x0029becd, // n0x04ef c0x0000 (---------------) + I travelchannel + 0x002a1a89, // n0x04f0 c0x0000 (---------------) + I travelers + 0x002a1a92, // n0x04f1 c0x0000 (---------------) + I travelersinsurance + 0x00329245, // n0x04f2 c0x0000 (---------------) + I trust + 0x0033f2c3, // n0x04f3 c0x0000 (---------------) + I trv + 0x5320e842, // n0x04f4 c0x014c (n0x1d02-n0x1d13) + I tt + 0x002e48c4, // n0x04f5 c0x0000 (---------------) + I tube + 0x002f89c3, // n0x04f6 c0x0000 (---------------) + I tui + 0x0035d745, // n0x04f7 c0x0000 (---------------) + I tunes + 0x002f2e45, // n0x04f8 c0x0000 (---------------) + I tushu + 0x53624e42, // n0x04f9 c0x014d (n0x1d13-n0x1d17) + I tv + 0x003644c3, // n0x04fa c0x0000 (---------------) + I tvs + 0x53a4e502, // n0x04fb c0x014e (n0x1d17-n0x1d25) + I tw + 0x53e1fe82, // n0x04fc c0x014f (n0x1d25-n0x1d31) + I tz + 0x54220502, // n0x04fd c0x0150 (n0x1d31-n0x1d80) + I ua + 0x0033bbc5, // n0x04fe c0x0000 (---------------) + I ubank + 0x0024a4c3, // n0x04ff c0x0000 (---------------) + I ubs + 0x00249a48, // n0x0500 c0x0000 (---------------) + I uconnect + 0x54601cc2, // n0x0501 c0x0151 (n0x1d80-n0x1d89) + I ug + 0x54a00f82, // n0x0502 c0x0152 (n0x1d89-n0x1d94) + I uk + 0x002a6ac6, // n0x0503 c0x0000 (---------------) + I unicom + 0x00320a0a, // n0x0504 c0x0000 (---------------) + I university + 0x0020d503, // n0x0505 c0x0000 (---------------) + I uno + 0x00259d43, // n0x0506 c0x0000 (---------------) + I uol + 0x002d5243, // n0x0507 c0x0000 (---------------) + I ups + 0x55602382, // n0x0508 c0x0155 (n0x1d96-n0x1dd5) + I us + 0x63a01802, // n0x0509 c0x018e (n0x1e78-n0x1e7e) + I uy + 0x64211342, // n0x050a c0x0190 (n0x1e7f-n0x1e83) + I uz + 0x002000c2, // n0x050b c0x0000 (---------------) + I va + 
0x00376a09, // n0x050c c0x0000 (---------------) + I vacations + 0x002bc5c4, // n0x050d c0x0000 (---------------) + I vana + 0x00344588, // n0x050e c0x0000 (---------------) + I vanguard + 0x646e6b02, // n0x050f c0x0191 (n0x1e83-n0x1e89) + I vc + 0x64a02b82, // n0x0510 c0x0192 (n0x1e89-n0x1e9a) + I ve + 0x00230285, // n0x0511 c0x0000 (---------------) + I vegas + 0x0023b808, // n0x0512 c0x0000 (---------------) + I ventures + 0x002f1788, // n0x0513 c0x0000 (---------------) + I verisign + 0x0039440c, // n0x0514 c0x0000 (---------------) + I versicherung + 0x0023f943, // n0x0515 c0x0000 (---------------) + I vet + 0x0023fd02, // n0x0516 c0x0000 (---------------) + I vg + 0x64e05d42, // n0x0517 c0x0193 (n0x1e9a-n0x1e9f) + I vi + 0x002c5706, // n0x0518 c0x0000 (---------------) + I viajes + 0x002f5685, // n0x0519 c0x0000 (---------------) + I video + 0x0031a003, // n0x051a c0x0000 (---------------) + I vig + 0x00311a06, // n0x051b c0x0000 (---------------) + I viking + 0x002f57c6, // n0x051c c0x0000 (---------------) + I villas + 0x00241543, // n0x051d c0x0000 (---------------) + I vin + 0x002f7ac3, // n0x051e c0x0000 (---------------) + I vip + 0x002f7e86, // n0x051f c0x0000 (---------------) + I virgin + 0x002f8404, // n0x0520 c0x0000 (---------------) + I visa + 0x002b48c6, // n0x0521 c0x0000 (---------------) + I vision + 0x002c9245, // n0x0522 c0x0000 (---------------) + I vista + 0x002f878a, // n0x0523 c0x0000 (---------------) + I vistaprint + 0x00240444, // n0x0524 c0x0000 (---------------) + I viva + 0x002f97c4, // n0x0525 c0x0000 (---------------) + I vivo + 0x0034710a, // n0x0526 c0x0000 (---------------) + I vlaanderen + 0x65203442, // n0x0527 c0x0194 (n0x1e9f-n0x1eac) + I vn + 0x002760c5, // n0x0528 c0x0000 (---------------) + I vodka + 0x002fbd0a, // n0x0529 c0x0000 (---------------) + I volkswagen + 0x002fc945, // n0x052a c0x0000 (---------------) + I volvo + 0x002fd4c4, // n0x052b c0x0000 (---------------) + I vote + 0x002fd5c6, // n0x052c c0x0000 
(---------------) + I voting + 0x002fd744, // n0x052d c0x0000 (---------------) + I voto + 0x00231006, // n0x052e c0x0000 (---------------) + I voyage + 0x65672082, // n0x052f c0x0195 (n0x1eac-n0x1eb0) + I vu + 0x0031f5c6, // n0x0530 c0x0000 (---------------) + I vuelos + 0x00320685, // n0x0531 c0x0000 (---------------) + I wales + 0x002010c7, // n0x0532 c0x0000 (---------------) + I walmart + 0x00293986, // n0x0533 c0x0000 (---------------) + I walter + 0x00242744, // n0x0534 c0x0000 (---------------) + I wang + 0x0033d6c7, // n0x0535 c0x0000 (---------------) + I wanggou + 0x0036f146, // n0x0536 c0x0000 (---------------) + I warman + 0x002aca45, // n0x0537 c0x0000 (---------------) + I watch + 0x003a3d87, // n0x0538 c0x0000 (---------------) + I watches + 0x00391887, // n0x0539 c0x0000 (---------------) + I weather + 0x0039188e, // n0x053a c0x0000 (---------------) + I weatherchannel + 0x00221a06, // n0x053b c0x0000 (---------------) + I webcam + 0x0022e645, // n0x053c c0x0000 (---------------) + I weber + 0x00281007, // n0x053d c0x0000 (---------------) + I website + 0x002f08c3, // n0x053e c0x0000 (---------------) + I wed + 0x0032f407, // n0x053f c0x0000 (---------------) + I wedding + 0x0020fe05, // n0x0540 c0x0000 (---------------) + I weibo + 0x002109c4, // n0x0541 c0x0000 (---------------) + I weir + 0x0022fe82, // n0x0542 c0x0000 (---------------) + I wf + 0x003a43c7, // n0x0543 c0x0000 (---------------) + I whoswho + 0x002eee04, // n0x0544 c0x0000 (---------------) + I wien + 0x0037c484, // n0x0545 c0x0000 (---------------) + I wiki + 0x0025a8cb, // n0x0546 c0x0000 (---------------) + I williamhill + 0x0021cbc3, // n0x0547 c0x0000 (---------------) + I win + 0x002afe07, // n0x0548 c0x0000 (---------------) + I windows + 0x0021cbc4, // n0x0549 c0x0000 (---------------) + I wine + 0x002b0f07, // n0x054a c0x0000 (---------------) + I winners + 0x00231c43, // n0x054b c0x0000 (---------------) + I wme + 0x0032ae4d, // n0x054c c0x0000 (---------------) + I 
wolterskluwer + 0x00380f88, // n0x054d c0x0000 (---------------) + I woodside + 0x00255c44, // n0x054e c0x0000 (---------------) + I work + 0x00351f05, // n0x054f c0x0000 (---------------) + I works + 0x00300b85, // n0x0550 c0x0000 (---------------) + I world + 0x002ff3c3, // n0x0551 c0x0000 (---------------) + I wow + 0x65a0b942, // n0x0552 c0x0196 (n0x1eb0-n0x1eb7) + I ws + 0x003002c3, // n0x0553 c0x0000 (---------------) + I wtc + 0x00300783, // n0x0554 c0x0000 (---------------) + I wtf + 0x0021bb44, // n0x0555 c0x0000 (---------------) + I xbox + 0x0027bec5, // n0x0556 c0x0000 (---------------) + I xerox + 0x00230a07, // n0x0557 c0x0000 (---------------) + I xfinity + 0x00220446, // n0x0558 c0x0000 (---------------) + I xihuan + 0x00367243, // n0x0559 c0x0000 (---------------) + I xin + 0x002317cb, // n0x055a c0x0000 (---------------) + I xn--11b4c3d + 0x0024830b, // n0x055b c0x0000 (---------------) + I xn--1ck2e1b + 0x0026a7cb, // n0x055c c0x0000 (---------------) + I xn--1qqw23a + 0x0027bfca, // n0x055d c0x0000 (---------------) + I xn--30rr7y + 0x002a718b, // n0x055e c0x0000 (---------------) + I xn--3bst00m + 0x002daa8b, // n0x055f c0x0000 (---------------) + I xn--3ds443g + 0x002d3d4c, // n0x0560 c0x0000 (---------------) + I xn--3e0b707e + 0x002f2851, // n0x0561 c0x0000 (---------------) + I xn--3oq18vl8pn36a + 0x00339a8a, // n0x0562 c0x0000 (---------------) + I xn--3pxu8k + 0x0034a00b, // n0x0563 c0x0000 (---------------) + I xn--42c2d9a + 0x00370d8b, // n0x0564 c0x0000 (---------------) + I xn--45brj9c + 0x003a374a, // n0x0565 c0x0000 (---------------) + I xn--45q11c + 0x003a5a0a, // n0x0566 c0x0000 (---------------) + I xn--4gbrim + 0x00300f8d, // n0x0567 c0x0000 (---------------) + I xn--4gq48lf9j + 0x0030218e, // n0x0568 c0x0000 (---------------) + I xn--54b7fta0cc + 0x0030270b, // n0x0569 c0x0000 (---------------) + I xn--55qw42g + 0x003029ca, // n0x056a c0x0000 (---------------) + I xn--55qx5d + 0x00303d11, // n0x056b c0x0000 (---------------) + 
I xn--5su34j936bgsg + 0x0030414a, // n0x056c c0x0000 (---------------) + I xn--5tzm5g + 0x0030464b, // n0x056d c0x0000 (---------------) + I xn--6frz82g + 0x00304b8e, // n0x056e c0x0000 (---------------) + I xn--6qq986b3xl + 0x0030550c, // n0x056f c0x0000 (---------------) + I xn--80adxhks + 0x0030648b, // n0x0570 c0x0000 (---------------) + I xn--80ao21a + 0x0030674e, // n0x0571 c0x0000 (---------------) + I xn--80aqecdr1a + 0x00306acc, // n0x0572 c0x0000 (---------------) + I xn--80asehdb + 0x00309b8a, // n0x0573 c0x0000 (---------------) + I xn--80aswg + 0x0030a9cc, // n0x0574 c0x0000 (---------------) + I xn--8y0a063a + 0x65f0acca, // n0x0575 c0x0197 (n0x1eb7-n0x1ebd) + I xn--90a3ac + 0x0030bb89, // n0x0576 c0x0000 (---------------) + I xn--90ais + 0x0030d14a, // n0x0577 c0x0000 (---------------) + I xn--9dbq2a + 0x0030d3ca, // n0x0578 c0x0000 (---------------) + I xn--9et52u + 0x0030d64b, // n0x0579 c0x0000 (---------------) + I xn--9krt00a + 0x00310c4e, // n0x057a c0x0000 (---------------) + I xn--b4w605ferd + 0x00310fd1, // n0x057b c0x0000 (---------------) + I xn--bck1b9a5dre4c + 0x00318cc9, // n0x057c c0x0000 (---------------) + I xn--c1avg + 0x00318f0a, // n0x057d c0x0000 (---------------) + I xn--c2br7g + 0x00319a4b, // n0x057e c0x0000 (---------------) + I xn--cck2b3b + 0x0031b68a, // n0x057f c0x0000 (---------------) + I xn--cg4bki + 0x0031bfd6, // n0x0580 c0x0000 (---------------) + I xn--clchc0ea0b2g2a9gcd + 0x0031e50b, // n0x0581 c0x0000 (---------------) + I xn--czr694b + 0x0032398a, // n0x0582 c0x0000 (---------------) + I xn--czrs0t + 0x003241ca, // n0x0583 c0x0000 (---------------) + I xn--czru2d + 0x0032674b, // n0x0584 c0x0000 (---------------) + I xn--d1acj3b + 0x00328cc9, // n0x0585 c0x0000 (---------------) + I xn--d1alf + 0x0032b7cd, // n0x0586 c0x0000 (---------------) + I xn--eckvdtc9d + 0x0032c18b, // n0x0587 c0x0000 (---------------) + I xn--efvy88h + 0x0032d08b, // n0x0588 c0x0000 (---------------) + I xn--estv75g + 0x0032da4b, // 
n0x0589 c0x0000 (---------------) + I xn--fct429k + 0x0032dec9, // n0x058a c0x0000 (---------------) + I xn--fhbei + 0x0032e50e, // n0x058b c0x0000 (---------------) + I xn--fiq228c5hs + 0x0032ebca, // n0x058c c0x0000 (---------------) + I xn--fiq64b + 0x003328ca, // n0x058d c0x0000 (---------------) + I xn--fiqs8s + 0x00332c0a, // n0x058e c0x0000 (---------------) + I xn--fiqz9s + 0x003334cb, // n0x058f c0x0000 (---------------) + I xn--fjq720a + 0x00333d0b, // n0x0590 c0x0000 (---------------) + I xn--flw351e + 0x00333fcd, // n0x0591 c0x0000 (---------------) + I xn--fpcrj9c3d + 0x0033558d, // n0x0592 c0x0000 (---------------) + I xn--fzc2c9e2c + 0x00335ad0, // n0x0593 c0x0000 (---------------) + I xn--fzys8d69uvgm + 0x00335f8b, // n0x0594 c0x0000 (---------------) + I xn--g2xx48c + 0x00336d4c, // n0x0595 c0x0000 (---------------) + I xn--gckr3f0f + 0x00337a0b, // n0x0596 c0x0000 (---------------) + I xn--gecrj9c + 0x0033a58b, // n0x0597 c0x0000 (---------------) + I xn--gk3at1e + 0x0033ca0b, // n0x0598 c0x0000 (---------------) + I xn--h2brj9c + 0x0034034b, // n0x0599 c0x0000 (---------------) + I xn--hxt814e + 0x00340dcf, // n0x059a c0x0000 (---------------) + I xn--i1b6b1a6a2e + 0x0034118b, // n0x059b c0x0000 (---------------) + I xn--imr513n + 0x0034280a, // n0x059c c0x0000 (---------------) + I xn--io0a7i + 0x00343209, // n0x059d c0x0000 (---------------) + I xn--j1aef + 0x00343e49, // n0x059e c0x0000 (---------------) + I xn--j1amh + 0x0034478b, // n0x059f c0x0000 (---------------) + I xn--j6w193g + 0x00344a4e, // n0x05a0 c0x0000 (---------------) + I xn--jlq61u9w7b + 0x0034778b, // n0x05a1 c0x0000 (---------------) + I xn--jvr189m + 0x00348bcf, // n0x05a2 c0x0000 (---------------) + I xn--kcrx77d1x4a + 0x0034afcb, // n0x05a3 c0x0000 (---------------) + I xn--kprw13d + 0x0034b28b, // n0x05a4 c0x0000 (---------------) + I xn--kpry57d + 0x0034b54b, // n0x05a5 c0x0000 (---------------) + I xn--kpu716f + 0x0034b98a, // n0x05a6 c0x0000 (---------------) + I 
xn--kput3i + 0x00352909, // n0x05a7 c0x0000 (---------------) + I xn--l1acc + 0x0035800f, // n0x05a8 c0x0000 (---------------) + I xn--lgbbat1ad8j + 0x0035c74c, // n0x05a9 c0x0000 (---------------) + I xn--mgb2ddes + 0x0035cfcc, // n0x05aa c0x0000 (---------------) + I xn--mgb9awbf + 0x0035d40e, // n0x05ab c0x0000 (---------------) + I xn--mgba3a3ejt + 0x0035d94f, // n0x05ac c0x0000 (---------------) + I xn--mgba3a4f16a + 0x0035dd0e, // n0x05ad c0x0000 (---------------) + I xn--mgba3a4fra + 0x0035e6d0, // n0x05ae c0x0000 (---------------) + I xn--mgba7c0bbn0a + 0x0035eacf, // n0x05af c0x0000 (---------------) + I xn--mgbaakc7dvf + 0x0035f04e, // n0x05b0 c0x0000 (---------------) + I xn--mgbaam7a8h + 0x0035f50c, // n0x05b1 c0x0000 (---------------) + I xn--mgbab2bd + 0x0035f812, // n0x05b2 c0x0000 (---------------) + I xn--mgbai9a5eva00b + 0x00360b51, // n0x05b3 c0x0000 (---------------) + I xn--mgbai9azgqp6j + 0x0036110e, // n0x05b4 c0x0000 (---------------) + I xn--mgbayh7gpa + 0x0036154e, // n0x05b5 c0x0000 (---------------) + I xn--mgbb9fbpob + 0x00361a8e, // n0x05b6 c0x0000 (---------------) + I xn--mgbbh1a71e + 0x00361e0f, // n0x05b7 c0x0000 (---------------) + I xn--mgbc0a9azcg + 0x003621ce, // n0x05b8 c0x0000 (---------------) + I xn--mgbca7dzdo + 0x00362553, // n0x05b9 c0x0000 (---------------) + I xn--mgberp4a5d4a87g + 0x00362a11, // n0x05ba c0x0000 (---------------) + I xn--mgberp4a5d4ar + 0x00362e4e, // n0x05bb c0x0000 (---------------) + I xn--mgbi4ecexp + 0x003632cc, // n0x05bc c0x0000 (---------------) + I xn--mgbpl2fh + 0x00363713, // n0x05bd c0x0000 (---------------) + I xn--mgbqly7c0a67fbc + 0x00363e90, // n0x05be c0x0000 (---------------) + I xn--mgbqly7cvafr + 0x0036470c, // n0x05bf c0x0000 (---------------) + I xn--mgbt3dhd + 0x00364a0c, // n0x05c0 c0x0000 (---------------) + I xn--mgbtf8fl + 0x00364f4b, // n0x05c1 c0x0000 (---------------) + I xn--mgbtx2b + 0x0036540e, // n0x05c2 c0x0000 (---------------) + I xn--mgbx4cd0ab + 0x0036590b, // 
n0x05c3 c0x0000 (---------------) + I xn--mix082f + 0x0036688b, // n0x05c4 c0x0000 (---------------) + I xn--mix891f + 0x003678cc, // n0x05c5 c0x0000 (---------------) + I xn--mk1bu44c + 0x0036fa4a, // n0x05c6 c0x0000 (---------------) + I xn--mxtq1m + 0x0037058c, // n0x05c7 c0x0000 (---------------) + I xn--ngbc5azd + 0x0037088c, // n0x05c8 c0x0000 (---------------) + I xn--ngbe9e0a + 0x00370b89, // n0x05c9 c0x0000 (---------------) + I xn--ngbrx + 0x0037254b, // n0x05ca c0x0000 (---------------) + I xn--nnx388a + 0x00372808, // n0x05cb c0x0000 (---------------) + I xn--node + 0x00372cc9, // n0x05cc c0x0000 (---------------) + I xn--nqv7f + 0x00372ccf, // n0x05cd c0x0000 (---------------) + I xn--nqv7fs00ema + 0x0037464b, // n0x05ce c0x0000 (---------------) + I xn--nyqy26a + 0x0037534a, // n0x05cf c0x0000 (---------------) + I xn--o3cw4h + 0x0037708c, // n0x05d0 c0x0000 (---------------) + I xn--ogbpf8fl + 0x00379349, // n0x05d1 c0x0000 (---------------) + I xn--p1acf + 0x003795c8, // n0x05d2 c0x0000 (---------------) + I xn--p1ai + 0x003797cb, // n0x05d3 c0x0000 (---------------) + I xn--pbt977c + 0x0037a58b, // n0x05d4 c0x0000 (---------------) + I xn--pgbs0dh + 0x0037b44a, // n0x05d5 c0x0000 (---------------) + I xn--pssy2u + 0x0037b6cb, // n0x05d6 c0x0000 (---------------) + I xn--q9jyb4c + 0x0037cc4c, // n0x05d7 c0x0000 (---------------) + I xn--qcka1pmc + 0x0037d688, // n0x05d8 c0x0000 (---------------) + I xn--qxam + 0x0038118b, // n0x05d9 c0x0000 (---------------) + I xn--rhqv96g + 0x0038390b, // n0x05da c0x0000 (---------------) + I xn--rovu88b + 0x00386f0b, // n0x05db c0x0000 (---------------) + I xn--s9brj9c + 0x0038860b, // n0x05dc c0x0000 (---------------) + I xn--ses554g + 0x0039140b, // n0x05dd c0x0000 (---------------) + I xn--t60b56a + 0x003916c9, // n0x05de c0x0000 (---------------) + I xn--tckwe + 0x00391c0d, // n0x05df c0x0000 (---------------) + I xn--tiq49xqyj + 0x0039654a, // n0x05e0 c0x0000 (---------------) + I xn--unup4y + 0x00397497, // 
n0x05e1 c0x0000 (---------------) + I xn--vermgensberater-ctb + 0x003982d8, // n0x05e2 c0x0000 (---------------) + I xn--vermgensberatung-pwb + 0x0039a849, // n0x05e3 c0x0000 (---------------) + I xn--vhquv + 0x0039ba4b, // n0x05e4 c0x0000 (---------------) + I xn--vuq861b + 0x0039c814, // n0x05e5 c0x0000 (---------------) + I xn--w4r85el8fhu5dnra + 0x0039cd0b, // n0x05e6 c0x0000 (---------------) + I xn--w4rs40l + 0x0039d28a, // n0x05e7 c0x0000 (---------------) + I xn--wgbh1c + 0x0039d84a, // n0x05e8 c0x0000 (---------------) + I xn--wgbl6a + 0x0039dacb, // n0x05e9 c0x0000 (---------------) + I xn--xhq521b + 0x003a1450, // n0x05ea c0x0000 (---------------) + I xn--xkc2al3hye2a + 0x003a1851, // n0x05eb c0x0000 (---------------) + I xn--xkc2dl3a5ee0h + 0x003a210a, // n0x05ec c0x0000 (---------------) + I xn--y9a3aq + 0x003a2d4d, // n0x05ed c0x0000 (---------------) + I xn--yfro4i67o + 0x003a344d, // n0x05ee c0x0000 (---------------) + I xn--ygbi2ammx + 0x003a5dcb, // n0x05ef c0x0000 (---------------) + I xn--zfr164b + 0x003a6b46, // n0x05f0 c0x0000 (---------------) + I xperia + 0x00230983, // n0x05f1 c0x0000 (---------------) + I xxx + 0x00247683, // n0x05f2 c0x0000 (---------------) + I xyz + 0x00307e86, // n0x05f3 c0x0000 (---------------) + I yachts + 0x0028d645, // n0x05f4 c0x0000 (---------------) + I yahoo + 0x002c7947, // n0x05f5 c0x0000 (---------------) + I yamaxun + 0x00339946, // n0x05f6 c0x0000 (---------------) + I yandex + 0x01608242, // n0x05f7 c0x0005 (---------------)* o I ye + 0x003710c9, // n0x05f8 c0x0000 (---------------) + I yodobashi + 0x003559c4, // n0x05f9 c0x0000 (---------------) + I yoga + 0x002d09c8, // n0x05fa c0x0000 (---------------) + I yokohama + 0x0024b0c3, // n0x05fb c0x0000 (---------------) + I you + 0x002e4807, // n0x05fc c0x0000 (---------------) + I youtube + 0x00244002, // n0x05fd c0x0000 (---------------) + I yt + 0x002ac203, // n0x05fe c0x0000 (---------------) + I yun + 0x66205f82, // n0x05ff c0x0198 (n0x1ebd-n0x1ece) o 
I za + 0x002c3fc6, // n0x0600 c0x0000 (---------------) + I zappos + 0x002c4d04, // n0x0601 c0x0000 (---------------) + I zara + 0x002eb584, // n0x0602 c0x0000 (---------------) + I zero + 0x002432c3, // n0x0603 c0x0000 (---------------) + I zip + 0x002432c5, // n0x0604 c0x0000 (---------------) + I zippo + 0x01700d02, // n0x0605 c0x0005 (---------------)* o I zm + 0x002dce04, // n0x0606 c0x0000 (---------------) + I zone + 0x00273947, // n0x0607 c0x0000 (---------------) + I zuerich + 0x016afdc2, // n0x0608 c0x0005 (---------------)* o I zw + 0x00233503, // n0x0609 c0x0000 (---------------) + I com + 0x0023a783, // n0x060a c0x0000 (---------------) + I edu + 0x0026cc83, // n0x060b c0x0000 (---------------) + I gov + 0x00209003, // n0x060c c0x0000 (---------------) + I mil + 0x0021fe03, // n0x060d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x060e c0x0000 (---------------) + I org + 0x00201483, // n0x060f c0x0000 (---------------) + I nom + 0x00201542, // n0x0610 c0x0000 (---------------) + I ac + 0x000ffa08, // n0x0611 c0x0000 (---------------) + blogspot + 0x00200742, // n0x0612 c0x0000 (---------------) + I co + 0x0026cc83, // n0x0613 c0x0000 (---------------) + I gov + 0x00209003, // n0x0614 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x0615 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0616 c0x0000 (---------------) + I org + 0x00217443, // n0x0617 c0x0000 (---------------) + I sch + 0x00316196, // n0x0618 c0x0000 (---------------) + I accident-investigation + 0x00317cd3, // n0x0619 c0x0000 (---------------) + I accident-prevention + 0x002f66c9, // n0x061a c0x0000 (---------------) + I aerobatic + 0x002389c8, // n0x061b c0x0000 (---------------) + I aeroclub + 0x002dd449, // n0x061c c0x0000 (---------------) + I aerodrome + 0x002fbe86, // n0x061d c0x0000 (---------------) + I agents + 0x0030c790, // n0x061e c0x0000 (---------------) + I air-surveillance + 0x00358d93, // n0x061f c0x0000 (---------------) + I air-traffic-control + 
0x00204908, // n0x0620 c0x0000 (---------------) + I aircraft + 0x002d7e07, // n0x0621 c0x0000 (---------------) + I airline + 0x002793c7, // n0x0622 c0x0000 (---------------) + I airport + 0x0021868a, // n0x0623 c0x0000 (---------------) + I airtraffic + 0x002c8609, // n0x0624 c0x0000 (---------------) + I ambulance + 0x0037d809, // n0x0625 c0x0000 (---------------) + I amusement + 0x002d488b, // n0x0626 c0x0000 (---------------) + I association + 0x0031a686, // n0x0627 c0x0000 (---------------) + I author + 0x0022faca, // n0x0628 c0x0000 (---------------) + I ballooning + 0x00220c46, // n0x0629 c0x0000 (---------------) + I broker + 0x003555c3, // n0x062a c0x0000 (---------------) + I caa + 0x002e9b05, // n0x062b c0x0000 (---------------) + I cargo + 0x00351008, // n0x062c c0x0000 (---------------) + I catering + 0x003257cd, // n0x062d c0x0000 (---------------) + I certification + 0x0035894c, // n0x062e c0x0000 (---------------) + I championship + 0x00320347, // n0x062f c0x0000 (---------------) + I charter + 0x0035b70d, // n0x0630 c0x0000 (---------------) + I civilaviation + 0x00238ac4, // n0x0631 c0x0000 (---------------) + I club + 0x00236cca, // n0x0632 c0x0000 (---------------) + I conference + 0x0023784a, // n0x0633 c0x0000 (---------------) + I consultant + 0x00237d0a, // n0x0634 c0x0000 (---------------) + I consulting + 0x00308c87, // n0x0635 c0x0000 (---------------) + I control + 0x00242207, // n0x0636 c0x0000 (---------------) + I council + 0x00245184, // n0x0637 c0x0000 (---------------) + I crew + 0x0022dcc6, // n0x0638 c0x0000 (---------------) + I design + 0x00356f44, // n0x0639 c0x0000 (---------------) + I dgca + 0x002fe188, // n0x063a c0x0000 (---------------) + I educator + 0x00322189, // n0x063b c0x0000 (---------------) + I emergency + 0x00369146, // n0x063c c0x0000 (---------------) + I engine + 0x00369148, // n0x063d c0x0000 (---------------) + I engineer + 0x00247acd, // n0x063e c0x0000 (---------------) + I entertainment + 0x002c2709, 
// n0x063f c0x0000 (---------------) + I equipment + 0x00239488, // n0x0640 c0x0000 (---------------) + I exchange + 0x00247487, // n0x0641 c0x0000 (---------------) + I express + 0x0030eeca, // n0x0642 c0x0000 (---------------) + I federation + 0x00251286, // n0x0643 c0x0000 (---------------) + I flight + 0x0025cf87, // n0x0644 c0x0000 (---------------) + I freight + 0x00240d04, // n0x0645 c0x0000 (---------------) + I fuel + 0x0026e307, // n0x0646 c0x0000 (---------------) + I gliding + 0x0026cc8a, // n0x0647 c0x0000 (---------------) + I government + 0x0031240e, // n0x0648 c0x0000 (---------------) + I groundhandling + 0x0020ab45, // n0x0649 c0x0000 (---------------) + I group + 0x002ff10b, // n0x064a c0x0000 (---------------) + I hanggliding + 0x002e9e49, // n0x064b c0x0000 (---------------) + I homebuilt + 0x0023e7c9, // n0x064c c0x0000 (---------------) + I insurance + 0x0033be47, // n0x064d c0x0000 (---------------) + I journal + 0x0038e60a, // n0x064e c0x0000 (---------------) + I journalist + 0x00285c07, // n0x064f c0x0000 (---------------) + I leasing + 0x002e2d49, // n0x0650 c0x0000 (---------------) + I logistics + 0x00395fc8, // n0x0651 c0x0000 (---------------) + I magazine + 0x0027634b, // n0x0652 c0x0000 (---------------) + I maintenance + 0x003025c5, // n0x0653 c0x0000 (---------------) + I media + 0x0031210a, // n0x0654 c0x0000 (---------------) + I microlight + 0x002a3209, // n0x0655 c0x0000 (---------------) + I modelling + 0x00319f8a, // n0x0656 c0x0000 (---------------) + I navigation + 0x002c530b, // n0x0657 c0x0000 (---------------) + I parachuting + 0x0026e20b, // n0x0658 c0x0000 (---------------) + I paragliding + 0x002d4615, // n0x0659 c0x0000 (---------------) + I passenger-association + 0x002d6505, // n0x065a c0x0000 (---------------) + I pilot + 0x00247505, // n0x065b c0x0000 (---------------) + I press + 0x002e204a, // n0x065c c0x0000 (---------------) + I production + 0x00336aca, // n0x065d c0x0000 (---------------) + I recreation + 
0x002fae47, // n0x065e c0x0000 (---------------) + I repbody + 0x0021d683, // n0x065f c0x0000 (---------------) + I res + 0x0029fa08, // n0x0660 c0x0000 (---------------) + I research + 0x002ce74a, // n0x0661 c0x0000 (---------------) + I rotorcraft + 0x00215946, // n0x0662 c0x0000 (---------------) + I safety + 0x002466c9, // n0x0663 c0x0000 (---------------) + I scientist + 0x00206808, // n0x0664 c0x0000 (---------------) + I services + 0x002c4484, // n0x0665 c0x0000 (---------------) + I show + 0x0027d709, // n0x0666 c0x0000 (---------------) + I skydiving + 0x002b8688, // n0x0667 c0x0000 (---------------) + I software + 0x002abd47, // n0x0668 c0x0000 (---------------) + I student + 0x002673c6, // n0x0669 c0x0000 (---------------) + I trader + 0x002a4607, // n0x066a c0x0000 (---------------) + I trading + 0x00295207, // n0x066b c0x0000 (---------------) + I trainer + 0x00244bc5, // n0x066c c0x0000 (---------------) + I union + 0x002dbd0c, // n0x066d c0x0000 (---------------) + I workinggroup + 0x00351f05, // n0x066e c0x0000 (---------------) + I works + 0x00233503, // n0x066f c0x0000 (---------------) + I com + 0x0023a783, // n0x0670 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0671 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x0672 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0673 c0x0000 (---------------) + I org + 0x00200742, // n0x0674 c0x0000 (---------------) + I co + 0x00233503, // n0x0675 c0x0000 (---------------) + I com + 0x0021fe03, // n0x0676 c0x0000 (---------------) + I net + 0x00201483, // n0x0677 c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x0678 c0x0000 (---------------) + I org + 0x00233503, // n0x0679 c0x0000 (---------------) + I com + 0x0021fe03, // n0x067a c0x0000 (---------------) + I net + 0x0020b283, // n0x067b c0x0000 (---------------) + I off + 0x0022d1c3, // n0x067c c0x0000 (---------------) + I org + 0x000ffa08, // n0x067d c0x0000 (---------------) + blogspot + 0x00233503, // n0x067e c0x0000 
(---------------) + I com + 0x0023a783, // n0x067f c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0680 c0x0000 (---------------) + I gov + 0x00209003, // n0x0681 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x0682 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0683 c0x0000 (---------------) + I org + 0x000ffa08, // n0x0684 c0x0000 (---------------) + blogspot + 0x00200742, // n0x0685 c0x0000 (---------------) + I co + 0x00202602, // n0x0686 c0x0000 (---------------) + I ed + 0x00237f42, // n0x0687 c0x0000 (---------------) + I gv + 0x00201e42, // n0x0688 c0x0000 (---------------) + I it + 0x00200c42, // n0x0689 c0x0000 (---------------) + I og + 0x002718c2, // n0x068a c0x0000 (---------------) + I pb + 0x04633503, // n0x068b c0x0011 (n0x0694-n0x0695) + I com + 0x0023a783, // n0x068c c0x0000 (---------------) + I edu + 0x00213183, // n0x068d c0x0000 (---------------) + I gob + 0x0026cc83, // n0x068e c0x0000 (---------------) + I gov + 0x00201603, // n0x068f c0x0000 (---------------) + I int + 0x00209003, // n0x0690 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x0691 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0692 c0x0000 (---------------) + I org + 0x00209e43, // n0x0693 c0x0000 (---------------) + I tur + 0x000ffa08, // n0x0694 c0x0000 (---------------) + blogspot + 0x00255704, // n0x0695 c0x0000 (---------------) + I e164 + 0x002edc07, // n0x0696 c0x0000 (---------------) + I in-addr + 0x00215dc3, // n0x0697 c0x0000 (---------------) + I ip6 + 0x00238804, // n0x0698 c0x0000 (---------------) + I iris + 0x0020dd83, // n0x0699 c0x0000 (---------------) + I uri + 0x00285383, // n0x069a c0x0000 (---------------) + I urn + 0x0026cc83, // n0x069b c0x0000 (---------------) + I gov + 0x00201542, // n0x069c c0x0000 (---------------) + I ac + 0x00130b83, // n0x069d c0x0000 (---------------) + biz + 0x05600742, // n0x069e c0x0015 (n0x06a3-n0x06a4) + I co + 0x00237f42, // n0x069f c0x0000 (---------------) + I gv + 0x001a1244, // n0x06a0 
c0x0000 (---------------) + info + 0x00200282, // n0x06a1 c0x0000 (---------------) + I or + 0x000e1c44, // n0x06a2 c0x0000 (---------------) + priv + 0x000ffa08, // n0x06a3 c0x0000 (---------------) + blogspot + 0x00239a03, // n0x06a4 c0x0000 (---------------) + I act + 0x002afc83, // n0x06a5 c0x0000 (---------------) + I asn + 0x05e33503, // n0x06a6 c0x0017 (n0x06b6-n0x06b7) + I com + 0x00236cc4, // n0x06a7 c0x0000 (---------------) + I conf + 0x0623a783, // n0x06a8 c0x0018 (n0x06b7-n0x06bf) + I edu + 0x0666cc83, // n0x06a9 c0x0019 (n0x06bf-n0x06c4) + I gov + 0x0020c782, // n0x06aa c0x0000 (---------------) + I id + 0x003a1244, // n0x06ab c0x0000 (---------------) + I info + 0x0021fe03, // n0x06ac c0x0000 (---------------) + I net + 0x002f09c3, // n0x06ad c0x0000 (---------------) + I nsw + 0x002009c2, // n0x06ae c0x0000 (---------------) + I nt + 0x0022d1c3, // n0x06af c0x0000 (---------------) + I org + 0x0021c142, // n0x06b0 c0x0000 (---------------) + I oz + 0x002e6383, // n0x06b1 c0x0000 (---------------) + I qld + 0x002004c2, // n0x06b2 c0x0000 (---------------) + I sa + 0x00201e83, // n0x06b3 c0x0000 (---------------) + I tas + 0x002068c3, // n0x06b4 c0x0000 (---------------) + I vic + 0x002010c2, // n0x06b5 c0x0000 (---------------) + I wa + 0x000ffa08, // n0x06b6 c0x0000 (---------------) + blogspot + 0x00239a03, // n0x06b7 c0x0000 (---------------) + I act + 0x002f09c3, // n0x06b8 c0x0000 (---------------) + I nsw + 0x002009c2, // n0x06b9 c0x0000 (---------------) + I nt + 0x002e6383, // n0x06ba c0x0000 (---------------) + I qld + 0x002004c2, // n0x06bb c0x0000 (---------------) + I sa + 0x00201e83, // n0x06bc c0x0000 (---------------) + I tas + 0x002068c3, // n0x06bd c0x0000 (---------------) + I vic + 0x002010c2, // n0x06be c0x0000 (---------------) + I wa + 0x002e6383, // n0x06bf c0x0000 (---------------) + I qld + 0x002004c2, // n0x06c0 c0x0000 (---------------) + I sa + 0x00201e83, // n0x06c1 c0x0000 (---------------) + I tas + 0x002068c3, // 
n0x06c2 c0x0000 (---------------) + I vic + 0x002010c2, // n0x06c3 c0x0000 (---------------) + I wa + 0x00233503, // n0x06c4 c0x0000 (---------------) + I com + 0x00330b83, // n0x06c5 c0x0000 (---------------) + I biz + 0x00233503, // n0x06c6 c0x0000 (---------------) + I com + 0x0023a783, // n0x06c7 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x06c8 c0x0000 (---------------) + I gov + 0x003a1244, // n0x06c9 c0x0000 (---------------) + I info + 0x00201603, // n0x06ca c0x0000 (---------------) + I int + 0x00209003, // n0x06cb c0x0000 (---------------) + I mil + 0x00205284, // n0x06cc c0x0000 (---------------) + I name + 0x0021fe03, // n0x06cd c0x0000 (---------------) + I net + 0x0022d1c3, // n0x06ce c0x0000 (---------------) + I org + 0x00209302, // n0x06cf c0x0000 (---------------) + I pp + 0x00220e43, // n0x06d0 c0x0000 (---------------) + I pro + 0x000ffa08, // n0x06d1 c0x0000 (---------------) + blogspot + 0x00200742, // n0x06d2 c0x0000 (---------------) + I co + 0x00233503, // n0x06d3 c0x0000 (---------------) + I com + 0x0023a783, // n0x06d4 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x06d5 c0x0000 (---------------) + I gov + 0x00209003, // n0x06d6 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x06d7 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x06d8 c0x0000 (---------------) + I org + 0x00209702, // n0x06d9 c0x0000 (---------------) + I rs + 0x002d8144, // n0x06da c0x0000 (---------------) + I unbi + 0x003a6844, // n0x06db c0x0000 (---------------) + I unsa + 0x00330b83, // n0x06dc c0x0000 (---------------) + I biz + 0x00200742, // n0x06dd c0x0000 (---------------) + I co + 0x00233503, // n0x06de c0x0000 (---------------) + I com + 0x0023a783, // n0x06df c0x0000 (---------------) + I edu + 0x0026cc83, // n0x06e0 c0x0000 (---------------) + I gov + 0x003a1244, // n0x06e1 c0x0000 (---------------) + I info + 0x0021fe03, // n0x06e2 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x06e3 c0x0000 (---------------) + I org + 
0x00391185, // n0x06e4 c0x0000 (---------------) + I store + 0x00224e42, // n0x06e5 c0x0000 (---------------) + I tv + 0x00201542, // n0x06e6 c0x0000 (---------------) + I ac + 0x000ffa08, // n0x06e7 c0x0000 (---------------) + blogspot + 0x0026cc83, // n0x06e8 c0x0000 (---------------) + I gov + 0x0025c3c1, // n0x06e9 c0x0000 (---------------) + I 0 + 0x0022a0c1, // n0x06ea c0x0000 (---------------) + I 1 + 0x002484c1, // n0x06eb c0x0000 (---------------) + I 2 + 0x00231a01, // n0x06ec c0x0000 (---------------) + I 3 + 0x00231981, // n0x06ed c0x0000 (---------------) + I 4 + 0x002736c1, // n0x06ee c0x0000 (---------------) + I 5 + 0x00215e41, // n0x06ef c0x0000 (---------------) + I 6 + 0x0023e381, // n0x06f0 c0x0000 (---------------) + I 7 + 0x002f2a41, // n0x06f1 c0x0000 (---------------) + I 8 + 0x00301241, // n0x06f2 c0x0000 (---------------) + I 9 + 0x00200101, // n0x06f3 c0x0000 (---------------) + I a + 0x00200001, // n0x06f4 c0x0000 (---------------) + I b + 0x000ffa08, // n0x06f5 c0x0000 (---------------) + blogspot + 0x00200301, // n0x06f6 c0x0000 (---------------) + I c + 0x00200381, // n0x06f7 c0x0000 (---------------) + I d + 0x00200081, // n0x06f8 c0x0000 (---------------) + I e + 0x00200581, // n0x06f9 c0x0000 (---------------) + I f + 0x00200c81, // n0x06fa c0x0000 (---------------) + I g + 0x00200d81, // n0x06fb c0x0000 (---------------) + I h + 0x00200041, // n0x06fc c0x0000 (---------------) + I i + 0x00201741, // n0x06fd c0x0000 (---------------) + I j + 0x00200fc1, // n0x06fe c0x0000 (---------------) + I k + 0x00200201, // n0x06ff c0x0000 (---------------) + I l + 0x00200181, // n0x0700 c0x0000 (---------------) + I m + 0x00200541, // n0x0701 c0x0000 (---------------) + I n + 0x00200281, // n0x0702 c0x0000 (---------------) + I o + 0x00200941, // n0x0703 c0x0000 (---------------) + I p + 0x00200401, // n0x0704 c0x0000 (---------------) + I q + 0x002002c1, // n0x0705 c0x0000 (---------------) + I r + 0x002004c1, // n0x0706 c0x0000 
(---------------) + I s + 0x00200141, // n0x0707 c0x0000 (---------------) + I t + 0x00200441, // n0x0708 c0x0000 (---------------) + I u + 0x002000c1, // n0x0709 c0x0000 (---------------) + I v + 0x002010c1, // n0x070a c0x0000 (---------------) + I w + 0x00205381, // n0x070b c0x0000 (---------------) + I x + 0x00201841, // n0x070c c0x0000 (---------------) + I y + 0x00205f81, // n0x070d c0x0000 (---------------) + I z + 0x00233503, // n0x070e c0x0000 (---------------) + I com + 0x0023a783, // n0x070f c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0710 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x0711 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0712 c0x0000 (---------------) + I org + 0x00200742, // n0x0713 c0x0000 (---------------) + I co + 0x00233503, // n0x0714 c0x0000 (---------------) + I com + 0x0023a783, // n0x0715 c0x0000 (---------------) + I edu + 0x00200282, // n0x0716 c0x0000 (---------------) + I or + 0x0022d1c3, // n0x0717 c0x0000 (---------------) + I org + 0x00009107, // n0x0718 c0x0000 (---------------) + dscloud + 0x00013886, // n0x0719 c0x0000 (---------------) + dyndns + 0x00055e8a, // n0x071a c0x0000 (---------------) + for-better + 0x00087d88, // n0x071b c0x0000 (---------------) + for-more + 0x00056488, // n0x071c c0x0000 (---------------) + for-some + 0x000572c7, // n0x071d c0x0000 (---------------) + for-the + 0x0006ba86, // n0x071e c0x0000 (---------------) + selfip + 0x000eadc6, // n0x071f c0x0000 (---------------) + webhop + 0x002d4884, // n0x0720 c0x0000 (---------------) + I asso + 0x00319cc7, // n0x0721 c0x0000 (---------------) + I barreau + 0x000ffa08, // n0x0722 c0x0000 (---------------) + blogspot + 0x0033d7c4, // n0x0723 c0x0000 (---------------) + I gouv + 0x00233503, // n0x0724 c0x0000 (---------------) + I com + 0x0023a783, // n0x0725 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0726 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x0727 c0x0000 (---------------) + I net + 0x0022d1c3, // 
n0x0728 c0x0000 (---------------) + I org + 0x00233503, // n0x0729 c0x0000 (---------------) + I com + 0x0023a783, // n0x072a c0x0000 (---------------) + I edu + 0x00213183, // n0x072b c0x0000 (---------------) + I gob + 0x0026cc83, // n0x072c c0x0000 (---------------) + I gov + 0x00201603, // n0x072d c0x0000 (---------------) + I int + 0x00209003, // n0x072e c0x0000 (---------------) + I mil + 0x0021fe03, // n0x072f c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0730 c0x0000 (---------------) + I org + 0x00224e42, // n0x0731 c0x0000 (---------------) + I tv + 0x002c3503, // n0x0732 c0x0000 (---------------) + I adm + 0x002f4fc3, // n0x0733 c0x0000 (---------------) + I adv + 0x00209c83, // n0x0734 c0x0000 (---------------) + I agr + 0x00201882, // n0x0735 c0x0000 (---------------) + I am + 0x0024f183, // n0x0736 c0x0000 (---------------) + I arq + 0x002011c3, // n0x0737 c0x0000 (---------------) + I art + 0x00217b03, // n0x0738 c0x0000 (---------------) + I ato + 0x00200001, // n0x0739 c0x0000 (---------------) + I b + 0x00203a03, // n0x073a c0x0000 (---------------) + I bio + 0x002a4004, // n0x073b c0x0000 (---------------) + I blog + 0x00321f43, // n0x073c c0x0000 (---------------) + I bmd + 0x0025ee03, // n0x073d c0x0000 (---------------) + I cim + 0x0021ba43, // n0x073e c0x0000 (---------------) + I cng + 0x00231603, // n0x073f c0x0000 (---------------) + I cnt + 0x0a233503, // n0x0740 c0x0028 (n0x0778-n0x0779) + I com + 0x0023d684, // n0x0741 c0x0000 (---------------) + I coop + 0x0021ba03, // n0x0742 c0x0000 (---------------) + I ecn + 0x0020b203, // n0x0743 c0x0000 (---------------) + I eco + 0x0023a783, // n0x0744 c0x0000 (---------------) + I edu + 0x0023a4c3, // n0x0745 c0x0000 (---------------) + I emp + 0x00213703, // n0x0746 c0x0000 (---------------) + I eng + 0x0029bdc3, // n0x0747 c0x0000 (---------------) + I esp + 0x0025ed83, // n0x0748 c0x0000 (---------------) + I etc + 0x002234c3, // n0x0749 c0x0000 (---------------) + I eti + 
0x00212983, // n0x074a c0x0000 (---------------) + I far + 0x002522c4, // n0x074b c0x0000 (---------------) + I flog + 0x00242902, // n0x074c c0x0000 (---------------) + I fm + 0x00255803, // n0x074d c0x0000 (---------------) + I fnd + 0x0025bb03, // n0x074e c0x0000 (---------------) + I fot + 0x00277703, // n0x074f c0x0000 (---------------) + I fst + 0x002ee4c3, // n0x0750 c0x0000 (---------------) + I g12 + 0x002ece03, // n0x0751 c0x0000 (---------------) + I ggf + 0x0026cc83, // n0x0752 c0x0000 (---------------) + I gov + 0x002cc783, // n0x0753 c0x0000 (---------------) + I imb + 0x0021d883, // n0x0754 c0x0000 (---------------) + I ind + 0x003a1083, // n0x0755 c0x0000 (---------------) + I inf + 0x00215b43, // n0x0756 c0x0000 (---------------) + I jor + 0x002f3143, // n0x0757 c0x0000 (---------------) + I jus + 0x0022e283, // n0x0758 c0x0000 (---------------) + I leg + 0x002d08c3, // n0x0759 c0x0000 (---------------) + I lel + 0x0021f803, // n0x075a c0x0000 (---------------) + I mat + 0x00213ac3, // n0x075b c0x0000 (---------------) + I med + 0x00209003, // n0x075c c0x0000 (---------------) + I mil + 0x0022a482, // n0x075d c0x0000 (---------------) + I mp + 0x00283a83, // n0x075e c0x0000 (---------------) + I mus + 0x0021fe03, // n0x075f c0x0000 (---------------) + I net + 0x01601483, // n0x0760 c0x0005 (---------------)* o I nom + 0x002547c3, // n0x0761 c0x0000 (---------------) + I not + 0x0023b443, // n0x0762 c0x0000 (---------------) + I ntr + 0x00213243, // n0x0763 c0x0000 (---------------) + I odo + 0x0022d1c3, // n0x0764 c0x0000 (---------------) + I org + 0x00249583, // n0x0765 c0x0000 (---------------) + I ppg + 0x00220e43, // n0x0766 c0x0000 (---------------) + I pro + 0x0023d083, // n0x0767 c0x0000 (---------------) + I psc + 0x002f7b43, // n0x0768 c0x0000 (---------------) + I psi + 0x002e6543, // n0x0769 c0x0000 (---------------) + I qsl + 0x00264b85, // n0x076a c0x0000 (---------------) + I radio + 0x0022a5c3, // n0x076b c0x0000 (---------------) + 
I rec + 0x002e6583, // n0x076c c0x0000 (---------------) + I slg + 0x0035ca03, // n0x076d c0x0000 (---------------) + I srv + 0x002203c4, // n0x076e c0x0000 (---------------) + I taxi + 0x00336843, // n0x076f c0x0000 (---------------) + I teo + 0x00239f83, // n0x0770 c0x0000 (---------------) + I tmp + 0x002a9803, // n0x0771 c0x0000 (---------------) + I trd + 0x00209e43, // n0x0772 c0x0000 (---------------) + I tur + 0x00224e42, // n0x0773 c0x0000 (---------------) + I tv + 0x0023f943, // n0x0774 c0x0000 (---------------) + I vet + 0x002fa5c4, // n0x0775 c0x0000 (---------------) + I vlog + 0x0037c484, // n0x0776 c0x0000 (---------------) + I wiki + 0x002645c3, // n0x0777 c0x0000 (---------------) + I zlg + 0x000ffa08, // n0x0778 c0x0000 (---------------) + blogspot + 0x00233503, // n0x0779 c0x0000 (---------------) + I com + 0x0023a783, // n0x077a c0x0000 (---------------) + I edu + 0x0026cc83, // n0x077b c0x0000 (---------------) + I gov + 0x0021fe03, // n0x077c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x077d c0x0000 (---------------) + I org + 0x00233503, // n0x077e c0x0000 (---------------) + I com + 0x0023a783, // n0x077f c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0780 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x0781 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0782 c0x0000 (---------------) + I org + 0x00200742, // n0x0783 c0x0000 (---------------) + I co + 0x0022d1c3, // n0x0784 c0x0000 (---------------) + I org + 0x0b633503, // n0x0785 c0x002d (n0x0789-n0x078a) + I com + 0x0026cc83, // n0x0786 c0x0000 (---------------) + I gov + 0x00209003, // n0x0787 c0x0000 (---------------) + I mil + 0x0020b282, // n0x0788 c0x0000 (---------------) + I of + 0x000ffa08, // n0x0789 c0x0000 (---------------) + blogspot + 0x00233503, // n0x078a c0x0000 (---------------) + I com + 0x0023a783, // n0x078b c0x0000 (---------------) + I edu + 0x0026cc83, // n0x078c c0x0000 (---------------) + I gov + 0x0021fe03, // n0x078d c0x0000 
(---------------) + I net + 0x0022d1c3, // n0x078e c0x0000 (---------------) + I org + 0x00005f82, // n0x078f c0x0000 (---------------) + za + 0x002020c2, // n0x0790 c0x0000 (---------------) + I ab + 0x00221a82, // n0x0791 c0x0000 (---------------) + I bc + 0x000ffa08, // n0x0792 c0x0000 (---------------) + blogspot + 0x00000742, // n0x0793 c0x0000 (---------------) + co + 0x0023c3c2, // n0x0794 c0x0000 (---------------) + I gc + 0x00208602, // n0x0795 c0x0000 (---------------) + I mb + 0x00215102, // n0x0796 c0x0000 (---------------) + I nb + 0x00200542, // n0x0797 c0x0000 (---------------) + I nf + 0x00247802, // n0x0798 c0x0000 (---------------) + I nl + 0x00210c42, // n0x0799 c0x0000 (---------------) + I ns + 0x002009c2, // n0x079a c0x0000 (---------------) + I nt + 0x002017c2, // n0x079b c0x0000 (---------------) + I nu + 0x00200982, // n0x079c c0x0000 (---------------) + I on + 0x00207782, // n0x079d c0x0000 (---------------) + I pe + 0x0037cd42, // n0x079e c0x0000 (---------------) + I qc + 0x00207842, // n0x079f c0x0000 (---------------) + I sk + 0x00226f42, // n0x07a0 c0x0000 (---------------) + I yk + 0x00146449, // n0x07a1 c0x0000 (---------------) + ftpaccess + 0x00173f0b, // n0x07a2 c0x0000 (---------------) + game-server + 0x000d1148, // n0x07a3 c0x0000 (---------------) + myphotos + 0x00143a09, // n0x07a4 c0x0000 (---------------) + scrapping + 0x0026cc83, // n0x07a5 c0x0000 (---------------) + I gov + 0x000ffa08, // n0x07a6 c0x0000 (---------------) + blogspot + 0x000ffa08, // n0x07a7 c0x0000 (---------------) + blogspot + 0x00201542, // n0x07a8 c0x0000 (---------------) + I ac + 0x002d4884, // n0x07a9 c0x0000 (---------------) + I asso + 0x00200742, // n0x07aa c0x0000 (---------------) + I co + 0x00233503, // n0x07ab c0x0000 (---------------) + I com + 0x00202602, // n0x07ac c0x0000 (---------------) + I ed + 0x0023a783, // n0x07ad c0x0000 (---------------) + I edu + 0x00202d42, // n0x07ae c0x0000 (---------------) + I go + 0x0033d7c4, // n0x07af 
c0x0000 (---------------) + I gouv + 0x00201603, // n0x07b0 c0x0000 (---------------) + I int + 0x0024da82, // n0x07b1 c0x0000 (---------------) + I md + 0x0021fe03, // n0x07b2 c0x0000 (---------------) + I net + 0x00200282, // n0x07b3 c0x0000 (---------------) + I or + 0x0022d1c3, // n0x07b4 c0x0000 (---------------) + I org + 0x00247506, // n0x07b5 c0x0000 (---------------) + I presse + 0x0030dc0f, // n0x07b6 c0x0000 (---------------) + I xn--aroport-bya + 0x00700b03, // n0x07b7 c0x0001 (---------------) ! I www + 0x000ffa08, // n0x07b8 c0x0000 (---------------) + blogspot + 0x00200742, // n0x07b9 c0x0000 (---------------) + I co + 0x00213183, // n0x07ba c0x0000 (---------------) + I gob + 0x0026cc83, // n0x07bb c0x0000 (---------------) + I gov + 0x00209003, // n0x07bc c0x0000 (---------------) + I mil + 0x00200742, // n0x07bd c0x0000 (---------------) + I co + 0x00233503, // n0x07be c0x0000 (---------------) + I com + 0x0026cc83, // n0x07bf c0x0000 (---------------) + I gov + 0x0021fe03, // n0x07c0 c0x0000 (---------------) + I net + 0x00201542, // n0x07c1 c0x0000 (---------------) + I ac + 0x00204f02, // n0x07c2 c0x0000 (---------------) + I ah + 0x0e6f6409, // n0x07c3 c0x0039 (n0x07ee-n0x07ef) o I amazonaws + 0x00206502, // n0x07c4 c0x0000 (---------------) + I bj + 0x0ee33503, // n0x07c5 c0x003b (n0x07f0-n0x07f1) + I com + 0x00243b42, // n0x07c6 c0x0000 (---------------) + I cq + 0x0023a783, // n0x07c7 c0x0000 (---------------) + I edu + 0x00215b02, // n0x07c8 c0x0000 (---------------) + I fj + 0x00222d42, // n0x07c9 c0x0000 (---------------) + I gd + 0x0026cc83, // n0x07ca c0x0000 (---------------) + I gov + 0x0023a242, // n0x07cb c0x0000 (---------------) + I gs + 0x00260202, // n0x07cc c0x0000 (---------------) + I gx + 0x00264642, // n0x07cd c0x0000 (---------------) + I gz + 0x00202442, // n0x07ce c0x0000 (---------------) + I ha + 0x0028c342, // n0x07cf c0x0000 (---------------) + I hb + 0x002073c2, // n0x07d0 c0x0000 (---------------) + I he + 
0x00200d82, // n0x07d1 c0x0000 (---------------) + I hi + 0x0020a882, // n0x07d2 c0x0000 (---------------) + I hk + 0x00248fc2, // n0x07d3 c0x0000 (---------------) + I hl + 0x0021ab42, // n0x07d4 c0x0000 (---------------) + I hn + 0x002ac642, // n0x07d5 c0x0000 (---------------) + I jl + 0x00251742, // n0x07d6 c0x0000 (---------------) + I js + 0x00313402, // n0x07d7 c0x0000 (---------------) + I jx + 0x0022e8c2, // n0x07d8 c0x0000 (---------------) + I ln + 0x00209003, // n0x07d9 c0x0000 (---------------) + I mil + 0x00207102, // n0x07da c0x0000 (---------------) + I mo + 0x0021fe03, // n0x07db c0x0000 (---------------) + I net + 0x0023db02, // n0x07dc c0x0000 (---------------) + I nm + 0x0026a782, // n0x07dd c0x0000 (---------------) + I nx + 0x0022d1c3, // n0x07de c0x0000 (---------------) + I org + 0x0024f202, // n0x07df c0x0000 (---------------) + I qh + 0x00200702, // n0x07e0 c0x0000 (---------------) + I sc + 0x002496c2, // n0x07e1 c0x0000 (---------------) + I sd + 0x00201342, // n0x07e2 c0x0000 (---------------) + I sh + 0x00214182, // n0x07e3 c0x0000 (---------------) + I sn + 0x002f2802, // n0x07e4 c0x0000 (---------------) + I sx + 0x00226782, // n0x07e5 c0x0000 (---------------) + I tj + 0x0024e502, // n0x07e6 c0x0000 (---------------) + I tw + 0x0036f4c2, // n0x07e7 c0x0000 (---------------) + I xj + 0x003029ca, // n0x07e8 c0x0000 (---------------) + I xn--55qx5d + 0x0034280a, // n0x07e9 c0x0000 (---------------) + I xn--io0a7i + 0x0037648a, // n0x07ea c0x0000 (---------------) + I xn--od0alg + 0x003a6cc2, // n0x07eb c0x0000 (---------------) + I xz + 0x00213642, // n0x07ec c0x0000 (---------------) + I yn + 0x00247702, // n0x07ed c0x0000 (---------------) + I zj + 0x0e835247, // n0x07ee c0x003a (n0x07ef-n0x07f0) + compute + 0x00039b8a, // n0x07ef c0x0000 (---------------) + cn-north-1 + 0x0f2f6409, // n0x07f0 c0x003c (n0x07f1-n0x07f2) o I amazonaws + 0x0f639b8a, // n0x07f1 c0x003d (n0x07f2-n0x07f3) o I cn-north-1 + 0x0004a542, // n0x07f2 c0x0000 
(---------------) + s3 + 0x0024bf84, // n0x07f3 c0x0000 (---------------) + I arts + 0x0fe33503, // n0x07f4 c0x003f (n0x0800-n0x0801) + I com + 0x0023a783, // n0x07f5 c0x0000 (---------------) + I edu + 0x0024d9c4, // n0x07f6 c0x0000 (---------------) + I firm + 0x0026cc83, // n0x07f7 c0x0000 (---------------) + I gov + 0x003a1244, // n0x07f8 c0x0000 (---------------) + I info + 0x00201603, // n0x07f9 c0x0000 (---------------) + I int + 0x00209003, // n0x07fa c0x0000 (---------------) + I mil + 0x0021fe03, // n0x07fb c0x0000 (---------------) + I net + 0x00201483, // n0x07fc c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x07fd c0x0000 (---------------) + I org + 0x0022a5c3, // n0x07fe c0x0000 (---------------) + I rec + 0x00221a03, // n0x07ff c0x0000 (---------------) + I web + 0x000ffa08, // n0x0800 c0x0000 (---------------) + blogspot + 0x00131905, // n0x0801 c0x0000 (---------------) + 1kapp + 0x0010a942, // n0x0802 c0x0000 (---------------) + 4u + 0x00175846, // n0x0803 c0x0000 (---------------) + africa + 0x106f6409, // n0x0804 c0x0041 (n0x08d7-n0x08eb) o I amazonaws + 0x000092c7, // n0x0805 c0x0000 (---------------) + appspot + 0x00000a42, // n0x0806 c0x0000 (---------------) + ar + 0x0019bcca, // n0x0807 c0x0000 (---------------) + betainabox + 0x000fb147, // n0x0808 c0x0000 (---------------) + blogdns + 0x000ffa08, // n0x0809 c0x0000 (---------------) + blogspot + 0x0001c402, // n0x080a c0x0000 (---------------) + br + 0x001387c7, // n0x080b c0x0000 (---------------) + cechire + 0x00194fcf, // n0x080c c0x0000 (---------------) + cloudcontrolapp + 0x00108b4f, // n0x080d c0x0000 (---------------) + cloudcontrolled + 0x0001ba42, // n0x080e c0x0000 (---------------) + cn + 0x00000742, // n0x080f c0x0000 (---------------) + co + 0x0009bd08, // n0x0810 c0x0000 (---------------) + codespot + 0x00004d82, // n0x0811 c0x0000 (---------------) + de + 0x0014c048, // n0x0812 c0x0000 (---------------) + dnsalias + 0x0007c9c7, // n0x0813 c0x0000 (---------------) + 
dnsdojo + 0x00014e0b, // n0x0814 c0x0000 (---------------) + doesntexist + 0x0016a009, // n0x0815 c0x0000 (---------------) + dontexist + 0x0014bf47, // n0x0816 c0x0000 (---------------) + doomdns + 0x000f410c, // n0x0817 c0x0000 (---------------) + dreamhosters + 0x0008cdc7, // n0x0818 c0x0000 (---------------) + dsmynas + 0x0012328a, // n0x0819 c0x0000 (---------------) + dyn-o-saur + 0x00197b48, // n0x081a c0x0000 (---------------) + dynalias + 0x00073dce, // n0x081b c0x0000 (---------------) + dyndns-at-home + 0x000dba8e, // n0x081c c0x0000 (---------------) + dyndns-at-work + 0x000faf8b, // n0x081d c0x0000 (---------------) + dyndns-blog + 0x000e7b4b, // n0x081e c0x0000 (---------------) + dyndns-free + 0x0001388b, // n0x081f c0x0000 (---------------) + dyndns-home + 0x00015c09, // n0x0820 c0x0000 (---------------) + dyndns-ip + 0x0001b40b, // n0x0821 c0x0000 (---------------) + dyndns-mail + 0x000214cd, // n0x0822 c0x0000 (---------------) + dyndns-office + 0x0002554b, // n0x0823 c0x0000 (---------------) + dyndns-pics + 0x0002698d, // n0x0824 c0x0000 (---------------) + dyndns-remote + 0x0002d40d, // n0x0825 c0x0000 (---------------) + dyndns-server + 0x0002e48a, // n0x0826 c0x0000 (---------------) + dyndns-web + 0x0017c2cb, // n0x0827 c0x0000 (---------------) + dyndns-wiki + 0x00151d4b, // n0x0828 c0x0000 (---------------) + dyndns-work + 0x0001e810, // n0x0829 c0x0000 (---------------) + elasticbeanstalk + 0x0002bb8f, // n0x082a c0x0000 (---------------) + est-a-la-maison + 0x0000a14f, // n0x082b c0x0000 (---------------) + est-a-la-masion + 0x0015234d, // n0x082c c0x0000 (---------------) + est-le-patron + 0x0013c450, // n0x082d c0x0000 (---------------) + est-mon-blogueur + 0x00004b82, // n0x082e c0x0000 (---------------) + eu + 0x00008f88, // n0x082f c0x0000 (---------------) + familyds + 0x11a4e285, // n0x0830 c0x0046 (n0x08f9-n0x08fa) o I fbsbx + 0x0004c6cb, // n0x0831 c0x0000 (---------------) + firebaseapp + 0x000549c8, // n0x0832 c0x0000 
(---------------) + flynnhub + 0x00063d87, // n0x0833 c0x0000 (---------------) + from-ak + 0x000640c7, // n0x0834 c0x0000 (---------------) + from-al + 0x00064287, // n0x0835 c0x0000 (---------------) + from-ar + 0x00065347, // n0x0836 c0x0000 (---------------) + from-ca + 0x00065e07, // n0x0837 c0x0000 (---------------) + from-ct + 0x00066587, // n0x0838 c0x0000 (---------------) + from-dc + 0x00067007, // n0x0839 c0x0000 (---------------) + from-de + 0x00067547, // n0x083a c0x0000 (---------------) + from-fl + 0x00067b87, // n0x083b c0x0000 (---------------) + from-ga + 0x00067f07, // n0x083c c0x0000 (---------------) + from-hi + 0x00068787, // n0x083d c0x0000 (---------------) + from-ia + 0x00068947, // n0x083e c0x0000 (---------------) + from-id + 0x00068b07, // n0x083f c0x0000 (---------------) + from-il + 0x00068cc7, // n0x0840 c0x0000 (---------------) + from-in + 0x00068fc7, // n0x0841 c0x0000 (---------------) + from-ks + 0x00069ac7, // n0x0842 c0x0000 (---------------) + from-ky + 0x0006a5c7, // n0x0843 c0x0000 (---------------) + from-ma + 0x0006aa87, // n0x0844 c0x0000 (---------------) + from-md + 0x0006b007, // n0x0845 c0x0000 (---------------) + from-mi + 0x0006bd87, // n0x0846 c0x0000 (---------------) + from-mn + 0x0006bf47, // n0x0847 c0x0000 (---------------) + from-mo + 0x0006c247, // n0x0848 c0x0000 (---------------) + from-ms + 0x0006c787, // n0x0849 c0x0000 (---------------) + from-mt + 0x0006c987, // n0x084a c0x0000 (---------------) + from-nc + 0x0006d307, // n0x084b c0x0000 (---------------) + from-nd + 0x0006d4c7, // n0x084c c0x0000 (---------------) + from-ne + 0x0006d8c7, // n0x084d c0x0000 (---------------) + from-nh + 0x0006e007, // n0x084e c0x0000 (---------------) + from-nj + 0x0006e4c7, // n0x084f c0x0000 (---------------) + from-nm + 0x0006ef87, // n0x0850 c0x0000 (---------------) + from-nv + 0x0006f587, // n0x0851 c0x0000 (---------------) + from-oh + 0x0006f847, // n0x0852 c0x0000 (---------------) + from-ok + 0x0006fbc7, // 
n0x0853 c0x0000 (---------------) + from-or + 0x0006fd87, // n0x0854 c0x0000 (---------------) + from-pa + 0x00070107, // n0x0855 c0x0000 (---------------) + from-pr + 0x000708c7, // n0x0856 c0x0000 (---------------) + from-ri + 0x00070f07, // n0x0857 c0x0000 (---------------) + from-sc + 0x00071307, // n0x0858 c0x0000 (---------------) + from-sd + 0x00073147, // n0x0859 c0x0000 (---------------) + from-tn + 0x00073307, // n0x085a c0x0000 (---------------) + from-tx + 0x00073747, // n0x085b c0x0000 (---------------) + from-ut + 0x00074607, // n0x085c c0x0000 (---------------) + from-va + 0x00074c47, // n0x085d c0x0000 (---------------) + from-vt + 0x00074f47, // n0x085e c0x0000 (---------------) + from-wa + 0x00075107, // n0x085f c0x0000 (---------------) + from-wi + 0x00075487, // n0x0860 c0x0000 (---------------) + from-wv + 0x00076607, // n0x0861 c0x0000 (---------------) + from-wy + 0x0000d202, // n0x0862 c0x0000 (---------------) + gb + 0x000d4487, // n0x0863 c0x0000 (---------------) + getmyip + 0x11cca40b, // n0x0864 c0x0047 (n0x08fa-n0x08fd) + githubcloud + 0x014ca416, // n0x0865 c0x0005 (---------------)* o githubcloudusercontent + 0x00019511, // n0x0866 c0x0000 (---------------) + githubusercontent + 0x000df54a, // n0x0867 c0x0000 (---------------) + googleapis + 0x0009bb8a, // n0x0868 c0x0000 (---------------) + googlecode + 0x00057d06, // n0x0869 c0x0000 (---------------) + gotdns + 0x00011ecb, // n0x086a c0x0000 (---------------) + gotpantheon + 0x00000c82, // n0x086b c0x0000 (---------------) + gr + 0x000992c9, // n0x086c c0x0000 (---------------) + herokuapp + 0x00091f49, // n0x086d c0x0000 (---------------) + herokussl + 0x0000a882, // n0x086e c0x0000 (---------------) + hk + 0x0014cfca, // n0x086f c0x0000 (---------------) + hobby-site + 0x000a59c9, // n0x0870 c0x0000 (---------------) + homelinux + 0x000a6fc8, // n0x0871 c0x0000 (---------------) + homeunix + 0x000195c2, // n0x0872 c0x0000 (---------------) + hu + 0x00116f89, // n0x0873 c0x0000 
(---------------) + iamallama + 0x0016d68e, // n0x0874 c0x0000 (---------------) + is-a-anarchist + 0x000a3ecc, // n0x0875 c0x0000 (---------------) + is-a-blogger + 0x000d254f, // n0x0876 c0x0000 (---------------) + is-a-bookkeeper + 0x0018ba8e, // n0x0877 c0x0000 (---------------) + is-a-bulls-fan + 0x0000de0c, // n0x0878 c0x0000 (---------------) + is-a-caterer + 0x00012789, // n0x0879 c0x0000 (---------------) + is-a-chef + 0x00013d11, // n0x087a c0x0000 (---------------) + is-a-conservative + 0x00016c08, // n0x087b c0x0000 (---------------) + is-a-cpa + 0x00024852, // n0x087c c0x0000 (---------------) + is-a-cubicle-slave + 0x0002648d, // n0x087d c0x0000 (---------------) + is-a-democrat + 0x0002db8d, // n0x087e c0x0000 (---------------) + is-a-designer + 0x0017614b, // n0x087f c0x0000 (---------------) + is-a-doctor + 0x00178815, // n0x0880 c0x0000 (---------------) + is-a-financialadvisor + 0x0004e989, // n0x0881 c0x0000 (---------------) + is-a-geek + 0x0005028a, // n0x0882 c0x0000 (---------------) + is-a-green + 0x00059389, // n0x0883 c0x0000 (---------------) + is-a-guru + 0x0005bd50, // n0x0884 c0x0000 (---------------) + is-a-hard-worker + 0x000662cb, // n0x0885 c0x0000 (---------------) + is-a-hunter + 0x00070a4f, // n0x0886 c0x0000 (---------------) + is-a-landscaper + 0x0007434b, // n0x0887 c0x0000 (---------------) + is-a-lawyer + 0x0007b5cc, // n0x0888 c0x0000 (---------------) + is-a-liberal + 0x0007dbd0, // n0x0889 c0x0000 (---------------) + is-a-libertarian + 0x00082fca, // n0x088a c0x0000 (---------------) + is-a-llama + 0x0008394d, // n0x088b c0x0000 (---------------) + is-a-musician + 0x0008894e, // n0x088c c0x0000 (---------------) + is-a-nascarfan + 0x0014414a, // n0x088d c0x0000 (---------------) + is-a-nurse + 0x00089f0c, // n0x088e c0x0000 (---------------) + is-a-painter + 0x00094ed4, // n0x088f c0x0000 (---------------) + is-a-personaltrainer + 0x00098f51, // n0x0890 c0x0000 (---------------) + is-a-photographer + 0x0009e80b, // 
n0x0891 c0x0000 (---------------) + is-a-player + 0x0009f58f, // n0x0892 c0x0000 (---------------) + is-a-republican + 0x000a0f8d, // n0x0893 c0x0000 (---------------) + is-a-rockstar + 0x000a384e, // n0x0894 c0x0000 (---------------) + is-a-socialist + 0x000abc0c, // n0x0895 c0x0000 (---------------) + is-a-student + 0x000d5b8c, // n0x0896 c0x0000 (---------------) + is-a-teacher + 0x000d588b, // n0x0897 c0x0000 (---------------) + is-a-techie + 0x000beb8e, // n0x0898 c0x0000 (---------------) + is-a-therapist + 0x000d9990, // n0x0899 c0x0000 (---------------) + is-an-accountant + 0x000ad54b, // n0x089a c0x0000 (---------------) + is-an-actor + 0x000d540d, // n0x089b c0x0000 (---------------) + is-an-actress + 0x000fb60f, // n0x089c c0x0000 (---------------) + is-an-anarchist + 0x0010390c, // n0x089d c0x0000 (---------------) + is-an-artist + 0x00168fce, // n0x089e c0x0000 (---------------) + is-an-engineer + 0x000b2bd1, // n0x089f c0x0000 (---------------) + is-an-entertainer + 0x000b924c, // n0x08a0 c0x0000 (---------------) + is-certified + 0x000bb247, // n0x08a1 c0x0000 (---------------) + is-gone + 0x000be5cd, // n0x08a2 c0x0000 (---------------) + is-into-anime + 0x00105d8c, // n0x08a3 c0x0000 (---------------) + is-into-cars + 0x00147b50, // n0x08a4 c0x0000 (---------------) + is-into-cartoons + 0x0016fecd, // n0x08a5 c0x0000 (---------------) + is-into-games + 0x000cf347, // n0x08a6 c0x0000 (---------------) + is-leet + 0x0017e2d0, // n0x08a7 c0x0000 (---------------) + is-not-certified + 0x000e9488, // n0x08a8 c0x0000 (---------------) + is-slick + 0x000ef04b, // n0x08a9 c0x0000 (---------------) + is-uberleet + 0x0014bbcf, // n0x08aa c0x0000 (---------------) + is-with-theband + 0x0008e588, // n0x08ab c0x0000 (---------------) + isa-geek + 0x000df74d, // n0x08ac c0x0000 (---------------) + isa-hockeynut + 0x00168110, // n0x08ad c0x0000 (---------------) + issmarterthanyou + 0x000aedc3, // n0x08ae c0x0000 (---------------) + jpn + 0x00006fc2, // n0x08af 
c0x0000 (---------------) + kr + 0x00058c49, // n0x08b0 c0x0000 (---------------) + likes-pie + 0x00073bca, // n0x08b1 c0x0000 (---------------) + likescandy + 0x00005303, // n0x08b2 c0x0000 (---------------) + mex + 0x0010b007, // n0x08b3 c0x0000 (---------------) + mydrobo + 0x001176c8, // n0x08b4 c0x0000 (---------------) + neat-url + 0x00184847, // n0x08b5 c0x0000 (---------------) + nfshost + 0x00000c02, // n0x08b6 c0x0000 (---------------) + no + 0x00064d0a, // n0x08b7 c0x0000 (---------------) + operaunite + 0x00194d4f, // n0x08b8 c0x0000 (---------------) + outsystemscloud + 0x000eaf0c, // n0x08b9 c0x0000 (---------------) + pagefrontapp + 0x000eb1d2, // n0x08ba c0x0000 (---------------) + pagespeedmobilizer + 0x122e1605, // n0x08bb c0x0048 (n0x08fd-n0x08fe) o I prgmr + 0x00114683, // n0x08bc c0x0000 (---------------) + qa2 + 0x0017cd42, // n0x08bd c0x0000 (---------------) + qc + 0x000ecb08, // n0x08be c0x0000 (---------------) + rackmaze + 0x00108ac7, // n0x08bf c0x0000 (---------------) + rhcloud + 0x00002202, // n0x08c0 c0x0000 (---------------) + ro + 0x00011302, // n0x08c1 c0x0000 (---------------) + ru + 0x000004c2, // n0x08c2 c0x0000 (---------------) + sa + 0x00033810, // n0x08c3 c0x0000 (---------------) + saves-the-whales + 0x000046c2, // n0x08c4 c0x0000 (---------------) + se + 0x0006ba86, // n0x08c5 c0x0000 (---------------) + selfip + 0x0013738e, // n0x08c6 c0x0000 (---------------) + sells-for-less + 0x0008becb, // n0x08c7 c0x0000 (---------------) + sells-for-u + 0x000cb7c8, // n0x08c8 c0x0000 (---------------) + servebbs + 0x000d0eca, // n0x08c9 c0x0000 (---------------) + simple-url + 0x000f7b87, // n0x08ca c0x0000 (---------------) + sinaapp + 0x0000bb4d, // n0x08cb c0x0000 (---------------) + space-to-rent + 0x001557cc, // n0x08cc c0x0000 (---------------) + teaches-yoga + 0x00000f82, // n0x08cd c0x0000 (---------------) + uk + 0x00002382, // n0x08ce c0x0000 (---------------) + us + 0x00001802, // n0x08cf c0x0000 (---------------) + uy + 
0x000f7aca, // n0x08d0 c0x0000 (---------------) + vipsinaapp + 0x000df44a, // n0x08d1 c0x0000 (---------------) + withgoogle + 0x000e470b, // n0x08d2 c0x0000 (---------------) + withyoutube + 0x000ff78e, // n0x08d3 c0x0000 (---------------) + writesthisblog + 0x0001bc0d, // n0x08d4 c0x0000 (---------------) + xenapponazure + 0x000d6f08, // n0x08d5 c0x0000 (---------------) + yolasite + 0x00005f82, // n0x08d6 c0x0000 (---------------) + za + 0x10a5f44e, // n0x08d7 c0x0042 (n0x08eb-n0x08ec) o I ap-northeast-2 + 0x10c35247, // n0x08d8 c0x0043 (n0x08ec-n0x08f6) + compute + 0x11035249, // n0x08d9 c0x0044 (n0x08f6-n0x08f8) + compute-1 + 0x00010743, // n0x08da c0x0000 (---------------) + elb + 0x11672e4c, // n0x08db c0x0045 (n0x08f8-n0x08f9) o I eu-central-1 + 0x0004a542, // n0x08dc c0x0000 (---------------) + s3 + 0x00132491, // n0x08dd c0x0000 (---------------) + s3-ap-northeast-1 + 0x0005f391, // n0x08de c0x0000 (---------------) + s3-ap-northeast-2 + 0x00131511, // n0x08df c0x0000 (---------------) + s3-ap-southeast-1 + 0x0004a551, // n0x08e0 c0x0000 (---------------) + s3-ap-southeast-2 + 0x00072d8f, // n0x08e1 c0x0000 (---------------) + s3-eu-central-1 + 0x001101cc, // n0x08e2 c0x0000 (---------------) + s3-eu-west-1 + 0x0011b34d, // n0x08e3 c0x0000 (---------------) + s3-external-1 + 0x0012364d, // n0x08e4 c0x0000 (---------------) + s3-external-2 + 0x00126215, // n0x08e5 c0x0000 (---------------) + s3-fips-us-gov-west-1 + 0x0014694c, // n0x08e6 c0x0000 (---------------) + s3-sa-east-1 + 0x000de150, // n0x08e7 c0x0000 (---------------) + s3-us-gov-west-1 + 0x000c158c, // n0x08e8 c0x0000 (---------------) + s3-us-west-1 + 0x000dc7cc, // n0x08e9 c0x0000 (---------------) + s3-us-west-2 + 0x0017ca09, // n0x08ea c0x0000 (---------------) + us-east-1 + 0x0004a542, // n0x08eb c0x0000 (---------------) + s3 + 0x0013254e, // n0x08ec c0x0000 (---------------) + ap-northeast-1 + 0x0005f44e, // n0x08ed c0x0000 (---------------) + ap-northeast-2 + 0x001315ce, // n0x08ee 
c0x0000 (---------------) + ap-southeast-1 + 0x0004a60e, // n0x08ef c0x0000 (---------------) + ap-southeast-2 + 0x00072e4c, // n0x08f0 c0x0000 (---------------) + eu-central-1 + 0x00110289, // n0x08f1 c0x0000 (---------------) + eu-west-1 + 0x00146a09, // n0x08f2 c0x0000 (---------------) + sa-east-1 + 0x000de20d, // n0x08f3 c0x0000 (---------------) + us-gov-west-1 + 0x000c1649, // n0x08f4 c0x0000 (---------------) + us-west-1 + 0x000dc889, // n0x08f5 c0x0000 (---------------) + us-west-2 + 0x0002a043, // n0x08f6 c0x0000 (---------------) + z-1 + 0x0013e5c3, // n0x08f7 c0x0000 (---------------) + z-2 + 0x0004a542, // n0x08f8 c0x0000 (---------------) + s3 + 0x000092c4, // n0x08f9 c0x0000 (---------------) + apps + 0x014bedc3, // n0x08fa c0x0005 (---------------)* o api + 0x0140ba03, // n0x08fb c0x0005 (---------------)* o ext + 0x000e2dc4, // n0x08fc c0x0000 (---------------) + gist + 0x0001bc03, // n0x08fd c0x0000 (---------------) + xen + 0x00201542, // n0x08fe c0x0000 (---------------) + I ac + 0x00200742, // n0x08ff c0x0000 (---------------) + I co + 0x00202602, // n0x0900 c0x0000 (---------------) + I ed + 0x00207502, // n0x0901 c0x0000 (---------------) + I fi + 0x00202d42, // n0x0902 c0x0000 (---------------) + I go + 0x00200282, // n0x0903 c0x0000 (---------------) + I or + 0x002004c2, // n0x0904 c0x0000 (---------------) + I sa + 0x00233503, // n0x0905 c0x0000 (---------------) + I com + 0x0023a783, // n0x0906 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0907 c0x0000 (---------------) + I gov + 0x003a1083, // n0x0908 c0x0000 (---------------) + I inf + 0x0021fe03, // n0x0909 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x090a c0x0000 (---------------) + I org + 0x000ffa08, // n0x090b c0x0000 (---------------) + blogspot + 0x00233503, // n0x090c c0x0000 (---------------) + I com + 0x0023a783, // n0x090d c0x0000 (---------------) + I edu + 0x0021fe03, // n0x090e c0x0000 (---------------) + I net + 0x0022d1c3, // n0x090f c0x0000 
(---------------) + I org + 0x00048f43, // n0x0910 c0x0000 (---------------) + ath + 0x0026cc83, // n0x0911 c0x0000 (---------------) + I gov + 0x00201542, // n0x0912 c0x0000 (---------------) + I ac + 0x00330b83, // n0x0913 c0x0000 (---------------) + I biz + 0x13e33503, // n0x0914 c0x004f (n0x091f-n0x0920) + I com + 0x0027a1c7, // n0x0915 c0x0000 (---------------) + I ekloges + 0x0026cc83, // n0x0916 c0x0000 (---------------) + I gov + 0x00322cc3, // n0x0917 c0x0000 (---------------) + I ltd + 0x00205284, // n0x0918 c0x0000 (---------------) + I name + 0x0021fe03, // n0x0919 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x091a c0x0000 (---------------) + I org + 0x0028440a, // n0x091b c0x0000 (---------------) + I parliament + 0x00247505, // n0x091c c0x0000 (---------------) + I press + 0x00220e43, // n0x091d c0x0000 (---------------) + I pro + 0x00200142, // n0x091e c0x0000 (---------------) + I tm + 0x000ffa08, // n0x091f c0x0000 (---------------) + blogspot + 0x000ffa08, // n0x0920 c0x0000 (---------------) + blogspot + 0x00000742, // n0x0921 c0x0000 (---------------) + co + 0x000ffa08, // n0x0922 c0x0000 (---------------) + blogspot + 0x00033503, // n0x0923 c0x0000 (---------------) + com + 0x000afa4f, // n0x0924 c0x0000 (---------------) + fuettertdasnetz + 0x0016a18a, // n0x0925 c0x0000 (---------------) + isteingeek + 0x000a3b07, // n0x0926 c0x0000 (---------------) + istmein + 0x0001fc8a, // n0x0927 c0x0000 (---------------) + lebtimnetz + 0x0018460a, // n0x0928 c0x0000 (---------------) + leitungsen + 0x00004acd, // n0x0929 c0x0000 (---------------) + traeumtgerade + 0x000ffa08, // n0x092a c0x0000 (---------------) + blogspot + 0x00233503, // n0x092b c0x0000 (---------------) + I com + 0x0023a783, // n0x092c c0x0000 (---------------) + I edu + 0x0026cc83, // n0x092d c0x0000 (---------------) + I gov + 0x0021fe03, // n0x092e c0x0000 (---------------) + I net + 0x0022d1c3, // n0x092f c0x0000 (---------------) + I org + 0x002011c3, // n0x0930 c0x0000 
(---------------) + I art + 0x00233503, // n0x0931 c0x0000 (---------------) + I com + 0x0023a783, // n0x0932 c0x0000 (---------------) + I edu + 0x00213183, // n0x0933 c0x0000 (---------------) + I gob + 0x0026cc83, // n0x0934 c0x0000 (---------------) + I gov + 0x00209003, // n0x0935 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x0936 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0937 c0x0000 (---------------) + I org + 0x00292103, // n0x0938 c0x0000 (---------------) + I sld + 0x00221a03, // n0x0939 c0x0000 (---------------) + I web + 0x002011c3, // n0x093a c0x0000 (---------------) + I art + 0x002d4884, // n0x093b c0x0000 (---------------) + I asso + 0x00233503, // n0x093c c0x0000 (---------------) + I com + 0x0023a783, // n0x093d c0x0000 (---------------) + I edu + 0x0026cc83, // n0x093e c0x0000 (---------------) + I gov + 0x0021fe03, // n0x093f c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0940 c0x0000 (---------------) + I org + 0x00208103, // n0x0941 c0x0000 (---------------) + I pol + 0x00233503, // n0x0942 c0x0000 (---------------) + I com + 0x0023a783, // n0x0943 c0x0000 (---------------) + I edu + 0x00207503, // n0x0944 c0x0000 (---------------) + I fin + 0x00213183, // n0x0945 c0x0000 (---------------) + I gob + 0x0026cc83, // n0x0946 c0x0000 (---------------) + I gov + 0x003a1244, // n0x0947 c0x0000 (---------------) + I info + 0x00309ac3, // n0x0948 c0x0000 (---------------) + I k12 + 0x00213ac3, // n0x0949 c0x0000 (---------------) + I med + 0x00209003, // n0x094a c0x0000 (---------------) + I mil + 0x0021fe03, // n0x094b c0x0000 (---------------) + I net + 0x0022d1c3, // n0x094c c0x0000 (---------------) + I org + 0x00220e43, // n0x094d c0x0000 (---------------) + I pro + 0x003a6543, // n0x094e c0x0000 (---------------) + I aip + 0x16233503, // n0x094f c0x0058 (n0x0958-n0x0959) + I com + 0x0023a783, // n0x0950 c0x0000 (---------------) + I edu + 0x002b9443, // n0x0951 c0x0000 (---------------) + I fie + 0x0026cc83, // n0x0952 
c0x0000 (---------------) + I gov + 0x0027b703, // n0x0953 c0x0000 (---------------) + I lib + 0x00213ac3, // n0x0954 c0x0000 (---------------) + I med + 0x0022d1c3, // n0x0955 c0x0000 (---------------) + I org + 0x00204603, // n0x0956 c0x0000 (---------------) + I pri + 0x00320cc4, // n0x0957 c0x0000 (---------------) + I riik + 0x000ffa08, // n0x0958 c0x0000 (---------------) + blogspot + 0x16a33503, // n0x0959 c0x005a (n0x0962-n0x0963) + I com + 0x0023a783, // n0x095a c0x0000 (---------------) + I edu + 0x002a7083, // n0x095b c0x0000 (---------------) + I eun + 0x0026cc83, // n0x095c c0x0000 (---------------) + I gov + 0x00209003, // n0x095d c0x0000 (---------------) + I mil + 0x00205284, // n0x095e c0x0000 (---------------) + I name + 0x0021fe03, // n0x095f c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0960 c0x0000 (---------------) + I org + 0x0021d703, // n0x0961 c0x0000 (---------------) + I sci + 0x000ffa08, // n0x0962 c0x0000 (---------------) + blogspot + 0x17233503, // n0x0963 c0x005c (n0x0968-n0x0969) + I com + 0x0023a783, // n0x0964 c0x0000 (---------------) + I edu + 0x00213183, // n0x0965 c0x0000 (---------------) + I gob + 0x00201483, // n0x0966 c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x0967 c0x0000 (---------------) + I org + 0x000ffa08, // n0x0968 c0x0000 (---------------) + blogspot + 0x00330b83, // n0x0969 c0x0000 (---------------) + I biz + 0x00233503, // n0x096a c0x0000 (---------------) + I com + 0x0023a783, // n0x096b c0x0000 (---------------) + I edu + 0x0026cc83, // n0x096c c0x0000 (---------------) + I gov + 0x003a1244, // n0x096d c0x0000 (---------------) + I info + 0x00205284, // n0x096e c0x0000 (---------------) + I name + 0x0021fe03, // n0x096f c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0970 c0x0000 (---------------) + I org + 0x0031a845, // n0x0971 c0x0000 (---------------) + I aland + 0x000ffa08, // n0x0972 c0x0000 (---------------) + blogspot + 0x0003be03, // n0x0973 c0x0000 (---------------) + iki + 
0x002ec408, // n0x0974 c0x0000 (---------------) + I aeroport + 0x00350d87, // n0x0975 c0x0000 (---------------) + I assedic + 0x002d4884, // n0x0976 c0x0000 (---------------) + I asso + 0x0032f106, // n0x0977 c0x0000 (---------------) + I avocat + 0x00346806, // n0x0978 c0x0000 (---------------) + I avoues + 0x000ffa08, // n0x0979 c0x0000 (---------------) + blogspot + 0x0023fdc3, // n0x097a c0x0000 (---------------) + I cci + 0x00209b49, // n0x097b c0x0000 (---------------) + I chambagri + 0x002b2115, // n0x097c c0x0000 (---------------) + I chirurgiens-dentistes + 0x00233503, // n0x097d c0x0000 (---------------) + I com + 0x0031ee12, // n0x097e c0x0000 (---------------) + I experts-comptables + 0x0031ebcf, // n0x097f c0x0000 (---------------) + I geometre-expert + 0x0033d7c4, // n0x0980 c0x0000 (---------------) + I gouv + 0x0022a885, // n0x0981 c0x0000 (---------------) + I greta + 0x002f2f10, // n0x0982 c0x0000 (---------------) + I huissier-justice + 0x00238bc7, // n0x0983 c0x0000 (---------------) + I medecin + 0x00201483, // n0x0984 c0x0000 (---------------) + I nom + 0x0025c988, // n0x0985 c0x0000 (---------------) + I notaires + 0x0034d60a, // n0x0986 c0x0000 (---------------) + I pharmacien + 0x00246184, // n0x0987 c0x0000 (---------------) + I port + 0x002e1043, // n0x0988 c0x0000 (---------------) + I prd + 0x00247506, // n0x0989 c0x0000 (---------------) + I presse + 0x00200142, // n0x098a c0x0000 (---------------) + I tm + 0x002d1c8b, // n0x098b c0x0000 (---------------) + I veterinaire + 0x00233503, // n0x098c c0x0000 (---------------) + I com + 0x0023a783, // n0x098d c0x0000 (---------------) + I edu + 0x0026cc83, // n0x098e c0x0000 (---------------) + I gov + 0x00209003, // n0x098f c0x0000 (---------------) + I mil + 0x0021fe03, // n0x0990 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0991 c0x0000 (---------------) + I org + 0x002e5543, // n0x0992 c0x0000 (---------------) + I pvt + 0x00200742, // n0x0993 c0x0000 (---------------) + I co + 
0x0021fe03, // n0x0994 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0995 c0x0000 (---------------) + I org + 0x00233503, // n0x0996 c0x0000 (---------------) + I com + 0x0023a783, // n0x0997 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0998 c0x0000 (---------------) + I gov + 0x00209003, // n0x0999 c0x0000 (---------------) + I mil + 0x0022d1c3, // n0x099a c0x0000 (---------------) + I org + 0x00233503, // n0x099b c0x0000 (---------------) + I com + 0x0023a783, // n0x099c c0x0000 (---------------) + I edu + 0x0026cc83, // n0x099d c0x0000 (---------------) + I gov + 0x00322cc3, // n0x099e c0x0000 (---------------) + I ltd + 0x00218303, // n0x099f c0x0000 (---------------) + I mod + 0x0022d1c3, // n0x09a0 c0x0000 (---------------) + I org + 0x00200742, // n0x09a1 c0x0000 (---------------) + I co + 0x00233503, // n0x09a2 c0x0000 (---------------) + I com + 0x0023a783, // n0x09a3 c0x0000 (---------------) + I edu + 0x0021fe03, // n0x09a4 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09a5 c0x0000 (---------------) + I org + 0x00201542, // n0x09a6 c0x0000 (---------------) + I ac + 0x00233503, // n0x09a7 c0x0000 (---------------) + I com + 0x0023a783, // n0x09a8 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x09a9 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x09aa c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09ab c0x0000 (---------------) + I org + 0x002d4884, // n0x09ac c0x0000 (---------------) + I asso + 0x00233503, // n0x09ad c0x0000 (---------------) + I com + 0x0023a783, // n0x09ae c0x0000 (---------------) + I edu + 0x00207104, // n0x09af c0x0000 (---------------) + I mobi + 0x0021fe03, // n0x09b0 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09b1 c0x0000 (---------------) + I org + 0x000ffa08, // n0x09b2 c0x0000 (---------------) + blogspot + 0x00233503, // n0x09b3 c0x0000 (---------------) + I com + 0x0023a783, // n0x09b4 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x09b5 c0x0000 (---------------) 
+ I gov + 0x0021fe03, // n0x09b6 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09b7 c0x0000 (---------------) + I org + 0x00233503, // n0x09b8 c0x0000 (---------------) + I com + 0x0023a783, // n0x09b9 c0x0000 (---------------) + I edu + 0x00213183, // n0x09ba c0x0000 (---------------) + I gob + 0x0021d883, // n0x09bb c0x0000 (---------------) + I ind + 0x00209003, // n0x09bc c0x0000 (---------------) + I mil + 0x0021fe03, // n0x09bd c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09be c0x0000 (---------------) + I org + 0x00200742, // n0x09bf c0x0000 (---------------) + I co + 0x00233503, // n0x09c0 c0x0000 (---------------) + I com + 0x0023a783, // n0x09c1 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x09c2 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x09c3 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09c4 c0x0000 (---------------) + I org + 0x000ffa08, // n0x09c5 c0x0000 (---------------) + blogspot + 0x00233503, // n0x09c6 c0x0000 (---------------) + I com + 0x0023a783, // n0x09c7 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x09c8 c0x0000 (---------------) + I gov + 0x00317243, // n0x09c9 c0x0000 (---------------) + I idv + 0x0002e7c3, // n0x09ca c0x0000 (---------------) + inc + 0x00122cc3, // n0x09cb c0x0000 (---------------) + ltd + 0x0021fe03, // n0x09cc c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09cd c0x0000 (---------------) + I org + 0x003029ca, // n0x09ce c0x0000 (---------------) + I xn--55qx5d + 0x0031bd89, // n0x09cf c0x0000 (---------------) + I xn--ciqpn + 0x0033ac4b, // n0x09d0 c0x0000 (---------------) + I xn--gmq050i + 0x0033b1ca, // n0x09d1 c0x0000 (---------------) + I xn--gmqw5a + 0x0034280a, // n0x09d2 c0x0000 (---------------) + I xn--io0a7i + 0x00353a8b, // n0x09d3 c0x0000 (---------------) + I xn--lcvr32d + 0x0036704a, // n0x09d4 c0x0000 (---------------) + I xn--mk0axi + 0x0036fa4a, // n0x09d5 c0x0000 (---------------) + I xn--mxtq1m + 0x0037648a, // n0x09d6 c0x0000 
(---------------) + I xn--od0alg + 0x0037670b, // n0x09d7 c0x0000 (---------------) + I xn--od0aq3b + 0x00392609, // n0x09d8 c0x0000 (---------------) + I xn--tn0ag + 0x003941ca, // n0x09d9 c0x0000 (---------------) + I xn--uc0atv + 0x0039470b, // n0x09da c0x0000 (---------------) + I xn--uc0ay4a + 0x0039cfcb, // n0x09db c0x0000 (---------------) + I xn--wcvs22d + 0x003a57ca, // n0x09dc c0x0000 (---------------) + I xn--zf0avx + 0x00233503, // n0x09dd c0x0000 (---------------) + I com + 0x0023a783, // n0x09de c0x0000 (---------------) + I edu + 0x00213183, // n0x09df c0x0000 (---------------) + I gob + 0x00209003, // n0x09e0 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x09e1 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09e2 c0x0000 (---------------) + I org + 0x000ffa08, // n0x09e3 c0x0000 (---------------) + blogspot + 0x00233503, // n0x09e4 c0x0000 (---------------) + I com + 0x00263d84, // n0x09e5 c0x0000 (---------------) + I from + 0x00212582, // n0x09e6 c0x0000 (---------------) + I iz + 0x00205284, // n0x09e7 c0x0000 (---------------) + I name + 0x002a1985, // n0x09e8 c0x0000 (---------------) + I adult + 0x002011c3, // n0x09e9 c0x0000 (---------------) + I art + 0x002d4884, // n0x09ea c0x0000 (---------------) + I asso + 0x00233503, // n0x09eb c0x0000 (---------------) + I com + 0x0023d684, // n0x09ec c0x0000 (---------------) + I coop + 0x0023a783, // n0x09ed c0x0000 (---------------) + I edu + 0x0024d9c4, // n0x09ee c0x0000 (---------------) + I firm + 0x0033d7c4, // n0x09ef c0x0000 (---------------) + I gouv + 0x003a1244, // n0x09f0 c0x0000 (---------------) + I info + 0x00213ac3, // n0x09f1 c0x0000 (---------------) + I med + 0x0021fe03, // n0x09f2 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09f3 c0x0000 (---------------) + I org + 0x00295005, // n0x09f4 c0x0000 (---------------) + I perso + 0x00208103, // n0x09f5 c0x0000 (---------------) + I pol + 0x00220e43, // n0x09f6 c0x0000 (---------------) + I pro + 0x00285b43, // 
n0x09f7 c0x0000 (---------------) + I rel + 0x00352004, // n0x09f8 c0x0000 (---------------) + I shop + 0x002ee544, // n0x09f9 c0x0000 (---------------) + I 2000 + 0x00258185, // n0x09fa c0x0000 (---------------) + I agrar + 0x000ffa08, // n0x09fb c0x0000 (---------------) + blogspot + 0x002f9244, // n0x09fc c0x0000 (---------------) + I bolt + 0x0037bc46, // n0x09fd c0x0000 (---------------) + I casino + 0x00286744, // n0x09fe c0x0000 (---------------) + I city + 0x00200742, // n0x09ff c0x0000 (---------------) + I co + 0x00343507, // n0x0a00 c0x0000 (---------------) + I erotica + 0x00250887, // n0x0a01 c0x0000 (---------------) + I erotika + 0x0024b784, // n0x0a02 c0x0000 (---------------) + I film + 0x0025b085, // n0x0a03 c0x0000 (---------------) + I forum + 0x003700c5, // n0x0a04 c0x0000 (---------------) + I games + 0x00234dc5, // n0x0a05 c0x0000 (---------------) + I hotel + 0x003a1244, // n0x0a06 c0x0000 (---------------) + I info + 0x00224408, // n0x0a07 c0x0000 (---------------) + I ingatlan + 0x00293246, // n0x0a08 c0x0000 (---------------) + I jogasz + 0x002cce48, // n0x0a09 c0x0000 (---------------) + I konyvelo + 0x002435c5, // n0x0a0a c0x0000 (---------------) + I lakas + 0x003025c5, // n0x0a0b c0x0000 (---------------) + I media + 0x00221dc4, // n0x0a0c c0x0000 (---------------) + I news + 0x0022d1c3, // n0x0a0d c0x0000 (---------------) + I org + 0x002e1c44, // n0x0a0e c0x0000 (---------------) + I priv + 0x00352c46, // n0x0a0f c0x0000 (---------------) + I reklam + 0x00247603, // n0x0a10 c0x0000 (---------------) + I sex + 0x00352004, // n0x0a11 c0x0000 (---------------) + I shop + 0x00294905, // n0x0a12 c0x0000 (---------------) + I sport + 0x0023c004, // n0x0a13 c0x0000 (---------------) + I suli + 0x0020b984, // n0x0a14 c0x0000 (---------------) + I szex + 0x00200142, // n0x0a15 c0x0000 (---------------) + I tm + 0x00270746, // n0x0a16 c0x0000 (---------------) + I tozsde + 0x00389006, // n0x0a17 c0x0000 (---------------) + I utazas + 
0x002f5685, // n0x0a18 c0x0000 (---------------) + I video + 0x00201542, // n0x0a19 c0x0000 (---------------) + I ac + 0x00330b83, // n0x0a1a c0x0000 (---------------) + I biz + 0x1c200742, // n0x0a1b c0x0070 (n0x0a24-n0x0a25) + I co + 0x0023bb04, // n0x0a1c c0x0000 (---------------) + I desa + 0x00202d42, // n0x0a1d c0x0000 (---------------) + I go + 0x00209003, // n0x0a1e c0x0000 (---------------) + I mil + 0x00226f02, // n0x0a1f c0x0000 (---------------) + I my + 0x0021fe03, // n0x0a20 c0x0000 (---------------) + I net + 0x00200282, // n0x0a21 c0x0000 (---------------) + I or + 0x00217443, // n0x0a22 c0x0000 (---------------) + I sch + 0x00221a03, // n0x0a23 c0x0000 (---------------) + I web + 0x000ffa08, // n0x0a24 c0x0000 (---------------) + blogspot + 0x000ffa08, // n0x0a25 c0x0000 (---------------) + blogspot + 0x0026cc83, // n0x0a26 c0x0000 (---------------) + I gov + 0x00201542, // n0x0a27 c0x0000 (---------------) + I ac + 0x1ce00742, // n0x0a28 c0x0073 (n0x0a2f-n0x0a30) + I co + 0x0026cc83, // n0x0a29 c0x0000 (---------------) + I gov + 0x00268a83, // n0x0a2a c0x0000 (---------------) + I idf + 0x00309ac3, // n0x0a2b c0x0000 (---------------) + I k12 + 0x002335c4, // n0x0a2c c0x0000 (---------------) + I muni + 0x0021fe03, // n0x0a2d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0a2e c0x0000 (---------------) + I org + 0x000ffa08, // n0x0a2f c0x0000 (---------------) + blogspot + 0x00201542, // n0x0a30 c0x0000 (---------------) + I ac + 0x1d600742, // n0x0a31 c0x0075 (n0x0a37-n0x0a39) + I co + 0x00233503, // n0x0a32 c0x0000 (---------------) + I com + 0x0021fe03, // n0x0a33 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0a34 c0x0000 (---------------) + I org + 0x0020e842, // n0x0a35 c0x0000 (---------------) + I tt + 0x00224e42, // n0x0a36 c0x0000 (---------------) + I tv + 0x00322cc3, // n0x0a37 c0x0000 (---------------) + I ltd + 0x002db143, // n0x0a38 c0x0000 (---------------) + I plc + 0x00201542, // n0x0a39 c0x0000 (---------------) + 
I ac + 0x000ffa08, // n0x0a3a c0x0000 (---------------) + blogspot + 0x00200742, // n0x0a3b c0x0000 (---------------) + I co + 0x0023a783, // n0x0a3c c0x0000 (---------------) + I edu + 0x0024d9c4, // n0x0a3d c0x0000 (---------------) + I firm + 0x00205843, // n0x0a3e c0x0000 (---------------) + I gen + 0x0026cc83, // n0x0a3f c0x0000 (---------------) + I gov + 0x0021d883, // n0x0a40 c0x0000 (---------------) + I ind + 0x00209003, // n0x0a41 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x0a42 c0x0000 (---------------) + I net + 0x00218f83, // n0x0a43 c0x0000 (---------------) + I nic + 0x0022d1c3, // n0x0a44 c0x0000 (---------------) + I org + 0x0021d683, // n0x0a45 c0x0000 (---------------) + I res + 0x0011e793, // n0x0a46 c0x0000 (---------------) + barrel-of-knowledge + 0x001246d4, // n0x0a47 c0x0000 (---------------) + barrell-of-knowledge + 0x00013886, // n0x0a48 c0x0000 (---------------) + dyndns + 0x000562c7, // n0x0a49 c0x0000 (---------------) + for-our + 0x00155d09, // n0x0a4a c0x0000 (---------------) + groks-the + 0x000ebb0a, // n0x0a4b c0x0000 (---------------) + groks-this + 0x00087c4d, // n0x0a4c c0x0000 (---------------) + here-for-more + 0x001a408a, // n0x0a4d c0x0000 (---------------) + knowsitall + 0x0006ba86, // n0x0a4e c0x0000 (---------------) + selfip + 0x000eadc6, // n0x0a4f c0x0000 (---------------) + webhop + 0x00204b82, // n0x0a50 c0x0000 (---------------) + I eu + 0x00233503, // n0x0a51 c0x0000 (---------------) + I com + 0x00019506, // n0x0a52 c0x0000 (---------------) + github + 0x00155cc5, // n0x0a53 c0x0000 (---------------) + ngrok + 0x0000cb83, // n0x0a54 c0x0000 (---------------) + nid + 0x00011f88, // n0x0a55 c0x0000 (---------------) + pantheon + 0x000af708, // n0x0a56 c0x0000 (---------------) + sandcats + 0x00233503, // n0x0a57 c0x0000 (---------------) + I com + 0x0023a783, // n0x0a58 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0a59 c0x0000 (---------------) + I gov + 0x00209003, // n0x0a5a c0x0000 
(---------------) + I mil + 0x0021fe03, // n0x0a5b c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0a5c c0x0000 (---------------) + I org + 0x00201542, // n0x0a5d c0x0000 (---------------) + I ac + 0x00200742, // n0x0a5e c0x0000 (---------------) + I co + 0x0026cc83, // n0x0a5f c0x0000 (---------------) + I gov + 0x0020c782, // n0x0a60 c0x0000 (---------------) + I id + 0x0021fe03, // n0x0a61 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0a62 c0x0000 (---------------) + I org + 0x00217443, // n0x0a63 c0x0000 (---------------) + I sch + 0x0035d94f, // n0x0a64 c0x0000 (---------------) + I xn--mgba3a4f16a + 0x0035dd0e, // n0x0a65 c0x0000 (---------------) + I xn--mgba3a4fra + 0x000ffa08, // n0x0a66 c0x0000 (---------------) + blogspot + 0x00233503, // n0x0a67 c0x0000 (---------------) + I com + 0x00048107, // n0x0a68 c0x0000 (---------------) + cupcake + 0x0023a783, // n0x0a69 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0a6a c0x0000 (---------------) + I gov + 0x00201603, // n0x0a6b c0x0000 (---------------) + I int + 0x0021fe03, // n0x0a6c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0a6d c0x0000 (---------------) + I org + 0x0021ca03, // n0x0a6e c0x0000 (---------------) + I abr + 0x002ed6c7, // n0x0a6f c0x0000 (---------------) + I abruzzo + 0x00201002, // n0x0a70 c0x0000 (---------------) + I ag + 0x002cd4c9, // n0x0a71 c0x0000 (---------------) + I agrigento + 0x002001c2, // n0x0a72 c0x0000 (---------------) + I al + 0x00233b0b, // n0x0a73 c0x0000 (---------------) + I alessandria + 0x002dfc4a, // n0x0a74 c0x0000 (---------------) + I alto-adige + 0x002e3489, // n0x0a75 c0x0000 (---------------) + I altoadige + 0x00200502, // n0x0a76 c0x0000 (---------------) + I an + 0x00350806, // n0x0a77 c0x0000 (---------------) + I ancona + 0x002847d5, // n0x0a78 c0x0000 (---------------) + I andria-barletta-trani + 0x00233c55, // n0x0a79 c0x0000 (---------------) + I andria-trani-barletta + 0x00290513, // n0x0a7a c0x0000 (---------------) + 
I andriabarlettatrani + 0x002341d3, // n0x0a7b c0x0000 (---------------) + I andriatranibarletta + 0x002029c2, // n0x0a7c c0x0000 (---------------) + I ao + 0x00216fc5, // n0x0a7d c0x0000 (---------------) + I aosta + 0x0030f30c, // n0x0a7e c0x0000 (---------------) + I aosta-valley + 0x00216fcb, // n0x0a7f c0x0000 (---------------) + I aostavalley + 0x00251085, // n0x0a80 c0x0000 (---------------) + I aoste + 0x00200d02, // n0x0a81 c0x0000 (---------------) + I ap + 0x002003c2, // n0x0a82 c0x0000 (---------------) + I aq + 0x0036c346, // n0x0a83 c0x0000 (---------------) + I aquila + 0x00200a42, // n0x0a84 c0x0000 (---------------) + I ar + 0x0027a406, // n0x0a85 c0x0000 (---------------) + I arezzo + 0x00397ccd, // n0x0a86 c0x0000 (---------------) + I ascoli-piceno + 0x0034c1cc, // n0x0a87 c0x0000 (---------------) + I ascolipiceno + 0x0021e884, // n0x0a88 c0x0000 (---------------) + I asti + 0x00200102, // n0x0a89 c0x0000 (---------------) + I at + 0x00203402, // n0x0a8a c0x0000 (---------------) + I av + 0x00224c08, // n0x0a8b c0x0000 (---------------) + I avellino + 0x00202002, // n0x0a8c c0x0000 (---------------) + I ba + 0x00248586, // n0x0a8d c0x0000 (---------------) + I balsan + 0x00249204, // n0x0a8e c0x0000 (---------------) + I bari + 0x00284995, // n0x0a8f c0x0000 (---------------) + I barletta-trani-andria + 0x00290693, // n0x0a90 c0x0000 (---------------) + I barlettatraniandria + 0x00207fc3, // n0x0a91 c0x0000 (---------------) + I bas + 0x0032ee0a, // n0x0a92 c0x0000 (---------------) + I basilicata + 0x0028e2c7, // n0x0a93 c0x0000 (---------------) + I belluno + 0x002e4949, // n0x0a94 c0x0000 (---------------) + I benevento + 0x00228d47, // n0x0a95 c0x0000 (---------------) + I bergamo + 0x002ee482, // n0x0a96 c0x0000 (---------------) + I bg + 0x00200002, // n0x0a97 c0x0000 (---------------) + I bi + 0x003a4cc6, // n0x0a98 c0x0000 (---------------) + I biella + 0x0020c8c2, // n0x0a99 c0x0000 (---------------) + I bl + 0x000ffa08, // n0x0a9a 
c0x0000 (---------------) + blogspot + 0x002104c2, // n0x0a9b c0x0000 (---------------) + I bn + 0x0020e402, // n0x0a9c c0x0000 (---------------) + I bo + 0x0038df47, // n0x0a9d c0x0000 (---------------) + I bologna + 0x0020fec7, // n0x0a9e c0x0000 (---------------) + I bolzano + 0x0021c105, // n0x0a9f c0x0000 (---------------) + I bozen + 0x0021c402, // n0x0aa0 c0x0000 (---------------) + I br + 0x0021d647, // n0x0aa1 c0x0000 (---------------) + I brescia + 0x0021d808, // n0x0aa2 c0x0000 (---------------) + I brindisi + 0x00237542, // n0x0aa3 c0x0000 (---------------) + I bs + 0x0021fd02, // n0x0aa4 c0x0000 (---------------) + I bt + 0x00230bc2, // n0x0aa5 c0x0000 (---------------) + I bz + 0x00200302, // n0x0aa6 c0x0000 (---------------) + I ca + 0x0023eb48, // n0x0aa7 c0x0000 (---------------) + I cagliari + 0x00213543, // n0x0aa8 c0x0000 (---------------) + I cal + 0x00285688, // n0x0aa9 c0x0000 (---------------) + I calabria + 0x0023ad8d, // n0x0aaa c0x0000 (---------------) + I caltanissetta + 0x00221ac3, // n0x0aab c0x0000 (---------------) + I cam + 0x00316e08, // n0x0aac c0x0000 (---------------) + I campania + 0x00241acf, // n0x0aad c0x0000 (---------------) + I campidano-medio + 0x00241e8e, // n0x0aae c0x0000 (---------------) + I campidanomedio + 0x0034364a, // n0x0aaf c0x0000 (---------------) + I campobasso + 0x002f6e11, // n0x0ab0 c0x0000 (---------------) + I carbonia-iglesias + 0x002f7290, // n0x0ab1 c0x0000 (---------------) + I carboniaiglesias + 0x002b3b4d, // n0x0ab2 c0x0000 (---------------) + I carrara-massa + 0x002b3e8c, // n0x0ab3 c0x0000 (---------------) + I carraramassa + 0x0022c607, // n0x0ab4 c0x0000 (---------------) + I caserta + 0x0032ef87, // n0x0ab5 c0x0000 (---------------) + I catania + 0x0032f1c9, // n0x0ab6 c0x0000 (---------------) + I catanzaro + 0x0021e982, // n0x0ab7 c0x0000 (---------------) + I cb + 0x00200b42, // n0x0ab8 c0x0000 (---------------) + I ce + 0x0025870c, // n0x0ab9 c0x0000 (---------------) + I cesena-forli 
+ 0x00258a0b, // n0x0aba c0x0000 (---------------) + I cesenaforli + 0x00201582, // n0x0abb c0x0000 (---------------) + I ch + 0x002d5a46, // n0x0abc c0x0000 (---------------) + I chieti + 0x00200682, // n0x0abd c0x0000 (---------------) + I ci + 0x00209182, // n0x0abe c0x0000 (---------------) + I cl + 0x0021ba42, // n0x0abf c0x0000 (---------------) + I cn + 0x00200742, // n0x0ac0 c0x0000 (---------------) + I co + 0x00234804, // n0x0ac1 c0x0000 (---------------) + I como + 0x00240e07, // n0x0ac2 c0x0000 (---------------) + I cosenza + 0x002049c2, // n0x0ac3 c0x0000 (---------------) + I cr + 0x00244d07, // n0x0ac4 c0x0000 (---------------) + I cremona + 0x00245f87, // n0x0ac5 c0x0000 (---------------) + I crotone + 0x00211c82, // n0x0ac6 c0x0000 (---------------) + I cs + 0x00231382, // n0x0ac7 c0x0000 (---------------) + I ct + 0x00247fc5, // n0x0ac8 c0x0000 (---------------) + I cuneo + 0x00229ec2, // n0x0ac9 c0x0000 (---------------) + I cz + 0x0025a38e, // n0x0aca c0x0000 (---------------) + I dell-ogliastra + 0x0026714d, // n0x0acb c0x0000 (---------------) + I dellogliastra + 0x0023a783, // n0x0acc c0x0000 (---------------) + I edu + 0x002543ce, // n0x0acd c0x0000 (---------------) + I emilia-romagna + 0x0036dc0d, // n0x0ace c0x0000 (---------------) + I emiliaromagna + 0x00360a83, // n0x0acf c0x0000 (---------------) + I emr + 0x00202bc2, // n0x0ad0 c0x0000 (---------------) + I en + 0x00205a84, // n0x0ad1 c0x0000 (---------------) + I enna + 0x0024b242, // n0x0ad2 c0x0000 (---------------) + I fc + 0x0020b302, // n0x0ad3 c0x0000 (---------------) + I fe + 0x002e2545, // n0x0ad4 c0x0000 (---------------) + I fermo + 0x00300807, // n0x0ad5 c0x0000 (---------------) + I ferrara + 0x0035d282, // n0x0ad6 c0x0000 (---------------) + I fg + 0x00207502, // n0x0ad7 c0x0000 (---------------) + I fi + 0x0024d307, // n0x0ad8 c0x0000 (---------------) + I firenze + 0x00252748, // n0x0ad9 c0x0000 (---------------) + I florence + 0x00242902, // n0x0ada c0x0000 
(---------------) + I fm + 0x003a12c6, // n0x0adb c0x0000 (---------------) + I foggia + 0x0025858c, // n0x0adc c0x0000 (---------------) + I forli-cesena + 0x002588cb, // n0x0add c0x0000 (---------------) + I forlicesena + 0x00200582, // n0x0ade c0x0000 (---------------) + I fr + 0x0026050f, // n0x0adf c0x0000 (---------------) + I friuli-v-giulia + 0x002608d0, // n0x0ae0 c0x0000 (---------------) + I friuli-ve-giulia + 0x00260ccf, // n0x0ae1 c0x0000 (---------------) + I friuli-vegiulia + 0x00261095, // n0x0ae2 c0x0000 (---------------) + I friuli-venezia-giulia + 0x002615d4, // n0x0ae3 c0x0000 (---------------) + I friuli-veneziagiulia + 0x00261ace, // n0x0ae4 c0x0000 (---------------) + I friuli-vgiulia + 0x00261e4e, // n0x0ae5 c0x0000 (---------------) + I friuliv-giulia + 0x002621cf, // n0x0ae6 c0x0000 (---------------) + I friulive-giulia + 0x0026258e, // n0x0ae7 c0x0000 (---------------) + I friulivegiulia + 0x00262914, // n0x0ae8 c0x0000 (---------------) + I friulivenezia-giulia + 0x00262e13, // n0x0ae9 c0x0000 (---------------) + I friuliveneziagiulia + 0x002632cd, // n0x0aea c0x0000 (---------------) + I friulivgiulia + 0x002767c9, // n0x0aeb c0x0000 (---------------) + I frosinone + 0x00288803, // n0x0aec c0x0000 (---------------) + I fvg + 0x002026c2, // n0x0aed c0x0000 (---------------) + I ge + 0x00307105, // n0x0aee c0x0000 (---------------) + I genoa + 0x00205846, // n0x0aef c0x0000 (---------------) + I genova + 0x00202d42, // n0x0af0 c0x0000 (---------------) + I go + 0x0026edc7, // n0x0af1 c0x0000 (---------------) + I gorizia + 0x0026cc83, // n0x0af2 c0x0000 (---------------) + I gov + 0x00200c82, // n0x0af3 c0x0000 (---------------) + I gr + 0x00311b48, // n0x0af4 c0x0000 (---------------) + I grosseto + 0x002f7051, // n0x0af5 c0x0000 (---------------) + I iglesias-carbonia + 0x002f7490, // n0x0af6 c0x0000 (---------------) + I iglesiascarbonia + 0x00205c42, // n0x0af7 c0x0000 (---------------) + I im + 0x00352747, // n0x0af8 c0x0000 
(---------------) + I imperia + 0x002006c2, // n0x0af9 c0x0000 (---------------) + I is + 0x0025dc87, // n0x0afa c0x0000 (---------------) + I isernia + 0x00206fc2, // n0x0afb c0x0000 (---------------) + I kr + 0x0025e389, // n0x0afc c0x0000 (---------------) + I la-spezia + 0x0036c307, // n0x0afd c0x0000 (---------------) + I laquila + 0x0025fe48, // n0x0afe c0x0000 (---------------) + I laspezia + 0x00223606, // n0x0aff c0x0000 (---------------) + I latina + 0x002db043, // n0x0b00 c0x0000 (---------------) + I laz + 0x00309045, // n0x0b01 c0x0000 (---------------) + I lazio + 0x0023aa02, // n0x0b02 c0x0000 (---------------) + I lc + 0x0020acc2, // n0x0b03 c0x0000 (---------------) + I le + 0x003a50c5, // n0x0b04 c0x0000 (---------------) + I lecce + 0x0022e105, // n0x0b05 c0x0000 (---------------) + I lecco + 0x00207202, // n0x0b06 c0x0000 (---------------) + I li + 0x0023c083, // n0x0b07 c0x0000 (---------------) + I lig + 0x0023c087, // n0x0b08 c0x0000 (---------------) + I liguria + 0x00210307, // n0x0b09 c0x0000 (---------------) + I livorno + 0x00200242, // n0x0b0a c0x0000 (---------------) + I lo + 0x00259dc4, // n0x0b0b c0x0000 (---------------) + I lodi + 0x00214303, // n0x0b0c c0x0000 (---------------) + I lom + 0x002c4149, // n0x0b0d c0x0000 (---------------) + I lombardia + 0x002db908, // n0x0b0e c0x0000 (---------------) + I lombardy + 0x00209e02, // n0x0b0f c0x0000 (---------------) + I lt + 0x00202f42, // n0x0b10 c0x0000 (---------------) + I lu + 0x0026d147, // n0x0b11 c0x0000 (---------------) + I lucania + 0x002b6305, // n0x0b12 c0x0000 (---------------) + I lucca + 0x00316908, // n0x0b13 c0x0000 (---------------) + I macerata + 0x003a0a07, // n0x0b14 c0x0000 (---------------) + I mantova + 0x00201183, // n0x0b15 c0x0000 (---------------) + I mar + 0x00284246, // n0x0b16 c0x0000 (---------------) + I marche + 0x002b39cd, // n0x0b17 c0x0000 (---------------) + I massa-carrara + 0x002b3d4c, // n0x0b18 c0x0000 (---------------) + I massacarrara + 
0x00256e86, // n0x0b19 c0x0000 (---------------) + I matera + 0x00208602, // n0x0b1a c0x0000 (---------------) + I mb + 0x0022ac02, // n0x0b1b c0x0000 (---------------) + I mc + 0x00203e82, // n0x0b1c c0x0000 (---------------) + I me + 0x0024194f, // n0x0b1d c0x0000 (---------------) + I medio-campidano + 0x00241d4e, // n0x0b1e c0x0000 (---------------) + I mediocampidano + 0x00370147, // n0x0b1f c0x0000 (---------------) + I messina + 0x00209002, // n0x0b20 c0x0000 (---------------) + I mi + 0x00342685, // n0x0b21 c0x0000 (---------------) + I milan + 0x00342686, // n0x0b22 c0x0000 (---------------) + I milano + 0x0021fdc2, // n0x0b23 c0x0000 (---------------) + I mn + 0x00207102, // n0x0b24 c0x0000 (---------------) + I mo + 0x00218306, // n0x0b25 c0x0000 (---------------) + I modena + 0x002133c3, // n0x0b26 c0x0000 (---------------) + I mol + 0x0025dbc6, // n0x0b27 c0x0000 (---------------) + I molise + 0x002c2d45, // n0x0b28 c0x0000 (---------------) + I monza + 0x002c2d4d, // n0x0b29 c0x0000 (---------------) + I monza-brianza + 0x002c3595, // n0x0b2a c0x0000 (---------------) + I monza-e-della-brianza + 0x002c3d4c, // n0x0b2b c0x0000 (---------------) + I monzabrianza + 0x002c4a4d, // n0x0b2c c0x0000 (---------------) + I monzaebrianza + 0x002c4e12, // n0x0b2d c0x0000 (---------------) + I monzaedellabrianza + 0x0020f702, // n0x0b2e c0x0000 (---------------) + I ms + 0x00204c02, // n0x0b2f c0x0000 (---------------) + I mt + 0x00201402, // n0x0b30 c0x0000 (---------------) + I na + 0x00235006, // n0x0b31 c0x0000 (---------------) + I naples + 0x002a3d86, // n0x0b32 c0x0000 (---------------) + I napoli + 0x00200c02, // n0x0b33 c0x0000 (---------------) + I no + 0x002058c6, // n0x0b34 c0x0000 (---------------) + I novara + 0x002017c2, // n0x0b35 c0x0000 (---------------) + I nu + 0x0039c105, // n0x0b36 c0x0000 (---------------) + I nuoro + 0x00200c42, // n0x0b37 c0x0000 (---------------) + I og + 0x0025a4c9, // n0x0b38 c0x0000 (---------------) + I ogliastra + 
0x0027568c, // n0x0b39 c0x0000 (---------------) + I olbia-tempio + 0x002759cb, // n0x0b3a c0x0000 (---------------) + I olbiatempio + 0x00200282, // n0x0b3b c0x0000 (---------------) + I or + 0x00252b88, // n0x0b3c c0x0000 (---------------) + I oristano + 0x00200782, // n0x0b3d c0x0000 (---------------) + I ot + 0x0020ac42, // n0x0b3e c0x0000 (---------------) + I pa + 0x00216d86, // n0x0b3f c0x0000 (---------------) + I padova + 0x00361405, // n0x0b40 c0x0000 (---------------) + I padua + 0x00379f47, // n0x0b41 c0x0000 (---------------) + I palermo + 0x00395345, // n0x0b42 c0x0000 (---------------) + I parma + 0x002dbfc5, // n0x0b43 c0x0000 (---------------) + I pavia + 0x00248182, // n0x0b44 c0x0000 (---------------) + I pc + 0x00352102, // n0x0b45 c0x0000 (---------------) + I pd + 0x00207782, // n0x0b46 c0x0000 (---------------) + I pe + 0x00270d47, // n0x0b47 c0x0000 (---------------) + I perugia + 0x0031c84d, // n0x0b48 c0x0000 (---------------) + I pesaro-urbino + 0x0031cbcc, // n0x0b49 c0x0000 (---------------) + I pesarourbino + 0x00236987, // n0x0b4a c0x0000 (---------------) + I pescara + 0x002495c2, // n0x0b4b c0x0000 (---------------) + I pg + 0x00225702, // n0x0b4c c0x0000 (---------------) + I pi + 0x00338288, // n0x0b4d c0x0000 (---------------) + I piacenza + 0x00258dc8, // n0x0b4e c0x0000 (---------------) + I piedmont + 0x002d6308, // n0x0b4f c0x0000 (---------------) + I piemonte + 0x002df704, // n0x0b50 c0x0000 (---------------) + I pisa + 0x002bee07, // n0x0b51 c0x0000 (---------------) + I pistoia + 0x002dcc83, // n0x0b52 c0x0000 (---------------) + I pmn + 0x002493c2, // n0x0b53 c0x0000 (---------------) + I pn + 0x00200942, // n0x0b54 c0x0000 (---------------) + I po + 0x002dfec9, // n0x0b55 c0x0000 (---------------) + I pordenone + 0x002093c7, // n0x0b56 c0x0000 (---------------) + I potenza + 0x00204602, // n0x0b57 c0x0000 (---------------) + I pr + 0x00270245, // n0x0b58 c0x0000 (---------------) + I prato + 0x0028c9c2, // n0x0b59 
c0x0000 (---------------) + I pt + 0x00235302, // n0x0b5a c0x0000 (---------------) + I pu + 0x00278843, // n0x0b5b c0x0000 (---------------) + I pug + 0x00278846, // n0x0b5c c0x0000 (---------------) + I puglia + 0x002e5542, // n0x0b5d c0x0000 (---------------) + I pv + 0x002e6302, // n0x0b5e c0x0000 (---------------) + I pz + 0x002005c2, // n0x0b5f c0x0000 (---------------) + I ra + 0x0030c246, // n0x0b60 c0x0000 (---------------) + I ragusa + 0x002059c7, // n0x0b61 c0x0000 (---------------) + I ravenna + 0x002002c2, // n0x0b62 c0x0000 (---------------) + I rc + 0x00207002, // n0x0b63 c0x0000 (---------------) + I re + 0x002ed34f, // n0x0b64 c0x0000 (---------------) + I reggio-calabria + 0x0025420d, // n0x0b65 c0x0000 (---------------) + I reggio-emilia + 0x0028550e, // n0x0b66 c0x0000 (---------------) + I reggiocalabria + 0x0036da8c, // n0x0b67 c0x0000 (---------------) + I reggioemilia + 0x0020ce02, // n0x0b68 c0x0000 (---------------) + I rg + 0x00200a82, // n0x0b69 c0x0000 (---------------) + I ri + 0x00223445, // n0x0b6a c0x0000 (---------------) + I rieti + 0x003a5bc6, // n0x0b6b c0x0000 (---------------) + I rimini + 0x00222182, // n0x0b6c c0x0000 (---------------) + I rm + 0x0020cb42, // n0x0b6d c0x0000 (---------------) + I rn + 0x00202202, // n0x0b6e c0x0000 (---------------) + I ro + 0x00254584, // n0x0b6f c0x0000 (---------------) + I roma + 0x002dd584, // n0x0b70 c0x0000 (---------------) + I rome + 0x00334fc6, // n0x0b71 c0x0000 (---------------) + I rovigo + 0x002004c2, // n0x0b72 c0x0000 (---------------) + I sa + 0x00279747, // n0x0b73 c0x0000 (---------------) + I salerno + 0x002257c3, // n0x0b74 c0x0000 (---------------) + I sar + 0x00226048, // n0x0b75 c0x0000 (---------------) + I sardegna + 0x00227248, // n0x0b76 c0x0000 (---------------) + I sardinia + 0x00378687, // n0x0b77 c0x0000 (---------------) + I sassari + 0x00234f06, // n0x0b78 c0x0000 (---------------) + I savona + 0x0020a402, // n0x0b79 c0x0000 (---------------) + I si + 
0x0023eac3, // n0x0b7a c0x0000 (---------------) + I sic + 0x0036e647, // n0x0b7b c0x0000 (---------------) + I sicilia + 0x00252146, // n0x0b7c c0x0000 (---------------) + I sicily + 0x002c8945, // n0x0b7d c0x0000 (---------------) + I siena + 0x003419c8, // n0x0b7e c0x0000 (---------------) + I siracusa + 0x00205682, // n0x0b7f c0x0000 (---------------) + I so + 0x00308547, // n0x0b80 c0x0000 (---------------) + I sondrio + 0x00209382, // n0x0b81 c0x0000 (---------------) + I sp + 0x0033b802, // n0x0b82 c0x0000 (---------------) + I sr + 0x002067c2, // n0x0b83 c0x0000 (---------------) + I ss + 0x002cebc9, // n0x0b84 c0x0000 (---------------) + I suedtirol + 0x00235f42, // n0x0b85 c0x0000 (---------------) + I sv + 0x00200a02, // n0x0b86 c0x0000 (---------------) + I ta + 0x00234603, // n0x0b87 c0x0000 (---------------) + I taa + 0x003096c7, // n0x0b88 c0x0000 (---------------) + I taranto + 0x002012c2, // n0x0b89 c0x0000 (---------------) + I te + 0x0027580c, // n0x0b8a c0x0000 (---------------) + I tempio-olbia + 0x00275b0b, // n0x0b8b c0x0000 (---------------) + I tempioolbia + 0x00256f06, // n0x0b8c c0x0000 (---------------) + I teramo + 0x0020cac5, // n0x0b8d c0x0000 (---------------) + I terni + 0x0024f882, // n0x0b8e c0x0000 (---------------) + I tn + 0x00208082, // n0x0b8f c0x0000 (---------------) + I to + 0x002b1946, // n0x0b90 c0x0000 (---------------) + I torino + 0x002280c3, // n0x0b91 c0x0000 (---------------) + I tos + 0x00324e47, // n0x0b92 c0x0000 (---------------) + I toscana + 0x00211f42, // n0x0b93 c0x0000 (---------------) + I tp + 0x00203002, // n0x0b94 c0x0000 (---------------) + I tr + 0x00284655, // n0x0b95 c0x0000 (---------------) + I trani-andria-barletta + 0x00233e15, // n0x0b96 c0x0000 (---------------) + I trani-barletta-andria + 0x002903d3, // n0x0b97 c0x0000 (---------------) + I traniandriabarletta + 0x00234353, // n0x0b98 c0x0000 (---------------) + I tranibarlettaandria + 0x00294a07, // n0x0b99 c0x0000 (---------------) + I 
trapani + 0x002b7688, // n0x0b9a c0x0000 (---------------) + I trentino + 0x002cf4d0, // n0x0b9b c0x0000 (---------------) + I trentino-a-adige + 0x002ef2cf, // n0x0b9c c0x0000 (---------------) + I trentino-aadige + 0x00342d53, // n0x0b9d c0x0000 (---------------) + I trentino-alto-adige + 0x0034eb92, // n0x0b9e c0x0000 (---------------) + I trentino-altoadige + 0x002cd090, // n0x0b9f c0x0000 (---------------) + I trentino-s-tirol + 0x002b768f, // n0x0ba0 c0x0000 (---------------) + I trentino-stirol + 0x002ba492, // n0x0ba1 c0x0000 (---------------) + I trentino-sud-tirol + 0x002c2911, // n0x0ba2 c0x0000 (---------------) + I trentino-sudtirol + 0x002ca953, // n0x0ba3 c0x0000 (---------------) + I trentino-sued-tirol + 0x002ce992, // n0x0ba4 c0x0000 (---------------) + I trentino-suedtirol + 0x002cfd4f, // n0x0ba5 c0x0000 (---------------) + I trentinoa-adige + 0x002d4c8e, // n0x0ba6 c0x0000 (---------------) + I trentinoaadige + 0x002dfa52, // n0x0ba7 c0x0000 (---------------) + I trentinoalto-adige + 0x002e3291, // n0x0ba8 c0x0000 (---------------) + I trentinoaltoadige + 0x002e3a8f, // n0x0ba9 c0x0000 (---------------) + I trentinos-tirol + 0x002e55ce, // n0x0baa c0x0000 (---------------) + I trentinostirol + 0x002e6691, // n0x0bab c0x0000 (---------------) + I trentinosud-tirol + 0x002f3a90, // n0x0bac c0x0000 (---------------) + I trentinosudtirol + 0x0035a592, // n0x0bad c0x0000 (---------------) + I trentinosued-tirol + 0x002e8b91, // n0x0bae c0x0000 (---------------) + I trentinosuedtirol + 0x002f8a86, // n0x0baf c0x0000 (---------------) + I trento + 0x002f9307, // n0x0bb0 c0x0000 (---------------) + I treviso + 0x003673c7, // n0x0bb1 c0x0000 (---------------) + I trieste + 0x00203f42, // n0x0bb2 c0x0000 (---------------) + I ts + 0x0027f145, // n0x0bb3 c0x0000 (---------------) + I turin + 0x002f2c87, // n0x0bb4 c0x0000 (---------------) + I tuscany + 0x00224e42, // n0x0bb5 c0x0000 (---------------) + I tv + 0x00209242, // n0x0bb6 c0x0000 
(---------------) + I ud + 0x0022a285, // n0x0bb7 c0x0000 (---------------) + I udine + 0x0021e183, // n0x0bb8 c0x0000 (---------------) + I umb + 0x00258406, // n0x0bb9 c0x0000 (---------------) + I umbria + 0x0031ca0d, // n0x0bba c0x0000 (---------------) + I urbino-pesaro + 0x0031cd4c, // n0x0bbb c0x0000 (---------------) + I urbinopesaro + 0x002000c2, // n0x0bbc c0x0000 (---------------) + I va + 0x0030f18b, // n0x0bbd c0x0000 (---------------) + I val-d-aosta + 0x00216e8a, // n0x0bbe c0x0000 (---------------) + I val-daosta + 0x00323dca, // n0x0bbf c0x0000 (---------------) + I vald-aosta + 0x002b09c9, // n0x0bc0 c0x0000 (---------------) + I valdaosta + 0x002deb4b, // n0x0bc1 c0x0000 (---------------) + I valle-aosta + 0x003a0b4d, // n0x0bc2 c0x0000 (---------------) + I valle-d-aosta + 0x002f338c, // n0x0bc3 c0x0000 (---------------) + I valle-daosta + 0x00224e8a, // n0x0bc4 c0x0000 (---------------) + I valleaosta + 0x0022594c, // n0x0bc5 c0x0000 (---------------) + I valled-aosta + 0x0024098b, // n0x0bc6 c0x0000 (---------------) + I valledaosta + 0x00250ecc, // n0x0bc7 c0x0000 (---------------) + I vallee-aoste + 0x0025184b, // n0x0bc8 c0x0000 (---------------) + I valleeaoste + 0x00275603, // n0x0bc9 c0x0000 (---------------) + I vao + 0x002894c6, // n0x0bca c0x0000 (---------------) + I varese + 0x002dc5c2, // n0x0bcb c0x0000 (---------------) + I vb + 0x002e6b02, // n0x0bcc c0x0000 (---------------) + I vc + 0x00210243, // n0x0bcd c0x0000 (---------------) + I vda + 0x00202b82, // n0x0bce c0x0000 (---------------) + I ve + 0x00202b83, // n0x0bcf c0x0000 (---------------) + I ven + 0x00375e46, // n0x0bd0 c0x0000 (---------------) + I veneto + 0x00261247, // n0x0bd1 c0x0000 (---------------) + I venezia + 0x0026f246, // n0x0bd2 c0x0000 (---------------) + I venice + 0x0022d688, // n0x0bd3 c0x0000 (---------------) + I verbania + 0x002dddc8, // n0x0bd4 c0x0000 (---------------) + I vercelli + 0x003607c6, // n0x0bd5 c0x0000 (---------------) + I verona + 
0x00205d42, // n0x0bd6 c0x0000 (---------------) + I vi + 0x002f504d, // n0x0bd7 c0x0000 (---------------) + I vibo-valentia + 0x002f538c, // n0x0bd8 c0x0000 (---------------) + I vibovalentia + 0x0033d887, // n0x0bd9 c0x0000 (---------------) + I vicenza + 0x002f9107, // n0x0bda c0x0000 (---------------) + I viterbo + 0x00211082, // n0x0bdb c0x0000 (---------------) + I vr + 0x00227982, // n0x0bdc c0x0000 (---------------) + I vs + 0x00271f82, // n0x0bdd c0x0000 (---------------) + I vt + 0x00214982, // n0x0bde c0x0000 (---------------) + I vv + 0x00200742, // n0x0bdf c0x0000 (---------------) + I co + 0x0021fe03, // n0x0be0 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0be1 c0x0000 (---------------) + I org + 0x00233503, // n0x0be2 c0x0000 (---------------) + I com + 0x0023a783, // n0x0be3 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0be4 c0x0000 (---------------) + I gov + 0x00209003, // n0x0be5 c0x0000 (---------------) + I mil + 0x00205284, // n0x0be6 c0x0000 (---------------) + I name + 0x0021fe03, // n0x0be7 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0be8 c0x0000 (---------------) + I org + 0x00217443, // n0x0be9 c0x0000 (---------------) + I sch + 0x00201542, // n0x0bea c0x0000 (---------------) + I ac + 0x00200342, // n0x0beb c0x0000 (---------------) + I ad + 0x2068f505, // n0x0bec c0x0081 (n0x0c59-n0x0c8d) + I aichi + 0x20a01dc5, // n0x0bed c0x0082 (n0x0c8d-n0x0ca9) + I akita + 0x20f0bf06, // n0x0bee c0x0083 (n0x0ca9-n0x0cbf) + I aomori + 0x000ffa08, // n0x0bef c0x0000 (---------------) + blogspot + 0x212add45, // n0x0bf0 c0x0084 (n0x0cbf-n0x0cf9) + I chiba + 0x00200742, // n0x0bf1 c0x0000 (---------------) + I co + 0x00202602, // n0x0bf2 c0x0000 (---------------) + I ed + 0x21629005, // n0x0bf3 c0x0085 (n0x0cf9-n0x0d0f) + I ehime + 0x21a7dac5, // n0x0bf4 c0x0086 (n0x0d0f-n0x0d1e) + I fukui + 0x21e7e807, // n0x0bf5 c0x0087 (n0x0d1e-n0x0d5d) + I fukuoka + 0x2232fcc9, // n0x0bf6 c0x0088 (n0x0d5d-n0x0d90) + I fukushima + 
0x2276aec4, // n0x0bf7 c0x0089 (n0x0d90-n0x0db6) + I gifu + 0x00202d42, // n0x0bf8 c0x0000 (---------------) + I go + 0x00200c82, // n0x0bf9 c0x0000 (---------------) + I gr + 0x22b66385, // n0x0bfa c0x008a (n0x0db6-n0x0dda) + I gunma + 0x22e0ee49, // n0x0bfb c0x008b (n0x0dda-n0x0df3) + I hiroshima + 0x23369e88, // n0x0bfc c0x008c (n0x0df3-n0x0e81) + I hokkaido + 0x236aba85, // n0x0bfd c0x008d (n0x0e81-n0x0eaf) + I hyogo + 0x23ac1ac7, // n0x0bfe c0x008e (n0x0eaf-n0x0ee2) + I ibaraki + 0x23e1a4c8, // n0x0bff c0x008f (n0x0ee2-n0x0ef5) + I ishikawa + 0x242d8485, // n0x0c00 c0x0090 (n0x0ef5-n0x0f17) + I iwate + 0x24600fc6, // n0x0c01 c0x0091 (n0x0f17-n0x0f26) + I kagawa + 0x24a76189, // n0x0c02 c0x0092 (n0x0f26-n0x0f3a) + I kagoshima + 0x24f1a4c8, // n0x0c03 c0x0093 (n0x0f3a-n0x0f58) + I kanagawa + 0x252b9088, // n0x0c04 c0x0094 (n0x0f58-n0x0f59)* o I kawasaki + 0x2569c90a, // n0x0c05 c0x0095 (n0x0f59-n0x0f5a)* o I kitakyushu + 0x25a4f544, // n0x0c06 c0x0096 (n0x0f5a-n0x0f5b)* o I kobe + 0x25ecb145, // n0x0c07 c0x0097 (n0x0f5b-n0x0f7a) + I kochi + 0x262b3748, // n0x0c08 c0x0098 (n0x0f7a-n0x0f94) + I kumamoto + 0x266be0c5, // n0x0c09 c0x0099 (n0x0f94-n0x0fb3) + I kyoto + 0x00219082, // n0x0c0a c0x0000 (---------------) + I lg + 0x26a4b943, // n0x0c0b c0x009a (n0x0fb3-n0x0fd1) + I mie + 0x26ea29c6, // n0x0c0c c0x009b (n0x0fd1-n0x0ff2) + I miyagi + 0x27266108, // n0x0c0d c0x009c (n0x0ff2-n0x100d) + I miyazaki + 0x27754f86, // n0x0c0e c0x009d (n0x100d-n0x1058) + I nagano + 0x27adda48, // n0x0c0f c0x009e (n0x1058-n0x106e) + I nagasaki + 0x27f0f646, // n0x0c10 c0x009f (n0x106e-n0x106f)* o I nagoya + 0x282c8a04, // n0x0c11 c0x00a0 (n0x106f-n0x1095) + I nara + 0x00202c02, // n0x0c12 c0x0000 (---------------) + I ne + 0x2863ff87, // n0x0c13 c0x00a1 (n0x1095-n0x10b7) + I niigata + 0x28aa8984, // n0x0c14 c0x00a2 (n0x10b7-n0x10ca) + I oita + 0x28e78dc7, // n0x0c15 c0x00a3 (n0x10ca-n0x10e4) + I okayama + 0x29395b47, // n0x0c16 c0x00a4 (n0x10e4-n0x110e) + I okinawa + 0x00200282, // 
n0x0c17 c0x0000 (---------------) + I or + 0x2969ac45, // n0x0c18 c0x00a5 (n0x110e-n0x1140) + I osaka + 0x29a38904, // n0x0c19 c0x00a6 (n0x1140-n0x115a) + I saga + 0x29ed9247, // n0x0c1a c0x00a7 (n0x115a-n0x119f) + I saitama + 0x2a221087, // n0x0c1b c0x00a8 (n0x119f-n0x11a0)* o I sapporo + 0x2a682b06, // n0x0c1c c0x00a9 (n0x11a0-n0x11a1)* o I sendai + 0x2aa285c5, // n0x0c1d c0x00aa (n0x11a1-n0x11b8) + I shiga + 0x2ae93dc7, // n0x0c1e c0x00ab (n0x11b8-n0x11cf) + I shimane + 0x2b2b2608, // n0x0c1f c0x00ac (n0x11cf-n0x11f3) + I shizuoka + 0x2b744e07, // n0x0c20 c0x00ad (n0x11f3-n0x1212) + I tochigi + 0x2ba99fc9, // n0x0c21 c0x00ae (n0x1212-n0x1223) + I tokushima + 0x2bf41c05, // n0x0c22 c0x00af (n0x1223-n0x125c) + I tokyo + 0x2c2f8b87, // n0x0c23 c0x00b0 (n0x125c-n0x1269) + I tottori + 0x2c68e9c6, // n0x0c24 c0x00b1 (n0x1269-n0x1281) + I toyama + 0x2ca23ac8, // n0x0c25 c0x00b2 (n0x1281-n0x129e) + I wakayama + 0x0038988d, // n0x0c26 c0x0000 (---------------) + I xn--0trq7p7nn + 0x0024e389, // n0x0c27 c0x0000 (---------------) + I xn--1ctwo + 0x0025c1cb, // n0x0c28 c0x0000 (---------------) + I xn--1lqs03n + 0x0026024b, // n0x0c29 c0x0000 (---------------) + I xn--1lqs71d + 0x0027348b, // n0x0c2a c0x0000 (---------------) + I xn--2m4a15e + 0x002a5bcb, // n0x0c2b c0x0000 (---------------) + I xn--32vp30h + 0x0030168b, // n0x0c2c c0x0000 (---------------) + I xn--4it168d + 0x0030194b, // n0x0c2d c0x0000 (---------------) + I xn--4it797k + 0x00301d89, // n0x0c2e c0x0000 (---------------) + I xn--4pvxs + 0x00302c4b, // n0x0c2f c0x0000 (---------------) + I xn--5js045d + 0x00302f0b, // n0x0c30 c0x0000 (---------------) + I xn--5rtp49c + 0x0030338b, // n0x0c31 c0x0000 (---------------) + I xn--5rtq34k + 0x003043ca, // n0x0c32 c0x0000 (---------------) + I xn--6btw5a + 0x0030490a, // n0x0c33 c0x0000 (---------------) + I xn--6orx2r + 0x00304f0c, // n0x0c34 c0x0000 (---------------) + I xn--7t0a264c + 0x0030a1cb, // n0x0c35 c0x0000 (---------------) + I xn--8ltr62k + 
0x0030a74a, // n0x0c36 c0x0000 (---------------) + I xn--8pvr4u + 0x0031918a, // n0x0c37 c0x0000 (---------------) + I xn--c3s14m + 0x0032940e, // n0x0c38 c0x0000 (---------------) + I xn--d5qv7z876c + 0x0032a1ce, // n0x0c39 c0x0000 (---------------) + I xn--djrs72d6uy + 0x0032a54a, // n0x0c3a c0x0000 (---------------) + I xn--djty4k + 0x0032bb0a, // n0x0c3b c0x0000 (---------------) + I xn--efvn9s + 0x0032c78b, // n0x0c3c c0x0000 (---------------) + I xn--ehqz56n + 0x0032ca4b, // n0x0c3d c0x0000 (---------------) + I xn--elqq16h + 0x0032d78b, // n0x0c3e c0x0000 (---------------) + I xn--f6qx53a + 0x0034834b, // n0x0c3f c0x0000 (---------------) + I xn--k7yn95e + 0x0034894a, // n0x0c40 c0x0000 (---------------) + I xn--kbrq7o + 0x0034960b, // n0x0c41 c0x0000 (---------------) + I xn--klt787d + 0x003498ca, // n0x0c42 c0x0000 (---------------) + I xn--kltp7d + 0x00349b4a, // n0x0c43 c0x0000 (---------------) + I xn--kltx9a + 0x00349dca, // n0x0c44 c0x0000 (---------------) + I xn--klty5x + 0x00367e8b, // n0x0c45 c0x0000 (---------------) + I xn--mkru45i + 0x0037178b, // n0x0c46 c0x0000 (---------------) + I xn--nit225k + 0x003733ce, // n0x0c47 c0x0000 (---------------) + I xn--ntso0iqx3a + 0x0037374b, // n0x0c48 c0x0000 (---------------) + I xn--ntsq17g + 0x0037b18b, // n0x0c49 c0x0000 (---------------) + I xn--pssu33l + 0x0037d28b, // n0x0c4a c0x0000 (---------------) + I xn--qqqt11m + 0x0038144a, // n0x0c4b c0x0000 (---------------) + I xn--rht27z + 0x003816c9, // n0x0c4c c0x0000 (---------------) + I xn--rht3d + 0x0038190a, // n0x0c4d c0x0000 (---------------) + I xn--rht61e + 0x00382f8a, // n0x0c4e c0x0000 (---------------) + I xn--rny31h + 0x00392e8b, // n0x0c4f c0x0000 (---------------) + I xn--tor131o + 0x003949cb, // n0x0c50 c0x0000 (---------------) + I xn--uist22h + 0x0039548a, // n0x0c51 c0x0000 (---------------) + I xn--uisz3g + 0x003967cb, // n0x0c52 c0x0000 (---------------) + I xn--uuwu58a + 0x0039a30b, // n0x0c53 c0x0000 (---------------) + I 
xn--vgu402c + 0x003a520b, // n0x0c54 c0x0000 (---------------) + I xn--zbx025d + 0x2ce808c8, // n0x0c55 c0x00b3 (n0x129e-n0x12c0) + I yamagata + 0x2d2873c9, // n0x0c56 c0x00b4 (n0x12c0-n0x12d0) + I yamaguchi + 0x2d6a15c9, // n0x0c57 c0x00b5 (n0x12d0-n0x12ec) + I yamanashi + 0x2dad09c8, // n0x0c58 c0x00b6 (n0x12ec-n0x12ed)* o I yokohama + 0x00334d45, // n0x0c59 c0x0000 (---------------) + I aisai + 0x00201883, // n0x0c5a c0x0000 (---------------) + I ama + 0x00203fc4, // n0x0c5b c0x0000 (---------------) + I anjo + 0x00360985, // n0x0c5c c0x0000 (---------------) + I asuke + 0x0036ac06, // n0x0c5d c0x0000 (---------------) + I chiryu + 0x002ac6c5, // n0x0c5e c0x0000 (---------------) + I chita + 0x00286bc4, // n0x0c5f c0x0000 (---------------) + I fuso + 0x0026ecc8, // n0x0c60 c0x0000 (---------------) + I gamagori + 0x00256185, // n0x0c61 c0x0000 (---------------) + I handa + 0x0028ff84, // n0x0c62 c0x0000 (---------------) + I hazu + 0x002c3247, // n0x0c63 c0x0000 (---------------) + I hekinan + 0x0029d24a, // n0x0c64 c0x0000 (---------------) + I higashiura + 0x002d218a, // n0x0c65 c0x0000 (---------------) + I ichinomiya + 0x0032bfc7, // n0x0c66 c0x0000 (---------------) + I inazawa + 0x00201787, // n0x0c67 c0x0000 (---------------) + I inuyama + 0x002f2007, // n0x0c68 c0x0000 (---------------) + I isshiki + 0x0031d107, // n0x0c69 c0x0000 (---------------) + I iwakura + 0x002a25c5, // n0x0c6a c0x0000 (---------------) + I kanie + 0x00325246, // n0x0c6b c0x0000 (---------------) + I kariya + 0x00321d87, // n0x0c6c c0x0000 (---------------) + I kasugai + 0x002571c4, // n0x0c6d c0x0000 (---------------) + I kira + 0x002f0206, // n0x0c6e c0x0000 (---------------) + I kiyosu + 0x00296f06, // n0x0c6f c0x0000 (---------------) + I komaki + 0x00206b85, // n0x0c70 c0x0000 (---------------) + I konan + 0x00229444, // n0x0c71 c0x0000 (---------------) + I kota + 0x002da306, // n0x0c72 c0x0000 (---------------) + I mihama + 0x0029c447, // n0x0c73 c0x0000 (---------------) + 
I miyoshi + 0x002251c6, // n0x0c74 c0x0000 (---------------) + I nishio + 0x00266c47, // n0x0c75 c0x0000 (---------------) + I nisshin + 0x0027cb43, // n0x0c76 c0x0000 (---------------) + I obu + 0x00252346, // n0x0c77 c0x0000 (---------------) + I oguchi + 0x00236085, // n0x0c78 c0x0000 (---------------) + I oharu + 0x0027e907, // n0x0c79 c0x0000 (---------------) + I okazaki + 0x002bf04a, // n0x0c7a c0x0000 (---------------) + I owariasahi + 0x002ae744, // n0x0c7b c0x0000 (---------------) + I seto + 0x00219288, // n0x0c7c c0x0000 (---------------) + I shikatsu + 0x00299589, // n0x0c7d c0x0000 (---------------) + I shinshiro + 0x002aff87, // n0x0c7e c0x0000 (---------------) + I shitara + 0x002e8506, // n0x0c7f c0x0000 (---------------) + I tahara + 0x00365ec8, // n0x0c80 c0x0000 (---------------) + I takahama + 0x00307489, // n0x0c81 c0x0000 (---------------) + I tobishima + 0x00375f44, // n0x0c82 c0x0000 (---------------) + I toei + 0x00337204, // n0x0c83 c0x0000 (---------------) + I togo + 0x002fa0c5, // n0x0c84 c0x0000 (---------------) + I tokai + 0x002bfec8, // n0x0c85 c0x0000 (---------------) + I tokoname + 0x002c0907, // n0x0c86 c0x0000 (---------------) + I toyoake + 0x0028df09, // n0x0c87 c0x0000 (---------------) + I toyohashi + 0x00247dc8, // n0x0c88 c0x0000 (---------------) + I toyokawa + 0x00366606, // n0x0c89 c0x0000 (---------------) + I toyone + 0x0025bb86, // n0x0c8a c0x0000 (---------------) + I toyota + 0x00297a48, // n0x0c8b c0x0000 (---------------) + I tsushima + 0x0036a786, // n0x0c8c c0x0000 (---------------) + I yatomi + 0x00201dc5, // n0x0c8d c0x0000 (---------------) + I akita + 0x00282bc6, // n0x0c8e c0x0000 (---------------) + I daisen + 0x002790c8, // n0x0c8f c0x0000 (---------------) + I fujisato + 0x0023ab86, // n0x0c90 c0x0000 (---------------) + I gojome + 0x0025778b, // n0x0c91 c0x0000 (---------------) + I hachirogata + 0x0028ab86, // n0x0c92 c0x0000 (---------------) + I happou + 0x002997cd, // n0x0c93 c0x0000 
(---------------) + I higashinaruse + 0x0038e545, // n0x0c94 c0x0000 (---------------) + I honjo + 0x002a8846, // n0x0c95 c0x0000 (---------------) + I honjyo + 0x0021a585, // n0x0c96 c0x0000 (---------------) + I ikawa + 0x00296349, // n0x0c97 c0x0000 (---------------) + I kamikoani + 0x00320ec7, // n0x0c98 c0x0000 (---------------) + I kamioka + 0x00378048, // n0x0c99 c0x0000 (---------------) + I katagami + 0x00305386, // n0x0c9a c0x0000 (---------------) + I kazuno + 0x002983c9, // n0x0c9b c0x0000 (---------------) + I kitaakita + 0x002dd306, // n0x0c9c c0x0000 (---------------) + I kosaka + 0x002befc5, // n0x0c9d c0x0000 (---------------) + I kyowa + 0x0022f486, // n0x0c9e c0x0000 (---------------) + I misato + 0x002b16c6, // n0x0c9f c0x0000 (---------------) + I mitane + 0x002c6849, // n0x0ca0 c0x0000 (---------------) + I moriyoshi + 0x0033cfc6, // n0x0ca1 c0x0000 (---------------) + I nikaho + 0x0037ebc7, // n0x0ca2 c0x0000 (---------------) + I noshiro + 0x002c5f85, // n0x0ca3 c0x0000 (---------------) + I odate + 0x00202a03, // n0x0ca4 c0x0000 (---------------) + I oga + 0x00223845, // n0x0ca5 c0x0000 (---------------) + I ogata + 0x002a6287, // n0x0ca6 c0x0000 (---------------) + I semboku + 0x00330a06, // n0x0ca7 c0x0000 (---------------) + I yokote + 0x0038e449, // n0x0ca8 c0x0000 (---------------) + I yurihonjo + 0x0030bf06, // n0x0ca9 c0x0000 (---------------) + I aomori + 0x00282e06, // n0x0caa c0x0000 (---------------) + I gonohe + 0x0020db09, // n0x0cab c0x0000 (---------------) + I hachinohe + 0x002825c9, // n0x0cac c0x0000 (---------------) + I hashikami + 0x0029f407, // n0x0cad c0x0000 (---------------) + I hiranai + 0x002eb708, // n0x0cae c0x0000 (---------------) + I hirosaki + 0x002692c9, // n0x0caf c0x0000 (---------------) + I itayanagi + 0x0027edc8, // n0x0cb0 c0x0000 (---------------) + I kuroishi + 0x0037d506, // n0x0cb1 c0x0000 (---------------) + I misawa + 0x002d0505, // n0x0cb2 c0x0000 (---------------) + I mutsu + 0x0021e54a, // 
n0x0cb3 c0x0000 (---------------) + I nakadomari + 0x00282e86, // n0x0cb4 c0x0000 (---------------) + I noheji + 0x00207e46, // n0x0cb5 c0x0000 (---------------) + I oirase + 0x002a2b85, // n0x0cb6 c0x0000 (---------------) + I owani + 0x0036b208, // n0x0cb7 c0x0000 (---------------) + I rokunohe + 0x0020ea87, // n0x0cb8 c0x0000 (---------------) + I sannohe + 0x0023638a, // n0x0cb9 c0x0000 (---------------) + I shichinohe + 0x0024e0c6, // n0x0cba c0x0000 (---------------) + I shingo + 0x00240bc5, // n0x0cbb c0x0000 (---------------) + I takko + 0x0024a2c6, // n0x0cbc c0x0000 (---------------) + I towada + 0x00297647, // n0x0cbd c0x0000 (---------------) + I tsugaru + 0x002e83c7, // n0x0cbe c0x0000 (---------------) + I tsuruta + 0x0037e845, // n0x0cbf c0x0000 (---------------) + I abiko + 0x002bf185, // n0x0cc0 c0x0000 (---------------) + I asahi + 0x002e59c6, // n0x0cc1 c0x0000 (---------------) + I chonan + 0x002e6b46, // n0x0cc2 c0x0000 (---------------) + I chosei + 0x00300346, // n0x0cc3 c0x0000 (---------------) + I choshi + 0x0030ba84, // n0x0cc4 c0x0000 (---------------) + I chuo + 0x00281689, // n0x0cc5 c0x0000 (---------------) + I funabashi + 0x00288286, // n0x0cc6 c0x0000 (---------------) + I futtsu + 0x0034ad4a, // n0x0cc7 c0x0000 (---------------) + I hanamigawa + 0x0028f548, // n0x0cc8 c0x0000 (---------------) + I ichihara + 0x00265608, // n0x0cc9 c0x0000 (---------------) + I ichikawa + 0x002d218a, // n0x0cca c0x0000 (---------------) + I ichinomiya + 0x003a0f85, // n0x0ccb c0x0000 (---------------) + I inzai + 0x0029c385, // n0x0ccc c0x0000 (---------------) + I isumi + 0x00307d08, // n0x0ccd c0x0000 (---------------) + I kamagaya + 0x002caec8, // n0x0cce c0x0000 (---------------) + I kamogawa + 0x002037c7, // n0x0ccf c0x0000 (---------------) + I kashiwa + 0x00294d86, // n0x0cd0 c0x0000 (---------------) + I katori + 0x003141c8, // n0x0cd1 c0x0000 (---------------) + I katsuura + 0x002303c7, // n0x0cd2 c0x0000 (---------------) + I kimitsu + 
0x00280d88, // n0x0cd3 c0x0000 (---------------) + I kisarazu + 0x00368e86, // n0x0cd4 c0x0000 (---------------) + I kozaki + 0x00283fc8, // n0x0cd5 c0x0000 (---------------) + I kujukuri + 0x002b4246, // n0x0cd6 c0x0000 (---------------) + I kyonan + 0x00243747, // n0x0cd7 c0x0000 (---------------) + I matsudo + 0x00298e06, // n0x0cd8 c0x0000 (---------------) + I midori + 0x002da306, // n0x0cd9 c0x0000 (---------------) + I mihama + 0x0023c6ca, // n0x0cda c0x0000 (---------------) + I minamiboso + 0x00234886, // n0x0cdb c0x0000 (---------------) + I mobara + 0x002d0509, // n0x0cdc c0x0000 (---------------) + I mutsuzawa + 0x002ae046, // n0x0cdd c0x0000 (---------------) + I nagara + 0x002d164a, // n0x0cde c0x0000 (---------------) + I nagareyama + 0x002c8a09, // n0x0cdf c0x0000 (---------------) + I narashino + 0x0037df46, // n0x0ce0 c0x0000 (---------------) + I narita + 0x0037f944, // n0x0ce1 c0x0000 (---------------) + I noda + 0x003071cd, // n0x0ce2 c0x0000 (---------------) + I oamishirasato + 0x00287647, // n0x0ce3 c0x0000 (---------------) + I omigawa + 0x00316686, // n0x0ce4 c0x0000 (---------------) + I onjuku + 0x002b8f45, // n0x0ce5 c0x0000 (---------------) + I otaki + 0x002dd385, // n0x0ce6 c0x0000 (---------------) + I sakae + 0x00307fc6, // n0x0ce7 c0x0000 (---------------) + I sakura + 0x0028d9c9, // n0x0ce8 c0x0000 (---------------) + I shimofusa + 0x002aaf87, // n0x0ce9 c0x0000 (---------------) + I shirako + 0x0027a9c6, // n0x0cea c0x0000 (---------------) + I shiroi + 0x002af8c6, // n0x0ceb c0x0000 (---------------) + I shisui + 0x00286c49, // n0x0cec c0x0000 (---------------) + I sodegaura + 0x0021f484, // n0x0ced c0x0000 (---------------) + I sosa + 0x0036bcc4, // n0x0cee c0x0000 (---------------) + I tako + 0x002040c8, // n0x0cef c0x0000 (---------------) + I tateyama + 0x002aea86, // n0x0cf0 c0x0000 (---------------) + I togane + 0x0029ed88, // n0x0cf1 c0x0000 (---------------) + I tohnosho + 0x0022f408, // n0x0cf2 c0x0000 
(---------------) + I tomisato + 0x00280587, // n0x0cf3 c0x0000 (---------------) + I urayasu + 0x003a6349, // n0x0cf4 c0x0000 (---------------) + I yachimata + 0x00300547, // n0x0cf5 c0x0000 (---------------) + I yachiyo + 0x002adc0a, // n0x0cf6 c0x0000 (---------------) + I yokaichiba + 0x0022edcf, // n0x0cf7 c0x0000 (---------------) + I yokoshibahikari + 0x00269dca, // n0x0cf8 c0x0000 (---------------) + I yotsukaido + 0x00223245, // n0x0cf9 c0x0000 (---------------) + I ainan + 0x00279305, // n0x0cfa c0x0000 (---------------) + I honai + 0x00216985, // n0x0cfb c0x0000 (---------------) + I ikata + 0x00249147, // n0x0cfc c0x0000 (---------------) + I imabari + 0x00206043, // n0x0cfd c0x0000 (---------------) + I iyo + 0x002eb908, // n0x0cfe c0x0000 (---------------) + I kamijima + 0x002f5c86, // n0x0cff c0x0000 (---------------) + I kihoku + 0x002f5d89, // n0x0d00 c0x0000 (---------------) + I kumakogen + 0x003a3b06, // n0x0d01 c0x0000 (---------------) + I masaki + 0x002c02c7, // n0x0d02 c0x0000 (---------------) + I matsuno + 0x00298189, // n0x0d03 c0x0000 (---------------) + I matsuyama + 0x00377f48, // n0x0d04 c0x0000 (---------------) + I namikata + 0x002a2c47, // n0x0d05 c0x0000 (---------------) + I niihama + 0x00300a43, // n0x0d06 c0x0000 (---------------) + I ozu + 0x00334dc5, // n0x0d07 c0x0000 (---------------) + I saijo + 0x002396c5, // n0x0d08 c0x0000 (---------------) + I seiyo + 0x0030b8cb, // n0x0d09 c0x0000 (---------------) + I shikokuchuo + 0x002be184, // n0x0d0a c0x0000 (---------------) + I tobe + 0x0020b004, // n0x0d0b c0x0000 (---------------) + I toon + 0x00278086, // n0x0d0c c0x0000 (---------------) + I uchiko + 0x00300dc7, // n0x0d0d c0x0000 (---------------) + I uwajima + 0x0038f14a, // n0x0d0e c0x0000 (---------------) + I yawatahama + 0x0024b9c7, // n0x0d0f c0x0000 (---------------) + I echizen + 0x00375fc7, // n0x0d10 c0x0000 (---------------) + I eiheiji + 0x0027dac5, // n0x0d11 c0x0000 (---------------) + I fukui + 0x00202585, 
// n0x0d12 c0x0000 (---------------) + I ikeda + 0x0021ebc9, // n0x0d13 c0x0000 (---------------) + I katsuyama + 0x002da306, // n0x0d14 c0x0000 (---------------) + I mihama + 0x0024b84d, // n0x0d15 c0x0000 (---------------) + I minamiechizen + 0x00395f05, // n0x0d16 c0x0000 (---------------) + I obama + 0x00299783, // n0x0d17 c0x0000 (---------------) + I ohi + 0x0020a703, // n0x0d18 c0x0000 (---------------) + I ono + 0x002f6605, // n0x0d19 c0x0000 (---------------) + I sabae + 0x0034ca05, // n0x0d1a c0x0000 (---------------) + I sakai + 0x00365ec8, // n0x0d1b c0x0000 (---------------) + I takahama + 0x0027b9c7, // n0x0d1c c0x0000 (---------------) + I tsuruga + 0x0036ba46, // n0x0d1d c0x0000 (---------------) + I wakasa + 0x0029d906, // n0x0d1e c0x0000 (---------------) + I ashiya + 0x0022d885, // n0x0d1f c0x0000 (---------------) + I buzen + 0x0023aa47, // n0x0d20 c0x0000 (---------------) + I chikugo + 0x00201a07, // n0x0d21 c0x0000 (---------------) + I chikuho + 0x00293107, // n0x0d22 c0x0000 (---------------) + I chikujo + 0x002cb1ca, // n0x0d23 c0x0000 (---------------) + I chikushino + 0x00252408, // n0x0d24 c0x0000 (---------------) + I chikuzen + 0x0030ba84, // n0x0d25 c0x0000 (---------------) + I chuo + 0x00214b07, // n0x0d26 c0x0000 (---------------) + I dazaifu + 0x0027cc87, // n0x0d27 c0x0000 (---------------) + I fukuchi + 0x0032c406, // n0x0d28 c0x0000 (---------------) + I hakata + 0x00268047, // n0x0d29 c0x0000 (---------------) + I higashi + 0x002d2bc8, // n0x0d2a c0x0000 (---------------) + I hirokawa + 0x002a14c8, // n0x0d2b c0x0000 (---------------) + I hisayama + 0x0026e786, // n0x0d2c c0x0000 (---------------) + I iizuka + 0x0022b108, // n0x0d2d c0x0000 (---------------) + I inatsuki + 0x002c6404, // n0x0d2e c0x0000 (---------------) + I kaho + 0x00321d86, // n0x0d2f c0x0000 (---------------) + I kasuga + 0x0020f406, // n0x0d30 c0x0000 (---------------) + I kasuya + 0x00206106, // n0x0d31 c0x0000 (---------------) + I kawara + 0x002ebf06, 
// n0x0d32 c0x0000 (---------------) + I keisen + 0x00226304, // n0x0d33 c0x0000 (---------------) + I koga + 0x0031d1c6, // n0x0d34 c0x0000 (---------------) + I kurate + 0x002b81c6, // n0x0d35 c0x0000 (---------------) + I kurogi + 0x002969c6, // n0x0d36 c0x0000 (---------------) + I kurume + 0x00228406, // n0x0d37 c0x0000 (---------------) + I minami + 0x0020a5c6, // n0x0d38 c0x0000 (---------------) + I miyako + 0x002d2a06, // n0x0d39 c0x0000 (---------------) + I miyama + 0x0036b948, // n0x0d3a c0x0000 (---------------) + I miyawaka + 0x002f0088, // n0x0d3b c0x0000 (---------------) + I mizumaki + 0x002cbdc8, // n0x0d3c c0x0000 (---------------) + I munakata + 0x002ac8c8, // n0x0d3d c0x0000 (---------------) + I nakagawa + 0x00307c86, // n0x0d3e c0x0000 (---------------) + I nakama + 0x00211805, // n0x0d3f c0x0000 (---------------) + I nishi + 0x00223806, // n0x0d40 c0x0000 (---------------) + I nogata + 0x002abb05, // n0x0d41 c0x0000 (---------------) + I ogori + 0x00380887, // n0x0d42 c0x0000 (---------------) + I okagaki + 0x002060c5, // n0x0d43 c0x0000 (---------------) + I okawa + 0x00215483, // n0x0d44 c0x0000 (---------------) + I oki + 0x00203a85, // n0x0d45 c0x0000 (---------------) + I omuta + 0x002b6104, // n0x0d46 c0x0000 (---------------) + I onga + 0x0020a705, // n0x0d47 c0x0000 (---------------) + I onojo + 0x00216003, // n0x0d48 c0x0000 (---------------) + I oto + 0x002d87c7, // n0x0d49 c0x0000 (---------------) + I saigawa + 0x0036fd08, // n0x0d4a c0x0000 (---------------) + I sasaguri + 0x00266d06, // n0x0d4b c0x0000 (---------------) + I shingu + 0x002a224d, // n0x0d4c c0x0000 (---------------) + I shinyoshitomi + 0x002792c6, // n0x0d4d c0x0000 (---------------) + I shonai + 0x00295a45, // n0x0d4e c0x0000 (---------------) + I soeda + 0x002c6c03, // n0x0d4f c0x0000 (---------------) + I sue + 0x002b3109, // n0x0d50 c0x0000 (---------------) + I tachiarai + 0x002c1c86, // n0x0d51 c0x0000 (---------------) + I tagawa + 0x00238646, // n0x0d52 
c0x0000 (---------------) + I takata + 0x0034cf44, // n0x0d53 c0x0000 (---------------) + I toho + 0x00269d47, // n0x0d54 c0x0000 (---------------) + I toyotsu + 0x0023bd46, // n0x0d55 c0x0000 (---------------) + I tsuiki + 0x0036b045, // n0x0d56 c0x0000 (---------------) + I ukiha + 0x0020b583, // n0x0d57 c0x0000 (---------------) + I umi + 0x002066c4, // n0x0d58 c0x0000 (---------------) + I usui + 0x0027ce46, // n0x0d59 c0x0000 (---------------) + I yamada + 0x002a7d84, // n0x0d5a c0x0000 (---------------) + I yame + 0x0030df48, // n0x0d5b c0x0000 (---------------) + I yanagawa + 0x00383d09, // n0x0d5c c0x0000 (---------------) + I yukuhashi + 0x002bde89, // n0x0d5d c0x0000 (---------------) + I aizubange + 0x0029eb8a, // n0x0d5e c0x0000 (---------------) + I aizumisato + 0x002453cd, // n0x0d5f c0x0000 (---------------) + I aizuwakamatsu + 0x00248a07, // n0x0d60 c0x0000 (---------------) + I asakawa + 0x00207cc6, // n0x0d61 c0x0000 (---------------) + I bandai + 0x0020c7c4, // n0x0d62 c0x0000 (---------------) + I date + 0x0032fcc9, // n0x0d63 c0x0000 (---------------) + I fukushima + 0x002860c8, // n0x0d64 c0x0000 (---------------) + I furudono + 0x00287246, // n0x0d65 c0x0000 (---------------) + I futaba + 0x0025a746, // n0x0d66 c0x0000 (---------------) + I hanawa + 0x00268047, // n0x0d67 c0x0000 (---------------) + I higashi + 0x00347f46, // n0x0d68 c0x0000 (---------------) + I hirata + 0x0021d4c6, // n0x0d69 c0x0000 (---------------) + I hirono + 0x00384006, // n0x0d6a c0x0000 (---------------) + I iitate + 0x00395bca, // n0x0d6b c0x0000 (---------------) + I inawashiro + 0x0021a4c8, // n0x0d6c c0x0000 (---------------) + I ishikawa + 0x0022da85, // n0x0d6d c0x0000 (---------------) + I iwaki + 0x002802c9, // n0x0d6e c0x0000 (---------------) + I izumizaki + 0x0028128a, // n0x0d6f c0x0000 (---------------) + I kagamiishi + 0x002c7848, // n0x0d70 c0x0000 (---------------) + I kaneyama + 0x0029b888, // n0x0d71 c0x0000 (---------------) + I kawamata + 
0x00295fc8, // n0x0d72 c0x0000 (---------------) + I kitakata + 0x00201e0c, // n0x0d73 c0x0000 (---------------) + I kitashiobara + 0x002d5305, // n0x0d74 c0x0000 (---------------) + I koori + 0x0029db88, // n0x0d75 c0x0000 (---------------) + I koriyama + 0x00342586, // n0x0d76 c0x0000 (---------------) + I kunimi + 0x00306306, // n0x0d77 c0x0000 (---------------) + I miharu + 0x002bf3c7, // n0x0d78 c0x0000 (---------------) + I mishima + 0x0024b8c5, // n0x0d79 c0x0000 (---------------) + I namie + 0x00282d45, // n0x0d7a c0x0000 (---------------) + I nango + 0x002bdd49, // n0x0d7b c0x0000 (---------------) + I nishiaizu + 0x00211d87, // n0x0d7c c0x0000 (---------------) + I nishigo + 0x002f5d45, // n0x0d7d c0x0000 (---------------) + I okuma + 0x0021f187, // n0x0d7e c0x0000 (---------------) + I omotego + 0x0020a703, // n0x0d7f c0x0000 (---------------) + I ono + 0x002c0e45, // n0x0d80 c0x0000 (---------------) + I otama + 0x00346648, // n0x0d81 c0x0000 (---------------) + I samegawa + 0x002b1287, // n0x0d82 c0x0000 (---------------) + I shimogo + 0x0029b749, // n0x0d83 c0x0000 (---------------) + I shirakawa + 0x00319905, // n0x0d84 c0x0000 (---------------) + I showa + 0x0035d844, // n0x0d85 c0x0000 (---------------) + I soma + 0x002a0308, // n0x0d86 c0x0000 (---------------) + I sukagawa + 0x0022c747, // n0x0d87 c0x0000 (---------------) + I taishin + 0x002a2e08, // n0x0d88 c0x0000 (---------------) + I tamakawa + 0x00330f48, // n0x0d89 c0x0000 (---------------) + I tanagura + 0x002d9885, // n0x0d8a c0x0000 (---------------) + I tenei + 0x00350b06, // n0x0d8b c0x0000 (---------------) + I yabuki + 0x0028f086, // n0x0d8c c0x0000 (---------------) + I yamato + 0x00250089, // n0x0d8d c0x0000 (---------------) + I yamatsuri + 0x00314a47, // n0x0d8e c0x0000 (---------------) + I yanaizu + 0x002abfc6, // n0x0d8f c0x0000 (---------------) + I yugawa + 0x0028ca87, // n0x0d90 c0x0000 (---------------) + I anpachi + 0x002183c3, // n0x0d91 c0x0000 (---------------) + I 
ena + 0x0036aec4, // n0x0d92 c0x0000 (---------------) + I gifu + 0x002a0c45, // n0x0d93 c0x0000 (---------------) + I ginan + 0x00214d84, // n0x0d94 c0x0000 (---------------) + I godo + 0x00343c04, // n0x0d95 c0x0000 (---------------) + I gujo + 0x00280b47, // n0x0d96 c0x0000 (---------------) + I hashima + 0x00218a07, // n0x0d97 c0x0000 (---------------) + I hichiso + 0x0027ab84, // n0x0d98 c0x0000 (---------------) + I hida + 0x0029b590, // n0x0d99 c0x0000 (---------------) + I higashishirakawa + 0x00308147, // n0x0d9a c0x0000 (---------------) + I ibigawa + 0x00202585, // n0x0d9b c0x0000 (---------------) + I ikeda + 0x002ef98c, // n0x0d9c c0x0000 (---------------) + I kakamigahara + 0x00279cc4, // n0x0d9d c0x0000 (---------------) + I kani + 0x0029b388, // n0x0d9e c0x0000 (---------------) + I kasahara + 0x00243649, // n0x0d9f c0x0000 (---------------) + I kasamatsu + 0x00301506, // n0x0da0 c0x0000 (---------------) + I kawaue + 0x0021f588, // n0x0da1 c0x0000 (---------------) + I kitagata + 0x0024ff84, // n0x0da2 c0x0000 (---------------) + I mino + 0x002d7b88, // n0x0da3 c0x0000 (---------------) + I minokamo + 0x00268506, // n0x0da4 c0x0000 (---------------) + I mitake + 0x00228288, // n0x0da5 c0x0000 (---------------) + I mizunami + 0x002a0946, // n0x0da6 c0x0000 (---------------) + I motosu + 0x0037a2cb, // n0x0da7 c0x0000 (---------------) + I nakatsugawa + 0x00202a05, // n0x0da8 c0x0000 (---------------) + I ogaki + 0x002c6388, // n0x0da9 c0x0000 (---------------) + I sakahogi + 0x00217fc4, // n0x0daa c0x0000 (---------------) + I seki + 0x0028224a, // n0x0dab c0x0000 (---------------) + I sekigahara + 0x0029b749, // n0x0dac c0x0000 (---------------) + I shirakawa + 0x00312006, // n0x0dad c0x0000 (---------------) + I tajimi + 0x002c0148, // n0x0dae c0x0000 (---------------) + I takayama + 0x00274245, // n0x0daf c0x0000 (---------------) + I tarui + 0x00222904, // n0x0db0 c0x0000 (---------------) + I toki + 0x0029b286, // n0x0db1 c0x0000 
(---------------) + I tomika + 0x00292fc8, // n0x0db2 c0x0000 (---------------) + I wanouchi + 0x002808c8, // n0x0db3 c0x0000 (---------------) + I yamagata + 0x00341746, // n0x0db4 c0x0000 (---------------) + I yaotsu + 0x00321484, // n0x0db5 c0x0000 (---------------) + I yoro + 0x0021e4c6, // n0x0db6 c0x0000 (---------------) + I annaka + 0x003005c7, // n0x0db7 c0x0000 (---------------) + I chiyoda + 0x00278cc7, // n0x0db8 c0x0000 (---------------) + I fujioka + 0x0026804f, // n0x0db9 c0x0000 (---------------) + I higashiagatsuma + 0x00204687, // n0x0dba c0x0000 (---------------) + I isesaki + 0x0037e007, // n0x0dbb c0x0000 (---------------) + I itakura + 0x003061c5, // n0x0dbc c0x0000 (---------------) + I kanna + 0x002e2345, // n0x0dbd c0x0000 (---------------) + I kanra + 0x0029f0c9, // n0x0dbe c0x0000 (---------------) + I katashina + 0x0026b7c6, // n0x0dbf c0x0000 (---------------) + I kawaba + 0x0027f4c5, // n0x0dc0 c0x0000 (---------------) + I kiryu + 0x002828c7, // n0x0dc1 c0x0000 (---------------) + I kusatsu + 0x002c5d48, // n0x0dc2 c0x0000 (---------------) + I maebashi + 0x002be885, // n0x0dc3 c0x0000 (---------------) + I meiwa + 0x00298e06, // n0x0dc4 c0x0000 (---------------) + I midori + 0x00216448, // n0x0dc5 c0x0000 (---------------) + I minakami + 0x00354f8a, // n0x0dc6 c0x0000 (---------------) + I naganohara + 0x00356448, // n0x0dc7 c0x0000 (---------------) + I nakanojo + 0x003a1d07, // n0x0dc8 c0x0000 (---------------) + I nanmoku + 0x0022f206, // n0x0dc9 c0x0000 (---------------) + I numata + 0x00280286, // n0x0dca c0x0000 (---------------) + I oizumi + 0x0021c683, // n0x0dcb c0x0000 (---------------) + I ora + 0x00204083, // n0x0dcc c0x0000 (---------------) + I ota + 0x00281449, // n0x0dcd c0x0000 (---------------) + I shibukawa + 0x00269149, // n0x0dce c0x0000 (---------------) + I shimonita + 0x00299ec6, // n0x0dcf c0x0000 (---------------) + I shinto + 0x00319905, // n0x0dd0 c0x0000 (---------------) + I showa + 0x002a06c8, // 
n0x0dd1 c0x0000 (---------------) + I takasaki + 0x002c0148, // n0x0dd2 c0x0000 (---------------) + I takayama + 0x0039a108, // n0x0dd3 c0x0000 (---------------) + I tamamura + 0x0038408b, // n0x0dd4 c0x0000 (---------------) + I tatebayashi + 0x002a2487, // n0x0dd5 c0x0000 (---------------) + I tomioka + 0x002ff549, // n0x0dd6 c0x0000 (---------------) + I tsukiyono + 0x002682c8, // n0x0dd7 c0x0000 (---------------) + I tsumagoi + 0x00387304, // n0x0dd8 c0x0000 (---------------) + I ueno + 0x002c6948, // n0x0dd9 c0x0000 (---------------) + I yoshioka + 0x0028bb49, // n0x0dda c0x0000 (---------------) + I asaminami + 0x002ac2c5, // n0x0ddb c0x0000 (---------------) + I daiwa + 0x00249047, // n0x0ddc c0x0000 (---------------) + I etajima + 0x002bc1c5, // n0x0ddd c0x0000 (---------------) + I fuchu + 0x002807c8, // n0x0dde c0x0000 (---------------) + I fukuyama + 0x0028f38b, // n0x0ddf c0x0000 (---------------) + I hatsukaichi + 0x00293b10, // n0x0de0 c0x0000 (---------------) + I higashihiroshima + 0x002a8645, // n0x0de1 c0x0000 (---------------) + I hongo + 0x00217f0c, // n0x0de2 c0x0000 (---------------) + I jinsekikogen + 0x0036bc05, // n0x0de3 c0x0000 (---------------) + I kaita + 0x0027db43, // n0x0de4 c0x0000 (---------------) + I kui + 0x00383786, // n0x0de5 c0x0000 (---------------) + I kumano + 0x002b7544, // n0x0de6 c0x0000 (---------------) + I kure + 0x003a69c6, // n0x0de7 c0x0000 (---------------) + I mihara + 0x0029c447, // n0x0de8 c0x0000 (---------------) + I miyoshi + 0x002164c4, // n0x0de9 c0x0000 (---------------) + I naka + 0x002d2088, // n0x0dea c0x0000 (---------------) + I onomichi + 0x002eb7cd, // n0x0deb c0x0000 (---------------) + I osakikamijima + 0x00305a45, // n0x0dec c0x0000 (---------------) + I otake + 0x002458c4, // n0x0ded c0x0000 (---------------) + I saka + 0x00222f84, // n0x0dee c0x0000 (---------------) + I sera + 0x0027fd49, // n0x0def c0x0000 (---------------) + I seranishi + 0x00286908, // n0x0df0 c0x0000 (---------------) + 
I shinichi + 0x0030bd87, // n0x0df1 c0x0000 (---------------) + I shobara + 0x00268588, // n0x0df2 c0x0000 (---------------) + I takehara + 0x00281748, // n0x0df3 c0x0000 (---------------) + I abashiri + 0x0027acc5, // n0x0df4 c0x0000 (---------------) + I abira + 0x00208b87, // n0x0df5 c0x0000 (---------------) + I aibetsu + 0x0027ac47, // n0x0df6 c0x0000 (---------------) + I akabira + 0x00209907, // n0x0df7 c0x0000 (---------------) + I akkeshi + 0x002bf189, // n0x0df8 c0x0000 (---------------) + I asahikawa + 0x0023bbc9, // n0x0df9 c0x0000 (---------------) + I ashibetsu + 0x00244e86, // n0x0dfa c0x0000 (---------------) + I ashoro + 0x002b4086, // n0x0dfb c0x0000 (---------------) + I assabu + 0x00268286, // n0x0dfc c0x0000 (---------------) + I atsuma + 0x0026a3c5, // n0x0dfd c0x0000 (---------------) + I bibai + 0x002d81c4, // n0x0dfe c0x0000 (---------------) + I biei + 0x00200ec6, // n0x0dff c0x0000 (---------------) + I bifuka + 0x00202106, // n0x0e00 c0x0000 (---------------) + I bihoro + 0x0027ad08, // n0x0e01 c0x0000 (---------------) + I biratori + 0x0029730b, // n0x0e02 c0x0000 (---------------) + I chippubetsu + 0x002ae607, // n0x0e03 c0x0000 (---------------) + I chitose + 0x0020c7c4, // n0x0e04 c0x0000 (---------------) + I date + 0x00223e46, // n0x0e05 c0x0000 (---------------) + I ebetsu + 0x00283707, // n0x0e06 c0x0000 (---------------) + I embetsu + 0x002f5f45, // n0x0e07 c0x0000 (---------------) + I eniwa + 0x00239145, // n0x0e08 c0x0000 (---------------) + I erimo + 0x00200484, // n0x0e09 c0x0000 (---------------) + I esan + 0x0023bb46, // n0x0e0a c0x0000 (---------------) + I esashi + 0x00200f48, // n0x0e0b c0x0000 (---------------) + I fukagawa + 0x0032fcc9, // n0x0e0c c0x0000 (---------------) + I fukushima + 0x00253846, // n0x0e0d c0x0000 (---------------) + I furano + 0x00285888, // n0x0e0e c0x0000 (---------------) + I furubira + 0x0036b106, // n0x0e0f c0x0000 (---------------) + I haboro + 0x0032ccc8, // n0x0e10 c0x0000 
(---------------) + I hakodate + 0x002a9b8c, // n0x0e11 c0x0000 (---------------) + I hamatonbetsu + 0x0027ab86, // n0x0e12 c0x0000 (---------------) + I hidaka + 0x0029570d, // n0x0e13 c0x0000 (---------------) + I higashikagura + 0x00295b8b, // n0x0e14 c0x0000 (---------------) + I higashikawa + 0x0037ec85, // n0x0e15 c0x0000 (---------------) + I hiroo + 0x00201b47, // n0x0e16 c0x0000 (---------------) + I hokuryu + 0x0033d0c6, // n0x0e17 c0x0000 (---------------) + I hokuto + 0x002e8288, // n0x0e18 c0x0000 (---------------) + I honbetsu + 0x00244f09, // n0x0e19 c0x0000 (---------------) + I horokanai + 0x002bd888, // n0x0e1a c0x0000 (---------------) + I horonobe + 0x00202585, // n0x0e1b c0x0000 (---------------) + I ikeda + 0x002fe547, // n0x0e1c c0x0000 (---------------) + I imakane + 0x0027eec8, // n0x0e1d c0x0000 (---------------) + I ishikari + 0x00272289, // n0x0e1e c0x0000 (---------------) + I iwamizawa + 0x0023b0c6, // n0x0e1f c0x0000 (---------------) + I iwanai + 0x0025c78a, // n0x0e20 c0x0000 (---------------) + I kamifurano + 0x002e8008, // n0x0e21 c0x0000 (---------------) + I kamikawa + 0x002bd6cb, // n0x0e22 c0x0000 (---------------) + I kamishihoro + 0x00291a4c, // n0x0e23 c0x0000 (---------------) + I kamisunagawa + 0x002d7c88, // n0x0e24 c0x0000 (---------------) + I kamoenai + 0x0027c586, // n0x0e25 c0x0000 (---------------) + I kayabe + 0x00208588, // n0x0e26 c0x0000 (---------------) + I kembuchi + 0x002047c7, // n0x0e27 c0x0000 (---------------) + I kikonai + 0x0023be49, // n0x0e28 c0x0000 (---------------) + I kimobetsu + 0x0020ed4d, // n0x0e29 c0x0000 (---------------) + I kitahiroshima + 0x0029d586, // n0x0e2a c0x0000 (---------------) + I kitami + 0x00297008, // n0x0e2b c0x0000 (---------------) + I kiyosato + 0x002eff49, // n0x0e2c c0x0000 (---------------) + I koshimizu + 0x002b4a48, // n0x0e2d c0x0000 (---------------) + I kunneppu + 0x002840c8, // n0x0e2e c0x0000 (---------------) + I kuriyama + 0x002b8a8c, // n0x0e2f c0x0000 
(---------------) + I kuromatsunai + 0x002b9a07, // n0x0e30 c0x0000 (---------------) + I kushiro + 0x002ba907, // n0x0e31 c0x0000 (---------------) + I kutchan + 0x002befc5, // n0x0e32 c0x0000 (---------------) + I kyowa + 0x0024eec7, // n0x0e33 c0x0000 (---------------) + I mashike + 0x002c5c08, // n0x0e34 c0x0000 (---------------) + I matsumae + 0x0029b306, // n0x0e35 c0x0000 (---------------) + I mikasa + 0x002536cc, // n0x0e36 c0x0000 (---------------) + I minamifurano + 0x002e3788, // n0x0e37 c0x0000 (---------------) + I mombetsu + 0x002c7c48, // n0x0e38 c0x0000 (---------------) + I moseushi + 0x002b6d86, // n0x0e39 c0x0000 (---------------) + I mukawa + 0x003961c7, // n0x0e3a c0x0000 (---------------) + I muroran + 0x00245084, // n0x0e3b c0x0000 (---------------) + I naie + 0x002ac8c8, // n0x0e3c c0x0000 (---------------) + I nakagawa + 0x0021840c, // n0x0e3d c0x0000 (---------------) + I nakasatsunai + 0x0036decc, // n0x0e3e c0x0000 (---------------) + I nakatombetsu + 0x002232c5, // n0x0e3f c0x0000 (---------------) + I nanae + 0x0038ae07, // n0x0e40 c0x0000 (---------------) + I nanporo + 0x00321406, // n0x0e41 c0x0000 (---------------) + I nayoro + 0x00396146, // n0x0e42 c0x0000 (---------------) + I nemuro + 0x00296508, // n0x0e43 c0x0000 (---------------) + I niikappu + 0x0039e6c4, // n0x0e44 c0x0000 (---------------) + I niki + 0x002251cb, // n0x0e45 c0x0000 (---------------) + I nishiokoppe + 0x0033214b, // n0x0e46 c0x0000 (---------------) + I noboribetsu + 0x0022f206, // n0x0e47 c0x0000 (---------------) + I numata + 0x002eb647, // n0x0e48 c0x0000 (---------------) + I obihiro + 0x00309145, // n0x0e49 c0x0000 (---------------) + I obira + 0x0026f985, // n0x0e4a c0x0000 (---------------) + I oketo + 0x00225306, // n0x0e4b c0x0000 (---------------) + I okoppe + 0x00274205, // n0x0e4c c0x0000 (---------------) + I otaru + 0x002be145, // n0x0e4d c0x0000 (---------------) + I otobe + 0x002bf687, // n0x0e4e c0x0000 (---------------) + I otofuke + 
0x00278689, // n0x0e4f c0x0000 (---------------) + I otoineppu + 0x002e6d04, // n0x0e50 c0x0000 (---------------) + I oumu + 0x00277145, // n0x0e51 c0x0000 (---------------) + I ozora + 0x002d72c5, // n0x0e52 c0x0000 (---------------) + I pippu + 0x00289ac8, // n0x0e53 c0x0000 (---------------) + I rankoshi + 0x002d1ec5, // n0x0e54 c0x0000 (---------------) + I rebun + 0x002a7649, // n0x0e55 c0x0000 (---------------) + I rikubetsu + 0x002a5647, // n0x0e56 c0x0000 (---------------) + I rishiri + 0x002a564b, // n0x0e57 c0x0000 (---------------) + I rishirifuji + 0x0031cf46, // n0x0e58 c0x0000 (---------------) + I saroma + 0x0036b689, // n0x0e59 c0x0000 (---------------) + I sarufutsu + 0x00229388, // n0x0e5a c0x0000 (---------------) + I shakotan + 0x00259285, // n0x0e5b c0x0000 (---------------) + I shari + 0x00209a08, // n0x0e5c c0x0000 (---------------) + I shibecha + 0x0023bc08, // n0x0e5d c0x0000 (---------------) + I shibetsu + 0x0021afc7, // n0x0e5e c0x0000 (---------------) + I shikabe + 0x00280147, // n0x0e5f c0x0000 (---------------) + I shikaoi + 0x00280bc9, // n0x0e60 c0x0000 (---------------) + I shimamaki + 0x002281c7, // n0x0e61 c0x0000 (---------------) + I shimizu + 0x00263849, // n0x0e62 c0x0000 (---------------) + I shimokawa + 0x0029120c, // n0x0e63 c0x0000 (---------------) + I shinshinotsu + 0x00299ec8, // n0x0e64 c0x0000 (---------------) + I shintoku + 0x002d9d89, // n0x0e65 c0x0000 (---------------) + I shiranuka + 0x002d5707, // n0x0e66 c0x0000 (---------------) + I shiraoi + 0x00281809, // n0x0e67 c0x0000 (---------------) + I shiriuchi + 0x00218b47, // n0x0e68 c0x0000 (---------------) + I sobetsu + 0x00291b48, // n0x0e69 c0x0000 (---------------) + I sunagawa + 0x00289e05, // n0x0e6a c0x0000 (---------------) + I taiki + 0x00321d06, // n0x0e6b c0x0000 (---------------) + I takasu + 0x002b8f88, // n0x0e6c c0x0000 (---------------) + I takikawa + 0x002f8588, // n0x0e6d c0x0000 (---------------) + I takinoue + 0x00281149, // n0x0e6e c0x0000 
(---------------) + I teshikaga + 0x002be187, // n0x0e6f c0x0000 (---------------) + I tobetsu + 0x00270305, // n0x0e70 c0x0000 (---------------) + I tohma + 0x0020bf49, // n0x0e71 c0x0000 (---------------) + I tomakomai + 0x00269686, // n0x0e72 c0x0000 (---------------) + I tomari + 0x0028e9c4, // n0x0e73 c0x0000 (---------------) + I toya + 0x0034cc46, // n0x0e74 c0x0000 (---------------) + I toyako + 0x00265f88, // n0x0e75 c0x0000 (---------------) + I toyotomi + 0x0026ae47, // n0x0e76 c0x0000 (---------------) + I toyoura + 0x00297508, // n0x0e77 c0x0000 (---------------) + I tsubetsu + 0x0022b1c9, // n0x0e78 c0x0000 (---------------) + I tsukigata + 0x002b6b07, // n0x0e79 c0x0000 (---------------) + I urakawa + 0x0029d406, // n0x0e7a c0x0000 (---------------) + I urausu + 0x00201c04, // n0x0e7b c0x0000 (---------------) + I uryu + 0x00203b09, // n0x0e7c c0x0000 (---------------) + I utashinai + 0x00208a08, // n0x0e7d c0x0000 (---------------) + I wakkanai + 0x002b6c47, // n0x0e7e c0x0000 (---------------) + I wassamu + 0x00325346, // n0x0e7f c0x0000 (---------------) + I yakumo + 0x00239786, // n0x0e80 c0x0000 (---------------) + I yoichi + 0x00207dc4, // n0x0e81 c0x0000 (---------------) + I aioi + 0x002a8a46, // n0x0e82 c0x0000 (---------------) + I akashi + 0x0020a683, // n0x0e83 c0x0000 (---------------) + I ako + 0x002ec709, // n0x0e84 c0x0000 (---------------) + I amagasaki + 0x002029c6, // n0x0e85 c0x0000 (---------------) + I aogaki + 0x0029bac5, // n0x0e86 c0x0000 (---------------) + I asago + 0x0029d906, // n0x0e87 c0x0000 (---------------) + I ashiya + 0x002a2f45, // n0x0e88 c0x0000 (---------------) + I awaji + 0x0027f348, // n0x0e89 c0x0000 (---------------) + I fukusaki + 0x002d79c7, // n0x0e8a c0x0000 (---------------) + I goshiki + 0x00205b86, // n0x0e8b c0x0000 (---------------) + I harima + 0x00229046, // n0x0e8c c0x0000 (---------------) + I himeji + 0x00265608, // n0x0e8d c0x0000 (---------------) + I ichikawa + 0x0029f247, // n0x0e8e 
c0x0000 (---------------) + I inagawa + 0x0029d5c5, // n0x0e8f c0x0000 (---------------) + I itami + 0x0029de08, // n0x0e90 c0x0000 (---------------) + I kakogawa + 0x00383348, // n0x0e91 c0x0000 (---------------) + I kamigori + 0x002e8008, // n0x0e92 c0x0000 (---------------) + I kamikawa + 0x0036bac5, // n0x0e93 c0x0000 (---------------) + I kasai + 0x00321d86, // n0x0e94 c0x0000 (---------------) + I kasuga + 0x002bdc49, // n0x0e95 c0x0000 (---------------) + I kawanishi + 0x0028ef04, // n0x0e96 c0x0000 (---------------) + I miki + 0x0036a88b, // n0x0e97 c0x0000 (---------------) + I minamiawaji + 0x0021d1cb, // n0x0e98 c0x0000 (---------------) + I nishinomiya + 0x0022d989, // n0x0e99 c0x0000 (---------------) + I nishiwaki + 0x0020a703, // n0x0e9a c0x0000 (---------------) + I ono + 0x00259bc5, // n0x0e9b c0x0000 (---------------) + I sanda + 0x002069c6, // n0x0e9c c0x0000 (---------------) + I sannan + 0x002db6c8, // n0x0e9d c0x0000 (---------------) + I sasayama + 0x0022ed44, // n0x0e9e c0x0000 (---------------) + I sayo + 0x00266d06, // n0x0e9f c0x0000 (---------------) + I shingu + 0x002cb309, // n0x0ea0 c0x0000 (---------------) + I shinonsen + 0x002af085, // n0x0ea1 c0x0000 (---------------) + I shiso + 0x002bf5c6, // n0x0ea2 c0x0000 (---------------) + I sumoto + 0x0022c746, // n0x0ea3 c0x0000 (---------------) + I taishi + 0x00216a44, // n0x0ea4 c0x0000 (---------------) + I taka + 0x0029614a, // n0x0ea5 c0x0000 (---------------) + I takarazuka + 0x0029ba08, // n0x0ea6 c0x0000 (---------------) + I takasago + 0x002f8586, // n0x0ea7 c0x0000 (---------------) + I takino + 0x002ce105, // n0x0ea8 c0x0000 (---------------) + I tamba + 0x0020d407, // n0x0ea9 c0x0000 (---------------) + I tatsuno + 0x0024fbc7, // n0x0eaa c0x0000 (---------------) + I toyooka + 0x00350b04, // n0x0eab c0x0000 (---------------) + I yabu + 0x0021d407, // n0x0eac c0x0000 (---------------) + I yashiro + 0x00206084, // n0x0ead c0x0000 (---------------) + I yoka + 0x00206086, // 
n0x0eae c0x0000 (---------------) + I yokawa + 0x00208fc3, // n0x0eaf c0x0000 (---------------) + I ami + 0x002bf185, // n0x0eb0 c0x0000 (---------------) + I asahi + 0x0034be85, // n0x0eb1 c0x0000 (---------------) + I bando + 0x0022cac8, // n0x0eb2 c0x0000 (---------------) + I chikusei + 0x00214cc5, // n0x0eb3 c0x0000 (---------------) + I daigo + 0x0027a8c9, // n0x0eb4 c0x0000 (---------------) + I fujishiro + 0x002a27c7, // n0x0eb5 c0x0000 (---------------) + I hitachi + 0x002ac70b, // n0x0eb6 c0x0000 (---------------) + I hitachinaka + 0x002a27cc, // n0x0eb7 c0x0000 (---------------) + I hitachiomiya + 0x002a344a, // n0x0eb8 c0x0000 (---------------) + I hitachiota + 0x002c1ac7, // n0x0eb9 c0x0000 (---------------) + I ibaraki + 0x002013c3, // n0x0eba c0x0000 (---------------) + I ina + 0x00370248, // n0x0ebb c0x0000 (---------------) + I inashiki + 0x0036bc85, // n0x0ebc c0x0000 (---------------) + I itako + 0x002be905, // n0x0ebd c0x0000 (---------------) + I iwama + 0x00334e84, // n0x0ebe c0x0000 (---------------) + I joso + 0x00291a46, // n0x0ebf c0x0000 (---------------) + I kamisu + 0x00243646, // n0x0ec0 c0x0000 (---------------) + I kasama + 0x002a8a87, // n0x0ec1 c0x0000 (---------------) + I kashima + 0x0020b4cb, // n0x0ec2 c0x0000 (---------------) + I kasumigaura + 0x00226304, // n0x0ec3 c0x0000 (---------------) + I koga + 0x003781c4, // n0x0ec4 c0x0000 (---------------) + I miho + 0x0026e904, // n0x0ec5 c0x0000 (---------------) + I mito + 0x002c60c6, // n0x0ec6 c0x0000 (---------------) + I moriya + 0x002164c4, // n0x0ec7 c0x0000 (---------------) + I naka + 0x002bffc8, // n0x0ec8 c0x0000 (---------------) + I namegata + 0x00334c85, // n0x0ec9 c0x0000 (---------------) + I oarai + 0x00245a05, // n0x0eca c0x0000 (---------------) + I ogawa + 0x0039a047, // n0x0ecb c0x0000 (---------------) + I omitama + 0x00201c49, // n0x0ecc c0x0000 (---------------) + I ryugasaki + 0x0034ca05, // n0x0ecd c0x0000 (---------------) + I sakai + 0x00372a4a, // 
n0x0ece c0x0000 (---------------) + I sakuragawa + 0x002c5e89, // n0x0ecf c0x0000 (---------------) + I shimodate + 0x00285e4a, // n0x0ed0 c0x0000 (---------------) + I shimotsuma + 0x00395d09, // n0x0ed1 c0x0000 (---------------) + I shirosato + 0x00332b04, // n0x0ed2 c0x0000 (---------------) + I sowa + 0x002af985, // n0x0ed3 c0x0000 (---------------) + I suifu + 0x00348048, // n0x0ed4 c0x0000 (---------------) + I takahagi + 0x002d930b, // n0x0ed5 c0x0000 (---------------) + I tamatsukuri + 0x002fa0c5, // n0x0ed6 c0x0000 (---------------) + I tokai + 0x0028e1c6, // n0x0ed7 c0x0000 (---------------) + I tomobe + 0x00221d44, // n0x0ed8 c0x0000 (---------------) + I tone + 0x0027ae06, // n0x0ed9 c0x0000 (---------------) + I toride + 0x002b6989, // n0x0eda c0x0000 (---------------) + I tsuchiura + 0x00223f07, // n0x0edb c0x0000 (---------------) + I tsukuba + 0x0030c0c8, // n0x0edc c0x0000 (---------------) + I uchihara + 0x00245746, // n0x0edd c0x0000 (---------------) + I ushiku + 0x00300547, // n0x0ede c0x0000 (---------------) + I yachiyo + 0x002808c8, // n0x0edf c0x0000 (---------------) + I yamagata + 0x00386686, // n0x0ee0 c0x0000 (---------------) + I yawara + 0x0024e8c4, // n0x0ee1 c0x0000 (---------------) + I yuki + 0x0035e047, // n0x0ee2 c0x0000 (---------------) + I anamizu + 0x00344045, // n0x0ee3 c0x0000 (---------------) + I hakui + 0x0034a747, // n0x0ee4 c0x0000 (---------------) + I hakusan + 0x00200fc4, // n0x0ee5 c0x0000 (---------------) + I kaga + 0x0033d046, // n0x0ee6 c0x0000 (---------------) + I kahoku + 0x0021a748, // n0x0ee7 c0x0000 (---------------) + I kanazawa + 0x00295d48, // n0x0ee8 c0x0000 (---------------) + I kawakita + 0x002aa707, // n0x0ee9 c0x0000 (---------------) + I komatsu + 0x002546c8, // n0x0eea c0x0000 (---------------) + I nakanoto + 0x002b4305, // n0x0eeb c0x0000 (---------------) + I nanao + 0x0020a544, // n0x0eec c0x0000 (---------------) + I nomi + 0x00265508, // n0x0eed c0x0000 (---------------) + I nonoichi + 
0x002547c4, // n0x0eee c0x0000 (---------------) + I noto + 0x00216905, // n0x0eef c0x0000 (---------------) + I shika + 0x002eeb44, // n0x0ef0 c0x0000 (---------------) + I suzu + 0x002304c7, // n0x0ef1 c0x0000 (---------------) + I tsubata + 0x00288347, // n0x0ef2 c0x0000 (---------------) + I tsurugi + 0x00281948, // n0x0ef3 c0x0000 (---------------) + I uchinada + 0x002a2f86, // n0x0ef4 c0x0000 (---------------) + I wajima + 0x00214c45, // n0x0ef5 c0x0000 (---------------) + I fudai + 0x0027a6c8, // n0x0ef6 c0x0000 (---------------) + I fujisawa + 0x00356648, // n0x0ef7 c0x0000 (---------------) + I hanamaki + 0x0029eac9, // n0x0ef8 c0x0000 (---------------) + I hiraizumi + 0x0021d4c6, // n0x0ef9 c0x0000 (---------------) + I hirono + 0x00236408, // n0x0efa c0x0000 (---------------) + I ichinohe + 0x002820ca, // n0x0efb c0x0000 (---------------) + I ichinoseki + 0x002f5fc8, // n0x0efc c0x0000 (---------------) + I iwaizumi + 0x002d8485, // n0x0efd c0x0000 (---------------) + I iwate + 0x00224706, // n0x0efe c0x0000 (---------------) + I joboji + 0x0028d888, // n0x0eff c0x0000 (---------------) + I kamaishi + 0x002fe60a, // n0x0f00 c0x0000 (---------------) + I kanegasaki + 0x00271b47, // n0x0f01 c0x0000 (---------------) + I karumai + 0x002864c5, // n0x0f02 c0x0000 (---------------) + I kawai + 0x00294308, // n0x0f03 c0x0000 (---------------) + I kitakami + 0x00371504, // n0x0f04 c0x0000 (---------------) + I kuji + 0x0036b286, // n0x0f05 c0x0000 (---------------) + I kunohe + 0x002bb088, // n0x0f06 c0x0000 (---------------) + I kuzumaki + 0x0020a5c6, // n0x0f07 c0x0000 (---------------) + I miyako + 0x002ea148, // n0x0f08 c0x0000 (---------------) + I mizusawa + 0x0022aa47, // n0x0f09 c0x0000 (---------------) + I morioka + 0x002072c6, // n0x0f0a c0x0000 (---------------) + I ninohe + 0x0037f944, // n0x0f0b c0x0000 (---------------) + I noda + 0x002db247, // n0x0f0c c0x0000 (---------------) + I ofunato + 0x00342084, // n0x0f0d c0x0000 (---------------) + I 
oshu + 0x002b6947, // n0x0f0e c0x0000 (---------------) + I otsuchi + 0x0023848d, // n0x0f0f c0x0000 (---------------) + I rikuzentakata + 0x00203845, // n0x0f10 c0x0000 (---------------) + I shiwa + 0x002b108b, // n0x0f11 c0x0000 (---------------) + I shizukuishi + 0x002a0a46, // n0x0f12 c0x0000 (---------------) + I sumita + 0x00252c88, // n0x0f13 c0x0000 (---------------) + I tanohata + 0x003895c4, // n0x0f14 c0x0000 (---------------) + I tono + 0x00276f06, // n0x0f15 c0x0000 (---------------) + I yahaba + 0x0027ce46, // n0x0f16 c0x0000 (---------------) + I yamada + 0x002088c7, // n0x0f17 c0x0000 (---------------) + I ayagawa + 0x002953cd, // n0x0f18 c0x0000 (---------------) + I higashikagawa + 0x00322787, // n0x0f19 c0x0000 (---------------) + I kanonji + 0x00339cc8, // n0x0f1a c0x0000 (---------------) + I kotohira + 0x00366045, // n0x0f1b c0x0000 (---------------) + I manno + 0x00297bc8, // n0x0f1c c0x0000 (---------------) + I marugame + 0x002c0886, // n0x0f1d c0x0000 (---------------) + I mitoyo + 0x002b4388, // n0x0f1e c0x0000 (---------------) + I naoshima + 0x00213bc6, // n0x0f1f c0x0000 (---------------) + I sanuki + 0x0032f6c7, // n0x0f20 c0x0000 (---------------) + I tadotsu + 0x0021f709, // n0x0f21 c0x0000 (---------------) + I takamatsu + 0x003895c7, // n0x0f22 c0x0000 (---------------) + I tonosho + 0x00287508, // n0x0f23 c0x0000 (---------------) + I uchinomi + 0x00273885, // n0x0f24 c0x0000 (---------------) + I utazu + 0x0021c188, // n0x0f25 c0x0000 (---------------) + I zentsuji + 0x00309245, // n0x0f26 c0x0000 (---------------) + I akune + 0x00240f85, // n0x0f27 c0x0000 (---------------) + I amami + 0x002e4bc5, // n0x0f28 c0x0000 (---------------) + I hioki + 0x00223543, // n0x0f29 c0x0000 (---------------) + I isa + 0x00282c44, // n0x0f2a c0x0000 (---------------) + I isen + 0x002802c5, // n0x0f2b c0x0000 (---------------) + I izumi + 0x00276189, // n0x0f2c c0x0000 (---------------) + I kagoshima + 0x002b2786, // n0x0f2d c0x0000 
(---------------) + I kanoya + 0x002d2cc8, // n0x0f2e c0x0000 (---------------) + I kawanabe + 0x002fe805, // n0x0f2f c0x0000 (---------------) + I kinko + 0x0030b707, // n0x0f30 c0x0000 (---------------) + I kouyama + 0x0032f9ca, // n0x0f31 c0x0000 (---------------) + I makurazaki + 0x002bf509, // n0x0f32 c0x0000 (---------------) + I matsumoto + 0x002b15ca, // n0x0f33 c0x0000 (---------------) + I minamitane + 0x002cbe48, // n0x0f34 c0x0000 (---------------) + I nakatane + 0x0021efcc, // n0x0f35 c0x0000 (---------------) + I nishinoomote + 0x0028294d, // n0x0f36 c0x0000 (---------------) + I satsumasendai + 0x00315283, // n0x0f37 c0x0000 (---------------) + I soo + 0x002ea048, // n0x0f38 c0x0000 (---------------) + I tarumizu + 0x00206685, // n0x0f39 c0x0000 (---------------) + I yusui + 0x00354d46, // n0x0f3a c0x0000 (---------------) + I aikawa + 0x00377dc6, // n0x0f3b c0x0000 (---------------) + I atsugi + 0x0024d085, // n0x0f3c c0x0000 (---------------) + I ayase + 0x0028cb89, // n0x0f3d c0x0000 (---------------) + I chigasaki + 0x00325545, // n0x0f3e c0x0000 (---------------) + I ebina + 0x0027a6c8, // n0x0f3f c0x0000 (---------------) + I fujisawa + 0x0025ad86, // n0x0f40 c0x0000 (---------------) + I hadano + 0x00339046, // n0x0f41 c0x0000 (---------------) + I hakone + 0x002a01c9, // n0x0f42 c0x0000 (---------------) + I hiratsuka + 0x00385e87, // n0x0f43 c0x0000 (---------------) + I isehara + 0x002fdec6, // n0x0f44 c0x0000 (---------------) + I kaisei + 0x0032f948, // n0x0f45 c0x0000 (---------------) + I kamakura + 0x00206008, // n0x0f46 c0x0000 (---------------) + I kiyokawa + 0x002d0bc7, // n0x0f47 c0x0000 (---------------) + I matsuda + 0x0022840e, // n0x0f48 c0x0000 (---------------) + I minamiashigara + 0x002c0ac5, // n0x0f49 c0x0000 (---------------) + I miura + 0x00272185, // n0x0f4a c0x0000 (---------------) + I nakai + 0x0020a4c8, // n0x0f4b c0x0000 (---------------) + I ninomiya + 0x00384a47, // n0x0f4c c0x0000 (---------------) + I odawara + 
0x00207e42, // n0x0f4d c0x0000 (---------------) + I oi + 0x002b8604, // n0x0f4e c0x0000 (---------------) + I oiso + 0x003a68ca, // n0x0f4f c0x0000 (---------------) + I sagamihara + 0x002b6d08, // n0x0f50 c0x0000 (---------------) + I samukawa + 0x00283806, // n0x0f51 c0x0000 (---------------) + I tsukui + 0x002982c8, // n0x0f52 c0x0000 (---------------) + I yamakita + 0x0028f086, // n0x0f53 c0x0000 (---------------) + I yamato + 0x00327d48, // n0x0f54 c0x0000 (---------------) + I yokosuka + 0x002abfc8, // n0x0f55 c0x0000 (---------------) + I yugawara + 0x00240f44, // n0x0f56 c0x0000 (---------------) + I zama + 0x0032a845, // n0x0f57 c0x0000 (---------------) + I zushi + 0x00686744, // n0x0f58 c0x0001 (---------------) ! I city + 0x00686744, // n0x0f59 c0x0001 (---------------) ! I city + 0x00686744, // n0x0f5a c0x0001 (---------------) ! I city + 0x00201dc3, // n0x0f5b c0x0000 (---------------) + I aki + 0x00239606, // n0x0f5c c0x0000 (---------------) + I geisei + 0x0027ab86, // n0x0f5d c0x0000 (---------------) + I hidaka + 0x0029cc4c, // n0x0f5e c0x0000 (---------------) + I higashitsuno + 0x00207303, // n0x0f5f c0x0000 (---------------) + I ino + 0x00281286, // n0x0f60 c0x0000 (---------------) + I kagami + 0x00216544, // n0x0f61 c0x0000 (---------------) + I kami + 0x002c1c08, // n0x0f62 c0x0000 (---------------) + I kitagawa + 0x002cb145, // n0x0f63 c0x0000 (---------------) + I kochi + 0x003a69c6, // n0x0f64 c0x0000 (---------------) + I mihara + 0x002b3848, // n0x0f65 c0x0000 (---------------) + I motoyama + 0x002ce6c6, // n0x0f66 c0x0000 (---------------) + I muroto + 0x00205b06, // n0x0f67 c0x0000 (---------------) + I nahari + 0x00365cc8, // n0x0f68 c0x0000 (---------------) + I nakamura + 0x002a0cc7, // n0x0f69 c0x0000 (---------------) + I nankoku + 0x00227f89, // n0x0f6a c0x0000 (---------------) + I nishitosa + 0x0036be4a, // n0x0f6b c0x0000 (---------------) + I niyodogawa + 0x00248784, // n0x0f6c c0x0000 (---------------) + I ochi + 
0x002060c5, // n0x0f6d c0x0000 (---------------) + I okawa + 0x0025bb45, // n0x0f6e c0x0000 (---------------) + I otoyo + 0x0021f306, // n0x0f6f c0x0000 (---------------) + I otsuki + 0x00248a46, // n0x0f70 c0x0000 (---------------) + I sakawa + 0x002a66c6, // n0x0f71 c0x0000 (---------------) + I sukumo + 0x002e9346, // n0x0f72 c0x0000 (---------------) + I susaki + 0x002280c4, // n0x0f73 c0x0000 (---------------) + I tosa + 0x002280cb, // n0x0f74 c0x0000 (---------------) + I tosashimizu + 0x00247dc4, // n0x0f75 c0x0000 (---------------) + I toyo + 0x0020d485, // n0x0f76 c0x0000 (---------------) + I tsuno + 0x002ab485, // n0x0f77 c0x0000 (---------------) + I umaji + 0x00280646, // n0x0f78 c0x0000 (---------------) + I yasuda + 0x00202348, // n0x0f79 c0x0000 (---------------) + I yusuhara + 0x00282807, // n0x0f7a c0x0000 (---------------) + I amakusa + 0x0030be84, // n0x0f7b c0x0000 (---------------) + I arao + 0x00264783, // n0x0f7c c0x0000 (---------------) + I aso + 0x00371005, // n0x0f7d c0x0000 (---------------) + I choyo + 0x0024ad87, // n0x0f7e c0x0000 (---------------) + I gyokuto + 0x002a41c9, // n0x0f7f c0x0000 (---------------) + I hitoyoshi + 0x0028270b, // n0x0f80 c0x0000 (---------------) + I kamiamakusa + 0x002a8a87, // n0x0f81 c0x0000 (---------------) + I kashima + 0x0022c9c7, // n0x0f82 c0x0000 (---------------) + I kikuchi + 0x002d8744, // n0x0f83 c0x0000 (---------------) + I kosa + 0x002b3748, // n0x0f84 c0x0000 (---------------) + I kumamoto + 0x002aa987, // n0x0f85 c0x0000 (---------------) + I mashiki + 0x002a4406, // n0x0f86 c0x0000 (---------------) + I mifune + 0x0024e648, // n0x0f87 c0x0000 (---------------) + I minamata + 0x002a68cb, // n0x0f88 c0x0000 (---------------) + I minamioguni + 0x003608c6, // n0x0f89 c0x0000 (---------------) + I nagasu + 0x00212149, // n0x0f8a c0x0000 (---------------) + I nishihara + 0x002a6a45, // n0x0f8b c0x0000 (---------------) + I oguni + 0x00300a43, // n0x0f8c c0x0000 (---------------) + I ozu + 
0x002bf5c6, // n0x0f8d c0x0000 (---------------) + I sumoto + 0x0022a948, // n0x0f8e c0x0000 (---------------) + I takamori + 0x00212703, // n0x0f8f c0x0000 (---------------) + I uki + 0x0024ae83, // n0x0f90 c0x0000 (---------------) + I uto + 0x00223bc6, // n0x0f91 c0x0000 (---------------) + I yamaga + 0x0028f086, // n0x0f92 c0x0000 (---------------) + I yamato + 0x00382d0a, // n0x0f93 c0x0000 (---------------) + I yatsushiro + 0x0027c5c5, // n0x0f94 c0x0000 (---------------) + I ayabe + 0x0027cc8b, // n0x0f95 c0x0000 (---------------) + I fukuchiyama + 0x0029d84b, // n0x0f96 c0x0000 (---------------) + I higashiyama + 0x00229943, // n0x0f97 c0x0000 (---------------) + I ide + 0x0021cc03, // n0x0f98 c0x0000 (---------------) + I ine + 0x002adb84, // n0x0f99 c0x0000 (---------------) + I joyo + 0x0022ad47, // n0x0f9a c0x0000 (---------------) + I kameoka + 0x0022a9c4, // n0x0f9b c0x0000 (---------------) + I kamo + 0x00201e04, // n0x0f9c c0x0000 (---------------) + I kita + 0x00342404, // n0x0f9d c0x0000 (---------------) + I kizu + 0x002f62c8, // n0x0f9e c0x0000 (---------------) + I kumiyama + 0x002ce048, // n0x0f9f c0x0000 (---------------) + I kyotamba + 0x0031f2c9, // n0x0fa0 c0x0000 (---------------) + I kyotanabe + 0x00341c88, // n0x0fa1 c0x0000 (---------------) + I kyotango + 0x002d1847, // n0x0fa2 c0x0000 (---------------) + I maizuru + 0x00228406, // n0x0fa3 c0x0000 (---------------) + I minami + 0x002d290f, // n0x0fa4 c0x0000 (---------------) + I minamiyamashiro + 0x002c0c06, // n0x0fa5 c0x0000 (---------------) + I miyazu + 0x002cb0c4, // n0x0fa6 c0x0000 (---------------) + I muko + 0x002cde8a, // n0x0fa7 c0x0000 (---------------) + I nagaokakyo + 0x0024ac87, // n0x0fa8 c0x0000 (---------------) + I nakagyo + 0x00206c06, // n0x0fa9 c0x0000 (---------------) + I nantan + 0x0028ea09, // n0x0faa c0x0000 (---------------) + I oyamazaki + 0x0031f245, // n0x0fab c0x0000 (---------------) + I sakyo + 0x0022cc05, // n0x0fac c0x0000 (---------------) + I 
seika + 0x0031f386, // n0x0fad c0x0000 (---------------) + I tanabe + 0x0021c2c3, // n0x0fae c0x0000 (---------------) + I uji + 0x00371549, // n0x0faf c0x0000 (---------------) + I ujitawara + 0x0021a646, // n0x0fb0 c0x0000 (---------------) + I wazuka + 0x0022af89, // n0x0fb1 c0x0000 (---------------) + I yamashina + 0x0038f146, // n0x0fb2 c0x0000 (---------------) + I yawata + 0x002bf185, // n0x0fb3 c0x0000 (---------------) + I asahi + 0x002236c5, // n0x0fb4 c0x0000 (---------------) + I inabe + 0x00204683, // n0x0fb5 c0x0000 (---------------) + I ise + 0x0022ae88, // n0x0fb6 c0x0000 (---------------) + I kameyama + 0x0039f7c7, // n0x0fb7 c0x0000 (---------------) + I kawagoe + 0x002f5c84, // n0x0fb8 c0x0000 (---------------) + I kiho + 0x0021f408, // n0x0fb9 c0x0000 (---------------) + I kisosaki + 0x002da084, // n0x0fba c0x0000 (---------------) + I kiwa + 0x002b9886, // n0x0fbb c0x0000 (---------------) + I komono + 0x00383786, // n0x0fbc c0x0000 (---------------) + I kumano + 0x00243e46, // n0x0fbd c0x0000 (---------------) + I kuwana + 0x002c6249, // n0x0fbe c0x0000 (---------------) + I matsusaka + 0x002be885, // n0x0fbf c0x0000 (---------------) + I meiwa + 0x002da306, // n0x0fc0 c0x0000 (---------------) + I mihama + 0x0025b189, // n0x0fc1 c0x0000 (---------------) + I minamiise + 0x002bfd46, // n0x0fc2 c0x0000 (---------------) + I misugi + 0x002d2a06, // n0x0fc3 c0x0000 (---------------) + I miyama + 0x0037fcc6, // n0x0fc4 c0x0000 (---------------) + I nabari + 0x0020ef45, // n0x0fc5 c0x0000 (---------------) + I shima + 0x002eeb46, // n0x0fc6 c0x0000 (---------------) + I suzuka + 0x0032f6c4, // n0x0fc7 c0x0000 (---------------) + I tado + 0x00289e05, // n0x0fc8 c0x0000 (---------------) + I taiki + 0x002b8f84, // n0x0fc9 c0x0000 (---------------) + I taki + 0x003037c6, // n0x0fca c0x0000 (---------------) + I tamaki + 0x00395ec4, // n0x0fcb c0x0000 (---------------) + I toba + 0x00208c83, // n0x0fcc c0x0000 (---------------) + I tsu + 0x00286185, // 
n0x0fcd c0x0000 (---------------) + I udono + 0x0023b908, // n0x0fce c0x0000 (---------------) + I ureshino + 0x00351b87, // n0x0fcf c0x0000 (---------------) + I watarai + 0x002b1f89, // n0x0fd0 c0x0000 (---------------) + I yokkaichi + 0x002863c8, // n0x0fd1 c0x0000 (---------------) + I furukawa + 0x00297811, // n0x0fd2 c0x0000 (---------------) + I higashimatsushima + 0x0022c7ca, // n0x0fd3 c0x0000 (---------------) + I ishinomaki + 0x0022f147, // n0x0fd4 c0x0000 (---------------) + I iwanuma + 0x0039fd06, // n0x0fd5 c0x0000 (---------------) + I kakuda + 0x00216544, // n0x0fd6 c0x0000 (---------------) + I kami + 0x002b9088, // n0x0fd7 c0x0000 (---------------) + I kawasaki + 0x002935c9, // n0x0fd8 c0x0000 (---------------) + I kesennuma + 0x002a8bc8, // n0x0fd9 c0x0000 (---------------) + I marumori + 0x002979ca, // n0x0fda c0x0000 (---------------) + I matsushima + 0x002a740d, // n0x0fdb c0x0000 (---------------) + I minamisanriku + 0x0022f486, // n0x0fdc c0x0000 (---------------) + I misato + 0x00365dc6, // n0x0fdd c0x0000 (---------------) + I murata + 0x002db306, // n0x0fde c0x0000 (---------------) + I natori + 0x0037e6c7, // n0x0fdf c0x0000 (---------------) + I ogawara + 0x0029ef85, // n0x0fe0 c0x0000 (---------------) + I ohira + 0x003508c7, // n0x0fe1 c0x0000 (---------------) + I onagawa + 0x0021f4c5, // n0x0fe2 c0x0000 (---------------) + I osaki + 0x002a5784, // n0x0fe3 c0x0000 (---------------) + I rifu + 0x002a9286, // n0x0fe4 c0x0000 (---------------) + I semine + 0x00321bc7, // n0x0fe5 c0x0000 (---------------) + I shibata + 0x0037124d, // n0x0fe6 c0x0000 (---------------) + I shichikashuku + 0x0028d7c7, // n0x0fe7 c0x0000 (---------------) + I shikama + 0x0026ebc8, // n0x0fe8 c0x0000 (---------------) + I shiogama + 0x0027a9c9, // n0x0fe9 c0x0000 (---------------) + I shiroishi + 0x00224606, // n0x0fea c0x0000 (---------------) + I tagajo + 0x0023b045, // n0x0feb c0x0000 (---------------) + I taiwa + 0x00216044, // n0x0fec c0x0000 
(---------------) + I tome + 0x00266086, // n0x0fed c0x0000 (---------------) + I tomiya + 0x00350a06, // n0x0fee c0x0000 (---------------) + I wakuya + 0x002b6e86, // n0x0fef c0x0000 (---------------) + I watari + 0x0029ae08, // n0x0ff0 c0x0000 (---------------) + I yamamoto + 0x00212d43, // n0x0ff1 c0x0000 (---------------) + I zao + 0x002088c3, // n0x0ff2 c0x0000 (---------------) + I aya + 0x00328105, // n0x0ff3 c0x0000 (---------------) + I ebino + 0x00337286, // n0x0ff4 c0x0000 (---------------) + I gokase + 0x002abf85, // n0x0ff5 c0x0000 (---------------) + I hyuga + 0x00245948, // n0x0ff6 c0x0000 (---------------) + I kadogawa + 0x0029c60a, // n0x0ff7 c0x0000 (---------------) + I kawaminami + 0x002ddbc4, // n0x0ff8 c0x0000 (---------------) + I kijo + 0x002c1c08, // n0x0ff9 c0x0000 (---------------) + I kitagawa + 0x00295fc8, // n0x0ffa c0x0000 (---------------) + I kitakata + 0x00280487, // n0x0ffb c0x0000 (---------------) + I kitaura + 0x002fe8c9, // n0x0ffc c0x0000 (---------------) + I kobayashi + 0x002b3448, // n0x0ffd c0x0000 (---------------) + I kunitomi + 0x0029a047, // n0x0ffe c0x0000 (---------------) + I kushima + 0x00294c06, // n0x0fff c0x0000 (---------------) + I mimata + 0x0020a5ca, // n0x1000 c0x0000 (---------------) + I miyakonojo + 0x00266108, // n0x1001 c0x0000 (---------------) + I miyazaki + 0x002bd509, // n0x1002 c0x0000 (---------------) + I morotsuka + 0x002869c8, // n0x1003 c0x0000 (---------------) + I nichinan + 0x0021c809, // n0x1004 c0x0000 (---------------) + I nishimera + 0x002bd987, // n0x1005 c0x0000 (---------------) + I nobeoka + 0x00341b45, // n0x1006 c0x0000 (---------------) + I saito + 0x002a1746, // n0x1007 c0x0000 (---------------) + I shiiba + 0x0029b188, // n0x1008 c0x0000 (---------------) + I shintomi + 0x00252e08, // n0x1009 c0x0000 (---------------) + I takaharu + 0x0022b388, // n0x100a c0x0000 (---------------) + I takanabe + 0x00216a48, // n0x100b c0x0000 (---------------) + I takazaki + 0x0020d485, // 
n0x100c c0x0000 (---------------) + I tsuno + 0x00201544, // n0x100d c0x0000 (---------------) + I achi + 0x0039f4c8, // n0x100e c0x0000 (---------------) + I agematsu + 0x00206d04, // n0x100f c0x0000 (---------------) + I anan + 0x00395b04, // n0x1010 c0x0000 (---------------) + I aoki + 0x002bf185, // n0x1011 c0x0000 (---------------) + I asahi + 0x0028ffc7, // n0x1012 c0x0000 (---------------) + I azumino + 0x00201a09, // n0x1013 c0x0000 (---------------) + I chikuhoku + 0x002086c7, // n0x1014 c0x0000 (---------------) + I chikuma + 0x0020db85, // n0x1015 c0x0000 (---------------) + I chino + 0x00278286, // n0x1016 c0x0000 (---------------) + I fujimi + 0x0033bb06, // n0x1017 c0x0000 (---------------) + I hakuba + 0x00202444, // n0x1018 c0x0000 (---------------) + I hara + 0x002a0506, // n0x1019 c0x0000 (---------------) + I hiraya + 0x00214a84, // n0x101a c0x0000 (---------------) + I iida + 0x0025cc86, // n0x101b c0x0000 (---------------) + I iijima + 0x0039e786, // n0x101c c0x0000 (---------------) + I iiyama + 0x00212546, // n0x101d c0x0000 (---------------) + I iizuna + 0x00202585, // n0x101e c0x0000 (---------------) + I ikeda + 0x00245807, // n0x101f c0x0000 (---------------) + I ikusaka + 0x002013c3, // n0x1020 c0x0000 (---------------) + I ina + 0x00249f89, // n0x1021 c0x0000 (---------------) + I karuizawa + 0x002fdbc8, // n0x1022 c0x0000 (---------------) + I kawakami + 0x0021f404, // n0x1023 c0x0000 (---------------) + I kiso + 0x0032fbcd, // n0x1024 c0x0000 (---------------) + I kisofukushima + 0x00295e48, // n0x1025 c0x0000 (---------------) + I kitaaiki + 0x0028e748, // n0x1026 c0x0000 (---------------) + I komagane + 0x002bd486, // n0x1027 c0x0000 (---------------) + I komoro + 0x0021f809, // n0x1028 c0x0000 (---------------) + I matsukawa + 0x002bf509, // n0x1029 c0x0000 (---------------) + I matsumoto + 0x002b6685, // n0x102a c0x0000 (---------------) + I miasa + 0x0029c70a, // n0x102b c0x0000 (---------------) + I minamiaiki + 0x0027f80a, // 
n0x102c c0x0000 (---------------) + I minamimaki + 0x0028850c, // n0x102d c0x0000 (---------------) + I minamiminowa + 0x00288686, // n0x102e c0x0000 (---------------) + I minowa + 0x00278b46, // n0x102f c0x0000 (---------------) + I miyada + 0x002c0d86, // n0x1030 c0x0000 (---------------) + I miyota + 0x00257009, // n0x1031 c0x0000 (---------------) + I mochizuki + 0x00354f86, // n0x1032 c0x0000 (---------------) + I nagano + 0x00291bc6, // n0x1033 c0x0000 (---------------) + I nagawa + 0x00325606, // n0x1034 c0x0000 (---------------) + I nagiso + 0x002ac8c8, // n0x1035 c0x0000 (---------------) + I nakagawa + 0x002546c6, // n0x1036 c0x0000 (---------------) + I nakano + 0x002c658b, // n0x1037 c0x0000 (---------------) + I nozawaonsen + 0x00290145, // n0x1038 c0x0000 (---------------) + I obuse + 0x00245a05, // n0x1039 c0x0000 (---------------) + I ogawa + 0x00278dc5, // n0x103a c0x0000 (---------------) + I okaya + 0x002014c6, // n0x103b c0x0000 (---------------) + I omachi + 0x0020a583, // n0x103c c0x0000 (---------------) + I omi + 0x00243dc6, // n0x103d c0x0000 (---------------) + I ookuwa + 0x0028d747, // n0x103e c0x0000 (---------------) + I ooshika + 0x002b8f45, // n0x103f c0x0000 (---------------) + I otaki + 0x0025bc45, // n0x1040 c0x0000 (---------------) + I otari + 0x002dd385, // n0x1041 c0x0000 (---------------) + I sakae + 0x00318606, // n0x1042 c0x0000 (---------------) + I sakaki + 0x002b6744, // n0x1043 c0x0000 (---------------) + I saku + 0x00369d86, // n0x1044 c0x0000 (---------------) + I sakuho + 0x0027b309, // n0x1045 c0x0000 (---------------) + I shimosuwa + 0x0020134c, // n0x1046 c0x0000 (---------------) + I shinanomachi + 0x002a54c8, // n0x1047 c0x0000 (---------------) + I shiojiri + 0x0027b444, // n0x1048 c0x0000 (---------------) + I suwa + 0x002ee7c6, // n0x1049 c0x0000 (---------------) + I suzaka + 0x002a0b46, // n0x104a c0x0000 (---------------) + I takagi + 0x0022a948, // n0x104b c0x0000 (---------------) + I takamori + 
0x002c0148, // n0x104c c0x0000 (---------------) + I takayama + 0x00201249, // n0x104d c0x0000 (---------------) + I tateshina + 0x0020d407, // n0x104e c0x0000 (---------------) + I tatsuno + 0x002ae7c9, // n0x104f c0x0000 (---------------) + I togakushi + 0x0026fa46, // n0x1050 c0x0000 (---------------) + I togura + 0x0022f404, // n0x1051 c0x0000 (---------------) + I tomi + 0x0020e184, // n0x1052 c0x0000 (---------------) + I ueda + 0x0024a344, // n0x1053 c0x0000 (---------------) + I wada + 0x002808c8, // n0x1054 c0x0000 (---------------) + I yamagata + 0x0020184a, // n0x1055 c0x0000 (---------------) + I yamanouchi + 0x0034c986, // n0x1056 c0x0000 (---------------) + I yasaka + 0x00353407, // n0x1057 c0x0000 (---------------) + I yasuoka + 0x00311d87, // n0x1058 c0x0000 (---------------) + I chijiwa + 0x0036b785, // n0x1059 c0x0000 (---------------) + I futsu + 0x0028de84, // n0x105a c0x0000 (---------------) + I goto + 0x0028bb06, // n0x105b c0x0000 (---------------) + I hasami + 0x00339dc6, // n0x105c c0x0000 (---------------) + I hirado + 0x0023be03, // n0x105d c0x0000 (---------------) + I iki + 0x002fda07, // n0x105e c0x0000 (---------------) + I isahaya + 0x00330e48, // n0x105f c0x0000 (---------------) + I kawatana + 0x002b67ca, // n0x1060 c0x0000 (---------------) + I kuchinotsu + 0x002c9f88, // n0x1061 c0x0000 (---------------) + I matsuura + 0x002dda48, // n0x1062 c0x0000 (---------------) + I nagasaki + 0x00395f05, // n0x1063 c0x0000 (---------------) + I obama + 0x0037ed85, // n0x1064 c0x0000 (---------------) + I omura + 0x002ae705, // n0x1065 c0x0000 (---------------) + I oseto + 0x0036bb46, // n0x1066 c0x0000 (---------------) + I saikai + 0x003a3f06, // n0x1067 c0x0000 (---------------) + I sasebo + 0x00218945, // n0x1068 c0x0000 (---------------) + I seihi + 0x002ec949, // n0x1069 c0x0000 (---------------) + I shimabara + 0x0028dc8c, // n0x106a c0x0000 (---------------) + I shinkamigoto + 0x00240187, // n0x106b c0x0000 (---------------) + I 
togitsu + 0x00297a48, // n0x106c c0x0000 (---------------) + I tsushima + 0x0028c145, // n0x106d c0x0000 (---------------) + I unzen + 0x00686744, // n0x106e c0x0001 (---------------) ! I city + 0x00259644, // n0x106f c0x0000 (---------------) + I ando + 0x002b13c4, // n0x1070 c0x0000 (---------------) + I gose + 0x0020dcc6, // n0x1071 c0x0000 (---------------) + I heguri + 0x0029e3ce, // n0x1072 c0x0000 (---------------) + I higashiyoshino + 0x0022cc87, // n0x1073 c0x0000 (---------------) + I ikaruga + 0x00296ec5, // n0x1074 c0x0000 (---------------) + I ikoma + 0x0028ee8c, // n0x1075 c0x0000 (---------------) + I kamikitayama + 0x002d9f47, // n0x1076 c0x0000 (---------------) + I kanmaki + 0x00321b47, // n0x1077 c0x0000 (---------------) + I kashiba + 0x00398f89, // n0x1078 c0x0000 (---------------) + I kashihara + 0x00219349, // n0x1079 c0x0000 (---------------) + I katsuragi + 0x002864c5, // n0x107a c0x0000 (---------------) + I kawai + 0x002fdbc8, // n0x107b c0x0000 (---------------) + I kawakami + 0x002bdc49, // n0x107c c0x0000 (---------------) + I kawanishi + 0x002d6e45, // n0x107d c0x0000 (---------------) + I koryo + 0x002b8e88, // n0x107e c0x0000 (---------------) + I kurotaki + 0x002c6b46, // n0x107f c0x0000 (---------------) + I mitsue + 0x002d2306, // n0x1080 c0x0000 (---------------) + I miyake + 0x002c8a04, // n0x1081 c0x0000 (---------------) + I nara + 0x003281c8, // n0x1082 c0x0000 (---------------) + I nosegawa + 0x002247c3, // n0x1083 c0x0000 (---------------) + I oji + 0x00209204, // n0x1084 c0x0000 (---------------) + I ouda + 0x00371085, // n0x1085 c0x0000 (---------------) + I oyodo + 0x00307fc7, // n0x1086 c0x0000 (---------------) + I sakurai + 0x00202c85, // n0x1087 c0x0000 (---------------) + I sango + 0x00281f89, // n0x1088 c0x0000 (---------------) + I shimoichi + 0x0026784d, // n0x1089 c0x0000 (---------------) + I shimokitayama + 0x0033bd46, // n0x108a c0x0000 (---------------) + I shinjo + 0x002647c4, // n0x108b c0x0000 
(---------------) + I soni + 0x00294d08, // n0x108c c0x0000 (---------------) + I takatori + 0x002784ca, // n0x108d c0x0000 (---------------) + I tawaramoto + 0x002177c7, // n0x108e c0x0000 (---------------) + I tenkawa + 0x003460c5, // n0x108f c0x0000 (---------------) + I tenri + 0x00209243, // n0x1090 c0x0000 (---------------) + I uda + 0x0029da0e, // n0x1091 c0x0000 (---------------) + I yamatokoriyama + 0x0028f08c, // n0x1092 c0x0000 (---------------) + I yamatotakada + 0x002fa407, // n0x1093 c0x0000 (---------------) + I yamazoe + 0x0029e587, // n0x1094 c0x0000 (---------------) + I yoshino + 0x00201003, // n0x1095 c0x0000 (---------------) + I aga + 0x00354fc5, // n0x1096 c0x0000 (---------------) + I agano + 0x002b13c5, // n0x1097 c0x0000 (---------------) + I gosen + 0x00298688, // n0x1098 c0x0000 (---------------) + I itoigawa + 0x00294149, // n0x1099 c0x0000 (---------------) + I izumozaki + 0x00292e46, // n0x109a c0x0000 (---------------) + I joetsu + 0x0022a9c4, // n0x109b c0x0000 (---------------) + I kamo + 0x0022f086, // n0x109c c0x0000 (---------------) + I kariwa + 0x00205dcb, // n0x109d c0x0000 (---------------) + I kashiwazaki + 0x002c598c, // n0x109e c0x0000 (---------------) + I minamiuonuma + 0x002ebdc7, // n0x109f c0x0000 (---------------) + I mitsuke + 0x002cae05, // n0x10a0 c0x0000 (---------------) + I muika + 0x00383248, // n0x10a1 c0x0000 (---------------) + I murakami + 0x002d0985, // n0x10a2 c0x0000 (---------------) + I myoko + 0x002cde87, // n0x10a3 c0x0000 (---------------) + I nagaoka + 0x0023ff87, // n0x10a4 c0x0000 (---------------) + I niigata + 0x0024f385, // n0x10a5 c0x0000 (---------------) + I ojiya + 0x0020a583, // n0x10a6 c0x0000 (---------------) + I omi + 0x00360584, // n0x10a7 c0x0000 (---------------) + I sado + 0x00203f85, // n0x10a8 c0x0000 (---------------) + I sanjo + 0x002e6c05, // n0x10a9 c0x0000 (---------------) + I seiro + 0x002e6c06, // n0x10aa c0x0000 (---------------) + I seirou + 0x0026c588, // n0x10ab 
c0x0000 (---------------) + I sekikawa + 0x00321bc7, // n0x10ac c0x0000 (---------------) + I shibata + 0x003780c6, // n0x10ad c0x0000 (---------------) + I tagami + 0x00354c46, // n0x10ae c0x0000 (---------------) + I tainai + 0x002e4b06, // n0x10af c0x0000 (---------------) + I tochio + 0x00297189, // n0x10b0 c0x0000 (---------------) + I tokamachi + 0x00208c87, // n0x10b1 c0x0000 (---------------) + I tsubame + 0x00292cc6, // n0x10b2 c0x0000 (---------------) + I tsunan + 0x002c5b06, // n0x10b3 c0x0000 (---------------) + I uonuma + 0x0024f446, // n0x10b4 c0x0000 (---------------) + I yahiko + 0x002a8945, // n0x10b5 c0x0000 (---------------) + I yoita + 0x00217246, // n0x10b6 c0x0000 (---------------) + I yuzawa + 0x0038de05, // n0x10b7 c0x0000 (---------------) + I beppu + 0x002d1f48, // n0x10b8 c0x0000 (---------------) + I bungoono + 0x00292a0b, // n0x10b9 c0x0000 (---------------) + I bungotakada + 0x0028b906, // n0x10ba c0x0000 (---------------) + I hasama + 0x00311dc4, // n0x10bb c0x0000 (---------------) + I hiji + 0x002fe3c9, // n0x10bc c0x0000 (---------------) + I himeshima + 0x002a27c4, // n0x10bd c0x0000 (---------------) + I hita + 0x002c6ac8, // n0x10be c0x0000 (---------------) + I kamitsue + 0x0028adc7, // n0x10bf c0x0000 (---------------) + I kokonoe + 0x00283fc4, // n0x10c0 c0x0000 (---------------) + I kuju + 0x002b2a08, // n0x10c1 c0x0000 (---------------) + I kunisaki + 0x002ba1c4, // n0x10c2 c0x0000 (---------------) + I kusu + 0x002a8984, // n0x10c3 c0x0000 (---------------) + I oita + 0x00286f45, // n0x10c4 c0x0000 (---------------) + I saiki + 0x00305a86, // n0x10c5 c0x0000 (---------------) + I taketa + 0x002f6207, // n0x10c6 c0x0000 (---------------) + I tsukumi + 0x0022b983, // n0x10c7 c0x0000 (---------------) + I usa + 0x0029d4c5, // n0x10c8 c0x0000 (---------------) + I usuki + 0x002bc144, // n0x10c9 c0x0000 (---------------) + I yufu + 0x002721c6, // n0x10ca c0x0000 (---------------) + I akaiwa + 0x002b6708, // n0x10cb c0x0000 
(---------------) + I asakuchi + 0x00330b85, // n0x10cc c0x0000 (---------------) + I bizen + 0x0028fa49, // n0x10cd c0x0000 (---------------) + I hayashima + 0x0020c145, // n0x10ce c0x0000 (---------------) + I ibara + 0x002bbb48, // n0x10cf c0x0000 (---------------) + I kagamino + 0x00320d87, // n0x10d0 c0x0000 (---------------) + I kasaoka + 0x003809c8, // n0x10d1 c0x0000 (---------------) + I kibichuo + 0x002b1dc7, // n0x10d2 c0x0000 (---------------) + I kumenan + 0x0037e0c9, // n0x10d3 c0x0000 (---------------) + I kurashiki + 0x0031d046, // n0x10d4 c0x0000 (---------------) + I maniwa + 0x00347a06, // n0x10d5 c0x0000 (---------------) + I misaki + 0x00269404, // n0x10d6 c0x0000 (---------------) + I nagi + 0x00294b45, // n0x10d7 c0x0000 (---------------) + I niimi + 0x002f48cc, // n0x10d8 c0x0000 (---------------) + I nishiawakura + 0x00278dc7, // n0x10d9 c0x0000 (---------------) + I okayama + 0x002791c7, // n0x10da c0x0000 (---------------) + I satosho + 0x00311c48, // n0x10db c0x0000 (---------------) + I setouchi + 0x0033bd46, // n0x10dc c0x0000 (---------------) + I shinjo + 0x0029eec4, // n0x10dd c0x0000 (---------------) + I shoo + 0x00323cc4, // n0x10de c0x0000 (---------------) + I soja + 0x00280a49, // n0x10df c0x0000 (---------------) + I takahashi + 0x002c0e86, // n0x10e0 c0x0000 (---------------) + I tamano + 0x0021ec47, // n0x10e1 c0x0000 (---------------) + I tsuyama + 0x00208504, // n0x10e2 c0x0000 (---------------) + I wake + 0x002b2886, // n0x10e3 c0x0000 (---------------) + I yakage + 0x00320985, // n0x10e4 c0x0000 (---------------) + I aguni + 0x002a2ac7, // n0x10e5 c0x0000 (---------------) + I ginowan + 0x002c6506, // n0x10e6 c0x0000 (---------------) + I ginoza + 0x0025c649, // n0x10e7 c0x0000 (---------------) + I gushikami + 0x0027f647, // n0x10e8 c0x0000 (---------------) + I haebaru + 0x00268047, // n0x10e9 c0x0000 (---------------) + I higashi + 0x002a0046, // n0x10ea c0x0000 (---------------) + I hirara + 0x002452c5, // n0x10eb 
c0x0000 (---------------) + I iheya + 0x0027e248, // n0x10ec c0x0000 (---------------) + I ishigaki + 0x0021a4c8, // n0x10ed c0x0000 (---------------) + I ishikawa + 0x00242b46, // n0x10ee c0x0000 (---------------) + I itoman + 0x00330bc5, // n0x10ef c0x0000 (---------------) + I izena + 0x00331c86, // n0x10f0 c0x0000 (---------------) + I kadena + 0x002154c3, // n0x10f1 c0x0000 (---------------) + I kin + 0x00298509, // n0x10f2 c0x0000 (---------------) + I kitadaito + 0x002a644e, // n0x10f3 c0x0000 (---------------) + I kitanakagusuku + 0x002b1ac8, // n0x10f4 c0x0000 (---------------) + I kumejima + 0x002da188, // n0x10f5 c0x0000 (---------------) + I kunigami + 0x0024294b, // n0x10f6 c0x0000 (---------------) + I minamidaito + 0x0028fc86, // n0x10f7 c0x0000 (---------------) + I motobu + 0x002486c4, // n0x10f8 c0x0000 (---------------) + I nago + 0x00205b04, // n0x10f9 c0x0000 (---------------) + I naha + 0x002a654a, // n0x10fa c0x0000 (---------------) + I nakagusuku + 0x00217e07, // n0x10fb c0x0000 (---------------) + I nakijin + 0x00292d85, // n0x10fc c0x0000 (---------------) + I nanjo + 0x00212149, // n0x10fd c0x0000 (---------------) + I nishihara + 0x002b8285, // n0x10fe c0x0000 (---------------) + I ogimi + 0x00395b47, // n0x10ff c0x0000 (---------------) + I okinawa + 0x00301304, // n0x1100 c0x0000 (---------------) + I onna + 0x00383e87, // n0x1101 c0x0000 (---------------) + I shimoji + 0x0022f308, // n0x1102 c0x0000 (---------------) + I taketomi + 0x002b0046, // n0x1103 c0x0000 (---------------) + I tarama + 0x00342249, // n0x1104 c0x0000 (---------------) + I tokashiki + 0x002b354a, // n0x1105 c0x0000 (---------------) + I tomigusuku + 0x00217d86, // n0x1106 c0x0000 (---------------) + I tonaki + 0x00295986, // n0x1107 c0x0000 (---------------) + I urasoe + 0x002ab405, // n0x1108 c0x0000 (---------------) + I uruma + 0x00373d05, // n0x1109 c0x0000 (---------------) + I yaese + 0x00229a47, // n0x110a c0x0000 (---------------) + I yomitan + 
0x00249888, // n0x110b c0x0000 (---------------) + I yonabaru + 0x003208c8, // n0x110c c0x0000 (---------------) + I yonaguni + 0x00240f46, // n0x110d c0x0000 (---------------) + I zamami + 0x00223745, // n0x110e c0x0000 (---------------) + I abeno + 0x002487ce, // n0x110f c0x0000 (---------------) + I chihayaakasaka + 0x0030ba84, // n0x1110 c0x0000 (---------------) + I chuo + 0x00242ac5, // n0x1111 c0x0000 (---------------) + I daito + 0x00277c09, // n0x1112 c0x0000 (---------------) + I fujiidera + 0x0026b5c8, // n0x1113 c0x0000 (---------------) + I habikino + 0x003a1c46, // n0x1114 c0x0000 (---------------) + I hannan + 0x0029aa8c, // n0x1115 c0x0000 (---------------) + I higashiosaka + 0x0029c210, // n0x1116 c0x0000 (---------------) + I higashisumiyoshi + 0x0029e00f, // n0x1117 c0x0000 (---------------) + I higashiyodogawa + 0x0029efc8, // n0x1118 c0x0000 (---------------) + I hirakata + 0x002c1ac7, // n0x1119 c0x0000 (---------------) + I ibaraki + 0x00202585, // n0x111a c0x0000 (---------------) + I ikeda + 0x002802c5, // n0x111b c0x0000 (---------------) + I izumi + 0x002f6089, // n0x111c c0x0000 (---------------) + I izumiotsu + 0x00294509, // n0x111d c0x0000 (---------------) + I izumisano + 0x0021e5c6, // n0x111e c0x0000 (---------------) + I kadoma + 0x002fa147, // n0x111f c0x0000 (---------------) + I kaizuka + 0x0038ad85, // n0x1120 c0x0000 (---------------) + I kanan + 0x002037c9, // n0x1121 c0x0000 (---------------) + I kashiwara + 0x0032c486, // n0x1122 c0x0000 (---------------) + I katano + 0x00354dcd, // n0x1123 c0x0000 (---------------) + I kawachinagano + 0x00287009, // n0x1124 c0x0000 (---------------) + I kishiwada + 0x00201e04, // n0x1125 c0x0000 (---------------) + I kita + 0x002b1848, // n0x1126 c0x0000 (---------------) + I kumatori + 0x0039f589, // n0x1127 c0x0000 (---------------) + I matsubara + 0x0034cb46, // n0x1128 c0x0000 (---------------) + I minato + 0x00278385, // n0x1129 c0x0000 (---------------) + I minoh + 0x00347a06, // 
n0x112a c0x0000 (---------------) + I misaki + 0x0030bf89, // n0x112b c0x0000 (---------------) + I moriguchi + 0x00320008, // n0x112c c0x0000 (---------------) + I neyagawa + 0x00211805, // n0x112d c0x0000 (---------------) + I nishi + 0x0026c504, // n0x112e c0x0000 (---------------) + I nose + 0x0029ac4b, // n0x112f c0x0000 (---------------) + I osakasayama + 0x0034ca05, // n0x1130 c0x0000 (---------------) + I sakai + 0x0029ad86, // n0x1131 c0x0000 (---------------) + I sayama + 0x00282c86, // n0x1132 c0x0000 (---------------) + I sennan + 0x0024c046, // n0x1133 c0x0000 (---------------) + I settsu + 0x0038428b, // n0x1134 c0x0000 (---------------) + I shijonawate + 0x0028fb49, // n0x1135 c0x0000 (---------------) + I shimamoto + 0x00218c85, // n0x1136 c0x0000 (---------------) + I suita + 0x00380787, // n0x1137 c0x0000 (---------------) + I tadaoka + 0x0022c746, // n0x1138 c0x0000 (---------------) + I taishi + 0x00238746, // n0x1139 c0x0000 (---------------) + I tajiri + 0x00281e48, // n0x113a c0x0000 (---------------) + I takaishi + 0x00305b89, // n0x113b c0x0000 (---------------) + I takatsuki + 0x0026e98c, // n0x113c c0x0000 (---------------) + I tondabayashi + 0x0024ab88, // n0x113d c0x0000 (---------------) + I toyonaka + 0x0037b006, // n0x113e c0x0000 (---------------) + I toyono + 0x00341743, // n0x113f c0x0000 (---------------) + I yao + 0x00249246, // n0x1140 c0x0000 (---------------) + I ariake + 0x0027ff85, // n0x1141 c0x0000 (---------------) + I arita + 0x0027cfc8, // n0x1142 c0x0000 (---------------) + I fukudomi + 0x00223146, // n0x1143 c0x0000 (---------------) + I genkai + 0x002a2d08, // n0x1144 c0x0000 (---------------) + I hamatama + 0x0024ba45, // n0x1145 c0x0000 (---------------) + I hizen + 0x0027bc05, // n0x1146 c0x0000 (---------------) + I imari + 0x00321008, // n0x1147 c0x0000 (---------------) + I kamimine + 0x002eec47, // n0x1148 c0x0000 (---------------) + I kanzaki + 0x00377d07, // n0x1149 c0x0000 (---------------) + I karatsu + 
0x002a8a87, // n0x114a c0x0000 (---------------) + I kashima + 0x0021f588, // n0x114b c0x0000 (---------------) + I kitagata + 0x0028ebc8, // n0x114c c0x0000 (---------------) + I kitahata + 0x0024edc6, // n0x114d c0x0000 (---------------) + I kiyama + 0x00303607, // n0x114e c0x0000 (---------------) + I kouhoku + 0x0036ad87, // n0x114f c0x0000 (---------------) + I kyuragi + 0x0027fe4a, // n0x1150 c0x0000 (---------------) + I nishiarita + 0x00213483, // n0x1151 c0x0000 (---------------) + I ogi + 0x002014c6, // n0x1152 c0x0000 (---------------) + I omachi + 0x00201985, // n0x1153 c0x0000 (---------------) + I ouchi + 0x00238904, // n0x1154 c0x0000 (---------------) + I saga + 0x0027a9c9, // n0x1155 c0x0000 (---------------) + I shiroishi + 0x0037e044, // n0x1156 c0x0000 (---------------) + I taku + 0x002a1204, // n0x1157 c0x0000 (---------------) + I tara + 0x002a09c4, // n0x1158 c0x0000 (---------------) + I tosu + 0x0029e58b, // n0x1159 c0x0000 (---------------) + I yoshinogari + 0x0039f707, // n0x115a c0x0000 (---------------) + I arakawa + 0x00248a05, // n0x115b c0x0000 (---------------) + I asaka + 0x00292888, // n0x115c c0x0000 (---------------) + I chichibu + 0x00278286, // n0x115d c0x0000 (---------------) + I fujimi + 0x00278288, // n0x115e c0x0000 (---------------) + I fujimino + 0x0027c506, // n0x115f c0x0000 (---------------) + I fukaya + 0x00289385, // n0x1160 c0x0000 (---------------) + I hanno + 0x0028a805, // n0x1161 c0x0000 (---------------) + I hanyu + 0x0028c486, // n0x1162 c0x0000 (---------------) + I hasuda + 0x0028d4c8, // n0x1163 c0x0000 (---------------) + I hatogaya + 0x0028e948, // n0x1164 c0x0000 (---------------) + I hatoyama + 0x0027ab86, // n0x1165 c0x0000 (---------------) + I hidaka + 0x002926cf, // n0x1166 c0x0000 (---------------) + I higashichichibu + 0x00297fd0, // n0x1167 c0x0000 (---------------) + I higashimatsuyama + 0x0038e545, // n0x1168 c0x0000 (---------------) + I honjo + 0x002013c3, // n0x1169 c0x0000 
(---------------) + I ina + 0x00251c85, // n0x116a c0x0000 (---------------) + I iruma + 0x002ff488, // n0x116b c0x0000 (---------------) + I iwatsuki + 0x00294409, // n0x116c c0x0000 (---------------) + I kamiizumi + 0x002e8008, // n0x116d c0x0000 (---------------) + I kamikawa + 0x0034cdc8, // n0x116e c0x0000 (---------------) + I kamisato + 0x00207888, // n0x116f c0x0000 (---------------) + I kasukabe + 0x0039f7c7, // n0x1170 c0x0000 (---------------) + I kawagoe + 0x00277f49, // n0x1171 c0x0000 (---------------) + I kawaguchi + 0x002a2f08, // n0x1172 c0x0000 (---------------) + I kawajima + 0x002b7444, // n0x1173 c0x0000 (---------------) + I kazo + 0x002a0848, // n0x1174 c0x0000 (---------------) + I kitamoto + 0x00289b89, // n0x1175 c0x0000 (---------------) + I koshigaya + 0x0030a447, // n0x1176 c0x0000 (---------------) + I kounosu + 0x002a63c4, // n0x1177 c0x0000 (---------------) + I kuki + 0x00208788, // n0x1178 c0x0000 (---------------) + I kumagaya + 0x002455ca, // n0x1179 c0x0000 (---------------) + I matsubushi + 0x002d8c06, // n0x117a c0x0000 (---------------) + I minano + 0x0022f486, // n0x117b c0x0000 (---------------) + I misato + 0x0021d389, // n0x117c c0x0000 (---------------) + I miyashiro + 0x0029c447, // n0x117d c0x0000 (---------------) + I miyoshi + 0x002c6ec8, // n0x117e c0x0000 (---------------) + I moroyama + 0x0038e088, // n0x117f c0x0000 (---------------) + I nagatoro + 0x00208388, // n0x1180 c0x0000 (---------------) + I namegawa + 0x00352f45, // n0x1181 c0x0000 (---------------) + I niiza + 0x00374cc5, // n0x1182 c0x0000 (---------------) + I ogano + 0x00245a05, // n0x1183 c0x0000 (---------------) + I ogawa + 0x002b1385, // n0x1184 c0x0000 (---------------) + I ogose + 0x0035ce07, // n0x1185 c0x0000 (---------------) + I okegawa + 0x0020a585, // n0x1186 c0x0000 (---------------) + I omiya + 0x002b8f45, // n0x1187 c0x0000 (---------------) + I otaki + 0x0033f406, // n0x1188 c0x0000 (---------------) + I ranzan + 0x002e7f47, // 
n0x1189 c0x0000 (---------------) + I ryokami + 0x002d9247, // n0x118a c0x0000 (---------------) + I saitama + 0x002458c6, // n0x118b c0x0000 (---------------) + I sakado + 0x002cc385, // n0x118c c0x0000 (---------------) + I satte + 0x0029ad86, // n0x118d c0x0000 (---------------) + I sayama + 0x002aaa05, // n0x118e c0x0000 (---------------) + I shiki + 0x00306048, // n0x118f c0x0000 (---------------) + I shiraoka + 0x002e22c4, // n0x1190 c0x0000 (---------------) + I soka + 0x002bfdc6, // n0x1191 c0x0000 (---------------) + I sugito + 0x00265904, // n0x1192 c0x0000 (---------------) + I toda + 0x00222908, // n0x1193 c0x0000 (---------------) + I tokigawa + 0x0038584a, // n0x1194 c0x0000 (---------------) + I tokorozawa + 0x0027b9cc, // n0x1195 c0x0000 (---------------) + I tsurugashima + 0x0020b6c5, // n0x1196 c0x0000 (---------------) + I urawa + 0x00203906, // n0x1197 c0x0000 (---------------) + I warabi + 0x0026eb46, // n0x1198 c0x0000 (---------------) + I yashio + 0x002296c6, // n0x1199 c0x0000 (---------------) + I yokoze + 0x002ff684, // n0x119a c0x0000 (---------------) + I yono + 0x00320c45, // n0x119b c0x0000 (---------------) + I yorii + 0x0027c347, // n0x119c c0x0000 (---------------) + I yoshida + 0x0029c4c9, // n0x119d c0x0000 (---------------) + I yoshikawa + 0x002a42c7, // n0x119e c0x0000 (---------------) + I yoshimi + 0x00686744, // n0x119f c0x0001 (---------------) ! I city + 0x00686744, // n0x11a0 c0x0001 (---------------) ! 
I city + 0x0030bd05, // n0x11a1 c0x0000 (---------------) + I aisho + 0x00228e04, // n0x11a2 c0x0000 (---------------) + I gamo + 0x0029a44a, // n0x11a3 c0x0000 (---------------) + I higashiomi + 0x00278106, // n0x11a4 c0x0000 (---------------) + I hikone + 0x0034cd44, // n0x11a5 c0x0000 (---------------) + I koka + 0x00206b85, // n0x11a6 c0x0000 (---------------) + I konan + 0x002fb505, // n0x11a7 c0x0000 (---------------) + I kosei + 0x00301bc4, // n0x11a8 c0x0000 (---------------) + I koto + 0x002828c7, // n0x11a9 c0x0000 (---------------) + I kusatsu + 0x0020c0c7, // n0x11aa c0x0000 (---------------) + I maibara + 0x002c60c8, // n0x11ab c0x0000 (---------------) + I moriyama + 0x0025e648, // n0x11ac c0x0000 (---------------) + I nagahama + 0x00211809, // n0x11ad c0x0000 (---------------) + I nishiazai + 0x0025ae88, // n0x11ae c0x0000 (---------------) + I notogawa + 0x0029a60b, // n0x11af c0x0000 (---------------) + I omihachiman + 0x0021f304, // n0x11b0 c0x0000 (---------------) + I otsu + 0x00337145, // n0x11b1 c0x0000 (---------------) + I ritto + 0x0027f545, // n0x11b2 c0x0000 (---------------) + I ryuoh + 0x002a8a09, // n0x11b3 c0x0000 (---------------) + I takashima + 0x00305b89, // n0x11b4 c0x0000 (---------------) + I takatsuki + 0x002fe2c8, // n0x11b5 c0x0000 (---------------) + I torahime + 0x0025b488, // n0x11b6 c0x0000 (---------------) + I toyosato + 0x00280644, // n0x11b7 c0x0000 (---------------) + I yasu + 0x002a0b85, // n0x11b8 c0x0000 (---------------) + I akagi + 0x00201883, // n0x11b9 c0x0000 (---------------) + I ama + 0x0021f2c5, // n0x11ba c0x0000 (---------------) + I gotsu + 0x002da386, // n0x11bb c0x0000 (---------------) + I hamada + 0x00293f8c, // n0x11bc c0x0000 (---------------) + I higashiizumo + 0x0021a546, // n0x11bd c0x0000 (---------------) + I hikawa + 0x002d7a86, // n0x11be c0x0000 (---------------) + I hikimi + 0x00294145, // n0x11bf c0x0000 (---------------) + I izumo + 0x00318688, // n0x11c0 c0x0000 (---------------) + I 
kakinoki + 0x002b1c46, // n0x11c1 c0x0000 (---------------) + I masuda + 0x0039fe86, // n0x11c2 c0x0000 (---------------) + I matsue + 0x0022f486, // n0x11c3 c0x0000 (---------------) + I misato + 0x0022058c, // n0x11c4 c0x0000 (---------------) + I nishinoshima + 0x002b7044, // n0x11c5 c0x0000 (---------------) + I ohda + 0x002e4c4a, // n0x11c6 c0x0000 (---------------) + I okinoshima + 0x003a1e08, // n0x11c7 c0x0000 (---------------) + I okuizumo + 0x00293dc7, // n0x11c8 c0x0000 (---------------) + I shimane + 0x0024e7c6, // n0x11c9 c0x0000 (---------------) + I tamayu + 0x00292f07, // n0x11ca c0x0000 (---------------) + I tsuwano + 0x002e0845, // n0x11cb c0x0000 (---------------) + I unnan + 0x00325346, // n0x11cc c0x0000 (---------------) + I yakumo + 0x0034fa06, // n0x11cd c0x0000 (---------------) + I yasugi + 0x00377bc7, // n0x11ce c0x0000 (---------------) + I yatsuka + 0x002b3244, // n0x11cf c0x0000 (---------------) + I arai + 0x002305c5, // n0x11d0 c0x0000 (---------------) + I atami + 0x00277c04, // n0x11d1 c0x0000 (---------------) + I fuji + 0x002a5807, // n0x11d2 c0x0000 (---------------) + I fujieda + 0x00277e48, // n0x11d3 c0x0000 (---------------) + I fujikawa + 0x002789ca, // n0x11d4 c0x0000 (---------------) + I fujinomiya + 0x0027ed47, // n0x11d5 c0x0000 (---------------) + I fukuroi + 0x00242cc7, // n0x11d6 c0x0000 (---------------) + I gotemba + 0x002c1a47, // n0x11d7 c0x0000 (---------------) + I haibara + 0x002d0ac9, // n0x11d8 c0x0000 (---------------) + I hamamatsu + 0x00293f8a, // n0x11d9 c0x0000 (---------------) + I higashiizu + 0x00228083, // n0x11da c0x0000 (---------------) + I ito + 0x00351b45, // n0x11db c0x0000 (---------------) + I iwata + 0x00212583, // n0x11dc c0x0000 (---------------) + I izu + 0x00342449, // n0x11dd c0x0000 (---------------) + I izunokuni + 0x002b9588, // n0x11de c0x0000 (---------------) + I kakegawa + 0x003061c7, // n0x11df c0x0000 (---------------) + I kannami + 0x002e8109, // n0x11e0 c0x0000 
(---------------) + I kawanehon + 0x0021a5c6, // n0x11e1 c0x0000 (---------------) + I kawazu + 0x003a3c08, // n0x11e2 c0x0000 (---------------) + I kikugawa + 0x002d8745, // n0x11e3 c0x0000 (---------------) + I kosai + 0x0035674a, // n0x11e4 c0x0000 (---------------) + I makinohara + 0x002cf149, // n0x11e5 c0x0000 (---------------) + I matsuzaki + 0x0026e649, // n0x11e6 c0x0000 (---------------) + I minamiizu + 0x002bf3c7, // n0x11e7 c0x0000 (---------------) + I mishima + 0x002a8cc9, // n0x11e8 c0x0000 (---------------) + I morimachi + 0x00212448, // n0x11e9 c0x0000 (---------------) + I nishiizu + 0x002ee946, // n0x11ea c0x0000 (---------------) + I numazu + 0x0037e948, // n0x11eb c0x0000 (---------------) + I omaezaki + 0x00212e87, // n0x11ec c0x0000 (---------------) + I shimada + 0x002281c7, // n0x11ed c0x0000 (---------------) + I shimizu + 0x002c5e87, // n0x11ee c0x0000 (---------------) + I shimoda + 0x002b2608, // n0x11ef c0x0000 (---------------) + I shizuoka + 0x002ee646, // n0x11f0 c0x0000 (---------------) + I susono + 0x00245385, // n0x11f1 c0x0000 (---------------) + I yaizu + 0x0027c347, // n0x11f2 c0x0000 (---------------) + I yoshida + 0x00295488, // n0x11f3 c0x0000 (---------------) + I ashikaga + 0x00344d84, // n0x11f4 c0x0000 (---------------) + I bato + 0x0034ac04, // n0x11f5 c0x0000 (---------------) + I haga + 0x002fddc7, // n0x11f6 c0x0000 (---------------) + I ichikai + 0x002ac347, // n0x11f7 c0x0000 (---------------) + I iwafune + 0x002bdaca, // n0x11f8 c0x0000 (---------------) + I kaminokawa + 0x002ee8c6, // n0x11f9 c0x0000 (---------------) + I kanuma + 0x002fa28a, // n0x11fa c0x0000 (---------------) + I karasuyama + 0x002b8547, // n0x11fb c0x0000 (---------------) + I kuroiso + 0x0030b847, // n0x11fc c0x0000 (---------------) + I mashiko + 0x00241044, // n0x11fd c0x0000 (---------------) + I mibu + 0x00263904, // n0x11fe c0x0000 (---------------) + I moka + 0x00226bc6, // n0x11ff c0x0000 (---------------) + I motegi + 0x002ec144, 
// n0x1200 c0x0000 (---------------) + I nasu + 0x002ec14c, // n0x1201 c0x0000 (---------------) + I nasushiobara + 0x00203185, // n0x1202 c0x0000 (---------------) + I nikko + 0x00216889, // n0x1203 c0x0000 (---------------) + I nishikata + 0x00279884, // n0x1204 c0x0000 (---------------) + I nogi + 0x0029ef85, // n0x1205 c0x0000 (---------------) + I ohira + 0x00278448, // n0x1206 c0x0000 (---------------) + I ohtawara + 0x00250045, // n0x1207 c0x0000 (---------------) + I oyama + 0x00307fc6, // n0x1208 c0x0000 (---------------) + I sakura + 0x0020f744, // n0x1209 c0x0000 (---------------) + I sano + 0x0027e58a, // n0x120a c0x0000 (---------------) + I shimotsuke + 0x002a7c86, // n0x120b c0x0000 (---------------) + I shioya + 0x002579ca, // n0x120c c0x0000 (---------------) + I takanezawa + 0x00344e07, // n0x120d c0x0000 (---------------) + I tochigi + 0x00297645, // n0x120e c0x0000 (---------------) + I tsuga + 0x0021c2c5, // n0x120f c0x0000 (---------------) + I ujiie + 0x0036b7ca, // n0x1210 c0x0000 (---------------) + I utsunomiya + 0x002a0605, // n0x1211 c0x0000 (---------------) + I yaita + 0x0029eb86, // n0x1212 c0x0000 (---------------) + I aizumi + 0x00206d04, // n0x1213 c0x0000 (---------------) + I anan + 0x002add06, // n0x1214 c0x0000 (---------------) + I ichiba + 0x00229b05, // n0x1215 c0x0000 (---------------) + I itano + 0x00223206, // n0x1216 c0x0000 (---------------) + I kainan + 0x002aa70c, // n0x1217 c0x0000 (---------------) + I komatsushima + 0x002c704a, // n0x1218 c0x0000 (---------------) + I matsushige + 0x0027f904, // n0x1219 c0x0000 (---------------) + I mima + 0x00228406, // n0x121a c0x0000 (---------------) + I minami + 0x0029c447, // n0x121b c0x0000 (---------------) + I miyoshi + 0x002ca384, // n0x121c c0x0000 (---------------) + I mugi + 0x002ac8c8, // n0x121d c0x0000 (---------------) + I nakagawa + 0x00385746, // n0x121e c0x0000 (---------------) + I naruto + 0x00248649, // n0x121f c0x0000 (---------------) + I sanagochi + 
0x002ad349, // n0x1220 c0x0000 (---------------) + I shishikui + 0x00299fc9, // n0x1221 c0x0000 (---------------) + I tokushima + 0x0036aa46, // n0x1222 c0x0000 (---------------) + I wajiki + 0x00212f86, // n0x1223 c0x0000 (---------------) + I adachi + 0x0037ea87, // n0x1224 c0x0000 (---------------) + I akiruno + 0x002ec888, // n0x1225 c0x0000 (---------------) + I akishima + 0x00212d89, // n0x1226 c0x0000 (---------------) + I aogashima + 0x0039f707, // n0x1227 c0x0000 (---------------) + I arakawa + 0x002b4186, // n0x1228 c0x0000 (---------------) + I bunkyo + 0x003005c7, // n0x1229 c0x0000 (---------------) + I chiyoda + 0x002db1c5, // n0x122a c0x0000 (---------------) + I chofu + 0x0030ba84, // n0x122b c0x0000 (---------------) + I chuo + 0x0037e647, // n0x122c c0x0000 (---------------) + I edogawa + 0x002bc1c5, // n0x122d c0x0000 (---------------) + I fuchu + 0x00286e85, // n0x122e c0x0000 (---------------) + I fussa + 0x002fc5c7, // n0x122f c0x0000 (---------------) + I hachijo + 0x0024f248, // n0x1230 c0x0000 (---------------) + I hachioji + 0x003831c6, // n0x1231 c0x0000 (---------------) + I hamura + 0x0029680d, // n0x1232 c0x0000 (---------------) + I higashikurume + 0x0029888f, // n0x1233 c0x0000 (---------------) + I higashimurayama + 0x0029d84d, // n0x1234 c0x0000 (---------------) + I higashiyamato + 0x0020dbc4, // n0x1235 c0x0000 (---------------) + I hino + 0x0023ba06, // n0x1236 c0x0000 (---------------) + I hinode + 0x002cfa08, // n0x1237 c0x0000 (---------------) + I hinohara + 0x003255c5, // n0x1238 c0x0000 (---------------) + I inagi + 0x00280008, // n0x1239 c0x0000 (---------------) + I itabashi + 0x0021ae8a, // n0x123a c0x0000 (---------------) + I katsushika + 0x00201e04, // n0x123b c0x0000 (---------------) + I kita + 0x002aaac6, // n0x123c c0x0000 (---------------) + I kiyose + 0x0039c647, // n0x123d c0x0000 (---------------) + I kodaira + 0x00226307, // n0x123e c0x0000 (---------------) + I koganei + 0x002a0d89, // n0x123f c0x0000 
(---------------) + I kokubunji + 0x0037e905, // n0x1240 c0x0000 (---------------) + I komae + 0x00301bc4, // n0x1241 c0x0000 (---------------) + I koto + 0x0032a78a, // n0x1242 c0x0000 (---------------) + I kouzushima + 0x002b3009, // n0x1243 c0x0000 (---------------) + I kunitachi + 0x002a8dc7, // n0x1244 c0x0000 (---------------) + I machida + 0x00296ac6, // n0x1245 c0x0000 (---------------) + I meguro + 0x0034cb46, // n0x1246 c0x0000 (---------------) + I minato + 0x002a0ac6, // n0x1247 c0x0000 (---------------) + I mitaka + 0x0035e106, // n0x1248 c0x0000 (---------------) + I mizuho + 0x002cee0f, // n0x1249 c0x0000 (---------------) + I musashimurayama + 0x002cf8c9, // n0x124a c0x0000 (---------------) + I musashino + 0x002546c6, // n0x124b c0x0000 (---------------) + I nakano + 0x00256d06, // n0x124c c0x0000 (---------------) + I nerima + 0x00355a09, // n0x124d c0x0000 (---------------) + I ogasawara + 0x00303707, // n0x124e c0x0000 (---------------) + I okutama + 0x00213a83, // n0x124f c0x0000 (---------------) + I ome + 0x0020ef06, // n0x1250 c0x0000 (---------------) + I oshima + 0x00204083, // n0x1251 c0x0000 (---------------) + I ota + 0x0024cf48, // n0x1252 c0x0000 (---------------) + I setagaya + 0x00300407, // n0x1253 c0x0000 (---------------) + I shibuya + 0x0029f1c9, // n0x1254 c0x0000 (---------------) + I shinagawa + 0x00383608, // n0x1255 c0x0000 (---------------) + I shinjuku + 0x00377e48, // n0x1256 c0x0000 (---------------) + I suginami + 0x0036e146, // n0x1257 c0x0000 (---------------) + I sumida + 0x00223909, // n0x1258 c0x0000 (---------------) + I tachikawa + 0x002400c5, // n0x1259 c0x0000 (---------------) + I taito + 0x0024e7c4, // n0x125a c0x0000 (---------------) + I tama + 0x0024aec7, // n0x125b c0x0000 (---------------) + I toshima + 0x00257085, // n0x125c c0x0000 (---------------) + I chizu + 0x0020dbc4, // n0x125d c0x0000 (---------------) + I hino + 0x00248ac8, // n0x125e c0x0000 (---------------) + I kawahara + 0x002180c4, // 
n0x125f c0x0000 (---------------) + I koge + 0x00301bc7, // n0x1260 c0x0000 (---------------) + I kotoura + 0x0036fc86, // n0x1261 c0x0000 (---------------) + I misasa + 0x002e5a85, // n0x1262 c0x0000 (---------------) + I nanbu + 0x002869c8, // n0x1263 c0x0000 (---------------) + I nichinan + 0x0034ca0b, // n0x1264 c0x0000 (---------------) + I sakaiminato + 0x002f8b87, // n0x1265 c0x0000 (---------------) + I tottori + 0x0036ba46, // n0x1266 c0x0000 (---------------) + I wakasa + 0x002c0c84, // n0x1267 c0x0000 (---------------) + I yazu + 0x0030f5c6, // n0x1268 c0x0000 (---------------) + I yonago + 0x002bf185, // n0x1269 c0x0000 (---------------) + I asahi + 0x002bc1c5, // n0x126a c0x0000 (---------------) + I fuchu + 0x0027dfc9, // n0x126b c0x0000 (---------------) + I fukumitsu + 0x002824c9, // n0x126c c0x0000 (---------------) + I funahashi + 0x00228204, // n0x126d c0x0000 (---------------) + I himi + 0x00228245, // n0x126e c0x0000 (---------------) + I imizu + 0x00228445, // n0x126f c0x0000 (---------------) + I inami + 0x003565c6, // n0x1270 c0x0000 (---------------) + I johana + 0x002fdcc8, // n0x1271 c0x0000 (---------------) + I kamiichi + 0x002b7bc6, // n0x1272 c0x0000 (---------------) + I kurobe + 0x00330c8b, // n0x1273 c0x0000 (---------------) + I nakaniikawa + 0x0030138a, // n0x1274 c0x0000 (---------------) + I namerikawa + 0x00342185, // n0x1275 c0x0000 (---------------) + I nanto + 0x0028a886, // n0x1276 c0x0000 (---------------) + I nyuzen + 0x002f5985, // n0x1277 c0x0000 (---------------) + I oyabe + 0x00218d45, // n0x1278 c0x0000 (---------------) + I taira + 0x0028ed47, // n0x1279 c0x0000 (---------------) + I takaoka + 0x002040c8, // n0x127a c0x0000 (---------------) + I tateyama + 0x0025af04, // n0x127b c0x0000 (---------------) + I toga + 0x002b6586, // n0x127c c0x0000 (---------------) + I tonami + 0x0028e9c6, // n0x127d c0x0000 (---------------) + I toyama + 0x00212607, // n0x127e c0x0000 (---------------) + I unazuki + 0x00300a04, // 
n0x127f c0x0000 (---------------) + I uozu + 0x0027ce46, // n0x1280 c0x0000 (---------------) + I yamada + 0x0023ec85, // n0x1281 c0x0000 (---------------) + I arida + 0x0023ec89, // n0x1282 c0x0000 (---------------) + I aridagawa + 0x00213184, // n0x1283 c0x0000 (---------------) + I gobo + 0x0028e009, // n0x1284 c0x0000 (---------------) + I hashimoto + 0x0027ab86, // n0x1285 c0x0000 (---------------) + I hidaka + 0x002b9ac8, // n0x1286 c0x0000 (---------------) + I hirogawa + 0x00228445, // n0x1287 c0x0000 (---------------) + I inami + 0x00311e85, // n0x1288 c0x0000 (---------------) + I iwade + 0x00223206, // n0x1289 c0x0000 (---------------) + I kainan + 0x0026e889, // n0x128a c0x0000 (---------------) + I kamitonda + 0x00219349, // n0x128b c0x0000 (---------------) + I katsuragi + 0x002d7b06, // n0x128c c0x0000 (---------------) + I kimino + 0x0026b6c8, // n0x128d c0x0000 (---------------) + I kinokawa + 0x00267988, // n0x128e c0x0000 (---------------) + I kitayama + 0x002f5944, // n0x128f c0x0000 (---------------) + I koya + 0x0032dcc4, // n0x1290 c0x0000 (---------------) + I koza + 0x0032dcc8, // n0x1291 c0x0000 (---------------) + I kozagawa + 0x00316788, // n0x1292 c0x0000 (---------------) + I kudoyama + 0x002ae8c9, // n0x1293 c0x0000 (---------------) + I kushimoto + 0x002da306, // n0x1294 c0x0000 (---------------) + I mihama + 0x0022f486, // n0x1295 c0x0000 (---------------) + I misato + 0x0031408d, // n0x1296 c0x0000 (---------------) + I nachikatsuura + 0x00266d06, // n0x1297 c0x0000 (---------------) + I shingu + 0x002a9a49, // n0x1298 c0x0000 (---------------) + I shirahama + 0x00201685, // n0x1299 c0x0000 (---------------) + I taiji + 0x0031f386, // n0x129a c0x0000 (---------------) + I tanabe + 0x00223ac8, // n0x129b c0x0000 (---------------) + I wakayama + 0x00310745, // n0x129c c0x0000 (---------------) + I yuasa + 0x0036adc4, // n0x129d c0x0000 (---------------) + I yura + 0x002bf185, // n0x129e c0x0000 (---------------) + I asahi + 
0x00281b48, // n0x129f c0x0000 (---------------) + I funagata + 0x0029a209, // n0x12a0 c0x0000 (---------------) + I higashine + 0x00277cc4, // n0x12a1 c0x0000 (---------------) + I iide + 0x0033d046, // n0x12a2 c0x0000 (---------------) + I kahoku + 0x0024ff0a, // n0x12a3 c0x0000 (---------------) + I kaminoyama + 0x002c7848, // n0x12a4 c0x0000 (---------------) + I kaneyama + 0x002bdc49, // n0x12a5 c0x0000 (---------------) + I kawanishi + 0x0029378a, // n0x12a6 c0x0000 (---------------) + I mamurogawa + 0x002e8086, // n0x12a7 c0x0000 (---------------) + I mikawa + 0x00298a48, // n0x12a8 c0x0000 (---------------) + I murayama + 0x002cdc45, // n0x12a9 c0x0000 (---------------) + I nagai + 0x002c9e08, // n0x12aa c0x0000 (---------------) + I nakayama + 0x002b1ec5, // n0x12ab c0x0000 (---------------) + I nanyo + 0x0021a489, // n0x12ac c0x0000 (---------------) + I nishikawa + 0x00361849, // n0x12ad c0x0000 (---------------) + I obanazawa + 0x00203282, // n0x12ae c0x0000 (---------------) + I oe + 0x002a6a45, // n0x12af c0x0000 (---------------) + I oguni + 0x0026f6c6, // n0x12b0 c0x0000 (---------------) + I ohkura + 0x0027aac7, // n0x12b1 c0x0000 (---------------) + I oishida + 0x00238905, // n0x12b2 c0x0000 (---------------) + I sagae + 0x002f8486, // n0x12b3 c0x0000 (---------------) + I sakata + 0x00310808, // n0x12b4 c0x0000 (---------------) + I sakegawa + 0x0033bd46, // n0x12b5 c0x0000 (---------------) + I shinjo + 0x00347f09, // n0x12b6 c0x0000 (---------------) + I shirataka + 0x002792c6, // n0x12b7 c0x0000 (---------------) + I shonai + 0x00281cc8, // n0x12b8 c0x0000 (---------------) + I takahata + 0x002a94c5, // n0x12b9 c0x0000 (---------------) + I tendo + 0x0026de86, // n0x12ba c0x0000 (---------------) + I tozawa + 0x0032f7c8, // n0x12bb c0x0000 (---------------) + I tsuruoka + 0x002808c8, // n0x12bc c0x0000 (---------------) + I yamagata + 0x0039e808, // n0x12bd c0x0000 (---------------) + I yamanobe + 0x00366688, // n0x12be c0x0000 
(---------------) + I yonezawa + 0x00217244, // n0x12bf c0x0000 (---------------) + I yuza + 0x0022d843, // n0x12c0 c0x0000 (---------------) + I abu + 0x00348144, // n0x12c1 c0x0000 (---------------) + I hagi + 0x0022f006, // n0x12c2 c0x0000 (---------------) + I hikari + 0x002db204, // n0x12c3 c0x0000 (---------------) + I hofu + 0x002da0c7, // n0x12c4 c0x0000 (---------------) + I iwakuni + 0x0039fd89, // n0x12c5 c0x0000 (---------------) + I kudamatsu + 0x002c0485, // n0x12c6 c0x0000 (---------------) + I mitou + 0x0038e086, // n0x12c7 c0x0000 (---------------) + I nagato + 0x0020ef06, // n0x12c8 c0x0000 (---------------) + I oshima + 0x0026c3cb, // n0x12c9 c0x0000 (---------------) + I shimonoseki + 0x003420c6, // n0x12ca c0x0000 (---------------) + I shunan + 0x00316a86, // n0x12cb c0x0000 (---------------) + I tabuse + 0x0022f588, // n0x12cc c0x0000 (---------------) + I tokuyama + 0x0025bb86, // n0x12cd c0x0000 (---------------) + I toyota + 0x00297443, // n0x12ce c0x0000 (---------------) + I ube + 0x0020f9c3, // n0x12cf c0x0000 (---------------) + I yuu + 0x0030ba84, // n0x12d0 c0x0000 (---------------) + I chuo + 0x00236305, // n0x12d1 c0x0000 (---------------) + I doshi + 0x0036af47, // n0x12d2 c0x0000 (---------------) + I fuefuki + 0x00277e48, // n0x12d3 c0x0000 (---------------) + I fujikawa + 0x00277e4f, // n0x12d4 c0x0000 (---------------) + I fujikawaguchiko + 0x0027c24b, // n0x12d5 c0x0000 (---------------) + I fujiyoshida + 0x002fdac8, // n0x12d6 c0x0000 (---------------) + I hayakawa + 0x0033d0c6, // n0x12d7 c0x0000 (---------------) + I hokuto + 0x0026560e, // n0x12d8 c0x0000 (---------------) + I ichikawamisato + 0x00223203, // n0x12d9 c0x0000 (---------------) + I kai + 0x00240c84, // n0x12da c0x0000 (---------------) + I kofu + 0x00342045, // n0x12db c0x0000 (---------------) + I koshu + 0x00300146, // n0x12dc c0x0000 (---------------) + I kosuge + 0x0028bc0b, // n0x12dd c0x0000 (---------------) + I minami-alps + 0x00290086, // n0x12de 
c0x0000 (---------------) + I minobu + 0x002164c9, // n0x12df c0x0000 (---------------) + I nakamichi + 0x002e5a85, // n0x12e0 c0x0000 (---------------) + I nanbu + 0x00381e08, // n0x12e1 c0x0000 (---------------) + I narusawa + 0x0020c388, // n0x12e2 c0x0000 (---------------) + I nirasaki + 0x0021920c, // n0x12e3 c0x0000 (---------------) + I nishikatsura + 0x0029e5c6, // n0x12e4 c0x0000 (---------------) + I oshino + 0x0021f306, // n0x12e5 c0x0000 (---------------) + I otsuki + 0x00319905, // n0x12e6 c0x0000 (---------------) + I showa + 0x002872c8, // n0x12e7 c0x0000 (---------------) + I tabayama + 0x0027b9c5, // n0x12e8 c0x0000 (---------------) + I tsuru + 0x00387308, // n0x12e9 c0x0000 (---------------) + I uenohara + 0x0029dc8a, // n0x12ea c0x0000 (---------------) + I yamanakako + 0x002a15c9, // n0x12eb c0x0000 (---------------) + I yamanashi + 0x00686744, // n0x12ec c0x0001 (---------------) ! I city + 0x2e200742, // n0x12ed c0x00b8 (n0x12ee-n0x12ef) o I co + 0x000ffa08, // n0x12ee c0x0000 (---------------) + blogspot + 0x00233503, // n0x12ef c0x0000 (---------------) + I com + 0x0023a783, // n0x12f0 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x12f1 c0x0000 (---------------) + I gov + 0x00209003, // n0x12f2 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x12f3 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x12f4 c0x0000 (---------------) + I org + 0x00330b83, // n0x12f5 c0x0000 (---------------) + I biz + 0x00233503, // n0x12f6 c0x0000 (---------------) + I com + 0x0023a783, // n0x12f7 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x12f8 c0x0000 (---------------) + I gov + 0x003a1244, // n0x12f9 c0x0000 (---------------) + I info + 0x0021fe03, // n0x12fa c0x0000 (---------------) + I net + 0x0022d1c3, // n0x12fb c0x0000 (---------------) + I org + 0x0023f743, // n0x12fc c0x0000 (---------------) + I ass + 0x002d4884, // n0x12fd c0x0000 (---------------) + I asso + 0x00233503, // n0x12fe c0x0000 (---------------) + I com + 
0x0023d684, // n0x12ff c0x0000 (---------------) + I coop + 0x0023a783, // n0x1300 c0x0000 (---------------) + I edu + 0x0033d7c4, // n0x1301 c0x0000 (---------------) + I gouv + 0x0026cc83, // n0x1302 c0x0000 (---------------) + I gov + 0x00238bc7, // n0x1303 c0x0000 (---------------) + I medecin + 0x00209003, // n0x1304 c0x0000 (---------------) + I mil + 0x00201483, // n0x1305 c0x0000 (---------------) + I nom + 0x0025c988, // n0x1306 c0x0000 (---------------) + I notaires + 0x0022d1c3, // n0x1307 c0x0000 (---------------) + I org + 0x0034d60b, // n0x1308 c0x0000 (---------------) + I pharmaciens + 0x002e1043, // n0x1309 c0x0000 (---------------) + I prd + 0x00247506, // n0x130a c0x0000 (---------------) + I presse + 0x00200142, // n0x130b c0x0000 (---------------) + I tm + 0x002d1c8b, // n0x130c c0x0000 (---------------) + I veterinaire + 0x0023a783, // n0x130d c0x0000 (---------------) + I edu + 0x0026cc83, // n0x130e c0x0000 (---------------) + I gov + 0x0021fe03, // n0x130f c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1310 c0x0000 (---------------) + I org + 0x00233503, // n0x1311 c0x0000 (---------------) + I com + 0x0023a783, // n0x1312 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1313 c0x0000 (---------------) + I gov + 0x0022d1c3, // n0x1314 c0x0000 (---------------) + I org + 0x0022b7c3, // n0x1315 c0x0000 (---------------) + I rep + 0x00203003, // n0x1316 c0x0000 (---------------) + I tra + 0x00201542, // n0x1317 c0x0000 (---------------) + I ac + 0x000ffa08, // n0x1318 c0x0000 (---------------) + blogspot + 0x0022b945, // n0x1319 c0x0000 (---------------) + I busan + 0x003051c8, // n0x131a c0x0000 (---------------) + I chungbuk + 0x003113c8, // n0x131b c0x0000 (---------------) + I chungnam + 0x00200742, // n0x131c c0x0000 (---------------) + I co + 0x0024a3c5, // n0x131d c0x0000 (---------------) + I daegu + 0x00325007, // n0x131e c0x0000 (---------------) + I daejeon + 0x00200482, // n0x131f c0x0000 (---------------) + I es + 
0x00216707, // n0x1320 c0x0000 (---------------) + I gangwon + 0x00202d42, // n0x1321 c0x0000 (---------------) + I go + 0x00242707, // n0x1322 c0x0000 (---------------) + I gwangju + 0x0030b509, // n0x1323 c0x0000 (---------------) + I gyeongbuk + 0x002cd808, // n0x1324 c0x0000 (---------------) + I gyeonggi + 0x00208209, // n0x1325 c0x0000 (---------------) + I gyeongnam + 0x0023f382, // n0x1326 c0x0000 (---------------) + I hs + 0x00268e07, // n0x1327 c0x0000 (---------------) + I incheon + 0x002d7884, // n0x1328 c0x0000 (---------------) + I jeju + 0x003250c7, // n0x1329 c0x0000 (---------------) + I jeonbuk + 0x00301287, // n0x132a c0x0000 (---------------) + I jeonnam + 0x002b5502, // n0x132b c0x0000 (---------------) + I kg + 0x00209003, // n0x132c c0x0000 (---------------) + I mil + 0x0020f702, // n0x132d c0x0000 (---------------) + I ms + 0x00202c02, // n0x132e c0x0000 (---------------) + I ne + 0x00200282, // n0x132f c0x0000 (---------------) + I or + 0x00207782, // n0x1330 c0x0000 (---------------) + I pe + 0x00207002, // n0x1331 c0x0000 (---------------) + I re + 0x00200702, // n0x1332 c0x0000 (---------------) + I sc + 0x00344345, // n0x1333 c0x0000 (---------------) + I seoul + 0x00259585, // n0x1334 c0x0000 (---------------) + I ulsan + 0x00233503, // n0x1335 c0x0000 (---------------) + I com + 0x0023a783, // n0x1336 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1337 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1338 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1339 c0x0000 (---------------) + I org + 0x00233503, // n0x133a c0x0000 (---------------) + I com + 0x0023a783, // n0x133b c0x0000 (---------------) + I edu + 0x0026cc83, // n0x133c c0x0000 (---------------) + I gov + 0x00209003, // n0x133d c0x0000 (---------------) + I mil + 0x0021fe03, // n0x133e c0x0000 (---------------) + I net + 0x0022d1c3, // n0x133f c0x0000 (---------------) + I org + 0x00000301, // n0x1340 c0x0000 (---------------) + c + 0x00233503, // n0x1341 
c0x0000 (---------------) + I com + 0x0023a783, // n0x1342 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1343 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1344 c0x0000 (---------------) + I info + 0x00201603, // n0x1345 c0x0000 (---------------) + I int + 0x0021fe03, // n0x1346 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1347 c0x0000 (---------------) + I org + 0x00220f03, // n0x1348 c0x0000 (---------------) + I per + 0x00233503, // n0x1349 c0x0000 (---------------) + I com + 0x0023a783, // n0x134a c0x0000 (---------------) + I edu + 0x0026cc83, // n0x134b c0x0000 (---------------) + I gov + 0x0021fe03, // n0x134c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x134d c0x0000 (---------------) + I org + 0x00200742, // n0x134e c0x0000 (---------------) + I co + 0x00233503, // n0x134f c0x0000 (---------------) + I com + 0x0023a783, // n0x1350 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1351 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1352 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1353 c0x0000 (---------------) + I org + 0x000ffa08, // n0x1354 c0x0000 (---------------) + blogspot + 0x00201542, // n0x1355 c0x0000 (---------------) + I ac + 0x002bad84, // n0x1356 c0x0000 (---------------) + I assn + 0x00233503, // n0x1357 c0x0000 (---------------) + I com + 0x0023a783, // n0x1358 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1359 c0x0000 (---------------) + I gov + 0x00379ec3, // n0x135a c0x0000 (---------------) + I grp + 0x00234dc5, // n0x135b c0x0000 (---------------) + I hotel + 0x00201603, // n0x135c c0x0000 (---------------) + I int + 0x00322cc3, // n0x135d c0x0000 (---------------) + I ltd + 0x0021fe03, // n0x135e c0x0000 (---------------) + I net + 0x00202d03, // n0x135f c0x0000 (---------------) + I ngo + 0x0022d1c3, // n0x1360 c0x0000 (---------------) + I org + 0x00217443, // n0x1361 c0x0000 (---------------) + I sch + 0x00274803, // n0x1362 c0x0000 (---------------) + I soc + 0x00221a03, 
// n0x1363 c0x0000 (---------------) + I web + 0x00233503, // n0x1364 c0x0000 (---------------) + I com + 0x0023a783, // n0x1365 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1366 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1367 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1368 c0x0000 (---------------) + I org + 0x00200742, // n0x1369 c0x0000 (---------------) + I co + 0x0022d1c3, // n0x136a c0x0000 (---------------) + I org + 0x000ffa08, // n0x136b c0x0000 (---------------) + blogspot + 0x0026cc83, // n0x136c c0x0000 (---------------) + I gov + 0x000ffa08, // n0x136d c0x0000 (---------------) + blogspot + 0x002afc83, // n0x136e c0x0000 (---------------) + I asn + 0x00233503, // n0x136f c0x0000 (---------------) + I com + 0x00236cc4, // n0x1370 c0x0000 (---------------) + I conf + 0x0023a783, // n0x1371 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1372 c0x0000 (---------------) + I gov + 0x0020c782, // n0x1373 c0x0000 (---------------) + I id + 0x00209003, // n0x1374 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1375 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1376 c0x0000 (---------------) + I org + 0x00233503, // n0x1377 c0x0000 (---------------) + I com + 0x0023a783, // n0x1378 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1379 c0x0000 (---------------) + I gov + 0x0020c782, // n0x137a c0x0000 (---------------) + I id + 0x00213ac3, // n0x137b c0x0000 (---------------) + I med + 0x0021fe03, // n0x137c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x137d c0x0000 (---------------) + I org + 0x002db143, // n0x137e c0x0000 (---------------) + I plc + 0x00217443, // n0x137f c0x0000 (---------------) + I sch + 0x00201542, // n0x1380 c0x0000 (---------------) + I ac + 0x00200742, // n0x1381 c0x0000 (---------------) + I co + 0x0026cc83, // n0x1382 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1383 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1384 c0x0000 (---------------) + I org + 
0x00247505, // n0x1385 c0x0000 (---------------) + I press + 0x002d4884, // n0x1386 c0x0000 (---------------) + I asso + 0x00200142, // n0x1387 c0x0000 (---------------) + I tm + 0x000ffa08, // n0x1388 c0x0000 (---------------) + blogspot + 0x00201542, // n0x1389 c0x0000 (---------------) + I ac + 0x00200742, // n0x138a c0x0000 (---------------) + I co + 0x00054d8b, // n0x138b c0x0000 (---------------) + diskstation + 0x00009107, // n0x138c c0x0000 (---------------) + dscloud + 0x0023a783, // n0x138d c0x0000 (---------------) + I edu + 0x0026cc83, // n0x138e c0x0000 (---------------) + I gov + 0x00157b84, // n0x138f c0x0000 (---------------) + i234 + 0x00230483, // n0x1390 c0x0000 (---------------) + I its + 0x00156bc4, // n0x1391 c0x0000 (---------------) + myds + 0x0021fe03, // n0x1392 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1393 c0x0000 (---------------) + I org + 0x002e1c44, // n0x1394 c0x0000 (---------------) + I priv + 0x0010b388, // n0x1395 c0x0000 (---------------) + synology + 0x00200742, // n0x1396 c0x0000 (---------------) + I co + 0x00233503, // n0x1397 c0x0000 (---------------) + I com + 0x0023a783, // n0x1398 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1399 c0x0000 (---------------) + I gov + 0x00209003, // n0x139a c0x0000 (---------------) + I mil + 0x00201483, // n0x139b c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x139c c0x0000 (---------------) + I org + 0x002e1043, // n0x139d c0x0000 (---------------) + I prd + 0x00200142, // n0x139e c0x0000 (---------------) + I tm + 0x000ffa08, // n0x139f c0x0000 (---------------) + blogspot + 0x00233503, // n0x13a0 c0x0000 (---------------) + I com + 0x0023a783, // n0x13a1 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x13a2 c0x0000 (---------------) + I gov + 0x003a1083, // n0x13a3 c0x0000 (---------------) + I inf + 0x00205284, // n0x13a4 c0x0000 (---------------) + I name + 0x0021fe03, // n0x13a5 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x13a6 c0x0000 
(---------------) + I org + 0x00233503, // n0x13a7 c0x0000 (---------------) + I com + 0x0023a783, // n0x13a8 c0x0000 (---------------) + I edu + 0x0033d7c4, // n0x13a9 c0x0000 (---------------) + I gouv + 0x0026cc83, // n0x13aa c0x0000 (---------------) + I gov + 0x0021fe03, // n0x13ab c0x0000 (---------------) + I net + 0x0022d1c3, // n0x13ac c0x0000 (---------------) + I org + 0x00247506, // n0x13ad c0x0000 (---------------) + I presse + 0x0023a783, // n0x13ae c0x0000 (---------------) + I edu + 0x0026cc83, // n0x13af c0x0000 (---------------) + I gov + 0x0016ef83, // n0x13b0 c0x0000 (---------------) + nyc + 0x0022d1c3, // n0x13b1 c0x0000 (---------------) + I org + 0x00233503, // n0x13b2 c0x0000 (---------------) + I com + 0x0023a783, // n0x13b3 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x13b4 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x13b5 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x13b6 c0x0000 (---------------) + I org + 0x00009107, // n0x13b7 c0x0000 (---------------) + dscloud + 0x000ffa08, // n0x13b8 c0x0000 (---------------) + blogspot + 0x0026cc83, // n0x13b9 c0x0000 (---------------) + I gov + 0x00233503, // n0x13ba c0x0000 (---------------) + I com + 0x0023a783, // n0x13bb c0x0000 (---------------) + I edu + 0x0026cc83, // n0x13bc c0x0000 (---------------) + I gov + 0x0021fe03, // n0x13bd c0x0000 (---------------) + I net + 0x0022d1c3, // n0x13be c0x0000 (---------------) + I org + 0x36633503, // n0x13bf c0x00d9 (n0x13c3-n0x13c4) + I com + 0x0023a783, // n0x13c0 c0x0000 (---------------) + I edu + 0x0021fe03, // n0x13c1 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x13c2 c0x0000 (---------------) + I org + 0x000ffa08, // n0x13c3 c0x0000 (---------------) + blogspot + 0x00201542, // n0x13c4 c0x0000 (---------------) + I ac + 0x00200742, // n0x13c5 c0x0000 (---------------) + I co + 0x00233503, // n0x13c6 c0x0000 (---------------) + I com + 0x0026cc83, // n0x13c7 c0x0000 (---------------) + I gov + 0x0021fe03, // 
n0x13c8 c0x0000 (---------------) + I net + 0x00200282, // n0x13c9 c0x0000 (---------------) + I or + 0x0022d1c3, // n0x13ca c0x0000 (---------------) + I org + 0x0030aec7, // n0x13cb c0x0000 (---------------) + I academy + 0x00209c8b, // n0x13cc c0x0000 (---------------) + I agriculture + 0x00204903, // n0x13cd c0x0000 (---------------) + I air + 0x0023b1c8, // n0x13ce c0x0000 (---------------) + I airguard + 0x002ec607, // n0x13cf c0x0000 (---------------) + I alabama + 0x00279bc6, // n0x13d0 c0x0000 (---------------) + I alaska + 0x00367645, // n0x13d1 c0x0000 (---------------) + I amber + 0x002c8609, // n0x13d2 c0x0000 (---------------) + I ambulance + 0x00208d88, // n0x13d3 c0x0000 (---------------) + I american + 0x002729c9, // n0x13d4 c0x0000 (---------------) + I americana + 0x002729d0, // n0x13d5 c0x0000 (---------------) + I americanantiques + 0x0035994b, // n0x13d6 c0x0000 (---------------) + I americanart + 0x002c8449, // n0x13d7 c0x0000 (---------------) + I amsterdam + 0x00200843, // n0x13d8 c0x0000 (---------------) + I and + 0x00357549, // n0x13d9 c0x0000 (---------------) + I annefrank + 0x00237a06, // n0x13da c0x0000 (---------------) + I anthro + 0x00237a0c, // n0x13db c0x0000 (---------------) + I anthropology + 0x0022ba08, // n0x13dc c0x0000 (---------------) + I antiques + 0x003a2308, // n0x13dd c0x0000 (---------------) + I aquarium + 0x00258249, // n0x13de c0x0000 (---------------) + I arboretum + 0x0029fb0e, // n0x13df c0x0000 (---------------) + I archaeological + 0x0037228b, // n0x13e0 c0x0000 (---------------) + I archaeology + 0x0031fa0c, // n0x13e1 c0x0000 (---------------) + I architecture + 0x002011c3, // n0x13e2 c0x0000 (---------------) + I art + 0x003284cc, // n0x13e3 c0x0000 (---------------) + I artanddesign + 0x002043c9, // n0x13e4 c0x0000 (---------------) + I artcenter + 0x0020b107, // n0x13e5 c0x0000 (---------------) + I artdeco + 0x0023a6cc, // n0x13e6 c0x0000 (---------------) + I arteducation + 0x0039208a, // n0x13e7 
c0x0000 (---------------) + I artgallery + 0x0024bf84, // n0x13e8 c0x0000 (---------------) + I arts + 0x0039eacd, // n0x13e9 c0x0000 (---------------) + I artsandcrafts + 0x00328388, // n0x13ea c0x0000 (---------------) + I asmatart + 0x0039efcd, // n0x13eb c0x0000 (---------------) + I assassination + 0x00252046, // n0x13ec c0x0000 (---------------) + I assisi + 0x002d488b, // n0x13ed c0x0000 (---------------) + I association + 0x00356a09, // n0x13ee c0x0000 (---------------) + I astronomy + 0x002244c7, // n0x13ef c0x0000 (---------------) + I atlanta + 0x002ea306, // n0x13f0 c0x0000 (---------------) + I austin + 0x003082c9, // n0x13f1 c0x0000 (---------------) + I australia + 0x00322f8a, // n0x13f2 c0x0000 (---------------) + I automotive + 0x0035b848, // n0x13f3 c0x0000 (---------------) + I aviation + 0x002e0b84, // n0x13f4 c0x0000 (---------------) + I axis + 0x00277007, // n0x13f5 c0x0000 (---------------) + I badajoz + 0x002a1847, // n0x13f6 c0x0000 (---------------) + I baghdad + 0x002ec084, // n0x13f7 c0x0000 (---------------) + I bahn + 0x00228a84, // n0x13f8 c0x0000 (---------------) + I bale + 0x0025d289, // n0x13f9 c0x0000 (---------------) + I baltimore + 0x002dd889, // n0x13fa c0x0000 (---------------) + I barcelona + 0x0022f9c8, // n0x13fb c0x0000 (---------------) + I baseball + 0x00210685, // n0x13fc c0x0000 (---------------) + I basel + 0x00389345, // n0x13fd c0x0000 (---------------) + I baths + 0x0020d246, // n0x13fe c0x0000 (---------------) + I bauern + 0x0039e989, // n0x13ff c0x0000 (---------------) + I beauxarts + 0x0021b10d, // n0x1400 c0x0000 (---------------) + I beeldengeluid + 0x0031f488, // n0x1401 c0x0000 (---------------) + I bellevue + 0x0020d147, // n0x1402 c0x0000 (---------------) + I bergbau + 0x003676c8, // n0x1403 c0x0000 (---------------) + I berkeley + 0x0022e6c6, // n0x1404 c0x0000 (---------------) + I berlin + 0x00390744, // n0x1405 c0x0000 (---------------) + I bern + 0x0037bf45, // n0x1406 c0x0000 (---------------) 
+ I bible + 0x002028c6, // n0x1407 c0x0000 (---------------) + I bilbao + 0x00202e84, // n0x1408 c0x0000 (---------------) + I bill + 0x002042c7, // n0x1409 c0x0000 (---------------) + I birdart + 0x0020628a, // n0x140a c0x0000 (---------------) + I birthplace + 0x00212384, // n0x140b c0x0000 (---------------) + I bonn + 0x00217cc6, // n0x140c c0x0000 (---------------) + I boston + 0x00218e89, // n0x140d c0x0000 (---------------) + I botanical + 0x00218e8f, // n0x140e c0x0000 (---------------) + I botanicalgarden + 0x0021a18d, // n0x140f c0x0000 (---------------) + I botanicgarden + 0x0021a946, // n0x1410 c0x0000 (---------------) + I botany + 0x0021ca50, // n0x1411 c0x0000 (---------------) + I brandywinevalley + 0x0021ce46, // n0x1412 c0x0000 (---------------) + I brasil + 0x0021db87, // n0x1413 c0x0000 (---------------) + I bristol + 0x0021df07, // n0x1414 c0x0000 (---------------) + I british + 0x0021df0f, // n0x1415 c0x0000 (---------------) + I britishcolumbia + 0x0021fa49, // n0x1416 c0x0000 (---------------) + I broadcast + 0x00222b06, // n0x1417 c0x0000 (---------------) + I brunel + 0x00225e87, // n0x1418 c0x0000 (---------------) + I brussel + 0x00225e88, // n0x1419 c0x0000 (---------------) + I brussels + 0x00227049, // n0x141a c0x0000 (---------------) + I bruxelles + 0x0028fd88, // n0x141b c0x0000 (---------------) + I building + 0x002d7647, // n0x141c c0x0000 (---------------) + I burghof + 0x0020ca03, // n0x141d c0x0000 (---------------) + I bus + 0x00233386, // n0x141e c0x0000 (---------------) + I bushey + 0x00200308, // n0x141f c0x0000 (---------------) + I cadaques + 0x0029fdca, // n0x1420 c0x0000 (---------------) + I california + 0x00221ac9, // n0x1421 c0x0000 (---------------) + I cambridge + 0x00208ec3, // n0x1422 c0x0000 (---------------) + I can + 0x00324f06, // n0x1423 c0x0000 (---------------) + I canada + 0x002b63ca, // n0x1424 c0x0000 (---------------) + I capebreton + 0x00365247, // n0x1425 c0x0000 (---------------) + I carrier + 
0x0020af4a, // n0x1426 c0x0000 (---------------) + I cartoonart + 0x0021460e, // n0x1427 c0x0000 (---------------) + I casadelamoneda + 0x0021fb86, // n0x1428 c0x0000 (---------------) + I castle + 0x002a6c47, // n0x1429 c0x0000 (---------------) + I castres + 0x00211b46, // n0x142a c0x0000 (---------------) + I celtic + 0x00204486, // n0x142b c0x0000 (---------------) + I center + 0x00374acb, // n0x142c c0x0000 (---------------) + I chattanooga + 0x002648ca, // n0x142d c0x0000 (---------------) + I cheltenham + 0x0035150d, // n0x142e c0x0000 (---------------) + I chesapeakebay + 0x00213047, // n0x142f c0x0000 (---------------) + I chicago + 0x00274888, // n0x1430 c0x0000 (---------------) + I children + 0x00274889, // n0x1431 c0x0000 (---------------) + I childrens + 0x0027488f, // n0x1432 c0x0000 (---------------) + I childrensgarden + 0x0023984c, // n0x1433 c0x0000 (---------------) + I chiropractic + 0x002b5a89, // n0x1434 c0x0000 (---------------) + I chocolate + 0x00379b8e, // n0x1435 c0x0000 (---------------) + I christiansburg + 0x00238cca, // n0x1436 c0x0000 (---------------) + I cincinnati + 0x002ce4c6, // n0x1437 c0x0000 (---------------) + I cinema + 0x00337c86, // n0x1438 c0x0000 (---------------) + I circus + 0x00363b8c, // n0x1439 c0x0000 (---------------) + I civilisation + 0x00367b8c, // n0x143a c0x0000 (---------------) + I civilization + 0x0036f008, // n0x143b c0x0000 (---------------) + I civilwar + 0x003894c7, // n0x143c c0x0000 (---------------) + I clinton + 0x002acc45, // n0x143d c0x0000 (---------------) + I clock + 0x00355244, // n0x143e c0x0000 (---------------) + I coal + 0x00386b8e, // n0x143f c0x0000 (---------------) + I coastaldefence + 0x00323204, // n0x1440 c0x0000 (---------------) + I cody + 0x00231d87, // n0x1441 c0x0000 (---------------) + I coldwar + 0x00265b8a, // n0x1442 c0x0000 (---------------) + I collection + 0x00232454, // n0x1443 c0x0000 (---------------) + I colonialwilliamsburg + 0x00232dcf, // n0x1444 c0x0000 
(---------------) + I coloradoplateau + 0x0021e0c8, // n0x1445 c0x0000 (---------------) + I columbia + 0x00233248, // n0x1446 c0x0000 (---------------) + I columbus + 0x0036018d, // n0x1447 c0x0000 (---------------) + I communication + 0x0036018e, // n0x1448 c0x0000 (---------------) + I communications + 0x00233509, // n0x1449 c0x0000 (---------------) + I community + 0x00235488, // n0x144a c0x0000 (---------------) + I computer + 0x0023548f, // n0x144b c0x0000 (---------------) + I computerhistory + 0x0023a3cc, // n0x144c c0x0000 (---------------) + I contemporary + 0x0023a3cf, // n0x144d c0x0000 (---------------) + I contemporaryart + 0x0023b747, // n0x144e c0x0000 (---------------) + I convent + 0x0023de0a, // n0x144f c0x0000 (---------------) + I copenhagen + 0x0021c58b, // n0x1450 c0x0000 (---------------) + I corporation + 0x0023f888, // n0x1451 c0x0000 (---------------) + I corvette + 0x00241807, // n0x1452 c0x0000 (---------------) + I costume + 0x0033658d, // n0x1453 c0x0000 (---------------) + I countryestate + 0x0031ab06, // n0x1454 c0x0000 (---------------) + I county + 0x0039ec86, // n0x1455 c0x0000 (---------------) + I crafts + 0x00243c49, // n0x1456 c0x0000 (---------------) + I cranbrook + 0x00336b48, // n0x1457 c0x0000 (---------------) + I creation + 0x00247888, // n0x1458 c0x0000 (---------------) + I cultural + 0x0024788e, // n0x1459 c0x0000 (---------------) + I culturalcenter + 0x00209d87, // n0x145a c0x0000 (---------------) + I culture + 0x00322345, // n0x145b c0x0000 (---------------) + I cyber + 0x0024a985, // n0x145c c0x0000 (---------------) + I cymru + 0x00210284, // n0x145d c0x0000 (---------------) + I dali + 0x00279e86, // n0x145e c0x0000 (---------------) + I dallas + 0x0022f8c8, // n0x145f c0x0000 (---------------) + I database + 0x002edd03, // n0x1460 c0x0000 (---------------) + I ddr + 0x0025fa0e, // n0x1461 c0x0000 (---------------) + I decorativearts + 0x00336948, // n0x1462 c0x0000 (---------------) + I delaware + 
0x0027af0b, // n0x1463 c0x0000 (---------------) + I delmenhorst + 0x003312c7, // n0x1464 c0x0000 (---------------) + I denmark + 0x00274145, // n0x1465 c0x0000 (---------------) + I depot + 0x0022dcc6, // n0x1466 c0x0000 (---------------) + I design + 0x002aa507, // n0x1467 c0x0000 (---------------) + I detroit + 0x002fac88, // n0x1468 c0x0000 (---------------) + I dinosaur + 0x00330409, // n0x1469 c0x0000 (---------------) + I discovery + 0x00237405, // n0x146a c0x0000 (---------------) + I dolls + 0x002861c8, // n0x146b c0x0000 (---------------) + I donostia + 0x0020cc06, // n0x146c c0x0000 (---------------) + I durham + 0x0037574a, // n0x146d c0x0000 (---------------) + I eastafrica + 0x00386a89, // n0x146e c0x0000 (---------------) + I eastcoast + 0x0023a789, // n0x146f c0x0000 (---------------) + I education + 0x0023a78b, // n0x1470 c0x0000 (---------------) + I educational + 0x0028c908, // n0x1471 c0x0000 (---------------) + I egyptian + 0x002ebf49, // n0x1472 c0x0000 (---------------) + I eisenbahn + 0x00210746, // n0x1473 c0x0000 (---------------) + I elburg + 0x002e4f4a, // n0x1474 c0x0000 (---------------) + I elvendrell + 0x0022980a, // n0x1475 c0x0000 (---------------) + I embroidery + 0x0023e00c, // n0x1476 c0x0000 (---------------) + I encyclopedic + 0x00213707, // n0x1477 c0x0000 (---------------) + I england + 0x002cd60a, // n0x1478 c0x0000 (---------------) + I entomology + 0x00326d8b, // n0x1479 c0x0000 (---------------) + I environment + 0x00326d99, // n0x147a c0x0000 (---------------) + I environmentalconservation + 0x00329a48, // n0x147b c0x0000 (---------------) + I epilepsy + 0x00247585, // n0x147c c0x0000 (---------------) + I essex + 0x002c2486, // n0x147d c0x0000 (---------------) + I estate + 0x0030cf09, // n0x147e c0x0000 (---------------) + I ethnology + 0x00205346, // n0x147f c0x0000 (---------------) + I exeter + 0x002115ca, // n0x1480 c0x0000 (---------------) + I exhibition + 0x00208f86, // n0x1481 c0x0000 (---------------) + I 
family + 0x00271d04, // n0x1482 c0x0000 (---------------) + I farm + 0x002c260d, // n0x1483 c0x0000 (---------------) + I farmequipment + 0x002ece87, // n0x1484 c0x0000 (---------------) + I farmers + 0x00271d09, // n0x1485 c0x0000 (---------------) + I farmstead + 0x00366b05, // n0x1486 c0x0000 (---------------) + I field + 0x0037ac88, // n0x1487 c0x0000 (---------------) + I figueres + 0x0024b549, // n0x1488 c0x0000 (---------------) + I filatelia + 0x0024b784, // n0x1489 c0x0000 (---------------) + I film + 0x0024be87, // n0x148a c0x0000 (---------------) + I fineart + 0x0024be88, // n0x148b c0x0000 (---------------) + I finearts + 0x0024c387, // n0x148c c0x0000 (---------------) + I finland + 0x00267688, // n0x148d c0x0000 (---------------) + I flanders + 0x00252947, // n0x148e c0x0000 (---------------) + I florida + 0x00338705, // n0x148f c0x0000 (---------------) + I force + 0x00259fcc, // n0x1490 c0x0000 (---------------) + I fortmissoula + 0x0025ab89, // n0x1491 c0x0000 (---------------) + I fortworth + 0x002b9f4a, // n0x1492 c0x0000 (---------------) + I foundation + 0x00385d09, // n0x1493 c0x0000 (---------------) + I francaise + 0x00357649, // n0x1494 c0x0000 (---------------) + I frankfurt + 0x00256acc, // n0x1495 c0x0000 (---------------) + I franziskaner + 0x002e7d0b, // n0x1496 c0x0000 (---------------) + I freemasonry + 0x0025c488, // n0x1497 c0x0000 (---------------) + I freiburg + 0x00260048, // n0x1498 c0x0000 (---------------) + I fribourg + 0x002636c4, // n0x1499 c0x0000 (---------------) + I frog + 0x00283c88, // n0x149a c0x0000 (---------------) + I fundacio + 0x00285349, // n0x149b c0x0000 (---------------) + I furniture + 0x00392147, // n0x149c c0x0000 (---------------) + I gallery + 0x002190c6, // n0x149d c0x0000 (---------------) + I garden + 0x00246347, // n0x149e c0x0000 (---------------) + I gateway + 0x00330689, // n0x149f c0x0000 (---------------) + I geelvinck + 0x0021334b, // n0x14a0 c0x0000 (---------------) + I gemological + 
0x00396387, // n0x14a1 c0x0000 (---------------) + I geology + 0x00324c47, // n0x14a2 c0x0000 (---------------) + I georgia + 0x00279907, // n0x14a3 c0x0000 (---------------) + I giessen + 0x0039ef44, // n0x14a4 c0x0000 (---------------) + I glas + 0x0039ef45, // n0x14a5 c0x0000 (---------------) + I glass + 0x002a8705, // n0x14a6 c0x0000 (---------------) + I gorge + 0x0033454b, // n0x14a7 c0x0000 (---------------) + I grandrapids + 0x0038f9c4, // n0x14a8 c0x0000 (---------------) + I graz + 0x00266e08, // n0x14a9 c0x0000 (---------------) + I guernsey + 0x0029168a, // n0x14aa c0x0000 (---------------) + I halloffame + 0x0020ccc7, // n0x14ab c0x0000 (---------------) + I hamburg + 0x0031bbc7, // n0x14ac c0x0000 (---------------) + I handson + 0x0028b492, // n0x14ad c0x0000 (---------------) + I harvestcelebration + 0x0025cb86, // n0x14ae c0x0000 (---------------) + I hawaii + 0x0036b386, // n0x14af c0x0000 (---------------) + I health + 0x0030f8ce, // n0x14b0 c0x0000 (---------------) + I heimatunduhren + 0x0025fd86, // n0x14b1 c0x0000 (---------------) + I hellas + 0x0020ebc8, // n0x14b2 c0x0000 (---------------) + I helsinki + 0x00290d0f, // n0x14b3 c0x0000 (---------------) + I hembygdsforbund + 0x0039f388, // n0x14b4 c0x0000 (---------------) + I heritage + 0x0036d908, // n0x14b5 c0x0000 (---------------) + I histoire + 0x002fb8ca, // n0x14b6 c0x0000 (---------------) + I historical + 0x002fb8d1, // n0x14b7 c0x0000 (---------------) + I historicalsociety + 0x002a1f0e, // n0x14b8 c0x0000 (---------------) + I historichouses + 0x002567ca, // n0x14b9 c0x0000 (---------------) + I historisch + 0x002567cc, // n0x14ba c0x0000 (---------------) + I historisches + 0x00235687, // n0x14bb c0x0000 (---------------) + I history + 0x00235690, // n0x14bc c0x0000 (---------------) + I historyofscience + 0x00202188, // n0x14bd c0x0000 (---------------) + I horology + 0x002a2105, // n0x14be c0x0000 (---------------) + I house + 0x002aad4a, // n0x14bf c0x0000 (---------------) 
+ I humanities + 0x00202ecc, // n0x14c0 c0x0000 (---------------) + I illustration + 0x002b44cd, // n0x14c1 c0x0000 (---------------) + I imageandsound + 0x002a3c46, // n0x14c2 c0x0000 (---------------) + I indian + 0x002a3c47, // n0x14c3 c0x0000 (---------------) + I indiana + 0x002a3c4c, // n0x14c4 c0x0000 (---------------) + I indianapolis + 0x002f120c, // n0x14c5 c0x0000 (---------------) + I indianmarket + 0x0024dd4c, // n0x14c6 c0x0000 (---------------) + I intelligence + 0x0028a0cb, // n0x14c7 c0x0000 (---------------) + I interactive + 0x002859c4, // n0x14c8 c0x0000 (---------------) + I iraq + 0x0021d504, // n0x14c9 c0x0000 (---------------) + I iron + 0x0034fb49, // n0x14ca c0x0000 (---------------) + I isleofman + 0x002c8ec7, // n0x14cb c0x0000 (---------------) + I jamison + 0x00266a49, // n0x14cc c0x0000 (---------------) + I jefferson + 0x00283549, // n0x14cd c0x0000 (---------------) + I jerusalem + 0x00360f47, // n0x14ce c0x0000 (---------------) + I jewelry + 0x00391f06, // n0x14cf c0x0000 (---------------) + I jewish + 0x00391f09, // n0x14d0 c0x0000 (---------------) + I jewishart + 0x00399f83, // n0x14d1 c0x0000 (---------------) + I jfk + 0x0033be4a, // n0x14d2 c0x0000 (---------------) + I journalism + 0x00355487, // n0x14d3 c0x0000 (---------------) + I judaica + 0x0027744b, // n0x14d4 c0x0000 (---------------) + I judygarland + 0x0035138a, // n0x14d5 c0x0000 (---------------) + I juedisches + 0x00242844, // n0x14d6 c0x0000 (---------------) + I juif + 0x00353546, // n0x14d7 c0x0000 (---------------) + I karate + 0x0027efc9, // n0x14d8 c0x0000 (---------------) + I karikatur + 0x0028cd44, // n0x14d9 c0x0000 (---------------) + I kids + 0x0020324a, // n0x14da c0x0000 (---------------) + I koebenhavn + 0x0036bd45, // n0x14db c0x0000 (---------------) + I koeln + 0x002b4d85, // n0x14dc c0x0000 (---------------) + I kunst + 0x002b4d8d, // n0x14dd c0x0000 (---------------) + I kunstsammlung + 0x002b50ce, // n0x14de c0x0000 (---------------) + I 
kunstunddesign + 0x00315585, // n0x14df c0x0000 (---------------) + I labor + 0x0038b2c6, // n0x14e0 c0x0000 (---------------) + I labour + 0x00247107, // n0x14e1 c0x0000 (---------------) + I lajolla + 0x002c990a, // n0x14e2 c0x0000 (---------------) + I lancashire + 0x00323506, // n0x14e3 c0x0000 (---------------) + I landes + 0x00359c44, // n0x14e4 c0x0000 (---------------) + I lans + 0x00359f87, // n0x14e5 c0x0000 (---------------) + I larsson + 0x002def0b, // n0x14e6 c0x0000 (---------------) + I lewismiller + 0x0022e787, // n0x14e7 c0x0000 (---------------) + I lincoln + 0x003a0f44, // n0x14e8 c0x0000 (---------------) + I linz + 0x002414c6, // n0x14e9 c0x0000 (---------------) + I living + 0x002414cd, // n0x14ea c0x0000 (---------------) + I livinghistory + 0x003571cc, // n0x14eb c0x0000 (---------------) + I localhistory + 0x00321906, // n0x14ec c0x0000 (---------------) + I london + 0x0031f68a, // n0x14ed c0x0000 (---------------) + I losangeles + 0x0022b6c6, // n0x14ee c0x0000 (---------------) + I louvre + 0x002a7e88, // n0x14ef c0x0000 (---------------) + I loyalist + 0x002e6147, // n0x14f0 c0x0000 (---------------) + I lucerne + 0x0023ca4a, // n0x14f1 c0x0000 (---------------) + I luxembourg + 0x0023dc86, // n0x14f2 c0x0000 (---------------) + I luzern + 0x00212f43, // n0x14f3 c0x0000 (---------------) + I mad + 0x00317146, // n0x14f4 c0x0000 (---------------) + I madrid + 0x00200188, // n0x14f5 c0x0000 (---------------) + I mallorca + 0x0029a80a, // n0x14f6 c0x0000 (---------------) + I manchester + 0x00251d47, // n0x14f7 c0x0000 (---------------) + I mansion + 0x00251d48, // n0x14f8 c0x0000 (---------------) + I mansions + 0x0026a704, // n0x14f9 c0x0000 (---------------) + I manx + 0x00278f07, // n0x14fa c0x0000 (---------------) + I marburg + 0x00269708, // n0x14fb c0x0000 (---------------) + I maritime + 0x002a3088, // n0x14fc c0x0000 (---------------) + I maritimo + 0x0025cd88, // n0x14fd c0x0000 (---------------) + I maryland + 0x002831ca, // 
n0x14fe c0x0000 (---------------) + I marylhurst + 0x003025c5, // n0x14ff c0x0000 (---------------) + I media + 0x0023ac87, // n0x1500 c0x0000 (---------------) + I medical + 0x00256613, // n0x1501 c0x0000 (---------------) + I medizinhistorisches + 0x00259146, // n0x1502 c0x0000 (---------------) + I meeres + 0x0026cf88, // n0x1503 c0x0000 (---------------) + I memorial + 0x002221c9, // n0x1504 c0x0000 (---------------) + I mesaverde + 0x002165c8, // n0x1505 c0x0000 (---------------) + I michigan + 0x0036e1cb, // n0x1506 c0x0000 (---------------) + I midatlantic + 0x002b8348, // n0x1507 c0x0000 (---------------) + I military + 0x00285244, // n0x1508 c0x0000 (---------------) + I mill + 0x00321106, // n0x1509 c0x0000 (---------------) + I miners + 0x003a5c46, // n0x150a c0x0000 (---------------) + I mining + 0x003058c9, // n0x150b c0x0000 (---------------) + I minnesota + 0x002bf847, // n0x150c c0x0000 (---------------) + I missile + 0x0025a0c8, // n0x150d c0x0000 (---------------) + I missoula + 0x003a1f86, // n0x150e c0x0000 (---------------) + I modern + 0x0037a084, // n0x150f c0x0000 (---------------) + I moma + 0x002c6d85, // n0x1510 c0x0000 (---------------) + I money + 0x002c1888, // n0x1511 c0x0000 (---------------) + I monmouth + 0x002c1fca, // n0x1512 c0x0000 (---------------) + I monticello + 0x002c2288, // n0x1513 c0x0000 (---------------) + I montreal + 0x002c74c6, // n0x1514 c0x0000 (---------------) + I moscow + 0x0029af0a, // n0x1515 c0x0000 (---------------) + I motorcycle + 0x002e6d88, // n0x1516 c0x0000 (---------------) + I muenchen + 0x002ca188, // n0x1517 c0x0000 (---------------) + I muenster + 0x002cb648, // n0x1518 c0x0000 (---------------) + I mulhouse + 0x002cc046, // n0x1519 c0x0000 (---------------) + I muncie + 0x002cfc06, // n0x151a c0x0000 (---------------) + I museet + 0x002ea80c, // n0x151b c0x0000 (---------------) + I museumcenter + 0x002d0110, // n0x151c c0x0000 (---------------) + I museumvereniging + 0x00283a85, // n0x151d 
c0x0000 (---------------) + I music + 0x00319548, // n0x151e c0x0000 (---------------) + I national + 0x00319550, // n0x151f c0x0000 (---------------) + I nationalfirearms + 0x0039f190, // n0x1520 c0x0000 (---------------) + I nationalheritage + 0x0027284e, // n0x1521 c0x0000 (---------------) + I nativeamerican + 0x002ea48e, // n0x1522 c0x0000 (---------------) + I naturalhistory + 0x002ea494, // n0x1523 c0x0000 (---------------) + I naturalhistorymuseum + 0x0031ad4f, // n0x1524 c0x0000 (---------------) + I naturalsciences + 0x0031b106, // n0x1525 c0x0000 (---------------) + I nature + 0x00325e11, // n0x1526 c0x0000 (---------------) + I naturhistorisches + 0x00327393, // n0x1527 c0x0000 (---------------) + I natuurwetenschappen + 0x00327808, // n0x1528 c0x0000 (---------------) + I naumburg + 0x0030f105, // n0x1529 c0x0000 (---------------) + I naval + 0x002d7f48, // n0x152a c0x0000 (---------------) + I nebraska + 0x002de045, // n0x152b c0x0000 (---------------) + I neues + 0x0022a34c, // n0x152c c0x0000 (---------------) + I newhampshire + 0x002aeb89, // n0x152d c0x0000 (---------------) + I newjersey + 0x00231bc9, // n0x152e c0x0000 (---------------) + I newmexico + 0x002460c7, // n0x152f c0x0000 (---------------) + I newport + 0x00221dc9, // n0x1530 c0x0000 (---------------) + I newspaper + 0x002ed0c7, // n0x1531 c0x0000 (---------------) + I newyork + 0x002a2646, // n0x1532 c0x0000 (---------------) + I niepce + 0x0037bd47, // n0x1533 c0x0000 (---------------) + I norfolk + 0x00239c45, // n0x1534 c0x0000 (---------------) + I north + 0x002b5e83, // n0x1535 c0x0000 (---------------) + I nrw + 0x002edec9, // n0x1536 c0x0000 (---------------) + I nuernberg + 0x003518c9, // n0x1537 c0x0000 (---------------) + I nuremberg + 0x0036ef83, // n0x1538 c0x0000 (---------------) + I nyc + 0x00215844, // n0x1539 c0x0000 (---------------) + I nyny + 0x0032154d, // n0x153a c0x0000 (---------------) + I oceanographic + 0x00200b0f, // n0x153b c0x0000 (---------------) + I 
oceanographique + 0x002fc505, // n0x153c c0x0000 (---------------) + I omaha + 0x003175c6, // n0x153d c0x0000 (---------------) + I online + 0x00200987, // n0x153e c0x0000 (---------------) + I ontario + 0x00358c87, // n0x153f c0x0000 (---------------) + I openair + 0x00287ec6, // n0x1540 c0x0000 (---------------) + I oregon + 0x00287ecb, // n0x1541 c0x0000 (---------------) + I oregontrail + 0x002a3605, // n0x1542 c0x0000 (---------------) + I otago + 0x0039bec6, // n0x1543 c0x0000 (---------------) + I oxford + 0x003909c7, // n0x1544 c0x0000 (---------------) + I pacific + 0x0026fec9, // n0x1545 c0x0000 (---------------) + I paderborn + 0x00322046, // n0x1546 c0x0000 (---------------) + I palace + 0x0020ac45, // n0x1547 c0x0000 (---------------) + I paleo + 0x0023a00b, // n0x1548 c0x0000 (---------------) + I palmsprings + 0x0025b986, // n0x1549 c0x0000 (---------------) + I panama + 0x00277905, // n0x154a c0x0000 (---------------) + I paris + 0x002b5648, // n0x154b c0x0000 (---------------) + I pasadena + 0x00375008, // n0x154c c0x0000 (---------------) + I pharmacy + 0x002d30cc, // n0x154d c0x0000 (---------------) + I philadelphia + 0x002d30d0, // n0x154e c0x0000 (---------------) + I philadelphiaarea + 0x002d3789, // n0x154f c0x0000 (---------------) + I philately + 0x002d3bc7, // n0x1550 c0x0000 (---------------) + I phoenix + 0x002d404b, // n0x1551 c0x0000 (---------------) + I photography + 0x002d6506, // n0x1552 c0x0000 (---------------) + I pilots + 0x002d750a, // n0x1553 c0x0000 (---------------) + I pittsburgh + 0x002d898b, // n0x1554 c0x0000 (---------------) + I planetarium + 0x002d8d8a, // n0x1555 c0x0000 (---------------) + I plantation + 0x002d9006, // n0x1556 c0x0000 (---------------) + I plants + 0x002db005, // n0x1557 c0x0000 (---------------) + I plaza + 0x002ec506, // n0x1558 c0x0000 (---------------) + I portal + 0x00279488, // n0x1559 c0x0000 (---------------) + I portland + 0x0024618a, // n0x155a c0x0000 (---------------) + I portlligat + 
0x0035fe1c, // n0x155b c0x0000 (---------------) + I posts-and-telecommunications + 0x002e110c, // n0x155c c0x0000 (---------------) + I preservation + 0x002e1408, // n0x155d c0x0000 (---------------) + I presidio + 0x00247505, // n0x155e c0x0000 (---------------) + I press + 0x002e3107, // n0x155f c0x0000 (---------------) + I project + 0x0029f746, // n0x1560 c0x0000 (---------------) + I public + 0x0038dec5, // n0x1561 c0x0000 (---------------) + I pubol + 0x0021b906, // n0x1562 c0x0000 (---------------) + I quebec + 0x00288088, // n0x1563 c0x0000 (---------------) + I railroad + 0x002b3287, // n0x1564 c0x0000 (---------------) + I railway + 0x0029fa08, // n0x1565 c0x0000 (---------------) + I research + 0x002a6d4a, // n0x1566 c0x0000 (---------------) + I resistance + 0x0030864c, // n0x1567 c0x0000 (---------------) + I riodejaneiro + 0x003088c9, // n0x1568 c0x0000 (---------------) + I rochester + 0x0038e207, // n0x1569 c0x0000 (---------------) + I rockart + 0x00254584, // n0x156a c0x0000 (---------------) + I roma + 0x00252f86, // n0x156b c0x0000 (---------------) + I russia + 0x0036d48a, // n0x156c c0x0000 (---------------) + I saintlouis + 0x00283645, // n0x156d c0x0000 (---------------) + I salem + 0x0034504c, // n0x156e c0x0000 (---------------) + I salvadordali + 0x00345cc8, // n0x156f c0x0000 (---------------) + I salzburg + 0x0034a848, // n0x1570 c0x0000 (---------------) + I sandiego + 0x002004cc, // n0x1571 c0x0000 (---------------) + I sanfrancisco + 0x00210c8c, // n0x1572 c0x0000 (---------------) + I santabarbara + 0x00211189, // n0x1573 c0x0000 (---------------) + I santacruz + 0x002113c7, // n0x1574 c0x0000 (---------------) + I santafe + 0x0033d48c, // n0x1575 c0x0000 (---------------) + I saskatchewan + 0x003897c4, // n0x1576 c0x0000 (---------------) + I satx + 0x00232b4a, // n0x1577 c0x0000 (---------------) + I savannahga + 0x0028d04c, // n0x1578 c0x0000 (---------------) + I schlesisches + 0x0027104b, // n0x1579 c0x0000 (---------------) + 
I schoenbrunn + 0x0023758b, // n0x157a c0x0000 (---------------) + I schokoladen + 0x0023d0c6, // n0x157b c0x0000 (---------------) + I school + 0x00243147, // n0x157c c0x0000 (---------------) + I schweiz + 0x002358c7, // n0x157d c0x0000 (---------------) + I science + 0x002358cf, // n0x157e c0x0000 (---------------) + I science-fiction + 0x002f1b51, // n0x157f c0x0000 (---------------) + I scienceandhistory + 0x0039e252, // n0x1580 c0x0000 (---------------) + I scienceandindustry + 0x0024410d, // n0x1581 c0x0000 (---------------) + I sciencecenter + 0x0024410e, // n0x1582 c0x0000 (---------------) + I sciencecenters + 0x0024444e, // n0x1583 c0x0000 (---------------) + I sciencehistory + 0x0031af08, // n0x1584 c0x0000 (---------------) + I sciences + 0x0031af12, // n0x1585 c0x0000 (---------------) + I sciencesnaturelles + 0x00200708, // n0x1586 c0x0000 (---------------) + I scotland + 0x002fa9c7, // n0x1587 c0x0000 (---------------) + I seaport + 0x0024f98a, // n0x1588 c0x0000 (---------------) + I settlement + 0x00219c08, // n0x1589 c0x0000 (---------------) + I settlers + 0x0025fd45, // n0x158a c0x0000 (---------------) + I shell + 0x0035cc4a, // n0x158b c0x0000 (---------------) + I sherbrooke + 0x0021d987, // n0x158c c0x0000 (---------------) + I sibenik + 0x00341f84, // n0x158d c0x0000 (---------------) + I silk + 0x00209743, // n0x158e c0x0000 (---------------) + I ski + 0x00296cc5, // n0x158f c0x0000 (---------------) + I skole + 0x002fbb47, // n0x1590 c0x0000 (---------------) + I society + 0x002f9607, // n0x1591 c0x0000 (---------------) + I sologne + 0x002b46ce, // n0x1592 c0x0000 (---------------) + I soundandvision + 0x0032bd4d, // n0x1593 c0x0000 (---------------) + I southcarolina + 0x0032e849, // n0x1594 c0x0000 (---------------) + I southwest + 0x0020bb45, // n0x1595 c0x0000 (---------------) + I space + 0x003347c3, // n0x1596 c0x0000 (---------------) + I spy + 0x0027a346, // n0x1597 c0x0000 (---------------) + I square + 0x003643c5, // n0x1598 
c0x0000 (---------------) + I stadt + 0x0027b148, // n0x1599 c0x0000 (---------------) + I stalbans + 0x00323f89, // n0x159a c0x0000 (---------------) + I starnberg + 0x0020f205, // n0x159b c0x0000 (---------------) + I state + 0x0033678f, // n0x159c c0x0000 (---------------) + I stateofdelaware + 0x00254e87, // n0x159d c0x0000 (---------------) + I station + 0x003674c5, // n0x159e c0x0000 (---------------) + I steam + 0x00227c0a, // n0x159f c0x0000 (---------------) + I steiermark + 0x00303b86, // n0x15a0 c0x0000 (---------------) + I stjohn + 0x002a8009, // n0x15a1 c0x0000 (---------------) + I stockholm + 0x002e71cc, // n0x15a2 c0x0000 (---------------) + I stpetersburg + 0x002e8989, // n0x15a3 c0x0000 (---------------) + I stuttgart + 0x00206706, // n0x15a4 c0x0000 (---------------) + I suisse + 0x0029148c, // n0x15a5 c0x0000 (---------------) + I surgeonshall + 0x002e91c6, // n0x15a6 c0x0000 (---------------) + I surrey + 0x002f0688, // n0x15a7 c0x0000 (---------------) + I svizzera + 0x002f0886, // n0x15a8 c0x0000 (---------------) + I sweden + 0x00329bc6, // n0x15a9 c0x0000 (---------------) + I sydney + 0x002294c4, // n0x15aa c0x0000 (---------------) + I tank + 0x0025d103, // n0x15ab c0x0000 (---------------) + I tcm + 0x002d608a, // n0x15ac c0x0000 (---------------) + I technology + 0x0031d2d1, // n0x15ad c0x0000 (---------------) + I telekommunikation + 0x002b5c4a, // n0x15ae c0x0000 (---------------) + I television + 0x0034d1c5, // n0x15af c0x0000 (---------------) + I texas + 0x003844c7, // n0x15b0 c0x0000 (---------------) + I textile + 0x002573c7, // n0x15b1 c0x0000 (---------------) + I theater + 0x00269804, // n0x15b2 c0x0000 (---------------) + I time + 0x0026980b, // n0x15b3 c0x0000 (---------------) + I timekeeping + 0x00208088, // n0x15b4 c0x0000 (---------------) + I topology + 0x002b1946, // n0x15b5 c0x0000 (---------------) + I torino + 0x00311cc5, // n0x15b6 c0x0000 (---------------) + I touch + 0x002dc244, // n0x15b7 c0x0000 
(---------------) + I town + 0x00294809, // n0x15b8 c0x0000 (---------------) + I transport + 0x00355f84, // n0x15b9 c0x0000 (---------------) + I tree + 0x00359147, // n0x15ba c0x0000 (---------------) + I trolley + 0x00329245, // n0x15bb c0x0000 (---------------) + I trust + 0x00329247, // n0x15bc c0x0000 (---------------) + I trustee + 0x0030fb05, // n0x15bd c0x0000 (---------------) + I uhren + 0x00253643, // n0x15be c0x0000 (---------------) + I ulm + 0x002fa888, // n0x15bf c0x0000 (---------------) + I undersea + 0x00320a0a, // n0x15c0 c0x0000 (---------------) + I university + 0x0022b983, // n0x15c1 c0x0000 (---------------) + I usa + 0x0022b98a, // n0x15c2 c0x0000 (---------------) + I usantiques + 0x0028db46, // n0x15c3 c0x0000 (---------------) + I usarts + 0x0033650f, // n0x15c4 c0x0000 (---------------) + I uscountryestate + 0x00337d89, // n0x15c5 c0x0000 (---------------) + I usculture + 0x0025f990, // n0x15c6 c0x0000 (---------------) + I usdecorativearts + 0x0026d6c8, // n0x15c7 c0x0000 (---------------) + I usgarden + 0x002c7d49, // n0x15c8 c0x0000 (---------------) + I ushistory + 0x0029ca87, // n0x15c9 c0x0000 (---------------) + I ushuaia + 0x0024144f, // n0x15ca c0x0000 (---------------) + I uslivinghistory + 0x002e84c4, // n0x15cb c0x0000 (---------------) + I utah + 0x0033d844, // n0x15cc c0x0000 (---------------) + I uvic + 0x00217106, // n0x15cd c0x0000 (---------------) + I valley + 0x00236b46, // n0x15ce c0x0000 (---------------) + I vantaa + 0x0031504a, // n0x15cf c0x0000 (---------------) + I versailles + 0x00311a06, // n0x15d0 c0x0000 (---------------) + I viking + 0x002f8f47, // n0x15d1 c0x0000 (---------------) + I village + 0x002f7e88, // n0x15d2 c0x0000 (---------------) + I virginia + 0x002f8087, // n0x15d3 c0x0000 (---------------) + I virtual + 0x002f8247, // n0x15d4 c0x0000 (---------------) + I virtuel + 0x0034710a, // n0x15d5 c0x0000 (---------------) + I vlaanderen + 0x002fa6cb, // n0x15d6 c0x0000 (---------------) + I 
volkenkunde + 0x00320685, // n0x15d7 c0x0000 (---------------) + I wales + 0x0039de88, // n0x15d8 c0x0000 (---------------) + I wallonie + 0x00203903, // n0x15d9 c0x0000 (---------------) + I war + 0x0023efcc, // n0x15da c0x0000 (---------------) + I washingtondc + 0x00376c4f, // n0x15db c0x0000 (---------------) + I watch-and-clock + 0x002aca4d, // n0x15dc c0x0000 (---------------) + I watchandclock + 0x0023d987, // n0x15dd c0x0000 (---------------) + I western + 0x0032e989, // n0x15de c0x0000 (---------------) + I westfalen + 0x002b5f07, // n0x15df c0x0000 (---------------) + I whaling + 0x00253bc8, // n0x15e0 c0x0000 (---------------) + I wildlife + 0x0023264c, // n0x15e1 c0x0000 (---------------) + I williamsburg + 0x00285148, // n0x15e2 c0x0000 (---------------) + I windmill + 0x00351f08, // n0x15e3 c0x0000 (---------------) + I workshop + 0x0030cb8e, // n0x15e4 c0x0000 (---------------) + I xn--9dbhblg6di + 0x0031d714, // n0x15e5 c0x0000 (---------------) + I xn--comunicaes-v6a2o + 0x0031dc24, // n0x15e6 c0x0000 (---------------) + I xn--correios-e-telecomunicaes-ghc29a + 0x0033b8ca, // n0x15e7 c0x0000 (---------------) + I xn--h1aegh + 0x0035a14b, // n0x15e8 c0x0000 (---------------) + I xn--lns-qla + 0x002ed184, // n0x15e9 c0x0000 (---------------) + I york + 0x002ed189, // n0x15ea c0x0000 (---------------) + I yorkshire + 0x002aab48, // n0x15eb c0x0000 (---------------) + I yosemite + 0x0024b0c5, // n0x15ec c0x0000 (---------------) + I youth + 0x002ed80a, // n0x15ed c0x0000 (---------------) + I zoological + 0x0027a507, // n0x15ee c0x0000 (---------------) + I zoology + 0x002389c4, // n0x15ef c0x0000 (---------------) + I aero + 0x00330b83, // n0x15f0 c0x0000 (---------------) + I biz + 0x00233503, // n0x15f1 c0x0000 (---------------) + I com + 0x0023d684, // n0x15f2 c0x0000 (---------------) + I coop + 0x0023a783, // n0x15f3 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x15f4 c0x0000 (---------------) + I gov + 0x003a1244, // n0x15f5 c0x0000 
(---------------) + I info + 0x00201603, // n0x15f6 c0x0000 (---------------) + I int + 0x00209003, // n0x15f7 c0x0000 (---------------) + I mil + 0x002d0106, // n0x15f8 c0x0000 (---------------) + I museum + 0x00205284, // n0x15f9 c0x0000 (---------------) + I name + 0x0021fe03, // n0x15fa c0x0000 (---------------) + I net + 0x0022d1c3, // n0x15fb c0x0000 (---------------) + I org + 0x00220e43, // n0x15fc c0x0000 (---------------) + I pro + 0x00201542, // n0x15fd c0x0000 (---------------) + I ac + 0x00330b83, // n0x15fe c0x0000 (---------------) + I biz + 0x00200742, // n0x15ff c0x0000 (---------------) + I co + 0x00233503, // n0x1600 c0x0000 (---------------) + I com + 0x0023d684, // n0x1601 c0x0000 (---------------) + I coop + 0x0023a783, // n0x1602 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1603 c0x0000 (---------------) + I gov + 0x00201603, // n0x1604 c0x0000 (---------------) + I int + 0x002d0106, // n0x1605 c0x0000 (---------------) + I museum + 0x0021fe03, // n0x1606 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1607 c0x0000 (---------------) + I org + 0x000ffa08, // n0x1608 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1609 c0x0000 (---------------) + I com + 0x0023a783, // n0x160a c0x0000 (---------------) + I edu + 0x00213183, // n0x160b c0x0000 (---------------) + I gob + 0x0021fe03, // n0x160c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x160d c0x0000 (---------------) + I org + 0x000ffa08, // n0x160e c0x0000 (---------------) + blogspot + 0x00233503, // n0x160f c0x0000 (---------------) + I com + 0x0023a783, // n0x1610 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1611 c0x0000 (---------------) + I gov + 0x00209003, // n0x1612 c0x0000 (---------------) + I mil + 0x00205284, // n0x1613 c0x0000 (---------------) + I name + 0x0021fe03, // n0x1614 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1615 c0x0000 (---------------) + I org + 0x0062f7c8, // n0x1616 c0x0001 (---------------) ! 
I teledata + 0x00200302, // n0x1617 c0x0000 (---------------) + I ca + 0x0022e182, // n0x1618 c0x0000 (---------------) + I cc + 0x00200742, // n0x1619 c0x0000 (---------------) + I co + 0x00233503, // n0x161a c0x0000 (---------------) + I com + 0x0022bf42, // n0x161b c0x0000 (---------------) + I dr + 0x002013c2, // n0x161c c0x0000 (---------------) + I in + 0x003a1244, // n0x161d c0x0000 (---------------) + I info + 0x00207104, // n0x161e c0x0000 (---------------) + I mobi + 0x0021bb02, // n0x161f c0x0000 (---------------) + I mx + 0x00205284, // n0x1620 c0x0000 (---------------) + I name + 0x00200282, // n0x1621 c0x0000 (---------------) + I or + 0x0022d1c3, // n0x1622 c0x0000 (---------------) + I org + 0x00220e43, // n0x1623 c0x0000 (---------------) + I pro + 0x0023d0c6, // n0x1624 c0x0000 (---------------) + I school + 0x00224e42, // n0x1625 c0x0000 (---------------) + I tv + 0x00202382, // n0x1626 c0x0000 (---------------) + I us + 0x0020b942, // n0x1627 c0x0000 (---------------) + I ws + 0x38e22103, // n0x1628 c0x00e3 (n0x162a-n0x162b) o I her + 0x39218ac3, // n0x1629 c0x00e4 (n0x162b-n0x162c) o I his + 0x00057c46, // n0x162a c0x0000 (---------------) + forgot + 0x00057c46, // n0x162b c0x0000 (---------------) + forgot + 0x002d4884, // n0x162c c0x0000 (---------------) + I asso + 0x00116c0c, // n0x162d c0x0000 (---------------) + at-band-camp + 0x0001be0c, // n0x162e c0x0000 (---------------) + azure-mobile + 0x00080ecd, // n0x162f c0x0000 (---------------) + azurewebsites + 0x000fb147, // n0x1630 c0x0000 (---------------) + blogdns + 0x00020208, // n0x1631 c0x0000 (---------------) + broke-it + 0x0019898a, // n0x1632 c0x0000 (---------------) + buyshouses + 0x39e3e2c5, // n0x1633 c0x00e7 (n0x1663-n0x1664) o I cdn77 + 0x0003e2c9, // n0x1634 c0x0000 (---------------) + cdn77-ssl + 0x00009188, // n0x1635 c0x0000 (---------------) + cloudapp + 0x0019d4ca, // n0x1636 c0x0000 (---------------) + cloudfront + 0x0003118e, // n0x1637 c0x0000 (---------------) + 
cloudfunctions + 0x0014c048, // n0x1638 c0x0000 (---------------) + dnsalias + 0x0007c9c7, // n0x1639 c0x0000 (---------------) + dnsdojo + 0x00160607, // n0x163a c0x0000 (---------------) + does-it + 0x0016a009, // n0x163b c0x0000 (---------------) + dontexist + 0x0008cdc7, // n0x163c c0x0000 (---------------) + dsmynas + 0x00197b48, // n0x163d c0x0000 (---------------) + dynalias + 0x000e9d09, // n0x163e c0x0000 (---------------) + dynathome + 0x001a46c5, // n0x163f c0x0000 (---------------) + dynv6 + 0x000a950d, // n0x1640 c0x0000 (---------------) + endofinternet + 0x00008f88, // n0x1641 c0x0000 (---------------) + familyds + 0x3a24c206, // n0x1642 c0x00e8 (n0x1664-n0x1666) o I fastly + 0x00064447, // n0x1643 c0x0000 (---------------) + from-az + 0x00065a47, // n0x1644 c0x0000 (---------------) + from-co + 0x0006a1c7, // n0x1645 c0x0000 (---------------) + from-la + 0x0006f3c7, // n0x1646 c0x0000 (---------------) + from-ny + 0x0000d202, // n0x1647 c0x0000 (---------------) + gb + 0x00157907, // n0x1648 c0x0000 (---------------) + gets-it + 0x00064a8c, // n0x1649 c0x0000 (---------------) + ham-radio-op + 0x00146347, // n0x164a c0x0000 (---------------) + homeftp + 0x000a51c6, // n0x164b c0x0000 (---------------) + homeip + 0x000a59c9, // n0x164c c0x0000 (---------------) + homelinux + 0x000a6fc8, // n0x164d c0x0000 (---------------) + homeunix + 0x000195c2, // n0x164e c0x0000 (---------------) + hu + 0x000013c2, // n0x164f c0x0000 (---------------) + in + 0x00007b0b, // n0x1650 c0x0000 (---------------) + in-the-band + 0x00012789, // n0x1651 c0x0000 (---------------) + is-a-chef + 0x0004e989, // n0x1652 c0x0000 (---------------) + is-a-geek + 0x0008e588, // n0x1653 c0x0000 (---------------) + isa-geek + 0x000ae3c2, // n0x1654 c0x0000 (---------------) + jp + 0x00150c09, // n0x1655 c0x0000 (---------------) + kicks-ass + 0x0002168d, // n0x1656 c0x0000 (---------------) + office-on-the + 0x000dcd47, // n0x1657 c0x0000 (---------------) + podzone + 0x000ecb08, // 
n0x1658 c0x0000 (---------------) + rackmaze + 0x001376cd, // n0x1659 c0x0000 (---------------) + scrapper-site + 0x000046c2, // n0x165a c0x0000 (---------------) + se + 0x0006ba86, // n0x165b c0x0000 (---------------) + selfip + 0x00090208, // n0x165c c0x0000 (---------------) + sells-it + 0x000cb7c8, // n0x165d c0x0000 (---------------) + servebbs + 0x000895c8, // n0x165e c0x0000 (---------------) + serveftp + 0x00054088, // n0x165f c0x0000 (---------------) + thruhere + 0x00000f82, // n0x1660 c0x0000 (---------------) + uk + 0x000eadc6, // n0x1661 c0x0000 (---------------) + webhop + 0x00005f82, // n0x1662 c0x0000 (---------------) + za + 0x000002c1, // n0x1663 c0x0000 (---------------) + r + 0x3a6e2044, // n0x1664 c0x00e9 (n0x1666-n0x1668) o I prod + 0x3aa3e443, // n0x1665 c0x00ea (n0x1668-n0x166b) o I ssl + 0x00000101, // n0x1666 c0x0000 (---------------) + a + 0x0000d846, // n0x1667 c0x0000 (---------------) + global + 0x00000101, // n0x1668 c0x0000 (---------------) + a + 0x00000001, // n0x1669 c0x0000 (---------------) + b + 0x0000d846, // n0x166a c0x0000 (---------------) + global + 0x0024bf84, // n0x166b c0x0000 (---------------) + I arts + 0x00233503, // n0x166c c0x0000 (---------------) + I com + 0x0024d9c4, // n0x166d c0x0000 (---------------) + I firm + 0x003a1244, // n0x166e c0x0000 (---------------) + I info + 0x0021fe03, // n0x166f c0x0000 (---------------) + I net + 0x00222085, // n0x1670 c0x0000 (---------------) + I other + 0x00220f03, // n0x1671 c0x0000 (---------------) + I per + 0x0022a5c3, // n0x1672 c0x0000 (---------------) + I rec + 0x00391185, // n0x1673 c0x0000 (---------------) + I store + 0x00221a03, // n0x1674 c0x0000 (---------------) + I web + 0x3b633503, // n0x1675 c0x00ed (n0x167f-n0x1680) + I com + 0x0023a783, // n0x1676 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1677 c0x0000 (---------------) + I gov + 0x00200041, // n0x1678 c0x0000 (---------------) + I i + 0x00209003, // n0x1679 c0x0000 (---------------) + I mil + 
0x00207104, // n0x167a c0x0000 (---------------) + I mobi + 0x00205284, // n0x167b c0x0000 (---------------) + I name + 0x0021fe03, // n0x167c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x167d c0x0000 (---------------) + I org + 0x00217443, // n0x167e c0x0000 (---------------) + I sch + 0x000ffa08, // n0x167f c0x0000 (---------------) + blogspot + 0x00201542, // n0x1680 c0x0000 (---------------) + I ac + 0x00330b83, // n0x1681 c0x0000 (---------------) + I biz + 0x00200742, // n0x1682 c0x0000 (---------------) + I co + 0x00233503, // n0x1683 c0x0000 (---------------) + I com + 0x0023a783, // n0x1684 c0x0000 (---------------) + I edu + 0x00213183, // n0x1685 c0x0000 (---------------) + I gob + 0x002013c2, // n0x1686 c0x0000 (---------------) + I in + 0x003a1244, // n0x1687 c0x0000 (---------------) + I info + 0x00201603, // n0x1688 c0x0000 (---------------) + I int + 0x00209003, // n0x1689 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x168a c0x0000 (---------------) + I net + 0x00201483, // n0x168b c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x168c c0x0000 (---------------) + I org + 0x00221a03, // n0x168d c0x0000 (---------------) + I web + 0x000ffa08, // n0x168e c0x0000 (---------------) + blogspot + 0x00365782, // n0x168f c0x0000 (---------------) + I bv + 0x00000742, // n0x1690 c0x0000 (---------------) + co + 0x3c622f02, // n0x1691 c0x00f1 (n0x1967-n0x1968) + I aa + 0x00355648, // n0x1692 c0x0000 (---------------) + I aarborte + 0x00223386, // n0x1693 c0x0000 (---------------) + I aejrie + 0x002bb486, // n0x1694 c0x0000 (---------------) + I afjord + 0x00222d07, // n0x1695 c0x0000 (---------------) + I agdenes + 0x3ca04f02, // n0x1696 c0x00f2 (n0x1968-n0x1969) + I ah + 0x3cf7c888, // n0x1697 c0x00f3 (n0x1969-n0x196a) o I akershus + 0x00354a4a, // n0x1698 c0x0000 (---------------) + I aknoluokta + 0x00263ec8, // n0x1699 c0x0000 (---------------) + I akrehamn + 0x002001c2, // n0x169a c0x0000 (---------------) + I al + 0x003552c9, // 
n0x169b c0x0000 (---------------) + I alaheadju + 0x003206c7, // n0x169c c0x0000 (---------------) + I alesund + 0x00219046, // n0x169d c0x0000 (---------------) + I algard + 0x00204e09, // n0x169e c0x0000 (---------------) + I alstahaug + 0x0023adc4, // n0x169f c0x0000 (---------------) + I alta + 0x002bbf06, // n0x16a0 c0x0000 (---------------) + I alvdal + 0x002bb884, // n0x16a1 c0x0000 (---------------) + I amli + 0x00278604, // n0x16a2 c0x0000 (---------------) + I amot + 0x00259c09, // n0x16a3 c0x0000 (---------------) + I andasuolo + 0x0022c086, // n0x16a4 c0x0000 (---------------) + I andebu + 0x00259645, // n0x16a5 c0x0000 (---------------) + I andoy + 0x00267dc5, // n0x16a6 c0x0000 (---------------) + I ardal + 0x00234b07, // n0x16a7 c0x0000 (---------------) + I aremark + 0x002b87c7, // n0x16a8 c0x0000 (---------------) + I arendal + 0x0032fec4, // n0x16a9 c0x0000 (---------------) + I arna + 0x00222f46, // n0x16aa c0x0000 (---------------) + I aseral + 0x002e2b85, // n0x16ab c0x0000 (---------------) + I asker + 0x00230345, // n0x16ac c0x0000 (---------------) + I askim + 0x002f58c5, // n0x16ad c0x0000 (---------------) + I askoy + 0x00389107, // n0x16ae c0x0000 (---------------) + I askvoll + 0x00331105, // n0x16af c0x0000 (---------------) + I asnes + 0x00309f89, // n0x16b0 c0x0000 (---------------) + I audnedaln + 0x00255485, // n0x16b1 c0x0000 (---------------) + I aukra + 0x002fadc4, // n0x16b2 c0x0000 (---------------) + I aure + 0x00323447, // n0x16b3 c0x0000 (---------------) + I aurland + 0x0025de0e, // n0x16b4 c0x0000 (---------------) + I aurskog-holand + 0x00272489, // n0x16b5 c0x0000 (---------------) + I austevoll + 0x0030f789, // n0x16b6 c0x0000 (---------------) + I austrheim + 0x00326bc6, // n0x16b7 c0x0000 (---------------) + I averoy + 0x002f2388, // n0x16b8 c0x0000 (---------------) + I badaddja + 0x002ade0b, // n0x16b9 c0x0000 (---------------) + I bahcavuotna + 0x002d13cc, // n0x16ba c0x0000 (---------------) + I bahccavuotna + 
0x0026a446, // n0x16bb c0x0000 (---------------) + I baidar + 0x00372147, // n0x16bc c0x0000 (---------------) + I bajddar + 0x0025ef05, // n0x16bd c0x0000 (---------------) + I balat + 0x00228a8a, // n0x16be c0x0000 (---------------) + I balestrand + 0x00306f89, // n0x16bf c0x0000 (---------------) + I ballangen + 0x00254b89, // n0x16c0 c0x0000 (---------------) + I balsfjord + 0x00271906, // n0x16c1 c0x0000 (---------------) + I bamble + 0x002ef785, // n0x16c2 c0x0000 (---------------) + I bardu + 0x0027f705, // n0x16c3 c0x0000 (---------------) + I barum + 0x00354549, // n0x16c4 c0x0000 (---------------) + I batsfjord + 0x002f5a4b, // n0x16c5 c0x0000 (---------------) + I bearalvahki + 0x0027c686, // n0x16c6 c0x0000 (---------------) + I beardu + 0x0032e046, // n0x16c7 c0x0000 (---------------) + I beiarn + 0x0020d144, // n0x16c8 c0x0000 (---------------) + I berg + 0x0028b146, // n0x16c9 c0x0000 (---------------) + I bergen + 0x003223c8, // n0x16ca c0x0000 (---------------) + I berlevag + 0x00200006, // n0x16cb c0x0000 (---------------) + I bievat + 0x00398d86, // n0x16cc c0x0000 (---------------) + I bindal + 0x002054c8, // n0x16cd c0x0000 (---------------) + I birkenes + 0x00206507, // n0x16ce c0x0000 (---------------) + I bjarkoy + 0x00206ec9, // n0x16cf c0x0000 (---------------) + I bjerkreim + 0x0020c285, // n0x16d0 c0x0000 (---------------) + I bjugn + 0x000ffa08, // n0x16d1 c0x0000 (---------------) + blogspot + 0x00213204, // n0x16d2 c0x0000 (---------------) + I bodo + 0x003a4004, // n0x16d3 c0x0000 (---------------) + I bokn + 0x00210ac5, // n0x16d4 c0x0000 (---------------) + I bomlo + 0x0038c589, // n0x16d5 c0x0000 (---------------) + I bremanger + 0x00221247, // n0x16d6 c0x0000 (---------------) + I bronnoy + 0x0022124b, // n0x16d7 c0x0000 (---------------) + I bronnoysund + 0x0022260a, // n0x16d8 c0x0000 (---------------) + I brumunddal + 0x00228785, // n0x16d9 c0x0000 (---------------) + I bryne + 0x3d208642, // n0x16da c0x00f4 (n0x196a-n0x196b) 
+ I bu + 0x00351247, // n0x16db c0x0000 (---------------) + I budejju + 0x3d62a108, // n0x16dc c0x00f5 (n0x196b-n0x196c) o I buskerud + 0x002b8007, // n0x16dd c0x0000 (---------------) + I bygland + 0x0022c3c5, // n0x16de c0x0000 (---------------) + I bykle + 0x00356fca, // n0x16df c0x0000 (---------------) + I cahcesuolo + 0x00000742, // n0x16e0 c0x0000 (---------------) + co + 0x002b70cb, // n0x16e1 c0x0000 (---------------) + I davvenjarga + 0x0021490a, // n0x16e2 c0x0000 (---------------) + I davvesiida + 0x0039c006, // n0x16e3 c0x0000 (---------------) + I deatnu + 0x00274143, // n0x16e4 c0x0000 (---------------) + I dep + 0x002381cd, // n0x16e5 c0x0000 (---------------) + I dielddanuorri + 0x00271f0c, // n0x16e6 c0x0000 (---------------) + I divtasvuodna + 0x003079cd, // n0x16e7 c0x0000 (---------------) + I divttasvuotna + 0x00365c05, // n0x16e8 c0x0000 (---------------) + I donna + 0x00243885, // n0x16e9 c0x0000 (---------------) + I dovre + 0x002edd47, // n0x16ea c0x0000 (---------------) + I drammen + 0x003458c9, // n0x16eb c0x0000 (---------------) + I drangedal + 0x00354946, // n0x16ec c0x0000 (---------------) + I drobak + 0x00311885, // n0x16ed c0x0000 (---------------) + I dyroy + 0x0022e2c8, // n0x16ee c0x0000 (---------------) + I egersund + 0x0028b043, // n0x16ef c0x0000 (---------------) + I eid + 0x003116c8, // n0x16f0 c0x0000 (---------------) + I eidfjord + 0x0028b048, // n0x16f1 c0x0000 (---------------) + I eidsberg + 0x002bce07, // n0x16f2 c0x0000 (---------------) + I eidskog + 0x002d8248, // n0x16f3 c0x0000 (---------------) + I eidsvoll + 0x003a6609, // n0x16f4 c0x0000 (---------------) + I eigersund + 0x0023c547, // n0x16f5 c0x0000 (---------------) + I elverum + 0x00209807, // n0x16f6 c0x0000 (---------------) + I enebakk + 0x00279a48, // n0x16f7 c0x0000 (---------------) + I engerdal + 0x002fecc4, // n0x16f8 c0x0000 (---------------) + I etne + 0x002fecc7, // n0x16f9 c0x0000 (---------------) + I etnedal + 0x00251f48, // n0x16fa 
c0x0000 (---------------) + I evenassi + 0x00202b46, // n0x16fb c0x0000 (---------------) + I evenes + 0x003a03cf, // n0x16fc c0x0000 (---------------) + I evje-og-hornnes + 0x00212987, // n0x16fd c0x0000 (---------------) + I farsund + 0x0024cc06, // n0x16fe c0x0000 (---------------) + I fauske + 0x002d77c5, // n0x16ff c0x0000 (---------------) + I fedje + 0x002159c3, // n0x1700 c0x0000 (---------------) + I fet + 0x0034b7c7, // n0x1701 c0x0000 (---------------) + I fetsund + 0x0023f343, // n0x1702 c0x0000 (---------------) + I fhs + 0x0024c546, // n0x1703 c0x0000 (---------------) + I finnoy + 0x0024f086, // n0x1704 c0x0000 (---------------) + I fitjar + 0x00250506, // n0x1705 c0x0000 (---------------) + I fjaler + 0x0028f845, // n0x1706 c0x0000 (---------------) + I fjell + 0x00267683, // n0x1707 c0x0000 (---------------) + I fla + 0x00380648, // n0x1708 c0x0000 (---------------) + I flakstad + 0x0036ce89, // n0x1709 c0x0000 (---------------) + I flatanger + 0x00364c8b, // n0x170a c0x0000 (---------------) + I flekkefjord + 0x00377308, // n0x170b c0x0000 (---------------) + I flesberg + 0x00252605, // n0x170c c0x0000 (---------------) + I flora + 0x00253105, // n0x170d c0x0000 (---------------) + I floro + 0x3da42902, // n0x170e c0x00f6 (n0x196c-n0x196d) + I fm + 0x0037be09, // n0x170f c0x0000 (---------------) + I folkebibl + 0x002558c7, // n0x1710 c0x0000 (---------------) + I folldal + 0x0039bf45, // n0x1711 c0x0000 (---------------) + I forde + 0x00259b07, // n0x1712 c0x0000 (---------------) + I forsand + 0x0025b746, // n0x1713 c0x0000 (---------------) + I fosnes + 0x0035dfc5, // n0x1714 c0x0000 (---------------) + I frana + 0x0036420b, // n0x1715 c0x0000 (---------------) + I fredrikstad + 0x0025c484, // n0x1716 c0x0000 (---------------) + I frei + 0x00263a85, // n0x1717 c0x0000 (---------------) + I frogn + 0x00263bc7, // n0x1718 c0x0000 (---------------) + I froland + 0x00276a06, // n0x1719 c0x0000 (---------------) + I frosta + 0x00276e45, // n0x171a 
c0x0000 (---------------) + I froya + 0x00283e87, // n0x171b c0x0000 (---------------) + I fuoisku + 0x00284ec7, // n0x171c c0x0000 (---------------) + I fuossko + 0x0028db04, // n0x171d c0x0000 (---------------) + I fusa + 0x00288cca, // n0x171e c0x0000 (---------------) + I fylkesbibl + 0x00289188, // n0x171f c0x0000 (---------------) + I fyresdal + 0x002cdcc9, // n0x1720 c0x0000 (---------------) + I gaivuotna + 0x0021dd45, // n0x1721 c0x0000 (---------------) + I galsa + 0x002b7306, // n0x1722 c0x0000 (---------------) + I gamvik + 0x0032258a, // n0x1723 c0x0000 (---------------) + I gangaviika + 0x00267cc6, // n0x1724 c0x0000 (---------------) + I gaular + 0x002b6187, // n0x1725 c0x0000 (---------------) + I gausdal + 0x002cd98d, // n0x1726 c0x0000 (---------------) + I giehtavuoatna + 0x00226cc9, // n0x1727 c0x0000 (---------------) + I gildeskal + 0x00328b05, // n0x1728 c0x0000 (---------------) + I giske + 0x00310047, // n0x1729 c0x0000 (---------------) + I gjemnes + 0x003279c8, // n0x172a c0x0000 (---------------) + I gjerdrum + 0x0032f588, // n0x172b c0x0000 (---------------) + I gjerstad + 0x00249607, // n0x172c c0x0000 (---------------) + I gjesdal + 0x00249e46, // n0x172d c0x0000 (---------------) + I gjovik + 0x00212b87, // n0x172e c0x0000 (---------------) + I gloppen + 0x0024dbc3, // n0x172f c0x0000 (---------------) + I gol + 0x00334544, // n0x1730 c0x0000 (---------------) + I gran + 0x0035bc85, // n0x1731 c0x0000 (---------------) + I grane + 0x00384e47, // n0x1732 c0x0000 (---------------) + I granvin + 0x00388b09, // n0x1733 c0x0000 (---------------) + I gratangen + 0x0022d248, // n0x1734 c0x0000 (---------------) + I grimstad + 0x002b6085, // n0x1735 c0x0000 (---------------) + I grong + 0x0039a744, // n0x1736 c0x0000 (---------------) + I grue + 0x0035d2c5, // n0x1737 c0x0000 (---------------) + I gulen + 0x0025334d, // n0x1738 c0x0000 (---------------) + I guovdageaidnu + 0x00202442, // n0x1739 c0x0000 (---------------) + I ha + 0x002c9b86, 
// n0x173a c0x0000 (---------------) + I habmer + 0x0026b9c6, // n0x173b c0x0000 (---------------) + I hadsel + 0x002a5e4a, // n0x173c c0x0000 (---------------) + I hagebostad + 0x0035f386, // n0x173d c0x0000 (---------------) + I halden + 0x0036d3c5, // n0x173e c0x0000 (---------------) + I halsa + 0x0025e745, // n0x173f c0x0000 (---------------) + I hamar + 0x0025e747, // n0x1740 c0x0000 (---------------) + I hamaroy + 0x0037558c, // n0x1741 c0x0000 (---------------) + I hammarfeasta + 0x0033c28a, // n0x1742 c0x0000 (---------------) + I hammerfest + 0x0028aa06, // n0x1743 c0x0000 (---------------) + I hapmir + 0x002cfb05, // n0x1744 c0x0000 (---------------) + I haram + 0x0028af86, // n0x1745 c0x0000 (---------------) + I hareid + 0x0028b2c7, // n0x1746 c0x0000 (---------------) + I harstad + 0x0028c606, // n0x1747 c0x0000 (---------------) + I hasvik + 0x0028f74c, // n0x1748 c0x0000 (---------------) + I hattfjelldal + 0x00204f49, // n0x1749 c0x0000 (---------------) + I haugesund + 0x3de36587, // n0x174a c0x00f7 (n0x196d-n0x1970) o I hedmark + 0x002910c5, // n0x174b c0x0000 (---------------) + I hemne + 0x002910c6, // n0x174c c0x0000 (---------------) + I hemnes + 0x00291d48, // n0x174d c0x0000 (---------------) + I hemsedal + 0x00232305, // n0x174e c0x0000 (---------------) + I herad + 0x002a4585, // n0x174f c0x0000 (---------------) + I hitra + 0x002a47c8, // n0x1750 c0x0000 (---------------) + I hjartdal + 0x002a49ca, // n0x1751 c0x0000 (---------------) + I hjelmeland + 0x3e248fc2, // n0x1752 c0x00f8 (n0x1970-n0x1971) + I hl + 0x3e60e942, // n0x1753 c0x00f9 (n0x1971-n0x1972) + I hm + 0x00378245, // n0x1754 c0x0000 (---------------) + I hobol + 0x002d7743, // n0x1755 c0x0000 (---------------) + I hof + 0x003a4508, // n0x1756 c0x0000 (---------------) + I hokksund + 0x0023ce83, // n0x1757 c0x0000 (---------------) + I hol + 0x002a4c44, // n0x1758 c0x0000 (---------------) + I hole + 0x002a814b, // n0x1759 c0x0000 (---------------) + I holmestrand + 
0x002ae1c8, // n0x175a c0x0000 (---------------) + I holtalen + 0x002a7ac8, // n0x175b c0x0000 (---------------) + I honefoss + 0x3eb1a749, // n0x175c c0x00fa (n0x1972-n0x1973) o I hordaland + 0x002a8f89, // n0x175d c0x0000 (---------------) + I hornindal + 0x002a9406, // n0x175e c0x0000 (---------------) + I horten + 0x002aa188, // n0x175f c0x0000 (---------------) + I hoyanger + 0x002aa389, // n0x1760 c0x0000 (---------------) + I hoylandet + 0x002ab246, // n0x1761 c0x0000 (---------------) + I hurdal + 0x002ab3c5, // n0x1762 c0x0000 (---------------) + I hurum + 0x00363586, // n0x1763 c0x0000 (---------------) + I hvaler + 0x002ab849, // n0x1764 c0x0000 (---------------) + I hyllestad + 0x00229187, // n0x1765 c0x0000 (---------------) + I ibestad + 0x0026dd46, // n0x1766 c0x0000 (---------------) + I idrett + 0x002e7747, // n0x1767 c0x0000 (---------------) + I inderoy + 0x003547c7, // n0x1768 c0x0000 (---------------) + I iveland + 0x0023fcc4, // n0x1769 c0x0000 (---------------) + I ivgu + 0x3ee1cfc9, // n0x176a c0x00fb (n0x1973-n0x1974) + I jan-mayen + 0x002c57c8, // n0x176b c0x0000 (---------------) + I jessheim + 0x00358388, // n0x176c c0x0000 (---------------) + I jevnaker + 0x00343c87, // n0x176d c0x0000 (---------------) + I jolster + 0x002c1046, // n0x176e c0x0000 (---------------) + I jondal + 0x002fc709, // n0x176f c0x0000 (---------------) + I jorpeland + 0x002bc3c7, // n0x1770 c0x0000 (---------------) + I kafjord + 0x0024fd0a, // n0x1771 c0x0000 (---------------) + I karasjohka + 0x0037dbc8, // n0x1772 c0x0000 (---------------) + I karasjok + 0x00330887, // n0x1773 c0x0000 (---------------) + I karlsoy + 0x00229586, // n0x1774 c0x0000 (---------------) + I karmoy + 0x002650ca, // n0x1775 c0x0000 (---------------) + I kautokeino + 0x0027e3c8, // n0x1776 c0x0000 (---------------) + I kirkenes + 0x0026b445, // n0x1777 c0x0000 (---------------) + I klabu + 0x00374f05, // n0x1778 c0x0000 (---------------) + I klepp + 0x00387dc7, // n0x1779 c0x0000 
(---------------) + I kommune + 0x002bfb09, // n0x177a c0x0000 (---------------) + I kongsberg + 0x002c478b, // n0x177b c0x0000 (---------------) + I kongsvinger + 0x002d6808, // n0x177c c0x0000 (---------------) + I kopervik + 0x00255509, // n0x177d c0x0000 (---------------) + I kraanghke + 0x00250787, // n0x177e c0x0000 (---------------) + I kragero + 0x002af50c, // n0x177f c0x0000 (---------------) + I kristiansand + 0x002b01cc, // n0x1780 c0x0000 (---------------) + I kristiansund + 0x002b04ca, // n0x1781 c0x0000 (---------------) + I krodsherad + 0x002b074c, // n0x1782 c0x0000 (---------------) + I krokstadelva + 0x002bb408, // n0x1783 c0x0000 (---------------) + I kvafjord + 0x002bb608, // n0x1784 c0x0000 (---------------) + I kvalsund + 0x002bb804, // n0x1785 c0x0000 (---------------) + I kvam + 0x002bc589, // n0x1786 c0x0000 (---------------) + I kvanangen + 0x002bc7c9, // n0x1787 c0x0000 (---------------) + I kvinesdal + 0x002bca0a, // n0x1788 c0x0000 (---------------) + I kvinnherad + 0x002bcc89, // n0x1789 c0x0000 (---------------) + I kviteseid + 0x002bcfc7, // n0x178a c0x0000 (---------------) + I kvitsoy + 0x003a4dcc, // n0x178b c0x0000 (---------------) + I laakesvuemie + 0x00338186, // n0x178c c0x0000 (---------------) + I lahppi + 0x00258008, // n0x178d c0x0000 (---------------) + I langevag + 0x00267d86, // n0x178e c0x0000 (---------------) + I lardal + 0x0037f706, // n0x178f c0x0000 (---------------) + I larvik + 0x00328a07, // n0x1790 c0x0000 (---------------) + I lavagis + 0x00272688, // n0x1791 c0x0000 (---------------) + I lavangen + 0x00331a4b, // n0x1792 c0x0000 (---------------) + I leangaviika + 0x002b7ec7, // n0x1793 c0x0000 (---------------) + I lebesby + 0x002598c9, // n0x1794 c0x0000 (---------------) + I leikanger + 0x0035e489, // n0x1795 c0x0000 (---------------) + I leirfjord + 0x00296d87, // n0x1796 c0x0000 (---------------) + I leirvik + 0x002bbac4, // n0x1797 c0x0000 (---------------) + I leka + 0x002bf987, // n0x1798 c0x0000 
(---------------) + I leksvik + 0x003562c6, // n0x1799 c0x0000 (---------------) + I lenvik + 0x00217606, // n0x179a c0x0000 (---------------) + I lerdal + 0x0031f845, // n0x179b c0x0000 (---------------) + I lesja + 0x00219f88, // n0x179c c0x0000 (---------------) + I levanger + 0x002ddf44, // n0x179d c0x0000 (---------------) + I lier + 0x002ddf46, // n0x179e c0x0000 (---------------) + I lierne + 0x0033c14b, // n0x179f c0x0000 (---------------) + I lillehammer + 0x00330089, // n0x17a0 c0x0000 (---------------) + I lillesand + 0x00322dc6, // n0x17a1 c0x0000 (---------------) + I lindas + 0x00345ac9, // n0x17a2 c0x0000 (---------------) + I lindesnes + 0x00389286, // n0x17a3 c0x0000 (---------------) + I loabat + 0x00259dc8, // n0x17a4 c0x0000 (---------------) + I lodingen + 0x00214303, // n0x17a5 c0x0000 (---------------) + I lom + 0x00390905, // n0x17a6 c0x0000 (---------------) + I loppa + 0x00216209, // n0x17a7 c0x0000 (---------------) + I lorenskog + 0x00217745, // n0x17a8 c0x0000 (---------------) + I loten + 0x002fb344, // n0x17a9 c0x0000 (---------------) + I lund + 0x00275306, // n0x17aa c0x0000 (---------------) + I lunner + 0x00378ec5, // n0x17ab c0x0000 (---------------) + I luroy + 0x002dcb06, // n0x17ac c0x0000 (---------------) + I luster + 0x002fccc7, // n0x17ad c0x0000 (---------------) + I lyngdal + 0x00213606, // n0x17ae c0x0000 (---------------) + I lyngen + 0x00298bcb, // n0x17af c0x0000 (---------------) + I malatvuopmi + 0x002e4e47, // n0x17b0 c0x0000 (---------------) + I malselv + 0x00205c86, // n0x17b1 c0x0000 (---------------) + I malvik + 0x0034fcc6, // n0x17b2 c0x0000 (---------------) + I mandal + 0x00234bc6, // n0x17b3 c0x0000 (---------------) + I marker + 0x0032fe89, // n0x17b4 c0x0000 (---------------) + I marnardal + 0x0021ed8a, // n0x17b5 c0x0000 (---------------) + I masfjorden + 0x0032a985, // n0x17b6 c0x0000 (---------------) + I masoy + 0x0022080d, // n0x17b7 c0x0000 (---------------) + I matta-varjjat + 0x002a4ac6, // 
n0x17b8 c0x0000 (---------------) + I meland + 0x002160c6, // n0x17b9 c0x0000 (---------------) + I meldal + 0x00291886, // n0x17ba c0x0000 (---------------) + I melhus + 0x002a7e05, // n0x17bb c0x0000 (---------------) + I meloy + 0x0037c7c7, // n0x17bc c0x0000 (---------------) + I meraker + 0x0029d687, // n0x17bd c0x0000 (---------------) + I midsund + 0x002e5e0e, // n0x17be c0x0000 (---------------) + I midtre-gauldal + 0x00209003, // n0x17bf c0x0000 (---------------) + I mil + 0x002c1009, // n0x17c0 c0x0000 (---------------) + I mjondalen + 0x00239209, // n0x17c1 c0x0000 (---------------) + I mo-i-rana + 0x00228e87, // n0x17c2 c0x0000 (---------------) + I moareke + 0x0026c087, // n0x17c3 c0x0000 (---------------) + I modalen + 0x002a67c5, // n0x17c4 c0x0000 (---------------) + I modum + 0x00325445, // n0x17c5 c0x0000 (---------------) + I molde + 0x3f25d3cf, // n0x17c6 c0x00fc (n0x1974-n0x1976) o I more-og-romsdal + 0x002c7f87, // n0x17c7 c0x0000 (---------------) + I mosjoen + 0x002c8148, // n0x17c8 c0x0000 (---------------) + I moskenes + 0x002c8884, // n0x17c9 c0x0000 (---------------) + I moss + 0x002c8c46, // n0x17ca c0x0000 (---------------) + I mosvik + 0x3f64aa02, // n0x17cb c0x00fd (n0x1976-n0x1977) + I mr + 0x002cc2c6, // n0x17cc c0x0000 (---------------) + I muosat + 0x002d0106, // n0x17cd c0x0000 (---------------) + I museum + 0x00331d8e, // n0x17ce c0x0000 (---------------) + I naamesjevuemie + 0x0031150a, // n0x17cf c0x0000 (---------------) + I namdalseid + 0x002b57c6, // n0x17d0 c0x0000 (---------------) + I namsos + 0x002261ca, // n0x17d1 c0x0000 (---------------) + I namsskogan + 0x002c3349, // n0x17d2 c0x0000 (---------------) + I nannestad + 0x00315d45, // n0x17d3 c0x0000 (---------------) + I naroy + 0x0038ac08, // n0x17d4 c0x0000 (---------------) + I narviika + 0x003a29c6, // n0x17d5 c0x0000 (---------------) + I narvik + 0x00255008, // n0x17d6 c0x0000 (---------------) + I naustdal + 0x0031a1c8, // n0x17d7 c0x0000 (---------------) + I 
navuotna + 0x0030930b, // n0x17d8 c0x0000 (---------------) + I nedre-eiker + 0x00222e05, // n0x17d9 c0x0000 (---------------) + I nesna + 0x00331188, // n0x17da c0x0000 (---------------) + I nesodden + 0x0020560c, // n0x17db c0x0000 (---------------) + I nesoddtangen + 0x0022c287, // n0x17dc c0x0000 (---------------) + I nesseby + 0x0024f8c6, // n0x17dd c0x0000 (---------------) + I nesset + 0x00230048, // n0x17de c0x0000 (---------------) + I nissedal + 0x00279d48, // n0x17df c0x0000 (---------------) + I nittedal + 0x3fa47802, // n0x17e0 c0x00fe (n0x1977-n0x1978) + I nl + 0x002bbccb, // n0x17e1 c0x0000 (---------------) + I nord-aurdal + 0x00397f89, // n0x17e2 c0x0000 (---------------) + I nord-fron + 0x0034c449, // n0x17e3 c0x0000 (---------------) + I nord-odal + 0x00328887, // n0x17e4 c0x0000 (---------------) + I norddal + 0x00249408, // n0x17e5 c0x0000 (---------------) + I nordkapp + 0x3ff45708, // n0x17e6 c0x00ff (n0x1978-n0x197c) o I nordland + 0x0025f08b, // n0x17e7 c0x0000 (---------------) + I nordre-land + 0x0028e409, // n0x17e8 c0x0000 (---------------) + I nordreisa + 0x0021000d, // n0x17e9 c0x0000 (---------------) + I nore-og-uvdal + 0x002547c8, // n0x17ea c0x0000 (---------------) + I notodden + 0x0032c588, // n0x17eb c0x0000 (---------------) + I notteroy + 0x402009c2, // n0x17ec c0x0100 (n0x197c-n0x197d) + I nt + 0x00202d84, // n0x17ed c0x0000 (---------------) + I odda + 0x4060b282, // n0x17ee c0x0101 (n0x197d-n0x197e) + I of + 0x0037dd46, // n0x17ef c0x0000 (---------------) + I oksnes + 0x40a02242, // n0x17f0 c0x0102 (n0x197e-n0x197f) + I ol + 0x0037a0ca, // n0x17f1 c0x0000 (---------------) + I omasvuotna + 0x00352086, // n0x17f2 c0x0000 (---------------) + I oppdal + 0x00225388, // n0x17f3 c0x0000 (---------------) + I oppegard + 0x00255c88, // n0x17f4 c0x0000 (---------------) + I orkanger + 0x002f2686, // n0x17f5 c0x0000 (---------------) + I orkdal + 0x00338a46, // n0x17f6 c0x0000 (---------------) + I orland + 0x002e5306, // n0x17f7 
c0x0000 (---------------) + I orskog + 0x0027b0c5, // n0x17f8 c0x0000 (---------------) + I orsta + 0x00240e44, // n0x17f9 c0x0000 (---------------) + I osen + 0x40ec40c4, // n0x17fa c0x0103 (n0x197f-n0x1980) + I oslo + 0x00334ec6, // n0x17fb c0x0000 (---------------) + I osoyro + 0x002510c7, // n0x17fc c0x0000 (---------------) + I osteroy + 0x4139f9c7, // n0x17fd c0x0104 (n0x1980-n0x1981) o I ostfold + 0x002d968b, // n0x17fe c0x0000 (---------------) + I ostre-toten + 0x0025e1c9, // n0x17ff c0x0000 (---------------) + I overhalla + 0x002438ca, // n0x1800 c0x0000 (---------------) + I ovre-eiker + 0x003173c4, // n0x1801 c0x0000 (---------------) + I oyer + 0x0025e888, // n0x1802 c0x0000 (---------------) + I oygarden + 0x0026db0d, // n0x1803 c0x0000 (---------------) + I oystre-slidre + 0x002e0209, // n0x1804 c0x0000 (---------------) + I porsanger + 0x002e0448, // n0x1805 c0x0000 (---------------) + I porsangu + 0x002e06c9, // n0x1806 c0x0000 (---------------) + I porsgrunn + 0x002e1c44, // n0x1807 c0x0000 (---------------) + I priv + 0x00204d04, // n0x1808 c0x0000 (---------------) + I rade + 0x0027ec05, // n0x1809 c0x0000 (---------------) + I radoy + 0x0027720b, // n0x180a c0x0000 (---------------) + I rahkkeravju + 0x002ae146, // n0x180b c0x0000 (---------------) + I raholt + 0x00334d05, // n0x180c c0x0000 (---------------) + I raisa + 0x0037c0c9, // n0x180d c0x0000 (---------------) + I rakkestad + 0x00223008, // n0x180e c0x0000 (---------------) + I ralingen + 0x00239344, // n0x180f c0x0000 (---------------) + I rana + 0x00228c09, // n0x1810 c0x0000 (---------------) + I randaberg + 0x00248c45, // n0x1811 c0x0000 (---------------) + I rauma + 0x002b8808, // n0x1812 c0x0000 (---------------) + I rendalen + 0x00209ec7, // n0x1813 c0x0000 (---------------) + I rennebu + 0x0030fb88, // n0x1814 c0x0000 (---------------) + I rennesoy + 0x0027f1c6, // n0x1815 c0x0000 (---------------) + I rindal + 0x00351107, // n0x1816 c0x0000 (---------------) + I ringebu + 
0x0020e509, // n0x1817 c0x0000 (---------------) + I ringerike + 0x0023a189, // n0x1818 c0x0000 (---------------) + I ringsaker + 0x00277985, // n0x1819 c0x0000 (---------------) + I risor + 0x00238845, // n0x181a c0x0000 (---------------) + I rissa + 0x4162b682, // n0x181b c0x0105 (n0x1981-n0x1982) + I rl + 0x002fab84, // n0x181c c0x0000 (---------------) + I roan + 0x0029d105, // n0x181d c0x0000 (---------------) + I rodoy + 0x002cd3c6, // n0x181e c0x0000 (---------------) + I rollag + 0x00318545, // n0x181f c0x0000 (---------------) + I romsa + 0x002531c7, // n0x1820 c0x0000 (---------------) + I romskog + 0x00296bc5, // n0x1821 c0x0000 (---------------) + I roros + 0x00276a44, // n0x1822 c0x0000 (---------------) + I rost + 0x00326c86, // n0x1823 c0x0000 (---------------) + I royken + 0x00311907, // n0x1824 c0x0000 (---------------) + I royrvik + 0x0024aa46, // n0x1825 c0x0000 (---------------) + I ruovat + 0x003305c5, // n0x1826 c0x0000 (---------------) + I rygge + 0x00321248, // n0x1827 c0x0000 (---------------) + I salangen + 0x00223585, // n0x1828 c0x0000 (---------------) + I salat + 0x00322c47, // n0x1829 c0x0000 (---------------) + I saltdal + 0x0038ec09, // n0x182a c0x0000 (---------------) + I samnanger + 0x003301ca, // n0x182b c0x0000 (---------------) + I sandefjord + 0x0022e9c7, // n0x182c c0x0000 (---------------) + I sandnes + 0x0022e9cc, // n0x182d c0x0000 (---------------) + I sandnessjoen + 0x00259606, // n0x182e c0x0000 (---------------) + I sandoy + 0x0022d049, // n0x182f c0x0000 (---------------) + I sarpsborg + 0x002314c5, // n0x1830 c0x0000 (---------------) + I sauda + 0x00232248, // n0x1831 c0x0000 (---------------) + I sauherad + 0x00210703, // n0x1832 c0x0000 (---------------) + I sel + 0x00210705, // n0x1833 c0x0000 (---------------) + I selbu + 0x00329905, // n0x1834 c0x0000 (---------------) + I selje + 0x0024d147, // n0x1835 c0x0000 (---------------) + I seljord + 0x41a11cc2, // n0x1836 c0x0106 (n0x1982-n0x1983) + I sf + 
0x002434c7, // n0x1837 c0x0000 (---------------) + I siellak + 0x002cb986, // n0x1838 c0x0000 (---------------) + I sigdal + 0x0021cf06, // n0x1839 c0x0000 (---------------) + I siljan + 0x0034d886, // n0x183a c0x0000 (---------------) + I sirdal + 0x00279c86, // n0x183b c0x0000 (---------------) + I skanit + 0x00307808, // n0x183c c0x0000 (---------------) + I skanland + 0x002d8085, // n0x183d c0x0000 (---------------) + I skaun + 0x0024ccc7, // n0x183e c0x0000 (---------------) + I skedsmo + 0x0024cccd, // n0x183f c0x0000 (---------------) + I skedsmokorset + 0x00209743, // n0x1840 c0x0000 (---------------) + I ski + 0x00209745, // n0x1841 c0x0000 (---------------) + I skien + 0x002279c7, // n0x1842 c0x0000 (---------------) + I skierva + 0x002d1b48, // n0x1843 c0x0000 (---------------) + I skiptvet + 0x00227585, // n0x1844 c0x0000 (---------------) + I skjak + 0x00230ec8, // n0x1845 c0x0000 (---------------) + I skjervoy + 0x00266946, // n0x1846 c0x0000 (---------------) + I skodje + 0x0023e487, // n0x1847 c0x0000 (---------------) + I slattum + 0x002c0645, // n0x1848 c0x0000 (---------------) + I smola + 0x00222e86, // n0x1849 c0x0000 (---------------) + I snaase + 0x003604c5, // n0x184a c0x0000 (---------------) + I snasa + 0x002bae0a, // n0x184b c0x0000 (---------------) + I snillfjord + 0x002d9146, // n0x184c c0x0000 (---------------) + I snoasa + 0x0023c8c7, // n0x184d c0x0000 (---------------) + I sogndal + 0x002af145, // n0x184e c0x0000 (---------------) + I sogne + 0x002e2f47, // n0x184f c0x0000 (---------------) + I sokndal + 0x00359f04, // n0x1850 c0x0000 (---------------) + I sola + 0x002fb2c6, // n0x1851 c0x0000 (---------------) + I solund + 0x0037de85, // n0x1852 c0x0000 (---------------) + I somna + 0x0022be8b, // n0x1853 c0x0000 (---------------) + I sondre-land + 0x00356149, // n0x1854 c0x0000 (---------------) + I songdalen + 0x00378c8a, // n0x1855 c0x0000 (---------------) + I sor-aurdal + 0x00277a08, // n0x1856 c0x0000 (---------------) + I 
sor-fron + 0x002f43c8, // n0x1857 c0x0000 (---------------) + I sor-odal + 0x002f69cc, // n0x1858 c0x0000 (---------------) + I sor-varanger + 0x002fbfc7, // n0x1859 c0x0000 (---------------) + I sorfold + 0x002fd8c8, // n0x185a c0x0000 (---------------) + I sorreisa + 0x00301f88, // n0x185b c0x0000 (---------------) + I sortland + 0x003057c5, // n0x185c c0x0000 (---------------) + I sorum + 0x002bd24a, // n0x185d c0x0000 (---------------) + I spjelkavik + 0x003347c9, // n0x185e c0x0000 (---------------) + I spydeberg + 0x41e02742, // n0x185f c0x0107 (n0x1983-n0x1984) + I st + 0x00202746, // n0x1860 c0x0000 (---------------) + I stange + 0x0020f204, // n0x1861 c0x0000 (---------------) + I stat + 0x002ded49, // n0x1862 c0x0000 (---------------) + I stathelle + 0x002f35c9, // n0x1863 c0x0000 (---------------) + I stavanger + 0x00225047, // n0x1864 c0x0000 (---------------) + I stavern + 0x00251a47, // n0x1865 c0x0000 (---------------) + I steigen + 0x002833c9, // n0x1866 c0x0000 (---------------) + I steinkjer + 0x0038e808, // n0x1867 c0x0000 (---------------) + I stjordal + 0x0038e80f, // n0x1868 c0x0000 (---------------) + I stjordalshalsen + 0x00275e46, // n0x1869 c0x0000 (---------------) + I stokke + 0x0024688b, // n0x186a c0x0000 (---------------) + I stor-elvdal + 0x0037d0c5, // n0x186b c0x0000 (---------------) + I stord + 0x0037d0c7, // n0x186c c0x0000 (---------------) + I stordal + 0x002e6f89, // n0x186d c0x0000 (---------------) + I storfjord + 0x00228b86, // n0x186e c0x0000 (---------------) + I strand + 0x00228b87, // n0x186f c0x0000 (---------------) + I stranda + 0x0039e5c5, // n0x1870 c0x0000 (---------------) + I stryn + 0x00237304, // n0x1871 c0x0000 (---------------) + I sula + 0x002402c6, // n0x1872 c0x0000 (---------------) + I suldal + 0x00205084, // n0x1873 c0x0000 (---------------) + I sund + 0x0030a587, // n0x1874 c0x0000 (---------------) + I sunndal + 0x002e8fc8, // n0x1875 c0x0000 (---------------) + I surnadal + 0x422ef688, // n0x1876 
c0x0108 (n0x1984-n0x1985) + I svalbard + 0x002efc85, // n0x1877 c0x0000 (---------------) + I sveio + 0x002efdc7, // n0x1878 c0x0000 (---------------) + I svelvik + 0x00375cc9, // n0x1879 c0x0000 (---------------) + I sykkylven + 0x00206cc4, // n0x187a c0x0000 (---------------) + I tana + 0x00206cc8, // n0x187b c0x0000 (---------------) + I tananger + 0x42664f08, // n0x187c c0x0109 (n0x1985-n0x1987) o I telemark + 0x00269804, // n0x187d c0x0000 (---------------) + I time + 0x00237e88, // n0x187e c0x0000 (---------------) + I tingvoll + 0x002ea3c4, // n0x187f c0x0000 (---------------) + I tinn + 0x00226789, // n0x1880 c0x0000 (---------------) + I tjeldsund + 0x0026cec5, // n0x1881 c0x0000 (---------------) + I tjome + 0x42a00142, // n0x1882 c0x010a (n0x1987-n0x1988) + I tm + 0x00275e85, // n0x1883 c0x0000 (---------------) + I tokke + 0x0021dc85, // n0x1884 c0x0000 (---------------) + I tolga + 0x003661c8, // n0x1885 c0x0000 (---------------) + I tonsberg + 0x0023b587, // n0x1886 c0x0000 (---------------) + I torsken + 0x42e03002, // n0x1887 c0x010b (n0x1988-n0x1989) + I tr + 0x002c9d45, // n0x1888 c0x0000 (---------------) + I trana + 0x00274dc6, // n0x1889 c0x0000 (---------------) + I tranby + 0x00292546, // n0x188a c0x0000 (---------------) + I tranoy + 0x002fab48, // n0x188b c0x0000 (---------------) + I troandin + 0x002ffbc8, // n0x188c c0x0000 (---------------) + I trogstad + 0x00318506, // n0x188d c0x0000 (---------------) + I tromsa + 0x00323bc6, // n0x188e c0x0000 (---------------) + I tromso + 0x00352589, // n0x188f c0x0000 (---------------) + I trondheim + 0x00341ec6, // n0x1890 c0x0000 (---------------) + I trysil + 0x00356ccb, // n0x1891 c0x0000 (---------------) + I tvedestrand + 0x0024f6c5, // n0x1892 c0x0000 (---------------) + I tydal + 0x00219b46, // n0x1893 c0x0000 (---------------) + I tynset + 0x00215a48, // n0x1894 c0x0000 (---------------) + I tysfjord + 0x002336c6, // n0x1895 c0x0000 (---------------) + I tysnes + 0x00235ec6, // n0x1896 
c0x0000 (---------------) + I tysvar + 0x0021518a, // n0x1897 c0x0000 (---------------) + I ullensaker + 0x0034440a, // n0x1898 c0x0000 (---------------) + I ullensvang + 0x0028acc5, // n0x1899 c0x0000 (---------------) + I ulvik + 0x002c7a87, // n0x189a c0x0000 (---------------) + I unjarga + 0x00341946, // n0x189b c0x0000 (---------------) + I utsira + 0x432000c2, // n0x189c c0x010c (n0x1989-n0x198a) + I va + 0x00227b07, // n0x189d c0x0000 (---------------) + I vaapste + 0x00274745, // n0x189e c0x0000 (---------------) + I vadso + 0x00322504, // n0x189f c0x0000 (---------------) + I vaga + 0x00322505, // n0x18a0 c0x0000 (---------------) + I vagan + 0x003172c6, // n0x18a1 c0x0000 (---------------) + I vagsoy + 0x0032cec7, // n0x18a2 c0x0000 (---------------) + I vaksdal + 0x00217105, // n0x18a3 c0x0000 (---------------) + I valle + 0x0021a004, // n0x18a4 c0x0000 (---------------) + I vang + 0x0026f108, // n0x18a5 c0x0000 (---------------) + I vanylven + 0x00235f85, // n0x18a6 c0x0000 (---------------) + I vardo + 0x002923c7, // n0x18a7 c0x0000 (---------------) + I varggat + 0x002f0545, // n0x18a8 c0x0000 (---------------) + I varoy + 0x002140c5, // n0x18a9 c0x0000 (---------------) + I vefsn + 0x00230284, // n0x18aa c0x0000 (---------------) + I vega + 0x0028a309, // n0x18ab c0x0000 (---------------) + I vegarshei + 0x002e29c8, // n0x18ac c0x0000 (---------------) + I vennesla + 0x00374106, // n0x18ad c0x0000 (---------------) + I verdal + 0x0033f346, // n0x18ae c0x0000 (---------------) + I verran + 0x00269506, // n0x18af c0x0000 (---------------) + I vestby + 0x4379aa48, // n0x18b0 c0x010d (n0x198a-n0x198b) o I vestfold + 0x002f3807, // n0x18b1 c0x0000 (---------------) + I vestnes + 0x002f3e8d, // n0x18b2 c0x0000 (---------------) + I vestre-slidre + 0x002f45cc, // n0x18b3 c0x0000 (---------------) + I vestre-toten + 0x002f4bc9, // n0x18b4 c0x0000 (---------------) + I vestvagoy + 0x002f4e09, // n0x18b5 c0x0000 (---------------) + I vevelstad + 0x43b4f482, // 
n0x18b6 c0x010e (n0x198b-n0x198c) + I vf + 0x00399d43, // n0x18b7 c0x0000 (---------------) + I vgs + 0x00205d43, // n0x18b8 c0x0000 (---------------) + I vik + 0x00356385, // n0x18b9 c0x0000 (---------------) + I vikna + 0x00384f4a, // n0x18ba c0x0000 (---------------) + I vindafjord + 0x003183c6, // n0x18bb c0x0000 (---------------) + I voagat + 0x002f9845, // n0x18bc c0x0000 (---------------) + I volda + 0x002fd204, // n0x18bd c0x0000 (---------------) + I voss + 0x002fd20b, // n0x18be c0x0000 (---------------) + I vossevangen + 0x0030d90c, // n0x18bf c0x0000 (---------------) + I xn--andy-ira + 0x0030e14c, // n0x18c0 c0x0000 (---------------) + I xn--asky-ira + 0x0030e455, // n0x18c1 c0x0000 (---------------) + I xn--aurskog-hland-jnb + 0x003104cd, // n0x18c2 c0x0000 (---------------) + I xn--avery-yua + 0x003128cf, // n0x18c3 c0x0000 (---------------) + I xn--bdddj-mrabd + 0x00312c92, // n0x18c4 c0x0000 (---------------) + I xn--bearalvhki-y4a + 0x0031310f, // n0x18c5 c0x0000 (---------------) + I xn--berlevg-jxa + 0x003134d2, // n0x18c6 c0x0000 (---------------) + I xn--bhcavuotna-s4a + 0x00313953, // n0x18c7 c0x0000 (---------------) + I xn--bhccavuotna-k7a + 0x00313e0d, // n0x18c8 c0x0000 (---------------) + I xn--bidr-5nac + 0x003143cd, // n0x18c9 c0x0000 (---------------) + I xn--bievt-0qa + 0x0031474e, // n0x18ca c0x0000 (---------------) + I xn--bjarky-fya + 0x00314c0e, // n0x18cb c0x0000 (---------------) + I xn--bjddar-pta + 0x0031534c, // n0x18cc c0x0000 (---------------) + I xn--blt-elab + 0x003156cc, // n0x18cd c0x0000 (---------------) + I xn--bmlo-gra + 0x00315b0b, // n0x18ce c0x0000 (---------------) + I xn--bod-2na + 0x00315e8e, // n0x18cf c0x0000 (---------------) + I xn--brnny-wuac + 0x003178d2, // n0x18d0 c0x0000 (---------------) + I xn--brnnysund-m8ac + 0x0031818c, // n0x18d1 c0x0000 (---------------) + I xn--brum-voa + 0x003188d0, // n0x18d2 c0x0000 (---------------) + I xn--btsfjord-9za + 0x00329d52, // n0x18d3 c0x0000 (---------------) 
+ I xn--davvenjrga-y4a + 0x0032aacc, // n0x18d4 c0x0000 (---------------) + I xn--dnna-gra + 0x0032b18d, // n0x18d5 c0x0000 (---------------) + I xn--drbak-wua + 0x0032b4cc, // n0x18d6 c0x0000 (---------------) + I xn--dyry-ira + 0x0032d351, // n0x18d7 c0x0000 (---------------) + I xn--eveni-0qa01ga + 0x0032e1cd, // n0x18d8 c0x0000 (---------------) + I xn--finny-yua + 0x0033318d, // n0x18d9 c0x0000 (---------------) + I xn--fjord-lra + 0x0033378a, // n0x18da c0x0000 (---------------) + I xn--fl-zia + 0x00333a0c, // n0x18db c0x0000 (---------------) + I xn--flor-jra + 0x0033430c, // n0x18dc c0x0000 (---------------) + I xn--frde-gra + 0x00334a0c, // n0x18dd c0x0000 (---------------) + I xn--frna-woa + 0x0033528c, // n0x18de c0x0000 (---------------) + I xn--frya-hra + 0x00338bd3, // n0x18df c0x0000 (---------------) + I xn--ggaviika-8ya47h + 0x003391d0, // n0x18e0 c0x0000 (---------------) + I xn--gildeskl-g0a + 0x003395d0, // n0x18e1 c0x0000 (---------------) + I xn--givuotna-8ya + 0x0033a24d, // n0x18e2 c0x0000 (---------------) + I xn--gjvik-wua + 0x0033a84c, // n0x18e3 c0x0000 (---------------) + I xn--gls-elac + 0x0033b449, // n0x18e4 c0x0000 (---------------) + I xn--h-2fa + 0x0033da4d, // n0x18e5 c0x0000 (---------------) + I xn--hbmer-xqa + 0x0033dd93, // n0x18e6 c0x0000 (---------------) + I xn--hcesuolo-7ya35b + 0x0033e991, // n0x18e7 c0x0000 (---------------) + I xn--hgebostad-g3a + 0x0033edd3, // n0x18e8 c0x0000 (---------------) + I xn--hmmrfeasta-s4ac + 0x0033f58f, // n0x18e9 c0x0000 (---------------) + I xn--hnefoss-q1a + 0x0033f94c, // n0x18ea c0x0000 (---------------) + I xn--hobl-ira + 0x0033fc4f, // n0x18eb c0x0000 (---------------) + I xn--holtlen-hxa + 0x0034000d, // n0x18ec c0x0000 (---------------) + I xn--hpmir-xqa + 0x0034060f, // n0x18ed c0x0000 (---------------) + I xn--hyanger-q1a + 0x003409d0, // n0x18ee c0x0000 (---------------) + I xn--hylandet-54a + 0x0034144e, // n0x18ef c0x0000 (---------------) + I xn--indery-fya + 0x00346c4e, // 
n0x18f0 c0x0000 (---------------) + I xn--jlster-bya + 0x00347390, // n0x18f1 c0x0000 (---------------) + I xn--jrpeland-54a + 0x0034860d, // n0x18f2 c0x0000 (---------------) + I xn--karmy-yua + 0x00348f8e, // n0x18f3 c0x0000 (---------------) + I xn--kfjord-iua + 0x0034930c, // n0x18f4 c0x0000 (---------------) + I xn--klbu-woa + 0x0034a2d3, // n0x18f5 c0x0000 (---------------) + I xn--koluokta-7ya57h + 0x0034c68e, // n0x18f6 c0x0000 (---------------) + I xn--krager-gya + 0x0034da10, // n0x18f7 c0x0000 (---------------) + I xn--kranghke-b0a + 0x0034de11, // n0x18f8 c0x0000 (---------------) + I xn--krdsherad-m8a + 0x0034e24f, // n0x18f9 c0x0000 (---------------) + I xn--krehamn-dxa + 0x0034e613, // n0x18fa c0x0000 (---------------) + I xn--krjohka-hwab49j + 0x0034f00d, // n0x18fb c0x0000 (---------------) + I xn--ksnes-uua + 0x0034f34f, // n0x18fc c0x0000 (---------------) + I xn--kvfjord-nxa + 0x0034f70e, // n0x18fd c0x0000 (---------------) + I xn--kvitsy-fya + 0x0034fe50, // n0x18fe c0x0000 (---------------) + I xn--kvnangen-k0a + 0x00350249, // n0x18ff c0x0000 (---------------) + I xn--l-1fa + 0x00353090, // n0x1900 c0x0000 (---------------) + I xn--laheadju-7ya + 0x003536cf, // n0x1901 c0x0000 (---------------) + I xn--langevg-jxa + 0x00353d4f, // n0x1902 c0x0000 (---------------) + I xn--ldingen-q1a + 0x00354112, // n0x1903 c0x0000 (---------------) + I xn--leagaviika-52b + 0x00357c8e, // n0x1904 c0x0000 (---------------) + I xn--lesund-hua + 0x0035858d, // n0x1905 c0x0000 (---------------) + I xn--lgrd-poac + 0x0035930d, // n0x1906 c0x0000 (---------------) + I xn--lhppi-xqa + 0x0035964d, // n0x1907 c0x0000 (---------------) + I xn--linds-pra + 0x0035aa0d, // n0x1908 c0x0000 (---------------) + I xn--loabt-0qa + 0x0035ad4d, // n0x1909 c0x0000 (---------------) + I xn--lrdal-sra + 0x0035b090, // n0x190a c0x0000 (---------------) + I xn--lrenskog-54a + 0x0035b48b, // n0x190b c0x0000 (---------------) + I xn--lt-liac + 0x0035ba4c, // n0x190c c0x0000 
(---------------) + I xn--lten-gra + 0x0035bdcc, // n0x190d c0x0000 (---------------) + I xn--lury-ira + 0x0035c0cc, // n0x190e c0x0000 (---------------) + I xn--mely-ira + 0x0035c3ce, // n0x190f c0x0000 (---------------) + I xn--merker-kua + 0x00366c50, // n0x1910 c0x0000 (---------------) + I xn--mjndalen-64a + 0x00368512, // n0x1911 c0x0000 (---------------) + I xn--mlatvuopmi-s4a + 0x0036898b, // n0x1912 c0x0000 (---------------) + I xn--mli-tla + 0x0036940e, // n0x1913 c0x0000 (---------------) + I xn--mlselv-iua + 0x0036978e, // n0x1914 c0x0000 (---------------) + I xn--moreke-jua + 0x0036a48e, // n0x1915 c0x0000 (---------------) + I xn--mosjen-eya + 0x0036c0cb, // n0x1916 c0x0000 (---------------) + I xn--mot-tla + 0x43f6c696, // n0x1917 c0x010f (n0x198c-n0x198e) o I xn--mre-og-romsdal-qqb + 0x0036d0cd, // n0x1918 c0x0000 (---------------) + I xn--msy-ula0h + 0x0036e814, // n0x1919 c0x0000 (---------------) + I xn--mtta-vrjjat-k7af + 0x0036f70d, // n0x191a c0x0000 (---------------) + I xn--muost-0qa + 0x00371c95, // n0x191b c0x0000 (---------------) + I xn--nmesjevuemie-tcba + 0x0037308d, // n0x191c c0x0000 (---------------) + I xn--nry-yla5g + 0x00373a0f, // n0x191d c0x0000 (---------------) + I xn--nttery-byae + 0x0037428f, // n0x191e c0x0000 (---------------) + I xn--nvuotna-hwa + 0x0037750f, // n0x191f c0x0000 (---------------) + I xn--oppegrd-ixa + 0x003778ce, // n0x1920 c0x0000 (---------------) + I xn--ostery-fya + 0x0037900d, // n0x1921 c0x0000 (---------------) + I xn--osyro-wua + 0x0037a891, // n0x1922 c0x0000 (---------------) + I xn--porsgu-sta26f + 0x0037eecc, // n0x1923 c0x0000 (---------------) + I xn--rady-ira + 0x0037f1cc, // n0x1924 c0x0000 (---------------) + I xn--rdal-poa + 0x0037f4cb, // n0x1925 c0x0000 (---------------) + I xn--rde-ula + 0x0037fa8c, // n0x1926 c0x0000 (---------------) + I xn--rdy-0nab + 0x0037fe4f, // n0x1927 c0x0000 (---------------) + I xn--rennesy-v1a + 0x00380212, // n0x1928 c0x0000 (---------------) + I 
xn--rhkkervju-01af + 0x00380bcd, // n0x1929 c0x0000 (---------------) + I xn--rholt-mra + 0x00381b8c, // n0x192a c0x0000 (---------------) + I xn--risa-5na + 0x0038200c, // n0x192b c0x0000 (---------------) + I xn--risr-ira + 0x0038230d, // n0x192c c0x0000 (---------------) + I xn--rland-uua + 0x0038264f, // n0x192d c0x0000 (---------------) + I xn--rlingen-mxa + 0x00382a0e, // n0x192e c0x0000 (---------------) + I xn--rmskog-bya + 0x00384c0c, // n0x192f c0x0000 (---------------) + I xn--rros-gra + 0x003851cd, // n0x1930 c0x0000 (---------------) + I xn--rskog-uua + 0x0038550b, // n0x1931 c0x0000 (---------------) + I xn--rst-0na + 0x00385acc, // n0x1932 c0x0000 (---------------) + I xn--rsta-fra + 0x0038604d, // n0x1933 c0x0000 (---------------) + I xn--ryken-vua + 0x0038638e, // n0x1934 c0x0000 (---------------) + I xn--ryrvik-bya + 0x00386809, // n0x1935 c0x0000 (---------------) + I xn--s-1fa + 0x00387513, // n0x1936 c0x0000 (---------------) + I xn--sandnessjen-ogb + 0x00387f8d, // n0x1937 c0x0000 (---------------) + I xn--sandy-yua + 0x003882cd, // n0x1938 c0x0000 (---------------) + I xn--seral-lra + 0x003888cc, // n0x1939 c0x0000 (---------------) + I xn--sgne-gra + 0x00388d4e, // n0x193a c0x0000 (---------------) + I xn--skierv-uta + 0x00389bcf, // n0x193b c0x0000 (---------------) + I xn--skjervy-v1a + 0x00389f8c, // n0x193c c0x0000 (---------------) + I xn--skjk-soa + 0x0038a28d, // n0x193d c0x0000 (---------------) + I xn--sknit-yqa + 0x0038a5cf, // n0x193e c0x0000 (---------------) + I xn--sknland-fxa + 0x0038a98c, // n0x193f c0x0000 (---------------) + I xn--slat-5na + 0x0038b08c, // n0x1940 c0x0000 (---------------) + I xn--slt-elab + 0x0038b44c, // n0x1941 c0x0000 (---------------) + I xn--smla-hra + 0x0038b74c, // n0x1942 c0x0000 (---------------) + I xn--smna-gra + 0x0038be0d, // n0x1943 c0x0000 (---------------) + I xn--snase-nra + 0x0038c152, // n0x1944 c0x0000 (---------------) + I xn--sndre-land-0cb + 0x0038c7cc, // n0x1945 c0x0000 
(---------------) + I xn--snes-poa + 0x0038cacc, // n0x1946 c0x0000 (---------------) + I xn--snsa-roa + 0x0038cdd1, // n0x1947 c0x0000 (---------------) + I xn--sr-aurdal-l8a + 0x0038d20f, // n0x1948 c0x0000 (---------------) + I xn--sr-fron-q1a + 0x0038d5cf, // n0x1949 c0x0000 (---------------) + I xn--sr-odal-q1a + 0x0038d993, // n0x194a c0x0000 (---------------) + I xn--sr-varanger-ggb + 0x0038ee4e, // n0x194b c0x0000 (---------------) + I xn--srfold-bya + 0x0038f3cf, // n0x194c c0x0000 (---------------) + I xn--srreisa-q1a + 0x0038f78c, // n0x194d c0x0000 (---------------) + I xn--srum-gra + 0x4438face, // n0x194e c0x0110 (n0x198e-n0x198f) o I xn--stfold-9xa + 0x0038fe4f, // n0x194f c0x0000 (---------------) + I xn--stjrdal-s1a + 0x00390216, // n0x1950 c0x0000 (---------------) + I xn--stjrdalshalsen-sqb + 0x00390d12, // n0x1951 c0x0000 (---------------) + I xn--stre-toten-zcb + 0x0039230c, // n0x1952 c0x0000 (---------------) + I xn--tjme-hra + 0x00392acf, // n0x1953 c0x0000 (---------------) + I xn--tnsberg-q1a + 0x0039314d, // n0x1954 c0x0000 (---------------) + I xn--trany-yua + 0x0039348f, // n0x1955 c0x0000 (---------------) + I xn--trgstad-r1a + 0x0039384c, // n0x1956 c0x0000 (---------------) + I xn--trna-woa + 0x00393b4d, // n0x1957 c0x0000 (---------------) + I xn--troms-zua + 0x00393e8d, // n0x1958 c0x0000 (---------------) + I xn--tysvr-vra + 0x0039570e, // n0x1959 c0x0000 (---------------) + I xn--unjrga-rta + 0x00396a8c, // n0x195a c0x0000 (---------------) + I xn--vads-jra + 0x00396d8c, // n0x195b c0x0000 (---------------) + I xn--vard-jra + 0x00397090, // n0x195c c0x0000 (---------------) + I xn--vegrshei-c0a + 0x003991d1, // n0x195d c0x0000 (---------------) + I xn--vestvgy-ixa6o + 0x0039960b, // n0x195e c0x0000 (---------------) + I xn--vg-yiab + 0x0039994c, // n0x195f c0x0000 (---------------) + I xn--vgan-qoa + 0x00399c4e, // n0x1960 c0x0000 (---------------) + I xn--vgsy-qoa0j + 0x0039af51, // n0x1961 c0x0000 (---------------) + I 
xn--vre-eiker-k8a + 0x0039b38e, // n0x1962 c0x0000 (---------------) + I xn--vrggt-xqad + 0x0039b70d, // n0x1963 c0x0000 (---------------) + I xn--vry-yla5g + 0x003a278b, // n0x1964 c0x0000 (---------------) + I xn--yer-zna + 0x003a308f, // n0x1965 c0x0000 (---------------) + I xn--ygarden-p1a + 0x003a4814, // n0x1966 c0x0000 (---------------) + I xn--ystre-slidre-ujb + 0x0023a242, // n0x1967 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1968 c0x0000 (---------------) + I gs + 0x00202c03, // n0x1969 c0x0000 (---------------) + I nes + 0x0023a242, // n0x196a c0x0000 (---------------) + I gs + 0x00202c03, // n0x196b c0x0000 (---------------) + I nes + 0x0023a242, // n0x196c c0x0000 (---------------) + I gs + 0x0020a802, // n0x196d c0x0000 (---------------) + I os + 0x003635c5, // n0x196e c0x0000 (---------------) + I valer + 0x0039ac4c, // n0x196f c0x0000 (---------------) + I xn--vler-qoa + 0x0023a242, // n0x1970 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1971 c0x0000 (---------------) + I gs + 0x0020a802, // n0x1972 c0x0000 (---------------) + I os + 0x0023a242, // n0x1973 c0x0000 (---------------) + I gs + 0x002921c5, // n0x1974 c0x0000 (---------------) + I heroy + 0x003301c5, // n0x1975 c0x0000 (---------------) + I sande + 0x0023a242, // n0x1976 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1977 c0x0000 (---------------) + I gs + 0x0020e402, // n0x1978 c0x0000 (---------------) + I bo + 0x002921c5, // n0x1979 c0x0000 (---------------) + I heroy + 0x00310a09, // n0x197a c0x0000 (---------------) + I xn--b-5ga + 0x0033e68c, // n0x197b c0x0000 (---------------) + I xn--hery-ira + 0x0023a242, // n0x197c c0x0000 (---------------) + I gs + 0x0023a242, // n0x197d c0x0000 (---------------) + I gs + 0x0023a242, // n0x197e c0x0000 (---------------) + I gs + 0x0023a242, // n0x197f c0x0000 (---------------) + I gs + 0x003635c5, // n0x1980 c0x0000 (---------------) + I valer + 0x0023a242, // n0x1981 c0x0000 (---------------) + I gs + 0x0023a242, // 
n0x1982 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1983 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1984 c0x0000 (---------------) + I gs + 0x0020e402, // n0x1985 c0x0000 (---------------) + I bo + 0x00310a09, // n0x1986 c0x0000 (---------------) + I xn--b-5ga + 0x0023a242, // n0x1987 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1988 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1989 c0x0000 (---------------) + I gs + 0x003301c5, // n0x198a c0x0000 (---------------) + I sande + 0x0023a242, // n0x198b c0x0000 (---------------) + I gs + 0x003301c5, // n0x198c c0x0000 (---------------) + I sande + 0x0033e68c, // n0x198d c0x0000 (---------------) + I xn--hery-ira + 0x0039ac4c, // n0x198e c0x0000 (---------------) + I xn--vler-qoa + 0x00330b83, // n0x198f c0x0000 (---------------) + I biz + 0x00233503, // n0x1990 c0x0000 (---------------) + I com + 0x0023a783, // n0x1991 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1992 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1993 c0x0000 (---------------) + I info + 0x0021fe03, // n0x1994 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1995 c0x0000 (---------------) + I org + 0x000ecf48, // n0x1996 c0x0000 (---------------) + merseine + 0x000a9304, // n0x1997 c0x0000 (---------------) + mine + 0x000feb48, // n0x1998 c0x0000 (---------------) + shacknet + 0x00201542, // n0x1999 c0x0000 (---------------) + I ac + 0x45200742, // n0x199a c0x0114 (n0x19a9-n0x19aa) + I co + 0x00245b43, // n0x199b c0x0000 (---------------) + I cri + 0x0024eac4, // n0x199c c0x0000 (---------------) + I geek + 0x00205843, // n0x199d c0x0000 (---------------) + I gen + 0x00341e04, // n0x199e c0x0000 (---------------) + I govt + 0x0036b386, // n0x199f c0x0000 (---------------) + I health + 0x0020cec3, // n0x19a0 c0x0000 (---------------) + I iwi + 0x002eed84, // n0x19a1 c0x0000 (---------------) + I kiwi + 0x002703c5, // n0x19a2 c0x0000 (---------------) + I maori + 0x00209003, // n0x19a3 c0x0000 
(---------------) + I mil + 0x0021fe03, // n0x19a4 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x19a5 c0x0000 (---------------) + I org + 0x0028440a, // n0x19a6 c0x0000 (---------------) + I parliament + 0x0023d0c6, // n0x19a7 c0x0000 (---------------) + I school + 0x00369b0c, // n0x19a8 c0x0000 (---------------) + I xn--mori-qsa + 0x000ffa08, // n0x19a9 c0x0000 (---------------) + blogspot + 0x00200742, // n0x19aa c0x0000 (---------------) + I co + 0x00233503, // n0x19ab c0x0000 (---------------) + I com + 0x0023a783, // n0x19ac c0x0000 (---------------) + I edu + 0x0026cc83, // n0x19ad c0x0000 (---------------) + I gov + 0x00213ac3, // n0x19ae c0x0000 (---------------) + I med + 0x002d0106, // n0x19af c0x0000 (---------------) + I museum + 0x0021fe03, // n0x19b0 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x19b1 c0x0000 (---------------) + I org + 0x00220e43, // n0x19b2 c0x0000 (---------------) + I pro + 0x000035c2, // n0x19b3 c0x0000 (---------------) + ae + 0x000fb147, // n0x19b4 c0x0000 (---------------) + blogdns + 0x000d3588, // n0x19b5 c0x0000 (---------------) + blogsite + 0x0000e74e, // n0x19b6 c0x0000 (---------------) + bmoattachments + 0x000878d2, // n0x19b7 c0x0000 (---------------) + boldlygoingnowhere + 0x45e3e2c5, // n0x19b8 c0x0117 (n0x19f0-n0x19f2) o I cdn77 + 0x4631c4cc, // n0x19b9 c0x0118 (n0x19f2-n0x19f3) o I cdn77-secure + 0x0014c048, // n0x19ba c0x0000 (---------------) + dnsalias + 0x0007c9c7, // n0x19bb c0x0000 (---------------) + dnsdojo + 0x00014e0b, // n0x19bc c0x0000 (---------------) + doesntexist + 0x0016a009, // n0x19bd c0x0000 (---------------) + dontexist + 0x0014bf47, // n0x19be c0x0000 (---------------) + doomdns + 0x0008cdc7, // n0x19bf c0x0000 (---------------) + dsmynas + 0x0007c8c7, // n0x19c0 c0x0000 (---------------) + duckdns + 0x00011046, // n0x19c1 c0x0000 (---------------) + dvrdns + 0x00197b48, // n0x19c2 c0x0000 (---------------) + dynalias + 0x46813886, // n0x19c3 c0x011a (n0x19f4-n0x19f6) + dyndns 
+ 0x000a950d, // n0x19c4 c0x0000 (---------------) + endofinternet + 0x0005ea10, // n0x19c5 c0x0000 (---------------) + endoftheinternet + 0x46c04b82, // n0x19c6 c0x011b (n0x19f6-n0x1a2d) + eu + 0x00008f88, // n0x19c7 c0x0000 (---------------) + familyds + 0x0006ac47, // n0x19c8 c0x0000 (---------------) + from-me + 0x00097cc9, // n0x19c9 c0x0000 (---------------) + game-host + 0x00057d06, // n0x19ca c0x0000 (---------------) + gotdns + 0x0000a882, // n0x19cb c0x0000 (---------------) + hk + 0x0014cfca, // n0x19cc c0x0000 (---------------) + hobby-site + 0x00013a47, // n0x19cd c0x0000 (---------------) + homedns + 0x00146347, // n0x19ce c0x0000 (---------------) + homeftp + 0x000a59c9, // n0x19cf c0x0000 (---------------) + homelinux + 0x000a6fc8, // n0x19d0 c0x0000 (---------------) + homeunix + 0x000e0c0e, // n0x19d1 c0x0000 (---------------) + is-a-bruinsfan + 0x0000c54e, // n0x19d2 c0x0000 (---------------) + is-a-candidate + 0x00011a0f, // n0x19d3 c0x0000 (---------------) + is-a-celticsfan + 0x00012789, // n0x19d4 c0x0000 (---------------) + is-a-chef + 0x0004e989, // n0x19d5 c0x0000 (---------------) + is-a-geek + 0x000704cb, // n0x19d6 c0x0000 (---------------) + is-a-knight + 0x0007fa4f, // n0x19d7 c0x0000 (---------------) + is-a-linux-user + 0x0008a50c, // n0x19d8 c0x0000 (---------------) + is-a-patsfan + 0x000ab58b, // n0x19d9 c0x0000 (---------------) + is-a-soxfan + 0x000b9e88, // n0x19da c0x0000 (---------------) + is-found + 0x000d9587, // n0x19db c0x0000 (---------------) + is-lost + 0x000fe008, // n0x19dc c0x0000 (---------------) + is-saved + 0x000f218b, // n0x19dd c0x0000 (---------------) + is-very-bad + 0x000f8d0c, // n0x19de c0x0000 (---------------) + is-very-evil + 0x0011b8cc, // n0x19df c0x0000 (---------------) + is-very-good + 0x0013aecc, // n0x19e0 c0x0000 (---------------) + is-very-nice + 0x00142a4d, // n0x19e1 c0x0000 (---------------) + is-very-sweet + 0x0008e588, // n0x19e2 c0x0000 (---------------) + isa-geek + 0x00150c09, // 
n0x19e3 c0x0000 (---------------) + kicks-ass + 0x001a24cb, // n0x19e4 c0x0000 (---------------) + misconfused + 0x000dcd47, // n0x19e5 c0x0000 (---------------) + podzone + 0x000d340a, // n0x19e6 c0x0000 (---------------) + readmyblog + 0x0006ba86, // n0x19e7 c0x0000 (---------------) + selfip + 0x00099a8d, // n0x19e8 c0x0000 (---------------) + sellsyourhome + 0x000cb7c8, // n0x19e9 c0x0000 (---------------) + servebbs + 0x000895c8, // n0x19ea c0x0000 (---------------) + serveftp + 0x00173dc9, // n0x19eb c0x0000 (---------------) + servegame + 0x000e868c, // n0x19ec c0x0000 (---------------) + stuff-4-sale + 0x00002382, // n0x19ed c0x0000 (---------------) + us + 0x000eadc6, // n0x19ee c0x0000 (---------------) + webhop + 0x00005f82, // n0x19ef c0x0000 (---------------) + za + 0x00000301, // n0x19f0 c0x0000 (---------------) + c + 0x0003cdc3, // n0x19f1 c0x0000 (---------------) + rsc + 0x46783486, // n0x19f2 c0x0119 (n0x19f3-n0x19f4) o I origin + 0x0003e443, // n0x19f3 c0x0000 (---------------) + ssl + 0x00002d42, // n0x19f4 c0x0000 (---------------) + go + 0x00013a44, // n0x19f5 c0x0000 (---------------) + home + 0x000001c2, // n0x19f6 c0x0000 (---------------) + al + 0x000d4884, // n0x19f7 c0x0000 (---------------) + asso + 0x00000102, // n0x19f8 c0x0000 (---------------) + at + 0x00004f82, // n0x19f9 c0x0000 (---------------) + au + 0x00003302, // n0x19fa c0x0000 (---------------) + be + 0x000ee482, // n0x19fb c0x0000 (---------------) + bg + 0x00000302, // n0x19fc c0x0000 (---------------) + ca + 0x0003e2c2, // n0x19fd c0x0000 (---------------) + cd + 0x00001582, // n0x19fe c0x0000 (---------------) + ch + 0x0001ba42, // n0x19ff c0x0000 (---------------) + cn + 0x0003e082, // n0x1a00 c0x0000 (---------------) + cy + 0x00029ec2, // n0x1a01 c0x0000 (---------------) + cz + 0x00004d82, // n0x1a02 c0x0000 (---------------) + de + 0x000494c2, // n0x1a03 c0x0000 (---------------) + dk + 0x0003a783, // n0x1a04 c0x0000 (---------------) + edu + 0x0000b342, // 
n0x1a05 c0x0000 (---------------) + ee + 0x00000482, // n0x1a06 c0x0000 (---------------) + es + 0x00007502, // n0x1a07 c0x0000 (---------------) + fi + 0x00000582, // n0x1a08 c0x0000 (---------------) + fr + 0x00000c82, // n0x1a09 c0x0000 (---------------) + gr + 0x0000e4c2, // n0x1a0a c0x0000 (---------------) + hr + 0x000195c2, // n0x1a0b c0x0000 (---------------) + hu + 0x00000042, // n0x1a0c c0x0000 (---------------) + ie + 0x00002902, // n0x1a0d c0x0000 (---------------) + il + 0x000013c2, // n0x1a0e c0x0000 (---------------) + in + 0x00001603, // n0x1a0f c0x0000 (---------------) + int + 0x000006c2, // n0x1a10 c0x0000 (---------------) + is + 0x00001e42, // n0x1a11 c0x0000 (---------------) + it + 0x000ae3c2, // n0x1a12 c0x0000 (---------------) + jp + 0x00006fc2, // n0x1a13 c0x0000 (---------------) + kr + 0x00009e02, // n0x1a14 c0x0000 (---------------) + lt + 0x00002f42, // n0x1a15 c0x0000 (---------------) + lu + 0x00005d02, // n0x1a16 c0x0000 (---------------) + lv + 0x0002ac02, // n0x1a17 c0x0000 (---------------) + mc + 0x00003e82, // n0x1a18 c0x0000 (---------------) + me + 0x00167142, // n0x1a19 c0x0000 (---------------) + mk + 0x00004c02, // n0x1a1a c0x0000 (---------------) + mt + 0x00026f02, // n0x1a1b c0x0000 (---------------) + my + 0x0001fe03, // n0x1a1c c0x0000 (---------------) + net + 0x00002802, // n0x1a1d c0x0000 (---------------) + ng + 0x00047802, // n0x1a1e c0x0000 (---------------) + nl + 0x00000c02, // n0x1a1f c0x0000 (---------------) + no + 0x000094c2, // n0x1a20 c0x0000 (---------------) + nz + 0x00077905, // n0x1a21 c0x0000 (---------------) + paris + 0x000063c2, // n0x1a22 c0x0000 (---------------) + pl + 0x0008c9c2, // n0x1a23 c0x0000 (---------------) + pt + 0x00043b83, // n0x1a24 c0x0000 (---------------) + q-a + 0x00002202, // n0x1a25 c0x0000 (---------------) + ro + 0x00011302, // n0x1a26 c0x0000 (---------------) + ru + 0x000046c2, // n0x1a27 c0x0000 (---------------) + se + 0x0000a402, // n0x1a28 c0x0000 (---------------) 
+ si + 0x00007842, // n0x1a29 c0x0000 (---------------) + sk + 0x00003002, // n0x1a2a c0x0000 (---------------) + tr + 0x00000f82, // n0x1a2b c0x0000 (---------------) + uk + 0x00002382, // n0x1a2c c0x0000 (---------------) + us + 0x00210f43, // n0x1a2d c0x0000 (---------------) + I abo + 0x00201542, // n0x1a2e c0x0000 (---------------) + I ac + 0x00233503, // n0x1a2f c0x0000 (---------------) + I com + 0x0023a783, // n0x1a30 c0x0000 (---------------) + I edu + 0x00213183, // n0x1a31 c0x0000 (---------------) + I gob + 0x0020e2c3, // n0x1a32 c0x0000 (---------------) + I ing + 0x00213ac3, // n0x1a33 c0x0000 (---------------) + I med + 0x0021fe03, // n0x1a34 c0x0000 (---------------) + I net + 0x00201483, // n0x1a35 c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x1a36 c0x0000 (---------------) + I org + 0x00292103, // n0x1a37 c0x0000 (---------------) + I sld + 0x000ffa08, // n0x1a38 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1a39 c0x0000 (---------------) + I com + 0x0023a783, // n0x1a3a c0x0000 (---------------) + I edu + 0x00213183, // n0x1a3b c0x0000 (---------------) + I gob + 0x00209003, // n0x1a3c c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1a3d c0x0000 (---------------) + I net + 0x00201483, // n0x1a3e c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x1a3f c0x0000 (---------------) + I org + 0x00233503, // n0x1a40 c0x0000 (---------------) + I com + 0x0023a783, // n0x1a41 c0x0000 (---------------) + I edu + 0x0022d1c3, // n0x1a42 c0x0000 (---------------) + I org + 0x00233503, // n0x1a43 c0x0000 (---------------) + I com + 0x0023a783, // n0x1a44 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1a45 c0x0000 (---------------) + I gov + 0x00200041, // n0x1a46 c0x0000 (---------------) + I i + 0x00209003, // n0x1a47 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1a48 c0x0000 (---------------) + I net + 0x00202d03, // n0x1a49 c0x0000 (---------------) + I ngo + 0x0022d1c3, // n0x1a4a c0x0000 (---------------) + I org 
+ 0x00330b83, // n0x1a4b c0x0000 (---------------) + I biz + 0x00233503, // n0x1a4c c0x0000 (---------------) + I com + 0x0023a783, // n0x1a4d c0x0000 (---------------) + I edu + 0x00208f83, // n0x1a4e c0x0000 (---------------) + I fam + 0x00213183, // n0x1a4f c0x0000 (---------------) + I gob + 0x00337283, // n0x1a50 c0x0000 (---------------) + I gok + 0x00282e03, // n0x1a51 c0x0000 (---------------) + I gon + 0x002a36c3, // n0x1a52 c0x0000 (---------------) + I gop + 0x00276203, // n0x1a53 c0x0000 (---------------) + I gos + 0x0026cc83, // n0x1a54 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1a55 c0x0000 (---------------) + I info + 0x0021fe03, // n0x1a56 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1a57 c0x0000 (---------------) + I org + 0x00221a03, // n0x1a58 c0x0000 (---------------) + I web + 0x002ebac4, // n0x1a59 c0x0000 (---------------) + I agro + 0x00253543, // n0x1a5a c0x0000 (---------------) + I aid + 0x000011c3, // n0x1a5b c0x0000 (---------------) + art + 0x00200103, // n0x1a5c c0x0000 (---------------) + I atm + 0x0024a188, // n0x1a5d c0x0000 (---------------) + I augustow + 0x00265104, // n0x1a5e c0x0000 (---------------) + I auto + 0x0022404a, // n0x1a5f c0x0000 (---------------) + I babia-gora + 0x00207a06, // n0x1a60 c0x0000 (---------------) + I bedzin + 0x00397a07, // n0x1a61 c0x0000 (---------------) + I beskidy + 0x0021e20a, // n0x1a62 c0x0000 (---------------) + I bialowieza + 0x00275d09, // n0x1a63 c0x0000 (---------------) + I bialystok + 0x0039dd47, // n0x1a64 c0x0000 (---------------) + I bielawa + 0x003a604a, // n0x1a65 c0x0000 (---------------) + I bieszczady + 0x00330b83, // n0x1a66 c0x0000 (---------------) + I biz + 0x003782cb, // n0x1a67 c0x0000 (---------------) + I boleslawiec + 0x002ce249, // n0x1a68 c0x0000 (---------------) + I bydgoszcz + 0x00269605, // n0x1a69 c0x0000 (---------------) + I bytom + 0x002cc107, // n0x1a6a c0x0000 (---------------) + I cieszyn + 0x00000742, // n0x1a6b c0x0000 
(---------------) + co + 0x00233503, // n0x1a6c c0x0000 (---------------) + I com + 0x00229ec7, // n0x1a6d c0x0000 (---------------) + I czeladz + 0x003522c5, // n0x1a6e c0x0000 (---------------) + I czest + 0x002bb989, // n0x1a6f c0x0000 (---------------) + I dlugoleka + 0x0023a783, // n0x1a70 c0x0000 (---------------) + I edu + 0x00222c06, // n0x1a71 c0x0000 (---------------) + I elblag + 0x002baac3, // n0x1a72 c0x0000 (---------------) + I elk + 0x000cba03, // n0x1a73 c0x0000 (---------------) + gda + 0x000fc286, // n0x1a74 c0x0000 (---------------) + gdansk + 0x000ee0c6, // n0x1a75 c0x0000 (---------------) + gdynia + 0x0000ce47, // n0x1a76 c0x0000 (---------------) + gliwice + 0x00210886, // n0x1a77 c0x0000 (---------------) + I glogow + 0x00216405, // n0x1a78 c0x0000 (---------------) + I gmina + 0x00328747, // n0x1a79 c0x0000 (---------------) + I gniezno + 0x003350c7, // n0x1a7a c0x0000 (---------------) + I gorlice + 0x48a6cc83, // n0x1a7b c0x0122 (n0x1afe-n0x1b2d) + I gov + 0x0032ad07, // n0x1a7c c0x0000 (---------------) + I grajewo + 0x0035e383, // n0x1a7d c0x0000 (---------------) + I gsm + 0x003205c5, // n0x1a7e c0x0000 (---------------) + I ilawa + 0x003a1244, // n0x1a7f c0x0000 (---------------) + I info + 0x0036f508, // n0x1a80 c0x0000 (---------------) + I jaworzno + 0x002ad88c, // n0x1a81 c0x0000 (---------------) + I jelenia-gora + 0x002ac505, // n0x1a82 c0x0000 (---------------) + I jgora + 0x00345486, // n0x1a83 c0x0000 (---------------) + I kalisz + 0x00229d87, // n0x1a84 c0x0000 (---------------) + I karpacz + 0x0038e2c7, // n0x1a85 c0x0000 (---------------) + I kartuzy + 0x0020f847, // n0x1a86 c0x0000 (---------------) + I kaszuby + 0x00217ac8, // n0x1a87 c0x0000 (---------------) + I katowice + 0x002acd4f, // n0x1a88 c0x0000 (---------------) + I kazimierz-dolny + 0x00249345, // n0x1a89 c0x0000 (---------------) + I kepno + 0x00245c47, // n0x1a8a c0x0000 (---------------) + I ketrzyn + 0x0039c507, // n0x1a8b c0x0000 (---------------) + I 
klodzko + 0x002a4d8a, // n0x1a8c c0x0000 (---------------) + I kobierzyce + 0x0028c749, // n0x1a8d c0x0000 (---------------) + I kolobrzeg + 0x002c8d85, // n0x1a8e c0x0000 (---------------) + I konin + 0x002c970a, // n0x1a8f c0x0000 (---------------) + I konskowola + 0x000eac86, // n0x1a90 c0x0000 (---------------) + krakow + 0x002bab45, // n0x1a91 c0x0000 (---------------) + I kutno + 0x00368bc4, // n0x1a92 c0x0000 (---------------) + I lapy + 0x00271a06, // n0x1a93 c0x0000 (---------------) + I lebork + 0x0037c607, // n0x1a94 c0x0000 (---------------) + I legnica + 0x00251647, // n0x1a95 c0x0000 (---------------) + I lezajsk + 0x002551c8, // n0x1a96 c0x0000 (---------------) + I limanowa + 0x00214305, // n0x1a97 c0x0000 (---------------) + I lomza + 0x003521c6, // n0x1a98 c0x0000 (---------------) + I lowicz + 0x00398d05, // n0x1a99 c0x0000 (---------------) + I lubin + 0x003a42c5, // n0x1a9a c0x0000 (---------------) + I lukow + 0x0021b5c4, // n0x1a9b c0x0000 (---------------) + I mail + 0x002f2587, // n0x1a9c c0x0000 (---------------) + I malbork + 0x0030764a, // n0x1a9d c0x0000 (---------------) + I malopolska + 0x0020b848, // n0x1a9e c0x0000 (---------------) + I mazowsze + 0x002ee9c6, // n0x1a9f c0x0000 (---------------) + I mazury + 0x00013ac3, // n0x1aa0 c0x0000 (---------------) + med + 0x003025c5, // n0x1aa1 c0x0000 (---------------) + I media + 0x00232006, // n0x1aa2 c0x0000 (---------------) + I miasta + 0x003a5006, // n0x1aa3 c0x0000 (---------------) + I mielec + 0x00332046, // n0x1aa4 c0x0000 (---------------) + I mielno + 0x00209003, // n0x1aa5 c0x0000 (---------------) + I mil + 0x00380e47, // n0x1aa6 c0x0000 (---------------) + I mragowo + 0x0039c485, // n0x1aa7 c0x0000 (---------------) + I naklo + 0x0021fe03, // n0x1aa8 c0x0000 (---------------) + I net + 0x0039dfcd, // n0x1aa9 c0x0000 (---------------) + I nieruchomosci + 0x00201483, // n0x1aaa c0x0000 (---------------) + I nom + 0x002552c8, // n0x1aab c0x0000 (---------------) + I nowaruda + 
0x002158c4, // n0x1aac c0x0000 (---------------) + I nysa + 0x00276d05, // n0x1aad c0x0000 (---------------) + I olawa + 0x002a4c86, // n0x1aae c0x0000 (---------------) + I olecko + 0x0023d406, // n0x1aaf c0x0000 (---------------) + I olkusz + 0x00219a47, // n0x1ab0 c0x0000 (---------------) + I olsztyn + 0x0023d707, // n0x1ab1 c0x0000 (---------------) + I opoczno + 0x00251585, // n0x1ab2 c0x0000 (---------------) + I opole + 0x0022d1c3, // n0x1ab3 c0x0000 (---------------) + I org + 0x00384947, // n0x1ab4 c0x0000 (---------------) + I ostroda + 0x002c7689, // n0x1ab5 c0x0000 (---------------) + I ostroleka + 0x0020ad49, // n0x1ab6 c0x0000 (---------------) + I ostrowiec + 0x0020d58a, // n0x1ab7 c0x0000 (---------------) + I ostrowwlkp + 0x00248182, // n0x1ab8 c0x0000 (---------------) + I pc + 0x00320584, // n0x1ab9 c0x0000 (---------------) + I pila + 0x002d7404, // n0x1aba c0x0000 (---------------) + I pisz + 0x00219e47, // n0x1abb c0x0000 (---------------) + I podhale + 0x00243388, // n0x1abc c0x0000 (---------------) + I podlasie + 0x002de7c9, // n0x1abd c0x0000 (---------------) + I polkowice + 0x00209609, // n0x1abe c0x0000 (---------------) + I pomorskie + 0x002df1c7, // n0x1abf c0x0000 (---------------) + I pomorze + 0x00248e46, // n0x1ac0 c0x0000 (---------------) + I powiat + 0x000e0986, // n0x1ac1 c0x0000 (---------------) + poznan + 0x002e1c44, // n0x1ac2 c0x0000 (---------------) + I priv + 0x002e1dca, // n0x1ac3 c0x0000 (---------------) + I prochowice + 0x002e4548, // n0x1ac4 c0x0000 (---------------) + I pruszkow + 0x002e51c9, // n0x1ac5 c0x0000 (---------------) + I przeworsk + 0x00296686, // n0x1ac6 c0x0000 (---------------) + I pulawy + 0x00339e45, // n0x1ac7 c0x0000 (---------------) + I radom + 0x0020b708, // n0x1ac8 c0x0000 (---------------) + I rawa-maz + 0x002c238a, // n0x1ac9 c0x0000 (---------------) + I realestate + 0x00285b43, // n0x1aca c0x0000 (---------------) + I rel + 0x0033cf06, // n0x1acb c0x0000 (---------------) + I rybnik + 
0x002df2c7, // n0x1acc c0x0000 (---------------) + I rzeszow + 0x0020f745, // n0x1acd c0x0000 (---------------) + I sanok + 0x002224c5, // n0x1ace c0x0000 (---------------) + I sejny + 0x00247603, // n0x1acf c0x0000 (---------------) + I sex + 0x00352004, // n0x1ad0 c0x0000 (---------------) + I shop + 0x00374ec5, // n0x1ad1 c0x0000 (---------------) + I sklep + 0x00284fc7, // n0x1ad2 c0x0000 (---------------) + I skoczow + 0x002e2b05, // n0x1ad3 c0x0000 (---------------) + I slask + 0x002d51c6, // n0x1ad4 c0x0000 (---------------) + I slupsk + 0x000f3985, // n0x1ad5 c0x0000 (---------------) + sopot + 0x0021f483, // n0x1ad6 c0x0000 (---------------) + I sos + 0x002b5889, // n0x1ad7 c0x0000 (---------------) + I sosnowiec + 0x00276acc, // n0x1ad8 c0x0000 (---------------) + I stalowa-wola + 0x002a11cc, // n0x1ad9 c0x0000 (---------------) + I starachowice + 0x002c92c8, // n0x1ada c0x0000 (---------------) + I stargard + 0x0027b447, // n0x1adb c0x0000 (---------------) + I suwalki + 0x002f0a08, // n0x1adc c0x0000 (---------------) + I swidnica + 0x002f100a, // n0x1add c0x0000 (---------------) + I swiebodzin + 0x002f198b, // n0x1ade c0x0000 (---------------) + I swinoujscie + 0x002ce388, // n0x1adf c0x0000 (---------------) + I szczecin + 0x00345588, // n0x1ae0 c0x0000 (---------------) + I szczytno + 0x00293346, // n0x1ae1 c0x0000 (---------------) + I szkola + 0x00357a85, // n0x1ae2 c0x0000 (---------------) + I targi + 0x00249c0a, // n0x1ae3 c0x0000 (---------------) + I tarnobrzeg + 0x00220b05, // n0x1ae4 c0x0000 (---------------) + I tgory + 0x00200142, // n0x1ae5 c0x0000 (---------------) + I tm + 0x002c0507, // n0x1ae6 c0x0000 (---------------) + I tourism + 0x0029bec6, // n0x1ae7 c0x0000 (---------------) + I travel + 0x00352bc5, // n0x1ae8 c0x0000 (---------------) + I turek + 0x0037da09, // n0x1ae9 c0x0000 (---------------) + I turystyka + 0x0031fd85, // n0x1aea c0x0000 (---------------) + I tychy + 0x00291985, // n0x1aeb c0x0000 (---------------) + I 
ustka + 0x00320189, // n0x1aec c0x0000 (---------------) + I walbrzych + 0x00231e86, // n0x1aed c0x0000 (---------------) + I warmia + 0x0023ee48, // n0x1aee c0x0000 (---------------) + I warszawa + 0x0025a843, // n0x1aef c0x0000 (---------------) + I waw + 0x0020fcc6, // n0x1af0 c0x0000 (---------------) + I wegrow + 0x00275246, // n0x1af1 c0x0000 (---------------) + I wielun + 0x002fff45, // n0x1af2 c0x0000 (---------------) + I wlocl + 0x002fff49, // n0x1af3 c0x0000 (---------------) + I wloclawek + 0x002b0d09, // n0x1af4 c0x0000 (---------------) + I wodzislaw + 0x0024e547, // n0x1af5 c0x0000 (---------------) + I wolomin + 0x000ffdc4, // n0x1af6 c0x0000 (---------------) + wroc + 0x002ffdc7, // n0x1af7 c0x0000 (---------------) + I wroclaw + 0x00209509, // n0x1af8 c0x0000 (---------------) + I zachpomor + 0x0021e405, // n0x1af9 c0x0000 (---------------) + I zagan + 0x00138408, // n0x1afa c0x0000 (---------------) + zakopane + 0x0032f305, // n0x1afb c0x0000 (---------------) + I zarow + 0x0021fec5, // n0x1afc c0x0000 (---------------) + I zgora + 0x0022df89, // n0x1afd c0x0000 (---------------) + I zgorzelec + 0x00200d02, // n0x1afe c0x0000 (---------------) + I ap + 0x00351ac4, // n0x1aff c0x0000 (---------------) + I griw + 0x00206902, // n0x1b00 c0x0000 (---------------) + I ic + 0x002006c2, // n0x1b01 c0x0000 (---------------) + I is + 0x002717c5, // n0x1b02 c0x0000 (---------------) + I kmpsp + 0x002cc908, // n0x1b03 c0x0000 (---------------) + I konsulat + 0x00371a05, // n0x1b04 c0x0000 (---------------) + I kppsp + 0x002bd183, // n0x1b05 c0x0000 (---------------) + I kwp + 0x002bd185, // n0x1b06 c0x0000 (---------------) + I kwpsp + 0x002cc4c3, // n0x1b07 c0x0000 (---------------) + I mup + 0x0020fc82, // n0x1b08 c0x0000 (---------------) + I mw + 0x00268444, // n0x1b09 c0x0000 (---------------) + I oirm + 0x002e6d03, // n0x1b0a c0x0000 (---------------) + I oum + 0x0020ac42, // n0x1b0b c0x0000 (---------------) + I pa + 0x002dd7c4, // n0x1b0c c0x0000 
(---------------) + I pinb + 0x002d8443, // n0x1b0d c0x0000 (---------------) + I piw + 0x00200942, // n0x1b0e c0x0000 (---------------) + I po + 0x00209343, // n0x1b0f c0x0000 (---------------) + I psp + 0x0028be44, // n0x1b10 c0x0000 (---------------) + I psse + 0x002b4bc3, // n0x1b11 c0x0000 (---------------) + I pup + 0x00242684, // n0x1b12 c0x0000 (---------------) + I rzgw + 0x002004c2, // n0x1b13 c0x0000 (---------------) + I sa + 0x00271443, // n0x1b14 c0x0000 (---------------) + I sdn + 0x00216343, // n0x1b15 c0x0000 (---------------) + I sko + 0x00205682, // n0x1b16 c0x0000 (---------------) + I so + 0x0033b802, // n0x1b17 c0x0000 (---------------) + I sr + 0x002b0b49, // n0x1b18 c0x0000 (---------------) + I starostwo + 0x00201cc2, // n0x1b19 c0x0000 (---------------) + I ug + 0x00288444, // n0x1b1a c0x0000 (---------------) + I ugim + 0x00204bc2, // n0x1b1b c0x0000 (---------------) + I um + 0x0020b584, // n0x1b1c c0x0000 (---------------) + I umig + 0x00248e04, // n0x1b1d c0x0000 (---------------) + I upow + 0x002e3944, // n0x1b1e c0x0000 (---------------) + I uppo + 0x00202382, // n0x1b1f c0x0000 (---------------) + I us + 0x00243e82, // n0x1b20 c0x0000 (---------------) + I uw + 0x00211343, // n0x1b21 c0x0000 (---------------) + I uzs + 0x002f1603, // n0x1b22 c0x0000 (---------------) + I wif + 0x00245244, // n0x1b23 c0x0000 (---------------) + I wiih + 0x0025d1c4, // n0x1b24 c0x0000 (---------------) + I winb + 0x002c7604, // n0x1b25 c0x0000 (---------------) + I wios + 0x002c9604, // n0x1b26 c0x0000 (---------------) + I witd + 0x002ff443, // n0x1b27 c0x0000 (---------------) + I wiw + 0x002f65c3, // n0x1b28 c0x0000 (---------------) + I wsa + 0x002eac04, // n0x1b29 c0x0000 (---------------) + I wskr + 0x003009c4, // n0x1b2a c0x0000 (---------------) + I wuoz + 0x00300cc6, // n0x1b2b c0x0000 (---------------) + I wzmiuw + 0x00264682, // n0x1b2c c0x0000 (---------------) + I zp + 0x00200742, // n0x1b2d c0x0000 (---------------) + I co + 0x0023a783, 
// n0x1b2e c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1b2f c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1b30 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1b31 c0x0000 (---------------) + I org + 0x00201542, // n0x1b32 c0x0000 (---------------) + I ac + 0x00330b83, // n0x1b33 c0x0000 (---------------) + I biz + 0x00233503, // n0x1b34 c0x0000 (---------------) + I com + 0x0023a783, // n0x1b35 c0x0000 (---------------) + I edu + 0x00202703, // n0x1b36 c0x0000 (---------------) + I est + 0x0026cc83, // n0x1b37 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1b38 c0x0000 (---------------) + I info + 0x002b0e04, // n0x1b39 c0x0000 (---------------) + I isla + 0x00205284, // n0x1b3a c0x0000 (---------------) + I name + 0x0021fe03, // n0x1b3b c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1b3c c0x0000 (---------------) + I org + 0x00220e43, // n0x1b3d c0x0000 (---------------) + I pro + 0x002e2484, // n0x1b3e c0x0000 (---------------) + I prof + 0x00355603, // n0x1b3f c0x0000 (---------------) + I aaa + 0x002b3e43, // n0x1b40 c0x0000 (---------------) + I aca + 0x0033f204, // n0x1b41 c0x0000 (---------------) + I acct + 0x0032f106, // n0x1b42 c0x0000 (---------------) + I avocat + 0x00202003, // n0x1b43 c0x0000 (---------------) + I bar + 0x00216d43, // n0x1b44 c0x0000 (---------------) + I cpa + 0x00213703, // n0x1b45 c0x0000 (---------------) + I eng + 0x002af443, // n0x1b46 c0x0000 (---------------) + I jur + 0x00274483, // n0x1b47 c0x0000 (---------------) + I law + 0x00213ac3, // n0x1b48 c0x0000 (---------------) + I med + 0x0022a5c5, // n0x1b49 c0x0000 (---------------) + I recht + 0x00233503, // n0x1b4a c0x0000 (---------------) + I com + 0x0023a783, // n0x1b4b c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1b4c c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1b4d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1b4e c0x0000 (---------------) + I org + 0x002db8c3, // n0x1b4f c0x0000 (---------------) + I 
plo + 0x00235d43, // n0x1b50 c0x0000 (---------------) + I sec + 0x000ffa08, // n0x1b51 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1b52 c0x0000 (---------------) + I com + 0x0023a783, // n0x1b53 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1b54 c0x0000 (---------------) + I gov + 0x00201603, // n0x1b55 c0x0000 (---------------) + I int + 0x0021fe03, // n0x1b56 c0x0000 (---------------) + I net + 0x00242044, // n0x1b57 c0x0000 (---------------) + I nome + 0x0022d1c3, // n0x1b58 c0x0000 (---------------) + I org + 0x0029f744, // n0x1b59 c0x0000 (---------------) + I publ + 0x002b7cc5, // n0x1b5a c0x0000 (---------------) + I belau + 0x00200742, // n0x1b5b c0x0000 (---------------) + I co + 0x00202602, // n0x1b5c c0x0000 (---------------) + I ed + 0x00202d42, // n0x1b5d c0x0000 (---------------) + I go + 0x00202c02, // n0x1b5e c0x0000 (---------------) + I ne + 0x00200282, // n0x1b5f c0x0000 (---------------) + I or + 0x00233503, // n0x1b60 c0x0000 (---------------) + I com + 0x0023d684, // n0x1b61 c0x0000 (---------------) + I coop + 0x0023a783, // n0x1b62 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1b63 c0x0000 (---------------) + I gov + 0x00209003, // n0x1b64 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1b65 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1b66 c0x0000 (---------------) + I org + 0x000ffa08, // n0x1b67 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1b68 c0x0000 (---------------) + I com + 0x0023a783, // n0x1b69 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1b6a c0x0000 (---------------) + I gov + 0x00209003, // n0x1b6b c0x0000 (---------------) + I mil + 0x00205284, // n0x1b6c c0x0000 (---------------) + I name + 0x0021fe03, // n0x1b6d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1b6e c0x0000 (---------------) + I org + 0x00217443, // n0x1b6f c0x0000 (---------------) + I sch + 0x002d4884, // n0x1b70 c0x0000 (---------------) + I asso + 0x000ffa08, // n0x1b71 c0x0000 
(---------------) + blogspot + 0x00233503, // n0x1b72 c0x0000 (---------------) + I com + 0x00201483, // n0x1b73 c0x0000 (---------------) + I nom + 0x0024bf84, // n0x1b74 c0x0000 (---------------) + I arts + 0x000ffa08, // n0x1b75 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1b76 c0x0000 (---------------) + I com + 0x0024d9c4, // n0x1b77 c0x0000 (---------------) + I firm + 0x003a1244, // n0x1b78 c0x0000 (---------------) + I info + 0x00201483, // n0x1b79 c0x0000 (---------------) + I nom + 0x002009c2, // n0x1b7a c0x0000 (---------------) + I nt + 0x0022d1c3, // n0x1b7b c0x0000 (---------------) + I org + 0x0022a5c3, // n0x1b7c c0x0000 (---------------) + I rec + 0x00391185, // n0x1b7d c0x0000 (---------------) + I store + 0x00200142, // n0x1b7e c0x0000 (---------------) + I tm + 0x00300b03, // n0x1b7f c0x0000 (---------------) + I www + 0x00201542, // n0x1b80 c0x0000 (---------------) + I ac + 0x000ffa08, // n0x1b81 c0x0000 (---------------) + blogspot + 0x00200742, // n0x1b82 c0x0000 (---------------) + I co + 0x0023a783, // n0x1b83 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1b84 c0x0000 (---------------) + I gov + 0x002013c2, // n0x1b85 c0x0000 (---------------) + I in + 0x0022d1c3, // n0x1b86 c0x0000 (---------------) + I org + 0x00201542, // n0x1b87 c0x0000 (---------------) + I ac + 0x003a6207, // n0x1b88 c0x0000 (---------------) + I adygeya + 0x00289d85, // n0x1b89 c0x0000 (---------------) + I altai + 0x002937c4, // n0x1b8a c0x0000 (---------------) + I amur + 0x002e75c6, // n0x1b8b c0x0000 (---------------) + I amursk + 0x0023668b, // n0x1b8c c0x0000 (---------------) + I arkhangelsk + 0x0025a5c9, // n0x1b8d c0x0000 (---------------) + I astrakhan + 0x003453c6, // n0x1b8e c0x0000 (---------------) + I baikal + 0x003269c9, // n0x1b8f c0x0000 (---------------) + I bashkiria + 0x002d2e48, // n0x1b90 c0x0000 (---------------) + I belgorod + 0x002042c3, // n0x1b91 c0x0000 (---------------) + I bir + 0x000ffa08, // n0x1b92 c0x0000 
(---------------) + blogspot + 0x00227447, // n0x1b93 c0x0000 (---------------) + I bryansk + 0x0034d388, // n0x1b94 c0x0000 (---------------) + I buryatia + 0x002ee443, // n0x1b95 c0x0000 (---------------) + I cbg + 0x002648c4, // n0x1b96 c0x0000 (---------------) + I chel + 0x0026670b, // n0x1b97 c0x0000 (---------------) + I chelyabinsk + 0x002ac6c5, // n0x1b98 c0x0000 (---------------) + I chita + 0x002bc248, // n0x1b99 c0x0000 (---------------) + I chukotka + 0x00335889, // n0x1b9a c0x0000 (---------------) + I chuvashia + 0x0025d143, // n0x1b9b c0x0000 (---------------) + I cmw + 0x00233503, // n0x1b9c c0x0000 (---------------) + I com + 0x00202648, // n0x1b9d c0x0000 (---------------) + I dagestan + 0x002ef847, // n0x1b9e c0x0000 (---------------) + I dudinka + 0x002eccc6, // n0x1b9f c0x0000 (---------------) + I e-burg + 0x0023a783, // n0x1ba0 c0x0000 (---------------) + I edu + 0x003869c7, // n0x1ba1 c0x0000 (---------------) + I fareast + 0x0026cc83, // n0x1ba2 c0x0000 (---------------) + I gov + 0x00312746, // n0x1ba3 c0x0000 (---------------) + I grozny + 0x00201603, // n0x1ba4 c0x0000 (---------------) + I int + 0x00230d87, // n0x1ba5 c0x0000 (---------------) + I irkutsk + 0x0027d407, // n0x1ba6 c0x0000 (---------------) + I ivanovo + 0x003879c7, // n0x1ba7 c0x0000 (---------------) + I izhevsk + 0x002f2505, // n0x1ba8 c0x0000 (---------------) + I jamal + 0x00206543, // n0x1ba9 c0x0000 (---------------) + I jar + 0x0020a7cb, // n0x1baa c0x0000 (---------------) + I joshkar-ola + 0x00309908, // n0x1bab c0x0000 (---------------) + I k-uralsk + 0x00226e48, // n0x1bac c0x0000 (---------------) + I kalmykia + 0x002509c6, // n0x1bad c0x0000 (---------------) + I kaluga + 0x0022ab89, // n0x1bae c0x0000 (---------------) + I kamchatka + 0x00327ec7, // n0x1baf c0x0000 (---------------) + I karelia + 0x002f9b85, // n0x1bb0 c0x0000 (---------------) + I kazan + 0x00379b44, // n0x1bb1 c0x0000 (---------------) + I kchr + 0x00275f48, // n0x1bb2 c0x0000 
(---------------) + I kemerovo + 0x0023f40a, // n0x1bb3 c0x0000 (---------------) + I khabarovsk + 0x0023f649, // n0x1bb4 c0x0000 (---------------) + I khakassia + 0x002517c3, // n0x1bb5 c0x0000 (---------------) + I khv + 0x0027ea45, // n0x1bb6 c0x0000 (---------------) + I kirov + 0x0033bcc3, // n0x1bb7 c0x0000 (---------------) + I kms + 0x002ab0c6, // n0x1bb8 c0x0000 (---------------) + I koenig + 0x0039a004, // n0x1bb9 c0x0000 (---------------) + I komi + 0x002fc3c8, // n0x1bba c0x0000 (---------------) + I kostroma + 0x00387b4b, // n0x1bbb c0x0000 (---------------) + I krasnoyarsk + 0x0033bb85, // n0x1bbc c0x0000 (---------------) + I kuban + 0x002b7a46, // n0x1bbd c0x0000 (---------------) + I kurgan + 0x002b9785, // n0x1bbe c0x0000 (---------------) + I kursk + 0x002b9cc8, // n0x1bbf c0x0000 (---------------) + I kustanai + 0x002bac87, // n0x1bc0 c0x0000 (---------------) + I kuzbass + 0x00207707, // n0x1bc1 c0x0000 (---------------) + I lipetsk + 0x00223c47, // n0x1bc2 c0x0000 (---------------) + I magadan + 0x0021e6c4, // n0x1bc3 c0x0000 (---------------) + I mari + 0x0021e6c7, // n0x1bc4 c0x0000 (---------------) + I mari-el + 0x0027bc46, // n0x1bc5 c0x0000 (---------------) + I marine + 0x00209003, // n0x1bc6 c0x0000 (---------------) + I mil + 0x002c55c8, // n0x1bc7 c0x0000 (---------------) + I mordovia + 0x00253243, // n0x1bc8 c0x0000 (---------------) + I msk + 0x002ccc88, // n0x1bc9 c0x0000 (---------------) + I murmansk + 0x002d2485, // n0x1bca c0x0000 (---------------) + I mytis + 0x0031a348, // n0x1bcb c0x0000 (---------------) + I nakhodka + 0x0023a987, // n0x1bcc c0x0000 (---------------) + I nalchik + 0x0021fe03, // n0x1bcd c0x0000 (---------------) + I net + 0x00392a03, // n0x1bce c0x0000 (---------------) + I nkz + 0x00289404, // n0x1bcf c0x0000 (---------------) + I nnov + 0x00374d87, // n0x1bd0 c0x0000 (---------------) + I norilsk + 0x002058c3, // n0x1bd1 c0x0000 (---------------) + I nov + 0x0027d4cb, // n0x1bd2 c0x0000 
(---------------) + I novosibirsk + 0x00216303, // n0x1bd3 c0x0000 (---------------) + I nsk + 0x00253204, // n0x1bd4 c0x0000 (---------------) + I omsk + 0x00391208, // n0x1bd5 c0x0000 (---------------) + I orenburg + 0x0022d1c3, // n0x1bd6 c0x0000 (---------------) + I org + 0x002d6e85, // n0x1bd7 c0x0000 (---------------) + I oryol + 0x00296c85, // n0x1bd8 c0x0000 (---------------) + I oskol + 0x0039c386, // n0x1bd9 c0x0000 (---------------) + I palana + 0x00212c85, // n0x1bda c0x0000 (---------------) + I penza + 0x002d2844, // n0x1bdb c0x0000 (---------------) + I perm + 0x00209302, // n0x1bdc c0x0000 (---------------) + I pp + 0x002e5483, // n0x1bdd c0x0000 (---------------) + I ptz + 0x00368c4a, // n0x1bde c0x0000 (---------------) + I pyatigorsk + 0x003907c3, // n0x1bdf c0x0000 (---------------) + I rnd + 0x002d1989, // n0x1be0 c0x0000 (---------------) + I rubtsovsk + 0x00357446, // n0x1be1 c0x0000 (---------------) + I ryazan + 0x0021ac48, // n0x1be2 c0x0000 (---------------) + I sakhalin + 0x0028b986, // n0x1be3 c0x0000 (---------------) + I samara + 0x002257c7, // n0x1be4 c0x0000 (---------------) + I saratov + 0x002cc748, // n0x1be5 c0x0000 (---------------) + I simbirsk + 0x002d6648, // n0x1be6 c0x0000 (---------------) + I smolensk + 0x002e0f83, // n0x1be7 c0x0000 (---------------) + I snz + 0x00271883, // n0x1be8 c0x0000 (---------------) + I spb + 0x00225b89, // n0x1be9 c0x0000 (---------------) + I stavropol + 0x002f4c43, // n0x1bea c0x0000 (---------------) + I stv + 0x00341846, // n0x1beb c0x0000 (---------------) + I surgut + 0x00289a06, // n0x1bec c0x0000 (---------------) + I syzran + 0x00314f06, // n0x1bed c0x0000 (---------------) + I tambov + 0x0036cc49, // n0x1bee c0x0000 (---------------) + I tatarstan + 0x002ff844, // n0x1bef c0x0000 (---------------) + I test + 0x0020bf43, // n0x1bf0 c0x0000 (---------------) + I tom + 0x00309805, // n0x1bf1 c0x0000 (---------------) + I tomsk + 0x0030b209, // n0x1bf2 c0x0000 (---------------) + I 
tsaritsyn + 0x00207803, // n0x1bf3 c0x0000 (---------------) + I tsk + 0x00359bc4, // n0x1bf4 c0x0000 (---------------) + I tula + 0x002f3304, // n0x1bf5 c0x0000 (---------------) + I tuva + 0x00360784, // n0x1bf6 c0x0000 (---------------) + I tver + 0x0031ac06, // n0x1bf7 c0x0000 (---------------) + I tyumen + 0x0020fa43, // n0x1bf8 c0x0000 (---------------) + I udm + 0x0020fa48, // n0x1bf9 c0x0000 (---------------) + I udmurtia + 0x0025a208, // n0x1bfa c0x0000 (---------------) + I ulan-ude + 0x0035ca86, // n0x1bfb c0x0000 (---------------) + I vdonsk + 0x002f998b, // n0x1bfc c0x0000 (---------------) + I vladikavkaz + 0x002f9cc8, // n0x1bfd c0x0000 (---------------) + I vladimir + 0x002f9ecb, // n0x1bfe c0x0000 (---------------) + I vladivostok + 0x002fca09, // n0x1bff c0x0000 (---------------) + I volgograd + 0x002fc187, // n0x1c00 c0x0000 (---------------) + I vologda + 0x002fce88, // n0x1c01 c0x0000 (---------------) + I voronezh + 0x002fee83, // n0x1c02 c0x0000 (---------------) + I vrn + 0x0039fc06, // n0x1c03 c0x0000 (---------------) + I vyatka + 0x0020f507, // n0x1c04 c0x0000 (---------------) + I yakutia + 0x00298b45, // n0x1c05 c0x0000 (---------------) + I yamal + 0x00346f49, // n0x1c06 c0x0000 (---------------) + I yaroslavl + 0x0030fd4d, // n0x1c07 c0x0000 (---------------) + I yekaterinburg + 0x0021aa91, // n0x1c08 c0x0000 (---------------) + I yuzhno-sakhalinsk + 0x0023d545, // n0x1c09 c0x0000 (---------------) + I zgrad + 0x00201542, // n0x1c0a c0x0000 (---------------) + I ac + 0x00200742, // n0x1c0b c0x0000 (---------------) + I co + 0x00233503, // n0x1c0c c0x0000 (---------------) + I com + 0x0023a783, // n0x1c0d c0x0000 (---------------) + I edu + 0x0033d7c4, // n0x1c0e c0x0000 (---------------) + I gouv + 0x0026cc83, // n0x1c0f c0x0000 (---------------) + I gov + 0x00201603, // n0x1c10 c0x0000 (---------------) + I int + 0x00209003, // n0x1c11 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1c12 c0x0000 (---------------) + I net + 
0x00233503, // n0x1c13 c0x0000 (---------------) + I com + 0x0023a783, // n0x1c14 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1c15 c0x0000 (---------------) + I gov + 0x00213ac3, // n0x1c16 c0x0000 (---------------) + I med + 0x0021fe03, // n0x1c17 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c18 c0x0000 (---------------) + I org + 0x00297403, // n0x1c19 c0x0000 (---------------) + I pub + 0x00217443, // n0x1c1a c0x0000 (---------------) + I sch + 0x00233503, // n0x1c1b c0x0000 (---------------) + I com + 0x0023a783, // n0x1c1c c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1c1d c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1c1e c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c1f c0x0000 (---------------) + I org + 0x00233503, // n0x1c20 c0x0000 (---------------) + I com + 0x0023a783, // n0x1c21 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1c22 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1c23 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c24 c0x0000 (---------------) + I org + 0x00233503, // n0x1c25 c0x0000 (---------------) + I com + 0x0023a783, // n0x1c26 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1c27 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1c28 c0x0000 (---------------) + I info + 0x00213ac3, // n0x1c29 c0x0000 (---------------) + I med + 0x0021fe03, // n0x1c2a c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c2b c0x0000 (---------------) + I org + 0x00224e42, // n0x1c2c c0x0000 (---------------) + I tv + 0x00200101, // n0x1c2d c0x0000 (---------------) + I a + 0x00201542, // n0x1c2e c0x0000 (---------------) + I ac + 0x00200001, // n0x1c2f c0x0000 (---------------) + I b + 0x003129c2, // n0x1c30 c0x0000 (---------------) + I bd + 0x000ffa08, // n0x1c31 c0x0000 (---------------) + blogspot + 0x0021ca45, // n0x1c32 c0x0000 (---------------) + I brand + 0x00200301, // n0x1c33 c0x0000 (---------------) + I c + 0x00033503, // n0x1c34 c0x0000 (---------------) + com 
+ 0x00200381, // n0x1c35 c0x0000 (---------------) + I d + 0x00200081, // n0x1c36 c0x0000 (---------------) + I e + 0x00200581, // n0x1c37 c0x0000 (---------------) + I f + 0x0023f342, // n0x1c38 c0x0000 (---------------) + I fh + 0x0023f344, // n0x1c39 c0x0000 (---------------) + I fhsk + 0x00363543, // n0x1c3a c0x0000 (---------------) + I fhv + 0x00200c81, // n0x1c3b c0x0000 (---------------) + I g + 0x00200d81, // n0x1c3c c0x0000 (---------------) + I h + 0x00200041, // n0x1c3d c0x0000 (---------------) + I i + 0x00200fc1, // n0x1c3e c0x0000 (---------------) + I k + 0x002e9647, // n0x1c3f c0x0000 (---------------) + I komforb + 0x002d69cf, // n0x1c40 c0x0000 (---------------) + I kommunalforbund + 0x002da946, // n0x1c41 c0x0000 (---------------) + I komvux + 0x00200201, // n0x1c42 c0x0000 (---------------) + I l + 0x0026a306, // n0x1c43 c0x0000 (---------------) + I lanbib + 0x00200181, // n0x1c44 c0x0000 (---------------) + I m + 0x00200541, // n0x1c45 c0x0000 (---------------) + I n + 0x00325ace, // n0x1c46 c0x0000 (---------------) + I naturbruksgymn + 0x00200281, // n0x1c47 c0x0000 (---------------) + I o + 0x0022d1c3, // n0x1c48 c0x0000 (---------------) + I org + 0x00200941, // n0x1c49 c0x0000 (---------------) + I p + 0x002a3745, // n0x1c4a c0x0000 (---------------) + I parti + 0x00209302, // n0x1c4b c0x0000 (---------------) + I pp + 0x00247505, // n0x1c4c c0x0000 (---------------) + I press + 0x002002c1, // n0x1c4d c0x0000 (---------------) + I r + 0x002004c1, // n0x1c4e c0x0000 (---------------) + I s + 0x00200141, // n0x1c4f c0x0000 (---------------) + I t + 0x00200142, // n0x1c50 c0x0000 (---------------) + I tm + 0x00200441, // n0x1c51 c0x0000 (---------------) + I u + 0x002010c1, // n0x1c52 c0x0000 (---------------) + I w + 0x00205381, // n0x1c53 c0x0000 (---------------) + I x + 0x00201841, // n0x1c54 c0x0000 (---------------) + I y + 0x00205f81, // n0x1c55 c0x0000 (---------------) + I z + 0x000ffa08, // n0x1c56 c0x0000 (---------------) + 
blogspot + 0x00233503, // n0x1c57 c0x0000 (---------------) + I com + 0x0023a783, // n0x1c58 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1c59 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1c5a c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c5b c0x0000 (---------------) + I org + 0x00220f03, // n0x1c5c c0x0000 (---------------) + I per + 0x00233503, // n0x1c5d c0x0000 (---------------) + I com + 0x0026cc83, // n0x1c5e c0x0000 (---------------) + I gov + 0x0008c288, // n0x1c5f c0x0000 (---------------) + hashbang + 0x00209003, // n0x1c60 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1c61 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c62 c0x0000 (---------------) + I org + 0x014da508, // n0x1c63 c0x0005 (---------------)* o platform + 0x000ffa08, // n0x1c64 c0x0000 (---------------) + blogspot + 0x000ffa08, // n0x1c65 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1c66 c0x0000 (---------------) + I com + 0x0023a783, // n0x1c67 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1c68 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1c69 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c6a c0x0000 (---------------) + I org + 0x002011c3, // n0x1c6b c0x0000 (---------------) + I art + 0x000ffa08, // n0x1c6c c0x0000 (---------------) + blogspot + 0x00233503, // n0x1c6d c0x0000 (---------------) + I com + 0x0023a783, // n0x1c6e c0x0000 (---------------) + I edu + 0x0033d7c4, // n0x1c6f c0x0000 (---------------) + I gouv + 0x0022d1c3, // n0x1c70 c0x0000 (---------------) + I org + 0x00295005, // n0x1c71 c0x0000 (---------------) + I perso + 0x00320a04, // n0x1c72 c0x0000 (---------------) + I univ + 0x00233503, // n0x1c73 c0x0000 (---------------) + I com + 0x0021fe03, // n0x1c74 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c75 c0x0000 (---------------) + I org + 0x00200742, // n0x1c76 c0x0000 (---------------) + I co + 0x00233503, // n0x1c77 c0x0000 (---------------) + I com + 0x00237249, // n0x1c78 
c0x0000 (---------------) + I consulado + 0x0023a783, // n0x1c79 c0x0000 (---------------) + I edu + 0x00242d89, // n0x1c7a c0x0000 (---------------) + I embaixada + 0x0026cc83, // n0x1c7b c0x0000 (---------------) + I gov + 0x00209003, // n0x1c7c c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1c7d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c7e c0x0000 (---------------) + I org + 0x002e1a48, // n0x1c7f c0x0000 (---------------) + I principe + 0x00215f87, // n0x1c80 c0x0000 (---------------) + I saotome + 0x00391185, // n0x1c81 c0x0000 (---------------) + I store + 0x003a6207, // n0x1c82 c0x0000 (---------------) + I adygeya + 0x0023668b, // n0x1c83 c0x0000 (---------------) + I arkhangelsk + 0x0020d908, // n0x1c84 c0x0000 (---------------) + I balashov + 0x003269c9, // n0x1c85 c0x0000 (---------------) + I bashkiria + 0x00227447, // n0x1c86 c0x0000 (---------------) + I bryansk + 0x00202648, // n0x1c87 c0x0000 (---------------) + I dagestan + 0x00312746, // n0x1c88 c0x0000 (---------------) + I grozny + 0x0027d407, // n0x1c89 c0x0000 (---------------) + I ivanovo + 0x00226e48, // n0x1c8a c0x0000 (---------------) + I kalmykia + 0x002509c6, // n0x1c8b c0x0000 (---------------) + I kaluga + 0x00327ec7, // n0x1c8c c0x0000 (---------------) + I karelia + 0x0023f649, // n0x1c8d c0x0000 (---------------) + I khakassia + 0x0037f849, // n0x1c8e c0x0000 (---------------) + I krasnodar + 0x002b7a46, // n0x1c8f c0x0000 (---------------) + I kurgan + 0x002b8945, // n0x1c90 c0x0000 (---------------) + I lenug + 0x002c55c8, // n0x1c91 c0x0000 (---------------) + I mordovia + 0x00253243, // n0x1c92 c0x0000 (---------------) + I msk + 0x002ccc88, // n0x1c93 c0x0000 (---------------) + I murmansk + 0x0023a987, // n0x1c94 c0x0000 (---------------) + I nalchik + 0x002058c3, // n0x1c95 c0x0000 (---------------) + I nov + 0x00229c07, // n0x1c96 c0x0000 (---------------) + I obninsk + 0x00212c85, // n0x1c97 c0x0000 (---------------) + I penza + 0x002dd148, // n0x1c98 
c0x0000 (---------------) + I pokrovsk + 0x00274805, // n0x1c99 c0x0000 (---------------) + I sochi + 0x00271883, // n0x1c9a c0x0000 (---------------) + I spb + 0x0033d1c9, // n0x1c9b c0x0000 (---------------) + I togliatti + 0x002aa587, // n0x1c9c c0x0000 (---------------) + I troitsk + 0x00359bc4, // n0x1c9d c0x0000 (---------------) + I tula + 0x002f3304, // n0x1c9e c0x0000 (---------------) + I tuva + 0x002f998b, // n0x1c9f c0x0000 (---------------) + I vladikavkaz + 0x002f9cc8, // n0x1ca0 c0x0000 (---------------) + I vladimir + 0x002fc187, // n0x1ca1 c0x0000 (---------------) + I vologda + 0x00233503, // n0x1ca2 c0x0000 (---------------) + I com + 0x0023a783, // n0x1ca3 c0x0000 (---------------) + I edu + 0x00213183, // n0x1ca4 c0x0000 (---------------) + I gob + 0x0022d1c3, // n0x1ca5 c0x0000 (---------------) + I org + 0x00244803, // n0x1ca6 c0x0000 (---------------) + I red + 0x0026cc83, // n0x1ca7 c0x0000 (---------------) + I gov + 0x00233503, // n0x1ca8 c0x0000 (---------------) + I com + 0x0023a783, // n0x1ca9 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1caa c0x0000 (---------------) + I gov + 0x00209003, // n0x1cab c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1cac c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1cad c0x0000 (---------------) + I org + 0x00201542, // n0x1cae c0x0000 (---------------) + I ac + 0x00200742, // n0x1caf c0x0000 (---------------) + I co + 0x0022d1c3, // n0x1cb0 c0x0000 (---------------) + I org + 0x000ffa08, // n0x1cb1 c0x0000 (---------------) + blogspot + 0x00201542, // n0x1cb2 c0x0000 (---------------) + I ac + 0x00200742, // n0x1cb3 c0x0000 (---------------) + I co + 0x00202d42, // n0x1cb4 c0x0000 (---------------) + I go + 0x002013c2, // n0x1cb5 c0x0000 (---------------) + I in + 0x00209002, // n0x1cb6 c0x0000 (---------------) + I mi + 0x0021fe03, // n0x1cb7 c0x0000 (---------------) + I net + 0x00200282, // n0x1cb8 c0x0000 (---------------) + I or + 0x00201542, // n0x1cb9 c0x0000 
(---------------) + I ac + 0x00330b83, // n0x1cba c0x0000 (---------------) + I biz + 0x00200742, // n0x1cbb c0x0000 (---------------) + I co + 0x00233503, // n0x1cbc c0x0000 (---------------) + I com + 0x0023a783, // n0x1cbd c0x0000 (---------------) + I edu + 0x00202d42, // n0x1cbe c0x0000 (---------------) + I go + 0x0026cc83, // n0x1cbf c0x0000 (---------------) + I gov + 0x00201603, // n0x1cc0 c0x0000 (---------------) + I int + 0x00209003, // n0x1cc1 c0x0000 (---------------) + I mil + 0x00205284, // n0x1cc2 c0x0000 (---------------) + I name + 0x0021fe03, // n0x1cc3 c0x0000 (---------------) + I net + 0x00218f83, // n0x1cc4 c0x0000 (---------------) + I nic + 0x0022d1c3, // n0x1cc5 c0x0000 (---------------) + I org + 0x002ff844, // n0x1cc6 c0x0000 (---------------) + I test + 0x00221a03, // n0x1cc7 c0x0000 (---------------) + I web + 0x0026cc83, // n0x1cc8 c0x0000 (---------------) + I gov + 0x00200742, // n0x1cc9 c0x0000 (---------------) + I co + 0x00233503, // n0x1cca c0x0000 (---------------) + I com + 0x0023a783, // n0x1ccb c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1ccc c0x0000 (---------------) + I gov + 0x00209003, // n0x1ccd c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1cce c0x0000 (---------------) + I net + 0x00201483, // n0x1ccf c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x1cd0 c0x0000 (---------------) + I org + 0x003927c7, // n0x1cd1 c0x0000 (---------------) + I agrinet + 0x00233503, // n0x1cd2 c0x0000 (---------------) + I com + 0x00222387, // n0x1cd3 c0x0000 (---------------) + I defense + 0x0025d886, // n0x1cd4 c0x0000 (---------------) + I edunet + 0x00215243, // n0x1cd5 c0x0000 (---------------) + I ens + 0x00207503, // n0x1cd6 c0x0000 (---------------) + I fin + 0x0026cc83, // n0x1cd7 c0x0000 (---------------) + I gov + 0x0021d883, // n0x1cd8 c0x0000 (---------------) + I ind + 0x003a1244, // n0x1cd9 c0x0000 (---------------) + I info + 0x0036d504, // n0x1cda c0x0000 (---------------) + I intl + 0x002da6c6, 
// n0x1cdb c0x0000 (---------------) + I mincom + 0x0022b143, // n0x1cdc c0x0000 (---------------) + I nat + 0x0021fe03, // n0x1cdd c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1cde c0x0000 (---------------) + I org + 0x00295005, // n0x1cdf c0x0000 (---------------) + I perso + 0x0020d344, // n0x1ce0 c0x0000 (---------------) + I rnrt + 0x00266ec3, // n0x1ce1 c0x0000 (---------------) + I rns + 0x00351883, // n0x1ce2 c0x0000 (---------------) + I rnu + 0x002c0507, // n0x1ce3 c0x0000 (---------------) + I tourism + 0x00209e45, // n0x1ce4 c0x0000 (---------------) + I turen + 0x00233503, // n0x1ce5 c0x0000 (---------------) + I com + 0x0023a783, // n0x1ce6 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1ce7 c0x0000 (---------------) + I gov + 0x00209003, // n0x1ce8 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1ce9 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1cea c0x0000 (---------------) + I org + 0x00203402, // n0x1ceb c0x0000 (---------------) + I av + 0x002cb903, // n0x1cec c0x0000 (---------------) + I bbs + 0x0028e2c3, // n0x1ced c0x0000 (---------------) + I bel + 0x00330b83, // n0x1cee c0x0000 (---------------) + I biz + 0x52a33503, // n0x1cef c0x014a (n0x1d00-n0x1d01) + I com + 0x0022bf42, // n0x1cf0 c0x0000 (---------------) + I dr + 0x0023a783, // n0x1cf1 c0x0000 (---------------) + I edu + 0x00205843, // n0x1cf2 c0x0000 (---------------) + I gen + 0x0026cc83, // n0x1cf3 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1cf4 c0x0000 (---------------) + I info + 0x00309ac3, // n0x1cf5 c0x0000 (---------------) + I k12 + 0x00249343, // n0x1cf6 c0x0000 (---------------) + I kep + 0x00209003, // n0x1cf7 c0x0000 (---------------) + I mil + 0x00205284, // n0x1cf8 c0x0000 (---------------) + I name + 0x52e00642, // n0x1cf9 c0x014b (n0x1d01-n0x1d02) + I nc + 0x0021fe03, // n0x1cfa c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1cfb c0x0000 (---------------) + I org + 0x00208103, // n0x1cfc c0x0000 (---------------) + I 
pol + 0x0022f7c3, // n0x1cfd c0x0000 (---------------) + I tel + 0x00224e42, // n0x1cfe c0x0000 (---------------) + I tv + 0x00221a03, // n0x1cff c0x0000 (---------------) + I web + 0x000ffa08, // n0x1d00 c0x0000 (---------------) + blogspot + 0x0026cc83, // n0x1d01 c0x0000 (---------------) + I gov + 0x002389c4, // n0x1d02 c0x0000 (---------------) + I aero + 0x00330b83, // n0x1d03 c0x0000 (---------------) + I biz + 0x00200742, // n0x1d04 c0x0000 (---------------) + I co + 0x00233503, // n0x1d05 c0x0000 (---------------) + I com + 0x0023d684, // n0x1d06 c0x0000 (---------------) + I coop + 0x0023a783, // n0x1d07 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1d08 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1d09 c0x0000 (---------------) + I info + 0x00201603, // n0x1d0a c0x0000 (---------------) + I int + 0x002ddc44, // n0x1d0b c0x0000 (---------------) + I jobs + 0x00207104, // n0x1d0c c0x0000 (---------------) + I mobi + 0x002d0106, // n0x1d0d c0x0000 (---------------) + I museum + 0x00205284, // n0x1d0e c0x0000 (---------------) + I name + 0x0021fe03, // n0x1d0f c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1d10 c0x0000 (---------------) + I org + 0x00220e43, // n0x1d11 c0x0000 (---------------) + I pro + 0x0029bec6, // n0x1d12 c0x0000 (---------------) + I travel + 0x00055f8b, // n0x1d13 c0x0000 (---------------) + better-than + 0x00013886, // n0x1d14 c0x0000 (---------------) + dyndns + 0x0002184a, // n0x1d15 c0x0000 (---------------) + on-the-web + 0x000fef4a, // n0x1d16 c0x0000 (---------------) + worse-than + 0x000ffa08, // n0x1d17 c0x0000 (---------------) + blogspot + 0x00238ac4, // n0x1d18 c0x0000 (---------------) + I club + 0x00233503, // n0x1d19 c0x0000 (---------------) + I com + 0x00330b44, // n0x1d1a c0x0000 (---------------) + I ebiz + 0x0023a783, // n0x1d1b c0x0000 (---------------) + I edu + 0x00297cc4, // n0x1d1c c0x0000 (---------------) + I game + 0x0026cc83, // n0x1d1d c0x0000 (---------------) + I gov + 
0x00317243, // n0x1d1e c0x0000 (---------------) + I idv + 0x00209003, // n0x1d1f c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1d20 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1d21 c0x0000 (---------------) + I org + 0x0032444b, // n0x1d22 c0x0000 (---------------) + I xn--czrw28b + 0x003941ca, // n0x1d23 c0x0000 (---------------) + I xn--uc0atv + 0x003a54cc, // n0x1d24 c0x0000 (---------------) + I xn--zf0ao64a + 0x00201542, // n0x1d25 c0x0000 (---------------) + I ac + 0x00200742, // n0x1d26 c0x0000 (---------------) + I co + 0x00202d42, // n0x1d27 c0x0000 (---------------) + I go + 0x00234dc5, // n0x1d28 c0x0000 (---------------) + I hotel + 0x003a1244, // n0x1d29 c0x0000 (---------------) + I info + 0x00203e82, // n0x1d2a c0x0000 (---------------) + I me + 0x00209003, // n0x1d2b c0x0000 (---------------) + I mil + 0x00207104, // n0x1d2c c0x0000 (---------------) + I mobi + 0x00202c02, // n0x1d2d c0x0000 (---------------) + I ne + 0x00200282, // n0x1d2e c0x0000 (---------------) + I or + 0x00200702, // n0x1d2f c0x0000 (---------------) + I sc + 0x00224e42, // n0x1d30 c0x0000 (---------------) + I tv + 0x00130b83, // n0x1d31 c0x0000 (---------------) + biz + 0x002d5d89, // n0x1d32 c0x0000 (---------------) + I cherkassy + 0x00289888, // n0x1d33 c0x0000 (---------------) + I cherkasy + 0x0026cb09, // n0x1d34 c0x0000 (---------------) + I chernigov + 0x0027d249, // n0x1d35 c0x0000 (---------------) + I chernihiv + 0x0036e44a, // n0x1d36 c0x0000 (---------------) + I chernivtsi + 0x00375aca, // n0x1d37 c0x0000 (---------------) + I chernovtsy + 0x0020b482, // n0x1d38 c0x0000 (---------------) + I ck + 0x0021ba42, // n0x1d39 c0x0000 (---------------) + I cn + 0x00000742, // n0x1d3a c0x0000 (---------------) + co + 0x00233503, // n0x1d3b c0x0000 (---------------) + I com + 0x002049c2, // n0x1d3c c0x0000 (---------------) + I cr + 0x00245e06, // n0x1d3d c0x0000 (---------------) + I crimea + 0x00353bc2, // n0x1d3e c0x0000 (---------------) + I cv + 
0x00211102, // n0x1d3f c0x0000 (---------------) + I dn + 0x002276ce, // n0x1d40 c0x0000 (---------------) + I dnepropetrovsk + 0x0027148e, // n0x1d41 c0x0000 (---------------) + I dnipropetrovsk + 0x0027d0c7, // n0x1d42 c0x0000 (---------------) + I dominic + 0x003219c7, // n0x1d43 c0x0000 (---------------) + I donetsk + 0x0024dc82, // n0x1d44 c0x0000 (---------------) + I dp + 0x0023a783, // n0x1d45 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1d46 c0x0000 (---------------) + I gov + 0x00200f02, // n0x1d47 c0x0000 (---------------) + I if + 0x002013c2, // n0x1d48 c0x0000 (---------------) + I in + 0x0024048f, // n0x1d49 c0x0000 (---------------) + I ivano-frankivsk + 0x0021acc2, // n0x1d4a c0x0000 (---------------) + I kh + 0x0023fb87, // n0x1d4b c0x0000 (---------------) + I kharkiv + 0x00240807, // n0x1d4c c0x0000 (---------------) + I kharkov + 0x0024b387, // n0x1d4d c0x0000 (---------------) + I kherson + 0x0024eb8c, // n0x1d4e c0x0000 (---------------) + I khmelnitskiy + 0x00250bcc, // n0x1d4f c0x0000 (---------------) + I khmelnytskyi + 0x00202ac4, // n0x1d50 c0x0000 (---------------) + I kiev + 0x0027ea4a, // n0x1d51 c0x0000 (---------------) + I kirovograd + 0x002316c2, // n0x1d52 c0x0000 (---------------) + I km + 0x00206fc2, // n0x1d53 c0x0000 (---------------) + I kr + 0x002b1504, // n0x1d54 c0x0000 (---------------) + I krym + 0x00254e42, // n0x1d55 c0x0000 (---------------) + I ks + 0x002bb402, // n0x1d56 c0x0000 (---------------) + I kv + 0x00250e04, // n0x1d57 c0x0000 (---------------) + I kyiv + 0x00219082, // n0x1d58 c0x0000 (---------------) + I lg + 0x00209e02, // n0x1d59 c0x0000 (---------------) + I lt + 0x00250a47, // n0x1d5a c0x0000 (---------------) + I lugansk + 0x00238045, // n0x1d5b c0x0000 (---------------) + I lutsk + 0x00205d02, // n0x1d5c c0x0000 (---------------) + I lv + 0x00240404, // n0x1d5d c0x0000 (---------------) + I lviv + 0x00367142, // n0x1d5e c0x0000 (---------------) + I mk + 0x002f0388, // n0x1d5f c0x0000 
(---------------) + I mykolaiv + 0x0021fe03, // n0x1d60 c0x0000 (---------------) + I net + 0x00203488, // n0x1d61 c0x0000 (---------------) + I nikolaev + 0x00202d82, // n0x1d62 c0x0000 (---------------) + I od + 0x0023bac5, // n0x1d63 c0x0000 (---------------) + I odesa + 0x00372946, // n0x1d64 c0x0000 (---------------) + I odessa + 0x0022d1c3, // n0x1d65 c0x0000 (---------------) + I org + 0x002063c2, // n0x1d66 c0x0000 (---------------) + I pl + 0x002dea07, // n0x1d67 c0x0000 (---------------) + I poltava + 0x00009302, // n0x1d68 c0x0000 (---------------) + pp + 0x002e1c85, // n0x1d69 c0x0000 (---------------) + I rivne + 0x0038af45, // n0x1d6a c0x0000 (---------------) + I rovno + 0x00206882, // n0x1d6b c0x0000 (---------------) + I rv + 0x0022d142, // n0x1d6c c0x0000 (---------------) + I sb + 0x00207f4a, // n0x1d6d c0x0000 (---------------) + I sebastopol + 0x0025140a, // n0x1d6e c0x0000 (---------------) + I sevastopol + 0x0024cdc2, // n0x1d6f c0x0000 (---------------) + I sm + 0x002f0304, // n0x1d70 c0x0000 (---------------) + I sumy + 0x002012c2, // n0x1d71 c0x0000 (---------------) + I te + 0x00320448, // n0x1d72 c0x0000 (---------------) + I ternopil + 0x00211342, // n0x1d73 c0x0000 (---------------) + I uz + 0x0029cfc8, // n0x1d74 c0x0000 (---------------) + I uzhgorod + 0x002f6cc7, // n0x1d75 c0x0000 (---------------) + I vinnica + 0x002f7889, // n0x1d76 c0x0000 (---------------) + I vinnytsia + 0x00203442, // n0x1d77 c0x0000 (---------------) + I vn + 0x002fcc45, // n0x1d78 c0x0000 (---------------) + I volyn + 0x00289d45, // n0x1d79 c0x0000 (---------------) + I yalta + 0x002c300b, // n0x1d7a c0x0000 (---------------) + I zaporizhzhe + 0x002c3a4c, // n0x1d7b c0x0000 (---------------) + I zaporizhzhia + 0x00230c08, // n0x1d7c c0x0000 (---------------) + I zhitomir + 0x002fd008, // n0x1d7d c0x0000 (---------------) + I zhytomyr + 0x00264682, // n0x1d7e c0x0000 (---------------) + I zp + 0x00219b02, // n0x1d7f c0x0000 (---------------) + I zt + 
0x00201542, // n0x1d80 c0x0000 (---------------) + I ac + 0x000ffa08, // n0x1d81 c0x0000 (---------------) + blogspot + 0x00200742, // n0x1d82 c0x0000 (---------------) + I co + 0x00233503, // n0x1d83 c0x0000 (---------------) + I com + 0x00202d42, // n0x1d84 c0x0000 (---------------) + I go + 0x00202c02, // n0x1d85 c0x0000 (---------------) + I ne + 0x00200282, // n0x1d86 c0x0000 (---------------) + I or + 0x0022d1c3, // n0x1d87 c0x0000 (---------------) + I org + 0x00200702, // n0x1d88 c0x0000 (---------------) + I sc + 0x00201542, // n0x1d89 c0x0000 (---------------) + I ac + 0x54e00742, // n0x1d8a c0x0153 (n0x1d94-n0x1d95) + I co + 0x5526cc83, // n0x1d8b c0x0154 (n0x1d95-n0x1d96) + I gov + 0x00322cc3, // n0x1d8c c0x0000 (---------------) + I ltd + 0x00203e82, // n0x1d8d c0x0000 (---------------) + I me + 0x0021fe03, // n0x1d8e c0x0000 (---------------) + I net + 0x0038eb83, // n0x1d8f c0x0000 (---------------) + I nhs + 0x0022d1c3, // n0x1d90 c0x0000 (---------------) + I org + 0x002db143, // n0x1d91 c0x0000 (---------------) + I plc + 0x00225d06, // n0x1d92 c0x0000 (---------------) + I police + 0x01617443, // n0x1d93 c0x0005 (---------------)* o I sch + 0x000ffa08, // n0x1d94 c0x0000 (---------------) + blogspot + 0x00006807, // n0x1d95 c0x0000 (---------------) + service + 0x55a01dc2, // n0x1d96 c0x0156 (n0x1dd5-n0x1dd8) + I ak + 0x55e001c2, // n0x1d97 c0x0157 (n0x1dd8-n0x1ddb) + I al + 0x56200a42, // n0x1d98 c0x0158 (n0x1ddb-n0x1dde) + I ar + 0x56601d42, // n0x1d99 c0x0159 (n0x1dde-n0x1de1) + I as + 0x56a05f42, // n0x1d9a c0x015a (n0x1de1-n0x1de4) + I az + 0x56e00302, // n0x1d9b c0x015b (n0x1de4-n0x1de7) + I ca + 0x57200742, // n0x1d9c c0x015c (n0x1de7-n0x1dea) + I co + 0x57631382, // n0x1d9d c0x015d (n0x1dea-n0x1ded) + I ct + 0x57a1fb42, // n0x1d9e c0x015e (n0x1ded-n0x1df0) + I dc + 0x57e04d82, // n0x1d9f c0x015f (n0x1df0-n0x1df3) + I de + 0x00271483, // n0x1da0 c0x0000 (---------------) + I dni + 0x00211503, // n0x1da1 c0x0000 (---------------) + I fed + 
0x582175c2, // n0x1da2 c0x0160 (n0x1df3-n0x1df6) + I fl + 0x58601042, // n0x1da3 c0x0161 (n0x1df6-n0x1df9) + I ga + 0x58a0dd42, // n0x1da4 c0x0162 (n0x1df9-n0x1dfc) + I gu + 0x58e00d82, // n0x1da5 c0x0163 (n0x1dfc-n0x1dfe) + I hi + 0x59207682, // n0x1da6 c0x0164 (n0x1dfe-n0x1e01) + I ia + 0x5960c782, // n0x1da7 c0x0165 (n0x1e01-n0x1e04) + I id + 0x59a02902, // n0x1da8 c0x0166 (n0x1e04-n0x1e07) + I il + 0x59e013c2, // n0x1da9 c0x0167 (n0x1e07-n0x1e0a) + I in + 0x000b8d45, // n0x1daa c0x0000 (---------------) + is-by + 0x00223543, // n0x1dab c0x0000 (---------------) + I isa + 0x0028cd44, // n0x1dac c0x0000 (---------------) + I kids + 0x5a254e42, // n0x1dad c0x0168 (n0x1e0a-n0x1e0d) + I ks + 0x5a636902, // n0x1dae c0x0169 (n0x1e0d-n0x1e10) + I ky + 0x5aa00802, // n0x1daf c0x016a (n0x1e10-n0x1e13) + I la + 0x0007958b, // n0x1db0 c0x0000 (---------------) + land-4-sale + 0x5ae00182, // n0x1db1 c0x016b (n0x1e13-n0x1e16) + I ma + 0x5b64da82, // n0x1db2 c0x016d (n0x1e19-n0x1e1c) + I md + 0x5ba03e82, // n0x1db3 c0x016e (n0x1e1c-n0x1e1f) + I me + 0x5be09002, // n0x1db4 c0x016f (n0x1e1f-n0x1e22) + I mi + 0x5c21fdc2, // n0x1db5 c0x0170 (n0x1e22-n0x1e25) + I mn + 0x5c607102, // n0x1db6 c0x0171 (n0x1e25-n0x1e28) + I mo + 0x5ca0f702, // n0x1db7 c0x0172 (n0x1e28-n0x1e2b) + I ms + 0x5ce04c02, // n0x1db8 c0x0173 (n0x1e2b-n0x1e2e) + I mt + 0x5d200642, // n0x1db9 c0x0174 (n0x1e2e-n0x1e31) + I nc + 0x5d600882, // n0x1dba c0x0175 (n0x1e31-n0x1e33) + I nd + 0x5da02c02, // n0x1dbb c0x0176 (n0x1e33-n0x1e36) + I ne + 0x5de03382, // n0x1dbc c0x0177 (n0x1e36-n0x1e39) + I nh + 0x5e204002, // n0x1dbd c0x0178 (n0x1e39-n0x1e3c) + I nj + 0x5e63db02, // n0x1dbe c0x0179 (n0x1e3c-n0x1e3f) + I nm + 0x002e0f43, // n0x1dbf c0x0000 (---------------) + I nsn + 0x5ea03d42, // n0x1dc0 c0x017a (n0x1e3f-n0x1e42) + I nv + 0x5ee15842, // n0x1dc1 c0x017b (n0x1e42-n0x1e45) + I ny + 0x5f207382, // n0x1dc2 c0x017c (n0x1e45-n0x1e48) + I oh + 0x5f601b82, // n0x1dc3 c0x017d (n0x1e48-n0x1e4b) + I ok + 0x5fa00282, // 
n0x1dc4 c0x017e (n0x1e4b-n0x1e4e) + I or + 0x5fe0ac42, // n0x1dc5 c0x017f (n0x1e4e-n0x1e51) + I pa + 0x60204602, // n0x1dc6 c0x0180 (n0x1e51-n0x1e54) + I pr + 0x60600a82, // n0x1dc7 c0x0181 (n0x1e54-n0x1e57) + I ri + 0x60a00702, // n0x1dc8 c0x0182 (n0x1e57-n0x1e5a) + I sc + 0x60e496c2, // n0x1dc9 c0x0183 (n0x1e5a-n0x1e5c) + I sd + 0x000e868c, // n0x1dca c0x0000 (---------------) + stuff-4-sale + 0x6124f882, // n0x1dcb c0x0184 (n0x1e5c-n0x1e5f) + I tn + 0x61673442, // n0x1dcc c0x0185 (n0x1e5f-n0x1e62) + I tx + 0x61a03b02, // n0x1dcd c0x0186 (n0x1e62-n0x1e65) + I ut + 0x61e000c2, // n0x1dce c0x0187 (n0x1e65-n0x1e68) + I va + 0x62205d42, // n0x1dcf c0x0188 (n0x1e68-n0x1e6b) + I vi + 0x62671f82, // n0x1dd0 c0x0189 (n0x1e6b-n0x1e6e) + I vt + 0x62a010c2, // n0x1dd1 c0x018a (n0x1e6e-n0x1e71) + I wa + 0x62e0ae82, // n0x1dd2 c0x018b (n0x1e71-n0x1e74) + I wi + 0x632755c2, // n0x1dd3 c0x018c (n0x1e74-n0x1e75) + I wv + 0x63674502, // n0x1dd4 c0x018d (n0x1e75-n0x1e78) + I wy + 0x0022e182, // n0x1dd5 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1dd6 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1dd7 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1dd8 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1dd9 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1dda c0x0000 (---------------) + I lib + 0x0022e182, // n0x1ddb c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1ddc c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1ddd c0x0000 (---------------) + I lib + 0x0022e182, // n0x1dde c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1ddf c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1de0 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1de1 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1de2 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1de3 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1de4 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1de5 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1de6 
c0x0000 (---------------) + I lib + 0x0022e182, // n0x1de7 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1de8 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1de9 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1dea c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1deb c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1dec c0x0000 (---------------) + I lib + 0x0022e182, // n0x1ded c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1dee c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1def c0x0000 (---------------) + I lib + 0x0022e182, // n0x1df0 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1df1 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1df2 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1df3 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1df4 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1df5 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1df6 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1df7 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1df8 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1df9 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1dfa c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1dfb c0x0000 (---------------) + I lib + 0x0022e182, // n0x1dfc c0x0000 (---------------) + I cc + 0x0027b703, // n0x1dfd c0x0000 (---------------) + I lib + 0x0022e182, // n0x1dfe c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1dff c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e00 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e01 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e02 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e03 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e04 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e05 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e06 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e07 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e08 
c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e09 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e0a c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e0b c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e0c c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e0d c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e0e c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e0f c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e10 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e11 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e12 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e13 c0x0000 (---------------) + I cc + 0x5b309ac3, // n0x1e14 c0x016c (n0x1e16-n0x1e19) + I k12 + 0x0027b703, // n0x1e15 c0x0000 (---------------) + I lib + 0x0022a644, // n0x1e16 c0x0000 (---------------) + I chtr + 0x00289786, // n0x1e17 c0x0000 (---------------) + I paroch + 0x002e5543, // n0x1e18 c0x0000 (---------------) + I pvt + 0x0022e182, // n0x1e19 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e1a c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e1b c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e1c c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e1d c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e1e c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e1f c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e20 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e21 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e22 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e23 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e24 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e25 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e26 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e27 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e28 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e29 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e2a 
c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e2b c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e2c c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e2d c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e2e c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e2f c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e30 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e31 c0x0000 (---------------) + I cc + 0x0027b703, // n0x1e32 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e33 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e34 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e35 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e36 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e37 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e38 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e39 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e3a c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e3b c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e3c c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e3d c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e3e c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e3f c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e40 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e41 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e42 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e43 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e44 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e45 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e46 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e47 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e48 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e49 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e4a c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e4b c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e4c 
c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e4d c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e4e c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e4f c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e50 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e51 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e52 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e53 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e54 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e55 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e56 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e57 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e58 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e59 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e5a c0x0000 (---------------) + I cc + 0x0027b703, // n0x1e5b c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e5c c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e5d c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e5e c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e5f c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e60 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e61 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e62 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e63 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e64 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e65 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e66 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e67 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e68 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e69 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e6a c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e6b c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e6c c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e6d c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e6e 
c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e6f c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e70 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e71 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e72 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e73 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e74 c0x0000 (---------------) + I cc + 0x0022e182, // n0x1e75 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e76 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e77 c0x0000 (---------------) + I lib + 0x63e33503, // n0x1e78 c0x018f (n0x1e7e-n0x1e7f) + I com + 0x0023a783, // n0x1e79 c0x0000 (---------------) + I edu + 0x0024a483, // n0x1e7a c0x0000 (---------------) + I gub + 0x00209003, // n0x1e7b c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1e7c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1e7d c0x0000 (---------------) + I org + 0x000ffa08, // n0x1e7e c0x0000 (---------------) + blogspot + 0x00200742, // n0x1e7f c0x0000 (---------------) + I co + 0x00233503, // n0x1e80 c0x0000 (---------------) + I com + 0x0021fe03, // n0x1e81 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1e82 c0x0000 (---------------) + I org + 0x00233503, // n0x1e83 c0x0000 (---------------) + I com + 0x0023a783, // n0x1e84 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1e85 c0x0000 (---------------) + I gov + 0x00209003, // n0x1e86 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1e87 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1e88 c0x0000 (---------------) + I org + 0x0024bf84, // n0x1e89 c0x0000 (---------------) + I arts + 0x00200742, // n0x1e8a c0x0000 (---------------) + I co + 0x00233503, // n0x1e8b c0x0000 (---------------) + I com + 0x00328c03, // n0x1e8c c0x0000 (---------------) + I e12 + 0x0023a783, // n0x1e8d c0x0000 (---------------) + I edu + 0x0024d9c4, // n0x1e8e c0x0000 (---------------) + I firm + 0x00213183, // n0x1e8f c0x0000 (---------------) + I gob + 0x0026cc83, // 
n0x1e90 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1e91 c0x0000 (---------------) + I info + 0x00201603, // n0x1e92 c0x0000 (---------------) + I int + 0x00209003, // n0x1e93 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1e94 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1e95 c0x0000 (---------------) + I org + 0x0022a5c3, // n0x1e96 c0x0000 (---------------) + I rec + 0x00391185, // n0x1e97 c0x0000 (---------------) + I store + 0x002d59c3, // n0x1e98 c0x0000 (---------------) + I tec + 0x00221a03, // n0x1e99 c0x0000 (---------------) + I web + 0x00200742, // n0x1e9a c0x0000 (---------------) + I co + 0x00233503, // n0x1e9b c0x0000 (---------------) + I com + 0x00309ac3, // n0x1e9c c0x0000 (---------------) + I k12 + 0x0021fe03, // n0x1e9d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1e9e c0x0000 (---------------) + I org + 0x00201542, // n0x1e9f c0x0000 (---------------) + I ac + 0x00330b83, // n0x1ea0 c0x0000 (---------------) + I biz + 0x000ffa08, // n0x1ea1 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1ea2 c0x0000 (---------------) + I com + 0x0023a783, // n0x1ea3 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1ea4 c0x0000 (---------------) + I gov + 0x0036b386, // n0x1ea5 c0x0000 (---------------) + I health + 0x003a1244, // n0x1ea6 c0x0000 (---------------) + I info + 0x00201603, // n0x1ea7 c0x0000 (---------------) + I int + 0x00205284, // n0x1ea8 c0x0000 (---------------) + I name + 0x0021fe03, // n0x1ea9 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1eaa c0x0000 (---------------) + I org + 0x00220e43, // n0x1eab c0x0000 (---------------) + I pro + 0x00233503, // n0x1eac c0x0000 (---------------) + I com + 0x0023a783, // n0x1ead c0x0000 (---------------) + I edu + 0x0021fe03, // n0x1eae c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1eaf c0x0000 (---------------) + I org + 0x00233503, // n0x1eb0 c0x0000 (---------------) + I com + 0x00013886, // n0x1eb1 c0x0000 (---------------) + dyndns 
+ 0x0023a783, // n0x1eb2 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1eb3 c0x0000 (---------------) + I gov + 0x000d0d86, // n0x1eb4 c0x0000 (---------------) + mypets + 0x0021fe03, // n0x1eb5 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1eb6 c0x0000 (---------------) + I org + 0x00309e08, // n0x1eb7 c0x0000 (---------------) + I xn--80au + 0x0030c549, // n0x1eb8 c0x0000 (---------------) + I xn--90azh + 0x00318cc9, // n0x1eb9 c0x0000 (---------------) + I xn--c1avg + 0x00329088, // n0x1eba c0x0000 (---------------) + I xn--d1at + 0x00374908, // n0x1ebb c0x0000 (---------------) + I xn--o1ac + 0x00374909, // n0x1ebc c0x0000 (---------------) + I xn--o1ach + 0x00201542, // n0x1ebd c0x0000 (---------------) + I ac + 0x00209c85, // n0x1ebe c0x0000 (---------------) + I agric + 0x0023adc3, // n0x1ebf c0x0000 (---------------) + I alt + 0x66600742, // n0x1ec0 c0x0199 (n0x1ece-n0x1ecf) + I co + 0x0023a783, // n0x1ec1 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1ec2 c0x0000 (---------------) + I gov + 0x0027d907, // n0x1ec3 c0x0000 (---------------) + I grondar + 0x00274483, // n0x1ec4 c0x0000 (---------------) + I law + 0x00209003, // n0x1ec5 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1ec6 c0x0000 (---------------) + I net + 0x00202d03, // n0x1ec7 c0x0000 (---------------) + I ngo + 0x00211803, // n0x1ec8 c0x0000 (---------------) + I nis + 0x00201483, // n0x1ec9 c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x1eca c0x0000 (---------------) + I org + 0x0023d0c6, // n0x1ecb c0x0000 (---------------) + I school + 0x00200142, // n0x1ecc c0x0000 (---------------) + I tm + 0x00221a03, // n0x1ecd c0x0000 (---------------) + I web + 0x000ffa08, // n0x1ece c0x0000 (---------------) + blogspot +} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. 
+// +// The layout within the uint32, from MSB to LSB, is: +// [ 1 bits] unused +// [ 1 bits] wildcard bit +// [ 2 bits] node type +// [14 bits] high nodes index (exclusive) of children +// [14 bits] low nodes index (inclusive) of children +var children = [...]uint32{ + 0x00000000, // c0x0000 (---------------) + + 0x10000000, // c0x0001 (---------------) ! + 0x20000000, // c0x0002 (---------------) o + 0x40000000, // c0x0003 (---------------)* + + 0x50000000, // c0x0004 (---------------)* ! + 0x60000000, // c0x0005 (---------------)* o + 0x0183c609, // c0x0006 (n0x0609-n0x060f) + + 0x0184060f, // c0x0007 (n0x060f-n0x0610) + + 0x01860610, // c0x0008 (n0x0610-n0x0618) + + 0x019bc618, // c0x0009 (n0x0618-n0x066f) + + 0x019d066f, // c0x000a (n0x066f-n0x0674) + + 0x019e4674, // c0x000b (n0x0674-n0x0679) + + 0x019f4679, // c0x000c (n0x0679-n0x067d) + + 0x01a1067d, // c0x000d (n0x067d-n0x0684) + + 0x01a14684, // c0x000e (n0x0684-n0x0685) + + 0x01a2c685, // c0x000f (n0x0685-n0x068b) + + 0x01a5068b, // c0x0010 (n0x068b-n0x0694) + + 0x01a54694, // c0x0011 (n0x0694-n0x0695) + + 0x01a6c695, // c0x0012 (n0x0695-n0x069b) + + 0x01a7069b, // c0x0013 (n0x069b-n0x069c) + + 0x01a8c69c, // c0x0014 (n0x069c-n0x06a3) + + 0x01a906a3, // c0x0015 (n0x06a3-n0x06a4) + + 0x01ad86a4, // c0x0016 (n0x06a4-n0x06b6) + + 0x01adc6b6, // c0x0017 (n0x06b6-n0x06b7) + + 0x01afc6b7, // c0x0018 (n0x06b7-n0x06bf) + + 0x01b106bf, // c0x0019 (n0x06bf-n0x06c4) + + 0x01b146c4, // c0x001a (n0x06c4-n0x06c5) + + 0x01b446c5, // c0x001b (n0x06c5-n0x06d1) + + 0x01b706d1, // c0x001c (n0x06d1-n0x06dc) + + 0x01b986dc, // c0x001d (n0x06dc-n0x06e6) + + 0x01ba06e6, // c0x001e (n0x06e6-n0x06e8) + + 0x01ba46e8, // c0x001f (n0x06e8-n0x06e9) + + 0x01c386e9, // c0x0020 (n0x06e9-n0x070e) + + 0x01c4c70e, // c0x0021 (n0x070e-n0x0713) + + 0x01c60713, // c0x0022 (n0x0713-n0x0718) + + 0x01c80718, // c0x0023 (n0x0718-n0x0720) + + 0x01c90720, // c0x0024 (n0x0720-n0x0724) + + 0x01ca4724, // c0x0025 (n0x0724-n0x0729) + + 0x01cc8729, // 
c0x0026 (n0x0729-n0x0732) + + 0x01de0732, // c0x0027 (n0x0732-n0x0778) + + 0x01de4778, // c0x0028 (n0x0778-n0x0779) + + 0x01df8779, // c0x0029 (n0x0779-n0x077e) + + 0x01e0c77e, // c0x002a (n0x077e-n0x0783) + + 0x01e14783, // c0x002b (n0x0783-n0x0785) + + 0x01e24785, // c0x002c (n0x0785-n0x0789) + + 0x01e28789, // c0x002d (n0x0789-n0x078a) + + 0x01e4078a, // c0x002e (n0x078a-n0x0790) + + 0x01e84790, // c0x002f (n0x0790-n0x07a1) + + 0x01e947a1, // c0x0030 (n0x07a1-n0x07a5) + + 0x01e987a5, // c0x0031 (n0x07a5-n0x07a6) + + 0x01e9c7a6, // c0x0032 (n0x07a6-n0x07a7) + + 0x01ea07a7, // c0x0033 (n0x07a7-n0x07a8) + + 0x01edc7a8, // c0x0034 (n0x07a8-n0x07b7) + + 0x61ee07b7, // c0x0035 (n0x07b7-n0x07b8)* o + 0x01ef47b8, // c0x0036 (n0x07b8-n0x07bd) + + 0x01f047bd, // c0x0037 (n0x07bd-n0x07c1) + + 0x01fb87c1, // c0x0038 (n0x07c1-n0x07ee) + + 0x21fbc7ee, // c0x0039 (n0x07ee-n0x07ef) o + 0x01fc07ef, // c0x003a (n0x07ef-n0x07f0) + + 0x01fc47f0, // c0x003b (n0x07f0-n0x07f1) + + 0x21fc87f1, // c0x003c (n0x07f1-n0x07f2) o + 0x21fcc7f2, // c0x003d (n0x07f2-n0x07f3) o + 0x020007f3, // c0x003e (n0x07f3-n0x0800) + + 0x02004800, // c0x003f (n0x0800-n0x0801) + + 0x0235c801, // c0x0040 (n0x0801-n0x08d7) + + 0x223ac8d7, // c0x0041 (n0x08d7-n0x08eb) o + 0x223b08eb, // c0x0042 (n0x08eb-n0x08ec) o + 0x023d88ec, // c0x0043 (n0x08ec-n0x08f6) + + 0x023e08f6, // c0x0044 (n0x08f6-n0x08f8) + + 0x223e48f8, // c0x0045 (n0x08f8-n0x08f9) o + 0x223e88f9, // c0x0046 (n0x08f9-n0x08fa) o + 0x023f48fa, // c0x0047 (n0x08fa-n0x08fd) + + 0x223f88fd, // c0x0048 (n0x08fd-n0x08fe) o + 0x024148fe, // c0x0049 (n0x08fe-n0x0905) + + 0x0242c905, // c0x004a (n0x0905-n0x090b) + + 0x0243090b, // c0x004b (n0x090b-n0x090c) + + 0x0244090c, // c0x004c (n0x090c-n0x0910) + + 0x02448910, // c0x004d (n0x0910-n0x0912) + + 0x2247c912, // c0x004e (n0x0912-n0x091f) o + 0x0248091f, // c0x004f (n0x091f-n0x0920) + + 0x02488920, // c0x0050 (n0x0920-n0x0922) + + 0x024a8922, // c0x0051 (n0x0922-n0x092a) + + 0x024ac92a, // c0x0052 
(n0x092a-n0x092b) + + 0x024c092b, // c0x0053 (n0x092b-n0x0930) + + 0x024e8930, // c0x0054 (n0x0930-n0x093a) + + 0x0250893a, // c0x0055 (n0x093a-n0x0942) + + 0x02538942, // c0x0056 (n0x0942-n0x094e) + + 0x0256094e, // c0x0057 (n0x094e-n0x0958) + + 0x02564958, // c0x0058 (n0x0958-n0x0959) + + 0x02588959, // c0x0059 (n0x0959-n0x0962) + + 0x0258c962, // c0x005a (n0x0962-n0x0963) + + 0x025a0963, // c0x005b (n0x0963-n0x0968) + + 0x025a4968, // c0x005c (n0x0968-n0x0969) + + 0x025c4969, // c0x005d (n0x0969-n0x0971) + + 0x025d0971, // c0x005e (n0x0971-n0x0974) + + 0x02630974, // c0x005f (n0x0974-n0x098c) + + 0x0264c98c, // c0x0060 (n0x098c-n0x0993) + + 0x02658993, // c0x0061 (n0x0993-n0x0996) + + 0x0266c996, // c0x0062 (n0x0996-n0x099b) + + 0x0268499b, // c0x0063 (n0x099b-n0x09a1) + + 0x026989a1, // c0x0064 (n0x09a1-n0x09a6) + + 0x026b09a6, // c0x0065 (n0x09a6-n0x09ac) + + 0x026c89ac, // c0x0066 (n0x09ac-n0x09b2) + + 0x026e09b2, // c0x0067 (n0x09b2-n0x09b8) + + 0x026fc9b8, // c0x0068 (n0x09b8-n0x09bf) + + 0x027149bf, // c0x0069 (n0x09bf-n0x09c5) + + 0x027749c5, // c0x006a (n0x09c5-n0x09dd) + + 0x0278c9dd, // c0x006b (n0x09dd-n0x09e3) + + 0x027a09e3, // c0x006c (n0x09e3-n0x09e8) + + 0x027e49e8, // c0x006d (n0x09e8-n0x09f9) + + 0x028649f9, // c0x006e (n0x09f9-n0x0a19) + + 0x02890a19, // c0x006f (n0x0a19-n0x0a24) + + 0x02894a24, // c0x0070 (n0x0a24-n0x0a25) + + 0x0289ca25, // c0x0071 (n0x0a25-n0x0a27) + + 0x028bca27, // c0x0072 (n0x0a27-n0x0a2f) + + 0x028c0a2f, // c0x0073 (n0x0a2f-n0x0a30) + + 0x028dca30, // c0x0074 (n0x0a30-n0x0a37) + + 0x028e4a37, // c0x0075 (n0x0a37-n0x0a39) + + 0x02918a39, // c0x0076 (n0x0a39-n0x0a46) + + 0x02940a46, // c0x0077 (n0x0a46-n0x0a50) + + 0x02944a50, // c0x0078 (n0x0a50-n0x0a51) + + 0x0295ca51, // c0x0079 (n0x0a51-n0x0a57) + + 0x02974a57, // c0x007a (n0x0a57-n0x0a5d) + + 0x02998a5d, // c0x007b (n0x0a5d-n0x0a66) + + 0x029b8a66, // c0x007c (n0x0a66-n0x0a6e) + + 0x02f7ca6e, // c0x007d (n0x0a6e-n0x0bdf) + + 0x02f88bdf, // c0x007e (n0x0bdf-n0x0be2) + 
+ 0x02fa8be2, // c0x007f (n0x0be2-n0x0bea) + + 0x03164bea, // c0x0080 (n0x0bea-n0x0c59) + + 0x03234c59, // c0x0081 (n0x0c59-n0x0c8d) + + 0x032a4c8d, // c0x0082 (n0x0c8d-n0x0ca9) + + 0x032fcca9, // c0x0083 (n0x0ca9-n0x0cbf) + + 0x033e4cbf, // c0x0084 (n0x0cbf-n0x0cf9) + + 0x0343ccf9, // c0x0085 (n0x0cf9-n0x0d0f) + + 0x03478d0f, // c0x0086 (n0x0d0f-n0x0d1e) + + 0x03574d1e, // c0x0087 (n0x0d1e-n0x0d5d) + + 0x03640d5d, // c0x0088 (n0x0d5d-n0x0d90) + + 0x036d8d90, // c0x0089 (n0x0d90-n0x0db6) + + 0x03768db6, // c0x008a (n0x0db6-n0x0dda) + + 0x037ccdda, // c0x008b (n0x0dda-n0x0df3) + + 0x03a04df3, // c0x008c (n0x0df3-n0x0e81) + + 0x03abce81, // c0x008d (n0x0e81-n0x0eaf) + + 0x03b88eaf, // c0x008e (n0x0eaf-n0x0ee2) + + 0x03bd4ee2, // c0x008f (n0x0ee2-n0x0ef5) + + 0x03c5cef5, // c0x0090 (n0x0ef5-n0x0f17) + + 0x03c98f17, // c0x0091 (n0x0f17-n0x0f26) + + 0x03ce8f26, // c0x0092 (n0x0f26-n0x0f3a) + + 0x03d60f3a, // c0x0093 (n0x0f3a-n0x0f58) + + 0x63d64f58, // c0x0094 (n0x0f58-n0x0f59)* o + 0x63d68f59, // c0x0095 (n0x0f59-n0x0f5a)* o + 0x63d6cf5a, // c0x0096 (n0x0f5a-n0x0f5b)* o + 0x03de8f5b, // c0x0097 (n0x0f5b-n0x0f7a) + + 0x03e50f7a, // c0x0098 (n0x0f7a-n0x0f94) + + 0x03eccf94, // c0x0099 (n0x0f94-n0x0fb3) + + 0x03f44fb3, // c0x009a (n0x0fb3-n0x0fd1) + + 0x03fc8fd1, // c0x009b (n0x0fd1-n0x0ff2) + + 0x04034ff2, // c0x009c (n0x0ff2-n0x100d) + + 0x0416100d, // c0x009d (n0x100d-n0x1058) + + 0x041b9058, // c0x009e (n0x1058-n0x106e) + + 0x641bd06e, // c0x009f (n0x106e-n0x106f)* o + 0x0425506f, // c0x00a0 (n0x106f-n0x1095) + + 0x042dd095, // c0x00a1 (n0x1095-n0x10b7) + + 0x043290b7, // c0x00a2 (n0x10b7-n0x10ca) + + 0x043910ca, // c0x00a3 (n0x10ca-n0x10e4) + + 0x044390e4, // c0x00a4 (n0x10e4-n0x110e) + + 0x0450110e, // c0x00a5 (n0x110e-n0x1140) + + 0x04569140, // c0x00a6 (n0x1140-n0x115a) + + 0x0467d15a, // c0x00a7 (n0x115a-n0x119f) + + 0x6468119f, // c0x00a8 (n0x119f-n0x11a0)* o + 0x646851a0, // c0x00a9 (n0x11a0-n0x11a1)* o + 0x046e11a1, // c0x00aa (n0x11a1-n0x11b8) + + 0x0473d1b8, 
// c0x00ab (n0x11b8-n0x11cf) + + 0x047cd1cf, // c0x00ac (n0x11cf-n0x11f3) + + 0x048491f3, // c0x00ad (n0x11f3-n0x1212) + + 0x0488d212, // c0x00ae (n0x1212-n0x1223) + + 0x04971223, // c0x00af (n0x1223-n0x125c) + + 0x049a525c, // c0x00b0 (n0x125c-n0x1269) + + 0x04a05269, // c0x00b1 (n0x1269-n0x1281) + + 0x04a79281, // c0x00b2 (n0x1281-n0x129e) + + 0x04b0129e, // c0x00b3 (n0x129e-n0x12c0) + + 0x04b412c0, // c0x00b4 (n0x12c0-n0x12d0) + + 0x04bb12d0, // c0x00b5 (n0x12d0-n0x12ec) + + 0x64bb52ec, // c0x00b6 (n0x12ec-n0x12ed)* o + 0x64bb92ed, // c0x00b7 (n0x12ed-n0x12ee)* o + 0x24bbd2ee, // c0x00b8 (n0x12ee-n0x12ef) o + 0x04bd52ef, // c0x00b9 (n0x12ef-n0x12f5) + + 0x04bf12f5, // c0x00ba (n0x12f5-n0x12fc) + + 0x04c352fc, // c0x00bb (n0x12fc-n0x130d) + + 0x04c4530d, // c0x00bc (n0x130d-n0x1311) + + 0x04c5d311, // c0x00bd (n0x1311-n0x1317) + + 0x04cd5317, // c0x00be (n0x1317-n0x1335) + + 0x04ce9335, // c0x00bf (n0x1335-n0x133a) + + 0x04d0133a, // c0x00c0 (n0x133a-n0x1340) + + 0x04d25340, // c0x00c1 (n0x1340-n0x1349) + + 0x04d39349, // c0x00c2 (n0x1349-n0x134e) + + 0x04d5134e, // c0x00c3 (n0x134e-n0x1354) + + 0x04d55354, // c0x00c4 (n0x1354-n0x1355) + + 0x04d91355, // c0x00c5 (n0x1355-n0x1364) + + 0x04da5364, // c0x00c6 (n0x1364-n0x1369) + + 0x04dad369, // c0x00c7 (n0x1369-n0x136b) + + 0x04db536b, // c0x00c8 (n0x136b-n0x136d) + + 0x04db936d, // c0x00c9 (n0x136d-n0x136e) + + 0x04ddd36e, // c0x00ca (n0x136e-n0x1377) + + 0x04e01377, // c0x00cb (n0x1377-n0x1380) + + 0x04e19380, // c0x00cc (n0x1380-n0x1386) + + 0x04e21386, // c0x00cd (n0x1386-n0x1388) + + 0x04e25388, // c0x00ce (n0x1388-n0x1389) + + 0x04e59389, // c0x00cf (n0x1389-n0x1396) + + 0x04e7d396, // c0x00d0 (n0x1396-n0x139f) + + 0x04e9d39f, // c0x00d1 (n0x139f-n0x13a7) + + 0x04eb93a7, // c0x00d2 (n0x13a7-n0x13ae) + + 0x04ec93ae, // c0x00d3 (n0x13ae-n0x13b2) + + 0x04edd3b2, // c0x00d4 (n0x13b2-n0x13b7) + + 0x04ee13b7, // c0x00d5 (n0x13b7-n0x13b8) + + 0x04ee93b8, // c0x00d6 (n0x13b8-n0x13ba) + + 0x04efd3ba, // c0x00d7 
(n0x13ba-n0x13bf) + + 0x04f0d3bf, // c0x00d8 (n0x13bf-n0x13c3) + + 0x04f113c3, // c0x00d9 (n0x13c3-n0x13c4) + + 0x04f2d3c4, // c0x00da (n0x13c4-n0x13cb) + + 0x057bd3cb, // c0x00db (n0x13cb-n0x15ef) + + 0x057f55ef, // c0x00dc (n0x15ef-n0x15fd) + + 0x058215fd, // c0x00dd (n0x15fd-n0x1608) + + 0x05839608, // c0x00de (n0x1608-n0x160e) + + 0x0585960e, // c0x00df (n0x160e-n0x1616) + + 0x6585d616, // c0x00e0 (n0x1616-n0x1617)* o + 0x058a1617, // c0x00e1 (n0x1617-n0x1628) + + 0x058a9628, // c0x00e2 (n0x1628-n0x162a) + + 0x258ad62a, // c0x00e3 (n0x162a-n0x162b) o + 0x258b162b, // c0x00e4 (n0x162b-n0x162c) o + 0x058b562c, // c0x00e5 (n0x162c-n0x162d) + + 0x0598d62d, // c0x00e6 (n0x162d-n0x1663) + + 0x25991663, // c0x00e7 (n0x1663-n0x1664) o + 0x25999664, // c0x00e8 (n0x1664-n0x1666) o + 0x259a1666, // c0x00e9 (n0x1666-n0x1668) o + 0x259ad668, // c0x00ea (n0x1668-n0x166b) o + 0x059d566b, // c0x00eb (n0x166b-n0x1675) + + 0x059fd675, // c0x00ec (n0x1675-n0x167f) + + 0x05a0167f, // c0x00ed (n0x167f-n0x1680) + + 0x25a39680, // c0x00ee (n0x1680-n0x168e) o + 0x05a4568e, // c0x00ef (n0x168e-n0x1691) + + 0x0659d691, // c0x00f0 (n0x1691-n0x1967) + + 0x065a1967, // c0x00f1 (n0x1967-n0x1968) + + 0x065a5968, // c0x00f2 (n0x1968-n0x1969) + + 0x265a9969, // c0x00f3 (n0x1969-n0x196a) o + 0x065ad96a, // c0x00f4 (n0x196a-n0x196b) + + 0x265b196b, // c0x00f5 (n0x196b-n0x196c) o + 0x065b596c, // c0x00f6 (n0x196c-n0x196d) + + 0x265c196d, // c0x00f7 (n0x196d-n0x1970) o + 0x065c5970, // c0x00f8 (n0x1970-n0x1971) + + 0x065c9971, // c0x00f9 (n0x1971-n0x1972) + + 0x265cd972, // c0x00fa (n0x1972-n0x1973) o + 0x065d1973, // c0x00fb (n0x1973-n0x1974) + + 0x265d9974, // c0x00fc (n0x1974-n0x1976) o + 0x065dd976, // c0x00fd (n0x1976-n0x1977) + + 0x065e1977, // c0x00fe (n0x1977-n0x1978) + + 0x265f1978, // c0x00ff (n0x1978-n0x197c) o + 0x065f597c, // c0x0100 (n0x197c-n0x197d) + + 0x065f997d, // c0x0101 (n0x197d-n0x197e) + + 0x065fd97e, // c0x0102 (n0x197e-n0x197f) + + 0x0660197f, // c0x0103 (n0x197f-n0x1980) 
+ + 0x26605980, // c0x0104 (n0x1980-n0x1981) o + 0x06609981, // c0x0105 (n0x1981-n0x1982) + + 0x0660d982, // c0x0106 (n0x1982-n0x1983) + + 0x06611983, // c0x0107 (n0x1983-n0x1984) + + 0x06615984, // c0x0108 (n0x1984-n0x1985) + + 0x2661d985, // c0x0109 (n0x1985-n0x1987) o + 0x06621987, // c0x010a (n0x1987-n0x1988) + + 0x06625988, // c0x010b (n0x1988-n0x1989) + + 0x06629989, // c0x010c (n0x1989-n0x198a) + + 0x2662d98a, // c0x010d (n0x198a-n0x198b) o + 0x0663198b, // c0x010e (n0x198b-n0x198c) + + 0x2663998c, // c0x010f (n0x198c-n0x198e) o + 0x2663d98e, // c0x0110 (n0x198e-n0x198f) o + 0x0665998f, // c0x0111 (n0x198f-n0x1996) + + 0x06665996, // c0x0112 (n0x1996-n0x1999) + + 0x066a5999, // c0x0113 (n0x1999-n0x19a9) + + 0x066a99a9, // c0x0114 (n0x19a9-n0x19aa) + + 0x066cd9aa, // c0x0115 (n0x19aa-n0x19b3) + + 0x067c19b3, // c0x0116 (n0x19b3-n0x19f0) + + 0x267c99f0, // c0x0117 (n0x19f0-n0x19f2) o + 0x267cd9f2, // c0x0118 (n0x19f2-n0x19f3) o + 0x267d19f3, // c0x0119 (n0x19f3-n0x19f4) o + 0x067d99f4, // c0x011a (n0x19f4-n0x19f6) + + 0x068b59f6, // c0x011b (n0x19f6-n0x1a2d) + + 0x068e1a2d, // c0x011c (n0x1a2d-n0x1a38) + + 0x06901a38, // c0x011d (n0x1a38-n0x1a40) + + 0x0690da40, // c0x011e (n0x1a40-n0x1a43) + + 0x0692da43, // c0x011f (n0x1a43-n0x1a4b) + + 0x06965a4b, // c0x0120 (n0x1a4b-n0x1a59) + + 0x06bf9a59, // c0x0121 (n0x1a59-n0x1afe) + + 0x06cb5afe, // c0x0122 (n0x1afe-n0x1b2d) + + 0x06cc9b2d, // c0x0123 (n0x1b2d-n0x1b32) + + 0x06cfdb32, // c0x0124 (n0x1b32-n0x1b3f) + + 0x06d29b3f, // c0x0125 (n0x1b3f-n0x1b4a) + + 0x06d45b4a, // c0x0126 (n0x1b4a-n0x1b51) + + 0x06d69b51, // c0x0127 (n0x1b51-n0x1b5a) + + 0x06d81b5a, // c0x0128 (n0x1b5a-n0x1b60) + + 0x06d9db60, // c0x0129 (n0x1b60-n0x1b67) + + 0x06dc1b67, // c0x012a (n0x1b67-n0x1b70) + + 0x06dd1b70, // c0x012b (n0x1b70-n0x1b74) + + 0x06e01b74, // c0x012c (n0x1b74-n0x1b80) + + 0x06e1db80, // c0x012d (n0x1b80-n0x1b87) + + 0x07029b87, // c0x012e (n0x1b87-n0x1c0a) + + 0x0704dc0a, // c0x012f (n0x1c0a-n0x1c13) + + 0x0706dc13, // 
c0x0130 (n0x1c13-n0x1c1b) + + 0x07081c1b, // c0x0131 (n0x1c1b-n0x1c20) + + 0x07095c20, // c0x0132 (n0x1c20-n0x1c25) + + 0x070b5c25, // c0x0133 (n0x1c25-n0x1c2d) + + 0x07159c2d, // c0x0134 (n0x1c2d-n0x1c56) + + 0x07175c56, // c0x0135 (n0x1c56-n0x1c5d) + + 0x07191c5d, // c0x0136 (n0x1c5d-n0x1c64) + + 0x07195c64, // c0x0137 (n0x1c64-n0x1c65) + + 0x07199c65, // c0x0138 (n0x1c65-n0x1c66) + + 0x071adc66, // c0x0139 (n0x1c66-n0x1c6b) + + 0x071cdc6b, // c0x013a (n0x1c6b-n0x1c73) + + 0x071d9c73, // c0x013b (n0x1c73-n0x1c76) + + 0x07209c76, // c0x013c (n0x1c76-n0x1c82) + + 0x07289c82, // c0x013d (n0x1c82-n0x1ca2) + + 0x0729dca2, // c0x013e (n0x1ca2-n0x1ca7) + + 0x072a1ca7, // c0x013f (n0x1ca7-n0x1ca8) + + 0x072b9ca8, // c0x0140 (n0x1ca8-n0x1cae) + + 0x072c5cae, // c0x0141 (n0x1cae-n0x1cb1) + + 0x072c9cb1, // c0x0142 (n0x1cb1-n0x1cb2) + + 0x072e5cb2, // c0x0143 (n0x1cb2-n0x1cb9) + + 0x07321cb9, // c0x0144 (n0x1cb9-n0x1cc8) + + 0x07325cc8, // c0x0145 (n0x1cc8-n0x1cc9) + + 0x07345cc9, // c0x0146 (n0x1cc9-n0x1cd1) + + 0x07395cd1, // c0x0147 (n0x1cd1-n0x1ce5) + + 0x073adce5, // c0x0148 (n0x1ce5-n0x1ceb) + + 0x07401ceb, // c0x0149 (n0x1ceb-n0x1d00) + + 0x07405d00, // c0x014a (n0x1d00-n0x1d01) + + 0x07409d01, // c0x014b (n0x1d01-n0x1d02) + + 0x0744dd02, // c0x014c (n0x1d02-n0x1d13) + + 0x0745dd13, // c0x014d (n0x1d13-n0x1d17) + + 0x07495d17, // c0x014e (n0x1d17-n0x1d25) + + 0x074c5d25, // c0x014f (n0x1d25-n0x1d31) + + 0x07601d31, // c0x0150 (n0x1d31-n0x1d80) + + 0x07625d80, // c0x0151 (n0x1d80-n0x1d89) + + 0x07651d89, // c0x0152 (n0x1d89-n0x1d94) + + 0x07655d94, // c0x0153 (n0x1d94-n0x1d95) + + 0x07659d95, // c0x0154 (n0x1d95-n0x1d96) + + 0x07755d96, // c0x0155 (n0x1d96-n0x1dd5) + + 0x07761dd5, // c0x0156 (n0x1dd5-n0x1dd8) + + 0x0776ddd8, // c0x0157 (n0x1dd8-n0x1ddb) + + 0x07779ddb, // c0x0158 (n0x1ddb-n0x1dde) + + 0x07785dde, // c0x0159 (n0x1dde-n0x1de1) + + 0x07791de1, // c0x015a (n0x1de1-n0x1de4) + + 0x0779dde4, // c0x015b (n0x1de4-n0x1de7) + + 0x077a9de7, // c0x015c 
(n0x1de7-n0x1dea) + + 0x077b5dea, // c0x015d (n0x1dea-n0x1ded) + + 0x077c1ded, // c0x015e (n0x1ded-n0x1df0) + + 0x077cddf0, // c0x015f (n0x1df0-n0x1df3) + + 0x077d9df3, // c0x0160 (n0x1df3-n0x1df6) + + 0x077e5df6, // c0x0161 (n0x1df6-n0x1df9) + + 0x077f1df9, // c0x0162 (n0x1df9-n0x1dfc) + + 0x077f9dfc, // c0x0163 (n0x1dfc-n0x1dfe) + + 0x07805dfe, // c0x0164 (n0x1dfe-n0x1e01) + + 0x07811e01, // c0x0165 (n0x1e01-n0x1e04) + + 0x0781de04, // c0x0166 (n0x1e04-n0x1e07) + + 0x07829e07, // c0x0167 (n0x1e07-n0x1e0a) + + 0x07835e0a, // c0x0168 (n0x1e0a-n0x1e0d) + + 0x07841e0d, // c0x0169 (n0x1e0d-n0x1e10) + + 0x0784de10, // c0x016a (n0x1e10-n0x1e13) + + 0x07859e13, // c0x016b (n0x1e13-n0x1e16) + + 0x07865e16, // c0x016c (n0x1e16-n0x1e19) + + 0x07871e19, // c0x016d (n0x1e19-n0x1e1c) + + 0x0787de1c, // c0x016e (n0x1e1c-n0x1e1f) + + 0x07889e1f, // c0x016f (n0x1e1f-n0x1e22) + + 0x07895e22, // c0x0170 (n0x1e22-n0x1e25) + + 0x078a1e25, // c0x0171 (n0x1e25-n0x1e28) + + 0x078ade28, // c0x0172 (n0x1e28-n0x1e2b) + + 0x078b9e2b, // c0x0173 (n0x1e2b-n0x1e2e) + + 0x078c5e2e, // c0x0174 (n0x1e2e-n0x1e31) + + 0x078cde31, // c0x0175 (n0x1e31-n0x1e33) + + 0x078d9e33, // c0x0176 (n0x1e33-n0x1e36) + + 0x078e5e36, // c0x0177 (n0x1e36-n0x1e39) + + 0x078f1e39, // c0x0178 (n0x1e39-n0x1e3c) + + 0x078fde3c, // c0x0179 (n0x1e3c-n0x1e3f) + + 0x07909e3f, // c0x017a (n0x1e3f-n0x1e42) + + 0x07915e42, // c0x017b (n0x1e42-n0x1e45) + + 0x07921e45, // c0x017c (n0x1e45-n0x1e48) + + 0x0792de48, // c0x017d (n0x1e48-n0x1e4b) + + 0x07939e4b, // c0x017e (n0x1e4b-n0x1e4e) + + 0x07945e4e, // c0x017f (n0x1e4e-n0x1e51) + + 0x07951e51, // c0x0180 (n0x1e51-n0x1e54) + + 0x0795de54, // c0x0181 (n0x1e54-n0x1e57) + + 0x07969e57, // c0x0182 (n0x1e57-n0x1e5a) + + 0x07971e5a, // c0x0183 (n0x1e5a-n0x1e5c) + + 0x0797de5c, // c0x0184 (n0x1e5c-n0x1e5f) + + 0x07989e5f, // c0x0185 (n0x1e5f-n0x1e62) + + 0x07995e62, // c0x0186 (n0x1e62-n0x1e65) + + 0x079a1e65, // c0x0187 (n0x1e65-n0x1e68) + + 0x079ade68, // c0x0188 (n0x1e68-n0x1e6b) + 
+ 0x079b9e6b, // c0x0189 (n0x1e6b-n0x1e6e) + + 0x079c5e6e, // c0x018a (n0x1e6e-n0x1e71) + + 0x079d1e71, // c0x018b (n0x1e71-n0x1e74) + + 0x079d5e74, // c0x018c (n0x1e74-n0x1e75) + + 0x079e1e75, // c0x018d (n0x1e75-n0x1e78) + + 0x079f9e78, // c0x018e (n0x1e78-n0x1e7e) + + 0x079fde7e, // c0x018f (n0x1e7e-n0x1e7f) + + 0x07a0de7f, // c0x0190 (n0x1e7f-n0x1e83) + + 0x07a25e83, // c0x0191 (n0x1e83-n0x1e89) + + 0x07a69e89, // c0x0192 (n0x1e89-n0x1e9a) + + 0x07a7de9a, // c0x0193 (n0x1e9a-n0x1e9f) + + 0x07ab1e9f, // c0x0194 (n0x1e9f-n0x1eac) + + 0x07ac1eac, // c0x0195 (n0x1eac-n0x1eb0) + + 0x07addeb0, // c0x0196 (n0x1eb0-n0x1eb7) + + 0x07af5eb7, // c0x0197 (n0x1eb7-n0x1ebd) + + 0x27b39ebd, // c0x0198 (n0x1ebd-n0x1ece) o + 0x07b3dece, // c0x0199 (n0x1ece-n0x1ecf) + +} + +// max children 409 (capacity 511) +// max text offset 27059 (capacity 32767) +// max text length 36 (capacity 63) +// max hi 7887 (capacity 16383) +// max lo 7886 (capacity 16383) diff --git a/vendor/golang.org/x/net/publicsuffix/table_test.go b/vendor/golang.org/x/net/publicsuffix/table_test.go new file mode 100644 index 00000000..5b635b97 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/table_test.go @@ -0,0 +1,15751 @@ +// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +var rules = [...]string{ + "ac", + "com.ac", + "edu.ac", + "gov.ac", + "net.ac", + "mil.ac", + "org.ac", + "ad", + "nom.ad", + "ae", + "co.ae", + "net.ae", + "org.ae", + "sch.ae", + "ac.ae", + "gov.ae", + "mil.ae", + "aero", + "accident-investigation.aero", + "accident-prevention.aero", + "aerobatic.aero", + "aeroclub.aero", + "aerodrome.aero", + "agents.aero", + "aircraft.aero", + "airline.aero", + "airport.aero", + "air-surveillance.aero", + "airtraffic.aero", + "air-traffic-control.aero", + "ambulance.aero", + "amusement.aero", + "association.aero", + "author.aero", + "ballooning.aero", + "broker.aero", + "caa.aero", + "cargo.aero", + "catering.aero", + "certification.aero", + "championship.aero", + 
"charter.aero", + "civilaviation.aero", + "club.aero", + "conference.aero", + "consultant.aero", + "consulting.aero", + "control.aero", + "council.aero", + "crew.aero", + "design.aero", + "dgca.aero", + "educator.aero", + "emergency.aero", + "engine.aero", + "engineer.aero", + "entertainment.aero", + "equipment.aero", + "exchange.aero", + "express.aero", + "federation.aero", + "flight.aero", + "freight.aero", + "fuel.aero", + "gliding.aero", + "government.aero", + "groundhandling.aero", + "group.aero", + "hanggliding.aero", + "homebuilt.aero", + "insurance.aero", + "journal.aero", + "journalist.aero", + "leasing.aero", + "logistics.aero", + "magazine.aero", + "maintenance.aero", + "media.aero", + "microlight.aero", + "modelling.aero", + "navigation.aero", + "parachuting.aero", + "paragliding.aero", + "passenger-association.aero", + "pilot.aero", + "press.aero", + "production.aero", + "recreation.aero", + "repbody.aero", + "res.aero", + "research.aero", + "rotorcraft.aero", + "safety.aero", + "scientist.aero", + "services.aero", + "show.aero", + "skydiving.aero", + "software.aero", + "student.aero", + "trader.aero", + "trading.aero", + "trainer.aero", + "union.aero", + "workinggroup.aero", + "works.aero", + "af", + "gov.af", + "com.af", + "org.af", + "net.af", + "edu.af", + "ag", + "com.ag", + "org.ag", + "net.ag", + "co.ag", + "nom.ag", + "ai", + "off.ai", + "com.ai", + "net.ai", + "org.ai", + "al", + "com.al", + "edu.al", + "gov.al", + "mil.al", + "net.al", + "org.al", + "am", + "ao", + "ed.ao", + "gv.ao", + "og.ao", + "co.ao", + "pb.ao", + "it.ao", + "aq", + "ar", + "com.ar", + "edu.ar", + "gob.ar", + "gov.ar", + "int.ar", + "mil.ar", + "net.ar", + "org.ar", + "tur.ar", + "arpa", + "e164.arpa", + "in-addr.arpa", + "ip6.arpa", + "iris.arpa", + "uri.arpa", + "urn.arpa", + "as", + "gov.as", + "asia", + "at", + "ac.at", + "co.at", + "gv.at", + "or.at", + "au", + "com.au", + "net.au", + "org.au", + "edu.au", + "gov.au", + "asn.au", + "id.au", + "info.au", + "conf.au", 
+ "oz.au", + "act.au", + "nsw.au", + "nt.au", + "qld.au", + "sa.au", + "tas.au", + "vic.au", + "wa.au", + "act.edu.au", + "nsw.edu.au", + "nt.edu.au", + "qld.edu.au", + "sa.edu.au", + "tas.edu.au", + "vic.edu.au", + "wa.edu.au", + "qld.gov.au", + "sa.gov.au", + "tas.gov.au", + "vic.gov.au", + "wa.gov.au", + "aw", + "com.aw", + "ax", + "az", + "com.az", + "net.az", + "int.az", + "gov.az", + "org.az", + "edu.az", + "info.az", + "pp.az", + "mil.az", + "name.az", + "pro.az", + "biz.az", + "ba", + "org.ba", + "net.ba", + "edu.ba", + "gov.ba", + "mil.ba", + "unsa.ba", + "unbi.ba", + "co.ba", + "com.ba", + "rs.ba", + "bb", + "biz.bb", + "co.bb", + "com.bb", + "edu.bb", + "gov.bb", + "info.bb", + "net.bb", + "org.bb", + "store.bb", + "tv.bb", + "*.bd", + "be", + "ac.be", + "bf", + "gov.bf", + "bg", + "a.bg", + "b.bg", + "c.bg", + "d.bg", + "e.bg", + "f.bg", + "g.bg", + "h.bg", + "i.bg", + "j.bg", + "k.bg", + "l.bg", + "m.bg", + "n.bg", + "o.bg", + "p.bg", + "q.bg", + "r.bg", + "s.bg", + "t.bg", + "u.bg", + "v.bg", + "w.bg", + "x.bg", + "y.bg", + "z.bg", + "0.bg", + "1.bg", + "2.bg", + "3.bg", + "4.bg", + "5.bg", + "6.bg", + "7.bg", + "8.bg", + "9.bg", + "bh", + "com.bh", + "edu.bh", + "net.bh", + "org.bh", + "gov.bh", + "bi", + "co.bi", + "com.bi", + "edu.bi", + "or.bi", + "org.bi", + "biz", + "bj", + "asso.bj", + "barreau.bj", + "gouv.bj", + "bm", + "com.bm", + "edu.bm", + "gov.bm", + "net.bm", + "org.bm", + "*.bn", + "bo", + "com.bo", + "edu.bo", + "gov.bo", + "gob.bo", + "int.bo", + "org.bo", + "net.bo", + "mil.bo", + "tv.bo", + "br", + "adm.br", + "adv.br", + "agr.br", + "am.br", + "arq.br", + "art.br", + "ato.br", + "b.br", + "bio.br", + "blog.br", + "bmd.br", + "cim.br", + "cng.br", + "cnt.br", + "com.br", + "coop.br", + "ecn.br", + "eco.br", + "edu.br", + "emp.br", + "eng.br", + "esp.br", + "etc.br", + "eti.br", + "far.br", + "flog.br", + "fm.br", + "fnd.br", + "fot.br", + "fst.br", + "g12.br", + "ggf.br", + "gov.br", + "imb.br", + "ind.br", + "inf.br", + "jor.br", 
+ "jus.br", + "leg.br", + "lel.br", + "mat.br", + "med.br", + "mil.br", + "mp.br", + "mus.br", + "net.br", + "*.nom.br", + "not.br", + "ntr.br", + "odo.br", + "org.br", + "ppg.br", + "pro.br", + "psc.br", + "psi.br", + "qsl.br", + "radio.br", + "rec.br", + "slg.br", + "srv.br", + "taxi.br", + "teo.br", + "tmp.br", + "trd.br", + "tur.br", + "tv.br", + "vet.br", + "vlog.br", + "wiki.br", + "zlg.br", + "bs", + "com.bs", + "net.bs", + "org.bs", + "edu.bs", + "gov.bs", + "bt", + "com.bt", + "edu.bt", + "gov.bt", + "net.bt", + "org.bt", + "bv", + "bw", + "co.bw", + "org.bw", + "by", + "gov.by", + "mil.by", + "com.by", + "of.by", + "bz", + "com.bz", + "net.bz", + "org.bz", + "edu.bz", + "gov.bz", + "ca", + "ab.ca", + "bc.ca", + "mb.ca", + "nb.ca", + "nf.ca", + "nl.ca", + "ns.ca", + "nt.ca", + "nu.ca", + "on.ca", + "pe.ca", + "qc.ca", + "sk.ca", + "yk.ca", + "gc.ca", + "cat", + "cc", + "cd", + "gov.cd", + "cf", + "cg", + "ch", + "ci", + "org.ci", + "or.ci", + "com.ci", + "co.ci", + "edu.ci", + "ed.ci", + "ac.ci", + "net.ci", + "go.ci", + "asso.ci", + "xn--aroport-bya.ci", + "int.ci", + "presse.ci", + "md.ci", + "gouv.ci", + "*.ck", + "!www.ck", + "cl", + "gov.cl", + "gob.cl", + "co.cl", + "mil.cl", + "cm", + "co.cm", + "com.cm", + "gov.cm", + "net.cm", + "cn", + "ac.cn", + "com.cn", + "edu.cn", + "gov.cn", + "net.cn", + "org.cn", + "mil.cn", + "xn--55qx5d.cn", + "xn--io0a7i.cn", + "xn--od0alg.cn", + "ah.cn", + "bj.cn", + "cq.cn", + "fj.cn", + "gd.cn", + "gs.cn", + "gz.cn", + "gx.cn", + "ha.cn", + "hb.cn", + "he.cn", + "hi.cn", + "hl.cn", + "hn.cn", + "jl.cn", + "js.cn", + "jx.cn", + "ln.cn", + "nm.cn", + "nx.cn", + "qh.cn", + "sc.cn", + "sd.cn", + "sh.cn", + "sn.cn", + "sx.cn", + "tj.cn", + "xj.cn", + "xz.cn", + "yn.cn", + "zj.cn", + "hk.cn", + "mo.cn", + "tw.cn", + "co", + "arts.co", + "com.co", + "edu.co", + "firm.co", + "gov.co", + "info.co", + "int.co", + "mil.co", + "net.co", + "nom.co", + "org.co", + "rec.co", + "web.co", + "com", + "coop", + "cr", + "ac.cr", + 
"co.cr", + "ed.cr", + "fi.cr", + "go.cr", + "or.cr", + "sa.cr", + "cu", + "com.cu", + "edu.cu", + "org.cu", + "net.cu", + "gov.cu", + "inf.cu", + "cv", + "cw", + "com.cw", + "edu.cw", + "net.cw", + "org.cw", + "cx", + "gov.cx", + "ac.cy", + "biz.cy", + "com.cy", + "ekloges.cy", + "gov.cy", + "ltd.cy", + "name.cy", + "net.cy", + "org.cy", + "parliament.cy", + "press.cy", + "pro.cy", + "tm.cy", + "cz", + "de", + "dj", + "dk", + "dm", + "com.dm", + "net.dm", + "org.dm", + "edu.dm", + "gov.dm", + "do", + "art.do", + "com.do", + "edu.do", + "gob.do", + "gov.do", + "mil.do", + "net.do", + "org.do", + "sld.do", + "web.do", + "dz", + "com.dz", + "org.dz", + "net.dz", + "gov.dz", + "edu.dz", + "asso.dz", + "pol.dz", + "art.dz", + "ec", + "com.ec", + "info.ec", + "net.ec", + "fin.ec", + "k12.ec", + "med.ec", + "pro.ec", + "org.ec", + "edu.ec", + "gov.ec", + "gob.ec", + "mil.ec", + "edu", + "ee", + "edu.ee", + "gov.ee", + "riik.ee", + "lib.ee", + "med.ee", + "com.ee", + "pri.ee", + "aip.ee", + "org.ee", + "fie.ee", + "eg", + "com.eg", + "edu.eg", + "eun.eg", + "gov.eg", + "mil.eg", + "name.eg", + "net.eg", + "org.eg", + "sci.eg", + "*.er", + "es", + "com.es", + "nom.es", + "org.es", + "gob.es", + "edu.es", + "et", + "com.et", + "gov.et", + "org.et", + "edu.et", + "biz.et", + "name.et", + "info.et", + "net.et", + "eu", + "fi", + "aland.fi", + "*.fj", + "*.fk", + "fm", + "fo", + "fr", + "com.fr", + "asso.fr", + "nom.fr", + "prd.fr", + "presse.fr", + "tm.fr", + "aeroport.fr", + "assedic.fr", + "avocat.fr", + "avoues.fr", + "cci.fr", + "chambagri.fr", + "chirurgiens-dentistes.fr", + "experts-comptables.fr", + "geometre-expert.fr", + "gouv.fr", + "greta.fr", + "huissier-justice.fr", + "medecin.fr", + "notaires.fr", + "pharmacien.fr", + "port.fr", + "veterinaire.fr", + "ga", + "gb", + "gd", + "ge", + "com.ge", + "edu.ge", + "gov.ge", + "org.ge", + "mil.ge", + "net.ge", + "pvt.ge", + "gf", + "gg", + "co.gg", + "net.gg", + "org.gg", + "gh", + "com.gh", + "edu.gh", + "gov.gh", + 
"org.gh", + "mil.gh", + "gi", + "com.gi", + "ltd.gi", + "gov.gi", + "mod.gi", + "edu.gi", + "org.gi", + "gl", + "co.gl", + "com.gl", + "edu.gl", + "net.gl", + "org.gl", + "gm", + "gn", + "ac.gn", + "com.gn", + "edu.gn", + "gov.gn", + "org.gn", + "net.gn", + "gov", + "gp", + "com.gp", + "net.gp", + "mobi.gp", + "edu.gp", + "org.gp", + "asso.gp", + "gq", + "gr", + "com.gr", + "edu.gr", + "net.gr", + "org.gr", + "gov.gr", + "gs", + "gt", + "com.gt", + "edu.gt", + "gob.gt", + "ind.gt", + "mil.gt", + "net.gt", + "org.gt", + "*.gu", + "gw", + "gy", + "co.gy", + "com.gy", + "edu.gy", + "gov.gy", + "net.gy", + "org.gy", + "hk", + "com.hk", + "edu.hk", + "gov.hk", + "idv.hk", + "net.hk", + "org.hk", + "xn--55qx5d.hk", + "xn--wcvs22d.hk", + "xn--lcvr32d.hk", + "xn--mxtq1m.hk", + "xn--gmqw5a.hk", + "xn--ciqpn.hk", + "xn--gmq050i.hk", + "xn--zf0avx.hk", + "xn--io0a7i.hk", + "xn--mk0axi.hk", + "xn--od0alg.hk", + "xn--od0aq3b.hk", + "xn--tn0ag.hk", + "xn--uc0atv.hk", + "xn--uc0ay4a.hk", + "hm", + "hn", + "com.hn", + "edu.hn", + "org.hn", + "net.hn", + "mil.hn", + "gob.hn", + "hr", + "iz.hr", + "from.hr", + "name.hr", + "com.hr", + "ht", + "com.ht", + "shop.ht", + "firm.ht", + "info.ht", + "adult.ht", + "net.ht", + "pro.ht", + "org.ht", + "med.ht", + "art.ht", + "coop.ht", + "pol.ht", + "asso.ht", + "edu.ht", + "rel.ht", + "gouv.ht", + "perso.ht", + "hu", + "co.hu", + "info.hu", + "org.hu", + "priv.hu", + "sport.hu", + "tm.hu", + "2000.hu", + "agrar.hu", + "bolt.hu", + "casino.hu", + "city.hu", + "erotica.hu", + "erotika.hu", + "film.hu", + "forum.hu", + "games.hu", + "hotel.hu", + "ingatlan.hu", + "jogasz.hu", + "konyvelo.hu", + "lakas.hu", + "media.hu", + "news.hu", + "reklam.hu", + "sex.hu", + "shop.hu", + "suli.hu", + "szex.hu", + "tozsde.hu", + "utazas.hu", + "video.hu", + "id", + "ac.id", + "biz.id", + "co.id", + "desa.id", + "go.id", + "mil.id", + "my.id", + "net.id", + "or.id", + "sch.id", + "web.id", + "ie", + "gov.ie", + "il", + "ac.il", + "co.il", + "gov.il", + 
"idf.il", + "k12.il", + "muni.il", + "net.il", + "org.il", + "im", + "ac.im", + "co.im", + "com.im", + "ltd.co.im", + "net.im", + "org.im", + "plc.co.im", + "tt.im", + "tv.im", + "in", + "co.in", + "firm.in", + "net.in", + "org.in", + "gen.in", + "ind.in", + "nic.in", + "ac.in", + "edu.in", + "res.in", + "gov.in", + "mil.in", + "info", + "int", + "eu.int", + "io", + "com.io", + "iq", + "gov.iq", + "edu.iq", + "mil.iq", + "com.iq", + "org.iq", + "net.iq", + "ir", + "ac.ir", + "co.ir", + "gov.ir", + "id.ir", + "net.ir", + "org.ir", + "sch.ir", + "xn--mgba3a4f16a.ir", + "xn--mgba3a4fra.ir", + "is", + "net.is", + "com.is", + "edu.is", + "gov.is", + "org.is", + "int.is", + "it", + "gov.it", + "edu.it", + "abr.it", + "abruzzo.it", + "aosta-valley.it", + "aostavalley.it", + "bas.it", + "basilicata.it", + "cal.it", + "calabria.it", + "cam.it", + "campania.it", + "emilia-romagna.it", + "emiliaromagna.it", + "emr.it", + "friuli-v-giulia.it", + "friuli-ve-giulia.it", + "friuli-vegiulia.it", + "friuli-venezia-giulia.it", + "friuli-veneziagiulia.it", + "friuli-vgiulia.it", + "friuliv-giulia.it", + "friulive-giulia.it", + "friulivegiulia.it", + "friulivenezia-giulia.it", + "friuliveneziagiulia.it", + "friulivgiulia.it", + "fvg.it", + "laz.it", + "lazio.it", + "lig.it", + "liguria.it", + "lom.it", + "lombardia.it", + "lombardy.it", + "lucania.it", + "mar.it", + "marche.it", + "mol.it", + "molise.it", + "piedmont.it", + "piemonte.it", + "pmn.it", + "pug.it", + "puglia.it", + "sar.it", + "sardegna.it", + "sardinia.it", + "sic.it", + "sicilia.it", + "sicily.it", + "taa.it", + "tos.it", + "toscana.it", + "trentino-a-adige.it", + "trentino-aadige.it", + "trentino-alto-adige.it", + "trentino-altoadige.it", + "trentino-s-tirol.it", + "trentino-stirol.it", + "trentino-sud-tirol.it", + "trentino-sudtirol.it", + "trentino-sued-tirol.it", + "trentino-suedtirol.it", + "trentinoa-adige.it", + "trentinoaadige.it", + "trentinoalto-adige.it", + "trentinoaltoadige.it", + "trentinos-tirol.it", + 
"trentinostirol.it", + "trentinosud-tirol.it", + "trentinosudtirol.it", + "trentinosued-tirol.it", + "trentinosuedtirol.it", + "tuscany.it", + "umb.it", + "umbria.it", + "val-d-aosta.it", + "val-daosta.it", + "vald-aosta.it", + "valdaosta.it", + "valle-aosta.it", + "valle-d-aosta.it", + "valle-daosta.it", + "valleaosta.it", + "valled-aosta.it", + "valledaosta.it", + "vallee-aoste.it", + "valleeaoste.it", + "vao.it", + "vda.it", + "ven.it", + "veneto.it", + "ag.it", + "agrigento.it", + "al.it", + "alessandria.it", + "alto-adige.it", + "altoadige.it", + "an.it", + "ancona.it", + "andria-barletta-trani.it", + "andria-trani-barletta.it", + "andriabarlettatrani.it", + "andriatranibarletta.it", + "ao.it", + "aosta.it", + "aoste.it", + "ap.it", + "aq.it", + "aquila.it", + "ar.it", + "arezzo.it", + "ascoli-piceno.it", + "ascolipiceno.it", + "asti.it", + "at.it", + "av.it", + "avellino.it", + "ba.it", + "balsan.it", + "bari.it", + "barletta-trani-andria.it", + "barlettatraniandria.it", + "belluno.it", + "benevento.it", + "bergamo.it", + "bg.it", + "bi.it", + "biella.it", + "bl.it", + "bn.it", + "bo.it", + "bologna.it", + "bolzano.it", + "bozen.it", + "br.it", + "brescia.it", + "brindisi.it", + "bs.it", + "bt.it", + "bz.it", + "ca.it", + "cagliari.it", + "caltanissetta.it", + "campidano-medio.it", + "campidanomedio.it", + "campobasso.it", + "carbonia-iglesias.it", + "carboniaiglesias.it", + "carrara-massa.it", + "carraramassa.it", + "caserta.it", + "catania.it", + "catanzaro.it", + "cb.it", + "ce.it", + "cesena-forli.it", + "cesenaforli.it", + "ch.it", + "chieti.it", + "ci.it", + "cl.it", + "cn.it", + "co.it", + "como.it", + "cosenza.it", + "cr.it", + "cremona.it", + "crotone.it", + "cs.it", + "ct.it", + "cuneo.it", + "cz.it", + "dell-ogliastra.it", + "dellogliastra.it", + "en.it", + "enna.it", + "fc.it", + "fe.it", + "fermo.it", + "ferrara.it", + "fg.it", + "fi.it", + "firenze.it", + "florence.it", + "fm.it", + "foggia.it", + "forli-cesena.it", + "forlicesena.it", + 
"fr.it", + "frosinone.it", + "ge.it", + "genoa.it", + "genova.it", + "go.it", + "gorizia.it", + "gr.it", + "grosseto.it", + "iglesias-carbonia.it", + "iglesiascarbonia.it", + "im.it", + "imperia.it", + "is.it", + "isernia.it", + "kr.it", + "la-spezia.it", + "laquila.it", + "laspezia.it", + "latina.it", + "lc.it", + "le.it", + "lecce.it", + "lecco.it", + "li.it", + "livorno.it", + "lo.it", + "lodi.it", + "lt.it", + "lu.it", + "lucca.it", + "macerata.it", + "mantova.it", + "massa-carrara.it", + "massacarrara.it", + "matera.it", + "mb.it", + "mc.it", + "me.it", + "medio-campidano.it", + "mediocampidano.it", + "messina.it", + "mi.it", + "milan.it", + "milano.it", + "mn.it", + "mo.it", + "modena.it", + "monza-brianza.it", + "monza-e-della-brianza.it", + "monza.it", + "monzabrianza.it", + "monzaebrianza.it", + "monzaedellabrianza.it", + "ms.it", + "mt.it", + "na.it", + "naples.it", + "napoli.it", + "no.it", + "novara.it", + "nu.it", + "nuoro.it", + "og.it", + "ogliastra.it", + "olbia-tempio.it", + "olbiatempio.it", + "or.it", + "oristano.it", + "ot.it", + "pa.it", + "padova.it", + "padua.it", + "palermo.it", + "parma.it", + "pavia.it", + "pc.it", + "pd.it", + "pe.it", + "perugia.it", + "pesaro-urbino.it", + "pesarourbino.it", + "pescara.it", + "pg.it", + "pi.it", + "piacenza.it", + "pisa.it", + "pistoia.it", + "pn.it", + "po.it", + "pordenone.it", + "potenza.it", + "pr.it", + "prato.it", + "pt.it", + "pu.it", + "pv.it", + "pz.it", + "ra.it", + "ragusa.it", + "ravenna.it", + "rc.it", + "re.it", + "reggio-calabria.it", + "reggio-emilia.it", + "reggiocalabria.it", + "reggioemilia.it", + "rg.it", + "ri.it", + "rieti.it", + "rimini.it", + "rm.it", + "rn.it", + "ro.it", + "roma.it", + "rome.it", + "rovigo.it", + "sa.it", + "salerno.it", + "sassari.it", + "savona.it", + "si.it", + "siena.it", + "siracusa.it", + "so.it", + "sondrio.it", + "sp.it", + "sr.it", + "ss.it", + "suedtirol.it", + "sv.it", + "ta.it", + "taranto.it", + "te.it", + "tempio-olbia.it", + "tempioolbia.it", + 
"teramo.it", + "terni.it", + "tn.it", + "to.it", + "torino.it", + "tp.it", + "tr.it", + "trani-andria-barletta.it", + "trani-barletta-andria.it", + "traniandriabarletta.it", + "tranibarlettaandria.it", + "trapani.it", + "trentino.it", + "trento.it", + "treviso.it", + "trieste.it", + "ts.it", + "turin.it", + "tv.it", + "ud.it", + "udine.it", + "urbino-pesaro.it", + "urbinopesaro.it", + "va.it", + "varese.it", + "vb.it", + "vc.it", + "ve.it", + "venezia.it", + "venice.it", + "verbania.it", + "vercelli.it", + "verona.it", + "vi.it", + "vibo-valentia.it", + "vibovalentia.it", + "vicenza.it", + "viterbo.it", + "vr.it", + "vs.it", + "vt.it", + "vv.it", + "je", + "co.je", + "net.je", + "org.je", + "*.jm", + "jo", + "com.jo", + "org.jo", + "net.jo", + "edu.jo", + "sch.jo", + "gov.jo", + "mil.jo", + "name.jo", + "jobs", + "jp", + "ac.jp", + "ad.jp", + "co.jp", + "ed.jp", + "go.jp", + "gr.jp", + "lg.jp", + "ne.jp", + "or.jp", + "aichi.jp", + "akita.jp", + "aomori.jp", + "chiba.jp", + "ehime.jp", + "fukui.jp", + "fukuoka.jp", + "fukushima.jp", + "gifu.jp", + "gunma.jp", + "hiroshima.jp", + "hokkaido.jp", + "hyogo.jp", + "ibaraki.jp", + "ishikawa.jp", + "iwate.jp", + "kagawa.jp", + "kagoshima.jp", + "kanagawa.jp", + "kochi.jp", + "kumamoto.jp", + "kyoto.jp", + "mie.jp", + "miyagi.jp", + "miyazaki.jp", + "nagano.jp", + "nagasaki.jp", + "nara.jp", + "niigata.jp", + "oita.jp", + "okayama.jp", + "okinawa.jp", + "osaka.jp", + "saga.jp", + "saitama.jp", + "shiga.jp", + "shimane.jp", + "shizuoka.jp", + "tochigi.jp", + "tokushima.jp", + "tokyo.jp", + "tottori.jp", + "toyama.jp", + "wakayama.jp", + "yamagata.jp", + "yamaguchi.jp", + "yamanashi.jp", + "xn--4pvxs.jp", + "xn--vgu402c.jp", + "xn--c3s14m.jp", + "xn--f6qx53a.jp", + "xn--8pvr4u.jp", + "xn--uist22h.jp", + "xn--djrs72d6uy.jp", + "xn--mkru45i.jp", + "xn--0trq7p7nn.jp", + "xn--8ltr62k.jp", + "xn--2m4a15e.jp", + "xn--efvn9s.jp", + "xn--32vp30h.jp", + "xn--4it797k.jp", + "xn--1lqs71d.jp", + "xn--5rtp49c.jp", + "xn--5js045d.jp", + 
"xn--ehqz56n.jp", + "xn--1lqs03n.jp", + "xn--qqqt11m.jp", + "xn--kbrq7o.jp", + "xn--pssu33l.jp", + "xn--ntsq17g.jp", + "xn--uisz3g.jp", + "xn--6btw5a.jp", + "xn--1ctwo.jp", + "xn--6orx2r.jp", + "xn--rht61e.jp", + "xn--rht27z.jp", + "xn--djty4k.jp", + "xn--nit225k.jp", + "xn--rht3d.jp", + "xn--klty5x.jp", + "xn--kltx9a.jp", + "xn--kltp7d.jp", + "xn--uuwu58a.jp", + "xn--zbx025d.jp", + "xn--ntso0iqx3a.jp", + "xn--elqq16h.jp", + "xn--4it168d.jp", + "xn--klt787d.jp", + "xn--rny31h.jp", + "xn--7t0a264c.jp", + "xn--5rtq34k.jp", + "xn--k7yn95e.jp", + "xn--tor131o.jp", + "xn--d5qv7z876c.jp", + "*.kawasaki.jp", + "*.kitakyushu.jp", + "*.kobe.jp", + "*.nagoya.jp", + "*.sapporo.jp", + "*.sendai.jp", + "*.yokohama.jp", + "!city.kawasaki.jp", + "!city.kitakyushu.jp", + "!city.kobe.jp", + "!city.nagoya.jp", + "!city.sapporo.jp", + "!city.sendai.jp", + "!city.yokohama.jp", + "aisai.aichi.jp", + "ama.aichi.jp", + "anjo.aichi.jp", + "asuke.aichi.jp", + "chiryu.aichi.jp", + "chita.aichi.jp", + "fuso.aichi.jp", + "gamagori.aichi.jp", + "handa.aichi.jp", + "hazu.aichi.jp", + "hekinan.aichi.jp", + "higashiura.aichi.jp", + "ichinomiya.aichi.jp", + "inazawa.aichi.jp", + "inuyama.aichi.jp", + "isshiki.aichi.jp", + "iwakura.aichi.jp", + "kanie.aichi.jp", + "kariya.aichi.jp", + "kasugai.aichi.jp", + "kira.aichi.jp", + "kiyosu.aichi.jp", + "komaki.aichi.jp", + "konan.aichi.jp", + "kota.aichi.jp", + "mihama.aichi.jp", + "miyoshi.aichi.jp", + "nishio.aichi.jp", + "nisshin.aichi.jp", + "obu.aichi.jp", + "oguchi.aichi.jp", + "oharu.aichi.jp", + "okazaki.aichi.jp", + "owariasahi.aichi.jp", + "seto.aichi.jp", + "shikatsu.aichi.jp", + "shinshiro.aichi.jp", + "shitara.aichi.jp", + "tahara.aichi.jp", + "takahama.aichi.jp", + "tobishima.aichi.jp", + "toei.aichi.jp", + "togo.aichi.jp", + "tokai.aichi.jp", + "tokoname.aichi.jp", + "toyoake.aichi.jp", + "toyohashi.aichi.jp", + "toyokawa.aichi.jp", + "toyone.aichi.jp", + "toyota.aichi.jp", + "tsushima.aichi.jp", + "yatomi.aichi.jp", + "akita.akita.jp", + 
"daisen.akita.jp", + "fujisato.akita.jp", + "gojome.akita.jp", + "hachirogata.akita.jp", + "happou.akita.jp", + "higashinaruse.akita.jp", + "honjo.akita.jp", + "honjyo.akita.jp", + "ikawa.akita.jp", + "kamikoani.akita.jp", + "kamioka.akita.jp", + "katagami.akita.jp", + "kazuno.akita.jp", + "kitaakita.akita.jp", + "kosaka.akita.jp", + "kyowa.akita.jp", + "misato.akita.jp", + "mitane.akita.jp", + "moriyoshi.akita.jp", + "nikaho.akita.jp", + "noshiro.akita.jp", + "odate.akita.jp", + "oga.akita.jp", + "ogata.akita.jp", + "semboku.akita.jp", + "yokote.akita.jp", + "yurihonjo.akita.jp", + "aomori.aomori.jp", + "gonohe.aomori.jp", + "hachinohe.aomori.jp", + "hashikami.aomori.jp", + "hiranai.aomori.jp", + "hirosaki.aomori.jp", + "itayanagi.aomori.jp", + "kuroishi.aomori.jp", + "misawa.aomori.jp", + "mutsu.aomori.jp", + "nakadomari.aomori.jp", + "noheji.aomori.jp", + "oirase.aomori.jp", + "owani.aomori.jp", + "rokunohe.aomori.jp", + "sannohe.aomori.jp", + "shichinohe.aomori.jp", + "shingo.aomori.jp", + "takko.aomori.jp", + "towada.aomori.jp", + "tsugaru.aomori.jp", + "tsuruta.aomori.jp", + "abiko.chiba.jp", + "asahi.chiba.jp", + "chonan.chiba.jp", + "chosei.chiba.jp", + "choshi.chiba.jp", + "chuo.chiba.jp", + "funabashi.chiba.jp", + "futtsu.chiba.jp", + "hanamigawa.chiba.jp", + "ichihara.chiba.jp", + "ichikawa.chiba.jp", + "ichinomiya.chiba.jp", + "inzai.chiba.jp", + "isumi.chiba.jp", + "kamagaya.chiba.jp", + "kamogawa.chiba.jp", + "kashiwa.chiba.jp", + "katori.chiba.jp", + "katsuura.chiba.jp", + "kimitsu.chiba.jp", + "kisarazu.chiba.jp", + "kozaki.chiba.jp", + "kujukuri.chiba.jp", + "kyonan.chiba.jp", + "matsudo.chiba.jp", + "midori.chiba.jp", + "mihama.chiba.jp", + "minamiboso.chiba.jp", + "mobara.chiba.jp", + "mutsuzawa.chiba.jp", + "nagara.chiba.jp", + "nagareyama.chiba.jp", + "narashino.chiba.jp", + "narita.chiba.jp", + "noda.chiba.jp", + "oamishirasato.chiba.jp", + "omigawa.chiba.jp", + "onjuku.chiba.jp", + "otaki.chiba.jp", + "sakae.chiba.jp", + "sakura.chiba.jp", + 
"shimofusa.chiba.jp", + "shirako.chiba.jp", + "shiroi.chiba.jp", + "shisui.chiba.jp", + "sodegaura.chiba.jp", + "sosa.chiba.jp", + "tako.chiba.jp", + "tateyama.chiba.jp", + "togane.chiba.jp", + "tohnosho.chiba.jp", + "tomisato.chiba.jp", + "urayasu.chiba.jp", + "yachimata.chiba.jp", + "yachiyo.chiba.jp", + "yokaichiba.chiba.jp", + "yokoshibahikari.chiba.jp", + "yotsukaido.chiba.jp", + "ainan.ehime.jp", + "honai.ehime.jp", + "ikata.ehime.jp", + "imabari.ehime.jp", + "iyo.ehime.jp", + "kamijima.ehime.jp", + "kihoku.ehime.jp", + "kumakogen.ehime.jp", + "masaki.ehime.jp", + "matsuno.ehime.jp", + "matsuyama.ehime.jp", + "namikata.ehime.jp", + "niihama.ehime.jp", + "ozu.ehime.jp", + "saijo.ehime.jp", + "seiyo.ehime.jp", + "shikokuchuo.ehime.jp", + "tobe.ehime.jp", + "toon.ehime.jp", + "uchiko.ehime.jp", + "uwajima.ehime.jp", + "yawatahama.ehime.jp", + "echizen.fukui.jp", + "eiheiji.fukui.jp", + "fukui.fukui.jp", + "ikeda.fukui.jp", + "katsuyama.fukui.jp", + "mihama.fukui.jp", + "minamiechizen.fukui.jp", + "obama.fukui.jp", + "ohi.fukui.jp", + "ono.fukui.jp", + "sabae.fukui.jp", + "sakai.fukui.jp", + "takahama.fukui.jp", + "tsuruga.fukui.jp", + "wakasa.fukui.jp", + "ashiya.fukuoka.jp", + "buzen.fukuoka.jp", + "chikugo.fukuoka.jp", + "chikuho.fukuoka.jp", + "chikujo.fukuoka.jp", + "chikushino.fukuoka.jp", + "chikuzen.fukuoka.jp", + "chuo.fukuoka.jp", + "dazaifu.fukuoka.jp", + "fukuchi.fukuoka.jp", + "hakata.fukuoka.jp", + "higashi.fukuoka.jp", + "hirokawa.fukuoka.jp", + "hisayama.fukuoka.jp", + "iizuka.fukuoka.jp", + "inatsuki.fukuoka.jp", + "kaho.fukuoka.jp", + "kasuga.fukuoka.jp", + "kasuya.fukuoka.jp", + "kawara.fukuoka.jp", + "keisen.fukuoka.jp", + "koga.fukuoka.jp", + "kurate.fukuoka.jp", + "kurogi.fukuoka.jp", + "kurume.fukuoka.jp", + "minami.fukuoka.jp", + "miyako.fukuoka.jp", + "miyama.fukuoka.jp", + "miyawaka.fukuoka.jp", + "mizumaki.fukuoka.jp", + "munakata.fukuoka.jp", + "nakagawa.fukuoka.jp", + "nakama.fukuoka.jp", + "nishi.fukuoka.jp", + "nogata.fukuoka.jp", + 
"ogori.fukuoka.jp", + "okagaki.fukuoka.jp", + "okawa.fukuoka.jp", + "oki.fukuoka.jp", + "omuta.fukuoka.jp", + "onga.fukuoka.jp", + "onojo.fukuoka.jp", + "oto.fukuoka.jp", + "saigawa.fukuoka.jp", + "sasaguri.fukuoka.jp", + "shingu.fukuoka.jp", + "shinyoshitomi.fukuoka.jp", + "shonai.fukuoka.jp", + "soeda.fukuoka.jp", + "sue.fukuoka.jp", + "tachiarai.fukuoka.jp", + "tagawa.fukuoka.jp", + "takata.fukuoka.jp", + "toho.fukuoka.jp", + "toyotsu.fukuoka.jp", + "tsuiki.fukuoka.jp", + "ukiha.fukuoka.jp", + "umi.fukuoka.jp", + "usui.fukuoka.jp", + "yamada.fukuoka.jp", + "yame.fukuoka.jp", + "yanagawa.fukuoka.jp", + "yukuhashi.fukuoka.jp", + "aizubange.fukushima.jp", + "aizumisato.fukushima.jp", + "aizuwakamatsu.fukushima.jp", + "asakawa.fukushima.jp", + "bandai.fukushima.jp", + "date.fukushima.jp", + "fukushima.fukushima.jp", + "furudono.fukushima.jp", + "futaba.fukushima.jp", + "hanawa.fukushima.jp", + "higashi.fukushima.jp", + "hirata.fukushima.jp", + "hirono.fukushima.jp", + "iitate.fukushima.jp", + "inawashiro.fukushima.jp", + "ishikawa.fukushima.jp", + "iwaki.fukushima.jp", + "izumizaki.fukushima.jp", + "kagamiishi.fukushima.jp", + "kaneyama.fukushima.jp", + "kawamata.fukushima.jp", + "kitakata.fukushima.jp", + "kitashiobara.fukushima.jp", + "koori.fukushima.jp", + "koriyama.fukushima.jp", + "kunimi.fukushima.jp", + "miharu.fukushima.jp", + "mishima.fukushima.jp", + "namie.fukushima.jp", + "nango.fukushima.jp", + "nishiaizu.fukushima.jp", + "nishigo.fukushima.jp", + "okuma.fukushima.jp", + "omotego.fukushima.jp", + "ono.fukushima.jp", + "otama.fukushima.jp", + "samegawa.fukushima.jp", + "shimogo.fukushima.jp", + "shirakawa.fukushima.jp", + "showa.fukushima.jp", + "soma.fukushima.jp", + "sukagawa.fukushima.jp", + "taishin.fukushima.jp", + "tamakawa.fukushima.jp", + "tanagura.fukushima.jp", + "tenei.fukushima.jp", + "yabuki.fukushima.jp", + "yamato.fukushima.jp", + "yamatsuri.fukushima.jp", + "yanaizu.fukushima.jp", + "yugawa.fukushima.jp", + "anpachi.gifu.jp", + 
"ena.gifu.jp", + "gifu.gifu.jp", + "ginan.gifu.jp", + "godo.gifu.jp", + "gujo.gifu.jp", + "hashima.gifu.jp", + "hichiso.gifu.jp", + "hida.gifu.jp", + "higashishirakawa.gifu.jp", + "ibigawa.gifu.jp", + "ikeda.gifu.jp", + "kakamigahara.gifu.jp", + "kani.gifu.jp", + "kasahara.gifu.jp", + "kasamatsu.gifu.jp", + "kawaue.gifu.jp", + "kitagata.gifu.jp", + "mino.gifu.jp", + "minokamo.gifu.jp", + "mitake.gifu.jp", + "mizunami.gifu.jp", + "motosu.gifu.jp", + "nakatsugawa.gifu.jp", + "ogaki.gifu.jp", + "sakahogi.gifu.jp", + "seki.gifu.jp", + "sekigahara.gifu.jp", + "shirakawa.gifu.jp", + "tajimi.gifu.jp", + "takayama.gifu.jp", + "tarui.gifu.jp", + "toki.gifu.jp", + "tomika.gifu.jp", + "wanouchi.gifu.jp", + "yamagata.gifu.jp", + "yaotsu.gifu.jp", + "yoro.gifu.jp", + "annaka.gunma.jp", + "chiyoda.gunma.jp", + "fujioka.gunma.jp", + "higashiagatsuma.gunma.jp", + "isesaki.gunma.jp", + "itakura.gunma.jp", + "kanna.gunma.jp", + "kanra.gunma.jp", + "katashina.gunma.jp", + "kawaba.gunma.jp", + "kiryu.gunma.jp", + "kusatsu.gunma.jp", + "maebashi.gunma.jp", + "meiwa.gunma.jp", + "midori.gunma.jp", + "minakami.gunma.jp", + "naganohara.gunma.jp", + "nakanojo.gunma.jp", + "nanmoku.gunma.jp", + "numata.gunma.jp", + "oizumi.gunma.jp", + "ora.gunma.jp", + "ota.gunma.jp", + "shibukawa.gunma.jp", + "shimonita.gunma.jp", + "shinto.gunma.jp", + "showa.gunma.jp", + "takasaki.gunma.jp", + "takayama.gunma.jp", + "tamamura.gunma.jp", + "tatebayashi.gunma.jp", + "tomioka.gunma.jp", + "tsukiyono.gunma.jp", + "tsumagoi.gunma.jp", + "ueno.gunma.jp", + "yoshioka.gunma.jp", + "asaminami.hiroshima.jp", + "daiwa.hiroshima.jp", + "etajima.hiroshima.jp", + "fuchu.hiroshima.jp", + "fukuyama.hiroshima.jp", + "hatsukaichi.hiroshima.jp", + "higashihiroshima.hiroshima.jp", + "hongo.hiroshima.jp", + "jinsekikogen.hiroshima.jp", + "kaita.hiroshima.jp", + "kui.hiroshima.jp", + "kumano.hiroshima.jp", + "kure.hiroshima.jp", + "mihara.hiroshima.jp", + "miyoshi.hiroshima.jp", + "naka.hiroshima.jp", + 
"onomichi.hiroshima.jp", + "osakikamijima.hiroshima.jp", + "otake.hiroshima.jp", + "saka.hiroshima.jp", + "sera.hiroshima.jp", + "seranishi.hiroshima.jp", + "shinichi.hiroshima.jp", + "shobara.hiroshima.jp", + "takehara.hiroshima.jp", + "abashiri.hokkaido.jp", + "abira.hokkaido.jp", + "aibetsu.hokkaido.jp", + "akabira.hokkaido.jp", + "akkeshi.hokkaido.jp", + "asahikawa.hokkaido.jp", + "ashibetsu.hokkaido.jp", + "ashoro.hokkaido.jp", + "assabu.hokkaido.jp", + "atsuma.hokkaido.jp", + "bibai.hokkaido.jp", + "biei.hokkaido.jp", + "bifuka.hokkaido.jp", + "bihoro.hokkaido.jp", + "biratori.hokkaido.jp", + "chippubetsu.hokkaido.jp", + "chitose.hokkaido.jp", + "date.hokkaido.jp", + "ebetsu.hokkaido.jp", + "embetsu.hokkaido.jp", + "eniwa.hokkaido.jp", + "erimo.hokkaido.jp", + "esan.hokkaido.jp", + "esashi.hokkaido.jp", + "fukagawa.hokkaido.jp", + "fukushima.hokkaido.jp", + "furano.hokkaido.jp", + "furubira.hokkaido.jp", + "haboro.hokkaido.jp", + "hakodate.hokkaido.jp", + "hamatonbetsu.hokkaido.jp", + "hidaka.hokkaido.jp", + "higashikagura.hokkaido.jp", + "higashikawa.hokkaido.jp", + "hiroo.hokkaido.jp", + "hokuryu.hokkaido.jp", + "hokuto.hokkaido.jp", + "honbetsu.hokkaido.jp", + "horokanai.hokkaido.jp", + "horonobe.hokkaido.jp", + "ikeda.hokkaido.jp", + "imakane.hokkaido.jp", + "ishikari.hokkaido.jp", + "iwamizawa.hokkaido.jp", + "iwanai.hokkaido.jp", + "kamifurano.hokkaido.jp", + "kamikawa.hokkaido.jp", + "kamishihoro.hokkaido.jp", + "kamisunagawa.hokkaido.jp", + "kamoenai.hokkaido.jp", + "kayabe.hokkaido.jp", + "kembuchi.hokkaido.jp", + "kikonai.hokkaido.jp", + "kimobetsu.hokkaido.jp", + "kitahiroshima.hokkaido.jp", + "kitami.hokkaido.jp", + "kiyosato.hokkaido.jp", + "koshimizu.hokkaido.jp", + "kunneppu.hokkaido.jp", + "kuriyama.hokkaido.jp", + "kuromatsunai.hokkaido.jp", + "kushiro.hokkaido.jp", + "kutchan.hokkaido.jp", + "kyowa.hokkaido.jp", + "mashike.hokkaido.jp", + "matsumae.hokkaido.jp", + "mikasa.hokkaido.jp", + "minamifurano.hokkaido.jp", + "mombetsu.hokkaido.jp", 
+ "moseushi.hokkaido.jp", + "mukawa.hokkaido.jp", + "muroran.hokkaido.jp", + "naie.hokkaido.jp", + "nakagawa.hokkaido.jp", + "nakasatsunai.hokkaido.jp", + "nakatombetsu.hokkaido.jp", + "nanae.hokkaido.jp", + "nanporo.hokkaido.jp", + "nayoro.hokkaido.jp", + "nemuro.hokkaido.jp", + "niikappu.hokkaido.jp", + "niki.hokkaido.jp", + "nishiokoppe.hokkaido.jp", + "noboribetsu.hokkaido.jp", + "numata.hokkaido.jp", + "obihiro.hokkaido.jp", + "obira.hokkaido.jp", + "oketo.hokkaido.jp", + "okoppe.hokkaido.jp", + "otaru.hokkaido.jp", + "otobe.hokkaido.jp", + "otofuke.hokkaido.jp", + "otoineppu.hokkaido.jp", + "oumu.hokkaido.jp", + "ozora.hokkaido.jp", + "pippu.hokkaido.jp", + "rankoshi.hokkaido.jp", + "rebun.hokkaido.jp", + "rikubetsu.hokkaido.jp", + "rishiri.hokkaido.jp", + "rishirifuji.hokkaido.jp", + "saroma.hokkaido.jp", + "sarufutsu.hokkaido.jp", + "shakotan.hokkaido.jp", + "shari.hokkaido.jp", + "shibecha.hokkaido.jp", + "shibetsu.hokkaido.jp", + "shikabe.hokkaido.jp", + "shikaoi.hokkaido.jp", + "shimamaki.hokkaido.jp", + "shimizu.hokkaido.jp", + "shimokawa.hokkaido.jp", + "shinshinotsu.hokkaido.jp", + "shintoku.hokkaido.jp", + "shiranuka.hokkaido.jp", + "shiraoi.hokkaido.jp", + "shiriuchi.hokkaido.jp", + "sobetsu.hokkaido.jp", + "sunagawa.hokkaido.jp", + "taiki.hokkaido.jp", + "takasu.hokkaido.jp", + "takikawa.hokkaido.jp", + "takinoue.hokkaido.jp", + "teshikaga.hokkaido.jp", + "tobetsu.hokkaido.jp", + "tohma.hokkaido.jp", + "tomakomai.hokkaido.jp", + "tomari.hokkaido.jp", + "toya.hokkaido.jp", + "toyako.hokkaido.jp", + "toyotomi.hokkaido.jp", + "toyoura.hokkaido.jp", + "tsubetsu.hokkaido.jp", + "tsukigata.hokkaido.jp", + "urakawa.hokkaido.jp", + "urausu.hokkaido.jp", + "uryu.hokkaido.jp", + "utashinai.hokkaido.jp", + "wakkanai.hokkaido.jp", + "wassamu.hokkaido.jp", + "yakumo.hokkaido.jp", + "yoichi.hokkaido.jp", + "aioi.hyogo.jp", + "akashi.hyogo.jp", + "ako.hyogo.jp", + "amagasaki.hyogo.jp", + "aogaki.hyogo.jp", + "asago.hyogo.jp", + "ashiya.hyogo.jp", + 
"awaji.hyogo.jp", + "fukusaki.hyogo.jp", + "goshiki.hyogo.jp", + "harima.hyogo.jp", + "himeji.hyogo.jp", + "ichikawa.hyogo.jp", + "inagawa.hyogo.jp", + "itami.hyogo.jp", + "kakogawa.hyogo.jp", + "kamigori.hyogo.jp", + "kamikawa.hyogo.jp", + "kasai.hyogo.jp", + "kasuga.hyogo.jp", + "kawanishi.hyogo.jp", + "miki.hyogo.jp", + "minamiawaji.hyogo.jp", + "nishinomiya.hyogo.jp", + "nishiwaki.hyogo.jp", + "ono.hyogo.jp", + "sanda.hyogo.jp", + "sannan.hyogo.jp", + "sasayama.hyogo.jp", + "sayo.hyogo.jp", + "shingu.hyogo.jp", + "shinonsen.hyogo.jp", + "shiso.hyogo.jp", + "sumoto.hyogo.jp", + "taishi.hyogo.jp", + "taka.hyogo.jp", + "takarazuka.hyogo.jp", + "takasago.hyogo.jp", + "takino.hyogo.jp", + "tamba.hyogo.jp", + "tatsuno.hyogo.jp", + "toyooka.hyogo.jp", + "yabu.hyogo.jp", + "yashiro.hyogo.jp", + "yoka.hyogo.jp", + "yokawa.hyogo.jp", + "ami.ibaraki.jp", + "asahi.ibaraki.jp", + "bando.ibaraki.jp", + "chikusei.ibaraki.jp", + "daigo.ibaraki.jp", + "fujishiro.ibaraki.jp", + "hitachi.ibaraki.jp", + "hitachinaka.ibaraki.jp", + "hitachiomiya.ibaraki.jp", + "hitachiota.ibaraki.jp", + "ibaraki.ibaraki.jp", + "ina.ibaraki.jp", + "inashiki.ibaraki.jp", + "itako.ibaraki.jp", + "iwama.ibaraki.jp", + "joso.ibaraki.jp", + "kamisu.ibaraki.jp", + "kasama.ibaraki.jp", + "kashima.ibaraki.jp", + "kasumigaura.ibaraki.jp", + "koga.ibaraki.jp", + "miho.ibaraki.jp", + "mito.ibaraki.jp", + "moriya.ibaraki.jp", + "naka.ibaraki.jp", + "namegata.ibaraki.jp", + "oarai.ibaraki.jp", + "ogawa.ibaraki.jp", + "omitama.ibaraki.jp", + "ryugasaki.ibaraki.jp", + "sakai.ibaraki.jp", + "sakuragawa.ibaraki.jp", + "shimodate.ibaraki.jp", + "shimotsuma.ibaraki.jp", + "shirosato.ibaraki.jp", + "sowa.ibaraki.jp", + "suifu.ibaraki.jp", + "takahagi.ibaraki.jp", + "tamatsukuri.ibaraki.jp", + "tokai.ibaraki.jp", + "tomobe.ibaraki.jp", + "tone.ibaraki.jp", + "toride.ibaraki.jp", + "tsuchiura.ibaraki.jp", + "tsukuba.ibaraki.jp", + "uchihara.ibaraki.jp", + "ushiku.ibaraki.jp", + "yachiyo.ibaraki.jp", + 
"yamagata.ibaraki.jp", + "yawara.ibaraki.jp", + "yuki.ibaraki.jp", + "anamizu.ishikawa.jp", + "hakui.ishikawa.jp", + "hakusan.ishikawa.jp", + "kaga.ishikawa.jp", + "kahoku.ishikawa.jp", + "kanazawa.ishikawa.jp", + "kawakita.ishikawa.jp", + "komatsu.ishikawa.jp", + "nakanoto.ishikawa.jp", + "nanao.ishikawa.jp", + "nomi.ishikawa.jp", + "nonoichi.ishikawa.jp", + "noto.ishikawa.jp", + "shika.ishikawa.jp", + "suzu.ishikawa.jp", + "tsubata.ishikawa.jp", + "tsurugi.ishikawa.jp", + "uchinada.ishikawa.jp", + "wajima.ishikawa.jp", + "fudai.iwate.jp", + "fujisawa.iwate.jp", + "hanamaki.iwate.jp", + "hiraizumi.iwate.jp", + "hirono.iwate.jp", + "ichinohe.iwate.jp", + "ichinoseki.iwate.jp", + "iwaizumi.iwate.jp", + "iwate.iwate.jp", + "joboji.iwate.jp", + "kamaishi.iwate.jp", + "kanegasaki.iwate.jp", + "karumai.iwate.jp", + "kawai.iwate.jp", + "kitakami.iwate.jp", + "kuji.iwate.jp", + "kunohe.iwate.jp", + "kuzumaki.iwate.jp", + "miyako.iwate.jp", + "mizusawa.iwate.jp", + "morioka.iwate.jp", + "ninohe.iwate.jp", + "noda.iwate.jp", + "ofunato.iwate.jp", + "oshu.iwate.jp", + "otsuchi.iwate.jp", + "rikuzentakata.iwate.jp", + "shiwa.iwate.jp", + "shizukuishi.iwate.jp", + "sumita.iwate.jp", + "tanohata.iwate.jp", + "tono.iwate.jp", + "yahaba.iwate.jp", + "yamada.iwate.jp", + "ayagawa.kagawa.jp", + "higashikagawa.kagawa.jp", + "kanonji.kagawa.jp", + "kotohira.kagawa.jp", + "manno.kagawa.jp", + "marugame.kagawa.jp", + "mitoyo.kagawa.jp", + "naoshima.kagawa.jp", + "sanuki.kagawa.jp", + "tadotsu.kagawa.jp", + "takamatsu.kagawa.jp", + "tonosho.kagawa.jp", + "uchinomi.kagawa.jp", + "utazu.kagawa.jp", + "zentsuji.kagawa.jp", + "akune.kagoshima.jp", + "amami.kagoshima.jp", + "hioki.kagoshima.jp", + "isa.kagoshima.jp", + "isen.kagoshima.jp", + "izumi.kagoshima.jp", + "kagoshima.kagoshima.jp", + "kanoya.kagoshima.jp", + "kawanabe.kagoshima.jp", + "kinko.kagoshima.jp", + "kouyama.kagoshima.jp", + "makurazaki.kagoshima.jp", + "matsumoto.kagoshima.jp", + "minamitane.kagoshima.jp", + 
"nakatane.kagoshima.jp", + "nishinoomote.kagoshima.jp", + "satsumasendai.kagoshima.jp", + "soo.kagoshima.jp", + "tarumizu.kagoshima.jp", + "yusui.kagoshima.jp", + "aikawa.kanagawa.jp", + "atsugi.kanagawa.jp", + "ayase.kanagawa.jp", + "chigasaki.kanagawa.jp", + "ebina.kanagawa.jp", + "fujisawa.kanagawa.jp", + "hadano.kanagawa.jp", + "hakone.kanagawa.jp", + "hiratsuka.kanagawa.jp", + "isehara.kanagawa.jp", + "kaisei.kanagawa.jp", + "kamakura.kanagawa.jp", + "kiyokawa.kanagawa.jp", + "matsuda.kanagawa.jp", + "minamiashigara.kanagawa.jp", + "miura.kanagawa.jp", + "nakai.kanagawa.jp", + "ninomiya.kanagawa.jp", + "odawara.kanagawa.jp", + "oi.kanagawa.jp", + "oiso.kanagawa.jp", + "sagamihara.kanagawa.jp", + "samukawa.kanagawa.jp", + "tsukui.kanagawa.jp", + "yamakita.kanagawa.jp", + "yamato.kanagawa.jp", + "yokosuka.kanagawa.jp", + "yugawara.kanagawa.jp", + "zama.kanagawa.jp", + "zushi.kanagawa.jp", + "aki.kochi.jp", + "geisei.kochi.jp", + "hidaka.kochi.jp", + "higashitsuno.kochi.jp", + "ino.kochi.jp", + "kagami.kochi.jp", + "kami.kochi.jp", + "kitagawa.kochi.jp", + "kochi.kochi.jp", + "mihara.kochi.jp", + "motoyama.kochi.jp", + "muroto.kochi.jp", + "nahari.kochi.jp", + "nakamura.kochi.jp", + "nankoku.kochi.jp", + "nishitosa.kochi.jp", + "niyodogawa.kochi.jp", + "ochi.kochi.jp", + "okawa.kochi.jp", + "otoyo.kochi.jp", + "otsuki.kochi.jp", + "sakawa.kochi.jp", + "sukumo.kochi.jp", + "susaki.kochi.jp", + "tosa.kochi.jp", + "tosashimizu.kochi.jp", + "toyo.kochi.jp", + "tsuno.kochi.jp", + "umaji.kochi.jp", + "yasuda.kochi.jp", + "yusuhara.kochi.jp", + "amakusa.kumamoto.jp", + "arao.kumamoto.jp", + "aso.kumamoto.jp", + "choyo.kumamoto.jp", + "gyokuto.kumamoto.jp", + "hitoyoshi.kumamoto.jp", + "kamiamakusa.kumamoto.jp", + "kashima.kumamoto.jp", + "kikuchi.kumamoto.jp", + "kosa.kumamoto.jp", + "kumamoto.kumamoto.jp", + "mashiki.kumamoto.jp", + "mifune.kumamoto.jp", + "minamata.kumamoto.jp", + "minamioguni.kumamoto.jp", + "nagasu.kumamoto.jp", + "nishihara.kumamoto.jp", + 
"oguni.kumamoto.jp", + "ozu.kumamoto.jp", + "sumoto.kumamoto.jp", + "takamori.kumamoto.jp", + "uki.kumamoto.jp", + "uto.kumamoto.jp", + "yamaga.kumamoto.jp", + "yamato.kumamoto.jp", + "yatsushiro.kumamoto.jp", + "ayabe.kyoto.jp", + "fukuchiyama.kyoto.jp", + "higashiyama.kyoto.jp", + "ide.kyoto.jp", + "ine.kyoto.jp", + "joyo.kyoto.jp", + "kameoka.kyoto.jp", + "kamo.kyoto.jp", + "kita.kyoto.jp", + "kizu.kyoto.jp", + "kumiyama.kyoto.jp", + "kyotamba.kyoto.jp", + "kyotanabe.kyoto.jp", + "kyotango.kyoto.jp", + "maizuru.kyoto.jp", + "minami.kyoto.jp", + "minamiyamashiro.kyoto.jp", + "miyazu.kyoto.jp", + "muko.kyoto.jp", + "nagaokakyo.kyoto.jp", + "nakagyo.kyoto.jp", + "nantan.kyoto.jp", + "oyamazaki.kyoto.jp", + "sakyo.kyoto.jp", + "seika.kyoto.jp", + "tanabe.kyoto.jp", + "uji.kyoto.jp", + "ujitawara.kyoto.jp", + "wazuka.kyoto.jp", + "yamashina.kyoto.jp", + "yawata.kyoto.jp", + "asahi.mie.jp", + "inabe.mie.jp", + "ise.mie.jp", + "kameyama.mie.jp", + "kawagoe.mie.jp", + "kiho.mie.jp", + "kisosaki.mie.jp", + "kiwa.mie.jp", + "komono.mie.jp", + "kumano.mie.jp", + "kuwana.mie.jp", + "matsusaka.mie.jp", + "meiwa.mie.jp", + "mihama.mie.jp", + "minamiise.mie.jp", + "misugi.mie.jp", + "miyama.mie.jp", + "nabari.mie.jp", + "shima.mie.jp", + "suzuka.mie.jp", + "tado.mie.jp", + "taiki.mie.jp", + "taki.mie.jp", + "tamaki.mie.jp", + "toba.mie.jp", + "tsu.mie.jp", + "udono.mie.jp", + "ureshino.mie.jp", + "watarai.mie.jp", + "yokkaichi.mie.jp", + "furukawa.miyagi.jp", + "higashimatsushima.miyagi.jp", + "ishinomaki.miyagi.jp", + "iwanuma.miyagi.jp", + "kakuda.miyagi.jp", + "kami.miyagi.jp", + "kawasaki.miyagi.jp", + "kesennuma.miyagi.jp", + "marumori.miyagi.jp", + "matsushima.miyagi.jp", + "minamisanriku.miyagi.jp", + "misato.miyagi.jp", + "murata.miyagi.jp", + "natori.miyagi.jp", + "ogawara.miyagi.jp", + "ohira.miyagi.jp", + "onagawa.miyagi.jp", + "osaki.miyagi.jp", + "rifu.miyagi.jp", + "semine.miyagi.jp", + "shibata.miyagi.jp", + "shichikashuku.miyagi.jp", + "shikama.miyagi.jp", + 
"shiogama.miyagi.jp", + "shiroishi.miyagi.jp", + "tagajo.miyagi.jp", + "taiwa.miyagi.jp", + "tome.miyagi.jp", + "tomiya.miyagi.jp", + "wakuya.miyagi.jp", + "watari.miyagi.jp", + "yamamoto.miyagi.jp", + "zao.miyagi.jp", + "aya.miyazaki.jp", + "ebino.miyazaki.jp", + "gokase.miyazaki.jp", + "hyuga.miyazaki.jp", + "kadogawa.miyazaki.jp", + "kawaminami.miyazaki.jp", + "kijo.miyazaki.jp", + "kitagawa.miyazaki.jp", + "kitakata.miyazaki.jp", + "kitaura.miyazaki.jp", + "kobayashi.miyazaki.jp", + "kunitomi.miyazaki.jp", + "kushima.miyazaki.jp", + "mimata.miyazaki.jp", + "miyakonojo.miyazaki.jp", + "miyazaki.miyazaki.jp", + "morotsuka.miyazaki.jp", + "nichinan.miyazaki.jp", + "nishimera.miyazaki.jp", + "nobeoka.miyazaki.jp", + "saito.miyazaki.jp", + "shiiba.miyazaki.jp", + "shintomi.miyazaki.jp", + "takaharu.miyazaki.jp", + "takanabe.miyazaki.jp", + "takazaki.miyazaki.jp", + "tsuno.miyazaki.jp", + "achi.nagano.jp", + "agematsu.nagano.jp", + "anan.nagano.jp", + "aoki.nagano.jp", + "asahi.nagano.jp", + "azumino.nagano.jp", + "chikuhoku.nagano.jp", + "chikuma.nagano.jp", + "chino.nagano.jp", + "fujimi.nagano.jp", + "hakuba.nagano.jp", + "hara.nagano.jp", + "hiraya.nagano.jp", + "iida.nagano.jp", + "iijima.nagano.jp", + "iiyama.nagano.jp", + "iizuna.nagano.jp", + "ikeda.nagano.jp", + "ikusaka.nagano.jp", + "ina.nagano.jp", + "karuizawa.nagano.jp", + "kawakami.nagano.jp", + "kiso.nagano.jp", + "kisofukushima.nagano.jp", + "kitaaiki.nagano.jp", + "komagane.nagano.jp", + "komoro.nagano.jp", + "matsukawa.nagano.jp", + "matsumoto.nagano.jp", + "miasa.nagano.jp", + "minamiaiki.nagano.jp", + "minamimaki.nagano.jp", + "minamiminowa.nagano.jp", + "minowa.nagano.jp", + "miyada.nagano.jp", + "miyota.nagano.jp", + "mochizuki.nagano.jp", + "nagano.nagano.jp", + "nagawa.nagano.jp", + "nagiso.nagano.jp", + "nakagawa.nagano.jp", + "nakano.nagano.jp", + "nozawaonsen.nagano.jp", + "obuse.nagano.jp", + "ogawa.nagano.jp", + "okaya.nagano.jp", + "omachi.nagano.jp", + "omi.nagano.jp", + 
"ookuwa.nagano.jp", + "ooshika.nagano.jp", + "otaki.nagano.jp", + "otari.nagano.jp", + "sakae.nagano.jp", + "sakaki.nagano.jp", + "saku.nagano.jp", + "sakuho.nagano.jp", + "shimosuwa.nagano.jp", + "shinanomachi.nagano.jp", + "shiojiri.nagano.jp", + "suwa.nagano.jp", + "suzaka.nagano.jp", + "takagi.nagano.jp", + "takamori.nagano.jp", + "takayama.nagano.jp", + "tateshina.nagano.jp", + "tatsuno.nagano.jp", + "togakushi.nagano.jp", + "togura.nagano.jp", + "tomi.nagano.jp", + "ueda.nagano.jp", + "wada.nagano.jp", + "yamagata.nagano.jp", + "yamanouchi.nagano.jp", + "yasaka.nagano.jp", + "yasuoka.nagano.jp", + "chijiwa.nagasaki.jp", + "futsu.nagasaki.jp", + "goto.nagasaki.jp", + "hasami.nagasaki.jp", + "hirado.nagasaki.jp", + "iki.nagasaki.jp", + "isahaya.nagasaki.jp", + "kawatana.nagasaki.jp", + "kuchinotsu.nagasaki.jp", + "matsuura.nagasaki.jp", + "nagasaki.nagasaki.jp", + "obama.nagasaki.jp", + "omura.nagasaki.jp", + "oseto.nagasaki.jp", + "saikai.nagasaki.jp", + "sasebo.nagasaki.jp", + "seihi.nagasaki.jp", + "shimabara.nagasaki.jp", + "shinkamigoto.nagasaki.jp", + "togitsu.nagasaki.jp", + "tsushima.nagasaki.jp", + "unzen.nagasaki.jp", + "ando.nara.jp", + "gose.nara.jp", + "heguri.nara.jp", + "higashiyoshino.nara.jp", + "ikaruga.nara.jp", + "ikoma.nara.jp", + "kamikitayama.nara.jp", + "kanmaki.nara.jp", + "kashiba.nara.jp", + "kashihara.nara.jp", + "katsuragi.nara.jp", + "kawai.nara.jp", + "kawakami.nara.jp", + "kawanishi.nara.jp", + "koryo.nara.jp", + "kurotaki.nara.jp", + "mitsue.nara.jp", + "miyake.nara.jp", + "nara.nara.jp", + "nosegawa.nara.jp", + "oji.nara.jp", + "ouda.nara.jp", + "oyodo.nara.jp", + "sakurai.nara.jp", + "sango.nara.jp", + "shimoichi.nara.jp", + "shimokitayama.nara.jp", + "shinjo.nara.jp", + "soni.nara.jp", + "takatori.nara.jp", + "tawaramoto.nara.jp", + "tenkawa.nara.jp", + "tenri.nara.jp", + "uda.nara.jp", + "yamatokoriyama.nara.jp", + "yamatotakada.nara.jp", + "yamazoe.nara.jp", + "yoshino.nara.jp", + "aga.niigata.jp", + "agano.niigata.jp", + 
"gosen.niigata.jp", + "itoigawa.niigata.jp", + "izumozaki.niigata.jp", + "joetsu.niigata.jp", + "kamo.niigata.jp", + "kariwa.niigata.jp", + "kashiwazaki.niigata.jp", + "minamiuonuma.niigata.jp", + "mitsuke.niigata.jp", + "muika.niigata.jp", + "murakami.niigata.jp", + "myoko.niigata.jp", + "nagaoka.niigata.jp", + "niigata.niigata.jp", + "ojiya.niigata.jp", + "omi.niigata.jp", + "sado.niigata.jp", + "sanjo.niigata.jp", + "seiro.niigata.jp", + "seirou.niigata.jp", + "sekikawa.niigata.jp", + "shibata.niigata.jp", + "tagami.niigata.jp", + "tainai.niigata.jp", + "tochio.niigata.jp", + "tokamachi.niigata.jp", + "tsubame.niigata.jp", + "tsunan.niigata.jp", + "uonuma.niigata.jp", + "yahiko.niigata.jp", + "yoita.niigata.jp", + "yuzawa.niigata.jp", + "beppu.oita.jp", + "bungoono.oita.jp", + "bungotakada.oita.jp", + "hasama.oita.jp", + "hiji.oita.jp", + "himeshima.oita.jp", + "hita.oita.jp", + "kamitsue.oita.jp", + "kokonoe.oita.jp", + "kuju.oita.jp", + "kunisaki.oita.jp", + "kusu.oita.jp", + "oita.oita.jp", + "saiki.oita.jp", + "taketa.oita.jp", + "tsukumi.oita.jp", + "usa.oita.jp", + "usuki.oita.jp", + "yufu.oita.jp", + "akaiwa.okayama.jp", + "asakuchi.okayama.jp", + "bizen.okayama.jp", + "hayashima.okayama.jp", + "ibara.okayama.jp", + "kagamino.okayama.jp", + "kasaoka.okayama.jp", + "kibichuo.okayama.jp", + "kumenan.okayama.jp", + "kurashiki.okayama.jp", + "maniwa.okayama.jp", + "misaki.okayama.jp", + "nagi.okayama.jp", + "niimi.okayama.jp", + "nishiawakura.okayama.jp", + "okayama.okayama.jp", + "satosho.okayama.jp", + "setouchi.okayama.jp", + "shinjo.okayama.jp", + "shoo.okayama.jp", + "soja.okayama.jp", + "takahashi.okayama.jp", + "tamano.okayama.jp", + "tsuyama.okayama.jp", + "wake.okayama.jp", + "yakage.okayama.jp", + "aguni.okinawa.jp", + "ginowan.okinawa.jp", + "ginoza.okinawa.jp", + "gushikami.okinawa.jp", + "haebaru.okinawa.jp", + "higashi.okinawa.jp", + "hirara.okinawa.jp", + "iheya.okinawa.jp", + "ishigaki.okinawa.jp", + "ishikawa.okinawa.jp", + 
"itoman.okinawa.jp", + "izena.okinawa.jp", + "kadena.okinawa.jp", + "kin.okinawa.jp", + "kitadaito.okinawa.jp", + "kitanakagusuku.okinawa.jp", + "kumejima.okinawa.jp", + "kunigami.okinawa.jp", + "minamidaito.okinawa.jp", + "motobu.okinawa.jp", + "nago.okinawa.jp", + "naha.okinawa.jp", + "nakagusuku.okinawa.jp", + "nakijin.okinawa.jp", + "nanjo.okinawa.jp", + "nishihara.okinawa.jp", + "ogimi.okinawa.jp", + "okinawa.okinawa.jp", + "onna.okinawa.jp", + "shimoji.okinawa.jp", + "taketomi.okinawa.jp", + "tarama.okinawa.jp", + "tokashiki.okinawa.jp", + "tomigusuku.okinawa.jp", + "tonaki.okinawa.jp", + "urasoe.okinawa.jp", + "uruma.okinawa.jp", + "yaese.okinawa.jp", + "yomitan.okinawa.jp", + "yonabaru.okinawa.jp", + "yonaguni.okinawa.jp", + "zamami.okinawa.jp", + "abeno.osaka.jp", + "chihayaakasaka.osaka.jp", + "chuo.osaka.jp", + "daito.osaka.jp", + "fujiidera.osaka.jp", + "habikino.osaka.jp", + "hannan.osaka.jp", + "higashiosaka.osaka.jp", + "higashisumiyoshi.osaka.jp", + "higashiyodogawa.osaka.jp", + "hirakata.osaka.jp", + "ibaraki.osaka.jp", + "ikeda.osaka.jp", + "izumi.osaka.jp", + "izumiotsu.osaka.jp", + "izumisano.osaka.jp", + "kadoma.osaka.jp", + "kaizuka.osaka.jp", + "kanan.osaka.jp", + "kashiwara.osaka.jp", + "katano.osaka.jp", + "kawachinagano.osaka.jp", + "kishiwada.osaka.jp", + "kita.osaka.jp", + "kumatori.osaka.jp", + "matsubara.osaka.jp", + "minato.osaka.jp", + "minoh.osaka.jp", + "misaki.osaka.jp", + "moriguchi.osaka.jp", + "neyagawa.osaka.jp", + "nishi.osaka.jp", + "nose.osaka.jp", + "osakasayama.osaka.jp", + "sakai.osaka.jp", + "sayama.osaka.jp", + "sennan.osaka.jp", + "settsu.osaka.jp", + "shijonawate.osaka.jp", + "shimamoto.osaka.jp", + "suita.osaka.jp", + "tadaoka.osaka.jp", + "taishi.osaka.jp", + "tajiri.osaka.jp", + "takaishi.osaka.jp", + "takatsuki.osaka.jp", + "tondabayashi.osaka.jp", + "toyonaka.osaka.jp", + "toyono.osaka.jp", + "yao.osaka.jp", + "ariake.saga.jp", + "arita.saga.jp", + "fukudomi.saga.jp", + "genkai.saga.jp", + "hamatama.saga.jp", + 
"hizen.saga.jp", + "imari.saga.jp", + "kamimine.saga.jp", + "kanzaki.saga.jp", + "karatsu.saga.jp", + "kashima.saga.jp", + "kitagata.saga.jp", + "kitahata.saga.jp", + "kiyama.saga.jp", + "kouhoku.saga.jp", + "kyuragi.saga.jp", + "nishiarita.saga.jp", + "ogi.saga.jp", + "omachi.saga.jp", + "ouchi.saga.jp", + "saga.saga.jp", + "shiroishi.saga.jp", + "taku.saga.jp", + "tara.saga.jp", + "tosu.saga.jp", + "yoshinogari.saga.jp", + "arakawa.saitama.jp", + "asaka.saitama.jp", + "chichibu.saitama.jp", + "fujimi.saitama.jp", + "fujimino.saitama.jp", + "fukaya.saitama.jp", + "hanno.saitama.jp", + "hanyu.saitama.jp", + "hasuda.saitama.jp", + "hatogaya.saitama.jp", + "hatoyama.saitama.jp", + "hidaka.saitama.jp", + "higashichichibu.saitama.jp", + "higashimatsuyama.saitama.jp", + "honjo.saitama.jp", + "ina.saitama.jp", + "iruma.saitama.jp", + "iwatsuki.saitama.jp", + "kamiizumi.saitama.jp", + "kamikawa.saitama.jp", + "kamisato.saitama.jp", + "kasukabe.saitama.jp", + "kawagoe.saitama.jp", + "kawaguchi.saitama.jp", + "kawajima.saitama.jp", + "kazo.saitama.jp", + "kitamoto.saitama.jp", + "koshigaya.saitama.jp", + "kounosu.saitama.jp", + "kuki.saitama.jp", + "kumagaya.saitama.jp", + "matsubushi.saitama.jp", + "minano.saitama.jp", + "misato.saitama.jp", + "miyashiro.saitama.jp", + "miyoshi.saitama.jp", + "moroyama.saitama.jp", + "nagatoro.saitama.jp", + "namegawa.saitama.jp", + "niiza.saitama.jp", + "ogano.saitama.jp", + "ogawa.saitama.jp", + "ogose.saitama.jp", + "okegawa.saitama.jp", + "omiya.saitama.jp", + "otaki.saitama.jp", + "ranzan.saitama.jp", + "ryokami.saitama.jp", + "saitama.saitama.jp", + "sakado.saitama.jp", + "satte.saitama.jp", + "sayama.saitama.jp", + "shiki.saitama.jp", + "shiraoka.saitama.jp", + "soka.saitama.jp", + "sugito.saitama.jp", + "toda.saitama.jp", + "tokigawa.saitama.jp", + "tokorozawa.saitama.jp", + "tsurugashima.saitama.jp", + "urawa.saitama.jp", + "warabi.saitama.jp", + "yashio.saitama.jp", + "yokoze.saitama.jp", + "yono.saitama.jp", + 
"yorii.saitama.jp", + "yoshida.saitama.jp", + "yoshikawa.saitama.jp", + "yoshimi.saitama.jp", + "aisho.shiga.jp", + "gamo.shiga.jp", + "higashiomi.shiga.jp", + "hikone.shiga.jp", + "koka.shiga.jp", + "konan.shiga.jp", + "kosei.shiga.jp", + "koto.shiga.jp", + "kusatsu.shiga.jp", + "maibara.shiga.jp", + "moriyama.shiga.jp", + "nagahama.shiga.jp", + "nishiazai.shiga.jp", + "notogawa.shiga.jp", + "omihachiman.shiga.jp", + "otsu.shiga.jp", + "ritto.shiga.jp", + "ryuoh.shiga.jp", + "takashima.shiga.jp", + "takatsuki.shiga.jp", + "torahime.shiga.jp", + "toyosato.shiga.jp", + "yasu.shiga.jp", + "akagi.shimane.jp", + "ama.shimane.jp", + "gotsu.shimane.jp", + "hamada.shimane.jp", + "higashiizumo.shimane.jp", + "hikawa.shimane.jp", + "hikimi.shimane.jp", + "izumo.shimane.jp", + "kakinoki.shimane.jp", + "masuda.shimane.jp", + "matsue.shimane.jp", + "misato.shimane.jp", + "nishinoshima.shimane.jp", + "ohda.shimane.jp", + "okinoshima.shimane.jp", + "okuizumo.shimane.jp", + "shimane.shimane.jp", + "tamayu.shimane.jp", + "tsuwano.shimane.jp", + "unnan.shimane.jp", + "yakumo.shimane.jp", + "yasugi.shimane.jp", + "yatsuka.shimane.jp", + "arai.shizuoka.jp", + "atami.shizuoka.jp", + "fuji.shizuoka.jp", + "fujieda.shizuoka.jp", + "fujikawa.shizuoka.jp", + "fujinomiya.shizuoka.jp", + "fukuroi.shizuoka.jp", + "gotemba.shizuoka.jp", + "haibara.shizuoka.jp", + "hamamatsu.shizuoka.jp", + "higashiizu.shizuoka.jp", + "ito.shizuoka.jp", + "iwata.shizuoka.jp", + "izu.shizuoka.jp", + "izunokuni.shizuoka.jp", + "kakegawa.shizuoka.jp", + "kannami.shizuoka.jp", + "kawanehon.shizuoka.jp", + "kawazu.shizuoka.jp", + "kikugawa.shizuoka.jp", + "kosai.shizuoka.jp", + "makinohara.shizuoka.jp", + "matsuzaki.shizuoka.jp", + "minamiizu.shizuoka.jp", + "mishima.shizuoka.jp", + "morimachi.shizuoka.jp", + "nishiizu.shizuoka.jp", + "numazu.shizuoka.jp", + "omaezaki.shizuoka.jp", + "shimada.shizuoka.jp", + "shimizu.shizuoka.jp", + "shimoda.shizuoka.jp", + "shizuoka.shizuoka.jp", + "susono.shizuoka.jp", + 
"yaizu.shizuoka.jp", + "yoshida.shizuoka.jp", + "ashikaga.tochigi.jp", + "bato.tochigi.jp", + "haga.tochigi.jp", + "ichikai.tochigi.jp", + "iwafune.tochigi.jp", + "kaminokawa.tochigi.jp", + "kanuma.tochigi.jp", + "karasuyama.tochigi.jp", + "kuroiso.tochigi.jp", + "mashiko.tochigi.jp", + "mibu.tochigi.jp", + "moka.tochigi.jp", + "motegi.tochigi.jp", + "nasu.tochigi.jp", + "nasushiobara.tochigi.jp", + "nikko.tochigi.jp", + "nishikata.tochigi.jp", + "nogi.tochigi.jp", + "ohira.tochigi.jp", + "ohtawara.tochigi.jp", + "oyama.tochigi.jp", + "sakura.tochigi.jp", + "sano.tochigi.jp", + "shimotsuke.tochigi.jp", + "shioya.tochigi.jp", + "takanezawa.tochigi.jp", + "tochigi.tochigi.jp", + "tsuga.tochigi.jp", + "ujiie.tochigi.jp", + "utsunomiya.tochigi.jp", + "yaita.tochigi.jp", + "aizumi.tokushima.jp", + "anan.tokushima.jp", + "ichiba.tokushima.jp", + "itano.tokushima.jp", + "kainan.tokushima.jp", + "komatsushima.tokushima.jp", + "matsushige.tokushima.jp", + "mima.tokushima.jp", + "minami.tokushima.jp", + "miyoshi.tokushima.jp", + "mugi.tokushima.jp", + "nakagawa.tokushima.jp", + "naruto.tokushima.jp", + "sanagochi.tokushima.jp", + "shishikui.tokushima.jp", + "tokushima.tokushima.jp", + "wajiki.tokushima.jp", + "adachi.tokyo.jp", + "akiruno.tokyo.jp", + "akishima.tokyo.jp", + "aogashima.tokyo.jp", + "arakawa.tokyo.jp", + "bunkyo.tokyo.jp", + "chiyoda.tokyo.jp", + "chofu.tokyo.jp", + "chuo.tokyo.jp", + "edogawa.tokyo.jp", + "fuchu.tokyo.jp", + "fussa.tokyo.jp", + "hachijo.tokyo.jp", + "hachioji.tokyo.jp", + "hamura.tokyo.jp", + "higashikurume.tokyo.jp", + "higashimurayama.tokyo.jp", + "higashiyamato.tokyo.jp", + "hino.tokyo.jp", + "hinode.tokyo.jp", + "hinohara.tokyo.jp", + "inagi.tokyo.jp", + "itabashi.tokyo.jp", + "katsushika.tokyo.jp", + "kita.tokyo.jp", + "kiyose.tokyo.jp", + "kodaira.tokyo.jp", + "koganei.tokyo.jp", + "kokubunji.tokyo.jp", + "komae.tokyo.jp", + "koto.tokyo.jp", + "kouzushima.tokyo.jp", + "kunitachi.tokyo.jp", + "machida.tokyo.jp", + "meguro.tokyo.jp", + 
"minato.tokyo.jp", + "mitaka.tokyo.jp", + "mizuho.tokyo.jp", + "musashimurayama.tokyo.jp", + "musashino.tokyo.jp", + "nakano.tokyo.jp", + "nerima.tokyo.jp", + "ogasawara.tokyo.jp", + "okutama.tokyo.jp", + "ome.tokyo.jp", + "oshima.tokyo.jp", + "ota.tokyo.jp", + "setagaya.tokyo.jp", + "shibuya.tokyo.jp", + "shinagawa.tokyo.jp", + "shinjuku.tokyo.jp", + "suginami.tokyo.jp", + "sumida.tokyo.jp", + "tachikawa.tokyo.jp", + "taito.tokyo.jp", + "tama.tokyo.jp", + "toshima.tokyo.jp", + "chizu.tottori.jp", + "hino.tottori.jp", + "kawahara.tottori.jp", + "koge.tottori.jp", + "kotoura.tottori.jp", + "misasa.tottori.jp", + "nanbu.tottori.jp", + "nichinan.tottori.jp", + "sakaiminato.tottori.jp", + "tottori.tottori.jp", + "wakasa.tottori.jp", + "yazu.tottori.jp", + "yonago.tottori.jp", + "asahi.toyama.jp", + "fuchu.toyama.jp", + "fukumitsu.toyama.jp", + "funahashi.toyama.jp", + "himi.toyama.jp", + "imizu.toyama.jp", + "inami.toyama.jp", + "johana.toyama.jp", + "kamiichi.toyama.jp", + "kurobe.toyama.jp", + "nakaniikawa.toyama.jp", + "namerikawa.toyama.jp", + "nanto.toyama.jp", + "nyuzen.toyama.jp", + "oyabe.toyama.jp", + "taira.toyama.jp", + "takaoka.toyama.jp", + "tateyama.toyama.jp", + "toga.toyama.jp", + "tonami.toyama.jp", + "toyama.toyama.jp", + "unazuki.toyama.jp", + "uozu.toyama.jp", + "yamada.toyama.jp", + "arida.wakayama.jp", + "aridagawa.wakayama.jp", + "gobo.wakayama.jp", + "hashimoto.wakayama.jp", + "hidaka.wakayama.jp", + "hirogawa.wakayama.jp", + "inami.wakayama.jp", + "iwade.wakayama.jp", + "kainan.wakayama.jp", + "kamitonda.wakayama.jp", + "katsuragi.wakayama.jp", + "kimino.wakayama.jp", + "kinokawa.wakayama.jp", + "kitayama.wakayama.jp", + "koya.wakayama.jp", + "koza.wakayama.jp", + "kozagawa.wakayama.jp", + "kudoyama.wakayama.jp", + "kushimoto.wakayama.jp", + "mihama.wakayama.jp", + "misato.wakayama.jp", + "nachikatsuura.wakayama.jp", + "shingu.wakayama.jp", + "shirahama.wakayama.jp", + "taiji.wakayama.jp", + "tanabe.wakayama.jp", + "wakayama.wakayama.jp", + 
"yuasa.wakayama.jp", + "yura.wakayama.jp", + "asahi.yamagata.jp", + "funagata.yamagata.jp", + "higashine.yamagata.jp", + "iide.yamagata.jp", + "kahoku.yamagata.jp", + "kaminoyama.yamagata.jp", + "kaneyama.yamagata.jp", + "kawanishi.yamagata.jp", + "mamurogawa.yamagata.jp", + "mikawa.yamagata.jp", + "murayama.yamagata.jp", + "nagai.yamagata.jp", + "nakayama.yamagata.jp", + "nanyo.yamagata.jp", + "nishikawa.yamagata.jp", + "obanazawa.yamagata.jp", + "oe.yamagata.jp", + "oguni.yamagata.jp", + "ohkura.yamagata.jp", + "oishida.yamagata.jp", + "sagae.yamagata.jp", + "sakata.yamagata.jp", + "sakegawa.yamagata.jp", + "shinjo.yamagata.jp", + "shirataka.yamagata.jp", + "shonai.yamagata.jp", + "takahata.yamagata.jp", + "tendo.yamagata.jp", + "tozawa.yamagata.jp", + "tsuruoka.yamagata.jp", + "yamagata.yamagata.jp", + "yamanobe.yamagata.jp", + "yonezawa.yamagata.jp", + "yuza.yamagata.jp", + "abu.yamaguchi.jp", + "hagi.yamaguchi.jp", + "hikari.yamaguchi.jp", + "hofu.yamaguchi.jp", + "iwakuni.yamaguchi.jp", + "kudamatsu.yamaguchi.jp", + "mitou.yamaguchi.jp", + "nagato.yamaguchi.jp", + "oshima.yamaguchi.jp", + "shimonoseki.yamaguchi.jp", + "shunan.yamaguchi.jp", + "tabuse.yamaguchi.jp", + "tokuyama.yamaguchi.jp", + "toyota.yamaguchi.jp", + "ube.yamaguchi.jp", + "yuu.yamaguchi.jp", + "chuo.yamanashi.jp", + "doshi.yamanashi.jp", + "fuefuki.yamanashi.jp", + "fujikawa.yamanashi.jp", + "fujikawaguchiko.yamanashi.jp", + "fujiyoshida.yamanashi.jp", + "hayakawa.yamanashi.jp", + "hokuto.yamanashi.jp", + "ichikawamisato.yamanashi.jp", + "kai.yamanashi.jp", + "kofu.yamanashi.jp", + "koshu.yamanashi.jp", + "kosuge.yamanashi.jp", + "minami-alps.yamanashi.jp", + "minobu.yamanashi.jp", + "nakamichi.yamanashi.jp", + "nanbu.yamanashi.jp", + "narusawa.yamanashi.jp", + "nirasaki.yamanashi.jp", + "nishikatsura.yamanashi.jp", + "oshino.yamanashi.jp", + "otsuki.yamanashi.jp", + "showa.yamanashi.jp", + "tabayama.yamanashi.jp", + "tsuru.yamanashi.jp", + "uenohara.yamanashi.jp", + 
"yamanakako.yamanashi.jp", + "yamanashi.yamanashi.jp", + "*.ke", + "kg", + "org.kg", + "net.kg", + "com.kg", + "edu.kg", + "gov.kg", + "mil.kg", + "*.kh", + "ki", + "edu.ki", + "biz.ki", + "net.ki", + "org.ki", + "gov.ki", + "info.ki", + "com.ki", + "km", + "org.km", + "nom.km", + "gov.km", + "prd.km", + "tm.km", + "edu.km", + "mil.km", + "ass.km", + "com.km", + "coop.km", + "asso.km", + "presse.km", + "medecin.km", + "notaires.km", + "pharmaciens.km", + "veterinaire.km", + "gouv.km", + "kn", + "net.kn", + "org.kn", + "edu.kn", + "gov.kn", + "kp", + "com.kp", + "edu.kp", + "gov.kp", + "org.kp", + "rep.kp", + "tra.kp", + "kr", + "ac.kr", + "co.kr", + "es.kr", + "go.kr", + "hs.kr", + "kg.kr", + "mil.kr", + "ms.kr", + "ne.kr", + "or.kr", + "pe.kr", + "re.kr", + "sc.kr", + "busan.kr", + "chungbuk.kr", + "chungnam.kr", + "daegu.kr", + "daejeon.kr", + "gangwon.kr", + "gwangju.kr", + "gyeongbuk.kr", + "gyeonggi.kr", + "gyeongnam.kr", + "incheon.kr", + "jeju.kr", + "jeonbuk.kr", + "jeonnam.kr", + "seoul.kr", + "ulsan.kr", + "*.kw", + "ky", + "edu.ky", + "gov.ky", + "com.ky", + "org.ky", + "net.ky", + "kz", + "org.kz", + "edu.kz", + "net.kz", + "gov.kz", + "mil.kz", + "com.kz", + "la", + "int.la", + "net.la", + "info.la", + "edu.la", + "gov.la", + "per.la", + "com.la", + "org.la", + "lb", + "com.lb", + "edu.lb", + "gov.lb", + "net.lb", + "org.lb", + "lc", + "com.lc", + "net.lc", + "co.lc", + "org.lc", + "edu.lc", + "gov.lc", + "li", + "lk", + "gov.lk", + "sch.lk", + "net.lk", + "int.lk", + "com.lk", + "org.lk", + "edu.lk", + "ngo.lk", + "soc.lk", + "web.lk", + "ltd.lk", + "assn.lk", + "grp.lk", + "hotel.lk", + "ac.lk", + "lr", + "com.lr", + "edu.lr", + "gov.lr", + "org.lr", + "net.lr", + "ls", + "co.ls", + "org.ls", + "lt", + "gov.lt", + "lu", + "lv", + "com.lv", + "edu.lv", + "gov.lv", + "org.lv", + "mil.lv", + "id.lv", + "net.lv", + "asn.lv", + "conf.lv", + "ly", + "com.ly", + "net.ly", + "gov.ly", + "plc.ly", + "edu.ly", + "sch.ly", + "med.ly", + "org.ly", + "id.ly", + 
"ma", + "co.ma", + "net.ma", + "gov.ma", + "org.ma", + "ac.ma", + "press.ma", + "mc", + "tm.mc", + "asso.mc", + "md", + "me", + "co.me", + "net.me", + "org.me", + "edu.me", + "ac.me", + "gov.me", + "its.me", + "priv.me", + "mg", + "org.mg", + "nom.mg", + "gov.mg", + "prd.mg", + "tm.mg", + "edu.mg", + "mil.mg", + "com.mg", + "co.mg", + "mh", + "mil", + "mk", + "com.mk", + "org.mk", + "net.mk", + "edu.mk", + "gov.mk", + "inf.mk", + "name.mk", + "ml", + "com.ml", + "edu.ml", + "gouv.ml", + "gov.ml", + "net.ml", + "org.ml", + "presse.ml", + "*.mm", + "mn", + "gov.mn", + "edu.mn", + "org.mn", + "mo", + "com.mo", + "net.mo", + "org.mo", + "edu.mo", + "gov.mo", + "mobi", + "mp", + "mq", + "mr", + "gov.mr", + "ms", + "com.ms", + "edu.ms", + "gov.ms", + "net.ms", + "org.ms", + "mt", + "com.mt", + "edu.mt", + "net.mt", + "org.mt", + "mu", + "com.mu", + "net.mu", + "org.mu", + "gov.mu", + "ac.mu", + "co.mu", + "or.mu", + "museum", + "academy.museum", + "agriculture.museum", + "air.museum", + "airguard.museum", + "alabama.museum", + "alaska.museum", + "amber.museum", + "ambulance.museum", + "american.museum", + "americana.museum", + "americanantiques.museum", + "americanart.museum", + "amsterdam.museum", + "and.museum", + "annefrank.museum", + "anthro.museum", + "anthropology.museum", + "antiques.museum", + "aquarium.museum", + "arboretum.museum", + "archaeological.museum", + "archaeology.museum", + "architecture.museum", + "art.museum", + "artanddesign.museum", + "artcenter.museum", + "artdeco.museum", + "arteducation.museum", + "artgallery.museum", + "arts.museum", + "artsandcrafts.museum", + "asmatart.museum", + "assassination.museum", + "assisi.museum", + "association.museum", + "astronomy.museum", + "atlanta.museum", + "austin.museum", + "australia.museum", + "automotive.museum", + "aviation.museum", + "axis.museum", + "badajoz.museum", + "baghdad.museum", + "bahn.museum", + "bale.museum", + "baltimore.museum", + "barcelona.museum", + "baseball.museum", + "basel.museum", 
+ "baths.museum", + "bauern.museum", + "beauxarts.museum", + "beeldengeluid.museum", + "bellevue.museum", + "bergbau.museum", + "berkeley.museum", + "berlin.museum", + "bern.museum", + "bible.museum", + "bilbao.museum", + "bill.museum", + "birdart.museum", + "birthplace.museum", + "bonn.museum", + "boston.museum", + "botanical.museum", + "botanicalgarden.museum", + "botanicgarden.museum", + "botany.museum", + "brandywinevalley.museum", + "brasil.museum", + "bristol.museum", + "british.museum", + "britishcolumbia.museum", + "broadcast.museum", + "brunel.museum", + "brussel.museum", + "brussels.museum", + "bruxelles.museum", + "building.museum", + "burghof.museum", + "bus.museum", + "bushey.museum", + "cadaques.museum", + "california.museum", + "cambridge.museum", + "can.museum", + "canada.museum", + "capebreton.museum", + "carrier.museum", + "cartoonart.museum", + "casadelamoneda.museum", + "castle.museum", + "castres.museum", + "celtic.museum", + "center.museum", + "chattanooga.museum", + "cheltenham.museum", + "chesapeakebay.museum", + "chicago.museum", + "children.museum", + "childrens.museum", + "childrensgarden.museum", + "chiropractic.museum", + "chocolate.museum", + "christiansburg.museum", + "cincinnati.museum", + "cinema.museum", + "circus.museum", + "civilisation.museum", + "civilization.museum", + "civilwar.museum", + "clinton.museum", + "clock.museum", + "coal.museum", + "coastaldefence.museum", + "cody.museum", + "coldwar.museum", + "collection.museum", + "colonialwilliamsburg.museum", + "coloradoplateau.museum", + "columbia.museum", + "columbus.museum", + "communication.museum", + "communications.museum", + "community.museum", + "computer.museum", + "computerhistory.museum", + "xn--comunicaes-v6a2o.museum", + "contemporary.museum", + "contemporaryart.museum", + "convent.museum", + "copenhagen.museum", + "corporation.museum", + "xn--correios-e-telecomunicaes-ghc29a.museum", + "corvette.museum", + "costume.museum", + "countryestate.museum", + 
"county.museum", + "crafts.museum", + "cranbrook.museum", + "creation.museum", + "cultural.museum", + "culturalcenter.museum", + "culture.museum", + "cyber.museum", + "cymru.museum", + "dali.museum", + "dallas.museum", + "database.museum", + "ddr.museum", + "decorativearts.museum", + "delaware.museum", + "delmenhorst.museum", + "denmark.museum", + "depot.museum", + "design.museum", + "detroit.museum", + "dinosaur.museum", + "discovery.museum", + "dolls.museum", + "donostia.museum", + "durham.museum", + "eastafrica.museum", + "eastcoast.museum", + "education.museum", + "educational.museum", + "egyptian.museum", + "eisenbahn.museum", + "elburg.museum", + "elvendrell.museum", + "embroidery.museum", + "encyclopedic.museum", + "england.museum", + "entomology.museum", + "environment.museum", + "environmentalconservation.museum", + "epilepsy.museum", + "essex.museum", + "estate.museum", + "ethnology.museum", + "exeter.museum", + "exhibition.museum", + "family.museum", + "farm.museum", + "farmequipment.museum", + "farmers.museum", + "farmstead.museum", + "field.museum", + "figueres.museum", + "filatelia.museum", + "film.museum", + "fineart.museum", + "finearts.museum", + "finland.museum", + "flanders.museum", + "florida.museum", + "force.museum", + "fortmissoula.museum", + "fortworth.museum", + "foundation.museum", + "francaise.museum", + "frankfurt.museum", + "franziskaner.museum", + "freemasonry.museum", + "freiburg.museum", + "fribourg.museum", + "frog.museum", + "fundacio.museum", + "furniture.museum", + "gallery.museum", + "garden.museum", + "gateway.museum", + "geelvinck.museum", + "gemological.museum", + "geology.museum", + "georgia.museum", + "giessen.museum", + "glas.museum", + "glass.museum", + "gorge.museum", + "grandrapids.museum", + "graz.museum", + "guernsey.museum", + "halloffame.museum", + "hamburg.museum", + "handson.museum", + "harvestcelebration.museum", + "hawaii.museum", + "health.museum", + "heimatunduhren.museum", + "hellas.museum", + 
"helsinki.museum", + "hembygdsforbund.museum", + "heritage.museum", + "histoire.museum", + "historical.museum", + "historicalsociety.museum", + "historichouses.museum", + "historisch.museum", + "historisches.museum", + "history.museum", + "historyofscience.museum", + "horology.museum", + "house.museum", + "humanities.museum", + "illustration.museum", + "imageandsound.museum", + "indian.museum", + "indiana.museum", + "indianapolis.museum", + "indianmarket.museum", + "intelligence.museum", + "interactive.museum", + "iraq.museum", + "iron.museum", + "isleofman.museum", + "jamison.museum", + "jefferson.museum", + "jerusalem.museum", + "jewelry.museum", + "jewish.museum", + "jewishart.museum", + "jfk.museum", + "journalism.museum", + "judaica.museum", + "judygarland.museum", + "juedisches.museum", + "juif.museum", + "karate.museum", + "karikatur.museum", + "kids.museum", + "koebenhavn.museum", + "koeln.museum", + "kunst.museum", + "kunstsammlung.museum", + "kunstunddesign.museum", + "labor.museum", + "labour.museum", + "lajolla.museum", + "lancashire.museum", + "landes.museum", + "lans.museum", + "xn--lns-qla.museum", + "larsson.museum", + "lewismiller.museum", + "lincoln.museum", + "linz.museum", + "living.museum", + "livinghistory.museum", + "localhistory.museum", + "london.museum", + "losangeles.museum", + "louvre.museum", + "loyalist.museum", + "lucerne.museum", + "luxembourg.museum", + "luzern.museum", + "mad.museum", + "madrid.museum", + "mallorca.museum", + "manchester.museum", + "mansion.museum", + "mansions.museum", + "manx.museum", + "marburg.museum", + "maritime.museum", + "maritimo.museum", + "maryland.museum", + "marylhurst.museum", + "media.museum", + "medical.museum", + "medizinhistorisches.museum", + "meeres.museum", + "memorial.museum", + "mesaverde.museum", + "michigan.museum", + "midatlantic.museum", + "military.museum", + "mill.museum", + "miners.museum", + "mining.museum", + "minnesota.museum", + "missile.museum", + "missoula.museum", + 
"modern.museum", + "moma.museum", + "money.museum", + "monmouth.museum", + "monticello.museum", + "montreal.museum", + "moscow.museum", + "motorcycle.museum", + "muenchen.museum", + "muenster.museum", + "mulhouse.museum", + "muncie.museum", + "museet.museum", + "museumcenter.museum", + "museumvereniging.museum", + "music.museum", + "national.museum", + "nationalfirearms.museum", + "nationalheritage.museum", + "nativeamerican.museum", + "naturalhistory.museum", + "naturalhistorymuseum.museum", + "naturalsciences.museum", + "nature.museum", + "naturhistorisches.museum", + "natuurwetenschappen.museum", + "naumburg.museum", + "naval.museum", + "nebraska.museum", + "neues.museum", + "newhampshire.museum", + "newjersey.museum", + "newmexico.museum", + "newport.museum", + "newspaper.museum", + "newyork.museum", + "niepce.museum", + "norfolk.museum", + "north.museum", + "nrw.museum", + "nuernberg.museum", + "nuremberg.museum", + "nyc.museum", + "nyny.museum", + "oceanographic.museum", + "oceanographique.museum", + "omaha.museum", + "online.museum", + "ontario.museum", + "openair.museum", + "oregon.museum", + "oregontrail.museum", + "otago.museum", + "oxford.museum", + "pacific.museum", + "paderborn.museum", + "palace.museum", + "paleo.museum", + "palmsprings.museum", + "panama.museum", + "paris.museum", + "pasadena.museum", + "pharmacy.museum", + "philadelphia.museum", + "philadelphiaarea.museum", + "philately.museum", + "phoenix.museum", + "photography.museum", + "pilots.museum", + "pittsburgh.museum", + "planetarium.museum", + "plantation.museum", + "plants.museum", + "plaza.museum", + "portal.museum", + "portland.museum", + "portlligat.museum", + "posts-and-telecommunications.museum", + "preservation.museum", + "presidio.museum", + "press.museum", + "project.museum", + "public.museum", + "pubol.museum", + "quebec.museum", + "railroad.museum", + "railway.museum", + "research.museum", + "resistance.museum", + "riodejaneiro.museum", + "rochester.museum", + 
"rockart.museum", + "roma.museum", + "russia.museum", + "saintlouis.museum", + "salem.museum", + "salvadordali.museum", + "salzburg.museum", + "sandiego.museum", + "sanfrancisco.museum", + "santabarbara.museum", + "santacruz.museum", + "santafe.museum", + "saskatchewan.museum", + "satx.museum", + "savannahga.museum", + "schlesisches.museum", + "schoenbrunn.museum", + "schokoladen.museum", + "school.museum", + "schweiz.museum", + "science.museum", + "scienceandhistory.museum", + "scienceandindustry.museum", + "sciencecenter.museum", + "sciencecenters.museum", + "science-fiction.museum", + "sciencehistory.museum", + "sciences.museum", + "sciencesnaturelles.museum", + "scotland.museum", + "seaport.museum", + "settlement.museum", + "settlers.museum", + "shell.museum", + "sherbrooke.museum", + "sibenik.museum", + "silk.museum", + "ski.museum", + "skole.museum", + "society.museum", + "sologne.museum", + "soundandvision.museum", + "southcarolina.museum", + "southwest.museum", + "space.museum", + "spy.museum", + "square.museum", + "stadt.museum", + "stalbans.museum", + "starnberg.museum", + "state.museum", + "stateofdelaware.museum", + "station.museum", + "steam.museum", + "steiermark.museum", + "stjohn.museum", + "stockholm.museum", + "stpetersburg.museum", + "stuttgart.museum", + "suisse.museum", + "surgeonshall.museum", + "surrey.museum", + "svizzera.museum", + "sweden.museum", + "sydney.museum", + "tank.museum", + "tcm.museum", + "technology.museum", + "telekommunikation.museum", + "television.museum", + "texas.museum", + "textile.museum", + "theater.museum", + "time.museum", + "timekeeping.museum", + "topology.museum", + "torino.museum", + "touch.museum", + "town.museum", + "transport.museum", + "tree.museum", + "trolley.museum", + "trust.museum", + "trustee.museum", + "uhren.museum", + "ulm.museum", + "undersea.museum", + "university.museum", + "usa.museum", + "usantiques.museum", + "usarts.museum", + "uscountryestate.museum", + "usculture.museum", + 
"usdecorativearts.museum", + "usgarden.museum", + "ushistory.museum", + "ushuaia.museum", + "uslivinghistory.museum", + "utah.museum", + "uvic.museum", + "valley.museum", + "vantaa.museum", + "versailles.museum", + "viking.museum", + "village.museum", + "virginia.museum", + "virtual.museum", + "virtuel.museum", + "vlaanderen.museum", + "volkenkunde.museum", + "wales.museum", + "wallonie.museum", + "war.museum", + "washingtondc.museum", + "watchandclock.museum", + "watch-and-clock.museum", + "western.museum", + "westfalen.museum", + "whaling.museum", + "wildlife.museum", + "williamsburg.museum", + "windmill.museum", + "workshop.museum", + "york.museum", + "yorkshire.museum", + "yosemite.museum", + "youth.museum", + "zoological.museum", + "zoology.museum", + "xn--9dbhblg6di.museum", + "xn--h1aegh.museum", + "mv", + "aero.mv", + "biz.mv", + "com.mv", + "coop.mv", + "edu.mv", + "gov.mv", + "info.mv", + "int.mv", + "mil.mv", + "museum.mv", + "name.mv", + "net.mv", + "org.mv", + "pro.mv", + "mw", + "ac.mw", + "biz.mw", + "co.mw", + "com.mw", + "coop.mw", + "edu.mw", + "gov.mw", + "int.mw", + "museum.mw", + "net.mw", + "org.mw", + "mx", + "com.mx", + "org.mx", + "gob.mx", + "edu.mx", + "net.mx", + "my", + "com.my", + "net.my", + "org.my", + "gov.my", + "edu.my", + "mil.my", + "name.my", + "*.mz", + "!teledata.mz", + "na", + "info.na", + "pro.na", + "name.na", + "school.na", + "or.na", + "dr.na", + "us.na", + "mx.na", + "ca.na", + "in.na", + "cc.na", + "tv.na", + "ws.na", + "mobi.na", + "co.na", + "com.na", + "org.na", + "name", + "nc", + "asso.nc", + "ne", + "net", + "nf", + "com.nf", + "net.nf", + "per.nf", + "rec.nf", + "web.nf", + "arts.nf", + "firm.nf", + "info.nf", + "other.nf", + "store.nf", + "ng", + "com.ng", + "edu.ng", + "gov.ng", + "i.ng", + "mil.ng", + "mobi.ng", + "name.ng", + "net.ng", + "org.ng", + "sch.ng", + "com.ni", + "gob.ni", + "edu.ni", + "org.ni", + "nom.ni", + "net.ni", + "mil.ni", + "co.ni", + "biz.ni", + "web.ni", + "int.ni", + "ac.ni", + 
"in.ni", + "info.ni", + "nl", + "bv.nl", + "no", + "fhs.no", + "vgs.no", + "fylkesbibl.no", + "folkebibl.no", + "museum.no", + "idrett.no", + "priv.no", + "mil.no", + "stat.no", + "dep.no", + "kommune.no", + "herad.no", + "aa.no", + "ah.no", + "bu.no", + "fm.no", + "hl.no", + "hm.no", + "jan-mayen.no", + "mr.no", + "nl.no", + "nt.no", + "of.no", + "ol.no", + "oslo.no", + "rl.no", + "sf.no", + "st.no", + "svalbard.no", + "tm.no", + "tr.no", + "va.no", + "vf.no", + "gs.aa.no", + "gs.ah.no", + "gs.bu.no", + "gs.fm.no", + "gs.hl.no", + "gs.hm.no", + "gs.jan-mayen.no", + "gs.mr.no", + "gs.nl.no", + "gs.nt.no", + "gs.of.no", + "gs.ol.no", + "gs.oslo.no", + "gs.rl.no", + "gs.sf.no", + "gs.st.no", + "gs.svalbard.no", + "gs.tm.no", + "gs.tr.no", + "gs.va.no", + "gs.vf.no", + "akrehamn.no", + "xn--krehamn-dxa.no", + "algard.no", + "xn--lgrd-poac.no", + "arna.no", + "brumunddal.no", + "bryne.no", + "bronnoysund.no", + "xn--brnnysund-m8ac.no", + "drobak.no", + "xn--drbak-wua.no", + "egersund.no", + "fetsund.no", + "floro.no", + "xn--flor-jra.no", + "fredrikstad.no", + "hokksund.no", + "honefoss.no", + "xn--hnefoss-q1a.no", + "jessheim.no", + "jorpeland.no", + "xn--jrpeland-54a.no", + "kirkenes.no", + "kopervik.no", + "krokstadelva.no", + "langevag.no", + "xn--langevg-jxa.no", + "leirvik.no", + "mjondalen.no", + "xn--mjndalen-64a.no", + "mo-i-rana.no", + "mosjoen.no", + "xn--mosjen-eya.no", + "nesoddtangen.no", + "orkanger.no", + "osoyro.no", + "xn--osyro-wua.no", + "raholt.no", + "xn--rholt-mra.no", + "sandnessjoen.no", + "xn--sandnessjen-ogb.no", + "skedsmokorset.no", + "slattum.no", + "spjelkavik.no", + "stathelle.no", + "stavern.no", + "stjordalshalsen.no", + "xn--stjrdalshalsen-sqb.no", + "tananger.no", + "tranby.no", + "vossevangen.no", + "afjord.no", + "xn--fjord-lra.no", + "agdenes.no", + "al.no", + "xn--l-1fa.no", + "alesund.no", + "xn--lesund-hua.no", + "alstahaug.no", + "alta.no", + "xn--lt-liac.no", + "alaheadju.no", + "xn--laheadju-7ya.no", + "alvdal.no", + 
"amli.no", + "xn--mli-tla.no", + "amot.no", + "xn--mot-tla.no", + "andebu.no", + "andoy.no", + "xn--andy-ira.no", + "andasuolo.no", + "ardal.no", + "xn--rdal-poa.no", + "aremark.no", + "arendal.no", + "xn--s-1fa.no", + "aseral.no", + "xn--seral-lra.no", + "asker.no", + "askim.no", + "askvoll.no", + "askoy.no", + "xn--asky-ira.no", + "asnes.no", + "xn--snes-poa.no", + "audnedaln.no", + "aukra.no", + "aure.no", + "aurland.no", + "aurskog-holand.no", + "xn--aurskog-hland-jnb.no", + "austevoll.no", + "austrheim.no", + "averoy.no", + "xn--avery-yua.no", + "balestrand.no", + "ballangen.no", + "balat.no", + "xn--blt-elab.no", + "balsfjord.no", + "bahccavuotna.no", + "xn--bhccavuotna-k7a.no", + "bamble.no", + "bardu.no", + "beardu.no", + "beiarn.no", + "bajddar.no", + "xn--bjddar-pta.no", + "baidar.no", + "xn--bidr-5nac.no", + "berg.no", + "bergen.no", + "berlevag.no", + "xn--berlevg-jxa.no", + "bearalvahki.no", + "xn--bearalvhki-y4a.no", + "bindal.no", + "birkenes.no", + "bjarkoy.no", + "xn--bjarky-fya.no", + "bjerkreim.no", + "bjugn.no", + "bodo.no", + "xn--bod-2na.no", + "badaddja.no", + "xn--bdddj-mrabd.no", + "budejju.no", + "bokn.no", + "bremanger.no", + "bronnoy.no", + "xn--brnny-wuac.no", + "bygland.no", + "bykle.no", + "barum.no", + "xn--brum-voa.no", + "bo.telemark.no", + "xn--b-5ga.telemark.no", + "bo.nordland.no", + "xn--b-5ga.nordland.no", + "bievat.no", + "xn--bievt-0qa.no", + "bomlo.no", + "xn--bmlo-gra.no", + "batsfjord.no", + "xn--btsfjord-9za.no", + "bahcavuotna.no", + "xn--bhcavuotna-s4a.no", + "dovre.no", + "drammen.no", + "drangedal.no", + "dyroy.no", + "xn--dyry-ira.no", + "donna.no", + "xn--dnna-gra.no", + "eid.no", + "eidfjord.no", + "eidsberg.no", + "eidskog.no", + "eidsvoll.no", + "eigersund.no", + "elverum.no", + "enebakk.no", + "engerdal.no", + "etne.no", + "etnedal.no", + "evenes.no", + "evenassi.no", + "xn--eveni-0qa01ga.no", + "evje-og-hornnes.no", + "farsund.no", + "fauske.no", + "fuossko.no", + "fuoisku.no", + "fedje.no", + "fet.no", + 
"finnoy.no", + "xn--finny-yua.no", + "fitjar.no", + "fjaler.no", + "fjell.no", + "flakstad.no", + "flatanger.no", + "flekkefjord.no", + "flesberg.no", + "flora.no", + "fla.no", + "xn--fl-zia.no", + "folldal.no", + "forsand.no", + "fosnes.no", + "frei.no", + "frogn.no", + "froland.no", + "frosta.no", + "frana.no", + "xn--frna-woa.no", + "froya.no", + "xn--frya-hra.no", + "fusa.no", + "fyresdal.no", + "forde.no", + "xn--frde-gra.no", + "gamvik.no", + "gangaviika.no", + "xn--ggaviika-8ya47h.no", + "gaular.no", + "gausdal.no", + "gildeskal.no", + "xn--gildeskl-g0a.no", + "giske.no", + "gjemnes.no", + "gjerdrum.no", + "gjerstad.no", + "gjesdal.no", + "gjovik.no", + "xn--gjvik-wua.no", + "gloppen.no", + "gol.no", + "gran.no", + "grane.no", + "granvin.no", + "gratangen.no", + "grimstad.no", + "grong.no", + "kraanghke.no", + "xn--kranghke-b0a.no", + "grue.no", + "gulen.no", + "hadsel.no", + "halden.no", + "halsa.no", + "hamar.no", + "hamaroy.no", + "habmer.no", + "xn--hbmer-xqa.no", + "hapmir.no", + "xn--hpmir-xqa.no", + "hammerfest.no", + "hammarfeasta.no", + "xn--hmmrfeasta-s4ac.no", + "haram.no", + "hareid.no", + "harstad.no", + "hasvik.no", + "aknoluokta.no", + "xn--koluokta-7ya57h.no", + "hattfjelldal.no", + "aarborte.no", + "haugesund.no", + "hemne.no", + "hemnes.no", + "hemsedal.no", + "heroy.more-og-romsdal.no", + "xn--hery-ira.xn--mre-og-romsdal-qqb.no", + "heroy.nordland.no", + "xn--hery-ira.nordland.no", + "hitra.no", + "hjartdal.no", + "hjelmeland.no", + "hobol.no", + "xn--hobl-ira.no", + "hof.no", + "hol.no", + "hole.no", + "holmestrand.no", + "holtalen.no", + "xn--holtlen-hxa.no", + "hornindal.no", + "horten.no", + "hurdal.no", + "hurum.no", + "hvaler.no", + "hyllestad.no", + "hagebostad.no", + "xn--hgebostad-g3a.no", + "hoyanger.no", + "xn--hyanger-q1a.no", + "hoylandet.no", + "xn--hylandet-54a.no", + "ha.no", + "xn--h-2fa.no", + "ibestad.no", + "inderoy.no", + "xn--indery-fya.no", + "iveland.no", + "jevnaker.no", + "jondal.no", + "jolster.no", + 
"xn--jlster-bya.no", + "karasjok.no", + "karasjohka.no", + "xn--krjohka-hwab49j.no", + "karlsoy.no", + "galsa.no", + "xn--gls-elac.no", + "karmoy.no", + "xn--karmy-yua.no", + "kautokeino.no", + "guovdageaidnu.no", + "klepp.no", + "klabu.no", + "xn--klbu-woa.no", + "kongsberg.no", + "kongsvinger.no", + "kragero.no", + "xn--krager-gya.no", + "kristiansand.no", + "kristiansund.no", + "krodsherad.no", + "xn--krdsherad-m8a.no", + "kvalsund.no", + "rahkkeravju.no", + "xn--rhkkervju-01af.no", + "kvam.no", + "kvinesdal.no", + "kvinnherad.no", + "kviteseid.no", + "kvitsoy.no", + "xn--kvitsy-fya.no", + "kvafjord.no", + "xn--kvfjord-nxa.no", + "giehtavuoatna.no", + "kvanangen.no", + "xn--kvnangen-k0a.no", + "navuotna.no", + "xn--nvuotna-hwa.no", + "kafjord.no", + "xn--kfjord-iua.no", + "gaivuotna.no", + "xn--givuotna-8ya.no", + "larvik.no", + "lavangen.no", + "lavagis.no", + "loabat.no", + "xn--loabt-0qa.no", + "lebesby.no", + "davvesiida.no", + "leikanger.no", + "leirfjord.no", + "leka.no", + "leksvik.no", + "lenvik.no", + "leangaviika.no", + "xn--leagaviika-52b.no", + "lesja.no", + "levanger.no", + "lier.no", + "lierne.no", + "lillehammer.no", + "lillesand.no", + "lindesnes.no", + "lindas.no", + "xn--linds-pra.no", + "lom.no", + "loppa.no", + "lahppi.no", + "xn--lhppi-xqa.no", + "lund.no", + "lunner.no", + "luroy.no", + "xn--lury-ira.no", + "luster.no", + "lyngdal.no", + "lyngen.no", + "ivgu.no", + "lardal.no", + "lerdal.no", + "xn--lrdal-sra.no", + "lodingen.no", + "xn--ldingen-q1a.no", + "lorenskog.no", + "xn--lrenskog-54a.no", + "loten.no", + "xn--lten-gra.no", + "malvik.no", + "masoy.no", + "xn--msy-ula0h.no", + "muosat.no", + "xn--muost-0qa.no", + "mandal.no", + "marker.no", + "marnardal.no", + "masfjorden.no", + "meland.no", + "meldal.no", + "melhus.no", + "meloy.no", + "xn--mely-ira.no", + "meraker.no", + "xn--merker-kua.no", + "moareke.no", + "xn--moreke-jua.no", + "midsund.no", + "midtre-gauldal.no", + "modalen.no", + "modum.no", + "molde.no", + "moskenes.no", + 
"moss.no", + "mosvik.no", + "malselv.no", + "xn--mlselv-iua.no", + "malatvuopmi.no", + "xn--mlatvuopmi-s4a.no", + "namdalseid.no", + "aejrie.no", + "namsos.no", + "namsskogan.no", + "naamesjevuemie.no", + "xn--nmesjevuemie-tcba.no", + "laakesvuemie.no", + "nannestad.no", + "narvik.no", + "narviika.no", + "naustdal.no", + "nedre-eiker.no", + "nes.akershus.no", + "nes.buskerud.no", + "nesna.no", + "nesodden.no", + "nesseby.no", + "unjarga.no", + "xn--unjrga-rta.no", + "nesset.no", + "nissedal.no", + "nittedal.no", + "nord-aurdal.no", + "nord-fron.no", + "nord-odal.no", + "norddal.no", + "nordkapp.no", + "davvenjarga.no", + "xn--davvenjrga-y4a.no", + "nordre-land.no", + "nordreisa.no", + "raisa.no", + "xn--risa-5na.no", + "nore-og-uvdal.no", + "notodden.no", + "naroy.no", + "xn--nry-yla5g.no", + "notteroy.no", + "xn--nttery-byae.no", + "odda.no", + "oksnes.no", + "xn--ksnes-uua.no", + "oppdal.no", + "oppegard.no", + "xn--oppegrd-ixa.no", + "orkdal.no", + "orland.no", + "xn--rland-uua.no", + "orskog.no", + "xn--rskog-uua.no", + "orsta.no", + "xn--rsta-fra.no", + "os.hedmark.no", + "os.hordaland.no", + "osen.no", + "osteroy.no", + "xn--ostery-fya.no", + "ostre-toten.no", + "xn--stre-toten-zcb.no", + "overhalla.no", + "ovre-eiker.no", + "xn--vre-eiker-k8a.no", + "oyer.no", + "xn--yer-zna.no", + "oygarden.no", + "xn--ygarden-p1a.no", + "oystre-slidre.no", + "xn--ystre-slidre-ujb.no", + "porsanger.no", + "porsangu.no", + "xn--porsgu-sta26f.no", + "porsgrunn.no", + "radoy.no", + "xn--rady-ira.no", + "rakkestad.no", + "rana.no", + "ruovat.no", + "randaberg.no", + "rauma.no", + "rendalen.no", + "rennebu.no", + "rennesoy.no", + "xn--rennesy-v1a.no", + "rindal.no", + "ringebu.no", + "ringerike.no", + "ringsaker.no", + "rissa.no", + "risor.no", + "xn--risr-ira.no", + "roan.no", + "rollag.no", + "rygge.no", + "ralingen.no", + "xn--rlingen-mxa.no", + "rodoy.no", + "xn--rdy-0nab.no", + "romskog.no", + "xn--rmskog-bya.no", + "roros.no", + "xn--rros-gra.no", + "rost.no", + 
"xn--rst-0na.no", + "royken.no", + "xn--ryken-vua.no", + "royrvik.no", + "xn--ryrvik-bya.no", + "rade.no", + "xn--rde-ula.no", + "salangen.no", + "siellak.no", + "saltdal.no", + "salat.no", + "xn--slt-elab.no", + "xn--slat-5na.no", + "samnanger.no", + "sande.more-og-romsdal.no", + "sande.xn--mre-og-romsdal-qqb.no", + "sande.vestfold.no", + "sandefjord.no", + "sandnes.no", + "sandoy.no", + "xn--sandy-yua.no", + "sarpsborg.no", + "sauda.no", + "sauherad.no", + "sel.no", + "selbu.no", + "selje.no", + "seljord.no", + "sigdal.no", + "siljan.no", + "sirdal.no", + "skaun.no", + "skedsmo.no", + "ski.no", + "skien.no", + "skiptvet.no", + "skjervoy.no", + "xn--skjervy-v1a.no", + "skierva.no", + "xn--skierv-uta.no", + "skjak.no", + "xn--skjk-soa.no", + "skodje.no", + "skanland.no", + "xn--sknland-fxa.no", + "skanit.no", + "xn--sknit-yqa.no", + "smola.no", + "xn--smla-hra.no", + "snillfjord.no", + "snasa.no", + "xn--snsa-roa.no", + "snoasa.no", + "snaase.no", + "xn--snase-nra.no", + "sogndal.no", + "sokndal.no", + "sola.no", + "solund.no", + "songdalen.no", + "sortland.no", + "spydeberg.no", + "stange.no", + "stavanger.no", + "steigen.no", + "steinkjer.no", + "stjordal.no", + "xn--stjrdal-s1a.no", + "stokke.no", + "stor-elvdal.no", + "stord.no", + "stordal.no", + "storfjord.no", + "omasvuotna.no", + "strand.no", + "stranda.no", + "stryn.no", + "sula.no", + "suldal.no", + "sund.no", + "sunndal.no", + "surnadal.no", + "sveio.no", + "svelvik.no", + "sykkylven.no", + "sogne.no", + "xn--sgne-gra.no", + "somna.no", + "xn--smna-gra.no", + "sondre-land.no", + "xn--sndre-land-0cb.no", + "sor-aurdal.no", + "xn--sr-aurdal-l8a.no", + "sor-fron.no", + "xn--sr-fron-q1a.no", + "sor-odal.no", + "xn--sr-odal-q1a.no", + "sor-varanger.no", + "xn--sr-varanger-ggb.no", + "matta-varjjat.no", + "xn--mtta-vrjjat-k7af.no", + "sorfold.no", + "xn--srfold-bya.no", + "sorreisa.no", + "xn--srreisa-q1a.no", + "sorum.no", + "xn--srum-gra.no", + "tana.no", + "deatnu.no", + "time.no", + "tingvoll.no", + 
"tinn.no", + "tjeldsund.no", + "dielddanuorri.no", + "tjome.no", + "xn--tjme-hra.no", + "tokke.no", + "tolga.no", + "torsken.no", + "tranoy.no", + "xn--trany-yua.no", + "tromso.no", + "xn--troms-zua.no", + "tromsa.no", + "romsa.no", + "trondheim.no", + "troandin.no", + "trysil.no", + "trana.no", + "xn--trna-woa.no", + "trogstad.no", + "xn--trgstad-r1a.no", + "tvedestrand.no", + "tydal.no", + "tynset.no", + "tysfjord.no", + "divtasvuodna.no", + "divttasvuotna.no", + "tysnes.no", + "tysvar.no", + "xn--tysvr-vra.no", + "tonsberg.no", + "xn--tnsberg-q1a.no", + "ullensaker.no", + "ullensvang.no", + "ulvik.no", + "utsira.no", + "vadso.no", + "xn--vads-jra.no", + "cahcesuolo.no", + "xn--hcesuolo-7ya35b.no", + "vaksdal.no", + "valle.no", + "vang.no", + "vanylven.no", + "vardo.no", + "xn--vard-jra.no", + "varggat.no", + "xn--vrggt-xqad.no", + "vefsn.no", + "vaapste.no", + "vega.no", + "vegarshei.no", + "xn--vegrshei-c0a.no", + "vennesla.no", + "verdal.no", + "verran.no", + "vestby.no", + "vestnes.no", + "vestre-slidre.no", + "vestre-toten.no", + "vestvagoy.no", + "xn--vestvgy-ixa6o.no", + "vevelstad.no", + "vik.no", + "vikna.no", + "vindafjord.no", + "volda.no", + "voss.no", + "varoy.no", + "xn--vry-yla5g.no", + "vagan.no", + "xn--vgan-qoa.no", + "voagat.no", + "vagsoy.no", + "xn--vgsy-qoa0j.no", + "vaga.no", + "xn--vg-yiab.no", + "valer.ostfold.no", + "xn--vler-qoa.xn--stfold-9xa.no", + "valer.hedmark.no", + "xn--vler-qoa.hedmark.no", + "*.np", + "nr", + "biz.nr", + "info.nr", + "gov.nr", + "edu.nr", + "org.nr", + "net.nr", + "com.nr", + "nu", + "nz", + "ac.nz", + "co.nz", + "cri.nz", + "geek.nz", + "gen.nz", + "govt.nz", + "health.nz", + "iwi.nz", + "kiwi.nz", + "maori.nz", + "mil.nz", + "xn--mori-qsa.nz", + "net.nz", + "org.nz", + "parliament.nz", + "school.nz", + "om", + "co.om", + "com.om", + "edu.om", + "gov.om", + "med.om", + "museum.om", + "net.om", + "org.om", + "pro.om", + "org", + "pa", + "ac.pa", + "gob.pa", + "com.pa", + "org.pa", + "sld.pa", + "edu.pa", + 
"net.pa", + "ing.pa", + "abo.pa", + "med.pa", + "nom.pa", + "pe", + "edu.pe", + "gob.pe", + "nom.pe", + "mil.pe", + "org.pe", + "com.pe", + "net.pe", + "pf", + "com.pf", + "org.pf", + "edu.pf", + "*.pg", + "ph", + "com.ph", + "net.ph", + "org.ph", + "gov.ph", + "edu.ph", + "ngo.ph", + "mil.ph", + "i.ph", + "pk", + "com.pk", + "net.pk", + "edu.pk", + "org.pk", + "fam.pk", + "biz.pk", + "web.pk", + "gov.pk", + "gob.pk", + "gok.pk", + "gon.pk", + "gop.pk", + "gos.pk", + "info.pk", + "pl", + "com.pl", + "net.pl", + "org.pl", + "aid.pl", + "agro.pl", + "atm.pl", + "auto.pl", + "biz.pl", + "edu.pl", + "gmina.pl", + "gsm.pl", + "info.pl", + "mail.pl", + "miasta.pl", + "media.pl", + "mil.pl", + "nieruchomosci.pl", + "nom.pl", + "pc.pl", + "powiat.pl", + "priv.pl", + "realestate.pl", + "rel.pl", + "sex.pl", + "shop.pl", + "sklep.pl", + "sos.pl", + "szkola.pl", + "targi.pl", + "tm.pl", + "tourism.pl", + "travel.pl", + "turystyka.pl", + "gov.pl", + "ap.gov.pl", + "ic.gov.pl", + "is.gov.pl", + "us.gov.pl", + "kmpsp.gov.pl", + "kppsp.gov.pl", + "kwpsp.gov.pl", + "psp.gov.pl", + "wskr.gov.pl", + "kwp.gov.pl", + "mw.gov.pl", + "ug.gov.pl", + "um.gov.pl", + "umig.gov.pl", + "ugim.gov.pl", + "upow.gov.pl", + "uw.gov.pl", + "starostwo.gov.pl", + "pa.gov.pl", + "po.gov.pl", + "psse.gov.pl", + "pup.gov.pl", + "rzgw.gov.pl", + "sa.gov.pl", + "so.gov.pl", + "sr.gov.pl", + "wsa.gov.pl", + "sko.gov.pl", + "uzs.gov.pl", + "wiih.gov.pl", + "winb.gov.pl", + "pinb.gov.pl", + "wios.gov.pl", + "witd.gov.pl", + "wzmiuw.gov.pl", + "piw.gov.pl", + "wiw.gov.pl", + "griw.gov.pl", + "wif.gov.pl", + "oum.gov.pl", + "sdn.gov.pl", + "zp.gov.pl", + "uppo.gov.pl", + "mup.gov.pl", + "wuoz.gov.pl", + "konsulat.gov.pl", + "oirm.gov.pl", + "augustow.pl", + "babia-gora.pl", + "bedzin.pl", + "beskidy.pl", + "bialowieza.pl", + "bialystok.pl", + "bielawa.pl", + "bieszczady.pl", + "boleslawiec.pl", + "bydgoszcz.pl", + "bytom.pl", + "cieszyn.pl", + "czeladz.pl", + "czest.pl", + "dlugoleka.pl", + "elblag.pl", + 
"elk.pl", + "glogow.pl", + "gniezno.pl", + "gorlice.pl", + "grajewo.pl", + "ilawa.pl", + "jaworzno.pl", + "jelenia-gora.pl", + "jgora.pl", + "kalisz.pl", + "kazimierz-dolny.pl", + "karpacz.pl", + "kartuzy.pl", + "kaszuby.pl", + "katowice.pl", + "kepno.pl", + "ketrzyn.pl", + "klodzko.pl", + "kobierzyce.pl", + "kolobrzeg.pl", + "konin.pl", + "konskowola.pl", + "kutno.pl", + "lapy.pl", + "lebork.pl", + "legnica.pl", + "lezajsk.pl", + "limanowa.pl", + "lomza.pl", + "lowicz.pl", + "lubin.pl", + "lukow.pl", + "malbork.pl", + "malopolska.pl", + "mazowsze.pl", + "mazury.pl", + "mielec.pl", + "mielno.pl", + "mragowo.pl", + "naklo.pl", + "nowaruda.pl", + "nysa.pl", + "olawa.pl", + "olecko.pl", + "olkusz.pl", + "olsztyn.pl", + "opoczno.pl", + "opole.pl", + "ostroda.pl", + "ostroleka.pl", + "ostrowiec.pl", + "ostrowwlkp.pl", + "pila.pl", + "pisz.pl", + "podhale.pl", + "podlasie.pl", + "polkowice.pl", + "pomorze.pl", + "pomorskie.pl", + "prochowice.pl", + "pruszkow.pl", + "przeworsk.pl", + "pulawy.pl", + "radom.pl", + "rawa-maz.pl", + "rybnik.pl", + "rzeszow.pl", + "sanok.pl", + "sejny.pl", + "slask.pl", + "slupsk.pl", + "sosnowiec.pl", + "stalowa-wola.pl", + "skoczow.pl", + "starachowice.pl", + "stargard.pl", + "suwalki.pl", + "swidnica.pl", + "swiebodzin.pl", + "swinoujscie.pl", + "szczecin.pl", + "szczytno.pl", + "tarnobrzeg.pl", + "tgory.pl", + "turek.pl", + "tychy.pl", + "ustka.pl", + "walbrzych.pl", + "warmia.pl", + "warszawa.pl", + "waw.pl", + "wegrow.pl", + "wielun.pl", + "wlocl.pl", + "wloclawek.pl", + "wodzislaw.pl", + "wolomin.pl", + "wroclaw.pl", + "zachpomor.pl", + "zagan.pl", + "zarow.pl", + "zgora.pl", + "zgorzelec.pl", + "pm", + "pn", + "gov.pn", + "co.pn", + "org.pn", + "edu.pn", + "net.pn", + "post", + "pr", + "com.pr", + "net.pr", + "org.pr", + "gov.pr", + "edu.pr", + "isla.pr", + "pro.pr", + "biz.pr", + "info.pr", + "name.pr", + "est.pr", + "prof.pr", + "ac.pr", + "pro", + "aaa.pro", + "aca.pro", + "acct.pro", + "avocat.pro", + "bar.pro", + "cpa.pro", + 
"eng.pro", + "jur.pro", + "law.pro", + "med.pro", + "recht.pro", + "ps", + "edu.ps", + "gov.ps", + "sec.ps", + "plo.ps", + "com.ps", + "org.ps", + "net.ps", + "pt", + "net.pt", + "gov.pt", + "org.pt", + "edu.pt", + "int.pt", + "publ.pt", + "com.pt", + "nome.pt", + "pw", + "co.pw", + "ne.pw", + "or.pw", + "ed.pw", + "go.pw", + "belau.pw", + "py", + "com.py", + "coop.py", + "edu.py", + "gov.py", + "mil.py", + "net.py", + "org.py", + "qa", + "com.qa", + "edu.qa", + "gov.qa", + "mil.qa", + "name.qa", + "net.qa", + "org.qa", + "sch.qa", + "re", + "asso.re", + "com.re", + "nom.re", + "ro", + "arts.ro", + "com.ro", + "firm.ro", + "info.ro", + "nom.ro", + "nt.ro", + "org.ro", + "rec.ro", + "store.ro", + "tm.ro", + "www.ro", + "rs", + "ac.rs", + "co.rs", + "edu.rs", + "gov.rs", + "in.rs", + "org.rs", + "ru", + "ac.ru", + "com.ru", + "edu.ru", + "int.ru", + "net.ru", + "org.ru", + "pp.ru", + "adygeya.ru", + "altai.ru", + "amur.ru", + "arkhangelsk.ru", + "astrakhan.ru", + "bashkiria.ru", + "belgorod.ru", + "bir.ru", + "bryansk.ru", + "buryatia.ru", + "cbg.ru", + "chel.ru", + "chelyabinsk.ru", + "chita.ru", + "chukotka.ru", + "chuvashia.ru", + "dagestan.ru", + "dudinka.ru", + "e-burg.ru", + "grozny.ru", + "irkutsk.ru", + "ivanovo.ru", + "izhevsk.ru", + "jar.ru", + "joshkar-ola.ru", + "kalmykia.ru", + "kaluga.ru", + "kamchatka.ru", + "karelia.ru", + "kazan.ru", + "kchr.ru", + "kemerovo.ru", + "khabarovsk.ru", + "khakassia.ru", + "khv.ru", + "kirov.ru", + "koenig.ru", + "komi.ru", + "kostroma.ru", + "krasnoyarsk.ru", + "kuban.ru", + "kurgan.ru", + "kursk.ru", + "lipetsk.ru", + "magadan.ru", + "mari.ru", + "mari-el.ru", + "marine.ru", + "mordovia.ru", + "msk.ru", + "murmansk.ru", + "nalchik.ru", + "nnov.ru", + "nov.ru", + "novosibirsk.ru", + "nsk.ru", + "omsk.ru", + "orenburg.ru", + "oryol.ru", + "palana.ru", + "penza.ru", + "perm.ru", + "ptz.ru", + "rnd.ru", + "ryazan.ru", + "sakhalin.ru", + "samara.ru", + "saratov.ru", + "simbirsk.ru", + "smolensk.ru", + "spb.ru", + 
"stavropol.ru", + "stv.ru", + "surgut.ru", + "tambov.ru", + "tatarstan.ru", + "tom.ru", + "tomsk.ru", + "tsaritsyn.ru", + "tsk.ru", + "tula.ru", + "tuva.ru", + "tver.ru", + "tyumen.ru", + "udm.ru", + "udmurtia.ru", + "ulan-ude.ru", + "vladikavkaz.ru", + "vladimir.ru", + "vladivostok.ru", + "volgograd.ru", + "vologda.ru", + "voronezh.ru", + "vrn.ru", + "vyatka.ru", + "yakutia.ru", + "yamal.ru", + "yaroslavl.ru", + "yekaterinburg.ru", + "yuzhno-sakhalinsk.ru", + "amursk.ru", + "baikal.ru", + "cmw.ru", + "fareast.ru", + "jamal.ru", + "kms.ru", + "k-uralsk.ru", + "kustanai.ru", + "kuzbass.ru", + "mytis.ru", + "nakhodka.ru", + "nkz.ru", + "norilsk.ru", + "oskol.ru", + "pyatigorsk.ru", + "rubtsovsk.ru", + "snz.ru", + "syzran.ru", + "vdonsk.ru", + "zgrad.ru", + "gov.ru", + "mil.ru", + "test.ru", + "rw", + "gov.rw", + "net.rw", + "edu.rw", + "ac.rw", + "com.rw", + "co.rw", + "int.rw", + "mil.rw", + "gouv.rw", + "sa", + "com.sa", + "net.sa", + "org.sa", + "gov.sa", + "med.sa", + "pub.sa", + "edu.sa", + "sch.sa", + "sb", + "com.sb", + "edu.sb", + "gov.sb", + "net.sb", + "org.sb", + "sc", + "com.sc", + "gov.sc", + "net.sc", + "org.sc", + "edu.sc", + "sd", + "com.sd", + "net.sd", + "org.sd", + "edu.sd", + "med.sd", + "tv.sd", + "gov.sd", + "info.sd", + "se", + "a.se", + "ac.se", + "b.se", + "bd.se", + "brand.se", + "c.se", + "d.se", + "e.se", + "f.se", + "fh.se", + "fhsk.se", + "fhv.se", + "g.se", + "h.se", + "i.se", + "k.se", + "komforb.se", + "kommunalforbund.se", + "komvux.se", + "l.se", + "lanbib.se", + "m.se", + "n.se", + "naturbruksgymn.se", + "o.se", + "org.se", + "p.se", + "parti.se", + "pp.se", + "press.se", + "r.se", + "s.se", + "t.se", + "tm.se", + "u.se", + "w.se", + "x.se", + "y.se", + "z.se", + "sg", + "com.sg", + "net.sg", + "org.sg", + "gov.sg", + "edu.sg", + "per.sg", + "sh", + "com.sh", + "net.sh", + "gov.sh", + "org.sh", + "mil.sh", + "si", + "sj", + "sk", + "sl", + "com.sl", + "net.sl", + "edu.sl", + "gov.sl", + "org.sl", + "sm", + "sn", + "art.sn", + 
"com.sn", + "edu.sn", + "gouv.sn", + "org.sn", + "perso.sn", + "univ.sn", + "so", + "com.so", + "net.so", + "org.so", + "sr", + "st", + "co.st", + "com.st", + "consulado.st", + "edu.st", + "embaixada.st", + "gov.st", + "mil.st", + "net.st", + "org.st", + "principe.st", + "saotome.st", + "store.st", + "su", + "adygeya.su", + "arkhangelsk.su", + "balashov.su", + "bashkiria.su", + "bryansk.su", + "dagestan.su", + "grozny.su", + "ivanovo.su", + "kalmykia.su", + "kaluga.su", + "karelia.su", + "khakassia.su", + "krasnodar.su", + "kurgan.su", + "lenug.su", + "mordovia.su", + "msk.su", + "murmansk.su", + "nalchik.su", + "nov.su", + "obninsk.su", + "penza.su", + "pokrovsk.su", + "sochi.su", + "spb.su", + "togliatti.su", + "troitsk.su", + "tula.su", + "tuva.su", + "vladikavkaz.su", + "vladimir.su", + "vologda.su", + "sv", + "com.sv", + "edu.sv", + "gob.sv", + "org.sv", + "red.sv", + "sx", + "gov.sx", + "sy", + "edu.sy", + "gov.sy", + "net.sy", + "mil.sy", + "com.sy", + "org.sy", + "sz", + "co.sz", + "ac.sz", + "org.sz", + "tc", + "td", + "tel", + "tf", + "tg", + "th", + "ac.th", + "co.th", + "go.th", + "in.th", + "mi.th", + "net.th", + "or.th", + "tj", + "ac.tj", + "biz.tj", + "co.tj", + "com.tj", + "edu.tj", + "go.tj", + "gov.tj", + "int.tj", + "mil.tj", + "name.tj", + "net.tj", + "nic.tj", + "org.tj", + "test.tj", + "web.tj", + "tk", + "tl", + "gov.tl", + "tm", + "com.tm", + "co.tm", + "org.tm", + "net.tm", + "nom.tm", + "gov.tm", + "mil.tm", + "edu.tm", + "tn", + "com.tn", + "ens.tn", + "fin.tn", + "gov.tn", + "ind.tn", + "intl.tn", + "nat.tn", + "net.tn", + "org.tn", + "info.tn", + "perso.tn", + "tourism.tn", + "edunet.tn", + "rnrt.tn", + "rns.tn", + "rnu.tn", + "mincom.tn", + "agrinet.tn", + "defense.tn", + "turen.tn", + "to", + "com.to", + "gov.to", + "net.to", + "org.to", + "edu.to", + "mil.to", + "tr", + "com.tr", + "info.tr", + "biz.tr", + "net.tr", + "org.tr", + "web.tr", + "gen.tr", + "tv.tr", + "av.tr", + "dr.tr", + "bbs.tr", + "name.tr", + "tel.tr", + "gov.tr", 
+ "bel.tr", + "pol.tr", + "mil.tr", + "k12.tr", + "edu.tr", + "kep.tr", + "nc.tr", + "gov.nc.tr", + "travel", + "tt", + "co.tt", + "com.tt", + "org.tt", + "net.tt", + "biz.tt", + "info.tt", + "pro.tt", + "int.tt", + "coop.tt", + "jobs.tt", + "mobi.tt", + "travel.tt", + "museum.tt", + "aero.tt", + "name.tt", + "gov.tt", + "edu.tt", + "tv", + "tw", + "edu.tw", + "gov.tw", + "mil.tw", + "com.tw", + "net.tw", + "org.tw", + "idv.tw", + "game.tw", + "ebiz.tw", + "club.tw", + "xn--zf0ao64a.tw", + "xn--uc0atv.tw", + "xn--czrw28b.tw", + "tz", + "ac.tz", + "co.tz", + "go.tz", + "hotel.tz", + "info.tz", + "me.tz", + "mil.tz", + "mobi.tz", + "ne.tz", + "or.tz", + "sc.tz", + "tv.tz", + "ua", + "com.ua", + "edu.ua", + "gov.ua", + "in.ua", + "net.ua", + "org.ua", + "cherkassy.ua", + "cherkasy.ua", + "chernigov.ua", + "chernihiv.ua", + "chernivtsi.ua", + "chernovtsy.ua", + "ck.ua", + "cn.ua", + "cr.ua", + "crimea.ua", + "cv.ua", + "dn.ua", + "dnepropetrovsk.ua", + "dnipropetrovsk.ua", + "dominic.ua", + "donetsk.ua", + "dp.ua", + "if.ua", + "ivano-frankivsk.ua", + "kh.ua", + "kharkiv.ua", + "kharkov.ua", + "kherson.ua", + "khmelnitskiy.ua", + "khmelnytskyi.ua", + "kiev.ua", + "kirovograd.ua", + "km.ua", + "kr.ua", + "krym.ua", + "ks.ua", + "kv.ua", + "kyiv.ua", + "lg.ua", + "lt.ua", + "lugansk.ua", + "lutsk.ua", + "lv.ua", + "lviv.ua", + "mk.ua", + "mykolaiv.ua", + "nikolaev.ua", + "od.ua", + "odesa.ua", + "odessa.ua", + "pl.ua", + "poltava.ua", + "rivne.ua", + "rovno.ua", + "rv.ua", + "sb.ua", + "sebastopol.ua", + "sevastopol.ua", + "sm.ua", + "sumy.ua", + "te.ua", + "ternopil.ua", + "uz.ua", + "uzhgorod.ua", + "vinnica.ua", + "vinnytsia.ua", + "vn.ua", + "volyn.ua", + "yalta.ua", + "zaporizhzhe.ua", + "zaporizhzhia.ua", + "zhitomir.ua", + "zhytomyr.ua", + "zp.ua", + "zt.ua", + "ug", + "co.ug", + "or.ug", + "ac.ug", + "sc.ug", + "go.ug", + "ne.ug", + "com.ug", + "org.ug", + "uk", + "ac.uk", + "co.uk", + "gov.uk", + "ltd.uk", + "me.uk", + "net.uk", + "nhs.uk", + "org.uk", + 
"plc.uk", + "police.uk", + "*.sch.uk", + "us", + "dni.us", + "fed.us", + "isa.us", + "kids.us", + "nsn.us", + "ak.us", + "al.us", + "ar.us", + "as.us", + "az.us", + "ca.us", + "co.us", + "ct.us", + "dc.us", + "de.us", + "fl.us", + "ga.us", + "gu.us", + "hi.us", + "ia.us", + "id.us", + "il.us", + "in.us", + "ks.us", + "ky.us", + "la.us", + "ma.us", + "md.us", + "me.us", + "mi.us", + "mn.us", + "mo.us", + "ms.us", + "mt.us", + "nc.us", + "nd.us", + "ne.us", + "nh.us", + "nj.us", + "nm.us", + "nv.us", + "ny.us", + "oh.us", + "ok.us", + "or.us", + "pa.us", + "pr.us", + "ri.us", + "sc.us", + "sd.us", + "tn.us", + "tx.us", + "ut.us", + "vi.us", + "vt.us", + "va.us", + "wa.us", + "wi.us", + "wv.us", + "wy.us", + "k12.ak.us", + "k12.al.us", + "k12.ar.us", + "k12.as.us", + "k12.az.us", + "k12.ca.us", + "k12.co.us", + "k12.ct.us", + "k12.dc.us", + "k12.de.us", + "k12.fl.us", + "k12.ga.us", + "k12.gu.us", + "k12.ia.us", + "k12.id.us", + "k12.il.us", + "k12.in.us", + "k12.ks.us", + "k12.ky.us", + "k12.la.us", + "k12.ma.us", + "k12.md.us", + "k12.me.us", + "k12.mi.us", + "k12.mn.us", + "k12.mo.us", + "k12.ms.us", + "k12.mt.us", + "k12.nc.us", + "k12.ne.us", + "k12.nh.us", + "k12.nj.us", + "k12.nm.us", + "k12.nv.us", + "k12.ny.us", + "k12.oh.us", + "k12.ok.us", + "k12.or.us", + "k12.pa.us", + "k12.pr.us", + "k12.ri.us", + "k12.sc.us", + "k12.tn.us", + "k12.tx.us", + "k12.ut.us", + "k12.vi.us", + "k12.vt.us", + "k12.va.us", + "k12.wa.us", + "k12.wi.us", + "k12.wy.us", + "cc.ak.us", + "cc.al.us", + "cc.ar.us", + "cc.as.us", + "cc.az.us", + "cc.ca.us", + "cc.co.us", + "cc.ct.us", + "cc.dc.us", + "cc.de.us", + "cc.fl.us", + "cc.ga.us", + "cc.gu.us", + "cc.hi.us", + "cc.ia.us", + "cc.id.us", + "cc.il.us", + "cc.in.us", + "cc.ks.us", + "cc.ky.us", + "cc.la.us", + "cc.ma.us", + "cc.md.us", + "cc.me.us", + "cc.mi.us", + "cc.mn.us", + "cc.mo.us", + "cc.ms.us", + "cc.mt.us", + "cc.nc.us", + "cc.nd.us", + "cc.ne.us", + "cc.nh.us", + "cc.nj.us", + "cc.nm.us", + "cc.nv.us", + "cc.ny.us", + 
"cc.oh.us", + "cc.ok.us", + "cc.or.us", + "cc.pa.us", + "cc.pr.us", + "cc.ri.us", + "cc.sc.us", + "cc.sd.us", + "cc.tn.us", + "cc.tx.us", + "cc.ut.us", + "cc.vi.us", + "cc.vt.us", + "cc.va.us", + "cc.wa.us", + "cc.wi.us", + "cc.wv.us", + "cc.wy.us", + "lib.ak.us", + "lib.al.us", + "lib.ar.us", + "lib.as.us", + "lib.az.us", + "lib.ca.us", + "lib.co.us", + "lib.ct.us", + "lib.dc.us", + "lib.de.us", + "lib.fl.us", + "lib.ga.us", + "lib.gu.us", + "lib.hi.us", + "lib.ia.us", + "lib.id.us", + "lib.il.us", + "lib.in.us", + "lib.ks.us", + "lib.ky.us", + "lib.la.us", + "lib.ma.us", + "lib.md.us", + "lib.me.us", + "lib.mi.us", + "lib.mn.us", + "lib.mo.us", + "lib.ms.us", + "lib.mt.us", + "lib.nc.us", + "lib.nd.us", + "lib.ne.us", + "lib.nh.us", + "lib.nj.us", + "lib.nm.us", + "lib.nv.us", + "lib.ny.us", + "lib.oh.us", + "lib.ok.us", + "lib.or.us", + "lib.pa.us", + "lib.pr.us", + "lib.ri.us", + "lib.sc.us", + "lib.sd.us", + "lib.tn.us", + "lib.tx.us", + "lib.ut.us", + "lib.vi.us", + "lib.vt.us", + "lib.va.us", + "lib.wa.us", + "lib.wi.us", + "lib.wy.us", + "pvt.k12.ma.us", + "chtr.k12.ma.us", + "paroch.k12.ma.us", + "uy", + "com.uy", + "edu.uy", + "gub.uy", + "mil.uy", + "net.uy", + "org.uy", + "uz", + "co.uz", + "com.uz", + "net.uz", + "org.uz", + "va", + "vc", + "com.vc", + "net.vc", + "org.vc", + "gov.vc", + "mil.vc", + "edu.vc", + "ve", + "arts.ve", + "co.ve", + "com.ve", + "e12.ve", + "edu.ve", + "firm.ve", + "gob.ve", + "gov.ve", + "info.ve", + "int.ve", + "mil.ve", + "net.ve", + "org.ve", + "rec.ve", + "store.ve", + "tec.ve", + "web.ve", + "vg", + "vi", + "co.vi", + "com.vi", + "k12.vi", + "net.vi", + "org.vi", + "vn", + "com.vn", + "net.vn", + "org.vn", + "edu.vn", + "gov.vn", + "int.vn", + "ac.vn", + "biz.vn", + "info.vn", + "name.vn", + "pro.vn", + "health.vn", + "vu", + "com.vu", + "edu.vu", + "net.vu", + "org.vu", + "wf", + "ws", + "com.ws", + "net.ws", + "org.ws", + "gov.ws", + "edu.ws", + "yt", + "xn--mgbaam7a8h", + "xn--y9a3aq", + "xn--54b7fta0cc", + 
"xn--90ais", + "xn--fiqs8s", + "xn--fiqz9s", + "xn--lgbbat1ad8j", + "xn--wgbh1c", + "xn--node", + "xn--qxam", + "xn--j6w193g", + "xn--h2brj9c", + "xn--mgbbh1a71e", + "xn--fpcrj9c3d", + "xn--gecrj9c", + "xn--s9brj9c", + "xn--45brj9c", + "xn--xkc2dl3a5ee0h", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "xn--mgbtx2b", + "xn--mgbayh7gpa", + "xn--3e0b707e", + "xn--80ao21a", + "xn--fzc2c9e2c", + "xn--xkc2al3hye2a", + "xn--mgbc0a9azcg", + "xn--d1alf", + "xn--l1acc", + "xn--mix891f", + "xn--mix082f", + "xn--mgbx4cd0ab", + "xn--mgb9awbf", + "xn--mgbai9azgqp6j", + "xn--mgbai9a5eva00b", + "xn--ygbi2ammx", + "xn--90a3ac", + "xn--o1ac.xn--90a3ac", + "xn--c1avg.xn--90a3ac", + "xn--90azh.xn--90a3ac", + "xn--d1at.xn--90a3ac", + "xn--o1ach.xn--90a3ac", + "xn--80au.xn--90a3ac", + "xn--p1ai", + "xn--wgbl6a", + "xn--mgberp4a5d4ar", + "xn--mgberp4a5d4a87g", + "xn--mgbqly7c0a67fbc", + "xn--mgbqly7cvafr", + "xn--mgbpl2fh", + "xn--yfro4i67o", + "xn--clchc0ea0b2g2a9gcd", + "xn--ogbpf8fl", + "xn--mgbtf8fl", + "xn--o3cw4h", + "xn--pgbs0dh", + "xn--kpry57d", + "xn--kprw13d", + "xn--nnx388a", + "xn--j1amh", + "xn--mgb2ddes", + "xxx", + "*.ye", + "ac.za", + "agric.za", + "alt.za", + "co.za", + "edu.za", + "gov.za", + "grondar.za", + "law.za", + "mil.za", + "net.za", + "ngo.za", + "nis.za", + "nom.za", + "org.za", + "school.za", + "tm.za", + "web.za", + "*.zm", + "*.zw", + "aaa", + "aarp", + "abarth", + "abb", + "abbott", + "abbvie", + "abc", + "able", + "abogado", + "abudhabi", + "academy", + "accenture", + "accountant", + "accountants", + "aco", + "active", + "actor", + "adac", + "ads", + "adult", + "aeg", + "aetna", + "afamilycompany", + "afl", + "africa", + "africamagic", + "agakhan", + "agency", + "aig", + "aigo", + "airbus", + "airforce", + "airtel", + "akdn", + "alfaromeo", + "alibaba", + "alipay", + "allfinanz", + "allstate", + "ally", + "alsace", + "alstom", + "americanexpress", + "americanfamily", + "amex", + "amfam", + "amica", + "amsterdam", + "analytics", + "android", + "anquan", + 
"anz", + "aol", + "apartments", + "app", + "apple", + "aquarelle", + "arab", + "aramco", + "archi", + "army", + "arte", + "asda", + "associates", + "athleta", + "attorney", + "auction", + "audi", + "audible", + "audio", + "auspost", + "author", + "auto", + "autos", + "avianca", + "aws", + "axa", + "azure", + "baby", + "baidu", + "banamex", + "bananarepublic", + "band", + "bank", + "bar", + "barcelona", + "barclaycard", + "barclays", + "barefoot", + "bargains", + "baseball", + "basketball", + "bauhaus", + "bayern", + "bbc", + "bbt", + "bbva", + "bcg", + "bcn", + "beats", + "beauty", + "beer", + "bentley", + "berlin", + "best", + "bestbuy", + "bet", + "bharti", + "bible", + "bid", + "bike", + "bing", + "bingo", + "bio", + "black", + "blackfriday", + "blanco", + "blockbuster", + "blog", + "bloomberg", + "blue", + "bms", + "bmw", + "bnl", + "bnpparibas", + "boats", + "boehringer", + "bofa", + "bom", + "bond", + "boo", + "book", + "booking", + "boots", + "bosch", + "bostik", + "boston", + "bot", + "boutique", + "box", + "bradesco", + "bridgestone", + "broadway", + "broker", + "brother", + "brussels", + "budapest", + "bugatti", + "build", + "builders", + "business", + "buy", + "buzz", + "bzh", + "cab", + "cafe", + "cal", + "call", + "calvinklein", + "camera", + "camp", + "cancerresearch", + "canon", + "capetown", + "capital", + "capitalone", + "car", + "caravan", + "cards", + "care", + "career", + "careers", + "cars", + "cartier", + "casa", + "case", + "caseih", + "cash", + "casino", + "catering", + "catholic", + "cba", + "cbn", + "cbre", + "cbs", + "ceb", + "center", + "ceo", + "cern", + "cfa", + "cfd", + "chanel", + "channel", + "chase", + "chat", + "cheap", + "chintai", + "chloe", + "christmas", + "chrome", + "chrysler", + "church", + "cipriani", + "circle", + "cisco", + "citadel", + "citi", + "citic", + "city", + "cityeats", + "claims", + "cleaning", + "click", + "clinic", + "clinique", + "clothing", + "cloud", + "club", + "clubmed", + "coach", + "codes", + "coffee", 
+ "college", + "cologne", + "comcast", + "commbank", + "community", + "company", + "compare", + "computer", + "comsec", + "condos", + "construction", + "consulting", + "contact", + "contractors", + "cooking", + "cookingchannel", + "cool", + "corsica", + "country", + "coupon", + "coupons", + "courses", + "credit", + "creditcard", + "creditunion", + "cricket", + "crown", + "crs", + "cruise", + "cruises", + "csc", + "cuisinella", + "cymru", + "cyou", + "dabur", + "dad", + "dance", + "date", + "dating", + "datsun", + "day", + "dclk", + "dds", + "deal", + "dealer", + "deals", + "degree", + "delivery", + "dell", + "deloitte", + "delta", + "democrat", + "dental", + "dentist", + "desi", + "design", + "dev", + "dhl", + "diamonds", + "diet", + "digital", + "direct", + "directory", + "discount", + "discover", + "dish", + "diy", + "dnp", + "docs", + "dodge", + "dog", + "doha", + "domains", + "dot", + "download", + "drive", + "dstv", + "dtv", + "dubai", + "duck", + "dunlop", + "duns", + "dupont", + "durban", + "dvag", + "dwg", + "earth", + "eat", + "edeka", + "education", + "email", + "emerck", + "emerson", + "energy", + "engineer", + "engineering", + "enterprises", + "epost", + "epson", + "equipment", + "ericsson", + "erni", + "esq", + "estate", + "esurance", + "etisalat", + "eurovision", + "eus", + "events", + "everbank", + "exchange", + "expert", + "exposed", + "express", + "extraspace", + "fage", + "fail", + "fairwinds", + "faith", + "family", + "fan", + "fans", + "farm", + "farmers", + "fashion", + "fast", + "fedex", + "feedback", + "ferrari", + "ferrero", + "fiat", + "fidelity", + "fido", + "film", + "final", + "finance", + "financial", + "fire", + "firestone", + "firmdale", + "fish", + "fishing", + "fit", + "fitness", + "flickr", + "flights", + "flir", + "florist", + "flowers", + "flsmidth", + "fly", + "foo", + "foodnetwork", + "football", + "ford", + "forex", + "forsale", + "forum", + "foundation", + "fox", + "free", + "fresenius", + "frl", + "frogans", + "frontdoor", + 
"frontier", + "ftr", + "fujitsu", + "fujixerox", + "fun", + "fund", + "furniture", + "futbol", + "fyi", + "gal", + "gallery", + "gallo", + "gallup", + "game", + "games", + "gap", + "garden", + "gbiz", + "gdn", + "gea", + "gent", + "genting", + "george", + "ggee", + "gift", + "gifts", + "gives", + "giving", + "glade", + "glass", + "gle", + "global", + "globo", + "gmail", + "gmbh", + "gmo", + "gmx", + "godaddy", + "gold", + "goldpoint", + "golf", + "goo", + "goodhands", + "goodyear", + "goog", + "google", + "gop", + "got", + "gotv", + "grainger", + "graphics", + "gratis", + "green", + "gripe", + "group", + "guardian", + "gucci", + "guge", + "guide", + "guitars", + "guru", + "hair", + "hamburg", + "hangout", + "haus", + "hbo", + "hdfc", + "hdfcbank", + "health", + "healthcare", + "help", + "helsinki", + "here", + "hermes", + "hgtv", + "hiphop", + "hisamitsu", + "hitachi", + "hiv", + "hkt", + "hockey", + "holdings", + "holiday", + "homedepot", + "homegoods", + "homes", + "homesense", + "honda", + "honeywell", + "horse", + "host", + "hosting", + "hot", + "hoteles", + "hotmail", + "house", + "how", + "hsbc", + "htc", + "hughes", + "hyatt", + "hyundai", + "ibm", + "icbc", + "ice", + "icu", + "ieee", + "ifm", + "iinet", + "ikano", + "imamat", + "imdb", + "immo", + "immobilien", + "industries", + "infiniti", + "ing", + "ink", + "institute", + "insurance", + "insure", + "intel", + "international", + "intuit", + "investments", + "ipiranga", + "irish", + "iselect", + "ismaili", + "ist", + "istanbul", + "itau", + "itv", + "iveco", + "iwc", + "jaguar", + "java", + "jcb", + "jcp", + "jeep", + "jetzt", + "jewelry", + "jio", + "jlc", + "jll", + "jmp", + "jnj", + "joburg", + "jot", + "joy", + "jpmorgan", + "jprs", + "juegos", + "juniper", + "kaufen", + "kddi", + "kerryhotels", + "kerrylogistics", + "kerryproperties", + "kfh", + "kia", + "kim", + "kinder", + "kindle", + "kitchen", + "kiwi", + "koeln", + "komatsu", + "kosher", + "kpmg", + "kpn", + "krd", + "kred", + "kuokgroup", + 
"kyknet", + "kyoto", + "lacaixa", + "ladbrokes", + "lamborghini", + "lamer", + "lancaster", + "lancia", + "lancome", + "land", + "landrover", + "lanxess", + "lasalle", + "lat", + "latino", + "latrobe", + "law", + "lawyer", + "lds", + "lease", + "leclerc", + "lefrak", + "legal", + "lego", + "lexus", + "lgbt", + "liaison", + "lidl", + "life", + "lifeinsurance", + "lifestyle", + "lighting", + "like", + "lilly", + "limited", + "limo", + "lincoln", + "linde", + "link", + "lipsy", + "live", + "living", + "lixil", + "loan", + "loans", + "locker", + "locus", + "loft", + "lol", + "london", + "lotte", + "lotto", + "love", + "lpl", + "lplfinancial", + "ltd", + "ltda", + "lundbeck", + "lupin", + "luxe", + "luxury", + "macys", + "madrid", + "maif", + "maison", + "makeup", + "man", + "management", + "mango", + "market", + "marketing", + "markets", + "marriott", + "marshalls", + "maserati", + "mattel", + "mba", + "mcd", + "mcdonalds", + "mckinsey", + "med", + "media", + "meet", + "melbourne", + "meme", + "memorial", + "men", + "menu", + "meo", + "metlife", + "miami", + "microsoft", + "mini", + "mint", + "mit", + "mitsubishi", + "mlb", + "mls", + "mma", + "mnet", + "mobily", + "moda", + "moe", + "moi", + "mom", + "monash", + "money", + "monster", + "montblanc", + "mopar", + "mormon", + "mortgage", + "moscow", + "moto", + "motorcycles", + "mov", + "movie", + "movistar", + "msd", + "mtn", + "mtpc", + "mtr", + "multichoice", + "mutual", + "mutuelle", + "mzansimagic", + "nab", + "nadex", + "nagoya", + "naspers", + "nationwide", + "natura", + "navy", + "nba", + "nec", + "netbank", + "netflix", + "network", + "neustar", + "new", + "newholland", + "news", + "next", + "nextdirect", + "nexus", + "nfl", + "ngo", + "nhk", + "nico", + "nike", + "nikon", + "ninja", + "nissan", + "nissay", + "nokia", + "northwesternmutual", + "norton", + "now", + "nowruz", + "nowtv", + "nra", + "nrw", + "ntt", + "nyc", + "obi", + "observer", + "off", + "office", + "okinawa", + "olayan", + "olayangroup", + 
"oldnavy", + "ollo", + "omega", + "one", + "ong", + "onl", + "online", + "onyourside", + "ooo", + "open", + "oracle", + "orange", + "organic", + "orientexpress", + "origins", + "osaka", + "otsuka", + "ott", + "ovh", + "page", + "pamperedchef", + "panasonic", + "panerai", + "paris", + "pars", + "partners", + "parts", + "party", + "passagens", + "pay", + "payu", + "pccw", + "pet", + "pfizer", + "pharmacy", + "philips", + "photo", + "photography", + "photos", + "physio", + "piaget", + "pics", + "pictet", + "pictures", + "pid", + "pin", + "ping", + "pink", + "pioneer", + "pizza", + "place", + "play", + "playstation", + "plumbing", + "plus", + "pnc", + "pohl", + "poker", + "politie", + "porn", + "pramerica", + "praxi", + "press", + "prime", + "prod", + "productions", + "prof", + "progressive", + "promo", + "properties", + "property", + "protection", + "pru", + "prudential", + "pub", + "pwc", + "qpon", + "quebec", + "quest", + "qvc", + "racing", + "raid", + "read", + "realestate", + "realtor", + "realty", + "recipes", + "red", + "redstone", + "redumbrella", + "rehab", + "reise", + "reisen", + "reit", + "reliance", + "ren", + "rent", + "rentals", + "repair", + "report", + "republican", + "rest", + "restaurant", + "review", + "reviews", + "rexroth", + "rich", + "richardli", + "ricoh", + "rightathome", + "ril", + "rio", + "rip", + "rmit", + "rocher", + "rocks", + "rodeo", + "rogers", + "room", + "rsvp", + "ruhr", + "run", + "rwe", + "ryukyu", + "saarland", + "safe", + "safety", + "sakura", + "sale", + "salon", + "samsclub", + "samsung", + "sandvik", + "sandvikcoromant", + "sanofi", + "sap", + "sapo", + "sarl", + "sas", + "save", + "saxo", + "sbi", + "sbs", + "sca", + "scb", + "schaeffler", + "schmidt", + "scholarships", + "school", + "schule", + "schwarz", + "science", + "scjohnson", + "scor", + "scot", + "seat", + "secure", + "security", + "seek", + "select", + "sener", + "services", + "ses", + "seven", + "sew", + "sex", + "sexy", + "sfr", + "shangrila", + "sharp", + 
"shaw", + "shell", + "shia", + "shiksha", + "shoes", + "shouji", + "show", + "showtime", + "shriram", + "silk", + "sina", + "singles", + "site", + "ski", + "skin", + "sky", + "skype", + "sling", + "smart", + "smile", + "sncf", + "soccer", + "social", + "softbank", + "software", + "sohu", + "solar", + "solutions", + "song", + "sony", + "soy", + "space", + "spiegel", + "spot", + "spreadbetting", + "srl", + "srt", + "stada", + "staples", + "star", + "starhub", + "statebank", + "statefarm", + "statoil", + "stc", + "stcgroup", + "stockholm", + "storage", + "store", + "stream", + "studio", + "study", + "style", + "sucks", + "supersport", + "supplies", + "supply", + "support", + "surf", + "surgery", + "suzuki", + "swatch", + "swiftcover", + "swiss", + "sydney", + "symantec", + "systems", + "tab", + "taipei", + "talk", + "taobao", + "target", + "tatamotors", + "tatar", + "tattoo", + "tax", + "taxi", + "tci", + "tdk", + "team", + "tech", + "technology", + "telecity", + "telefonica", + "temasek", + "tennis", + "teva", + "thd", + "theater", + "theatre", + "theguardian", + "tiaa", + "tickets", + "tienda", + "tiffany", + "tips", + "tires", + "tirol", + "tjmaxx", + "tjx", + "tkmaxx", + "tmall", + "today", + "tokyo", + "tools", + "top", + "toray", + "toshiba", + "total", + "tours", + "town", + "toyota", + "toys", + "trade", + "trading", + "training", + "travelchannel", + "travelers", + "travelersinsurance", + "trust", + "trv", + "tube", + "tui", + "tunes", + "tushu", + "tvs", + "ubank", + "ubs", + "uconnect", + "unicom", + "university", + "uno", + "uol", + "ups", + "vacations", + "vana", + "vanguard", + "vegas", + "ventures", + "verisign", + "versicherung", + "vet", + "viajes", + "video", + "vig", + "viking", + "villas", + "vin", + "vip", + "virgin", + "visa", + "vision", + "vista", + "vistaprint", + "viva", + "vivo", + "vlaanderen", + "vodka", + "volkswagen", + "volvo", + "vote", + "voting", + "voto", + "voyage", + "vuelos", + "wales", + "walmart", + "walter", + "wang", + 
"wanggou", + "warman", + "watch", + "watches", + "weather", + "weatherchannel", + "webcam", + "weber", + "website", + "wed", + "wedding", + "weibo", + "weir", + "whoswho", + "wien", + "wiki", + "williamhill", + "win", + "windows", + "wine", + "winners", + "wme", + "wolterskluwer", + "woodside", + "work", + "works", + "world", + "wow", + "wtc", + "wtf", + "xbox", + "xerox", + "xfinity", + "xihuan", + "xin", + "xn--11b4c3d", + "xn--1ck2e1b", + "xn--1qqw23a", + "xn--30rr7y", + "xn--3bst00m", + "xn--3ds443g", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", + "xn--45q11c", + "xn--4gbrim", + "xn--4gq48lf9j", + "xn--55qw42g", + "xn--55qx5d", + "xn--5su34j936bgsg", + "xn--5tzm5g", + "xn--6frz82g", + "xn--6qq986b3xl", + "xn--80adxhks", + "xn--80aqecdr1a", + "xn--80asehdb", + "xn--80aswg", + "xn--8y0a063a", + "xn--9dbq2a", + "xn--9et52u", + "xn--9krt00a", + "xn--b4w605ferd", + "xn--bck1b9a5dre4c", + "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", + "xn--cg4bki", + "xn--czr694b", + "xn--czrs0t", + "xn--czru2d", + "xn--d1acj3b", + "xn--eckvdtc9d", + "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", + "xn--fiq228c5hs", + "xn--fiq64b", + "xn--fjq720a", + "xn--flw351e", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", + "xn--gk3at1e", + "xn--hxt814e", + "xn--i1b6b1a6a2e", + "xn--imr513n", + "xn--io0a7i", + "xn--j1aef", + "xn--jlq61u9w7b", + "xn--jvr189m", + "xn--kcrx77d1x4a", + "xn--kpu716f", + "xn--kput3i", + "xn--mgba3a3ejt", + "xn--mgba7c0bbn0a", + "xn--mgbaakc7dvf", + "xn--mgbab2bd", + "xn--mgbb9fbpob", + "xn--mgbca7dzdo", + "xn--mgbi4ecexp", + "xn--mgbt3dhd", + "xn--mk1bu44c", + "xn--mxtq1m", + "xn--ngbc5azd", + "xn--ngbe9e0a", + "xn--ngbrx", + "xn--nqv7f", + "xn--nqv7fs00ema", + "xn--nyqy26a", + "xn--p1acf", + "xn--pbt977c", + "xn--pssy2u", + "xn--q9jyb4c", + "xn--qcka1pmc", + "xn--rhqv96g", + "xn--rovu88b", + "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", + "xn--tiq49xqyj", + "xn--unup4y", + "xn--vermgensberater-ctb", + 
"xn--vermgensberatung-pwb", + "xn--vhquv", + "xn--vuq861b", + "xn--w4r85el8fhu5dnra", + "xn--w4rs40l", + "xn--xhq521b", + "xn--zfr164b", + "xperia", + "xyz", + "yachts", + "yahoo", + "yamaxun", + "yandex", + "yodobashi", + "yoga", + "yokohama", + "you", + "youtube", + "yun", + "zappos", + "zara", + "zero", + "zip", + "zippo", + "zone", + "zuerich", + "cloudfront.net", + "ap-northeast-1.compute.amazonaws.com", + "ap-northeast-2.compute.amazonaws.com", + "ap-southeast-1.compute.amazonaws.com", + "ap-southeast-2.compute.amazonaws.com", + "cn-north-1.compute.amazonaws.cn", + "compute-1.amazonaws.com", + "compute.amazonaws.cn", + "compute.amazonaws.com", + "eu-central-1.compute.amazonaws.com", + "eu-west-1.compute.amazonaws.com", + "sa-east-1.compute.amazonaws.com", + "us-east-1.amazonaws.com", + "us-gov-west-1.compute.amazonaws.com", + "us-west-1.compute.amazonaws.com", + "us-west-2.compute.amazonaws.com", + "z-1.compute-1.amazonaws.com", + "z-2.compute-1.amazonaws.com", + "elasticbeanstalk.com", + "elb.amazonaws.com", + "s3.amazonaws.com", + "s3-ap-northeast-1.amazonaws.com", + "s3-ap-northeast-2.amazonaws.com", + "s3-ap-southeast-1.amazonaws.com", + "s3-ap-southeast-2.amazonaws.com", + "s3-eu-central-1.amazonaws.com", + "s3-eu-west-1.amazonaws.com", + "s3-external-1.amazonaws.com", + "s3-external-2.amazonaws.com", + "s3-fips-us-gov-west-1.amazonaws.com", + "s3-sa-east-1.amazonaws.com", + "s3-us-gov-west-1.amazonaws.com", + "s3-us-west-1.amazonaws.com", + "s3-us-west-2.amazonaws.com", + "s3.ap-northeast-2.amazonaws.com", + "s3.cn-north-1.amazonaws.com.cn", + "s3.eu-central-1.amazonaws.com", + "betainabox.com", + "ae.org", + "ar.com", + "br.com", + "cn.com", + "com.de", + "com.se", + "de.com", + "eu.com", + "gb.com", + "gb.net", + "hu.com", + "hu.net", + "jp.net", + "jpn.com", + "kr.com", + "mex.com", + "no.com", + "qc.com", + "ru.com", + "sa.com", + "se.com", + "se.net", + "uk.com", + "uk.net", + "us.com", + "uy.com", + "za.bz", + "za.com", + "africa.com", + 
"xenapponazure.com", + "gr.com", + "in.net", + "us.org", + "co.com", + "c.la", + "cloudcontrolled.com", + "cloudcontrolapp.com", + "co.ca", + "co.cz", + "c.cdn77.org", + "cdn77-ssl.net", + "r.cdn77.net", + "rsc.cdn77.org", + "ssl.origin.cdn77-secure.org", + "co.nl", + "co.no", + "*.platform.sh", + "cupcake.is", + "dreamhosters.com", + "mydrobo.com", + "duckdns.org", + "dyndns-at-home.com", + "dyndns-at-work.com", + "dyndns-blog.com", + "dyndns-free.com", + "dyndns-home.com", + "dyndns-ip.com", + "dyndns-mail.com", + "dyndns-office.com", + "dyndns-pics.com", + "dyndns-remote.com", + "dyndns-server.com", + "dyndns-web.com", + "dyndns-wiki.com", + "dyndns-work.com", + "dyndns.biz", + "dyndns.info", + "dyndns.org", + "dyndns.tv", + "at-band-camp.net", + "ath.cx", + "barrel-of-knowledge.info", + "barrell-of-knowledge.info", + "better-than.tv", + "blogdns.com", + "blogdns.net", + "blogdns.org", + "blogsite.org", + "boldlygoingnowhere.org", + "broke-it.net", + "buyshouses.net", + "cechire.com", + "dnsalias.com", + "dnsalias.net", + "dnsalias.org", + "dnsdojo.com", + "dnsdojo.net", + "dnsdojo.org", + "does-it.net", + "doesntexist.com", + "doesntexist.org", + "dontexist.com", + "dontexist.net", + "dontexist.org", + "doomdns.com", + "doomdns.org", + "dvrdns.org", + "dyn-o-saur.com", + "dynalias.com", + "dynalias.net", + "dynalias.org", + "dynathome.net", + "dyndns.ws", + "endofinternet.net", + "endofinternet.org", + "endoftheinternet.org", + "est-a-la-maison.com", + "est-a-la-masion.com", + "est-le-patron.com", + "est-mon-blogueur.com", + "for-better.biz", + "for-more.biz", + "for-our.info", + "for-some.biz", + "for-the.biz", + "forgot.her.name", + "forgot.his.name", + "from-ak.com", + "from-al.com", + "from-ar.com", + "from-az.net", + "from-ca.com", + "from-co.net", + "from-ct.com", + "from-dc.com", + "from-de.com", + "from-fl.com", + "from-ga.com", + "from-hi.com", + "from-ia.com", + "from-id.com", + "from-il.com", + "from-in.com", + "from-ks.com", + "from-ky.com", + 
"from-la.net", + "from-ma.com", + "from-md.com", + "from-me.org", + "from-mi.com", + "from-mn.com", + "from-mo.com", + "from-ms.com", + "from-mt.com", + "from-nc.com", + "from-nd.com", + "from-ne.com", + "from-nh.com", + "from-nj.com", + "from-nm.com", + "from-nv.com", + "from-ny.net", + "from-oh.com", + "from-ok.com", + "from-or.com", + "from-pa.com", + "from-pr.com", + "from-ri.com", + "from-sc.com", + "from-sd.com", + "from-tn.com", + "from-tx.com", + "from-ut.com", + "from-va.com", + "from-vt.com", + "from-wa.com", + "from-wi.com", + "from-wv.com", + "from-wy.com", + "ftpaccess.cc", + "fuettertdasnetz.de", + "game-host.org", + "game-server.cc", + "getmyip.com", + "gets-it.net", + "go.dyndns.org", + "gotdns.com", + "gotdns.org", + "groks-the.info", + "groks-this.info", + "ham-radio-op.net", + "here-for-more.info", + "hobby-site.com", + "hobby-site.org", + "home.dyndns.org", + "homedns.org", + "homeftp.net", + "homeftp.org", + "homeip.net", + "homelinux.com", + "homelinux.net", + "homelinux.org", + "homeunix.com", + "homeunix.net", + "homeunix.org", + "iamallama.com", + "in-the-band.net", + "is-a-anarchist.com", + "is-a-blogger.com", + "is-a-bookkeeper.com", + "is-a-bruinsfan.org", + "is-a-bulls-fan.com", + "is-a-candidate.org", + "is-a-caterer.com", + "is-a-celticsfan.org", + "is-a-chef.com", + "is-a-chef.net", + "is-a-chef.org", + "is-a-conservative.com", + "is-a-cpa.com", + "is-a-cubicle-slave.com", + "is-a-democrat.com", + "is-a-designer.com", + "is-a-doctor.com", + "is-a-financialadvisor.com", + "is-a-geek.com", + "is-a-geek.net", + "is-a-geek.org", + "is-a-green.com", + "is-a-guru.com", + "is-a-hard-worker.com", + "is-a-hunter.com", + "is-a-knight.org", + "is-a-landscaper.com", + "is-a-lawyer.com", + "is-a-liberal.com", + "is-a-libertarian.com", + "is-a-linux-user.org", + "is-a-llama.com", + "is-a-musician.com", + "is-a-nascarfan.com", + "is-a-nurse.com", + "is-a-painter.com", + "is-a-patsfan.org", + "is-a-personaltrainer.com", + "is-a-photographer.com", + 
"is-a-player.com", + "is-a-republican.com", + "is-a-rockstar.com", + "is-a-socialist.com", + "is-a-soxfan.org", + "is-a-student.com", + "is-a-teacher.com", + "is-a-techie.com", + "is-a-therapist.com", + "is-an-accountant.com", + "is-an-actor.com", + "is-an-actress.com", + "is-an-anarchist.com", + "is-an-artist.com", + "is-an-engineer.com", + "is-an-entertainer.com", + "is-by.us", + "is-certified.com", + "is-found.org", + "is-gone.com", + "is-into-anime.com", + "is-into-cars.com", + "is-into-cartoons.com", + "is-into-games.com", + "is-leet.com", + "is-lost.org", + "is-not-certified.com", + "is-saved.org", + "is-slick.com", + "is-uberleet.com", + "is-very-bad.org", + "is-very-evil.org", + "is-very-good.org", + "is-very-nice.org", + "is-very-sweet.org", + "is-with-theband.com", + "isa-geek.com", + "isa-geek.net", + "isa-geek.org", + "isa-hockeynut.com", + "issmarterthanyou.com", + "isteingeek.de", + "istmein.de", + "kicks-ass.net", + "kicks-ass.org", + "knowsitall.info", + "land-4-sale.us", + "lebtimnetz.de", + "leitungsen.de", + "likes-pie.com", + "likescandy.com", + "merseine.nu", + "mine.nu", + "misconfused.org", + "mypets.ws", + "myphotos.cc", + "neat-url.com", + "office-on-the.net", + "on-the-web.tv", + "podzone.net", + "podzone.org", + "readmyblog.org", + "saves-the-whales.com", + "scrapper-site.net", + "scrapping.cc", + "selfip.biz", + "selfip.com", + "selfip.info", + "selfip.net", + "selfip.org", + "sells-for-less.com", + "sells-for-u.com", + "sells-it.net", + "sellsyourhome.org", + "servebbs.com", + "servebbs.net", + "servebbs.org", + "serveftp.net", + "serveftp.org", + "servegame.org", + "shacknet.nu", + "simple-url.com", + "space-to-rent.com", + "stuff-4-sale.org", + "stuff-4-sale.us", + "teaches-yoga.com", + "thruhere.net", + "traeumtgerade.de", + "webhop.biz", + "webhop.info", + "webhop.net", + "webhop.org", + "worse-than.tv", + "writesthisblog.com", + "dynv6.net", + "eu.org", + "al.eu.org", + "asso.eu.org", + "at.eu.org", + "au.eu.org", + "be.eu.org", + 
"bg.eu.org", + "ca.eu.org", + "cd.eu.org", + "ch.eu.org", + "cn.eu.org", + "cy.eu.org", + "cz.eu.org", + "de.eu.org", + "dk.eu.org", + "edu.eu.org", + "ee.eu.org", + "es.eu.org", + "fi.eu.org", + "fr.eu.org", + "gr.eu.org", + "hr.eu.org", + "hu.eu.org", + "ie.eu.org", + "il.eu.org", + "in.eu.org", + "int.eu.org", + "is.eu.org", + "it.eu.org", + "jp.eu.org", + "kr.eu.org", + "lt.eu.org", + "lu.eu.org", + "lv.eu.org", + "mc.eu.org", + "me.eu.org", + "mk.eu.org", + "mt.eu.org", + "my.eu.org", + "net.eu.org", + "ng.eu.org", + "nl.eu.org", + "no.eu.org", + "nz.eu.org", + "paris.eu.org", + "pl.eu.org", + "pt.eu.org", + "q-a.eu.org", + "ro.eu.org", + "ru.eu.org", + "se.eu.org", + "si.eu.org", + "sk.eu.org", + "tr.eu.org", + "uk.eu.org", + "us.eu.org", + "apps.fbsbx.com", + "a.ssl.fastly.net", + "b.ssl.fastly.net", + "global.ssl.fastly.net", + "a.prod.fastly.net", + "global.prod.fastly.net", + "firebaseapp.com", + "flynnhub.com", + "service.gov.uk", + "github.io", + "githubusercontent.com", + "githubcloud.com", + "*.api.githubcloud.com", + "*.ext.githubcloud.com", + "gist.githubcloud.com", + "*.githubcloudusercontent.com", + "ro.com", + "appspot.com", + "blogspot.ae", + "blogspot.al", + "blogspot.am", + "blogspot.ba", + "blogspot.be", + "blogspot.bg", + "blogspot.bj", + "blogspot.ca", + "blogspot.cf", + "blogspot.ch", + "blogspot.cl", + "blogspot.co.at", + "blogspot.co.id", + "blogspot.co.il", + "blogspot.co.ke", + "blogspot.co.nz", + "blogspot.co.uk", + "blogspot.co.za", + "blogspot.com", + "blogspot.com.ar", + "blogspot.com.au", + "blogspot.com.br", + "blogspot.com.by", + "blogspot.com.co", + "blogspot.com.cy", + "blogspot.com.ee", + "blogspot.com.eg", + "blogspot.com.es", + "blogspot.com.mt", + "blogspot.com.ng", + "blogspot.com.tr", + "blogspot.com.uy", + "blogspot.cv", + "blogspot.cz", + "blogspot.de", + "blogspot.dk", + "blogspot.fi", + "blogspot.fr", + "blogspot.gr", + "blogspot.hk", + "blogspot.hr", + "blogspot.hu", + "blogspot.ie", + "blogspot.in", + 
"blogspot.is", + "blogspot.it", + "blogspot.jp", + "blogspot.kr", + "blogspot.li", + "blogspot.lt", + "blogspot.lu", + "blogspot.md", + "blogspot.mk", + "blogspot.mr", + "blogspot.mx", + "blogspot.my", + "blogspot.nl", + "blogspot.no", + "blogspot.pe", + "blogspot.pt", + "blogspot.qa", + "blogspot.re", + "blogspot.ro", + "blogspot.rs", + "blogspot.ru", + "blogspot.se", + "blogspot.sg", + "blogspot.si", + "blogspot.sk", + "blogspot.sn", + "blogspot.td", + "blogspot.tw", + "blogspot.ug", + "blogspot.vn", + "cloudfunctions.net", + "codespot.com", + "googleapis.com", + "googlecode.com", + "pagespeedmobilizer.com", + "withgoogle.com", + "withyoutube.com", + "hashbang.sh", + "herokuapp.com", + "herokussl.com", + "iki.fi", + "biz.at", + "info.at", + "co.pl", + "azurewebsites.net", + "azure-mobile.net", + "cloudapp.net", + "bmoattachments.org", + "4u.com", + "ngrok.io", + "nfshost.com", + "nyc.mn", + "nid.io", + "operaunite.com", + "outsystemscloud.com", + "pagefrontapp.com", + "art.pl", + "gliwice.pl", + "krakow.pl", + "poznan.pl", + "wroc.pl", + "zakopane.pl", + "pantheon.io", + "gotpantheon.com", + "xen.prgmr.com", + "priv.at", + "qa2.com", + "rackmaze.com", + "rackmaze.net", + "rhcloud.com", + "sandcats.io", + "biz.ua", + "co.ua", + "pp.ua", + "sinaapp.com", + "vipsinaapp.com", + "1kapp.com", + "diskstation.me", + "dscloud.biz", + "dscloud.me", + "dscloud.mobi", + "dsmynas.com", + "dsmynas.net", + "dsmynas.org", + "familyds.com", + "familyds.net", + "familyds.org", + "i234.me", + "myds.me", + "synology.me", + "gda.pl", + "gdansk.pl", + "gdynia.pl", + "med.pl", + "sopot.pl", + "hk.com", + "hk.org", + "ltd.hk", + "inc.hk", + "yolasite.com", + "za.net", + "za.org", +} + +var nodeLabels = [...]string{ + "aaa", + "aarp", + "abarth", + "abb", + "abbott", + "abbvie", + "abc", + "able", + "abogado", + "abudhabi", + "ac", + "academy", + "accenture", + "accountant", + "accountants", + "aco", + "active", + "actor", + "ad", + "adac", + "ads", + "adult", + "ae", + "aeg", + "aero", 
+ "aetna", + "af", + "afamilycompany", + "afl", + "africa", + "africamagic", + "ag", + "agakhan", + "agency", + "ai", + "aig", + "aigo", + "airbus", + "airforce", + "airtel", + "akdn", + "al", + "alfaromeo", + "alibaba", + "alipay", + "allfinanz", + "allstate", + "ally", + "alsace", + "alstom", + "am", + "americanexpress", + "americanfamily", + "amex", + "amfam", + "amica", + "amsterdam", + "analytics", + "android", + "anquan", + "anz", + "ao", + "aol", + "apartments", + "app", + "apple", + "aq", + "aquarelle", + "ar", + "arab", + "aramco", + "archi", + "army", + "arpa", + "arte", + "as", + "asda", + "asia", + "associates", + "at", + "athleta", + "attorney", + "au", + "auction", + "audi", + "audible", + "audio", + "auspost", + "author", + "auto", + "autos", + "avianca", + "aw", + "aws", + "ax", + "axa", + "az", + "azure", + "ba", + "baby", + "baidu", + "banamex", + "bananarepublic", + "band", + "bank", + "bar", + "barcelona", + "barclaycard", + "barclays", + "barefoot", + "bargains", + "baseball", + "basketball", + "bauhaus", + "bayern", + "bb", + "bbc", + "bbt", + "bbva", + "bcg", + "bcn", + "bd", + "be", + "beats", + "beauty", + "beer", + "bentley", + "berlin", + "best", + "bestbuy", + "bet", + "bf", + "bg", + "bh", + "bharti", + "bi", + "bible", + "bid", + "bike", + "bing", + "bingo", + "bio", + "biz", + "bj", + "black", + "blackfriday", + "blanco", + "blockbuster", + "blog", + "bloomberg", + "blue", + "bm", + "bms", + "bmw", + "bn", + "bnl", + "bnpparibas", + "bo", + "boats", + "boehringer", + "bofa", + "bom", + "bond", + "boo", + "book", + "booking", + "boots", + "bosch", + "bostik", + "boston", + "bot", + "boutique", + "box", + "br", + "bradesco", + "bridgestone", + "broadway", + "broker", + "brother", + "brussels", + "bs", + "bt", + "budapest", + "bugatti", + "build", + "builders", + "business", + "buy", + "buzz", + "bv", + "bw", + "by", + "bz", + "bzh", + "ca", + "cab", + "cafe", + "cal", + "call", + "calvinklein", + "camera", + "camp", + "cancerresearch", 
+ "canon", + "capetown", + "capital", + "capitalone", + "car", + "caravan", + "cards", + "care", + "career", + "careers", + "cars", + "cartier", + "casa", + "case", + "caseih", + "cash", + "casino", + "cat", + "catering", + "catholic", + "cba", + "cbn", + "cbre", + "cbs", + "cc", + "cd", + "ceb", + "center", + "ceo", + "cern", + "cf", + "cfa", + "cfd", + "cg", + "ch", + "chanel", + "channel", + "chase", + "chat", + "cheap", + "chintai", + "chloe", + "christmas", + "chrome", + "chrysler", + "church", + "ci", + "cipriani", + "circle", + "cisco", + "citadel", + "citi", + "citic", + "city", + "cityeats", + "ck", + "cl", + "claims", + "cleaning", + "click", + "clinic", + "clinique", + "clothing", + "cloud", + "club", + "clubmed", + "cm", + "cn", + "co", + "coach", + "codes", + "coffee", + "college", + "cologne", + "com", + "comcast", + "commbank", + "community", + "company", + "compare", + "computer", + "comsec", + "condos", + "construction", + "consulting", + "contact", + "contractors", + "cooking", + "cookingchannel", + "cool", + "coop", + "corsica", + "country", + "coupon", + "coupons", + "courses", + "cr", + "credit", + "creditcard", + "creditunion", + "cricket", + "crown", + "crs", + "cruise", + "cruises", + "csc", + "cu", + "cuisinella", + "cv", + "cw", + "cx", + "cy", + "cymru", + "cyou", + "cz", + "dabur", + "dad", + "dance", + "date", + "dating", + "datsun", + "day", + "dclk", + "dds", + "de", + "deal", + "dealer", + "deals", + "degree", + "delivery", + "dell", + "deloitte", + "delta", + "democrat", + "dental", + "dentist", + "desi", + "design", + "dev", + "dhl", + "diamonds", + "diet", + "digital", + "direct", + "directory", + "discount", + "discover", + "dish", + "diy", + "dj", + "dk", + "dm", + "dnp", + "do", + "docs", + "dodge", + "dog", + "doha", + "domains", + "dot", + "download", + "drive", + "dstv", + "dtv", + "dubai", + "duck", + "dunlop", + "duns", + "dupont", + "durban", + "dvag", + "dwg", + "dz", + "earth", + "eat", + "ec", + "edeka", + "edu", + 
"education", + "ee", + "eg", + "email", + "emerck", + "emerson", + "energy", + "engineer", + "engineering", + "enterprises", + "epost", + "epson", + "equipment", + "er", + "ericsson", + "erni", + "es", + "esq", + "estate", + "esurance", + "et", + "etisalat", + "eu", + "eurovision", + "eus", + "events", + "everbank", + "exchange", + "expert", + "exposed", + "express", + "extraspace", + "fage", + "fail", + "fairwinds", + "faith", + "family", + "fan", + "fans", + "farm", + "farmers", + "fashion", + "fast", + "fedex", + "feedback", + "ferrari", + "ferrero", + "fi", + "fiat", + "fidelity", + "fido", + "film", + "final", + "finance", + "financial", + "fire", + "firestone", + "firmdale", + "fish", + "fishing", + "fit", + "fitness", + "fj", + "fk", + "flickr", + "flights", + "flir", + "florist", + "flowers", + "flsmidth", + "fly", + "fm", + "fo", + "foo", + "foodnetwork", + "football", + "ford", + "forex", + "forsale", + "forum", + "foundation", + "fox", + "fr", + "free", + "fresenius", + "frl", + "frogans", + "frontdoor", + "frontier", + "ftr", + "fujitsu", + "fujixerox", + "fun", + "fund", + "furniture", + "futbol", + "fyi", + "ga", + "gal", + "gallery", + "gallo", + "gallup", + "game", + "games", + "gap", + "garden", + "gb", + "gbiz", + "gd", + "gdn", + "ge", + "gea", + "gent", + "genting", + "george", + "gf", + "gg", + "ggee", + "gh", + "gi", + "gift", + "gifts", + "gives", + "giving", + "gl", + "glade", + "glass", + "gle", + "global", + "globo", + "gm", + "gmail", + "gmbh", + "gmo", + "gmx", + "gn", + "godaddy", + "gold", + "goldpoint", + "golf", + "goo", + "goodhands", + "goodyear", + "goog", + "google", + "gop", + "got", + "gotv", + "gov", + "gp", + "gq", + "gr", + "grainger", + "graphics", + "gratis", + "green", + "gripe", + "group", + "gs", + "gt", + "gu", + "guardian", + "gucci", + "guge", + "guide", + "guitars", + "guru", + "gw", + "gy", + "hair", + "hamburg", + "hangout", + "haus", + "hbo", + "hdfc", + "hdfcbank", + "health", + "healthcare", + "help", + 
"helsinki", + "here", + "hermes", + "hgtv", + "hiphop", + "hisamitsu", + "hitachi", + "hiv", + "hk", + "hkt", + "hm", + "hn", + "hockey", + "holdings", + "holiday", + "homedepot", + "homegoods", + "homes", + "homesense", + "honda", + "honeywell", + "horse", + "host", + "hosting", + "hot", + "hoteles", + "hotmail", + "house", + "how", + "hr", + "hsbc", + "ht", + "htc", + "hu", + "hughes", + "hyatt", + "hyundai", + "ibm", + "icbc", + "ice", + "icu", + "id", + "ie", + "ieee", + "ifm", + "iinet", + "ikano", + "il", + "im", + "imamat", + "imdb", + "immo", + "immobilien", + "in", + "industries", + "infiniti", + "info", + "ing", + "ink", + "institute", + "insurance", + "insure", + "int", + "intel", + "international", + "intuit", + "investments", + "io", + "ipiranga", + "iq", + "ir", + "irish", + "is", + "iselect", + "ismaili", + "ist", + "istanbul", + "it", + "itau", + "itv", + "iveco", + "iwc", + "jaguar", + "java", + "jcb", + "jcp", + "je", + "jeep", + "jetzt", + "jewelry", + "jio", + "jlc", + "jll", + "jm", + "jmp", + "jnj", + "jo", + "jobs", + "joburg", + "jot", + "joy", + "jp", + "jpmorgan", + "jprs", + "juegos", + "juniper", + "kaufen", + "kddi", + "ke", + "kerryhotels", + "kerrylogistics", + "kerryproperties", + "kfh", + "kg", + "kh", + "ki", + "kia", + "kim", + "kinder", + "kindle", + "kitchen", + "kiwi", + "km", + "kn", + "koeln", + "komatsu", + "kosher", + "kp", + "kpmg", + "kpn", + "kr", + "krd", + "kred", + "kuokgroup", + "kw", + "ky", + "kyknet", + "kyoto", + "kz", + "la", + "lacaixa", + "ladbrokes", + "lamborghini", + "lamer", + "lancaster", + "lancia", + "lancome", + "land", + "landrover", + "lanxess", + "lasalle", + "lat", + "latino", + "latrobe", + "law", + "lawyer", + "lb", + "lc", + "lds", + "lease", + "leclerc", + "lefrak", + "legal", + "lego", + "lexus", + "lgbt", + "li", + "liaison", + "lidl", + "life", + "lifeinsurance", + "lifestyle", + "lighting", + "like", + "lilly", + "limited", + "limo", + "lincoln", + "linde", + "link", + "lipsy", + "live", + 
"living", + "lixil", + "lk", + "loan", + "loans", + "locker", + "locus", + "loft", + "lol", + "london", + "lotte", + "lotto", + "love", + "lpl", + "lplfinancial", + "lr", + "ls", + "lt", + "ltd", + "ltda", + "lu", + "lundbeck", + "lupin", + "luxe", + "luxury", + "lv", + "ly", + "ma", + "macys", + "madrid", + "maif", + "maison", + "makeup", + "man", + "management", + "mango", + "market", + "marketing", + "markets", + "marriott", + "marshalls", + "maserati", + "mattel", + "mba", + "mc", + "mcd", + "mcdonalds", + "mckinsey", + "md", + "me", + "med", + "media", + "meet", + "melbourne", + "meme", + "memorial", + "men", + "menu", + "meo", + "metlife", + "mg", + "mh", + "miami", + "microsoft", + "mil", + "mini", + "mint", + "mit", + "mitsubishi", + "mk", + "ml", + "mlb", + "mls", + "mm", + "mma", + "mn", + "mnet", + "mo", + "mobi", + "mobily", + "moda", + "moe", + "moi", + "mom", + "monash", + "money", + "monster", + "montblanc", + "mopar", + "mormon", + "mortgage", + "moscow", + "moto", + "motorcycles", + "mov", + "movie", + "movistar", + "mp", + "mq", + "mr", + "ms", + "msd", + "mt", + "mtn", + "mtpc", + "mtr", + "mu", + "multichoice", + "museum", + "mutual", + "mutuelle", + "mv", + "mw", + "mx", + "my", + "mz", + "mzansimagic", + "na", + "nab", + "nadex", + "nagoya", + "name", + "naspers", + "nationwide", + "natura", + "navy", + "nba", + "nc", + "ne", + "nec", + "net", + "netbank", + "netflix", + "network", + "neustar", + "new", + "newholland", + "news", + "next", + "nextdirect", + "nexus", + "nf", + "nfl", + "ng", + "ngo", + "nhk", + "ni", + "nico", + "nike", + "nikon", + "ninja", + "nissan", + "nissay", + "nl", + "no", + "nokia", + "northwesternmutual", + "norton", + "now", + "nowruz", + "nowtv", + "np", + "nr", + "nra", + "nrw", + "ntt", + "nu", + "nyc", + "nz", + "obi", + "observer", + "off", + "office", + "okinawa", + "olayan", + "olayangroup", + "oldnavy", + "ollo", + "om", + "omega", + "one", + "ong", + "onl", + "online", + "onyourside", + "ooo", + "open", + 
"oracle", + "orange", + "org", + "organic", + "orientexpress", + "origins", + "osaka", + "otsuka", + "ott", + "ovh", + "pa", + "page", + "pamperedchef", + "panasonic", + "panerai", + "paris", + "pars", + "partners", + "parts", + "party", + "passagens", + "pay", + "payu", + "pccw", + "pe", + "pet", + "pf", + "pfizer", + "pg", + "ph", + "pharmacy", + "philips", + "photo", + "photography", + "photos", + "physio", + "piaget", + "pics", + "pictet", + "pictures", + "pid", + "pin", + "ping", + "pink", + "pioneer", + "pizza", + "pk", + "pl", + "place", + "play", + "playstation", + "plumbing", + "plus", + "pm", + "pn", + "pnc", + "pohl", + "poker", + "politie", + "porn", + "post", + "pr", + "pramerica", + "praxi", + "press", + "prime", + "pro", + "prod", + "productions", + "prof", + "progressive", + "promo", + "properties", + "property", + "protection", + "pru", + "prudential", + "ps", + "pt", + "pub", + "pw", + "pwc", + "py", + "qa", + "qpon", + "quebec", + "quest", + "qvc", + "racing", + "raid", + "re", + "read", + "realestate", + "realtor", + "realty", + "recipes", + "red", + "redstone", + "redumbrella", + "rehab", + "reise", + "reisen", + "reit", + "reliance", + "ren", + "rent", + "rentals", + "repair", + "report", + "republican", + "rest", + "restaurant", + "review", + "reviews", + "rexroth", + "rich", + "richardli", + "ricoh", + "rightathome", + "ril", + "rio", + "rip", + "rmit", + "ro", + "rocher", + "rocks", + "rodeo", + "rogers", + "room", + "rs", + "rsvp", + "ru", + "ruhr", + "run", + "rw", + "rwe", + "ryukyu", + "sa", + "saarland", + "safe", + "safety", + "sakura", + "sale", + "salon", + "samsclub", + "samsung", + "sandvik", + "sandvikcoromant", + "sanofi", + "sap", + "sapo", + "sarl", + "sas", + "save", + "saxo", + "sb", + "sbi", + "sbs", + "sc", + "sca", + "scb", + "schaeffler", + "schmidt", + "scholarships", + "school", + "schule", + "schwarz", + "science", + "scjohnson", + "scor", + "scot", + "sd", + "se", + "seat", + "secure", + "security", + "seek", + 
"select", + "sener", + "services", + "ses", + "seven", + "sew", + "sex", + "sexy", + "sfr", + "sg", + "sh", + "shangrila", + "sharp", + "shaw", + "shell", + "shia", + "shiksha", + "shoes", + "shouji", + "show", + "showtime", + "shriram", + "si", + "silk", + "sina", + "singles", + "site", + "sj", + "sk", + "ski", + "skin", + "sky", + "skype", + "sl", + "sling", + "sm", + "smart", + "smile", + "sn", + "sncf", + "so", + "soccer", + "social", + "softbank", + "software", + "sohu", + "solar", + "solutions", + "song", + "sony", + "soy", + "space", + "spiegel", + "spot", + "spreadbetting", + "sr", + "srl", + "srt", + "st", + "stada", + "staples", + "star", + "starhub", + "statebank", + "statefarm", + "statoil", + "stc", + "stcgroup", + "stockholm", + "storage", + "store", + "stream", + "studio", + "study", + "style", + "su", + "sucks", + "supersport", + "supplies", + "supply", + "support", + "surf", + "surgery", + "suzuki", + "sv", + "swatch", + "swiftcover", + "swiss", + "sx", + "sy", + "sydney", + "symantec", + "systems", + "sz", + "tab", + "taipei", + "talk", + "taobao", + "target", + "tatamotors", + "tatar", + "tattoo", + "tax", + "taxi", + "tc", + "tci", + "td", + "tdk", + "team", + "tech", + "technology", + "tel", + "telecity", + "telefonica", + "temasek", + "tennis", + "teva", + "tf", + "tg", + "th", + "thd", + "theater", + "theatre", + "theguardian", + "tiaa", + "tickets", + "tienda", + "tiffany", + "tips", + "tires", + "tirol", + "tj", + "tjmaxx", + "tjx", + "tk", + "tkmaxx", + "tl", + "tm", + "tmall", + "tn", + "to", + "today", + "tokyo", + "tools", + "top", + "toray", + "toshiba", + "total", + "tours", + "town", + "toyota", + "toys", + "tr", + "trade", + "trading", + "training", + "travel", + "travelchannel", + "travelers", + "travelersinsurance", + "trust", + "trv", + "tt", + "tube", + "tui", + "tunes", + "tushu", + "tv", + "tvs", + "tw", + "tz", + "ua", + "ubank", + "ubs", + "uconnect", + "ug", + "uk", + "unicom", + "university", + "uno", + "uol", + "ups", + 
"us", + "uy", + "uz", + "va", + "vacations", + "vana", + "vanguard", + "vc", + "ve", + "vegas", + "ventures", + "verisign", + "versicherung", + "vet", + "vg", + "vi", + "viajes", + "video", + "vig", + "viking", + "villas", + "vin", + "vip", + "virgin", + "visa", + "vision", + "vista", + "vistaprint", + "viva", + "vivo", + "vlaanderen", + "vn", + "vodka", + "volkswagen", + "volvo", + "vote", + "voting", + "voto", + "voyage", + "vu", + "vuelos", + "wales", + "walmart", + "walter", + "wang", + "wanggou", + "warman", + "watch", + "watches", + "weather", + "weatherchannel", + "webcam", + "weber", + "website", + "wed", + "wedding", + "weibo", + "weir", + "wf", + "whoswho", + "wien", + "wiki", + "williamhill", + "win", + "windows", + "wine", + "winners", + "wme", + "wolterskluwer", + "woodside", + "work", + "works", + "world", + "wow", + "ws", + "wtc", + "wtf", + "xbox", + "xerox", + "xfinity", + "xihuan", + "xin", + "xn--11b4c3d", + "xn--1ck2e1b", + "xn--1qqw23a", + "xn--30rr7y", + "xn--3bst00m", + "xn--3ds443g", + "xn--3e0b707e", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", + "xn--45brj9c", + "xn--45q11c", + "xn--4gbrim", + "xn--4gq48lf9j", + "xn--54b7fta0cc", + "xn--55qw42g", + "xn--55qx5d", + "xn--5su34j936bgsg", + "xn--5tzm5g", + "xn--6frz82g", + "xn--6qq986b3xl", + "xn--80adxhks", + "xn--80ao21a", + "xn--80aqecdr1a", + "xn--80asehdb", + "xn--80aswg", + "xn--8y0a063a", + "xn--90a3ac", + "xn--90ais", + "xn--9dbq2a", + "xn--9et52u", + "xn--9krt00a", + "xn--b4w605ferd", + "xn--bck1b9a5dre4c", + "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", + "xn--cg4bki", + "xn--clchc0ea0b2g2a9gcd", + "xn--czr694b", + "xn--czrs0t", + "xn--czru2d", + "xn--d1acj3b", + "xn--d1alf", + "xn--eckvdtc9d", + "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", + "xn--fiq228c5hs", + "xn--fiq64b", + "xn--fiqs8s", + "xn--fiqz9s", + "xn--fjq720a", + "xn--flw351e", + "xn--fpcrj9c3d", + "xn--fzc2c9e2c", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", + "xn--gecrj9c", + 
"xn--gk3at1e", + "xn--h2brj9c", + "xn--hxt814e", + "xn--i1b6b1a6a2e", + "xn--imr513n", + "xn--io0a7i", + "xn--j1aef", + "xn--j1amh", + "xn--j6w193g", + "xn--jlq61u9w7b", + "xn--jvr189m", + "xn--kcrx77d1x4a", + "xn--kprw13d", + "xn--kpry57d", + "xn--kpu716f", + "xn--kput3i", + "xn--l1acc", + "xn--lgbbat1ad8j", + "xn--mgb2ddes", + "xn--mgb9awbf", + "xn--mgba3a3ejt", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "xn--mgba7c0bbn0a", + "xn--mgbaakc7dvf", + "xn--mgbaam7a8h", + "xn--mgbab2bd", + "xn--mgbai9a5eva00b", + "xn--mgbai9azgqp6j", + "xn--mgbayh7gpa", + "xn--mgbb9fbpob", + "xn--mgbbh1a71e", + "xn--mgbc0a9azcg", + "xn--mgbca7dzdo", + "xn--mgberp4a5d4a87g", + "xn--mgberp4a5d4ar", + "xn--mgbi4ecexp", + "xn--mgbpl2fh", + "xn--mgbqly7c0a67fbc", + "xn--mgbqly7cvafr", + "xn--mgbt3dhd", + "xn--mgbtf8fl", + "xn--mgbtx2b", + "xn--mgbx4cd0ab", + "xn--mix082f", + "xn--mix891f", + "xn--mk1bu44c", + "xn--mxtq1m", + "xn--ngbc5azd", + "xn--ngbe9e0a", + "xn--ngbrx", + "xn--nnx388a", + "xn--node", + "xn--nqv7f", + "xn--nqv7fs00ema", + "xn--nyqy26a", + "xn--o3cw4h", + "xn--ogbpf8fl", + "xn--p1acf", + "xn--p1ai", + "xn--pbt977c", + "xn--pgbs0dh", + "xn--pssy2u", + "xn--q9jyb4c", + "xn--qcka1pmc", + "xn--qxam", + "xn--rhqv96g", + "xn--rovu88b", + "xn--s9brj9c", + "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", + "xn--tiq49xqyj", + "xn--unup4y", + "xn--vermgensberater-ctb", + "xn--vermgensberatung-pwb", + "xn--vhquv", + "xn--vuq861b", + "xn--w4r85el8fhu5dnra", + "xn--w4rs40l", + "xn--wgbh1c", + "xn--wgbl6a", + "xn--xhq521b", + "xn--xkc2al3hye2a", + "xn--xkc2dl3a5ee0h", + "xn--y9a3aq", + "xn--yfro4i67o", + "xn--ygbi2ammx", + "xn--zfr164b", + "xperia", + "xxx", + "xyz", + "yachts", + "yahoo", + "yamaxun", + "yandex", + "ye", + "yodobashi", + "yoga", + "yokohama", + "you", + "youtube", + "yt", + "yun", + "za", + "zappos", + "zara", + "zero", + "zip", + "zippo", + "zm", + "zone", + "zuerich", + "zw", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "nom", + "ac", + "blogspot", + "co", + 
"gov", + "mil", + "net", + "org", + "sch", + "accident-investigation", + "accident-prevention", + "aerobatic", + "aeroclub", + "aerodrome", + "agents", + "air-surveillance", + "air-traffic-control", + "aircraft", + "airline", + "airport", + "airtraffic", + "ambulance", + "amusement", + "association", + "author", + "ballooning", + "broker", + "caa", + "cargo", + "catering", + "certification", + "championship", + "charter", + "civilaviation", + "club", + "conference", + "consultant", + "consulting", + "control", + "council", + "crew", + "design", + "dgca", + "educator", + "emergency", + "engine", + "engineer", + "entertainment", + "equipment", + "exchange", + "express", + "federation", + "flight", + "freight", + "fuel", + "gliding", + "government", + "groundhandling", + "group", + "hanggliding", + "homebuilt", + "insurance", + "journal", + "journalist", + "leasing", + "logistics", + "magazine", + "maintenance", + "media", + "microlight", + "modelling", + "navigation", + "parachuting", + "paragliding", + "passenger-association", + "pilot", + "press", + "production", + "recreation", + "repbody", + "res", + "research", + "rotorcraft", + "safety", + "scientist", + "services", + "show", + "skydiving", + "software", + "student", + "trader", + "trading", + "trainer", + "union", + "workinggroup", + "works", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "net", + "nom", + "org", + "com", + "net", + "off", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "blogspot", + "co", + "ed", + "gv", + "it", + "og", + "pb", + "com", + "edu", + "gob", + "gov", + "int", + "mil", + "net", + "org", + "tur", + "blogspot", + "e164", + "in-addr", + "ip6", + "iris", + "uri", + "urn", + "gov", + "ac", + "biz", + "co", + "gv", + "info", + "or", + "priv", + "blogspot", + "act", + "asn", + "com", + "conf", + "edu", + "gov", + "id", + "info", + "net", + "nsw", + "nt", + "org", + "oz", + "qld", + "sa", + "tas", + "vic", + "wa", + "blogspot", + "act", + 
"nsw", + "nt", + "qld", + "sa", + "tas", + "vic", + "wa", + "qld", + "sa", + "tas", + "vic", + "wa", + "com", + "biz", + "com", + "edu", + "gov", + "info", + "int", + "mil", + "name", + "net", + "org", + "pp", + "pro", + "blogspot", + "co", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "rs", + "unbi", + "unsa", + "biz", + "co", + "com", + "edu", + "gov", + "info", + "net", + "org", + "store", + "tv", + "ac", + "blogspot", + "gov", + "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "a", + "b", + "blogspot", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "edu", + "or", + "org", + "dscloud", + "dyndns", + "for-better", + "for-more", + "for-some", + "for-the", + "selfip", + "webhop", + "asso", + "barreau", + "blogspot", + "gouv", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gob", + "gov", + "int", + "mil", + "net", + "org", + "tv", + "adm", + "adv", + "agr", + "am", + "arq", + "art", + "ato", + "b", + "bio", + "blog", + "bmd", + "cim", + "cng", + "cnt", + "com", + "coop", + "ecn", + "eco", + "edu", + "emp", + "eng", + "esp", + "etc", + "eti", + "far", + "flog", + "fm", + "fnd", + "fot", + "fst", + "g12", + "ggf", + "gov", + "imb", + "ind", + "inf", + "jor", + "jus", + "leg", + "lel", + "mat", + "med", + "mil", + "mp", + "mus", + "net", + "nom", + "not", + "ntr", + "odo", + "org", + "ppg", + "pro", + "psc", + "psi", + "qsl", + "radio", + "rec", + "slg", + "srv", + "taxi", + "teo", + "tmp", + "trd", + "tur", + "tv", + "vet", + "vlog", + "wiki", + "zlg", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "net", + "org", + "co", + "org", + "com", + "gov", + "mil", + "of", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "za", + "ab", + "bc", + "blogspot", + "co", + "gc", + "mb", + "nb", + 
"nf", + "nl", + "ns", + "nt", + "nu", + "on", + "pe", + "qc", + "sk", + "yk", + "ftpaccess", + "game-server", + "myphotos", + "scrapping", + "gov", + "blogspot", + "blogspot", + "ac", + "asso", + "co", + "com", + "ed", + "edu", + "go", + "gouv", + "int", + "md", + "net", + "or", + "org", + "presse", + "xn--aroport-bya", + "www", + "blogspot", + "co", + "gob", + "gov", + "mil", + "co", + "com", + "gov", + "net", + "ac", + "ah", + "amazonaws", + "bj", + "com", + "cq", + "edu", + "fj", + "gd", + "gov", + "gs", + "gx", + "gz", + "ha", + "hb", + "he", + "hi", + "hk", + "hl", + "hn", + "jl", + "js", + "jx", + "ln", + "mil", + "mo", + "net", + "nm", + "nx", + "org", + "qh", + "sc", + "sd", + "sh", + "sn", + "sx", + "tj", + "tw", + "xj", + "xn--55qx5d", + "xn--io0a7i", + "xn--od0alg", + "xz", + "yn", + "zj", + "compute", + "cn-north-1", + "amazonaws", + "cn-north-1", + "s3", + "arts", + "com", + "edu", + "firm", + "gov", + "info", + "int", + "mil", + "net", + "nom", + "org", + "rec", + "web", + "blogspot", + "1kapp", + "4u", + "africa", + "amazonaws", + "appspot", + "ar", + "betainabox", + "blogdns", + "blogspot", + "br", + "cechire", + "cloudcontrolapp", + "cloudcontrolled", + "cn", + "co", + "codespot", + "de", + "dnsalias", + "dnsdojo", + "doesntexist", + "dontexist", + "doomdns", + "dreamhosters", + "dsmynas", + "dyn-o-saur", + "dynalias", + "dyndns-at-home", + "dyndns-at-work", + "dyndns-blog", + "dyndns-free", + "dyndns-home", + "dyndns-ip", + "dyndns-mail", + "dyndns-office", + "dyndns-pics", + "dyndns-remote", + "dyndns-server", + "dyndns-web", + "dyndns-wiki", + "dyndns-work", + "elasticbeanstalk", + "est-a-la-maison", + "est-a-la-masion", + "est-le-patron", + "est-mon-blogueur", + "eu", + "familyds", + "fbsbx", + "firebaseapp", + "flynnhub", + "from-ak", + "from-al", + "from-ar", + "from-ca", + "from-ct", + "from-dc", + "from-de", + "from-fl", + "from-ga", + "from-hi", + "from-ia", + "from-id", + "from-il", + "from-in", + "from-ks", + "from-ky", + "from-ma", + 
"from-md", + "from-mi", + "from-mn", + "from-mo", + "from-ms", + "from-mt", + "from-nc", + "from-nd", + "from-ne", + "from-nh", + "from-nj", + "from-nm", + "from-nv", + "from-oh", + "from-ok", + "from-or", + "from-pa", + "from-pr", + "from-ri", + "from-sc", + "from-sd", + "from-tn", + "from-tx", + "from-ut", + "from-va", + "from-vt", + "from-wa", + "from-wi", + "from-wv", + "from-wy", + "gb", + "getmyip", + "githubcloud", + "githubcloudusercontent", + "githubusercontent", + "googleapis", + "googlecode", + "gotdns", + "gotpantheon", + "gr", + "herokuapp", + "herokussl", + "hk", + "hobby-site", + "homelinux", + "homeunix", + "hu", + "iamallama", + "is-a-anarchist", + "is-a-blogger", + "is-a-bookkeeper", + "is-a-bulls-fan", + "is-a-caterer", + "is-a-chef", + "is-a-conservative", + "is-a-cpa", + "is-a-cubicle-slave", + "is-a-democrat", + "is-a-designer", + "is-a-doctor", + "is-a-financialadvisor", + "is-a-geek", + "is-a-green", + "is-a-guru", + "is-a-hard-worker", + "is-a-hunter", + "is-a-landscaper", + "is-a-lawyer", + "is-a-liberal", + "is-a-libertarian", + "is-a-llama", + "is-a-musician", + "is-a-nascarfan", + "is-a-nurse", + "is-a-painter", + "is-a-personaltrainer", + "is-a-photographer", + "is-a-player", + "is-a-republican", + "is-a-rockstar", + "is-a-socialist", + "is-a-student", + "is-a-teacher", + "is-a-techie", + "is-a-therapist", + "is-an-accountant", + "is-an-actor", + "is-an-actress", + "is-an-anarchist", + "is-an-artist", + "is-an-engineer", + "is-an-entertainer", + "is-certified", + "is-gone", + "is-into-anime", + "is-into-cars", + "is-into-cartoons", + "is-into-games", + "is-leet", + "is-not-certified", + "is-slick", + "is-uberleet", + "is-with-theband", + "isa-geek", + "isa-hockeynut", + "issmarterthanyou", + "jpn", + "kr", + "likes-pie", + "likescandy", + "mex", + "mydrobo", + "neat-url", + "nfshost", + "no", + "operaunite", + "outsystemscloud", + "pagefrontapp", + "pagespeedmobilizer", + "prgmr", + "qa2", + "qc", + "rackmaze", + "rhcloud", + "ro", + 
"ru", + "sa", + "saves-the-whales", + "se", + "selfip", + "sells-for-less", + "sells-for-u", + "servebbs", + "simple-url", + "sinaapp", + "space-to-rent", + "teaches-yoga", + "uk", + "us", + "uy", + "vipsinaapp", + "withgoogle", + "withyoutube", + "writesthisblog", + "xenapponazure", + "yolasite", + "za", + "ap-northeast-2", + "compute", + "compute-1", + "elb", + "eu-central-1", + "s3", + "s3-ap-northeast-1", + "s3-ap-northeast-2", + "s3-ap-southeast-1", + "s3-ap-southeast-2", + "s3-eu-central-1", + "s3-eu-west-1", + "s3-external-1", + "s3-external-2", + "s3-fips-us-gov-west-1", + "s3-sa-east-1", + "s3-us-gov-west-1", + "s3-us-west-1", + "s3-us-west-2", + "us-east-1", + "s3", + "ap-northeast-1", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "eu-central-1", + "eu-west-1", + "sa-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", + "z-1", + "z-2", + "s3", + "apps", + "api", + "ext", + "gist", + "xen", + "ac", + "co", + "ed", + "fi", + "go", + "or", + "sa", + "com", + "edu", + "gov", + "inf", + "net", + "org", + "blogspot", + "com", + "edu", + "net", + "org", + "ath", + "gov", + "ac", + "biz", + "com", + "ekloges", + "gov", + "ltd", + "name", + "net", + "org", + "parliament", + "press", + "pro", + "tm", + "blogspot", + "blogspot", + "co", + "blogspot", + "com", + "fuettertdasnetz", + "isteingeek", + "istmein", + "lebtimnetz", + "leitungsen", + "traeumtgerade", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "art", + "com", + "edu", + "gob", + "gov", + "mil", + "net", + "org", + "sld", + "web", + "art", + "asso", + "com", + "edu", + "gov", + "net", + "org", + "pol", + "com", + "edu", + "fin", + "gob", + "gov", + "info", + "k12", + "med", + "mil", + "net", + "org", + "pro", + "aip", + "com", + "edu", + "fie", + "gov", + "lib", + "med", + "org", + "pri", + "riik", + "blogspot", + "com", + "edu", + "eun", + "gov", + "mil", + "name", + "net", + "org", + "sci", + "blogspot", + "com", + "edu", + "gob", + "nom", + "org", + "blogspot", + 
"biz", + "com", + "edu", + "gov", + "info", + "name", + "net", + "org", + "aland", + "blogspot", + "iki", + "aeroport", + "assedic", + "asso", + "avocat", + "avoues", + "blogspot", + "cci", + "chambagri", + "chirurgiens-dentistes", + "com", + "experts-comptables", + "geometre-expert", + "gouv", + "greta", + "huissier-justice", + "medecin", + "nom", + "notaires", + "pharmacien", + "port", + "prd", + "presse", + "tm", + "veterinaire", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "pvt", + "co", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "org", + "com", + "edu", + "gov", + "ltd", + "mod", + "org", + "co", + "com", + "edu", + "net", + "org", + "ac", + "com", + "edu", + "gov", + "net", + "org", + "asso", + "com", + "edu", + "mobi", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gob", + "ind", + "mil", + "net", + "org", + "co", + "com", + "edu", + "gov", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "idv", + "inc", + "ltd", + "net", + "org", + "xn--55qx5d", + "xn--ciqpn", + "xn--gmq050i", + "xn--gmqw5a", + "xn--io0a7i", + "xn--lcvr32d", + "xn--mk0axi", + "xn--mxtq1m", + "xn--od0alg", + "xn--od0aq3b", + "xn--tn0ag", + "xn--uc0atv", + "xn--uc0ay4a", + "xn--wcvs22d", + "xn--zf0avx", + "com", + "edu", + "gob", + "mil", + "net", + "org", + "blogspot", + "com", + "from", + "iz", + "name", + "adult", + "art", + "asso", + "com", + "coop", + "edu", + "firm", + "gouv", + "info", + "med", + "net", + "org", + "perso", + "pol", + "pro", + "rel", + "shop", + "2000", + "agrar", + "blogspot", + "bolt", + "casino", + "city", + "co", + "erotica", + "erotika", + "film", + "forum", + "games", + "hotel", + "info", + "ingatlan", + "jogasz", + "konyvelo", + "lakas", + "media", + "news", + "org", + "priv", + "reklam", + "sex", + "shop", + "sport", + "suli", + "szex", + "tm", + "tozsde", + "utazas", + "video", + "ac", + "biz", + "co", + "desa", + "go", + "mil", + "my", + "net", + "or", + "sch", + "web", + 
"blogspot", + "blogspot", + "gov", + "ac", + "co", + "gov", + "idf", + "k12", + "muni", + "net", + "org", + "blogspot", + "ac", + "co", + "com", + "net", + "org", + "tt", + "tv", + "ltd", + "plc", + "ac", + "blogspot", + "co", + "edu", + "firm", + "gen", + "gov", + "ind", + "mil", + "net", + "nic", + "org", + "res", + "barrel-of-knowledge", + "barrell-of-knowledge", + "dyndns", + "for-our", + "groks-the", + "groks-this", + "here-for-more", + "knowsitall", + "selfip", + "webhop", + "eu", + "com", + "github", + "ngrok", + "nid", + "pantheon", + "sandcats", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "ac", + "co", + "gov", + "id", + "net", + "org", + "sch", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "blogspot", + "com", + "cupcake", + "edu", + "gov", + "int", + "net", + "org", + "abr", + "abruzzo", + "ag", + "agrigento", + "al", + "alessandria", + "alto-adige", + "altoadige", + "an", + "ancona", + "andria-barletta-trani", + "andria-trani-barletta", + "andriabarlettatrani", + "andriatranibarletta", + "ao", + "aosta", + "aosta-valley", + "aostavalley", + "aoste", + "ap", + "aq", + "aquila", + "ar", + "arezzo", + "ascoli-piceno", + "ascolipiceno", + "asti", + "at", + "av", + "avellino", + "ba", + "balsan", + "bari", + "barletta-trani-andria", + "barlettatraniandria", + "bas", + "basilicata", + "belluno", + "benevento", + "bergamo", + "bg", + "bi", + "biella", + "bl", + "blogspot", + "bn", + "bo", + "bologna", + "bolzano", + "bozen", + "br", + "brescia", + "brindisi", + "bs", + "bt", + "bz", + "ca", + "cagliari", + "cal", + "calabria", + "caltanissetta", + "cam", + "campania", + "campidano-medio", + "campidanomedio", + "campobasso", + "carbonia-iglesias", + "carboniaiglesias", + "carrara-massa", + "carraramassa", + "caserta", + "catania", + "catanzaro", + "cb", + "ce", + "cesena-forli", + "cesenaforli", + "ch", + "chieti", + "ci", + "cl", + "cn", + "co", + "como", + "cosenza", + "cr", + "cremona", + "crotone", + "cs", + "ct", + "cuneo", + "cz", + 
"dell-ogliastra", + "dellogliastra", + "edu", + "emilia-romagna", + "emiliaromagna", + "emr", + "en", + "enna", + "fc", + "fe", + "fermo", + "ferrara", + "fg", + "fi", + "firenze", + "florence", + "fm", + "foggia", + "forli-cesena", + "forlicesena", + "fr", + "friuli-v-giulia", + "friuli-ve-giulia", + "friuli-vegiulia", + "friuli-venezia-giulia", + "friuli-veneziagiulia", + "friuli-vgiulia", + "friuliv-giulia", + "friulive-giulia", + "friulivegiulia", + "friulivenezia-giulia", + "friuliveneziagiulia", + "friulivgiulia", + "frosinone", + "fvg", + "ge", + "genoa", + "genova", + "go", + "gorizia", + "gov", + "gr", + "grosseto", + "iglesias-carbonia", + "iglesiascarbonia", + "im", + "imperia", + "is", + "isernia", + "kr", + "la-spezia", + "laquila", + "laspezia", + "latina", + "laz", + "lazio", + "lc", + "le", + "lecce", + "lecco", + "li", + "lig", + "liguria", + "livorno", + "lo", + "lodi", + "lom", + "lombardia", + "lombardy", + "lt", + "lu", + "lucania", + "lucca", + "macerata", + "mantova", + "mar", + "marche", + "massa-carrara", + "massacarrara", + "matera", + "mb", + "mc", + "me", + "medio-campidano", + "mediocampidano", + "messina", + "mi", + "milan", + "milano", + "mn", + "mo", + "modena", + "mol", + "molise", + "monza", + "monza-brianza", + "monza-e-della-brianza", + "monzabrianza", + "monzaebrianza", + "monzaedellabrianza", + "ms", + "mt", + "na", + "naples", + "napoli", + "no", + "novara", + "nu", + "nuoro", + "og", + "ogliastra", + "olbia-tempio", + "olbiatempio", + "or", + "oristano", + "ot", + "pa", + "padova", + "padua", + "palermo", + "parma", + "pavia", + "pc", + "pd", + "pe", + "perugia", + "pesaro-urbino", + "pesarourbino", + "pescara", + "pg", + "pi", + "piacenza", + "piedmont", + "piemonte", + "pisa", + "pistoia", + "pmn", + "pn", + "po", + "pordenone", + "potenza", + "pr", + "prato", + "pt", + "pu", + "pug", + "puglia", + "pv", + "pz", + "ra", + "ragusa", + "ravenna", + "rc", + "re", + "reggio-calabria", + "reggio-emilia", + "reggiocalabria", + 
"reggioemilia", + "rg", + "ri", + "rieti", + "rimini", + "rm", + "rn", + "ro", + "roma", + "rome", + "rovigo", + "sa", + "salerno", + "sar", + "sardegna", + "sardinia", + "sassari", + "savona", + "si", + "sic", + "sicilia", + "sicily", + "siena", + "siracusa", + "so", + "sondrio", + "sp", + "sr", + "ss", + "suedtirol", + "sv", + "ta", + "taa", + "taranto", + "te", + "tempio-olbia", + "tempioolbia", + "teramo", + "terni", + "tn", + "to", + "torino", + "tos", + "toscana", + "tp", + "tr", + "trani-andria-barletta", + "trani-barletta-andria", + "traniandriabarletta", + "tranibarlettaandria", + "trapani", + "trentino", + "trentino-a-adige", + "trentino-aadige", + "trentino-alto-adige", + "trentino-altoadige", + "trentino-s-tirol", + "trentino-stirol", + "trentino-sud-tirol", + "trentino-sudtirol", + "trentino-sued-tirol", + "trentino-suedtirol", + "trentinoa-adige", + "trentinoaadige", + "trentinoalto-adige", + "trentinoaltoadige", + "trentinos-tirol", + "trentinostirol", + "trentinosud-tirol", + "trentinosudtirol", + "trentinosued-tirol", + "trentinosuedtirol", + "trento", + "treviso", + "trieste", + "ts", + "turin", + "tuscany", + "tv", + "ud", + "udine", + "umb", + "umbria", + "urbino-pesaro", + "urbinopesaro", + "va", + "val-d-aosta", + "val-daosta", + "vald-aosta", + "valdaosta", + "valle-aosta", + "valle-d-aosta", + "valle-daosta", + "valleaosta", + "valled-aosta", + "valledaosta", + "vallee-aoste", + "valleeaoste", + "vao", + "varese", + "vb", + "vc", + "vda", + "ve", + "ven", + "veneto", + "venezia", + "venice", + "verbania", + "vercelli", + "verona", + "vi", + "vibo-valentia", + "vibovalentia", + "vicenza", + "viterbo", + "vr", + "vs", + "vt", + "vv", + "co", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "sch", + "ac", + "ad", + "aichi", + "akita", + "aomori", + "blogspot", + "chiba", + "co", + "ed", + "ehime", + "fukui", + "fukuoka", + "fukushima", + "gifu", + "go", + "gr", + "gunma", + "hiroshima", + "hokkaido", + 
"hyogo", + "ibaraki", + "ishikawa", + "iwate", + "kagawa", + "kagoshima", + "kanagawa", + "kawasaki", + "kitakyushu", + "kobe", + "kochi", + "kumamoto", + "kyoto", + "lg", + "mie", + "miyagi", + "miyazaki", + "nagano", + "nagasaki", + "nagoya", + "nara", + "ne", + "niigata", + "oita", + "okayama", + "okinawa", + "or", + "osaka", + "saga", + "saitama", + "sapporo", + "sendai", + "shiga", + "shimane", + "shizuoka", + "tochigi", + "tokushima", + "tokyo", + "tottori", + "toyama", + "wakayama", + "xn--0trq7p7nn", + "xn--1ctwo", + "xn--1lqs03n", + "xn--1lqs71d", + "xn--2m4a15e", + "xn--32vp30h", + "xn--4it168d", + "xn--4it797k", + "xn--4pvxs", + "xn--5js045d", + "xn--5rtp49c", + "xn--5rtq34k", + "xn--6btw5a", + "xn--6orx2r", + "xn--7t0a264c", + "xn--8ltr62k", + "xn--8pvr4u", + "xn--c3s14m", + "xn--d5qv7z876c", + "xn--djrs72d6uy", + "xn--djty4k", + "xn--efvn9s", + "xn--ehqz56n", + "xn--elqq16h", + "xn--f6qx53a", + "xn--k7yn95e", + "xn--kbrq7o", + "xn--klt787d", + "xn--kltp7d", + "xn--kltx9a", + "xn--klty5x", + "xn--mkru45i", + "xn--nit225k", + "xn--ntso0iqx3a", + "xn--ntsq17g", + "xn--pssu33l", + "xn--qqqt11m", + "xn--rht27z", + "xn--rht3d", + "xn--rht61e", + "xn--rny31h", + "xn--tor131o", + "xn--uist22h", + "xn--uisz3g", + "xn--uuwu58a", + "xn--vgu402c", + "xn--zbx025d", + "yamagata", + "yamaguchi", + "yamanashi", + "yokohama", + "aisai", + "ama", + "anjo", + "asuke", + "chiryu", + "chita", + "fuso", + "gamagori", + "handa", + "hazu", + "hekinan", + "higashiura", + "ichinomiya", + "inazawa", + "inuyama", + "isshiki", + "iwakura", + "kanie", + "kariya", + "kasugai", + "kira", + "kiyosu", + "komaki", + "konan", + "kota", + "mihama", + "miyoshi", + "nishio", + "nisshin", + "obu", + "oguchi", + "oharu", + "okazaki", + "owariasahi", + "seto", + "shikatsu", + "shinshiro", + "shitara", + "tahara", + "takahama", + "tobishima", + "toei", + "togo", + "tokai", + "tokoname", + "toyoake", + "toyohashi", + "toyokawa", + "toyone", + "toyota", + "tsushima", + "yatomi", + "akita", + 
"daisen", + "fujisato", + "gojome", + "hachirogata", + "happou", + "higashinaruse", + "honjo", + "honjyo", + "ikawa", + "kamikoani", + "kamioka", + "katagami", + "kazuno", + "kitaakita", + "kosaka", + "kyowa", + "misato", + "mitane", + "moriyoshi", + "nikaho", + "noshiro", + "odate", + "oga", + "ogata", + "semboku", + "yokote", + "yurihonjo", + "aomori", + "gonohe", + "hachinohe", + "hashikami", + "hiranai", + "hirosaki", + "itayanagi", + "kuroishi", + "misawa", + "mutsu", + "nakadomari", + "noheji", + "oirase", + "owani", + "rokunohe", + "sannohe", + "shichinohe", + "shingo", + "takko", + "towada", + "tsugaru", + "tsuruta", + "abiko", + "asahi", + "chonan", + "chosei", + "choshi", + "chuo", + "funabashi", + "futtsu", + "hanamigawa", + "ichihara", + "ichikawa", + "ichinomiya", + "inzai", + "isumi", + "kamagaya", + "kamogawa", + "kashiwa", + "katori", + "katsuura", + "kimitsu", + "kisarazu", + "kozaki", + "kujukuri", + "kyonan", + "matsudo", + "midori", + "mihama", + "minamiboso", + "mobara", + "mutsuzawa", + "nagara", + "nagareyama", + "narashino", + "narita", + "noda", + "oamishirasato", + "omigawa", + "onjuku", + "otaki", + "sakae", + "sakura", + "shimofusa", + "shirako", + "shiroi", + "shisui", + "sodegaura", + "sosa", + "tako", + "tateyama", + "togane", + "tohnosho", + "tomisato", + "urayasu", + "yachimata", + "yachiyo", + "yokaichiba", + "yokoshibahikari", + "yotsukaido", + "ainan", + "honai", + "ikata", + "imabari", + "iyo", + "kamijima", + "kihoku", + "kumakogen", + "masaki", + "matsuno", + "matsuyama", + "namikata", + "niihama", + "ozu", + "saijo", + "seiyo", + "shikokuchuo", + "tobe", + "toon", + "uchiko", + "uwajima", + "yawatahama", + "echizen", + "eiheiji", + "fukui", + "ikeda", + "katsuyama", + "mihama", + "minamiechizen", + "obama", + "ohi", + "ono", + "sabae", + "sakai", + "takahama", + "tsuruga", + "wakasa", + "ashiya", + "buzen", + "chikugo", + "chikuho", + "chikujo", + "chikushino", + "chikuzen", + "chuo", + "dazaifu", + "fukuchi", + "hakata", + 
"higashi", + "hirokawa", + "hisayama", + "iizuka", + "inatsuki", + "kaho", + "kasuga", + "kasuya", + "kawara", + "keisen", + "koga", + "kurate", + "kurogi", + "kurume", + "minami", + "miyako", + "miyama", + "miyawaka", + "mizumaki", + "munakata", + "nakagawa", + "nakama", + "nishi", + "nogata", + "ogori", + "okagaki", + "okawa", + "oki", + "omuta", + "onga", + "onojo", + "oto", + "saigawa", + "sasaguri", + "shingu", + "shinyoshitomi", + "shonai", + "soeda", + "sue", + "tachiarai", + "tagawa", + "takata", + "toho", + "toyotsu", + "tsuiki", + "ukiha", + "umi", + "usui", + "yamada", + "yame", + "yanagawa", + "yukuhashi", + "aizubange", + "aizumisato", + "aizuwakamatsu", + "asakawa", + "bandai", + "date", + "fukushima", + "furudono", + "futaba", + "hanawa", + "higashi", + "hirata", + "hirono", + "iitate", + "inawashiro", + "ishikawa", + "iwaki", + "izumizaki", + "kagamiishi", + "kaneyama", + "kawamata", + "kitakata", + "kitashiobara", + "koori", + "koriyama", + "kunimi", + "miharu", + "mishima", + "namie", + "nango", + "nishiaizu", + "nishigo", + "okuma", + "omotego", + "ono", + "otama", + "samegawa", + "shimogo", + "shirakawa", + "showa", + "soma", + "sukagawa", + "taishin", + "tamakawa", + "tanagura", + "tenei", + "yabuki", + "yamato", + "yamatsuri", + "yanaizu", + "yugawa", + "anpachi", + "ena", + "gifu", + "ginan", + "godo", + "gujo", + "hashima", + "hichiso", + "hida", + "higashishirakawa", + "ibigawa", + "ikeda", + "kakamigahara", + "kani", + "kasahara", + "kasamatsu", + "kawaue", + "kitagata", + "mino", + "minokamo", + "mitake", + "mizunami", + "motosu", + "nakatsugawa", + "ogaki", + "sakahogi", + "seki", + "sekigahara", + "shirakawa", + "tajimi", + "takayama", + "tarui", + "toki", + "tomika", + "wanouchi", + "yamagata", + "yaotsu", + "yoro", + "annaka", + "chiyoda", + "fujioka", + "higashiagatsuma", + "isesaki", + "itakura", + "kanna", + "kanra", + "katashina", + "kawaba", + "kiryu", + "kusatsu", + "maebashi", + "meiwa", + "midori", + "minakami", + 
"naganohara", + "nakanojo", + "nanmoku", + "numata", + "oizumi", + "ora", + "ota", + "shibukawa", + "shimonita", + "shinto", + "showa", + "takasaki", + "takayama", + "tamamura", + "tatebayashi", + "tomioka", + "tsukiyono", + "tsumagoi", + "ueno", + "yoshioka", + "asaminami", + "daiwa", + "etajima", + "fuchu", + "fukuyama", + "hatsukaichi", + "higashihiroshima", + "hongo", + "jinsekikogen", + "kaita", + "kui", + "kumano", + "kure", + "mihara", + "miyoshi", + "naka", + "onomichi", + "osakikamijima", + "otake", + "saka", + "sera", + "seranishi", + "shinichi", + "shobara", + "takehara", + "abashiri", + "abira", + "aibetsu", + "akabira", + "akkeshi", + "asahikawa", + "ashibetsu", + "ashoro", + "assabu", + "atsuma", + "bibai", + "biei", + "bifuka", + "bihoro", + "biratori", + "chippubetsu", + "chitose", + "date", + "ebetsu", + "embetsu", + "eniwa", + "erimo", + "esan", + "esashi", + "fukagawa", + "fukushima", + "furano", + "furubira", + "haboro", + "hakodate", + "hamatonbetsu", + "hidaka", + "higashikagura", + "higashikawa", + "hiroo", + "hokuryu", + "hokuto", + "honbetsu", + "horokanai", + "horonobe", + "ikeda", + "imakane", + "ishikari", + "iwamizawa", + "iwanai", + "kamifurano", + "kamikawa", + "kamishihoro", + "kamisunagawa", + "kamoenai", + "kayabe", + "kembuchi", + "kikonai", + "kimobetsu", + "kitahiroshima", + "kitami", + "kiyosato", + "koshimizu", + "kunneppu", + "kuriyama", + "kuromatsunai", + "kushiro", + "kutchan", + "kyowa", + "mashike", + "matsumae", + "mikasa", + "minamifurano", + "mombetsu", + "moseushi", + "mukawa", + "muroran", + "naie", + "nakagawa", + "nakasatsunai", + "nakatombetsu", + "nanae", + "nanporo", + "nayoro", + "nemuro", + "niikappu", + "niki", + "nishiokoppe", + "noboribetsu", + "numata", + "obihiro", + "obira", + "oketo", + "okoppe", + "otaru", + "otobe", + "otofuke", + "otoineppu", + "oumu", + "ozora", + "pippu", + "rankoshi", + "rebun", + "rikubetsu", + "rishiri", + "rishirifuji", + "saroma", + "sarufutsu", + "shakotan", + "shari", + 
"shibecha", + "shibetsu", + "shikabe", + "shikaoi", + "shimamaki", + "shimizu", + "shimokawa", + "shinshinotsu", + "shintoku", + "shiranuka", + "shiraoi", + "shiriuchi", + "sobetsu", + "sunagawa", + "taiki", + "takasu", + "takikawa", + "takinoue", + "teshikaga", + "tobetsu", + "tohma", + "tomakomai", + "tomari", + "toya", + "toyako", + "toyotomi", + "toyoura", + "tsubetsu", + "tsukigata", + "urakawa", + "urausu", + "uryu", + "utashinai", + "wakkanai", + "wassamu", + "yakumo", + "yoichi", + "aioi", + "akashi", + "ako", + "amagasaki", + "aogaki", + "asago", + "ashiya", + "awaji", + "fukusaki", + "goshiki", + "harima", + "himeji", + "ichikawa", + "inagawa", + "itami", + "kakogawa", + "kamigori", + "kamikawa", + "kasai", + "kasuga", + "kawanishi", + "miki", + "minamiawaji", + "nishinomiya", + "nishiwaki", + "ono", + "sanda", + "sannan", + "sasayama", + "sayo", + "shingu", + "shinonsen", + "shiso", + "sumoto", + "taishi", + "taka", + "takarazuka", + "takasago", + "takino", + "tamba", + "tatsuno", + "toyooka", + "yabu", + "yashiro", + "yoka", + "yokawa", + "ami", + "asahi", + "bando", + "chikusei", + "daigo", + "fujishiro", + "hitachi", + "hitachinaka", + "hitachiomiya", + "hitachiota", + "ibaraki", + "ina", + "inashiki", + "itako", + "iwama", + "joso", + "kamisu", + "kasama", + "kashima", + "kasumigaura", + "koga", + "miho", + "mito", + "moriya", + "naka", + "namegata", + "oarai", + "ogawa", + "omitama", + "ryugasaki", + "sakai", + "sakuragawa", + "shimodate", + "shimotsuma", + "shirosato", + "sowa", + "suifu", + "takahagi", + "tamatsukuri", + "tokai", + "tomobe", + "tone", + "toride", + "tsuchiura", + "tsukuba", + "uchihara", + "ushiku", + "yachiyo", + "yamagata", + "yawara", + "yuki", + "anamizu", + "hakui", + "hakusan", + "kaga", + "kahoku", + "kanazawa", + "kawakita", + "komatsu", + "nakanoto", + "nanao", + "nomi", + "nonoichi", + "noto", + "shika", + "suzu", + "tsubata", + "tsurugi", + "uchinada", + "wajima", + "fudai", + "fujisawa", + "hanamaki", + "hiraizumi", + 
"hirono", + "ichinohe", + "ichinoseki", + "iwaizumi", + "iwate", + "joboji", + "kamaishi", + "kanegasaki", + "karumai", + "kawai", + "kitakami", + "kuji", + "kunohe", + "kuzumaki", + "miyako", + "mizusawa", + "morioka", + "ninohe", + "noda", + "ofunato", + "oshu", + "otsuchi", + "rikuzentakata", + "shiwa", + "shizukuishi", + "sumita", + "tanohata", + "tono", + "yahaba", + "yamada", + "ayagawa", + "higashikagawa", + "kanonji", + "kotohira", + "manno", + "marugame", + "mitoyo", + "naoshima", + "sanuki", + "tadotsu", + "takamatsu", + "tonosho", + "uchinomi", + "utazu", + "zentsuji", + "akune", + "amami", + "hioki", + "isa", + "isen", + "izumi", + "kagoshima", + "kanoya", + "kawanabe", + "kinko", + "kouyama", + "makurazaki", + "matsumoto", + "minamitane", + "nakatane", + "nishinoomote", + "satsumasendai", + "soo", + "tarumizu", + "yusui", + "aikawa", + "atsugi", + "ayase", + "chigasaki", + "ebina", + "fujisawa", + "hadano", + "hakone", + "hiratsuka", + "isehara", + "kaisei", + "kamakura", + "kiyokawa", + "matsuda", + "minamiashigara", + "miura", + "nakai", + "ninomiya", + "odawara", + "oi", + "oiso", + "sagamihara", + "samukawa", + "tsukui", + "yamakita", + "yamato", + "yokosuka", + "yugawara", + "zama", + "zushi", + "city", + "city", + "city", + "aki", + "geisei", + "hidaka", + "higashitsuno", + "ino", + "kagami", + "kami", + "kitagawa", + "kochi", + "mihara", + "motoyama", + "muroto", + "nahari", + "nakamura", + "nankoku", + "nishitosa", + "niyodogawa", + "ochi", + "okawa", + "otoyo", + "otsuki", + "sakawa", + "sukumo", + "susaki", + "tosa", + "tosashimizu", + "toyo", + "tsuno", + "umaji", + "yasuda", + "yusuhara", + "amakusa", + "arao", + "aso", + "choyo", + "gyokuto", + "hitoyoshi", + "kamiamakusa", + "kashima", + "kikuchi", + "kosa", + "kumamoto", + "mashiki", + "mifune", + "minamata", + "minamioguni", + "nagasu", + "nishihara", + "oguni", + "ozu", + "sumoto", + "takamori", + "uki", + "uto", + "yamaga", + "yamato", + "yatsushiro", + "ayabe", + "fukuchiyama", + 
"higashiyama", + "ide", + "ine", + "joyo", + "kameoka", + "kamo", + "kita", + "kizu", + "kumiyama", + "kyotamba", + "kyotanabe", + "kyotango", + "maizuru", + "minami", + "minamiyamashiro", + "miyazu", + "muko", + "nagaokakyo", + "nakagyo", + "nantan", + "oyamazaki", + "sakyo", + "seika", + "tanabe", + "uji", + "ujitawara", + "wazuka", + "yamashina", + "yawata", + "asahi", + "inabe", + "ise", + "kameyama", + "kawagoe", + "kiho", + "kisosaki", + "kiwa", + "komono", + "kumano", + "kuwana", + "matsusaka", + "meiwa", + "mihama", + "minamiise", + "misugi", + "miyama", + "nabari", + "shima", + "suzuka", + "tado", + "taiki", + "taki", + "tamaki", + "toba", + "tsu", + "udono", + "ureshino", + "watarai", + "yokkaichi", + "furukawa", + "higashimatsushima", + "ishinomaki", + "iwanuma", + "kakuda", + "kami", + "kawasaki", + "kesennuma", + "marumori", + "matsushima", + "minamisanriku", + "misato", + "murata", + "natori", + "ogawara", + "ohira", + "onagawa", + "osaki", + "rifu", + "semine", + "shibata", + "shichikashuku", + "shikama", + "shiogama", + "shiroishi", + "tagajo", + "taiwa", + "tome", + "tomiya", + "wakuya", + "watari", + "yamamoto", + "zao", + "aya", + "ebino", + "gokase", + "hyuga", + "kadogawa", + "kawaminami", + "kijo", + "kitagawa", + "kitakata", + "kitaura", + "kobayashi", + "kunitomi", + "kushima", + "mimata", + "miyakonojo", + "miyazaki", + "morotsuka", + "nichinan", + "nishimera", + "nobeoka", + "saito", + "shiiba", + "shintomi", + "takaharu", + "takanabe", + "takazaki", + "tsuno", + "achi", + "agematsu", + "anan", + "aoki", + "asahi", + "azumino", + "chikuhoku", + "chikuma", + "chino", + "fujimi", + "hakuba", + "hara", + "hiraya", + "iida", + "iijima", + "iiyama", + "iizuna", + "ikeda", + "ikusaka", + "ina", + "karuizawa", + "kawakami", + "kiso", + "kisofukushima", + "kitaaiki", + "komagane", + "komoro", + "matsukawa", + "matsumoto", + "miasa", + "minamiaiki", + "minamimaki", + "minamiminowa", + "minowa", + "miyada", + "miyota", + "mochizuki", + "nagano", + 
"nagawa", + "nagiso", + "nakagawa", + "nakano", + "nozawaonsen", + "obuse", + "ogawa", + "okaya", + "omachi", + "omi", + "ookuwa", + "ooshika", + "otaki", + "otari", + "sakae", + "sakaki", + "saku", + "sakuho", + "shimosuwa", + "shinanomachi", + "shiojiri", + "suwa", + "suzaka", + "takagi", + "takamori", + "takayama", + "tateshina", + "tatsuno", + "togakushi", + "togura", + "tomi", + "ueda", + "wada", + "yamagata", + "yamanouchi", + "yasaka", + "yasuoka", + "chijiwa", + "futsu", + "goto", + "hasami", + "hirado", + "iki", + "isahaya", + "kawatana", + "kuchinotsu", + "matsuura", + "nagasaki", + "obama", + "omura", + "oseto", + "saikai", + "sasebo", + "seihi", + "shimabara", + "shinkamigoto", + "togitsu", + "tsushima", + "unzen", + "city", + "ando", + "gose", + "heguri", + "higashiyoshino", + "ikaruga", + "ikoma", + "kamikitayama", + "kanmaki", + "kashiba", + "kashihara", + "katsuragi", + "kawai", + "kawakami", + "kawanishi", + "koryo", + "kurotaki", + "mitsue", + "miyake", + "nara", + "nosegawa", + "oji", + "ouda", + "oyodo", + "sakurai", + "sango", + "shimoichi", + "shimokitayama", + "shinjo", + "soni", + "takatori", + "tawaramoto", + "tenkawa", + "tenri", + "uda", + "yamatokoriyama", + "yamatotakada", + "yamazoe", + "yoshino", + "aga", + "agano", + "gosen", + "itoigawa", + "izumozaki", + "joetsu", + "kamo", + "kariwa", + "kashiwazaki", + "minamiuonuma", + "mitsuke", + "muika", + "murakami", + "myoko", + "nagaoka", + "niigata", + "ojiya", + "omi", + "sado", + "sanjo", + "seiro", + "seirou", + "sekikawa", + "shibata", + "tagami", + "tainai", + "tochio", + "tokamachi", + "tsubame", + "tsunan", + "uonuma", + "yahiko", + "yoita", + "yuzawa", + "beppu", + "bungoono", + "bungotakada", + "hasama", + "hiji", + "himeshima", + "hita", + "kamitsue", + "kokonoe", + "kuju", + "kunisaki", + "kusu", + "oita", + "saiki", + "taketa", + "tsukumi", + "usa", + "usuki", + "yufu", + "akaiwa", + "asakuchi", + "bizen", + "hayashima", + "ibara", + "kagamino", + "kasaoka", + "kibichuo", + 
"kumenan", + "kurashiki", + "maniwa", + "misaki", + "nagi", + "niimi", + "nishiawakura", + "okayama", + "satosho", + "setouchi", + "shinjo", + "shoo", + "soja", + "takahashi", + "tamano", + "tsuyama", + "wake", + "yakage", + "aguni", + "ginowan", + "ginoza", + "gushikami", + "haebaru", + "higashi", + "hirara", + "iheya", + "ishigaki", + "ishikawa", + "itoman", + "izena", + "kadena", + "kin", + "kitadaito", + "kitanakagusuku", + "kumejima", + "kunigami", + "minamidaito", + "motobu", + "nago", + "naha", + "nakagusuku", + "nakijin", + "nanjo", + "nishihara", + "ogimi", + "okinawa", + "onna", + "shimoji", + "taketomi", + "tarama", + "tokashiki", + "tomigusuku", + "tonaki", + "urasoe", + "uruma", + "yaese", + "yomitan", + "yonabaru", + "yonaguni", + "zamami", + "abeno", + "chihayaakasaka", + "chuo", + "daito", + "fujiidera", + "habikino", + "hannan", + "higashiosaka", + "higashisumiyoshi", + "higashiyodogawa", + "hirakata", + "ibaraki", + "ikeda", + "izumi", + "izumiotsu", + "izumisano", + "kadoma", + "kaizuka", + "kanan", + "kashiwara", + "katano", + "kawachinagano", + "kishiwada", + "kita", + "kumatori", + "matsubara", + "minato", + "minoh", + "misaki", + "moriguchi", + "neyagawa", + "nishi", + "nose", + "osakasayama", + "sakai", + "sayama", + "sennan", + "settsu", + "shijonawate", + "shimamoto", + "suita", + "tadaoka", + "taishi", + "tajiri", + "takaishi", + "takatsuki", + "tondabayashi", + "toyonaka", + "toyono", + "yao", + "ariake", + "arita", + "fukudomi", + "genkai", + "hamatama", + "hizen", + "imari", + "kamimine", + "kanzaki", + "karatsu", + "kashima", + "kitagata", + "kitahata", + "kiyama", + "kouhoku", + "kyuragi", + "nishiarita", + "ogi", + "omachi", + "ouchi", + "saga", + "shiroishi", + "taku", + "tara", + "tosu", + "yoshinogari", + "arakawa", + "asaka", + "chichibu", + "fujimi", + "fujimino", + "fukaya", + "hanno", + "hanyu", + "hasuda", + "hatogaya", + "hatoyama", + "hidaka", + "higashichichibu", + "higashimatsuyama", + "honjo", + "ina", + "iruma", + 
"iwatsuki", + "kamiizumi", + "kamikawa", + "kamisato", + "kasukabe", + "kawagoe", + "kawaguchi", + "kawajima", + "kazo", + "kitamoto", + "koshigaya", + "kounosu", + "kuki", + "kumagaya", + "matsubushi", + "minano", + "misato", + "miyashiro", + "miyoshi", + "moroyama", + "nagatoro", + "namegawa", + "niiza", + "ogano", + "ogawa", + "ogose", + "okegawa", + "omiya", + "otaki", + "ranzan", + "ryokami", + "saitama", + "sakado", + "satte", + "sayama", + "shiki", + "shiraoka", + "soka", + "sugito", + "toda", + "tokigawa", + "tokorozawa", + "tsurugashima", + "urawa", + "warabi", + "yashio", + "yokoze", + "yono", + "yorii", + "yoshida", + "yoshikawa", + "yoshimi", + "city", + "city", + "aisho", + "gamo", + "higashiomi", + "hikone", + "koka", + "konan", + "kosei", + "koto", + "kusatsu", + "maibara", + "moriyama", + "nagahama", + "nishiazai", + "notogawa", + "omihachiman", + "otsu", + "ritto", + "ryuoh", + "takashima", + "takatsuki", + "torahime", + "toyosato", + "yasu", + "akagi", + "ama", + "gotsu", + "hamada", + "higashiizumo", + "hikawa", + "hikimi", + "izumo", + "kakinoki", + "masuda", + "matsue", + "misato", + "nishinoshima", + "ohda", + "okinoshima", + "okuizumo", + "shimane", + "tamayu", + "tsuwano", + "unnan", + "yakumo", + "yasugi", + "yatsuka", + "arai", + "atami", + "fuji", + "fujieda", + "fujikawa", + "fujinomiya", + "fukuroi", + "gotemba", + "haibara", + "hamamatsu", + "higashiizu", + "ito", + "iwata", + "izu", + "izunokuni", + "kakegawa", + "kannami", + "kawanehon", + "kawazu", + "kikugawa", + "kosai", + "makinohara", + "matsuzaki", + "minamiizu", + "mishima", + "morimachi", + "nishiizu", + "numazu", + "omaezaki", + "shimada", + "shimizu", + "shimoda", + "shizuoka", + "susono", + "yaizu", + "yoshida", + "ashikaga", + "bato", + "haga", + "ichikai", + "iwafune", + "kaminokawa", + "kanuma", + "karasuyama", + "kuroiso", + "mashiko", + "mibu", + "moka", + "motegi", + "nasu", + "nasushiobara", + "nikko", + "nishikata", + "nogi", + "ohira", + "ohtawara", + "oyama", + 
"sakura", + "sano", + "shimotsuke", + "shioya", + "takanezawa", + "tochigi", + "tsuga", + "ujiie", + "utsunomiya", + "yaita", + "aizumi", + "anan", + "ichiba", + "itano", + "kainan", + "komatsushima", + "matsushige", + "mima", + "minami", + "miyoshi", + "mugi", + "nakagawa", + "naruto", + "sanagochi", + "shishikui", + "tokushima", + "wajiki", + "adachi", + "akiruno", + "akishima", + "aogashima", + "arakawa", + "bunkyo", + "chiyoda", + "chofu", + "chuo", + "edogawa", + "fuchu", + "fussa", + "hachijo", + "hachioji", + "hamura", + "higashikurume", + "higashimurayama", + "higashiyamato", + "hino", + "hinode", + "hinohara", + "inagi", + "itabashi", + "katsushika", + "kita", + "kiyose", + "kodaira", + "koganei", + "kokubunji", + "komae", + "koto", + "kouzushima", + "kunitachi", + "machida", + "meguro", + "minato", + "mitaka", + "mizuho", + "musashimurayama", + "musashino", + "nakano", + "nerima", + "ogasawara", + "okutama", + "ome", + "oshima", + "ota", + "setagaya", + "shibuya", + "shinagawa", + "shinjuku", + "suginami", + "sumida", + "tachikawa", + "taito", + "tama", + "toshima", + "chizu", + "hino", + "kawahara", + "koge", + "kotoura", + "misasa", + "nanbu", + "nichinan", + "sakaiminato", + "tottori", + "wakasa", + "yazu", + "yonago", + "asahi", + "fuchu", + "fukumitsu", + "funahashi", + "himi", + "imizu", + "inami", + "johana", + "kamiichi", + "kurobe", + "nakaniikawa", + "namerikawa", + "nanto", + "nyuzen", + "oyabe", + "taira", + "takaoka", + "tateyama", + "toga", + "tonami", + "toyama", + "unazuki", + "uozu", + "yamada", + "arida", + "aridagawa", + "gobo", + "hashimoto", + "hidaka", + "hirogawa", + "inami", + "iwade", + "kainan", + "kamitonda", + "katsuragi", + "kimino", + "kinokawa", + "kitayama", + "koya", + "koza", + "kozagawa", + "kudoyama", + "kushimoto", + "mihama", + "misato", + "nachikatsuura", + "shingu", + "shirahama", + "taiji", + "tanabe", + "wakayama", + "yuasa", + "yura", + "asahi", + "funagata", + "higashine", + "iide", + "kahoku", + "kaminoyama", + 
"kaneyama", + "kawanishi", + "mamurogawa", + "mikawa", + "murayama", + "nagai", + "nakayama", + "nanyo", + "nishikawa", + "obanazawa", + "oe", + "oguni", + "ohkura", + "oishida", + "sagae", + "sakata", + "sakegawa", + "shinjo", + "shirataka", + "shonai", + "takahata", + "tendo", + "tozawa", + "tsuruoka", + "yamagata", + "yamanobe", + "yonezawa", + "yuza", + "abu", + "hagi", + "hikari", + "hofu", + "iwakuni", + "kudamatsu", + "mitou", + "nagato", + "oshima", + "shimonoseki", + "shunan", + "tabuse", + "tokuyama", + "toyota", + "ube", + "yuu", + "chuo", + "doshi", + "fuefuki", + "fujikawa", + "fujikawaguchiko", + "fujiyoshida", + "hayakawa", + "hokuto", + "ichikawamisato", + "kai", + "kofu", + "koshu", + "kosuge", + "minami-alps", + "minobu", + "nakamichi", + "nanbu", + "narusawa", + "nirasaki", + "nishikatsura", + "oshino", + "otsuki", + "showa", + "tabayama", + "tsuru", + "uenohara", + "yamanakako", + "yamanashi", + "city", + "co", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "biz", + "com", + "edu", + "gov", + "info", + "net", + "org", + "ass", + "asso", + "com", + "coop", + "edu", + "gouv", + "gov", + "medecin", + "mil", + "nom", + "notaires", + "org", + "pharmaciens", + "prd", + "presse", + "tm", + "veterinaire", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "org", + "rep", + "tra", + "ac", + "blogspot", + "busan", + "chungbuk", + "chungnam", + "co", + "daegu", + "daejeon", + "es", + "gangwon", + "go", + "gwangju", + "gyeongbuk", + "gyeonggi", + "gyeongnam", + "hs", + "incheon", + "jeju", + "jeonbuk", + "jeonnam", + "kg", + "mil", + "ms", + "ne", + "or", + "pe", + "re", + "sc", + "seoul", + "ulsan", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "c", + "com", + "edu", + "gov", + "info", + "int", + "net", + "org", + "per", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "edu", + "gov", + "net", + "org", + "blogspot", + "ac", + "assn", + "com", + "edu", 
+ "gov", + "grp", + "hotel", + "int", + "ltd", + "net", + "ngo", + "org", + "sch", + "soc", + "web", + "com", + "edu", + "gov", + "net", + "org", + "co", + "org", + "blogspot", + "gov", + "blogspot", + "asn", + "com", + "conf", + "edu", + "gov", + "id", + "mil", + "net", + "org", + "com", + "edu", + "gov", + "id", + "med", + "net", + "org", + "plc", + "sch", + "ac", + "co", + "gov", + "net", + "org", + "press", + "asso", + "tm", + "blogspot", + "ac", + "co", + "diskstation", + "dscloud", + "edu", + "gov", + "i234", + "its", + "myds", + "net", + "org", + "priv", + "synology", + "co", + "com", + "edu", + "gov", + "mil", + "nom", + "org", + "prd", + "tm", + "blogspot", + "com", + "edu", + "gov", + "inf", + "name", + "net", + "org", + "com", + "edu", + "gouv", + "gov", + "net", + "org", + "presse", + "edu", + "gov", + "nyc", + "org", + "com", + "edu", + "gov", + "net", + "org", + "dscloud", + "blogspot", + "gov", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "net", + "org", + "blogspot", + "ac", + "co", + "com", + "gov", + "net", + "or", + "org", + "academy", + "agriculture", + "air", + "airguard", + "alabama", + "alaska", + "amber", + "ambulance", + "american", + "americana", + "americanantiques", + "americanart", + "amsterdam", + "and", + "annefrank", + "anthro", + "anthropology", + "antiques", + "aquarium", + "arboretum", + "archaeological", + "archaeology", + "architecture", + "art", + "artanddesign", + "artcenter", + "artdeco", + "arteducation", + "artgallery", + "arts", + "artsandcrafts", + "asmatart", + "assassination", + "assisi", + "association", + "astronomy", + "atlanta", + "austin", + "australia", + "automotive", + "aviation", + "axis", + "badajoz", + "baghdad", + "bahn", + "bale", + "baltimore", + "barcelona", + "baseball", + "basel", + "baths", + "bauern", + "beauxarts", + "beeldengeluid", + "bellevue", + "bergbau", + "berkeley", + "berlin", + "bern", + "bible", + "bilbao", + "bill", + "birdart", + "birthplace", + "bonn", + "boston", + 
"botanical", + "botanicalgarden", + "botanicgarden", + "botany", + "brandywinevalley", + "brasil", + "bristol", + "british", + "britishcolumbia", + "broadcast", + "brunel", + "brussel", + "brussels", + "bruxelles", + "building", + "burghof", + "bus", + "bushey", + "cadaques", + "california", + "cambridge", + "can", + "canada", + "capebreton", + "carrier", + "cartoonart", + "casadelamoneda", + "castle", + "castres", + "celtic", + "center", + "chattanooga", + "cheltenham", + "chesapeakebay", + "chicago", + "children", + "childrens", + "childrensgarden", + "chiropractic", + "chocolate", + "christiansburg", + "cincinnati", + "cinema", + "circus", + "civilisation", + "civilization", + "civilwar", + "clinton", + "clock", + "coal", + "coastaldefence", + "cody", + "coldwar", + "collection", + "colonialwilliamsburg", + "coloradoplateau", + "columbia", + "columbus", + "communication", + "communications", + "community", + "computer", + "computerhistory", + "contemporary", + "contemporaryart", + "convent", + "copenhagen", + "corporation", + "corvette", + "costume", + "countryestate", + "county", + "crafts", + "cranbrook", + "creation", + "cultural", + "culturalcenter", + "culture", + "cyber", + "cymru", + "dali", + "dallas", + "database", + "ddr", + "decorativearts", + "delaware", + "delmenhorst", + "denmark", + "depot", + "design", + "detroit", + "dinosaur", + "discovery", + "dolls", + "donostia", + "durham", + "eastafrica", + "eastcoast", + "education", + "educational", + "egyptian", + "eisenbahn", + "elburg", + "elvendrell", + "embroidery", + "encyclopedic", + "england", + "entomology", + "environment", + "environmentalconservation", + "epilepsy", + "essex", + "estate", + "ethnology", + "exeter", + "exhibition", + "family", + "farm", + "farmequipment", + "farmers", + "farmstead", + "field", + "figueres", + "filatelia", + "film", + "fineart", + "finearts", + "finland", + "flanders", + "florida", + "force", + "fortmissoula", + "fortworth", + "foundation", + "francaise", + 
"frankfurt", + "franziskaner", + "freemasonry", + "freiburg", + "fribourg", + "frog", + "fundacio", + "furniture", + "gallery", + "garden", + "gateway", + "geelvinck", + "gemological", + "geology", + "georgia", + "giessen", + "glas", + "glass", + "gorge", + "grandrapids", + "graz", + "guernsey", + "halloffame", + "hamburg", + "handson", + "harvestcelebration", + "hawaii", + "health", + "heimatunduhren", + "hellas", + "helsinki", + "hembygdsforbund", + "heritage", + "histoire", + "historical", + "historicalsociety", + "historichouses", + "historisch", + "historisches", + "history", + "historyofscience", + "horology", + "house", + "humanities", + "illustration", + "imageandsound", + "indian", + "indiana", + "indianapolis", + "indianmarket", + "intelligence", + "interactive", + "iraq", + "iron", + "isleofman", + "jamison", + "jefferson", + "jerusalem", + "jewelry", + "jewish", + "jewishart", + "jfk", + "journalism", + "judaica", + "judygarland", + "juedisches", + "juif", + "karate", + "karikatur", + "kids", + "koebenhavn", + "koeln", + "kunst", + "kunstsammlung", + "kunstunddesign", + "labor", + "labour", + "lajolla", + "lancashire", + "landes", + "lans", + "larsson", + "lewismiller", + "lincoln", + "linz", + "living", + "livinghistory", + "localhistory", + "london", + "losangeles", + "louvre", + "loyalist", + "lucerne", + "luxembourg", + "luzern", + "mad", + "madrid", + "mallorca", + "manchester", + "mansion", + "mansions", + "manx", + "marburg", + "maritime", + "maritimo", + "maryland", + "marylhurst", + "media", + "medical", + "medizinhistorisches", + "meeres", + "memorial", + "mesaverde", + "michigan", + "midatlantic", + "military", + "mill", + "miners", + "mining", + "minnesota", + "missile", + "missoula", + "modern", + "moma", + "money", + "monmouth", + "monticello", + "montreal", + "moscow", + "motorcycle", + "muenchen", + "muenster", + "mulhouse", + "muncie", + "museet", + "museumcenter", + "museumvereniging", + "music", + "national", + "nationalfirearms", + 
"nationalheritage", + "nativeamerican", + "naturalhistory", + "naturalhistorymuseum", + "naturalsciences", + "nature", + "naturhistorisches", + "natuurwetenschappen", + "naumburg", + "naval", + "nebraska", + "neues", + "newhampshire", + "newjersey", + "newmexico", + "newport", + "newspaper", + "newyork", + "niepce", + "norfolk", + "north", + "nrw", + "nuernberg", + "nuremberg", + "nyc", + "nyny", + "oceanographic", + "oceanographique", + "omaha", + "online", + "ontario", + "openair", + "oregon", + "oregontrail", + "otago", + "oxford", + "pacific", + "paderborn", + "palace", + "paleo", + "palmsprings", + "panama", + "paris", + "pasadena", + "pharmacy", + "philadelphia", + "philadelphiaarea", + "philately", + "phoenix", + "photography", + "pilots", + "pittsburgh", + "planetarium", + "plantation", + "plants", + "plaza", + "portal", + "portland", + "portlligat", + "posts-and-telecommunications", + "preservation", + "presidio", + "press", + "project", + "public", + "pubol", + "quebec", + "railroad", + "railway", + "research", + "resistance", + "riodejaneiro", + "rochester", + "rockart", + "roma", + "russia", + "saintlouis", + "salem", + "salvadordali", + "salzburg", + "sandiego", + "sanfrancisco", + "santabarbara", + "santacruz", + "santafe", + "saskatchewan", + "satx", + "savannahga", + "schlesisches", + "schoenbrunn", + "schokoladen", + "school", + "schweiz", + "science", + "science-fiction", + "scienceandhistory", + "scienceandindustry", + "sciencecenter", + "sciencecenters", + "sciencehistory", + "sciences", + "sciencesnaturelles", + "scotland", + "seaport", + "settlement", + "settlers", + "shell", + "sherbrooke", + "sibenik", + "silk", + "ski", + "skole", + "society", + "sologne", + "soundandvision", + "southcarolina", + "southwest", + "space", + "spy", + "square", + "stadt", + "stalbans", + "starnberg", + "state", + "stateofdelaware", + "station", + "steam", + "steiermark", + "stjohn", + "stockholm", + "stpetersburg", + "stuttgart", + "suisse", + "surgeonshall", + 
"surrey", + "svizzera", + "sweden", + "sydney", + "tank", + "tcm", + "technology", + "telekommunikation", + "television", + "texas", + "textile", + "theater", + "time", + "timekeeping", + "topology", + "torino", + "touch", + "town", + "transport", + "tree", + "trolley", + "trust", + "trustee", + "uhren", + "ulm", + "undersea", + "university", + "usa", + "usantiques", + "usarts", + "uscountryestate", + "usculture", + "usdecorativearts", + "usgarden", + "ushistory", + "ushuaia", + "uslivinghistory", + "utah", + "uvic", + "valley", + "vantaa", + "versailles", + "viking", + "village", + "virginia", + "virtual", + "virtuel", + "vlaanderen", + "volkenkunde", + "wales", + "wallonie", + "war", + "washingtondc", + "watch-and-clock", + "watchandclock", + "western", + "westfalen", + "whaling", + "wildlife", + "williamsburg", + "windmill", + "workshop", + "xn--9dbhblg6di", + "xn--comunicaes-v6a2o", + "xn--correios-e-telecomunicaes-ghc29a", + "xn--h1aegh", + "xn--lns-qla", + "york", + "yorkshire", + "yosemite", + "youth", + "zoological", + "zoology", + "aero", + "biz", + "com", + "coop", + "edu", + "gov", + "info", + "int", + "mil", + "museum", + "name", + "net", + "org", + "pro", + "ac", + "biz", + "co", + "com", + "coop", + "edu", + "gov", + "int", + "museum", + "net", + "org", + "blogspot", + "com", + "edu", + "gob", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "teledata", + "ca", + "cc", + "co", + "com", + "dr", + "in", + "info", + "mobi", + "mx", + "name", + "or", + "org", + "pro", + "school", + "tv", + "us", + "ws", + "her", + "his", + "forgot", + "forgot", + "asso", + "at-band-camp", + "azure-mobile", + "azurewebsites", + "blogdns", + "broke-it", + "buyshouses", + "cdn77", + "cdn77-ssl", + "cloudapp", + "cloudfront", + "cloudfunctions", + "dnsalias", + "dnsdojo", + "does-it", + "dontexist", + "dsmynas", + "dynalias", + "dynathome", + "dynv6", + "endofinternet", + "familyds", + "fastly", + "from-az", + "from-co", + 
"from-la", + "from-ny", + "gb", + "gets-it", + "ham-radio-op", + "homeftp", + "homeip", + "homelinux", + "homeunix", + "hu", + "in", + "in-the-band", + "is-a-chef", + "is-a-geek", + "isa-geek", + "jp", + "kicks-ass", + "office-on-the", + "podzone", + "rackmaze", + "scrapper-site", + "se", + "selfip", + "sells-it", + "servebbs", + "serveftp", + "thruhere", + "uk", + "webhop", + "za", + "r", + "prod", + "ssl", + "a", + "global", + "a", + "b", + "global", + "arts", + "com", + "firm", + "info", + "net", + "other", + "per", + "rec", + "store", + "web", + "com", + "edu", + "gov", + "i", + "mil", + "mobi", + "name", + "net", + "org", + "sch", + "blogspot", + "ac", + "biz", + "co", + "com", + "edu", + "gob", + "in", + "info", + "int", + "mil", + "net", + "nom", + "org", + "web", + "blogspot", + "bv", + "co", + "aa", + "aarborte", + "aejrie", + "afjord", + "agdenes", + "ah", + "akershus", + "aknoluokta", + "akrehamn", + "al", + "alaheadju", + "alesund", + "algard", + "alstahaug", + "alta", + "alvdal", + "amli", + "amot", + "andasuolo", + "andebu", + "andoy", + "ardal", + "aremark", + "arendal", + "arna", + "aseral", + "asker", + "askim", + "askoy", + "askvoll", + "asnes", + "audnedaln", + "aukra", + "aure", + "aurland", + "aurskog-holand", + "austevoll", + "austrheim", + "averoy", + "badaddja", + "bahcavuotna", + "bahccavuotna", + "baidar", + "bajddar", + "balat", + "balestrand", + "ballangen", + "balsfjord", + "bamble", + "bardu", + "barum", + "batsfjord", + "bearalvahki", + "beardu", + "beiarn", + "berg", + "bergen", + "berlevag", + "bievat", + "bindal", + "birkenes", + "bjarkoy", + "bjerkreim", + "bjugn", + "blogspot", + "bodo", + "bokn", + "bomlo", + "bremanger", + "bronnoy", + "bronnoysund", + "brumunddal", + "bryne", + "bu", + "budejju", + "buskerud", + "bygland", + "bykle", + "cahcesuolo", + "co", + "davvenjarga", + "davvesiida", + "deatnu", + "dep", + "dielddanuorri", + "divtasvuodna", + "divttasvuotna", + "donna", + "dovre", + "drammen", + "drangedal", + "drobak", 
+ "dyroy", + "egersund", + "eid", + "eidfjord", + "eidsberg", + "eidskog", + "eidsvoll", + "eigersund", + "elverum", + "enebakk", + "engerdal", + "etne", + "etnedal", + "evenassi", + "evenes", + "evje-og-hornnes", + "farsund", + "fauske", + "fedje", + "fet", + "fetsund", + "fhs", + "finnoy", + "fitjar", + "fjaler", + "fjell", + "fla", + "flakstad", + "flatanger", + "flekkefjord", + "flesberg", + "flora", + "floro", + "fm", + "folkebibl", + "folldal", + "forde", + "forsand", + "fosnes", + "frana", + "fredrikstad", + "frei", + "frogn", + "froland", + "frosta", + "froya", + "fuoisku", + "fuossko", + "fusa", + "fylkesbibl", + "fyresdal", + "gaivuotna", + "galsa", + "gamvik", + "gangaviika", + "gaular", + "gausdal", + "giehtavuoatna", + "gildeskal", + "giske", + "gjemnes", + "gjerdrum", + "gjerstad", + "gjesdal", + "gjovik", + "gloppen", + "gol", + "gran", + "grane", + "granvin", + "gratangen", + "grimstad", + "grong", + "grue", + "gulen", + "guovdageaidnu", + "ha", + "habmer", + "hadsel", + "hagebostad", + "halden", + "halsa", + "hamar", + "hamaroy", + "hammarfeasta", + "hammerfest", + "hapmir", + "haram", + "hareid", + "harstad", + "hasvik", + "hattfjelldal", + "haugesund", + "hedmark", + "hemne", + "hemnes", + "hemsedal", + "herad", + "hitra", + "hjartdal", + "hjelmeland", + "hl", + "hm", + "hobol", + "hof", + "hokksund", + "hol", + "hole", + "holmestrand", + "holtalen", + "honefoss", + "hordaland", + "hornindal", + "horten", + "hoyanger", + "hoylandet", + "hurdal", + "hurum", + "hvaler", + "hyllestad", + "ibestad", + "idrett", + "inderoy", + "iveland", + "ivgu", + "jan-mayen", + "jessheim", + "jevnaker", + "jolster", + "jondal", + "jorpeland", + "kafjord", + "karasjohka", + "karasjok", + "karlsoy", + "karmoy", + "kautokeino", + "kirkenes", + "klabu", + "klepp", + "kommune", + "kongsberg", + "kongsvinger", + "kopervik", + "kraanghke", + "kragero", + "kristiansand", + "kristiansund", + "krodsherad", + "krokstadelva", + "kvafjord", + "kvalsund", + "kvam", + 
"kvanangen", + "kvinesdal", + "kvinnherad", + "kviteseid", + "kvitsoy", + "laakesvuemie", + "lahppi", + "langevag", + "lardal", + "larvik", + "lavagis", + "lavangen", + "leangaviika", + "lebesby", + "leikanger", + "leirfjord", + "leirvik", + "leka", + "leksvik", + "lenvik", + "lerdal", + "lesja", + "levanger", + "lier", + "lierne", + "lillehammer", + "lillesand", + "lindas", + "lindesnes", + "loabat", + "lodingen", + "lom", + "loppa", + "lorenskog", + "loten", + "lund", + "lunner", + "luroy", + "luster", + "lyngdal", + "lyngen", + "malatvuopmi", + "malselv", + "malvik", + "mandal", + "marker", + "marnardal", + "masfjorden", + "masoy", + "matta-varjjat", + "meland", + "meldal", + "melhus", + "meloy", + "meraker", + "midsund", + "midtre-gauldal", + "mil", + "mjondalen", + "mo-i-rana", + "moareke", + "modalen", + "modum", + "molde", + "more-og-romsdal", + "mosjoen", + "moskenes", + "moss", + "mosvik", + "mr", + "muosat", + "museum", + "naamesjevuemie", + "namdalseid", + "namsos", + "namsskogan", + "nannestad", + "naroy", + "narviika", + "narvik", + "naustdal", + "navuotna", + "nedre-eiker", + "nesna", + "nesodden", + "nesoddtangen", + "nesseby", + "nesset", + "nissedal", + "nittedal", + "nl", + "nord-aurdal", + "nord-fron", + "nord-odal", + "norddal", + "nordkapp", + "nordland", + "nordre-land", + "nordreisa", + "nore-og-uvdal", + "notodden", + "notteroy", + "nt", + "odda", + "of", + "oksnes", + "ol", + "omasvuotna", + "oppdal", + "oppegard", + "orkanger", + "orkdal", + "orland", + "orskog", + "orsta", + "osen", + "oslo", + "osoyro", + "osteroy", + "ostfold", + "ostre-toten", + "overhalla", + "ovre-eiker", + "oyer", + "oygarden", + "oystre-slidre", + "porsanger", + "porsangu", + "porsgrunn", + "priv", + "rade", + "radoy", + "rahkkeravju", + "raholt", + "raisa", + "rakkestad", + "ralingen", + "rana", + "randaberg", + "rauma", + "rendalen", + "rennebu", + "rennesoy", + "rindal", + "ringebu", + "ringerike", + "ringsaker", + "risor", + "rissa", + "rl", + "roan", + 
"rodoy", + "rollag", + "romsa", + "romskog", + "roros", + "rost", + "royken", + "royrvik", + "ruovat", + "rygge", + "salangen", + "salat", + "saltdal", + "samnanger", + "sandefjord", + "sandnes", + "sandnessjoen", + "sandoy", + "sarpsborg", + "sauda", + "sauherad", + "sel", + "selbu", + "selje", + "seljord", + "sf", + "siellak", + "sigdal", + "siljan", + "sirdal", + "skanit", + "skanland", + "skaun", + "skedsmo", + "skedsmokorset", + "ski", + "skien", + "skierva", + "skiptvet", + "skjak", + "skjervoy", + "skodje", + "slattum", + "smola", + "snaase", + "snasa", + "snillfjord", + "snoasa", + "sogndal", + "sogne", + "sokndal", + "sola", + "solund", + "somna", + "sondre-land", + "songdalen", + "sor-aurdal", + "sor-fron", + "sor-odal", + "sor-varanger", + "sorfold", + "sorreisa", + "sortland", + "sorum", + "spjelkavik", + "spydeberg", + "st", + "stange", + "stat", + "stathelle", + "stavanger", + "stavern", + "steigen", + "steinkjer", + "stjordal", + "stjordalshalsen", + "stokke", + "stor-elvdal", + "stord", + "stordal", + "storfjord", + "strand", + "stranda", + "stryn", + "sula", + "suldal", + "sund", + "sunndal", + "surnadal", + "svalbard", + "sveio", + "svelvik", + "sykkylven", + "tana", + "tananger", + "telemark", + "time", + "tingvoll", + "tinn", + "tjeldsund", + "tjome", + "tm", + "tokke", + "tolga", + "tonsberg", + "torsken", + "tr", + "trana", + "tranby", + "tranoy", + "troandin", + "trogstad", + "tromsa", + "tromso", + "trondheim", + "trysil", + "tvedestrand", + "tydal", + "tynset", + "tysfjord", + "tysnes", + "tysvar", + "ullensaker", + "ullensvang", + "ulvik", + "unjarga", + "utsira", + "va", + "vaapste", + "vadso", + "vaga", + "vagan", + "vagsoy", + "vaksdal", + "valle", + "vang", + "vanylven", + "vardo", + "varggat", + "varoy", + "vefsn", + "vega", + "vegarshei", + "vennesla", + "verdal", + "verran", + "vestby", + "vestfold", + "vestnes", + "vestre-slidre", + "vestre-toten", + "vestvagoy", + "vevelstad", + "vf", + "vgs", + "vik", + "vikna", + "vindafjord", + 
"voagat", + "volda", + "voss", + "vossevangen", + "xn--andy-ira", + "xn--asky-ira", + "xn--aurskog-hland-jnb", + "xn--avery-yua", + "xn--bdddj-mrabd", + "xn--bearalvhki-y4a", + "xn--berlevg-jxa", + "xn--bhcavuotna-s4a", + "xn--bhccavuotna-k7a", + "xn--bidr-5nac", + "xn--bievt-0qa", + "xn--bjarky-fya", + "xn--bjddar-pta", + "xn--blt-elab", + "xn--bmlo-gra", + "xn--bod-2na", + "xn--brnny-wuac", + "xn--brnnysund-m8ac", + "xn--brum-voa", + "xn--btsfjord-9za", + "xn--davvenjrga-y4a", + "xn--dnna-gra", + "xn--drbak-wua", + "xn--dyry-ira", + "xn--eveni-0qa01ga", + "xn--finny-yua", + "xn--fjord-lra", + "xn--fl-zia", + "xn--flor-jra", + "xn--frde-gra", + "xn--frna-woa", + "xn--frya-hra", + "xn--ggaviika-8ya47h", + "xn--gildeskl-g0a", + "xn--givuotna-8ya", + "xn--gjvik-wua", + "xn--gls-elac", + "xn--h-2fa", + "xn--hbmer-xqa", + "xn--hcesuolo-7ya35b", + "xn--hgebostad-g3a", + "xn--hmmrfeasta-s4ac", + "xn--hnefoss-q1a", + "xn--hobl-ira", + "xn--holtlen-hxa", + "xn--hpmir-xqa", + "xn--hyanger-q1a", + "xn--hylandet-54a", + "xn--indery-fya", + "xn--jlster-bya", + "xn--jrpeland-54a", + "xn--karmy-yua", + "xn--kfjord-iua", + "xn--klbu-woa", + "xn--koluokta-7ya57h", + "xn--krager-gya", + "xn--kranghke-b0a", + "xn--krdsherad-m8a", + "xn--krehamn-dxa", + "xn--krjohka-hwab49j", + "xn--ksnes-uua", + "xn--kvfjord-nxa", + "xn--kvitsy-fya", + "xn--kvnangen-k0a", + "xn--l-1fa", + "xn--laheadju-7ya", + "xn--langevg-jxa", + "xn--ldingen-q1a", + "xn--leagaviika-52b", + "xn--lesund-hua", + "xn--lgrd-poac", + "xn--lhppi-xqa", + "xn--linds-pra", + "xn--loabt-0qa", + "xn--lrdal-sra", + "xn--lrenskog-54a", + "xn--lt-liac", + "xn--lten-gra", + "xn--lury-ira", + "xn--mely-ira", + "xn--merker-kua", + "xn--mjndalen-64a", + "xn--mlatvuopmi-s4a", + "xn--mli-tla", + "xn--mlselv-iua", + "xn--moreke-jua", + "xn--mosjen-eya", + "xn--mot-tla", + "xn--mre-og-romsdal-qqb", + "xn--msy-ula0h", + "xn--mtta-vrjjat-k7af", + "xn--muost-0qa", + "xn--nmesjevuemie-tcba", + "xn--nry-yla5g", + "xn--nttery-byae", + 
"xn--nvuotna-hwa", + "xn--oppegrd-ixa", + "xn--ostery-fya", + "xn--osyro-wua", + "xn--porsgu-sta26f", + "xn--rady-ira", + "xn--rdal-poa", + "xn--rde-ula", + "xn--rdy-0nab", + "xn--rennesy-v1a", + "xn--rhkkervju-01af", + "xn--rholt-mra", + "xn--risa-5na", + "xn--risr-ira", + "xn--rland-uua", + "xn--rlingen-mxa", + "xn--rmskog-bya", + "xn--rros-gra", + "xn--rskog-uua", + "xn--rst-0na", + "xn--rsta-fra", + "xn--ryken-vua", + "xn--ryrvik-bya", + "xn--s-1fa", + "xn--sandnessjen-ogb", + "xn--sandy-yua", + "xn--seral-lra", + "xn--sgne-gra", + "xn--skierv-uta", + "xn--skjervy-v1a", + "xn--skjk-soa", + "xn--sknit-yqa", + "xn--sknland-fxa", + "xn--slat-5na", + "xn--slt-elab", + "xn--smla-hra", + "xn--smna-gra", + "xn--snase-nra", + "xn--sndre-land-0cb", + "xn--snes-poa", + "xn--snsa-roa", + "xn--sr-aurdal-l8a", + "xn--sr-fron-q1a", + "xn--sr-odal-q1a", + "xn--sr-varanger-ggb", + "xn--srfold-bya", + "xn--srreisa-q1a", + "xn--srum-gra", + "xn--stfold-9xa", + "xn--stjrdal-s1a", + "xn--stjrdalshalsen-sqb", + "xn--stre-toten-zcb", + "xn--tjme-hra", + "xn--tnsberg-q1a", + "xn--trany-yua", + "xn--trgstad-r1a", + "xn--trna-woa", + "xn--troms-zua", + "xn--tysvr-vra", + "xn--unjrga-rta", + "xn--vads-jra", + "xn--vard-jra", + "xn--vegrshei-c0a", + "xn--vestvgy-ixa6o", + "xn--vg-yiab", + "xn--vgan-qoa", + "xn--vgsy-qoa0j", + "xn--vre-eiker-k8a", + "xn--vrggt-xqad", + "xn--vry-yla5g", + "xn--yer-zna", + "xn--ygarden-p1a", + "xn--ystre-slidre-ujb", + "gs", + "gs", + "nes", + "gs", + "nes", + "gs", + "os", + "valer", + "xn--vler-qoa", + "gs", + "gs", + "os", + "gs", + "heroy", + "sande", + "gs", + "gs", + "bo", + "heroy", + "xn--b-5ga", + "xn--hery-ira", + "gs", + "gs", + "gs", + "gs", + "valer", + "gs", + "gs", + "gs", + "gs", + "bo", + "xn--b-5ga", + "gs", + "gs", + "gs", + "sande", + "gs", + "sande", + "xn--hery-ira", + "xn--vler-qoa", + "biz", + "com", + "edu", + "gov", + "info", + "net", + "org", + "merseine", + "mine", + "shacknet", + "ac", + "co", + "cri", + "geek", + "gen", + 
"govt", + "health", + "iwi", + "kiwi", + "maori", + "mil", + "net", + "org", + "parliament", + "school", + "xn--mori-qsa", + "blogspot", + "co", + "com", + "edu", + "gov", + "med", + "museum", + "net", + "org", + "pro", + "ae", + "blogdns", + "blogsite", + "bmoattachments", + "boldlygoingnowhere", + "cdn77", + "cdn77-secure", + "dnsalias", + "dnsdojo", + "doesntexist", + "dontexist", + "doomdns", + "dsmynas", + "duckdns", + "dvrdns", + "dynalias", + "dyndns", + "endofinternet", + "endoftheinternet", + "eu", + "familyds", + "from-me", + "game-host", + "gotdns", + "hk", + "hobby-site", + "homedns", + "homeftp", + "homelinux", + "homeunix", + "is-a-bruinsfan", + "is-a-candidate", + "is-a-celticsfan", + "is-a-chef", + "is-a-geek", + "is-a-knight", + "is-a-linux-user", + "is-a-patsfan", + "is-a-soxfan", + "is-found", + "is-lost", + "is-saved", + "is-very-bad", + "is-very-evil", + "is-very-good", + "is-very-nice", + "is-very-sweet", + "isa-geek", + "kicks-ass", + "misconfused", + "podzone", + "readmyblog", + "selfip", + "sellsyourhome", + "servebbs", + "serveftp", + "servegame", + "stuff-4-sale", + "us", + "webhop", + "za", + "c", + "rsc", + "origin", + "ssl", + "go", + "home", + "al", + "asso", + "at", + "au", + "be", + "bg", + "ca", + "cd", + "ch", + "cn", + "cy", + "cz", + "de", + "dk", + "edu", + "ee", + "es", + "fi", + "fr", + "gr", + "hr", + "hu", + "ie", + "il", + "in", + "int", + "is", + "it", + "jp", + "kr", + "lt", + "lu", + "lv", + "mc", + "me", + "mk", + "mt", + "my", + "net", + "ng", + "nl", + "no", + "nz", + "paris", + "pl", + "pt", + "q-a", + "ro", + "ru", + "se", + "si", + "sk", + "tr", + "uk", + "us", + "abo", + "ac", + "com", + "edu", + "gob", + "ing", + "med", + "net", + "nom", + "org", + "sld", + "blogspot", + "com", + "edu", + "gob", + "mil", + "net", + "nom", + "org", + "com", + "edu", + "org", + "com", + "edu", + "gov", + "i", + "mil", + "net", + "ngo", + "org", + "biz", + "com", + "edu", + "fam", + "gob", + "gok", + "gon", + "gop", + "gos", + 
"gov", + "info", + "net", + "org", + "web", + "agro", + "aid", + "art", + "atm", + "augustow", + "auto", + "babia-gora", + "bedzin", + "beskidy", + "bialowieza", + "bialystok", + "bielawa", + "bieszczady", + "biz", + "boleslawiec", + "bydgoszcz", + "bytom", + "cieszyn", + "co", + "com", + "czeladz", + "czest", + "dlugoleka", + "edu", + "elblag", + "elk", + "gda", + "gdansk", + "gdynia", + "gliwice", + "glogow", + "gmina", + "gniezno", + "gorlice", + "gov", + "grajewo", + "gsm", + "ilawa", + "info", + "jaworzno", + "jelenia-gora", + "jgora", + "kalisz", + "karpacz", + "kartuzy", + "kaszuby", + "katowice", + "kazimierz-dolny", + "kepno", + "ketrzyn", + "klodzko", + "kobierzyce", + "kolobrzeg", + "konin", + "konskowola", + "krakow", + "kutno", + "lapy", + "lebork", + "legnica", + "lezajsk", + "limanowa", + "lomza", + "lowicz", + "lubin", + "lukow", + "mail", + "malbork", + "malopolska", + "mazowsze", + "mazury", + "med", + "media", + "miasta", + "mielec", + "mielno", + "mil", + "mragowo", + "naklo", + "net", + "nieruchomosci", + "nom", + "nowaruda", + "nysa", + "olawa", + "olecko", + "olkusz", + "olsztyn", + "opoczno", + "opole", + "org", + "ostroda", + "ostroleka", + "ostrowiec", + "ostrowwlkp", + "pc", + "pila", + "pisz", + "podhale", + "podlasie", + "polkowice", + "pomorskie", + "pomorze", + "powiat", + "poznan", + "priv", + "prochowice", + "pruszkow", + "przeworsk", + "pulawy", + "radom", + "rawa-maz", + "realestate", + "rel", + "rybnik", + "rzeszow", + "sanok", + "sejny", + "sex", + "shop", + "sklep", + "skoczow", + "slask", + "slupsk", + "sopot", + "sos", + "sosnowiec", + "stalowa-wola", + "starachowice", + "stargard", + "suwalki", + "swidnica", + "swiebodzin", + "swinoujscie", + "szczecin", + "szczytno", + "szkola", + "targi", + "tarnobrzeg", + "tgory", + "tm", + "tourism", + "travel", + "turek", + "turystyka", + "tychy", + "ustka", + "walbrzych", + "warmia", + "warszawa", + "waw", + "wegrow", + "wielun", + "wlocl", + "wloclawek", + "wodzislaw", + "wolomin", + 
"wroc", + "wroclaw", + "zachpomor", + "zagan", + "zakopane", + "zarow", + "zgora", + "zgorzelec", + "ap", + "griw", + "ic", + "is", + "kmpsp", + "konsulat", + "kppsp", + "kwp", + "kwpsp", + "mup", + "mw", + "oirm", + "oum", + "pa", + "pinb", + "piw", + "po", + "psp", + "psse", + "pup", + "rzgw", + "sa", + "sdn", + "sko", + "so", + "sr", + "starostwo", + "ug", + "ugim", + "um", + "umig", + "upow", + "uppo", + "us", + "uw", + "uzs", + "wif", + "wiih", + "winb", + "wios", + "witd", + "wiw", + "wsa", + "wskr", + "wuoz", + "wzmiuw", + "zp", + "co", + "edu", + "gov", + "net", + "org", + "ac", + "biz", + "com", + "edu", + "est", + "gov", + "info", + "isla", + "name", + "net", + "org", + "pro", + "prof", + "aaa", + "aca", + "acct", + "avocat", + "bar", + "cpa", + "eng", + "jur", + "law", + "med", + "recht", + "com", + "edu", + "gov", + "net", + "org", + "plo", + "sec", + "blogspot", + "com", + "edu", + "gov", + "int", + "net", + "nome", + "org", + "publ", + "belau", + "co", + "ed", + "go", + "ne", + "or", + "com", + "coop", + "edu", + "gov", + "mil", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "sch", + "asso", + "blogspot", + "com", + "nom", + "arts", + "blogspot", + "com", + "firm", + "info", + "nom", + "nt", + "org", + "rec", + "store", + "tm", + "www", + "ac", + "blogspot", + "co", + "edu", + "gov", + "in", + "org", + "ac", + "adygeya", + "altai", + "amur", + "amursk", + "arkhangelsk", + "astrakhan", + "baikal", + "bashkiria", + "belgorod", + "bir", + "blogspot", + "bryansk", + "buryatia", + "cbg", + "chel", + "chelyabinsk", + "chita", + "chukotka", + "chuvashia", + "cmw", + "com", + "dagestan", + "dudinka", + "e-burg", + "edu", + "fareast", + "gov", + "grozny", + "int", + "irkutsk", + "ivanovo", + "izhevsk", + "jamal", + "jar", + "joshkar-ola", + "k-uralsk", + "kalmykia", + "kaluga", + "kamchatka", + "karelia", + "kazan", + "kchr", + "kemerovo", + "khabarovsk", + "khakassia", + "khv", + "kirov", + "kms", + "koenig", 
+ "komi", + "kostroma", + "krasnoyarsk", + "kuban", + "kurgan", + "kursk", + "kustanai", + "kuzbass", + "lipetsk", + "magadan", + "mari", + "mari-el", + "marine", + "mil", + "mordovia", + "msk", + "murmansk", + "mytis", + "nakhodka", + "nalchik", + "net", + "nkz", + "nnov", + "norilsk", + "nov", + "novosibirsk", + "nsk", + "omsk", + "orenburg", + "org", + "oryol", + "oskol", + "palana", + "penza", + "perm", + "pp", + "ptz", + "pyatigorsk", + "rnd", + "rubtsovsk", + "ryazan", + "sakhalin", + "samara", + "saratov", + "simbirsk", + "smolensk", + "snz", + "spb", + "stavropol", + "stv", + "surgut", + "syzran", + "tambov", + "tatarstan", + "test", + "tom", + "tomsk", + "tsaritsyn", + "tsk", + "tula", + "tuva", + "tver", + "tyumen", + "udm", + "udmurtia", + "ulan-ude", + "vdonsk", + "vladikavkaz", + "vladimir", + "vladivostok", + "volgograd", + "vologda", + "voronezh", + "vrn", + "vyatka", + "yakutia", + "yamal", + "yaroslavl", + "yekaterinburg", + "yuzhno-sakhalinsk", + "zgrad", + "ac", + "co", + "com", + "edu", + "gouv", + "gov", + "int", + "mil", + "net", + "com", + "edu", + "gov", + "med", + "net", + "org", + "pub", + "sch", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "info", + "med", + "net", + "org", + "tv", + "a", + "ac", + "b", + "bd", + "blogspot", + "brand", + "c", + "com", + "d", + "e", + "f", + "fh", + "fhsk", + "fhv", + "g", + "h", + "i", + "k", + "komforb", + "kommunalforbund", + "komvux", + "l", + "lanbib", + "m", + "n", + "naturbruksgymn", + "o", + "org", + "p", + "parti", + "pp", + "press", + "r", + "s", + "t", + "tm", + "u", + "w", + "x", + "y", + "z", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "per", + "com", + "gov", + "hashbang", + "mil", + "net", + "org", + "platform", + "blogspot", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "art", + "blogspot", + "com", + "edu", + "gouv", + "org", + "perso", + "univ", + "com", + "net", + "org", + "co", + 
"com", + "consulado", + "edu", + "embaixada", + "gov", + "mil", + "net", + "org", + "principe", + "saotome", + "store", + "adygeya", + "arkhangelsk", + "balashov", + "bashkiria", + "bryansk", + "dagestan", + "grozny", + "ivanovo", + "kalmykia", + "kaluga", + "karelia", + "khakassia", + "krasnodar", + "kurgan", + "lenug", + "mordovia", + "msk", + "murmansk", + "nalchik", + "nov", + "obninsk", + "penza", + "pokrovsk", + "sochi", + "spb", + "togliatti", + "troitsk", + "tula", + "tuva", + "vladikavkaz", + "vladimir", + "vologda", + "com", + "edu", + "gob", + "org", + "red", + "gov", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "ac", + "co", + "org", + "blogspot", + "ac", + "co", + "go", + "in", + "mi", + "net", + "or", + "ac", + "biz", + "co", + "com", + "edu", + "go", + "gov", + "int", + "mil", + "name", + "net", + "nic", + "org", + "test", + "web", + "gov", + "co", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "agrinet", + "com", + "defense", + "edunet", + "ens", + "fin", + "gov", + "ind", + "info", + "intl", + "mincom", + "nat", + "net", + "org", + "perso", + "rnrt", + "rns", + "rnu", + "tourism", + "turen", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "av", + "bbs", + "bel", + "biz", + "com", + "dr", + "edu", + "gen", + "gov", + "info", + "k12", + "kep", + "mil", + "name", + "nc", + "net", + "org", + "pol", + "tel", + "tv", + "web", + "blogspot", + "gov", + "aero", + "biz", + "co", + "com", + "coop", + "edu", + "gov", + "info", + "int", + "jobs", + "mobi", + "museum", + "name", + "net", + "org", + "pro", + "travel", + "better-than", + "dyndns", + "on-the-web", + "worse-than", + "blogspot", + "club", + "com", + "ebiz", + "edu", + "game", + "gov", + "idv", + "mil", + "net", + "org", + "xn--czrw28b", + "xn--uc0atv", + "xn--zf0ao64a", + "ac", + "co", + "go", + "hotel", + "info", + "me", + "mil", + "mobi", + "ne", + "or", + "sc", + "tv", + "biz", + "cherkassy", + "cherkasy", + "chernigov", + "chernihiv", + "chernivtsi", + 
"chernovtsy", + "ck", + "cn", + "co", + "com", + "cr", + "crimea", + "cv", + "dn", + "dnepropetrovsk", + "dnipropetrovsk", + "dominic", + "donetsk", + "dp", + "edu", + "gov", + "if", + "in", + "ivano-frankivsk", + "kh", + "kharkiv", + "kharkov", + "kherson", + "khmelnitskiy", + "khmelnytskyi", + "kiev", + "kirovograd", + "km", + "kr", + "krym", + "ks", + "kv", + "kyiv", + "lg", + "lt", + "lugansk", + "lutsk", + "lv", + "lviv", + "mk", + "mykolaiv", + "net", + "nikolaev", + "od", + "odesa", + "odessa", + "org", + "pl", + "poltava", + "pp", + "rivne", + "rovno", + "rv", + "sb", + "sebastopol", + "sevastopol", + "sm", + "sumy", + "te", + "ternopil", + "uz", + "uzhgorod", + "vinnica", + "vinnytsia", + "vn", + "volyn", + "yalta", + "zaporizhzhe", + "zaporizhzhia", + "zhitomir", + "zhytomyr", + "zp", + "zt", + "ac", + "blogspot", + "co", + "com", + "go", + "ne", + "or", + "org", + "sc", + "ac", + "co", + "gov", + "ltd", + "me", + "net", + "nhs", + "org", + "plc", + "police", + "sch", + "blogspot", + "service", + "ak", + "al", + "ar", + "as", + "az", + "ca", + "co", + "ct", + "dc", + "de", + "dni", + "fed", + "fl", + "ga", + "gu", + "hi", + "ia", + "id", + "il", + "in", + "is-by", + "isa", + "kids", + "ks", + "ky", + "la", + "land-4-sale", + "ma", + "md", + "me", + "mi", + "mn", + "mo", + "ms", + "mt", + "nc", + "nd", + "ne", + "nh", + "nj", + "nm", + "nsn", + "nv", + "ny", + "oh", + "ok", + "or", + "pa", + "pr", + "ri", + "sc", + "sd", + "stuff-4-sale", + "tn", + "tx", + "ut", + "va", + "vi", + "vt", + "wa", + "wi", + "wv", + "wy", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + 
"k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "chtr", + "paroch", + "pvt", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "cc", + "k12", + "lib", + "com", + "edu", + "gub", + "mil", + "net", + "org", + "blogspot", + "co", + "com", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "arts", + "co", + "com", + "e12", + "edu", + "firm", + "gob", + "gov", + "info", + "int", + "mil", + "net", + "org", + "rec", + "store", + "tec", + "web", + "co", + "com", + "k12", + "net", + "org", + "ac", + "biz", + "blogspot", + "com", + "edu", + "gov", + "health", + "info", + "int", + "name", + "net", + "org", + "pro", + "com", + "edu", + "net", + "org", + "com", + "dyndns", + "edu", + "gov", + "mypets", + "net", + "org", + "xn--80au", + "xn--90azh", + "xn--c1avg", + "xn--d1at", + "xn--o1ac", + "xn--o1ach", + "ac", + "agric", + "alt", + "co", + "edu", + "gov", + "grondar", + "law", + "mil", + "net", + "ngo", + "nis", + "nom", + "org", + "school", + "tm", + "web", + "blogspot", +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go index 9ee19362..dd67007d 100644 --- a/vendor/golang.org/x/net/trace/trace.go +++ 
b/vendor/golang.org/x/net/trace/trace.go @@ -60,7 +60,7 @@ The /debug/events HTTP endpoint organizes the event logs by family and by time since the last error. The expanded view displays recent log entries and the log's call stack. */ -package trace +package trace // import "golang.org/x/net/trace" import ( "bytes" diff --git a/vendor/golang.org/x/net/webdav/file.go b/vendor/golang.org/x/net/webdav/file.go new file mode 100644 index 00000000..3d95c6cb --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file.go @@ -0,0 +1,794 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "io" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "sync" + "time" +) + +// slashClean is equivalent to but slightly more efficient than +// path.Clean("/" + name). +func slashClean(name string) string { + if name == "" || name[0] != '/' { + name = "/" + name + } + return path.Clean(name) +} + +// A FileSystem implements access to a collection of named files. The elements +// in a file path are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +// +// Each method has the same semantics as the os package's function of the same +// name. +// +// Note that the os.Rename documentation says that "OS-specific restrictions +// might apply". In particular, whether or not renaming a file or directory +// overwriting another existing file or directory is an error is OS-dependent. +type FileSystem interface { + Mkdir(name string, perm os.FileMode) error + OpenFile(name string, flag int, perm os.FileMode) (File, error) + RemoveAll(name string) error + Rename(oldName, newName string) error + Stat(name string) (os.FileInfo, error) +} + +// A File is returned by a FileSystem's OpenFile method and can be served by a +// Handler. 
+// +// A File may optionally implement the DeadPropsHolder interface, if it can +// load and save dead properties. +type File interface { + http.File + io.Writer +} + +// A Dir implements FileSystem using the native file system restricted to a +// specific directory tree. +// +// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's +// string value is a filename on the native file system, not a URL, so it is +// separated by filepath.Separator, which isn't necessarily '/'. +// +// An empty Dir is treated as ".". +type Dir string + +func (d Dir) resolve(name string) string { + // This implementation is based on Dir.Open's code in the standard net/http package. + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return "" + } + dir := string(d) + if dir == "" { + dir = "." + } + return filepath.Join(dir, filepath.FromSlash(slashClean(name))) +} + +func (d Dir) Mkdir(name string, perm os.FileMode) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + return os.Mkdir(name, perm) +} + +func (d Dir) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + f, err := os.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + return f, nil +} + +func (d Dir) RemoveAll(name string) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + if name == filepath.Clean(string(d)) { + // Prohibit removing the virtual root directory. + return os.ErrInvalid + } + return os.RemoveAll(name) +} + +func (d Dir) Rename(oldName, newName string) error { + if oldName = d.resolve(oldName); oldName == "" { + return os.ErrNotExist + } + if newName = d.resolve(newName); newName == "" { + return os.ErrNotExist + } + if root := filepath.Clean(string(d)); root == oldName || root == newName { + // Prohibit renaming from or to the virtual root directory. 
+ return os.ErrInvalid + } + return os.Rename(oldName, newName) +} + +func (d Dir) Stat(name string) (os.FileInfo, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + return os.Stat(name) +} + +// NewMemFS returns a new in-memory FileSystem implementation. +func NewMemFS() FileSystem { + return &memFS{ + root: memFSNode{ + children: make(map[string]*memFSNode), + mode: 0660 | os.ModeDir, + modTime: time.Now(), + }, + } +} + +// A memFS implements FileSystem, storing all metadata and actual file data +// in-memory. No limits on filesystem size are used, so it is not recommended +// this be used where the clients are untrusted. +// +// Concurrent access is permitted. The tree structure is protected by a mutex, +// and each node's contents and metadata are protected by a per-node mutex. +// +// TODO: Enforce file permissions. +type memFS struct { + mu sync.Mutex + root memFSNode +} + +// TODO: clean up and rationalize the walk/find code. + +// walk walks the directory tree for the fullname, calling f at each step. If f +// returns an error, the walk will be aborted and return that same error. +// +// dir is the directory at that step, frag is the name fragment, and final is +// whether it is the final step. For example, walking "/foo/bar/x" will result +// in 3 calls to f: +// - "/", "foo", false +// - "/foo/", "bar", false +// - "/foo/bar/", "x", true +// The frag argument will be empty only if dir is the root node and the walk +// ends at that root node. +func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error { + original := fullname + fullname = slashClean(fullname) + + // Strip any leading "/"s to make fullname a relative path, as the walk + // starts at fs.root. 
+ if fullname[0] == '/' { + fullname = fullname[1:] + } + dir := &fs.root + + for { + frag, remaining := fullname, "" + i := strings.IndexRune(fullname, '/') + final := i < 0 + if !final { + frag, remaining = fullname[:i], fullname[i+1:] + } + if frag == "" && dir != &fs.root { + panic("webdav: empty path fragment for a clean path") + } + if err := f(dir, frag, final); err != nil { + return &os.PathError{ + Op: op, + Path: original, + Err: err, + } + } + if final { + break + } + child := dir.children[frag] + if child == nil { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrNotExist, + } + } + if !child.mode.IsDir() { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrInvalid, + } + } + dir, fullname = child, remaining + } + return nil +} + +// find returns the parent of the named node and the relative name fragment +// from the parent to the child. For example, if finding "/foo/bar/baz" then +// parent will be the node for "/foo/bar" and frag will be "baz". +// +// If the fullname names the root node, then parent, frag and err will be zero. +// +// find returns an error if the parent does not already exist or the parent +// isn't a directory, but it will not return an error per se if the child does +// not already exist. The error returned is either nil or an *os.PathError +// whose Op is op. +func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) { + err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error { + if !final { + return nil + } + if frag0 != "" { + parent, frag = parent0, frag0 + } + return nil + }) + return parent, frag, err +} + +func (fs *memFS) Mkdir(name string, perm os.FileMode) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("mkdir", name) + if err != nil { + return err + } + if dir == nil { + // We can't create the root. 
+ return os.ErrInvalid + } + if _, ok := dir.children[frag]; ok { + return os.ErrExist + } + dir.children[frag] = &memFSNode{ + children: make(map[string]*memFSNode), + mode: perm.Perm() | os.ModeDir, + modTime: time.Now(), + } + return nil +} + +func (fs *memFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("open", name) + if err != nil { + return nil, err + } + var n *memFSNode + if dir == nil { + // We're opening the root. + if flag&(os.O_WRONLY|os.O_RDWR) != 0 { + return nil, os.ErrPermission + } + n, frag = &fs.root, "/" + + } else { + n = dir.children[frag] + if flag&(os.O_SYNC|os.O_APPEND) != 0 { + // memFile doesn't support these flags yet. + return nil, os.ErrInvalid + } + if flag&os.O_CREATE != 0 { + if flag&os.O_EXCL != 0 && n != nil { + return nil, os.ErrExist + } + if n == nil { + n = &memFSNode{ + mode: perm.Perm(), + } + dir.children[frag] = n + } + } + if n == nil { + return nil, os.ErrNotExist + } + if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 { + n.mu.Lock() + n.data = nil + n.mu.Unlock() + } + } + + children := make([]os.FileInfo, 0, len(n.children)) + for cName, c := range n.children { + children = append(children, c.stat(cName)) + } + return &memFile{ + n: n, + nameSnapshot: frag, + childrenSnapshot: children, + }, nil +} + +func (fs *memFS) RemoveAll(name string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("remove", name) + if err != nil { + return err + } + if dir == nil { + // We can't remove the root. + return os.ErrInvalid + } + delete(dir.children, frag) + return nil +} + +func (fs *memFS) Rename(oldName, newName string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + oldName = slashClean(oldName) + newName = slashClean(newName) + if oldName == newName { + return nil + } + if strings.HasPrefix(newName, oldName+"/") { + // We can't rename oldName to be a sub-directory of itself. 
+ return os.ErrInvalid + } + + oDir, oFrag, err := fs.find("rename", oldName) + if err != nil { + return err + } + if oDir == nil { + // We can't rename from the root. + return os.ErrInvalid + } + + nDir, nFrag, err := fs.find("rename", newName) + if err != nil { + return err + } + if nDir == nil { + // We can't rename to the root. + return os.ErrInvalid + } + + oNode, ok := oDir.children[oFrag] + if !ok { + return os.ErrNotExist + } + if oNode.children != nil { + if nNode, ok := nDir.children[nFrag]; ok { + if nNode.children == nil { + return errNotADirectory + } + if len(nNode.children) != 0 { + return errDirectoryNotEmpty + } + } + } + delete(oDir.children, oFrag) + nDir.children[nFrag] = oNode + return nil +} + +func (fs *memFS) Stat(name string) (os.FileInfo, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("stat", name) + if err != nil { + return nil, err + } + if dir == nil { + // We're stat'ting the root. + return fs.root.stat("/"), nil + } + if n, ok := dir.children[frag]; ok { + return n.stat(path.Base(name)), nil + } + return nil, os.ErrNotExist +} + +// A memFSNode represents a single entry in the in-memory filesystem and also +// implements os.FileInfo. +type memFSNode struct { + // children is protected by memFS.mu. 
+ children map[string]*memFSNode + + mu sync.Mutex + data []byte + mode os.FileMode + modTime time.Time + deadProps map[xml.Name]Property +} + +func (n *memFSNode) stat(name string) *memFileInfo { + n.mu.Lock() + defer n.mu.Unlock() + return &memFileInfo{ + name: name, + size: int64(len(n.data)), + mode: n.mode, + modTime: n.modTime, + } +} + +func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) { + n.mu.Lock() + defer n.mu.Unlock() + if len(n.deadProps) == 0 { + return nil, nil + } + ret := make(map[xml.Name]Property, len(n.deadProps)) + for k, v := range n.deadProps { + ret[k] = v + } + return ret, nil +} + +func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) { + n.mu.Lock() + defer n.mu.Unlock() + pstat := Propstat{Status: http.StatusOK} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + if patch.Remove { + delete(n.deadProps, p.XMLName) + continue + } + if n.deadProps == nil { + n.deadProps = map[xml.Name]Property{} + } + n.deadProps[p.XMLName] = p + } + } + return []Propstat{pstat}, nil +} + +type memFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (f *memFileInfo) Name() string { return f.name } +func (f *memFileInfo) Size() int64 { return f.size } +func (f *memFileInfo) Mode() os.FileMode { return f.mode } +func (f *memFileInfo) ModTime() time.Time { return f.modTime } +func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() } +func (f *memFileInfo) Sys() interface{} { return nil } + +// A memFile is a File implementation for a memFSNode. It is a per-file (not +// per-node) read/write position, and a snapshot of the memFS' tree structure +// (a node's name and children) for that node. +type memFile struct { + n *memFSNode + nameSnapshot string + childrenSnapshot []os.FileInfo + // pos is protected by n.mu. + pos int +} + +// A *memFile implements the optional DeadPropsHolder interface. 
+var _ DeadPropsHolder = (*memFile)(nil) + +func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() } +func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) } + +func (f *memFile) Close() error { + return nil +} + +func (f *memFile) Read(p []byte) (int, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos >= len(f.n.data) { + return 0, io.EOF + } + n := copy(p, f.n.data[f.pos:]) + f.pos += n + return n, nil +} + +func (f *memFile) Readdir(count int) ([]os.FileInfo, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if !f.n.mode.IsDir() { + return nil, os.ErrInvalid + } + old := f.pos + if old >= len(f.childrenSnapshot) { + // The os.File Readdir docs say that at the end of a directory, + // the error is io.EOF if count > 0 and nil if count <= 0. + if count > 0 { + return nil, io.EOF + } + return nil, nil + } + if count > 0 { + f.pos += count + if f.pos > len(f.childrenSnapshot) { + f.pos = len(f.childrenSnapshot) + } + } else { + f.pos = len(f.childrenSnapshot) + old = 0 + } + return f.childrenSnapshot[old:f.pos], nil +} + +func (f *memFile) Seek(offset int64, whence int) (int64, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + npos := f.pos + // TODO: How to handle offsets greater than the size of system int? 
+ switch whence { + case os.SEEK_SET: + npos = int(offset) + case os.SEEK_CUR: + npos += int(offset) + case os.SEEK_END: + npos = len(f.n.data) + int(offset) + default: + npos = -1 + } + if npos < 0 { + return 0, os.ErrInvalid + } + f.pos = npos + return int64(f.pos), nil +} + +func (f *memFile) Stat() (os.FileInfo, error) { + return f.n.stat(f.nameSnapshot), nil +} + +func (f *memFile) Write(p []byte) (int, error) { + lenp := len(p) + f.n.mu.Lock() + defer f.n.mu.Unlock() + + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos < len(f.n.data) { + n := copy(f.n.data[f.pos:], p) + f.pos += n + p = p[n:] + } else if f.pos > len(f.n.data) { + // Write permits the creation of holes, if we've seek'ed past the + // existing end of file. + if f.pos <= cap(f.n.data) { + oldLen := len(f.n.data) + f.n.data = f.n.data[:f.pos] + hole := f.n.data[oldLen:] + for i := range hole { + hole[i] = 0 + } + } else { + d := make([]byte, f.pos, f.pos+len(p)) + copy(d, f.n.data) + f.n.data = d + } + } + + if len(p) > 0 { + // We should only get here if f.pos == len(f.n.data). + f.n.data = append(f.n.data, p...) + f.pos = len(f.n.data) + } + f.n.modTime = time.Now() + return lenp, nil +} + +// moveFiles moves files and/or directories from src to dst. +// +// See section 9.9.4 for when various HTTP status codes apply. +func moveFiles(fs FileSystem, src, dst string, overwrite bool) (status int, err error) { + created := false + if _, err := fs.Stat(dst); err != nil { + if !os.IsNotExist(err) { + return http.StatusForbidden, err + } + created = true + } else if overwrite { + // Section 9.9.3 says that "If a resource exists at the destination + // and the Overwrite header is "T", then prior to performing the move, + // the server must perform a DELETE with "Depth: infinity" on the + // destination resource. 
+ if err := fs.RemoveAll(dst); err != nil { + return http.StatusForbidden, err + } + } else { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.Rename(src, dst); err != nil { + return http.StatusForbidden, err + } + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +func copyProps(dst, src File) error { + d, ok := dst.(DeadPropsHolder) + if !ok { + return nil + } + s, ok := src.(DeadPropsHolder) + if !ok { + return nil + } + m, err := s.DeadProps() + if err != nil { + return err + } + props := make([]Property, 0, len(m)) + for _, prop := range m { + props = append(props, prop) + } + _, err = d.Patch([]Proppatch{{Props: props}}) + return err +} + +// copyFiles copies files and/or directories from src to dst. +// +// See section 9.8.5 for when various HTTP status codes apply. +func copyFiles(fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) { + if recursion == 1000 { + return http.StatusInternalServerError, errRecursionTooDeep + } + recursion++ + + // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/ + // into /A/B/ could lead to infinite recursion if not handled correctly." 
+ + srcFile, err := fs.OpenFile(src, os.O_RDONLY, 0) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + defer srcFile.Close() + srcStat, err := srcFile.Stat() + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + srcPerm := srcStat.Mode() & os.ModePerm + + created := false + if _, err := fs.Stat(dst); err != nil { + if os.IsNotExist(err) { + created = true + } else { + return http.StatusForbidden, err + } + } else { + if !overwrite { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.RemoveAll(dst); err != nil && !os.IsNotExist(err) { + return http.StatusForbidden, err + } + } + + if srcStat.IsDir() { + if err := fs.Mkdir(dst, srcPerm); err != nil { + return http.StatusForbidden, err + } + if depth == infiniteDepth { + children, err := srcFile.Readdir(-1) + if err != nil { + return http.StatusForbidden, err + } + for _, c := range children { + name := c.Name() + s := path.Join(src, name) + d := path.Join(dst, name) + cStatus, cErr := copyFiles(fs, s, d, overwrite, depth, recursion) + if cErr != nil { + // TODO: MultiStatus. + return cStatus, cErr + } + } + } + + } else { + dstFile, err := fs.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm) + if err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusForbidden, err + + } + _, copyErr := io.Copy(dstFile, srcFile) + propsErr := copyProps(dstFile, srcFile) + closeErr := dstFile.Close() + if copyErr != nil { + return http.StatusInternalServerError, copyErr + } + if propsErr != nil { + return http.StatusInternalServerError, propsErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + } + + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +// walkFS traverses filesystem fs starting at name up to depth levels. 
+// +// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node, +// walkFS calls walkFn. If a visited file system node is a directory and +// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node. +func walkFS(fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error { + // This implementation is based on Walk's code in the standard path/filepath package. + err := walkFn(name, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + if !info.IsDir() || depth == 0 { + return nil + } + if depth == 1 { + depth = 0 + } + + // Read directory names. + f, err := fs.OpenFile(name, os.O_RDONLY, 0) + if err != nil { + return walkFn(name, info, err) + } + fileInfos, err := f.Readdir(0) + f.Close() + if err != nil { + return walkFn(name, info, err) + } + + for _, fileInfo := range fileInfos { + filename := path.Join(name, fileInfo.Name()) + fileInfo, err := fs.Stat(filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walkFS(fs, depth, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/webdav/file_test.go b/vendor/golang.org/x/net/webdav/file_test.go new file mode 100644 index 00000000..7ce6c127 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_test.go @@ -0,0 +1,1166 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package webdav + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "testing" +) + +func TestSlashClean(t *testing.T) { + testCases := []string{ + "", + ".", + "/", + "/./", + "//", + "//.", + "//a", + "/a", + "/a/b/c", + "/a//b/./../c/d/", + "a", + "a/b/c", + } + for _, tc := range testCases { + got := slashClean(tc) + want := path.Clean("/" + tc) + if got != want { + t.Errorf("tc=%q: got %q, want %q", tc, got, want) + } + } +} + +func TestDirResolve(t *testing.T) { + testCases := []struct { + dir, name, want string + }{ + {"/", "", "/"}, + {"/", "/", "/"}, + {"/", ".", "/"}, + {"/", "./a", "/a"}, + {"/", "..", "/"}, + {"/", "..", "/"}, + {"/", "../", "/"}, + {"/", "../.", "/"}, + {"/", "../a", "/a"}, + {"/", "../..", "/"}, + {"/", "../bar/a", "/bar/a"}, + {"/", "../baz/a", "/baz/a"}, + {"/", "...", "/..."}, + {"/", ".../a", "/.../a"}, + {"/", ".../..", "/"}, + {"/", "a", "/a"}, + {"/", "a/./b", "/a/b"}, + {"/", "a/../../b", "/b"}, + {"/", "a/../b", "/b"}, + {"/", "a/b", "/a/b"}, + {"/", "a/b/c/../../d", "/a/d"}, + {"/", "a/b/c/../../../d", "/d"}, + {"/", "a/b/c/../../../../d", "/d"}, + {"/", "a/b/c/d", "/a/b/c/d"}, + + {"/foo/bar", "", "/foo/bar"}, + {"/foo/bar", "/", "/foo/bar"}, + {"/foo/bar", ".", "/foo/bar"}, + {"/foo/bar", "./a", "/foo/bar/a"}, + {"/foo/bar", "..", "/foo/bar"}, + {"/foo/bar", "../", "/foo/bar"}, + {"/foo/bar", "../.", "/foo/bar"}, + {"/foo/bar", "../a", "/foo/bar/a"}, + {"/foo/bar", "../..", "/foo/bar"}, + {"/foo/bar", "../bar/a", "/foo/bar/bar/a"}, + {"/foo/bar", "../baz/a", "/foo/bar/baz/a"}, + {"/foo/bar", "...", "/foo/bar/..."}, + {"/foo/bar", ".../a", "/foo/bar/.../a"}, + {"/foo/bar", ".../..", "/foo/bar"}, + {"/foo/bar", "a", "/foo/bar/a"}, + {"/foo/bar", "a/./b", "/foo/bar/a/b"}, + {"/foo/bar", "a/../../b", "/foo/bar/b"}, + {"/foo/bar", "a/../b", "/foo/bar/b"}, + {"/foo/bar", "a/b", "/foo/bar/a/b"}, + {"/foo/bar", "a/b/c/../../d", 
"/foo/bar/a/d"}, + {"/foo/bar", "a/b/c/../../../d", "/foo/bar/d"}, + {"/foo/bar", "a/b/c/../../../../d", "/foo/bar/d"}, + {"/foo/bar", "a/b/c/d", "/foo/bar/a/b/c/d"}, + + {"/foo/bar/", "", "/foo/bar"}, + {"/foo/bar/", "/", "/foo/bar"}, + {"/foo/bar/", ".", "/foo/bar"}, + {"/foo/bar/", "./a", "/foo/bar/a"}, + {"/foo/bar/", "..", "/foo/bar"}, + + {"/foo//bar///", "", "/foo/bar"}, + {"/foo//bar///", "/", "/foo/bar"}, + {"/foo//bar///", ".", "/foo/bar"}, + {"/foo//bar///", "./a", "/foo/bar/a"}, + {"/foo//bar///", "..", "/foo/bar"}, + + {"/x/y/z", "ab/c\x00d/ef", ""}, + + {".", "", "."}, + {".", "/", "."}, + {".", ".", "."}, + {".", "./a", "a"}, + {".", "..", "."}, + {".", "..", "."}, + {".", "../", "."}, + {".", "../.", "."}, + {".", "../a", "a"}, + {".", "../..", "."}, + {".", "../bar/a", "bar/a"}, + {".", "../baz/a", "baz/a"}, + {".", "...", "..."}, + {".", ".../a", ".../a"}, + {".", ".../..", "."}, + {".", "a", "a"}, + {".", "a/./b", "a/b"}, + {".", "a/../../b", "b"}, + {".", "a/../b", "b"}, + {".", "a/b", "a/b"}, + {".", "a/b/c/../../d", "a/d"}, + {".", "a/b/c/../../../d", "d"}, + {".", "a/b/c/../../../../d", "d"}, + {".", "a/b/c/d", "a/b/c/d"}, + + {"", "", "."}, + {"", "/", "."}, + {"", ".", "."}, + {"", "./a", "a"}, + {"", "..", "."}, + } + + for _, tc := range testCases { + d := Dir(filepath.FromSlash(tc.dir)) + if got := filepath.ToSlash(d.resolve(tc.name)); got != tc.want { + t.Errorf("dir=%q, name=%q: got %q, want %q", tc.dir, tc.name, got, tc.want) + } + } +} + +func TestWalk(t *testing.T) { + type walkStep struct { + name, frag string + final bool + } + + testCases := []struct { + dir string + want []walkStep + }{ + {"", []walkStep{ + {"", "", true}, + }}, + {"/", []walkStep{ + {"", "", true}, + }}, + {"/a", []walkStep{ + {"", "a", true}, + }}, + {"/a/", []walkStep{ + {"", "a", true}, + }}, + {"/a/b", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/c", []walkStep{ + 
{"", "a", false}, + {"a", "b", false}, + {"b", "c", true}, + }}, + // The following test case is the one mentioned explicitly + // in the method description. + {"/foo/bar/x", []walkStep{ + {"", "foo", false}, + {"foo", "bar", false}, + {"bar", "x", true}, + }}, + } + + for _, tc := range testCases { + fs := NewMemFS().(*memFS) + + parts := strings.Split(tc.dir, "/") + for p := 2; p < len(parts); p++ { + d := strings.Join(parts[:p], "/") + if err := fs.Mkdir(d, 0666); err != nil { + t.Errorf("tc.dir=%q: mkdir: %q: %v", tc.dir, d, err) + } + } + + i, prevFrag := 0, "" + err := fs.walk("test", tc.dir, func(dir *memFSNode, frag string, final bool) error { + got := walkStep{ + name: prevFrag, + frag: frag, + final: final, + } + want := tc.want[i] + + if got != want { + return fmt.Errorf("got %+v, want %+v", got, want) + } + i, prevFrag = i+1, frag + return nil + }) + if err != nil { + t.Errorf("tc.dir=%q: %v", tc.dir, err) + } + } +} + +// find appends to ss the names of the named file and its children. It is +// analogous to the Unix find command. +// +// The returned strings are not guaranteed to be in any particular order. +func find(ss []string, fs FileSystem, name string) ([]string, error) { + stat, err := fs.Stat(name) + if err != nil { + return nil, err + } + ss = append(ss, name) + if stat.IsDir() { + f, err := fs.OpenFile(name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + return nil, err + } + for _, c := range children { + ss, err = find(ss, fs, path.Join(name, c.Name())) + if err != nil { + return nil, err + } + } + } + return ss, nil +} + +func testFS(t *testing.T, fs FileSystem) { + errStr := func(err error) string { + switch { + case os.IsExist(err): + return "errExist" + case os.IsNotExist(err): + return "errNotExist" + case err != nil: + return "err" + } + return "ok" + } + + // The non-"find" non-"stat" test cases should change the file system state. 
The + // indentation of the "find"s and "stat"s helps distinguish such test cases. + testCases := []string{ + " stat / want dir", + " stat /a want errNotExist", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + "create /a A want ok", + " stat /a want 1", + "create /d/e EEE want errNotExist", + "mk-dir /a want errExist", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + " stat /d want dir", + "create /d/e EEE want ok", + " stat /d/e want 3", + " find / /a /d /d/e", + "create /d/f FFFF want ok", + "create /d/g GGGGGGG want ok", + "mk-dir /d/m want ok", + "mk-dir /d/m want errExist", + "create /d/m/p PPPPP want ok", + " stat /d/e want 3", + " stat /d/f want 4", + " stat /d/g want 7", + " stat /d/h want errNotExist", + " stat /d/m want dir", + " stat /d/m/p want 5", + " find / /a /d /d/e /d/f /d/g /d/m /d/m/p", + "rm-all /d want ok", + " stat /a want 1", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + " stat /d/f want errNotExist", + " stat /d/g want errNotExist", + " stat /d/m want errNotExist", + " stat /d/m/p want errNotExist", + " find / /a", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + "create /d/f FFFF want ok", + "rm-all /d/f want ok", + "mk-dir /d/m want ok", + "rm-all /z want ok", + "rm-all / want err", + "create /b BB want ok", + " stat / want dir", + " stat /a want 1", + " stat /b want 2", + " stat /c want errNotExist", + " stat /d want dir", + " stat /d/m want dir", + " find / /a /b /d /d/m", + "move__ o=F /b /c want ok", + " stat /b want errNotExist", + " stat /c want 2", + " stat /d/m want dir", + " stat /d/n want errNotExist", + " find / /a /c /d /d/m", + "move__ o=F /d/m /d/n want ok", + "create /d/n/q QQQQ want ok", + " stat /d/m want errNotExist", + " stat /d/n want dir", + " stat /d/n/q want 4", + "move__ o=F /d /d/n/z want err", + "move__ o=T /c /d/n/q want ok", + " stat /c want errNotExist", + " stat /d/n/q want 2", + " find / /a /d /d/n /d/n/q", + "create /d/n/r RRRRR want ok", + "mk-dir /u 
want ok", + "mk-dir /u/v want ok", + "move__ o=F /d/n /u want errExist", + "create /t TTTTTT want ok", + "move__ o=F /d/n /t want errExist", + "rm-all /t want ok", + "move__ o=F /d/n /t want ok", + " stat /d want dir", + " stat /d/n want errNotExist", + " stat /d/n/r want errNotExist", + " stat /t want dir", + " stat /t/q want 2", + " stat /t/r want 5", + " find / /a /d /t /t/q /t/r /u /u/v", + "move__ o=F /t / want errExist", + "move__ o=T /t /u/v want ok", + " stat /u/v/r want 5", + "move__ o=F / /z want err", + " find / /a /d /u /u/v /u/v/q /u/v/r", + " stat /a want 1", + " stat /b want errNotExist", + " stat /c want errNotExist", + " stat /u/v/r want 5", + "copy__ o=F d=0 /a /b want ok", + "copy__ o=T d=0 /a /c want ok", + " stat /a want 1", + " stat /b want 1", + " stat /c want 1", + " stat /u/v/r want 5", + "copy__ o=F d=0 /u/v/r /b want errExist", + " stat /b want 1", + "copy__ o=T d=0 /u/v/r /b want ok", + " stat /a want 1", + " stat /b want 5", + " stat /u/v/r want 5", + "rm-all /a want ok", + "rm-all /b want ok", + "mk-dir /u/v/w want ok", + "create /u/v/w/s SSSSSSSS want ok", + " stat /d want dir", + " stat /d/x want errNotExist", + " stat /d/y want errNotExist", + " stat /u/v/r want 5", + " stat /u/v/w/s want 8", + " find / /c /d /u /u/v /u/v/q /u/v/r /u/v/w /u/v/w/s", + "copy__ o=T d=0 /u/v /d/x want ok", + "copy__ o=T d=∞ /u/v /d/y want ok", + "rm-all /u want ok", + " stat /d/x want dir", + " stat /d/x/q want errNotExist", + " stat /d/x/r want errNotExist", + " stat /d/x/w want errNotExist", + " stat /d/x/w/s want errNotExist", + " stat /d/y want dir", + " stat /d/y/q want 2", + " stat /d/y/r want 5", + " stat /d/y/w want dir", + " stat /d/y/w/s want 8", + " stat /u want errNotExist", + " find / /c /d /d/x /d/y /d/y/q /d/y/r /d/y/w /d/y/w/s", + "copy__ o=F d=∞ /d/y /d/x want errExist", + } + + for i, tc := range testCases { + tc = strings.TrimSpace(tc) + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, 
tc) + } + op, arg := tc[:j], tc[j+1:] + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create": + parts := strings.Split(arg, " ") + if len(parts) != 4 || parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid write", i, tc) + } + f, opErr := fs.OpenFile(parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if got := errStr(opErr); got != parts[3] { + t.Fatalf("test case #%d %q: OpenFile: got %q (%v), want %q", i, tc, got, opErr, parts[3]) + } + if f != nil { + if _, err := f.Write([]byte(parts[1])); err != nil { + t.Fatalf("test case #%d %q: Write: %v", i, tc, err) + } + if err := f.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + } + + case "find": + got, err := find(nil, fs, "/") + if err != nil { + t.Fatalf("test case #%d %q: find: %v", i, tc, err) + } + sort.Strings(got) + want := strings.Split(arg, " ") + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %s\nwant %s", i, tc, got, want) + } + + case "copy__", "mk-dir", "move__", "rm-all", "stat": + nParts := 3 + switch op { + case "copy__": + nParts = 6 + case "move__": + nParts = 5 + } + parts := strings.Split(arg, " ") + if len(parts) != nParts { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + + got, opErr := "", error(nil) + switch op { + case "copy__": + depth := 0 + if parts[1] == "d=∞" { + depth = infiniteDepth + } + _, opErr = copyFiles(fs, parts[2], parts[3], parts[0] == "o=T", depth, 0) + case "mk-dir": + opErr = fs.Mkdir(parts[0], 0777) + case "move__": + _, opErr = moveFiles(fs, parts[1], parts[2], parts[0] == "o=T") + case "rm-all": + opErr = fs.RemoveAll(parts[0]) + case "stat": + var stat os.FileInfo + fileName := parts[0] + if stat, opErr = fs.Stat(fileName); opErr == nil { + if stat.IsDir() { + got = "dir" + } else { + got = strconv.Itoa(int(stat.Size())) + } + + if fileName == "/" { + // For a Dir FileSystem, the virtual file system root maps to a + // real file system name 
like "/tmp/webdav-test012345", which does + // not end with "/". We skip such cases. + } else if statName := stat.Name(); path.Base(fileName) != statName { + t.Fatalf("test case #%d %q: file name %q inconsistent with stat name %q", + i, tc, fileName, statName) + } + } + } + if got == "" { + got = errStr(opErr) + } + + if parts[len(parts)-2] != "want" { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + if want := parts[len(parts)-1]; got != want { + t.Fatalf("test case #%d %q: got %q (%v), want %q", i, tc, got, opErr, want) + } + } + } +} + +func TestDir(t *testing.T) { + switch runtime.GOOS { + case "nacl": + t.Skip("see golang.org/issue/12004") + case "plan9": + t.Skip("see golang.org/issue/11453") + } + + td, err := ioutil.TempDir("", "webdav-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + testFS(t, Dir(td)) +} + +func TestMemFS(t *testing.T) { + testFS(t, NewMemFS()) +} + +func TestMemFSRoot(t *testing.T) { + fs := NewMemFS() + for i := 0; i < 5; i++ { + stat, err := fs.Stat("/") + if err != nil { + t.Fatalf("i=%d: Stat: %v", i, err) + } + if !stat.IsDir() { + t.Fatalf("i=%d: Stat.IsDir is false, want true", i) + } + + f, err := fs.OpenFile("/", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("i=%d: OpenFile: %v", i, err) + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + t.Fatalf("i=%d: Readdir: %v", i, err) + } + if len(children) != i { + t.Fatalf("i=%d: got %d children, want %d", i, len(children), i) + } + + if _, err := f.Write(make([]byte, 1)); err == nil { + t.Fatalf("i=%d: Write: got nil error, want non-nil", i) + } + + if err := fs.Mkdir(fmt.Sprintf("/dir%d", i), 0777); err != nil { + t.Fatalf("i=%d: Mkdir: %v", i, err) + } + } +} + +func TestMemFileReaddir(t *testing.T) { + fs := NewMemFS() + if err := fs.Mkdir("/foo", 0777); err != nil { + t.Fatalf("Mkdir: %v", err) + } + readdir := func(count int) ([]os.FileInfo, error) { + f, err := fs.OpenFile("/foo", os.O_RDONLY, 0) + if err != nil { + 
t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + return f.Readdir(count) + } + if got, err := readdir(-1); len(got) != 0 || err != nil { + t.Fatalf("readdir(-1): got %d fileInfos with err=%v, want 0, <nil>", len(got), err) + } + if got, err := readdir(+1); len(got) != 0 || err != io.EOF { + t.Fatalf("readdir(+1): got %d fileInfos with err=%v, want 0, EOF", len(got), err) + } +} + +func TestMemFile(t *testing.T) { + testCases := []string{ + "wantData ", + "wantSize 0", + "write abc", + "wantData abc", + "write de", + "wantData abcde", + "wantSize 5", + "write 5*x", + "write 4*y+2*z", + "write 3*st", + "wantData abcdexxxxxyyyyzzststst", + "wantSize 22", + "seek set 4 want 4", + "write EFG", + "wantData abcdEFGxxxyyyyzzststst", + "wantSize 22", + "seek set 2 want 2", + "read cdEF", + "read Gx", + "seek cur 0 want 8", + "seek cur 2 want 10", + "seek cur -1 want 9", + "write J", + "wantData abcdEFGxxJyyyyzzststst", + "wantSize 22", + "seek cur -4 want 6", + "write ghijk", + "wantData abcdEFghijkyyyzzststst", + "wantSize 22", + "read yyyz", + "seek cur 0 want 15", + "write ", + "seek cur 0 want 15", + "read ", + "seek cur 0 want 15", + "seek end -3 want 19", + "write ZZ", + "wantData abcdEFghijkyyyzzstsZZt", + "wantSize 22", + "write 4*A", + "wantData abcdEFghijkyyyzzstsZZAAAA", + "wantSize 25", + "seek end 0 want 25", + "seek end -5 want 20", + "read Z+4*A", + "write 5*B", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB", + "wantSize 30", + "seek end 10 want 40", + "write C", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C", + "wantSize 41", + "write D", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD", + "wantSize 42", + "seek set 43 want 43", + "write E", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E", + "wantSize 44", + "seek set 0 want 0", + "write 5*123456789_", + "wantData 123456789_123456789_123456789_123456789_123456789_", + "wantSize 50", + "seek cur 0 want 50", + "seek cur -99 want err", + } + + const filename = "/foo" + fs := 
NewMemFS() + f, err := fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + // Expand an arg like "3*a+2*b" to "aaabb". + parts := strings.Split(arg, "+") + for j, part := range parts { + if k := strings.IndexByte(part, '*'); k >= 0 { + repeatCount, repeatStr := part[:k], part[k+1:] + n, err := strconv.Atoi(repeatCount) + if err != nil { + t.Fatalf("test case #%d %q: invalid repeat count %q", i, tc, repeatCount) + } + parts[j] = strings.Repeat(repeatStr, n) + } + } + arg = strings.Join(parts, "") + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "read": + buf := make([]byte, len(arg)) + if _, err := io.ReadFull(f, buf); err != nil { + t.Fatalf("test case #%d %q: ReadFull: %v", i, tc, err) + } + if got := string(buf); got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + + case "seek": + parts := strings.Split(arg, " ") + if len(parts) != 4 { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + + whence := 0 + switch parts[0] { + default: + t.Fatalf("test case #%d %q: invalid seek whence", i, tc) + case "set": + whence = os.SEEK_SET + case "cur": + whence = os.SEEK_CUR + case "end": + whence = os.SEEK_END + } + offset, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid offset %q", i, tc, parts[1]) + } + + if parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + if parts[3] == "err" { + _, err := f.Seek(int64(offset), whence) + if err == nil { + t.Fatalf("test case #%d %q: Seek returned nil error, want non-nil", i, tc) + } + } else { + got, err := f.Seek(int64(offset), whence) + if err != nil { + t.Fatalf("test case #%d %q: Seek: %v", i, tc, err) + } + want, err := 
strconv.Atoi(parts[3]) + if err != nil { + t.Fatalf("test case #%d %q: invalid want %q", i, tc, parts[3]) + } + if got != int64(want) { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + + case "write": + n, err := f.Write([]byte(arg)) + if err != nil { + t.Fatalf("test case #%d %q: write: %v", i, tc, err) + } + if n != len(arg) { + t.Fatalf("test case #%d %q: write returned %d bytes, want %d", i, tc, n, len(arg)) + } + + case "wantData": + g, err := fs.OpenFile(filename, os.O_RDONLY, 0666) + if err != nil { + t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err) + } + gotBytes, err := ioutil.ReadAll(g) + if err != nil { + t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err) + } + for i, c := range gotBytes { + if c == '\x00' { + gotBytes[i] = '.' + } + } + got := string(gotBytes) + if got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + if err := g.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + + case "wantSize": + n, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg) + } + fi, err := fs.Stat(filename) + if err != nil { + t.Fatalf("test case #%d %q: Stat: %v", i, tc, err) + } + if got, want := fi.Size(), int64(n); got != want { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + } +} + +// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a +// memFile doesn't allocate a new buffer for each of those N times. Otherwise, +// calling io.Copy(aMemFile, src) is likely to have quadratic complexity. 
+func TestMemFileWriteAllocs(t *testing.T) { + fs := NewMemFS() + f, err := fs.OpenFile("/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + a := testing.AllocsPerRun(100, func() { + f.Write(xxx) + }) + // AllocsPerRun returns an integral value, so we compare the rounded-down + // number to zero. + if a > 0 { + t.Fatalf("%v allocs per run, want 0", a) + } +} + +func BenchmarkMemFileWrite(b *testing.B) { + fs := NewMemFS() + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f, err := fs.OpenFile("/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + b.Fatalf("OpenFile: %v", err) + } + for j := 0; j < 100; j++ { + f.Write(xxx) + } + if err := f.Close(); err != nil { + b.Fatalf("Close: %v", err) + } + if err := fs.RemoveAll("/xxx"); err != nil { + b.Fatalf("RemoveAll: %v", err) + } + } +} + +func TestCopyMoveProps(t *testing.T) { + fs := NewMemFS() + create := func(name string) error { + f, err := fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + _, wErr := f.Write([]byte("contents")) + cErr := f.Close() + if wErr != nil { + return wErr + } + return cErr + } + patch := func(name string, patches ...Proppatch) error { + f, err := fs.OpenFile(name, os.O_RDWR, 0666) + if err != nil { + return err + } + _, pErr := f.(DeadPropsHolder).Patch(patches) + cErr := f.Close() + if pErr != nil { + return pErr + } + return cErr + } + props := func(name string) (map[xml.Name]Property, error) { + f, err := fs.OpenFile(name, os.O_RDWR, 0666) + if err != nil { + return nil, err + } + m, pErr := f.(DeadPropsHolder).DeadProps() + cErr := f.Close() + if pErr != nil { + return nil, pErr + } + if cErr != nil { + return nil, cErr + } + return m, nil + } + + p0 := Property{ + XMLName: xml.Name{Space: "x:", Local: "boat"}, + 
InnerXML: []byte("pea-green"), + } + p1 := Property{ + XMLName: xml.Name{Space: "x:", Local: "ring"}, + InnerXML: []byte("1 shilling"), + } + p2 := Property{ + XMLName: xml.Name{Space: "x:", Local: "spoon"}, + InnerXML: []byte("runcible"), + } + p3 := Property{ + XMLName: xml.Name{Space: "x:", Local: "moon"}, + InnerXML: []byte("light"), + } + + if err := create("/src"); err != nil { + t.Fatalf("create /src: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil { + t.Fatalf("patch /src +p0 +p1: %v", err) + } + if _, err := copyFiles(fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil { + t.Fatalf("copyFiles /src /tmp: %v", err) + } + if _, err := moveFiles(fs, "/tmp", "/dst", true); err != nil { + t.Fatalf("moveFiles /tmp /dst: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil { + t.Fatalf("patch /src -p0: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p2}}); err != nil { + t.Fatalf("patch /src +p2: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p1}, Remove: true}); err != nil { + t.Fatalf("patch /dst -p1: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p3}}); err != nil { + t.Fatalf("patch /dst +p3: %v", err) + } + + gotSrc, err := props("/src") + if err != nil { + t.Fatalf("props /src: %v", err) + } + wantSrc := map[xml.Name]Property{ + p1.XMLName: p1, + p2.XMLName: p2, + } + if !reflect.DeepEqual(gotSrc, wantSrc) { + t.Fatalf("props /src:\ngot %v\nwant %v", gotSrc, wantSrc) + } + + gotDst, err := props("/dst") + if err != nil { + t.Fatalf("props /dst: %v", err) + } + wantDst := map[xml.Name]Property{ + p0.XMLName: p0, + p3.XMLName: p3, + } + if !reflect.DeepEqual(gotDst, wantDst) { + t.Fatalf("props /dst:\ngot %v\nwant %v", gotDst, wantDst) + } +} + +func TestWalkFS(t *testing.T) { + testCases := []struct { + desc string + buildfs []string + startAt string + depth int + walkFn filepath.WalkFunc + want []string + 
}{{ + "just root", + []string{}, + "/", + infiniteDepth, + nil, + []string{ + "/", + }, + }, { + "infinite walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + infiniteDepth, + nil, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/d", + "/e", + "/f", + }, + }, { + "infinite walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/a", + infiniteDepth, + nil, + []string{ + "/a", + "/a/b", + "/a/b/c", + "/a/d", + }, + }, { + "depth 1 walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + 1, + nil, + []string{ + "/", + "/a", + "/e", + "/f", + }, + }, { + "depth 1 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 1, + nil, + []string{ + "/a/b", + "/a/b/c", + "/a/b/g", + }, + }, { + "depth 0 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk from file", + []string{ + "mkdir /a", + "touch /a/b", + "touch /a/c", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk with skipped subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + "touch /a/b/z", + }, + "/", + infiniteDepth, + func(path string, info os.FileInfo, err error) error { + if path == "/a/b/g" { + return filepath.SkipDir + } + return nil + }, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/b/z", + }, + }} + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", 
tc.desc, err) + } + var got []string + traceFn := func(path string, info os.FileInfo, err error) error { + if tc.walkFn != nil { + err = tc.walkFn(path, info, err) + if err != nil { + return err + } + } + got = append(got, path) + return nil + } + fi, err := fs.Stat(tc.startAt) + if err != nil { + t.Fatalf("%s: cannot stat: %v", tc.desc, err) + } + err = walkFS(fs, tc.depth, tc.startAt, fi, traceFn) + if err != nil { + t.Errorf("%s:\ngot error %v, want nil", tc.desc, err) + continue + } + sort.Strings(got) + sort.Strings(tc.want) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %q\nwant %q", tc.desc, got, tc.want) + continue + } + } +} + +func buildTestFS(buildfs []string) (FileSystem, error) { + // TODO: Could this be merged with the build logic in TestFS? + + fs := NewMemFS() + for _, b := range buildfs { + op := strings.Split(b, " ") + switch op[0] { + case "mkdir": + err := fs.Mkdir(op[1], os.ModeDir|0777) + if err != nil { + return nil, err + } + case "touch": + f, err := fs.OpenFile(op[1], os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + f.Close() + case "write": + f, err := fs.OpenFile(op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return nil, err + } + _, err = f.Write([]byte(op[2])) + f.Close() + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown file operation %q", op[0]) + } + } + return fs, nil +} diff --git a/vendor/golang.org/x/net/webdav/if.go b/vendor/golang.org/x/net/webdav/if.go new file mode 100644 index 00000000..416e81cd --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +// The If header is covered by Section 10.4. +// http://www.webdav.org/specs/rfc4918.html#HEADER_If + +import ( + "strings" +) + +// ifHeader is a disjunction (OR) of ifLists. 
+type ifHeader struct { + lists []ifList +} + +// ifList is a conjunction (AND) of Conditions, and an optional resource tag. +type ifList struct { + resourceTag string + conditions []Condition +} + +// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string +// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is +// returned by req.Header.Get("If") for a http.Request req. +func parseIfHeader(httpHeader string) (h ifHeader, ok bool) { + s := strings.TrimSpace(httpHeader) + switch tokenType, _, _ := lex(s); tokenType { + case '(': + return parseNoTagLists(s) + case angleTokenType: + return parseTaggedLists(s) + default: + return ifHeader{}, false + } +} + +func parseNoTagLists(s string) (h ifHeader, ok bool) { + for { + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + } +} + +func parseTaggedLists(s string) (h ifHeader, ok bool) { + resourceTag, n := "", 0 + for first := true; ; first = false { + tokenType, tokenStr, remaining := lex(s) + switch tokenType { + case angleTokenType: + if !first && n == 0 { + return ifHeader{}, false + } + resourceTag, n = tokenStr, 0 + s = remaining + case '(': + n++ + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + l.resourceTag = resourceTag + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + default: + return ifHeader{}, false + } + } +} + +func parseList(s string) (l ifList, remaining string, ok bool) { + tokenType, _, s := lex(s) + if tokenType != '(' { + return ifList{}, "", false + } + for { + tokenType, _, remaining = lex(s) + if tokenType == ')' { + if len(l.conditions) == 0 { + return ifList{}, "", false + } + return l, remaining, true + } + c, remaining, ok := parseCondition(s) + if !ok { + return ifList{}, "", false + } + l.conditions = append(l.conditions, c) + s = remaining + } +} + +func 
parseCondition(s string) (c Condition, remaining string, ok bool) { + tokenType, tokenStr, s := lex(s) + if tokenType == notTokenType { + c.Not = true + tokenType, tokenStr, s = lex(s) + } + switch tokenType { + case strTokenType, angleTokenType: + c.Token = tokenStr + case squareTokenType: + c.ETag = tokenStr + default: + return Condition{}, "", false + } + return c, s, true +} + +// Single-rune tokens like '(' or ')' have a token type equal to their rune. +// All other tokens have a negative token type. +const ( + errTokenType = rune(-1) + eofTokenType = rune(-2) + strTokenType = rune(-3) + notTokenType = rune(-4) + angleTokenType = rune(-5) + squareTokenType = rune(-6) +) + +func lex(s string) (tokenType rune, tokenStr string, remaining string) { + // The net/textproto Reader that parses the HTTP header will collapse + // Linear White Space that spans multiple "\r\n" lines to a single " ", + // so we don't need to look for '\r' or '\n'. + for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') { + s = s[1:] + } + if len(s) == 0 { + return eofTokenType, "", "" + } + i := 0 +loop: + for ; i < len(s); i++ { + switch s[i] { + case '\t', ' ', '(', ')', '<', '>', '[', ']': + break loop + } + } + + if i != 0 { + tokenStr, remaining = s[:i], s[i:] + if tokenStr == "Not" { + return notTokenType, "", remaining + } + return strTokenType, tokenStr, remaining + } + + j := 0 + switch s[0] { + case '<': + j, tokenType = strings.IndexByte(s, '>'), angleTokenType + case '[': + j, tokenType = strings.IndexByte(s, ']'), squareTokenType + default: + return rune(s[0]), "", s[1:] + } + if j < 0 { + return errTokenType, "", "" + } + return tokenType, s[1:j], s[j+1:] +} diff --git a/vendor/golang.org/x/net/webdav/if_test.go b/vendor/golang.org/x/net/webdav/if_test.go new file mode 100644 index 00000000..aad61a40 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if_test.go @@ -0,0 +1,322 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "reflect" + "strings" + "testing" +) + +func TestParseIfHeader(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + want ifHeader + }{{ + "bad: empty", + ``, + ifHeader{}, + }, { + "bad: no parens", + `foobar`, + ifHeader{}, + }, { + "bad: empty list #1", + `()`, + ifHeader{}, + }, { + "bad: empty list #2", + `(a) (b c) () (d)`, + ifHeader{}, + }, { + "bad: no list after resource #1", + `<foo>`, + ifHeader{}, + }, { + "bad: no list after resource #2", + `<foo> <bar> (a)`, + ifHeader{}, + }, { + "bad: no list after resource #3", + `<foo> (a) (b) <bar>`, + ifHeader{}, + }, { + "bad: no-tag-list followed by tagged-list", + `(a) (b) <foo> (c)`, + ifHeader{}, + }, { + "bad: unfinished list", + `(a`, + ifHeader{}, + }, { + "bad: unfinished ETag", + `([b`, + ifHeader{}, + }, { + "bad: unfinished Notted list", + `(Not a`, + ifHeader{}, + }, { + "bad: double Not", + `(Not Not a)`, + ifHeader{}, + }, { + "good: one list with a Token", + `(a)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }}, + }, + }, { + "good: one list with an ETag", + `([a])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + ETag: `a`, + }}, + }}, + }, + }, { + "good: one list with three Nots", + `(Not a Not b Not [d])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }, { + Not: true, + Token: `b`, + }, { + Not: true, + ETag: `d`, + }}, + }}, + }, + }, { + "good: two lists", + `(a) (b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Token: `b`, + }}, + }}, + }, + }, { + "good: two Notted lists", + `(Not a) (Not b)`, + ifHeader{ + lists: []ifList{{ + conditions: 
[]Condition{{ + Not: true, + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `b`, + }}, + }}, + }, + }, { + "section 7.5.1", + `<http://www.example.com/users/f/fielding/index.html> + (<urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6>)`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/users/f/fielding/index.html`, + conditions: []Condition{{ + Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`, + }}, + }}, + }, + }, { + "section 7.5.2 #1", + `(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #2", + `<http://example.com/locked/> + (<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #3", + `<http://example.com/locked/member> + (<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/member`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 9.9.6", + `(<urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4>) + (<urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77>)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`, + }}, + }, { + conditions: []Condition{{ + Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`, + }}, + }}, + }, + }, { + "section 9.10.8", + `(<urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4>)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`, + }}, + }}, + }, + }, { + "section 10.4.6", + `(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2> + ["I am an ETag"]) + (["I am another ETag"])`, + 
ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `"I am an ETag"`, + }}, + }, { + conditions: []Condition{{ + ETag: `"I am another ETag"`, + }}, + }}, + }, + }, { + "section 10.4.7", + `(Not <urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2> + <urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092>)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`, + }}, + }}, + }, + }, { + "section 10.4.8", + `(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>) + (Not <DAV:no-lock>)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `DAV:no-lock`, + }}, + }}, + }, + }, { + "section 10.4.9", + `</resource1> + (<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2> + [W/"A weak ETag"]) (["strong ETag"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/resource1`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `W/"A weak ETag"`, + }}, + }, { + resourceTag: `/resource1`, + conditions: []Condition{{ + ETag: `"strong ETag"`, + }}, + }}, + }, + }, { + "section 10.4.10", + `<http://www.example.com/specs/> + (<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/specs/`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }}, + }, + }, { + "section 10.4.11 #1", + `</specs/rfc2518.doc> (["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + ETag: `"4217"`, + }}, + }}, + }, + }, { + "section 10.4.11 #2", + `</specs/rfc2518.doc> (Not ["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: 
[]Condition{{ + Not: true, + ETag: `"4217"`, + }}, + }}, + }, + }} + + for _, tc := range testCases { + got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1)) + if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok { + t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok) + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %v\nwant %v", tc.desc, got, tc.want) + continue + } + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/README b/vendor/golang.org/x/net/webdav/internal/xml/README new file mode 100644 index 00000000..89656f48 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/README @@ -0,0 +1,11 @@ +This is a fork of the encoding/xml package at ca1d6c4, the last commit before +https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name +space behavior" made late in the lead-up to the Go 1.5 release. + +The list of encoding/xml changes is at +https://go.googlesource.com/go/+log/master/src/encoding/xml + +This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is +released. + +See http://golang.org/issue/11841 diff --git a/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go new file mode 100644 index 00000000..a7128431 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xml + +import "time" + +var atomValue = &Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Example Feed", + Link: []Link{{Href: "http://example.org/"}}, + Updated: ParseTime("2003-12-13T18:30:02Z"), + Author: Person{Name: "John Doe"}, + Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6", + + Entry: []Entry{ + { + Title: "Atom-Powered Robots Run Amok", + Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}}, + Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a", + Updated: ParseTime("2003-12-13T18:30:02Z"), + Summary: NewText("Some text."), + }, + }, +} + +var atomXml = `` + + `<feed xmlns="http://www.w3.org/2005/Atom" updated="2003-12-13T18:30:02Z">` + + `<title>Example Feed</title>` + + `<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>` + + `<link href="http://example.org/"></link>` + + `<author><name>John Doe</name><uri></uri><email></email></author>` + + `<entry>` + + `<title>Atom-Powered Robots Run Amok</title>` + + `<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>` + + `<link href="http://example.org/2003/12/13/atom03"></link>` + + `<updated>2003-12-13T18:30:02Z</updated>` + + `<author><name></name><uri></uri><email></email></author>` + + `<summary>Some text.</summary>` + + `</entry>` + + `</feed>` + +func ParseTime(str string) time.Time { + t, err := time.Parse(time.RFC3339, str) + if err != nil { + panic(err) + } + return t +} + +func NewText(text string) Text { + return Text{ + Body: text, + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/example_test.go b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go new file mode 100644 index 00000000..becedd58 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go @@ -0,0 +1,151 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xml_test + +import ( + "encoding/xml" + "fmt" + "os" +) + +func ExampleMarshalIndent() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + output, err := xml.MarshalIndent(v, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + os.Stdout.Write(output) + // Output: + // <person id="13"> + // <name> + // <first>John</first> + // <last>Doe</last> + // </name> + // <age>42</age> + // <Married>false</Married> + // <City>Hanga Roa</City> + // <State>Easter Island</State> + // <!-- Need more details. --> + // </person> +} + +func ExampleEncoder() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + enc := xml.NewEncoder(os.Stdout) + enc.Indent(" ", " ") + if err := enc.Encode(v); err != nil { + fmt.Printf("error: %v\n", err) + } + + // Output: + // <person id="13"> + // <name> + // <first>John</first> + // <last>Doe</last> + // </name> + // <age>42</age> + // <Married>false</Married> + // <City>Hanga Roa</City> + // <State>Easter Island</State> + // <!-- Need more details. 
--> + // </person> +} + +// This example demonstrates unmarshaling an XML excerpt into a value with +// some preset fields. Note that the Phone field isn't modified and that +// the XML <Company> element is ignored. Also, the Groups field is assigned +// considering the element path provided in its tag. +func ExampleUnmarshal() { + type Email struct { + Where string `xml:"where,attr"` + Addr string + } + type Address struct { + City, State string + } + type Result struct { + XMLName xml.Name `xml:"Person"` + Name string `xml:"FullName"` + Phone string + Email []Email + Groups []string `xml:"Group>Value"` + Address + } + v := Result{Name: "none", Phone: "none"} + + data := ` + <Person> + <FullName>Grace R. Emlin</FullName> + <Company>Example Inc.</Company> + <Email where="home"> + <Addr>gre@example.com</Addr> + </Email> + <Email where='work'> + <Addr>gre@work.com</Addr> + </Email> + <Group> + <Value>Friends</Value> + <Value>Squash</Value> + </Group> + <City>Hanga Roa</City> + <State>Easter Island</State> + </Person> + ` + err := xml.Unmarshal([]byte(data), &v) + if err != nil { + fmt.Printf("error: %v", err) + return + } + fmt.Printf("XMLName: %#v\n", v.XMLName) + fmt.Printf("Name: %q\n", v.Name) + fmt.Printf("Phone: %q\n", v.Phone) + fmt.Printf("Email: %v\n", v.Email) + fmt.Printf("Groups: %v\n", v.Groups) + fmt.Printf("Address: %v\n", v.Address) + // Output: + // XMLName: xml.Name{Space:"", Local:"Person"} + // Name: "Grace R. Emlin" + // Phone: "none" + // Email: [{home gre@example.com} {work gre@work.com}] + // Groups: [Friends Squash] + // Address: {Hanga Roa Easter Island} +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go new file mode 100644 index 00000000..3c3b6aca --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go @@ -0,0 +1,1223 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bufio" + "bytes" + "encoding" + "fmt" + "io" + "reflect" + "strconv" + "strings" +) + +const ( + // A generic XML header suitable for use with the output of Marshal. + // This is not automatically added to any output of this package, + // it is provided as a convenience. + Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n" +) + +// Marshal returns the XML encoding of v. +// +// Marshal handles an array or slice by marshalling each of the elements. +// Marshal handles a pointer by marshalling the value it points at or, if the +// pointer is nil, by writing nothing. Marshal handles an interface value by +// marshalling the value it contains or, if the interface value is nil, by +// writing nothing. Marshal handles all other data by writing one or more XML +// elements containing the data. +// +// The name for the XML elements is taken from, in order of preference: +// - the tag on the XMLName field, if the data is a struct +// - the value of the XMLName field of type xml.Name +// - the tag of the struct field used to obtain the data +// - the name of the struct field used to obtain the data +// - the name of the marshalled type +// +// The XML element for a struct contains marshalled elements for each of the +// exported fields of the struct, with these exceptions: +// - the XMLName field, described above, is omitted. +// - a field with tag "-" is omitted. +// - a field with tag "name,attr" becomes an attribute with +// the given name in the XML element. +// - a field with tag ",attr" becomes an attribute with the +// field name in the XML element. +// - a field with tag ",chardata" is written as character data, +// not as an XML element. +// - a field with tag ",innerxml" is written verbatim, not subject +// to the usual marshalling procedure. 
+// - a field with tag ",comment" is written as an XML comment, not +// subject to the usual marshalling procedure. It must not contain +// the "--" string within it. +// - a field with a tag including the "omitempty" option is omitted +// if the field value is empty. The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or +// string of length zero. +// - an anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// If a field uses a tag "a>b>c", then the element c will be nested inside +// parent elements a and b. Fields that appear next to each other that name +// the same parent will be enclosed in one XML element. +// +// See MarshalIndent for an example. +// +// Marshal will return an error if asked to marshal a channel, function, or map. +func Marshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + if err := NewEncoder(&b).Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves into valid XML elements. +// +// MarshalXML encodes the receiver as zero or more XML elements. +// By convention, arrays or slices are typically encoded as a sequence +// of elements, one per entry. +// Using start as the element tag is not required, but doing so +// will enable Unmarshal to match the XML elements to the correct +// struct field. +// One common implementation strategy is to construct a separate +// value with a layout corresponding to the desired XML and then +// to encode it using e.EncodeElement. +// Another common strategy is to use repeated calls to e.EncodeToken +// to generate the XML output one token at a time. +// The sequence of encoded tokens must make up zero or more valid +// XML elements. 
+type Marshaler interface { + MarshalXML(e *Encoder, start StartElement) error +} + +// MarshalerAttr is the interface implemented by objects that can marshal +// themselves into valid XML attributes. +// +// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. +// Using name as the attribute name is not required, but doing so +// will enable Unmarshal to match the attribute to the correct +// struct field. +// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute +// will be generated in the output. +// MarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type MarshalerAttr interface { + MarshalXMLAttr(name Name) (Attr, error) +} + +// MarshalIndent works like Marshal, but each XML element begins on a new +// indented line that starts with prefix and is followed by one or more +// copies of indent according to the nesting depth. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + var b bytes.Buffer + enc := NewEncoder(&b) + enc.Indent(prefix, indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// An Encoder writes XML data to an output stream. +type Encoder struct { + p printer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{printer{Writer: bufio.NewWriter(w)}} + e.p.encoder = e + return e +} + +// Indent sets the encoder to generate XML in which each element +// begins on a new indented line that starts with prefix and is followed by +// one or more copies of indent according to the nesting depth. +func (enc *Encoder) Indent(prefix, indent string) { + enc.p.prefix = prefix + enc.p.indent = indent +} + +// Encode writes the XML encoding of v to the stream. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// Encode calls Flush before returning. 
+func (enc *Encoder) Encode(v interface{}) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil) + if err != nil { + return err + } + return enc.p.Flush() +} + +// EncodeElement writes the XML encoding of v to the stream, +// using start as the outermost tag in the encoding. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// EncodeElement calls Flush before returning. +func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start) + if err != nil { + return err + } + return enc.p.Flush() +} + +var ( + begComment = []byte("<!--") + endComment = []byte("-->") + endProcInst = []byte("?>") + endDirective = []byte(">") +) + +// EncodeToken writes the given XML token to the stream. +// It returns an error if StartElement and EndElement tokens are not +// properly matched. +// +// EncodeToken does not call Flush, because usually it is part of a +// larger operation such as Encode or EncodeElement (or a custom +// Marshaler's MarshalXML invoked during those), and those will call +// Flush when finished. Callers that create an Encoder and then invoke +// EncodeToken directly, without using Encode or EncodeElement, need to +// call Flush when finished to ensure that the XML is written to the +// underlying writer. +// +// EncodeToken allows writing a ProcInst with Target set to "xml" only +// as the first token in the stream. +// +// When encoding a StartElement holding an XML namespace prefix +// declaration for a prefix that is not already declared, contained +// elements (including the StartElement itself) will use the declared +// prefix when encoding names with matching namespace URIs. 
+func (enc *Encoder) EncodeToken(t Token) error { + + p := &enc.p + switch t := t.(type) { + case StartElement: + if err := p.writeStart(&t); err != nil { + return err + } + case EndElement: + if err := p.writeEnd(t.Name); err != nil { + return err + } + case CharData: + escapeText(p, t, false) + case Comment: + if bytes.Contains(t, endComment) { + return fmt.Errorf("xml: EncodeToken of Comment containing --> marker") + } + p.WriteString("<!--") + p.Write(t) + p.WriteString("-->") + return p.cachedWriteError() + case ProcInst: + // First token to be encoded which is also a ProcInst with target of xml + // is the xml declaration. The only ProcInst where target of xml is allowed. + if t.Target == "xml" && p.Buffered() != 0 { + return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") + } + if !isNameString(t.Target) { + return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target") + } + if bytes.Contains(t.Inst, endProcInst) { + return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker") + } + p.WriteString("<?") + p.WriteString(t.Target) + if len(t.Inst) > 0 { + p.WriteByte(' ') + p.Write(t.Inst) + } + p.WriteString("?>") + case Directive: + if !isValidDirective(t) { + return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers") + } + p.WriteString("<!") + p.Write(t) + p.WriteString(">") + default: + return fmt.Errorf("xml: EncodeToken of invalid token type") + + } + return p.cachedWriteError() +} + +// isValidDirective reports whether dir is a valid directive text, +// meaning angle brackets are matched, ignoring comments and strings. 
+func isValidDirective(dir Directive) bool { + var ( + depth int + inquote uint8 + incomment bool + ) + for i, c := range dir { + switch { + case incomment: + if c == '>' { + if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) { + incomment = false + } + } + // Just ignore anything in comment + case inquote != 0: + if c == inquote { + inquote = 0 + } + // Just ignore anything within quotes + case c == '\'' || c == '"': + inquote = c + case c == '<': + if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) { + incomment = true + } else { + depth++ + } + case c == '>': + if depth == 0 { + return false + } + depth-- + } + } + return depth == 0 && inquote == 0 && !incomment +} + +// Flush flushes any buffered XML to the underlying writer. +// See the EncodeToken documentation for details about when it is necessary. +func (enc *Encoder) Flush() error { + return enc.p.Flush() +} + +type printer struct { + *bufio.Writer + encoder *Encoder + seq int + indent string + prefix string + depth int + indentedIn bool + putNewline bool + defaultNS string + attrNS map[string]string // map prefix -> name space + attrPrefix map[string]string // map name space -> prefix + prefixes []printerPrefix + tags []Name +} + +// printerPrefix holds a namespace undo record. +// When an element is popped, the prefix record +// is set back to the recorded URL. The empty +// prefix records the URL for the default name space. +// +// The start of an element is recorded with an element +// that has mark=true. +type printerPrefix struct { + prefix string + url string + mark bool +} + +func (p *printer) prefixForNS(url string, isAttr bool) string { + // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml" + // and must be referred to that way. + // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns", + // but users should not be trying to use that one directly - that's our job.) 
+ if url == xmlURL { + return "xml" + } + if !isAttr && url == p.defaultNS { + // We can use the default name space. + return "" + } + return p.attrPrefix[url] +} + +// defineNS pushes any namespace definition found in the given attribute. +// If ignoreNonEmptyDefault is true, an xmlns="nonempty" +// attribute will be ignored. +func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error { + var prefix string + if attr.Name.Local == "xmlns" { + if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL { + return fmt.Errorf("xml: cannot redefine xmlns attribute prefix") + } + } else if attr.Name.Space == "xmlns" && attr.Name.Local != "" { + prefix = attr.Name.Local + if attr.Value == "" { + // Technically, an empty XML namespace is allowed for an attribute. + // From http://www.w3.org/TR/xml-names11/#scoping-defaulting: + // + // The attribute value in a namespace declaration for a prefix may be + // empty. This has the effect, within the scope of the declaration, of removing + // any association of the prefix with a namespace name. + // + // However our namespace prefixes here are used only as hints. There's + // no need to respect the removal of a namespace prefix, so we ignore it. + return nil + } + } else { + // Ignore: it's not a namespace definition + return nil + } + if prefix == "" { + if attr.Value == p.defaultNS { + // No need for redefinition. + return nil + } + if attr.Value != "" && ignoreNonEmptyDefault { + // We have an xmlns="..." value but + // it can't define a name space in this context, + // probably because the element has an empty + // name space. In this case, we just ignore + // the name space declaration. + return nil + } + } else if _, ok := p.attrPrefix[attr.Value]; ok { + // There's already a prefix for the given name space, + // so use that. This prevents us from + // having two prefixes for the same name space + // so attrNS and attrPrefix can remain bijective. 
+ return nil + } + p.pushPrefix(prefix, attr.Value) + return nil +} + +// createNSPrefix creates a name space prefix attribute +// to use for the given name space, defining a new prefix +// if necessary. +// If isAttr is true, the prefix is to be created for an attribute +// prefix, which means that the default name space cannot +// be used. +func (p *printer) createNSPrefix(url string, isAttr bool) { + if _, ok := p.attrPrefix[url]; ok { + // We already have a prefix for the given URL. + return + } + switch { + case !isAttr && url == p.defaultNS: + // We can use the default name space. + return + case url == "": + // The only way we can encode names in the empty + // name space is by using the default name space, + // so we must use that. + if p.defaultNS != "" { + // The default namespace is non-empty, so we + // need to set it to empty. + p.pushPrefix("", "") + } + return + case url == xmlURL: + return + } + // TODO If the URL is an existing prefix, we could + // use it as is. That would enable the + // marshaling of elements that had been unmarshaled + // and with a name space prefix that was not found. + // although technically it would be incorrect. + + // Pick a name. We try to use the final element of the path + // but fall back to _. + prefix := strings.TrimRight(url, "/") + if i := strings.LastIndex(prefix, "/"); i >= 0 { + prefix = prefix[i+1:] + } + if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { + prefix = "_" + } + if strings.HasPrefix(prefix, "xml") { + // xmlanything is reserved. + prefix = "_" + prefix + } + if p.attrNS[prefix] != "" { + // Name is taken. Find a better one. + for p.seq++; ; p.seq++ { + if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { + prefix = id + break + } + } + } + + p.pushPrefix(prefix, url) +} + +// writeNamespaces writes xmlns attributes for all the +// namespace prefixes that have been defined in +// the current element. 
+func (p *printer) writeNamespaces() { + for i := len(p.prefixes) - 1; i >= 0; i-- { + prefix := p.prefixes[i] + if prefix.mark { + return + } + p.WriteString(" ") + if prefix.prefix == "" { + // Default name space. + p.WriteString(`xmlns="`) + } else { + p.WriteString("xmlns:") + p.WriteString(prefix.prefix) + p.WriteString(`="`) + } + EscapeText(p, []byte(p.nsForPrefix(prefix.prefix))) + p.WriteString(`"`) + } +} + +// pushPrefix pushes a new prefix on the prefix stack +// without checking to see if it is already defined. +func (p *printer) pushPrefix(prefix, url string) { + p.prefixes = append(p.prefixes, printerPrefix{ + prefix: prefix, + url: p.nsForPrefix(prefix), + }) + p.setAttrPrefix(prefix, url) +} + +// nsForPrefix returns the name space for the given +// prefix. Note that this is not valid for the +// empty attribute prefix, which always has an empty +// name space. +func (p *printer) nsForPrefix(prefix string) string { + if prefix == "" { + return p.defaultNS + } + return p.attrNS[prefix] +} + +// markPrefix marks the start of an element on the prefix +// stack. +func (p *printer) markPrefix() { + p.prefixes = append(p.prefixes, printerPrefix{ + mark: true, + }) +} + +// popPrefix pops all defined prefixes for the current +// element. +func (p *printer) popPrefix() { + for len(p.prefixes) > 0 { + prefix := p.prefixes[len(p.prefixes)-1] + p.prefixes = p.prefixes[:len(p.prefixes)-1] + if prefix.mark { + break + } + p.setAttrPrefix(prefix.prefix, prefix.url) + } +} + +// setAttrPrefix sets an attribute name space prefix. +// If url is empty, the attribute is removed. +// If prefix is empty, the default name space is set. +func (p *printer) setAttrPrefix(prefix, url string) { + if prefix == "" { + p.defaultNS = url + return + } + if url == "" { + delete(p.attrPrefix, p.attrNS[prefix]) + delete(p.attrNS, prefix) + return + } + if p.attrPrefix == nil { + // Need to define a new name space. 
+ p.attrPrefix = make(map[string]string) + p.attrNS = make(map[string]string) + } + // Remove any old prefix value. This is OK because we maintain a + // strict one-to-one mapping between prefix and URL (see + // defineNS) + delete(p.attrPrefix, p.attrNS[prefix]) + p.attrPrefix[url] = prefix + p.attrNS[prefix] = url +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +) + +// marshalValue writes one or more XML elements representing val. +// If val was obtained from a struct field, finfo must have its details. +func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { + if startTemplate != nil && startTemplate.Name.Local == "" { + return fmt.Errorf("xml: EncodeElement of StartElement with missing name") + } + + if !val.IsValid() { + return nil + } + if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { + return nil + } + + // Drill into interfaces and pointers. + // This can turn into an infinite loop given a cyclic chain, + // but it matches the Go 1 behavior. + for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + if val.IsNil() { + return nil + } + val = val.Elem() + } + + kind := val.Kind() + typ := val.Type() + + // Check for marshaler. + if val.CanInterface() && typ.Implements(marshalerType) { + return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerType) { + return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Check for text marshaler. 
+ if val.CanInterface() && typ.Implements(textMarshalerType) { + return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Slices and arrays iterate over the elements. They do not have an enclosing tag. + if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { + for i, n := 0, val.Len(); i < n; i++ { + if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { + return err + } + } + return nil + } + + tinfo, err := getTypeInfo(typ) + if err != nil { + return err + } + + // Create start element. + // Precedence for the XML element name is: + // 0. startTemplate + // 1. XMLName field in underlying struct; + // 2. field name/tag in the struct field; and + // 3. type name + var start StartElement + + // explicitNS records whether the element's name space has been + // explicitly set (for example an XMLName field). + explicitNS := false + + if startTemplate != nil { + start.Name = startTemplate.Name + explicitNS = true + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if tinfo.xmlname != nil { + xmlname := tinfo.xmlname + if xmlname.name != "" { + start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name + } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { + start.Name = v + } + explicitNS = true + } + if start.Name.Local == "" && finfo != nil { + start.Name.Local = finfo.name + if finfo.xmlns != "" { + start.Name.Space = finfo.xmlns + explicitNS = true + } + } + if start.Name.Local == "" { + name := typ.Name() + if name == "" { + return &UnsupportedTypeError{typ} + } + start.Name.Local = name + } + + // defaultNS records the default name space as set by a xmlns="..." 
+ // attribute. We don't set p.defaultNS because we want to let + // the attribute writing code (in p.defineNS) be solely responsible + // for maintaining that. + defaultNS := p.defaultNS + + // Attributes + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr == 0 { + continue + } + attr, err := p.fieldAttr(finfo, val) + if err != nil { + return err + } + if attr.Name.Local == "" { + continue + } + start.Attr = append(start.Attr, attr) + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + defaultNS = attr.Value + } + } + if !explicitNS { + // Historic behavior: elements use the default name space + // they are contained in by default. + start.Name.Space = defaultNS + } + // Historic behaviour: an element that's in a namespace sets + // the default namespace for all elements contained within it. + start.setDefaultNamespace() + + if err := p.writeStart(&start); err != nil { + return err + } + + if val.Kind() == reflect.Struct { + err = p.marshalStruct(tinfo, val) + } else { + s, b, err1 := p.marshalSimple(typ, val) + if err1 != nil { + err = err1 + } else if b != nil { + EscapeText(p, b) + } else { + p.EscapeString(s) + } + } + if err != nil { + return err + } + + if err := p.writeEnd(start.Name); err != nil { + return err + } + + return p.cachedWriteError() +} + +// fieldAttr returns the attribute of the given field. +// If the returned attribute has an empty Name.Local, +// it should not be used. +// The given value holds the value containing the field. 
+func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) { + fv := finfo.value(val) + name := Name{Space: finfo.xmlns, Local: finfo.name} + if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { + return Attr{}, nil + } + if fv.Kind() == reflect.Interface && fv.IsNil() { + return Attr{}, nil + } + if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { + attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + } + if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { + text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + } + // Dereference or skip nil pointer, interface values. + switch fv.Kind() { + case reflect.Ptr, reflect.Interface: + if fv.IsNil() { + return Attr{}, nil + } + fv = fv.Elem() + } + s, b, err := p.marshalSimple(fv.Type(), fv) + if err != nil { + return Attr{}, err + } + if b != nil { + s = string(b) + } + return Attr{name, s}, nil +} + +// defaultStart returns the default start element to use, +// given the reflect type, field info, and start template. +func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { + var start StartElement + // Precedence for the XML element name is as above, + // except that we do not look inside structs for the first field. + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) 
+ } else if finfo != nil && finfo.name != "" { + start.Name.Local = finfo.name + start.Name.Space = finfo.xmlns + } else if typ.Name() != "" { + start.Name.Local = typ.Name() + } else { + // Must be a pointer to a named type, + // since it has the Marshaler methods. + start.Name.Local = typ.Elem().Name() + } + // Historic behaviour: elements use the name space of + // the element they are contained in by default. + if start.Name.Space == "" { + start.Name.Space = p.defaultNS + } + start.setDefaultNamespace() + return start +} + +// marshalInterface marshals a Marshaler interface value. +func (p *printer) marshalInterface(val Marshaler, start StartElement) error { + // Push a marker onto the tag stack so that MarshalXML + // cannot close the XML tags that it did not open. + p.tags = append(p.tags, Name{}) + n := len(p.tags) + + err := val.MarshalXML(p.encoder, start) + if err != nil { + return err + } + + // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. + if len(p.tags) > n { + return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) + } + p.tags = p.tags[:n-1] + return nil +} + +// marshalTextInterface marshals a TextMarshaler interface value. +func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { + if err := p.writeStart(&start); err != nil { + return err + } + text, err := val.MarshalText() + if err != nil { + return err + } + EscapeText(p, text) + return p.writeEnd(start.Name) +} + +// writeStart writes the given start element. +func (p *printer) writeStart(start *StartElement) error { + if start.Name.Local == "" { + return fmt.Errorf("xml: start tag with no name") + } + + p.tags = append(p.tags, start.Name) + p.markPrefix() + // Define any name spaces explicitly declared in the attributes. 
+ // We do this as a separate pass so that explicitly declared prefixes + // will take precedence over implicitly declared prefixes + // regardless of the order of the attributes. + ignoreNonEmptyDefault := start.Name.Space == "" + for _, attr := range start.Attr { + if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil { + return err + } + } + // Define any new name spaces implied by the attributes. + for _, attr := range start.Attr { + name := attr.Name + // From http://www.w3.org/TR/xml-names11/#defaulting + // "Default namespace declarations do not apply directly + // to attribute names; the interpretation of unprefixed + // attributes is determined by the element on which they + // appear." + // This means we don't need to create a new namespace + // when an attribute name space is empty. + if name.Space != "" && !name.isNamespace() { + p.createNSPrefix(name.Space, true) + } + } + p.createNSPrefix(start.Name.Space, false) + + p.writeIndent(1) + p.WriteByte('<') + p.writeName(start.Name, false) + p.writeNamespaces() + for _, attr := range start.Attr { + name := attr.Name + if name.Local == "" || name.isNamespace() { + // Namespaces have already been written by writeNamespaces above. + continue + } + p.WriteByte(' ') + p.writeName(name, true) + p.WriteString(`="`) + p.EscapeString(attr.Value) + p.WriteByte('"') + } + p.WriteByte('>') + return nil +} + +// writeName writes the given name. It assumes +// that p.createNSPrefix(name) has already been called. 
+func (p *printer) writeName(name Name, isAttr bool) { + if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" { + p.WriteString(prefix) + p.WriteByte(':') + } + p.WriteString(name.Local) +} + +func (p *printer) writeEnd(name Name) error { + if name.Local == "" { + return fmt.Errorf("xml: end tag with no name") + } + if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" { + return fmt.Errorf("xml: end tag </%s> without start tag", name.Local) + } + if top := p.tags[len(p.tags)-1]; top != name { + if top.Local != name.Local { + return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local) + } + return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space) + } + p.tags = p.tags[:len(p.tags)-1] + + p.writeIndent(-1) + p.WriteByte('<') + p.WriteByte('/') + p.writeName(name, false) + p.WriteByte('>') + p.popPrefix() + return nil +} + +func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) { + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(val.Int(), 10), nil, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(val.Uint(), 10), nil, nil + case reflect.Float32, reflect.Float64: + return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil + case reflect.String: + return val.String(), nil, nil + case reflect.Bool: + return strconv.FormatBool(val.Bool()), nil, nil + case reflect.Array: + if typ.Elem().Kind() != reflect.Uint8 { + break + } + // [...]byte + var bytes []byte + if val.CanAddr() { + bytes = val.Slice(0, val.Len()).Bytes() + } else { + bytes = make([]byte, val.Len()) + reflect.Copy(reflect.ValueOf(bytes), val) + } + return "", bytes, nil + case reflect.Slice: + if typ.Elem().Kind() != reflect.Uint8 { + break + } + // []byte + 
return "", val.Bytes(), nil + } + return "", nil, &UnsupportedTypeError{typ} +} + +var ddBytes = []byte("--") + +func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { + s := parentStack{p: p} + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr != 0 { + continue + } + vf := finfo.value(val) + + // Dereference or skip nil pointer, interface values. + switch vf.Kind() { + case reflect.Ptr, reflect.Interface: + if !vf.IsNil() { + vf = vf.Elem() + } + } + + switch finfo.flags & fMode { + case fCharData: + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + if vf.CanInterface() && vf.Type().Implements(textMarshalerType) { + data, err := vf.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + Escape(p, data) + continue + } + if vf.CanAddr() { + pv := vf.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + data, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + Escape(p, data) + continue + } + } + var scratch [64]byte + switch vf.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)) + case reflect.Float32, reflect.Float64: + Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())) + case reflect.Bool: + Escape(p, strconv.AppendBool(scratch[:0], vf.Bool())) + case reflect.String: + if err := EscapeText(p, []byte(vf.String())); err != nil { + return err + } + case reflect.Slice: + if elem, ok := vf.Interface().([]byte); ok { + if err := EscapeText(p, elem); err != nil { + return err + } + } + } + continue + + case fComment: + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + k := 
vf.Kind() + if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) { + return fmt.Errorf("xml: bad type for comment field of %s", val.Type()) + } + if vf.Len() == 0 { + continue + } + p.writeIndent(0) + p.WriteString("<!--") + dashDash := false + dashLast := false + switch k { + case reflect.String: + s := vf.String() + dashDash = strings.Index(s, "--") >= 0 + dashLast = s[len(s)-1] == '-' + if !dashDash { + p.WriteString(s) + } + case reflect.Slice: + b := vf.Bytes() + dashDash = bytes.Index(b, ddBytes) >= 0 + dashLast = b[len(b)-1] == '-' + if !dashDash { + p.Write(b) + } + default: + panic("can't happen") + } + if dashDash { + return fmt.Errorf(`xml: comments must not contain "--"`) + } + if dashLast { + // "--->" is invalid grammar. Make it "- -->" + p.WriteByte(' ') + } + p.WriteString("-->") + continue + + case fInnerXml: + iface := vf.Interface() + switch raw := iface.(type) { + case []byte: + p.Write(raw) + continue + case string: + p.WriteString(raw) + continue + } + + case fElement, fElement | fAny: + if err := s.setParents(finfo, vf); err != nil { + return err + } + } + if err := p.marshalValue(vf, finfo, nil); err != nil { + return err + } + } + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + return p.cachedWriteError() +} + +var noField fieldInfo + +// return the bufio Writer's cached write error +func (p *printer) cachedWriteError() error { + _, err := p.Write(nil) + return err +} + +func (p *printer) writeIndent(depthDelta int) { + if len(p.prefix) == 0 && len(p.indent) == 0 { + return + } + if depthDelta < 0 { + p.depth-- + if p.indentedIn { + p.indentedIn = false + return + } + p.indentedIn = false + } + if p.putNewline { + p.WriteByte('\n') + } else { + p.putNewline = true + } + if len(p.prefix) > 0 { + p.WriteString(p.prefix) + } + if len(p.indent) > 0 { + for i := 0; i < p.depth; i++ { + p.WriteString(p.indent) + } + } + if depthDelta > 0 { + p.depth++ + p.indentedIn = true 
+	}
+}
+
+type parentStack struct {
+	p       *printer
+	xmlns   string
+	parents []string
+}
+
+// setParents sets the stack of current parents to those found in finfo.
+// It only writes the start elements if vf holds a non-nil value.
+// If finfo is &noField, it pops all elements.
+func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error {
+	xmlns := s.p.defaultNS
+	if finfo.xmlns != "" {
+		xmlns = finfo.xmlns
+	}
+	commonParents := 0
+	if xmlns == s.xmlns {
+		for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ {
+			if finfo.parents[commonParents] != s.parents[commonParents] {
+				break
+			}
+		}
+	}
+	// Pop off any parents that aren't in common with the previous field.
+	for i := len(s.parents) - 1; i >= commonParents; i-- {
+		if err := s.p.writeEnd(Name{
+			Space: s.xmlns,
+			Local: s.parents[i],
+		}); err != nil {
+			return err
+		}
+	}
+	s.parents = finfo.parents
+	s.xmlns = xmlns
+	if commonParents >= len(s.parents) {
+		// No new elements to push.
+		return nil
+	}
+	if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() {
+		// The element is nil, so no need for the start elements.
+		s.parents = s.parents[:commonParents]
+		return nil
+	}
+	// Push any new parents required.
+	for _, name := range s.parents[commonParents:] {
+		start := &StartElement{
+			Name: Name{
+				Space: s.xmlns,
+				Local: name,
+			},
+		}
+		// Set the default name space for parent elements
+		// to match what we do with other elements.
+		if s.xmlns != s.p.defaultNS {
+			start.setDefaultNamespace()
+		}
+		if err := s.p.writeStart(start); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// An UnsupportedTypeError is returned when Marshal encounters a type
+// that cannot be converted into XML.
+type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "xml: unsupported type: " + e.Type.String() +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go new file mode 100644 index 00000000..5dc78e74 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go @@ -0,0 +1,1939 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xml + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +type DriveType int + +const ( + HyperDrive DriveType = iota + ImprobabilityDrive +) + +type Passenger struct { + Name []string `xml:"name"` + Weight float32 `xml:"weight"` +} + +type Ship struct { + XMLName struct{} `xml:"spaceship"` + + Name string `xml:"name,attr"` + Pilot string `xml:"pilot,attr"` + Drive DriveType `xml:"drive"` + Age uint `xml:"age"` + Passenger []*Passenger `xml:"passenger"` + secret string +} + +type NamedType string + +type Port struct { + XMLName struct{} `xml:"port"` + Type string `xml:"type,attr,omitempty"` + Comment string `xml:",comment"` + Number string `xml:",chardata"` +} + +type Domain struct { + XMLName struct{} `xml:"domain"` + Country string `xml:",attr,omitempty"` + Name []byte `xml:",chardata"` + Comment []byte `xml:",comment"` +} + +type Book struct { + XMLName struct{} `xml:"book"` + Title string `xml:",chardata"` +} + +type Event struct { + XMLName struct{} `xml:"event"` + Year int `xml:",chardata"` +} + +type Movie struct { + XMLName struct{} `xml:"movie"` + Length uint `xml:",chardata"` +} + +type Pi struct { + XMLName struct{} `xml:"pi"` + Approximation float32 `xml:",chardata"` +} + +type Universe struct { + XMLName struct{} `xml:"universe"` + Visible float64 `xml:",chardata"` +} + +type Particle struct { + XMLName struct{} `xml:"particle"` + HasMass bool `xml:",chardata"` +} + +type Departure struct { + XMLName struct{} `xml:"departure"` + When time.Time `xml:",chardata"` +} + +type SecretAgent struct { + XMLName struct{} `xml:"agent"` + Handle string `xml:"handle,attr"` + Identity string + Obfuscate string `xml:",innerxml"` +} + +type NestedItems struct { + XMLName struct{} `xml:"result"` + Items []string `xml:">item"` + Item1 []string `xml:"Items>item1"` +} + +type NestedOrder struct { + XMLName struct{} `xml:"result"` + Field1 string `xml:"parent>c"` + Field2 string `xml:"parent>b"` + 
Field3 string `xml:"parent>a"` +} + +type MixedNested struct { + XMLName struct{} `xml:"result"` + A string `xml:"parent1>a"` + B string `xml:"b"` + C string `xml:"parent1>parent2>c"` + D string `xml:"parent1>d"` +} + +type NilTest struct { + A interface{} `xml:"parent1>parent2>a"` + B interface{} `xml:"parent1>b"` + C interface{} `xml:"parent1>parent2>c"` +} + +type Service struct { + XMLName struct{} `xml:"service"` + Domain *Domain `xml:"host>domain"` + Port *Port `xml:"host>port"` + Extra1 interface{} + Extra2 interface{} `xml:"host>extra2"` +} + +var nilStruct *Ship + +type EmbedA struct { + EmbedC + EmbedB EmbedB + FieldA string +} + +type EmbedB struct { + FieldB string + *EmbedC +} + +type EmbedC struct { + FieldA1 string `xml:"FieldA>A1"` + FieldA2 string `xml:"FieldA>A2"` + FieldB string + FieldC string +} + +type NameCasing struct { + XMLName struct{} `xml:"casing"` + Xy string + XY string + XyA string `xml:"Xy,attr"` + XYA string `xml:"XY,attr"` +} + +type NamePrecedence struct { + XMLName Name `xml:"Parent"` + FromTag XMLNameWithoutTag `xml:"InTag"` + FromNameVal XMLNameWithoutTag + FromNameTag XMLNameWithTag + InFieldName string +} + +type XMLNameWithTag struct { + XMLName Name `xml:"InXMLNameTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithNSTag struct { + XMLName Name `xml:"ns InXMLNameWithNSTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithoutTag struct { + XMLName Name + Value string `xml:",chardata"` +} + +type NameInField struct { + Foo Name `xml:"ns foo"` +} + +type AttrTest struct { + Int int `xml:",attr"` + Named int `xml:"int,attr"` + Float float64 `xml:",attr"` + Uint8 uint8 `xml:",attr"` + Bool bool `xml:",attr"` + Str string `xml:",attr"` + Bytes []byte `xml:",attr"` +} + +type OmitAttrTest struct { + Int int `xml:",attr,omitempty"` + Named int `xml:"int,attr,omitempty"` + Float float64 `xml:",attr,omitempty"` + Uint8 uint8 `xml:",attr,omitempty"` + Bool bool `xml:",attr,omitempty"` + Str string 
`xml:",attr,omitempty"` + Bytes []byte `xml:",attr,omitempty"` +} + +type OmitFieldTest struct { + Int int `xml:",omitempty"` + Named int `xml:"int,omitempty"` + Float float64 `xml:",omitempty"` + Uint8 uint8 `xml:",omitempty"` + Bool bool `xml:",omitempty"` + Str string `xml:",omitempty"` + Bytes []byte `xml:",omitempty"` + Ptr *PresenceTest `xml:",omitempty"` +} + +type AnyTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField AnyHolder `xml:",any"` +} + +type AnyOmitTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField *AnyHolder `xml:",any,omitempty"` +} + +type AnySliceTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField []AnyHolder `xml:",any"` +} + +type AnyHolder struct { + XMLName Name + XML string `xml:",innerxml"` +} + +type RecurseA struct { + A string + B *RecurseB +} + +type RecurseB struct { + A *RecurseA + B string +} + +type PresenceTest struct { + Exists *struct{} +} + +type IgnoreTest struct { + PublicSecret string `xml:"-"` +} + +type MyBytes []byte + +type Data struct { + Bytes []byte + Attr []byte `xml:",attr"` + Custom MyBytes +} + +type Plain struct { + V interface{} +} + +type MyInt int + +type EmbedInt struct { + MyInt +} + +type Strings struct { + X []string `xml:"A>B,omitempty"` +} + +type PointerFieldsTest struct { + XMLName Name `xml:"dummy"` + Name *string `xml:"name,attr"` + Age *uint `xml:"age,attr"` + Empty *string `xml:"empty,attr"` + Contents *string `xml:",chardata"` +} + +type ChardataEmptyTest struct { + XMLName Name `xml:"test"` + Contents *string `xml:",chardata"` +} + +type MyMarshalerTest struct { +} + +var _ Marshaler = (*MyMarshalerTest)(nil) + +func (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error { + e.EncodeToken(start) + e.EncodeToken(CharData([]byte("hello world"))) + e.EncodeToken(EndElement{start.Name}) + return nil +} + +type MyMarshalerAttrTest struct{} + +var _ MarshalerAttr = 
(*MyMarshalerAttrTest)(nil) + +func (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MyMarshalerValueAttrTest struct{} + +var _ MarshalerAttr = MyMarshalerValueAttrTest{} + +func (m MyMarshalerValueAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MarshalerStruct struct { + Foo MyMarshalerAttrTest `xml:",attr"` +} + +type MarshalerValueStruct struct { + Foo MyMarshalerValueAttrTest `xml:",attr"` +} + +type InnerStruct struct { + XMLName Name `xml:"testns outer"` +} + +type OuterStruct struct { + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterNamedStruct struct { + InnerStruct + XMLName Name `xml:"outerns test"` + IntAttr int `xml:"int,attr"` +} + +type OuterNamedOrderedStruct struct { + XMLName Name `xml:"outerns test"` + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterOuterStruct struct { + OuterStruct +} + +type NestedAndChardata struct { + AB []string `xml:"A>B"` + Chardata string `xml:",chardata"` +} + +type NestedAndComment struct { + AB []string `xml:"A>B"` + Comment string `xml:",comment"` +} + +type XMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body string +} + +type NamedXMLNSFieldStruct struct { + XMLName struct{} `xml:"testns test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type XMLNSFieldStructWithOmitEmpty struct { + Ns string `xml:"xmlns,attr,omitempty"` + Body string +} + +type NamedXMLNSFieldStructWithEmptyNamespace struct { + XMLName struct{} `xml:"test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type RecursiveXMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body *RecursiveXMLNSFieldStruct `xml:",omitempty"` + Text string `xml:",omitempty"` +} + +func ifaceptr(x interface{}) interface{} { + return &x +} + +var ( + nameAttr = "Sarah" + ageAttr = uint(12) + contentsAttr = "lorem ipsum" +) + +// Unless explicitly stated as such (or *Plain), all of the +// tests below 
are two-way tests. When introducing new tests, +// please try to make them two-way as well to ensure that +// marshalling and unmarshalling are as symmetrical as feasible. +var marshalTests = []struct { + Value interface{} + ExpectXML string + MarshalOnly bool + UnmarshalOnly bool +}{ + // Test nil marshals to nothing + {Value: nil, ExpectXML: ``, MarshalOnly: true}, + {Value: nilStruct, ExpectXML: ``, MarshalOnly: true}, + + // Test value types + {Value: &Plain{true}, ExpectXML: `<Plain><V>true</V></Plain>`}, + {Value: &Plain{false}, ExpectXML: `<Plain><V>false</V></Plain>`}, + {Value: &Plain{int(42)}, ExpectXML: `<Plain><V>42</V></Plain>`}, + {Value: &Plain{int8(42)}, ExpectXML: `<Plain><V>42</V></Plain>`}, + {Value: &Plain{int16(42)}, ExpectXML: `<Plain><V>42</V></Plain>`}, + {Value: &Plain{int32(42)}, ExpectXML: `<Plain><V>42</V></Plain>`}, + {Value: &Plain{uint(42)}, ExpectXML: `<Plain><V>42</V></Plain>`}, + {Value: &Plain{uint8(42)}, ExpectXML: `<Plain><V>42</V></Plain>`}, + {Value: &Plain{uint16(42)}, ExpectXML: `<Plain><V>42</V></Plain>`}, + {Value: &Plain{uint32(42)}, ExpectXML: `<Plain><V>42</V></Plain>`}, + {Value: &Plain{float32(1.25)}, ExpectXML: `<Plain><V>1.25</V></Plain>`}, + {Value: &Plain{float64(1.25)}, ExpectXML: `<Plain><V>1.25</V></Plain>`}, + {Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `<Plain><V>65501</V></Plain>`}, + {Value: &Plain{"gopher"}, ExpectXML: `<Plain><V>gopher</V></Plain>`}, + {Value: &Plain{[]byte("gopher")}, ExpectXML: `<Plain><V>gopher</V></Plain>`}, + {Value: &Plain{"</>"}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`}, + {Value: &Plain{[]byte("</>")}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`}, + {Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`}, + {Value: &Plain{NamedType("potato")}, ExpectXML: `<Plain><V>potato</V></Plain>`}, + {Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `<Plain><V>1</V><V>2</V><V>3</V></Plain>`}, + {Value: &Plain{[3]int{1, 2, 3}}, ExpectXML: 
`<Plain><V>1</V><V>2</V><V>3</V></Plain>`}, + {Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `<bool>true</bool>`}, + + // Test time. + { + Value: &Plain{time.Unix(1e9, 123456789).UTC()}, + ExpectXML: `<Plain><V>2001-09-09T01:46:40.123456789Z</V></Plain>`, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: `<PresenceTest><Exists></Exists></PresenceTest>`, + }, + { + Value: &PresenceTest{}, + ExpectXML: `<PresenceTest></PresenceTest>`, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: `<PresenceTest><Exists></Exists></PresenceTest>`, + }, + { + Value: &PresenceTest{}, + ExpectXML: `<PresenceTest></PresenceTest>`, + }, + + // A []byte field is only nil if the element was not found. + { + Value: &Data{}, + ExpectXML: `<Data></Data>`, + UnmarshalOnly: true, + }, + { + Value: &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}}, + ExpectXML: `<Data Attr=""><Bytes></Bytes><Custom></Custom></Data>`, + UnmarshalOnly: true, + }, + + // Check that []byte works, including named []byte types. 
+ { + Value: &Data{Bytes: []byte("ab"), Custom: MyBytes("cd"), Attr: []byte{'v'}}, + ExpectXML: `<Data Attr="v"><Bytes>ab</Bytes><Custom>cd</Custom></Data>`, + }, + + // Test innerxml + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "<redacted/>", + }, + ExpectXML: `<agent handle="007"><Identity>James Bond</Identity><redacted/></agent>`, + MarshalOnly: true, + }, + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "<Identity>James Bond</Identity><redacted/>", + }, + ExpectXML: `<agent handle="007"><Identity>James Bond</Identity><redacted/></agent>`, + UnmarshalOnly: true, + }, + + // Test structs + {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `<port type="ssl">443</port>`}, + {Value: &Port{Number: "443"}, ExpectXML: `<port>443</port>`}, + {Value: &Port{Type: "<unix>"}, ExpectXML: `<port type="&lt;unix&gt;"></port>`}, + {Value: &Port{Number: "443", Comment: "https"}, ExpectXML: `<port><!--https-->443</port>`}, + {Value: &Port{Number: "443", Comment: "add space-"}, ExpectXML: `<port><!--add space- -->443</port>`, MarshalOnly: true}, + {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `<domain>google.com&amp;friends</domain>`}, + {Value: &Domain{Name: []byte("google.com"), Comment: []byte(" &friends ")}, ExpectXML: `<domain>google.com<!-- &friends --></domain>`}, + {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `<book>Pride &amp; Prejudice</book>`}, + {Value: &Event{Year: -3114}, ExpectXML: `<event>-3114</event>`}, + {Value: &Movie{Length: 13440}, ExpectXML: `<movie>13440</movie>`}, + {Value: &Pi{Approximation: 3.14159265}, ExpectXML: `<pi>3.1415927</pi>`}, + {Value: &Universe{Visible: 9.3e13}, ExpectXML: `<universe>9.3e+13</universe>`}, + {Value: &Particle{HasMass: true}, ExpectXML: `<particle>true</particle>`}, + {Value: &Departure{When: ParseTime("2013-01-09T00:15:00-09:00")}, ExpectXML: `<departure>2013-01-09T00:15:00-09:00</departure>`}, + {Value: atomValue, ExpectXML: 
atomXml}, + { + Value: &Ship{ + Name: "Heart of Gold", + Pilot: "Computer", + Age: 1, + Drive: ImprobabilityDrive, + Passenger: []*Passenger{ + { + Name: []string{"Zaphod", "Beeblebrox"}, + Weight: 7.25, + }, + { + Name: []string{"Trisha", "McMillen"}, + Weight: 5.5, + }, + { + Name: []string{"Ford", "Prefect"}, + Weight: 7, + }, + { + Name: []string{"Arthur", "Dent"}, + Weight: 6.75, + }, + }, + }, + ExpectXML: `<spaceship name="Heart of Gold" pilot="Computer">` + + `<drive>` + strconv.Itoa(int(ImprobabilityDrive)) + `</drive>` + + `<age>1</age>` + + `<passenger>` + + `<name>Zaphod</name>` + + `<name>Beeblebrox</name>` + + `<weight>7.25</weight>` + + `</passenger>` + + `<passenger>` + + `<name>Trisha</name>` + + `<name>McMillen</name>` + + `<weight>5.5</weight>` + + `</passenger>` + + `<passenger>` + + `<name>Ford</name>` + + `<name>Prefect</name>` + + `<weight>7</weight>` + + `</passenger>` + + `<passenger>` + + `<name>Arthur</name>` + + `<name>Dent</name>` + + `<weight>6.75</weight>` + + `</passenger>` + + `</spaceship>`, + }, + + // Test a>b + { + Value: &NestedItems{Items: nil, Item1: nil}, + ExpectXML: `<result>` + + `<Items>` + + `</Items>` + + `</result>`, + }, + { + Value: &NestedItems{Items: []string{}, Item1: []string{}}, + ExpectXML: `<result>` + + `<Items>` + + `</Items>` + + `</result>`, + MarshalOnly: true, + }, + { + Value: &NestedItems{Items: nil, Item1: []string{"A"}}, + ExpectXML: `<result>` + + `<Items>` + + `<item1>A</item1>` + + `</Items>` + + `</result>`, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: nil}, + ExpectXML: `<result>` + + `<Items>` + + `<item>A</item>` + + `<item>B</item>` + + `</Items>` + + `</result>`, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}}, + ExpectXML: `<result>` + + `<Items>` + + `<item>A</item>` + + `<item>B</item>` + + `<item1>C</item1>` + + `</Items>` + + `</result>`, + }, + { + Value: &NestedOrder{Field1: "C", Field2: "B", Field3: "A"}, + ExpectXML: `<result>` + 
+ `<parent>` + + `<c>C</c>` + + `<b>B</b>` + + `<a>A</a>` + + `</parent>` + + `</result>`, + }, + { + Value: &NilTest{A: "A", B: nil, C: "C"}, + ExpectXML: `<NilTest>` + + `<parent1>` + + `<parent2><a>A</a></parent2>` + + `<parent2><c>C</c></parent2>` + + `</parent1>` + + `</NilTest>`, + MarshalOnly: true, // Uses interface{} + }, + { + Value: &MixedNested{A: "A", B: "B", C: "C", D: "D"}, + ExpectXML: `<result>` + + `<parent1><a>A</a></parent1>` + + `<b>B</b>` + + `<parent1>` + + `<parent2><c>C</c></parent2>` + + `<d>D</d>` + + `</parent1>` + + `</result>`, + }, + { + Value: &Service{Port: &Port{Number: "80"}}, + ExpectXML: `<service><host><port>80</port></host></service>`, + }, + { + Value: &Service{}, + ExpectXML: `<service></service>`, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"}, + ExpectXML: `<service>` + + `<host><port>80</port></host>` + + `<Extra1>A</Extra1>` + + `<host><extra2>B</extra2></host>` + + `</service>`, + MarshalOnly: true, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra2: "example"}, + ExpectXML: `<service>` + + `<host><port>80</port></host>` + + `<host><extra2>example</extra2></host>` + + `</service>`, + MarshalOnly: true, + }, + { + Value: &struct { + XMLName struct{} `xml:"space top"` + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + E1 string `xml:"x>e"` + }{ + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + E1: "e1", + }, + ExpectXML: `<top xmlns="space">` + + `<x><a>a</a><b>b</b><c>c</c></x>` + + `<x xmlns="space1">` + + `<c>c1</c>` + + `<d>d1</d>` + + `</x>` + + `<x>` + + `<e>e1</e>` + + `</x>` + + `</top>`, + }, + { + Value: &struct { + XMLName Name + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + }{ + XMLName: Name{ + Space: "space0", + Local: "top", + }, + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", 
+ }, + ExpectXML: `<top xmlns="space0">` + + `<x><a>a</a><b>b</b></x>` + + `<x xmlns="space"><c>c</c></x>` + + `<x xmlns="space1">` + + `<c>c1</c>` + + `<d>d1</d>` + + `</x>` + + `</top>`, + }, + { + Value: &struct { + XMLName struct{} `xml:"top"` + B string `xml:"space x>b"` + B1 string `xml:"space1 x>b"` + }{ + B: "b", + B1: "b1", + }, + ExpectXML: `<top>` + + `<x xmlns="space"><b>b</b></x>` + + `<x xmlns="space1"><b>b1</b></x>` + + `</top>`, + }, + + // Test struct embedding + { + Value: &EmbedA{ + EmbedC: EmbedC{ + FieldA1: "", // Shadowed by A.A + FieldA2: "", // Shadowed by A.A + FieldB: "A.C.B", + FieldC: "A.C.C", + }, + EmbedB: EmbedB{ + FieldB: "A.B.B", + EmbedC: &EmbedC{ + FieldA1: "A.B.C.A1", + FieldA2: "A.B.C.A2", + FieldB: "", // Shadowed by A.B.B + FieldC: "A.B.C.C", + }, + }, + FieldA: "A.A", + }, + ExpectXML: `<EmbedA>` + + `<FieldB>A.C.B</FieldB>` + + `<FieldC>A.C.C</FieldC>` + + `<EmbedB>` + + `<FieldB>A.B.B</FieldB>` + + `<FieldA>` + + `<A1>A.B.C.A1</A1>` + + `<A2>A.B.C.A2</A2>` + + `</FieldA>` + + `<FieldC>A.B.C.C</FieldC>` + + `</EmbedB>` + + `<FieldA>A.A</FieldA>` + + `</EmbedA>`, + }, + + // Test that name casing matters + { + Value: &NameCasing{Xy: "mixed", XY: "upper", XyA: "mixedA", XYA: "upperA"}, + ExpectXML: `<casing Xy="mixedA" XY="upperA"><Xy>mixed</Xy><XY>upper</XY></casing>`, + }, + + // Test the order in which the XML element name is chosen + { + Value: &NamePrecedence{ + FromTag: XMLNameWithoutTag{Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "InXMLName"}, Value: "B"}, + FromNameTag: XMLNameWithTag{Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `<Parent>` + + `<InTag>A</InTag>` + + `<InXMLName>B</InXMLName>` + + `<InXMLNameTag>C</InXMLNameTag>` + + `<InFieldName>D</InFieldName>` + + `</Parent>`, + MarshalOnly: true, + }, + { + Value: &NamePrecedence{ + XMLName: Name{Local: "Parent"}, + FromTag: XMLNameWithoutTag{XMLName: Name{Local: "InTag"}, Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: 
Name{Local: "FromNameVal"}, Value: "B"}, + FromNameTag: XMLNameWithTag{XMLName: Name{Local: "InXMLNameTag"}, Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `<Parent>` + + `<InTag>A</InTag>` + + `<FromNameVal>B</FromNameVal>` + + `<InXMLNameTag>C</InXMLNameTag>` + + `<InFieldName>D</InFieldName>` + + `</Parent>`, + UnmarshalOnly: true, + }, + + // xml.Name works in a plain field as well. + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`, + }, + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: `<NameInField><foo xmlns="ns"><ignore></ignore></foo></NameInField>`, + UnmarshalOnly: true, + }, + + // Marshaling zero xml.Name uses the tag or field name. + { + Value: &NameInField{}, + ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`, + MarshalOnly: true, + }, + + // Test attributes + { + Value: &AttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: `<AttrTest Int="8" int="9" Float="23.5" Uint8="255"` + + ` Bool="true" Str="str" Bytes="byt"></AttrTest>`, + }, + { + Value: &AttrTest{Bytes: []byte{}}, + ExpectXML: `<AttrTest Int="0" int="0" Float="0" Uint8="0"` + + ` Bool="false" Str="" Bytes=""></AttrTest>`, + }, + { + Value: &OmitAttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: `<OmitAttrTest Int="8" int="9" Float="23.5" Uint8="255"` + + ` Bool="true" Str="str" Bytes="byt"></OmitAttrTest>`, + }, + { + Value: &OmitAttrTest{}, + ExpectXML: `<OmitAttrTest></OmitAttrTest>`, + }, + + // pointer fields + { + Value: &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr}, + ExpectXML: `<dummy name="Sarah" age="12">lorem ipsum</dummy>`, + MarshalOnly: true, + }, + + // empty chardata pointer field + { + Value: &ChardataEmptyTest{}, + ExpectXML: `<test></test>`, + MarshalOnly: true, + }, + + // 
omitempty on fields + { + Value: &OmitFieldTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + Ptr: &PresenceTest{}, + }, + ExpectXML: `<OmitFieldTest>` + + `<Int>8</Int>` + + `<int>9</int>` + + `<Float>23.5</Float>` + + `<Uint8>255</Uint8>` + + `<Bool>true</Bool>` + + `<Str>str</Str>` + + `<Bytes>byt</Bytes>` + + `<Ptr></Ptr>` + + `</OmitFieldTest>`, + }, + { + Value: &OmitFieldTest{}, + ExpectXML: `<OmitFieldTest></OmitFieldTest>`, + }, + + // Test ",any" + { + ExpectXML: `<a><nested><value>known</value></nested><other><sub>unknown</sub></other></a>`, + Value: &AnyTest{ + Nested: "known", + AnyField: AnyHolder{ + XMLName: Name{Local: "other"}, + XML: "<sub>unknown</sub>", + }, + }, + }, + { + Value: &AnyTest{Nested: "known", + AnyField: AnyHolder{ + XML: "<unknown/>", + XMLName: Name{Local: "AnyField"}, + }, + }, + ExpectXML: `<a><nested><value>known</value></nested><AnyField><unknown/></AnyField></a>`, + }, + { + ExpectXML: `<a><nested><value>b</value></nested></a>`, + Value: &AnyOmitTest{ + Nested: "b", + }, + }, + { + ExpectXML: `<a><nested><value>b</value></nested><c><d>e</d></c><g xmlns="f"><h>i</h></g></a>`, + Value: &AnySliceTest{ + Nested: "b", + AnyField: []AnyHolder{ + { + XMLName: Name{Local: "c"}, + XML: "<d>e</d>", + }, + { + XMLName: Name{Space: "f", Local: "g"}, + XML: "<h>i</h>", + }, + }, + }, + }, + { + ExpectXML: `<a><nested><value>b</value></nested></a>`, + Value: &AnySliceTest{ + Nested: "b", + }, + }, + + // Test recursive types. 
+ { + Value: &RecurseA{ + A: "a1", + B: &RecurseB{ + A: &RecurseA{"a2", nil}, + B: "b1", + }, + }, + ExpectXML: `<RecurseA><A>a1</A><B><A><A>a2</A></A><B>b1</B></B></RecurseA>`, + }, + + // Test ignoring fields via "-" tag + { + ExpectXML: `<IgnoreTest></IgnoreTest>`, + Value: &IgnoreTest{}, + }, + { + ExpectXML: `<IgnoreTest></IgnoreTest>`, + Value: &IgnoreTest{PublicSecret: "can't tell"}, + MarshalOnly: true, + }, + { + ExpectXML: `<IgnoreTest><PublicSecret>ignore me</PublicSecret></IgnoreTest>`, + Value: &IgnoreTest{}, + UnmarshalOnly: true, + }, + + // Test escaping. + { + ExpectXML: `<a><nested><value>dquote: &#34;; squote: &#39;; ampersand: &amp;; less: &lt;; greater: &gt;;</value></nested><empty></empty></a>`, + Value: &AnyTest{ + Nested: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + AnyField: AnyHolder{XMLName: Name{Local: "empty"}}, + }, + }, + { + ExpectXML: `<a><nested><value>newline: &#xA;; cr: &#xD;; tab: &#x9;;</value></nested><AnyField></AnyField></a>`, + Value: &AnyTest{ + Nested: "newline: \n; cr: \r; tab: \t;", + AnyField: AnyHolder{XMLName: Name{Local: "AnyField"}}, + }, + }, + { + ExpectXML: "<a><nested><value>1\r2\r\n3\n\r4\n5</value></nested></a>", + Value: &AnyTest{ + Nested: "1\n2\n3\n\n4\n5", + }, + UnmarshalOnly: true, + }, + { + ExpectXML: `<EmbedInt><MyInt>42</MyInt></EmbedInt>`, + Value: &EmbedInt{ + MyInt: 42, + }, + }, + // Test omitempty with parent chain; see golang.org/issue/4168. + { + ExpectXML: `<Strings><A></A></Strings>`, + Value: &Strings{}, + }, + // Custom marshalers. 
+ { + ExpectXML: `<MyMarshalerTest>hello world</MyMarshalerTest>`, + Value: &MyMarshalerTest{}, + }, + { + ExpectXML: `<MarshalerStruct Foo="hello world"></MarshalerStruct>`, + Value: &MarshalerStruct{}, + }, + { + ExpectXML: `<MarshalerValueStruct Foo="hello world"></MarshalerValueStruct>`, + Value: &MarshalerValueStruct{}, + }, + { + ExpectXML: `<outer xmlns="testns" int="10"></outer>`, + Value: &OuterStruct{IntAttr: 10}, + }, + { + ExpectXML: `<test xmlns="outerns" int="10"></test>`, + Value: &OuterNamedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: `<test xmlns="outerns" int="10"></test>`, + Value: &OuterNamedOrderedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: `<outer xmlns="testns" int="10"></outer>`, + Value: &OuterOuterStruct{OuterStruct{IntAttr: 10}}, + }, + { + ExpectXML: `<NestedAndChardata><A><B></B><B></B></A>test</NestedAndChardata>`, + Value: &NestedAndChardata{AB: make([]string, 2), Chardata: "test"}, + }, + { + ExpectXML: `<NestedAndComment><A><B></B><B></B></A><!--test--></NestedAndComment>`, + Value: &NestedAndComment{AB: make([]string, 2), Comment: "test"}, + }, + { + ExpectXML: `<XMLNSFieldStruct xmlns="http://example.com/ns"><Body>hello world</Body></XMLNSFieldStruct>`, + Value: &XMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `<testns:test xmlns:testns="testns" xmlns="http://example.com/ns"><Body>hello world</Body></testns:test>`, + Value: &NamedXMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `<testns:test xmlns:testns="testns"><Body>hello world</Body></testns:test>`, + Value: &NamedXMLNSFieldStruct{Ns: "", Body: "hello world"}, + }, + { + ExpectXML: `<XMLNSFieldStructWithOmitEmpty><Body>hello world</Body></XMLNSFieldStructWithOmitEmpty>`, + Value: &XMLNSFieldStructWithOmitEmpty{Body: "hello world"}, + }, + { + // The xmlns attribute must be ignored because the <test> + // 
element is in the empty namespace, so it's not possible + // to set the default namespace to something non-empty. + ExpectXML: `<test><Body>hello world</Body></test>`, + Value: &NamedXMLNSFieldStructWithEmptyNamespace{Ns: "foo", Body: "hello world"}, + MarshalOnly: true, + }, + { + ExpectXML: `<RecursiveXMLNSFieldStruct xmlns="foo"><Body xmlns=""><Text>hello world</Text></Body></RecursiveXMLNSFieldStruct>`, + Value: &RecursiveXMLNSFieldStruct{ + Ns: "foo", + Body: &RecursiveXMLNSFieldStruct{ + Text: "hello world", + }, + }, + }, +} + +func TestMarshal(t *testing.T) { + for idx, test := range marshalTests { + if test.UnmarshalOnly { + continue + } + data, err := Marshal(test.Value) + if err != nil { + t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + if strings.Contains(want, "\n") { + t.Errorf("#%d: marshal(%#v):\nHAVE:\n%s\nWANT:\n%s", idx, test.Value, got, want) + } else { + t.Errorf("#%d: marshal(%#v):\nhave %#q\nwant %#q", idx, test.Value, got, want) + } + } + } +} + +type AttrParent struct { + X string `xml:"X>Y,attr"` +} + +type BadAttr struct { + Name []string `xml:"name,attr"` +} + +var marshalErrorTests = []struct { + Value interface{} + Err string + Kind reflect.Kind +}{ + { + Value: make(chan bool), + Err: "xml: unsupported type: chan bool", + Kind: reflect.Chan, + }, + { + Value: map[string]string{ + "question": "What do you get when you multiply six by nine?", + "answer": "42", + }, + Err: "xml: unsupported type: map[string]string", + Kind: reflect.Map, + }, + { + Value: map[*Ship]bool{nil: false}, + Err: "xml: unsupported type: map[*xml.Ship]bool", + Kind: reflect.Map, + }, + { + Value: &Domain{Comment: []byte("f--bar")}, + Err: `xml: comments must not contain "--"`, + }, + // Reject parent chain with attr, never worked; see golang.org/issue/5033. 
+ { + Value: &AttrParent{}, + Err: `xml: X>Y chain not valid with attr flag`, + }, + { + Value: BadAttr{[]string{"X", "Y"}}, + Err: `xml: unsupported type: []string`, + }, +} + +var marshalIndentTests = []struct { + Value interface{} + Prefix string + Indent string + ExpectXML string +}{ + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "<redacted/>", + }, + Prefix: "", + Indent: "\t", + ExpectXML: fmt.Sprintf("<agent handle=\"007\">\n\t<Identity>James Bond</Identity><redacted/>\n</agent>"), + }, +} + +func TestMarshalErrors(t *testing.T) { + for idx, test := range marshalErrorTests { + data, err := Marshal(test.Value) + if err == nil { + t.Errorf("#%d: marshal(%#v) = [success] %q, want error %v", idx, test.Value, data, test.Err) + continue + } + if err.Error() != test.Err { + t.Errorf("#%d: marshal(%#v) = [error] %v, want %v", idx, test.Value, err, test.Err) + } + if test.Kind != reflect.Invalid { + if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind { + t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind) + } + } + } +} + +// Do invertibility testing on the various structures that we test +func TestUnmarshal(t *testing.T) { + for i, test := range marshalTests { + if test.MarshalOnly { + continue + } + if _, ok := test.Value.(*Plain); ok { + continue + } + vt := reflect.TypeOf(test.Value) + dest := reflect.New(vt.Elem()).Interface() + err := Unmarshal([]byte(test.ExpectXML), dest) + + switch fix := dest.(type) { + case *Feed: + fix.Author.InnerXML = "" + for i := range fix.Entry { + fix.Entry[i].Author.InnerXML = "" + } + } + + if err != nil { + t.Errorf("#%d: unexpected error: %#v", i, err) + } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) { + t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want) + } + } +} + +func TestMarshalIndent(t *testing.T) { + for i, test := range marshalIndentTests { + data, err := 
MarshalIndent(test.Value, test.Prefix, test.Indent) + if err != nil { + t.Errorf("#%d: Error: %s", i, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + t.Errorf("#%d: MarshalIndent:\nGot:%s\nWant:\n%s", i, got, want) + } + } +} + +type limitedBytesWriter struct { + w io.Writer + remain int // until writes fail +} + +func (lw *limitedBytesWriter) Write(p []byte) (n int, err error) { + if lw.remain <= 0 { + println("error") + return 0, errors.New("write limit hit") + } + if len(p) > lw.remain { + p = p[:lw.remain] + n, _ = lw.w.Write(p) + lw.remain = 0 + return n, errors.New("write limit hit") + } + n, err = lw.w.Write(p) + lw.remain -= n + return n, err +} + +func TestMarshalWriteErrors(t *testing.T) { + var buf bytes.Buffer + const writeCap = 1024 + w := &limitedBytesWriter{&buf, writeCap} + enc := NewEncoder(w) + var err error + var i int + const n = 4000 + for i = 1; i <= n; i++ { + err = enc.Encode(&Passenger{ + Name: []string{"Alice", "Bob"}, + Weight: 5, + }) + if err != nil { + break + } + } + if err == nil { + t.Error("expected an error") + } + if i == n { + t.Errorf("expected to fail before the end") + } + if buf.Len() != writeCap { + t.Errorf("buf.Len() = %d; want %d", buf.Len(), writeCap) + } +} + +func TestMarshalWriteIOErrors(t *testing.T) { + enc := NewEncoder(errWriter{}) + + expectErr := "unwritable" + err := enc.Encode(&Passenger{}) + if err == nil || err.Error() != expectErr { + t.Errorf("EscapeTest = [error] %v, want %v", err, expectErr) + } +} + +func TestMarshalFlush(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := enc.EncodeToken(CharData("hello world")); err != nil { + t.Fatalf("enc.EncodeToken: %v", err) + } + if buf.Len() > 0 { + t.Fatalf("enc.EncodeToken caused actual write: %q", buf.Bytes()) + } + if err := enc.Flush(); err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if buf.String() != "hello world" { + t.Fatalf("after enc.Flush, buf.String() = %q, want %q", buf.String(), 
"hello world") + } +} + +var encodeElementTests = []struct { + desc string + value interface{} + start StartElement + expectXML string +}{{ + desc: "simple string", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + }, + expectXML: `<a>hello</a>`, +}, { + desc: "string with added attributes", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "x"}, + Value: "y", + }, { + Name: Name{Local: "foo"}, + Value: "bar", + }}, + }, + expectXML: `<a x="y" foo="bar">hello</a>`, +}, { + desc: "start element with default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `<a xmlns="ns"><InXMLNameWithNSTag>hello</InXMLNameWithNSTag></a>`, +}, { + desc: "start element in name space with different default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `<ns2:a xmlns:ns2="ns2" xmlns="ns"><InXMLNameWithNSTag>hello</InXMLNameWithNSTag></ns2:a>`, +}, { + desc: "XMLMarshaler with start element with default name space", + value: &MyMarshalerTest{}, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `<ns2:a xmlns:ns2="ns2" xmlns="ns">hello world</ns2:a>`, +}} + +func TestEncodeElement(t *testing.T) { + for idx, test := range encodeElementTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + err := enc.EncodeElement(test.value, test.start) 
+ if err != nil { + t.Fatalf("enc.EncodeElement: %v", err) + } + err = enc.Flush() + if err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if got, want := buf.String(), test.expectXML; got != want { + t.Errorf("#%d(%s): EncodeElement(%#v, %#v):\nhave %#q\nwant %#q", idx, test.desc, test.value, test.start, got, want) + } + } +} + +func BenchmarkMarshal(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + Marshal(atomValue) + } +} + +func BenchmarkUnmarshal(b *testing.B) { + b.ReportAllocs() + xml := []byte(atomXml) + for i := 0; i < b.N; i++ { + Unmarshal(xml, &Feed{}) + } +} + +// golang.org/issue/6556 +func TestStructPointerMarshal(t *testing.T) { + type A struct { + XMLName string `xml:"a"` + B []interface{} + } + type C struct { + XMLName Name + Value string `xml:"value"` + } + + a := new(A) + a.B = append(a.B, &C{ + XMLName: Name{Local: "c"}, + Value: "x", + }) + + b, err := Marshal(a) + if err != nil { + t.Fatal(err) + } + if x := string(b); x != "<a><c><value>x</value></c></a>" { + t.Fatal(x) + } + var v A + err = Unmarshal(b, &v) + if err != nil { + t.Fatal(err) + } +} + +var encodeTokenTests = []struct { + desc string + toks []Token + want string + err string +}{{ + desc: "start element with name space", + toks: []Token{ + StartElement{Name{"space", "local"}, nil}, + }, + want: `<space:local xmlns:space="space">`, +}, { + desc: "start element with no name", + toks: []Token{ + StartElement{Name{"space", ""}, nil}, + }, + err: "xml: start tag with no name", +}, { + desc: "end element with no name", + toks: []Token{ + EndElement{Name{"space", ""}}, + }, + err: "xml: end tag with no name", +}, { + desc: "char data", + toks: []Token{ + CharData("foo"), + }, + want: `foo`, +}, { + desc: "char data with escaped chars", + toks: []Token{ + CharData(" \t\n"), + }, + want: " &#x9;\n", +}, { + desc: "comment", + toks: []Token{ + Comment("foo"), + }, + want: `<!--foo-->`, +}, { + desc: "comment with invalid content", + toks: []Token{ + Comment("foo-->"), + 
}, + err: "xml: EncodeToken of Comment containing --> marker", +}, { + desc: "proc instruction", + toks: []Token{ + ProcInst{"Target", []byte("Instruction")}, + }, + want: `<?Target Instruction?>`, +}, { + desc: "proc instruction with empty target", + toks: []Token{ + ProcInst{"", []byte("Instruction")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "proc instruction with bad content", + toks: []Token{ + ProcInst{"", []byte("Instruction?>")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "directive", + toks: []Token{ + Directive("foo"), + }, + want: `<!foo>`, +}, { + desc: "more complex directive", + toks: []Token{ + Directive("DOCTYPE doc [ <!ELEMENT doc '>'> <!-- com>ment --> ]"), + }, + want: `<!DOCTYPE doc [ <!ELEMENT doc '>'> <!-- com>ment --> ]>`, +}, { + desc: "directive instruction with bad name", + toks: []Token{ + Directive("foo>"), + }, + err: "xml: EncodeToken of Directive containing wrong < or > markers", +}, { + desc: "end tag without start tag", + toks: []Token{ + EndElement{Name{"foo", "bar"}}, + }, + err: "xml: end tag </bar> without start tag", +}, { + desc: "mismatching end tag local name", + toks: []Token{ + StartElement{Name{"", "foo"}, nil}, + EndElement{Name{"", "bar"}}, + }, + err: "xml: end tag </bar> does not match start tag <foo>", + want: `<foo>`, +}, { + desc: "mismatching end tag namespace", + toks: []Token{ + StartElement{Name{"space", "foo"}, nil}, + EndElement{Name{"another", "foo"}}, + }, + err: "xml: end tag </foo> in namespace another does not match start tag <foo> in namespace space", + want: `<space:foo xmlns:space="space">`, +}, { + desc: "start element with explicit namespace", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + }}, + }, + want: `<x:local xmlns:x="space" x:foo="value">`, +}, { + desc: "start element with explicit namespace and colliding prefix", + toks: []Token{ + 
StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + {Name{"x", "bar"}, "other"}, + }}, + }, + want: `<x:local xmlns:x_1="x" xmlns:x="space" x:foo="value" x_1:bar="other">`, +}, { + desc: "start element using previously defined namespace", + toks: []Token{ + StartElement{Name{"", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"space", "x"}, "y"}, + }}, + }, + want: `<local xmlns:x="space"><x:foo x:x="y">`, +}, { + desc: "nested name space with same prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space1"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space2"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + EndElement{Name{"", "foo"}}, + EndElement{Name{"", "foo"}}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + }, + want: `<foo xmlns:x="space1"><foo xmlns:x="space2"><foo xmlns:space1="space1" space1:a="space1 value" x:b="space2 value"></foo></foo><foo xmlns:space2="space2" x:a="space1 value" space2:b="space2 value">`, +}, { + desc: "start element defining several prefixes for the same name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "a"}, "space"}, + {Name{"xmlns", "b"}, "space"}, + {Name{"space", "x"}, "value"}, + }}, + }, + want: `<a:foo xmlns:a="space" a:x="value">`, +}, { + desc: "nested element redefines name space", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: `<foo xmlns:x="space"><x:foo x:a="value">`, +}, { + desc: "nested element creates alias for default name space", + toks: 
[]Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: `<foo xmlns="space"><foo xmlns:y="space" y:a="value">`, +}, { + desc: "nested element defines default name space with existing prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: `<foo xmlns:x="space"><foo xmlns="space" x:a="value">`, +}, { + desc: "nested element uses empty attribute name space when default ns defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + }}, + }, + want: `<foo xmlns="space"><foo attr="value">`, +}, { + desc: "redefine xmlns", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"foo", "xmlns"}, "space"}, + }}, + }, + err: `xml: cannot redefine xmlns attribute prefix`, +}, { + desc: "xmlns with explicit name space #1", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xml", "xmlns"}, "space"}, + }}, + }, + want: `<foo xmlns="space">`, +}, { + desc: "xmlns with explicit name space #2", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{xmlURL, "xmlns"}, "space"}, + }}, + }, + want: `<foo xmlns="space">`, +}, { + desc: "empty name space declaration is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "foo"}, ""}, + }}, + }, + want: `<foo>`, +}, { + desc: "attribute with no name is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", ""}, "value"}, + }}, + }, + want: `<foo>`, +}, { + desc: "namespace URL with non-valid name", + toks: []Token{ + StartElement{Name{"/34", "foo"}, []Attr{ + {Name{"/34", "x"}, 
"value"}, + }}, + }, + want: `<_:foo xmlns:_="/34" _:x="value">`, +}, { + desc: "nested element resets default namespace to empty", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, ""}, + {Name{"", "x"}, "value"}, + {Name{"space", "x"}, "value"}, + }}, + }, + want: `<foo xmlns="space"><foo xmlns:space="space" xmlns="" x="value" space:x="value">`, +}, { + desc: "nested element requires empty default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, nil}, + }, + want: `<foo xmlns="space"><foo xmlns="">`, +}, { + desc: "attribute uses name space from xmlns", + toks: []Token{ + StartElement{Name{"some/space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + {Name{"some/space", "other"}, "other value"}, + }}, + }, + want: `<space:foo xmlns:space="some/space" attr="value" space:other="other value">`, +}, { + desc: "default name space should not be used by attributes", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"xmlns", "bar"}, "space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: `<foo xmlns:bar="space" xmlns="space" bar:baz="foo"><baz></baz></foo>`, +}, { + desc: "default name space not used by attributes, not explicitly defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: `<foo xmlns:space="space" xmlns="space" space:baz="foo"><baz></baz></foo>`, +}, { + desc: "impossible xmlns declaration", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, 
"space"}, + }}, + StartElement{Name{"space", "bar"}, []Attr{ + {Name{"space", "attr"}, "value"}, + }}, + }, + want: `<foo><space:bar xmlns:space="space" space:attr="value">`, +}} + +func TestEncodeToken(t *testing.T) { +loop: + for i, tt := range encodeTokenTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + var err error + for j, tok := range tt.toks { + err = enc.EncodeToken(tok) + if err != nil && j < len(tt.toks)-1 { + t.Errorf("#%d %s token #%d: %v", i, tt.desc, j, err) + continue loop + } + } + errorf := func(f string, a ...interface{}) { + t.Errorf("#%d %s token #%d:%s", i, tt.desc, len(tt.toks)-1, fmt.Sprintf(f, a...)) + } + switch { + case tt.err != "" && err == nil: + errorf(" expected error; got none") + continue + case tt.err == "" && err != nil: + errorf(" got error: %v", err) + continue + case tt.err != "" && err != nil && tt.err != err.Error(): + errorf(" error mismatch; got %v, want %v", err, tt.err) + continue + } + if err := enc.Flush(); err != nil { + errorf(" %v", err) + continue + } + if got := buf.String(); got != tt.want { + errorf("\ngot %v\nwant %v", got, tt.want) + continue + } + } +} + +func TestProcInstEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s", err) + } + + if err := enc.EncodeToken(ProcInst{"Target", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to add non-xml target ProcInst") + } + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err == nil { + t.Fatalf("enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token") + } +} + +func TestDecodeEncode(t *testing.T) { + var in, out bytes.Buffer + in.WriteString(`<?xml version="1.0" encoding="UTF-8"?> +<?Target Instruction?> +<root> +</root> +`) + dec := NewDecoder(&in) + enc 
:= NewEncoder(&out) + for tok, err := dec.Token(); err == nil; tok, err = dec.Token() { + err = enc.EncodeToken(tok) + if err != nil { + t.Fatalf("enc.EncodeToken: Unable to encode token (%#v), %v", tok, err) + } + } +} + +// Issue 9796. Used to fail with GORACE="halt_on_error=1" -race. +func TestRace9796(t *testing.T) { + type A struct{} + type B struct { + C []A `xml:"X>Y"` + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + Marshal(B{[]A{A{}}}) + wg.Done() + }() + } + wg.Wait() +} + +func TestIsValidDirective(t *testing.T) { + testOK := []string{ + "<>", + "< < > >", + "<!DOCTYPE '<' '>' '>' <!--nothing-->>", + "<!DOCTYPE doc [ <!ELEMENT doc ANY> <!ELEMENT doc ANY> ]>", + "<!DOCTYPE doc [ <!ELEMENT doc \"ANY> '<' <!E\" LEMENT '>' doc ANY> ]>", + "<!DOCTYPE doc <!-- just>>>> a < comment --> [ <!ITEM anything> ] >", + } + testKO := []string{ + "<", + ">", + "<!--", + "-->", + "< > > < < >", + "<!dummy <!-- > -->", + "<!DOCTYPE doc '>", + "<!DOCTYPE doc '>'", + "<!DOCTYPE doc <!--comment>", + } + for _, s := range testOK { + if !isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be valid", s) + } + } + for _, s := range testKO { + if isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be invalid", s) + } + } +} + +// Issue 11719. EncodeToken used to silently eat tokens with an invalid type. 
+func TestSimpleUseOfEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := enc.EncodeToken(&StartElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(&EndElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(StartElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: StartElement %s", err) + } + if err := enc.EncodeToken(EndElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: EndElement %s", err) + } + if err := enc.EncodeToken(Universe{}); err == nil { + t.Errorf("enc.EncodeToken: invalid type not caught") + } + if err := enc.Flush(); err != nil { + t.Errorf("enc.Flush: %s", err) + } + if buf.Len() == 0 { + t.Errorf("enc.EncodeToken: empty buffer") + } + want := "<object2></object2>" + if buf.String() != want { + t.Errorf("enc.EncodeToken: expected %q; got %q", want, buf.String()) + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read.go b/vendor/golang.org/x/net/webdav/internal/xml/read.go new file mode 100644 index 00000000..75b9f2ba --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read.go @@ -0,0 +1,692 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +// BUG(rsc): Mapping between XML elements and data structures is inherently flawed: +// an XML element is an order-dependent collection of anonymous +// values, while a data structure is an order-independent collection +// of named values. +// See package json for a textual representation more suitable +// to data structures. 
+ +// Unmarshal parses the XML-encoded data and stores the result in +// the value pointed to by v, which must be an arbitrary struct, +// slice, or string. Well-formed data that does not fit into v is +// discarded. +// +// Because Unmarshal uses the reflect package, it can only assign +// to exported (upper case) fields. Unmarshal uses a case-sensitive +// comparison to match XML element names to tag values and struct +// field names. +// +// Unmarshal maps an XML element to a struct using the following rules. +// In the rules, the tag of a field refers to the value associated with the +// key 'xml' in the struct field's tag (see the example above). +// +// * If the struct has a field of type []byte or string with tag +// ",innerxml", Unmarshal accumulates the raw XML nested inside the +// element in that field. The rest of the rules still apply. +// +// * If the struct has a field named XMLName of type xml.Name, +// Unmarshal records the element name in that field. +// +// * If the XMLName field has an associated tag of the form +// "name" or "namespace-URL name", the XML element must have +// the given name (and, optionally, name space) or else Unmarshal +// returns an error. +// +// * If the XML element has an attribute whose name matches a +// struct field name with an associated tag containing ",attr" or +// the explicit name in a struct field tag of the form "name,attr", +// Unmarshal records the attribute value in that field. +// +// * If the XML element contains character data, that data is +// accumulated in the first struct field that has tag ",chardata". +// The struct field may have type []byte or string. +// If there is no such field, the character data is discarded. +// +// * If the XML element contains comments, they are accumulated in +// the first struct field that has tag ",comment". The struct +// field may have type []byte or string. If there is no such +// field, the comments are discarded. 
+// +// * If the XML element contains a sub-element whose name matches +// the prefix of a tag formatted as "a" or "a>b>c", unmarshal +// will descend into the XML structure looking for elements with the +// given names, and will map the innermost elements to that struct +// field. A tag starting with ">" is equivalent to one starting +// with the field name followed by ">". +// +// * If the XML element contains a sub-element whose name matches +// a struct field's XMLName tag and the struct field has no +// explicit name tag as per the previous rule, unmarshal maps +// the sub-element to that struct field. +// +// * If the XML element contains a sub-element whose name matches a +// field without any mode flags (",attr", ",chardata", etc), Unmarshal +// maps the sub-element to that struct field. +// +// * If the XML element contains a sub-element that hasn't matched any +// of the above rules and the struct has a field with tag ",any", +// unmarshal maps the sub-element to that struct field. +// +// * An anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// * A struct field with tag "-" is never unmarshalled into. +// +// Unmarshal maps an XML element to a string or []byte by saving the +// concatenation of that element's character data in the string or +// []byte. The saved []byte is never nil. +// +// Unmarshal maps an attribute value to a string or []byte by saving +// the value in the string or slice. +// +// Unmarshal maps an XML element to a slice by extending the length of +// the slice and mapping the element to the newly created value. +// +// Unmarshal maps an XML element or attribute value to a bool by +// setting it to the boolean value represented by the string. +// +// Unmarshal maps an XML element or attribute value to an integer or +// floating-point field by setting the field to the result of +// interpreting the string value in decimal. There is no check for +// overflow. 
+// +// Unmarshal maps an XML element to an xml.Name by recording the +// element name. +// +// Unmarshal maps an XML element to a pointer by setting the pointer +// to a freshly allocated value and then mapping the element to that value. +// +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +// Decode works like xml.Unmarshal, except it reads the decoder +// stream to find the start element. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeElement(v, nil) +} + +// DecodeElement works like xml.Unmarshal except that it takes +// a pointer to the start XML element to decode into v. +// It is useful when a client reads some raw XML tokens itself +// but also wants to defer to Unmarshal for some elements. +func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer passed to Unmarshal") + } + return d.unmarshal(val.Elem(), start) +} + +// An UnmarshalError represents an error in the unmarshalling process. +type UnmarshalError string + +func (e UnmarshalError) Error() string { return string(e) } + +// Unmarshaler is the interface implemented by objects that can unmarshal +// an XML element description of themselves. +// +// UnmarshalXML decodes a single XML element +// beginning with the given start element. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXML must consume exactly one XML element. +// One common implementation strategy is to unmarshal into +// a separate value with a layout matching the expected XML +// using d.DecodeElement, and then to copy the data from +// that value into the receiver. +// Another common strategy is to use d.Token to process the +// XML object one token at a time. +// UnmarshalXML may not use d.RawToken. 
+type Unmarshaler interface { + UnmarshalXML(d *Decoder, start StartElement) error +} + +// UnmarshalerAttr is the interface implemented by objects that can unmarshal +// an XML attribute description of themselves. +// +// UnmarshalXMLAttr decodes a single XML attribute. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type UnmarshalerAttr interface { + UnmarshalXMLAttr(attr Attr) error +} + +// receiverType returns the receiver type to use in an expression like "%s.MethodName". +func receiverType(val interface{}) string { + t := reflect.TypeOf(val) + if t.Name() != "" { + return t.String() + } + return "(" + t.String() + ")" +} + +// unmarshalInterface unmarshals a single XML element into val. +// start is the opening tag of the element. +func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { + // Record that decoder must stop at end tag corresponding to start. + p.pushEOF() + + p.unmarshalDepth++ + err := val.UnmarshalXML(p, *start) + p.unmarshalDepth-- + if err != nil { + p.popEOF() + return err + } + + if !p.popEOF() { + return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) + } + + return nil +} + +// unmarshalTextInterface unmarshals a single XML element into val. +// The chardata contained in the element (but not its children) +// is passed to the text unmarshaler. +func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { + var buf []byte + depth := 1 + for depth > 0 { + t, err := p.Token() + if err != nil { + return err + } + switch t := t.(type) { + case CharData: + if depth == 1 { + buf = append(buf, t...) + } + case StartElement: + depth++ + case EndElement: + depth-- + } + } + return val.UnmarshalText(buf) +} + +// unmarshalAttr unmarshals a single XML attribute into val. 
+func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { + return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + } + + // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + } + + copyValue(val, []byte(attr.Value)) + return nil +} + +var ( + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Unmarshal a single XML element into val. +func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { + // Find start element if we need it. + if start == nil { + for { + tok, err := p.Token() + if err != nil { + return err + } + if t, ok := tok.(StartElement); ok { + start = &t + break + } + } + } + + // Load value from interface, but only if the result will be + // usefully addressable. 
+ if val.Kind() == reflect.Interface && !val.IsNil() { + e := val.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + val = e + } + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { + return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + } + } + + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + } + } + + var ( + data []byte + saveData reflect.Value + comment []byte + saveComment reflect.Value + saveXML reflect.Value + saveXMLIndex int + saveXMLData []byte + saveAny reflect.Value + sv reflect.Value + tinfo *typeInfo + err error + ) + + switch v := val; v.Kind() { + default: + return errors.New("unknown type " + v.Type().String()) + + case reflect.Interface: + // TODO: For now, simply ignore the field. In the near + // future we may choose to unmarshal the start + // element on it, if not nil. + return p.Skip() + + case reflect.Slice: + typ := v.Type() + if typ.Elem().Kind() == reflect.Uint8 { + // []byte + saveData = v + break + } + + // Slice of element values. + // Grow slice. + n := v.Len() + if n >= v.Cap() { + ncap := 2 * n + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(typ, n, ncap) + reflect.Copy(new, v) + v.Set(new) + } + v.SetLen(n + 1) + + // Recur to read element into slice. 
+ if err := p.unmarshal(v.Index(n), start); err != nil { + v.SetLen(n) + return err + } + return nil + + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: + saveData = v + + case reflect.Struct: + typ := v.Type() + if typ == nameType { + v.Set(reflect.ValueOf(start.Name)) + break + } + + sv = v + tinfo, err = getTypeInfo(typ) + if err != nil { + return err + } + + // Validate and assign element name. + if tinfo.xmlname != nil { + finfo := tinfo.xmlname + if finfo.name != "" && finfo.name != start.Name.Local { + return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") + } + if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " + if start.Name.Space == "" { + e += "no name space" + } else { + e += start.Name.Space + } + return UnmarshalError(e) + } + fv := finfo.value(sv) + if _, ok := fv.Interface().(Name); ok { + fv.Set(reflect.ValueOf(start.Name)) + } + } + + // Assign attributes. + // Also, determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv) + // Look for attribute. 
+ for _, a := range start.Attr { + if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { + if err := p.unmarshalAttr(strv, a); err != nil { + return err + } + break + } + } + + case fCharData: + if !saveData.IsValid() { + saveData = finfo.value(sv) + } + + case fComment: + if !saveComment.IsValid() { + saveComment = finfo.value(sv) + } + + case fAny, fAny | fElement: + if !saveAny.IsValid() { + saveAny = finfo.value(sv) + } + + case fInnerXml: + if !saveXML.IsValid() { + saveXML = finfo.value(sv) + if p.saved == nil { + saveXMLIndex = 0 + p.saved = new(bytes.Buffer) + } else { + saveXMLIndex = p.savedOffset() + } + } + } + } + } + + // Find end element. + // Process sub-elements along the way. +Loop: + for { + var savedOffset int + if saveXML.IsValid() { + savedOffset = p.savedOffset() + } + tok, err := p.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case StartElement: + consumed := false + if sv.IsValid() { + consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + if err != nil { + return err + } + if !consumed && saveAny.IsValid() { + consumed = true + if err := p.unmarshal(saveAny, &t); err != nil { + return err + } + } + } + if !consumed { + if err := p.Skip(); err != nil { + return err + } + } + + case EndElement: + if saveXML.IsValid() { + saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + if saveXMLIndex == 0 { + p.saved = nil + } + } + break Loop + + case CharData: + if saveData.IsValid() { + data = append(data, t...) + } + + case Comment: + if saveComment.IsValid() { + comment = append(comment, t...) 
+ } + } + } + + if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { + if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + + if saveData.IsValid() && saveData.CanAddr() { + pv := saveData.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + } + + if err := copyValue(saveData, data); err != nil { + return err + } + + switch t := saveComment; t.Kind() { + case reflect.String: + t.SetString(string(comment)) + case reflect.Slice: + t.Set(reflect.ValueOf(comment)) + } + + switch t := saveXML; t.Kind() { + case reflect.String: + t.SetString(string(saveXMLData)) + case reflect.Slice: + t.Set(reflect.ValueOf(saveXMLData)) + } + + return nil +} + +func copyValue(dst reflect.Value, src []byte) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Ptr { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + // Save accumulated data. + switch dst.Kind() { + case reflect.Invalid: + // Probably a comment. 
+ default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetUint(utmp) + case reflect.Float32, reflect.Float64: + ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if err != nil { + return err + } + dst.SetFloat(ftmp) + case reflect.Bool: + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + case reflect.Slice: + if len(src) == 0 { + // non-nil to flag presence + src = []byte{} + } + dst.SetBytes(src) + } + return nil +} + +// unmarshalPath walks down an XML structure looking for wanted +// paths, and calls unmarshal on them. +// The consumed result tells whether XML elements have been consumed +// from the Decoder until start's matching end element, or if it's +// still untouched because start is uninteresting for sv's fields. +func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { + recurse := false +Loop: + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + continue + } + for j := range parents { + if parents[j] != finfo.parents[j] { + continue Loop + } + } + if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { + // It's a perfect match, unmarshal the field. 
+ return true, p.unmarshal(finfo.value(sv), start) + } + if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { + // It's a prefix for the field. Break and recurse + // since it's not ok for one field path to be itself + // the prefix for another field path. + recurse = true + + // We can reuse the same slice as long as we + // don't try to append to it. + parents = finfo.parents[:len(parents)+1] + break + } + } + if !recurse { + // We have no business with this element. + return false, nil + } + // The element is not a perfect match for any field, but one + // or more fields have the path to this element as a parent + // prefix. Recurse and attempt to match these. + for { + var tok Token + tok, err = p.Token() + if err != nil { + return true, err + } + switch t := tok.(type) { + case StartElement: + consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + if err != nil { + return true, err + } + if !consumed2 { + if err := p.Skip(); err != nil { + return true, err + } + } + case EndElement: + return true, nil + } + } +} + +// Skip reads tokens until it has consumed the end element +// matching the most recent start element already consumed. +// It recurs if it encounters a start element, so it can be used to +// skip nested structures. +// It returns nil if it finds an end element matching the start +// element; otherwise it returns an error describing the problem. +func (d *Decoder) Skip() error { + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok.(type) { + case StartElement: + if err := d.Skip(); err != nil { + return err + } + case EndElement: + return nil + } + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read_test.go b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go new file mode 100644 index 00000000..02f1e10c --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go @@ -0,0 +1,744 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + "testing" + "time" +) + +// Stripped down Atom feed data structures. + +func TestUnmarshalFeed(t *testing.T) { + var f Feed + if err := Unmarshal([]byte(atomFeedString), &f); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(f, atomFeed) { + t.Fatalf("have %#v\nwant %#v", f, atomFeed) + } +} + +// hget http://codereview.appspot.com/rss/mine/rsc +const atomFeedString = ` +<?xml version="1.0" encoding="utf-8"?> +<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us" updated="2009-10-04T01:35:58+00:00"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><link href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></link><id>http://codereview.appspot.com/</id><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub +</title><link href="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html"> + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a &amp;lt;link rel=&amp;quot;hub&amp;quot; href=&amp;quot;hub-server&amp;quot;&amp;gt; tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can&amp;#39;t quite get the server to work, but I think the bug +is not in my code. 
I think that the server expects to be +able to grab the feed and see the feed&amp;#39;s actual URL in +the link rel=&amp;quot;self&amp;quot;, but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +</summary></entry><entry><title>rietveld: correct tab handling +</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html"> + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn&amp;#39;t know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. 
+ + +</summary></entry></feed> ` + +type Feed struct { + XMLName Name `xml:"http://www.w3.org/2005/Atom feed"` + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated,attr"` + Author Person `xml:"author"` + Entry []Entry `xml:"entry"` +} + +type Entry struct { + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated"` + Author Person `xml:"author"` + Summary Text `xml:"summary"` +} + +type Link struct { + Rel string `xml:"rel,attr,omitempty"` + Href string `xml:"href,attr"` +} + +type Person struct { + Name string `xml:"name"` + URI string `xml:"uri"` + Email string `xml:"email"` + InnerXML string `xml:",innerxml"` +} + +type Text struct { + Type string `xml:"type,attr,omitempty"` + Body string `xml:",chardata"` +} + +var atomFeed = Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Code Review - My issues", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/"}, + {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"}, + }, + Id: "http://codereview.appspot.com/", + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "rietveld<>", + InnerXML: "<name>rietveld&lt;&gt;</name>", + }, + Entry: []Entry{ + { + Title: "rietveld: an attempt at pubsubhubbub\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/126085"}, + }, + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "<name>email-address-removed</name>", + }, + Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a", + Summary: Text{ + Type: "html", + Body: ` + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. 
add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can&#39;t quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed&#39;s actual URL in +the link rel=&quot;self&quot;, but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +`, + }, + }, + { + Title: "rietveld: correct tab handling\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/124106"}, + }, + Updated: ParseTime("2009-10-03T23:02:17+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "<name>email-address-removed</name>", + }, + Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a", + Summary: Text{ + Type: "html", + Body: ` + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn&#39;t know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. 
+ +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + +`, + }, + }, + }, +} + +const pathTestString = ` +<Result> + <Before>1</Before> + <Items> + <Item1> + <Value>A</Value> + </Item1> + <Item2> + <Value>B</Value> + </Item2> + <Item1> + <Value>C</Value> + <Value>D</Value> + </Item1> + <_> + <Value>E</Value> + </_> + </Items> + <After>2</After> +</Result> +` + +type PathTestItem struct { + Value string +} + +type PathTestA struct { + Items []PathTestItem `xml:">Item1"` + Before, After string +} + +type PathTestB struct { + Other []PathTestItem `xml:"Items>Item1"` + Before, After string +} + +type PathTestC struct { + Values1 []string `xml:"Items>Item1>Value"` + Values2 []string `xml:"Items>Item2>Value"` + Before, After string +} + +type PathTestSet struct { + Item1 []PathTestItem +} + +type PathTestD struct { + Other PathTestSet `xml:"Items"` + Before, After string +} + +type PathTestE struct { + Underline string `xml:"Items>_>Value"` + Before, After string +} + +var pathTests = []interface{}{ + &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"}, + &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"}, + &PathTestE{Underline: "E", Before: "1", After: "2"}, +} + +func TestUnmarshalPaths(t *testing.T) { + for _, pt := range pathTests { + v := reflect.New(reflect.TypeOf(pt).Elem()).Interface() + if err := Unmarshal([]byte(pathTestString), v); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(v, pt) { + t.Fatalf("have %#v\nwant %#v", v, pt) + } + } +} + +type BadPathTestA struct { + First 
string `xml:"items>item1"` + Other string `xml:"items>item2"` + Second string `xml:"items"` +} + +type BadPathTestB struct { + Other string `xml:"items>item2>value"` + First string `xml:"items>item1"` + Second string `xml:"items>item1>value"` +} + +type BadPathTestC struct { + First string + Second string `xml:"First"` +} + +type BadPathTestD struct { + BadPathEmbeddedA + BadPathEmbeddedB +} + +type BadPathEmbeddedA struct { + First string +} + +type BadPathEmbeddedB struct { + Second string `xml:"First"` +} + +var badPathTests = []struct { + v, e interface{} +}{ + {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}}, + {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}}, + {&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}}, + {&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}}, +} + +func TestUnmarshalBadPaths(t *testing.T) { + for _, tt := range badPathTests { + err := Unmarshal([]byte(pathTestString), tt.v) + if !reflect.DeepEqual(err, tt.e) { + t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e) + } + } +} + +const OK = "OK" +const withoutNameTypeData = ` +<?xml version="1.0" charset="utf-8"?> +<Test3 Attr="OK" />` + +type TestThree struct { + XMLName Name `xml:"Test3"` + Attr string `xml:",attr"` +} + +func TestUnmarshalWithoutNameType(t *testing.T) { + var x TestThree + if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if x.Attr != OK { + t.Fatalf("have %v\nwant %v", x.Attr, OK) + } +} + +func TestUnmarshalAttr(t *testing.T) { + type ParamVal struct { + Int int `xml:"int,attr"` + } + + type ParamPtr struct { + Int *int `xml:"int,attr"` + } + + type ParamStringPtr struct { + Int *string `xml:"int,attr"` + } + + x := []byte(`<Param int="1" />`) + + p1 := 
&ParamPtr{} + if err := Unmarshal(x, p1); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p1.Int == nil { + t.Fatalf("Unmarshal failed in to *int field") + } else if *p1.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1) + } + + p2 := &ParamVal{} + if err := Unmarshal(x, p2); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p2.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1) + } + + p3 := &ParamStringPtr{} + if err := Unmarshal(x, p3); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p3.Int == nil { + t.Fatalf("Unmarshal failed in to *string field") + } else if *p3.Int != "1" { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1) + } +} + +type Tables struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table"` + FTable string `xml:"http://www.w3schools.com/furniture table"` +} + +var tables = []struct { + xml string + tab Tables + ns string +}{ + { + xml: `<Tables>` + + `<table xmlns="http://www.w3.org/TR/html4/">hello</table>` + + `<table xmlns="http://www.w3schools.com/furniture">world</table>` + + `</Tables>`, + tab: Tables{"hello", "world"}, + }, + { + xml: `<Tables>` + + `<table xmlns="http://www.w3schools.com/furniture">world</table>` + + `<table xmlns="http://www.w3.org/TR/html4/">hello</table>` + + `</Tables>`, + tab: Tables{"hello", "world"}, + }, + { + xml: `<Tables xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/">` + + `<f:table>world</f:table>` + + `<h:table>hello</h:table>` + + `</Tables>`, + tab: Tables{"hello", "world"}, + }, + { + xml: `<Tables>` + + `<table>bogus</table>` + + `</Tables>`, + tab: Tables{}, + }, + { + xml: `<Tables>` + + `<table>only</table>` + + `</Tables>`, + tab: Tables{HTable: "only"}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: `<Tables>` + + `<table>only</table>` + + `</Tables>`, + tab: Tables{FTable: "only"}, + ns: "http://www.w3schools.com/furniture", + }, + { + 
xml: `<Tables>` + + `<table>only</table>` + + `</Tables>`, + tab: Tables{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNS(t *testing.T) { + for i, tt := range tables { + var dst Tables + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestRoundTrip(t *testing.T) { + // From issue 7535 + const s = `<ex:element xmlns:ex="http://example.com/schema"></ex:element>` + in := bytes.NewBufferString(s) + for i := 0; i < 10; i++ { + out := &bytes.Buffer{} + d := NewDecoder(in) + e := NewEncoder(out) + + for { + t, err := d.Token() + if err == io.EOF { + break + } + if err != nil { + fmt.Println("failed:", err) + return + } + e.EncodeToken(t) + } + e.Flush() + in = out + } + if got := in.String(); got != s { + t.Errorf("have: %q\nwant: %q\n", got, s) + } +} + +func TestMarshalNS(t *testing.T) { + dst := Tables{"hello", "world"} + data, err := Marshal(&dst) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `<Tables><table xmlns="http://www.w3.org/TR/html4/">hello</table><table xmlns="http://www.w3schools.com/furniture">world</table></Tables>` + str := string(data) + if str != want { + t.Errorf("have: %q\nwant: %q\n", str, want) + } +} + +type TableAttrs struct { + TAttr TAttr +} + +type TAttr struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"` + FTable string `xml:"http://www.w3schools.com/furniture table,attr"` + Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"` + Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"` + Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"` + Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"` + Other4 string 
`xml:"http://golang.org/2/json/ other,attr,omitempty"` +} + +var tableAttrs = []struct { + xml string + tab TableAttrs + ns string +}{ + { + xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` + + `h:table="hello" f:table="world" ` + + `/></TableAttrs>`, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: `<TableAttrs><TAttr xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/" ` + + `h:table="hello" f:table="world" ` + + `/></TableAttrs>`, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: `<TableAttrs><TAttr ` + + `h:table="hello" f:table="world" xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/" ` + + `/></TableAttrs>`, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + // Default space does not apply to attribute names. + xml: `<TableAttrs xmlns="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` + + `h:table="hello" table="world" ` + + `/></TableAttrs>`, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + }, + { + // Default space does not apply to attribute names. + xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture"><TAttr xmlns="http://www.w3.org/TR/html4/" ` + + `table="hello" f:table="world" ` + + `/></TableAttrs>`, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + }, + { + xml: `<TableAttrs><TAttr ` + + `table="bogus" ` + + `/></TableAttrs>`, + tab: TableAttrs{}, + }, + { + // Default space does not apply to attribute names. + xml: `<TableAttrs xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` + + `h:table="hello" table="world" ` + + `/></TableAttrs>`, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + ns: "http://www.w3schools.com/furniture", + }, + { + // Default space does not apply to attribute names. 
+ xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture"><TAttr ` + + `table="hello" f:table="world" ` + + `/></TableAttrs>`, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: `<TableAttrs><TAttr ` + + `table="bogus" ` + + `/></TableAttrs>`, + tab: TableAttrs{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNSAttr(t *testing.T) { + for i, tt := range tableAttrs { + var dst TableAttrs + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestMarshalNSAttr(t *testing.T) { + src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}} + data, err := Marshal(&src) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `<TableAttrs><TAttr xmlns:json_1="http://golang.org/2/json/" xmlns:json="http://golang.org/json/" xmlns:_xmlfoo="http://golang.org/xmlfoo/" xmlns:_xml="http://golang.org/xml/" xmlns:furniture="http://www.w3schools.com/furniture" xmlns:html4="http://www.w3.org/TR/html4/" html4:table="hello" furniture:table="world" xml:lang="en_US" _xml:other="other1" _xmlfoo:other="other2" json:other="other3" json_1:other="other4"></TAttr></TableAttrs>` + str := string(data) + if str != want { + t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want) + } + + var dst TableAttrs + if err := Unmarshal(data, &dst); err != nil { + t.Errorf("Unmarshal: %v", err) + } + + if dst != src { + t.Errorf("Unmarshal = %q, want %q", dst, src) + } +} + +type MyCharData struct { + body string +} + +func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error { + for { + t, err := d.Token() + if err == io.EOF { // found end of element + break + } + if err != 
nil { + return err + } + if char, ok := t.(CharData); ok { + m.body += string(char) + } + } + return nil +} + +var _ Unmarshaler = (*MyCharData)(nil) + +func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error { + panic("must not call") +} + +type MyAttr struct { + attr string +} + +func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error { + m.attr = attr.Value + return nil +} + +var _ UnmarshalerAttr = (*MyAttr)(nil) + +type MyStruct struct { + Data *MyCharData + Attr *MyAttr `xml:",attr"` + + Data2 MyCharData + Attr2 MyAttr `xml:",attr"` +} + +func TestUnmarshaler(t *testing.T) { + xml := `<?xml version="1.0" encoding="utf-8"?> + <MyStruct Attr="attr1" Attr2="attr2"> + <Data>hello <!-- comment -->world</Data> + <Data2>howdy <!-- comment -->world</Data2> + </MyStruct> + ` + + var m MyStruct + if err := Unmarshal([]byte(xml), &m); err != nil { + t.Fatal(err) + } + + if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" { + t.Errorf("m=%#+v\n", m) + } +} + +type Pea struct { + Cotelydon string +} + +type Pod struct { + Pea interface{} `xml:"Pea"` +} + +// https://golang.org/issue/6836 +func TestUnmarshalIntoInterface(t *testing.T) { + pod := new(Pod) + pod.Pea = new(Pea) + xml := `<Pod><Pea><Cotelydon>Green stuff</Cotelydon></Pea></Pod>` + err := Unmarshal([]byte(xml), pod) + if err != nil { + t.Fatalf("failed to unmarshal %q: %v", xml, err) + } + pea, ok := pod.Pea.(*Pea) + if !ok { + t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea) + } + have, want := pea.Cotelydon, "Green stuff" + if have != want { + t.Errorf("failed to unmarshal into interface, have %q want %q", have, want) + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go new file mode 100644 index 00000000..c9a6421f --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go @@ -0,0 +1,371 @@ +// 
Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// typeInfo holds details for the xml representation of a type. +type typeInfo struct { + xmlname *fieldInfo + fields []fieldInfo +} + +// fieldInfo holds details for the xml representation of a single field. +type fieldInfo struct { + idx []int + name string + xmlns string + flags fieldFlags + parents []string +} + +type fieldFlags int + +const ( + fElement fieldFlags = 1 << iota + fAttr + fCharData + fInnerXml + fComment + fAny + + fOmitEmpty + + fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny +) + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +var nameType = reflect.TypeOf(Name{}) + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. +func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct && typ != nameType { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + if tinfo.xmlname == nil { + tinfo.xmlname = inner.xmlname + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) 
+ if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + if f.Name == "XMLName" { + tinfo.xmlname = finfo + continue + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. +func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("xml") + if i := strings.Index(tag, " "); i >= 0 { + finfo.xmlns, tag = tag[:i], tag[i+1:] + } + + // Parse flags. + tokens := strings.Split(tag, ",") + if len(tokens) == 1 { + finfo.flags = fElement + } else { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "attr": + finfo.flags |= fAttr + case "chardata": + finfo.flags |= fCharData + case "innerxml": + finfo.flags |= fInnerXml + case "comment": + finfo.flags |= fComment + case "any": + finfo.flags |= fAny + case "omitempty": + finfo.flags |= fOmitEmpty + } + } + + // Validate the flags used. + valid := true + switch mode := finfo.flags & fMode; mode { + case 0: + finfo.flags |= fElement + case fAttr, fCharData, fInnerXml, fComment, fAny: + if f.Name == "XMLName" || tag != "" && mode != fAttr { + valid = false + } + default: + // This will also catch multiple modes in a single field. + valid = false + } + if finfo.flags&fMode == fAny { + finfo.flags |= fElement + } + if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { + valid = false + } + if !valid { + return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + } + + // Use of xmlns without a name is not allowed. 
+ if finfo.xmlns != "" && tag == "" { + return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + + if f.Name == "XMLName" { + // The XMLName field records the XML element name. Don't + // process it as usual because its name should default to + // empty rather than to the field name. + finfo.name = tag + return finfo, nil + } + + if tag == "" { + // If the name part of the tag is completely empty, get + // default from XMLName of underlying struct if feasible, + // or field name otherwise. + if xmlname := lookupXMLName(f.Type); xmlname != nil { + finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name + } else { + finfo.name = f.Name + } + return finfo, nil + } + + if finfo.xmlns == "" && finfo.flags&fAttr == 0 { + // If it's an element no namespace specified, get the default + // from the XMLName of enclosing struct if possible. + if xmlname := lookupXMLName(typ); xmlname != nil { + finfo.xmlns = xmlname.xmlns + } + } + + // Prepare field name and parents. + parents := strings.Split(tag, ">") + if parents[0] == "" { + parents[0] = f.Name + } + if parents[len(parents)-1] == "" { + return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) + } + finfo.name = parents[len(parents)-1] + if len(parents) > 1 { + if (finfo.flags & fElement) == 0 { + return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) + } + finfo.parents = parents[:len(parents)-1] + } + + // If the field type has an XMLName field, the names must match + // so that the behavior of both marshalling and unmarshalling + // is straightforward and unambiguous. 
+ if finfo.flags&fElement != 0 { + ftyp := f.Type + xmlname := lookupXMLName(ftyp) + if xmlname != nil && xmlname.name != finfo.name { + return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", + finfo.name, typ, f.Name, xmlname.name, ftyp) + } + } + return finfo, nil +} + +// lookupXMLName returns the fieldInfo for typ's XMLName field +// in case it exists and has a valid xml field tag, otherwise +// it returns nil. +func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() != reflect.Struct { + return nil + } + for i, n := 0, typ.NumField(); i < n; i++ { + f := typ.Field(i) + if f.Name != "XMLName" { + continue + } + finfo, err := structFieldInfo(typ, &f) + if finfo.name != "" && err == nil { + return finfo + } + // Also consider errors as a non-existent field tag + // and let getTypeInfo itself report the error. + break + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int +Loop: + // First, figure all conflicts. Most working code will have none. 
+ for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if oldf.flags&fMode != newf.flags&fMode { + continue + } + if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { + continue + } + minl := min(len(newf.parents), len(oldf.parents)) + for p := 0; p < minl; p++ { + if oldf.parents[p] != newf.parents[p] { + continue Loop + } + } + if len(oldf.parents) > len(newf.parents) { + if oldf.parents[len(newf.parents)] == newf.name { + conflicts = append(conflicts, i) + } + } else if len(oldf.parents) < len(newf.parents) { + if newf.parents[len(oldf.parents)] == oldf.name { + conflicts = append(conflicts, i) + } + } else { + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + } + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. + for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, if any of them is at the same depth level, it's an error. + for _, i := range conflicts { + oldf := &tinfo.fields[i] + if len(oldf.idx) == len(newf.idx) { + f1 := typ.FieldByIndex(oldf.idx) + f2 := typ.FieldByIndex(newf.idx) + return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. + for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// A TagPathError represents an error in the unmarshalling process +// caused by the use of field tags with conflicting paths. 
+type TagPathError struct { + Struct reflect.Type + Field1, Tag1 string + Field2, Tag2 string +} + +func (e *TagPathError) Error() string { + return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) +} + +// value returns v's field value corresponding to finfo. +// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/xml.go b/vendor/golang.org/x/net/webdav/internal/xml/xml.go new file mode 100644 index 00000000..ffab4a70 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/xml.go @@ -0,0 +1,1998 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xml implements a simple XML 1.0 parser that +// understands XML name spaces. +package xml + +// References: +// Annotated XML spec: http://www.xml.com/axml/testaxml.htm +// XML name spaces: http://www.w3.org/TR/REC-xml-names/ + +// TODO(rsc): +// Test error handling. + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A SyntaxError represents a syntax error in the XML input stream. +type SyntaxError struct { + Msg string + Line int +} + +func (e *SyntaxError) Error() string { + return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg +} + +// A Name represents an XML name (Local) annotated with a name space +// identifier (Space). 
In tokens returned by Decoder.Token, the Space +// identifier is given as a canonical URL, not the short prefix used in +// the document being parsed. +// +// As a special case, XML namespace declarations will use the literal +// string "xmlns" for the Space field instead of the fully resolved URL. +// See Encoder.EncodeToken for more information on namespace encoding +// behaviour. +type Name struct { + Space, Local string +} + +// isNamespace reports whether the name is a namespace-defining name. +func (name Name) isNamespace() bool { + return name.Local == "xmlns" || name.Space == "xmlns" +} + +// An Attr represents an attribute in an XML element (Name=Value). +type Attr struct { + Name Name + Value string +} + +// A Token is an interface holding one of the token types: +// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. +type Token interface{} + +// A StartElement represents an XML start element. +type StartElement struct { + Name Name + Attr []Attr +} + +func (e StartElement) Copy() StartElement { + attrs := make([]Attr, len(e.Attr)) + copy(attrs, e.Attr) + e.Attr = attrs + return e +} + +// End returns the corresponding XML end element. +func (e StartElement) End() EndElement { + return EndElement{e.Name} +} + +// setDefaultNamespace sets the namespace of the element +// as the default for all elements contained within it. +func (e *StartElement) setDefaultNamespace() { + if e.Name.Space == "" { + // If there's no namespace on the element, don't + // set the default. Strictly speaking this might be wrong, as + // we can't tell if the element had no namespace set + // or was just using the default namespace. + return + } + // Don't add a default name space if there's already one set. + for _, attr := range e.Attr { + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + return + } + } + e.Attr = append(e.Attr, Attr{ + Name: Name{ + Local: "xmlns", + }, + Value: e.Name.Space, + }) +} + +// An EndElement represents an XML end element. 
+type EndElement struct { + Name Name +} + +// A CharData represents XML character data (raw text), +// in which XML escape sequences have been replaced by +// the characters they represent. +type CharData []byte + +func makeCopy(b []byte) []byte { + b1 := make([]byte, len(b)) + copy(b1, b) + return b1 +} + +func (c CharData) Copy() CharData { return CharData(makeCopy(c)) } + +// A Comment represents an XML comment of the form <!--comment-->. +// The bytes do not include the <!-- and --> comment markers. +type Comment []byte + +func (c Comment) Copy() Comment { return Comment(makeCopy(c)) } + +// A ProcInst represents an XML processing instruction of the form <?target inst?> +type ProcInst struct { + Target string + Inst []byte +} + +func (p ProcInst) Copy() ProcInst { + p.Inst = makeCopy(p.Inst) + return p +} + +// A Directive represents an XML directive of the form <!text>. +// The bytes do not include the <! and > markers. +type Directive []byte + +func (d Directive) Copy() Directive { return Directive(makeCopy(d)) } + +// CopyToken returns a copy of a Token. +func CopyToken(t Token) Token { + switch v := t.(type) { + case CharData: + return v.Copy() + case Comment: + return v.Copy() + case Directive: + return v.Copy() + case ProcInst: + return v.Copy() + case StartElement: + return v.Copy() + } + return t +} + +// A Decoder represents an XML parser reading a particular input stream. +// The parser assumes that its input is encoded in UTF-8. +type Decoder struct { + // Strict defaults to true, enforcing the requirements + // of the XML specification. + // If set to false, the parser allows input containing common + // mistakes: + // * If an element is missing an end tag, the parser invents + // end tags as necessary to keep the return values from Token + // properly balanced. + // * In attribute values and character data, unknown or malformed + // character entities (sequences beginning with &) are left alone. 
+ // + // Setting: + // + // d.Strict = false; + // d.AutoClose = HTMLAutoClose; + // d.Entity = HTMLEntity + // + // creates a parser that can handle typical HTML. + // + // Strict mode does not enforce the requirements of the XML name spaces TR. + // In particular it does not reject name space tags using undefined prefixes. + // Such tags are recorded with the unknown prefix as the name space URL. + Strict bool + + // When Strict == false, AutoClose indicates a set of elements to + // consider closed immediately after they are opened, regardless + // of whether an end element is present. + AutoClose []string + + // Entity can be used to map non-standard entity names to string replacements. + // The parser behaves as if these standard mappings are present in the map, + // regardless of the actual map content: + // + // "lt": "<", + // "gt": ">", + // "amp": "&", + // "apos": "'", + // "quot": `"`, + Entity map[string]string + + // CharsetReader, if non-nil, defines a function to generate + // charset-conversion readers, converting from the provided + // non-UTF-8 charset into UTF-8. If CharsetReader is nil or + // returns an error, parsing stops with an error. One of the + // the CharsetReader's result values must be non-nil. + CharsetReader func(charset string, input io.Reader) (io.Reader, error) + + // DefaultSpace sets the default name space used for unadorned tags, + // as if the entire XML stream were wrapped in an element containing + // the attribute xmlns="DefaultSpace". + DefaultSpace string + + r io.ByteReader + buf bytes.Buffer + saved *bytes.Buffer + stk *stack + free *stack + needClose bool + toClose Name + nextToken Token + nextByte int + ns map[string]string + err error + line int + offset int64 + unmarshalDepth int +} + +// NewDecoder creates a new XML parser reading from r. +// If r does not implement io.ByteReader, NewDecoder will +// do its own buffering. 
+func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + ns: make(map[string]string), + nextByte: -1, + line: 1, + Strict: true, + } + d.switchToReader(r) + return d +} + +// Token returns the next XML token in the input stream. +// At the end of the input stream, Token returns nil, io.EOF. +// +// Slices of bytes in the returned token data refer to the +// parser's internal buffer and remain valid only until the next +// call to Token. To acquire a copy of the bytes, call CopyToken +// or the token's Copy method. +// +// Token expands self-closing elements such as <br/> +// into separate start and end elements returned by successive calls. +// +// Token guarantees that the StartElement and EndElement +// tokens it returns are properly nested and matched: +// if Token encounters an unexpected end element, +// it will return an error. +// +// Token implements XML name spaces as described by +// http://www.w3.org/TR/REC-xml-names/. Each of the +// Name structures contained in the Token has the Space +// set to the URL identifying its name space when known. +// If Token encounters an unrecognized name space prefix, +// it uses the prefix as the Space rather than report an error. +func (d *Decoder) Token() (t Token, err error) { + if d.stk != nil && d.stk.kind == stkEOF { + err = io.EOF + return + } + if d.nextToken != nil { + t = d.nextToken + d.nextToken = nil + } else if t, err = d.rawToken(); err != nil { + return + } + + if !d.Strict { + if t1, ok := d.autoClose(t); ok { + d.nextToken = t + t = t1 + } + } + switch t1 := t.(type) { + case StartElement: + // In XML name spaces, the translations listed in the + // attributes apply to the element name and + // to the other attribute names, so process + // the translations first. 
+ for _, a := range t1.Attr { + if a.Name.Space == "xmlns" { + v, ok := d.ns[a.Name.Local] + d.pushNs(a.Name.Local, v, ok) + d.ns[a.Name.Local] = a.Value + } + if a.Name.Space == "" && a.Name.Local == "xmlns" { + // Default space for untagged names + v, ok := d.ns[""] + d.pushNs("", v, ok) + d.ns[""] = a.Value + } + } + + d.translate(&t1.Name, true) + for i := range t1.Attr { + d.translate(&t1.Attr[i].Name, false) + } + d.pushElement(t1.Name) + t = t1 + + case EndElement: + d.translate(&t1.Name, true) + if !d.popElement(&t1) { + return nil, d.err + } + t = t1 + } + return +} + +const xmlURL = "http://www.w3.org/XML/1998/namespace" + +// Apply name space translation to name n. +// The default name space (for Space=="") +// applies only to element names, not to attribute names. +func (d *Decoder) translate(n *Name, isElementName bool) { + switch { + case n.Space == "xmlns": + return + case n.Space == "" && !isElementName: + return + case n.Space == "xml": + n.Space = xmlURL + case n.Space == "" && n.Local == "xmlns": + return + } + if v, ok := d.ns[n.Space]; ok { + n.Space = v + } else if n.Space == "" { + n.Space = d.DefaultSpace + } +} + +func (d *Decoder) switchToReader(r io.Reader) { + // Get efficient byte at a time reader. + // Assume that if reader has its own + // ReadByte, it's efficient enough. + // Otherwise, use bufio. + if rb, ok := r.(io.ByteReader); ok { + d.r = rb + } else { + d.r = bufio.NewReader(r) + } +} + +// Parsing state - stack holds old name space translations +// and the current set of open elements. The translations to pop when +// ending a given tag are *below* it on the stack, which is +// more work but forced on us by XML. 
+type stack struct { + next *stack + kind int + name Name + ok bool +} + +const ( + stkStart = iota + stkNs + stkEOF +) + +func (d *Decoder) push(kind int) *stack { + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.next = d.stk + s.kind = kind + d.stk = s + return s +} + +func (d *Decoder) pop() *stack { + s := d.stk + if s != nil { + d.stk = s.next + s.next = d.free + d.free = s + } + return s +} + +// Record that after the current element is finished +// (that element is already pushed on the stack) +// Token should return EOF until popEOF is called. +func (d *Decoder) pushEOF() { + // Walk down stack to find Start. + // It might not be the top, because there might be stkNs + // entries above it. + start := d.stk + for start.kind != stkStart { + start = start.next + } + // The stkNs entries below a start are associated with that + // element too; skip over them. + for start.next != nil && start.next.kind == stkNs { + start = start.next + } + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.kind = stkEOF + s.next = start.next + start.next = s +} + +// Undo a pushEOF. +// The element must have been finished, so the EOF should be at the top of the stack. +func (d *Decoder) popEOF() bool { + if d.stk == nil || d.stk.kind != stkEOF { + return false + } + d.pop() + return true +} + +// Record that we are starting an element with the given name. +func (d *Decoder) pushElement(name Name) { + s := d.push(stkStart) + s.name = name +} + +// Record that we are changing the value of ns[local]. +// The old value is url, ok. +func (d *Decoder) pushNs(local string, url string, ok bool) { + s := d.push(stkNs) + s.name.Local = local + s.name.Space = url + s.ok = ok +} + +// Creates a SyntaxError with the current line number. +func (d *Decoder) syntaxError(msg string) error { + return &SyntaxError{Msg: msg, Line: d.line} +} + +// Record that we are ending an element with the given name. 
+// The name must match the record at the top of the stack, +// which must be a pushElement record. +// After popping the element, apply any undo records from +// the stack to restore the name translations that existed +// before we saw this element. +func (d *Decoder) popElement(t *EndElement) bool { + s := d.pop() + name := t.Name + switch { + case s == nil || s.kind != stkStart: + d.err = d.syntaxError("unexpected end element </" + name.Local + ">") + return false + case s.name.Local != name.Local: + if !d.Strict { + d.needClose = true + d.toClose = t.Name + t.Name = s.name + return true + } + d.err = d.syntaxError("element <" + s.name.Local + "> closed by </" + name.Local + ">") + return false + case s.name.Space != name.Space: + d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space + + "closed by </" + name.Local + "> in space " + name.Space) + return false + } + + // Pop stack until a Start or EOF is on the top, undoing the + // translations that were associated with the element we just closed. + for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF { + s := d.pop() + if s.ok { + d.ns[s.name.Local] = s.name.Space + } else { + delete(d.ns, s.name.Local) + } + } + + return true +} + +// If the top element on the stack is autoclosing and +// t is not the end tag, invent the end tag. +func (d *Decoder) autoClose(t Token) (Token, bool) { + if d.stk == nil || d.stk.kind != stkStart { + return nil, false + } + name := strings.ToLower(d.stk.name.Local) + for _, s := range d.AutoClose { + if strings.ToLower(s) == name { + // This one should be auto closed if t doesn't close it. 
+ et, ok := t.(EndElement) + if !ok || et.Name.Local != name { + return EndElement{d.stk.name}, true + } + break + } + } + return nil, false +} + +var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method") + +// RawToken is like Token but does not verify that +// start and end elements match and does not translate +// name space prefixes to their corresponding URLs. +func (d *Decoder) RawToken() (Token, error) { + if d.unmarshalDepth > 0 { + return nil, errRawToken + } + return d.rawToken() +} + +func (d *Decoder) rawToken() (Token, error) { + if d.err != nil { + return nil, d.err + } + if d.needClose { + // The last element we read was self-closing and + // we returned just the StartElement half. + // Return the EndElement half now. + d.needClose = false + return EndElement{d.toClose}, nil + } + + b, ok := d.getc() + if !ok { + return nil, d.err + } + + if b != '<' { + // Text section. + d.ungetc(b) + data := d.text(-1, false) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + switch b { + case '/': + // </: End element + var name Name + if name, ok = d.nsname(); !ok { + if d.err == nil { + d.err = d.syntaxError("expected element name after </") + } + return nil, d.err + } + d.space() + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if b != '>' { + d.err = d.syntaxError("invalid characters between </" + name.Local + " and >") + return nil, d.err + } + return EndElement{name}, nil + + case '?': + // <?: Processing instruction. + var target string + if target, ok = d.name(); !ok { + if d.err == nil { + d.err = d.syntaxError("expected target name after <?") + } + return nil, d.err + } + d.space() + d.buf.Reset() + var b0 byte + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + d.buf.WriteByte(b) + if b0 == '?' 
&& b == '>' { + break + } + b0 = b + } + data := d.buf.Bytes() + data = data[0 : len(data)-2] // chop ?> + + if target == "xml" { + content := string(data) + ver := procInst("version", content) + if ver != "" && ver != "1.0" { + d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) + return nil, d.err + } + enc := procInst("encoding", content) + if enc != "" && enc != "utf-8" && enc != "UTF-8" { + if d.CharsetReader == nil { + d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) + return nil, d.err + } + newr, err := d.CharsetReader(enc, d.r.(io.Reader)) + if err != nil { + d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err) + return nil, d.err + } + if newr == nil { + panic("CharsetReader returned a nil Reader for charset " + enc) + } + d.switchToReader(newr) + } + } + return ProcInst{target, data}, nil + + case '!': + // <!: Maybe comment, maybe CDATA. + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + switch b { + case '-': // <!- + // Probably <!-- for a comment. + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if b != '-' { + d.err = d.syntaxError("invalid sequence <!- not part of <!--") + return nil, d.err + } + // Look for terminator. + d.buf.Reset() + var b0, b1 byte + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + d.buf.WriteByte(b) + if b0 == '-' && b1 == '-' && b == '>' { + break + } + b0, b1 = b1, b + } + data := d.buf.Bytes() + data = data[0 : len(data)-3] // chop --> + return Comment(data), nil + + case '[': // <![ + // Probably <![CDATA[. + for i := 0; i < 6; i++ { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if b != "CDATA["[i] { + d.err = d.syntaxError("invalid <![ sequence") + return nil, d.err + } + } + // Have <![CDATA[. Read text until ]]>. + data := d.text(-1, true) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + // Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc. 
+ // We don't care, but accumulate for caller. Quoted angle + // brackets do not count for nesting. + d.buf.Reset() + d.buf.WriteByte(b) + inquote := uint8(0) + depth := 0 + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if inquote == 0 && b == '>' && depth == 0 { + break + } + HandleB: + d.buf.WriteByte(b) + switch { + case b == inquote: + inquote = 0 + + case inquote != 0: + // in quotes, no special action + + case b == '\'' || b == '"': + inquote = b + + case b == '>' && inquote == 0: + depth-- + + case b == '<' && inquote == 0: + // Look for <!-- to begin comment. + s := "!--" + for i := 0; i < len(s); i++ { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if b != s[i] { + for j := 0; j < i; j++ { + d.buf.WriteByte(s[j]) + } + depth++ + goto HandleB + } + } + + // Remove < that was written above. + d.buf.Truncate(d.buf.Len() - 1) + + // Look for terminator. + var b0, b1 byte + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if b0 == '-' && b1 == '-' && b == '>' { + break + } + b0, b1 = b1, b + } + } + } + return Directive(d.buf.Bytes()), nil + } + + // Must be an open element like <a href="foo"> + d.ungetc(b) + + var ( + name Name + empty bool + attr []Attr + ) + if name, ok = d.nsname(); !ok { + if d.err == nil { + d.err = d.syntaxError("expected element name after <") + } + return nil, d.err + } + + attr = []Attr{} + for { + d.space() + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if b == '/' { + empty = true + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if b != '>' { + d.err = d.syntaxError("expected /> in element") + return nil, d.err + } + break + } + if b == '>' { + break + } + d.ungetc(b) + + n := len(attr) + if n >= cap(attr) { + nCap := 2 * cap(attr) + if nCap == 0 { + nCap = 4 + } + nattr := make([]Attr, n, nCap) + copy(nattr, attr) + attr = nattr + } + attr = attr[0 : n+1] + a := &attr[n] + if a.Name, ok = d.nsname(); !ok { + if d.err == nil { + d.err = d.syntaxError("expected 
attribute name in element") + } + return nil, d.err + } + d.space() + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if b != '=' { + if d.Strict { + d.err = d.syntaxError("attribute name without = in element") + return nil, d.err + } else { + d.ungetc(b) + a.Value = a.Name.Local + } + } else { + d.space() + data := d.attrval() + if data == nil { + return nil, d.err + } + a.Value = string(data) + } + } + if empty { + d.needClose = true + d.toClose = name + } + return StartElement{name, attr}, nil +} + +func (d *Decoder) attrval() []byte { + b, ok := d.mustgetc() + if !ok { + return nil + } + // Handle quoted attribute values + if b == '"' || b == '\'' { + return d.text(int(b), false) + } + // Handle unquoted attribute values for strict parsers + if d.Strict { + d.err = d.syntaxError("unquoted or missing attribute value in element") + return nil + } + // Handle unquoted attribute values for unstrict parsers + d.ungetc(b) + d.buf.Reset() + for { + b, ok = d.mustgetc() + if !ok { + return nil + } + // http://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2 + if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' || + '0' <= b && b <= '9' || b == '_' || b == ':' || b == '-' { + d.buf.WriteByte(b) + } else { + d.ungetc(b) + break + } + } + return d.buf.Bytes() +} + +// Skip spaces if any +func (d *Decoder) space() { + for { + b, ok := d.getc() + if !ok { + return + } + switch b { + case ' ', '\r', '\n', '\t': + default: + d.ungetc(b) + return + } + } +} + +// Read a single byte. +// If there is no byte to read, return ok==false +// and leave the error in d.err. +// Maintain line number. 
+func (d *Decoder) getc() (b byte, ok bool) { + if d.err != nil { + return 0, false + } + if d.nextByte >= 0 { + b = byte(d.nextByte) + d.nextByte = -1 + } else { + b, d.err = d.r.ReadByte() + if d.err != nil { + return 0, false + } + if d.saved != nil { + d.saved.WriteByte(b) + } + } + if b == '\n' { + d.line++ + } + d.offset++ + return b, true +} + +// InputOffset returns the input stream byte offset of the current decoder position. +// The offset gives the location of the end of the most recently returned token +// and the beginning of the next token. +func (d *Decoder) InputOffset() int64 { + return d.offset +} + +// Return saved offset. +// If we did ungetc (nextByte >= 0), have to back up one. +func (d *Decoder) savedOffset() int { + n := d.saved.Len() + if d.nextByte >= 0 { + n-- + } + return n +} + +// Must read a single byte. +// If there is no byte to read, +// set d.err to SyntaxError("unexpected EOF") +// and return ok==false +func (d *Decoder) mustgetc() (b byte, ok bool) { + if b, ok = d.getc(); !ok { + if d.err == io.EOF { + d.err = d.syntaxError("unexpected EOF") + } + } + return +} + +// Unread a single byte. +func (d *Decoder) ungetc(b byte) { + if b == '\n' { + d.line-- + } + d.nextByte = int(b) + d.offset-- +} + +var entity = map[string]int{ + "lt": '<', + "gt": '>', + "amp": '&', + "apos": '\'', + "quot": '"', +} + +// Read plain text section (XML calls it character data). +// If quote >= 0, we are in a quoted string and need to find the matching quote. +// If cdata == true, we are in a <![CDATA[ section and need to find ]]>. +// On failure return nil and leave the error in d.err. +func (d *Decoder) text(quote int, cdata bool) []byte { + var b0, b1 byte + var trunc int + d.buf.Reset() +Input: + for { + b, ok := d.getc() + if !ok { + if cdata { + if d.err == io.EOF { + d.err = d.syntaxError("unexpected EOF in CDATA section") + } + return nil + } + break Input + } + + // <![CDATA[ section ends with ]]>. 
+ // It is an error for ]]> to appear in ordinary text. + if b0 == ']' && b1 == ']' && b == '>' { + if cdata { + trunc = 2 + break Input + } + d.err = d.syntaxError("unescaped ]]> not in CDATA section") + return nil + } + + // Stop reading text if we see a <. + if b == '<' && !cdata { + if quote >= 0 { + d.err = d.syntaxError("unescaped < inside quoted string") + return nil + } + d.ungetc('<') + break Input + } + if quote >= 0 && b == byte(quote) { + break Input + } + if b == '&' && !cdata { + // Read escaped character expression up to semicolon. + // XML in all its glory allows a document to define and use + // its own character names with <!ENTITY ...> directives. + // Parsers are required to recognize lt, gt, amp, apos, and quot + // even if they have not been declared. + before := d.buf.Len() + d.buf.WriteByte('&') + var ok bool + var text string + var haveText bool + if b, ok = d.mustgetc(); !ok { + return nil + } + if b == '#' { + d.buf.WriteByte(b) + if b, ok = d.mustgetc(); !ok { + return nil + } + base := 10 + if b == 'x' { + base = 16 + d.buf.WriteByte(b) + if b, ok = d.mustgetc(); !ok { + return nil + } + } + start := d.buf.Len() + for '0' <= b && b <= '9' || + base == 16 && 'a' <= b && b <= 'f' || + base == 16 && 'A' <= b && b <= 'F' { + d.buf.WriteByte(b) + if b, ok = d.mustgetc(); !ok { + return nil + } + } + if b != ';' { + d.ungetc(b) + } else { + s := string(d.buf.Bytes()[start:]) + d.buf.WriteByte(';') + n, err := strconv.ParseUint(s, base, 64) + if err == nil && n <= unicode.MaxRune { + text = string(n) + haveText = true + } + } + } else { + d.ungetc(b) + if !d.readName() { + if d.err != nil { + return nil + } + ok = false + } + if b, ok = d.mustgetc(); !ok { + return nil + } + if b != ';' { + d.ungetc(b) + } else { + name := d.buf.Bytes()[before+1:] + d.buf.WriteByte(';') + if isName(name) { + s := string(name) + if r, ok := entity[s]; ok { + text = string(r) + haveText = true + } else if d.Entity != nil { + text, haveText = d.Entity[s] + } + } 
+ } + } + + if haveText { + d.buf.Truncate(before) + d.buf.Write([]byte(text)) + b0, b1 = 0, 0 + continue Input + } + if !d.Strict { + b0, b1 = 0, 0 + continue Input + } + ent := string(d.buf.Bytes()[before:]) + if ent[len(ent)-1] != ';' { + ent += " (no semicolon)" + } + d.err = d.syntaxError("invalid character entity " + ent) + return nil + } + + // We must rewrite unescaped \r and \r\n into \n. + if b == '\r' { + d.buf.WriteByte('\n') + } else if b1 == '\r' && b == '\n' { + // Skip \r\n--we already wrote \n. + } else { + d.buf.WriteByte(b) + } + + b0, b1 = b1, b + } + data := d.buf.Bytes() + data = data[0 : len(data)-trunc] + + // Inspect each rune for being a disallowed character. + buf := data + for len(buf) > 0 { + r, size := utf8.DecodeRune(buf) + if r == utf8.RuneError && size == 1 { + d.err = d.syntaxError("invalid UTF-8") + return nil + } + buf = buf[size:] + if !isInCharacterRange(r) { + d.err = d.syntaxError(fmt.Sprintf("illegal character code %U", r)) + return nil + } + } + + return data +} + +// Decide whether the given rune is in the XML Character Range, per +// the Char production of http://www.xml.com/axml/testaxml.htm, +// Section 2.2 Characters. +func isInCharacterRange(r rune) (inrange bool) { + return r == 0x09 || + r == 0x0A || + r == 0x0D || + r >= 0x20 && r <= 0xDF77 || + r >= 0xE000 && r <= 0xFFFD || + r >= 0x10000 && r <= 0x10FFFF +} + +// Get name space name: name with a : stuck in the middle. +// The part before the : is the name space identifier. +func (d *Decoder) nsname() (name Name, ok bool) { + s, ok := d.name() + if !ok { + return + } + i := strings.Index(s, ":") + if i < 0 { + name.Local = s + } else { + name.Space = s[0:i] + name.Local = s[i+1:] + } + return name, true +} + +// Get name: /first(first|second)*/ +// Do not set d.err if the name is missing (unless unexpected EOF is received): +// let the caller provide better context. 
+func (d *Decoder) name() (s string, ok bool) { + d.buf.Reset() + if !d.readName() { + return "", false + } + + // Now we check the characters. + b := d.buf.Bytes() + if !isName(b) { + d.err = d.syntaxError("invalid XML name: " + string(b)) + return "", false + } + return string(b), true +} + +// Read a name and append its bytes to d.buf. +// The name is delimited by any single-byte character not valid in names. +// All multi-byte characters are accepted; the caller must check their validity. +func (d *Decoder) readName() (ok bool) { + var b byte + if b, ok = d.mustgetc(); !ok { + return + } + if b < utf8.RuneSelf && !isNameByte(b) { + d.ungetc(b) + return false + } + d.buf.WriteByte(b) + + for { + if b, ok = d.mustgetc(); !ok { + return + } + if b < utf8.RuneSelf && !isNameByte(b) { + d.ungetc(b) + break + } + d.buf.WriteByte(b) + } + return true +} + +func isNameByte(c byte) bool { + return 'A' <= c && c <= 'Z' || + 'a' <= c && c <= 'z' || + '0' <= c && c <= '9' || + c == '_' || c == ':' || c == '.' 
|| c == '-' +} + +func isName(s []byte) bool { + if len(s) == 0 { + return false + } + c, n := utf8.DecodeRune(s) + if c == utf8.RuneError && n == 1 { + return false + } + if !unicode.Is(first, c) { + return false + } + for n < len(s) { + s = s[n:] + c, n = utf8.DecodeRune(s) + if c == utf8.RuneError && n == 1 { + return false + } + if !unicode.Is(first, c) && !unicode.Is(second, c) { + return false + } + } + return true +} + +func isNameString(s string) bool { + if len(s) == 0 { + return false + } + c, n := utf8.DecodeRuneInString(s) + if c == utf8.RuneError && n == 1 { + return false + } + if !unicode.Is(first, c) { + return false + } + for n < len(s) { + s = s[n:] + c, n = utf8.DecodeRuneInString(s) + if c == utf8.RuneError && n == 1 { + return false + } + if !unicode.Is(first, c) && !unicode.Is(second, c) { + return false + } + } + return true +} + +// These tables were generated by cut and paste from Appendix B of +// the XML spec at http://www.xml.com/axml/testaxml.htm +// and then reformatting. First corresponds to (Letter | '_' | ':') +// and second corresponds to NameChar. 
+ +var first = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x003A, 0x003A, 1}, + {0x0041, 0x005A, 1}, + {0x005F, 0x005F, 1}, + {0x0061, 0x007A, 1}, + {0x00C0, 0x00D6, 1}, + {0x00D8, 0x00F6, 1}, + {0x00F8, 0x00FF, 1}, + {0x0100, 0x0131, 1}, + {0x0134, 0x013E, 1}, + {0x0141, 0x0148, 1}, + {0x014A, 0x017E, 1}, + {0x0180, 0x01C3, 1}, + {0x01CD, 0x01F0, 1}, + {0x01F4, 0x01F5, 1}, + {0x01FA, 0x0217, 1}, + {0x0250, 0x02A8, 1}, + {0x02BB, 0x02C1, 1}, + {0x0386, 0x0386, 1}, + {0x0388, 0x038A, 1}, + {0x038C, 0x038C, 1}, + {0x038E, 0x03A1, 1}, + {0x03A3, 0x03CE, 1}, + {0x03D0, 0x03D6, 1}, + {0x03DA, 0x03E0, 2}, + {0x03E2, 0x03F3, 1}, + {0x0401, 0x040C, 1}, + {0x040E, 0x044F, 1}, + {0x0451, 0x045C, 1}, + {0x045E, 0x0481, 1}, + {0x0490, 0x04C4, 1}, + {0x04C7, 0x04C8, 1}, + {0x04CB, 0x04CC, 1}, + {0x04D0, 0x04EB, 1}, + {0x04EE, 0x04F5, 1}, + {0x04F8, 0x04F9, 1}, + {0x0531, 0x0556, 1}, + {0x0559, 0x0559, 1}, + {0x0561, 0x0586, 1}, + {0x05D0, 0x05EA, 1}, + {0x05F0, 0x05F2, 1}, + {0x0621, 0x063A, 1}, + {0x0641, 0x064A, 1}, + {0x0671, 0x06B7, 1}, + {0x06BA, 0x06BE, 1}, + {0x06C0, 0x06CE, 1}, + {0x06D0, 0x06D3, 1}, + {0x06D5, 0x06D5, 1}, + {0x06E5, 0x06E6, 1}, + {0x0905, 0x0939, 1}, + {0x093D, 0x093D, 1}, + {0x0958, 0x0961, 1}, + {0x0985, 0x098C, 1}, + {0x098F, 0x0990, 1}, + {0x0993, 0x09A8, 1}, + {0x09AA, 0x09B0, 1}, + {0x09B2, 0x09B2, 1}, + {0x09B6, 0x09B9, 1}, + {0x09DC, 0x09DD, 1}, + {0x09DF, 0x09E1, 1}, + {0x09F0, 0x09F1, 1}, + {0x0A05, 0x0A0A, 1}, + {0x0A0F, 0x0A10, 1}, + {0x0A13, 0x0A28, 1}, + {0x0A2A, 0x0A30, 1}, + {0x0A32, 0x0A33, 1}, + {0x0A35, 0x0A36, 1}, + {0x0A38, 0x0A39, 1}, + {0x0A59, 0x0A5C, 1}, + {0x0A5E, 0x0A5E, 1}, + {0x0A72, 0x0A74, 1}, + {0x0A85, 0x0A8B, 1}, + {0x0A8D, 0x0A8D, 1}, + {0x0A8F, 0x0A91, 1}, + {0x0A93, 0x0AA8, 1}, + {0x0AAA, 0x0AB0, 1}, + {0x0AB2, 0x0AB3, 1}, + {0x0AB5, 0x0AB9, 1}, + {0x0ABD, 0x0AE0, 0x23}, + {0x0B05, 0x0B0C, 1}, + {0x0B0F, 0x0B10, 1}, + {0x0B13, 0x0B28, 1}, + {0x0B2A, 0x0B30, 1}, + {0x0B32, 0x0B33, 1}, + {0x0B36, 0x0B39, 1}, + 
{0x0B3D, 0x0B3D, 1}, + {0x0B5C, 0x0B5D, 1}, + {0x0B5F, 0x0B61, 1}, + {0x0B85, 0x0B8A, 1}, + {0x0B8E, 0x0B90, 1}, + {0x0B92, 0x0B95, 1}, + {0x0B99, 0x0B9A, 1}, + {0x0B9C, 0x0B9C, 1}, + {0x0B9E, 0x0B9F, 1}, + {0x0BA3, 0x0BA4, 1}, + {0x0BA8, 0x0BAA, 1}, + {0x0BAE, 0x0BB5, 1}, + {0x0BB7, 0x0BB9, 1}, + {0x0C05, 0x0C0C, 1}, + {0x0C0E, 0x0C10, 1}, + {0x0C12, 0x0C28, 1}, + {0x0C2A, 0x0C33, 1}, + {0x0C35, 0x0C39, 1}, + {0x0C60, 0x0C61, 1}, + {0x0C85, 0x0C8C, 1}, + {0x0C8E, 0x0C90, 1}, + {0x0C92, 0x0CA8, 1}, + {0x0CAA, 0x0CB3, 1}, + {0x0CB5, 0x0CB9, 1}, + {0x0CDE, 0x0CDE, 1}, + {0x0CE0, 0x0CE1, 1}, + {0x0D05, 0x0D0C, 1}, + {0x0D0E, 0x0D10, 1}, + {0x0D12, 0x0D28, 1}, + {0x0D2A, 0x0D39, 1}, + {0x0D60, 0x0D61, 1}, + {0x0E01, 0x0E2E, 1}, + {0x0E30, 0x0E30, 1}, + {0x0E32, 0x0E33, 1}, + {0x0E40, 0x0E45, 1}, + {0x0E81, 0x0E82, 1}, + {0x0E84, 0x0E84, 1}, + {0x0E87, 0x0E88, 1}, + {0x0E8A, 0x0E8D, 3}, + {0x0E94, 0x0E97, 1}, + {0x0E99, 0x0E9F, 1}, + {0x0EA1, 0x0EA3, 1}, + {0x0EA5, 0x0EA7, 2}, + {0x0EAA, 0x0EAB, 1}, + {0x0EAD, 0x0EAE, 1}, + {0x0EB0, 0x0EB0, 1}, + {0x0EB2, 0x0EB3, 1}, + {0x0EBD, 0x0EBD, 1}, + {0x0EC0, 0x0EC4, 1}, + {0x0F40, 0x0F47, 1}, + {0x0F49, 0x0F69, 1}, + {0x10A0, 0x10C5, 1}, + {0x10D0, 0x10F6, 1}, + {0x1100, 0x1100, 1}, + {0x1102, 0x1103, 1}, + {0x1105, 0x1107, 1}, + {0x1109, 0x1109, 1}, + {0x110B, 0x110C, 1}, + {0x110E, 0x1112, 1}, + {0x113C, 0x1140, 2}, + {0x114C, 0x1150, 2}, + {0x1154, 0x1155, 1}, + {0x1159, 0x1159, 1}, + {0x115F, 0x1161, 1}, + {0x1163, 0x1169, 2}, + {0x116D, 0x116E, 1}, + {0x1172, 0x1173, 1}, + {0x1175, 0x119E, 0x119E - 0x1175}, + {0x11A8, 0x11AB, 0x11AB - 0x11A8}, + {0x11AE, 0x11AF, 1}, + {0x11B7, 0x11B8, 1}, + {0x11BA, 0x11BA, 1}, + {0x11BC, 0x11C2, 1}, + {0x11EB, 0x11F0, 0x11F0 - 0x11EB}, + {0x11F9, 0x11F9, 1}, + {0x1E00, 0x1E9B, 1}, + {0x1EA0, 0x1EF9, 1}, + {0x1F00, 0x1F15, 1}, + {0x1F18, 0x1F1D, 1}, + {0x1F20, 0x1F45, 1}, + {0x1F48, 0x1F4D, 1}, + {0x1F50, 0x1F57, 1}, + {0x1F59, 0x1F5B, 0x1F5B - 0x1F59}, + {0x1F5D, 0x1F5D, 1}, + {0x1F5F, 
0x1F7D, 1}, + {0x1F80, 0x1FB4, 1}, + {0x1FB6, 0x1FBC, 1}, + {0x1FBE, 0x1FBE, 1}, + {0x1FC2, 0x1FC4, 1}, + {0x1FC6, 0x1FCC, 1}, + {0x1FD0, 0x1FD3, 1}, + {0x1FD6, 0x1FDB, 1}, + {0x1FE0, 0x1FEC, 1}, + {0x1FF2, 0x1FF4, 1}, + {0x1FF6, 0x1FFC, 1}, + {0x2126, 0x2126, 1}, + {0x212A, 0x212B, 1}, + {0x212E, 0x212E, 1}, + {0x2180, 0x2182, 1}, + {0x3007, 0x3007, 1}, + {0x3021, 0x3029, 1}, + {0x3041, 0x3094, 1}, + {0x30A1, 0x30FA, 1}, + {0x3105, 0x312C, 1}, + {0x4E00, 0x9FA5, 1}, + {0xAC00, 0xD7A3, 1}, + }, +} + +var second = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x002D, 0x002E, 1}, + {0x0030, 0x0039, 1}, + {0x00B7, 0x00B7, 1}, + {0x02D0, 0x02D1, 1}, + {0x0300, 0x0345, 1}, + {0x0360, 0x0361, 1}, + {0x0387, 0x0387, 1}, + {0x0483, 0x0486, 1}, + {0x0591, 0x05A1, 1}, + {0x05A3, 0x05B9, 1}, + {0x05BB, 0x05BD, 1}, + {0x05BF, 0x05BF, 1}, + {0x05C1, 0x05C2, 1}, + {0x05C4, 0x0640, 0x0640 - 0x05C4}, + {0x064B, 0x0652, 1}, + {0x0660, 0x0669, 1}, + {0x0670, 0x0670, 1}, + {0x06D6, 0x06DC, 1}, + {0x06DD, 0x06DF, 1}, + {0x06E0, 0x06E4, 1}, + {0x06E7, 0x06E8, 1}, + {0x06EA, 0x06ED, 1}, + {0x06F0, 0x06F9, 1}, + {0x0901, 0x0903, 1}, + {0x093C, 0x093C, 1}, + {0x093E, 0x094C, 1}, + {0x094D, 0x094D, 1}, + {0x0951, 0x0954, 1}, + {0x0962, 0x0963, 1}, + {0x0966, 0x096F, 1}, + {0x0981, 0x0983, 1}, + {0x09BC, 0x09BC, 1}, + {0x09BE, 0x09BF, 1}, + {0x09C0, 0x09C4, 1}, + {0x09C7, 0x09C8, 1}, + {0x09CB, 0x09CD, 1}, + {0x09D7, 0x09D7, 1}, + {0x09E2, 0x09E3, 1}, + {0x09E6, 0x09EF, 1}, + {0x0A02, 0x0A3C, 0x3A}, + {0x0A3E, 0x0A3F, 1}, + {0x0A40, 0x0A42, 1}, + {0x0A47, 0x0A48, 1}, + {0x0A4B, 0x0A4D, 1}, + {0x0A66, 0x0A6F, 1}, + {0x0A70, 0x0A71, 1}, + {0x0A81, 0x0A83, 1}, + {0x0ABC, 0x0ABC, 1}, + {0x0ABE, 0x0AC5, 1}, + {0x0AC7, 0x0AC9, 1}, + {0x0ACB, 0x0ACD, 1}, + {0x0AE6, 0x0AEF, 1}, + {0x0B01, 0x0B03, 1}, + {0x0B3C, 0x0B3C, 1}, + {0x0B3E, 0x0B43, 1}, + {0x0B47, 0x0B48, 1}, + {0x0B4B, 0x0B4D, 1}, + {0x0B56, 0x0B57, 1}, + {0x0B66, 0x0B6F, 1}, + {0x0B82, 0x0B83, 1}, + {0x0BBE, 0x0BC2, 1}, + {0x0BC6, 
0x0BC8, 1}, + {0x0BCA, 0x0BCD, 1}, + {0x0BD7, 0x0BD7, 1}, + {0x0BE7, 0x0BEF, 1}, + {0x0C01, 0x0C03, 1}, + {0x0C3E, 0x0C44, 1}, + {0x0C46, 0x0C48, 1}, + {0x0C4A, 0x0C4D, 1}, + {0x0C55, 0x0C56, 1}, + {0x0C66, 0x0C6F, 1}, + {0x0C82, 0x0C83, 1}, + {0x0CBE, 0x0CC4, 1}, + {0x0CC6, 0x0CC8, 1}, + {0x0CCA, 0x0CCD, 1}, + {0x0CD5, 0x0CD6, 1}, + {0x0CE6, 0x0CEF, 1}, + {0x0D02, 0x0D03, 1}, + {0x0D3E, 0x0D43, 1}, + {0x0D46, 0x0D48, 1}, + {0x0D4A, 0x0D4D, 1}, + {0x0D57, 0x0D57, 1}, + {0x0D66, 0x0D6F, 1}, + {0x0E31, 0x0E31, 1}, + {0x0E34, 0x0E3A, 1}, + {0x0E46, 0x0E46, 1}, + {0x0E47, 0x0E4E, 1}, + {0x0E50, 0x0E59, 1}, + {0x0EB1, 0x0EB1, 1}, + {0x0EB4, 0x0EB9, 1}, + {0x0EBB, 0x0EBC, 1}, + {0x0EC6, 0x0EC6, 1}, + {0x0EC8, 0x0ECD, 1}, + {0x0ED0, 0x0ED9, 1}, + {0x0F18, 0x0F19, 1}, + {0x0F20, 0x0F29, 1}, + {0x0F35, 0x0F39, 2}, + {0x0F3E, 0x0F3F, 1}, + {0x0F71, 0x0F84, 1}, + {0x0F86, 0x0F8B, 1}, + {0x0F90, 0x0F95, 1}, + {0x0F97, 0x0F97, 1}, + {0x0F99, 0x0FAD, 1}, + {0x0FB1, 0x0FB7, 1}, + {0x0FB9, 0x0FB9, 1}, + {0x20D0, 0x20DC, 1}, + {0x20E1, 0x3005, 0x3005 - 0x20E1}, + {0x302A, 0x302F, 1}, + {0x3031, 0x3035, 1}, + {0x3099, 0x309A, 1}, + {0x309D, 0x309E, 1}, + {0x30FC, 0x30FE, 1}, + }, +} + +// HTMLEntity is an entity map containing translations for the +// standard HTML entity characters. 
+var HTMLEntity = htmlEntity + +var htmlEntity = map[string]string{ + /* + hget http://www.w3.org/TR/html4/sgml/entities.html | + ssam ' + ,y /\&gt;/ x/\&lt;(.|\n)+/ s/\n/ /g + ,x v/^\&lt;!ENTITY/d + ,s/\&lt;!ENTITY ([^ ]+) .*U\+([0-9A-F][0-9A-F][0-9A-F][0-9A-F]) .+/ "\1": "\\u\2",/g + ' + */ + "nbsp": "\u00A0", + "iexcl": "\u00A1", + "cent": "\u00A2", + "pound": "\u00A3", + "curren": "\u00A4", + "yen": "\u00A5", + "brvbar": "\u00A6", + "sect": "\u00A7", + "uml": "\u00A8", + "copy": "\u00A9", + "ordf": "\u00AA", + "laquo": "\u00AB", + "not": "\u00AC", + "shy": "\u00AD", + "reg": "\u00AE", + "macr": "\u00AF", + "deg": "\u00B0", + "plusmn": "\u00B1", + "sup2": "\u00B2", + "sup3": "\u00B3", + "acute": "\u00B4", + "micro": "\u00B5", + "para": "\u00B6", + "middot": "\u00B7", + "cedil": "\u00B8", + "sup1": "\u00B9", + "ordm": "\u00BA", + "raquo": "\u00BB", + "frac14": "\u00BC", + "frac12": "\u00BD", + "frac34": "\u00BE", + "iquest": "\u00BF", + "Agrave": "\u00C0", + "Aacute": "\u00C1", + "Acirc": "\u00C2", + "Atilde": "\u00C3", + "Auml": "\u00C4", + "Aring": "\u00C5", + "AElig": "\u00C6", + "Ccedil": "\u00C7", + "Egrave": "\u00C8", + "Eacute": "\u00C9", + "Ecirc": "\u00CA", + "Euml": "\u00CB", + "Igrave": "\u00CC", + "Iacute": "\u00CD", + "Icirc": "\u00CE", + "Iuml": "\u00CF", + "ETH": "\u00D0", + "Ntilde": "\u00D1", + "Ograve": "\u00D2", + "Oacute": "\u00D3", + "Ocirc": "\u00D4", + "Otilde": "\u00D5", + "Ouml": "\u00D6", + "times": "\u00D7", + "Oslash": "\u00D8", + "Ugrave": "\u00D9", + "Uacute": "\u00DA", + "Ucirc": "\u00DB", + "Uuml": "\u00DC", + "Yacute": "\u00DD", + "THORN": "\u00DE", + "szlig": "\u00DF", + "agrave": "\u00E0", + "aacute": "\u00E1", + "acirc": "\u00E2", + "atilde": "\u00E3", + "auml": "\u00E4", + "aring": "\u00E5", + "aelig": "\u00E6", + "ccedil": "\u00E7", + "egrave": "\u00E8", + "eacute": "\u00E9", + "ecirc": "\u00EA", + "euml": "\u00EB", + "igrave": "\u00EC", + "iacute": "\u00ED", + "icirc": "\u00EE", + "iuml": "\u00EF", + "eth": "\u00F0", + 
"ntilde": "\u00F1", + "ograve": "\u00F2", + "oacute": "\u00F3", + "ocirc": "\u00F4", + "otilde": "\u00F5", + "ouml": "\u00F6", + "divide": "\u00F7", + "oslash": "\u00F8", + "ugrave": "\u00F9", + "uacute": "\u00FA", + "ucirc": "\u00FB", + "uuml": "\u00FC", + "yacute": "\u00FD", + "thorn": "\u00FE", + "yuml": "\u00FF", + "fnof": "\u0192", + "Alpha": "\u0391", + "Beta": "\u0392", + "Gamma": "\u0393", + "Delta": "\u0394", + "Epsilon": "\u0395", + "Zeta": "\u0396", + "Eta": "\u0397", + "Theta": "\u0398", + "Iota": "\u0399", + "Kappa": "\u039A", + "Lambda": "\u039B", + "Mu": "\u039C", + "Nu": "\u039D", + "Xi": "\u039E", + "Omicron": "\u039F", + "Pi": "\u03A0", + "Rho": "\u03A1", + "Sigma": "\u03A3", + "Tau": "\u03A4", + "Upsilon": "\u03A5", + "Phi": "\u03A6", + "Chi": "\u03A7", + "Psi": "\u03A8", + "Omega": "\u03A9", + "alpha": "\u03B1", + "beta": "\u03B2", + "gamma": "\u03B3", + "delta": "\u03B4", + "epsilon": "\u03B5", + "zeta": "\u03B6", + "eta": "\u03B7", + "theta": "\u03B8", + "iota": "\u03B9", + "kappa": "\u03BA", + "lambda": "\u03BB", + "mu": "\u03BC", + "nu": "\u03BD", + "xi": "\u03BE", + "omicron": "\u03BF", + "pi": "\u03C0", + "rho": "\u03C1", + "sigmaf": "\u03C2", + "sigma": "\u03C3", + "tau": "\u03C4", + "upsilon": "\u03C5", + "phi": "\u03C6", + "chi": "\u03C7", + "psi": "\u03C8", + "omega": "\u03C9", + "thetasym": "\u03D1", + "upsih": "\u03D2", + "piv": "\u03D6", + "bull": "\u2022", + "hellip": "\u2026", + "prime": "\u2032", + "Prime": "\u2033", + "oline": "\u203E", + "frasl": "\u2044", + "weierp": "\u2118", + "image": "\u2111", + "real": "\u211C", + "trade": "\u2122", + "alefsym": "\u2135", + "larr": "\u2190", + "uarr": "\u2191", + "rarr": "\u2192", + "darr": "\u2193", + "harr": "\u2194", + "crarr": "\u21B5", + "lArr": "\u21D0", + "uArr": "\u21D1", + "rArr": "\u21D2", + "dArr": "\u21D3", + "hArr": "\u21D4", + "forall": "\u2200", + "part": "\u2202", + "exist": "\u2203", + "empty": "\u2205", + "nabla": "\u2207", + "isin": "\u2208", + "notin": "\u2209", + 
"ni": "\u220B", + "prod": "\u220F", + "sum": "\u2211", + "minus": "\u2212", + "lowast": "\u2217", + "radic": "\u221A", + "prop": "\u221D", + "infin": "\u221E", + "ang": "\u2220", + "and": "\u2227", + "or": "\u2228", + "cap": "\u2229", + "cup": "\u222A", + "int": "\u222B", + "there4": "\u2234", + "sim": "\u223C", + "cong": "\u2245", + "asymp": "\u2248", + "ne": "\u2260", + "equiv": "\u2261", + "le": "\u2264", + "ge": "\u2265", + "sub": "\u2282", + "sup": "\u2283", + "nsub": "\u2284", + "sube": "\u2286", + "supe": "\u2287", + "oplus": "\u2295", + "otimes": "\u2297", + "perp": "\u22A5", + "sdot": "\u22C5", + "lceil": "\u2308", + "rceil": "\u2309", + "lfloor": "\u230A", + "rfloor": "\u230B", + "lang": "\u2329", + "rang": "\u232A", + "loz": "\u25CA", + "spades": "\u2660", + "clubs": "\u2663", + "hearts": "\u2665", + "diams": "\u2666", + "quot": "\u0022", + "amp": "\u0026", + "lt": "\u003C", + "gt": "\u003E", + "OElig": "\u0152", + "oelig": "\u0153", + "Scaron": "\u0160", + "scaron": "\u0161", + "Yuml": "\u0178", + "circ": "\u02C6", + "tilde": "\u02DC", + "ensp": "\u2002", + "emsp": "\u2003", + "thinsp": "\u2009", + "zwnj": "\u200C", + "zwj": "\u200D", + "lrm": "\u200E", + "rlm": "\u200F", + "ndash": "\u2013", + "mdash": "\u2014", + "lsquo": "\u2018", + "rsquo": "\u2019", + "sbquo": "\u201A", + "ldquo": "\u201C", + "rdquo": "\u201D", + "bdquo": "\u201E", + "dagger": "\u2020", + "Dagger": "\u2021", + "permil": "\u2030", + "lsaquo": "\u2039", + "rsaquo": "\u203A", + "euro": "\u20AC", +} + +// HTMLAutoClose is the set of HTML elements that +// should be considered to close automatically. 
+var HTMLAutoClose = htmlAutoClose + +var htmlAutoClose = []string{ + /* + hget http://www.w3.org/TR/html4/loose.dtd | + 9 sed -n 's/<!ELEMENT ([^ ]*) +- O EMPTY.+/ "\1",/p' | tr A-Z a-z + */ + "basefont", + "br", + "area", + "link", + "img", + "param", + "hr", + "input", + "col", + "frame", + "isindex", + "base", + "meta", +} + +var ( + esc_quot = []byte("&#34;") // shorter than "&quot;" + esc_apos = []byte("&#39;") // shorter than "&apos;" + esc_amp = []byte("&amp;") + esc_lt = []byte("&lt;") + esc_gt = []byte("&gt;") + esc_tab = []byte("&#x9;") + esc_nl = []byte("&#xA;") + esc_cr = []byte("&#xD;") + esc_fffd = []byte("\uFFFD") // Unicode replacement character +) + +// EscapeText writes to w the properly escaped XML equivalent +// of the plain text data s. +func EscapeText(w io.Writer, s []byte) error { + return escapeText(w, s, true) +} + +// escapeText writes to w the properly escaped XML equivalent +// of the plain text data s. If escapeNewline is true, newline +// characters will be escaped. +func escapeText(w io.Writer, s []byte, escapeNewline bool) error { + var esc []byte + last := 0 + for i := 0; i < len(s); { + r, width := utf8.DecodeRune(s[i:]) + i += width + switch r { + case '"': + esc = esc_quot + case '\'': + esc = esc_apos + case '&': + esc = esc_amp + case '<': + esc = esc_lt + case '>': + esc = esc_gt + case '\t': + esc = esc_tab + case '\n': + if !escapeNewline { + continue + } + esc = esc_nl + case '\r': + esc = esc_cr + default: + if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { + esc = esc_fffd + break + } + continue + } + if _, err := w.Write(s[last : i-width]); err != nil { + return err + } + if _, err := w.Write(esc); err != nil { + return err + } + last = i + } + if _, err := w.Write(s[last:]); err != nil { + return err + } + return nil +} + +// EscapeString writes to p the properly escaped XML equivalent +// of the plain text data s. 
+func (p *printer) EscapeString(s string) { + var esc []byte + last := 0 + for i := 0; i < len(s); { + r, width := utf8.DecodeRuneInString(s[i:]) + i += width + switch r { + case '"': + esc = esc_quot + case '\'': + esc = esc_apos + case '&': + esc = esc_amp + case '<': + esc = esc_lt + case '>': + esc = esc_gt + case '\t': + esc = esc_tab + case '\n': + esc = esc_nl + case '\r': + esc = esc_cr + default: + if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { + esc = esc_fffd + break + } + continue + } + p.WriteString(s[last : i-width]) + p.Write(esc) + last = i + } + p.WriteString(s[last:]) +} + +// Escape is like EscapeText but omits the error return value. +// It is provided for backwards compatibility with Go 1.0. +// Code targeting Go 1.1 or later should use EscapeText. +func Escape(w io.Writer, s []byte) { + EscapeText(w, s) +} + +// procInst parses the `param="..."` or `param='...'` +// value out of the provided string, returning "" if not found. +func procInst(param, s string) string { + // TODO: this parsing is somewhat lame and not exact. + // It works for all actual cases, though. + param = param + "=" + idx := strings.Index(s, param) + if idx == -1 { + return "" + } + v := s[idx+len(param):] + if v == "" { + return "" + } + if v[0] != '\'' && v[0] != '"' { + return "" + } + idx = strings.IndexRune(v[1:], rune(v[0])) + if idx == -1 { + return "" + } + return v[1 : idx+1] +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go b/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go new file mode 100644 index 00000000..312a7c98 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go @@ -0,0 +1,752 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xml + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + "testing" + "unicode/utf8" +) + +const testInput = ` +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> +<body xmlns:foo="ns1" xmlns="ns2" xmlns:tag="ns3" ` + + "\r\n\t" + ` > + <hello lang="en">World &lt;&gt;&apos;&quot; &#x767d;&#40300;翔</hello> + <query>&何; &is-it;</query> + <goodbye /> + <outer foo:attr="value" xmlns:tag="ns4"> + <inner/> + </outer> + <tag:name> + <![CDATA[Some text here.]]> + </tag:name> +</body><!-- missing final newline -->` + +var testEntity = map[string]string{"何": "What", "is-it": "is it?"} + +var rawTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"", "hello"}}, + CharData("\n "), + StartElement{Name{"", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"", "query"}}, + CharData("\n "), + StartElement{Name{"", "goodbye"}, []Attr{}}, + EndElement{Name{"", "goodbye"}}, + CharData("\n "), + StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"", "inner"}, []Attr{}}, + EndElement{Name{"", "inner"}}, + CharData("\n "), + EndElement{Name{"", "outer"}}, + CharData("\n "), + StartElement{Name{"tag", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"tag", "name"}}, + CharData("\n"), + EndElement{Name{"", "body"}}, + 
Comment(" missing final newline "), +} + +var cookedTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"ns2", "hello"}}, + CharData("\n "), + StartElement{Name{"ns2", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"ns2", "query"}}, + CharData("\n "), + StartElement{Name{"ns2", "goodbye"}, []Attr{}}, + EndElement{Name{"ns2", "goodbye"}}, + CharData("\n "), + StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"ns2", "inner"}, []Attr{}}, + EndElement{Name{"ns2", "inner"}}, + CharData("\n "), + EndElement{Name{"ns2", "outer"}}, + CharData("\n "), + StartElement{Name{"ns3", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"ns3", "name"}}, + CharData("\n"), + EndElement{Name{"ns2", "body"}}, + Comment(" missing final newline "), +} + +const testInputAltEncoding = ` +<?xml version="1.0" encoding="x-testing-uppercase"?> +<TAG>VALUE</TAG>` + +var rawTokensAltEncoding = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("value"), + EndElement{Name{"", "tag"}}, +} + +var xmlInput = []string{ + // unexpected EOF cases + "<", + "<t", + "<t ", + "<t/", + "<!", + "<!-", + "<!--", + "<!--c-", + "<!--c--", + "<!d", + "<t></", + "<t></t", + "<?", + "<?p", + "<t a", + "<t a=", + "<t a='", + "<t a=''", + 
"<t/><![", + "<t/><![C", + "<t/><![CDATA[d", + "<t/><![CDATA[d]", + "<t/><![CDATA[d]]", + + // other Syntax errors + "<>", + "<t/a", + "<0 />", + "<?0 >", + // "<!0 >", // let the Token() caller handle + "</0>", + "<t 0=''>", + "<t a='&'>", + "<t a='<'>", + "<t>&nbspc;</t>", + "<t a>", + "<t a=>", + "<t a=v>", + // "<![CDATA[d]]>", // let the Token() caller handle + "<t></e>", + "<t></>", + "<t></t!", + "<t>cdata]]></t>", +} + +func TestRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + testRawToken(t, d, testInput, rawTokens) +} + +const nonStrictInput = ` +<tag>non&entity</tag> +<tag>&unknown;entity</tag> +<tag>&#123</tag> +<tag>&#zzz;</tag> +<tag>&なまえ3;</tag> +<tag>&lt-gt;</tag> +<tag>&;</tag> +<tag>&0a;</tag> +` + +var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"} + +var nonStrictTokens = []Token{ + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("non&entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&unknown;entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&#123"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&#zzz;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&なまえ3;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&lt-gt;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&0a;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), +} + +func TestNonStrictRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(nonStrictInput)) + d.Strict = false + testRawToken(t, d, nonStrictInput, nonStrictTokens) +} + 
+type downCaser struct { + t *testing.T + r io.ByteReader +} + +func (d *downCaser) ReadByte() (c byte, err error) { + c, err = d.r.ReadByte() + if c >= 'A' && c <= 'Z' { + c += 'a' - 'A' + } + return +} + +func (d *downCaser) Read(p []byte) (int, error) { + d.t.Fatalf("unexpected Read call on downCaser reader") + panic("unreachable") +} + +func TestRawTokenAltEncoding(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) { + if charset != "x-testing-uppercase" { + t.Fatalf("unexpected charset %q", charset) + } + return &downCaser{t, input.(io.ByteReader)}, nil + } + testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding) +} + +func TestRawTokenAltEncodingNoConverter(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + token, err := d.RawToken() + if token == nil { + t.Fatalf("expected a token on first RawToken call") + } + if err != nil { + t.Fatal(err) + } + token, err = d.RawToken() + if token != nil { + t.Errorf("expected a nil token; got %#v", token) + } + if err == nil { + t.Fatalf("expected an error on second RawToken call") + } + const encoding = "x-testing-uppercase" + if !strings.Contains(err.Error(), encoding) { + t.Errorf("expected error to contain %q; got error: %v", + encoding, err) + } +} + +func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) { + lastEnd := int64(0) + for i, want := range rawTokens { + start := d.InputOffset() + have, err := d.RawToken() + end := d.InputOffset() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + var shave, swant string + if _, ok := have.(CharData); ok { + shave = fmt.Sprintf("CharData(%q)", have) + } else { + shave = fmt.Sprintf("%#v", have) + } + if _, ok := want.(CharData); ok { + swant = fmt.Sprintf("CharData(%q)", want) + } else { + swant = fmt.Sprintf("%#v", want) + } + t.Errorf("token %d = %s, want 
%s", i, shave, swant) + } + + // Check that InputOffset returned actual token. + switch { + case start < lastEnd: + t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have) + case start >= end: + // Special case: EndElement can be synthesized. + if start == end && end == lastEnd { + break + } + t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have) + case end > int64(len(raw)): + t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have) + default: + text := raw[start:end] + if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) { + t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have) + } + } + lastEnd = end + } +} + +// Ensure that directives (specifically !DOCTYPE) include the complete +// text of any nested directives, noting that < and > do not change +// nesting depth if they are in single or double quotes. + +var nestedDirectivesInput = ` +<!DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]> +<!DOCTYPE [<!ENTITY xlt ">">]> +<!DOCTYPE [<!ENTITY xlt "<">]> +<!DOCTYPE [<!ENTITY xlt '>'>]> +<!DOCTYPE [<!ENTITY xlt '<'>]> +<!DOCTYPE [<!ENTITY xlt '">'>]> +<!DOCTYPE [<!ENTITY xlt "'<">]> +` + +var nestedDirectivesTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`), + CharData("\n"), + Directive(`DOCTYPE [<!ENTITY xlt ">">]`), + CharData("\n"), + Directive(`DOCTYPE [<!ENTITY xlt "<">]`), + CharData("\n"), + Directive(`DOCTYPE [<!ENTITY xlt '>'>]`), + CharData("\n"), + Directive(`DOCTYPE [<!ENTITY xlt '<'>]`), + CharData("\n"), + Directive(`DOCTYPE [<!ENTITY xlt '">'>]`), + CharData("\n"), + Directive(`DOCTYPE [<!ENTITY xlt "'<">]`), + CharData("\n"), +} + +func TestNestedDirectives(t *testing.T) { + d := NewDecoder(strings.NewReader(nestedDirectivesInput)) + + for i, want := range nestedDirectivesTokens { + have, err := d.Token() + 
if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + + for i, want := range cookedTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestSyntax(t *testing.T) { + for i := range xmlInput { + d := NewDecoder(strings.NewReader(xmlInput[i])) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if _, ok := err.(*SyntaxError); !ok { + t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i]) + } + } +} + +type allScalars struct { + True1 bool + True2 bool + False1 bool + False2 bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint int + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Uintptr uintptr + Float32 float32 + Float64 float64 + String string + PtrString *string +} + +var all = allScalars{ + True1: true, + True2: true, + False1: false, + False2: false, + Int: 1, + Int8: -2, + Int16: 3, + Int32: -4, + Int64: 5, + Uint: 6, + Uint8: 7, + Uint16: 8, + Uint32: 9, + Uint64: 10, + Uintptr: 11, + Float32: 13.0, + Float64: 14.0, + String: "15", + PtrString: &sixteen, +} + +var sixteen = "16" + +const testScalarsInput = `<allscalars> + <True1>true</True1> + <True2>1</True2> + <False1>false</False1> + <False2>0</False2> + <Int>1</Int> + <Int8>-2</Int8> + <Int16>3</Int16> + <Int32>-4</Int32> + <Int64>5</Int64> + <Uint>6</Uint> + <Uint8>7</Uint8> + <Uint16>8</Uint16> + <Uint32>9</Uint32> + <Uint64>10</Uint64> + <Uintptr>11</Uintptr> + <Float>12.0</Float> + <Float32>13.0</Float32> + <Float64>14.0</Float64> + <String>15</String> + <PtrString>16</PtrString> +</allscalars>` + +func TestAllScalars(t *testing.T) 
{ + var a allScalars + err := Unmarshal([]byte(testScalarsInput), &a) + + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, all) { + t.Errorf("have %+v want %+v", a, all) + } +} + +type item struct { + Field_a string +} + +func TestIssue569(t *testing.T) { + data := `<item><Field_a>abcd</Field_a></item>` + var i item + err := Unmarshal([]byte(data), &i) + + if err != nil || i.Field_a != "abcd" { + t.Fatal("Expecting abcd") + } +} + +func TestUnquotedAttrs(t *testing.T) { + data := "<tag attr=azAZ09:-_\t>" + d := NewDecoder(strings.NewReader(data)) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != "tag" { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != "azAZ09:-_" { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != "attr" { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } +} + +func TestValuelessAttrs(t *testing.T) { + tests := [][3]string{ + {"<p nowrap>", "p", "nowrap"}, + {"<p nowrap >", "p", "nowrap"}, + {"<input checked/>", "input", "checked"}, + {"<input checked />", "input", "checked"}, + } + for _, test := range tests { + d := NewDecoder(strings.NewReader(test[0])) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != test[1] { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != test[2] { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != test[2] { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } + } +} + +func TestCopyTokenCharData(t *testing.T) { + data := []byte("same data") + var tok1 Token = CharData(data) + tok2 := CopyToken(tok1) + if 
!reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) != CharData") + } + data[1] = 'o' + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestCopyTokenStartElement(t *testing.T) { + elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}} + var tok1 Token = elt + tok2 := CopyToken(tok1) + if tok1.(StartElement).Attr[0].Value != "en" { + t.Error("CopyToken overwrote Attr[0]") + } + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(StartElement) != StartElement") + } + tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"} + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestSyntaxErrorLineNum(t *testing.T) { + testInput := "<P>Foo<P>\n\n<P>Bar</>\n" + d := NewDecoder(strings.NewReader(testInput)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Error("Expected SyntaxError.") + } + if synerr.Line != 3 { + t.Error("SyntaxError didn't have correct line number.") + } +} + +func TestTrailingRawToken(t *testing.T) { + input := `<FOO></FOO> ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.RawToken(); err == nil; _, err = d.RawToken() { + } + if err != io.EOF { + t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err) + } +} + +func TestTrailingToken(t *testing.T) { + input := `<FOO></FOO> ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +func TestEntityInsideCDATA(t *testing.T) { + input := `<test><![CDATA[ &val=foo ]]></test>` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +var characterTests = []struct { + in 
string + err string +}{ + {"\x12<doc/>", "illegal character code U+0012"}, + {"<?xml version=\"1.0\"?>\x0b<doc/>", "illegal character code U+000B"}, + {"\xef\xbf\xbe<doc/>", "illegal character code U+FFFE"}, + {"<?xml version=\"1.0\"?><doc>\r\n<hiya/>\x07<toots/></doc>", "illegal character code U+0007"}, + {"<?xml version=\"1.0\"?><doc \x12='value'>what's up</doc>", "expected attribute name in element"}, + {"<doc>&abc\x01;</doc>", "invalid character entity &abc (no semicolon)"}, + {"<doc>&\x01;</doc>", "invalid character entity & (no semicolon)"}, + {"<doc>&\xef\xbf\xbe;</doc>", "invalid character entity &\uFFFE;"}, + {"<doc>&hello;</doc>", "invalid character entity &hello;"}, +} + +func TestDisallowedCharacters(t *testing.T) { + + for i, tt := range characterTests { + d := NewDecoder(strings.NewReader(tt.in)) + var err error + + for err == nil { + _, err = d.Token() + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err) + } + if synerr.Msg != tt.err { + t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg) + } + } +} + +type procInstEncodingTest struct { + expect, got string +} + +var procInstTests = []struct { + input string + expect [2]string +}{ + {`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}}, + {`encoding="FOO" `, [2]string{"", "FOO"}}, +} + +func TestProcInstEncoding(t *testing.T) { + for _, test := range procInstTests { + if got := procInst("version", test.input); got != test.expect[0] { + t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0]) + } + if got := procInst("encoding", test.input); got != test.expect[1] { + t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1]) + } + } +} + +// Ensure that directives with 
comments include the complete +// text of any nested directives. + +var directivesWithCommentsInput = ` +<!DOCTYPE [<!-- a comment --><!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]> +<!DOCTYPE [<!ENTITY go "Golang"><!-- a comment-->]> +<!DOCTYPE <!-> <!> <!----> <!-->--> <!--->--> [<!ENTITY go "Golang"><!-- a comment-->]> +` + +var directivesWithCommentsTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`), + CharData("\n"), + Directive(`DOCTYPE [<!ENTITY go "Golang">]`), + CharData("\n"), + Directive(`DOCTYPE <!-> <!> [<!ENTITY go "Golang">]`), + CharData("\n"), +} + +func TestDirectivesWithComments(t *testing.T) { + d := NewDecoder(strings.NewReader(directivesWithCommentsInput)) + + for i, want := range directivesWithCommentsTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +// Writer whose Write method always returns an error. +type errWriter struct{} + +func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") } + +func TestEscapeTextIOErrors(t *testing.T) { + expectErr := "unwritable" + err := EscapeText(errWriter{}, []byte{'A'}) + + if err == nil || err.Error() != expectErr { + t.Errorf("have %v, want %v", err, expectErr) + } +} + +func TestEscapeTextInvalidChar(t *testing.T) { + input := []byte("A \x00 terminated string.") + expected := "A \uFFFD terminated string." 
+ + buff := new(bytes.Buffer) + if err := EscapeText(buff, input); err != nil { + t.Fatalf("have %v, want nil", err) + } + text := buff.String() + + if text != expected { + t.Errorf("have %v, want %v", text, expected) + } +} + +func TestIssue5880(t *testing.T) { + type T []byte + data, err := Marshal(T{192, 168, 0, 1}) + if err != nil { + t.Errorf("Marshal error: %v", err) + } + if !utf8.Valid(data) { + t.Errorf("Marshal generated invalid UTF-8: %x", data) + } +} diff --git a/vendor/golang.org/x/net/webdav/litmus_test_server.go b/vendor/golang.org/x/net/webdav/litmus_test_server.go new file mode 100644 index 00000000..514db5dd --- /dev/null +++ b/vendor/golang.org/x/net/webdav/litmus_test_server.go @@ -0,0 +1,94 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program is a server for the WebDAV 'litmus' compliance test at +http://www.webdav.org/neon/litmus/ +To run the test: + +go run litmus_test_server.go + +and separately, from the downloaded litmus-xxx directory: + +make URL=http://localhost:9999/ check +*/ +package main + +import ( + "flag" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/webdav" +) + +var port = flag.Int("port", 9999, "server port") + +func main() { + flag.Parse() + log.SetFlags(0) + h := &webdav.Handler{ + FileSystem: webdav.NewMemFS(), + LockSystem: webdav.NewMemLS(), + Logger: func(r *http.Request, err error) { + litmus := r.Header.Get("X-Litmus") + if len(litmus) > 19 { + litmus = litmus[:16] + "..." 
+ } + + switch r.Method { + case "COPY", "MOVE": + dst := "" + if u, err := url.Parse(r.Header.Get("Destination")); err == nil { + dst = u.Path + } + o := r.Header.Get("Overwrite") + log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err) + default: + log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err) + } + }, + } + + // The next line would normally be: + // http.Handle("/", h) + // but we wrap that HTTP handler h to cater for a special case. + // + // The propfind_invalid2 litmus test case expects an empty namespace prefix + // declaration to be an error. The FAQ in the webdav litmus test says: + // + // "What does the "propfind_invalid2" test check for?... + // + // If a request was sent with an XML body which included an empty namespace + // prefix declaration (xmlns:ns1=""), then the server must reject that with + // a "400 Bad Request" response, as it is invalid according to the XML + // Namespace specification." + // + // On the other hand, the Go standard library's encoding/xml package + // accepts an empty xmlns namespace, as per the discussion at + // https://github.com/golang/go/issues/8068 + // + // Empty namespaces seem disallowed in the second (2006) edition of the XML + // standard, but allowed in a later edition. The grammar differs between + // http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and + // http://www.w3.org/TR/REC-xml-names/#dt-prefix + // + // Thus, we assume that the propfind_invalid2 test is obsolete, and + // hard-code the 400 Bad Request response that the test expects. 
+ http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" { + http.Error(w, "400 Bad Request", http.StatusBadRequest) + return + } + h.ServeHTTP(w, r) + })) + + addr := fmt.Sprintf(":%d", *port) + log.Printf("Serving %v", addr) + log.Fatal(http.ListenAndServe(addr, nil)) +} diff --git a/vendor/golang.org/x/net/webdav/lock.go b/vendor/golang.org/x/net/webdav/lock.go new file mode 100644 index 00000000..344ac5ce --- /dev/null +++ b/vendor/golang.org/x/net/webdav/lock.go @@ -0,0 +1,445 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "container/heap" + "errors" + "strconv" + "strings" + "sync" + "time" +) + +var ( + // ErrConfirmationFailed is returned by a LockSystem's Confirm method. + ErrConfirmationFailed = errors.New("webdav: confirmation failed") + // ErrForbidden is returned by a LockSystem's Unlock method. + ErrForbidden = errors.New("webdav: forbidden") + // ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods. + ErrLocked = errors.New("webdav: locked") + // ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods. + ErrNoSuchLock = errors.New("webdav: no such lock") +) + +// Condition can match a WebDAV resource, based on a token or ETag. +// Exactly one of Token and ETag should be non-empty. +type Condition struct { + Not bool + Token string + ETag string +} + +// LockSystem manages access to a collection of named resources. The elements +// in a lock name are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. 
+type LockSystem interface { + // Confirm confirms that the caller can claim all of the locks specified by + // the given conditions, and that holding the union of all of those locks + // gives exclusive access to all of the named resources. Up to two resources + // can be named. Empty names are ignored. + // + // Exactly one of release and err will be non-nil. If release is non-nil, + // all of the requested locks are held until release is called. Calling + // release does not unlock the lock, in the WebDAV UNLOCK sense, but once + // Confirm has confirmed that a lock claim is valid, that lock cannot be + // Confirmed again until it has been released. + // + // If Confirm returns ErrConfirmationFailed then the Handler will continue + // to try any other set of locks presented (a WebDAV HTTP request can + // present more than one set of locks). If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error) + + // Create creates a lock with the given depth, duration, owner and root + // (name). The depth will either be negative (meaning infinite) or zero. + // + // If Create returns ErrLocked then the Handler will write a "423 Locked" + // HTTP status. If it returns any other non-nil error, the Handler will + // write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + // + // The token returned identifies the created lock. It should be an absolute + // URI as defined by RFC 3986, Section 4.3. In particular, it should not + // contain whitespace. + Create(now time.Time, details LockDetails) (token string, err error) + + // Refresh refreshes the lock with the given token. + // + // If Refresh returns ErrLocked then the Handler will write a "423 Locked" + // HTTP Status. 
If Refresh returns ErrNoSuchLock then the Handler will write + // a "412 Precondition Failed" HTTP Status. If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) + + // Unlock unlocks the lock with the given token. + // + // If Unlock returns ErrForbidden then the Handler will write a "403 + // Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler + // will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock + // then the Handler will write a "409 Conflict" HTTP Status. If it returns + // any other non-nil error, the Handler will write a "500 Internal Server + // Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for + // when to use each error. + Unlock(now time.Time, token string) error +} + +// LockDetails are a lock's metadata. +type LockDetails struct { + // Root is the root resource name being locked. For a zero-depth lock, the + // root is the only resource being locked. + Root string + // Duration is the lock timeout. A negative duration means infinite. + Duration time.Duration + // OwnerXML is the verbatim <owner> XML given in a LOCK HTTP request. + // + // TODO: does the "verbatim" nature play well with XML namespaces? + // Does the OwnerXML field need to have more structure? See + // https://codereview.appspot.com/175140043/#msg2 + OwnerXML string + // ZeroDepth is whether the lock has zero depth. If it does not have zero + // depth, it has infinite depth. + ZeroDepth bool +} + +// NewMemLS returns a new in-memory LockSystem. 
+func NewMemLS() LockSystem { + return &memLS{ + byName: make(map[string]*memLSNode), + byToken: make(map[string]*memLSNode), + gen: uint64(time.Now().Unix()), + } +} + +type memLS struct { + mu sync.Mutex + byName map[string]*memLSNode + byToken map[string]*memLSNode + gen uint64 + // byExpiry only contains those nodes whose LockDetails have a finite + // Duration and are yet to expire. + byExpiry byExpiry +} + +func (m *memLS) nextToken() string { + m.gen++ + return strconv.FormatUint(m.gen, 10) +} + +func (m *memLS) collectExpiredNodes(now time.Time) { + for len(m.byExpiry) > 0 { + if now.Before(m.byExpiry[0].expiry) { + break + } + m.remove(m.byExpiry[0]) + } +} + +func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + var n0, n1 *memLSNode + if name0 != "" { + if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil { + return nil, ErrConfirmationFailed + } + } + if name1 != "" { + if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil { + return nil, ErrConfirmationFailed + } + } + + // Don't hold the same node twice. + if n1 == n0 { + n1 = nil + } + + if n0 != nil { + m.hold(n0) + } + if n1 != nil { + m.hold(n1) + } + return func() { + m.mu.Lock() + defer m.mu.Unlock() + if n1 != nil { + m.unhold(n1) + } + if n0 != nil { + m.unhold(n0) + } + }, nil +} + +// lookup returns the node n that locks the named resource, provided that n +// matches at least one of the given conditions and that lock isn't held by +// another party. Otherwise, it returns nil. +// +// n may be a parent of the named resource, if n is an infinite depth lock. +func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) { + // TODO: support Condition.Not and Condition.ETag. 
+ for _, c := range conditions { + n = m.byToken[c.Token] + if n == nil || n.held { + continue + } + if name == n.details.Root { + return n + } + if n.details.ZeroDepth { + continue + } + if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") { + return n + } + } + return nil +} + +func (m *memLS) hold(n *memLSNode) { + if n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = true + if n.details.Duration >= 0 && n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func (m *memLS) unhold(n *memLSNode) { + if !n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = false + if n.details.Duration >= 0 { + heap.Push(&m.byExpiry, n) + } +} + +func (m *memLS) Create(now time.Time, details LockDetails) (string, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + details.Root = slashClean(details.Root) + + if !m.canCreate(details.Root, details.ZeroDepth) { + return "", ErrLocked + } + n := m.create(details.Root) + n.token = m.nextToken() + m.byToken[n.token] = n + n.details = details + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.token, nil +} + +func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return LockDetails{}, ErrNoSuchLock + } + if n.held { + return LockDetails{}, ErrLocked + } + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } + n.details.Duration = duration + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.details, nil +} + +func (m *memLS) Unlock(now time.Time, token string) error { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return ErrNoSuchLock + } + if n.held { + return ErrLocked + } + 
m.remove(n) + return nil +} + +func (m *memLS) canCreate(name string, zeroDepth bool) bool { + return walkToRoot(name, func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + return true + } + if first { + if n.token != "" { + // The target node is already locked. + return false + } + if !zeroDepth { + // The requested lock depth is infinite, and the fact that n exists + // (n != nil) means that a descendent of the target node is locked. + return false + } + } else if n.token != "" && !n.details.ZeroDepth { + // An ancestor of the target node is locked with infinite depth. + return false + } + return true + }) +} + +func (m *memLS) create(name string) (ret *memLSNode) { + walkToRoot(name, func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + n = &memLSNode{ + details: LockDetails{ + Root: name0, + }, + byExpiryIndex: -1, + } + m.byName[name0] = n + } + n.refCount++ + if first { + ret = n + } + return true + }) + return ret +} + +func (m *memLS) remove(n *memLSNode) { + delete(m.byToken, n.token) + n.token = "" + walkToRoot(n.details.Root, func(name0 string, first bool) bool { + x := m.byName[name0] + x.refCount-- + if x.refCount == 0 { + delete(m.byName, name0) + } + return true + }) + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func walkToRoot(name string, f func(name0 string, first bool) bool) bool { + for first := true; ; first = false { + if !f(name, first) { + return false + } + if name == "/" { + break + } + name = name[:strings.LastIndex(name, "/")] + if name == "" { + name = "/" + } + } + return true +} + +type memLSNode struct { + // details are the lock metadata. Even if this node's name is not explicitly locked, + // details.Root will still equal the node's name. + details LockDetails + // token is the unique identifier for this node's lock. An empty token means that + // this node is not explicitly locked. 
+ token string + // refCount is the number of self-or-descendent nodes that are explicitly locked. + refCount int + // expiry is when this node's lock expires. + expiry time.Time + // byExpiryIndex is the index of this node in memLS.byExpiry. It is -1 + // if this node does not expire, or has expired. + byExpiryIndex int + // held is whether this node's lock is actively held by a Confirm call. + held bool +} + +type byExpiry []*memLSNode + +func (b *byExpiry) Len() int { + return len(*b) +} + +func (b *byExpiry) Less(i, j int) bool { + return (*b)[i].expiry.Before((*b)[j].expiry) +} + +func (b *byExpiry) Swap(i, j int) { + (*b)[i], (*b)[j] = (*b)[j], (*b)[i] + (*b)[i].byExpiryIndex = i + (*b)[j].byExpiryIndex = j +} + +func (b *byExpiry) Push(x interface{}) { + n := x.(*memLSNode) + n.byExpiryIndex = len(*b) + *b = append(*b, n) +} + +func (b *byExpiry) Pop() interface{} { + i := len(*b) - 1 + n := (*b)[i] + (*b)[i] = nil + n.byExpiryIndex = -1 + *b = (*b)[:i] + return n +} + +const infiniteTimeout = -1 + +// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is +// empty, an infiniteTimeout is returned. +func parseTimeout(s string) (time.Duration, error) { + if s == "" { + return infiniteTimeout, nil + } + if i := strings.IndexByte(s, ','); i >= 0 { + s = s[:i] + } + s = strings.TrimSpace(s) + if s == "Infinite" { + return infiniteTimeout, nil + } + const pre = "Second-" + if !strings.HasPrefix(s, pre) { + return 0, errInvalidTimeout + } + s = s[len(pre):] + if s == "" || s[0] < '0' || '9' < s[0] { + return 0, errInvalidTimeout + } + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || 1<<32-1 < n { + return 0, errInvalidTimeout + } + return time.Duration(n) * time.Second, nil +} diff --git a/vendor/golang.org/x/net/webdav/lock_test.go b/vendor/golang.org/x/net/webdav/lock_test.go new file mode 100644 index 00000000..116d6c0d --- /dev/null +++ b/vendor/golang.org/x/net/webdav/lock_test.go @@ -0,0 +1,731 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "fmt" + "math/rand" + "path" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" +) + +func TestWalkToRoot(t *testing.T) { + testCases := []struct { + name string + want []string + }{{ + "/a/b/c/d", + []string{ + "/a/b/c/d", + "/a/b/c", + "/a/b", + "/a", + "/", + }, + }, { + "/a", + []string{ + "/a", + "/", + }, + }, { + "/", + []string{ + "/", + }, + }} + + for _, tc := range testCases { + var got []string + if !walkToRoot(tc.name, func(name0 string, first bool) bool { + if first != (len(got) == 0) { + t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got)) + return false + } + got = append(got, name0) + return true + }) { + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want) + } + } +} + +var lockTestDurations = []time.Duration{ + infiniteTimeout, // infiniteTimeout means to never expire. + 0, // A zero duration means to expire immediately. + 100 * time.Hour, // A very large duration will not expire in these tests. +} + +// lockTestNames are the names of a set of mutually compatible locks. For each +// name fragment: +// - _ means no explicit lock. 
+// - i means a infinite-depth lock, +// - z means a zero-depth lock, +var lockTestNames = []string{ + "/_/_/_/_/z", + "/_/_/i", + "/_/z", + "/_/z/i", + "/_/z/z", + "/_/z/_/i", + "/_/z/_/z", + "/i", + "/z", + "/z/_/i", + "/z/_/z", +} + +func lockTestZeroDepth(name string) bool { + switch name[len(name)-1] { + case 'i': + return false + case 'z': + return true + } + panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name)) +} + +func TestMemLSCanCreate(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + for _, name := range lockTestNames { + _, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + } + + wantCanCreate := func(name string, zeroDepth bool) bool { + for _, n := range lockTestNames { + switch { + case n == name: + // An existing lock has the same name as the proposed lock. + return false + case strings.HasPrefix(n, name): + // An existing lock would be a child of the proposed lock, + // which conflicts if the proposed lock has infinite depth. + if !zeroDepth { + return false + } + case strings.HasPrefix(name, n): + // An existing lock would be an ancestor of the proposed lock, + // which conflicts if the ancestor has infinite depth. 
+ if n[len(n)-1] == 'i' { + return false + } + } + } + return true + } + + var check func(int, string) + check = func(recursion int, name string) { + for _, zeroDepth := range []bool{false, true} { + got := m.canCreate(name, zeroDepth) + want := wantCanCreate(name, zeroDepth) + if got != want { + t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want) + } + } + if recursion == 6 { + return + } + if name != "/" { + name += "/" + } + for _, c := range "_iz" { + check(recursion+1, name+string(c)) + } + } + check(0, "/") +} + +func TestMemLSLookup(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + badToken := m.nextToken() + t.Logf("badToken=%q", badToken) + + for _, name := range lockTestNames { + token, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token) + } + + baseNames := append([]string{"/a", "/b/c"}, lockTestNames...) 
+ for _, baseName := range baseNames { + for _, suffix := range []string{"", "/0", "/1/2/3"} { + name := baseName + suffix + + goodToken := "" + base := m.byName[baseName] + if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) { + goodToken = base.token + } + + for _, token := range []string{badToken, goodToken} { + if token == "" { + continue + } + + got := m.lookup(name, Condition{Token: token}) + want := base + if token == badToken { + want = nil + } + if got != want { + t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p", + name, token, token == badToken, got, want) + } + } + } + } +} + +func TestMemLSConfirm(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + alice, err := m.Create(now, LockDetails{ + Root: "/alice", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + tweedle, err := m.Create(now, LockDetails{ + Root: "/tweedle", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + + // Test a mismatch between name and condition. + _, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (mismatch): inconsistent state: %v", err) + } + + // Test two names (that fall under the same lock) in the one Confirm call. + release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (twins): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (twins): inconsistent state: %v", err) + } + release() + if err := m.consistent(); err != nil { + t.Fatalf("release (twins): inconsistent state: %v", err) + } + + // Test the same two names in overlapping Confirm / release calls. 
+ releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #0): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err) + } + + _, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err) + } + + releaseDee() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #2): inconsistent state: %v", err) + } + + releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #3): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err) + } + + // Test that you can't unlock a held lock. + err = m.Unlock(now, tweedle) + if err != ErrLocked { + t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err) + } + + releaseDum() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #5): inconsistent state: %v", err) + } + + err = m.Unlock(now, tweedle) + if err != nil { + t.Fatalf("Unlock (sequence #6): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err) + } +} + +func TestMemLSNonCanonicalRoot(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + token, err := m.Create(now, LockDetails{ + Root: "/foo/./bar//", + Duration: 1 * time.Second, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + if err := m.Unlock(now, token); err != nil { + t.Fatalf("Unlock: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock: inconsistent state: %v", err) + } +} + +func 
TestMemLSExpiry(t *testing.T) { + m := NewMemLS().(*memLS) + testCases := []string{ + "setNow 0", + "create /a.5", + "want /a.5", + "create /c.6", + "want /a.5 /c.6", + "create /a/b.7", + "want /a.5 /a/b.7 /c.6", + "setNow 4", + "want /a.5 /a/b.7 /c.6", + "setNow 5", + "want /a/b.7 /c.6", + "setNow 6", + "want /a/b.7", + "setNow 7", + "want ", + "setNow 8", + "want ", + "create /a.12", + "create /b.13", + "create /c.15", + "create /a/d.16", + "want /a.12 /a/d.16 /b.13 /c.15", + "refresh /a.14", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 12", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 13", + "want /a.14 /a/d.16 /c.15", + "setNow 14", + "want /a/d.16 /c.15", + "refresh /a/d.20", + "refresh /c.20", + "want /a/d.20 /c.20", + "setNow 20", + "want ", + } + + tokens := map[string]string{} + zTime := time.Unix(0, 0) + now := zTime + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create", "refresh": + parts := strings.Split(arg, ".") + if len(parts) != 2 { + t.Fatalf("test case #%d %q: invalid create", i, tc) + } + root := parts[0] + d, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now) + + switch op { + case "create": + token, err := m.Create(now, LockDetails{ + Root: root, + Duration: dur, + ZeroDepth: true, + }) + if err != nil { + t.Fatalf("test case #%d %q: Create: %v", i, tc, err) + } + tokens[root] = token + + case "refresh": + token := tokens[root] + if token == "" { + t.Fatalf("test case #%d %q: no token for %q", i, tc, root) + } + got, err := m.Refresh(now, token, dur) + if err != nil { + t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err) + } + want := LockDetails{ + Root: root, + Duration: dur, + ZeroDepth: true, + 
} + if got != want { + t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want) + } + } + + case "setNow": + d, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + now = time.Unix(0, 0).Add(time.Duration(d) * time.Second) + + case "want": + m.mu.Lock() + m.collectExpiredNodes(now) + got := make([]string, 0, len(m.byToken)) + for _, n := range m.byToken { + got = append(got, fmt.Sprintf("%s.%d", + n.details.Root, n.expiry.Sub(zTime)/time.Second)) + } + m.mu.Unlock() + sort.Strings(got) + want := []string{} + if arg != "" { + want = strings.Split(arg, " ") + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want) + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err) + } + } +} + +func TestMemLS(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + rng := rand.New(rand.NewSource(0)) + tokens := map[string]string{} + nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0 + const N = 2000 + + for i := 0; i < N; i++ { + name := lockTestNames[rng.Intn(len(lockTestNames))] + duration := lockTestDurations[rng.Intn(len(lockTestDurations))] + confirmed, unlocked := false, false + + // If the name was already locked, we randomly confirm/release, refresh + // or unlock it. Otherwise, we create a lock. 
+ token := tokens[name] + if token != "" { + switch rng.Intn(3) { + case 0: + confirmed = true + nConfirm++ + release, err := m.Confirm(now, name, "", Condition{Token: token}) + if err != nil { + t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err) + } + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + release() + + case 1: + nRefresh++ + if _, err := m.Refresh(now, token, duration); err != nil { + t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err) + } + + case 2: + unlocked = true + nUnlock++ + if err := m.Unlock(now, token); err != nil { + t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err) + } + } + + } else { + nCreate++ + var err error + token, err = m.Create(now, LockDetails{ + Root: name, + Duration: duration, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("iteration #%d: Create %q: %v", i, name, err) + } + } + + if !confirmed { + if duration == 0 || unlocked { + // A zero-duration lock should expire immediately and is + // effectively equivalent to being unlocked. + tokens[name] = "" + } else { + tokens[name] = token + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + } + + if nConfirm < N/10 { + t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10) + } + if nCreate < N/10 { + t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10) + } + if nRefresh < N/10 { + t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10) + } + if nUnlock < N/10 { + t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10) + } +} + +func (m *memLS) consistent() error { + m.mu.Lock() + defer m.mu.Unlock() + + // If m.byName is non-empty, then it must contain an entry for the root "/", + // and its refCount should equal the number of locked nodes. 
+ if len(m.byName) > 0 { + n := m.byName["/"] + if n == nil { + return fmt.Errorf(`non-empty m.byName does not contain the root "/"`) + } + if n.refCount != len(m.byToken) { + return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken)) + } + } + + for name, n := range m.byName { + // The map keys should be consistent with the node's copy of the key. + if n.details.Root != name { + return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name) + } + + // A name must be clean, and start with a "/". + if len(name) == 0 || name[0] != '/' { + return fmt.Errorf(`node name %q does not start with "/"`, name) + } + if name != path.Clean(name) { + return fmt.Errorf(`node name %q is not clean`, name) + } + + // A node's refCount should be positive. + if n.refCount <= 0 { + return fmt.Errorf("non-positive refCount for node at name %q", name) + } + + // A node's refCount should be the number of self-or-descendents that + // are locked (i.e. have a non-empty token). + var list []string + for name0, n0 := range m.byName { + // All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z', + // so strings.HasPrefix is equivalent to self-or-descendent name match. + // We don't have to worry about "/foo/bar" being a false positive match + // for "/foo/b". + if strings.HasPrefix(name0, name) && n0.token != "" { + list = append(list, name0) + } + } + if n.refCount != len(list) { + sort.Strings(list) + return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)", + name, n.refCount, list, len(list)) + } + + // A node n is in m.byToken if it has a non-empty token. + if n.token != "" { + if _, ok := m.byToken[n.token]; !ok { + return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token) + } + } + + // A node n is in m.byExpiry if it has a non-negative byExpiryIndex. 
+ if n.byExpiryIndex >= 0 { + if n.byExpiryIndex >= len(m.byExpiry) { + return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry)) + } + if n != m.byExpiry[n.byExpiryIndex] { + return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex) + } + } + } + + for token, n := range m.byToken { + // The map keys should be consistent with the node's copy of the key. + if n.token != token { + return fmt.Errorf("node token %q != byToken map key %q", n.token, token) + } + + // Every node in m.byToken is in m.byName. + if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root) + } + } + + for i, n := range m.byExpiry { + // The slice indices should be consistent with the node's copy of the index. + if n.byExpiryIndex != i { + return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i) + } + + // Every node in m.byExpiry is in m.byName. + if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root) + } + + // No node in m.byExpiry should be held. 
+ if n.held { + return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root) + } + } + return nil +} + +func TestParseTimeout(t *testing.T) { + testCases := []struct { + s string + want time.Duration + wantErr error + }{{ + "", + infiniteTimeout, + nil, + }, { + "Infinite", + infiniteTimeout, + nil, + }, { + "Infinitesimal", + 0, + errInvalidTimeout, + }, { + "infinite", + 0, + errInvalidTimeout, + }, { + "Second-0", + 0 * time.Second, + nil, + }, { + "Second-123", + 123 * time.Second, + nil, + }, { + " Second-456 ", + 456 * time.Second, + nil, + }, { + "Second-4100000000", + 4100000000 * time.Second, + nil, + }, { + "junk", + 0, + errInvalidTimeout, + }, { + "Second-", + 0, + errInvalidTimeout, + }, { + "Second--1", + 0, + errInvalidTimeout, + }, { + "Second--123", + 0, + errInvalidTimeout, + }, { + "Second-+123", + 0, + errInvalidTimeout, + }, { + "Second-0x123", + 0, + errInvalidTimeout, + }, { + "second-123", + 0, + errInvalidTimeout, + }, { + "Second-4294967295", + 4294967295 * time.Second, + nil, + }, { + // Section 10.7 says that "The timeout value for TimeType "Second" + // must not be greater than 2^32-1." + "Second-4294967296", + 0, + errInvalidTimeout, + }, { + // This test case comes from section 9.10.9 of the spec. It says, + // + // "In this request, the client has specified that it desires an + // infinite-length lock, if available, otherwise a timeout of 4.1 + // billion seconds, if available." + // + // The Go WebDAV package always supports infinite length locks, + // and ignores the fallback after the comma. 
+ "Infinite, Second-4100000000", + infiniteTimeout, + nil, + }} + + for _, tc := range testCases { + got, gotErr := parseTimeout(tc.s) + if got != tc.want || gotErr != tc.wantErr { + t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr) + } + } +} diff --git a/vendor/golang.org/x/net/webdav/prop.go b/vendor/golang.org/x/net/webdav/prop.go new file mode 100644 index 00000000..ac6c65a1 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/prop.go @@ -0,0 +1,388 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "fmt" + "io" + "mime" + "net/http" + "os" + "path/filepath" + "strconv" +) + +// Proppatch describes a property update instruction as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH +type Proppatch struct { + // Remove specifies whether this patch removes properties. If it does not + // remove them, it sets them. + Remove bool + // Props contains the properties to be set or removed. + Props []Property +} + +// Propstat describes a XML propstat element as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +type Propstat struct { + // Props contains the properties for which Status applies. + Props []Property + + // Status defines the HTTP status code of the properties in Prop. + // Allowed values include, but are not limited to the WebDAV status + // code extensions for HTTP/1.1. + // http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 + Status int + + // XMLError contains the XML representation of the optional error element. + // XML content within this field must not rely on any predefined + // namespace declarations or prefixes. If empty, the XML error element + // is omitted. 
+ XMLError string + + // ResponseDescription contains the contents of the optional + // responsedescription field. If empty, the XML element is omitted. + ResponseDescription string +} + +// makePropstats returns a slice containing those of x and y whose Props slice +// is non-empty. If both are empty, it returns a slice containing an otherwise +// zero Propstat whose HTTP status code is 200 OK. +func makePropstats(x, y Propstat) []Propstat { + pstats := make([]Propstat, 0, 2) + if len(x.Props) != 0 { + pstats = append(pstats, x) + } + if len(y.Props) != 0 { + pstats = append(pstats, y) + } + if len(pstats) == 0 { + pstats = append(pstats, Propstat{ + Status: http.StatusOK, + }) + } + return pstats +} + +// DeadPropsHolder holds the dead properties of a resource. +// +// Dead properties are those properties that are explicitly defined. In +// comparison, live properties, such as DAV:getcontentlength, are implicitly +// defined by the underlying resource, and cannot be explicitly overridden or +// removed. See the Terminology section of +// http://www.webdav.org/specs/rfc4918.html#rfc.section.3 +// +// There is a whitelist of the names of live properties. This package handles +// all live properties, and will only pass non-whitelisted names to the Patch +// method of DeadPropsHolder implementations. +type DeadPropsHolder interface { + // DeadProps returns a copy of the dead properties held. + DeadProps() (map[xml.Name]Property, error) + + // Patch patches the dead properties held. + // + // Patching is atomic; either all or no patches succeed. It returns (nil, + // non-nil) if an internal server error occurred, otherwise the Propstats + // collectively contain one Property for each proposed patch Property. If + // all patches succeed, Patch returns a slice of length one and a Propstat + // element with a 200 OK HTTP status code. If none succeed, for reasons + // other than an internal server error, no Propstat has status 200 OK. 
+ // + // For more details on when various HTTP status codes apply, see + // http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status + Patch([]Proppatch) ([]Propstat, error) +} + +// liveProps contains all supported, protected DAV: properties. +var liveProps = map[xml.Name]struct { + // findFn implements the propfind function of this property. If nil, + // it indicates a hidden property. + findFn func(FileSystem, LockSystem, string, os.FileInfo) (string, error) + // dir is true if the property applies to directories. + dir bool +}{ + xml.Name{Space: "DAV:", Local: "resourcetype"}: { + findFn: findResourceType, + dir: true, + }, + xml.Name{Space: "DAV:", Local: "displayname"}: { + findFn: findDisplayName, + dir: true, + }, + xml.Name{Space: "DAV:", Local: "getcontentlength"}: { + findFn: findContentLength, + dir: false, + }, + xml.Name{Space: "DAV:", Local: "getlastmodified"}: { + findFn: findLastModified, + dir: false, + }, + xml.Name{Space: "DAV:", Local: "creationdate"}: { + findFn: nil, + dir: false, + }, + xml.Name{Space: "DAV:", Local: "getcontentlanguage"}: { + findFn: nil, + dir: false, + }, + xml.Name{Space: "DAV:", Local: "getcontenttype"}: { + findFn: findContentType, + dir: false, + }, + xml.Name{Space: "DAV:", Local: "getetag"}: { + findFn: findETag, + // findETag implements ETag as the concatenated hex values of a file's + // modification time and size. This is not a reliable synchronization + // mechanism for directories, so we do not advertise getetag for DAV + // collections. + dir: false, + }, + + // TODO: The lockdiscovery property requires LockSystem to list the + // active locks on a resource. + xml.Name{Space: "DAV:", Local: "lockdiscovery"}: {}, + xml.Name{Space: "DAV:", Local: "supportedlock"}: { + findFn: findSupportedLock, + dir: true, + }, +} + +// TODO(nigeltao) merge props and allprop? + +// Props returns the status of the properties named pnames for resource name. 
+// +// Each Propstat has a unique status and each property name will only be part +// of one Propstat element. +func props(fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) { + f, err := fs.OpenFile(name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pstatOK := Propstat{Status: http.StatusOK} + pstatNotFound := Propstat{Status: http.StatusNotFound} + for _, pn := range pnames { + // If this file has dead properties, check if they contain pn. + if dp, ok := deadProps[pn]; ok { + pstatOK.Props = append(pstatOK.Props, dp) + continue + } + // Otherwise, it must either be a live property or we don't know it. + if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) { + innerXML, err := prop.findFn(fs, ls, name, fi) + if err != nil { + return nil, err + } + pstatOK.Props = append(pstatOK.Props, Property{ + XMLName: pn, + InnerXML: []byte(innerXML), + }) + } else { + pstatNotFound.Props = append(pstatNotFound.Props, Property{ + XMLName: pn, + }) + } + } + return makePropstats(pstatOK, pstatNotFound), nil +} + +// Propnames returns the property names defined for resource name. 
+func propnames(fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) { + f, err := fs.OpenFile(name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps)) + for pn, prop := range liveProps { + if prop.findFn != nil && (prop.dir || !isDir) { + pnames = append(pnames, pn) + } + } + for pn := range deadProps { + pnames = append(pnames, pn) + } + return pnames, nil +} + +// Allprop returns the properties defined for resource name and the properties +// named in include. +// +// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined +// within the RFC plus dead properties. Other live properties should only be +// returned if they are named in 'include'. +// +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND +func allprop(fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) { + pnames, err := propnames(fs, ls, name) + if err != nil { + return nil, err + } + // Add names from include if they are not already covered in pnames. + nameset := make(map[xml.Name]bool) + for _, pn := range pnames { + nameset[pn] = true + } + for _, pn := range include { + if !nameset[pn] { + pnames = append(pnames, pn) + } + } + return props(fs, ls, name, pnames) +} + +// Patch patches the properties of resource name. The return values are +// constrained in the same manner as DeadPropsHolder.Patch. 
+func patch(fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) { + conflict := false +loop: + for _, patch := range patches { + for _, p := range patch.Props { + if _, ok := liveProps[p.XMLName]; ok { + conflict = true + break loop + } + } + } + if conflict { + pstatForbidden := Propstat{ + Status: http.StatusForbidden, + XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`, + } + pstatFailedDep := Propstat{ + Status: StatusFailedDependency, + } + for _, patch := range patches { + for _, p := range patch.Props { + if _, ok := liveProps[p.XMLName]; ok { + pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName}) + } else { + pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName}) + } + } + } + return makePropstats(pstatForbidden, pstatFailedDep), nil + } + + f, err := fs.OpenFile(name, os.O_RDWR, 0) + if err != nil { + return nil, err + } + defer f.Close() + if dph, ok := f.(DeadPropsHolder); ok { + ret, err := dph.Patch(patches) + if err != nil { + return nil, err + } + // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that + // "The contents of the prop XML element must only list the names of + // properties to which the result in the status element applies." + for _, pstat := range ret { + for i, p := range pstat.Props { + pstat.Props[i] = Property{XMLName: p.XMLName} + } + } + return ret, nil + } + // The file doesn't implement the optional DeadPropsHolder interface, so + // all patches are forbidden. 
+ pstat := Propstat{Status: http.StatusForbidden} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + } + } + return []Propstat{pstat}, nil +} + +func findResourceType(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + if fi.IsDir() { + return `<D:collection xmlns:D="DAV:"/>`, nil + } + return "", nil +} + +func findDisplayName(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + if slashClean(name) == "/" { + // Hide the real name of a possibly prefixed root directory. + return "", nil + } + return fi.Name(), nil +} + +func findContentLength(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return strconv.FormatInt(fi.Size(), 10), nil +} + +func findLastModified(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return fi.ModTime().Format(http.TimeFormat), nil +} + +func findContentType(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + f, err := fs.OpenFile(name, os.O_RDONLY, 0) + if err != nil { + return "", err + } + defer f.Close() + // This implementation is based on serveContent's code in the standard net/http package. + ctype := mime.TypeByExtension(filepath.Ext(name)) + if ctype != "" { + return ctype, nil + } + // Read a chunk to decide between utf-8 text and binary. + var buf [512]byte + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return "", err + } + ctype = http.DetectContentType(buf[:n]) + // Rewind file. + _, err = f.Seek(0, os.SEEK_SET) + return ctype, err +} + +func findETag(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + // The Apache http 2.4 web server by default concatenates the + // modification time and size of a file. We replicate the heuristic + // with nanosecond granularity. 
+ return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil +} + +func findSupportedLock(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return `` + + `<D:lockentry xmlns:D="DAV:">` + + `<D:lockscope><D:exclusive/></D:lockscope>` + + `<D:locktype><D:write/></D:locktype>` + + `</D:lockentry>`, nil +} diff --git a/vendor/golang.org/x/net/webdav/prop_test.go b/vendor/golang.org/x/net/webdav/prop_test.go new file mode 100644 index 00000000..ee188acd --- /dev/null +++ b/vendor/golang.org/x/net/webdav/prop_test.go @@ -0,0 +1,606 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "fmt" + "net/http" + "os" + "reflect" + "sort" + "testing" +) + +func TestMemPS(t *testing.T) { + // calcProps calculates the getlastmodified and getetag DAV: property + // values in pstats for resource name in file-system fs. 
+ calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error { + fi, err := fs.Stat(name) + if err != nil { + return err + } + for _, pst := range pstats { + for i, p := range pst.Props { + switch p.XMLName { + case xml.Name{Space: "DAV:", Local: "getlastmodified"}: + p.InnerXML = []byte(fi.ModTime().Format(http.TimeFormat)) + pst.Props[i] = p + case xml.Name{Space: "DAV:", Local: "getetag"}: + if fi.IsDir() { + continue + } + etag, err := findETag(fs, ls, name, fi) + if err != nil { + return err + } + p.InnerXML = []byte(etag) + pst.Props[i] = p + } + } + } + return nil + } + + const ( + lockEntry = `` + + `<D:lockentry xmlns:D="DAV:">` + + `<D:lockscope><D:exclusive/></D:lockscope>` + + `<D:locktype><D:write/></D:locktype>` + + `</D:lockentry>` + statForbiddenError = `<D:cannot-modify-protected-property xmlns:D="DAV:"/>` + ) + + type propOp struct { + op string + name string + pnames []xml.Name + patches []Proppatch + wantPnames []xml.Name + wantPropstats []Propstat + } + + testCases := []struct { + desc string + noDeadProps bool + buildfs []string + propOp []propOp + }{{ + desc: "propname", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propname", + name: "/dir", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "supportedlock"}, + }, + }, { + op: "propname", + name: "/file", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + }, + }}, + }, { + desc: "allprop dir and file", + buildfs: []string{"mkdir /dir", "write /file foobarbaz"}, + propOp: []propOp{{ + op: "allprop", + name: "/dir", + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: 
[]Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("dir"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}, + }}, + }, { + op: "allprop", + name: "/file", + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("file"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"}, + InnerXML: []byte("9"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, + InnerXML: []byte("text/plain; charset=utf-8"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}, + }}, + }, { + op: "allprop", + name: "/file", + pnames: []xml.Name{ + {"DAV:", "resourcetype"}, + {"foo", "bar"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("file"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"}, + InnerXML: []byte("9"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, + InnerXML: []byte("text/plain; charset=utf-8"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. 
+ }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}}, { + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}}, + }, + }}, + }, { + desc: "propfind DAV:resourcetype", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "resourcetype"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`), + }}, + }}, + }, { + op: "propfind", + name: "/file", + pnames: []xml.Name{{"DAV:", "resourcetype"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }}, + }}, + }}, + }, { + desc: "propfind unsupported DAV properties", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "getcontentlanguage"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlanguage"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "creationdate"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "creationdate"}, + }}, + }}, + }}, + }, { + desc: "propfind getetag for files but not for directories", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "getetag"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }, { + op: "propfind", + name: "/file", + pnames: []xml.Name{{"DAV:", "getetag"}}, + wantPropstats: []Propstat{{ + Status: 
http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. + }}, + }}, + }}, + }, { + desc: "proppatch property on no-dead-properties file system", + buildfs: []string{"mkdir /dir"}, + noDeadProps: true, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }}, + }, { + desc: "proppatch dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + }}, + }, { + desc: "proppatch dead property with failed dependency", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }, { + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: 
[]byte("xxx"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + }}, + }, { + Status: StatusFailedDependency, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "proppatch remove dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: 
http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }, { + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }}, + }, { + desc: "propname with dead property", + buildfs: []string{"touch /file"}, + propOp: []propOp{{ + op: "proppatch", + name: "/file", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propname", + name: "/file", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "foo", Local: "bar"}, + }, + }}, + }, { + desc: "proppatch remove unknown dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "bad: propfind unknown property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"foo:", "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo:", Local: "bar"}, + }}, + }}, + }}, + }} + + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + if tc.noDeadProps 
{ + fs = noDeadPropsFS{fs} + } + ls := NewMemLS() + for _, op := range tc.propOp { + desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name) + if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil { + t.Fatalf("%s: calcProps: %v", desc, err) + } + + // Call property system. + var propstats []Propstat + switch op.op { + case "propname": + pnames, err := propnames(fs, ls, op.name) + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + sort.Sort(byXMLName(pnames)) + sort.Sort(byXMLName(op.wantPnames)) + if !reflect.DeepEqual(pnames, op.wantPnames) { + t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames) + } + continue + case "allprop": + propstats, err = allprop(fs, ls, op.name, op.pnames) + case "propfind": + propstats, err = props(fs, ls, op.name, op.pnames) + case "proppatch": + propstats, err = patch(fs, ls, op.name, op.patches) + default: + t.Fatalf("%s: %s not implemented", desc, op.op) + } + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + // Compare return values from allprop, propfind or proppatch. 
+ for _, pst := range propstats { + sort.Sort(byPropname(pst.Props)) + } + for _, pst := range op.wantPropstats { + sort.Sort(byPropname(pst.Props)) + } + sort.Sort(byStatus(propstats)) + sort.Sort(byStatus(op.wantPropstats)) + if !reflect.DeepEqual(propstats, op.wantPropstats) { + t.Errorf("%s: propstat\ngot %q\nwant %q", desc, propstats, op.wantPropstats) + } + } + } +} + +func cmpXMLName(a, b xml.Name) bool { + if a.Space != b.Space { + return a.Space < b.Space + } + return a.Local < b.Local +} + +type byXMLName []xml.Name + +func (b byXMLName) Len() int { return len(b) } +func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) } + +type byPropname []Property + +func (b byPropname) Len() int { return len(b) } +func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) } + +type byStatus []Propstat + +func (b byStatus) Len() int { return len(b) } +func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status } + +type noDeadPropsFS struct { + FileSystem +} + +func (fs noDeadPropsFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, err := fs.FileSystem.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + return noDeadPropsFile{f}, nil +} + +// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods +// provided by the underlying File implementation. 
+type noDeadPropsFile struct { + f File +} + +func (f noDeadPropsFile) Close() error { return f.f.Close() } +func (f noDeadPropsFile) Read(p []byte) (int, error) { return f.f.Read(p) } +func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error) { return f.f.Readdir(count) } +func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) } +func (f noDeadPropsFile) Stat() (os.FileInfo, error) { return f.f.Stat() } +func (f noDeadPropsFile) Write(p []byte) (int, error) { return f.f.Write(p) } diff --git a/vendor/golang.org/x/net/webdav/webdav.go b/vendor/golang.org/x/net/webdav/webdav.go new file mode 100644 index 00000000..c15c7c15 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/webdav.go @@ -0,0 +1,686 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package webdav provides a WebDAV server implementation. +package webdav // import "golang.org/x/net/webdav" + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" +) + +type Handler struct { + // Prefix is the URL path prefix to strip from WebDAV resource paths. + Prefix string + // FileSystem is the virtual file system. + FileSystem FileSystem + // LockSystem is the lock management system. + LockSystem LockSystem + // Logger is an optional error logger. If non-nil, it will be called + // for all HTTP requests. 
+ Logger func(*http.Request, error) +} + +func (h *Handler) stripPrefix(p string) (string, int, error) { + if h.Prefix == "" { + return p, http.StatusOK, nil + } + if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) { + return r, http.StatusOK, nil + } + return p, http.StatusNotFound, errPrefixMismatch +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + status, err := http.StatusBadRequest, errUnsupportedMethod + if h.FileSystem == nil { + status, err = http.StatusInternalServerError, errNoFileSystem + } else if h.LockSystem == nil { + status, err = http.StatusInternalServerError, errNoLockSystem + } else { + switch r.Method { + case "OPTIONS": + status, err = h.handleOptions(w, r) + case "GET", "HEAD", "POST": + status, err = h.handleGetHeadPost(w, r) + case "DELETE": + status, err = h.handleDelete(w, r) + case "PUT": + status, err = h.handlePut(w, r) + case "MKCOL": + status, err = h.handleMkcol(w, r) + case "COPY", "MOVE": + status, err = h.handleCopyMove(w, r) + case "LOCK": + status, err = h.handleLock(w, r) + case "UNLOCK": + status, err = h.handleUnlock(w, r) + case "PROPFIND": + status, err = h.handlePropfind(w, r) + case "PROPPATCH": + status, err = h.handleProppatch(w, r) + } + } + + if status != 0 { + w.WriteHeader(status) + if status != http.StatusNoContent { + w.Write([]byte(StatusText(status))) + } + } + if h.Logger != nil { + h.Logger(r, err) + } +} + +func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) { + token, err = h.LockSystem.Create(now, LockDetails{ + Root: root, + Duration: infiniteTimeout, + ZeroDepth: true, + }) + if err != nil { + if err == ErrLocked { + return "", StatusLocked, err + } + return "", http.StatusInternalServerError, err + } + return token, 0, nil +} + +func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) { + hdr := r.Header.Get("If") + if hdr == "" { + // An empty If header means that the client hasn't 
previously created locks. + // Even if this client doesn't care about locks, we still need to check that + // the resources aren't locked by another client, so we create temporary + // locks that would conflict with another client's locks. These temporary + // locks are unlocked at the end of the HTTP request. + now, srcToken, dstToken := time.Now(), "", "" + if src != "" { + srcToken, status, err = h.lock(now, src) + if err != nil { + return nil, status, err + } + } + if dst != "" { + dstToken, status, err = h.lock(now, dst) + if err != nil { + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + return nil, status, err + } + } + + return func() { + if dstToken != "" { + h.LockSystem.Unlock(now, dstToken) + } + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + }, 0, nil + } + + ih, ok := parseIfHeader(hdr) + if !ok { + return nil, http.StatusBadRequest, errInvalidIfHeader + } + // ih is a disjunction (OR) of ifLists, so any ifList will do. + for _, l := range ih.lists { + lsrc := l.resourceTag + if lsrc == "" { + lsrc = src + } else { + u, err := url.Parse(lsrc) + if err != nil { + continue + } + if u.Host != r.Host { + continue + } + lsrc = u.Path + } + release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...) + if err == ErrConfirmationFailed { + continue + } + if err != nil { + return nil, http.StatusInternalServerError, err + } + return release, 0, nil + } + // Section 10.4.1 says that "If this header is evaluated and all state lists + // fail, then the request must fail with a 412 (Precondition Failed) status." + // We follow the spec even though the cond_put_corrupt_token test case from + // the litmus test warns on seeing a 412 instead of a 423 (Locked). 
+ return nil, http.StatusPreconditionFailed, ErrLocked +} + +func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + allow := "OPTIONS, LOCK, PUT, MKCOL" + if fi, err := h.FileSystem.Stat(reqPath); err == nil { + if fi.IsDir() { + allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND" + } else { + allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT" + } + } + w.Header().Set("Allow", allow) + // http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes + w.Header().Set("DAV", "1, 2") + // http://msdn.microsoft.com/en-au/library/cc250217.aspx + w.Header().Set("MS-Author-Via", "DAV") + return 0, nil +} + +func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + // TODO: check locks for read-only access?? + f, err := h.FileSystem.OpenFile(reqPath, os.O_RDONLY, 0) + if err != nil { + return http.StatusNotFound, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return http.StatusNotFound, err + } + if fi.IsDir() { + return http.StatusMethodNotAllowed, nil + } + etag, err := findETag(h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + // Let ServeContent determine the Content-Type header. + http.ServeContent(w, r, reqPath, fi.ModTime(), f) + return 0, nil +} + +func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + // TODO: return MultiStatus where appropriate. 
+ + // "godoc os RemoveAll" says that "If the path does not exist, RemoveAll + // returns nil (no error)." WebDAV semantics are that it should return a + // "404 Not Found". We therefore have to Stat before we RemoveAll. + if _, err := h.FileSystem.Stat(reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + if err := h.FileSystem.RemoveAll(reqPath); err != nil { + return http.StatusMethodNotAllowed, err + } + return http.StatusNoContent, nil +} + +func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz' + // comments in http.checkEtag. + + f, err := h.FileSystem.OpenFile(reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return http.StatusNotFound, err + } + _, copyErr := io.Copy(f, r.Body) + fi, statErr := f.Stat() + closeErr := f.Close() + // TODO(rost): Returning 405 Method Not Allowed might not be appropriate. 
+ if copyErr != nil { + return http.StatusMethodNotAllowed, copyErr + } + if statErr != nil { + return http.StatusMethodNotAllowed, statErr + } + if closeErr != nil { + return http.StatusMethodNotAllowed, closeErr + } + etag, err := findETag(h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + return http.StatusCreated, nil +} + +func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + if r.ContentLength > 0 { + return http.StatusUnsupportedMediaType, nil + } + if err := h.FileSystem.Mkdir(reqPath, 0777); err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusMethodNotAllowed, err + } + return http.StatusCreated, nil +} + +func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) { + hdr := r.Header.Get("Destination") + if hdr == "" { + return http.StatusBadRequest, errInvalidDestination + } + u, err := url.Parse(hdr) + if err != nil { + return http.StatusBadRequest, errInvalidDestination + } + if u.Host != r.Host { + return http.StatusBadGateway, errInvalidDestination + } + + src, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + + dst, status, err := h.stripPrefix(u.Path) + if err != nil { + return status, err + } + + if dst == "" { + return http.StatusBadGateway, errInvalidDestination + } + if dst == src { + return http.StatusForbidden, errDestinationEqualsSource + } + + if r.Method == "COPY" { + // Section 7.5.1 says that a COPY only needs to lock the destination, + // not both destination and source. 
Strictly speaking, this is racy, + // even though a COPY doesn't modify the source, if a concurrent + // operation modifies the source. However, the litmus test explicitly + // checks that COPYing a locked-by-another source is OK. + release, status, err := h.confirmLocks(r, "", dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.8.3 says that "The COPY method on a collection without a Depth + // header must act as if a Depth header with value "infinity" was included". + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.8.3 says that "A client may submit a Depth header on a + // COPY on a collection with a value of "0" or "infinity"." + return http.StatusBadRequest, errInvalidDepth + } + } + return copyFiles(h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0) + } + + release, status, err := h.confirmLocks(r, src, dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.9.2 says that "The MOVE method on a collection must act as if + // a "Depth: infinity" header was used on it. A client must not submit a + // Depth header on a MOVE on a collection with any value but "infinity"." + if hdr := r.Header.Get("Depth"); hdr != "" { + if parseDepth(hdr) != infiniteDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + return moveFiles(h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T") +} + +func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) { + duration, err := parseTimeout(r.Header.Get("Timeout")) + if err != nil { + return http.StatusBadRequest, err + } + li, status, err := readLockInfo(r.Body) + if err != nil { + return status, err + } + + token, ld, now, created := "", LockDetails{}, time.Now(), false + if li == (lockInfo{}) { + // An empty lockInfo means to refresh the lock. 
+ ih, ok := parseIfHeader(r.Header.Get("If")) + if !ok { + return http.StatusBadRequest, errInvalidIfHeader + } + if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 { + token = ih.lists[0].conditions[0].Token + } + if token == "" { + return http.StatusBadRequest, errInvalidLockToken + } + ld, err = h.LockSystem.Refresh(now, token, duration) + if err != nil { + if err == ErrNoSuchLock { + return http.StatusPreconditionFailed, err + } + return http.StatusInternalServerError, err + } + + } else { + // Section 9.10.3 says that "If no Depth header is submitted on a LOCK request, + // then the request MUST act as if a "Depth:infinity" had been submitted." + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.10.3 says that "Values other than 0 or infinity must not be + // used with the Depth header on a LOCK method". + return http.StatusBadRequest, errInvalidDepth + } + } + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ld = LockDetails{ + Root: reqPath, + Duration: duration, + OwnerXML: li.Owner.InnerXML, + ZeroDepth: depth == 0, + } + token, err = h.LockSystem.Create(now, ld) + if err != nil { + if err == ErrLocked { + return StatusLocked, err + } + return http.StatusInternalServerError, err + } + defer func() { + if retErr != nil { + h.LockSystem.Unlock(now, token) + } + }() + + // Create the resource if it didn't previously exist. + if _, err := h.FileSystem.Stat(reqPath); err != nil { + f, err := h.FileSystem.OpenFile(reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + // TODO: detect missing intermediate dirs and return http.StatusConflict? + return http.StatusInternalServerError, err + } + f.Close() + created = true + } + + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We add angle brackets. 
+ w.Header().Set("Lock-Token", "<"+token+">") + } + + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + if created { + // This is "w.WriteHeader(http.StatusCreated)" and not "return + // http.StatusCreated, nil" because we write our own (XML) response to w + // and Handler.ServeHTTP would otherwise write "Created". + w.WriteHeader(http.StatusCreated) + } + writeLockInfo(w, token, ld) + return 0, nil +} + +func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) { + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We strip its angle brackets. + t := r.Header.Get("Lock-Token") + if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' { + return http.StatusBadRequest, errInvalidLockToken + } + t = t[1 : len(t)-1] + + switch err = h.LockSystem.Unlock(time.Now(), t); err { + case nil: + return http.StatusNoContent, err + case ErrForbidden: + return http.StatusForbidden, err + case ErrLocked: + return StatusLocked, err + case ErrNoSuchLock: + return http.StatusConflict, err + default: + return http.StatusInternalServerError, err + } +} + +func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + fi, err := h.FileSystem.Stat(reqPath) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth == invalidDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + pf, status, err := readPropfind(r.Body) + if err != nil { + return status, err + } + + mw := multistatusWriter{w: w} + + walkFn := func(reqPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + var pstats []Propstat + if pf.Propname != nil { + pnames, err 
:= propnames(h.FileSystem, h.LockSystem, reqPath) + if err != nil { + return err + } + pstat := Propstat{Status: http.StatusOK} + for _, xmlname := range pnames { + pstat.Props = append(pstat.Props, Property{XMLName: xmlname}) + } + pstats = append(pstats, pstat) + } else if pf.Allprop != nil { + pstats, err = allprop(h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } else { + pstats, err = props(h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } + if err != nil { + return err + } + return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats)) + } + + walkErr := walkFS(h.FileSystem, depth, reqPath, fi, walkFn) + closeErr := mw.close() + if walkErr != nil { + return http.StatusInternalServerError, walkErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + if _, err := h.FileSystem.Stat(reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + patches, status, err := readProppatch(r.Body) + if err != nil { + return status, err + } + pstats, err := patch(h.FileSystem, h.LockSystem, reqPath, patches) + if err != nil { + return http.StatusInternalServerError, err + } + mw := multistatusWriter{w: w} + writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats)) + closeErr := mw.close() + if writeErr != nil { + return http.StatusInternalServerError, writeErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func makePropstatResponse(href string, pstats []Propstat) *response { + resp := response{ + Href: []string{(&url.URL{Path: href}).EscapedPath()}, + Propstat: 
make([]propstat, 0, len(pstats)), + } + for _, p := range pstats { + var xmlErr *xmlError + if p.XMLError != "" { + xmlErr = &xmlError{InnerXML: []byte(p.XMLError)} + } + resp.Propstat = append(resp.Propstat, propstat{ + Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)), + Prop: p.Props, + ResponseDescription: p.ResponseDescription, + Error: xmlErr, + }) + } + return &resp +} + +const ( + infiniteDepth = -1 + invalidDepth = -2 +) + +// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and +// infiniteDepth. Parsing any other string returns invalidDepth. +// +// Different WebDAV methods have further constraints on valid depths: +// - PROPFIND has no further restrictions, as per section 9.1. +// - COPY accepts only "0" or "infinity", as per section 9.8.3. +// - MOVE accepts only "infinity", as per section 9.9.2. +// - LOCK accepts only "0" or "infinity", as per section 9.10.3. +// These constraints are enforced by the handleXxx methods. +func parseDepth(s string) int { + switch s { + case "0": + return 0 + case "1": + return 1 + case "infinity": + return infiniteDepth + } + return invalidDepth +} + +// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 +const ( + StatusMulti = 207 + StatusUnprocessableEntity = 422 + StatusLocked = 423 + StatusFailedDependency = 424 + StatusInsufficientStorage = 507 +) + +func StatusText(code int) string { + switch code { + case StatusMulti: + return "Multi-Status" + case StatusUnprocessableEntity: + return "Unprocessable Entity" + case StatusLocked: + return "Locked" + case StatusFailedDependency: + return "Failed Dependency" + case StatusInsufficientStorage: + return "Insufficient Storage" + } + return http.StatusText(code) +} + +var ( + errDestinationEqualsSource = errors.New("webdav: destination equals source") + errDirectoryNotEmpty = errors.New("webdav: directory not empty") + errInvalidDepth = errors.New("webdav: invalid depth") + errInvalidDestination = errors.New("webdav: 
invalid destination") + errInvalidIfHeader = errors.New("webdav: invalid If header") + errInvalidLockInfo = errors.New("webdav: invalid lock info") + errInvalidLockToken = errors.New("webdav: invalid lock token") + errInvalidPropfind = errors.New("webdav: invalid propfind") + errInvalidProppatch = errors.New("webdav: invalid proppatch") + errInvalidResponse = errors.New("webdav: invalid response") + errInvalidTimeout = errors.New("webdav: invalid timeout") + errNoFileSystem = errors.New("webdav: no file system") + errNoLockSystem = errors.New("webdav: no lock system") + errNotADirectory = errors.New("webdav: not a directory") + errPrefixMismatch = errors.New("webdav: prefix mismatch") + errRecursionTooDeep = errors.New("webdav: recursion too deep") + errUnsupportedLockInfo = errors.New("webdav: unsupported lock info") + errUnsupportedMethod = errors.New("webdav: unsupported method") +) diff --git a/vendor/golang.org/x/net/webdav/webdav_test.go b/vendor/golang.org/x/net/webdav/webdav_test.go new file mode 100644 index 00000000..befd96a8 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/webdav_test.go @@ -0,0 +1,242 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package webdav + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "regexp" + "sort" + "strings" + "testing" +) + +// TODO: add tests to check XML responses with the expected prefix path +func TestPrefix(t *testing.T) { + const dst, blah = "Destination", "blah blah blah" + + do := func(method, urlStr string, body io.Reader, wantStatusCode int, headers ...string) error { + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return err + } + for len(headers) >= 2 { + req.Header.Add(headers[0], headers[1]) + headers = headers[2:] + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != wantStatusCode { + return fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode) + } + return nil + } + + prefixes := []string{ + "/", + "/a/", + "/a/b/", + "/a/b/c/", + } + for _, prefix := range prefixes { + fs := NewMemFS() + h := &Handler{ + FileSystem: fs, + LockSystem: NewMemLS(), + } + mux := http.NewServeMux() + if prefix != "/" { + h.Prefix = prefix + } + mux.Handle(prefix, h) + srv := httptest.NewServer(mux) + defer srv.Close() + + // The script is: + // MKCOL /a + // MKCOL /a/b + // PUT /a/b/c + // COPY /a/b/c /a/b/d + // MKCOL /a/b/e + // MOVE /a/b/d /a/b/e/f + // which should yield the (possibly stripped) filenames /a/b/c and + // /a/b/e/f, plus their parent directories. 
+ + wantA := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusMovedPermanently, + "/a/b/": http.StatusNotFound, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if err := do("MKCOL", srv.URL+"/a", nil, wantA); err != nil { + t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err) + continue + } + + wantB := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusMovedPermanently, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if err := do("MKCOL", srv.URL+"/a/b", nil, wantB); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err) + continue + } + + wantC := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if err := do("PUT", srv.URL+"/a/b/c", strings.NewReader(blah), wantC); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err) + continue + } + + wantD := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if err := do("COPY", srv.URL+"/a/b/c", nil, wantD, dst, srv.URL+"/a/b/d"); err != nil { + t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err) + continue + } + + wantE := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if err := do("MKCOL", srv.URL+"/a/b/e", nil, wantE); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err) + continue + } + + wantF := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if err := do("MOVE", srv.URL+"/a/b/d", nil, wantF, dst, srv.URL+"/a/b/e/f"); err != nil { + t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err) + continue + } + + got, err := find(nil, fs, "/") + if err != nil { + t.Errorf("prefix=%-9q find: %v", 
prefix, err) + continue + } + sort.Strings(got) + want := map[string][]string{ + "/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f"}, + "/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f"}, + "/a/b/": {"/", "/c", "/e", "/e/f"}, + "/a/b/c/": {"/"}, + }[prefix] + if !reflect.DeepEqual(got, want) { + t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want) + continue + } + } +} + +func TestFilenameEscape(t *testing.T) { + re := regexp.MustCompile(`<D:href>([^<]*)</D:href>`) + do := func(method, urlStr string) (string, error) { + req, err := http.NewRequest(method, urlStr, nil) + if err != nil { + return "", err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", err + } + defer res.Body.Close() + + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", err + } + m := re.FindStringSubmatch(string(b)) + if len(m) != 2 { + return "", errors.New("D:href not found") + } + + return m[1], nil + } + + testCases := []struct { + name, want string + }{{ + name: `/foo%bar`, + want: `/foo%25bar`, + }, { + name: `/こんにちわ世界`, + want: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`, + }, { + name: `/Program Files/`, + want: `/Program%20Files`, + }, { + name: `/go+lang`, + want: `/go+lang`, + }, { + name: `/go&lang`, + want: `/go&amp;lang`, + }} + fs := NewMemFS() + for _, tc := range testCases { + if strings.HasSuffix(tc.name, "/") { + if err := fs.Mkdir(tc.name, 0755); err != nil { + t.Fatalf("name=%q: Mkdir: %v", tc.name, err) + } + } else { + f, err := fs.OpenFile(tc.name, os.O_CREATE, 0644) + if err != nil { + t.Fatalf("name=%q: OpenFile: %v", tc.name, err) + } + f.Close() + } + } + + srv := httptest.NewServer(&Handler{ + FileSystem: fs, + LockSystem: NewMemLS(), + }) + defer srv.Close() + + u, err := url.Parse(srv.URL) + if err != nil { + t.Fatal(err) + } + + for _, tc := range testCases { + u.Path = tc.name + got, err := do("PROPFIND", u.String()) + if err != nil { + t.Errorf("name=%q: PROPFIND: %v", tc.name, err) + 
continue + } + if got != tc.want { + t.Errorf("name=%q: got %q, want %q", tc.name, got, tc.want) + } + } +} diff --git a/vendor/golang.org/x/net/webdav/xml.go b/vendor/golang.org/x/net/webdav/xml.go new file mode 100644 index 00000000..790dc816 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/xml.go @@ -0,0 +1,519 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +// The XML encoding is covered by Section 14. +// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "time" + + // As of https://go-review.googlesource.com/#/c/12772/ which was submitted + // in July 2015, this package uses an internal fork of the standard + // library's encoding/xml package, due to changes in the way namespaces + // were encoded. Such changes were introduced in the Go 1.5 cycle, but were + // rolled back in response to https://github.com/golang/go/issues/11841 + // + // However, this package's exported API, specifically the Property and + // DeadPropsHolder types, need to refer to the standard library's version + // of the xml.Name type, as code that imports this package cannot refer to + // the internal version. + // + // This file therefore imports both the internal and external versions, as + // ixml and xml, and converts between them. + // + // In the long term, this package should use the standard library's version + // only, and the internal fork deleted, once + // https://github.com/golang/go/issues/13400 is resolved. 
+ ixml "golang.org/x/net/webdav/internal/xml" +) + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo +type lockInfo struct { + XMLName ixml.Name `xml:"lockinfo"` + Exclusive *struct{} `xml:"lockscope>exclusive"` + Shared *struct{} `xml:"lockscope>shared"` + Write *struct{} `xml:"locktype>write"` + Owner owner `xml:"owner"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner +type owner struct { + InnerXML string `xml:",innerxml"` +} + +func readLockInfo(r io.Reader) (li lockInfo, status int, err error) { + c := &countingReader{r: r} + if err = ixml.NewDecoder(c).Decode(&li); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to refresh the lock. + // http://www.webdav.org/specs/rfc4918.html#refreshing-locks + return lockInfo{}, 0, nil + } + err = errInvalidLockInfo + } + return lockInfo{}, http.StatusBadRequest, err + } + // We only support exclusive (non-shared) write locks. In practice, these are + // the only types of locks that seem to matter. 
+ if li.Exclusive == nil || li.Shared != nil || li.Write == nil { + return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo + } + return li, 0, nil +} + +type countingReader struct { + n int + r io.Reader +} + +func (c *countingReader) Read(p []byte) (int, error) { + n, err := c.r.Read(p) + c.n += n + return n, err +} + +func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) { + depth := "infinity" + if ld.ZeroDepth { + depth = "0" + } + timeout := ld.Duration / time.Second + return fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"+ + "<D:prop xmlns:D=\"DAV:\"><D:lockdiscovery><D:activelock>\n"+ + " <D:locktype><D:write/></D:locktype>\n"+ + " <D:lockscope><D:exclusive/></D:lockscope>\n"+ + " <D:depth>%s</D:depth>\n"+ + " <D:owner>%s</D:owner>\n"+ + " <D:timeout>Second-%d</D:timeout>\n"+ + " <D:locktoken><D:href>%s</D:href></D:locktoken>\n"+ + " <D:lockroot><D:href>%s</D:href></D:lockroot>\n"+ + "</D:activelock></D:lockdiscovery></D:prop>", + depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root), + ) +} + +func escape(s string) string { + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '&', '\'', '<', '>': + b := bytes.NewBuffer(nil) + ixml.EscapeText(b, []byte(s)) + return b.String() + } + } + return s +} + +// Next returns the next token, if any, in the XML stream of d. +// RFC 4918 requires to ignore comments, processing instructions +// and directives. +// http://www.webdav.org/specs/rfc4918.html#property_values +// http://www.webdav.org/specs/rfc4918.html#xml-extensibility +func next(d *ixml.Decoder) (ixml.Token, error) { + for { + t, err := d.Token() + if err != nil { + return t, err + } + switch t.(type) { + case ixml.Comment, ixml.Directive, ixml.ProcInst: + continue + default: + return t, nil + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind) +type propfindProps []xml.Name + +// UnmarshalXML appends the property names enclosed within start to pn. 
+// +// It returns an error if start does not contain any properties or if +// properties contain values. Character data between properties is ignored. +func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + for { + t, err := next(d) + if err != nil { + return err + } + switch t.(type) { + case ixml.EndElement: + if len(*pn) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + name := t.(ixml.StartElement).Name + t, err = next(d) + if err != nil { + return err + } + if _, ok := t.(ixml.EndElement); !ok { + return fmt.Errorf("unexpected token %T", t) + } + *pn = append(*pn, xml.Name(name)) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind +type propfind struct { + XMLName ixml.Name `xml:"DAV: propfind"` + Allprop *struct{} `xml:"DAV: allprop"` + Propname *struct{} `xml:"DAV: propname"` + Prop propfindProps `xml:"DAV: prop"` + Include propfindProps `xml:"DAV: include"` +} + +func readPropfind(r io.Reader) (pf propfind, status int, err error) { + c := countingReader{r: r} + if err = ixml.NewDecoder(&c).Decode(&pf); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to propfind allprop. 
+ // http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND + return propfind{Allprop: new(struct{})}, 0, nil + } + err = errInvalidPropfind + } + return propfind{}, http.StatusBadRequest, err + } + + if pf.Allprop == nil && pf.Include != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Prop != nil && pf.Propname != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + return pf, 0, nil +} + +// Property represents a single DAV resource property as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties +type Property struct { + // XMLName is the fully qualified name that identifies this property. + XMLName xml.Name + + // Lang is an optional xml:lang attribute. + Lang string `xml:"xml:lang,attr,omitempty"` + + // InnerXML contains the XML representation of the property value. + // See http://www.webdav.org/specs/rfc4918.html#property_values + // + // Property values of complex type or mixed-content must have fully + // expanded XML namespaces or be self-contained with according + // XML namespace declarations. They must not rely on any XML + // namespace declarations within the scope of the XML document, + // even including the DAV: namespace. + InnerXML []byte `xml:",innerxml"` +} + +// ixmlProperty is the same as the Property type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlProperty struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error +// See multistatusWriter for the "D:" namespace prefix. 
+type xmlError struct { + XMLName ixml.Name `xml:"D:error"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +// See multistatusWriter for the "D:" namespace prefix. +type propstat struct { + Prop []Property `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// ixmlPropstat is the same as the propstat type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlPropstat struct { + Prop []ixmlProperty `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace +// before encoding. See multistatusWriter. +func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error { + // Convert from a propstat to an ixmlPropstat. + ixmlPs := ixmlPropstat{ + Prop: make([]ixmlProperty, len(ps.Prop)), + Status: ps.Status, + Error: ps.Error, + ResponseDescription: ps.ResponseDescription, + } + for k, prop := range ps.Prop { + ixmlPs.Prop[k] = ixmlProperty{ + XMLName: ixml.Name(prop.XMLName), + Lang: prop.Lang, + InnerXML: prop.InnerXML, + } + } + + for k, prop := range ixmlPs.Prop { + if prop.XMLName.Space == "DAV:" { + prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local} + ixmlPs.Prop[k] = prop + } + } + // Distinct type to avoid infinite recursion of MarshalXML. + type newpropstat ixmlPropstat + return e.EncodeElement(newpropstat(ixmlPs), start) +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response +// See multistatusWriter for the "D:" namespace prefix. 
+type response struct { + XMLName ixml.Name `xml:"D:response"` + Href []string `xml:"D:href"` + Propstat []propstat `xml:"D:propstat"` + Status string `xml:"D:status,omitempty"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MultistatusWriter marshals one or more Responses into a XML +// multistatus response. +// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus +// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as +// "DAV:" on this element, is prepended on the nested response, as well as on all +// its nested elements. All property names in the DAV: namespace are prefixed as +// well. This is because some versions of Mini-Redirector (on windows 7) ignore +// elements with a default namespace (no prefixed namespace). A less intrusive fix +// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177 +type multistatusWriter struct { + // ResponseDescription contains the optional responsedescription + // of the multistatus XML element. Only the latest content before + // close will be emitted. Empty response descriptions are not + // written. + responseDescription string + + w http.ResponseWriter + enc *ixml.Encoder +} + +// Write validates and emits a DAV response as part of a multistatus response +// element. +// +// It sets the HTTP status code of its underlying http.ResponseWriter to 207 +// (Multi-Status) and populates the Content-Type header. If r is the +// first, valid response to be written, Write prepends the XML representation +// of r with a multistatus tag. Callers must call close after the last response +// has been written. 
+func (w *multistatusWriter) write(r *response) error { + switch len(r.Href) { + case 0: + return errInvalidResponse + case 1: + if len(r.Propstat) > 0 != (r.Status == "") { + return errInvalidResponse + } + default: + if len(r.Propstat) > 0 || r.Status == "" { + return errInvalidResponse + } + } + err := w.writeHeader() + if err != nil { + return err + } + return w.enc.Encode(r) +} + +// writeHeader writes a XML multistatus start element on w's underlying +// http.ResponseWriter and returns the result of the write operation. +// After the first write attempt, writeHeader becomes a no-op. +func (w *multistatusWriter) writeHeader() error { + if w.enc != nil { + return nil + } + w.w.Header().Add("Content-Type", "text/xml; charset=utf-8") + w.w.WriteHeader(StatusMulti) + _, err := fmt.Fprintf(w.w, `<?xml version="1.0" encoding="UTF-8"?>`) + if err != nil { + return err + } + w.enc = ixml.NewEncoder(w.w) + return w.enc.EncodeToken(ixml.StartElement{ + Name: ixml.Name{ + Space: "DAV:", + Local: "multistatus", + }, + Attr: []ixml.Attr{{ + Name: ixml.Name{Space: "xmlns", Local: "D"}, + Value: "DAV:", + }}, + }) +} + +// Close completes the marshalling of the multistatus response. It returns +// an error if the multistatus response could not be completed. If both the +// return value and field enc of w are nil, then no multistatus response has +// been written. 
+func (w *multistatusWriter) close() error { + if w.enc == nil { + return nil + } + var end []ixml.Token + if w.responseDescription != "" { + name := ixml.Name{Space: "DAV:", Local: "responsedescription"} + end = append(end, + ixml.StartElement{Name: name}, + ixml.CharData(w.responseDescription), + ixml.EndElement{Name: name}, + ) + } + end = append(end, ixml.EndElement{ + Name: ixml.Name{Space: "DAV:", Local: "multistatus"}, + }) + for _, t := range end { + err := w.enc.EncodeToken(t) + if err != nil { + return err + } + } + return w.enc.Flush() +} + +var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} + +func xmlLang(s ixml.StartElement, d string) string { + for _, attr := range s.Attr { + if attr.Name == xmlLangName { + return attr.Value + } + } + return d +} + +type xmlValue []byte + +func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + // The XML value of a property can be arbitrary, mixed-content XML. + // To make sure that the unmarshalled value contains all required + // namespaces, we encode all the property value XML tokens into a + // buffer. This forces the encoder to redeclare any used namespaces. + var b bytes.Buffer + e := ixml.NewEncoder(&b) + for { + t, err := next(d) + if err != nil { + return err + } + if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name { + break + } + if err = e.EncodeToken(t); err != nil { + return err + } + } + err := e.Flush() + if err != nil { + return err + } + *v = b.Bytes() + return nil +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) +type proppatchProps []Property + +// UnmarshalXML appends the property names and values enclosed within start +// to ps. +// +// An xml:lang attribute that is defined either on the DAV:prop or property +// name XML element is propagated to the property's Lang field. 
+// +// UnmarshalXML returns an error if start does not contain any properties or if +// property values contain syntactically incorrect XML. +func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + lang := xmlLang(start, "") + for { + t, err := next(d) + if err != nil { + return err + } + switch elem := t.(type) { + case ixml.EndElement: + if len(*ps) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + p := Property{ + XMLName: xml.Name(t.(ixml.StartElement).Name), + Lang: xmlLang(t.(ixml.StartElement), lang), + } + err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem) + if err != nil { + return err + } + *ps = append(*ps, p) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove +type setRemove struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + Prop proppatchProps `xml:"DAV: prop"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate +type propertyupdate struct { + XMLName ixml.Name `xml:"DAV: propertyupdate"` + Lang string `xml:"xml:lang,attr,omitempty"` + SetRemove []setRemove `xml:",any"` +} + +func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) { + var pu propertyupdate + if err = ixml.NewDecoder(r).Decode(&pu); err != nil { + return nil, http.StatusBadRequest, err + } + for _, op := range pu.SetRemove { + remove := false + switch op.XMLName { + case ixml.Name{Space: "DAV:", Local: "set"}: + // No-op. 
+ case ixml.Name{Space: "DAV:", Local: "remove"}: + for _, p := range op.Prop { + if len(p.InnerXML) > 0 { + return nil, http.StatusBadRequest, errInvalidProppatch + } + } + remove = true + default: + return nil, http.StatusBadRequest, errInvalidProppatch + } + patches = append(patches, Proppatch{Remove: remove, Props: op.Prop}) + } + return patches, 0, nil +} diff --git a/vendor/golang.org/x/net/webdav/xml_test.go b/vendor/golang.org/x/net/webdav/xml_test.go new file mode 100644 index 00000000..a3d9e1ed --- /dev/null +++ b/vendor/golang.org/x/net/webdav/xml_test.go @@ -0,0 +1,906 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "testing" + + ixml "golang.org/x/net/webdav/internal/xml" +) + +func TestReadLockInfo(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + wantLI lockInfo + wantStatus int + }{{ + "bad: junk", + "xxx", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid owner XML", + "" + + "<D:lockinfo xmlns:D='DAV:'>\n" + + " <D:lockscope><D:exclusive/></D:lockscope>\n" + + " <D:locktype><D:write/></D:locktype>\n" + + " <D:owner>\n" + + " <D:href> no end tag \n" + + " </D:owner>\n" + + "</D:lockinfo>", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid UTF-8", + "" + + "<D:lockinfo xmlns:D='DAV:'>\n" + + " <D:lockscope><D:exclusive/></D:lockscope>\n" + + " <D:locktype><D:write/></D:locktype>\n" + + " <D:owner>\n" + + " <D:href> \xff </D:href>\n" + + " </D:owner>\n" + + "</D:lockinfo>", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #1", + "" + + "<D:lockinfo xmlns:D='DAV:'>\n" + + " 
<D:lockscope><D:exclusive/></D:lockscope>\n" + + " <D:locktype><D:write/></D:locktype>\n", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #2", + "" + + "<D:lockinfo xmlns:D='DAV:'>\n" + + " <D:lockscope><D:exclusive/></D:lockscope>\n" + + " <D:locktype><D:write/></D:locktype>\n" + + " <D:owner>\n", + lockInfo{}, + http.StatusBadRequest, + }, { + "good: empty", + "", + lockInfo{}, + 0, + }, { + "good: plain-text owner", + "" + + "<D:lockinfo xmlns:D='DAV:'>\n" + + " <D:lockscope><D:exclusive/></D:lockscope>\n" + + " <D:locktype><D:write/></D:locktype>\n" + + " <D:owner>gopher</D:owner>\n" + + "</D:lockinfo>", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "gopher", + }, + }, + 0, + }, { + "section 9.10.7", + "" + + "<D:lockinfo xmlns:D='DAV:'>\n" + + " <D:lockscope><D:exclusive/></D:lockscope>\n" + + " <D:locktype><D:write/></D:locktype>\n" + + " <D:owner>\n" + + " <D:href>http://example.org/~ejw/contact.html</D:href>\n" + + " </D:owner>\n" + + "</D:lockinfo>", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "\n <D:href>http://example.org/~ejw/contact.html</D:href>\n ", + }, + }, + 0, + }} + + for _, tc := range testCases { + li, status, err := readLockInfo(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus { + t.Errorf("%s:\ngot lockInfo=%v, status=%v\nwant lockInfo=%v, status=%v", + tc.desc, li, status, tc.wantLI, tc.wantStatus) + continue + } + } +} + +func TestReadPropfind(t *testing.T) { + testCases := []struct { + desc string + input string + wantPF propfind + wantStatus int + }{{ + desc: "propfind: propname", + 
input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:propname/>\n" + + "</A:propfind>", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: empty body means allprop", + input: "", + wantPF: propfind{ + Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:allprop/>\n" + + "</A:propfind>", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop followed by include", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:allprop/>\n" + + " <A:include><A:displayname/></A:include>\n" + + "</A:propfind>", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: include followed by allprop", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:include><A:displayname/></A:include>\n" + + " <A:allprop/>\n" + + "</A:propfind>", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:prop><A:displayname/></A:prop>\n" + + "</A:propfind>", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: prop with ignored comments", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:prop>\n" + + " <!-- ignore -->\n" + + " <A:displayname><!-- ignore --></A:displayname>\n" + + " </A:prop>\n" + + "</A:propfind>", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, 
{ + desc: "propfind: propfind with ignored whitespace", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:prop> <A:displayname/></A:prop>\n" + + "</A:propfind>", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored mixed-content", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:prop>foo<A:displayname/>bar</A:prop>\n" + + "</A:propfind>", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propname with ignored element (section A.4)", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:propname/>\n" + + " <E:leave-out xmlns:E='E:'>*boss*</E:leave-out>\n" + + "</A:propfind>", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: bad: junk", + input: "xxx", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and allprop (section A.3)", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:propname/>" + + " <A:allprop/>" + + "</A:propfind>", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and prop", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:prop><A:displayname/></A:prop>\n" + + " <A:propname/>\n" + + "</A:propfind>", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: allprop and prop", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:allprop/>\n" + + " <A:prop><A:foo/><A:/prop>\n" + + "</A:propfind>", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty propfind with ignored element (section A.4)", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <E:expired-props/>\n" + + "</A:propfind>", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty prop", + input: "" + + 
"<A:propfind xmlns:A='DAV:'>\n" + + " <A:prop/>\n" + + "</A:propfind>", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: prop with just chardata", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:prop>foo</A:prop>\n" + + "</A:propfind>", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: interrupted prop", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:prop><A:foo></A:prop>\n", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: malformed end element prop", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:prop><A:foo/></A:bar></A:prop>\n", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with chardata value", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:prop><A:foo>bar</A:foo></A:prop>\n" + + "</A:propfind>", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with whitespace value", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:prop><A:foo> </A:foo></A:prop>\n" + + "</A:propfind>", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: include without allprop", + input: "" + + "<A:propfind xmlns:A='DAV:'>\n" + + " <A:include><A:foo/></A:include>\n" + + "</A:propfind>", + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pf, status, err := readPropfind(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus { + t.Errorf("%s:\ngot propfind=%v, status=%v\nwant propfind=%v, status=%v", + tc.desc, pf, status, tc.wantPF, tc.wantStatus) + continue + } + } +} + +func TestMultistatusWriter(t *testing.T) { + ///The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + responses 
[]response + respdesc string + writeHeader bool + wantXML string + wantCode int + wantErr error + }{{ + desc: "section 9.2.2 (failed dependency)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Authors", + }, + }}, + Status: "HTTP/1.1 424 Failed Dependency", + }, { + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Copyright-Owner", + }, + }}, + Status: "HTTP/1.1 409 Conflict", + }}, + ResponseDescription: "Copyright Owner cannot be deleted or altered.", + }}, + wantXML: `` + + `<?xml version="1.0" encoding="UTF-8"?>` + + `<multistatus xmlns="DAV:">` + + ` <response>` + + ` <href>http://example.com/foo</href>` + + ` <propstat>` + + ` <prop>` + + ` <Authors xmlns="http://ns.example.com/"></Authors>` + + ` </prop>` + + ` <status>HTTP/1.1 424 Failed Dependency</status>` + + ` </propstat>` + + ` <propstat xmlns="DAV:">` + + ` <prop>` + + ` <Copyright-Owner xmlns="http://ns.example.com/"></Copyright-Owner>` + + ` </prop>` + + ` <status>HTTP/1.1 409 Conflict</status>` + + ` </propstat>` + + ` <responsedescription>Copyright Owner cannot be deleted or altered.</responsedescription>` + + `</response>` + + `</multistatus>`, + wantCode: StatusMulti, + }, { + desc: "section 9.6.2 (lock-token-submitted)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Status: "HTTP/1.1 423 Locked", + Error: &xmlError{ + InnerXML: []byte(`<lock-token-submitted xmlns="DAV:"/>`), + }, + }}, + wantXML: `` + + `<?xml version="1.0" encoding="UTF-8"?>` + + `<multistatus xmlns="DAV:">` + + ` <response>` + + ` <href>http://example.com/foo</href>` + + ` <status>HTTP/1.1 423 Locked</status>` + + ` <error><lock-token-submitted xmlns="DAV:"/></error>` + + ` </response>` + + `</multistatus>`, + wantCode: StatusMulti, + }, { + desc: "section 9.1.3", + responses: []response{{ + Href: 
[]string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "bigbox"}, + InnerXML: []byte(`` + + `<BoxType xmlns="http://ns.example.com/boxschema/">` + + `Box type A` + + `</BoxType>`), + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "author"}, + InnerXML: []byte(`` + + `<Name xmlns="http://ns.example.com/boxschema/">` + + `J.J. Johnson` + + `</Name>`), + }}, + Status: "HTTP/1.1 200 OK", + }, { + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "DingALing"}, + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "Random"}, + }}, + Status: "HTTP/1.1 403 Forbidden", + ResponseDescription: "The user does not have access to the DingALing property.", + }}, + }}, + respdesc: "There has been an access violation error.", + wantXML: `` + + `<?xml version="1.0" encoding="UTF-8"?>` + + `<multistatus xmlns="DAV:" xmlns:B="http://ns.example.com/boxschema/">` + + ` <response>` + + ` <href>http://example.com/foo</href>` + + ` <propstat>` + + ` <prop>` + + ` <B:bigbox><B:BoxType>Box type A</B:BoxType></B:bigbox>` + + ` <B:author><B:Name>J.J. 
Johnson</B:Name></B:author>` + + ` </prop>` + + ` <status>HTTP/1.1 200 OK</status>` + + ` </propstat>` + + ` <propstat>` + + ` <prop>` + + ` <B:DingALing/>` + + ` <B:Random/>` + + ` </prop>` + + ` <status>HTTP/1.1 403 Forbidden</status>` + + ` <responsedescription>The user does not have access to the DingALing property.</responsedescription>` + + ` </propstat>` + + ` </response>` + + ` <responsedescription>There has been an access violation error.</responsedescription>` + + `</multistatus>`, + wantCode: StatusMulti, + }, { + desc: "no response written", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "no response written (with description)", + respdesc: "too bad", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "empty multistatus with header", + writeHeader: true, + wantXML: `<multistatus xmlns="DAV:"></multistatus>`, + wantCode: StatusMulti, + }, { + desc: "bad: no href", + responses: []response{{ + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and no status", + responses: []response{{ + Href: []string{"http://example.com/foo", "http://example.com/bar"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: one href and no propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: status with one href and propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + Status: 
"HTTP/1.1 200 OK", + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and propstat", + responses: []response{{ + Href: []string{ + "http://example.com/foo", + "http://example.com/bar", + }, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }} + + n := xmlNormalizer{omitWhitespace: true} +loop: + for _, tc := range testCases { + rec := httptest.NewRecorder() + w := multistatusWriter{w: rec, responseDescription: tc.respdesc} + if tc.writeHeader { + if err := w.writeHeader(); err != nil { + t.Errorf("%s: got writeHeader error %v, want nil", tc.desc, err) + continue + } + } + for _, r := range tc.responses { + if err := w.write(&r); err != nil { + if err != tc.wantErr { + t.Errorf("%s: got write error %v, want %v", + tc.desc, err, tc.wantErr) + } + continue loop + } + } + if err := w.close(); err != tc.wantErr { + t.Errorf("%s: got close error %v, want %v", + tc.desc, err, tc.wantErr) + continue + } + if rec.Code != tc.wantCode { + t.Errorf("%s: got HTTP status code %d, want %d\n", + tc.desc, rec.Code, tc.wantCode) + continue + } + gotXML := rec.Body.String() + eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML)) + if err != nil { + t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML) + } + } +} + +func TestReadProppatch(t *testing.T) { + ppStr := func(pps []Proppatch) string { + var outer []string + for _, pp := range pps { + var inner []string + for _, p := range pp.Props { + inner = append(inner, fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}", + p.XMLName, p.Lang, p.InnerXML)) + } + outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}", + 
pp.Remove, strings.Join(inner, ", "))) + } + return "[" + strings.Join(outer, ", ") + "]" + } + + testCases := []struct { + desc string + input string + wantPP []Proppatch + wantStatus int + }{{ + desc: "proppatch: section 9.2 (with simple property value)", + input: `` + + `<?xml version="1.0" encoding="utf-8" ?>` + + `<D:propertyupdate xmlns:D="DAV:"` + + ` xmlns:Z="http://ns.example.com/z/">` + + ` <D:set>` + + ` <D:prop><Z:Authors>somevalue</Z:Authors></D:prop>` + + ` </D:set>` + + ` <D:remove>` + + ` <D:prop><Z:Copyright-Owner/></D:prop>` + + ` </D:remove>` + + `</D:propertyupdate>`, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"}, + "", + []byte(`somevalue`), + }}, + }, { + Remove: true, + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Copyright-Owner"}, + "", + nil, + }}, + }}, + }, { + desc: "proppatch: lang attribute on prop", + input: `` + + `<?xml version="1.0" encoding="utf-8" ?>` + + `<D:propertyupdate xmlns:D="DAV:">` + + ` <D:set>` + + ` <D:prop xml:lang="en">` + + ` <foo xmlns="http://example.com/ns"/>` + + ` </D:prop>` + + ` </D:set>` + + `</D:propertyupdate>`, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://example.com/ns", Local: "foo"}, + "en", + nil, + }}, + }}, + }, { + desc: "bad: remove with value", + input: `` + + `<?xml version="1.0" encoding="utf-8" ?>` + + `<D:propertyupdate xmlns:D="DAV:"` + + ` xmlns:Z="http://ns.example.com/z/">` + + ` <D:remove>` + + ` <D:prop>` + + ` <Z:Authors>` + + ` <Z:Author>Jim Whitehead</Z:Author>` + + ` </Z:Authors>` + + ` </D:prop>` + + ` </D:remove>` + + `</D:propertyupdate>`, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty propertyupdate", + input: `` + + `<?xml version="1.0" encoding="utf-8" ?>` + + `<D:propertyupdate xmlns:D="DAV:"` + + `</D:propertyupdate>`, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty prop", + input: `` + + `<?xml version="1.0" 
encoding="utf-8" ?>` + + `<D:propertyupdate xmlns:D="DAV:"` + + ` xmlns:Z="http://ns.example.com/z/">` + + ` <D:remove>` + + ` <D:prop/>` + + ` </D:remove>` + + `</D:propertyupdate>`, + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pp, status, err := readProppatch(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if status != tc.wantStatus { + t.Errorf("%s: got status %d, want %d", tc.desc, status, tc.wantStatus) + continue + } + if !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus { + t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP)) + } + } +} + +func TestUnmarshalXMLValue(t *testing.T) { + testCases := []struct { + desc string + input string + wantVal string + }{{ + desc: "simple char data", + input: "<root>foo</root>", + wantVal: "foo", + }, { + desc: "empty element", + input: "<root><foo/></root>", + wantVal: "<foo/>", + }, { + desc: "preserve namespace", + input: `<root><foo xmlns="bar"/></root>`, + wantVal: `<foo xmlns="bar"/>`, + }, { + desc: "preserve root element namespace", + input: `<root xmlns:bar="bar"><bar:foo/></root>`, + wantVal: `<foo xmlns="bar"/>`, + }, { + desc: "preserve whitespace", + input: "<root> \t </root>", + wantVal: " \t ", + }, { + desc: "preserve mixed content", + input: `<root xmlns="bar"> <foo>a<bam xmlns="baz"/> </foo> </root>`, + wantVal: ` <foo xmlns="bar">a<bam xmlns="baz"/> </foo> `, + }, { + desc: "section 9.2", + input: `` + + `<Z:Authors xmlns:Z="http://ns.example.com/z/">` + + ` <Z:Author>Jim Whitehead</Z:Author>` + + ` <Z:Author>Roy Fielding</Z:Author>` + + `</Z:Authors>`, + wantVal: `` + + ` <Author xmlns="http://ns.example.com/z/">Jim Whitehead</Author>` + + ` <Author xmlns="http://ns.example.com/z/">Roy Fielding</Author>`, + }, { + desc: "section 4.3.1 (mixed content)", + input: 
`` + + `<x:author ` + + ` xmlns:x='http://example.com/ns' ` + + ` xmlns:D="DAV:">` + + ` <x:name>Jane Doe</x:name>` + + ` <!-- Jane's contact info -->` + + ` <x:uri type='email'` + + ` added='2005-11-26'>mailto:jane.doe@example.com</x:uri>` + + ` <x:uri type='web'` + + ` added='2005-11-27'>http://www.example.com</x:uri>` + + ` <x:notes xmlns:h='http://www.w3.org/1999/xhtml'>` + + ` Jane has been working way <h:em>too</h:em> long on the` + + ` long-awaited revision of <![CDATA[<RFC2518>]]>.` + + ` </x:notes>` + + `</x:author>`, + wantVal: `` + + ` <name xmlns="http://example.com/ns">Jane Doe</name>` + + ` ` + + ` <uri type='email'` + + ` xmlns="http://example.com/ns" ` + + ` added='2005-11-26'>mailto:jane.doe@example.com</uri>` + + ` <uri added='2005-11-27'` + + ` type='web'` + + ` xmlns="http://example.com/ns">http://www.example.com</uri>` + + ` <notes xmlns="http://example.com/ns" ` + + ` xmlns:h="http://www.w3.org/1999/xhtml">` + + ` Jane has been working way <h:em>too</h:em> long on the` + + ` long-awaited revision of &lt;RFC2518&gt;.` + + ` </notes>`, + }} + + var n xmlNormalizer + for _, tc := range testCases { + d := ixml.NewDecoder(strings.NewReader(tc.input)) + var v xmlValue + if err := d.Decode(&v); err != nil { + t.Errorf("%s: got error %v, want nil", tc.desc, err) + continue + } + eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal)) + if err != nil { + t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal) + } + } +} + +// xmlNormalizer normalizes XML. +type xmlNormalizer struct { + // omitWhitespace instructs to ignore whitespace between element tags. + omitWhitespace bool + // omitComments instructs to ignore XML comments. + omitComments bool +} + +// normalize writes the normalized XML content of r to w. It applies the +// following rules +// +// * Rename namespace prefixes according to an internal heuristic. 
+// * Remove unnecessary namespace declarations. +// * Sort attributes in XML start elements in lexical order of their +// fully qualified name. +// * Remove XML directives and processing instructions. +// * Remove CDATA between XML tags that only contains whitespace, if +// instructed to do so. +// * Remove comments, if instructed to do so. +// +func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error { + d := ixml.NewDecoder(r) + e := ixml.NewEncoder(w) + for { + t, err := d.Token() + if err != nil { + if t == nil && err == io.EOF { + break + } + return err + } + switch val := t.(type) { + case ixml.Directive, ixml.ProcInst: + continue + case ixml.Comment: + if n.omitComments { + continue + } + case ixml.CharData: + if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 { + continue + } + case ixml.StartElement: + start, _ := ixml.CopyToken(val).(ixml.StartElement) + attr := start.Attr[:0] + for _, a := range start.Attr { + if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" { + continue + } + attr = append(attr, a) + } + sort.Sort(byName(attr)) + start.Attr = attr + t = start + } + err = e.EncodeToken(t) + if err != nil { + return err + } + } + return e.Flush() +} + +// equalXML tests for equality of the normalized XML contents of a and b. 
+func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) { + var buf bytes.Buffer + if err := n.normalize(&buf, a); err != nil { + return false, err + } + normA := buf.String() + buf.Reset() + if err := n.normalize(&buf, b); err != nil { + return false, err + } + normB := buf.String() + return normA == normB, nil +} + +type byName []ixml.Attr + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { + if a[i].Name.Space != a[j].Name.Space { + return a[i].Name.Space < a[j].Name.Space + } + return a[i].Name.Local < a[j].Name.Local +} diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go new file mode 100644 index 00000000..20d1e1e3 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/client.go @@ -0,0 +1,113 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "crypto/tls" + "io" + "net" + "net/http" + "net/url" +) + +// DialError is an error that occurs while dialling a websocket server. +type DialError struct { + *Config + Err error +} + +func (e *DialError) Error() string { + return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error() +} + +// NewConfig creates a new WebSocket config for client connection. +func NewConfig(server, origin string) (config *Config, err error) { + config = new(Config) + config.Version = ProtocolVersionHybi13 + config.Location, err = url.ParseRequestURI(server) + if err != nil { + return + } + config.Origin, err = url.ParseRequestURI(origin) + if err != nil { + return + } + config.Header = http.Header(make(map[string][]string)) + return +} + +// NewClient creates a new WebSocket client connection over rwc. 
+func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + err = hybiClientHandshake(config, br, bw) + if err != nil { + return + } + buf := bufio.NewReadWriter(br, bw) + ws = newHybiClientConn(config, buf, rwc) + return +} + +// Dial opens a new client connection to a WebSocket. +func Dial(url_, protocol, origin string) (ws *Conn, err error) { + config, err := NewConfig(url_, origin) + if err != nil { + return nil, err + } + if protocol != "" { + config.Protocol = []string{protocol} + } + return DialConfig(config) +} + +var portMap = map[string]string{ + "ws": "80", + "wss": "443", +} + +func parseAuthority(location *url.URL) string { + if _, ok := portMap[location.Scheme]; ok { + if _, _, err := net.SplitHostPort(location.Host); err != nil { + return net.JoinHostPort(location.Host, portMap[location.Scheme]) + } + } + return location.Host +} + +// DialConfig opens a new client connection to a WebSocket with a config. 
+func DialConfig(config *Config) (ws *Conn, err error) { + var client net.Conn + if config.Location == nil { + return nil, &DialError{config, ErrBadWebSocketLocation} + } + if config.Origin == nil { + return nil, &DialError{config, ErrBadWebSocketOrigin} + } + switch config.Location.Scheme { + case "ws": + client, err = net.Dial("tcp", parseAuthority(config.Location)) + + case "wss": + client, err = tls.Dial("tcp", parseAuthority(config.Location), config.TlsConfig) + + default: + err = ErrBadScheme + } + if err != nil { + goto Error + } + + ws, err = NewClient(config, client) + if err != nil { + client.Close() + goto Error + } + return + +Error: + return nil, &DialError{config, err} +} diff --git a/vendor/golang.org/x/net/websocket/exampledial_test.go b/vendor/golang.org/x/net/websocket/exampledial_test.go new file mode 100644 index 00000000..72bb9d48 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/exampledial_test.go @@ -0,0 +1,31 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket_test + +import ( + "fmt" + "log" + + "golang.org/x/net/websocket" +) + +// This example demonstrates a trivial client. +func ExampleDial() { + origin := "http://localhost/" + url := "ws://localhost:12345/ws" + ws, err := websocket.Dial(url, "", origin) + if err != nil { + log.Fatal(err) + } + if _, err := ws.Write([]byte("hello, world!\n")); err != nil { + log.Fatal(err) + } + var msg = make([]byte, 512) + var n int + if n, err = ws.Read(msg); err != nil { + log.Fatal(err) + } + fmt.Printf("Received: %s.\n", msg[:n]) +} diff --git a/vendor/golang.org/x/net/websocket/examplehandler_test.go b/vendor/golang.org/x/net/websocket/examplehandler_test.go new file mode 100644 index 00000000..f22a98fc --- /dev/null +++ b/vendor/golang.org/x/net/websocket/examplehandler_test.go @@ -0,0 +1,26 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket_test + +import ( + "io" + "net/http" + + "golang.org/x/net/websocket" +) + +// Echo the data received on the WebSocket. +func EchoServer(ws *websocket.Conn) { + io.Copy(ws, ws) +} + +// This example demonstrates a trivial echo server. +func ExampleHandler() { + http.Handle("/echo", websocket.Handler(EchoServer)) + err := http.ListenAndServe(":12345", nil) + if err != nil { + panic("ListenAndServe: " + err.Error()) + } +} diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go new file mode 100644 index 00000000..60bbc841 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -0,0 +1,586 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +// This file implements a protocol of hybi draft. 
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + closeStatusNormal = 1000 + closeStatusGoingAway = 1001 + closeStatusProtocolError = 1002 + closeStatusUnsupportedData = 1003 + closeStatusFrameTooLarge = 1004 + closeStatusNoStatusRcvd = 1005 + closeStatusAbnormalClosure = 1006 + closeStatusBadMessageData = 1007 + closeStatusPolicyViolation = 1008 + closeStatusTooBigData = 1009 + closeStatusExtensionMismatch = 1010 + + maxControlFramePayloadLength = 125 +) + +var ( + ErrBadMaskingKey = &ProtocolError{"bad masking key"} + ErrBadPongMessage = &ProtocolError{"bad pong message"} + ErrBadClosingStatus = &ProtocolError{"bad closing status"} + ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"} + ErrNotImplemented = &ProtocolError{"not implemented"} + + handshakeHeader = map[string]bool{ + "Host": true, + "Upgrade": true, + "Connection": true, + "Sec-Websocket-Key": true, + "Sec-Websocket-Origin": true, + "Sec-Websocket-Version": true, + "Sec-Websocket-Protocol": true, + "Sec-Websocket-Accept": true, + } +) + +// A hybiFrameHeader is a frame header as defined in hybi draft. +type hybiFrameHeader struct { + Fin bool + Rsv [3]bool + OpCode byte + Length int64 + MaskingKey []byte + + data *bytes.Buffer +} + +// A hybiFrameReader is a reader for hybi frame. 
+type hybiFrameReader struct { + reader io.Reader + + header hybiFrameHeader + pos int64 + length int +} + +func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { + n, err = frame.reader.Read(msg) + if err != nil { + return 0, err + } + if frame.header.MaskingKey != nil { + for i := 0; i < n; i++ { + msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] + frame.pos++ + } + } + return n, err +} + +func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode } + +func (frame *hybiFrameReader) HeaderReader() io.Reader { + if frame.header.data == nil { + return nil + } + if frame.header.data.Len() == 0 { + return nil + } + return frame.header.data +} + +func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil } + +func (frame *hybiFrameReader) Len() (n int) { return frame.length } + +// A hybiFrameReaderFactory creates new frame reader based on its frame type. +type hybiFrameReaderFactory struct { + *bufio.Reader +} + +// NewFrameReader reads a frame header from the connection, and creates new reader for the frame. +// See Section 5.2 Base Framing protocol for detail. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2 +func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) { + hybiFrame := new(hybiFrameReader) + frame = hybiFrame + var header []byte + var b byte + // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0 + for i := 0; i < 3; i++ { + j := uint(6 - i) + hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0 + } + hybiFrame.header.OpCode = header[0] & 0x0f + + // Second byte. Mask/Payload len(7bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + mask := (b & 0x80) != 0 + b &= 0x7f + lengthFields := 0 + switch { + case b <= 125: // Payload length 7bits. 
+ hybiFrame.header.Length = int64(b) + case b == 126: // Payload length 7+16bits + lengthFields = 2 + case b == 127: // Payload length 7+64bits + lengthFields = 8 + } + for i := 0; i < lengthFields; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits + b &= 0x7f + } + header = append(header, b) + hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) + } + if mask { + // Masking key. 4 bytes. + for i := 0; i < 4; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b) + } + } + hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length) + hybiFrame.header.data = bytes.NewBuffer(header) + hybiFrame.length = len(header) + int(hybiFrame.header.Length) + return +} + +// A HybiFrameWriter is a writer for hybi frame. +type hybiFrameWriter struct { + writer *bufio.Writer + + header *hybiFrameHeader +} + +func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) { + var header []byte + var b byte + if frame.header.Fin { + b |= 0x80 + } + for i := 0; i < 3; i++ { + if frame.header.Rsv[i] { + j := uint(6 - i) + b |= 1 << j + } + } + b |= frame.header.OpCode + header = append(header, b) + if frame.header.MaskingKey != nil { + b = 0x80 + } else { + b = 0 + } + lengthFields := 0 + length := len(msg) + switch { + case length <= 125: + b |= byte(length) + case length < 65536: + b |= 126 + lengthFields = 2 + default: + b |= 127 + lengthFields = 8 + } + header = append(header, b) + for i := 0; i < lengthFields; i++ { + j := uint((lengthFields - i - 1) * 8) + b = byte((length >> j) & 0xff) + header = append(header, b) + } + if frame.header.MaskingKey != nil { + if len(frame.header.MaskingKey) != 4 { + return 0, ErrBadMaskingKey + } + header = append(header, frame.header.MaskingKey...) 
+ frame.writer.Write(header) + data := make([]byte, length) + for i := range data { + data[i] = msg[i] ^ frame.header.MaskingKey[i%4] + } + frame.writer.Write(data) + err = frame.writer.Flush() + return length, err + } + frame.writer.Write(header) + frame.writer.Write(msg) + err = frame.writer.Flush() + return length, err +} + +func (frame *hybiFrameWriter) Close() error { return nil } + +type hybiFrameWriterFactory struct { + *bufio.Writer + needMaskingKey bool +} + +func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType} + if buf.needMaskingKey { + frameHeader.MaskingKey, err = generateMaskingKey() + if err != nil { + return nil, err + } + } + return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil +} + +type hybiFrameHandler struct { + conn *Conn + payloadType byte +} + +func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) { + if handler.conn.IsServerConn() { + // The client MUST mask all frames sent to the server. + if frame.(*hybiFrameReader).header.MaskingKey == nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } else { + // The server MUST NOT mask all frames. 
+ if frame.(*hybiFrameReader).header.MaskingKey != nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } + if header := frame.HeaderReader(); header != nil { + io.Copy(ioutil.Discard, header) + } + switch frame.PayloadType() { + case ContinuationFrame: + frame.(*hybiFrameReader).header.OpCode = handler.payloadType + case TextFrame, BinaryFrame: + handler.payloadType = frame.PayloadType() + case CloseFrame: + return nil, io.EOF + case PingFrame, PongFrame: + b := make([]byte, maxControlFramePayloadLength) + n, err := io.ReadFull(frame, b) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return nil, err + } + io.Copy(ioutil.Discard, frame) + if frame.PayloadType() == PingFrame { + if _, err := handler.WritePong(b[:n]); err != nil { + return nil, err + } + } + return nil, nil + } + return frame, nil +} + +func (handler *hybiFrameHandler) WriteClose(status int) (err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame) + if err != nil { + return err + } + msg := make([]byte, 2) + binary.BigEndian.PutUint16(msg, uint16(status)) + _, err = w.Write(msg) + w.Close() + return err +} + +func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// newHybiConn creates a new WebSocket connection speaking hybi draft protocol. 
+func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + if buf == nil { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + buf = bufio.NewReadWriter(br, bw) + } + ws := &Conn{config: config, request: request, buf: buf, rwc: rwc, + frameReaderFactory: hybiFrameReaderFactory{buf.Reader}, + frameWriterFactory: hybiFrameWriterFactory{ + buf.Writer, request == nil}, + PayloadType: TextFrame, + defaultCloseStatus: closeStatusNormal} + ws.frameHandler = &hybiFrameHandler{conn: ws} + return ws +} + +// generateMaskingKey generates a masking key for a frame. +func generateMaskingKey() (maskingKey []byte, err error) { + maskingKey = make([]byte, 4) + if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil { + return + } + return +} + +// generateNonce generates a nonce consisting of a randomly selected 16-byte +// value that has been base64-encoded. +func generateNonce() (nonce []byte) { + key := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, key); err != nil { + panic(err) + } + nonce = make([]byte, 24) + base64.StdEncoding.Encode(nonce, key) + return +} + +// removeZone removes IPv6 zone identifer from host. +// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080" +func removeZone(host string) string { + if !strings.HasPrefix(host, "[") { + return host + } + i := strings.LastIndex(host, "]") + if i < 0 { + return host + } + j := strings.LastIndex(host[:i], "%") + if j < 0 { + return host + } + return host[:j] + host[i:] +} + +// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of +// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string. 
+func getNonceAccept(nonce []byte) (expected []byte, err error) { + h := sha1.New() + if _, err = h.Write(nonce); err != nil { + return + } + if _, err = h.Write([]byte(websocketGUID)); err != nil { + return + } + expected = make([]byte, 28) + base64.StdEncoding.Encode(expected, h.Sum(nil)) + return +} + +// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17 +func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) { + bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n") + + // According to RFC 6874, an HTTP client, proxy, or other + // intermediary must remove any IPv6 zone identifier attached + // to an outgoing URI. + bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n") + bw.WriteString("Upgrade: websocket\r\n") + bw.WriteString("Connection: Upgrade\r\n") + nonce := generateNonce() + if config.handshakeData != nil { + nonce = []byte(config.handshakeData["key"]) + } + bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n") + bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n") + + if config.Version != ProtocolVersionHybi13 { + return ErrBadProtocolVersion + } + + bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n") + if len(config.Protocol) > 0 { + bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. 
+ err = config.Header.WriteSubset(bw, handshakeHeader) + if err != nil { + return err + } + + bw.WriteString("\r\n") + if err = bw.Flush(); err != nil { + return err + } + + resp, err := http.ReadResponse(br, &http.Request{Method: "GET"}) + if err != nil { + return err + } + if resp.StatusCode != 101 { + return ErrBadStatus + } + if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" || + strings.ToLower(resp.Header.Get("Connection")) != "upgrade" { + return ErrBadUpgrade + } + expectedAccept, err := getNonceAccept(nonce) + if err != nil { + return err + } + if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) { + return ErrChallengeResponse + } + if resp.Header.Get("Sec-WebSocket-Extensions") != "" { + return ErrUnsupportedExtensions + } + offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol") + if offeredProtocol != "" { + protocolMatched := false + for i := 0; i < len(config.Protocol); i++ { + if config.Protocol[i] == offeredProtocol { + protocolMatched = true + break + } + } + if !protocolMatched { + return ErrBadWebSocketProtocol + } + config.Protocol = []string{offeredProtocol} + } + + return nil +} + +// newHybiClientConn creates a client WebSocket connection after handshake. +func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn { + return newHybiConn(config, buf, rwc, nil) +} + +// A HybiServerHandshaker performs a server handshake using hybi draft protocol. +type hybiServerHandshaker struct { + *Config + accept []byte +} + +func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) { + c.Version = ProtocolVersionHybi13 + if req.Method != "GET" { + return http.StatusMethodNotAllowed, ErrBadRequestMethod + } + // HTTP version can be safely ignored. 
+ + if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" || + !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") { + return http.StatusBadRequest, ErrNotWebSocket + } + + key := req.Header.Get("Sec-Websocket-Key") + if key == "" { + return http.StatusBadRequest, ErrChallengeResponse + } + version := req.Header.Get("Sec-Websocket-Version") + switch version { + case "13": + c.Version = ProtocolVersionHybi13 + default: + return http.StatusBadRequest, ErrBadWebSocketVersion + } + var scheme string + if req.TLS != nil { + scheme = "wss" + } else { + scheme = "ws" + } + c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI()) + if err != nil { + return http.StatusBadRequest, err + } + protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol")) + if protocol != "" { + protocols := strings.Split(protocol, ",") + for i := 0; i < len(protocols); i++ { + c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i])) + } + } + c.accept, err = getNonceAccept([]byte(key)) + if err != nil { + return http.StatusInternalServerError, err + } + return http.StatusSwitchingProtocols, nil +} + +// Origin parses the Origin header in req. +// If the Origin header is not set, it returns nil and nil. +func Origin(config *Config, req *http.Request) (*url.URL, error) { + var origin string + switch config.Version { + case ProtocolVersionHybi13: + origin = req.Header.Get("Origin") + } + if origin == "" { + return nil, nil + } + return url.ParseRequestURI(origin) +} + +func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) { + if len(c.Protocol) > 0 { + if len(c.Protocol) != 1 { + // You need choose a Protocol in Handshake func in Server. 
+ return ErrBadWebSocketProtocol + } + } + buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n") + buf.WriteString("Upgrade: websocket\r\n") + buf.WriteString("Connection: Upgrade\r\n") + buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n") + if len(c.Protocol) > 0 { + buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + if c.Header != nil { + err := c.Header.WriteSubset(buf, handshakeHeader) + if err != nil { + return err + } + } + buf.WriteString("\r\n") + return buf.Flush() +} + +func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiServerConn(c.Config, buf, rwc, request) +} + +// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol. +func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiConn(config, buf, rwc, request) +} diff --git a/vendor/golang.org/x/net/websocket/hybi_test.go b/vendor/golang.org/x/net/websocket/hybi_test.go new file mode 100644 index 00000000..9504aa2d --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi_test.go @@ -0,0 +1,608 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "testing" +) + +// Test the getNonceAccept function with values in +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 +func TestSecWebSocketAccept(t *testing.T) { + nonce := []byte("dGhlIHNhbXBsZSBub25jZQ==") + expected := []byte("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=") + accept, err := getNonceAccept(nonce) + if err != nil { + t.Errorf("getNonceAccept: returned error %v", err) + return + } + if !bytes.Equal(expected, accept) { + t.Errorf("getNonceAccept: expected %q got %q", expected, accept) + } +} + +func TestHybiClientHandshake(t *testing.T) { + type test struct { + url, host string + } + tests := []test{ + {"ws://server.example.com/chat", "server.example.com"}, + {"ws://127.0.0.1/chat", "127.0.0.1"}, + } + if _, err := url.ParseRequestURI("http://[fe80::1%25lo0]"); err == nil { + tests = append(tests, test{"ws://[fe80::1%25lo0]/chat", "[fe80::1]"}) + } + + for _, tt := range tests { + var b bytes.Buffer + bw := bufio.NewWriter(&b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +Sec-WebSocket-Protocol: chat + +`)) + var err error + var config Config + config.Location, err = url.ParseRequestURI(tt.url) + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + config.Version = ProtocolVersionHybi13 + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + if err := hybiClientHandshake(&config, br, bw); err != nil { + t.Fatal("handshake", err) + } + req, err := http.ReadRequest(bufio.NewReader(&b)) + if err != nil { + t.Fatal("read request", err) + } + if req.Method != "GET" { + 
t.Errorf("request method expected GET, but got %s", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected /chat, but got %s", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %s", req.Proto) + } + if req.Host != tt.host { + t.Errorf("request host expected %s, but got %s", tt.host, req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf("%s expected %s, but got %v", k, v, req.Header.Get(k)) + } + } + } +} + +func TestHybiClientHandshakeWithHeader(t *testing.T) { + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +Sec-WebSocket-Protocol: chat + +`)) + var err error + config := new(Config) + config.Location, err = url.ParseRequestURI("ws://server.example.com/chat") + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + config.Version = ProtocolVersionHybi13 + config.Header = http.Header(make(map[string][]string)) + config.Header.Add("User-Agent", "test") + + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + err = hybiClientHandshake(config, br, bw) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + req, err := http.ReadRequest(bufio.NewReader(b)) + if err != nil { + t.Fatalf("read request: %v", err) + } + if 
req.Method != "GET" { + t.Errorf("request method expected GET, but got %q", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected /chat, but got %q", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %q", req.Proto) + } + if req.Host != "server.example.com" { + t.Errorf("request Host expected server.example.com, but got %v", req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + "User-Agent": "test", + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf(fmt.Sprintf("%s expected %q but got %q", k, v, req.Header.Get(k))) + } + } +} + +func TestHybiServerHandshake(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + expectedProtocols := []string{"chat", "superchat"} + if fmt.Sprintf("%v", config.Protocol) != fmt.Sprintf("%v", expectedProtocols) { + t.Errorf("protocol expected %q but got %q", expectedProtocols, config.Protocol) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + config.Protocol = config.Protocol[:1] + + err = handshaker.AcceptHandshake(bw) + if 
err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "Sec-WebSocket-Protocol: chat", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} + +func TestHybiServerHandshakeNoSubProtocol(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + if len(config.Protocol) != 0 { + t.Errorf("len(config.Protocol) expected 0, but got %q", len(config.Protocol)) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} + +func TestHybiServerHandshakeHybiBadVersion(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket 
+Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Sec-WebSocket-Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 9 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != ErrBadWebSocketVersion { + t.Errorf("handshake expected err %q but got %q", ErrBadWebSocketVersion, err) + } + if code != http.StatusBadRequest { + t.Errorf("status expected %q but got %q", http.StatusBadRequest, code) + } +} + +func testHybiFrame(t *testing.T, testHeader, testPayload, testMaskedPayload []byte, frameHeader *hybiFrameHeader) { + b := bytes.NewBuffer([]byte{}) + frameWriterFactory := &hybiFrameWriterFactory{bufio.NewWriter(b), false} + w, _ := frameWriterFactory.NewFrameWriter(TextFrame) + w.(*hybiFrameWriter).header = frameHeader + _, err := w.Write(testPayload) + w.Close() + if err != nil { + t.Errorf("Write error %q", err) + } + var expectedFrame []byte + expectedFrame = append(expectedFrame, testHeader...) + expectedFrame = append(expectedFrame, testMaskedPayload...) 
+ if !bytes.Equal(expectedFrame, b.Bytes()) { + t.Errorf("frame expected %q got %q", expectedFrame, b.Bytes()) + } + frameReaderFactory := &hybiFrameReaderFactory{bufio.NewReader(b)} + r, err := frameReaderFactory.NewFrameReader() + if err != nil { + t.Errorf("Read error %q", err) + } + if header := r.HeaderReader(); header == nil { + t.Errorf("no header") + } else { + actualHeader := make([]byte, r.Len()) + n, err := header.Read(actualHeader) + if err != nil { + t.Errorf("Read header error %q", err) + } else { + if n < len(testHeader) { + t.Errorf("header too short %q got %q", testHeader, actualHeader[:n]) + } + if !bytes.Equal(testHeader, actualHeader[:n]) { + t.Errorf("header expected %q got %q", testHeader, actualHeader[:n]) + } + } + } + if trailer := r.TrailerReader(); trailer != nil { + t.Errorf("unexpected trailer %q", trailer) + } + frame := r.(*hybiFrameReader) + if frameHeader.Fin != frame.header.Fin || + frameHeader.OpCode != frame.header.OpCode || + len(testPayload) != int(frame.header.Length) { + t.Errorf("mismatch %v (%d) vs %v", frameHeader, len(testPayload), frame) + } + payload := make([]byte, len(testPayload)) + _, err = r.Read(payload) + if err != nil && err != io.EOF { + t.Errorf("read %v", err) + } + if !bytes.Equal(testPayload, payload) { + t.Errorf("payload %q vs %q", testPayload, payload) + } +} + +func TestHybiShortTextFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} + payload := []byte("hello") + testHybiFrame(t, []byte{0x81, 0x05}, payload, payload, frameHeader) + + payload = make([]byte, 125) + testHybiFrame(t, []byte{0x81, 125}, payload, payload, frameHeader) +} + +func TestHybiShortMaskedTextFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame, + MaskingKey: []byte{0xcc, 0x55, 0x80, 0x20}} + payload := []byte("hello") + maskedPayload := []byte{0xa4, 0x30, 0xec, 0x4c, 0xa3} + header := []byte{0x81, 0x85} + header = append(header, frameHeader.MaskingKey...) 
+ testHybiFrame(t, header, payload, maskedPayload, frameHeader) +} + +func TestHybiShortBinaryFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: BinaryFrame} + payload := []byte("hello") + testHybiFrame(t, []byte{0x82, 0x05}, payload, payload, frameHeader) + + payload = make([]byte, 125) + testHybiFrame(t, []byte{0x82, 125}, payload, payload, frameHeader) +} + +func TestHybiControlFrame(t *testing.T) { + payload := []byte("hello") + + frameHeader := &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x05}, payload, payload, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x00}, nil, nil, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x05}, payload, payload, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x00}, nil, nil, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: CloseFrame} + payload = []byte{0x03, 0xe8} // 1000 + testHybiFrame(t, []byte{0x88, 0x02}, payload, payload, frameHeader) +} + +func TestHybiLongFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} + payload := make([]byte, 126) + testHybiFrame(t, []byte{0x81, 126, 0x00, 126}, payload, payload, frameHeader) + + payload = make([]byte, 65535) + testHybiFrame(t, []byte{0x81, 126, 0xff, 0xff}, payload, payload, frameHeader) + + payload = make([]byte, 65536) + testHybiFrame(t, []byte{0x81, 127, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00}, payload, payload, frameHeader) +} + +func TestHybiClientRead(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', + 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping + 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), 
bufio.NewReadWriter(br, bw), nil, nil) + + msg := make([]byte, 512) + n, err := conn.Read(msg) + if err != nil { + t.Errorf("read 1st frame, error %q", err) + } + if n != 5 { + t.Errorf("read 1st frame, expect 5, got %d", n) + } + if !bytes.Equal(wireData[2:7], msg[:n]) { + t.Errorf("read 1st frame %v, got %v", wireData[2:7], msg[:n]) + } + n, err = conn.Read(msg) + if err != nil { + t.Errorf("read 2nd frame, error %q", err) + } + if n != 5 { + t.Errorf("read 2nd frame, expect 5, got %d", n) + } + if !bytes.Equal(wireData[16:21], msg[:n]) { + t.Errorf("read 2nd frame %v, got %v", wireData[16:21], msg[:n]) + } + n, err = conn.Read(msg) + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } +} + +func TestHybiShortRead(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', + 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping + 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + step := 0 + pos := 0 + expectedPos := []int{2, 5, 16, 19} + expectedLen := []int{3, 2, 3, 2} + for { + msg := make([]byte, 3) + n, err := conn.Read(msg) + if step >= len(expectedPos) { + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } + return + } + pos = expectedPos[step] + endPos := pos + expectedLen[step] + if err != nil { + t.Errorf("read from %d, got error %q", pos, err) + return + } + if n != endPos-pos { + t.Errorf("read from %d, expect %d, got %d", pos, endPos-pos, n) + } + if !bytes.Equal(wireData[pos:endPos], msg[:n]) { + t.Errorf("read from %d, frame %v, got %v", pos, wireData[pos:endPos], msg[:n]) + } + step++ + } +} + +func TestHybiServerRead(t *testing.T) { + wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello + 0x89, 0x85, 0xcc, 0x55, 0x80, 0x20, + 
0xa4, 0x30, 0xec, 0x4c, 0xa3, // ping: hello + 0x81, 0x85, 0xed, 0x83, 0xb4, 0x24, + 0x9a, 0xec, 0xc6, 0x48, 0x89, // world + } + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) + + expected := [][]byte{[]byte("hello"), []byte("world")} + + msg := make([]byte, 512) + n, err := conn.Read(msg) + if err != nil { + t.Errorf("read 1st frame, error %q", err) + } + if n != 5 { + t.Errorf("read 1st frame, expect 5, got %d", n) + } + if !bytes.Equal(expected[0], msg[:n]) { + t.Errorf("read 1st frame %q, got %q", expected[0], msg[:n]) + } + + n, err = conn.Read(msg) + if err != nil { + t.Errorf("read 2nd frame, error %q", err) + } + if n != 5 { + t.Errorf("read 2nd frame, expect 5, got %d", n) + } + if !bytes.Equal(expected[1], msg[:n]) { + t.Errorf("read 2nd frame %q, got %q", expected[1], msg[:n]) + } + + n, err = conn.Read(msg) + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } +} + +func TestHybiServerReadWithoutMasking(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) + // server MUST close the connection upon receiving a non-masked frame. 
+ msg := make([]byte, 512) + _, err := conn.Read(msg) + if err != io.EOF { + t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) + } +} + +func TestHybiClientReadWithMasking(t *testing.T) { + wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello + } + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + // client MUST close the connection upon receiving a masked frame. + msg := make([]byte, 512) + _, err := conn.Read(msg) + if err != io.EOF { + t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) + } +} + +// Test the hybiServerHandshaker supports firefox implementation and +// checks Connection request header include (but it's not necessary +// equal to) "upgrade" +func TestHybiServerFirefoxHandshake(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: keep-alive, upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + config.Protocol = []string{"chat"} + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: 
s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "Sec-WebSocket-Protocol: chat", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go new file mode 100644 index 00000000..0895dea1 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/server.go @@ -0,0 +1,113 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "fmt" + "io" + "net/http" +) + +func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) { + var hs serverHandshaker = &hybiServerHandshaker{Config: config} + code, err := hs.ReadHandshake(buf.Reader, req) + if err == ErrBadWebSocketVersion { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if err != nil { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if handshake != nil { + err = handshake(config, req) + if err != nil { + code = http.StatusForbidden + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + } + err = hs.AcceptHandshake(buf.Writer) + if err != nil { + code = http.StatusBadRequest + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + conn = hs.NewServerConn(buf, rwc, req) + return +} + +// Server represents a server of a WebSocket. 
+type Server struct { + // Config is a WebSocket configuration for new WebSocket connection. + Config + + // Handshake is an optional function in WebSocket handshake. + // For example, you can check, or don't check Origin header. + // Another example, you can select config.Protocol. + Handshake func(*Config, *http.Request) error + + // Handler handles a WebSocket connection. + Handler +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.serveWebSocket(w, req) +} + +func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) { + rwc, buf, err := w.(http.Hijacker).Hijack() + if err != nil { + panic("Hijack failed: " + err.Error()) + } + // The server should abort the WebSocket connection if it finds + // the client did not send a handshake that matches with protocol + // specification. + defer rwc.Close() + conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake) + if err != nil { + return + } + if conn == nil { + panic("unexpected nil conn") + } + s.Handler(conn) +} + +// Handler is a simple interface to a WebSocket browser client. +// It checks if Origin header is valid URL by default. +// You might want to verify websocket.Conn.Config().Origin in the func. +// If you use Server instead of Handler, you could call websocket.Origin and +// check the origin in your Handshake func. So, if you want to accept +// non-browser clients, which do not send an Origin header, set a +// Server.Handshake that does not check the origin. 
+type Handler func(*Conn) + +func checkOrigin(config *Config, req *http.Request) (err error) { + config.Origin, err = Origin(config, req) + if err == nil && config.Origin == nil { + return fmt.Errorf("null origin") + } + return err +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s := Server{Handler: h, Handshake: checkOrigin} + s.serveWebSocket(w, req) +} diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go new file mode 100644 index 00000000..9412191d --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -0,0 +1,411 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements a client and server for the WebSocket protocol +// as specified in RFC 6455. +package websocket // import "golang.org/x/net/websocket" + +import ( + "bufio" + "crypto/tls" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +const ( + ProtocolVersionHybi13 = 13 + ProtocolVersionHybi = ProtocolVersionHybi13 + SupportedProtocolVersion = "13" + + ContinuationFrame = 0 + TextFrame = 1 + BinaryFrame = 2 + CloseFrame = 8 + PingFrame = 9 + PongFrame = 10 + UnknownFrame = 255 +) + +// ProtocolError represents WebSocket protocol errors. 
+type ProtocolError struct { + ErrorString string +} + +func (err *ProtocolError) Error() string { return err.ErrorString } + +var ( + ErrBadProtocolVersion = &ProtocolError{"bad protocol version"} + ErrBadScheme = &ProtocolError{"bad scheme"} + ErrBadStatus = &ProtocolError{"bad status"} + ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"} + ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"} + ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"} + ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"} + ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"} + ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"} + ErrBadFrame = &ProtocolError{"bad frame"} + ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"} + ErrNotWebSocket = &ProtocolError{"not websocket protocol"} + ErrBadRequestMethod = &ProtocolError{"bad method"} + ErrNotSupported = &ProtocolError{"not supported"} +) + +// Addr is an implementation of net.Addr for WebSocket. +type Addr struct { + *url.URL +} + +// Network returns the network type for a WebSocket, "websocket". +func (addr *Addr) Network() string { return "websocket" } + +// Config is a WebSocket configuration +type Config struct { + // A WebSocket server address. + Location *url.URL + + // A Websocket client origin. + Origin *url.URL + + // WebSocket subprotocols. + Protocol []string + + // WebSocket protocol version. + Version int + + // TLS config for secure WebSocket (wss). + TlsConfig *tls.Config + + // Additional header fields to be sent in WebSocket opening handshake. + Header http.Header + + handshakeData map[string]string +} + +// serverHandshaker is an interface to handle WebSocket server side handshake. +type serverHandshaker interface { + // ReadHandshake reads handshake request message from client. + // Returns http response code and error if any. 
+ ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) + + // AcceptHandshake accepts the client handshake request and sends + // handshake response back to client. + AcceptHandshake(buf *bufio.Writer) (err error) + + // NewServerConn creates a new WebSocket connection. + NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn) +} + +// frameReader is an interface to read a WebSocket frame. +type frameReader interface { + // Reader is to read payload of the frame. + io.Reader + + // PayloadType returns payload type. + PayloadType() byte + + // HeaderReader returns a reader to read header of the frame. + HeaderReader() io.Reader + + // TrailerReader returns a reader to read trailer of the frame. + // If it returns nil, there is no trailer in the frame. + TrailerReader() io.Reader + + // Len returns total length of the frame, including header and trailer. + Len() int +} + +// frameReaderFactory is an interface to creates new frame reader. +type frameReaderFactory interface { + NewFrameReader() (r frameReader, err error) +} + +// frameWriter is an interface to write a WebSocket frame. +type frameWriter interface { + // Writer is to write payload of the frame. + io.WriteCloser +} + +// frameWriterFactory is an interface to create new frame writer. +type frameWriterFactory interface { + NewFrameWriter(payloadType byte) (w frameWriter, err error) +} + +type frameHandler interface { + HandleFrame(frame frameReader) (r frameReader, err error) + WriteClose(status int) (err error) +} + +// Conn represents a WebSocket connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. 
+type Conn struct { + config *Config + request *http.Request + + buf *bufio.ReadWriter + rwc io.ReadWriteCloser + + rio sync.Mutex + frameReaderFactory + frameReader + + wio sync.Mutex + frameWriterFactory + + frameHandler + PayloadType byte + defaultCloseStatus int +} + +// Read implements the io.Reader interface: +// it reads data of a frame from the WebSocket connection. +// if msg is not large enough for the frame data, it fills the msg and next Read +// will read the rest of the frame data. +// it reads Text frame or Binary frame. +func (ws *Conn) Read(msg []byte) (n int, err error) { + ws.rio.Lock() + defer ws.rio.Unlock() +again: + if ws.frameReader == nil { + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return 0, err + } + ws.frameReader, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return 0, err + } + if ws.frameReader == nil { + goto again + } + } + n, err = ws.frameReader.Read(msg) + if err == io.EOF { + if trailer := ws.frameReader.TrailerReader(); trailer != nil { + io.Copy(ioutil.Discard, trailer) + } + ws.frameReader = nil + goto again + } + return n, err +} + +// Write implements the io.Writer interface: +// it writes data as a frame to the WebSocket connection. +func (ws *Conn) Write(msg []byte) (n int, err error) { + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// Close implements the io.Closer interface. +func (ws *Conn) Close() error { + err := ws.frameHandler.WriteClose(ws.defaultCloseStatus) + err1 := ws.rwc.Close() + if err != nil { + return err + } + return err1 +} + +func (ws *Conn) IsClientConn() bool { return ws.request == nil } +func (ws *Conn) IsServerConn() bool { return ws.request != nil } + +// LocalAddr returns the WebSocket Origin for the connection for client, or +// the WebSocket location for server. 
+func (ws *Conn) LocalAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Origin} + } + return &Addr{ws.config.Location} +} + +// RemoteAddr returns the WebSocket location for the connection for client, or +// the Websocket Origin for server. +func (ws *Conn) RemoteAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Location} + } + return &Addr{ws.config.Origin} +} + +var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn") + +// SetDeadline sets the connection's network read & write deadlines. +func (ws *Conn) SetDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetDeadline(t) + } + return errSetDeadline +} + +// SetReadDeadline sets the connection's network read deadline. +func (ws *Conn) SetReadDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetReadDeadline(t) + } + return errSetDeadline +} + +// SetWriteDeadline sets the connection's network write deadline. +func (ws *Conn) SetWriteDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetWriteDeadline(t) + } + return errSetDeadline +} + +// Config returns the WebSocket config. +func (ws *Conn) Config() *Config { return ws.config } + +// Request returns the http request upgraded to the WebSocket. +// It is nil for client side. +func (ws *Conn) Request() *http.Request { return ws.request } + +// Codec represents a symmetric pair of functions that implement a codec. +type Codec struct { + Marshal func(v interface{}) (data []byte, payloadType byte, err error) + Unmarshal func(data []byte, payloadType byte, v interface{}) (err error) +} + +// Send sends v marshaled by cd.Marshal as single frame to ws. 
+func (cd Codec) Send(ws *Conn, v interface{}) (err error) { + data, payloadType, err := cd.Marshal(v) + if err != nil { + return err + } + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(payloadType) + if err != nil { + return err + } + _, err = w.Write(data) + w.Close() + return err +} + +// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores in v. +func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { + ws.rio.Lock() + defer ws.rio.Unlock() + if ws.frameReader != nil { + _, err = io.Copy(ioutil.Discard, ws.frameReader) + if err != nil { + return err + } + ws.frameReader = nil + } +again: + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return err + } + frame, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return err + } + if frame == nil { + goto again + } + payloadType := frame.PayloadType() + data, err := ioutil.ReadAll(frame) + if err != nil { + return err + } + return cd.Unmarshal(data, payloadType, v) +} + +func marshal(v interface{}) (msg []byte, payloadType byte, err error) { + switch data := v.(type) { + case string: + return []byte(data), TextFrame, nil + case []byte: + return data, BinaryFrame, nil + } + return nil, UnknownFrame, ErrNotSupported +} + +func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + switch data := v.(type) { + case *string: + *data = string(msg) + return nil + case *[]byte: + *data = msg + return nil + } + return ErrNotSupported +} + +/* +Message is a codec to send/receive text/binary data in a frame on WebSocket connection. +To send/receive text frame, use string type. +To send/receive binary frame, use []byte type. 
+ +Trivial usage: + + import "websocket" + + // receive text frame + var message string + websocket.Message.Receive(ws, &message) + + // send text frame + message = "hello" + websocket.Message.Send(ws, message) + + // receive binary frame + var data []byte + websocket.Message.Receive(ws, &data) + + // send binary frame + data = []byte{0, 1, 2} + websocket.Message.Send(ws, data) + +*/ +var Message = Codec{marshal, unmarshal} + +func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) { + msg, err = json.Marshal(v) + return msg, TextFrame, err +} + +func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + return json.Unmarshal(msg, v) +} + +/* +JSON is a codec to send/receive JSON data in a frame from a WebSocket connection. + +Trivial usage: + + import "websocket" + + type T struct { + Msg string + Count int + } + + // receive JSON type T + var data T + websocket.JSON.Receive(ws, &data) + + // send JSON type T + websocket.JSON.Send(ws, data) +*/ +var JSON = Codec{jsonMarshal, jsonUnmarshal} diff --git a/vendor/golang.org/x/net/websocket/websocket_test.go b/vendor/golang.org/x/net/websocket/websocket_test.go new file mode 100644 index 00000000..05b7e535 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket_test.go @@ -0,0 +1,587 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bytes" + "fmt" + "io" + "log" + "net" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +var serverAddr string +var once sync.Once + +func echoServer(ws *Conn) { + defer ws.Close() + io.Copy(ws, ws) +} + +type Count struct { + S string + N int +} + +func countServer(ws *Conn) { + defer ws.Close() + for { + var count Count + err := JSON.Receive(ws, &count) + if err != nil { + return + } + count.N++ + count.S = strings.Repeat(count.S, count.N) + err = JSON.Send(ws, count) + if err != nil { + return + } + } +} + +type testCtrlAndDataHandler struct { + hybiFrameHandler +} + +func (h *testCtrlAndDataHandler) WritePing(b []byte) (int, error) { + h.hybiFrameHandler.conn.wio.Lock() + defer h.hybiFrameHandler.conn.wio.Unlock() + w, err := h.hybiFrameHandler.conn.frameWriterFactory.NewFrameWriter(PingFrame) + if err != nil { + return 0, err + } + n, err := w.Write(b) + w.Close() + return n, err +} + +func ctrlAndDataServer(ws *Conn) { + defer ws.Close() + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + go func() { + for i := 0; ; i++ { + var b []byte + if i%2 != 0 { // with or without payload + b = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-SERVER", i)) + } + if _, err := h.WritePing(b); err != nil { + break + } + if _, err := h.WritePong(b); err != nil { // unsolicited pong + break + } + time.Sleep(10 * time.Millisecond) + } + }() + + b := make([]byte, 128) + for { + n, err := ws.Read(b) + if err != nil { + break + } + if _, err := ws.Write(b[:n]); err != nil { + break + } + } +} + +func subProtocolHandshake(config *Config, req *http.Request) error { + for _, proto := range config.Protocol { + if proto == "chat" { + config.Protocol = []string{proto} + return nil + } + } + return ErrBadWebSocketProtocol +} + +func subProtoServer(ws *Conn) { + for _, proto := range ws.Config().Protocol { + io.WriteString(ws, proto) + } +} 
+ +func startServer() { + http.Handle("/echo", Handler(echoServer)) + http.Handle("/count", Handler(countServer)) + http.Handle("/ctrldata", Handler(ctrlAndDataServer)) + subproto := Server{ + Handshake: subProtocolHandshake, + Handler: Handler(subProtoServer), + } + http.Handle("/subproto", subproto) + server := httptest.NewServer(nil) + serverAddr = server.Listener.Addr().String() + log.Print("Test WebSocket server listening on ", serverAddr) +} + +func newConfig(t *testing.T, path string) *Config { + config, _ := NewConfig(fmt.Sprintf("ws://%s%s", serverAddr, path), "http://localhost") + return config +} + +func TestEcho(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + msg := []byte("hello, world\n") + if _, err := conn.Write(msg); err != nil { + t.Errorf("Write: %v", err) + } + var actual_msg = make([]byte, 512) + n, err := conn.Read(actual_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + actual_msg = actual_msg[0:n] + if !bytes.Equal(msg, actual_msg) { + t.Errorf("Echo: expected %q got %q", msg, actual_msg) + } + conn.Close() +} + +func TestAddr(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + ra := conn.RemoteAddr().String() + if !strings.HasPrefix(ra, "ws://") || !strings.HasSuffix(ra, "/echo") { + t.Errorf("Bad remote addr: %v", ra) + } + la := conn.LocalAddr().String() + if !strings.HasPrefix(la, "http://") { + t.Errorf("Bad local addr: %v", la) + } + conn.Close() +} + +func TestCount(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := 
net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/count"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + var count Count + count.S = "hello" + if err := JSON.Send(conn, count); err != nil { + t.Errorf("Write: %v", err) + } + if err := JSON.Receive(conn, &count); err != nil { + t.Errorf("Read: %v", err) + } + if count.N != 1 { + t.Errorf("count: expected %d got %d", 1, count.N) + } + if count.S != "hello" { + t.Errorf("count: expected %q got %q", "hello", count.S) + } + if err := JSON.Send(conn, count); err != nil { + t.Errorf("Write: %v", err) + } + if err := JSON.Receive(conn, &count); err != nil { + t.Errorf("Read: %v", err) + } + if count.N != 2 { + t.Errorf("count: expected %d got %d", 2, count.N) + } + if count.S != "hellohello" { + t.Errorf("count: expected %q got %q", "hellohello", count.S) + } + conn.Close() +} + +func TestWithQuery(t *testing.T) { + once.Do(startServer) + + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + config := newConfig(t, "/echo") + config.Location, err = url.ParseRequestURI(fmt.Sprintf("ws://%s/echo?q=v", serverAddr)) + if err != nil { + t.Fatal("location url", err) + } + + ws, err := NewClient(config, client) + if err != nil { + t.Errorf("WebSocket handshake: %v", err) + return + } + ws.Close() +} + +func testWithProtocol(t *testing.T, subproto []string) (string, error) { + once.Do(startServer) + + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + config := newConfig(t, "/subproto") + config.Protocol = subproto + + ws, err := NewClient(config, client) + if err != nil { + return "", err + } + msg := make([]byte, 16) + n, err := ws.Read(msg) + if err != nil { + return "", err + } + ws.Close() + return string(msg[:n]), nil +} + +func TestWithProtocol(t *testing.T) { + proto, err := testWithProtocol(t, []string{"chat"}) + if err != nil 
{ + t.Errorf("SubProto: unexpected error: %v", err) + } + if proto != "chat" { + t.Errorf("SubProto: expected %q, got %q", "chat", proto) + } +} + +func TestWithTwoProtocol(t *testing.T) { + proto, err := testWithProtocol(t, []string{"test", "chat"}) + if err != nil { + t.Errorf("SubProto: unexpected error: %v", err) + } + if proto != "chat" { + t.Errorf("SubProto: expected %q, got %q", "chat", proto) + } +} + +func TestWithBadProtocol(t *testing.T) { + _, err := testWithProtocol(t, []string{"test"}) + if err != ErrBadStatus { + t.Errorf("SubProto: expected %v, got %v", ErrBadStatus, err) + } +} + +func TestHTTP(t *testing.T) { + once.Do(startServer) + + // If the client did not send a handshake that matches the protocol + // specification, the server MUST return an HTTP response with an + // appropriate error code (such as 400 Bad Request) + resp, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr)) + if err != nil { + t.Errorf("Get: error %#v", err) + return + } + if resp == nil { + t.Error("Get: resp is null") + return + } + if resp.StatusCode != http.StatusBadRequest { + t.Errorf("Get: expected %q got %q", http.StatusBadRequest, resp.StatusCode) + } +} + +func TestTrailingSpaces(t *testing.T) { + // http://code.google.com/p/go/issues/detail?id=955 + // The last runs of this create keys with trailing spaces that should not be + // generated by the client. 
+ once.Do(startServer) + config := newConfig(t, "/echo") + for i := 0; i < 30; i++ { + // body + ws, err := DialConfig(config) + if err != nil { + t.Errorf("Dial #%d failed: %v", i, err) + break + } + ws.Close() + } +} + +func TestDialConfigBadVersion(t *testing.T) { + once.Do(startServer) + config := newConfig(t, "/echo") + config.Version = 1234 + + _, err := DialConfig(config) + + if dialerr, ok := err.(*DialError); ok { + if dialerr.Err != ErrBadProtocolVersion { + t.Errorf("dial expected err %q but got %q", ErrBadProtocolVersion, dialerr.Err) + } + } +} + +func TestSmallBuffer(t *testing.T) { + // http://code.google.com/p/go/issues/detail?id=1145 + // Read should be able to handle reading a fragment of a frame. + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + msg := []byte("hello, world\n") + if _, err := conn.Write(msg); err != nil { + t.Errorf("Write: %v", err) + } + var small_msg = make([]byte, 8) + n, err := conn.Read(small_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + if !bytes.Equal(msg[:len(small_msg)], small_msg) { + t.Errorf("Echo: expected %q got %q", msg[:len(small_msg)], small_msg) + } + var second_msg = make([]byte, len(msg)) + n, err = conn.Read(second_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + second_msg = second_msg[0:n] + if !bytes.Equal(msg[len(small_msg):], second_msg) { + t.Errorf("Echo: expected %q got %q", msg[len(small_msg):], second_msg) + } + conn.Close() +} + +var parseAuthorityTests = []struct { + in *url.URL + out string +}{ + { + &url.URL{ + Scheme: "ws", + Host: "www.google.com", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "wss", + Host: "www.google.com", + }, + "www.google.com:443", + }, + { + &url.URL{ + Scheme: "ws", + Host: "www.google.com:80", + }, + 
"www.google.com:80", + }, + { + &url.URL{ + Scheme: "wss", + Host: "www.google.com:443", + }, + "www.google.com:443", + }, + // some invalid ones for parseAuthority. parseAuthority doesn't + // concern itself with the scheme unless it actually knows about it + { + &url.URL{ + Scheme: "http", + Host: "www.google.com", + }, + "www.google.com", + }, + { + &url.URL{ + Scheme: "http", + Host: "www.google.com:80", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "asdf", + Host: "127.0.0.1", + }, + "127.0.0.1", + }, + { + &url.URL{ + Scheme: "asdf", + Host: "www.google.com", + }, + "www.google.com", + }, +} + +func TestParseAuthority(t *testing.T) { + for _, tt := range parseAuthorityTests { + out := parseAuthority(tt.in) + if out != tt.out { + t.Errorf("got %v; want %v", out, tt.out) + } + } +} + +type closerConn struct { + net.Conn + closed int // count of the number of times Close was called +} + +func (c *closerConn) Close() error { + c.closed++ + return c.Conn.Close() +} + +func TestClose(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/11454") + } + + once.Do(startServer) + + conn, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + cc := closerConn{Conn: conn} + + client, err := NewClient(newConfig(t, "/echo"), &cc) + if err != nil { + t.Fatalf("WebSocket handshake: %v", err) + } + + // set the deadline to ten minutes ago, which will have expired by the time + // client.Close sends the close status frame. 
+ conn.SetDeadline(time.Now().Add(-10 * time.Minute)) + + if err := client.Close(); err == nil { + t.Errorf("ws.Close(): expected error, got %v", err) + } + if cc.closed < 1 { + t.Fatalf("ws.Close(): expected underlying ws.rwc.Close to be called > 0 times, got: %v", cc.closed) + } +} + +var originTests = []struct { + req *http.Request + origin *url.URL +}{ + { + req: &http.Request{ + Header: http.Header{ + "Origin": []string{"http://www.example.com"}, + }, + }, + origin: &url.URL{ + Scheme: "http", + Host: "www.example.com", + }, + }, + { + req: &http.Request{}, + }, +} + +func TestOrigin(t *testing.T) { + conf := newConfig(t, "/echo") + conf.Version = ProtocolVersionHybi13 + for i, tt := range originTests { + origin, err := Origin(conf, tt.req) + if err != nil { + t.Error(err) + continue + } + if !reflect.DeepEqual(origin, tt.origin) { + t.Errorf("#%d: got origin %v; want %v", i, origin, tt.origin) + continue + } + } +} + +func TestCtrlAndData(t *testing.T) { + once.Do(startServer) + + c, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal(err) + } + ws, err := NewClient(newConfig(t, "/ctrldata"), c) + if err != nil { + t.Fatal(err) + } + defer ws.Close() + + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + b := make([]byte, 128) + for i := 0; i < 2; i++ { + data := []byte(fmt.Sprintf("#%d-DATA-FRAME-FROM-CLIENT", i)) + if _, err := ws.Write(data); err != nil { + t.Fatalf("#%d: %v", i, err) + } + var ctrl []byte + if i%2 != 0 { // with or without payload + ctrl = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-CLIENT", i)) + } + if _, err := h.WritePing(ctrl); err != nil { + t.Fatalf("#%d: %v", i, err) + } + n, err := ws.Read(b) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + if !bytes.Equal(b[:n], data) { + t.Fatalf("#%d: got %v; want %v", i, b[:n], data) + } + } +} diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf.go b/vendor/golang.org/x/net/xsrftoken/xsrf.go new file mode 100644 index 
00000000..8d218787 --- /dev/null +++ b/vendor/golang.org/x/net/xsrftoken/xsrf.go @@ -0,0 +1,88 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xsrftoken provides methods for generating and validating secure XSRF tokens. +package xsrftoken // import "golang.org/x/net/xsrftoken" + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "strconv" + "strings" + "time" +) + +// Timeout is the duration for which XSRF tokens are valid. +// It is exported so clients may set cookie timeouts that match generated tokens. +const Timeout = 24 * time.Hour + +// clean sanitizes a string for inclusion in a token by replacing all ":"s. +func clean(s string) string { + return strings.Replace(s, ":", "_", -1) +} + +// Generate returns a URL-safe secure XSRF token that expires in 24 hours. +// +// key is a secret key for your application. +// userID is a unique identifier for the user. +// actionID is the action the user is taking (e.g. POSTing to a particular path). +func Generate(key, userID, actionID string) string { + return generateTokenAtTime(key, userID, actionID, time.Now()) +} + +// generateTokenAtTime is like Generate, but returns a token that expires 24 hours from now. +func generateTokenAtTime(key, userID, actionID string, now time.Time) string { + // Round time up and convert to milliseconds. + milliTime := (now.UnixNano() + 1e6 - 1) / 1e6 + + h := hmac.New(sha1.New, []byte(key)) + fmt.Fprintf(h, "%s:%s:%d", clean(userID), clean(actionID), milliTime) + + // Get the padded base64 string then removing the padding. + tok := string(h.Sum(nil)) + tok = base64.URLEncoding.EncodeToString([]byte(tok)) + tok = strings.TrimRight(tok, "=") + + return fmt.Sprintf("%s:%d", tok, milliTime) +} + +// Valid reports whether a token is a valid, unexpired token returned by Generate. 
+func Valid(token, key, userID, actionID string) bool { + return validTokenAtTime(token, key, userID, actionID, time.Now()) +} + +// validTokenAtTime reports whether a token is valid at the given time. +func validTokenAtTime(token, key, userID, actionID string, now time.Time) bool { + // Extract the issue time of the token. + sep := strings.LastIndex(token, ":") + if sep < 0 { + return false + } + millis, err := strconv.ParseInt(token[sep+1:], 10, 64) + if err != nil { + return false + } + issueTime := time.Unix(0, millis*1e6) + + // Check that the token is not expired. + if now.Sub(issueTime) >= Timeout { + return false + } + + // Check that the token is not from the future. + // Allow 1 minute grace period in case the token is being verified on a + // machine whose clock is behind the machine that issued the token. + if issueTime.After(now.Add(1 * time.Minute)) { + return false + } + + expected := generateTokenAtTime(key, userID, actionID, issueTime) + + // Check that the token matches the expected value. + // Use constant time comparison to avoid timing attacks. + return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1 +} diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf_test.go b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go new file mode 100644 index 00000000..9933f867 --- /dev/null +++ b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go @@ -0,0 +1,83 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xsrftoken + +import ( + "encoding/base64" + "testing" + "time" +) + +const ( + key = "quay" + userID = "12345678" + actionID = "POST /form" +) + +var ( + now = time.Now() + oneMinuteFromNow = now.Add(1 * time.Minute) +) + +func TestValidToken(t *testing.T) { + tok := generateTokenAtTime(key, userID, actionID, now) + if !validTokenAtTime(tok, key, userID, actionID, oneMinuteFromNow) { + t.Error("One second later: Expected token to be valid") + } + if !validTokenAtTime(tok, key, userID, actionID, now.Add(Timeout-1*time.Nanosecond)) { + t.Error("Just before timeout: Expected token to be valid") + } + if !validTokenAtTime(tok, key, userID, actionID, now.Add(-1*time.Minute+1*time.Millisecond)) { + t.Error("One minute in the past: Expected token to be valid") + } +} + +// TestSeparatorReplacement tests that separators are being correctly substituted +func TestSeparatorReplacement(t *testing.T) { + tok := generateTokenAtTime("foo:bar", "baz", "wah", now) + tok2 := generateTokenAtTime("foo", "bar:baz", "wah", now) + if tok == tok2 { + t.Errorf("Expected generated tokens to be different") + } +} + +func TestInvalidToken(t *testing.T) { + invalidTokenTests := []struct { + name, key, userID, actionID string + t time.Time + }{ + {"Bad key", "foobar", userID, actionID, oneMinuteFromNow}, + {"Bad userID", key, "foobar", actionID, oneMinuteFromNow}, + {"Bad actionID", key, userID, "foobar", oneMinuteFromNow}, + {"Expired", key, userID, actionID, now.Add(Timeout + 1*time.Millisecond)}, + {"More than 1 minute from the future", key, userID, actionID, now.Add(-1*time.Nanosecond - 1*time.Minute)}, + } + + tok := generateTokenAtTime(key, userID, actionID, now) + for _, itt := range invalidTokenTests { + if validTokenAtTime(tok, itt.key, itt.userID, itt.actionID, itt.t) { + t.Errorf("%v: Expected token to be invalid", itt.name) + } + } +} + +// TestValidateBadData primarily tests that no unexpected panics are triggered +// during parsing +func TestValidateBadData(t 
*testing.T) { + badDataTests := []struct { + name, tok string + }{ + {"Invalid Base64", "ASDab24(@)$*=="}, + {"No delimiter", base64.URLEncoding.EncodeToString([]byte("foobar12345678"))}, + {"Invalid time", base64.URLEncoding.EncodeToString([]byte("foobar:foobar"))}, + {"Wrong length", "1234" + generateTokenAtTime(key, userID, actionID, now)}, + } + + for _, bdt := range badDataTests { + if validTokenAtTime(bdt.tok, key, userID, actionID, oneMinuteFromNow) { + t.Errorf("%v: Expected token to be invalid", bdt.name) + } + } +} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh old mode 100644 new mode 100755 diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 37c5b860..e14b464a 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -33,7 +33,7 @@ // Package codes defines the canonical error codes used by gRPC. It is // consistent across various languages. -package codes +package codes // import "google.golang.org/grpc/codes" // A Code is an unsigned 32-bit error code as defined in the gRPC spec. type Code uint32 diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh old mode 100644 new mode 100755 diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 0b0b89b6..681f64e4 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -35,7 +35,7 @@ // which encapsulate all the state needed by a client to authenticate with a // server and make various assertions, e.g., about the client's identity, role, // or whether it is authorized to make a particular call. 
-package credentials +package credentials // import "google.golang.org/grpc/credentials" import ( "crypto/tls" diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index b4c0e740..a35f2188 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -3,4 +3,4 @@ Package grpc implements an RPC system called gRPC. See www.grpc.io for more information about gRPC. */ -package grpc +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 2cc09be4..3b293307 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -34,7 +34,7 @@ /* Package grpclog defines logging for grpc. */ -package grpclog +package grpclog // import "google.golang.org/grpc/grpclog" import ( "log" diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go old mode 100644 new mode 100755 diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 58469ddd..52070dbe 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -32,7 +32,7 @@ */ // Package metadata define the structure of the metadata supported by gRPC library. -package metadata +package metadata // import "google.golang.org/grpc/metadata" import ( "encoding/base64" diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index d4ca0b7a..87fdf532 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -35,7 +35,7 @@ Package transport defines and implements message oriented communication channel to complete various transactions (e.g., an RPC). 
*/ -package transport +package transport // import "google.golang.org/grpc/transport" import ( "bytes"